From noreply at buildbot.pypy.org Thu Dec 1 01:31:27 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Thu, 1 Dec 2011 01:31:27 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: Fixed a major bug concerning Signed/Unsigned and setup() errors. Message-ID: <20111201003127.28F108208A@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50030:da6c8b877b3f Date: 2011-12-01 01:30 +0100 http://bitbucket.org/pypy/pypy/changeset/da6c8b877b3f/ Log: Fixed a major bug concerning Signed/Unsigned and setup() errors. Finding this bug did cost me over two days. I was not aware that the g_prerequisite.h does not get included into the special Python.h, which I was also not aware of. I have not so much problems finding failures, but setup() errors in masses are a special challenge ;-) Hopefully this changes a lot in the Buildbot output. diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -35,6 +35,8 @@ # define Py_LOCAL_INLINE(type) static __inline type __fastcall #endif +#include "signed_defn.h" + /* Deprecated DL_IMPORT and DL_EXPORT macros */ #ifdef _WIN32 # if defined(Py_BUILD_CORE) diff --git a/pypy/translator/c/src/g_prerequisite.h b/pypy/translator/c/src/g_prerequisite.h --- a/pypy/translator/c/src/g_prerequisite.h +++ b/pypy/translator/c/src/g_prerequisite.h @@ -8,14 +8,7 @@ # include "Python.h" #endif -#ifdef _WIN64 -# define Signed __int64 -# define SIGNED_MIN LLONG_MIN -#else -# define Signed long -# define SIGNED_MIN LONG_MIN -#endif -#define Unsigned unsigned Signed +#include "signed_defn.h" #ifdef _WIN32 # include /* needed, otherwise _lseeki64 truncates to 32-bits (??) */ diff --git a/pypy/translator/c/src/signed_defn.h b/pypy/translator/c/src/signed_defn.h new file mode 100644 --- /dev/null +++ b/pypy/translator/c/src/signed_defn.h @@ -0,0 +1,13 @@ +/* this file defines Signed and Unsigned */ + +#ifdef _WIN64 + typedef __int64 Signed; + typedef unsigned __int64 Unsigned; +# define SIGNED_MIN LLONG_MIN +#else + typedef long Signed; + typedef unsigned long Unsigned; +# define SIGNED_MIN LONG_MIN +#endif + +/* end of signed_def.h */ From notifications-noreply at bitbucket.org Thu Dec 1 03:27:02 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Thu, 01 Dec 2011 02:27:02 -0000 Subject: [pypy-commit] Notification: jitviewer Message-ID: <20111201022702.20808.7203@bitbucket02.managed.contegix.com> You have received a notification from gabriel_h. Hi, I forked jitviewer. My fork is at https://bitbucket.org/gabriel_h/jitviewer. -- Disable notifications at https://bitbucket.org/account/notifications/ From pullrequests-noreply at bitbucket.org Thu Dec 1 11:20:49 2011 From: pullrequests-noreply at bitbucket.org (Bitbucket) Date: Thu, 01 Dec 2011 10:20:49 -0000 Subject: [pypy-commit] [OPEN] Pull request #2 for pypy/jitviewer: Makes jitviewer compatible with Flask v0.8 Message-ID: A new pull request has been opened by Gabriel Hege. gabriel_h/pypy-jitviewer has changes to be pulled into pypy/jitviewer. 
https://bitbucket.org/pypy/jitviewer/pull-request/2/makes-jitviewer-compatible-with-flask-v08 Title: Makes jitviewer compatible with Flask v0.8 Used to give the following error message: Traceback (most recent call last): File "app_main.py", line 51, in run_toplevel File "/home/gabriel/tmpfs/pypy/bin/jitviewer.py", line 7, in execfile(__file__) File "/home/gabriel/src/pypy-jitviewer/bin/jitviewer.py", line 238, in main() File "/home/gabriel/src/pypy-jitviewer/bin/jitviewer.py", line 195, in main app = OverrideFlask('__name__', root_path=PATH) File "/home/gabriel/src/pypy-jitviewer/bin/jitviewer.py", line 157, in __init__ flask.Flask.__init__(self, *args, **kwargs) File "/home/gabriel/tmpfs/pypy/site-packages/flask/app.py", line 277, in __init__ template_folder=template_folder) File "/home/gabriel/tmpfs/pypy/site-packages/flask/helpers.py", line 572, in __init__ self.root_path = get_root_path(self.import_name) File "/home/gabriel/tmpfs/pypy/site-packages/flask/helpers.py", line 482, in get_root_path __import__(import_name) ImportError: No module named __name__ Changes to be pulled: -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. From notifications-noreply at bitbucket.org Thu Dec 1 11:24:07 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Thu, 01 Dec 2011 10:24:07 -0000 Subject: [pypy-commit] [COMMENT] Pull request #2 for pypy/jitviewer: Makes jitviewer compatible with Flask v0.8 Message-ID: <20111201102407.31910.7944@bitbucket13.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/jitviewer/pull-request/2/makes-jitviewer-compatible-with-flask-v08#comment-1240 fijal said: funnily enough I just pushed changes that make it work, for unrelated reasons. Speaking of which, can you think about a way to make templates and static also work from the checkout? Cheers, fijal -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. 
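For context on the traceback above: Flask 0.8's get_root_path() calls __import__(import_name), so constructing the app with the literal string '__name__' asks Python to import a module literally named "__name__", which does not exist. A minimal sketch of that failure mode (mine, not part of the archived commits; runnable without Flask installed):

    # What OverrideFlask('__name__', ...) ends up triggering under Flask 0.8:
    try:
        __import__('__name__')      # there is no module called "__name__"
    except ImportError as exc:
        print(exc)                  # ImportError: No module named __name__

    # Passing a name that really is importable works, e.g. the current module:
    __import__(__name__)            # '__main__' when run as a script

The fix that eventually landed (changeset c87055494d46, later in this digest) takes the simpler route and passes the real package name: app = OverrideFlask('jitviewer', root_path=PATH).
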
From noreply at buildbot.pypy.org Thu Dec 1 11:34:31 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 1 Dec 2011 11:34:31 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: Implement instance_ptr_eq and instance_ptr_ne Message-ID: <20111201103431.DE8E28208A@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50031:86dcb23ea814 Date: 2011-12-01 11:34 +0100 http://bitbucket.org/pypy/pypy/changeset/86dcb23ea814/ Log: Implement instance_ptr_eq and instance_ptr_ne diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -155,10 +155,10 @@ emit_guard_uint_lt = gen_emit_cmp_op_guard('uint_lt', c.LO) emit_guard_uint_ge = gen_emit_cmp_op_guard('uint_ge', c.HS) - emit_op_ptr_eq = emit_op_int_eq - emit_op_ptr_ne = emit_op_int_ne - emit_guard_ptr_eq = emit_guard_int_eq - emit_guard_ptr_ne = emit_guard_int_ne + emit_op_ptr_eq = emit_op_instance_ptr_eq = emit_op_int_eq + emit_op_ptr_ne = emit_op_instance_ptr_ne = emit_op_int_ne + emit_guard_ptr_eq = emit_guard_instance_ptr_eq = emit_guard_int_eq + emit_guard_ptr_ne = emit_guard_instance_ptr_ne = emit_guard_int_ne emit_op_int_add_ovf = emit_op_int_add emit_op_int_sub_ovf = emit_op_int_sub From notifications-noreply at bitbucket.org Thu Dec 1 11:41:34 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Thu, 01 Dec 2011 10:41:34 -0000 Subject: [pypy-commit] [COMMENT] Pull request #2 for pypy/jitviewer: Makes jitviewer compatible with Flask v0.8 Message-ID: <20111201104134.19407.92700@bitbucket02.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/jitviewer/pull-request/2/makes-jitviewer-compatible-with-flask-v08#comment-1241 Gabriel Hege (gabriel_h) said: I've got no idea. I don't know Flask. I just looked at the backtrace and removing the comments seemed like the right thing to do -- and it worked... -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. From notifications-noreply at bitbucket.org Thu Dec 1 11:42:50 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Thu, 01 Dec 2011 10:42:50 -0000 Subject: [pypy-commit] [COMMENT] Pull request #2 for pypy/jitviewer: Makes jitviewer compatible with Flask v0.8 Message-ID: <20111201104250.5853.64276@bitbucket12.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/jitviewer/pull-request/2/makes-jitviewer-compatible-with-flask-v08#comment-1242 fijal said: ok, cool :) the default should work just fine (I pass 'jitviewer' instead of __name__ but it should nto matter). -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. 
From noreply at buildbot.pypy.org Thu Dec 1 11:50:28 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Thu, 1 Dec 2011 11:50:28 +0100 (CET) Subject: [pypy-commit] pypy default: added interp_magic method to get the strategy of a list on the application level Message-ID: <20111201105028.9C8768208A@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: Changeset: r50032:497907d5d515 Date: 2011-12-01 11:50 +0100 http://bitbucket.org/pypy/pypy/changeset/497907d5d515/ Log: added interp_magic method to get the strategy of a list on the application level diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -27,6 +27,7 @@ 'builtinify' : 'interp_magic.builtinify', 'lookup_special' : 'interp_magic.lookup_special', 'do_what_I_mean' : 'interp_magic.do_what_I_mean', + 'list_strategy' : 'interp_magic.list_strategy', } submodules = { diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -4,7 +4,7 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import IndexCache - +from pypy.objspace.std.listobject import W_ListObject, IntegerListStrategy, StringListStrategy, FloatListStrategy, RangeListStrategy, EmptyListStrategy, ObjectListStrategy def internal_repr(space, w_object): return space.wrap('%r' % (w_object,)) @@ -73,3 +73,9 @@ def do_what_I_mean(space): return space.wrap(42) + +def list_strategy(space, w_list): + str_type = None + if isinstance(w_list, W_ListObject): + str_type = w_list.strategy._type + return space.wrap(str_type) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -5,7 +5,7 @@ def setup_class(cls): if option.runappdirect: py.test.skip("does not make sense on pypy-c") - cls.space = gettestobjspace(**{"objspace.usemodules.select": False}) + cls.space = gettestobjspace(**{"objspace.usemodules.select": False, "objspace.std.withrangelist": True}) def test__isfake(self): from __pypy__ import isfake @@ -54,3 +54,20 @@ from __pypy__ import do_what_I_mean x = do_what_I_mean() assert x == 42 + + def test_list_strategy(self): + from __pypy__ import list_strategy + l = [1,2,3] + assert list_strategy(l) == "int" + l = ["a","b","c"] + assert list_strategy(l) == "str" + l = [1.1,2.2,3.3] + assert list_strategy(l) == "float" + l = range(3) + assert list_strategy(l) == "range" + l = [1,"b",3] + assert list_strategy(l) == "object" + l = [] + assert list_strategy(l) == "empty" + o = 5 + assert list_strategy(o) == None diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -326,6 +326,8 @@ to the added item. W_Lists do not switch back to EmptyListStrategy when becoming empty again.""" + _type = "empty" + def __init__(self, space): ListStrategy.__init__(self, space) # cache an empty list that is used whenever getitems is called (i.e. 
sorting) @@ -426,6 +428,8 @@ On any operation destroying the range (inserting, appending non-ints) the strategy is switched to IntegerListStrategy.""" + _type = "range" + def switch_to_integer_strategy(self, w_list): items = self._getitems_range(w_list, False) strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) @@ -864,6 +868,7 @@ class ObjectListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None + _type = "object" def unwrap(self, w_obj): return w_obj @@ -892,6 +897,7 @@ class IntegerListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0 + _type = "int" def wrap(self, intval): return self.space.wrap(intval) @@ -918,6 +924,7 @@ class FloatListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0.0 + _type = "float" def wrap(self, floatval): return self.space.wrap(floatval) @@ -944,6 +951,7 @@ class StringListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None + _type = "str" def wrap(self, stringval): return self.space.wrap(stringval) From noreply at buildbot.pypy.org Thu Dec 1 12:22:10 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Thu, 1 Dec 2011 12:22:10 +0100 (CET) Subject: [pypy-commit] pypy default: better name for application representation of list strategies Message-ID: <20111201112210.235E28208A@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: Changeset: r50033:92f0d0594f88 Date: 2011-12-01 12:21 +0100 http://bitbucket.org/pypy/pypy/changeset/92f0d0594f88/ Log: better name for application representation of list strategies diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -4,7 +4,6 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import IndexCache -from pypy.objspace.std.listobject import W_ListObject, IntegerListStrategy, StringListStrategy, FloatListStrategy, RangeListStrategy, EmptyListStrategy, ObjectListStrategy def internal_repr(space, w_object): return space.wrap('%r' % (w_object,)) @@ -75,7 +74,8 @@ return space.wrap(42) def list_strategy(space, w_list): + from pypy.objspace.std.listobject import W_ListObject str_type = None if isinstance(w_list, W_ListObject): - str_type = w_list.strategy._type + str_type = w_list.strategy._applevel_repr return space.wrap(str_type) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -326,7 +326,7 @@ to the added item. 
W_Lists do not switch back to EmptyListStrategy when becoming empty again.""" - _type = "empty" + _applevel_repr = "empty" def __init__(self, space): ListStrategy.__init__(self, space) @@ -428,7 +428,7 @@ On any operation destroying the range (inserting, appending non-ints) the strategy is switched to IntegerListStrategy.""" - _type = "range" + _applevel_repr = "range" def switch_to_integer_strategy(self, w_list): items = self._getitems_range(w_list, False) @@ -868,7 +868,7 @@ class ObjectListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None - _type = "object" + _applevel_repr = "object" def unwrap(self, w_obj): return w_obj @@ -897,7 +897,7 @@ class IntegerListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0 - _type = "int" + _applevel_repr = "int" def wrap(self, intval): return self.space.wrap(intval) @@ -924,7 +924,7 @@ class FloatListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0.0 - _type = "float" + _applevel_repr = "float" def wrap(self, floatval): return self.space.wrap(floatval) @@ -951,7 +951,7 @@ class StringListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None - _type = "str" + _applevel_repr = "str" def wrap(self, stringval): return self.space.wrap(stringval) From noreply at buildbot.pypy.org Thu Dec 1 14:06:45 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 1 Dec 2011 14:06:45 +0100 (CET) Subject: [pypy-commit] pypy default: make the JIT aware of the downcasts that are present in rtyped-flowgraphs. This Message-ID: <20111201130645.491518208A@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r50034:93a45b7c9431 Date: 2011-12-01 14:06 +0100 http://bitbucket.org/pypy/pypy/changeset/93a45b7c9431/ Log: make the JIT aware of the downcasts that are present in rtyped- flowgraphs. This allows the JIT to sometimes find out the class of a variable without having to produce a guard_class. 
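A hedged illustration of the pattern this changeset targets (mine, not from the diff): when annotation proves that a downcast lands on a class with no subclasses, the tracer can record that class instead of emitting a guard_class. The test added below (test_annotation_gives_knowledge_to_tracer) exercises exactly this; a stripped-down version of the idea:

    class Base(object):
        pass

    class A(Base):                    # A has no subclasses, so a downcast to A is precise
        def __init__(self, a):
            self.a = a
        def f(self):
            return self.a

    def use(obj):
        if isinstance(obj, A):        # annotation narrows obj here; rtyping emits a cast_pointer
            return obj.f()            # traced as record_known_class, no guard_class needed
        return -1

    assert use(A(41)) == 41 and use(Base()) == -1

Whether the shortcut applies is decided at trace time by checking the vtable's subclassrange, as in opimpl_record_known_class in the diff below.
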
diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -207,7 +207,19 @@ if op.args[0] in self.vable_array_vars: self.vable_array_vars[op.result]= self.vable_array_vars[op.args[0]] - rewrite_op_cast_pointer = rewrite_op_same_as + def rewrite_op_cast_pointer(self, op): + newop = self.rewrite_op_same_as(op) + assert newop is None + if (self._is_rclass_instance(op.args[0]) and + self._is_rclass_instance(op.result)): + FROM = op.args[0].concretetype.TO + TO = op.result.concretetype.TO + if lltype._castdepth(TO, FROM) > 0: + vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, TO) + const_vtable = Constant(vtable, lltype.typeOf(vtable)) + return [None, # hack, do the right renaming from op.args[0] to op.result + SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -518,6 +518,9 @@ @arguments("r") def bhimpl_mark_opaque_ptr(a): pass + @arguments("r", "i") + def bhimpl_record_known_class(a, b): + pass @arguments("i", returns="i") def bhimpl_int_copy(a): diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -260,6 +260,16 @@ def optimize_GUARD_FALSE(self, op): self.optimize_guard(op, CONST_0) + def optimize_RECORD_KNOWN_CLASS(self, op): + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) + assert isinstance(expectedclassbox, Const) + realclassbox = value.get_constant_class(self.optimizer.cpu) + if realclassbox is not None: + assert realclassbox.same_constant(expectedclassbox) + return + value.make_constant_class(expectedclassbox, None) + def optimize_GUARD_CLASS(self, op): value = self.getvalue(op.getarg(0)) expectedclassbox = op.getarg(1) diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -28,6 +28,9 @@ def optimize_MARK_OPAQUE_PTR(self, op): pass + def optimize_RECORD_KNOWN_CLASS(self, op): + pass + dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -6482,6 +6482,21 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() + def test_record_known_class(self): + ops = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + record_known_class(p1, ConstClass(node_vtable)) + guard_class(p1, ConstClass(node_vtable)) [] + jump(p1) + """ + expected = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + jump(p1) + """ + self.optimize_loop(ops, expected) + def test_quasi_immut(self): ops = """ [p0, p1, i0] diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -243,6 +243,18 @@ def opimpl_mark_opaque_ptr(self, box): 
return self.execute(rop.MARK_OPAQUE_PTR, box) + @arguments("box", "box") + def opimpl_record_known_class(self, box, clsbox): + from pypy.rpython.lltypesystem import llmemory + if self.metainterp.heapcache.is_class_known(box): + return + adr = clsbox.getaddr() + bounding_class = llmemory.cast_adr_to_ptr(adr, rclass.CLASSTYPE) + if bounding_class.subclassrange_max - bounding_class.subclassrange_min == 1: + # precise class knowledge, this can be used + self.execute(rop.RECORD_KNOWN_CLASS, box, clsbox) + self.metainterp.heapcache.class_now_known(box) + @arguments("box") def _opimpl_any_return(self, box): self.metainterp.finishframe(box) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -494,6 +494,7 @@ 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr + 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -3585,6 +3585,67 @@ self.interp_operations(f, [5], translationoptions=translationoptions) + def test_annotation_gives_knowledge_to_tracer(self): + class Base(object): + pass + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + assert isinstance(a, A) + z = a.f() + elif x < 0: + assert isinstance(a, B) + z = a.f() + else: + assert isinstance(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): from pypy.rlib.objectmodel import UnboxedValue From noreply at buildbot.pypy.org Thu Dec 1 14:07:55 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 1 Dec 2011 14:07:55 +0100 (CET) Subject: [pypy-commit] pypy int-tag-untag-as-operations: merge default Message-ID: <20111201130755.6A3988208A@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: int-tag-untag-as-operations Changeset: r50035:9cf49191fdb7 Date: 2011-12-01 14:07 +0100 http://bitbucket.org/pypy/pypy/changeset/9cf49191fdb7/ Log: merge default diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -15,6 +15,8 @@ from pypy.translator.simplify import get_funcobj from pypy.translator.unsimplify import varoftype +class UnsupportedMallocFlags(Exception): + pass def transform_graph(graph, cpu=None, callcontrol=None, 
portal_jd=None): """Transform a control flow graph to make it suitable for @@ -205,7 +207,19 @@ if op.args[0] in self.vable_array_vars: self.vable_array_vars[op.result]= self.vable_array_vars[op.args[0]] - rewrite_op_cast_pointer = rewrite_op_same_as + def rewrite_op_cast_pointer(self, op): + newop = self.rewrite_op_same_as(op) + assert newop is None + if (self._is_rclass_instance(op.args[0]) and + self._is_rclass_instance(op.result)): + FROM = op.args[0].concretetype.TO + TO = op.result.concretetype.TO + if lltype._castdepth(TO, FROM) > 0: + vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, TO) + const_vtable = Constant(vtable, lltype.typeOf(vtable)) + return [None, # hack, do the right renaming from op.args[0] to op.result + SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass @@ -485,8 +499,22 @@ def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) ARRAY = op.args[0].value - return self._do_builtin_call(op, 'raw_malloc', + name = 'raw_malloc' + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[2]], extra = (ARRAY,), extrakey = ARRAY) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -599,10 +599,21 @@ return p return _ll_0_alloc_with_del - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, flavor='raw') - return _ll_1_raw_malloc + def build_raw_malloc_builder(zero=False, add_memory_pressure=False, track_allocation=True): + def build_ll_1_raw_malloc(ARRAY): + def _ll_1_raw_malloc(n): + return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, add_memory_pressure=add_memory_pressure) + return _ll_1_raw_malloc + return build_ll_1_raw_malloc + + build_ll_1_raw_malloc = build_raw_malloc_builder() + build_ll_1_raw_malloc_zero = build_raw_malloc_builder(zero=True) + build_ll_1_raw_malloc_zero_add_memory_pressure = build_raw_malloc_builder(zero=True, add_memory_pressure=True) + build_ll_1_raw_malloc_add_memory_pressure = build_raw_malloc_builder(add_memory_pressure=True) + build_ll_1_raw_malloc_no_track_allocation = build_raw_malloc_builder(track_allocation=False) + build_ll_1_raw_malloc_zero_no_track_allocation = build_raw_malloc_builder(zero=True, track_allocation=False) + build_ll_1_raw_malloc_zero_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(zero=True, add_memory_pressure=True, track_allocation=False) + build_ll_1_raw_malloc_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(add_memory_pressure=True, track_allocation=False) def build_ll_1_raw_free(ARRAY): def _ll_1_raw_free(p): diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -1,3 +1,5 @@ + +import py import random try: from itertools import product @@ -15,12 +17,12 @@ from pypy.objspace.flow.model import FunctionGraph, Block, Link from 
pypy.objspace.flow.model import SpaceOperation, Variable, Constant -from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.lltypesystem.module import ll_math from pypy.translator.unsimplify import varoftype from pypy.jit.codewriter import heaptracker, effectinfo from pypy.jit.codewriter.flatten import ListOfKind -from pypy.jit.codewriter.jtransform import Transformer +from pypy.jit.codewriter.jtransform import Transformer, UnsupportedMallocFlags from pypy.jit.metainterp.history import getkind def const(x): @@ -538,6 +540,44 @@ assert op1.opname == '-live-' assert op1.args == [] +def test_raw_malloc(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw'}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_zero(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc_zero' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_unsupported_flag(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'unsupported_flag': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address @@ -1140,4 +1180,4 @@ assert op1.opname == 'mark_opaque_ptr' assert op1.args == [v1] assert op1.result is None - assert op2 is None \ No newline at end of file + assert op2 is None diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -529,6 +529,9 @@ @arguments("r") def bhimpl_mark_opaque_ptr(a): pass + @arguments("r", "i") + def bhimpl_record_known_class(a, b): + pass @arguments("i", returns="i") def bhimpl_int_copy(a): diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -260,6 +260,16 @@ def optimize_GUARD_FALSE(self, op): self.optimize_guard(op, CONST_0) + def optimize_RECORD_KNOWN_CLASS(self, op): + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) + assert isinstance(expectedclassbox, Const) + realclassbox = value.get_constant_class(self.optimizer.cpu) + if realclassbox is not None: + assert realclassbox.same_constant(expectedclassbox) + return + value.make_constant_class(expectedclassbox, None) + def optimize_GUARD_CLASS(self, op): value = self.getvalue(op.getarg(0)) 
expectedclassbox = op.getarg(1) diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -28,6 +28,9 @@ def optimize_MARK_OPAQUE_PTR(self, op): pass + def optimize_RECORD_KNOWN_CLASS(self, op): + pass + dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -6616,6 +6616,21 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() + def test_record_known_class(self): + ops = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + record_known_class(p1, ConstClass(node_vtable)) + guard_class(p1, ConstClass(node_vtable)) [] + jump(p1) + """ + expected = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + jump(p1) + """ + self.optimize_loop(ops, expected) + def test_quasi_immut(self): ops = """ [p0, p1, i0] diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -252,6 +252,18 @@ def opimpl_mark_opaque_ptr(self, box): return self.execute(rop.MARK_OPAQUE_PTR, box) + @arguments("box", "box") + def opimpl_record_known_class(self, box, clsbox): + from pypy.rpython.lltypesystem import llmemory + if self.metainterp.heapcache.is_class_known(box): + return + adr = clsbox.getaddr() + bounding_class = llmemory.cast_adr_to_ptr(adr, rclass.CLASSTYPE) + if bounding_class.subclassrange_max - bounding_class.subclassrange_min == 1: + # precise class knowledge, this can be used + self.execute(rop.RECORD_KNOWN_CLASS, box, clsbox) + self.metainterp.heapcache.class_now_known(box) + @arguments("box") def _opimpl_any_return(self, box): self.metainterp.finishframe(box) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -496,6 +496,7 @@ 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr + 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -3583,6 +3583,68 @@ assert f(5) == 41 translationoptions = {'withsmallfuncsets': 3} self.interp_operations(f, [5], translationoptions=translationoptions) + + def test_annotation_gives_knowledge_to_tracer(self): + class Base(object): + pass + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + assert isinstance(a, A) + z = a.f() + elif x < 0: + assert isinstance(a, B) + z = a.f() + else: + assert isinstance(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + 
res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + + def test_tagged(self): from pypy.rlib.objectmodel import UnboxedValue class Base(object): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -27,6 +27,7 @@ 'builtinify' : 'interp_magic.builtinify', 'lookup_special' : 'interp_magic.lookup_special', 'do_what_I_mean' : 'interp_magic.do_what_I_mean', + 'list_strategy' : 'interp_magic.list_strategy', } submodules = { diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -5,7 +5,6 @@ from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import IndexCache - def internal_repr(space, w_object): return space.wrap('%r' % (w_object,)) @@ -73,3 +72,10 @@ def do_what_I_mean(space): return space.wrap(42) + +def list_strategy(space, w_list): + from pypy.objspace.std.listobject import W_ListObject + str_type = None + if isinstance(w_list, W_ListObject): + str_type = w_list.strategy._applevel_repr + return space.wrap(str_type) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -5,7 +5,7 @@ def setup_class(cls): if option.runappdirect: py.test.skip("does not make sense on pypy-c") - cls.space = gettestobjspace(**{"objspace.usemodules.select": False}) + cls.space = gettestobjspace(**{"objspace.usemodules.select": False, "objspace.std.withrangelist": True}) def test__isfake(self): from __pypy__ import isfake @@ -54,3 +54,20 @@ from __pypy__ import do_what_I_mean x = do_what_I_mean() assert x == 42 + + def test_list_strategy(self): + from __pypy__ import list_strategy + l = [1,2,3] + assert list_strategy(l) == "int" + l = ["a","b","c"] + assert list_strategy(l) == "str" + l = [1.1,2.2,3.3] + assert list_strategy(l) == "float" + l = range(3) + assert list_strategy(l) == "range" + l = [1,"b",3] + assert list_strategy(l) == "object" + l = [] + assert list_strategy(l) == "empty" + o = 5 + assert list_strategy(o) == None diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -326,6 +326,8 @@ to the added item. W_Lists do not switch back to EmptyListStrategy when becoming empty again.""" + _applevel_repr = "empty" + def __init__(self, space): ListStrategy.__init__(self, space) # cache an empty list that is used whenever getitems is called (i.e. 
sorting) @@ -426,6 +428,8 @@ On any operation destroying the range (inserting, appending non-ints) the strategy is switched to IntegerListStrategy.""" + _applevel_repr = "range" + def switch_to_integer_strategy(self, w_list): items = self._getitems_range(w_list, False) strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) @@ -864,6 +868,7 @@ class ObjectListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None + _applevel_repr = "object" def unwrap(self, w_obj): return w_obj @@ -892,6 +897,7 @@ class IntegerListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0 + _applevel_repr = "int" def wrap(self, intval): return self.space.wrap(intval) @@ -918,6 +924,7 @@ class FloatListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0.0 + _applevel_repr = "float" def wrap(self, floatval): return self.space.wrap(floatval) @@ -944,6 +951,7 @@ class StringListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None + _applevel_repr = "str" def wrap(self, stringval): return self.space.wrap(stringval) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -470,11 +470,17 @@ l.extend(iter([1, 2, 3, 4])) assert l is l0 assert l == [1, 1, 2, 3, 4] + l = l0 = ['a'] l.extend(iter(['b', 'c', 'd'])) assert l == ['a', 'b', 'c', 'd'] assert l is l0 + l = l0 = [1.2] + l.extend(iter([2.3, 3.4, 4.5])) + assert l == [1.2, 2.3, 3.4, 4.5] + assert l is l0 + def test_sort(self): l = l0 = [1, 5, 3, 0] l.sort() @@ -493,6 +499,10 @@ l.sort(reverse=True) assert l == ["d", "c", "b", "a"] + l = [3.3, 2.2, 4.4, 1.1, 3.1, 5.5] + l.sort() + assert l == [1.1, 2.2, 3.1, 3.3, 4.4, 5.5] + def test_sort_cmp(self): def lencmp(a,b): return cmp(len(a), len(b)) l = [ 'a', 'fiver', 'tre', '' ] @@ -546,11 +556,19 @@ assert l[-2] == 6 raises(IndexError, "l[len(l)]") raises(IndexError, "l[-len(l)-1]") + l = ['a', 'b', 'c'] assert l[0] == 'a' assert l[-1] == 'c' assert l[-2] == 'b' raises(IndexError, "l[len(l)]") + + l = [1.1, 2.2, 3.3] + assert l[0] == 1.1 + assert l[-1] == 3.3 + assert l[-2] == 2.2 + raises(IndexError, "l[len(l)]") + l = [] raises(IndexError, "l[1]") @@ -588,6 +606,16 @@ assert l is l0 raises(IndexError, "del l[0]") + l = l0 = [1.1, 2.2, 3.3] + del l[0] + assert l == [2.2, 3.3] + del l[-1] + assert l == [2.2] + del l[-1] + assert l == [] + assert l is l0 + raises(IndexError, "del l[0]") + l = range(10) del l[5] assert l == [0, 1, 2, 3, 4, 6, 7, 8, 9] @@ -627,9 +655,15 @@ del l[:] assert l is l0 assert l == [] + l = ['a', 'b'] del l[:] assert l == [] + + l = [1.1, 2.2] + del l[:] + assert l == [] + l = range(5) del l[:] assert l == [] @@ -640,6 +674,11 @@ assert l is l0 assert l == [1,2,3,4,5] + l = l0 = [1.1,2.2,3.3] + l += [4.4,5.5] + assert l is l0 + assert l == [1.1,2.2,3.3,4.4,5.5] + l = l0 = ['a', 'b', 'c'] l1 = l[:] l += ['d'] @@ -697,6 +736,11 @@ l *= -5 assert l == [] + l = l0 = [1.1, 2.2] + l *= 2 + assert l is l0 + assert l == [1.1, 2.2, 1.1, 2.2] + l = range(2) l *= 2 assert l == [0, 1, 0, 1] @@ -731,6 +775,10 @@ assert c.index(0) == 0 raises(ValueError, c.index, 3) + c = [0.0, 2.2, 4.4] + assert c.index(0) == 0.0 + raises(ValueError, c.index, 3) + def test_index_cpython_bug(self): if self.on_cpython: skip("cpython has a bug here") @@ -779,6 +827,10 @@ l[::3] = ('a', 'b') assert l == ['a', 1, 2, 'b', 4, 5] + l = [0.0, 1.1, 2.2, 3.3, 4.4, 5.5] + l[::3] = ('a', 'b') + assert l == ['a', 1.1, 2.2, 
'b', 4.4, 5.5] + def test_setslice_with_self(self): l = [1,2,3,4] l[:] = l @@ -835,6 +887,10 @@ l.append("a") assert l == [1,2,3,"a"] + l = [1.1, 2.2, 3.3] + l.append(4.4) + assert l == [1.1, 2.2, 3.3, 4.4] + def test_count(self): c = list('hello') assert c.count('l') == 2 @@ -875,6 +931,10 @@ l.pop() assert l == range(9) + l = [1.1, 2.2, 3.3] + l.pop() + assert l == [1.1, 2.2] + l = [] raises(IndexError, l.pop, 0) @@ -897,16 +957,19 @@ l2 = ["1", "2", "3", "4"] l3 = range(5) l4 = [1, 2, 3, "4"] + l5 = [1.1, 2.2, 3.3, 4.4] raises(IndexError, l1.pop, -5) raises(IndexError, l2.pop, -5) raises(IndexError, l3.pop, -6) raises(IndexError, l4.pop, -5) + raises(IndexError, l5.pop, -5) assert l1.pop(-2) == 3 assert l2.pop(-2) == "3" assert l3.pop(-2) == 3 assert l4.pop(-2) == 3 + assert l5.pop(-2) == 3.3 def test_remove(self): c = list('hello world') @@ -925,6 +988,13 @@ l = [0, 3, 5] raises(ValueError, c.remove, 2) + l = [0.0, 1.1, 2.2, 3.3, 4.4] + l.remove(2.2) + assert l == [0.0, 1.1, 3.3, 4.4] + l = [0.0, 3.3, 5.5] + raises(ValueError, c.remove, 2) + raises(ValueError, c.remove, 2.2) + def test_reverse(self): c = list('hello world') c.reverse() From noreply at buildbot.pypy.org Thu Dec 1 14:14:30 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 1 Dec 2011 14:14:30 +0100 (CET) Subject: [pypy-commit] pypy default: Change __pypy__.list_strategy to raise a TypeError on non-list arguments Message-ID: <20111201131430.4EFF58208A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50036:4eefaeec6777 Date: 2011-12-01 08:14 -0500 http://bitbucket.org/pypy/pypy/changeset/4eefaeec6777/ Log: Change __pypy__.list_strategy to raise a TypeError on non-list arguments diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -75,7 +75,7 @@ def list_strategy(space, w_list): from pypy.objspace.std.listobject import W_ListObject - str_type = None if isinstance(w_list, W_ListObject): - str_type = w_list.strategy._applevel_repr - return space.wrap(str_type) + return space.wrap(w_list.strategy._applevel_repr) + else: + raise OperationError(space.w_TypeError, space.wrap("Can only get the list strategy of a list")) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -57,17 +57,18 @@ def test_list_strategy(self): from __pypy__ import list_strategy - l = [1,2,3] + + l = [1, 2, 3] assert list_strategy(l) == "int" - l = ["a","b","c"] + l = ["a", "b", "c"] assert list_strategy(l) == "str" - l = [1.1,2.2,3.3] + l = [1.1, 2.2, 3.3] assert list_strategy(l) == "float" l = range(3) assert list_strategy(l) == "range" - l = [1,"b",3] + l = [1, "b", 3] assert list_strategy(l) == "object" l = [] assert list_strategy(l) == "empty" o = 5 - assert list_strategy(o) == None + raises(TypeError, list_strategy, 5) From noreply at buildbot.pypy.org Thu Dec 1 14:41:16 2011 From: noreply at buildbot.pypy.org (hager) Date: Thu, 1 Dec 2011 14:41:16 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: added new layout for PyPy stackframes Message-ID: <20111201134116.F27738208A@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50037:cfa0b6e8653e Date: 2011-12-01 14:40 +0100 http://bitbucket.org/pypy/pypy/changeset/cfa0b6e8653e/ Log: added new layout for PyPy stackframes diff --git 
a/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py b/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py @@ -0,0 +1,45 @@ +""" + + PyPy PPC Stackframe + + + --------------------------- -- + | | | + | FPR SAVE AREA | |>> len(NONVOLATILES_FPR) * WORD + | | | + --------------------------- -- + | | | + | GPR SAVE AREA | |>> len(NONVOLATILES) * WORD + | | | + --------------------------- -- + | | | + | FLOAT/INT CONVERSION | |>> ? * WORD + | | | + --------------------------- -- + | | | + | SPILLING AREA | |>> regalloc.frame_manager.frame_depth * WORD + | (LOCAL VARIABLE SPACE) | | + --------------------------- -- + | | | + | ENCODING AREA | |>> len(MANAGED_REGS) * WORD + | (ALLOCA AREA) | | + --------------------------- -- + | | | + | PARAMETER SAVE AREA | |>> use MAX(number of parameters + | | | passed on stack in emit_call) * WORD + --------------------------- -- + | TOC POINTER | WORD | + --------------------------- | + | < RESERVED > | WORD | + --------------------------- | + | < RESERVED > | WORD | + --------------------------- |>> 6 WORDS + | SAVED LR | WORD | + --------------------------- | + | SAVED CR | WORD | + --------------------------- | + | BACK CHAIN | WORD | + SP -> --------------------------- -- + + +""" From noreply at buildbot.pypy.org Thu Dec 1 14:43:42 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 1 Dec 2011 14:43:42 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: a retrace can now end with a bad VirtualState as it now has a (possible unrelated) bridge as preamble Message-ID: <20111201134342.5C30C8208A@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50038:2e06799c48d5 Date: 2011-12-01 14:43 +0100 http://bitbucket.org/pypy/pypy/changeset/2e06799c48d5/ Log: a retrace can now end with a bad VirtualState as it now has a (possible unrelated) bridge as preamble diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -1,5 +1,5 @@ from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.metainterp.optimizeopt.virtualstate import VirtualStateAdder, ShortBoxes +from pypy.jit.metainterp.optimizeopt.virtualstate import VirtualStateAdder, ShortBoxes, BadVirtualState from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.history import TreeLoop, TargetToken, JitCellToken from pypy.jit.metainterp.jitexc import JitException @@ -272,7 +272,11 @@ # Construct jumpargs from the virtual state original_jumpargs = jumpop.getarglist()[:] values = [self.getvalue(arg) for arg in jumpop.getarglist()] - jumpargs = virtual_state.make_inputargs(values, self.optimizer) + try: + jumpargs = virtual_state.make_inputargs(values, self.optimizer) + except BadVirtualState: + # FIXME: Produce jump to preamble instead (see test_retrace_not_matching_bridge) + raise InvalidLoop jumpop.initarglist(jumpargs) # Inline the short preamble at the end of the loop diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -14,6 +14,9 @@ from pypy.rlib.objectmodel import we_are_translated import os +class BadVirtualState(Exception): + pass + class AbstractVirtualStateInfo(resume.AbstractVirtualInfo): position = -1 @@ -104,7 +107,8 @@ def 
enum_forced_boxes(self, boxes, value, optimizer): assert isinstance(value, virtualize.AbstractVirtualStructValue) - assert value.is_virtual() + if not value.is_virtual(): + raise BadVirtualState for i in range(len(self.fielddescrs)): v = value._fields[self.fielddescrs[i]] s = self.fieldstate[i] @@ -181,7 +185,8 @@ def enum_forced_boxes(self, boxes, value, optimizer): assert isinstance(value, virtualize.VArrayValue) - assert value.is_virtual() + if not value.is_virtual(): + raise BadVirtualState for i in range(len(self.fieldstate)): v = value._items[i] s = self.fieldstate[i] @@ -249,7 +254,8 @@ def enum_forced_boxes(self, boxes, value, optimizer): assert isinstance(value, virtualize.VArrayStructValue) - assert value.is_virtual() + if not value.is_virtual(): + raise BadVirtualState p = 0 for i in range(len(self.fielddescrs)): for j in range(len(self.fielddescrs[i])): diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -1,5 +1,5 @@ import py -from pypy.rlib.jit import JitDriver, promote +from pypy.rlib.jit import JitDriver, promote, dont_look_inside from pypy.rlib.objectmodel import compute_unique_id from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin @@ -764,6 +764,42 @@ res = self.meta_interp(f, [0x1F, 0x11]) assert res == f(0x1F, 0x11) + def test_retrace_not_matching_bridge(self): + @dont_look_inside + def external(node): + return node.value + 1 + myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'node', 'node2']) + class A(): + def new(self): + return A() + def val(self, i): + return i + 7 + class B(A): + def new(self): + return B() + def val(self, i): + return i + 42 + def f(n): + node = self._new() + node2 = A() + node.value = 0 + i = 0 + while i < n: + myjitdriver.jit_merge_point(n=n, i=i, node=node, node2=node2) + next = self._new() + next.value = node.value + n + node2.val(i) + if i != 7: + next.value += external(next) + else: + node2 = B() + node = next + node2 = node2.new() + + i += 1 + return node.value + res = self.meta_interp(f, [10], repeat=10) + assert res == f(10) + class VirtualMiscTests: def test_multiple_equal_virtuals(self): From notifications-noreply at bitbucket.org Thu Dec 1 15:01:41 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Thu, 01 Dec 2011 14:01:41 -0000 Subject: [pypy-commit] Notification: pypy Message-ID: <20111201140141.3344.22845@bitbucket13.managed.contegix.com> You have received a notification from andersas. Hi, I forked pypy. My fork is at https://bitbucket.org/andersas/pypy. 
-- Disable notifications at https://bitbucket.org/account/notifications/ From noreply at buildbot.pypy.org Thu Dec 1 15:32:14 2011 From: noreply at buildbot.pypy.org (gutworth) Date: Thu, 1 Dec 2011 15:32:14 +0100 (CET) Subject: [pypy-commit] pypy default: keep lines at a reasonable length Message-ID: <20111201143214.BE8DB8208A@wyvern.cs.uni-duesseldorf.de> Author: Benjamin Peterson Branch: Changeset: r50039:7377199358ba Date: 2011-12-01 09:32 -0500 http://bitbucket.org/pypy/pypy/changeset/7377199358ba/ Log: keep lines at a reasonable length diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -78,4 +78,5 @@ if isinstance(w_list, W_ListObject): return space.wrap(w_list.strategy._applevel_repr) else: - raise OperationError(space.w_TypeError, space.wrap("Can only get the list strategy of a list")) + w_msg = space.wrap("Can only get the list strategy of a list") + raise OperationError(space.w_TypeError, w_msg) From noreply at buildbot.pypy.org Thu Dec 1 16:56:24 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 1 Dec 2011 16:56:24 +0100 (CET) Subject: [pypy-commit] pypy default: make optimizeopt kill same_as Message-ID: <20111201155624.8473B8208A@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r50040:e4a0b9e4d23b Date: 2011-12-01 16:56 +0100 http://bitbucket.org/pypy/pypy/changeset/e4a0b9e4d23b/ Log: make optimizeopt kill same_as diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -491,6 +491,9 @@ self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) self.emit_operation(op) + def optimize_SAME_AS(self, op): + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) + dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_', default=OptRewrite.emit_operation) optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD') diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -612,7 +612,7 @@ return node.value res = self.meta_interp(f, [48, 3], policy=StopAtXPolicy(externfn)) assert res == f(48, 3) - self.check_loop_count(3) + self.check_loop_count(5) res = self.meta_interp(f, [40, 3], policy=StopAtXPolicy(externfn)) assert res == f(40, 3) self.check_loop_count(3) @@ -761,6 +761,27 @@ res = self.meta_interp(f, [0x1F, 0x11]) assert res == f(0x1F, 0x11) + def test_duplicated_virtual(self): + myjitdriver = JitDriver(greens = [], reds = ['n', 'node1', 'node2']) + def f(n): + node1 = self._new() + node1.value = 0 + node2 = self._new() + node2.value = 1 + while n > 0: + myjitdriver.jit_merge_point(n=n, node1=node1, node2=node2) + next = self._new() + next.value = node1.value + node2.value + n + node1 = next + node2 = next + n -= 1 + return node1.value + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_resops(new_with_vtable=0, new=0) + + + class VirtualMiscTests: def test_multiple_equal_virtuals(self): From noreply at buildbot.pypy.org Thu Dec 1 21:34:05 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Dec 2011 21:34:05 +0100 (CET) Subject: [pypy-commit] jitviewer default: fix (maybe) newer flasks Message-ID: <20111201203405.70CEA8208A@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: 
r176:c87055494d46 Date: 2011-12-01 12:01 +0200 http://bitbucket.org/pypy/jitviewer/changeset/c87055494d46/ Log: fix (maybe) newer flasks diff --git a/bin/jitviewer.py b/bin/jitviewer.py --- a/bin/jitviewer.py +++ b/bin/jitviewer.py @@ -192,7 +192,7 @@ log, loops = import_log(filename, ParserWithHtmlRepr) parse_log_counts(extract_category(log, 'jit-backend-count'), loops) storage.reconnect_loops(loops) - app = OverrideFlask('__name__', root_path=PATH) + app = OverrideFlask('jitviewer', root_path=PATH) server = Server(filename, storage) app.debug = True app.route('/')(server.index) From noreply at buildbot.pypy.org Thu Dec 1 21:38:18 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Thu, 1 Dec 2011 21:38:18 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: merge default Message-ID: <20111201203818.5C8918208A@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50041:78ed73b0b1af Date: 2011-12-01 18:16 +0100 http://bitbucket.org/pypy/pypy/changeset/78ed73b0b1af/ Log: merge default diff --git a/pypy/annotation/specialize.py b/pypy/annotation/specialize.py --- a/pypy/annotation/specialize.py +++ b/pypy/annotation/specialize.py @@ -36,9 +36,7 @@ newtup = SpaceOperation('newtuple', starargs, argscopy[-1]) newstartblock.operations.append(newtup) newstartblock.closeblock(Link(argscopy, graph.startblock)) - graph.startblock.isstartblock = False graph.startblock = newstartblock - newstartblock.isstartblock = True argnames = argnames + ['.star%d' % i for i in range(nb_extra_args)] graph.signature = Signature(argnames) # note that we can mostly ignore defaults: if nb_extra_args > 0, diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -15,6 +15,8 @@ from pypy.translator.simplify import get_funcobj from pypy.translator.unsimplify import varoftype +class UnsupportedMallocFlags(Exception): + pass def transform_graph(graph, cpu=None, callcontrol=None, portal_jd=None): """Transform a control flow graph to make it suitable for @@ -205,7 +207,19 @@ if op.args[0] in self.vable_array_vars: self.vable_array_vars[op.result]= self.vable_array_vars[op.args[0]] - rewrite_op_cast_pointer = rewrite_op_same_as + def rewrite_op_cast_pointer(self, op): + newop = self.rewrite_op_same_as(op) + assert newop is None + if (self._is_rclass_instance(op.args[0]) and + self._is_rclass_instance(op.result)): + FROM = op.args[0].concretetype.TO + TO = op.result.concretetype.TO + if lltype._castdepth(TO, FROM) > 0: + vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, TO) + const_vtable = Constant(vtable, lltype.typeOf(vtable)) + return [None, # hack, do the right renaming from op.args[0] to op.result + SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass @@ -481,8 +495,22 @@ def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) ARRAY = op.args[0].value - return self._do_builtin_call(op, 'raw_malloc', + name = 'raw_malloc' + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not track_allocation: + name += 
'_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[2]], extra = (ARRAY,), extrakey = ARRAY) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -599,10 +599,21 @@ return p return _ll_0_alloc_with_del - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, flavor='raw') - return _ll_1_raw_malloc + def build_raw_malloc_builder(zero=False, add_memory_pressure=False, track_allocation=True): + def build_ll_1_raw_malloc(ARRAY): + def _ll_1_raw_malloc(n): + return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, add_memory_pressure=add_memory_pressure) + return _ll_1_raw_malloc + return build_ll_1_raw_malloc + + build_ll_1_raw_malloc = build_raw_malloc_builder() + build_ll_1_raw_malloc_zero = build_raw_malloc_builder(zero=True) + build_ll_1_raw_malloc_zero_add_memory_pressure = build_raw_malloc_builder(zero=True, add_memory_pressure=True) + build_ll_1_raw_malloc_add_memory_pressure = build_raw_malloc_builder(add_memory_pressure=True) + build_ll_1_raw_malloc_no_track_allocation = build_raw_malloc_builder(track_allocation=False) + build_ll_1_raw_malloc_zero_no_track_allocation = build_raw_malloc_builder(zero=True, track_allocation=False) + build_ll_1_raw_malloc_zero_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(zero=True, add_memory_pressure=True, track_allocation=False) + build_ll_1_raw_malloc_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(add_memory_pressure=True, track_allocation=False) def build_ll_1_raw_free(ARRAY): def _ll_1_raw_free(p): diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -1,3 +1,5 @@ + +import py import random try: from itertools import product @@ -15,12 +17,12 @@ from pypy.objspace.flow.model import FunctionGraph, Block, Link from pypy.objspace.flow.model import SpaceOperation, Variable, Constant -from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.lltypesystem.module import ll_math from pypy.translator.unsimplify import varoftype from pypy.jit.codewriter import heaptracker, effectinfo from pypy.jit.codewriter.flatten import ListOfKind -from pypy.jit.codewriter.jtransform import Transformer +from pypy.jit.codewriter.jtransform import Transformer, UnsupportedMallocFlags from pypy.jit.metainterp.history import getkind def const(x): @@ -538,6 +540,44 @@ assert op1.opname == '-live-' assert op1.args == [] +def test_raw_malloc(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw'}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_zero(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), 
FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc_zero' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_unsupported_flag(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'unsupported_flag': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address @@ -1140,4 +1180,4 @@ assert op1.opname == 'mark_opaque_ptr' assert op1.args == [v1] assert op1.result is None - assert op2 is None \ No newline at end of file + assert op2 is None diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -518,6 +518,9 @@ @arguments("r") def bhimpl_mark_opaque_ptr(a): pass + @arguments("r", "i") + def bhimpl_record_known_class(a, b): + pass @arguments("i", returns="i") def bhimpl_int_copy(a): diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -260,6 +260,16 @@ def optimize_GUARD_FALSE(self, op): self.optimize_guard(op, CONST_0) + def optimize_RECORD_KNOWN_CLASS(self, op): + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) + assert isinstance(expectedclassbox, Const) + realclassbox = value.get_constant_class(self.optimizer.cpu) + if realclassbox is not None: + assert realclassbox.same_constant(expectedclassbox) + return + value.make_constant_class(expectedclassbox, None) + def optimize_GUARD_CLASS(self, op): value = self.getvalue(op.getarg(0)) expectedclassbox = op.getarg(1) @@ -481,6 +491,9 @@ self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) self.emit_operation(op) + def optimize_SAME_AS(self, op): + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) + dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_', default=OptRewrite.emit_operation) optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD') diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -28,6 +28,9 @@ def optimize_MARK_OPAQUE_PTR(self, op): pass + def optimize_RECORD_KNOWN_CLASS(self, op): + pass + dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -6482,6 +6482,21 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() + def test_record_known_class(self): + ops = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + record_known_class(p1, ConstClass(node_vtable)) + guard_class(p1, ConstClass(node_vtable)) [] + jump(p1) + """ + expected = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + jump(p1) + """ + self.optimize_loop(ops, expected) + def 
test_quasi_immut(self): ops = """ [p0, p1, i0] diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -243,6 +243,18 @@ def opimpl_mark_opaque_ptr(self, box): return self.execute(rop.MARK_OPAQUE_PTR, box) + @arguments("box", "box") + def opimpl_record_known_class(self, box, clsbox): + from pypy.rpython.lltypesystem import llmemory + if self.metainterp.heapcache.is_class_known(box): + return + adr = clsbox.getaddr() + bounding_class = llmemory.cast_adr_to_ptr(adr, rclass.CLASSTYPE) + if bounding_class.subclassrange_max - bounding_class.subclassrange_min == 1: + # precise class knowledge, this can be used + self.execute(rop.RECORD_KNOWN_CLASS, box, clsbox) + self.metainterp.heapcache.class_now_known(box) + @arguments("box") def _opimpl_any_return(self, box): self.metainterp.finishframe(box) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -494,6 +494,7 @@ 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr + 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -3585,6 +3585,67 @@ self.interp_operations(f, [5], translationoptions=translationoptions) + def test_annotation_gives_knowledge_to_tracer(self): + class Base(object): + pass + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + assert isinstance(a, A) + z = a.f() + elif x < 0: + assert isinstance(a, B) + z = a.f() + else: + assert isinstance(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): from pypy.rlib.objectmodel import UnboxedValue diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -612,7 +612,7 @@ return node.value res = self.meta_interp(f, [48, 3], policy=StopAtXPolicy(externfn)) assert res == f(48, 3) - self.check_loop_count(3) + self.check_loop_count(5) res = self.meta_interp(f, [40, 3], policy=StopAtXPolicy(externfn)) assert res == f(40, 3) self.check_loop_count(3) @@ -761,6 +761,27 @@ res = self.meta_interp(f, [0x1F, 0x11]) assert res == f(0x1F, 0x11) + def test_duplicated_virtual(self): + 
myjitdriver = JitDriver(greens = [], reds = ['n', 'node1', 'node2']) + def f(n): + node1 = self._new() + node1.value = 0 + node2 = self._new() + node2.value = 1 + while n > 0: + myjitdriver.jit_merge_point(n=n, node1=node1, node2=node2) + next = self._new() + next.value = node1.value + node2.value + n + node1 = next + node2 = next + n -= 1 + return node1.value + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_resops(new_with_vtable=0, new=0) + + + class VirtualMiscTests: def test_multiple_equal_virtuals(self): diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -255,10 +255,8 @@ s_binding = self.translator.annotator.binding jd._portal_args_s = [s_binding(v) for v in args] graph = copygraph(graph) - graph.startblock.isstartblock = False [jmpp] = find_jit_merge_points([graph]) graph.startblock = support.split_before_jit_merge_point(*jmpp) - graph.startblock.isstartblock = True # a crash in the following checkgraph() means that you forgot # to list some variable in greens=[] or reds=[] in JitDriver, # or that a jit_merge_point() takes a constant as an argument. diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -27,6 +27,7 @@ 'builtinify' : 'interp_magic.builtinify', 'lookup_special' : 'interp_magic.lookup_special', 'do_what_I_mean' : 'interp_magic.do_what_I_mean', + 'list_strategy' : 'interp_magic.list_strategy', } submodules = { diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -5,7 +5,6 @@ from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import IndexCache - def internal_repr(space, w_object): return space.wrap('%r' % (w_object,)) @@ -73,3 +72,11 @@ def do_what_I_mean(space): return space.wrap(42) + +def list_strategy(space, w_list): + from pypy.objspace.std.listobject import W_ListObject + if isinstance(w_list, W_ListObject): + return space.wrap(w_list.strategy._applevel_repr) + else: + w_msg = space.wrap("Can only get the list strategy of a list") + raise OperationError(space.w_TypeError, w_msg) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -5,7 +5,7 @@ def setup_class(cls): if option.runappdirect: py.test.skip("does not make sense on pypy-c") - cls.space = gettestobjspace(**{"objspace.usemodules.select": False}) + cls.space = gettestobjspace(**{"objspace.usemodules.select": False, "objspace.std.withrangelist": True}) def test__isfake(self): from __pypy__ import isfake @@ -54,3 +54,21 @@ from __pypy__ import do_what_I_mean x = do_what_I_mean() assert x == 42 + + def test_list_strategy(self): + from __pypy__ import list_strategy + + l = [1, 2, 3] + assert list_strategy(l) == "int" + l = ["a", "b", "c"] + assert list_strategy(l) == "str" + l = [1.1, 2.2, 3.3] + assert list_strategy(l) == "float" + l = range(3) + assert list_strategy(l) == "range" + l = [1, "b", 3] + assert list_strategy(l) == "object" + l = [] + assert list_strategy(l) == "empty" + o = 5 + raises(TypeError, list_strategy, 5) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ 
b/pypy/module/micronumpy/interp_dtype.py @@ -131,7 +131,7 @@ def binop(func): - func._annspecialcase_ = "specialize:call_location" + specialize.argtype(1, 2)(func) @functools.wraps(func) def impl(self, v1, v2): return self.adapt_val(func(self, @@ -141,6 +141,7 @@ return impl def raw_binop(func): + specialize.argtype(1, 2)(func) # Returns the result unwrapped. @functools.wraps(func) def impl(self, v1, v2): @@ -151,6 +152,7 @@ return impl def unaryop(func): + specialize.argtype(1)(func) @functools.wraps(func) def impl(self, v): return self.adapt_val(func(self, self.for_computation(self.unbox(v)))) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1110,6 +1110,14 @@ def debug_repr(self): return 'Slice(%s)' % self.parent.debug_repr() + def copy(self): + array = NDimArray(self.size, self.shape[:], self.find_dtype()) + iter = self.start_iter() + while not iter.done(): + array.setitem(iter.offset, self.getitem(iter.offset)) + iter = iter.next(len(self.shape)) + return array + class NDimArray(BaseArray): """ A class representing contiguous array. We know that each iteration by say ufunc will increase the data index by one diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -760,6 +760,19 @@ a[::-1] = a + a assert (a == [8, 6, 4, 2, 0]).all() + def test_debug_repr(self): + from numpypy import zeros, sin + a = zeros(1) + assert a.__debug_repr__() == 'Array' + assert (a + a).__debug_repr__() == 'Call2(add, Array, Array)' + assert (a[::2]).__debug_repr__() == 'Slice(Array)' + assert (a + 2).__debug_repr__() == 'Call2(add, Array, Scalar)' + assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, FlatIter(Array))' + assert sin(a).__debug_repr__() == 'Call1(sin, Array)' + b = a + a + b[0] = 3 + assert b.__debug_repr__() == 'Call2(add, forced=Array)' + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy @@ -989,18 +1002,11 @@ a = array([1, 2, 3]) assert dot(a.flat, a.flat) == 14 - def test_debug_repr(self): - from numpypy import zeros, sin - a = zeros(1) - assert a.__debug_repr__() == 'Array' - assert (a + a).__debug_repr__() == 'Call2(add, Array, Array)' - assert (a[::2]).__debug_repr__() == 'Slice(Array)' - assert (a + 2).__debug_repr__() == 'Call2(add, Array, Scalar)' - assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, FlatIter(Array))' - assert sin(a).__debug_repr__() == 'Call1(sin, Array)' - b = a + a - b[0] = 3 - assert b.__debug_repr__() == 'Call2(add, forced=Array)' + def test_slice_copy(self): + from numpypy import zeros + a = zeros((10, 10)) + b = a[0].copy() + assert (b == zeros(10)).all() class AppTestSupport(object): def setup_class(cls): diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -40,7 +40,6 @@ def __init__(self, name, startblock, return_var=None): self.name = name # function name (possibly mangled already) self.startblock = startblock - self.startblock.isstartblock = True # build default returnblock self.returnblock = Block([return_var or Variable()]) self.returnblock.operations = () @@ -173,11 +172,10 @@ class Block(object): - __slots__ = """isstartblock inputargs operations exitswitch + __slots__ = """inputargs operations exitswitch exits 
blockcolor""".split() def __init__(self, inputargs): - self.isstartblock = False self.inputargs = list(inputargs) # mixed list of variable/const XXX self.operations = [] # list of SpaceOperation(s) self.exitswitch = None # a variable or @@ -454,7 +452,6 @@ newblock.closeblock(*newlinks) newstartblock = blockmap[graph.startblock] - newstartblock.isstartblock = True newgraph = FunctionGraph(graph.name, newstartblock) newgraph.returnblock = blockmap[graph.returnblock] newgraph.exceptblock = blockmap[graph.exceptblock] @@ -492,7 +489,6 @@ for block in graph.iterblocks(): - assert bool(block.isstartblock) == (block is graph.startblock) assert type(block.exits) is tuple, ( "block.exits is a %s (closeblock() or recloseblock() missing?)" % (type(block.exits).__name__,)) diff --git a/pypy/objspace/flow/test/test_checkgraph.py b/pypy/objspace/flow/test/test_checkgraph.py --- a/pypy/objspace/flow/test/test_checkgraph.py +++ b/pypy/objspace/flow/test/test_checkgraph.py @@ -13,20 +13,6 @@ py.test.raises(AssertionError, checkgraph, g) -def test_nostartblock(): - g = FunctionGraph("g", Block([])) - g.startblock.closeblock(Link([Constant(1)], g.returnblock)) - g.startblock.isstartblock = False - py.test.raises(AssertionError, checkgraph, g) - -def test_twostartblocks(): - g = FunctionGraph("g", Block([])) - b = Block([]) - b.isstartblock = True - g.startblock.closeblock(Link([], b)) - b.closeblock(Link([Constant(1)], g.returnblock)) - py.test.raises(AssertionError, checkgraph, g) - def test_exitlessblocknotexitblock(): g = FunctionGraph("g", Block([])) py.test.raises(AssertionError, checkgraph, g) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -50,6 +50,13 @@ else: return space.fromcache(StringListStrategy) + # check for floats + for w_obj in list_w: + if not is_W_FloatObject(w_obj): + break + else: + return space.fromcache(FloatListStrategy) + return space.fromcache(ObjectListStrategy) def is_W_IntObject(w_object): @@ -60,7 +67,9 @@ from pypy.objspace.std.stringobject import W_StringObject return type(w_object) is W_StringObject - +def is_W_FloatObject(w_object): + from pypy.objspace.std.floatobject import W_FloatObject + return type(w_object) is W_FloatObject class W_ListObject(W_AbstractListObject): from pypy.objspace.std.listtype import list_typedef as typedef @@ -317,6 +326,8 @@ to the added item. W_Lists do not switch back to EmptyListStrategy when becoming empty again.""" + _applevel_repr = "empty" + def __init__(self, space): ListStrategy.__init__(self, space) # cache an empty list that is used whenever getitems is called (i.e. 
sorting) @@ -364,6 +375,8 @@ strategy = self.space.fromcache(IntegerListStrategy) elif is_W_StringObject(w_item): strategy = self.space.fromcache(StringListStrategy) + elif is_W_FloatObject(w_item): + strategy = self.space.fromcache(FloatListStrategy) else: strategy = self.space.fromcache(ObjectListStrategy) @@ -415,6 +428,8 @@ On any operation destroying the range (inserting, appending non-ints) the strategy is switched to IntegerListStrategy.""" + _applevel_repr = "range" + def switch_to_integer_strategy(self, w_list): items = self._getitems_range(w_list, False) strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) @@ -853,6 +868,7 @@ class ObjectListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None + _applevel_repr = "object" def unwrap(self, w_obj): return w_obj @@ -881,6 +897,7 @@ class IntegerListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0 + _applevel_repr = "int" def wrap(self, intval): return self.space.wrap(intval) @@ -905,8 +922,36 @@ if reverse: l.reverse() +class FloatListStrategy(AbstractUnwrappedStrategy, ListStrategy): + _none_value = 0.0 + _applevel_repr = "float" + + def wrap(self, floatval): + return self.space.wrap(floatval) + + def unwrap(self, w_float): + return self.space.float_w(w_float) + + erase, unerase = rerased.new_erasing_pair("float") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def is_correct_type(self, w_obj): + return is_W_FloatObject(w_obj) + + def list_is_correct_type(self, w_list): + return w_list.strategy is self.space.fromcache(FloatListStrategy) + + def sort(self, w_list, reverse): + l = self.unerase(w_list.lstorage) + sorter = FloatSort(l, len(l)) + sorter.sort() + if reverse: + l.reverse() + class StringListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None + _applevel_repr = "str" def wrap(self, stringval): return self.space.wrap(stringval) @@ -934,6 +979,7 @@ def getitems_str(self, w_list): return self.unerase(w_list.lstorage) + # _______________________________________________________ init_signature = Signature(['sequence'], None, None) @@ -1282,6 +1328,7 @@ TimSort = make_timsort_class() IntBaseTimSort = make_timsort_class() +FloatBaseTimSort = make_timsort_class() StringBaseTimSort = make_timsort_class() class KeyContainer(baseobjspace.W_Root): @@ -1302,6 +1349,10 @@ def lt(self, a, b): return a < b +class FloatSort(FloatBaseTimSort): + def lt(self, a, b): + return a < b + class StringSort(StringBaseTimSort): def lt(self, a, b): return a < b diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -471,11 +471,17 @@ l.extend(iter([1, 2, 3, 4])) assert l is l0 assert l == [1, 1, 2, 3, 4] + l = l0 = ['a'] l.extend(iter(['b', 'c', 'd'])) assert l == ['a', 'b', 'c', 'd'] assert l is l0 + l = l0 = [1.2] + l.extend(iter([2.3, 3.4, 4.5])) + assert l == [1.2, 2.3, 3.4, 4.5] + assert l is l0 + def test_sort(self): l = l0 = [1, 5, 3, 0] l.sort() @@ -494,6 +500,10 @@ l.sort(reverse=True) assert l == ["d", "c", "b", "a"] + l = [3.3, 2.2, 4.4, 1.1, 3.1, 5.5] + l.sort() + assert l == [1.1, 2.2, 3.1, 3.3, 4.4, 5.5] + def test_sort_cmp(self): def lencmp(a,b): return cmp(len(a), len(b)) l = [ 'a', 'fiver', 'tre', '' ] @@ -547,11 +557,19 @@ assert l[-2] == 6 raises(IndexError, "l[len(l)]") raises(IndexError, "l[-len(l)-1]") + l = ['a', 'b', 'c'] assert l[0] == 'a' assert l[-1] == 'c' assert l[-2] == 'b' raises(IndexError, 
"l[len(l)]") + + l = [1.1, 2.2, 3.3] + assert l[0] == 1.1 + assert l[-1] == 3.3 + assert l[-2] == 2.2 + raises(IndexError, "l[len(l)]") + l = [] raises(IndexError, "l[1]") @@ -589,6 +607,16 @@ assert l is l0 raises(IndexError, "del l[0]") + l = l0 = [1.1, 2.2, 3.3] + del l[0] + assert l == [2.2, 3.3] + del l[-1] + assert l == [2.2] + del l[-1] + assert l == [] + assert l is l0 + raises(IndexError, "del l[0]") + l = range(10) del l[5] assert l == [0, 1, 2, 3, 4, 6, 7, 8, 9] @@ -628,9 +656,15 @@ del l[:] assert l is l0 assert l == [] + l = ['a', 'b'] del l[:] assert l == [] + + l = [1.1, 2.2] + del l[:] + assert l == [] + l = range(5) del l[:] assert l == [] @@ -641,6 +675,11 @@ assert l is l0 assert l == [1,2,3,4,5] + l = l0 = [1.1,2.2,3.3] + l += [4.4,5.5] + assert l is l0 + assert l == [1.1,2.2,3.3,4.4,5.5] + l = l0 = ['a', 'b', 'c'] l1 = l[:] l += ['d'] @@ -698,6 +737,11 @@ l *= -5 assert l == [] + l = l0 = [1.1, 2.2] + l *= 2 + assert l is l0 + assert l == [1.1, 2.2, 1.1, 2.2] + l = range(2) l *= 2 assert l == [0, 1, 0, 1] @@ -732,6 +776,10 @@ assert c.index(0) == 0 raises(ValueError, c.index, 3) + c = [0.0, 2.2, 4.4] + assert c.index(0) == 0.0 + raises(ValueError, c.index, 3) + def test_index_cpython_bug(self): if self.on_cpython: skip("cpython has a bug here") @@ -780,6 +828,10 @@ l[::3] = ('a', 'b') assert l == ['a', 1, 2, 'b', 4, 5] + l = [0.0, 1.1, 2.2, 3.3, 4.4, 5.5] + l[::3] = ('a', 'b') + assert l == ['a', 1.1, 2.2, 'b', 4.4, 5.5] + def test_setslice_with_self(self): l = [1,2,3,4] l[:] = l @@ -836,6 +888,10 @@ l.append("a") assert l == [1,2,3,"a"] + l = [1.1, 2.2, 3.3] + l.append(4.4) + assert l == [1.1, 2.2, 3.3, 4.4] + def test_count(self): c = list('hello') assert c.count('l') == 2 @@ -876,6 +932,10 @@ l.pop() assert l == range(9) + l = [1.1, 2.2, 3.3] + l.pop() + assert l == [1.1, 2.2] + l = [] raises(IndexError, l.pop, 0) @@ -898,16 +958,19 @@ l2 = ["1", "2", "3", "4"] l3 = range(5) l4 = [1, 2, 3, "4"] + l5 = [1.1, 2.2, 3.3, 4.4] raises(IndexError, l1.pop, -5) raises(IndexError, l2.pop, -5) raises(IndexError, l3.pop, -6) raises(IndexError, l4.pop, -5) + raises(IndexError, l5.pop, -5) assert l1.pop(-2) == 3 assert l2.pop(-2) == "3" assert l3.pop(-2) == 3 assert l4.pop(-2) == 3 + assert l5.pop(-2) == 3.3 def test_remove(self): c = list('hello world') @@ -926,6 +989,13 @@ l = [0, 3, 5] raises(ValueError, c.remove, 2) + l = [0.0, 1.1, 2.2, 3.3, 4.4] + l.remove(2.2) + assert l == [0.0, 1.1, 3.3, 4.4] + l = [0.0, 3.3, 5.5] + raises(ValueError, c.remove, 2) + raises(ValueError, c.remove, 2.2) + def test_reverse(self): c = list('hello world') c.reverse() diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,4 +1,4 @@ -from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, StringListStrategy, RangeListStrategy, make_range_list +from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, StringListStrategy, RangeListStrategy, make_range_list from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject @@ -15,7 +15,7 @@ def test_empty_to_any(self): l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) - l.append(self.space.wrap(1.)) + l.append(self.space.wrap((1,3))) assert isinstance(l.strategy, ObjectListStrategy) l 
= W_ListObject(self.space, []) @@ -28,6 +28,11 @@ l.append(self.space.wrap('a')) assert isinstance(l.strategy, StringListStrategy) + l = W_ListObject(self.space, []) + assert isinstance(l.strategy, EmptyListStrategy) + l.append(self.space.wrap(1.2)) + assert isinstance(l.strategy, FloatListStrategy) + def test_int_to_any(self): l = W_ListObject(self.space, [self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) @@ -44,6 +49,14 @@ l.append(self.space.wrap(3)) assert isinstance(l.strategy, ObjectListStrategy) + def test_float_to_any(self): + l = W_ListObject(self.space, [self.space.wrap(1.1),self.space.wrap(2.2),self.space.wrap(3.3)]) + assert isinstance(l.strategy, FloatListStrategy) + l.append(self.space.wrap(4.4)) + assert isinstance(l.strategy, FloatListStrategy) + l.append(self.space.wrap("a")) + assert isinstance(l.strategy, ObjectListStrategy) + def test_setitem(self): # This should work if test_listobject.py passes l = W_ListObject(self.space, [self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) @@ -65,6 +78,12 @@ l.setitem(0, self.space.wrap(2)) assert isinstance(l.strategy, ObjectListStrategy) + # FloatStrategy to ObjectStrategy + l = W_ListObject(self.space, [self.space.wrap(1.2),self.space.wrap(2.3),self.space.wrap(3.4)]) + assert isinstance(l.strategy, FloatListStrategy) + l.setitem(0, self.space.wrap("a")) + assert isinstance(l.strategy, ObjectListStrategy) + def test_insert(self): # no change l = W_ListObject(self.space, [self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) @@ -84,6 +103,12 @@ l.insert(3, self.space.wrap('d')) assert isinstance(l.strategy, ObjectListStrategy) + # FloatStrategy + l = W_ListObject(self.space, [self.space.wrap(1.1),self.space.wrap(2.2),self.space.wrap(3.3)]) + assert isinstance(l.strategy, FloatListStrategy) + l.insert(3, self.space.wrap('d')) + assert isinstance(l.strategy, ObjectListStrategy) + # EmptyStrategy l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) @@ -95,7 +120,9 @@ l.insert(0, self.space.wrap(2)) assert isinstance(l.strategy, IntegerListStrategy) - def notest_list_empty_after_delete(self): + def test_list_empty_after_delete(self): + import py + py.test.skip("return to emptyliststrategy is not supported anymore") l = W_ListObject(self.space, [self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.deleteitem(0) @@ -117,21 +144,36 @@ l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) assert isinstance(l.strategy, IntegerListStrategy) + # IntegerStrategy to IntegerStrategy l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)])) assert isinstance(l.strategy, IntegerListStrategy) + # ObjectStrategy to ObjectStrategy l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap('b'), self.space.wrap(3)]) assert isinstance(l.strategy, ObjectListStrategy) l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) assert isinstance(l.strategy, ObjectListStrategy) + # IntegerStrategy to ObjectStrategy l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap('a'), 
self.space.wrap('b'), self.space.wrap('c')])) assert isinstance(l.strategy, ObjectListStrategy) + # StringStrategy to ObjectStrategy + l = W_ListObject(self.space, [self.space.wrap('a'), self.space.wrap('b'), self.space.wrap('c')]) + assert isinstance(l.strategy, StringListStrategy) + l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) + assert isinstance(l.strategy, ObjectListStrategy) + + # FloatStrategy to ObjectStrategy + l = W_ListObject(self.space, [self.space.wrap(1.1), self.space.wrap(2.2), self.space.wrap(3.3)]) + assert isinstance(l.strategy, FloatListStrategy) + l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap('a'), self.space.wrap(2), self.space.wrap(3)])) + assert isinstance(l.strategy, ObjectListStrategy) + def test_setslice_List(self): def wrapitems(items): @@ -160,6 +202,11 @@ keep_other_strategy(l, 0, 2, other.length(), other) assert l.strategy is self.space.fromcache(StringListStrategy) + l = W_ListObject(self.space, wrapitems([1.1, 2.2, 3.3, 4.4, 5.5])) + other = W_ListObject(self.space, []) + keep_other_strategy(l, 0, 1, l.length(), other) + assert l.strategy is self.space.fromcache(FloatListStrategy) + l = W_ListObject(self.space, wrapitems(["a",3,"c",4,"e"])) other = W_ListObject(self.space, wrapitems(["a", "b", "c"])) keep_other_strategy(l, 0, 2, other.length(), other) @@ -194,6 +241,11 @@ l.extend(W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)])) assert isinstance(l.strategy, IntegerListStrategy) + l = W_ListObject(self.space, [self.space.wrap(1.1), self.space.wrap(2.2), self.space.wrap(3.3)]) + assert isinstance(l.strategy, FloatListStrategy) + l.extend(W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)])) + assert isinstance(l.strategy, ObjectListStrategy) + def test_empty_extend_with_any(self): empty = W_ListObject(self.space, []) assert isinstance(empty.strategy, EmptyListStrategy) @@ -220,6 +272,11 @@ empty = W_ListObject(self.space, []) assert isinstance(empty.strategy, EmptyListStrategy) + empty.extend(W_ListObject(self.space, [self.space.wrap(1.1), self.space.wrap(2.2), self.space.wrap(3.3)])) + assert isinstance(empty.strategy, FloatListStrategy) + + empty = W_ListObject(self.space, []) + assert isinstance(empty.strategy, EmptyListStrategy) empty.extend(W_ListObject(self.space, [])) assert isinstance(empty.strategy, EmptyListStrategy) @@ -293,12 +350,13 @@ l.setslice(0, 1, 3, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) assert isinstance(l.strategy, IntegerListStrategy) - def test_get_items_copy(self): + def test_copy_list(self): l1 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) - l2 = l1.getitems() + l2 = l1.clone() l2.append(self.space.wrap(4)) assert not l2 == l1.getitems() + def test_getitems_does_not_copy_object_list(self): l1 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap("two"), self.space.wrap(3)]) l2 = l1.getitems() l2.append(self.space.wrap("four")) @@ -345,7 +403,6 @@ # should not raise assert getslice__List_ANY_ANY(self.space, l, self.space.wrap(15), self.space.wrap(2222)).strategy == self.space.fromcache(EmptyListStrategy) - def test_add_to_rangelist(self): l1 = make_range_list(self.space, 1, 1, 3) l2 = W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5)]) diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -176,7 +176,6 @@ return decorator 
@oopspec("jit.isconstant(value)") -@specialize.ll() def isconstant(value): """ While tracing, returns whether or not the value is currently known to be @@ -186,9 +185,9 @@ This is for advanced usage only. """ return NonConstant(False) +isconstant._annspecialcase_ = "specialize:call_location" @oopspec("jit.isvirtual(value)") -@specialize.ll() def isvirtual(value): """ Returns if this value is virtual, while tracing, it's relatively @@ -197,6 +196,7 @@ This is for advanced usage only. """ return NonConstant(False) +isvirtual._annspecialcase_ = "specialize:call_location" class Entry(ExtRegistryEntry): _about_ = hint diff --git a/pypy/rpython/memory/gctransform/asmgcroot.py b/pypy/rpython/memory/gctransform/asmgcroot.py --- a/pypy/rpython/memory/gctransform/asmgcroot.py +++ b/pypy/rpython/memory/gctransform/asmgcroot.py @@ -92,7 +92,6 @@ # make a copy of the graph that will reload the values graph2 = copygraph(fnptr._obj.graph) block2 = graph2.startblock - block2.isstartblock = False block1 = Block([]) reloadedvars = [] for v, c_p in zip(block2.inputargs, sra): @@ -109,7 +108,6 @@ [w], v)) reloadedvars.append(v) block1.closeblock(Link(reloadedvars, block2)) - block1.isstartblock = True graph2.startblock = block1 FUNC2 = lltype.FuncType([], FUNC1.RESULT) fnptr2 = lltype.functionptr(FUNC2, diff --git a/pypy/rpython/memory/gctransform/test/test_transform.py b/pypy/rpython/memory/gctransform/test/test_transform.py --- a/pypy/rpython/memory/gctransform/test/test_transform.py +++ b/pypy/rpython/memory/gctransform/test/test_transform.py @@ -102,12 +102,12 @@ llops.genop("gc_pop_alive", [var]) -def checkblock(block, is_borrowed): +def checkblock(block, is_borrowed, is_start_block): if block.operations == (): # a return/exception block -- don't want to think about them # (even though the test passes for somewhat accidental reasons) return - if block.isstartblock: + if is_start_block: refs_in = 0 else: refs_in = len([v for v in block.inputargs if isinstance(v, Variable) @@ -167,7 +167,7 @@ if check: for graph, is_borrowed in graphs_borrowed.iteritems(): for block in graph.iterblocks(): - checkblock(block, is_borrowed) + checkblock(block, is_borrowed, block is graph.startblock) return t, transformer def getops(graph): diff --git a/pypy/rpython/memory/gctransform/transform.py b/pypy/rpython/memory/gctransform/transform.py --- a/pypy/rpython/memory/gctransform/transform.py +++ b/pypy/rpython/memory/gctransform/transform.py @@ -263,9 +263,7 @@ # still be empty (but let's check) if starts_with_empty_block(graph) and inserted_empty_startblock: old_startblock = graph.startblock - graph.startblock.isstartblock = False graph.startblock = graph.startblock.exits[0].target - graph.startblock.isstartblock = True checkgraph(graph) diff --git a/pypy/rpython/normalizecalls.py b/pypy/rpython/normalizecalls.py --- a/pypy/rpython/normalizecalls.py +++ b/pypy/rpython/normalizecalls.py @@ -116,8 +116,6 @@ v = Constant(default) outlist.append(v) newblock.closeblock(Link(outlist, oldblock)) - oldblock.isstartblock = False - newblock.isstartblock = True graph.startblock = newblock for i in range(len(newdefaults)-1,-1,-1): if newdefaults[i] is NODEFAULT: @@ -171,8 +169,6 @@ # prepare the output args of newblock and link outlist = inlist[:] newblock.closeblock(Link(outlist, oldblock)) - oldblock.isstartblock = False - newblock.isstartblock = True graph.startblock = newblock # finished checkgraph(graph) diff --git a/pypy/translator/backendopt/inline.py b/pypy/translator/backendopt/inline.py ---
a/pypy/translator/backendopt/inline.py +++ b/pypy/translator/backendopt/inline.py @@ -453,7 +453,6 @@ #vars that need to be passed through the blocks of the inlined function linktoinlined = splitlink copiedstartblock = self.copy_block(self.graph_to_inline.startblock) - copiedstartblock.isstartblock = False #find args passed to startblock of inlined function passon_args = [] for arg in self.op.args[1:]: diff --git a/pypy/translator/backendopt/mallocv.py b/pypy/translator/backendopt/mallocv.py --- a/pypy/translator/backendopt/mallocv.py +++ b/pypy/translator/backendopt/mallocv.py @@ -391,7 +391,6 @@ virtualframe = VirtualFrame(graph2.startblock, 0, nodelist) graphbuilder = GraphBuilder(self, graph2) specblock = graphbuilder.start_from_virtualframe(virtualframe) - specblock.isstartblock = True specgraph = graph2 specgraph.name += '_mallocv' specgraph.startblock = specblock diff --git a/pypy/translator/backendopt/test/test_malloc.py b/pypy/translator/backendopt/test/test_malloc.py --- a/pypy/translator/backendopt/test/test_malloc.py +++ b/pypy/translator/backendopt/test/test_malloc.py @@ -50,7 +50,8 @@ # we do the loop ourselves instead of calling remove_simple_mallocs() while True: progress = remover.remove_mallocs_once(graph) - simplify.transform_dead_op_vars_in_blocks(list(graph.iterblocks())) + simplify.transform_dead_op_vars_in_blocks(list(graph.iterblocks()), + [graph]) if progress and option.view: t.view() if expected_result is not Ellipsis: diff --git a/pypy/translator/c/test/test_refcount.py b/pypy/translator/c/test/test_refcount.py --- a/pypy/translator/c/test/test_refcount.py +++ b/pypy/translator/c/test/test_refcount.py @@ -229,7 +229,6 @@ graph = t.buildflowgraph(g) assert graph.startblock.operations == [] graph.startblock = graph.startblock.exits[0].target - graph.startblock.isstartblock = True from pypy.objspace.flow.model import checkgraph checkgraph(graph) t._prebuilt_graphs[g] = graph diff --git a/pypy/translator/simplify.py b/pypy/translator/simplify.py --- a/pypy/translator/simplify.py +++ b/pypy/translator/simplify.py @@ -397,7 +397,8 @@ def transform_dead_op_vars(graph, translator=None): """Remove dead operations and variables that are passed over a link but not used in the target block. Input is a graph.""" - return transform_dead_op_vars_in_blocks(list(graph.iterblocks()), translator) + return transform_dead_op_vars_in_blocks(list(graph.iterblocks()), + [graph], translator) # the set of operations that can safely be removed # (they have no side effects, at least in R-Python) @@ -419,11 +420,19 @@ hasattr: True, } -def transform_dead_op_vars_in_blocks(blocks, translator=None): +def find_start_blocks(graphs): + start_blocks = set() + for graph in graphs: + start_blocks.add(graph.startblock) + return start_blocks + +def transform_dead_op_vars_in_blocks(blocks, graphs, translator=None): """Remove dead operations and variables that are passed over a link but not used in the target block. 
Input is a set of blocks""" read_vars = {} # set of variables really used variable_flow = {} # map {Var: list-of-Vars-it-depends-on} + set_of_blocks = set(blocks) + start_blocks = find_start_blocks(graphs) def canremove(op, block): if op.opname not in CanRemove: @@ -451,7 +460,7 @@ if block.exits: for link in block.exits: - if link.target not in blocks: + if link.target not in set_of_blocks: for arg, targetarg in zip(link.args, link.target.inputargs): read_vars[arg] = True read_vars[targetarg] = True @@ -465,7 +474,7 @@ read_vars[arg] = True # an input block's inputargs should not be modified, even if some # of the function's input arguments are not actually used - if block.isstartblock: + if block in start_blocks: for arg in block.inputargs: read_vars[arg] = True diff --git a/pypy/translator/transform.py b/pypy/translator/transform.py --- a/pypy/translator/transform.py +++ b/pypy/translator/transform.py @@ -115,7 +115,7 @@ # to kill dead (never-followed) links, # which can possibly remove more variables. from pypy.translator.simplify import transform_dead_op_vars_in_blocks - transform_dead_op_vars_in_blocks(block_subset) + transform_dead_op_vars_in_blocks(block_subset, self.translator.graphs) def transform_dead_code(self, block_subset): """Remove dead code: these are the blocks that are not annotated at all diff --git a/pypy/translator/unsimplify.py b/pypy/translator/unsimplify.py --- a/pypy/translator/unsimplify.py +++ b/pypy/translator/unsimplify.py @@ -42,9 +42,7 @@ vars = [copyvar(annotator, v) for v in graph.startblock.inputargs] newblock = Block(vars) newblock.closeblock(Link(vars, graph.startblock)) - graph.startblock.isstartblock = False graph.startblock = newblock - graph.startblock.isstartblock = True def starts_with_empty_block(graph): return (not graph.startblock.operations @@ -151,9 +149,7 @@ newop = SpaceOperation('direct_call', [c_initial_func], v_none) extrablock.operations = [newop] extrablock.closeblock(Link(args, entry_point.startblock)) - entry_point.startblock.isstartblock = False entry_point.startblock = extrablock - entry_point.startblock.isstartblock = True checkgraph(entry_point) def call_final_function(translator, final_func, annhelper=None): From noreply at buildbot.pypy.org Thu Dec 1 21:38:19 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Thu, 1 Dec 2011 21:38:19 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: a bit of progress with libffi and friends. 16 tests pass, 5 fail. Message-ID: <20111201203819.89E338208A@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50042:fd39e63b03e0 Date: 2011-12-01 21:37 +0100 http://bitbucket.org/pypy/pypy/changeset/fd39e63b03e0/ Log: a bit of progress with libffi and friends. 16 tests pass, 5 fail. There is still a problem with pointers and sizes... diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -770,12 +770,15 @@ # # safety check that no-one is trying to make annotation and translation # faster by providing the -O option to Python. 
-try: - assert False -except AssertionError: - pass # fine -else: - raise RuntimeError("The annotator relies on 'assert' statements from the\n" +import os +if "WINGDB_PYTHON" not in os.environ: + # ...but avoiding this boring check in the IDE + try: + assert False + except AssertionError: + pass # fine + else: + raise RuntimeError("The annotator relies on 'assert' statements from the\n" "\tannotated program: you cannot run it with 'python -O'.") # this has the side-effect of registering the unary and binary operations diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -79,16 +79,20 @@ types._import() +# this was '_fits_into_long', which is not adequate, because long is +# not necessary the type where we compute with. Actually meant is +# the type 'Signed'. + @specialize.arg(0) -def _fits_into_long(TYPE): +def _fits_into_signed(TYPE): if isinstance(TYPE, lltype.Ptr): - return True # pointers always fits into longs + return True # pointers always fits into Signeds if not isinstance(TYPE, lltype.Primitive): return False if TYPE is lltype.Void or TYPE is rffi.FLOAT or TYPE is rffi.DOUBLE: return False sz = rffi.sizeof(TYPE) - return sz <= rffi.sizeof(rffi.LONG) + return sz <= rffi.sizeof(rffi.SIGNED) # ====================================================================== @@ -115,7 +119,7 @@ def arg(self, val): TYPE = lltype.typeOf(val) _check_type(TYPE) - if _fits_into_long(TYPE): + if _fits_into_signed(TYPE): cls = IntArg val = rffi.cast(rffi.LONG, val) elif TYPE is rffi.DOUBLE: @@ -250,7 +254,7 @@ if is_struct: assert types.is_struct(self.restype) res = self._do_call_raw(self.funcsym, ll_args) - elif _fits_into_long(RESULT): + elif _fits_into_signed(RESULT): assert not types.is_struct(self.restype) res = self._do_call_int(self.funcsym, ll_args) elif RESULT is rffi.DOUBLE: diff --git a/pypy/rlib/test/autopath.py b/pypy/rlib/test/autopath.py new file mode 100644 --- /dev/null +++ b/pypy/rlib/test/autopath.py @@ -0,0 +1,131 @@ +""" +self cloning, automatic path configuration + +copy this into any subdirectory of pypy from which scripts need +to be run, typically all of the test subdirs. +The idea is that any such script simply issues + + import autopath + +and this will make sure that the parent directory containing "pypy" +is in sys.path. + +If you modify the master "autopath.py" version (in pypy/tool/autopath.py) +you can directly run it which will copy itself on all autopath.py files +it finds under the pypy root directory. + +This module always provides these attributes: + + pypydir pypy root directory path + this_dir directory where this autopath.py resides + +""" + +def __dirinfo(part): + """ return (partdir, this_dir) and insert parent of partdir + into sys.path. If the parent directories don't have the part + an EnvironmentError is raised.""" + + import sys, os + try: + head = this_dir = os.path.realpath(os.path.dirname(__file__)) + except NameError: + head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) + + error = None + while head: + partdir = head + head, tail = os.path.split(head) + if tail == part: + checkfile = os.path.join(partdir, os.pardir, 'pypy', '__init__.py') + if not os.path.exists(checkfile): + error = "Cannot find %r" % (os.path.normpath(checkfile),) + break + else: + error = "Cannot find the parent directory %r of the path %r" % ( + partdir, this_dir) + if not error: + # check for bogus end-of-line style (e.g. 
files checked out on + # Windows and moved to Unix) + f = open(__file__.replace('.pyc', '.py'), 'r') + data = f.read() + f.close() + if data.endswith('\r\n') or data.endswith('\r'): + error = ("Bad end-of-line style in the .py files. Typically " + "caused by a zip file or a checkout done on Windows and " + "moved to Unix or vice-versa.") + if error: + raise EnvironmentError("Invalid source tree - bogus checkout! " + + error) + + pypy_root = os.path.join(head, '') + try: + sys.path.remove(head) + except ValueError: + pass + sys.path.insert(0, head) + + munged = {} + for name, mod in sys.modules.items(): + if '.' in name: + continue + fn = getattr(mod, '__file__', None) + if not isinstance(fn, str): + continue + newname = os.path.splitext(os.path.basename(fn))[0] + if not newname.startswith(part + '.'): + continue + path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') + if path.startswith(pypy_root) and newname != part: + modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) + if newname != '__init__': + modpaths.append(newname) + modpath = '.'.join(modpaths) + if modpath not in sys.modules: + munged[modpath] = mod + + for name, mod in munged.iteritems(): + if name not in sys.modules: + sys.modules[name] = mod + if '.' in name: + prename = name[:name.rfind('.')] + postname = name[len(prename)+1:] + if prename not in sys.modules: + __import__(prename) + if not hasattr(sys.modules[prename], postname): + setattr(sys.modules[prename], postname, mod) + + return partdir, this_dir + +def __clone(): + """ clone master version of autopath.py into all subdirs """ + from os.path import join, walk + if not this_dir.endswith(join('pypy','tool')): + raise EnvironmentError("can only clone master version " + "'%s'" % join(pypydir, 'tool',_myname)) + + + def sync_walker(arg, dirname, fnames): + if _myname in fnames: + fn = join(dirname, _myname) + f = open(fn, 'rwb+') + try: + if f.read() == arg: + print "checkok", fn + else: + print "syncing", fn + f = open(fn, 'w') + f.write(arg) + finally: + f.close() + s = open(join(pypydir, 'tool', _myname), 'rb').read() + walk(pypydir, sync_walker, s) + +_myname = 'autopath.py' + +# set guaranteed attributes + +pypydir, this_dir = __dirinfo('pypy') + +if __name__ == '__main__': + __clone() diff --git a/pypy/rlib/test/test_libffi.py b/pypy/rlib/test/test_libffi.py --- a/pypy/rlib/test/test_libffi.py +++ b/pypy/rlib/test/test_libffi.py @@ -1,6 +1,6 @@ import sys -import py +import py, autopath from pypy.rlib.libffi import (CDLL, Func, get_libc_name, ArgChain, types, IS_32_BIT, array_getitem, array_setitem) @@ -121,7 +121,9 @@ exports.append(match.group(1)) # c_file.write(py.code.Source('\n'.join(snippets))) - eci = ExternalCompilationInfo(export_symbols=exports) + eci = ExternalCompilationInfo( + export_symbols=exports, + include_dirs = [str(py.path.local(autopath.pypydir).join('translator', 'c'))]) cls.libfoo_name = str(platform.compile([c_file], eci, 'x', standalone=False)) @@ -235,9 +237,11 @@ def test_pointer_as_argument(self): """#include - long inc(long* x) + #include "src/signed_defn.h" + + Signed inc(Signed* x) { - long oldval; + Signed oldval; if (x == NULL) return -1; oldval = *x; @@ -247,14 +251,13 @@ """ libfoo = self.get_libfoo() func = (libfoo, 'inc', [types.pointer], types.slong) - LONGP = lltype.Ptr(rffi.CArray(rffi.LONG)) - null = lltype.nullptr(LONGP.TO) - res = self.call(func, [null], rffi.LONG) + null = lltype.nullptr(rffi.SIGNEDP.TO) + res = self.call(func, [null], rffi.SIGNED) assert res == -1 # ptr_result = 
lltype.malloc(LONGP.TO, 1, flavor='raw') ptr_result[0] = 41 - res = self.call(func, [ptr_result], rffi.LONG) + res = self.call(func, [ptr_result], rffi.SIGNED) if self.__class__ is TestLibffiCall: # the function was called only once assert res == 41 @@ -436,7 +439,7 @@ libfoo = CDLL(self.libfoo_name) make_point = (libfoo, 'make_point', [types.slong, types.slong], ffi_point) # - PTR = lltype.Ptr(rffi.CArray(rffi.LONG)) + PTR = lltype.Ptr(rffi.CArray(rffi.SIGNED)) p = self.call(make_point, [12, 34], PTR, is_struct=True, jitif=["byval"]) assert p[0] == 12 diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -647,8 +647,9 @@ # float * FLOATP = lltype.Ptr(lltype.Array(FLOAT, hints={'nolength': True})) -# Signed * -SIGNEDP = lltype.Ptr(lltype.Array(lltype.Signed, hints={'nolength': True})) +# Signed, Signed * +SIGNED = lltype.Signed +SIGNEDP = lltype.Ptr(lltype.Array(SIGNED, hints={'nolength': True})) # various type mapping From noreply at buildbot.pypy.org Thu Dec 1 21:50:52 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Thu, 1 Dec 2011 21:50:52 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: unclear, reverting a change which did not help Message-ID: <20111201205052.BC6F08208A@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50043:6055c7b753ec Date: 2011-12-01 21:49 +0100 http://bitbucket.org/pypy/pypy/changeset/6055c7b753ec/ Log: unclear, reverting a change which did not help diff --git a/pypy/rlib/test/test_libffi.py b/pypy/rlib/test/test_libffi.py --- a/pypy/rlib/test/test_libffi.py +++ b/pypy/rlib/test/test_libffi.py @@ -439,7 +439,7 @@ libfoo = CDLL(self.libfoo_name) make_point = (libfoo, 'make_point', [types.slong, types.slong], ffi_point) # - PTR = lltype.Ptr(rffi.CArray(rffi.SIGNED)) + PTR = lltype.Ptr(rffi.CArray(rffi.LONG)) p = self.call(make_point, [12, 34], PTR, is_struct=True, jitif=["byval"]) assert p[0] == 12 From noreply at buildbot.pypy.org Thu Dec 1 22:18:33 2011 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 1 Dec 2011 22:18:33 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: copy/paste test and doc string from numpy Message-ID: <20111201211833.382E68208A@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50044:1780af5403c8 Date: 2011-11-30 23:08 +0200 http://bitbucket.org/pypy/pypy/changeset/1780af5403c8/ Log: copy/paste test and doc string from numpy diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -501,6 +501,15 @@ descr_argmin = _reduce_argmax_argmin_impl("min") def descr_dot(self, space, w_other): + '''Dot product of two arrays. + + For 2-D arrays it is equivalent to matrix multiplication, and for 1-D + arrays to inner product of vectors (without complex conjugation). 
For + N dimensions it is a sum product over the last axis of `a` and + the second-to-last of `b`:: + + dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])''' + #numpy's doc string :) w_other = convert_to_array(space, w_other) if isinstance(w_other, Scalar): return self.descr_mul(space, w_other) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -700,6 +700,15 @@ assert a.dot(range(5)) == 30 assert dot(range(5), range(5)) == 30 assert (dot(5, [1, 2, 3]) == [5, 10, 15]).all() + a = array([[range(4), range(4, 8), range(8, 12)], + [range(12, 16),range(16, 20),range(20, 24)]]) + raises(ValueError,"a.dot(a)") + b = a[0, :, :].T + #Superfluous shape test makes the intention of the test clearer + assert a.shape == (2, 3, 4) + assert b.shape == (4, 3) + c = a.dot(b) + assert (c == [[[14, 38,62], [38, 126, 214], [62, 214, 366]], def test_dot_constant(self): from numpypy import array From noreply at buildbot.pypy.org Thu Dec 1 22:18:34 2011 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 1 Dec 2011 22:18:34 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: shape matching, output creation implemented Message-ID: <20111201211834.60138820C2@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50045:c40572851869 Date: 2011-12-01 00:17 +0200 http://bitbucket.org/pypy/pypy/changeset/c40572851869/ Log: shape matching, output creation implemented diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -513,10 +513,40 @@ w_other = convert_to_array(space, w_other) if isinstance(w_other, Scalar): return self.descr_mul(space, w_other) - else: + elif len(self.shape) < 2 and len(w_other.shape) < 2: w_res = self.descr_mul(space, w_other) assert isinstance(w_res, BaseArray) return w_res.descr_sum(space) + #Do the dims match? + my_critical_dim_size = self.shape[-1] + other_critical_dim_size = w_other.shape[0] + if len(w_other.shape) > 2: + other_critical_dim_size = w_other.shape[-2] + if my_critical_dim_size != other_critical_dim_size: + raise OperationError(space.w_ValueError, space.wrap( + "objects are not aligned")) + out_shape = self.shape[:-1] + w_other.shape[0:-2] + w_other.shape[-1:] + out_size = 1 + for os in out_shape: + out_size *= os + dtype = interp_ufuncs.find_binop_result_dtype(space, + self.find_dtype(), w_other.find_dtype()) + #TODO: what should the order be? C or F? 
+ arr = NDimArray(out_size, out_shape, dtype=dtype) + return arr + out_iter = ArrayIterator(out_size) + me_iter = BroadcastIterator(self,self.shape[:len(self.size)-1] + [1]) + other_iter = BroadcastIter(self, + w_other.shape[:-2] + [1] + w_other.shape[-1]) + call2 = instantiate(Call2) + call2.left = self + call2.right = w_other + call2.calc_dtype = None + call2.size = my_critical_dim_size + + while not out_iter.done(): + pass + def get_concrete(self): raise NotImplementedError diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -161,6 +161,7 @@ self.signature, w_lhs.signature, w_rhs.signature ]) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) + edf = jkl w_res = Call2(new_sig, new_shape, calc_dtype, res_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -709,6 +709,7 @@ assert b.shape == (4, 3) c = a.dot(b) assert (c == [[[14, 38,62], [38, 126, 214], [62, 214, 366]], + [[86, 302, 518], [110, 390, 670], [134, 478, 822]]]).all() def test_dot_constant(self): from numpypy import array From noreply at buildbot.pypy.org Thu Dec 1 22:18:35 2011 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 1 Dec 2011 22:18:35 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: add two arg functionality to test_compile Message-ID: <20111201211835.87FEC8208A@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50046:84755e29506f Date: 2011-12-01 22:59 +0200 http://bitbucket.org/pypy/pypy/changeset/84755e29506f/ Log: add two arg functionality to test_compile diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -30,6 +30,7 @@ pass SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", "unegative"] +TWO_ARG_FUNCTIONS = ["dot"] class FakeSpace(object): w_ValueError = None @@ -381,17 +382,28 @@ w_res = neg.call(interp.space, [arr]) else: assert False # unreachable code - if isinstance(w_res, BaseArray): - return w_res - if isinstance(w_res, FloatObject): - dtype = interp.space.fromcache(W_Float64Dtype) - elif isinstance(w_res, BoolObject): - dtype = interp.space.fromcache(W_BoolDtype) - else: - dtype = None - return scalar_w(interp.space, dtype, w_res) + elif self.name in TWO_ARG_FUNCTIONS: + if len(self.args) != 2: + raise ArgumentMismatch + arr0 = self.args[0].execute(interp) + arr1 = self.args[1].execute(interp) + if not isinstance(arr0, BaseArray): + raise ArgumentNotAnArray + if not isinstance(arr1, BaseArray): + raise ArgumentNotAnArray + elif self.name == "dot": + w_res = arr0.descr_dot(interp.space, arr1) else: raise WrongFunctionName + if isinstance(w_res, BaseArray): + return w_res + if isinstance(w_res, FloatObject): + dtype = interp.space.fromcache(W_Float64Dtype) + elif isinstance(w_res, BoolObject): + dtype = interp.space.fromcache(W_BoolDtype) + else: + dtype = None + return scalar_w(interp.space, dtype, w_res) _REGEXES = [ ('-?[\d\.]+', 'number'), @@ -525,6 +537,9 @@ args = [] tokens.pop() # lparen while tokens.get(0).name != 'paren_right': + if tokens.get(0).name == 'coma': + tokens.pop() + continue args.append(self.parse_expression(tokens)) return FunctionCall(name, args) diff --git 
a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -519,9 +519,11 @@ return w_res.descr_sum(space) #Do the dims match? my_critical_dim_size = self.shape[-1] - other_critical_dim_size = w_other.shape[0] + other_critical_dim_size = w_other.shape[0] + other_critical_dim_stride = w_other.strides[0] if len(w_other.shape) > 2: other_critical_dim_size = w_other.shape[-2] + other_critical_dim_stride = w_other.strides[-2] if my_critical_dim_size != other_critical_dim_size: raise OperationError(space.w_ValueError, space.wrap( "objects are not aligned")) @@ -529,23 +531,26 @@ out_size = 1 for os in out_shape: out_size *= os + out_ndims = len(out_shape) dtype = interp_ufuncs.find_binop_result_dtype(space, self.find_dtype(), w_other.find_dtype()) #TODO: what should the order be? C or F? arr = NDimArray(out_size, out_shape, dtype=dtype) + out_iter = ArrayIterator(out_size) + #TODO: invalidate self, w_other with arr + me_iter = BroadcastIterator(self,self.shape[:-1] + [1]) + other_iter = BroadcastIterator(self, + w_other.shape[:-2] + [1] + w_other.shape[-1:]) + while not out_iter.done(): + i = OneDimIterator(me_iter.get_offset(), self.strides[-1], self.shape[-1]) + j = OneDimIterator(other_iter.get_offset(), other_critical_dim_stride, other_critical_dim_size) + #Heres what I would like to do, but how? + #value = sum(mult_with_iters(self, i, w_other, j)) + #arr.setitem(out_iter, value) + out_iter = out_iter.next(out_ndims) + me_iter = me_iter.next(0) + other_iter = other_iter.next(0) return arr - out_iter = ArrayIterator(out_size) - me_iter = BroadcastIterator(self,self.shape[:len(self.size)-1] + [1]) - other_iter = BroadcastIter(self, - w_other.shape[:-2] + [1] + w_other.shape[-1]) - call2 = instantiate(Call2) - call2.left = self - call2.right = w_other - call2.calc_dtype = None - call2.size = my_critical_dim_size - - while not out_iter.done(): - pass def get_concrete(self): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -161,7 +161,6 @@ self.signature, w_lhs.signature, w_rhs.signature ]) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) - edf = jkl w_res = Call2(new_sig, new_shape, calc_dtype, res_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -232,3 +232,11 @@ a -> 3 """) assert interp.results[0].value.val == 11 + def test_dot(self): + interp = self.run(""" + a = [[1, 2], [3, 4]] + b = [[5, 6], [7, 8]] + c = dot(a, b) + c -> 0 -> 0 + """) + assert interp.results[0].value.val == 19 From noreply at buildbot.pypy.org Thu Dec 1 23:05:39 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Thu, 1 Dec 2011 23:05:39 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: fixed smallintobject tests. The type() checks make no longer sense Message-ID: <20111201220539.160168208A@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50047:234142ad1aa7 Date: 2011-12-01 23:05 +0100 http://bitbucket.org/pypy/pypy/changeset/234142ad1aa7/ Log: fixed smallintobject tests. 
The type() checks make no longer sense diff --git a/pypy/objspace/std/test/test_smallintobject.py b/pypy/objspace/std/test/test_smallintobject.py --- a/pypy/objspace/std/test/test_smallintobject.py +++ b/pypy/objspace/std/test/test_smallintobject.py @@ -64,7 +64,7 @@ f1 = wrapint(self.space, x) f2 = wrapint(self.space, y) result = self.space.unwrap(self.space.add(f1, f2)) - assert result == x+y and type(result) == type(x+y) + assert result == x+y and def test_sub(self): for x in [1, 100, sys.maxint // 2 - 50, @@ -74,15 +74,16 @@ f1 = wrapint(self.space, x) f2 = wrapint(self.space, y) result = self.space.unwrap(self.space.sub(f1, f2)) - assert result == x-y and type(result) == type(x-y) - + assert result == x-y + def test_mul(self): for x in [0, 1, 100, sys.maxint // 2 - 50, sys.maxint - 1000]: for y in [0, 1, 100, sys.maxint // 2 - 50, sys.maxint - 1000]: f1 = wrapint(self.space, x) f2 = wrapint(self.space, y) result = self.space.unwrap(self.space.mul(f1, f2)) - assert result == x*y and type(result) == type(x*y) + assert result == x*y + def test_div(self): for i in range(10): From noreply at buildbot.pypy.org Thu Dec 1 23:20:17 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Thu, 1 Dec 2011 23:20:17 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: inv(glitch) :-) Message-ID: <20111201222017.4F4F08208A@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50048:da5b796470c9 Date: 2011-12-01 23:19 +0100 http://bitbucket.org/pypy/pypy/changeset/da5b796470c9/ Log: inv(glitch) :-) diff --git a/pypy/objspace/std/test/test_smallintobject.py b/pypy/objspace/std/test/test_smallintobject.py --- a/pypy/objspace/std/test/test_smallintobject.py +++ b/pypy/objspace/std/test/test_smallintobject.py @@ -64,7 +64,7 @@ f1 = wrapint(self.space, x) f2 = wrapint(self.space, y) result = self.space.unwrap(self.space.add(f1, f2)) - assert result == x+y and + assert result == x+y def test_sub(self): for x in [1, 100, sys.maxint // 2 - 50, From noreply at buildbot.pypy.org Fri Dec 2 00:21:21 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Fri, 2 Dec 2011 00:21:21 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: adjusted the format code of gc (long->Signed) Message-ID: <20111201232121.7AE558208A@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50049:7f97cc73d700 Date: 2011-12-02 00:20 +0100 http://bitbucket.org/pypy/pypy/changeset/7f97cc73d700/ Log: adjusted the format code of gc (long->Signed) diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -576,7 +576,7 @@ # if convenient for the backend, we compute the info about # the flag as (byte-offset, single-byte-flag). 
import struct - value = struct.pack("l", flag_word) + value = struct.pack(lltype.SignedFmt, flag_word) assert value.count('\x00') == len(value) - 1 # only one byte is != 0 i = 0 while value[i] == '\x00': i += 1 diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -8,7 +8,7 @@ from pypy.tool.identity_dict import identity_dict from pypy.tool import leakfinder from types import NoneType -from pypy.rlib.rarithmetic import maxint, is_valid_int +from pypy.rlib.rarithmetic import maxint, is_valid_int, is_emulated_long import weakref class State(object): @@ -682,6 +682,11 @@ number = _numbertypes[type] = Number(name, type) return number +if is_emulated_long: + SignedFmt = 'll' +else: + SignedFmt = 'l' + Signed = build_number("Signed", int) Unsigned = build_number("Unsigned", r_uint) SignedLongLong = build_number("SignedLongLong", r_longlong) From noreply at buildbot.pypy.org Fri Dec 2 02:39:23 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Fri, 2 Dec 2011 02:39:23 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: corrected ovfcheck for the final time! ; -) Message-ID: <20111202013923.D56948208A@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50050:f26e12e71561 Date: 2011-12-02 01:30 +0100 http://bitbucket.org/pypy/pypy/changeset/f26e12e71561/ Log: corrected ovfcheck for the final time! ;-) diff --git a/pypy/rlib/rarithmetic.py b/pypy/rlib/rarithmetic.py --- a/pypy/rlib/rarithmetic.py +++ b/pypy/rlib/rarithmetic.py @@ -147,7 +147,7 @@ assert not isinstance(r, r_uint), "unexpected ovf check on unsigned" assert not isinstance(r, r_longlong), "ovfcheck not supported on r_longlong" assert not isinstance(r, r_ulonglong), "ovfcheck not supported on r_ulonglong" - if not is_valid_int(r): + if type(r) is long and not is_valid_int(r): # checks only if applicable to r's type. # this happens in the garbage collector. 
raise OverflowError, "signed integer expression did overflow" From noreply at buildbot.pypy.org Fri Dec 2 02:39:25 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Fri, 2 Dec 2011 02:39:25 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: correction to struct.pack and test_gc Message-ID: <20111202013925.14A088208A@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50051:c0364c9ad58f Date: 2011-12-02 02:38 +0100 http://bitbucket.org/pypy/pypy/changeset/c0364c9ad58f/ Log: correction to struct.pack and test_gc diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -10,6 +10,7 @@ from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from pypy.jit.metainterp.optimizeopt.util import equaloplists +from pypy.rlib.rarithmetic import is_valid_int def test_boehm(): gc_ll_descr = GcLLDescr_boehm(None, None, None) @@ -415,9 +416,9 @@ assert newops[0].getarg(1) == v_value assert newops[0].result is None wbdescr = newops[0].getdescr() - assert isinstance(wbdescr.jit_wb_if_flag, int) - assert isinstance(wbdescr.jit_wb_if_flag_byteofs, int) - assert isinstance(wbdescr.jit_wb_if_flag_singlebyte, int) + assert is_valid_int(wbdescr.jit_wb_if_flag) + assert is_valid_int(wbdescr.jit_wb_if_flag_byteofs) + assert is_valid_int(wbdescr.jit_wb_if_flag_singlebyte) def test_get_rid_of_debug_merge_point(self): operations = [ diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -683,7 +683,7 @@ return number if is_emulated_long: - SignedFmt = 'll' + SignedFmt = 'q' else: SignedFmt = 'l' From noreply at buildbot.pypy.org Fri Dec 2 08:04:20 2011 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 2 Dec 2011 08:04:20 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-reshape-merge: Create a branch to merge matrixmath-reshape. A bit of a mess because of the Message-ID: <20111202070420.D993D8208A@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: matrixmath-reshape-merge Changeset: r50052:501d675cc368 Date: 2011-12-02 09:03 +0200 http://bitbucket.org/pypy/pypy/changeset/501d675cc368/ Log: Create a branch to merge matrixmath-reshape. A bit of a mess because of the way the branch was created, but too bad. Add a failing test that should work, slices were not really tested that well diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -98,6 +98,66 @@ endshape[i] = remainder[i] return endshape +#Recalculating strides. Find the steps that the iteration does for each +#dimension, given the stride and shape. Then try to create a new stride that +#fits the new shape, using those steps. If there is a shape/step mismatch +#(meaning that the realignment of elements crosses from one step into another) +#return None so that the caller can raise an exception. 
+def calc_new_strides(new_shape, old_shape, old_strides): + #Return the proper strides for new_shape, or None + # if the mapping crosses stepping boundaries + + #Assumes that nelems have been matched, len(shape) > 1 for old_shape and + # len(new_shape) > 0 + steps = [] + last_step = 1 + oldI = 0 + new_strides = [] + if old_strides[0] < old_strides[-1]: + for i in range(len(old_shape)): + steps.append(old_strides[i] / last_step) + last_step = old_shape[i] * old_strides[i] + cur_step = steps[0] + n_new_elems_used = 1 + n_old_elems_to_use = old_shape[0] + for s in new_shape: + new_strides.append(cur_step * n_new_elems_used) + n_new_elems_used *= s + while n_new_elems_used > n_old_elems_to_use: + oldI += 1 + if steps[oldI] != steps[oldI - 1]: + return None + n_old_elems_to_use *= old_shape[oldI] + if n_new_elems_used == n_old_elems_to_use: + oldI += 1 + if oldI >= len(old_shape): + break + cur_step = steps[oldI] + n_old_elems_to_use *= old_shape[oldI] + else: + for i in range(len(old_shape) - 1, -1, -1): + steps.insert(0, old_strides[i] / last_step) + last_step = old_shape[i] * old_strides[i] + cur_step = steps[-1] + n_new_elems_used = 1 + oldI = -1 + n_old_elems_to_use = old_shape[-1] + for s in new_shape[::-1]: + new_strides.insert(0, cur_step * n_new_elems_used) + n_new_elems_used *= s + while n_new_elems_used > n_old_elems_to_use: + oldI -= 1 + if steps[oldI] != steps[oldI + 1]: + return None + n_old_elems_to_use *= old_shape[oldI] + if n_new_elems_used == n_old_elems_to_use: + oldI -= 1 + if oldI < -len(old_shape): + break + cur_step = steps[oldI] + n_old_elems_to_use *= old_shape[oldI] + return new_strides + def descr_new_array(space, w_subtype, w_item_or_iterable, w_dtype=None, w_order=NoneNotWrapped): # find scalar @@ -518,6 +578,42 @@ def descr_get_shape(self, space): return space.newtuple([space.wrap(i) for i in self.shape]) + def descr_set_shape(self, space, w_iterable): + concrete = self.get_concrete() + new_size = 0 + new_shape = [] + if not space.issequence_w(w_iterable): + new_size = space.int_w(w_iterable) + if new_size < 0: + new_size = self.find_size() + new_shape = [new_size, ] + else: + neg_dim = -1 + batch = space.listview(w_iterable) + new_size = 1 + if len(batch) < 1: + new_size = 0 + new_shape = [] + i = 0 + for elem in batch: + s = space.int_w(elem) + if s < 0: + if neg_dim >= 0: + raise OperationError(space.w_ValueError, space.wrap( + "can only specify one unknown dimension")) + s = 1 + neg_dim = i + new_size *= s + new_shape.append(s) + i += 1 + if neg_dim >= 0: + new_shape[neg_dim] = self.find_size() / new_size + new_size *= new_shape[neg_dim] + if new_size != self.find_size(): + raise OperationError(space.w_ValueError, + space.wrap("total size of new array must be unchanged")) + concrete.setshape(space, new_shape) + def descr_get_size(self, space): return space.wrap(self.find_size()) @@ -770,6 +866,27 @@ return NDimSlice(self, new_sig, start, strides[:], backstrides[:], shape[:]) + def descr_reshape(self, space, w_iterable): + """Return a reshaped view into the original array's data + """ + new_sig = signature.Signature.find_sig([ + NDimSlice.signature, self.signature, + ]) + concrete = self.get_concrete() + #concrete = self + ndims = len(concrete.shape) + strides = [0] * ndims + backstrides = [0] * ndims + shape = [0] * ndims + for i in range(len(concrete.shape)): + strides[i] = concrete.strides[i] + backstrides[i] = concrete.backstrides[i] + shape[i] = concrete.shape[i] + arr = NDimSlice(self, new_sig, self.start, strides, + backstrides, shape) + 
arr.descr_set_shape(space, w_iterable) + return arr + def descr_mean(self, space): return space.wrap(space.float_w(self.descr_sum(space)) / self.find_size()) @@ -866,6 +983,10 @@ def debug_repr(self): return 'Scalar' + def setshape(self, space, new_shape): + # XXX shouldn't it raise? + pass + class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs @@ -1058,6 +1179,39 @@ return space.wrap(self.shape[0]) return space.wrap(1) + def setshape(self, space, new_shape): + if len(self.shape) < 1: + return + elif len(self.shape) < 2: + #REVIEWER: this code could be refactored into calc_strides + #but then calc_strides would have to accept a stepping factor + strides = [] + backstrides = [] + s = self.strides[0] + if self.order == 'C': + new_shape.reverse() + for sh in new_shape: + strides.append(s) + backstrides.append(s * (sh - 1)) + s *= sh + if self.order == 'C': + strides.reverse() + backstrides.reverse() + new_shape.reverse() + self.strides = strides[:] + self.backstrides = backstrides[:] + self.shape = new_shape[:] + return + new_strides = calc_new_strides(new_shape, self.shape, self.strides) + if new_strides is None: + raise OperationError(space.w_AttributeError, space.wrap( + "incompatible shape for a non-contiguous array")) + new_backstrides = [0] * len(new_shape) + for nd in range(len(new_shape)): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + self.strides = new_strides[:] + self.backstrides = new_backstrides[:] + self.shape = new_shape[:] class NDimSlice(ViewArray): signature = signature.BaseSignature() @@ -1174,6 +1328,10 @@ return ArrayIterator(self.size) raise NotImplementedError # use ViewIterator simply, test it + def setshape(self, space, new_shape): + self.shape = new_shape + self.calc_strides(new_shape) + def debug_repr(self): return 'Array' @@ -1256,7 +1414,8 @@ __debug_repr__ = interp2app(BaseArray.descr_debug_repr), dtype = GetSetProperty(BaseArray.descr_get_dtype), - shape = GetSetProperty(BaseArray.descr_get_shape), + shape = GetSetProperty(BaseArray.descr_get_shape, + BaseArray.descr_set_shape), size = GetSetProperty(BaseArray.descr_get_size), T = GetSetProperty(BaseArray.descr_get_transpose), @@ -1274,6 +1433,7 @@ dot = interp2app(BaseArray.descr_dot), copy = interp2app(BaseArray.descr_copy), + reshape = interp2app(BaseArray.descr_reshape), ) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -158,6 +158,13 @@ assert shape_agreement(self.space, [5, 2], [4, 3, 5, 2]) == [4, 3, 5, 2] + def test_calc_new_strides(self): + from pypy.module.micronumpy.interp_numarray import calc_new_strides + assert calc_new_strides([2, 4, 3], [8, 3], [1, 16]) == [1, 2, 16] + assert calc_new_strides([2, 3, 4], [8, 3], [1, 16]) is None + assert calc_new_strides([8, 3], [2, 4, 3], [48, 6, 1]) == [6, 1] + assert calc_new_strides([24], [2, 4, 3], [48, 6, 1]) is None + assert calc_new_strides([24], [2, 4, 3], [48, 6, 2]) is None class AppTestNumArray(BaseNumpyAppTest): def test_type(self): @@ -324,6 +331,50 @@ c = a[:3] assert c.shape == (3,) + def test_set_shape(self): + from numpypy import array, zeros + a = array([]) + a.shape = [] + a = array(range(12)) + a.shape = (3, 4) + assert (a == [range(4), range(4, 8), range(8, 12)]).all() + a.shape = (3, 2, 2) + assert a[1, 1, 1] == 7 + a.shape = (3, -1, 2) + assert a.shape == (3, 2, 2) + a.shape = 12 + assert a.shape == (12, ) + exc = 
raises(ValueError, "a.shape = 10") + assert str(exc.value) == "total size of new array must be unchanged" + + def test_reshape(self): + from numpypy import array, zeros + a = array(range(12)) + exc = raises(ValueError, "b = a.reshape((3, 10))") + assert str(exc.value) == "total size of new array must be unchanged" + b = a.reshape((3, 4)) + assert b.shape == (3, 4) + assert (b == [range(4), range(4, 8), range(8, 12)]).all() + b[:, 0] = 1000 + assert (a == [1000, 1, 2, 3, 1000, 5, 6, 7, 1000, 9, 10, 11]).all() + a = zeros((4, 2, 3)) + a.shape = (12, 2) + + def test_slice_reshape(self): + from numpypy import zeros, arange + a = zeros((4, 2, 3)) + b = a[::2, :, :] + b.shape = (2, 6) + exc = raises(AttributeError, "b.shape = 12") + assert str(exc.value) == \ + "incompatible shape for a non-contiguous array" + b = a[::2, :, :].reshape((2, 6)) + b = arange(20)[1:17:2] + b.shape = (4, 2) + assert (b == [[1, 3], [5, 7], [9, 11], [13, 15]]).all() + b.reshape((2, 4)) + assert (b == [[1, 3, 5, 7], [9, 11, 13, 15]]).all() + def test_add(self): from numpypy import array a = array(range(5)) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -188,6 +188,7 @@ # sure it was optimized correctly. # XXX the comment above is wrong now. We need preferrably a way to # count the two loops separately + py.test.skip("counting exact number of classes is nonsense") self.check_resops({'setarrayitem_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 35, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, From noreply at buildbot.pypy.org Fri Dec 2 08:28:50 2011 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 2 Dec 2011 08:28:50 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-refactor: add REVIEW notes Message-ID: <20111202072850.795408208A@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-dtype-refactor Changeset: r50053:8bfca97d2a5c Date: 2011-12-02 09:28 +0200 http://bitbucket.org/pypy/pypy/changeset/8bfca97d2a5c/ Log: add REVIEW notes diff --git a/pypy/module/micronumpy/REVIEW b/pypy/module/micronumpy/REVIEW new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/REVIEW @@ -0,0 +1,5 @@ +* Why we pass __module__ = 'numpy' to typedefs? Most stuff in pypy has + __module__ == '__builtin__' for good or bad, but if so, we should pass numpypy +* why int16/int32 are not exported in __init__.py? +* W_GenericBox.descr_int is not tested +* setitem_w no longer calls invalidated(), why? doesn't it break some stuff? From noreply at buildbot.pypy.org Fri Dec 2 09:14:24 2011 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 2 Dec 2011 09:14:24 +0100 (CET) Subject: [pypy-commit] pypy default: Bump the release number Message-ID: <20111202081424.49E888208A@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50054:9f0ae6c94a99 Date: 2011-12-02 10:13 +0200 http://bitbucket.org/pypy/pypy/changeset/9f0ae6c94a99/ Log: Bump the release number diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. 
_`Release 1.6`: http://pypy.org/download.html +.. _`Release 1.7`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. Windows is supported, but platform-specific bugs tend to take longer before we notice and fix From noreply at buildbot.pypy.org Fri Dec 2 09:34:51 2011 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 2 Dec 2011 09:34:51 +0100 (CET) Subject: [pypy-commit] pypy default: Be slightly more vigilant when it comes to allocating registers for variables Message-ID: <20111202083451.6674C8208A@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50055:4b2e7974fd92 Date: 2011-12-02 10:34 +0200 http://bitbucket.org/pypy/pypy/changeset/4b2e7974fd92/ Log: Be slightly more vigilant when it comes to allocating registers for variables at the beginning of the loop. Not *much* of an improvement, but should help for tight loops diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -2,6 +2,8 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan +from pypy.jit.tool.oparser import parse +from pypy.jit.backend.detect_cpu import getcpuclass def newboxes(*values): return [BoxInt(v) for v in values] diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -167,26 +167,22 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity = self._compute_vars_longevity(inputargs, operations) + longevity, useful = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) - return operations + return operations, useful def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - jump = operations[-1] - loop_consts = self._compute_loop_consts(inputargs, jump, looptoken) - self.loop_consts = loop_consts - return self._process_inputargs(inputargs), operations + operations, useful = self._prepare(inputargs, operations, allgcrefs) + return self._process_inputargs(inputargs, useful), operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - self.loop_consts = {} + operations, _ = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] @@ -195,7 +191,7 @@ def reserve_param(self, n): self.param_depth = max(self.param_depth, 
n) - def _process_inputargs(self, inputargs): + def _process_inputargs(self, inputargs, useful): # XXX we can sort out here by longevity if we need something # more optimal floatlocs = [None] * len(inputargs) @@ -211,7 +207,7 @@ arg = inputargs[i] assert not isinstance(arg, Const) reg = None - if arg not in self.loop_consts and self.longevity[arg][1] > -1: + if self.longevity[arg][1] > -1 and arg in useful: if arg.type == FLOAT: # xxx is it really a good idea? at the first CALL they # will all be flushed anyway @@ -287,15 +283,15 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: - loop_consts = {} - else: - loop_consts = {} - for i in range(len(inputargs)): - if inputargs[i] is jump.getarg(i): - loop_consts[inputargs[i]] = i - return loop_consts + #def _compute_loop_consts(self, inputargs, jump, looptoken): + # if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: + # loop_consts = {} + # else: + # loop_consts = {} + # for i in range(len(inputargs)): + # if inputargs[i] is jump.getarg(i): + # loop_consts[inputargs[i]] = i + # return loop_consts def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py @@ -450,8 +446,14 @@ def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" + + # returns a pair longevity/useful. Non-useful variables are ones that + # never appear in the assembler or it does not matter if they appear on + # stack or in registers. Main example is loop arguments that go + # only to guard operations or to jump or to finish produced = {} last_used = {} + useful = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -459,8 +461,11 @@ continue assert op.result not in produced produced[op.result] = i + opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) + if opnum != rop.JUMP and opnum != rop.FINISH: + useful[arg] = None if isinstance(arg, Box) and arg not in last_used: last_used[arg] = i if op.is_guard(): @@ -486,7 +491,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity + return longevity, useful def loc(self, v): if v is None: # xxx kludgy diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -149,6 +149,13 @@ self.cpu.execute_token(loop.token) return loop + def prepare_loop(self, ops): + loop = self.parse(ops) + regalloc = RegAlloc(self.cpu.assembler, False) + regalloc.prepare_loop(loop.inputargs, loop.operations, + loop.token, []) + return regalloc + def getint(self, index): return self.cpu.get_latest_value_int(index) @@ -422,6 +429,35 @@ self.run(loop) assert self.getints(9) == range(9) + def test_loopargs(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_2(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + finish(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_3(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + guard_true(i4) [i0, i1, i2, i3, i4] + jump(i4, i1, i2, i3) + """ + regalloc = 
self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + class TestRegallocCompOps(BaseTestRegalloc): def test_cmp_op_0(self): From noreply at buildbot.pypy.org Fri Dec 2 10:50:59 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Dec 2011 10:50:59 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: kill reference to --stackless (thanks Fluxid) Message-ID: <20111202095059.1A5D18208A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r296:ab85b0339954 Date: 2011-12-02 10:50 +0100 http://bitbucket.org/pypy/pypy.org/changeset/ab85b0339954/ Log: kill reference to --stackless (thanks Fluxid) diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -134,7 +134,6 @@ pypy translate.py -Ojit # get the JIT version pypy translate.py -O2 # get the no-jit version pypy translate.py -O2 --sandbox # get the sandbox version -pypy translate.py -O2 --stackless # get the stackless version pypy translate.py -Ojit --backend=cli # only for branch/cli-jit diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -126,7 +126,6 @@ pypy translate.py -Ojit # get the JIT version pypy translate.py -O2 # get the no-jit version pypy translate.py -O2 --sandbox # get the sandbox version - pypy translate.py -O2 --stackless # get the stackless version pypy translate.py -Ojit --backend=cli # only for branch/cli-jit 5. Enjoy Mandelbrot ``:-)`` It takes on the order of half an hour to From noreply at buildbot.pypy.org Fri Dec 2 12:05:54 2011 From: noreply at buildbot.pypy.org (hager) Date: Fri, 2 Dec 2011 12:05:54 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: added force index to description of stackframe Message-ID: <20111202110554.BE2308208A@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50056:d3e4ca39849d Date: 2011-12-02 12:05 +0100 http://bitbucket.org/pypy/pypy/changeset/d3e4ca39849d/ Log: added force index to description of stackframe diff --git a/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py b/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py --- a/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py +++ b/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py @@ -2,9 +2,10 @@ PyPy PPC Stackframe - - --------------------------- -- - | | | + OLD FRAME + | BACK CHAIN | + - - - - - --------------------------- - - - - -- - - - - - - - - - + | | | CURRENT FRAME | FPR SAVE AREA | |>> len(NONVOLATILES_FPR) * WORD | | | --------------------------- -- @@ -16,6 +17,8 @@ | FLOAT/INT CONVERSION | |>> ? 
* WORD | | | --------------------------- -- + | FORCE INDEX | WORD | 1 WORD + SPP -> --------------------------- -- | | | | SPILLING AREA | |>> regalloc.frame_manager.frame_depth * WORD | (LOCAL VARIABLE SPACE) | | From noreply at buildbot.pypy.org Fri Dec 2 12:10:51 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 2 Dec 2011 12:10:51 +0100 (CET) Subject: [pypy-commit] pypy default: also add an explicit way to record a class Message-ID: <20111202111051.3D6808208A@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r50057:09d322fa3784 Date: 2011-12-02 10:01 +0100 http://bitbucket.org/pypy/pypy/changeset/09d322fa3784/ Log: also add an explicit way to record a class diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -220,6 +220,9 @@ return [None, # hack, do the right renaming from op.args[0] to op.result SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + def rewrite_op_jit_record_known_class(self, op): + return SpaceOperation("record_known_class", [op.args[0], op.args[1]], None) + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -14,7 +14,7 @@ from pypy.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside, loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, - isconstant, isvirtual, promote_string, set_param) + isconstant, isvirtual, promote_string, set_param, record_known_class) from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -3585,7 +3585,7 @@ self.interp_operations(f, [5], translationoptions=translationoptions) - def test_annotation_gives_knowledge_to_tracer(self): + def test_annotation_gives_class_knowledge_to_tracer(self): class Base(object): pass class A(Base): @@ -3645,6 +3645,70 @@ # here it works again self.check_operations_history(guard_class=0, record_known_class=1) + def test_give_class_knowledge_to_tracer_explicitly(self): + from pypy.rpython.lltypesystem.lloperation import llop + class Base(object): + def f(self): + raise NotImplementedError + def g(self): + raise NotImplementedError + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + record_known_class(a, A) + z = a.f() + elif x < 0: + record_known_class(a, B) + z = a.f() + else: + record_known_class(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = 
self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -738,3 +738,26 @@ return hop.genop('jit_marker', vlist, resulttype=lltype.Void) +def record_known_class(value, cls): + """ + Assure the JIT that value is an instance of cls. This is not a precise + class check, unlike a guard_class. + """ + assert isinstance(value, cls) + + +class Entry(ExtRegistryEntry): + _about_ = record_known_class + + def compute_result_annotation(self, *args): + pass + + def specialize_call(self, hop): + from pypy.rpython.lltypesystem import lltype, rclass + classrepr = rclass.get_type_repr(hop.rtyper) + + hop.exception_cannot_occur() + v_inst = hop.inputarg(hop.args_r[0], arg=0) + v_cls = hop.inputarg(classrepr, arg=1) + return hop.genop('jit_record_known_class', [v_inst, v_cls], + resulttype=lltype.Void) diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -548,6 +548,9 @@ def op_jit_marker(self, *args): pass + def op_jit_record_known_class(self, *args): + pass + def op_get_exception_addr(self, *args): raise NotImplementedError diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -430,6 +430,7 @@ 'jit_force_virtual': LLOp(canrun=True), 'jit_is_virtual': LLOp(canrun=True), 'jit_force_quasi_immutable': LLOp(canrun=True), + 'jit_record_known_class' : LLOp(canrun=True), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canmallocgc=True), diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -548,6 +548,9 @@ def op_jit_force_quasi_immutable(*args): pass +def op_jit_record_known_class(x, y): + pass + def op_get_group_member(TYPE, grpptr, memberoffset): from pypy.rpython.lltypesystem import llgroup assert isinstance(memberoffset, llgroup.GroupMemberOffset) From noreply at buildbot.pypy.org Fri Dec 2 12:10:52 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 2 Dec 2011 12:10:52 +0100 (CET) Subject: [pypy-commit] pypy default: jit_record_known_class does nothing in the C backend Message-ID: <20111202111052.6D5308208A@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r50058:8b3befe1bd20 Date: 2011-12-02 10:35 +0100 http://bitbucket.org/pypy/pypy/changeset/8b3befe1bd20/ Log: jit_record_known_class does nothing in the C backend diff --git a/pypy/translator/c/src/support.h b/pypy/translator/c/src/support.h --- a/pypy/translator/c/src/support.h +++ b/pypy/translator/c/src/support.h @@ -11,6 +11,7 @@ #endif /* MIN */ #define RUNNING_ON_LLINTERP 0 +#define OP_JIT_RECORD_KNOWN_CLASS(i, c, r) /* nothing */ #define FAIL_EXCEPTION(exc, msg) \ { \ From noreply at buildbot.pypy.org Fri Dec 2 12:10:53 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 2 Dec 2011 12:10:53 +0100 (CET) Subject: [pypy-commit] pypy default: (arigo, cfbolz): some sanity checks Message-ID: <20111202111053.929DD8208A@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r50059:ba3ca8aaaef4 Date: 2011-12-02 12:10 +0100 
http://bitbucket.org/pypy/pypy/changeset/ba3ca8aaaef4/ Log: (arigo, cfbolz): some sanity checks diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -749,8 +749,11 @@ class Entry(ExtRegistryEntry): _about_ = record_known_class - def compute_result_annotation(self, *args): - pass + def compute_result_annotation(self, s_inst, s_cls): + from pypy.annotation import model as annmodel + assert s_cls.is_constant() + assert not s_inst.can_be_none() + assert isinstance(s_inst, annmodel.SomeInstance) def specialize_call(self, hop): from pypy.rpython.lltypesystem import lltype, rclass From noreply at buildbot.pypy.org Fri Dec 2 12:10:54 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 2 Dec 2011 12:10:54 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20111202111054.D87CD8208A@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r50060:0c65719691d8 Date: 2011-12-02 12:10 +0100 http://bitbucket.org/pypy/pypy/changeset/0c65719691d8/ Log: merge diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.6`: http://pypy.org/download.html +.. _`Release 1.7`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. 
Windows is supported, but platform-specific bugs tend to take longer before we notice and fix diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -2,6 +2,8 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan +from pypy.jit.tool.oparser import parse +from pypy.jit.backend.detect_cpu import getcpuclass def newboxes(*values): return [BoxInt(v) for v in values] diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -167,26 +167,22 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity = self._compute_vars_longevity(inputargs, operations) + longevity, useful = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) - return operations + return operations, useful def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - jump = operations[-1] - loop_consts = self._compute_loop_consts(inputargs, jump, looptoken) - self.loop_consts = loop_consts - return self._process_inputargs(inputargs), operations + operations, useful = self._prepare(inputargs, operations, allgcrefs) + return self._process_inputargs(inputargs, useful), operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - self.loop_consts = {} + operations, _ = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] @@ -195,7 +191,7 @@ def reserve_param(self, n): self.param_depth = max(self.param_depth, n) - def _process_inputargs(self, inputargs): + def _process_inputargs(self, inputargs, useful): # XXX we can sort out here by longevity if we need something # more optimal floatlocs = [None] * len(inputargs) @@ -211,7 +207,7 @@ arg = inputargs[i] assert not isinstance(arg, Const) reg = None - if arg not in self.loop_consts and self.longevity[arg][1] > -1: + if self.longevity[arg][1] > -1 and arg in useful: if arg.type == FLOAT: # xxx is it really a good idea? 
at the first CALL they # will all be flushed anyway @@ -287,15 +283,15 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: - loop_consts = {} - else: - loop_consts = {} - for i in range(len(inputargs)): - if inputargs[i] is jump.getarg(i): - loop_consts[inputargs[i]] = i - return loop_consts + #def _compute_loop_consts(self, inputargs, jump, looptoken): + # if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: + # loop_consts = {} + # else: + # loop_consts = {} + # for i in range(len(inputargs)): + # if inputargs[i] is jump.getarg(i): + # loop_consts[inputargs[i]] = i + # return loop_consts def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py @@ -450,8 +446,14 @@ def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" + + # returns a pair longevity/useful. Non-useful variables are ones that + # never appear in the assembler or it does not matter if they appear on + # stack or in registers. Main example is loop arguments that go + # only to guard operations or to jump or to finish produced = {} last_used = {} + useful = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -459,8 +461,11 @@ continue assert op.result not in produced produced[op.result] = i + opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) + if opnum != rop.JUMP and opnum != rop.FINISH: + useful[arg] = None if isinstance(arg, Box) and arg not in last_used: last_used[arg] = i if op.is_guard(): @@ -486,7 +491,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity + return longevity, useful def loc(self, v): if v is None: # xxx kludgy diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -149,6 +149,13 @@ self.cpu.execute_token(loop.token) return loop + def prepare_loop(self, ops): + loop = self.parse(ops) + regalloc = RegAlloc(self.cpu.assembler, False) + regalloc.prepare_loop(loop.inputargs, loop.operations, + loop.token, []) + return regalloc + def getint(self, index): return self.cpu.get_latest_value_int(index) @@ -422,6 +429,35 @@ self.run(loop) assert self.getints(9) == range(9) + def test_loopargs(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_2(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + finish(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_3(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + guard_true(i4) [i0, i1, i2, i3, i4] + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + class TestRegallocCompOps(BaseTestRegalloc): def test_cmp_op_0(self): diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -491,6 +491,9 @@ self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) self.emit_operation(op) + def 
optimize_SAME_AS(self, op): + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) + dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_', default=OptRewrite.emit_operation) optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD') diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -612,7 +612,7 @@ return node.value res = self.meta_interp(f, [48, 3], policy=StopAtXPolicy(externfn)) assert res == f(48, 3) - self.check_loop_count(3) + self.check_loop_count(5) res = self.meta_interp(f, [40, 3], policy=StopAtXPolicy(externfn)) assert res == f(40, 3) self.check_loop_count(3) @@ -761,6 +761,27 @@ res = self.meta_interp(f, [0x1F, 0x11]) assert res == f(0x1F, 0x11) + def test_duplicated_virtual(self): + myjitdriver = JitDriver(greens = [], reds = ['n', 'node1', 'node2']) + def f(n): + node1 = self._new() + node1.value = 0 + node2 = self._new() + node2.value = 1 + while n > 0: + myjitdriver.jit_merge_point(n=n, node1=node1, node2=node2) + next = self._new() + next.value = node1.value + node2.value + n + node1 = next + node2 = next + n -= 1 + return node1.value + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_resops(new_with_vtable=0, new=0) + + + class VirtualMiscTests: def test_multiple_equal_virtuals(self): diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -75,7 +75,8 @@ def list_strategy(space, w_list): from pypy.objspace.std.listobject import W_ListObject - str_type = None if isinstance(w_list, W_ListObject): - str_type = w_list.strategy._applevel_repr - return space.wrap(str_type) + return space.wrap(w_list.strategy._applevel_repr) + else: + w_msg = space.wrap("Can only get the list strategy of a list") + raise OperationError(space.w_TypeError, w_msg) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -57,17 +57,18 @@ def test_list_strategy(self): from __pypy__ import list_strategy - l = [1,2,3] + + l = [1, 2, 3] assert list_strategy(l) == "int" - l = ["a","b","c"] + l = ["a", "b", "c"] assert list_strategy(l) == "str" - l = [1.1,2.2,3.3] + l = [1.1, 2.2, 3.3] assert list_strategy(l) == "float" l = range(3) assert list_strategy(l) == "range" - l = [1,"b",3] + l = [1, "b", 3] assert list_strategy(l) == "object" l = [] assert list_strategy(l) == "empty" o = 5 - assert list_strategy(o) == None + raises(TypeError, list_strategy, 5) From noreply at buildbot.pypy.org Fri Dec 2 13:25:48 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 2 Dec 2011 13:25:48 +0100 (CET) Subject: [pypy-commit] pypy default: disable the use of cast_pointer in the JIT for now, to find out whether it Message-ID: <20111202122548.8BE2F8208A@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r50061:c8c6a9ce7e04 Date: 2011-12-02 13:25 +0100 http://bitbucket.org/pypy/pypy/changeset/c8c6a9ce7e04/ Log: disable the use of cast_pointer in the JIT for now, to find out whether it caused the test failures. 
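The explicit hint introduced earlier in this batch (r50057) is not touched by this change; only the automatic specialisation of cast_pointer is switched off. A hypothetical usage sketch follows, assuming a made-up class Node and function f; only record_known_class itself comes from this branch:

    from pypy.rlib.jit import record_known_class

    class Node(object):
        def __init__(self, value):
            self.value = value
        def double(self):
            return self.value * 2

    def f(node):
        # Assure the JIT that node is an instance of Node (checked with a
        # plain assert when running untranslated).  As in the tests above,
        # this lets the tracer drop the guard_class before the method call.
        record_known_class(node, Node)
        return node.double()
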
diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -210,6 +210,8 @@ def rewrite_op_cast_pointer(self, op): newop = self.rewrite_op_same_as(op) assert newop is None + return + # disabled for now if (self._is_rclass_instance(op.args[0]) and self._is_rclass_instance(op.result)): FROM = op.args[0].concretetype.TO diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -3586,6 +3586,7 @@ def test_annotation_gives_class_knowledge_to_tracer(self): + py.test.skip("disabled") class Base(object): pass class A(Base): From noreply at buildbot.pypy.org Fri Dec 2 14:02:32 2011 From: noreply at buildbot.pypy.org (hager) Date: Fri, 2 Dec 2011 14:02:32 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: started refactoring of stackframes Message-ID: <20111202130232.37F668208A@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50062:d1741320b829 Date: 2011-12-02 14:02 +0100 http://bitbucket.org/pypy/pypy/changeset/d1741320b829/ Log: started refactoring of stackframes diff --git a/pypy/jit/backend/ppc/ppcgen/arch.py b/pypy/jit/backend/ppc/ppcgen/arch.py --- a/pypy/jit/backend/ppc/ppcgen/arch.py +++ b/pypy/jit/backend/ppc/ppcgen/arch.py @@ -1,19 +1,23 @@ # Constants that depend on whether we are on 32-bit or 64-bit -from pypy.jit.backend.ppc.ppcgen.register import NONVOLATILES +from pypy.jit.backend.ppc.ppcgen.register import (NONVOLATILES, + NONVOLATILES_FLOAT) import sys if sys.maxint == (2**31 - 1): WORD = 4 IS_PPC_32 = True - BACKCHAIN_SIZE = 2 * WORD else: WORD = 8 IS_PPC_32 = False - BACKCHAIN_SIZE = 4 * WORD -IS_PPC_64 = not IS_PPC_32 -MY_COPY_OF_REGS = 0 +DWORD = 2 * WORD +BACKCHAIN_SIZE = 6 * WORD +IS_PPC_64 = not IS_PPC_32 +MY_COPY_OF_REGS = 0 -GPR_SAVE_AREA = len(NONVOLATILES) * WORD -MAX_REG_PARAMS = 8 +FORCE_INDEX = WORD +GPR_SAVE_AREA = len(NONVOLATILES) * WORD +FPR_SAVE_AREA = len(NONVOLATILES_FLOAT) * DWORD +FLOAT_INT_CONVERSION = 4 * WORD +MAX_REG_PARAMS = 8 diff --git a/pypy/jit/backend/ppc/ppcgen/locations.py b/pypy/jit/backend/ppc/ppcgen/locations.py --- a/pypy/jit/backend/ppc/ppcgen/locations.py +++ b/pypy/jit/backend/ppc/ppcgen/locations.py @@ -1,5 +1,13 @@ from pypy.jit.metainterp.history import INT, FLOAT, REF -from pypy.jit.backend.arm.arch import WORD +import sys + +# XXX import from arch.py, currently we have a circular import +if sys.maxint == (2**31 - 1): + WORD = 4 +else: + WORD = 8 +DWORD = 2 * WORD + class AssemblerLocation(object): _immutable_ = True type = INT @@ -38,6 +46,23 @@ def as_key(self): return self.value +class FPRegisterLocation(RegisterLocation): + _immutable_ = True + type = FLOAT + width = DWORD + + def __repr__(self): + return 'fp%d' % self.value + + def is_reg(self): + return False + + def is_fp_reg(self): + return True + + def as_key(self): + return self.value + class ImmLocation(AssemblerLocation): _immutable_ = True width = WORD diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -11,7 +11,9 @@ from pypy.jit.backend.ppc.ppcgen.jump import remap_frame_layout from pypy.jit.backend.ppc.ppcgen.arch import (IS_PPC_32, IS_PPC_64, WORD, NONVOLATILES, - GPR_SAVE_AREA, BACKCHAIN_SIZE) + GPR_SAVE_AREA, BACKCHAIN_SIZE, + FPR_SAVE_AREA, + 
FLOAT_INT_CONVERSION, FORCE_INDEX) from pypy.jit.backend.ppc.ppcgen.helper.assembler import (gen_emit_cmp_op, encode32, decode32, decode64, @@ -127,6 +129,7 @@ self.fail_boxes_count = 0 self.current_clt = None self._regalloc = None + self.max_stack_params = 0 def _save_nonvolatiles(self): for i, reg in enumerate(NONVOLATILES): @@ -546,6 +549,7 @@ self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, allblocks) self.stack_in_use = False + self.max_stack_params = 0 def setup_once(self): gc_ll_descr = self.cpu.gc_ll_descr @@ -712,6 +716,7 @@ self._regalloc = None assert self.datablockwrapper is None self.stack_in_use = False + self.max_stack_params = 0 def _walk_operations(self, operations, regalloc): self._regalloc = regalloc @@ -770,10 +775,21 @@ return mc.materialize(self.cpu.asmmemmgr, [], self.cpu.gc_ll_descr.gcrootmap) + #def compute_frame_depth(self, regalloc): + # frame_depth = (GPR_SAVE_AREA # GPR space + # + WORD # FORCE INDEX + # + regalloc.frame_manager.frame_depth * WORD) + # return frame_depth def compute_frame_depth(self, regalloc): - frame_depth = (GPR_SAVE_AREA # GPR space - + WORD # FORCE INDEX - + regalloc.frame_manager.frame_depth * WORD) + frame_depth = ( GPR_SAVE_AREA + + FPR_SAVE_AREA + + FLOAT_INT_CONVERSION + + FORCE_INDEX + + regalloc.frame_manager.frame_depth * WORD + + len(r.MANAGED_REGS) * WORD + + self.max_stack_params * WORD + + BACKCHAIN_SIZE) + return frame_depth def materialize_loop(self, looptoken, show): diff --git a/pypy/jit/backend/ppc/ppcgen/register.py b/pypy/jit/backend/ppc/ppcgen/register.py --- a/pypy/jit/backend/ppc/ppcgen/register.py +++ b/pypy/jit/backend/ppc/ppcgen/register.py @@ -1,14 +1,24 @@ -from pypy.jit.backend.ppc.ppcgen.locations import RegisterLocation +from pypy.jit.backend.ppc.ppcgen.locations import (RegisterLocation, + FPRegisterLocation) -ALL_REGS = [RegisterLocation(i) for i in range(32)] +ALL_REGS = [RegisterLocation(i) for i in range(32)] +ALL_FLOAT_REGS = [FPRegisterLocation(i) for i in range(32)] r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15, r16,\ r17, r18, r19, r20, r21, r22, r23, r24, r25, r26, r27, r28, r29, r30, r31\ = ALL_REGS -NONVOLATILES = [r14, r15, r16, r17, r18, r19, r20, r21, r22, r23, +f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16,\ + f17, f18, f19, f20, f21, f22, f23, f24, f25, f26, f27, f28, f29, f30, f31\ + = ALL_FLOAT_REGS + +NONVOLATILES = [r14, r15, r16, r17, r18, r19, r20, r21, r22, r23, r24, r25, r26, r27, r28, r29, r30, r31] -VOLATILES = [r0, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13] +VOLATILES = [r0, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13] + +NONVOLATILES_FLOAT = [f14, f15, f16, f17, f18, f19, f20, f21, f22, f23, + f24, f25, f26, f27, f28, f29, f30, f31] + SPP = r31 SP = r1 diff --git a/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py b/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py --- a/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py +++ b/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py @@ -14,7 +14,7 @@ | | | --------------------------- -- | | | - | FLOAT/INT CONVERSION | |>> ? * WORD + | FLOAT/INT CONVERSION | |>> 4 (?) 
* WORD | | | --------------------------- -- | FORCE INDEX | WORD | 1 WORD From noreply at buildbot.pypy.org Fri Dec 2 15:18:15 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Fri, 2 Dec 2011 15:18:15 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: struct.pack issues with the JIT Message-ID: <20111202141815.C57A88208A@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50063:cf81cefc5208 Date: 2011-12-02 15:08 +0100 http://bitbucket.org/pypy/pypy/changeset/cf81cefc5208/ Log: struct.pack issues with the JIT diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -189,7 +189,7 @@ # def get_alignment(code): # Retrieve default alignment for the compiler/platform - return struct.calcsize('l' + code) - struct.calcsize(code) + return struct.calcsize(lltype.SignedFmt + code) - struct.calcsize(code) assert descr1.get_base_size(False) == get_alignment('c') assert descr2.get_base_size(False) == get_alignment('p') assert descr3.get_base_size(False) == get_alignment('p') From noreply at buildbot.pypy.org Fri Dec 2 15:52:47 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Dec 2011 15:52:47 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: - rename ANY to Any to avoid confusion Message-ID: <20111202145247.F2EC08208A@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50064:acd6e35b5711 Date: 2011-12-02 15:52 +0100 http://bitbucket.org/pypy/pypy/changeset/acd6e35b5711/ Log: - rename ANY to Any to avoid confusion - use a more pragmatic approach of having 10 specialized versions, for all for 2-tuples in all combinations, with the exception that floats only get the (float, float) combination. 
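The "10 specialized versions" mentioned in the log add up as follows in the changeset below: one class for every pair drawn from {int, str, Any} (3 * 3 = 9) plus the single (float, float) class, since a float is only specialised when paired with another float. A minimal counting sketch, mirroring the make_specialised_class loop and its naming scheme (illustrative only, not part of the changeset):

    class Any(object):
        pass

    pairs = [(float, float)]                  # floats only pair with floats
    for t1 in (int, str, Any):
        for t2 in (int, str, Any):
            pairs.append((t1, t2))            # 3 * 3 = 9 further combinations

    names = ['W_SpecialisedTupleObject' +
             ''.join(t.__name__.capitalize() for t in pair)
             for pair in pairs]
    assert len(names) == 10
    assert 'W_SpecialisedTupleObjectFloatFloat' in names
    assert 'W_SpecialisedTupleObjectStrAny' in names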
diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -9,7 +9,7 @@ from pypy.rlib.objectmodel import compute_hash from pypy.rlib.unroll import unrolling_iterable -class ANY(type): +class Any(object): pass class NotSpecialised(Exception): @@ -62,10 +62,10 @@ class cls(W_SpecialisedTupleObject): def __init__(self, space, values): - print cls,cls.__class__, values + #print cls,cls.__class__, values assert len(values) == nValues for i in iter_n: - if typetuple[i] != ANY: + if typetuple[i] != Any: assert isinstance(values[i], typetuple[i]) self.space = space for i in iter_n: @@ -86,7 +86,7 @@ elif val_type == str: if space.type(param) != space.w_str: raise NotSpecialised - elif val_type == ANY: + elif val_type == Any: pass else: raise NotSpecialised @@ -98,7 +98,7 @@ unwrappedparams[i] = space.float_w(paramlist[i]) elif typetuple[i] == str: unwrappedparams[i] = space.str_w(paramlist[i]) - elif typetuple[i] == ANY: + elif typetuple[i] == Any: unwrappedparams[i] = paramlist[i] else: raise NotSpecialised @@ -110,7 +110,7 @@ def tolist(self): list_w = [None] * nValues for i in iter_n: - if typetuple[i] == ANY: + if typetuple[i] == Any: list_w[i] = getattr(self, 'value%s' % i) else: list_w[i] = self.space.wrap(getattr(self, 'value%s' % i)) @@ -119,7 +119,7 @@ def _to_unwrapped_list(self): list_w = [None] * nValues for i in iter_n: - if typetuple[i] == ANY: + if typetuple[i] == Any: list_w[i] = space.unwrap(getattr(self, 'value%s' % i))#xxx else: list_w[i] = getattr(self, 'value%s' % i) @@ -131,7 +131,7 @@ z = 2 for i in iter_n: value = getattr(self, 'value%s' % i) - if typetuple[i] == ANY: + if typetuple[i] == Any: y = space.int_w(space.hash(value)) elif typetuple[i] == float: # get correct hash for float which is an integer & other less frequent cases y = _hash_float(space, value) @@ -147,7 +147,7 @@ if not isinstance(w_other, cls): #so we will be sure we are comparing same types raise FailedToImplement for i in iter_n: - if typetuple[i] == ANY: + if typetuple[i] == Any: if not self.space.is_true(self.space.eq(getattr(self, 'value%s' % i), getattr(w_other, 'value%s' % i))): return False else: @@ -167,7 +167,7 @@ raise FailedToImplement ncmp = min(self.length(), w_other.length()) for i in iter_n: - if typetuple[i] == ANY:#like space.eq on wrapped or two params? + if typetuple[i] == Any:#like space.eq on wrapped or two params? 
raise FailedToImplement if ncmp > i: l_val = getattr(self, 'value%s' % i) @@ -179,7 +179,7 @@ def getitem(self, index): for i in iter_n: if index == i: - if typetuple[i] == ANY: + if typetuple[i] == Any: return getattr(self, 'value%s' % i) else: return self.space.wrap(getattr(self, 'value%s' % i)) @@ -188,16 +188,13 @@ cls.__name__ = 'W_SpecialisedTupleObject' + ''.join([t.__name__.capitalize() for t in typetuple]) _specialisations.append(cls) return cls - - -W_SpecialisedTupleObjectIntInt = make_specialised_class((int,int)) -W_SpecialisedTupleObjectIntAny = make_specialised_class((int, ANY)) -W_SpecialisedTupleObjectIntIntInt = make_specialised_class((int,int,int)) -W_SpecialisedTupleObjectFloatFloat = make_specialised_class((float,float)) -W_SpecialisedTupleObjectStrStr = make_specialised_class((str, str)) -W_SpecialisedTupleObjectStrAny = make_specialised_class((str, ANY)) -W_SpecialisedTupleObjectIntFloatStr= make_specialised_class((int, float, str)) -W_SpecialisedTupleObjectIntStrFloatAny= make_specialised_class((int, float, str, ANY)) + +make_specialised_class((float, float)) +for _typ1 in [int, str, Any]: + for _typ2 in [int, str, Any]: + make_specialised_class((_typ1, _typ2)) + +# ____________________________________________________________ registerimplementation(W_SpecialisedTupleObject) diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py --- a/pypy/objspace/std/test/test_specialisedtupleobject.py +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -1,12 +1,17 @@ import py from pypy.objspace.std.tupleobject import W_TupleObject -from pypy.objspace.std.specialisedtupleobject import W_SpecialisedTupleObject,W_SpecialisedTupleObjectIntInt +from pypy.objspace.std.specialisedtupleobject import W_SpecialisedTupleObject +from pypy.objspace.std.specialisedtupleobject import _specialisations from pypy.interpreter.error import OperationError from pypy.conftest import gettestobjspace from pypy.objspace.std.test.test_tupleobject import AppTestW_TupleObject from pypy.interpreter import gateway +for cls in _specialisations: + globals()[cls.__name__] = cls + + class TestW_SpecialisedTupleObject(): def setup_class(cls): @@ -67,26 +72,36 @@ w_tuple.tolist = delegation_forbidden return w_tuple cls.w_forbid_delegation = cls.space.wrap(gateway.interp2app(forbid_delegation)) - - def w_isspecialised(self, obj): - import __pypy__ - return "SpecialisedTuple" in __pypy__.internal_repr(obj) - + + def w_isspecialised(self, obj, expected=''): + import __pypy__ + r = __pypy__.internal_repr(obj) + print obj, '==>', r, ' (expected: %r)' % expected + return ("SpecialisedTupleObject" + expected) in r def test_createspecialisedtuple(self): - assert self.isspecialised((42,43)) - assert self.isspecialised((4.2,4.3)) - assert self.isspecialised((1.0,2.0)) - assert self.isspecialised(('a','b')) - + spec = {int: 'Int', + float: 'Float', + str: 'Str', + list: 'Any'} + # + for x in [42, 4.2, "foo", []]: + for y in [43, 4.3, "bar", []]: + expected1 = spec[type(x)] + expected2 = spec[type(y)] + if (expected1 == 'Float') ^ (expected2 == 'Float'): + if expected1 == 'Float': expected1 = 'Any' + if expected2 == 'Float': expected2 = 'Any' + obj = (x, y) + assert self.isspecialised(obj, expected1 + expected2) + def test_len(self): t = self.forbid_delegation((42,43)) assert len(t) == 2 def test_notspecialisedtuple(self): assert not self.isspecialised((42,43,44,45)) - assert not self.isspecialised((1.5,2)) - assert not self.isspecialised((1.0,2)) + 
assert not self.isspecialised((1.5,)) def test_slicing_to_specialised(self): assert self.isspecialised((1, 2, 3)[0:2]) From noreply at buildbot.pypy.org Fri Dec 2 15:55:55 2011 From: noreply at buildbot.pypy.org (hager) Date: Fri, 2 Dec 2011 15:55:55 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: slightly changed stackframe layout so that encoding area can be accessed with fixed offset from SPP Message-ID: <20111202145555.4E7EB8208A@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50065:6ad92645f0e4 Date: 2011-12-02 15:55 +0100 http://bitbucket.org/pypy/pypy/changeset/6ad92645f0e4/ Log: slightly changed stackframe layout so that encoding area can be accessed with fixed offset from SPP diff --git a/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py b/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py --- a/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py +++ b/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py @@ -18,16 +18,16 @@ | | | --------------------------- -- | FORCE INDEX | WORD | 1 WORD + --------------------------- -- + | | | + | ENCODING AREA | |>> len(MANAGED_REGS) * WORD + | (ALLOCA AREA) | | SPP -> --------------------------- -- | | | | SPILLING AREA | |>> regalloc.frame_manager.frame_depth * WORD | (LOCAL VARIABLE SPACE) | | --------------------------- -- | | | - | ENCODING AREA | |>> len(MANAGED_REGS) * WORD - | (ALLOCA AREA) | | - --------------------------- -- - | | | | PARAMETER SAVE AREA | |>> use MAX(number of parameters | | | passed on stack in emit_call) * WORD --------------------------- -- From noreply at buildbot.pypy.org Fri Dec 2 16:08:33 2011 From: noreply at buildbot.pypy.org (hager) Date: Fri, 2 Dec 2011 16:08:33 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: remove parameter save area from stacklayout Message-ID: <20111202150833.D80358208A@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50066:de0569ccdb88 Date: 2011-12-02 16:08 +0100 http://bitbucket.org/pypy/pypy/changeset/de0569ccdb88/ Log: remove parameter save area from stacklayout diff --git a/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py b/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py --- a/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py +++ b/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py @@ -26,10 +26,6 @@ | | | | SPILLING AREA | |>> regalloc.frame_manager.frame_depth * WORD | (LOCAL VARIABLE SPACE) | | - --------------------------- -- - | | | - | PARAMETER SAVE AREA | |>> use MAX(number of parameters - | | | passed on stack in emit_call) * WORD --------------------------- -- | TOC POINTER | WORD | --------------------------- | From noreply at buildbot.pypy.org Fri Dec 2 18:40:23 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 2 Dec 2011 18:40:23 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-refactor: __module__ is now correctly, numpypy Message-ID: <20111202174023.668EA8208A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-dtype-refactor Changeset: r50067:adbd652dc0ca Date: 2011-12-02 12:27 -0500 http://bitbucket.org/pypy/pypy/changeset/adbd652dc0ca/ Log: __module__ is now correctly, numpypy diff --git a/pypy/module/micronumpy/REVIEW b/pypy/module/micronumpy/REVIEW --- a/pypy/module/micronumpy/REVIEW +++ b/pypy/module/micronumpy/REVIEW @@ -1,5 +1,3 @@ -* Why we pass __module__ = 'numpy' to typedefs? Most stuff in pypy has - __module__ == '__builtin__' for good or bad, but if so, we should pass numpypy * why int16/int32 are not exported in __init__.py? 
* W_GenericBox.descr_int is not tested * setitem_w no longer calls invalidated(), why? doesn't it break some stuff? diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -154,7 +154,7 @@ W_GenericBox.typedef = TypeDef("generic", - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_GenericBox.descr__new__.im_func), @@ -184,45 +184,45 @@ ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_BoolBox.descr__new__.im_func), ) W_NumberBox.typedef = TypeDef("number", W_GenericBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_IntegerBox.typedef = TypeDef("integer", W_NumberBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_SignedIntegerBox.typedef = TypeDef("signedinteger", W_IntegerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_Int8Box.descr__new__.im_func), ) W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntgerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntgerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_Int32Box.typedef = TypeDef("int32", W_SignedIntegerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntgerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) if LONG_BIT == 32: @@ -230,36 +230,36 @@ elif LONG_BIT == 64: long_name = "int64" W_LongBox.typedef = TypeDef(long_name, (W_SignedIntegerBox.typedef, int_typedef,), - __module__ = "numpy", + __module__ = "numpypy", ) W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntgerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_Int64Box.descr__new__.im_func), ) W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntgerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_FloatingBox.typedef = TypeDef("floating", W_InexactBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_Float64Box.descr__new__.im_func), ) \ No newline at end of file diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -84,7 +84,7 @@ return space.newtuple([]) W_Dtype.typedef = TypeDef("dtype", - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_Dtype.descr__new__.im_func), __str__= interp2app(W_Dtype.descr_str), diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ 
b/pypy/module/micronumpy/interp_ufuncs.py @@ -168,7 +168,7 @@ W_Ufunc.typedef = TypeDef("ufunc", - __module__ = "numpy", + __module__ = "numpypy", __call__ = interp2app(W_Ufunc.descr_call), __repr__ = interp2app(W_Ufunc.descr_repr), From noreply at buildbot.pypy.org Fri Dec 2 18:40:24 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 2 Dec 2011 18:40:24 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-refactor: Fix tests, expose int{16, 32} at app level, add a test for __int__ Message-ID: <20111202174024.93CA3820C2@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-dtype-refactor Changeset: r50068:f1d05c0a4b16 Date: 2011-12-02 12:40 -0500 http://bitbucket.org/pypy/pypy/changeset/f1d05c0a4b16/ Log: Fix tests, expose int{16,32} at app level, add a test for __int__ diff --git a/pypy/module/micronumpy/REVIEW b/pypy/module/micronumpy/REVIEW --- a/pypy/module/micronumpy/REVIEW +++ b/pypy/module/micronumpy/REVIEW @@ -1,3 +1,1 @@ -* why int16/int32 are not exported in __init__.py? -* W_GenericBox.descr_int is not tested * setitem_w no longer calls invalidated(), why? doesn't it break some stuff? diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -25,6 +25,8 @@ 'signedinteger': 'interp_boxes.W_SignedIntegerBox', 'bool_': 'interp_boxes.W_BoolBox', 'int8': 'interp_boxes.W_Int8Box', + 'int16': 'interp_boxes.W_Int16Box', + 'int32': 'interp_boxes.W_Int32Box', 'int64': 'interp_boxes.W_Int64Box', 'int_': 'interp_boxes.W_LongBox', 'inexact': 'interp_boxes.W_InexactBox', diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -122,7 +122,7 @@ descr__new__, get_dtype = new_dtype_getter("uint16") class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): - pass + descr__new__, get_dtype = new_dtype_getter("int32") class W_UInt32Box(W_UnsignedIntgerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("uint32") @@ -211,6 +211,7 @@ W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = interp2app(W_Int16Box.descr__new__.im_func), ) W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntgerBox.typedef, @@ -219,6 +220,7 @@ W_Int32Box.typedef = TypeDef("int32", W_SignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = interp2app(W_Int32Box.descr__new__.im_func), ) W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntgerBox.typedef, diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -30,7 +30,7 @@ def test_repr_str(self): from numpypy import dtype - assert repr(dtype) == "" + assert repr(dtype) == "" d = dtype('?') assert repr(d) == "dtype('bool')" assert str(d) == "bool" @@ -173,7 +173,7 @@ raises(TypeError, numpy.number, 0) raises(TypeError, numpy.integer, 0) exc = raises(TypeError, numpy.signedinteger, 0) - assert str(exc.value) == "cannot create 'numpy.signedinteger' instances" + assert str(exc.value) == "cannot create 'numpypy.signedinteger' instances" raises(TypeError, numpy.floating, 0) raises(TypeError, numpy.inexact, 0) @@ -207,6 +207,21 @@ assert type(x) is numpy.int8 assert repr(x) == "-128" + assert type(int(x)) is int + assert int(x) == -128 + + def test_int16(self): + import numpypy as numpy + + x = numpy.int16(3) + assert x 
== 3 + + def test_int32(self): + import numpypy as numpy + + x = numpy.int32(23) + assert x == 23 + def test_int_(self): import numpypy as numpy diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -8,7 +8,7 @@ assert isinstance(add, ufunc) assert repr(add) == "" - assert repr(ufunc) == "" + assert repr(ufunc) == "" def test_ufunc_attrs(self): from numpypy import add, multiply, sin From noreply at buildbot.pypy.org Fri Dec 2 18:41:03 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 2 Dec 2011 18:41:03 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-refactor: merged default Message-ID: <20111202174103.CA7578208A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-dtype-refactor Changeset: r50069:6de4a284490c Date: 2011-12-02 12:40 -0500 http://bitbucket.org/pypy/pypy/changeset/6de4a284490c/ Log: merged default diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.6`: http://pypy.org/download.html +.. _`Release 1.7`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. 
Windows is supported, but platform-specific bugs tend to take longer before we notice and fix diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -2,6 +2,8 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan +from pypy.jit.tool.oparser import parse +from pypy.jit.backend.detect_cpu import getcpuclass def newboxes(*values): return [BoxInt(v) for v in values] diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -167,26 +167,22 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity = self._compute_vars_longevity(inputargs, operations) + longevity, useful = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) - return operations + return operations, useful def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - jump = operations[-1] - loop_consts = self._compute_loop_consts(inputargs, jump, looptoken) - self.loop_consts = loop_consts - return self._process_inputargs(inputargs), operations + operations, useful = self._prepare(inputargs, operations, allgcrefs) + return self._process_inputargs(inputargs, useful), operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - self.loop_consts = {} + operations, _ = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] @@ -195,7 +191,7 @@ def reserve_param(self, n): self.param_depth = max(self.param_depth, n) - def _process_inputargs(self, inputargs): + def _process_inputargs(self, inputargs, useful): # XXX we can sort out here by longevity if we need something # more optimal floatlocs = [None] * len(inputargs) @@ -211,7 +207,7 @@ arg = inputargs[i] assert not isinstance(arg, Const) reg = None - if arg not in self.loop_consts and self.longevity[arg][1] > -1: + if self.longevity[arg][1] > -1 and arg in useful: if arg.type == FLOAT: # xxx is it really a good idea? 
at the first CALL they # will all be flushed anyway @@ -287,15 +283,15 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: - loop_consts = {} - else: - loop_consts = {} - for i in range(len(inputargs)): - if inputargs[i] is jump.getarg(i): - loop_consts[inputargs[i]] = i - return loop_consts + #def _compute_loop_consts(self, inputargs, jump, looptoken): + # if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: + # loop_consts = {} + # else: + # loop_consts = {} + # for i in range(len(inputargs)): + # if inputargs[i] is jump.getarg(i): + # loop_consts[inputargs[i]] = i + # return loop_consts def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py @@ -450,8 +446,14 @@ def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" + + # returns a pair longevity/useful. Non-useful variables are ones that + # never appear in the assembler or it does not matter if they appear on + # stack or in registers. Main example is loop arguments that go + # only to guard operations or to jump or to finish produced = {} last_used = {} + useful = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -459,8 +461,11 @@ continue assert op.result not in produced produced[op.result] = i + opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) + if opnum != rop.JUMP and opnum != rop.FINISH: + useful[arg] = None if isinstance(arg, Box) and arg not in last_used: last_used[arg] = i if op.is_guard(): @@ -486,7 +491,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity + return longevity, useful def loc(self, v): if v is None: # xxx kludgy diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -149,6 +149,13 @@ self.cpu.execute_token(loop.token) return loop + def prepare_loop(self, ops): + loop = self.parse(ops) + regalloc = RegAlloc(self.cpu.assembler, False) + regalloc.prepare_loop(loop.inputargs, loop.operations, + loop.token, []) + return regalloc + def getint(self, index): return self.cpu.get_latest_value_int(index) @@ -422,6 +429,35 @@ self.run(loop) assert self.getints(9) == range(9) + def test_loopargs(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_2(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + finish(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_3(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + guard_true(i4) [i0, i1, i2, i3, i4] + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + class TestRegallocCompOps(BaseTestRegalloc): def test_cmp_op_0(self): diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -15,6 +15,8 @@ from pypy.translator.simplify import get_funcobj from pypy.translator.unsimplify import varoftype +class UnsupportedMallocFlags(Exception): + 
pass def transform_graph(graph, cpu=None, callcontrol=None, portal_jd=None): """Transform a control flow graph to make it suitable for @@ -205,7 +207,24 @@ if op.args[0] in self.vable_array_vars: self.vable_array_vars[op.result]= self.vable_array_vars[op.args[0]] - rewrite_op_cast_pointer = rewrite_op_same_as + def rewrite_op_cast_pointer(self, op): + newop = self.rewrite_op_same_as(op) + assert newop is None + return + # disabled for now + if (self._is_rclass_instance(op.args[0]) and + self._is_rclass_instance(op.result)): + FROM = op.args[0].concretetype.TO + TO = op.result.concretetype.TO + if lltype._castdepth(TO, FROM) > 0: + vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, TO) + const_vtable = Constant(vtable, lltype.typeOf(vtable)) + return [None, # hack, do the right renaming from op.args[0] to op.result + SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + + def rewrite_op_jit_record_known_class(self, op): + return SpaceOperation("record_known_class", [op.args[0], op.args[1]], None) + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass @@ -481,8 +500,22 @@ def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) ARRAY = op.args[0].value - return self._do_builtin_call(op, 'raw_malloc', + name = 'raw_malloc' + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[2]], extra = (ARRAY,), extrakey = ARRAY) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -599,10 +599,21 @@ return p return _ll_0_alloc_with_del - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, flavor='raw') - return _ll_1_raw_malloc + def build_raw_malloc_builder(zero=False, add_memory_pressure=False, track_allocation=True): + def build_ll_1_raw_malloc(ARRAY): + def _ll_1_raw_malloc(n): + return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, add_memory_pressure=add_memory_pressure) + return _ll_1_raw_malloc + return build_ll_1_raw_malloc + + build_ll_1_raw_malloc = build_raw_malloc_builder() + build_ll_1_raw_malloc_zero = build_raw_malloc_builder(zero=True) + build_ll_1_raw_malloc_zero_add_memory_pressure = build_raw_malloc_builder(zero=True, add_memory_pressure=True) + build_ll_1_raw_malloc_add_memory_pressure = build_raw_malloc_builder(add_memory_pressure=True) + build_ll_1_raw_malloc_no_track_allocation = build_raw_malloc_builder(track_allocation=False) + build_ll_1_raw_malloc_zero_no_track_allocation = build_raw_malloc_builder(zero=True, track_allocation=False) + build_ll_1_raw_malloc_zero_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(zero=True, add_memory_pressure=True, track_allocation=False) + build_ll_1_raw_malloc_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(add_memory_pressure=True, track_allocation=False) def build_ll_1_raw_free(ARRAY): def _ll_1_raw_free(p): diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py 
+++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -1,3 +1,5 @@ + +import py import random try: from itertools import product @@ -15,12 +17,12 @@ from pypy.objspace.flow.model import FunctionGraph, Block, Link from pypy.objspace.flow.model import SpaceOperation, Variable, Constant -from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.lltypesystem.module import ll_math from pypy.translator.unsimplify import varoftype from pypy.jit.codewriter import heaptracker, effectinfo from pypy.jit.codewriter.flatten import ListOfKind -from pypy.jit.codewriter.jtransform import Transformer +from pypy.jit.codewriter.jtransform import Transformer, UnsupportedMallocFlags from pypy.jit.metainterp.history import getkind def const(x): @@ -538,6 +540,44 @@ assert op1.opname == '-live-' assert op1.args == [] +def test_raw_malloc(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw'}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_zero(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc_zero' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_unsupported_flag(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'unsupported_flag': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address @@ -1140,4 +1180,4 @@ assert op1.opname == 'mark_opaque_ptr' assert op1.args == [v1] assert op1.result is None - assert op2 is None \ No newline at end of file + assert op2 is None diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -518,6 +518,9 @@ @arguments("r") def bhimpl_mark_opaque_ptr(a): pass + @arguments("r", "i") + def bhimpl_record_known_class(a, b): + pass @arguments("i", returns="i") def bhimpl_int_copy(a): diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -260,6 +260,16 @@ def optimize_GUARD_FALSE(self, op): self.optimize_guard(op, CONST_0) + def optimize_RECORD_KNOWN_CLASS(self, op): + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) + assert isinstance(expectedclassbox, Const) + realclassbox = value.get_constant_class(self.optimizer.cpu) + if 
realclassbox is not None: + assert realclassbox.same_constant(expectedclassbox) + return + value.make_constant_class(expectedclassbox, None) + def optimize_GUARD_CLASS(self, op): value = self.getvalue(op.getarg(0)) expectedclassbox = op.getarg(1) @@ -481,6 +491,9 @@ self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) self.emit_operation(op) + def optimize_SAME_AS(self, op): + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) + dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_', default=OptRewrite.emit_operation) optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD') diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -28,6 +28,9 @@ def optimize_MARK_OPAQUE_PTR(self, op): pass + def optimize_RECORD_KNOWN_CLASS(self, op): + pass + dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -6482,6 +6482,21 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() + def test_record_known_class(self): + ops = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + record_known_class(p1, ConstClass(node_vtable)) + guard_class(p1, ConstClass(node_vtable)) [] + jump(p1) + """ + expected = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + jump(p1) + """ + self.optimize_loop(ops, expected) + def test_quasi_immut(self): ops = """ [p0, p1, i0] diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -243,6 +243,18 @@ def opimpl_mark_opaque_ptr(self, box): return self.execute(rop.MARK_OPAQUE_PTR, box) + @arguments("box", "box") + def opimpl_record_known_class(self, box, clsbox): + from pypy.rpython.lltypesystem import llmemory + if self.metainterp.heapcache.is_class_known(box): + return + adr = clsbox.getaddr() + bounding_class = llmemory.cast_adr_to_ptr(adr, rclass.CLASSTYPE) + if bounding_class.subclassrange_max - bounding_class.subclassrange_min == 1: + # precise class knowledge, this can be used + self.execute(rop.RECORD_KNOWN_CLASS, box, clsbox) + self.metainterp.heapcache.class_now_known(box) + @arguments("box") def _opimpl_any_return(self, box): self.metainterp.finishframe(box) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -494,6 +494,7 @@ 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr + 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -14,7 +14,7 @@ from pypy.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside, loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, - isconstant, isvirtual, promote_string, set_param) + 
isconstant, isvirtual, promote_string, set_param, record_known_class) from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -3585,6 +3585,132 @@ self.interp_operations(f, [5], translationoptions=translationoptions) + def test_annotation_gives_class_knowledge_to_tracer(self): + py.test.skip("disabled") + class Base(object): + pass + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + assert isinstance(a, A) + z = a.f() + elif x < 0: + assert isinstance(a, B) + z = a.f() + else: + assert isinstance(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + + def test_give_class_knowledge_to_tracer_explicitly(self): + from pypy.rpython.lltypesystem.lloperation import llop + class Base(object): + def f(self): + raise NotImplementedError + def g(self): + raise NotImplementedError + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + record_known_class(a, A) + z = a.f() + elif x < 0: + record_known_class(a, B) + z = a.f() + else: + record_known_class(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): from pypy.rlib.objectmodel import UnboxedValue diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -612,7 +612,7 @@ return node.value res = self.meta_interp(f, [48, 3], policy=StopAtXPolicy(externfn)) assert res == f(48, 3) - self.check_loop_count(3) + self.check_loop_count(5) res = self.meta_interp(f, [40, 3], policy=StopAtXPolicy(externfn)) assert res == f(40, 3) self.check_loop_count(3) @@ -761,6 +761,27 @@ res = self.meta_interp(f, [0x1F, 0x11]) assert res 
== f(0x1F, 0x11) + def test_duplicated_virtual(self): + myjitdriver = JitDriver(greens = [], reds = ['n', 'node1', 'node2']) + def f(n): + node1 = self._new() + node1.value = 0 + node2 = self._new() + node2.value = 1 + while n > 0: + myjitdriver.jit_merge_point(n=n, node1=node1, node2=node2) + next = self._new() + next.value = node1.value + node2.value + n + node1 = next + node2 = next + n -= 1 + return node1.value + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_resops(new_with_vtable=0, new=0) + + + class VirtualMiscTests: def test_multiple_equal_virtuals(self): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -27,6 +27,7 @@ 'builtinify' : 'interp_magic.builtinify', 'lookup_special' : 'interp_magic.lookup_special', 'do_what_I_mean' : 'interp_magic.do_what_I_mean', + 'list_strategy' : 'interp_magic.list_strategy', } submodules = { diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -5,7 +5,6 @@ from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import IndexCache - def internal_repr(space, w_object): return space.wrap('%r' % (w_object,)) @@ -73,3 +72,11 @@ def do_what_I_mean(space): return space.wrap(42) + +def list_strategy(space, w_list): + from pypy.objspace.std.listobject import W_ListObject + if isinstance(w_list, W_ListObject): + return space.wrap(w_list.strategy._applevel_repr) + else: + w_msg = space.wrap("Can only get the list strategy of a list") + raise OperationError(space.w_TypeError, w_msg) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -5,7 +5,7 @@ def setup_class(cls): if option.runappdirect: py.test.skip("does not make sense on pypy-c") - cls.space = gettestobjspace(**{"objspace.usemodules.select": False}) + cls.space = gettestobjspace(**{"objspace.usemodules.select": False, "objspace.std.withrangelist": True}) def test__isfake(self): from __pypy__ import isfake @@ -54,3 +54,21 @@ from __pypy__ import do_what_I_mean x = do_what_I_mean() assert x == 42 + + def test_list_strategy(self): + from __pypy__ import list_strategy + + l = [1, 2, 3] + assert list_strategy(l) == "int" + l = ["a", "b", "c"] + assert list_strategy(l) == "str" + l = [1.1, 2.2, 3.3] + assert list_strategy(l) == "float" + l = range(3) + assert list_strategy(l) == "range" + l = [1, "b", 3] + assert list_strategy(l) == "object" + l = [] + assert list_strategy(l) == "empty" + o = 5 + raises(TypeError, list_strategy, 5) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -326,6 +326,8 @@ to the added item. W_Lists do not switch back to EmptyListStrategy when becoming empty again.""" + _applevel_repr = "empty" + def __init__(self, space): ListStrategy.__init__(self, space) # cache an empty list that is used whenever getitems is called (i.e. 
sorting) @@ -426,6 +428,8 @@ On any operation destroying the range (inserting, appending non-ints) the strategy is switched to IntegerListStrategy.""" + _applevel_repr = "range" + def switch_to_integer_strategy(self, w_list): items = self._getitems_range(w_list, False) strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) @@ -864,6 +868,7 @@ class ObjectListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None + _applevel_repr = "object" def unwrap(self, w_obj): return w_obj @@ -892,6 +897,7 @@ class IntegerListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0 + _applevel_repr = "int" def wrap(self, intval): return self.space.wrap(intval) @@ -918,6 +924,7 @@ class FloatListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0.0 + _applevel_repr = "float" def wrap(self, floatval): return self.space.wrap(floatval) @@ -944,6 +951,7 @@ class StringListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None + _applevel_repr = "str" def wrap(self, stringval): return self.space.wrap(stringval) diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -738,3 +738,29 @@ return hop.genop('jit_marker', vlist, resulttype=lltype.Void) +def record_known_class(value, cls): + """ + Assure the JIT that value is an instance of cls. This is not a precise + class check, unlike a guard_class. + """ + assert isinstance(value, cls) + + +class Entry(ExtRegistryEntry): + _about_ = record_known_class + + def compute_result_annotation(self, s_inst, s_cls): + from pypy.annotation import model as annmodel + assert s_cls.is_constant() + assert not s_inst.can_be_none() + assert isinstance(s_inst, annmodel.SomeInstance) + + def specialize_call(self, hop): + from pypy.rpython.lltypesystem import lltype, rclass + classrepr = rclass.get_type_repr(hop.rtyper) + + hop.exception_cannot_occur() + v_inst = hop.inputarg(hop.args_r[0], arg=0) + v_cls = hop.inputarg(classrepr, arg=1) + return hop.genop('jit_record_known_class', [v_inst, v_cls], + resulttype=lltype.Void) diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -548,6 +548,9 @@ def op_jit_marker(self, *args): pass + def op_jit_record_known_class(self, *args): + pass + def op_get_exception_addr(self, *args): raise NotImplementedError diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -430,6 +430,7 @@ 'jit_force_virtual': LLOp(canrun=True), 'jit_is_virtual': LLOp(canrun=True), 'jit_force_quasi_immutable': LLOp(canrun=True), + 'jit_record_known_class' : LLOp(canrun=True), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canmallocgc=True), diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -548,6 +548,9 @@ def op_jit_force_quasi_immutable(*args): pass +def op_jit_record_known_class(x, y): + pass + def op_get_group_member(TYPE, grpptr, memberoffset): from pypy.rpython.lltypesystem import llgroup assert isinstance(memberoffset, llgroup.GroupMemberOffset) diff --git a/pypy/translator/c/src/support.h b/pypy/translator/c/src/support.h --- a/pypy/translator/c/src/support.h +++ b/pypy/translator/c/src/support.h @@ -11,6 +11,7 @@ #endif /* MIN */ #define RUNNING_ON_LLINTERP 0 +#define 
OP_JIT_RECORD_KNOWN_CLASS(i, c, r) /* nothing */ #define FAIL_EXCEPTION(exc, msg) \ { \ From noreply at buildbot.pypy.org Fri Dec 2 18:42:27 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 2 Dec 2011 18:42:27 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-refactor: remove the review file, I addressed fijal's concern Message-ID: <20111202174227.451DE8208A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-dtype-refactor Changeset: r50070:8dd8ceded68e Date: 2011-12-02 12:42 -0500 http://bitbucket.org/pypy/pypy/changeset/8dd8ceded68e/ Log: remove the review file, I addressed fijal's concern diff --git a/pypy/module/micronumpy/REVIEW b/pypy/module/micronumpy/REVIEW deleted file mode 100644 --- a/pypy/module/micronumpy/REVIEW +++ /dev/null @@ -1,1 +0,0 @@ -* setitem_w no longer calls invalidated(), why? doesn't it break some stuff? From noreply at buildbot.pypy.org Fri Dec 2 18:47:56 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 2 Dec 2011 18:47:56 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-refactor: Close branch for merge. Message-ID: <20111202174756.DA9628208A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-dtype-refactor Changeset: r50071:dfe5b3a2b6aa Date: 2011-12-02 12:46 -0500 http://bitbucket.org/pypy/pypy/changeset/dfe5b3a2b6aa/ Log: Close branch for merge. From noreply at buildbot.pypy.org Fri Dec 2 18:47:58 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 2 Dec 2011 18:47:58 +0100 (CET) Subject: [pypy-commit] pypy default: Merge the numpy-dtype-refactor branch, it refactors the internals of numpy, and exposes the numpy box classes, it also sets the stage for complex and custom dtypes. Message-ID: <20111202174758.5ADC08208A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50072:8c67eefdff41 Date: 2011-12-02 12:47 -0500 http://bitbucket.org/pypy/pypy/changeset/8c67eefdff41/ Log: Merge the numpy-dtype-refactor branch, it refactors the internals of numpy, and exposes the numpy box classes, it also sets the stage for complex and custom dtypes. 
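A rough usage sketch of the scalar boxes this merge exposes, with the behaviour taken from the test_dtypes.py changes earlier in the branch; the int8 constructor argument is an assumed value for illustration:

    import numpypy as numpy

    assert numpy.int16(3) == 3
    assert numpy.int32(23) == 23

    x = numpy.int8(-128)              # assumed value; any in-range int works
    assert type(x) is numpy.int8
    assert int(x) == -128             # converts back to a plain Python int
    assert type(int(x)) is int

    # abstract parents such as signedinteger are exposed as types but cannot
    # be instantiated directly:
    try:
        numpy.signedinteger(0)
    except TypeError as e:
        assert "cannot create 'numpypy.signedinteger' instances" in str(e)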
diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -1432,6 +1432,10 @@ res = _getinteriorfield_raw(libffi.types.slong, array, index, width, ofs) return res +def do_getinteriorfield_raw_float(array, index, width, ofs): + res = _getinteriorfield_raw(libffi.types.double, array, index, width, ofs) + return res + def _getfield_raw(struct, fieldnum): STRUCT, fieldname = symbolic.TokenToField[fieldnum] ptr = cast_from_int(lltype.Ptr(STRUCT), struct) @@ -1516,6 +1520,7 @@ return libffi.array_setitem(ffitype, width, addr, index, ofs, newvalue) return do_setinteriorfield_raw do_setinteriorfield_raw_int = new_setinteriorfield_raw(libffi.types.slong) +do_setinteriorfield_raw_float = new_setinteriorfield_raw(libffi.types.double) def do_setfield_raw_int(struct, fieldnum, newvalue): STRUCT, fieldname = symbolic.TokenToField[fieldnum] diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -16,8 +16,22 @@ 'fromstring': 'interp_support.fromstring', 'flatiter': 'interp_numarray.W_FlatIterator', - 'True_': 'space.w_True', - 'False_': 'space.w_False', + 'True_': 'types.Bool.True', + 'False_': 'types.Bool.False', + + 'generic': 'interp_boxes.W_GenericBox', + 'number': 'interp_boxes.W_NumberBox', + 'integer': 'interp_boxes.W_IntegerBox', + 'signedinteger': 'interp_boxes.W_SignedIntegerBox', + 'bool_': 'interp_boxes.W_BoolBox', + 'int8': 'interp_boxes.W_Int8Box', + 'int16': 'interp_boxes.W_Int16Box', + 'int32': 'interp_boxes.W_Int32Box', + 'int64': 'interp_boxes.W_Int64Box', + 'int_': 'interp_boxes.W_LongBox', + 'inexact': 'interp_boxes.W_InexactBox', + 'floating': 'interp_boxes.W_FloatingBox', + 'float64': 'interp_boxes.W_Float64Box', } # ufuncs diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -3,13 +3,16 @@ It should not be imported by the module itself """ +import re + from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root -from pypy.module.micronumpy.interp_dtype import W_Float64Dtype, W_BoolDtype +from pypy.module.micronumpy import interp_boxes +from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.module.micronumpy.interp_numarray import (Scalar, BaseArray, descr_new_array, scalar_w, NDimArray) from pypy.module.micronumpy import interp_ufuncs -from pypy.rlib.objectmodel import specialize -import re +from pypy.rlib.objectmodel import specialize, instantiate + class BogusBytecode(Exception): pass @@ -48,15 +51,12 @@ def __init__(self): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild - self.w_float64dtype = W_Float64Dtype(self) def issequence_w(self, w_obj): return isinstance(w_obj, ListObject) or isinstance(w_obj, NDimArray) def isinstance_w(self, w_obj, w_tp): - if w_obj.tp == w_tp: - return True - return False + return w_obj.tp == w_tp def decode_index4(self, w_idx, size): if isinstance(w_idx, IntObject): @@ -97,8 +97,10 @@ fixedview = listview def float(self, w_obj): - assert isinstance(w_obj, FloatObject) - return w_obj + if isinstance(w_obj, FloatObject): + return w_obj + assert isinstance(w_obj, interp_boxes.W_GenericBox) + return self.float(w_obj.descr_float(self)) def float_w(self, w_obj): assert isinstance(w_obj, FloatObject) @@ -112,7 +114,10 @@ raise NotImplementedError def int(self, w_obj): - 
return w_obj + if isinstance(w_obj, IntObject): + return w_obj + assert isinstance(w_obj, interp_boxes.W_GenericBox) + return self.int(w_obj.descr_int(self)) def is_true(self, w_obj): assert isinstance(w_obj, BoolObject) @@ -135,6 +140,9 @@ assert isinstance(what, tp) return what + def allocate_instance(self, klass, w_subtype): + return instantiate(klass) + def len_w(self, w_obj): if isinstance(w_obj, ListObject): return len(w_obj.items) @@ -247,7 +255,7 @@ w_rhs = self.rhs.execute(interp) if not isinstance(w_lhs, BaseArray): # scalar - dtype = interp.space.fromcache(W_Float64Dtype) + dtype = get_dtype_cache(interp.space).w_float64dtype w_lhs = scalar_w(interp.space, dtype, w_lhs) assert isinstance(w_lhs, BaseArray) if self.name == '+': @@ -264,8 +272,9 @@ w_res = w_lhs.descr_getitem(interp.space, w_rhs) else: raise NotImplementedError - if not isinstance(w_res, BaseArray): - dtype = interp.space.fromcache(W_Float64Dtype) + if (not isinstance(w_res, BaseArray) and + not isinstance(w_res, interp_boxes.W_GenericBox)): + dtype = get_dtype_cache(interp.space).w_float64dtype w_res = scalar_w(interp.space, dtype, w_res) return w_res @@ -283,7 +292,7 @@ return space.wrap(self.v) def execute(self, interp): - return FloatObject(self.v) + return interp.space.wrap(self.v) class RangeConstant(Node): def __init__(self, v): @@ -291,8 +300,9 @@ def execute(self, interp): w_list = interp.space.newlist( - [interp.space.wrap(float(i)) for i in range(self.v)]) - dtype = interp.space.fromcache(W_Float64Dtype) + [interp.space.wrap(float(i)) for i in range(self.v)] + ) + dtype = get_dtype_cache(interp.space).w_float64dtype return descr_new_array(interp.space, None, w_list, w_dtype=dtype, w_order=None) @@ -315,7 +325,7 @@ def execute(self, interp): w_list = self.wrap(interp.space) - dtype = interp.space.fromcache(W_Float64Dtype) + dtype = get_dtype_cache(interp.space).w_float64dtype return descr_new_array(interp.space, None, w_list, w_dtype=dtype, w_order=None) @@ -384,9 +394,11 @@ if isinstance(w_res, BaseArray): return w_res if isinstance(w_res, FloatObject): - dtype = interp.space.fromcache(W_Float64Dtype) + dtype = get_dtype_cache(interp.space).w_float64dtype elif isinstance(w_res, BoolObject): - dtype = interp.space.fromcache(W_BoolDtype) + dtype = get_dtype_cache(interp.space).w_booldtype + elif isinstance(w_res, interp_boxes.W_GenericBox): + dtype = w_res.get_dtype(interp.space) else: dtype = None return scalar_w(interp.space, dtype, w_res) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_boxes.py @@ -0,0 +1,267 @@ +from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import TypeDef +from pypy.objspace.std.floattype import float_typedef +from pypy.objspace.std.inttype import int_typedef +from pypy.objspace.std.typeobject import W_TypeObject +from pypy.rlib.rarithmetic import LONG_BIT +from pypy.tool.sourcetools import func_with_new_name + + +MIXIN_64 = (int_typedef,) if LONG_BIT == 64 else () + +def new_dtype_getter(name): + def get_dtype(space): + from pypy.module.micronumpy.interp_dtype import get_dtype_cache + return getattr(get_dtype_cache(space), "w_%sdtype" % name) + def new(space, w_subtype, w_value): + dtype = get_dtype(space) + return dtype.itemtype.coerce_subtype(space, w_subtype, w_value) + return func_with_new_name(new, name + "_box_new"), 
staticmethod(get_dtype) + +class PrimitiveBox(object): + _mixin_ = True + + def __init__(self, value): + self.value = value + + def convert_to(self, dtype): + return dtype.box(self.value) + +class W_GenericBox(Wrappable): + _attrs_ = () + + def descr__new__(space, w_subtype, __args__): + assert isinstance(w_subtype, W_TypeObject) + raise operationerrfmt(space.w_TypeError, "cannot create '%s' instances", + w_subtype.get_module_type_name() + ) + + def descr_str(self, space): + return self.descr_repr(space) + + def descr_repr(self, space): + return space.wrap(self.get_dtype(space).itemtype.str_format(self)) + + def descr_int(self, space): + box = self.convert_to(W_LongBox.get_dtype(space)) + assert isinstance(box, W_LongBox) + return space.wrap(box.value) + + def descr_float(self, space): + box = self.convert_to(W_Float64Box.get_dtype(space)) + assert isinstance(box, W_Float64Box) + return space.wrap(box.value) + + def descr_nonzero(self, space): + dtype = self.get_dtype(space) + return space.wrap(dtype.itemtype.bool(self)) + + def _binop_impl(ufunc_name): + def impl(self, space, w_other): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) + + def _binop_right_impl(ufunc_name): + def impl(self, space, w_other): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) + + def _unaryop_impl(ufunc_name): + def impl(self, space): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) + + descr_add = _binop_impl("add") + descr_sub = _binop_impl("subtract") + descr_mul = _binop_impl("multiply") + descr_div = _binop_impl("divide") + descr_eq = _binop_impl("equal") + descr_ne = _binop_impl("not_equal") + descr_lt = _binop_impl("less") + descr_le = _binop_impl("less_equal") + descr_gt = _binop_impl("greater") + descr_ge = _binop_impl("greater_equal") + + descr_radd = _binop_right_impl("add") + descr_rmul = _binop_right_impl("multiply") + + descr_neg = _unaryop_impl("negative") + descr_abs = _unaryop_impl("absolute") + + +class W_BoolBox(W_GenericBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("bool") + +class W_NumberBox(W_GenericBox): + _attrs_ = () + +class W_IntegerBox(W_NumberBox): + pass + +class W_SignedIntegerBox(W_IntegerBox): + pass + +class W_UnsignedIntgerBox(W_IntegerBox): + pass + +class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int8") + +class W_UInt8Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint8") + +class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int16") + +class W_UInt16Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint16") + +class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int32") + +class W_UInt32Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint32") + +class W_LongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("long") + +class W_ULongBox(W_UnsignedIntgerBox, PrimitiveBox): + pass + +class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): + 
descr__new__, get_dtype = new_dtype_getter("int64") + +class W_UInt64Box(W_UnsignedIntgerBox, PrimitiveBox): + pass + +class W_InexactBox(W_NumberBox): + _attrs_ = () + +class W_FloatingBox(W_InexactBox): + _attrs_ = () + +class W_Float32Box(W_FloatingBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("float32") + +class W_Float64Box(W_FloatingBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("float64") + + + +W_GenericBox.typedef = TypeDef("generic", + __module__ = "numpypy", + + __new__ = interp2app(W_GenericBox.descr__new__.im_func), + + __str__ = interp2app(W_GenericBox.descr_str), + __repr__ = interp2app(W_GenericBox.descr_repr), + __int__ = interp2app(W_GenericBox.descr_int), + __float__ = interp2app(W_GenericBox.descr_float), + __nonzero__ = interp2app(W_GenericBox.descr_nonzero), + + __add__ = interp2app(W_GenericBox.descr_add), + __sub__ = interp2app(W_GenericBox.descr_sub), + __mul__ = interp2app(W_GenericBox.descr_mul), + __div__ = interp2app(W_GenericBox.descr_div), + + __radd__ = interp2app(W_GenericBox.descr_add), + __rmul__ = interp2app(W_GenericBox.descr_rmul), + + __eq__ = interp2app(W_GenericBox.descr_eq), + __ne__ = interp2app(W_GenericBox.descr_ne), + __lt__ = interp2app(W_GenericBox.descr_lt), + __le__ = interp2app(W_GenericBox.descr_le), + __gt__ = interp2app(W_GenericBox.descr_gt), + __ge__ = interp2app(W_GenericBox.descr_ge), + + __neg__ = interp2app(W_GenericBox.descr_neg), + __abs__ = interp2app(W_GenericBox.descr_abs), +) + +W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_BoolBox.descr__new__.im_func), +) + +W_NumberBox.typedef = TypeDef("number", W_GenericBox.typedef, + __module__ = "numpypy", +) + +W_IntegerBox.typedef = TypeDef("integer", W_NumberBox.typedef, + __module__ = "numpypy", +) + +W_SignedIntegerBox.typedef = TypeDef("signedinteger", W_IntegerBox.typedef, + __module__ = "numpypy", +) + +W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int8Box.descr__new__.im_func), +) + +W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int16Box.descr__new__.im_func), +) + +W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int32Box.typedef = TypeDef("int32", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int32Box.descr__new__.im_func), +) + +W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +if LONG_BIT == 32: + long_name = "int32" +elif LONG_BIT == 64: + long_name = "int64" +W_LongBox.typedef = TypeDef(long_name, (W_SignedIntegerBox.typedef, int_typedef,), + __module__ = "numpypy", +) + +W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, + __module__ = "numpypy", + __new__ = interp2app(W_Int64Box.descr__new__.im_func), +) + +W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, + __module__ = "numpypy", +) + +W_FloatingBox.typedef = TypeDef("floating", W_InexactBox.typedef, + __module__ = "numpypy", +) + +W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, + 
__module__ = "numpypy", +) + +W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), + __module__ = "numpypy", + + __new__ = interp2app(W_Float64Box.descr__new__.im_func), +) \ No newline at end of file diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -1,16 +1,11 @@ -import functools -import math - from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app -from pypy.interpreter.typedef import TypeDef, interp_attrproperty, GetSetProperty -from pypy.module.micronumpy import signature -from pypy.objspace.std.floatobject import float2string -from pypy.rlib import rarithmetic, rfloat -from pypy.rlib.rarithmetic import LONG_BIT, widen -from pypy.rlib.objectmodel import specialize, enforceargs -from pypy.rlib.unroll import unrolling_iterable +from pypy.interpreter.typedef import (TypeDef, GetSetProperty, + interp_attrproperty, interp_attrproperty_w) +from pypy.module.micronumpy import types, signature, interp_boxes +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import LONG_BIT from pypy.rpython.lltypesystem import lltype, rffi @@ -19,523 +14,216 @@ BOOLLTR = "b" FLOATINGLTR = "f" + +VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True}) + class W_Dtype(Wrappable): - def __init__(self, space): - pass + def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[]): + self.signature = signature.BaseSignature() + self.itemtype = itemtype + self.num = num + self.kind = kind + self.name = name + self.char = char + self.w_box_type = w_box_type + self.alternate_constructors = alternate_constructors + + def malloc(self, length): + # XXX find out why test_zjit explodes with tracking of allocations + return lltype.malloc(VOID_STORAGE, self.itemtype.get_element_size() * length, + zero=True, flavor="raw", + track_allocation=False, add_memory_pressure=True + ) + + @specialize.argtype(1) + def box(self, value): + return self.itemtype.box(value) + + def coerce(self, space, w_item): + return self.itemtype.coerce(space, w_item) + + def getitem(self, storage, i): + return self.itemtype.read(storage, self.itemtype.get_element_size(), i, 0) + + def setitem(self, storage, i, box): + self.itemtype.store(storage, self.itemtype.get_element_size(), i, 0, box) + + def fill(self, storage, box, start, stop): + self.itemtype.fill(storage, self.itemtype.get_element_size(), box, start, stop, 0) def descr__new__(space, w_subtype, w_dtype): + cache = get_dtype_cache(space) + if space.is_w(w_dtype, space.w_None): - return space.fromcache(W_Float64Dtype) + return cache.w_float64dtype + elif space.isinstance_w(w_dtype, w_subtype): + return w_dtype elif space.isinstance_w(w_dtype, space.w_str): - dtype = space.str_w(w_dtype) - for alias, dtype_class in dtypes_by_alias: - if alias == dtype: - return space.fromcache(dtype_class) - elif isinstance(space.interpclass_w(w_dtype), W_Dtype): - return w_dtype - elif space.isinstance_w(w_dtype, space.w_type): - for typename, dtype_class in dtypes_by_apptype: - if space.is_w(getattr(space, "w_%s" % typename), w_dtype): - return space.fromcache(dtype_class) + name = space.str_w(w_dtype) + for dtype in cache.builtin_dtypes: + if dtype.name == name or dtype.char == name: + return dtype + else: + for dtype in cache.builtin_dtypes: + if w_dtype in 
dtype.alternate_constructors: + return dtype + if w_dtype is dtype.w_box_type: + return dtype raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + def descr_str(self, space): + return space.wrap(self.name) + def descr_repr(self, space): return space.wrap("dtype('%s')" % self.name) - def descr_str(self, space): - return space.wrap(self.name) + def descr_get_itemsize(self, space): + return space.wrap(self.itemtype.get_element_size()) def descr_get_shape(self, space): return space.newtuple([]) - -class BaseBox(object): - pass - -VOID_TP = lltype.Ptr(lltype.Array(lltype.Void, hints={'nolength': True, "uncast_on_llgraph": True})) - -def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype, - expected_size=None): - - class Box(BaseBox): - def __init__(self, val): - self.val = val - - def wrap(self, space): - val = self.val - if valtype is rarithmetic.r_singlefloat: - val = float(val) - return space.wrap(val) - - def convert_to(self, dtype): - return dtype.adapt_val(self.val) - Box.__name__ = "%sBox" % T._name - - TP = lltype.Ptr(lltype.Array(T, hints={'nolength': True})) - class W_LowLevelDtype(W_Dtype): - signature = signature.BaseSignature() - - def erase(self, storage): - return rffi.cast(VOID_TP, storage) - - def unerase(self, storage): - return rffi.cast(TP, storage) - - @enforceargs(None, valtype) - def box(self, value): - return Box(value) - - def unbox(self, box): - assert isinstance(box, Box) - return box.val - - def unwrap(self, space, w_item): - raise NotImplementedError - - def malloc(self, size): - # XXX find out why test_zjit explodes with tracking of allocations - return self.erase(lltype.malloc(TP.TO, size, - zero=True, flavor="raw", - track_allocation=False, add_memory_pressure=True - )) - - def getitem(self, storage, i): - return Box(self.unerase(storage)[i]) - - def setitem(self, storage, i, item): - self.unerase(storage)[i] = self.unbox(item) - - def setitem_w(self, space, storage, i, w_item): - self.setitem(storage, i, self.unwrap(space, w_item)) - - def fill(self, storage, item, start, stop): - storage = self.unerase(storage) - item = self.unbox(item) - for i in xrange(start, stop): - storage[i] = item - - @specialize.argtype(1) - def adapt_val(self, val): - return self.box(rffi.cast(TP.TO.OF, val)) - - W_LowLevelDtype.__name__ = "W_%sDtype" % name.capitalize() - W_LowLevelDtype.num = num - W_LowLevelDtype.kind = kind - W_LowLevelDtype.name = name - W_LowLevelDtype.aliases = aliases - W_LowLevelDtype.applevel_types = applevel_types - W_LowLevelDtype.num_bytes = rffi.sizeof(T) - if expected_size is not None: - assert W_LowLevelDtype.num_bytes == expected_size - return W_LowLevelDtype - - -def binop(func): - specialize.argtype(1, 2)(func) - @functools.wraps(func) - def impl(self, v1, v2): - return self.adapt_val(func(self, - self.for_computation(self.unbox(v1)), - self.for_computation(self.unbox(v2)), - )) - return impl - -def raw_binop(func): - specialize.argtype(1, 2)(func) - # Returns the result unwrapped. 
- @functools.wraps(func) - def impl(self, v1, v2): - return func(self, - self.for_computation(self.unbox(v1)), - self.for_computation(self.unbox(v2)) - ) - return impl - -def unaryop(func): - specialize.argtype(1)(func) - @functools.wraps(func) - def impl(self, v): - return self.adapt_val(func(self, self.for_computation(self.unbox(v)))) - return impl - -class ArithmeticTypeMixin(object): - _mixin_ = True - - @binop - def add(self, v1, v2): - return v1 + v2 - @binop - def sub(self, v1, v2): - return v1 - v2 - @binop - def mul(self, v1, v2): - return v1 * v2 - - @unaryop - def pos(self, v): - return +v - @unaryop - def neg(self, v): - return -v - @unaryop - def abs(self, v): - return abs(v) - - @binop - def max(self, v1, v2): - return max(v1, v2) - @binop - def min(self, v1, v2): - return min(v1, v2) - - def bool(self, v): - return bool(self.for_computation(self.unbox(v))) - @raw_binop - def eq(self, v1, v2): - return v1 == v2 - @raw_binop - def ne(self, v1, v2): - return v1 != v2 - @raw_binop - def lt(self, v1, v2): - return v1 < v2 - @raw_binop - def le(self, v1, v2): - return v1 <= v2 - @raw_binop - def gt(self, v1, v2): - return v1 > v2 - @raw_binop - def ge(self, v1, v2): - return v1 >= v2 - - -class FloatArithmeticDtype(ArithmeticTypeMixin): - _mixin_ = True - - def unwrap(self, space, w_item): - return self.adapt_val(space.float_w(space.float(w_item))) - - def for_computation(self, v): - return float(v) - - def str_format(self, item): - return float2string(self.for_computation(self.unbox(item)), 'g', rfloat.DTSF_STR_PRECISION) - - @binop - def div(self, v1, v2): - # XXX this won't work after translation, probably requires ovfcheck - try: - return v1 / v2 - except ZeroDivisionError: - if v1 == v2 == 0.0: - return rfloat.NAN - return rfloat.copysign(rfloat.INFINITY, v1 * v2) - @binop - def mod(self, v1, v2): - return math.fmod(v1, v2) - @binop - def pow(self, v1, v2): - return math.pow(v1, v2) - - @unaryop - def sign(self, v): - if v == 0.0: - return 0.0 - return rfloat.copysign(1.0, v) - @unaryop - def reciprocal(self, v): - if v == 0.0: - return rfloat.copysign(rfloat.INFINITY, v) - return 1.0 / v - @unaryop - def fabs(self, v): - return math.fabs(v) - @unaryop - def floor(self, v): - return math.floor(v) - - @binop - def copysign(self, v1, v2): - return math.copysign(v1, v2) - @unaryop - def exp(self, v): - try: - return math.exp(v) - except OverflowError: - return rfloat.INFINITY - @unaryop - def sin(self, v): - return math.sin(v) - @unaryop - def cos(self, v): - return math.cos(v) - @unaryop - def tan(self, v): - return math.tan(v) - @unaryop - def arcsin(self, v): - if not -1.0 <= v <= 1.0: - return rfloat.NAN - return math.asin(v) - @unaryop - def arccos(self, v): - if not -1.0 <= v <= 1.0: - return rfloat.NAN - return math.acos(v) - @unaryop - def arctan(self, v): - return math.atan(v) - @unaryop - def arcsinh(self, v): - return math.asinh(v) - @unaryop - def arctanh(self, v): - if v == 1.0 or v == -1.0: - return math.copysign(rfloat.INFINITY, v) - if not -1.0 < v < 1.0: - return rfloat.NAN - return math.atanh(v) - @unaryop - def sqrt(self, v): - try: - return math.sqrt(v) - except ValueError: - return rfloat.NAN - -class IntegerArithmeticDtype(ArithmeticTypeMixin): - _mixin_ = True - - def unwrap(self, space, w_item): - return self.adapt_val(space.int_w(space.int(w_item))) - - def for_computation(self, v): - return widen(v) - - def str_format(self, item): - return str(widen(self.unbox(item))) - - @binop - def div(self, v1, v2): - if v2 == 0: - return 0 - return v1 / v2 - @binop 
- def mod(self, v1, v2): - return v1 % v2 - @binop - def pow(self, v1, v2): - res = 1 - while v2 > 0: - if v2 & 1: - res *= v1 - v2 >>= 1 - if v2 == 0: - break - v1 *= v1 - return res - - -class SignedIntegerArithmeticDtype(IntegerArithmeticDtype): - _mixin_ = True - - @unaryop - def sign(self, v): - if v > 0: - return 1 - elif v < 0: - return -1 - else: - assert v == 0 - return 0 - -class UnsignedIntegerArithmeticDtype(IntegerArithmeticDtype): - _mixin_ = True - - @unaryop - def sign(self, v): - return int(v != 0) - - -W_BoolDtype = create_low_level_dtype( - num = 0, kind = BOOLLTR, name = "bool", - aliases = ["?", "bool", "bool8"], - applevel_types = ["bool"], - T = lltype.Bool, - valtype = bool, -) -class W_BoolDtype(SignedIntegerArithmeticDtype, W_BoolDtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.is_true(w_item)) - - def str_format(self, item): - v = self.unbox(item) - return "True" if v else "False" - - def for_computation(self, v): - return int(v) - -W_Int8Dtype = create_low_level_dtype( - num = 1, kind = SIGNEDLTR, name = "int8", - aliases = ["b", "int8", "i1"], - applevel_types = [], - T = rffi.SIGNEDCHAR, - valtype = rffi.SIGNEDCHAR._type, - expected_size = 1, -) -class W_Int8Dtype(SignedIntegerArithmeticDtype, W_Int8Dtype): - pass - -W_UInt8Dtype = create_low_level_dtype( - num = 2, kind = UNSIGNEDLTR, name = "uint8", - aliases = ["B", "uint8", "I1"], - applevel_types = [], - T = rffi.UCHAR, - valtype = rffi.UCHAR._type, - expected_size = 1, -) -class W_UInt8Dtype(UnsignedIntegerArithmeticDtype, W_UInt8Dtype): - pass - -W_Int16Dtype = create_low_level_dtype( - num = 3, kind = SIGNEDLTR, name = "int16", - aliases = ["h", "int16", "i2"], - applevel_types = [], - T = rffi.SHORT, - valtype = rffi.SHORT._type, - expected_size = 2, -) -class W_Int16Dtype(SignedIntegerArithmeticDtype, W_Int16Dtype): - pass - -W_UInt16Dtype = create_low_level_dtype( - num = 4, kind = UNSIGNEDLTR, name = "uint16", - aliases = ["H", "uint16", "I2"], - applevel_types = [], - T = rffi.USHORT, - valtype = rffi.USHORT._type, - expected_size = 2, -) -class W_UInt16Dtype(UnsignedIntegerArithmeticDtype, W_UInt16Dtype): - pass - -W_Int32Dtype = create_low_level_dtype( - num = 5, kind = SIGNEDLTR, name = "int32", - aliases = ["i", "int32", "i4"], - applevel_types = [], - T = rffi.INT, - valtype = rffi.INT._type, - expected_size = 4, -) -class W_Int32Dtype(SignedIntegerArithmeticDtype, W_Int32Dtype): - pass - -W_UInt32Dtype = create_low_level_dtype( - num = 6, kind = UNSIGNEDLTR, name = "uint32", - aliases = ["I", "uint32", "I4"], - applevel_types = [], - T = rffi.UINT, - valtype = rffi.UINT._type, - expected_size = 4, -) -class W_UInt32Dtype(UnsignedIntegerArithmeticDtype, W_UInt32Dtype): - pass - -W_Int64Dtype = create_low_level_dtype( - num = 9, kind = SIGNEDLTR, name = "int64", - aliases = ["q", "int64", "i8"], - applevel_types = ["long"], - T = rffi.LONGLONG, - valtype = rffi.LONGLONG._type, - expected_size = 8, -) -class W_Int64Dtype(SignedIntegerArithmeticDtype, W_Int64Dtype): - pass - -W_UInt64Dtype = create_low_level_dtype( - num = 10, kind = UNSIGNEDLTR, name = "uint64", - aliases = ["Q", "uint64", "I8"], - applevel_types = [], - T = rffi.ULONGLONG, - valtype = rffi.ULONGLONG._type, - expected_size = 8, -) -class W_UInt64Dtype(UnsignedIntegerArithmeticDtype, W_UInt64Dtype): - pass - -if LONG_BIT == 32: - long_dtype = W_Int32Dtype - ulong_dtype = W_UInt32Dtype -elif LONG_BIT == 64: - long_dtype = W_Int64Dtype - ulong_dtype = W_UInt64Dtype -else: - assert False - -class 
W_LongDtype(long_dtype): - num = 7 - aliases = ["l"] - applevel_types = ["int"] - -class W_ULongDtype(ulong_dtype): - num = 8 - aliases = ["L"] - -W_Float32Dtype = create_low_level_dtype( - num = 11, kind = FLOATINGLTR, name = "float32", - aliases = ["f", "float32", "f4"], - applevel_types = [], - T = lltype.SingleFloat, - valtype = rarithmetic.r_singlefloat, - expected_size = 4, -) -class W_Float32Dtype(FloatArithmeticDtype, W_Float32Dtype): - pass - -W_Float64Dtype = create_low_level_dtype( - num = 12, kind = FLOATINGLTR, name = "float64", - aliases = ["d", "float64", "f8"], - applevel_types = ["float"], - T = lltype.Float, - valtype = float, - expected_size = 8, -) -class W_Float64Dtype(FloatArithmeticDtype, W_Float64Dtype): - pass - -ALL_DTYPES = [ - W_BoolDtype, - W_Int8Dtype, W_UInt8Dtype, W_Int16Dtype, W_UInt16Dtype, - W_Int32Dtype, W_UInt32Dtype, W_LongDtype, W_ULongDtype, - W_Int64Dtype, W_UInt64Dtype, - W_Float32Dtype, W_Float64Dtype, -] - -dtypes_by_alias = unrolling_iterable([ - (alias, dtype) - for dtype in ALL_DTYPES - for alias in dtype.aliases -]) -dtypes_by_apptype = unrolling_iterable([ - (apptype, dtype) - for dtype in ALL_DTYPES - for apptype in dtype.applevel_types -]) -dtypes_by_num_bytes = unrolling_iterable(sorted([ - (dtype.num_bytes, dtype) - for dtype in ALL_DTYPES -])) - W_Dtype.typedef = TypeDef("dtype", - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_Dtype.descr__new__.im_func), + __str__= interp2app(W_Dtype.descr_str), __repr__ = interp2app(W_Dtype.descr_repr), - __str__ = interp2app(W_Dtype.descr_str), num = interp_attrproperty("num", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), - itemsize = interp_attrproperty("num_bytes", cls=W_Dtype), + type = interp_attrproperty_w("w_box_type", cls=W_Dtype), + itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), shape = GetSetProperty(W_Dtype.descr_get_shape), ) W_Dtype.typedef.acceptable_as_base_class = False + +class DtypeCache(object): + def __init__(self, space): + self.w_booldtype = W_Dtype( + types.Bool(), + num=0, + kind=BOOLLTR, + name="bool", + char="?", + w_box_type = space.gettypefor(interp_boxes.W_BoolBox), + alternate_constructors=[space.w_bool], + ) + self.w_int8dtype = W_Dtype( + types.Int8(), + num=1, + kind=SIGNEDLTR, + name="int8", + char="b", + w_box_type = space.gettypefor(interp_boxes.W_Int8Box) + ) + self.w_uint8dtype = W_Dtype( + types.UInt8(), + num=2, + kind=UNSIGNEDLTR, + name="uint8", + char="B", + w_box_type = space.gettypefor(interp_boxes.W_UInt8Box), + ) + self.w_int16dtype = W_Dtype( + types.Int16(), + num=3, + kind=SIGNEDLTR, + name="int16", + char="h", + w_box_type = space.gettypefor(interp_boxes.W_Int16Box), + ) + self.w_uint16dtype = W_Dtype( + types.UInt16(), + num=4, + kind=UNSIGNEDLTR, + name="uint16", + char="H", + w_box_type = space.gettypefor(interp_boxes.W_UInt16Box), + ) + self.w_int32dtype = W_Dtype( + types.Int32(), + num=5, + kind=SIGNEDLTR, + name="int32", + char="i", + w_box_type = space.gettypefor(interp_boxes.W_Int32Box), + ) + self.w_uint32dtype = W_Dtype( + types.UInt32(), + num=6, + kind=UNSIGNEDLTR, + name="uint32", + char="I", + w_box_type = space.gettypefor(interp_boxes.W_UInt32Box), + ) + if LONG_BIT == 32: + name = "int32" + elif LONG_BIT == 64: + name = "int64" + self.w_longdtype = W_Dtype( + types.Long(), + num=7, + kind=SIGNEDLTR, + name=name, + char="l", + w_box_type = space.gettypefor(interp_boxes.W_LongBox), + alternate_constructors=[space.w_int], + ) + self.w_ulongdtype = W_Dtype( + types.ULong(), + num=8, + 
kind=UNSIGNEDLTR, + name="u" + name, + char="L", + w_box_type = space.gettypefor(interp_boxes.W_ULongBox), + ) + self.w_int64dtype = W_Dtype( + types.Int64(), + num=9, + kind=SIGNEDLTR, + name="int64", + char="q", + w_box_type = space.gettypefor(interp_boxes.W_Int64Box), + alternate_constructors=[space.w_long], + ) + self.w_uint64dtype = W_Dtype( + types.UInt64(), + num=10, + kind=UNSIGNEDLTR, + name="uint64", + char="Q", + w_box_type = space.gettypefor(interp_boxes.W_UInt64Box), + ) + self.w_float32dtype = W_Dtype( + types.Float32(), + num=11, + kind=FLOATINGLTR, + name="float32", + char="f", + w_box_type = space.gettypefor(interp_boxes.W_Float32Box), + ) + self.w_float64dtype = W_Dtype( + types.Float64(), + num=12, + kind=FLOATINGLTR, + name="float64", + char="d", + w_box_type = space.gettypefor(interp_boxes.W_Float64Box), + alternate_constructors=[space.w_float], + ) + + self.builtin_dtypes = [ + self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, + self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype, + self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype, + self.w_int64dtype, self.w_uint64dtype, self.w_float32dtype, + self.w_float64dtype + ] + self.dtypes_by_num_bytes = sorted( + (dtype.itemtype.get_element_size(), dtype) + for dtype in self.builtin_dtypes + ) + +def get_dtype_cache(space): + return space.fromcache(DtypeCache) \ No newline at end of file diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -124,7 +124,7 @@ for w_elem in elems_w: w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, w_dtype) - if w_dtype is space.fromcache(interp_dtype.W_Float64Dtype): + if w_dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: break if w_dtype is None: w_dtype = space.w_None @@ -136,7 +136,7 @@ arr_iter = arr.start_iter(arr.shape) for i in range(len(elems_w)): w_elem = elems_w[i] - dtype.setitem_w(space, arr.storage, arr_iter.offset, w_elem) + dtype.setitem(arr.storage, arr_iter.offset, dtype.coerce(space, w_elem)) arr_iter = arr_iter.next(shapelen) return arr @@ -451,8 +451,8 @@ self=self, dtype=dtype, i=i, result=result, idx=idx, cur_best=cur_best) - new_best = getattr(dtype, op_name)(cur_best, self.eval(i)) - if dtype.ne(new_best, cur_best): + new_best = getattr(dtype.itemtype, op_name)(cur_best, self.eval(i)) + if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best i = i.next(shapelen) @@ -462,8 +462,7 @@ size = self.find_size() if size == 0: raise OperationError(space.w_ValueError, - space.wrap("Can't call %s on zero-size arrays" \ - % op_name)) + space.wrap("Can't call %s on zero-size arrays" % op_name)) return space.wrap(loop(self)) return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) @@ -475,7 +474,7 @@ all_driver.jit_merge_point(signature=self.signature, shapelen=shapelen, self=self, dtype=dtype, i=i) - if not dtype.bool(self.eval(i)): + if not dtype.itemtype.bool(self.eval(i)): return False i = i.next(shapelen) return True @@ -490,7 +489,7 @@ any_driver.jit_merge_point(signature=self.signature, shapelen=shapelen, self=self, dtype=dtype, i=i) - if dtype.bool(self.eval(i)): + if dtype.itemtype.bool(self.eval(i)): return True i = i.next(shapelen) return False @@ -542,8 +541,8 @@ res.append(')') else: concrete.to_str(space, 1, res, indent=' ') - if (dtype is not space.fromcache(interp_dtype.W_Float64Dtype) and - dtype is not space.fromcache(interp_dtype.W_Int64Dtype)) or \ + if 
(dtype is not interp_dtype.get_dtype_cache(space).w_float64dtype and + dtype is not interp_dtype.get_dtype_cache(space).w_int64dtype) or \ not self.find_size(): res.append(", dtype=" + dtype.name) res.append(")") @@ -612,7 +611,7 @@ start = False else: builder.append(spacer) - builder.append(dtype.str_format(self.getitem(item))) + builder.append(dtype.itemtype.str_format(self.getitem(item))) item += self.strides[0] # Add a comma only if comma is False - this prevents adding two # commas @@ -625,7 +624,7 @@ start = False else: builder.append(spacer) - builder.append(dtype.str_format(self.getitem(item))) + builder.append(dtype.itemtype.str_format(self.getitem(item))) item += self.strides[0] i += 1 else: @@ -712,7 +711,7 @@ raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) item = concrete._index_of_single_item(space, w_idx) - return concrete.getitem(item).wrap(space) + return concrete.getitem(item) chunks = self._prepare_slice_args(space, w_idx) return space.wrap(self.create_slice(space, chunks)) @@ -771,14 +770,15 @@ shape[:]) def descr_mean(self, space): - return space.wrap(space.float_w(self.descr_sum(space)) / self.find_size()) + return space.div(self.descr_sum(space), space.wrap(self.find_size())) def descr_nonzero(self, space): if self.find_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) - return space.wrap(space.is_true(self.get_concrete().eval( - self.start_iter(self.shape)).wrap(space))) + return space.wrap(space.is_true( + self.get_concrete().eval(self.start_iter(self.shape)) + )) def descr_get_transpose(self, space): concrete = self.get_concrete() @@ -823,8 +823,7 @@ return scalar_w(space, dtype, w_obj) def scalar_w(space, dtype, w_obj): - assert isinstance(dtype, interp_dtype.W_Dtype) - return Scalar(dtype, dtype.unwrap(space, w_obj)) + return Scalar(dtype, dtype.coerce(space, w_obj)) class Scalar(BaseArray): """ @@ -858,7 +857,7 @@ return ConstantIterator() def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): - builder.append(self.dtype.str_format(self.value)) + builder.append(self.dtype.itemtype.str_format(self.value)) def copy(self): return Scalar(self.dtype, self.value) @@ -1147,9 +1146,9 @@ def copy(self): array = NDimArray(self.size, self.shape[:], self.dtype, self.order) rffi.c_memcpy( - rffi.cast(rffi.VOIDP, array.storage), - rffi.cast(rffi.VOIDP, self.storage), - self.size * self.dtype.num_bytes + array.storage, + self.storage, + self.size * self.dtype.itemtype.get_element_size() ) return array @@ -1160,8 +1159,7 @@ "len() of unsized object")) def setitem_w(self, space, item, w_value): - self.invalidated() - self.dtype.setitem_w(space, self.storage, item, w_value) + return self.setitem(item, self.dtype.coerce(space, w_value)) def setitem(self, item, value): self.invalidated() @@ -1204,9 +1202,10 @@ dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) + size, shape = _find_size_and_shape(space, w_size) arr = NDimArray(size, shape[:], dtype=dtype) - one = dtype.adapt_val(1) + one = dtype.box(1) arr.dtype.fill(arr.storage, one, 0, size) return space.wrap(arr) @@ -1308,10 +1307,10 @@ def descr_next(self, space): if self.iter.done(): - raise OperationError(space.w_StopIteration, space.wrap('')) + raise OperationError(space.w_StopIteration, space.w_None) result = self.eval(self.iter) self.iter = self.iter.next(self.shapelen) - return 
result.wrap(space) + return result def descr_iter(self): return self diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,6 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.interp_dtype import W_Float64Dtype +from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.rlib.rstruct.runpack import runpack from pypy.rpython.lltypesystem import lltype, rffi @@ -18,7 +18,7 @@ raise OperationError(space.w_ValueError, space.wrap( "string length %d not divisable by %d" % (length, FLOAT_SIZE))) - dtype = space.fromcache(W_Float64Dtype) + dtype = get_dtype_cache(space).w_float64dtype a = NDimArray(number, [number], dtype=dtype) start = 0 diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import interp_dtype, signature +from pypy.module.micronumpy import interp_boxes, interp_dtype, signature, types from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name @@ -29,7 +29,7 @@ def descr_get_identity(self, space): if self.identity is None: return space.w_None - return self.identity.wrap(space) + return self.identity def descr_call(self, space, __args__): if __args__.keywords or len(__args__.arguments_w) < self.argcount: @@ -80,8 +80,7 @@ new_sig = signature.Signature.find_sig([ self.reduce_signature, obj.signature ]) - return self.reduce_loop(new_sig, shapelen, start, value, obj, - dtype).wrap(space) + return self.reduce_loop(new_sig, shapelen, start, value, obj, dtype) def reduce_loop(self, signature, shapelen, i, value, obj, dtype): while not i.done(): @@ -115,7 +114,7 @@ promote_bools=self.promote_bools, ) if isinstance(w_obj, Scalar): - return self.func(res_dtype, w_obj.value.convert_to(res_dtype)).wrap(space) + return self.func(res_dtype, w_obj.value.convert_to(res_dtype)) new_sig = signature.Signature.find_sig([self.signature, w_obj.signature]) w_res = Call1(new_sig, w_obj.shape, res_dtype, w_obj, w_obj.order) @@ -148,14 +147,14 @@ promote_bools=self.promote_bools, ) if self.comparison_func: - res_dtype = space.fromcache(interp_dtype.W_BoolDtype) + res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar): return self.func(calc_dtype, w_lhs.value.convert_to(calc_dtype), w_rhs.value.convert_to(calc_dtype) - ).wrap(space) + ) new_sig = signature.Signature.find_sig([ self.signature, w_lhs.signature, w_rhs.signature @@ -169,7 +168,7 @@ W_Ufunc.typedef = TypeDef("ufunc", - __module__ = "numpy", + __module__ = "numpypy", __call__ = interp2app(W_Ufunc.descr_call), __repr__ = interp2app(W_Ufunc.descr_repr), @@ -187,7 +186,7 @@ dt1, dt2 = dt2, dt1 # Some operations promote op(bool, bool) to return int8, rather than bool if promote_bools and (dt1.kind == dt2.kind == interp_dtype.BOOLLTR): - return space.fromcache(interp_dtype.W_Int8Dtype) + return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: return 
find_unaryop_result_dtype(space, dt2, promote_to_float=True) # If they're the same kind, choose the greater one. @@ -197,14 +196,14 @@ # Everything promotes to float, and bool promotes to everything. if dt2.kind == interp_dtype.FLOATINGLTR or dt1.kind == interp_dtype.BOOLLTR: # Float32 + 8-bit int = Float64 - if dt2.num == 11 and dt1.num_bytes >= 4: - return space.fromcache(interp_dtype.W_Float64Dtype) + if dt2.num == 11 and dt1.itemtype.get_element_size() >= 4: + return interp_dtype.get_dtype_cache(space).w_float64dtype return dt2 # for now this means mixing signed and unsigned if dt2.kind == interp_dtype.SIGNEDLTR: # if dt2 has a greater number of bytes, then just go with it - if dt1.num_bytes < dt2.num_bytes: + if dt1.itemtype.get_element_size() < dt2.itemtype.get_element_size(): return dt2 # we need to promote both dtypes dtypenum = dt2.num + 2 @@ -214,10 +213,11 @@ # UInt64 + signed = Float64 if dt2.num == 10: dtypenum += 1 - newdtype = interp_dtype.ALL_DTYPES[dtypenum] + newdtype = interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum] - if newdtype.num_bytes > dt2.num_bytes or newdtype.kind == interp_dtype.FLOATINGLTR: - return space.fromcache(newdtype) + if (newdtype.itemtype.get_element_size() > dt2.itemtype.get_element_size() or + newdtype.kind == interp_dtype.FLOATINGLTR): + return newdtype else: # we only promoted to long on 32-bit or to longlong on 64-bit # this is really for dealing with the Long and Ulong dtypes @@ -225,35 +225,42 @@ dtypenum += 2 else: dtypenum += 3 - return space.fromcache(interp_dtype.ALL_DTYPES[dtypenum]) + return interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum] def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): if promote_bools and (dt.kind == interp_dtype.BOOLLTR): - return space.fromcache(interp_dtype.W_Int8Dtype) + return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: if dt.kind == interp_dtype.FLOATINGLTR: return dt if dt.num >= 5: - return space.fromcache(interp_dtype.W_Float64Dtype) - for bytes, dtype in interp_dtype.dtypes_by_num_bytes: - if dtype.kind == interp_dtype.FLOATINGLTR and dtype.num_bytes > dt.num_bytes: - return space.fromcache(dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype + for bytes, dtype in interp_dtype.get_dtype_cache(space).dtypes_by_num_bytes: + if (dtype.kind == interp_dtype.FLOATINGLTR and + dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): + return dtype if promote_to_largest: if dt.kind == interp_dtype.BOOLLTR or dt.kind == interp_dtype.SIGNEDLTR: - return space.fromcache(interp_dtype.W_Int64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype elif dt.kind == interp_dtype.FLOATINGLTR: - return space.fromcache(interp_dtype.W_Float64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype elif dt.kind == interp_dtype.UNSIGNEDLTR: - return space.fromcache(interp_dtype.W_UInt64Dtype) + return interp_dtype.get_dtype_cache(space).w_uint64dtype else: assert False return dt def find_dtype_for_scalar(space, w_obj, current_guess=None): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - long_dtype = space.fromcache(interp_dtype.W_LongDtype) - int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype) + bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype + long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype + int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype + + if isinstance(w_obj, interp_boxes.W_GenericBox): + dtype = 
w_obj.get_dtype(space) + if current_guess is None: + return dtype + return find_binop_result_dtype(space, dtype, current_guess) if space.isinstance_w(w_obj, space.w_bool): if current_guess is None or current_guess is bool_dtype: @@ -269,20 +276,19 @@ current_guess is long_dtype or current_guess is int64_dtype): return int64_dtype return current_guess - return space.fromcache(interp_dtype.W_Float64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func): if argcount == 1: def impl(res_dtype, value): - return getattr(res_dtype, op_name)(value) + return getattr(res_dtype.itemtype, op_name)(value) elif argcount == 2: + dtype_cache = interp_dtype.get_dtype_cache(space) def impl(res_dtype, lvalue, rvalue): - res = getattr(res_dtype, op_name)(lvalue, rvalue) + res = getattr(res_dtype.itemtype, op_name)(lvalue, rvalue) if comparison_func: - booldtype = space.fromcache(interp_dtype.W_BoolDtype) - assert isinstance(booldtype, interp_dtype.W_BoolDtype) - res = booldtype.box(res) + return dtype_cache.w_booldtype.box(res) return res return func_with_new_name(impl, ufunc_name) @@ -338,7 +344,7 @@ identity = extra_kwargs.get("identity") if identity is not None: - identity = space.fromcache(interp_dtype.W_LongDtype).adapt_val(identity) + identity = interp_dtype.get_dtype_cache(space).w_longdtype.box(identity) extra_kwargs["identity"] = identity func = ufunc_dtype_caller(space, ufunc_name, op_name, argcount, diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -1,5 +1,5 @@ from pypy.conftest import gettestobjspace -from pypy.module.micronumpy import interp_dtype +from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.module.micronumpy.interp_numarray import NDimArray, Scalar from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) @@ -11,7 +11,8 @@ class TestSignature(object): def test_binop_signature(self, space): - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + float64_dtype = get_dtype_cache(space).w_float64dtype + bool_dtype = get_dtype_cache(space).w_booldtype ar = NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) @@ -22,7 +23,7 @@ v4 = ar.descr_add(space, ar) assert v1.signature is v4.signature - bool_ar = NDimArray(10, [10], dtype=space.fromcache(interp_dtype.W_BoolDtype)) + bool_ar = NDimArray(10, [10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) assert v5.signature is not v1.signature assert v5.signature is not v2.signature @@ -30,7 +31,9 @@ assert v5.signature is v6.signature def test_slice_signature(self, space): - ar = NDimArray(10, [10], dtype=space.fromcache(interp_dtype.W_Float64Dtype)) + float64_dtype = get_dtype_cache(space).w_float64dtype + + ar = NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) assert v1.signature is v2.signature @@ -41,10 +44,10 @@ class TestUfuncCoerscion(object): def test_binops(self, space): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype) - int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype) - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + bool_dtype = get_dtype_cache(space).w_booldtype + int8_dtype = get_dtype_cache(space).w_int8dtype + 
int32_dtype = get_dtype_cache(space).w_int32dtype + float64_dtype = get_dtype_cache(space).w_float64dtype # Basic pairing assert find_binop_result_dtype(space, bool_dtype, bool_dtype) is bool_dtype @@ -62,19 +65,19 @@ assert find_binop_result_dtype(space, bool_dtype, float64_dtype, promote_to_float=True) is float64_dtype def test_unaryops(self, space): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype) - uint8_dtype = space.fromcache(interp_dtype.W_UInt8Dtype) - int16_dtype = space.fromcache(interp_dtype.W_Int16Dtype) - uint16_dtype = space.fromcache(interp_dtype.W_UInt16Dtype) - int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype) - uint32_dtype = space.fromcache(interp_dtype.W_UInt32Dtype) - long_dtype = space.fromcache(interp_dtype.W_LongDtype) - ulong_dtype = space.fromcache(interp_dtype.W_ULongDtype) - int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype) - uint64_dtype = space.fromcache(interp_dtype.W_UInt64Dtype) - float32_dtype = space.fromcache(interp_dtype.W_Float32Dtype) - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + bool_dtype = get_dtype_cache(space).w_booldtype + int8_dtype = get_dtype_cache(space).w_int8dtype + uint8_dtype = get_dtype_cache(space).w_uint8dtype + int16_dtype = get_dtype_cache(space).w_int16dtype + uint16_dtype = get_dtype_cache(space).w_uint16dtype + int32_dtype = get_dtype_cache(space).w_int32dtype + uint32_dtype = get_dtype_cache(space).w_uint32dtype + long_dtype = get_dtype_cache(space).w_longdtype + ulong_dtype = get_dtype_cache(space).w_ulongdtype + int64_dtype = get_dtype_cache(space).w_int64dtype + uint64_dtype = get_dtype_cache(space).w_uint64dtype + float32_dtype = get_dtype_cache(space).w_float32dtype + float64_dtype = get_dtype_cache(space).w_float64dtype # Normal rules, everything returns itself assert find_unaryop_result_dtype(space, bool_dtype) is bool_dtype diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -106,7 +106,7 @@ c -> 3 """ interp = self.run(code) - assert interp.results[-1].value.val == 9 + assert interp.results[-1].value == 9 def test_array_getitem(self): code = """ @@ -115,7 +115,7 @@ a + b -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 3 + 6 + assert interp.results[0].value == 3 + 6 def test_range_getitem(self): code = """ @@ -123,7 +123,7 @@ r -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_sum(self): code = """ @@ -132,7 +132,7 @@ r """ interp = self.run(code) - assert interp.results[0].value.val == 15 + assert interp.results[0].value.value == 15 def test_array_write(self): code = """ @@ -141,7 +141,7 @@ a -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 15 + assert interp.results[0].value == 15 def test_min(self): interp = self.run(""" @@ -150,7 +150,7 @@ b = a + a min(b) """) - assert interp.results[0].value.val == -24 + assert interp.results[0].value.value == -24 def test_max(self): interp = self.run(""" @@ -159,7 +159,7 @@ b = a + a max(b) """) - assert interp.results[0].value.val == 256 + assert interp.results[0].value.value == 256 def test_slice(self): interp = self.run(""" @@ -167,7 +167,7 @@ b = a -> : b -> 3 """) - assert interp.results[0].value.val == 4 + assert interp.results[0].value == 4 def test_slice_step(self): interp = self.run(""" @@ -175,7 +175,7 @@ 
b = a -> ::2 b -> 3 """) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_setslice(self): interp = self.run(""" @@ -185,7 +185,7 @@ a[::3] = b a -> 3 """) - assert interp.results[0].value.val == 5 + assert interp.results[0].value == 5 def test_slice2(self): @@ -196,14 +196,14 @@ b = s1 + s2 b -> 3 """) - assert interp.results[0].value.val == 15 + assert interp.results[0].value == 15 def test_multidim_getitem(self): interp = self.run(""" a = [[1,2]] a -> 0 -> 1 """) - assert interp.results[0].value.val == 2 + assert interp.results[0].value == 2 def test_multidim_getitem_2(self): interp = self.run(""" @@ -211,7 +211,7 @@ b = a + a b -> 1 -> 1 """) - assert interp.results[0].value.val == 8 + assert interp.results[0].value == 8 def test_set_slice(self): interp = self.run(""" @@ -220,7 +220,7 @@ b[:] = a + a b -> 3 """) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_set_slice2(self): interp = self.run(""" @@ -231,4 +231,4 @@ a[0:30:3] = c a -> 3 """) - assert interp.results[0].value.val == 11 + assert interp.results[0].value == 11 diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -30,7 +30,7 @@ def test_repr_str(self): from numpypy import dtype - assert repr(dtype) == "" + assert repr(dtype) == "" d = dtype('?') assert repr(d) == "dtype('bool')" assert str(d) == "bool" @@ -44,13 +44,13 @@ assert a[i] is True_ def test_copy_array_with_dtype(self): - from numpypy import array, False_, True_ + from numpypy import array, False_, True_, int64 a = array([0, 1, 2, 3], dtype=long) # int on 64-bit, long in 32-bit - assert isinstance(a[0], (int, long)) + assert isinstance(a[0], int64) b = a.copy() - assert isinstance(b[0], (int, long)) + assert isinstance(b[0], int64) a = array([0, 1, 2, 3], dtype=bool) assert a[0] is False_ @@ -72,17 +72,17 @@ assert a[i] is True_ def test_zeros_long(self): - from numpypy import zeros + from numpypy import zeros, int64 a = zeros(10, dtype=long) for i in range(10): - assert isinstance(a[i], (int, long)) + assert isinstance(a[i], int64) assert a[1] == 0 def test_ones_long(self): - from numpypy import ones + from numpypy import ones, int64 a = ones(10, dtype=long) for i in range(10): - assert isinstance(a[i], (int, long)) + assert isinstance(a[i], int64) assert a[1] == 1 def test_overflow(self): @@ -165,3 +165,99 @@ # You can't subclass dtype raises(TypeError, type, "Foo", (dtype,), {}) + +class AppTestTypes(BaseNumpyAppTest): + def test_abstract_types(self): + import numpypy as numpy + raises(TypeError, numpy.generic, 0) + raises(TypeError, numpy.number, 0) + raises(TypeError, numpy.integer, 0) + exc = raises(TypeError, numpy.signedinteger, 0) + assert str(exc.value) == "cannot create 'numpypy.signedinteger' instances" + + raises(TypeError, numpy.floating, 0) + raises(TypeError, numpy.inexact, 0) + + def test_bool(self): + import numpypy as numpy + + assert numpy.bool_.mro() == [numpy.bool_, numpy.generic, object] + assert numpy.bool_(3) is numpy.True_ + assert numpy.bool_("") is numpy.False_ + assert type(numpy.True_) is type(numpy.False_) is numpy.bool_ + + class X(numpy.bool_): + pass + + assert type(X(True)) is numpy.bool_ + assert X(True) is numpy.True_ + + def test_int8(self): + import numpypy as numpy + + assert numpy.int8.mro() == [numpy.int8, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object] + + a = 
numpy.array([1, 2, 3], numpy.int8) + assert type(a[1]) is numpy.int8 + assert numpy.dtype("int8").type is numpy.int8 + + x = numpy.int8(128) + assert x == -128 + assert x != 128 + assert type(x) is numpy.int8 + assert repr(x) == "-128" + + assert type(int(x)) is int + assert int(x) == -128 + + def test_int16(self): + import numpypy as numpy + + x = numpy.int16(3) + assert x == 3 + + def test_int32(self): + import numpypy as numpy + + x = numpy.int32(23) + assert x == 23 + + def test_int_(self): + import numpypy as numpy + + assert numpy.int_ is numpy.dtype(int).type + assert numpy.int_.mro() == [numpy.int_, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object] + + def test_int64(self): + import sys + import numpypy as numpy + + if sys.maxint == 2 ** 63 -1: + assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object] + else: + assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object] + + assert numpy.dtype(numpy.int64).type is numpy.int64 + assert numpy.int64(3) == 3 + + def test_float64(self): + import numpypy as numpy + + assert numpy.float64.mro() == [numpy.float64, numpy.floating, numpy.inexact, numpy.number, numpy.generic, float, object] + + a = numpy.array([1, 2, 3], numpy.float64) + assert type(a[1]) is numpy.float64 + assert numpy.dtype(float).type is numpy.float64 + + assert numpy.float64(2.0) == 2.0 + + def test_subclass_type(self): + import numpypy as numpy + + class X(numpy.float64): + def m(self): + return self + 2 + + b = X(10) + assert type(b) is X + assert b.m() == 12 diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -709,7 +709,7 @@ assert b[i] == 2.5 * a[i] def test_dtype_guessing(self): - from numpypy import array, dtype + from numpypy import array, dtype, float64, int8, bool_ assert array([True]).dtype is dtype(bool) assert array([True, False]).dtype is dtype(bool) @@ -719,6 +719,10 @@ assert array([1.2, True]).dtype is dtype(float) assert array([1.2, 5]).dtype is dtype(float) assert array([]).dtype is dtype(float) + assert array([float64(2)]).dtype is dtype(float) + assert array([int8(3)]).dtype is dtype("int8") + assert array([bool_(True)]).dtype is dtype(bool) + assert array([bool_(True), 3.0]).dtype is dtype(float) def test_comparison(self): import operator @@ -1008,10 +1012,10 @@ b = a[0].copy() assert (b == zeros(10)).all() -class AppTestSupport(object): +class AppTestSupport(BaseNumpyAppTest): def setup_class(cls): import struct - cls.space = gettestobjspace(usemodules=('micronumpy',)) + BaseNumpyAppTest.setup_class.im_func(cls) cls.w_data = cls.space.wrap(struct.pack('dddd', 1, 2, 3, 4)) def test_fromstring(self): diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -8,7 +8,7 @@ assert isinstance(add, ufunc) assert repr(add) == "" - assert repr(ufunc) == "" + assert repr(ufunc) == "" def test_ufunc_attrs(self): from numpypy import add, multiply, sin diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -8,11 +8,11 @@ from pypy.jit.metainterp import pyjitpl from 
pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.metainterp.warmspot import reset_stats -from pypy.module.micronumpy import interp_ufuncs, signature +from pypy.module.micronumpy import interp_boxes, interp_ufuncs, signature from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, FloatObject, IntObject, BoolObject, Parser, InterpreterState) -from pypy.module.micronumpy.interp_numarray import NDimArray, NDimSlice,\ - BaseArray +from pypy.module.micronumpy.interp_numarray import (NDimArray, NDimSlice, + BaseArray) from pypy.rlib.nonconst import NonConstant from pypy.rpython.annlowlevel import llstr, hlstr @@ -48,17 +48,15 @@ def f(i): interp = InterpreterState(codes[i]) interp.run(space) - res = interp.results[-1] - assert isinstance(res, BaseArray) - w_res = res.eval(res.start_iter()).wrap(interp.space) - if isinstance(w_res, BoolObject): - return float(w_res.boolval) - elif isinstance(w_res, FloatObject): - return w_res.floatval - elif isinstance(w_res, IntObject): - return w_res.intval - else: - return -42. + w_res = interp.results[-1] + if isinstance(w_res, BaseArray): + w_res = w_res.eval(w_res.start_iter()) + + if isinstance(w_res, interp_boxes.W_Float64Box): + return w_res.value + elif isinstance(w_res, interp_boxes.W_BoolBox): + return float(w_res.value) + raise TypeError(w_res) if self.graph is None: interp, graph = self.meta_interp(f, [i], @@ -80,9 +78,9 @@ def test_add(self): result = self.run("add") - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, 'int_add': 3, - 'int_ge': 1, 'guard_false': 1, 'jump': 1}) + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, + 'int_ge': 1, 'guard_false': 1, 'jump': 1}) assert result == 3 + 3 def define_float_add(): @@ -94,9 +92,9 @@ def test_floatadd(self): result = self.run("float_add") assert result == 3 + 3 - self.check_simple_loop({"getarrayitem_raw": 1, "float_add": 1, - "setarrayitem_raw": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 1, "float_add": 1, + "setinteriorfield_raw": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) def define_sum(): return """ @@ -108,9 +106,9 @@ def test_sum(self): result = self.run("sum") assert result == 2 * sum(range(30)) - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 2, - "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 2, + "int_add": 2, "int_ge": 1, "guard_false": 1, + "jump": 1}) def define_prod(): return """ @@ -125,9 +123,9 @@ for i in range(30): expected *= i * 2 assert result == expected - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) def test_max(self): py.test.skip("broken, investigate") @@ -138,9 +136,9 @@ max(b) """) assert result == 256 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 1, + "int_lt": 1, "guard_true": 1, "jump": 1}) def test_min(self): py.test.skip("broken, investigate") @@ -151,9 +149,9 @@ min(b) """) assert result == -24 - 
self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 1, + "int_lt": 1, "guard_true": 1, "jump": 1}) def define_any(): return """ @@ -166,10 +164,10 @@ def test_any(self): result = self.run("any") assert result == 1 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_ne": 1, "int_add": 2, - "int_ge": 1, "jump": 1, - "guard_false": 2}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_ne": 1, "int_add": 2, + "int_ge": 1, "jump": 1, + "guard_false": 2}) def define_already_forced(): return """ @@ -188,10 +186,10 @@ # sure it was optimized correctly. # XXX the comment above is wrong now. We need preferrably a way to # count the two loops separately - self.check_resops({'setarrayitem_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 35, + self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, - 'getarrayitem_raw': 4, 'float_add': 2, 'guard_false': 4, + 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, 'guard_value': 2}) def define_ufunc(): @@ -205,10 +203,9 @@ def test_ufunc(self): result = self.run("ufunc") assert result == -6 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, "float_neg": 1, - "setarrayitem_raw": 1, "int_add": 3, - "int_ge": 1, "guard_false": 1, "jump": 1, - }) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_neg": 1, + "setinteriorfield_raw": 1, "int_add": 3, + "int_ge": 1, "guard_false": 1, "jump": 1}) def define_specialization(): return """ @@ -246,9 +243,9 @@ def test_slice(self): result = self.run("slice") assert result == 18 - self.check_simple_loop({'getarrayitem_raw': 2, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_ge': 1, 'guard_false': 1, 'jump': 1}) @@ -265,8 +262,8 @@ def test_slice2(self): result = self.run("slice2") assert result == 15 - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, 'int_add': 3, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_ge': 1, 'guard_false': 1, 'jump': 1}) def define_multidim(): @@ -279,11 +276,11 @@ def test_multidim(self): result = self.run('multidim') assert result == 8 - self.check_simple_loop({'float_add': 1, 'getarrayitem_raw': 2, - 'guard_false': 1, 'int_add': 3, 'int_ge': 1, - 'jump': 1, 'setarrayitem_raw': 1}) # int_add might be 1 here if we try slightly harder with # reusing indexes or some optimization + self.check_simple_loop({'float_add': 1, 'getinteriorfield_raw': 2, + 'guard_false': 1, 'int_add': 3, 'int_ge': 1, + 'jump': 1, 'setinteriorfield_raw': 1}) def define_multidim_slice(): return """ @@ -329,18 +326,18 @@ result = self.run("setslice") assert result == 11.0 self.check_loop_count(1) - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add' : 1, - 'setarrayitem_raw': 1, 'int_add': 3, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_eq': 1, 'guard_false': 1, 'jump': 1}) class TestNumpyOld(LLJitMixin): def setup_class(cls): py.test.skip("old") from pypy.module.micronumpy.compile import FakeSpace - from pypy.module.micronumpy.interp_dtype import 
W_Float64Dtype + from pypy.module.micronumpy.interp_dtype import get_dtype_cache cls.space = FakeSpace() - cls.float64_dtype = cls.space.fromcache(W_Float64Dtype) + cls.float64_dtype = get_dtype_cache(cls.space).w_float64dtype def test_int32_sum(self): py.test.skip("pypy/jit/backend/llimpl.py needs to be changed to " diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/types.py @@ -0,0 +1,389 @@ +import functools +import math + +from pypy.module.micronumpy import interp_boxes +from pypy.objspace.std.floatobject import float2string +from pypy.rlib import rfloat, libffi, clibffi +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import LONG_BIT, widen +from pypy.rpython.lltypesystem import lltype, rffi + + +def simple_unary_op(func): + specialize.argtype(1)(func) + @functools.wraps(func) + def dispatcher(self, v): + return self.box( + func( + self, + self.for_computation(self.unbox(v)) + ) + ) + return dispatcher + +def simple_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return self.box( + func( + self, + self.for_computation(self.unbox(v1)), + self.for_computation(self.unbox(v2)), + ) + ) + return dispatcher + +def raw_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return func(self, + self.for_computation(self.unbox(v1)), + self.for_computation(self.unbox(v2)) + ) + return dispatcher + +class BaseType(object): + def _unimplemented_ufunc(self, *args): + raise NotImplementedError + # add = sub = mul = div = mod = pow = eq = ne = lt = le = gt = ge = max = \ + # min = copysign = pos = neg = abs = sign = reciprocal = fabs = floor = \ + # exp = sin = cos = tan = arcsin = arccos = arctan = arcsinh = \ + # arctanh = _unimplemented_ufunc + +class Primitive(object): + _mixin_ = True + def get_element_size(self): + return rffi.sizeof(self.T) + + @specialize.argtype(1) + def box(self, value): + return self.BoxType(rffi.cast(self.T, value)) + + def unbox(self, box): + assert isinstance(box, self.BoxType) + return box.value + + def coerce(self, space, w_item): + if isinstance(w_item, self.BoxType): + return w_item + return self.coerce_subtype(space, space.gettypefor(self.BoxType), w_item) + + def coerce_subtype(self, space, w_subtype, w_item): + # XXX: ugly + w_obj = space.allocate_instance(self.BoxType, w_subtype) + assert isinstance(w_obj, self.BoxType) + w_obj.__init__(self._coerce(space, w_item).value) + return w_obj + + def _coerce(self, space, w_item): + raise NotImplementedError + + def read(self, storage, width, i, offset): + return self.box(libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset + )) + + def store(self, storage, width, i, offset, box): + value = self.unbox(box) + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value + ) + + def fill(self, storage, width, box, start, stop, offset): + value = self.unbox(box) + for i in xrange(start, stop): + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value + ) + + @simple_binary_op + def add(self, v1, v2): + return v1 + v2 + + @simple_binary_op + def sub(self, v1, v2): + return v1 - v2 + + @simple_binary_op + def mul(self, v1, v2): + return v1 * v2 + + @simple_unary_op + def pos(self, v): + return +v + + @simple_unary_op + def neg(self, v): + return -v + + @simple_unary_op + def abs(self, v): + 
return abs(v) + + @raw_binary_op + def eq(self, v1, v2): + return v1 == v2 + + @raw_binary_op + def ne(self, v1, v2): + return v1 != v2 + + @raw_binary_op + def lt(self, v1, v2): + return v1 < v2 + + @raw_binary_op + def le(self, v1, v2): + return v1 <= v2 + + @raw_binary_op + def gt(self, v1, v2): + return v1 > v2 + + @raw_binary_op + def ge(self, v1, v2): + return v1 >= v2 + + def bool(self, v): + return bool(self.for_computation(self.unbox(v))) + + @simple_binary_op + def max(self, v1, v2): + return max(v1, v2) + + @simple_binary_op + def min(self, v1, v2): + return min(v1, v2) + +class Bool(BaseType, Primitive): + T = lltype.Bool + BoxType = interp_boxes.W_BoolBox + + True = BoxType(True) + False = BoxType(False) + + @specialize.argtype(1) + def box(self, value): + box = Primitive.box(self, value) + if box.value: + return self.True + else: + return self.False + + def coerce_subtype(self, space, w_subtype, w_item): + # Doesn't return subclasses so it can return the constants. + return self._coerce(space, w_item) + + def _coerce(self, space, w_item): + return self.box(space.is_true(w_item)) + + def str_format(self, box): + value = self.unbox(box) + return "True" if value else "False" + + def for_computation(self, v): + return int(v) + +class Integer(Primitive): + _mixin_ = True + + def _coerce(self, space, w_item): + return self.box(space.int_w(space.int(w_item))) + + def str_format(self, box): + value = self.unbox(box) + return str(self.for_computation(value)) + + def for_computation(self, v): + return widen(v) + + @simple_binary_op + def div(self, v1, v2): + if v2 == 0: + return 0 + return v1 / v2 + + @simple_binary_op + def mod(self, v1, v2): + return v1 % v2 + + @simple_binary_op + def pow(self, v1, v2): + res = 1 + while v2 > 0: + if v2 & 1: + res *= v1 + v2 >>= 1 + if v2 == 0: + break + v1 *= v1 + return res + + @simple_unary_op + def sign(self, v): + if v > 0: + return 1 + elif v < 0: + return -1 + else: + assert v == 0 + return 0 + +class Int8(BaseType, Integer): + T = rffi.SIGNEDCHAR + BoxType = interp_boxes.W_Int8Box + +class UInt8(BaseType, Integer): + T = rffi.UCHAR + BoxType = interp_boxes.W_UInt8Box + +class Int16(BaseType, Integer): + T = rffi.SHORT + BoxType = interp_boxes.W_Int16Box + +class UInt16(BaseType, Integer): + T = rffi.USHORT + BoxType = interp_boxes.W_UInt16Box + +class Int32(BaseType, Integer): + T = rffi.INT + BoxType = interp_boxes.W_Int32Box + +class UInt32(BaseType, Integer): + T = rffi.UINT + BoxType = interp_boxes.W_UInt32Box + +class Long(BaseType, Integer): + T = rffi.LONG + BoxType = interp_boxes.W_LongBox + +class ULong(BaseType, Integer): + T = rffi.ULONG + BoxType = interp_boxes.W_ULongBox + +class Int64(BaseType, Integer): + T = rffi.LONGLONG + BoxType = interp_boxes.W_Int64Box + +class UInt64(BaseType, Integer): + T = rffi.ULONGLONG + BoxType = interp_boxes.W_UInt64Box + +class Float(Primitive): + _mixin_ = True + + def _coerce(self, space, w_item): + return self.box(space.float_w(space.float(w_item))) + + def str_format(self, box): + value = self.unbox(box) + return float2string(self.for_computation(value), "g", rfloat.DTSF_STR_PRECISION) + + def for_computation(self, v): + return float(v) + + @simple_binary_op + def div(self, v1, v2): + try: + return v1 / v2 + except ZeroDivisionError: + if v1 == v2 == 0.0: + return rfloat.NAN + return rfloat.copysign(rfloat.INFINITY, v1 * v2) + + @simple_binary_op + def mod(self, v1, v2): + return math.fmod(v1, v2) + + @simple_binary_op + def pow(self, v1, v2): + return math.pow(v1, v2) + + @simple_binary_op 
+ def copysign(self, v1, v2): + return math.copysign(v1, v2) + + @simple_unary_op + def sign(self, v): + if v == 0.0: + return 0.0 + return rfloat.copysign(1.0, v) + + @simple_unary_op + def fabs(self, v): + return math.fabs(v) + + @simple_unary_op + def reciprocal(self, v): + if v == 0.0: + return rfloat.copysign(rfloat.INFINITY, v) + return 1.0 / v + + @simple_unary_op + def floor(self, v): + return math.floor(v) + + @simple_unary_op + def exp(self, v): + try: + return math.exp(v) + except OverflowError: + return rfloat.INFINITY + + @simple_unary_op + def sin(self, v): + return math.sin(v) + + @simple_unary_op + def cos(self, v): + return math.cos(v) + + @simple_unary_op + def tan(self, v): + return math.tan(v) + + @simple_unary_op + def arcsin(self, v): + if not -1.0 <= v <= 1.0: + return rfloat.NAN + return math.asin(v) + + @simple_unary_op + def arccos(self, v): + if not -1.0 <= v <= 1.0: + return rfloat.NAN + return math.acos(v) + + @simple_unary_op + def arctan(self, v): + return math.atan(v) + + @simple_unary_op + def arcsinh(self, v): + return math.asinh(v) + + @simple_unary_op + def arctanh(self, v): + if v == 1.0 or v == -1.0: + return math.copysign(rfloat.INFINITY, v) + if not -1.0 < v < 1.0: + return rfloat.NAN + return math.atanh(v) + + @simple_unary_op + def sqrt(self, v): + try: + return math.sqrt(v) + except ValueError: + return rfloat.NAN + + +class Float32(BaseType, Float): + T = rffi.FLOAT + BoxType = interp_boxes.W_Float32Box + +class Float64(BaseType, Float): + T = rffi.DOUBLE + BoxType = interp_boxes.W_Float64Box \ No newline at end of file diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -411,6 +411,10 @@ def getaddressindll(self, name): return dlsym(self.lib, name) +# These specialize.call_location's should really be specialize.arg(0), however +# you can't hash a pointer obj, which the specialize machinery wants to do. +# Given the present usage of these functions, it's good enough. + at specialize.call_location() @jit.oopspec("libffi_array_getitem(ffitype, width, addr, index, offset)") def array_getitem(ffitype, width, addr, index, offset): for TYPE, ffitype2 in clibffi.ffitype_map: @@ -420,6 +424,7 @@ return rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] assert False + at specialize.call_location() @jit.oopspec("libffi_array_setitem(ffitype, width, addr, index, offset, value)") def array_setitem(ffitype, width, addr, index, offset, value): for TYPE, ffitype2 in clibffi.ffitype_map: @@ -428,4 +433,4 @@ addr = rffi.ptradd(addr, offset) rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] = value return - assert False \ No newline at end of file + assert False diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -91,9 +91,18 @@ return decorated_func + def call_location(self): + """ Specializes the function for each call site. 
+ """ + def decorated_func(func): + func._annspecialcase_ = "specialize:call_location" + return func + + return decorated_func + def _wrap(self, args): return "("+','.join([repr(arg) for arg in args]) +")" - + specialize = _Specialize() def enforceargs(*args): @@ -125,7 +134,7 @@ def __hash__(self): raise TypeError("Symbolics are not hashable!") - + def __nonzero__(self): raise TypeError("Symbolics are not comparable") @@ -155,7 +164,7 @@ def lltype(self): from pypy.rpython.lltypesystem import lltype return lltype.Signed - + malloc_zero_filled = CDefinedIntSymbolic('MALLOC_ZERO_FILLED', default=0) running_on_llinterp = CDefinedIntSymbolic('RUNNING_ON_LLINTERP', default=1) # running_on_llinterp is meant to have the value 0 in all backends @@ -221,7 +230,7 @@ def compute_result_annotation(self, s_sizehint): from pypy.annotation.model import SomeInteger - + assert isinstance(s_sizehint, SomeInteger) return self.bookkeeper.newlist() From noreply at buildbot.pypy.org Fri Dec 2 19:32:05 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 2 Dec 2011 19:32:05 +0100 (CET) Subject: [pypy-commit] pypy numpy-ndarray: a branch to make array dn ndarray work the same way as they do in numpy, I can't figure how this changeset breaks stuff Message-ID: <20111202183205.C934C8208A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-ndarray Changeset: r50073:55386ade1ad3 Date: 2011-12-02 13:31 -0500 http://bitbucket.org/pypy/pypy/changeset/55386ade1ad3/ Log: a branch to make array dn ndarray work the same way as they do in numpy, I can't figure how this changeset breaks stuff diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -5,10 +5,11 @@ applevel_name = 'numpypy' interpleveldefs = { - 'array': 'interp_numarray.NDimArray', + 'ndarray': 'interp_numarray.W_NDimArray', 'dtype': 'interp_dtype.W_Dtype', 'ufunc': 'interp_ufuncs.W_Ufunc', + 'array': 'interp_numarray.array', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', 'ones': 'interp_numarray.ones', diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -9,7 +9,7 @@ from pypy.module.micronumpy import interp_boxes from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.module.micronumpy.interp_numarray import (Scalar, BaseArray, - descr_new_array, scalar_w, NDimArray) + scalar_w, W_NDimArray, array) from pypy.module.micronumpy import interp_ufuncs from pypy.rlib.objectmodel import specialize, instantiate @@ -53,7 +53,7 @@ self.fromcache = InternalSpaceCache(self).getorbuild def issequence_w(self, w_obj): - return isinstance(w_obj, ListObject) or isinstance(w_obj, NDimArray) + return isinstance(w_obj, ListObject) or isinstance(w_obj, W_NDimArray) def isinstance_w(self, w_obj, w_tp): return w_obj.tp == w_tp @@ -303,8 +303,7 @@ [interp.space.wrap(float(i)) for i in range(self.v)] ) dtype = get_dtype_cache(interp.space).w_float64dtype - return descr_new_array(interp.space, None, w_list, w_dtype=dtype, - w_order=None) + return array(interp.space, w_list, w_dtype=dtype, w_order=None) def __repr__(self): return 'Range(%s)' % self.v @@ -326,8 +325,7 @@ def execute(self, interp): w_list = self.wrap(interp.space) dtype = get_dtype_cache(interp.space).w_float64dtype - return descr_new_array(interp.space, None, w_list, w_dtype=dtype, - w_order=None) + return array(interp.space, w_list, 
w_dtype=dtype, w_order=None) def __repr__(self): return "[" + ", ".join([repr(item) for item in self.items]) + "]" diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -98,47 +98,6 @@ endshape[i] = remainder[i] return endshape -def descr_new_array(space, w_subtype, w_item_or_iterable, w_dtype=None, - w_order=NoneNotWrapped): - # find scalar - if not space.issequence_w(w_item_or_iterable): - if space.is_w(w_dtype, space.w_None): - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, - w_item_or_iterable) - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - return scalar_w(space, dtype, w_item_or_iterable) - if w_order is None: - order = 'C' - else: - order = space.str_w(w_order) - if order != 'C': # or order != 'F': - raise operationerrfmt(space.w_ValueError, "Unknown order: %s", - order) - shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) - # they come back in C order - size = len(elems_w) - if space.is_w(w_dtype, space.w_None): - w_dtype = None - for w_elem in elems_w: - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, - w_dtype) - if w_dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: - break - if w_dtype is None: - w_dtype = space.w_None - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - arr = NDimArray(size, shape[:], dtype=dtype, order=order) - shapelen = len(shape) - arr_iter = arr.start_iter(arr.shape) - for i in range(len(elems_w)): - w_elem = elems_w[i] - dtype.setitem(arr.storage, arr_iter.offset, dtype.coerce(space, w_elem)) - arr_iter = arr_iter.next(shapelen) - return arr # Iterators for arrays # -------------------- @@ -378,6 +337,13 @@ def add_invalidates(self, other): self.invalidates.append(other) + def descr__new__(space, w_subtype, w_size, w_dtype=None): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + size, shape = _find_size_and_shape(space, w_size) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + def _unaryop_impl(ufunc_name): def impl(self, space): return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) @@ -883,7 +849,7 @@ i = 0 signature = self.signature result_size = self.find_size() - result = NDimArray(result_size, self.shape, self.find_dtype()) + result = W_NDimArray(result_size, self.shape, self.find_dtype()) shapelen = len(self.shape) i = self.start_iter() ri = result.start_iter() @@ -1110,14 +1076,14 @@ return 'Slice(%s)' % self.parent.debug_repr() def copy(self): - array = NDimArray(self.size, self.shape[:], self.find_dtype()) + array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) iter = self.start_iter() while not iter.done(): array.setitem(iter.offset, self.getitem(iter.offset)) iter = iter.next(len(self.shape)) return array -class NDimArray(BaseArray): +class W_NDimArray(BaseArray): """ A class representing contiguous array. 
We know that each iteration by say ufunc will increase the data index by one """ @@ -1144,7 +1110,7 @@ return self.dtype.getitem(self.storage, iter.get_offset()) def copy(self): - array = NDimArray(self.size, self.shape[:], self.dtype, self.order) + array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) rffi.c_memcpy( array.storage, self.storage, @@ -1191,12 +1157,53 @@ shape.append(item) return size, shape +def array(space, w_item_or_iterable, w_dtype=None, w_order=NoneNotWrapped): + # find scalar + if not space.issequence_w(w_item_or_iterable): + if space.is_w(w_dtype, space.w_None): + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, + w_item_or_iterable) + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + return scalar_w(space, dtype, w_item_or_iterable) + if w_order is None: + order = 'C' + else: + order = space.str_w(w_order) + if order != 'C': # or order != 'F': + raise operationerrfmt(space.w_ValueError, "Unknown order: %s", + order) + shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) + # they come back in C order + size = len(elems_w) + if space.is_w(w_dtype, space.w_None): + w_dtype = None + for w_elem in elems_w: + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, + w_dtype) + if w_dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: + break + if w_dtype is None: + w_dtype = space.w_None + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) + shapelen = len(shape) + arr_iter = arr.start_iter(arr.shape) + for i in range(len(elems_w)): + w_elem = elems_w[i] + dtype.setitem(arr.storage, arr_iter.offset, dtype.coerce(space, w_elem)) + arr_iter = arr_iter.next(shapelen) + return arr + def zeros(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) size, shape = _find_size_and_shape(space, w_size) - return space.wrap(NDimArray(size, shape[:], dtype=dtype)) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) def ones(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, @@ -1204,7 +1211,7 @@ ) size, shape = _find_size_and_shape(space, w_size) - arr = NDimArray(size, shape[:], dtype=dtype) + arr = W_NDimArray(size, shape[:], dtype=dtype) one = dtype.box(1) arr.dtype.fill(arr.storage, one, 0, size) return space.wrap(arr) @@ -1216,9 +1223,9 @@ return w_arr.descr_dot(space, w_obj2) BaseArray.typedef = TypeDef( - 'numarray', - __new__ = interp2app(descr_new_array), - + 'ndarray', + __module__ = "numpypy", + __new__ = interp2app(BaseArray.descr__new__.im_func), __len__ = interp2app(BaseArray.descr_len), __getitem__ = interp2app(BaseArray.descr_getitem), diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -1,6 +1,6 @@ from pypy.conftest import gettestobjspace from pypy.module.micronumpy.interp_dtype import get_dtype_cache -from pypy.module.micronumpy.interp_numarray import NDimArray, Scalar +from pypy.module.micronumpy.interp_numarray import W_NDimArray, Scalar from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) @@ -14,7 +14,7 @@ float64_dtype = get_dtype_cache(space).w_float64dtype bool_dtype = get_dtype_cache(space).w_booldtype - 
ar = NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) assert v1.signature is not v2.signature @@ -23,7 +23,7 @@ v4 = ar.descr_add(space, ar) assert v1.signature is v4.signature - bool_ar = NDimArray(10, [10], dtype=bool_dtype) + bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) assert v5.signature is not v1.signature assert v5.signature is not v2.signature @@ -33,7 +33,7 @@ def test_slice_signature(self, space): float64_dtype = get_dtype_cache(space).w_float64dtype - ar = NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) assert v1.signature is v2.signature diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -1,6 +1,9 @@ +import py -import py -from pypy.module.micronumpy.compile import * +from pypy.module.micronumpy.compile import (numpy_compile, Assignment, + ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, + FunctionCall, FakeSpace) + class TestCompiler(object): def compile(self, code): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1,7 +1,7 @@ import py from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest -from pypy.module.micronumpy.interp_numarray import NDimArray, shape_agreement +from pypy.module.micronumpy.interp_numarray import W_NDimArray, shape_agreement from pypy.module.micronumpy import signature from pypy.interpreter.error import OperationError from pypy.conftest import gettestobjspace @@ -28,18 +28,18 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = NDimArray(100, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'F') assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = NDimArray(100, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'C') assert a.strides == [15, 3, 1] assert a.backstrides == [135, 12, 2] def test_create_slice_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] @@ -58,7 +58,7 @@ def test_create_slice_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') s = a.create_slice(space, [(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] @@ -78,7 +78,7 @@ def test_slice_of_slice_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(5, 0, 0, 1)]) assert s.start == 5 s2 = s.create_slice(space, [(3, 0, 0, 1)]) @@ -96,7 +96,7 @@ def test_slice_of_slice_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') s = a.create_slice(space, [(5, 0, 0, 1)]) assert s.start == 15 * 5 s2 = 
s.create_slice(space, [(3, 0, 0, 1)]) @@ -114,7 +114,7 @@ def test_negative_step_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] @@ -122,14 +122,14 @@ def test_negative_step_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') s = a.create_slice(space, [(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] def test_index_of_single_item_f(self): - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 + 2 * 10 + 2 * 50 s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) @@ -139,7 +139,7 @@ assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 1)) def test_index_of_single_item_c(self): - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 * 3 * 5 + 2 * 3 + 2 s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) @@ -160,6 +160,21 @@ class AppTestNumArray(BaseNumpyAppTest): + def test_ndarray(self): + from numpypy import ndarray, array, dtype + + assert type(ndarray) is type + assert type(array) is not type + a = ndarray((2, 3)) + assert a.shape == (2, 3) + assert a.dtype == dtype(float) + + raises(TypeError, ndarray, [[1], [2], [3]]) + + a = ndarray(3, dtype=int) + assert a.shape == (3,) + assert a.dtype is dtype(int) + def test_type(self): from numpypy import array ar = array(range(5)) @@ -359,11 +374,11 @@ assert r[i] == i + 3 def test_add_list(self): - from numpypy import array + from numpypy import array, ndarray a = array(range(5)) b = list(reversed(range(5))) c = a + b - assert isinstance(c, array) + assert isinstance(c, ndarray) for i in range(5): assert c[i] == 4 diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -37,36 +37,36 @@ assert minimum(2.0, 3.0) == 2.0 def test_sequence(self): - from numpypy import array, negative, minimum + from numpypy import array, ndarray, negative, minimum a = array(range(3)) b = [2.0, 1.0, 0.0] c = 1.0 b_neg = negative(b) - assert isinstance(b_neg, array) + assert isinstance(b_neg, ndarray) for i in range(3): assert b_neg[i] == -b[i] min_a_b = minimum(a, b) - assert isinstance(min_a_b, array) + assert isinstance(min_a_b, ndarray) for i in range(3): assert min_a_b[i] == min(a[i], b[i]) min_b_a = minimum(b, a) - assert isinstance(min_b_a, array) + assert isinstance(min_b_a, ndarray) for i in range(3): assert min_b_a[i] == min(a[i], b[i]) min_a_c = minimum(a, c) - assert isinstance(min_a_c, array) + assert isinstance(min_a_c, ndarray) for i in range(3): assert min_a_c[i] == min(a[i], c) min_c_a = minimum(c, a) - assert isinstance(min_c_a, array) + assert isinstance(min_c_a, ndarray) for i in range(3): assert min_c_a[i] == min(a[i], c) min_b_c = minimum(b, c) - assert isinstance(min_b_c, array) + assert isinstance(min_b_c, ndarray) for i in range(3): assert min_b_c[i] == min(b[i], c) min_c_b = minimum(c, b) - assert isinstance(min_c_b, array) + assert 
isinstance(min_c_b, ndarray) for i in range(3): assert min_c_b[i] == min(b[i], c) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -11,7 +11,7 @@ from pypy.module.micronumpy import interp_boxes, interp_ufuncs, signature from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, FloatObject, IntObject, BoolObject, Parser, InterpreterState) -from pypy.module.micronumpy.interp_numarray import (NDimArray, NDimSlice, +from pypy.module.micronumpy.interp_numarray import (W_NDimArray, NDimSlice, BaseArray) from pypy.rlib.nonconst import NonConstant from pypy.rpython.annlowlevel import llstr, hlstr @@ -352,7 +352,7 @@ dtype = float64_dtype else: dtype = int32_dtype - ar = NDimArray(n, [n], dtype=dtype) + ar = W_NDimArray(n, [n], dtype=dtype) i = 0 while i < n: ar.get_concrete().setitem(i, int32_dtype.box(7)) From noreply at buildbot.pypy.org Fri Dec 2 19:42:16 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 2 Dec 2011 19:42:16 +0100 (CET) Subject: [pypy-commit] pypy numpy-ndarray: fix the bug, fix one other bug Message-ID: <20111202184216.613F98208A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-ndarray Changeset: r50074:2d3c7db968ec Date: 2011-12-02 13:40 -0500 http://bitbucket.org/pypy/pypy/changeset/2d3c7db968ec/ Log: fix the bug, fix one other bug diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -780,9 +780,7 @@ return w_obj elif space.issequence_w(w_obj): # Convert to array. - w_obj = space.call_function(space.gettypefor(BaseArray), w_obj) - assert isinstance(w_obj, BaseArray) - return w_obj + return array(space, w_obj, w_order=None) else: # If it's a scalar dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -9,7 +9,7 @@ @unwrap_spec(s=str) def fromstring(space, s): - from pypy.module.micronumpy.interp_numarray import NDimArray + from pypy.module.micronumpy.interp_numarray import W_NDimArray length = len(s) if length % FLOAT_SIZE == 0: @@ -19,7 +19,7 @@ "string length %d not divisable by %d" % (length, FLOAT_SIZE))) dtype = get_dtype_cache(space).w_float64dtype - a = NDimArray(number, [number], dtype=dtype) + a = W_NDimArray(number, [number], dtype=dtype) start = 0 end = FLOAT_SIZE From noreply at buildbot.pypy.org Fri Dec 2 19:42:17 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 2 Dec 2011 19:42:17 +0100 (CET) Subject: [pypy-commit] pypy numpy-ndarray: CLose branhc for merge Message-ID: <20111202184217.7E84D8208A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-ndarray Changeset: r50075:9cfed3afa1cc Date: 2011-12-02 13:41 -0500 http://bitbucket.org/pypy/pypy/changeset/9cfed3afa1cc/ Log: CLose branhc for merge From noreply at buildbot.pypy.org Fri Dec 2 19:42:18 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 2 Dec 2011 19:42:18 +0100 (CET) Subject: [pypy-commit] pypy default: make numpy.ndarray refer to the class, and numpy.array be a funciton which constructs it Message-ID: <20111202184218.AC0B48208A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50076:ccf1f7b9b78b Date: 2011-12-02 13:41 
-0500 http://bitbucket.org/pypy/pypy/changeset/ccf1f7b9b78b/ Log: make numpy.ndarray refer to the class, and numpy.array be a funciton which constructs it diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -5,10 +5,11 @@ applevel_name = 'numpypy' interpleveldefs = { - 'array': 'interp_numarray.NDimArray', + 'ndarray': 'interp_numarray.W_NDimArray', 'dtype': 'interp_dtype.W_Dtype', 'ufunc': 'interp_ufuncs.W_Ufunc', + 'array': 'interp_numarray.array', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', 'ones': 'interp_numarray.ones', diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -9,7 +9,7 @@ from pypy.module.micronumpy import interp_boxes from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.module.micronumpy.interp_numarray import (Scalar, BaseArray, - descr_new_array, scalar_w, NDimArray) + scalar_w, W_NDimArray, array) from pypy.module.micronumpy import interp_ufuncs from pypy.rlib.objectmodel import specialize, instantiate @@ -53,7 +53,7 @@ self.fromcache = InternalSpaceCache(self).getorbuild def issequence_w(self, w_obj): - return isinstance(w_obj, ListObject) or isinstance(w_obj, NDimArray) + return isinstance(w_obj, ListObject) or isinstance(w_obj, W_NDimArray) def isinstance_w(self, w_obj, w_tp): return w_obj.tp == w_tp @@ -303,8 +303,7 @@ [interp.space.wrap(float(i)) for i in range(self.v)] ) dtype = get_dtype_cache(interp.space).w_float64dtype - return descr_new_array(interp.space, None, w_list, w_dtype=dtype, - w_order=None) + return array(interp.space, w_list, w_dtype=dtype, w_order=None) def __repr__(self): return 'Range(%s)' % self.v @@ -326,8 +325,7 @@ def execute(self, interp): w_list = self.wrap(interp.space) dtype = get_dtype_cache(interp.space).w_float64dtype - return descr_new_array(interp.space, None, w_list, w_dtype=dtype, - w_order=None) + return array(interp.space, w_list, w_dtype=dtype, w_order=None) def __repr__(self): return "[" + ", ".join([repr(item) for item in self.items]) + "]" diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -98,47 +98,6 @@ endshape[i] = remainder[i] return endshape -def descr_new_array(space, w_subtype, w_item_or_iterable, w_dtype=None, - w_order=NoneNotWrapped): - # find scalar - if not space.issequence_w(w_item_or_iterable): - if space.is_w(w_dtype, space.w_None): - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, - w_item_or_iterable) - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - return scalar_w(space, dtype, w_item_or_iterable) - if w_order is None: - order = 'C' - else: - order = space.str_w(w_order) - if order != 'C': # or order != 'F': - raise operationerrfmt(space.w_ValueError, "Unknown order: %s", - order) - shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) - # they come back in C order - size = len(elems_w) - if space.is_w(w_dtype, space.w_None): - w_dtype = None - for w_elem in elems_w: - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, - w_dtype) - if w_dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: - break - if w_dtype is None: - w_dtype = space.w_None - dtype = 
space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - arr = NDimArray(size, shape[:], dtype=dtype, order=order) - shapelen = len(shape) - arr_iter = arr.start_iter(arr.shape) - for i in range(len(elems_w)): - w_elem = elems_w[i] - dtype.setitem(arr.storage, arr_iter.offset, dtype.coerce(space, w_elem)) - arr_iter = arr_iter.next(shapelen) - return arr # Iterators for arrays # -------------------- @@ -378,6 +337,13 @@ def add_invalidates(self, other): self.invalidates.append(other) + def descr__new__(space, w_subtype, w_size, w_dtype=None): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + size, shape = _find_size_and_shape(space, w_size) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + def _unaryop_impl(ufunc_name): def impl(self, space): return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) @@ -814,9 +780,7 @@ return w_obj elif space.issequence_w(w_obj): # Convert to array. - w_obj = space.call_function(space.gettypefor(BaseArray), w_obj) - assert isinstance(w_obj, BaseArray) - return w_obj + return array(space, w_obj, w_order=None) else: # If it's a scalar dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) @@ -883,7 +847,7 @@ i = 0 signature = self.signature result_size = self.find_size() - result = NDimArray(result_size, self.shape, self.find_dtype()) + result = W_NDimArray(result_size, self.shape, self.find_dtype()) shapelen = len(self.shape) i = self.start_iter() ri = result.start_iter() @@ -1110,14 +1074,14 @@ return 'Slice(%s)' % self.parent.debug_repr() def copy(self): - array = NDimArray(self.size, self.shape[:], self.find_dtype()) + array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) iter = self.start_iter() while not iter.done(): array.setitem(iter.offset, self.getitem(iter.offset)) iter = iter.next(len(self.shape)) return array -class NDimArray(BaseArray): +class W_NDimArray(BaseArray): """ A class representing contiguous array. 
We know that each iteration by say ufunc will increase the data index by one """ @@ -1144,7 +1108,7 @@ return self.dtype.getitem(self.storage, iter.get_offset()) def copy(self): - array = NDimArray(self.size, self.shape[:], self.dtype, self.order) + array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) rffi.c_memcpy( array.storage, self.storage, @@ -1191,12 +1155,53 @@ shape.append(item) return size, shape +def array(space, w_item_or_iterable, w_dtype=None, w_order=NoneNotWrapped): + # find scalar + if not space.issequence_w(w_item_or_iterable): + if space.is_w(w_dtype, space.w_None): + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, + w_item_or_iterable) + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + return scalar_w(space, dtype, w_item_or_iterable) + if w_order is None: + order = 'C' + else: + order = space.str_w(w_order) + if order != 'C': # or order != 'F': + raise operationerrfmt(space.w_ValueError, "Unknown order: %s", + order) + shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) + # they come back in C order + size = len(elems_w) + if space.is_w(w_dtype, space.w_None): + w_dtype = None + for w_elem in elems_w: + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, + w_dtype) + if w_dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: + break + if w_dtype is None: + w_dtype = space.w_None + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) + shapelen = len(shape) + arr_iter = arr.start_iter(arr.shape) + for i in range(len(elems_w)): + w_elem = elems_w[i] + dtype.setitem(arr.storage, arr_iter.offset, dtype.coerce(space, w_elem)) + arr_iter = arr_iter.next(shapelen) + return arr + def zeros(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) size, shape = _find_size_and_shape(space, w_size) - return space.wrap(NDimArray(size, shape[:], dtype=dtype)) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) def ones(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, @@ -1204,7 +1209,7 @@ ) size, shape = _find_size_and_shape(space, w_size) - arr = NDimArray(size, shape[:], dtype=dtype) + arr = W_NDimArray(size, shape[:], dtype=dtype) one = dtype.box(1) arr.dtype.fill(arr.storage, one, 0, size) return space.wrap(arr) @@ -1216,9 +1221,9 @@ return w_arr.descr_dot(space, w_obj2) BaseArray.typedef = TypeDef( - 'numarray', - __new__ = interp2app(descr_new_array), - + 'ndarray', + __module__ = "numpypy", + __new__ = interp2app(BaseArray.descr__new__.im_func), __len__ = interp2app(BaseArray.descr_len), __getitem__ = interp2app(BaseArray.descr_getitem), diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -9,7 +9,7 @@ @unwrap_spec(s=str) def fromstring(space, s): - from pypy.module.micronumpy.interp_numarray import NDimArray + from pypy.module.micronumpy.interp_numarray import W_NDimArray length = len(s) if length % FLOAT_SIZE == 0: @@ -19,7 +19,7 @@ "string length %d not divisable by %d" % (length, FLOAT_SIZE))) dtype = get_dtype_cache(space).w_float64dtype - a = NDimArray(number, [number], dtype=dtype) + a = W_NDimArray(number, [number], dtype=dtype) start = 0 end = FLOAT_SIZE 
diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -1,6 +1,6 @@ from pypy.conftest import gettestobjspace from pypy.module.micronumpy.interp_dtype import get_dtype_cache -from pypy.module.micronumpy.interp_numarray import NDimArray, Scalar +from pypy.module.micronumpy.interp_numarray import W_NDimArray, Scalar from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) @@ -14,7 +14,7 @@ float64_dtype = get_dtype_cache(space).w_float64dtype bool_dtype = get_dtype_cache(space).w_booldtype - ar = NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) assert v1.signature is not v2.signature @@ -23,7 +23,7 @@ v4 = ar.descr_add(space, ar) assert v1.signature is v4.signature - bool_ar = NDimArray(10, [10], dtype=bool_dtype) + bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) assert v5.signature is not v1.signature assert v5.signature is not v2.signature @@ -33,7 +33,7 @@ def test_slice_signature(self, space): float64_dtype = get_dtype_cache(space).w_float64dtype - ar = NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) assert v1.signature is v2.signature diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -1,6 +1,9 @@ +import py -import py -from pypy.module.micronumpy.compile import * +from pypy.module.micronumpy.compile import (numpy_compile, Assignment, + ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, + FunctionCall, FakeSpace) + class TestCompiler(object): def compile(self, code): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1,7 +1,7 @@ import py from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest -from pypy.module.micronumpy.interp_numarray import NDimArray, shape_agreement +from pypy.module.micronumpy.interp_numarray import W_NDimArray, shape_agreement from pypy.module.micronumpy import signature from pypy.interpreter.error import OperationError from pypy.conftest import gettestobjspace @@ -28,18 +28,18 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = NDimArray(100, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'F') assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = NDimArray(100, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'C') assert a.strides == [15, 3, 1] assert a.backstrides == [135, 12, 2] def test_create_slice_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] @@ -58,7 +58,7 @@ def test_create_slice_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], 
MockDtype(), 'C') s = a.create_slice(space, [(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] @@ -78,7 +78,7 @@ def test_slice_of_slice_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(5, 0, 0, 1)]) assert s.start == 5 s2 = s.create_slice(space, [(3, 0, 0, 1)]) @@ -96,7 +96,7 @@ def test_slice_of_slice_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') s = a.create_slice(space, [(5, 0, 0, 1)]) assert s.start == 15 * 5 s2 = s.create_slice(space, [(3, 0, 0, 1)]) @@ -114,7 +114,7 @@ def test_negative_step_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] @@ -122,14 +122,14 @@ def test_negative_step_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') s = a.create_slice(space, [(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] def test_index_of_single_item_f(self): - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 + 2 * 10 + 2 * 50 s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) @@ -139,7 +139,7 @@ assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 1)) def test_index_of_single_item_c(self): - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 * 3 * 5 + 2 * 3 + 2 s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) @@ -160,6 +160,21 @@ class AppTestNumArray(BaseNumpyAppTest): + def test_ndarray(self): + from numpypy import ndarray, array, dtype + + assert type(ndarray) is type + assert type(array) is not type + a = ndarray((2, 3)) + assert a.shape == (2, 3) + assert a.dtype == dtype(float) + + raises(TypeError, ndarray, [[1], [2], [3]]) + + a = ndarray(3, dtype=int) + assert a.shape == (3,) + assert a.dtype is dtype(int) + def test_type(self): from numpypy import array ar = array(range(5)) @@ -359,11 +374,11 @@ assert r[i] == i + 3 def test_add_list(self): - from numpypy import array + from numpypy import array, ndarray a = array(range(5)) b = list(reversed(range(5))) c = a + b - assert isinstance(c, array) + assert isinstance(c, ndarray) for i in range(5): assert c[i] == 4 diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -37,36 +37,36 @@ assert minimum(2.0, 3.0) == 2.0 def test_sequence(self): - from numpypy import array, negative, minimum + from numpypy import array, ndarray, negative, minimum a = array(range(3)) b = [2.0, 1.0, 0.0] c = 1.0 b_neg = negative(b) - assert isinstance(b_neg, array) + assert isinstance(b_neg, ndarray) for i in range(3): assert b_neg[i] == -b[i] min_a_b = minimum(a, b) - assert isinstance(min_a_b, array) + assert isinstance(min_a_b, ndarray) for i in range(3): assert min_a_b[i] == min(a[i], b[i]) 
min_b_a = minimum(b, a) - assert isinstance(min_b_a, array) + assert isinstance(min_b_a, ndarray) for i in range(3): assert min_b_a[i] == min(a[i], b[i]) min_a_c = minimum(a, c) - assert isinstance(min_a_c, array) + assert isinstance(min_a_c, ndarray) for i in range(3): assert min_a_c[i] == min(a[i], c) min_c_a = minimum(c, a) - assert isinstance(min_c_a, array) + assert isinstance(min_c_a, ndarray) for i in range(3): assert min_c_a[i] == min(a[i], c) min_b_c = minimum(b, c) - assert isinstance(min_b_c, array) + assert isinstance(min_b_c, ndarray) for i in range(3): assert min_b_c[i] == min(b[i], c) min_c_b = minimum(c, b) - assert isinstance(min_c_b, array) + assert isinstance(min_c_b, ndarray) for i in range(3): assert min_c_b[i] == min(b[i], c) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -11,7 +11,7 @@ from pypy.module.micronumpy import interp_boxes, interp_ufuncs, signature from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, FloatObject, IntObject, BoolObject, Parser, InterpreterState) -from pypy.module.micronumpy.interp_numarray import (NDimArray, NDimSlice, +from pypy.module.micronumpy.interp_numarray import (W_NDimArray, NDimSlice, BaseArray) from pypy.rlib.nonconst import NonConstant from pypy.rpython.annlowlevel import llstr, hlstr @@ -352,7 +352,7 @@ dtype = float64_dtype else: dtype = int32_dtype - ar = NDimArray(n, [n], dtype=dtype) + ar = W_NDimArray(n, [n], dtype=dtype) i = 0 while i < n: ar.get_concrete().setitem(i, int32_dtype.box(7)) From noreply at buildbot.pypy.org Fri Dec 2 20:51:28 2011 From: noreply at buildbot.pypy.org (hager) Date: Fri, 2 Dec 2011 20:51:28 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: made first tests run again on ppc32 Message-ID: <20111202195128.CD4B98208A@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50077:ecbfbef353c7 Date: 2011-12-02 20:50 +0100 http://bitbucket.org/pypy/pypy/changeset/ecbfbef353c7/ Log: made first tests run again on ppc32 diff --git a/pypy/jit/backend/ppc/ppcgen/arch.py b/pypy/jit/backend/ppc/ppcgen/arch.py --- a/pypy/jit/backend/ppc/ppcgen/arch.py +++ b/pypy/jit/backend/ppc/ppcgen/arch.py @@ -7,12 +7,13 @@ if sys.maxint == (2**31 - 1): WORD = 4 IS_PPC_32 = True + BACKCHAIN_SIZE = 2 else: WORD = 8 IS_PPC_32 = False + BACKCHAIN_SIZE = 6 DWORD = 2 * WORD -BACKCHAIN_SIZE = 6 * WORD IS_PPC_64 = not IS_PPC_32 MY_COPY_OF_REGS = 0 diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -366,6 +366,8 @@ self.mc.std(r.r0.value, r.SP.value, stack_space + 2 * WORD) # then we push everything on the stack + self.max_stack_params = max(self.max_stack_params, len(stack_args))\ + + MAX_REG_PARAMS for i, arg in enumerate(stack_args): if IS_PPC_32: abi = 2 diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -111,9 +111,14 @@ have been generated. 
''' - GPR_SAVE_AREA_AND_FORCE_INDEX = GPR_SAVE_AREA + WORD + #GPR_SAVE_AREA_AND_FORCE_INDEX = GPR_SAVE_AREA + WORD # ^^^^^^^^^^^^^ ^^^^ # save GRP regs force index + ENCODING_AREA = len(r.MANAGED_REGS) * WORD + OFFSET_SPP_TO_GPR_SAVE_AREA = (FORCE_INDEX + FLOAT_INT_CONVERSION + + ENCODING_AREA) + OFFSET_SPP_TO_OLD_BACKCHAIN = (OFFSET_SPP_TO_GPR_SAVE_AREA + + GPR_SAVE_AREA + FPR_SAVE_AREA) def __init__(self, cpu, failargs_limit=1000): self.cpu = cpu @@ -132,21 +137,33 @@ self.max_stack_params = 0 def _save_nonvolatiles(self): + """ save nonvolatile GPRs in GPR SAVE AREA + """ for i, reg in enumerate(NONVOLATILES): # save r31 later on if reg.value == r.SPP.value: continue if IS_PPC_32: - self.mc.stw(reg.value, r.SPP.value, WORD + WORD * i) + #self.mc.stw(reg.value, r.SPP.value, WORD + WORD * i) + self.mc.stw(reg.value, r.SPP.value, + self.OFFSET_SPP_TO_GPR_SAVE_AREA + WORD * i) else: - self.mc.std(reg.value, r.SPP.value, WORD + WORD * i) + #self.mc.std(reg.value, r.SPP.value, WORD + WORD * i) + self.mc.std(reg.value, r.SPP.value, + self.OFFSET_SPP_TO_GPR_SAVE_AREA + WORD * i) def _restore_nonvolatiles(self, mc, spp_reg): + """ restore nonvolatile GPRs from GPR SAVE AREA + """ for i, reg in enumerate(NONVOLATILES): if IS_PPC_32: - mc.lwz(reg.value, spp_reg.value, WORD + WORD * i) + #mc.lwz(reg.value, spp_reg.value, WORD + WORD * i) + mc.lwz(reg.value, spp_reg.value, + self.OFFSET_SPP_TO_GPR_SAVE_AREA + WORD * i) else: - mc.ld(reg.value, spp_reg.value, WORD + WORD * i) + #mc.ld(reg.value, spp_reg.value, WORD + WORD * i) + mc.ld(reg.value, spp_reg.value, + self.OFFSET_SPP_TO_GPR_SAVE_AREA + WORD * i) # Fetches the identifier from a descr object. # If it has no identifier, then an unused identifier @@ -194,8 +211,10 @@ self.mc.std(r.SPP.value, r.SP.value, WORD) # compute spilling pointer (SPP) - self.mc.addi(r.SPP.value, r.SP.value, frame_depth - - self.GPR_SAVE_AREA_AND_FORCE_INDEX) + #self.mc.addi(r.SPP.value, r.SP.value, frame_depth + # - self.GPR_SAVE_AREA_AND_FORCE_INDEX) + self.mc.addi(r.SPP.value, r.SP.value, + frame_depth - self.OFFSET_SPP_TO_OLD_BACKCHAIN) self._save_nonvolatiles() # save r31, use r30 as scratch register # this is safe because r30 has been saved already @@ -275,18 +294,17 @@ else: value = decode32(spilling_area, spilling_depth - stack_location * WORD) else: # REG_LOC + #import pdb; pdb.set_trace() reg = ord(enc[i]) if group == self.FLOAT_TYPE: - value = decode64(vfp_regs, reg*2*WORD) - self.fail_boxes_float.setitem(fail_index, value) - continue + assert 0, "not implemented yet" else: # XXX dirty, fix - sub = r.managed_regs_sub(reg) + #sub = r.managed_regs_sub(reg) if IS_PPC_32: - value = decode32(regs, (reg - sub) * WORD) + value = decode32(regs, (reg - 3) * WORD) else: - value = decode64(regs, (reg - sub) * WORD) + value = decode64(regs, (reg - 3) * WORD) if group == self.INT_TYPE: self.fail_boxes_int.setitem(fail_index, value) @@ -360,12 +378,12 @@ def _gen_exit_path(self): mc = PPCBuilder() # compute offset to new SP - size = WORD * (len(r.MANAGED_REGS)) + BACKCHAIN_SIZE + ##size = WORD * (len(r.MANAGED_REGS)) + BACKCHAIN_SIZE # set SP - if IS_PPC_32: - mc.stwu(r.SP.value, r.SP.value, -size) - else: - mc.stdu(r.SP.value, r.SP.value, -size) + ##if IS_PPC_32: + ## mc.stwu(r.SP.value, r.SP.value, -size) + ##else: + ## mc.stdu(r.SP.value, r.SP.value, -size) self._save_managed_regs(mc) # adjust SP (r1) # XXX do quadword alignment @@ -385,14 +403,15 @@ # load parameters into parameter registers if IS_PPC_32: - mc.lwz(r.r3.value, r.SPP.value, 0) # address of state encoding 
+ #mc.lwz(r.r3.value, r.SPP.value, 0) # address of state encoding + mc.lwz(r.r3.value, r.SPP.value, self.ENCODING_AREA) # address of state encoding else: mc.ld(r.r3.value, r.SPP.value, 0) mc.mr(r.r4.value, r.SP.value) # load stack pointer mc.mr(r.r5.value, r.SPP.value) # load spilling pointer # # load address of decoding function into r0 - mc.load_imm(r.r0, addr) + mc.alloc_scratch_reg(addr) if IS_PPC_64: mc.std(r.r2.value, r.SP.value, 3 * WORD) # load TOC pointer and environment pointer @@ -400,42 +419,52 @@ mc.load_imm(r.r11, r11_value) # ... and branch there mc.mtctr(r.r0.value) + mc.free_scratch_reg() mc.bctrl() if IS_PPC_64: mc.ld(r.r2.value, r.SP.value, 3 * WORD) # - mc.addi(r.SP.value, r.SP.value, size) + ##mc.addi(r.SP.value, r.SP.value, size) # save SPP in r5 # (assume that r5 has been written to failboxes) mc.mr(r.r5.value, r.SPP.value) self._restore_nonvolatiles(mc, r.r5) # load old backchain into r4 - offset_to_old_backchain = self.GPR_SAVE_AREA_AND_FORCE_INDEX + WORD + #offset_to_old_backchain = ( FPR_SAVE_AREA + # + GPR_SAVE_AREA + # + FLOAT_INT_CONVERSION + # + FORCE_INDEX + # + self.ENCODING_AREA) if IS_PPC_32: - mc.lwz(r.r4.value, r.r5.value, offset_to_old_backchain) + #mc.lwz(r.r4.value, r.r5.value, offset_to_old_backchain) + mc.lwz(r.r4.value, r.r5.value, self.OFFSET_SPP_TO_OLD_BACKCHAIN + WORD) else: - mc.ld(r.r4.value, r.r5.value, offset_to_old_backchain + WORD) + ##mc.ld(r.r4.value, r.r5.value, offset_to_old_backchain + WORD) + #mc.ld(r.r4.value, r.r5.value, offset_to_old_backchain) + mc.ld(r.r4.value, r.r5.value, self.OFFSET_SPP_TO_OLD_BACKCHAIN + 2 * WORD) mc.mtlr(r.r4.value) # restore LR - - # From SPP, we have a constant offset of GPR_SAVE_AREA_AND_FORCE_INDEX - # to the old backchain. We use the SPP to re-establish the old backchain - # because this exit stub is generated before we know how much space - # the entire frame will need. - mc.addi(r.SP.value, r.r5.value, self.GPR_SAVE_AREA_AND_FORCE_INDEX) # restore old SP + # From SPP, we have a constant offset to the old backchain. We use the + # SPP to re-establish the old backchain because this exit stub is + # generated before we know how much space the entire frame will need. + ##mc.addi(r.SP.value, r.r5.value, self.GPR_SAVE_AREA_AND_FORCE_INDEX) # restore old SP + #mc.addi(r.SP.value, r.r5.value, offset_to_old_backchain) # restore old SP + mc.addi(r.SP.value, r.r5.value, self.OFFSET_SPP_TO_OLD_BACKCHAIN) # restore old SP mc.blr() mc.prepare_insts_blocks() return mc.materialize(self.cpu.asmmemmgr, [], self.cpu.gc_ll_descr.gcrootmap) - # Save all registers which are managed by the register - # allocator on top of the stack before decoding. 
def _save_managed_regs(self, mc): + """ store managed registers in ENCODING AREA + """ for i in range(len(r.MANAGED_REGS)): reg = r.MANAGED_REGS[i] if IS_PPC_32: - mc.stw(reg.value, r.SP.value, i * WORD + BACKCHAIN_SIZE) + #mc.stw(reg.value, r.SP.value, i * WORD + BACKCHAIN_SIZE) + mc.stw(reg.value, r.SPP.value, i * WORD) else: - mc.std(reg.value, r.SP.value, i * WORD + BACKCHAIN_SIZE) + #mc.std(reg.value, r.SP.value, i * WORD + BACKCHAIN_SIZE) + mc.std(reg.value, r.SPP.value, i * WORD) # Load parameters from fail args into locations (stack or registers) def gen_bootstrap_code(self, nonfloatlocs, inputargs): @@ -494,7 +523,9 @@ self.mc.free_scratch_reg() # load values passed on the stack to the corresponding locations - stack_position = self.GPR_SAVE_AREA_AND_FORCE_INDEX\ + #stack_position = self.GPR_SAVE_AREA_AND_FORCE_INDEX\ + # + BACKCHAIN_SIZE + stack_position = self.OFFSET_SPP_TO_OLD_BACKCHAIN\ + BACKCHAIN_SIZE count = 0 @@ -630,6 +661,9 @@ self._teardown() def assemble_bridge(self, faildescr, inputargs, operations, looptoken, log): + + assert 0, "Bridges do not work yet because they need to dynamically adjust the SP" + self.setup(looptoken, operations) assert isinstance(faildescr, AbstractFailDescr) code = faildescr._failure_recovery_code @@ -785,8 +819,8 @@ + FPR_SAVE_AREA + FLOAT_INT_CONVERSION + FORCE_INDEX + + self.ENCODING_AREA + regalloc.frame_manager.frame_depth * WORD - + len(r.MANAGED_REGS) * WORD + self.max_stack_params * WORD + BACKCHAIN_SIZE) @@ -819,12 +853,11 @@ memaddr = self.gen_descr_encoding(descr, args, arglocs) # store addr in force index field - self.mc.alloc_scratch_reg() - self.mc.load_imm(r.r0, memaddr) + self.mc.alloc_scratch_reg(memaddr) if IS_PPC_32: - self.mc.stw(r.r0.value, r.SPP.value, 0) + self.mc.stw(r.r0.value, r.SPP.value, self.ENCODING_AREA) else: - self.mc.std(r.r0.value, r.SPP.value, 0) + self.mc.std(r.r0.value, r.SPP.value, self.ENCODING_AREA) self.mc.free_scratch_reg() if save_exc: diff --git a/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py b/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py --- a/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py +++ b/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py @@ -14,7 +14,7 @@ | | | --------------------------- -- | | | - | FLOAT/INT CONVERSION | |>> 4 (?) * WORD + | FLOAT/INT CONVERSION | |>> 1 * WORD | | | --------------------------- -- | FORCE INDEX | WORD | 1 WORD @@ -26,7 +26,11 @@ | | | | SPILLING AREA | |>> regalloc.frame_manager.frame_depth * WORD | (LOCAL VARIABLE SPACE) | | - --------------------------- -- + --------------------------- -- + | | | + | PARAMETER SAVE AREA | |>> max_stack_params * WORD + | | | + ---------------------------a -- | TOC POINTER | WORD | --------------------------- | | < RESERVED > | WORD | From noreply at buildbot.pypy.org Fri Dec 2 21:43:06 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Fri, 2 Dec 2011 21:43:06 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: test_libffi works almost. Only the two byval tests are left for further investigation Message-ID: <20111202204306.4D87D8208A@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50078:df6d8c92cc30 Date: 2011-12-02 21:38 +0100 http://bitbucket.org/pypy/pypy/changeset/df6d8c92cc30/ Log: test_libffi works almost. 
Only the two byval tests are left for further investigation diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -35,6 +35,7 @@ cls.ulong = clibffi.cast_type_to_ffitype(rffi.ULONG) cls.slonglong = clibffi.cast_type_to_ffitype(rffi.LONGLONG) cls.ulonglong = clibffi.cast_type_to_ffitype(rffi.ULONGLONG) + cls.signed = clibffi.cast_type_to_ffitype(rffi.SIGNED) cls.wchar_t = clibffi.cast_type_to_ffitype(lltype.UniChar) del cls._import @@ -121,7 +122,7 @@ _check_type(TYPE) if _fits_into_signed(TYPE): cls = IntArg - val = rffi.cast(rffi.LONG, val) + val = rffi.cast(rffi.SIGNED, val) elif TYPE is rffi.DOUBLE: cls = FloatArg elif TYPE is rffi.LONGLONG or TYPE is rffi.ULONGLONG: @@ -313,7 +314,7 @@ @jit.oopspec('libffi_call_int(self, funcsym, ll_args)') def _do_call_int(self, funcsym, ll_args): - return self._do_call(funcsym, ll_args, rffi.LONG) + return self._do_call(funcsym, ll_args, rffi.SIGNED) @jit.oopspec('libffi_call_float(self, funcsym, ll_args)') def _do_call_float(self, funcsym, ll_args): @@ -326,7 +327,7 @@ @jit.dont_look_inside def _do_call_raw(self, funcsym, ll_args): # same as _do_call_int, but marked as jit.dont_look_inside - return self._do_call(funcsym, ll_args, rffi.LONG) + return self._do_call(funcsym, ll_args, rffi.SIGNED) @jit.oopspec('libffi_call_longlong(self, funcsym, ll_args)') def _do_call_longlong(self, funcsym, ll_args): @@ -364,7 +365,7 @@ TP = lltype.Ptr(rffi.CArray(RESULT)) buf = rffi.cast(TP, ll_result) if types.is_struct(self.restype): - assert RESULT == rffi.LONG + assert RESULT == rffi.SIGNED # for structs, we directly return the buffer and transfer the # ownership res = rffi.cast(RESULT, buf) diff --git a/pypy/rlib/test/test_libffi.py b/pypy/rlib/test/test_libffi.py --- a/pypy/rlib/test/test_libffi.py +++ b/pypy/rlib/test/test_libffi.py @@ -34,8 +34,8 @@ # .arg() only supports integers and floats chain = ArgChain() x = lltype.malloc(lltype.GcStruct('xxx')) - y = lltype.malloc(lltype.GcArray(rffi.LONG), 3) - z = lltype.malloc(lltype.Array(rffi.LONG), 4, flavor='raw') + y = lltype.malloc(lltype.GcArray(rffi.SIGNED), 3) + z = lltype.malloc(lltype.Array(rffi.SIGNED), 4, flavor='raw') py.test.raises(TypeError, "chain.arg(x)") py.test.raises(TypeError, "chain.arg(y)") py.test.raises(TypeError, "chain.arg(z)") @@ -123,7 +123,7 @@ c_file.write(py.code.Source('\n'.join(snippets))) eci = ExternalCompilationInfo( export_symbols=exports, - include_dirs = [str(py.path.local(autopath.pypydir).join('translator', 'c'))]) + include_dirs=[str(py.path.local(autopath.pypydir).join('translator', 'c'))]) cls.libfoo_name = str(platform.compile([c_file], eci, 'x', standalone=False)) @@ -158,14 +158,15 @@ # ------------------------------------------------------------------------ def test_very_simple(self): - """ - int diff_xy(int x, long y) + """ #include "src/signed_defn.h" + + int diff_xy(int x, Signed y) { return x - y; } """ libfoo = self.get_libfoo() - func = (libfoo, 'diff_xy', [types.sint, types.slong], types.sint) + func = (libfoo, 'diff_xy', [types.sint, types.signed], types.sint) res = self.call(func, [50, 8], lltype.Signed) assert res == 42 @@ -208,7 +209,7 @@ """ libfoo = self.get_libfoo() func = (libfoo, 'many_args', [types.uchar, types.sint], types.sint) - res = self.call(func, [chr(20), 22], rffi.LONG) + res = self.call(func, [chr(20), 22], rffi.SIGNED) assert res == 42 def test_char_args(self): @@ -250,12 +251,12 @@ } """ libfoo = self.get_libfoo() - func = (libfoo, 'inc', [types.pointer], types.slong) + func = 
(libfoo, 'inc', [types.pointer], types.signed) null = lltype.nullptr(rffi.SIGNEDP.TO) res = self.call(func, [null], rffi.SIGNED) assert res == -1 # - ptr_result = lltype.malloc(LONGP.TO, 1, flavor='raw') + ptr_result = lltype.malloc(rffi.SIGNEDP.TO, 1, flavor='raw') ptr_result[0] = 41 res = self.call(func, [ptr_result], rffi.SIGNED) if self.__class__ is TestLibffiCall: @@ -275,23 +276,23 @@ lltype.free(ptr_result, flavor='raw') def test_return_pointer(self): - """ + """ #include "src/signed_defn.h" + struct pair { - long a; - long b; + Signed a; + Signed b; }; struct pair my_static_pair = {10, 20}; - long* get_pointer_to_b() + Signed* get_pointer_to_b() { return &my_static_pair.b; } """ libfoo = self.get_libfoo() func = (libfoo, 'get_pointer_to_b', [], types.pointer) - LONGP = lltype.Ptr(rffi.CArray(rffi.LONG)) - res = self.call(func, [], LONGP) + res = self.call(func, [], rffi.SIGNEDP) assert res[0] == 20 def test_void_result(self): @@ -304,12 +305,12 @@ set_dummy = (libfoo, 'set_dummy', [types.sint], types.void) get_dummy = (libfoo, 'get_dummy', [], types.sint) # - initval = self.call(get_dummy, [], rffi.LONG) + initval = self.call(get_dummy, [], rffi.SIGNED) # res = self.call(set_dummy, [initval+1], lltype.Void) assert res is None # - res = self.call(get_dummy, [], rffi.LONG) + res = self.call(get_dummy, [], rffi.SIGNED) assert res == initval+1 def test_single_float_args(self): @@ -389,32 +390,33 @@ else: assert False, 'Did not raise' - my_raises("self.call(func, [38], rffi.LONG)") # one less - my_raises("self.call(func, [38, 12.3, 42], rffi.LONG)") # one more + my_raises("self.call(func, [38], rffi.SIGNED)") # one less + my_raises("self.call(func, [38, 12.3, 42], rffi.SIGNED)") # one more def test_byval_argument(self): - """ + """ #include "src/signed_defn.h" + struct Point { - long x; - long y; + Signed x; + Signed y; }; - long sum_point(struct Point p) { + Signed sum_point(struct Point p) { return p.x + p.y; } """ libfoo = CDLL(self.libfoo_name) - ffi_point_struct = make_struct_ffitype_e(0, 0, [types.slong, types.slong]) + ffi_point_struct = make_struct_ffitype_e(0, 0, [types.signed, types.signed]) ffi_point = ffi_point_struct.ffistruct - sum_point = (libfoo, 'sum_point', [ffi_point], types.slong) + sum_point = (libfoo, 'sum_point', [ffi_point], types.signed) # - ARRAY = rffi.CArray(rffi.LONG) + ARRAY = rffi.CArray(rffi.SIGNED) buf = lltype.malloc(ARRAY, 2, flavor='raw') buf[0] = 30 buf[1] = 12 adr = rffi.cast(rffi.VOIDP, buf) - res = self.call(sum_point, [('arg_raw', adr)], rffi.LONG, + res = self.call(sum_point, [('arg_raw', adr)], rffi.SIGNED, jitif=["byval"]) assert res == 42 # check that we still have the ownership on the buffer @@ -424,8 +426,9 @@ lltype.free(ffi_point_struct, flavor='raw') def test_byval_result(self): - """ - struct Point make_point(long x, long y) { + """ #include "src/signed_defn.h" + + struct Point make_point(Signed x, Signed y) { struct Point p; p.x = x; p.y = y; @@ -433,11 +436,11 @@ } """ libfoo = CDLL(self.libfoo_name) - ffi_point_struct = make_struct_ffitype_e(0, 0, [types.slong, types.slong]) + ffi_point_struct = make_struct_ffitype_e(0, 0, [types.signed, types.signed]) ffi_point = ffi_point_struct.ffistruct libfoo = CDLL(self.libfoo_name) - make_point = (libfoo, 'make_point', [types.slong, types.slong], ffi_point) + make_point = (libfoo, 'make_point', [types.signed, types.signed], ffi_point) # PTR = lltype.Ptr(rffi.CArray(rffi.LONG)) p = self.call(make_point, [12, 34], PTR, is_struct=True, From noreply at buildbot.pypy.org Fri Dec 2 22:31:34 2011 
From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 2 Dec 2011 22:31:34 +0100 (CET) Subject: [pypy-commit] pypy default: Preset correct attributes on scalars, so that a call to calc_strides isn't emitted Message-ID: <20111202213134.68DAE8208A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50079:87235ee9b8ab Date: 2011-12-02 16:31 -0500 http://bitbucket.org/pypy/pypy/changeset/87235ee9b8ab/ Log: Preset correct attributes on scalars, so that a call to calc_strides isn't emitted diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -798,6 +798,7 @@ _attrs_ = ["dtype", "value", "shape"] def __init__(self, dtype, value): + self.shape = self.strides = [] BaseArray.__init__(self, [], 'C') self.dtype = dtype self.value = value From noreply at buildbot.pypy.org Sat Dec 3 00:17:44 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 3 Dec 2011 00:17:44 +0100 (CET) Subject: [pypy-commit] pypy default: Mark several fields in numpy as being immutable, and the name of a type object as quasi-immutable Message-ID: <20111202231744.E57188208A@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50080:e5ef85031c32 Date: 2011-12-02 18:17 -0500 http://bitbucket.org/pypy/pypy/changeset/e5ef85031c32/ Log: Mark several fields in numpy as being immutable, and the name of a type object as quasi-immutable diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -18,6 +18,8 @@ VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True}) class W_Dtype(Wrappable): + _immuable_fields_ = ["itemtype", "num", "kind"] + def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[]): self.signature = signature.BaseSignature() self.itemtype = itemtype diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -15,6 +15,7 @@ class W_Ufunc(Wrappable): _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"] + _immutable_fields_ = ["promote_to_float", "promote_bools"] def __init__(self, name, promote_to_float, promote_bools, identity): self.name = name @@ -123,6 +124,7 @@ class W_Ufunc2(W_Ufunc): + _immutable_fields_ = ["comparison_func", "func"] argcount = 2 def __init__(self, func, name, promote_to_float=False, promote_bools=False, diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -102,6 +102,7 @@ 'instancetypedef', 'terminator', '_version_tag?', + 'name?', ] # for config.objspace.std.getattributeshortcut From noreply at buildbot.pypy.org Sat Dec 3 00:24:45 2011 From: noreply at buildbot.pypy.org (hager) Date: Sat, 3 Dec 2011 00:24:45 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: made first tests pass again on ppc64 Message-ID: <20111202232445.883F88208A@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50081:7805074b94a4 Date: 2011-12-02 15:24 -0800 http://bitbucket.org/pypy/pypy/changeset/7805074b94a4/ Log: made first tests pass again on ppc64 diff --git a/pypy/jit/backend/ppc/ppcgen/arch.py b/pypy/jit/backend/ppc/ppcgen/arch.py ---
a/pypy/jit/backend/ppc/ppcgen/arch.py +++ b/pypy/jit/backend/ppc/ppcgen/arch.py @@ -11,7 +11,7 @@ else: WORD = 8 IS_PPC_32 = False - BACKCHAIN_SIZE = 6 + BACKCHAIN_SIZE = 4 DWORD = 2 * WORD IS_PPC_64 = not IS_PPC_32 diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -144,11 +144,9 @@ if reg.value == r.SPP.value: continue if IS_PPC_32: - #self.mc.stw(reg.value, r.SPP.value, WORD + WORD * i) self.mc.stw(reg.value, r.SPP.value, self.OFFSET_SPP_TO_GPR_SAVE_AREA + WORD * i) else: - #self.mc.std(reg.value, r.SPP.value, WORD + WORD * i) self.mc.std(reg.value, r.SPP.value, self.OFFSET_SPP_TO_GPR_SAVE_AREA + WORD * i) @@ -157,30 +155,12 @@ """ for i, reg in enumerate(NONVOLATILES): if IS_PPC_32: - #mc.lwz(reg.value, spp_reg.value, WORD + WORD * i) mc.lwz(reg.value, spp_reg.value, self.OFFSET_SPP_TO_GPR_SAVE_AREA + WORD * i) else: - #mc.ld(reg.value, spp_reg.value, WORD + WORD * i) mc.ld(reg.value, spp_reg.value, self.OFFSET_SPP_TO_GPR_SAVE_AREA + WORD * i) - # Fetches the identifier from a descr object. - # If it has no identifier, then an unused identifier - # is generated - # XXX could be overwritten later on, better approach? - def _get_identifier_from_descr(self, descr): - try: - identifier = descr.identifier - except AttributeError: - identifier = None - if identifier is not None: - return identifier - keys = self.cpu.saved_descr.keys() - if keys == []: - return 1 - return max(keys) + 1 - def get_asmmemmgr_blocks(self, looptoken): clt = looptoken.compiled_loop_token if clt.asmmemmgr_blocks is None: @@ -211,8 +191,6 @@ self.mc.std(r.SPP.value, r.SP.value, WORD) # compute spilling pointer (SPP) - #self.mc.addi(r.SPP.value, r.SP.value, frame_depth - # - self.GPR_SAVE_AREA_AND_FORCE_INDEX) self.mc.addi(r.SPP.value, r.SP.value, frame_depth - self.OFFSET_SPP_TO_OLD_BACKCHAIN) self._save_nonvolatiles() @@ -294,7 +272,6 @@ else: value = decode32(spilling_area, spilling_depth - stack_location * WORD) else: # REG_LOC - #import pdb; pdb.set_trace() reg = ord(enc[i]) if group == self.FLOAT_TYPE: assert 0, "not implemented yet" @@ -377,19 +354,7 @@ # - jump back to the calling code def _gen_exit_path(self): mc = PPCBuilder() - # compute offset to new SP - ##size = WORD * (len(r.MANAGED_REGS)) + BACKCHAIN_SIZE - # set SP - ##if IS_PPC_32: - ## mc.stwu(r.SP.value, r.SP.value, -size) - ##else: - ## mc.stdu(r.SP.value, r.SP.value, -size) self._save_managed_regs(mc) - # adjust SP (r1) - # XXX do quadword alignment - #while size % (4 * WORD) != 0: - # size += WORD - # decode_func_addr = llhelper(self.recovery_func_sign, self.failure_recovery_func) if IS_PPC_32: @@ -403,10 +368,9 @@ # load parameters into parameter registers if IS_PPC_32: - #mc.lwz(r.r3.value, r.SPP.value, 0) # address of state encoding mc.lwz(r.r3.value, r.SPP.value, self.ENCODING_AREA) # address of state encoding else: - mc.ld(r.r3.value, r.SPP.value, 0) + mc.ld(r.r3.value, r.SPP.value, self.ENCODING_AREA) mc.mr(r.r4.value, r.SP.value) # load stack pointer mc.mr(r.r5.value, r.SPP.value) # load spilling pointer # @@ -424,30 +388,19 @@ if IS_PPC_64: mc.ld(r.r2.value, r.SP.value, 3 * WORD) # - ##mc.addi(r.SP.value, r.SP.value, size) # save SPP in r5 # (assume that r5 has been written to failboxes) mc.mr(r.r5.value, r.SPP.value) self._restore_nonvolatiles(mc, r.r5) # load old backchain into r4 - #offset_to_old_backchain = ( FPR_SAVE_AREA - # + GPR_SAVE_AREA - # + FLOAT_INT_CONVERSION - # + 
FORCE_INDEX - # + self.ENCODING_AREA) if IS_PPC_32: - #mc.lwz(r.r4.value, r.r5.value, offset_to_old_backchain) mc.lwz(r.r4.value, r.r5.value, self.OFFSET_SPP_TO_OLD_BACKCHAIN + WORD) else: - ##mc.ld(r.r4.value, r.r5.value, offset_to_old_backchain + WORD) - #mc.ld(r.r4.value, r.r5.value, offset_to_old_backchain) mc.ld(r.r4.value, r.r5.value, self.OFFSET_SPP_TO_OLD_BACKCHAIN + 2 * WORD) mc.mtlr(r.r4.value) # restore LR # From SPP, we have a constant offset to the old backchain. We use the # SPP to re-establish the old backchain because this exit stub is # generated before we know how much space the entire frame will need. - ##mc.addi(r.SP.value, r.r5.value, self.GPR_SAVE_AREA_AND_FORCE_INDEX) # restore old SP - #mc.addi(r.SP.value, r.r5.value, offset_to_old_backchain) # restore old SP mc.addi(r.SP.value, r.r5.value, self.OFFSET_SPP_TO_OLD_BACKCHAIN) # restore old SP mc.blr() mc.prepare_insts_blocks() @@ -460,10 +413,8 @@ for i in range(len(r.MANAGED_REGS)): reg = r.MANAGED_REGS[i] if IS_PPC_32: - #mc.stw(reg.value, r.SP.value, i * WORD + BACKCHAIN_SIZE) mc.stw(reg.value, r.SPP.value, i * WORD) else: - #mc.std(reg.value, r.SP.value, i * WORD + BACKCHAIN_SIZE) mc.std(reg.value, r.SPP.value, i * WORD) # Load parameters from fail args into locations (stack or registers) @@ -523,8 +474,6 @@ self.mc.free_scratch_reg() # load values passed on the stack to the corresponding locations - #stack_position = self.GPR_SAVE_AREA_AND_FORCE_INDEX\ - # + BACKCHAIN_SIZE stack_position = self.OFFSET_SPP_TO_OLD_BACKCHAIN\ + BACKCHAIN_SIZE @@ -809,11 +758,6 @@ return mc.materialize(self.cpu.asmmemmgr, [], self.cpu.gc_ll_descr.gcrootmap) - #def compute_frame_depth(self, regalloc): - # frame_depth = (GPR_SAVE_AREA # GPR space - # + WORD # FORCE INDEX - # + regalloc.frame_manager.frame_depth * WORD) - # return frame_depth def compute_frame_depth(self, regalloc): frame_depth = ( GPR_SAVE_AREA + FPR_SAVE_AREA From noreply at buildbot.pypy.org Sat Dec 3 07:55:44 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Dec 2011 07:55:44 +0100 (CET) Subject: [pypy-commit] pypy numpy-share-iterators: in-progress work on sharing iterators. not really working Message-ID: <20111203065544.C0C9282A00@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-share-iterators Changeset: r50082:02ca7995bf12 Date: 2011-12-03 08:52 +0200 http://bitbucket.org/pypy/pypy/changeset/02ca7995bf12/ Log: in-progress work on sharing iterators. 
not really working diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -133,7 +133,7 @@ ) arr = NDimArray(size, shape[:], dtype=dtype, order=order) shapelen = len(shape) - arr_iter = arr.start_iter(arr.shape) + arr_iter = arr.start_iter([]) for i in range(len(elems_w)): w_elem = elems_w[i] dtype.setitem_w(space, arr.storage, arr_iter.offset, w_elem) @@ -153,6 +153,8 @@ # in the original array, strides[i] == backstrides[i] == 0 class BaseIterator(object): + _next = None + def next(self, shapelen): raise NotImplementedError @@ -162,6 +164,19 @@ def get_offset(self): raise NotImplementedError + def unique(self, all_iters): + for iter in all_iters: + if iter.compatible(self): + return ChildIterator(iter) + all_iters.append(self) + return self + + def compatible(self, other): + return False + + def clean_next(self): + self._next = None + class ArrayIterator(BaseIterator): def __init__(self, size): self.offset = 0 @@ -171,6 +186,7 @@ arr = instantiate(ArrayIterator) arr.size = self.size arr.offset = self.offset + 1 + self._next = arr return arr def done(self): @@ -179,6 +195,9 @@ def get_offset(self): return self.offset + def compatible(self, other): + return isinstance(other, ArrayIterator) # there can be only one + class OneDimIterator(BaseIterator): def __init__(self, start, step, stop): self.offset = start @@ -190,6 +209,7 @@ arr.size = self.size arr.step = self.step arr.offset = self.offset + self.step + self._next = arr return arr def done(self): @@ -227,6 +247,7 @@ res.indices = indices res.arr = self.arr res._done = done + self._next = res return res def done(self): @@ -282,6 +303,7 @@ res.strides = self.strides res.backstrides = self.backstrides res.res_shape = self.res_shape + self._next = res return res def done(self): @@ -309,6 +331,10 @@ return self.right.get_offset() return self.left.get_offset() + def clean_next(self): + self.left.clean_next() + self.right.clean_next() + class Call1Iterator(BaseIterator): def __init__(self, child): self.child = child @@ -322,6 +348,9 @@ def get_offset(self): return self.child.get_offset() + def clean_next(self): + self.child.clean_next() + class ConstantIterator(BaseIterator): def next(self, shapelen): return self @@ -332,6 +361,20 @@ def get_offset(self): return 0 +class ChildIterator(BaseIterator): + """ An iterator that just refers to some other iterator + """ + def __init__(self, parent): + self.parent = parent + + def next(self, shapelen): + return ChildIterator(self.parent._next) + + def done(self): + return self.parent.done() + + def get_offset(self): + return self.parent.get_offset() class BaseArray(Wrappable): _attrs_ = ["invalidates", "signature", "shape", "strides", "backstrides", @@ -438,7 +481,7 @@ reds=['result', 'idx', 'i', 'self', 'cur_best', 'dtype'] ) def loop(self): - i = self.start_iter() + i = self.start_iter([]) cur_best = self.eval(i) shapelen = len(self.shape) i = i.next(shapelen) @@ -469,7 +512,7 @@ def _all(self): dtype = self.find_dtype() - i = self.start_iter() + i = self.start_iter([]) shapelen = len(self.shape) while not i.done(): all_driver.jit_merge_point(signature=self.signature, @@ -484,7 +527,7 @@ def _any(self): dtype = self.find_dtype() - i = self.start_iter() + i = self.start_iter([]) shapelen = len(self.shape) while not i.done(): any_driver.jit_merge_point(signature=self.signature, @@ -778,7 +821,7 @@ raise OperationError(space.w_ValueError, space.wrap( "The truth 
value of an array with more than one element is ambiguous. Use a.any() or a.all()")) return space.wrap(space.is_true(self.get_concrete().eval( - self.start_iter(self.shape)).wrap(space))) + self.start_iter([])).wrap(space))) def descr_get_transpose(self, space): concrete = self.get_concrete() @@ -803,7 +846,7 @@ def getitem(self, item): raise NotImplementedError - def start_iter(self, res_shape=None): + def start_iter(self, all_iters, res_shape=None): raise NotImplementedError def descr_debug_repr(self, space): @@ -854,8 +897,8 @@ def eval(self, iter): return self.value - def start_iter(self, res_shape=None): - return ConstantIterator() + def start_iter(self, all_iters, res_shape=None): + return ConstantIterator().unique(all_iters) def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): builder.append(self.dtype.str_format(self.value)) @@ -886,16 +929,19 @@ result_size = self.find_size() result = NDimArray(result_size, self.shape, self.find_dtype()) shapelen = len(self.shape) - i = self.start_iter() - ri = result.start_iter() + all_iters = [] + i = self.start_iter(all_iters) + ri = result.start_iter(all_iters) while not ri.done(): numpy_driver.jit_merge_point(signature=signature, shapelen=shapelen, result_size=result_size, i=i, ri=ri, self=self, result=result) - result.dtype.setitem(result.storage, ri.offset, self.eval(i)) + result.dtype.setitem(result.storage, ri.get_offset(), self.eval(i)) i = i.next(shapelen) ri = ri.next(shapelen) + i.clean_next() + ri.clean_next() return result def force_if_needed(self): @@ -952,10 +998,10 @@ assert isinstance(call_sig, signature.Call1) return call_sig.func(self.res_dtype, val) - def start_iter(self, res_shape=None): + def start_iter(self, all_iters, res_shape=None): if self.forced_result is not None: - return self.forced_result.start_iter(res_shape) - return Call1Iterator(self.values.start_iter(res_shape)) + return self.forced_result.start_iter(all_iters, res_shape) + return Call1Iterator(self.values.start_iter(all_iters, res_shape)) def debug_repr(self): sig = self.signature @@ -989,13 +1035,13 @@ def _find_size(self): return self.size - def start_iter(self, res_shape=None): + def start_iter(self, all_iters, res_shape=None): if self.forced_result is not None: - return self.forced_result.start_iter(res_shape) + return self.forced_result.start_iter(all_iters, res_shape) if res_shape is None: res_shape = self.shape # we still force the shape on children - return Call2Iterator(self.left.start_iter(res_shape), - self.right.start_iter(res_shape)) + return Call2Iterator(self.left.start_iter(all_iters, res_shape), + self.right.start_iter(all_iters, res_shape)) def _eval(self, iter): assert isinstance(iter, Call2Iterator) @@ -1083,8 +1129,9 @@ self._sliceloop(w_value, res_shape) def _sliceloop(self, source, res_shape): - source_iter = source.start_iter(res_shape) - res_iter = self.start_iter(res_shape) + all_iters = [] + source_iter = source.start_iter(all_iters, res_shape) + res_iter = self.start_iter(all_iters, res_shape) shapelen = len(res_shape) while not res_iter.done(): slice_driver.jit_merge_point(signature=source.signature, @@ -1097,11 +1144,11 @@ source_iter = source_iter.next(shapelen) res_iter = res_iter.next(shapelen) - def start_iter(self, res_shape=None): + def start_iter(self, all_iters, res_shape=None): if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) + return BroadcastIterator(self, res_shape).unique(all_iters) if len(self.shape) == 1: - return OneDimIterator(self.start, 
self.strides[0], self.shape[0]) + return OneDimIterator(self.start, self.strides[0], self.shape[0]).unique(all_iters) return ViewIterator(self) def setitem(self, item, value): @@ -1112,7 +1159,7 @@ def copy(self): array = NDimArray(self.size, self.shape[:], self.find_dtype()) - iter = self.start_iter() + iter = self.start_iter([]) while not iter.done(): array.setitem(iter.offset, self.getitem(iter.offset)) iter = iter.next(len(self.shape)) @@ -1167,11 +1214,11 @@ self.invalidated() self.dtype.setitem(self.storage, item, value) - def start_iter(self, res_shape=None): + def start_iter(self, all_iters, res_shape=None): if self.order == 'C': if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - return ArrayIterator(self.size) + return BroadcastIterator(self, res_shape).unique(all_iters) + return ArrayIterator(self.size).unique(all_iters) raise NotImplementedError # use ViewIterator simply, test it def debug_repr(self): @@ -1292,13 +1339,13 @@ [arr.backstrides[-1]], [size]) self.shapelen = len(arr.shape) self.arr = arr - self.iter = self.start_iter() + self.iter = self.start_iter([]) - def start_iter(self, res_shape=None): + def start_iter(self, all_iters, res_shape=None): if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) + return BroadcastIterator(self, res_shape).unique(all_iters) return OneDimIterator(self.arr.start, self.strides[0], - self.shape[0]) + self.shape[0]).unique(all_iters) def find_dtype(self): return self.arr.find_dtype() diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -64,7 +64,7 @@ space, obj.find_dtype(), promote_to_largest=True ) - start = obj.start_iter(obj.shape) + start = obj.start_iter([]) shapelen = len(obj.shape) if shapelen > 1 and not multidim: raise OperationError(space.w_NotImplementedError, diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -50,7 +50,7 @@ interp.run(space) res = interp.results[-1] assert isinstance(res, BaseArray) - w_res = res.eval(res.start_iter()).wrap(interp.space) + w_res = res.eval(res.start_iter([])).wrap(interp.space) if isinstance(w_res, BoolObject): return float(w_res.boolval) elif isinstance(w_res, FloatObject): From noreply at buildbot.pypy.org Sat Dec 3 09:09:00 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 3 Dec 2011 09:09:00 +0100 (CET) Subject: [pypy-commit] pypy default: fix for setinteriorfield_raw on llimpl for floatstorage Message-ID: <20111203080900.E5F1682A00@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50083:5cbe959f1d81 Date: 2011-12-03 03:07 -0500 http://bitbucket.org/pypy/pypy/changeset/5cbe959f1d81/ Log: fix for setinteriorfield_raw on llimpl for floatstorage diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -20,7 +20,7 @@ from pypy.jit.backend.llgraph import symbolic from pypy.jit.codewriter import longlong -from pypy.rlib import libffi +from pypy.rlib import libffi, clibffi from pypy.rlib.objectmodel import ComputedIntSymbolic, we_are_translated from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint @@ -1514,13 +1514,17 @@ 
do_setinteriorfield_gc_float = new_setinteriorfield_gc(cast_from_floatstorage) do_setinteriorfield_gc_ptr = new_setinteriorfield_gc(cast_from_ptr) -def new_setinteriorfield_raw(ffitype): +def new_setinteriorfield_raw(cast_func, ffitype): def do_setinteriorfield_raw(array, index, newvalue, width, ofs): addr = rffi.cast(rffi.VOIDP, array) + for TYPE, ffitype2 in clibffi.ffitype_map: + if ffitype2 is ffitype: + newvalue = cast_func(TYPE, newvalue) + break return libffi.array_setitem(ffitype, width, addr, index, ofs, newvalue) return do_setinteriorfield_raw -do_setinteriorfield_raw_int = new_setinteriorfield_raw(libffi.types.slong) -do_setinteriorfield_raw_float = new_setinteriorfield_raw(libffi.types.double) +do_setinteriorfield_raw_int = new_setinteriorfield_raw(cast_from_int, libffi.types.slong) +do_setinteriorfield_raw_float = new_setinteriorfield_raw(cast_from_floatstorage, libffi.types.double) def do_setfield_raw_int(struct, fieldnum, newvalue): STRUCT, fieldname = symbolic.TokenToField[fieldnum] From noreply at buildbot.pypy.org Sat Dec 3 11:04:22 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Dec 2011 11:04:22 +0100 (CET) Subject: [pypy-commit] pypy default: Be safe rather than sorry. Fixes a crash. Message-ID: <20111203100422.74C4882A00@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50084:a6074ffef316 Date: 2011-12-03 11:01 +0100 http://bitbucket.org/pypy/pypy/changeset/a6074ffef316/ Log: Be safe rather than sorry. Fixes a crash. diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -247,7 +247,7 @@ if funcobj.random_effects_on_gcobjs: return True except (AttributeError, lltype.DelayedPointer): - pass + return True # better safe than sorry return super(RandomEffectsAnalyzer, self).analyze_external_call( op, seen) From noreply at buildbot.pypy.org Sat Dec 3 11:08:14 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 3 Dec 2011 11:08:14 +0100 (CET) Subject: [pypy-commit] pypy py3k: Fix translation Message-ID: <20111203100814.437B382A00@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50085:832424e457be Date: 2011-12-03 11:07 +0100 http://bitbucket.org/pypy/pypy/changeset/832424e457be/ Log: Fix translation diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -326,7 +326,7 @@ if 'Unicode' in funcname: def unwrap_sep(space, w_by): - return w_by.value + return w_by._value else: def unwrap_sep(space, w_by): return space.bufferstr_w(w_by) From noreply at buildbot.pypy.org Sat Dec 3 12:35:26 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Dec 2011 12:35:26 +0100 (CET) Subject: [pypy-commit] pypy default: A failing test for the missing write_barrier before a SETINTERIORFIELD_GC. Message-ID: <20111203113526.5603682A00@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50086:68edd5123ab0 Date: 2011-12-03 12:34 +0100 http://bitbucket.org/pypy/pypy/changeset/68edd5123ab0/ Log: A failing test for the missing write_barrier before a SETINTERIORFIELD_GC. 
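[Editor's note] SETINTERIORFIELD_GC stores a pointer into a struct that lives inline inside a GC array (an "interior" field), and with PyPy's generational GC such a store normally has to be preceded by a write barrier so the collector notices the new old-to-young reference. The sketch below is only an editorial illustration of that pattern, written against a hypothetical gc.write_barrier() hook; it is not code from this changeset or from the PyPy sources.

    class Entry(object):
        # One inline struct of the array; x, y and z hold GC pointers.
        def __init__(self):
            self.x = self.y = self.z = None

    def setinteriorfield_gc(gc, array, index, field, new_ptr):
        # Tell the GC that `array` (possibly old) is about to reference
        # `new_ptr` (possibly young).  Omitting this call is the kind of bug
        # the test below is meant to expose: a minor collection could miss
        # the reference and reclaim the object that `new_ptr` points to.
        gc.write_barrier(array)
        setattr(array[index], field, new_ptr)

The test added in the diff below exercises exactly this case: it fills a GcArray of structs that each hold three pointers to freshly allocated objects and then checks that the stored values survive.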
diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -457,6 +457,46 @@ def test_compile_framework_7(self): self.run('compile_framework_7') + def define_compile_framework_7_interior(cls): + # Array of structs containing pointers (test the write barrier + # for setinteriorfield_gc) + S = lltype.GcStruct('S', ('i', lltype.Signed)) + A = lltype.GcArray(lltype.Struct('entry', ('x', lltype.Ptr(S)), + ('y', lltype.Ptr(S)), + ('z', lltype.Ptr(S)))) + class Glob: + a = lltype.nullptr(A) + glob = Glob() + # + def make_s(i): + s = lltype.malloc(S) + s.i = i + return s + # + @unroll_safe + def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): + a = glob.a + if not a: + a = glob.a = lltype.malloc(A, 10) + i = 0 + while i < 10: + a[i].x = make_s(n + i * 100 + 1) + a[i].y = make_s(n + i * 100 + 2) + a[i].z = make_s(n + i * 100 + 3) + i += 1 + i = 0 + while i < 10: + check(a[i].x.i == n + i * 100 + 1) + check(a[i].y.i == n + i * 100 + 2) + check(a[i].z.i == n + i * 100 + 3) + i += 1 + return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s + f(123, *[None]*11) # check that the check() are ok + return None, f, None + + def test_compile_framework_7_interior(self): + self.run('compile_framework_7_interior') + def define_compile_framework_8(cls): # Array of pointers, of unknown length (test write_barrier_from_array) def before(n, x): From noreply at buildbot.pypy.org Sat Dec 3 15:17:47 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 3 Dec 2011 15:17:47 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: test framework for traces with labels Message-ID: <20111203141747.B2F1182A00@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50087:6da95987f8ae Date: 2011-12-03 15:17 +0100 http://bitbucket.org/pypy/pypy/changeset/6da95987f8ae/ Log: test framework for traces with labels diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -0,0 +1,108 @@ +from pypy.jit.metainterp.optimizeopt.test.test_util import ( + LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot) +from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken +from pypy.jit.metainterp.resoperation import rop, opname, ResOperation +from pypy.jit.metainterp.optimize import InvalidLoop +from py.test import raises + +class BaseTestMultiLabel(BaseTest): + enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" + + def optimize_loop(self, ops, expected): + loop = self.parse(ops) + if expected != "crash!": + expected = self.parse(expected) + + part = TreeLoop('part') + part.inputargs = loop.inputargs + part.start_resumedescr = FakeDescrWithSnapshot() + token = loop.original_jitcell_token + + optimized = TreeLoop('optimized') + optimized.inputargs = loop.inputargs + optimized.operations = [] + + labels = [i for i, op in enumerate(loop.operations) \ + if op.getopnum()==rop.LABEL] + prv = 0 + last_label = [] + for nxt in labels + [len(loop.operations)]: + assert prv != nxt + operations = last_label + loop.operations[prv:nxt] + if nxt < len(loop.operations): + label = loop.operations[nxt] + assert label.getopnum() == rop.LABEL + jumpop = ResOperation(rop.JUMP, label.getarglist(), + None, descr=token) + operations.append(jumpop) + part.operations = operations + 
self._do_optimize_loop(part, None) + if part.operations[-1].getopnum() == rop.LABEL: + last_label = [part.operations.pop()] + else: + last_label = [] + optimized.operations.extend(part.operations) + prv = nxt + 1 + + # + print + print "Optimized:" + if optimized.operations: + print '\n'.join([str(o) for o in optimized.operations]) + else: + print 'Failed!' + print + + assert expected != "crash!", "should have raised an exception" + self.assert_equal(optimized, expected) + + return optimized + + def test_simple(self): + ops = """ + [i1] + i2 = int_add(i1, 1) + escape(i2) + label(i1) + i3 = int_add(i1, 1) + escape(i3) + jump(i1) + """ + expected = """ + [i1] + i2 = int_add(i1, 1) + escape(i2) + label(i1, i2) + escape(i2) + jump(i1, i2) + """ + self.optimize_loop(ops, expected) + + def test_forced_virtual(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + escape(p3) + jump(p3) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtuals_with_nonmatching_fields(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3, 1, descr=valuedescr) + label(p3) + p4 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p4, 1, descr=nextdescr) + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + +class TestLLtype(BaseTestMultiLabel, LLtypeMixin): + pass + From noreply at buildbot.pypy.org Sat Dec 3 15:33:26 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 3 Dec 2011 15:33:26 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: we cant assume the virtual states always match at this point anymore Message-ID: <20111203143326.ABCB682A00@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50088:27339b188388 Date: 2011-12-03 15:32 +0100 http://bitbucket.org/pypy/pypy/changeset/27339b188388/ Log: we cant assume the virtual states always match at this point anymore diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -101,6 +101,17 @@ """ with raises(InvalidLoop): self.optimize_loop(ops, ops) + + def test_virtual_arrays_with_nonmatching_lens(self): + ops = """ + [p1] + p2 = new_array(3, descr=arraydescr) + label(p2) + p4 = new_array(2, descr=arraydescr) + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) class TestLLtype(BaseTestMultiLabel, LLtypeMixin): diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -110,7 +110,10 @@ if not value.is_virtual(): raise BadVirtualState for i in range(len(self.fielddescrs)): - v = value._fields[self.fielddescrs[i]] + try: + v = value._fields[self.fielddescrs[i]] + except KeyError: + raise BadVirtualState s = self.fieldstate[i] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -188,7 +191,10 @@ if not value.is_virtual(): raise BadVirtualState for i in range(len(self.fieldstate)): - v = value._items[i] + try: + v = value._items[i] + except IndexError: + raise BadVirtualState s = self.fieldstate[i] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -259,7 +265,12 @@ p = 0 for i in range(len(self.fielddescrs)): for j in range(len(self.fielddescrs[i])): - v = 
value._items[i][self.fielddescrs[i][j]] + try: + v = value._items[i][self.fielddescrs[i][j]] + except IndexError: + raise BadVirtualState + except KeyError: + raise BadVirtualState s = self.fieldstate[p] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) From noreply at buildbot.pypy.org Sat Dec 3 18:21:56 2011 From: noreply at buildbot.pypy.org (boemmels) Date: Sat, 3 Dec 2011 18:21:56 +0100 (CET) Subject: [pypy-commit] lang-scheme default: Implement the #\ syntax Message-ID: <20111203172156.635EC82A00@wyvern.cs.uni-duesseldorf.de> Author: Juergen Boemmels Branch: Changeset: r21:b3e418138833 Date: 2011-11-29 21:46 +0100 http://bitbucket.org/pypy/lang-scheme/changeset/b3e418138833/ Log: Implement the #\ syntax diff --git a/scheme/object.py b/scheme/object.py --- a/scheme/object.py +++ b/scheme/object.py @@ -143,15 +143,30 @@ def __repr__(self): return "" +_charname_to_char = { + 'space': ' ', + 'newline': '\n', +} + +_char_to_charname = dict((v, k) for k, v in _charname_to_char.items()) + class W_Character(W_Root): def __init__(self, val): + if len(val) != 1: + val = _charname_to_char.get(val.lower(), None) + if val is None: + raise SchemeSyntaxError self.chrval = val def to_string(self): return self.chrval def to_repr(self): - return "#\\" + self.chrval + charname = _char_to_charname.get(self.chrval, None) + if charname is None: + return "#\\" + self.chrval + else: + return "#\\" + charname def __repr__(self): return "" diff --git a/scheme/ssparser.py b/scheme/ssparser.py --- a/scheme/ssparser.py +++ b/scheme/ssparser.py @@ -32,9 +32,9 @@ return {W_String(str_unquote(c))}; CHARACTER: - c = `#\\.` + c = `#\\(.|[A-Za-z]+)` IGNORE* - return {W_Character(c[2])}; + return {W_Character(c[2:])}; SYMBOL: c = `[\+\-\*\^\?a-zA-Z!<=>_~/$%&:][\+\-\*\^\?a-zA-Z0-9!<=>_~/$%&:.]*` diff --git a/scheme/test/test_object.py b/scheme/test/test_object.py --- a/scheme/test/test_object.py +++ b/scheme/test/test_object.py @@ -24,7 +24,19 @@ w_str = W_String(str) assert str == w_str.to_string() assert w_str.to_repr() == r'''"\\ \\\\ \\' \" \\\""''' - + +def test_char(): + c = 'x' + w_char = W_Character(c) + assert w_char.to_boolean() is True + assert w_char.to_string() == 'x' + assert w_char.to_repr() == r'#\x' + c = ' ' + w_char = W_Character(c) + assert w_char.to_boolean() is True + assert w_char.to_string() == ' ' + assert w_char.to_repr() == r'#\space' + def test_fixnum(): num = 12345 w_num = W_Integer(num) diff --git a/scheme/test/test_parser.py b/scheme/test/test_parser.py --- a/scheme/test/test_parser.py +++ b/scheme/test/test_parser.py @@ -2,6 +2,7 @@ from scheme.ssparser import parse from scheme.object import W_Boolean, W_Real, W_Integer, W_String from scheme.object import W_Pair, W_Nil, W_Symbol, W_Character, W_Vector +from scheme.object import SchemeSyntaxError from pypy.rlib.parsing.makepackrat import BacktrackException def parse_sexpr(expr): @@ -76,6 +77,29 @@ assert isinstance(w_string, W_String) assert unwrap(w_string) == contents +def test_character(): + w_char = parse_sexpr(r'#\c') + assert isinstance(w_char, W_Character) + assert unwrap(w_char) == 'c' + + more_chars = [(r'#\Z', 'Z'), + (r'#\,', ','), + (r'#\;', ';'), + (r'#\)', ')'), + (r'#\(', '('), + (r'#\#', '#'), + (r'#\ ', ' '), + (r'#\space', ' '), + (r'#\newline', '\n'), + ] + + for code, result in more_chars: + w_char = parse_sexpr(code) + assert isinstance(w_char, W_Character) + assert unwrap(w_char) == result + + py.test.raises(SchemeSyntaxError, parse_sexpr, r'#\foobar') + def test_objects(): w_fixnum = 
parse_sexpr('-12345') assert isinstance(w_fixnum, W_Integer) From noreply at buildbot.pypy.org Sat Dec 3 18:21:57 2011 From: noreply at buildbot.pypy.org (boemmels) Date: Sat, 3 Dec 2011 18:21:57 +0100 (CET) Subject: [pypy-commit] lang-scheme default: Mock-up Implementation of for-each Message-ID: <20111203172157.68A9E82A00@wyvern.cs.uni-duesseldorf.de> Author: Juergen Boemmels Branch: Changeset: r22:195698c3612c Date: 2011-11-29 21:47 +0100 http://bitbucket.org/pypy/lang-scheme/changeset/195698c3612c/ Log: Mock-up Implementation of for-each diff --git a/scheme/procedure.py b/scheme/procedure.py --- a/scheme/procedure.py +++ b/scheme/procedure.py @@ -262,6 +262,14 @@ # XXX need to find out how to do this tailrecusive return (Reverse().procedure(ctx,[w_rev_result]), None) +class ForEach(W_Procedure): + _symbol_name = "for-each" + + def procedure_tr(self, ctx, lst): + # simply relay to map and ignore output + (res, ctx) = Map().procedure_tr(ctx, lst) + return (w_undefined, ctx) + ## # Equivalnece Predicates ## From noreply at buildbot.pypy.org Sat Dec 3 18:21:58 2011 From: noreply at buildbot.pypy.org (boemmels) Date: Sat, 3 Dec 2011 18:21:58 +0100 (CET) Subject: [pypy-commit] lang-scheme default: Implement String equal Message-ID: <20111203172158.6D5AE82A00@wyvern.cs.uni-duesseldorf.de> Author: Juergen Boemmels Branch: Changeset: r23:d188b009732f Date: 2011-11-29 23:02 +0100 http://bitbucket.org/pypy/lang-scheme/changeset/d188b009732f/ Log: Implement String equal diff --git a/scheme/object.py b/scheme/object.py --- a/scheme/object.py +++ b/scheme/object.py @@ -143,6 +143,11 @@ def __repr__(self): return "" + def equal(self, w_obj): + if not isinstance(w_obj, W_String): + return False + return self.strval == w_obj.strval + _charname_to_char = { 'space': ' ', 'newline': '\n', diff --git a/scheme/test/test_object.py b/scheme/test/test_object.py --- a/scheme/test/test_object.py +++ b/scheme/test/test_object.py @@ -24,6 +24,11 @@ w_str = W_String(str) assert str == w_str.to_string() assert w_str.to_repr() == r'''"\\ \\\\ \\' \" \\\""''' + str1 = "foobar" + w_str1 = W_String(str1) + str2 = "foo" + "bar" + w_str2 = W_String(str2) + assert w_str1.equal(w_str2) def test_char(): c = 'x' From noreply at buildbot.pypy.org Sat Dec 3 18:21:59 2011 From: noreply at buildbot.pypy.org (boemmels) Date: Sat, 3 Dec 2011 18:21:59 +0100 (CET) Subject: [pypy-commit] lang-scheme default: Equality for Characters Message-ID: <20111203172159.726CA82A00@wyvern.cs.uni-duesseldorf.de> Author: Juergen Boemmels Branch: Changeset: r24:d333d806d357 Date: 2011-11-29 23:24 +0100 http://bitbucket.org/pypy/lang-scheme/changeset/d333d806d357/ Log: Equality for Characters diff --git a/scheme/object.py b/scheme/object.py --- a/scheme/object.py +++ b/scheme/object.py @@ -176,6 +176,12 @@ def __repr__(self): return "" + def eqv(self, w_obj): + if not isinstance(w_obj, W_Character): + return False + return self.chrval == w_obj.chrval + equal = eqv + class W_Real(W_Root): def __init__(self, val): self.exact = False diff --git a/scheme/test/test_object.py b/scheme/test/test_object.py --- a/scheme/test/test_object.py +++ b/scheme/test/test_object.py @@ -24,11 +24,16 @@ w_str = W_String(str) assert str == w_str.to_string() assert w_str.to_repr() == r'''"\\ \\\\ \\' \" \\\""''' + str1 = "foobar" w_str1 = W_String(str1) str2 = "foo" + "bar" w_str2 = W_String(str2) assert w_str1.equal(w_str2) + w_str2 = W_String("foo") + assert not w_str1.equal(w_str2) + w_sym = symbol(str1) + assert not w_str1.equal(w_str2) def test_char(): c = 'x' @@ 
-41,6 +46,7 @@ assert w_char.to_boolean() is True assert w_char.to_string() == ' ' assert w_char.to_repr() == r'#\space' + assert w_char.eqv(W_Character(' ')) def test_fixnum(): num = 12345 From noreply at buildbot.pypy.org Sat Dec 3 18:22:00 2011 From: noreply at buildbot.pypy.org (boemmels) Date: Sat, 3 Dec 2011 18:22:00 +0100 (CET) Subject: [pypy-commit] lang-scheme default: Implement make-string Message-ID: <20111203172200.76B7D82A00@wyvern.cs.uni-duesseldorf.de> Author: Juergen Boemmels Branch: Changeset: r25:9ff8fb62dd90 Date: 2011-11-29 23:44 +0100 http://bitbucket.org/pypy/lang-scheme/changeset/9ff8fb62dd90/ Log: Implement make-string diff --git a/scheme/procedure.py b/scheme/procedure.py --- a/scheme/procedure.py +++ b/scheme/procedure.py @@ -269,7 +269,27 @@ # simply relay to map and ignore output (res, ctx) = Map().procedure_tr(ctx, lst) return (w_undefined, ctx) - + +class MakeString(W_Procedure): + _symbol_name = "make-string" + + def procedure(self, ctx, lst): + if len(lst) < 1 or len(lst) > 2: + raise WrongArgsNumber + + w_number = lst[0] + if not isinstance(w_number, W_Integer): + raise WrongArgType(w_number, "Integer") + + if len(lst) == 2: + w_char = lst[1] + else: + w_char = W_Character(' ') + if not isinstance(w_char, W_Character): + raise WrongArgType(w_char, "Character") + + return W_String(w_char.to_string() * w_number.to_fixnum()) + ## # Equivalnece Predicates ## diff --git a/scheme/test/test_scheme_level.py b/scheme/test/test_scheme_level.py --- a/scheme/test/test_scheme_level.py +++ b/scheme/test/test_scheme_level.py @@ -104,4 +104,11 @@ (#f #f #f #f #f #f #f #f #f #t #f #f) ; procedure? (#f #f #f #f #f #f #f #f #f #f #t #t)); vector? )) -""") \ No newline at end of file +""") + +def test_string(): + run_with_assert(r""" +(define new-str (make-string 7 #\*)) +(assert (string? new-str)) +(assert (equal? new-str "*******")) +""") From noreply at buildbot.pypy.org Sat Dec 3 18:22:01 2011 From: noreply at buildbot.pypy.org (boemmels) Date: Sat, 3 Dec 2011 18:22:01 +0100 (CET) Subject: [pypy-commit] lang-scheme default: Improve Scheme-level testing: Message-ID: <20111203172201.7A86E82A00@wyvern.cs.uni-duesseldorf.de> Author: Juergen Boemmels Branch: Changeset: r26:55878863ff6b Date: 2011-12-03 02:25 +0100 http://bitbucket.org/pypy/lang-scheme/changeset/55878863ff6b/ Log: Improve Scheme-level testing: In case of differences show the calculated values diff --git a/scheme/test/test_object.py b/scheme/test/test_object.py --- a/scheme/test/test_object.py +++ b/scheme/test/test_object.py @@ -87,6 +87,7 @@ def test_symbol(): w_sym = W_Symbol("symb") assert w_sym.to_string() == "symb" + assert w_sym.to_repr() == "symb" assert w_sym.to_boolean() is True def test_vector(): diff --git a/scheme/test/test_scheme_level.py b/scheme/test/test_scheme_level.py --- a/scheme/test/test_scheme_level.py +++ b/scheme/test/test_scheme_level.py @@ -2,6 +2,7 @@ from scheme.ssparser import parse from scheme.execution import ExecutionContext from scheme.object import * +import re # A scheme level macro, which raises an AssertionError at python # level. 
This python level Errors are then reported by pytest.py @@ -19,7 +20,38 @@ raise WrongArgsNumber comment = w_rest.car.to_string() - w_test_result = w_test.eval(ctx) + if isinstance(w_test, W_Pair) and isinstance(w_test.car, W_Symbol): + w_test_oper = w_test.car + test_name = w_test_oper.name + if test_name in ['eq?', 'eqv?', 'equal?']: + w_iter = w_test.cdr + if not isinstance(w_iter, W_Pair): + raise SchemeSyntaxError + w_first = w_iter.car + w_iter = w_iter.cdr + if not isinstance(w_iter, W_Pair): + raise SchemeSyntaxError + w_second = w_iter.car + w_iter = w_iter.cdr + if not w_iter is w_nil: + raise WrongArgsNumber + + w_got = w_first.eval(ctx) + w_expected= w_second.eval(ctx) + + comment += "\n + got: " + w_got.to_repr() + comment += "\n + expected: " + w_expected.to_repr() + + w_compare = ctx.get(test_name) + if not isinstance(w_compare, W_Procedure): + raise SchemeSyntaxError + w_test_result = w_compare.procedure(ctx, + [w_got, w_expected]) + else: + w_test_result = w_test.eval(ctx) + else: + w_test_result = w_test.eval(ctx) + assert w_test_result.to_boolean(), comment return w_undefined @@ -42,6 +74,21 @@ r'(assert #f "Failed assert raises")') py.test.raises(AssertionError, run_with_assert, r'(define foo #f) (+ 1 1) (assert foo "more complex test")') + e = py.test.raises(AssertionError, run_with_assert, + r'(assert (eqv? (+ 9 7) 10))') + assert re.search('got: \d+', str(e.value)) + assert re.search('expected: 10', str(e.value)) + +def test_simple(): + run_with_assert(r""" +(assert (equal? (list 1 2 3) '(1 2 3))) +(assert (equal? (cons 'a 'b) '(a . b))) +(assert (eq? (car (cons 'a 'b)) 'a)) +(assert (eq? (cdr (cons 'a 'b)) 'b)) +(assert (eqv? (+ 1 2) 3)) +(assert (eqv? (* (+ 1 2) 3) 9)) +(assert (eqv? (- (* 2 3) (/ 6 2)) 3)) +""") def test_fac(): run_with_assert(r""" From noreply at buildbot.pypy.org Sat Dec 3 18:22:02 2011 From: noreply at buildbot.pypy.org (boemmels) Date: Sat, 3 Dec 2011 18:22:02 +0100 (CET) Subject: [pypy-commit] lang-scheme default: Turn W_Nil and W_Boolean into singleton classes Message-ID: <20111203172202.8108282A00@wyvern.cs.uni-duesseldorf.de> Author: Juergen Boemmels Branch: Changeset: r27:d86450ba30f6 Date: 2011-12-03 15:37 +0100 http://bitbucket.org/pypy/lang-scheme/changeset/d86450ba30f6/ Log: Turn W_Nil and W_Boolean into singleton classes diff --git a/scheme/object.py b/scheme/object.py --- a/scheme/object.py +++ b/scheme/object.py @@ -102,25 +102,49 @@ w_ellipsis = symbol("...") class W_Boolean(W_Root): + def __new__(cls, val): + if val: + return w_true + else: + return w_false + def __init__(self, val): - self.boolval = bool(val) + pass + +class W_True(W_Boolean): + _w_true = None + def __new__(cls, val): + if cls._w_true is None: + cls._w_true = W_Root.__new__(cls) + return cls._w_true + + def __init__(self, val): + assert val def to_repr(self): - if self.boolval: - return "#t" + return "#t" + to_string = to_repr + +w_true = W_True(True) + +class W_False(W_Boolean): + _w_false = None + def __new__(cls, val): + if cls._w_false is None: + cls._w_false = W_Root.__new__(cls) + return cls._w_false + + def __init__(self, val): + assert not val + + def to_repr(self): return "#f" - to_string = to_repr def to_boolean(self): - return self.boolval + return False - def eqv(self, w_obj): - if isinstance(w_obj, W_Boolean): - return self.boolval is w_obj.boolval - return False - eq = eqv - equal = eqv +w_false = W_False(False) class W_String(W_Root): def __init__(self, val): @@ -267,6 +291,12 @@ pass class W_Nil(W_List): + _w_nil = None + def __new__(cls): + if 
cls._w_nil is None: + cls._w_nil = W_Root.__new__(cls) + return cls._w_nil + def __repr__(self): return "" diff --git a/scheme/test/test_object.py b/scheme/test/test_object.py --- a/scheme/test/test_object.py +++ b/scheme/test/test_object.py @@ -68,6 +68,8 @@ w_nil = W_Nil() assert w_nil.to_boolean() is True # this is Scheme not LISP assert w_nil.to_repr() == "()" + w_nil2 = W_Nil() + assert w_nil is w_nil2 def test_pair(): c1 = W_Integer(1) diff --git a/scheme/test/test_parser.py b/scheme/test/test_parser.py --- a/scheme/test/test_parser.py +++ b/scheme/test/test_parser.py @@ -21,7 +21,7 @@ elif isinstance(w_obj, W_Character): return w_obj.chrval elif isinstance(w_obj, W_Boolean): - return w_obj.boolval + return w_obj.to_boolean() elif isinstance(w_obj, W_Pair): result = [] while not isinstance(w_obj, W_Nil): From noreply at buildbot.pypy.org Sat Dec 3 18:22:03 2011 From: noreply at buildbot.pypy.org (boemmels) Date: Sat, 3 Dec 2011 18:22:03 +0100 (CET) Subject: [pypy-commit] lang-scheme default: Add another helper function Message-ID: <20111203172203.8CA1682A00@wyvern.cs.uni-duesseldorf.de> Author: Juergen Boemmels Branch: Changeset: r28:ebb0242d9706 Date: 2011-12-03 17:52 +0100 http://bitbucket.org/pypy/lang-scheme/changeset/ebb0242d9706/ Log: Add another helper function diff --git a/scheme/object.py b/scheme/object.py --- a/scheme/object.py +++ b/scheme/object.py @@ -634,6 +634,18 @@ return w_cdr +def lst2plst(w_list): + """coverts W_Pair scheme list into a python list() of W_Root""" + lst = [] + w_iter = w_list + while w_iter is not w_nil: + if not isinstance(w_iter, W_Pair): + raise WrongArg(w_list, "List") + lst.append(w_iter.car) + w_iter = w_iter.cdr + + return lst + ## # Continuations ## From noreply at buildbot.pypy.org Sat Dec 3 19:41:12 2011 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 3 Dec 2011 19:41:12 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-reshape-merge: add tests, reimplement reshape so that it can create a copy if necessary, fix bug Message-ID: <20111203184112.6286982A00@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-reshape-merge Changeset: r50089:50916c4a552b Date: 2011-12-02 15:03 +0200 http://bitbucket.org/pypy/pypy/changeset/50916c4a552b/ Log: add tests, reimplement reshape so that it can create a copy if necessary, fix bug diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -98,6 +98,41 @@ endshape[i] = remainder[i] return endshape +def get_shape_from_iterable(space, old_size, w_iterable): + new_size = 0 + new_shape = [] + if not space.issequence_w(w_iterable): + new_size = space.int_w(w_iterable) + if new_size < 0: + new_size = old_size + new_shape = [new_size, ] + else: + neg_dim = -1 + batch = space.listview(w_iterable) + new_size = 1 + if len(batch) < 1: + new_size = 0 + new_shape = [] + i = 0 + for elem in batch: + s = space.int_w(elem) + if s < 0: + if neg_dim >= 0: + raise OperationError(space.w_ValueError, space.wrap( + "can only specify one unknown dimension")) + s = 1 + neg_dim = i + new_size *= s + new_shape.append(s) + i += 1 + if neg_dim >= 0: + new_shape[neg_dim] = old_size / new_size + new_size *= new_shape[neg_dim] + if new_size != old_size: + raise OperationError(space.w_ValueError, + space.wrap("total size of new array must be unchanged")) + return new_shape + #Recalculating strides. 
Find the steps that the iteration does for each #dimension, given the stride and shape. Then try to create a new stride that #fits the new shape, using those steps. If there is a shape/step mismatch @@ -107,7 +142,7 @@ #Return the proper strides for new_shape, or None # if the mapping crosses stepping boundaries - #Assumes that nelems have been matched, len(shape) > 1 for old_shape and + #Assumes that prod(old_shape) ==prod(new_shape), len(old_shape) > 1 and # len(new_shape) > 0 steps = [] last_step = 1 @@ -116,7 +151,7 @@ if old_strides[0] < old_strides[-1]: for i in range(len(old_shape)): steps.append(old_strides[i] / last_step) - last_step = old_shape[i] * old_strides[i] + last_step *= old_shape[i] cur_step = steps[0] n_new_elems_used = 1 n_old_elems_to_use = old_shape[0] @@ -137,7 +172,7 @@ else: for i in range(len(old_shape) - 1, -1, -1): steps.insert(0, old_strides[i] / last_step) - last_step = old_shape[i] * old_strides[i] + last_step *= old_shape[i] cur_step = steps[-1] n_new_elems_used = 1 oldI = -1 @@ -578,40 +613,10 @@ def descr_get_shape(self, space): return space.newtuple([space.wrap(i) for i in self.shape]) + def descr_set_shape(self, space, w_iterable): concrete = self.get_concrete() - new_size = 0 - new_shape = [] - if not space.issequence_w(w_iterable): - new_size = space.int_w(w_iterable) - if new_size < 0: - new_size = self.find_size() - new_shape = [new_size, ] - else: - neg_dim = -1 - batch = space.listview(w_iterable) - new_size = 1 - if len(batch) < 1: - new_size = 0 - new_shape = [] - i = 0 - for elem in batch: - s = space.int_w(elem) - if s < 0: - if neg_dim >= 0: - raise OperationError(space.w_ValueError, space.wrap( - "can only specify one unknown dimension")) - s = 1 - neg_dim = i - new_size *= s - new_shape.append(s) - i += 1 - if neg_dim >= 0: - new_shape[neg_dim] = self.find_size() / new_size - new_size *= new_shape[neg_dim] - if new_size != self.find_size(): - raise OperationError(space.w_ValueError, - space.wrap("total size of new array must be unchanged")) + new_shape = get_shape_from_iterable(space, concrete.find_size(), w_iterable) concrete.setshape(space, new_shape) def descr_get_size(self, space): @@ -869,22 +874,24 @@ def descr_reshape(self, space, w_iterable): """Return a reshaped view into the original array's data """ - new_sig = signature.Signature.find_sig([ - NDimSlice.signature, self.signature, - ]) concrete = self.get_concrete() - #concrete = self - ndims = len(concrete.shape) - strides = [0] * ndims - backstrides = [0] * ndims - shape = [0] * ndims - for i in range(len(concrete.shape)): - strides[i] = concrete.strides[i] - backstrides[i] = concrete.backstrides[i] - shape[i] = concrete.shape[i] - arr = NDimSlice(self, new_sig, self.start, strides, - backstrides, shape) - arr.descr_set_shape(space, w_iterable) + new_shape = get_shape_from_iterable(space, concrete.find_size(), w_iterable) + #Since we got to here, prod(new_shape) == self.size + new_strides = calc_new_strides(new_shape, concrete.shape, concrete.strides) + if new_strides: + #We can create a view, strides somehow match up. 
+ new_sig = signature.Signature.find_sig([ + NDimSlice.signature, self.signature, ]) + ndims = len(new_shape) + new_backstrides = [0] * ndims + for nd in range(ndims): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + arr = NDimSlice(self, new_sig, self.start, new_strides, + new_backstrides, new_shape) + else: + #Create copy with contiguous data + arr = concrete.copy() + arr.set_shape(space, new_shape) return arr def descr_mean(self, space): @@ -984,7 +991,8 @@ return 'Scalar' def setshape(self, space, new_shape): - # XXX shouldn't it raise? + # In order to get here, we already checked that prod(new_shape)==1, + # so in order to have a consistent API, let it go through. pass class VirtualArray(BaseArray): @@ -1183,7 +1191,7 @@ if len(self.shape) < 1: return elif len(self.shape) < 2: - #REVIEWER: this code could be refactored into calc_strides + #TODO: this code could be refactored into calc_strides #but then calc_strides would have to accept a stepping factor strides = [] backstrides = [] diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -160,6 +160,7 @@ def test_calc_new_strides(self): from pypy.module.micronumpy.interp_numarray import calc_new_strides + assert calc_new_strides([2, 4], [4, 2], [4, 2]) == [8, 2] assert calc_new_strides([2, 4, 3], [8, 3], [1, 16]) == [1, 2, 16] assert calc_new_strides([2, 3, 4], [8, 3], [1, 16]) is None assert calc_new_strides([8, 3], [2, 4, 3], [48, 6, 1]) == [6, 1] @@ -346,6 +347,10 @@ assert a.shape == (12, ) exc = raises(ValueError, "a.shape = 10") assert str(exc.value) == "total size of new array must be unchanged" + a = array(3) + a.shape = () + #numpy allows this + a.shape = (1,) def test_reshape(self): from numpypy import array, zeros @@ -369,12 +374,28 @@ assert str(exc.value) == \ "incompatible shape for a non-contiguous array" b = a[::2, :, :].reshape((2, 6)) + assert b.shape == (2, 6) b = arange(20)[1:17:2] b.shape = (4, 2) assert (b == [[1, 3], [5, 7], [9, 11], [13, 15]]).all() b.reshape((2, 4)) assert (b == [[1, 3, 5, 7], [9, 11, 13, 15]]).all() + z=arange(96).reshape((12, -1)) + assert z.shape == (12, 8) + y=z.reshape(4,3,8) + v=y[:,::2,:] + w = y.reshape(96) + u = v.reshape(64) + assert y[1, 2, 1] == z[5, 1] + y[1, 2, 1] = 1000 + #z, y, w, v are views of eachother + assert z[5, 1] == 1000 + assert v[1, 1, 1] == 1000 + assert w[41] == 1000 + #u is not a view, it is a copy! + assert u[25] == 41 + def test_add(self): from numpypy import array a = array(range(5)) From noreply at buildbot.pypy.org Sat Dec 3 19:41:13 2011 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 3 Dec 2011 19:41:13 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-reshape-merge: allow empty shape for Scalars Message-ID: <20111203184113.8D2A882A00@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-reshape-merge Changeset: r50090:0c43b1d3d817 Date: 2011-12-02 15:08 +0200 http://bitbucket.org/pypy/pypy/changeset/0c43b1d3d817/ Log: allow empty shape for Scalars diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -111,7 +111,11 @@ batch = space.listview(w_iterable) new_size = 1 if len(batch) < 1: - new_size = 0 + if old_size ==1: + #Scalars can have an empty size. 
+ new_size = 1 + else: + new_size = 0 new_shape = [] i = 0 for elem in batch: diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -378,8 +378,8 @@ b = arange(20)[1:17:2] b.shape = (4, 2) assert (b == [[1, 3], [5, 7], [9, 11], [13, 15]]).all() - b.reshape((2, 4)) - assert (b == [[1, 3, 5, 7], [9, 11, 13, 15]]).all() + c = b.reshape((2, 4)) + assert (c == [[1, 3, 5, 7], [9, 11, 13, 15]]).all() z=arange(96).reshape((12, -1)) assert z.shape == (12, 8) From noreply at buildbot.pypy.org Sat Dec 3 19:41:14 2011 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 3 Dec 2011 19:41:14 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-reshape-merge: passing more tests Message-ID: <20111203184114.B681482A00@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-reshape-merge Changeset: r50091:131fbfea066a Date: 2011-12-03 18:24 +0200 http://bitbucket.org/pypy/pypy/changeset/131fbfea066a/ Log: passing more tests diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -101,7 +101,7 @@ def get_shape_from_iterable(space, old_size, w_iterable): new_size = 0 new_shape = [] - if not space.issequence_w(w_iterable): + if space.isinstance_w(w_iterable, space.w_int): new_size = space.int_w(w_iterable) if new_size < 0: new_size = old_size @@ -109,6 +109,9 @@ else: neg_dim = -1 batch = space.listview(w_iterable) + #Allow for shape = (1,2,3) or shape = ((1,2,3)) + if len(batch)>1 and space.issequence_w(batch[0]): + batch = space.listview(batch[0]) new_size = 1 if len(batch) < 1: if old_size ==1: @@ -875,11 +878,11 @@ return NDimSlice(self, new_sig, start, strides[:], backstrides[:], shape[:]) - def descr_reshape(self, space, w_iterable): + def descr_reshape(self, space, w_args): """Return a reshaped view into the original array's data """ concrete = self.get_concrete() - new_shape = get_shape_from_iterable(space, concrete.find_size(), w_iterable) + new_shape = get_shape_from_iterable(space, concrete.find_size(), w_args) #Since we got to here, prod(new_shape) == self.size new_strides = calc_new_strides(new_shape, concrete.shape, concrete.strides) if new_strides: @@ -895,7 +898,7 @@ else: #Create copy with contiguous data arr = concrete.copy() - arr.set_shape(space, new_shape) + arr.setshape(space, new_shape) return arr def descr_mean(self, space): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -209,8 +209,8 @@ assert a[2] == 4 def test_copy(self): - from numpypy import array - a = array(range(5)) + from numpypy import arange, array + a = arange(5) b = a.copy() for i in xrange(5): assert b[i] == a[i] @@ -383,7 +383,7 @@ z=arange(96).reshape((12, -1)) assert z.shape == (12, 8) - y=z.reshape(4,3,8) + y=z.reshape((4,3,8)) v=y[:,::2,:] w = y.reshape(96) u = v.reshape(64) @@ -395,7 +395,13 @@ assert w[41] == 1000 #u is not a view, it is a copy! assert u[25] == 41 - + + def test_reshape_varargs(self): + skip("How do I do varargs in rpython? 
reshape should accept a" + " variable number of arguments") + z=arange(96).reshape(12, -1) + y=z.reshape(4,3,8) + def test_add(self): from numpypy import array a = array(range(5)) From noreply at buildbot.pypy.org Sat Dec 3 19:41:15 2011 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 3 Dec 2011 19:41:15 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-reshape-merge: add (failing) test for slice copy Message-ID: <20111203184115.DD7CF82A00@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-reshape-merge Changeset: r50092:4fa5cac612c6 Date: 2011-12-03 18:25 +0200 http://bitbucket.org/pypy/pypy/changeset/4fa5cac612c6/ Log: add (failing) test for slice copy diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -220,6 +220,11 @@ a = array(1) assert a.copy() == a + a = arange(8) + b = a[::2] + c = b.copy() + assert (c == b).all() + def test_iterator_init(self): from numpypy import array a = array(range(5)) From noreply at buildbot.pypy.org Sat Dec 3 19:41:17 2011 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 3 Dec 2011 19:41:17 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-reshape-merge: Fix copy for slices Message-ID: <20111203184117.12FDD82A00@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-reshape-merge Changeset: r50093:7cf074391cf8 Date: 2011-12-03 18:39 +0200 http://bitbucket.org/pypy/pypy/changeset/7cf074391cf8/ Log: Fix copy for slices diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1282,9 +1282,11 @@ def copy(self): array = NDimArray(self.size, self.shape[:], self.find_dtype()) iter = self.start_iter() + a_iter = array.start_iter() while not iter.done(): - array.setitem(iter.offset, self.getitem(iter.offset)) + array.setitem(a_iter.offset, self.getitem(iter.offset)) iter = iter.next(len(self.shape)) + a_iter = a_iter.next(len(array.shape)) return array class NDimArray(BaseArray): From noreply at buildbot.pypy.org Sat Dec 3 19:41:18 2011 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 3 Dec 2011 19:41:18 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-reshape-merge: cleanup, fix incorrect tests; ready for review Message-ID: <20111203184118.44E7982A00@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-reshape-merge Changeset: r50094:ff635955ea46 Date: 2011-12-03 20:40 +0200 http://bitbucket.org/pypy/pypy/changeset/ff635955ea46/ Log: cleanup, fix incorrect tests; ready for review diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -110,11 +110,11 @@ neg_dim = -1 batch = space.listview(w_iterable) #Allow for shape = (1,2,3) or shape = ((1,2,3)) - if len(batch)>1 and space.issequence_w(batch[0]): + if len(batch) > 1 and space.issequence_w(batch[0]): batch = space.listview(batch[0]) new_size = 1 if len(batch) < 1: - if old_size ==1: + if old_size == 1: #Scalars can have an empty size. 
new_size = 1 else: @@ -208,7 +208,7 @@ w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_item_or_iterable) dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) return scalar_w(space, dtype, w_item_or_iterable) if w_order is None: @@ -581,6 +581,7 @@ return False i = i.next(shapelen) return True + def descr_all(self, space): return space.wrap(self._all()) @@ -596,6 +597,7 @@ return True i = i.next(shapelen) return False + def descr_any(self, space): return space.wrap(self._any()) @@ -620,10 +622,10 @@ def descr_get_shape(self, space): return space.newtuple([space.wrap(i) for i in self.shape]) - def descr_set_shape(self, space, w_iterable): concrete = self.get_concrete() - new_shape = get_shape_from_iterable(space, concrete.find_size(), w_iterable) + new_shape = get_shape_from_iterable(space, + concrete.find_size(), w_iterable) concrete.setshape(space, new_shape) def descr_get_size(self, space): @@ -882,9 +884,11 @@ """Return a reshaped view into the original array's data """ concrete = self.get_concrete() - new_shape = get_shape_from_iterable(space, concrete.find_size(), w_args) + new_shape = get_shape_from_iterable(space, + concrete.find_size(), w_args) #Since we got to here, prod(new_shape) == self.size - new_strides = calc_new_strides(new_shape, concrete.shape, concrete.strides) + new_strides = calc_new_strides(new_shape, + concrete.shape, concrete.strides) if new_strides: #We can create a view, strides somehow match up. new_sig = signature.Signature.find_sig([ diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -159,13 +159,12 @@ [5, 2], [4, 3, 5, 2]) == [4, 3, 5, 2] def test_calc_new_strides(self): - from pypy.module.micronumpy.interp_numarray import calc_new_strides + from pypy.module.micronumpy.interp_numarray import calc_new_strides assert calc_new_strides([2, 4], [4, 2], [4, 2]) == [8, 2] assert calc_new_strides([2, 4, 3], [8, 3], [1, 16]) == [1, 2, 16] assert calc_new_strides([2, 3, 4], [8, 3], [1, 16]) is None - assert calc_new_strides([8, 3], [2, 4, 3], [48, 6, 1]) == [6, 1] assert calc_new_strides([24], [2, 4, 3], [48, 6, 1]) is None - assert calc_new_strides([24], [2, 4, 3], [48, 6, 2]) is None + assert calc_new_strides([24], [2, 4, 3], [24, 6, 2]) == [2] class AppTestNumArray(BaseNumpyAppTest): def test_type(self): @@ -386,10 +385,10 @@ c = b.reshape((2, 4)) assert (c == [[1, 3, 5, 7], [9, 11, 13, 15]]).all() - z=arange(96).reshape((12, -1)) + z = arange(96).reshape((12, -1)) assert z.shape == (12, 8) - y=z.reshape((4,3,8)) - v=y[:,::2,:] + y = z.reshape((4, 3, 8)) + v = y[:, ::2, :] w = y.reshape(96) u = v.reshape(64) assert y[1, 2, 1] == z[5, 1] @@ -399,14 +398,14 @@ assert v[1, 1, 1] == 1000 assert w[41] == 1000 #u is not a view, it is a copy! - assert u[25] == 41 + assert u[25] == 41 def test_reshape_varargs(self): skip("How do I do varargs in rpython? 
reshape should accept a" " variable number of arguments") - z=arange(96).reshape(12, -1) - y=z.reshape(4,3,8) - + z = arange(96).reshape(12, -1) + y = z.reshape(4, 3, 8) + def test_add(self): from numpypy import array a = array(range(5)) From noreply at buildbot.pypy.org Sat Dec 3 20:50:49 2011 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 3 Dec 2011 20:50:49 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-reshape-merge: add app-level reshape with docstrings Message-ID: <20111203195049.36D5C82A00@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-reshape-merge Changeset: r50095:25cacab6ecdb Date: 2011-12-03 21:49 +0200 http://bitbucket.org/pypy/pypy/changeset/25cacab6ecdb/ Log: add app-level reshape with docstrings diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -61,4 +61,5 @@ 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', 'arange': 'app_numpy.arange', + 'reshape': 'app_numpy.reshape', } diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -36,3 +36,39 @@ j += 1 i += step return arr + +def reshape(a, shape): + '''reshape(a, newshape) + Gives a new shape to an array without changing its data. + + Parameters + ---------- + a : array_like + Array to be reshaped. + newshape : int or tuple of ints + The new shape should be compatible with the original shape. If + an integer, then the result will be a 1-D array of that length. + One shape dimension can be -1. In this case, the value is inferred + from the length of the array and remaining dimensions. + + Returns + ------- + reshaped_array : ndarray + This will be a new view object if possible; otherwise, it will + be a copy. + + + See Also + -------- + ndarray.reshape : Equivalent method. + + Notes + ----- + + It is not always possible to change the shape of an array without + copying the data. If you want an error to be raise if the data is copied, + you should assign the new shape to the shape attribute of the array +''' + if not hasattr(a, 'reshape'): + a = numpypy.array(a) + return a.reshape(shape) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -881,8 +881,17 @@ shape[:]) def descr_reshape(self, space, w_args): - """Return a reshaped view into the original array's data - """ + """reshape(...) + a.reshape(shape) + + Returns an array containing the same data with a new shape. + + Refer to `%s.reshape` for full documentation. 
+ + See Also + -------- + numpy.reshape : equivalent function +""" % 'numpypy' concrete = self.get_concrete() new_shape = get_shape_from_iterable(space, concrete.find_size(), w_args) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1218,3 +1218,14 @@ a = arange(0, 0.8, 0.1) assert len(a) == 8 assert arange(False, True, True).dtype is dtype(int) + + +class AppTestRanges(BaseNumpyAppTest): + def test_app_reshape(self): + from numpypy import arange, array, dtype, reshape + a = arange(12) + b = reshape(a, (3, 4)) + assert b.shape == (3, 4) + a = range(12) + b = reshape(a, (3, 4)) + assert b.shape == (3, 4) From notifications-noreply at bitbucket.org Sat Dec 3 21:10:19 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Sat, 03 Dec 2011 20:10:19 -0000 Subject: [pypy-commit] [COMMENT] Pull request #1 for pypy/benchmarks: Added GZip benchmark Message-ID: <20111203201019.23757.28823@bitbucket03.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/benchmarks/pull-request/1/added-gzip-benchmark#comment-1278 Jonas Haag (jonashaag) said: bump -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. From noreply at buildbot.pypy.org Sat Dec 3 22:14:23 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Dec 2011 22:14:23 +0100 (CET) Subject: [pypy-commit] pypy default: Add two names to __all__. Message-ID: <20111203211423.6357682A00@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50096:aca2c0a862b8 Date: 2011-12-03 18:31 +0100 http://bitbucket.org/pypy/pypy/changeset/aca2c0a862b8/ Log: Add two names to __all__. diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -25,7 +25,7 @@ __all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee'] + 'takewhile', 'tee', 'compress', 'product'] try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f From noreply at buildbot.pypy.org Sat Dec 3 22:14:24 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Dec 2011 22:14:24 +0100 (CET) Subject: [pypy-commit] pypy default: Try out the approach described on issue945 on the itertools module. Message-ID: <20111203211424.8DB5C82A00@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50097:d5a684ab8934 Date: 2011-12-03 18:32 +0100 http://bitbucket.org/pypy/pypy/changeset/d5a684ab8934/ Log: Try out the approach described on issue945 on the itertools module. diff --git a/lib_pypy/itertools.py b/lib_pypy/_itertools.py rename from lib_pypy/itertools.py rename to lib_pypy/_itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/_itertools.py @@ -1,5 +1,5 @@ -# Note that PyPy contains also a built-in module 'itertools' which will -# hide this one if compiled in. +# Note that PyPy contains also a built-in implementation of 'itertools'; +# when translated with default options, this one is not used. """Functional tools for creating and using iterators. 
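Illustrative aside, not part of the changeset above: the issue945 approach only becomes clear once several changesets are combined, so here is a minimal sketch of the resulting import indirection. The interp-level itertools module is installed under the private name __builtin_itertools (see the applevel_name change just below), and the importable lib_pypy/itertools.py becomes a thin pure-Python stub that re-exports it, falling back to the renamed pure-Python _itertools.py when the built-in module is not compiled in. The stub sketched here mirrors the one re-added in changeset 03e42e96479d further down.

    # lib_pypy/itertools.py -- thin indirection stub (sketch)
    try:
        # preferred: the interp-level implementation, when compiled in
        from __builtin_itertools import *
        from __builtin_itertools import __doc__
    except ImportError:
        # fallback: the pure-Python implementation (renamed to _itertools.py)
        from _itertools import *
        from _itertools import __doc__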
diff --git a/pypy/module/itertools/__init__.py b/pypy/module/itertools/__init__.py --- a/pypy/module/itertools/__init__.py +++ b/pypy/module/itertools/__init__.py @@ -23,6 +23,7 @@ dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v) """ + applevel_name = '__builtin_itertools' interpleveldefs = { 'chain' : 'interp_itertools.W_Chain', From noreply at buildbot.pypy.org Sat Dec 3 22:14:25 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Dec 2011 22:14:25 +0100 (CET) Subject: [pypy-commit] pypy default: Forgot to re-add this stub file. Message-ID: <20111203211425.B155982A00@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50098:03e42e96479d Date: 2011-12-03 18:38 +0100 http://bitbucket.org/pypy/pypy/changeset/03e42e96479d/ Log: Forgot to re-add this stub file. diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py new file mode 100644 --- /dev/null +++ b/lib_pypy/itertools.py @@ -0,0 +1,6 @@ +try: + from __builtin_itertools import * + from __builtin_itertools import __doc__ +except ImportError: + from _itertools import * + from _itertools import __doc__ From noreply at buildbot.pypy.org Sat Dec 3 22:14:27 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Dec 2011 22:14:27 +0100 (CET) Subject: [pypy-commit] pypy builtin-module: issue945 in-progress. The goal is to rename all modules that in pypy Message-ID: <20111203211427.1371682A00@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: builtin-module Changeset: r50099:34a3d737b0d4 Date: 2011-12-03 22:11 +0100 http://bitbucket.org/pypy/pypy/changeset/34a3d737b0d4/ Log: issue945 in-progress. The goal is to rename all modules that in pypy are built-in, but in cpython are merely extension modules. diff --git a/lib_pypy/_collections.py b/lib_pypy/__collections.py copy from lib_pypy/_collections.py copy to lib_pypy/__collections.py diff --git a/lib_pypy/array.py b/lib_pypy/_array.py copy from lib_pypy/array.py copy to lib_pypy/_array.py --- a/lib_pypy/array.py +++ b/lib_pypy/_array.py @@ -1,29 +1,3 @@ -"""This module defines an object type which can efficiently represent -an array of basic values: characters, integers, floating point -numbers. Arrays are sequence types and behave very much like lists, -except that the type of objects stored in them is constrained. The -type is specified at object creation time by using a type code, which -is a single character. 
The following type codes are defined: - - Type code C Type Minimum size in bytes - 'c' character 1 - 'b' signed integer 1 - 'B' unsigned integer 1 - 'u' Unicode character 2 - 'h' signed integer 2 - 'H' unsigned integer 2 - 'i' signed integer 2 - 'I' unsigned integer 2 - 'l' signed integer 4 - 'L' unsigned integer 4 - 'f' floating point 4 - 'd' floating point 8 - -The constructor is: - -array(typecode [, initializer]) -- create a new array -""" - from struct import calcsize, pack, pack_into, unpack_from import operator diff --git a/lib_pypy/_bisect.py b/lib_pypy/_bisect.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_bisect.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "_bisect" shadows +# any file _bisect.py that would be found in the user dirs +from __builtin__bisect import * diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -1,425 +1,6 @@ -"""High performance data structures -""" -# -# Copied and completed from the sandbox of CPython -# (nondist/sandbox/collections/pydeque.py rev 1.1, Raymond Hettinger) -# - -import operator +# indirection needed; otherwise the built-in module "_collections" shadows +# any file _collections.py that would be found in the user dirs try: - from thread import get_ident as _thread_ident + from __builtin__collections import * except ImportError: - def _thread_ident(): - return -1 - - -n = 30 -LFTLNK = n -RGTLNK = n+1 -BLOCKSIZ = n+2 - -# The deque's size limit is d.maxlen. The limit can be zero or positive, or -# None. After an item is added to a deque, we check to see if the size has -# grown past the limit. If it has, we get the size back down to the limit by -# popping an item off of the opposite end. The methods that can trigger this -# are append(), appendleft(), extend(), and extendleft(). 
- -class deque(object): - - def __new__(cls, iterable=(), *args, **kw): - self = super(deque, cls).__new__(cls, *args, **kw) - self.clear() - return self - - def __init__(self, iterable=(), maxlen=None): - self.clear() - if maxlen is not None: - if maxlen < 0: - raise ValueError("maxlen must be non-negative") - self._maxlen = maxlen - add = self.append - for elem in iterable: - add(elem) - - @property - def maxlen(self): - return self._maxlen - - def clear(self): - self.right = self.left = [None] * BLOCKSIZ - self.rightndx = n//2 # points to last written element - self.leftndx = n//2+1 - self.length = 0 - self.state = 0 - - def append(self, x): - self.state += 1 - self.rightndx += 1 - if self.rightndx == n: - newblock = [None] * BLOCKSIZ - self.right[RGTLNK] = newblock - newblock[LFTLNK] = self.right - self.right = newblock - self.rightndx = 0 - self.length += 1 - self.right[self.rightndx] = x - if self.maxlen is not None and self.length > self.maxlen: - self.popleft() - - def appendleft(self, x): - self.state += 1 - self.leftndx -= 1 - if self.leftndx == -1: - newblock = [None] * BLOCKSIZ - self.left[LFTLNK] = newblock - newblock[RGTLNK] = self.left - self.left = newblock - self.leftndx = n-1 - self.length += 1 - self.left[self.leftndx] = x - if self.maxlen is not None and self.length > self.maxlen: - self.pop() - - def extend(self, iterable): - if iterable is self: - iterable = list(iterable) - for elem in iterable: - self.append(elem) - - def extendleft(self, iterable): - if iterable is self: - iterable = list(iterable) - for elem in iterable: - self.appendleft(elem) - - def pop(self): - if self.left is self.right and self.leftndx > self.rightndx: - raise IndexError, "pop from an empty deque" - x = self.right[self.rightndx] - self.right[self.rightndx] = None - self.length -= 1 - self.rightndx -= 1 - self.state += 1 - if self.rightndx == -1: - prevblock = self.right[LFTLNK] - if prevblock is None: - # the deque has become empty; recenter instead of freeing block - self.rightndx = n//2 - self.leftndx = n//2+1 - else: - prevblock[RGTLNK] = None - self.right[LFTLNK] = None - self.right = prevblock - self.rightndx = n-1 - return x - - def popleft(self): - if self.left is self.right and self.leftndx > self.rightndx: - raise IndexError, "pop from an empty deque" - x = self.left[self.leftndx] - self.left[self.leftndx] = None - self.length -= 1 - self.leftndx += 1 - self.state += 1 - if self.leftndx == n: - prevblock = self.left[RGTLNK] - if prevblock is None: - # the deque has become empty; recenter instead of freeing block - self.rightndx = n//2 - self.leftndx = n//2+1 - else: - prevblock[LFTLNK] = None - self.left[RGTLNK] = None - self.left = prevblock - self.leftndx = 0 - return x - - def count(self, value): - c = 0 - for item in self: - if item == value: - c += 1 - return c - - def remove(self, value): - # Need to be defensive for mutating comparisons - for i in range(len(self)): - if self[i] == value: - del self[i] - return - raise ValueError("deque.remove(x): x not in deque") - - def rotate(self, n=1): - length = len(self) - if length == 0: - return - halflen = (length+1) >> 1 - if n > halflen or n < -halflen: - n %= length - if n > halflen: - n -= length - elif n < -halflen: - n += length - while n > 0: - self.appendleft(self.pop()) - n -= 1 - while n < 0: - self.append(self.popleft()) - n += 1 - - def reverse(self): - "reverse *IN PLACE*" - leftblock = self.left - rightblock = self.right - leftindex = self.leftndx - rightindex = self.rightndx - for i in range(self.length // 2): - # 
Validate that pointers haven't met in the middle - assert leftblock != rightblock or leftindex < rightindex - - # Swap - (rightblock[rightindex], leftblock[leftindex]) = ( - leftblock[leftindex], rightblock[rightindex]) - - # Advance left block/index pair - leftindex += 1 - if leftindex == n: - leftblock = leftblock[RGTLNK] - assert leftblock is not None - leftindex = 0 - - # Step backwards with the right block/index pair - rightindex -= 1 - if rightindex == -1: - rightblock = rightblock[LFTLNK] - assert rightblock is not None - rightindex = n - 1 - - def __repr__(self): - threadlocalattr = '__repr' + str(_thread_ident()) - if threadlocalattr in self.__dict__: - return 'deque([...])' - else: - self.__dict__[threadlocalattr] = True - try: - if self.maxlen is not None: - return 'deque(%r, maxlen=%s)' % (list(self), self.maxlen) - else: - return 'deque(%r)' % (list(self),) - finally: - del self.__dict__[threadlocalattr] - - def __iter__(self): - return deque_iterator(self, self._iter_impl) - - def _iter_impl(self, original_state, giveup): - if self.state != original_state: - giveup() - block = self.left - while block: - l, r = 0, n - if block is self.left: - l = self.leftndx - if block is self.right: - r = self.rightndx + 1 - for elem in block[l:r]: - yield elem - if self.state != original_state: - giveup() - block = block[RGTLNK] - - def __reversed__(self): - return deque_iterator(self, self._reversed_impl) - - def _reversed_impl(self, original_state, giveup): - if self.state != original_state: - giveup() - block = self.right - while block: - l, r = 0, n - if block is self.left: - l = self.leftndx - if block is self.right: - r = self.rightndx + 1 - for elem in reversed(block[l:r]): - yield elem - if self.state != original_state: - giveup() - block = block[LFTLNK] - - def __len__(self): - #sum = 0 - #block = self.left - #while block: - # sum += n - # block = block[RGTLNK] - #return sum + self.rightndx - self.leftndx + 1 - n - return self.length - - def __getref(self, index): - if index >= 0: - block = self.left - while block: - l, r = 0, n - if block is self.left: - l = self.leftndx - if block is self.right: - r = self.rightndx + 1 - span = r-l - if index < span: - return block, l+index - index -= span - block = block[RGTLNK] - else: - block = self.right - while block: - l, r = 0, n - if block is self.left: - l = self.leftndx - if block is self.right: - r = self.rightndx + 1 - negative_span = l-r - if index >= negative_span: - return block, r+index - index -= negative_span - block = block[LFTLNK] - raise IndexError("deque index out of range") - - def __getitem__(self, index): - block, index = self.__getref(index) - return block[index] - - def __setitem__(self, index, value): - block, index = self.__getref(index) - block[index] = value - - def __delitem__(self, index): - length = len(self) - if index >= 0: - if index >= length: - raise IndexError("deque index out of range") - self.rotate(-index) - self.popleft() - self.rotate(index) - else: - index = ~index - if index >= length: - raise IndexError("deque index out of range") - self.rotate(index) - self.pop() - self.rotate(-index) - - def __reduce_ex__(self, proto): - return type(self), (list(self), self.maxlen) - - def __hash__(self): - raise TypeError, "deque objects are unhashable" - - def __copy__(self): - return self.__class__(self, self.maxlen) - - # XXX make comparison more efficient - def __eq__(self, other): - if isinstance(other, deque): - return list(self) == list(other) - else: - return NotImplemented - - def __ne__(self, other): - 
if isinstance(other, deque): - return list(self) != list(other) - else: - return NotImplemented - - def __lt__(self, other): - if isinstance(other, deque): - return list(self) < list(other) - else: - return NotImplemented - - def __le__(self, other): - if isinstance(other, deque): - return list(self) <= list(other) - else: - return NotImplemented - - def __gt__(self, other): - if isinstance(other, deque): - return list(self) > list(other) - else: - return NotImplemented - - def __ge__(self, other): - if isinstance(other, deque): - return list(self) >= list(other) - else: - return NotImplemented - - def __iadd__(self, other): - self.extend(other) - return self - -class deque_iterator(object): - - def __init__(self, deq, itergen): - self.counter = len(deq) - def giveup(): - self.counter = 0 - raise RuntimeError, "deque mutated during iteration" - self._gen = itergen(deq.state, giveup) - - def next(self): - res = self._gen.next() - self.counter -= 1 - return res - - def __iter__(self): - return self - -class defaultdict(dict): - - def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] - args = args[1:] - super(defaultdict, self).__init__(*args, **kwds) - - def __missing__(self, key): - # from defaultdict docs - if self.default_factory is None: - raise KeyError(key) - self[key] = value = self.default_factory() - return value - - def __repr__(self, recurse=set()): - if id(self) in recurse: - return "defaultdict(...)" - try: - recurse.add(id(self)) - return "defaultdict(%s, %s)" % (repr(self.default_factory), super(defaultdict, self).__repr__()) - finally: - recurse.remove(id(self)) - - def copy(self): - return type(self)(self, default_factory=self.default_factory) - - def __copy__(self): - return self.copy() - - def __reduce__(self): - """ - __reduce__ must return a 5-tuple as follows: - - - factory function - - tuple of args for the factory function - - additional state (here None) - - sequence iterator (here None) - - dictionary iterator (yielding successive (key, value) pairs - - This API is used by pickle.py and copy.py. 
- """ - return (type(self), (self.default_factory,), None, None, self.iteritems()) - + from __collections import * diff --git a/lib_pypy/_continuation.py b/lib_pypy/_continuation.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_continuation.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "_continuation" shadows +# any file _continuation.py that would be found in the user dirs +from __builtin__continuation import * diff --git a/lib_pypy/_io.py b/lib_pypy/_io.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_io.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "_io" shadows +# any file _io.py that would be found in the user dirs +from __builtin__io import * diff --git a/lib_pypy/_random.py b/lib_pypy/_random.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_random.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "_random" shadows +# any file _random.py that would be found in the user dirs +from __builtin__random import * diff --git a/lib_pypy/_socket.py b/lib_pypy/_socket.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_socket.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "_socket" shadows +# any file _socket.py that would be found in the user dirs +from __builtin__socket import * diff --git a/lib_pypy/_ssl.py b/lib_pypy/_ssl.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_ssl.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "_ssl" shadows +# any file _ssl.py that would be found in the user dirs +from __builtin__ssl import * diff --git a/lib_pypy/array.py b/lib_pypy/array.py --- a/lib_pypy/array.py +++ b/lib_pypy/array.py @@ -24,508 +24,9 @@ array(typecode [, initializer]) -- create a new array """ -from struct import calcsize, pack, pack_into, unpack_from -import operator - -# the buffer-like object to use internally: trying from -# various places in order... +# indirection needed; otherwise the built-in module "array" shadows +# any file array.py that would be found in the user dirs try: - import _rawffi # a reasonable implementation based - _RAWARRAY = _rawffi.Array('c') # on raw_malloc, and providing a - def bytebuffer(size): # real address - return _RAWARRAY(size, autofree=True) - def getbufaddress(buf): - return buf.buffer + from __builtin_array import * except ImportError: - try: - from __pypy__ import bytebuffer # a reasonable implementation - def getbufaddress(buf): # compatible with oo backends, - return 0 # but no address - except ImportError: - # not running on PyPy. Fall back to ctypes... - import ctypes - bytebuffer = ctypes.create_string_buffer - def getbufaddress(buf): - voidp = ctypes.cast(ctypes.pointer(buf), ctypes.c_void_p) - return voidp.value - -# ____________________________________________________________ - -TYPECODES = "cbBuhHiIlLfd" - -class array(object): - """array(typecode [, initializer]) -> array - - Return a new array whose items are restricted by typecode, and - initialized from the optional initializer value, which must be a list, - string. or iterable over elements of the appropriate type. - - Arrays represent basic values and behave very much like lists, except - the type of objects stored in them is constrained. 
- - Methods: - - append() -- append a new item to the end of the array - buffer_info() -- return information giving the current memory info - byteswap() -- byteswap all the items of the array - count() -- return number of occurences of an object - extend() -- extend array by appending multiple elements from an iterable - fromfile() -- read items from a file object - fromlist() -- append items from the list - fromstring() -- append items from the string - index() -- return index of first occurence of an object - insert() -- insert a new item into the array at a provided position - pop() -- remove and return item (default last) - read() -- DEPRECATED, use fromfile() - remove() -- remove first occurence of an object - reverse() -- reverse the order of the items in the array - tofile() -- write all items to a file object - tolist() -- return the array converted to an ordinary list - tostring() -- return the array converted to a string - write() -- DEPRECATED, use tofile() - - Attributes: - - typecode -- the typecode character used to create the array - itemsize -- the length in bytes of one array item - """ - __slots__ = ["typecode", "itemsize", "_data", "_descriptor", "__weakref__"] - - def __new__(cls, typecode, initializer=[], **extrakwds): - self = object.__new__(cls) - if cls is array and extrakwds: - raise TypeError("array() does not take keyword arguments") - if not isinstance(typecode, str) or len(typecode) != 1: - raise TypeError( - "array() argument 1 must be char, not %s" % type(typecode)) - if typecode not in TYPECODES: - raise ValueError( - "bad typecode (must be one of %s)" % ', '.join(TYPECODES)) - self._data = bytebuffer(0) - self.typecode = typecode - self.itemsize = calcsize(typecode) - if isinstance(initializer, list): - self.fromlist(initializer) - elif isinstance(initializer, str): - self.fromstring(initializer) - elif isinstance(initializer, unicode) and self.typecode == "u": - self.fromunicode(initializer) - else: - self.extend(initializer) - return self - - def _clear(self): - self._data = bytebuffer(0) - - ##### array-specific operations - - def fromfile(self, f, n): - """Read n objects from the file object f and append them to the end of - the array. Also called as read.""" - if not isinstance(f, file): - raise TypeError("arg1 must be open file") - size = self.itemsize * n - item = f.read(size) - if len(item) < size: - raise EOFError("not enough items in file") - self.fromstring(item) - - def fromlist(self, l): - """Append items to array from list.""" - if not isinstance(l, list): - raise TypeError("arg must be list") - self._fromiterable(l) - - def fromstring(self, s): - """Appends items from the string, interpreting it as an array of machine - values, as if it had been read from a file using the fromfile() - method.""" - if isinstance(s, unicode): - s = str(s) - self._frombuffer(s) - - def _frombuffer(self, s): - length = len(s) - if length % self.itemsize != 0: - raise ValueError("string length not a multiple of item size") - boundary = len(self._data) - newdata = bytebuffer(boundary + length) - newdata[:boundary] = self._data - newdata[boundary:] = s - self._data = newdata - - def fromunicode(self, ustr): - """Extends this array with data from the unicode string ustr. The array - must be a type 'u' array; otherwise a ValueError is raised. 
Use - array.fromstring(ustr.encode(...)) to append Unicode data to an array of - some other type.""" - if not self.typecode == "u": - raise ValueError( - "fromunicode() may only be called on type 'u' arrays") - # XXX the following probable bug is not emulated: - # CPython accepts a non-unicode string or a buffer, and then - # behaves just like fromstring(), except that it strangely truncates - # string arguments at multiples of the unicode byte size. - # Let's only accept unicode arguments for now. - if not isinstance(ustr, unicode): - raise TypeError("fromunicode() argument should probably be " - "a unicode string") - # _frombuffer() does the currect thing using - # the buffer behavior of unicode objects - self._frombuffer(buffer(ustr)) - - def tofile(self, f): - """Write all items (as machine values) to the file object f. Also - called as write.""" - if not isinstance(f, file): - raise TypeError("arg must be open file") - f.write(self.tostring()) - - def tolist(self): - """Convert array to an ordinary list with the same items.""" - count = len(self._data) // self.itemsize - return list(unpack_from('%d%s' % (count, self.typecode), self._data)) - - def tostring(self): - return self._data[:] - - def __buffer__(self): - return buffer(self._data) - - def tounicode(self): - """Convert the array to a unicode string. The array must be a type 'u' - array; otherwise a ValueError is raised. Use array.tostring().decode() - to obtain a unicode string from an array of some other type.""" - if self.typecode != "u": - raise ValueError("tounicode() may only be called on type 'u' arrays") - # XXX performance is not too good - return u"".join(self.tolist()) - - def byteswap(self): - """Byteswap all items of the array. If the items in the array are not - 1, 2, 4, or 8 bytes in size, RuntimeError is raised.""" - if self.itemsize not in [1, 2, 4, 8]: - raise RuntimeError("byteswap not supported for this array") - # XXX slowish - itemsize = self.itemsize - bytes = self._data - for start in range(0, len(bytes), itemsize): - stop = start + itemsize - bytes[start:stop] = bytes[start:stop][::-1] - - def buffer_info(self): - """Return a tuple (address, length) giving the current memory address - and the length in items of the buffer used to hold array's contents. The - length should be multiplied by the itemsize attribute to calculate the - buffer length in bytes. 
On PyPy the address might be meaningless - (returned as 0), depending on the available modules.""" - return (getbufaddress(self._data), len(self)) - - read = fromfile - - write = tofile - - ##### general object protocol - - def __repr__(self): - if len(self._data) == 0: - return "array('%s')" % self.typecode - elif self.typecode == "c": - return "array('%s', %s)" % (self.typecode, repr(self.tostring())) - elif self.typecode == "u": - return "array('%s', %s)" % (self.typecode, repr(self.tounicode())) - else: - return "array('%s', %s)" % (self.typecode, repr(self.tolist())) - - def __copy__(self): - a = array(self.typecode) - a._data = bytebuffer(len(self._data)) - a._data[:] = self._data - return a - - def __eq__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) == buffer(other._data) - else: - return self.tolist() == other.tolist() - - def __ne__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) != buffer(other._data) - else: - return self.tolist() != other.tolist() - - def __lt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) < buffer(other._data) - else: - return self.tolist() < other.tolist() - - def __gt__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) > buffer(other._data) - else: - return self.tolist() > other.tolist() - - def __le__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) <= buffer(other._data) - else: - return self.tolist() <= other.tolist() - - def __ge__(self, other): - if not isinstance(other, array): - return NotImplemented - if self.typecode == 'c': - return buffer(self._data) >= buffer(other._data) - else: - return self.tolist() >= other.tolist() - - def __reduce__(self): - dict = getattr(self, '__dict__', None) - data = self.tostring() - if data: - initargs = (self.typecode, data) - else: - initargs = (self.typecode,) - return (type(self), initargs, dict) - - ##### list methods - - def append(self, x): - """Append new value x to the end of the array.""" - self._frombuffer(pack(self.typecode, x)) - - def count(self, x): - """Return number of occurences of x in the array.""" - return operator.countOf(self, x) - - def extend(self, iterable): - """Append items to the end of the array.""" - if isinstance(iterable, array) \ - and not self.typecode == iterable.typecode: - raise TypeError("can only extend with array of same kind") - self._fromiterable(iterable) - - def index(self, x): - """Return index of first occurence of x in the array.""" - return operator.indexOf(self, x) - - def insert(self, i, x): - """Insert a new item x into the array before position i.""" - seqlength = len(self) - if i < 0: - i += seqlength - if i < 0: - i = 0 - elif i > seqlength: - i = seqlength - boundary = i * self.itemsize - data = pack(self.typecode, x) - newdata = bytebuffer(len(self._data) + len(data)) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:boundary+self.itemsize] = data - newdata[boundary+self.itemsize:] = self._data[boundary:] - self._data = newdata - - def pop(self, i=-1): - """Return the i-th element and delete it from the array. 
i defaults to - -1.""" - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - result = unpack_from(self.typecode, self._data, boundary)[0] - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - return result - - def remove(self, x): - """Remove the first occurence of x in the array.""" - self.pop(self.index(x)) - - def reverse(self): - """Reverse the order of the items in the array.""" - lst = self.tolist() - lst.reverse() - self._clear() - self.fromlist(lst) - - ##### list protocol - - def __len__(self): - return len(self._data) // self.itemsize - - def __add__(self, other): - if not isinstance(other, array): - raise TypeError("can only append array to array") - if self.typecode != other.typecode: - raise TypeError("bad argument type for built-in operation") - return array(self.typecode, buffer(self._data) + buffer(other._data)) - - def __mul__(self, repeat): - return array(self.typecode, buffer(self._data) * repeat) - - __rmul__ = __mul__ - - def __getitem__(self, i): - seqlength = len(self) - if isinstance(i, slice): - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist()[i] # fall-back - return array(self.typecode, sublist) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - return array(self.typecode, self._data[start * self.itemsize : - stop * self.itemsize]) - else: - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - return self._data[i] - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - return unpack_from(self.typecode, self._data, boundary)[0] - - def __getslice__(self, i, j): - return self.__getitem__(slice(i, j)) - - def __setitem__(self, i, x): - if isinstance(i, slice): - if (not isinstance(x, array) - or self.typecode != x.typecode): - raise TypeError("can only assign array of same kind" - " to array slice") - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if step != 1: - sublist = self.tolist() # fall-back - sublist[i] = x.tolist() - self._clear() - self.fromlist(sublist) - return - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - boundary1 = start * self.itemsize - boundary2 = stop * self.itemsize - boundary2new = boundary1 + len(x._data) - if boundary2 == boundary2new: - self._data[boundary1:boundary2] = x._data - else: - newdata = bytebuffer(len(self._data) + boundary2new-boundary2) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:boundary2new] = x._data - newdata[boundary2new:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if self.typecode == 'c': # speed trick - self._data[i] = x - return - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - pack_into(self.typecode, self._data, boundary, x) - - def __setslice__(self, i, j, x): - self.__setitem__(slice(i, j), x) - - def __delitem__(self, i): - if isinstance(i, slice): - seqlength = len(self) - start, stop, step = i.indices(seqlength) - if start < 0: - start = 0 - if stop < start: - stop = start - assert stop <= seqlength - if step != 1: - sublist = self.tolist() # fall-back - del sublist[i] - self._clear() - self.fromlist(sublist) - return - dellength = stop - start - boundary1 = start * self.itemsize - boundary2 = 
stop * self.itemsize - newdata = bytebuffer(len(self._data) - (boundary2-boundary1)) - newdata[:boundary1] = self._data[:boundary1] - newdata[boundary1:] = self._data[boundary2:] - self._data = newdata - else: - seqlength = len(self) - if i < 0: - i += seqlength - if not (0 <= i < seqlength): - raise IndexError(i) - boundary = i * self.itemsize - newdata = bytebuffer(len(self._data) - self.itemsize) - newdata[:boundary] = self._data[:boundary] - newdata[boundary:] = self._data[boundary+self.itemsize:] - self._data = newdata - - def __delslice__(self, i, j): - self.__delitem__(slice(i, j)) - - def __contains__(self, item): - for x in self: - if x == item: - return True - return False - - def __iadd__(self, other): - if not isinstance(other, array): - raise TypeError("can only extend array with array") - self.extend(other) - return self - - def __imul__(self, repeat): - newdata = buffer(self._data) * repeat - self._data = bytebuffer(len(newdata)) - self._data[:] = newdata - return self - - def __iter__(self): - p = 0 - typecode = self.typecode - itemsize = self.itemsize - while p < len(self._data): - yield unpack_from(typecode, self._data, p)[0] - p += itemsize - - ##### internal methods - - def _fromiterable(self, iterable): - iterable = tuple(iterable) - n = len(iterable) - boundary = len(self._data) - newdata = bytebuffer(boundary + n * self.itemsize) - newdata[:boundary] = self._data - pack_into('%d%s' % (n, self.typecode), newdata, boundary, *iterable) - self._data = newdata - -ArrayType = array + from _array import array, ArrayType diff --git a/lib_pypy/cmath.py b/lib_pypy/cmath.py new file mode 100644 --- /dev/null +++ b/lib_pypy/cmath.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "cmath" shadows +# any file cmath.py that would be found in the user dirs +from __builtin_cmath import * diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -1,3 +1,5 @@ +# indirection needed; otherwise the built-in module "itertools" shadows +# any file itertools.py that would be found in the user dirs try: from __builtin_itertools import * from __builtin_itertools import __doc__ diff --git a/lib_pypy/math.py b/lib_pypy/math.py new file mode 100644 --- /dev/null +++ b/lib_pypy/math.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "math" shadows +# any file math.py that would be found in the user dirs +from __builtin_math import * diff --git a/lib_pypy/operator.py b/lib_pypy/operator.py new file mode 100644 --- /dev/null +++ b/lib_pypy/operator.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "operator" shadows +# any file operator.py that would be found in the user dirs +from __builtin_operator import * diff --git a/lib_pypy/symbol.py b/lib_pypy/symbol.py new file mode 100644 --- /dev/null +++ b/lib_pypy/symbol.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "symbol" shadows +# any file symbol.py that would be found in the user dirs +from __builtin_symbol import * diff --git a/lib_pypy/token.py b/lib_pypy/token.py new file mode 100644 --- /dev/null +++ b/lib_pypy/token.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "token" shadows +# any file token.py that would be found in the user dirs +from __builtin_token import * diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -259,7 +259,7 @@ def descr_function__reduce__(self, space): 
from pypy.interpreter.gateway import BuiltinCode from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('_pickle_support') + w_mod = space.getbuiltinmodule('__builtin__pickle_support') mod = space.interp_w(MixedModule, w_mod) code = self.code if isinstance(code, BuiltinCode): @@ -559,7 +559,7 @@ def descr_method__reduce__(self, space): from pypy.interpreter.mixedmodule import MixedModule from pypy.interpreter.gateway import BuiltinCode - w_mod = space.getbuiltinmodule('_pickle_support') + w_mod = space.getbuiltinmodule('__builtin__pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('method_new') w = space.wrap diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -584,7 +584,7 @@ def descr__reduce__(self, space): from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('_pickle_support') + w_mod = space.getbuiltinmodule('__builtin__pickle_support') mod = space.interp_w(MixedModule, w_mod) builtin_code = mod.get('builtin_code') return space.newtuple([builtin_code, diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -24,7 +24,7 @@ def descr__reduce__(self, space): from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('_pickle_support') + w_mod = space.getbuiltinmodule('__builtin__pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('generator_new') w = space.wrap @@ -196,4 +196,4 @@ self.frame = None return unpack_into unpack_into = _create_unpack_into() - unpack_into_w = _create_unpack_into() \ No newline at end of file + unpack_into_w = _create_unpack_into() diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -62,11 +62,14 @@ def get_applevel_name(cls): """ NOT_RPYTHON """ - if cls.applevel_name is not None: - return cls.applevel_name - else: - pkgroot = cls.__module__ - return pkgroot.split('.')[-1] + assert cls.applevel_name is not None, ( + "%r: please add an explicit applevel_name to this built-in " + "module. Note that built-in modules shadow all normal app-level " + "imports, so consider naming the built-in module " + "'__builtin_%s' and adding a regular '%s.py' file in " + "lib_pypy that imports * from __builtin_%s." 
% + ((cls,) + (cls.__module__.split('.')[-1],) * 3)) + return cls.applevel_name get_applevel_name = classmethod(get_applevel_name) def get(self, name): diff --git a/pypy/interpreter/module.py b/pypy/interpreter/module.py --- a/pypy/interpreter/module.py +++ b/pypy/interpreter/module.py @@ -86,7 +86,7 @@ if space.finditem(w_modules, w_name) is None: #not imported case from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('_pickle_support') + w_mod = space.getbuiltinmodule('__builtin__pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('module_new') return space.newtuple([new_inst, diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -46,7 +46,7 @@ return space.cmp(self.w_value, other.w_value) def descr__reduce__(self, space): - w_mod = space.getbuiltinmodule('_pickle_support') + w_mod = space.getbuiltinmodule('__builtin__pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('cell_new') if self.w_value is None: #when would this happen? diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -368,7 +368,7 @@ def descr__reduce__(self, space): from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('_pickle_support') + w_mod = space.getbuiltinmodule('__builtin__pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('code_new') w = space.wrap diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -303,7 +303,7 @@ def descr__reduce__(self, space): from pypy.interpreter.mixedmodule import MixedModule from pypy.module._pickle_support import maker # helper fns - w_mod = space.getbuiltinmodule('_pickle_support') + w_mod = space.getbuiltinmodule('__builtin__pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('frame_new') w = space.wrap diff --git a/pypy/interpreter/pytraceback.py b/pypy/interpreter/pytraceback.py --- a/pypy/interpreter/pytraceback.py +++ b/pypy/interpreter/pytraceback.py @@ -26,7 +26,7 @@ def descr__reduce__(self, space): from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('_pickle_support') + w_mod = space.getbuiltinmodule('__builtin__pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('traceback_new') w = space.wrap diff --git a/pypy/interpreter/test/demomixedmod/__init__.py b/pypy/interpreter/test/demomixedmod/__init__.py --- a/pypy/interpreter/test/demomixedmod/__init__.py +++ b/pypy/interpreter/test/demomixedmod/__init__.py @@ -1,6 +1,8 @@ from pypy.interpreter.mixedmodule import MixedModule -class Module(MixedModule): +class Module(MixedModule): + applevel_name = 'demomixedmod' + interpleveldefs = { '__name__' : '(space.wrap("mixedmodule"))', '__doc__' : '(space.wrap("mixedmodule doc"))', diff --git a/pypy/interpreter/test/test_appinterp.py b/pypy/interpreter/test/test_appinterp.py --- a/pypy/interpreter/test/test_appinterp.py +++ b/pypy/interpreter/test/test_appinterp.py @@ -143,6 +143,7 @@ from pypy.conftest import maketestobjspace class MyModule(MixedModule): + applevel_name = 'mymod' interpleveldefs = {} appleveldefs = {} def __init__(self, space, w_name): @@ -172,8 +173,8 @@ from pypy.conftest import gettestobjspace space = gettestobjspace(usemodules=('_ssl', '_socket')) - w_socket = 
space.builtin_modules['_socket'] - w_ssl = space.builtin_modules['_ssl'] + w_socket = space.builtin_modules['__builtin__socket'] + w_ssl = space.builtin_modules['__builtin__ssl'] # Uncomment this line for a workaround # space.getattr(w_ssl, space.wrap('SSLError')) diff --git a/pypy/interpreter/test/test_extmodules.py b/pypy/interpreter/test/test_extmodules.py --- a/pypy/interpreter/test/test_extmodules.py +++ b/pypy/interpreter/test/test_extmodules.py @@ -11,6 +11,7 @@ import time class Module(MixedModule): + applevel_name = 'extmod' appleveldefs = {} diff --git a/pypy/interpreter/test/test_mixedmodule.py b/pypy/interpreter/test/test_mixedmodule.py --- a/pypy/interpreter/test/test_mixedmodule.py +++ b/pypy/interpreter/test/test_mixedmodule.py @@ -5,6 +5,7 @@ class TestMixedModule(object): def test_install(self): class Module(MixedModule): + applevel_name = 'test_module' interpleveldefs = {} appleveldefs = {} @@ -15,10 +16,12 @@ def test_submodule(self): class SubModule(MixedModule): + applevel_name = 'test_module.sub' interpleveldefs = {} appleveldefs = {} class Module(MixedModule): + applevel_name = 'test_module' interpleveldefs = {} appleveldefs = {} submodules = { @@ -38,12 +41,14 @@ space = cls.space class SubModule(MixedModule): + applevel_name = 'test_module.sub' interpleveldefs = { "value": "space.wrap(14)" } appleveldefs = {} class Module(MixedModule): + applevel_name = 'test_module' interpleveldefs = {} appleveldefs = {} submodules = { diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -240,7 +240,7 @@ def test_trace_ignore_hidden(self): import sys - import _testing + import __builtin__testing as _testing l = [] def trace(a,b,c): diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -9,6 +9,8 @@ """Built-in functions, exceptions, and other objects.""" expose__file__attribute = False + applevel_name = '__builtin__' + appleveldefs = { 'execfile' : 'app_io.execfile', 'raw_input' : 'app_io.raw_input', diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -227,7 +227,7 @@ def descr___reduce__(self, space): from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('_pickle_support') + w_mod = space.getbuiltinmodule('__builtin__pickle_support') mod = space.interp_w(MixedModule, w_mod) w_new_inst = mod.get('enumerate_new') w_info = space.newtuple([self.w_iter, self.w_index]) @@ -288,7 +288,7 @@ def descr___reduce__(self, space): from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('_pickle_support') + w_mod = space.getbuiltinmodule('__builtin__pickle_support') mod = space.interp_w(MixedModule, w_mod) w_new_inst = mod.get('reversed_new') info_w = [self.w_sequence, space.wrap(self.remaining)] @@ -412,7 +412,7 @@ def descr_reduce(self): from pypy.interpreter.mixedmodule import MixedModule space = self.space - w_mod = space.getbuiltinmodule('_pickle_support') + w_mod = space.getbuiltinmodule('__builtin__pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('xrangeiter_new') w = space.wrap diff --git a/pypy/module/__builtin__/state.py b/pypy/module/__builtin__/state.py --- a/pypy/module/__builtin__/state.py +++ 
b/pypy/module/__builtin__/state.py @@ -2,8 +2,8 @@ class State: def __init__(self, space): self.w_file = space.appexec([], """(): - import _file; - return _file.file""") + import __builtin__file; + return __builtin__file.file""") def get(space): return space.fromcache(State) diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -5,6 +5,8 @@ class BuildersModule(MixedModule): + applevel_name = '__pypy__.builders' + appleveldefs = {} interpleveldefs = { @@ -13,6 +15,8 @@ } class Module(MixedModule): + applevel_name = '__pypy__' + appleveldefs = { } diff --git a/pypy/module/_ast/__init__.py b/pypy/module/_ast/__init__.py --- a/pypy/module/_ast/__init__.py +++ b/pypy/module/_ast/__init__.py @@ -3,12 +3,15 @@ class Module(MixedModule): + applevel_name = '_ast' + + appleveldefs = { + } interpleveldefs = { "PyCF_ONLY_AST" : "space.wrap(%s)" % consts.PyCF_ONLY_AST, "__version__" : "space.wrap('82160')", # from CPython's svn. } - appleveldefs = {} def _setup(): diff --git a/pypy/module/_bisect/__init__.py b/pypy/module/_bisect/__init__.py --- a/pypy/module/_bisect/__init__.py +++ b/pypy/module/_bisect/__init__.py @@ -13,6 +13,7 @@ having to sort the list after each insertion. For long lists of items with expensive comparison operations, this can be an improvement over the more common approach.""" + applevel_name = '__builtin__bisect' appleveldefs = { 'insort': 'app_bisect.insort_right', diff --git a/pypy/module/_codecs/__init__.py b/pypy/module/_codecs/__init__.py --- a/pypy/module/_codecs/__init__.py +++ b/pypy/module/_codecs/__init__.py @@ -38,6 +38,8 @@ Copyright (c) Corporation for National Research Initiatives. """ + applevel_name = '_codecs' + appleveldefs = {} interpleveldefs = { diff --git a/pypy/module/_collections/__init__.py b/pypy/module/_collections/__init__.py --- a/pypy/module/_collections/__init__.py +++ b/pypy/module/_collections/__init__.py @@ -7,6 +7,7 @@ - deque: ordered collection accessible from endpoints only - defaultdict: dict subclass with a default value factory """ + applevel_name = '__builtin__collections' appleveldefs = { 'defaultdict': 'app_defaultdict.defaultdict', diff --git a/pypy/module/_continuation/__init__.py b/pypy/module/_continuation/__init__.py --- a/pypy/module/_continuation/__init__.py +++ b/pypy/module/_continuation/__init__.py @@ -28,6 +28,7 @@ The most primitive API is actually 'permute()', which just permutes the one-shot continuation stored in two (or more) continulets. 
""" + applevel_name = '__builtin__continuation' appleveldefs = { 'error': 'app_continuation.error', diff --git a/pypy/module/_file/__init__.py b/pypy/module/_file/__init__.py --- a/pypy/module/_file/__init__.py +++ b/pypy/module/_file/__init__.py @@ -4,6 +4,8 @@ import sys class Module(MixedModule): + applevel_name = '__builtin__file' + appleveldefs = { } diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -6,8 +6,8 @@ def getfile(space): return space.appexec([], """(): try: - import _file - return _file.file + import __builtin__file + return __builtin__file.file except ImportError: # when running with py.test -A return file """) @@ -208,7 +208,7 @@ assert exc.value.filename == os.curdir def test_encoding_errors(self): - import _file + import __builtin__file as _file with self.file(self.temppath, "w") as f: _file.set_file_encoding(f, "utf-8") diff --git a/pypy/module/_io/__init__.py b/pypy/module/_io/__init__.py --- a/pypy/module/_io/__init__.py +++ b/pypy/module/_io/__init__.py @@ -2,6 +2,7 @@ import sys class Module(MixedModule): + applevel_name = '__builtin__io' appleveldefs = { } diff --git a/pypy/module/_pickle_support/__init__.py b/pypy/module/_pickle_support/__init__.py --- a/pypy/module/_pickle_support/__init__.py +++ b/pypy/module/_pickle_support/__init__.py @@ -2,6 +2,7 @@ class Module(MixedModule): """Built-in functions, exceptions, and other objects.""" + applevel_name = '__builtin__pickle_support' appleveldefs = { } diff --git a/pypy/module/_random/__init__.py b/pypy/module/_random/__init__.py --- a/pypy/module/_random/__init__.py +++ b/pypy/module/_random/__init__.py @@ -3,9 +3,10 @@ from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): + applevel_name = '__builtin__random' + appleveldefs = {} - + interpleveldefs = { 'Random' : 'interp_random.W_Random', } - diff --git a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py --- a/pypy/module/_socket/__init__.py +++ b/pypy/module/_socket/__init__.py @@ -3,6 +3,7 @@ import sys class Module(MixedModule): + applevel_name = '__builtin__socket' appleveldefs = { } diff --git a/pypy/module/_sre/__init__.py b/pypy/module/_sre/__init__.py --- a/pypy/module/_sre/__init__.py +++ b/pypy/module/_sre/__init__.py @@ -1,6 +1,7 @@ from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): + applevel_name = '_sre' appleveldefs = { } diff --git a/pypy/module/_ssl/__init__.py b/pypy/module/_ssl/__init__.py --- a/pypy/module/_ssl/__init__.py +++ b/pypy/module/_ssl/__init__.py @@ -3,6 +3,7 @@ class Module(MixedModule): """Implementation module for SSL socket operations. 
See the socket module for documentation.""" + applevel_name = '__builtin__ssl' interpleveldefs = { 'sslwrap': 'interp_ssl.sslwrap', diff --git a/pypy/module/_testing/__init__.py b/pypy/module/_testing/__init__.py --- a/pypy/module/_testing/__init__.py +++ b/pypy/module/_testing/__init__.py @@ -9,6 +9,8 @@ class Module(MixedModule): """PyPy own testing""" + applevel_name = '__builtin__testing' + interpleveldefs = { } diff --git a/pypy/module/_weakref/__init__.py b/pypy/module/_weakref/__init__.py --- a/pypy/module/_weakref/__init__.py +++ b/pypy/module/_weakref/__init__.py @@ -1,8 +1,11 @@ from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): + applevel_name = '_weakref' + appleveldefs = { } + interpleveldefs = { 'ref': 'interp__weakref.W_Weakref', 'getweakrefcount': 'interp__weakref.getweakrefcount', diff --git a/pypy/module/array/__init__.py b/pypy/module/array/__init__.py --- a/pypy/module/array/__init__.py +++ b/pypy/module/array/__init__.py @@ -7,6 +7,7 @@ class Module(MixedModule): + applevel_name = '__builtin_array' interpleveldefs = { 'array': 'interp_array.W_ArrayBase', diff --git a/pypy/module/cmath/__init__.py b/pypy/module/cmath/__init__.py --- a/pypy/module/cmath/__init__.py +++ b/pypy/module/cmath/__init__.py @@ -33,6 +33,8 @@ class Module(MixedModule): + applevel_name = '__builtin_cmath' + appleveldefs = { } diff --git a/pypy/module/errno/__init__.py b/pypy/module/errno/__init__.py --- a/pypy/module/errno/__init__.py +++ b/pypy/module/errno/__init__.py @@ -16,6 +16,7 @@ To map error codes to error messages, use the function os.strerror(), e.g. os.strerror(2) could return 'No such file or directory'.""" + applevel_name = 'errno' appleveldefs = {} interpleveldefs = {"errorcode": "interp_errno.get_errorcode(space)"} diff --git a/pypy/module/exceptions/__init__.py b/pypy/module/exceptions/__init__.py --- a/pypy/module/exceptions/__init__.py +++ b/pypy/module/exceptions/__init__.py @@ -2,8 +2,10 @@ from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): + applevel_name = 'exceptions' + appleveldefs = {} - + interpleveldefs = { 'ArithmeticError' : 'interp_exceptions.W_ArithmeticError', 'AssertionError' : 'interp_exceptions.W_AssertionError', diff --git a/pypy/module/gc/__init__.py b/pypy/module/gc/__init__.py --- a/pypy/module/gc/__init__.py +++ b/pypy/module/gc/__init__.py @@ -1,6 +1,8 @@ from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): + applevel_name = 'gc' + appleveldefs = { 'enable': 'app_gc.enable', 'disable': 'app_gc.disable', diff --git a/pypy/module/imp/__init__.py b/pypy/module/imp/__init__.py --- a/pypy/module/imp/__init__.py +++ b/pypy/module/imp/__init__.py @@ -5,6 +5,8 @@ This module provides the components needed to build your own __import__ function. """ + applevel_name = 'imp' + interpleveldefs = { 'PY_SOURCE': 'space.wrap(importing.PY_SOURCE)', 'PY_COMPILED': 'space.wrap(importing.PY_COMPILED)', diff --git a/pypy/module/marshal/__init__.py b/pypy/module/marshal/__init__.py --- a/pypy/module/marshal/__init__.py +++ b/pypy/module/marshal/__init__.py @@ -5,6 +5,7 @@ """ This module implements marshal at interpreter level. 
""" + applevel_name = 'marshal' appleveldefs = { } diff --git a/pypy/module/math/__init__.py b/pypy/module/math/__init__.py --- a/pypy/module/math/__init__.py +++ b/pypy/module/math/__init__.py @@ -3,6 +3,8 @@ from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): + applevel_name = '__builtin_math' + appleveldefs = { 'factorial' : 'app_math.factorial' } diff --git a/pypy/module/operator/__init__.py b/pypy/module/operator/__init__.py --- a/pypy/module/operator/__init__.py +++ b/pypy/module/operator/__init__.py @@ -4,6 +4,8 @@ class Module(MixedModule): """Operator Builtin Module. """ + applevel_name = '__builtin_operator' + # HACK! override loaders to be able to access different operations # under same name. I.e., operator.eq == operator.__eq__ diff --git a/pypy/module/symbol/__init__.py b/pypy/module/symbol/__init__.py --- a/pypy/module/symbol/__init__.py +++ b/pypy/module/symbol/__init__.py @@ -10,6 +10,7 @@ class Module(MixedModule): """Non-terminal symbols of Python grammar.""" + applevel_name = '__builtin_symbol' appleveldefs = {} interpleveldefs = {} # see below diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -9,6 +9,8 @@ """Sys Builtin Module. """ _immutable_fields_ = ["defaultencoding?"] + applevel_name = 'sys' + def __init__(self, space, w_name): """NOT_RPYTHON""" # because parent __init__ isn't if space.config.translating: diff --git a/pypy/module/token/__init__.py b/pypy/module/token/__init__.py --- a/pypy/module/token/__init__.py +++ b/pypy/module/token/__init__.py @@ -4,6 +4,7 @@ class Module(MixedModule): + applevel_name = '__builtin_token' appleveldefs = {} interpleveldefs = { diff --git a/pypy/objspace/std/dicttype.py b/pypy/objspace/std/dicttype.py --- a/pypy/objspace/std/dicttype.py +++ b/pypy/objspace/std/dicttype.py @@ -148,7 +148,7 @@ XXX to do: remove this __reduce__ method and do a registration with copy_reg, instead. 
""" - w_mod = space.getbuiltinmodule('_pickle_support') + w_mod = space.getbuiltinmodule('__builtin__pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('dictiter_surrogate_new') w_typeobj = space.gettypeobject(dictiter_typedef) diff --git a/pypy/objspace/std/itertype.py b/pypy/objspace/std/itertype.py --- a/pypy/objspace/std/itertype.py +++ b/pypy/objspace/std/itertype.py @@ -17,7 +17,7 @@ from pypy.objspace.std.iterobject import W_AbstractSeqIterObject assert isinstance(w_self, W_AbstractSeqIterObject) from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('_pickle_support') + w_mod = space.getbuiltinmodule('__builtin__pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('seqiter_new') tup = [w_self.w_seq, space.wrap(w_self.index)] @@ -33,7 +33,7 @@ from pypy.objspace.std.iterobject import W_ReverseSeqIterObject assert isinstance(w_self, W_ReverseSeqIterObject) from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('_pickle_support') + w_mod = space.getbuiltinmodule('__builtin__pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('reverseseqiter_new') tup = [w_self.w_seq, space.wrap(w_self.index)] diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py +++ b/pypy/translator/goal/app_main.py @@ -258,7 +258,7 @@ def set_io_encoding(io_encoding): try: - import _file + import __builtin__file except ImportError: if sys.version_info < (2, 7): return @@ -266,7 +266,7 @@ set_file_encoding = ctypes.pythonapi.PyFile_SetEncodingAndErrors set_file_encoding.argtypes = [ctypes.py_object, ctypes.c_char_p, ctypes.c_char_p] else: - set_file_encoding = _file.set_file_encoding + set_file_encoding = __builtin__file.set_file_encoding if ":" in io_encoding: encoding, errors = io_encoding.split(":", 1) else: From noreply at buildbot.pypy.org Sun Dec 4 01:23:39 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Sun, 4 Dec 2011 01:23:39 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: resolved another missing include_dirs entry, for cpyext. Message-ID: <20111204002339.7152282A00@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50100:dc30c26f6f21 Date: 2011-12-04 01:15 +0100 http://bitbucket.org/pypy/pypy/changeset/dc30c26f6f21/ Log: resolved another missing include_dirs entry, for cpyext. 
(hit by chance, forgot to disable modules) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -45,8 +45,10 @@ pypydir = py.path.local(autopath.pypydir) include_dir = pypydir / 'module' / 'cpyext' / 'include' source_dir = pypydir / 'module' / 'cpyext' / 'src' +signed_dir = pypydir / 'translator' / 'c' / 'src' include_dirs = [ include_dir, + signed_dir, udir, ] From noreply at buildbot.pypy.org Sun Dec 4 01:23:41 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Sun, 4 Dec 2011 01:23:41 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: Merge with default Message-ID: <20111204002341.5043782A00@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50101:e7ab064d5412 Date: 2011-12-04 01:22 +0100 http://bitbucket.org/pypy/pypy/changeset/e7ab064d5412/ Log: Merge with default diff --git a/lib_pypy/itertools.py b/lib_pypy/_itertools.py copy from lib_pypy/itertools.py copy to lib_pypy/_itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/_itertools.py @@ -1,5 +1,5 @@ -# Note that PyPy contains also a built-in module 'itertools' which will -# hide this one if compiled in. +# Note that PyPy contains also a built-in implementation of 'itertools'; +# when translated with default options, this one is not used. """Functional tools for creating and using iterators. @@ -25,7 +25,7 @@ __all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee'] + 'takewhile', 'tee', 'compress', 'product'] try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -1,670 +1,6 @@ -# Note that PyPy contains also a built-in module 'itertools' which will -# hide this one if compiled in. - -"""Functional tools for creating and using iterators. - -Infinite iterators: -count([n]) --> n, n+1, n+2, ... -cycle(p) --> p0, p1, ... plast, p0, p1, ... -repeat(elem [,n]) --> elem, elem, elem, ... endlessly or up to n times - -Iterators terminating on the shortest input sequence: -izip(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... -ifilter(pred, seq) --> elements of seq where pred(elem) is True -ifilterfalse(pred, seq) --> elements of seq where pred(elem) is False -islice(seq, [start,] stop [, step]) --> elements from - seq[start:stop:step] -imap(fun, p, q, ...) --> fun(p0, q0), fun(p1, q1), ... -starmap(fun, seq) --> fun(*seq[0]), fun(*seq[1]), ... -tee(it, n=2) --> (it1, it2 , ... itn) splits one iterator into n -chain(p, q, ...) --> p0, p1, ... plast, q0, q1, ... -takewhile(pred, seq) --> seq[0], seq[1], until pred fails -dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails -groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v) -""" - -__all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', - 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee'] - -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f - - -class chain(object): - """Make an iterator that returns elements from the first iterable - until it is exhausted, then proceeds to the next iterable, until - all of the iterables are exhausted. Used for treating consecutive - sequences as a single sequence. 
- - Equivalent to : - - def chain(*iterables): - for it in iterables: - for element in it: - yield element - """ - def __init__(self, *iterables): - self._iterables_iter = iter(map(iter, iterables)) - # little trick for the first chain.next() call - self._cur_iterable_iter = iter([]) - - def __iter__(self): - return self - - def next(self): - while True: - try: - return self._cur_iterable_iter.next() - except StopIteration: - self._cur_iterable_iter = self._iterables_iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._cur_iterable_iter)) - - -class compress(object): - def __init__(self, data, selectors): - self.data = iter(data) - self.selectors = iter(selectors) - - def __iter__(self): - return self - - def next(self): - while True: - next_item = self.data.next() - next_selector = self.selectors.next() - if bool(next_selector): - return next_item - - -class count(object): - """Make an iterator that returns consecutive integers starting - with n. If not specified n defaults to zero. Does not currently - support python long integers. Often used as an argument to imap() - to generate consecutive data points. Also, used with izip() to - add sequence numbers. - - Equivalent to : - - def count(n=0): - if not isinstance(n, int): - raise TypeError("%s is not a regular integer" % n) - while True: - yield n - n += 1 - """ - def __init__(self, n=0): - if not isinstance(n, int): - raise TypeError('%s is not a regular integer' % n) - self.times = n-1 - - def __iter__(self): - return self - - def next(self): - self.times += 1 - return self.times - - def __repr__(self): - return 'count(%d)' % (self.times + 1) - - - -class cycle(object): - """Make an iterator returning elements from the iterable and - saving a copy of each. When the iterable is exhausted, return - elements from the saved copy. Repeats indefinitely. - - Equivalent to : - - def cycle(iterable): - saved = [] - for element in iterable: - yield element - saved.append(element) - while saved: - for element in saved: - yield element - """ - def __init__(self, iterable): - self._cur_iter = iter(iterable) - self._saved = [] - self._must_save = True - - def __iter__(self): - return self - - def next(self): - # XXX Could probably be improved - try: - next_elt = self._cur_iter.next() - if self._must_save: - self._saved.append(next_elt) - except StopIteration: - self._cur_iter = iter(self._saved) - next_elt = self._cur_iter.next() - self._must_save = False - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._cur_iter)) - return next_elt - - -class dropwhile(object): - """Make an iterator that drops elements from the iterable as long - as the predicate is true; afterwards, returns every - element. Note, the iterator does not produce any output until the - predicate is true, so it may have a lengthy start-up time. 
- - Equivalent to : - - def dropwhile(predicate, iterable): - iterable = iter(iterable) - for x in iterable: - if not predicate(x): - yield x - break - for x in iterable: - yield x - """ - def __init__(self, predicate, iterable): - self._predicate = predicate - self._iter = iter(iterable) - self._dropped = False - - def __iter__(self): - return self - - def next(self): - try: - value = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._iter)) - if self._dropped: - return value - while self._predicate(value): - value = self._iter.next() - self._dropped = True - return value - -class groupby(object): - """Make an iterator that returns consecutive keys and groups from the - iterable. The key is a function computing a key value for each - element. If not specified or is None, key defaults to an identity - function and returns the element unchanged. Generally, the - iterable needs to already be sorted on the same key function. - - The returned group is itself an iterator that shares the - underlying iterable with groupby(). Because the source is shared, - when the groupby object is advanced, the previous group is no - longer visible. So, if that data is needed later, it should be - stored as a list: - - groups = [] - uniquekeys = [] - for k, g in groupby(data, keyfunc): - groups.append(list(g)) # Store group iterator as a list - uniquekeys.append(k) - """ - def __init__(self, iterable, key=None): - if key is None: - key = lambda x: x - self.keyfunc = key - self.it = iter(iterable) - self.tgtkey = self.currkey = self.currvalue = xrange(0) - - def __iter__(self): - return self - - def next(self): - while self.currkey == self.tgtkey: - try: - self.currvalue = self.it.next() # Exit on StopIteration - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self.it)) - self.currkey = self.keyfunc(self.currvalue) - self.tgtkey = self.currkey - return (self.currkey, self._grouper(self.tgtkey)) - - def _grouper(self, tgtkey): - while self.currkey == tgtkey: - yield self.currvalue - self.currvalue = self.it.next() # Exit on StopIteration - self.currkey = self.keyfunc(self.currvalue) - - - -class _ifilter_base(object): - """base class for ifilter and ifilterflase""" - def __init__(self, predicate, iterable): - # Make sure iterable *IS* iterable - self._iter = iter(iterable) - if predicate is None: - self._predicate = bool - else: - self._predicate = predicate - - def __iter__(self): - return self - -class ifilter(_ifilter_base): - """Make an iterator that filters elements from iterable returning - only those for which the predicate is True. If predicate is - None, return the items that are true. - - Equivalent to : - - def ifilter: - if predicate is None: - predicate = bool - for x in iterable: - if predicate(x): - yield x - """ - def next(self): - try: - next_elt = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._iter)) - while True: - if self._predicate(next_elt): - return next_elt - next_elt = self._iter.next() - -class ifilterfalse(_ifilter_base): - """Make an iterator that filters elements from iterable returning - only those for which the predicate is False. If predicate is - None, return the items that are false. 
- - Equivalent to : - - def ifilterfalse(predicate, iterable): - if predicate is None: - predicate = bool - for x in iterable: - if not predicate(x): - yield x - """ - def next(self): - try: - next_elt = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._iter)) - while True: - if not self._predicate(next_elt): - return next_elt - next_elt = self._iter.next() - - - - -class imap(object): - """Make an iterator that computes the function using arguments - from each of the iterables. If function is set to None, then - imap() returns the arguments as a tuple. Like map() but stops - when the shortest iterable is exhausted instead of filling in - None for shorter iterables. The reason for the difference is that - infinite iterator arguments are typically an error for map() - (because the output is fully evaluated) but represent a common - and useful way of supplying arguments to imap(). - - Equivalent to : - - def imap(function, *iterables): - iterables = map(iter, iterables) - while True: - args = [i.next() for i in iterables] - if function is None: - yield tuple(args) - else: - yield function(*args) - - """ - def __init__(self, function, iterable, *other_iterables): - self._func = function - self._iters = map(iter, (iterable, ) + other_iterables) - - def __iter__(self): - return self - - def next(self): - try: - args = [it.next() for it in self._iters] - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (it)) - if self._func is None: - return tuple(args) - else: - return self._func(*args) - - - -class islice(object): - """Make an iterator that returns selected elements from the - iterable. If start is non-zero, then elements from the iterable - are skipped until start is reached. Afterward, elements are - returned consecutively unless step is set higher than one which - results in items being skipped. If stop is None, then iteration - continues until the iterator is exhausted, if at all; otherwise, - it stops at the specified position. Unlike regular slicing, - islice() does not support negative values for start, stop, or - step. Can be used to extract related fields from data where the - internal structure has been flattened (for example, a multi-line - report may list a name field on every third line). - """ - def __init__(self, iterable, *args): - s = slice(*args) - self.start, self.stop, self.step = s.start or 0, s.stop, s.step - if not isinstance(self.start, (int, long)): - raise ValueError("Start argument must be an integer") - if self.stop is not None and not isinstance(self.stop, (int,long)): - raise ValueError("Stop argument must be an integer or None") - if self.step is None: - self.step = 1 - if self.start<0 or (self.stop is not None and self.stop<0 - ) or self.step<=0: - raise ValueError, "indices for islice() must be positive" - self.it = iter(iterable) - self.donext = None - self.cnt = 0 - - def __iter__(self): - return self - - def next(self): - if self.donext is None: - try: - self.donext = self.it.next - except AttributeError: - raise TypeError - nextindex = self.start - if self.stop is not None and nextindex >= self.stop: - raise StopIteration - while self.cnt <= nextindex: - nextitem = self.donext() - self.cnt += 1 - self.start += self.step - return nextitem - -class izip(object): - """Make an iterator that aggregates elements from each of the - iterables. 
Like zip() except that it returns an iterator instead - of a list. Used for lock-step iteration over several iterables at - a time. - - Equivalent to : - - def izip(*iterables): - iterables = map(iter, iterables) - while iterables: - result = [i.next() for i in iterables] - yield tuple(result) - """ - def __init__(self, *iterables): - self._iterators = map(iter, iterables) - self._result = [None] * len(self._iterators) - - def __iter__(self): - return self - - def next(self): - if not self._iterators: - raise StopIteration() - try: - return tuple([i.next() for i in self._iterators]) - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % (i)) - - -class product(object): - - def __init__(self, *args, **kw): - if len(kw) > 1: - raise TypeError("product() takes at most 1 argument (%d given)" % - len(kw)) - self.repeat = kw.get('repeat', 1) - self.gears = [x for x in args] * self.repeat - self.num_gears = len(self.gears) - # initialization of indicies to loop over - self.indicies = [(0, len(self.gears[x])) - for x in range(0, self.num_gears)] - self.cont = True - - def roll_gears(self): - # Starting from the end of the gear indicies work to the front - # incrementing the gear until the limit is reached. When the limit - # is reached carry operation to the next gear - should_carry = True - for n in range(0, self.num_gears): - nth_gear = self.num_gears - n - 1 - if should_carry: - count, lim = self.indicies[nth_gear] - count += 1 - if count == lim and nth_gear == 0: - self.cont = False - if count == lim: - should_carry = True - count = 0 - else: - should_carry = False - self.indicies[nth_gear] = (count, lim) - else: - break - - def __iter__(self): - return self - - def next(self): - if not self.cont: - raise StopIteration - l = [] - for x in range(0, self.num_gears): - index, limit = self.indicies[x] - l.append(self.gears[x][index]) - self.roll_gears() - return tuple(l) - - -class repeat(object): - """Make an iterator that returns object over and over again. - Runs indefinitely unless the times argument is specified. Used - as argument to imap() for invariant parameters to the called - function. Also used with izip() to create an invariant part of a - tuple record. - - Equivalent to : - - def repeat(object, times=None): - if times is None: - while True: - yield object - else: - for i in xrange(times): - yield object - """ - def __init__(self, obj, times=None): - self._obj = obj - if times is not None: - xrange(times) # Raise a TypeError - if times < 0: - times = 0 - self._times = times - - def __iter__(self): - return self - - def next(self): - # next() *need* to decrement self._times when consumed - if self._times is not None: - if self._times <= 0: - raise StopIteration() - self._times -= 1 - return self._obj - - def __repr__(self): - if self._times is not None: - return 'repeat(%r, %r)' % (self._obj, self._times) - else: - return 'repeat(%r)' % (self._obj,) - - def __len__(self): - if self._times == -1 or self._times is None: - raise TypeError("len() of uniszed object") - return self._times - - -class starmap(object): - """Make an iterator that computes the function using arguments - tuples obtained from the iterable. Used instead of imap() when - argument parameters are already grouped in tuples from a single - iterable (the data has been ``pre-zipped''). The difference - between imap() and starmap() parallels the distinction between - function(a,b) and function(*c). 
- - Equivalent to : - - def starmap(function, iterable): - iterable = iter(iterable) - while True: - yield function(*iterable.next()) - """ - def __init__(self, function, iterable): - self._func = function - self._iter = iter(iterable) - - def __iter__(self): - return self - - def next(self): - # CPython raises a TypeError when the iterator doesn't return a tuple - try: - t = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % self._iter) - if not isinstance(t, tuple): - raise TypeError("iterator must return a tuple") - return self._func(*t) - - - -class takewhile(object): - """Make an iterator that returns elements from the iterable as - long as the predicate is true. - - Equivalent to : - - def takewhile(predicate, iterable): - for x in iterable: - if predicate(x): - yield x - else: - break - """ - def __init__(self, predicate, iterable): - self._predicate = predicate - self._iter = iter(iterable) - - def __iter__(self): - return self - - def next(self): - try: - value = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._iter)) - if not self._predicate(value): - raise StopIteration() - return value - - -class TeeData(object): - """Holds cached values for TeeObjects""" - def __init__(self, iterator): - self.data = [] - self._iter = iterator - - def __getitem__(self, i): - # iterates until 'i' if not done yet - while i>= len(self.data): - try: - self.data.append( self._iter.next() ) - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % self._iter) - return self.data[i] - - -class TeeObject(object): - """Iterables / Iterators as returned by the tee() function""" - def __init__(self, iterable=None, tee_data=None): - if tee_data: - self.tee_data = tee_data - self.pos = 0 - # <=> Copy constructor - elif isinstance(iterable, TeeObject): - self.tee_data = iterable.tee_data - self.pos = iterable.pos - else: - self.tee_data = TeeData(iter(iterable)) - self.pos = 0 - - def next(self): - data = self.tee_data[self.pos] - self.pos += 1 - return data - - def __iter__(self): - return self - - - at builtinify -def tee(iterable, n=2): - """Return n independent iterators from a single iterable. - Note : once tee() has made a split, the original iterable - should not be used anywhere else; otherwise, the iterable could get - advanced without the tee objects being informed. - - Note : this member of the toolkit may require significant auxiliary - storage (depending on how much temporary data needs to be stored). 
- In general, if one iterator is going to use most or all of the - data before the other iterator, it is faster to use list() instead - of tee() - - Equivalent to : - - def tee(iterable, n=2): - def gen(next, data={}, cnt=[0]): - for i in count(): - if i == cnt[0]: - item = data[i] = next() - cnt[0] += 1 - else: - item = data.pop(i) - yield item - it = iter(iterable) - return tuple([gen(it.next) for i in range(n)]) - """ - if isinstance(iterable, TeeObject): - # a,b = tee(range(10)) ; c,d = tee(a) ; self.assert_(a is c) - return tuple([iterable] + - [TeeObject(tee_data=iterable.tee_data) for i in xrange(n-1)]) - tee_data = TeeData(iter(iterable)) - return tuple([TeeObject(tee_data=tee_data) for i in xrange(n)]) +try: + from __builtin_itertools import * + from __builtin_itertools import __doc__ +except ImportError: + from _itertools import * + from _itertools import __doc__ diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.6`: http://pypy.org/download.html +.. _`Release 1.7`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. 
Windows is supported, but platform-specific bugs tend to take longer before we notice and fix diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -20,7 +20,7 @@ from pypy.jit.backend.llgraph import symbolic from pypy.jit.codewriter import longlong -from pypy.rlib import libffi +from pypy.rlib import libffi, clibffi from pypy.rlib.objectmodel import ComputedIntSymbolic, we_are_translated from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint @@ -1432,6 +1432,10 @@ res = _getinteriorfield_raw(libffi.types.slong, array, index, width, ofs) return res +def do_getinteriorfield_raw_float(array, index, width, ofs): + res = _getinteriorfield_raw(libffi.types.double, array, index, width, ofs) + return res + def _getfield_raw(struct, fieldnum): STRUCT, fieldname = symbolic.TokenToField[fieldnum] ptr = cast_from_int(lltype.Ptr(STRUCT), struct) @@ -1510,12 +1514,17 @@ do_setinteriorfield_gc_float = new_setinteriorfield_gc(cast_from_floatstorage) do_setinteriorfield_gc_ptr = new_setinteriorfield_gc(cast_from_ptr) -def new_setinteriorfield_raw(ffitype): +def new_setinteriorfield_raw(cast_func, ffitype): def do_setinteriorfield_raw(array, index, newvalue, width, ofs): addr = rffi.cast(rffi.VOIDP, array) + for TYPE, ffitype2 in clibffi.ffitype_map: + if ffitype2 is ffitype: + newvalue = cast_func(TYPE, newvalue) + break return libffi.array_setitem(ffitype, width, addr, index, ofs, newvalue) return do_setinteriorfield_raw -do_setinteriorfield_raw_int = new_setinteriorfield_raw(libffi.types.slong) +do_setinteriorfield_raw_int = new_setinteriorfield_raw(cast_from_int, libffi.types.slong) +do_setinteriorfield_raw_float = new_setinteriorfield_raw(cast_from_floatstorage, libffi.types.double) def do_setfield_raw_int(struct, fieldnum, newvalue): STRUCT, fieldname = symbolic.TokenToField[fieldnum] diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -2,6 +2,8 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan +from pypy.jit.tool.oparser import parse +from pypy.jit.backend.detect_cpu import getcpuclass def newboxes(*values): return [BoxInt(v) for v in values] diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -167,26 +167,22 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity = self._compute_vars_longevity(inputargs, operations) + longevity, useful = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) - return operations + return operations, useful def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - jump = operations[-1] - loop_consts = self._compute_loop_consts(inputargs, jump, looptoken) - self.loop_consts = loop_consts - return 
self._process_inputargs(inputargs), operations + operations, useful = self._prepare(inputargs, operations, allgcrefs) + return self._process_inputargs(inputargs, useful), operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - self.loop_consts = {} + operations, _ = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] @@ -195,7 +191,7 @@ def reserve_param(self, n): self.param_depth = max(self.param_depth, n) - def _process_inputargs(self, inputargs): + def _process_inputargs(self, inputargs, useful): # XXX we can sort out here by longevity if we need something # more optimal floatlocs = [None] * len(inputargs) @@ -211,7 +207,7 @@ arg = inputargs[i] assert not isinstance(arg, Const) reg = None - if arg not in self.loop_consts and self.longevity[arg][1] > -1: + if self.longevity[arg][1] > -1 and arg in useful: if arg.type == FLOAT: # xxx is it really a good idea? at the first CALL they # will all be flushed anyway @@ -287,15 +283,15 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: - loop_consts = {} - else: - loop_consts = {} - for i in range(len(inputargs)): - if inputargs[i] is jump.getarg(i): - loop_consts[inputargs[i]] = i - return loop_consts + #def _compute_loop_consts(self, inputargs, jump, looptoken): + # if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: + # loop_consts = {} + # else: + # loop_consts = {} + # for i in range(len(inputargs)): + # if inputargs[i] is jump.getarg(i): + # loop_consts[inputargs[i]] = i + # return loop_consts def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py @@ -450,8 +446,14 @@ def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" + + # returns a pair longevity/useful. Non-useful variables are ones that + # never appear in the assembler or it does not matter if they appear on + # stack or in registers. 
Main example is loop arguments that go + # only to guard operations or to jump or to finish produced = {} last_used = {} + useful = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -459,8 +461,11 @@ continue assert op.result not in produced produced[op.result] = i + opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) + if opnum != rop.JUMP and opnum != rop.FINISH: + useful[arg] = None if isinstance(arg, Box) and arg not in last_used: last_used[arg] = i if op.is_guard(): @@ -486,7 +491,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity + return longevity, useful def loc(self, v): if v is None: # xxx kludgy diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -149,6 +149,13 @@ self.cpu.execute_token(loop.token) return loop + def prepare_loop(self, ops): + loop = self.parse(ops) + regalloc = RegAlloc(self.cpu.assembler, False) + regalloc.prepare_loop(loop.inputargs, loop.operations, + loop.token, []) + return regalloc + def getint(self, index): return self.cpu.get_latest_value_int(index) @@ -422,6 +429,35 @@ self.run(loop) assert self.getints(9) == range(9) + def test_loopargs(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_2(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + finish(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_3(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + guard_true(i4) [i0, i1, i2, i3, i4] + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + class TestRegallocCompOps(BaseTestRegalloc): def test_cmp_op_0(self): diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -457,6 +457,46 @@ def test_compile_framework_7(self): self.run('compile_framework_7') + def define_compile_framework_7_interior(cls): + # Array of structs containing pointers (test the write barrier + # for setinteriorfield_gc) + S = lltype.GcStruct('S', ('i', lltype.Signed)) + A = lltype.GcArray(lltype.Struct('entry', ('x', lltype.Ptr(S)), + ('y', lltype.Ptr(S)), + ('z', lltype.Ptr(S)))) + class Glob: + a = lltype.nullptr(A) + glob = Glob() + # + def make_s(i): + s = lltype.malloc(S) + s.i = i + return s + # + @unroll_safe + def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): + a = glob.a + if not a: + a = glob.a = lltype.malloc(A, 10) + i = 0 + while i < 10: + a[i].x = make_s(n + i * 100 + 1) + a[i].y = make_s(n + i * 100 + 2) + a[i].z = make_s(n + i * 100 + 3) + i += 1 + i = 0 + while i < 10: + check(a[i].x.i == n + i * 100 + 1) + check(a[i].y.i == n + i * 100 + 2) + check(a[i].z.i == n + i * 100 + 3) + i += 1 + return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s + f(123, *[None]*11) # check that the check() are ok + return None, f, None + + def test_compile_framework_7_interior(self): + self.run('compile_framework_7_interior') + def define_compile_framework_8(cls): # Array of pointers, of unknown length (test write_barrier_from_array) def before(n, x): diff --git a/pypy/jit/codewriter/effectinfo.py 
b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -247,7 +247,7 @@ if funcobj.random_effects_on_gcobjs: return True except (AttributeError, lltype.DelayedPointer): - pass + return True # better safe than sorry return super(RandomEffectsAnalyzer, self).analyze_external_call( op, seen) diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -210,6 +210,8 @@ def rewrite_op_cast_pointer(self, op): newop = self.rewrite_op_same_as(op) assert newop is None + return + # disabled for now if (self._is_rclass_instance(op.args[0]) and self._is_rclass_instance(op.result)): FROM = op.args[0].concretetype.TO @@ -220,6 +222,9 @@ return [None, # hack, do the right renaming from op.args[0] to op.result SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + def rewrite_op_jit_record_known_class(self, op): + return SpaceOperation("record_known_class", [op.args[0], op.args[1]], None) + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -14,7 +14,7 @@ from pypy.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside, loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, - isconstant, isvirtual, promote_string, set_param) + isconstant, isvirtual, promote_string, set_param, record_known_class) from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -3585,7 +3585,8 @@ self.interp_operations(f, [5], translationoptions=translationoptions) - def test_annotation_gives_knowledge_to_tracer(self): + def test_annotation_gives_class_knowledge_to_tracer(self): + py.test.skip("disabled") class Base(object): pass class A(Base): @@ -3645,6 +3646,70 @@ # here it works again self.check_operations_history(guard_class=0, record_known_class=1) + def test_give_class_knowledge_to_tracer_explicitly(self): + from pypy.rpython.lltypesystem.lloperation import llop + class Base(object): + def f(self): + raise NotImplementedError + def g(self): + raise NotImplementedError + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + record_known_class(a, A) + z = a.f() + elif x < 0: + record_known_class(a, B) + z = a.f() + else: + record_known_class(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + 
self.check_operations_history(guard_class=0, record_known_class=1) + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): diff --git a/pypy/module/itertools/__init__.py b/pypy/module/itertools/__init__.py --- a/pypy/module/itertools/__init__.py +++ b/pypy/module/itertools/__init__.py @@ -23,6 +23,7 @@ dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v) """ + applevel_name = '__builtin_itertools' interpleveldefs = { 'chain' : 'interp_itertools.W_Chain', diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -5,10 +5,11 @@ applevel_name = 'numpypy' interpleveldefs = { - 'array': 'interp_numarray.NDimArray', + 'ndarray': 'interp_numarray.W_NDimArray', 'dtype': 'interp_dtype.W_Dtype', 'ufunc': 'interp_ufuncs.W_Ufunc', + 'array': 'interp_numarray.array', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', 'ones': 'interp_numarray.ones', @@ -16,8 +17,22 @@ 'fromstring': 'interp_support.fromstring', 'flatiter': 'interp_numarray.W_FlatIterator', - 'True_': 'space.w_True', - 'False_': 'space.w_False', + 'True_': 'types.Bool.True', + 'False_': 'types.Bool.False', + + 'generic': 'interp_boxes.W_GenericBox', + 'number': 'interp_boxes.W_NumberBox', + 'integer': 'interp_boxes.W_IntegerBox', + 'signedinteger': 'interp_boxes.W_SignedIntegerBox', + 'bool_': 'interp_boxes.W_BoolBox', + 'int8': 'interp_boxes.W_Int8Box', + 'int16': 'interp_boxes.W_Int16Box', + 'int32': 'interp_boxes.W_Int32Box', + 'int64': 'interp_boxes.W_Int64Box', + 'int_': 'interp_boxes.W_LongBox', + 'inexact': 'interp_boxes.W_InexactBox', + 'floating': 'interp_boxes.W_FloatingBox', + 'float64': 'interp_boxes.W_Float64Box', } # ufuncs diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -3,13 +3,16 @@ It should not be imported by the module itself """ +import re + from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root -from pypy.module.micronumpy.interp_dtype import W_Float64Dtype, W_BoolDtype +from pypy.module.micronumpy import interp_boxes +from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.module.micronumpy.interp_numarray import (Scalar, BaseArray, - descr_new_array, scalar_w, NDimArray) + scalar_w, W_NDimArray, array) from pypy.module.micronumpy import interp_ufuncs -from pypy.rlib.objectmodel import specialize -import re +from pypy.rlib.objectmodel import specialize, instantiate + class BogusBytecode(Exception): pass @@ -48,15 +51,12 @@ def __init__(self): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild - self.w_float64dtype = W_Float64Dtype(self) def issequence_w(self, w_obj): - return isinstance(w_obj, ListObject) or isinstance(w_obj, NDimArray) + return isinstance(w_obj, ListObject) or isinstance(w_obj, W_NDimArray) def isinstance_w(self, w_obj, w_tp): - if w_obj.tp == w_tp: - return True - return False + return w_obj.tp == w_tp def decode_index4(self, w_idx, size): if isinstance(w_idx, IntObject): @@ -97,8 +97,10 @@ fixedview = listview def float(self, w_obj): - assert isinstance(w_obj, FloatObject) - return w_obj + if isinstance(w_obj, FloatObject): + return w_obj + assert isinstance(w_obj, interp_boxes.W_GenericBox) + return self.float(w_obj.descr_float(self)) def float_w(self, w_obj): assert isinstance(w_obj, 
FloatObject) @@ -112,7 +114,10 @@ raise NotImplementedError def int(self, w_obj): - return w_obj + if isinstance(w_obj, IntObject): + return w_obj + assert isinstance(w_obj, interp_boxes.W_GenericBox) + return self.int(w_obj.descr_int(self)) def is_true(self, w_obj): assert isinstance(w_obj, BoolObject) @@ -135,6 +140,9 @@ assert isinstance(what, tp) return what + def allocate_instance(self, klass, w_subtype): + return instantiate(klass) + def len_w(self, w_obj): if isinstance(w_obj, ListObject): return len(w_obj.items) @@ -247,7 +255,7 @@ w_rhs = self.rhs.execute(interp) if not isinstance(w_lhs, BaseArray): # scalar - dtype = interp.space.fromcache(W_Float64Dtype) + dtype = get_dtype_cache(interp.space).w_float64dtype w_lhs = scalar_w(interp.space, dtype, w_lhs) assert isinstance(w_lhs, BaseArray) if self.name == '+': @@ -264,8 +272,9 @@ w_res = w_lhs.descr_getitem(interp.space, w_rhs) else: raise NotImplementedError - if not isinstance(w_res, BaseArray): - dtype = interp.space.fromcache(W_Float64Dtype) + if (not isinstance(w_res, BaseArray) and + not isinstance(w_res, interp_boxes.W_GenericBox)): + dtype = get_dtype_cache(interp.space).w_float64dtype w_res = scalar_w(interp.space, dtype, w_res) return w_res @@ -283,7 +292,7 @@ return space.wrap(self.v) def execute(self, interp): - return FloatObject(self.v) + return interp.space.wrap(self.v) class RangeConstant(Node): def __init__(self, v): @@ -291,10 +300,10 @@ def execute(self, interp): w_list = interp.space.newlist( - [interp.space.wrap(float(i)) for i in range(self.v)]) - dtype = interp.space.fromcache(W_Float64Dtype) - return descr_new_array(interp.space, None, w_list, w_dtype=dtype, - w_order=None) + [interp.space.wrap(float(i)) for i in range(self.v)] + ) + dtype = get_dtype_cache(interp.space).w_float64dtype + return array(interp.space, w_list, w_dtype=dtype, w_order=None) def __repr__(self): return 'Range(%s)' % self.v @@ -315,9 +324,8 @@ def execute(self, interp): w_list = self.wrap(interp.space) - dtype = interp.space.fromcache(W_Float64Dtype) - return descr_new_array(interp.space, None, w_list, w_dtype=dtype, - w_order=None) + dtype = get_dtype_cache(interp.space).w_float64dtype + return array(interp.space, w_list, w_dtype=dtype, w_order=None) def __repr__(self): return "[" + ", ".join([repr(item) for item in self.items]) + "]" @@ -384,9 +392,11 @@ if isinstance(w_res, BaseArray): return w_res if isinstance(w_res, FloatObject): - dtype = interp.space.fromcache(W_Float64Dtype) + dtype = get_dtype_cache(interp.space).w_float64dtype elif isinstance(w_res, BoolObject): - dtype = interp.space.fromcache(W_BoolDtype) + dtype = get_dtype_cache(interp.space).w_booldtype + elif isinstance(w_res, interp_boxes.W_GenericBox): + dtype = w_res.get_dtype(interp.space) else: dtype = None return scalar_w(interp.space, dtype, w_res) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_boxes.py @@ -0,0 +1,267 @@ +from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import TypeDef +from pypy.objspace.std.floattype import float_typedef +from pypy.objspace.std.inttype import int_typedef +from pypy.objspace.std.typeobject import W_TypeObject +from pypy.rlib.rarithmetic import LONG_BIT +from pypy.tool.sourcetools import func_with_new_name + + +MIXIN_64 = (int_typedef,) if LONG_BIT == 64 else () + +def 
new_dtype_getter(name): + def get_dtype(space): + from pypy.module.micronumpy.interp_dtype import get_dtype_cache + return getattr(get_dtype_cache(space), "w_%sdtype" % name) + def new(space, w_subtype, w_value): + dtype = get_dtype(space) + return dtype.itemtype.coerce_subtype(space, w_subtype, w_value) + return func_with_new_name(new, name + "_box_new"), staticmethod(get_dtype) + +class PrimitiveBox(object): + _mixin_ = True + + def __init__(self, value): + self.value = value + + def convert_to(self, dtype): + return dtype.box(self.value) + +class W_GenericBox(Wrappable): + _attrs_ = () + + def descr__new__(space, w_subtype, __args__): + assert isinstance(w_subtype, W_TypeObject) + raise operationerrfmt(space.w_TypeError, "cannot create '%s' instances", + w_subtype.get_module_type_name() + ) + + def descr_str(self, space): + return self.descr_repr(space) + + def descr_repr(self, space): + return space.wrap(self.get_dtype(space).itemtype.str_format(self)) + + def descr_int(self, space): + box = self.convert_to(W_LongBox.get_dtype(space)) + assert isinstance(box, W_LongBox) + return space.wrap(box.value) + + def descr_float(self, space): + box = self.convert_to(W_Float64Box.get_dtype(space)) + assert isinstance(box, W_Float64Box) + return space.wrap(box.value) + + def descr_nonzero(self, space): + dtype = self.get_dtype(space) + return space.wrap(dtype.itemtype.bool(self)) + + def _binop_impl(ufunc_name): + def impl(self, space, w_other): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) + + def _binop_right_impl(ufunc_name): + def impl(self, space, w_other): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) + + def _unaryop_impl(ufunc_name): + def impl(self, space): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) + + descr_add = _binop_impl("add") + descr_sub = _binop_impl("subtract") + descr_mul = _binop_impl("multiply") + descr_div = _binop_impl("divide") + descr_eq = _binop_impl("equal") + descr_ne = _binop_impl("not_equal") + descr_lt = _binop_impl("less") + descr_le = _binop_impl("less_equal") + descr_gt = _binop_impl("greater") + descr_ge = _binop_impl("greater_equal") + + descr_radd = _binop_right_impl("add") + descr_rmul = _binop_right_impl("multiply") + + descr_neg = _unaryop_impl("negative") + descr_abs = _unaryop_impl("absolute") + + +class W_BoolBox(W_GenericBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("bool") + +class W_NumberBox(W_GenericBox): + _attrs_ = () + +class W_IntegerBox(W_NumberBox): + pass + +class W_SignedIntegerBox(W_IntegerBox): + pass + +class W_UnsignedIntgerBox(W_IntegerBox): + pass + +class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int8") + +class W_UInt8Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint8") + +class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int16") + +class W_UInt16Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint16") + +class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = 
new_dtype_getter("int32") + +class W_UInt32Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint32") + +class W_LongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("long") + +class W_ULongBox(W_UnsignedIntgerBox, PrimitiveBox): + pass + +class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int64") + +class W_UInt64Box(W_UnsignedIntgerBox, PrimitiveBox): + pass + +class W_InexactBox(W_NumberBox): + _attrs_ = () + +class W_FloatingBox(W_InexactBox): + _attrs_ = () + +class W_Float32Box(W_FloatingBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("float32") + +class W_Float64Box(W_FloatingBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("float64") + + + +W_GenericBox.typedef = TypeDef("generic", + __module__ = "numpypy", + + __new__ = interp2app(W_GenericBox.descr__new__.im_func), + + __str__ = interp2app(W_GenericBox.descr_str), + __repr__ = interp2app(W_GenericBox.descr_repr), + __int__ = interp2app(W_GenericBox.descr_int), + __float__ = interp2app(W_GenericBox.descr_float), + __nonzero__ = interp2app(W_GenericBox.descr_nonzero), + + __add__ = interp2app(W_GenericBox.descr_add), + __sub__ = interp2app(W_GenericBox.descr_sub), + __mul__ = interp2app(W_GenericBox.descr_mul), + __div__ = interp2app(W_GenericBox.descr_div), + + __radd__ = interp2app(W_GenericBox.descr_add), + __rmul__ = interp2app(W_GenericBox.descr_rmul), + + __eq__ = interp2app(W_GenericBox.descr_eq), + __ne__ = interp2app(W_GenericBox.descr_ne), + __lt__ = interp2app(W_GenericBox.descr_lt), + __le__ = interp2app(W_GenericBox.descr_le), + __gt__ = interp2app(W_GenericBox.descr_gt), + __ge__ = interp2app(W_GenericBox.descr_ge), + + __neg__ = interp2app(W_GenericBox.descr_neg), + __abs__ = interp2app(W_GenericBox.descr_abs), +) + +W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_BoolBox.descr__new__.im_func), +) + +W_NumberBox.typedef = TypeDef("number", W_GenericBox.typedef, + __module__ = "numpypy", +) + +W_IntegerBox.typedef = TypeDef("integer", W_NumberBox.typedef, + __module__ = "numpypy", +) + +W_SignedIntegerBox.typedef = TypeDef("signedinteger", W_IntegerBox.typedef, + __module__ = "numpypy", +) + +W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int8Box.descr__new__.im_func), +) + +W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int16Box.descr__new__.im_func), +) + +W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int32Box.typedef = TypeDef("int32", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int32Box.descr__new__.im_func), +) + +W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +if LONG_BIT == 32: + long_name = "int32" +elif LONG_BIT == 64: + long_name = "int64" +W_LongBox.typedef = TypeDef(long_name, (W_SignedIntegerBox.typedef, int_typedef,), + __module__ = "numpypy", +) + +W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, + __module__ = "numpypy", + __new__ = interp2app(W_Int64Box.descr__new__.im_func), +) + 
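+# MIXIN_64 adds the app-level int typedef to W_Int64Box only on 64-bit builds,
+# so numpypy.int64 subclasses Python int there (cf. test_int64 in test_dtypes.py
+# later in this diff); numpypy.int_ (W_LongBox) always mixes int_typedef in.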
+W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, + __module__ = "numpypy", +) + +W_FloatingBox.typedef = TypeDef("floating", W_InexactBox.typedef, + __module__ = "numpypy", +) + +W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, + __module__ = "numpypy", +) + +W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), + __module__ = "numpypy", + + __new__ = interp2app(W_Float64Box.descr__new__.im_func), +) \ No newline at end of file diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -1,16 +1,11 @@ -import functools -import math - from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app -from pypy.interpreter.typedef import TypeDef, interp_attrproperty, GetSetProperty -from pypy.module.micronumpy import signature -from pypy.objspace.std.floatobject import float2string -from pypy.rlib import rarithmetic, rfloat -from pypy.rlib.rarithmetic import LONG_BIT, widen -from pypy.rlib.objectmodel import specialize, enforceargs -from pypy.rlib.unroll import unrolling_iterable +from pypy.interpreter.typedef import (TypeDef, GetSetProperty, + interp_attrproperty, interp_attrproperty_w) +from pypy.module.micronumpy import types, signature, interp_boxes +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import LONG_BIT from pypy.rpython.lltypesystem import lltype, rffi @@ -19,523 +14,218 @@ BOOLLTR = "b" FLOATINGLTR = "f" + +VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True}) + class W_Dtype(Wrappable): - def __init__(self, space): - pass + _immuable_fields_ = ["itemtype", "num", "kind"] + + def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[]): + self.signature = signature.BaseSignature() + self.itemtype = itemtype + self.num = num + self.kind = kind + self.name = name + self.char = char + self.w_box_type = w_box_type + self.alternate_constructors = alternate_constructors + + def malloc(self, length): + # XXX find out why test_zjit explodes with tracking of allocations + return lltype.malloc(VOID_STORAGE, self.itemtype.get_element_size() * length, + zero=True, flavor="raw", + track_allocation=False, add_memory_pressure=True + ) + + @specialize.argtype(1) + def box(self, value): + return self.itemtype.box(value) + + def coerce(self, space, w_item): + return self.itemtype.coerce(space, w_item) + + def getitem(self, storage, i): + return self.itemtype.read(storage, self.itemtype.get_element_size(), i, 0) + + def setitem(self, storage, i, box): + self.itemtype.store(storage, self.itemtype.get_element_size(), i, 0, box) + + def fill(self, storage, box, start, stop): + self.itemtype.fill(storage, self.itemtype.get_element_size(), box, start, stop, 0) def descr__new__(space, w_subtype, w_dtype): + cache = get_dtype_cache(space) + if space.is_w(w_dtype, space.w_None): - return space.fromcache(W_Float64Dtype) + return cache.w_float64dtype + elif space.isinstance_w(w_dtype, w_subtype): + return w_dtype elif space.isinstance_w(w_dtype, space.w_str): - dtype = space.str_w(w_dtype) - for alias, dtype_class in dtypes_by_alias: - if alias == dtype: - return space.fromcache(dtype_class) - elif isinstance(space.interpclass_w(w_dtype), W_Dtype): - 
return w_dtype - elif space.isinstance_w(w_dtype, space.w_type): - for typename, dtype_class in dtypes_by_apptype: - if space.is_w(getattr(space, "w_%s" % typename), w_dtype): - return space.fromcache(dtype_class) + name = space.str_w(w_dtype) + for dtype in cache.builtin_dtypes: + if dtype.name == name or dtype.char == name: + return dtype + else: + for dtype in cache.builtin_dtypes: + if w_dtype in dtype.alternate_constructors: + return dtype + if w_dtype is dtype.w_box_type: + return dtype raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + def descr_str(self, space): + return space.wrap(self.name) + def descr_repr(self, space): return space.wrap("dtype('%s')" % self.name) - def descr_str(self, space): - return space.wrap(self.name) + def descr_get_itemsize(self, space): + return space.wrap(self.itemtype.get_element_size()) def descr_get_shape(self, space): return space.newtuple([]) - -class BaseBox(object): - pass - -VOID_TP = lltype.Ptr(lltype.Array(lltype.Void, hints={'nolength': True, "uncast_on_llgraph": True})) - -def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype, - expected_size=None): - - class Box(BaseBox): - def __init__(self, val): - self.val = val - - def wrap(self, space): - val = self.val - if valtype is rarithmetic.r_singlefloat: - val = float(val) - return space.wrap(val) - - def convert_to(self, dtype): - return dtype.adapt_val(self.val) - Box.__name__ = "%sBox" % T._name - - TP = lltype.Ptr(lltype.Array(T, hints={'nolength': True})) - class W_LowLevelDtype(W_Dtype): - signature = signature.BaseSignature() - - def erase(self, storage): - return rffi.cast(VOID_TP, storage) - - def unerase(self, storage): - return rffi.cast(TP, storage) - - @enforceargs(None, valtype) - def box(self, value): - return Box(value) - - def unbox(self, box): - assert isinstance(box, Box) - return box.val - - def unwrap(self, space, w_item): - raise NotImplementedError - - def malloc(self, size): - # XXX find out why test_zjit explodes with tracking of allocations - return self.erase(lltype.malloc(TP.TO, size, - zero=True, flavor="raw", - track_allocation=False, add_memory_pressure=True - )) - - def getitem(self, storage, i): - return Box(self.unerase(storage)[i]) - - def setitem(self, storage, i, item): - self.unerase(storage)[i] = self.unbox(item) - - def setitem_w(self, space, storage, i, w_item): - self.setitem(storage, i, self.unwrap(space, w_item)) - - def fill(self, storage, item, start, stop): - storage = self.unerase(storage) - item = self.unbox(item) - for i in xrange(start, stop): - storage[i] = item - - @specialize.argtype(1) - def adapt_val(self, val): - return self.box(rffi.cast(TP.TO.OF, val)) - - W_LowLevelDtype.__name__ = "W_%sDtype" % name.capitalize() - W_LowLevelDtype.num = num - W_LowLevelDtype.kind = kind - W_LowLevelDtype.name = name - W_LowLevelDtype.aliases = aliases - W_LowLevelDtype.applevel_types = applevel_types - W_LowLevelDtype.num_bytes = rffi.sizeof(T) - if expected_size is not None: - assert W_LowLevelDtype.num_bytes == expected_size - return W_LowLevelDtype - - -def binop(func): - specialize.argtype(1, 2)(func) - @functools.wraps(func) - def impl(self, v1, v2): - return self.adapt_val(func(self, - self.for_computation(self.unbox(v1)), - self.for_computation(self.unbox(v2)), - )) - return impl - -def raw_binop(func): - specialize.argtype(1, 2)(func) - # Returns the result unwrapped. 
- @functools.wraps(func) - def impl(self, v1, v2): - return func(self, - self.for_computation(self.unbox(v1)), - self.for_computation(self.unbox(v2)) - ) - return impl - -def unaryop(func): - specialize.argtype(1)(func) - @functools.wraps(func) - def impl(self, v): - return self.adapt_val(func(self, self.for_computation(self.unbox(v)))) - return impl - -class ArithmeticTypeMixin(object): - _mixin_ = True - - @binop - def add(self, v1, v2): - return v1 + v2 - @binop - def sub(self, v1, v2): - return v1 - v2 - @binop - def mul(self, v1, v2): - return v1 * v2 - - @unaryop - def pos(self, v): - return +v - @unaryop - def neg(self, v): - return -v - @unaryop - def abs(self, v): - return abs(v) - - @binop - def max(self, v1, v2): - return max(v1, v2) - @binop - def min(self, v1, v2): - return min(v1, v2) - - def bool(self, v): - return bool(self.for_computation(self.unbox(v))) - @raw_binop - def eq(self, v1, v2): - return v1 == v2 - @raw_binop - def ne(self, v1, v2): - return v1 != v2 - @raw_binop - def lt(self, v1, v2): - return v1 < v2 - @raw_binop - def le(self, v1, v2): - return v1 <= v2 - @raw_binop - def gt(self, v1, v2): - return v1 > v2 - @raw_binop - def ge(self, v1, v2): - return v1 >= v2 - - -class FloatArithmeticDtype(ArithmeticTypeMixin): - _mixin_ = True - - def unwrap(self, space, w_item): - return self.adapt_val(space.float_w(space.float(w_item))) - - def for_computation(self, v): - return float(v) - - def str_format(self, item): - return float2string(self.for_computation(self.unbox(item)), 'g', rfloat.DTSF_STR_PRECISION) - - @binop - def div(self, v1, v2): - # XXX this won't work after translation, probably requires ovfcheck - try: - return v1 / v2 - except ZeroDivisionError: - if v1 == v2 == 0.0: - return rfloat.NAN - return rfloat.copysign(rfloat.INFINITY, v1 * v2) - @binop - def mod(self, v1, v2): - return math.fmod(v1, v2) - @binop - def pow(self, v1, v2): - return math.pow(v1, v2) - - @unaryop - def sign(self, v): - if v == 0.0: - return 0.0 - return rfloat.copysign(1.0, v) - @unaryop - def reciprocal(self, v): - if v == 0.0: - return rfloat.copysign(rfloat.INFINITY, v) - return 1.0 / v - @unaryop - def fabs(self, v): - return math.fabs(v) - @unaryop - def floor(self, v): - return math.floor(v) - - @binop - def copysign(self, v1, v2): - return math.copysign(v1, v2) - @unaryop - def exp(self, v): - try: - return math.exp(v) - except OverflowError: - return rfloat.INFINITY - @unaryop - def sin(self, v): - return math.sin(v) - @unaryop - def cos(self, v): - return math.cos(v) - @unaryop - def tan(self, v): - return math.tan(v) - @unaryop - def arcsin(self, v): - if not -1.0 <= v <= 1.0: - return rfloat.NAN - return math.asin(v) - @unaryop - def arccos(self, v): - if not -1.0 <= v <= 1.0: - return rfloat.NAN - return math.acos(v) - @unaryop - def arctan(self, v): - return math.atan(v) - @unaryop - def arcsinh(self, v): - return math.asinh(v) - @unaryop - def arctanh(self, v): - if v == 1.0 or v == -1.0: - return math.copysign(rfloat.INFINITY, v) - if not -1.0 < v < 1.0: - return rfloat.NAN - return math.atanh(v) - @unaryop - def sqrt(self, v): - try: - return math.sqrt(v) - except ValueError: - return rfloat.NAN - -class IntegerArithmeticDtype(ArithmeticTypeMixin): - _mixin_ = True - - def unwrap(self, space, w_item): - return self.adapt_val(space.int_w(space.int(w_item))) - - def for_computation(self, v): - return widen(v) - - def str_format(self, item): - return str(widen(self.unbox(item))) - - @binop - def div(self, v1, v2): - if v2 == 0: - return 0 - return v1 / v2 - @binop 
- def mod(self, v1, v2): - return v1 % v2 - @binop - def pow(self, v1, v2): - res = 1 - while v2 > 0: - if v2 & 1: - res *= v1 - v2 >>= 1 - if v2 == 0: - break - v1 *= v1 - return res - - -class SignedIntegerArithmeticDtype(IntegerArithmeticDtype): - _mixin_ = True - - @unaryop - def sign(self, v): - if v > 0: - return 1 - elif v < 0: - return -1 - else: - assert v == 0 - return 0 - -class UnsignedIntegerArithmeticDtype(IntegerArithmeticDtype): - _mixin_ = True - - @unaryop - def sign(self, v): - return int(v != 0) - - -W_BoolDtype = create_low_level_dtype( - num = 0, kind = BOOLLTR, name = "bool", - aliases = ["?", "bool", "bool8"], - applevel_types = ["bool"], - T = lltype.Bool, - valtype = bool, -) -class W_BoolDtype(SignedIntegerArithmeticDtype, W_BoolDtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.is_true(w_item)) - - def str_format(self, item): - v = self.unbox(item) - return "True" if v else "False" - - def for_computation(self, v): - return int(v) - -W_Int8Dtype = create_low_level_dtype( - num = 1, kind = SIGNEDLTR, name = "int8", - aliases = ["b", "int8", "i1"], - applevel_types = [], - T = rffi.SIGNEDCHAR, - valtype = rffi.SIGNEDCHAR._type, - expected_size = 1, -) -class W_Int8Dtype(SignedIntegerArithmeticDtype, W_Int8Dtype): - pass - -W_UInt8Dtype = create_low_level_dtype( - num = 2, kind = UNSIGNEDLTR, name = "uint8", - aliases = ["B", "uint8", "I1"], - applevel_types = [], - T = rffi.UCHAR, - valtype = rffi.UCHAR._type, - expected_size = 1, -) -class W_UInt8Dtype(UnsignedIntegerArithmeticDtype, W_UInt8Dtype): - pass - -W_Int16Dtype = create_low_level_dtype( - num = 3, kind = SIGNEDLTR, name = "int16", - aliases = ["h", "int16", "i2"], - applevel_types = [], - T = rffi.SHORT, - valtype = rffi.SHORT._type, - expected_size = 2, -) -class W_Int16Dtype(SignedIntegerArithmeticDtype, W_Int16Dtype): - pass - -W_UInt16Dtype = create_low_level_dtype( - num = 4, kind = UNSIGNEDLTR, name = "uint16", - aliases = ["H", "uint16", "I2"], - applevel_types = [], - T = rffi.USHORT, - valtype = rffi.USHORT._type, - expected_size = 2, -) -class W_UInt16Dtype(UnsignedIntegerArithmeticDtype, W_UInt16Dtype): - pass - -W_Int32Dtype = create_low_level_dtype( - num = 5, kind = SIGNEDLTR, name = "int32", - aliases = ["i", "int32", "i4"], - applevel_types = [], - T = rffi.INT, - valtype = rffi.INT._type, - expected_size = 4, -) -class W_Int32Dtype(SignedIntegerArithmeticDtype, W_Int32Dtype): - pass - -W_UInt32Dtype = create_low_level_dtype( - num = 6, kind = UNSIGNEDLTR, name = "uint32", - aliases = ["I", "uint32", "I4"], - applevel_types = [], - T = rffi.UINT, - valtype = rffi.UINT._type, - expected_size = 4, -) -class W_UInt32Dtype(UnsignedIntegerArithmeticDtype, W_UInt32Dtype): - pass - -W_Int64Dtype = create_low_level_dtype( - num = 9, kind = SIGNEDLTR, name = "int64", - aliases = ["q", "int64", "i8"], - applevel_types = ["long"], - T = rffi.LONGLONG, - valtype = rffi.LONGLONG._type, - expected_size = 8, -) -class W_Int64Dtype(SignedIntegerArithmeticDtype, W_Int64Dtype): - pass - -W_UInt64Dtype = create_low_level_dtype( - num = 10, kind = UNSIGNEDLTR, name = "uint64", - aliases = ["Q", "uint64", "I8"], - applevel_types = [], - T = rffi.ULONGLONG, - valtype = rffi.ULONGLONG._type, - expected_size = 8, -) -class W_UInt64Dtype(UnsignedIntegerArithmeticDtype, W_UInt64Dtype): - pass - -if LONG_BIT == 32: - long_dtype = W_Int32Dtype - ulong_dtype = W_UInt32Dtype -elif LONG_BIT == 64: - long_dtype = W_Int64Dtype - ulong_dtype = W_UInt64Dtype -else: - assert False - -class 
W_LongDtype(long_dtype): - num = 7 - aliases = ["l"] - applevel_types = ["int"] - -class W_ULongDtype(ulong_dtype): - num = 8 - aliases = ["L"] - -W_Float32Dtype = create_low_level_dtype( - num = 11, kind = FLOATINGLTR, name = "float32", - aliases = ["f", "float32", "f4"], - applevel_types = [], - T = lltype.SingleFloat, - valtype = rarithmetic.r_singlefloat, - expected_size = 4, -) -class W_Float32Dtype(FloatArithmeticDtype, W_Float32Dtype): - pass - -W_Float64Dtype = create_low_level_dtype( - num = 12, kind = FLOATINGLTR, name = "float64", - aliases = ["d", "float64", "f8"], - applevel_types = ["float"], - T = lltype.Float, - valtype = float, - expected_size = 8, -) -class W_Float64Dtype(FloatArithmeticDtype, W_Float64Dtype): - pass - -ALL_DTYPES = [ - W_BoolDtype, - W_Int8Dtype, W_UInt8Dtype, W_Int16Dtype, W_UInt16Dtype, - W_Int32Dtype, W_UInt32Dtype, W_LongDtype, W_ULongDtype, - W_Int64Dtype, W_UInt64Dtype, - W_Float32Dtype, W_Float64Dtype, -] - -dtypes_by_alias = unrolling_iterable([ - (alias, dtype) - for dtype in ALL_DTYPES - for alias in dtype.aliases -]) -dtypes_by_apptype = unrolling_iterable([ - (apptype, dtype) - for dtype in ALL_DTYPES - for apptype in dtype.applevel_types -]) -dtypes_by_num_bytes = unrolling_iterable(sorted([ - (dtype.num_bytes, dtype) - for dtype in ALL_DTYPES -])) - W_Dtype.typedef = TypeDef("dtype", - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_Dtype.descr__new__.im_func), + __str__= interp2app(W_Dtype.descr_str), __repr__ = interp2app(W_Dtype.descr_repr), - __str__ = interp2app(W_Dtype.descr_str), num = interp_attrproperty("num", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), - itemsize = interp_attrproperty("num_bytes", cls=W_Dtype), + type = interp_attrproperty_w("w_box_type", cls=W_Dtype), + itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), shape = GetSetProperty(W_Dtype.descr_get_shape), ) W_Dtype.typedef.acceptable_as_base_class = False + +class DtypeCache(object): + def __init__(self, space): + self.w_booldtype = W_Dtype( + types.Bool(), + num=0, + kind=BOOLLTR, + name="bool", + char="?", + w_box_type = space.gettypefor(interp_boxes.W_BoolBox), + alternate_constructors=[space.w_bool], + ) + self.w_int8dtype = W_Dtype( + types.Int8(), + num=1, + kind=SIGNEDLTR, + name="int8", + char="b", + w_box_type = space.gettypefor(interp_boxes.W_Int8Box) + ) + self.w_uint8dtype = W_Dtype( + types.UInt8(), + num=2, + kind=UNSIGNEDLTR, + name="uint8", + char="B", + w_box_type = space.gettypefor(interp_boxes.W_UInt8Box), + ) + self.w_int16dtype = W_Dtype( + types.Int16(), + num=3, + kind=SIGNEDLTR, + name="int16", + char="h", + w_box_type = space.gettypefor(interp_boxes.W_Int16Box), + ) + self.w_uint16dtype = W_Dtype( + types.UInt16(), + num=4, + kind=UNSIGNEDLTR, + name="uint16", + char="H", + w_box_type = space.gettypefor(interp_boxes.W_UInt16Box), + ) + self.w_int32dtype = W_Dtype( + types.Int32(), + num=5, + kind=SIGNEDLTR, + name="int32", + char="i", + w_box_type = space.gettypefor(interp_boxes.W_Int32Box), + ) + self.w_uint32dtype = W_Dtype( + types.UInt32(), + num=6, + kind=UNSIGNEDLTR, + name="uint32", + char="I", + w_box_type = space.gettypefor(interp_boxes.W_UInt32Box), + ) + if LONG_BIT == 32: + name = "int32" + elif LONG_BIT == 64: + name = "int64" + self.w_longdtype = W_Dtype( + types.Long(), + num=7, + kind=SIGNEDLTR, + name=name, + char="l", + w_box_type = space.gettypefor(interp_boxes.W_LongBox), + alternate_constructors=[space.w_int], + ) + self.w_ulongdtype = W_Dtype( + types.ULong(), + num=8, + 
kind=UNSIGNEDLTR, + name="u" + name, + char="L", + w_box_type = space.gettypefor(interp_boxes.W_ULongBox), + ) + self.w_int64dtype = W_Dtype( + types.Int64(), + num=9, + kind=SIGNEDLTR, + name="int64", + char="q", + w_box_type = space.gettypefor(interp_boxes.W_Int64Box), + alternate_constructors=[space.w_long], + ) + self.w_uint64dtype = W_Dtype( + types.UInt64(), + num=10, + kind=UNSIGNEDLTR, + name="uint64", + char="Q", + w_box_type = space.gettypefor(interp_boxes.W_UInt64Box), + ) + self.w_float32dtype = W_Dtype( + types.Float32(), + num=11, + kind=FLOATINGLTR, + name="float32", + char="f", + w_box_type = space.gettypefor(interp_boxes.W_Float32Box), + ) + self.w_float64dtype = W_Dtype( + types.Float64(), + num=12, + kind=FLOATINGLTR, + name="float64", + char="d", + w_box_type = space.gettypefor(interp_boxes.W_Float64Box), + alternate_constructors=[space.w_float], + ) + + self.builtin_dtypes = [ + self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, + self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype, + self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype, + self.w_int64dtype, self.w_uint64dtype, self.w_float32dtype, + self.w_float64dtype + ] + self.dtypes_by_num_bytes = sorted( + (dtype.itemtype.get_element_size(), dtype) + for dtype in self.builtin_dtypes + ) + +def get_dtype_cache(space): + return space.fromcache(DtypeCache) \ No newline at end of file diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -98,47 +98,6 @@ endshape[i] = remainder[i] return endshape -def descr_new_array(space, w_subtype, w_item_or_iterable, w_dtype=None, - w_order=NoneNotWrapped): - # find scalar - if not space.issequence_w(w_item_or_iterable): - if space.is_w(w_dtype, space.w_None): - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, - w_item_or_iterable) - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - return scalar_w(space, dtype, w_item_or_iterable) - if w_order is None: - order = 'C' - else: - order = space.str_w(w_order) - if order != 'C': # or order != 'F': - raise operationerrfmt(space.w_ValueError, "Unknown order: %s", - order) - shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) - # they come back in C order - size = len(elems_w) - if space.is_w(w_dtype, space.w_None): - w_dtype = None - for w_elem in elems_w: - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, - w_dtype) - if w_dtype is space.fromcache(interp_dtype.W_Float64Dtype): - break - if w_dtype is None: - w_dtype = space.w_None - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - arr = NDimArray(size, shape[:], dtype=dtype, order=order) - shapelen = len(shape) - arr_iter = arr.start_iter(arr.shape) - for i in range(len(elems_w)): - w_elem = elems_w[i] - dtype.setitem_w(space, arr.storage, arr_iter.offset, w_elem) - arr_iter = arr_iter.next(shapelen) - return arr # Iterators for arrays # -------------------- @@ -378,6 +337,13 @@ def add_invalidates(self, other): self.invalidates.append(other) + def descr__new__(space, w_subtype, w_size, w_dtype=None): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + size, shape = _find_size_and_shape(space, w_size) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + def 
_unaryop_impl(ufunc_name): def impl(self, space): return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) @@ -451,8 +417,8 @@ self=self, dtype=dtype, i=i, result=result, idx=idx, cur_best=cur_best) - new_best = getattr(dtype, op_name)(cur_best, self.eval(i)) - if dtype.ne(new_best, cur_best): + new_best = getattr(dtype.itemtype, op_name)(cur_best, self.eval(i)) + if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best i = i.next(shapelen) @@ -462,8 +428,7 @@ size = self.find_size() if size == 0: raise OperationError(space.w_ValueError, - space.wrap("Can't call %s on zero-size arrays" \ - % op_name)) + space.wrap("Can't call %s on zero-size arrays" % op_name)) return space.wrap(loop(self)) return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) @@ -475,7 +440,7 @@ all_driver.jit_merge_point(signature=self.signature, shapelen=shapelen, self=self, dtype=dtype, i=i) - if not dtype.bool(self.eval(i)): + if not dtype.itemtype.bool(self.eval(i)): return False i = i.next(shapelen) return True @@ -490,7 +455,7 @@ any_driver.jit_merge_point(signature=self.signature, shapelen=shapelen, self=self, dtype=dtype, i=i) - if dtype.bool(self.eval(i)): + if dtype.itemtype.bool(self.eval(i)): return True i = i.next(shapelen) return False @@ -542,8 +507,8 @@ res.append(')') else: concrete.to_str(space, 1, res, indent=' ') - if (dtype is not space.fromcache(interp_dtype.W_Float64Dtype) and - dtype is not space.fromcache(interp_dtype.W_Int64Dtype)) or \ + if (dtype is not interp_dtype.get_dtype_cache(space).w_float64dtype and + dtype is not interp_dtype.get_dtype_cache(space).w_int64dtype) or \ not self.find_size(): res.append(", dtype=" + dtype.name) res.append(")") @@ -612,7 +577,7 @@ start = False else: builder.append(spacer) - builder.append(dtype.str_format(self.getitem(item))) + builder.append(dtype.itemtype.str_format(self.getitem(item))) item += self.strides[0] # Add a comma only if comma is False - this prevents adding two # commas @@ -625,7 +590,7 @@ start = False else: builder.append(spacer) - builder.append(dtype.str_format(self.getitem(item))) + builder.append(dtype.itemtype.str_format(self.getitem(item))) item += self.strides[0] i += 1 else: @@ -712,7 +677,7 @@ raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) item = concrete._index_of_single_item(space, w_idx) - return concrete.getitem(item).wrap(space) + return concrete.getitem(item) chunks = self._prepare_slice_args(space, w_idx) return space.wrap(self.create_slice(space, chunks)) @@ -771,14 +736,15 @@ shape[:]) def descr_mean(self, space): - return space.wrap(space.float_w(self.descr_sum(space)) / self.find_size()) + return space.div(self.descr_sum(space), space.wrap(self.find_size())) def descr_nonzero(self, space): if self.find_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) - return space.wrap(space.is_true(self.get_concrete().eval( - self.start_iter(self.shape)).wrap(space))) + return space.wrap(space.is_true( + self.get_concrete().eval(self.start_iter(self.shape)) + )) def descr_get_transpose(self, space): concrete = self.get_concrete() @@ -814,17 +780,14 @@ return w_obj elif space.issequence_w(w_obj): # Convert to array. 
- w_obj = space.call_function(space.gettypefor(BaseArray), w_obj) - assert isinstance(w_obj, BaseArray) - return w_obj + return array(space, w_obj, w_order=None) else: # If it's a scalar dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) return scalar_w(space, dtype, w_obj) def scalar_w(space, dtype, w_obj): - assert isinstance(dtype, interp_dtype.W_Dtype) - return Scalar(dtype, dtype.unwrap(space, w_obj)) + return Scalar(dtype, dtype.coerce(space, w_obj)) class Scalar(BaseArray): """ @@ -835,6 +798,7 @@ _attrs_ = ["dtype", "value", "shape"] def __init__(self, dtype, value): + self.shape = self.strides = [] BaseArray.__init__(self, [], 'C') self.dtype = dtype self.value = value @@ -858,7 +822,7 @@ return ConstantIterator() def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): - builder.append(self.dtype.str_format(self.value)) + builder.append(self.dtype.itemtype.str_format(self.value)) def copy(self): return Scalar(self.dtype, self.value) @@ -884,7 +848,7 @@ i = 0 signature = self.signature result_size = self.find_size() - result = NDimArray(result_size, self.shape, self.find_dtype()) + result = W_NDimArray(result_size, self.shape, self.find_dtype()) shapelen = len(self.shape) i = self.start_iter() ri = result.start_iter() @@ -1111,14 +1075,14 @@ return 'Slice(%s)' % self.parent.debug_repr() def copy(self): - array = NDimArray(self.size, self.shape[:], self.find_dtype()) + array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) iter = self.start_iter() while not iter.done(): array.setitem(iter.offset, self.getitem(iter.offset)) iter = iter.next(len(self.shape)) return array -class NDimArray(BaseArray): +class W_NDimArray(BaseArray): """ A class representing contiguous array. We know that each iteration by say ufunc will increase the data index by one """ @@ -1145,11 +1109,11 @@ return self.dtype.getitem(self.storage, iter.get_offset()) def copy(self): - array = NDimArray(self.size, self.shape[:], self.dtype, self.order) + array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) rffi.c_memcpy( - rffi.cast(rffi.VOIDP, array.storage), - rffi.cast(rffi.VOIDP, self.storage), - self.size * self.dtype.num_bytes + array.storage, + self.storage, + self.size * self.dtype.itemtype.get_element_size() ) return array @@ -1160,8 +1124,7 @@ "len() of unsized object")) def setitem_w(self, space, item, w_value): - self.invalidated() - self.dtype.setitem_w(space, self.storage, item, w_value) + return self.setitem(item, self.dtype.coerce(space, w_value)) def setitem(self, item, value): self.invalidated() @@ -1193,20 +1156,62 @@ shape.append(item) return size, shape +def array(space, w_item_or_iterable, w_dtype=None, w_order=NoneNotWrapped): + # find scalar + if not space.issequence_w(w_item_or_iterable): + if space.is_w(w_dtype, space.w_None): + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, + w_item_or_iterable) + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + return scalar_w(space, dtype, w_item_or_iterable) + if w_order is None: + order = 'C' + else: + order = space.str_w(w_order) + if order != 'C': # or order != 'F': + raise operationerrfmt(space.w_ValueError, "Unknown order: %s", + order) + shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) + # they come back in C order + size = len(elems_w) + if space.is_w(w_dtype, space.w_None): + w_dtype = None + for w_elem in elems_w: + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, + w_dtype) + if w_dtype is 
interp_dtype.get_dtype_cache(space).w_float64dtype: + break + if w_dtype is None: + w_dtype = space.w_None + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) + shapelen = len(shape) + arr_iter = arr.start_iter(arr.shape) + for i in range(len(elems_w)): + w_elem = elems_w[i] + dtype.setitem(arr.storage, arr_iter.offset, dtype.coerce(space, w_elem)) + arr_iter = arr_iter.next(shapelen) + return arr + def zeros(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) size, shape = _find_size_and_shape(space, w_size) - return space.wrap(NDimArray(size, shape[:], dtype=dtype)) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) def ones(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) + size, shape = _find_size_and_shape(space, w_size) - arr = NDimArray(size, shape[:], dtype=dtype) - one = dtype.adapt_val(1) + arr = W_NDimArray(size, shape[:], dtype=dtype) + one = dtype.box(1) arr.dtype.fill(arr.storage, one, 0, size) return space.wrap(arr) @@ -1217,9 +1222,9 @@ return w_arr.descr_dot(space, w_obj2) BaseArray.typedef = TypeDef( - 'numarray', - __new__ = interp2app(descr_new_array), - + 'ndarray', + __module__ = "numpypy", + __new__ = interp2app(BaseArray.descr__new__.im_func), __len__ = interp2app(BaseArray.descr_len), __getitem__ = interp2app(BaseArray.descr_getitem), @@ -1308,10 +1313,10 @@ def descr_next(self, space): if self.iter.done(): - raise OperationError(space.w_StopIteration, space.wrap('')) + raise OperationError(space.w_StopIteration, space.w_None) result = self.eval(self.iter) self.iter = self.iter.next(self.shapelen) - return result.wrap(space) + return result def descr_iter(self): return self diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,6 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.interp_dtype import W_Float64Dtype +from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.rlib.rstruct.runpack import runpack from pypy.rpython.lltypesystem import lltype, rffi @@ -9,7 +9,7 @@ @unwrap_spec(s=str) def fromstring(space, s): - from pypy.module.micronumpy.interp_numarray import NDimArray + from pypy.module.micronumpy.interp_numarray import W_NDimArray length = len(s) if length % FLOAT_SIZE == 0: @@ -18,8 +18,8 @@ raise OperationError(space.w_ValueError, space.wrap( "string length %d not divisable by %d" % (length, FLOAT_SIZE))) - dtype = space.fromcache(W_Float64Dtype) - a = NDimArray(number, [number], dtype=dtype) + dtype = get_dtype_cache(space).w_float64dtype + a = W_NDimArray(number, [number], dtype=dtype) start = 0 end = FLOAT_SIZE diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import interp_dtype, signature +from 
pypy.module.micronumpy import interp_boxes, interp_dtype, signature, types from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name @@ -15,6 +15,7 @@ class W_Ufunc(Wrappable): _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"] + _immutable_fields_ = ["promote_to_float", "promote_bools"] def __init__(self, name, promote_to_float, promote_bools, identity): self.name = name @@ -29,7 +30,7 @@ def descr_get_identity(self, space): if self.identity is None: return space.w_None - return self.identity.wrap(space) + return self.identity def descr_call(self, space, __args__): if __args__.keywords or len(__args__.arguments_w) < self.argcount: @@ -80,8 +81,7 @@ new_sig = signature.Signature.find_sig([ self.reduce_signature, obj.signature ]) - return self.reduce_loop(new_sig, shapelen, start, value, obj, - dtype).wrap(space) + return self.reduce_loop(new_sig, shapelen, start, value, obj, dtype) def reduce_loop(self, signature, shapelen, i, value, obj, dtype): while not i.done(): @@ -115,7 +115,7 @@ promote_bools=self.promote_bools, ) if isinstance(w_obj, Scalar): - return self.func(res_dtype, w_obj.value.convert_to(res_dtype)).wrap(space) + return self.func(res_dtype, w_obj.value.convert_to(res_dtype)) new_sig = signature.Signature.find_sig([self.signature, w_obj.signature]) w_res = Call1(new_sig, w_obj.shape, res_dtype, w_obj, w_obj.order) @@ -124,6 +124,7 @@ class W_Ufunc2(W_Ufunc): + _immutable_fields_ = ["comparison_func", "func"] argcount = 2 def __init__(self, func, name, promote_to_float=False, promote_bools=False, @@ -148,14 +149,14 @@ promote_bools=self.promote_bools, ) if self.comparison_func: - res_dtype = space.fromcache(interp_dtype.W_BoolDtype) + res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar): return self.func(calc_dtype, w_lhs.value.convert_to(calc_dtype), w_rhs.value.convert_to(calc_dtype) - ).wrap(space) + ) new_sig = signature.Signature.find_sig([ self.signature, w_lhs.signature, w_rhs.signature @@ -169,7 +170,7 @@ W_Ufunc.typedef = TypeDef("ufunc", - __module__ = "numpy", + __module__ = "numpypy", __call__ = interp2app(W_Ufunc.descr_call), __repr__ = interp2app(W_Ufunc.descr_repr), @@ -187,7 +188,7 @@ dt1, dt2 = dt2, dt1 # Some operations promote op(bool, bool) to return int8, rather than bool if promote_bools and (dt1.kind == dt2.kind == interp_dtype.BOOLLTR): - return space.fromcache(interp_dtype.W_Int8Dtype) + return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: return find_unaryop_result_dtype(space, dt2, promote_to_float=True) # If they're the same kind, choose the greater one. @@ -197,14 +198,14 @@ # Everything promotes to float, and bool promotes to everything. 
if dt2.kind == interp_dtype.FLOATINGLTR or dt1.kind == interp_dtype.BOOLLTR: # Float32 + 8-bit int = Float64 - if dt2.num == 11 and dt1.num_bytes >= 4: - return space.fromcache(interp_dtype.W_Float64Dtype) + if dt2.num == 11 and dt1.itemtype.get_element_size() >= 4: + return interp_dtype.get_dtype_cache(space).w_float64dtype return dt2 # for now this means mixing signed and unsigned if dt2.kind == interp_dtype.SIGNEDLTR: # if dt2 has a greater number of bytes, then just go with it - if dt1.num_bytes < dt2.num_bytes: + if dt1.itemtype.get_element_size() < dt2.itemtype.get_element_size(): return dt2 # we need to promote both dtypes dtypenum = dt2.num + 2 @@ -214,10 +215,11 @@ # UInt64 + signed = Float64 if dt2.num == 10: dtypenum += 1 - newdtype = interp_dtype.ALL_DTYPES[dtypenum] + newdtype = interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum] - if newdtype.num_bytes > dt2.num_bytes or newdtype.kind == interp_dtype.FLOATINGLTR: - return space.fromcache(newdtype) + if (newdtype.itemtype.get_element_size() > dt2.itemtype.get_element_size() or + newdtype.kind == interp_dtype.FLOATINGLTR): + return newdtype else: # we only promoted to long on 32-bit or to longlong on 64-bit # this is really for dealing with the Long and Ulong dtypes @@ -225,35 +227,42 @@ dtypenum += 2 else: dtypenum += 3 - return space.fromcache(interp_dtype.ALL_DTYPES[dtypenum]) + return interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum] def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): if promote_bools and (dt.kind == interp_dtype.BOOLLTR): - return space.fromcache(interp_dtype.W_Int8Dtype) + return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: if dt.kind == interp_dtype.FLOATINGLTR: return dt if dt.num >= 5: - return space.fromcache(interp_dtype.W_Float64Dtype) - for bytes, dtype in interp_dtype.dtypes_by_num_bytes: - if dtype.kind == interp_dtype.FLOATINGLTR and dtype.num_bytes > dt.num_bytes: - return space.fromcache(dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype + for bytes, dtype in interp_dtype.get_dtype_cache(space).dtypes_by_num_bytes: + if (dtype.kind == interp_dtype.FLOATINGLTR and + dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): + return dtype if promote_to_largest: if dt.kind == interp_dtype.BOOLLTR or dt.kind == interp_dtype.SIGNEDLTR: - return space.fromcache(interp_dtype.W_Int64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype elif dt.kind == interp_dtype.FLOATINGLTR: - return space.fromcache(interp_dtype.W_Float64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype elif dt.kind == interp_dtype.UNSIGNEDLTR: - return space.fromcache(interp_dtype.W_UInt64Dtype) + return interp_dtype.get_dtype_cache(space).w_uint64dtype else: assert False return dt def find_dtype_for_scalar(space, w_obj, current_guess=None): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - long_dtype = space.fromcache(interp_dtype.W_LongDtype) - int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype) + bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype + long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype + int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype + + if isinstance(w_obj, interp_boxes.W_GenericBox): + dtype = w_obj.get_dtype(space) + if current_guess is None: + return dtype + return find_binop_result_dtype(space, dtype, current_guess) if space.isinstance_w(w_obj, space.w_bool): if current_guess is None or current_guess is 
bool_dtype: @@ -269,20 +278,19 @@ current_guess is long_dtype or current_guess is int64_dtype): return int64_dtype return current_guess - return space.fromcache(interp_dtype.W_Float64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func): if argcount == 1: def impl(res_dtype, value): - return getattr(res_dtype, op_name)(value) + return getattr(res_dtype.itemtype, op_name)(value) elif argcount == 2: + dtype_cache = interp_dtype.get_dtype_cache(space) def impl(res_dtype, lvalue, rvalue): - res = getattr(res_dtype, op_name)(lvalue, rvalue) + res = getattr(res_dtype.itemtype, op_name)(lvalue, rvalue) if comparison_func: - booldtype = space.fromcache(interp_dtype.W_BoolDtype) - assert isinstance(booldtype, interp_dtype.W_BoolDtype) - res = booldtype.box(res) + return dtype_cache.w_booldtype.box(res) return res return func_with_new_name(impl, ufunc_name) @@ -338,7 +346,7 @@ identity = extra_kwargs.get("identity") if identity is not None: - identity = space.fromcache(interp_dtype.W_LongDtype).adapt_val(identity) + identity = interp_dtype.get_dtype_cache(space).w_longdtype.box(identity) extra_kwargs["identity"] = identity func = ufunc_dtype_caller(space, ufunc_name, op_name, argcount, diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -1,6 +1,6 @@ from pypy.conftest import gettestobjspace -from pypy.module.micronumpy import interp_dtype -from pypy.module.micronumpy.interp_numarray import NDimArray, Scalar +from pypy.module.micronumpy.interp_dtype import get_dtype_cache +from pypy.module.micronumpy.interp_numarray import W_NDimArray, Scalar from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) @@ -11,9 +11,10 @@ class TestSignature(object): def test_binop_signature(self, space): - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + float64_dtype = get_dtype_cache(space).w_float64dtype + bool_dtype = get_dtype_cache(space).w_booldtype - ar = NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) assert v1.signature is not v2.signature @@ -22,7 +23,7 @@ v4 = ar.descr_add(space, ar) assert v1.signature is v4.signature - bool_ar = NDimArray(10, [10], dtype=space.fromcache(interp_dtype.W_BoolDtype)) + bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) assert v5.signature is not v1.signature assert v5.signature is not v2.signature @@ -30,7 +31,9 @@ assert v5.signature is v6.signature def test_slice_signature(self, space): - ar = NDimArray(10, [10], dtype=space.fromcache(interp_dtype.W_Float64Dtype)) + float64_dtype = get_dtype_cache(space).w_float64dtype + + ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) assert v1.signature is v2.signature @@ -41,10 +44,10 @@ class TestUfuncCoerscion(object): def test_binops(self, space): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype) - int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype) - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + bool_dtype = get_dtype_cache(space).w_booldtype + int8_dtype = get_dtype_cache(space).w_int8dtype + 
int32_dtype = get_dtype_cache(space).w_int32dtype + float64_dtype = get_dtype_cache(space).w_float64dtype # Basic pairing assert find_binop_result_dtype(space, bool_dtype, bool_dtype) is bool_dtype @@ -62,19 +65,19 @@ assert find_binop_result_dtype(space, bool_dtype, float64_dtype, promote_to_float=True) is float64_dtype def test_unaryops(self, space): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype) - uint8_dtype = space.fromcache(interp_dtype.W_UInt8Dtype) - int16_dtype = space.fromcache(interp_dtype.W_Int16Dtype) - uint16_dtype = space.fromcache(interp_dtype.W_UInt16Dtype) - int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype) - uint32_dtype = space.fromcache(interp_dtype.W_UInt32Dtype) - long_dtype = space.fromcache(interp_dtype.W_LongDtype) - ulong_dtype = space.fromcache(interp_dtype.W_ULongDtype) - int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype) - uint64_dtype = space.fromcache(interp_dtype.W_UInt64Dtype) - float32_dtype = space.fromcache(interp_dtype.W_Float32Dtype) - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + bool_dtype = get_dtype_cache(space).w_booldtype + int8_dtype = get_dtype_cache(space).w_int8dtype + uint8_dtype = get_dtype_cache(space).w_uint8dtype + int16_dtype = get_dtype_cache(space).w_int16dtype + uint16_dtype = get_dtype_cache(space).w_uint16dtype + int32_dtype = get_dtype_cache(space).w_int32dtype + uint32_dtype = get_dtype_cache(space).w_uint32dtype + long_dtype = get_dtype_cache(space).w_longdtype + ulong_dtype = get_dtype_cache(space).w_ulongdtype + int64_dtype = get_dtype_cache(space).w_int64dtype + uint64_dtype = get_dtype_cache(space).w_uint64dtype + float32_dtype = get_dtype_cache(space).w_float32dtype + float64_dtype = get_dtype_cache(space).w_float64dtype # Normal rules, everything returns itself assert find_unaryop_result_dtype(space, bool_dtype) is bool_dtype diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -1,6 +1,9 @@ +import py -import py -from pypy.module.micronumpy.compile import * +from pypy.module.micronumpy.compile import (numpy_compile, Assignment, + ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, + FunctionCall, FakeSpace) + class TestCompiler(object): def compile(self, code): @@ -106,7 +109,7 @@ c -> 3 """ interp = self.run(code) - assert interp.results[-1].value.val == 9 + assert interp.results[-1].value == 9 def test_array_getitem(self): code = """ @@ -115,7 +118,7 @@ a + b -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 3 + 6 + assert interp.results[0].value == 3 + 6 def test_range_getitem(self): code = """ @@ -123,7 +126,7 @@ r -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_sum(self): code = """ @@ -132,7 +135,7 @@ r """ interp = self.run(code) - assert interp.results[0].value.val == 15 + assert interp.results[0].value.value == 15 def test_array_write(self): code = """ @@ -141,7 +144,7 @@ a -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 15 + assert interp.results[0].value == 15 def test_min(self): interp = self.run(""" @@ -150,7 +153,7 @@ b = a + a min(b) """) - assert interp.results[0].value.val == -24 + assert interp.results[0].value.value == -24 def test_max(self): interp = self.run(""" @@ -159,7 +162,7 @@ b = a + a max(b) """) - assert 
interp.results[0].value.val == 256 + assert interp.results[0].value.value == 256 def test_slice(self): interp = self.run(""" @@ -167,7 +170,7 @@ b = a -> : b -> 3 """) - assert interp.results[0].value.val == 4 + assert interp.results[0].value == 4 def test_slice_step(self): interp = self.run(""" @@ -175,7 +178,7 @@ b = a -> ::2 b -> 3 """) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_setslice(self): interp = self.run(""" @@ -185,7 +188,7 @@ a[::3] = b a -> 3 """) - assert interp.results[0].value.val == 5 + assert interp.results[0].value == 5 def test_slice2(self): @@ -196,14 +199,14 @@ b = s1 + s2 b -> 3 """) - assert interp.results[0].value.val == 15 + assert interp.results[0].value == 15 def test_multidim_getitem(self): interp = self.run(""" a = [[1,2]] a -> 0 -> 1 """) - assert interp.results[0].value.val == 2 + assert interp.results[0].value == 2 def test_multidim_getitem_2(self): interp = self.run(""" @@ -211,7 +214,7 @@ b = a + a b -> 1 -> 1 """) - assert interp.results[0].value.val == 8 + assert interp.results[0].value == 8 def test_set_slice(self): interp = self.run(""" @@ -220,7 +223,7 @@ b[:] = a + a b -> 3 """) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_set_slice2(self): interp = self.run(""" @@ -231,4 +234,4 @@ a[0:30:3] = c a -> 3 """) - assert interp.results[0].value.val == 11 + assert interp.results[0].value == 11 diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -31,7 +31,7 @@ def test_repr_str(self): from numpypy import dtype - assert repr(dtype) == "" + assert repr(dtype) == "" d = dtype('?') assert repr(d) == "dtype('bool')" assert str(d) == "bool" @@ -45,13 +45,13 @@ assert a[i] is True_ def test_copy_array_with_dtype(self): - from numpypy import array, False_, True_ + from numpypy import array, False_, True_, int64 a = array([0, 1, 2, 3], dtype=long) # int on 64-bit, long in 32-bit - assert is_valid_int(a[0]) + assert isinstance(a[0], int64) b = a.copy() - assert is_valid_int(b[0]) + assert isinstance(b[0], int64) a = array([0, 1, 2, 3], dtype=bool) assert a[0] is False_ @@ -73,17 +73,17 @@ assert a[i] is True_ def test_zeros_long(self): - from numpypy import zeros + from numpypy import zeros, int64 a = zeros(10, dtype=long) for i in range(10): - assert is_valid_int(a[i]) + assert isinstance(a[i], int64) assert a[1] == 0 def test_ones_long(self): - from numpypy import ones + from numpypy import ones, int64 a = ones(10, dtype=long) for i in range(10): - assert is_valid_int(a[i]) + assert isinstance(a[i], int64) assert a[1] == 1 def test_overflow(self): @@ -166,3 +166,99 @@ # You can't subclass dtype raises(TypeError, type, "Foo", (dtype,), {}) + +class AppTestTypes(BaseNumpyAppTest): + def test_abstract_types(self): + import numpypy as numpy + raises(TypeError, numpy.generic, 0) + raises(TypeError, numpy.number, 0) + raises(TypeError, numpy.integer, 0) + exc = raises(TypeError, numpy.signedinteger, 0) + assert str(exc.value) == "cannot create 'numpypy.signedinteger' instances" + + raises(TypeError, numpy.floating, 0) + raises(TypeError, numpy.inexact, 0) + + def test_bool(self): + import numpypy as numpy + + assert numpy.bool_.mro() == [numpy.bool_, numpy.generic, object] + assert numpy.bool_(3) is numpy.True_ + assert numpy.bool_("") is numpy.False_ + assert type(numpy.True_) is type(numpy.False_) is numpy.bool_ + + class 
X(numpy.bool_): + pass + + assert type(X(True)) is numpy.bool_ + assert X(True) is numpy.True_ + + def test_int8(self): + import numpypy as numpy + + assert numpy.int8.mro() == [numpy.int8, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object] + + a = numpy.array([1, 2, 3], numpy.int8) + assert type(a[1]) is numpy.int8 + assert numpy.dtype("int8").type is numpy.int8 + + x = numpy.int8(128) + assert x == -128 + assert x != 128 + assert type(x) is numpy.int8 + assert repr(x) == "-128" + + assert type(int(x)) is int + assert int(x) == -128 + + def test_int16(self): + import numpypy as numpy + + x = numpy.int16(3) + assert x == 3 + + def test_int32(self): + import numpypy as numpy + + x = numpy.int32(23) + assert x == 23 + + def test_int_(self): + import numpypy as numpy + + assert numpy.int_ is numpy.dtype(int).type + assert numpy.int_.mro() == [numpy.int_, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object] + + def test_int64(self): + import sys + import numpypy as numpy + + if sys.maxint == 2 ** 63 -1: + assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object] + else: + assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object] + + assert numpy.dtype(numpy.int64).type is numpy.int64 + assert numpy.int64(3) == 3 + + def test_float64(self): + import numpypy as numpy + + assert numpy.float64.mro() == [numpy.float64, numpy.floating, numpy.inexact, numpy.number, numpy.generic, float, object] + + a = numpy.array([1, 2, 3], numpy.float64) + assert type(a[1]) is numpy.float64 + assert numpy.dtype(float).type is numpy.float64 + + assert numpy.float64(2.0) == 2.0 + + def test_subclass_type(self): + import numpypy as numpy + + class X(numpy.float64): + def m(self): + return self + 2 + + b = X(10) + assert type(b) is X + assert b.m() == 12 diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1,7 +1,7 @@ import py from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest -from pypy.module.micronumpy.interp_numarray import NDimArray, shape_agreement +from pypy.module.micronumpy.interp_numarray import W_NDimArray, shape_agreement from pypy.module.micronumpy import signature from pypy.interpreter.error import OperationError from pypy.conftest import gettestobjspace @@ -28,18 +28,18 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = NDimArray(100, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'F') assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = NDimArray(100, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'C') assert a.strides == [15, 3, 1] assert a.backstrides == [135, 12, 2] def test_create_slice_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] @@ -58,7 +58,7 @@ def test_create_slice_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') s = a.create_slice(space, [(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] @@ -78,7 +78,7 @@ def 
test_slice_of_slice_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(5, 0, 0, 1)]) assert s.start == 5 s2 = s.create_slice(space, [(3, 0, 0, 1)]) @@ -96,7 +96,7 @@ def test_slice_of_slice_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') s = a.create_slice(space, [(5, 0, 0, 1)]) assert s.start == 15 * 5 s2 = s.create_slice(space, [(3, 0, 0, 1)]) @@ -114,7 +114,7 @@ def test_negative_step_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] @@ -122,14 +122,14 @@ def test_negative_step_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') s = a.create_slice(space, [(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] def test_index_of_single_item_f(self): - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 + 2 * 10 + 2 * 50 s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) @@ -139,7 +139,7 @@ assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 1)) def test_index_of_single_item_c(self): - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 * 3 * 5 + 2 * 3 + 2 s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) @@ -160,6 +160,21 @@ class AppTestNumArray(BaseNumpyAppTest): + def test_ndarray(self): + from numpypy import ndarray, array, dtype + + assert type(ndarray) is type + assert type(array) is not type + a = ndarray((2, 3)) + assert a.shape == (2, 3) + assert a.dtype == dtype(float) + + raises(TypeError, ndarray, [[1], [2], [3]]) + + a = ndarray(3, dtype=int) + assert a.shape == (3,) + assert a.dtype is dtype(int) + def test_type(self): from numpypy import array ar = array(range(5)) @@ -359,11 +374,11 @@ assert r[i] == i + 3 def test_add_list(self): - from numpypy import array + from numpypy import array, ndarray a = array(range(5)) b = list(reversed(range(5))) c = a + b - assert isinstance(c, array) + assert isinstance(c, ndarray) for i in range(5): assert c[i] == 4 @@ -709,7 +724,7 @@ assert b[i] == 2.5 * a[i] def test_dtype_guessing(self): - from numpypy import array, dtype + from numpypy import array, dtype, float64, int8, bool_ assert array([True]).dtype is dtype(bool) assert array([True, False]).dtype is dtype(bool) @@ -719,6 +734,10 @@ assert array([1.2, True]).dtype is dtype(float) assert array([1.2, 5]).dtype is dtype(float) assert array([]).dtype is dtype(float) + assert array([float64(2)]).dtype is dtype(float) + assert array([int8(3)]).dtype is dtype("int8") + assert array([bool_(True)]).dtype is dtype(bool) + assert array([bool_(True), 3.0]).dtype is dtype(float) def test_comparison(self): import operator @@ -1008,10 +1027,10 @@ b = a[0].copy() assert (b == zeros(10)).all() -class AppTestSupport(object): +class AppTestSupport(BaseNumpyAppTest): def setup_class(cls): import struct - 
cls.space = gettestobjspace(usemodules=('micronumpy',)) + BaseNumpyAppTest.setup_class.im_func(cls) cls.w_data = cls.space.wrap(struct.pack('dddd', 1, 2, 3, 4)) def test_fromstring(self): diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -8,7 +8,7 @@ assert isinstance(add, ufunc) assert repr(add) == "" - assert repr(ufunc) == "" + assert repr(ufunc) == "" def test_ufunc_attrs(self): from numpypy import add, multiply, sin @@ -37,36 +37,36 @@ assert minimum(2.0, 3.0) == 2.0 def test_sequence(self): - from numpypy import array, negative, minimum + from numpypy import array, ndarray, negative, minimum a = array(range(3)) b = [2.0, 1.0, 0.0] c = 1.0 b_neg = negative(b) - assert isinstance(b_neg, array) + assert isinstance(b_neg, ndarray) for i in range(3): assert b_neg[i] == -b[i] min_a_b = minimum(a, b) - assert isinstance(min_a_b, array) + assert isinstance(min_a_b, ndarray) for i in range(3): assert min_a_b[i] == min(a[i], b[i]) min_b_a = minimum(b, a) - assert isinstance(min_b_a, array) + assert isinstance(min_b_a, ndarray) for i in range(3): assert min_b_a[i] == min(a[i], b[i]) min_a_c = minimum(a, c) - assert isinstance(min_a_c, array) + assert isinstance(min_a_c, ndarray) for i in range(3): assert min_a_c[i] == min(a[i], c) min_c_a = minimum(c, a) - assert isinstance(min_c_a, array) + assert isinstance(min_c_a, ndarray) for i in range(3): assert min_c_a[i] == min(a[i], c) min_b_c = minimum(b, c) - assert isinstance(min_b_c, array) + assert isinstance(min_b_c, ndarray) for i in range(3): assert min_b_c[i] == min(b[i], c) min_c_b = minimum(c, b) - assert isinstance(min_c_b, array) + assert isinstance(min_c_b, ndarray) for i in range(3): assert min_c_b[i] == min(b[i], c) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -8,11 +8,11 @@ from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.metainterp.warmspot import reset_stats -from pypy.module.micronumpy import interp_ufuncs, signature +from pypy.module.micronumpy import interp_boxes, interp_ufuncs, signature from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, FloatObject, IntObject, BoolObject, Parser, InterpreterState) -from pypy.module.micronumpy.interp_numarray import NDimArray, NDimSlice,\ - BaseArray +from pypy.module.micronumpy.interp_numarray import (W_NDimArray, NDimSlice, + BaseArray) from pypy.rlib.nonconst import NonConstant from pypy.rpython.annlowlevel import llstr, hlstr @@ -48,17 +48,15 @@ def f(i): interp = InterpreterState(codes[i]) interp.run(space) - res = interp.results[-1] - assert isinstance(res, BaseArray) - w_res = res.eval(res.start_iter()).wrap(interp.space) - if isinstance(w_res, BoolObject): - return float(w_res.boolval) - elif isinstance(w_res, FloatObject): - return w_res.floatval - elif isinstance(w_res, IntObject): - return w_res.intval - else: - return -42. 
+ w_res = interp.results[-1] + if isinstance(w_res, BaseArray): + w_res = w_res.eval(w_res.start_iter()) + + if isinstance(w_res, interp_boxes.W_Float64Box): + return w_res.value + elif isinstance(w_res, interp_boxes.W_BoolBox): + return float(w_res.value) + raise TypeError(w_res) if self.graph is None: interp, graph = self.meta_interp(f, [i], @@ -80,9 +78,9 @@ def test_add(self): result = self.run("add") - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, 'int_add': 3, - 'int_ge': 1, 'guard_false': 1, 'jump': 1}) + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, + 'int_ge': 1, 'guard_false': 1, 'jump': 1}) assert result == 3 + 3 def define_float_add(): @@ -94,9 +92,9 @@ def test_floatadd(self): result = self.run("float_add") assert result == 3 + 3 - self.check_simple_loop({"getarrayitem_raw": 1, "float_add": 1, - "setarrayitem_raw": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 1, "float_add": 1, + "setinteriorfield_raw": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) def define_sum(): return """ @@ -108,9 +106,9 @@ def test_sum(self): result = self.run("sum") assert result == 2 * sum(range(30)) - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 2, - "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 2, + "int_add": 2, "int_ge": 1, "guard_false": 1, + "jump": 1}) def define_prod(): return """ @@ -125,9 +123,9 @@ for i in range(30): expected *= i * 2 assert result == expected - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) def test_max(self): py.test.skip("broken, investigate") @@ -138,9 +136,9 @@ max(b) """) assert result == 256 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 1, + "int_lt": 1, "guard_true": 1, "jump": 1}) def test_min(self): py.test.skip("broken, investigate") @@ -151,9 +149,9 @@ min(b) """) assert result == -24 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 1, + "int_lt": 1, "guard_true": 1, "jump": 1}) def define_any(): return """ @@ -166,10 +164,10 @@ def test_any(self): result = self.run("any") assert result == 1 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_ne": 1, "int_add": 2, - "int_ge": 1, "jump": 1, - "guard_false": 2}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_ne": 1, "int_add": 2, + "int_ge": 1, "jump": 1, + "guard_false": 2}) def define_already_forced(): return """ @@ -188,10 +186,10 @@ # sure it was optimized correctly. # XXX the comment above is wrong now. 
We need preferrably a way to # count the two loops separately - self.check_resops({'setarrayitem_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 35, + self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, - 'getarrayitem_raw': 4, 'float_add': 2, 'guard_false': 4, + 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, 'guard_value': 2}) def define_ufunc(): @@ -205,10 +203,9 @@ def test_ufunc(self): result = self.run("ufunc") assert result == -6 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, "float_neg": 1, - "setarrayitem_raw": 1, "int_add": 3, - "int_ge": 1, "guard_false": 1, "jump": 1, - }) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_neg": 1, + "setinteriorfield_raw": 1, "int_add": 3, + "int_ge": 1, "guard_false": 1, "jump": 1}) def define_specialization(): return """ @@ -246,9 +243,9 @@ def test_slice(self): result = self.run("slice") assert result == 18 - self.check_simple_loop({'getarrayitem_raw': 2, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_ge': 1, 'guard_false': 1, 'jump': 1}) @@ -265,8 +262,8 @@ def test_slice2(self): result = self.run("slice2") assert result == 15 - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, 'int_add': 3, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_ge': 1, 'guard_false': 1, 'jump': 1}) def define_multidim(): @@ -279,11 +276,11 @@ def test_multidim(self): result = self.run('multidim') assert result == 8 - self.check_simple_loop({'float_add': 1, 'getarrayitem_raw': 2, - 'guard_false': 1, 'int_add': 3, 'int_ge': 1, - 'jump': 1, 'setarrayitem_raw': 1}) # int_add might be 1 here if we try slightly harder with # reusing indexes or some optimization + self.check_simple_loop({'float_add': 1, 'getinteriorfield_raw': 2, + 'guard_false': 1, 'int_add': 3, 'int_ge': 1, + 'jump': 1, 'setinteriorfield_raw': 1}) def define_multidim_slice(): return """ @@ -329,18 +326,18 @@ result = self.run("setslice") assert result == 11.0 self.check_loop_count(1) - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add' : 1, - 'setarrayitem_raw': 1, 'int_add': 3, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_eq': 1, 'guard_false': 1, 'jump': 1}) class TestNumpyOld(LLJitMixin): def setup_class(cls): py.test.skip("old") from pypy.module.micronumpy.compile import FakeSpace - from pypy.module.micronumpy.interp_dtype import W_Float64Dtype + from pypy.module.micronumpy.interp_dtype import get_dtype_cache cls.space = FakeSpace() - cls.float64_dtype = cls.space.fromcache(W_Float64Dtype) + cls.float64_dtype = get_dtype_cache(cls.space).w_float64dtype def test_int32_sum(self): py.test.skip("pypy/jit/backend/llimpl.py needs to be changed to " @@ -355,7 +352,7 @@ dtype = float64_dtype else: dtype = int32_dtype - ar = NDimArray(n, [n], dtype=dtype) + ar = W_NDimArray(n, [n], dtype=dtype) i = 0 while i < n: ar.get_concrete().setitem(i, int32_dtype.box(7)) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/types.py @@ -0,0 +1,389 @@ +import functools +import math + +from pypy.module.micronumpy import interp_boxes +from pypy.objspace.std.floatobject import 
float2string +from pypy.rlib import rfloat, libffi, clibffi +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import LONG_BIT, widen +from pypy.rpython.lltypesystem import lltype, rffi + + +def simple_unary_op(func): + specialize.argtype(1)(func) + @functools.wraps(func) + def dispatcher(self, v): + return self.box( + func( + self, + self.for_computation(self.unbox(v)) + ) + ) + return dispatcher + +def simple_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return self.box( + func( + self, + self.for_computation(self.unbox(v1)), + self.for_computation(self.unbox(v2)), + ) + ) + return dispatcher + +def raw_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return func(self, + self.for_computation(self.unbox(v1)), + self.for_computation(self.unbox(v2)) + ) + return dispatcher + +class BaseType(object): + def _unimplemented_ufunc(self, *args): + raise NotImplementedError + # add = sub = mul = div = mod = pow = eq = ne = lt = le = gt = ge = max = \ + # min = copysign = pos = neg = abs = sign = reciprocal = fabs = floor = \ + # exp = sin = cos = tan = arcsin = arccos = arctan = arcsinh = \ + # arctanh = _unimplemented_ufunc + +class Primitive(object): + _mixin_ = True + def get_element_size(self): + return rffi.sizeof(self.T) + + @specialize.argtype(1) + def box(self, value): + return self.BoxType(rffi.cast(self.T, value)) + + def unbox(self, box): + assert isinstance(box, self.BoxType) + return box.value + + def coerce(self, space, w_item): + if isinstance(w_item, self.BoxType): + return w_item + return self.coerce_subtype(space, space.gettypefor(self.BoxType), w_item) + + def coerce_subtype(self, space, w_subtype, w_item): + # XXX: ugly + w_obj = space.allocate_instance(self.BoxType, w_subtype) + assert isinstance(w_obj, self.BoxType) + w_obj.__init__(self._coerce(space, w_item).value) + return w_obj + + def _coerce(self, space, w_item): + raise NotImplementedError + + def read(self, storage, width, i, offset): + return self.box(libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset + )) + + def store(self, storage, width, i, offset, box): + value = self.unbox(box) + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value + ) + + def fill(self, storage, width, box, start, stop, offset): + value = self.unbox(box) + for i in xrange(start, stop): + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value + ) + + @simple_binary_op + def add(self, v1, v2): + return v1 + v2 + + @simple_binary_op + def sub(self, v1, v2): + return v1 - v2 + + @simple_binary_op + def mul(self, v1, v2): + return v1 * v2 + + @simple_unary_op + def pos(self, v): + return +v + + @simple_unary_op + def neg(self, v): + return -v + + @simple_unary_op + def abs(self, v): + return abs(v) + + @raw_binary_op + def eq(self, v1, v2): + return v1 == v2 + + @raw_binary_op + def ne(self, v1, v2): + return v1 != v2 + + @raw_binary_op + def lt(self, v1, v2): + return v1 < v2 + + @raw_binary_op + def le(self, v1, v2): + return v1 <= v2 + + @raw_binary_op + def gt(self, v1, v2): + return v1 > v2 + + @raw_binary_op + def ge(self, v1, v2): + return v1 >= v2 + + def bool(self, v): + return bool(self.for_computation(self.unbox(v))) + + @simple_binary_op + def max(self, v1, v2): + return max(v1, v2) + + @simple_binary_op + def min(self, v1, v2): + return min(v1, v2) + +class Bool(BaseType, 
Primitive): + T = lltype.Bool + BoxType = interp_boxes.W_BoolBox + + True = BoxType(True) + False = BoxType(False) + + @specialize.argtype(1) + def box(self, value): + box = Primitive.box(self, value) + if box.value: + return self.True + else: + return self.False + + def coerce_subtype(self, space, w_subtype, w_item): + # Doesn't return subclasses so it can return the constants. + return self._coerce(space, w_item) + + def _coerce(self, space, w_item): + return self.box(space.is_true(w_item)) + + def str_format(self, box): + value = self.unbox(box) + return "True" if value else "False" + + def for_computation(self, v): + return int(v) + +class Integer(Primitive): + _mixin_ = True + + def _coerce(self, space, w_item): + return self.box(space.int_w(space.int(w_item))) + + def str_format(self, box): + value = self.unbox(box) + return str(self.for_computation(value)) + + def for_computation(self, v): + return widen(v) + + @simple_binary_op + def div(self, v1, v2): + if v2 == 0: + return 0 + return v1 / v2 + + @simple_binary_op + def mod(self, v1, v2): + return v1 % v2 + + @simple_binary_op + def pow(self, v1, v2): + res = 1 + while v2 > 0: + if v2 & 1: + res *= v1 + v2 >>= 1 + if v2 == 0: + break + v1 *= v1 + return res + + @simple_unary_op + def sign(self, v): + if v > 0: + return 1 + elif v < 0: + return -1 + else: + assert v == 0 + return 0 + +class Int8(BaseType, Integer): + T = rffi.SIGNEDCHAR + BoxType = interp_boxes.W_Int8Box + +class UInt8(BaseType, Integer): + T = rffi.UCHAR + BoxType = interp_boxes.W_UInt8Box + +class Int16(BaseType, Integer): + T = rffi.SHORT + BoxType = interp_boxes.W_Int16Box + +class UInt16(BaseType, Integer): + T = rffi.USHORT + BoxType = interp_boxes.W_UInt16Box + +class Int32(BaseType, Integer): + T = rffi.INT + BoxType = interp_boxes.W_Int32Box + +class UInt32(BaseType, Integer): + T = rffi.UINT + BoxType = interp_boxes.W_UInt32Box + +class Long(BaseType, Integer): + T = rffi.LONG + BoxType = interp_boxes.W_LongBox + +class ULong(BaseType, Integer): + T = rffi.ULONG + BoxType = interp_boxes.W_ULongBox + +class Int64(BaseType, Integer): + T = rffi.LONGLONG + BoxType = interp_boxes.W_Int64Box + +class UInt64(BaseType, Integer): + T = rffi.ULONGLONG + BoxType = interp_boxes.W_UInt64Box + +class Float(Primitive): + _mixin_ = True + + def _coerce(self, space, w_item): + return self.box(space.float_w(space.float(w_item))) + + def str_format(self, box): + value = self.unbox(box) + return float2string(self.for_computation(value), "g", rfloat.DTSF_STR_PRECISION) + + def for_computation(self, v): + return float(v) + + @simple_binary_op + def div(self, v1, v2): + try: + return v1 / v2 + except ZeroDivisionError: + if v1 == v2 == 0.0: + return rfloat.NAN + return rfloat.copysign(rfloat.INFINITY, v1 * v2) + + @simple_binary_op + def mod(self, v1, v2): + return math.fmod(v1, v2) + + @simple_binary_op + def pow(self, v1, v2): + return math.pow(v1, v2) + + @simple_binary_op + def copysign(self, v1, v2): + return math.copysign(v1, v2) + + @simple_unary_op + def sign(self, v): + if v == 0.0: + return 0.0 + return rfloat.copysign(1.0, v) + + @simple_unary_op + def fabs(self, v): + return math.fabs(v) + + @simple_unary_op + def reciprocal(self, v): + if v == 0.0: + return rfloat.copysign(rfloat.INFINITY, v) + return 1.0 / v + + @simple_unary_op + def floor(self, v): + return math.floor(v) + + @simple_unary_op + def exp(self, v): + try: + return math.exp(v) + except OverflowError: + return rfloat.INFINITY + + @simple_unary_op + def sin(self, v): + return math.sin(v) + + 
@simple_unary_op + def cos(self, v): + return math.cos(v) + + @simple_unary_op + def tan(self, v): + return math.tan(v) + + @simple_unary_op + def arcsin(self, v): + if not -1.0 <= v <= 1.0: + return rfloat.NAN + return math.asin(v) + + @simple_unary_op + def arccos(self, v): + if not -1.0 <= v <= 1.0: + return rfloat.NAN + return math.acos(v) + + @simple_unary_op + def arctan(self, v): + return math.atan(v) + + @simple_unary_op + def arcsinh(self, v): + return math.asinh(v) + + @simple_unary_op + def arctanh(self, v): + if v == 1.0 or v == -1.0: + return math.copysign(rfloat.INFINITY, v) + if not -1.0 < v < 1.0: + return rfloat.NAN + return math.atanh(v) + + @simple_unary_op + def sqrt(self, v): + try: + return math.sqrt(v) + except ValueError: + return rfloat.NAN + + +class Float32(BaseType, Float): + T = rffi.FLOAT + BoxType = interp_boxes.W_Float32Box + +class Float64(BaseType, Float): + T = rffi.DOUBLE + BoxType = interp_boxes.W_Float64Box \ No newline at end of file diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -102,6 +102,7 @@ 'instancetypedef', 'terminator', '_version_tag?', + 'name?', ] # for config.objspace.std.getattributeshortcut diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -738,3 +738,29 @@ return hop.genop('jit_marker', vlist, resulttype=lltype.Void) +def record_known_class(value, cls): + """ + Assure the JIT that value is an instance of cls. This is not a precise + class check, unlike a guard_class. + """ + assert isinstance(value, cls) + + +class Entry(ExtRegistryEntry): + _about_ = record_known_class + + def compute_result_annotation(self, s_inst, s_cls): + from pypy.annotation import model as annmodel + assert s_cls.is_constant() + assert not s_inst.can_be_none() + assert isinstance(s_inst, annmodel.SomeInstance) + + def specialize_call(self, hop): + from pypy.rpython.lltypesystem import lltype, rclass + classrepr = rclass.get_type_repr(hop.rtyper) + + hop.exception_cannot_occur() + v_inst = hop.inputarg(hop.args_r[0], arg=0) + v_cls = hop.inputarg(classrepr, arg=1) + return hop.genop('jit_record_known_class', [v_inst, v_cls], + resulttype=lltype.Void) diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -416,6 +416,10 @@ def getaddressindll(self, name): return dlsym(self.lib, name) +# These specialize.call_location's should really be specialize.arg(0), however +# you can't hash a pointer obj, which the specialize machinery wants to do. +# Given the present usage of these functions, it's good enough. 
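For illustration, a minimal sketch of how the two RPython specialization hints mentioned in the comment above differ. This is not part of the changeset; it assumes a PyPy checkout on the import path, and make_list / read_slot are invented names used only for the example.

from pypy.rlib.objectmodel import specialize

@specialize.arg(0)
def make_list(length):
    # specialize:arg(0): one copy is generated per constant value of
    # `length`, so that value must be hashable at annotation time
    return [0] * length

@specialize.call_location()
def read_slot(array, index):
    # specialize:call_location: one copy is generated per call site,
    # so each caller may pass a different low-level array type without
    # the annotator ever trying to hash the argument itself
    return array[index]

Under plain CPython the decorators merely tag the functions (via _annspecialcase_); the actual duplication happens in the RPython annotator during translation, which is why array_getitem / array_setitem below can stay generic over ffitype even though an ffitype pointer cannot be hashed.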
+ at specialize.call_location() @jit.oopspec("libffi_array_getitem(ffitype, width, addr, index, offset)") def array_getitem(ffitype, width, addr, index, offset): for TYPE, ffitype2 in clibffi.ffitype_map: @@ -425,6 +429,7 @@ return rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] assert False + at specialize.call_location() @jit.oopspec("libffi_array_setitem(ffitype, width, addr, index, offset, value)") def array_setitem(ffitype, width, addr, index, offset, value): for TYPE, ffitype2 in clibffi.ffitype_map: @@ -433,4 +438,4 @@ addr = rffi.ptradd(addr, offset) rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] = value return - assert False \ No newline at end of file + assert False diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -91,9 +91,18 @@ return decorated_func + def call_location(self): + """ Specializes the function for each call site. + """ + def decorated_func(func): + func._annspecialcase_ = "specialize:call_location" + return func + + return decorated_func + def _wrap(self, args): return "("+','.join([repr(arg) for arg in args]) +")" - + specialize = _Specialize() def enforceargs(*args): @@ -125,7 +134,7 @@ def __hash__(self): raise TypeError("Symbolics are not hashable!") - + def __nonzero__(self): raise TypeError("Symbolics are not comparable") @@ -155,7 +164,7 @@ def lltype(self): from pypy.rpython.lltypesystem import lltype return lltype.Signed - + malloc_zero_filled = CDefinedIntSymbolic('MALLOC_ZERO_FILLED', default=0) running_on_llinterp = CDefinedIntSymbolic('RUNNING_ON_LLINTERP', default=1) # running_on_llinterp is meant to have the value 0 in all backends @@ -221,7 +230,7 @@ def compute_result_annotation(self, s_sizehint): from pypy.annotation.model import SomeInteger - + assert isinstance(s_sizehint, SomeInteger) return self.bookkeeper.newlist() diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -548,6 +548,9 @@ def op_jit_marker(self, *args): pass + def op_jit_record_known_class(self, *args): + pass + def op_get_exception_addr(self, *args): raise NotImplementedError diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -430,6 +430,7 @@ 'jit_force_virtual': LLOp(canrun=True), 'jit_is_virtual': LLOp(canrun=True), 'jit_force_quasi_immutable': LLOp(canrun=True), + 'jit_record_known_class' : LLOp(canrun=True), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canmallocgc=True), diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -555,6 +555,9 @@ def op_jit_force_quasi_immutable(*args): pass +def op_jit_record_known_class(x, y): + pass + def op_get_group_member(TYPE, grpptr, memberoffset): from pypy.rpython.lltypesystem import llgroup assert isinstance(memberoffset, llgroup.GroupMemberOffset) diff --git a/pypy/translator/c/src/support.h b/pypy/translator/c/src/support.h --- a/pypy/translator/c/src/support.h +++ b/pypy/translator/c/src/support.h @@ -11,6 +11,7 @@ #endif /* MIN */ #define RUNNING_ON_LLINTERP 0 +#define OP_JIT_RECORD_KNOWN_CLASS(i, c, r) /* nothing */ #define FAIL_EXCEPTION(exc, msg) \ { \ From noreply at buildbot.pypy.org Sun Dec 4 04:14:46 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Sun, 4 Dec 2011 04:14:46 
+0100 (CET) Subject: [pypy-commit] pypy win64-stage1: added a signed defn to libffi Message-ID: <20111204031446.A10E182A00@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50102:c250e080bac3 Date: 2011-12-04 04:09 +0100 http://bitbucket.org/pypy/pypy/changeset/c250e080bac3/ Log: added a signed defn to libffi diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.6`: http://pypy.org/download.html +.. _`Release 1.7`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. Windows is supported, but platform-specific bugs tend to take longer before we notice and fix diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -20,7 +20,7 @@ from pypy.jit.backend.llgraph import symbolic from pypy.jit.codewriter import longlong -from pypy.rlib import libffi +from pypy.rlib import libffi, clibffi from pypy.rlib.objectmodel import ComputedIntSymbolic, we_are_translated from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint @@ -1432,6 +1432,10 @@ res = _getinteriorfield_raw(libffi.types.slong, array, index, width, ofs) return res +def do_getinteriorfield_raw_float(array, index, width, ofs): + res = _getinteriorfield_raw(libffi.types.double, array, index, width, ofs) + return res + def _getfield_raw(struct, fieldnum): STRUCT, fieldname = symbolic.TokenToField[fieldnum] ptr = cast_from_int(lltype.Ptr(STRUCT), struct) @@ -1510,12 +1514,17 @@ do_setinteriorfield_gc_float = new_setinteriorfield_gc(cast_from_floatstorage) do_setinteriorfield_gc_ptr = new_setinteriorfield_gc(cast_from_ptr) -def new_setinteriorfield_raw(ffitype): +def new_setinteriorfield_raw(cast_func, ffitype): def do_setinteriorfield_raw(array, index, newvalue, width, ofs): addr = rffi.cast(rffi.VOIDP, array) + for TYPE, ffitype2 in clibffi.ffitype_map: + if ffitype2 is ffitype: + newvalue = cast_func(TYPE, newvalue) + break return libffi.array_setitem(ffitype, width, addr, index, ofs, newvalue) return do_setinteriorfield_raw -do_setinteriorfield_raw_int = new_setinteriorfield_raw(libffi.types.slong) +do_setinteriorfield_raw_int = new_setinteriorfield_raw(cast_from_int, libffi.types.slong) +do_setinteriorfield_raw_float = new_setinteriorfield_raw(cast_from_floatstorage, libffi.types.double) def do_setfield_raw_int(struct, fieldnum, newvalue): STRUCT, fieldname = symbolic.TokenToField[fieldnum] diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ 
b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -2,6 +2,8 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan +from pypy.jit.tool.oparser import parse +from pypy.jit.backend.detect_cpu import getcpuclass def newboxes(*values): return [BoxInt(v) for v in values] diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -167,26 +167,22 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity = self._compute_vars_longevity(inputargs, operations) + longevity, useful = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) - return operations + return operations, useful def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - jump = operations[-1] - loop_consts = self._compute_loop_consts(inputargs, jump, looptoken) - self.loop_consts = loop_consts - return self._process_inputargs(inputargs), operations + operations, useful = self._prepare(inputargs, operations, allgcrefs) + return self._process_inputargs(inputargs, useful), operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - self.loop_consts = {} + operations, _ = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] @@ -195,7 +191,7 @@ def reserve_param(self, n): self.param_depth = max(self.param_depth, n) - def _process_inputargs(self, inputargs): + def _process_inputargs(self, inputargs, useful): # XXX we can sort out here by longevity if we need something # more optimal floatlocs = [None] * len(inputargs) @@ -211,7 +207,7 @@ arg = inputargs[i] assert not isinstance(arg, Const) reg = None - if arg not in self.loop_consts and self.longevity[arg][1] > -1: + if self.longevity[arg][1] > -1 and arg in useful: if arg.type == FLOAT: # xxx is it really a good idea? at the first CALL they # will all be flushed anyway @@ -287,15 +283,15 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: - loop_consts = {} - else: - loop_consts = {} - for i in range(len(inputargs)): - if inputargs[i] is jump.getarg(i): - loop_consts[inputargs[i]] = i - return loop_consts + #def _compute_loop_consts(self, inputargs, jump, looptoken): + # if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: + # loop_consts = {} + # else: + # loop_consts = {} + # for i in range(len(inputargs)): + # if inputargs[i] is jump.getarg(i): + # loop_consts[inputargs[i]] = i + # return loop_consts def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py @@ -450,8 +446,14 @@ def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" + + # returns a pair longevity/useful. 
Non-useful variables are ones that + # never appear in the assembler or it does not matter if they appear on + # stack or in registers. Main example is loop arguments that go + # only to guard operations or to jump or to finish produced = {} last_used = {} + useful = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -459,8 +461,11 @@ continue assert op.result not in produced produced[op.result] = i + opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) + if opnum != rop.JUMP and opnum != rop.FINISH: + useful[arg] = None if isinstance(arg, Box) and arg not in last_used: last_used[arg] = i if op.is_guard(): @@ -486,7 +491,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity + return longevity, useful def loc(self, v): if v is None: # xxx kludgy diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -149,6 +149,13 @@ self.cpu.execute_token(loop.token) return loop + def prepare_loop(self, ops): + loop = self.parse(ops) + regalloc = RegAlloc(self.cpu.assembler, False) + regalloc.prepare_loop(loop.inputargs, loop.operations, + loop.token, []) + return regalloc + def getint(self, index): return self.cpu.get_latest_value_int(index) @@ -422,6 +429,35 @@ self.run(loop) assert self.getints(9) == range(9) + def test_loopargs(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_2(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + finish(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_3(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + guard_true(i4) [i0, i1, i2, i3, i4] + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + class TestRegallocCompOps(BaseTestRegalloc): def test_cmp_op_0(self): diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -457,6 +457,46 @@ def test_compile_framework_7(self): self.run('compile_framework_7') + def define_compile_framework_7_interior(cls): + # Array of structs containing pointers (test the write barrier + # for setinteriorfield_gc) + S = lltype.GcStruct('S', ('i', lltype.Signed)) + A = lltype.GcArray(lltype.Struct('entry', ('x', lltype.Ptr(S)), + ('y', lltype.Ptr(S)), + ('z', lltype.Ptr(S)))) + class Glob: + a = lltype.nullptr(A) + glob = Glob() + # + def make_s(i): + s = lltype.malloc(S) + s.i = i + return s + # + @unroll_safe + def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): + a = glob.a + if not a: + a = glob.a = lltype.malloc(A, 10) + i = 0 + while i < 10: + a[i].x = make_s(n + i * 100 + 1) + a[i].y = make_s(n + i * 100 + 2) + a[i].z = make_s(n + i * 100 + 3) + i += 1 + i = 0 + while i < 10: + check(a[i].x.i == n + i * 100 + 1) + check(a[i].y.i == n + i * 100 + 2) + check(a[i].z.i == n + i * 100 + 3) + i += 1 + return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s + f(123, *[None]*11) # check that the check() are ok + return None, f, None + + def test_compile_framework_7_interior(self): + self.run('compile_framework_7_interior') + def define_compile_framework_8(cls): # Array of 
pointers, of unknown length (test write_barrier_from_array) def before(n, x): diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -247,7 +247,7 @@ if funcobj.random_effects_on_gcobjs: return True except (AttributeError, lltype.DelayedPointer): - pass + return True # better safe than sorry return super(RandomEffectsAnalyzer, self).analyze_external_call( op, seen) diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -210,6 +210,8 @@ def rewrite_op_cast_pointer(self, op): newop = self.rewrite_op_same_as(op) assert newop is None + return + # disabled for now if (self._is_rclass_instance(op.args[0]) and self._is_rclass_instance(op.result)): FROM = op.args[0].concretetype.TO @@ -220,6 +222,9 @@ return [None, # hack, do the right renaming from op.args[0] to op.result SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + def rewrite_op_jit_record_known_class(self, op): + return SpaceOperation("record_known_class", [op.args[0], op.args[1]], None) + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -14,7 +14,7 @@ from pypy.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside, loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, - isconstant, isvirtual, promote_string, set_param) + isconstant, isvirtual, promote_string, set_param, record_known_class) from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -3585,7 +3585,8 @@ self.interp_operations(f, [5], translationoptions=translationoptions) - def test_annotation_gives_knowledge_to_tracer(self): + def test_annotation_gives_class_knowledge_to_tracer(self): + py.test.skip("disabled") class Base(object): pass class A(Base): @@ -3645,6 +3646,70 @@ # here it works again self.check_operations_history(guard_class=0, record_known_class=1) + def test_give_class_knowledge_to_tracer_explicitly(self): + from pypy.rpython.lltypesystem.lloperation import llop + class Base(object): + def f(self): + raise NotImplementedError + def g(self): + raise NotImplementedError + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + record_known_class(a, A) + z = a.f() + elif x < 0: + record_known_class(a, B) + z = a.f() + else: + record_known_class(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + 
self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -5,10 +5,11 @@ applevel_name = 'numpypy' interpleveldefs = { - 'array': 'interp_numarray.NDimArray', + 'ndarray': 'interp_numarray.W_NDimArray', 'dtype': 'interp_dtype.W_Dtype', 'ufunc': 'interp_ufuncs.W_Ufunc', + 'array': 'interp_numarray.array', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', 'ones': 'interp_numarray.ones', @@ -16,8 +17,22 @@ 'fromstring': 'interp_support.fromstring', 'flatiter': 'interp_numarray.W_FlatIterator', - 'True_': 'space.w_True', - 'False_': 'space.w_False', + 'True_': 'types.Bool.True', + 'False_': 'types.Bool.False', + + 'generic': 'interp_boxes.W_GenericBox', + 'number': 'interp_boxes.W_NumberBox', + 'integer': 'interp_boxes.W_IntegerBox', + 'signedinteger': 'interp_boxes.W_SignedIntegerBox', + 'bool_': 'interp_boxes.W_BoolBox', + 'int8': 'interp_boxes.W_Int8Box', + 'int16': 'interp_boxes.W_Int16Box', + 'int32': 'interp_boxes.W_Int32Box', + 'int64': 'interp_boxes.W_Int64Box', + 'int_': 'interp_boxes.W_LongBox', + 'inexact': 'interp_boxes.W_InexactBox', + 'floating': 'interp_boxes.W_FloatingBox', + 'float64': 'interp_boxes.W_Float64Box', } # ufuncs diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -3,13 +3,16 @@ It should not be imported by the module itself """ +import re + from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root -from pypy.module.micronumpy.interp_dtype import W_Float64Dtype, W_BoolDtype +from pypy.module.micronumpy import interp_boxes +from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.module.micronumpy.interp_numarray import (Scalar, BaseArray, - descr_new_array, scalar_w, NDimArray) + scalar_w, W_NDimArray, array) from pypy.module.micronumpy import interp_ufuncs -from pypy.rlib.objectmodel import specialize -import re +from pypy.rlib.objectmodel import specialize, instantiate + class BogusBytecode(Exception): pass @@ -48,15 +51,12 @@ def __init__(self): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild - self.w_float64dtype = W_Float64Dtype(self) def issequence_w(self, w_obj): - return isinstance(w_obj, ListObject) or isinstance(w_obj, NDimArray) + return isinstance(w_obj, ListObject) or isinstance(w_obj, W_NDimArray) def isinstance_w(self, w_obj, w_tp): - if w_obj.tp == w_tp: - return True - return False + return w_obj.tp == w_tp def decode_index4(self, w_idx, size): if isinstance(w_idx, IntObject): @@ -97,8 +97,10 @@ fixedview = listview def float(self, w_obj): - assert isinstance(w_obj, FloatObject) - return w_obj + if isinstance(w_obj, FloatObject): + return w_obj + assert isinstance(w_obj, interp_boxes.W_GenericBox) + return self.float(w_obj.descr_float(self)) def float_w(self, w_obj): assert isinstance(w_obj, FloatObject) @@ -112,7 +114,10 @@ raise NotImplementedError def int(self, w_obj): - return w_obj + if isinstance(w_obj, IntObject): + return w_obj + assert isinstance(w_obj, interp_boxes.W_GenericBox) + return self.int(w_obj.descr_int(self)) def is_true(self, w_obj): assert 
isinstance(w_obj, BoolObject) @@ -135,6 +140,9 @@ assert isinstance(what, tp) return what + def allocate_instance(self, klass, w_subtype): + return instantiate(klass) + def len_w(self, w_obj): if isinstance(w_obj, ListObject): return len(w_obj.items) @@ -247,7 +255,7 @@ w_rhs = self.rhs.execute(interp) if not isinstance(w_lhs, BaseArray): # scalar - dtype = interp.space.fromcache(W_Float64Dtype) + dtype = get_dtype_cache(interp.space).w_float64dtype w_lhs = scalar_w(interp.space, dtype, w_lhs) assert isinstance(w_lhs, BaseArray) if self.name == '+': @@ -264,8 +272,9 @@ w_res = w_lhs.descr_getitem(interp.space, w_rhs) else: raise NotImplementedError - if not isinstance(w_res, BaseArray): - dtype = interp.space.fromcache(W_Float64Dtype) + if (not isinstance(w_res, BaseArray) and + not isinstance(w_res, interp_boxes.W_GenericBox)): + dtype = get_dtype_cache(interp.space).w_float64dtype w_res = scalar_w(interp.space, dtype, w_res) return w_res @@ -283,7 +292,7 @@ return space.wrap(self.v) def execute(self, interp): - return FloatObject(self.v) + return interp.space.wrap(self.v) class RangeConstant(Node): def __init__(self, v): @@ -291,10 +300,10 @@ def execute(self, interp): w_list = interp.space.newlist( - [interp.space.wrap(float(i)) for i in range(self.v)]) - dtype = interp.space.fromcache(W_Float64Dtype) - return descr_new_array(interp.space, None, w_list, w_dtype=dtype, - w_order=None) + [interp.space.wrap(float(i)) for i in range(self.v)] + ) + dtype = get_dtype_cache(interp.space).w_float64dtype + return array(interp.space, w_list, w_dtype=dtype, w_order=None) def __repr__(self): return 'Range(%s)' % self.v @@ -315,9 +324,8 @@ def execute(self, interp): w_list = self.wrap(interp.space) - dtype = interp.space.fromcache(W_Float64Dtype) - return descr_new_array(interp.space, None, w_list, w_dtype=dtype, - w_order=None) + dtype = get_dtype_cache(interp.space).w_float64dtype + return array(interp.space, w_list, w_dtype=dtype, w_order=None) def __repr__(self): return "[" + ", ".join([repr(item) for item in self.items]) + "]" @@ -384,9 +392,11 @@ if isinstance(w_res, BaseArray): return w_res if isinstance(w_res, FloatObject): - dtype = interp.space.fromcache(W_Float64Dtype) + dtype = get_dtype_cache(interp.space).w_float64dtype elif isinstance(w_res, BoolObject): - dtype = interp.space.fromcache(W_BoolDtype) + dtype = get_dtype_cache(interp.space).w_booldtype + elif isinstance(w_res, interp_boxes.W_GenericBox): + dtype = w_res.get_dtype(interp.space) else: dtype = None return scalar_w(interp.space, dtype, w_res) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_boxes.py @@ -0,0 +1,267 @@ +from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import TypeDef +from pypy.objspace.std.floattype import float_typedef +from pypy.objspace.std.inttype import int_typedef +from pypy.objspace.std.typeobject import W_TypeObject +from pypy.rlib.rarithmetic import LONG_BIT +from pypy.tool.sourcetools import func_with_new_name + + +MIXIN_64 = (int_typedef,) if LONG_BIT == 64 else () + +def new_dtype_getter(name): + def get_dtype(space): + from pypy.module.micronumpy.interp_dtype import get_dtype_cache + return getattr(get_dtype_cache(space), "w_%sdtype" % name) + def new(space, w_subtype, w_value): + dtype = get_dtype(space) + return dtype.itemtype.coerce_subtype(space, 
w_subtype, w_value) + return func_with_new_name(new, name + "_box_new"), staticmethod(get_dtype) + +class PrimitiveBox(object): + _mixin_ = True + + def __init__(self, value): + self.value = value + + def convert_to(self, dtype): + return dtype.box(self.value) + +class W_GenericBox(Wrappable): + _attrs_ = () + + def descr__new__(space, w_subtype, __args__): + assert isinstance(w_subtype, W_TypeObject) + raise operationerrfmt(space.w_TypeError, "cannot create '%s' instances", + w_subtype.get_module_type_name() + ) + + def descr_str(self, space): + return self.descr_repr(space) + + def descr_repr(self, space): + return space.wrap(self.get_dtype(space).itemtype.str_format(self)) + + def descr_int(self, space): + box = self.convert_to(W_LongBox.get_dtype(space)) + assert isinstance(box, W_LongBox) + return space.wrap(box.value) + + def descr_float(self, space): + box = self.convert_to(W_Float64Box.get_dtype(space)) + assert isinstance(box, W_Float64Box) + return space.wrap(box.value) + + def descr_nonzero(self, space): + dtype = self.get_dtype(space) + return space.wrap(dtype.itemtype.bool(self)) + + def _binop_impl(ufunc_name): + def impl(self, space, w_other): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) + + def _binop_right_impl(ufunc_name): + def impl(self, space, w_other): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) + + def _unaryop_impl(ufunc_name): + def impl(self, space): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) + + descr_add = _binop_impl("add") + descr_sub = _binop_impl("subtract") + descr_mul = _binop_impl("multiply") + descr_div = _binop_impl("divide") + descr_eq = _binop_impl("equal") + descr_ne = _binop_impl("not_equal") + descr_lt = _binop_impl("less") + descr_le = _binop_impl("less_equal") + descr_gt = _binop_impl("greater") + descr_ge = _binop_impl("greater_equal") + + descr_radd = _binop_right_impl("add") + descr_rmul = _binop_right_impl("multiply") + + descr_neg = _unaryop_impl("negative") + descr_abs = _unaryop_impl("absolute") + + +class W_BoolBox(W_GenericBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("bool") + +class W_NumberBox(W_GenericBox): + _attrs_ = () + +class W_IntegerBox(W_NumberBox): + pass + +class W_SignedIntegerBox(W_IntegerBox): + pass + +class W_UnsignedIntgerBox(W_IntegerBox): + pass + +class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int8") + +class W_UInt8Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint8") + +class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int16") + +class W_UInt16Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint16") + +class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int32") + +class W_UInt32Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint32") + +class W_LongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("long") + +class W_ULongBox(W_UnsignedIntgerBox, 
PrimitiveBox): + pass + +class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int64") + +class W_UInt64Box(W_UnsignedIntgerBox, PrimitiveBox): + pass + +class W_InexactBox(W_NumberBox): + _attrs_ = () + +class W_FloatingBox(W_InexactBox): + _attrs_ = () + +class W_Float32Box(W_FloatingBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("float32") + +class W_Float64Box(W_FloatingBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("float64") + + + +W_GenericBox.typedef = TypeDef("generic", + __module__ = "numpypy", + + __new__ = interp2app(W_GenericBox.descr__new__.im_func), + + __str__ = interp2app(W_GenericBox.descr_str), + __repr__ = interp2app(W_GenericBox.descr_repr), + __int__ = interp2app(W_GenericBox.descr_int), + __float__ = interp2app(W_GenericBox.descr_float), + __nonzero__ = interp2app(W_GenericBox.descr_nonzero), + + __add__ = interp2app(W_GenericBox.descr_add), + __sub__ = interp2app(W_GenericBox.descr_sub), + __mul__ = interp2app(W_GenericBox.descr_mul), + __div__ = interp2app(W_GenericBox.descr_div), + + __radd__ = interp2app(W_GenericBox.descr_add), + __rmul__ = interp2app(W_GenericBox.descr_rmul), + + __eq__ = interp2app(W_GenericBox.descr_eq), + __ne__ = interp2app(W_GenericBox.descr_ne), + __lt__ = interp2app(W_GenericBox.descr_lt), + __le__ = interp2app(W_GenericBox.descr_le), + __gt__ = interp2app(W_GenericBox.descr_gt), + __ge__ = interp2app(W_GenericBox.descr_ge), + + __neg__ = interp2app(W_GenericBox.descr_neg), + __abs__ = interp2app(W_GenericBox.descr_abs), +) + +W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_BoolBox.descr__new__.im_func), +) + +W_NumberBox.typedef = TypeDef("number", W_GenericBox.typedef, + __module__ = "numpypy", +) + +W_IntegerBox.typedef = TypeDef("integer", W_NumberBox.typedef, + __module__ = "numpypy", +) + +W_SignedIntegerBox.typedef = TypeDef("signedinteger", W_IntegerBox.typedef, + __module__ = "numpypy", +) + +W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int8Box.descr__new__.im_func), +) + +W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int16Box.descr__new__.im_func), +) + +W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int32Box.typedef = TypeDef("int32", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int32Box.descr__new__.im_func), +) + +W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +if LONG_BIT == 32: + long_name = "int32" +elif LONG_BIT == 64: + long_name = "int64" +W_LongBox.typedef = TypeDef(long_name, (W_SignedIntegerBox.typedef, int_typedef,), + __module__ = "numpypy", +) + +W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, + __module__ = "numpypy", + __new__ = interp2app(W_Int64Box.descr__new__.im_func), +) + +W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, + __module__ = "numpypy", +) + +W_FloatingBox.typedef = TypeDef("floating", W_InexactBox.typedef, + __module__ = 
"numpypy", +) + +W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, + __module__ = "numpypy", +) + +W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), + __module__ = "numpypy", + + __new__ = interp2app(W_Float64Box.descr__new__.im_func), +) \ No newline at end of file diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -1,16 +1,11 @@ -import functools -import math - from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app -from pypy.interpreter.typedef import TypeDef, interp_attrproperty, GetSetProperty -from pypy.module.micronumpy import signature -from pypy.objspace.std.floatobject import float2string -from pypy.rlib import rarithmetic, rfloat -from pypy.rlib.rarithmetic import LONG_BIT, widen -from pypy.rlib.objectmodel import specialize, enforceargs -from pypy.rlib.unroll import unrolling_iterable +from pypy.interpreter.typedef import (TypeDef, GetSetProperty, + interp_attrproperty, interp_attrproperty_w) +from pypy.module.micronumpy import types, signature, interp_boxes +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import LONG_BIT from pypy.rpython.lltypesystem import lltype, rffi @@ -19,523 +14,218 @@ BOOLLTR = "b" FLOATINGLTR = "f" + +VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True}) + class W_Dtype(Wrappable): - def __init__(self, space): - pass + _immuable_fields_ = ["itemtype", "num", "kind"] + + def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[]): + self.signature = signature.BaseSignature() + self.itemtype = itemtype + self.num = num + self.kind = kind + self.name = name + self.char = char + self.w_box_type = w_box_type + self.alternate_constructors = alternate_constructors + + def malloc(self, length): + # XXX find out why test_zjit explodes with tracking of allocations + return lltype.malloc(VOID_STORAGE, self.itemtype.get_element_size() * length, + zero=True, flavor="raw", + track_allocation=False, add_memory_pressure=True + ) + + @specialize.argtype(1) + def box(self, value): + return self.itemtype.box(value) + + def coerce(self, space, w_item): + return self.itemtype.coerce(space, w_item) + + def getitem(self, storage, i): + return self.itemtype.read(storage, self.itemtype.get_element_size(), i, 0) + + def setitem(self, storage, i, box): + self.itemtype.store(storage, self.itemtype.get_element_size(), i, 0, box) + + def fill(self, storage, box, start, stop): + self.itemtype.fill(storage, self.itemtype.get_element_size(), box, start, stop, 0) def descr__new__(space, w_subtype, w_dtype): + cache = get_dtype_cache(space) + if space.is_w(w_dtype, space.w_None): - return space.fromcache(W_Float64Dtype) + return cache.w_float64dtype + elif space.isinstance_w(w_dtype, w_subtype): + return w_dtype elif space.isinstance_w(w_dtype, space.w_str): - dtype = space.str_w(w_dtype) - for alias, dtype_class in dtypes_by_alias: - if alias == dtype: - return space.fromcache(dtype_class) - elif isinstance(space.interpclass_w(w_dtype), W_Dtype): - return w_dtype - elif space.isinstance_w(w_dtype, space.w_type): - for typename, dtype_class in dtypes_by_apptype: - if space.is_w(getattr(space, "w_%s" % typename), w_dtype): - return space.fromcache(dtype_class) + name = space.str_w(w_dtype) + for dtype in 
cache.builtin_dtypes: + if dtype.name == name or dtype.char == name: + return dtype + else: + for dtype in cache.builtin_dtypes: + if w_dtype in dtype.alternate_constructors: + return dtype + if w_dtype is dtype.w_box_type: + return dtype raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + def descr_str(self, space): + return space.wrap(self.name) + def descr_repr(self, space): return space.wrap("dtype('%s')" % self.name) - def descr_str(self, space): - return space.wrap(self.name) + def descr_get_itemsize(self, space): + return space.wrap(self.itemtype.get_element_size()) def descr_get_shape(self, space): return space.newtuple([]) - -class BaseBox(object): - pass - -VOID_TP = lltype.Ptr(lltype.Array(lltype.Void, hints={'nolength': True, "uncast_on_llgraph": True})) - -def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype, - expected_size=None): - - class Box(BaseBox): - def __init__(self, val): - self.val = val - - def wrap(self, space): - val = self.val - if valtype is rarithmetic.r_singlefloat: - val = float(val) - return space.wrap(val) - - def convert_to(self, dtype): - return dtype.adapt_val(self.val) - Box.__name__ = "%sBox" % T._name - - TP = lltype.Ptr(lltype.Array(T, hints={'nolength': True})) - class W_LowLevelDtype(W_Dtype): - signature = signature.BaseSignature() - - def erase(self, storage): - return rffi.cast(VOID_TP, storage) - - def unerase(self, storage): - return rffi.cast(TP, storage) - - @enforceargs(None, valtype) - def box(self, value): - return Box(value) - - def unbox(self, box): - assert isinstance(box, Box) - return box.val - - def unwrap(self, space, w_item): - raise NotImplementedError - - def malloc(self, size): - # XXX find out why test_zjit explodes with tracking of allocations - return self.erase(lltype.malloc(TP.TO, size, - zero=True, flavor="raw", - track_allocation=False, add_memory_pressure=True - )) - - def getitem(self, storage, i): - return Box(self.unerase(storage)[i]) - - def setitem(self, storage, i, item): - self.unerase(storage)[i] = self.unbox(item) - - def setitem_w(self, space, storage, i, w_item): - self.setitem(storage, i, self.unwrap(space, w_item)) - - def fill(self, storage, item, start, stop): - storage = self.unerase(storage) - item = self.unbox(item) - for i in xrange(start, stop): - storage[i] = item - - @specialize.argtype(1) - def adapt_val(self, val): - return self.box(rffi.cast(TP.TO.OF, val)) - - W_LowLevelDtype.__name__ = "W_%sDtype" % name.capitalize() - W_LowLevelDtype.num = num - W_LowLevelDtype.kind = kind - W_LowLevelDtype.name = name - W_LowLevelDtype.aliases = aliases - W_LowLevelDtype.applevel_types = applevel_types - W_LowLevelDtype.num_bytes = rffi.sizeof(T) - if expected_size is not None: - assert W_LowLevelDtype.num_bytes == expected_size - return W_LowLevelDtype - - -def binop(func): - specialize.argtype(1, 2)(func) - @functools.wraps(func) - def impl(self, v1, v2): - return self.adapt_val(func(self, - self.for_computation(self.unbox(v1)), - self.for_computation(self.unbox(v2)), - )) - return impl - -def raw_binop(func): - specialize.argtype(1, 2)(func) - # Returns the result unwrapped. 
- @functools.wraps(func) - def impl(self, v1, v2): - return func(self, - self.for_computation(self.unbox(v1)), - self.for_computation(self.unbox(v2)) - ) - return impl - -def unaryop(func): - specialize.argtype(1)(func) - @functools.wraps(func) - def impl(self, v): - return self.adapt_val(func(self, self.for_computation(self.unbox(v)))) - return impl - -class ArithmeticTypeMixin(object): - _mixin_ = True - - @binop - def add(self, v1, v2): - return v1 + v2 - @binop - def sub(self, v1, v2): - return v1 - v2 - @binop - def mul(self, v1, v2): - return v1 * v2 - - @unaryop - def pos(self, v): - return +v - @unaryop - def neg(self, v): - return -v - @unaryop - def abs(self, v): - return abs(v) - - @binop - def max(self, v1, v2): - return max(v1, v2) - @binop - def min(self, v1, v2): - return min(v1, v2) - - def bool(self, v): - return bool(self.for_computation(self.unbox(v))) - @raw_binop - def eq(self, v1, v2): - return v1 == v2 - @raw_binop - def ne(self, v1, v2): - return v1 != v2 - @raw_binop - def lt(self, v1, v2): - return v1 < v2 - @raw_binop - def le(self, v1, v2): - return v1 <= v2 - @raw_binop - def gt(self, v1, v2): - return v1 > v2 - @raw_binop - def ge(self, v1, v2): - return v1 >= v2 - - -class FloatArithmeticDtype(ArithmeticTypeMixin): - _mixin_ = True - - def unwrap(self, space, w_item): - return self.adapt_val(space.float_w(space.float(w_item))) - - def for_computation(self, v): - return float(v) - - def str_format(self, item): - return float2string(self.for_computation(self.unbox(item)), 'g', rfloat.DTSF_STR_PRECISION) - - @binop - def div(self, v1, v2): - # XXX this won't work after translation, probably requires ovfcheck - try: - return v1 / v2 - except ZeroDivisionError: - if v1 == v2 == 0.0: - return rfloat.NAN - return rfloat.copysign(rfloat.INFINITY, v1 * v2) - @binop - def mod(self, v1, v2): - return math.fmod(v1, v2) - @binop - def pow(self, v1, v2): - return math.pow(v1, v2) - - @unaryop - def sign(self, v): - if v == 0.0: - return 0.0 - return rfloat.copysign(1.0, v) - @unaryop - def reciprocal(self, v): - if v == 0.0: - return rfloat.copysign(rfloat.INFINITY, v) - return 1.0 / v - @unaryop - def fabs(self, v): - return math.fabs(v) - @unaryop - def floor(self, v): - return math.floor(v) - - @binop - def copysign(self, v1, v2): - return math.copysign(v1, v2) - @unaryop - def exp(self, v): - try: - return math.exp(v) - except OverflowError: - return rfloat.INFINITY - @unaryop - def sin(self, v): - return math.sin(v) - @unaryop - def cos(self, v): - return math.cos(v) - @unaryop - def tan(self, v): - return math.tan(v) - @unaryop - def arcsin(self, v): - if not -1.0 <= v <= 1.0: - return rfloat.NAN - return math.asin(v) - @unaryop - def arccos(self, v): - if not -1.0 <= v <= 1.0: - return rfloat.NAN - return math.acos(v) - @unaryop - def arctan(self, v): - return math.atan(v) - @unaryop - def arcsinh(self, v): - return math.asinh(v) - @unaryop - def arctanh(self, v): - if v == 1.0 or v == -1.0: - return math.copysign(rfloat.INFINITY, v) - if not -1.0 < v < 1.0: - return rfloat.NAN - return math.atanh(v) - @unaryop - def sqrt(self, v): - try: - return math.sqrt(v) - except ValueError: - return rfloat.NAN - -class IntegerArithmeticDtype(ArithmeticTypeMixin): - _mixin_ = True - - def unwrap(self, space, w_item): - return self.adapt_val(space.int_w(space.int(w_item))) - - def for_computation(self, v): - return widen(v) - - def str_format(self, item): - return str(widen(self.unbox(item))) - - @binop - def div(self, v1, v2): - if v2 == 0: - return 0 - return v1 / v2 - @binop 
- def mod(self, v1, v2): - return v1 % v2 - @binop - def pow(self, v1, v2): - res = 1 - while v2 > 0: - if v2 & 1: - res *= v1 - v2 >>= 1 - if v2 == 0: - break - v1 *= v1 - return res - - -class SignedIntegerArithmeticDtype(IntegerArithmeticDtype): - _mixin_ = True - - @unaryop - def sign(self, v): - if v > 0: - return 1 - elif v < 0: - return -1 - else: - assert v == 0 - return 0 - -class UnsignedIntegerArithmeticDtype(IntegerArithmeticDtype): - _mixin_ = True - - @unaryop - def sign(self, v): - return int(v != 0) - - -W_BoolDtype = create_low_level_dtype( - num = 0, kind = BOOLLTR, name = "bool", - aliases = ["?", "bool", "bool8"], - applevel_types = ["bool"], - T = lltype.Bool, - valtype = bool, -) -class W_BoolDtype(SignedIntegerArithmeticDtype, W_BoolDtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.is_true(w_item)) - - def str_format(self, item): - v = self.unbox(item) - return "True" if v else "False" - - def for_computation(self, v): - return int(v) - -W_Int8Dtype = create_low_level_dtype( - num = 1, kind = SIGNEDLTR, name = "int8", - aliases = ["b", "int8", "i1"], - applevel_types = [], - T = rffi.SIGNEDCHAR, - valtype = rffi.SIGNEDCHAR._type, - expected_size = 1, -) -class W_Int8Dtype(SignedIntegerArithmeticDtype, W_Int8Dtype): - pass - -W_UInt8Dtype = create_low_level_dtype( - num = 2, kind = UNSIGNEDLTR, name = "uint8", - aliases = ["B", "uint8", "I1"], - applevel_types = [], - T = rffi.UCHAR, - valtype = rffi.UCHAR._type, - expected_size = 1, -) -class W_UInt8Dtype(UnsignedIntegerArithmeticDtype, W_UInt8Dtype): - pass - -W_Int16Dtype = create_low_level_dtype( - num = 3, kind = SIGNEDLTR, name = "int16", - aliases = ["h", "int16", "i2"], - applevel_types = [], - T = rffi.SHORT, - valtype = rffi.SHORT._type, - expected_size = 2, -) -class W_Int16Dtype(SignedIntegerArithmeticDtype, W_Int16Dtype): - pass - -W_UInt16Dtype = create_low_level_dtype( - num = 4, kind = UNSIGNEDLTR, name = "uint16", - aliases = ["H", "uint16", "I2"], - applevel_types = [], - T = rffi.USHORT, - valtype = rffi.USHORT._type, - expected_size = 2, -) -class W_UInt16Dtype(UnsignedIntegerArithmeticDtype, W_UInt16Dtype): - pass - -W_Int32Dtype = create_low_level_dtype( - num = 5, kind = SIGNEDLTR, name = "int32", - aliases = ["i", "int32", "i4"], - applevel_types = [], - T = rffi.INT, - valtype = rffi.INT._type, - expected_size = 4, -) -class W_Int32Dtype(SignedIntegerArithmeticDtype, W_Int32Dtype): - pass - -W_UInt32Dtype = create_low_level_dtype( - num = 6, kind = UNSIGNEDLTR, name = "uint32", - aliases = ["I", "uint32", "I4"], - applevel_types = [], - T = rffi.UINT, - valtype = rffi.UINT._type, - expected_size = 4, -) -class W_UInt32Dtype(UnsignedIntegerArithmeticDtype, W_UInt32Dtype): - pass - -W_Int64Dtype = create_low_level_dtype( - num = 9, kind = SIGNEDLTR, name = "int64", - aliases = ["q", "int64", "i8"], - applevel_types = ["long"], - T = rffi.LONGLONG, - valtype = rffi.LONGLONG._type, - expected_size = 8, -) -class W_Int64Dtype(SignedIntegerArithmeticDtype, W_Int64Dtype): - pass - -W_UInt64Dtype = create_low_level_dtype( - num = 10, kind = UNSIGNEDLTR, name = "uint64", - aliases = ["Q", "uint64", "I8"], - applevel_types = [], - T = rffi.ULONGLONG, - valtype = rffi.ULONGLONG._type, - expected_size = 8, -) -class W_UInt64Dtype(UnsignedIntegerArithmeticDtype, W_UInt64Dtype): - pass - -if LONG_BIT == 32: - long_dtype = W_Int32Dtype - ulong_dtype = W_UInt32Dtype -elif LONG_BIT == 64: - long_dtype = W_Int64Dtype - ulong_dtype = W_UInt64Dtype -else: - assert False - -class 
W_LongDtype(long_dtype): - num = 7 - aliases = ["l"] - applevel_types = ["int"] - -class W_ULongDtype(ulong_dtype): - num = 8 - aliases = ["L"] - -W_Float32Dtype = create_low_level_dtype( - num = 11, kind = FLOATINGLTR, name = "float32", - aliases = ["f", "float32", "f4"], - applevel_types = [], - T = lltype.SingleFloat, - valtype = rarithmetic.r_singlefloat, - expected_size = 4, -) -class W_Float32Dtype(FloatArithmeticDtype, W_Float32Dtype): - pass - -W_Float64Dtype = create_low_level_dtype( - num = 12, kind = FLOATINGLTR, name = "float64", - aliases = ["d", "float64", "f8"], - applevel_types = ["float"], - T = lltype.Float, - valtype = float, - expected_size = 8, -) -class W_Float64Dtype(FloatArithmeticDtype, W_Float64Dtype): - pass - -ALL_DTYPES = [ - W_BoolDtype, - W_Int8Dtype, W_UInt8Dtype, W_Int16Dtype, W_UInt16Dtype, - W_Int32Dtype, W_UInt32Dtype, W_LongDtype, W_ULongDtype, - W_Int64Dtype, W_UInt64Dtype, - W_Float32Dtype, W_Float64Dtype, -] - -dtypes_by_alias = unrolling_iterable([ - (alias, dtype) - for dtype in ALL_DTYPES - for alias in dtype.aliases -]) -dtypes_by_apptype = unrolling_iterable([ - (apptype, dtype) - for dtype in ALL_DTYPES - for apptype in dtype.applevel_types -]) -dtypes_by_num_bytes = unrolling_iterable(sorted([ - (dtype.num_bytes, dtype) - for dtype in ALL_DTYPES -])) - W_Dtype.typedef = TypeDef("dtype", - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_Dtype.descr__new__.im_func), + __str__= interp2app(W_Dtype.descr_str), __repr__ = interp2app(W_Dtype.descr_repr), - __str__ = interp2app(W_Dtype.descr_str), num = interp_attrproperty("num", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), - itemsize = interp_attrproperty("num_bytes", cls=W_Dtype), + type = interp_attrproperty_w("w_box_type", cls=W_Dtype), + itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), shape = GetSetProperty(W_Dtype.descr_get_shape), ) W_Dtype.typedef.acceptable_as_base_class = False + +class DtypeCache(object): + def __init__(self, space): + self.w_booldtype = W_Dtype( + types.Bool(), + num=0, + kind=BOOLLTR, + name="bool", + char="?", + w_box_type = space.gettypefor(interp_boxes.W_BoolBox), + alternate_constructors=[space.w_bool], + ) + self.w_int8dtype = W_Dtype( + types.Int8(), + num=1, + kind=SIGNEDLTR, + name="int8", + char="b", + w_box_type = space.gettypefor(interp_boxes.W_Int8Box) + ) + self.w_uint8dtype = W_Dtype( + types.UInt8(), + num=2, + kind=UNSIGNEDLTR, + name="uint8", + char="B", + w_box_type = space.gettypefor(interp_boxes.W_UInt8Box), + ) + self.w_int16dtype = W_Dtype( + types.Int16(), + num=3, + kind=SIGNEDLTR, + name="int16", + char="h", + w_box_type = space.gettypefor(interp_boxes.W_Int16Box), + ) + self.w_uint16dtype = W_Dtype( + types.UInt16(), + num=4, + kind=UNSIGNEDLTR, + name="uint16", + char="H", + w_box_type = space.gettypefor(interp_boxes.W_UInt16Box), + ) + self.w_int32dtype = W_Dtype( + types.Int32(), + num=5, + kind=SIGNEDLTR, + name="int32", + char="i", + w_box_type = space.gettypefor(interp_boxes.W_Int32Box), + ) + self.w_uint32dtype = W_Dtype( + types.UInt32(), + num=6, + kind=UNSIGNEDLTR, + name="uint32", + char="I", + w_box_type = space.gettypefor(interp_boxes.W_UInt32Box), + ) + if LONG_BIT == 32: + name = "int32" + elif LONG_BIT == 64: + name = "int64" + self.w_longdtype = W_Dtype( + types.Long(), + num=7, + kind=SIGNEDLTR, + name=name, + char="l", + w_box_type = space.gettypefor(interp_boxes.W_LongBox), + alternate_constructors=[space.w_int], + ) + self.w_ulongdtype = W_Dtype( + types.ULong(), + num=8, + 
kind=UNSIGNEDLTR, + name="u" + name, + char="L", + w_box_type = space.gettypefor(interp_boxes.W_ULongBox), + ) + self.w_int64dtype = W_Dtype( + types.Int64(), + num=9, + kind=SIGNEDLTR, + name="int64", + char="q", + w_box_type = space.gettypefor(interp_boxes.W_Int64Box), + alternate_constructors=[space.w_long], + ) + self.w_uint64dtype = W_Dtype( + types.UInt64(), + num=10, + kind=UNSIGNEDLTR, + name="uint64", + char="Q", + w_box_type = space.gettypefor(interp_boxes.W_UInt64Box), + ) + self.w_float32dtype = W_Dtype( + types.Float32(), + num=11, + kind=FLOATINGLTR, + name="float32", + char="f", + w_box_type = space.gettypefor(interp_boxes.W_Float32Box), + ) + self.w_float64dtype = W_Dtype( + types.Float64(), + num=12, + kind=FLOATINGLTR, + name="float64", + char="d", + w_box_type = space.gettypefor(interp_boxes.W_Float64Box), + alternate_constructors=[space.w_float], + ) + + self.builtin_dtypes = [ + self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, + self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype, + self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype, + self.w_int64dtype, self.w_uint64dtype, self.w_float32dtype, + self.w_float64dtype + ] + self.dtypes_by_num_bytes = sorted( + (dtype.itemtype.get_element_size(), dtype) + for dtype in self.builtin_dtypes + ) + +def get_dtype_cache(space): + return space.fromcache(DtypeCache) \ No newline at end of file diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -98,47 +98,6 @@ endshape[i] = remainder[i] return endshape -def descr_new_array(space, w_subtype, w_item_or_iterable, w_dtype=None, - w_order=NoneNotWrapped): - # find scalar - if not space.issequence_w(w_item_or_iterable): - if space.is_w(w_dtype, space.w_None): - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, - w_item_or_iterable) - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - return scalar_w(space, dtype, w_item_or_iterable) - if w_order is None: - order = 'C' - else: - order = space.str_w(w_order) - if order != 'C': # or order != 'F': - raise operationerrfmt(space.w_ValueError, "Unknown order: %s", - order) - shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) - # they come back in C order - size = len(elems_w) - if space.is_w(w_dtype, space.w_None): - w_dtype = None - for w_elem in elems_w: - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, - w_dtype) - if w_dtype is space.fromcache(interp_dtype.W_Float64Dtype): - break - if w_dtype is None: - w_dtype = space.w_None - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - arr = NDimArray(size, shape[:], dtype=dtype, order=order) - shapelen = len(shape) - arr_iter = arr.start_iter(arr.shape) - for i in range(len(elems_w)): - w_elem = elems_w[i] - dtype.setitem_w(space, arr.storage, arr_iter.offset, w_elem) - arr_iter = arr_iter.next(shapelen) - return arr # Iterators for arrays # -------------------- @@ -378,6 +337,13 @@ def add_invalidates(self, other): self.invalidates.append(other) + def descr__new__(space, w_subtype, w_size, w_dtype=None): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + size, shape = _find_size_and_shape(space, w_size) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + def 
_unaryop_impl(ufunc_name): def impl(self, space): return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) @@ -451,8 +417,8 @@ self=self, dtype=dtype, i=i, result=result, idx=idx, cur_best=cur_best) - new_best = getattr(dtype, op_name)(cur_best, self.eval(i)) - if dtype.ne(new_best, cur_best): + new_best = getattr(dtype.itemtype, op_name)(cur_best, self.eval(i)) + if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best i = i.next(shapelen) @@ -462,8 +428,7 @@ size = self.find_size() if size == 0: raise OperationError(space.w_ValueError, - space.wrap("Can't call %s on zero-size arrays" \ - % op_name)) + space.wrap("Can't call %s on zero-size arrays" % op_name)) return space.wrap(loop(self)) return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) @@ -475,7 +440,7 @@ all_driver.jit_merge_point(signature=self.signature, shapelen=shapelen, self=self, dtype=dtype, i=i) - if not dtype.bool(self.eval(i)): + if not dtype.itemtype.bool(self.eval(i)): return False i = i.next(shapelen) return True @@ -490,7 +455,7 @@ any_driver.jit_merge_point(signature=self.signature, shapelen=shapelen, self=self, dtype=dtype, i=i) - if dtype.bool(self.eval(i)): + if dtype.itemtype.bool(self.eval(i)): return True i = i.next(shapelen) return False @@ -542,8 +507,8 @@ res.append(')') else: concrete.to_str(space, 1, res, indent=' ') - if (dtype is not space.fromcache(interp_dtype.W_Float64Dtype) and - dtype is not space.fromcache(interp_dtype.W_Int64Dtype)) or \ + if (dtype is not interp_dtype.get_dtype_cache(space).w_float64dtype and + dtype is not interp_dtype.get_dtype_cache(space).w_int64dtype) or \ not self.find_size(): res.append(", dtype=" + dtype.name) res.append(")") @@ -612,7 +577,7 @@ start = False else: builder.append(spacer) - builder.append(dtype.str_format(self.getitem(item))) + builder.append(dtype.itemtype.str_format(self.getitem(item))) item += self.strides[0] # Add a comma only if comma is False - this prevents adding two # commas @@ -625,7 +590,7 @@ start = False else: builder.append(spacer) - builder.append(dtype.str_format(self.getitem(item))) + builder.append(dtype.itemtype.str_format(self.getitem(item))) item += self.strides[0] i += 1 else: @@ -712,7 +677,7 @@ raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) item = concrete._index_of_single_item(space, w_idx) - return concrete.getitem(item).wrap(space) + return concrete.getitem(item) chunks = self._prepare_slice_args(space, w_idx) return space.wrap(self.create_slice(space, chunks)) @@ -771,14 +736,15 @@ shape[:]) def descr_mean(self, space): - return space.wrap(space.float_w(self.descr_sum(space)) / self.find_size()) + return space.div(self.descr_sum(space), space.wrap(self.find_size())) def descr_nonzero(self, space): if self.find_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) - return space.wrap(space.is_true(self.get_concrete().eval( - self.start_iter(self.shape)).wrap(space))) + return space.wrap(space.is_true( + self.get_concrete().eval(self.start_iter(self.shape)) + )) def descr_get_transpose(self, space): concrete = self.get_concrete() @@ -814,17 +780,14 @@ return w_obj elif space.issequence_w(w_obj): # Convert to array. 
- w_obj = space.call_function(space.gettypefor(BaseArray), w_obj) - assert isinstance(w_obj, BaseArray) - return w_obj + return array(space, w_obj, w_order=None) else: # If it's a scalar dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) return scalar_w(space, dtype, w_obj) def scalar_w(space, dtype, w_obj): - assert isinstance(dtype, interp_dtype.W_Dtype) - return Scalar(dtype, dtype.unwrap(space, w_obj)) + return Scalar(dtype, dtype.coerce(space, w_obj)) class Scalar(BaseArray): """ @@ -835,6 +798,7 @@ _attrs_ = ["dtype", "value", "shape"] def __init__(self, dtype, value): + self.shape = self.strides = [] BaseArray.__init__(self, [], 'C') self.dtype = dtype self.value = value @@ -858,7 +822,7 @@ return ConstantIterator() def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): - builder.append(self.dtype.str_format(self.value)) + builder.append(self.dtype.itemtype.str_format(self.value)) def copy(self): return Scalar(self.dtype, self.value) @@ -884,7 +848,7 @@ i = 0 signature = self.signature result_size = self.find_size() - result = NDimArray(result_size, self.shape, self.find_dtype()) + result = W_NDimArray(result_size, self.shape, self.find_dtype()) shapelen = len(self.shape) i = self.start_iter() ri = result.start_iter() @@ -1111,14 +1075,14 @@ return 'Slice(%s)' % self.parent.debug_repr() def copy(self): - array = NDimArray(self.size, self.shape[:], self.find_dtype()) + array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) iter = self.start_iter() while not iter.done(): array.setitem(iter.offset, self.getitem(iter.offset)) iter = iter.next(len(self.shape)) return array -class NDimArray(BaseArray): +class W_NDimArray(BaseArray): """ A class representing contiguous array. We know that each iteration by say ufunc will increase the data index by one """ @@ -1145,11 +1109,11 @@ return self.dtype.getitem(self.storage, iter.get_offset()) def copy(self): - array = NDimArray(self.size, self.shape[:], self.dtype, self.order) + array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) rffi.c_memcpy( - rffi.cast(rffi.VOIDP, array.storage), - rffi.cast(rffi.VOIDP, self.storage), - self.size * self.dtype.num_bytes + array.storage, + self.storage, + self.size * self.dtype.itemtype.get_element_size() ) return array @@ -1160,8 +1124,7 @@ "len() of unsized object")) def setitem_w(self, space, item, w_value): - self.invalidated() - self.dtype.setitem_w(space, self.storage, item, w_value) + return self.setitem(item, self.dtype.coerce(space, w_value)) def setitem(self, item, value): self.invalidated() @@ -1193,20 +1156,62 @@ shape.append(item) return size, shape +def array(space, w_item_or_iterable, w_dtype=None, w_order=NoneNotWrapped): + # find scalar + if not space.issequence_w(w_item_or_iterable): + if space.is_w(w_dtype, space.w_None): + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, + w_item_or_iterable) + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + return scalar_w(space, dtype, w_item_or_iterable) + if w_order is None: + order = 'C' + else: + order = space.str_w(w_order) + if order != 'C': # or order != 'F': + raise operationerrfmt(space.w_ValueError, "Unknown order: %s", + order) + shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) + # they come back in C order + size = len(elems_w) + if space.is_w(w_dtype, space.w_None): + w_dtype = None + for w_elem in elems_w: + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, + w_dtype) + if w_dtype is 
interp_dtype.get_dtype_cache(space).w_float64dtype: + break + if w_dtype is None: + w_dtype = space.w_None + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) + shapelen = len(shape) + arr_iter = arr.start_iter(arr.shape) + for i in range(len(elems_w)): + w_elem = elems_w[i] + dtype.setitem(arr.storage, arr_iter.offset, dtype.coerce(space, w_elem)) + arr_iter = arr_iter.next(shapelen) + return arr + def zeros(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) size, shape = _find_size_and_shape(space, w_size) - return space.wrap(NDimArray(size, shape[:], dtype=dtype)) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) def ones(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) + size, shape = _find_size_and_shape(space, w_size) - arr = NDimArray(size, shape[:], dtype=dtype) - one = dtype.adapt_val(1) + arr = W_NDimArray(size, shape[:], dtype=dtype) + one = dtype.box(1) arr.dtype.fill(arr.storage, one, 0, size) return space.wrap(arr) @@ -1217,9 +1222,9 @@ return w_arr.descr_dot(space, w_obj2) BaseArray.typedef = TypeDef( - 'numarray', - __new__ = interp2app(descr_new_array), - + 'ndarray', + __module__ = "numpypy", + __new__ = interp2app(BaseArray.descr__new__.im_func), __len__ = interp2app(BaseArray.descr_len), __getitem__ = interp2app(BaseArray.descr_getitem), @@ -1308,10 +1313,10 @@ def descr_next(self, space): if self.iter.done(): - raise OperationError(space.w_StopIteration, space.wrap('')) + raise OperationError(space.w_StopIteration, space.w_None) result = self.eval(self.iter) self.iter = self.iter.next(self.shapelen) - return result.wrap(space) + return result def descr_iter(self): return self diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,6 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.interp_dtype import W_Float64Dtype +from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.rlib.rstruct.runpack import runpack from pypy.rpython.lltypesystem import lltype, rffi @@ -9,7 +9,7 @@ @unwrap_spec(s=str) def fromstring(space, s): - from pypy.module.micronumpy.interp_numarray import NDimArray + from pypy.module.micronumpy.interp_numarray import W_NDimArray length = len(s) if length % FLOAT_SIZE == 0: @@ -18,8 +18,8 @@ raise OperationError(space.w_ValueError, space.wrap( "string length %d not divisable by %d" % (length, FLOAT_SIZE))) - dtype = space.fromcache(W_Float64Dtype) - a = NDimArray(number, [number], dtype=dtype) + dtype = get_dtype_cache(space).w_float64dtype + a = W_NDimArray(number, [number], dtype=dtype) start = 0 end = FLOAT_SIZE diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import interp_dtype, signature +from 
pypy.module.micronumpy import interp_boxes, interp_dtype, signature, types from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name @@ -15,6 +15,7 @@ class W_Ufunc(Wrappable): _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"] + _immutable_fields_ = ["promote_to_float", "promote_bools"] def __init__(self, name, promote_to_float, promote_bools, identity): self.name = name @@ -29,7 +30,7 @@ def descr_get_identity(self, space): if self.identity is None: return space.w_None - return self.identity.wrap(space) + return self.identity def descr_call(self, space, __args__): if __args__.keywords or len(__args__.arguments_w) < self.argcount: @@ -80,8 +81,7 @@ new_sig = signature.Signature.find_sig([ self.reduce_signature, obj.signature ]) - return self.reduce_loop(new_sig, shapelen, start, value, obj, - dtype).wrap(space) + return self.reduce_loop(new_sig, shapelen, start, value, obj, dtype) def reduce_loop(self, signature, shapelen, i, value, obj, dtype): while not i.done(): @@ -115,7 +115,7 @@ promote_bools=self.promote_bools, ) if isinstance(w_obj, Scalar): - return self.func(res_dtype, w_obj.value.convert_to(res_dtype)).wrap(space) + return self.func(res_dtype, w_obj.value.convert_to(res_dtype)) new_sig = signature.Signature.find_sig([self.signature, w_obj.signature]) w_res = Call1(new_sig, w_obj.shape, res_dtype, w_obj, w_obj.order) @@ -124,6 +124,7 @@ class W_Ufunc2(W_Ufunc): + _immutable_fields_ = ["comparison_func", "func"] argcount = 2 def __init__(self, func, name, promote_to_float=False, promote_bools=False, @@ -148,14 +149,14 @@ promote_bools=self.promote_bools, ) if self.comparison_func: - res_dtype = space.fromcache(interp_dtype.W_BoolDtype) + res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar): return self.func(calc_dtype, w_lhs.value.convert_to(calc_dtype), w_rhs.value.convert_to(calc_dtype) - ).wrap(space) + ) new_sig = signature.Signature.find_sig([ self.signature, w_lhs.signature, w_rhs.signature @@ -169,7 +170,7 @@ W_Ufunc.typedef = TypeDef("ufunc", - __module__ = "numpy", + __module__ = "numpypy", __call__ = interp2app(W_Ufunc.descr_call), __repr__ = interp2app(W_Ufunc.descr_repr), @@ -187,7 +188,7 @@ dt1, dt2 = dt2, dt1 # Some operations promote op(bool, bool) to return int8, rather than bool if promote_bools and (dt1.kind == dt2.kind == interp_dtype.BOOLLTR): - return space.fromcache(interp_dtype.W_Int8Dtype) + return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: return find_unaryop_result_dtype(space, dt2, promote_to_float=True) # If they're the same kind, choose the greater one. @@ -197,14 +198,14 @@ # Everything promotes to float, and bool promotes to everything. 
if dt2.kind == interp_dtype.FLOATINGLTR or dt1.kind == interp_dtype.BOOLLTR: # Float32 + 8-bit int = Float64 - if dt2.num == 11 and dt1.num_bytes >= 4: - return space.fromcache(interp_dtype.W_Float64Dtype) + if dt2.num == 11 and dt1.itemtype.get_element_size() >= 4: + return interp_dtype.get_dtype_cache(space).w_float64dtype return dt2 # for now this means mixing signed and unsigned if dt2.kind == interp_dtype.SIGNEDLTR: # if dt2 has a greater number of bytes, then just go with it - if dt1.num_bytes < dt2.num_bytes: + if dt1.itemtype.get_element_size() < dt2.itemtype.get_element_size(): return dt2 # we need to promote both dtypes dtypenum = dt2.num + 2 @@ -214,10 +215,11 @@ # UInt64 + signed = Float64 if dt2.num == 10: dtypenum += 1 - newdtype = interp_dtype.ALL_DTYPES[dtypenum] + newdtype = interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum] - if newdtype.num_bytes > dt2.num_bytes or newdtype.kind == interp_dtype.FLOATINGLTR: - return space.fromcache(newdtype) + if (newdtype.itemtype.get_element_size() > dt2.itemtype.get_element_size() or + newdtype.kind == interp_dtype.FLOATINGLTR): + return newdtype else: # we only promoted to long on 32-bit or to longlong on 64-bit # this is really for dealing with the Long and Ulong dtypes @@ -225,35 +227,42 @@ dtypenum += 2 else: dtypenum += 3 - return space.fromcache(interp_dtype.ALL_DTYPES[dtypenum]) + return interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum] def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): if promote_bools and (dt.kind == interp_dtype.BOOLLTR): - return space.fromcache(interp_dtype.W_Int8Dtype) + return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: if dt.kind == interp_dtype.FLOATINGLTR: return dt if dt.num >= 5: - return space.fromcache(interp_dtype.W_Float64Dtype) - for bytes, dtype in interp_dtype.dtypes_by_num_bytes: - if dtype.kind == interp_dtype.FLOATINGLTR and dtype.num_bytes > dt.num_bytes: - return space.fromcache(dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype + for bytes, dtype in interp_dtype.get_dtype_cache(space).dtypes_by_num_bytes: + if (dtype.kind == interp_dtype.FLOATINGLTR and + dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): + return dtype if promote_to_largest: if dt.kind == interp_dtype.BOOLLTR or dt.kind == interp_dtype.SIGNEDLTR: - return space.fromcache(interp_dtype.W_Int64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype elif dt.kind == interp_dtype.FLOATINGLTR: - return space.fromcache(interp_dtype.W_Float64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype elif dt.kind == interp_dtype.UNSIGNEDLTR: - return space.fromcache(interp_dtype.W_UInt64Dtype) + return interp_dtype.get_dtype_cache(space).w_uint64dtype else: assert False return dt def find_dtype_for_scalar(space, w_obj, current_guess=None): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - long_dtype = space.fromcache(interp_dtype.W_LongDtype) - int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype) + bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype + long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype + int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype + + if isinstance(w_obj, interp_boxes.W_GenericBox): + dtype = w_obj.get_dtype(space) + if current_guess is None: + return dtype + return find_binop_result_dtype(space, dtype, current_guess) if space.isinstance_w(w_obj, space.w_bool): if current_guess is None or current_guess is 
bool_dtype: @@ -269,20 +278,19 @@ current_guess is long_dtype or current_guess is int64_dtype): return int64_dtype return current_guess - return space.fromcache(interp_dtype.W_Float64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func): if argcount == 1: def impl(res_dtype, value): - return getattr(res_dtype, op_name)(value) + return getattr(res_dtype.itemtype, op_name)(value) elif argcount == 2: + dtype_cache = interp_dtype.get_dtype_cache(space) def impl(res_dtype, lvalue, rvalue): - res = getattr(res_dtype, op_name)(lvalue, rvalue) + res = getattr(res_dtype.itemtype, op_name)(lvalue, rvalue) if comparison_func: - booldtype = space.fromcache(interp_dtype.W_BoolDtype) - assert isinstance(booldtype, interp_dtype.W_BoolDtype) - res = booldtype.box(res) + return dtype_cache.w_booldtype.box(res) return res return func_with_new_name(impl, ufunc_name) @@ -338,7 +346,7 @@ identity = extra_kwargs.get("identity") if identity is not None: - identity = space.fromcache(interp_dtype.W_LongDtype).adapt_val(identity) + identity = interp_dtype.get_dtype_cache(space).w_longdtype.box(identity) extra_kwargs["identity"] = identity func = ufunc_dtype_caller(space, ufunc_name, op_name, argcount, diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -1,6 +1,6 @@ from pypy.conftest import gettestobjspace -from pypy.module.micronumpy import interp_dtype -from pypy.module.micronumpy.interp_numarray import NDimArray, Scalar +from pypy.module.micronumpy.interp_dtype import get_dtype_cache +from pypy.module.micronumpy.interp_numarray import W_NDimArray, Scalar from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) @@ -11,9 +11,10 @@ class TestSignature(object): def test_binop_signature(self, space): - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + float64_dtype = get_dtype_cache(space).w_float64dtype + bool_dtype = get_dtype_cache(space).w_booldtype - ar = NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) assert v1.signature is not v2.signature @@ -22,7 +23,7 @@ v4 = ar.descr_add(space, ar) assert v1.signature is v4.signature - bool_ar = NDimArray(10, [10], dtype=space.fromcache(interp_dtype.W_BoolDtype)) + bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) assert v5.signature is not v1.signature assert v5.signature is not v2.signature @@ -30,7 +31,9 @@ assert v5.signature is v6.signature def test_slice_signature(self, space): - ar = NDimArray(10, [10], dtype=space.fromcache(interp_dtype.W_Float64Dtype)) + float64_dtype = get_dtype_cache(space).w_float64dtype + + ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) assert v1.signature is v2.signature @@ -41,10 +44,10 @@ class TestUfuncCoerscion(object): def test_binops(self, space): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype) - int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype) - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + bool_dtype = get_dtype_cache(space).w_booldtype + int8_dtype = get_dtype_cache(space).w_int8dtype + 
int32_dtype = get_dtype_cache(space).w_int32dtype + float64_dtype = get_dtype_cache(space).w_float64dtype # Basic pairing assert find_binop_result_dtype(space, bool_dtype, bool_dtype) is bool_dtype @@ -62,19 +65,19 @@ assert find_binop_result_dtype(space, bool_dtype, float64_dtype, promote_to_float=True) is float64_dtype def test_unaryops(self, space): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype) - uint8_dtype = space.fromcache(interp_dtype.W_UInt8Dtype) - int16_dtype = space.fromcache(interp_dtype.W_Int16Dtype) - uint16_dtype = space.fromcache(interp_dtype.W_UInt16Dtype) - int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype) - uint32_dtype = space.fromcache(interp_dtype.W_UInt32Dtype) - long_dtype = space.fromcache(interp_dtype.W_LongDtype) - ulong_dtype = space.fromcache(interp_dtype.W_ULongDtype) - int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype) - uint64_dtype = space.fromcache(interp_dtype.W_UInt64Dtype) - float32_dtype = space.fromcache(interp_dtype.W_Float32Dtype) - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + bool_dtype = get_dtype_cache(space).w_booldtype + int8_dtype = get_dtype_cache(space).w_int8dtype + uint8_dtype = get_dtype_cache(space).w_uint8dtype + int16_dtype = get_dtype_cache(space).w_int16dtype + uint16_dtype = get_dtype_cache(space).w_uint16dtype + int32_dtype = get_dtype_cache(space).w_int32dtype + uint32_dtype = get_dtype_cache(space).w_uint32dtype + long_dtype = get_dtype_cache(space).w_longdtype + ulong_dtype = get_dtype_cache(space).w_ulongdtype + int64_dtype = get_dtype_cache(space).w_int64dtype + uint64_dtype = get_dtype_cache(space).w_uint64dtype + float32_dtype = get_dtype_cache(space).w_float32dtype + float64_dtype = get_dtype_cache(space).w_float64dtype # Normal rules, everything returns itself assert find_unaryop_result_dtype(space, bool_dtype) is bool_dtype diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -1,6 +1,9 @@ +import py -import py -from pypy.module.micronumpy.compile import * +from pypy.module.micronumpy.compile import (numpy_compile, Assignment, + ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, + FunctionCall, FakeSpace) + class TestCompiler(object): def compile(self, code): @@ -106,7 +109,7 @@ c -> 3 """ interp = self.run(code) - assert interp.results[-1].value.val == 9 + assert interp.results[-1].value == 9 def test_array_getitem(self): code = """ @@ -115,7 +118,7 @@ a + b -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 3 + 6 + assert interp.results[0].value == 3 + 6 def test_range_getitem(self): code = """ @@ -123,7 +126,7 @@ r -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_sum(self): code = """ @@ -132,7 +135,7 @@ r """ interp = self.run(code) - assert interp.results[0].value.val == 15 + assert interp.results[0].value.value == 15 def test_array_write(self): code = """ @@ -141,7 +144,7 @@ a -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 15 + assert interp.results[0].value == 15 def test_min(self): interp = self.run(""" @@ -150,7 +153,7 @@ b = a + a min(b) """) - assert interp.results[0].value.val == -24 + assert interp.results[0].value.value == -24 def test_max(self): interp = self.run(""" @@ -159,7 +162,7 @@ b = a + a max(b) """) - assert 
interp.results[0].value.val == 256 + assert interp.results[0].value.value == 256 def test_slice(self): interp = self.run(""" @@ -167,7 +170,7 @@ b = a -> : b -> 3 """) - assert interp.results[0].value.val == 4 + assert interp.results[0].value == 4 def test_slice_step(self): interp = self.run(""" @@ -175,7 +178,7 @@ b = a -> ::2 b -> 3 """) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_setslice(self): interp = self.run(""" @@ -185,7 +188,7 @@ a[::3] = b a -> 3 """) - assert interp.results[0].value.val == 5 + assert interp.results[0].value == 5 def test_slice2(self): @@ -196,14 +199,14 @@ b = s1 + s2 b -> 3 """) - assert interp.results[0].value.val == 15 + assert interp.results[0].value == 15 def test_multidim_getitem(self): interp = self.run(""" a = [[1,2]] a -> 0 -> 1 """) - assert interp.results[0].value.val == 2 + assert interp.results[0].value == 2 def test_multidim_getitem_2(self): interp = self.run(""" @@ -211,7 +214,7 @@ b = a + a b -> 1 -> 1 """) - assert interp.results[0].value.val == 8 + assert interp.results[0].value == 8 def test_set_slice(self): interp = self.run(""" @@ -220,7 +223,7 @@ b[:] = a + a b -> 3 """) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_set_slice2(self): interp = self.run(""" @@ -231,4 +234,4 @@ a[0:30:3] = c a -> 3 """) - assert interp.results[0].value.val == 11 + assert interp.results[0].value == 11 diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -31,7 +31,7 @@ def test_repr_str(self): from numpypy import dtype - assert repr(dtype) == "" + assert repr(dtype) == "" d = dtype('?') assert repr(d) == "dtype('bool')" assert str(d) == "bool" @@ -45,13 +45,13 @@ assert a[i] is True_ def test_copy_array_with_dtype(self): - from numpypy import array, False_, True_ + from numpypy import array, False_, True_, int64 a = array([0, 1, 2, 3], dtype=long) # int on 64-bit, long in 32-bit - assert is_valid_int(a[0]) + assert isinstance(a[0], int64) b = a.copy() - assert is_valid_int(b[0]) + assert isinstance(b[0], int64) a = array([0, 1, 2, 3], dtype=bool) assert a[0] is False_ @@ -73,17 +73,17 @@ assert a[i] is True_ def test_zeros_long(self): - from numpypy import zeros + from numpypy import zeros, int64 a = zeros(10, dtype=long) for i in range(10): - assert is_valid_int(a[i]) + assert isinstance(a[i], int64) assert a[1] == 0 def test_ones_long(self): - from numpypy import ones + from numpypy import ones, int64 a = ones(10, dtype=long) for i in range(10): - assert is_valid_int(a[i]) + assert isinstance(a[i], int64) assert a[1] == 1 def test_overflow(self): @@ -166,3 +166,99 @@ # You can't subclass dtype raises(TypeError, type, "Foo", (dtype,), {}) + +class AppTestTypes(BaseNumpyAppTest): + def test_abstract_types(self): + import numpypy as numpy + raises(TypeError, numpy.generic, 0) + raises(TypeError, numpy.number, 0) + raises(TypeError, numpy.integer, 0) + exc = raises(TypeError, numpy.signedinteger, 0) + assert str(exc.value) == "cannot create 'numpypy.signedinteger' instances" + + raises(TypeError, numpy.floating, 0) + raises(TypeError, numpy.inexact, 0) + + def test_bool(self): + import numpypy as numpy + + assert numpy.bool_.mro() == [numpy.bool_, numpy.generic, object] + assert numpy.bool_(3) is numpy.True_ + assert numpy.bool_("") is numpy.False_ + assert type(numpy.True_) is type(numpy.False_) is numpy.bool_ + + class 
X(numpy.bool_): + pass + + assert type(X(True)) is numpy.bool_ + assert X(True) is numpy.True_ + + def test_int8(self): + import numpypy as numpy + + assert numpy.int8.mro() == [numpy.int8, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object] + + a = numpy.array([1, 2, 3], numpy.int8) + assert type(a[1]) is numpy.int8 + assert numpy.dtype("int8").type is numpy.int8 + + x = numpy.int8(128) + assert x == -128 + assert x != 128 + assert type(x) is numpy.int8 + assert repr(x) == "-128" + + assert type(int(x)) is int + assert int(x) == -128 + + def test_int16(self): + import numpypy as numpy + + x = numpy.int16(3) + assert x == 3 + + def test_int32(self): + import numpypy as numpy + + x = numpy.int32(23) + assert x == 23 + + def test_int_(self): + import numpypy as numpy + + assert numpy.int_ is numpy.dtype(int).type + assert numpy.int_.mro() == [numpy.int_, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object] + + def test_int64(self): + import sys + import numpypy as numpy + + if sys.maxint == 2 ** 63 -1: + assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object] + else: + assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object] + + assert numpy.dtype(numpy.int64).type is numpy.int64 + assert numpy.int64(3) == 3 + + def test_float64(self): + import numpypy as numpy + + assert numpy.float64.mro() == [numpy.float64, numpy.floating, numpy.inexact, numpy.number, numpy.generic, float, object] + + a = numpy.array([1, 2, 3], numpy.float64) + assert type(a[1]) is numpy.float64 + assert numpy.dtype(float).type is numpy.float64 + + assert numpy.float64(2.0) == 2.0 + + def test_subclass_type(self): + import numpypy as numpy + + class X(numpy.float64): + def m(self): + return self + 2 + + b = X(10) + assert type(b) is X + assert b.m() == 12 diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1,7 +1,7 @@ import py from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest -from pypy.module.micronumpy.interp_numarray import NDimArray, shape_agreement +from pypy.module.micronumpy.interp_numarray import W_NDimArray, shape_agreement from pypy.module.micronumpy import signature from pypy.interpreter.error import OperationError from pypy.conftest import gettestobjspace @@ -28,18 +28,18 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = NDimArray(100, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'F') assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = NDimArray(100, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'C') assert a.strides == [15, 3, 1] assert a.backstrides == [135, 12, 2] def test_create_slice_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] @@ -58,7 +58,7 @@ def test_create_slice_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') s = a.create_slice(space, [(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] @@ -78,7 +78,7 @@ def 
test_slice_of_slice_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(5, 0, 0, 1)]) assert s.start == 5 s2 = s.create_slice(space, [(3, 0, 0, 1)]) @@ -96,7 +96,7 @@ def test_slice_of_slice_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') s = a.create_slice(space, [(5, 0, 0, 1)]) assert s.start == 15 * 5 s2 = s.create_slice(space, [(3, 0, 0, 1)]) @@ -114,7 +114,7 @@ def test_negative_step_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] @@ -122,14 +122,14 @@ def test_negative_step_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') s = a.create_slice(space, [(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] def test_index_of_single_item_f(self): - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 + 2 * 10 + 2 * 50 s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) @@ -139,7 +139,7 @@ assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 1)) def test_index_of_single_item_c(self): - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 * 3 * 5 + 2 * 3 + 2 s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) @@ -160,6 +160,21 @@ class AppTestNumArray(BaseNumpyAppTest): + def test_ndarray(self): + from numpypy import ndarray, array, dtype + + assert type(ndarray) is type + assert type(array) is not type + a = ndarray((2, 3)) + assert a.shape == (2, 3) + assert a.dtype == dtype(float) + + raises(TypeError, ndarray, [[1], [2], [3]]) + + a = ndarray(3, dtype=int) + assert a.shape == (3,) + assert a.dtype is dtype(int) + def test_type(self): from numpypy import array ar = array(range(5)) @@ -359,11 +374,11 @@ assert r[i] == i + 3 def test_add_list(self): - from numpypy import array + from numpypy import array, ndarray a = array(range(5)) b = list(reversed(range(5))) c = a + b - assert isinstance(c, array) + assert isinstance(c, ndarray) for i in range(5): assert c[i] == 4 @@ -709,7 +724,7 @@ assert b[i] == 2.5 * a[i] def test_dtype_guessing(self): - from numpypy import array, dtype + from numpypy import array, dtype, float64, int8, bool_ assert array([True]).dtype is dtype(bool) assert array([True, False]).dtype is dtype(bool) @@ -719,6 +734,10 @@ assert array([1.2, True]).dtype is dtype(float) assert array([1.2, 5]).dtype is dtype(float) assert array([]).dtype is dtype(float) + assert array([float64(2)]).dtype is dtype(float) + assert array([int8(3)]).dtype is dtype("int8") + assert array([bool_(True)]).dtype is dtype(bool) + assert array([bool_(True), 3.0]).dtype is dtype(float) def test_comparison(self): import operator @@ -1008,10 +1027,10 @@ b = a[0].copy() assert (b == zeros(10)).all() -class AppTestSupport(object): +class AppTestSupport(BaseNumpyAppTest): def setup_class(cls): import struct - 
cls.space = gettestobjspace(usemodules=('micronumpy',)) + BaseNumpyAppTest.setup_class.im_func(cls) cls.w_data = cls.space.wrap(struct.pack('dddd', 1, 2, 3, 4)) def test_fromstring(self): diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -8,7 +8,7 @@ assert isinstance(add, ufunc) assert repr(add) == "" - assert repr(ufunc) == "" + assert repr(ufunc) == "" def test_ufunc_attrs(self): from numpypy import add, multiply, sin @@ -37,36 +37,36 @@ assert minimum(2.0, 3.0) == 2.0 def test_sequence(self): - from numpypy import array, negative, minimum + from numpypy import array, ndarray, negative, minimum a = array(range(3)) b = [2.0, 1.0, 0.0] c = 1.0 b_neg = negative(b) - assert isinstance(b_neg, array) + assert isinstance(b_neg, ndarray) for i in range(3): assert b_neg[i] == -b[i] min_a_b = minimum(a, b) - assert isinstance(min_a_b, array) + assert isinstance(min_a_b, ndarray) for i in range(3): assert min_a_b[i] == min(a[i], b[i]) min_b_a = minimum(b, a) - assert isinstance(min_b_a, array) + assert isinstance(min_b_a, ndarray) for i in range(3): assert min_b_a[i] == min(a[i], b[i]) min_a_c = minimum(a, c) - assert isinstance(min_a_c, array) + assert isinstance(min_a_c, ndarray) for i in range(3): assert min_a_c[i] == min(a[i], c) min_c_a = minimum(c, a) - assert isinstance(min_c_a, array) + assert isinstance(min_c_a, ndarray) for i in range(3): assert min_c_a[i] == min(a[i], c) min_b_c = minimum(b, c) - assert isinstance(min_b_c, array) + assert isinstance(min_b_c, ndarray) for i in range(3): assert min_b_c[i] == min(b[i], c) min_c_b = minimum(c, b) - assert isinstance(min_c_b, array) + assert isinstance(min_c_b, ndarray) for i in range(3): assert min_c_b[i] == min(b[i], c) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -8,11 +8,11 @@ from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.metainterp.warmspot import reset_stats -from pypy.module.micronumpy import interp_ufuncs, signature +from pypy.module.micronumpy import interp_boxes, interp_ufuncs, signature from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, FloatObject, IntObject, BoolObject, Parser, InterpreterState) -from pypy.module.micronumpy.interp_numarray import NDimArray, NDimSlice,\ - BaseArray +from pypy.module.micronumpy.interp_numarray import (W_NDimArray, NDimSlice, + BaseArray) from pypy.rlib.nonconst import NonConstant from pypy.rpython.annlowlevel import llstr, hlstr @@ -48,17 +48,15 @@ def f(i): interp = InterpreterState(codes[i]) interp.run(space) - res = interp.results[-1] - assert isinstance(res, BaseArray) - w_res = res.eval(res.start_iter()).wrap(interp.space) - if isinstance(w_res, BoolObject): - return float(w_res.boolval) - elif isinstance(w_res, FloatObject): - return w_res.floatval - elif isinstance(w_res, IntObject): - return w_res.intval - else: - return -42. 
+ w_res = interp.results[-1] + if isinstance(w_res, BaseArray): + w_res = w_res.eval(w_res.start_iter()) + + if isinstance(w_res, interp_boxes.W_Float64Box): + return w_res.value + elif isinstance(w_res, interp_boxes.W_BoolBox): + return float(w_res.value) + raise TypeError(w_res) if self.graph is None: interp, graph = self.meta_interp(f, [i], @@ -80,9 +78,9 @@ def test_add(self): result = self.run("add") - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, 'int_add': 3, - 'int_ge': 1, 'guard_false': 1, 'jump': 1}) + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, + 'int_ge': 1, 'guard_false': 1, 'jump': 1}) assert result == 3 + 3 def define_float_add(): @@ -94,9 +92,9 @@ def test_floatadd(self): result = self.run("float_add") assert result == 3 + 3 - self.check_simple_loop({"getarrayitem_raw": 1, "float_add": 1, - "setarrayitem_raw": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 1, "float_add": 1, + "setinteriorfield_raw": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) def define_sum(): return """ @@ -108,9 +106,9 @@ def test_sum(self): result = self.run("sum") assert result == 2 * sum(range(30)) - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 2, - "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 2, + "int_add": 2, "int_ge": 1, "guard_false": 1, + "jump": 1}) def define_prod(): return """ @@ -125,9 +123,9 @@ for i in range(30): expected *= i * 2 assert result == expected - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) def test_max(self): py.test.skip("broken, investigate") @@ -138,9 +136,9 @@ max(b) """) assert result == 256 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 1, + "int_lt": 1, "guard_true": 1, "jump": 1}) def test_min(self): py.test.skip("broken, investigate") @@ -151,9 +149,9 @@ min(b) """) assert result == -24 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 1, + "int_lt": 1, "guard_true": 1, "jump": 1}) def define_any(): return """ @@ -166,10 +164,10 @@ def test_any(self): result = self.run("any") assert result == 1 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_ne": 1, "int_add": 2, - "int_ge": 1, "jump": 1, - "guard_false": 2}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_ne": 1, "int_add": 2, + "int_ge": 1, "jump": 1, + "guard_false": 2}) def define_already_forced(): return """ @@ -188,10 +186,10 @@ # sure it was optimized correctly. # XXX the comment above is wrong now. 
We need preferrably a way to # count the two loops separately - self.check_resops({'setarrayitem_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 35, + self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, - 'getarrayitem_raw': 4, 'float_add': 2, 'guard_false': 4, + 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, 'guard_value': 2}) def define_ufunc(): @@ -205,10 +203,9 @@ def test_ufunc(self): result = self.run("ufunc") assert result == -6 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, "float_neg": 1, - "setarrayitem_raw": 1, "int_add": 3, - "int_ge": 1, "guard_false": 1, "jump": 1, - }) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_neg": 1, + "setinteriorfield_raw": 1, "int_add": 3, + "int_ge": 1, "guard_false": 1, "jump": 1}) def define_specialization(): return """ @@ -246,9 +243,9 @@ def test_slice(self): result = self.run("slice") assert result == 18 - self.check_simple_loop({'getarrayitem_raw': 2, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_ge': 1, 'guard_false': 1, 'jump': 1}) @@ -265,8 +262,8 @@ def test_slice2(self): result = self.run("slice2") assert result == 15 - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, 'int_add': 3, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_ge': 1, 'guard_false': 1, 'jump': 1}) def define_multidim(): @@ -279,11 +276,11 @@ def test_multidim(self): result = self.run('multidim') assert result == 8 - self.check_simple_loop({'float_add': 1, 'getarrayitem_raw': 2, - 'guard_false': 1, 'int_add': 3, 'int_ge': 1, - 'jump': 1, 'setarrayitem_raw': 1}) # int_add might be 1 here if we try slightly harder with # reusing indexes or some optimization + self.check_simple_loop({'float_add': 1, 'getinteriorfield_raw': 2, + 'guard_false': 1, 'int_add': 3, 'int_ge': 1, + 'jump': 1, 'setinteriorfield_raw': 1}) def define_multidim_slice(): return """ @@ -329,18 +326,18 @@ result = self.run("setslice") assert result == 11.0 self.check_loop_count(1) - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add' : 1, - 'setarrayitem_raw': 1, 'int_add': 3, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_eq': 1, 'guard_false': 1, 'jump': 1}) class TestNumpyOld(LLJitMixin): def setup_class(cls): py.test.skip("old") from pypy.module.micronumpy.compile import FakeSpace - from pypy.module.micronumpy.interp_dtype import W_Float64Dtype + from pypy.module.micronumpy.interp_dtype import get_dtype_cache cls.space = FakeSpace() - cls.float64_dtype = cls.space.fromcache(W_Float64Dtype) + cls.float64_dtype = get_dtype_cache(cls.space).w_float64dtype def test_int32_sum(self): py.test.skip("pypy/jit/backend/llimpl.py needs to be changed to " @@ -355,7 +352,7 @@ dtype = float64_dtype else: dtype = int32_dtype - ar = NDimArray(n, [n], dtype=dtype) + ar = W_NDimArray(n, [n], dtype=dtype) i = 0 while i < n: ar.get_concrete().setitem(i, int32_dtype.box(7)) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/types.py @@ -0,0 +1,389 @@ +import functools +import math + +from pypy.module.micronumpy import interp_boxes +from pypy.objspace.std.floatobject import 
float2string +from pypy.rlib import rfloat, libffi, clibffi +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import LONG_BIT, widen +from pypy.rpython.lltypesystem import lltype, rffi + + +def simple_unary_op(func): + specialize.argtype(1)(func) + @functools.wraps(func) + def dispatcher(self, v): + return self.box( + func( + self, + self.for_computation(self.unbox(v)) + ) + ) + return dispatcher + +def simple_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return self.box( + func( + self, + self.for_computation(self.unbox(v1)), + self.for_computation(self.unbox(v2)), + ) + ) + return dispatcher + +def raw_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return func(self, + self.for_computation(self.unbox(v1)), + self.for_computation(self.unbox(v2)) + ) + return dispatcher + +class BaseType(object): + def _unimplemented_ufunc(self, *args): + raise NotImplementedError + # add = sub = mul = div = mod = pow = eq = ne = lt = le = gt = ge = max = \ + # min = copysign = pos = neg = abs = sign = reciprocal = fabs = floor = \ + # exp = sin = cos = tan = arcsin = arccos = arctan = arcsinh = \ + # arctanh = _unimplemented_ufunc + +class Primitive(object): + _mixin_ = True + def get_element_size(self): + return rffi.sizeof(self.T) + + @specialize.argtype(1) + def box(self, value): + return self.BoxType(rffi.cast(self.T, value)) + + def unbox(self, box): + assert isinstance(box, self.BoxType) + return box.value + + def coerce(self, space, w_item): + if isinstance(w_item, self.BoxType): + return w_item + return self.coerce_subtype(space, space.gettypefor(self.BoxType), w_item) + + def coerce_subtype(self, space, w_subtype, w_item): + # XXX: ugly + w_obj = space.allocate_instance(self.BoxType, w_subtype) + assert isinstance(w_obj, self.BoxType) + w_obj.__init__(self._coerce(space, w_item).value) + return w_obj + + def _coerce(self, space, w_item): + raise NotImplementedError + + def read(self, storage, width, i, offset): + return self.box(libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset + )) + + def store(self, storage, width, i, offset, box): + value = self.unbox(box) + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value + ) + + def fill(self, storage, width, box, start, stop, offset): + value = self.unbox(box) + for i in xrange(start, stop): + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value + ) + + @simple_binary_op + def add(self, v1, v2): + return v1 + v2 + + @simple_binary_op + def sub(self, v1, v2): + return v1 - v2 + + @simple_binary_op + def mul(self, v1, v2): + return v1 * v2 + + @simple_unary_op + def pos(self, v): + return +v + + @simple_unary_op + def neg(self, v): + return -v + + @simple_unary_op + def abs(self, v): + return abs(v) + + @raw_binary_op + def eq(self, v1, v2): + return v1 == v2 + + @raw_binary_op + def ne(self, v1, v2): + return v1 != v2 + + @raw_binary_op + def lt(self, v1, v2): + return v1 < v2 + + @raw_binary_op + def le(self, v1, v2): + return v1 <= v2 + + @raw_binary_op + def gt(self, v1, v2): + return v1 > v2 + + @raw_binary_op + def ge(self, v1, v2): + return v1 >= v2 + + def bool(self, v): + return bool(self.for_computation(self.unbox(v))) + + @simple_binary_op + def max(self, v1, v2): + return max(v1, v2) + + @simple_binary_op + def min(self, v1, v2): + return min(v1, v2) + +class Bool(BaseType, 
Primitive): + T = lltype.Bool + BoxType = interp_boxes.W_BoolBox + + True = BoxType(True) + False = BoxType(False) + + @specialize.argtype(1) + def box(self, value): + box = Primitive.box(self, value) + if box.value: + return self.True + else: + return self.False + + def coerce_subtype(self, space, w_subtype, w_item): + # Doesn't return subclasses so it can return the constants. + return self._coerce(space, w_item) + + def _coerce(self, space, w_item): + return self.box(space.is_true(w_item)) + + def str_format(self, box): + value = self.unbox(box) + return "True" if value else "False" + + def for_computation(self, v): + return int(v) + +class Integer(Primitive): + _mixin_ = True + + def _coerce(self, space, w_item): + return self.box(space.int_w(space.int(w_item))) + + def str_format(self, box): + value = self.unbox(box) + return str(self.for_computation(value)) + + def for_computation(self, v): + return widen(v) + + @simple_binary_op + def div(self, v1, v2): + if v2 == 0: + return 0 + return v1 / v2 + + @simple_binary_op + def mod(self, v1, v2): + return v1 % v2 + + @simple_binary_op + def pow(self, v1, v2): + res = 1 + while v2 > 0: + if v2 & 1: + res *= v1 + v2 >>= 1 + if v2 == 0: + break + v1 *= v1 + return res + + @simple_unary_op + def sign(self, v): + if v > 0: + return 1 + elif v < 0: + return -1 + else: + assert v == 0 + return 0 + +class Int8(BaseType, Integer): + T = rffi.SIGNEDCHAR + BoxType = interp_boxes.W_Int8Box + +class UInt8(BaseType, Integer): + T = rffi.UCHAR + BoxType = interp_boxes.W_UInt8Box + +class Int16(BaseType, Integer): + T = rffi.SHORT + BoxType = interp_boxes.W_Int16Box + +class UInt16(BaseType, Integer): + T = rffi.USHORT + BoxType = interp_boxes.W_UInt16Box + +class Int32(BaseType, Integer): + T = rffi.INT + BoxType = interp_boxes.W_Int32Box + +class UInt32(BaseType, Integer): + T = rffi.UINT + BoxType = interp_boxes.W_UInt32Box + +class Long(BaseType, Integer): + T = rffi.LONG + BoxType = interp_boxes.W_LongBox + +class ULong(BaseType, Integer): + T = rffi.ULONG + BoxType = interp_boxes.W_ULongBox + +class Int64(BaseType, Integer): + T = rffi.LONGLONG + BoxType = interp_boxes.W_Int64Box + +class UInt64(BaseType, Integer): + T = rffi.ULONGLONG + BoxType = interp_boxes.W_UInt64Box + +class Float(Primitive): + _mixin_ = True + + def _coerce(self, space, w_item): + return self.box(space.float_w(space.float(w_item))) + + def str_format(self, box): + value = self.unbox(box) + return float2string(self.for_computation(value), "g", rfloat.DTSF_STR_PRECISION) + + def for_computation(self, v): + return float(v) + + @simple_binary_op + def div(self, v1, v2): + try: + return v1 / v2 + except ZeroDivisionError: + if v1 == v2 == 0.0: + return rfloat.NAN + return rfloat.copysign(rfloat.INFINITY, v1 * v2) + + @simple_binary_op + def mod(self, v1, v2): + return math.fmod(v1, v2) + + @simple_binary_op + def pow(self, v1, v2): + return math.pow(v1, v2) + + @simple_binary_op + def copysign(self, v1, v2): + return math.copysign(v1, v2) + + @simple_unary_op + def sign(self, v): + if v == 0.0: + return 0.0 + return rfloat.copysign(1.0, v) + + @simple_unary_op + def fabs(self, v): + return math.fabs(v) + + @simple_unary_op + def reciprocal(self, v): + if v == 0.0: + return rfloat.copysign(rfloat.INFINITY, v) + return 1.0 / v + + @simple_unary_op + def floor(self, v): + return math.floor(v) + + @simple_unary_op + def exp(self, v): + try: + return math.exp(v) + except OverflowError: + return rfloat.INFINITY + + @simple_unary_op + def sin(self, v): + return math.sin(v) + + 
@simple_unary_op + def cos(self, v): + return math.cos(v) + + @simple_unary_op + def tan(self, v): + return math.tan(v) + + @simple_unary_op + def arcsin(self, v): + if not -1.0 <= v <= 1.0: + return rfloat.NAN + return math.asin(v) + + @simple_unary_op + def arccos(self, v): + if not -1.0 <= v <= 1.0: + return rfloat.NAN + return math.acos(v) + + @simple_unary_op + def arctan(self, v): + return math.atan(v) + + @simple_unary_op + def arcsinh(self, v): + return math.asinh(v) + + @simple_unary_op + def arctanh(self, v): + if v == 1.0 or v == -1.0: + return math.copysign(rfloat.INFINITY, v) + if not -1.0 < v < 1.0: + return rfloat.NAN + return math.atanh(v) + + @simple_unary_op + def sqrt(self, v): + try: + return math.sqrt(v) + except ValueError: + return rfloat.NAN + + +class Float32(BaseType, Float): + T = rffi.FLOAT + BoxType = interp_boxes.W_Float32Box + +class Float64(BaseType, Float): + T = rffi.DOUBLE + BoxType = interp_boxes.W_Float64Box \ No newline at end of file diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -102,6 +102,7 @@ 'instancetypedef', 'terminator', '_version_tag?', + 'name?', ] # for config.objspace.std.getattributeshortcut diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -738,3 +738,29 @@ return hop.genop('jit_marker', vlist, resulttype=lltype.Void) +def record_known_class(value, cls): + """ + Assure the JIT that value is an instance of cls. This is not a precise + class check, unlike a guard_class. + """ + assert isinstance(value, cls) + + +class Entry(ExtRegistryEntry): + _about_ = record_known_class + + def compute_result_annotation(self, s_inst, s_cls): + from pypy.annotation import model as annmodel + assert s_cls.is_constant() + assert not s_inst.can_be_none() + assert isinstance(s_inst, annmodel.SomeInstance) + + def specialize_call(self, hop): + from pypy.rpython.lltypesystem import lltype, rclass + classrepr = rclass.get_type_repr(hop.rtyper) + + hop.exception_cannot_occur() + v_inst = hop.inputarg(hop.args_r[0], arg=0) + v_cls = hop.inputarg(classrepr, arg=1) + return hop.genop('jit_record_known_class', [v_inst, v_cls], + resulttype=lltype.Void) diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -416,6 +416,10 @@ def getaddressindll(self, name): return dlsym(self.lib, name) +# These specialize.call_location's should really be specialize.arg(0), however +# you can't hash a pointer obj, which the specialize machinery wants to do. +# Given the present usage of these functions, it's good enough. 
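As an aside on the comment above, a minimal sketch of the distinction it draws between the two hints, assuming a PyPy checkout on sys.path; op_on_type and read_slot are hypothetical helpers invented for illustration, not part of this changeset.

    from pypy.rlib.objectmodel import specialize

    @specialize.arg(0)
    def op_on_type(typename, value):
        # One specialized copy per constant *value* of 'typename'; the
        # specialization machinery hashes that constant, so it must be
        # hashable (which an ffitype pointer is not).
        return (typename, value)

    @specialize.call_location()
    def read_slot(container, index):
        # One specialized copy per *call site*, with no constraint on the
        # arguments themselves -- the fallback chosen for array_getitem /
        # array_setitem below.
        return container[index]
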
+ at specialize.call_location() @jit.oopspec("libffi_array_getitem(ffitype, width, addr, index, offset)") def array_getitem(ffitype, width, addr, index, offset): for TYPE, ffitype2 in clibffi.ffitype_map: @@ -425,6 +429,7 @@ return rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] assert False + at specialize.call_location() @jit.oopspec("libffi_array_setitem(ffitype, width, addr, index, offset, value)") def array_setitem(ffitype, width, addr, index, offset, value): for TYPE, ffitype2 in clibffi.ffitype_map: @@ -433,4 +438,4 @@ addr = rffi.ptradd(addr, offset) rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] = value return - assert False \ No newline at end of file + assert False diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -91,9 +91,18 @@ return decorated_func + def call_location(self): + """ Specializes the function for each call site. + """ + def decorated_func(func): + func._annspecialcase_ = "specialize:call_location" + return func + + return decorated_func + def _wrap(self, args): return "("+','.join([repr(arg) for arg in args]) +")" - + specialize = _Specialize() def enforceargs(*args): @@ -125,7 +134,7 @@ def __hash__(self): raise TypeError("Symbolics are not hashable!") - + def __nonzero__(self): raise TypeError("Symbolics are not comparable") @@ -155,7 +164,7 @@ def lltype(self): from pypy.rpython.lltypesystem import lltype return lltype.Signed - + malloc_zero_filled = CDefinedIntSymbolic('MALLOC_ZERO_FILLED', default=0) running_on_llinterp = CDefinedIntSymbolic('RUNNING_ON_LLINTERP', default=1) # running_on_llinterp is meant to have the value 0 in all backends @@ -221,7 +230,7 @@ def compute_result_annotation(self, s_sizehint): from pypy.annotation.model import SomeInteger - + assert isinstance(s_sizehint, SomeInteger) return self.bookkeeper.newlist() diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -548,6 +548,9 @@ def op_jit_marker(self, *args): pass + def op_jit_record_known_class(self, *args): + pass + def op_get_exception_addr(self, *args): raise NotImplementedError diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -430,6 +430,7 @@ 'jit_force_virtual': LLOp(canrun=True), 'jit_is_virtual': LLOp(canrun=True), 'jit_force_quasi_immutable': LLOp(canrun=True), + 'jit_record_known_class' : LLOp(canrun=True), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canmallocgc=True), diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -555,6 +555,9 @@ def op_jit_force_quasi_immutable(*args): pass +def op_jit_record_known_class(x, y): + pass + def op_get_group_member(TYPE, grpptr, memberoffset): from pypy.rpython.lltypesystem import llgroup assert isinstance(memberoffset, llgroup.GroupMemberOffset) diff --git a/pypy/translator/c/src/support.h b/pypy/translator/c/src/support.h --- a/pypy/translator/c/src/support.h +++ b/pypy/translator/c/src/support.h @@ -11,6 +11,7 @@ #endif /* MIN */ #define RUNNING_ON_LLINTERP 0 +#define OP_JIT_RECORD_KNOWN_CLASS(i, c, r) /* nothing */ #define FAIL_EXCEPTION(exc, msg) \ { \ From noreply at buildbot.pypy.org Sun Dec 4 04:14:47 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Sun, 4 Dec 2011 04:14:47 
+0100 (CET) Subject: [pypy-commit] pypy win64-stage1: Merge Message-ID: <20111204031447.D0ED282A00@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50103:12d0ea7bc8f1 Date: 2011-12-04 04:11 +0100 http://bitbucket.org/pypy/pypy/changeset/12d0ea7bc8f1/ Log: Merge diff --git a/lib_pypy/itertools.py b/lib_pypy/_itertools.py copy from lib_pypy/itertools.py copy to lib_pypy/_itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/_itertools.py @@ -1,5 +1,5 @@ -# Note that PyPy contains also a built-in module 'itertools' which will -# hide this one if compiled in. +# Note that PyPy contains also a built-in implementation of 'itertools'; +# when translated with default options, this one is not used. """Functional tools for creating and using iterators. @@ -25,7 +25,7 @@ __all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee'] + 'takewhile', 'tee', 'compress', 'product'] try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -1,670 +1,6 @@ -# Note that PyPy contains also a built-in module 'itertools' which will -# hide this one if compiled in. - -"""Functional tools for creating and using iterators. - -Infinite iterators: -count([n]) --> n, n+1, n+2, ... -cycle(p) --> p0, p1, ... plast, p0, p1, ... -repeat(elem [,n]) --> elem, elem, elem, ... endlessly or up to n times - -Iterators terminating on the shortest input sequence: -izip(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... -ifilter(pred, seq) --> elements of seq where pred(elem) is True -ifilterfalse(pred, seq) --> elements of seq where pred(elem) is False -islice(seq, [start,] stop [, step]) --> elements from - seq[start:stop:step] -imap(fun, p, q, ...) --> fun(p0, q0), fun(p1, q1), ... -starmap(fun, seq) --> fun(*seq[0]), fun(*seq[1]), ... -tee(it, n=2) --> (it1, it2 , ... itn) splits one iterator into n -chain(p, q, ...) --> p0, p1, ... plast, q0, q1, ... -takewhile(pred, seq) --> seq[0], seq[1], until pred fails -dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails -groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v) -""" - -__all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', - 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee'] - -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f - - -class chain(object): - """Make an iterator that returns elements from the first iterable - until it is exhausted, then proceeds to the next iterable, until - all of the iterables are exhausted. Used for treating consecutive - sequences as a single sequence. 
- - Equivalent to : - - def chain(*iterables): - for it in iterables: - for element in it: - yield element - """ - def __init__(self, *iterables): - self._iterables_iter = iter(map(iter, iterables)) - # little trick for the first chain.next() call - self._cur_iterable_iter = iter([]) - - def __iter__(self): - return self - - def next(self): - while True: - try: - return self._cur_iterable_iter.next() - except StopIteration: - self._cur_iterable_iter = self._iterables_iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._cur_iterable_iter)) - - -class compress(object): - def __init__(self, data, selectors): - self.data = iter(data) - self.selectors = iter(selectors) - - def __iter__(self): - return self - - def next(self): - while True: - next_item = self.data.next() - next_selector = self.selectors.next() - if bool(next_selector): - return next_item - - -class count(object): - """Make an iterator that returns consecutive integers starting - with n. If not specified n defaults to zero. Does not currently - support python long integers. Often used as an argument to imap() - to generate consecutive data points. Also, used with izip() to - add sequence numbers. - - Equivalent to : - - def count(n=0): - if not isinstance(n, int): - raise TypeError("%s is not a regular integer" % n) - while True: - yield n - n += 1 - """ - def __init__(self, n=0): - if not isinstance(n, int): - raise TypeError('%s is not a regular integer' % n) - self.times = n-1 - - def __iter__(self): - return self - - def next(self): - self.times += 1 - return self.times - - def __repr__(self): - return 'count(%d)' % (self.times + 1) - - - -class cycle(object): - """Make an iterator returning elements from the iterable and - saving a copy of each. When the iterable is exhausted, return - elements from the saved copy. Repeats indefinitely. - - Equivalent to : - - def cycle(iterable): - saved = [] - for element in iterable: - yield element - saved.append(element) - while saved: - for element in saved: - yield element - """ - def __init__(self, iterable): - self._cur_iter = iter(iterable) - self._saved = [] - self._must_save = True - - def __iter__(self): - return self - - def next(self): - # XXX Could probably be improved - try: - next_elt = self._cur_iter.next() - if self._must_save: - self._saved.append(next_elt) - except StopIteration: - self._cur_iter = iter(self._saved) - next_elt = self._cur_iter.next() - self._must_save = False - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._cur_iter)) - return next_elt - - -class dropwhile(object): - """Make an iterator that drops elements from the iterable as long - as the predicate is true; afterwards, returns every - element. Note, the iterator does not produce any output until the - predicate is true, so it may have a lengthy start-up time. 
- - Equivalent to : - - def dropwhile(predicate, iterable): - iterable = iter(iterable) - for x in iterable: - if not predicate(x): - yield x - break - for x in iterable: - yield x - """ - def __init__(self, predicate, iterable): - self._predicate = predicate - self._iter = iter(iterable) - self._dropped = False - - def __iter__(self): - return self - - def next(self): - try: - value = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._iter)) - if self._dropped: - return value - while self._predicate(value): - value = self._iter.next() - self._dropped = True - return value - -class groupby(object): - """Make an iterator that returns consecutive keys and groups from the - iterable. The key is a function computing a key value for each - element. If not specified or is None, key defaults to an identity - function and returns the element unchanged. Generally, the - iterable needs to already be sorted on the same key function. - - The returned group is itself an iterator that shares the - underlying iterable with groupby(). Because the source is shared, - when the groupby object is advanced, the previous group is no - longer visible. So, if that data is needed later, it should be - stored as a list: - - groups = [] - uniquekeys = [] - for k, g in groupby(data, keyfunc): - groups.append(list(g)) # Store group iterator as a list - uniquekeys.append(k) - """ - def __init__(self, iterable, key=None): - if key is None: - key = lambda x: x - self.keyfunc = key - self.it = iter(iterable) - self.tgtkey = self.currkey = self.currvalue = xrange(0) - - def __iter__(self): - return self - - def next(self): - while self.currkey == self.tgtkey: - try: - self.currvalue = self.it.next() # Exit on StopIteration - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self.it)) - self.currkey = self.keyfunc(self.currvalue) - self.tgtkey = self.currkey - return (self.currkey, self._grouper(self.tgtkey)) - - def _grouper(self, tgtkey): - while self.currkey == tgtkey: - yield self.currvalue - self.currvalue = self.it.next() # Exit on StopIteration - self.currkey = self.keyfunc(self.currvalue) - - - -class _ifilter_base(object): - """base class for ifilter and ifilterflase""" - def __init__(self, predicate, iterable): - # Make sure iterable *IS* iterable - self._iter = iter(iterable) - if predicate is None: - self._predicate = bool - else: - self._predicate = predicate - - def __iter__(self): - return self - -class ifilter(_ifilter_base): - """Make an iterator that filters elements from iterable returning - only those for which the predicate is True. If predicate is - None, return the items that are true. - - Equivalent to : - - def ifilter: - if predicate is None: - predicate = bool - for x in iterable: - if predicate(x): - yield x - """ - def next(self): - try: - next_elt = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._iter)) - while True: - if self._predicate(next_elt): - return next_elt - next_elt = self._iter.next() - -class ifilterfalse(_ifilter_base): - """Make an iterator that filters elements from iterable returning - only those for which the predicate is False. If predicate is - None, return the items that are false. 
- - Equivalent to : - - def ifilterfalse(predicate, iterable): - if predicate is None: - predicate = bool - for x in iterable: - if not predicate(x): - yield x - """ - def next(self): - try: - next_elt = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._iter)) - while True: - if not self._predicate(next_elt): - return next_elt - next_elt = self._iter.next() - - - - -class imap(object): - """Make an iterator that computes the function using arguments - from each of the iterables. If function is set to None, then - imap() returns the arguments as a tuple. Like map() but stops - when the shortest iterable is exhausted instead of filling in - None for shorter iterables. The reason for the difference is that - infinite iterator arguments are typically an error for map() - (because the output is fully evaluated) but represent a common - and useful way of supplying arguments to imap(). - - Equivalent to : - - def imap(function, *iterables): - iterables = map(iter, iterables) - while True: - args = [i.next() for i in iterables] - if function is None: - yield tuple(args) - else: - yield function(*args) - - """ - def __init__(self, function, iterable, *other_iterables): - self._func = function - self._iters = map(iter, (iterable, ) + other_iterables) - - def __iter__(self): - return self - - def next(self): - try: - args = [it.next() for it in self._iters] - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (it)) - if self._func is None: - return tuple(args) - else: - return self._func(*args) - - - -class islice(object): - """Make an iterator that returns selected elements from the - iterable. If start is non-zero, then elements from the iterable - are skipped until start is reached. Afterward, elements are - returned consecutively unless step is set higher than one which - results in items being skipped. If stop is None, then iteration - continues until the iterator is exhausted, if at all; otherwise, - it stops at the specified position. Unlike regular slicing, - islice() does not support negative values for start, stop, or - step. Can be used to extract related fields from data where the - internal structure has been flattened (for example, a multi-line - report may list a name field on every third line). - """ - def __init__(self, iterable, *args): - s = slice(*args) - self.start, self.stop, self.step = s.start or 0, s.stop, s.step - if not isinstance(self.start, (int, long)): - raise ValueError("Start argument must be an integer") - if self.stop is not None and not isinstance(self.stop, (int,long)): - raise ValueError("Stop argument must be an integer or None") - if self.step is None: - self.step = 1 - if self.start<0 or (self.stop is not None and self.stop<0 - ) or self.step<=0: - raise ValueError, "indices for islice() must be positive" - self.it = iter(iterable) - self.donext = None - self.cnt = 0 - - def __iter__(self): - return self - - def next(self): - if self.donext is None: - try: - self.donext = self.it.next - except AttributeError: - raise TypeError - nextindex = self.start - if self.stop is not None and nextindex >= self.stop: - raise StopIteration - while self.cnt <= nextindex: - nextitem = self.donext() - self.cnt += 1 - self.start += self.step - return nextitem - -class izip(object): - """Make an iterator that aggregates elements from each of the - iterables. 
Like zip() except that it returns an iterator instead - of a list. Used for lock-step iteration over several iterables at - a time. - - Equivalent to : - - def izip(*iterables): - iterables = map(iter, iterables) - while iterables: - result = [i.next() for i in iterables] - yield tuple(result) - """ - def __init__(self, *iterables): - self._iterators = map(iter, iterables) - self._result = [None] * len(self._iterators) - - def __iter__(self): - return self - - def next(self): - if not self._iterators: - raise StopIteration() - try: - return tuple([i.next() for i in self._iterators]) - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % (i)) - - -class product(object): - - def __init__(self, *args, **kw): - if len(kw) > 1: - raise TypeError("product() takes at most 1 argument (%d given)" % - len(kw)) - self.repeat = kw.get('repeat', 1) - self.gears = [x for x in args] * self.repeat - self.num_gears = len(self.gears) - # initialization of indicies to loop over - self.indicies = [(0, len(self.gears[x])) - for x in range(0, self.num_gears)] - self.cont = True - - def roll_gears(self): - # Starting from the end of the gear indicies work to the front - # incrementing the gear until the limit is reached. When the limit - # is reached carry operation to the next gear - should_carry = True - for n in range(0, self.num_gears): - nth_gear = self.num_gears - n - 1 - if should_carry: - count, lim = self.indicies[nth_gear] - count += 1 - if count == lim and nth_gear == 0: - self.cont = False - if count == lim: - should_carry = True - count = 0 - else: - should_carry = False - self.indicies[nth_gear] = (count, lim) - else: - break - - def __iter__(self): - return self - - def next(self): - if not self.cont: - raise StopIteration - l = [] - for x in range(0, self.num_gears): - index, limit = self.indicies[x] - l.append(self.gears[x][index]) - self.roll_gears() - return tuple(l) - - -class repeat(object): - """Make an iterator that returns object over and over again. - Runs indefinitely unless the times argument is specified. Used - as argument to imap() for invariant parameters to the called - function. Also used with izip() to create an invariant part of a - tuple record. - - Equivalent to : - - def repeat(object, times=None): - if times is None: - while True: - yield object - else: - for i in xrange(times): - yield object - """ - def __init__(self, obj, times=None): - self._obj = obj - if times is not None: - xrange(times) # Raise a TypeError - if times < 0: - times = 0 - self._times = times - - def __iter__(self): - return self - - def next(self): - # next() *need* to decrement self._times when consumed - if self._times is not None: - if self._times <= 0: - raise StopIteration() - self._times -= 1 - return self._obj - - def __repr__(self): - if self._times is not None: - return 'repeat(%r, %r)' % (self._obj, self._times) - else: - return 'repeat(%r)' % (self._obj,) - - def __len__(self): - if self._times == -1 or self._times is None: - raise TypeError("len() of uniszed object") - return self._times - - -class starmap(object): - """Make an iterator that computes the function using arguments - tuples obtained from the iterable. Used instead of imap() when - argument parameters are already grouped in tuples from a single - iterable (the data has been ``pre-zipped''). The difference - between imap() and starmap() parallels the distinction between - function(a,b) and function(*c). 
- - Equivalent to : - - def starmap(function, iterable): - iterable = iter(iterable) - while True: - yield function(*iterable.next()) - """ - def __init__(self, function, iterable): - self._func = function - self._iter = iter(iterable) - - def __iter__(self): - return self - - def next(self): - # CPython raises a TypeError when the iterator doesn't return a tuple - try: - t = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % self._iter) - if not isinstance(t, tuple): - raise TypeError("iterator must return a tuple") - return self._func(*t) - - - -class takewhile(object): - """Make an iterator that returns elements from the iterable as - long as the predicate is true. - - Equivalent to : - - def takewhile(predicate, iterable): - for x in iterable: - if predicate(x): - yield x - else: - break - """ - def __init__(self, predicate, iterable): - self._predicate = predicate - self._iter = iter(iterable) - - def __iter__(self): - return self - - def next(self): - try: - value = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._iter)) - if not self._predicate(value): - raise StopIteration() - return value - - -class TeeData(object): - """Holds cached values for TeeObjects""" - def __init__(self, iterator): - self.data = [] - self._iter = iterator - - def __getitem__(self, i): - # iterates until 'i' if not done yet - while i>= len(self.data): - try: - self.data.append( self._iter.next() ) - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % self._iter) - return self.data[i] - - -class TeeObject(object): - """Iterables / Iterators as returned by the tee() function""" - def __init__(self, iterable=None, tee_data=None): - if tee_data: - self.tee_data = tee_data - self.pos = 0 - # <=> Copy constructor - elif isinstance(iterable, TeeObject): - self.tee_data = iterable.tee_data - self.pos = iterable.pos - else: - self.tee_data = TeeData(iter(iterable)) - self.pos = 0 - - def next(self): - data = self.tee_data[self.pos] - self.pos += 1 - return data - - def __iter__(self): - return self - - - at builtinify -def tee(iterable, n=2): - """Return n independent iterators from a single iterable. - Note : once tee() has made a split, the original iterable - should not be used anywhere else; otherwise, the iterable could get - advanced without the tee objects being informed. - - Note : this member of the toolkit may require significant auxiliary - storage (depending on how much temporary data needs to be stored). 
- In general, if one iterator is going to use most or all of the - data before the other iterator, it is faster to use list() instead - of tee() - - Equivalent to : - - def tee(iterable, n=2): - def gen(next, data={}, cnt=[0]): - for i in count(): - if i == cnt[0]: - item = data[i] = next() - cnt[0] += 1 - else: - item = data.pop(i) - yield item - it = iter(iterable) - return tuple([gen(it.next) for i in range(n)]) - """ - if isinstance(iterable, TeeObject): - # a,b = tee(range(10)) ; c,d = tee(a) ; self.assert_(a is c) - return tuple([iterable] + - [TeeObject(tee_data=iterable.tee_data) for i in xrange(n-1)]) - tee_data = TeeData(iter(iterable)) - return tuple([TeeObject(tee_data=tee_data) for i in xrange(n)]) +try: + from __builtin_itertools import * + from __builtin_itertools import __doc__ +except ImportError: + from _itertools import * + from _itertools import __doc__ diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -45,8 +45,10 @@ pypydir = py.path.local(autopath.pypydir) include_dir = pypydir / 'module' / 'cpyext' / 'include' source_dir = pypydir / 'module' / 'cpyext' / 'src' +signed_dir = pypydir / 'translator' / 'c' / 'src' include_dirs = [ include_dir, + signed_dir, udir, ] diff --git a/pypy/module/itertools/__init__.py b/pypy/module/itertools/__init__.py --- a/pypy/module/itertools/__init__.py +++ b/pypy/module/itertools/__init__.py @@ -23,6 +23,7 @@ dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v) """ + applevel_name = '__builtin_itertools' interpleveldefs = { 'chain' : 'interp_itertools.W_Chain', From noreply at buildbot.pypy.org Sun Dec 4 05:39:38 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Sun, 4 Dec 2011 05:39:38 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: weird special casing in std-objspace.py::wrap. This function is not even Message-ID: <20111204043938.02C1382A00@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50104:5b5913c63621 Date: 2011-12-04 05:39 +0100 http://bitbucket.org/pypy/pypy/changeset/5b5913c63621/ Log: weird special casing in std-objspace.py::wrap. This function is not even RPython, and I had to inline is_valid_int. Why is this so special? Is that still necessary, or just not changed? diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -9,7 +9,7 @@ from pypy.objspace.descroperation import DescrOperation, raiseattrerror from pypy.rlib.objectmodel import instantiate, r_dict, specialize, is_annotation_constant from pypy.rlib.debug import make_sure_not_resized -from pypy.rlib.rarithmetic import base_int, widen, is_valid_int +from pypy.rlib.rarithmetic import base_int, widen, maxint from pypy.rlib.objectmodel import we_are_translated from pypy.rlib import jit @@ -160,11 +160,15 @@ if isinstance(x, OperationError): raise TypeError, ("attempt to wrap already wrapped exception: %s"% (x,)) - if is_valid_int(x): + if isinstance(x, int): if isinstance(x, bool): return self.newbool(x) else: return self.newint(x) + # this is an inlined 'is_valid_int' which cannot be used + # due to the special annotation nature of 'wrap'. 
+ if isinstance(x, long) and (-maxint - 1 <= x <= maxint): + return self.newint(x) if isinstance(x, str): return wrapstr(self, x) if isinstance(x, unicode): From noreply at buildbot.pypy.org Sun Dec 4 10:47:48 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 10:47:48 +0100 (CET) Subject: [pypy-commit] pypy builtin-module: Fix encoding. Message-ID: <20111204094748.985B48205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: builtin-module Changeset: r50105:6c82c6444df7 Date: 2011-12-04 10:05 +0100 http://bitbucket.org/pypy/pypy/changeset/6c82c6444df7/ Log: Fix encoding. diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# -*- coding: iso-8859-1 +# -*- coding: iso-8859-1 -*- # Note that PyPy contains also a built-in module 'sha' which will hide # this one if compiled in. From noreply at buildbot.pypy.org Sun Dec 4 10:47:50 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 10:47:50 +0100 (CET) Subject: [pypy-commit] pypy builtin-module: Finished the conversion. Now fixing bugs... Message-ID: <20111204094750.28C908205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: builtin-module Changeset: r50106:fa68a5d5cd5a Date: 2011-12-04 10:46 +0100 http://bitbucket.org/pypy/pypy/changeset/fa68a5d5cd5a/ Log: Finished the conversion. Now fixing bugs... diff --git a/lib_pypy/_locale.py b/lib_pypy/__locale.py copy from lib_pypy/_locale.py copy to lib_pypy/__locale.py diff --git a/lib_pypy/_md5.py b/lib_pypy/__md5.py copy from lib_pypy/_md5.py copy to lib_pypy/__md5.py diff --git a/lib_pypy/_minimal_curses.py b/lib_pypy/__minimal_curses.py copy from lib_pypy/_minimal_curses.py copy to lib_pypy/__minimal_curses.py --- a/lib_pypy/_minimal_curses.py +++ b/lib_pypy/__minimal_curses.py @@ -5,8 +5,8 @@ to use it, you have to import it and stick it in sys.modules['_curses'] manually. -Note that there is also a built-in module _minimal_curses which will -hide this one if compiled in. +Note that there is also a built-in module __buitltin__minimal_curses which +will be imported instead of this one if compiled in. 
""" import ctypes, ctypes.util diff --git a/lib_pypy/_sha.py b/lib_pypy/__sha.py copy from lib_pypy/_sha.py copy to lib_pypy/__sha.py diff --git a/lib_pypy/binascii.py b/lib_pypy/_binascii.py copy from lib_pypy/binascii.py copy to lib_pypy/_binascii.py diff --git a/lib_pypy/_continuation.py b/lib_pypy/_continuation.py --- a/lib_pypy/_continuation.py +++ b/lib_pypy/_continuation.py @@ -1,3 +1,4 @@ # indirection needed; otherwise the built-in module "_continuation" shadows # any file _continuation.py that would be found in the user dirs from __builtin__continuation import * +from __builtin__continuation import __doc__ diff --git a/lib_pypy/_ffi.py b/lib_pypy/_ffi.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_ffi.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "_ffi" shadows +# any file _ffi.py that would be found in the user dirs +from __builtin__ffi import * diff --git a/lib_pypy/_hashlib.py b/lib_pypy/_hashlib.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_hashlib.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "_hashlib" shadows +# any file _hashlib.py that would be found in the user dirs +from __builtin__hashlib import * diff --git a/lib_pypy/_locale.py b/lib_pypy/_locale.py --- a/lib_pypy/_locale.py +++ b/lib_pypy/_locale.py @@ -1,337 +1,6 @@ -# ctypes implementation of _locale module by Victor Stinner, 2008-03-27 - -# ------------------------------------------------------------ -# Note that we also have our own interp-level implementation -# ------------------------------------------------------------ - -""" -Support for POSIX locales. -""" - -from ctypes import (Structure, POINTER, create_string_buffer, - c_ubyte, c_int, c_char_p, c_wchar_p, c_size_t) -from ctypes_support import standard_c_lib as libc -from ctypes_support import get_errno - -# load the platform-specific cache made by running locale.ctc.py -from ctypes_config_cache._locale_cache import * - -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f - - -# Ubuntu Gusty i386 structure -class lconv(Structure): - _fields_ = ( - # Numeric (non-monetary) information. - ("decimal_point", c_char_p), # Decimal point character. - ("thousands_sep", c_char_p), # Thousands separator. - - # Each element is the number of digits in each group; - # elements with higher indices are farther left. - # An element with value CHAR_MAX means that no further grouping is done. - # An element with value 0 means that the previous element is used - # for all groups farther left. */ - ("grouping", c_char_p), - - # Monetary information. - - # First three chars are a currency symbol from ISO 4217. - # Fourth char is the separator. Fifth char is '\0'. - ("int_curr_symbol", c_char_p), - ("currency_symbol", c_char_p), # Local currency symbol. - ("mon_decimal_point", c_char_p), # Decimal point character. - ("mon_thousands_sep", c_char_p), # Thousands separator. - ("mon_grouping", c_char_p), # Like `grouping' element (above). - ("positive_sign", c_char_p), # Sign for positive values. - ("negative_sign", c_char_p), # Sign for negative values. - ("int_frac_digits", c_ubyte), # Int'l fractional digits. - ("frac_digits", c_ubyte), # Local fractional digits. - # 1 if currency_symbol precedes a positive value, 0 if succeeds. - ("p_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a positive value. - ("p_sep_by_space", c_ubyte), - # 1 if currency_symbol precedes a negative value, 0 if succeeds. 
- ("n_cs_precedes", c_ubyte), - # 1 iff a space separates currency_symbol from a negative value. - ("n_sep_by_space", c_ubyte), - - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and currency_symbol. - # 1 The sign string precedes the quantity and currency_symbol. - # 2 The sign string follows the quantity and currency_symbol. - # 3 The sign string immediately precedes the currency_symbol. - # 4 The sign string immediately follows the currency_symbol. - ("p_sign_posn", c_ubyte), - ("n_sign_posn", c_ubyte), - # 1 if int_curr_symbol precedes a positive value, 0 if succeeds. - ("int_p_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a positive value. - ("int_p_sep_by_space", c_ubyte), - # 1 if int_curr_symbol precedes a negative value, 0 if succeeds. - ("int_n_cs_precedes", c_ubyte), - # 1 iff a space separates int_curr_symbol from a negative value. - ("int_n_sep_by_space", c_ubyte), - # Positive and negative sign positions: - # 0 Parentheses surround the quantity and int_curr_symbol. - # 1 The sign string precedes the quantity and int_curr_symbol. - # 2 The sign string follows the quantity and int_curr_symbol. - # 3 The sign string immediately precedes the int_curr_symbol. - # 4 The sign string immediately follows the int_curr_symbol. - ("int_p_sign_posn", c_ubyte), - ("int_n_sign_posn", c_ubyte), - ) - -_setlocale = libc.setlocale -_setlocale.argtypes = (c_int, c_char_p) -_setlocale.restype = c_char_p - -_localeconv = libc.localeconv -_localeconv.argtypes = None -_localeconv.restype = POINTER(lconv) - -_strcoll = libc.strcoll -_strcoll.argtypes = (c_char_p, c_char_p) -_strcoll.restype = c_int - -_wcscoll = libc.wcscoll -_wcscoll.argtypes = (c_wchar_p, c_wchar_p) -_wcscoll.restype = c_int - -_strxfrm = libc.strxfrm -_strxfrm.argtypes = (c_char_p, c_char_p, c_size_t) -_strxfrm.restype = c_size_t - -HAS_LIBINTL = hasattr(libc, 'gettext') -if HAS_LIBINTL: - _gettext = libc.gettext - _gettext.argtypes = (c_char_p,) - _gettext.restype = c_char_p - - _dgettext = libc.dgettext - _dgettext.argtypes = (c_char_p, c_char_p) - _dgettext.restype = c_char_p - - _dcgettext = libc.dcgettext - _dcgettext.argtypes = (c_char_p, c_char_p, c_int) - _dcgettext.restype = c_char_p - - _textdomain = libc.textdomain - _textdomain.argtypes = (c_char_p,) - _textdomain.restype = c_char_p - - _bindtextdomain = libc.bindtextdomain - _bindtextdomain.argtypes = (c_char_p, c_char_p) - _bindtextdomain.restype = c_char_p - - HAS_BIND_TEXTDOMAIN_CODESET = hasattr(libc, 'bindtextdomain_codeset') - if HAS_BIND_TEXTDOMAIN_CODESET: - _bind_textdomain_codeset = libc.bindtextdomain_codeset - _bind_textdomain_codeset.argtypes = (c_char_p, c_char_p) - _bind_textdomain_codeset.restype = c_char_p - -class Error(Exception): - pass - -def fixup_ulcase(): - import string - #import strop - - # create uppercase map string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isupper(): - ul.append(c) - ul = ''.join(ul) - string.uppercase = ul - #strop.uppercase = ul - - # create lowercase string - ul = [] - for c in xrange(256): - c = chr(c) - if c.islower(): - ul.append(c) - ul = ''.join(ul) - string.lowercase = ul - #strop.lowercase = ul - - # create letters string - ul = [] - for c in xrange(256): - c = chr(c) - if c.isalpha(): - ul.append(c) - ul = ''.join(ul) - string.letters = ul - - at builtinify -def setlocale(category, locale=None): - "(integer,string=None) -> string. Activates/queries locale processing." 
- if locale: - # set locale - result = _setlocale(category, locale) - if not result: - raise Error("unsupported locale setting") - - # record changes to LC_CTYPE - if category in (LC_CTYPE, LC_ALL): - fixup_ulcase() - else: - # get locale - result = _setlocale(category, None) - if not result: - raise Error("locale query failed") - return result - -def _copy_grouping(text): - groups = [ ord(group) for group in text ] - if groups: - groups.append(0) - return groups - - at builtinify -def localeconv(): - "() -> dict. Returns numeric and monetary locale-specific parameters." - - # if LC_NUMERIC is different in the C library, use saved value - lp = _localeconv() - l = lp.contents - - # hopefully, the localeconv result survives the C library calls - # involved herein - - # Numeric information - result = { - "decimal_point": l.decimal_point, - "thousands_sep": l.thousands_sep, - "grouping": _copy_grouping(l.grouping), - "int_curr_symbol": l.int_curr_symbol, - "currency_symbol": l.currency_symbol, - "mon_decimal_point": l.mon_decimal_point, - "mon_thousands_sep": l.mon_thousands_sep, - "mon_grouping": _copy_grouping(l.mon_grouping), - "positive_sign": l.positive_sign, - "negative_sign": l.negative_sign, - "int_frac_digits": l.int_frac_digits, - "frac_digits": l.frac_digits, - "p_cs_precedes": l.p_cs_precedes, - "p_sep_by_space": l.p_sep_by_space, - "n_cs_precedes": l.n_cs_precedes, - "n_sep_by_space": l.n_sep_by_space, - "p_sign_posn": l.p_sign_posn, - "n_sign_posn": l.n_sign_posn, - } - return result - - at builtinify -def strcoll(s1, s2): - "string,string -> int. Compares two strings according to the locale." - - # If both arguments are byte strings, use strcoll. - if isinstance(s1, str) and isinstance(s2, str): - return _strcoll(s1, s2) - - # If neither argument is unicode, it's an error. - if not isinstance(s1, unicode) and not isinstance(s2, unicode): - raise ValueError("strcoll arguments must be strings") - - # Convert the non-unicode argument to unicode. - s1 = unicode(s1) - s2 = unicode(s2) - - # Collate the strings. - return _wcscoll(s1, s2) - - at builtinify -def strxfrm(s): - "string -> string. Returns a string that behaves for cmp locale-aware." - - # assume no change in size, first - n1 = len(s) + 1 - buf = create_string_buffer(n1) - n2 = _strxfrm(buf, s, n1) + 1 - if n2 > n1: - # more space needed - buf = create_string_buffer(n2) - _strxfrm(buf, s, n2) - return buf.value - - at builtinify -def getdefaultlocale(): - # TODO: Port code from CPython for Windows and Mac OS - raise NotImplementedError() - -if HAS_LANGINFO: - _nl_langinfo = libc.nl_langinfo - _nl_langinfo.argtypes = (nl_item,) - _nl_langinfo.restype = c_char_p - - def nl_langinfo(key): - """nl_langinfo(key) -> string - Return the value for the locale information associated with key.""" - # Check whether this is a supported constant. GNU libc sometimes - # returns numeric values in the char* return value, which would - # crash PyString_FromString. 
- result = _nl_langinfo(key) - if result is not None: - return result - raise ValueError("unsupported langinfo constant") - -if HAS_LIBINTL: - @builtinify - def gettext(msg): - """gettext(msg) -> string - Return translation of msg.""" - return _gettext(msg) - - @builtinify - def dgettext(domain, msg): - """dgettext(domain, msg) -> string - Return translation of msg in domain.""" - return _dgettext(domain, msg) - - @builtinify - def dcgettext(domain, msg, category): - """dcgettext(domain, msg, category) -> string - Return translation of msg in domain and category.""" - return _dcgettext(domain, msg, category) - - @builtinify - def textdomain(domain): - """textdomain(domain) -> string - Set the C library's textdomain to domain, returning the new domain.""" - return _textdomain(domain) - - @builtinify - def bindtextdomain(domain, dir): - """bindtextdomain(domain, dir) -> string - Bind the C library's domain to dir.""" - dirname = _bindtextdomain(domain, dir) - if not dirname: - errno = get_errno() - raise OSError(errno) - return dirname - - if HAS_BIND_TEXTDOMAIN_CODESET: - @builtinify - def bind_textdomain_codeset(domain, codeset): - """bind_textdomain_codeset(domain, codeset) -> string - Bind the C library's domain to codeset.""" - codeset = _bind_textdomain_codeset(domain, codeset) - if codeset: - return codeset - return None - -__all__ = ( - 'Error', - 'setlocale', 'localeconv', 'strxfrm', 'strcoll', -) + ALL_CONSTANTS -if HAS_LIBINTL: - __all__ += ('gettext', 'dgettext', 'dcgettext', 'textdomain', - 'bindtextdomain') - if HAS_BIND_TEXTDOMAIN_CODESET: - __all__ += ('bind_textdomain_codeset',) -if HAS_LANGINFO: - __all__ += ('nl_langinfo',) +# indirection needed; otherwise the built-in module "_locale" shadows +# any file _locale.py that would be found in the user dirs +try: + from __builtin__locale import * +except ImportError: + from __locale import * diff --git a/lib_pypy/_lsprof.py b/lib_pypy/_lsprof.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_lsprof.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "_lsprof" shadows +# any file _lsprof.py that would be found in the user dirs +from __builtin__lsprof import * diff --git a/lib_pypy/_md5.py b/lib_pypy/_md5.py --- a/lib_pypy/_md5.py +++ b/lib_pypy/_md5.py @@ -1,388 +1,6 @@ -#!/usr/bin/env python -# -*- coding: iso-8859-1 -*- - -# Note that PyPy contains also a built-in module 'md5' which will hide -# this one if compiled in. - -"""A sample implementation of MD5 in pure Python. - -This is an implementation of the MD5 hash function, as specified by -RFC 1321, in pure Python. It was implemented using Bruce Schneier's -excellent book "Applied Cryptography", 2nd ed., 1996. - -Surely this is not meant to compete with the existing implementation -of the Python standard library (written in C). Rather, it should be -seen as a Python complement that is more readable than C and can be -used more conveniently for learning and experimenting purposes in -the field of cryptography. - -This module tries very hard to follow the API of the existing Python -standard library's "md5" module, but although it seems to work fine, -it has not been extensively tested! (But note that there is a test -module, test_md5py.py, that compares this Python implementation with -the C one of the Python standard library. - -BEWARE: this comes with no guarantee whatsoever about fitness and/or -other properties! Specifically, do not use this in any production -code! License is Python License! 
- -Special thanks to Aurelian Coman who fixed some nasty bugs! - -Dinu C. Gherman -""" - - -__date__ = '2004-11-17' -__version__ = 0.91 # Modernised by J. Hall�n and L. Creighton for Pypy - -__metaclass__ = type # or genrpy won't work - -import struct, copy - - -# ====================================================================== -# Bit-Manipulation helpers -# ====================================================================== - -def _bytelist2long(list): - "Transform a list of characters into a list of longs." - - imax = len(list)/4 - hl = [0L] * imax - - j = 0 - i = 0 - while i < imax: - b0 = long(ord(list[j])) - b1 = (long(ord(list[j+1]))) << 8 - b2 = (long(ord(list[j+2]))) << 16 - b3 = (long(ord(list[j+3]))) << 24 - hl[i] = b0 | b1 |b2 | b3 - i = i+1 - j = j+4 - - return hl - - -def _rotateLeft(x, n): - "Rotate x (32 bit) left n bits circularly." - - return (x << n) | (x >> (32-n)) - - -# ====================================================================== -# The real MD5 meat... -# -# Implemented after "Applied Cryptography", 2nd ed., 1996, -# pp. 436-441 by Bruce Schneier. -# ====================================================================== - -# F, G, H and I are basic MD5 functions. - -def F(x, y, z): - return (x & y) | ((~x) & z) - -def G(x, y, z): - return (x & z) | (y & (~z)) - -def H(x, y, z): - return x ^ y ^ z - -def I(x, y, z): - return y ^ (x | (~z)) - - -def XX(func, a, b, c, d, x, s, ac): - """Wrapper for call distribution to functions F, G, H and I. - - This replaces functions FF, GG, HH and II from "Appl. Crypto." - Rotation is separate from addition to prevent recomputation - (now summed-up in one function). - """ - - res = 0L - res = res + a + func(b, c, d) - res = res + x - res = res + ac - res = res & 0xffffffffL - res = _rotateLeft(res, s) - res = res & 0xffffffffL - res = res + b - - return res & 0xffffffffL - - -class MD5Type: - "An implementation of the MD5 hash function in pure Python." - - digest_size = digestsize = 16 - block_size = 64 - - def __init__(self): - "Initialisation." - - # Initial message length in bits(!). - self.length = 0L - self.count = [0, 0] - - # Initial empty message as a sequence of bytes (8 bit characters). - self.input = [] - - # Call a separate init function, that can be used repeatedly - # to start from scratch on the same object. - self.init() - - - def init(self): - "Initialize the message-digest and set all fields to zero." - - self.length = 0L - self.count = [0, 0] - self.input = [] - - # Load magic initialization constants. - self.A = 0x67452301L - self.B = 0xefcdab89L - self.C = 0x98badcfeL - self.D = 0x10325476L - - - def _transform(self, inp): - """Basic MD5 step transforming the digest based on the input. - - Note that if the Mysterious Constants are arranged backwards - in little-endian order and decrypted with the DES they produce - OCCULT MESSAGES! - """ - - a, b, c, d = A, B, C, D = self.A, self.B, self.C, self.D - - # Round 1. 
- - S11, S12, S13, S14 = 7, 12, 17, 22 - - a = XX(F, a, b, c, d, inp[ 0], S11, 0xD76AA478L) # 1 - d = XX(F, d, a, b, c, inp[ 1], S12, 0xE8C7B756L) # 2 - c = XX(F, c, d, a, b, inp[ 2], S13, 0x242070DBL) # 3 - b = XX(F, b, c, d, a, inp[ 3], S14, 0xC1BDCEEEL) # 4 - a = XX(F, a, b, c, d, inp[ 4], S11, 0xF57C0FAFL) # 5 - d = XX(F, d, a, b, c, inp[ 5], S12, 0x4787C62AL) # 6 - c = XX(F, c, d, a, b, inp[ 6], S13, 0xA8304613L) # 7 - b = XX(F, b, c, d, a, inp[ 7], S14, 0xFD469501L) # 8 - a = XX(F, a, b, c, d, inp[ 8], S11, 0x698098D8L) # 9 - d = XX(F, d, a, b, c, inp[ 9], S12, 0x8B44F7AFL) # 10 - c = XX(F, c, d, a, b, inp[10], S13, 0xFFFF5BB1L) # 11 - b = XX(F, b, c, d, a, inp[11], S14, 0x895CD7BEL) # 12 - a = XX(F, a, b, c, d, inp[12], S11, 0x6B901122L) # 13 - d = XX(F, d, a, b, c, inp[13], S12, 0xFD987193L) # 14 - c = XX(F, c, d, a, b, inp[14], S13, 0xA679438EL) # 15 - b = XX(F, b, c, d, a, inp[15], S14, 0x49B40821L) # 16 - - # Round 2. - - S21, S22, S23, S24 = 5, 9, 14, 20 - - a = XX(G, a, b, c, d, inp[ 1], S21, 0xF61E2562L) # 17 - d = XX(G, d, a, b, c, inp[ 6], S22, 0xC040B340L) # 18 - c = XX(G, c, d, a, b, inp[11], S23, 0x265E5A51L) # 19 - b = XX(G, b, c, d, a, inp[ 0], S24, 0xE9B6C7AAL) # 20 - a = XX(G, a, b, c, d, inp[ 5], S21, 0xD62F105DL) # 21 - d = XX(G, d, a, b, c, inp[10], S22, 0x02441453L) # 22 - c = XX(G, c, d, a, b, inp[15], S23, 0xD8A1E681L) # 23 - b = XX(G, b, c, d, a, inp[ 4], S24, 0xE7D3FBC8L) # 24 - a = XX(G, a, b, c, d, inp[ 9], S21, 0x21E1CDE6L) # 25 - d = XX(G, d, a, b, c, inp[14], S22, 0xC33707D6L) # 26 - c = XX(G, c, d, a, b, inp[ 3], S23, 0xF4D50D87L) # 27 - b = XX(G, b, c, d, a, inp[ 8], S24, 0x455A14EDL) # 28 - a = XX(G, a, b, c, d, inp[13], S21, 0xA9E3E905L) # 29 - d = XX(G, d, a, b, c, inp[ 2], S22, 0xFCEFA3F8L) # 30 - c = XX(G, c, d, a, b, inp[ 7], S23, 0x676F02D9L) # 31 - b = XX(G, b, c, d, a, inp[12], S24, 0x8D2A4C8AL) # 32 - - # Round 3. - - S31, S32, S33, S34 = 4, 11, 16, 23 - - a = XX(H, a, b, c, d, inp[ 5], S31, 0xFFFA3942L) # 33 - d = XX(H, d, a, b, c, inp[ 8], S32, 0x8771F681L) # 34 - c = XX(H, c, d, a, b, inp[11], S33, 0x6D9D6122L) # 35 - b = XX(H, b, c, d, a, inp[14], S34, 0xFDE5380CL) # 36 - a = XX(H, a, b, c, d, inp[ 1], S31, 0xA4BEEA44L) # 37 - d = XX(H, d, a, b, c, inp[ 4], S32, 0x4BDECFA9L) # 38 - c = XX(H, c, d, a, b, inp[ 7], S33, 0xF6BB4B60L) # 39 - b = XX(H, b, c, d, a, inp[10], S34, 0xBEBFBC70L) # 40 - a = XX(H, a, b, c, d, inp[13], S31, 0x289B7EC6L) # 41 - d = XX(H, d, a, b, c, inp[ 0], S32, 0xEAA127FAL) # 42 - c = XX(H, c, d, a, b, inp[ 3], S33, 0xD4EF3085L) # 43 - b = XX(H, b, c, d, a, inp[ 6], S34, 0x04881D05L) # 44 - a = XX(H, a, b, c, d, inp[ 9], S31, 0xD9D4D039L) # 45 - d = XX(H, d, a, b, c, inp[12], S32, 0xE6DB99E5L) # 46 - c = XX(H, c, d, a, b, inp[15], S33, 0x1FA27CF8L) # 47 - b = XX(H, b, c, d, a, inp[ 2], S34, 0xC4AC5665L) # 48 - - # Round 4. 
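# Illustrative aside (not from the original commit): the order in which the
# message words inp[...] are consumed follows a fixed schedule -- round 2
# uses (1 + 5*i) % 16, round 3 uses (5 + 3*i) % 16 and round 4 below uses
# (7*i) % 16.  Reproducing the index sequences visible in the code:
round2_order = [(1 + 5 * i) % 16 for i in range(16)]
round3_order = [(5 + 3 * i) % 16 for i in range(16)]
round4_order = [(7 * i) % 16 for i in range(16)]

assert round2_order == [1, 6, 11, 0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12]
assert round3_order == [5, 8, 11, 14, 1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15, 2]
assert round4_order == [0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9]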
- - S41, S42, S43, S44 = 6, 10, 15, 21 - - a = XX(I, a, b, c, d, inp[ 0], S41, 0xF4292244L) # 49 - d = XX(I, d, a, b, c, inp[ 7], S42, 0x432AFF97L) # 50 - c = XX(I, c, d, a, b, inp[14], S43, 0xAB9423A7L) # 51 - b = XX(I, b, c, d, a, inp[ 5], S44, 0xFC93A039L) # 52 - a = XX(I, a, b, c, d, inp[12], S41, 0x655B59C3L) # 53 - d = XX(I, d, a, b, c, inp[ 3], S42, 0x8F0CCC92L) # 54 - c = XX(I, c, d, a, b, inp[10], S43, 0xFFEFF47DL) # 55 - b = XX(I, b, c, d, a, inp[ 1], S44, 0x85845DD1L) # 56 - a = XX(I, a, b, c, d, inp[ 8], S41, 0x6FA87E4FL) # 57 - d = XX(I, d, a, b, c, inp[15], S42, 0xFE2CE6E0L) # 58 - c = XX(I, c, d, a, b, inp[ 6], S43, 0xA3014314L) # 59 - b = XX(I, b, c, d, a, inp[13], S44, 0x4E0811A1L) # 60 - a = XX(I, a, b, c, d, inp[ 4], S41, 0xF7537E82L) # 61 - d = XX(I, d, a, b, c, inp[11], S42, 0xBD3AF235L) # 62 - c = XX(I, c, d, a, b, inp[ 2], S43, 0x2AD7D2BBL) # 63 - b = XX(I, b, c, d, a, inp[ 9], S44, 0xEB86D391L) # 64 - - A = (A + a) & 0xffffffffL - B = (B + b) & 0xffffffffL - C = (C + c) & 0xffffffffL - D = (D + d) & 0xffffffffL - - self.A, self.B, self.C, self.D = A, B, C, D - - - # Down from here all methods follow the Python Standard Library - # API of the md5 module. - - def update(self, inBuf): - """Add to the current message. - - Update the md5 object with the string arg. Repeated calls - are equivalent to a single call with the concatenation of all - the arguments, i.e. m.update(a); m.update(b) is equivalent - to m.update(a+b). - - The hash is immediately calculated for all full blocks. The final - calculation is made in digest(). This allows us to keep an - intermediate value for the hash, so that we only need to make - minimal recalculation if we call update() to add moredata to - the hashed string. - """ - - leninBuf = long(len(inBuf)) - - # Compute number of bytes mod 64. - index = (self.count[0] >> 3) & 0x3FL - - # Update number of bits. - self.count[0] = self.count[0] + (leninBuf << 3) - if self.count[0] < (leninBuf << 3): - self.count[1] = self.count[1] + 1 - self.count[1] = self.count[1] + (leninBuf >> 29) - - partLen = 64 - index - - if leninBuf >= partLen: - self.input[index:] = list(inBuf[:partLen]) - self._transform(_bytelist2long(self.input)) - i = partLen - while i + 63 < leninBuf: - self._transform(_bytelist2long(list(inBuf[i:i+64]))) - i = i + 64 - else: - self.input = list(inBuf[i:leninBuf]) - else: - i = 0 - self.input = self.input + list(inBuf) - - - def digest(self): - """Terminate the message-digest computation and return digest. - - Return the digest of the strings passed to the update() - method so far. This is a 16-byte string which may contain - non-ASCII characters, including null bytes. - """ - - A = self.A - B = self.B - C = self.C - D = self.D - input = [] + self.input - count = [] + self.count - - index = (self.count[0] >> 3) & 0x3fL - - if index < 56: - padLen = 56 - index - else: - padLen = 120 - index - - padding = ['\200'] + ['\000'] * 63 - self.update(padding[:padLen]) - - # Append length (before padding). - bits = _bytelist2long(self.input[:56]) + count - - self._transform(bits) - - # Store state in digest. - digest = struct.pack(" 0: - s = pack('>I', n & 0xffffffffL) + s - n = n >> 32 - - # Strip off leading zeros. - for i in range(len(s)): - if s[i] <> '\000': - break - else: - # Only happens when n == 0. - s = '\000' - i = 0 - - s = s[i:] - - # Add back some pad bytes. This could be done more efficiently - # w.r.t. the de-padding being done above, but sigh... 
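# Illustrative aside (not from the original commit): for values that fit in
# 32 bits and a blocksize of 4, this big-endian helper is equivalent to a
# plain struct pack -- leading zero bytes are stripped and then restored by
# the padding step below.
import struct

assert struct.pack(">I", 1) == b"\x00\x00\x00\x01"
assert struct.pack(">I", 0x12345678) == b"\x12\x34\x56\x78"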
- if blocksize > 0 and len(s) % blocksize: - s = (blocksize - len(s) % blocksize) * '\000' + s - - return s - - -def _bytelist2longBigEndian(list): - "Transform a list of characters into a list of longs." - - imax = len(list)/4 - hl = [0L] * imax - - j = 0 - i = 0 - while i < imax: - b0 = long(ord(list[j])) << 24 - b1 = long(ord(list[j+1])) << 16 - b2 = long(ord(list[j+2])) << 8 - b3 = long(ord(list[j+3])) - hl[i] = b0 | b1 | b2 | b3 - i = i+1 - j = j+4 - - return hl - - -def _rotateLeft(x, n): - "Rotate x (32 bit) left n bits circularly." - - return (x << n) | (x >> (32-n)) - - -# ====================================================================== -# The SHA transformation functions -# -# ====================================================================== - -def f0_19(B, C, D): - return (B & C) | ((~ B) & D) - -def f20_39(B, C, D): - return B ^ C ^ D - -def f40_59(B, C, D): - return (B & C) | (B & D) | (C & D) - -def f60_79(B, C, D): - return B ^ C ^ D - - -f = [f0_19, f20_39, f40_59, f60_79] - -# Constants to be used -K = [ - 0x5A827999L, # ( 0 <= t <= 19) - 0x6ED9EBA1L, # (20 <= t <= 39) - 0x8F1BBCDCL, # (40 <= t <= 59) - 0xCA62C1D6L # (60 <= t <= 79) - ] - -class sha: - "An implementation of the MD5 hash function in pure Python." - - digest_size = digestsize = 20 - block_size = 1 - - def __init__(self): - "Initialisation." - - # Initial message length in bits(!). - self.length = 0L - self.count = [0, 0] - - # Initial empty message as a sequence of bytes (8 bit characters). - self.input = [] - - # Call a separate init function, that can be used repeatedly - # to start from scratch on the same object. - self.init() - - - def init(self): - "Initialize the message-digest and set all fields to zero." - - self.length = 0L - self.input = [] - - # Initial 160 bit message digest (5 times 32 bit). - self.H0 = 0x67452301L - self.H1 = 0xEFCDAB89L - self.H2 = 0x98BADCFEL - self.H3 = 0x10325476L - self.H4 = 0xC3D2E1F0L - - def _transform(self, W): - - for t in range(16, 80): - W.append(_rotateLeft( - W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1) & 0xffffffffL) - - A = self.H0 - B = self.H1 - C = self.H2 - D = self.H3 - E = self.H4 - - """ - This loop was unrolled to gain about 10% in speed - for t in range(0, 80): - TEMP = _rotateLeft(A, 5) + f[t/20] + E + W[t] + K[t/20] - E = D - D = C - C = _rotateLeft(B, 30) & 0xffffffffL - B = A - A = TEMP & 0xffffffffL - """ - - for t in range(0, 20): - TEMP = _rotateLeft(A, 5) + ((B & C) | ((~ B) & D)) + E + W[t] + K[0] - E = D - D = C - C = _rotateLeft(B, 30) & 0xffffffffL - B = A - A = TEMP & 0xffffffffL - - for t in range(20, 40): - TEMP = _rotateLeft(A, 5) + (B ^ C ^ D) + E + W[t] + K[1] - E = D - D = C - C = _rotateLeft(B, 30) & 0xffffffffL - B = A - A = TEMP & 0xffffffffL - - for t in range(40, 60): - TEMP = _rotateLeft(A, 5) + ((B & C) | (B & D) | (C & D)) + E + W[t] + K[2] - E = D - D = C - C = _rotateLeft(B, 30) & 0xffffffffL - B = A - A = TEMP & 0xffffffffL - - for t in range(60, 80): - TEMP = _rotateLeft(A, 5) + (B ^ C ^ D) + E + W[t] + K[3] - E = D - D = C - C = _rotateLeft(B, 30) & 0xffffffffL - B = A - A = TEMP & 0xffffffffL - - - self.H0 = (self.H0 + A) & 0xffffffffL - self.H1 = (self.H1 + B) & 0xffffffffL - self.H2 = (self.H2 + C) & 0xffffffffL - self.H3 = (self.H3 + D) & 0xffffffffL - self.H4 = (self.H4 + E) & 0xffffffffL - - - # Down from here all methods follow the Python Standard Library - # API of the sha module. - - def update(self, inBuf): - """Add to the current message. - - Update the md5 object with the string arg. 
Repeated calls - are equivalent to a single call with the concatenation of all - the arguments, i.e. m.update(a); m.update(b) is equivalent - to m.update(a+b). - - The hash is immediately calculated for all full blocks. The final - calculation is made in digest(). It will calculate 1-2 blocks, - depending on how much padding we have to add. This allows us to - keep an intermediate value for the hash, so that we only need to - make minimal recalculation if we call update() to add more data - to the hashed string. - """ - - leninBuf = long(len(inBuf)) - - # Compute number of bytes mod 64. - index = (self.count[1] >> 3) & 0x3FL - - # Update number of bits. - self.count[1] = self.count[1] + (leninBuf << 3) - if self.count[1] < (leninBuf << 3): - self.count[0] = self.count[0] + 1 - self.count[0] = self.count[0] + (leninBuf >> 29) - - partLen = 64 - index - - if leninBuf >= partLen: - self.input[index:] = list(inBuf[:partLen]) - self._transform(_bytelist2longBigEndian(self.input)) - i = partLen - while i + 63 < leninBuf: - self._transform(_bytelist2longBigEndian(list(inBuf[i:i+64]))) - i = i + 64 - else: - self.input = list(inBuf[i:leninBuf]) - else: - i = 0 - self.input = self.input + list(inBuf) - - - def digest(self): - """Terminate the message-digest computation and return digest. - - Return the digest of the strings passed to the update() - method so far. This is a 16-byte string which may contain - non-ASCII characters, including null bytes. - """ - - H0 = self.H0 - H1 = self.H1 - H2 = self.H2 - H3 = self.H3 - H4 = self.H4 - input = [] + self.input - count = [] + self.count - - index = (self.count[1] >> 3) & 0x3fL - - if index < 56: - padLen = 56 - index - else: - padLen = 120 - index - - padding = ['\200'] + ['\000'] * 63 - self.update(padding[:padLen]) - - # Append length (before padding). - bits = _bytelist2longBigEndian(self.input[:56]) + count - - self._transform(bits) - - # Store state in digest. - digest = _long2bytesBigEndian(self.H0, 4) + \ - _long2bytesBigEndian(self.H1, 4) + \ - _long2bytesBigEndian(self.H2, 4) + \ - _long2bytesBigEndian(self.H3, 4) + \ - _long2bytesBigEndian(self.H4, 4) - - self.H0 = H0 - self.H1 = H1 - self.H2 = H2 - self.H3 = H3 - self.H4 = H4 - self.input = input - self.count = count - - return digest - - - def hexdigest(self): - """Terminate and return digest in HEX form. - - Like digest() except the digest is returned as a string of - length 32, containing only hexadecimal digits. This may be - used to exchange the value safely in email or other non- - binary environments. - """ - return ''.join(['%02x' % ord(c) for c in self.digest()]) - - def copy(self): - """Return a clone object. - - Return a copy ('clone') of the md5 object. This can be used - to efficiently compute the digests of strings that share - a common initial substring. - """ - - return copy.deepcopy(self) - - -# ====================================================================== -# Mimic Python top-level functions from standard library API -# for consistency with the _sha module of the standard library. -# ====================================================================== - -# These are mandatory variables in the module. They have constant values -# in the SHA standard. - -digest_size = 20 -digestsize = 20 -blocksize = 1 - -def new(arg=None): - """Return a new sha crypto object. - - If arg is present, the method call update(arg) is made. 
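# Illustrative aside (not from the original commit): the four round
# constants K[0..3] used in the transform above are conventionally derived
# as floor(2**30 * sqrt(x)) for x = 2, 3, 5 and 10.  Quick check:
import math

def sha1_round_constant(x):
    return int(math.floor(math.sqrt(x) * 2 ** 30))

assert [sha1_round_constant(x) for x in (2, 3, 5, 10)] == [
    0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6]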
- """ - - crypto = sha() - if arg: - crypto.update(arg) - - return crypto +# indirection needed; otherwise the built-in module "_sha" shadows +# any file _sha.py that would be found in the user dirs +try: + from __builtin__sha import * +except ImportError: + from __sha import * diff --git a/lib_pypy/struct.py b/lib_pypy/_struct.py copy from lib_pypy/struct.py copy to lib_pypy/_struct.py diff --git a/lib_pypy/_winreg.py b/lib_pypy/_winreg.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_winreg.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "_winreg" shadows +# any file _winreg.py that would be found in the user dirs +from __builtin__winreg import * diff --git a/lib_pypy/binascii.py b/lib_pypy/binascii.py --- a/lib_pypy/binascii.py +++ b/lib_pypy/binascii.py @@ -1,720 +1,6 @@ -"""A pure Python implementation of binascii. - -Rather slow and buggy in corner cases. -PyPy provides an RPython version too. -""" - -class Error(Exception): - pass - -class Done(Exception): - pass - -class Incomplete(Exception): - pass - -def a2b_uu(s): - if not s: - return '' - - length = (ord(s[0]) - 0x20) % 64 - - def quadruplets_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - except IndexError: - s += ' ' - yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) - return - s = s[4:] - - try: - result = [''.join( - [chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)), - chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)), - chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f)) - ]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())] - except ValueError: - raise Error('Illegal char') - result = ''.join(result) - trailingdata = result[length:] - if trailingdata.strip('\x00'): - raise Error('Trailing garbage') - result = result[:length] - if len(result) < length: - result += ((length - len(result)) * '\x00') - return result - - -def b2a_uu(s): - length = len(s) - if length > 45: - raise Error('At most 45 bytes at once') - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - result = [''.join( - [chr(0x20 + (( A >> 2 ) & 0x3F)), - chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)), - chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)), - chr(0x20 + (( C ) & 0x3F))]) - for A, B, C in triples_gen(s)] - return chr(ord(' ') + (length & 077)) + ''.join(result) + '\n' - - -table_a2b_base64 = { - 'A': 0, - 'B': 1, - 'C': 2, - 'D': 3, - 'E': 4, - 'F': 5, - 'G': 6, - 'H': 7, - 'I': 8, - 'J': 9, - 'K': 10, - 'L': 11, - 'M': 12, - 'N': 13, - 'O': 14, - 'P': 15, - 'Q': 16, - 'R': 17, - 'S': 18, - 'T': 19, - 'U': 20, - 'V': 21, - 'W': 22, - 'X': 23, - 'Y': 24, - 'Z': 25, - 'a': 26, - 'b': 27, - 'c': 28, - 'd': 29, - 'e': 30, - 'f': 31, - 'g': 32, - 'h': 33, - 'i': 34, - 'j': 35, - 'k': 36, - 'l': 37, - 'm': 38, - 'n': 39, - 'o': 40, - 'p': 41, - 'q': 42, - 'r': 43, - 's': 44, - 't': 45, - 'u': 46, - 'v': 47, - 'w': 48, - 'x': 49, - 'y': 50, - 'z': 51, - '0': 52, - '1': 53, - '2': 54, - '3': 55, - '4': 56, - '5': 57, - '6': 58, - '7': 59, - '8': 60, - '9': 61, - '+': 62, - '/': 63, - '=': 0, -} - - -def a2b_base64(s): - if not isinstance(s, (str, unicode)): - raise TypeError("expected string or unicode, got %r" % (s,)) - s = s.rstrip() - # clean out all invalid characters, this also strips the final '=' padding - # check for correct padding - - def next_valid_char(s, pos): - for i in range(pos + 1, len(s)): - c = s[i] - if c < '\x7f': - try: - 
table_a2b_base64[c] - return c - except KeyError: - pass - return None - - quad_pos = 0 - leftbits = 0 - leftchar = 0 - res = [] - for i, c in enumerate(s): - if c > '\x7f' or c == '\n' or c == '\r' or c == ' ': - continue - if c == '=': - if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='): - continue - else: - leftbits = 0 - break - try: - next_c = table_a2b_base64[c] - except KeyError: - continue - quad_pos = (quad_pos + 1) & 0x03 - leftchar = (leftchar << 6) | next_c - leftbits += 6 - if leftbits >= 8: - leftbits -= 8 - res.append((leftchar >> leftbits & 0xff)) - leftchar &= ((1 << leftbits) - 1) - if leftbits != 0: - raise Error('Incorrect padding') - - return ''.join([chr(i) for i in res]) - -table_b2a_base64 = \ -"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" - -def b2a_base64(s): - length = len(s) - final_length = length % 3 - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - s += '\0\0' - yield ord(s[0]), ord(s[1]), ord(s[2]) - return - s = s[3:] - - - a = triples_gen(s[ :length - final_length]) - - result = [''.join( - [table_b2a_base64[( A >> 2 ) & 0x3F], - table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F], - table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F], - table_b2a_base64[( C ) & 0x3F]]) - for A, B, C in a] - - final = s[length - final_length:] - if final_length == 0: - snippet = '' - elif final_length == 1: - a = ord(final[0]) - snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \ - table_b2a_base64[(a << 4 ) & 0x3F] + '==' - else: - a = ord(final[0]) - b = ord(final[1]) - snippet = table_b2a_base64[(a >> 2) & 0x3F] + \ - table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \ - table_b2a_base64[(b << 2) & 0x3F] + '=' - return ''.join(result) + snippet + '\n' - -def a2b_qp(s, header=False): - inp = 0 - odata = [] - while inp < len(s): - if s[inp] == '=': - inp += 1 - if inp >= len(s): - break - # Soft line breaks - if (s[inp] == '\n') or (s[inp] == '\r'): - if s[inp] != '\n': - while inp < len(s) and s[inp] != '\n': - inp += 1 - if inp < len(s): - inp += 1 - elif s[inp] == '=': - # broken case from broken python qp - odata.append('=') - inp += 1 - elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers: - ch = chr(int(s[inp:inp+2], 16)) - inp += 2 - odata.append(ch) - else: - odata.append('=') - elif header and s[inp] == '_': - odata.append(' ') - inp += 1 - else: - odata.append(s[inp]) - inp += 1 - return ''.join(odata) - -def b2a_qp(data, quotetabs=False, istext=True, header=False): - """quotetabs=True means that tab and space characters are always - quoted. - istext=False means that \r and \n are treated as regular characters - header=True encodes space characters with '_' and requires - real '_' characters to be quoted. - """ - MAXLINESIZE = 76 - - # See if this string is using CRLF line ends - lf = data.find('\n') - crlf = lf > 0 and data[lf-1] == '\r' - - inp = 0 - linelen = 0 - odata = [] - while inp < len(data): - c = data[inp] - if (c > '~' or - c == '=' or - (header and c == '_') or - (c == '.' 
and linelen == 0 and (inp+1 == len(data) or - data[inp+1] == '\n' or - data[inp+1] == '\r')) or - (not istext and (c == '\r' or c == '\n')) or - ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or - (c <= ' ' and c != '\r' and c != '\n' and - (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))): - linelen += 3 - if linelen >= MAXLINESIZE: - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 3 - odata.append('=' + two_hex_digits(ord(c))) - inp += 1 - else: - if (istext and - (c == '\n' or (inp+1 < len(data) and c == '\r' and - data[inp+1] == '\n'))): - linelen = 0 - # Protect against whitespace on end of line - if (len(odata) > 0 and - (odata[-1] == ' ' or odata[-1] == '\t')): - ch = ord(odata[-1]) - odata[-1] = '=' - odata.append(two_hex_digits(ch)) - - if crlf: odata.append('\r') - odata.append('\n') - if c == '\r': - inp += 2 - else: - inp += 1 - else: - if (inp + 1 < len(data) and - data[inp+1] != '\n' and - (linelen + 1) >= MAXLINESIZE): - odata.append('=') - if crlf: odata.append('\r') - odata.append('\n') - linelen = 0 - - linelen += 1 - if header and c == ' ': - c = '_' - odata.append(c) - inp += 1 - return ''.join(odata) - -hex_numbers = '0123456789ABCDEF' -def hex(n): - if n == 0: - return '0' - - if n < 0: - n = -n - sign = '-' - else: - sign = '' - arr = [] - - def hex_gen(n): - """ Yield a nibble at a time. """ - while n: - yield n % 0x10 - n = n / 0x10 - - for nibble in hex_gen(n): - arr = [hex_numbers[nibble]] + arr - return sign + ''.join(arr) - -def two_hex_digits(n): - return hex_numbers[n / 0x10] + hex_numbers[n % 0x10] - - -def strhex_to_int(s): - i = 0 - for c in s: - i = i * 0x10 + hex_numbers.index(c) - return i - -hqx_encoding = '!"#$%&\'()*+,-012345689 at ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr' - -DONE = 0x7f -SKIP = 0x7e -FAIL = 0x7d - -table_a2b_hqx = [ - #^@ ^A ^B ^C ^D ^E ^F ^G - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #\b \t \n ^K ^L \r ^N ^O - FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL, - #^P ^Q ^R ^S ^T ^U ^V ^W - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - #^X ^Y ^Z ^[ ^\ ^] ^^ ^_ - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - # ! " # $ % & ' - FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, - #( ) * + , - . / - 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL, - #0 1 2 3 4 5 6 7 - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL, - #8 9 : ; < = > ? - 0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL, - #@ A B C D E F G - 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, - #H I J K L M N O - 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL, - #P Q R S T U V W - 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL, - #X Y Z [ \ ] ^ _ - 0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL, - #` a b c d e f g - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL, - #h i j k l m n o - 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL, - #p q r s t u v w - 0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL, - #x y z { | } ~ ^? 
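# Illustrative aside (not from the original commit): the rows of this table
# are the inverse of the 64-character binhex alphabet held in hqx_encoding
# above, with DONE/SKIP/FAIL as sentinels; characters missing from the
# alphabet (such as '.', '/' and '7') map to FAIL.  Rebuilding a small slice
# of that inverse mapping from the alphabet prefix:
alphabet_prefix = '!"#$%&\'()*+,-012345689'
inverse = dict((char, value) for value, char in enumerate(alphabet_prefix))
assert inverse['!'] == 0x00
assert inverse[','] == 0x0B
assert inverse['9'] == 0x15
assert '7' not in inverse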
- FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, - FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, -] - -def a2b_hqx(s): - result = [] - - def quadruples_gen(s): - t = [] - for c in s: - res = table_a2b_hqx[ord(c)] - if res == SKIP: - continue - elif res == FAIL: - raise Error('Illegal character') - elif res == DONE: - yield t - raise Done - else: - t.append(res) - if len(t) == 4: - yield t - t = [] - yield t - - done = 0 - try: - for snippet in quadruples_gen(s): - length = len(snippet) - if length == 4: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3]))) - elif length == 3: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) - elif length == 2: - result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) - except Done: - done = 1 - except Error: - raise - return (''.join(result), done) - -def b2a_hqx(s): - result =[] - - def triples_gen(s): - while s: - try: - yield ord(s[0]), ord(s[1]), ord(s[2]) - except IndexError: - yield tuple([ord(c) for c in s]) - s = s[3:] - - for snippet in triples_gen(s): - length = len(snippet) - if length == 3: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)]) - result.append(hqx_encoding[snippet[2] & 0x3f]) - elif length == 2: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) - result.append(hqx_encoding[ - (snippet[1] & 0x0f) << 2]) - elif length == 1: - result.append( - hqx_encoding[(snippet[0] & 0xfc) >> 2]) - result.append(hqx_encoding[ - ((snippet[0] & 0x03) << 4)]) - return ''.join(result) - -crctab_hqx = [ - 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, - 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, - 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, - 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, - 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, - 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, - 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, - 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, - 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, - 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, - 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, - 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, - 0x6ca6, 
0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, - 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, - 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, - 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, - 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, - 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, - 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, - 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, - 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, - 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, - 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, - 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, - 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, - 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, - 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, - 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, - 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, - 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, - 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, - 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, -] - -def crc_hqx(s, crc): - for c in s: - crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)] - - return crc - -def rlecode_hqx(s): - """ - Run length encoding for binhex4. - The CPython implementation does not do run length encoding - of \x90 characters. This implementation does. - """ - if not s: - return '' - result = [] - prev = s[0] - count = 1 - # Add a dummy character to get the loop to go one extra round. - # The dummy must be different from the last character of s. - # In the same step we remove the first character, which has - # already been stored in prev. - if s[-1] == '!': - s = s[1:] + '?' - else: - s = s[1:] + '!' - - for c in s: - if c == prev and count < 255: - count += 1 - else: - if count == 1: - if prev != '\x90': - result.append(prev) - else: - result.extend(['\x90', '\x00']) - elif count < 4: - if prev != '\x90': - result.extend([prev] * count) - else: - result.extend(['\x90', '\x00'] * count) - else: - if prev != '\x90': - result.extend([prev, '\x90', chr(count)]) - else: - result.extend(['\x90', '\x00', '\x90', chr(count)]) - count = 1 - prev = c - - return ''.join(result) - -def rledecode_hqx(s): - s = s.split('\x90') - result = [s[0]] - prev = s[0] - for snippet in s[1:]: - count = ord(snippet[0]) - if count > 0: - result.append(prev[-1] * (count-1)) - prev = snippet - else: - result. 
append('\x90') - prev = '\x90' - result.append(snippet[1:]) - - return ''.join(result) - -crc_32_tab = [ - 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L, - 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L, - 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L, - 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL, - 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L, - 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L, - 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L, - 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL, - 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L, - 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL, - 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L, - 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L, - 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L, - 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL, - 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL, - 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L, - 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL, - 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L, - 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L, - 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L, - 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL, - 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L, - 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L, - 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL, - 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L, - 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L, - 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L, - 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L, - 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L, - 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL, - 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL, - 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L, - 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L, - 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL, - 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL, - 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L, - 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL, - 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L, - 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL, - 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L, - 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL, - 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L, - 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L, - 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL, - 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L, - 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L, - 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L, - 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L, - 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L, - 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L, - 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL, - 0x2d02ef8dL -] - -def crc32(s, 
crc=0): - result = 0 - crc = ~long(crc) & 0xffffffffL - for c in s: - crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8) - #/* Note: (crc >> 8) MUST zero fill on left - - result = crc ^ 0xffffffffL - - if result > 2**31: - result = ((result + 2**31) % 2**32) - 2**31 - - return result - -def b2a_hex(s): - result = [] - for char in s: - c = (ord(char) >> 4) & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(chr(c)) - c = ord(char) & 0xf - if c > 9: - c = c + ord('a') - 10 - else: - c = c + ord('0') - result.append(chr(c)) - return ''.join(result) - -hexlify = b2a_hex - -table_hex = [ - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, - -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1 -] - - -def a2b_hex(t): - result = [] - - def pairs_gen(s): - while s: - try: - yield table_hex[ord(s[0])], table_hex[ord(s[1])] - except IndexError: - if len(s): - raise TypeError('Odd-length string') - return - s = s[2:] - - for a, b in pairs_gen(t): - if a < 0 or b < 0: - raise TypeError('Non-hexadecimal digit found') - result.append(chr((a << 4) + b)) - return ''.join(result) - - -unhexlify = a2b_hex +# indirection needed; otherwise the built-in module "binascii" shadows +# any file binascii.py that would be found in the user dirs +try: + from __builtin_binascii import * +except ImportError: + from _binascii import * diff --git a/lib_pypy/bz2.py b/lib_pypy/bz2.py new file mode 100644 --- /dev/null +++ b/lib_pypy/bz2.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "bz2" shadows +# any file bz2.py that would be found in the user dirs +from __builtin_bz2 import * diff --git a/lib_pypy/cStringIO.py b/lib_pypy/cStringIO.py --- a/lib_pypy/cStringIO.py +++ b/lib_pypy/cStringIO.py @@ -1,16 +1,19 @@ -# -# StringIO-based cStringIO implementation. -# +# indirection needed; otherwise the built-in module "cStringIO" shadows +# any file cStringIO.py that would be found in the user dirs -# Note that PyPy contains also a built-in module 'cStringIO' which will hide -# this one if compiled in. +try: + from __builtin_cStringIO import * -from StringIO import * -from StringIO import __doc__ +except ImportError: + # + # StringIO-based cStringIO implementation. 
+ # + from StringIO import * + from StringIO import __doc__ -class StringIO(StringIO): - def reset(self): - """ - reset() -- Reset the file position to the beginning - """ - self.seek(0, 0) + class StringIO(StringIO): + def reset(self): + """ + reset() -- Reset the file position to the beginning + """ + self.seek(0, 0) diff --git a/lib_pypy/cmath.py b/lib_pypy/cmath.py --- a/lib_pypy/cmath.py +++ b/lib_pypy/cmath.py @@ -1,3 +1,4 @@ # indirection needed; otherwise the built-in module "cmath" shadows # any file cmath.py that would be found in the user dirs from __builtin_cmath import * +from __builtin_cmath import __doc__ diff --git a/lib_pypy/cpyext.py b/lib_pypy/cpyext.py new file mode 100644 --- /dev/null +++ b/lib_pypy/cpyext.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "cpyext" shadows +# any file cpyext.py that would be found in the user dirs +from __builtin_cpyext import * diff --git a/lib_pypy/crypt.py b/lib_pypy/crypt.py new file mode 100644 --- /dev/null +++ b/lib_pypy/crypt.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "crypt" shadows +# any file crypt.py that would be found in the user dirs +from __builtin_crypt import * diff --git a/lib_pypy/cx_Oracle.py b/lib_pypy/cx_Oracle.py new file mode 100644 --- /dev/null +++ b/lib_pypy/cx_Oracle.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "cx_Oracle" shadows +# any file cx_Oracle.py that would be found in the user dirs +from __builtin_cx_Oracle import * diff --git a/lib_pypy/fcntl.py b/lib_pypy/fcntl.py new file mode 100644 --- /dev/null +++ b/lib_pypy/fcntl.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "fcntl" shadows +# any file fcntl.py that would be found in the user dirs +from __builtin_fcntl import * diff --git a/lib_pypy/math.py b/lib_pypy/math.py --- a/lib_pypy/math.py +++ b/lib_pypy/math.py @@ -1,3 +1,4 @@ # indirection needed; otherwise the built-in module "math" shadows # any file math.py that would be found in the user dirs from __builtin_math import * +from __builtin_math import __doc__ diff --git a/lib_pypy/mmap.py b/lib_pypy/mmap.py new file mode 100644 --- /dev/null +++ b/lib_pypy/mmap.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "mmap" shadows +# any file mmap.py that would be found in the user dirs +from __builtin_mmap import * diff --git a/lib_pypy/numpy.py b/lib_pypy/numpy.py new file mode 100644 --- /dev/null +++ b/lib_pypy/numpy.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "numpy" shadows +# any file numpy.py that would be found in the user dirs +from __builtin_numpypy import * diff --git a/lib_pypy/numpypy.py b/lib_pypy/numpypy.py new file mode 100644 --- /dev/null +++ b/lib_pypy/numpypy.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "numpypy" shadows +# any file numpypy.py that would be found in the user dirs +from __builtin_numpypy import * diff --git a/lib_pypy/parser.py b/lib_pypy/parser.py new file mode 100644 --- /dev/null +++ b/lib_pypy/parser.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "parser" shadows +# any file parser.py that would be found in the user dirs +from __builtin_parser import * diff --git a/lib_pypy/pwd.py b/lib_pypy/pwd.py --- a/lib_pypy/pwd.py +++ b/lib_pypy/pwd.py @@ -9,6 +9,9 @@ The uid and gid items are integers, all others are strings. An exception is raised if the entry asked for cannot be found. 
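# Illustrative aside (not from the original commit): the stub files added in
# this changeset all follow the same shape -- re-export the interpreter-level
# module under its usual name and, where one exists, fall back to the
# pure-Python version.  The names "builtin_name" and "fallback_name" below
# are placeholders, not real modules:
def load_with_fallback(builtin_name, fallback_name):
    # Return whichever of the two modules imports first.
    try:
        return __import__(builtin_name)
    except ImportError:
        return __import__(fallback_name)

# e.g. load_with_fallback('cStringIO', 'StringIO') prefers the compiled-in
# module when the interpreter provides it and degrades quietly otherwise.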
""" +# Note that if the built-in module 'pwd' is compiled in, it hides +# this pwd.py. Unsure why, but it is also a built-in module in +# CPython 2.7, instead of an extension module. import sys if sys.platform == 'win32': diff --git a/lib_pypy/pyexpat.py b/lib_pypy/pyexpat.py --- a/lib_pypy/pyexpat.py +++ b/lib_pypy/pyexpat.py @@ -1,448 +1,6 @@ - -import ctypes -import ctypes.util -from ctypes import c_char_p, c_int, c_void_p, POINTER, c_char, c_wchar_p -import sys - -# load the platform-specific cache made by running pyexpat.ctc.py -from ctypes_config_cache._pyexpat_cache import * - -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f - - -lib = ctypes.CDLL(ctypes.util.find_library('expat')) - - -XML_Content.children = POINTER(XML_Content) -XML_Parser = ctypes.c_void_p # an opaque pointer -assert XML_Char is ctypes.c_char # this assumption is everywhere in -# cpython's expat, let's explode - -def declare_external(name, args, res): - func = getattr(lib, name) - func.args = args - func.restype = res - globals()[name] = func - -declare_external('XML_ParserCreate', [c_char_p], XML_Parser) -declare_external('XML_ParserCreateNS', [c_char_p, c_char], XML_Parser) -declare_external('XML_Parse', [XML_Parser, c_char_p, c_int, c_int], c_int) -currents = ['CurrentLineNumber', 'CurrentColumnNumber', - 'CurrentByteIndex'] -for name in currents: - func = getattr(lib, 'XML_Get' + name) - func.args = [XML_Parser] - func.restype = c_int - -declare_external('XML_SetReturnNSTriplet', [XML_Parser, c_int], None) -declare_external('XML_GetSpecifiedAttributeCount', [XML_Parser], c_int) -declare_external('XML_SetParamEntityParsing', [XML_Parser, c_int], None) -declare_external('XML_GetErrorCode', [XML_Parser], c_int) -declare_external('XML_StopParser', [XML_Parser, c_int], None) -declare_external('XML_ErrorString', [c_int], c_char_p) -declare_external('XML_SetBase', [XML_Parser, c_char_p], None) -if XML_COMBINED_VERSION >= 19505: - declare_external('XML_UseForeignDTD', [XML_Parser, c_int], None) - -declare_external('XML_SetUnknownEncodingHandler', [XML_Parser, c_void_p, - c_void_p], None) -declare_external('XML_FreeContentModel', [XML_Parser, POINTER(XML_Content)], - None) -declare_external('XML_ExternalEntityParserCreate', [XML_Parser,c_char_p, - c_char_p], - XML_Parser) - -handler_names = [ - 'StartElement', - 'EndElement', - 'ProcessingInstruction', - 'CharacterData', - 'UnparsedEntityDecl', - 'NotationDecl', - 'StartNamespaceDecl', - 'EndNamespaceDecl', - 'Comment', - 'StartCdataSection', - 'EndCdataSection', - 'Default', - 'DefaultHandlerExpand', - 'NotStandalone', - 'ExternalEntityRef', - 'StartDoctypeDecl', - 'EndDoctypeDecl', - 'EntityDecl', - 'XmlDecl', - 'ElementDecl', - 'AttlistDecl', - ] -if XML_COMBINED_VERSION >= 19504: - handler_names.append('SkippedEntity') -setters = {} - -for name in handler_names: - if name == 'DefaultHandlerExpand': - newname = 'XML_SetDefaultHandlerExpand' - else: - name += 'Handler' - newname = 'XML_Set' + name - cfunc = getattr(lib, newname) - cfunc.args = [XML_Parser, ctypes.c_void_p] - cfunc.result = ctypes.c_int - setters[name] = cfunc - -class ExpatError(Exception): - def __str__(self): - return self.s - -error = ExpatError - -class XMLParserType(object): - specified_attributes = 0 - ordered_attributes = 0 - returns_unicode = 1 - encoding = 'utf-8' - def __init__(self, encoding, namespace_separator, _hook_external_entity=False): - self.returns_unicode = 1 - if encoding: - self.encoding = encoding - if not _hook_external_entity: - if 
namespace_separator is None: - self.itself = XML_ParserCreate(encoding) - else: - self.itself = XML_ParserCreateNS(encoding, ord(namespace_separator)) - if not self.itself: - raise RuntimeError("Creating parser failed") - self._set_unknown_encoding_handler() - self.storage = {} - self.buffer = None - self.buffer_size = 8192 - self.character_data_handler = None - self.intern = {} - self.__exc_info = None - - def _flush_character_buffer(self): - if not self.buffer: - return - res = self._call_character_handler(''.join(self.buffer)) - self.buffer = [] - return res - - def _call_character_handler(self, buf): - if self.character_data_handler: - self.character_data_handler(buf) - - def _set_unknown_encoding_handler(self): - def UnknownEncoding(encodingData, name, info_p): - info = info_p.contents - s = ''.join([chr(i) for i in range(256)]) - u = s.decode(self.encoding, 'replace') - for i in range(len(u)): - if u[i] == u'\xfffd': - info.map[i] = -1 - else: - info.map[i] = ord(u[i]) - info.data = None - info.convert = None - info.release = None - return 1 - - CB = ctypes.CFUNCTYPE(c_int, c_void_p, c_char_p, POINTER(XML_Encoding)) - cb = CB(UnknownEncoding) - self._unknown_encoding_handler = (cb, UnknownEncoding) - XML_SetUnknownEncodingHandler(self.itself, cb, None) - - def _set_error(self, code): - e = ExpatError() - e.code = code - lineno = lib.XML_GetCurrentLineNumber(self.itself) - colno = lib.XML_GetCurrentColumnNumber(self.itself) - e.offset = colno - e.lineno = lineno - err = XML_ErrorString(code)[:200] - e.s = "%s: line: %d, column: %d" % (err, lineno, colno) - e.message = e.s - self._error = e - - def Parse(self, data, is_final=0): - res = XML_Parse(self.itself, data, len(data), is_final) - if res == 0: - self._set_error(XML_GetErrorCode(self.itself)) - if self.__exc_info: - exc_info = self.__exc_info - self.__exc_info = None - raise exc_info[0], exc_info[1], exc_info[2] - else: - raise self._error - self._flush_character_buffer() - return res - - def _sethandler(self, name, real_cb): - setter = setters[name] - try: - cb = self.storage[(name, real_cb)] - except KeyError: - cb = getattr(self, 'get_cb_for_%s' % name)(real_cb) - self.storage[(name, real_cb)] = cb - except TypeError: - # weellll... 
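# Illustrative aside (not from the original commit): the storage dict used by
# _sethandler matters because ctypes callback objects must stay referenced
# for as long as the C library may call them; if the CFUNCTYPE wrapper were
# garbage-collected, expat would invoke freed memory.  Minimal shape of such
# a wrapper:
import ctypes

CALLBACK = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int)

def _double(x):
    return x * 2

keep_alive = CALLBACK(_double)    # hold this reference for the callback's lifetime
assert keep_alive(21) == 42       # callback objects are also callable from Python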
- cb = getattr(self, 'get_cb_for_%s' % name)(real_cb) - setter(self.itself, cb) - - def _wrap_cb(self, cb): - def f(*args): - try: - return cb(*args) - except: - self.__exc_info = sys.exc_info() - XML_StopParser(self.itself, XML_FALSE) - return f - - def get_cb_for_StartElementHandler(self, real_cb): - def StartElement(unused, name, attrs): - # unpack name and attrs - conv = self.conv - self._flush_character_buffer() - if self.specified_attributes: - max = XML_GetSpecifiedAttributeCount(self.itself) - else: - max = 0 - while attrs[max]: - max += 2 # copied - if self.ordered_attributes: - res = [attrs[i] for i in range(max)] - else: - res = {} - for i in range(0, max, 2): - res[conv(attrs[i])] = conv(attrs[i + 1]) - real_cb(conv(name), res) - StartElement = self._wrap_cb(StartElement) - CB = ctypes.CFUNCTYPE(None, c_void_p, c_char_p, POINTER(c_char_p)) - return CB(StartElement) - - def get_cb_for_ExternalEntityRefHandler(self, real_cb): - def ExternalEntity(unused, context, base, sysId, pubId): - self._flush_character_buffer() - conv = self.conv - res = real_cb(conv(context), conv(base), conv(sysId), - conv(pubId)) - if res is None: - return 0 - return res - ExternalEntity = self._wrap_cb(ExternalEntity) - CB = ctypes.CFUNCTYPE(c_int, c_void_p, *([c_char_p] * 4)) - return CB(ExternalEntity) - - def get_cb_for_CharacterDataHandler(self, real_cb): - def CharacterData(unused, s, lgt): - if self.buffer is None: - self._call_character_handler(self.conv(s[:lgt])) - else: - if len(self.buffer) + lgt > self.buffer_size: - self._flush_character_buffer() - if self.character_data_handler is None: - return - if lgt >= self.buffer_size: - self._call_character_handler(s[:lgt]) - self.buffer = [] - else: - self.buffer.append(s[:lgt]) - CharacterData = self._wrap_cb(CharacterData) - CB = ctypes.CFUNCTYPE(None, c_void_p, POINTER(c_char), c_int) - return CB(CharacterData) - - def get_cb_for_NotStandaloneHandler(self, real_cb): - def NotStandaloneHandler(unused): - return real_cb() - NotStandaloneHandler = self._wrap_cb(NotStandaloneHandler) - CB = ctypes.CFUNCTYPE(c_int, c_void_p) - return CB(NotStandaloneHandler) - - def get_cb_for_EntityDeclHandler(self, real_cb): - def EntityDecl(unused, ename, is_param, value, value_len, base, - system_id, pub_id, not_name): - self._flush_character_buffer() - if not value: - value = None - else: - value = value[:value_len] - args = [ename, is_param, value, base, system_id, - pub_id, not_name] - args = [self.conv(arg) for arg in args] - real_cb(*args) - EntityDecl = self._wrap_cb(EntityDecl) - CB = ctypes.CFUNCTYPE(None, c_void_p, c_char_p, c_int, c_char_p, - c_int, c_char_p, c_char_p, c_char_p, c_char_p) - return CB(EntityDecl) - - def _conv_content_model(self, model): - children = tuple([self._conv_content_model(model.children[i]) - for i in range(model.numchildren)]) - return (model.type, model.quant, self.conv(model.name), - children) - - def get_cb_for_ElementDeclHandler(self, real_cb): - def ElementDecl(unused, name, model): - self._flush_character_buffer() - modelobj = self._conv_content_model(model[0]) - real_cb(name, modelobj) - XML_FreeContentModel(self.itself, model) - - ElementDecl = self._wrap_cb(ElementDecl) - CB = ctypes.CFUNCTYPE(None, c_void_p, c_char_p, POINTER(XML_Content)) - return CB(ElementDecl) - - def _new_callback_for_string_len(name, sign): - def get_callback_for_(self, real_cb): - def func(unused, s, len): - self._flush_character_buffer() - arg = self.conv(s[:len]) - real_cb(arg) - func.func_name = name - func = self._wrap_cb(func) - CB = 
ctypes.CFUNCTYPE(*sign) - return CB(func) - get_callback_for_.func_name = 'get_cb_for_' + name - return get_callback_for_ - - for name in ['DefaultHandlerExpand', - 'DefaultHandler']: - sign = [None, c_void_p, POINTER(c_char), c_int] - name = 'get_cb_for_' + name - locals()[name] = _new_callback_for_string_len(name, sign) - - def _new_callback_for_starargs(name, sign): - def get_callback_for_(self, real_cb): - def func(unused, *args): - self._flush_character_buffer() - args = [self.conv(arg) for arg in args] - real_cb(*args) - func.func_name = name - func = self._wrap_cb(func) - CB = ctypes.CFUNCTYPE(*sign) - return CB(func) - get_callback_for_.func_name = 'get_cb_for_' + name - return get_callback_for_ - - for name, num_or_sign in [ - ('EndElementHandler', 1), - ('ProcessingInstructionHandler', 2), - ('UnparsedEntityDeclHandler', 5), - ('NotationDeclHandler', 4), - ('StartNamespaceDeclHandler', 2), - ('EndNamespaceDeclHandler', 1), - ('CommentHandler', 1), - ('StartCdataSectionHandler', 0), - ('EndCdataSectionHandler', 0), - ('StartDoctypeDeclHandler', [None, c_void_p] + [c_char_p] * 3 + [c_int]), - ('XmlDeclHandler', [None, c_void_p, c_char_p, c_char_p, c_int]), - ('AttlistDeclHandler', [None, c_void_p] + [c_char_p] * 4 + [c_int]), - ('EndDoctypeDeclHandler', 0), - ('SkippedEntityHandler', [None, c_void_p, c_char_p, c_int]), - ]: - if isinstance(num_or_sign, int): - sign = [None, c_void_p] + [c_char_p] * num_or_sign - else: - sign = num_or_sign - name = 'get_cb_for_' + name - locals()[name] = _new_callback_for_starargs(name, sign) - - def conv_unicode(self, s): - if s is None or isinstance(s, int): - return s - return s.decode(self.encoding, "strict") - - def __setattr__(self, name, value): - # forest of ifs... - if name in ['ordered_attributes', - 'returns_unicode', 'specified_attributes']: - if value: - if name == 'returns_unicode': - self.conv = self.conv_unicode - self.__dict__[name] = 1 - else: - if name == 'returns_unicode': - self.conv = lambda s: s - self.__dict__[name] = 0 - elif name == 'buffer_text': - if value: - self.buffer = [] - else: - self._flush_character_buffer() - self.buffer = None - elif name == 'buffer_size': - if not isinstance(value, int): - raise TypeError("Expected int") - if value <= 0: - raise ValueError("Expected positive int") - self.__dict__[name] = value - elif name == 'namespace_prefixes': - XML_SetReturnNSTriplet(self.itself, int(bool(value))) - elif name in setters: - if name == 'CharacterDataHandler': - # XXX we need to flush buffer here - self._flush_character_buffer() - self.character_data_handler = value - #print name - #print value - #print - self._sethandler(name, value) - else: - self.__dict__[name] = value - - def SetParamEntityParsing(self, arg): - XML_SetParamEntityParsing(self.itself, arg) - - if XML_COMBINED_VERSION >= 19505: - def UseForeignDTD(self, arg=True): - if arg: - flag = XML_TRUE - else: - flag = XML_FALSE - XML_UseForeignDTD(self.itself, flag) - - def __getattr__(self, name): - if name == 'buffer_text': - return self.buffer is not None - elif name in currents: - return getattr(lib, 'XML_Get' + name)(self.itself) - elif name == 'ErrorColumnNumber': - return lib.XML_GetCurrentColumnNumber(self.itself) - elif name == 'ErrorLineNumber': - return lib.XML_GetCurrentLineNumber(self.itself) - return self.__dict__[name] - - def ParseFile(self, file): - return self.Parse(file.read(), False) - - def SetBase(self, base): - XML_SetBase(self.itself, base) - - def ExternalEntityParserCreate(self, context, encoding=None): - 
"""ExternalEntityParserCreate(context[, encoding]) - Create a parser for parsing an external entity based on the - information passed to the ExternalEntityRefHandler.""" - new_parser = XMLParserType(encoding, None, True) - new_parser.itself = XML_ExternalEntityParserCreate(self.itself, - context, encoding) - new_parser._set_unknown_encoding_handler() - return new_parser - - at builtinify -def ErrorString(errno): - return XML_ErrorString(errno)[:200] - - at builtinify -def ParserCreate(encoding=None, namespace_separator=None, intern=None): - if (not isinstance(encoding, str) and - not encoding is None): - raise TypeError("ParserCreate() argument 1 must be string or None, not %s" % encoding.__class__.__name__) - if (not isinstance(namespace_separator, str) and - not namespace_separator is None): - raise TypeError("ParserCreate() argument 2 must be string or None, not %s" % namespace_separator.__class__.__name__) - if namespace_separator is not None: - if len(namespace_separator) > 1: - raise ValueError('namespace_separator must be at most one character, omitted, or None') - if len(namespace_separator) == 0: - namespace_separator = None - return XMLParserType(encoding, namespace_separator) +# indirection needed; otherwise the built-in module "pyexpat" shadows +# any file pyexpat.py that would be found in the user dirs +try: + from __builtin_pyexpat import * +except ImportError: + from _pyexpat import * diff --git a/lib_pypy/pypyjit.py b/lib_pypy/pypyjit.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypyjit.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "pypyjit" shadows +# any file pypyjit.py that would be found in the user dirs +from __builtin_pypyjit import * diff --git a/lib_pypy/select.py b/lib_pypy/select.py new file mode 100644 --- /dev/null +++ b/lib_pypy/select.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "select" shadows +# any file select.py that would be found in the user dirs +from __builtin_select import * diff --git a/lib_pypy/struct.py b/lib_pypy/struct.py --- a/lib_pypy/struct.py +++ b/lib_pypy/struct.py @@ -1,417 +1,8 @@ -# -# This module is a pure Python version of pypy.module.struct. -# It is only imported if the vastly faster pypy.module.struct is not -# compiled in. For now we keep this version for reference and -# because pypy.module.struct is not ootype-backend-friendly yet. -# - -"""Functions to convert between Python values and C structs. -Python strings are used to hold the data representing the C struct -and also as format strings to describe the layout of data in the C struct. - -The optional first format char indicates byte order, size and alignment: - @: native order, size & alignment (default) - =: native order, std. size & alignment - <: little-endian, std. size & alignment - >: big-endian, std. size & alignment - !: same as > - -The remaining chars indicate types of args and must match exactly; -these can be preceded by a decimal repeat count: - x: pad byte (no data); - c:char; - b:signed byte; - B:unsigned byte; - h:short; - H:unsigned short; - i:int; - I:unsigned int; - l:long; - L:unsigned long; - f:float; - d:double. -Special cases (preceding decimal count indicates length): - s:string (array of char); p: pascal string (with count byte). -Special case (only available in native format): - P:an integer type that is wide enough to hold a pointer. -Special case (not in native mode unless 'long long' in platform C): - q:long long; - Q:unsigned long long -Whitespace between formats is ignored. 
- -The variable struct.error is an exception raised on errors.""" - -import math, sys - -# TODO: XXX Find a way to get information on native sizes and alignments -class StructError(Exception): - pass -error = StructError -def unpack_int(data,index,size,le): - bytes = [ord(b) for b in data[index:index+size]] - if le == 'little': - bytes.reverse() - number = 0L - for b in bytes: - number = number << 8 | b - return int(number) - -def unpack_signed_int(data,index,size,le): - number = unpack_int(data,index,size,le) - max = 2**(size*8) - if number > 2**(size*8 - 1) - 1: - number = int(-1*(max - number)) - return number - -INFINITY = 1e200 * 1e200 -NAN = INFINITY / INFINITY - -def unpack_char(data,index,size,le): - return data[index:index+size] - -def pack_int(number,size,le): - x=number - res=[] - for i in range(size): - res.append(chr(x&0xff)) - x >>= 8 - if le == 'big': - res.reverse() - return ''.join(res) - -def pack_signed_int(number,size,le): - if not isinstance(number, (int,long)): - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number > 2**(8*size-1)-1 or number < -1*2**(8*size-1): - raise OverflowError,"Number:%i too large to convert" % number - return pack_int(number,size,le) - -def pack_unsigned_int(number,size,le): - if not isinstance(number, (int,long)): - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number < 0: - raise TypeError,"can't convert negative long to unsigned" - if number > 2**(8*size)-1: - raise OverflowError,"Number:%i too large to convert" % number - return pack_int(number,size,le) - -def pack_char(char,size,le): - return str(char) - -def isinf(x): - return x != 0.0 and x / 2 == x -def isnan(v): - return v != v*1.0 or (v == 1.0 and v == 2.0) - -def pack_float(x, size, le): - unsigned = float_pack(x, size) - result = [] - for i in range(8): - result.append(chr((unsigned >> (i * 8)) & 0xFF)) - if le == "big": - result.reverse() - return ''.join(result) - -def unpack_float(data, index, size, le): - binary = [data[i] for i in range(index, index + 8)] - if le == "big": - binary.reverse() - unsigned = 0 - for i in range(8): - unsigned |= ord(binary[i]) << (i * 8) - return float_unpack(unsigned, size, le) - -def round_to_nearest(x): - """Python 3 style round: round a float x to the nearest int, but - unlike the builtin Python 2.x round function: - - - return an int, not a float - - do round-half-to-even, not round-half-away-from-zero. - - We assume that x is finite and nonnegative; except wrong results - if you use this for negative x. 
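# Illustrative aside (not from the original commit): "round half to even"
# means that exact .5 ties go to the even neighbour instead of always up,
# which is what the algorithm below implements for nonnegative finite x:
def _round_half_even(x):
    int_part = int(x)
    frac_part = x - int_part
    if frac_part > 0.5 or (frac_part == 0.5 and int_part & 1 == 1):
        int_part += 1
    return int_part

assert _round_half_even(2.5) == 2     # tie -> even neighbour
assert _round_half_even(3.5) == 4     # tie -> even neighbour
assert _round_half_even(2.75) == 3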
- - """ - int_part = int(x) - frac_part = x - int_part - if frac_part > 0.5 or frac_part == 0.5 and int_part & 1 == 1: - int_part += 1 - return int_part - -def float_unpack(Q, size, le): - """Convert a 32-bit or 64-bit integer created - by float_pack into a Python float.""" - - if size == 8: - MIN_EXP = -1021 # = sys.float_info.min_exp - MAX_EXP = 1024 # = sys.float_info.max_exp - MANT_DIG = 53 # = sys.float_info.mant_dig - BITS = 64 - elif size == 4: - MIN_EXP = -125 # C's FLT_MIN_EXP - MAX_EXP = 128 # FLT_MAX_EXP - MANT_DIG = 24 # FLT_MANT_DIG - BITS = 32 - else: - raise ValueError("invalid size value") - - if Q >> BITS: - raise ValueError("input out of range") - - # extract pieces - sign = Q >> BITS - 1 - exp = (Q & ((1 << BITS - 1) - (1 << MANT_DIG - 1))) >> MANT_DIG - 1 - mant = Q & ((1 << MANT_DIG - 1) - 1) - - if exp == MAX_EXP - MIN_EXP + 2: - # nan or infinity - result = float('nan') if mant else float('inf') - elif exp == 0: - # subnormal or zero - result = math.ldexp(float(mant), MIN_EXP - MANT_DIG) - else: - # normal - mant += 1 << MANT_DIG - 1 - result = math.ldexp(float(mant), exp + MIN_EXP - MANT_DIG - 1) - return -result if sign else result - - -def float_pack(x, size): - """Convert a Python float x into a 64-bit unsigned integer - with the same byte representation.""" - - if size == 8: - MIN_EXP = -1021 # = sys.float_info.min_exp - MAX_EXP = 1024 # = sys.float_info.max_exp - MANT_DIG = 53 # = sys.float_info.mant_dig - BITS = 64 - elif size == 4: - MIN_EXP = -125 # C's FLT_MIN_EXP - MAX_EXP = 128 # FLT_MAX_EXP - MANT_DIG = 24 # FLT_MANT_DIG - BITS = 32 - else: - raise ValueError("invalid size value") - - sign = math.copysign(1.0, x) < 0.0 - if math.isinf(x): - mant = 0 - exp = MAX_EXP - MIN_EXP + 2 - elif math.isnan(x): - mant = 1 << (MANT_DIG-2) # other values possible - exp = MAX_EXP - MIN_EXP + 2 - elif x == 0.0: - mant = 0 - exp = 0 - else: - m, e = math.frexp(abs(x)) # abs(x) == m * 2**e - exp = e - (MIN_EXP - 1) - if exp > 0: - # Normal case. - mant = round_to_nearest(m * (1 << MANT_DIG)) - mant -= 1 << MANT_DIG - 1 - else: - # Subnormal case. - if exp + MANT_DIG - 1 >= 0: - mant = round_to_nearest(m * (1 << exp + MANT_DIG - 1)) - else: - mant = 0 - exp = 0 - - # Special case: rounding produced a MANT_DIG-bit mantissa. - assert 0 <= mant <= 1 << MANT_DIG - 1 - if mant == 1 << MANT_DIG - 1: - mant = 0 - exp += 1 - - # Raise on overflow (in some circumstances, may want to return - # infinity instead). 
- if exp >= MAX_EXP - MIN_EXP + 2: - raise OverflowError("float too large to pack in this format") - - # check constraints - assert 0 <= mant < 1 << MANT_DIG - 1 - assert 0 <= exp <= MAX_EXP - MIN_EXP + 2 - assert 0 <= sign <= 1 - return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant - - -big_endian_format = { - 'x':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'b':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'B':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'c':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_char, 'unpack' : unpack_char}, - 's':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'p':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'h':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'H':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'i':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'I':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'l':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'L':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'Q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'f':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - 'd':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - } -default = big_endian_format -formatmode={ '<' : (default, 'little'), - '>' : (default, 'big'), - '!' : (default, 'big'), - '=' : (default, sys.byteorder), - '@' : (default, sys.byteorder) - } - -def getmode(fmt): - try: - formatdef,endianness = formatmode[fmt[0]] - index = 1 - except KeyError: - formatdef,endianness = formatmode['@'] - index = 0 - return formatdef,endianness,index -def getNum(fmt,i): - num=None - cur = fmt[i] - while ('0'<= cur ) and ( cur <= '9'): - if num == None: - num = int(cur) - else: - num = 10*num + int(cur) - i += 1 - cur = fmt[i] - return num,i - -def calcsize(fmt): - """calcsize(fmt) -> int - Return size of C struct described by format string fmt. - See struct.__doc__ for more on format strings.""" - - formatdef,endianness,i = getmode(fmt) - num = 0 - result = 0 - while i string - Return string containing values v1, v2, ... packed according to fmt. - See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) - args = list(args) - n_args = len(args) - result = [] - while i 0: - result += [chr(len(args[0])) + args[0][:num-1] + '\0'*padding] - else: - if num<255: - result += [chr(num-1) + args[0][:num-1]] - else: - result += [chr(255) + args[0][:num-1]] - args.pop(0) - else: - raise StructError,"arg for string format not a string" - - else: - if len(args) < num: - raise StructError,"insufficient arguments to pack" - for var in args[:num]: - result += [format['pack'](var,format['size'],endianness)] - args=args[num:] - num = None - i += 1 - if len(args) != 0: - raise StructError,"too many arguments for pack format" - return ''.join(result) - -def unpack(fmt,data): - """unpack(fmt, string) -> (v1, v2, ...) - Unpack the string, containing packed C structure data, according - to fmt. Requires len(string)==calcsize(fmt). 
- See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) - j = 0 - num = 0 - result = [] - length= calcsize(fmt) - if length != len (data): - raise StructError,"unpack str size does not match format" - while i= num: - n = num-1 - result.append(data[j+1:j+n+1]) - j += num - else: - for n in range(num): - result += [format['unpack'](data,j,format['size'],endianness)] - j += format['size'] - - return tuple(result) - -def pack_into(fmt, buf, offset, *args): - data = pack(fmt, *args) - buffer(buf)[offset:offset+len(data)] = data - -def unpack_from(fmt, buf, offset=0): - size = calcsize(fmt) - data = buffer(buf)[offset:offset+size] - if len(data) != size: - raise error("unpack_from requires a buffer of at least %d bytes" - % (size,)) - return unpack(fmt, data) +# indirection needed; otherwise the built-in module "struct" shadows +# any file struct.py that would be found in the user dirs +try: + from __builtin_struct import * + from __builtin_struct import __doc__ +except ImportError: + from _struct import * + from _struct import __doc__ diff --git a/lib_pypy/termios.py b/lib_pypy/termios.py new file mode 100644 --- /dev/null +++ b/lib_pypy/termios.py @@ -0,0 +1,4 @@ +# indirection needed; otherwise the built-in module "termios" shadows +# any file termios.py that would be found in the user dirs +from __builtin_termios import * +from __builtin_termios import __doc__ diff --git a/lib_pypy/time.py b/lib_pypy/time.py new file mode 100644 --- /dev/null +++ b/lib_pypy/time.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "time" shadows +# any file time.py that would be found in the user dirs +from __builtin_time import * diff --git a/lib_pypy/unicodedata.py b/lib_pypy/unicodedata.py new file mode 100644 --- /dev/null +++ b/lib_pypy/unicodedata.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "unicodedata" shadows +# any file unicodedata.py that would be found in the user dirs +from __builtin_unicodedata import * diff --git a/lib_pypy/zlib.py b/lib_pypy/zlib.py new file mode 100644 --- /dev/null +++ b/lib_pypy/zlib.py @@ -0,0 +1,3 @@ +# indirection needed; otherwise the built-in module "zlib" shadows +# any file zlib.py that would be found in the user dirs +from __builtin_zlib import * diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -414,39 +414,39 @@ return name def getbuiltinmodule(self, name, force_init=False): - w_name = self.wrap(name) - w_modules = self.sys.get('modules') - try: - w_mod = self.getitem(w_modules, w_name) - except OperationError, e: - if not e.match(self, self.w_KeyError): - raise - else: - if not force_init: - return w_mod - - # If the module is a builtin but not yet imported, - # retrieve it and initialize it try: w_mod = self.builtin_modules[name] except KeyError: - raise operationerrfmt( - self.w_SystemError, - "getbuiltinmodule() called " - "with non-builtin module %s", name) - else: - # Add the module to sys.modules - self.setitem(w_modules, w_name, w_mod) + tryname = '__builtin_' + name # hack, but a useful one + try: + w_mod = self.builtin_modules[tryname] + name = tryname + except KeyError: + raise operationerrfmt( + self.w_SystemError, + "getbuiltinmodule() called " + "with non-builtin module %s", name) - # And initialize it - from pypy.interpreter.module import Module - mod = self.interpclass_w(w_mod) - if isinstance(mod, Module): - self.timer.start("startup " + name) - 
mod.init(self) - self.timer.stop("startup " + name) + w_name = self.wrap(name) + w_modules = self.sys.get('modules') + if not force_init and self.is_true(self.contains(w_modules, w_name)): return w_mod + # If the module is a builtin but not yet imported, + # retrieve it and initialize it now + + # Add the module to sys.modules + self.setitem(w_modules, w_name, w_mod) + + # And initialize it + from pypy.interpreter.module import Module + mod = self.interpclass_w(w_mod) + if isinstance(mod, Module): + self.timer.start("startup " + name) + mod.init(self) + self.timer.stop("startup " + name) + return w_mod + def get_builtinmodule_to_install(self): """NOT_RPYTHON""" from pypy.tool.lib_pypy import LIB_PYPY diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -259,7 +259,7 @@ def descr_function__reduce__(self, space): from pypy.interpreter.gateway import BuiltinCode from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('__builtin__pickle_support') + w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) code = self.code if isinstance(code, BuiltinCode): @@ -559,7 +559,7 @@ def descr_method__reduce__(self, space): from pypy.interpreter.mixedmodule import MixedModule from pypy.interpreter.gateway import BuiltinCode - w_mod = space.getbuiltinmodule('__builtin__pickle_support') + w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('method_new') w = space.wrap diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -584,7 +584,7 @@ def descr__reduce__(self, space): from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('__builtin__pickle_support') + w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) builtin_code = mod.get('builtin_code') return space.newtuple([builtin_code, diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -24,7 +24,7 @@ def descr__reduce__(self, space): from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('__builtin__pickle_support') + w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('generator_new') w = space.wrap diff --git a/pypy/interpreter/module.py b/pypy/interpreter/module.py --- a/pypy/interpreter/module.py +++ b/pypy/interpreter/module.py @@ -86,7 +86,7 @@ if space.finditem(w_modules, w_name) is None: #not imported case from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('__builtin__pickle_support') + w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('module_new') return space.newtuple([new_inst, diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -46,7 +46,7 @@ return space.cmp(self.w_value, other.w_value) def descr__reduce__(self, space): - w_mod = space.getbuiltinmodule('__builtin__pickle_support') + w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('cell_new') if self.w_value is None: #when would this happen? 
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -368,7 +368,7 @@ def descr__reduce__(self, space): from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('__builtin__pickle_support') + w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('code_new') w = space.wrap diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -303,7 +303,7 @@ def descr__reduce__(self, space): from pypy.interpreter.mixedmodule import MixedModule from pypy.module._pickle_support import maker # helper fns - w_mod = space.getbuiltinmodule('__builtin__pickle_support') + w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('frame_new') w = space.wrap diff --git a/pypy/interpreter/pytraceback.py b/pypy/interpreter/pytraceback.py --- a/pypy/interpreter/pytraceback.py +++ b/pypy/interpreter/pytraceback.py @@ -26,7 +26,7 @@ def descr__reduce__(self, space): from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('__builtin__pickle_support') + w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('traceback_new') w = space.wrap diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -227,7 +227,7 @@ def descr___reduce__(self, space): from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('__builtin__pickle_support') + w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) w_new_inst = mod.get('enumerate_new') w_info = space.newtuple([self.w_iter, self.w_index]) @@ -288,7 +288,7 @@ def descr___reduce__(self, space): from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('__builtin__pickle_support') + w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) w_new_inst = mod.get('reversed_new') info_w = [self.w_sequence, space.wrap(self.remaining)] @@ -412,7 +412,7 @@ def descr_reduce(self): from pypy.interpreter.mixedmodule import MixedModule space = self.space - w_mod = space.getbuiltinmodule('__builtin__pickle_support') + w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('xrangeiter_new') w = space.wrap diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -5,7 +5,7 @@ class BuildersModule(MixedModule): - applevel_name = '__pypy__.builders' + applevel_name = 'builders' appleveldefs = {} diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py --- a/pypy/module/__pypy__/interp_builders.py +++ b/pypy/module/__pypy__/interp_builders.py @@ -50,6 +50,7 @@ W_Builder.__name__ = "W_%s" % name W_Builder.typedef = TypeDef(name, + __module__ = 'builders', __new__ = interp2app(func_with_new_name( W_Builder.descr__new__.im_func, '%s_new' % (name,))), diff --git a/pypy/module/_continuation/app_continuation.py b/pypy/module/_continuation/app_continuation.py --- a/pypy/module/_continuation/app_continuation.py +++ b/pypy/module/_continuation/app_continuation.py @@ -3,7 +3,7 @@ "Usage 
error of the _continuation module." -import _continuation +import __builtin__continuation as _continuation class generator(object): diff --git a/pypy/module/_demo/__init__.py b/pypy/module/_demo/__init__.py --- a/pypy/module/_demo/__init__.py +++ b/pypy/module/_demo/__init__.py @@ -2,6 +2,7 @@ class Module(MixedModule): """A demo built-in module based on ctypes.""" + applevel_name = '_demo' # not included in default pypy builds interpleveldefs = { 'measuretime' : 'demo.measuretime', diff --git a/pypy/module/_ffi/__init__.py b/pypy/module/_ffi/__init__.py --- a/pypy/module/_ffi/__init__.py +++ b/pypy/module/_ffi/__init__.py @@ -2,6 +2,7 @@ from pypy.module._ffi import interp_ffi class Module(MixedModule): + applevel_name = '__builtin__ffi' interpleveldefs = { 'CDLL': 'interp_ffi.W_CDLL', diff --git a/pypy/module/_hashlib/__init__.py b/pypy/module/_hashlib/__init__.py --- a/pypy/module/_hashlib/__init__.py +++ b/pypy/module/_hashlib/__init__.py @@ -3,6 +3,8 @@ class Module(MixedModule): + applevel_name = '__builtin__hashlib' + interpleveldefs = { 'new' : 'interp_hashlib.new', } diff --git a/pypy/module/_locale/__init__.py b/pypy/module/_locale/__init__.py --- a/pypy/module/_locale/__init__.py +++ b/pypy/module/_locale/__init__.py @@ -4,6 +4,7 @@ class Module(MixedModule): """Support for POSIX locales.""" + applevel_name = '__builtin__locale' interpleveldefs = { 'setlocale': 'interp_locale.setlocale', diff --git a/pypy/module/_lsprof/__init__.py b/pypy/module/_lsprof/__init__.py --- a/pypy/module/_lsprof/__init__.py +++ b/pypy/module/_lsprof/__init__.py @@ -5,6 +5,8 @@ from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): + applevel_name = '__builtin__lsprof' + interpleveldefs = {'Profiler':'interp_lsprof.W_Profiler'} appleveldefs = {} diff --git a/pypy/module/_md5/__init__.py b/pypy/module/_md5/__init__.py --- a/pypy/module/_md5/__init__.py +++ b/pypy/module/_md5/__init__.py @@ -18,6 +18,8 @@ a.k.a. 
``fingerprint'') of the concatenation of the strings fed to it so far using the digest() method.""" + applevel_name = '__builtin__md5' + interpleveldefs = { 'new': 'interp_md5.W_MD5', 'MD5Type': 'interp_md5.W_MD5', diff --git a/pypy/module/_minimal_curses/__init__.py b/pypy/module/_minimal_curses/__init__.py --- a/pypy/module/_minimal_curses/__init__.py +++ b/pypy/module/_minimal_curses/__init__.py @@ -17,6 +17,7 @@ """ Low-level interface for curses module, not meant to be used directly """ + applevel_name = '__builtin__minimal_curses' appleveldefs = { 'error' : 'app_curses.error', diff --git a/pypy/module/_multibytecodec/__init__.py b/pypy/module/_multibytecodec/__init__.py --- a/pypy/module/_multibytecodec/__init__.py +++ b/pypy/module/_multibytecodec/__init__.py @@ -2,6 +2,7 @@ class Module(MixedModule): + applevel_name = '__builtin__multibytecodec' interpleveldefs = { # for compatibility this name is obscured, and should be called diff --git a/pypy/module/_multiprocessing/__init__.py b/pypy/module/_multiprocessing/__init__.py --- a/pypy/module/_multiprocessing/__init__.py +++ b/pypy/module/_multiprocessing/__init__.py @@ -2,6 +2,7 @@ import sys class Module(MixedModule): + applevel_name = '__builtin__multiprocessing' interpleveldefs = { 'Connection' : 'interp_connection.W_FileConnection', diff --git a/pypy/module/_rawffi/__init__.py b/pypy/module/_rawffi/__init__.py --- a/pypy/module/_rawffi/__init__.py +++ b/pypy/module/_rawffi/__init__.py @@ -9,6 +9,7 @@ import sys class Module(MixedModule): + applevel_name = '__builtin__rawffi' interpleveldefs = { 'CDLL' : 'interp_rawffi.W_CDLL', diff --git a/pypy/module/_sha/__init__.py b/pypy/module/_sha/__init__.py --- a/pypy/module/_sha/__init__.py +++ b/pypy/module/_sha/__init__.py @@ -17,6 +17,7 @@ the update() method, and at any point you can ask it for the digest of the concatenation of the strings fed to it so far. SHA-1 digests are 160 bits instead of MD5's 128 bits.""" + applevel_name = '__builtin__sha' interpleveldefs = { 'new': 'interp_sha.W_SHA', diff --git a/pypy/module/_warnings/__init__.py b/pypy/module/_warnings/__init__.py --- a/pypy/module/_warnings/__init__.py +++ b/pypy/module/_warnings/__init__.py @@ -3,6 +3,7 @@ class Module(MixedModule): """provides basic warning filtering support. It is a helper module to speed up interpreter start-up.""" + applevel_name = '_warnings' interpleveldefs = { 'warn' : 'interp_warnings.warn', diff --git a/pypy/module/_winreg/__init__.py b/pypy/module/_winreg/__init__.py --- a/pypy/module/_winreg/__init__.py +++ b/pypy/module/_winreg/__init__.py @@ -39,6 +39,7 @@ Many constants are defined - see the documentation for each function to see what constants are used, and where.""" + applevel_name = '__builtin__winreg' appleveldefs = { } interpleveldefs = { diff --git a/pypy/module/binascii/__init__.py b/pypy/module/binascii/__init__.py --- a/pypy/module/binascii/__init__.py +++ b/pypy/module/binascii/__init__.py @@ -10,6 +10,7 @@ class Module(MixedModule): """binascii - Conversion between binary data and ASCII""" + applevel_name = '__builtin_binascii' appleveldefs = { } diff --git a/pypy/module/bz2/__init__.py b/pypy/module/bz2/__init__.py --- a/pypy/module/bz2/__init__.py +++ b/pypy/module/bz2/__init__.py @@ -6,6 +6,7 @@ the bz2 compression library. 
It implements a complete file interface, one shot (de)compression functions, and types for sequential (de)compression.""" + applevel_name = '__builtin_bz2' interpleveldefs = { 'BZ2Compressor': 'interp_bz2.W_BZ2Compressor', diff --git a/pypy/module/cStringIO/__init__.py b/pypy/module/cStringIO/__init__.py --- a/pypy/module/cStringIO/__init__.py +++ b/pypy/module/cStringIO/__init__.py @@ -3,6 +3,8 @@ from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): + applevel_name = '__builtin_cStringIO' + appleveldefs = { } diff --git a/pypy/module/clr/__init__.py b/pypy/module/clr/__init__.py --- a/pypy/module/clr/__init__.py +++ b/pypy/module/clr/__init__.py @@ -6,6 +6,8 @@ class Module(MixedModule): """CLR module""" + applevel_name = 'clr' + appleveldefs = { 'dotnetimporter': 'app_importer.importer' } diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -5,6 +5,8 @@ from pypy.rpython.lltypesystem import rffi, lltype class Module(MixedModule): + applevel_name = '__builtin_cpyext' + interpleveldefs = { 'load_module': 'api.load_extension_module', } diff --git a/pypy/module/crypt/__init__.py b/pypy/module/crypt/__init__.py --- a/pypy/module/crypt/__init__.py +++ b/pypy/module/crypt/__init__.py @@ -2,6 +2,7 @@ class Module(MixedModule): """A demo built-in module based on rffi.""" + applevel_name = '__builtin_crypt' interpleveldefs = { 'crypt' : 'interp_crypt.crypt', diff --git a/pypy/module/fcntl/__init__.py b/pypy/module/fcntl/__init__.py --- a/pypy/module/fcntl/__init__.py +++ b/pypy/module/fcntl/__init__.py @@ -2,6 +2,8 @@ from pypy.rlib.rarithmetic import intmask class Module(MixedModule): + applevel_name = '__builtin_fcntl' + interpleveldefs = { 'fcntl': 'interp_fcntl.fcntl', 'flock': 'interp_fcntl.flock', diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -2,7 +2,7 @@ class Module(MixedModule): - applevel_name = 'numpypy' + applevel_name = '__builtin_numpypy' interpleveldefs = { 'ndarray': 'interp_numarray.W_NDimArray', diff --git a/pypy/module/mmap/__init__.py b/pypy/module/mmap/__init__.py --- a/pypy/module/mmap/__init__.py +++ b/pypy/module/mmap/__init__.py @@ -2,6 +2,8 @@ from pypy.rlib import rmmap class Module(MixedModule): + applevel_name = '__builtin_mmap' + interpleveldefs = { 'PAGESIZE': 'space.wrap(interp_mmap.PAGESIZE)', 'ALLOCATIONGRANULARITY': 'space.wrap(interp_mmap.ALLOCATIONGRANULARITY)', diff --git a/pypy/module/oracle/__init__.py b/pypy/module/oracle/__init__.py --- a/pypy/module/oracle/__init__.py +++ b/pypy/module/oracle/__init__.py @@ -1,7 +1,7 @@ from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): - applevel_name = 'cx_Oracle' + applevel_name = '__builtin_cx_Oracle' interpleveldefs = { 'connect': 'interp_connect.W_Connection', diff --git a/pypy/module/parser/__init__.py b/pypy/module/parser/__init__.py --- a/pypy/module/parser/__init__.py +++ b/pypy/module/parser/__init__.py @@ -4,15 +4,12 @@ class Module(MixedModule): """The builtin parser module.""" - applevel_name = 'parser' + applevel_name = '__builtin_parser' appleveldefs = { } interpleveldefs = { - '__name__' : '(space.wrap("parser"))', - '__doc__' : '(space.wrap("parser module"))', - 'suite' : 'pyparser.suite', 'expr' : 'pyparser.expr', 'issuite' : 'pyparser.issuite', diff --git a/pypy/module/pwd/__init__.py b/pypy/module/pwd/__init__.py --- 
a/pypy/module/pwd/__init__.py +++ b/pypy/module/pwd/__init__.py @@ -11,6 +11,8 @@ The uid and gid items are integers, all others are strings. An exception is raised if the entry asked for cannot be found. """ + applevel_name = 'pwd' # this is a built-in module on CPython too, + # instead of an extension module. Unsure why interpleveldefs = { 'getpwuid': 'interp_pwd.getpwuid', diff --git a/pypy/module/pyexpat/__init__.py b/pypy/module/pyexpat/__init__.py --- a/pypy/module/pyexpat/__init__.py +++ b/pypy/module/pyexpat/__init__.py @@ -4,6 +4,7 @@ class ErrorsModule(MixedModule): "Definition of pyexpat.errors module." + applevel_name = 'errors' appleveldefs = {} interpleveldefs = {} @@ -16,6 +17,7 @@ class ModelModule(MixedModule): "Definition of pyexpat.model module." + applevel_name = 'model' appleveldefs = {} interpleveldefs = {} @@ -28,6 +30,7 @@ class Module(MixedModule): "Python wrapper for Expat parser." + applevel_name = '__builtin_pyexpat' appleveldefs = { } diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py --- a/pypy/module/pypyjit/__init__.py +++ b/pypy/module/pypyjit/__init__.py @@ -1,6 +1,8 @@ from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): + applevel_name = '__builtin_pypyjit' + appleveldefs = { } diff --git a/pypy/module/rbench/__init__.py b/pypy/module/rbench/__init__.py --- a/pypy/module/rbench/__init__.py +++ b/pypy/module/rbench/__init__.py @@ -3,6 +3,7 @@ class Module(MixedModule): """geninterpreted benchmarks""" + applevel_name = 'rbench' # not in standard translations appleveldefs = { 'pystone': 'app_bench.pystone', diff --git a/pypy/module/rctime/__init__.py b/pypy/module/rctime/__init__.py --- a/pypy/module/rctime/__init__.py +++ b/pypy/module/rctime/__init__.py @@ -5,7 +5,7 @@ _WIN = os.name == "nt" class Module(MixedModule): - applevel_name = 'time' + applevel_name = '__builtin_time' interpleveldefs = { 'time': 'interp_time.time', diff --git a/pypy/module/select/__init__.py b/pypy/module/select/__init__.py --- a/pypy/module/select/__init__.py +++ b/pypy/module/select/__init__.py @@ -6,6 +6,8 @@ class Module(MixedModule): + applevel_name = '__builtin_select' + appleveldefs = { } diff --git a/pypy/module/signal/__init__.py b/pypy/module/signal/__init__.py --- a/pypy/module/signal/__init__.py +++ b/pypy/module/signal/__init__.py @@ -4,6 +4,8 @@ import signal as cpy_signal class Module(MixedModule): + applevel_name = 'signal' + interpleveldefs = { 'signal': 'interp_signal.signal', 'getsignal': 'interp_signal.getsignal', diff --git a/pypy/module/struct/__init__.py b/pypy/module/struct/__init__.py --- a/pypy/module/struct/__init__.py +++ b/pypy/module/struct/__init__.py @@ -45,6 +45,8 @@ The variable struct.error is an exception raised on errors.""" + applevel_name = '__builtin_struct' + interpleveldefs = { 'calcsize': 'interp_struct.calcsize', 'pack': 'interp_struct.pack', diff --git a/pypy/module/termios/__init__.py b/pypy/module/termios/__init__.py --- a/pypy/module/termios/__init__.py +++ b/pypy/module/termios/__init__.py @@ -13,6 +13,8 @@ argument. This can be an integer file descriptor, such as returned by\n\ sys.stdin.fileno(), or a file object, such as sys.stdin itself." 
+ applevel_name = '__builtin_termios' + appleveldefs = { } diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -3,6 +3,8 @@ from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): + applevel_name = 'thread' + appleveldefs = { } diff --git a/pypy/module/time/__init__.py b/pypy/module/time/__init__.py --- a/pypy/module/time/__init__.py +++ b/pypy/module/time/__init__.py @@ -5,6 +5,7 @@ class Module(MixedModule): """time module""" + applevel_name = '__builtin_time' appleveldefs = { } diff --git a/pypy/module/unicodedata/__init__.py b/pypy/module/unicodedata/__init__.py --- a/pypy/module/unicodedata/__init__.py +++ b/pypy/module/unicodedata/__init__.py @@ -8,6 +8,7 @@ # http://www.fileformat.info/info/unicode/char/search.htm class Module(MixedModule): + applevel_name = '__builtin_unicodedata' appleveldefs = { } interpleveldefs = { diff --git a/pypy/module/zipimport/__init__.py b/pypy/module/zipimport/__init__.py --- a/pypy/module/zipimport/__init__.py +++ b/pypy/module/zipimport/__init__.py @@ -5,6 +5,7 @@ from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): + applevel_name = 'zipimport' interpleveldefs = { 'zipimporter':'interp_zipimport.W_ZipImporter', diff --git a/pypy/module/zlib/__init__.py b/pypy/module/zlib/__init__.py --- a/pypy/module/zlib/__init__.py +++ b/pypy/module/zlib/__init__.py @@ -23,6 +23,8 @@ Compressor objects support compress() and flush() methods; decompressor objects support decompress() and flush().""" + applevel_name = '__builtin_zlib' + interpleveldefs = { 'crc32': 'interp_zlib.crc32', 'adler32': 'interp_zlib.adler32', diff --git a/pypy/objspace/std/dicttype.py b/pypy/objspace/std/dicttype.py --- a/pypy/objspace/std/dicttype.py +++ b/pypy/objspace/std/dicttype.py @@ -148,7 +148,7 @@ XXX to do: remove this __reduce__ method and do a registration with copy_reg, instead. """ - w_mod = space.getbuiltinmodule('__builtin__pickle_support') + w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('dictiter_surrogate_new') w_typeobj = space.gettypeobject(dictiter_typedef) diff --git a/pypy/objspace/std/itertype.py b/pypy/objspace/std/itertype.py --- a/pypy/objspace/std/itertype.py +++ b/pypy/objspace/std/itertype.py @@ -17,7 +17,7 @@ from pypy.objspace.std.iterobject import W_AbstractSeqIterObject assert isinstance(w_self, W_AbstractSeqIterObject) from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('__builtin__pickle_support') + w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('seqiter_new') tup = [w_self.w_seq, space.wrap(w_self.index)] @@ -33,7 +33,7 @@ from pypy.objspace.std.iterobject import W_ReverseSeqIterObject assert isinstance(w_self, W_ReverseSeqIterObject) from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('__builtin__pickle_support') + w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('reverseseqiter_new') tup = [w_self.w_seq, space.wrap(w_self.index)] From noreply at buildbot.pypy.org Sun Dec 4 11:15:11 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 11:15:11 +0100 (CET) Subject: [pypy-commit] pypy builtin-module: Fixes for faked modules. 
Message-ID: <20111204101511.479328205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: builtin-module Changeset: r50107:6d9a5e6b2428 Date: 2011-12-04 11:14 +0100 http://bitbucket.org/pypy/pypy/changeset/6d9a5e6b2428/ Log: Fixes for faked modules. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -476,6 +476,8 @@ for modname in self.ALL_BUILTIN_MODULES: if not LIB_PYPY.join(modname+'.py').check(file=True): modules.append('faked+'+modname) + else: + modules.append('faked+__builtin_'+modname) self._builtinmodule_list = modules return self._builtinmodule_list @@ -586,11 +588,15 @@ try: module = self.load_cpython_module(modname) except ImportError: - return - else: - w_modules = self.sys.get('modules') - self.setitem(w_modules, self.wrap(modname), self.wrap(module)) - installed_builtin_modules.append(modname) + if not modname.startswith('__builtin_'): + return + try: + module = self.load_cpython_module(modname[10:]) + except ImportError: + return + w_modules = self.sys.get('modules') + self.setitem(w_modules, self.wrap(modname), self.wrap(module)) + installed_builtin_modules.append(modname) def setup_builtin_modules(self): "NOT_RPYTHON: only for initializing the space." diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -16,7 +16,9 @@ def test__isfake_currently_true(self): from __pypy__ import isfake import select - assert isfake(select) + assert not isfake(select) # that's lib_pypy/select.py + import __builtin_select + assert isfake(__builtin_select) def test_cpumodel(self): import __pypy__ From noreply at buildbot.pypy.org Sun Dec 4 12:09:09 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 Dec 2011 12:09:09 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: Replace LoopWithIds with TraceWithIds that now represents a trace that includes both entry bridge and peeled loop. Log._filter() now does a lazy filtering of such traces that is not forced until TraceWithIds.allops() is called Message-ID: <20111204110909.E9A6F8205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50108:7188d29a992b Date: 2011-12-04 12:08 +0100 http://bitbucket.org/pypy/pypy/changeset/7188d29a992b/ Log: Replace LoopWithIds with TraceWithIds that now represents a trace that includes both entry bridge and peeled loop. 
Log._filter() now does a lazy filtering of such traces that is not forced until TraceWithIds.allops() is called diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -47,32 +47,33 @@ storage = LoopStorage() traces = [SimpleParser.parse_from_input(rawtrace) for rawtrace in rawtraces] traces = storage.reconnect_loops(traces) - self.loops = [LoopWithIds.from_trace(trace, storage) for trace in traces] + self.loops = [TraceWithIds.from_trace(trace, storage) for trace in traces] def _filter(self, loop, is_entry_bridge=False): - return is_entry_bridge == '*' or loop.is_entry_bridge == is_entry_bridge + if is_entry_bridge == '*': + return loop + assert is_entry_bridge in (True, False) + return PartialTraceWithIds(loop, is_entry_bridge) def loops_by_filename(self, filename, **kwds): """ Return all loops which start in the file ``filename`` """ - return [loop for loop in self.loops - if loop.filename == filename and self._filter(loop, **kwds)] + return [self._filter(loop, **kwds) for loop in self.loops + if loop.filename == filename] def loops_by_id(self, id, **kwds): """ Return all loops which contain the ID ``id`` """ - return [loop for loop in self.loops - if loop.has_id(id) and self._filter(loop, **kwds)] + return [self._filter(loop, **kwds) for loop in self.loops + if loop.has_id(id)] @classmethod def opnames(self, oplist): return [op.name for op in oplist] -class LoopWithIds(Function): - - is_entry_bridge = False +class TraceWithIds(Function): def __init__(self, *args, **kwds): Function.__init__(self, *args, **kwds) @@ -88,7 +89,6 @@ @classmethod def from_trace(cls, trace, storage): res = cls.from_operations(trace.operations, storage) - res.is_entry_bridge = 'entry bridge' in trace.comment return res def flatten_chunks(self): @@ -117,7 +117,7 @@ # # 2. 
compute the ids of all the inlined functions for chunk in self.chunks: - if isinstance(chunk, LoopWithIds): + if isinstance(chunk, TraceWithIds): chunk.compute_ids(ids) def get_set_of_opcodes(self): @@ -144,19 +144,14 @@ (opcode and opcode.__class__.__name__ == opcode_name): for op in self._ops_for_chunk(chunk, include_debug_merge_points): yield op + else: + for op in chunk.operations: + if op.name == 'label': + yield op def allops(self, *args, **kwds): return list(self._allops(*args, **kwds)) - def simple_loop_ops(self): - ops = self.allops() - labels = [op for op in self.allops() if op.name == 'label'] - jumpop = ops[-1] - assert jumpop.name == 'jump' - assert jumpop.getdescr() == labels[-1].getdescr() - i = ops.index(labels[-1]) - return ops[i+1:] - def format_ops(self, id=None, **kwds): if id is None: ops = self.allops(**kwds) @@ -170,7 +165,7 @@ def _ops_by_id(self, id, include_debug_merge_points=False, opcode=None): opcode_name = opcode target_opcodes = self.ids[id] - loop_ops = self.simple_loop_ops() + loop_ops = self.allops(include_debug_merge_points, opcode) for chunk in self.flatten_chunks(): opcode = chunk.getopcode() if opcode in target_opcodes and (opcode_name is None or @@ -183,7 +178,7 @@ return list(self._ops_by_id(*args, **kwds)) def match(self, expected_src, **kwds): - ops = list(self.simple_loop_ops()) + ops = self.allops() matcher = OpMatcher(ops) return matcher.match(expected_src, **kwds) @@ -192,6 +187,42 @@ matcher = OpMatcher(ops) return matcher.match(expected_src) +class PartialTraceWithIds(TraceWithIds): + def __init__(self, trace, is_entry_bridge=False): + self.trace = trace + self.is_entry_bridge = is_entry_bridge + + def allops(self, *args, **kwds): + if self.is_entry_bridge: + return self.entry_bridge_ops(*args, **kwds) + else: + return self.simple_loop_ops(*args, **kwds) + + def simple_loop_ops(self, *args, **kwds): + ops = list(self._allops(*args, **kwds)) + labels = [op for op in ops if op.name == 'label'] + jumpop = ops[-1] + assert jumpop.name == 'jump' + assert jumpop.getdescr() == labels[-1].getdescr() + i = ops.index(labels[-1]) + return ops[i+1:] + + def entry_bridge_ops(self, *args, **kwds): + ops = list(self._allops(*args, **kwds)) + labels = [op for op in ops if op.name == 'label'] + assert ops.index(labels[0]) == 0 + i = ops.index(labels[1]) + return ops[1:i] + + @property + def chunks(self): + return self.trace.chunks + + @property + def ids(self): + return self.trace.ids + + class InvalidMatch(Exception): opindex = None diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -8,7 +8,7 @@ from pypy.tool import logparser from pypy.jit.tool.jitoutput import parse_prof from pypy.module.pypyjit.test_pypy_c.model import Log, find_ids_range, find_ids, \ - LoopWithIds, OpMatcher + TraceWithIds, OpMatcher class BaseTestPyPyC(object): def setup_class(cls): diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -99,7 +99,7 @@ i15 = int_add_ovf(i12, 1) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p3, p4, i15, i6, p7, p8, descr=) + jump(p0, p1, p2, p3, p4, i15, i6, p7, p8, descr=...) 
""") def test_method_call(self): From notifications-noreply at bitbucket.org Sun Dec 4 12:41:29 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Sun, 04 Dec 2011 11:41:29 -0000 Subject: [pypy-commit] Notification: pypy Message-ID: <20111204114129.23794.7650@bitbucket13.managed.contegix.com> You have received a notification from andersas. Hi, I forked pypy. My fork is at https://bitbucket.org/andersas/pypy. -- Disable notifications at https://bitbucket.org/account/notifications/ From noreply at buildbot.pypy.org Sun Dec 4 12:53:57 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 Dec 2011 12:53:57 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: The jumpop is not always among those selected Message-ID: <20111204115357.CB4D48205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50109:ecb2ebdb7e57 Date: 2011-12-04 12:25 +0100 http://bitbucket.org/pypy/pypy/changeset/ecb2ebdb7e57/ Log: The jumpop is not always among those selected diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -201,7 +201,7 @@ def simple_loop_ops(self, *args, **kwds): ops = list(self._allops(*args, **kwds)) labels = [op for op in ops if op.name == 'label'] - jumpop = ops[-1] + jumpop = self.chunks[-1].operations[-1] assert jumpop.name == 'jump' assert jumpop.getdescr() == labels[-1].getdescr() i = ops.index(labels[-1]) From noreply at buildbot.pypy.org Sun Dec 4 12:53:58 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 Dec 2011 12:53:58 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: fix test Message-ID: <20111204115358.F2ACF8205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50110:4c0512063545 Date: 2011-12-04 12:25 +0100 http://bitbucket.org/pypy/pypy/changeset/4c0512063545/ Log: fix test diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -142,7 +142,7 @@ i19 = int_add_ovf(i10, i17) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p3, p4, p5, i19, p7, i17, i9, i10, p11, p12, p13, descr=) + jump(p0, p1, p2, p3, p4, p5, i19, p7, i17, i9, i10, p11, p12, p13, descr=...) """) def test_static_classmethod_call(self): @@ -174,7 +174,7 @@ guard_no_overflow(descr=...) i18 = force_token() --TICK-- - jump(..., descr=) + jump(..., descr=...) """) def test_default_and_kw(self): @@ -394,7 +394,7 @@ guard_not_invalidated(descr=...) i120 = int_add(i5, 1) --TICK-- - jump(..., descr=) + jump(..., descr=...) """) def test_global_closure_has_constant_cells(self): @@ -438,7 +438,7 @@ i33 = int_add_ovf(i15, i32) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p5, i33, i32, p23, p30, p24, descr=) + jump(p0, p1, p2, p5, i33, i32, p23, p30, p24, descr=...) """) def test_local_closure_is_virtual(self): @@ -461,7 +461,7 @@ p22 = new_with_vtable(ConstClass(W_IntObject)) setfield_gc(p22, i13, descr=) setfield_gc(p4, p22, descr=) - jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=) + jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=...) 
""") def test_kwargs_virtual(self): From noreply at buildbot.pypy.org Sun Dec 4 12:54:00 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 Dec 2011 12:54:00 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: fix test Message-ID: <20111204115400.2D7EC8205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50111:26267a7217be Date: 2011-12-04 12:30 +0100 http://bitbucket.org/pypy/pypy/changeset/26267a7217be/ Log: fix test diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -115,5 +115,5 @@ i35 = int_add_ovf(i5, i34) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p3, p4, i35, p13, i7, descr=) - """) \ No newline at end of file + jump(p0, p1, p2, p3, p4, i35, p13, i7, descr=...) + """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_exception.py b/pypy/module/pypyjit/test_pypy_c/test_exception.py --- a/pypy/module/pypyjit/test_pypy_c/test_exception.py +++ b/pypy/module/pypyjit/test_pypy_c/test_exception.py @@ -42,7 +42,7 @@ i12 = int_sub_ovf(i3, 1) guard_no_overflow(descr=...) --TICK-- - jump(..., descr=) + jump(..., descr=...) """) def test_exception_inside_loop_2(self): @@ -89,5 +89,5 @@ --EXC-TICK-- i14 = int_add(i4, 1) --TICK-- - jump(..., descr=) + jump(..., descr=...) """) From noreply at buildbot.pypy.org Sun Dec 4 12:54:01 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 Dec 2011 12:54:01 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: this is not a simple loop and ops are somewhat reoordered now Message-ID: <20111204115401.52E118205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50112:d9c55c23c419 Date: 2011-12-04 12:47 +0100 http://bitbucket.org/pypy/pypy/changeset/d9c55c23c419/ Log: this is not a simple loop and ops are somewhat reoordered now diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -15,12 +15,14 @@ g() log = self.run(main, [500]) - loop, = log.loops_by_filename(self.filepath) + loop, = log.loops_by_filename(self.filepath, is_entry_bridge='*') assert loop.match_by_id("generator", """ + ... + label(..., descr=...) i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) + setfield_gc(p45, i29, descr=) + setarrayitem_gc(p8, 0, p45, descr=) i47 = arraylen_gc(p8, descr=) # Should be removed by backend - setarrayitem_gc(p8, 0, p45, descr=) - setfield_gc(p45, i29, descr=) jump(..., descr=...) """) From noreply at buildbot.pypy.org Sun Dec 4 12:54:02 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 Dec 2011 12:54:02 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: fix tests Message-ID: <20111204115402.7C39B8205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50113:34216d424b05 Date: 2011-12-04 12:53 +0100 http://bitbucket.org/pypy/pypy/changeset/34216d424b05/ Log: fix tests diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -27,7 +27,7 @@ i9 = int_add_ovf(i5, 2) guard_no_overflow(descr=...) 
--TICK-- - jump(p0, p1, p2, p3, p4, i9, i6, descr=) + jump(p0, p1, p2, p3, p4, i9, i6, descr=...) """) def test_load_attr(self): @@ -52,7 +52,7 @@ i10 = int_add_ovf(i5, i7) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p3, p4, i10, i6, i7, p8, descr=) + jump(p0, p1, p2, p3, p4, i10, i6, i7, p8, descr=...) """) def test_getattr_with_dynamic_attribute(self): @@ -125,9 +125,9 @@ i12 = force_token() --TICK-- p20 = new_with_vtable(ConstClass(W_IntObject)) + setfield_gc(p20, i11, descr=) setfield_gc(ConstPtr(ptr21), p20, descr=) - setfield_gc(p20, i11, descr=) - jump(p0, p1, p2, p3, p4, p20, p6, i7, p20, descr=) + jump(p0, p1, p2, p3, p4, p20, p6, i7, p20, descr=...) """) def test_oldstyle_newstyle_mix(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_intbound.py b/pypy/module/pypyjit/test_pypy_c/test_intbound.py --- a/pypy/module/pypyjit/test_pypy_c/test_intbound.py +++ b/pypy/module/pypyjit/test_pypy_c/test_intbound.py @@ -97,7 +97,7 @@ guard_no_overflow(descr=...) i17 = int_add(i8, 1) --TICK-- - jump(p0, p1, p2, p3, p4, i14, i12, i17, p8, i9, descr=) + jump(p0, p1, p2, p3, p4, i14, i12, i17, p8, i9, descr=...) """) def test_intbound_sub_lt(self): @@ -121,7 +121,7 @@ guard_no_overflow(descr=...) i13 = int_add(i5, 1) --TICK-- - jump(p0, p1, p2, p3, i11, i13, descr=) + jump(p0, p1, p2, p3, i11, i13, descr=...) """) def test_intbound_addsub_ge(self): @@ -150,7 +150,7 @@ guard_no_overflow(descr=...) i19 = int_add(i8, 1) --TICK-- - jump(p0, p1, p2, p3, p4, i16, i14, i19, p8, i9, descr=) + jump(p0, p1, p2, p3, p4, i16, i14, i19, p8, i9, descr=...) """) def test_intbound_addmul_ge(self): @@ -178,7 +178,7 @@ guard_no_overflow(descr=...) i21 = int_add(i8, 1) --TICK-- - jump(p0, p1, p2, p3, p4, i18, i14, i21, p8, descr=) + jump(p0, p1, p2, p3, p4, i18, i14, i21, p8, descr=...) """) def test_intbound_eq(self): @@ -210,7 +210,7 @@ guard_no_overflow(descr=...) i16 = int_add(i8, 1) --TICK-- - jump(p0, p1, p2, p3, p4, p6, i14, i16, p8, descr=) + jump(p0, p1, p2, p3, p4, p6, i14, i16, p8, descr=...) """) def test_intbound_mul(self): @@ -236,7 +236,7 @@ guard_no_overflow(descr=...) i14 = int_add(i6, 1) --TICK-- - jump(p0, p1, p2, p3, p4, i12, i14, descr=) + jump(p0, p1, p2, p3, p4, i12, i14, descr=...) """) def test_assert(self): @@ -257,7 +257,7 @@ guard_no_overflow(descr=...) i12 = int_add(i6, 1) --TICK-- - jump(p0, p1, p2, p3, p4, i10, i12, descr=) + jump(p0, p1, p2, p3, p4, i10, i12, descr=...) """) def test_xor(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -29,7 +29,7 @@ f5 = float_add(f0, f4) i4 = int_add(i0, 1) --TICK-- - jump(..., descr=) + jump(..., descr=...) """) def test_sin_cos(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_min_max.py b/pypy/module/pypyjit/test_pypy_c/test_min_max.py --- a/pypy/module/pypyjit/test_pypy_c/test_min_max.py +++ b/pypy/module/pypyjit/test_pypy_c/test_min_max.py @@ -22,7 +22,7 @@ guard_no_overflow(descr=...) i11 = int_add(i4, 1) --TICK-- - jump(p0, p1, p2, p3, i11, i9, descr=) + jump(p0, p1, p2, p3, i11, i9, descr=...) """) def test_silly_max(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -88,7 +88,7 @@ guard_true(i9, descr=...) 
f10 = float_add(f8, f5) --TICK-- - jump(p0, p1, p2, p3, p4, f10, p6, f7, f8, descr=) + jump(p0, p1, p2, p3, p4, f10, p6, f7, f8, descr=...) """) @@ -159,7 +159,7 @@ i27 = int_add_ovf(i7, i18) guard_no_overflow(descr=...) --TICK-- - jump(..., descr=) + jump(..., descr=...) """) @@ -219,7 +219,7 @@ i28 = int_add_ovf(i10, i25) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p3, p4, p5, p6, i28, i25, p9, p10, p11, p12, i19, descr=) + jump(p0, p1, p2, p3, p4, p5, p6, i28, i25, p9, p10, p11, p12, i19, descr=...) """) From noreply at buildbot.pypy.org Sun Dec 4 13:24:41 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 13:24:41 +0100 (CET) Subject: [pypy-commit] pypy builtin-module: The 'appleveldefs' is now not very useful any more, because we can Message-ID: <20111204122441.75FC98205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: builtin-module Changeset: r50114:0daa576a26ef Date: 2011-12-04 11:17 +0100 http://bitbucket.org/pypy/pypy/changeset/0daa576a26ef/ Log: The 'appleveldefs' is now not very useful any more, because we can simply have the app-level part of the module in lib_pypy. Use this technique instead of hacking at the import for _bisect. diff --git a/lib_pypy/_bisect.py b/lib_pypy/_bisect.py --- a/lib_pypy/_bisect.py +++ b/lib_pypy/_bisect.py @@ -1,3 +1,28 @@ # indirection needed; otherwise the built-in module "_bisect" shadows # any file _bisect.py that would be found in the user dirs from __builtin__bisect import * + + +def insort_left(a, x, lo=0, hi=-1): + """Insert item x in list a, and keep it sorted assuming a is sorted. + +If x is already in a, insert it to the left of the leftmost x. + +Optional args lo (default 0) and hi (default len(a)) bound the +slice of a to be searched.""" + n = bisect_left(a, x, lo, hi) + a.insert(n, x) + + +def insort_right(a, x, lo=0, hi=-1): + """Insert item x in list a, and keep it sorted assuming a is sorted. + +If x is already in a, insert it to the right of the rightmost x. + +Optional args lo (default 0) and hi (default len(a)) bound the +slice of a to be searched.""" + n = bisect_right(a, x, lo, hi) + a.insert(n, x) + + +insort = insort_right diff --git a/pypy/module/_bisect/__init__.py b/pypy/module/_bisect/__init__.py --- a/pypy/module/_bisect/__init__.py +++ b/pypy/module/_bisect/__init__.py @@ -16,9 +16,6 @@ applevel_name = '__builtin__bisect' appleveldefs = { - 'insort': 'app_bisect.insort_right', - 'insort_left': 'app_bisect.insort_left', - 'insort_right': 'app_bisect.insort_right', } interpleveldefs = { diff --git a/pypy/module/_bisect/app_bisect.py b/pypy/module/_bisect/app_bisect.py deleted file mode 100644 --- a/pypy/module/_bisect/app_bisect.py +++ /dev/null @@ -1,23 +0,0 @@ -from _bisect import bisect_left, bisect_right - - -def insort_left(a, x, lo=0, hi=-1): - """Insert item x in list a, and keep it sorted assuming a is sorted. - -If x is already in a, insert it to the left of the leftmost x. - -Optional args lo (default 0) and hi (default len(a)) bound the -slice of a to be searched.""" - n = bisect_left(a, x, lo, hi) - a.insert(n, x) - - -def insort_right(a, x, lo=0, hi=-1): - """Insert item x in list a, and keep it sorted assuming a is sorted. - -If x is already in a, insert it to the right of the rightmost x. 
- -Optional args lo (default 0) and hi (default len(a)) bound the -slice of a to be searched.""" - n = bisect_right(a, x, lo, hi) - a.insert(n, x) From noreply at buildbot.pypy.org Sun Dec 4 13:24:42 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 13:24:42 +0100 (CET) Subject: [pypy-commit] pypy builtin-module: Fix import. Message-ID: <20111204122442.AD4288205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: builtin-module Changeset: r50115:8d78ae0722b6 Date: 2011-12-04 11:20 +0100 http://bitbucket.org/pypy/pypy/changeset/8d78ae0722b6/ Log: Fix import. diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -7,7 +7,7 @@ # 'dict'. But what we can do is write individual methods at # interp-level. -import _collections +import __builtin__collections as _collections class defaultdict(dict): From noreply at buildbot.pypy.org Sun Dec 4 13:24:43 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 13:24:43 +0100 (CET) Subject: [pypy-commit] pypy builtin-module: Add an import Message-ID: <20111204122443.E19F58205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: builtin-module Changeset: r50116:c8b8463377a3 Date: 2011-12-04 11:27 +0100 http://bitbucket.org/pypy/pypy/changeset/c8b8463377a3/ Log: Add an import diff --git a/lib_pypy/unicodedata.py b/lib_pypy/unicodedata.py --- a/lib_pypy/unicodedata.py +++ b/lib_pypy/unicodedata.py @@ -1,3 +1,4 @@ # indirection needed; otherwise the built-in module "unicodedata" shadows # any file unicodedata.py that would be found in the user dirs from __builtin_unicodedata import * +from __builtin_unicodedata import _get_code From noreply at buildbot.pypy.org Sun Dec 4 13:24:45 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 13:24:45 +0100 (CET) Subject: [pypy-commit] pypy builtin-module: Use space.getbuiltinmodule() here. Message-ID: <20111204122445.202108205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: builtin-module Changeset: r50117:0433e66a6367 Date: 2011-12-04 11:28 +0100 http://bitbucket.org/pypy/pypy/changeset/0433e66a6367/ Log: Use space.getbuiltinmodule() here. diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -67,10 +67,7 @@ if self.unicodedata_handler: return self.unicodedata_handler try: - w_builtin = space.getbuiltinmodule('__builtin__') - w_import = space.getattr(w_builtin, space.wrap("__import__")) - w_unicodedata = space.call_function(w_import, - space.wrap("unicodedata")) + w_unicodedata = space.getbuiltinmodule("unicodedata") w_getcode = space.getattr(w_unicodedata, space.wrap("_get_code")) except OperationError: return None From noreply at buildbot.pypy.org Sun Dec 4 13:24:46 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 13:24:46 +0100 (CET) Subject: [pypy-commit] pypy builtin-module: Fix. Obscure, I guess it is some combination of factors that Message-ID: <20111204122446.5214C8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: builtin-module Changeset: r50118:99f3326949de Date: 2011-12-04 11:30 +0100 http://bitbucket.org/pypy/pypy/changeset/99f3326949de/ Log: Fix. Obscure, I guess it is some combination of factors that used to get us the faked array module here... 
diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py --- a/pypy/module/_hashlib/test/test_hashlib.py +++ b/pypy/module/_hashlib/test/test_hashlib.py @@ -3,7 +3,7 @@ class AppTestHashlib: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_hashlib']) + cls.space = gettestobjspace(usemodules=['_hashlib', 'array']) def test_simple(self): import _hashlib From noreply at buildbot.pypy.org Sun Dec 4 13:24:47 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 13:24:47 +0100 (CET) Subject: [pypy-commit] pypy builtin-module: Fixes. Message-ID: <20111204122447.7842F8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: builtin-module Changeset: r50119:c29aab975e44 Date: 2011-12-04 11:32 +0100 http://bitbucket.org/pypy/pypy/changeset/c29aab975e44/ Log: Fixes. diff --git a/lib_pypy/_multibytecodec.py b/lib_pypy/_multibytecodec.py --- a/lib_pypy/_multibytecodec.py +++ b/lib_pypy/_multibytecodec.py @@ -1,3 +1,4 @@ # indirection needed; otherwise the built-in module "_multibytecodec" shadows # any file _multibytecodec.py that would be found in the user dirs from __builtin__multibytecodec import * +from __builtin__multibytecodec import __getcodec diff --git a/pypy/module/_multibytecodec/app_multibytecodec.py b/pypy/module/_multibytecodec/app_multibytecodec.py --- a/pypy/module/_multibytecodec/app_multibytecodec.py +++ b/pypy/module/_multibytecodec/app_multibytecodec.py @@ -2,8 +2,8 @@ # # The interface here may be a little bit on the lightweight side. -from _multibytecodec import MultibyteIncrementalDecoder -from _multibytecodec import MultibyteIncrementalEncoder +from __builtin__multibytecodec import MultibyteIncrementalDecoder +from __builtin__multibytecodec import MultibyteIncrementalEncoder class MultibyteStreamReader(MultibyteIncrementalDecoder): From noreply at buildbot.pypy.org Sun Dec 4 13:24:48 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 13:24:48 +0100 (CET) Subject: [pypy-commit] pypy builtin-module: Fixes: missing the 'array' module, again Message-ID: <20111204122448.ADBF28205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: builtin-module Changeset: r50120:eeb74932b7d1 Date: 2011-12-04 11:44 +0100 http://bitbucket.org/pypy/pypy/changeset/eeb74932b7d1/ Log: Fixes: missing the 'array' module, again diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -141,7 +141,7 @@ class AppTestOpen: def setup_class(cls): - cls.space = gettestobjspace(usemodules=['_io', '_locale']) + cls.space = gettestobjspace(usemodules=['_io', '_locale', 'array']) tmpfile = udir.join('tmpfile').ensure() cls.w_tmpfile = cls.space.wrap(str(tmpfile)) diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -89,7 +89,8 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', + 'signal', 'array')) cls.space = space cls.w_connections = space.newlist([]) @@ -150,4 +151,4 @@ import _multiprocessing raises(IOError, _multiprocessing.Connection, -1) - raises(IOError, _multiprocessing.Connection, -15) \ No newline at end of file + raises(IOError, 
_multiprocessing.Connection, -15) From noreply at buildbot.pypy.org Sun Dec 4 13:24:49 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 13:24:49 +0100 (CET) Subject: [pypy-commit] pypy builtin-module: Add a hack that makes it unnecessary to list in lib_pypy/*.py all the Message-ID: <20111204122449.DD0EE8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: builtin-module Changeset: r50121:411dad65158a Date: 2011-12-04 12:05 +0100 http://bitbucket.org/pypy/pypy/changeset/411dad65158a/ Log: Add a hack that makes it unnecessary to list in lib_pypy/*.py all the names starting with an underscore. This was needed because "import *" ignores them, but now we provide an __all__ list, so it works again without them. diff --git a/lib_pypy/_continuation.py b/lib_pypy/_continuation.py --- a/lib_pypy/_continuation.py +++ b/lib_pypy/_continuation.py @@ -1,4 +1,3 @@ # indirection needed; otherwise the built-in module "_continuation" shadows # any file _continuation.py that would be found in the user dirs from __builtin__continuation import * -from __builtin__continuation import __doc__ diff --git a/lib_pypy/_multibytecodec.py b/lib_pypy/_multibytecodec.py --- a/lib_pypy/_multibytecodec.py +++ b/lib_pypy/_multibytecodec.py @@ -1,4 +1,3 @@ # indirection needed; otherwise the built-in module "_multibytecodec" shadows # any file _multibytecodec.py that would be found in the user dirs from __builtin__multibytecodec import * -from __builtin__multibytecodec import __getcodec diff --git a/lib_pypy/cmath.py b/lib_pypy/cmath.py --- a/lib_pypy/cmath.py +++ b/lib_pypy/cmath.py @@ -1,4 +1,3 @@ # indirection needed; otherwise the built-in module "cmath" shadows # any file cmath.py that would be found in the user dirs from __builtin_cmath import * -from __builtin_cmath import __doc__ diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -2,7 +2,6 @@ # any file itertools.py that would be found in the user dirs try: from __builtin_itertools import * - from __builtin_itertools import __doc__ except ImportError: from _itertools import * from _itertools import __doc__ diff --git a/lib_pypy/math.py b/lib_pypy/math.py --- a/lib_pypy/math.py +++ b/lib_pypy/math.py @@ -1,4 +1,3 @@ # indirection needed; otherwise the built-in module "math" shadows # any file math.py that would be found in the user dirs from __builtin_math import * -from __builtin_math import __doc__ diff --git a/lib_pypy/struct.py b/lib_pypy/struct.py --- a/lib_pypy/struct.py +++ b/lib_pypy/struct.py @@ -2,7 +2,6 @@ # any file struct.py that would be found in the user dirs try: from __builtin_struct import * - from __builtin_struct import __doc__ except ImportError: from _struct import * from _struct import __doc__ diff --git a/lib_pypy/termios.py b/lib_pypy/termios.py --- a/lib_pypy/termios.py +++ b/lib_pypy/termios.py @@ -1,4 +1,3 @@ # indirection needed; otherwise the built-in module "termios" shadows # any file termios.py that would be found in the user dirs from __builtin_termios import * -from __builtin_termios import __doc__ diff --git a/lib_pypy/unicodedata.py b/lib_pypy/unicodedata.py --- a/lib_pypy/unicodedata.py +++ b/lib_pypy/unicodedata.py @@ -1,4 +1,3 @@ # indirection needed; otherwise the built-in module "unicodedata" shadows # any file unicodedata.py that would be found in the user dirs from __builtin_unicodedata import * -from __builtin_unicodedata import _get_code diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- 
a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -56,8 +56,22 @@ if self.w_initialdict is None: Module.init(self, space) - if not self.lazy and self.w_initialdict is None: - self.w_initialdict = space.call_method(self.w_dict, 'items') + if not self.lazy: + self._initial_nonlazy_init(space) + + def _initial_nonlazy_init(self, space): + # for modules called '__builtin_*', build a default '__all__' + # that has all keys, including the ones starting with '_'. + if space.str_w(self.w_name).startswith('__builtin_'): + w_all = space.call_method(self.w_dict, 'keys') + try: + space.call_method(w_all, 'remove', space.wrap('__file__')) + except OperationError: + pass + space.setitem(self.w_dict, space.wrap('__all__'), w_all) + # + if self.w_initialdict is None: + self.w_initialdict = space.call_method(self.w_dict, 'items') def get_applevel_name(cls): @@ -124,7 +138,7 @@ w_value = self.get(name) space.setitem(self.w_dict, space.new_interned_str(name), w_value) self.lazy = False - self.w_initialdict = space.call_method(self.w_dict, 'items') + self._initial_nonlazy_init(space) return self.w_dict def _freeze_(self): diff --git a/pypy/module/_io/__init__.py b/pypy/module/_io/__init__.py --- a/pypy/module/_io/__init__.py +++ b/pypy/module/_io/__init__.py @@ -28,7 +28,7 @@ 'IncrementalNewlineDecoder': 'interp_textio.W_IncrementalNewlineDecoder', } - def init(self, space): + def startup(self, space): w_UnsupportedOperation = space.call_function( space.w_type, space.wrap('UnsupportedOperation'), From noreply at buildbot.pypy.org Sun Dec 4 13:24:51 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 13:24:51 +0100 (CET) Subject: [pypy-commit] pypy builtin-module: Clean up the hack, replacing it with a simpler hack... Message-ID: <20111204122451.1C3BB8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: builtin-module Changeset: r50122:4875ef10dc16 Date: 2011-12-04 13:06 +0100 http://bitbucket.org/pypy/pypy/changeset/4875ef10dc16/ Log: Clean up the hack, replacing it with a simpler hack... diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -56,22 +56,8 @@ if self.w_initialdict is None: Module.init(self, space) - if not self.lazy: - self._initial_nonlazy_init(space) - - def _initial_nonlazy_init(self, space): - # for modules called '__builtin_*', build a default '__all__' - # that has all keys, including the ones starting with '_'. 
- if space.str_w(self.w_name).startswith('__builtin_'): - w_all = space.call_method(self.w_dict, 'keys') - try: - space.call_method(w_all, 'remove', space.wrap('__file__')) - except OperationError: - pass - space.setitem(self.w_dict, space.wrap('__all__'), w_all) - # - if self.w_initialdict is None: - self.w_initialdict = space.call_method(self.w_dict, 'items') + if not self.lazy and self.w_initialdict is None: + self.w_initialdict = space.call_method(self.w_dict, 'items') def get_applevel_name(cls): @@ -138,7 +124,7 @@ w_value = self.get(name) space.setitem(self.w_dict, space.new_interned_str(name), w_value) self.lazy = False - self._initial_nonlazy_init(space) + self.w_initialdict = space.call_method(self.w_dict, 'items') return self.w_dict def _freeze_(self): @@ -167,6 +153,10 @@ loaders['__file__'] = cls.get__file__ if '__doc__' not in loaders: loaders['__doc__'] = cls.get__doc__ + if '__all__' not in loaders: + if ((cls.applevel_name or '').startswith('__builtin_') and + cls.applevel_name != '__builtin__'): + loaders['__all__'] = cls.get__all__Ellipsis buildloaders = classmethod(buildloaders) @@ -204,6 +194,10 @@ return space.wrap(cls.__doc__) get__doc__ = classmethod(get__doc__) + def get__all__Ellipsis(cls, space): + return space.w_Ellipsis + get__all__Ellipsis = classmethod(get__all__Ellipsis) + def getinterpevalloader(pkgroot, spec): """ NOT_RPYTHON """ diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1488,6 +1488,12 @@ skip_leading_underscores = True else: skip_leading_underscores = False + # pypy extension, for __builtin_* modules + if all is Ellipsis: + all = set(module.__dict__) + all.discard('__name__') + all.discard('__file__') + all.discard('__all__') for name in all: if skip_leading_underscores and name[0]=='_': continue From noreply at buildbot.pypy.org Sun Dec 4 13:24:52 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 13:24:52 +0100 (CET) Subject: [pypy-commit] pypy default: Use space.getbuiltinmodule() here. Message-ID: <20111204122452.4894F8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50123:0e150a447e8d Date: 2011-12-04 11:28 +0100 http://bitbucket.org/pypy/pypy/changeset/0e150a447e8d/ Log: Use space.getbuiltinmodule() here. diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -67,10 +67,7 @@ if self.unicodedata_handler: return self.unicodedata_handler try: - w_builtin = space.getbuiltinmodule('__builtin__') - w_import = space.getattr(w_builtin, space.wrap("__import__")) - w_unicodedata = space.call_function(w_import, - space.wrap("unicodedata")) + w_unicodedata = space.getbuiltinmodule("unicodedata") w_getcode = space.getattr(w_unicodedata, space.wrap("_get_code")) except OperationError: return None From noreply at buildbot.pypy.org Sun Dec 4 13:24:53 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 13:24:53 +0100 (CET) Subject: [pypy-commit] pypy default: Fix encoding. Message-ID: <20111204122453.7488E8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50124:7976bddd8b38 Date: 2011-12-04 10:05 +0100 http://bitbucket.org/pypy/pypy/changeset/7976bddd8b38/ Log: Fix encoding. 
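A note on the change below: per PEP 263, Python already honours any line among the first two of a file that matches "coding[:=] <name>", so the file was decoded as iso-8859-1 with or without the trailing marker; the fix only completes the Emacs-style cookie. A well-formed header, shown here as a hypothetical standalone file rather than part of the changeset, would be:

    #!/usr/bin/env python
    # -*- coding: iso-8859-1 -*-
    # Recognized by Python (PEP 263 only needs the "coding: iso-8859-1"
    # part on line 1 or 2) and, with the closing -*-, by Emacs'
    # file-local-variables syntax.
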
diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# -*- coding: iso-8859-1 +# -*- coding: iso-8859-1 -*- # Note that PyPy contains also a built-in module 'sha' which will hide # this one if compiled in. From noreply at buildbot.pypy.org Sun Dec 4 13:24:54 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 13:24:54 +0100 (CET) Subject: [pypy-commit] pypy default: backout 03e42e96479d Message-ID: <20111204122454.9A28F8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50125:8d1c4e952989 Date: 2011-12-04 13:21 +0100 http://bitbucket.org/pypy/pypy/changeset/8d1c4e952989/ Log: backout 03e42e96479d diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py deleted file mode 100644 --- a/lib_pypy/itertools.py +++ /dev/null @@ -1,6 +0,0 @@ -try: - from __builtin_itertools import * - from __builtin_itertools import __doc__ -except ImportError: - from _itertools import * - from _itertools import __doc__ From noreply at buildbot.pypy.org Sun Dec 4 13:24:55 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 13:24:55 +0100 (CET) Subject: [pypy-commit] pypy default: backout d5a684ab8934: the approach does not really work at all. Message-ID: <20111204122455.D15E88205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50126:6d5002ae61ff Date: 2011-12-04 13:23 +0100 http://bitbucket.org/pypy/pypy/changeset/6d5002ae61ff/ Log: backout d5a684ab8934: the approach does not really work at all. User code can really expect that after "import itertools", the itertools module is the built-in one. For example it behaves differently when reload() is called. Major mess. diff --git a/lib_pypy/_itertools.py b/lib_pypy/_itertools.py deleted file mode 100644 --- a/lib_pypy/_itertools.py +++ /dev/null @@ -1,670 +0,0 @@ -# Note that PyPy contains also a built-in implementation of 'itertools'; -# when translated with default options, this one is not used. - -"""Functional tools for creating and using iterators. - -Infinite iterators: -count([n]) --> n, n+1, n+2, ... -cycle(p) --> p0, p1, ... plast, p0, p1, ... -repeat(elem [,n]) --> elem, elem, elem, ... endlessly or up to n times - -Iterators terminating on the shortest input sequence: -izip(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... -ifilter(pred, seq) --> elements of seq where pred(elem) is True -ifilterfalse(pred, seq) --> elements of seq where pred(elem) is False -islice(seq, [start,] stop [, step]) --> elements from - seq[start:stop:step] -imap(fun, p, q, ...) --> fun(p0, q0), fun(p1, q1), ... -starmap(fun, seq) --> fun(*seq[0]), fun(*seq[1]), ... -tee(it, n=2) --> (it1, it2 , ... itn) splits one iterator into n -chain(p, q, ...) --> p0, p1, ... plast, q0, q1, ... -takewhile(pred, seq) --> seq[0], seq[1], until pred fails -dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails -groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v) -""" - -__all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', - 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee', 'compress', 'product'] - -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f - - -class chain(object): - """Make an iterator that returns elements from the first iterable - until it is exhausted, then proceeds to the next iterable, until - all of the iterables are exhausted. Used for treating consecutive - sequences as a single sequence. 
- - Equivalent to : - - def chain(*iterables): - for it in iterables: - for element in it: - yield element - """ - def __init__(self, *iterables): - self._iterables_iter = iter(map(iter, iterables)) - # little trick for the first chain.next() call - self._cur_iterable_iter = iter([]) - - def __iter__(self): - return self - - def next(self): - while True: - try: - return self._cur_iterable_iter.next() - except StopIteration: - self._cur_iterable_iter = self._iterables_iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._cur_iterable_iter)) - - -class compress(object): - def __init__(self, data, selectors): - self.data = iter(data) - self.selectors = iter(selectors) - - def __iter__(self): - return self - - def next(self): - while True: - next_item = self.data.next() - next_selector = self.selectors.next() - if bool(next_selector): - return next_item - - -class count(object): - """Make an iterator that returns consecutive integers starting - with n. If not specified n defaults to zero. Does not currently - support python long integers. Often used as an argument to imap() - to generate consecutive data points. Also, used with izip() to - add sequence numbers. - - Equivalent to : - - def count(n=0): - if not isinstance(n, int): - raise TypeError("%s is not a regular integer" % n) - while True: - yield n - n += 1 - """ - def __init__(self, n=0): - if not isinstance(n, int): - raise TypeError('%s is not a regular integer' % n) - self.times = n-1 - - def __iter__(self): - return self - - def next(self): - self.times += 1 - return self.times - - def __repr__(self): - return 'count(%d)' % (self.times + 1) - - - -class cycle(object): - """Make an iterator returning elements from the iterable and - saving a copy of each. When the iterable is exhausted, return - elements from the saved copy. Repeats indefinitely. - - Equivalent to : - - def cycle(iterable): - saved = [] - for element in iterable: - yield element - saved.append(element) - while saved: - for element in saved: - yield element - """ - def __init__(self, iterable): - self._cur_iter = iter(iterable) - self._saved = [] - self._must_save = True - - def __iter__(self): - return self - - def next(self): - # XXX Could probably be improved - try: - next_elt = self._cur_iter.next() - if self._must_save: - self._saved.append(next_elt) - except StopIteration: - self._cur_iter = iter(self._saved) - next_elt = self._cur_iter.next() - self._must_save = False - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._cur_iter)) - return next_elt - - -class dropwhile(object): - """Make an iterator that drops elements from the iterable as long - as the predicate is true; afterwards, returns every - element. Note, the iterator does not produce any output until the - predicate is true, so it may have a lengthy start-up time. 
- - Equivalent to : - - def dropwhile(predicate, iterable): - iterable = iter(iterable) - for x in iterable: - if not predicate(x): - yield x - break - for x in iterable: - yield x - """ - def __init__(self, predicate, iterable): - self._predicate = predicate - self._iter = iter(iterable) - self._dropped = False - - def __iter__(self): - return self - - def next(self): - try: - value = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._iter)) - if self._dropped: - return value - while self._predicate(value): - value = self._iter.next() - self._dropped = True - return value - -class groupby(object): - """Make an iterator that returns consecutive keys and groups from the - iterable. The key is a function computing a key value for each - element. If not specified or is None, key defaults to an identity - function and returns the element unchanged. Generally, the - iterable needs to already be sorted on the same key function. - - The returned group is itself an iterator that shares the - underlying iterable with groupby(). Because the source is shared, - when the groupby object is advanced, the previous group is no - longer visible. So, if that data is needed later, it should be - stored as a list: - - groups = [] - uniquekeys = [] - for k, g in groupby(data, keyfunc): - groups.append(list(g)) # Store group iterator as a list - uniquekeys.append(k) - """ - def __init__(self, iterable, key=None): - if key is None: - key = lambda x: x - self.keyfunc = key - self.it = iter(iterable) - self.tgtkey = self.currkey = self.currvalue = xrange(0) - - def __iter__(self): - return self - - def next(self): - while self.currkey == self.tgtkey: - try: - self.currvalue = self.it.next() # Exit on StopIteration - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self.it)) - self.currkey = self.keyfunc(self.currvalue) - self.tgtkey = self.currkey - return (self.currkey, self._grouper(self.tgtkey)) - - def _grouper(self, tgtkey): - while self.currkey == tgtkey: - yield self.currvalue - self.currvalue = self.it.next() # Exit on StopIteration - self.currkey = self.keyfunc(self.currvalue) - - - -class _ifilter_base(object): - """base class for ifilter and ifilterflase""" - def __init__(self, predicate, iterable): - # Make sure iterable *IS* iterable - self._iter = iter(iterable) - if predicate is None: - self._predicate = bool - else: - self._predicate = predicate - - def __iter__(self): - return self - -class ifilter(_ifilter_base): - """Make an iterator that filters elements from iterable returning - only those for which the predicate is True. If predicate is - None, return the items that are true. - - Equivalent to : - - def ifilter: - if predicate is None: - predicate = bool - for x in iterable: - if predicate(x): - yield x - """ - def next(self): - try: - next_elt = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._iter)) - while True: - if self._predicate(next_elt): - return next_elt - next_elt = self._iter.next() - -class ifilterfalse(_ifilter_base): - """Make an iterator that filters elements from iterable returning - only those for which the predicate is False. If predicate is - None, return the items that are false. 
- - Equivalent to : - - def ifilterfalse(predicate, iterable): - if predicate is None: - predicate = bool - for x in iterable: - if not predicate(x): - yield x - """ - def next(self): - try: - next_elt = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._iter)) - while True: - if not self._predicate(next_elt): - return next_elt - next_elt = self._iter.next() - - - - -class imap(object): - """Make an iterator that computes the function using arguments - from each of the iterables. If function is set to None, then - imap() returns the arguments as a tuple. Like map() but stops - when the shortest iterable is exhausted instead of filling in - None for shorter iterables. The reason for the difference is that - infinite iterator arguments are typically an error for map() - (because the output is fully evaluated) but represent a common - and useful way of supplying arguments to imap(). - - Equivalent to : - - def imap(function, *iterables): - iterables = map(iter, iterables) - while True: - args = [i.next() for i in iterables] - if function is None: - yield tuple(args) - else: - yield function(*args) - - """ - def __init__(self, function, iterable, *other_iterables): - self._func = function - self._iters = map(iter, (iterable, ) + other_iterables) - - def __iter__(self): - return self - - def next(self): - try: - args = [it.next() for it in self._iters] - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (it)) - if self._func is None: - return tuple(args) - else: - return self._func(*args) - - - -class islice(object): - """Make an iterator that returns selected elements from the - iterable. If start is non-zero, then elements from the iterable - are skipped until start is reached. Afterward, elements are - returned consecutively unless step is set higher than one which - results in items being skipped. If stop is None, then iteration - continues until the iterator is exhausted, if at all; otherwise, - it stops at the specified position. Unlike regular slicing, - islice() does not support negative values for start, stop, or - step. Can be used to extract related fields from data where the - internal structure has been flattened (for example, a multi-line - report may list a name field on every third line). - """ - def __init__(self, iterable, *args): - s = slice(*args) - self.start, self.stop, self.step = s.start or 0, s.stop, s.step - if not isinstance(self.start, (int, long)): - raise ValueError("Start argument must be an integer") - if self.stop is not None and not isinstance(self.stop, (int,long)): - raise ValueError("Stop argument must be an integer or None") - if self.step is None: - self.step = 1 - if self.start<0 or (self.stop is not None and self.stop<0 - ) or self.step<=0: - raise ValueError, "indices for islice() must be positive" - self.it = iter(iterable) - self.donext = None - self.cnt = 0 - - def __iter__(self): - return self - - def next(self): - if self.donext is None: - try: - self.donext = self.it.next - except AttributeError: - raise TypeError - nextindex = self.start - if self.stop is not None and nextindex >= self.stop: - raise StopIteration - while self.cnt <= nextindex: - nextitem = self.donext() - self.cnt += 1 - self.start += self.step - return nextitem - -class izip(object): - """Make an iterator that aggregates elements from each of the - iterables. 
Like zip() except that it returns an iterator instead - of a list. Used for lock-step iteration over several iterables at - a time. - - Equivalent to : - - def izip(*iterables): - iterables = map(iter, iterables) - while iterables: - result = [i.next() for i in iterables] - yield tuple(result) - """ - def __init__(self, *iterables): - self._iterators = map(iter, iterables) - self._result = [None] * len(self._iterators) - - def __iter__(self): - return self - - def next(self): - if not self._iterators: - raise StopIteration() - try: - return tuple([i.next() for i in self._iterators]) - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % (i)) - - -class product(object): - - def __init__(self, *args, **kw): - if len(kw) > 1: - raise TypeError("product() takes at most 1 argument (%d given)" % - len(kw)) - self.repeat = kw.get('repeat', 1) - self.gears = [x for x in args] * self.repeat - self.num_gears = len(self.gears) - # initialization of indicies to loop over - self.indicies = [(0, len(self.gears[x])) - for x in range(0, self.num_gears)] - self.cont = True - - def roll_gears(self): - # Starting from the end of the gear indicies work to the front - # incrementing the gear until the limit is reached. When the limit - # is reached carry operation to the next gear - should_carry = True - for n in range(0, self.num_gears): - nth_gear = self.num_gears - n - 1 - if should_carry: - count, lim = self.indicies[nth_gear] - count += 1 - if count == lim and nth_gear == 0: - self.cont = False - if count == lim: - should_carry = True - count = 0 - else: - should_carry = False - self.indicies[nth_gear] = (count, lim) - else: - break - - def __iter__(self): - return self - - def next(self): - if not self.cont: - raise StopIteration - l = [] - for x in range(0, self.num_gears): - index, limit = self.indicies[x] - l.append(self.gears[x][index]) - self.roll_gears() - return tuple(l) - - -class repeat(object): - """Make an iterator that returns object over and over again. - Runs indefinitely unless the times argument is specified. Used - as argument to imap() for invariant parameters to the called - function. Also used with izip() to create an invariant part of a - tuple record. - - Equivalent to : - - def repeat(object, times=None): - if times is None: - while True: - yield object - else: - for i in xrange(times): - yield object - """ - def __init__(self, obj, times=None): - self._obj = obj - if times is not None: - xrange(times) # Raise a TypeError - if times < 0: - times = 0 - self._times = times - - def __iter__(self): - return self - - def next(self): - # next() *need* to decrement self._times when consumed - if self._times is not None: - if self._times <= 0: - raise StopIteration() - self._times -= 1 - return self._obj - - def __repr__(self): - if self._times is not None: - return 'repeat(%r, %r)' % (self._obj, self._times) - else: - return 'repeat(%r)' % (self._obj,) - - def __len__(self): - if self._times == -1 or self._times is None: - raise TypeError("len() of uniszed object") - return self._times - - -class starmap(object): - """Make an iterator that computes the function using arguments - tuples obtained from the iterable. Used instead of imap() when - argument parameters are already grouped in tuples from a single - iterable (the data has been ``pre-zipped''). The difference - between imap() and starmap() parallels the distinction between - function(a,b) and function(*c). 
- - Equivalent to : - - def starmap(function, iterable): - iterable = iter(iterable) - while True: - yield function(*iterable.next()) - """ - def __init__(self, function, iterable): - self._func = function - self._iter = iter(iterable) - - def __iter__(self): - return self - - def next(self): - # CPython raises a TypeError when the iterator doesn't return a tuple - try: - t = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % self._iter) - if not isinstance(t, tuple): - raise TypeError("iterator must return a tuple") - return self._func(*t) - - - -class takewhile(object): - """Make an iterator that returns elements from the iterable as - long as the predicate is true. - - Equivalent to : - - def takewhile(predicate, iterable): - for x in iterable: - if predicate(x): - yield x - else: - break - """ - def __init__(self, predicate, iterable): - self._predicate = predicate - self._iter = iter(iterable) - - def __iter__(self): - return self - - def next(self): - try: - value = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._iter)) - if not self._predicate(value): - raise StopIteration() - return value - - -class TeeData(object): - """Holds cached values for TeeObjects""" - def __init__(self, iterator): - self.data = [] - self._iter = iterator - - def __getitem__(self, i): - # iterates until 'i' if not done yet - while i>= len(self.data): - try: - self.data.append( self._iter.next() ) - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % self._iter) - return self.data[i] - - -class TeeObject(object): - """Iterables / Iterators as returned by the tee() function""" - def __init__(self, iterable=None, tee_data=None): - if tee_data: - self.tee_data = tee_data - self.pos = 0 - # <=> Copy constructor - elif isinstance(iterable, TeeObject): - self.tee_data = iterable.tee_data - self.pos = iterable.pos - else: - self.tee_data = TeeData(iter(iterable)) - self.pos = 0 - - def next(self): - data = self.tee_data[self.pos] - self.pos += 1 - return data - - def __iter__(self): - return self - - - at builtinify -def tee(iterable, n=2): - """Return n independent iterators from a single iterable. - Note : once tee() has made a split, the original iterable - should not be used anywhere else; otherwise, the iterable could get - advanced without the tee objects being informed. - - Note : this member of the toolkit may require significant auxiliary - storage (depending on how much temporary data needs to be stored). 
- In general, if one iterator is going to use most or all of the - data before the other iterator, it is faster to use list() instead - of tee() - - Equivalent to : - - def tee(iterable, n=2): - def gen(next, data={}, cnt=[0]): - for i in count(): - if i == cnt[0]: - item = data[i] = next() - cnt[0] += 1 - else: - item = data.pop(i) - yield item - it = iter(iterable) - return tuple([gen(it.next) for i in range(n)]) - """ - if isinstance(iterable, TeeObject): - # a,b = tee(range(10)) ; c,d = tee(a) ; self.assert_(a is c) - return tuple([iterable] + - [TeeObject(tee_data=iterable.tee_data) for i in xrange(n-1)]) - tee_data = TeeData(iter(iterable)) - return tuple([TeeObject(tee_data=tee_data) for i in xrange(n)]) diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py new file mode 100644 --- /dev/null +++ b/lib_pypy/itertools.py @@ -0,0 +1,670 @@ +# Note that PyPy contains also a built-in module 'itertools' which will +# hide this one if compiled in. + +"""Functional tools for creating and using iterators. + +Infinite iterators: +count([n]) --> n, n+1, n+2, ... +cycle(p) --> p0, p1, ... plast, p0, p1, ... +repeat(elem [,n]) --> elem, elem, elem, ... endlessly or up to n times + +Iterators terminating on the shortest input sequence: +izip(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... +ifilter(pred, seq) --> elements of seq where pred(elem) is True +ifilterfalse(pred, seq) --> elements of seq where pred(elem) is False +islice(seq, [start,] stop [, step]) --> elements from + seq[start:stop:step] +imap(fun, p, q, ...) --> fun(p0, q0), fun(p1, q1), ... +starmap(fun, seq) --> fun(*seq[0]), fun(*seq[1]), ... +tee(it, n=2) --> (it1, it2 , ... itn) splits one iterator into n +chain(p, q, ...) --> p0, p1, ... plast, q0, q1, ... +takewhile(pred, seq) --> seq[0], seq[1], until pred fails +dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails +groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v) +""" + +__all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', + 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', + 'takewhile', 'tee', 'compress', 'product'] + +try: from __pypy__ import builtinify +except ImportError: builtinify = lambda f: f + + +class chain(object): + """Make an iterator that returns elements from the first iterable + until it is exhausted, then proceeds to the next iterable, until + all of the iterables are exhausted. Used for treating consecutive + sequences as a single sequence. 
+ + Equivalent to : + + def chain(*iterables): + for it in iterables: + for element in it: + yield element + """ + def __init__(self, *iterables): + self._iterables_iter = iter(map(iter, iterables)) + # little trick for the first chain.next() call + self._cur_iterable_iter = iter([]) + + def __iter__(self): + return self + + def next(self): + while True: + try: + return self._cur_iterable_iter.next() + except StopIteration: + self._cur_iterable_iter = self._iterables_iter.next() + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % \ + (self._cur_iterable_iter)) + + +class compress(object): + def __init__(self, data, selectors): + self.data = iter(data) + self.selectors = iter(selectors) + + def __iter__(self): + return self + + def next(self): + while True: + next_item = self.data.next() + next_selector = self.selectors.next() + if bool(next_selector): + return next_item + + +class count(object): + """Make an iterator that returns consecutive integers starting + with n. If not specified n defaults to zero. Does not currently + support python long integers. Often used as an argument to imap() + to generate consecutive data points. Also, used with izip() to + add sequence numbers. + + Equivalent to : + + def count(n=0): + if not isinstance(n, int): + raise TypeError("%s is not a regular integer" % n) + while True: + yield n + n += 1 + """ + def __init__(self, n=0): + if not isinstance(n, int): + raise TypeError('%s is not a regular integer' % n) + self.times = n-1 + + def __iter__(self): + return self + + def next(self): + self.times += 1 + return self.times + + def __repr__(self): + return 'count(%d)' % (self.times + 1) + + + +class cycle(object): + """Make an iterator returning elements from the iterable and + saving a copy of each. When the iterable is exhausted, return + elements from the saved copy. Repeats indefinitely. + + Equivalent to : + + def cycle(iterable): + saved = [] + for element in iterable: + yield element + saved.append(element) + while saved: + for element in saved: + yield element + """ + def __init__(self, iterable): + self._cur_iter = iter(iterable) + self._saved = [] + self._must_save = True + + def __iter__(self): + return self + + def next(self): + # XXX Could probably be improved + try: + next_elt = self._cur_iter.next() + if self._must_save: + self._saved.append(next_elt) + except StopIteration: + self._cur_iter = iter(self._saved) + next_elt = self._cur_iter.next() + self._must_save = False + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % \ + (self._cur_iter)) + return next_elt + + +class dropwhile(object): + """Make an iterator that drops elements from the iterable as long + as the predicate is true; afterwards, returns every + element. Note, the iterator does not produce any output until the + predicate is true, so it may have a lengthy start-up time. 
+ + Equivalent to : + + def dropwhile(predicate, iterable): + iterable = iter(iterable) + for x in iterable: + if not predicate(x): + yield x + break + for x in iterable: + yield x + """ + def __init__(self, predicate, iterable): + self._predicate = predicate + self._iter = iter(iterable) + self._dropped = False + + def __iter__(self): + return self + + def next(self): + try: + value = self._iter.next() + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % \ + (self._iter)) + if self._dropped: + return value + while self._predicate(value): + value = self._iter.next() + self._dropped = True + return value + +class groupby(object): + """Make an iterator that returns consecutive keys and groups from the + iterable. The key is a function computing a key value for each + element. If not specified or is None, key defaults to an identity + function and returns the element unchanged. Generally, the + iterable needs to already be sorted on the same key function. + + The returned group is itself an iterator that shares the + underlying iterable with groupby(). Because the source is shared, + when the groupby object is advanced, the previous group is no + longer visible. So, if that data is needed later, it should be + stored as a list: + + groups = [] + uniquekeys = [] + for k, g in groupby(data, keyfunc): + groups.append(list(g)) # Store group iterator as a list + uniquekeys.append(k) + """ + def __init__(self, iterable, key=None): + if key is None: + key = lambda x: x + self.keyfunc = key + self.it = iter(iterable) + self.tgtkey = self.currkey = self.currvalue = xrange(0) + + def __iter__(self): + return self + + def next(self): + while self.currkey == self.tgtkey: + try: + self.currvalue = self.it.next() # Exit on StopIteration + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % \ + (self.it)) + self.currkey = self.keyfunc(self.currvalue) + self.tgtkey = self.currkey + return (self.currkey, self._grouper(self.tgtkey)) + + def _grouper(self, tgtkey): + while self.currkey == tgtkey: + yield self.currvalue + self.currvalue = self.it.next() # Exit on StopIteration + self.currkey = self.keyfunc(self.currvalue) + + + +class _ifilter_base(object): + """base class for ifilter and ifilterflase""" + def __init__(self, predicate, iterable): + # Make sure iterable *IS* iterable + self._iter = iter(iterable) + if predicate is None: + self._predicate = bool + else: + self._predicate = predicate + + def __iter__(self): + return self + +class ifilter(_ifilter_base): + """Make an iterator that filters elements from iterable returning + only those for which the predicate is True. If predicate is + None, return the items that are true. + + Equivalent to : + + def ifilter: + if predicate is None: + predicate = bool + for x in iterable: + if predicate(x): + yield x + """ + def next(self): + try: + next_elt = self._iter.next() + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % \ + (self._iter)) + while True: + if self._predicate(next_elt): + return next_elt + next_elt = self._iter.next() + +class ifilterfalse(_ifilter_base): + """Make an iterator that filters elements from iterable returning + only those for which the predicate is False. If predicate is + None, return the items that are false. 
+ + Equivalent to : + + def ifilterfalse(predicate, iterable): + if predicate is None: + predicate = bool + for x in iterable: + if not predicate(x): + yield x + """ + def next(self): + try: + next_elt = self._iter.next() + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % \ + (self._iter)) + while True: + if not self._predicate(next_elt): + return next_elt + next_elt = self._iter.next() + + + + +class imap(object): + """Make an iterator that computes the function using arguments + from each of the iterables. If function is set to None, then + imap() returns the arguments as a tuple. Like map() but stops + when the shortest iterable is exhausted instead of filling in + None for shorter iterables. The reason for the difference is that + infinite iterator arguments are typically an error for map() + (because the output is fully evaluated) but represent a common + and useful way of supplying arguments to imap(). + + Equivalent to : + + def imap(function, *iterables): + iterables = map(iter, iterables) + while True: + args = [i.next() for i in iterables] + if function is None: + yield tuple(args) + else: + yield function(*args) + + """ + def __init__(self, function, iterable, *other_iterables): + self._func = function + self._iters = map(iter, (iterable, ) + other_iterables) + + def __iter__(self): + return self + + def next(self): + try: + args = [it.next() for it in self._iters] + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % \ + (it)) + if self._func is None: + return tuple(args) + else: + return self._func(*args) + + + +class islice(object): + """Make an iterator that returns selected elements from the + iterable. If start is non-zero, then elements from the iterable + are skipped until start is reached. Afterward, elements are + returned consecutively unless step is set higher than one which + results in items being skipped. If stop is None, then iteration + continues until the iterator is exhausted, if at all; otherwise, + it stops at the specified position. Unlike regular slicing, + islice() does not support negative values for start, stop, or + step. Can be used to extract related fields from data where the + internal structure has been flattened (for example, a multi-line + report may list a name field on every third line). + """ + def __init__(self, iterable, *args): + s = slice(*args) + self.start, self.stop, self.step = s.start or 0, s.stop, s.step + if not isinstance(self.start, (int, long)): + raise ValueError("Start argument must be an integer") + if self.stop is not None and not isinstance(self.stop, (int,long)): + raise ValueError("Stop argument must be an integer or None") + if self.step is None: + self.step = 1 + if self.start<0 or (self.stop is not None and self.stop<0 + ) or self.step<=0: + raise ValueError, "indices for islice() must be positive" + self.it = iter(iterable) + self.donext = None + self.cnt = 0 + + def __iter__(self): + return self + + def next(self): + if self.donext is None: + try: + self.donext = self.it.next + except AttributeError: + raise TypeError + nextindex = self.start + if self.stop is not None and nextindex >= self.stop: + raise StopIteration + while self.cnt <= nextindex: + nextitem = self.donext() + self.cnt += 1 + self.start += self.step + return nextitem + +class izip(object): + """Make an iterator that aggregates elements from each of the + iterables. 
Like zip() except that it returns an iterator instead + of a list. Used for lock-step iteration over several iterables at + a time. + + Equivalent to : + + def izip(*iterables): + iterables = map(iter, iterables) + while iterables: + result = [i.next() for i in iterables] + yield tuple(result) + """ + def __init__(self, *iterables): + self._iterators = map(iter, iterables) + self._result = [None] * len(self._iterators) + + def __iter__(self): + return self + + def next(self): + if not self._iterators: + raise StopIteration() + try: + return tuple([i.next() for i in self._iterators]) + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % (i)) + + +class product(object): + + def __init__(self, *args, **kw): + if len(kw) > 1: + raise TypeError("product() takes at most 1 argument (%d given)" % + len(kw)) + self.repeat = kw.get('repeat', 1) + self.gears = [x for x in args] * self.repeat + self.num_gears = len(self.gears) + # initialization of indicies to loop over + self.indicies = [(0, len(self.gears[x])) + for x in range(0, self.num_gears)] + self.cont = True + + def roll_gears(self): + # Starting from the end of the gear indicies work to the front + # incrementing the gear until the limit is reached. When the limit + # is reached carry operation to the next gear + should_carry = True + for n in range(0, self.num_gears): + nth_gear = self.num_gears - n - 1 + if should_carry: + count, lim = self.indicies[nth_gear] + count += 1 + if count == lim and nth_gear == 0: + self.cont = False + if count == lim: + should_carry = True + count = 0 + else: + should_carry = False + self.indicies[nth_gear] = (count, lim) + else: + break + + def __iter__(self): + return self + + def next(self): + if not self.cont: + raise StopIteration + l = [] + for x in range(0, self.num_gears): + index, limit = self.indicies[x] + l.append(self.gears[x][index]) + self.roll_gears() + return tuple(l) + + +class repeat(object): + """Make an iterator that returns object over and over again. + Runs indefinitely unless the times argument is specified. Used + as argument to imap() for invariant parameters to the called + function. Also used with izip() to create an invariant part of a + tuple record. + + Equivalent to : + + def repeat(object, times=None): + if times is None: + while True: + yield object + else: + for i in xrange(times): + yield object + """ + def __init__(self, obj, times=None): + self._obj = obj + if times is not None: + xrange(times) # Raise a TypeError + if times < 0: + times = 0 + self._times = times + + def __iter__(self): + return self + + def next(self): + # next() *need* to decrement self._times when consumed + if self._times is not None: + if self._times <= 0: + raise StopIteration() + self._times -= 1 + return self._obj + + def __repr__(self): + if self._times is not None: + return 'repeat(%r, %r)' % (self._obj, self._times) + else: + return 'repeat(%r)' % (self._obj,) + + def __len__(self): + if self._times == -1 or self._times is None: + raise TypeError("len() of uniszed object") + return self._times + + +class starmap(object): + """Make an iterator that computes the function using arguments + tuples obtained from the iterable. Used instead of imap() when + argument parameters are already grouped in tuples from a single + iterable (the data has been ``pre-zipped''). The difference + between imap() and starmap() parallels the distinction between + function(a,b) and function(*c). 
+ + Equivalent to : + + def starmap(function, iterable): + iterable = iter(iterable) + while True: + yield function(*iterable.next()) + """ + def __init__(self, function, iterable): + self._func = function + self._iter = iter(iterable) + + def __iter__(self): + return self + + def next(self): + # CPython raises a TypeError when the iterator doesn't return a tuple + try: + t = self._iter.next() + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % self._iter) + if not isinstance(t, tuple): + raise TypeError("iterator must return a tuple") + return self._func(*t) + + + +class takewhile(object): + """Make an iterator that returns elements from the iterable as + long as the predicate is true. + + Equivalent to : + + def takewhile(predicate, iterable): + for x in iterable: + if predicate(x): + yield x + else: + break + """ + def __init__(self, predicate, iterable): + self._predicate = predicate + self._iter = iter(iterable) + + def __iter__(self): + return self + + def next(self): + try: + value = self._iter.next() + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % \ + (self._iter)) + if not self._predicate(value): + raise StopIteration() + return value + + +class TeeData(object): + """Holds cached values for TeeObjects""" + def __init__(self, iterator): + self.data = [] + self._iter = iterator + + def __getitem__(self, i): + # iterates until 'i' if not done yet + while i>= len(self.data): + try: + self.data.append( self._iter.next() ) + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % self._iter) + return self.data[i] + + +class TeeObject(object): + """Iterables / Iterators as returned by the tee() function""" + def __init__(self, iterable=None, tee_data=None): + if tee_data: + self.tee_data = tee_data + self.pos = 0 + # <=> Copy constructor + elif isinstance(iterable, TeeObject): + self.tee_data = iterable.tee_data + self.pos = iterable.pos + else: + self.tee_data = TeeData(iter(iterable)) + self.pos = 0 + + def next(self): + data = self.tee_data[self.pos] + self.pos += 1 + return data + + def __iter__(self): + return self + + + at builtinify +def tee(iterable, n=2): + """Return n independent iterators from a single iterable. + Note : once tee() has made a split, the original iterable + should not be used anywhere else; otherwise, the iterable could get + advanced without the tee objects being informed. + + Note : this member of the toolkit may require significant auxiliary + storage (depending on how much temporary data needs to be stored). 
+ In general, if one iterator is going to use most or all of the + data before the other iterator, it is faster to use list() instead + of tee() + + Equivalent to : + + def tee(iterable, n=2): + def gen(next, data={}, cnt=[0]): + for i in count(): + if i == cnt[0]: + item = data[i] = next() + cnt[0] += 1 + else: + item = data.pop(i) + yield item + it = iter(iterable) + return tuple([gen(it.next) for i in range(n)]) + """ + if isinstance(iterable, TeeObject): + # a,b = tee(range(10)) ; c,d = tee(a) ; self.assert_(a is c) + return tuple([iterable] + + [TeeObject(tee_data=iterable.tee_data) for i in xrange(n-1)]) + tee_data = TeeData(iter(iterable)) + return tuple([TeeObject(tee_data=tee_data) for i in xrange(n)]) diff --git a/pypy/module/itertools/__init__.py b/pypy/module/itertools/__init__.py --- a/pypy/module/itertools/__init__.py +++ b/pypy/module/itertools/__init__.py @@ -23,7 +23,6 @@ dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v) """ - applevel_name = '__builtin_itertools' interpleveldefs = { 'chain' : 'interp_itertools.W_Chain', From noreply at buildbot.pypy.org Sun Dec 4 17:40:43 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 17:40:43 +0100 (CET) Subject: [pypy-commit] pypy default: Change GetLastError() to return Signed, and SetLastError() and Message-ID: <20111204164043.611F08205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50127:306fca1568b4 Date: 2011-12-04 16:19 +0100 http://bitbucket.org/pypy/pypy/changeset/306fca1568b4/ Log: Change GetLastError() to return Signed, and SetLastError() and FormatError() to take a Signed. diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -543,6 +543,7 @@ from pypy.rlib.rwin32 import GetLastError return space.wrap(GetLastError()) -def set_last_error(space, w_error): + at unwrap_spec(error='nonnegint') +def set_last_error(space, error): from pypy.rlib.rwin32 import SetLastError - SetLastError(space.uint_w(w_error)) + SetLastError(error) diff --git a/pypy/rlib/rwin32.py b/pypy/rlib/rwin32.py --- a/pypy/rlib/rwin32.py +++ b/pypy/rlib/rwin32.py @@ -98,8 +98,13 @@ INVALID_HANDLE_VALUE = rffi.cast(HANDLE, -1) PFILETIME = rffi.CArrayPtr(FILETIME) - GetLastError = winexternal('GetLastError', [], DWORD, threadsafe=False) - SetLastError = winexternal('SetLastError', [DWORD], lltype.Void) + _GetLastError = winexternal('GetLastError', [], DWORD, threadsafe=False) + _SetLastError = winexternal('SetLastError', [DWORD], lltype.Void) + + def GetLastError(): + return rffi.cast(lltype.Signed, _GetLastError()) + def SetLastError(err): + _SetLastError(rffi.cast(DWORD, err)) # In tests, the first call to GetLastError is always wrong, because error # is hidden by operations in ll2ctypes. Call it now. @@ -184,12 +189,12 @@ msglen = FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, None, - code, + rffi.cast(DWORD, code), DEFAULT_LANGUAGE, rffi.cast(rffi.CCHARP, buf), 0, None) - if msglen <= 2 or msglen > sys.maxint: + if msglen <= 2: # includes the case msglen < 0 return fake_FormatError(code) # FormatMessage always appends \r\n. 
diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -1773,7 +1773,7 @@ @registering(rwin32.FormatError) def register_rwin32_FormatError(self): - return extdef([rwin32.DWORD], str, + return extdef([lltype.Signed], str, "rwin32_FormatError", llimpl=rwin32.llimpl_FormatError, ooimpl=rwin32.fake_FormatError) From noreply at buildbot.pypy.org Sun Dec 4 17:40:44 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 17:40:44 +0100 (CET) Subject: [pypy-commit] pypy default: Don't care about forbidding negative values. The issue is that Win32 really uses all 32 bits for its error codes... Message-ID: <20111204164044.8A4A282ABA@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50128:a3afe9f1db72 Date: 2011-12-04 16:38 +0100 http://bitbucket.org/pypy/pypy/changeset/a3afe9f1db72/ Log: Don't care about forbidding negative values. The issue is that Win32 really uses all 32 bits for its error codes... diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -543,7 +543,7 @@ from pypy.rlib.rwin32 import GetLastError return space.wrap(GetLastError()) - at unwrap_spec(error='nonnegint') + at unwrap_spec(error=int) def set_last_error(space, error): from pypy.rlib.rwin32 import SetLastError SetLastError(error) From noreply at buildbot.pypy.org Sun Dec 4 17:40:45 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 17:40:45 +0100 (CET) Subject: [pypy-commit] pypy default: Fix Message-ID: <20111204164045.BA3FB82ABB@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50129:ba8b7187ed70 Date: 2011-12-04 17:01 +0100 http://bitbucket.org/pypy/pypy/changeset/ba8b7187ed70/ Log: Fix diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py --- a/pypy/module/_multiprocessing/interp_semaphore.py +++ b/pypy/module/_multiprocessing/interp_semaphore.py @@ -235,7 +235,7 @@ elif timeout >= 0.5 * rwin32.INFINITE: # 25 days raise OperationError(space.w_OverflowError, space.wrap("timeout is too large")) - full_msecs = int(timeout + 0.5) + full_msecs = r_uint(int(timeout + 0.5)) # check whether we can acquire without blocking res = rwin32.WaitForSingleObject(self.handle, 0) @@ -243,7 +243,7 @@ if res != rwin32.WAIT_TIMEOUT: return True - msecs = r_uint(full_msecs) + msecs = full_msecs start = _GetTickCount() while True: @@ -269,7 +269,7 @@ ticks = _GetTickCount() if r_uint(ticks - start) >= full_msecs: return False - msecs = r_uint(full_msecs - (ticks - start)) + msecs = full_msecs - r_uint(ticks - start) # handle result if res != rwin32.WAIT_TIMEOUT: From noreply at buildbot.pypy.org Sun Dec 4 17:56:35 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 Dec 2011 17:56:35 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: these might fail too? Message-ID: <20111204165635.6B3C88205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50130:d5fd12021c6f Date: 2011-12-04 14:00 +0100 http://bitbucket.org/pypy/pypy/changeset/d5fd12021c6f/ Log: these might fail too? 
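The three hunks below turn "assert isinstance(value, ...)" into an explicit raise of BadVirtualState, the same exception the surrounding code already raises when the value is not virtual, so a class mismatch can presumably be handled by the retracing code like any other bad virtual state rather than failing an assert. A minimal sketch of the pattern in plain Python; BadVirtualState is the name taken from the diff, while the check_is helper is only illustrative and not part of the changeset:

    class BadVirtualState(Exception):
        # signals that a runtime value does not fit the recorded virtual state
        pass

    def check_is(value, expected_cls):
        # guard instead of assert: callers can catch BadVirtualState and
        # fall back, whereas a failed assert would simply abort
        if not isinstance(value, expected_cls):
            raise BadVirtualState
        return value

    # usage sketch: value = check_is(value, VArrayValue) before reading
    # VArrayValue-specific fields
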
diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -106,7 +106,8 @@ raise NotImplementedError def enum_forced_boxes(self, boxes, value, optimizer): - assert isinstance(value, virtualize.AbstractVirtualStructValue) + if not isinstance(value, virtualize.AbstractVirtualStructValue): + raise BadVirtualState if not value.is_virtual(): raise BadVirtualState for i in range(len(self.fielddescrs)): @@ -187,7 +188,8 @@ self.arraydescr is other.arraydescr) def enum_forced_boxes(self, boxes, value, optimizer): - assert isinstance(value, virtualize.VArrayValue) + if not isinstance(value, virtualize.VArrayValue): + raise BadVirtualState if not value.is_virtual(): raise BadVirtualState for i in range(len(self.fieldstate)): @@ -259,7 +261,8 @@ s.enum(virtual_state) def enum_forced_boxes(self, boxes, value, optimizer): - assert isinstance(value, virtualize.VArrayStructValue) + if not isinstance(value, virtualize.VArrayStructValue): + raise BadVirtualState if not value.is_virtual(): raise BadVirtualState p = 0 From noreply at buildbot.pypy.org Sun Dec 4 17:56:36 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 Dec 2011 17:56:36 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: passing tests Message-ID: <20111204165636.A422D8205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50131:2bd815433960 Date: 2011-12-04 17:51 +0100 http://bitbucket.org/pypy/pypy/changeset/2bd815433960/ Log: passing tests diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -113,6 +113,32 @@ with raises(InvalidLoop): self.optimize_loop(ops, ops) + def test_nonmatching_arraystruct_1(self): + ops = """ + [p1, f0] + p2 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p2, 2, f0, descr=complexrealdescr) + label(p2, f0) + p4 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p4, 2, f0, descr=compleximagdescr) + jump(p4, f0) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_nonmatching_arraystruct_2(self): + ops = """ + [p1, f0] + p2 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p2, 2, f0, descr=complexrealdescr) + label(p2, f0) + p4 = new_array(2, descr=complexarraydescr) + setinteriorfield_gc(p4, 0, f0, descr=complexrealdescr) + jump(p4, f0) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + class TestLLtype(BaseTestMultiLabel, LLtypeMixin): pass From noreply at buildbot.pypy.org Sun Dec 4 17:56:37 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 Dec 2011 17:56:37 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: tests for d5fd12021c6f Message-ID: <20111204165637.DAD328205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50132:9e092e671cd9 Date: 2011-12-04 17:55 +0100 http://bitbucket.org/pypy/pypy/changeset/9e092e671cd9/ Log: tests for d5fd12021c6f diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -138,6 +138,40 @@ """ with raises(InvalidLoop): self.optimize_loop(ops, ops) + + def 
test_not_virtual(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual_array(self): + ops = """ + [p1] + p3 = new_array(3, descr=arraydescr) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual_arraystruct(self): + ops = """ + [p1] + p3 = new_array(3, descr=complexarraydescr) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + class TestLLtype(BaseTestMultiLabel, LLtypeMixin): From noreply at buildbot.pypy.org Sun Dec 4 18:04:21 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Sun, 4 Dec 2011 18:04:21 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: merge default Message-ID: <20111204170421.3E2AA8205C@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50133:7450e63f5a36 Date: 2011-12-04 17:41 +0100 http://bitbucket.org/pypy/pypy/changeset/7450e63f5a36/ Log: merge default diff --git a/lib_pypy/_itertools.py b/lib_pypy/_itertools.py deleted file mode 100644 --- a/lib_pypy/_itertools.py +++ /dev/null @@ -1,670 +0,0 @@ -# Note that PyPy contains also a built-in implementation of 'itertools'; -# when translated with default options, this one is not used. - -"""Functional tools for creating and using iterators. - -Infinite iterators: -count([n]) --> n, n+1, n+2, ... -cycle(p) --> p0, p1, ... plast, p0, p1, ... -repeat(elem [,n]) --> elem, elem, elem, ... endlessly or up to n times - -Iterators terminating on the shortest input sequence: -izip(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... -ifilter(pred, seq) --> elements of seq where pred(elem) is True -ifilterfalse(pred, seq) --> elements of seq where pred(elem) is False -islice(seq, [start,] stop [, step]) --> elements from - seq[start:stop:step] -imap(fun, p, q, ...) --> fun(p0, q0), fun(p1, q1), ... -starmap(fun, seq) --> fun(*seq[0]), fun(*seq[1]), ... -tee(it, n=2) --> (it1, it2 , ... itn) splits one iterator into n -chain(p, q, ...) --> p0, p1, ... plast, q0, q1, ... -takewhile(pred, seq) --> seq[0], seq[1], until pred fails -dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails -groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v) -""" - -__all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', - 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee', 'compress', 'product'] - -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f - - -class chain(object): - """Make an iterator that returns elements from the first iterable - until it is exhausted, then proceeds to the next iterable, until - all of the iterables are exhausted. Used for treating consecutive - sequences as a single sequence. 
- - Equivalent to : - - def chain(*iterables): - for it in iterables: - for element in it: - yield element - """ - def __init__(self, *iterables): - self._iterables_iter = iter(map(iter, iterables)) - # little trick for the first chain.next() call - self._cur_iterable_iter = iter([]) - - def __iter__(self): - return self - - def next(self): - while True: - try: - return self._cur_iterable_iter.next() - except StopIteration: - self._cur_iterable_iter = self._iterables_iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._cur_iterable_iter)) - - -class compress(object): - def __init__(self, data, selectors): - self.data = iter(data) - self.selectors = iter(selectors) - - def __iter__(self): - return self - - def next(self): - while True: - next_item = self.data.next() - next_selector = self.selectors.next() - if bool(next_selector): - return next_item - - -class count(object): - """Make an iterator that returns consecutive integers starting - with n. If not specified n defaults to zero. Does not currently - support python long integers. Often used as an argument to imap() - to generate consecutive data points. Also, used with izip() to - add sequence numbers. - - Equivalent to : - - def count(n=0): - if not isinstance(n, int): - raise TypeError("%s is not a regular integer" % n) - while True: - yield n - n += 1 - """ - def __init__(self, n=0): - if not isinstance(n, int): - raise TypeError('%s is not a regular integer' % n) - self.times = n-1 - - def __iter__(self): - return self - - def next(self): - self.times += 1 - return self.times - - def __repr__(self): - return 'count(%d)' % (self.times + 1) - - - -class cycle(object): - """Make an iterator returning elements from the iterable and - saving a copy of each. When the iterable is exhausted, return - elements from the saved copy. Repeats indefinitely. - - Equivalent to : - - def cycle(iterable): - saved = [] - for element in iterable: - yield element - saved.append(element) - while saved: - for element in saved: - yield element - """ - def __init__(self, iterable): - self._cur_iter = iter(iterable) - self._saved = [] - self._must_save = True - - def __iter__(self): - return self - - def next(self): - # XXX Could probably be improved - try: - next_elt = self._cur_iter.next() - if self._must_save: - self._saved.append(next_elt) - except StopIteration: - self._cur_iter = iter(self._saved) - next_elt = self._cur_iter.next() - self._must_save = False - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._cur_iter)) - return next_elt - - -class dropwhile(object): - """Make an iterator that drops elements from the iterable as long - as the predicate is true; afterwards, returns every - element. Note, the iterator does not produce any output until the - predicate is true, so it may have a lengthy start-up time. 
- - Equivalent to : - - def dropwhile(predicate, iterable): - iterable = iter(iterable) - for x in iterable: - if not predicate(x): - yield x - break - for x in iterable: - yield x - """ - def __init__(self, predicate, iterable): - self._predicate = predicate - self._iter = iter(iterable) - self._dropped = False - - def __iter__(self): - return self - - def next(self): - try: - value = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._iter)) - if self._dropped: - return value - while self._predicate(value): - value = self._iter.next() - self._dropped = True - return value - -class groupby(object): - """Make an iterator that returns consecutive keys and groups from the - iterable. The key is a function computing a key value for each - element. If not specified or is None, key defaults to an identity - function and returns the element unchanged. Generally, the - iterable needs to already be sorted on the same key function. - - The returned group is itself an iterator that shares the - underlying iterable with groupby(). Because the source is shared, - when the groupby object is advanced, the previous group is no - longer visible. So, if that data is needed later, it should be - stored as a list: - - groups = [] - uniquekeys = [] - for k, g in groupby(data, keyfunc): - groups.append(list(g)) # Store group iterator as a list - uniquekeys.append(k) - """ - def __init__(self, iterable, key=None): - if key is None: - key = lambda x: x - self.keyfunc = key - self.it = iter(iterable) - self.tgtkey = self.currkey = self.currvalue = xrange(0) - - def __iter__(self): - return self - - def next(self): - while self.currkey == self.tgtkey: - try: - self.currvalue = self.it.next() # Exit on StopIteration - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self.it)) - self.currkey = self.keyfunc(self.currvalue) - self.tgtkey = self.currkey - return (self.currkey, self._grouper(self.tgtkey)) - - def _grouper(self, tgtkey): - while self.currkey == tgtkey: - yield self.currvalue - self.currvalue = self.it.next() # Exit on StopIteration - self.currkey = self.keyfunc(self.currvalue) - - - -class _ifilter_base(object): - """base class for ifilter and ifilterflase""" - def __init__(self, predicate, iterable): - # Make sure iterable *IS* iterable - self._iter = iter(iterable) - if predicate is None: - self._predicate = bool - else: - self._predicate = predicate - - def __iter__(self): - return self - -class ifilter(_ifilter_base): - """Make an iterator that filters elements from iterable returning - only those for which the predicate is True. If predicate is - None, return the items that are true. - - Equivalent to : - - def ifilter: - if predicate is None: - predicate = bool - for x in iterable: - if predicate(x): - yield x - """ - def next(self): - try: - next_elt = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._iter)) - while True: - if self._predicate(next_elt): - return next_elt - next_elt = self._iter.next() - -class ifilterfalse(_ifilter_base): - """Make an iterator that filters elements from iterable returning - only those for which the predicate is False. If predicate is - None, return the items that are false. 
- - Equivalent to : - - def ifilterfalse(predicate, iterable): - if predicate is None: - predicate = bool - for x in iterable: - if not predicate(x): - yield x - """ - def next(self): - try: - next_elt = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._iter)) - while True: - if not self._predicate(next_elt): - return next_elt - next_elt = self._iter.next() - - - - -class imap(object): - """Make an iterator that computes the function using arguments - from each of the iterables. If function is set to None, then - imap() returns the arguments as a tuple. Like map() but stops - when the shortest iterable is exhausted instead of filling in - None for shorter iterables. The reason for the difference is that - infinite iterator arguments are typically an error for map() - (because the output is fully evaluated) but represent a common - and useful way of supplying arguments to imap(). - - Equivalent to : - - def imap(function, *iterables): - iterables = map(iter, iterables) - while True: - args = [i.next() for i in iterables] - if function is None: - yield tuple(args) - else: - yield function(*args) - - """ - def __init__(self, function, iterable, *other_iterables): - self._func = function - self._iters = map(iter, (iterable, ) + other_iterables) - - def __iter__(self): - return self - - def next(self): - try: - args = [it.next() for it in self._iters] - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (it)) - if self._func is None: - return tuple(args) - else: - return self._func(*args) - - - -class islice(object): - """Make an iterator that returns selected elements from the - iterable. If start is non-zero, then elements from the iterable - are skipped until start is reached. Afterward, elements are - returned consecutively unless step is set higher than one which - results in items being skipped. If stop is None, then iteration - continues until the iterator is exhausted, if at all; otherwise, - it stops at the specified position. Unlike regular slicing, - islice() does not support negative values for start, stop, or - step. Can be used to extract related fields from data where the - internal structure has been flattened (for example, a multi-line - report may list a name field on every third line). - """ - def __init__(self, iterable, *args): - s = slice(*args) - self.start, self.stop, self.step = s.start or 0, s.stop, s.step - if not isinstance(self.start, (int, long)): - raise ValueError("Start argument must be an integer") - if self.stop is not None and not isinstance(self.stop, (int,long)): - raise ValueError("Stop argument must be an integer or None") - if self.step is None: - self.step = 1 - if self.start<0 or (self.stop is not None and self.stop<0 - ) or self.step<=0: - raise ValueError, "indices for islice() must be positive" - self.it = iter(iterable) - self.donext = None - self.cnt = 0 - - def __iter__(self): - return self - - def next(self): - if self.donext is None: - try: - self.donext = self.it.next - except AttributeError: - raise TypeError - nextindex = self.start - if self.stop is not None and nextindex >= self.stop: - raise StopIteration - while self.cnt <= nextindex: - nextitem = self.donext() - self.cnt += 1 - self.start += self.step - return nextitem - -class izip(object): - """Make an iterator that aggregates elements from each of the - iterables. 
Like zip() except that it returns an iterator instead - of a list. Used for lock-step iteration over several iterables at - a time. - - Equivalent to : - - def izip(*iterables): - iterables = map(iter, iterables) - while iterables: - result = [i.next() for i in iterables] - yield tuple(result) - """ - def __init__(self, *iterables): - self._iterators = map(iter, iterables) - self._result = [None] * len(self._iterators) - - def __iter__(self): - return self - - def next(self): - if not self._iterators: - raise StopIteration() - try: - return tuple([i.next() for i in self._iterators]) - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % (i)) - - -class product(object): - - def __init__(self, *args, **kw): - if len(kw) > 1: - raise TypeError("product() takes at most 1 argument (%d given)" % - len(kw)) - self.repeat = kw.get('repeat', 1) - self.gears = [x for x in args] * self.repeat - self.num_gears = len(self.gears) - # initialization of indicies to loop over - self.indicies = [(0, len(self.gears[x])) - for x in range(0, self.num_gears)] - self.cont = True - - def roll_gears(self): - # Starting from the end of the gear indicies work to the front - # incrementing the gear until the limit is reached. When the limit - # is reached carry operation to the next gear - should_carry = True - for n in range(0, self.num_gears): - nth_gear = self.num_gears - n - 1 - if should_carry: - count, lim = self.indicies[nth_gear] - count += 1 - if count == lim and nth_gear == 0: - self.cont = False - if count == lim: - should_carry = True - count = 0 - else: - should_carry = False - self.indicies[nth_gear] = (count, lim) - else: - break - - def __iter__(self): - return self - - def next(self): - if not self.cont: - raise StopIteration - l = [] - for x in range(0, self.num_gears): - index, limit = self.indicies[x] - l.append(self.gears[x][index]) - self.roll_gears() - return tuple(l) - - -class repeat(object): - """Make an iterator that returns object over and over again. - Runs indefinitely unless the times argument is specified. Used - as argument to imap() for invariant parameters to the called - function. Also used with izip() to create an invariant part of a - tuple record. - - Equivalent to : - - def repeat(object, times=None): - if times is None: - while True: - yield object - else: - for i in xrange(times): - yield object - """ - def __init__(self, obj, times=None): - self._obj = obj - if times is not None: - xrange(times) # Raise a TypeError - if times < 0: - times = 0 - self._times = times - - def __iter__(self): - return self - - def next(self): - # next() *need* to decrement self._times when consumed - if self._times is not None: - if self._times <= 0: - raise StopIteration() - self._times -= 1 - return self._obj - - def __repr__(self): - if self._times is not None: - return 'repeat(%r, %r)' % (self._obj, self._times) - else: - return 'repeat(%r)' % (self._obj,) - - def __len__(self): - if self._times == -1 or self._times is None: - raise TypeError("len() of uniszed object") - return self._times - - -class starmap(object): - """Make an iterator that computes the function using arguments - tuples obtained from the iterable. Used instead of imap() when - argument parameters are already grouped in tuples from a single - iterable (the data has been ``pre-zipped''). The difference - between imap() and starmap() parallels the distinction between - function(a,b) and function(*c). 
- - Equivalent to : - - def starmap(function, iterable): - iterable = iter(iterable) - while True: - yield function(*iterable.next()) - """ - def __init__(self, function, iterable): - self._func = function - self._iter = iter(iterable) - - def __iter__(self): - return self - - def next(self): - # CPython raises a TypeError when the iterator doesn't return a tuple - try: - t = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % self._iter) - if not isinstance(t, tuple): - raise TypeError("iterator must return a tuple") - return self._func(*t) - - - -class takewhile(object): - """Make an iterator that returns elements from the iterable as - long as the predicate is true. - - Equivalent to : - - def takewhile(predicate, iterable): - for x in iterable: - if predicate(x): - yield x - else: - break - """ - def __init__(self, predicate, iterable): - self._predicate = predicate - self._iter = iter(iterable) - - def __iter__(self): - return self - - def next(self): - try: - value = self._iter.next() - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % \ - (self._iter)) - if not self._predicate(value): - raise StopIteration() - return value - - -class TeeData(object): - """Holds cached values for TeeObjects""" - def __init__(self, iterator): - self.data = [] - self._iter = iterator - - def __getitem__(self, i): - # iterates until 'i' if not done yet - while i>= len(self.data): - try: - self.data.append( self._iter.next() ) - except AttributeError: - # CPython raises a TypeError when next() is not defined - raise TypeError('%s has no next() method' % self._iter) - return self.data[i] - - -class TeeObject(object): - """Iterables / Iterators as returned by the tee() function""" - def __init__(self, iterable=None, tee_data=None): - if tee_data: - self.tee_data = tee_data - self.pos = 0 - # <=> Copy constructor - elif isinstance(iterable, TeeObject): - self.tee_data = iterable.tee_data - self.pos = iterable.pos - else: - self.tee_data = TeeData(iter(iterable)) - self.pos = 0 - - def next(self): - data = self.tee_data[self.pos] - self.pos += 1 - return data - - def __iter__(self): - return self - - - at builtinify -def tee(iterable, n=2): - """Return n independent iterators from a single iterable. - Note : once tee() has made a split, the original iterable - should not be used anywhere else; otherwise, the iterable could get - advanced without the tee objects being informed. - - Note : this member of the toolkit may require significant auxiliary - storage (depending on how much temporary data needs to be stored). 
- In general, if one iterator is going to use most or all of the - data before the other iterator, it is faster to use list() instead - of tee() - - Equivalent to : - - def tee(iterable, n=2): - def gen(next, data={}, cnt=[0]): - for i in count(): - if i == cnt[0]: - item = data[i] = next() - cnt[0] += 1 - else: - item = data.pop(i) - yield item - it = iter(iterable) - return tuple([gen(it.next) for i in range(n)]) - """ - if isinstance(iterable, TeeObject): - # a,b = tee(range(10)) ; c,d = tee(a) ; self.assert_(a is c) - return tuple([iterable] + - [TeeObject(tee_data=iterable.tee_data) for i in xrange(n-1)]) - tee_data = TeeData(iter(iterable)) - return tuple([TeeObject(tee_data=tee_data) for i in xrange(n)]) diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# -*- coding: iso-8859-1 +# -*- coding: iso-8859-1 -*- # Note that PyPy contains also a built-in module 'sha' which will hide # this one if compiled in. diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -1,6 +1,670 @@ -try: - from __builtin_itertools import * - from __builtin_itertools import __doc__ -except ImportError: - from _itertools import * - from _itertools import __doc__ +# Note that PyPy contains also a built-in module 'itertools' which will +# hide this one if compiled in. + +"""Functional tools for creating and using iterators. + +Infinite iterators: +count([n]) --> n, n+1, n+2, ... +cycle(p) --> p0, p1, ... plast, p0, p1, ... +repeat(elem [,n]) --> elem, elem, elem, ... endlessly or up to n times + +Iterators terminating on the shortest input sequence: +izip(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... +ifilter(pred, seq) --> elements of seq where pred(elem) is True +ifilterfalse(pred, seq) --> elements of seq where pred(elem) is False +islice(seq, [start,] stop [, step]) --> elements from + seq[start:stop:step] +imap(fun, p, q, ...) --> fun(p0, q0), fun(p1, q1), ... +starmap(fun, seq) --> fun(*seq[0]), fun(*seq[1]), ... +tee(it, n=2) --> (it1, it2 , ... itn) splits one iterator into n +chain(p, q, ...) --> p0, p1, ... plast, q0, q1, ... +takewhile(pred, seq) --> seq[0], seq[1], until pred fails +dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails +groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v) +""" + +__all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', + 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', + 'takewhile', 'tee', 'compress', 'product'] + +try: from __pypy__ import builtinify +except ImportError: builtinify = lambda f: f + + +class chain(object): + """Make an iterator that returns elements from the first iterable + until it is exhausted, then proceeds to the next iterable, until + all of the iterables are exhausted. Used for treating consecutive + sequences as a single sequence. 
+ + Equivalent to : + + def chain(*iterables): + for it in iterables: + for element in it: + yield element + """ + def __init__(self, *iterables): + self._iterables_iter = iter(map(iter, iterables)) + # little trick for the first chain.next() call + self._cur_iterable_iter = iter([]) + + def __iter__(self): + return self + + def next(self): + while True: + try: + return self._cur_iterable_iter.next() + except StopIteration: + self._cur_iterable_iter = self._iterables_iter.next() + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % \ + (self._cur_iterable_iter)) + + +class compress(object): + def __init__(self, data, selectors): + self.data = iter(data) + self.selectors = iter(selectors) + + def __iter__(self): + return self + + def next(self): + while True: + next_item = self.data.next() + next_selector = self.selectors.next() + if bool(next_selector): + return next_item + + +class count(object): + """Make an iterator that returns consecutive integers starting + with n. If not specified n defaults to zero. Does not currently + support python long integers. Often used as an argument to imap() + to generate consecutive data points. Also, used with izip() to + add sequence numbers. + + Equivalent to : + + def count(n=0): + if not isinstance(n, int): + raise TypeError("%s is not a regular integer" % n) + while True: + yield n + n += 1 + """ + def __init__(self, n=0): + if not isinstance(n, int): + raise TypeError('%s is not a regular integer' % n) + self.times = n-1 + + def __iter__(self): + return self + + def next(self): + self.times += 1 + return self.times + + def __repr__(self): + return 'count(%d)' % (self.times + 1) + + + +class cycle(object): + """Make an iterator returning elements from the iterable and + saving a copy of each. When the iterable is exhausted, return + elements from the saved copy. Repeats indefinitely. + + Equivalent to : + + def cycle(iterable): + saved = [] + for element in iterable: + yield element + saved.append(element) + while saved: + for element in saved: + yield element + """ + def __init__(self, iterable): + self._cur_iter = iter(iterable) + self._saved = [] + self._must_save = True + + def __iter__(self): + return self + + def next(self): + # XXX Could probably be improved + try: + next_elt = self._cur_iter.next() + if self._must_save: + self._saved.append(next_elt) + except StopIteration: + self._cur_iter = iter(self._saved) + next_elt = self._cur_iter.next() + self._must_save = False + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % \ + (self._cur_iter)) + return next_elt + + +class dropwhile(object): + """Make an iterator that drops elements from the iterable as long + as the predicate is true; afterwards, returns every + element. Note, the iterator does not produce any output until the + predicate is true, so it may have a lengthy start-up time. 
+ + Equivalent to : + + def dropwhile(predicate, iterable): + iterable = iter(iterable) + for x in iterable: + if not predicate(x): + yield x + break + for x in iterable: + yield x + """ + def __init__(self, predicate, iterable): + self._predicate = predicate + self._iter = iter(iterable) + self._dropped = False + + def __iter__(self): + return self + + def next(self): + try: + value = self._iter.next() + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % \ + (self._iter)) + if self._dropped: + return value + while self._predicate(value): + value = self._iter.next() + self._dropped = True + return value + +class groupby(object): + """Make an iterator that returns consecutive keys and groups from the + iterable. The key is a function computing a key value for each + element. If not specified or is None, key defaults to an identity + function and returns the element unchanged. Generally, the + iterable needs to already be sorted on the same key function. + + The returned group is itself an iterator that shares the + underlying iterable with groupby(). Because the source is shared, + when the groupby object is advanced, the previous group is no + longer visible. So, if that data is needed later, it should be + stored as a list: + + groups = [] + uniquekeys = [] + for k, g in groupby(data, keyfunc): + groups.append(list(g)) # Store group iterator as a list + uniquekeys.append(k) + """ + def __init__(self, iterable, key=None): + if key is None: + key = lambda x: x + self.keyfunc = key + self.it = iter(iterable) + self.tgtkey = self.currkey = self.currvalue = xrange(0) + + def __iter__(self): + return self + + def next(self): + while self.currkey == self.tgtkey: + try: + self.currvalue = self.it.next() # Exit on StopIteration + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % \ + (self.it)) + self.currkey = self.keyfunc(self.currvalue) + self.tgtkey = self.currkey + return (self.currkey, self._grouper(self.tgtkey)) + + def _grouper(self, tgtkey): + while self.currkey == tgtkey: + yield self.currvalue + self.currvalue = self.it.next() # Exit on StopIteration + self.currkey = self.keyfunc(self.currvalue) + + + +class _ifilter_base(object): + """base class for ifilter and ifilterflase""" + def __init__(self, predicate, iterable): + # Make sure iterable *IS* iterable + self._iter = iter(iterable) + if predicate is None: + self._predicate = bool + else: + self._predicate = predicate + + def __iter__(self): + return self + +class ifilter(_ifilter_base): + """Make an iterator that filters elements from iterable returning + only those for which the predicate is True. If predicate is + None, return the items that are true. + + Equivalent to : + + def ifilter: + if predicate is None: + predicate = bool + for x in iterable: + if predicate(x): + yield x + """ + def next(self): + try: + next_elt = self._iter.next() + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % \ + (self._iter)) + while True: + if self._predicate(next_elt): + return next_elt + next_elt = self._iter.next() + +class ifilterfalse(_ifilter_base): + """Make an iterator that filters elements from iterable returning + only those for which the predicate is False. If predicate is + None, return the items that are false. 
+ + Equivalent to : + + def ifilterfalse(predicate, iterable): + if predicate is None: + predicate = bool + for x in iterable: + if not predicate(x): + yield x + """ + def next(self): + try: + next_elt = self._iter.next() + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % \ + (self._iter)) + while True: + if not self._predicate(next_elt): + return next_elt + next_elt = self._iter.next() + + + + +class imap(object): + """Make an iterator that computes the function using arguments + from each of the iterables. If function is set to None, then + imap() returns the arguments as a tuple. Like map() but stops + when the shortest iterable is exhausted instead of filling in + None for shorter iterables. The reason for the difference is that + infinite iterator arguments are typically an error for map() + (because the output is fully evaluated) but represent a common + and useful way of supplying arguments to imap(). + + Equivalent to : + + def imap(function, *iterables): + iterables = map(iter, iterables) + while True: + args = [i.next() for i in iterables] + if function is None: + yield tuple(args) + else: + yield function(*args) + + """ + def __init__(self, function, iterable, *other_iterables): + self._func = function + self._iters = map(iter, (iterable, ) + other_iterables) + + def __iter__(self): + return self + + def next(self): + try: + args = [it.next() for it in self._iters] + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % \ + (it)) + if self._func is None: + return tuple(args) + else: + return self._func(*args) + + + +class islice(object): + """Make an iterator that returns selected elements from the + iterable. If start is non-zero, then elements from the iterable + are skipped until start is reached. Afterward, elements are + returned consecutively unless step is set higher than one which + results in items being skipped. If stop is None, then iteration + continues until the iterator is exhausted, if at all; otherwise, + it stops at the specified position. Unlike regular slicing, + islice() does not support negative values for start, stop, or + step. Can be used to extract related fields from data where the + internal structure has been flattened (for example, a multi-line + report may list a name field on every third line). + """ + def __init__(self, iterable, *args): + s = slice(*args) + self.start, self.stop, self.step = s.start or 0, s.stop, s.step + if not isinstance(self.start, (int, long)): + raise ValueError("Start argument must be an integer") + if self.stop is not None and not isinstance(self.stop, (int,long)): + raise ValueError("Stop argument must be an integer or None") + if self.step is None: + self.step = 1 + if self.start<0 or (self.stop is not None and self.stop<0 + ) or self.step<=0: + raise ValueError, "indices for islice() must be positive" + self.it = iter(iterable) + self.donext = None + self.cnt = 0 + + def __iter__(self): + return self + + def next(self): + if self.donext is None: + try: + self.donext = self.it.next + except AttributeError: + raise TypeError + nextindex = self.start + if self.stop is not None and nextindex >= self.stop: + raise StopIteration + while self.cnt <= nextindex: + nextitem = self.donext() + self.cnt += 1 + self.start += self.step + return nextitem + +class izip(object): + """Make an iterator that aggregates elements from each of the + iterables. 
Like zip() except that it returns an iterator instead + of a list. Used for lock-step iteration over several iterables at + a time. + + Equivalent to : + + def izip(*iterables): + iterables = map(iter, iterables) + while iterables: + result = [i.next() for i in iterables] + yield tuple(result) + """ + def __init__(self, *iterables): + self._iterators = map(iter, iterables) + self._result = [None] * len(self._iterators) + + def __iter__(self): + return self + + def next(self): + if not self._iterators: + raise StopIteration() + try: + return tuple([i.next() for i in self._iterators]) + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % (i)) + + +class product(object): + + def __init__(self, *args, **kw): + if len(kw) > 1: + raise TypeError("product() takes at most 1 argument (%d given)" % + len(kw)) + self.repeat = kw.get('repeat', 1) + self.gears = [x for x in args] * self.repeat + self.num_gears = len(self.gears) + # initialization of indicies to loop over + self.indicies = [(0, len(self.gears[x])) + for x in range(0, self.num_gears)] + self.cont = True + + def roll_gears(self): + # Starting from the end of the gear indicies work to the front + # incrementing the gear until the limit is reached. When the limit + # is reached carry operation to the next gear + should_carry = True + for n in range(0, self.num_gears): + nth_gear = self.num_gears - n - 1 + if should_carry: + count, lim = self.indicies[nth_gear] + count += 1 + if count == lim and nth_gear == 0: + self.cont = False + if count == lim: + should_carry = True + count = 0 + else: + should_carry = False + self.indicies[nth_gear] = (count, lim) + else: + break + + def __iter__(self): + return self + + def next(self): + if not self.cont: + raise StopIteration + l = [] + for x in range(0, self.num_gears): + index, limit = self.indicies[x] + l.append(self.gears[x][index]) + self.roll_gears() + return tuple(l) + + +class repeat(object): + """Make an iterator that returns object over and over again. + Runs indefinitely unless the times argument is specified. Used + as argument to imap() for invariant parameters to the called + function. Also used with izip() to create an invariant part of a + tuple record. + + Equivalent to : + + def repeat(object, times=None): + if times is None: + while True: + yield object + else: + for i in xrange(times): + yield object + """ + def __init__(self, obj, times=None): + self._obj = obj + if times is not None: + xrange(times) # Raise a TypeError + if times < 0: + times = 0 + self._times = times + + def __iter__(self): + return self + + def next(self): + # next() *need* to decrement self._times when consumed + if self._times is not None: + if self._times <= 0: + raise StopIteration() + self._times -= 1 + return self._obj + + def __repr__(self): + if self._times is not None: + return 'repeat(%r, %r)' % (self._obj, self._times) + else: + return 'repeat(%r)' % (self._obj,) + + def __len__(self): + if self._times == -1 or self._times is None: + raise TypeError("len() of uniszed object") + return self._times + + +class starmap(object): + """Make an iterator that computes the function using arguments + tuples obtained from the iterable. Used instead of imap() when + argument parameters are already grouped in tuples from a single + iterable (the data has been ``pre-zipped''). The difference + between imap() and starmap() parallels the distinction between + function(a,b) and function(*c). 
+ + Equivalent to : + + def starmap(function, iterable): + iterable = iter(iterable) + while True: + yield function(*iterable.next()) + """ + def __init__(self, function, iterable): + self._func = function + self._iter = iter(iterable) + + def __iter__(self): + return self + + def next(self): + # CPython raises a TypeError when the iterator doesn't return a tuple + try: + t = self._iter.next() + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % self._iter) + if not isinstance(t, tuple): + raise TypeError("iterator must return a tuple") + return self._func(*t) + + + +class takewhile(object): + """Make an iterator that returns elements from the iterable as + long as the predicate is true. + + Equivalent to : + + def takewhile(predicate, iterable): + for x in iterable: + if predicate(x): + yield x + else: + break + """ + def __init__(self, predicate, iterable): + self._predicate = predicate + self._iter = iter(iterable) + + def __iter__(self): + return self + + def next(self): + try: + value = self._iter.next() + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % \ + (self._iter)) + if not self._predicate(value): + raise StopIteration() + return value + + +class TeeData(object): + """Holds cached values for TeeObjects""" + def __init__(self, iterator): + self.data = [] + self._iter = iterator + + def __getitem__(self, i): + # iterates until 'i' if not done yet + while i>= len(self.data): + try: + self.data.append( self._iter.next() ) + except AttributeError: + # CPython raises a TypeError when next() is not defined + raise TypeError('%s has no next() method' % self._iter) + return self.data[i] + + +class TeeObject(object): + """Iterables / Iterators as returned by the tee() function""" + def __init__(self, iterable=None, tee_data=None): + if tee_data: + self.tee_data = tee_data + self.pos = 0 + # <=> Copy constructor + elif isinstance(iterable, TeeObject): + self.tee_data = iterable.tee_data + self.pos = iterable.pos + else: + self.tee_data = TeeData(iter(iterable)) + self.pos = 0 + + def next(self): + data = self.tee_data[self.pos] + self.pos += 1 + return data + + def __iter__(self): + return self + + + at builtinify +def tee(iterable, n=2): + """Return n independent iterators from a single iterable. + Note : once tee() has made a split, the original iterable + should not be used anywhere else; otherwise, the iterable could get + advanced without the tee objects being informed. + + Note : this member of the toolkit may require significant auxiliary + storage (depending on how much temporary data needs to be stored). 
+ In general, if one iterator is going to use most or all of the + data before the other iterator, it is faster to use list() instead + of tee() + + Equivalent to : + + def tee(iterable, n=2): + def gen(next, data={}, cnt=[0]): + for i in count(): + if i == cnt[0]: + item = data[i] = next() + cnt[0] += 1 + else: + item = data.pop(i) + yield item + it = iter(iterable) + return tuple([gen(it.next) for i in range(n)]) + """ + if isinstance(iterable, TeeObject): + # a,b = tee(range(10)) ; c,d = tee(a) ; self.assert_(a is c) + return tuple([iterable] + + [TeeObject(tee_data=iterable.tee_data) for i in xrange(n-1)]) + tee_data = TeeData(iter(iterable)) + return tuple([TeeObject(tee_data=tee_data) for i in xrange(n)]) diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -67,10 +67,7 @@ if self.unicodedata_handler: return self.unicodedata_handler try: - w_builtin = space.getbuiltinmodule('__builtin__') - w_import = space.getattr(w_builtin, space.wrap("__import__")) - w_unicodedata = space.call_function(w_import, - space.wrap("unicodedata")) + w_unicodedata = space.getbuiltinmodule("unicodedata") w_getcode = space.getattr(w_unicodedata, space.wrap("_get_code")) except OperationError: return None diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py --- a/pypy/module/_multiprocessing/interp_semaphore.py +++ b/pypy/module/_multiprocessing/interp_semaphore.py @@ -235,7 +235,7 @@ elif timeout >= 0.5 * rwin32.INFINITE: # 25 days raise OperationError(space.w_OverflowError, space.wrap("timeout is too large")) - full_msecs = int(timeout + 0.5) + full_msecs = r_uint(int(timeout + 0.5)) # check whether we can acquire without blocking res = rwin32.WaitForSingleObject(self.handle, 0) @@ -243,7 +243,7 @@ if res != rwin32.WAIT_TIMEOUT: return True - msecs = r_uint(full_msecs) + msecs = full_msecs start = _GetTickCount() while True: @@ -269,7 +269,7 @@ ticks = _GetTickCount() if r_uint(ticks - start) >= full_msecs: return False - msecs = r_uint(full_msecs - (ticks - start)) + msecs = full_msecs - r_uint(ticks - start) # handle result if res != rwin32.WAIT_TIMEOUT: diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -543,6 +543,7 @@ from pypy.rlib.rwin32 import GetLastError return space.wrap(GetLastError()) -def set_last_error(space, w_error): + at unwrap_spec(error=int) +def set_last_error(space, error): from pypy.rlib.rwin32 import SetLastError - SetLastError(space.uint_w(w_error)) + SetLastError(error) diff --git a/pypy/module/itertools/__init__.py b/pypy/module/itertools/__init__.py --- a/pypy/module/itertools/__init__.py +++ b/pypy/module/itertools/__init__.py @@ -23,7 +23,6 @@ dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v) """ - applevel_name = '__builtin_itertools' interpleveldefs = { 'chain' : 'interp_itertools.W_Chain', diff --git a/pypy/rlib/rwin32.py b/pypy/rlib/rwin32.py --- a/pypy/rlib/rwin32.py +++ b/pypy/rlib/rwin32.py @@ -98,8 +98,13 @@ INVALID_HANDLE_VALUE = rffi.cast(HANDLE, -1) PFILETIME = rffi.CArrayPtr(FILETIME) - GetLastError = winexternal('GetLastError', [], DWORD, threadsafe=False) - SetLastError = winexternal('SetLastError', [DWORD], lltype.Void) + _GetLastError = 
winexternal('GetLastError', [], DWORD, threadsafe=False) + _SetLastError = winexternal('SetLastError', [DWORD], lltype.Void) + + def GetLastError(): + return rffi.cast(lltype.Signed, _GetLastError()) + def SetLastError(err): + _SetLastError(rffi.cast(DWORD, err)) # In tests, the first call to GetLastError is always wrong, because error # is hidden by operations in ll2ctypes. Call it now. @@ -184,12 +189,12 @@ msglen = FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, None, - code, + rffi.cast(DWORD, code), DEFAULT_LANGUAGE, rffi.cast(rffi.CCHARP, buf), 0, None) - if msglen <= 2 or msglen > sys.maxint: + if msglen <= 2: # includes the case msglen < 0 return fake_FormatError(code) # FormatMessage always appends \r\n. diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -1782,7 +1782,7 @@ @registering(rwin32.FormatError) def register_rwin32_FormatError(self): - return extdef([rwin32.DWORD], str, + return extdef([lltype.Signed], str, "rwin32_FormatError", llimpl=rwin32.llimpl_FormatError, ooimpl=rwin32.fake_FormatError) From noreply at buildbot.pypy.org Sun Dec 4 18:16:00 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 18:16:00 +0100 (CET) Subject: [pypy-commit] pypy default: fix Message-ID: <20111204171600.2F5388205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50134:25bdc9bbabab Date: 2011-12-04 17:39 +0100 http://bitbucket.org/pypy/pypy/changeset/25bdc9bbabab/ Log: fix diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -100,7 +100,6 @@ res, newbuf = self.do_recv_string( space, self.BUFFER_SIZE, maxlength) - res = intmask(res) # XXX why? try: if newbuf: return space.wrap(rffi.charpsize2str(newbuf, res)) @@ -117,7 +116,6 @@ res, newbuf = self.do_recv_string( space, length - offset, PY_SSIZE_T_MAX) - res = intmask(res) # XXX why? try: if newbuf: raise BufferTooShort(space, space.wrap( @@ -148,7 +146,6 @@ res, newbuf = self.do_recv_string( space, self.BUFFER_SIZE, PY_SSIZE_T_MAX) - res = intmask(res) # XXX why? 
try: if newbuf: w_received = space.wrap(rffi.charpsize2str(newbuf, res)) @@ -413,7 +410,7 @@ self.buffer, min(self.BUFFER_SIZE, buflength), read_ptr, rffi.NULL) if result: - return read_ptr[0], lltype.nullptr(rffi.CCHARP.TO) + return intmask(read_ptr[0]), lltype.nullptr(rffi.CCHARP.TO) err = rwin32.GetLastError() if err == ERROR_BROKEN_PIPE: From noreply at buildbot.pypy.org Sun Dec 4 18:16:01 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 18:16:01 +0100 (CET) Subject: [pypy-commit] pypy default: Fix Message-ID: <20111204171601.5DF4F8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50135:c8366cfed4c1 Date: 2011-12-04 18:15 +0100 http://bitbucket.org/pypy/pypy/changeset/c8366cfed4c1/ Log: Fix diff --git a/pypy/rpython/module/ll_os_stat.py b/pypy/rpython/module/ll_os_stat.py --- a/pypy/rpython/module/ll_os_stat.py +++ b/pypy/rpython/module/ll_os_stat.py @@ -12,6 +12,7 @@ from pypy.rpython.tool import rffi_platform as platform from pypy.rpython.lltypesystem.rtupletype import TUPLE_TYPE from pypy.rlib import rposix +from pypy.rlib.rarithmetic import intmask from pypy.rlib.objectmodel import specialize from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.annlowlevel import hlstr @@ -442,20 +443,19 @@ # Helper functions for win32 def make_longlong(high, low): - return (lltype.r_longlong(high) << 32) + lltype.r_longlong(low) + return (rffi.r_longlong(high) << 32) + rffi.r_longlong(low) # Seconds between 1.1.1601 and 1.1.1970 -secs_between_epochs = lltype.r_longlong(11644473600) +secs_between_epochs = rffi.r_longlong(11644473600) def FILE_TIME_to_time_t_nsec(filetime): ft = make_longlong(filetime.c_dwHighDateTime, filetime.c_dwLowDateTime) # FILETIME is in units of 100 nsec nsec = (ft % 10000000) * 100 time = (ft / 10000000) - secs_between_epochs - return time, nsec + return intmask(time), intmask(nsec) def time_t_to_FILE_TIME(time, filetime): - ft = lltype.r_longlong((time + secs_between_epochs) * 10000000) - filetime.c_dwHighDateTime = lltype.r_uint(ft >> 32) - filetime.c_dwLowDateTime = lltype.r_uint(ft & lltype.r_uint(-1)) - + ft = (rffi.r_longlong(time) + secs_between_epochs) * 10000000 + filetime.c_dwHighDateTime = rffi.r_uint(ft >> 32) + filetime.c_dwLowDateTime = rffi.r_uint(ft) # masking off high bits From noreply at buildbot.pypy.org Sun Dec 4 18:37:27 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Sun, 4 Dec 2011 18:37:27 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: merge with default Message-ID: <20111204173727.AE1C58205C@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50136:2426fc9271a4 Date: 2011-12-04 18:14 +0100 http://bitbucket.org/pypy/pypy/changeset/2426fc9271a4/ Log: merge with default diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -100,7 +100,6 @@ res, newbuf = self.do_recv_string( space, self.BUFFER_SIZE, maxlength) - res = intmask(res) # XXX why? try: if newbuf: return space.wrap(rffi.charpsize2str(newbuf, res)) @@ -117,7 +116,6 @@ res, newbuf = self.do_recv_string( space, length - offset, PY_SSIZE_T_MAX) - res = intmask(res) # XXX why? try: if newbuf: raise BufferTooShort(space, space.wrap( @@ -148,7 +146,6 @@ res, newbuf = self.do_recv_string( space, self.BUFFER_SIZE, PY_SSIZE_T_MAX) - res = intmask(res) # XXX why? 
try: if newbuf: w_received = space.wrap(rffi.charpsize2str(newbuf, res)) @@ -413,7 +410,7 @@ self.buffer, min(self.BUFFER_SIZE, buflength), read_ptr, rffi.NULL) if result: - return read_ptr[0], lltype.nullptr(rffi.CCHARP.TO) + return intmask(read_ptr[0]), lltype.nullptr(rffi.CCHARP.TO) err = rwin32.GetLastError() if err == ERROR_BROKEN_PIPE: diff --git a/pypy/rpython/module/ll_os_stat.py b/pypy/rpython/module/ll_os_stat.py --- a/pypy/rpython/module/ll_os_stat.py +++ b/pypy/rpython/module/ll_os_stat.py @@ -12,6 +12,7 @@ from pypy.rpython.tool import rffi_platform as platform from pypy.rpython.lltypesystem.rtupletype import TUPLE_TYPE from pypy.rlib import rposix +from pypy.rlib.rarithmetic import intmask from pypy.rlib.objectmodel import specialize from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.annlowlevel import hlstr @@ -319,7 +320,6 @@ assert len(STAT_FIELDS) == 10 # no extra fields on Windows def attributes_to_mode(attributes): - attributes = lltype.r_uint(attributes) m = 0 if attributes & win32traits.FILE_ATTRIBUTE_DIRECTORY: m |= win32traits._S_IFDIR | 0111 # IFEXEC for user,group,other @@ -443,20 +443,19 @@ # Helper functions for win32 def make_longlong(high, low): - return (lltype.r_longlong(high) << 32) + lltype.r_longlong(low) + return (rffi.r_longlong(high) << 32) + rffi.r_longlong(low) # Seconds between 1.1.1601 and 1.1.1970 -secs_between_epochs = lltype.r_longlong(11644473600) +secs_between_epochs = rffi.r_longlong(11644473600) def FILE_TIME_to_time_t_nsec(filetime): ft = make_longlong(filetime.c_dwHighDateTime, filetime.c_dwLowDateTime) # FILETIME is in units of 100 nsec nsec = (ft % 10000000) * 100 time = (ft / 10000000) - secs_between_epochs - return time, nsec + return intmask(time), intmask(nsec) def time_t_to_FILE_TIME(time, filetime): - ft = lltype.r_longlong((time + secs_between_epochs) * 10000000) - filetime.c_dwHighDateTime = lltype.r_uint32(ft >> 32) - filetime.c_dwLowDateTime = lltype.r_uint32(ft & lltype.r_uint(-1)) - + ft = (rffi.r_longlong(time) + secs_between_epochs) * 10000000 + filetime.c_dwHighDateTime = rffi.r_uint(ft >> 32) + filetime.c_dwLowDateTime = rffi.r_uint(ft) # masking off high bits From noreply at buildbot.pypy.org Sun Dec 4 19:40:08 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 19:40:08 +0100 (CET) Subject: [pypy-commit] pypy default: Fix Message-ID: <20111204184008.8DB498205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50137:12b00b7aeb0c Date: 2011-12-04 18:18 +0100 http://bitbucket.org/pypy/pypy/changeset/12b00b7aeb0c/ Log: Fix diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -473,7 +473,7 @@ block = timeout < 0 if not block: # XXX does not check for overflow - deadline = _GetTickCount() + int(1000 * timeout + 0.5) + deadline = intmask(_GetTickCount()) + int(1000 * timeout + 0.5) else: deadline = 0 @@ -497,7 +497,7 @@ return True if not block: - now = _GetTickCount() + now = intmask(_GetTickCount()) if now > deadline: return False diff = deadline - now From noreply at buildbot.pypy.org Sun Dec 4 19:40:09 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Dec 2011 19:40:09 +0100 (CET) Subject: [pypy-commit] pypy default: fix? 
Message-ID: <20111204184009.BF3E18205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50138:4162bc8b5f4c Date: 2011-12-04 18:56 +0100 http://bitbucket.org/pypy/pypy/changeset/4162bc8b5f4c/ Log: fix? diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -418,7 +418,7 @@ if _MSVC: def invalid_socket(fd): return fd == INVALID_SOCKET - INVALID_SOCKET = cConfig.INVALID_SOCKET + INVALID_SOCKET = intmask(cConfig.INVALID_SOCKET) else: def invalid_socket(fd): return fd < 0 From noreply at buildbot.pypy.org Sun Dec 4 20:09:58 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 Dec 2011 20:09:58 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: fix tests Message-ID: <20111204190958.A40A28205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50139:8c5047ecbd38 Date: 2011-12-04 19:37 +0100 http://bitbucket.org/pypy/pypy/changeset/8c5047ecbd38/ Log: fix tests diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -156,7 +156,7 @@ guard_no_overflow(descr=...) i40 = int_sub(i4, 1) --TICK-- - jump(p0, p1, p2, p3, i40, i38, descr=) + jump(p0, p1, p2, p3, i40, i38, descr=...) """) def test_getattr_promote(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_thread.py b/pypy/module/pypyjit/test_pypy_c/test_thread.py --- a/pypy/module/pypyjit/test_pypy_c/test_thread.py +++ b/pypy/module/pypyjit/test_pypy_c/test_thread.py @@ -24,5 +24,5 @@ guard_true(i2, descr=...) i3 = int_add(i0, 1) --THREAD-TICK-- - jump(..., descr=) + jump(..., descr=...) """) From noreply at buildbot.pypy.org Sun Dec 4 20:09:59 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 Dec 2011 20:09:59 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: fix tests Message-ID: <20111204190959.D1F748205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50140:5c46c4af8ec6 Date: 2011-12-04 19:47 +0100 http://bitbucket.org/pypy/pypy/changeset/5c46c4af8ec6/ Log: fix tests diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -221,6 +221,14 @@ @property def ids(self): return self.trace.ids + + @property + def filename(self): + return self.trace.filename + + @property + def code(self): + return self.trace.code class InvalidMatch(Exception): diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -317,14 +317,17 @@ loops = log.loops_by_filename(self.filepath) assert len(loops) == 1 assert loops[0].filename == self.filepath - assert not loops[0].is_entry_bridge + assert len([op for op in loops[0].allops() if op.name == 'label']) == 0 + assert len([op for op in loops[0].allops() if op.name == 'guard_nonnull_class']) == 0 # loops = log.loops_by_filename(self.filepath, is_entry_bridge=True) assert len(loops) == 1 - assert loops[0].is_entry_bridge + assert len([op for op in loops[0].allops() if op.name == 'label']) == 0 + assert len([op for op in loops[0].allops() if op.name == 'guard_nonnull_class']) > 0 # loops = log.loops_by_filename(self.filepath, is_entry_bridge='*') - assert len(loops) == 2 
+ assert len(loops) == 1 + assert len([op for op in loops[0].allops() if op.name == 'label']) == 2 def test_loops_by_id(self): def f(): From noreply at buildbot.pypy.org Sun Dec 4 20:10:03 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 Dec 2011 20:10:03 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: hg merge default Message-ID: <20111204191003.92DB28205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50141:a18379234939 Date: 2011-12-04 20:08 +0100 http://bitbucket.org/pypy/pypy/changeset/a18379234939/ Log: hg merge default diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# -*- coding: iso-8859-1 +# -*- coding: iso-8859-1 -*- # Note that PyPy contains also a built-in module 'sha' which will hide # this one if compiled in. diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -25,7 +25,7 @@ __all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee'] + 'takewhile', 'tee', 'compress', 'product'] try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f diff --git a/pypy/annotation/binaryop.py b/pypy/annotation/binaryop.py --- a/pypy/annotation/binaryop.py +++ b/pypy/annotation/binaryop.py @@ -252,7 +252,26 @@ # unsignedness is considered a rare and contagious disease def union((int1, int2)): - knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + if int1.unsigned == int2.unsigned: + knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + else: + t1 = int1.knowntype + if t1 is bool: + t1 = int + t2 = int2.knowntype + if t2 is bool: + t2 = int + + if t2 is int: + if int2.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t1 + knowntype = t1 + elif t1 is int: + if int1.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t2 + knowntype = t2 + else: + raise UnionError, "Merging these types (%s, %s) is not supported" % (t1, t2) return SomeInteger(nonneg=int1.nonneg and int2.nonneg, knowntype=knowntype) diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -591,13 +591,11 @@ immutable = True def __init__(self, method): self.method = method - -NUMBER = object() + annotation_to_ll_map = [ (SomeSingleFloat(), lltype.SingleFloat), (s_None, lltype.Void), # also matches SomeImpossibleValue() (s_Bool, lltype.Bool), - (SomeInteger(knowntype=r_ulonglong), NUMBER), (SomeFloat(), lltype.Float), (SomeLongFloat(), lltype.LongFloat), (SomeChar(), lltype.Char), @@ -623,10 +621,11 @@ return lltype.Ptr(p.PARENTTYPE) if isinstance(s_val, SomePtr): return s_val.ll_ptrtype + if type(s_val) is SomeInteger: + return lltype.build_number(None, s_val.knowntype) + for witness, T in annotation_to_ll_map: if witness.contains(s_val): - if T is NUMBER: - return lltype.build_number(None, s_val.knowntype) return T if info is None: info = '' @@ -635,7 +634,7 @@ raise ValueError("%sshould return a low-level type,\ngot instead %r" % ( info, s_val)) -ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map if ll is not NUMBER]) +ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map]) def lltype_to_annotation(T): try: diff --git a/pypy/annotation/specialize.py 
b/pypy/annotation/specialize.py --- a/pypy/annotation/specialize.py +++ b/pypy/annotation/specialize.py @@ -36,9 +36,7 @@ newtup = SpaceOperation('newtuple', starargs, argscopy[-1]) newstartblock.operations.append(newtup) newstartblock.closeblock(Link(argscopy, graph.startblock)) - graph.startblock.isstartblock = False graph.startblock = newstartblock - newstartblock.isstartblock = True argnames = argnames + ['.star%d' % i for i in range(nb_extra_args)] graph.signature = Signature(argnames) # note that we can mostly ignore defaults: if nb_extra_args > 0, diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py --- a/pypy/annotation/test/test_annrpython.py +++ b/pypy/annotation/test/test_annrpython.py @@ -856,6 +856,46 @@ py.test.raises(Exception, a.build_types, f, []) # if you want to get a r_uint, you have to be explicit about it + def test_add_different_ints(self): + def f(a, b): + return a + b + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_different_ints(self): + def f(a, b): + if a: + c = a + else: + c = b + return c + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_ruint_zero(self): + def f(a): + if a: + c = a + else: + c = 0 + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_merge_ruint_nonneg_signed(self): + def f(a, b): + if a: + c = a + else: + assert b >= 0 + c = b + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint, int]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_prebuilt_long_that_is_not_too_long(self): small_constant = 12L def f(): @@ -3029,7 +3069,7 @@ if g(x, y): g(x, r_uint(y)) a = self.RPythonAnnotator() - a.build_types(f, [int, int]) + py.test.raises(Exception, a.build_types, f, [int, int]) def test_compare_with_zero(self): def g(): diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -69,8 +69,8 @@ "statistics": [("translation.gctransformer", "framework")], "generation": [("translation.gctransformer", "framework")], "hybrid": [("translation.gctransformer", "framework")], - "boehm": [("translation.gctransformer", "boehm"), - ("translation.continuation", False)], # breaks + "boehm": [("translation.continuation", False), # breaks + ("translation.gctransformer", "boehm")], "markcompact": [("translation.gctransformer", "framework")], "minimark": [("translation.gctransformer", "framework")], }, diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -304,5 +304,14 @@ never a dictionary as it sometimes is in CPython. Assigning to ``__builtins__`` has no effect. +* directly calling the internal magic methods of a few built-in types + with invalid arguments may have a slightly different result. For + example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return + ``NotImplemented`` on PyPy; on CPython, only the later does, and the + former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` + both raise ``TypeError`` everywhere.) This difference is an + implementation detail that shows up because of internal C-level slots + that PyPy does not have. + .. 
include:: _ref.txt diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.6`: http://pypy.org/download.html +.. _`Release 1.7`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. Windows is supported, but platform-specific bugs tend to take longer before we notice and fix diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated, instantiate from pypy.rlib.jit import hint from pypy.rlib.debug import make_sure_not_resized, check_nonneg -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rlib import jit from pypy.tool import stdlib_opcode from pypy.tool.stdlib_opcode import host_bytecode_spec @@ -167,7 +167,7 @@ # Execution starts just after the last_instr. Initially, # last_instr is -1. After a generator suspends it points to # the YIELD_VALUE instruction. 
- next_instr = self.last_instr + 1 + next_instr = r_uint(self.last_instr + 1) if next_instr != 0: self.pushvalue(w_inputvalue) # @@ -691,6 +691,7 @@ handlerposition = space.int_w(w_handlerposition) valuestackdepth = space.int_w(w_valuestackdepth) assert valuestackdepth >= 0 + assert handlerposition >= 0 blk = instantiate(get_block_class(opname)) blk.handlerposition = handlerposition blk.valuestackdepth = valuestackdepth diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -837,6 +837,7 @@ raise Yield def jump_absolute(self, jumpto, next_instr, ec): + check_nonneg(jumpto) return jumpto def JUMP_FORWARD(self, jumpby, next_instr): @@ -1278,7 +1279,7 @@ def handle(self, frame, unroller): next_instr = self.really_handle(frame, unroller) # JIT hack - return next_instr + return r_uint(next_instr) def really_handle(self, frame, unroller): """ Purely abstract method diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -21,7 +21,7 @@ from pypy.jit.backend.llgraph import symbolic from pypy.jit.codewriter import longlong -from pypy.rlib import libffi +from pypy.rlib import libffi, clibffi from pypy.rlib.objectmodel import ComputedIntSymbolic, we_are_translated from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint @@ -1464,6 +1464,10 @@ res = _getinteriorfield_raw(libffi.types.slong, array, index, width, ofs) return res +def do_getinteriorfield_raw_float(array, index, width, ofs): + res = _getinteriorfield_raw(libffi.types.double, array, index, width, ofs) + return res + def _getfield_raw(struct, fieldnum): STRUCT, fieldname = symbolic.TokenToField[fieldnum] ptr = cast_from_int(lltype.Ptr(STRUCT), struct) @@ -1542,12 +1546,17 @@ do_setinteriorfield_gc_float = new_setinteriorfield_gc(cast_from_floatstorage) do_setinteriorfield_gc_ptr = new_setinteriorfield_gc(cast_from_ptr) -def new_setinteriorfield_raw(ffitype): +def new_setinteriorfield_raw(cast_func, ffitype): def do_setinteriorfield_raw(array, index, newvalue, width, ofs): addr = rffi.cast(rffi.VOIDP, array) + for TYPE, ffitype2 in clibffi.ffitype_map: + if ffitype2 is ffitype: + newvalue = cast_func(TYPE, newvalue) + break return libffi.array_setitem(ffitype, width, addr, index, ofs, newvalue) return do_setinteriorfield_raw -do_setinteriorfield_raw_int = new_setinteriorfield_raw(libffi.types.slong) +do_setinteriorfield_raw_int = new_setinteriorfield_raw(cast_from_int, libffi.types.slong) +do_setinteriorfield_raw_float = new_setinteriorfield_raw(cast_from_floatstorage, libffi.types.double) def do_setfield_raw_int(struct, fieldnum, newvalue): STRUCT, fieldname = symbolic.TokenToField[fieldnum] diff --git a/pypy/jit/backend/llsupport/asmmemmgr.py b/pypy/jit/backend/llsupport/asmmemmgr.py --- a/pypy/jit/backend/llsupport/asmmemmgr.py +++ b/pypy/jit/backend/llsupport/asmmemmgr.py @@ -37,25 +37,25 @@ self._add_free_block(smaller_stop, stop) stop = smaller_stop result = (start, stop) - self.total_mallocs += stop - start + self.total_mallocs += r_uint(stop - start) return result # pair (start, stop) def free(self, start, stop): """Free a block (start, stop) returned by a previous malloc().""" - self.total_mallocs -= (stop - start) + self.total_mallocs -= r_uint(stop - start) self._add_free_block(start, stop) def open_malloc(self, minsize): """Allocate at least minsize bytes. 
Returns (start, stop).""" result = self._allocate_block(minsize) (start, stop) = result - self.total_mallocs += stop - start + self.total_mallocs += r_uint(stop - start) return result def open_free(self, middle, stop): """Used for freeing the end of an open-allocated block of memory.""" if stop - middle >= self.min_fragment: - self.total_mallocs -= (stop - middle) + self.total_mallocs -= r_uint(stop - middle) self._add_free_block(middle, stop) return True else: @@ -77,7 +77,7 @@ # Hack to make sure that mcs are not within 32-bits of one # another for testing purposes rmmap.hint.pos += 0x80000000 - size - self.total_memory_allocated += size + self.total_memory_allocated += r_uint(size) data = rffi.cast(lltype.Signed, data) return self._add_free_block(data, data + size) diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -648,14 +648,10 @@ # make a malloc function, with two arguments def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - has_finalizer = bool(tid & (1< -1: + if self.longevity[arg][1] > -1 and arg in useful: if arg.type == FLOAT: # xxx is it really a good idea? at the first CALL they # will all be flushed anyway @@ -288,15 +284,15 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: - loop_consts = {} - else: - loop_consts = {} - for i in range(len(inputargs)): - if inputargs[i] is jump.getarg(i): - loop_consts[inputargs[i]] = i - return loop_consts + #def _compute_loop_consts(self, inputargs, jump, looptoken): + # if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: + # loop_consts = {} + # else: + # loop_consts = {} + # for i in range(len(inputargs)): + # if inputargs[i] is jump.getarg(i): + # loop_consts[inputargs[i]] = i + # return loop_consts def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py @@ -451,8 +447,14 @@ def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" + + # returns a pair longevity/useful. Non-useful variables are ones that + # never appear in the assembler or it does not matter if they appear on + # stack or in registers. 
Main example is loop arguments that go + # only to guard operations or to jump or to finish produced = {} last_used = {} + useful = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -460,8 +462,11 @@ continue assert op.result not in produced produced[op.result] = i + opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) + if opnum != rop.JUMP and opnum != rop.FINISH: + useful[arg] = None if isinstance(arg, Box) and arg not in last_used: last_used[arg] = i if op.is_guard(): @@ -487,7 +492,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity + return longevity, useful def loc(self, v): if v is None: # xxx kludgy diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -153,6 +153,13 @@ self.cpu.execute_token(looptoken) return loop + def prepare_loop(self, ops): + loop = self.parse(ops) + regalloc = RegAlloc(self.cpu.assembler, False) + regalloc.prepare_loop(loop.inputargs, loop.operations, + loop.token, []) + return regalloc + def getint(self, index): return self.cpu.get_latest_value_int(index) @@ -434,6 +441,35 @@ self.run(loop) assert self.getints(9) == range(9) + def test_loopargs(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_2(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + finish(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_3(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + guard_true(i4) [i0, i1, i2, i3, i4] + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + class TestRegallocCompOps(BaseTestRegalloc): def test_cmp_op_0(self): diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -457,6 +457,46 @@ def test_compile_framework_7(self): self.run('compile_framework_7') + def define_compile_framework_7_interior(cls): + # Array of structs containing pointers (test the write barrier + # for setinteriorfield_gc) + S = lltype.GcStruct('S', ('i', lltype.Signed)) + A = lltype.GcArray(lltype.Struct('entry', ('x', lltype.Ptr(S)), + ('y', lltype.Ptr(S)), + ('z', lltype.Ptr(S)))) + class Glob: + a = lltype.nullptr(A) + glob = Glob() + # + def make_s(i): + s = lltype.malloc(S) + s.i = i + return s + # + @unroll_safe + def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): + a = glob.a + if not a: + a = glob.a = lltype.malloc(A, 10) + i = 0 + while i < 10: + a[i].x = make_s(n + i * 100 + 1) + a[i].y = make_s(n + i * 100 + 2) + a[i].z = make_s(n + i * 100 + 3) + i += 1 + i = 0 + while i < 10: + check(a[i].x.i == n + i * 100 + 1) + check(a[i].y.i == n + i * 100 + 2) + check(a[i].z.i == n + i * 100 + 3) + i += 1 + return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s + f(123, *[None]*11) # check that the check() are ok + return None, f, None + + def test_compile_framework_7_interior(self): + self.run('compile_framework_7_interior') + def define_compile_framework_8(cls): # Array of pointers, of unknown length (test write_barrier_from_array) def before(n, x): diff --git a/pypy/jit/codewriter/effectinfo.py 
b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -241,12 +241,15 @@ return op.opname == 'jit_force_quasi_immutable' class RandomEffectsAnalyzer(BoolGraphAnalyzer): - def analyze_direct_call(self, graph, seen=None): - if hasattr(graph, "func") and hasattr(graph.func, "_ptr"): - if graph.func._ptr._obj.random_effects_on_gcobjs: + def analyze_external_call(self, op, seen=None): + try: + funcobj = op.args[0].value._obj + if funcobj.random_effects_on_gcobjs: return True - return super(RandomEffectsAnalyzer, self).analyze_direct_call(graph, - seen) + except (AttributeError, lltype.DelayedPointer): + return True # better safe than sorry + return super(RandomEffectsAnalyzer, self).analyze_external_call( + op, seen) def analyze_simple_operation(self, op, graphinfo): return False diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -15,6 +15,8 @@ from pypy.translator.simplify import get_funcobj from pypy.translator.unsimplify import varoftype +class UnsupportedMallocFlags(Exception): + pass def transform_graph(graph, cpu=None, callcontrol=None, portal_jd=None): """Transform a control flow graph to make it suitable for @@ -205,7 +207,24 @@ if op.args[0] in self.vable_array_vars: self.vable_array_vars[op.result]= self.vable_array_vars[op.args[0]] - rewrite_op_cast_pointer = rewrite_op_same_as + def rewrite_op_cast_pointer(self, op): + newop = self.rewrite_op_same_as(op) + assert newop is None + return + # disabled for now + if (self._is_rclass_instance(op.args[0]) and + self._is_rclass_instance(op.result)): + FROM = op.args[0].concretetype.TO + TO = op.result.concretetype.TO + if lltype._castdepth(TO, FROM) > 0: + vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, TO) + const_vtable = Constant(vtable, lltype.typeOf(vtable)) + return [None, # hack, do the right renaming from op.args[0] to op.result + SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + + def rewrite_op_jit_record_known_class(self, op): + return SpaceOperation("record_known_class", [op.args[0], op.args[1]], None) + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass @@ -481,8 +500,22 @@ def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) ARRAY = op.args[0].value - return self._do_builtin_call(op, 'raw_malloc', + name = 'raw_malloc' + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[2]], extra = (ARRAY,), extrakey = ARRAY) @@ -1053,35 +1086,20 @@ # jit.codewriter.support. 
for _op, _oopspec in [('llong_invert', 'INVERT'), - ('ullong_invert', 'INVERT'), ('llong_lt', 'LT'), ('llong_le', 'LE'), ('llong_eq', 'EQ'), ('llong_ne', 'NE'), ('llong_gt', 'GT'), ('llong_ge', 'GE'), - ('ullong_lt', 'ULT'), - ('ullong_le', 'ULE'), - ('ullong_eq', 'EQ'), - ('ullong_ne', 'NE'), - ('ullong_gt', 'UGT'), - ('ullong_ge', 'UGE'), ('llong_add', 'ADD'), ('llong_sub', 'SUB'), ('llong_mul', 'MUL'), ('llong_and', 'AND'), ('llong_or', 'OR'), ('llong_xor', 'XOR'), - ('ullong_add', 'ADD'), - ('ullong_sub', 'SUB'), - ('ullong_mul', 'MUL'), - ('ullong_and', 'AND'), - ('ullong_or', 'OR'), - ('ullong_xor', 'XOR'), ('llong_lshift', 'LSHIFT'), ('llong_rshift', 'RSHIFT'), - ('ullong_lshift', 'LSHIFT'), - ('ullong_rshift', 'URSHIFT'), ('cast_int_to_longlong', 'FROM_INT'), ('truncate_longlong_to_int', 'TO_INT'), ('cast_float_to_longlong', 'FROM_FLOAT'), @@ -1104,6 +1122,21 @@ ('cast_uint_to_ulonglong', 'FROM_UINT'), ('cast_float_to_ulonglong', 'FROM_FLOAT'), ('cast_ulonglong_to_float', 'U_TO_FLOAT'), + ('ullong_invert', 'INVERT'), + ('ullong_lt', 'ULT'), + ('ullong_le', 'ULE'), + ('ullong_eq', 'EQ'), + ('ullong_ne', 'NE'), + ('ullong_gt', 'UGT'), + ('ullong_ge', 'UGE'), + ('ullong_add', 'ADD'), + ('ullong_sub', 'SUB'), + ('ullong_mul', 'MUL'), + ('ullong_and', 'AND'), + ('ullong_or', 'OR'), + ('ullong_xor', 'XOR'), + ('ullong_lshift', 'LSHIFT'), + ('ullong_rshift', 'URSHIFT'), ]: exec py.code.Source(''' def rewrite_op_%s(self, op): @@ -1134,7 +1167,7 @@ def rewrite_op_llong_is_true(self, op): v = varoftype(op.args[0].concretetype) - op0 = SpaceOperation('cast_int_to_longlong', + op0 = SpaceOperation('cast_primitive', [Constant(0, lltype.Signed)], v) args = [op.args[0], v] diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -258,6 +258,9 @@ y = ~r_ulonglong(xll) return u_to_longlong(y) +def _ll_1_ullong_invert(xull): + return ~xull + def _ll_2_llong_lt(xll, yll): return xll < yll @@ -276,16 +279,22 @@ def _ll_2_llong_ge(xll, yll): return xll >= yll -def _ll_2_llong_ult(xull, yull): +def _ll_2_ullong_eq(xull, yull): + return xull == yull + +def _ll_2_ullong_ne(xull, yull): + return xull != yull + +def _ll_2_ullong_ult(xull, yull): return xull < yull -def _ll_2_llong_ule(xull, yull): +def _ll_2_ullong_ule(xull, yull): return xull <= yull -def _ll_2_llong_ugt(xull, yull): +def _ll_2_ullong_ugt(xull, yull): return xull > yull -def _ll_2_llong_uge(xull, yull): +def _ll_2_ullong_uge(xull, yull): return xull >= yull def _ll_2_llong_add(xll, yll): @@ -312,14 +321,41 @@ z = r_ulonglong(xll) ^ r_ulonglong(yll) return u_to_longlong(z) +def _ll_2_ullong_add(xull, yull): + z = (xull) + (yull) + return (z) + +def _ll_2_ullong_sub(xull, yull): + z = (xull) - (yull) + return (z) + +def _ll_2_ullong_mul(xull, yull): + z = (xull) * (yull) + return (z) + +def _ll_2_ullong_and(xull, yull): + z = (xull) & (yull) + return (z) + +def _ll_2_ullong_or(xull, yull): + z = (xull) | (yull) + return (z) + +def _ll_2_ullong_xor(xull, yull): + z = (xull) ^ (yull) + return (z) + def _ll_2_llong_lshift(xll, y): z = r_ulonglong(xll) << y return u_to_longlong(z) +def _ll_2_ullong_lshift(xull, y): + return xull << y + def _ll_2_llong_rshift(xll, y): return xll >> y -def _ll_2_llong_urshift(xull, y): +def _ll_2_ullong_urshift(xull, y): return xull >> y def _ll_1_llong_from_int(x): @@ -563,10 +599,21 @@ return p return _ll_0_alloc_with_del - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, 
flavor='raw') - return _ll_1_raw_malloc + def build_raw_malloc_builder(zero=False, add_memory_pressure=False, track_allocation=True): + def build_ll_1_raw_malloc(ARRAY): + def _ll_1_raw_malloc(n): + return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, add_memory_pressure=add_memory_pressure) + return _ll_1_raw_malloc + return build_ll_1_raw_malloc + + build_ll_1_raw_malloc = build_raw_malloc_builder() + build_ll_1_raw_malloc_zero = build_raw_malloc_builder(zero=True) + build_ll_1_raw_malloc_zero_add_memory_pressure = build_raw_malloc_builder(zero=True, add_memory_pressure=True) + build_ll_1_raw_malloc_add_memory_pressure = build_raw_malloc_builder(add_memory_pressure=True) + build_ll_1_raw_malloc_no_track_allocation = build_raw_malloc_builder(track_allocation=False) + build_ll_1_raw_malloc_zero_no_track_allocation = build_raw_malloc_builder(zero=True, track_allocation=False) + build_ll_1_raw_malloc_zero_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(zero=True, add_memory_pressure=True, track_allocation=False) + build_ll_1_raw_malloc_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(add_memory_pressure=True, track_allocation=False) def build_ll_1_raw_free(ARRAY): def _ll_1_raw_free(p): diff --git a/pypy/jit/codewriter/test/test_call.py b/pypy/jit/codewriter/test/test_call.py --- a/pypy/jit/codewriter/test/test_call.py +++ b/pypy/jit/codewriter/test/test_call.py @@ -192,3 +192,21 @@ [op] = block.operations call_descr = cc.getcalldescr(op) assert call_descr.extrainfo.has_random_effects() + +def test_random_effects_on_stacklet_switch(): + from pypy.jit.backend.llgraph.runner import LLtypeCPU + from pypy.rlib._rffi_stacklet import switch, thread_handle, handle + @jit.dont_look_inside + def f(): + switch(rffi.cast(thread_handle, 0), rffi.cast(handle, 0)) + + rtyper = support.annotate(f, []) + jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0]) + cc = CallControl(LLtypeCPU(rtyper), jitdrivers_sd=[jitdriver_sd]) + res = cc.find_all_graphs(FakePolicy()) + + [f_graph] = [x for x in res if x.func is f] + [block, _] = list(f_graph.iterblocks()) + op = block.operations[-1] + call_descr = cc.getcalldescr(op) + assert call_descr.extrainfo.has_random_effects() diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -1,3 +1,5 @@ + +import py import random try: from itertools import product @@ -15,12 +17,12 @@ from pypy.objspace.flow.model import FunctionGraph, Block, Link from pypy.objspace.flow.model import SpaceOperation, Variable, Constant -from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.lltypesystem.module import ll_math from pypy.translator.unsimplify import varoftype from pypy.jit.codewriter import heaptracker, effectinfo from pypy.jit.codewriter.flatten import ListOfKind -from pypy.jit.codewriter.jtransform import Transformer +from pypy.jit.codewriter.jtransform import Transformer, UnsupportedMallocFlags from pypy.jit.metainterp.history import getkind def const(x): @@ -538,6 +540,44 @@ assert op1.opname == '-live-' assert op1.args == [] +def test_raw_malloc(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw'}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + 
v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_zero(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc_zero' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_unsupported_flag(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'unsupported_flag': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address @@ -1140,4 +1180,4 @@ assert op1.opname == 'mark_opaque_ptr' assert op1.args == [v1] assert op1.result is None - assert op2 is None \ No newline at end of file + assert op2 is None diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py --- a/pypy/jit/codewriter/test/test_longlong.py +++ b/pypy/jit/codewriter/test/test_longlong.py @@ -78,7 +78,7 @@ oplist = tr.rewrite_operation(op) assert len(oplist) == 2 assert oplist[0].opname == 'residual_call_irf_f' - assert oplist[0].args[0].value == 'llong_from_int' + assert oplist[0].args[0].value == opname.split('_')[0]+'_from_int' assert oplist[0].args[1] == 'calldescr-84' assert list(oplist[0].args[2]) == [const(0)] assert list(oplist[0].args[3]) == [] diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -518,6 +518,9 @@ @arguments("r") def bhimpl_mark_opaque_ptr(a): pass + @arguments("r", "i") + def bhimpl_record_known_class(a, b): + pass @arguments("i", returns="i") def bhimpl_int_copy(a): diff --git a/pypy/jit/metainterp/gc.py b/pypy/jit/metainterp/gc.py --- a/pypy/jit/metainterp/gc.py +++ b/pypy/jit/metainterp/gc.py @@ -7,6 +7,9 @@ self.config = config +class GC_none(GcDescription): + malloc_zero_filled = True + class GC_boehm(GcDescription): malloc_zero_filled = True diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -1071,6 +1071,25 @@ # a jump back to itself and possibly a few bridges ending with finnish. # Only the operations within the loop formed by that single jump will # be counted. + + # XXX hacked version, ignore and remove me when jit-targets is merged. 
+ loops = self.get_all_loops() + loops = [loop for loop in loops if 'Preamble' not in repr(loop)] #XXX + assert len(loops) == 1 + loop, = loops + jumpop = loop.operations[-1] + assert jumpop.getopnum() == rop.JUMP + insns = {} + for op in loop.operations: + opname = op.getopname() + insns[opname] = insns.get(opname, 0) + 1 + return self._check_insns(insns, expected, check) + + def check_simple_loop(self, expected=None, **check): + # Usefull in the simplest case when we have only one trace ending with + # a jump back to itself and possibly a few bridges ending with finnish. + # Only the operations within the loop formed by that single jump will + # be counted. loops = self.get_all_loops() assert len(loops) == 1 loop = loops[0] diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -7,7 +7,7 @@ from pypy.rlib.libffi import Func from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.annlowlevel import cast_base_ptr_to_instance -from pypy.rpython.lltypesystem import llmemory +from pypy.rpython.lltypesystem import llmemory, rffi class FuncInfo(object): @@ -237,7 +237,7 @@ else: assert False, "unsupported ffitype or kind" # - fieldsize = ffitype.c_size + fieldsize = rffi.getintfield(ffitype, 'c_size') return self.optimizer.cpu.interiorfielddescrof_dynamic( offset, width, fieldsize, is_pointer, is_float, is_signed ) diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -565,9 +565,12 @@ descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) - newboxes = modifier.finish(self.values, self.pendingfields) - if len(newboxes) > self.metainterp_sd.options.failargs_limit: # XXX be careful here - compile.giveup() + try: + newboxes = modifier.finish(self.values, self.pendingfields) + if len(newboxes) > self.metainterp_sd.options.failargs_limit: + raise resume.TagOverflow + except resume.TagOverflow: + raise compile.giveup() descr.store_final_boxes(op, newboxes) # if op.getopnum() == rop.GUARD_VALUE: diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -260,6 +260,16 @@ def optimize_GUARD_FALSE(self, op): self.optimize_guard(op, CONST_0) + def optimize_RECORD_KNOWN_CLASS(self, op): + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) + assert isinstance(expectedclassbox, Const) + realclassbox = value.get_constant_class(self.optimizer.cpu) + if realclassbox is not None: + assert realclassbox.same_constant(expectedclassbox) + return + value.make_constant_class(expectedclassbox, None) + def optimize_GUARD_CLASS(self, op): value = self.getvalue(op.getarg(0)) expectedclassbox = op.getarg(1) @@ -481,6 +491,9 @@ self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) self.emit_operation(op) + def optimize_SAME_AS(self, op): + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) + dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_', default=OptRewrite.emit_operation) optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD') diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- 
a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -31,6 +31,9 @@ def optimize_MARK_OPAQUE_PTR(self, op): pass + def optimize_RECORD_KNOWN_CLASS(self, op): + pass + def optimize_LABEL(self, op): self.last_label_descr = op.getdescr() self.emit_operation(op) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -6508,6 +6508,21 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() + def test_record_known_class(self): + ops = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + record_known_class(p1, ConstClass(node_vtable)) + guard_class(p1, ConstClass(node_vtable)) [] + jump(p1) + """ + expected = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + jump(p1) + """ + self.optimize_loop(ops, expected) + def test_quasi_immut(self): ops = """ [p0, p1, i0] diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -242,6 +242,18 @@ def opimpl_mark_opaque_ptr(self, box): return self.execute(rop.MARK_OPAQUE_PTR, box) + @arguments("box", "box") + def opimpl_record_known_class(self, box, clsbox): + from pypy.rpython.lltypesystem import llmemory + if self.metainterp.heapcache.is_class_known(box): + return + adr = clsbox.getaddr() + bounding_class = llmemory.cast_adr_to_ptr(adr, rclass.CLASSTYPE) + if bounding_class.subclassrange_max - bounding_class.subclassrange_min == 1: + # precise class knowledge, this can be used + self.execute(rop.RECORD_KNOWN_CLASS, box, clsbox) + self.metainterp.heapcache.class_now_known(box) + @arguments("box") def _opimpl_any_return(self, box): self.metainterp.finishframe(box) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -496,6 +496,7 @@ 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr + 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/jit/metainterp/resume.py b/pypy/jit/metainterp/resume.py --- a/pypy/jit/metainterp/resume.py +++ b/pypy/jit/metainterp/resume.py @@ -93,12 +93,14 @@ TAGMASK = 3 +class TagOverflow(Exception): + pass + def tag(value, tagbits): - if tagbits >> 2: - raise ValueError + assert 0 <= tagbits <= 3 sx = value >> 13 if sx != 0 and sx != -1: - raise ValueError + raise TagOverflow return rffi.r_short(value<<2|tagbits) def untag(value): @@ -153,7 +155,7 @@ return self._newconst(const) try: return tag(val, TAGINT) - except ValueError: + except TagOverflow: pass tagged = self.large_ints.get(val, UNASSIGNED) if not tagged_eq(tagged, UNASSIGNED): @@ -429,8 +431,7 @@ fieldnum = self._gettagged(fieldbox) # the index is limited to 2147483647 (64-bit machines only) if itemindex > 2147483647: - from pypy.jit.metainterp import compile - compile.giveup() + raise TagOverflow itemindex = rffi.cast(rffi.INT, itemindex) # rd_pendingfields[i].lldescr = lldescr diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -14,7 +14,7 @@ from pypy.rlib.jit import 
(JitDriver, we_are_jitted, hint, dont_look_inside, loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, - isconstant, isvirtual, promote_string, set_param) + isconstant, isvirtual, promote_string, set_param, record_known_class) from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -3537,6 +3537,132 @@ self.interp_operations(f, [5], translationoptions=translationoptions) + def test_annotation_gives_class_knowledge_to_tracer(self): + py.test.skip("disabled") + class Base(object): + pass + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + assert isinstance(a, A) + z = a.f() + elif x < 0: + assert isinstance(a, B) + z = a.f() + else: + assert isinstance(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + + def test_give_class_knowledge_to_tracer_explicitly(self): + from pypy.rpython.lltypesystem.lloperation import llop + class Base(object): + def f(self): + raise NotImplementedError + def g(self): + raise NotImplementedError + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + record_known_class(a, A) + z = a.f() + elif x < 0: + record_known_class(a, B) + z = a.f() + else: + record_known_class(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): from pypy.rlib.objectmodel import UnboxedValue diff --git a/pypy/jit/metainterp/test/test_resume.py b/pypy/jit/metainterp/test/test_resume.py --- a/pypy/jit/metainterp/test/test_resume.py +++ b/pypy/jit/metainterp/test/test_resume.py @@ -23,11 +23,11 @@ assert tag(-3, 2) == rffi.r_short(-3<<2|2) assert tag((1<<13)-1, 3) == rffi.r_short(((1<<15)-1)|3) assert 
tag(-1<<13, 3) == rffi.r_short((-1<<15)|3) - py.test.raises(ValueError, tag, 3, 5) - py.test.raises(ValueError, tag, 1<<13, 0) - py.test.raises(ValueError, tag, (1<<13)+1, 0) - py.test.raises(ValueError, tag, (-1<<13)-1, 0) - py.test.raises(ValueError, tag, (-1<<13)-5, 0) + py.test.raises(AssertionError, tag, 3, 5) + py.test.raises(TagOverflow, tag, 1<<13, 0) + py.test.raises(TagOverflow, tag, (1<<13)+1, 0) + py.test.raises(TagOverflow, tag, (-1<<13)-1, 0) + py.test.raises(TagOverflow, tag, (-1<<13)-5, 0) def test_untag(): assert untag(tag(3, 1)) == (3, 1) @@ -1318,8 +1318,7 @@ assert rffi.cast(lltype.Signed, pf[1].fieldnum) == 1062 assert rffi.cast(lltype.Signed, pf[1].itemindex) == 2147483647 # - from pypy.jit.metainterp.pyjitpl import SwitchToBlackhole - py.test.raises(SwitchToBlackhole, modifier._add_pending_fields, + py.test.raises(TagOverflow, modifier._add_pending_fields, [(array_a, 42, 63, 2147483648)]) def test_resume_reader_fields_and_arrayitems(): diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -615,7 +615,7 @@ return node.value res = self.meta_interp(f, [48, 3], policy=StopAtXPolicy(externfn)) assert res == f(48, 3) - self.check_trace_count(3) + self.check_trace_count(4) res = self.meta_interp(f, [40, 3], policy=StopAtXPolicy(externfn)) assert res == f(40, 3) self.check_trace_count(3) @@ -764,6 +764,27 @@ res = self.meta_interp(f, [0x1F, 0x11]) assert res == f(0x1F, 0x11) + def test_duplicated_virtual(self): + myjitdriver = JitDriver(greens = [], reds = ['n', 'node1', 'node2']) + def f(n): + node1 = self._new() + node1.value = 0 + node2 = self._new() + node2.value = 1 + while n > 0: + myjitdriver.jit_merge_point(n=n, node1=node1, node2=node2) + next = self._new() + next.value = node1.value + node2.value + n + node1 = next + node2 = next + n -= 1 + return node1.value + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_resops(new_with_vtable=0, new=0) + + + def test_retrace_not_matching_bridge(self): @dont_look_inside def external(node): diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -255,10 +255,8 @@ s_binding = self.translator.annotator.binding jd._portal_args_s = [s_binding(v) for v in args] graph = copygraph(graph) - graph.startblock.isstartblock = False [jmpp] = find_jit_merge_points([graph]) graph.startblock = support.split_before_jit_merge_point(*jmpp) - graph.startblock.isstartblock = True # a crash in the following checkgraph() means that you forgot # to list some variable in greens=[] or reds=[] in JitDriver, # or that a jit_merge_point() takes a constant as an argument. 
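The comment retained in warmspot.py above is worth restating: a crash in checkgraph() at that point usually means a loop variable was left out of greens=[] or reds=[] in the JitDriver declaration, or that jit_merge_point() received a constant. A minimal sketch of the declaration pattern, in the same shape as the tests in this merge (for example test_duplicated_virtual); it assumes the PyPy source tree is importable and the names are illustrative only:

from pypy.rlib.jit import JitDriver

# every variable that is live across the loop must be listed as a green or a red,
# otherwise translation fails in the checkgraph() mentioned in warmspot.py
driver = JitDriver(greens=[], reds=['n', 'total'])

def loop(n):
    total = 0
    while n > 0:
        driver.jit_merge_point(n=n, total=total)   # keywords must match greens + reds
        total += n
        n -= 1
    return total
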
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -27,6 +27,7 @@ 'builtinify' : 'interp_magic.builtinify', 'lookup_special' : 'interp_magic.lookup_special', 'do_what_I_mean' : 'interp_magic.do_what_I_mean', + 'list_strategy' : 'interp_magic.list_strategy', } submodules = { diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -5,7 +5,6 @@ from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import IndexCache - def internal_repr(space, w_object): return space.wrap('%r' % (w_object,)) @@ -73,3 +72,11 @@ def do_what_I_mean(space): return space.wrap(42) + +def list_strategy(space, w_list): + from pypy.objspace.std.listobject import W_ListObject + if isinstance(w_list, W_ListObject): + return space.wrap(w_list.strategy._applevel_repr) + else: + w_msg = space.wrap("Can only get the list strategy of a list") + raise OperationError(space.w_TypeError, w_msg) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -5,7 +5,7 @@ def setup_class(cls): if option.runappdirect: py.test.skip("does not make sense on pypy-c") - cls.space = gettestobjspace(**{"objspace.usemodules.select": False}) + cls.space = gettestobjspace(**{"objspace.usemodules.select": False, "objspace.std.withrangelist": True}) def test__isfake(self): from __pypy__ import isfake @@ -54,3 +54,21 @@ from __pypy__ import do_what_I_mean x = do_what_I_mean() assert x == 42 + + def test_list_strategy(self): + from __pypy__ import list_strategy + + l = [1, 2, 3] + assert list_strategy(l) == "int" + l = ["a", "b", "c"] + assert list_strategy(l) == "str" + l = [1.1, 2.2, 3.3] + assert list_strategy(l) == "float" + l = range(3) + assert list_strategy(l) == "range" + l = [1, "b", 3] + assert list_strategy(l) == "object" + l = [] + assert list_strategy(l) == "empty" + o = 5 + raises(TypeError, list_strategy, 5) diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -67,10 +67,7 @@ if self.unicodedata_handler: return self.unicodedata_handler try: - w_builtin = space.getbuiltinmodule('__builtin__') - w_import = space.getattr(w_builtin, space.wrap("__import__")) - w_unicodedata = space.call_function(w_import, - space.wrap("unicodedata")) + w_unicodedata = space.getbuiltinmodule("unicodedata") w_getcode = space.getattr(w_unicodedata, space.wrap("_get_code")) except OperationError: return None diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -100,7 +100,6 @@ res, newbuf = self.do_recv_string( space, self.BUFFER_SIZE, maxlength) - res = intmask(res) # XXX why? try: if newbuf: return space.wrap(rffi.charpsize2str(newbuf, res)) @@ -117,7 +116,6 @@ res, newbuf = self.do_recv_string( space, length - offset, PY_SSIZE_T_MAX) - res = intmask(res) # XXX why? try: if newbuf: raise BufferTooShort(space, space.wrap( @@ -148,7 +146,6 @@ res, newbuf = self.do_recv_string( space, self.BUFFER_SIZE, PY_SSIZE_T_MAX) - res = intmask(res) # XXX why? 
try: if newbuf: w_received = space.wrap(rffi.charpsize2str(newbuf, res)) @@ -413,7 +410,7 @@ self.buffer, min(self.BUFFER_SIZE, buflength), read_ptr, rffi.NULL) if result: - return read_ptr[0], lltype.nullptr(rffi.CCHARP.TO) + return intmask(read_ptr[0]), lltype.nullptr(rffi.CCHARP.TO) err = rwin32.GetLastError() if err == ERROR_BROKEN_PIPE: @@ -476,7 +473,7 @@ block = timeout < 0 if not block: # XXX does not check for overflow - deadline = _GetTickCount() + int(1000 * timeout + 0.5) + deadline = intmask(_GetTickCount()) + int(1000 * timeout + 0.5) else: deadline = 0 @@ -500,7 +497,7 @@ return True if not block: - now = _GetTickCount() + now = intmask(_GetTickCount()) if now > deadline: return False diff = deadline - now diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py --- a/pypy/module/_multiprocessing/interp_semaphore.py +++ b/pypy/module/_multiprocessing/interp_semaphore.py @@ -235,7 +235,7 @@ elif timeout >= 0.5 * rwin32.INFINITE: # 25 days raise OperationError(space.w_OverflowError, space.wrap("timeout is too large")) - full_msecs = int(timeout + 0.5) + full_msecs = r_uint(int(timeout + 0.5)) # check whether we can acquire without blocking res = rwin32.WaitForSingleObject(self.handle, 0) @@ -243,7 +243,7 @@ if res != rwin32.WAIT_TIMEOUT: return True - msecs = r_uint(full_msecs) + msecs = full_msecs start = _GetTickCount() while True: @@ -269,7 +269,7 @@ ticks = _GetTickCount() if r_uint(ticks - start) >= full_msecs: return False - msecs = r_uint(full_msecs - (ticks - start)) + msecs = full_msecs - r_uint(ticks - start) # handle result if res != rwin32.WAIT_TIMEOUT: diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -543,6 +543,7 @@ from pypy.rlib.rwin32 import GetLastError return space.wrap(GetLastError()) -def set_last_error(space, w_error): + at unwrap_spec(error=int) +def set_last_error(space, error): from pypy.rlib.rwin32 import SetLastError - SetLastError(space.uint_w(w_error)) + SetLastError(error) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -5,17 +5,34 @@ applevel_name = 'numpypy' interpleveldefs = { - 'array': 'interp_numarray.NDimArray', + 'ndarray': 'interp_numarray.W_NDimArray', 'dtype': 'interp_dtype.W_Dtype', 'ufunc': 'interp_ufuncs.W_Ufunc', + 'array': 'interp_numarray.array', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', 'ones': 'interp_numarray.ones', + 'dot': 'interp_numarray.dot', 'fromstring': 'interp_support.fromstring', + 'flatiter': 'interp_numarray.W_FlatIterator', - 'True_': 'space.w_True', - 'False_': 'space.w_False', + 'True_': 'types.Bool.True', + 'False_': 'types.Bool.False', + + 'generic': 'interp_boxes.W_GenericBox', + 'number': 'interp_boxes.W_NumberBox', + 'integer': 'interp_boxes.W_IntegerBox', + 'signedinteger': 'interp_boxes.W_SignedIntegerBox', + 'bool_': 'interp_boxes.W_BoolBox', + 'int8': 'interp_boxes.W_Int8Box', + 'int16': 'interp_boxes.W_Int16Box', + 'int32': 'interp_boxes.W_Int32Box', + 'int64': 'interp_boxes.W_Int64Box', + 'int_': 'interp_boxes.W_LongBox', + 'inexact': 'interp_boxes.W_InexactBox', + 'floating': 'interp_boxes.W_FloatingBox', + 'float64': 'interp_boxes.W_Float64Box', } # ufuncs @@ -48,6 +65,7 @@ ("sign", "sign"), ("sin", "sin"), ("subtract", "subtract"), + ('sqrt', 'sqrt'), ("tan", "tan"), ]: 
interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl @@ -57,4 +75,5 @@ 'mean': 'app_numpy.mean', 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', + 'arange': 'app_numpy.arange', } diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -6,12 +6,33 @@ inf = float("inf") e = math.e + def average(a): # This implements a weighted average, for now we don't implement the # weighting, just the average part! return mean(a) + def mean(a): if not hasattr(a, "mean"): a = numpypy.array(a) return a.mean() + + +def arange(start, stop=None, step=1, dtype=None): + '''arange([start], stop[, step], dtype=None) + Generate values in the half-interval [start, stop). + ''' + if stop is None: + stop = start + start = 0 + if dtype is None: + test = numpypy.array([start, stop, step, 0]) + dtype = test.dtype + arr = numpypy.zeros(int(math.ceil((stop - start) / step)), dtype=dtype) + i = start + for j in range(arr.size): + arr[j] = i + j += 1 + i += step + return arr diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -3,13 +3,16 @@ It should not be imported by the module itself """ +import re + from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root -from pypy.module.micronumpy.interp_dtype import W_Float64Dtype, W_BoolDtype +from pypy.module.micronumpy import interp_boxes +from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.module.micronumpy.interp_numarray import (Scalar, BaseArray, - descr_new_array, scalar_w, NDimArray) + scalar_w, W_NDimArray, array) from pypy.module.micronumpy import interp_ufuncs -from pypy.rlib.objectmodel import specialize -import re +from pypy.rlib.objectmodel import specialize, instantiate + class BogusBytecode(Exception): pass @@ -48,15 +51,12 @@ def __init__(self): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild - self.w_float64dtype = W_Float64Dtype(self) def issequence_w(self, w_obj): - return isinstance(w_obj, ListObject) or isinstance(w_obj, NDimArray) + return isinstance(w_obj, ListObject) or isinstance(w_obj, W_NDimArray) def isinstance_w(self, w_obj, w_tp): - if w_obj.tp == w_tp: - return True - return False + return w_obj.tp == w_tp def decode_index4(self, w_idx, size): if isinstance(w_idx, IntObject): @@ -69,8 +69,12 @@ if start < 0: start += size if stop < 0: - stop += size - return (start, stop, step, size//step) + stop += size + 1 + if step < 0: + lgt = (stop - start + 1) / step + 1 + else: + lgt = (stop - start - 1) / step + 1 + return (start, stop, step, lgt) @specialize.argtype(1) def wrap(self, obj): @@ -93,11 +97,13 @@ fixedview = listview def float(self, w_obj): - assert isinstance(w_obj, FloatObject) - return w_obj + if isinstance(w_obj, FloatObject): + return w_obj + assert isinstance(w_obj, interp_boxes.W_GenericBox) + return self.float(w_obj.descr_float(self)) def float_w(self, w_obj): - assert isinstance(w_obj, FloatObject) + assert isinstance(w_obj, FloatObject) return w_obj.floatval def int_w(self, w_obj): @@ -108,7 +114,10 @@ raise NotImplementedError def int(self, w_obj): - return w_obj + if isinstance(w_obj, IntObject): + return w_obj + assert isinstance(w_obj, interp_boxes.W_GenericBox) + return self.int(w_obj.descr_int(self)) def is_true(self, w_obj): assert isinstance(w_obj, BoolObject) @@ -131,6 +140,9 @@ assert isinstance(what, tp) return what + def 
allocate_instance(self, klass, w_subtype): + return instantiate(klass) + def len_w(self, w_obj): if isinstance(w_obj, ListObject): return len(w_obj.items) @@ -208,11 +220,12 @@ def execute(self, interp): arr = interp.variables[self.name] - w_index = self.index.execute(interp).eval(arr.start_iter()).wrap(interp.space) + w_index = self.index.execute(interp) # cast to int if isinstance(w_index, FloatObject): w_index = IntObject(int(w_index.floatval)) - w_val = self.expr.execute(interp).eval(arr.start_iter()).wrap(interp.space) + w_val = self.expr.execute(interp) + assert isinstance(arr, BaseArray) arr.descr_setitem(interp.space, w_index, w_val) def __repr__(self): @@ -240,23 +253,28 @@ w_rhs = self.rhs.wrap(interp.space) else: w_rhs = self.rhs.execute(interp) + if not isinstance(w_lhs, BaseArray): + # scalar + dtype = get_dtype_cache(interp.space).w_float64dtype + w_lhs = scalar_w(interp.space, dtype, w_lhs) assert isinstance(w_lhs, BaseArray) if self.name == '+': w_res = w_lhs.descr_add(interp.space, w_rhs) elif self.name == '*': w_res = w_lhs.descr_mul(interp.space, w_rhs) elif self.name == '-': - w_res = w_lhs.descr_sub(interp.space, w_rhs) + w_res = w_lhs.descr_sub(interp.space, w_rhs) elif self.name == '->': - if isinstance(w_rhs, Scalar): - w_rhs = w_rhs.eval(w_rhs.start_iter()).wrap(interp.space) - assert isinstance(w_rhs, FloatObject) + assert not isinstance(w_rhs, Scalar) + if isinstance(w_rhs, FloatObject): w_rhs = IntObject(int(w_rhs.floatval)) + assert isinstance(w_lhs, BaseArray) w_res = w_lhs.descr_getitem(interp.space, w_rhs) else: raise NotImplementedError - if not isinstance(w_res, BaseArray): - dtype = interp.space.fromcache(W_Float64Dtype) + if (not isinstance(w_res, BaseArray) and + not isinstance(w_res, interp_boxes.W_GenericBox)): + dtype = get_dtype_cache(interp.space).w_float64dtype w_res = scalar_w(interp.space, dtype, w_res) return w_res @@ -274,9 +292,7 @@ return space.wrap(self.v) def execute(self, interp): - dtype = interp.space.fromcache(W_Float64Dtype) - assert isinstance(dtype, W_Float64Dtype) - return Scalar(dtype, dtype.box(self.v)) + return interp.space.wrap(self.v) class RangeConstant(Node): def __init__(self, v): @@ -284,10 +300,10 @@ def execute(self, interp): w_list = interp.space.newlist( - [interp.space.wrap(float(i)) for i in range(self.v)]) - dtype = interp.space.fromcache(W_Float64Dtype) - return descr_new_array(interp.space, None, w_list, w_dtype=dtype, - w_order=None) + [interp.space.wrap(float(i)) for i in range(self.v)] + ) + dtype = get_dtype_cache(interp.space).w_float64dtype + return array(interp.space, w_list, w_dtype=dtype, w_order=None) def __repr__(self): return 'Range(%s)' % self.v @@ -308,9 +324,8 @@ def execute(self, interp): w_list = self.wrap(interp.space) - dtype = interp.space.fromcache(W_Float64Dtype) - return descr_new_array(interp.space, None, w_list, w_dtype=dtype, - w_order=None) + dtype = get_dtype_cache(interp.space).w_float64dtype + return array(interp.space, w_list, w_dtype=dtype, w_order=None) def __repr__(self): return "[" + ", ".join([repr(item) for item in self.items]) + "]" @@ -325,6 +340,9 @@ def wrap(self, space): return SliceObject(self.start, self.stop, self.step) + def execute(self, interp): + return SliceObject(self.start, self.stop, self.step) + def __repr__(self): return 'slice(%s,%s,%s)' % (self.start, self.stop, self.step) @@ -374,9 +392,11 @@ if isinstance(w_res, BaseArray): return w_res if isinstance(w_res, FloatObject): - dtype = interp.space.fromcache(W_Float64Dtype) + dtype = 
get_dtype_cache(interp.space).w_float64dtype elif isinstance(w_res, BoolObject): - dtype = interp.space.fromcache(W_BoolDtype) + dtype = get_dtype_cache(interp.space).w_booldtype + elif isinstance(w_res, interp_boxes.W_GenericBox): + dtype = w_res.get_dtype(interp.space) else: dtype = None return scalar_w(interp.space, dtype, w_res) @@ -477,8 +497,8 @@ else: step = 1 return SliceConstant(start, stop, step) - - + + def parse_expression(self, tokens): stack = [] while tokens.remaining(): @@ -532,7 +552,7 @@ if token.name == 'array_right': return elems assert token.name == 'coma' - + def parse_statement(self, tokens): if (tokens.get(0).name == 'identifier' and tokens.get(1).name == 'assign'): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_boxes.py @@ -0,0 +1,267 @@ +from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import TypeDef +from pypy.objspace.std.floattype import float_typedef +from pypy.objspace.std.inttype import int_typedef +from pypy.objspace.std.typeobject import W_TypeObject +from pypy.rlib.rarithmetic import LONG_BIT +from pypy.tool.sourcetools import func_with_new_name + + +MIXIN_64 = (int_typedef,) if LONG_BIT == 64 else () + +def new_dtype_getter(name): + def get_dtype(space): + from pypy.module.micronumpy.interp_dtype import get_dtype_cache + return getattr(get_dtype_cache(space), "w_%sdtype" % name) + def new(space, w_subtype, w_value): + dtype = get_dtype(space) + return dtype.itemtype.coerce_subtype(space, w_subtype, w_value) + return func_with_new_name(new, name + "_box_new"), staticmethod(get_dtype) + +class PrimitiveBox(object): + _mixin_ = True + + def __init__(self, value): + self.value = value + + def convert_to(self, dtype): + return dtype.box(self.value) + +class W_GenericBox(Wrappable): + _attrs_ = () + + def descr__new__(space, w_subtype, __args__): + assert isinstance(w_subtype, W_TypeObject) + raise operationerrfmt(space.w_TypeError, "cannot create '%s' instances", + w_subtype.get_module_type_name() + ) + + def descr_str(self, space): + return self.descr_repr(space) + + def descr_repr(self, space): + return space.wrap(self.get_dtype(space).itemtype.str_format(self)) + + def descr_int(self, space): + box = self.convert_to(W_LongBox.get_dtype(space)) + assert isinstance(box, W_LongBox) + return space.wrap(box.value) + + def descr_float(self, space): + box = self.convert_to(W_Float64Box.get_dtype(space)) + assert isinstance(box, W_Float64Box) + return space.wrap(box.value) + + def descr_nonzero(self, space): + dtype = self.get_dtype(space) + return space.wrap(dtype.itemtype.bool(self)) + + def _binop_impl(ufunc_name): + def impl(self, space, w_other): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) + + def _binop_right_impl(ufunc_name): + def impl(self, space, w_other): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) + + def _unaryop_impl(ufunc_name): + def impl(self, space): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + return 
func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) + + descr_add = _binop_impl("add") + descr_sub = _binop_impl("subtract") + descr_mul = _binop_impl("multiply") + descr_div = _binop_impl("divide") + descr_eq = _binop_impl("equal") + descr_ne = _binop_impl("not_equal") + descr_lt = _binop_impl("less") + descr_le = _binop_impl("less_equal") + descr_gt = _binop_impl("greater") + descr_ge = _binop_impl("greater_equal") + + descr_radd = _binop_right_impl("add") + descr_rmul = _binop_right_impl("multiply") + + descr_neg = _unaryop_impl("negative") + descr_abs = _unaryop_impl("absolute") + + +class W_BoolBox(W_GenericBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("bool") + +class W_NumberBox(W_GenericBox): + _attrs_ = () + +class W_IntegerBox(W_NumberBox): + pass + +class W_SignedIntegerBox(W_IntegerBox): + pass + +class W_UnsignedIntgerBox(W_IntegerBox): + pass + +class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int8") + +class W_UInt8Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint8") + +class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int16") + +class W_UInt16Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint16") + +class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int32") + +class W_UInt32Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint32") + +class W_LongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("long") + +class W_ULongBox(W_UnsignedIntgerBox, PrimitiveBox): + pass + +class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int64") + +class W_UInt64Box(W_UnsignedIntgerBox, PrimitiveBox): + pass + +class W_InexactBox(W_NumberBox): + _attrs_ = () + +class W_FloatingBox(W_InexactBox): + _attrs_ = () + +class W_Float32Box(W_FloatingBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("float32") + +class W_Float64Box(W_FloatingBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("float64") + + + +W_GenericBox.typedef = TypeDef("generic", + __module__ = "numpypy", + + __new__ = interp2app(W_GenericBox.descr__new__.im_func), + + __str__ = interp2app(W_GenericBox.descr_str), + __repr__ = interp2app(W_GenericBox.descr_repr), + __int__ = interp2app(W_GenericBox.descr_int), + __float__ = interp2app(W_GenericBox.descr_float), + __nonzero__ = interp2app(W_GenericBox.descr_nonzero), + + __add__ = interp2app(W_GenericBox.descr_add), + __sub__ = interp2app(W_GenericBox.descr_sub), + __mul__ = interp2app(W_GenericBox.descr_mul), + __div__ = interp2app(W_GenericBox.descr_div), + + __radd__ = interp2app(W_GenericBox.descr_add), + __rmul__ = interp2app(W_GenericBox.descr_rmul), + + __eq__ = interp2app(W_GenericBox.descr_eq), + __ne__ = interp2app(W_GenericBox.descr_ne), + __lt__ = interp2app(W_GenericBox.descr_lt), + __le__ = interp2app(W_GenericBox.descr_le), + __gt__ = interp2app(W_GenericBox.descr_gt), + __ge__ = interp2app(W_GenericBox.descr_ge), + + __neg__ = interp2app(W_GenericBox.descr_neg), + __abs__ = interp2app(W_GenericBox.descr_abs), +) + +W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_BoolBox.descr__new__.im_func), +) + +W_NumberBox.typedef = TypeDef("number", W_GenericBox.typedef, + __module__ = "numpypy", +) + +W_IntegerBox.typedef = 
TypeDef("integer", W_NumberBox.typedef, + __module__ = "numpypy", +) + +W_SignedIntegerBox.typedef = TypeDef("signedinteger", W_IntegerBox.typedef, + __module__ = "numpypy", +) + +W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int8Box.descr__new__.im_func), +) + +W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int16Box.descr__new__.im_func), +) + +W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int32Box.typedef = TypeDef("int32", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int32Box.descr__new__.im_func), +) + +W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +if LONG_BIT == 32: + long_name = "int32" +elif LONG_BIT == 64: + long_name = "int64" +W_LongBox.typedef = TypeDef(long_name, (W_SignedIntegerBox.typedef, int_typedef,), + __module__ = "numpypy", +) + +W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, + __module__ = "numpypy", + __new__ = interp2app(W_Int64Box.descr__new__.im_func), +) + +W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, + __module__ = "numpypy", +) + +W_FloatingBox.typedef = TypeDef("floating", W_InexactBox.typedef, + __module__ = "numpypy", +) + +W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, + __module__ = "numpypy", +) + +W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), + __module__ = "numpypy", + + __new__ = interp2app(W_Float64Box.descr__new__.im_func), +) \ No newline at end of file diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -1,16 +1,11 @@ -import functools -import math - from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app -from pypy.interpreter.typedef import TypeDef, interp_attrproperty, GetSetProperty -from pypy.module.micronumpy import signature -from pypy.objspace.std.floatobject import float2string -from pypy.rlib import rarithmetic, rfloat -from pypy.rlib.rarithmetic import LONG_BIT, widen -from pypy.rlib.objectmodel import specialize, enforceargs -from pypy.rlib.unroll import unrolling_iterable +from pypy.interpreter.typedef import (TypeDef, GetSetProperty, + interp_attrproperty, interp_attrproperty_w) +from pypy.module.micronumpy import types, signature, interp_boxes +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import LONG_BIT from pypy.rpython.lltypesystem import lltype, rffi @@ -19,501 +14,218 @@ BOOLLTR = "b" FLOATINGLTR = "f" + +VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True}) + class W_Dtype(Wrappable): - def __init__(self, space): - pass + _immuable_fields_ = ["itemtype", "num", "kind"] + + def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[]): + self.signature = signature.BaseSignature() + self.itemtype = itemtype + self.num = num + 
self.kind = kind + self.name = name + self.char = char + self.w_box_type = w_box_type + self.alternate_constructors = alternate_constructors + + def malloc(self, length): + # XXX find out why test_zjit explodes with tracking of allocations + return lltype.malloc(VOID_STORAGE, self.itemtype.get_element_size() * length, + zero=True, flavor="raw", + track_allocation=False, add_memory_pressure=True + ) + + @specialize.argtype(1) + def box(self, value): + return self.itemtype.box(value) + + def coerce(self, space, w_item): + return self.itemtype.coerce(space, w_item) + + def getitem(self, storage, i): + return self.itemtype.read(storage, self.itemtype.get_element_size(), i, 0) + + def setitem(self, storage, i, box): + self.itemtype.store(storage, self.itemtype.get_element_size(), i, 0, box) + + def fill(self, storage, box, start, stop): + self.itemtype.fill(storage, self.itemtype.get_element_size(), box, start, stop, 0) def descr__new__(space, w_subtype, w_dtype): + cache = get_dtype_cache(space) + if space.is_w(w_dtype, space.w_None): - return space.fromcache(W_Float64Dtype) + return cache.w_float64dtype + elif space.isinstance_w(w_dtype, w_subtype): + return w_dtype elif space.isinstance_w(w_dtype, space.w_str): - dtype = space.str_w(w_dtype) - for alias, dtype_class in dtypes_by_alias: - if alias == dtype: - return space.fromcache(dtype_class) - elif isinstance(space.interpclass_w(w_dtype), W_Dtype): - return w_dtype - elif space.isinstance_w(w_dtype, space.w_type): - for typename, dtype_class in dtypes_by_apptype: - if space.is_w(getattr(space, "w_%s" % typename), w_dtype): - return space.fromcache(dtype_class) + name = space.str_w(w_dtype) + for dtype in cache.builtin_dtypes: + if dtype.name == name or dtype.char == name: + return dtype + else: + for dtype in cache.builtin_dtypes: + if w_dtype in dtype.alternate_constructors: + return dtype + if w_dtype is dtype.w_box_type: + return dtype raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + def descr_str(self, space): + return space.wrap(self.name) + def descr_repr(self, space): return space.wrap("dtype('%s')" % self.name) - def descr_str(self, space): - return space.wrap(self.name) + def descr_get_itemsize(self, space): + return space.wrap(self.itemtype.get_element_size()) def descr_get_shape(self, space): return space.newtuple([]) - -class BaseBox(object): - pass - -VOID_TP = lltype.Ptr(lltype.Array(lltype.Void, hints={'nolength': True, "uncast_on_llgraph": True})) - -def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype, - expected_size=None): - - class Box(BaseBox): - def __init__(self, val): - self.val = val - - def wrap(self, space): - val = self.val - if valtype is rarithmetic.r_singlefloat: - val = float(val) - return space.wrap(val) - - def convert_to(self, dtype): - return dtype.adapt_val(self.val) - Box.__name__ = "%sBox" % T._name - - TP = lltype.Ptr(lltype.Array(T, hints={'nolength': True})) - class W_LowLevelDtype(W_Dtype): - signature = signature.BaseSignature() - - def erase(self, storage): - return rffi.cast(VOID_TP, storage) - - def unerase(self, storage): - return rffi.cast(TP, storage) - - @enforceargs(None, valtype) - def box(self, value): - return Box(value) - - def unbox(self, box): - assert isinstance(box, Box) - return box.val - - def unwrap(self, space, w_item): - raise NotImplementedError - - def malloc(self, size): - # XXX find out why test_zjit explodes with tracking of allocations - return self.erase(lltype.malloc(TP.TO, size, - zero=True, flavor="raw", - 
track_allocation=False, add_memory_pressure=True - )) - - def getitem(self, storage, i): - return Box(self.unerase(storage)[i]) - - def setitem(self, storage, i, item): - self.unerase(storage)[i] = self.unbox(item) - - def setitem_w(self, space, storage, i, w_item): - self.setitem(storage, i, self.unwrap(space, w_item)) - - def fill(self, storage, item, start, stop): - storage = self.unerase(storage) - item = self.unbox(item) - for i in xrange(start, stop): - storage[i] = item - - @specialize.argtype(1) - def adapt_val(self, val): - return self.box(rffi.cast(TP.TO.OF, val)) - - W_LowLevelDtype.__name__ = "W_%sDtype" % name.capitalize() - W_LowLevelDtype.num = num - W_LowLevelDtype.kind = kind - W_LowLevelDtype.name = name - W_LowLevelDtype.aliases = aliases - W_LowLevelDtype.applevel_types = applevel_types - W_LowLevelDtype.num_bytes = rffi.sizeof(T) - if expected_size is not None: - assert W_LowLevelDtype.num_bytes == expected_size - return W_LowLevelDtype - - -def binop(func): - @functools.wraps(func) - def impl(self, v1, v2): - return self.adapt_val(func(self, - self.for_computation(self.unbox(v1)), - self.for_computation(self.unbox(v2)), - )) - return impl - -def raw_binop(func): - # Returns the result unwrapped. - @functools.wraps(func) - def impl(self, v1, v2): - return func(self, - self.for_computation(self.unbox(v1)), - self.for_computation(self.unbox(v2)) - ) - return impl - -def unaryop(func): - @functools.wraps(func) - def impl(self, v): - return self.adapt_val(func(self, self.for_computation(self.unbox(v)))) - return impl - -class ArithmeticTypeMixin(object): - _mixin_ = True - - @binop - def add(self, v1, v2): - return v1 + v2 - @binop - def sub(self, v1, v2): - return v1 - v2 - @binop - def mul(self, v1, v2): - return v1 * v2 - - @unaryop - def pos(self, v): - return +v - @unaryop - def neg(self, v): - return -v - @unaryop - def abs(self, v): - return abs(v) - - @binop - def max(self, v1, v2): - return max(v1, v2) - @binop - def min(self, v1, v2): - return min(v1, v2) - - def bool(self, v): - return bool(self.for_computation(self.unbox(v))) - @raw_binop - def eq(self, v1, v2): - return v1 == v2 - @raw_binop - def ne(self, v1, v2): - return v1 != v2 - @raw_binop - def lt(self, v1, v2): - return v1 < v2 - @raw_binop - def le(self, v1, v2): - return v1 <= v2 - @raw_binop - def gt(self, v1, v2): - return v1 > v2 - @raw_binop - def ge(self, v1, v2): - return v1 >= v2 - - -class FloatArithmeticDtype(ArithmeticTypeMixin): - _mixin_ = True - - def unwrap(self, space, w_item): - return self.adapt_val(space.float_w(space.float(w_item))) - - def for_computation(self, v): - return float(v) - - def str_format(self, item): - return float2string(self.for_computation(self.unbox(item)), 'g', rfloat.DTSF_STR_PRECISION) - - @binop - def div(self, v1, v2): - try: - return v1 / v2 - except ZeroDivisionError: - if v1 == v2 == 0.0: - return rfloat.NAN - return rfloat.copysign(rfloat.INFINITY, v1 * v2) - @binop - def mod(self, v1, v2): - return math.fmod(v1, v2) - @binop - def pow(self, v1, v2): - return math.pow(v1, v2) - - @unaryop - def sign(self, v): - if v == 0.0: - return 0.0 - return rfloat.copysign(1.0, v) - @unaryop - def reciprocal(self, v): - if v == 0.0: - return rfloat.copysign(rfloat.INFINITY, v) - return 1.0 / v - @unaryop - def fabs(self, v): - return math.fabs(v) - @unaryop - def floor(self, v): - return math.floor(v) - - @binop - def copysign(self, v1, v2): - return math.copysign(v1, v2) - @unaryop - def exp(self, v): - try: - return math.exp(v) - except OverflowError: - return 
rfloat.INFINITY - @unaryop - def sin(self, v): - return math.sin(v) - @unaryop - def cos(self, v): - return math.cos(v) - @unaryop - def tan(self, v): - return math.tan(v) - @unaryop - def arcsin(self, v): - if not -1.0 <= v <= 1.0: - return rfloat.NAN - return math.asin(v) - @unaryop - def arccos(self, v): - if not -1.0 <= v <= 1.0: - return rfloat.NAN - return math.acos(v) - @unaryop - def arctan(self, v): - return math.atan(v) - @unaryop - def arcsinh(self, v): - return math.asinh(v) - @unaryop - def arctanh(self, v): - if v == 1.0 or v == -1.0: - return math.copysign(rfloat.INFINITY, v) - if not -1.0 < v < 1.0: - return rfloat.NAN - return math.atanh(v) - -class IntegerArithmeticDtype(ArithmeticTypeMixin): - _mixin_ = True - - def unwrap(self, space, w_item): - return self.adapt_val(space.int_w(space.int(w_item))) - - def for_computation(self, v): - return widen(v) - - def str_format(self, item): - return str(widen(self.unbox(item))) - - @binop - def div(self, v1, v2): - if v2 == 0: - return 0 - return v1 / v2 - @binop - def mod(self, v1, v2): - return v1 % v2 - -class SignedIntegerArithmeticDtype(IntegerArithmeticDtype): - _mixin_ = True - - @unaryop - def sign(self, v): - if v > 0: - return 1 - elif v < 0: - return -1 - else: - assert v == 0 - return 0 - -class UnsignedIntegerArithmeticDtype(IntegerArithmeticDtype): - _mixin_ = True - - @unaryop - def sign(self, v): - return int(v != 0) - - -W_BoolDtype = create_low_level_dtype( - num = 0, kind = BOOLLTR, name = "bool", - aliases = ["?", "bool", "bool8"], - applevel_types = ["bool"], - T = lltype.Bool, - valtype = bool, -) -class W_BoolDtype(SignedIntegerArithmeticDtype, W_BoolDtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.is_true(w_item)) - - def str_format(self, item): - v = self.unbox(item) - return "True" if v else "False" - - def for_computation(self, v): - return int(v) - -W_Int8Dtype = create_low_level_dtype( - num = 1, kind = SIGNEDLTR, name = "int8", - aliases = ["b", "int8", "i1"], - applevel_types = [], - T = rffi.SIGNEDCHAR, - valtype = rffi.SIGNEDCHAR._type, - expected_size = 1, -) -class W_Int8Dtype(SignedIntegerArithmeticDtype, W_Int8Dtype): - pass - -W_UInt8Dtype = create_low_level_dtype( - num = 2, kind = UNSIGNEDLTR, name = "uint8", - aliases = ["B", "uint8", "I1"], - applevel_types = [], - T = rffi.UCHAR, - valtype = rffi.UCHAR._type, - expected_size = 1, -) -class W_UInt8Dtype(UnsignedIntegerArithmeticDtype, W_UInt8Dtype): - pass - -W_Int16Dtype = create_low_level_dtype( - num = 3, kind = SIGNEDLTR, name = "int16", - aliases = ["h", "int16", "i2"], - applevel_types = [], - T = rffi.SHORT, - valtype = rffi.SHORT._type, - expected_size = 2, -) -class W_Int16Dtype(SignedIntegerArithmeticDtype, W_Int16Dtype): - pass - -W_UInt16Dtype = create_low_level_dtype( - num = 4, kind = UNSIGNEDLTR, name = "uint16", - aliases = ["H", "uint16", "I2"], - applevel_types = [], - T = rffi.USHORT, - valtype = rffi.USHORT._type, - expected_size = 2, -) -class W_UInt16Dtype(UnsignedIntegerArithmeticDtype, W_UInt16Dtype): - pass - -W_Int32Dtype = create_low_level_dtype( - num = 5, kind = SIGNEDLTR, name = "int32", - aliases = ["i", "int32", "i4"], - applevel_types = [], - T = rffi.INT, - valtype = rffi.INT._type, - expected_size = 4, -) -class W_Int32Dtype(SignedIntegerArithmeticDtype, W_Int32Dtype): - pass - -W_UInt32Dtype = create_low_level_dtype( - num = 6, kind = UNSIGNEDLTR, name = "uint32", - aliases = ["I", "uint32", "I4"], - applevel_types = [], - T = rffi.UINT, - valtype = rffi.UINT._type, - 
expected_size = 4, -) -class W_UInt32Dtype(UnsignedIntegerArithmeticDtype, W_UInt32Dtype): - pass - -W_Int64Dtype = create_low_level_dtype( - num = 9, kind = SIGNEDLTR, name = "int64", - aliases = ["q", "int64", "i8"], - applevel_types = ["long"], - T = rffi.LONGLONG, - valtype = rffi.LONGLONG._type, - expected_size = 8, -) -class W_Int64Dtype(SignedIntegerArithmeticDtype, W_Int64Dtype): - pass - -W_UInt64Dtype = create_low_level_dtype( - num = 10, kind = UNSIGNEDLTR, name = "uint64", - aliases = ["Q", "uint64", "I8"], - applevel_types = [], - T = rffi.ULONGLONG, - valtype = rffi.ULONGLONG._type, - expected_size = 8, -) -class W_UInt64Dtype(UnsignedIntegerArithmeticDtype, W_UInt64Dtype): - pass - -if LONG_BIT == 32: - long_dtype = W_Int32Dtype - ulong_dtype = W_UInt32Dtype -elif LONG_BIT == 64: - long_dtype = W_Int64Dtype - ulong_dtype = W_UInt64Dtype -else: - assert False - -class W_LongDtype(long_dtype): - num = 7 - aliases = ["l"] - applevel_types = ["int"] - -class W_ULongDtype(ulong_dtype): - num = 8 - aliases = ["L"] - -W_Float32Dtype = create_low_level_dtype( - num = 11, kind = FLOATINGLTR, name = "float32", - aliases = ["f", "float32", "f4"], - applevel_types = [], - T = lltype.SingleFloat, - valtype = rarithmetic.r_singlefloat, - expected_size = 4, -) -class W_Float32Dtype(FloatArithmeticDtype, W_Float32Dtype): - pass - -W_Float64Dtype = create_low_level_dtype( - num = 12, kind = FLOATINGLTR, name = "float64", - aliases = ["d", "float64", "f8"], - applevel_types = ["float"], - T = lltype.Float, - valtype = float, - expected_size = 8, -) -class W_Float64Dtype(FloatArithmeticDtype, W_Float64Dtype): - pass - -ALL_DTYPES = [ - W_BoolDtype, - W_Int8Dtype, W_UInt8Dtype, W_Int16Dtype, W_UInt16Dtype, - W_Int32Dtype, W_UInt32Dtype, W_LongDtype, W_ULongDtype, - W_Int64Dtype, W_UInt64Dtype, - W_Float32Dtype, W_Float64Dtype, -] - -dtypes_by_alias = unrolling_iterable([ - (alias, dtype) - for dtype in ALL_DTYPES - for alias in dtype.aliases -]) -dtypes_by_apptype = unrolling_iterable([ - (apptype, dtype) - for dtype in ALL_DTYPES - for apptype in dtype.applevel_types -]) -dtypes_by_num_bytes = unrolling_iterable(sorted([ - (dtype.num_bytes, dtype) - for dtype in ALL_DTYPES -])) - W_Dtype.typedef = TypeDef("dtype", - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_Dtype.descr__new__.im_func), + __str__= interp2app(W_Dtype.descr_str), __repr__ = interp2app(W_Dtype.descr_repr), - __str__ = interp2app(W_Dtype.descr_str), num = interp_attrproperty("num", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), - itemsize = interp_attrproperty("num_bytes", cls=W_Dtype), + type = interp_attrproperty_w("w_box_type", cls=W_Dtype), + itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), shape = GetSetProperty(W_Dtype.descr_get_shape), ) W_Dtype.typedef.acceptable_as_base_class = False + +class DtypeCache(object): + def __init__(self, space): + self.w_booldtype = W_Dtype( + types.Bool(), + num=0, + kind=BOOLLTR, + name="bool", + char="?", + w_box_type = space.gettypefor(interp_boxes.W_BoolBox), + alternate_constructors=[space.w_bool], + ) + self.w_int8dtype = W_Dtype( + types.Int8(), + num=1, + kind=SIGNEDLTR, + name="int8", + char="b", + w_box_type = space.gettypefor(interp_boxes.W_Int8Box) + ) + self.w_uint8dtype = W_Dtype( + types.UInt8(), + num=2, + kind=UNSIGNEDLTR, + name="uint8", + char="B", + w_box_type = space.gettypefor(interp_boxes.W_UInt8Box), + ) + self.w_int16dtype = W_Dtype( + types.Int16(), + num=3, + kind=SIGNEDLTR, + name="int16", + char="h", + w_box_type = 
space.gettypefor(interp_boxes.W_Int16Box), + ) + self.w_uint16dtype = W_Dtype( + types.UInt16(), + num=4, + kind=UNSIGNEDLTR, + name="uint16", + char="H", + w_box_type = space.gettypefor(interp_boxes.W_UInt16Box), + ) + self.w_int32dtype = W_Dtype( + types.Int32(), + num=5, + kind=SIGNEDLTR, + name="int32", + char="i", + w_box_type = space.gettypefor(interp_boxes.W_Int32Box), + ) + self.w_uint32dtype = W_Dtype( + types.UInt32(), + num=6, + kind=UNSIGNEDLTR, + name="uint32", + char="I", + w_box_type = space.gettypefor(interp_boxes.W_UInt32Box), + ) + if LONG_BIT == 32: + name = "int32" + elif LONG_BIT == 64: + name = "int64" + self.w_longdtype = W_Dtype( + types.Long(), + num=7, + kind=SIGNEDLTR, + name=name, + char="l", + w_box_type = space.gettypefor(interp_boxes.W_LongBox), + alternate_constructors=[space.w_int], + ) + self.w_ulongdtype = W_Dtype( + types.ULong(), + num=8, + kind=UNSIGNEDLTR, + name="u" + name, + char="L", + w_box_type = space.gettypefor(interp_boxes.W_ULongBox), + ) + self.w_int64dtype = W_Dtype( + types.Int64(), + num=9, + kind=SIGNEDLTR, + name="int64", + char="q", + w_box_type = space.gettypefor(interp_boxes.W_Int64Box), + alternate_constructors=[space.w_long], + ) + self.w_uint64dtype = W_Dtype( + types.UInt64(), + num=10, + kind=UNSIGNEDLTR, + name="uint64", + char="Q", + w_box_type = space.gettypefor(interp_boxes.W_UInt64Box), + ) + self.w_float32dtype = W_Dtype( + types.Float32(), + num=11, + kind=FLOATINGLTR, + name="float32", + char="f", + w_box_type = space.gettypefor(interp_boxes.W_Float32Box), + ) + self.w_float64dtype = W_Dtype( + types.Float64(), + num=12, + kind=FLOATINGLTR, + name="float64", + char="d", + w_box_type = space.gettypefor(interp_boxes.W_Float64Box), + alternate_constructors=[space.w_float], + ) + + self.builtin_dtypes = [ + self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, + self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype, + self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype, + self.w_int64dtype, self.w_uint64dtype, self.w_float32dtype, + self.w_float64dtype + ] + self.dtypes_by_num_bytes = sorted( + (dtype.itemtype.get_element_size(), dtype) + for dtype in self.builtin_dtypes + ) + +def get_dtype_cache(space): + return space.fromcache(DtypeCache) \ No newline at end of file diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -4,7 +4,7 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import interp_ufuncs, interp_dtype, signature from pypy.rlib import jit -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name from pypy.rlib.rstring import StringBuilder from pypy.rlib.objectmodel import instantiate @@ -98,46 +98,6 @@ endshape[i] = remainder[i] return endshape -def descr_new_array(space, w_subtype, w_item_or_iterable, w_dtype=None, - w_order=NoneNotWrapped): - # find scalar - if not space.issequence_w(w_item_or_iterable): - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, - w_item_or_iterable, - w_dtype) - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - return scalar_w(space, dtype, w_item_or_iterable) - if w_order is None: - order = 'C' - else: - order = space.str_w(w_order) - if order != 'C': # or order != 'F': - raise operationerrfmt(space.w_ValueError, "Unknown order: 
%s", - order) - shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) - # they come back in C order - size = len(elems_w) - if space.is_w(w_dtype, space.w_None): - w_dtype = None - for w_elem in elems_w: - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, - w_dtype) - if w_dtype is space.fromcache(interp_dtype.W_Float64Dtype): - break - if w_dtype is None: - w_dtype = space.w_None - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - arr = NDimArray(size, shape[:], dtype=dtype, order=order) - shapelen = len(shape) - arr_iter = arr.start_iter(arr.shape) - for i in range(len(elems_w)): - w_elem = elems_w[i] - dtype.setitem_w(space, arr.storage, arr_iter.offset, w_elem) - arr_iter = arr_iter.next(shapelen) - return arr # Iterators for arrays # -------------------- @@ -178,6 +138,25 @@ def get_offset(self): return self.offset +class OneDimIterator(BaseIterator): + def __init__(self, start, step, stop): + self.offset = start + self.step = step + self.size = stop * step + start + + def next(self, shapelen): + arr = instantiate(OneDimIterator) + arr.size = self.size + arr.step = self.step + arr.offset = self.offset + self.step + return arr + + def done(self): + return self.offset == self.size + + def get_offset(self): + return self.offset + class ViewIterator(BaseIterator): def __init__(self, arr): self.indices = [0] * len(arr.shape) @@ -227,7 +206,7 @@ self.strides = [] self.backstrides = [] for i in range(len(arr.shape)): - if arr.shape[i]==1: + if arr.shape[i] == 1: self.strides.append(0) self.backstrides.append(0) else: @@ -312,12 +291,12 @@ def get_offset(self): return 0 + class BaseArray(Wrappable): _attrs_ = ["invalidates", "signature", "shape", "strides", "backstrides", "start", 'order'] - _immutable_fields_ = ['shape[*]', "strides[*]", "backstrides[*]", 'start', - "order"] + _immutable_fields_ = ['start', "order"] strides = None start = 0 @@ -327,21 +306,24 @@ self.shape = shape self.order = order if self.strides is None: - strides = [] - backstrides = [] - s = 1 - shape_rev = shape[:] - if order == 'C': - shape_rev.reverse() - for sh in shape_rev: - strides.append(s) - backstrides.append(s * (sh - 1)) - s *= sh - if order == 'C': - strides.reverse() - backstrides.reverse() - self.strides = strides[:] - self.backstrides = backstrides[:] + self.calc_strides(shape) + + def calc_strides(self, shape): + strides = [] + backstrides = [] + s = 1 + shape_rev = shape[:] + if self.order == 'C': + shape_rev.reverse() + for sh in shape_rev: + strides.append(s) + backstrides.append(s * (sh - 1)) + s *= sh + if self.order == 'C': + strides.reverse() + backstrides.reverse() + self.strides = strides[:] + self.backstrides = backstrides[:] def invalidated(self): if self.invalidates: @@ -355,6 +337,13 @@ def add_invalidates(self, other): self.invalidates.append(other) + def descr__new__(space, w_subtype, w_size, w_dtype=None): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + size, shape = _find_size_and_shape(space, w_size) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + def _unaryop_impl(ufunc_name): def impl(self, space): return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) @@ -428,8 +417,8 @@ self=self, dtype=dtype, i=i, result=result, idx=idx, cur_best=cur_best) - new_best = getattr(dtype, op_name)(cur_best, self.eval(i)) - if dtype.ne(new_best, cur_best): + new_best = getattr(dtype.itemtype, 
op_name)(cur_best, self.eval(i)) + if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best i = i.next(shapelen) @@ -439,8 +428,7 @@ size = self.find_size() if size == 0: raise OperationError(space.w_ValueError, - space.wrap("Can't call %s on zero-size arrays" \ - % op_name)) + space.wrap("Can't call %s on zero-size arrays" % op_name)) return space.wrap(loop(self)) return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) @@ -452,7 +440,7 @@ all_driver.jit_merge_point(signature=self.signature, shapelen=shapelen, self=self, dtype=dtype, i=i) - if not dtype.bool(self.eval(i)): + if not dtype.itemtype.bool(self.eval(i)): return False i = i.next(shapelen) return True @@ -467,7 +455,7 @@ any_driver.jit_merge_point(signature=self.signature, shapelen=shapelen, self=self, dtype=dtype, i=i) - if dtype.bool(self.eval(i)): + if dtype.itemtype.bool(self.eval(i)): return True i = i.next(shapelen) return False @@ -499,7 +487,7 @@ return space.wrap(self.find_size()) def descr_copy(self, space): - return space.call_function(space.gettypefor(BaseArray), self, self.find_dtype()) + return self.get_concrete().copy() def descr_len(self, space): return self.get_concrete().descr_len(space) @@ -519,8 +507,8 @@ res.append(')') else: concrete.to_str(space, 1, res, indent=' ') - if (dtype is not space.fromcache(interp_dtype.W_Float64Dtype) and - dtype is not space.fromcache(interp_dtype.W_Int64Dtype)) or \ + if (dtype is not interp_dtype.get_dtype_cache(space).w_float64dtype and + dtype is not interp_dtype.get_dtype_cache(space).w_int64dtype) or \ not self.find_size(): res.append(", dtype=" + dtype.name) res.append(")") @@ -589,7 +577,7 @@ start = False else: builder.append(spacer) - builder.append(dtype.str_format(self.getitem(item))) + builder.append(dtype.itemtype.str_format(self.getitem(item))) item += self.strides[0] # Add a comma only if comma is False - this prevents adding two # commas @@ -602,7 +590,7 @@ start = False else: builder.append(spacer) - builder.append(dtype.str_format(self.getitem(item))) + builder.append(dtype.itemtype.str_format(self.getitem(item))) item += self.strides[0] i += 1 else: @@ -674,6 +662,7 @@ return False return True + @jit.unroll_safe def _prepare_slice_args(self, space, w_idx): if (space.isinstance_w(w_idx, space.w_int) or space.isinstance_w(w_idx, space.w_slice)): @@ -684,31 +673,31 @@ def descr_getitem(self, space, w_idx): if self._single_item_result(space, w_idx): concrete = self.get_concrete() + if len(concrete.shape) < 1: + raise OperationError(space.w_IndexError, space.wrap( + "0-d arrays can't be indexed")) item = concrete._index_of_single_item(space, w_idx) - return concrete.getitem(item).wrap(space) + return concrete.getitem(item) chunks = self._prepare_slice_args(space, w_idx) return space.wrap(self.create_slice(space, chunks)) def descr_setitem(self, space, w_idx, w_value): self.invalidated() - concrete = self.get_concrete() if self._single_item_result(space, w_idx): + concrete = self.get_concrete() + if len(concrete.shape) < 1: + raise OperationError(space.w_IndexError, space.wrap( + "0-d arrays can't be indexed")) item = concrete._index_of_single_item(space, w_idx) concrete.setitem_w(space, item, w_value) return - if isinstance(w_value, BaseArray): - # for now we just copy if setting part of an array from - # part of itself. can be improved. 
- if (concrete.get_root_storage() == - w_value.get_concrete().get_root_storage()): - w_value = space.call_function(space.gettypefor(BaseArray), w_value) - assert isinstance(w_value, BaseArray) - else: + if not isinstance(w_value, BaseArray): w_value = convert_to_array(space, w_value) chunks = self._prepare_slice_args(space, w_idx) view = self.create_slice(space, chunks) view.setslice(space, w_value) + @jit.unroll_safe def create_slice(self, space, chunks): if len(chunks) == 1: start, stop, step, lgt = chunks[0] @@ -747,17 +736,35 @@ shape[:]) def descr_mean(self, space): - return space.wrap(space.float_w(self.descr_sum(space)) / self.find_size()) + return space.div(self.descr_sum(space), space.wrap(self.find_size())) def descr_nonzero(self, space): - try: - if self.find_size() > 1: - raise OperationError(space.w_ValueError, space.wrap( - "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) - except ValueError: - pass - return space.wrap(space.is_true(self.get_concrete().eval( - self.start_iter(self.shape)).wrap(space))) + if self.find_size() > 1: + raise OperationError(space.w_ValueError, space.wrap( + "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) + return space.wrap(space.is_true( + self.get_concrete().eval(self.start_iter(self.shape)) + )) + + def descr_get_transpose(self, space): + concrete = self.get_concrete() + if len(concrete.shape) < 2: + return space.wrap(self) + new_sig = signature.Signature.find_sig([ + NDimSlice.signature, self.signature + ]) + strides = [] + backstrides = [] + shape = [] + for i in range(len(concrete.shape) - 1, -1, -1): + strides.append(concrete.strides[i]) + backstrides.append(concrete.backstrides[i]) + shape.append(concrete.shape[i]) + return space.wrap(NDimSlice(concrete, new_sig, self.start, strides[:], + backstrides[:], shape[:])) + + def descr_get_flatiter(self, space): + return space.wrap(W_FlatIterator(self)) def getitem(self, item): raise NotImplementedError @@ -765,22 +772,22 @@ def start_iter(self, res_shape=None): raise NotImplementedError + def descr_debug_repr(self, space): + return space.wrap(self.debug_repr()) + def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): return w_obj elif space.issequence_w(w_obj): # Convert to array. 
- w_obj = space.call_function(space.gettypefor(BaseArray), w_obj) - assert isinstance(w_obj, BaseArray) - return w_obj + return array(space, w_obj, w_order=None) else: # If it's a scalar dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) return scalar_w(space, dtype, w_obj) def scalar_w(space, dtype, w_obj): - assert isinstance(dtype, interp_dtype.W_Dtype) - return Scalar(dtype, dtype.unwrap(space, w_obj)) + return Scalar(dtype, dtype.coerce(space, w_obj)) class Scalar(BaseArray): """ @@ -791,12 +798,13 @@ _attrs_ = ["dtype", "value", "shape"] def __init__(self, dtype, value): + self.shape = self.strides = [] BaseArray.__init__(self, [], 'C') self.dtype = dtype self.value = value def find_size(self): - raise ValueError + return 1 def get_concrete(self): return self @@ -805,7 +813,7 @@ return self.dtype def getitem(self, item): - return self.value + raise NotImplementedError def eval(self, iter): return self.value @@ -814,7 +822,13 @@ return ConstantIterator() def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): - builder.append(self.dtype.str_format(self.value)) + builder.append(self.dtype.itemtype.str_format(self.value)) + + def copy(self): + return Scalar(self.dtype, self.value) + + def debug_repr(self): + return 'Scalar' class VirtualArray(BaseArray): """ @@ -834,7 +848,7 @@ i = 0 signature = self.signature result_size = self.find_size() - result = NDimArray(result_size, self.shape, self.find_dtype()) + result = W_NDimArray(result_size, self.shape, self.find_dtype()) shapelen = len(self.shape) i = self.start_iter() ri = result.start_iter() @@ -907,6 +921,17 @@ return self.forced_result.start_iter(res_shape) return Call1Iterator(self.values.start_iter(res_shape)) + def debug_repr(self): + sig = self.signature + assert isinstance(sig, signature.Signature) + call_sig = sig.components[0] + assert isinstance(call_sig, signature.Call1) + if self.forced_result is not None: + return 'Call1(%s, forced=%s)' % (call_sig.name, + self.forced_result.debug_repr()) + return 'Call1(%s, %s)' % (call_sig.name, + self.values.debug_repr()) + class Call2(VirtualArray): """ Intermediate class for performing binary operations. 
@@ -946,6 +971,18 @@ assert isinstance(call_sig, signature.Call2) return call_sig.func(self.calc_dtype, lhs, rhs) + def debug_repr(self): + sig = self.signature + assert isinstance(sig, signature.Signature) + call_sig = sig.components[0] + assert isinstance(call_sig, signature.Call2) + if self.forced_result is not None: + return 'Call2(%s, forced=%s)' % (call_sig.name, + self.forced_result.debug_repr()) + return 'Call2(%s, %s, %s)' % (call_sig.name, + self.left.debug_repr(), + self.right.debug_repr()) + class ViewArray(BaseArray): """ Class for representing views of arrays, they will reflect changes of parent @@ -985,8 +1022,6 @@ return space.wrap(self.shape[0]) return space.wrap(1) -class VirtualView(VirtualArray): - pass class NDimSlice(ViewArray): signature = signature.BaseSignature() @@ -1001,9 +1036,6 @@ for sh in shape: self.size *= sh - def get_root_storage(self): - return self.parent.get_concrete().get_root_storage() - def find_size(self): return self.size @@ -1032,14 +1064,25 @@ def start_iter(self, res_shape=None): if res_shape is not None and res_shape != self.shape: return BroadcastIterator(self, res_shape) - # XXX there is a possible optimization here with SingleDimViewIterator - # ignore for now + if len(self.shape) == 1: + return OneDimIterator(self.start, self.strides[0], self.shape[0]) return ViewIterator(self) def setitem(self, item, value): self.parent.setitem(item, value) -class NDimArray(BaseArray): + def debug_repr(self): + return 'Slice(%s)' % self.parent.debug_repr() + + def copy(self): + array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) + iter = self.start_iter() + while not iter.done(): + array.setitem(iter.offset, self.getitem(iter.offset)) + iter = iter.next(len(self.shape)) + return array + +class W_NDimArray(BaseArray): """ A class representing contiguous array. 
We know that each iteration by say ufunc will increase the data index by one """ @@ -1053,9 +1096,6 @@ def get_concrete(self): return self - def get_root_storage(self): - return self.storage - def find_size(self): return self.size @@ -1068,6 +1108,15 @@ def eval(self, iter): return self.dtype.getitem(self.storage, iter.get_offset()) + def copy(self): + array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) + rffi.c_memcpy( + array.storage, + self.storage, + self.size * self.dtype.itemtype.get_element_size() + ) + return array + def descr_len(self, space): if len(self.shape): return space.wrap(self.shape[0]) @@ -1075,8 +1124,7 @@ "len() of unsized object")) def setitem_w(self, space, item, w_value): - self.invalidated() - self.dtype.setitem_w(space, self.storage, item, w_value) + return self.setitem(item, self.dtype.coerce(space, w_value)) def setitem(self, item, value): self.invalidated() @@ -1089,6 +1137,9 @@ return ArrayIterator(self.size) raise NotImplementedError # use ViewIterator simply, test it + def debug_repr(self): + return 'Array' + def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) @@ -1105,27 +1156,75 @@ shape.append(item) return size, shape +def array(space, w_item_or_iterable, w_dtype=None, w_order=NoneNotWrapped): + # find scalar + if not space.issequence_w(w_item_or_iterable): + if space.is_w(w_dtype, space.w_None): + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, + w_item_or_iterable) + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + return scalar_w(space, dtype, w_item_or_iterable) + if w_order is None: + order = 'C' + else: + order = space.str_w(w_order) + if order != 'C': # or order != 'F': + raise operationerrfmt(space.w_ValueError, "Unknown order: %s", + order) + shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) + # they come back in C order + size = len(elems_w) + if space.is_w(w_dtype, space.w_None): + w_dtype = None + for w_elem in elems_w: + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, + w_dtype) + if w_dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: + break + if w_dtype is None: + w_dtype = space.w_None + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) + shapelen = len(shape) + arr_iter = arr.start_iter(arr.shape) + for i in range(len(elems_w)): + w_elem = elems_w[i] + dtype.setitem(arr.storage, arr_iter.offset, dtype.coerce(space, w_elem)) + arr_iter = arr_iter.next(shapelen) + return arr + def zeros(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) size, shape = _find_size_and_shape(space, w_size) - return space.wrap(NDimArray(size, shape[:], dtype=dtype)) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) def ones(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) + size, shape = _find_size_and_shape(space, w_size) - arr = NDimArray(size, shape[:], dtype=dtype) - one = dtype.adapt_val(1) + arr = W_NDimArray(size, shape[:], dtype=dtype) + one = dtype.box(1) arr.dtype.fill(arr.storage, one, 0, size) return space.wrap(arr) +def dot(space, w_obj, w_obj2): + w_arr = convert_to_array(space, w_obj) + if isinstance(w_arr, Scalar): + return convert_to_array(space, 
w_obj2).descr_dot(space, w_arr) + return w_arr.descr_dot(space, w_obj2) + BaseArray.typedef = TypeDef( - 'numarray', - __new__ = interp2app(descr_new_array), - + 'ndarray', + __module__ = "numpypy", + __new__ = interp2app(BaseArray.descr__new__.im_func), __len__ = interp2app(BaseArray.descr_len), __getitem__ = interp2app(BaseArray.descr_getitem), @@ -1159,11 +1258,15 @@ __repr__ = interp2app(BaseArray.descr_repr), __str__ = interp2app(BaseArray.descr_str), + __debug_repr__ = interp2app(BaseArray.descr_debug_repr), dtype = GetSetProperty(BaseArray.descr_get_dtype), shape = GetSetProperty(BaseArray.descr_get_shape), size = GetSetProperty(BaseArray.descr_get_size), + T = GetSetProperty(BaseArray.descr_get_transpose), + flat = GetSetProperty(BaseArray.descr_get_flatiter), + mean = interp2app(BaseArray.descr_mean), sum = interp2app(BaseArray.descr_sum), prod = interp2app(BaseArray.descr_prod), @@ -1177,3 +1280,54 @@ copy = interp2app(BaseArray.descr_copy), ) + + +class W_FlatIterator(ViewArray): + signature = signature.BaseSignature() + + @jit.unroll_safe + def __init__(self, arr): + size = 1 + for sh in arr.shape: + size *= sh + new_sig = signature.Signature.find_sig([ + W_FlatIterator.signature, arr.signature + ]) + ViewArray.__init__(self, arr, new_sig, [arr.strides[-1]], + [arr.backstrides[-1]], [size]) + self.shapelen = len(arr.shape) + self.arr = arr + self.iter = self.start_iter() + + def start_iter(self, res_shape=None): + if res_shape is not None and res_shape != self.shape: + return BroadcastIterator(self, res_shape) + return OneDimIterator(self.arr.start, self.strides[0], + self.shape[0]) + + def find_dtype(self): + return self.arr.find_dtype() + + def find_size(self): + return self.shape[0] + + def descr_next(self, space): + if self.iter.done(): + raise OperationError(space.w_StopIteration, space.w_None) + result = self.eval(self.iter) + self.iter = self.iter.next(self.shapelen) + return result + + def descr_iter(self): + return self + + def debug_repr(self): + return 'FlatIter(%s)' % self.arr.debug_repr() + + +W_FlatIterator.typedef = TypeDef( + 'flatiter', + next = interp2app(W_FlatIterator.descr_next), + __iter__ = interp2app(W_FlatIterator.descr_iter), +) +W_FlatIterator.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,6 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.interp_dtype import W_Float64Dtype +from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.rlib.rstruct.runpack import runpack from pypy.rpython.lltypesystem import lltype, rffi @@ -9,7 +9,7 @@ @unwrap_spec(s=str) def fromstring(space, s): - from pypy.module.micronumpy.interp_numarray import NDimArray + from pypy.module.micronumpy.interp_numarray import W_NDimArray length = len(s) if length % FLOAT_SIZE == 0: @@ -18,8 +18,8 @@ raise OperationError(space.w_ValueError, space.wrap( "string length %d not divisable by %d" % (length, FLOAT_SIZE))) - dtype = space.fromcache(W_Float64Dtype) - a = NDimArray(number, [number], dtype=dtype) + dtype = get_dtype_cache(space).w_float64dtype + a = W_NDimArray(number, [number], dtype=dtype) start = 0 end = FLOAT_SIZE diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ 
b/pypy/module/micronumpy/interp_ufuncs.py @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import interp_dtype, signature +from pypy.module.micronumpy import interp_boxes, interp_dtype, signature, types from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name @@ -15,6 +15,7 @@ class W_Ufunc(Wrappable): _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"] + _immutable_fields_ = ["promote_to_float", "promote_bools"] def __init__(self, name, promote_to_float, promote_bools, identity): self.name = name @@ -29,7 +30,7 @@ def descr_get_identity(self, space): if self.identity is None: return space.w_None - return self.identity.wrap(space) + return self.identity def descr_call(self, space, __args__): if __args__.keywords or len(__args__.arguments_w) < self.argcount: @@ -80,8 +81,7 @@ new_sig = signature.Signature.find_sig([ self.reduce_signature, obj.signature ]) - return self.reduce_loop(new_sig, shapelen, start, value, obj, - dtype).wrap(space) + return self.reduce_loop(new_sig, shapelen, start, value, obj, dtype) def reduce_loop(self, signature, shapelen, i, value, obj, dtype): while not i.done(): @@ -115,7 +115,7 @@ promote_bools=self.promote_bools, ) if isinstance(w_obj, Scalar): - return self.func(res_dtype, w_obj.value.convert_to(res_dtype)).wrap(space) + return self.func(res_dtype, w_obj.value.convert_to(res_dtype)) new_sig = signature.Signature.find_sig([self.signature, w_obj.signature]) w_res = Call1(new_sig, w_obj.shape, res_dtype, w_obj, w_obj.order) @@ -124,6 +124,7 @@ class W_Ufunc2(W_Ufunc): + _immutable_fields_ = ["comparison_func", "func"] argcount = 2 def __init__(self, func, name, promote_to_float=False, promote_bools=False, @@ -148,14 +149,14 @@ promote_bools=self.promote_bools, ) if self.comparison_func: - res_dtype = space.fromcache(interp_dtype.W_BoolDtype) + res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar): return self.func(calc_dtype, w_lhs.value.convert_to(calc_dtype), w_rhs.value.convert_to(calc_dtype) - ).wrap(space) + ) new_sig = signature.Signature.find_sig([ self.signature, w_lhs.signature, w_rhs.signature @@ -169,7 +170,7 @@ W_Ufunc.typedef = TypeDef("ufunc", - __module__ = "numpy", + __module__ = "numpypy", __call__ = interp2app(W_Ufunc.descr_call), __repr__ = interp2app(W_Ufunc.descr_repr), @@ -187,7 +188,7 @@ dt1, dt2 = dt2, dt1 # Some operations promote op(bool, bool) to return int8, rather than bool if promote_bools and (dt1.kind == dt2.kind == interp_dtype.BOOLLTR): - return space.fromcache(interp_dtype.W_Int8Dtype) + return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: return find_unaryop_result_dtype(space, dt2, promote_to_float=True) # If they're the same kind, choose the greater one. @@ -197,14 +198,14 @@ # Everything promotes to float, and bool promotes to everything. 
if dt2.kind == interp_dtype.FLOATINGLTR or dt1.kind == interp_dtype.BOOLLTR: # Float32 + 8-bit int = Float64 - if dt2.num == 11 and dt1.num_bytes >= 4: - return space.fromcache(interp_dtype.W_Float64Dtype) + if dt2.num == 11 and dt1.itemtype.get_element_size() >= 4: + return interp_dtype.get_dtype_cache(space).w_float64dtype return dt2 # for now this means mixing signed and unsigned if dt2.kind == interp_dtype.SIGNEDLTR: # if dt2 has a greater number of bytes, then just go with it - if dt1.num_bytes < dt2.num_bytes: + if dt1.itemtype.get_element_size() < dt2.itemtype.get_element_size(): return dt2 # we need to promote both dtypes dtypenum = dt2.num + 2 @@ -214,10 +215,11 @@ # UInt64 + signed = Float64 if dt2.num == 10: dtypenum += 1 - newdtype = interp_dtype.ALL_DTYPES[dtypenum] + newdtype = interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum] - if newdtype.num_bytes > dt2.num_bytes or newdtype.kind == interp_dtype.FLOATINGLTR: - return space.fromcache(newdtype) + if (newdtype.itemtype.get_element_size() > dt2.itemtype.get_element_size() or + newdtype.kind == interp_dtype.FLOATINGLTR): + return newdtype else: # we only promoted to long on 32-bit or to longlong on 64-bit # this is really for dealing with the Long and Ulong dtypes @@ -225,35 +227,42 @@ dtypenum += 2 else: dtypenum += 3 - return space.fromcache(interp_dtype.ALL_DTYPES[dtypenum]) + return interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum] def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): if promote_bools and (dt.kind == interp_dtype.BOOLLTR): - return space.fromcache(interp_dtype.W_Int8Dtype) + return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: if dt.kind == interp_dtype.FLOATINGLTR: return dt if dt.num >= 5: - return space.fromcache(interp_dtype.W_Float64Dtype) - for bytes, dtype in interp_dtype.dtypes_by_num_bytes: - if dtype.kind == interp_dtype.FLOATINGLTR and dtype.num_bytes > dt.num_bytes: - return space.fromcache(dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype + for bytes, dtype in interp_dtype.get_dtype_cache(space).dtypes_by_num_bytes: + if (dtype.kind == interp_dtype.FLOATINGLTR and + dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): + return dtype if promote_to_largest: if dt.kind == interp_dtype.BOOLLTR or dt.kind == interp_dtype.SIGNEDLTR: - return space.fromcache(interp_dtype.W_Int64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype elif dt.kind == interp_dtype.FLOATINGLTR: - return space.fromcache(interp_dtype.W_Float64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype elif dt.kind == interp_dtype.UNSIGNEDLTR: - return space.fromcache(interp_dtype.W_UInt64Dtype) + return interp_dtype.get_dtype_cache(space).w_uint64dtype else: assert False return dt def find_dtype_for_scalar(space, w_obj, current_guess=None): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - long_dtype = space.fromcache(interp_dtype.W_LongDtype) - int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype) + bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype + long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype + int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype + + if isinstance(w_obj, interp_boxes.W_GenericBox): + dtype = w_obj.get_dtype(space) + if current_guess is None: + return dtype + return find_binop_result_dtype(space, dtype, current_guess) if space.isinstance_w(w_obj, space.w_bool): if current_guess is None or current_guess is 
bool_dtype: @@ -269,20 +278,19 @@ current_guess is long_dtype or current_guess is int64_dtype): return int64_dtype return current_guess - return space.fromcache(interp_dtype.W_Float64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func): if argcount == 1: def impl(res_dtype, value): - return getattr(res_dtype, op_name)(value) + return getattr(res_dtype.itemtype, op_name)(value) elif argcount == 2: + dtype_cache = interp_dtype.get_dtype_cache(space) def impl(res_dtype, lvalue, rvalue): - res = getattr(res_dtype, op_name)(lvalue, rvalue) + res = getattr(res_dtype.itemtype, op_name)(lvalue, rvalue) if comparison_func: - booldtype = space.fromcache(interp_dtype.W_BoolDtype) - assert isinstance(booldtype, interp_dtype.W_BoolDtype) - res = booldtype.box(res) + return dtype_cache.w_booldtype.box(res) return res return func_with_new_name(impl, ufunc_name) @@ -319,6 +327,8 @@ ("floor", "floor", 1, {"promote_to_float": True}), ("exp", "exp", 1, {"promote_to_float": True}), + ('sqrt', 'sqrt', 1, {'promote_to_float': True}), + ("sin", "sin", 1, {"promote_to_float": True}), ("cos", "cos", 1, {"promote_to_float": True}), ("tan", "tan", 1, {"promote_to_float": True}), @@ -336,7 +346,7 @@ identity = extra_kwargs.get("identity") if identity is not None: - identity = space.fromcache(interp_dtype.W_LongDtype).adapt_val(identity) + identity = interp_dtype.get_dtype_cache(space).w_longdtype.box(identity) extra_kwargs["identity"] = identity func = ufunc_dtype_caller(space, ufunc_name, op_name, argcount, diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -40,13 +40,15 @@ return Signature._known_sigs.setdefault(components, Signature(components)) class Call1(BaseSignature): - _immutable_fields_ = ["func"] + _immutable_fields_ = ["func", "name"] def __init__(self, func): self.func = func + self.name = func.func_name class Call2(BaseSignature): - _immutable_fields_ = ["func"] + _immutable_fields_ = ["func", "name"] def __init__(self, func): self.func = func + self.name = func.func_name diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -1,6 +1,6 @@ from pypy.conftest import gettestobjspace -from pypy.module.micronumpy import interp_dtype -from pypy.module.micronumpy.interp_numarray import NDimArray, Scalar +from pypy.module.micronumpy.interp_dtype import get_dtype_cache +from pypy.module.micronumpy.interp_numarray import W_NDimArray, Scalar from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) @@ -11,9 +11,10 @@ class TestSignature(object): def test_binop_signature(self, space): - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + float64_dtype = get_dtype_cache(space).w_float64dtype + bool_dtype = get_dtype_cache(space).w_booldtype - ar = NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) assert v1.signature is not v2.signature @@ -22,7 +23,7 @@ v4 = ar.descr_add(space, ar) assert v1.signature is v4.signature - bool_ar = NDimArray(10, [10], dtype=space.fromcache(interp_dtype.W_BoolDtype)) + bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) assert 
v5.signature is not v1.signature assert v5.signature is not v2.signature @@ -30,7 +31,9 @@ assert v5.signature is v6.signature def test_slice_signature(self, space): - ar = NDimArray(10, [10], dtype=space.fromcache(interp_dtype.W_Float64Dtype)) + float64_dtype = get_dtype_cache(space).w_float64dtype + + ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) assert v1.signature is v2.signature @@ -41,10 +44,10 @@ class TestUfuncCoerscion(object): def test_binops(self, space): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype) - int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype) - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + bool_dtype = get_dtype_cache(space).w_booldtype + int8_dtype = get_dtype_cache(space).w_int8dtype + int32_dtype = get_dtype_cache(space).w_int32dtype + float64_dtype = get_dtype_cache(space).w_float64dtype # Basic pairing assert find_binop_result_dtype(space, bool_dtype, bool_dtype) is bool_dtype @@ -62,19 +65,19 @@ assert find_binop_result_dtype(space, bool_dtype, float64_dtype, promote_to_float=True) is float64_dtype def test_unaryops(self, space): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype) - uint8_dtype = space.fromcache(interp_dtype.W_UInt8Dtype) - int16_dtype = space.fromcache(interp_dtype.W_Int16Dtype) - uint16_dtype = space.fromcache(interp_dtype.W_UInt16Dtype) - int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype) - uint32_dtype = space.fromcache(interp_dtype.W_UInt32Dtype) - long_dtype = space.fromcache(interp_dtype.W_LongDtype) - ulong_dtype = space.fromcache(interp_dtype.W_ULongDtype) - int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype) - uint64_dtype = space.fromcache(interp_dtype.W_UInt64Dtype) - float32_dtype = space.fromcache(interp_dtype.W_Float32Dtype) - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + bool_dtype = get_dtype_cache(space).w_booldtype + int8_dtype = get_dtype_cache(space).w_int8dtype + uint8_dtype = get_dtype_cache(space).w_uint8dtype + int16_dtype = get_dtype_cache(space).w_int16dtype + uint16_dtype = get_dtype_cache(space).w_uint16dtype + int32_dtype = get_dtype_cache(space).w_int32dtype + uint32_dtype = get_dtype_cache(space).w_uint32dtype + long_dtype = get_dtype_cache(space).w_longdtype + ulong_dtype = get_dtype_cache(space).w_ulongdtype + int64_dtype = get_dtype_cache(space).w_int64dtype + uint64_dtype = get_dtype_cache(space).w_uint64dtype + float32_dtype = get_dtype_cache(space).w_float32dtype + float64_dtype = get_dtype_cache(space).w_float64dtype # Normal rules, everything returns itself assert find_unaryop_result_dtype(space, bool_dtype) is bool_dtype diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -1,6 +1,9 @@ +import py -import py -from pypy.module.micronumpy.compile import * +from pypy.module.micronumpy.compile import (numpy_compile, Assignment, + ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, + FunctionCall, FakeSpace) + class TestCompiler(object): def compile(self, code): @@ -106,7 +109,7 @@ c -> 3 """ interp = self.run(code) - assert interp.results[-1].value.val == 9 + assert interp.results[-1].value == 9 def test_array_getitem(self): code = """ @@ -115,7 
+118,7 @@ a + b -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 3 + 6 + assert interp.results[0].value == 3 + 6 def test_range_getitem(self): code = """ @@ -123,7 +126,7 @@ r -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_sum(self): code = """ @@ -132,7 +135,7 @@ r """ interp = self.run(code) - assert interp.results[0].value.val == 15 + assert interp.results[0].value.value == 15 def test_array_write(self): code = """ @@ -141,7 +144,7 @@ a -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 15 + assert interp.results[0].value == 15 def test_min(self): interp = self.run(""" @@ -150,7 +153,7 @@ b = a + a min(b) """) - assert interp.results[0].value.val == -24 + assert interp.results[0].value.value == -24 def test_max(self): interp = self.run(""" @@ -159,7 +162,7 @@ b = a + a max(b) """) - assert interp.results[0].value.val == 256 + assert interp.results[0].value.value == 256 def test_slice(self): interp = self.run(""" @@ -167,7 +170,7 @@ b = a -> : b -> 3 """) - assert interp.results[0].value.val == 4 + assert interp.results[0].value == 4 def test_slice_step(self): interp = self.run(""" @@ -175,14 +178,35 @@ b = a -> ::2 b -> 3 """) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 + + def test_setslice(self): + interp = self.run(""" + a = |30| + b = |10| + b[1] = 5 + a[::3] = b + a -> 3 + """) + assert interp.results[0].value == 5 + + + def test_slice2(self): + interp = self.run(""" + a = |30| + s1 = a -> 0:20:2 + s2 = a -> 0:30:3 + b = s1 + s2 + b -> 3 + """) + assert interp.results[0].value == 15 def test_multidim_getitem(self): interp = self.run(""" a = [[1,2]] a -> 0 -> 1 """) - assert interp.results[0].value.val == 2 + assert interp.results[0].value == 2 def test_multidim_getitem_2(self): interp = self.run(""" @@ -190,4 +214,24 @@ b = a + a b -> 1 -> 1 """) - assert interp.results[0].value.val == 8 + assert interp.results[0].value == 8 + + def test_set_slice(self): + interp = self.run(""" + a = |30| + b = |30| + b[:] = a + a + b -> 3 + """) + assert interp.results[0].value == 6 + + def test_set_slice2(self): + interp = self.run(""" + a = |30| + b = |10| + b[1] = 5.5 + c = b + b + a[0:30:3] = c + a -> 3 + """) + assert interp.results[0].value == 11 diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -30,7 +30,7 @@ def test_repr_str(self): from numpypy import dtype - assert repr(dtype) == "" + assert repr(dtype) == "" d = dtype('?') assert repr(d) == "dtype('bool')" assert str(d) == "bool" @@ -44,13 +44,13 @@ assert a[i] is True_ def test_copy_array_with_dtype(self): - from numpypy import array, False_, True_ + from numpypy import array, False_, True_, int64 a = array([0, 1, 2, 3], dtype=long) # int on 64-bit, long in 32-bit - assert isinstance(a[0], (int, long)) + assert isinstance(a[0], int64) b = a.copy() - assert isinstance(b[0], (int, long)) + assert isinstance(b[0], int64) a = array([0, 1, 2, 3], dtype=bool) assert a[0] is False_ @@ -72,17 +72,17 @@ assert a[i] is True_ def test_zeros_long(self): - from numpypy import zeros + from numpypy import zeros, int64 a = zeros(10, dtype=long) for i in range(10): - assert isinstance(a[i], (int, long)) + assert isinstance(a[i], int64) assert a[1] == 0 def test_ones_long(self): - from numpypy import ones + from numpypy import ones, int64 a = 
ones(10, dtype=long) for i in range(10): - assert isinstance(a[i], (int, long)) + assert isinstance(a[i], int64) assert a[1] == 1 def test_overflow(self): @@ -165,3 +165,99 @@ # You can't subclass dtype raises(TypeError, type, "Foo", (dtype,), {}) + +class AppTestTypes(BaseNumpyAppTest): + def test_abstract_types(self): + import numpypy as numpy + raises(TypeError, numpy.generic, 0) + raises(TypeError, numpy.number, 0) + raises(TypeError, numpy.integer, 0) + exc = raises(TypeError, numpy.signedinteger, 0) + assert str(exc.value) == "cannot create 'numpypy.signedinteger' instances" + + raises(TypeError, numpy.floating, 0) + raises(TypeError, numpy.inexact, 0) + + def test_bool(self): + import numpypy as numpy + + assert numpy.bool_.mro() == [numpy.bool_, numpy.generic, object] + assert numpy.bool_(3) is numpy.True_ + assert numpy.bool_("") is numpy.False_ + assert type(numpy.True_) is type(numpy.False_) is numpy.bool_ + + class X(numpy.bool_): + pass + + assert type(X(True)) is numpy.bool_ + assert X(True) is numpy.True_ + + def test_int8(self): + import numpypy as numpy + + assert numpy.int8.mro() == [numpy.int8, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object] + + a = numpy.array([1, 2, 3], numpy.int8) + assert type(a[1]) is numpy.int8 + assert numpy.dtype("int8").type is numpy.int8 + + x = numpy.int8(128) + assert x == -128 + assert x != 128 + assert type(x) is numpy.int8 + assert repr(x) == "-128" + + assert type(int(x)) is int + assert int(x) == -128 + + def test_int16(self): + import numpypy as numpy + + x = numpy.int16(3) + assert x == 3 + + def test_int32(self): + import numpypy as numpy + + x = numpy.int32(23) + assert x == 23 + + def test_int_(self): + import numpypy as numpy + + assert numpy.int_ is numpy.dtype(int).type + assert numpy.int_.mro() == [numpy.int_, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object] + + def test_int64(self): + import sys + import numpypy as numpy + + if sys.maxint == 2 ** 63 -1: + assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object] + else: + assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object] + + assert numpy.dtype(numpy.int64).type is numpy.int64 + assert numpy.int64(3) == 3 + + def test_float64(self): + import numpypy as numpy + + assert numpy.float64.mro() == [numpy.float64, numpy.floating, numpy.inexact, numpy.number, numpy.generic, float, object] + + a = numpy.array([1, 2, 3], numpy.float64) + assert type(a[1]) is numpy.float64 + assert numpy.dtype(float).type is numpy.float64 + + assert numpy.float64(2.0) == 2.0 + + def test_subclass_type(self): + import numpypy as numpy + + class X(numpy.float64): + def m(self): + return self + 2 + + b = X(10) + assert type(b) is X + assert b.m() == 12 diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1,16 +1,19 @@ import py from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest -from pypy.module.micronumpy.interp_numarray import NDimArray, shape_agreement +from pypy.module.micronumpy.interp_numarray import W_NDimArray, shape_agreement from pypy.module.micronumpy import signature from pypy.interpreter.error import OperationError from pypy.conftest import gettestobjspace + class MockDtype(object): signature = signature.BaseSignature() + def malloc(self, size): 
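# [editorial sketch -- not part of the patch]  test_int8 above relies on
# fixed-width two's-complement wraparound: numpy.int8(128) compares equal to
# -128.  The same truncation can be reproduced with the struct module:

import struct

def as_int8(value):
    # pack the low byte unsigned, then reinterpret it as a signed byte
    return struct.unpack("b", struct.pack("B", value & 0xFF))[0]

assert as_int8(128) == -128
assert as_int8(3) == 3
assert as_int8(-1) == -1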
return None + class TestNumArrayDirect(object): def newslice(self, *args): return self.space.newslice(*[self.space.wrap(arg) for arg in args]) @@ -25,18 +28,18 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = NDimArray(100, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'F') assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = NDimArray(100, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'C') assert a.strides == [15, 3, 1] assert a.backstrides == [135, 12, 2] def test_create_slice_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] @@ -55,7 +58,7 @@ def test_create_slice_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') s = a.create_slice(space, [(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] @@ -75,7 +78,7 @@ def test_slice_of_slice_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(5, 0, 0, 1)]) assert s.start == 5 s2 = s.create_slice(space, [(3, 0, 0, 1)]) @@ -93,7 +96,7 @@ def test_slice_of_slice_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') s = a.create_slice(space, [(5, 0, 0, 1)]) assert s.start == 15 * 5 s2 = s.create_slice(space, [(3, 0, 0, 1)]) @@ -111,7 +114,7 @@ def test_negative_step_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] @@ -119,14 +122,14 @@ def test_negative_step_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') s = a.create_slice(space, [(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] def test_index_of_single_item_f(self): - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 + 2 * 10 + 2 * 50 s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) @@ -136,7 +139,7 @@ assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 1)) def test_index_of_single_item_c(self): - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 * 3 * 5 + 2 * 3 + 2 s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) @@ -150,10 +153,28 @@ assert shape_agreement(self.space, [1, 2, 3], [1, 2, 3]) == [1, 2, 3] py.test.raises(OperationError, shape_agreement, self.space, [2], [3]) assert shape_agreement(self.space, [4, 4], []) == [4, 4] - assert shape_agreement(self.space, [8, 1, 6, 1], [7, 1, 5]) == [8, 7, 6, 5] - assert shape_agreement(self.space, [5, 2], [4, 3, 5, 2]) == [4, 3, 5, 2] + assert shape_agreement(self.space, + [8, 1, 6, 1], [7, 1, 5]) == [8, 7, 6, 5] + assert 
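# [editorial sketch -- not part of the patch]  The stride tests above follow
# the usual rule: a stride is how many elements are skipped when one index
# grows by 1 (innermost dimension first for 'F' order, last for 'C'), and a
# backstride is stride * (extent - 1).  A stand-alone helper reproducing the
# numbers asserted above:

def calc_strides(shape, order):
    strides = []
    step = 1
    dims = shape if order == 'F' else list(reversed(shape))
    for extent in dims:
        strides.append(step)
        step *= extent
    if order == 'C':
        strides.reverse()
    backstrides = [st * (extent - 1) for st, extent in zip(strides, shape)]
    return strides, backstrides

assert calc_strides([10, 5, 3], 'F') == ([1, 10, 50], [9, 40, 100])
assert calc_strides([10, 5, 3], 'C') == ([15, 3, 1], [135, 12, 2])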
shape_agreement(self.space, + [5, 2], [4, 3, 5, 2]) == [4, 3, 5, 2] + class AppTestNumArray(BaseNumpyAppTest): + def test_ndarray(self): + from numpypy import ndarray, array, dtype + + assert type(ndarray) is type + assert type(array) is not type + a = ndarray((2, 3)) + assert a.shape == (2, 3) + assert a.dtype == dtype(float) + + raises(TypeError, ndarray, [[1], [2], [3]]) + + a = ndarray(3, dtype=int) + assert a.shape == (3,) + assert a.dtype is dtype(int) + def test_type(self): from numpypy import array ar = array(range(5)) @@ -170,8 +191,7 @@ def test_size(self): from numpypy import array - # XXX fixed on multidim branch - #assert array(3).size == 1 + assert array(3).size == 1 a = array([1, 2, 3]) assert a.size == 3 assert (a + a).size == 3 @@ -204,13 +224,13 @@ a[3] = 22 assert b[3] == 3 + a = array(1) + assert a.copy() == a + def test_iterator_init(self): from numpypy import array a = array(range(5)) assert a[3] == 3 - a = array(1) - assert a[0] == 1 - assert a.shape == () def test_getitem(self): from numpypy import array @@ -258,7 +278,7 @@ assert a[1] == 0. assert a[3] == 1. b[::-1] = b - assert b[0] == 1. + assert b[0] == 0. assert b[1] == 0. def test_setslice_of_slice_array(self): @@ -296,9 +316,13 @@ assert a[3] == 0. def test_scalar(self): - from numpypy import array + from numpypy import array, dtype a = array(3) - assert a[0] == 3 + #assert a[0] == 3 + raises(IndexError, "a[0]") + assert a.size == 1 + assert a.shape == () + assert a.dtype is dtype(int) def test_len(self): from numpypy import array @@ -350,11 +374,11 @@ assert r[i] == i + 3 def test_add_list(self): - from numpypy import array + from numpypy import array, ndarray a = array(range(5)) b = list(reversed(range(5))) c = a + b - assert isinstance(c, array) + assert isinstance(c, ndarray) for i in range(5): assert c[i] == 4 @@ -455,9 +479,11 @@ a = array(range(5), float) b = a ** a for i in range(5): - print b[i], i ** i assert b[i] == i ** i + a = array(range(5)) + assert (a ** 2 == a * a).all() + def test_pow_other(self): from numpypy import array a = array(range(5), float) @@ -681,12 +707,14 @@ assert c.any() == False def test_dot(self): - from numpypy import array + from numpypy import array, dot a = array(range(5)) assert a.dot(a) == 30.0 a = array(range(5)) assert a.dot(range(5)) == 30 + assert dot(range(5), range(5)) == 30 + assert (dot(5, [1, 2, 3]) == [5, 10, 15]).all() def test_dot_constant(self): from numpypy import array @@ -696,7 +724,7 @@ assert b[i] == 2.5 * a[i] def test_dtype_guessing(self): - from numpypy import array, dtype + from numpypy import array, dtype, float64, int8, bool_ assert array([True]).dtype is dtype(bool) assert array([True, False]).dtype is dtype(bool) @@ -706,6 +734,10 @@ assert array([1.2, True]).dtype is dtype(float) assert array([1.2, 5]).dtype is dtype(float) assert array([]).dtype is dtype(float) + assert array([float64(2)]).dtype is dtype(float) + assert array([int8(3)]).dtype is dtype("int8") + assert array([bool_(True)]).dtype is dtype(bool) + assert array([bool_(True), 3.0]).dtype is dtype(float) def test_comparison(self): import operator @@ -737,6 +769,29 @@ assert bool(array([1])) assert not bool(array([0])) + def test_slice_assignment(self): + from numpypy import array + a = array(range(5)) + a[::-1] = a + assert (a == [0, 1, 2, 1, 0]).all() + # but we force intermediates + a = array(range(5)) + a[::-1] = a + a + assert (a == [8, 6, 4, 2, 0]).all() + + def test_debug_repr(self): + from numpypy import zeros, sin + a = zeros(1) + assert a.__debug_repr__() == 'Array' + 
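# [editorial sketch -- not part of the patch]  shape_agreement, exercised just
# above, implements ordinary broadcasting: align the shapes on the right; at
# each position the extents must be equal or one of them must be 1, and the
# result takes the larger one.  A simplified stand-alone version:

def shape_agreement(shape1, shape2):
    ndim = max(len(shape1), len(shape2))
    s1 = [1] * (ndim - len(shape1)) + list(shape1)
    s2 = [1] * (ndim - len(shape2)) + list(shape2)
    result = []
    for a, b in zip(s1, s2):
        if a != b and a != 1 and b != 1:
            raise ValueError("operands could not be broadcast together")
        result.append(max(a, b))
    return result

assert shape_agreement([8, 1, 6, 1], [7, 1, 5]) == [8, 7, 6, 5]
assert shape_agreement([5, 2], [4, 3, 5, 2]) == [4, 3, 5, 2]
assert shape_agreement([4, 4], []) == [4, 4]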
assert (a + a).__debug_repr__() == 'Call2(add, Array, Array)' + assert (a[::2]).__debug_repr__() == 'Slice(Array)' + assert (a + 2).__debug_repr__() == 'Call2(add, Array, Scalar)' + assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, FlatIter(Array))' + assert sin(a).__debug_repr__() == 'Call1(sin, Array)' + b = a + a + b[0] = 3 + assert b.__debug_repr__() == 'Call2(add, forced=Array)' + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy @@ -829,7 +884,8 @@ def test_ufunc(self): from numpypy import array a = array([[1, 2], [3, 4], [5, 6]]) - assert ((a + a) == array([[1 + 1, 2 + 2], [3 + 3, 4 + 4], [5 + 5, 6 + 6]])).all() + assert ((a + a) == \ + array([[1 + 1, 2 + 2], [3 + 3, 4 + 4], [5 + 5, 6 + 6]])).all() def test_getitem_add(self): from numpypy import array @@ -844,7 +900,8 @@ def test_getitem_3(self): from numpypy import array - a = array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]) + a = array([[1, 2], [3, 4], [5, 6], [7, 8], + [9, 10], [11, 12], [13, 14]]) b = a[::2] print a print b @@ -877,11 +934,12 @@ b = array(((10, 11, 12), (20, 21, 22), (30, 31, 32))) c = ((a + b) == [b, b, b]) assert c.all() - a = array((((10,11,12), ), ((20, 21, 22), ), ((30,31,32), ))) + a = array((((10, 11, 12), ), ((20, 21, 22), ), ((30, 31, 32), ))) assert(a.shape == (3, 1, 3)) d = zeros((3, 3)) c = ((a + d) == [b, b, b]) - c = ((a + d) == array([[[10., 11., 12.]]*3, [[20.,21.,22.]]*3, [[30.,31.,32.]]*3])) + c = ((a + d) == array([[[10., 11., 12.]] * 3, + [[20., 21., 22.]] * 3, [[30., 31., 32.]] * 3])) assert c.all() def test_broadcast_scalar(self): @@ -906,14 +964,15 @@ from numpypy import array a = array([[1, 2], [3, 4], [5, 6]]) assert a.argmax() == 5 - assert a[:2,].argmax() == 3 + assert a[:2, ].argmax() == 3 def test_broadcast_wrong_shapes(self): from numpypy import zeros a = zeros((4, 3, 2)) b = zeros((4, 2)) exc = raises(ValueError, lambda: a + b) - assert str(exc.value) == "operands could not be broadcast together with shapes (4,3,2) (4,2)" + assert str(exc.value) == "operands could not be broadcast" \ + " together with shapes (4,3,2) (4,2)" def test_reduce(self): from numpypy import array @@ -923,10 +982,55 @@ c = b + b assert c.sum() == (6 + 8 + 10 + 12) * 2 -class AppTestSupport(object): + def test_transpose(self): + from numpypy import array + a = array(((range(3), range(3, 6)), + (range(6, 9), range(9, 12)), + (range(12, 15), range(15, 18)), + (range(18, 21), range(21, 24)))) + assert a.shape == (4, 2, 3) + b = a.T + assert b.shape == (3, 2, 4) + assert(b[0, :, 0] == [0, 3]).all() + b[:, 0, 0] = 1000 + assert(a[0, 0, :] == [1000, 1000, 1000]).all() + a = array(range(5)) + b = a.T + assert(b == range(5)).all() + a = array((range(10), range(20, 30))) + b = a.T + assert(b[:, 0] == a[0, :]).all() + + def test_flatiter(self): + from numpypy import array, flatiter + a = array([[10, 30], [40, 60]]) + f_iter = a.flat + assert f_iter.next() == 10 + assert f_iter.next() == 30 + assert f_iter.next() == 40 + assert f_iter.next() == 60 + raises(StopIteration, "f_iter.next()") + raises(TypeError, "flatiter()") + s = 0 + for k in a.flat: + s += k + assert s == 140 + + def test_flatiter_array_conv(self): + from numpypy import array, dot + a = array([1, 2, 3]) + assert dot(a.flat, a.flat) == 14 + + def test_slice_copy(self): + from numpypy import zeros + a = zeros((10, 10)) + b = a[0].copy() + assert (b == zeros(10)).all() + +class AppTestSupport(BaseNumpyAppTest): def setup_class(cls): import struct - cls.space = gettestobjspace(usemodules=('micronumpy',)) 
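# [editorial sketch -- not part of the patch]  The __debug_repr__ asserts
# above expose micronumpy's lazy evaluation: a + a does not compute anything,
# it builds a Call2 node that is only forced later.  A toy model of that
# expression tree and its debug repr (class names here are illustrative):

class Array(object):
    def debug_repr(self):
        return "Array"
    def __add__(self, other):
        return Call2("add", self, other)

class Scalar(object):
    def debug_repr(self):
        return "Scalar"

class Call2(object):
    def __init__(self, name, left, right):
        self.name, self.left, self.right = name, left, right
    def debug_repr(self):
        return "Call2(%s, %s, %s)" % (
            self.name, self.left.debug_repr(), self.right.debug_repr())

a = Array()
assert (a + a).debug_repr() == "Call2(add, Array, Array)"
assert (a + Scalar()).debug_repr() == "Call2(add, Array, Scalar)"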
+ BaseNumpyAppTest.setup_class.im_func(cls) cls.w_data = cls.space.wrap(struct.pack('dddd', 1, 2, 3, 4)) def test_fromstring(self): @@ -936,6 +1040,7 @@ assert a[i] == i + 1 raises(ValueError, fromstring, "abc") + class AppTestRepr(BaseNumpyAppTest): def test_repr(self): from numpypy import array, zeros @@ -1008,7 +1113,9 @@ assert str(a) == "3" a = zeros((400, 400), dtype=int) - assert str(a) == "[[0 0 0 ..., 0 0 0]\n [0 0 0 ..., 0 0 0]\n [0 0 0 ..., 0 0 0]\n ..., \n [0 0 0 ..., 0 0 0]\n [0 0 0 ..., 0 0 0]\n [0 0 0 ..., 0 0 0]]" + assert str(a) == "[[0 0 0 ..., 0 0 0]\n [0 0 0 ..., 0 0 0]\n" \ + " [0 0 0 ..., 0 0 0]\n ..., \n [0 0 0 ..., 0 0 0]\n" \ + " [0 0 0 ..., 0 0 0]\n [0 0 0 ..., 0 0 0]]" a = zeros((2, 2, 2)) r = str(a) assert r == '[[[0.0 0.0]\n [0.0 0.0]]\n\n [[0.0 0.0]\n [0.0 0.0]]]' @@ -1026,3 +1133,25 @@ assert str(b) == "[7 8 9]" b = a[2:1, ] assert str(b) == "[]" + + +class AppTestRanges(BaseNumpyAppTest): + def test_arange(self): + from numpypy import arange, array, dtype + a = arange(3) + assert (a == [0, 1, 2]).all() + assert a.dtype is dtype(int) + a = arange(3.0) + assert (a == [0., 1., 2.]).all() + assert a.dtype is dtype(float) + a = arange(3, 7) + assert (a == [3, 4, 5, 6]).all() + assert a.dtype is dtype(int) + a = arange(3, 7, 2) + assert (a == [3, 5]).all() + a = arange(3, dtype=float) + assert (a == [0., 1., 2.]).all() + assert a.dtype is dtype(float) + a = arange(0, 0.8, 0.1) + assert len(a) == 8 + assert arange(False, True, True).dtype is dtype(int) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -8,7 +8,7 @@ assert isinstance(add, ufunc) assert repr(add) == "" - assert repr(ufunc) == "" + assert repr(ufunc) == "" def test_ufunc_attrs(self): from numpypy import add, multiply, sin @@ -37,36 +37,36 @@ assert minimum(2.0, 3.0) == 2.0 def test_sequence(self): - from numpypy import array, negative, minimum + from numpypy import array, ndarray, negative, minimum a = array(range(3)) b = [2.0, 1.0, 0.0] c = 1.0 b_neg = negative(b) - assert isinstance(b_neg, array) + assert isinstance(b_neg, ndarray) for i in range(3): assert b_neg[i] == -b[i] min_a_b = minimum(a, b) - assert isinstance(min_a_b, array) + assert isinstance(min_a_b, ndarray) for i in range(3): assert min_a_b[i] == min(a[i], b[i]) min_b_a = minimum(b, a) - assert isinstance(min_b_a, array) + assert isinstance(min_b_a, ndarray) for i in range(3): assert min_b_a[i] == min(a[i], b[i]) min_a_c = minimum(a, c) - assert isinstance(min_a_c, array) + assert isinstance(min_a_c, ndarray) for i in range(3): assert min_a_c[i] == min(a[i], c) min_c_a = minimum(c, a) - assert isinstance(min_c_a, array) + assert isinstance(min_c_a, ndarray) for i in range(3): assert min_c_a[i] == min(a[i], c) min_b_c = minimum(b, c) - assert isinstance(min_b_c, array) + assert isinstance(min_b_c, ndarray) for i in range(3): assert min_b_c[i] == min(b[i], c) min_c_b = minimum(c, b) - assert isinstance(min_c_b, array) + assert isinstance(min_c_b, ndarray) for i in range(3): assert min_c_b[i] == min(b[i], c) @@ -111,6 +111,8 @@ for i in range(3): assert c[i] == a[i] / b[i] + assert (divide(array([-10]), array([2])) == array([-5])).all() + def test_fabs(self): from numpypy import array, fabs from math import fabs as math_fabs @@ -319,6 +321,17 @@ for v in [1.0, -1.0]: assert arctanh(v) == math.copysign(float("inf"), v) + def test_sqrt(self): + import math + from numpypy import sqrt + + nan, 
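# [editorial sketch -- not part of the patch]  AppTestSupport above packs four
# doubles with struct.pack('dddd', 1, 2, 3, 4) and expects fromstring to give
# them back, and to reject data whose length is not a multiple of the item
# size.  A plain-Python stand-in for that behaviour:

import struct

def fromstring_float64(data):
    if len(data) % 8:
        raise ValueError("string length must be a multiple of 8")
    return list(struct.unpack("%dd" % (len(data) // 8), data))

packed = struct.pack("dddd", 1, 2, 3, 4)
assert fromstring_float64(packed) == [1.0, 2.0, 3.0, 4.0]
try:
    fromstring_float64(b"abc")
except ValueError:
    pass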
inf = float("nan"), float("inf") + data = [1, 2, 3, inf] + results = [math.sqrt(1), math.sqrt(2), math.sqrt(3), inf] + assert (sqrt(data) == results).all() + assert math.isnan(sqrt(-1)) + assert math.isnan(sqrt(nan)) + def test_reduce_errors(self): from numpypy import sin, add diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -8,10 +8,11 @@ from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.metainterp.warmspot import reset_stats -from pypy.module.micronumpy import interp_ufuncs, signature +from pypy.module.micronumpy import interp_boxes, interp_ufuncs, signature from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, FloatObject, IntObject, BoolObject, Parser, InterpreterState) -from pypy.module.micronumpy.interp_numarray import NDimArray, NDimSlice +from pypy.module.micronumpy.interp_numarray import (W_NDimArray, NDimSlice, + BaseArray) from pypy.rlib.nonconst import NonConstant from pypy.rpython.annlowlevel import llstr, hlstr @@ -47,16 +48,15 @@ def f(i): interp = InterpreterState(codes[i]) interp.run(space) - res = interp.results[-1] - w_res = res.eval(res.start_iter()).wrap(interp.space) - if isinstance(w_res, BoolObject): - return float(w_res.boolval) - elif isinstance(w_res, FloatObject): - return w_res.floatval - elif isinstance(w_res, IntObject): - return w_res.intval - else: - return -42. + w_res = interp.results[-1] + if isinstance(w_res, BaseArray): + w_res = w_res.eval(w_res.start_iter()) + + if isinstance(w_res, interp_boxes.W_Float64Box): + return w_res.value + elif isinstance(w_res, interp_boxes.W_BoolBox): + return float(w_res.value) + raise TypeError(w_res) if self.graph is None: interp, graph = self.meta_interp(f, [i], @@ -78,10 +78,9 @@ def test_add(self): result = self.run("add") - self.check_resops({'setarrayitem_raw': 2, 'getfield_gc': 19, 'guard_class': 11, - 'int_add': 6, 'guard_isnull': 1, 'jump': 1, 'int_ge': 2, - 'getarrayitem_raw': 4, 'float_add': 2, 'guard_false': 2, - 'guard_value': 1}) + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, + 'int_ge': 1, 'guard_false': 1, 'jump': 1}) assert result == 3 + 3 def define_float_add(): @@ -93,10 +92,9 @@ def test_floatadd(self): result = self.run("float_add") assert result == 3 + 3 - self.check_resops({'setarrayitem_raw': 2, 'getfield_gc': 17, 'guard_class': 11, - 'int_add': 4, 'guard_isnull': 1, 'jump': 1, 'int_ge': 2, - 'getarrayitem_raw': 2, 'float_add': 2, 'guard_false': 2, - 'guard_value': 1}) + self.check_simple_loop({"getinteriorfield_raw": 1, "float_add": 1, + "setinteriorfield_raw": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) def define_sum(): return """ @@ -108,9 +106,9 @@ def test_sum(self): result = self.run("sum") assert result == 2 * sum(range(30)) - self.check_resops({'guard_class': 10, 'getfield_gc': 17, 'jump': 1, - 'getarrayitem_raw': 4, 'guard_value': 2, 'int_add': 4, - 'guard_isnull': 1, 'int_ge': 2, 'float_add': 4, 'guard_false': 2}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 2, + "int_add": 2, "int_ge": 1, "guard_false": 1, + "jump": 1}) def define_prod(): return """ @@ -125,11 +123,9 @@ for i in range(30): expected *= i * 2 assert result == expected - self.check_resops({'guard_class': 10, 'getfield_gc': 17, 'int_add': 4, - 'float_mul': 2, 'guard_isnull': 1, 'jump': 1, 'int_ge': 2, - 
'getarrayitem_raw': 4, 'float_add': 2, 'guard_false': 2, - 'guard_value': 2}) - + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) def test_max(self): py.test.skip("broken, investigate") @@ -140,9 +136,9 @@ max(b) """) assert result == 256 - self.check_loops({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 1, + "int_lt": 1, "guard_true": 1, "jump": 1}) def test_min(self): py.test.skip("broken, investigate") @@ -153,10 +149,9 @@ min(b) """) assert result == -24 - self.check_resops({'guard_class': 10, 'getfield_gc': 15, 'guard_value': 1, - 'int_add': 4, 'guard_isnull': 1, 'jump': 1, 'int_ge': 2, - 'getarrayitem_raw': 4, 'float_add': 2, 'guard_false': 4, - 'float_ne': 2}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 1, + "int_lt": 1, "guard_true": 1, "jump": 1}) def define_any(): return """ @@ -169,10 +164,10 @@ def test_any(self): result = self.run("any") assert result == 1 - self.check_resops({'guard_class': 10, 'getfield_gc': 15, 'guard_value': 1, - 'int_add': 4, 'guard_isnull': 1, 'jump': 1, 'int_ge': 2, - 'getarrayitem_raw': 4, 'float_add': 2, 'guard_false': 4, - 'float_ne': 2}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_ne": 1, "int_add": 2, + "int_ge": 1, "jump": 1, + "guard_false": 2}) def define_already_forced(): return """ @@ -189,13 +184,14 @@ # This is the sum of the ops for both loops, however if you remove the # optimization then you end up with 2 float_adds, so we can still be # sure it was optimized correctly. - self.check_resops({'setarrayitem_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 35, + # XXX the comment above is wrong now. 
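# [editorial sketch -- not part of the patch]  The check_simple_loop({...})
# calls above compare the operations recorded in the innermost traced loop
# against an expected histogram.  The check itself is just counting op names,
# roughly like this (the helper and trace below are illustrative, not the
# real JIT test-support API):

from collections import Counter

def check_simple_loop(loop_ops, expected):
    counts = dict(Counter(loop_ops))
    assert counts == expected, (counts, expected)

trace = ["getinteriorfield_raw", "getinteriorfield_raw", "float_add",
         "setinteriorfield_raw", "int_add", "int_add", "int_add",
         "int_ge", "guard_false", "jump"]
check_simple_loop(trace, {"getinteriorfield_raw": 2, "float_add": 1,
                          "setinteriorfield_raw": 1, "int_add": 3,
                          "int_ge": 1, "guard_false": 1, "jump": 1})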
We need preferrably a way to + # count the two loops separately + self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 2, 'int_ge': 4, - 'getarrayitem_raw': 4, 'float_add': 2, 'guard_false': 4, + 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, 'guard_value': 2}) - def define_ufunc(): return """ a = |30| @@ -207,11 +203,9 @@ def test_ufunc(self): result = self.run("ufunc") assert result == -6 - self.check_resops({'setarrayitem_raw': 2, 'getfield_gc': 24, 'guard_class': 14, - 'int_add': 6, 'float_neg': 2, 'guard_isnull': 2, 'jump': 1, - 'int_ge': 2, 'getarrayitem_raw': 4, 'float_add': 2, - 'guard_false': 2, 'guard_value': 2}) - + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_neg": 1, + "setinteriorfield_raw": 1, "int_add": 3, + "int_ge": 1, "guard_false": 1, "jump": 1}) def define_specialization(): return """ @@ -249,10 +243,28 @@ def test_slice(self): result = self.run("slice") assert result == 18 - py.test.skip("Few remaining arraylen_gc left") - self.check_loops({'int_mul': 2, 'getarrayitem_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, 'int_add': 3, - 'int_lt': 1, 'guard_true': 1, 'jump': 1}) + self.check_simple_loop({'getinteriorfield_raw': 2, + 'float_add': 1, + 'setinteriorfield_raw': 1, + 'int_add': 3, + 'int_ge': 1, 'guard_false': 1, + 'jump': 1}) + + def define_slice2(): + return """ + a = |30| + s1 = a -> :20:2 + s2 = a -> :30:3 + b = s1 + s2 + b -> 3 + """ + + def test_slice2(self): + result = self.run("slice2") + assert result == 15 + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, + 'int_ge': 1, 'guard_false': 1, 'jump': 1}) def define_multidim(): return """ @@ -264,12 +276,11 @@ def test_multidim(self): result = self.run('multidim') assert result == 8 - self.check_resops({'setarrayitem_raw': 2, 'getfield_gc': 19, 'guard_class': 11, - 'int_add': 6, 'guard_isnull': 1, 'jump': 1, 'int_ge': 2, - 'getarrayitem_raw': 4, 'float_add': 2, 'guard_false': 2, - 'guard_value': 1}) # int_add might be 1 here if we try slightly harder with # reusing indexes or some optimization + self.check_simple_loop({'float_add': 1, 'getinteriorfield_raw': 2, + 'guard_false': 1, 'int_add': 3, 'int_ge': 1, + 'jump': 1, 'setinteriorfield_raw': 1}) def define_multidim_slice(): return """ @@ -285,7 +296,7 @@ py.test.skip("improve") # XXX the bridge here is scary. 
Hopefully jit-targets will fix that, # otherwise it looks kind of good - self.check_loops({}) + self.check_simple_loop({}) def define_broadcast(): return """ @@ -299,58 +310,34 @@ result = self.run("broadcast") assert result == 10 py.test.skip("improve") - self.check_loops({}) + self.check_simple_loop({}) + + def define_setslice(): + return """ + a = |30| + b = |10| + b[1] = 5.5 + c = b + b + a[0:30:3] = c + a -> 3 + """ + + def test_setslice(self): + result = self.run("setslice") + assert result == 11.0 + self.check_loop_count(1) + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, + 'setinteriorfield_raw': 1, 'int_add': 3, + 'int_eq': 1, 'guard_false': 1, 'jump': 1}) class TestNumpyOld(LLJitMixin): def setup_class(cls): py.test.skip("old") from pypy.module.micronumpy.compile import FakeSpace - from pypy.module.micronumpy.interp_dtype import W_Float64Dtype + from pypy.module.micronumpy.interp_dtype import get_dtype_cache cls.space = FakeSpace() - cls.float64_dtype = cls.space.fromcache(W_Float64Dtype) - - def test_slice2(self): - def f(i): - step1 = 2 - step2 = 3 - ar = NDimArray(step2*i, dtype=self.float64_dtype) - new_sig = signature.Signature.find_sig([ - NDimSlice.signature, ar.signature - ]) - s1 = NDimSlice(0, step1*i, step1, i, ar, new_sig) - new_sig = signature.Signature.find_sig([ - NDimSlice.signature, s1.signature - ]) - s2 = NDimSlice(0, step2*i, step2, i, ar, new_sig) - v = interp_ufuncs.get(self.space).add.call(self.space, [s1, s2]) - return v.get_concrete().eval(3).val - - result = self.meta_interp(f, [5], listops=True, backendopt=True) - self.check_loops({'int_mul': 2, 'getarrayitem_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, 'int_add': 1, - 'int_lt': 1, 'guard_true': 1, 'jump': 1}) - assert result == f(5) - - def test_setslice(self): - space = self.space - float64_dtype = self.float64_dtype - - def f(i): - step = NonConstant(3) - ar = NDimArray(step*i, dtype=float64_dtype) - ar2 = NDimArray(i, dtype=float64_dtype) - ar2.get_concrete().setitem(1, float64_dtype.box(5.5)) - arg = ar2.descr_add(space, ar2) - ar.setslice(space, 0, step*i, step, i, arg) - return ar.get_concrete().eval(3).val - - result = self.meta_interp(f, [5], listops=True, backendopt=True) - self.check_loops({'getarrayitem_raw': 2, - 'float_add' : 1, - 'setarrayitem_raw': 1, 'int_add': 2, - 'int_lt': 1, 'guard_true': 1, 'jump': 1}) - assert result == 11.0 + cls.float64_dtype = get_dtype_cache(cls.space).w_float64dtype def test_int32_sum(self): py.test.skip("pypy/jit/backend/llimpl.py needs to be changed to " @@ -365,7 +352,7 @@ dtype = float64_dtype else: dtype = int32_dtype - ar = NDimArray(n, [n], dtype=dtype) + ar = W_NDimArray(n, [n], dtype=dtype) i = 0 while i < n: ar.get_concrete().setitem(i, int32_dtype.box(7)) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/types.py @@ -0,0 +1,389 @@ +import functools +import math + +from pypy.module.micronumpy import interp_boxes +from pypy.objspace.std.floatobject import float2string +from pypy.rlib import rfloat, libffi, clibffi +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import LONG_BIT, widen +from pypy.rpython.lltypesystem import lltype, rffi + + +def simple_unary_op(func): + specialize.argtype(1)(func) + @functools.wraps(func) + def dispatcher(self, v): + return self.box( + func( + self, + self.for_computation(self.unbox(v)) + ) + ) + return dispatcher + +def simple_binary_op(func): + specialize.argtype(1, 2)(func) 
+ @functools.wraps(func) + def dispatcher(self, v1, v2): + return self.box( + func( + self, + self.for_computation(self.unbox(v1)), + self.for_computation(self.unbox(v2)), + ) + ) + return dispatcher + +def raw_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return func(self, + self.for_computation(self.unbox(v1)), + self.for_computation(self.unbox(v2)) + ) + return dispatcher + +class BaseType(object): + def _unimplemented_ufunc(self, *args): + raise NotImplementedError + # add = sub = mul = div = mod = pow = eq = ne = lt = le = gt = ge = max = \ + # min = copysign = pos = neg = abs = sign = reciprocal = fabs = floor = \ + # exp = sin = cos = tan = arcsin = arccos = arctan = arcsinh = \ + # arctanh = _unimplemented_ufunc + +class Primitive(object): + _mixin_ = True + def get_element_size(self): + return rffi.sizeof(self.T) + + @specialize.argtype(1) + def box(self, value): + return self.BoxType(rffi.cast(self.T, value)) + + def unbox(self, box): + assert isinstance(box, self.BoxType) + return box.value + + def coerce(self, space, w_item): + if isinstance(w_item, self.BoxType): + return w_item + return self.coerce_subtype(space, space.gettypefor(self.BoxType), w_item) + + def coerce_subtype(self, space, w_subtype, w_item): + # XXX: ugly + w_obj = space.allocate_instance(self.BoxType, w_subtype) + assert isinstance(w_obj, self.BoxType) + w_obj.__init__(self._coerce(space, w_item).value) + return w_obj + + def _coerce(self, space, w_item): + raise NotImplementedError + + def read(self, storage, width, i, offset): + return self.box(libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset + )) + + def store(self, storage, width, i, offset, box): + value = self.unbox(box) + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value + ) + + def fill(self, storage, width, box, start, stop, offset): + value = self.unbox(box) + for i in xrange(start, stop): + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value + ) + + @simple_binary_op + def add(self, v1, v2): + return v1 + v2 + + @simple_binary_op + def sub(self, v1, v2): + return v1 - v2 + + @simple_binary_op + def mul(self, v1, v2): + return v1 * v2 + + @simple_unary_op + def pos(self, v): + return +v + + @simple_unary_op + def neg(self, v): + return -v + + @simple_unary_op + def abs(self, v): + return abs(v) + + @raw_binary_op + def eq(self, v1, v2): + return v1 == v2 + + @raw_binary_op + def ne(self, v1, v2): + return v1 != v2 + + @raw_binary_op + def lt(self, v1, v2): + return v1 < v2 + + @raw_binary_op + def le(self, v1, v2): + return v1 <= v2 + + @raw_binary_op + def gt(self, v1, v2): + return v1 > v2 + + @raw_binary_op + def ge(self, v1, v2): + return v1 >= v2 + + def bool(self, v): + return bool(self.for_computation(self.unbox(v))) + + @simple_binary_op + def max(self, v1, v2): + return max(v1, v2) + + @simple_binary_op + def min(self, v1, v2): + return min(v1, v2) + +class Bool(BaseType, Primitive): + T = lltype.Bool + BoxType = interp_boxes.W_BoolBox + + True = BoxType(True) + False = BoxType(False) + + @specialize.argtype(1) + def box(self, value): + box = Primitive.box(self, value) + if box.value: + return self.True + else: + return self.False + + def coerce_subtype(self, space, w_subtype, w_item): + # Doesn't return subclasses so it can return the constants. 
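# [editorial sketch -- not part of the patch]  The simple_*_op decorators in
# the new types.py unbox the argument boxes, run the raw computation, and box
# the result again, so each dtype class only writes the arithmetic itself.
# Stripped of the RPython specialization hints, the pattern reduces to:

import functools

def simple_binary_op(func):
    @functools.wraps(func)
    def dispatcher(self, v1, v2):
        return self.box(func(self, self.unbox(v1), self.unbox(v2)))
    return dispatcher

class Float64(object):
    class Box(object):
        def __init__(self, value):
            self.value = value
    def box(self, value):
        return self.Box(float(value))
    def unbox(self, box):
        return box.value
    @simple_binary_op
    def add(self, v1, v2):
        return v1 + v2

t = Float64()
assert t.unbox(t.add(t.box(1.5), t.box(2.0))) == 3.5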
+ return self._coerce(space, w_item) + + def _coerce(self, space, w_item): + return self.box(space.is_true(w_item)) + + def str_format(self, box): + value = self.unbox(box) + return "True" if value else "False" + + def for_computation(self, v): + return int(v) + +class Integer(Primitive): + _mixin_ = True + + def _coerce(self, space, w_item): + return self.box(space.int_w(space.int(w_item))) + + def str_format(self, box): + value = self.unbox(box) + return str(self.for_computation(value)) + + def for_computation(self, v): + return widen(v) + + @simple_binary_op + def div(self, v1, v2): + if v2 == 0: + return 0 + return v1 / v2 + + @simple_binary_op + def mod(self, v1, v2): + return v1 % v2 + + @simple_binary_op + def pow(self, v1, v2): + res = 1 + while v2 > 0: + if v2 & 1: + res *= v1 + v2 >>= 1 + if v2 == 0: + break + v1 *= v1 + return res + + @simple_unary_op + def sign(self, v): + if v > 0: + return 1 + elif v < 0: + return -1 + else: + assert v == 0 + return 0 + +class Int8(BaseType, Integer): + T = rffi.SIGNEDCHAR + BoxType = interp_boxes.W_Int8Box + +class UInt8(BaseType, Integer): + T = rffi.UCHAR + BoxType = interp_boxes.W_UInt8Box + +class Int16(BaseType, Integer): + T = rffi.SHORT + BoxType = interp_boxes.W_Int16Box + +class UInt16(BaseType, Integer): + T = rffi.USHORT + BoxType = interp_boxes.W_UInt16Box + +class Int32(BaseType, Integer): + T = rffi.INT + BoxType = interp_boxes.W_Int32Box + +class UInt32(BaseType, Integer): + T = rffi.UINT + BoxType = interp_boxes.W_UInt32Box + +class Long(BaseType, Integer): + T = rffi.LONG + BoxType = interp_boxes.W_LongBox + +class ULong(BaseType, Integer): + T = rffi.ULONG + BoxType = interp_boxes.W_ULongBox + +class Int64(BaseType, Integer): + T = rffi.LONGLONG + BoxType = interp_boxes.W_Int64Box + +class UInt64(BaseType, Integer): + T = rffi.ULONGLONG + BoxType = interp_boxes.W_UInt64Box + +class Float(Primitive): + _mixin_ = True + + def _coerce(self, space, w_item): + return self.box(space.float_w(space.float(w_item))) + + def str_format(self, box): + value = self.unbox(box) + return float2string(self.for_computation(value), "g", rfloat.DTSF_STR_PRECISION) + + def for_computation(self, v): + return float(v) + + @simple_binary_op + def div(self, v1, v2): + try: + return v1 / v2 + except ZeroDivisionError: + if v1 == v2 == 0.0: + return rfloat.NAN + return rfloat.copysign(rfloat.INFINITY, v1 * v2) + + @simple_binary_op + def mod(self, v1, v2): + return math.fmod(v1, v2) + + @simple_binary_op + def pow(self, v1, v2): + return math.pow(v1, v2) + + @simple_binary_op + def copysign(self, v1, v2): + return math.copysign(v1, v2) + + @simple_unary_op + def sign(self, v): + if v == 0.0: + return 0.0 + return rfloat.copysign(1.0, v) + + @simple_unary_op + def fabs(self, v): + return math.fabs(v) + + @simple_unary_op + def reciprocal(self, v): + if v == 0.0: + return rfloat.copysign(rfloat.INFINITY, v) + return 1.0 / v + + @simple_unary_op + def floor(self, v): + return math.floor(v) + + @simple_unary_op + def exp(self, v): + try: + return math.exp(v) + except OverflowError: + return rfloat.INFINITY + + @simple_unary_op + def sin(self, v): + return math.sin(v) + + @simple_unary_op + def cos(self, v): + return math.cos(v) + + @simple_unary_op + def tan(self, v): + return math.tan(v) + + @simple_unary_op + def arcsin(self, v): + if not -1.0 <= v <= 1.0: + return rfloat.NAN + return math.asin(v) + + @simple_unary_op + def arccos(self, v): + if not -1.0 <= v <= 1.0: + return rfloat.NAN + return math.acos(v) + + @simple_unary_op + def arctan(self, v): + 
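# [editorial sketch -- not part of the patch]  Integer.pow above is binary
# exponentiation (square-and-multiply), and Integer.div guards against a zero
# divisor by returning 0 instead of raising.  The same logic in plain Python,
# for non-negative exponents:

def int_pow(v1, v2):
    res = 1
    while v2 > 0:
        if v2 & 1:
            res *= v1
        v2 >>= 1
        if v2 == 0:
            break
        v1 *= v1
    return res

def int_div(v1, v2):
    if v2 == 0:
        return 0
    return v1 // v2

assert int_pow(3, 5) == 243
assert int_pow(2, 10) == 1024
assert int_pow(7, 0) == 1
assert int_div(7, 2) == 3 and int_div(7, 0) == 0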
return math.atan(v) + + @simple_unary_op + def arcsinh(self, v): + return math.asinh(v) + + @simple_unary_op + def arctanh(self, v): + if v == 1.0 or v == -1.0: + return math.copysign(rfloat.INFINITY, v) + if not -1.0 < v < 1.0: + return rfloat.NAN + return math.atanh(v) + + @simple_unary_op + def sqrt(self, v): + try: + return math.sqrt(v) + except ValueError: + return rfloat.NAN + + +class Float32(BaseType, Float): + T = rffi.FLOAT + BoxType = interp_boxes.W_Float32Box + +class Float64(BaseType, Float): + T = rffi.DOUBLE + BoxType = interp_boxes.W_Float64Box \ No newline at end of file diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -3,7 +3,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped from pypy.rlib import rmmap -from pypy.rlib.rmmap import RValueError, RTypeError, ROverflowError +from pypy.rlib.rmmap import RValueError, RTypeError class W_MMap(Wrappable): @@ -212,8 +212,6 @@ raise OperationError(space.w_ValueError, space.wrap(e.message)) except RTypeError, e: raise OperationError(space.w_TypeError, space.wrap(e.message)) - except ROverflowError, e: - raise OperationError(space.w_OverflowError, space.wrap(e.message)) return space.wrap(self) elif rmmap._MS_WINDOWS: @@ -233,8 +231,6 @@ raise OperationError(space.w_ValueError, space.wrap(e.message)) except RTypeError, e: raise OperationError(space.w_TypeError, space.wrap(e.message)) - except ROverflowError, e: - raise OperationError(space.w_OverflowError, space.wrap(e.message)) return space.wrap(self) W_MMap.typedef = TypeDef("mmap", diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py --- a/pypy/module/rctime/interp_time.py +++ b/pypy/module/rctime/interp_time.py @@ -3,7 +3,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec from pypy.rpython.lltypesystem import lltype -from pypy.rlib.rarithmetic import ovfcheck_float_to_int +from pypy.rlib.rarithmetic import ovfcheck_float_to_int, intmask from pypy.rlib import rposix from pypy.translator.tool.cbuild import ExternalCompilationInfo import os @@ -585,7 +585,7 @@ # More likely, the format yields an empty result, # e.g. an empty format, or %Z when the timezone # is unknown. 
- result = rffi.charp2strn(outbuf, buflen) + result = rffi.charp2strn(outbuf, intmask(buflen)) return space.wrap(result) finally: lltype.free(outbuf, flavor='raw') diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -38,7 +38,6 @@ def __init__(self, name, startblock, return_var=None): self.name = name # function name (possibly mangled already) self.startblock = startblock - self.startblock.isstartblock = True # build default returnblock self.returnblock = Block([return_var or Variable()]) self.returnblock.operations = () @@ -171,11 +170,10 @@ class Block(object): - __slots__ = """isstartblock inputargs operations exitswitch + __slots__ = """inputargs operations exitswitch exits blockcolor""".split() def __init__(self, inputargs): - self.isstartblock = False self.inputargs = list(inputargs) # mixed list of variable/const XXX self.operations = [] # list of SpaceOperation(s) self.exitswitch = None # a variable or @@ -452,7 +450,6 @@ newblock.closeblock(*newlinks) newstartblock = blockmap[graph.startblock] - newstartblock.isstartblock = True newgraph = FunctionGraph(graph.name, newstartblock) newgraph.returnblock = blockmap[graph.returnblock] newgraph.exceptblock = blockmap[graph.exceptblock] @@ -490,7 +487,6 @@ for block in graph.iterblocks(): - assert bool(block.isstartblock) == (block is graph.startblock) assert type(block.exits) is tuple, ( "block.exits is a %s (closeblock() or recloseblock() missing?)" % (type(block.exits).__name__,)) diff --git a/pypy/objspace/flow/test/test_checkgraph.py b/pypy/objspace/flow/test/test_checkgraph.py --- a/pypy/objspace/flow/test/test_checkgraph.py +++ b/pypy/objspace/flow/test/test_checkgraph.py @@ -13,20 +13,6 @@ py.test.raises(AssertionError, checkgraph, g) -def test_nostartblock(): - g = FunctionGraph("g", Block([])) - g.startblock.closeblock(Link([Constant(1)], g.returnblock)) - g.startblock.isstartblock = False - py.test.raises(AssertionError, checkgraph, g) - -def test_twostartblocks(): - g = FunctionGraph("g", Block([])) - b = Block([]) - b.isstartblock = True - g.startblock.closeblock(Link([], b)) - b.closeblock(Link([Constant(1)], g.returnblock)) - py.test.raises(AssertionError, checkgraph, g) - def test_exitlessblocknotexitblock(): g = FunctionGraph("g", Block([])) py.test.raises(AssertionError, checkgraph, g) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -50,6 +50,13 @@ else: return space.fromcache(StringListStrategy) + # check for floats + for w_obj in list_w: + if not is_W_FloatObject(w_obj): + break + else: + return space.fromcache(FloatListStrategy) + return space.fromcache(ObjectListStrategy) def is_W_IntObject(w_object): @@ -60,7 +67,9 @@ from pypy.objspace.std.stringobject import W_StringObject return type(w_object) is W_StringObject - +def is_W_FloatObject(w_object): + from pypy.objspace.std.floatobject import W_FloatObject + return type(w_object) is W_FloatObject class W_ListObject(W_AbstractListObject): from pypy.objspace.std.listtype import list_typedef as typedef @@ -317,6 +326,8 @@ to the added item. W_Lists do not switch back to EmptyListStrategy when becoming empty again.""" + _applevel_repr = "empty" + def __init__(self, space): ListStrategy.__init__(self, space) # cache an empty list that is used whenever getitems is called (i.e. 
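# [editorial sketch -- not part of the patch]  The listobject hunks in this
# area add a FloatListStrategy: the strategy chooser scans a freshly created
# list and picks the float strategy only when every element is a float,
# mirroring the existing int and string cases, and anything mixed falls back
# to the object strategy.  A toy version of that selection logic:

def pick_strategy(items):
    if not items:
        return "empty"
    for check, name in [(lambda x: type(x) is int, "int"),
                        (lambda x: type(x) is str, "str"),
                        (lambda x: type(x) is float, "float")]:
        if all(check(x) for x in items):
            return name
    return "object"

assert pick_strategy([]) == "empty"
assert pick_strategy([1, 2, 3]) == "int"
assert pick_strategy([1.1, 2.2]) == "float"
assert pick_strategy([1.1, "a"]) == "object"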
sorting) @@ -364,6 +375,8 @@ strategy = self.space.fromcache(IntegerListStrategy) elif is_W_StringObject(w_item): strategy = self.space.fromcache(StringListStrategy) + elif is_W_FloatObject(w_item): + strategy = self.space.fromcache(FloatListStrategy) else: strategy = self.space.fromcache(ObjectListStrategy) @@ -415,6 +428,8 @@ On any operation destroying the range (inserting, appending non-ints) the strategy is switched to IntegerListStrategy.""" + _applevel_repr = "range" + def switch_to_integer_strategy(self, w_list): items = self._getitems_range(w_list, False) strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) @@ -853,6 +868,7 @@ class ObjectListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None + _applevel_repr = "object" def unwrap(self, w_obj): return w_obj @@ -881,6 +897,7 @@ class IntegerListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0 + _applevel_repr = "int" def wrap(self, intval): return self.space.wrap(intval) @@ -905,8 +922,36 @@ if reverse: l.reverse() +class FloatListStrategy(AbstractUnwrappedStrategy, ListStrategy): + _none_value = 0.0 + _applevel_repr = "float" + + def wrap(self, floatval): + return self.space.wrap(floatval) + + def unwrap(self, w_float): + return self.space.float_w(w_float) + + erase, unerase = rerased.new_erasing_pair("float") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def is_correct_type(self, w_obj): + return is_W_FloatObject(w_obj) + + def list_is_correct_type(self, w_list): + return w_list.strategy is self.space.fromcache(FloatListStrategy) + + def sort(self, w_list, reverse): + l = self.unerase(w_list.lstorage) + sorter = FloatSort(l, len(l)) + sorter.sort() + if reverse: + l.reverse() + class StringListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None + _applevel_repr = "str" def wrap(self, stringval): return self.space.wrap(stringval) @@ -934,6 +979,7 @@ def getitems_str(self, w_list): return self.unerase(w_list.lstorage) + # _______________________________________________________ init_signature = Signature(['sequence'], None, None) @@ -1282,6 +1328,7 @@ TimSort = make_timsort_class() IntBaseTimSort = make_timsort_class() +FloatBaseTimSort = make_timsort_class() StringBaseTimSort = make_timsort_class() class KeyContainer(baseobjspace.W_Root): @@ -1302,6 +1349,10 @@ def lt(self, a, b): return a < b +class FloatSort(FloatBaseTimSort): + def lt(self, a, b): + return a < b + class StringSort(StringBaseTimSort): def lt(self, a, b): return a < b diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -453,12 +453,12 @@ multi = r_uint(1822399083) + r_uint(1822399083) + 1 if w_set.hash != 0: return space.wrap(w_set.hash) - hash = 1927868237 - hash *= (len(w_set.setdata) + 1) + hash = r_uint(1927868237) + hash *= r_uint(len(w_set.setdata) + 1) for w_item in w_set.setdata: h = space.hash_w(w_item) - value = ((h ^ (h << 16) ^ 89869747) * multi) - hash = intmask(hash ^ value) + value = (r_uint(h ^ (h << 16) ^ 89869747) * multi) + hash = hash ^ value hash = hash * 69069 + 907133923 if hash == 0: hash = 590923713 diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -470,11 +470,17 @@ l.extend(iter([1, 2, 3, 4])) assert l is l0 assert l == [1, 1, 2, 3, 4] + l = l0 = ['a'] l.extend(iter(['b', 'c', 'd'])) assert l == 
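# [editorial sketch -- not part of the patch]  The setobject hunk above moves
# the frozenset hash computation onto r_uint so the multiplications and
# additions simply wrap around instead of overflowing.  Outside RPython the
# same wrap-around can be emulated by masking to a fixed word size (64 bits
# here for illustration; r_uint uses the platform word):

MASK = 2 ** 64 - 1

def wrapping_mul(a, b):
    return (a * b) & MASK

def wrapping_add(a, b):
    return (a + b) & MASK

# unsigned arithmetic never raises and never leaves the 0..MASK range
assert wrapping_mul(2 ** 63, 2) == 0
assert wrapping_add(MASK, 1) == 0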
['a', 'b', 'c', 'd'] assert l is l0 + l = l0 = [1.2] + l.extend(iter([2.3, 3.4, 4.5])) + assert l == [1.2, 2.3, 3.4, 4.5] + assert l is l0 + def test_sort(self): l = l0 = [1, 5, 3, 0] l.sort() @@ -493,6 +499,10 @@ l.sort(reverse=True) assert l == ["d", "c", "b", "a"] + l = [3.3, 2.2, 4.4, 1.1, 3.1, 5.5] + l.sort() + assert l == [1.1, 2.2, 3.1, 3.3, 4.4, 5.5] + def test_sort_cmp(self): def lencmp(a,b): return cmp(len(a), len(b)) l = [ 'a', 'fiver', 'tre', '' ] @@ -546,11 +556,19 @@ assert l[-2] == 6 raises(IndexError, "l[len(l)]") raises(IndexError, "l[-len(l)-1]") + l = ['a', 'b', 'c'] assert l[0] == 'a' assert l[-1] == 'c' assert l[-2] == 'b' raises(IndexError, "l[len(l)]") + + l = [1.1, 2.2, 3.3] + assert l[0] == 1.1 + assert l[-1] == 3.3 + assert l[-2] == 2.2 + raises(IndexError, "l[len(l)]") + l = [] raises(IndexError, "l[1]") @@ -588,6 +606,16 @@ assert l is l0 raises(IndexError, "del l[0]") + l = l0 = [1.1, 2.2, 3.3] + del l[0] + assert l == [2.2, 3.3] + del l[-1] + assert l == [2.2] + del l[-1] + assert l == [] + assert l is l0 + raises(IndexError, "del l[0]") + l = range(10) del l[5] assert l == [0, 1, 2, 3, 4, 6, 7, 8, 9] @@ -627,9 +655,15 @@ del l[:] assert l is l0 assert l == [] + l = ['a', 'b'] del l[:] assert l == [] + + l = [1.1, 2.2] + del l[:] + assert l == [] + l = range(5) del l[:] assert l == [] @@ -640,6 +674,11 @@ assert l is l0 assert l == [1,2,3,4,5] + l = l0 = [1.1,2.2,3.3] + l += [4.4,5.5] + assert l is l0 + assert l == [1.1,2.2,3.3,4.4,5.5] + l = l0 = ['a', 'b', 'c'] l1 = l[:] l += ['d'] @@ -697,6 +736,11 @@ l *= -5 assert l == [] + l = l0 = [1.1, 2.2] + l *= 2 + assert l is l0 + assert l == [1.1, 2.2, 1.1, 2.2] + l = range(2) l *= 2 assert l == [0, 1, 0, 1] @@ -731,6 +775,10 @@ assert c.index(0) == 0 raises(ValueError, c.index, 3) + c = [0.0, 2.2, 4.4] + assert c.index(0) == 0.0 + raises(ValueError, c.index, 3) + def test_index_cpython_bug(self): if self.on_cpython: skip("cpython has a bug here") @@ -779,6 +827,10 @@ l[::3] = ('a', 'b') assert l == ['a', 1, 2, 'b', 4, 5] + l = [0.0, 1.1, 2.2, 3.3, 4.4, 5.5] + l[::3] = ('a', 'b') + assert l == ['a', 1.1, 2.2, 'b', 4.4, 5.5] + def test_setslice_with_self(self): l = [1,2,3,4] l[:] = l @@ -835,6 +887,10 @@ l.append("a") assert l == [1,2,3,"a"] + l = [1.1, 2.2, 3.3] + l.append(4.4) + assert l == [1.1, 2.2, 3.3, 4.4] + def test_count(self): c = list('hello') assert c.count('l') == 2 @@ -875,6 +931,10 @@ l.pop() assert l == range(9) + l = [1.1, 2.2, 3.3] + l.pop() + assert l == [1.1, 2.2] + l = [] raises(IndexError, l.pop, 0) @@ -897,16 +957,19 @@ l2 = ["1", "2", "3", "4"] l3 = range(5) l4 = [1, 2, 3, "4"] + l5 = [1.1, 2.2, 3.3, 4.4] raises(IndexError, l1.pop, -5) raises(IndexError, l2.pop, -5) raises(IndexError, l3.pop, -6) raises(IndexError, l4.pop, -5) + raises(IndexError, l5.pop, -5) assert l1.pop(-2) == 3 assert l2.pop(-2) == "3" assert l3.pop(-2) == 3 assert l4.pop(-2) == 3 + assert l5.pop(-2) == 3.3 def test_remove(self): c = list('hello world') @@ -925,6 +988,13 @@ l = [0, 3, 5] raises(ValueError, c.remove, 2) + l = [0.0, 1.1, 2.2, 3.3, 4.4] + l.remove(2.2) + assert l == [0.0, 1.1, 3.3, 4.4] + l = [0.0, 3.3, 5.5] + raises(ValueError, c.remove, 2) + raises(ValueError, c.remove, 2.2) + def test_reverse(self): c = list('hello world') c.reverse() diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,4 +1,4 @@ -from pypy.objspace.std.listobject import 
W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, StringListStrategy, RangeListStrategy, make_range_list +from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, StringListStrategy, RangeListStrategy, make_range_list from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject @@ -15,7 +15,7 @@ def test_empty_to_any(self): l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) - l.append(self.space.wrap(1.)) + l.append(self.space.wrap((1,3))) assert isinstance(l.strategy, ObjectListStrategy) l = W_ListObject(self.space, []) @@ -28,6 +28,11 @@ l.append(self.space.wrap('a')) assert isinstance(l.strategy, StringListStrategy) + l = W_ListObject(self.space, []) + assert isinstance(l.strategy, EmptyListStrategy) + l.append(self.space.wrap(1.2)) + assert isinstance(l.strategy, FloatListStrategy) + def test_int_to_any(self): l = W_ListObject(self.space, [self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) @@ -44,6 +49,14 @@ l.append(self.space.wrap(3)) assert isinstance(l.strategy, ObjectListStrategy) + def test_float_to_any(self): + l = W_ListObject(self.space, [self.space.wrap(1.1),self.space.wrap(2.2),self.space.wrap(3.3)]) + assert isinstance(l.strategy, FloatListStrategy) + l.append(self.space.wrap(4.4)) + assert isinstance(l.strategy, FloatListStrategy) + l.append(self.space.wrap("a")) + assert isinstance(l.strategy, ObjectListStrategy) + def test_setitem(self): # This should work if test_listobject.py passes l = W_ListObject(self.space, [self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) @@ -65,6 +78,12 @@ l.setitem(0, self.space.wrap(2)) assert isinstance(l.strategy, ObjectListStrategy) + # FloatStrategy to ObjectStrategy + l = W_ListObject(self.space, [self.space.wrap(1.2),self.space.wrap(2.3),self.space.wrap(3.4)]) + assert isinstance(l.strategy, FloatListStrategy) + l.setitem(0, self.space.wrap("a")) + assert isinstance(l.strategy, ObjectListStrategy) + def test_insert(self): # no change l = W_ListObject(self.space, [self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) @@ -84,6 +103,12 @@ l.insert(3, self.space.wrap('d')) assert isinstance(l.strategy, ObjectListStrategy) + # FloatStrategy + l = W_ListObject(self.space, [self.space.wrap(1.1),self.space.wrap(2.2),self.space.wrap(3.3)]) + assert isinstance(l.strategy, FloatListStrategy) + l.insert(3, self.space.wrap('d')) + assert isinstance(l.strategy, ObjectListStrategy) + # EmptyStrategy l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) @@ -95,7 +120,9 @@ l.insert(0, self.space.wrap(2)) assert isinstance(l.strategy, IntegerListStrategy) - def notest_list_empty_after_delete(self): + def test_list_empty_after_delete(self): + import py + py.test.skip("return to emptyliststrategy is not supported anymore") l = W_ListObject(self.space, [self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.deleteitem(0) @@ -117,21 +144,36 @@ l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) assert isinstance(l.strategy, IntegerListStrategy) + # IntegerStrategy to IntegerStrategy l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(4), 
self.space.wrap(5), self.space.wrap(6)])) assert isinstance(l.strategy, IntegerListStrategy) + # ObjectStrategy to ObjectStrategy l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap('b'), self.space.wrap(3)]) assert isinstance(l.strategy, ObjectListStrategy) l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) assert isinstance(l.strategy, ObjectListStrategy) + # IntegerStrategy to ObjectStrategy l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap('a'), self.space.wrap('b'), self.space.wrap('c')])) assert isinstance(l.strategy, ObjectListStrategy) + # StringStrategy to ObjectStrategy + l = W_ListObject(self.space, [self.space.wrap('a'), self.space.wrap('b'), self.space.wrap('c')]) + assert isinstance(l.strategy, StringListStrategy) + l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) + assert isinstance(l.strategy, ObjectListStrategy) + + # FloatStrategy to ObjectStrategy + l = W_ListObject(self.space, [self.space.wrap(1.1), self.space.wrap(2.2), self.space.wrap(3.3)]) + assert isinstance(l.strategy, FloatListStrategy) + l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap('a'), self.space.wrap(2), self.space.wrap(3)])) + assert isinstance(l.strategy, ObjectListStrategy) + def test_setslice_List(self): def wrapitems(items): @@ -160,6 +202,11 @@ keep_other_strategy(l, 0, 2, other.length(), other) assert l.strategy is self.space.fromcache(StringListStrategy) + l = W_ListObject(self.space, wrapitems([1.1, 2.2, 3.3, 4.4, 5.5])) + other = W_ListObject(self.space, []) + keep_other_strategy(l, 0, 1, l.length(), other) + assert l.strategy is self.space.fromcache(FloatListStrategy) + l = W_ListObject(self.space, wrapitems(["a",3,"c",4,"e"])) other = W_ListObject(self.space, wrapitems(["a", "b", "c"])) keep_other_strategy(l, 0, 2, other.length(), other) @@ -194,6 +241,11 @@ l.extend(W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)])) assert isinstance(l.strategy, IntegerListStrategy) + l = W_ListObject(self.space, [self.space.wrap(1.1), self.space.wrap(2.2), self.space.wrap(3.3)]) + assert isinstance(l.strategy, FloatListStrategy) + l.extend(W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)])) + assert isinstance(l.strategy, ObjectListStrategy) + def test_empty_extend_with_any(self): empty = W_ListObject(self.space, []) assert isinstance(empty.strategy, EmptyListStrategy) @@ -220,6 +272,11 @@ empty = W_ListObject(self.space, []) assert isinstance(empty.strategy, EmptyListStrategy) + empty.extend(W_ListObject(self.space, [self.space.wrap(1.1), self.space.wrap(2.2), self.space.wrap(3.3)])) + assert isinstance(empty.strategy, FloatListStrategy) + + empty = W_ListObject(self.space, []) + assert isinstance(empty.strategy, EmptyListStrategy) empty.extend(W_ListObject(self.space, [])) assert isinstance(empty.strategy, EmptyListStrategy) @@ -293,12 +350,13 @@ l.setslice(0, 1, 3, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) assert isinstance(l.strategy, IntegerListStrategy) - def test_get_items_copy(self): + def test_copy_list(self): l1 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) - l2 = l1.getitems() + l2 = l1.clone() l2.append(self.space.wrap(4)) assert not l2 == 
l1.getitems() + def test_getitems_does_not_copy_object_list(self): l1 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap("two"), self.space.wrap(3)]) l2 = l1.getitems() l2.append(self.space.wrap("four")) @@ -345,7 +403,6 @@ # should not raise assert getslice__List_ANY_ANY(self.space, l, self.space.wrap(15), self.space.wrap(2222)).strategy == self.space.fromcache(EmptyListStrategy) - def test_add_to_rangelist(self): l1 = make_range_list(self.space, 1, 1, 3) l2 = W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5)]) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -102,6 +102,7 @@ 'instancetypedef', 'terminator', '_version_tag?', + 'name?', ] # for config.objspace.std.getattributeshortcut diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -418,7 +418,7 @@ if _MSVC: def invalid_socket(fd): return fd == INVALID_SOCKET - INVALID_SOCKET = cConfig.INVALID_SOCKET + INVALID_SOCKET = intmask(cConfig.INVALID_SOCKET) else: def invalid_socket(fd): return fd < 0 diff --git a/pypy/rlib/_stacklet_n_a.py b/pypy/rlib/_stacklet_n_a.py --- a/pypy/rlib/_stacklet_n_a.py +++ b/pypy/rlib/_stacklet_n_a.py @@ -1,4 +1,5 @@ from pypy.rlib import _rffi_stacklet as _c +from pypy.rlib import objectmodel, debug from pypy.rpython.annlowlevel import llhelper from pypy.tool.staticmethods import StaticMethods @@ -21,6 +22,9 @@ def destroy(thrd, h): _c.destroy(thrd._thrd, h) + if objectmodel.we_are_translated(): + debug.debug_print("not using a framework GC: " + "stacklet_destroy() may leak") is_empty_handle = _c.is_empty_handle diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -176,7 +176,6 @@ return decorator @oopspec("jit.isconstant(value)") - at specialize.ll() def isconstant(value): """ While tracing, returns whether or not the value is currently known to be @@ -186,9 +185,9 @@ This is for advanced usage only. """ return NonConstant(False) +isconstant._annspecialcase_ = "specialize:call_location" @oopspec("jit.isvirtual(value)") - at specialize.ll() def isvirtual(value): """ Returns if this value is virtual, while tracing, it's relatively @@ -197,6 +196,7 @@ This is for advanced usage only. """ return NonConstant(False) +isvirtual._annspecialcase_ = "specialize:call_location" class Entry(ExtRegistryEntry): _about_ = hint @@ -738,3 +738,29 @@ return hop.genop('jit_marker', vlist, resulttype=lltype.Void) +def record_known_class(value, cls): + """ + Assure the JIT that value is an instance of cls. This is not a precise + class check, unlike a guard_class. 
+ """ + assert isinstance(value, cls) + + +class Entry(ExtRegistryEntry): + _about_ = record_known_class + + def compute_result_annotation(self, s_inst, s_cls): + from pypy.annotation import model as annmodel + assert s_cls.is_constant() + assert not s_inst.can_be_none() + assert isinstance(s_inst, annmodel.SomeInstance) + + def specialize_call(self, hop): + from pypy.rpython.lltypesystem import lltype, rclass + classrepr = rclass.get_type_repr(hop.rtyper) + + hop.exception_cannot_occur() + v_inst = hop.inputarg(hop.args_r[0], arg=0) + v_cls = hop.inputarg(classrepr, arg=1) + return hop.genop('jit_record_known_class', [v_inst, v_cls], + resulttype=lltype.Void) diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -411,6 +411,10 @@ def getaddressindll(self, name): return dlsym(self.lib, name) +# These specialize.call_location's should really be specialize.arg(0), however +# you can't hash a pointer obj, which the specialize machinery wants to do. +# Given the present usage of these functions, it's good enough. + at specialize.call_location() @jit.oopspec("libffi_array_getitem(ffitype, width, addr, index, offset)") def array_getitem(ffitype, width, addr, index, offset): for TYPE, ffitype2 in clibffi.ffitype_map: @@ -420,6 +424,7 @@ return rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] assert False + at specialize.call_location() @jit.oopspec("libffi_array_setitem(ffitype, width, addr, index, offset, value)") def array_setitem(ffitype, width, addr, index, offset, value): for TYPE, ffitype2 in clibffi.ffitype_map: @@ -428,4 +433,4 @@ addr = rffi.ptradd(addr, offset) rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] = value return - assert False \ No newline at end of file + assert False diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -91,9 +91,18 @@ return decorated_func + def call_location(self): + """ Specializes the function for each call site. 
+ """ + def decorated_func(func): + func._annspecialcase_ = "specialize:call_location" + return func + + return decorated_func + def _wrap(self, args): return "("+','.join([repr(arg) for arg in args]) +")" - + specialize = _Specialize() def enforceargs(*args): @@ -125,7 +134,7 @@ def __hash__(self): raise TypeError("Symbolics are not hashable!") - + def __nonzero__(self): raise TypeError("Symbolics are not comparable") @@ -155,7 +164,7 @@ def lltype(self): from pypy.rpython.lltypesystem import lltype return lltype.Signed - + malloc_zero_filled = CDefinedIntSymbolic('MALLOC_ZERO_FILLED', default=0) running_on_llinterp = CDefinedIntSymbolic('RUNNING_ON_LLINTERP', default=1) # running_on_llinterp is meant to have the value 0 in all backends @@ -221,7 +230,7 @@ def compute_result_annotation(self, s_sizehint): from pypy.annotation.model import SomeInteger - + assert isinstance(s_sizehint, SomeInteger) return self.bookkeeper.newlist() diff --git a/pypy/rlib/rarithmetic.py b/pypy/rlib/rarithmetic.py --- a/pypy/rlib/rarithmetic.py +++ b/pypy/rlib/rarithmetic.py @@ -143,7 +143,9 @@ return self_type if self_type in (bool, int, long): return other_type - return build_int(None, self_type.SIGNED and other_type.SIGNED, max(self_type.BITS, other_type.BITS)) + if self_type.SIGNED == other_type.SIGNED: + return build_int(None, self_type.SIGNED, max(self_type.BITS, other_type.BITS)) + raise AssertionError, "Merging these types (%s, %s) is not supported" % (self_type, other_type) def signedtype(t): if t in (bool, int, long): diff --git a/pypy/rlib/rbigint.py b/pypy/rlib/rbigint.py --- a/pypy/rlib/rbigint.py +++ b/pypy/rlib/rbigint.py @@ -229,7 +229,7 @@ sign = self.sign if intmask(x) < 0 and (sign > 0 or (x << 1) != 0): raise OverflowError - return intmask(x * sign) + return intmask(intmask(x) * sign) def tolonglong(self): return _AsLongLong(self) @@ -1384,7 +1384,7 @@ # Now remove the excess 2 bits, rounding to nearest integer (with # ties rounded to even). 
- q = (q >> 2) + (bool(q & 2) and bool(q & 5)) + q = (q >> 2) + r_uint((bool(q & 2) and bool(q & 5))) if exp > DBL_MAX_EXP or (exp == DBL_MAX_EXP and q == r_ulonglong(1) << DBL_MANT_DIG): @@ -1540,8 +1540,8 @@ assert extra_bits == 2 or extra_bits == 3 # Round by remembering a modified copy of the low digit of x - mask = 1 << (extra_bits - 1) - low = x.udigit(0) | inexact + mask = r_uint(1 << (extra_bits - 1)) + low = x.udigit(0) | r_uint(inexact) if (low & mask) != 0 and (low & (3*mask-1)) != 0: low += mask x_digit_0 = low & ~(mask-1) @@ -1790,7 +1790,7 @@ i = v.numdigits() - 1 while i >= 0: prev = x - x = (x << SHIFT) + v.widedigit(i) + x = (x << SHIFT) + r_ulonglong(v.widedigit(i)) if (x >> SHIFT) != prev: raise OverflowError( "long int too large to convert to unsigned long long int") @@ -1833,8 +1833,8 @@ if x < v.udigit(i): x += 1 i -= 1 - x = intmask(x * sign) - return x + res = intmask(intmask(x) * sign) + return res #_________________________________________________________________ diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -23,10 +23,6 @@ def __init__(self, message): self.message = message -class ROverflowError(Exception): - def __init__(self, message): - self.message = message - includes = ["sys/types.h"] if _POSIX: includes += ['unistd.h', 'sys/mman.h'] @@ -597,8 +593,6 @@ def _check_map_size(size): if size < 0: raise RTypeError("memory mapped size must be positive") - if rffi.cast(size_t, size) != size: - raise ROverflowError("memory mapped size is too large (limited by C int)") if _POSIX: def mmap(fileno, length, flags=MAP_SHARED, diff --git a/pypy/rlib/rrandom.py b/pypy/rlib/rrandom.py --- a/pypy/rlib/rrandom.py +++ b/pypy/rlib/rrandom.py @@ -31,7 +31,7 @@ mt[0]= s & MASK_32 for mti in range(1, N): mt[mti] = (MAGIC_CONSTANT_A * - (mt[mti - 1] ^ (mt[mti - 1] >> 30)) + mti) + (mt[mti - 1] ^ (mt[mti - 1] >> 30)) + r_uint(mti)) # See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. # In the previous versions, MSBs of the seed affect # only MSBs of the array mt[]. @@ -52,7 +52,7 @@ for k in range(max_k, 0, -1): mt[i] = ((mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * MAGIC_CONSTANT_C)) - + init_key[j] + j) # non linear + + init_key[j] + r_uint(j)) # non linear mt[i] &= MASK_32 # for WORDSIZE > 32 machines i += 1 j += 1 @@ -104,5 +104,5 @@ j = n % i mt[i], mt[j] = mt[j], mt[i] for i in range(N): - mt[i] += i + 1 + mt[i] += r_uint(i + 1) self.index = N diff --git a/pypy/rlib/rsocket.py b/pypy/rlib/rsocket.py --- a/pypy/rlib/rsocket.py +++ b/pypy/rlib/rsocket.py @@ -17,7 +17,7 @@ from pypy.rlib.objectmodel import instantiate, keepalive_until_here from pypy.rlib import _rsocket_rffi as _c -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.rffi import sizeof, offsetof @@ -131,11 +131,12 @@ from_object = staticmethod(from_object) @staticmethod - def _check_port(space, port): + def make_ushort_port(space, port): from pypy.interpreter.error import OperationError if port < 0 or port > 0xffff: raise OperationError(space.w_ValueError, space.wrap( "port must be 0-65535.")) + return rffi.cast(rffi.USHORT, port) def fill_from_object(self, space, w_address): """ Purely abstract @@ -167,7 +168,7 @@ # IPv4 also supports the special name "". if name == '': - return makeipv4addr(intmask(INADDR_BROADCAST), result) + return makeipv4addr(r_uint(INADDR_BROADCAST), result) # "dd.dd.dd.dd" format. 
digits = name.split('.') @@ -184,9 +185,11 @@ 0 <= d1 <= 255 and 0 <= d2 <= 255 and 0 <= d3 <= 255): - return makeipv4addr(intmask(htonl( - (intmask(d0 << 24)) | (d1 << 16) | (d2 << 8) | (d3 << 0))), - result) + + addr = intmask(d0 << 24) | (d1 << 16) | (d2 << 8) | (d3 << 0) + addr = rffi.cast(rffi.UINT, addr) + addr = htonl(addr) + return makeipv4addr(addr, result) # generic host name to IP conversion info = getaddrinfo(name, None, family=family, address_to_fill=result) @@ -236,7 +239,9 @@ def get_protocol(self): a = self.lock(_c.sockaddr_ll) - res = ntohs(rffi.getintfield(a, 'c_sll_protocol')) + proto = rffi.getintfield(a, 'c_sll_protocol') + proto = rffi.cast(rffi.USHORT, proto) + res = ntohs(proto) self.unlock() return res @@ -277,6 +282,7 @@ def __init__(self, host, port): makeipaddr(host, self) a = self.lock(_c.sockaddr_in) + port = rffi.cast(rffi.USHORT, port) rffi.setintfield(a, 'c_sin_port', htons(port)) self.unlock() @@ -309,7 +315,7 @@ raise TypeError("AF_INET address must be a tuple of length 2") host = space.str_w(w_host) port = space.int_w(w_port) - Address._check_port(space, port) + port = Address.make_ushort_port(space, port) return INETAddress(host, port) from_object = staticmethod(from_object) @@ -318,7 +324,7 @@ from pypy.interpreter.error import OperationError _, w_port = space.unpackiterable(w_address, 2) port = space.int_w(w_port) - self._check_port(space, port) + port = self.make_ushort_port(space, port) a = self.lock(_c.sockaddr_in) rffi.setintfield(a, 'c_sin_port', htons(port)) self.unlock() @@ -403,7 +409,7 @@ "to 4, not %d" % len(pieces_w)) host = space.str_w(pieces_w[0]) port = space.int_w(pieces_w[1]) - Address._check_port(space, port) + port = Address.make_ushort_port(space, port) if len(pieces_w) > 2: flowinfo = space.uint_w(pieces_w[2]) else: flowinfo = 0 if len(pieces_w) > 3: scope_id = space.uint_w(pieces_w[3]) @@ -419,7 +425,7 @@ raise RSocketError("AF_INET6 address must be a tuple of length 2 " "to 4, not %d" % len(pieces_w)) port = space.int_w(pieces_w[1]) - self._check_port(space, port) + port = self.make_ushort_port(space, port) if len(pieces_w) > 2: flowinfo = space.uint_w(pieces_w[2]) else: flowinfo = 0 if len(pieces_w) > 3: scope_id = space.uint_w(pieces_w[3]) @@ -1295,9 +1301,13 @@ servent = _c.getservbyname(name, proto) if not servent: raise RSocketError("service/proto not found") - return ntohs(servent.c_s_port) + port = rffi.cast(rffi.UINT, servent.c_s_port) + return ntohs(port) def getservbyport(port, proto=None): + # This function is only called from pypy/module/_socket and the range of + # port is checked there + port = rffi.cast(rffi.USHORT, port) servent = _c.getservbyport(htons(port), proto) if not servent: raise RSocketError("port/proto not found") diff --git a/pypy/rlib/rstruct/standardfmttable.py b/pypy/rlib/rstruct/standardfmttable.py --- a/pypy/rlib/rstruct/standardfmttable.py +++ b/pypy/rlib/rstruct/standardfmttable.py @@ -206,7 +206,7 @@ if signed and i == 0 and x >= 128: x -= 256 intvalue <<= 8 - intvalue |= x + intvalue |= inttype(x) idx += 1 else: for i in unroll_range_size: diff --git a/pypy/rlib/rwin32.py b/pypy/rlib/rwin32.py --- a/pypy/rlib/rwin32.py +++ b/pypy/rlib/rwin32.py @@ -98,8 +98,13 @@ INVALID_HANDLE_VALUE = rffi.cast(HANDLE, -1) PFILETIME = rffi.CArrayPtr(FILETIME) - GetLastError = winexternal('GetLastError', [], DWORD, threadsafe=False) - SetLastError = winexternal('SetLastError', [DWORD], lltype.Void) + _GetLastError = winexternal('GetLastError', [], DWORD, threadsafe=False) + _SetLastError = 
winexternal('SetLastError', [DWORD], lltype.Void) + + def GetLastError(): + return rffi.cast(lltype.Signed, _GetLastError()) + def SetLastError(err): + _SetLastError(rffi.cast(DWORD, err)) # In tests, the first call to GetLastError is always wrong, because error # is hidden by operations in ll2ctypes. Call it now. @@ -184,12 +189,12 @@ msglen = FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, None, - code, + rffi.cast(DWORD, code), DEFAULT_LANGUAGE, rffi.cast(rffi.CCHARP, buf), 0, None) - if msglen <= 2 or msglen > sys.maxint: + if msglen <= 2: # includes the case msglen < 0 return fake_FormatError(code) # FormatMessage always appends \r\n. diff --git a/pypy/rlib/test/test_rarithmetic.py b/pypy/rlib/test/test_rarithmetic.py --- a/pypy/rlib/test/test_rarithmetic.py +++ b/pypy/rlib/test/test_rarithmetic.py @@ -126,13 +126,18 @@ cmp = f(r_uint(arg)) assert res == cmp - def binary_test(self, f, rargs = None): + def binary_test(self, f, rargs = None, translated=False): mask = maxint_mask if not rargs: rargs = (1, 3, 55) + # when translated merging different int types is not allowed + if translated: + alltypes = [(r_uint, r_uint)] + else: + alltypes = [(int, r_uint), (r_uint, int), (r_uint, r_uint)] for larg in (0, 1, 2, 3, 1234): for rarg in rargs: - for types in ((int, r_uint), (r_uint, int), (r_uint, r_uint)): + for types in alltypes: res = f(larg, rarg) left, right = types cmp = f(left(larg), right(rarg)) @@ -335,6 +340,14 @@ from pypy.rpython.lltypesystem.rffi import r_int_real assert compute_restype(r_int_real, r_int_real) is r_int_real +def test_compute_restype_incompatible(): + from pypy.rpython.lltypesystem.rffi import r_int_real, r_short, r_ushort + testcases = [(r_uint, r_longlong), (r_int_real, r_uint), + (r_short, r_ushort)] + for t1, t2 in testcases: + py.test.raises(AssertionError, compute_restype, t1, t2) + py.test.raises(AssertionError, compute_restype, t2, t1) + def test_most_neg_value_of(): assert most_neg_value_of_same_type(123) == -sys.maxint-1 assert most_neg_value_of_same_type(r_uint(123)) == 0 diff --git a/pypy/rlib/test/test_rrandom.py b/pypy/rlib/test/test_rrandom.py --- a/pypy/rlib/test/test_rrandom.py +++ b/pypy/rlib/test/test_rrandom.py @@ -1,4 +1,5 @@ from pypy.rlib.rrandom import Random, N, r_uint +from pypy.rlib.rarithmetic import intmask import _random # the numbers were created by using CPython's _randommodule.c @@ -24,13 +25,13 @@ def test_init_by_array(): rnd = Random() - rnd.init_by_array([1, 2, 3, 4]) + rnd.init_by_array([r_uint(n) for n in [1, 2, 3, 4]]) assert rnd.state[:14] == [2147483648, 1269538435, 699006892, 381364451, 172015551, 3237099449, 3609464087, 2187366456, 654585064, 2665903765, 3735624613, 1241943673, 2038528247, 3774211972] # try arrays of various sizes to test for corner cases for size in [N, N - 1, N + 1, N // 2, 2 * N]: - rnd.init_by_array(range(N)) + rnd.init_by_array([r_uint(n) for n in range(N)]) def test_jumpahead(): rnd = Random() @@ -47,8 +48,8 @@ def f(x, y): rnd = Random(x) rnd.init_by_array([x, y]) - rnd.jumpahead(y) + rnd.jumpahead(intmask(y)) return rnd.genrand32(), rnd.random() t = Translation(f) - fc = t.compile_c([int, int]) - assert fc(1, 2) == f(1, 2) + fc = t.compile_c([r_uint, r_uint]) + assert fc(r_uint(1), r_uint(2)) == f(r_uint(1), r_uint(2)) diff --git a/pypy/rlib/test/test_rstacklet.py b/pypy/rlib/test/test_rstacklet.py --- a/pypy/rlib/test/test_rstacklet.py +++ b/pypy/rlib/test/test_rstacklet.py @@ -1,4 +1,4 @@ -import gc +import gc, sys import py from pypy.rpython.tool.rffi_platform 
import CompilationError try: @@ -65,6 +65,15 @@ self.tasks[0].withdepth(self.random.genrand32() % 50) assert len(self.tasks[0].lst) == 0 + @here_is_a_test + def test_destroy(self): + # this used to give MemoryError in shadowstack tests + for i in range(100000): + self.status = 0 + h = self.sthread.new(switchbackonce_callback, + rffi.cast(llmemory.Address, 321)) + self.sthread.destroy(h) + def any_alive(self): for task in self.tasks: if task.h: @@ -228,6 +237,8 @@ cls.old_values = Runner.config, Runner.STATUSMAX Runner.config = config Runner.STATUSMAX = 25000 + if cls.gcrootfinder == "asmgcc" and sys.platform == "win32": + py.test.skip("fails with asmgcc on win32") def teardown_class(cls): Runner.config, Runner.STATUSMAX = cls.old_values diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -548,6 +548,9 @@ def op_jit_marker(self, *args): pass + def op_jit_record_known_class(self, *args): + pass + def op_get_exception_addr(self, *args): raise NotImplementedError diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -430,6 +430,7 @@ 'jit_force_virtual': LLOp(canrun=True), 'jit_is_virtual': LLOp(canrun=True), 'jit_force_quasi_immutable': LLOp(canrun=True), + 'jit_record_known_class' : LLOp(canrun=True), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canmallocgc=True), diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -548,6 +548,9 @@ def op_jit_force_quasi_immutable(*args): pass +def op_jit_record_known_class(x, y): + pass + def op_get_group_member(TYPE, grpptr, memberoffset): from pypy.rpython.lltypesystem import llgroup assert isinstance(memberoffset, llgroup.GroupMemberOffset) diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py --- a/pypy/rpython/memory/gc/minimark.py +++ b/pypy/rpython/memory/gc/minimark.py @@ -711,7 +711,7 @@ # # Record the newly allocated object and its full malloced size. # The object is young or old depending on the argument. 
- self.rawmalloced_total_size += allocsize + self.rawmalloced_total_size += r_uint(allocsize) if can_make_young: if not self.young_rawmalloced_objects: self.young_rawmalloced_objects = self.AddressDict() @@ -886,8 +886,8 @@ #return (num_bits + (LONG_BIT - 1)) >> LONG_BIT_SHIFT # --- Optimized version: return intmask( - ((r_uint(length) + ((LONG_BIT << self.card_page_shift) - 1)) >> - (self.card_page_shift + LONG_BIT_SHIFT))) + ((r_uint(length) + r_uint((LONG_BIT << self.card_page_shift) - 1)) >> + (self.card_page_shift + LONG_BIT_SHIFT))) def card_marking_bytes_for_length(self, length): # --- Unoptimized version: @@ -895,7 +895,7 @@ #return (num_bits + 7) >> 3 # --- Optimized version: return intmask( - ((r_uint(length) + ((8 << self.card_page_shift) - 1)) >> + ((r_uint(length) + r_uint((8 << self.card_page_shift) - 1)) >> (self.card_page_shift + 3))) def debug_check_consistency(self): @@ -1523,7 +1523,7 @@ llarena.arena_reserve(arena, totalsize) # size_gc_header = self.gcheaderbuilder.size_gc_header - self.rawmalloced_total_size += raw_malloc_usage(totalsize) + self.rawmalloced_total_size += r_uint(raw_malloc_usage(totalsize)) self.old_rawmalloced_objects.append(arena + size_gc_header) return arena @@ -1689,7 +1689,7 @@ allocsize += extra_words * WORD # llarena.arena_free(arena) - self.rawmalloced_total_size -= allocsize + self.rawmalloced_total_size -= r_uint(allocsize) def free_unvisited_rawmalloc_objects(self): list = self.old_rawmalloced_objects diff --git a/pypy/rpython/memory/gc/minimarkpage.py b/pypy/rpython/memory/gc/minimarkpage.py --- a/pypy/rpython/memory/gc/minimarkpage.py +++ b/pypy/rpython/memory/gc/minimarkpage.py @@ -149,7 +149,7 @@ ll_assert(nsize > 0, "malloc: size is null or negative") ll_assert(nsize <= self.small_request_threshold,"malloc: size too big") ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned") - self.total_memory_used += nsize + self.total_memory_used += r_uint(nsize) # # Get the page to use from the size size_class = nsize >> WORD_POWER_2 @@ -474,7 +474,7 @@ obj += block_size # # Update the global total size of objects. - self.total_memory_used += surviving * block_size + self.total_memory_used += r_uint(surviving * block_size) # # Return the number of surviving objects. 
return surviving diff --git a/pypy/rpython/memory/gctransform/asmgcroot.py b/pypy/rpython/memory/gctransform/asmgcroot.py --- a/pypy/rpython/memory/gctransform/asmgcroot.py +++ b/pypy/rpython/memory/gctransform/asmgcroot.py @@ -92,7 +92,6 @@ # make a copy of the graph that will reload the values graph2 = copygraph(fnptr._obj.graph) block2 = graph2.startblock - block2.isstartblock = False block1 = Block([]) reloadedvars = [] for v, c_p in zip(block2.inputargs, sra): @@ -109,7 +108,6 @@ [w], v)) reloadedvars.append(v) block1.closeblock(Link(reloadedvars, block2)) - block1.isstartblock = True graph2.startblock = block1 FUNC2 = lltype.FuncType([], FUNC1.RESULT) fnptr2 = lltype.functionptr(FUNC2, diff --git a/pypy/rpython/memory/gctransform/shadowstack.py b/pypy/rpython/memory/gctransform/shadowstack.py --- a/pypy/rpython/memory/gctransform/shadowstack.py +++ b/pypy/rpython/memory/gctransform/shadowstack.py @@ -307,7 +307,7 @@ "restore_state_from: broken shadowstack") self.gcdata.root_stack_base = shadowstackref.base self.gcdata.root_stack_top = shadowstackref.top - self.destroy(shadowstackref) + self._cleanup(shadowstackref) def start_fresh_new_state(self): self.gcdata.root_stack_base = self.unused_full_stack @@ -315,6 +315,10 @@ self.unused_full_stack = llmemory.NULL def destroy(self, shadowstackref): + llmemory.raw_free(shadowstackref.base) + self._cleanup(shadowstackref) + + def _cleanup(self, shadowstackref): shadowstackref.base = llmemory.NULL shadowstackref.top = llmemory.NULL shadowstackref.context = llmemory.NULL diff --git a/pypy/rpython/memory/gctransform/test/test_transform.py b/pypy/rpython/memory/gctransform/test/test_transform.py --- a/pypy/rpython/memory/gctransform/test/test_transform.py +++ b/pypy/rpython/memory/gctransform/test/test_transform.py @@ -102,12 +102,12 @@ llops.genop("gc_pop_alive", [var]) -def checkblock(block, is_borrowed): +def checkblock(block, is_borrowed, is_start_block): if block.operations == (): # a return/exception block -- don't want to think about them # (even though the test passes for somewhat accidental reasons) return - if block.isstartblock: + if is_start_block: refs_in = 0 else: refs_in = len([v for v in block.inputargs if isinstance(v, Variable) @@ -167,7 +167,7 @@ if check: for graph, is_borrowed in graphs_borrowed.iteritems(): for block in graph.iterblocks(): - checkblock(block, is_borrowed) + checkblock(block, is_borrowed, block is graph.startblock) return t, transformer def getops(graph): diff --git a/pypy/rpython/memory/gctransform/transform.py b/pypy/rpython/memory/gctransform/transform.py --- a/pypy/rpython/memory/gctransform/transform.py +++ b/pypy/rpython/memory/gctransform/transform.py @@ -263,9 +263,7 @@ # still be empty (but let's check) if starts_with_empty_block(graph) and inserted_empty_startblock: old_startblock = graph.startblock - graph.startblock.isstartblock = False graph.startblock = graph.startblock.exits[0].target - graph.startblock.isstartblock = True checkgraph(graph) diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -1773,7 +1773,7 @@ @registering(rwin32.FormatError) def register_rwin32_FormatError(self): - return extdef([rwin32.DWORD], str, + return extdef([lltype.Signed], str, "rwin32_FormatError", llimpl=rwin32.llimpl_FormatError, ooimpl=rwin32.fake_FormatError) diff --git a/pypy/rpython/module/ll_os_stat.py b/pypy/rpython/module/ll_os_stat.py --- a/pypy/rpython/module/ll_os_stat.py +++ b/pypy/rpython/module/ll_os_stat.py 
@@ -12,6 +12,7 @@ from pypy.rpython.tool import rffi_platform as platform from pypy.rpython.lltypesystem.rtupletype import TUPLE_TYPE from pypy.rlib import rposix +from pypy.rlib.rarithmetic import intmask from pypy.rlib.objectmodel import specialize from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.annlowlevel import hlstr @@ -442,20 +443,19 @@ # Helper functions for win32 def make_longlong(high, low): - return (lltype.r_longlong(high) << 32) + lltype.r_longlong(low) + return (rffi.r_longlong(high) << 32) + rffi.r_longlong(low) # Seconds between 1.1.1601 and 1.1.1970 -secs_between_epochs = lltype.r_longlong(11644473600) +secs_between_epochs = rffi.r_longlong(11644473600) def FILE_TIME_to_time_t_nsec(filetime): ft = make_longlong(filetime.c_dwHighDateTime, filetime.c_dwLowDateTime) # FILETIME is in units of 100 nsec nsec = (ft % 10000000) * 100 time = (ft / 10000000) - secs_between_epochs - return time, nsec + return intmask(time), intmask(nsec) def time_t_to_FILE_TIME(time, filetime): - ft = lltype.r_longlong((time + secs_between_epochs) * 10000000) - filetime.c_dwHighDateTime = lltype.r_uint(ft >> 32) - filetime.c_dwLowDateTime = lltype.r_uint(ft & lltype.r_uint(-1)) - + ft = (rffi.r_longlong(time) + secs_between_epochs) * 10000000 + filetime.c_dwHighDateTime = rffi.r_uint(ft >> 32) + filetime.c_dwLowDateTime = rffi.r_uint(ft) # masking off high bits diff --git a/pypy/rpython/module/ll_termios.py b/pypy/rpython/module/ll_termios.py --- a/pypy/rpython/module/ll_termios.py +++ b/pypy/rpython/module/ll_termios.py @@ -72,9 +72,14 @@ def tcsetattr_llimpl(fd, when, attributes): c_struct = lltype.malloc(TERMIOSP.TO, flavor='raw') - c_struct.c_c_iflag, c_struct.c_c_oflag, c_struct.c_c_cflag, \ - c_struct.c_c_lflag, ispeed, ospeed, cc = attributes try: + c_struct.c_c_iflag = r_uint(attributes[0]) + c_struct.c_c_oflag = r_uint(attributes[1]) + c_struct.c_c_cflag = r_uint(attributes[2]) + c_struct.c_c_lflag = r_uint(attributes[3]) + ispeed = r_uint(attributes[4]) + ospeed = r_uint(attributes[5]) + cc = attributes[6] for i in range(NCCS): c_struct.c_c_cc[i] = rffi.r_uchar(ord(cc[i][0])) if c_cfsetispeed(c_struct, ispeed) < 0: @@ -87,8 +92,8 @@ lltype.free(c_struct, flavor='raw') r_uint = rffi.r_uint -register_external(rtermios.tcsetattr, [int, int, (r_uint, r_uint, r_uint, - r_uint, r_uint, r_uint, [str])], llimpl=tcsetattr_llimpl, +register_external(rtermios.tcsetattr, [int, int, (int, int, int, + int, int, int, [str])], llimpl=tcsetattr_llimpl, export_name='termios.tcsetattr') # a bit C-c C-v code follows... 
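The FILE_TIME helpers rewritten in the ll_os_stat.py hunk above boil down to a fixed-point conversion: FILETIME counts 100-nanosecond ticks since 1601-01-01, while time_t counts seconds since 1970-01-01, which is 11644473600 seconds later. A minimal plain-Python sketch of that arithmetic (the helper names here are illustrative only, not part of the patch):

SECS_BETWEEN_EPOCHS = 11644473600   # seconds from 1601-01-01 to 1970-01-01

def filetime_to_time_t_nsec(high, low):
    ft = (high << 32) + low                   # 64-bit tick count
    nsec = (ft % 10**7) * 100                 # sub-second part, in nanoseconds
    return ft // 10**7 - SECS_BETWEEN_EPOCHS, nsec

def time_t_to_filetime(time_t):
    ft = (time_t + SECS_BETWEEN_EPOCHS) * 10**7
    return ft >> 32, ft & 0xFFFFFFFF          # (dwHighDateTime, dwLowDateTime)

# round-trip check for a whole-second value
high, low = time_t_to_filetime(1322697600)
assert filetime_to_time_t_nsec(high, low) == (1322697600, 0)

The RPython version additionally has to pick explicit rffi integer types so the shifts and casts stay well-defined after translation, which is what the hunk changes.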
diff --git a/pypy/rpython/normalizecalls.py b/pypy/rpython/normalizecalls.py --- a/pypy/rpython/normalizecalls.py +++ b/pypy/rpython/normalizecalls.py @@ -116,8 +116,6 @@ v = Constant(default) outlist.append(v) newblock.closeblock(Link(outlist, oldblock)) - oldblock.isstartblock = False - newblock.isstartblock = True graph.startblock = newblock for i in range(len(newdefaults)-1,-1,-1): if newdefaults[i] is NODEFAULT: @@ -171,8 +169,6 @@ # prepare the output args of newblock and link outlist = inlist[:] newblock.closeblock(Link(outlist, oldblock)) - oldblock.isstartblock = False - newblock.isstartblock = True graph.startblock = newblock # finished checkgraph(graph) diff --git a/pypy/tool/nullpath.py b/pypy/tool/nullpath.py --- a/pypy/tool/nullpath.py +++ b/pypy/tool/nullpath.py @@ -1,4 +1,4 @@ -import py +import py, os class NullPyPathLocal(py.path.local): @@ -6,7 +6,7 @@ return self.__class__(py.path.local.join(self, *args)) def open(self, mode): - return open('/dev/null', mode) + return open(os.devnull, mode) def __repr__(self): return py.path.local.__repr__(self) + ' [fake]' diff --git a/pypy/tool/test/test_nullpath.py b/pypy/tool/test/test_nullpath.py --- a/pypy/tool/test/test_nullpath.py +++ b/pypy/tool/test/test_nullpath.py @@ -1,11 +1,7 @@ -import sys +import sys, os import py from pypy.tool.nullpath import NullPyPathLocal -def setup_module(): - if 'posix' not in sys.builtin_module_names: - py.test.skip('posix only') - def test_nullpath(tmpdir): path = NullPyPathLocal(tmpdir) assert repr(path).endswith('[fake]') @@ -13,4 +9,4 @@ assert isinstance(foo_txt, NullPyPathLocal) # f = foo_txt.open('w') - assert f.name == '/dev/null' + assert f.name == os.devnull diff --git a/pypy/translator/backendopt/constfold.py b/pypy/translator/backendopt/constfold.py --- a/pypy/translator/backendopt/constfold.py +++ b/pypy/translator/backendopt/constfold.py @@ -37,8 +37,9 @@ except (KeyboardInterrupt, SystemExit): raise except Exception, e: - log.WARNING('constant-folding %r:' % (spaceop,)) - log.WARNING(' %s: %s' % (e.__class__.__name__, e)) + pass # turn off reporting these as warnings: useless + #log.WARNING('constant-folding %r:' % (spaceop,)) + #log.WARNING(' %s: %s' % (e.__class__.__name__, e)) else: # success in folding this space operation if spaceop.opname in fixup_op_result: diff --git a/pypy/translator/backendopt/inline.py b/pypy/translator/backendopt/inline.py --- a/pypy/translator/backendopt/inline.py +++ b/pypy/translator/backendopt/inline.py @@ -453,7 +453,6 @@ #vars that need to be passed through the blocks of the inlined function linktoinlined = splitlink copiedstartblock = self.copy_block(self.graph_to_inline.startblock) - copiedstartblock.isstartblock = False #find args passed to startblock of inlined function passon_args = [] for arg in self.op.args[1:]: diff --git a/pypy/translator/backendopt/mallocv.py b/pypy/translator/backendopt/mallocv.py --- a/pypy/translator/backendopt/mallocv.py +++ b/pypy/translator/backendopt/mallocv.py @@ -391,7 +391,6 @@ virtualframe = VirtualFrame(graph2.startblock, 0, nodelist) graphbuilder = GraphBuilder(self, graph2) specblock = graphbuilder.start_from_virtualframe(virtualframe) - specblock.isstartblock = True specgraph = graph2 specgraph.name += '_mallocv' specgraph.startblock = specblock diff --git a/pypy/translator/backendopt/test/test_malloc.py b/pypy/translator/backendopt/test/test_malloc.py --- a/pypy/translator/backendopt/test/test_malloc.py +++ b/pypy/translator/backendopt/test/test_malloc.py @@ -50,7 +50,8 @@ # we do the loop ourselves 
instead of calling remove_simple_mallocs() while True: progress = remover.remove_mallocs_once(graph) - simplify.transform_dead_op_vars_in_blocks(list(graph.iterblocks())) + simplify.transform_dead_op_vars_in_blocks(list(graph.iterblocks()), + [graph]) if progress and option.view: t.view() if expected_result is not Ellipsis: diff --git a/pypy/translator/c/genc.py b/pypy/translator/c/genc.py --- a/pypy/translator/c/genc.py +++ b/pypy/translator/c/genc.py @@ -542,7 +542,7 @@ mk.rule(*rule) if self.config.translation.gcrootfinder == 'asmgcc': - trackgcfiles = [cfile[:-2] for cfile in mk.cfiles] + trackgcfiles = [cfile[:cfile.rfind('.')] for cfile in mk.cfiles] if self.translator.platform.name == 'msvc': trackgcfiles = [f for f in trackgcfiles if f.startswith(('implement', 'testing', @@ -579,7 +579,7 @@ if self.translator.platform.name == 'msvc': lblofiles = [] for cfile in mk.cfiles: - f = cfile[:-2] + f = cfile[:cfile.rfind('.')] if f in trackgcfiles: ofile = '%s.lbl.obj' % (f,) else: diff --git a/pypy/translator/c/src/support.h b/pypy/translator/c/src/support.h --- a/pypy/translator/c/src/support.h +++ b/pypy/translator/c/src/support.h @@ -11,6 +11,7 @@ #endif /* MIN */ #define RUNNING_ON_LLINTERP 0 +#define OP_JIT_RECORD_KNOWN_CLASS(i, c, r) /* nothing */ #define FAIL_EXCEPTION(exc, msg) \ { \ diff --git a/pypy/translator/c/test/test_lltyped.py b/pypy/translator/c/test/test_lltyped.py --- a/pypy/translator/c/test/test_lltyped.py +++ b/pypy/translator/c/test/test_lltyped.py @@ -476,12 +476,13 @@ def f(n): result = () for cls in classes: + nn = cls(n) for OP in operators: x = getmin(cls) - res1 = OP(x, n) + res1 = OP(x, nn) result = result + (res1,) x = getmax(cls) - res1 = OP(x, n) + res1 = OP(x, nn) result = result + (res1,) return result diff --git a/pypy/translator/c/test/test_refcount.py b/pypy/translator/c/test/test_refcount.py --- a/pypy/translator/c/test/test_refcount.py +++ b/pypy/translator/c/test/test_refcount.py @@ -229,7 +229,6 @@ graph = t.buildflowgraph(g) assert graph.startblock.operations == [] graph.startblock = graph.startblock.exits[0].target - graph.startblock.isstartblock = True from pypy.objspace.flow.model import checkgraph checkgraph(graph) t._prebuilt_graphs[g] = graph diff --git a/pypy/translator/c/test/test_standalone.py b/pypy/translator/c/test/test_standalone.py --- a/pypy/translator/c/test/test_standalone.py +++ b/pypy/translator/c/test/test_standalone.py @@ -224,7 +224,7 @@ filename = str(udir.join('test_standalone_largefile')) r4800000000 = r_longlong(4800000000L) def entry_point(argv): - assert str(r4800000000 + len(argv)) == '4800000003' + assert str(r4800000000 + r_longlong(len(argv))) == '4800000003' fd = os.open(filename, os.O_RDWR | os.O_CREAT, 0644) os.lseek(fd, r4800000000, 0) newpos = os.lseek(fd, 0, 1) diff --git a/pypy/translator/c/test/test_typed.py b/pypy/translator/c/test/test_typed.py --- a/pypy/translator/c/test/test_typed.py +++ b/pypy/translator/c/test/test_typed.py @@ -261,7 +261,7 @@ f._annspecialcase_ = "specialize:argtype(0)" def g(n): if n > 0: - return f(r_longlong(0)) + return intmask(f(r_longlong(0))) else: return f(0) diff --git a/pypy/translator/jvm/test/test_rarithmetic.py b/pypy/translator/jvm/test/test_rarithmetic.py --- a/pypy/translator/jvm/test/test_rarithmetic.py +++ b/pypy/translator/jvm/test/test_rarithmetic.py @@ -32,7 +32,7 @@ cache[types] = fun return cache[types](x, y) return f(x,y) - super(BaseAdaptedTest,self).binary_test(new_func, rargs) + super(BaseAdaptedTest,self).binary_test(new_func, rargs, translated=True) 
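The translated=True flag added in the JVM test above connects to the rarithmetic change earlier in this changeset: compute_restype now merges two integer types only when they have the same signedness, so the base binary_test restricts itself to (r_uint, r_uint) pairs in translated mode. A rough, self-contained model of the new rule (simplified, not the actual RPython code):

from collections import namedtuple

IntType = namedtuple("IntType", ["name", "SIGNED", "BITS"])
r_int32 = IntType("r_int32", True, 32)
r_uint32 = IntType("r_uint32", False, 32)
r_int64 = IntType("r_int64", True, 64)

def merge_int_types(t1, t2):
    # same signedness: the result is the wider of the two types
    if t1.SIGNED == t2.SIGNED:
        return t1 if t1.BITS >= t2.BITS else t2
    # mixed signedness is no longer supported
    raise AssertionError("Merging these types (%s, %s) is not supported"
                         % (t1.name, t2.name))

assert merge_int_types(r_int32, r_int64) is r_int64
try:
    merge_int_types(r_uint32, r_int32)
except AssertionError:
    pass  # expected: signed/unsigned mixes are rejected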
class Test_r_uint(BaseAdaptedTest, BaseTest_r_uint): RTYPE = ra.r_uint diff --git a/pypy/translator/platform/darwin.py b/pypy/translator/platform/darwin.py --- a/pypy/translator/platform/darwin.py +++ b/pypy/translator/platform/darwin.py @@ -12,17 +12,10 @@ so_ext = 'dylib' - # NOTE: GCC 4.2 will fail at runtime due to subtle issues, possibly - # related to GC roots. Using LLVM-GCC or Clang will break the build. - default_cc = 'gcc-4.0' - - def __init__(self, cc=None): - if cc is None: - try: - cc = os.environ['CC'] - except KeyError: - cc = self.default_cc - self.cc = cc + # NOTE: With asmgcc GCC 4.2 will fail at runtime due to subtle issues, + # possibly related to GC roots. Using LLVM-GCC or Clang will break the + # build. On Darwin asmgcc is not the default anymore, so it is fine to use + # whatever gcc we find on the system def _args_for_shared(self, args): return (list(self.shared_only) diff --git a/pypy/translator/platform/windows.py b/pypy/translator/platform/windows.py --- a/pypy/translator/platform/windows.py +++ b/pypy/translator/platform/windows.py @@ -179,7 +179,7 @@ # The c compiler accepts any order of arguments, while # the assembler still has the old behavior that all options # must come first, and after the file name all options are ignored. - # So please be careful with the oder of parameters! ;-) + # So please be careful with the order of parameters! ;-) args = ['/nologo', '/c'] + compile_args + ['/Fo%s' % (oname,), str(cfile)] self._execute_c_compiler(cc, args, oname) return oname @@ -265,7 +265,7 @@ return fpath rel_cfiles = [m.pathrel(cfile) for cfile in cfiles] - rel_ofiles = [rel_cfile[:-2]+'.obj' for rel_cfile in rel_cfiles] + rel_ofiles = [rel_cfile[:rel_cfile.rfind('.')]+'.obj' for rel_cfile in rel_cfiles] m.cfiles = rel_cfiles rel_includedirs = [pypyrel(incldir) for incldir in eci.include_dirs] @@ -296,6 +296,7 @@ rules = [ ('all', '$(DEFAULT_TARGET)', []), ('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)'), + ('.asm.obj', '', '$(MASM) /nologo /Fo$@ /c $< $(INCLUDEDIRS)'), ] for rule in rules: diff --git a/pypy/translator/simplify.py b/pypy/translator/simplify.py --- a/pypy/translator/simplify.py +++ b/pypy/translator/simplify.py @@ -397,7 +397,8 @@ def transform_dead_op_vars(graph, translator=None): """Remove dead operations and variables that are passed over a link but not used in the target block. Input is a graph.""" - return transform_dead_op_vars_in_blocks(list(graph.iterblocks()), translator) + return transform_dead_op_vars_in_blocks(list(graph.iterblocks()), + [graph], translator) # the set of operations that can safely be removed # (they have no side effects, at least in R-Python) @@ -419,11 +420,19 @@ hasattr: True, } -def transform_dead_op_vars_in_blocks(blocks, translator=None): +def find_start_blocks(graphs): + start_blocks = set() + for graph in graphs: + start_blocks.add(graph.startblock) + return start_blocks + +def transform_dead_op_vars_in_blocks(blocks, graphs, translator=None): """Remove dead operations and variables that are passed over a link but not used in the target block. 
Input is a set of blocks""" read_vars = {} # set of variables really used variable_flow = {} # map {Var: list-of-Vars-it-depends-on} + set_of_blocks = set(blocks) + start_blocks = find_start_blocks(graphs) def canremove(op, block): if op.opname not in CanRemove: @@ -451,7 +460,7 @@ if block.exits: for link in block.exits: - if link.target not in blocks: + if link.target not in set_of_blocks: for arg, targetarg in zip(link.args, link.target.inputargs): read_vars[arg] = True read_vars[targetarg] = True @@ -465,7 +474,7 @@ read_vars[arg] = True # an input block's inputargs should not be modified, even if some # of the function's input arguments are not actually used - if block.isstartblock: + if block in start_blocks: for arg in block.inputargs: read_vars[arg] = True diff --git a/pypy/translator/transform.py b/pypy/translator/transform.py --- a/pypy/translator/transform.py +++ b/pypy/translator/transform.py @@ -115,7 +115,7 @@ # to kill dead (never-followed) links, # which can possibly remove more variables. from pypy.translator.simplify import transform_dead_op_vars_in_blocks - transform_dead_op_vars_in_blocks(block_subset) + transform_dead_op_vars_in_blocks(block_subset, self.translator.graphs) def transform_dead_code(self, block_subset): """Remove dead code: these are the blocks that are not annotated at all diff --git a/pypy/translator/unsimplify.py b/pypy/translator/unsimplify.py --- a/pypy/translator/unsimplify.py +++ b/pypy/translator/unsimplify.py @@ -42,9 +42,7 @@ vars = [copyvar(annotator, v) for v in graph.startblock.inputargs] newblock = Block(vars) newblock.closeblock(Link(vars, graph.startblock)) - graph.startblock.isstartblock = False graph.startblock = newblock - graph.startblock.isstartblock = True def starts_with_empty_block(graph): return (not graph.startblock.operations @@ -151,9 +149,7 @@ newop = SpaceOperation('direct_call', [c_initial_func], v_none) extrablock.operations = [newop] extrablock.closeblock(Link(args, entry_point.startblock)) - entry_point.startblock.isstartblock = False entry_point.startblock = extrablock - entry_point.startblock.isstartblock = True checkgraph(entry_point) def call_final_function(translator, final_func, annhelper=None): From noreply at buildbot.pypy.org Sun Dec 4 20:10:04 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 Dec 2011 20:10:04 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: fix test Message-ID: <20111204191004.B792C8205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50142:79fb16cde001 Date: 2011-12-04 20:09 +0100 http://bitbucket.org/pypy/pypy/changeset/79fb16cde001/ Log: fix test diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -325,7 +325,7 @@ def test_setslice(self): result = self.run("setslice") assert result == 11.0 - self.check_loop_count(1) + self.check_trace_count(1) self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, 'setinteriorfield_raw': 1, 'int_add': 3, 'int_eq': 1, 'guard_false': 1, 'jump': 1}) From noreply at buildbot.pypy.org Sun Dec 4 20:15:39 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 4 Dec 2011 20:15:39 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-refactor-complex: merged default Message-ID: <20111204191539.9308D8205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-dtype-refactor-complex Changeset: r50143:5a3841c1a299 Date: 2011-12-04 14:00 -0500 
http://bitbucket.org/pypy/pypy/changeset/5a3841c1a299/ Log: merged default diff too long, truncating to 10000 out of 15014 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -1,3 +1,4 @@ b590cf6de4190623aad9aa698694c22e614d67b9 release-1.5 b48df0bf4e75b81d98f19ce89d4a7dc3e1dab5e5 benchmarked d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6 +ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7 diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# -*- coding: iso-8859-1 +# -*- coding: iso-8859-1 -*- # Note that PyPy contains also a built-in module 'sha' which will hide # this one if compiled in. diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -231,6 +231,9 @@ sqlite.sqlite3_result_text.argtypes = [c_void_p, c_char_p, c_int, c_void_p] sqlite.sqlite3_result_text.restype = None +sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] +sqlite.sqlite3_enable_load_extension.restype = c_int + ########################################## # END Wrapped SQLite C API and constants ########################################## @@ -705,6 +708,14 @@ from sqlite3.dump import _iterdump return _iterdump(self) + def enable_load_extension(self, enabled): + self._check_thread() + self._check_closed() + + rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) + if rc != SQLITE_OK: + raise OperationalError("Error enabling load extension") + DML, DQL, DDL = range(3) class Cursor(object): diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -25,7 +25,7 @@ __all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee'] + 'takewhile', 'tee', 'compress', 'product'] try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -307,7 +307,7 @@ self._striptext = 'AssertionError: ' self._excinfo = tup self.type, self.value, tb = self._excinfo - self.typename = self.type.__name__ + self.typename = getattr(self.type, "__name__", "???") self.traceback = py.code.Traceback(tb) def __repr__(self): diff --git a/pypy/annotation/binaryop.py b/pypy/annotation/binaryop.py --- a/pypy/annotation/binaryop.py +++ b/pypy/annotation/binaryop.py @@ -252,7 +252,26 @@ # unsignedness is considered a rare and contagious disease def union((int1, int2)): - knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + if int1.unsigned == int2.unsigned: + knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + else: + t1 = int1.knowntype + if t1 is bool: + t1 = int + t2 = int2.knowntype + if t2 is bool: + t2 = int + + if t2 is int: + if int2.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t1 + knowntype = t1 + elif t1 is int: + if int1.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t2 + knowntype = t2 + else: + raise UnionError, "Merging these types (%s, %s) is not supported" % (t1, t2) return SomeInteger(nonneg=int1.nonneg and int2.nonneg, knowntype=knowntype) diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -591,13 +591,11 @@ immutable = True def __init__(self, method): self.method = method 
- -NUMBER = object() + annotation_to_ll_map = [ (SomeSingleFloat(), lltype.SingleFloat), (s_None, lltype.Void), # also matches SomeImpossibleValue() (s_Bool, lltype.Bool), - (SomeInteger(knowntype=r_ulonglong), NUMBER), (SomeFloat(), lltype.Float), (SomeLongFloat(), lltype.LongFloat), (SomeChar(), lltype.Char), @@ -623,10 +621,11 @@ return lltype.Ptr(p.PARENTTYPE) if isinstance(s_val, SomePtr): return s_val.ll_ptrtype + if type(s_val) is SomeInteger: + return lltype.build_number(None, s_val.knowntype) + for witness, T in annotation_to_ll_map: if witness.contains(s_val): - if T is NUMBER: - return lltype.build_number(None, s_val.knowntype) return T if info is None: info = '' @@ -635,7 +634,7 @@ raise ValueError("%sshould return a low-level type,\ngot instead %r" % ( info, s_val)) -ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map if ll is not NUMBER]) +ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map]) def lltype_to_annotation(T): try: diff --git a/pypy/annotation/specialize.py b/pypy/annotation/specialize.py --- a/pypy/annotation/specialize.py +++ b/pypy/annotation/specialize.py @@ -36,9 +36,7 @@ newtup = SpaceOperation('newtuple', starargs, argscopy[-1]) newstartblock.operations.append(newtup) newstartblock.closeblock(Link(argscopy, graph.startblock)) - graph.startblock.isstartblock = False graph.startblock = newstartblock - newstartblock.isstartblock = True argnames = argnames + ['.star%d' % i for i in range(nb_extra_args)] graph.signature = Signature(argnames) # note that we can mostly ignore defaults: if nb_extra_args > 0, diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py --- a/pypy/annotation/test/test_annrpython.py +++ b/pypy/annotation/test/test_annrpython.py @@ -856,6 +856,46 @@ py.test.raises(Exception, a.build_types, f, []) # if you want to get a r_uint, you have to be explicit about it + def test_add_different_ints(self): + def f(a, b): + return a + b + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_different_ints(self): + def f(a, b): + if a: + c = a + else: + c = b + return c + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_ruint_zero(self): + def f(a): + if a: + c = a + else: + c = 0 + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_merge_ruint_nonneg_signed(self): + def f(a, b): + if a: + c = a + else: + assert b >= 0 + c = b + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint, int]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_prebuilt_long_that_is_not_too_long(self): small_constant = 12L def f(): @@ -3029,7 +3069,7 @@ if g(x, y): g(x, r_uint(y)) a = self.RPythonAnnotator() - a.build_types(f, [int, int]) + py.test.raises(Exception, a.build_types, f, [int, int]) def test_compare_with_zero(self): def g(): diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -281,6 +281,9 @@ "actually create the full list until the resulting " "list is mutated", default=False), + BoolOption("withliststrategies", + "enable optimized ways to store lists of primitives ", + default=True), BoolOption("withtypeversion", "version type objects when changing them", diff --git a/pypy/config/test/test_translationoption.py 
b/pypy/config/test/test_translationoption.py new file mode 100644 --- /dev/null +++ b/pypy/config/test/test_translationoption.py @@ -0,0 +1,10 @@ +import py +from pypy.config.translationoption import get_combined_translation_config +from pypy.config.translationoption import set_opt_level +from pypy.config.config import ConflictConfigError + + +def test_no_gcrootfinder_with_boehm(): + config = get_combined_translation_config() + config.translation.gcrootfinder = "shadowstack" + py.test.raises(ConflictConfigError, set_opt_level, config, '0') diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -69,8 +69,8 @@ "statistics": [("translation.gctransformer", "framework")], "generation": [("translation.gctransformer", "framework")], "hybrid": [("translation.gctransformer", "framework")], - "boehm": [("translation.gctransformer", "boehm"), - ("translation.continuation", False)], # breaks + "boehm": [("translation.continuation", False), # breaks + ("translation.gctransformer", "boehm")], "markcompact": [("translation.gctransformer", "framework")], "minimark": [("translation.gctransformer", "framework")], }, @@ -398,6 +398,10 @@ # make_sure_not_resized often relies on it, so we always enable them config.translation.suggest(list_comprehension_operations=True) + # finally, make the choice of the gc definitive. This will fail + # if we have specified strange inconsistent settings. + config.translation.gc = config.translation.gc + # ---------------------------------------------------------------- def set_platform(config): diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -270,7 +270,12 @@ - *slicing*: the slice start must be within bounds. The stop doesn't need to, but it must not be smaller than the start. All negative indexes are disallowed, except for - the [:-1] special case. No step. + the [:-1] special case. No step. Slice deletion follows the same rules. + + - *slice assignment*: + only supports ``lst[x:y] = sublist``, if ``len(sublist) == y - x``. + In other words, slice assignment cannot change the total length of the list, + but just replace items. - *other operators*: ``+``, ``+=``, ``in``, ``*``, ``*=``, ``==``, ``!=`` work as expected. diff --git a/pypy/doc/config/objspace.std.withliststrategies.txt b/pypy/doc/config/objspace.std.withliststrategies.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withliststrategies.txt @@ -0,0 +1,2 @@ +Enable list strategies: Use specialized representations for lists of primitive +objects, such as ints. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -262,6 +262,26 @@ documented as such (as e.g. for hasattr()), in most cases PyPy lets the exception propagate instead. +Object Identity of Primitive Values, ``is`` and ``id`` +------------------------------------------------------- + +Object identity of primitive values works by value equality, not by identity of +the wrapper. This means that ``x + 1 is x + 1`` is always true, for arbitrary +integers ``x``. The rule applies for the following types: + + - ``int`` + + - ``float`` + + - ``long`` + + - ``complex`` + +This change requires some changes to ``id`` as well. ``id`` fulfills the +following condition: ``x is y <=> id(x) == id(y)``. 
Therefore ``id`` of the +above types will return a value that is computed from the argument, and can +thus be larger than ``sys.maxint`` (i.e. it can be an arbitrary long). + Miscellaneous ------------- @@ -284,14 +304,14 @@ never a dictionary as it sometimes is in CPython. Assigning to ``__builtins__`` has no effect. -* Do not compare immutable objects with ``is``. For example on CPython - it is true that ``x is 0`` works, i.e. does the same as ``type(x) is - int and x == 0``, but it is so by accident. If you do instead - ``x is 1000``, then it stops working, because 1000 is too large and - doesn't come from the internal cache. In PyPy it fails to work in - both cases, because we have no need for a cache at all. +* directly calling the internal magic methods of a few built-in types + with invalid arguments may have a slightly different result. For + example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return + ``NotImplemented`` on PyPy; on CPython, only the later does, and the + former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` + both raise ``TypeError`` everywhere.) This difference is an + implementation detail that shows up because of internal C-level slots + that PyPy does not have. -* Also, object identity of immutable keys in dictionaries is not necessarily - preserved. .. include:: _ref.txt diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -1,6 +1,3 @@ -.. include:: needswork.txt - -.. needs work, it talks about svn. also, it is not really user documentation Making a PyPy Release ======================= @@ -12,11 +9,8 @@ forgetting things. A set of todo files may also work. Check and prioritize all issues for the release, postpone some if necessary, -create new issues also as necessary. A meeting (or meetings) should be -organized to decide what things are priorities, should go in and work for -the release. - -An important thing is to get the documentation into an up-to-date state! +create new issues also as necessary. An important thing is to get +the documentation into an up-to-date state! Release Steps ---------------- diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.6`: http://pypy.org/download.html +.. _`Release 1.7`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. 
Windows is supported, but platform-specific bugs tend to take longer before we notice and fix diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -23,17 +23,20 @@ PyPy's implementation of the Python ``long`` type is slower than CPython's. Find out why and optimize them. +Make bytearray type fast +------------------------ + +PyPy's bytearray type is very inefficient. It would be an interesting +task to look into possible optimizations on this. + Numpy improvements ------------------ -This is more of a project-container than a single project. Possible ideas: +The numpy is rapidly progressing in pypy, so feel free to come to IRC and +ask for proposed topic. A not necesarilly up-to-date `list of topics`_ +is also available. -* experiment with auto-vectorization using SSE or implement vectorization - without automatically detecting it for array operations. - -* improve numpy, for example implement memory views. - -* interface with fortran/C libraries. +.. _`list of topics`: https://bitbucket.org/pypy/extradoc/src/extradoc/planning/micronumpy.txt Improving the jitviewer ------------------------ diff --git a/pypy/doc/release-1.7.0.rst b/pypy/doc/release-1.7.0.rst --- a/pypy/doc/release-1.7.0.rst +++ b/pypy/doc/release-1.7.0.rst @@ -1,44 +1,94 @@ -===================== -PyPy 1.7 -===================== +================================== +PyPy 1.7 - widening the sweet spot +================================== + +We're pleased to announce the 1.7 release of PyPy. As became a habit, this +release brings a lot of bugfixes and performance improvements over the 1.6 +release. However, unlike the previous releases, the focus has been on widening +the "sweet spot" of PyPy. That is, classes of Python code that PyPy can greatly +speed up should be vastly improved with this release. You can download the 1.7 +release here: + + http://pypy.org/download.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 1.7 and cpython 2.7.1`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 32/64 or +Windows 32. Windows 64 work is ongoing, but not yet natively supported. + +The main topic of this release is widening the range of code which PyPy +can greatly speed up. On average on +our benchmark suite, PyPy 1.7 is around **30%** faster than PyPy 1.6 and up +to **20 times** faster on some benchmarks. + +.. _`pypy 1.7 and cpython 2.7.1`: http://speed.pypy.org + Highlights ========== -* numerous performance improvements, PyPy 1.7 is xxx faster than 1.6 +* Numerous performance improvements. There are too many examples which python + constructs now should behave faster to list them. -* numerous bugfixes, compatibility fixes +* Bugfixes and compatibility fixes with CPython. -* windows fixes +* Windows fixes. -* stackless and JIT integration +* PyPy now comes with stackless features enabled by default. However, + any loop using stackless features will interrupt the JIT for now, so no real + performance improvement for stackless-based programs. Contact pypy-dev for + info how to help on removing this restriction. -* numpy progress - dtypes, numpy -> numpypy renaming +* NumPy effort in PyPy was renamed numpypy. 
In order to try using it, simply + write:: -* brand new JSON encoder + import numpypy as numpy -* improved memory footprint on heavy users of C APIs example - tornado + at the beginning of your program. There has been huge progress on numpy in PyPy + since 1.6, the main feature being the implementation of dtypes. -* cpyext progress +* The JSON encoder (but not the decoder) has been replaced with a new one. This one + is written in pure Python, but is known to outperform CPython's C extension + by up to **2 times** in some cases. It's about **20 times** faster than + the one that we had in 1.6. + +* The memory footprint of some of our RPython modules has been drastically + improved. This should benefit any application using, for example, cryptography, + such as tornado. + +* There has been some progress in exposing even more of the CPython C API via cpyext. Things that didn't make it, expect in 1.8 soon ============================================== -* list strategies +There is ongoing work which, while it didn't make it into this release, is +probably worth mentioning here. This is what you should probably expect in +1.8 some time soon: -* multi-dimensional arrays for numpy +* Specialized list implementation. There is a branch that implements lists of + integers/floats/strings as compactly as array.array. This should drastically + improve the performance and memory usage of some applications. -* ARM backend +* NumPy effort is progressing, with multi-dimensional arrays coming + soon. -* PPC backend +* There are two brand new JIT assembler backends, notably for the PowerPC and + ARM processors. -Things we're working on with unclear ETA -======================================== +Fundraising +=========== -* windows 64 (?) +It is perhaps worth mentioning that we're running fundraising campaigns for +the NumPy effort in PyPy and for Python 3 in PyPy. In case you want to see any +of those happen faster, we urge you to donate to the `numpy proposal`_ or +`py3k proposal`_. In case you want PyPy to progress but trust us with +the general direction, you can always donate to the `general pot`_. -* Py3k - -* SSE for numpy - -* specialized objects +.. _`numpy proposal`: http://pypy.org/numpydonate.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`general pot`: http://pypy.org diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -188,6 +188,12 @@ # ------------------------------------------------------------------- + def is_w(self, space, w_other): + return self is w_other + + def unique_id(self, space): + return space.wrap(compute_unique_id(self)) + def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) raise OperationError(space.w_TypeError, w_msg) @@ -681,9 +687,17 @@ """shortcut for space.is_true(space.eq(w_obj1, w_obj2))""" return self.is_w(w_obj1, w_obj2) or self.is_true(self.eq(w_obj1, w_obj2)) - def is_w(self, w_obj1, w_obj2): - """shortcut for space.is_true(space.is_(w_obj1, w_obj2))""" - return self.is_true(self.is_(w_obj1, w_obj2)) + def is_(self, w_one, w_two): + return self.newbool(self.is_w(w_one, w_two)) + + def is_w(self, w_one, w_two): + # done by a method call on w_two (and not on w_one, because of the + # expected programming style where we say "if x is None" or + # "if x is object").
+ return w_two.is_w(self, w_one) + + def id(self, w_obj): + return w_obj.unique_id(self) def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" @@ -879,6 +893,16 @@ """ return self.unpackiterable(w_iterable, expected_length) + def listview_str(self, w_list): + """ Return a list of unwrapped strings out of a list of strings. If the + argument is not a list or does not contain only strings, return None. + May return None anyway. + """ + return None + + def newlist_str(self, list_s): + return self.newlist([self.wrap(s) for s in list_s]) + @jit.unroll_safe def exception_match(self, w_exc_type, w_check_class): """Checks if the given exception type matches 'w_check_class'.""" @@ -1013,9 +1037,6 @@ def isinstance_w(self, w_obj, w_type): return self.is_true(self.isinstance(w_obj, w_type)) - def id(self, w_obj): - return self.wrap(compute_unique_id(w_obj)) - # The code below only works # for the simple case (new-style instance). # These methods are patched with the full logic by the __builtin__ diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -1,8 +1,9 @@ +from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.gateway import NoneNotWrapped +from pypy.interpreter.pyopcode import LoopBlock from pypy.rlib import jit -from pypy.interpreter.pyopcode import LoopBlock +from pypy.rlib.objectmodel import specialize class GeneratorIterator(Wrappable): @@ -156,38 +157,43 @@ break block = block.previous - def unpack_into(self, results_w): - """This is a hack for performance: runs the generator and collects - all produced items in a list.""" - # XXX copied and simplified version of send_ex() - space = self.space - if self.running: - raise OperationError(space.w_ValueError, - space.wrap('generator already executing')) - frame = self.frame - if frame is None: # already finished - return - self.running = True - try: - pycode = self.pycode - while True: - jitdriver.jit_merge_point(self=self, frame=frame, - results_w=results_w, - pycode=pycode) - try: - w_result = frame.execute_frame(space.w_None) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break - # if the frame is now marked as finished, it was RETURNed from - if frame.frame_finished_execution: - break - results_w.append(w_result) # YIELDed - finally: - frame.f_backref = jit.vref_None - self.running = False - self.frame = None - -jitdriver = jit.JitDriver(greens=['pycode'], - reds=['self', 'frame', 'results_w']) + # Results can be either an RPython list of W_Root, or it can be an + # app-level W_ListObject, which also has an append() method, that's why we + # generate 2 versions of the function and 2 jit drivers. 
+ def _create_unpack_into(): + jitdriver = jit.JitDriver(greens=['pycode'], + reds=['self', 'frame', 'results']) + def unpack_into(self, results): + """This is a hack for performance: runs the generator and collects + all produced items in a list.""" + # XXX copied and simplified version of send_ex() + space = self.space + if self.running: + raise OperationError(space.w_ValueError, + space.wrap('generator already executing')) + frame = self.frame + if frame is None: # already finished + return + self.running = True + try: + pycode = self.pycode + while True: + jitdriver.jit_merge_point(self=self, frame=frame, + results=results, pycode=pycode) + try: + w_result = frame.execute_frame(space.w_None) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break + # if the frame is now marked as finished, it was RETURNed from + if frame.frame_finished_execution: + break + results.append(w_result) # YIELDed + finally: + frame.f_backref = jit.vref_None + self.running = False + self.frame = None + return unpack_into + unpack_into = _create_unpack_into() + unpack_into_w = _create_unpack_into() \ No newline at end of file diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated, instantiate from pypy.rlib.jit import hint from pypy.rlib.debug import make_sure_not_resized, check_nonneg -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rlib import jit from pypy.tool import stdlib_opcode from pypy.tool.stdlib_opcode import host_bytecode_spec @@ -167,7 +167,7 @@ # Execution starts just after the last_instr. Initially, # last_instr is -1. After a generator suspends it points to # the YIELD_VALUE instruction. 
- next_instr = self.last_instr + 1 + next_instr = r_uint(self.last_instr + 1) if next_instr != 0: self.pushvalue(w_inputvalue) # @@ -691,6 +691,7 @@ handlerposition = space.int_w(w_handlerposition) valuestackdepth = space.int_w(w_valuestackdepth) assert valuestackdepth >= 0 + assert handlerposition >= 0 blk = instantiate(get_block_class(opname)) blk.handlerposition = handlerposition blk.valuestackdepth = valuestackdepth diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -837,6 +837,7 @@ raise Yield def jump_absolute(self, jumpto, next_instr, ec): + check_nonneg(jumpto) return jumpto def JUMP_FORWARD(self, jumpby, next_instr): @@ -1278,7 +1279,7 @@ def handle(self, frame, unroller): next_instr = self.really_handle(frame, unroller) # JIT hack - return next_instr + return r_uint(next_instr) def really_handle(self, frame, unroller): """ Purely abstract method diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -587,7 +587,7 @@ assert isinstance(meth2, Method) assert meth2.call_args(args) == obj1 # Check method returned from unbound_method.__get__() - w_meth3 = descr_function_get(space, func, None, space.type(obj2)) + w_meth3 = descr_function_get(space, func, space.w_None, space.type(obj2)) meth3 = space.unwrap(w_meth3) w_meth4 = meth3.descr_method_get(obj2, space.w_None) meth4 = space.unwrap(w_meth4) diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -63,10 +63,13 @@ def test_unpackiterable(self): space = self.space w = space.wrap - l = [w(1), w(2), w(3), w(4)] + l = [space.newlist([]) for l in range(4)] w_l = space.newlist(l) - assert space.unpackiterable(w_l) == l - assert space.unpackiterable(w_l, 4) == l + l1 = space.unpackiterable(w_l) + l2 = space.unpackiterable(w_l, 4) + for i in range(4): + assert space.is_w(l1[i], l[i]) + assert space.is_w(l2[i], l[i]) err = raises(OperationError, space.unpackiterable, w_l, 3) assert err.value.match(space, space.w_ValueError) err = raises(OperationError, space.unpackiterable, w_l, 5) diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -20,7 +20,7 @@ from pypy.jit.backend.llgraph import symbolic from pypy.jit.codewriter import longlong -from pypy.rlib import libffi +from pypy.rlib import libffi, clibffi from pypy.rlib.objectmodel import ComputedIntSymbolic, we_are_translated from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint @@ -1514,13 +1514,17 @@ do_setinteriorfield_gc_float = new_setinteriorfield_gc(cast_from_floatstorage) do_setinteriorfield_gc_ptr = new_setinteriorfield_gc(cast_from_ptr) -def new_setinteriorfield_raw(ffitype): +def new_setinteriorfield_raw(cast_func, ffitype): def do_setinteriorfield_raw(array, index, newvalue, width, ofs): addr = rffi.cast(rffi.VOIDP, array) + for TYPE, ffitype2 in clibffi.ffitype_map: + if ffitype2 is ffitype: + newvalue = cast_func(TYPE, newvalue) + break return libffi.array_setitem(ffitype, width, addr, index, ofs, newvalue) return do_setinteriorfield_raw -do_setinteriorfield_raw_int = new_setinteriorfield_raw(libffi.types.slong) -do_setinteriorfield_raw_float = 
new_setinteriorfield_raw(libffi.types.double) +do_setinteriorfield_raw_int = new_setinteriorfield_raw(cast_from_int, libffi.types.slong) +do_setinteriorfield_raw_float = new_setinteriorfield_raw(cast_from_floatstorage, libffi.types.double) def do_setfield_raw_int(struct, fieldnum, newvalue): STRUCT, fieldname = symbolic.TokenToField[fieldnum] diff --git a/pypy/jit/backend/llsupport/asmmemmgr.py b/pypy/jit/backend/llsupport/asmmemmgr.py --- a/pypy/jit/backend/llsupport/asmmemmgr.py +++ b/pypy/jit/backend/llsupport/asmmemmgr.py @@ -37,25 +37,25 @@ self._add_free_block(smaller_stop, stop) stop = smaller_stop result = (start, stop) - self.total_mallocs += stop - start + self.total_mallocs += r_uint(stop - start) return result # pair (start, stop) def free(self, start, stop): """Free a block (start, stop) returned by a previous malloc().""" - self.total_mallocs -= (stop - start) + self.total_mallocs -= r_uint(stop - start) self._add_free_block(start, stop) def open_malloc(self, minsize): """Allocate at least minsize bytes. Returns (start, stop).""" result = self._allocate_block(minsize) (start, stop) = result - self.total_mallocs += stop - start + self.total_mallocs += r_uint(stop - start) return result def open_free(self, middle, stop): """Used for freeing the end of an open-allocated block of memory.""" if stop - middle >= self.min_fragment: - self.total_mallocs -= (stop - middle) + self.total_mallocs -= r_uint(stop - middle) self._add_free_block(middle, stop) return True else: @@ -77,7 +77,7 @@ # Hack to make sure that mcs are not within 32-bits of one # another for testing purposes rmmap.hint.pos += 0x80000000 - size - self.total_memory_allocated += size + self.total_memory_allocated += r_uint(size) data = rffi.cast(lltype.Signed, data) return self._add_free_block(data, data + size) diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -648,14 +648,10 @@ # make a malloc function, with two arguments def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - has_finalizer = bool(tid & (1< -1: + if self.longevity[arg][1] > -1 and arg in useful: if arg.type == FLOAT: # xxx is it really a good idea? at the first CALL they # will all be flushed anyway @@ -287,15 +283,15 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: - loop_consts = {} - else: - loop_consts = {} - for i in range(len(inputargs)): - if inputargs[i] is jump.getarg(i): - loop_consts[inputargs[i]] = i - return loop_consts + #def _compute_loop_consts(self, inputargs, jump, looptoken): + # if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: + # loop_consts = {} + # else: + # loop_consts = {} + # for i in range(len(inputargs)): + # if inputargs[i] is jump.getarg(i): + # loop_consts[inputargs[i]] = i + # return loop_consts def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py @@ -450,8 +446,14 @@ def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" + + # returns a pair longevity/useful. Non-useful variables are ones that + # never appear in the assembler or it does not matter if they appear on + # stack or in registers. 
Main example is loop arguments that go + # only to guard operations or to jump or to finish produced = {} last_used = {} + useful = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -459,8 +461,11 @@ continue assert op.result not in produced produced[op.result] = i + opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) + if opnum != rop.JUMP and opnum != rop.FINISH: + useful[arg] = None if isinstance(arg, Box) and arg not in last_used: last_used[arg] = i if op.is_guard(): @@ -486,7 +491,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity + return longevity, useful def loc(self, v): if v is None: # xxx kludgy diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -149,6 +149,13 @@ self.cpu.execute_token(loop.token) return loop + def prepare_loop(self, ops): + loop = self.parse(ops) + regalloc = RegAlloc(self.cpu.assembler, False) + regalloc.prepare_loop(loop.inputargs, loop.operations, + loop.token, []) + return regalloc + def getint(self, index): return self.cpu.get_latest_value_int(index) @@ -422,6 +429,35 @@ self.run(loop) assert self.getints(9) == range(9) + def test_loopargs(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_2(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + finish(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_3(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + guard_true(i4) [i0, i1, i2, i3, i4] + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + class TestRegallocCompOps(BaseTestRegalloc): def test_cmp_op_0(self): diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -457,6 +457,46 @@ def test_compile_framework_7(self): self.run('compile_framework_7') + def define_compile_framework_7_interior(cls): + # Array of structs containing pointers (test the write barrier + # for setinteriorfield_gc) + S = lltype.GcStruct('S', ('i', lltype.Signed)) + A = lltype.GcArray(lltype.Struct('entry', ('x', lltype.Ptr(S)), + ('y', lltype.Ptr(S)), + ('z', lltype.Ptr(S)))) + class Glob: + a = lltype.nullptr(A) + glob = Glob() + # + def make_s(i): + s = lltype.malloc(S) + s.i = i + return s + # + @unroll_safe + def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): + a = glob.a + if not a: + a = glob.a = lltype.malloc(A, 10) + i = 0 + while i < 10: + a[i].x = make_s(n + i * 100 + 1) + a[i].y = make_s(n + i * 100 + 2) + a[i].z = make_s(n + i * 100 + 3) + i += 1 + i = 0 + while i < 10: + check(a[i].x.i == n + i * 100 + 1) + check(a[i].y.i == n + i * 100 + 2) + check(a[i].z.i == n + i * 100 + 3) + i += 1 + return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s + f(123, *[None]*11) # check that the check() are ok + return None, f, None + + def test_compile_framework_7_interior(self): + self.run('compile_framework_7_interior') + def define_compile_framework_8(cls): # Array of pointers, of unknown length (test write_barrier_from_array) def before(n, x): diff --git a/pypy/jit/codewriter/effectinfo.py 
b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -241,12 +241,15 @@ return op.opname == 'jit_force_quasi_immutable' class RandomEffectsAnalyzer(BoolGraphAnalyzer): - def analyze_direct_call(self, graph, seen=None): - if hasattr(graph, "func") and hasattr(graph.func, "_ptr"): - if graph.func._ptr._obj.random_effects_on_gcobjs: + def analyze_external_call(self, op, seen=None): + try: + funcobj = op.args[0].value._obj + if funcobj.random_effects_on_gcobjs: return True - return super(RandomEffectsAnalyzer, self).analyze_direct_call(graph, - seen) + except (AttributeError, lltype.DelayedPointer): + return True # better safe than sorry + return super(RandomEffectsAnalyzer, self).analyze_external_call( + op, seen) def analyze_simple_operation(self, op, graphinfo): return False diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -15,6 +15,8 @@ from pypy.translator.simplify import get_funcobj from pypy.translator.unsimplify import varoftype +class UnsupportedMallocFlags(Exception): + pass def transform_graph(graph, cpu=None, callcontrol=None, portal_jd=None): """Transform a control flow graph to make it suitable for @@ -205,7 +207,24 @@ if op.args[0] in self.vable_array_vars: self.vable_array_vars[op.result]= self.vable_array_vars[op.args[0]] - rewrite_op_cast_pointer = rewrite_op_same_as + def rewrite_op_cast_pointer(self, op): + newop = self.rewrite_op_same_as(op) + assert newop is None + return + # disabled for now + if (self._is_rclass_instance(op.args[0]) and + self._is_rclass_instance(op.result)): + FROM = op.args[0].concretetype.TO + TO = op.result.concretetype.TO + if lltype._castdepth(TO, FROM) > 0: + vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, TO) + const_vtable = Constant(vtable, lltype.typeOf(vtable)) + return [None, # hack, do the right renaming from op.args[0] to op.result + SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + + def rewrite_op_jit_record_known_class(self, op): + return SpaceOperation("record_known_class", [op.args[0], op.args[1]], None) + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass @@ -481,8 +500,22 @@ def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) ARRAY = op.args[0].value - return self._do_builtin_call(op, 'raw_malloc', + name = 'raw_malloc' + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[2]], extra = (ARRAY,), extrakey = ARRAY) @@ -1053,35 +1086,20 @@ # jit.codewriter.support. 
for _op, _oopspec in [('llong_invert', 'INVERT'), - ('ullong_invert', 'INVERT'), ('llong_lt', 'LT'), ('llong_le', 'LE'), ('llong_eq', 'EQ'), ('llong_ne', 'NE'), ('llong_gt', 'GT'), ('llong_ge', 'GE'), - ('ullong_lt', 'ULT'), - ('ullong_le', 'ULE'), - ('ullong_eq', 'EQ'), - ('ullong_ne', 'NE'), - ('ullong_gt', 'UGT'), - ('ullong_ge', 'UGE'), ('llong_add', 'ADD'), ('llong_sub', 'SUB'), ('llong_mul', 'MUL'), ('llong_and', 'AND'), ('llong_or', 'OR'), ('llong_xor', 'XOR'), - ('ullong_add', 'ADD'), - ('ullong_sub', 'SUB'), - ('ullong_mul', 'MUL'), - ('ullong_and', 'AND'), - ('ullong_or', 'OR'), - ('ullong_xor', 'XOR'), ('llong_lshift', 'LSHIFT'), ('llong_rshift', 'RSHIFT'), - ('ullong_lshift', 'LSHIFT'), - ('ullong_rshift', 'URSHIFT'), ('cast_int_to_longlong', 'FROM_INT'), ('truncate_longlong_to_int', 'TO_INT'), ('cast_float_to_longlong', 'FROM_FLOAT'), @@ -1104,6 +1122,21 @@ ('cast_uint_to_ulonglong', 'FROM_UINT'), ('cast_float_to_ulonglong', 'FROM_FLOAT'), ('cast_ulonglong_to_float', 'U_TO_FLOAT'), + ('ullong_invert', 'INVERT'), + ('ullong_lt', 'ULT'), + ('ullong_le', 'ULE'), + ('ullong_eq', 'EQ'), + ('ullong_ne', 'NE'), + ('ullong_gt', 'UGT'), + ('ullong_ge', 'UGE'), + ('ullong_add', 'ADD'), + ('ullong_sub', 'SUB'), + ('ullong_mul', 'MUL'), + ('ullong_and', 'AND'), + ('ullong_or', 'OR'), + ('ullong_xor', 'XOR'), + ('ullong_lshift', 'LSHIFT'), + ('ullong_rshift', 'URSHIFT'), ]: exec py.code.Source(''' def rewrite_op_%s(self, op): @@ -1134,7 +1167,7 @@ def rewrite_op_llong_is_true(self, op): v = varoftype(op.args[0].concretetype) - op0 = SpaceOperation('cast_int_to_longlong', + op0 = SpaceOperation('cast_primitive', [Constant(0, lltype.Signed)], v) args = [op.args[0], v] diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -258,6 +258,9 @@ y = ~r_ulonglong(xll) return u_to_longlong(y) +def _ll_1_ullong_invert(xull): + return ~xull + def _ll_2_llong_lt(xll, yll): return xll < yll @@ -276,16 +279,22 @@ def _ll_2_llong_ge(xll, yll): return xll >= yll -def _ll_2_llong_ult(xull, yull): +def _ll_2_ullong_eq(xull, yull): + return xull == yull + +def _ll_2_ullong_ne(xull, yull): + return xull != yull + +def _ll_2_ullong_ult(xull, yull): return xull < yull -def _ll_2_llong_ule(xull, yull): +def _ll_2_ullong_ule(xull, yull): return xull <= yull -def _ll_2_llong_ugt(xull, yull): +def _ll_2_ullong_ugt(xull, yull): return xull > yull -def _ll_2_llong_uge(xull, yull): +def _ll_2_ullong_uge(xull, yull): return xull >= yull def _ll_2_llong_add(xll, yll): @@ -312,14 +321,41 @@ z = r_ulonglong(xll) ^ r_ulonglong(yll) return u_to_longlong(z) +def _ll_2_ullong_add(xull, yull): + z = (xull) + (yull) + return (z) + +def _ll_2_ullong_sub(xull, yull): + z = (xull) - (yull) + return (z) + +def _ll_2_ullong_mul(xull, yull): + z = (xull) * (yull) + return (z) + +def _ll_2_ullong_and(xull, yull): + z = (xull) & (yull) + return (z) + +def _ll_2_ullong_or(xull, yull): + z = (xull) | (yull) + return (z) + +def _ll_2_ullong_xor(xull, yull): + z = (xull) ^ (yull) + return (z) + def _ll_2_llong_lshift(xll, y): z = r_ulonglong(xll) << y return u_to_longlong(z) +def _ll_2_ullong_lshift(xull, y): + return xull << y + def _ll_2_llong_rshift(xll, y): return xll >> y -def _ll_2_llong_urshift(xull, y): +def _ll_2_ullong_urshift(xull, y): return xull >> y def _ll_1_llong_from_int(x): @@ -563,10 +599,21 @@ return p return _ll_0_alloc_with_del - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, 
flavor='raw') - return _ll_1_raw_malloc + def build_raw_malloc_builder(zero=False, add_memory_pressure=False, track_allocation=True): + def build_ll_1_raw_malloc(ARRAY): + def _ll_1_raw_malloc(n): + return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, add_memory_pressure=add_memory_pressure) + return _ll_1_raw_malloc + return build_ll_1_raw_malloc + + build_ll_1_raw_malloc = build_raw_malloc_builder() + build_ll_1_raw_malloc_zero = build_raw_malloc_builder(zero=True) + build_ll_1_raw_malloc_zero_add_memory_pressure = build_raw_malloc_builder(zero=True, add_memory_pressure=True) + build_ll_1_raw_malloc_add_memory_pressure = build_raw_malloc_builder(add_memory_pressure=True) + build_ll_1_raw_malloc_no_track_allocation = build_raw_malloc_builder(track_allocation=False) + build_ll_1_raw_malloc_zero_no_track_allocation = build_raw_malloc_builder(zero=True, track_allocation=False) + build_ll_1_raw_malloc_zero_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(zero=True, add_memory_pressure=True, track_allocation=False) + build_ll_1_raw_malloc_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(add_memory_pressure=True, track_allocation=False) def build_ll_1_raw_free(ARRAY): def _ll_1_raw_free(p): diff --git a/pypy/jit/codewriter/test/test_call.py b/pypy/jit/codewriter/test/test_call.py --- a/pypy/jit/codewriter/test/test_call.py +++ b/pypy/jit/codewriter/test/test_call.py @@ -192,3 +192,21 @@ [op] = block.operations call_descr = cc.getcalldescr(op) assert call_descr.extrainfo.has_random_effects() + +def test_random_effects_on_stacklet_switch(): + from pypy.jit.backend.llgraph.runner import LLtypeCPU + from pypy.rlib._rffi_stacklet import switch, thread_handle, handle + @jit.dont_look_inside + def f(): + switch(rffi.cast(thread_handle, 0), rffi.cast(handle, 0)) + + rtyper = support.annotate(f, []) + jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0]) + cc = CallControl(LLtypeCPU(rtyper), jitdrivers_sd=[jitdriver_sd]) + res = cc.find_all_graphs(FakePolicy()) + + [f_graph] = [x for x in res if x.func is f] + [block, _] = list(f_graph.iterblocks()) + op = block.operations[-1] + call_descr = cc.getcalldescr(op) + assert call_descr.extrainfo.has_random_effects() diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -1,3 +1,5 @@ + +import py import random try: from itertools import product @@ -15,12 +17,12 @@ from pypy.objspace.flow.model import FunctionGraph, Block, Link from pypy.objspace.flow.model import SpaceOperation, Variable, Constant -from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.lltypesystem.module import ll_math from pypy.translator.unsimplify import varoftype from pypy.jit.codewriter import heaptracker, effectinfo from pypy.jit.codewriter.flatten import ListOfKind -from pypy.jit.codewriter.jtransform import Transformer +from pypy.jit.codewriter.jtransform import Transformer, UnsupportedMallocFlags from pypy.jit.metainterp.history import getkind def const(x): @@ -538,6 +540,44 @@ assert op1.opname == '-live-' assert op1.args == [] +def test_raw_malloc(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw'}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + 
v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_zero(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc_zero' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_unsupported_flag(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'unsupported_flag': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address @@ -1140,4 +1180,4 @@ assert op1.opname == 'mark_opaque_ptr' assert op1.args == [v1] assert op1.result is None - assert op2 is None \ No newline at end of file + assert op2 is None diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py --- a/pypy/jit/codewriter/test/test_longlong.py +++ b/pypy/jit/codewriter/test/test_longlong.py @@ -78,7 +78,7 @@ oplist = tr.rewrite_operation(op) assert len(oplist) == 2 assert oplist[0].opname == 'residual_call_irf_f' - assert oplist[0].args[0].value == 'llong_from_int' + assert oplist[0].args[0].value == opname.split('_')[0]+'_from_int' assert oplist[0].args[1] == 'calldescr-84' assert list(oplist[0].args[2]) == [const(0)] assert list(oplist[0].args[3]) == [] diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -518,6 +518,9 @@ @arguments("r") def bhimpl_mark_opaque_ptr(a): pass + @arguments("r", "i") + def bhimpl_record_known_class(a, b): + pass @arguments("i", returns="i") def bhimpl_int_copy(a): diff --git a/pypy/jit/metainterp/gc.py b/pypy/jit/metainterp/gc.py --- a/pypy/jit/metainterp/gc.py +++ b/pypy/jit/metainterp/gc.py @@ -7,6 +7,9 @@ self.config = config +class GC_none(GcDescription): + malloc_zero_filled = True + class GC_boehm(GcDescription): malloc_zero_filled = True diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -999,13 +999,13 @@ "found %d %r, expected %d" % (found, insn, expected_count)) return insns - def check_loops(self, expected=None, everywhere=False, **check): + def check_resops(self, expected=None, **check): insns = {} for loop in self.loops: - if not everywhere: - if getattr(loop, '_ignore_during_counting', False): - continue insns = loop.summary(adding_insns=insns) + return self._check_insns(insns, expected, check) + + def _check_insns(self, insns, expected, check): if expected is not None: insns.pop('debug_merge_point', None) assert insns == expected @@ -1016,6 +1016,25 @@ "found %d %r, expected %d" % (found, insn, expected_count)) return insns + def 
check_simple_loop(self, expected=None, **check): + # Useful in the simplest case when we have only one trace ending with + a jump back to itself and possibly a few bridges ending with finish. + # Only the operations within the loop formed by that single jump will + be counted. + + # XXX hacked version, ignore and remove me when jit-targets is merged. + loops = self.get_all_loops() + loops = [loop for loop in loops if 'Preamble' not in repr(loop)] #XXX + assert len(loops) == 1 + loop, = loops + jumpop = loop.operations[-1] + assert jumpop.getopnum() == rop.JUMP + insns = {} + for op in loop.operations: + opname = op.getopname() + insns[opname] = insns.get(opname, 0) + 1 + return self._check_insns(insns, expected, check) + def check_consistency(self): "NOT_RPYTHON" for loop in self.loops: diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -7,7 +7,7 @@ from pypy.rlib.libffi import Func from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.annlowlevel import cast_base_ptr_to_instance -from pypy.rpython.lltypesystem import llmemory +from pypy.rpython.lltypesystem import llmemory, rffi class FuncInfo(object): @@ -237,7 +237,7 @@ else: assert False, "unsupported ffitype or kind" # - fieldsize = ffitype.c_size + fieldsize = rffi.getintfield(ffitype, 'c_size') return self.optimizer.cpu.interiorfielddescrof_dynamic( offset, width, fieldsize, is_pointer, is_float, is_signed ) diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -348,6 +348,7 @@ self.opaque_pointers = {} self.replaces_guard = {} self._newoperations = [] + self.seen_results = {} self.optimizer = self self.optpure = None self.optearlyforce = None @@ -542,6 +543,10 @@ op = self.store_final_boxes_in_guard(op) elif op.can_raise(): self.exception_might_have_happened = True + if op.result: + if op.result in self.seen_results: + raise ValueError, "invalid optimization" + self.seen_results[op.result] = None self._newoperations.append(op) def replace_op(self, old_op, new_op): @@ -559,9 +564,12 @@ descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) - newboxes = modifier.finish(self.values, self.pendingfields) - if len(newboxes) > self.metainterp_sd.options.failargs_limit: # XXX be careful here - compile.giveup() + try: + newboxes = modifier.finish(self.values, self.pendingfields) + if len(newboxes) > self.metainterp_sd.options.failargs_limit: + raise resume.TagOverflow + except resume.TagOverflow: + raise compile.giveup() descr.store_final_boxes(op, newboxes) # if op.getopnum() == rop.GUARD_VALUE: diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -260,6 +260,16 @@ def optimize_GUARD_FALSE(self, op): self.optimize_guard(op, CONST_0) + def optimize_RECORD_KNOWN_CLASS(self, op): + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) + assert isinstance(expectedclassbox, Const) + realclassbox = value.get_constant_class(self.optimizer.cpu) + if realclassbox is not None: + assert realclassbox.same_constant(expectedclassbox) + return + 
value.make_constant_class(expectedclassbox, None) + def optimize_GUARD_CLASS(self, op): value = self.getvalue(op.getarg(0)) expectedclassbox = op.getarg(1) @@ -481,6 +491,9 @@ self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) self.emit_operation(op) + def optimize_SAME_AS(self, op): + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) + dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_', default=OptRewrite.emit_operation) optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD') diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -28,6 +28,9 @@ def optimize_MARK_OPAQUE_PTR(self, op): pass + def optimize_RECORD_KNOWN_CLASS(self, op): + pass + dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1191,6 +1191,75 @@ """ self.optimize_loop(ops, expected, preamble) + def test_virtual_recursive(self): + ops = """ + [p0] + p41 = getfield_gc(p0, descr=nextdescr) + i0 = getfield_gc(p41, descr=valuedescr) + p1 = new_with_vtable(ConstClass(node_vtable2)) + p2 = new_with_vtable(ConstClass(node_vtable2)) + setfield_gc(p2, p1, descr=nextdescr) + setfield_gc(p1, p2, descr=nextdescr) + i1 = int_add(i0, 1) + setfield_gc(p2, i1, descr=valuedescr) + jump(p1) + """ + preamble = """ + [p0] + p41 = getfield_gc(p0, descr=nextdescr) + i0 = getfield_gc(p41, descr=valuedescr) + i3 = int_add(i0, 1) + jump(i3) + """ + expected = """ + [i0] + i1 = int_add(i0, 1) + jump(i1) + """ + self.optimize_loop(ops, expected, preamble) + + def test_virtual_recursive_forced(self): + ops = """ + [p0] + p41 = getfield_gc(p0, descr=nextdescr) + i0 = getfield_gc(p41, descr=valuedescr) + p1 = new_with_vtable(ConstClass(node_vtable2)) + p2 = new_with_vtable(ConstClass(node_vtable2)) + setfield_gc(p2, p1, descr=nextdescr) + setfield_gc(p1, p2, descr=nextdescr) + i1 = int_add(i0, 1) + setfield_gc(p2, i1, descr=valuedescr) + setfield_gc(p0, p1, descr=nextdescr) + jump(p1) + """ + preamble = """ + [p0] + p41 = getfield_gc(p0, descr=nextdescr) + i0 = getfield_gc(p41, descr=valuedescr) + i1 = int_add(i0, 1) + p1 = new_with_vtable(ConstClass(node_vtable2)) + p2 = new_with_vtable(ConstClass(node_vtable2)) + setfield_gc(p2, i1, descr=valuedescr) + setfield_gc(p2, p1, descr=nextdescr) + setfield_gc(p1, p2, descr=nextdescr) + setfield_gc(p0, p1, descr=nextdescr) + jump(p1) + """ + loop = """ + [p0] + p41 = getfield_gc(p0, descr=nextdescr) + i0 = getfield_gc(p41, descr=valuedescr) + i1 = int_add(i0, 1) + p1 = new_with_vtable(ConstClass(node_vtable2)) + p2 = new_with_vtable(ConstClass(node_vtable2)) + setfield_gc(p0, p1, descr=nextdescr) + setfield_gc(p2, p1, descr=nextdescr) + setfield_gc(p1, p2, descr=nextdescr) + setfield_gc(p2, i1, descr=valuedescr) + jump(p1) + """ + self.optimize_loop(ops, loop, preamble) + def test_virtual_constant_isnull(self): ops = """ [i0] @@ -5438,6 +5507,96 @@ jump() """ self.optimize_loop(ops, expected) + # ---------- + ops = """ + [p1] + p0 = new_with_vtable(ConstClass(ptrobj_immut_vtable)) + setfield_gc(p0, p1, descr=immut_ptrval) + escape(p0) + jump(p1) + """ + self.optimize_loop(ops, ops) + # ---------- + ops = """ + [] + p0 = 
new_with_vtable(ConstClass(ptrobj_immut_vtable)) + p1 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p1, 1242, descr=immut_intval) + setfield_gc(p0, p1, descr=immut_ptrval) + escape(p0) + jump() + """ + class PtrObj1242(object): + _TYPE = llmemory.GCREF.TO + def __eq__(slf, other): + if slf is other: + return 1 + p1 = other.container.ptrval + p1cast = lltype.cast_pointer(lltype.Ptr(self.INTOBJ_IMMUT), p1) + return p1cast.intval == 1242 + self.namespace['ptrobj1242'] = lltype._ptr(llmemory.GCREF, + PtrObj1242()) + expected = """ + [] + escape(ConstPtr(ptrobj1242)) + jump() + """ + self.optimize_loop(ops, expected) + + def test_immutable_constantfold_recursive(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(ptrobj_immut_vtable)) + setfield_gc(p0, p0, descr=immut_ptrval) + escape(p0) + jump() + """ + from pypy.rpython.lltypesystem import lltype, llmemory + class PtrObjSelf(object): + _TYPE = llmemory.GCREF.TO + def __eq__(slf, other): + if slf is other: + return 1 + p1 = other.container.ptrval + p1cast = lltype.cast_pointer(lltype.Ptr(self.PTROBJ_IMMUT), p1) + return p1cast.ptrval == p1 + self.namespace['ptrobjself'] = lltype._ptr(llmemory.GCREF, + PtrObjSelf()) + expected = """ + [] + escape(ConstPtr(ptrobjself)) + jump() + """ + self.optimize_loop(ops, expected) + # + ops = """ + [] + p0 = new_with_vtable(ConstClass(ptrobj_immut_vtable)) + p1 = new_with_vtable(ConstClass(ptrobj_immut_vtable)) + setfield_gc(p0, p1, descr=immut_ptrval) + setfield_gc(p1, p0, descr=immut_ptrval) + escape(p0) + jump() + """ + class PtrObjSelf2(object): + _TYPE = llmemory.GCREF.TO + def __eq__(slf, other): + if slf is other: + return 1 + p1 = other.container.ptrval + p1cast = lltype.cast_pointer(lltype.Ptr(self.PTROBJ_IMMUT), p1) + p2 = p1cast.ptrval + assert p2 != p1 + p2cast = lltype.cast_pointer(lltype.Ptr(self.PTROBJ_IMMUT), p2) + return p2cast.ptrval == p1 + self.namespace['ptrobjself2'] = lltype._ptr(llmemory.GCREF, + PtrObjSelf2()) + expected = """ + [] + escape(ConstPtr(ptrobjself2)) + jump() + """ + self.optimize_loop(ops, expected) # ---------- def optimize_strunicode_loop(self, ops, optops, preamble): @@ -6323,6 +6482,21 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() + def test_record_known_class(self): + ops = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + record_known_class(p1, ConstClass(node_vtable)) + guard_class(p1, ConstClass(node_vtable)) [] + jump(p1) + """ + expected = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + jump(p1) + """ + self.optimize_loop(ops, expected) + def test_quasi_immut(self): ops = """ [p0, p1, i0] diff --git a/pypy/jit/metainterp/optimizeopt/test/test_util.py b/pypy/jit/metainterp/optimizeopt/test/test_util.py --- a/pypy/jit/metainterp/optimizeopt/test/test_util.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_util.py @@ -139,6 +139,12 @@ noimmut_intval = cpu.fielddescrof(INTOBJ_NOIMMUT, 'intval') immut_intval = cpu.fielddescrof(INTOBJ_IMMUT, 'intval') + PTROBJ_IMMUT = lltype.GcStruct('PTROBJ_IMMUT', ('parent', OBJECT), + ('ptrval', lltype.Ptr(OBJECT)), + hints={'immutable': True}) + ptrobj_immut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + immut_ptrval = cpu.fielddescrof(PTROBJ_IMMUT, 'ptrval') + arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed)) floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) @@ -246,6 +252,7 @@ register_known_gctype(cpu, jit_virtual_ref_vtable,vrefinfo.JIT_VIRTUAL_REF) register_known_gctype(cpu, intobj_noimmut_vtable, 
INTOBJ_NOIMMUT) register_known_gctype(cpu, intobj_immut_vtable, INTOBJ_IMMUT) + register_known_gctype(cpu, ptrobj_immut_vtable, PTROBJ_IMMUT) namespace = locals() diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py --- a/pypy/jit/metainterp/optimizeopt/virtualize.py +++ b/pypy/jit/metainterp/optimizeopt/virtualize.py @@ -87,14 +87,36 @@ def _get_descr(self): raise NotImplementedError - def _is_immutable_and_filled_with_constants(self, optforce): + def _is_immutable_and_filled_with_constants(self, memo=None): + # check if it is possible to force the given structure into a + # compile-time constant: this is allowed only if it is declared + # immutable, if all fields are already filled, and if each field + # is either a compile-time constant or (recursively) a structure + # which also answers True to the same question. + # + # check that all fields are filled. The following equality check + # also fails if count == -1, meaning "not an immutable at all". count = self._get_descr().count_fields_if_immutable() - if count != len(self._fields): # always the case if count == -1 + if count != len(self._fields): return False + # + # initialize 'memo' + if memo is None: + memo = {} + elif self in memo: + return True # recursive case: assume yes + memo[self] = None + # for value in self._fields.itervalues(): - subbox = value.force_box(optforce) - if not isinstance(subbox, Const): - return False + if value.is_constant(): + pass # it is a constant value: ok + elif (isinstance(value, AbstractVirtualStructValue) + and value.is_virtual()): + # recursive check + if not value._is_immutable_and_filled_with_constants(memo): + return False + else: + return False # not a constant at all return True def force_at_end_of_preamble(self, already_forced, optforce): @@ -114,7 +136,7 @@ if not we_are_translated(): op.name = 'FORCE ' + self.source_op.name - if self._is_immutable_and_filled_with_constants(optforce): + if self._is_immutable_and_filled_with_constants(): box = optforce.optimizer.constant_fold(op) self.make_constant(box) for ofs, value in self._fields.iteritems(): diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -243,6 +243,18 @@ def opimpl_mark_opaque_ptr(self, box): return self.execute(rop.MARK_OPAQUE_PTR, box) + @arguments("box", "box") + def opimpl_record_known_class(self, box, clsbox): + from pypy.rpython.lltypesystem import llmemory + if self.metainterp.heapcache.is_class_known(box): + return + adr = clsbox.getaddr() + bounding_class = llmemory.cast_adr_to_ptr(adr, rclass.CLASSTYPE) + if bounding_class.subclassrange_max - bounding_class.subclassrange_min == 1: + # precise class knowledge, this can be used + self.execute(rop.RECORD_KNOWN_CLASS, box, clsbox) + self.metainterp.heapcache.class_now_known(box) + @arguments("box") def _opimpl_any_return(self, box): self.metainterp.finishframe(box) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -494,6 +494,7 @@ 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr + 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/jit/metainterp/resume.py b/pypy/jit/metainterp/resume.py --- a/pypy/jit/metainterp/resume.py +++ 
b/pypy/jit/metainterp/resume.py @@ -93,12 +93,14 @@ TAGMASK = 3 +class TagOverflow(Exception): + pass + def tag(value, tagbits): - if tagbits >> 2: - raise ValueError + assert 0 <= tagbits <= 3 sx = value >> 13 if sx != 0 and sx != -1: - raise ValueError + raise TagOverflow return rffi.r_short(value<<2|tagbits) def untag(value): @@ -153,7 +155,7 @@ return self._newconst(const) try: return tag(val, TAGINT) - except ValueError: + except TagOverflow: pass tagged = self.large_ints.get(val, UNASSIGNED) if not tagged_eq(tagged, UNASSIGNED): @@ -429,8 +431,7 @@ fieldnum = self._gettagged(fieldbox) # the index is limited to 2147483647 (64-bit machines only) if itemindex > 2147483647: - from pypy.jit.metainterp import compile - compile.giveup() + raise TagOverflow itemindex = rffi.cast(rffi.INT, itemindex) # rd_pendingfields[i].lldescr = lldescr diff --git a/pypy/jit/metainterp/test/support.py b/pypy/jit/metainterp/test/support.py --- a/pypy/jit/metainterp/test/support.py +++ b/pypy/jit/metainterp/test/support.py @@ -155,9 +155,11 @@ class JitMixin: basic = True - def check_loops(self, expected=None, everywhere=False, **check): - get_stats().check_loops(expected=expected, everywhere=everywhere, - **check) + def check_resops(self, expected=None, **check): + get_stats().check_resops(expected=expected, **check) + def check_simple_loop(self, expected=None, **check): + get_stats().check_simple_loop(expected=expected, **check) + def check_loop_count(self, count): """NB. This is a hack; use check_tree_loop_count() or check_enter_count() for the real thing. diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -14,7 +14,7 @@ from pypy.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside, loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, - isconstant, isvirtual, promote_string, set_param) + isconstant, isvirtual, promote_string, set_param, record_known_class) from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -79,9 +79,8 @@ res = self.meta_interp(f, [6, 7]) assert res == 42 self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) + self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, 'guard_true': 2, 'int_sub': 2}) + if self.basic: found = 0 for op in get_stats().loops[0]._all_operations(): @@ -108,7 +107,7 @@ res = self.meta_interp(f, [6, 7]) assert res == 1323 self.check_loop_count(1) - self.check_loops(int_mul=1) + self.check_simple_loop(int_mul=1) def test_loop_variant_mul_ovf(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -125,7 +124,7 @@ res = self.meta_interp(f, [6, 7]) assert res == 1323 self.check_loop_count(1) - self.check_loops(int_mul_ovf=1) + self.check_simple_loop(int_mul_ovf=1) def test_loop_invariant_mul1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -140,9 +139,10 @@ res = self.meta_interp(f, [6, 7]) assert res == 252 self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) + self.check_simple_loop(int_mul=0) + self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, + 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) + def test_loop_invariant_mul_ovf(self): myjitdriver = JitDriver(greens = [], reds 
= ['y', 'res', 'x']) @@ -158,10 +158,11 @@ res = self.meta_interp(f, [6, 7]) assert res == 308 self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 2, 'int_sub': 1, 'int_gt': 1, - 'int_lshift': 1, - 'jump': 1}) + self.check_simple_loop(int_mul_ovf=0) + self.check_resops({'jump': 2, 'int_lshift': 2, 'int_gt': 2, + 'int_mul_ovf': 1, 'int_add': 4, + 'guard_true': 2, 'guard_no_overflow': 1, + 'int_sub': 2}) def test_loop_invariant_mul_bridge1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -194,11 +195,9 @@ res = self.meta_interp(f, [6, 32]) assert res == 1167 self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) - + self.check_resops({'int_lt': 3, 'int_gt': 2, 'int_add': 5, + 'guard_true': 3, 'int_sub': 4, 'jump': 4, + 'int_mul': 2, 'guard_false': 2}) def test_loop_invariant_mul_bridge_maintaining2(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -216,10 +215,9 @@ res = self.meta_interp(f, [6, 32]) assert res == 1692 self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) + self.check_resops({'int_lt': 3, 'int_gt': 2, 'int_add': 5, + 'guard_true': 3, 'int_sub': 4, 'jump': 4, + 'int_mul': 2, 'guard_false': 2}) def test_loop_invariant_mul_bridge_maintaining3(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x', 'm']) @@ -237,10 +235,9 @@ res = self.meta_interp(f, [6, 32, 16]) assert res == 1692 self.check_loop_count(3) - self.check_loops({'int_add': 2, 'int_lt': 1, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, 'int_mul': 1, - 'int_gt': 2, 'guard_true': 2}) + self.check_resops({'int_lt': 2, 'int_gt': 4, 'guard_false': 2, + 'guard_true': 4, 'int_sub': 4, 'jump': 4, + 'int_mul': 3, 'int_add': 4}) def test_loop_invariant_intbox(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -261,9 +258,9 @@ res = self.meta_interp(f, [6, 7]) assert res == 252 self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) + self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, + 'getfield_gc_pure': 1, 'int_mul': 1, + 'guard_true': 2, 'int_sub': 2}) def test_loops_are_transient(self): import gc, weakref @@ -381,7 +378,7 @@ assert res == 0 # CALL_PURE is recorded in the history, but turned into a CALL # by optimizeopt.py - self.check_loops(int_sub=0, call=1, call_pure=0) + self.check_resops(call_pure=0, call=2, int_sub=0) def test_constfold_call_elidable(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) @@ -397,7 +394,7 @@ res = self.meta_interp(f, [21, 5]) assert res == -1 # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) + self.check_resops(call_pure=0, call=0, int_sub=2) def test_constfold_call_elidable_2(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) @@ -417,7 +414,7 @@ res = self.meta_interp(f, [21, 5]) assert res == -1 # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) + self.check_resops(call_pure=0, call=0, int_sub=2) def test_elidable_function_returning_object(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) @@ -442,7 +439,7 @@ res = self.meta_interp(f, [21, 5]) assert res == -1 # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0, getfield_gc=0) 
+ self.check_resops(call_pure=0, call=0, getfield_gc=1, int_sub=2) def test_elidable_raising(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) @@ -463,12 +460,12 @@ res = self.meta_interp(f, [22, 6]) assert res == -3 # the CALL_PURE is constant-folded away during tracing - self.check_loops(int_sub=1, call=0, call_pure=0) + self.check_resops(call_pure=0, call=0, int_sub=2) # res = self.meta_interp(f, [22, -5]) assert res == 0 # raises: becomes CALL and is not constant-folded away - self.check_loops(int_sub=1, call=1, call_pure=0) + self.check_resops(call_pure=0, call=2, int_sub=2) def test_elidable_raising_2(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) @@ -489,12 +486,12 @@ res = self.meta_interp(f, [22, 6]) assert res == -3 # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) + self.check_resops(call_pure=0, call=0, int_sub=2) # res = self.meta_interp(f, [22, -5]) assert res == 0 # raises: becomes CALL and is not constant-folded away - self.check_loops(int_sub=1, call=1, call_pure=0) + self.check_resops(call_pure=0, call=2, int_sub=2) def test_constant_across_mp(self): myjitdriver = JitDriver(greens = [], reds = ['n']) @@ -533,7 +530,7 @@ policy = StopAtXPolicy(externfn) res = self.meta_interp(f, [31], policy=policy) assert res == 42 - self.check_loops(int_mul=1, int_mod=0) + self.check_resops(int_mul=2, int_mod=0) def test_we_are_jitted(self): myjitdriver = JitDriver(greens = [], reds = ['y']) @@ -835,7 +832,7 @@ return n res = self.meta_interp(f, [20, 1, 2]) assert res == 0 - self.check_loops(call=0) + self.check_resops(call=0) def test_abs(self): myjitdriver = JitDriver(greens = [], reds = ['i', 't']) @@ -865,9 +862,8 @@ res = self.meta_interp(f, [6, 7]) assert res == 42.0 self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'float_add': 1, 'float_sub': 1, 'float_gt': 1, - 'jump': 1}) + self.check_resops({'jump': 2, 'float_gt': 2, 'float_add': 2, + 'float_sub': 2, 'guard_true': 2}) def test_print(self): myjitdriver = JitDriver(greens = [], reds = ['n']) @@ -1038,7 +1034,7 @@ return x res = self.meta_interp(f, [20], enable_opts='') assert res == f(20) - self.check_loops(call=0) + self.check_resops(call=0) def test_zerodivisionerror(self): # test the case of exception-raising operation that is not delegated @@ -1351,7 +1347,7 @@ res = self.meta_interp(f, [6, 7]) assert res == 42 self.check_loop_count(1) - self.check_loops(call=1) + self.check_resops(call=2) def test_merge_guardclass_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -1378,8 +1374,7 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - self.check_loops(guard_class=0, guard_value=3) - self.check_loops(guard_class=0, guard_value=6, everywhere=True) + self.check_resops(guard_class=0, guard_value=6) def test_merge_guardnonnull_guardclass(self): from pypy.rlib.objectmodel import instantiate @@ -1407,11 +1402,9 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=2, - guard_nonnull_class=2, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=4, - guard_nonnull_class=4, guard_isnull=2, - everywhere=True) + self.check_resops(guard_class=0, guard_nonnull=4, + guard_nonnull_class=4, guard_isnull=2) + def test_merge_guardnonnull_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -1438,11 +1431,9 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - 
self.check_loops(guard_class=0, guard_nonnull=2, guard_value=2, - guard_nonnull_class=0, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=4, guard_value=4, - guard_nonnull_class=0, guard_isnull=2, - everywhere=True) + self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4, + guard_nonnull_class=0, guard_isnull=2) + def test_merge_guardnonnull_guardvalue_2(self): from pypy.rlib.objectmodel import instantiate @@ -1469,11 +1460,9 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=2, guard_value=2, - guard_nonnull_class=0, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=4, guard_value=4, - guard_nonnull_class=0, guard_isnull=2, - everywhere=True) + self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4, + guard_nonnull_class=0, guard_isnull=2) + def test_merge_guardnonnull_guardclass_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -1503,11 +1492,9 @@ return x res = self.meta_interp(f, [399], listops=True) assert res == f(399) - self.check_loops(guard_class=0, guard_nonnull=3, guard_value=3, - guard_nonnull_class=0, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=6, guard_value=6, - guard_nonnull_class=0, guard_isnull=2, - everywhere=True) + self.check_resops(guard_class=0, guard_nonnull=6, guard_value=6, + guard_nonnull_class=0, guard_isnull=2) + def test_residual_call_doesnt_lose_info(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'l']) @@ -1533,8 +1520,7 @@ y.v = g(y.v) - y.v/y.v + lc/l[0] - 1 return y.v res = self.meta_interp(f, [20], listops=True) - self.check_loops(getfield_gc=0, getarrayitem_gc=0) - self.check_loops(getfield_gc=1, getarrayitem_gc=0, everywhere=True) + self.check_resops(getarrayitem_gc=0, getfield_gc=1) def test_guard_isnull_nonnull(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) @@ -1562,7 +1548,7 @@ return res res = self.meta_interp(f, [21]) assert res == 42 - self.check_loops(guard_nonnull=1, guard_isnull=1) + self.check_resops(guard_nonnull=2, guard_isnull=2) def test_loop_invariant1(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) @@ -1589,8 +1575,7 @@ return res res = self.meta_interp(g, [21]) assert res == 3 * 21 - self.check_loops(call=0) - self.check_loops(call=1, everywhere=True) + self.check_resops(call=1) def test_bug_optimizeopt_mutates_ops(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'res', 'const', 'a']) @@ -1710,7 +1695,7 @@ return x res = self.meta_interp(f, [8]) assert res == 0 - self.check_loops(jit_debug=2) + self.check_resops(jit_debug=4) def test_assert_green(self): def f(x, promote_flag): @@ -1752,9 +1737,10 @@ res = self.meta_interp(g, [6, 7]) assert res == 6*8 + 6**8 self.check_loop_count(5) - self.check_loops({'guard_true': 2, - 'int_add': 1, 'int_mul': 1, 'int_sub': 2, - 'int_gt': 2, 'jump': 2}) + self.check_resops({'guard_class': 2, 'int_gt': 4, + 'getfield_gc': 4, 'guard_true': 4, + 'int_sub': 4, 'jump': 4, 'int_mul': 2, + 'int_add': 2}) def test_multiple_specialied_versions_array(self): myjitdriver = JitDriver(greens = [], reds = ['idx', 'y', 'x', 'res', @@ -1795,7 +1781,7 @@ res = self.meta_interp(g, [6, 14]) assert res == g(6, 14) self.check_loop_count(9) - self.check_loops(getarrayitem_gc=8, everywhere=True) + self.check_resops(getarrayitem_gc=8) def test_multiple_specialied_versions_bridge(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) @@ -1983,8 +1969,8 @@ res = self.meta_interp(g, [3, 23]) 
assert res == 7068153 self.check_loop_count(7) - self.check_loops(guard_true=4, guard_class=0, int_add=2, int_mul=2, - guard_false=2) + self.check_resops(guard_true=6, guard_class=2, int_mul=3, + int_add=3, guard_false=3) def test_dont_trace_every_iteration(self): myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'i', 'sa']) @@ -2228,27 +2214,27 @@ return sa assert self.meta_interp(f1, [5, 5]) == 50 - self.check_loops(int_rshift=0, everywhere=True) + self.check_resops(int_rshift=0) for f in (f1, f2): assert self.meta_interp(f, [5, 6]) == 50 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) assert self.meta_interp(f, [10, 5]) == 100 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) assert self.meta_interp(f, [10, 6]) == 100 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) assert self.meta_interp(f, [5, 31]) == 0 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) bigval = 1 while (bigval << 3).__class__ is int: bigval = bigval << 1 assert self.meta_interp(f, [bigval, 5]) == 0 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) def test_overflowing_shift_neg(self): myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'n', 'sa']) @@ -2273,27 +2259,27 @@ return sa assert self.meta_interp(f1, [-5, 5]) == -50 - self.check_loops(int_rshift=0, everywhere=True) + self.check_resops(int_rshift=0) for f in (f1, f2): assert self.meta_interp(f, [-5, 6]) == -50 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) assert self.meta_interp(f, [-10, 5]) == -100 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) assert self.meta_interp(f, [-10, 6]) == -100 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) assert self.meta_interp(f, [-5, 31]) == 0 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) bigval = 1 while (bigval << 3).__class__ is int: bigval = bigval << 1 assert self.meta_interp(f, [bigval, 5]) == 0 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) def test_pure_op_not_to_be_propagated(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'sa']) @@ -2433,8 +2419,7 @@ if counter > 10: return 7 assert self.meta_interp(build, []) == 7 - self.check_loops(getfield_gc_pure=0) - self.check_loops(getfield_gc_pure=2, everywhere=True) + self.check_resops(getfield_gc_pure=2) def test_args_becomming_equal(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a', 'b']) @@ -2567,7 +2552,7 @@ i += 1 return sa assert self.meta_interp(f, [20]) == f(20) - self.check_loops(int_gt=1, int_lt=2, int_ge=0, int_le=0) + self.check_resops(int_lt=4, int_le=0, int_ge=0, int_gt=2) def test_intbounds_not_generalized1(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa']) @@ -2584,7 +2569,8 @@ i += 1 return sa assert self.meta_interp(f, [20]) == f(20) - self.check_loops(int_gt=1, int_lt=3, int_ge=2, int_le=1) + self.check_resops(int_lt=6, int_le=2, int_ge=4, int_gt=3) + def test_intbounds_not_generalized2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'node']) @@ -2604,7 +2590,7 @@ i += 1 return sa assert self.meta_interp(f, [20]) == f(20) - self.check_loops(int_gt=1, int_lt=2, int_ge=1, int_le=1) + self.check_resops(int_lt=4, int_le=3, int_ge=3, int_gt=2) def test_retrace_limit1(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 
'sa', 'a']) @@ -2858,7 +2844,7 @@ return a[0].intvalue res = self.meta_interp(f, [100]) assert res == -2 - #self.check_loops(getarrayitem_gc=0, setarrayitem_gc=0) -- xxx? + self.check_resops(setarrayitem_gc=2, getarrayitem_gc=1) def test_retrace_ending_up_retracing_another_loop(self): @@ -2958,7 +2944,7 @@ i += 1 res = self.meta_interp(f, [32]) assert res == f(32) - self.check_loops(arraylen_gc=2) + self.check_resops(arraylen_gc=3) def test_ulonglong_mod(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'i']) @@ -3145,9 +3131,9 @@ a = A(a.i + 1) self.meta_interp(f, []) - self.check_loops(new_with_vtable=0) + self.check_resops(new_with_vtable=0) self.meta_interp(f, [], enable_opts='') - self.check_loops(new_with_vtable=1) + self.check_resops(new_with_vtable=1) def test_two_loopinvariant_arrays1(self): from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -3239,7 +3225,7 @@ return sa res = self.meta_interp(f, [32]) assert res == f(32) - self.check_loops(arraylen_gc=2, everywhere=True) + self.check_resops(arraylen_gc=2) def test_release_gil_flush_heap_cache(self): if sys.platform == "win32": @@ -3276,7 +3262,7 @@ lock.release() return n res = self.meta_interp(f, [10, 1]) - self.check_loops(getfield_gc=2) + self.check_resops(getfield_gc=4) assert res == f(10, 1) def test_jit_merge_point_with_raw_pointer(self): @@ -3340,10 +3326,10 @@ res = self.meta_interp(main, [0, 10, 2], enable_opts='') assert res == main(0, 10, 2) - self.check_loops(call=1) + self.check_resops(call=1) res = self.meta_interp(main, [1, 10, 2], enable_opts='') assert res == main(1, 10, 2) - self.check_loops(call=0) + self.check_resops(call=0) def test_look_inside_iff_virtual(self): # There's no good reason for this to be look_inside_iff, but it's a test! @@ -3368,10 +3354,10 @@ i += f(A(2), n) res = self.meta_interp(main, [0], enable_opts='') assert res == main(0) - self.check_loops(call=1, getfield_gc=0) + self.check_resops(call=1, getfield_gc=0) res = self.meta_interp(main, [1], enable_opts='') assert res == main(1) - self.check_loops(call=0, getfield_gc=0) + self.check_resops(call=0, getfield_gc=0) def test_reuse_elidable_result(self): driver = JitDriver(reds=['n', 's'], greens = []) @@ -3384,10 +3370,9 @@ return s res = self.meta_interp(main, [10]) assert res == main(10) - self.check_loops({ - 'call': 1, 'guard_no_exception': 1, 'guard_true': 1, 'int_add': 2, - 'int_gt': 1, 'int_sub': 1, 'strlen': 1, 'jump': 1, - }) + self.check_resops({'int_gt': 2, 'strlen': 2, 'guard_true': 2, + 'int_sub': 2, 'jump': 2, 'call': 2, + 'guard_no_exception': 2, 'int_add': 4}) def test_look_inside_iff_const_getarrayitem_gc_pure(self): driver = JitDriver(greens=['unroll'], reds=['s', 'n']) @@ -3419,10 +3404,10 @@ res = self.meta_interp(main, [0, 10]) assert res == main(0, 10) # 2 calls, one for f() and one for char_mul - self.check_loops(call=2) + self.check_resops(call=4) res = self.meta_interp(main, [1, 10]) assert res == main(1, 10) - self.check_loops(call=0) + self.check_resops(call=0) def test_setarrayitem_followed_by_arraycopy(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'x', 'y']) @@ -3523,7 +3508,8 @@ res = self.meta_interp(f, [10]) assert res == 0 - self.check_loops({"int_sub": 1, "int_gt": 1, "guard_true": 1, "jump": 1}) + self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + 'int_sub': 2}) def test_virtual_opaque_ptr(self): myjitdriver = JitDriver(greens = [], reds = ["n"]) @@ -3542,7 +3528,9 @@ return n res = self.meta_interp(f, [10]) assert res == 0 - self.check_loops({"int_sub": 
1, "int_gt": 1, "guard_true": 1, "jump": 1}) + self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + 'int_sub': 2}) + def test_virtual_opaque_dict(self): myjitdriver = JitDriver(greens = [], reds = ["n"]) @@ -3562,7 +3550,10 @@ return n res = self.meta_interp(f, [10]) assert res == 0 - self.check_loops({"int_sub": 1, "int_gt": 1, "guard_true": 1, "jump": 1}) + self.check_resops({'int_gt': 2, 'getfield_gc': 1, 'int_eq': 1, + 'guard_true': 2, 'int_sub': 2, 'jump': 2, + 'guard_false': 1}) + def test_convert_from_SmallFunctionSetPBCRepr_to_FunctionsPBCRepr(self): f1 = lambda n: n+1 @@ -3594,6 +3585,132 @@ self.interp_operations(f, [5], translationoptions=translationoptions) + def test_annotation_gives_class_knowledge_to_tracer(self): + py.test.skip("disabled") + class Base(object): + pass + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + assert isinstance(a, A) + z = a.f() + elif x < 0: + assert isinstance(a, B) + z = a.f() + else: + assert isinstance(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + + def test_give_class_knowledge_to_tracer_explicitly(self): + from pypy.rpython.lltypesystem.lloperation import llop + class Base(object): + def f(self): + raise NotImplementedError + def g(self): + raise NotImplementedError + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + record_known_class(a, A) + z = a.f() + elif x < 0: + record_known_class(a, B) + z = a.f() + else: + record_known_class(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): from pypy.rlib.objectmodel import UnboxedValue diff --git a/pypy/jit/metainterp/test/test_del.py b/pypy/jit/metainterp/test/test_del.py --- a/pypy/jit/metainterp/test/test_del.py +++ b/pypy/jit/metainterp/test/test_del.py @@ -20,12 
+20,12 @@ n -= 1 return 42 self.meta_interp(f, [20]) - self.check_loops({'call': 2, # calls to a helper function - 'guard_no_exception': 2, # follows the calls - 'int_sub': 1, - 'int_gt': 1, - 'guard_true': 1, - 'jump': 1}) + self.check_resops({'call': 4, # calls to a helper function + 'guard_no_exception': 4, # follows the calls + 'int_sub': 2, + 'int_gt': 2, + 'guard_true': 2, + 'jump': 2}) def test_class_of_allocated(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) @@ -78,7 +78,7 @@ return 1 res = self.meta_interp(f, [20], enable_opts='') assert res == 1 - self.check_loops(call=1) # for the case B(), but not for the case A() + self.check_resops(call=1) # for the case B(), but not for the case A() class TestLLtype(DelTests, LLJitMixin): @@ -103,7 +103,7 @@ break return 42 self.meta_interp(f, [20]) - self.check_loops(getfield_raw=1, setfield_raw=1, call=0, call_pure=0) + self.check_resops(call_pure=0, setfield_raw=2, call=0, getfield_raw=2) class TestOOtype(DelTests, OOJitMixin): def setup_class(cls): diff --git a/pypy/jit/metainterp/test/test_dict.py b/pypy/jit/metainterp/test/test_dict.py --- a/pypy/jit/metainterp/test/test_dict.py +++ b/pypy/jit/metainterp/test/test_dict.py @@ -91,7 +91,7 @@ res1 = f(100) res2 = self.meta_interp(f, [100], listops=True) assert res1 == res2 - self.check_loops(int_mod=1) # the hash was traced and eq, but cached + self.check_resops(int_mod=2) # the hash was traced and eq, but cached def test_dict_setdefault(self): myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) @@ -107,7 +107,7 @@ assert f(100) == 50 res = self.meta_interp(f, [100], listops=True) assert res == 50 - self.check_loops(new=0, new_with_vtable=0) + self.check_resops(new=0, new_with_vtable=0) def test_dict_as_counter(self): myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) @@ -128,7 +128,7 @@ assert f(100) == 50 res = self.meta_interp(f, [100], listops=True) assert res == 50 - self.check_loops(int_mod=1) # key + eq, but cached + self.check_resops(int_mod=2) # key + eq, but cached def test_repeated_lookup(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'd']) @@ -153,12 +153,13 @@ res = self.meta_interp(f, [100], listops=True) assert res == f(50) - self.check_loops({"call": 5, "getfield_gc": 1, "getinteriorfield_gc": 1, - "guard_false": 1, "guard_no_exception": 4, - "guard_true": 1, "int_and": 1, "int_gt": 1, - "int_is_true": 1, "int_sub": 1, "jump": 1, - "new_with_vtable": 1, "new": 1, "new_array": 1, - "setfield_gc": 3, }) + self.check_resops({'new_array': 2, 'getfield_gc': 2, + 'guard_true': 2, 'jump': 2, + 'new_with_vtable': 2, 'getinteriorfield_gc': 2, + 'setfield_gc': 6, 'int_gt': 2, 'int_sub': 2, + 'call': 10, 'int_and': 2, + 'guard_no_exception': 8, 'new': 2, + 'guard_false': 2, 'int_is_true': 2}) class TestOOtype(DictTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_exception.py b/pypy/jit/metainterp/test/test_exception.py --- a/pypy/jit/metainterp/test/test_exception.py +++ b/pypy/jit/metainterp/test/test_exception.py @@ -35,10 +35,8 @@ return n res = self.meta_interp(f, [10]) assert res == 0 - self.check_loops({'jump': 1, - 'int_gt': 1, 'guard_true': 1, - 'int_sub': 1}) - + self.check_resops({'jump': 2, 'guard_true': 2, + 'int_gt': 2, 'int_sub': 2}) def test_bridge_from_guard_exception(self): myjitdriver = JitDriver(greens = [], reds = ['n']) diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ 
b/pypy/jit/metainterp/test/test_fficall.py @@ -67,23 +67,23 @@ 'byval': False} supported = all(d[check] for check in jitif) if supported: - self.check_loops( - call_release_gil=1, # a CALL_RELEASE_GIL, and no other CALLs + self.check_resops( + call_release_gil=2, # a CALL_RELEASE_GIL, and no other CALLs call=0, call_may_force=0, - guard_no_exception=1, - guard_not_forced=1, - int_add=1, - int_lt=1, - guard_true=1, - jump=1) + guard_no_exception=2, + guard_not_forced=2, + int_add=2, + int_lt=2, + guard_true=2, + jump=2) else: - self.check_loops( + self.check_resops( call_release_gil=0, # no CALL_RELEASE_GIL - int_add=1, - int_lt=1, - guard_true=1, - jump=1) + int_add=2, + int_lt=2, + guard_true=2, + jump=2) return res def test_byval_result(self): @@ -144,10 +144,8 @@ return result_point[0].x * result_point[0].y assert self.meta_interp(main, [10]) == main(10) == 9000 - self.check_loops({"int_add": 3, "jump": 1, "int_lt": 1, "guard_true": 1, - "getinteriorfield_raw": 4, "setinteriorfield_raw": 2 - }) - + self.check_resops({'jump': 2, 'int_lt': 2, 'setinteriorfield_raw': 4, + 'getinteriorfield_raw': 8, 'int_add': 6, 'guard_true': 2}) class TestFfiCall(FfiCallTests, LLJitMixin): supports_all = False @@ -156,4 +154,4 @@ supports_all = True # supports_{floats,longlong,singlefloats} class TestFfiLookup(FfiLookupTests, LLJitMixin): - pass \ No newline at end of file + pass diff --git a/pypy/jit/metainterp/test/test_greenfield.py b/pypy/jit/metainterp/test/test_greenfield.py --- a/pypy/jit/metainterp/test/test_greenfield.py +++ b/pypy/jit/metainterp/test/test_greenfield.py @@ -25,7 +25,7 @@ res = self.meta_interp(g, [7]) assert res == -2 self.check_loop_count(2) - self.check_loops(guard_value=0) + self.check_resops(guard_value=0) def test_green_field_2(self): myjitdriver = JitDriver(greens=['ctx.x'], reds=['ctx']) @@ -50,7 +50,7 @@ res = self.meta_interp(g, [7]) assert res == -22 self.check_loop_count(6) - self.check_loops(guard_value=0) + self.check_resops(guard_value=0) class TestLLtypeGreenFieldsTests(GreenFieldsTests, LLJitMixin): diff --git a/pypy/jit/metainterp/test/test_jitdriver.py b/pypy/jit/metainterp/test/test_jitdriver.py --- a/pypy/jit/metainterp/test/test_jitdriver.py +++ b/pypy/jit/metainterp/test/test_jitdriver.py @@ -88,7 +88,7 @@ assert res == loop2(4, 40) # we expect only one int_sub, corresponding to the single # compiled instance of loop1() - self.check_loops(int_sub=1) + self.check_resops(int_sub=2) # the following numbers are not really expectations of the test # itself, but just the numbers that we got after looking carefully # at the generated machine code @@ -154,7 +154,7 @@ res = self.meta_interp(loop2, [4, 40], repeat=7, inline=True) assert res == loop2(4, 40) # we expect no int_sub, but a residual call - self.check_loops(int_sub=0, call=1) + self.check_resops(call=2, int_sub=0) def test_multiple_jits_trace_too_long(self): myjitdriver1 = JitDriver(greens=["n"], reds=["i", "box"]) diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py --- a/pypy/jit/metainterp/test/test_list.py +++ b/pypy/jit/metainterp/test/test_list.py @@ -6,8 +6,8 @@ class ListTests: def check_all_virtualized(self): - self.check_loops(new_array=0, setarrayitem_gc=0, getarrayitem_gc=0, - arraylen_gc=0) + self.check_resops(setarrayitem_gc=0, new_array=0, arraylen_gc=0, + getarrayitem_gc=0) def test_simple_array(self): jitdriver = JitDriver(greens = [], reds = ['n']) @@ -20,7 +20,7 @@ return n res = self.meta_interp(f, [10], listops=True) assert res == 0 - 
self.check_loops(int_sub=1) + self.check_resops(int_sub=2) self.check_all_virtualized() def test_list_pass_around(self): @@ -56,7 +56,8 @@ res = self.meta_interp(f, [10], listops=True) assert res == f(10) # one setitem should be gone by now - self.check_loops(call=1, setarrayitem_gc=2, getarrayitem_gc=1) + self.check_resops(setarrayitem_gc=4, getarrayitem_gc=2, call=2) + def test_ll_fixed_setitem_fast(self): jitdriver = JitDriver(greens = [], reds = ['n', 'l']) @@ -93,7 +94,7 @@ res = self.meta_interp(f, [10], listops=True) assert res == f(10) - self.check_loops(setarrayitem_gc=0, getarrayitem_gc=0, call=0) + self.check_resops(setarrayitem_gc=0, call=0, getarrayitem_gc=0) def test_vlist_alloc_and_set(self): # the check_loops fails, because [non-null] * n is not supported yet @@ -141,7 +142,7 @@ res = self.meta_interp(f, [5], listops=True) assert res == 7 - self.check_loops(call=0) + self.check_resops(call=0) def test_fold_getitem_1(self): jitdriver = JitDriver(greens = ['pc', 'n', 'l'], reds = ['total']) @@ -161,7 +162,7 @@ res = self.meta_interp(f, [4], listops=True) assert res == f(4) - self.check_loops(call=0) + self.check_resops(call=0) def test_fold_getitem_2(self): jitdriver = JitDriver(greens = ['pc', 'n', 'l'], reds = ['total', 'x']) @@ -186,7 +187,7 @@ res = self.meta_interp(f, [4], listops=True) assert res == f(4) - self.check_loops(call=0, getfield_gc=0) + self.check_resops(call=0, getfield_gc=0) def test_fold_indexerror(self): jitdriver = JitDriver(greens = [], reds = ['total', 'n', 'lst']) @@ -206,7 +207,7 @@ res = self.meta_interp(f, [15], listops=True) assert res == f(15) - self.check_loops(guard_exception=0) + self.check_resops(guard_exception=0) def test_virtual_resize(self): jitdriver = JitDriver(greens = [], reds = ['n', 's']) @@ -224,9 +225,8 @@ return s res = self.meta_interp(f, [15], listops=True) assert res == f(15) - self.check_loops({"int_add": 1, "int_sub": 1, "int_gt": 1, - "guard_true": 1, "jump": 1}) - + self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, + 'guard_true': 2, 'int_sub': 2}) class TestOOtype(ListTests, OOJitMixin): pass @@ -258,4 +258,4 @@ assert res == f(37) # There is the one actual field on a, plus several fields on the list # itself - self.check_loops(getfield_gc=10, everywhere=True) + self.check_resops(getfield_gc=10) diff --git a/pypy/jit/metainterp/test/test_loop.py b/pypy/jit/metainterp/test/test_loop.py --- a/pypy/jit/metainterp/test/test_loop.py +++ b/pypy/jit/metainterp/test/test_loop.py @@ -60,7 +60,8 @@ assert res == f(6, 13) self.check_loop_count(1) if self.enable_opts: - self.check_loops(getfield_gc = 0, setfield_gc = 1) + self.check_resops(setfield_gc=2, getfield_gc=0) + def test_loop_with_two_paths(self): from pypy.rpython.lltypesystem import lltype @@ -180,7 +181,10 @@ assert res == 42 self.check_loop_count(1) # the 'int_eq' and following 'guard' should be constant-folded - self.check_loops(int_eq=0, guard_true=1, guard_false=0) + if 'unroll' in self.enable_opts: + self.check_resops(int_eq=0, guard_true=2, guard_false=0) + else: + self.check_resops(int_eq=0, guard_true=1, guard_false=0) if self.basic: found = 0 for op in get_stats().loops[0]._all_operations(): @@ -643,8 +647,12 @@ res = self.meta_interp(main_interpreter_loop, [1]) assert res == 102 self.check_loop_count(1) - self.check_loops({'int_add' : 3, 'int_gt' : 1, - 'guard_false' : 1, 'jump' : 1}) + if 'unroll' in self.enable_opts: + self.check_resops({'int_add' : 6, 'int_gt' : 2, + 'guard_false' : 2, 'jump' : 2}) + else: + self.check_resops({'int_add' : 3, 'int_gt' : 
1, + 'guard_false' : 1, 'jump' : 1}) def test_automatic_promotion(self): myjitdriver = JitDriver(greens = ['i'], @@ -686,7 +694,7 @@ self.check_loop_count(1) # These loops do different numbers of ops based on which optimizer we # are testing with. - self.check_loops(self.automatic_promotion_result) + self.check_resops(self.automatic_promotion_result) def test_can_enter_jit_outside_main_loop(self): myjitdriver = JitDriver(greens=[], reds=['i', 'j', 'a']) diff --git a/pypy/jit/metainterp/test/test_loop_unroll.py b/pypy/jit/metainterp/test/test_loop_unroll.py --- a/pypy/jit/metainterp/test/test_loop_unroll.py +++ b/pypy/jit/metainterp/test/test_loop_unroll.py @@ -8,7 +8,8 @@ enable_opts = ALL_OPTS_NAMES automatic_promotion_result = { - 'int_add' : 3, 'int_gt' : 1, 'guard_false' : 1, 'jump' : 1, + 'int_gt': 2, 'guard_false': 2, 'jump': 2, 'int_add': 6, + 'guard_value': 1 } # ====> test_loop.py diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -73,8 +73,7 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_loops(guard_not_invalidated=2, getfield_gc=0, - everywhere=True) + self.check_resops(guard_not_invalidated=2, getfield_gc=0) # from pypy.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -103,7 +102,7 @@ assert f(100, 7) == 721 res = self.meta_interp(f, [100, 7]) assert res == 721 - self.check_loops(guard_not_invalidated=0, getfield_gc=1) + self.check_resops(guard_not_invalidated=0, getfield_gc=3) # from pypy.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -134,8 +133,7 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_loops(guard_not_invalidated=2, getfield_gc=0, - everywhere=True) + self.check_resops(guard_not_invalidated=2, getfield_gc=0) def test_change_during_tracing_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -160,7 +158,7 @@ assert f(100, 7) == 721 res = self.meta_interp(f, [100, 7]) assert res == 721 - self.check_loops(guard_not_invalidated=0, getfield_gc=1) + self.check_resops(guard_not_invalidated=0, getfield_gc=2) def test_change_during_tracing_2(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -186,7 +184,7 @@ assert f(100, 7) == 700 res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_loops(guard_not_invalidated=0, getfield_gc=1) + self.check_resops(guard_not_invalidated=0, getfield_gc=2) def test_change_invalidate_reentering(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -212,7 +210,7 @@ assert g(100, 7) == 700707 res = self.meta_interp(g, [100, 7]) assert res == 700707 - self.check_loops(guard_not_invalidated=2, getfield_gc=0) + self.check_resops(guard_not_invalidated=4, getfield_gc=0) def test_invalidate_while_running(self): jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) @@ -324,8 +322,8 @@ assert f(100, 15) == 3009 res = self.meta_interp(f, [100, 15]) assert res == 3009 - self.check_loops(guard_not_invalidated=4, getfield_gc=0, - call_may_force=0, guard_not_forced=0) + self.check_resops(guard_not_invalidated=8, guard_not_forced=0, + call_may_force=0, getfield_gc=0) def test_list_simple_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -347,9 +345,8 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_loops(guard_not_invalidated=2, getfield_gc=0, - getarrayitem_gc=0, getarrayitem_gc_pure=0, - 
everywhere=True) + self.check_resops(getarrayitem_gc_pure=0, guard_not_invalidated=2, + getarrayitem_gc=0, getfield_gc=0) # from pypy.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -385,9 +382,8 @@ # res = self.meta_interp(f, [100, 7]) assert res == 714 - self.check_loops(guard_not_invalidated=2, getfield_gc=0, - getarrayitem_gc=0, getarrayitem_gc_pure=0, - arraylen_gc=0, everywhere=True) + self.check_resops(getarrayitem_gc_pure=0, guard_not_invalidated=2, + arraylen_gc=0, getarrayitem_gc=0, getfield_gc=0) # from pypy.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -421,9 +417,8 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_loops(guard_not_invalidated=2, getfield_gc=0, - getarrayitem_gc=0, getarrayitem_gc_pure=0, - everywhere=True) + self.check_resops(guard_not_invalidated=2, getfield_gc=0, + getarrayitem_gc=0, getarrayitem_gc_pure=0) # from pypy.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -460,9 +455,9 @@ assert f(100, 15) == 3009 res = self.meta_interp(f, [100, 15]) assert res == 3009 - self.check_loops(guard_not_invalidated=4, getfield_gc=0, - getarrayitem_gc=0, getarrayitem_gc_pure=0, - call_may_force=0, guard_not_forced=0) + self.check_resops(call_may_force=0, getfield_gc=0, + getarrayitem_gc_pure=0, guard_not_forced=0, + getarrayitem_gc=0, guard_not_invalidated=8) def test_invalidated_loop_is_not_used_any_more_as_target(self): myjitdriver = JitDriver(greens=['foo'], reds=['x']) diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -143,11 +143,11 @@ f = self.get_interpreter(codes) assert self.meta_interp(f, [0, 0, 0], enable_opts='') == 42 - self.check_loops(int_add = 1, call_may_force = 1, call = 0) + self.check_resops(call_may_force=1, int_add=1, call=0) assert self.meta_interp(f, [0, 0, 0], enable_opts='', inline=True) == 42 - self.check_loops(int_add = 2, call_may_force = 0, call = 0, - guard_no_exception = 0) + self.check_resops(call=0, int_add=2, call_may_force=0, + guard_no_exception=0) def test_inline_jitdriver_check(self): code = "021" @@ -160,7 +160,7 @@ inline=True) == 42 # the call is fully inlined, because we jump to subcode[1], thus # skipping completely the JUMP_BACK in subcode[0] - self.check_loops(call_may_force = 0, call_assembler = 0, call = 0) + self.check_resops(call=0, call_may_force=0, call_assembler=0) def test_guard_failure_in_inlined_function(self): def p(pc, code): @@ -491,10 +491,10 @@ return loop(100) res = self.meta_interp(main, [0], enable_opts='', trace_limit=TRACE_LIMIT) - self.check_loops(call_may_force=1, call=0) + self.check_resops(call=0, call_may_force=1) res = self.meta_interp(main, [1], enable_opts='', trace_limit=TRACE_LIMIT) - self.check_loops(call_may_force=0, call=0) + self.check_resops(call=0, call_may_force=0) def test_trace_from_start(self): def p(pc, code): @@ -576,7 +576,7 @@ result += f('-c-----------l-', i+100) self.meta_interp(g, [10], backendopt=True) self.check_aborted_count(1) - self.check_loops(call_assembler=1, call=0) + self.check_resops(call=0, call_assembler=2) self.check_tree_loop_count(3) def test_directly_call_assembler(self): @@ -625,8 +625,7 @@ try: compile.compile_tmp_callback = my_ctc self.meta_interp(portal, [2, 5], inline=True) - self.check_loops(call_assembler=2, call_may_force=0, - everywhere=True) + self.check_resops(call_may_force=0, call_assembler=2) finally: 
compile.compile_tmp_callback = original_ctc # check that we made a temporary callback @@ -681,8 +680,7 @@ try: compile.compile_tmp_callback = my_ctc self.meta_interp(main, [2, 5], inline=True) - self.check_loops(call_assembler=2, call_may_force=0, - everywhere=True) + self.check_resops(call_may_force=0, call_assembler=2) finally: compile.compile_tmp_callback = original_ctc # check that we made a temporary callback @@ -1021,7 +1019,7 @@ res = self.meta_interp(portal, [2, 0], inline=True, policy=StopAtXPolicy(residual)) assert res == portal(2, 0) - self.check_loops(call_assembler=4, everywhere=True) + self.check_resops(call_assembler=4) def test_inline_without_hitting_the_loop(self): driver = JitDriver(greens = ['codeno'], reds = ['i'], @@ -1045,7 +1043,7 @@ assert portal(0) == 70 res = self.meta_interp(portal, [0], inline=True) assert res == 70 - self.check_loops(call_assembler=0) + self.check_resops(call_assembler=0) def test_inline_with_hitting_the_loop_sometimes(self): driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'], @@ -1071,7 +1069,7 @@ assert portal(0, 1) == 2095 res = self.meta_interp(portal, [0, 1], inline=True) assert res == 2095 - self.check_loops(call_assembler=12, everywhere=True) + self.check_resops(call_assembler=12) def test_inline_with_hitting_the_loop_sometimes_exc(self): driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'], @@ -1109,7 +1107,7 @@ assert main(0, 1) == 2095 res = self.meta_interp(main, [0, 1], inline=True) assert res == 2095 - self.check_loops(call_assembler=12, everywhere=True) + self.check_resops(call_assembler=12) def test_handle_jitexception_in_portal(self): # a test for _handle_jitexception_in_portal in blackhole.py @@ -1238,7 +1236,7 @@ i += 1 self.meta_interp(portal, [0, 0, 0], inline=True) - self.check_loops(call=0, call_may_force=0) + self.check_resops(call_may_force=0, call=0) class TestLLtype(RecursiveTests, LLJitMixin): pass diff --git a/pypy/jit/metainterp/test/test_resume.py b/pypy/jit/metainterp/test/test_resume.py --- a/pypy/jit/metainterp/test/test_resume.py +++ b/pypy/jit/metainterp/test/test_resume.py @@ -23,11 +23,11 @@ assert tag(-3, 2) == rffi.r_short(-3<<2|2) assert tag((1<<13)-1, 3) == rffi.r_short(((1<<15)-1)|3) assert tag(-1<<13, 3) == rffi.r_short((-1<<15)|3) - py.test.raises(ValueError, tag, 3, 5) - py.test.raises(ValueError, tag, 1<<13, 0) - py.test.raises(ValueError, tag, (1<<13)+1, 0) - py.test.raises(ValueError, tag, (-1<<13)-1, 0) - py.test.raises(ValueError, tag, (-1<<13)-5, 0) + py.test.raises(AssertionError, tag, 3, 5) + py.test.raises(TagOverflow, tag, 1<<13, 0) + py.test.raises(TagOverflow, tag, (1<<13)+1, 0) + py.test.raises(TagOverflow, tag, (-1<<13)-1, 0) + py.test.raises(TagOverflow, tag, (-1<<13)-5, 0) def test_untag(): assert untag(tag(3, 1)) == (3, 1) @@ -1318,8 +1318,7 @@ assert rffi.cast(lltype.Signed, pf[1].fieldnum) == 1062 assert rffi.cast(lltype.Signed, pf[1].itemindex) == 2147483647 # - from pypy.jit.metainterp.pyjitpl import SwitchToBlackhole - py.test.raises(SwitchToBlackhole, modifier._add_pending_fields, + py.test.raises(TagOverflow, modifier._add_pending_fields, [(array_a, 42, 63, 2147483648)]) def test_resume_reader_fields_and_arrayitems(): diff --git a/pypy/jit/metainterp/test/test_send.py b/pypy/jit/metainterp/test/test_send.py --- a/pypy/jit/metainterp/test/test_send.py +++ b/pypy/jit/metainterp/test/test_send.py @@ -20,9 +20,8 @@ return c res = self.meta_interp(f, [1]) assert res == 2 - self.check_loops({'jump': 1, - 'int_sub': 1, 'int_gt' : 1, - 'guard_true': 1}) # all folded away + 
self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + 'int_sub': 2}) # all folded away def test_red_builtin_send(self): myjitdriver = JitDriver(greens = [], reds = ['i', 'counter']) @@ -41,12 +40,9 @@ return res res = self.meta_interp(f, [1], policy=StopAtXPolicy(externfn)) assert res == 2 - if self.type_system == 'ootype': - self.check_loops(call=1, oosend=1) # 'len' remains - else: - # 'len' becomes a getfield('num_items') for now in lltype, - # which is itself encoded as a 'getfield_gc' - self.check_loops(call=1, getfield_gc=1) + # 'len' becomes a getfield('num_items') for now in lltype, + # which is itself encoded as a 'getfield_gc' + self.check_resops(call=2, getfield_gc=2) def test_send_to_single_target_method(self): myjitdriver = JitDriver(greens = [], reds = ['i', 'counter']) @@ -70,11 +66,10 @@ res = self.meta_interp(f, [1], policy=StopAtXPolicy(externfn), backendopt=True) assert res == 43 - self.check_loops({'call': 1, 'guard_no_exception': 1, - 'getfield_gc': 1, - 'int_add': 1, - 'jump': 1, 'int_gt' : 1, 'guard_true' : 1, - 'int_sub' : 1}) + self.check_resops({'int_gt': 2, 'getfield_gc': 2, + 'guard_true': 2, 'int_sub': 2, 'jump': 2, + 'call': 2, 'guard_no_exception': 2, + 'int_add': 2}) def test_red_send_to_green_receiver(self): myjitdriver = JitDriver(greens = ['i'], reds = ['counter', 'j']) @@ -97,7 +92,7 @@ return res res = self.meta_interp(f, [4, -1]) assert res == 145 - self.check_loops(int_add = 1, everywhere=True) + self.check_resops(int_add=1) def test_oosend_base(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'w']) @@ -132,7 +127,7 @@ assert res == 17 res = self.meta_interp(f, [4, 14]) assert res == 1404 - self.check_loops(guard_class=0, new_with_vtable=0, new=0) + self.check_resops(guard_class=1, new=0, new_with_vtable=0) def test_three_receivers(self): myjitdriver = JitDriver(greens = [], reds = ['y']) @@ -205,8 +200,7 @@ # of the body in a single bigger loop with no failing guard except # the final one. self.check_loop_count(1) - self.check_loops(guard_class=0, - int_add=2, int_sub=2) + self.check_resops(guard_class=1, int_add=4, int_sub=4) self.check_jumps(14) def test_oosend_guard_failure_2(self): @@ -247,8 +241,7 @@ res = self.meta_interp(f, [4, 28]) assert res == f(4, 28) self.check_loop_count(1) - self.check_loops(guard_class=0, - int_add=2, int_sub=2) + self.check_resops(guard_class=1, int_add=4, int_sub=4) self.check_jumps(14) def test_oosend_different_initial_class(self): @@ -285,8 +278,8 @@ # However, this doesn't match the initial value of 'w'. # XXX This not completely easy to check... 
self.check_loop_count(1) - self.check_loops(int_add=0, int_lshift=1, guard_class=0, - new_with_vtable=0, new=0) + self.check_resops(guard_class=1, new_with_vtable=0, int_lshift=2, + int_add=0, new=0) def test_indirect_call_unknown_object_1(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y']) @@ -566,10 +559,7 @@ policy = StopAtXPolicy(new, A.foo.im_func, B.foo.im_func) res = self.meta_interp(fn, [0, 20], policy=policy) assert res == 42 - if self.type_system == 'ootype': - self.check_loops(oosend=1) - else: - self.check_loops(call=1) + self.check_resops(call=2) def test_residual_oosend_with_void(self): @@ -597,10 +587,7 @@ policy = StopAtXPolicy(new, A.foo.im_func) res = self.meta_interp(fn, [1, 20], policy=policy) assert res == 41 - if self.type_system == 'ootype': - self.check_loops(oosend=1) - else: - self.check_loops(call=1) + self.check_resops(call=2) def test_constfold_pure_oosend(self): myjitdriver = JitDriver(greens=[], reds = ['i', 'obj']) @@ -621,10 +608,7 @@ policy = StopAtXPolicy(A.foo.im_func) res = self.meta_interp(fn, [1, 20], policy=policy) assert res == 42 - if self.type_system == 'ootype': - self.check_loops(oosend=0) - else: - self.check_loops(call=0) + self.check_resops(call=0) def test_generalize_loop(self): myjitdriver = JitDriver(greens=[], reds = ['i', 'obj']) diff --git a/pypy/jit/metainterp/test/test_slist.py b/pypy/jit/metainterp/test/test_slist.py --- a/pypy/jit/metainterp/test/test_slist.py +++ b/pypy/jit/metainterp/test/test_slist.py @@ -76,7 +76,7 @@ return lst[i] res = self.meta_interp(f, [21], listops=True) assert res == f(21) - self.check_loops(call=0) + self.check_resops(call=0) def test_getitem_neg(self): myjitdriver = JitDriver(greens = [], reds = ['i', 'n']) @@ -92,7 +92,7 @@ return x res = self.meta_interp(f, [-2], listops=True) assert res == 41 - self.check_loops(call=0, guard_value=0) + self.check_resops(call=0, guard_value=0) # we don't support resizable lists on ootype #class TestOOtype(ListTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_string.py b/pypy/jit/metainterp/test/test_string.py --- a/pypy/jit/metainterp/test/test_string.py +++ b/pypy/jit/metainterp/test/test_string.py @@ -30,7 +30,7 @@ return i res = self.meta_interp(f, [10, True, _str('h')], listops=True) assert res == 5 - self.check_loops(**{self.CALL: 1, self.CALL_PURE: 0, 'everywhere': True}) + self.check_resops(**{self.CALL: 1, self.CALL_PURE: 0}) def test_eq_folded(self): _str = self._str @@ -50,7 +50,7 @@ return i res = self.meta_interp(f, [10, True, _str('h')], listops=True) assert res == 5 - self.check_loops(**{self.CALL: 0, self.CALL_PURE: 0}) + self.check_resops(**{self.CALL: 0, self.CALL_PURE: 0}) def test_newstr(self): _str, _chr = self._str, self._chr @@ -85,7 +85,7 @@ n -= 1 return 42 self.meta_interp(f, [6]) - self.check_loops(newstr=0, strsetitem=0, strlen=0, + self.check_resops(newstr=0, strsetitem=0, strlen=0, newunicode=0, unicodesetitem=0, unicodelen=0) def test_char2string_escape(self): @@ -126,7 +126,7 @@ return total res = self.meta_interp(f, [6]) assert res == 21 - self.check_loops(newstr=0, strgetitem=0, strsetitem=0, strlen=0, + self.check_resops(newstr=0, strgetitem=0, strsetitem=0, strlen=0, newunicode=0, unicodegetitem=0, unicodesetitem=0, unicodelen=0) @@ -147,7 +147,7 @@ m -= 1 return 42 self.meta_interp(f, [6, 7]) - self.check_loops(newstr=0, strsetitem=0, + self.check_resops(newstr=0, strsetitem=0, newunicode=0, unicodesetitem=0, call=0, call_pure=0) @@ -168,12 +168,11 @@ return 42 self.meta_interp(f, [6, 7]) if _str is str: - 
self.check_loops(newstr=1, strsetitem=0, copystrcontent=2, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, copystrcontent=4, + strsetitem=0, call=2, newstr=2) else: - self.check_loops(newunicode=1, unicodesetitem=0, - copyunicodecontent=2, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, unicodesetitem=0, call=2, + copyunicodecontent=4, newunicode=2) def test_strconcat_escape_str_char(self): _str, _chr = self._str, self._chr @@ -192,12 +191,11 @@ return 42 self.meta_interp(f, [6, 7]) if _str is str: - self.check_loops(newstr=1, strsetitem=1, copystrcontent=1, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, copystrcontent=2, strsetitem=2, + call=2, newstr=2) else: - self.check_loops(newunicode=1, unicodesetitem=1, - copyunicodecontent=1, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, unicodesetitem=2, call=2, + copyunicodecontent=2, newunicode=2) def test_strconcat_escape_char_str(self): _str, _chr = self._str, self._chr @@ -216,12 +214,11 @@ return 42 self.meta_interp(f, [6, 7]) if _str is str: - self.check_loops(newstr=1, strsetitem=1, copystrcontent=1, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, copystrcontent=2, + strsetitem=2, call=2, newstr=2) else: - self.check_loops(newunicode=1, unicodesetitem=1, - copyunicodecontent=1, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, unicodesetitem=2, call=2, + copyunicodecontent=2, newunicode=2) def test_strconcat_escape_char_char(self): _str, _chr = self._str, self._chr @@ -239,12 +236,11 @@ return 42 self.meta_interp(f, [6, 7]) if _str is str: - self.check_loops(newstr=1, strsetitem=2, copystrcontent=0, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, copystrcontent=0, + strsetitem=4, call=2, newstr=2) else: - self.check_loops(newunicode=1, unicodesetitem=2, - copyunicodecontent=0, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, unicodesetitem=4, call=2, + copyunicodecontent=0, newunicode=2) def test_strconcat_escape_str_char_str(self): _str, _chr = self._str, self._chr @@ -263,12 +259,11 @@ return 42 self.meta_interp(f, [6, 7]) if _str is str: - self.check_loops(newstr=1, strsetitem=1, copystrcontent=2, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, copystrcontent=4, strsetitem=2, + call=2, newstr=2) else: - self.check_loops(newunicode=1, unicodesetitem=1, - copyunicodecontent=2, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, unicodesetitem=2, call=2, + copyunicodecontent=4, newunicode=2) def test_strconcat_guard_fail(self): _str = self._str @@ -325,7 +320,7 @@ m -= 1 return 42 self.meta_interp(f, [6, 7]) - self.check_loops(newstr=0, newunicode=0) + self.check_resops(newunicode=0, newstr=0) def test_str_slice_len_surviving(self): _str = self._str @@ -491,7 +486,7 @@ def __init__(self, s): self.defaultencoding = s _str = self._str - sys = Sys(_str('ascii')) + sys = Sys(_str('ascii')) mydriver = JitDriver(reds = ['n', 'sa'], greens = []) def f(n): sa = 0 @@ -504,13 +499,13 @@ sys.defaultencoding = _str('utf-8') return sa assert self.meta_interp(f, [8]) == f(8) - self.check_loops({'int_add': 1, 'guard_true': 1, 'int_sub': 1, - 'jump': 1, 'int_is_true': 1, - 'guard_not_invalidated': 1}) + self.check_resops({'jump': 2, 'int_is_true': 2, 'int_add': 2, + 'guard_true': 2, 'guard_not_invalidated': 2, + 'int_sub': 2}) def test_promote_string(self): driver = JitDriver(greens = [], reds = ['n']) - + def f(n): while n < 21: driver.jit_merge_point(n=n) @@ -519,7 +514,7 @@ return 0 
self.meta_interp(f, [0]) - self.check_loops(call=3 + 1) # one for int2str + self.check_resops(call=7) #class TestOOtype(StringTests, OOJitMixin): # CALL = "oosend" @@ -552,9 +547,8 @@ m -= 1 return 42 self.meta_interp(f, [6, 7]) - self.check_loops(call=1, # escape() - newunicode=1, unicodegetitem=0, - unicodesetitem=1, copyunicodecontent=1) + self.check_resops(unicodesetitem=2, newunicode=2, call=4, + copyunicodecontent=2, unicodegetitem=0) def test_str2unicode_fold(self): _str = self._str @@ -572,9 +566,9 @@ m -= 1 return 42 self.meta_interp(f, [6, 7]) - self.check_loops(call_pure=0, call=1, - newunicode=0, unicodegetitem=0, - unicodesetitem=0, copyunicodecontent=0) + self.check_resops(call_pure=0, unicodesetitem=0, call=2, + newunicode=0, unicodegetitem=0, + copyunicodecontent=0) def test_join_chars(self): jitdriver = JitDriver(reds=['a', 'b', 'c', 'i'], greens=[]) @@ -596,9 +590,8 @@ # The "".join should be unrolled, since the length of x is known since # it is virtual, ensure there are no calls to ll_join_chars, or # allocations. - self.check_loops({ - "guard_true": 5, "int_is_true": 3, "int_lt": 2, "int_add": 2, "jump": 2, - }, everywhere=True) + self.check_resops({'jump': 2, 'guard_true': 5, 'int_lt': 2, + 'int_add': 2, 'int_is_true': 3}) def test_virtual_copystringcontent(self): jitdriver = JitDriver(reds=['n', 'result'], greens=[]) diff --git a/pypy/jit/metainterp/test/test_tl.py b/pypy/jit/metainterp/test/test_tl.py --- a/pypy/jit/metainterp/test/test_tl.py +++ b/pypy/jit/metainterp/test/test_tl.py @@ -72,16 +72,16 @@ res = self.meta_interp(main, [0, 6], listops=True, backendopt=True) assert res == 5040 - self.check_loops({'int_mul':1, 'jump':1, - 'int_sub':1, 'int_le':1, 'guard_false':1}) + self.check_resops({'jump': 2, 'int_le': 2, 'guard_value': 1, + 'int_mul': 2, 'guard_false': 2, 'int_sub': 2}) def test_tl_2(self): main = self._get_main() res = self.meta_interp(main, [1, 10], listops=True, backendopt=True) assert res == main(1, 10) - self.check_loops({'int_sub':1, 'int_le':1, - 'guard_false':1, 'jump':1}) + self.check_resops({'int_le': 2, 'int_sub': 2, 'jump': 2, + 'guard_false': 2, 'guard_value': 1}) def test_tl_call(self, listops=True, policy=None): from pypy.jit.tl.tl import interp diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py --- a/pypy/jit/metainterp/test/test_tracingopts.py +++ b/pypy/jit/metainterp/test/test_tracingopts.py @@ -593,6 +593,32 @@ res = self.interp_operations(fn, [sys.maxint]) assert res == 12 + def test_opaque_list(self): + from pypy.rlib.rerased import new_erasing_pair + erase, unerase = new_erasing_pair("test_opaque_list") + def fn(n, ca, cb): + l1 = [n] + l2 = [n] + a1 = erase(l1) + a2 = erase(l1) + a = a1 + if ca: + a = a2 + if n < -100: + unerase(a).append(5) + b = a1 + if cb: + b = a + return unerase(a)[0] + unerase(b)[0] + res = self.interp_operations(fn, [7, 0, 1]) + assert res == 7 * 2 + self.check_operations_history(getarrayitem_gc=0, + getfield_gc=0) + res = self.interp_operations(fn, [-7, 1, 1]) + assert res == -7 * 2 + self.check_operations_history(getarrayitem_gc=0, + getfield_gc=0) + def test_copy_str_content(self): def fn(n): a = StringBuilder() @@ -601,4 +627,4 @@ return x[0] res = self.interp_operations(fn, [0]) assert res == 1 - self.check_operations_history(getarrayitem_gc=0, getarrayitem_gc_pure=0 ) \ No newline at end of file + self.check_operations_history(getarrayitem_gc=0, getarrayitem_gc_pure=0) diff --git a/pypy/jit/metainterp/test/test_virtual.py 
b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -31,8 +31,9 @@ res = self.meta_interp(f, [10]) assert res == 55 * 10 self.check_loop_count(1) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, + getfield_gc=2, new=0) + def test_virtualized2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node1', 'node2']) @@ -53,8 +54,8 @@ n -= 1 return node1.value * node2.value assert f(10) == self.meta_interp(f, [10]) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=2, + new=0) def test_virtualized_circular1(self): class MyNode(): @@ -79,8 +80,8 @@ res = self.meta_interp(f, [10]) assert res == 55 * 10 self.check_loop_count(1) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, + getfield_gc=3, new=0) def test_virtualized_float(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -97,7 +98,7 @@ res = self.meta_interp(f, [10]) assert res == f(10) self.check_loop_count(1) - self.check_loops(new=0, float_add=0) + self.check_resops(new=0, float_add=1) def test_virtualized_float2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -115,7 +116,8 @@ res = self.meta_interp(f, [10]) assert res == f(10) self.check_loop_count(1) - self.check_loops(new=0, float_add=1) + self.check_resops(new=0, float_add=2) + def test_virtualized_2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -139,8 +141,8 @@ res = self.meta_interp(f, [10]) assert res == 55 * 30 self.check_loop_count(1) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=2, + new=0) def test_nonvirtual_obj_delays_loop(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -160,8 +162,8 @@ res = self.meta_interp(f, [500]) assert res == 640 self.check_loop_count(1) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, + getfield_gc=1, new=0) def test_two_loops_with_virtual(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -184,8 +186,9 @@ res = self.meta_interp(f, [18]) assert res == f(18) self.check_loop_count(2) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, + getfield_gc=2, new=0) + def test_two_loops_with_escaping_virtual(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -212,8 +215,8 @@ res = self.meta_interp(f, [20], policy=StopAtXPolicy(externfn)) assert res == f(20) self.check_loop_count(3) - self.check_loops(**{self._new_op: 1}) - self.check_loops(int_mul=0, call=1) + self.check_resops(**{self._new_op: 1}) + self.check_resops(int_mul=0, call=1) def test_two_virtuals(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'prev']) @@ -236,7 +239,7 @@ res = self.meta_interp(f, [12]) assert res == 78 - self.check_loops(new_with_vtable=0, new=0) + self.check_resops(new_with_vtable=0, new=0) def test_specialied_bridge(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) @@ -281,7 +284,7 @@ res = self.meta_interp(f, [20]) assert res == 9 - self.check_loops(new_with_vtable=0, new=0) + self.check_resops(new_with_vtable=0, new=0) def 
test_immutable_constant_getfield(self): myjitdriver = JitDriver(greens = ['stufflist'], reds = ['n', 'i']) @@ -307,7 +310,7 @@ res = self.meta_interp(f, [10, 1, 0], listops=True) assert res == 0 - self.check_loops(getfield_gc=0) + self.check_resops(getfield_gc=0) def test_escapes(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'parent']) @@ -336,7 +339,7 @@ res = self.meta_interp(f, [10], policy=StopAtXPolicy(g)) assert res == 3 - self.check_loops(**{self._new_op: 1}) + self.check_resops(**{self._new_op: 1}) def test_virtual_on_virtual(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'parent']) @@ -366,7 +369,7 @@ res = self.meta_interp(f, [10]) assert res == 2 - self.check_loops(new=0, new_with_vtable=0) + self.check_resops(new=0, new_with_vtable=0) def test_bridge_from_interpreter(self): mydriver = JitDriver(reds = ['n', 'f'], greens = []) @@ -609,7 +612,7 @@ return node.value res = self.meta_interp(f, [48, 3], policy=StopAtXPolicy(externfn)) assert res == f(48, 3) - self.check_loop_count(3) + self.check_loop_count(5) res = self.meta_interp(f, [40, 3], policy=StopAtXPolicy(externfn)) assert res == f(40, 3) self.check_loop_count(3) @@ -758,6 +761,27 @@ res = self.meta_interp(f, [0x1F, 0x11]) assert res == f(0x1F, 0x11) + def test_duplicated_virtual(self): + myjitdriver = JitDriver(greens = [], reds = ['n', 'node1', 'node2']) + def f(n): + node1 = self._new() + node1.value = 0 + node2 = self._new() + node2.value = 1 + while n > 0: + myjitdriver.jit_merge_point(n=n, node1=node1, node2=node2) + next = self._new() + next.value = node1.value + node2.value + n + node1 = next + node2 = next + n -= 1 + return node1.value + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_resops(new_with_vtable=0, new=0) + + + class VirtualMiscTests: def test_multiple_equal_virtuals(self): @@ -841,7 +865,7 @@ del t2 return i assert self.meta_interp(f, []) == 10 - self.check_loops(new_array=0) + self.check_resops(new_array=0) def test_virtual_streq_bug(self): mydriver = JitDriver(reds = ['i', 's', 'a'], greens = []) @@ -942,8 +966,8 @@ res = self.meta_interp(f, [16]) assert res == f(16) - self.check_loops(getfield_gc=2) - + self.check_resops(getfield_gc=7) + # ____________________________________________________________ # Run 1: all the tests instantiate a real RPython class @@ -985,10 +1009,8 @@ res = self.meta_interp(f, [10]) assert res == 20 self.check_loop_count(1) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) - - + self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=0, + new=0) class TestOOtype_Instance(VirtualTests, OOJitMixin): _new_op = 'new_with_vtable' diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -77,7 +77,7 @@ return xy.inst_x res = self.meta_interp(f, [20]) assert res == 30 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_preexisting_access_2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -102,7 +102,7 @@ assert f(5) == 185 res = self.meta_interp(f, [5]) assert res == 185 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_two_paths_access(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -124,7 +124,7 @@ return xy.inst_x res = self.meta_interp(f, [18]) assert res == 10118 - self.check_loops(getfield_gc=0, 
setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_synchronize_in_return(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -146,7 +146,7 @@ return xy.inst_x res = self.meta_interp(f, [18]) assert res == 10180 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_virtualizable_and_greens(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n', 'xy'], @@ -174,7 +174,7 @@ return res res = self.meta_interp(f, [40]) assert res == 50 * 4 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_double_frame(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy', 'other'], @@ -197,8 +197,7 @@ return xy.inst_x res = self.meta_interp(f, [20]) assert res == 134 - self.check_loops(getfield_gc=0, setfield_gc=1) - self.check_loops(getfield_gc=1, setfield_gc=2, everywhere=True) + self.check_resops(setfield_gc=2, getfield_gc=1) # ------------------------------ @@ -248,8 +247,8 @@ return xy2.inst_l1[2] res = self.meta_interp(f, [16]) assert res == 3001 + 16 * 80 - self.check_loops(getfield_gc=0, setfield_gc=0, - getarrayitem_gc=0, setarrayitem_gc=0) + self.check_resops(setarrayitem_gc=0, setfield_gc=0, + getarrayitem_gc=0, getfield_gc=0) def test_synchronize_arrays_in_return(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -279,8 +278,7 @@ assert f(18) == 10360 res = self.meta_interp(f, [18]) assert res == 10360 - self.check_loops(getfield_gc=0, setfield_gc=0, - getarrayitem_gc=0) + self.check_resops(setfield_gc=0, getarrayitem_gc=0, getfield_gc=0) def test_array_length(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -306,8 +304,8 @@ return xy2.inst_l1[1] res = self.meta_interp(f, [18]) assert res == 2941309 + 18 - self.check_loops(getfield_gc=0, setfield_gc=0, - getarrayitem_gc=0, arraylen_gc=0) + self.check_resops(setfield_gc=0, getarrayitem_gc=0, + arraylen_gc=0, getfield_gc=0) def test_residual_function(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -340,8 +338,8 @@ return xy2.inst_l1[1] res = self.meta_interp(f, [18]) assert res == 2941309 + 18 - self.check_loops(getfield_gc=0, setfield_gc=0, - getarrayitem_gc=0, arraylen_gc=1, call=1) + self.check_resops(call=2, setfield_gc=0, getarrayitem_gc=0, + arraylen_gc=2, getfield_gc=0) def test_double_frame_array(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2', 'other'], @@ -377,8 +375,8 @@ expected = f(20) res = self.meta_interp(f, [20], enable_opts='') assert res == expected - self.check_loops(getfield_gc=1, setfield_gc=0, - arraylen_gc=1, getarrayitem_gc=1, setarrayitem_gc=1) + self.check_resops(setarrayitem_gc=1, setfield_gc=0, + getarrayitem_gc=1, arraylen_gc=1, getfield_gc=1) # ------------------------------ @@ -425,8 +423,7 @@ assert f(18) == 10360 res = self.meta_interp(f, [18]) assert res == 10360 - self.check_loops(getfield_gc=0, setfield_gc=0, - getarrayitem_gc=0) + self.check_resops(setfield_gc=0, getarrayitem_gc=0, getfield_gc=0) # ------------------------------ @@ -460,8 +457,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_loops(getfield_gc=0, setfield_gc=0) - + self.check_resops(setfield_gc=0, getfield_gc=0) def test_virtualizable_with_array(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'x', 'frame'], @@ -495,8 +491,7 @@ res = self.meta_interp(f, [10, 1], listops=True) assert res == f(10, 1) - self.check_loops(getarrayitem_gc=0) - + self.check_resops(getarrayitem_gc=0) def 
test_subclass_of_virtualizable(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -524,8 +519,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_loops(getfield_gc=0, setfield_gc=0) - + self.check_resops(setfield_gc=0, getfield_gc=0) def test_external_pass(self): jitdriver = JitDriver(greens = [], reds = ['n', 'z', 'frame'], @@ -1011,8 +1005,8 @@ res = self.meta_interp(f, [70], listops=True) assert res == intmask(42 ** 70) - self.check_loops(int_add=0, - int_sub=1) # for 'n -= 1' only + self.check_resops(int_add=0, + int_sub=2) # for 'n -= 1' only def test_simple_access_directly(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -1043,7 +1037,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) from pypy.jit.backend.test.support import BaseCompiledMixin if isinstance(self, BaseCompiledMixin): @@ -1098,42 +1092,42 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_loops(new_with_vtable=0) + self.check_resops(new_with_vtable=0) def test_check_for_nonstandardness_only_once(self): - myjitdriver = JitDriver(greens = [], reds = ['frame'], - virtualizables = ['frame']) + myjitdriver = JitDriver(greens = [], reds = ['frame'], + virtualizables = ['frame']) - class Frame(object): - _virtualizable2_ = ['x', 'y', 'z'] + class Frame(object): + _virtualizable2_ = ['x', 'y', 'z'] - def __init__(self, x, y, z=1): - self = hint(self, access_directly=True) - self.x = x - self.y = y - self.z = z + def __init__(self, x, y, z=1): + self = hint(self, access_directly=True) + self.x = x + self.y = y + self.z = z - class SomewhereElse: - pass - somewhere_else = SomewhereElse() + class SomewhereElse: + pass + somewhere_else = SomewhereElse() - def f(n): - frame = Frame(n, 0) - somewhere_else.top_frame = frame # escapes - frame = hint(frame, access_directly=True) - while frame.x > 0: - myjitdriver.can_enter_jit(frame=frame) - myjitdriver.jit_merge_point(frame=frame) - top_frame = somewhere_else.top_frame - child_frame = Frame(frame.x, top_frame.z, 17) - frame.y += child_frame.x - frame.x -= top_frame.z - return somewhere_else.top_frame.y - - res = self.meta_interp(f, [10]) - assert res == 55 - self.check_loops(new_with_vtable=0, ptr_eq=1, everywhere=True) - self.check_history(ptr_eq=2) + def f(n): + frame = Frame(n, 0) + somewhere_else.top_frame = frame # escapes + frame = hint(frame, access_directly=True) + while frame.x > 0: + myjitdriver.can_enter_jit(frame=frame) + myjitdriver.jit_merge_point(frame=frame) + top_frame = somewhere_else.top_frame + child_frame = Frame(frame.x, top_frame.z, 17) + frame.y += child_frame.x + frame.x -= top_frame.z + return somewhere_else.top_frame.y + + res = self.meta_interp(f, [10]) + assert res == 55 + self.check_resops(new_with_vtable=0, ptr_eq=1) + self.check_history(ptr_eq=2) def test_virtual_child_frame_with_arrays(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -1165,7 +1159,7 @@ res = self.meta_interp(f, [10], listops=True) assert res == 55 - self.check_loops(new_with_vtable=0) + self.check_resops(new_with_vtable=0) def test_blackhole_should_not_pay_attention(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -1203,7 +1197,7 @@ res = self.meta_interp(f, [10]) assert res == 155 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_blackhole_should_synchronize(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -1239,7 +1233,7 
@@ res = self.meta_interp(f, [10]) assert res == 155 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_blackhole_should_not_reenter(self): if not self.basic: diff --git a/pypy/jit/metainterp/test/test_virtualref.py b/pypy/jit/metainterp/test/test_virtualref.py --- a/pypy/jit/metainterp/test/test_virtualref.py +++ b/pypy/jit/metainterp/test/test_virtualref.py @@ -171,7 +171,7 @@ return 1 # self.meta_interp(f, [10]) - self.check_loops(new_with_vtable=1) # the vref + self.check_resops(new_with_vtable=2) # the vref self.check_aborted_count(0) def test_simple_all_removed(self): @@ -205,8 +205,7 @@ virtual_ref_finish(vref, xy) # self.meta_interp(f, [15]) - self.check_loops(new_with_vtable=0, # all virtualized - new_array=0) + self.check_resops(new_with_vtable=0, new_array=0) self.check_aborted_count(0) def test_simple_no_access(self): @@ -242,7 +241,7 @@ virtual_ref_finish(vref, xy) # self.meta_interp(f, [15]) - self.check_loops(new_with_vtable=1, # the vref: xy doesn't need to be forced + self.check_resops(new_with_vtable=2, # the vref: xy doesn't need to be forced new_array=0) # and neither xy.next1/2/3 self.check_aborted_count(0) @@ -280,8 +279,8 @@ exctx.topframeref = vref_None # self.meta_interp(f, [15]) - self.check_loops(new_with_vtable=2, # XY(), the vref - new_array=3) # next1/2/3 + self.check_resops(new_with_vtable=4, # XY(), the vref + new_array=6) # next1/2/3 self.check_aborted_count(0) def test_simple_force_sometimes(self): @@ -320,8 +319,8 @@ # res = self.meta_interp(f, [30]) assert res == 13 - self.check_loops(new_with_vtable=1, # the vref, but not XY() - new_array=0) # and neither next1/2/3 + self.check_resops(new_with_vtable=2, # the vref, but not XY() + new_array=0) # and neither next1/2/3 self.check_loop_count(1) self.check_aborted_count(0) @@ -362,7 +361,7 @@ # res = self.meta_interp(f, [30]) assert res == 13 - self.check_loops(new_with_vtable=0, # all virtualized in the n!=13 loop + self.check_resops(new_with_vtable=0, # all virtualized in the n!=13 loop new_array=0) self.check_loop_count(1) self.check_aborted_count(0) @@ -412,7 +411,7 @@ res = self.meta_interp(f, [72]) assert res == 6 self.check_loop_count(2) # the loop and the bridge - self.check_loops(new_with_vtable=2, # loop: nothing; bridge: vref, xy + self.check_resops(new_with_vtable=2, # loop: nothing; bridge: vref, xy new_array=2) # bridge: next4, next5 self.check_aborted_count(0) @@ -442,8 +441,8 @@ # res = self.meta_interp(f, [15]) assert res == 1 - self.check_loops(new_with_vtable=2, # vref, xy - new_array=1) # next1 + self.check_resops(new_with_vtable=4, # vref, xy + new_array=2) # next1 self.check_aborted_count(0) def test_recursive_call_1(self): @@ -543,7 +542,7 @@ # res = self.meta_interp(f, [15]) assert res == 1 - self.check_loops(new_with_vtable=2) # vref, xy + self.check_resops(new_with_vtable=4) # vref, xy def test_cannot_use_invalid_virtualref(self): myjitdriver = JitDriver(greens = [], reds = ['n']) diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -100,12 +100,12 @@ # check that the set_param will override the default res = self.meta_interp(f, [10, llstr('')]) assert res == 0 - self.check_loops(new_with_vtable=1) + self.check_resops(new_with_vtable=1) res = self.meta_interp(f, [10, llstr(ALL_OPTS_NAMES)], enable_opts='') assert res == 0 - self.check_loops(new_with_vtable=0) + 
self.check_resops(new_with_vtable=0) def test_unwanted_loops(self): mydriver = JitDriver(reds = ['n', 'total', 'm'], greens = []) @@ -160,7 +160,7 @@ return n self.meta_interp(f, [50], backendopt=True) self.check_enter_count_at_most(2) - self.check_loops(call=0) + self.check_resops(call=0) def test_loop_header(self): # artificial test: we enter into the JIT only when can_enter_jit() @@ -184,7 +184,7 @@ assert f(15) == 1 res = self.meta_interp(f, [15], backendopt=True) assert res == 1 - self.check_loops(int_add=1) # I get 13 without the loop_header() + self.check_resops(int_add=2) # I get 13 without the loop_header() def test_omit_can_enter_jit(self): # Simple test comparing the effects of always giving a can_enter_jit(), @@ -246,8 +246,8 @@ m = m - 1 self.meta_interp(f1, [8]) self.check_loop_count(1) - self.check_loops({'int_sub': 1, 'int_gt': 1, 'guard_true': 1, - 'jump': 1}) + self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + 'int_sub': 2}) def test_void_red_variable(self): mydriver = JitDriver(greens=[], reds=['a', 'm']) diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -255,10 +255,8 @@ s_binding = self.translator.annotator.binding jd._portal_args_s = [s_binding(v) for v in args] graph = copygraph(graph) - graph.startblock.isstartblock = False [jmpp] = find_jit_merge_points([graph]) graph.startblock = support.split_before_jit_merge_point(*jmpp) - graph.startblock.isstartblock = True # a crash in the following checkgraph() means that you forgot # to list some variable in greens=[] or reds=[] in JitDriver, # or that a jit_merge_point() takes a constant as an argument. diff --git a/pypy/jit/tl/spli/test/test_jit.py b/pypy/jit/tl/spli/test/test_jit.py --- a/pypy/jit/tl/spli/test/test_jit.py +++ b/pypy/jit/tl/spli/test/test_jit.py @@ -36,7 +36,7 @@ i = i + 1 return i self.interpret(f, []) - self.check_loops(new_with_vtable=0) + self.check_resops(new_with_vtable=0) def test_bridge(self): py.test.skip('We currently cant virtualize across bridges') @@ -52,7 +52,7 @@ return total self.interpret(f, [1, 10]) - self.check_loops(new_with_vtable=0) + self.check_resops(new_with_vtable=0) def test_bridge_bad_case(self): py.test.skip('We currently cant virtualize across bridges') @@ -67,7 +67,7 @@ return a + b self.interpret(f, [1, 10]) - self.check_loops(new_with_vtable=1) # XXX should eventually be 0? + self.check_resops(new_with_vtable=1) # XXX should eventually be 0? # I think it should be either 0 or 2, 1 makes little sense # If the loop after entering goes first time to the bridge, a # is rewrapped again, without preserving the identity. 
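A rough way to see why so many expected counts change in these diffs (a sketch of the intuition only, not PyPy's actual checker): check_loops counted operations once per optimized loop body, while check_resops counts them over all recorded residual operations, i.e. the preamble plus the peeled loop, so counts for operations that survive in both roughly double.

    # illustrative only; mirrors the check_resops({'jump': 2, ...}) expectations above
    preamble_ops    = ['int_sub', 'int_gt', 'guard_true', 'jump']
    peeled_loop_ops = ['int_sub', 'int_gt', 'guard_true', 'jump']
    resops = preamble_ops + peeled_loop_ops
    assert resops.count('int_sub') == 2      # check_resops(int_sub=2)
    assert resops.count('guard_true') == 2   # check_resops(guard_true=2)
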
I'm not diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -95,17 +95,17 @@ return space.newlist(res_w) -def range_withspecialized_implementation(space, start, step, howmany): +def range_withspecialized_implementation(space, start, step, length): assert space.config.objspace.std.withrangelist - from pypy.objspace.std.rangeobject import W_RangeListObject - return W_RangeListObject(start, step, howmany) + from pypy.objspace.std.listobject import make_range_list + return make_range_list(space, start, step, length) bigint_one = rbigint.fromint(1) def range_with_longs(space, w_start, w_stop, w_step): start = lo = space.bigint_w(w_start) - stop = hi = space.bigint_w(w_stop) + hi = space.bigint_w(w_stop) step = st = space.bigint_w(w_step) if not step.tobool(): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -27,6 +27,7 @@ 'builtinify' : 'interp_magic.builtinify', 'lookup_special' : 'interp_magic.lookup_special', 'do_what_I_mean' : 'interp_magic.do_what_I_mean', + 'list_strategy' : 'interp_magic.list_strategy', } submodules = { diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -5,7 +5,6 @@ from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import IndexCache - def internal_repr(space, w_object): return space.wrap('%r' % (w_object,)) @@ -73,3 +72,11 @@ def do_what_I_mean(space): return space.wrap(42) + +def list_strategy(space, w_list): + from pypy.objspace.std.listobject import W_ListObject + if isinstance(w_list, W_ListObject): + return space.wrap(w_list.strategy._applevel_repr) + else: + w_msg = space.wrap("Can only get the list strategy of a list") + raise OperationError(space.w_TypeError, w_msg) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -5,7 +5,7 @@ def setup_class(cls): if option.runappdirect: py.test.skip("does not make sense on pypy-c") - cls.space = gettestobjspace(**{"objspace.usemodules.select": False}) + cls.space = gettestobjspace(**{"objspace.usemodules.select": False, "objspace.std.withrangelist": True}) def test__isfake(self): from __pypy__ import isfake @@ -54,3 +54,21 @@ from __pypy__ import do_what_I_mean x = do_what_I_mean() assert x == 42 + + def test_list_strategy(self): + from __pypy__ import list_strategy + + l = [1, 2, 3] + assert list_strategy(l) == "int" + l = ["a", "b", "c"] + assert list_strategy(l) == "str" + l = [1.1, 2.2, 3.3] + assert list_strategy(l) == "float" + l = range(3) + assert list_strategy(l) == "range" + l = [1, "b", 3] + assert list_strategy(l) == "object" + l = [] + assert list_strategy(l) == "empty" + o = 5 + raises(TypeError, list_strategy, 5) diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -67,10 +67,7 @@ if self.unicodedata_handler: return self.unicodedata_handler try: - w_builtin = space.getbuiltinmodule('__builtin__') - w_import = space.getattr(w_builtin, space.wrap("__import__")) - w_unicodedata = space.call_function(w_import, - space.wrap("unicodedata")) + 
w_unicodedata = space.getbuiltinmodule("unicodedata") w_getcode = space.getattr(w_unicodedata, space.wrap("_get_code")) except OperationError: return None diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -100,7 +100,6 @@ res, newbuf = self.do_recv_string( space, self.BUFFER_SIZE, maxlength) - res = intmask(res) # XXX why? try: if newbuf: return space.wrap(rffi.charpsize2str(newbuf, res)) @@ -117,7 +116,6 @@ res, newbuf = self.do_recv_string( space, length - offset, PY_SSIZE_T_MAX) - res = intmask(res) # XXX why? try: if newbuf: raise BufferTooShort(space, space.wrap( @@ -148,7 +146,6 @@ res, newbuf = self.do_recv_string( space, self.BUFFER_SIZE, PY_SSIZE_T_MAX) - res = intmask(res) # XXX why? try: if newbuf: w_received = space.wrap(rffi.charpsize2str(newbuf, res)) @@ -413,7 +410,7 @@ self.buffer, min(self.BUFFER_SIZE, buflength), read_ptr, rffi.NULL) if result: - return read_ptr[0], lltype.nullptr(rffi.CCHARP.TO) + return intmask(read_ptr[0]), lltype.nullptr(rffi.CCHARP.TO) err = rwin32.GetLastError() if err == ERROR_BROKEN_PIPE: @@ -476,7 +473,7 @@ block = timeout < 0 if not block: # XXX does not check for overflow - deadline = _GetTickCount() + int(1000 * timeout + 0.5) + deadline = intmask(_GetTickCount()) + int(1000 * timeout + 0.5) else: deadline = 0 @@ -500,7 +497,7 @@ return True if not block: - now = _GetTickCount() + now = intmask(_GetTickCount()) if now > deadline: return False diff = deadline - now diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py --- a/pypy/module/_multiprocessing/interp_semaphore.py +++ b/pypy/module/_multiprocessing/interp_semaphore.py @@ -235,7 +235,7 @@ elif timeout >= 0.5 * rwin32.INFINITE: # 25 days raise OperationError(space.w_OverflowError, space.wrap("timeout is too large")) - full_msecs = int(timeout + 0.5) + full_msecs = r_uint(int(timeout + 0.5)) # check whether we can acquire without blocking res = rwin32.WaitForSingleObject(self.handle, 0) @@ -243,7 +243,7 @@ if res != rwin32.WAIT_TIMEOUT: return True - msecs = r_uint(full_msecs) + msecs = full_msecs start = _GetTickCount() while True: @@ -269,7 +269,7 @@ ticks = _GetTickCount() if r_uint(ticks - start) >= full_msecs: return False - msecs = r_uint(full_msecs - (ticks - start)) + msecs = full_msecs - r_uint(ticks - start) # handle result if res != rwin32.WAIT_TIMEOUT: diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -543,6 +543,7 @@ from pypy.rlib.rwin32 import GetLastError return space.wrap(GetLastError()) -def set_last_error(space, w_error): + at unwrap_spec(error=int) +def set_last_error(space, error): from pypy.rlib.rwin32 import SetLastError - SetLastError(space.uint_w(w_error)) + SetLastError(error) diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ b/pypy/module/_weakref/test/test_weakref.py @@ -324,6 +324,7 @@ class A(object): pass a = A() assert _weakref.proxy(a) is _weakref.proxy(a) + assert _weakref.proxy(a) is _weakref.proxy(a, None) def test_callable_proxy(self): import _weakref, gc diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ 
b/pypy/module/cpyext/listobject.py @@ -32,7 +32,7 @@ Py_DecRef(space, w_item) if not isinstance(w_list, W_ListObject): PyErr_BadInternalCall(space) - wrappeditems = w_list.wrappeditems + wrappeditems = w_list.getitems() if index < 0 or index >= len(wrappeditems): raise OperationError(space.w_IndexError, space.wrap( "list assignment index out of range")) @@ -47,7 +47,7 @@ IndexError exception.""" if not isinstance(w_list, W_ListObject): PyErr_BadInternalCall(space) - wrappeditems = w_list.wrappeditems + wrappeditems = w_list.getitems() if index < 0 or index >= len(wrappeditems): raise OperationError(space.w_IndexError, space.wrap( "list index out of range")) @@ -74,7 +74,7 @@ """Macro form of PyList_Size() without error checking. """ assert isinstance(w_list, W_ListObject) - return len(w_list.wrappeditems) + return len(w_list.getitems()) @cpython_api([PyObject], Py_ssize_t, error=-1) diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -56,7 +56,7 @@ PySequence_Fast(), o is not NULL, and that i is within bounds. """ if isinstance(w_obj, listobject.W_ListObject): - w_res = w_obj.wrappeditems[index] + w_res = w_obj.getitem(index) else: assert isinstance(w_obj, tupleobject.W_TupleObject) w_res = w_obj.wrappeditems[index] @@ -70,7 +70,7 @@ PySequence_Fast_GET_SIZE() is faster because it can assume o is a list or tuple.""" if isinstance(w_obj, listobject.W_ListObject): - return len(w_obj.wrappeditems) + return w_obj.length() assert isinstance(w_obj, tupleobject.W_TupleObject) return len(w_obj.wrappeditems) diff --git a/pypy/module/gc/test/test_referents.py b/pypy/module/gc/test/test_referents.py --- a/pypy/module/gc/test/test_referents.py +++ b/pypy/module/gc/test/test_referents.py @@ -7,9 +7,13 @@ from pypy.rlib import rgc cls._backup = [rgc.get_rpy_roots] w = cls.space.wrap + space = cls.space class RandomRPythonObject(object): pass - cls.ALL_ROOTS = [w(4), w([2, 7]), RandomRPythonObject()] + l4 = space.newlist([w(4)]) + l2 = space.newlist([w(2)]) + l7 = space.newlist([w(7)]) + cls.ALL_ROOTS = [l4, space.newlist([l2, l7]), RandomRPythonObject()] cls.w_ALL_ROOTS = cls.space.newlist(cls.ALL_ROOTS) rgc.get_rpy_roots = lambda: ( map(rgc._GcRef, cls.ALL_ROOTS) + [rgc.NULL_GCREF]*17) @@ -41,14 +45,14 @@ if self.runappdirect: pass # unsure what to test else: - assert lst[0] == 4 - assert lst[1] == [2, 7] + assert lst[0] == [4] + assert lst[1] == [[2], [7]] assert type(lst[2]) is gc.GcRef assert len(lst) == 3 def test_get_rpy_referents(self): import gc - y = 12345 + y = [12345] x = [y] lst = gc.get_rpy_referents(x) # After translation, 'lst' should contain the RPython-level list @@ -88,8 +92,8 @@ def test_get_referents(self): import gc - y = 12345 - z = 23456 + y = [12345] + z = [23456] x = [y, z] lst = gc.get_referents(x) assert y in lst and z in lst diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -5,14 +5,17 @@ applevel_name = 'numpypy' interpleveldefs = { - 'array': 'interp_numarray.SingleDimArray', + 'ndarray': 'interp_numarray.W_NDimArray', 'dtype': 'interp_dtype.W_Dtype', 'ufunc': 'interp_ufuncs.W_Ufunc', + 'array': 'interp_numarray.array', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', 'ones': 'interp_numarray.ones', + 'dot': 'interp_numarray.dot', 'fromstring': 'interp_support.fromstring', + 'flatiter': 'interp_numarray.W_FlatIterator', 'True_': 
'types.Bool.True', 'False_': 'types.Bool.False', @@ -23,6 +26,9 @@ 'signedinteger': 'interp_boxes.W_SignedIntegerBox', 'bool_': 'interp_boxes.W_BoolBox', 'int8': 'interp_boxes.W_Int8Box', + 'int16': 'interp_boxes.W_Int16Box', + 'int32': 'interp_boxes.W_Int32Box', + 'int64': 'interp_boxes.W_Int64Box', 'int_': 'interp_boxes.W_LongBox', 'inexact': 'interp_boxes.W_InexactBox', 'floating': 'interp_boxes.W_FloatingBox', @@ -61,6 +67,7 @@ ("sign", "sign"), ("sin", "sin"), ("subtract", "subtract"), + ('sqrt', 'sqrt'), ("tan", "tan"), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl @@ -70,4 +77,5 @@ 'mean': 'app_numpy.mean', 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', + 'arange': 'app_numpy.arange', } diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -6,12 +6,33 @@ inf = float("inf") e = math.e + def average(a): # This implements a weighted average, for now we don't implement the # weighting, just the average part! return mean(a) + def mean(a): if not hasattr(a, "mean"): a = numpypy.array(a) return a.mean() + + +def arange(start, stop=None, step=1, dtype=None): + '''arange([start], stop[, step], dtype=None) + Generate values in the half-interval [start, stop). + ''' + if stop is None: + stop = start + start = 0 + if dtype is None: + test = numpypy.array([start, stop, step, 0]) + dtype = test.dtype + arr = numpypy.zeros(int(math.ceil((stop - start) / step)), dtype=dtype) + i = start + for j in range(arr.size): + arr[j] = i + j += 1 + i += step + return arr diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -3,11 +3,13 @@ It should not be imported by the module itself """ +import re + from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root from pypy.module.micronumpy import interp_boxes from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.module.micronumpy.interp_numarray import (Scalar, BaseArray, - descr_new_array, scalar_w, SingleDimArray) + scalar_w, W_NDimArray, array) from pypy.module.micronumpy import interp_ufuncs from pypy.rlib.objectmodel import specialize, instantiate @@ -24,11 +26,18 @@ class WrongFunctionName(Exception): pass +class TokenizerError(Exception): + pass + +class BadToken(Exception): + pass + SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", "unegative"] class FakeSpace(object): w_ValueError = None w_TypeError = None + w_IndexError = None w_None = None w_bool = "bool" @@ -37,19 +46,35 @@ w_list = "list" w_long = "long" w_tuple = 'tuple' + w_slice = "slice" def __init__(self): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild def issequence_w(self, w_obj): - return isinstance(w_obj, ListObject) or isinstance(w_obj, SingleDimArray) + return isinstance(w_obj, ListObject) or isinstance(w_obj, W_NDimArray) def isinstance_w(self, w_obj, w_tp): - return False + return w_obj.tp == w_tp def decode_index4(self, w_idx, size): - return (self.int_w(self.int(w_idx)), 0, 0, 1) + if isinstance(w_idx, IntObject): + return (self.int_w(w_idx), 0, 0, 1) + else: + assert isinstance(w_idx, SliceObject) + start, stop, step = w_idx.start, w_idx.stop, w_idx.step + if step == 0: + return (0, size, 1, size) + if start < 0: + start += size + if stop < 0: + stop += size + 1 + if step < 0: + lgt = (stop - start + 1) / step + 1 + else: + lgt = (stop - start - 1) / step + 1 + return 
(start, stop, step, lgt) @specialize.argtype(1) def wrap(self, obj): @@ -59,7 +84,9 @@ return BoolObject(obj) elif isinstance(obj, int): return IntObject(obj) - raise Exception + elif isinstance(obj, W_Root): + return obj + raise NotImplementedError def newlist(self, items): return ListObject(items) @@ -67,6 +94,7 @@ def listview(self, obj): assert isinstance(obj, ListObject) return obj.items + fixedview = listview def float(self, w_obj): if isinstance(w_obj, FloatObject): @@ -115,6 +143,12 @@ def allocate_instance(self, klass, w_subtype): return instantiate(klass) + def len_w(self, w_obj): + if isinstance(w_obj, ListObject): + return len(w_obj.items) + # XXX array probably + assert False + class FloatObject(W_Root): tp = FakeSpace.w_float def __init__(self, floatval): @@ -135,6 +169,13 @@ def __init__(self, items): self.items = items +class SliceObject(W_Root): + tp = FakeSpace.w_slice + def __init__(self, start, stop, step): + self.start = start + self.stop = stop + self.step = step + class InterpreterState(object): def __init__(self, code): self.code = code @@ -169,7 +210,7 @@ interp.variables[self.name] = self.expr.execute(interp) def __repr__(self): - return "%% = %r" % (self.name, self.expr) + return "%r = %r" % (self.name, self.expr) class ArrayAssignment(Node): def __init__(self, name, index, expr): @@ -180,18 +221,19 @@ def execute(self, interp): arr = interp.variables[self.name] w_index = self.index.execute(interp) - assert isinstance(w_index, BaseArray) + # cast to int + if isinstance(w_index, FloatObject): + w_index = IntObject(int(w_index.floatval)) w_val = self.expr.execute(interp) - assert isinstance(w_val, BaseArray) assert isinstance(arr, BaseArray) - arr.descr_setitem(interp.space, w_index.eval(0), w_val.eval(0)) + arr.descr_setitem(interp.space, w_index, w_val) def __repr__(self): return "%s[%r] = %r" % (self.name, self.index, self.expr) class Variable(Node): def __init__(self, name): - self.name = name + self.name = name.strip(" ") def execute(self, interp): return interp.variables[self.name] @@ -207,11 +249,15 @@ def execute(self, interp): w_lhs = self.lhs.execute(interp) + if isinstance(self.rhs, SliceConstant): + w_rhs = self.rhs.wrap(interp.space) + else: + w_rhs = self.rhs.execute(interp) + if not isinstance(w_lhs, BaseArray): + # scalar + dtype = get_dtype_cache(interp.space).w_float64dtype + w_lhs = scalar_w(interp.space, dtype, w_lhs) assert isinstance(w_lhs, BaseArray) - if isinstance(self.rhs, SliceConstant): - # XXX interface has changed on multidim branch - raise NotImplementedError - w_rhs = self.rhs.execute(interp) if self.name == '+': w_res = w_lhs.descr_add(interp.space, w_rhs) elif self.name == '*': @@ -219,12 +265,11 @@ elif self.name == '-': w_res = w_lhs.descr_sub(interp.space, w_rhs) elif self.name == '->': - if isinstance(w_rhs, Scalar): - index = int(interp.space.float_w(interp.space.float(w_rhs.value))) - dtype = get_dtype_cache(interp.space).w_float64dtype - return Scalar(dtype, w_lhs.get_concrete().eval(index)) - else: - raise NotImplementedError + assert not isinstance(w_rhs, Scalar) + if isinstance(w_rhs, FloatObject): + w_rhs = IntObject(int(w_rhs.floatval)) + assert isinstance(w_lhs, BaseArray) + w_res = w_lhs.descr_getitem(interp.space, w_rhs) else: raise NotImplementedError if (not isinstance(w_res, BaseArray) and @@ -247,8 +292,7 @@ return space.wrap(self.v) def execute(self, interp): - dtype = get_dtype_cache(interp.space).w_float64dtype - return Scalar(dtype, dtype.box(self.v)) + return interp.space.wrap(self.v) class 
RangeConstant(Node): def __init__(self, v): @@ -259,7 +303,7 @@ [interp.space.wrap(float(i)) for i in range(self.v)] ) dtype = get_dtype_cache(interp.space).w_float64dtype - return descr_new_array(interp.space, None, w_list, w_dtype=dtype) + return array(interp.space, w_list, w_dtype=dtype, w_order=None) def __repr__(self): return 'Range(%s)' % self.v @@ -281,17 +325,26 @@ def execute(self, interp): w_list = self.wrap(interp.space) dtype = get_dtype_cache(interp.space).w_float64dtype - return descr_new_array(interp.space, None, w_list, w_dtype=dtype) + return array(interp.space, w_list, w_dtype=dtype, w_order=None) def __repr__(self): return "[" + ", ".join([repr(item) for item in self.items]) + "]" class SliceConstant(Node): - def __init__(self): - pass + def __init__(self, start, stop, step): + # no negative support for now + self.start = start + self.stop = stop + self.step = step + + def wrap(self, space): + return SliceObject(self.start, self.stop, self.step) + + def execute(self, interp): + return SliceObject(self.start, self.stop, self.step) def __repr__(self): - return 'slice()' + return 'slice(%s,%s,%s)' % (self.start, self.stop, self.step) class Execute(Node): def __init__(self, expr): @@ -305,7 +358,7 @@ class FunctionCall(Node): def __init__(self, name, args): - self.name = name + self.name = name.strip(" ") self.args = args def __repr__(self): @@ -350,95 +403,172 @@ else: raise WrongFunctionName +_REGEXES = [ + ('-?[\d\.]+', 'number'), + ('\[', 'array_left'), + (':', 'colon'), + ('\w+', 'identifier'), + ('\]', 'array_right'), + ('(->)|[\+\-\*\/]', 'operator'), + ('=', 'assign'), + (',', 'coma'), + ('\|', 'pipe'), + ('\(', 'paren_left'), + ('\)', 'paren_right'), +] +REGEXES = [] + +for r, name in _REGEXES: + REGEXES.append((re.compile(r' *(' + r + ')'), name)) +del _REGEXES + +class Token(object): + def __init__(self, name, v): + self.name = name + self.v = v + + def __repr__(self): + return '(%s, %s)' % (self.name, self.v) + +empty = Token('', '') + +class TokenStack(object): + def __init__(self, tokens): + self.tokens = tokens + self.c = 0 + + def pop(self): + token = self.tokens[self.c] + self.c += 1 + return token + + def get(self, i): + if self.c + i >= len(self.tokens): + return empty + return self.tokens[self.c + i] + + def remaining(self): + return len(self.tokens) - self.c + + def push(self): + self.c -= 1 + + def __repr__(self): + return repr(self.tokens[self.c:]) + class Parser(object): - def parse_identifier(self, id): - id = id.strip(" ") - #assert id.isalpha() - return Variable(id) + def tokenize(self, line): + tokens = [] + while True: + for r, name in REGEXES: + m = r.match(line) + if m is not None: + g = m.group(0) + tokens.append(Token(name, g)) + line = line[len(g):] + if not line: + return TokenStack(tokens) + break + else: + raise TokenizerError(line) - def parse_expression(self, expr): - tokens = [i for i in expr.split(" ") if i] - if len(tokens) == 1: - return self.parse_constant_or_identifier(tokens[0]) + def parse_number_or_slice(self, tokens): + start_tok = tokens.pop() + if start_tok.name == 'colon': + start = 0 + else: + if tokens.get(0).name != 'colon': + return FloatConstant(start_tok.v) + start = int(start_tok.v) + tokens.pop() + if not tokens.get(0).name in ['colon', 'number']: + stop = -1 + step = 1 + else: + next = tokens.pop() + if next.name == 'colon': + stop = -1 + step = int(tokens.pop().v) + else: + stop = int(next.v) + if tokens.get(0).name == 'colon': + tokens.pop() + step = int(tokens.pop().v) + else: + step = 1 + return 
SliceConstant(start, stop, step) + + + def parse_expression(self, tokens): stack = [] - tokens.reverse() - while tokens: + while tokens.remaining(): token = tokens.pop() - if token == ')': - raise NotImplementedError - elif self.is_identifier_or_const(token): - if stack: - name = stack.pop().name - lhs = stack.pop() - rhs = self.parse_constant_or_identifier(token) - stack.append(Operator(lhs, name, rhs)) + if token.name == 'identifier': + if tokens.remaining() and tokens.get(0).name == 'paren_left': + stack.append(self.parse_function_call(token.v, tokens)) else: - stack.append(self.parse_constant_or_identifier(token)) + stack.append(Variable(token.v)) + elif token.name == 'array_left': + stack.append(ArrayConstant(self.parse_array_const(tokens))) + elif token.name == 'operator': + stack.append(Variable(token.v)) + elif token.name == 'number' or token.name == 'colon': + tokens.push() + stack.append(self.parse_number_or_slice(tokens)) + elif token.name == 'pipe': + stack.append(RangeConstant(tokens.pop().v)) + end = tokens.pop() + assert end.name == 'pipe' else: - stack.append(Variable(token)) - assert len(stack) == 1 - return stack[-1] + tokens.push() + break + stack.reverse() + lhs = stack.pop() + while stack: + op = stack.pop() + assert isinstance(op, Variable) + rhs = stack.pop() + lhs = Operator(lhs, op.name, rhs) + return lhs - def parse_constant(self, v): - lgt = len(v)-1 - assert lgt >= 0 - if ':' in v: - # a slice - assert v == ':' - return SliceConstant() - if v[0] == '[': - return ArrayConstant([self.parse_constant(elem) - for elem in v[1:lgt].split(",")]) - if v[0] == '|': - return RangeConstant(v[1:lgt]) - return FloatConstant(v) - - def is_identifier_or_const(self, v): - c = v[0] - if ((c >= 'a' and c <= 'z') or (c >= 'A' and c <= 'Z') or - (c >= '0' and c <= '9') or c in '-.[|:'): - if v == '-' or v == "->": - return False - return True - return False - - def parse_function_call(self, v): - l = v.split('(') - assert len(l) == 2 - name = l[0] - cut = len(l[1]) - 1 - assert cut >= 0 - args = [self.parse_constant_or_identifier(id) - for id in l[1][:cut].split(",")] + def parse_function_call(self, name, tokens): + args = [] + tokens.pop() # lparen + while tokens.get(0).name != 'paren_right': + args.append(self.parse_expression(tokens)) return FunctionCall(name, args) - def parse_constant_or_identifier(self, v): - c = v[0] - if (c >= 'a' and c <= 'z') or (c >= 'A' and c <= 'Z'): - if '(' in v: - return self.parse_function_call(v) - return self.parse_identifier(v) - return self.parse_constant(v) + def parse_array_const(self, tokens): + elems = [] + while True: + token = tokens.pop() + if token.name == 'number': + elems.append(FloatConstant(token.v)) + elif token.name == 'array_left': + elems.append(ArrayConstant(self.parse_array_const(tokens))) + else: + raise BadToken() + token = tokens.pop() + if token.name == 'array_right': + return elems + assert token.name == 'coma' - def parse_array_subscript(self, v): - v = v.strip(" ") - l = v.split("[") - lgt = len(l[1]) - 1 - assert lgt >= 0 - rhs = self.parse_constant_or_identifier(l[1][:lgt]) - return l[0], rhs - - def parse_statement(self, line): - if '=' in line: - lhs, rhs = line.split("=") - lhs = lhs.strip(" ") - if '[' in lhs: - name, index = self.parse_array_subscript(lhs) - return ArrayAssignment(name, index, self.parse_expression(rhs)) - else: - return Assignment(lhs, self.parse_expression(rhs)) - else: - return Execute(self.parse_expression(line)) + def parse_statement(self, tokens): + if (tokens.get(0).name == 'identifier' and 
+ tokens.get(1).name == 'assign'): + lhs = tokens.pop().v + tokens.pop() + rhs = self.parse_expression(tokens) + return Assignment(lhs, rhs) + elif (tokens.get(0).name == 'identifier' and + tokens.get(1).name == 'array_left'): + name = tokens.pop().v + tokens.pop() + index = self.parse_expression(tokens) + tokens.pop() + tokens.pop() + return ArrayAssignment(name, index, self.parse_expression(tokens)) + return Execute(self.parse_expression(tokens)) def parse(self, code): statements = [] @@ -447,7 +577,8 @@ line = line.split('#', 1)[0] line = line.strip(" ") if line: - statements.append(self.parse_statement(line)) + tokens = self.tokenize(line) + statements.append(self.parse_statement(tokens)) return Code(statements) def numpy_compile(code): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -19,7 +19,7 @@ def new(space, w_subtype, w_value): dtype = get_dtype(space) return dtype.itemtype.coerce_subtype(space, w_subtype, w_value) - return new, staticmethod(get_dtype) + return func_with_new_name(new, name + "_box_new"), staticmethod(get_dtype) class PrimitiveBox(object): _mixin_ = True @@ -94,6 +94,7 @@ descr_gt = _binop_impl("greater") descr_ge = _binop_impl("greater_equal") + descr_radd = _binop_right_impl("add") descr_rmul = _binop_right_impl("multiply") descr_neg = _unaryop_impl("negative") @@ -128,7 +129,7 @@ descr__new__, get_dtype = new_dtype_getter("uint16") class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): - pass + descr__new__, get_dtype = new_dtype_getter("int32") class W_UInt32Box(W_UnsignedIntgerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("uint32") @@ -164,7 +165,7 @@ descr__new__, get_dtype = new_dtype_getter("complex128") W_GenericBox.typedef = TypeDef("generic", - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_GenericBox.descr__new__.im_func), @@ -179,6 +180,7 @@ __mul__ = interp2app(W_GenericBox.descr_mul), __div__ = interp2app(W_GenericBox.descr_div), + __radd__ = interp2app(W_GenericBox.descr_add), __rmul__ = interp2app(W_GenericBox.descr_rmul), __eq__ = interp2app(W_GenericBox.descr_eq), @@ -193,45 +195,47 @@ ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_BoolBox.descr__new__.im_func), ) W_NumberBox.typedef = TypeDef("number", W_GenericBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_IntegerBox.typedef = TypeDef("integer", W_NumberBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_SignedIntegerBox.typedef = TypeDef("signedinteger", W_IntegerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_Int8Box.descr__new__.im_func), ) W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntgerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", + __new__ = interp2app(W_Int16Box.descr__new__.im_func), ) W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntgerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_Int32Box.typedef = TypeDef("int32", W_SignedIntegerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", + __new__ = interp2app(W_Int32Box.descr__new__.im_func), ) W_UInt32Box.typedef = 
TypeDef("uint32", W_UnsignedIntgerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) if LONG_BIT == 32: @@ -239,35 +243,36 @@ elif LONG_BIT == 64: long_name = "int64" W_LongBox.typedef = TypeDef(long_name, (W_SignedIntegerBox.typedef, int_typedef,), - __module__ = "numpy", + __module__ = "numpypy", ) W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntgerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, - __module__ = "numpy", + __module__ = "numpypy", + __new__ = interp2app(W_Int64Box.descr__new__.im_func), ) W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntgerBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_FloatingBox.typedef = TypeDef("floating", W_InexactBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_Float64Box.descr__new__.im_func), ) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -15,7 +15,12 @@ FLOATINGLTR = "f" COMPLEXLTR = "c" + +VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True}) + class W_Dtype(Wrappable): + _immuable_fields_ = ["itemtype", "num", "kind"] + def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[]): self.signature = signature.BaseSignature() self.itemtype = itemtype @@ -28,7 +33,7 @@ def malloc(self, length): # XXX find out why test_zjit explodes with tracking of allocations - return lltype.malloc(rffi.CArray(lltype.Char), self.itemtype.get_element_size() * length, + return lltype.malloc(VOID_STORAGE, self.itemtype.get_element_size() * length, zero=True, flavor="raw", track_allocation=False, add_memory_pressure=True ) @@ -82,7 +87,7 @@ return space.newtuple([]) W_Dtype.typedef = TypeDef("dtype", - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_Dtype.descr__new__.im_func), __str__= interp2app(W_Dtype.descr_str), diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1,45 +1,329 @@ from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import interp_ufuncs, interp_dtype, signature from pypy.rlib import jit -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name +from pypy.rlib.rstring import StringBuilder +from pypy.rlib.objectmodel import instantiate -numpy_driver = jit.JitDriver(greens = ['signature'], - reds = ['result_size', 'i', 'self', 'result']) -all_driver = jit.JitDriver(greens=['signature'], reds=['i', 'size', 'self', 
'dtype']) -any_driver = jit.JitDriver(greens=['signature'], reds=['i', 'size', 'self', 'dtype']) -slice_driver = jit.JitDriver(greens=['signature'], reds=['i', 'j', 'step', 'stop', 'source', 'dest']) +numpy_driver = jit.JitDriver( + greens=['shapelen', 'signature'], + reds=['result_size', 'i', 'ri', 'self', 'result'] +) +all_driver = jit.JitDriver( + greens=['shapelen', 'signature'], + reds=['i', 'self', 'dtype'] +) +any_driver = jit.JitDriver( + greens=['shapelen', 'signature'], + reds=['i', 'self', 'dtype'] +) +slice_driver = jit.JitDriver( + greens=['shapelen', 'signature'], + reds=['self', 'source', 'source_iter', 'res_iter'] +) -def descr_new_array(space, w_subtype, w_size_or_iterable, w_dtype=None): - l = space.listview(w_size_or_iterable) - if space.is_w(w_dtype, space.w_None): - w_dtype = None - for w_item in l: - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_item, w_dtype) - if w_dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: +def _find_shape_and_elems(space, w_iterable): + shape = [space.len_w(w_iterable)] + batch = space.listview(w_iterable) + while True: + new_batch = [] + if not batch: + return shape, [] + if not space.issequence_w(batch[0]): + for elem in batch: + if space.issequence_w(elem): + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + return shape, batch + size = space.len_w(batch[0]) + for w_elem in batch: + if not space.issequence_w(w_elem) or space.len_w(w_elem) != size: + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + new_batch += space.listview(w_elem) + shape.append(size) + batch = new_batch + +def shape_agreement(space, shape1, shape2): + ret = _shape_agreement(shape1, shape2) + if len(ret) < max(len(shape1), len(shape2)): + raise OperationError(space.w_ValueError, + space.wrap("operands could not be broadcast together with shapes (%s) (%s)" % ( + ",".join([str(x) for x in shape1]), + ",".join([str(x) for x in shape2]), + )) + ) + return ret + +def _shape_agreement(shape1, shape2): + """ Checks agreement about two shapes with respect to broadcasting. Returns + the resulting shape. + """ + lshift = 0 + rshift = 0 + if len(shape1) > len(shape2): + m = len(shape1) + n = len(shape2) + rshift = len(shape2) - len(shape1) + remainder = shape1 + else: + m = len(shape2) + n = len(shape1) + lshift = len(shape1) - len(shape2) + remainder = shape2 + endshape = [0] * m + indices1 = [True] * m + indices2 = [True] * m + for i in range(m - 1, m - n - 1, -1): + left = shape1[i + lshift] + right = shape2[i + rshift] + if left == right: + endshape[i] = left + elif left == 1: + endshape[i] = right + indices1[i + lshift] = False + elif right == 1: + endshape[i] = left + indices2[i + rshift] = False + else: + return [] + #raise OperationError(space.w_ValueError, space.wrap( + # "frames are not aligned")) + for i in range(m - n): + endshape[i] = remainder[i] + return endshape + + +# Iterators for arrays +# -------------------- +# all those iterators with the exception of BroadcastIterator iterate over the +# entire array in C order (the last index changes the fastest). This will +# yield all elements. Views iterate over indices and look towards strides and +# backstrides to find the correct position. Notably the offset between +# x[..., i + 1] and x[..., i] will be strides[-1]. Offset between +# x[..., k + 1, 0] and x[..., k, i_max] will be backstrides[-2] etc. 
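As a standalone illustration of the stride/backstride walk the comment above describes (a minimal sketch with assumed C-contiguous strides, not code from this patch): the last index advances first, and when an axis overflows it is reset by subtracting its backstride, strides[i] * (shape[i] - 1).

    def c_order_offsets(shape, strides, start=0):
        # yield the storage offset of every element, last axis varying fastest
        backstrides = [stride * (dim - 1) for dim, stride in zip(shape, strides)]
        indices = [0] * len(shape)
        offset = start
        while True:
            yield offset
            for i in range(len(shape) - 1, -1, -1):
                if indices[i] < shape[i] - 1:
                    indices[i] += 1
                    offset += strides[i]
                    break
                indices[i] = 0
                offset -= backstrides[i]
            else:
                return  # every axis overflowed: done

    # a C-contiguous 2x3 array has strides [3, 1]; offsets come out in order
    assert list(c_order_offsets([2, 3], [3, 1])) == [0, 1, 2, 3, 4, 5]
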
+ +# BroadcastIterator works like that, but for indexes that don't change source +# in the original array, strides[i] == backstrides[i] == 0 + +class BaseIterator(object): + def next(self, shapelen): + raise NotImplementedError + + def done(self): + raise NotImplementedError + + def get_offset(self): + raise NotImplementedError + +class ArrayIterator(BaseIterator): + def __init__(self, size): + self.offset = 0 + self.size = size + + def next(self, shapelen): + arr = instantiate(ArrayIterator) + arr.size = self.size + arr.offset = self.offset + 1 + return arr + + def done(self): + return self.offset >= self.size + + def get_offset(self): + return self.offset + +class OneDimIterator(BaseIterator): + def __init__(self, start, step, stop): + self.offset = start + self.step = step + self.size = stop * step + start + + def next(self, shapelen): + arr = instantiate(OneDimIterator) + arr.size = self.size + arr.step = self.step + arr.offset = self.offset + self.step + return arr + + def done(self): + return self.offset == self.size + + def get_offset(self): + return self.offset + +class ViewIterator(BaseIterator): + def __init__(self, arr): + self.indices = [0] * len(arr.shape) + self.offset = arr.start + self.arr = arr + self._done = False + + @jit.unroll_safe + def next(self, shapelen): + offset = self.offset + indices = [0] * shapelen + for i in range(shapelen): + indices[i] = self.indices[i] + done = False + for i in range(shapelen - 1, -1, -1): + if indices[i] < self.arr.shape[i] - 1: + indices[i] += 1 + offset += self.arr.strides[i] break - if w_dtype is None: - w_dtype = space.w_None + else: + indices[i] = 0 + offset -= self.arr.backstrides[i] + else: + done = True + res = instantiate(ViewIterator) + res.offset = offset + res.indices = indices + res.arr = self.arr + res._done = done + return res - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - arr = SingleDimArray(len(l), dtype=dtype) - i = 0 - for w_elem in l: - dtype.setitem(arr.storage, i, dtype.coerce(space, w_elem)) - i += 1 - return arr + def done(self): + return self._done + + def get_offset(self): + return self.offset + +class BroadcastIterator(BaseIterator): + '''Like a view iterator, but will repeatedly access values + for all iterations across a res_shape, folding the offset + using mod() arithmetic + ''' + def __init__(self, arr, res_shape): + self.indices = [0] * len(res_shape) + self.offset = arr.start + #strides are 0 where original shape==1 + self.strides = [] + self.backstrides = [] + for i in range(len(arr.shape)): + if arr.shape[i] == 1: + self.strides.append(0) + self.backstrides.append(0) + else: + self.strides.append(arr.strides[i]) + self.backstrides.append(arr.backstrides[i]) + self.res_shape = res_shape + self.strides = [0] * (len(res_shape) - len(arr.shape)) + self.strides + self.backstrides = [0] * (len(res_shape) - len(arr.shape)) + self.backstrides + self._done = False + + @jit.unroll_safe + def next(self, shapelen): + offset = self.offset + indices = [0] * shapelen + _done = False + for i in range(shapelen): + indices[i] = self.indices[i] + for i in range(shapelen - 1, -1, -1): + if indices[i] < self.res_shape[i] - 1: + indices[i] += 1 + offset += self.strides[i] + break + else: + indices[i] = 0 + offset -= self.backstrides[i] + else: + _done = True + res = instantiate(BroadcastIterator) + res.indices = indices + res.offset = offset + res._done = _done + res.strides = self.strides + res.backstrides = self.backstrides + res.res_shape = 
self.res_shape + return res + + def done(self): + return self._done + + def get_offset(self): + return self.offset + +class Call2Iterator(BaseIterator): + def __init__(self, left, right): + self.left = left + self.right = right + + def next(self, shapelen): + return Call2Iterator(self.left.next(shapelen), + self.right.next(shapelen)) + + def done(self): + if isinstance(self.left, ConstantIterator): + return self.right.done() + return self.left.done() + + def get_offset(self): + if isinstance(self.left, ConstantIterator): + return self.right.get_offset() + return self.left.get_offset() + +class Call1Iterator(BaseIterator): + def __init__(self, child): + self.child = child + + def next(self, shapelen): + return Call1Iterator(self.child.next(shapelen)) + + def done(self): + return self.child.done() + + def get_offset(self): + return self.child.get_offset() + +class ConstantIterator(BaseIterator): + def next(self, shapelen): + return self + + def done(self): + return False + + def get_offset(self): + return 0 + class BaseArray(Wrappable): - _attrs_ = ["invalidates", "signature"] + _attrs_ = ["invalidates", "signature", "shape", "strides", "backstrides", + "start", 'order'] - def __init__(self): + _immutable_fields_ = ['start', "order"] + + strides = None + start = 0 + + def __init__(self, shape, order): self.invalidates = [] + self.shape = shape + self.order = order + if self.strides is None: + self.calc_strides(shape) + + def calc_strides(self, shape): + strides = [] + backstrides = [] + s = 1 + shape_rev = shape[:] + if self.order == 'C': + shape_rev.reverse() + for sh in shape_rev: + strides.append(s) + backstrides.append(s * (sh - 1)) + s *= sh + if self.order == 'C': + strides.reverse() + backstrides.reverse() + self.strides = strides[:] + self.backstrides = backstrides[:] def invalidated(self): if self.invalidates: @@ -53,6 +337,13 @@ def add_invalidates(self, other): self.invalidates.append(other) + def descr__new__(space, w_subtype, w_size, w_dtype=None): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + size, shape = _find_size_and_shape(space, w_size) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + def _unaryop_impl(ufunc_name): def impl(self, space): return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) @@ -99,7 +390,7 @@ def _reduce_ufunc_impl(ufunc_name): def impl(self, space): - return getattr(interp_ufuncs.get(space), ufunc_name).descr_reduce(space, self) + return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, self, multidim=True) return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) descr_sum = _reduce_ufunc_impl("add") @@ -108,55 +399,65 @@ descr_min = _reduce_ufunc_impl("minimum") def _reduce_argmax_argmin_impl(op_name): - reduce_driver = jit.JitDriver(greens=['signature'], - reds = ['i', 'size', 'result', 'self', 'cur_best', 'dtype']) - def loop(self, size): + reduce_driver = jit.JitDriver( + greens=['shapelen', 'signature'], + reds=['result', 'idx', 'i', 'self', 'cur_best', 'dtype'] + ) + def loop(self): + i = self.start_iter() + cur_best = self.eval(i) + shapelen = len(self.shape) + i = i.next(shapelen) + dtype = self.find_dtype() result = 0 - cur_best = self.eval(0) - i = 1 - dtype = self.find_dtype() - while i < size: + idx = 1 + while not i.done(): reduce_driver.jit_merge_point(signature=self.signature, + shapelen=shapelen, self=self, dtype=dtype, - size=size, i=i, result=result, + i=i, result=result, idx=idx, cur_best=cur_best) new_best = 
getattr(dtype.itemtype, op_name)(cur_best, self.eval(i)) if dtype.itemtype.ne(new_best, cur_best): - result = i + result = idx cur_best = new_best - i += 1 + i = i.next(shapelen) + idx += 1 return result def impl(self, space): size = self.find_size() if size == 0: raise OperationError(space.w_ValueError, - space.wrap("Can't call %s on zero-size arrays" \ - % op_name)) - return space.wrap(loop(self, size)) + space.wrap("Can't call %s on zero-size arrays" % op_name)) + return space.wrap(loop(self)) return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) def _all(self): - size = self.find_size() dtype = self.find_dtype() - i = 0 - while i < size: - all_driver.jit_merge_point(signature=self.signature, self=self, dtype=dtype, size=size, i=i) + i = self.start_iter() + shapelen = len(self.shape) + while not i.done(): + all_driver.jit_merge_point(signature=self.signature, + shapelen=shapelen, self=self, + dtype=dtype, i=i) if not dtype.itemtype.bool(self.eval(i)): return False - i += 1 + i = i.next(shapelen) return True def descr_all(self, space): return space.wrap(self._all()) def _any(self): - size = self.find_size() dtype = self.find_dtype() - i = 0 - while i < size: - any_driver.jit_merge_point(signature=self.signature, self=self, size=size, dtype=dtype, i=i) + i = self.start_iter() + shapelen = len(self.shape) + while not i.done(): + any_driver.jit_merge_point(signature=self.signature, + shapelen=shapelen, self=self, + dtype=dtype, i=i) if dtype.itemtype.bool(self.eval(i)): return True - i += 1 + i = i.next(shapelen) return False def descr_any(self, space): return space.wrap(self._any()) @@ -173,25 +474,6 @@ assert isinstance(w_res, BaseArray) return w_res.descr_sum(space) - def _getnums(self, comma): - dtype = self.find_dtype() - if self.find_size() > 1000: - nums = [ - dtype.itemtype.str_format(self.eval(index)) - for index in range(3) - ] - nums.append("..." + "," * comma) - nums.extend([ - dtype.itemtype.str_format(self.eval(index)) - for index in range(self.find_size() - 3, self.find_size()) - ]) - else: - nums = [ - dtype.itemtype.str_format(self.eval(index)) - for index in range(self.find_size()) - ] - return nums - def get_concrete(self): raise NotImplementedError @@ -199,110 +481,306 @@ return space.wrap(self.find_dtype()) def descr_get_shape(self, space): - return space.newtuple([self.descr_len(space)]) + return space.newtuple([space.wrap(i) for i in self.shape]) def descr_get_size(self, space): return space.wrap(self.find_size()) def descr_copy(self, space): - return space.call_function(space.gettypefor(BaseArray), self, self.find_dtype()) + return self.get_concrete().copy() def descr_len(self, space): return self.get_concrete().descr_len(space) def descr_repr(self, space): - # Simple implementation so that we can see the array. Needs work. 
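A pure-Python analogue of the argmax/argmin loop above (illustrative only; the real loop goes through dtype.itemtype and the JIT driver): keep the best value seen so far together with the flat index at which it occurred, advancing the array iterator once per element.

    def argmax(values):
        it = iter(values)
        cur_best = next(it)
        result = 0
        for idx, value in enumerate(it, 1):
            if value > cur_best:   # dtype.itemtype.max/ne in the real code
                cur_best = value
                result = idx
        return result

    assert argmax([3.0, 7.5, 1.2, 7.5]) == 1   # first maximum wins, as in numpy
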
+ res = StringBuilder() + res.append("array(") concrete = self.get_concrete() - res = "array([" + ", ".join(concrete._getnums(False)) + "]" dtype = concrete.find_dtype() + if not concrete.find_size(): + res.append('[]') + if len(self.shape) > 1: + # An empty slice reports its shape + res.append(", shape=(") + self_shape = str(self.shape) + res.append_slice(str(self_shape), 1, len(self_shape) - 1) + res.append(')') + else: + concrete.to_str(space, 1, res, indent=' ') if (dtype is not interp_dtype.get_dtype_cache(space).w_float64dtype and - dtype is not interp_dtype.get_dtype_cache(space).w_int64dtype) or not self.find_size(): - res += ", dtype=" + dtype.name - res += ")" - return space.wrap(res) + dtype is not interp_dtype.get_dtype_cache(space).w_int64dtype) or \ + not self.find_size(): + res.append(", dtype=" + dtype.name) + res.append(")") + return space.wrap(res.build()) + + def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): + '''Modifies builder with a representation of the array/slice + The items will be seperated by a comma if comma is 1 + Multidimensional arrays/slices will span a number of lines, + each line will begin with indent. + ''' + size = self.find_size() + if size < 1: + builder.append('[]') + return + if size > 1000: + # Once this goes True it does not go back to False for recursive + # calls + use_ellipsis = True + dtype = self.find_dtype() + ndims = len(self.shape) + i = 0 + start = True + builder.append('[') + if ndims > 1: + if use_ellipsis: + for i in range(3): + if start: + start = False + else: + builder.append(',' * comma + '\n') + if ndims == 3: + builder.append('\n' + indent) + else: + builder.append(indent) + # create_slice requires len(chunks) > 1 in order to reduce + # shape + view = self.create_slice(space, [(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) + view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) + builder.append('\n' + indent + '..., ') + i = self.shape[0] - 3 + while i < self.shape[0]: + if start: + start = False + else: + builder.append(',' * comma + '\n') + if ndims == 3: + builder.append('\n' + indent) + else: + builder.append(indent) + # create_slice requires len(chunks) > 1 in order to reduce + # shape + view = self.create_slice(space, [(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) + view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) + i += 1 + elif ndims == 1: + spacer = ',' * comma + ' ' + item = self.start + # An iterator would be a nicer way to walk along the 1d array, but + # how do I reset it if printing ellipsis? iterators have no + # "set_offset()" + i = 0 + if use_ellipsis: + for i in range(3): + if start: + start = False + else: + builder.append(spacer) + builder.append(dtype.itemtype.str_format(self.getitem(item))) + item += self.strides[0] + # Add a comma only if comma is False - this prevents adding two + # commas + builder.append(spacer + '...' + ',' * (1 - comma)) + # Ugly, but can this be done with an iterator? + item = self.start + self.backstrides[0] - 2 * self.strides[0] + i = self.shape[0] - 3 + while i < self.shape[0]: + if start: + start = False + else: + builder.append(spacer) + builder.append(dtype.itemtype.str_format(self.getitem(item))) + item += self.strides[0] + i += 1 + else: + builder.append('[') + builder.append(']') def descr_str(self, space): - # Simple implementation so that we can see the array. Needs work. 
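The 1-D branch of to_str() is easier to see in isolation. A rough standalone sketch of the rule it implements (assumed behaviour, not the patch itself): items are joined with ', ' for repr (comma=1) or ' ' for str (comma=0), and past 1000 elements only the first and last three are shown around an ellipsis.

    def format_1d(items, comma):
        sep = ',' * comma + ' '
        if len(items) > 1000:
            shown = [str(x) for x in items[:3]] + ['...'] + [str(x) for x in items[-3:]]
        else:
            shown = [str(x) for x in items]
        return '[' + sep.join(shown) + ']'

    print(format_1d([1.0, 2.0, 3.0], 1))   # [1.0, 2.0, 3.0]
    print(format_1d(range(2000), 0))       # [0 1 2 ... 1997 1998 1999]
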
+ ret = StringBuilder() concrete = self.get_concrete() - return space.wrap("[" + " ".join(concrete._getnums(True)) + "]") + concrete.to_str(space, 0, ret, ' ') + return space.wrap(ret.build()) + + @jit.unroll_safe + def _index_of_single_item(self, space, w_idx): + if space.isinstance_w(w_idx, space.w_int): + idx = space.int_w(w_idx) + if not self.shape: + if idx != 0: + raise OperationError(space.w_IndexError, + space.wrap("index out of range")) + return 0 + if idx < 0: + idx = self.shape[0] + idx + if idx < 0 or idx >= self.shape[0]: + raise OperationError(space.w_IndexError, + space.wrap("index out of range")) + return self.start + idx * self.strides[0] + index = [space.int_w(w_item) + for w_item in space.fixedview(w_idx)] + item = self.start + for i in range(len(index)): + v = index[i] + if v < 0: + v += self.shape[i] + if v < 0 or v >= self.shape[i]: + raise operationerrfmt(space.w_IndexError, + "index (%d) out of range (0<=index<%d", i, self.shape[i], + ) + item += v * self.strides[i] + return item + + @jit.unroll_safe + def _single_item_result(self, space, w_idx): + """ The result of getitem/setitem is a single item if w_idx + is a list of scalars that match the size of shape + """ + shape_len = len(self.shape) + if shape_len == 0: + if not space.isinstance_w(w_idx, space.w_int): + raise OperationError(space.w_IndexError, space.wrap( + "wrong index")) + return True + if shape_len == 1: + if space.isinstance_w(w_idx, space.w_int): + return True + if space.isinstance_w(w_idx, space.w_slice): + return False + elif (space.isinstance_w(w_idx, space.w_slice) or + space.isinstance_w(w_idx, space.w_int)): + return False + lgt = space.len_w(w_idx) + if lgt > shape_len: + raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + if lgt < shape_len: + return False + for w_item in space.fixedview(w_idx): + if space.isinstance_w(w_item, space.w_slice): + return False + return True + + @jit.unroll_safe + def _prepare_slice_args(self, space, w_idx): + if (space.isinstance_w(w_idx, space.w_int) or + space.isinstance_w(w_idx, space.w_slice)): + return [space.decode_index4(w_idx, self.shape[0])] + return [space.decode_index4(w_item, self.shape[i]) for i, w_item in + enumerate(space.fixedview(w_idx))] def descr_getitem(self, space, w_idx): - # TODO: indexing by arrays and lists - if space.isinstance_w(w_idx, space.w_tuple): - length = space.len_w(w_idx) - if length == 0: - return space.wrap(self) - if length > 1: # only one dimension for now. 
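_index_of_single_item above maps a tuple index to a flat storage offset: each component is wrapped if negative, bounds-checked, and then contributes index * stride; the sum is added to the view's start. The same arithmetic as a small standalone helper (hypothetical, for illustration only):

def index_of_single_item(start, shape, strides, index):
    # index is a tuple with one entry per dimension
    if len(index) != len(shape):
        raise IndexError("wrong number of indices")
    offset = start
    for i, v in enumerate(index):
        if v < 0:
            v += shape[i]                      # negative indices count from the end
        if v < 0 or v >= shape[i]:
            raise IndexError("index (%d) out of range (0<=index<%d)" % (i, shape[i]))
        offset += v * strides[i]               # each axis contributes index * stride
    return offset

# a C-ordered 10x5x3 array has strides [15, 3, 1], as in the tests further down
assert index_of_single_item(0, [10, 5, 3], [15, 3, 1], (1, 2, 2)) == 1*15 + 2*3 + 2
assert index_of_single_item(0, [10, 5, 3], [15, 3, 1], (1, -1, 0)) == 1*15 + 4*3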
- raise OperationError(space.w_IndexError, - space.wrap("invalid index")) - w_idx = space.getitem(w_idx, space.wrap(0)) - start, stop, step, slice_length = space.decode_index4(w_idx, self.find_size()) - if step == 0: - # Single index - return self.get_concrete().eval(start) - else: - # Slice - new_sig = signature.Signature.find_sig([ - SingleDimSlice.signature, self.signature - ]) - res = SingleDimSlice(start, stop, step, slice_length, self, new_sig) - return space.wrap(res) + if self._single_item_result(space, w_idx): + concrete = self.get_concrete() + if len(concrete.shape) < 1: + raise OperationError(space.w_IndexError, space.wrap( + "0-d arrays can't be indexed")) + item = concrete._index_of_single_item(space, w_idx) + return concrete.getitem(item) + chunks = self._prepare_slice_args(space, w_idx) + return space.wrap(self.create_slice(space, chunks)) def descr_setitem(self, space, w_idx, w_value): - # TODO: indexing by arrays and lists self.invalidated() - if space.isinstance_w(w_idx, space.w_tuple): - length = space.len_w(w_idx) - if length > 1: # only one dimension for now. - raise OperationError(space.w_IndexError, - space.wrap("invalid index")) - if length == 0: - w_idx = space.newslice(space.wrap(0), - space.wrap(self.find_size()), - space.wrap(1)) + if self._single_item_result(space, w_idx): + concrete = self.get_concrete() + if len(concrete.shape) < 1: + raise OperationError(space.w_IndexError, space.wrap( + "0-d arrays can't be indexed")) + item = concrete._index_of_single_item(space, w_idx) + concrete.setitem_w(space, item, w_value) + return + if not isinstance(w_value, BaseArray): + w_value = convert_to_array(space, w_value) + chunks = self._prepare_slice_args(space, w_idx) + view = self.create_slice(space, chunks) + view.setslice(space, w_value) + + @jit.unroll_safe + def create_slice(self, space, chunks): + if len(chunks) == 1: + start, stop, step, lgt = chunks[0] + if step == 0: + shape = self.shape[1:] + strides = self.strides[1:] + backstrides = self.backstrides[1:] else: - w_idx = space.getitem(w_idx, space.wrap(0)) - start, stop, step, slice_length = space.decode_index4(w_idx, - self.find_size()) - if step == 0: - # Single index - self.get_concrete().setitem_w(space, start, w_value) + shape = [lgt] + self.shape[1:] + strides = [self.strides[0] * step] + self.strides[1:] + backstrides = [(lgt - 1) * self.strides[0] * step] + self.backstrides[1:] + start *= self.strides[0] + start += self.start else: - concrete = self.get_concrete() - if isinstance(w_value, BaseArray): - # for now we just copy if setting part of an array from - # part of itself. can be improved. 
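create_slice above builds a view without copying: each chunk is a (start, stop, step, length) tuple, an integer index is encoded as step == 0 and simply drops its dimension, while a genuine slice keeps the dimension with its stride scaled by the step; every chunk also moves the starting offset. A rough sketch of the same bookkeeping on plain lists (backstrides, which the real code maintains alongside, are left out; the numbers match the create_slice tests further down):

def create_slice(start, shape, strides, chunks):
    # chunks: one (start_, stop, step, length) per leading dimension;
    # step == 0 means "integer index": that dimension disappears.
    new_shape, new_strides = [], []
    for i, (start_, stop, step, length) in enumerate(chunks):
        start += strides[i] * start_                # every chunk moves the starting offset
        if step != 0:
            new_shape.append(length)                # a sliced dimension survives ...
            new_strides.append(strides[i] * step)   # ... with a scaled stride
    # trailing, untouched dimensions are kept as-is
    new_shape += shape[len(chunks):]
    new_strides += strides[len(chunks):]
    return start, new_shape, new_strides

# C-ordered 10x5x3 array, strides [15, 3, 1]; a[3] drops the first dimension:
assert create_slice(0, [10, 5, 3], [15, 3, 1], [(3, 0, 0, 1)]) == (45, [5, 3], [3, 1])
# a[1:9:2] keeps it, with stride 2*15 and length 4:
assert create_slice(0, [10, 5, 3], [15, 3, 1], [(1, 9, 2, 4)]) == (15, [4, 5, 3], [30, 3, 1])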
- if (concrete.get_root_storage() == - w_value.get_concrete().get_root_storage()): - w_value = space.call_function(space.gettypefor(BaseArray), w_value) - assert isinstance(w_value, BaseArray) - else: - w_value = convert_to_array(space, w_value) - concrete.setslice(space, start, stop, step, - slice_length, w_value) + shape = [] + strides = [] + backstrides = [] + start = self.start + i = -1 + for i, (start_, stop, step, lgt) in enumerate(chunks): + if step != 0: + shape.append(lgt) + strides.append(self.strides[i] * step) + backstrides.append(self.strides[i] * (lgt - 1) * step) + start += self.strides[i] * start_ + # add a reminder + s = i + 1 + assert s >= 0 + shape += self.shape[s:] + strides += self.strides[s:] + backstrides += self.backstrides[s:] + new_sig = signature.Signature.find_sig([ + NDimSlice.signature, self.signature, + ]) + return NDimSlice(self, new_sig, start, strides[:], backstrides[:], + shape[:]) def descr_mean(self, space): return space.div(self.descr_sum(space), space.wrap(self.find_size())) - def _sliceloop(self, start, stop, step, source, dest): - i = start - j = 0 - while (step > 0 and i < stop) or (step < 0 and i > stop): - slice_driver.jit_merge_point(signature=source.signature, step=step, - stop=stop, i=i, j=j, source=source, - dest=dest) - dest.setitem(i, source.eval(j).convert_to(dest.find_dtype())) - j += 1 - i += step + def descr_nonzero(self, space): + if self.find_size() > 1: + raise OperationError(space.w_ValueError, space.wrap( + "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) + return space.wrap(space.is_true( + self.get_concrete().eval(self.start_iter(self.shape)) + )) + + def descr_get_transpose(self, space): + concrete = self.get_concrete() + if len(concrete.shape) < 2: + return space.wrap(self) + new_sig = signature.Signature.find_sig([ + NDimSlice.signature, self.signature + ]) + strides = [] + backstrides = [] + shape = [] + for i in range(len(concrete.shape) - 1, -1, -1): + strides.append(concrete.strides[i]) + backstrides.append(concrete.backstrides[i]) + shape.append(concrete.shape[i]) + return space.wrap(NDimSlice(concrete, new_sig, self.start, strides[:], + backstrides[:], shape[:])) + + def descr_get_flatiter(self, space): + return space.wrap(W_FlatIterator(self)) + + def getitem(self, item): + raise NotImplementedError + + def start_iter(self, res_shape=None): + raise NotImplementedError + + def descr_debug_repr(self, space): + return space.wrap(self.debug_repr()) def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): return w_obj elif space.issequence_w(w_obj): # Convert to array. - w_obj = space.call_function(space.gettypefor(BaseArray), w_obj) - assert isinstance(w_obj, BaseArray) - return w_obj + return array(space, w_obj, w_order=None) else: # If it's a scalar dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) @@ -313,32 +791,51 @@ class Scalar(BaseArray): """ - Intermediate class representing a float literal. + Intermediate class representing a literal. 
""" signature = signature.BaseSignature() - _attrs_ = ["dtype", "value"] + _attrs_ = ["dtype", "value", "shape"] def __init__(self, dtype, value): - BaseArray.__init__(self) + self.shape = self.strides = [] + BaseArray.__init__(self, [], 'C') self.dtype = dtype self.value = value def find_size(self): - raise ValueError + return 1 + + def get_concrete(self): + return self def find_dtype(self): return self.dtype - def eval(self, i): + def getitem(self, item): + raise NotImplementedError + + def eval(self, iter): return self.value + def start_iter(self, res_shape=None): + return ConstantIterator() + + def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): + builder.append(self.dtype.itemtype.str_format(self.value)) + + def copy(self): + return Scalar(self.dtype, self.value) + + def debug_repr(self): + return 'Scalar' + class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs """ - def __init__(self, signature, res_dtype): - BaseArray.__init__(self) + def __init__(self, signature, shape, res_dtype, order): + BaseArray.__init__(self, shape, order) self.forced_result = None self.signature = signature self.res_dtype = res_dtype @@ -351,13 +848,18 @@ i = 0 signature = self.signature result_size = self.find_size() - result = SingleDimArray(result_size, self.find_dtype()) - while i < result_size: + result = W_NDimArray(result_size, self.shape, self.find_dtype()) + shapelen = len(self.shape) + i = self.start_iter() + ri = result.start_iter() + while not ri.done(): numpy_driver.jit_merge_point(signature=signature, - result_size=result_size, i=i, + shapelen=shapelen, + result_size=result_size, i=i, ri=ri, self=self, result=result) - result.dtype.setitem(result.storage, i, self.eval(i)) - i += 1 + result.dtype.setitem(result.storage, ri.offset, self.eval(i)) + i = i.next(shapelen) + ri = ri.next(shapelen) return result def force_if_needed(self): @@ -369,10 +871,13 @@ self.force_if_needed() return self.forced_result - def eval(self, i): + def eval(self, iter): if self.forced_result is not None: - return self.forced_result.eval(i) - return self._eval(i) + return self.forced_result.eval(iter) + return self._eval(iter) + + def getitem(self, item): + return self.get_concrete().getitem(item) def setitem(self, item, value): return self.get_concrete().setitem(item, value) @@ -388,8 +893,9 @@ class Call1(VirtualArray): - def __init__(self, signature, res_dtype, values): - VirtualArray.__init__(self, signature, res_dtype) + def __init__(self, signature, shape, res_dtype, values, order): + VirtualArray.__init__(self, signature, shape, res_dtype, + values.order) self.values = values def _del_sources(self): @@ -401,53 +907,91 @@ def _find_dtype(self): return self.res_dtype - def _eval(self, i): - val = self.values.eval(i).convert_to(self.res_dtype) - + def _eval(self, iter): + assert isinstance(iter, Call1Iterator) + val = self.values.eval(iter.child).convert_to(self.res_dtype) sig = jit.promote(self.signature) assert isinstance(sig, signature.Signature) call_sig = sig.components[0] assert isinstance(call_sig, signature.Call1) return call_sig.func(self.res_dtype, val) + def start_iter(self, res_shape=None): + if self.forced_result is not None: + return self.forced_result.start_iter(res_shape) + return Call1Iterator(self.values.start_iter(res_shape)) + + def debug_repr(self): + sig = self.signature + assert isinstance(sig, signature.Signature) + call_sig = sig.components[0] + assert isinstance(call_sig, signature.Call1) + if self.forced_result is not 
None: + return 'Call1(%s, forced=%s)' % (call_sig.name, + self.forced_result.debug_repr()) + return 'Call1(%s, %s)' % (call_sig.name, + self.values.debug_repr()) + class Call2(VirtualArray): """ Intermediate class for performing binary operations. """ - def __init__(self, signature, calc_dtype, res_dtype, left, right): - VirtualArray.__init__(self, signature, res_dtype) + def __init__(self, signature, shape, calc_dtype, res_dtype, left, right): + # XXX do something if left.order != right.order + VirtualArray.__init__(self, signature, shape, res_dtype, left.order) self.left = left self.right = right self.calc_dtype = calc_dtype + self.size = 1 + for s in self.shape: + self.size *= s def _del_sources(self): self.left = None self.right = None def _find_size(self): - try: - return self.left.find_size() - except ValueError: - pass - return self.right.find_size() + return self.size - def _eval(self, i): - lhs = self.left.eval(i).convert_to(self.calc_dtype) - rhs = self.right.eval(i).convert_to(self.calc_dtype) + def start_iter(self, res_shape=None): + if self.forced_result is not None: + return self.forced_result.start_iter(res_shape) + if res_shape is None: + res_shape = self.shape # we still force the shape on children + return Call2Iterator(self.left.start_iter(res_shape), + self.right.start_iter(res_shape)) + def _eval(self, iter): + assert isinstance(iter, Call2Iterator) + lhs = self.left.eval(iter.left).convert_to(self.calc_dtype) + rhs = self.right.eval(iter.right).convert_to(self.calc_dtype) sig = jit.promote(self.signature) assert isinstance(sig, signature.Signature) call_sig = sig.components[0] assert isinstance(call_sig, signature.Call2) return call_sig.func(self.calc_dtype, lhs, rhs) + def debug_repr(self): + sig = self.signature + assert isinstance(sig, signature.Signature) + call_sig = sig.components[0] + assert isinstance(call_sig, signature.Call2) + if self.forced_result is not None: + return 'Call2(%s, forced=%s)' % (call_sig.name, + self.forced_result.debug_repr()) + return 'Call2(%s, %s, %s)' % (call_sig.name, + self.left.debug_repr(), + self.right.debug_repr()) + class ViewArray(BaseArray): """ Class for representing views of arrays, they will reflect changes of parent arrays. Example: slices """ - def __init__(self, parent, signature): - BaseArray.__init__(self) + def __init__(self, parent, signature, strides, backstrides, shape): + self.strides = strides + self.backstrides = backstrides + BaseArray.__init__(self, shape, parent.order) self.signature = signature self.parent = parent self.invalidates = parent.invalidates @@ -459,42 +1003,38 @@ self.parent.get_concrete() return self - def eval(self, i): - return self.parent.eval(self.calc_index(i)) + def getitem(self, item): + return self.parent.getitem(item) + + def eval(self, iter): + return self.parent.getitem(iter.get_offset()) @unwrap_spec(item=int) def setitem_w(self, space, item, w_value): - return self.parent.setitem_w(space, self.calc_index(item), w_value) + return self.parent.setitem_w(space, item, w_value) def setitem(self, item, value): # This is currently not possible to be called from anywhere. 
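Call2 above forces a common result shape onto both operands' iterators; that shape comes from shape_agreement (used by W_Ufunc2.call further down), which follows the usual broadcasting rule: line the shapes up at their trailing ends, and two sizes are compatible when they are equal or one of them is 1. A plain-Python sketch of the rule (only an approximation of the function in the diff):

def shape_agreement(shape1, shape2):
    # align the shapes at their trailing dimensions; take the larger size
    # wherever one of them is 1, and complain about anything else
    result = []
    for i in range(1, max(len(shape1), len(shape2)) + 1):
        d1 = shape1[-i] if i <= len(shape1) else 1
        d2 = shape2[-i] if i <= len(shape2) else 1
        if d1 != d2 and d1 != 1 and d2 != 1:
            raise ValueError("operands could not be broadcast together "
                             "with shapes %s %s" % (shape1, shape2))
        result.append(max(d1, d2))
    result.reverse()
    return result

assert shape_agreement([8, 1, 6, 1], [7, 1, 5]) == [8, 7, 6, 5]   # as in the tests below
assert shape_agreement([4, 4], []) == [4, 4]
try:
    shape_agreement([4, 3, 2], [4, 2])
except ValueError:
    pass   # 3 vs 4 clash once the shapes are aligned at the trailing end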
raise NotImplementedError def descr_len(self, space): - return space.wrap(self.find_size()) + if self.shape: + return space.wrap(self.shape[0]) + return space.wrap(1) - def calc_index(self, item): - raise NotImplementedError -class SingleDimSlice(ViewArray): +class NDimSlice(ViewArray): signature = signature.BaseSignature() - def __init__(self, start, stop, step, slice_length, parent, signature): - ViewArray.__init__(self, parent, signature) - if isinstance(parent, SingleDimSlice): - self.start = parent.calc_index(start) - self.stop = parent.calc_index(stop) - self.step = parent.step * step - self.parent = parent.parent - else: - self.start = start - self.stop = stop - self.step = step - self.parent = parent - self.size = slice_length - - def get_root_storage(self): - return self.parent.get_concrete().get_root_storage() + def __init__(self, parent, signature, start, strides, backstrides, + shape): + if isinstance(parent, NDimSlice): + parent = parent.parent + ViewArray.__init__(self, parent, signature, strides, backstrides, shape) + self.start = start + self.size = 1 + for sh in shape: + self.size *= sh def find_size(self): return self.size @@ -502,20 +1042,52 @@ def find_dtype(self): return self.parent.find_dtype() - def setslice(self, space, start, stop, step, slice_length, arr): - start = self.calc_index(start) - if stop != -1: - stop = self.calc_index(stop) - step = self.step * step - self._sliceloop(start, stop, step, arr, self.parent) + def setslice(self, space, w_value): + res_shape = shape_agreement(space, self.shape, w_value.shape) + self._sliceloop(w_value, res_shape) - def calc_index(self, item): - return (self.start + item * self.step) + def _sliceloop(self, source, res_shape): + source_iter = source.start_iter(res_shape) + res_iter = self.start_iter(res_shape) + shapelen = len(res_shape) + while not res_iter.done(): + slice_driver.jit_merge_point(signature=source.signature, + shapelen=shapelen, + self=self, source=source, + res_iter=res_iter, + source_iter=source_iter) + self.setitem(res_iter.offset, source.eval(source_iter).convert_to( + self.find_dtype())) + source_iter = source_iter.next(shapelen) + res_iter = res_iter.next(shapelen) + def start_iter(self, res_shape=None): + if res_shape is not None and res_shape != self.shape: + return BroadcastIterator(self, res_shape) + if len(self.shape) == 1: + return OneDimIterator(self.start, self.strides[0], self.shape[0]) + return ViewIterator(self) -class SingleDimArray(BaseArray): - def __init__(self, size, dtype): - BaseArray.__init__(self) + def setitem(self, item, value): + self.parent.setitem(item, value) + + def debug_repr(self): + return 'Slice(%s)' % self.parent.debug_repr() + + def copy(self): + array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) + iter = self.start_iter() + while not iter.done(): + array.setitem(iter.offset, self.getitem(iter.offset)) + iter = iter.next(len(self.shape)) + return array + +class W_NDimArray(BaseArray): + """ A class representing contiguous array. 
We know that each iteration + by say ufunc will increase the data index by one + """ + def __init__(self, size, shape, dtype, order='C'): + BaseArray.__init__(self, shape, order) self.size = size self.dtype = dtype self.storage = dtype.malloc(size) @@ -524,20 +1096,32 @@ def get_concrete(self): return self - def get_root_storage(self): - return self.storage - def find_size(self): return self.size def find_dtype(self): return self.dtype - def eval(self, i): - return self.dtype.getitem(self.storage, i) + def getitem(self, item): + return self.dtype.getitem(self.storage, item) + + def eval(self, iter): + return self.dtype.getitem(self.storage, iter.get_offset()) + + def copy(self): + array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) + rffi.c_memcpy( + array.storage, + self.storage, + self.size * self.dtype.itemtype.get_element_size() + ) + return array def descr_len(self, space): - return space.wrap(self.size) + if len(self.shape): + return space.wrap(self.shape[0]) + raise OperationError(space.w_TypeError, space.wrap( + "len() of unsized object")) def setitem_w(self, space, item, w_value): return self.setitem(item, self.dtype.coerce(space, w_value)) @@ -546,34 +1130,101 @@ self.invalidated() self.dtype.setitem(self.storage, item, value) - def setslice(self, space, start, stop, step, slice_length, arr): - self._sliceloop(start, stop, step, arr, self) + def start_iter(self, res_shape=None): + if self.order == 'C': + if res_shape is not None and res_shape != self.shape: + return BroadcastIterator(self, res_shape) + return ArrayIterator(self.size) + raise NotImplementedError # use ViewIterator simply, test it + + def debug_repr(self): + return 'Array' def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) - at unwrap_spec(size=int) -def zeros(space, size, w_dtype=None): +def _find_size_and_shape(space, w_size): + if space.isinstance_w(w_size, space.w_int): + size = space.int_w(w_size) + shape = [size] + else: + size = 1 + shape = [] + for w_item in space.fixedview(w_size): + item = space.int_w(w_item) + size *= item + shape.append(item) + return size, shape + +def array(space, w_item_or_iterable, w_dtype=None, w_order=NoneNotWrapped): + # find scalar + if not space.issequence_w(w_item_or_iterable): + if space.is_w(w_dtype, space.w_None): + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, + w_item_or_iterable) + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + return scalar_w(space, dtype, w_item_or_iterable) + if w_order is None: + order = 'C' + else: + order = space.str_w(w_order) + if order != 'C': # or order != 'F': + raise operationerrfmt(space.w_ValueError, "Unknown order: %s", + order) + shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) + # they come back in C order + size = len(elems_w) + if space.is_w(w_dtype, space.w_None): + w_dtype = None + for w_elem in elems_w: + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, + w_dtype) + if w_dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: + break + if w_dtype is None: + w_dtype = space.w_None dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - return space.wrap(SingleDimArray(size, dtype=dtype)) + arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) + shapelen = len(shape) + arr_iter = arr.start_iter(arr.shape) + for i in range(len(elems_w)): + w_elem = elems_w[i] + dtype.setitem(arr.storage, arr_iter.offset, 
dtype.coerce(space, w_elem)) + arr_iter = arr_iter.next(shapelen) + return arr - at unwrap_spec(size=int) -def ones(space, size, w_dtype=None): +def zeros(space, w_size, w_dtype=None): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + size, shape = _find_size_and_shape(space, w_size) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + +def ones(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - arr = SingleDimArray(size, dtype=dtype) + size, shape = _find_size_and_shape(space, w_size) + arr = W_NDimArray(size, shape[:], dtype=dtype) one = dtype.box(1) arr.dtype.fill(arr.storage, one, 0, size) return space.wrap(arr) +def dot(space, w_obj, w_obj2): + w_arr = convert_to_array(space, w_obj) + if isinstance(w_arr, Scalar): + return convert_to_array(space, w_obj2).descr_dot(space, w_arr) + return w_arr.descr_dot(space, w_obj2) + BaseArray.typedef = TypeDef( - 'numarray', - __new__ = interp2app(descr_new_array), - + 'ndarray', + __module__ = "numpypy", + __new__ = interp2app(BaseArray.descr__new__.im_func), __len__ = interp2app(BaseArray.descr_len), __getitem__ = interp2app(BaseArray.descr_getitem), @@ -582,6 +1233,7 @@ __pos__ = interp2app(BaseArray.descr_pos), __neg__ = interp2app(BaseArray.descr_neg), __abs__ = interp2app(BaseArray.descr_abs), + __nonzero__ = interp2app(BaseArray.descr_nonzero), __add__ = interp2app(BaseArray.descr_add), __sub__ = interp2app(BaseArray.descr_sub), @@ -606,11 +1258,15 @@ __repr__ = interp2app(BaseArray.descr_repr), __str__ = interp2app(BaseArray.descr_str), + __debug_repr__ = interp2app(BaseArray.descr_debug_repr), dtype = GetSetProperty(BaseArray.descr_get_dtype), shape = GetSetProperty(BaseArray.descr_get_shape), size = GetSetProperty(BaseArray.descr_get_size), + T = GetSetProperty(BaseArray.descr_get_transpose), + flat = GetSetProperty(BaseArray.descr_get_flatiter), + mean = interp2app(BaseArray.descr_mean), sum = interp2app(BaseArray.descr_sum), prod = interp2app(BaseArray.descr_prod), @@ -624,3 +1280,54 @@ copy = interp2app(BaseArray.descr_copy), ) + + +class W_FlatIterator(ViewArray): + signature = signature.BaseSignature() + + @jit.unroll_safe + def __init__(self, arr): + size = 1 + for sh in arr.shape: + size *= sh + new_sig = signature.Signature.find_sig([ + W_FlatIterator.signature, arr.signature + ]) + ViewArray.__init__(self, arr, new_sig, [arr.strides[-1]], + [arr.backstrides[-1]], [size]) + self.shapelen = len(arr.shape) + self.arr = arr + self.iter = self.start_iter() + + def start_iter(self, res_shape=None): + if res_shape is not None and res_shape != self.shape: + return BroadcastIterator(self, res_shape) + return OneDimIterator(self.arr.start, self.strides[0], + self.shape[0]) + + def find_dtype(self): + return self.arr.find_dtype() + + def find_size(self): + return self.shape[0] + + def descr_next(self, space): + if self.iter.done(): + raise OperationError(space.w_StopIteration, space.w_None) + result = self.eval(self.iter) + self.iter = self.iter.next(self.shapelen) + return result + + def descr_iter(self): + return self + + def debug_repr(self): + return 'FlatIter(%s)' % self.arr.debug_repr() + + +W_FlatIterator.typedef = TypeDef( + 'flatiter', + next = interp2app(W_FlatIterator.descr_next), + __iter__ = interp2app(W_FlatIterator.descr_iter), +) +W_FlatIterator.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/interp_support.py 
b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -9,7 +9,7 @@ @unwrap_spec(s=str) def fromstring(space, s): - from pypy.module.micronumpy.interp_numarray import SingleDimArray + from pypy.module.micronumpy.interp_numarray import W_NDimArray length = len(s) if length % FLOAT_SIZE == 0: @@ -19,7 +19,7 @@ "string length %d not divisable by %d" % (length, FLOAT_SIZE))) dtype = get_dtype_cache(space).w_float64dtype - a = SingleDimArray(number, dtype=dtype) + a = W_NDimArray(number, [number], dtype=dtype) start = 0 end = FLOAT_SIZE @@ -31,4 +31,4 @@ start += FLOAT_SIZE end += FLOAT_SIZE - return space.wrap(a) \ No newline at end of file + return space.wrap(a) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -1,6 +1,6 @@ from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app +from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty from pypy.module.micronumpy import interp_boxes, interp_dtype, signature, types from pypy.rlib import jit @@ -9,12 +9,13 @@ reduce_driver = jit.JitDriver( - greens = ["signature"], - reds = ["i", "size", "self", "dtype", "value", "obj"] + greens = ['shapelen', "signature"], + reds = ["i", "self", "dtype", "value", "obj"] ) class W_Ufunc(Wrappable): _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"] + _immutable_fields_ = ["promote_to_float", "promote_bools"] def __init__(self, name, promote_to_float, promote_bools, identity): self.name = name @@ -45,8 +46,10 @@ return self.call(space, __args__.arguments_w) def descr_reduce(self, space, w_obj): + return self.reduce(space, w_obj, multidim=False) + + def reduce(self, space, w_obj, multidim): from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar - if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) @@ -62,28 +65,32 @@ space, obj.find_dtype(), promote_to_largest=True ) - start = 0 + start = obj.start_iter(obj.shape) + shapelen = len(obj.shape) + if shapelen > 1 and not multidim: + raise OperationError(space.w_NotImplementedError, + space.wrap("not implemented yet")) if self.identity is None: if size == 0: raise operationerrfmt(space.w_ValueError, "zero-size array to " "%s.reduce without identity", self.name) - value = obj.eval(0).convert_to(dtype) - start += 1 + value = obj.eval(start).convert_to(dtype) + start = start.next(shapelen) else: value = self.identity.convert_to(dtype) new_sig = signature.Signature.find_sig([ self.reduce_signature, obj.signature ]) - return self.reduce(new_sig, start, value, obj, dtype, size) + return self.reduce_loop(new_sig, shapelen, start, value, obj, dtype) - def reduce(self, signature, start, value, obj, dtype, size): - i = start - while i < size: - reduce_driver.jit_merge_point(signature=signature, self=self, + def reduce_loop(self, signature, shapelen, i, value, obj, dtype): + while not i.done(): + reduce_driver.jit_merge_point(signature=signature, + shapelen=shapelen, self=self, value=value, obj=obj, i=i, - dtype=dtype, size=size) + dtype=dtype) value = self.func(dtype, value, obj.eval(i).convert_to(dtype)) - i += 1 + i = i.next(shapelen) return value 
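The rewritten reduce above seeds the accumulator with the ufunc's identity, or, when there is none (maximum/minimum), with the first element while advancing the iterator once; reduce_loop then folds the remaining elements. The same control flow in ordinary Python over any iterable (illustrative only, error handling trimmed):

def reduce_loop(func, values, identity=None):
    # values: any iterable; identity: e.g. 0 for add, 1 for multiply, None for maximum
    it = iter(values)
    if identity is None:
        try:
            value = next(it)            # no identity: start from the first element
        except StopIteration:
            raise ValueError("zero-size array to reduce without identity")
    else:
        value = identity
    for item in it:                     # fold everything that is left
        value = func(value, item)
    return value

assert reduce_loop(lambda a, b: a + b, [1, 2, 3, 4], identity=0) == 10
assert reduce_loop(max, [-1.2, 3.4, 5.7, -3.0, 2.7]) == 5.7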
class W_Ufunc1(W_Ufunc): @@ -111,12 +118,13 @@ return self.func(res_dtype, w_obj.value.convert_to(res_dtype)) new_sig = signature.Signature.find_sig([self.signature, w_obj.signature]) - w_res = Call1(new_sig, res_dtype, w_obj) + w_res = Call1(new_sig, w_obj.shape, res_dtype, w_obj, w_obj.order) w_obj.add_invalidates(w_res) return w_res class W_Ufunc2(W_Ufunc): + _immutable_fields_ = ["comparison_func", "func"] argcount = 2 def __init__(self, func, name, promote_to_float=False, promote_bools=False, @@ -130,7 +138,7 @@ def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, - convert_to_array, Scalar) + convert_to_array, Scalar, shape_agreement) [w_lhs, w_rhs] = args_w w_lhs = convert_to_array(space, w_lhs) @@ -153,14 +161,16 @@ new_sig = signature.Signature.find_sig([ self.signature, w_lhs.signature, w_rhs.signature ]) - w_res = Call2(new_sig, calc_dtype, res_dtype, w_lhs, w_rhs) + new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) + w_res = Call2(new_sig, new_shape, calc_dtype, + res_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) return w_res W_Ufunc.typedef = TypeDef("ufunc", - __module__ = "numpy", + __module__ = "numpypy", __call__ = interp2app(W_Ufunc.descr_call), __repr__ = interp2app(W_Ufunc.descr_repr), @@ -317,6 +327,8 @@ ("floor", "floor", 1, {"promote_to_float": True}), ("exp", "exp", 1, {"promote_to_float": True}), + ('sqrt', 'sqrt', 1, {'promote_to_float': True}), + ("sin", "sin", 1, {"promote_to_float": True}), ("cos", "cos", 1, {"promote_to_float": True}), ("tan", "tan", 1, {"promote_to_float": True}), diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -40,13 +40,15 @@ return Signature._known_sigs.setdefault(components, Signature(components)) class Call1(BaseSignature): - _immutable_fields_ = ["func"] + _immutable_fields_ = ["func", "name"] def __init__(self, func): self.func = func + self.name = func.func_name class Call2(BaseSignature): - _immutable_fields_ = ["func"] + _immutable_fields_ = ["func", "name"] def __init__(self, func): - self.func = func \ No newline at end of file + self.func = func + self.name = func.func_name diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -1,6 +1,6 @@ from pypy.conftest import gettestobjspace from pypy.module.micronumpy.interp_dtype import get_dtype_cache -from pypy.module.micronumpy.interp_numarray import SingleDimArray, Scalar +from pypy.module.micronumpy.interp_numarray import W_NDimArray, Scalar from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) @@ -14,7 +14,7 @@ float64_dtype = get_dtype_cache(space).w_float64dtype bool_dtype = get_dtype_cache(space).w_booldtype - ar = SingleDimArray(10, dtype=float64_dtype) + ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) assert v1.signature is not v2.signature @@ -23,7 +23,7 @@ v4 = ar.descr_add(space, ar) assert v1.signature is v4.signature - bool_ar = SingleDimArray(10, dtype=bool_dtype) + bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) assert v5.signature is not v1.signature assert v5.signature is not v2.signature @@ -33,13 +33,13 @@ def test_slice_signature(self, space): float64_dtype = 
get_dtype_cache(space).w_float64dtype - ar = SingleDimArray(10, dtype=float64_dtype) - v1 = ar.descr_getitem(space, space.wrap(slice(1, 5, 1))) + ar = W_NDimArray(10, [10], dtype=float64_dtype) + v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) assert v1.signature is v2.signature - v3 = ar.descr_add(space, v1) - v4 = ar.descr_add(space, v2) + v3 = v2.descr_add(space, v1) + v4 = v1.descr_add(space, v2) assert v3.signature is v4.signature class TestUfuncCoerscion(object): diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -1,6 +1,9 @@ +import py -import py -from pypy.module.micronumpy.compile import * +from pypy.module.micronumpy.compile import (numpy_compile, Assignment, + ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, + FunctionCall, FakeSpace) + class TestCompiler(object): def compile(self, code): @@ -102,10 +105,11 @@ code = """ a = [1,2,3,4] b = [4,5,6,5] - a + b + c = a + b + c -> 3 """ interp = self.run(code) - assert interp.results[0]._getnums(False) == ["5.0", "7.0", "9.0", "9.0"] + assert interp.results[-1].value == 9 def test_array_getitem(self): code = """ @@ -114,7 +118,7 @@ a + b -> 3 """ interp = self.run(code) - assert interp.results[0].value.value == 3 + 6 + assert interp.results[0].value == 3 + 6 def test_range_getitem(self): code = """ @@ -122,7 +126,7 @@ r -> 3 """ interp = self.run(code) - assert interp.results[0].value.value == 6 + assert interp.results[0].value == 6 def test_sum(self): code = """ @@ -140,7 +144,7 @@ a -> 3 """ interp = self.run(code) - assert interp.results[0].value.value == 15 + assert interp.results[0].value == 15 def test_min(self): interp = self.run(""" @@ -161,10 +165,73 @@ assert interp.results[0].value.value == 256 def test_slice(self): - py.test.skip("in progress") interp = self.run(""" a = [1,2,3,4] b = a -> : b -> 3 """) - assert interp.results[0].value.val == 3 + assert interp.results[0].value == 4 + + def test_slice_step(self): + interp = self.run(""" + a = |30| + b = a -> ::2 + b -> 3 + """) + assert interp.results[0].value == 6 + + def test_setslice(self): + interp = self.run(""" + a = |30| + b = |10| + b[1] = 5 + a[::3] = b + a -> 3 + """) + assert interp.results[0].value == 5 + + + def test_slice2(self): + interp = self.run(""" + a = |30| + s1 = a -> 0:20:2 + s2 = a -> 0:30:3 + b = s1 + s2 + b -> 3 + """) + assert interp.results[0].value == 15 + + def test_multidim_getitem(self): + interp = self.run(""" + a = [[1,2]] + a -> 0 -> 1 + """) + assert interp.results[0].value == 2 + + def test_multidim_getitem_2(self): + interp = self.run(""" + a = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + b = a + a + b -> 1 -> 1 + """) + assert interp.results[0].value == 8 + + def test_set_slice(self): + interp = self.run(""" + a = |30| + b = |30| + b[:] = a + a + b -> 3 + """) + assert interp.results[0].value == 6 + + def test_set_slice2(self): + interp = self.run(""" + a = |30| + b = |10| + b[1] = 5.5 + c = b + b + a[0:30:3] = c + a -> 3 + """) + assert interp.results[0].value == 11 diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -30,7 +30,7 @@ def test_repr_str(self): from numpypy import dtype - assert repr(dtype) == "" + assert repr(dtype) == "" d = 
dtype('?') assert repr(d) == "dtype('bool')" assert str(d) == "bool" @@ -44,13 +44,13 @@ assert a[i] is True_ def test_copy_array_with_dtype(self): - from numpypy import array, False_, True_ + from numpypy import array, False_, True_, int64 a = array([0, 1, 2, 3], dtype=long) # int on 64-bit, long in 32-bit - assert isinstance(a[0], (int, long)) + assert isinstance(a[0], int64) b = a.copy() - assert isinstance(b[0], (int, long)) + assert isinstance(b[0], int64) a = array([0, 1, 2, 3], dtype=bool) assert a[0] is False_ @@ -72,17 +72,17 @@ assert a[i] is True_ def test_zeros_long(self): - from numpypy import zeros + from numpypy import zeros, int64 a = zeros(10, dtype=long) for i in range(10): - assert isinstance(a[i], (int, long)) + assert isinstance(a[i], int64) assert a[1] == 0 def test_ones_long(self): - from numpypy import ones + from numpypy import ones, int64 a = ones(10, dtype=long) for i in range(10): - assert isinstance(a[i], (int, long)) + assert isinstance(a[i], int64) assert a[1] == 1 def test_overflow(self): @@ -184,7 +184,7 @@ raises(TypeError, numpy.number, 0) raises(TypeError, numpy.integer, 0) exc = raises(TypeError, numpy.signedinteger, 0) - assert str(exc.value) == "cannot create 'numpy.signedinteger' instances" + assert str(exc.value) == "cannot create 'numpypy.signedinteger' instances" raises(TypeError, numpy.floating, 0) raises(TypeError, numpy.inexact, 0) @@ -218,12 +218,39 @@ assert type(x) is numpy.int8 assert repr(x) == "-128" + assert type(int(x)) is int + assert int(x) == -128 + + def test_int16(self): + import numpypy as numpy + + x = numpy.int16(3) + assert x == 3 + + def test_int32(self): + import numpypy as numpy + + x = numpy.int32(23) + assert x == 23 + def test_int_(self): import numpypy as numpy assert numpy.int_ is numpy.dtype(int).type assert numpy.int_.mro() == [numpy.int_, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object] + def test_int64(self): + import sys + import numpypy as numpy + + if sys.maxint == 2 ** 63 -1: + assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object] + else: + assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object] + + assert numpy.dtype(numpy.int64).type is numpy.int64 + assert numpy.int64(3) == 3 + def test_float64(self): import numpypy as numpy diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1,8 +1,180 @@ + +import py from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest +from pypy.module.micronumpy.interp_numarray import W_NDimArray, shape_agreement +from pypy.module.micronumpy import signature +from pypy.interpreter.error import OperationError from pypy.conftest import gettestobjspace +class MockDtype(object): + signature = signature.BaseSignature() + + def malloc(self, size): + return None + + +class TestNumArrayDirect(object): + def newslice(self, *args): + return self.space.newslice(*[self.space.wrap(arg) for arg in args]) + + def newtuple(self, *args): + args_w = [] + for arg in args: + if isinstance(arg, int): + args_w.append(self.space.wrap(arg)) + else: + args_w.append(arg) + return self.space.newtuple(args_w) + + def test_strides_f(self): + a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'F') + assert a.strides == [1, 10, 50] + assert a.backstrides == [9, 40, 100] + + def test_strides_c(self): + a 
= W_NDimArray(100, [10, 5, 3], MockDtype(), 'C') + assert a.strides == [15, 3, 1] + assert a.backstrides == [135, 12, 2] + + def test_create_slice_f(self): + space = self.space + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + s = a.create_slice(space, [(3, 0, 0, 1)]) + assert s.start == 3 + assert s.strides == [10, 50] + assert s.backstrides == [40, 100] + s = a.create_slice(space, [(1, 9, 2, 4)]) + assert s.start == 1 + assert s.strides == [2, 10, 50] + assert s.backstrides == [6, 40, 100] + s = a.create_slice(space, [(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) + assert s.shape == [2, 1] + assert s.strides == [3, 10] + assert s.backstrides == [3, 0] + s = a.create_slice(space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + assert s.start == 20 + assert s.shape == [10, 3] + + def test_create_slice_c(self): + space = self.space + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + s = a.create_slice(space, [(3, 0, 0, 1)]) + assert s.start == 45 + assert s.strides == [3, 1] + assert s.backstrides == [12, 2] + s = a.create_slice(space, [(1, 9, 2, 4)]) + assert s.start == 15 + assert s.strides == [30, 3, 1] + assert s.backstrides == [90, 12, 2] + s = a.create_slice(space, [(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) + assert s.start == 19 + assert s.shape == [2, 1] + assert s.strides == [45, 3] + assert s.backstrides == [45, 0] + s = a.create_slice(space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + assert s.start == 6 + assert s.shape == [10, 3] + + def test_slice_of_slice_f(self): + space = self.space + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + s = a.create_slice(space, [(5, 0, 0, 1)]) + assert s.start == 5 + s2 = s.create_slice(space, [(3, 0, 0, 1)]) + assert s2.shape == [3] + assert s2.strides == [50] + assert s2.parent is a + assert s2.backstrides == [100] + assert s2.start == 35 + s = a.create_slice(space, [(1, 5, 3, 2)]) + s2 = s.create_slice(space, [(0, 2, 1, 2), (2, 0, 0, 1)]) + assert s2.shape == [2, 3] + assert s2.strides == [3, 50] + assert s2.backstrides == [3, 100] + assert s2.start == 1 * 15 + 2 * 3 + + def test_slice_of_slice_c(self): + space = self.space + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + s = a.create_slice(space, [(5, 0, 0, 1)]) + assert s.start == 15 * 5 + s2 = s.create_slice(space, [(3, 0, 0, 1)]) + assert s2.shape == [3] + assert s2.strides == [1] + assert s2.parent is a + assert s2.backstrides == [2] + assert s2.start == 5 * 15 + 3 * 3 + s = a.create_slice(space, [(1, 5, 3, 2)]) + s2 = s.create_slice(space, [(0, 2, 1, 2), (2, 0, 0, 1)]) + assert s2.shape == [2, 3] + assert s2.strides == [45, 1] + assert s2.backstrides == [45, 2] + assert s2.start == 1 * 15 + 2 * 3 + + def test_negative_step_f(self): + space = self.space + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + s = a.create_slice(space, [(9, -1, -2, 5)]) + assert s.start == 9 + assert s.strides == [-2, 10, 50] + assert s.backstrides == [-8, 40, 100] + + def test_negative_step_c(self): + space = self.space + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + s = a.create_slice(space, [(9, -1, -2, 5)]) + assert s.start == 135 + assert s.strides == [-30, 3, 1] + assert s.backstrides == [-120, 12, 2] + + def test_index_of_single_item_f(self): + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) + assert r == 1 + 2 * 10 + 2 * 50 + s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + r = s._index_of_single_item(self.space, self.newtuple(1, 0)) + assert r == 
a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) + r = s._index_of_single_item(self.space, self.newtuple(1, 1)) + assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 1)) + + def test_index_of_single_item_c(self): + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) + assert r == 1 * 3 * 5 + 2 * 3 + 2 + s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + r = s._index_of_single_item(self.space, self.newtuple(1, 0)) + assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) + r = s._index_of_single_item(self.space, self.newtuple(1, 1)) + assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 1)) + + def test_shape_agreement(self): + assert shape_agreement(self.space, [3], [3]) == [3] + assert shape_agreement(self.space, [1, 2, 3], [1, 2, 3]) == [1, 2, 3] + py.test.raises(OperationError, shape_agreement, self.space, [2], [3]) + assert shape_agreement(self.space, [4, 4], []) == [4, 4] + assert shape_agreement(self.space, + [8, 1, 6, 1], [7, 1, 5]) == [8, 7, 6, 5] + assert shape_agreement(self.space, + [5, 2], [4, 3, 5, 2]) == [4, 3, 5, 2] + + class AppTestNumArray(BaseNumpyAppTest): + def test_ndarray(self): + from numpypy import ndarray, array, dtype + + assert type(ndarray) is type + assert type(array) is not type + a = ndarray((2, 3)) + assert a.shape == (2, 3) + assert a.dtype == dtype(float) + + raises(TypeError, ndarray, [[1], [2], [3]]) + + a = ndarray(3, dtype=int) + assert a.shape == (3,) + assert a.dtype is dtype(int) + def test_type(self): from numpypy import array ar = array(range(5)) @@ -19,8 +191,7 @@ def test_size(self): from numpypy import array - # XXX fixed on multidim branch - #assert array(3).size == 1 + assert array(3).size == 1 a = array([1, 2, 3]) assert a.size == 3 assert (a + a).size == 3 @@ -50,64 +221,17 @@ b = a.copy() for i in xrange(5): assert b[i] == a[i] + a[3] = 22 + assert b[3] == 3 + + a = array(1) + assert a.copy() == a def test_iterator_init(self): from numpypy import array a = array(range(5)) assert a[3] == 3 - def test_repr(self): - from numpypy import array, zeros - a = array(range(5), float) - assert repr(a) == "array([0.0, 1.0, 2.0, 3.0, 4.0])" - a = array([], float) - assert repr(a) == "array([], dtype=float64)" - a = zeros(1001) - assert repr(a) == "array([0.0, 0.0, 0.0, ..., 0.0, 0.0, 0.0])" - a = array(range(5), long) - assert repr(a) == "array([0, 1, 2, 3, 4])" - a = array([], long) - assert repr(a) == "array([], dtype=int64)" - a = array([True, False, True, False], "?") - assert repr(a) == "array([True, False, True, False], dtype=bool)" - - def test_repr_slice(self): - from numpypy import array, zeros - a = array(range(5), float) - b = a[1::2] - assert repr(b) == "array([1.0, 3.0])" - a = zeros(2002) - b = a[::2] - assert repr(b) == "array([0.0, 0.0, 0.0, ..., 0.0, 0.0, 0.0])" - - def test_str(self): - from numpypy import array, zeros - a = array(range(5), float) - assert str(a) == "[0.0 1.0 2.0 3.0 4.0]" - assert str((2*a)[:]) == "[0.0 2.0 4.0 6.0 8.0]" - a = zeros(1001) - assert str(a) == "[0.0 0.0 0.0 ..., 0.0 0.0 0.0]" - - a = array(range(5), dtype=long) - assert str(a) == "[0 1 2 3 4]" - a = array([True, False, True, False], dtype="?") - assert str(a) == "[True False True False]" - - a = array(range(5), dtype="int8") - assert str(a) == "[0 1 2 3 4]" - - a = array(range(5), dtype="int16") - assert str(a) == "[0 1 2 3 4]" - - def test_str_slice(self): - from numpypy import array, zeros - a = 
array(range(5), float) - b = a[1::2] - assert str(b) == "[1.0 3.0]" - a = zeros(2002) - b = a[::2] - assert str(b) == "[0.0 0.0 0.0 ..., 0.0 0.0 0.0]" - def test_getitem(self): from numpypy import array a = array(range(5)) @@ -140,8 +264,8 @@ a = array(range(5)) raises(IndexError, "a[(1,2)] = [0,1]") for i in xrange(5): - a[(i,)] = i+1 - assert a[i] == i+1 + a[(i,)] = i + 1 + assert a[i] == i + 1 a[()] = range(5) for i in xrange(5): assert a[i] == i @@ -154,7 +278,7 @@ assert a[1] == 0. assert a[3] == 1. b[::-1] = b - assert b[0] == 1. + assert b[0] == 0. assert b[1] == 0. def test_setslice_of_slice_array(self): @@ -171,7 +295,7 @@ assert a[3] == 1. assert a[4] == 11. a = zeros(10) - a[::2][::-1][::2] = array(range(1,4)) + a[::2][::-1][::2] = array(range(1, 4)) assert a[8] == 1. assert a[4] == 2. assert a[0] == 3. @@ -191,6 +315,15 @@ assert a[1] == 0. assert a[3] == 0. + def test_scalar(self): + from numpypy import array, dtype + a = array(3) + #assert a[0] == 3 + raises(IndexError, "a[0]") + assert a.size == 1 + assert a.shape == () + assert a.dtype is dtype(int) + def test_len(self): from numpypy import array a = array(range(5)) @@ -222,7 +355,7 @@ def test_add_other(self): from numpypy import array a = array(range(5)) - b = array(range(4, -1, -1)) + b = array([i for i in reversed(range(5))]) c = a + b for i in range(5): assert c[i] == 4 @@ -241,11 +374,11 @@ assert r[i] == i + 3 def test_add_list(self): - from numpypy import array + from numpypy import array, ndarray a = array(range(5)) b = list(reversed(range(5))) c = a + b - assert isinstance(c, array) + assert isinstance(c, ndarray) for i in range(5): assert c[i] == 4 @@ -346,8 +479,10 @@ a = array(range(5), float) b = a ** a for i in range(5): - print b[i], i**i - assert b[i] == i**i + assert b[i] == i ** i + + a = array(range(5)) + assert (a ** 2 == a * a).all() def test_pow_other(self): from numpypy import array @@ -366,7 +501,7 @@ def test_mod(self): from numpypy import array - a = array(range(1,6)) + a = array(range(1, 6)) b = a % a for i in range(5): assert b[i] == 0 @@ -394,7 +529,7 @@ def test_pos(self): from numpypy import array - a = array([1.,-2.,3.,-4.,-5.]) + a = array([1., -2., 3., -4., -5.]) b = +a for i in range(5): assert b[i] == a[i] @@ -405,7 +540,7 @@ def test_neg(self): from numpypy import array - a = array([1.,-2.,3.,-4.,-5.]) + a = array([1., -2., 3., -4., -5.]) b = -a for i in range(5): assert b[i] == -a[i] @@ -416,7 +551,7 @@ def test_abs(self): from numpypy import array - a = array([1.,-2.,3.,-4.,-5.]) + a = array([1., -2., 3., -4., -5.]) b = abs(a) for i in range(5): assert b[i] == abs(a[i]) @@ -445,7 +580,7 @@ s = a[1:5] assert len(s) == 4 for i in range(4): - assert s[i] == a[i+1] + assert s[i] == a[i + 1] s = (a + a)[1:2] assert len(s) == 1 @@ -459,7 +594,7 @@ s = a[1:9:2] assert len(s) == 4 for i in range(4): - assert s[i] == a[2*i+1] + assert s[i] == a[2 * i + 1] def test_slice_update(self): from numpypy import array @@ -470,13 +605,12 @@ a[2] = 20 assert s[2] == 20 - def test_slice_invaidate(self): # check that slice shares invalidation list with from numpypy import array a = array(range(5)) s = a[0:2] - b = array([10,11]) + b = array([10, 11]) c = s + b a[0] = 100 assert c[0] == 10 @@ -503,7 +637,7 @@ def test_prod(self): from numpypy import array - a = array(range(1,6)) + a = array(range(1, 6)) assert a.prod() == 120.0 assert a[:4].prod() == 24.0 @@ -517,7 +651,7 @@ def test_max_add(self): from numpypy import array a = array([-1.2, 3.4, 5.7, -3.0, 2.7]) - assert (a+a).max() == 11.4 + assert (a + 
a).max() == 11.4 def test_min(self): from numpypy import array @@ -529,12 +663,23 @@ def test_argmax(self): from numpypy import array a = array([-1.2, 3.4, 5.7, -3.0, 2.7]) - assert a.argmax() == 2 + r = a.argmax() + assert r == 2 b = array([]) - raises(ValueError, "b.argmax()") + raises(ValueError, b.argmax) a = array(range(-5, 5)) - assert a.argmax() == 9 + r = a.argmax() + assert r == 9 + b = a[::2] + r = b.argmax() + assert r == 4 + r = (a + a).argmax() + assert r == 9 + a = array([1, 0, 0]) + assert a.argmax() == 0 + a = array([0, 0, 1]) + assert a.argmax() == 2 def test_argmin(self): from numpypy import array @@ -562,12 +707,14 @@ assert c.any() == False def test_dot(self): - from numpypy import array + from numpypy import array, dot a = array(range(5)) assert a.dot(a) == 30.0 a = array(range(5)) assert a.dot(range(5)) == 30 + assert dot(range(5), range(5)) == 30 + assert (dot(5, [1, 2, 3]) == [5, 10, 15]).all() def test_dot_constant(self): from numpypy import array @@ -614,6 +761,17 @@ for i in xrange(5): assert c[i] == func(b[i], 3) + def test_nonzero(self): + from numpypy import array + + a = array([1, 2]) + raises(ValueError, bool, a) + raises(ValueError, bool, a == a) + assert bool(array(1)) + assert not bool(array(0)) + assert bool(array([1])) + assert not bool(array([0])) + def test_complex_basic(self): from numpypy import array @@ -624,6 +782,265 @@ assert x[2].real == 3 assert x[2].imag == 0 + def test_slice_assignment(self): + from numpypy import array + a = array(range(5)) + a[::-1] = a + assert (a == [0, 1, 2, 1, 0]).all() + # but we force intermediates + a = array(range(5)) + a[::-1] = a + a + assert (a == [8, 6, 4, 2, 0]).all() + + def test_debug_repr(self): + from numpypy import zeros, sin + a = zeros(1) + assert a.__debug_repr__() == 'Array' + assert (a + a).__debug_repr__() == 'Call2(add, Array, Array)' + assert (a[::2]).__debug_repr__() == 'Slice(Array)' + assert (a + 2).__debug_repr__() == 'Call2(add, Array, Scalar)' + assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, FlatIter(Array))' + assert sin(a).__debug_repr__() == 'Call1(sin, Array)' + b = a + a + b[0] = 3 + assert b.__debug_repr__() == 'Call2(add, forced=Array)' + +class AppTestMultiDim(BaseNumpyAppTest): + def test_init(self): + import numpypy + a = numpypy.zeros((2, 2)) + assert len(a) == 2 + + def test_shape(self): + import numpypy + assert numpypy.zeros(1).shape == (1,) + assert numpypy.zeros((2, 2)).shape == (2, 2) + assert numpypy.zeros((3, 1, 2)).shape == (3, 1, 2) + assert numpypy.array([[1], [2], [3]]).shape == (3, 1) + assert len(numpypy.zeros((3, 1, 2))) == 3 + raises(TypeError, len, numpypy.zeros(())) + raises(ValueError, numpypy.array, [[1, 2], 3]) + + def test_getsetitem(self): + import numpypy + a = numpypy.zeros((2, 3, 1)) + raises(IndexError, a.__getitem__, (2, 0, 0)) + raises(IndexError, a.__getitem__, (0, 3, 0)) + raises(IndexError, a.__getitem__, (0, 0, 1)) + assert a[1, 1, 0] == 0 + a[1, 2, 0] = 3 + assert a[1, 2, 0] == 3 + assert a[1, 1, 0] == 0 + assert a[1, -1, 0] == 3 + + def test_slices(self): + import numpypy + a = numpypy.zeros((4, 3, 2)) + raises(IndexError, a.__getitem__, (4,)) + raises(IndexError, a.__getitem__, (3, 3)) + raises(IndexError, a.__getitem__, (slice(None), 3)) + a[0, 1, 1] = 13 + a[1, 2, 1] = 15 + b = a[0] + assert len(b) == 3 + assert b.shape == (3, 2) + assert b[1, 1] == 13 + b = a[1] + assert b.shape == (3, 2) + assert b[2, 1] == 15 + b = a[:, 1] + assert b.shape == (4, 2) + assert b[0, 1] == 13 + b = a[:, 1, :] + assert b.shape == (4, 2) + assert b[0, 
1] == 13 + b = a[1, 2] + assert b[1] == 15 + b = a[:] + assert b.shape == (4, 3, 2) + assert b[1, 2, 1] == 15 + assert b[0, 1, 1] == 13 + b = a[:][:, 1][:] + assert b[2, 1] == 0.0 + assert b[0, 1] == 13 + raises(IndexError, b.__getitem__, (4, 1)) + assert a[0][1][1] == 13 + assert a[1][2][1] == 15 + + def test_init_2(self): + import numpypy + raises(ValueError, numpypy.array, [[1], 2]) + raises(ValueError, numpypy.array, [[1, 2], [3]]) + raises(ValueError, numpypy.array, [[[1, 2], [3, 4], 5]]) + raises(ValueError, numpypy.array, [[[1, 2], [3, 4], [5]]]) + a = numpypy.array([[1, 2], [4, 5]]) + assert a[0, 1] == 2 + assert a[0][1] == 2 + a = numpypy.array(([[[1, 2], [3, 4], [5, 6]]])) + assert (a[0, 1] == [3, 4]).all() + + def test_setitem_slice(self): + import numpypy + a = numpypy.zeros((3, 4)) + a[1] = [1, 2, 3, 4] + assert a[1, 2] == 3 + raises(TypeError, a[1].__setitem__, [1, 2, 3]) + a = numpypy.array([[1, 2], [3, 4]]) + assert (a == [[1, 2], [3, 4]]).all() + a[1] = numpypy.array([5, 6]) + assert (a == [[1, 2], [5, 6]]).all() + a[:, 1] = numpypy.array([8, 10]) + assert (a == [[1, 8], [5, 10]]).all() + a[0, :: -1] = numpypy.array([11, 12]) + assert (a == [[12, 11], [5, 10]]).all() + + def test_ufunc(self): + from numpypy import array + a = array([[1, 2], [3, 4], [5, 6]]) + assert ((a + a) == \ + array([[1 + 1, 2 + 2], [3 + 3, 4 + 4], [5 + 5, 6 + 6]])).all() + + def test_getitem_add(self): + from numpypy import array + a = array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]) + assert (a + a)[1, 1] == 8 + + def test_ufunc_negative(self): + from numpypy import array, negative + a = array([[1, 2], [3, 4]]) + b = negative(a + a) + assert (b == [[-2, -4], [-6, -8]]).all() + + def test_getitem_3(self): + from numpypy import array + a = array([[1, 2], [3, 4], [5, 6], [7, 8], + [9, 10], [11, 12], [13, 14]]) + b = a[::2] + print a + print b + assert (b == [[1, 2], [5, 6], [9, 10], [13, 14]]).all() + c = b + b + assert c[1][1] == 12 + + def test_multidim_ones(self): + from numpypy import ones + a = ones((1, 2, 3)) + assert a[0, 1, 2] == 1.0 + + def test_broadcast_ufunc(self): + from numpypy import array + a = array([[1, 2], [3, 4], [5, 6]]) + b = array([5, 6]) + c = ((a + b) == [[1 + 5, 2 + 6], [3 + 5, 4 + 6], [5 + 5, 6 + 6]]) + assert c.all() + + def test_broadcast_setslice(self): + from numpypy import zeros, ones + a = zeros((100, 100)) + b = ones(100) + a[:, :] = b + assert a[13, 15] == 1 + + def test_broadcast_shape_agreement(self): + from numpypy import zeros, array + a = zeros((3, 1, 3)) + b = array(((10, 11, 12), (20, 21, 22), (30, 31, 32))) + c = ((a + b) == [b, b, b]) + assert c.all() + a = array((((10, 11, 12), ), ((20, 21, 22), ), ((30, 31, 32), ))) + assert(a.shape == (3, 1, 3)) + d = zeros((3, 3)) + c = ((a + d) == [b, b, b]) + c = ((a + d) == array([[[10., 11., 12.]] * 3, + [[20., 21., 22.]] * 3, [[30., 31., 32.]] * 3])) + assert c.all() + + def test_broadcast_scalar(self): + from numpypy import zeros + a = zeros((4, 5), 'd') + a[:, 1] = 3 + assert a[2, 1] == 3 + assert a[0, 2] == 0 + a[0, :] = 5 + assert a[0, 3] == 5 + assert a[2, 1] == 3 + assert a[3, 2] == 0 + + def test_broadcast_call2(self): + from numpypy import zeros, ones + a = zeros((4, 1, 5)) + b = ones((4, 3, 5)) + b[:] = (a + a) + assert (b == zeros((4, 3, 5))).all() + + def test_argmax(self): + from numpypy import array + a = array([[1, 2], [3, 4], [5, 6]]) + assert a.argmax() == 5 + assert a[:2, ].argmax() == 3 + + def test_broadcast_wrong_shapes(self): + from numpypy import zeros + a = zeros((4, 3, 2)) + b = zeros((4, 2)) 
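These multidimensional and broadcasting tests all sit on top of the stride layout checked by test_strides_c and test_strides_f further up: 'C' order accumulates strides from the last dimension, 'F' order from the first, and each backstride is (dim - 1) * stride, the distance from the first to the last element along that axis. A quick sketch reproducing those numbers (illustrative; the real calculation is part of the interp-level array construction):

def calc_strides(shape, order):
    # element strides (not byte strides), matching the expectations in
    # test_strides_c and test_strides_f above
    strides = [0] * len(shape)
    s = 1
    if order == 'C':
        dims = range(len(shape) - 1, -1, -1)   # last dimension varies fastest
    else:
        dims = range(len(shape))               # 'F': first dimension varies fastest
    for i in dims:
        strides[i] = s
        s *= shape[i]
    backstrides = [(shape[i] - 1) * strides[i] for i in range(len(shape))]
    return strides, backstrides

assert calc_strides([10, 5, 3], 'C') == ([15, 3, 1], [135, 12, 2])
assert calc_strides([10, 5, 3], 'F') == ([1, 10, 50], [9, 40, 100])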
+ exc = raises(ValueError, lambda: a + b) + assert str(exc.value) == "operands could not be broadcast" \ + " together with shapes (4,3,2) (4,2)" + + def test_reduce(self): + from numpypy import array + a = array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) + assert a.sum() == (13 * 12) / 2 + b = a[1:, 1::2] + c = b + b + assert c.sum() == (6 + 8 + 10 + 12) * 2 + + def test_transpose(self): + from numpypy import array + a = array(((range(3), range(3, 6)), + (range(6, 9), range(9, 12)), + (range(12, 15), range(15, 18)), + (range(18, 21), range(21, 24)))) + assert a.shape == (4, 2, 3) + b = a.T + assert b.shape == (3, 2, 4) + assert(b[0, :, 0] == [0, 3]).all() + b[:, 0, 0] = 1000 + assert(a[0, 0, :] == [1000, 1000, 1000]).all() + a = array(range(5)) + b = a.T + assert(b == range(5)).all() + a = array((range(10), range(20, 30))) + b = a.T + assert(b[:, 0] == a[0, :]).all() + + def test_flatiter(self): + from numpypy import array, flatiter + a = array([[10, 30], [40, 60]]) + f_iter = a.flat + assert f_iter.next() == 10 + assert f_iter.next() == 30 + assert f_iter.next() == 40 + assert f_iter.next() == 60 + raises(StopIteration, "f_iter.next()") + raises(TypeError, "flatiter()") + s = 0 + for k in a.flat: + s += k + assert s == 140 + + def test_flatiter_array_conv(self): + from numpypy import array, dot + a = array([1, 2, 3]) + assert dot(a.flat, a.flat) == 14 + + def test_slice_copy(self): + from numpypy import zeros + a = zeros((10, 10)) + b = a[0].copy() + assert (b == zeros(10)).all() +>>>>>>> other + class AppTestSupport(BaseNumpyAppTest): def setup_class(cls): import struct @@ -636,3 +1053,119 @@ for i in range(4): assert a[i] == i + 1 raises(ValueError, fromstring, "abc") + + +class AppTestRepr(BaseNumpyAppTest): + def test_repr(self): + from numpypy import array, zeros + a = array(range(5), float) + assert repr(a) == "array([0.0, 1.0, 2.0, 3.0, 4.0])" + a = array([], float) + assert repr(a) == "array([], dtype=float64)" + a = zeros(1001) + assert repr(a) == "array([0.0, 0.0, 0.0, ..., 0.0, 0.0, 0.0])" + a = array(range(5), long) + assert repr(a) == "array([0, 1, 2, 3, 4])" + a = array([], long) + assert repr(a) == "array([], dtype=int64)" + a = array([True, False, True, False], "?") + assert repr(a) == "array([True, False, True, False], dtype=bool)" + + def test_repr_multi(self): + from numpypy import array, zeros + a = zeros((3, 4)) + assert repr(a) == '''array([[0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0]])''' + a = zeros((2, 3, 4)) + assert repr(a) == '''array([[[0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0]], + + [[0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0]]])''' + + def test_repr_slice(self): + from numpypy import array, zeros + a = array(range(5), float) + b = a[1::2] + assert repr(b) == "array([1.0, 3.0])" + a = zeros(2002) + b = a[::2] + assert repr(b) == "array([0.0, 0.0, 0.0, ..., 0.0, 0.0, 0.0])" + a = array((range(5), range(5, 10)), dtype="int16") + b = a[1, 2:] + assert repr(b) == "array([7, 8, 9], dtype=int16)" + # an empty slice prints its shape + b = a[2:1, ] + assert repr(b) == "array([], shape=(0, 5), dtype=int16)" + + def test_str(self): + from numpypy import array, zeros + a = array(range(5), float) + assert str(a) == "[0.0 1.0 2.0 3.0 4.0]" + assert str((2 * a)[:]) == "[0.0 2.0 4.0 6.0 8.0]" + a = zeros(1001) + assert str(a) == "[0.0 0.0 0.0 ..., 0.0 0.0 0.0]" + + a = array(range(5), dtype=long) + assert str(a) == "[0 1 2 3 4]" + a = array([True, False, True, False], dtype="?") + assert str(a) 
== "[True False True False]" + + a = array(range(5), dtype="int8") + assert str(a) == "[0 1 2 3 4]" + + a = array(range(5), dtype="int16") + assert str(a) == "[0 1 2 3 4]" + + a = array((range(5), range(5, 10)), dtype="int16") + assert str(a) == "[[0 1 2 3 4]\n [5 6 7 8 9]]" + + a = array(3, dtype=int) + assert str(a) == "3" + + a = zeros((400, 400), dtype=int) + assert str(a) == "[[0 0 0 ..., 0 0 0]\n [0 0 0 ..., 0 0 0]\n" \ + " [0 0 0 ..., 0 0 0]\n ..., \n [0 0 0 ..., 0 0 0]\n" \ + " [0 0 0 ..., 0 0 0]\n [0 0 0 ..., 0 0 0]]" + a = zeros((2, 2, 2)) + r = str(a) + assert r == '[[[0.0 0.0]\n [0.0 0.0]]\n\n [[0.0 0.0]\n [0.0 0.0]]]' + + def test_str_slice(self): + from numpypy import array, zeros + a = array(range(5), float) + b = a[1::2] + assert str(b) == "[1.0 3.0]" + a = zeros(2002) + b = a[::2] + assert str(b) == "[0.0 0.0 0.0 ..., 0.0 0.0 0.0]" + a = array((range(5), range(5, 10)), dtype="int16") + b = a[1, 2:] + assert str(b) == "[7 8 9]" + b = a[2:1, ] + assert str(b) == "[]" + + +class AppTestRanges(BaseNumpyAppTest): + def test_arange(self): + from numpypy import arange, array, dtype + a = arange(3) + assert (a == [0, 1, 2]).all() + assert a.dtype is dtype(int) + a = arange(3.0) + assert (a == [0., 1., 2.]).all() + assert a.dtype is dtype(float) + a = arange(3, 7) + assert (a == [3, 4, 5, 6]).all() + assert a.dtype is dtype(int) + a = arange(3, 7, 2) + assert (a == [3, 5]).all() + a = arange(3, dtype=float) + assert (a == [0., 1., 2.]).all() + assert a.dtype is dtype(float) + a = arange(0, 0.8, 0.1) + assert len(a) == 8 + assert arange(False, True, True).dtype is dtype(int) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -8,7 +8,7 @@ assert isinstance(add, ufunc) assert repr(add) == "" - assert repr(ufunc) == "" + assert repr(ufunc) == "" def test_ufunc_attrs(self): from numpypy import add, multiply, sin @@ -37,36 +37,36 @@ assert minimum(2.0, 3.0) == 2.0 def test_sequence(self): - from numpypy import array, negative, minimum + from numpypy import array, ndarray, negative, minimum a = array(range(3)) b = [2.0, 1.0, 0.0] c = 1.0 b_neg = negative(b) - assert isinstance(b_neg, array) + assert isinstance(b_neg, ndarray) for i in range(3): assert b_neg[i] == -b[i] min_a_b = minimum(a, b) - assert isinstance(min_a_b, array) + assert isinstance(min_a_b, ndarray) for i in range(3): assert min_a_b[i] == min(a[i], b[i]) min_b_a = minimum(b, a) - assert isinstance(min_b_a, array) + assert isinstance(min_b_a, ndarray) for i in range(3): assert min_b_a[i] == min(a[i], b[i]) min_a_c = minimum(a, c) - assert isinstance(min_a_c, array) + assert isinstance(min_a_c, ndarray) for i in range(3): assert min_a_c[i] == min(a[i], c) min_c_a = minimum(c, a) - assert isinstance(min_c_a, array) + assert isinstance(min_c_a, ndarray) for i in range(3): assert min_c_a[i] == min(a[i], c) min_b_c = minimum(b, c) - assert isinstance(min_b_c, array) + assert isinstance(min_b_c, ndarray) for i in range(3): assert min_b_c[i] == min(b[i], c) min_c_b = minimum(c, b) - assert isinstance(min_c_b, array) + assert isinstance(min_c_b, ndarray) for i in range(3): assert min_c_b[i] == min(b[i], c) @@ -111,6 +111,8 @@ for i in range(3): assert c[i] == a[i] / b[i] + assert (divide(array([-10]), array([2])) == array([-5])).all() + def test_fabs(self): from numpypy import array, fabs from math import fabs as math_fabs @@ -319,6 +321,17 @@ for v in [1.0, -1.0]: assert arctanh(v) 
== math.copysign(float("inf"), v) + def test_sqrt(self): + import math + from numpypy import sqrt + + nan, inf = float("nan"), float("inf") + data = [1, 2, 3, inf] + results = [math.sqrt(1), math.sqrt(2), math.sqrt(3), inf] + assert (sqrt(data) == results).all() + assert math.isnan(sqrt(-1)) + assert math.isnan(sqrt(nan)) + def test_reduce_errors(self): from numpypy import sin, add diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -1,96 +1,131 @@ + +""" Tests that check if JIT-compiled numpy operations produce reasonably +good assembler +""" + +import py + +from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.test.support import LLJitMixin +from pypy.jit.metainterp.warmspot import reset_stats from pypy.module.micronumpy import interp_boxes, interp_ufuncs, signature -from pypy.module.micronumpy.compile import (FakeSpace, - FloatObject, IntObject, numpy_compile, BoolObject) -from pypy.module.micronumpy.interp_numarray import (BaseArray, SingleDimArray, - SingleDimSlice) +from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, + FloatObject, IntObject, BoolObject, Parser, InterpreterState) +from pypy.module.micronumpy.interp_numarray import (W_NDimArray, NDimSlice, + BaseArray) from pypy.rlib.nonconst import NonConstant from pypy.rpython.annlowlevel import llstr, hlstr -from pypy.jit.metainterp.warmspot import reset_stats -from pypy.jit.metainterp import pyjitpl - -import py class TestNumpyJIt(LLJitMixin): graph = None interp = None - def run(self, code): + def setup_class(cls): + default = """ + a = [1,2,3,4] + c = a + b + sum(c) -> 1::1 + a -> 3:1:2 + """ + + d = {} + p = Parser() + allcodes = [p.parse(default)] + for name, meth in cls.__dict__.iteritems(): + if name.startswith("define_"): + code = meth() + d[name[len("define_"):]] = len(allcodes) + allcodes.append(p.parse(code)) + cls.code_mapping = d + cls.codes = allcodes + + def run(self, name): space = FakeSpace() + i = self.code_mapping[name] + codes = self.codes - def f(code): - interp = numpy_compile(hlstr(code)) + def f(i): + interp = InterpreterState(codes[i]) interp.run(space) - res = interp.results[-1] - assert isinstance(res, BaseArray) - w_res = res.eval(0) - if isinstance(w_res, interp_boxes.W_BoolBox): + w_res = interp.results[-1] + if isinstance(w_res, BaseArray): + w_res = w_res.eval(w_res.start_iter()) + + if isinstance(w_res, interp_boxes.W_Float64Box): + return w_res.value + elif isinstance(w_res, interp_boxes.W_BoolBox): return float(w_res.value) - elif isinstance(w_res, interp_boxes.W_Float64Box): - return w_res.value - elif isinstance(w_res, interp_boxes.W_LongBox): - return w_res.value - else: - return -42. 
+ raise TypeError(w_res) if self.graph is None: - interp, graph = self.meta_interp(f, [llstr(code)], + interp, graph = self.meta_interp(f, [i], listops=True, backendopt=True, graph_and_interp_only=True) self.__class__.interp = interp self.__class__.graph = graph - reset_stats() pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() - return self.interp.eval_graph(self.graph, [llstr(code)]) + return self.interp.eval_graph(self.graph, [i]) - def test_add(self): - result = self.run(""" + def define_add(): + return """ a = |30| b = a + a b -> 3 - """) - self.check_loops({'getinteriorfield_raw': 2, 'float_add': 1, - 'setinteriorfield_raw': 1, 'int_add': 1, - 'int_lt': 1, 'guard_true': 1, 'jump': 1}) + """ + + def test_add(self): + result = self.run("add") + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, + 'int_ge': 1, 'guard_false': 1, 'jump': 1}) assert result == 3 + 3 - def test_floatadd(self): - result = self.run(""" + def define_float_add(): + return """ a = |30| + 3 a -> 3 - """) + """ + + def test_floatadd(self): + result = self.run("float_add") assert result == 3 + 3 - self.check_loops({"getinteriorfield_raw": 1, "float_add": 1, - "setinteriorfield_raw": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 1, "float_add": 1, + "setinteriorfield_raw": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) - def test_sum(self): - result = self.run(""" + def define_sum(): + return """ a = |30| b = a + a sum(b) - """) + """ + + def test_sum(self): + result = self.run("sum") assert result == 2 * sum(range(30)) - self.check_loops({"getinteriorfield_raw": 2, "float_add": 2, - "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 2, + "int_add": 2, "int_ge": 1, "guard_false": 1, + "jump": 1}) - def test_prod(self): - result = self.run(""" + def define_prod(): + return """ a = |30| b = a + a prod(b) - """) + """ + + def test_prod(self): + result = self.run("prod") expected = 1 for i in range(30): expected *= i * 2 assert result == expected - self.check_loops({"getinteriorfield_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) def test_max(self): py.test.skip("broken, investigate") @@ -101,9 +136,9 @@ max(b) """) assert result == 256 - self.check_loops({"getinteriorfield_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 1, + "int_lt": 1, "guard_true": 1, "jump": 1}) def test_min(self): py.test.skip("broken, investigate") @@ -114,54 +149,66 @@ min(b) """) assert result == -24 - self.check_loops({"getinteriorfield_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 1, + "int_lt": 1, "guard_true": 1, "jump": 1}) - def test_any(self): - result = self.run(""" + def define_any(): + return """ a = [0,0,0,0,0,0,0,0,0,0,0] a[8] = -12 b = a + a any(b) - """) + """ + + def test_any(self): + result = self.run("any") assert result == 1 - self.check_loops({"getinteriorfield_raw": 2, "float_add": 1, - "float_ne": 1, "int_add": 1, 
- "int_lt": 1, "guard_true": 1, "jump": 1, - "guard_false": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_ne": 1, "int_add": 2, + "int_ge": 1, "jump": 1, + "guard_false": 2}) - def test_already_forced(self): - result = self.run(""" + def define_already_forced(): + return """ a = |30| b = a + 4.5 b -> 5 # forces c = b * 8 c -> 5 - """) + """ + + def test_already_forced(self): + result = self.run("already_forced") assert result == (5 + 4.5) * 8 # This is the sum of the ops for both loops, however if you remove the # optimization then you end up with 2 float_adds, so we can still be # sure it was optimized correctly. - self.check_loops({"getinteriorfield_raw": 2, "float_mul": 1, "float_add": 1, - "setinteriorfield_raw": 2, "int_add": 2, - "int_lt": 2, "guard_true": 2, "jump": 2}) + # XXX the comment above is wrong now. We need preferrably a way to + # count the two loops separately + self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, + 'guard_class': 22, 'int_add': 8, 'float_mul': 2, + 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, + 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, + 'guard_value': 2}) - def test_ufunc(self): - result = self.run(""" + def define_ufunc(): + return """ a = |30| b = a + a c = unegative(b) c -> 3 - """) + """ + + def test_ufunc(self): + result = self.run("ufunc") assert result == -6 - self.check_loops({"getinteriorfield_raw": 2, "float_add": 1, "float_neg": 1, - "setinteriorfield_raw": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1, - }) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_neg": 1, + "setinteriorfield_raw": 1, "int_add": 3, + "int_ge": 1, "guard_false": 1, "jump": 1}) - def test_specialization(self): - self.run(""" + def define_specialization(): + return """ a = |30| b = a + a c = unegative(b) @@ -178,84 +225,120 @@ d = a * a unegative(d) d -> 3 - """) + """ + + def test_specialization(self): + self.run("specialization") # This is 3, not 2 because there is a bridge for the exit. 
self.check_loop_count(3) + def define_slice(): + return """ + a = |30| + b = a -> ::3 + c = b + b + c -> 3 + """ + + def test_slice(self): + result = self.run("slice") + assert result == 18 + self.check_simple_loop({'getinteriorfield_raw': 2, + 'float_add': 1, + 'setinteriorfield_raw': 1, + 'int_add': 3, + 'int_ge': 1, 'guard_false': 1, + 'jump': 1}) + + def define_slice2(): + return """ + a = |30| + s1 = a -> :20:2 + s2 = a -> :30:3 + b = s1 + s2 + b -> 3 + """ + + def test_slice2(self): + result = self.run("slice2") + assert result == 15 + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, + 'int_ge': 1, 'guard_false': 1, 'jump': 1}) + + def define_multidim(): + return """ + a = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + b = a + a + b -> 1 -> 1 + """ + + def test_multidim(self): + result = self.run('multidim') + assert result == 8 + # int_add might be 1 here if we try slightly harder with + # reusing indexes or some optimization + self.check_simple_loop({'float_add': 1, 'getinteriorfield_raw': 2, + 'guard_false': 1, 'int_add': 3, 'int_ge': 1, + 'jump': 1, 'setinteriorfield_raw': 1}) + + def define_multidim_slice(): + return """ + a = [[1, 2, 3, 4], [3, 4, 5, 6], [5, 6, 7, 8], [7, 8, 9, 10], [9, 10, 11, 12], [11, 12, 13, 14], [13, 14, 15, 16], [16, 17, 18, 19]] + b = a -> ::2 + c = b + b + c -> 1 -> 1 + """ + + def test_multidim_slice(self): + result = self.run('multidim_slice') + assert result == 12 + py.test.skip("improve") + # XXX the bridge here is scary. Hopefully jit-targets will fix that, + # otherwise it looks kind of good + self.check_simple_loop({}) + + def define_broadcast(): + return """ + a = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] + b = [1, 2, 3, 4] + c = a + b + c -> 1 -> 2 + """ + + def test_broadcast(self): + result = self.run("broadcast") + assert result == 10 + py.test.skip("improve") + self.check_simple_loop({}) + + def define_setslice(): + return """ + a = |30| + b = |10| + b[1] = 5.5 + c = b + b + a[0:30:3] = c + a -> 3 + """ + + def test_setslice(self): + result = self.run("setslice") + assert result == 11.0 + self.check_loop_count(1) + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, + 'setinteriorfield_raw': 1, 'int_add': 3, + 'int_eq': 1, 'guard_false': 1, 'jump': 1}) class TestNumpyOld(LLJitMixin): def setup_class(cls): + py.test.skip("old") from pypy.module.micronumpy.compile import FakeSpace from pypy.module.micronumpy.interp_dtype import get_dtype_cache cls.space = FakeSpace() cls.float64_dtype = get_dtype_cache(cls.space).w_float64dtype - def test_slice(self): - def f(i): - step = 3 - ar = SingleDimArray(step*i, dtype=self.float64_dtype) - new_sig = signature.Signature.find_sig([ - SingleDimSlice.signature, ar.signature - ]) - s = SingleDimSlice(0, step*i, step, i, ar, new_sig) - v = interp_ufuncs.get(self.space).add.call(self.space, [s, s]) - v = v.get_concrete().eval(3) - assert isinstance(v, interp_boxes.W_Float64Box) - return v.value - - result = self.meta_interp(f, [5], listops=True, backendopt=True) - self.check_loops({'int_mul': 1, 'getinteriorfield_raw': 2, 'float_add': 1, - 'setinteriorfield_raw': 1, 'int_add': 1, - 'int_lt': 1, 'guard_true': 1, 'jump': 1}) - assert result == f(5) - - def test_slice2(self): - def f(i): - step1 = 2 - step2 = 3 - ar = SingleDimArray(step2*i, dtype=self.float64_dtype) - new_sig = signature.Signature.find_sig([ - SingleDimSlice.signature, ar.signature - ]) - s1 = SingleDimSlice(0, step1*i, step1, i, ar, new_sig) - new_sig = 
signature.Signature.find_sig([ - SingleDimSlice.signature, s1.signature - ]) - s2 = SingleDimSlice(0, step2*i, step2, i, ar, new_sig) - v = interp_ufuncs.get(self.space).add.call(self.space, [s1, s2]) - v = v.get_concrete().eval(3) - assert isinstance(v, interp_boxes.W_Float64Box) - return v.value - - result = self.meta_interp(f, [5], listops=True, backendopt=True) - self.check_loops({'int_mul': 2, 'getinteriorfield_raw': 2, 'float_add': 1, - 'setinteriorfield_raw': 1, 'int_add': 1, - 'int_lt': 1, 'guard_true': 1, 'jump': 1}) - assert result == f(5) - - def test_setslice(self): - space = self.space - float64_dtype = self.float64_dtype - - def f(i): - step = NonConstant(3) - ar = SingleDimArray(step*i, dtype=float64_dtype) - ar2 = SingleDimArray(i, dtype=float64_dtype) - ar2.get_concrete().setitem(1, float64_dtype.box(5.5)) - arg = ar2.descr_add(space, ar2) - ar.setslice(space, 0, step*i, step, i, arg) - v = ar.get_concrete().eval(3) - assert isinstance(v, interp_boxes.W_Float64Box) - return v.value - - result = self.meta_interp(f, [5], listops=True, backendopt=True) - self.check_loops({'getinteriorfield_raw': 2, - 'float_add' : 1, - 'setinteriorfield_raw': 1, 'int_add': 2, - 'int_lt': 1, 'guard_true': 1, 'jump': 1}) - assert result == 11.0 - def test_int32_sum(self): py.test.skip("pypy/jit/backend/llimpl.py needs to be changed to " "deal correctly with int dtypes for this test to " @@ -269,7 +352,7 @@ dtype = float64_dtype else: dtype = int32_dtype - ar = SingleDimArray(n, dtype=dtype) + ar = W_NDimArray(n, [n], dtype=dtype) i = 0 while i < n: ar.get_concrete().setitem(i, int32_dtype.box(7)) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -10,6 +10,7 @@ def simple_unary_op(func): + specialize.argtype(1)(func) @functools.wraps(func) def dispatcher(self, v): return self.box( @@ -21,6 +22,7 @@ return dispatcher def simple_binary_op(func): + specialize.argtype(1, 2)(func) @functools.wraps(func) def dispatcher(self, v1, v2): return self.box( @@ -33,6 +35,7 @@ return dispatcher def raw_binary_op(func): + specialize.argtype(1, 2)(func) @functools.wraps(func) def dispatcher(self, v1, v2): return func(self, @@ -206,6 +209,18 @@ def mod(self, v1, v2): return v1 % v2 + @simple_binary_op + def pow(self, v1, v2): + res = 1 + while v2 > 0: + if v2 & 1: + res *= v1 + v2 >>= 1 + if v2 == 0: + break + v1 *= v1 + return res + @simple_unary_op def sign(self, v): if v > 0: @@ -357,6 +372,13 @@ return rfloat.NAN return math.atanh(v) + @simple_unary_op + def sqrt(self, v): + try: + return math.sqrt(v) + except ValueError: + return rfloat.NAN + class Float32(BaseType, Float): T = rffi.FLOAT diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -3,7 +3,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped from pypy.rlib import rmmap -from pypy.rlib.rmmap import RValueError, RTypeError, ROverflowError +from pypy.rlib.rmmap import RValueError, RTypeError class W_MMap(Wrappable): @@ -212,8 +212,6 @@ raise OperationError(space.w_ValueError, space.wrap(e.message)) except RTypeError, e: raise OperationError(space.w_TypeError, space.wrap(e.message)) - except ROverflowError, e: - raise OperationError(space.w_OverflowError, space.wrap(e.message)) return space.wrap(self) elif rmmap._MS_WINDOWS: @@ -233,8 +231,6 @@ raise 
OperationError(space.w_ValueError, space.wrap(e.message)) except RTypeError, e: raise OperationError(space.w_TypeError, space.wrap(e.message)) - except ROverflowError, e: - raise OperationError(space.w_OverflowError, space.wrap(e.message)) return space.wrap(self) W_MMap.typedef = TypeDef("mmap", diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -3,9 +3,7 @@ from pypy.interpreter.gateway import NoneNotWrapped from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import OperationError -from pypy.objspace.descroperation import object_setattr from pypy.rlib import rgc -from pypy.rlib.unroll import unrolling_iterable from pypy.rpython.lltypesystem import rffi, lltype from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo @@ -339,8 +337,6 @@ 'XML_SetUnknownEncodingHandler', [XML_Parser, callback_type, rffi.VOIDP], lltype.Void) -ENUMERATE_SETTERS = unrolling_iterable(SETTERS.items()) - # Declarations of external functions XML_ParserCreate = expat_external( @@ -545,15 +541,19 @@ self.buffer_used = 0 return False + def gethandler(self, space, name, index): + if name == 'CharacterDataHandler': + return self.w_character_data_handler or space.w_None + return self.handlers[index] + def sethandler(self, space, name, w_handler, index, setter, handler): - if name == 'CharacterDataHandler': self.flush_character_buffer(space) if space.is_w(w_handler, space.w_None): self.w_character_data_handler = None else: self.w_character_data_handler = w_handler - + # self.handlers[index] = w_handler setter(self.itself, handler) @@ -580,21 +580,29 @@ return True - @unwrap_spec(name=str) - def setattr(self, space, name, w_value): - if name == "namespace_prefixes": - XML_SetReturnNSTriplet(self.itself, space.int_w(w_value)) - return + @staticmethod + def _make_property(name): + index, setter, handler = SETTERS[name] + # + def descr_get_property(self, space): + return self.gethandler(space, name, index) + # + def descr_set_property(self, space, w_value): + return self.sethandler(space, name, w_value, + index, setter, handler) + # + return GetSetProperty(descr_get_property, + descr_set_property, + cls=W_XMLParserType) - for handler_name, (index, setter, handler) in ENUMERATE_SETTERS: - if name == handler_name: - return self.sethandler(space, handler_name, w_value, - index, setter, handler) - # fallback to object.__setattr__() - return space.call_function( - object_setattr(space), - space.wrap(self), space.wrap(name), w_value) + def get_namespace_prefixes(self, space): + raise OperationError(space.w_AttributeError, + space.wrap("not implemented: reading namespace_prefixes")) + + @unwrap_spec(value=int) + def set_namespace_prefixes(self, space, value): + XML_SetReturnNSTriplet(self.itself, bool(value)) # Parse methods @@ -732,10 +740,18 @@ if XML_COMBINED_VERSION >= 19505: XMLParser_methods.append('UseForeignDTD') +_XMLParser_extras = {} +for name in XMLParser_methods: + _XMLParser_extras[name] = interp2app(getattr(W_XMLParserType, name)) +for name in SETTERS: + _XMLParser_extras[name] = W_XMLParserType._make_property(name) + W_XMLParserType.typedef = TypeDef( "pyexpat.XMLParserType", __doc__ = "XML parser", - __setattr__ = interp2app(W_XMLParserType.setattr), + namespace_prefixes = GetSetProperty(W_XMLParserType.get_namespace_prefixes, + W_XMLParserType.set_namespace_prefixes, + cls=W_XMLParserType), 
returns_unicode = bool_property('returns_unicode', W_XMLParserType), ordered_attributes = bool_property('ordered_attributes', W_XMLParserType), specified_attributes = bool_property('specified_attributes', W_XMLParserType), @@ -754,8 +770,7 @@ CurrentColumnNumber = GetSetProperty(W_XMLParserType.descr_ErrorColumnNumber, cls=W_XMLParserType), CurrentByteIndex = GetSetProperty(W_XMLParserType.descr_ErrorByteIndex, cls=W_XMLParserType), - **dict((name, interp2app(getattr(W_XMLParserType, name))) - for name in XMLParser_methods) + **_XMLParser_extras ) def ParserCreate(space, w_encoding=None, w_namespace_separator=None, diff --git a/pypy/module/pyexpat/test/test_parser.py b/pypy/module/pyexpat/test/test_parser.py --- a/pypy/module/pyexpat/test/test_parser.py +++ b/pypy/module/pyexpat/test/test_parser.py @@ -52,6 +52,19 @@ assert res == 1 assert data == [u"\u00f6"] + def test_get_handler(self): + import pyexpat + p = pyexpat.ParserCreate() + assert p.StartElementHandler is None + assert p.EndElementHandler is None + def f(*args): pass + p.StartElementHandler = f + assert p.StartElementHandler is f + def g(*args): pass + p.EndElementHandler = g + assert p.StartElementHandler is f + assert p.EndElementHandler is g + def test_intern(self): import pyexpat p = pyexpat.ParserCreate() diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -17,7 +17,7 @@ 'imp', 'sys', 'array', '_ffi', 'itertools', 'operator', 'posix', '_socket', '_sre', '_lsprof', '_weakref', '__pypy__', 'cStringIO', '_collections', 'struct', - 'mmap']: + 'mmap', 'marshal']: return True return False diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -271,7 +271,7 @@ thread_ticker_check = """ guard_not_invalidated? ticker0 = getfield_raw(ticker_address, descr=) - ticker1 = int_sub(ticker0, 1) + ticker1 = int_sub(ticker0, _) setfield_raw(ticker_address, ticker1, descr=) ticker_cond0 = int_lt(ticker1, 0) guard_false(ticker_cond0, descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -334,26 +334,27 @@ log = self.run(main, [1000]) assert log.result == (1000, 998) loop, = log.loops_by_filename(self.filepath) + # the int strategy is used here assert loop.match_by_id('append', """ i13 = getfield_gc(p8, descr=) i15 = int_add(i13, 1) # Will be killed by the backend - i17 = arraylen_gc(p7, descr=) - call(ConstClass(_ll_list_resize_ge), p8, i15, descr=) + p15 = getfield_gc(p8, descr=) + i17 = arraylen_gc(p15, descr=) + call(_, p8, i15, descr=) # this is a call to _ll_list_resize_ge_trampoline__... guard_no_exception(descr=...) 
p17 = getfield_gc(p8, descr=) - p19 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p19, i12, descr=) - setarrayitem_gc(p17, i13, p19, descr=) + setarrayitem_gc(p17, i13, i12, descr=) """) def test_blockstack_virtualizable(self): def main(n): from pypyjit import residual_call + l = len i = 0 while i < n: try: - residual_call(len, []) # ID: call + residual_call(l, []) # ID: call except: pass i += 1 @@ -369,11 +370,8 @@ p22 = new_with_vtable(19511408) p24 = new_array(1, descr=) p26 = new_with_vtable(ConstClass(W_ListObject)) - p27 = new(descr=) - p29 = new_array(0, descr=) setfield_gc(p0, i20, descr=) - setfield_gc(p27, p29, descr=) - setfield_gc(p26, p27, descr=<.* .*W_ListObject.inst_wrappeditems .*>) + setfield_gc(p26, ConstPtr(ptr22), descr=) setarrayitem_gc(p24, 0, p26, descr=) setfield_gc(p22, p24, descr=) p32 = call_may_force(11376960, p18, p22, descr=) @@ -486,4 +484,4 @@ i4 = int_add(i0, 1) --TICK-- jump(..., descr=...) - """) \ No newline at end of file + """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -201,9 +201,11 @@ assert log.result == 1000000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - i16 = int_ge(i12, i13) + i14 = getfield_gc(p12, descr=) + i16 = uint_ge(i12, i14) guard_false(i16, descr=...) - p17 = getarrayitem_gc(p15, i12, descr=) + p16 = getfield_gc(p12, descr=) + p17 = getarrayitem_gc(p16, i12, descr=) i19 = int_add(i12, 1) setfield_gc(p9, i19, descr=) guard_nonnull_class(p17, 146982464, descr=...) @@ -217,7 +219,7 @@ i28 = int_add_ovf(i10, i25) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p3, p4, p5, p6, i28, i25, p9, p10, p11, i19, i13, p14, p15, descr=) + jump(p0, p1, p2, p3, p4, p5, p6, i28, i25, p9, p10, p11, p12, i19, descr=) """) @@ -337,7 +339,7 @@ a = compile('x+x+x+x+x+x', 'eval', 'eval') b = {'x': 7} while i < 1000: - y = eval(a,b,b) # ID: eval + y = eval(a, b, b) # ID: eval i += 1 return y diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -147,8 +147,8 @@ i31 = int_gt(i30, 23) guard_false(i31, descr=...) copystrcontent(p9, p21, 0, i25, i10) - i33 = int_eq(i30, 23) - guard_false(i33, descr=...) + i33 = int_lt(i30, 23) + guard_true(i33, descr=...) p35 = call(ConstClass(ll_shrink_array__rpy_stringPtr_Signed), p21, i30, descr=) guard_no_exception(descr=...) i37 = strlen(p35) diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py --- a/pypy/module/rctime/interp_time.py +++ b/pypy/module/rctime/interp_time.py @@ -3,7 +3,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec from pypy.rpython.lltypesystem import lltype -from pypy.rlib.rarithmetic import ovfcheck_float_to_int +from pypy.rlib.rarithmetic import ovfcheck_float_to_int, intmask from pypy.rlib import rposix from pypy.translator.tool.cbuild import ExternalCompilationInfo import os @@ -585,7 +585,7 @@ # More likely, the format yields an empty result, # e.g. an empty format, or %Z when the timezone # is unknown. 
- result = rffi.charp2strn(outbuf, buflen) + result = rffi.charp2strn(outbuf, intmask(buflen)) return space.wrap(result) finally: lltype.free(outbuf, flavor='raw') diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -38,7 +38,6 @@ def __init__(self, name, startblock, return_var=None): self.name = name # function name (possibly mangled already) self.startblock = startblock - self.startblock.isstartblock = True # build default returnblock self.returnblock = Block([return_var or Variable()]) self.returnblock.operations = () @@ -171,11 +170,10 @@ class Block(object): - __slots__ = """isstartblock inputargs operations exitswitch + __slots__ = """inputargs operations exitswitch exits blockcolor""".split() def __init__(self, inputargs): - self.isstartblock = False self.inputargs = list(inputargs) # mixed list of variable/const XXX self.operations = [] # list of SpaceOperation(s) self.exitswitch = None # a variable or @@ -452,7 +450,6 @@ newblock.closeblock(*newlinks) newstartblock = blockmap[graph.startblock] - newstartblock.isstartblock = True newgraph = FunctionGraph(graph.name, newstartblock) newgraph.returnblock = blockmap[graph.returnblock] newgraph.exceptblock = blockmap[graph.exceptblock] @@ -490,7 +487,6 @@ for block in graph.iterblocks(): - assert bool(block.isstartblock) == (block is graph.startblock) assert type(block.exits) is tuple, ( "block.exits is a %s (closeblock() or recloseblock() missing?)" % (type(block.exits).__name__,)) diff --git a/pypy/objspace/flow/objspace.py b/pypy/objspace/flow/objspace.py --- a/pypy/objspace/flow/objspace.py +++ b/pypy/objspace/flow/objspace.py @@ -96,6 +96,12 @@ self.executioncontext.recorder = previous_recorder self.concrete_mode -= 1 + def is_w(self, w_one, w_two): + return self.is_true(self.is_(w_one, w_two)) + + is_ = None # real version added by add_operations() + id = None # real version added by add_operations() + def newdict(self): if self.concrete_mode: return Constant({}) diff --git a/pypy/objspace/flow/operation.py b/pypy/objspace/flow/operation.py --- a/pypy/objspace/flow/operation.py +++ b/pypy/objspace/flow/operation.py @@ -315,7 +315,7 @@ del _add_exceptions, _add_except_ovf def make_op(fs, name, symbol, arity, specialnames): - if hasattr(fs, name): + if getattr(fs, name, None) is not None: return op = None diff --git a/pypy/objspace/flow/test/test_checkgraph.py b/pypy/objspace/flow/test/test_checkgraph.py --- a/pypy/objspace/flow/test/test_checkgraph.py +++ b/pypy/objspace/flow/test/test_checkgraph.py @@ -13,20 +13,6 @@ py.test.raises(AssertionError, checkgraph, g) -def test_nostartblock(): - g = FunctionGraph("g", Block([])) - g.startblock.closeblock(Link([Constant(1)], g.returnblock)) - g.startblock.isstartblock = False - py.test.raises(AssertionError, checkgraph, g) - -def test_twostartblocks(): - g = FunctionGraph("g", Block([])) - b = Block([]) - b.isstartblock = True - g.startblock.closeblock(Link([], b)) - b.closeblock(Link([Constant(1)], g.returnblock)) - py.test.raises(AssertionError, checkgraph, g) - def test_exitlessblocknotexitblock(): g = FunctionGraph("g", Block([])) py.test.raises(AssertionError, checkgraph, g) diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -9,10 +9,7 @@ from pypy.rlib.debug import check_annotation from pypy.objspace.std import stringobject from pypy.objspace.std.intobject import 
W_IntObject -from pypy.objspace.std.listobject import ( - _delitem_slice_helper, _setitem_slice_helper, - get_positive_index -) +from pypy.objspace.std.listobject import get_positive_index from pypy.objspace.std.listtype import get_list_index from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice from pypy.objspace.std.stringobject import W_StringObject @@ -600,7 +597,7 @@ oldsize = len(w_bytearray.data) start, stop, step, slicelength = w_slice.indices4(space, oldsize) sequence2 = makebytearraydata_w(space, w_other) - setitem_slice_helper(space, w_bytearray.data, start, step, slicelength, sequence2, empty_elem='\x00') + _setitem_slice_helper(space, w_bytearray.data, start, step, slicelength, sequence2, empty_elem='\x00') def delitem__Bytearray_ANY(space, w_bytearray, w_idx): idx = get_list_index(space, w_idx) @@ -614,13 +611,84 @@ def delitem__Bytearray_Slice(space, w_bytearray, w_slice): start, stop, step, slicelength = w_slice.indices4(space, len(w_bytearray.data)) - delitem_slice_helper(space, w_bytearray.data, start, step, slicelength) + _delitem_slice_helper(space, w_bytearray.data, start, step, slicelength) -# create new helper functions with different list type specialisation -delitem_slice_helper = func_with_new_name(_delitem_slice_helper, - 'delitem_slice_helper') -setitem_slice_helper = func_with_new_name(_setitem_slice_helper, - 'setitem_slice_helper') +#XXX share the code again with the stuff in listobject.py +def _delitem_slice_helper(space, items, start, step, slicelength): + if slicelength==0: + return + + if step < 0: + start = start + step * (slicelength-1) + step = -step + + if step == 1: + assert start >= 0 + assert slicelength >= 0 + del items[start:start+slicelength] + else: + n = len(items) + i = start + + for discard in range(1, slicelength): + j = i+1 + i += step + while j < i: + items[j-discard] = items[j] + j += 1 + + j = i+1 + while j < n: + items[j-slicelength] = items[j] + j += 1 + start = n - slicelength + assert start >= 0 # annotator hint + del items[start:] + +def _setitem_slice_helper(space, items, start, step, slicelength, sequence2, + empty_elem): + assert slicelength >= 0 + oldsize = len(items) + len2 = len(sequence2) + if step == 1: # Support list resizing for non-extended slices + delta = slicelength - len2 + if delta < 0: + delta = -delta + newsize = oldsize + delta + # XXX support this in rlist! + items += [empty_elem] * delta + lim = start+len2 + i = newsize - 1 + while i >= lim: + items[i] = items[i-delta] + i -= 1 + elif start >= 0: + del items[start:start+delta] + else: + assert delta==0 # start<0 is only possible with slicelength==0 + elif len2 != slicelength: # No resize for extended slices + raise operationerrfmt(space.w_ValueError, "attempt to " + "assign sequence of size %d to extended slice of size %d", + len2, slicelength) + + if sequence2 is items: + if step > 0: + # Always copy starting from the right to avoid + # having to make a shallow copy in the case where + # the source and destination lists are the same list. 
+ i = len2 - 1 + start += i*step + while i >= 0: + items[start] = sequence2[i] + start -= step + i -= 1 + return + else: + # Make a shallow copy to more easily handle the reversal case + sequence2 = list(sequence2) + for i in range(len2): + items[start] = sequence2[i] + start += step def _strip(space, w_bytearray, u_chars, left, right): # note: mostly copied from stringobject._strip diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -65,11 +65,11 @@ if isinstance(cell, ModuleCell): cell.w_value = w_value return - # If the new value and the current value are the same, don't create a - # level of indirection, or mutate are version. - if self.space.is_w(w_value, cell): - return if cell is not None: + # If the new value and the current value are the same, don't create a + # level of indirection, or mutate the version. + if self.space.is_w(w_value, cell): + return w_value = ModuleCell(w_value) self.mutated() self.unerase(w_dict.dstorage)[key] = w_value diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -5,12 +5,46 @@ from pypy.objspace.std.register_all import register_all from pypy.objspace.std.floatobject import W_FloatObject, _hash_float from pypy.objspace.std.longobject import W_LongObject +from pypy.rlib.rbigint import rbigint from pypy.rlib.rfloat import ( formatd, DTSF_STR_PRECISION, isinf, isnan, copysign) import math -class W_ComplexObject(W_Object): + +class W_AbstractComplexObject(W_Object): + __slots__ = () + + def is_w(self, space, w_other): + from pypy.rlib.longlong2float import float2longlong + if not isinstance(w_other, W_AbstractComplexObject): + return False + if self.user_overridden_class or w_other.user_overridden_class: + return self is w_other + real1 = space.float_w(space.getattr(self, space.wrap("real"))) + real2 = space.float_w(space.getattr(w_other, space.wrap("real"))) + imag1 = space.float_w(space.getattr(self, space.wrap("imag"))) + imag2 = space.float_w(space.getattr(w_other, space.wrap("imag"))) + real1 = float2longlong(real1) + real2 = float2longlong(real2) + imag1 = float2longlong(imag1) + imag2 = float2longlong(imag2) + return real1 == real2 and imag1 == imag2 + + def unique_id(self, space): + if self.user_overridden_class: + return W_Object.unique_id(self, space) + from pypy.rlib.longlong2float import float2longlong + from pypy.objspace.std.model import IDTAG_COMPLEX as tag + real = space.float_w(space.getattr(self, space.wrap("real"))) + imag = space.float_w(space.getattr(self, space.wrap("imag"))) + real_b = rbigint.fromrarith_int(float2longlong(real)) + imag_b = rbigint.fromrarith_int(float2longlong(imag)) + val = real_b.lshift(64).or_(imag_b).lshift(3).or_(rbigint.fromint(tag)) + return space.newlong_from_rbigint(val) + + +class W_ComplexObject(W_AbstractComplexObject): """This is a reimplementation of the CPython "PyComplexObject" """ from pypy.objspace.std.complextype import complex_typedef as typedef diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -21,10 +21,33 @@ import math from pypy.objspace.std.intobject import W_IntObject -class W_FloatObject(W_Object): - """This is a reimplementation of the CPython "PyFloatObject" - it is assumed that the constructor takes a real Python float as - an argument""" +class 
W_AbstractFloatObject(W_Object): + __slots__ = () + + def is_w(self, space, w_other): + from pypy.rlib.longlong2float import float2longlong + if not isinstance(w_other, W_AbstractFloatObject): + return False + if self.user_overridden_class or w_other.user_overridden_class: + return self is w_other + one = float2longlong(space.float_w(self)) + two = float2longlong(space.float_w(w_other)) + return one == two + + def unique_id(self, space): + if self.user_overridden_class: + return W_Object.unique_id(self, space) + from pypy.rlib.longlong2float import float2longlong + from pypy.objspace.std.model import IDTAG_FLOAT as tag + val = float2longlong(space.float_w(self)) + b = rbigint.fromrarith_int(val) + b = b.lshift(3).or_(rbigint.fromint(tag)) + return space.newlong_from_rbigint(b) + + +class W_FloatObject(W_AbstractFloatObject): + """This is a implementation of the app-level 'float' type. + The constructor takes an RPython float as an argument.""" from pypy.objspace.std.floattype import float_typedef as typedef _immutable_fields_ = ['floatval'] diff --git a/pypy/objspace/std/frame.py b/pypy/objspace/std/frame.py --- a/pypy/objspace/std/frame.py +++ b/pypy/objspace/std/frame.py @@ -3,13 +3,11 @@ import operator from pypy.rlib.unroll import unrolling_iterable -from pypy.interpreter import pyopcode, function +from pypy.interpreter import pyopcode from pypy.interpreter.pyframe import PyFrame -from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.module.__builtin__ import Module +from pypy.interpreter.error import OperationError from pypy.objspace.std import intobject, smallintobject from pypy.objspace.std.multimethod import FailedToImplement -from pypy.objspace.std.dictmultiobject import W_DictMultiObject from pypy.objspace.std.listobject import W_ListObject @@ -58,7 +56,7 @@ w_1 = f.popvalue() if type(w_1) is W_ListObject and type(w_2) is intobject.W_IntObject: try: - w_result = w_1.wrappeditems[w_2.intval] + w_result = w_1.getitem(w_2.intval) except IndexError: raise OperationError(f.space.w_IndexError, f.space.wrap("list index out of range")) diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -19,6 +19,22 @@ class W_AbstractIntObject(W_Object): __slots__ = () + def is_w(self, space, w_other): + if not isinstance(w_other, W_AbstractIntObject): + return False + if self.user_overridden_class or w_other.user_overridden_class: + return self is w_other + return space.int_w(self) == space.int_w(w_other) + + def unique_id(self, space): + if self.user_overridden_class: + return W_Object.unique_id(self, space) + from pypy.objspace.std.model import IDTAG_INT as tag + b = space.bigint_w(self) + b = b.lshift(3).or_(rbigint.fromint(tag)) + return space.newlong_from_rbigint(b) + + class W_IntObject(W_AbstractIntObject): __slots__ = 'intval' _immutable_fields_ = ['intval'] diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -33,9 +33,9 @@ """Sequence iterator specialized for lists, accessing directly their RPython-level list of wrapped objects. 
""" - def __init__(w_self, w_seq, wrappeditems): + def __init__(w_self, w_seq): W_AbstractSeqIterObject.__init__(w_self, w_seq) - w_self.listitems = wrappeditems + w_self.w_seq = w_seq class W_FastTupleIterObject(W_AbstractSeqIterObject): """Sequence iterator specialized for tuples, accessing @@ -105,13 +105,15 @@ return w_seqiter def next__FastListIter(space, w_seqiter): - if w_seqiter.listitems is None: + from pypy.objspace.std.listobject import W_ListObject + w_seq = w_seqiter.w_seq + if w_seq is None: raise OperationError(space.w_StopIteration, space.w_None) + assert isinstance(w_seq, W_ListObject) index = w_seqiter.index try: - w_item = w_seqiter.listitems[index] + w_item = w_seq.getitem(index) except IndexError: - w_seqiter.listitems = None w_seqiter.w_seq = None raise OperationError(space.w_StopIteration, space.w_None) w_seqiter.index = index + 1 diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -5,35 +5,983 @@ from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.listtype import get_list_index from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice - from pypy.objspace.std import slicetype from pypy.interpreter import gateway, baseobjspace +from pypy.rlib.objectmodel import instantiate, specialize from pypy.rlib.listsort import make_timsort_class +from pypy.rlib import rerased, jit from pypy.interpreter.argument import Signature +UNROLL_CUTOFF = 5 + class W_AbstractListObject(W_Object): __slots__ = () +def make_range_list(space, start, step, length): + if length <= 0: + strategy = space.fromcache(EmptyListStrategy) + storage = strategy.erase(None) + else: + strategy = space.fromcache(RangeListStrategy) + storage = strategy.erase((start, step, length)) + return W_ListObject.from_storage_and_strategy(space, storage, strategy) + +def make_empty_list(space): + strategy = space.fromcache(EmptyListStrategy) + storage = strategy.erase(None) + return W_ListObject.from_storage_and_strategy(space, storage, strategy) + + at jit.look_inside_iff(lambda space, list_w: jit.isconstant(len(list_w)) and len(list_w) < UNROLL_CUTOFF) +def get_strategy_from_list_objects(space, list_w): + if not list_w: + return space.fromcache(EmptyListStrategy) + + # check for ints + for w_obj in list_w: + if not is_W_IntObject(w_obj): + break + else: + return space.fromcache(IntegerListStrategy) + + # check for strings + for w_obj in list_w: + if not is_W_StringObject(w_obj): + break + else: + return space.fromcache(StringListStrategy) + + # check for floats + for w_obj in list_w: + if not is_W_FloatObject(w_obj): + break + else: + return space.fromcache(FloatListStrategy) + + return space.fromcache(ObjectListStrategy) + +def is_W_IntObject(w_object): + from pypy.objspace.std.intobject import W_IntObject + return type(w_object) is W_IntObject + +def is_W_StringObject(w_object): + from pypy.objspace.std.stringobject import W_StringObject + return type(w_object) is W_StringObject + +def is_W_FloatObject(w_object): + from pypy.objspace.std.floatobject import W_FloatObject + return type(w_object) is W_FloatObject + class W_ListObject(W_AbstractListObject): from pypy.objspace.std.listtype import list_typedef as typedef - def __init__(w_self, wrappeditems): - w_self.wrappeditems = wrappeditems + def __init__(w_self, space, wrappeditems): + assert isinstance(wrappeditems, list) + w_self.space = space + if space.config.objspace.std.withliststrategies: + w_self.strategy = 
get_strategy_from_list_objects(space, wrappeditems) + else: + w_self.strategy = space.fromcache(ObjectListStrategy) + w_self.init_from_list_w(wrappeditems) + + @staticmethod + def from_storage_and_strategy(space, storage, strategy): + w_self = instantiate(W_ListObject) + w_self.space = space + w_self.strategy = strategy + w_self.lstorage = storage + if not space.config.objspace.std.withliststrategies: + w_self.switch_to_object_strategy() + return w_self + + @staticmethod + def newlist_str(space, list_s): + strategy = space.fromcache(StringListStrategy) + storage = strategy.erase(list_s) + return W_ListObject.from_storage_and_strategy(space, storage, strategy) def __repr__(w_self): """ representation for debugging purposes """ - return "%s(%s)" % (w_self.__class__.__name__, w_self.wrappeditems) + return "%s(%s, %s)" % (w_self.__class__.__name__, w_self.strategy, w_self.lstorage._x) def unwrap(w_list, space): - items = [space.unwrap(w_item) for w_item in w_list.wrappeditems]# XXX generic mixed types unwrap + # for tests only! + items = [space.unwrap(w_item) for w_item in w_list.getitems()] return list(items) + def switch_to_object_strategy(self): + list_w = self.getitems() + self.strategy = self.space.fromcache(ObjectListStrategy) + # XXX this is quite indirect + self.init_from_list_w(list_w) + + def _temporarily_as_objects(self): + if self.strategy is self.space.fromcache(ObjectListStrategy): + return self + list_w = self.getitems() + strategy = self.space.fromcache(ObjectListStrategy) + storage = strategy.erase(list_w) + w_objectlist = W_ListObject.from_storage_and_strategy(self.space, storage, strategy) + return w_objectlist + + # ___________________________________________________ + + def init_from_list_w(self, list_w): + """Initializes listobject by iterating through the given list of + wrapped items, unwrapping them if neccessary and creating a + new erased object as storage""" + self.strategy.init_from_list_w(self, list_w) + + def clone(self): + """Returns a clone by creating a new listobject + with the same strategy and a copy of the storage""" + return self.strategy.clone(self) + + def copy_into(self, other): + """Used only when extending an EmptyList. Sets the EmptyLists + strategy and storage according to the other W_List""" + self.strategy.copy_into(self, other) + + def contains(self, w_obj): + """Returns unwrapped boolean, saying wether w_obj exists + in the list.""" + return self.strategy.contains(self, w_obj) + def append(w_list, w_item): - w_list.wrappeditems.append(w_item) + """Appends the wrapped item to the end of the list.""" + w_list.strategy.append(w_list, w_item) + + def length(self): + return self.strategy.length(self) + + def getitem(self, index): + """Returns the wrapped object that is found in the + list at the given index. The index must be unwrapped. + May raise IndexError.""" + return self.strategy.getitem(self, index) + + def getslice(self, start, stop, step, length): + """Returns a slice of the list defined by the arguments. Arguments must be + normalized (i.e. using normalize_simple_slice or W_Slice.indices4). + May raise IndexError.""" + return self.strategy.getslice(self, start, stop, step, length) + + def getitems(self): + """Returns a list of all items after wrapping them. The result can + share with the storage, if possible.""" + return self.strategy.getitems(self) + + def getitems_copy(self): + """Returns a copy of all items in the list. 
Same as getitems except for + ObjectListStrategy.""" + return self.strategy.getitems_copy(self) + + def getitems_str(self): + """ Return the items in the list as unwrapped strings. If the list does + not use the list strategy, return None. """ + return self.strategy.getitems_str(self) + # ___________________________________________________ + + + def mul(self, times): + """Returns a copy of the list, multiplied by times. + Argument must be unwrapped.""" + return self.strategy.mul(self, times) + + def inplace_mul(self, times): + """Alters the list by multiplying its content by times.""" + self.strategy.inplace_mul(self, times) + + def deleteslice(self, start, step, length): + """Deletes a slice from the list. Used in delitem and delslice. + Arguments must be normalized (see getslice).""" + self.strategy.deleteslice(self, start, step, length) + + def pop(self, index): + """Pops an item from the list. Index must be normalized. + May raise IndexError.""" + return self.strategy.pop(self, index) + + def pop_end(self): + """ Pop the last element from the list.""" + return self.strategy.pop_end(self) + + def setitem(self, index, w_item): + """Inserts a wrapped item at the given (unwrapped) index. + May raise IndexError.""" + self.strategy.setitem(self, index, w_item) From noreply at buildbot.pypy.org Sun Dec 4 20:15:40 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 4 Dec 2011 20:15:40 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-refactor-complex: progress Message-ID: <20111204191540.BBB218205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-dtype-refactor-complex Changeset: r50144:82e1fc9c253c Date: 2011-12-04 14:15 -0500 http://bitbucket.org/pypy/pypy/changeset/82e1fc9c253c/ Log: progress diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -43,6 +43,7 @@ w_bool = "bool" w_int = "int" w_float = "float" + w_complex = "complex" w_list = "list" w_long = "long" w_tuple = 'tuple' diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -257,6 +257,7 @@ bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype + complex128_dtype = interp_dtype.get_dtype_cache(space).w_complex128dtype if isinstance(w_obj, interp_boxes.W_GenericBox): dtype = w_obj.get_dtype(space) @@ -278,6 +279,8 @@ current_guess is long_dtype or current_guess is int64_dtype): return int64_dtype return current_guess + elif space.isinstance_w(w_obj, space.w_complex): + return complex128_dtype return interp_dtype.get_dtype_cache(space).w_float64dtype diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1039,7 +1039,6 @@ a = zeros((10, 10)) b = a[0].copy() assert (b == zeros(10)).all() ->>>>>>> other class AppTestSupport(BaseNumpyAppTest): def setup_class(cls): From noreply at buildbot.pypy.org Sun Dec 4 20:55:38 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Sun, 4 Dec 2011 20:55:38 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: merge default Message-ID: <20111204195538.7EDCF8205C@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 
Changeset: r50145:62684a5c6304 Date: 2011-12-04 20:33 +0100 http://bitbucket.org/pypy/pypy/changeset/62684a5c6304/ Log: merge default diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -473,7 +473,7 @@ block = timeout < 0 if not block: # XXX does not check for overflow - deadline = _GetTickCount() + int(1000 * timeout + 0.5) + deadline = intmask(_GetTickCount()) + int(1000 * timeout + 0.5) else: deadline = 0 @@ -497,7 +497,7 @@ return True if not block: - now = _GetTickCount() + now = intmask(_GetTickCount()) if now > deadline: return False diff = deadline - now diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -418,7 +418,7 @@ if _MSVC: def invalid_socket(fd): return fd == INVALID_SOCKET - INVALID_SOCKET = cConfig.INVALID_SOCKET + INVALID_SOCKET = intmask(cConfig.INVALID_SOCKET) else: def invalid_socket(fd): return fd < 0 From noreply at buildbot.pypy.org Sun Dec 4 20:59:14 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Dec 2011 20:59:14 +0100 (CET) Subject: [pypy-commit] pypy py3k: Fix tests in module/_socket Message-ID: <20111204195914.E5FC48205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50146:afebfe18049c Date: 2011-12-03 22:18 +0100 http://bitbucket.org/pypy/pypy/changeset/afebfe18049c/ Log: Fix tests in module/_socket diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -536,7 +536,8 @@ clientsock = _socket.socket(_socket.AF_UNIX) clientsock.connect(sockpath) - s, addr = serversock._accept() + fileno, addr = serversock._accept() + s = _socket.socket(fileno=fileno) assert not addr s.send(b'X') @@ -588,7 +589,8 @@ from _socket import socket, timeout cli = socket() cli.connect(self.serv.getsockname()) - t, addr = self.serv._accept() + fileno, addr = self.serv._accept() + t = socket(fileno=fileno) cli.settimeout(1.0) # test recv() timeout t.send(b'*') @@ -623,7 +625,8 @@ MSG = b'dupa was here\n' cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) cli.connect(self.serv.getsockname()) - conn, addr = self.serv._accept() + fileno, addr = self.serv._accept() + conn = socket.socket(fileno=fileno) buf = buffer(MSG) conn.send(buf) buf = array.array('b', b' '*1024) @@ -638,7 +641,8 @@ MSG = b'dupa was here\n' cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) cli.connect(self.serv.getsockname()) - conn, addr = self.serv._accept() + fileno, addr = self.serv._accept() + conn = socket.socket(fileno=fileno) buf = buffer(MSG) conn.send(buf) buf = array.array('b', b' '*1024) From noreply at buildbot.pypy.org Sun Dec 4 20:59:16 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Dec 2011 20:59:16 +0100 (CET) Subject: [pypy-commit] pypy py3k: Allow sockets subclasses to override __init__ only. Message-ID: <20111204195916.3BC388205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50147:138070ed1361 Date: 2011-12-03 22:44 +0100 http://bitbucket.org/pypy/pypy/changeset/138070ed1361/ Log: Allow sockets subclasses to override __init__ only. ssl.py relies on this. 
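	The reason ssl.py needs this: with the socket now created in __init__
	rather than in __new__, a subclass can customize construction by
	overriding __init__ alone, e.g. wrapping an existing socket's file
	descriptor the way ssl.SSLSocket does. A rough sketch of that pattern
	(illustrative only, not part of this changeset; the class name is made
	up and the real ssl.SSLSocket does considerably more):

	    import socket

	    class WrappingSocket(socket.socket):
	        # only __init__ is overridden; __new__ stays the default
	        def __init__(self, sock):
	            socket.socket.__init__(self, family=sock.family,
	                                    type=sock.type, proto=sock.proto,
	                                    fileno=sock.fileno())

	The test_subclass() test added below checks the simpler case of a
	subclass that only switches the address family.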
diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -16,6 +16,22 @@ self.space.getexecutioncontext().checksignals() class W_RSocket(Wrappable, RSocket): + def descr_new(space, w_subtype, __args__): + sock = space.allocate_instance(W_RSocket, w_subtype) + return space.wrap(sock) + + @unwrap_spec(family=int, type=int, proto=int) + def descr_init(self, space, family=AF_INET, type=SOCK_STREAM, proto=0, + w_fileno=None): + try: + if not space.is_w(w_fileno, space.w_None): + W_RSocket.__init__(self, family, type, proto, + fd=space.c_filedescriptor_w(w_fileno)) + else: + W_RSocket.__init__(self, family, type, proto) + except SocketError, e: + raise converted_error(space, e) + def _accept_w(self, space): """_accept() -> (socket object, address info) @@ -409,21 +425,6 @@ return os.fdopen(newfd, mode, buffersize) ''', filename =__file__).interphook('makefile') - at unwrap_spec(family=int, type=int, proto=int) -def newsocket(space, w_subtype, family=AF_INET, - type=SOCK_STREAM, proto=0, w_fileno=NoneNotWrapped): - sock = space.allocate_instance(W_RSocket, w_subtype) - try: - if w_fileno: - W_RSocket.__init__(sock, family, type, proto, - fd=space.c_filedescriptor_w(w_fileno)) - else: - W_RSocket.__init__(sock, family, type, proto) - except SocketError, e: - raise converted_error(space, e) - return space.wrap(sock) -descr_socket_new = interp2app(newsocket) - # ____________________________________________________________ # Error handling @@ -518,7 +519,8 @@ shutdown(how) -- shut down traffic in one or both directions [*] not available on all platforms!""", - __new__ = descr_socket_new, + __new__ = interp2app(W_RSocket.descr_new.im_func), + __init__ = interp2app(W_RSocket.descr_init), type = interp_attrproperty('type', W_RSocket), proto = interp_attrproperty('proto', W_RSocket), family = interp_attrproperty('family', W_RSocket), diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -550,6 +550,14 @@ clientsock.close() s.close() + def test_subclass(self): + # Socket is not created in __new__, but in __init__. + import socket + class Socket_IPV6(socket.socket): + def __init__(self): + socket.socket.__init__(self, family=socket.AF_INET6) + assert Socket_IPV6().family == socket.AF_INET6 + class AppTestSocketTCP: def setup_class(cls): From noreply at buildbot.pypy.org Sun Dec 4 20:59:17 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Dec 2011 20:59:17 +0100 (CET) Subject: [pypy-commit] pypy py3k: Progress in the _ssl module Message-ID: <20111204195917.9FC548205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50148:a0548becd3ac Date: 2011-12-04 16:45 +0100 http://bitbucket.org/pypy/pypy/changeset/a0548becd3ac/ Log: Progress in the _ssl module diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -108,6 +108,15 @@ """ return space.wrap(intmask(self.fd)) + def detach_w(self, space): + """detach() + + Close the socket object without closing the underlying file descriptor. + The object cannot be used after this call, but the file descriptor + can be reused for other purposes. 
The file descriptor is returned.""" + fd = self.detach() + return space.wrap(intmask(fd)) + def getpeername_w(self, space): """getpeername() -> address info @@ -464,7 +473,7 @@ # ____________________________________________________________ socketmethodnames = """ -_accept bind close connect connect_ex dup fileno +_accept bind close connect connect_ex dup fileno detach getpeername getsockname getsockopt gettimeout listen makefile recv recvfrom send sendall sendto setblocking setsockopt settimeout shutdown _reuse _drop recv_into recvfrom_into diff --git a/pypy/module/_ssl/__init__.py b/pypy/module/_ssl/__init__.py --- a/pypy/module/_ssl/__init__.py +++ b/pypy/module/_ssl/__init__.py @@ -5,8 +5,9 @@ See the socket module for documentation.""" interpleveldefs = { - 'sslwrap': 'interp_ssl.sslwrap', 'SSLError': 'interp_ssl.get_error(space)', + '_SSLSocket': 'interp_ssl.SSLSocket', + '_SSLContext': 'interp_ssl.SSLContext', '_test_decode_cert': 'interp_ssl._test_decode_cert', } diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -2,7 +2,7 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.rlib.rarithmetic import intmask @@ -10,6 +10,7 @@ from pypy.rlib.ropenssl import * from pypy.module._socket import interp_socket +import weakref ## user defined constants @@ -58,15 +59,28 @@ constants["PROTOCOL_SSLv23"] = PY_SSL_VERSION_SSL23 constants["PROTOCOL_TLSv1"] = PY_SSL_VERSION_TLS1 -constants["OPENSSL_VERSION_NUMBER"] = OPENSSL_VERSION_NUMBER -ver = OPENSSL_VERSION_NUMBER -ver, status = divmod(ver, 16) -ver, patch = divmod(ver, 256) -ver, fix = divmod(ver, 256) -ver, minor = divmod(ver, 256) -ver, major = divmod(ver, 256) -constants["OPENSSL_VERSION_INFO"] = (major, minor, fix, patch, status) +# protocol options +constants["OP_ALL"] = SSL_OP_ALL +constants["OP_NO_SSLv2"] = SSL_OP_NO_SSLv2 +constants["OP_NO_SSLv3"] = SSL_OP_NO_SSLv3 +constants["OP_NO_TLSv1"] = SSL_OP_NO_TLSv1 +constants["HAS_SNI"] = HAS_SNI + +# OpenSSL version +def _parse_version(ver): + ver, status = divmod(ver, 16) + ver, patch = divmod(ver, 256) + ver, fix = divmod(ver, 256) + ver, minor = divmod(ver, 256) + ver, major = divmod(ver, 256) + return (major, minor, fix, patch, status) +# XXX use SSLeay() to get the version of the library linked against, which +# could be different from the headers version. 
+libver = OPENSSL_VERSION_NUMBER +constants["OPENSSL_VERSION_NUMBER"] = libver +constants["OPENSSL_VERSION_INFO"] = _parse_version(libver) constants["OPENSSL_VERSION"] = SSLEAY_VERSION +constants["_OPENSSL_API_VERSION"] = _parse_version(libver) def ssl_error(space, msg, errno=0): w_exception_class = get_error(space) @@ -74,6 +88,186 @@ space.wrap(errno), space.wrap(msg)) return OperationError(w_exception_class, w_exception) + +class SSLContext(Wrappable): + def __init__(self, method): + self.ctx = libssl_SSL_CTX_new(method) + + # Defaults + libssl_SSL_CTX_set_verify(self.ctx, SSL_VERIFY_NONE, None) + libssl_SSL_CTX_set_options(self.ctx, SSL_OP_ALL) + libssl_SSL_CTX_set_session_id_context(self.ctx, "Python", len("Python")) + + def __del__(self): + if self.ctx: + libssl_SSL_CTX_free(self.ctx) + + @unwrap_spec(protocol=int) + def descr_new(space, w_subtype, protocol=PY_SSL_VERSION_SSL23): + self = space.allocate_instance(SSLContext, w_subtype) + if protocol == PY_SSL_VERSION_TLS1: + method = libssl_TLSv1_method() + elif protocol == PY_SSL_VERSION_SSL3: + method = libssl_SSLv3_method() + elif protocol == PY_SSL_VERSION_SSL2 and not OPENSSL_NO_SSL2: + method = libssl_SSLv2_method() + elif protocol == PY_SSL_VERSION_SSL23: + method = libssl_SSLv23_method() + else: + raise ssl_error(space, "invalid SSL protocol version") + self.__init__(method) + if not self.ctx: + raise ssl_error(space, "failed to allocate SSL context") + return space.wrap(self) + + @unwrap_spec(cipherlist=str) + def set_ciphers_w(self, space, cipherlist): + ret = libssl_SSL_CTX_set_cipher_list(self.ctx, cipherlist) + if ret == 0: + # Clearing the error queue is necessary on some OpenSSL + # versions, otherwise the error will be reported again + # when another SSL call is done. + libssl_ERR_clear_error() + raise ssl_error(space, "No cipher can be selected.") + + def get_verify_mode_w(self, space): + verify_mode = libssl_SSL_CTX_get_verify_mode(self.ctx) + if verify_mode == SSL_VERIFY_NONE: + return space.wrap(PY_SSL_CERT_NONE) + elif verify_mode == SSL_VERIFY_PEER: + return space.wrap(PY_SSL_CERT_OPTIONAL) + elif verify_mode == (SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT): + return space.wrap(PY_SSL_CERT_REQUIRED) + else: + raise ssl_error( + space, "invalid return value from SSL_CTX_get_verify_mode") + + def set_verify_mode_w(self, space, w_mode): + mode = space.int_w(w_mode) + if mode == PY_SSL_CERT_NONE: + verify_mode = SSL_VERIFY_NONE + elif mode == PY_SSL_CERT_OPTIONAL: + verify_mode = SSL_VERIFY_PEER + elif mode == PY_SSL_CERT_REQUIRED: + verify_mode = SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT + else: + raise OperationError(space.w_ValueError, space.wrap( + "invalid value for verify_mode")) + libssl_SSL_CTX_set_verify(self.ctx, verify_mode, None) + + def get_options_w(self, space): + return space.wrap(libssl_SSL_CTX_get_options(self.ctx)) + + def set_options_w(self, space, value): + opts = libssl_SSL_CTX_get_options(self.ctx) + clear = opts & ~new_opts + set = ~opts & new_opts + if clear: + if HAVE_SSL_CTX_CLEAR_OPTIONS: + libssl_SSL_CTX_clear_options(self.ctx, clear) + else: + raise OperationError(space.w_ValueError, space.wrap( + "can't clear options before OpenSSL 0.9.8m")) + if set: + libssl_SSL_CTX_set_options(self.ctx, set) + + def load_cert_chain_w(self, space, w_certfile, w_keyfile=None): + if space.is_w(w_certfile, space.w_None): + certfile = None + else: + certfile = space.str_w(w_certfile) + if space.is_w(w_keyfile, space.w_None): + keyfile = certfile + else: + keyfile = space.str_w(w_keyfile) + + 
ret = libssl_SSL_CTX_use_certificate_chain_file(self.ctx, certfile) + if ret != 1: + errno = get_errno() + if errno: + libssl_ERR_clear_error() + raise_from_errno(space.w_IOError, errno) + else: + raise _ssl_seterror(space, None, -1) + + ret = libssl_SSL_CTX_use_PrivateKey_file(ss.ctx, key_file, + SSL_FILETYPE_PEM) + if ret != 1: + errno = get_errno() + if errno: + libssl_ERR_clear_error() + raise_from_errno(space.w_IOError, errno) + else: + raise _ssl_seterror(space, None, -1) + + ret = libssl_SSL_CTX_check_private_key(self.ctx) + if ret != 1: + raise _ssl_seterror(space, None, -1) + + def load_verify_locations_w(self, space, w_cafile=None, w_capath=None): + if space.is_w(w_cafile, space.w_None): + cafile = None + else: + cafile = space.str_w(w_cafile) + if space.is_w(w_capath, space.w_None): + capath = None + else: + capath = space.str_w(w_capath) + if cafile is None and capath is None: + raise OperationError(space.w_ValueError, space.wrap( + "cafile and capath cannot be both omitted")) + ret = libssl_SSL_CTX_load_verify_locations( + self.ctx, cafile, capath) + if ret != 1: + errno = get_errno() + if errno: + libssl_ERR_clear_error() + raise_from_errno(space.w_IOError, errno) + else: + raise _ssl_seterror(space, None, -1) + + @unwrap_spec(server_side=int) + def wrap_socket_w(self, space, w_sock, server_side, + w_server_hostname=None): + assert w_sock is not None + # server_hostname is either None (or absent), or to be encoded + # using the idna encoding. + if space.is_w(w_server_hostname, space.w_None): + hostname = None + else: + hostname = space.bytes_w( + space.call_method(w_server_hostname, "idna")) + + if hostname and not HAS_SNI: + raise OperationError(space.w_ValueError, + space.wrap("server_hostname is not supported " + "by your OpenSSL library")) + + return new_sslobject(space, self.ctx, w_sock, server_side, hostname) + + def session_stats_w(self, space): + w_stats = space.newdict() + for name, ssl_func in SSL_CTX_STATS: + w_value = space.wrap(ssl_func(self.ctx)) + space.setitem_str(w_stats, attr, w_value) + return w_stats + + def set_default_verify_paths_w(self): + ret = libssl_SSL_CTX_set_default_verify_paths(self.ctx) + if ret != 1: + raise _ssl_seterror(space, None, -1) + + +SSLContext.typedef = TypeDef( + "_SSLContext", + __new__ = interp2app(SSLContext.descr_new.im_func), + verify_mode = GetSetProperty(SSLContext.get_verify_mode_w, + SSLContext.set_verify_mode_w), + _wrap_socket = interp2app(SSLContext.wrap_socket_w), +) + + + if HAVE_OPENSSL_RAND: # helper routines for seeding the SSL PRNG @unwrap_spec(string=str, entropy=float) @@ -119,11 +313,10 @@ raise ssl_error(space, msg) return space.wrap(bytes) -class SSLObject(Wrappable): + +class SSLSocket(Wrappable): def __init__(self, space): - self.space = space self.w_socket = None - self.ctx = lltype.nullptr(SSL_CTX.TO) self.ssl = lltype.nullptr(SSL.TO) self.peer_cert = lltype.nullptr(X509.TO) self._server = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, flavor='raw') @@ -132,43 +325,35 @@ self._issuer[0] = '\0' self.shutdown_seen_zero = False - def server(self): - return self.space.wrap(rffi.charp2str(self._server)) + def server(self, space): + return space.wrap(rffi.charp2str(self._server)) - def issuer(self): - return self.space.wrap(rffi.charp2str(self._issuer)) + def issuer(self, space): + return space.wrap(rffi.charp2str(self._issuer)) def __del__(self): - self.enqueue_for_destruction(self.space, SSLObject.destructor, - '__del__() method of ') - - def destructor(self): - assert isinstance(self, SSLObject) if 
self.peer_cert: libssl_X509_free(self.peer_cert) if self.ssl: libssl_SSL_free(self.ssl) - if self.ctx: - libssl_SSL_CTX_free(self.ctx) lltype.free(self._server, flavor='raw') lltype.free(self._issuer, flavor='raw') @unwrap_spec(data='bufferstr') - def write(self, data): + def write(self, space, data): """write(s) -> len Writes the string s into the SSL object. Returns the number of bytes written.""" - self._refresh_nonblocking(self.space) + w_socket = self._get_socket(space) - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, True) + sockstate = check_socket_and_wait_for_timeout(space, w_socket, True) if sockstate == SOCKET_HAS_TIMED_OUT: - raise ssl_error(self.space, "The write operation timed out") + raise ssl_error(space, "The write operation timed out") elif sockstate == SOCKET_HAS_BEEN_CLOSED: - raise ssl_error(self.space, "Underlying socket has been closed.") + raise ssl_error(space, "Underlying socket has been closed.") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: - raise ssl_error(self.space, "Underlying socket too large for select().") + raise ssl_error(space, "Underlying socket too large for select().") num_bytes = 0 while True: @@ -178,18 +363,18 @@ err = libssl_SSL_get_error(self.ssl, num_bytes) if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, False) + sockstate = check_socket_and_wait_for_timeout( + space, w_socket, False) elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, True) + sockstate = check_socket_and_wait_for_timeout( + space, w_socket, True) else: sockstate = SOCKET_OPERATION_OK if sockstate == SOCKET_HAS_TIMED_OUT: - raise ssl_error(self.space, "The write operation timed out") + raise ssl_error(space, "The write operation timed out") elif sockstate == SOCKET_HAS_BEEN_CLOSED: - raise ssl_error(self.space, "Underlying socket has been closed.") + raise ssl_error(space, "Underlying socket has been closed.") elif sockstate == SOCKET_IS_NONBLOCKING: break @@ -199,38 +384,39 @@ break if num_bytes > 0: - return self.space.wrap(num_bytes) + return space.wrap(num_bytes) else: - raise _ssl_seterror(self.space, self, num_bytes) + raise _ssl_seterror(space, self, num_bytes) - def pending(self): + def pending(self, space): """pending() -> count Returns the number of already decrypted bytes available for read, pending on the connection.""" count = libssl_SSL_pending(self.ssl) if count < 0: - raise _ssl_seterror(self.space, self, count) - return self.space.wrap(count) + raise _ssl_seterror(space, self, count) + return space.wrap(count) @unwrap_spec(num_bytes=int) - def read(self, num_bytes=1024): + def read(self, space, num_bytes=1024): """read([len]) -> string Read up to len bytes from the SSL socket.""" + w_socket = self._get_socket(space) count = libssl_SSL_pending(self.ssl) if not count: - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, False) + sockstate = check_socket_and_wait_for_timeout( + space, w_socket, False) if sockstate == SOCKET_HAS_TIMED_OUT: - raise ssl_error(self.space, "The read operation timed out") + raise ssl_error(space, "The read operation timed out") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: - raise ssl_error(self.space, "Underlying socket too large for select().") + raise ssl_error(space, "Underlying socket too large for select().") elif sockstate == SOCKET_HAS_BEEN_CLOSED: if libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN: - return self.space.wrap('') - raise 
ssl_error(self.space, "Socket closed without SSL shutdown handshake") + return space.wrapbytes('') + raise ssl_error(space, "Socket closed without SSL shutdown handshake") raw_buf, gc_buf = rffi.alloc_buffer(num_bytes) while True: @@ -240,19 +426,19 @@ err = libssl_SSL_get_error(self.ssl, count) if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, False) + sockstate = check_socket_and_wait_for_timeout( + space, w_socket, False) elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, True) + sockstate = check_socket_and_wait_for_timeout( + space, w_socket, True) elif (err == SSL_ERROR_ZERO_RETURN and libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN): - return self.space.wrap("") + return space.wrapbytes('') else: sockstate = SOCKET_OPERATION_OK if sockstate == SOCKET_HAS_TIMED_OUT: - raise ssl_error(self.space, "The read operation timed out") + raise ssl_error(space, "The read operation timed out") elif sockstate == SOCKET_IS_NONBLOCKING: break @@ -262,21 +448,27 @@ break if count <= 0: - raise _ssl_seterror(self.space, self, count) + raise _ssl_seterror(space, self, count) result = rffi.str_from_buffer(raw_buf, gc_buf, num_bytes, count) rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) - return self.space.wrap(result) + return space.wrapbytes(result) - def _refresh_nonblocking(self, space): + def _get_socket(self, space): + w_socket = self.w_socket() + if w_socket is None: + raise ssl_error(space, "Underlying socket connection gone") + # just in case the blocking state of the socket has been changed - w_timeout = space.call_method(self.w_socket, "gettimeout") + w_timeout = space.call_method(w_socket, "gettimeout") nonblocking = not space.is_w(w_timeout, space.w_None) libssl_BIO_set_nbio(libssl_SSL_get_rbio(self.ssl), nonblocking) libssl_BIO_set_nbio(libssl_SSL_get_wbio(self.ssl), nonblocking) + return w_socket + def do_handshake(self, space): - self._refresh_nonblocking(space) + w_socket = self._get_socket(space) # Actually negotiate SSL connection # XXX If SSL_do_handshake() returns 0, it's also a failure. 
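# A minimal illustrative sketch, not part of this changeset: do_handshake(),
# read(), write() and shutdown() in this file all follow the same OpenSSL
# non-blocking pattern, i.e. perform the SSL call, and when it reports
# WANT_READ/WANT_WRITE, wait on the underlying socket (respecting the timeout)
# and retry.  The equivalent loop written against the application-level ssl
# module looks roughly like the code below; the function name and the
# SSLWantReadError/SSLWantWriteError exception names are assumptions from the
# modern ssl API, not code from this diff.

import select
import ssl

def handshake_with_retry(sslsock, timeout):
    # sslsock is a non-blocking ssl.SSLSocket on a connected TCP socket;
    # loop until the handshake completes or the timeout expires.
    while True:
        try:
            sslsock.do_handshake()
            return
        except ssl.SSLWantReadError:
            ready = select.select([sslsock], [], [], timeout)[0]
        except ssl.SSLWantWriteError:
            ready = select.select([], [sslsock], [], timeout)[1]
        if not ready:
            raise OSError("SSL handshake timed out")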
@@ -286,10 +478,10 @@ # XXX PyErr_CheckSignals() if err == SSL_ERROR_WANT_READ: sockstate = check_socket_and_wait_for_timeout( - space, self.w_socket, False) + space, w_socket, False) elif err == SSL_ERROR_WANT_WRITE: sockstate = check_socket_and_wait_for_timeout( - space, self.w_socket, True) + space, w_socket, True) else: sockstate = SOCKET_OPERATION_OK if sockstate == SOCKET_HAS_TIMED_OUT: @@ -321,13 +513,13 @@ self._issuer, X509_NAME_MAXLEN) def shutdown(self, space): + w_socket = self._get_socket(space) + # Guard against closed socket - w_fileno = space.call_method(self.w_socket, "fileno") + w_fileno = space.call_method(w_socket, "fileno") if space.int_w(w_fileno) < 0: raise ssl_error(space, "Underlying socket has been closed") - self._refresh_nonblocking(space) - zeros = 0 while True: @@ -360,18 +552,18 @@ ssl_err = libssl_SSL_get_error(self.ssl, ret) if ssl_err == SSL_ERROR_WANT_READ: sockstate = check_socket_and_wait_for_timeout( - self.space, self.w_socket, False) + space, w_socket, False) elif ssl_err == SSL_ERROR_WANT_WRITE: sockstate = check_socket_and_wait_for_timeout( - self.space, self.w_socket, True) + space, w_socket, True) else: break if sockstate == SOCKET_HAS_TIMED_OUT: if ssl_err == SSL_ERROR_WANT_READ: - raise ssl_error(self.space, "The read operation timed out") + raise ssl_error(space, "The read operation timed out") else: - raise ssl_error(self.space, "The write operation timed out") + raise ssl_error(space, "The write operation timed out") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: raise ssl_error(space, "Underlying socket too large for select().") elif sockstate != SOCKET_OPERATION_OK: @@ -381,7 +573,7 @@ if ret < 0: raise _ssl_seterror(space, self, ret) - return self.w_socket + return w_socket def cipher(self, space): if not self.ssl: @@ -409,7 +601,7 @@ return space.newtuple([w_name, w_proto, w_bits]) @unwrap_spec(der=bool) - def peer_certificate(self, der=False): + def peer_certificate(self, space, der=False): """peer_certificate([der=False]) -> certificate Returns the certificate for the peer. If no certificate was provided, @@ -421,7 +613,7 @@ peer certificate, or None if no certificate was provided. 
This will return the certificate even if it wasn't validated.""" if not self.peer_cert: - return self.space.w_None + return space.w_None if der: # return cert in DER-encoded format @@ -429,19 +621,19 @@ buf_ptr[0] = lltype.nullptr(rffi.CCHARP.TO) length = libssl_i2d_X509(self.peer_cert, buf_ptr) if length < 0: - raise _ssl_seterror(self.space, self, length) + raise _ssl_seterror(space, self, length) try: # this is actually an immutable bytes sequence - return self.space.wrap(rffi.charp2str(buf_ptr[0])) + return space.wrap(rffi.charp2str(buf_ptr[0])) finally: libssl_OPENSSL_free(buf_ptr[0]) else: verification = libssl_SSL_CTX_get_verify_mode( libssl_SSL_get_SSL_CTX(self.ssl)) if not verification & SSL_VERIFY_PEER: - return self.space.newdict() + return space.newdict() else: - return _decode_certificate(self.space, self.peer_cert) + return _decode_certificate(space, self.peer_cert) def _decode_certificate(space, certificate, verbose=False): w_retval = space.newdict() @@ -625,22 +817,21 @@ return space.newtuple([w_name, w_value]) -SSLObject.typedef = TypeDef("SSLObject", - server = interp2app(SSLObject.server), - issuer = interp2app(SSLObject.issuer), - write = interp2app(SSLObject.write), - pending = interp2app(SSLObject.pending), - read = interp2app(SSLObject.read), - do_handshake = interp2app(SSLObject.do_handshake), - shutdown = interp2app(SSLObject.shutdown), - cipher = interp2app(SSLObject.cipher), - peer_certificate = interp2app(SSLObject.peer_certificate), +SSLSocket.typedef = TypeDef("_SSLSocket", + server = interp2app(SSLSocket.server), + issuer = interp2app(SSLSocket.issuer), + write = interp2app(SSLSocket.write), + pending = interp2app(SSLSocket.pending), + read = interp2app(SSLSocket.read), + do_handshake = interp2app(SSLSocket.do_handshake), + shutdown = interp2app(SSLSocket.shutdown), + cipher = interp2app(SSLSocket.cipher), + peer_certificate = interp2app(SSLSocket.peer_certificate), ) -def new_sslobject(space, w_sock, side, w_key_file, w_cert_file, - cert_mode, protocol, w_cacerts_file, w_ciphers): - ss = SSLObject(space) +def new_sslobject(space, ctx, w_sock, side, server_hostname): + ss = SSLSocket(space) sock_fd = space.int_w(space.call_method(w_sock, "fileno")) w_timeout = space.call_method(w_sock, "gettimeout") @@ -648,93 +839,27 @@ has_timeout = False else: has_timeout = True - if space.is_w(w_key_file, space.w_None): - key_file = None - else: - key_file = space.str_w(w_key_file) - if space.is_w(w_cert_file, space.w_None): - cert_file = None - else: - cert_file = space.str_w(w_cert_file) - if space.is_w(w_cacerts_file, space.w_None): - cacerts_file = None - else: - cacerts_file = space.str_w(w_cacerts_file) - if space.is_w(w_ciphers, space.w_None): - ciphers = None - else: - ciphers = space.str_w(w_ciphers) - if side == PY_SSL_SERVER and (not key_file or not cert_file): - raise ssl_error(space, "Both the key & certificate files " - "must be specified for server-side operation") - - # set up context - if protocol == PY_SSL_VERSION_TLS1: - method = libssl_TLSv1_method() - elif protocol == PY_SSL_VERSION_SSL3: - method = libssl_SSLv3_method() - elif protocol == PY_SSL_VERSION_SSL2 and not OPENSSL_NO_SSL2: - method = libssl_SSLv2_method() - elif protocol == PY_SSL_VERSION_SSL23: - method = libssl_SSLv23_method() - else: - raise ssl_error(space, "Invalid SSL protocol variant specified") - ss.ctx = libssl_SSL_CTX_new(method) - if not ss.ctx: - raise ssl_error(space, "Could not create SSL context") - - if ciphers: - ret = libssl_SSL_CTX_set_cipher_list(ss.ctx, ciphers) - if 
ret == 0: - raise ssl_error(space, "No cipher can be selected.") - - if cert_mode != PY_SSL_CERT_NONE: - if not cacerts_file: - raise ssl_error(space, - "No root certificates specified for " - "verification of other-side certificates.") - ret = libssl_SSL_CTX_load_verify_locations(ss.ctx, cacerts_file, None) - if ret != 1: - raise _ssl_seterror(space, None, 0) - - if key_file: - ret = libssl_SSL_CTX_use_PrivateKey_file(ss.ctx, key_file, - SSL_FILETYPE_PEM) - if ret < 1: - raise ssl_error(space, "SSL_CTX_use_PrivateKey_file error") - - ret = libssl_SSL_CTX_use_certificate_chain_file(ss.ctx, cert_file) - if ret < 1: - raise ssl_error(space, "SSL_CTX_use_certificate_chain_file error") - - # ssl compatibility - libssl_SSL_CTX_set_options(ss.ctx, SSL_OP_ALL) - - verification_mode = SSL_VERIFY_NONE - if cert_mode == PY_SSL_CERT_OPTIONAL: - verification_mode = SSL_VERIFY_PEER - elif cert_mode == PY_SSL_CERT_REQUIRED: - verification_mode = SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT - libssl_SSL_CTX_set_verify(ss.ctx, verification_mode, None) - ss.ssl = libssl_SSL_new(ss.ctx) # new ssl struct + ss.ssl = libssl_SSL_new(ctx) # new ssl struct libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL libssl_SSL_set_mode(ss.ssl, SSL_MODE_AUTO_RETRY) + if server_hostname: + libssl_SSL_set_tlsext_host_name(ss.ssl, server_hostname); + # If the socket is in non-blocking mode or timeout mode, set the BIO # to non-blocking mode (blocking is the default) if has_timeout: # Set both the read and write BIO's to non-blocking mode libssl_BIO_set_nbio(libssl_SSL_get_rbio(ss.ssl), 1) libssl_BIO_set_nbio(libssl_SSL_get_wbio(ss.ssl), 1) - libssl_SSL_set_connect_state(ss.ssl) if side == PY_SSL_CLIENT: libssl_SSL_set_connect_state(ss.ssl) else: libssl_SSL_set_accept_state(ss.ssl) - ss.w_socket = w_sock + ss.w_socket = weakref.ref(w_sock) return ss def check_socket_and_wait_for_timeout(space, w_sock, writing): @@ -812,7 +937,7 @@ elif err == SSL_ERROR_SYSCALL: e = libssl_ERR_get_error() if e == 0: - if ret == 0 or space.is_w(ss.w_socket, space.w_None): + if ret == 0 or ss.w_socket() is None: errstr = "EOF occurred in violation of protocol" errval = PY_SSL_ERROR_EOF elif ret == -1: @@ -839,16 +964,6 @@ return ssl_error(space, errstr, errval) - at unwrap_spec(side=int, cert_mode=int, protocol=int) -def sslwrap(space, w_socket, side, w_key_file=None, w_cert_file=None, - cert_mode=PY_SSL_CERT_NONE, protocol=PY_SSL_VERSION_SSL23, - w_cacerts_file=None, w_ciphers=None): - """sslwrap(socket, side, [keyfile, certfile]) -> sslobject""" - return space.wrap(new_sslobject( - space, w_socket, side, w_key_file, w_cert_file, - cert_mode, protocol, - w_cacerts_file, w_ciphers)) - class Cache: def __init__(self, space): w_socketerror = interp_socket.get_error(space, "error") diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -63,29 +63,11 @@ _ssl.RAND_egd("entropy") def test_sslwrap(self): - import _ssl, _socket, sys, gc + import ssl, _socket, sys, gc if sys.platform == 'darwin': skip("hangs indefinitely on OSX (also on CPython)") s = _socket.socket() - ss = _ssl.sslwrap(s, 0) - exc = raises(_socket.error, ss.do_handshake) - if sys.platform == 'win32': - assert exc.value.errno == 10057 # WSAENOTCONN - else: - assert exc.value.errno == 32 # Broken pipe - del exc, ss, s - gc.collect() # force the destructor() to be called now - - def test_async_closed(self): - import _ssl, _socket, gc - s = _socket.socket() - 
s.settimeout(3) - ss = _ssl.sslwrap(s, 0) - s.close() - exc = raises(_ssl.SSLError, ss.write, "data") - assert exc.value.strerror == "Underlying socket has been closed." - del exc, ss, s - gc.collect() # force the destructor() to be called now + ss = ssl.wrap_socket(s) class AppTestConnectedSSL: @@ -108,65 +90,65 @@ """) def test_connect(self): - import socket, gc - ss = socket.ssl(self.s) + import ssl, gc + ss = ssl.wrap_socket(self.s) self.s.close() del ss; gc.collect() def test_server(self): - import socket, gc - ss = socket.ssl(self.s) + import ssl, gc + ss = ssl.wrap_socket(self.s) assert isinstance(ss.server(), str) self.s.close() del ss; gc.collect() def test_issuer(self): - import socket, gc - ss = socket.ssl(self.s) + import ssl, gc + ss = ssl.wrap_socket(self.s) assert isinstance(ss.issuer(), str) self.s.close() del ss; gc.collect() def test_write(self): - import socket, gc - ss = socket.ssl(self.s) + import ssl, gc + ss = ssl.wrap_socket(self.s) raises(TypeError, ss.write, 123) - num_bytes = ss.write("hello\n") + num_bytes = ss.write(b"hello\n") assert isinstance(num_bytes, int) assert num_bytes >= 0 self.s.close() del ss; gc.collect() def test_read(self): - import socket, gc - ss = socket.ssl(self.s) - raises(TypeError, ss.read, "foo") - ss.write("hello\n") + import ssl, gc + ss = ssl.wrap_socket(self.s) + raises(TypeError, ss.read, b"foo") + ss.write(b"hello\n") data = ss.read() - assert isinstance(data, str) + assert isinstance(data, bytes) self.s.close() del ss; gc.collect() def test_read_upto(self): - import socket, gc - ss = socket.ssl(self.s) - raises(TypeError, ss.read, "foo") - ss.write("hello\n") + import ssl, gc + ss = ssl.wrap_socket(self.s) + raises(TypeError, ss.read, b"foo") + ss.write(b"hello\n") data = ss.read(10) - assert isinstance(data, str) + assert isinstance(data, bytes) assert len(data) == 10 assert ss.pending() > 50 # many more bytes to read self.s.close() del ss; gc.collect() def test_shutdown(self): - import socket, ssl, sys, gc + import ssl, ssl, sys, gc if sys.platform == 'darwin': skip("get also on CPython: error: [Errno 0]") - ss = socket.ssl(self.s) - ss.write("hello\n") + ss = ssl.wrap_socket(self.s) + ss.write(b"hello\n") assert ss.shutdown() is self.s._sock - raises(ssl.SSLError, ss.write, "hello\n") + raises(ssl.SSLError, ss.write, b"hello\n") del ss; gc.collect() class AppTestConnectedSSL_Timeout(AppTestConnectedSSL): diff --git a/pypy/rlib/ropenssl.py b/pypy/rlib/ropenssl.py --- a/pypy/rlib/ropenssl.py +++ b/pypy/rlib/ropenssl.py @@ -2,6 +2,7 @@ from pypy.rpython.tool import rffi_platform from pypy.translator.platform import platform from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.rlib.unroll import unrolling_iterable import sys @@ -66,6 +67,10 @@ OPENSSL_NO_SSL2 = rffi_platform.Defined("OPENSSL_NO_SSL2") SSL_FILETYPE_PEM = rffi_platform.ConstantInteger("SSL_FILETYPE_PEM") SSL_OP_ALL = rffi_platform.ConstantInteger("SSL_OP_ALL") + SSL_OP_NO_SSLv2 = rffi_platform.ConstantInteger("SSL_OP_NO_SSLv2") + SSL_OP_NO_SSLv3 = rffi_platform.ConstantInteger("SSL_OP_NO_SSLv3") + SSL_OP_NO_TLSv1 = rffi_platform.ConstantInteger("SSL_OP_NO_TLSv1") + HAS_SNI = rffi_platform.Defined("SSL_CTRL_SET_TLSEXT_HOSTNAME") SSL_VERIFY_NONE = rffi_platform.ConstantInteger("SSL_VERIFY_NONE") SSL_VERIFY_PEER = rffi_platform.ConstantInteger("SSL_VERIFY_PEER") SSL_VERIFY_FAIL_IF_NO_PEER_CERT = rffi_platform.ConstantInteger("SSL_VERIFY_FAIL_IF_NO_PEER_CERT") @@ -186,6 +191,14 @@ ssl_external('SSL_CTX_get_verify_mode', [SSL_CTX], rffi.INT) 
ssl_external('SSL_CTX_set_cipher_list', [SSL_CTX, rffi.CCHARP], rffi.INT) ssl_external('SSL_CTX_load_verify_locations', [SSL_CTX, rffi.CCHARP, rffi.CCHARP], rffi.INT) +ssl_external('SSL_CTX_set_session_id_context', [SSL_CTX, rffi.CCHARP, rffi.UINT], rffi.INT) +SSL_CTX_STATS_NAMES = """ + number connect connect_good connect_renegotiate accept accept_god + accept_renegotiate hits misses timeouts cache_full""".split() +SSL_CTX_STATS = unrolling_iterable( + (name, external('SSL_CTX_sess_' + name, [SSL_CTX], rffi.LONG)) + for name in SSL_CTX_STATS_NAMES) + ssl_external('SSL_new', [SSL_CTX], SSL) ssl_external('SSL_set_fd', [SSL, rffi.INT], rffi.INT) ssl_external('SSL_set_mode', [SSL, rffi.INT], rffi.INT, macro=True) @@ -201,6 +214,7 @@ ssl_external('SSL_get_error', [SSL, rffi.INT], rffi.INT) ssl_external('SSL_get_shutdown', [SSL], rffi.INT) ssl_external('SSL_set_read_ahead', [SSL, rffi.INT], lltype.Void) +ssl_external('SSL_set_tlsext_host_name', [SSL, rffi.CCHARP], rffi.INT, macro=True) ssl_external('SSL_get_peer_certificate', [SSL], X509) ssl_external('X509_get_subject_name', [X509], X509_NAME) @@ -211,7 +225,7 @@ ssl_external('X509_NAME_ENTRY_get_object', [X509_NAME_ENTRY], ASN1_OBJECT) ssl_external('X509_NAME_ENTRY_get_data', [X509_NAME_ENTRY], ASN1_STRING) ssl_external('i2d_X509', [X509, rffi.CCHARPP], rffi.INT) -ssl_external('X509_free', [X509], lltype.Void) +ssl_external('X509_free', [X509], lltype.Void, threadsafe=False) ssl_external('X509_get_notBefore', [X509], ASN1_TIME, macro=True) ssl_external('X509_get_notAfter', [X509], ASN1_TIME, macro=True) ssl_external('X509_get_serialNumber', [X509], ASN1_INTEGER) @@ -246,8 +260,8 @@ ssl_external('ERR_get_error', [], rffi.INT) ssl_external('ERR_error_string', [rffi.ULONG, rffi.CCHARP], rffi.CCHARP) -ssl_external('SSL_free', [SSL], lltype.Void) -ssl_external('SSL_CTX_free', [SSL_CTX], lltype.Void) +ssl_external('SSL_free', [SSL], lltype.Void, threadsafe=False) +ssl_external('SSL_CTX_free', [SSL_CTX], lltype.Void, threadsafe=False) ssl_external('CRYPTO_free', [rffi.VOIDP], lltype.Void) libssl_OPENSSL_free = libssl_CRYPTO_free diff --git a/pypy/rlib/rsocket.py b/pypy/rlib/rsocket.py --- a/pypy/rlib/rsocket.py +++ b/pypy/rlib/rsocket.py @@ -744,6 +744,11 @@ if res != 0: raise self.error_handler() + def detach(self): + fd = self.fd + self.fd = _c.INVALID_SOCKET + return fd + if _c.WIN32: def _connect(self, address): """Connect the socket to a remote address.""" From noreply at buildbot.pypy.org Sun Dec 4 20:59:19 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Dec 2011 20:59:19 +0100 (CET) Subject: [pypy-commit] pypy py3k: hg merge default Message-ID: <20111204195919.E8C038205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50149:176c29df1409 Date: 2011-12-04 16:56 +0100 http://bitbucket.org/pypy/pypy/changeset/176c29df1409/ Log: hg merge default diff too long, truncating to 10000 out of 11565 lines diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -307,7 +307,7 @@ self._striptext = 'AssertionError: ' self._excinfo = tup self.type, self.value, tb = self._excinfo - self.typename = self.type.__name__ + self.typename = getattr(self.type, "__name__", "???") self.traceback = py.code.Traceback(tb) def __repr__(self): diff --git a/pypy/annotation/binaryop.py b/pypy/annotation/binaryop.py --- a/pypy/annotation/binaryop.py +++ b/pypy/annotation/binaryop.py @@ -252,7 +252,26 @@ # unsignedness is considered a rare and contagious disease def union((int1, int2)): 
- knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + if int1.unsigned == int2.unsigned: + knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + else: + t1 = int1.knowntype + if t1 is bool: + t1 = int + t2 = int2.knowntype + if t2 is bool: + t2 = int + + if t2 is int: + if int2.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t1 + knowntype = t1 + elif t1 is int: + if int1.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t2 + knowntype = t2 + else: + raise UnionError, "Merging these types (%s, %s) is not supported" % (t1, t2) return SomeInteger(nonneg=int1.nonneg and int2.nonneg, knowntype=knowntype) diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -591,13 +591,11 @@ immutable = True def __init__(self, method): self.method = method - -NUMBER = object() + annotation_to_ll_map = [ (SomeSingleFloat(), lltype.SingleFloat), (s_None, lltype.Void), # also matches SomeImpossibleValue() (s_Bool, lltype.Bool), - (SomeInteger(knowntype=r_ulonglong), NUMBER), (SomeFloat(), lltype.Float), (SomeLongFloat(), lltype.LongFloat), (SomeChar(), lltype.Char), @@ -623,10 +621,11 @@ return lltype.Ptr(p.PARENTTYPE) if isinstance(s_val, SomePtr): return s_val.ll_ptrtype + if type(s_val) is SomeInteger: + return lltype.build_number(None, s_val.knowntype) + for witness, T in annotation_to_ll_map: if witness.contains(s_val): - if T is NUMBER: - return lltype.build_number(None, s_val.knowntype) return T if info is None: info = '' @@ -635,7 +634,7 @@ raise ValueError("%sshould return a low-level type,\ngot instead %r" % ( info, s_val)) -ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map if ll is not NUMBER]) +ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map]) def lltype_to_annotation(T): try: diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py --- a/pypy/annotation/test/test_annrpython.py +++ b/pypy/annotation/test/test_annrpython.py @@ -856,6 +856,46 @@ py.test.raises(Exception, a.build_types, f, []) # if you want to get a r_uint, you have to be explicit about it + def test_add_different_ints(self): + def f(a, b): + return a + b + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_different_ints(self): + def f(a, b): + if a: + c = a + else: + c = b + return c + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_ruint_zero(self): + def f(a): + if a: + c = a + else: + c = 0 + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_merge_ruint_nonneg_signed(self): + def f(a, b): + if a: + c = a + else: + assert b >= 0 + c = b + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint, int]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_prebuilt_long_that_is_not_too_long(self): small_constant = 12L def f(): @@ -3029,7 +3069,7 @@ if g(x, y): g(x, r_uint(y)) a = self.RPythonAnnotator() - a.build_types(f, [int, int]) + py.test.raises(Exception, a.build_types, f, [int, int]) def test_compare_with_zero(self): def g(): diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -282,6 +282,10 @@ 
("objspace.std.withmethodcache", True), ]), + BoolOption("withliststrategies", + "enable optimized ways to store lists of primitives ", + default=True), + BoolOption("withtypeversion", "version type objects when changing them", cmdline=None, diff --git a/pypy/config/test/test_translationoption.py b/pypy/config/test/test_translationoption.py new file mode 100644 --- /dev/null +++ b/pypy/config/test/test_translationoption.py @@ -0,0 +1,10 @@ +import py +from pypy.config.translationoption import get_combined_translation_config +from pypy.config.translationoption import set_opt_level +from pypy.config.config import ConflictConfigError + + +def test_no_gcrootfinder_with_boehm(): + config = get_combined_translation_config() + config.translation.gcrootfinder = "shadowstack" + py.test.raises(ConflictConfigError, set_opt_level, config, '0') diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -69,8 +69,8 @@ "statistics": [("translation.gctransformer", "framework")], "generation": [("translation.gctransformer", "framework")], "hybrid": [("translation.gctransformer", "framework")], - "boehm": [("translation.gctransformer", "boehm"), - ("translation.continuation", False)], # breaks + "boehm": [("translation.continuation", False), # breaks + ("translation.gctransformer", "boehm")], "markcompact": [("translation.gctransformer", "framework")], "minimark": [("translation.gctransformer", "framework")], }, @@ -398,6 +398,10 @@ # make_sure_not_resized often relies on it, so we always enable them config.translation.suggest(list_comprehension_operations=True) + # finally, make the choice of the gc definitive. This will fail + # if we have specified strange inconsistent settings. + config.translation.gc = config.translation.gc + # ---------------------------------------------------------------- def set_platform(config): diff --git a/pypy/doc/config/objspace.std.withliststrategies.txt b/pypy/doc/config/objspace.std.withliststrategies.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withliststrategies.txt @@ -0,0 +1,2 @@ +Enable list strategies: Use specialized representations for lists of primitive +objects, such as ints. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -262,6 +262,26 @@ documented as such (as e.g. for hasattr()), in most cases PyPy lets the exception propagate instead. +Object Identity of Primitive Values, ``is`` and ``id`` +------------------------------------------------------- + +Object identity of primitive values works by value equality, not by identity of +the wrapper. This means that ``x + 1 is x + 1`` is always true, for arbitrary +integers ``x``. The rule applies for the following types: + + - ``int`` + + - ``float`` + + - ``long`` + + - ``complex`` + +This change requires some changes to ``id`` as well. ``id`` fulfills the +following condition: ``x is y <=> id(x) == id(y)``. Therefore ``id`` of the +above types will return a value that is computed from the argument, and can +thus be larger than ``sys.maxint`` (i.e. it can be an arbitrary long). + Miscellaneous ------------- @@ -284,14 +304,14 @@ never a dictionary as it sometimes is in CPython. Assigning to ``__builtins__`` has no effect. -* Do not compare immutable objects with ``is``. For example on CPython - it is true that ``x is 0`` works, i.e. 
does the same as ``type(x) is - int and x == 0``, but it is so by accident. If you do instead - ``x is 1000``, then it stops working, because 1000 is too large and - doesn't come from the internal cache. In PyPy it fails to work in - both cases, because we have no need for a cache at all. +* directly calling the internal magic methods of a few built-in types + with invalid arguments may have a slightly different result. For + example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return + ``NotImplemented`` on PyPy; on CPython, only the later does, and the + former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` + both raise ``TypeError`` everywhere.) This difference is an + implementation detail that shows up because of internal C-level slots + that PyPy does not have. -* Also, object identity of immutable keys in dictionaries is not necessarily - preserved. .. include:: _ref.txt diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -188,6 +188,12 @@ # ------------------------------------------------------------------- + def is_w(self, space, w_other): + return self is w_other + + def unique_id(self, space): + return space.wrap(compute_unique_id(self)) + def bytes_w(self, space): w_msg = typed_unwrap_error_msg(space, "bytes", self) raise OperationError(space.w_TypeError, w_msg) @@ -686,9 +692,17 @@ """shortcut for space.is_true(space.eq(w_obj1, w_obj2))""" return self.is_w(w_obj1, w_obj2) or self.is_true(self.eq(w_obj1, w_obj2)) - def is_w(self, w_obj1, w_obj2): - """shortcut for space.is_true(space.is_(w_obj1, w_obj2))""" - return self.is_true(self.is_(w_obj1, w_obj2)) + def is_(self, w_one, w_two): + return self.newbool(self.is_w(w_one, w_two)) + + def is_w(self, w_one, w_two): + # done by a method call on w_two (and not on w_one, because of the + # expected programming style where we say "if x is None" or + # "if x is object"). + return w_two.is_w(self, w_one) + + def id(self, w_obj): + return w_obj.unique_id(self) def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" @@ -884,6 +898,16 @@ """ return self.unpackiterable(w_iterable, expected_length) + def listview_str(self, w_list): + """ Return a list of unwrapped strings out of a list of strings. If the + argument is not a list or does not contain only strings, return None. + May return None anyway. + """ + return None + + def newlist_str(self, list_s): + return self.newlist([self.wrap(s) for s in list_s]) + @jit.unroll_safe def exception_match(self, w_exc_type, w_check_class): """Checks if the given exception type matches 'w_check_class'.""" @@ -1007,9 +1031,6 @@ def isinstance_w(self, w_obj, w_type): return self.is_true(self.isinstance(w_obj, w_type)) - def id(self, w_obj): - return self.wrap(compute_unique_id(w_obj)) - # The code below only works # for the simple case (new-style instance). 
# These methods are patched with the full logic by the builtins diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -1,8 +1,9 @@ +from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.gateway import NoneNotWrapped +from pypy.interpreter.pyopcode import LoopBlock from pypy.rlib import jit -from pypy.interpreter.pyopcode import LoopBlock +from pypy.rlib.objectmodel import specialize class GeneratorIterator(Wrappable): @@ -156,38 +157,43 @@ break block = block.previous - def unpack_into(self, results_w): - """This is a hack for performance: runs the generator and collects - all produced items in a list.""" - # XXX copied and simplified version of send_ex() - space = self.space - if self.running: - raise OperationError(space.w_ValueError, - space.wrap('generator already executing')) - frame = self.frame - if frame is None: # already finished - return - self.running = True - try: - pycode = self.pycode - while True: - jitdriver.jit_merge_point(self=self, frame=frame, - results_w=results_w, - pycode=pycode) - try: - w_result = frame.execute_frame(space.w_None) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break - # if the frame is now marked as finished, it was RETURNed from - if frame.frame_finished_execution: - break - results_w.append(w_result) # YIELDed - finally: - frame.f_backref = jit.vref_None - self.running = False - self.frame = None - -jitdriver = jit.JitDriver(greens=['pycode'], - reds=['self', 'frame', 'results_w']) + # Results can be either an RPython list of W_Root, or it can be an + # app-level W_ListObject, which also has an append() method, that's why we + # generate 2 versions of the function and 2 jit drivers. 
+ def _create_unpack_into(): + jitdriver = jit.JitDriver(greens=['pycode'], + reds=['self', 'frame', 'results']) + def unpack_into(self, results): + """This is a hack for performance: runs the generator and collects + all produced items in a list.""" + # XXX copied and simplified version of send_ex() + space = self.space + if self.running: + raise OperationError(space.w_ValueError, + space.wrap('generator already executing')) + frame = self.frame + if frame is None: # already finished + return + self.running = True + try: + pycode = self.pycode + while True: + jitdriver.jit_merge_point(self=self, frame=frame, + results=results, pycode=pycode) + try: + w_result = frame.execute_frame(space.w_None) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break + # if the frame is now marked as finished, it was RETURNed from + if frame.frame_finished_execution: + break + results.append(w_result) # YIELDed + finally: + frame.f_backref = jit.vref_None + self.running = False + self.frame = None + return unpack_into + unpack_into = _create_unpack_into() + unpack_into_w = _create_unpack_into() \ No newline at end of file diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated, instantiate from pypy.rlib.jit import hint from pypy.rlib.debug import make_sure_not_resized, check_nonneg -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rlib import jit from pypy.tool import stdlib_opcode from pypy.tool.stdlib_opcode import host_bytecode_spec @@ -167,7 +167,7 @@ # Execution starts just after the last_instr. Initially, # last_instr is -1. After a generator suspends it points to # the YIELD_VALUE instruction. 
- next_instr = self.last_instr + 1 + next_instr = r_uint(self.last_instr + 1) if next_instr != 0: self.pushvalue(w_inputvalue) # @@ -691,6 +691,7 @@ handlerposition = space.int_w(w_handlerposition) valuestackdepth = space.int_w(w_valuestackdepth) assert valuestackdepth >= 0 + assert handlerposition >= 0 blk = instantiate(get_block_class(opname)) blk.handlerposition = handlerposition blk.valuestackdepth = valuestackdepth diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -774,6 +774,7 @@ raise Yield def jump_absolute(self, jumpto, next_instr, ec): + check_nonneg(jumpto) return jumpto def JUMP_FORWARD(self, jumpby, next_instr): @@ -1215,7 +1216,7 @@ def handle(self, frame, unroller): next_instr = self.really_handle(frame, unroller) # JIT hack - return next_instr + return r_uint(next_instr) def really_handle(self, frame, unroller): """ Purely abstract method diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -523,7 +523,7 @@ assert isinstance(meth2, Method) assert meth2.call_args(args) == obj1 # Check method returned from unbound_method.__get__() - w_meth3 = descr_function_get(space, func, None, space.type(obj2)) + w_meth3 = descr_function_get(space, func, space.w_None, space.type(obj2)) meth3 = space.unwrap(w_meth3) assert meth3 is func diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -63,10 +63,13 @@ def test_unpackiterable(self): space = self.space w = space.wrap - l = [w(1), w(2), w(3), w(4)] + l = [space.newlist([]) for l in range(4)] w_l = space.newlist(l) - assert space.unpackiterable(w_l) == l - assert space.unpackiterable(w_l, 4) == l + l1 = space.unpackiterable(w_l) + l2 = space.unpackiterable(w_l, 4) + for i in range(4): + assert space.is_w(l1[i], l[i]) + assert space.is_w(l2[i], l[i]) err = raises(OperationError, space.unpackiterable, w_l, 3) assert err.value.match(space, space.w_ValueError) err = raises(OperationError, space.unpackiterable, w_l, 5) diff --git a/pypy/jit/backend/llsupport/asmmemmgr.py b/pypy/jit/backend/llsupport/asmmemmgr.py --- a/pypy/jit/backend/llsupport/asmmemmgr.py +++ b/pypy/jit/backend/llsupport/asmmemmgr.py @@ -37,25 +37,25 @@ self._add_free_block(smaller_stop, stop) stop = smaller_stop result = (start, stop) - self.total_mallocs += stop - start + self.total_mallocs += r_uint(stop - start) return result # pair (start, stop) def free(self, start, stop): """Free a block (start, stop) returned by a previous malloc().""" - self.total_mallocs -= (stop - start) + self.total_mallocs -= r_uint(stop - start) self._add_free_block(start, stop) def open_malloc(self, minsize): """Allocate at least minsize bytes. 
Returns (start, stop).""" result = self._allocate_block(minsize) (start, stop) = result - self.total_mallocs += stop - start + self.total_mallocs += r_uint(stop - start) return result def open_free(self, middle, stop): """Used for freeing the end of an open-allocated block of memory.""" if stop - middle >= self.min_fragment: - self.total_mallocs -= (stop - middle) + self.total_mallocs -= r_uint(stop - middle) self._add_free_block(middle, stop) return True else: @@ -77,7 +77,7 @@ # Hack to make sure that mcs are not within 32-bits of one # another for testing purposes rmmap.hint.pos += 0x80000000 - size - self.total_memory_allocated += size + self.total_memory_allocated += r_uint(size) data = rffi.cast(lltype.Signed, data) return self._add_free_block(data, data + size) diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -648,14 +648,10 @@ # make a malloc function, with two arguments def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - has_finalizer = bool(tid & (1<= yll -def _ll_2_llong_ult(xull, yull): +def _ll_2_ullong_eq(xull, yull): + return xull == yull + +def _ll_2_ullong_ne(xull, yull): + return xull != yull + +def _ll_2_ullong_ult(xull, yull): return xull < yull -def _ll_2_llong_ule(xull, yull): +def _ll_2_ullong_ule(xull, yull): return xull <= yull -def _ll_2_llong_ugt(xull, yull): +def _ll_2_ullong_ugt(xull, yull): return xull > yull -def _ll_2_llong_uge(xull, yull): +def _ll_2_ullong_uge(xull, yull): return xull >= yull def _ll_2_llong_add(xll, yll): @@ -312,14 +321,41 @@ z = r_ulonglong(xll) ^ r_ulonglong(yll) return u_to_longlong(z) +def _ll_2_ullong_add(xull, yull): + z = (xull) + (yull) + return (z) + +def _ll_2_ullong_sub(xull, yull): + z = (xull) - (yull) + return (z) + +def _ll_2_ullong_mul(xull, yull): + z = (xull) * (yull) + return (z) + +def _ll_2_ullong_and(xull, yull): + z = (xull) & (yull) + return (z) + +def _ll_2_ullong_or(xull, yull): + z = (xull) | (yull) + return (z) + +def _ll_2_ullong_xor(xull, yull): + z = (xull) ^ (yull) + return (z) + def _ll_2_llong_lshift(xll, y): z = r_ulonglong(xll) << y return u_to_longlong(z) +def _ll_2_ullong_lshift(xull, y): + return xull << y + def _ll_2_llong_rshift(xll, y): return xll >> y -def _ll_2_llong_urshift(xull, y): +def _ll_2_ullong_urshift(xull, y): return xull >> y def _ll_1_llong_from_int(x): diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -999,13 +999,13 @@ "found %d %r, expected %d" % (found, insn, expected_count)) return insns - def check_loops(self, expected=None, everywhere=False, **check): + def check_resops(self, expected=None, **check): insns = {} for loop in self.loops: - if not everywhere: - if getattr(loop, '_ignore_during_counting', False): - continue insns = loop.summary(adding_insns=insns) + return self._check_insns(insns, expected, check) + + def _check_insns(self, insns, expected, check): if expected is not None: insns.pop('debug_merge_point', None) assert insns == expected @@ -1016,6 +1016,25 @@ "found %d %r, expected %d" % (found, insn, expected_count)) return insns + def check_simple_loop(self, expected=None, **check): + # Usefull in the simplest case when we have only one trace ending with + # a jump back to itself and possibly a few bridges ending with finnish. + # Only the operations within the loop formed by that single jump will + # be counted. 
+ + # XXX hacked version, ignore and remove me when jit-targets is merged. + loops = self.get_all_loops() + loops = [loop for loop in loops if 'Preamble' not in repr(loop)] #XXX + assert len(loops) == 1 + loop, = loops + jumpop = loop.operations[-1] + assert jumpop.getopnum() == rop.JUMP + insns = {} + for op in loop.operations: + opname = op.getopname() + insns[opname] = insns.get(opname, 0) + 1 + return self._check_insns(insns, expected, check) + def check_consistency(self): "NOT_RPYTHON" for loop in self.loops: diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -7,7 +7,7 @@ from pypy.rlib.libffi import Func from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.annlowlevel import cast_base_ptr_to_instance -from pypy.rpython.lltypesystem import llmemory +from pypy.rpython.lltypesystem import llmemory, rffi class FuncInfo(object): @@ -237,7 +237,7 @@ else: assert False, "unsupported ffitype or kind" # - fieldsize = ffitype.c_size + fieldsize = rffi.getintfield(ffitype, 'c_size') return self.optimizer.cpu.interiorfielddescrof_dynamic( offset, width, fieldsize, is_pointer, is_float, is_signed ) diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -564,9 +564,12 @@ descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) - newboxes = modifier.finish(self.values, self.pendingfields) - if len(newboxes) > self.metainterp_sd.options.failargs_limit: # XXX be careful here - compile.giveup() + try: + newboxes = modifier.finish(self.values, self.pendingfields) + if len(newboxes) > self.metainterp_sd.options.failargs_limit: + raise resume.TagOverflow + except resume.TagOverflow: + raise compile.giveup() descr.store_final_boxes(op, newboxes) # if op.getopnum() == rop.GUARD_VALUE: diff --git a/pypy/jit/metainterp/resume.py b/pypy/jit/metainterp/resume.py --- a/pypy/jit/metainterp/resume.py +++ b/pypy/jit/metainterp/resume.py @@ -93,12 +93,14 @@ TAGMASK = 3 +class TagOverflow(Exception): + pass + def tag(value, tagbits): - if tagbits >> 2: - raise ValueError + assert 0 <= tagbits <= 3 sx = value >> 13 if sx != 0 and sx != -1: - raise ValueError + raise TagOverflow return rffi.r_short(value<<2|tagbits) def untag(value): @@ -153,7 +155,7 @@ return self._newconst(const) try: return tag(val, TAGINT) - except ValueError: + except TagOverflow: pass tagged = self.large_ints.get(val, UNASSIGNED) if not tagged_eq(tagged, UNASSIGNED): @@ -429,8 +431,7 @@ fieldnum = self._gettagged(fieldbox) # the index is limited to 2147483647 (64-bit machines only) if itemindex > 2147483647: - from pypy.jit.metainterp import compile - compile.giveup() + raise TagOverflow itemindex = rffi.cast(rffi.INT, itemindex) # rd_pendingfields[i].lldescr = lldescr diff --git a/pypy/jit/metainterp/test/support.py b/pypy/jit/metainterp/test/support.py --- a/pypy/jit/metainterp/test/support.py +++ b/pypy/jit/metainterp/test/support.py @@ -155,9 +155,11 @@ class JitMixin: basic = True - def check_loops(self, expected=None, everywhere=False, **check): - get_stats().check_loops(expected=expected, everywhere=everywhere, - **check) + def check_resops(self, expected=None, **check): + get_stats().check_resops(expected=expected, **check) + def 
check_simple_loop(self, expected=None, **check): + get_stats().check_simple_loop(expected=expected, **check) + def check_loop_count(self, count): """NB. This is a hack; use check_tree_loop_count() or check_enter_count() for the real thing. diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -79,9 +79,8 @@ res = self.meta_interp(f, [6, 7]) assert res == 42 self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) + self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, 'guard_true': 2, 'int_sub': 2}) + if self.basic: found = 0 for op in get_stats().loops[0]._all_operations(): @@ -108,7 +107,7 @@ res = self.meta_interp(f, [6, 7]) assert res == 1323 self.check_loop_count(1) - self.check_loops(int_mul=1) + self.check_simple_loop(int_mul=1) def test_loop_variant_mul_ovf(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -125,7 +124,7 @@ res = self.meta_interp(f, [6, 7]) assert res == 1323 self.check_loop_count(1) - self.check_loops(int_mul_ovf=1) + self.check_simple_loop(int_mul_ovf=1) def test_loop_invariant_mul1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -140,9 +139,10 @@ res = self.meta_interp(f, [6, 7]) assert res == 252 self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) + self.check_simple_loop(int_mul=0) + self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, + 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) + def test_loop_invariant_mul_ovf(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -158,10 +158,11 @@ res = self.meta_interp(f, [6, 7]) assert res == 308 self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 2, 'int_sub': 1, 'int_gt': 1, - 'int_lshift': 1, - 'jump': 1}) + self.check_simple_loop(int_mul_ovf=0) + self.check_resops({'jump': 2, 'int_lshift': 2, 'int_gt': 2, + 'int_mul_ovf': 1, 'int_add': 4, + 'guard_true': 2, 'guard_no_overflow': 1, + 'int_sub': 2}) def test_loop_invariant_mul_bridge1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -194,11 +195,9 @@ res = self.meta_interp(f, [6, 32]) assert res == 1167 self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) - + self.check_resops({'int_lt': 3, 'int_gt': 2, 'int_add': 5, + 'guard_true': 3, 'int_sub': 4, 'jump': 4, + 'int_mul': 2, 'guard_false': 2}) def test_loop_invariant_mul_bridge_maintaining2(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -216,10 +215,9 @@ res = self.meta_interp(f, [6, 32]) assert res == 1692 self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) + self.check_resops({'int_lt': 3, 'int_gt': 2, 'int_add': 5, + 'guard_true': 3, 'int_sub': 4, 'jump': 4, + 'int_mul': 2, 'guard_false': 2}) def test_loop_invariant_mul_bridge_maintaining3(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x', 'm']) @@ -237,10 +235,9 @@ res = self.meta_interp(f, [6, 32, 16]) assert res == 1692 self.check_loop_count(3) - self.check_loops({'int_add': 2, 'int_lt': 1, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, 'int_mul': 1, - 'int_gt': 2, 'guard_true': 2}) + self.check_resops({'int_lt': 2, 'int_gt': 4, 'guard_false': 2, + 
'guard_true': 4, 'int_sub': 4, 'jump': 4, + 'int_mul': 3, 'int_add': 4}) def test_loop_invariant_intbox(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -261,9 +258,9 @@ res = self.meta_interp(f, [6, 7]) assert res == 252 self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) + self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, + 'getfield_gc_pure': 1, 'int_mul': 1, + 'guard_true': 2, 'int_sub': 2}) def test_loops_are_transient(self): import gc, weakref @@ -381,7 +378,7 @@ assert res == 0 # CALL_PURE is recorded in the history, but turned into a CALL # by optimizeopt.py - self.check_loops(int_sub=0, call=1, call_pure=0) + self.check_resops(call_pure=0, call=2, int_sub=0) def test_constfold_call_elidable(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) @@ -397,7 +394,7 @@ res = self.meta_interp(f, [21, 5]) assert res == -1 # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) + self.check_resops(call_pure=0, call=0, int_sub=2) def test_constfold_call_elidable_2(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) @@ -417,7 +414,7 @@ res = self.meta_interp(f, [21, 5]) assert res == -1 # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) + self.check_resops(call_pure=0, call=0, int_sub=2) def test_elidable_function_returning_object(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) @@ -442,7 +439,7 @@ res = self.meta_interp(f, [21, 5]) assert res == -1 # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0, getfield_gc=0) + self.check_resops(call_pure=0, call=0, getfield_gc=1, int_sub=2) def test_elidable_raising(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) @@ -463,12 +460,12 @@ res = self.meta_interp(f, [22, 6]) assert res == -3 # the CALL_PURE is constant-folded away during tracing - self.check_loops(int_sub=1, call=0, call_pure=0) + self.check_resops(call_pure=0, call=0, int_sub=2) # res = self.meta_interp(f, [22, -5]) assert res == 0 # raises: becomes CALL and is not constant-folded away - self.check_loops(int_sub=1, call=1, call_pure=0) + self.check_resops(call_pure=0, call=2, int_sub=2) def test_elidable_raising_2(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) @@ -489,12 +486,12 @@ res = self.meta_interp(f, [22, 6]) assert res == -3 # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) + self.check_resops(call_pure=0, call=0, int_sub=2) # res = self.meta_interp(f, [22, -5]) assert res == 0 # raises: becomes CALL and is not constant-folded away - self.check_loops(int_sub=1, call=1, call_pure=0) + self.check_resops(call_pure=0, call=2, int_sub=2) def test_constant_across_mp(self): myjitdriver = JitDriver(greens = [], reds = ['n']) @@ -533,7 +530,7 @@ policy = StopAtXPolicy(externfn) res = self.meta_interp(f, [31], policy=policy) assert res == 42 - self.check_loops(int_mul=1, int_mod=0) + self.check_resops(int_mul=2, int_mod=0) def test_we_are_jitted(self): myjitdriver = JitDriver(greens = [], reds = ['y']) @@ -835,7 +832,7 @@ return n res = self.meta_interp(f, [20, 1, 2]) assert res == 0 - self.check_loops(call=0) + self.check_resops(call=0) def test_abs(self): myjitdriver = JitDriver(greens = [], reds = ['i', 't']) @@ -865,9 +862,8 @@ res = self.meta_interp(f, [6, 7]) assert res == 42.0 self.check_loop_count(1) - 
self.check_loops({'guard_true': 1, - 'float_add': 1, 'float_sub': 1, 'float_gt': 1, - 'jump': 1}) + self.check_resops({'jump': 2, 'float_gt': 2, 'float_add': 2, + 'float_sub': 2, 'guard_true': 2}) def test_print(self): myjitdriver = JitDriver(greens = [], reds = ['n']) @@ -1038,7 +1034,7 @@ return x res = self.meta_interp(f, [20], enable_opts='') assert res == f(20) - self.check_loops(call=0) + self.check_resops(call=0) def test_zerodivisionerror(self): # test the case of exception-raising operation that is not delegated @@ -1351,7 +1347,7 @@ res = self.meta_interp(f, [6, 7]) assert res == 42 self.check_loop_count(1) - self.check_loops(call=1) + self.check_resops(call=2) def test_merge_guardclass_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -1378,8 +1374,7 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - self.check_loops(guard_class=0, guard_value=3) - self.check_loops(guard_class=0, guard_value=6, everywhere=True) + self.check_resops(guard_class=0, guard_value=6) def test_merge_guardnonnull_guardclass(self): from pypy.rlib.objectmodel import instantiate @@ -1407,11 +1402,9 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=2, - guard_nonnull_class=2, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=4, - guard_nonnull_class=4, guard_isnull=2, - everywhere=True) + self.check_resops(guard_class=0, guard_nonnull=4, + guard_nonnull_class=4, guard_isnull=2) + def test_merge_guardnonnull_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -1438,11 +1431,9 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=2, guard_value=2, - guard_nonnull_class=0, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=4, guard_value=4, - guard_nonnull_class=0, guard_isnull=2, - everywhere=True) + self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4, + guard_nonnull_class=0, guard_isnull=2) + def test_merge_guardnonnull_guardvalue_2(self): from pypy.rlib.objectmodel import instantiate @@ -1469,11 +1460,9 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=2, guard_value=2, - guard_nonnull_class=0, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=4, guard_value=4, - guard_nonnull_class=0, guard_isnull=2, - everywhere=True) + self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4, + guard_nonnull_class=0, guard_isnull=2) + def test_merge_guardnonnull_guardclass_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -1503,11 +1492,9 @@ return x res = self.meta_interp(f, [399], listops=True) assert res == f(399) - self.check_loops(guard_class=0, guard_nonnull=3, guard_value=3, - guard_nonnull_class=0, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=6, guard_value=6, - guard_nonnull_class=0, guard_isnull=2, - everywhere=True) + self.check_resops(guard_class=0, guard_nonnull=6, guard_value=6, + guard_nonnull_class=0, guard_isnull=2) + def test_residual_call_doesnt_lose_info(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'l']) @@ -1533,8 +1520,7 @@ y.v = g(y.v) - y.v/y.v + lc/l[0] - 1 return y.v res = self.meta_interp(f, [20], listops=True) - self.check_loops(getfield_gc=0, getarrayitem_gc=0) - self.check_loops(getfield_gc=1, getarrayitem_gc=0, everywhere=True) + self.check_resops(getarrayitem_gc=0, getfield_gc=1) def 
test_guard_isnull_nonnull(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) @@ -1562,7 +1548,7 @@ return res res = self.meta_interp(f, [21]) assert res == 42 - self.check_loops(guard_nonnull=1, guard_isnull=1) + self.check_resops(guard_nonnull=2, guard_isnull=2) def test_loop_invariant1(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) @@ -1589,8 +1575,7 @@ return res res = self.meta_interp(g, [21]) assert res == 3 * 21 - self.check_loops(call=0) - self.check_loops(call=1, everywhere=True) + self.check_resops(call=1) def test_bug_optimizeopt_mutates_ops(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'res', 'const', 'a']) @@ -1710,7 +1695,7 @@ return x res = self.meta_interp(f, [8]) assert res == 0 - self.check_loops(jit_debug=2) + self.check_resops(jit_debug=4) def test_assert_green(self): def f(x, promote_flag): @@ -1752,9 +1737,10 @@ res = self.meta_interp(g, [6, 7]) assert res == 6*8 + 6**8 self.check_loop_count(5) - self.check_loops({'guard_true': 2, - 'int_add': 1, 'int_mul': 1, 'int_sub': 2, - 'int_gt': 2, 'jump': 2}) + self.check_resops({'guard_class': 2, 'int_gt': 4, + 'getfield_gc': 4, 'guard_true': 4, + 'int_sub': 4, 'jump': 4, 'int_mul': 2, + 'int_add': 2}) def test_multiple_specialied_versions_array(self): myjitdriver = JitDriver(greens = [], reds = ['idx', 'y', 'x', 'res', @@ -1795,7 +1781,7 @@ res = self.meta_interp(g, [6, 14]) assert res == g(6, 14) self.check_loop_count(9) - self.check_loops(getarrayitem_gc=8, everywhere=True) + self.check_resops(getarrayitem_gc=8) def test_multiple_specialied_versions_bridge(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) @@ -1983,8 +1969,8 @@ res = self.meta_interp(g, [3, 23]) assert res == 7068153 self.check_loop_count(7) - self.check_loops(guard_true=4, guard_class=0, int_add=2, int_mul=2, - guard_false=2) + self.check_resops(guard_true=6, guard_class=2, int_mul=3, + int_add=3, guard_false=3) def test_dont_trace_every_iteration(self): myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'i', 'sa']) @@ -2228,27 +2214,27 @@ return sa assert self.meta_interp(f1, [5, 5]) == 50 - self.check_loops(int_rshift=0, everywhere=True) + self.check_resops(int_rshift=0) for f in (f1, f2): assert self.meta_interp(f, [5, 6]) == 50 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) assert self.meta_interp(f, [10, 5]) == 100 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) assert self.meta_interp(f, [10, 6]) == 100 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) assert self.meta_interp(f, [5, 31]) == 0 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) bigval = 1 while (bigval << 3).__class__ is int: bigval = bigval << 1 assert self.meta_interp(f, [bigval, 5]) == 0 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) def test_overflowing_shift_neg(self): myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'n', 'sa']) @@ -2273,27 +2259,27 @@ return sa assert self.meta_interp(f1, [-5, 5]) == -50 - self.check_loops(int_rshift=0, everywhere=True) + self.check_resops(int_rshift=0) for f in (f1, f2): assert self.meta_interp(f, [-5, 6]) == -50 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) assert self.meta_interp(f, [-10, 5]) == -100 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) assert self.meta_interp(f, [-10, 6]) == -100 - 
self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) assert self.meta_interp(f, [-5, 31]) == 0 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) bigval = 1 while (bigval << 3).__class__ is int: bigval = bigval << 1 assert self.meta_interp(f, [bigval, 5]) == 0 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) def test_pure_op_not_to_be_propagated(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'sa']) @@ -2433,8 +2419,7 @@ if counter > 10: return 7 assert self.meta_interp(build, []) == 7 - self.check_loops(getfield_gc_pure=0) - self.check_loops(getfield_gc_pure=2, everywhere=True) + self.check_resops(getfield_gc_pure=2) def test_args_becomming_equal(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a', 'b']) @@ -2567,7 +2552,7 @@ i += 1 return sa assert self.meta_interp(f, [20]) == f(20) - self.check_loops(int_gt=1, int_lt=2, int_ge=0, int_le=0) + self.check_resops(int_lt=4, int_le=0, int_ge=0, int_gt=2) def test_intbounds_not_generalized1(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa']) @@ -2584,7 +2569,8 @@ i += 1 return sa assert self.meta_interp(f, [20]) == f(20) - self.check_loops(int_gt=1, int_lt=3, int_ge=2, int_le=1) + self.check_resops(int_lt=6, int_le=2, int_ge=4, int_gt=3) + def test_intbounds_not_generalized2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'node']) @@ -2604,7 +2590,7 @@ i += 1 return sa assert self.meta_interp(f, [20]) == f(20) - self.check_loops(int_gt=1, int_lt=2, int_ge=1, int_le=1) + self.check_resops(int_lt=4, int_le=3, int_ge=3, int_gt=2) def test_retrace_limit1(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a']) @@ -2858,7 +2844,7 @@ return a[0].intvalue res = self.meta_interp(f, [100]) assert res == -2 - #self.check_loops(getarrayitem_gc=0, setarrayitem_gc=0) -- xxx? + self.check_resops(setarrayitem_gc=2, getarrayitem_gc=1) def test_retrace_ending_up_retracing_another_loop(self): @@ -2958,7 +2944,7 @@ i += 1 res = self.meta_interp(f, [32]) assert res == f(32) - self.check_loops(arraylen_gc=2) + self.check_resops(arraylen_gc=3) def test_ulonglong_mod(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'i']) @@ -3145,9 +3131,9 @@ a = A(a.i + 1) self.meta_interp(f, []) - self.check_loops(new_with_vtable=0) + self.check_resops(new_with_vtable=0) self.meta_interp(f, [], enable_opts='') - self.check_loops(new_with_vtable=1) + self.check_resops(new_with_vtable=1) def test_two_loopinvariant_arrays1(self): from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -3239,7 +3225,7 @@ return sa res = self.meta_interp(f, [32]) assert res == f(32) - self.check_loops(arraylen_gc=2, everywhere=True) + self.check_resops(arraylen_gc=2) def test_release_gil_flush_heap_cache(self): if sys.platform == "win32": @@ -3276,7 +3262,7 @@ lock.release() return n res = self.meta_interp(f, [10, 1]) - self.check_loops(getfield_gc=2) + self.check_resops(getfield_gc=4) assert res == f(10, 1) def test_jit_merge_point_with_raw_pointer(self): @@ -3340,10 +3326,10 @@ res = self.meta_interp(main, [0, 10, 2], enable_opts='') assert res == main(0, 10, 2) - self.check_loops(call=1) + self.check_resops(call=1) res = self.meta_interp(main, [1, 10, 2], enable_opts='') assert res == main(1, 10, 2) - self.check_loops(call=0) + self.check_resops(call=0) def test_look_inside_iff_virtual(self): # There's no good reason for this to be look_inside_iff, but it's a test! 
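For context on the look_inside_iff tests below (the helper stays a residual call in one run and disappears entirely in the other), look_inside_iff ties inlining of a helper to a predicate over its arguments. A minimal sketch of the assumed rlib.jit usage -- the decorator and predicate helpers are not shown in this diff, so treat the exact signature as an assumption:

from pypy.rlib import jit

# assumed API: the predicate receives the same arguments as the function and
# may only ask trace-time questions such as jit.isvirtual()/jit.isconstant();
# when it returns True the JIT traces into f(), otherwise f() stays a call.
@jit.look_inside_iff(lambda a, n: jit.isvirtual(a))
def f(a, n):
    return len(a) + n
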
@@ -3368,10 +3354,10 @@ i += f(A(2), n) res = self.meta_interp(main, [0], enable_opts='') assert res == main(0) - self.check_loops(call=1, getfield_gc=0) + self.check_resops(call=1, getfield_gc=0) res = self.meta_interp(main, [1], enable_opts='') assert res == main(1) - self.check_loops(call=0, getfield_gc=0) + self.check_resops(call=0, getfield_gc=0) def test_reuse_elidable_result(self): driver = JitDriver(reds=['n', 's'], greens = []) @@ -3384,10 +3370,9 @@ return s res = self.meta_interp(main, [10]) assert res == main(10) - self.check_loops({ - 'call': 1, 'guard_no_exception': 1, 'guard_true': 1, 'int_add': 2, - 'int_gt': 1, 'int_sub': 1, 'strlen': 1, 'jump': 1, - }) + self.check_resops({'int_gt': 2, 'strlen': 2, 'guard_true': 2, + 'int_sub': 2, 'jump': 2, 'call': 2, + 'guard_no_exception': 2, 'int_add': 4}) def test_look_inside_iff_const_getarrayitem_gc_pure(self): driver = JitDriver(greens=['unroll'], reds=['s', 'n']) @@ -3419,10 +3404,10 @@ res = self.meta_interp(main, [0, 10]) assert res == main(0, 10) # 2 calls, one for f() and one for char_mul - self.check_loops(call=2) + self.check_resops(call=4) res = self.meta_interp(main, [1, 10]) assert res == main(1, 10) - self.check_loops(call=0) + self.check_resops(call=0) def test_setarrayitem_followed_by_arraycopy(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'x', 'y']) @@ -3523,7 +3508,8 @@ res = self.meta_interp(f, [10]) assert res == 0 - self.check_loops({"int_sub": 1, "int_gt": 1, "guard_true": 1, "jump": 1}) + self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + 'int_sub': 2}) def test_virtual_opaque_ptr(self): myjitdriver = JitDriver(greens = [], reds = ["n"]) @@ -3542,7 +3528,9 @@ return n res = self.meta_interp(f, [10]) assert res == 0 - self.check_loops({"int_sub": 1, "int_gt": 1, "guard_true": 1, "jump": 1}) + self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + 'int_sub': 2}) + def test_virtual_opaque_dict(self): myjitdriver = JitDriver(greens = [], reds = ["n"]) @@ -3562,7 +3550,10 @@ return n res = self.meta_interp(f, [10]) assert res == 0 - self.check_loops({"int_sub": 1, "int_gt": 1, "guard_true": 1, "jump": 1}) + self.check_resops({'int_gt': 2, 'getfield_gc': 1, 'int_eq': 1, + 'guard_true': 2, 'int_sub': 2, 'jump': 2, + 'guard_false': 1}) + def test_convert_from_SmallFunctionSetPBCRepr_to_FunctionsPBCRepr(self): f1 = lambda n: n+1 diff --git a/pypy/jit/metainterp/test/test_del.py b/pypy/jit/metainterp/test/test_del.py --- a/pypy/jit/metainterp/test/test_del.py +++ b/pypy/jit/metainterp/test/test_del.py @@ -20,12 +20,12 @@ n -= 1 return 42 self.meta_interp(f, [20]) - self.check_loops({'call': 2, # calls to a helper function - 'guard_no_exception': 2, # follows the calls - 'int_sub': 1, - 'int_gt': 1, - 'guard_true': 1, - 'jump': 1}) + self.check_resops({'call': 4, # calls to a helper function + 'guard_no_exception': 4, # follows the calls + 'int_sub': 2, + 'int_gt': 2, + 'guard_true': 2, + 'jump': 2}) def test_class_of_allocated(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) @@ -78,7 +78,7 @@ return 1 res = self.meta_interp(f, [20], enable_opts='') assert res == 1 - self.check_loops(call=1) # for the case B(), but not for the case A() + self.check_resops(call=1) # for the case B(), but not for the case A() class TestLLtype(DelTests, LLJitMixin): @@ -103,7 +103,7 @@ break return 42 self.meta_interp(f, [20]) - self.check_loops(getfield_raw=1, setfield_raw=1, call=0, call_pure=0) + self.check_resops(call_pure=0, setfield_raw=2, call=0, getfield_raw=2) class 
TestOOtype(DelTests, OOJitMixin): def setup_class(cls): diff --git a/pypy/jit/metainterp/test/test_dict.py b/pypy/jit/metainterp/test/test_dict.py --- a/pypy/jit/metainterp/test/test_dict.py +++ b/pypy/jit/metainterp/test/test_dict.py @@ -91,7 +91,7 @@ res1 = f(100) res2 = self.meta_interp(f, [100], listops=True) assert res1 == res2 - self.check_loops(int_mod=1) # the hash was traced and eq, but cached + self.check_resops(int_mod=2) # the hash was traced and eq, but cached def test_dict_setdefault(self): myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) @@ -107,7 +107,7 @@ assert f(100) == 50 res = self.meta_interp(f, [100], listops=True) assert res == 50 - self.check_loops(new=0, new_with_vtable=0) + self.check_resops(new=0, new_with_vtable=0) def test_dict_as_counter(self): myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) @@ -128,7 +128,7 @@ assert f(100) == 50 res = self.meta_interp(f, [100], listops=True) assert res == 50 - self.check_loops(int_mod=1) # key + eq, but cached + self.check_resops(int_mod=2) # key + eq, but cached def test_repeated_lookup(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'd']) @@ -153,12 +153,13 @@ res = self.meta_interp(f, [100], listops=True) assert res == f(50) - self.check_loops({"call": 5, "getfield_gc": 1, "getinteriorfield_gc": 1, - "guard_false": 1, "guard_no_exception": 4, - "guard_true": 1, "int_and": 1, "int_gt": 1, - "int_is_true": 1, "int_sub": 1, "jump": 1, - "new_with_vtable": 1, "new": 1, "new_array": 1, - "setfield_gc": 3, }) + self.check_resops({'new_array': 2, 'getfield_gc': 2, + 'guard_true': 2, 'jump': 2, + 'new_with_vtable': 2, 'getinteriorfield_gc': 2, + 'setfield_gc': 6, 'int_gt': 2, 'int_sub': 2, + 'call': 10, 'int_and': 2, + 'guard_no_exception': 8, 'new': 2, + 'guard_false': 2, 'int_is_true': 2}) class TestOOtype(DictTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_exception.py b/pypy/jit/metainterp/test/test_exception.py --- a/pypy/jit/metainterp/test/test_exception.py +++ b/pypy/jit/metainterp/test/test_exception.py @@ -35,10 +35,8 @@ return n res = self.meta_interp(f, [10]) assert res == 0 - self.check_loops({'jump': 1, - 'int_gt': 1, 'guard_true': 1, - 'int_sub': 1}) - + self.check_resops({'jump': 2, 'guard_true': 2, + 'int_gt': 2, 'int_sub': 2}) def test_bridge_from_guard_exception(self): myjitdriver = JitDriver(greens = [], reds = ['n']) diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -67,23 +67,23 @@ 'byval': False} supported = all(d[check] for check in jitif) if supported: - self.check_loops( - call_release_gil=1, # a CALL_RELEASE_GIL, and no other CALLs + self.check_resops( + call_release_gil=2, # a CALL_RELEASE_GIL, and no other CALLs call=0, call_may_force=0, - guard_no_exception=1, - guard_not_forced=1, - int_add=1, - int_lt=1, - guard_true=1, - jump=1) + guard_no_exception=2, + guard_not_forced=2, + int_add=2, + int_lt=2, + guard_true=2, + jump=2) else: - self.check_loops( + self.check_resops( call_release_gil=0, # no CALL_RELEASE_GIL - int_add=1, - int_lt=1, - guard_true=1, - jump=1) + int_add=2, + int_lt=2, + guard_true=2, + jump=2) return res def test_byval_result(self): @@ -144,10 +144,8 @@ return result_point[0].x * result_point[0].y assert self.meta_interp(main, [10]) == main(10) == 9000 - self.check_loops({"int_add": 3, "jump": 1, "int_lt": 1, "guard_true": 1, - "getinteriorfield_raw": 4, "setinteriorfield_raw": 2 - }) - 
+ self.check_resops({'jump': 2, 'int_lt': 2, 'setinteriorfield_raw': 4, + 'getinteriorfield_raw': 8, 'int_add': 6, 'guard_true': 2}) class TestFfiCall(FfiCallTests, LLJitMixin): supports_all = False @@ -156,4 +154,4 @@ supports_all = True # supports_{floats,longlong,singlefloats} class TestFfiLookup(FfiLookupTests, LLJitMixin): - pass \ No newline at end of file + pass diff --git a/pypy/jit/metainterp/test/test_greenfield.py b/pypy/jit/metainterp/test/test_greenfield.py --- a/pypy/jit/metainterp/test/test_greenfield.py +++ b/pypy/jit/metainterp/test/test_greenfield.py @@ -25,7 +25,7 @@ res = self.meta_interp(g, [7]) assert res == -2 self.check_loop_count(2) - self.check_loops(guard_value=0) + self.check_resops(guard_value=0) def test_green_field_2(self): myjitdriver = JitDriver(greens=['ctx.x'], reds=['ctx']) @@ -50,7 +50,7 @@ res = self.meta_interp(g, [7]) assert res == -22 self.check_loop_count(6) - self.check_loops(guard_value=0) + self.check_resops(guard_value=0) class TestLLtypeGreenFieldsTests(GreenFieldsTests, LLJitMixin): diff --git a/pypy/jit/metainterp/test/test_jitdriver.py b/pypy/jit/metainterp/test/test_jitdriver.py --- a/pypy/jit/metainterp/test/test_jitdriver.py +++ b/pypy/jit/metainterp/test/test_jitdriver.py @@ -88,7 +88,7 @@ assert res == loop2(4, 40) # we expect only one int_sub, corresponding to the single # compiled instance of loop1() - self.check_loops(int_sub=1) + self.check_resops(int_sub=2) # the following numbers are not really expectations of the test # itself, but just the numbers that we got after looking carefully # at the generated machine code @@ -154,7 +154,7 @@ res = self.meta_interp(loop2, [4, 40], repeat=7, inline=True) assert res == loop2(4, 40) # we expect no int_sub, but a residual call - self.check_loops(int_sub=0, call=1) + self.check_resops(call=2, int_sub=0) def test_multiple_jits_trace_too_long(self): myjitdriver1 = JitDriver(greens=["n"], reds=["i", "box"]) diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py --- a/pypy/jit/metainterp/test/test_list.py +++ b/pypy/jit/metainterp/test/test_list.py @@ -6,8 +6,8 @@ class ListTests: def check_all_virtualized(self): - self.check_loops(new_array=0, setarrayitem_gc=0, getarrayitem_gc=0, - arraylen_gc=0) + self.check_resops(setarrayitem_gc=0, new_array=0, arraylen_gc=0, + getarrayitem_gc=0) def test_simple_array(self): jitdriver = JitDriver(greens = [], reds = ['n']) @@ -20,7 +20,7 @@ return n res = self.meta_interp(f, [10], listops=True) assert res == 0 - self.check_loops(int_sub=1) + self.check_resops(int_sub=2) self.check_all_virtualized() def test_list_pass_around(self): @@ -56,7 +56,8 @@ res = self.meta_interp(f, [10], listops=True) assert res == f(10) # one setitem should be gone by now - self.check_loops(call=1, setarrayitem_gc=2, getarrayitem_gc=1) + self.check_resops(setarrayitem_gc=4, getarrayitem_gc=2, call=2) + def test_ll_fixed_setitem_fast(self): jitdriver = JitDriver(greens = [], reds = ['n', 'l']) @@ -93,7 +94,7 @@ res = self.meta_interp(f, [10], listops=True) assert res == f(10) - self.check_loops(setarrayitem_gc=0, getarrayitem_gc=0, call=0) + self.check_resops(setarrayitem_gc=0, call=0, getarrayitem_gc=0) def test_vlist_alloc_and_set(self): # the check_loops fails, because [non-null] * n is not supported yet @@ -141,7 +142,7 @@ res = self.meta_interp(f, [5], listops=True) assert res == 7 - self.check_loops(call=0) + self.check_resops(call=0) def test_fold_getitem_1(self): jitdriver = JitDriver(greens = ['pc', 'n', 'l'], reds = ['total']) @@ -161,7 
+162,7 @@ res = self.meta_interp(f, [4], listops=True) assert res == f(4) - self.check_loops(call=0) + self.check_resops(call=0) def test_fold_getitem_2(self): jitdriver = JitDriver(greens = ['pc', 'n', 'l'], reds = ['total', 'x']) @@ -186,7 +187,7 @@ res = self.meta_interp(f, [4], listops=True) assert res == f(4) - self.check_loops(call=0, getfield_gc=0) + self.check_resops(call=0, getfield_gc=0) def test_fold_indexerror(self): jitdriver = JitDriver(greens = [], reds = ['total', 'n', 'lst']) @@ -206,7 +207,7 @@ res = self.meta_interp(f, [15], listops=True) assert res == f(15) - self.check_loops(guard_exception=0) + self.check_resops(guard_exception=0) def test_virtual_resize(self): jitdriver = JitDriver(greens = [], reds = ['n', 's']) @@ -224,9 +225,8 @@ return s res = self.meta_interp(f, [15], listops=True) assert res == f(15) - self.check_loops({"int_add": 1, "int_sub": 1, "int_gt": 1, - "guard_true": 1, "jump": 1}) - + self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, + 'guard_true': 2, 'int_sub': 2}) class TestOOtype(ListTests, OOJitMixin): pass @@ -258,4 +258,4 @@ assert res == f(37) # There is the one actual field on a, plus several fields on the list # itself - self.check_loops(getfield_gc=10, everywhere=True) + self.check_resops(getfield_gc=10) diff --git a/pypy/jit/metainterp/test/test_loop.py b/pypy/jit/metainterp/test/test_loop.py --- a/pypy/jit/metainterp/test/test_loop.py +++ b/pypy/jit/metainterp/test/test_loop.py @@ -60,7 +60,8 @@ assert res == f(6, 13) self.check_loop_count(1) if self.enable_opts: - self.check_loops(getfield_gc = 0, setfield_gc = 1) + self.check_resops(setfield_gc=2, getfield_gc=0) + def test_loop_with_two_paths(self): from pypy.rpython.lltypesystem import lltype @@ -180,7 +181,10 @@ assert res == 42 self.check_loop_count(1) # the 'int_eq' and following 'guard' should be constant-folded - self.check_loops(int_eq=0, guard_true=1, guard_false=0) + if 'unroll' in self.enable_opts: + self.check_resops(int_eq=0, guard_true=2, guard_false=0) + else: + self.check_resops(int_eq=0, guard_true=1, guard_false=0) if self.basic: found = 0 for op in get_stats().loops[0]._all_operations(): @@ -643,8 +647,12 @@ res = self.meta_interp(main_interpreter_loop, [1]) assert res == 102 self.check_loop_count(1) - self.check_loops({'int_add' : 3, 'int_gt' : 1, - 'guard_false' : 1, 'jump' : 1}) + if 'unroll' in self.enable_opts: + self.check_resops({'int_add' : 6, 'int_gt' : 2, + 'guard_false' : 2, 'jump' : 2}) + else: + self.check_resops({'int_add' : 3, 'int_gt' : 1, + 'guard_false' : 1, 'jump' : 1}) def test_automatic_promotion(self): myjitdriver = JitDriver(greens = ['i'], @@ -686,7 +694,7 @@ self.check_loop_count(1) # These loops do different numbers of ops based on which optimizer we # are testing with. 
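A rule of thumb that appears to underlie these conversions (inferred from the hunks, not stated by the author): when the 'unroll' optimization is enabled, each operation of the loop body usually shows up twice in the resops, once in the preamble and once in the peeled loop. A small sketch, with the example numbers copied from the main_interpreter_loop hunk above:

def expected_resops(enable_opts, per_loop_counts):
    # double the single-trace expectations when unrolling adds a preamble
    # in front of the peeled loop (usual case; guards that survive only in
    # the preamble are an exception and stay at 1)
    if 'unroll' in enable_opts:
        return dict((name, 2 * n) for name, n in per_loop_counts.items())
    return per_loop_counts

# e.g. {'int_add': 3, 'int_gt': 1, 'guard_false': 1, 'jump': 1}
#  ->  {'int_add': 6, 'int_gt': 2, 'guard_false': 2, 'jump': 2}
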
- self.check_loops(self.automatic_promotion_result) + self.check_resops(self.automatic_promotion_result) def test_can_enter_jit_outside_main_loop(self): myjitdriver = JitDriver(greens=[], reds=['i', 'j', 'a']) diff --git a/pypy/jit/metainterp/test/test_loop_unroll.py b/pypy/jit/metainterp/test/test_loop_unroll.py --- a/pypy/jit/metainterp/test/test_loop_unroll.py +++ b/pypy/jit/metainterp/test/test_loop_unroll.py @@ -8,7 +8,8 @@ enable_opts = ALL_OPTS_NAMES automatic_promotion_result = { - 'int_add' : 3, 'int_gt' : 1, 'guard_false' : 1, 'jump' : 1, + 'int_gt': 2, 'guard_false': 2, 'jump': 2, 'int_add': 6, + 'guard_value': 1 } # ====> test_loop.py diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -73,8 +73,7 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_loops(guard_not_invalidated=2, getfield_gc=0, - everywhere=True) + self.check_resops(guard_not_invalidated=2, getfield_gc=0) # from pypy.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -103,7 +102,7 @@ assert f(100, 7) == 721 res = self.meta_interp(f, [100, 7]) assert res == 721 - self.check_loops(guard_not_invalidated=0, getfield_gc=1) + self.check_resops(guard_not_invalidated=0, getfield_gc=3) # from pypy.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -134,8 +133,7 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_loops(guard_not_invalidated=2, getfield_gc=0, - everywhere=True) + self.check_resops(guard_not_invalidated=2, getfield_gc=0) def test_change_during_tracing_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -160,7 +158,7 @@ assert f(100, 7) == 721 res = self.meta_interp(f, [100, 7]) assert res == 721 - self.check_loops(guard_not_invalidated=0, getfield_gc=1) + self.check_resops(guard_not_invalidated=0, getfield_gc=2) def test_change_during_tracing_2(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -186,7 +184,7 @@ assert f(100, 7) == 700 res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_loops(guard_not_invalidated=0, getfield_gc=1) + self.check_resops(guard_not_invalidated=0, getfield_gc=2) def test_change_invalidate_reentering(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -212,7 +210,7 @@ assert g(100, 7) == 700707 res = self.meta_interp(g, [100, 7]) assert res == 700707 - self.check_loops(guard_not_invalidated=2, getfield_gc=0) + self.check_resops(guard_not_invalidated=4, getfield_gc=0) def test_invalidate_while_running(self): jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) @@ -324,8 +322,8 @@ assert f(100, 15) == 3009 res = self.meta_interp(f, [100, 15]) assert res == 3009 - self.check_loops(guard_not_invalidated=4, getfield_gc=0, - call_may_force=0, guard_not_forced=0) + self.check_resops(guard_not_invalidated=8, guard_not_forced=0, + call_may_force=0, getfield_gc=0) def test_list_simple_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -347,9 +345,8 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_loops(guard_not_invalidated=2, getfield_gc=0, - getarrayitem_gc=0, getarrayitem_gc_pure=0, - everywhere=True) + self.check_resops(getarrayitem_gc_pure=0, guard_not_invalidated=2, + getarrayitem_gc=0, getfield_gc=0) # from pypy.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -385,9 +382,8 @@ # res = self.meta_interp(f, [100, 
7]) assert res == 714 - self.check_loops(guard_not_invalidated=2, getfield_gc=0, - getarrayitem_gc=0, getarrayitem_gc_pure=0, - arraylen_gc=0, everywhere=True) + self.check_resops(getarrayitem_gc_pure=0, guard_not_invalidated=2, + arraylen_gc=0, getarrayitem_gc=0, getfield_gc=0) # from pypy.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -421,9 +417,8 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_loops(guard_not_invalidated=2, getfield_gc=0, - getarrayitem_gc=0, getarrayitem_gc_pure=0, - everywhere=True) + self.check_resops(guard_not_invalidated=2, getfield_gc=0, + getarrayitem_gc=0, getarrayitem_gc_pure=0) # from pypy.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -460,9 +455,9 @@ assert f(100, 15) == 3009 res = self.meta_interp(f, [100, 15]) assert res == 3009 - self.check_loops(guard_not_invalidated=4, getfield_gc=0, - getarrayitem_gc=0, getarrayitem_gc_pure=0, - call_may_force=0, guard_not_forced=0) + self.check_resops(call_may_force=0, getfield_gc=0, + getarrayitem_gc_pure=0, guard_not_forced=0, + getarrayitem_gc=0, guard_not_invalidated=8) def test_invalidated_loop_is_not_used_any_more_as_target(self): myjitdriver = JitDriver(greens=['foo'], reds=['x']) diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -143,11 +143,11 @@ f = self.get_interpreter(codes) assert self.meta_interp(f, [0, 0, 0], enable_opts='') == 42 - self.check_loops(int_add = 1, call_may_force = 1, call = 0) + self.check_resops(call_may_force=1, int_add=1, call=0) assert self.meta_interp(f, [0, 0, 0], enable_opts='', inline=True) == 42 - self.check_loops(int_add = 2, call_may_force = 0, call = 0, - guard_no_exception = 0) + self.check_resops(call=0, int_add=2, call_may_force=0, + guard_no_exception=0) def test_inline_jitdriver_check(self): code = "021" @@ -160,7 +160,7 @@ inline=True) == 42 # the call is fully inlined, because we jump to subcode[1], thus # skipping completely the JUMP_BACK in subcode[0] - self.check_loops(call_may_force = 0, call_assembler = 0, call = 0) + self.check_resops(call=0, call_may_force=0, call_assembler=0) def test_guard_failure_in_inlined_function(self): def p(pc, code): @@ -491,10 +491,10 @@ return loop(100) res = self.meta_interp(main, [0], enable_opts='', trace_limit=TRACE_LIMIT) - self.check_loops(call_may_force=1, call=0) + self.check_resops(call=0, call_may_force=1) res = self.meta_interp(main, [1], enable_opts='', trace_limit=TRACE_LIMIT) - self.check_loops(call_may_force=0, call=0) + self.check_resops(call=0, call_may_force=0) def test_trace_from_start(self): def p(pc, code): @@ -576,7 +576,7 @@ result += f('-c-----------l-', i+100) self.meta_interp(g, [10], backendopt=True) self.check_aborted_count(1) - self.check_loops(call_assembler=1, call=0) + self.check_resops(call=0, call_assembler=2) self.check_tree_loop_count(3) def test_directly_call_assembler(self): @@ -625,8 +625,7 @@ try: compile.compile_tmp_callback = my_ctc self.meta_interp(portal, [2, 5], inline=True) - self.check_loops(call_assembler=2, call_may_force=0, - everywhere=True) + self.check_resops(call_may_force=0, call_assembler=2) finally: compile.compile_tmp_callback = original_ctc # check that we made a temporary callback @@ -681,8 +680,7 @@ try: compile.compile_tmp_callback = my_ctc self.meta_interp(main, [2, 5], inline=True) - self.check_loops(call_assembler=2, call_may_force=0, - everywhere=True) 
+ self.check_resops(call_may_force=0, call_assembler=2) finally: compile.compile_tmp_callback = original_ctc # check that we made a temporary callback @@ -1021,7 +1019,7 @@ res = self.meta_interp(portal, [2, 0], inline=True, policy=StopAtXPolicy(residual)) assert res == portal(2, 0) - self.check_loops(call_assembler=4, everywhere=True) + self.check_resops(call_assembler=4) def test_inline_without_hitting_the_loop(self): driver = JitDriver(greens = ['codeno'], reds = ['i'], @@ -1045,7 +1043,7 @@ assert portal(0) == 70 res = self.meta_interp(portal, [0], inline=True) assert res == 70 - self.check_loops(call_assembler=0) + self.check_resops(call_assembler=0) def test_inline_with_hitting_the_loop_sometimes(self): driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'], @@ -1071,7 +1069,7 @@ assert portal(0, 1) == 2095 res = self.meta_interp(portal, [0, 1], inline=True) assert res == 2095 - self.check_loops(call_assembler=12, everywhere=True) + self.check_resops(call_assembler=12) def test_inline_with_hitting_the_loop_sometimes_exc(self): driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'], @@ -1109,7 +1107,7 @@ assert main(0, 1) == 2095 res = self.meta_interp(main, [0, 1], inline=True) assert res == 2095 - self.check_loops(call_assembler=12, everywhere=True) + self.check_resops(call_assembler=12) def test_handle_jitexception_in_portal(self): # a test for _handle_jitexception_in_portal in blackhole.py @@ -1238,7 +1236,7 @@ i += 1 self.meta_interp(portal, [0, 0, 0], inline=True) - self.check_loops(call=0, call_may_force=0) + self.check_resops(call_may_force=0, call=0) class TestLLtype(RecursiveTests, LLJitMixin): pass diff --git a/pypy/jit/metainterp/test/test_resume.py b/pypy/jit/metainterp/test/test_resume.py --- a/pypy/jit/metainterp/test/test_resume.py +++ b/pypy/jit/metainterp/test/test_resume.py @@ -23,11 +23,11 @@ assert tag(-3, 2) == rffi.r_short(-3<<2|2) assert tag((1<<13)-1, 3) == rffi.r_short(((1<<15)-1)|3) assert tag(-1<<13, 3) == rffi.r_short((-1<<15)|3) - py.test.raises(ValueError, tag, 3, 5) - py.test.raises(ValueError, tag, 1<<13, 0) - py.test.raises(ValueError, tag, (1<<13)+1, 0) - py.test.raises(ValueError, tag, (-1<<13)-1, 0) - py.test.raises(ValueError, tag, (-1<<13)-5, 0) + py.test.raises(AssertionError, tag, 3, 5) + py.test.raises(TagOverflow, tag, 1<<13, 0) + py.test.raises(TagOverflow, tag, (1<<13)+1, 0) + py.test.raises(TagOverflow, tag, (-1<<13)-1, 0) + py.test.raises(TagOverflow, tag, (-1<<13)-5, 0) def test_untag(): assert untag(tag(3, 1)) == (3, 1) @@ -1318,8 +1318,7 @@ assert rffi.cast(lltype.Signed, pf[1].fieldnum) == 1062 assert rffi.cast(lltype.Signed, pf[1].itemindex) == 2147483647 # - from pypy.jit.metainterp.pyjitpl import SwitchToBlackhole - py.test.raises(SwitchToBlackhole, modifier._add_pending_fields, + py.test.raises(TagOverflow, modifier._add_pending_fields, [(array_a, 42, 63, 2147483648)]) def test_resume_reader_fields_and_arrayitems(): diff --git a/pypy/jit/metainterp/test/test_send.py b/pypy/jit/metainterp/test/test_send.py --- a/pypy/jit/metainterp/test/test_send.py +++ b/pypy/jit/metainterp/test/test_send.py @@ -20,9 +20,8 @@ return c res = self.meta_interp(f, [1]) assert res == 2 - self.check_loops({'jump': 1, - 'int_sub': 1, 'int_gt' : 1, - 'guard_true': 1}) # all folded away + self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + 'int_sub': 2}) # all folded away def test_red_builtin_send(self): myjitdriver = JitDriver(greens = [], reds = ['i', 'counter']) @@ -41,12 +40,9 @@ return res res = self.meta_interp(f, [1], 
policy=StopAtXPolicy(externfn)) assert res == 2 - if self.type_system == 'ootype': - self.check_loops(call=1, oosend=1) # 'len' remains - else: - # 'len' becomes a getfield('num_items') for now in lltype, - # which is itself encoded as a 'getfield_gc' - self.check_loops(call=1, getfield_gc=1) + # 'len' becomes a getfield('num_items') for now in lltype, + # which is itself encoded as a 'getfield_gc' + self.check_resops(call=2, getfield_gc=2) def test_send_to_single_target_method(self): myjitdriver = JitDriver(greens = [], reds = ['i', 'counter']) @@ -70,11 +66,10 @@ res = self.meta_interp(f, [1], policy=StopAtXPolicy(externfn), backendopt=True) assert res == 43 - self.check_loops({'call': 1, 'guard_no_exception': 1, - 'getfield_gc': 1, - 'int_add': 1, - 'jump': 1, 'int_gt' : 1, 'guard_true' : 1, - 'int_sub' : 1}) + self.check_resops({'int_gt': 2, 'getfield_gc': 2, + 'guard_true': 2, 'int_sub': 2, 'jump': 2, + 'call': 2, 'guard_no_exception': 2, + 'int_add': 2}) def test_red_send_to_green_receiver(self): myjitdriver = JitDriver(greens = ['i'], reds = ['counter', 'j']) @@ -97,7 +92,7 @@ return res res = self.meta_interp(f, [4, -1]) assert res == 145 - self.check_loops(int_add = 1, everywhere=True) + self.check_resops(int_add=1) def test_oosend_base(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'w']) @@ -132,7 +127,7 @@ assert res == 17 res = self.meta_interp(f, [4, 14]) assert res == 1404 - self.check_loops(guard_class=0, new_with_vtable=0, new=0) + self.check_resops(guard_class=1, new=0, new_with_vtable=0) def test_three_receivers(self): myjitdriver = JitDriver(greens = [], reds = ['y']) @@ -205,8 +200,7 @@ # of the body in a single bigger loop with no failing guard except # the final one. self.check_loop_count(1) - self.check_loops(guard_class=0, - int_add=2, int_sub=2) + self.check_resops(guard_class=1, int_add=4, int_sub=4) self.check_jumps(14) def test_oosend_guard_failure_2(self): @@ -247,8 +241,7 @@ res = self.meta_interp(f, [4, 28]) assert res == f(4, 28) self.check_loop_count(1) - self.check_loops(guard_class=0, - int_add=2, int_sub=2) + self.check_resops(guard_class=1, int_add=4, int_sub=4) self.check_jumps(14) def test_oosend_different_initial_class(self): @@ -285,8 +278,8 @@ # However, this doesn't match the initial value of 'w'. # XXX This not completely easy to check... 
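Another recurring detail, again inferred from the converted expectations rather than stated explicitly: guards that the old per-loop check reported as absent (guard_class=0) may now be counted once, because the preamble still carries them even though the peeled loop does not. The guard-failure hunks nearby show the pattern:

# numbers as they appear in the test_oosend_guard_failure hunks above
old_per_loop = {'guard_class': 0, 'int_add': 2, 'int_sub': 2}   # check_loops
new_resops   = {'guard_class': 1, 'int_add': 4, 'int_sub': 4}   # check_resops
# loop-body ops doubled (preamble + peeled loop); the class guard is kept
# only in the preamble, hence 1 rather than 0 or 2
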
self.check_loop_count(1) - self.check_loops(int_add=0, int_lshift=1, guard_class=0, - new_with_vtable=0, new=0) + self.check_resops(guard_class=1, new_with_vtable=0, int_lshift=2, + int_add=0, new=0) def test_indirect_call_unknown_object_1(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y']) @@ -566,10 +559,7 @@ policy = StopAtXPolicy(new, A.foo.im_func, B.foo.im_func) res = self.meta_interp(fn, [0, 20], policy=policy) assert res == 42 - if self.type_system == 'ootype': - self.check_loops(oosend=1) - else: - self.check_loops(call=1) + self.check_resops(call=2) def test_residual_oosend_with_void(self): @@ -597,10 +587,7 @@ policy = StopAtXPolicy(new, A.foo.im_func) res = self.meta_interp(fn, [1, 20], policy=policy) assert res == 41 - if self.type_system == 'ootype': - self.check_loops(oosend=1) - else: - self.check_loops(call=1) + self.check_resops(call=2) def test_constfold_pure_oosend(self): myjitdriver = JitDriver(greens=[], reds = ['i', 'obj']) @@ -621,10 +608,7 @@ policy = StopAtXPolicy(A.foo.im_func) res = self.meta_interp(fn, [1, 20], policy=policy) assert res == 42 - if self.type_system == 'ootype': - self.check_loops(oosend=0) - else: - self.check_loops(call=0) + self.check_resops(call=0) def test_generalize_loop(self): myjitdriver = JitDriver(greens=[], reds = ['i', 'obj']) diff --git a/pypy/jit/metainterp/test/test_slist.py b/pypy/jit/metainterp/test/test_slist.py --- a/pypy/jit/metainterp/test/test_slist.py +++ b/pypy/jit/metainterp/test/test_slist.py @@ -76,7 +76,7 @@ return lst[i] res = self.meta_interp(f, [21], listops=True) assert res == f(21) - self.check_loops(call=0) + self.check_resops(call=0) def test_getitem_neg(self): myjitdriver = JitDriver(greens = [], reds = ['i', 'n']) @@ -92,7 +92,7 @@ return x res = self.meta_interp(f, [-2], listops=True) assert res == 41 - self.check_loops(call=0, guard_value=0) + self.check_resops(call=0, guard_value=0) # we don't support resizable lists on ootype #class TestOOtype(ListTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_string.py b/pypy/jit/metainterp/test/test_string.py --- a/pypy/jit/metainterp/test/test_string.py +++ b/pypy/jit/metainterp/test/test_string.py @@ -30,7 +30,7 @@ return i res = self.meta_interp(f, [10, True, _str('h')], listops=True) assert res == 5 - self.check_loops(**{self.CALL: 1, self.CALL_PURE: 0, 'everywhere': True}) + self.check_resops(**{self.CALL: 1, self.CALL_PURE: 0}) def test_eq_folded(self): _str = self._str @@ -50,7 +50,7 @@ return i res = self.meta_interp(f, [10, True, _str('h')], listops=True) assert res == 5 - self.check_loops(**{self.CALL: 0, self.CALL_PURE: 0}) + self.check_resops(**{self.CALL: 0, self.CALL_PURE: 0}) def test_newstr(self): _str, _chr = self._str, self._chr @@ -85,7 +85,7 @@ n -= 1 return 42 self.meta_interp(f, [6]) - self.check_loops(newstr=0, strsetitem=0, strlen=0, + self.check_resops(newstr=0, strsetitem=0, strlen=0, newunicode=0, unicodesetitem=0, unicodelen=0) def test_char2string_escape(self): @@ -126,7 +126,7 @@ return total res = self.meta_interp(f, [6]) assert res == 21 - self.check_loops(newstr=0, strgetitem=0, strsetitem=0, strlen=0, + self.check_resops(newstr=0, strgetitem=0, strsetitem=0, strlen=0, newunicode=0, unicodegetitem=0, unicodesetitem=0, unicodelen=0) @@ -147,7 +147,7 @@ m -= 1 return 42 self.meta_interp(f, [6, 7]) - self.check_loops(newstr=0, strsetitem=0, + self.check_resops(newstr=0, strsetitem=0, newunicode=0, unicodesetitem=0, call=0, call_pure=0) @@ -168,12 +168,11 @@ return 42 self.meta_interp(f, [6, 7]) if _str is str: - 
self.check_loops(newstr=1, strsetitem=0, copystrcontent=2, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, copystrcontent=4, + strsetitem=0, call=2, newstr=2) else: - self.check_loops(newunicode=1, unicodesetitem=0, - copyunicodecontent=2, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, unicodesetitem=0, call=2, + copyunicodecontent=4, newunicode=2) def test_strconcat_escape_str_char(self): _str, _chr = self._str, self._chr @@ -192,12 +191,11 @@ return 42 self.meta_interp(f, [6, 7]) if _str is str: - self.check_loops(newstr=1, strsetitem=1, copystrcontent=1, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, copystrcontent=2, strsetitem=2, + call=2, newstr=2) else: - self.check_loops(newunicode=1, unicodesetitem=1, - copyunicodecontent=1, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, unicodesetitem=2, call=2, + copyunicodecontent=2, newunicode=2) def test_strconcat_escape_char_str(self): _str, _chr = self._str, self._chr @@ -216,12 +214,11 @@ return 42 self.meta_interp(f, [6, 7]) if _str is str: - self.check_loops(newstr=1, strsetitem=1, copystrcontent=1, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, copystrcontent=2, + strsetitem=2, call=2, newstr=2) else: - self.check_loops(newunicode=1, unicodesetitem=1, - copyunicodecontent=1, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, unicodesetitem=2, call=2, + copyunicodecontent=2, newunicode=2) def test_strconcat_escape_char_char(self): _str, _chr = self._str, self._chr @@ -239,12 +236,11 @@ return 42 self.meta_interp(f, [6, 7]) if _str is str: - self.check_loops(newstr=1, strsetitem=2, copystrcontent=0, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, copystrcontent=0, + strsetitem=4, call=2, newstr=2) else: - self.check_loops(newunicode=1, unicodesetitem=2, - copyunicodecontent=0, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, unicodesetitem=4, call=2, + copyunicodecontent=0, newunicode=2) def test_strconcat_escape_str_char_str(self): _str, _chr = self._str, self._chr @@ -263,12 +259,11 @@ return 42 self.meta_interp(f, [6, 7]) if _str is str: - self.check_loops(newstr=1, strsetitem=1, copystrcontent=2, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, copystrcontent=4, strsetitem=2, + call=2, newstr=2) else: - self.check_loops(newunicode=1, unicodesetitem=1, - copyunicodecontent=2, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, unicodesetitem=2, call=2, + copyunicodecontent=4, newunicode=2) def test_strconcat_guard_fail(self): _str = self._str @@ -325,7 +320,7 @@ m -= 1 return 42 self.meta_interp(f, [6, 7]) - self.check_loops(newstr=0, newunicode=0) + self.check_resops(newunicode=0, newstr=0) def test_str_slice_len_surviving(self): _str = self._str @@ -491,7 +486,7 @@ def __init__(self, s): self.defaultencoding = s _str = self._str - sys = Sys(_str('ascii')) + sys = Sys(_str('ascii')) mydriver = JitDriver(reds = ['n', 'sa'], greens = []) def f(n): sa = 0 @@ -504,13 +499,13 @@ sys.defaultencoding = _str('utf-8') return sa assert self.meta_interp(f, [8]) == f(8) - self.check_loops({'int_add': 1, 'guard_true': 1, 'int_sub': 1, - 'jump': 1, 'int_is_true': 1, - 'guard_not_invalidated': 1}) + self.check_resops({'jump': 2, 'int_is_true': 2, 'int_add': 2, + 'guard_true': 2, 'guard_not_invalidated': 2, + 'int_sub': 2}) def test_promote_string(self): driver = JitDriver(greens = [], reds = ['n']) - + def f(n): while n < 21: driver.jit_merge_point(n=n) @@ -519,7 +514,7 @@ return 0 
self.meta_interp(f, [0]) - self.check_loops(call=3 + 1) # one for int2str + self.check_resops(call=7) #class TestOOtype(StringTests, OOJitMixin): # CALL = "oosend" @@ -552,9 +547,8 @@ m -= 1 return 42 self.meta_interp(f, [6, 7]) - self.check_loops(call=1, # escape() - newunicode=1, unicodegetitem=0, - unicodesetitem=1, copyunicodecontent=1) + self.check_resops(unicodesetitem=2, newunicode=2, call=4, + copyunicodecontent=2, unicodegetitem=0) def test_str2unicode_fold(self): _str = self._str @@ -572,9 +566,9 @@ m -= 1 return 42 self.meta_interp(f, [6, 7]) - self.check_loops(call_pure=0, call=1, - newunicode=0, unicodegetitem=0, - unicodesetitem=0, copyunicodecontent=0) + self.check_resops(call_pure=0, unicodesetitem=0, call=2, + newunicode=0, unicodegetitem=0, + copyunicodecontent=0) def test_join_chars(self): jitdriver = JitDriver(reds=['a', 'b', 'c', 'i'], greens=[]) @@ -596,9 +590,8 @@ # The "".join should be unrolled, since the length of x is known since # it is virtual, ensure there are no calls to ll_join_chars, or # allocations. - self.check_loops({ - "guard_true": 5, "int_is_true": 3, "int_lt": 2, "int_add": 2, "jump": 2, - }, everywhere=True) + self.check_resops({'jump': 2, 'guard_true': 5, 'int_lt': 2, + 'int_add': 2, 'int_is_true': 3}) def test_virtual_copystringcontent(self): jitdriver = JitDriver(reds=['n', 'result'], greens=[]) diff --git a/pypy/jit/metainterp/test/test_tl.py b/pypy/jit/metainterp/test/test_tl.py --- a/pypy/jit/metainterp/test/test_tl.py +++ b/pypy/jit/metainterp/test/test_tl.py @@ -72,16 +72,16 @@ res = self.meta_interp(main, [0, 6], listops=True, backendopt=True) assert res == 5040 - self.check_loops({'int_mul':1, 'jump':1, - 'int_sub':1, 'int_le':1, 'guard_false':1}) + self.check_resops({'jump': 2, 'int_le': 2, 'guard_value': 1, + 'int_mul': 2, 'guard_false': 2, 'int_sub': 2}) def test_tl_2(self): main = self._get_main() res = self.meta_interp(main, [1, 10], listops=True, backendopt=True) assert res == main(1, 10) - self.check_loops({'int_sub':1, 'int_le':1, - 'guard_false':1, 'jump':1}) + self.check_resops({'int_le': 2, 'int_sub': 2, 'jump': 2, + 'guard_false': 2, 'guard_value': 1}) def test_tl_call(self, listops=True, policy=None): from pypy.jit.tl.tl import interp diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py --- a/pypy/jit/metainterp/test/test_tracingopts.py +++ b/pypy/jit/metainterp/test/test_tracingopts.py @@ -593,6 +593,32 @@ res = self.interp_operations(fn, [sys.maxint]) assert res == 12 + def test_opaque_list(self): + from pypy.rlib.rerased import new_erasing_pair + erase, unerase = new_erasing_pair("test_opaque_list") + def fn(n, ca, cb): + l1 = [n] + l2 = [n] + a1 = erase(l1) + a2 = erase(l1) + a = a1 + if ca: + a = a2 + if n < -100: + unerase(a).append(5) + b = a1 + if cb: + b = a + return unerase(a)[0] + unerase(b)[0] + res = self.interp_operations(fn, [7, 0, 1]) + assert res == 7 * 2 + self.check_operations_history(getarrayitem_gc=0, + getfield_gc=0) + res = self.interp_operations(fn, [-7, 1, 1]) + assert res == -7 * 2 + self.check_operations_history(getarrayitem_gc=0, + getfield_gc=0) + def test_copy_str_content(self): def fn(n): a = StringBuilder() @@ -601,4 +627,4 @@ return x[0] res = self.interp_operations(fn, [0]) assert res == 1 - self.check_operations_history(getarrayitem_gc=0, getarrayitem_gc_pure=0 ) \ No newline at end of file + self.check_operations_history(getarrayitem_gc=0, getarrayitem_gc_pure=0) diff --git a/pypy/jit/metainterp/test/test_virtual.py 
b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -31,8 +31,9 @@ res = self.meta_interp(f, [10]) assert res == 55 * 10 self.check_loop_count(1) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, + getfield_gc=2, new=0) + def test_virtualized2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node1', 'node2']) @@ -53,8 +54,8 @@ n -= 1 return node1.value * node2.value assert f(10) == self.meta_interp(f, [10]) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=2, + new=0) def test_virtualized_circular1(self): class MyNode(): @@ -79,8 +80,8 @@ res = self.meta_interp(f, [10]) assert res == 55 * 10 self.check_loop_count(1) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, + getfield_gc=3, new=0) def test_virtualized_float(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -97,7 +98,7 @@ res = self.meta_interp(f, [10]) assert res == f(10) self.check_loop_count(1) - self.check_loops(new=0, float_add=0) + self.check_resops(new=0, float_add=1) def test_virtualized_float2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -115,7 +116,8 @@ res = self.meta_interp(f, [10]) assert res == f(10) self.check_loop_count(1) - self.check_loops(new=0, float_add=1) + self.check_resops(new=0, float_add=2) + def test_virtualized_2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -139,8 +141,8 @@ res = self.meta_interp(f, [10]) assert res == 55 * 30 self.check_loop_count(1) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=2, + new=0) def test_nonvirtual_obj_delays_loop(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -160,8 +162,8 @@ res = self.meta_interp(f, [500]) assert res == 640 self.check_loop_count(1) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, + getfield_gc=1, new=0) def test_two_loops_with_virtual(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -184,8 +186,9 @@ res = self.meta_interp(f, [18]) assert res == f(18) self.check_loop_count(2) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, + getfield_gc=2, new=0) + def test_two_loops_with_escaping_virtual(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -212,8 +215,8 @@ res = self.meta_interp(f, [20], policy=StopAtXPolicy(externfn)) assert res == f(20) self.check_loop_count(3) - self.check_loops(**{self._new_op: 1}) - self.check_loops(int_mul=0, call=1) + self.check_resops(**{self._new_op: 1}) + self.check_resops(int_mul=0, call=1) def test_two_virtuals(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'prev']) @@ -236,7 +239,7 @@ res = self.meta_interp(f, [12]) assert res == 78 - self.check_loops(new_with_vtable=0, new=0) + self.check_resops(new_with_vtable=0, new=0) def test_specialied_bridge(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) @@ -281,7 +284,7 @@ res = self.meta_interp(f, [20]) assert res == 9 - self.check_loops(new_with_vtable=0, new=0) + self.check_resops(new_with_vtable=0, new=0) def 
test_immutable_constant_getfield(self): myjitdriver = JitDriver(greens = ['stufflist'], reds = ['n', 'i']) @@ -307,7 +310,7 @@ res = self.meta_interp(f, [10, 1, 0], listops=True) assert res == 0 - self.check_loops(getfield_gc=0) + self.check_resops(getfield_gc=0) def test_escapes(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'parent']) @@ -336,7 +339,7 @@ res = self.meta_interp(f, [10], policy=StopAtXPolicy(g)) assert res == 3 - self.check_loops(**{self._new_op: 1}) + self.check_resops(**{self._new_op: 1}) def test_virtual_on_virtual(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'parent']) @@ -366,7 +369,7 @@ res = self.meta_interp(f, [10]) assert res == 2 - self.check_loops(new=0, new_with_vtable=0) + self.check_resops(new=0, new_with_vtable=0) def test_bridge_from_interpreter(self): mydriver = JitDriver(reds = ['n', 'f'], greens = []) @@ -841,7 +844,7 @@ del t2 return i assert self.meta_interp(f, []) == 10 - self.check_loops(new_array=0) + self.check_resops(new_array=0) def test_virtual_streq_bug(self): mydriver = JitDriver(reds = ['i', 's', 'a'], greens = []) @@ -942,8 +945,8 @@ res = self.meta_interp(f, [16]) assert res == f(16) - self.check_loops(getfield_gc=2) - + self.check_resops(getfield_gc=7) + # ____________________________________________________________ # Run 1: all the tests instantiate a real RPython class @@ -985,10 +988,8 @@ res = self.meta_interp(f, [10]) assert res == 20 self.check_loop_count(1) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) - - + self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=0, + new=0) class TestOOtype_Instance(VirtualTests, OOJitMixin): _new_op = 'new_with_vtable' diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -77,7 +77,7 @@ return xy.inst_x res = self.meta_interp(f, [20]) assert res == 30 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_preexisting_access_2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -102,7 +102,7 @@ assert f(5) == 185 res = self.meta_interp(f, [5]) assert res == 185 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_two_paths_access(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -124,7 +124,7 @@ return xy.inst_x res = self.meta_interp(f, [18]) assert res == 10118 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_synchronize_in_return(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -146,7 +146,7 @@ return xy.inst_x res = self.meta_interp(f, [18]) assert res == 10180 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_virtualizable_and_greens(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n', 'xy'], @@ -174,7 +174,7 @@ return res res = self.meta_interp(f, [40]) assert res == 50 * 4 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_double_frame(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy', 'other'], @@ -197,8 +197,7 @@ return xy.inst_x res = self.meta_interp(f, [20]) assert res == 134 - self.check_loops(getfield_gc=0, setfield_gc=1) - self.check_loops(getfield_gc=1, setfield_gc=2, everywhere=True) + 
self.check_resops(setfield_gc=2, getfield_gc=1) # ------------------------------ @@ -248,8 +247,8 @@ return xy2.inst_l1[2] res = self.meta_interp(f, [16]) assert res == 3001 + 16 * 80 - self.check_loops(getfield_gc=0, setfield_gc=0, - getarrayitem_gc=0, setarrayitem_gc=0) + self.check_resops(setarrayitem_gc=0, setfield_gc=0, + getarrayitem_gc=0, getfield_gc=0) def test_synchronize_arrays_in_return(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -279,8 +278,7 @@ assert f(18) == 10360 res = self.meta_interp(f, [18]) assert res == 10360 - self.check_loops(getfield_gc=0, setfield_gc=0, - getarrayitem_gc=0) + self.check_resops(setfield_gc=0, getarrayitem_gc=0, getfield_gc=0) def test_array_length(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -306,8 +304,8 @@ return xy2.inst_l1[1] res = self.meta_interp(f, [18]) assert res == 2941309 + 18 - self.check_loops(getfield_gc=0, setfield_gc=0, - getarrayitem_gc=0, arraylen_gc=0) + self.check_resops(setfield_gc=0, getarrayitem_gc=0, + arraylen_gc=0, getfield_gc=0) def test_residual_function(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -340,8 +338,8 @@ return xy2.inst_l1[1] res = self.meta_interp(f, [18]) assert res == 2941309 + 18 - self.check_loops(getfield_gc=0, setfield_gc=0, - getarrayitem_gc=0, arraylen_gc=1, call=1) + self.check_resops(call=2, setfield_gc=0, getarrayitem_gc=0, + arraylen_gc=2, getfield_gc=0) def test_double_frame_array(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2', 'other'], @@ -377,8 +375,8 @@ expected = f(20) res = self.meta_interp(f, [20], enable_opts='') assert res == expected - self.check_loops(getfield_gc=1, setfield_gc=0, - arraylen_gc=1, getarrayitem_gc=1, setarrayitem_gc=1) + self.check_resops(setarrayitem_gc=1, setfield_gc=0, + getarrayitem_gc=1, arraylen_gc=1, getfield_gc=1) # ------------------------------ @@ -425,8 +423,7 @@ assert f(18) == 10360 res = self.meta_interp(f, [18]) assert res == 10360 - self.check_loops(getfield_gc=0, setfield_gc=0, - getarrayitem_gc=0) + self.check_resops(setfield_gc=0, getarrayitem_gc=0, getfield_gc=0) # ------------------------------ @@ -460,8 +457,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_loops(getfield_gc=0, setfield_gc=0) - + self.check_resops(setfield_gc=0, getfield_gc=0) def test_virtualizable_with_array(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'x', 'frame'], @@ -495,8 +491,7 @@ res = self.meta_interp(f, [10, 1], listops=True) assert res == f(10, 1) - self.check_loops(getarrayitem_gc=0) - + self.check_resops(getarrayitem_gc=0) def test_subclass_of_virtualizable(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -524,8 +519,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_loops(getfield_gc=0, setfield_gc=0) - + self.check_resops(setfield_gc=0, getfield_gc=0) def test_external_pass(self): jitdriver = JitDriver(greens = [], reds = ['n', 'z', 'frame'], @@ -1011,8 +1005,8 @@ res = self.meta_interp(f, [70], listops=True) assert res == intmask(42 ** 70) - self.check_loops(int_add=0, - int_sub=1) # for 'n -= 1' only + self.check_resops(int_add=0, + int_sub=2) # for 'n -= 1' only def test_simple_access_directly(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -1043,7 +1037,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) from pypy.jit.backend.test.support import BaseCompiledMixin if isinstance(self, BaseCompiledMixin): 
@@ -1098,42 +1092,42 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_loops(new_with_vtable=0) + self.check_resops(new_with_vtable=0) def test_check_for_nonstandardness_only_once(self): - myjitdriver = JitDriver(greens = [], reds = ['frame'], - virtualizables = ['frame']) + myjitdriver = JitDriver(greens = [], reds = ['frame'], + virtualizables = ['frame']) - class Frame(object): - _virtualizable2_ = ['x', 'y', 'z'] + class Frame(object): + _virtualizable2_ = ['x', 'y', 'z'] - def __init__(self, x, y, z=1): - self = hint(self, access_directly=True) - self.x = x - self.y = y - self.z = z + def __init__(self, x, y, z=1): + self = hint(self, access_directly=True) + self.x = x + self.y = y + self.z = z - class SomewhereElse: - pass - somewhere_else = SomewhereElse() + class SomewhereElse: + pass + somewhere_else = SomewhereElse() - def f(n): - frame = Frame(n, 0) - somewhere_else.top_frame = frame # escapes - frame = hint(frame, access_directly=True) - while frame.x > 0: - myjitdriver.can_enter_jit(frame=frame) - myjitdriver.jit_merge_point(frame=frame) - top_frame = somewhere_else.top_frame - child_frame = Frame(frame.x, top_frame.z, 17) - frame.y += child_frame.x - frame.x -= top_frame.z - return somewhere_else.top_frame.y - - res = self.meta_interp(f, [10]) - assert res == 55 - self.check_loops(new_with_vtable=0, ptr_eq=1, everywhere=True) - self.check_history(ptr_eq=2) + def f(n): + frame = Frame(n, 0) + somewhere_else.top_frame = frame # escapes + frame = hint(frame, access_directly=True) + while frame.x > 0: + myjitdriver.can_enter_jit(frame=frame) + myjitdriver.jit_merge_point(frame=frame) + top_frame = somewhere_else.top_frame + child_frame = Frame(frame.x, top_frame.z, 17) + frame.y += child_frame.x + frame.x -= top_frame.z + return somewhere_else.top_frame.y + + res = self.meta_interp(f, [10]) + assert res == 55 + self.check_resops(new_with_vtable=0, ptr_eq=1) + self.check_history(ptr_eq=2) def test_virtual_child_frame_with_arrays(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -1165,7 +1159,7 @@ res = self.meta_interp(f, [10], listops=True) assert res == 55 - self.check_loops(new_with_vtable=0) + self.check_resops(new_with_vtable=0) def test_blackhole_should_not_pay_attention(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -1203,7 +1197,7 @@ res = self.meta_interp(f, [10]) assert res == 155 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_blackhole_should_synchronize(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -1239,7 +1233,7 @@ res = self.meta_interp(f, [10]) assert res == 155 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_blackhole_should_not_reenter(self): if not self.basic: diff --git a/pypy/jit/metainterp/test/test_virtualref.py b/pypy/jit/metainterp/test/test_virtualref.py --- a/pypy/jit/metainterp/test/test_virtualref.py +++ b/pypy/jit/metainterp/test/test_virtualref.py @@ -171,7 +171,7 @@ return 1 # self.meta_interp(f, [10]) - self.check_loops(new_with_vtable=1) # the vref + self.check_resops(new_with_vtable=2) # the vref self.check_aborted_count(0) def test_simple_all_removed(self): @@ -205,8 +205,7 @@ virtual_ref_finish(vref, xy) # self.meta_interp(f, [15]) - self.check_loops(new_with_vtable=0, # all virtualized - new_array=0) + self.check_resops(new_with_vtable=0, new_array=0) self.check_aborted_count(0) def test_simple_no_access(self): @@ -242,7 +241,7 @@ virtual_ref_finish(vref, xy) 
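The virtualizable tests above (test_virtualizable.py) keep asserting getfield_gc=0 and setfield_gc=0 even under check_resops: fields listed in _virtualizable2_ are kept in the JIT's boxes while the loop runs, so no heap reads or writes show up in the trace. Below is a minimal sketch of the pattern those tests exercise; it is modeled on the test code in this patch, uses only names that appear there (JitDriver, hint, _virtualizable2_), and runs only inside a PyPy source checkout:

    from pypy.rlib.jit import JitDriver, hint

    myjitdriver = JitDriver(greens=[], reds=['frame'],
                            virtualizables=['frame'])

    class Frame(object):
        _virtualizable2_ = ['x', 'y']      # fields the JIT keeps in boxes

        def __init__(self, x, y):
            self = hint(self, access_directly=True)
            self.x = x
            self.y = y

    def f(n):
        frame = Frame(n, 0)
        while frame.x > 0:
            myjitdriver.can_enter_jit(frame=frame)
            myjitdriver.jit_merge_point(frame=frame)
            frame.y += frame.x             # traced as plain integer ops,
            frame.x -= 1                   # not as getfield_gc/setfield_gc
        return frame.y

Running such a function through meta_interp and then checking check_resops(getfield_gc=0, setfield_gc=0) is exactly what the tests above do.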
# self.meta_interp(f, [15]) - self.check_loops(new_with_vtable=1, # the vref: xy doesn't need to be forced + self.check_resops(new_with_vtable=2, # the vref: xy doesn't need to be forced new_array=0) # and neither xy.next1/2/3 self.check_aborted_count(0) @@ -280,8 +279,8 @@ exctx.topframeref = vref_None # self.meta_interp(f, [15]) - self.check_loops(new_with_vtable=2, # XY(), the vref - new_array=3) # next1/2/3 + self.check_resops(new_with_vtable=4, # XY(), the vref + new_array=6) # next1/2/3 self.check_aborted_count(0) def test_simple_force_sometimes(self): @@ -320,8 +319,8 @@ # res = self.meta_interp(f, [30]) assert res == 13 - self.check_loops(new_with_vtable=1, # the vref, but not XY() - new_array=0) # and neither next1/2/3 + self.check_resops(new_with_vtable=2, # the vref, but not XY() + new_array=0) # and neither next1/2/3 self.check_loop_count(1) self.check_aborted_count(0) @@ -362,7 +361,7 @@ # res = self.meta_interp(f, [30]) assert res == 13 - self.check_loops(new_with_vtable=0, # all virtualized in the n!=13 loop + self.check_resops(new_with_vtable=0, # all virtualized in the n!=13 loop new_array=0) self.check_loop_count(1) self.check_aborted_count(0) @@ -412,7 +411,7 @@ res = self.meta_interp(f, [72]) assert res == 6 self.check_loop_count(2) # the loop and the bridge - self.check_loops(new_with_vtable=2, # loop: nothing; bridge: vref, xy + self.check_resops(new_with_vtable=2, # loop: nothing; bridge: vref, xy new_array=2) # bridge: next4, next5 self.check_aborted_count(0) @@ -442,8 +441,8 @@ # res = self.meta_interp(f, [15]) assert res == 1 - self.check_loops(new_with_vtable=2, # vref, xy - new_array=1) # next1 + self.check_resops(new_with_vtable=4, # vref, xy + new_array=2) # next1 self.check_aborted_count(0) def test_recursive_call_1(self): @@ -543,7 +542,7 @@ # res = self.meta_interp(f, [15]) assert res == 1 - self.check_loops(new_with_vtable=2) # vref, xy + self.check_resops(new_with_vtable=4) # vref, xy def test_cannot_use_invalid_virtualref(self): myjitdriver = JitDriver(greens = [], reds = ['n']) diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -100,12 +100,12 @@ # check that the set_param will override the default res = self.meta_interp(f, [10, llstr('')]) assert res == 0 - self.check_loops(new_with_vtable=1) + self.check_resops(new_with_vtable=1) res = self.meta_interp(f, [10, llstr(ALL_OPTS_NAMES)], enable_opts='') assert res == 0 - self.check_loops(new_with_vtable=0) + self.check_resops(new_with_vtable=0) def test_unwanted_loops(self): mydriver = JitDriver(reds = ['n', 'total', 'm'], greens = []) @@ -160,7 +160,7 @@ return n self.meta_interp(f, [50], backendopt=True) self.check_enter_count_at_most(2) - self.check_loops(call=0) + self.check_resops(call=0) def test_loop_header(self): # artificial test: we enter into the JIT only when can_enter_jit() @@ -184,7 +184,7 @@ assert f(15) == 1 res = self.meta_interp(f, [15], backendopt=True) assert res == 1 - self.check_loops(int_add=1) # I get 13 without the loop_header() + self.check_resops(int_add=2) # I get 13 without the loop_header() def test_omit_can_enter_jit(self): # Simple test comparing the effects of always giving a can_enter_jit(), @@ -246,8 +246,8 @@ m = m - 1 self.meta_interp(f1, [8]) self.check_loop_count(1) - self.check_loops({'int_sub': 1, 'int_gt': 1, 'guard_true': 1, - 'jump': 1}) + self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + 'int_sub': 2}) 
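Most expected operation counts above roughly double when check_loops() becomes check_resops(): int_sub=1 turns into int_sub=2, jump=1 into jump=2, and so on. The pattern suggests that check_resops counts operations over every emitted trace, that is, the preamble produced by loop peeling plus the peeled loop itself, whereas check_loops only looked at the loop body. Here is a rough, self-contained sketch of that counting convention; tally_resops and the two hand-written traces for "while m > 0: m = m - 1" are illustrative only, not PyPy's test API or actual JIT output:

    from collections import Counter

    def tally_resops(traces):
        # count operation names across all traces,
        # the way check_resops appears to do
        counts = Counter()
        for trace in traces:
            counts.update(op.split('(')[0] for op in trace)
        return counts

    # the same abstract iteration shows up once in the preamble
    # and once in the peeled loop
    preamble = ['int_gt(m0, 0)', 'guard_true(i0)', 'int_sub(m0, 1)', 'jump(m1)']
    loop     = ['int_gt(m1, 0)', 'guard_true(i1)', 'int_sub(m1, 1)', 'jump(m2)']

    assert tally_resops([preamble, loop]) == Counter(
        {'int_gt': 2, 'guard_true': 2, 'int_sub': 2, 'jump': 2})

This is the same dict the warmspot test above now expects.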
def test_void_red_variable(self): mydriver = JitDriver(greens=[], reds=['a', 'm']) diff --git a/pypy/jit/tl/spli/test/test_jit.py b/pypy/jit/tl/spli/test/test_jit.py --- a/pypy/jit/tl/spli/test/test_jit.py +++ b/pypy/jit/tl/spli/test/test_jit.py @@ -36,7 +36,7 @@ i = i + 1 return i self.interpret(f, []) - self.check_loops(new_with_vtable=0) + self.check_resops(new_with_vtable=0) def test_bridge(self): py.test.skip('We currently cant virtualize across bridges') @@ -52,7 +52,7 @@ return total self.interpret(f, [1, 10]) - self.check_loops(new_with_vtable=0) + self.check_resops(new_with_vtable=0) def test_bridge_bad_case(self): py.test.skip('We currently cant virtualize across bridges') @@ -67,7 +67,7 @@ return a + b self.interpret(f, [1, 10]) - self.check_loops(new_with_vtable=1) # XXX should eventually be 0? + self.check_resops(new_with_vtable=1) # XXX should eventually be 0? # I think it should be either 0 or 2, 1 makes little sense # If the loop after entering goes first time to the bridge, a # is rewrapped again, without preserving the identity. I'm not diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ b/pypy/module/_weakref/test/test_weakref.py @@ -324,6 +324,7 @@ class A(object): pass a = A() assert _weakref.proxy(a) is _weakref.proxy(a) + assert _weakref.proxy(a) is _weakref.proxy(a, None) def test_callable_proxy(self): import _weakref, gc diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ b/pypy/module/cpyext/listobject.py @@ -32,7 +32,7 @@ Py_DecRef(space, w_item) if not isinstance(w_list, W_ListObject): PyErr_BadInternalCall(space) - wrappeditems = w_list.wrappeditems + wrappeditems = w_list.getitems() if index < 0 or index >= len(wrappeditems): raise OperationError(space.w_IndexError, space.wrap( "list assignment index out of range")) @@ -47,7 +47,7 @@ IndexError exception.""" if not isinstance(w_list, W_ListObject): PyErr_BadInternalCall(space) - wrappeditems = w_list.wrappeditems + wrappeditems = w_list.getitems() if index < 0 or index >= len(wrappeditems): raise OperationError(space.w_IndexError, space.wrap( "list index out of range")) @@ -74,7 +74,7 @@ """Macro form of PyList_Size() without error checking. """ assert isinstance(w_list, W_ListObject) - return len(w_list.wrappeditems) + return len(w_list.getitems()) @cpython_api([PyObject], Py_ssize_t, error=-1) diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -56,7 +56,7 @@ PySequence_Fast(), o is not NULL, and that i is within bounds. 
""" if isinstance(w_obj, listobject.W_ListObject): - w_res = w_obj.wrappeditems[index] + w_res = w_obj.getitem(index) else: assert isinstance(w_obj, tupleobject.W_TupleObject) w_res = w_obj.wrappeditems[index] @@ -70,7 +70,7 @@ PySequence_Fast_GET_SIZE() is faster because it can assume o is a list or tuple.""" if isinstance(w_obj, listobject.W_ListObject): - return len(w_obj.wrappeditems) + return w_obj.length() assert isinstance(w_obj, tupleobject.W_TupleObject) return len(w_obj.wrappeditems) diff --git a/pypy/module/gc/test/test_referents.py b/pypy/module/gc/test/test_referents.py --- a/pypy/module/gc/test/test_referents.py +++ b/pypy/module/gc/test/test_referents.py @@ -7,9 +7,13 @@ from pypy.rlib import rgc cls._backup = [rgc.get_rpy_roots] w = cls.space.wrap + space = cls.space class RandomRPythonObject(object): pass - cls.ALL_ROOTS = [w(4), w([2, 7]), RandomRPythonObject()] + l4 = space.newlist([w(4)]) + l2 = space.newlist([w(2)]) + l7 = space.newlist([w(7)]) + cls.ALL_ROOTS = [l4, space.newlist([l2, l7]), RandomRPythonObject()] cls.w_ALL_ROOTS = cls.space.newlist(cls.ALL_ROOTS) rgc.get_rpy_roots = lambda: ( map(rgc._GcRef, cls.ALL_ROOTS) + [rgc.NULL_GCREF]*17) @@ -41,14 +45,14 @@ if self.runappdirect: pass # unsure what to test else: - assert lst[0] == 4 - assert lst[1] == [2, 7] + assert lst[0] == [4] + assert lst[1] == [[2], [7]] assert type(lst[2]) is gc.GcRef assert len(lst) == 3 def test_get_rpy_referents(self): import gc - y = 12345 + y = [12345] x = [y] lst = gc.get_rpy_referents(x) # After translation, 'lst' should contain the RPython-level list @@ -88,8 +92,8 @@ def test_get_referents(self): import gc - y = 12345 - z = 23456 + y = [12345] + z = [23456] x = [y, z] lst = gc.get_referents(x) assert y in lst and z in lst diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -5,14 +5,16 @@ applevel_name = 'numpypy' interpleveldefs = { - 'array': 'interp_numarray.SingleDimArray', + 'array': 'interp_numarray.NDimArray', 'dtype': 'interp_dtype.W_Dtype', 'ufunc': 'interp_ufuncs.W_Ufunc', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', 'ones': 'interp_numarray.ones', + 'dot': 'interp_numarray.dot', 'fromstring': 'interp_support.fromstring', + 'flatiter': 'interp_numarray.W_FlatIterator', 'True_': 'space.w_True', 'False_': 'space.w_False', @@ -48,6 +50,7 @@ ("sign", "sign"), ("sin", "sin"), ("subtract", "subtract"), + ('sqrt', 'sqrt'), ("tan", "tan"), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl @@ -57,4 +60,5 @@ 'mean': 'app_numpy.mean', 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', + 'arange': 'app_numpy.arange', } diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -6,12 +6,33 @@ inf = float("inf") e = math.e + def average(a): # This implements a weighted average, for now we don't implement the # weighting, just the average part! return mean(a) + def mean(a): if not hasattr(a, "mean"): a = numpypy.array(a) return a.mean() + + +def arange(start, stop=None, step=1, dtype=None): + '''arange([start], stop[, step], dtype=None) + Generate values in the half-interval [start, stop). 
+ ''' + if stop is None: + stop = start + start = 0 + if dtype is None: + test = numpypy.array([start, stop, step, 0]) + dtype = test.dtype + arr = numpypy.zeros(int(math.ceil((stop - start) / step)), dtype=dtype) + i = start + for j in range(arr.size): + arr[j] = i + j += 1 + i += step + return arr diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -6,10 +6,10 @@ from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root from pypy.module.micronumpy.interp_dtype import W_Float64Dtype, W_BoolDtype from pypy.module.micronumpy.interp_numarray import (Scalar, BaseArray, - descr_new_array, scalar_w, SingleDimArray) + descr_new_array, scalar_w, NDimArray) from pypy.module.micronumpy import interp_ufuncs from pypy.rlib.objectmodel import specialize - +import re class BogusBytecode(Exception): pass @@ -23,11 +23,18 @@ class WrongFunctionName(Exception): pass +class TokenizerError(Exception): + pass + +class BadToken(Exception): + pass + SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", "unegative"] class FakeSpace(object): w_ValueError = None w_TypeError = None + w_IndexError = None w_None = None w_bool = "bool" @@ -36,6 +43,7 @@ w_list = "list" w_long = "long" w_tuple = 'tuple' + w_slice = "slice" def __init__(self): """NOT_RPYTHON""" @@ -43,13 +51,30 @@ self.w_float64dtype = W_Float64Dtype(self) def issequence_w(self, w_obj): - return isinstance(w_obj, ListObject) or isinstance(w_obj, SingleDimArray) + return isinstance(w_obj, ListObject) or isinstance(w_obj, NDimArray) def isinstance_w(self, w_obj, w_tp): + if w_obj.tp == w_tp: + return True return False def decode_index4(self, w_idx, size): - return (self.int_w(w_idx), 0, 0, 1) + if isinstance(w_idx, IntObject): + return (self.int_w(w_idx), 0, 0, 1) + else: + assert isinstance(w_idx, SliceObject) + start, stop, step = w_idx.start, w_idx.stop, w_idx.step + if step == 0: + return (0, size, 1, size) + if start < 0: + start += size + if stop < 0: + stop += size + 1 + if step < 0: + lgt = (stop - start + 1) / step + 1 + else: + lgt = (stop - start - 1) / step + 1 + return (start, stop, step, lgt) @specialize.argtype(1) def wrap(self, obj): @@ -59,7 +84,9 @@ return BoolObject(obj) elif isinstance(obj, int): return IntObject(obj) - raise Exception + elif isinstance(obj, W_Root): + return obj + raise NotImplementedError def newlist(self, items): return ListObject(items) @@ -67,13 +94,14 @@ def listview(self, obj): assert isinstance(obj, ListObject) return obj.items + fixedview = listview def float(self, w_obj): assert isinstance(w_obj, FloatObject) return w_obj def float_w(self, w_obj): - assert isinstance(w_obj, FloatObject) + assert isinstance(w_obj, FloatObject) return w_obj.floatval def int_w(self, w_obj): @@ -107,6 +135,12 @@ assert isinstance(what, tp) return what + def len_w(self, w_obj): + if isinstance(w_obj, ListObject): + return len(w_obj.items) + # XXX array probably + assert False + class FloatObject(W_Root): tp = FakeSpace.w_float def __init__(self, floatval): @@ -127,6 +161,13 @@ def __init__(self, items): self.items = items +class SliceObject(W_Root): + tp = FakeSpace.w_slice + def __init__(self, start, stop, step): + self.start = start + self.stop = stop + self.step = step + class InterpreterState(object): def __init__(self, code): self.code = code @@ -161,7 +202,7 @@ interp.variables[self.name] = self.expr.execute(interp) def __repr__(self): - return "%% = %r" % (self.name, self.expr) + return 
"%r = %r" % (self.name, self.expr) class ArrayAssignment(Node): def __init__(self, name, index, expr): @@ -171,8 +212,12 @@ def execute(self, interp): arr = interp.variables[self.name] - w_index = self.index.execute(interp).eval(0).wrap(interp.space) - w_val = self.expr.execute(interp).eval(0).wrap(interp.space) + w_index = self.index.execute(interp) + # cast to int + if isinstance(w_index, FloatObject): + w_index = IntObject(int(w_index.floatval)) + w_val = self.expr.execute(interp) + assert isinstance(arr, BaseArray) arr.descr_setitem(interp.space, w_index, w_val) def __repr__(self): @@ -180,7 +225,7 @@ class Variable(Node): def __init__(self, name): - self.name = name + self.name = name.strip(" ") def execute(self, interp): return interp.variables[self.name] @@ -196,25 +241,27 @@ def execute(self, interp): w_lhs = self.lhs.execute(interp) + if isinstance(self.rhs, SliceConstant): + w_rhs = self.rhs.wrap(interp.space) + else: + w_rhs = self.rhs.execute(interp) + if not isinstance(w_lhs, BaseArray): + # scalar + dtype = interp.space.fromcache(W_Float64Dtype) + w_lhs = scalar_w(interp.space, dtype, w_lhs) assert isinstance(w_lhs, BaseArray) - if isinstance(self.rhs, SliceConstant): - # XXX interface has changed on multidim branch - raise NotImplementedError - w_rhs = self.rhs.execute(interp) if self.name == '+': w_res = w_lhs.descr_add(interp.space, w_rhs) elif self.name == '*': w_res = w_lhs.descr_mul(interp.space, w_rhs) elif self.name == '-': - w_res = w_lhs.descr_sub(interp.space, w_rhs) + w_res = w_lhs.descr_sub(interp.space, w_rhs) elif self.name == '->': - if isinstance(w_rhs, Scalar): - index = int(interp.space.float_w( - w_rhs.value.wrap(interp.space))) - dtype = interp.space.fromcache(W_Float64Dtype) - return Scalar(dtype, w_lhs.get_concrete().eval(index)) - else: - raise NotImplementedError + assert not isinstance(w_rhs, Scalar) + if isinstance(w_rhs, FloatObject): + w_rhs = IntObject(int(w_rhs.floatval)) + assert isinstance(w_lhs, BaseArray) + w_res = w_lhs.descr_getitem(interp.space, w_rhs) else: raise NotImplementedError if not isinstance(w_res, BaseArray): @@ -236,9 +283,7 @@ return space.wrap(self.v) def execute(self, interp): - dtype = interp.space.fromcache(W_Float64Dtype) - assert isinstance(dtype, W_Float64Dtype) - return Scalar(dtype, dtype.box(self.v)) + return FloatObject(self.v) class RangeConstant(Node): def __init__(self, v): @@ -248,7 +293,8 @@ w_list = interp.space.newlist( [interp.space.wrap(float(i)) for i in range(self.v)]) dtype = interp.space.fromcache(W_Float64Dtype) - return descr_new_array(interp.space, None, w_list, w_dtype=dtype) + return descr_new_array(interp.space, None, w_list, w_dtype=dtype, + w_order=None) def __repr__(self): return 'Range(%s)' % self.v @@ -270,17 +316,27 @@ def execute(self, interp): w_list = self.wrap(interp.space) dtype = interp.space.fromcache(W_Float64Dtype) - return descr_new_array(interp.space, None, w_list, w_dtype=dtype) + return descr_new_array(interp.space, None, w_list, w_dtype=dtype, + w_order=None) def __repr__(self): return "[" + ", ".join([repr(item) for item in self.items]) + "]" class SliceConstant(Node): - def __init__(self): - pass + def __init__(self, start, stop, step): + # no negative support for now + self.start = start + self.stop = stop + self.step = step + + def wrap(self, space): + return SliceObject(self.start, self.stop, self.step) + + def execute(self, interp): + return SliceObject(self.start, self.stop, self.step) def __repr__(self): - return 'slice()' + return 'slice(%s,%s,%s)' % (self.start, 
self.stop, self.step) class Execute(Node): def __init__(self, expr): @@ -294,7 +350,7 @@ class FunctionCall(Node): def __init__(self, name, args): - self.name = name + self.name = name.strip(" ") self.args = args def __repr__(self): @@ -337,95 +393,172 @@ else: raise WrongFunctionName +_REGEXES = [ + ('-?[\d\.]+', 'number'), + ('\[', 'array_left'), + (':', 'colon'), + ('\w+', 'identifier'), + ('\]', 'array_right'), + ('(->)|[\+\-\*\/]', 'operator'), + ('=', 'assign'), + (',', 'coma'), + ('\|', 'pipe'), + ('\(', 'paren_left'), + ('\)', 'paren_right'), +] +REGEXES = [] + +for r, name in _REGEXES: + REGEXES.append((re.compile(r' *(' + r + ')'), name)) +del _REGEXES + +class Token(object): + def __init__(self, name, v): + self.name = name + self.v = v + + def __repr__(self): + return '(%s, %s)' % (self.name, self.v) + +empty = Token('', '') + +class TokenStack(object): + def __init__(self, tokens): + self.tokens = tokens + self.c = 0 + + def pop(self): + token = self.tokens[self.c] + self.c += 1 + return token + + def get(self, i): + if self.c + i >= len(self.tokens): + return empty + return self.tokens[self.c + i] + + def remaining(self): + return len(self.tokens) - self.c + + def push(self): + self.c -= 1 + + def __repr__(self): + return repr(self.tokens[self.c:]) + class Parser(object): - def parse_identifier(self, id): - id = id.strip(" ") - #assert id.isalpha() - return Variable(id) + def tokenize(self, line): + tokens = [] + while True: + for r, name in REGEXES: + m = r.match(line) + if m is not None: + g = m.group(0) + tokens.append(Token(name, g)) + line = line[len(g):] + if not line: + return TokenStack(tokens) + break + else: + raise TokenizerError(line) - def parse_expression(self, expr): - tokens = [i for i in expr.split(" ") if i] - if len(tokens) == 1: - return self.parse_constant_or_identifier(tokens[0]) + def parse_number_or_slice(self, tokens): + start_tok = tokens.pop() + if start_tok.name == 'colon': + start = 0 + else: + if tokens.get(0).name != 'colon': + return FloatConstant(start_tok.v) + start = int(start_tok.v) + tokens.pop() + if not tokens.get(0).name in ['colon', 'number']: + stop = -1 + step = 1 + else: + next = tokens.pop() + if next.name == 'colon': + stop = -1 + step = int(tokens.pop().v) + else: + stop = int(next.v) + if tokens.get(0).name == 'colon': + tokens.pop() + step = int(tokens.pop().v) + else: + step = 1 + return SliceConstant(start, stop, step) + + + def parse_expression(self, tokens): stack = [] - tokens.reverse() - while tokens: + while tokens.remaining(): token = tokens.pop() - if token == ')': - raise NotImplementedError - elif self.is_identifier_or_const(token): - if stack: - name = stack.pop().name - lhs = stack.pop() - rhs = self.parse_constant_or_identifier(token) - stack.append(Operator(lhs, name, rhs)) + if token.name == 'identifier': + if tokens.remaining() and tokens.get(0).name == 'paren_left': + stack.append(self.parse_function_call(token.v, tokens)) else: - stack.append(self.parse_constant_or_identifier(token)) + stack.append(Variable(token.v)) + elif token.name == 'array_left': + stack.append(ArrayConstant(self.parse_array_const(tokens))) + elif token.name == 'operator': + stack.append(Variable(token.v)) + elif token.name == 'number' or token.name == 'colon': + tokens.push() + stack.append(self.parse_number_or_slice(tokens)) + elif token.name == 'pipe': + stack.append(RangeConstant(tokens.pop().v)) + end = tokens.pop() + assert end.name == 'pipe' else: - stack.append(Variable(token)) - assert len(stack) == 1 - return stack[-1] + 
tokens.push() + break + stack.reverse() + lhs = stack.pop() + while stack: + op = stack.pop() + assert isinstance(op, Variable) + rhs = stack.pop() + lhs = Operator(lhs, op.name, rhs) + return lhs - def parse_constant(self, v): - lgt = len(v)-1 - assert lgt >= 0 - if ':' in v: - # a slice - assert v == ':' - return SliceConstant() - if v[0] == '[': - return ArrayConstant([self.parse_constant(elem) - for elem in v[1:lgt].split(",")]) - if v[0] == '|': - return RangeConstant(v[1:lgt]) - return FloatConstant(v) - - def is_identifier_or_const(self, v): - c = v[0] - if ((c >= 'a' and c <= 'z') or (c >= 'A' and c <= 'Z') or - (c >= '0' and c <= '9') or c in '-.[|:'): - if v == '-' or v == "->": - return False - return True - return False - - def parse_function_call(self, v): - l = v.split('(') - assert len(l) == 2 - name = l[0] - cut = len(l[1]) - 1 - assert cut >= 0 - args = [self.parse_constant_or_identifier(id) - for id in l[1][:cut].split(",")] + def parse_function_call(self, name, tokens): + args = [] + tokens.pop() # lparen + while tokens.get(0).name != 'paren_right': + args.append(self.parse_expression(tokens)) return FunctionCall(name, args) - def parse_constant_or_identifier(self, v): - c = v[0] - if (c >= 'a' and c <= 'z') or (c >= 'A' and c <= 'Z'): - if '(' in v: - return self.parse_function_call(v) - return self.parse_identifier(v) - return self.parse_constant(v) + def parse_array_const(self, tokens): + elems = [] + while True: + token = tokens.pop() + if token.name == 'number': + elems.append(FloatConstant(token.v)) + elif token.name == 'array_left': + elems.append(ArrayConstant(self.parse_array_const(tokens))) + else: + raise BadToken() + token = tokens.pop() + if token.name == 'array_right': + return elems + assert token.name == 'coma' - def parse_array_subscript(self, v): - v = v.strip(" ") - l = v.split("[") - lgt = len(l[1]) - 1 - assert lgt >= 0 - rhs = self.parse_constant_or_identifier(l[1][:lgt]) - return l[0], rhs - - def parse_statement(self, line): - if '=' in line: - lhs, rhs = line.split("=") - lhs = lhs.strip(" ") - if '[' in lhs: - name, index = self.parse_array_subscript(lhs) - return ArrayAssignment(name, index, self.parse_expression(rhs)) - else: - return Assignment(lhs, self.parse_expression(rhs)) - else: - return Execute(self.parse_expression(line)) + def parse_statement(self, tokens): + if (tokens.get(0).name == 'identifier' and + tokens.get(1).name == 'assign'): + lhs = tokens.pop().v + tokens.pop() + rhs = self.parse_expression(tokens) + return Assignment(lhs, rhs) + elif (tokens.get(0).name == 'identifier' and + tokens.get(1).name == 'array_left'): + name = tokens.pop().v + tokens.pop() + index = self.parse_expression(tokens) + tokens.pop() + tokens.pop() + return ArrayAssignment(name, index, self.parse_expression(tokens)) + return Execute(self.parse_expression(tokens)) def parse(self, code): statements = [] @@ -434,7 +567,8 @@ line = line.split('#', 1)[0] line = line.strip(" ") if line: - statements.append(self.parse_statement(line)) + tokens = self.tokenize(line) + statements.append(self.parse_statement(tokens)) return Code(statements) def numpy_compile(code): diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -131,6 +131,7 @@ def binop(func): + func._annspecialcase_ = "specialize:call_location" @functools.wraps(func) def impl(self, v1, v2): return self.adapt_val(func(self, @@ -221,6 +222,7 @@ @binop def div(self, v1, v2): 
+ # XXX this won't work after translation, probably requires ovfcheck try: return v1 / v2 except ZeroDivisionError: @@ -292,6 +294,12 @@ if not -1.0 < v < 1.0: return rfloat.NAN return math.atanh(v) + @unaryop + def sqrt(self, v): + try: + return math.sqrt(v) + except ValueError: + return rfloat.NAN class IntegerArithmeticDtype(ArithmeticTypeMixin): _mixin_ = True @@ -313,6 +321,18 @@ @binop def mod(self, v1, v2): return v1 % v2 + @binop + def pow(self, v1, v2): + res = 1 + while v2 > 0: + if v2 & 1: + res *= v1 + v2 >>= 1 + if v2 == 0: + break + v1 *= v1 + return res + class SignedIntegerArithmeticDtype(IntegerArithmeticDtype): _mixin_ = True diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1,45 +1,370 @@ from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import interp_ufuncs, interp_dtype, signature from pypy.rlib import jit -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name +from pypy.rlib.rstring import StringBuilder +from pypy.rlib.objectmodel import instantiate -numpy_driver = jit.JitDriver(greens = ['signature'], - reds = ['result_size', 'i', 'self', 'result']) -all_driver = jit.JitDriver(greens=['signature'], reds=['i', 'size', 'self', 'dtype']) -any_driver = jit.JitDriver(greens=['signature'], reds=['i', 'size', 'self', 'dtype']) -slice_driver = jit.JitDriver(greens=['signature'], reds=['i', 'j', 'step', 'stop', 'source', 'dest']) +numpy_driver = jit.JitDriver( + greens=['shapelen', 'signature'], + reds=['result_size', 'i', 'ri', 'self', 'result'] +) +all_driver = jit.JitDriver( + greens=['shapelen', 'signature'], + reds=['i', 'self', 'dtype'] +) +any_driver = jit.JitDriver( + greens=['shapelen', 'signature'], + reds=['i', 'self', 'dtype'] +) +slice_driver = jit.JitDriver( + greens=['shapelen', 'signature'], + reds=['self', 'source', 'source_iter', 'res_iter'] +) -def descr_new_array(space, w_subtype, w_size_or_iterable, w_dtype=None): - l = space.listview(w_size_or_iterable) +def _find_shape_and_elems(space, w_iterable): + shape = [space.len_w(w_iterable)] + batch = space.listview(w_iterable) + while True: + new_batch = [] + if not batch: + return shape, [] + if not space.issequence_w(batch[0]): + for elem in batch: + if space.issequence_w(elem): + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + return shape, batch + size = space.len_w(batch[0]) + for w_elem in batch: + if not space.issequence_w(w_elem) or space.len_w(w_elem) != size: + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + new_batch += space.listview(w_elem) + shape.append(size) + batch = new_batch + +def shape_agreement(space, shape1, shape2): + ret = _shape_agreement(shape1, shape2) + if len(ret) < max(len(shape1), len(shape2)): + raise OperationError(space.w_ValueError, + space.wrap("operands could not be broadcast together with shapes (%s) (%s)" % ( + ",".join([str(x) for x in shape1]), + ",".join([str(x) for x in 
shape2]), + )) + ) + return ret + +def _shape_agreement(shape1, shape2): + """ Checks agreement about two shapes with respect to broadcasting. Returns + the resulting shape. + """ + lshift = 0 + rshift = 0 + if len(shape1) > len(shape2): + m = len(shape1) + n = len(shape2) + rshift = len(shape2) - len(shape1) + remainder = shape1 + else: + m = len(shape2) + n = len(shape1) + lshift = len(shape1) - len(shape2) + remainder = shape2 + endshape = [0] * m + indices1 = [True] * m + indices2 = [True] * m + for i in range(m - 1, m - n - 1, -1): + left = shape1[i + lshift] + right = shape2[i + rshift] + if left == right: + endshape[i] = left + elif left == 1: + endshape[i] = right + indices1[i + lshift] = False + elif right == 1: + endshape[i] = left + indices2[i + rshift] = False + else: + return [] + #raise OperationError(space.w_ValueError, space.wrap( + # "frames are not aligned")) + for i in range(m - n): + endshape[i] = remainder[i] + return endshape + +def descr_new_array(space, w_subtype, w_item_or_iterable, w_dtype=None, + w_order=NoneNotWrapped): + # find scalar + if not space.issequence_w(w_item_or_iterable): + if space.is_w(w_dtype, space.w_None): + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, + w_item_or_iterable) + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + return scalar_w(space, dtype, w_item_or_iterable) + if w_order is None: + order = 'C' + else: + order = space.str_w(w_order) + if order != 'C': # or order != 'F': + raise operationerrfmt(space.w_ValueError, "Unknown order: %s", + order) + shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) + # they come back in C order + size = len(elems_w) if space.is_w(w_dtype, space.w_None): w_dtype = None - for w_item in l: - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_item, w_dtype) + for w_elem in elems_w: + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, + w_dtype) if w_dtype is space.fromcache(interp_dtype.W_Float64Dtype): break - if w_dtype is None: - w_dtype = space.w_None - + if w_dtype is None: + w_dtype = space.w_None dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - arr = SingleDimArray(len(l), dtype=dtype) - i = 0 - for w_elem in l: - dtype.setitem_w(space, arr.storage, i, w_elem) - i += 1 + arr = NDimArray(size, shape[:], dtype=dtype, order=order) + shapelen = len(shape) + arr_iter = arr.start_iter(arr.shape) + for i in range(len(elems_w)): + w_elem = elems_w[i] + dtype.setitem_w(space, arr.storage, arr_iter.offset, w_elem) + arr_iter = arr_iter.next(shapelen) return arr +# Iterators for arrays +# -------------------- +# all those iterators with the exception of BroadcastIterator iterate over the +# entire array in C order (the last index changes the fastest). This will +# yield all elements. Views iterate over indices and look towards strides and +# backstrides to find the correct position. Notably the offset between +# x[..., i + 1] and x[..., i] will be strides[-1]. Offset between +# x[..., k + 1, 0] and x[..., k, i_max] will be backstrides[-2] etc. 
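The _shape_agreement() helper added above implements the usual broadcasting rule: the two shapes are compared right-aligned, a dimension of size 1 stretches to match the other operand, and any other mismatch is an error. A simplified pure-Python sketch of the same rule (not the RPython code above, which additionally records which operand was stretched in indices1/indices2):

    def broadcast_shape(shape1, shape2):
        # compare right-aligned; missing leading dimensions act like 1
        result = []
        for i in range(1, max(len(shape1), len(shape2)) + 1):
            d1 = shape1[-i] if i <= len(shape1) else 1
            d2 = shape2[-i] if i <= len(shape2) else 1
            if d1 == d2 or d1 == 1 or d2 == 1:
                result.append(max(d1, d2))
            else:
                return []        # no agreement; the caller raises ValueError
        result.reverse()
        return result

    assert broadcast_shape([5, 1, 3], [4, 3]) == [5, 4, 3]
    assert broadcast_shape([2, 3], [3, 2]) == []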
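The comment block above describes how the new iterators walk an array in C order: bump the innermost index and add strides[-1] to the offset, and when an index wraps around, subtract backstrides[i] and move on to the next outer dimension. The following plain-Python sketch mirrors calc_strides() and ViewIterator.next() from this patch without reusing them, so it can be run on its own:

    def c_order_strides(shape):
        # C order: the last dimension is contiguous
        strides, s = [], 1
        for sh in reversed(shape):
            strides.append(s)
            s *= sh
        strides.reverse()
        backstrides = [st * (sh - 1) for st, sh in zip(strides, shape)]
        return strides, backstrides

    def iter_offsets(shape, strides, backstrides, start=0):
        indices = [0] * len(shape)
        offset = start
        while True:
            yield offset
            for i in range(len(shape) - 1, -1, -1):
                if indices[i] < shape[i] - 1:
                    indices[i] += 1
                    offset += strides[i]     # step within this dimension
                    break
                indices[i] = 0
                offset -= backstrides[i]     # wrapped: undo the whole row
            else:
                return                       # every index wrapped: done

    shape = [2, 3]
    strides, backstrides = c_order_strides(shape)
    assert list(iter_offsets(shape, strides, backstrides)) == [0, 1, 2, 3, 4, 5]

A broadcast dimension simply gets stride 0 and backstride 0, so the same walk keeps revisiting the original element, which is what BroadcastIterator sets up.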
+ +# BroadcastIterator works like that, but for indexes that don't change source +# in the original array, strides[i] == backstrides[i] == 0 + +class BaseIterator(object): + def next(self, shapelen): + raise NotImplementedError + + def done(self): + raise NotImplementedError + + def get_offset(self): + raise NotImplementedError + +class ArrayIterator(BaseIterator): + def __init__(self, size): + self.offset = 0 + self.size = size + + def next(self, shapelen): + arr = instantiate(ArrayIterator) + arr.size = self.size + arr.offset = self.offset + 1 + return arr + + def done(self): + return self.offset >= self.size + + def get_offset(self): + return self.offset + +class OneDimIterator(BaseIterator): + def __init__(self, start, step, stop): + self.offset = start + self.step = step + self.size = stop * step + start + + def next(self, shapelen): + arr = instantiate(OneDimIterator) + arr.size = self.size + arr.step = self.step + arr.offset = self.offset + self.step + return arr + + def done(self): + return self.offset == self.size + + def get_offset(self): + return self.offset + +class ViewIterator(BaseIterator): + def __init__(self, arr): + self.indices = [0] * len(arr.shape) + self.offset = arr.start + self.arr = arr + self._done = False + + @jit.unroll_safe + def next(self, shapelen): + offset = self.offset + indices = [0] * shapelen + for i in range(shapelen): + indices[i] = self.indices[i] + done = False + for i in range(shapelen - 1, -1, -1): + if indices[i] < self.arr.shape[i] - 1: + indices[i] += 1 + offset += self.arr.strides[i] + break + else: + indices[i] = 0 + offset -= self.arr.backstrides[i] + else: + done = True + res = instantiate(ViewIterator) + res.offset = offset + res.indices = indices + res.arr = self.arr + res._done = done + return res + + def done(self): + return self._done + + def get_offset(self): + return self.offset + +class BroadcastIterator(BaseIterator): + '''Like a view iterator, but will repeatedly access values + for all iterations across a res_shape, folding the offset + using mod() arithmetic + ''' + def __init__(self, arr, res_shape): + self.indices = [0] * len(res_shape) + self.offset = arr.start + #strides are 0 where original shape==1 + self.strides = [] + self.backstrides = [] + for i in range(len(arr.shape)): + if arr.shape[i] == 1: + self.strides.append(0) + self.backstrides.append(0) + else: + self.strides.append(arr.strides[i]) + self.backstrides.append(arr.backstrides[i]) + self.res_shape = res_shape + self.strides = [0] * (len(res_shape) - len(arr.shape)) + self.strides + self.backstrides = [0] * (len(res_shape) - len(arr.shape)) + self.backstrides + self._done = False + + @jit.unroll_safe + def next(self, shapelen): + offset = self.offset + indices = [0] * shapelen + _done = False + for i in range(shapelen): + indices[i] = self.indices[i] + for i in range(shapelen - 1, -1, -1): + if indices[i] < self.res_shape[i] - 1: + indices[i] += 1 + offset += self.strides[i] + break + else: + indices[i] = 0 + offset -= self.backstrides[i] + else: + _done = True + res = instantiate(BroadcastIterator) + res.indices = indices + res.offset = offset + res._done = _done + res.strides = self.strides + res.backstrides = self.backstrides + res.res_shape = self.res_shape + return res + + def done(self): + return self._done + + def get_offset(self): + return self.offset + +class Call2Iterator(BaseIterator): + def __init__(self, left, right): + self.left = left + self.right = right + + def next(self, shapelen): + return Call2Iterator(self.left.next(shapelen), + 
self.right.next(shapelen)) + + def done(self): + if isinstance(self.left, ConstantIterator): + return self.right.done() + return self.left.done() + + def get_offset(self): + if isinstance(self.left, ConstantIterator): + return self.right.get_offset() + return self.left.get_offset() + +class Call1Iterator(BaseIterator): + def __init__(self, child): + self.child = child + + def next(self, shapelen): + return Call1Iterator(self.child.next(shapelen)) + + def done(self): + return self.child.done() + + def get_offset(self): + return self.child.get_offset() + +class ConstantIterator(BaseIterator): + def next(self, shapelen): + return self + + def done(self): + return False + + def get_offset(self): + return 0 + + class BaseArray(Wrappable): - _attrs_ = ["invalidates", "signature"] + _attrs_ = ["invalidates", "signature", "shape", "strides", "backstrides", + "start", 'order'] - def __init__(self): + _immutable_fields_ = ['start', "order"] + + strides = None + start = 0 + + def __init__(self, shape, order): self.invalidates = [] + self.shape = shape + self.order = order + if self.strides is None: + self.calc_strides(shape) + + def calc_strides(self, shape): + strides = [] + backstrides = [] + s = 1 + shape_rev = shape[:] + if self.order == 'C': + shape_rev.reverse() + for sh in shape_rev: + strides.append(s) + backstrides.append(s * (sh - 1)) + s *= sh + if self.order == 'C': + strides.reverse() + backstrides.reverse() + self.strides = strides[:] + self.backstrides = backstrides[:] def invalidated(self): if self.invalidates: @@ -99,7 +424,7 @@ def _reduce_ufunc_impl(ufunc_name): def impl(self, space): - return getattr(interp_ufuncs.get(space), ufunc_name).descr_reduce(space, self) + return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, self, multidim=True) return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) descr_sum = _reduce_ufunc_impl("add") @@ -108,23 +433,30 @@ descr_min = _reduce_ufunc_impl("minimum") def _reduce_argmax_argmin_impl(op_name): - reduce_driver = jit.JitDriver(greens=['signature'], - reds = ['i', 'size', 'result', 'self', 'cur_best', 'dtype']) - def loop(self, size): + reduce_driver = jit.JitDriver( + greens=['shapelen', 'signature'], + reds=['result', 'idx', 'i', 'self', 'cur_best', 'dtype'] + ) + def loop(self): + i = self.start_iter() + cur_best = self.eval(i) + shapelen = len(self.shape) + i = i.next(shapelen) + dtype = self.find_dtype() result = 0 - cur_best = self.eval(0) - i = 1 - dtype = self.find_dtype() - while i < size: + idx = 1 + while not i.done(): reduce_driver.jit_merge_point(signature=self.signature, + shapelen=shapelen, self=self, dtype=dtype, - size=size, i=i, result=result, + i=i, result=result, idx=idx, cur_best=cur_best) new_best = getattr(dtype, op_name)(cur_best, self.eval(i)) if dtype.ne(new_best, cur_best): - result = i + result = idx cur_best = new_best - i += 1 + i = i.next(shapelen) + idx += 1 return result def impl(self, space): size = self.find_size() @@ -132,31 +464,35 @@ raise OperationError(space.w_ValueError, space.wrap("Can't call %s on zero-size arrays" \ % op_name)) - return space.wrap(loop(self, size)) + return space.wrap(loop(self)) return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) def _all(self): - size = self.find_size() dtype = self.find_dtype() - i = 0 - while i < size: - all_driver.jit_merge_point(signature=self.signature, self=self, dtype=dtype, size=size, i=i) + i = self.start_iter() + shapelen = len(self.shape) + while not i.done(): + all_driver.jit_merge_point(signature=self.signature, + 
shapelen=shapelen, self=self, + dtype=dtype, i=i) if not dtype.bool(self.eval(i)): return False - i += 1 + i = i.next(shapelen) return True def descr_all(self, space): return space.wrap(self._all()) def _any(self): - size = self.find_size() dtype = self.find_dtype() - i = 0 - while i < size: - any_driver.jit_merge_point(signature=self.signature, self=self, size=size, dtype=dtype, i=i) + i = self.start_iter() + shapelen = len(self.shape) + while not i.done(): + any_driver.jit_merge_point(signature=self.signature, + shapelen=shapelen, self=self, + dtype=dtype, i=i) if dtype.bool(self.eval(i)): return True - i += 1 + i = i.next(shapelen) return False def descr_any(self, space): return space.wrap(self._any()) @@ -173,25 +509,6 @@ assert isinstance(w_res, BaseArray) return w_res.descr_sum(space) - def _getnums(self, comma): - dtype = self.find_dtype() - if self.find_size() > 1000: - nums = [ - dtype.str_format(self.eval(index)) - for index in range(3) - ] - nums.append("..." + "," * comma) - nums.extend([ - dtype.str_format(self.eval(index)) - for index in range(self.find_size() - 3, self.find_size()) - ]) - else: - nums = [ - dtype.str_format(self.eval(index)) - for index in range(self.find_size()) - ] - return nums - def get_concrete(self): raise NotImplementedError @@ -199,101 +516,298 @@ return space.wrap(self.find_dtype()) def descr_get_shape(self, space): - return space.newtuple([self.descr_len(space)]) + return space.newtuple([space.wrap(i) for i in self.shape]) def descr_get_size(self, space): return space.wrap(self.find_size()) def descr_copy(self, space): - return space.call_function(space.gettypefor(BaseArray), self, self.find_dtype()) + return self.get_concrete().copy() def descr_len(self, space): return self.get_concrete().descr_len(space) def descr_repr(self, space): - # Simple implementation so that we can see the array. Needs work. + res = StringBuilder() + res.append("array(") concrete = self.get_concrete() - res = "array([" + ", ".join(concrete._getnums(False)) + "]" dtype = concrete.find_dtype() + if not concrete.find_size(): + res.append('[]') + if len(self.shape) > 1: + # An empty slice reports its shape + res.append(", shape=(") + self_shape = str(self.shape) + res.append_slice(str(self_shape), 1, len(self_shape) - 1) + res.append(')') + else: + concrete.to_str(space, 1, res, indent=' ') if (dtype is not space.fromcache(interp_dtype.W_Float64Dtype) and - dtype is not space.fromcache(interp_dtype.W_Int64Dtype)) or not self.find_size(): - res += ", dtype=" + dtype.name - res += ")" - return space.wrap(res) + dtype is not space.fromcache(interp_dtype.W_Int64Dtype)) or \ + not self.find_size(): + res.append(", dtype=" + dtype.name) + res.append(")") + return space.wrap(res.build()) + + def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): + '''Modifies builder with a representation of the array/slice + The items will be seperated by a comma if comma is 1 + Multidimensional arrays/slices will span a number of lines, + each line will begin with indent. 
+ ''' + size = self.find_size() + if size < 1: + builder.append('[]') + return + if size > 1000: + # Once this goes True it does not go back to False for recursive + # calls + use_ellipsis = True + dtype = self.find_dtype() + ndims = len(self.shape) + i = 0 + start = True + builder.append('[') + if ndims > 1: + if use_ellipsis: + for i in range(3): + if start: + start = False + else: + builder.append(',' * comma + '\n') + if ndims == 3: + builder.append('\n' + indent) + else: + builder.append(indent) + # create_slice requires len(chunks) > 1 in order to reduce + # shape + view = self.create_slice(space, [(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) + view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) + builder.append('\n' + indent + '..., ') + i = self.shape[0] - 3 + while i < self.shape[0]: + if start: + start = False + else: + builder.append(',' * comma + '\n') + if ndims == 3: + builder.append('\n' + indent) + else: + builder.append(indent) + # create_slice requires len(chunks) > 1 in order to reduce + # shape + view = self.create_slice(space, [(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) + view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) + i += 1 + elif ndims == 1: + spacer = ',' * comma + ' ' + item = self.start + # An iterator would be a nicer way to walk along the 1d array, but + # how do I reset it if printing ellipsis? iterators have no + # "set_offset()" + i = 0 + if use_ellipsis: + for i in range(3): + if start: + start = False + else: + builder.append(spacer) + builder.append(dtype.str_format(self.getitem(item))) + item += self.strides[0] + # Add a comma only if comma is False - this prevents adding two + # commas + builder.append(spacer + '...' + ',' * (1 - comma)) + # Ugly, but can this be done with an iterator? + item = self.start + self.backstrides[0] - 2 * self.strides[0] + i = self.shape[0] - 3 + while i < self.shape[0]: + if start: + start = False + else: + builder.append(spacer) + builder.append(dtype.str_format(self.getitem(item))) + item += self.strides[0] + i += 1 + else: + builder.append('[') + builder.append(']') def descr_str(self, space): - # Simple implementation so that we can see the array. Needs work. 
+ ret = StringBuilder() concrete = self.get_concrete() - return space.wrap("[" + " ".join(concrete._getnums(True)) + "]") + concrete.to_str(space, 0, ret, ' ') + return space.wrap(ret.build()) + + @jit.unroll_safe + def _index_of_single_item(self, space, w_idx): + if space.isinstance_w(w_idx, space.w_int): + idx = space.int_w(w_idx) + if not self.shape: + if idx != 0: + raise OperationError(space.w_IndexError, + space.wrap("index out of range")) + return 0 + if idx < 0: + idx = self.shape[0] + idx + if idx < 0 or idx >= self.shape[0]: + raise OperationError(space.w_IndexError, + space.wrap("index out of range")) + return self.start + idx * self.strides[0] + index = [space.int_w(w_item) + for w_item in space.fixedview(w_idx)] + item = self.start + for i in range(len(index)): + v = index[i] + if v < 0: + v += self.shape[i] + if v < 0 or v >= self.shape[i]: + raise operationerrfmt(space.w_IndexError, + "index (%d) out of range (0<=index<%d", i, self.shape[i], + ) + item += v * self.strides[i] + return item + + @jit.unroll_safe + def _single_item_result(self, space, w_idx): + """ The result of getitem/setitem is a single item if w_idx + is a list of scalars that match the size of shape + """ + shape_len = len(self.shape) + if shape_len == 0: + if not space.isinstance_w(w_idx, space.w_int): + raise OperationError(space.w_IndexError, space.wrap( + "wrong index")) + return True + if shape_len == 1: + if space.isinstance_w(w_idx, space.w_int): + return True + if space.isinstance_w(w_idx, space.w_slice): + return False + elif (space.isinstance_w(w_idx, space.w_slice) or + space.isinstance_w(w_idx, space.w_int)): + return False + lgt = space.len_w(w_idx) + if lgt > shape_len: + raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + if lgt < shape_len: + return False + for w_item in space.fixedview(w_idx): + if space.isinstance_w(w_item, space.w_slice): + return False + return True + + @jit.unroll_safe + def _prepare_slice_args(self, space, w_idx): + if (space.isinstance_w(w_idx, space.w_int) or + space.isinstance_w(w_idx, space.w_slice)): + return [space.decode_index4(w_idx, self.shape[0])] + return [space.decode_index4(w_item, self.shape[i]) for i, w_item in + enumerate(space.fixedview(w_idx))] def descr_getitem(self, space, w_idx): - # TODO: indexing by arrays and lists - if space.isinstance_w(w_idx, space.w_tuple): - length = space.len_w(w_idx) - if length == 0: - return space.wrap(self) - if length > 1: # only one dimension for now. 
- raise OperationError(space.w_IndexError, - space.wrap("invalid index")) - w_idx = space.getitem(w_idx, space.wrap(0)) - start, stop, step, slice_length = space.decode_index4(w_idx, self.find_size()) - if step == 0: - # Single index - return self.get_concrete().eval(start).wrap(space) - else: - # Slice - new_sig = signature.Signature.find_sig([ - SingleDimSlice.signature, self.signature - ]) - res = SingleDimSlice(start, stop, step, slice_length, self, new_sig) - return space.wrap(res) + if self._single_item_result(space, w_idx): + concrete = self.get_concrete() + if len(concrete.shape) < 1: + raise OperationError(space.w_IndexError, space.wrap( + "0-d arrays can't be indexed")) + item = concrete._index_of_single_item(space, w_idx) + return concrete.getitem(item).wrap(space) + chunks = self._prepare_slice_args(space, w_idx) + return space.wrap(self.create_slice(space, chunks)) def descr_setitem(self, space, w_idx, w_value): - # TODO: indexing by arrays and lists self.invalidated() - if space.isinstance_w(w_idx, space.w_tuple): - length = space.len_w(w_idx) - if length > 1: # only one dimension for now. - raise OperationError(space.w_IndexError, - space.wrap("invalid index")) - if length == 0: - w_idx = space.newslice(space.wrap(0), - space.wrap(self.find_size()), - space.wrap(1)) + if self._single_item_result(space, w_idx): + concrete = self.get_concrete() + if len(concrete.shape) < 1: + raise OperationError(space.w_IndexError, space.wrap( + "0-d arrays can't be indexed")) + item = concrete._index_of_single_item(space, w_idx) + concrete.setitem_w(space, item, w_value) + return + if not isinstance(w_value, BaseArray): + w_value = convert_to_array(space, w_value) + chunks = self._prepare_slice_args(space, w_idx) + view = self.create_slice(space, chunks) + view.setslice(space, w_value) + + @jit.unroll_safe + def create_slice(self, space, chunks): + if len(chunks) == 1: + start, stop, step, lgt = chunks[0] + if step == 0: + shape = self.shape[1:] + strides = self.strides[1:] + backstrides = self.backstrides[1:] else: - w_idx = space.getitem(w_idx, space.wrap(0)) - start, stop, step, slice_length = space.decode_index4(w_idx, - self.find_size()) - if step == 0: - # Single index - self.get_concrete().setitem_w(space, start, w_value) + shape = [lgt] + self.shape[1:] + strides = [self.strides[0] * step] + self.strides[1:] + backstrides = [(lgt - 1) * self.strides[0] * step] + self.backstrides[1:] + start *= self.strides[0] + start += self.start else: - concrete = self.get_concrete() - if isinstance(w_value, BaseArray): - # for now we just copy if setting part of an array from - # part of itself. can be improved. 
- if (concrete.get_root_storage() == - w_value.get_concrete().get_root_storage()): - w_value = space.call_function(space.gettypefor(BaseArray), w_value) - assert isinstance(w_value, BaseArray) - else: - w_value = convert_to_array(space, w_value) - concrete.setslice(space, start, stop, step, - slice_length, w_value) + shape = [] + strides = [] + backstrides = [] + start = self.start + i = -1 + for i, (start_, stop, step, lgt) in enumerate(chunks): + if step != 0: + shape.append(lgt) + strides.append(self.strides[i] * step) + backstrides.append(self.strides[i] * (lgt - 1) * step) + start += self.strides[i] * start_ + # add a reminder + s = i + 1 + assert s >= 0 + shape += self.shape[s:] + strides += self.strides[s:] + backstrides += self.backstrides[s:] + new_sig = signature.Signature.find_sig([ + NDimSlice.signature, self.signature, + ]) + return NDimSlice(self, new_sig, start, strides[:], backstrides[:], + shape[:]) def descr_mean(self, space): - return space.wrap(space.float_w(self.descr_sum(space))/self.find_size()) + return space.wrap(space.float_w(self.descr_sum(space)) / self.find_size()) - def _sliceloop(self, start, stop, step, source, dest): - i = start - j = 0 - while (step > 0 and i < stop) or (step < 0 and i > stop): - slice_driver.jit_merge_point(signature=source.signature, step=step, - stop=stop, i=i, j=j, source=source, - dest=dest) - dest.setitem(i, source.eval(j).convert_to(dest.find_dtype())) - j += 1 - i += step + def descr_nonzero(self, space): + if self.find_size() > 1: + raise OperationError(space.w_ValueError, space.wrap( + "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) + return space.wrap(space.is_true(self.get_concrete().eval( + self.start_iter(self.shape)).wrap(space))) + + def descr_get_transpose(self, space): + concrete = self.get_concrete() + if len(concrete.shape) < 2: + return space.wrap(self) + new_sig = signature.Signature.find_sig([ + NDimSlice.signature, self.signature + ]) + strides = [] + backstrides = [] + shape = [] + for i in range(len(concrete.shape) - 1, -1, -1): + strides.append(concrete.strides[i]) + backstrides.append(concrete.backstrides[i]) + shape.append(concrete.shape[i]) + return space.wrap(NDimSlice(concrete, new_sig, self.start, strides[:], + backstrides[:], shape[:])) + + def descr_get_flatiter(self, space): + return space.wrap(W_FlatIterator(self)) + + def getitem(self, item): + raise NotImplementedError + + def start_iter(self, res_shape=None): + raise NotImplementedError + + def descr_debug_repr(self, space): + return space.wrap(self.debug_repr()) def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): @@ -309,36 +823,55 @@ return scalar_w(space, dtype, w_obj) def scalar_w(space, dtype, w_obj): + assert isinstance(dtype, interp_dtype.W_Dtype) return Scalar(dtype, dtype.unwrap(space, w_obj)) class Scalar(BaseArray): """ - Intermediate class representing a float literal. + Intermediate class representing a literal. 
""" signature = signature.BaseSignature() - _attrs_ = ["dtype", "value"] + _attrs_ = ["dtype", "value", "shape"] def __init__(self, dtype, value): - BaseArray.__init__(self) + BaseArray.__init__(self, [], 'C') self.dtype = dtype self.value = value def find_size(self): - raise ValueError + return 1 + + def get_concrete(self): + return self def find_dtype(self): return self.dtype - def eval(self, i): + def getitem(self, item): + raise NotImplementedError + + def eval(self, iter): return self.value + def start_iter(self, res_shape=None): + return ConstantIterator() + + def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): + builder.append(self.dtype.str_format(self.value)) + + def copy(self): + return Scalar(self.dtype, self.value) + + def debug_repr(self): + return 'Scalar' + class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs """ - def __init__(self, signature, res_dtype): - BaseArray.__init__(self) + def __init__(self, signature, shape, res_dtype, order): + BaseArray.__init__(self, shape, order) self.forced_result = None self.signature = signature self.res_dtype = res_dtype @@ -351,13 +884,18 @@ i = 0 signature = self.signature result_size = self.find_size() - result = SingleDimArray(result_size, self.find_dtype()) - while i < result_size: + result = NDimArray(result_size, self.shape, self.find_dtype()) + shapelen = len(self.shape) + i = self.start_iter() + ri = result.start_iter() + while not ri.done(): numpy_driver.jit_merge_point(signature=signature, - result_size=result_size, i=i, + shapelen=shapelen, + result_size=result_size, i=i, ri=ri, self=self, result=result) - result.dtype.setitem(result.storage, i, self.eval(i)) - i += 1 + result.dtype.setitem(result.storage, ri.offset, self.eval(i)) + i = i.next(shapelen) + ri = ri.next(shapelen) return result def force_if_needed(self): @@ -369,10 +907,13 @@ self.force_if_needed() return self.forced_result - def eval(self, i): + def eval(self, iter): if self.forced_result is not None: - return self.forced_result.eval(i) - return self._eval(i) + return self.forced_result.eval(iter) + return self._eval(iter) + + def getitem(self, item): + return self.get_concrete().getitem(item) def setitem(self, item, value): return self.get_concrete().setitem(item, value) @@ -388,8 +929,9 @@ class Call1(VirtualArray): - def __init__(self, signature, res_dtype, values): - VirtualArray.__init__(self, signature, res_dtype) + def __init__(self, signature, shape, res_dtype, values, order): + VirtualArray.__init__(self, signature, shape, res_dtype, + values.order) self.values = values def _del_sources(self): @@ -401,53 +943,91 @@ def _find_dtype(self): return self.res_dtype - def _eval(self, i): - val = self.values.eval(i).convert_to(self.res_dtype) - + def _eval(self, iter): + assert isinstance(iter, Call1Iterator) + val = self.values.eval(iter.child).convert_to(self.res_dtype) sig = jit.promote(self.signature) assert isinstance(sig, signature.Signature) call_sig = sig.components[0] assert isinstance(call_sig, signature.Call1) return call_sig.func(self.res_dtype, val) + def start_iter(self, res_shape=None): + if self.forced_result is not None: + return self.forced_result.start_iter(res_shape) + return Call1Iterator(self.values.start_iter(res_shape)) + + def debug_repr(self): + sig = self.signature + assert isinstance(sig, signature.Signature) + call_sig = sig.components[0] + assert isinstance(call_sig, signature.Call1) + if self.forced_result is not None: + return 'Call1(%s, forced=%s)' % 
(call_sig.name, + self.forced_result.debug_repr()) + return 'Call1(%s, %s)' % (call_sig.name, + self.values.debug_repr()) + class Call2(VirtualArray): """ Intermediate class for performing binary operations. """ - def __init__(self, signature, calc_dtype, res_dtype, left, right): - VirtualArray.__init__(self, signature, res_dtype) + def __init__(self, signature, shape, calc_dtype, res_dtype, left, right): + # XXX do something if left.order != right.order + VirtualArray.__init__(self, signature, shape, res_dtype, left.order) self.left = left self.right = right self.calc_dtype = calc_dtype + self.size = 1 + for s in self.shape: + self.size *= s def _del_sources(self): self.left = None self.right = None def _find_size(self): - try: - return self.left.find_size() - except ValueError: - pass - return self.right.find_size() + return self.size - def _eval(self, i): - lhs = self.left.eval(i).convert_to(self.calc_dtype) - rhs = self.right.eval(i).convert_to(self.calc_dtype) + def start_iter(self, res_shape=None): + if self.forced_result is not None: + return self.forced_result.start_iter(res_shape) + if res_shape is None: + res_shape = self.shape # we still force the shape on children + return Call2Iterator(self.left.start_iter(res_shape), + self.right.start_iter(res_shape)) + def _eval(self, iter): + assert isinstance(iter, Call2Iterator) + lhs = self.left.eval(iter.left).convert_to(self.calc_dtype) + rhs = self.right.eval(iter.right).convert_to(self.calc_dtype) sig = jit.promote(self.signature) assert isinstance(sig, signature.Signature) call_sig = sig.components[0] assert isinstance(call_sig, signature.Call2) return call_sig.func(self.calc_dtype, lhs, rhs) + def debug_repr(self): + sig = self.signature + assert isinstance(sig, signature.Signature) + call_sig = sig.components[0] + assert isinstance(call_sig, signature.Call2) + if self.forced_result is not None: + return 'Call2(%s, forced=%s)' % (call_sig.name, + self.forced_result.debug_repr()) + return 'Call2(%s, %s, %s)' % (call_sig.name, + self.left.debug_repr(), + self.right.debug_repr()) + class ViewArray(BaseArray): """ Class for representing views of arrays, they will reflect changes of parent arrays. Example: slices """ - def __init__(self, parent, signature): - BaseArray.__init__(self) + def __init__(self, parent, signature, strides, backstrides, shape): + self.strides = strides + self.backstrides = backstrides + BaseArray.__init__(self, shape, parent.order) self.signature = signature self.parent = parent self.invalidates = parent.invalidates @@ -459,42 +1039,38 @@ self.parent.get_concrete() return self - def eval(self, i): - return self.parent.eval(self.calc_index(i)) + def getitem(self, item): + return self.parent.getitem(item) + + def eval(self, iter): + return self.parent.getitem(iter.get_offset()) @unwrap_spec(item=int) def setitem_w(self, space, item, w_value): - return self.parent.setitem_w(space, self.calc_index(item), w_value) + return self.parent.setitem_w(space, item, w_value) def setitem(self, item, value): # This is currently not possible to be called from anywhere. 
raise NotImplementedError def descr_len(self, space): - return space.wrap(self.find_size()) + if self.shape: + return space.wrap(self.shape[0]) + return space.wrap(1) - def calc_index(self, item): - raise NotImplementedError -class SingleDimSlice(ViewArray): +class NDimSlice(ViewArray): signature = signature.BaseSignature() - def __init__(self, start, stop, step, slice_length, parent, signature): - ViewArray.__init__(self, parent, signature) - if isinstance(parent, SingleDimSlice): - self.start = parent.calc_index(start) - self.stop = parent.calc_index(stop) - self.step = parent.step * step - self.parent = parent.parent - else: - self.start = start - self.stop = stop - self.step = step - self.parent = parent - self.size = slice_length - - def get_root_storage(self): - return self.parent.get_concrete().get_root_storage() + def __init__(self, parent, signature, start, strides, backstrides, + shape): + if isinstance(parent, NDimSlice): + parent = parent.parent + ViewArray.__init__(self, parent, signature, strides, backstrides, shape) + self.start = start + self.size = 1 + for sh in shape: + self.size *= sh def find_size(self): return self.size @@ -502,20 +1078,44 @@ def find_dtype(self): return self.parent.find_dtype() - def setslice(self, space, start, stop, step, slice_length, arr): - start = self.calc_index(start) - if stop != -1: - stop = self.calc_index(stop) - step = self.step * step - self._sliceloop(start, stop, step, arr, self.parent) + def setslice(self, space, w_value): + res_shape = shape_agreement(space, self.shape, w_value.shape) + self._sliceloop(w_value, res_shape) - def calc_index(self, item): - return (self.start + item * self.step) + def _sliceloop(self, source, res_shape): + source_iter = source.start_iter(res_shape) + res_iter = self.start_iter(res_shape) + shapelen = len(res_shape) + while not res_iter.done(): + slice_driver.jit_merge_point(signature=source.signature, + shapelen=shapelen, + self=self, source=source, + res_iter=res_iter, + source_iter=source_iter) + self.setitem(res_iter.offset, source.eval(source_iter).convert_to( + self.find_dtype())) + source_iter = source_iter.next(shapelen) + res_iter = res_iter.next(shapelen) + def start_iter(self, res_shape=None): + if res_shape is not None and res_shape != self.shape: + return BroadcastIterator(self, res_shape) + if len(self.shape) == 1: + return OneDimIterator(self.start, self.strides[0], self.shape[0]) + return ViewIterator(self) -class SingleDimArray(BaseArray): - def __init__(self, size, dtype): - BaseArray.__init__(self) + def setitem(self, item, value): + self.parent.setitem(item, value) + + def debug_repr(self): + return 'Slice(%s)' % self.parent.debug_repr() + +class NDimArray(BaseArray): + """ A class representing contiguous array. 
We know that each iteration + by say ufunc will increase the data index by one + """ + def __init__(self, size, shape, dtype, order='C'): + BaseArray.__init__(self, shape, order) self.size = size self.dtype = dtype self.storage = dtype.malloc(size) @@ -524,20 +1124,32 @@ def get_concrete(self): return self - def get_root_storage(self): - return self.storage - def find_size(self): return self.size def find_dtype(self): return self.dtype - def eval(self, i): - return self.dtype.getitem(self.storage, i) + def getitem(self, item): + return self.dtype.getitem(self.storage, item) + + def eval(self, iter): + return self.dtype.getitem(self.storage, iter.get_offset()) + + def copy(self): + array = NDimArray(self.size, self.shape[:], self.dtype, self.order) + rffi.c_memcpy( + rffi.cast(rffi.VOIDP, array.storage), + rffi.cast(rffi.VOIDP, self.storage), + self.size * self.dtype.num_bytes + ) + return array def descr_len(self, space): - return space.wrap(self.size) + if len(self.shape): + return space.wrap(self.shape[0]) + raise OperationError(space.w_TypeError, space.wrap( + "len() of unsized object")) def setitem_w(self, space, item, w_value): self.invalidated() @@ -547,30 +1159,55 @@ self.invalidated() self.dtype.setitem(self.storage, item, value) - def setslice(self, space, start, stop, step, slice_length, arr): - self._sliceloop(start, stop, step, arr, self) + def start_iter(self, res_shape=None): + if self.order == 'C': + if res_shape is not None and res_shape != self.shape: + return BroadcastIterator(self, res_shape) + return ArrayIterator(self.size) + raise NotImplementedError # use ViewIterator simply, test it + + def debug_repr(self): + return 'Array' def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) - at unwrap_spec(size=int) -def zeros(space, size, w_dtype=None): +def _find_size_and_shape(space, w_size): + if space.isinstance_w(w_size, space.w_int): + size = space.int_w(w_size) + shape = [size] + else: + size = 1 + shape = [] + for w_item in space.fixedview(w_size): + item = space.int_w(w_item) + size *= item + shape.append(item) + return size, shape + +def zeros(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - return space.wrap(SingleDimArray(size, dtype=dtype)) + size, shape = _find_size_and_shape(space, w_size) + return space.wrap(NDimArray(size, shape[:], dtype=dtype)) - at unwrap_spec(size=int) -def ones(space, size, w_dtype=None): +def ones(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - - arr = SingleDimArray(size, dtype=dtype) + size, shape = _find_size_and_shape(space, w_size) + arr = NDimArray(size, shape[:], dtype=dtype) one = dtype.adapt_val(1) arr.dtype.fill(arr.storage, one, 0, size) return space.wrap(arr) +def dot(space, w_obj, w_obj2): + w_arr = convert_to_array(space, w_obj) + if isinstance(w_arr, Scalar): + return convert_to_array(space, w_obj2).descr_dot(space, w_arr) + return w_arr.descr_dot(space, w_obj2) + BaseArray.typedef = TypeDef( 'numarray', __new__ = interp2app(descr_new_array), @@ -583,6 +1220,7 @@ __pos__ = interp2app(BaseArray.descr_pos), __neg__ = interp2app(BaseArray.descr_neg), __abs__ = interp2app(BaseArray.descr_abs), + __nonzero__ = interp2app(BaseArray.descr_nonzero), __add__ = interp2app(BaseArray.descr_add), __sub__ = interp2app(BaseArray.descr_sub), @@ -607,11 +1245,15 @@ __repr__ = interp2app(BaseArray.descr_repr), 
__str__ = interp2app(BaseArray.descr_str), + __debug_repr__ = interp2app(BaseArray.descr_debug_repr), dtype = GetSetProperty(BaseArray.descr_get_dtype), shape = GetSetProperty(BaseArray.descr_get_shape), size = GetSetProperty(BaseArray.descr_get_size), + T = GetSetProperty(BaseArray.descr_get_transpose), + flat = GetSetProperty(BaseArray.descr_get_flatiter), + mean = interp2app(BaseArray.descr_mean), sum = interp2app(BaseArray.descr_sum), prod = interp2app(BaseArray.descr_prod), @@ -625,3 +1267,54 @@ copy = interp2app(BaseArray.descr_copy), ) + + +class W_FlatIterator(ViewArray): + signature = signature.BaseSignature() + + @jit.unroll_safe + def __init__(self, arr): + size = 1 + for sh in arr.shape: + size *= sh + new_sig = signature.Signature.find_sig([ + W_FlatIterator.signature, arr.signature + ]) + ViewArray.__init__(self, arr, new_sig, [arr.strides[-1]], + [arr.backstrides[-1]], [size]) + self.shapelen = len(arr.shape) + self.arr = arr + self.iter = self.start_iter() + + def start_iter(self, res_shape=None): + if res_shape is not None and res_shape != self.shape: + return BroadcastIterator(self, res_shape) + return OneDimIterator(self.arr.start, self.strides[0], + self.shape[0]) + + def find_dtype(self): + return self.arr.find_dtype() + + def find_size(self): + return self.shape[0] + + def descr_next(self, space): + if self.iter.done(): + raise OperationError(space.w_StopIteration, space.wrap('')) + result = self.eval(self.iter) + self.iter = self.iter.next(self.shapelen) + return result.wrap(space) + + def descr_iter(self): + return self + + def debug_repr(self): + return 'FlatIter(%s)' % self.arr.debug_repr() + + +W_FlatIterator.typedef = TypeDef( + 'flatiter', + next = interp2app(W_FlatIterator.descr_next), + __iter__ = interp2app(W_FlatIterator.descr_iter), +) +W_FlatIterator.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -9,7 +9,7 @@ @unwrap_spec(s=str) def fromstring(space, s): - from pypy.module.micronumpy.interp_numarray import SingleDimArray + from pypy.module.micronumpy.interp_numarray import NDimArray length = len(s) if length % FLOAT_SIZE == 0: @@ -19,7 +19,7 @@ "string length %d not divisable by %d" % (length, FLOAT_SIZE))) dtype = space.fromcache(W_Float64Dtype) - a = SingleDimArray(number, dtype=dtype) + a = NDimArray(number, [number], dtype=dtype) start = 0 end = FLOAT_SIZE @@ -31,4 +31,4 @@ start += FLOAT_SIZE end += FLOAT_SIZE - return space.wrap(a) \ No newline at end of file + return space.wrap(a) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -1,6 +1,6 @@ from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app +from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty from pypy.module.micronumpy import interp_dtype, signature from pypy.rlib import jit @@ -9,8 +9,8 @@ reduce_driver = jit.JitDriver( - greens = ["signature"], - reds = ["i", "size", "self", "dtype", "value", "obj"] + greens = ['shapelen', "signature"], + reds = ["i", "self", "dtype", "value", "obj"] ) class W_Ufunc(Wrappable): @@ -45,8 +45,10 @@ return self.call(space, __args__.arguments_w) 
def descr_reduce(self, space, w_obj): + return self.reduce(space, w_obj, multidim=False) + + def reduce(self, space, w_obj, multidim): from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar - if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) @@ -62,28 +64,33 @@ space, obj.find_dtype(), promote_to_largest=True ) - start = 0 + start = obj.start_iter(obj.shape) + shapelen = len(obj.shape) + if shapelen > 1 and not multidim: + raise OperationError(space.w_NotImplementedError, + space.wrap("not implemented yet")) if self.identity is None: if size == 0: raise operationerrfmt(space.w_ValueError, "zero-size array to " "%s.reduce without identity", self.name) - value = obj.eval(0).convert_to(dtype) - start += 1 + value = obj.eval(start).convert_to(dtype) + start = start.next(shapelen) else: value = self.identity.convert_to(dtype) new_sig = signature.Signature.find_sig([ self.reduce_signature, obj.signature ]) - return self.reduce(new_sig, start, value, obj, dtype, size).wrap(space) + return self.reduce_loop(new_sig, shapelen, start, value, obj, + dtype).wrap(space) - def reduce(self, signature, start, value, obj, dtype, size): - i = start - while i < size: - reduce_driver.jit_merge_point(signature=signature, self=self, + def reduce_loop(self, signature, shapelen, i, value, obj, dtype): + while not i.done(): + reduce_driver.jit_merge_point(signature=signature, + shapelen=shapelen, self=self, value=value, obj=obj, i=i, - dtype=dtype, size=size) + dtype=dtype) value = self.func(dtype, value, obj.eval(i).convert_to(dtype)) - i += 1 + i = i.next(shapelen) return value class W_Ufunc1(W_Ufunc): @@ -111,7 +118,7 @@ return self.func(res_dtype, w_obj.value.convert_to(res_dtype)).wrap(space) new_sig = signature.Signature.find_sig([self.signature, w_obj.signature]) - w_res = Call1(new_sig, res_dtype, w_obj) + w_res = Call1(new_sig, w_obj.shape, res_dtype, w_obj, w_obj.order) w_obj.add_invalidates(w_res) return w_res @@ -130,7 +137,7 @@ def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, - convert_to_array, Scalar) + convert_to_array, Scalar, shape_agreement) [w_lhs, w_rhs] = args_w w_lhs = convert_to_array(space, w_lhs) @@ -153,7 +160,9 @@ new_sig = signature.Signature.find_sig([ self.signature, w_lhs.signature, w_rhs.signature ]) - w_res = Call2(new_sig, calc_dtype, res_dtype, w_lhs, w_rhs) + new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) + w_res = Call2(new_sig, new_shape, calc_dtype, + res_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) return w_res @@ -310,6 +319,8 @@ ("floor", "floor", 1, {"promote_to_float": True}), ("exp", "exp", 1, {"promote_to_float": True}), + ('sqrt', 'sqrt', 1, {'promote_to_float': True}), + ("sin", "sin", 1, {"promote_to_float": True}), ("cos", "cos", 1, {"promote_to_float": True}), ("tan", "tan", 1, {"promote_to_float": True}), diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -40,13 +40,15 @@ return Signature._known_sigs.setdefault(components, Signature(components)) class Call1(BaseSignature): - _immutable_fields_ = ["func"] + _immutable_fields_ = ["func", "name"] def __init__(self, func): self.func = func + self.name = func.func_name class Call2(BaseSignature): - _immutable_fields_ = ["func"] + _immutable_fields_ = ["func", "name"] def __init__(self, func): - self.func = func \ No 
newline at end of file + self.func = func + self.name = func.func_name diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -1,6 +1,6 @@ from pypy.conftest import gettestobjspace from pypy.module.micronumpy import interp_dtype -from pypy.module.micronumpy.interp_numarray import SingleDimArray, Scalar +from pypy.module.micronumpy.interp_numarray import NDimArray, Scalar from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) @@ -13,7 +13,7 @@ def test_binop_signature(self, space): float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) - ar = SingleDimArray(10, dtype=float64_dtype) + ar = NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) assert v1.signature is not v2.signature @@ -22,7 +22,7 @@ v4 = ar.descr_add(space, ar) assert v1.signature is v4.signature - bool_ar = SingleDimArray(10, dtype=space.fromcache(interp_dtype.W_BoolDtype)) + bool_ar = NDimArray(10, [10], dtype=space.fromcache(interp_dtype.W_BoolDtype)) v5 = ar.descr_add(space, bool_ar) assert v5.signature is not v1.signature assert v5.signature is not v2.signature @@ -30,13 +30,13 @@ assert v5.signature is v6.signature def test_slice_signature(self, space): - ar = SingleDimArray(10, dtype=space.fromcache(interp_dtype.W_Float64Dtype)) - v1 = ar.descr_getitem(space, space.wrap(slice(1, 5, 1))) + ar = NDimArray(10, [10], dtype=space.fromcache(interp_dtype.W_Float64Dtype)) + v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) assert v1.signature is v2.signature - v3 = ar.descr_add(space, v1) - v4 = ar.descr_add(space, v2) + v3 = v2.descr_add(space, v1) + v4 = v1.descr_add(space, v2) assert v3.signature is v4.signature class TestUfuncCoerscion(object): diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -5,7 +5,7 @@ class TestCompiler(object): def compile(self, code): return numpy_compile(code) - + def test_vars(self): code = """ a = 2 @@ -25,7 +25,7 @@ st = interp.code.statements[0] assert st.expr.items == [FloatConstant(1), FloatConstant(2), FloatConstant(3)] - + def test_array_literal2(self): code = "a = [[1],[2],[3]]" interp = self.compile(code) @@ -102,10 +102,11 @@ code = """ a = [1,2,3,4] b = [4,5,6,5] - a + b + c = a + b + c -> 3 """ interp = self.run(code) - assert interp.results[0]._getnums(False) == ["5.0", "7.0", "9.0", "9.0"] + assert interp.results[-1].value.val == 9 def test_array_getitem(self): code = """ @@ -115,7 +116,7 @@ """ interp = self.run(code) assert interp.results[0].value.val == 3 + 6 - + def test_range_getitem(self): code = """ r = |20| + 3 @@ -161,10 +162,73 @@ assert interp.results[0].value.val == 256 def test_slice(self): - py.test.skip("in progress") interp = self.run(""" a = [1,2,3,4] b = a -> : b -> 3 """) - assert interp.results[0].value.val == 3 + assert interp.results[0].value.val == 4 + + def test_slice_step(self): + interp = self.run(""" + a = |30| + b = a -> ::2 + b -> 3 + """) + assert interp.results[0].value.val == 6 + + def test_setslice(self): + interp = self.run(""" + a = |30| + b = |10| + b[1] = 5 + a[::3] = b + a -> 3 + """) + assert interp.results[0].value.val == 5 + + + def test_slice2(self): + interp = 
self.run(""" + a = |30| + s1 = a -> 0:20:2 + s2 = a -> 0:30:3 + b = s1 + s2 + b -> 3 + """) + assert interp.results[0].value.val == 15 + + def test_multidim_getitem(self): + interp = self.run(""" + a = [[1,2]] + a -> 0 -> 1 + """) + assert interp.results[0].value.val == 2 + + def test_multidim_getitem_2(self): + interp = self.run(""" + a = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + b = a + a + b -> 1 -> 1 + """) + assert interp.results[0].value.val == 8 + + def test_set_slice(self): + interp = self.run(""" + a = |30| + b = |30| + b[:] = a + a + b -> 3 + """) + assert interp.results[0].value.val == 6 + + def test_set_slice2(self): + interp = self.run(""" + a = |30| + b = |10| + b[1] = 5.5 + c = b + b + a[0:30:3] = c + a -> 3 + """) + assert interp.results[0].value.val == 11 diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1,7 +1,164 @@ + +import py from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest +from pypy.module.micronumpy.interp_numarray import NDimArray, shape_agreement +from pypy.module.micronumpy import signature +from pypy.interpreter.error import OperationError from pypy.conftest import gettestobjspace +class MockDtype(object): + signature = signature.BaseSignature() + + def malloc(self, size): + return None + + +class TestNumArrayDirect(object): + def newslice(self, *args): + return self.space.newslice(*[self.space.wrap(arg) for arg in args]) + + def newtuple(self, *args): + args_w = [] + for arg in args: + if isinstance(arg, int): + args_w.append(self.space.wrap(arg)) + else: + args_w.append(arg) + return self.space.newtuple(args_w) + + def test_strides_f(self): + a = NDimArray(100, [10, 5, 3], MockDtype(), 'F') + assert a.strides == [1, 10, 50] + assert a.backstrides == [9, 40, 100] + + def test_strides_c(self): + a = NDimArray(100, [10, 5, 3], MockDtype(), 'C') + assert a.strides == [15, 3, 1] + assert a.backstrides == [135, 12, 2] + + def test_create_slice_f(self): + space = self.space + a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + s = a.create_slice(space, [(3, 0, 0, 1)]) + assert s.start == 3 + assert s.strides == [10, 50] + assert s.backstrides == [40, 100] + s = a.create_slice(space, [(1, 9, 2, 4)]) + assert s.start == 1 + assert s.strides == [2, 10, 50] + assert s.backstrides == [6, 40, 100] + s = a.create_slice(space, [(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) + assert s.shape == [2, 1] + assert s.strides == [3, 10] + assert s.backstrides == [3, 0] + s = a.create_slice(space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + assert s.start == 20 + assert s.shape == [10, 3] + + def test_create_slice_c(self): + space = self.space + a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + s = a.create_slice(space, [(3, 0, 0, 1)]) + assert s.start == 45 + assert s.strides == [3, 1] + assert s.backstrides == [12, 2] + s = a.create_slice(space, [(1, 9, 2, 4)]) + assert s.start == 15 + assert s.strides == [30, 3, 1] + assert s.backstrides == [90, 12, 2] + s = a.create_slice(space, [(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) + assert s.start == 19 + assert s.shape == [2, 1] + assert s.strides == [45, 3] + assert s.backstrides == [45, 0] + s = a.create_slice(space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + assert s.start == 6 + assert s.shape == [10, 3] + + def test_slice_of_slice_f(self): + space = self.space + a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + s = a.create_slice(space, [(5, 0, 0, 1)]) + 
assert s.start == 5 + s2 = s.create_slice(space, [(3, 0, 0, 1)]) + assert s2.shape == [3] + assert s2.strides == [50] + assert s2.parent is a + assert s2.backstrides == [100] + assert s2.start == 35 + s = a.create_slice(space, [(1, 5, 3, 2)]) + s2 = s.create_slice(space, [(0, 2, 1, 2), (2, 0, 0, 1)]) + assert s2.shape == [2, 3] + assert s2.strides == [3, 50] + assert s2.backstrides == [3, 100] + assert s2.start == 1 * 15 + 2 * 3 + + def test_slice_of_slice_c(self): + space = self.space + a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + s = a.create_slice(space, [(5, 0, 0, 1)]) + assert s.start == 15 * 5 + s2 = s.create_slice(space, [(3, 0, 0, 1)]) + assert s2.shape == [3] + assert s2.strides == [1] + assert s2.parent is a + assert s2.backstrides == [2] + assert s2.start == 5 * 15 + 3 * 3 + s = a.create_slice(space, [(1, 5, 3, 2)]) + s2 = s.create_slice(space, [(0, 2, 1, 2), (2, 0, 0, 1)]) + assert s2.shape == [2, 3] + assert s2.strides == [45, 1] + assert s2.backstrides == [45, 2] + assert s2.start == 1 * 15 + 2 * 3 + + def test_negative_step_f(self): + space = self.space + a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + s = a.create_slice(space, [(9, -1, -2, 5)]) + assert s.start == 9 + assert s.strides == [-2, 10, 50] + assert s.backstrides == [-8, 40, 100] + + def test_negative_step_c(self): + space = self.space + a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + s = a.create_slice(space, [(9, -1, -2, 5)]) + assert s.start == 135 + assert s.strides == [-30, 3, 1] + assert s.backstrides == [-120, 12, 2] + + def test_index_of_single_item_f(self): + a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) + assert r == 1 + 2 * 10 + 2 * 50 + s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + r = s._index_of_single_item(self.space, self.newtuple(1, 0)) + assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) + r = s._index_of_single_item(self.space, self.newtuple(1, 1)) + assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 1)) + + def test_index_of_single_item_c(self): + a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) + assert r == 1 * 3 * 5 + 2 * 3 + 2 + s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + r = s._index_of_single_item(self.space, self.newtuple(1, 0)) + assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) + r = s._index_of_single_item(self.space, self.newtuple(1, 1)) + assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 1)) + + def test_shape_agreement(self): + assert shape_agreement(self.space, [3], [3]) == [3] + assert shape_agreement(self.space, [1, 2, 3], [1, 2, 3]) == [1, 2, 3] + py.test.raises(OperationError, shape_agreement, self.space, [2], [3]) + assert shape_agreement(self.space, [4, 4], []) == [4, 4] + assert shape_agreement(self.space, + [8, 1, 6, 1], [7, 1, 5]) == [8, 7, 6, 5] + assert shape_agreement(self.space, + [5, 2], [4, 3, 5, 2]) == [4, 3, 5, 2] + + class AppTestNumArray(BaseNumpyAppTest): def test_type(self): from numpypy import array @@ -19,8 +176,7 @@ def test_size(self): from numpypy import array - # XXX fixed on multidim branch - #assert array(3).size == 1 + assert array(3).size == 1 a = array([1, 2, 3]) assert a.size == 3 assert (a + a).size == 3 @@ -50,64 +206,17 @@ b = a.copy() for i in xrange(5): assert b[i] == a[i] + a[3] = 22 + assert b[3] == 3 + + a = array(1) + assert a.copy() 
== a def test_iterator_init(self): from numpypy import array a = array(range(5)) assert a[3] == 3 - def test_repr(self): - from numpypy import array, zeros - a = array(range(5), float) - assert repr(a) == "array([0.0, 1.0, 2.0, 3.0, 4.0])" - a = array([], float) - assert repr(a) == "array([], dtype=float64)" - a = zeros(1001) - assert repr(a) == "array([0.0, 0.0, 0.0, ..., 0.0, 0.0, 0.0])" - a = array(range(5), long) - assert repr(a) == "array([0, 1, 2, 3, 4])" - a = array([], long) - assert repr(a) == "array([], dtype=int64)" - a = array([True, False, True, False], "?") - assert repr(a) == "array([True, False, True, False], dtype=bool)" - - def test_repr_slice(self): - from numpypy import array, zeros - a = array(range(5), float) - b = a[1::2] - assert repr(b) == "array([1.0, 3.0])" - a = zeros(2002) - b = a[::2] - assert repr(b) == "array([0.0, 0.0, 0.0, ..., 0.0, 0.0, 0.0])" - - def test_str(self): - from numpypy import array, zeros - a = array(range(5), float) - assert str(a) == "[0.0 1.0 2.0 3.0 4.0]" - assert str((2*a)[:]) == "[0.0 2.0 4.0 6.0 8.0]" - a = zeros(1001) - assert str(a) == "[0.0 0.0 0.0 ..., 0.0 0.0 0.0]" - - a = array(range(5), dtype=long) - assert str(a) == "[0 1 2 3 4]" - a = array([True, False, True, False], dtype="?") - assert str(a) == "[True False True False]" - - a = array(range(5), dtype="int8") - assert str(a) == "[0 1 2 3 4]" - - a = array(range(5), dtype="int16") - assert str(a) == "[0 1 2 3 4]" - - def test_str_slice(self): - from numpypy import array, zeros - a = array(range(5), float) - b = a[1::2] - assert str(b) == "[1.0 3.0]" - a = zeros(2002) - b = a[::2] - assert str(b) == "[0.0 0.0 0.0 ..., 0.0 0.0 0.0]" - def test_getitem(self): from numpypy import array a = array(range(5)) @@ -140,8 +249,8 @@ a = array(range(5)) raises(IndexError, "a[(1,2)] = [0,1]") for i in xrange(5): - a[(i,)] = i+1 - assert a[i] == i+1 + a[(i,)] = i + 1 + assert a[i] == i + 1 a[()] = range(5) for i in xrange(5): assert a[i] == i @@ -154,7 +263,7 @@ assert a[1] == 0. assert a[3] == 1. b[::-1] = b - assert b[0] == 1. + assert b[0] == 0. assert b[1] == 0. def test_setslice_of_slice_array(self): @@ -171,7 +280,7 @@ assert a[3] == 1. assert a[4] == 11. a = zeros(10) - a[::2][::-1][::2] = array(range(1,4)) + a[::2][::-1][::2] = array(range(1, 4)) assert a[8] == 1. assert a[4] == 2. assert a[0] == 3. @@ -191,6 +300,15 @@ assert a[1] == 0. assert a[3] == 0. 
+ def test_scalar(self): + from numpypy import array, dtype + a = array(3) + #assert a[0] == 3 + raises(IndexError, "a[0]") + assert a.size == 1 + assert a.shape == () + assert a.dtype is dtype(int) + def test_len(self): from numpypy import array a = array(range(5)) @@ -222,7 +340,7 @@ def test_add_other(self): from numpypy import array a = array(range(5)) - b = array(range(4, -1, -1)) + b = array([i for i in reversed(range(5))]) c = a + b for i in range(5): assert c[i] == 4 @@ -346,8 +464,10 @@ a = array(range(5), float) b = a ** a for i in range(5): - print b[i], i**i - assert b[i] == i**i + assert b[i] == i ** i + + a = array(range(5)) + assert (a ** 2 == a * a).all() def test_pow_other(self): from numpypy import array @@ -366,7 +486,7 @@ def test_mod(self): from numpypy import array - a = array(range(1,6)) + a = array(range(1, 6)) b = a % a for i in range(5): assert b[i] == 0 @@ -394,7 +514,7 @@ def test_pos(self): from numpypy import array - a = array([1.,-2.,3.,-4.,-5.]) + a = array([1., -2., 3., -4., -5.]) b = +a for i in range(5): assert b[i] == a[i] @@ -405,7 +525,7 @@ def test_neg(self): from numpypy import array - a = array([1.,-2.,3.,-4.,-5.]) + a = array([1., -2., 3., -4., -5.]) b = -a for i in range(5): assert b[i] == -a[i] @@ -416,7 +536,7 @@ def test_abs(self): from numpypy import array - a = array([1.,-2.,3.,-4.,-5.]) + a = array([1., -2., 3., -4., -5.]) b = abs(a) for i in range(5): assert b[i] == abs(a[i]) @@ -445,7 +565,7 @@ s = a[1:5] assert len(s) == 4 for i in range(4): - assert s[i] == a[i+1] + assert s[i] == a[i + 1] s = (a + a)[1:2] assert len(s) == 1 @@ -459,7 +579,7 @@ s = a[1:9:2] assert len(s) == 4 for i in range(4): - assert s[i] == a[2*i+1] + assert s[i] == a[2 * i + 1] def test_slice_update(self): from numpypy import array @@ -470,13 +590,12 @@ a[2] = 20 assert s[2] == 20 - def test_slice_invaidate(self): # check that slice shares invalidation list with from numpypy import array a = array(range(5)) s = a[0:2] - b = array([10,11]) + b = array([10, 11]) c = s + b a[0] = 100 assert c[0] == 10 @@ -503,7 +622,7 @@ def test_prod(self): from numpypy import array - a = array(range(1,6)) + a = array(range(1, 6)) assert a.prod() == 120.0 assert a[:4].prod() == 24.0 @@ -517,7 +636,7 @@ def test_max_add(self): from numpypy import array a = array([-1.2, 3.4, 5.7, -3.0, 2.7]) - assert (a+a).max() == 11.4 + assert (a + a).max() == 11.4 def test_min(self): from numpypy import array @@ -529,12 +648,23 @@ def test_argmax(self): from numpypy import array a = array([-1.2, 3.4, 5.7, -3.0, 2.7]) - assert a.argmax() == 2 + r = a.argmax() + assert r == 2 b = array([]) - raises(ValueError, "b.argmax()") + raises(ValueError, b.argmax) a = array(range(-5, 5)) - assert a.argmax() == 9 + r = a.argmax() + assert r == 9 + b = a[::2] + r = b.argmax() + assert r == 4 + r = (a + a).argmax() + assert r == 9 + a = array([1, 0, 0]) + assert a.argmax() == 0 + a = array([0, 0, 1]) + assert a.argmax() == 2 def test_argmin(self): from numpypy import array @@ -562,12 +692,14 @@ assert c.any() == False def test_dot(self): - from numpypy import array + from numpypy import array, dot a = array(range(5)) assert a.dot(a) == 30.0 a = array(range(5)) assert a.dot(range(5)) == 30 + assert dot(range(5), range(5)) == 30 + assert (dot(5, [1, 2, 3]) == [5, 10, 15]).all() def test_dot_constant(self): from numpypy import array @@ -608,6 +740,267 @@ for i in xrange(5): assert c[i] == func(b[i], 3) + def test_nonzero(self): + from numpypy import array + a = array([1, 2]) + raises(ValueError, bool, a) + 
raises(ValueError, bool, a == a) + assert bool(array(1)) + assert not bool(array(0)) + assert bool(array([1])) + assert not bool(array([0])) + + def test_slice_assignment(self): + from numpypy import array + a = array(range(5)) + a[::-1] = a + assert (a == [0, 1, 2, 1, 0]).all() + # but we force intermediates + a = array(range(5)) + a[::-1] = a + a + assert (a == [8, 6, 4, 2, 0]).all() + +class AppTestMultiDim(BaseNumpyAppTest): + def test_init(self): + import numpypy + a = numpypy.zeros((2, 2)) + assert len(a) == 2 + + def test_shape(self): + import numpypy + assert numpypy.zeros(1).shape == (1,) + assert numpypy.zeros((2, 2)).shape == (2, 2) + assert numpypy.zeros((3, 1, 2)).shape == (3, 1, 2) + assert numpypy.array([[1], [2], [3]]).shape == (3, 1) + assert len(numpypy.zeros((3, 1, 2))) == 3 + raises(TypeError, len, numpypy.zeros(())) + raises(ValueError, numpypy.array, [[1, 2], 3]) + + def test_getsetitem(self): + import numpypy + a = numpypy.zeros((2, 3, 1)) + raises(IndexError, a.__getitem__, (2, 0, 0)) + raises(IndexError, a.__getitem__, (0, 3, 0)) + raises(IndexError, a.__getitem__, (0, 0, 1)) + assert a[1, 1, 0] == 0 + a[1, 2, 0] = 3 + assert a[1, 2, 0] == 3 + assert a[1, 1, 0] == 0 + assert a[1, -1, 0] == 3 + + def test_slices(self): + import numpypy + a = numpypy.zeros((4, 3, 2)) + raises(IndexError, a.__getitem__, (4,)) + raises(IndexError, a.__getitem__, (3, 3)) + raises(IndexError, a.__getitem__, (slice(None), 3)) + a[0, 1, 1] = 13 + a[1, 2, 1] = 15 + b = a[0] + assert len(b) == 3 + assert b.shape == (3, 2) + assert b[1, 1] == 13 + b = a[1] + assert b.shape == (3, 2) + assert b[2, 1] == 15 + b = a[:, 1] + assert b.shape == (4, 2) + assert b[0, 1] == 13 + b = a[:, 1, :] + assert b.shape == (4, 2) + assert b[0, 1] == 13 + b = a[1, 2] + assert b[1] == 15 + b = a[:] + assert b.shape == (4, 3, 2) + assert b[1, 2, 1] == 15 + assert b[0, 1, 1] == 13 + b = a[:][:, 1][:] + assert b[2, 1] == 0.0 + assert b[0, 1] == 13 + raises(IndexError, b.__getitem__, (4, 1)) + assert a[0][1][1] == 13 + assert a[1][2][1] == 15 + + def test_init_2(self): + import numpypy + raises(ValueError, numpypy.array, [[1], 2]) + raises(ValueError, numpypy.array, [[1, 2], [3]]) + raises(ValueError, numpypy.array, [[[1, 2], [3, 4], 5]]) + raises(ValueError, numpypy.array, [[[1, 2], [3, 4], [5]]]) + a = numpypy.array([[1, 2], [4, 5]]) + assert a[0, 1] == 2 + assert a[0][1] == 2 + a = numpypy.array(([[[1, 2], [3, 4], [5, 6]]])) + assert (a[0, 1] == [3, 4]).all() + + def test_setitem_slice(self): + import numpypy + a = numpypy.zeros((3, 4)) + a[1] = [1, 2, 3, 4] + assert a[1, 2] == 3 + raises(TypeError, a[1].__setitem__, [1, 2, 3]) + a = numpypy.array([[1, 2], [3, 4]]) + assert (a == [[1, 2], [3, 4]]).all() + a[1] = numpypy.array([5, 6]) + assert (a == [[1, 2], [5, 6]]).all() + a[:, 1] = numpypy.array([8, 10]) + assert (a == [[1, 8], [5, 10]]).all() + a[0, :: -1] = numpypy.array([11, 12]) + assert (a == [[12, 11], [5, 10]]).all() + + def test_ufunc(self): + from numpypy import array + a = array([[1, 2], [3, 4], [5, 6]]) + assert ((a + a) == \ + array([[1 + 1, 2 + 2], [3 + 3, 4 + 4], [5 + 5, 6 + 6]])).all() + + def test_getitem_add(self): + from numpypy import array + a = array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]) + assert (a + a)[1, 1] == 8 + + def test_ufunc_negative(self): + from numpypy import array, negative + a = array([[1, 2], [3, 4]]) + b = negative(a + a) + assert (b == [[-2, -4], [-6, -8]]).all() + + def test_getitem_3(self): + from numpypy import array + a = array([[1, 2], [3, 4], [5, 6], [7, 8], + [9, 
10], [11, 12], [13, 14]]) + b = a[::2] + print a + print b + assert (b == [[1, 2], [5, 6], [9, 10], [13, 14]]).all() + c = b + b + assert c[1][1] == 12 + + def test_multidim_ones(self): + from numpypy import ones + a = ones((1, 2, 3)) + assert a[0, 1, 2] == 1.0 + + def test_broadcast_ufunc(self): + from numpypy import array + a = array([[1, 2], [3, 4], [5, 6]]) + b = array([5, 6]) + c = ((a + b) == [[1 + 5, 2 + 6], [3 + 5, 4 + 6], [5 + 5, 6 + 6]]) + assert c.all() + + def test_broadcast_setslice(self): + from numpypy import zeros, ones + a = zeros((100, 100)) + b = ones(100) + a[:, :] = b + assert a[13, 15] == 1 + + def test_broadcast_shape_agreement(self): + from numpypy import zeros, array + a = zeros((3, 1, 3)) + b = array(((10, 11, 12), (20, 21, 22), (30, 31, 32))) + c = ((a + b) == [b, b, b]) + assert c.all() + a = array((((10, 11, 12), ), ((20, 21, 22), ), ((30, 31, 32), ))) + assert(a.shape == (3, 1, 3)) + d = zeros((3, 3)) + c = ((a + d) == [b, b, b]) + c = ((a + d) == array([[[10., 11., 12.]] * 3, + [[20., 21., 22.]] * 3, [[30., 31., 32.]] * 3])) + assert c.all() + + def test_broadcast_scalar(self): + from numpypy import zeros + a = zeros((4, 5), 'd') + a[:, 1] = 3 + assert a[2, 1] == 3 + assert a[0, 2] == 0 + a[0, :] = 5 + assert a[0, 3] == 5 + assert a[2, 1] == 3 + assert a[3, 2] == 0 + + def test_broadcast_call2(self): + from numpypy import zeros, ones + a = zeros((4, 1, 5)) + b = ones((4, 3, 5)) + b[:] = (a + a) + assert (b == zeros((4, 3, 5))).all() + + def test_argmax(self): + from numpypy import array + a = array([[1, 2], [3, 4], [5, 6]]) + assert a.argmax() == 5 + assert a[:2, ].argmax() == 3 + + def test_broadcast_wrong_shapes(self): + from numpypy import zeros + a = zeros((4, 3, 2)) + b = zeros((4, 2)) + exc = raises(ValueError, lambda: a + b) + assert str(exc.value) == "operands could not be broadcast" \ + " together with shapes (4,3,2) (4,2)" + + def test_reduce(self): + from numpypy import array + a = array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) + assert a.sum() == (13 * 12) / 2 + b = a[1:, 1::2] + c = b + b + assert c.sum() == (6 + 8 + 10 + 12) * 2 + + def test_transpose(self): + from numpypy import array + a = array(((range(3), range(3, 6)), + (range(6, 9), range(9, 12)), + (range(12, 15), range(15, 18)), + (range(18, 21), range(21, 24)))) + assert a.shape == (4, 2, 3) + b = a.T + assert b.shape == (3, 2, 4) + assert(b[0, :, 0] == [0, 3]).all() + b[:, 0, 0] = 1000 + assert(a[0, 0, :] == [1000, 1000, 1000]).all() + a = array(range(5)) + b = a.T + assert(b == range(5)).all() + a = array((range(10), range(20, 30))) + b = a.T + assert(b[:, 0] == a[0, :]).all() + + def test_flatiter(self): + from numpypy import array, flatiter + a = array([[10, 30], [40, 60]]) + f_iter = a.flat + assert f_iter.next() == 10 + assert f_iter.next() == 30 + assert f_iter.next() == 40 + assert f_iter.next() == 60 + raises(StopIteration, "f_iter.next()") + raises(TypeError, "flatiter()") + s = 0 + for k in a.flat: + s += k + assert s == 140 + + def test_flatiter_array_conv(self): + from numpypy import array, dot + a = array([1, 2, 3]) + assert dot(a.flat, a.flat) == 14 + + def test_debug_repr(self): + from numpypy import zeros, sin + a = zeros(1) + assert a.__debug_repr__() == 'Array' + assert (a + a).__debug_repr__() == 'Call2(add, Array, Array)' + assert (a[::2]).__debug_repr__() == 'Slice(Array)' + assert (a + 2).__debug_repr__() == 'Call2(add, Array, Scalar)' + assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, FlatIter(Array))' + assert sin(a).__debug_repr__() == 
'Call1(sin, Array)' + b = a + a + b[0] = 3 + assert b.__debug_repr__() == 'Call2(add, forced=Array)' class AppTestSupport(object): def setup_class(cls): @@ -621,3 +1014,119 @@ for i in range(4): assert a[i] == i + 1 raises(ValueError, fromstring, "abc") + + +class AppTestRepr(BaseNumpyAppTest): + def test_repr(self): + from numpypy import array, zeros + a = array(range(5), float) + assert repr(a) == "array([0.0, 1.0, 2.0, 3.0, 4.0])" + a = array([], float) + assert repr(a) == "array([], dtype=float64)" + a = zeros(1001) + assert repr(a) == "array([0.0, 0.0, 0.0, ..., 0.0, 0.0, 0.0])" + a = array(range(5), long) + assert repr(a) == "array([0, 1, 2, 3, 4])" + a = array([], long) + assert repr(a) == "array([], dtype=int64)" + a = array([True, False, True, False], "?") + assert repr(a) == "array([True, False, True, False], dtype=bool)" + + def test_repr_multi(self): + from numpypy import array, zeros + a = zeros((3, 4)) + assert repr(a) == '''array([[0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0]])''' + a = zeros((2, 3, 4)) + assert repr(a) == '''array([[[0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0]], + + [[0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0]]])''' + + def test_repr_slice(self): + from numpypy import array, zeros + a = array(range(5), float) + b = a[1::2] + assert repr(b) == "array([1.0, 3.0])" + a = zeros(2002) + b = a[::2] + assert repr(b) == "array([0.0, 0.0, 0.0, ..., 0.0, 0.0, 0.0])" + a = array((range(5), range(5, 10)), dtype="int16") + b = a[1, 2:] + assert repr(b) == "array([7, 8, 9], dtype=int16)" + # an empty slice prints its shape + b = a[2:1, ] + assert repr(b) == "array([], shape=(0, 5), dtype=int16)" + + def test_str(self): + from numpypy import array, zeros + a = array(range(5), float) + assert str(a) == "[0.0 1.0 2.0 3.0 4.0]" + assert str((2 * a)[:]) == "[0.0 2.0 4.0 6.0 8.0]" + a = zeros(1001) + assert str(a) == "[0.0 0.0 0.0 ..., 0.0 0.0 0.0]" + + a = array(range(5), dtype=long) + assert str(a) == "[0 1 2 3 4]" + a = array([True, False, True, False], dtype="?") + assert str(a) == "[True False True False]" + + a = array(range(5), dtype="int8") + assert str(a) == "[0 1 2 3 4]" + + a = array(range(5), dtype="int16") + assert str(a) == "[0 1 2 3 4]" + + a = array((range(5), range(5, 10)), dtype="int16") + assert str(a) == "[[0 1 2 3 4]\n [5 6 7 8 9]]" + + a = array(3, dtype=int) + assert str(a) == "3" + + a = zeros((400, 400), dtype=int) + assert str(a) == "[[0 0 0 ..., 0 0 0]\n [0 0 0 ..., 0 0 0]\n" \ + " [0 0 0 ..., 0 0 0]\n ..., \n [0 0 0 ..., 0 0 0]\n" \ + " [0 0 0 ..., 0 0 0]\n [0 0 0 ..., 0 0 0]]" + a = zeros((2, 2, 2)) + r = str(a) + assert r == '[[[0.0 0.0]\n [0.0 0.0]]\n\n [[0.0 0.0]\n [0.0 0.0]]]' + + def test_str_slice(self): + from numpypy import array, zeros + a = array(range(5), float) + b = a[1::2] + assert str(b) == "[1.0 3.0]" + a = zeros(2002) + b = a[::2] + assert str(b) == "[0.0 0.0 0.0 ..., 0.0 0.0 0.0]" + a = array((range(5), range(5, 10)), dtype="int16") + b = a[1, 2:] + assert str(b) == "[7 8 9]" + b = a[2:1, ] + assert str(b) == "[]" + + +class AppTestRanges(BaseNumpyAppTest): + def test_arange(self): + from numpypy import arange, array, dtype + a = arange(3) + assert (a == [0, 1, 2]).all() + assert a.dtype is dtype(int) + a = arange(3.0) + assert (a == [0., 1., 2.]).all() + assert a.dtype is dtype(float) + a = arange(3, 7) + assert (a == [3, 4, 5, 6]).all() + assert a.dtype is dtype(int) + a = arange(3, 7, 2) + assert (a == [3, 5]).all() + a = arange(3, dtype=float) + assert (a 
== [0., 1., 2.]).all() + assert a.dtype is dtype(float) + a = arange(0, 0.8, 0.1) + assert len(a) == 8 + assert arange(False, True, True).dtype is dtype(int) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -111,6 +111,8 @@ for i in range(3): assert c[i] == a[i] / b[i] + assert (divide(array([-10]), array([2])) == array([-5])).all() + def test_fabs(self): from numpypy import array, fabs from math import fabs as math_fabs @@ -319,6 +321,17 @@ for v in [1.0, -1.0]: assert arctanh(v) == math.copysign(float("inf"), v) + def test_sqrt(self): + import math + from numpypy import sqrt + + nan, inf = float("nan"), float("inf") + data = [1, 2, 3, inf] + results = [math.sqrt(1), math.sqrt(2), math.sqrt(3), inf] + assert (sqrt(data) == results).all() + assert math.isnan(sqrt(-1)) + assert math.isnan(sqrt(nan)) + def test_reduce_errors(self): from numpypy import sin, add diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -1,29 +1,56 @@ + +""" Tests that check if JIT-compiled numpy operations produce reasonably +good assembler +""" + +import py + +from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.test.support import LLJitMixin +from pypy.jit.metainterp.warmspot import reset_stats from pypy.module.micronumpy import interp_ufuncs, signature -from pypy.module.micronumpy.compile import (FakeSpace, - FloatObject, IntObject, numpy_compile, BoolObject) -from pypy.module.micronumpy.interp_numarray import (SingleDimArray, - SingleDimSlice) +from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, + FloatObject, IntObject, BoolObject, Parser, InterpreterState) +from pypy.module.micronumpy.interp_numarray import NDimArray, NDimSlice,\ + BaseArray from pypy.rlib.nonconst import NonConstant from pypy.rpython.annlowlevel import llstr, hlstr -from pypy.jit.metainterp.warmspot import reset_stats -from pypy.jit.metainterp import pyjitpl - -import py class TestNumpyJIt(LLJitMixin): graph = None interp = None - - def run(self, code): + + def setup_class(cls): + default = """ + a = [1,2,3,4] + c = a + b + sum(c) -> 1::1 + a -> 3:1:2 + """ + + d = {} + p = Parser() + allcodes = [p.parse(default)] + for name, meth in cls.__dict__.iteritems(): + if name.startswith("define_"): + code = meth() + d[name[len("define_"):]] = len(allcodes) + allcodes.append(p.parse(code)) + cls.code_mapping = d + cls.codes = allcodes + + def run(self, name): space = FakeSpace() - - def f(code): - interp = numpy_compile(hlstr(code)) + i = self.code_mapping[name] + codes = self.codes + + def f(i): + interp = InterpreterState(codes[i]) interp.run(space) res = interp.results[-1] - w_res = res.eval(0).wrap(interp.space) + assert isinstance(res, BaseArray) + w_res = res.eval(res.start_iter()).wrap(interp.space) if isinstance(w_res, BoolObject): return float(w_res.boolval) elif isinstance(w_res, FloatObject): @@ -34,62 +61,73 @@ return -42. 
if self.graph is None: - interp, graph = self.meta_interp(f, [llstr(code)], + interp, graph = self.meta_interp(f, [i], listops=True, backendopt=True, graph_and_interp_only=True) self.__class__.interp = interp self.__class__.graph = graph - reset_stats() pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() - return self.interp.eval_graph(self.graph, [llstr(code)]) + return self.interp.eval_graph(self.graph, [i]) - def test_add(self): - result = self.run(""" + def define_add(): + return """ a = |30| b = a + a b -> 3 - """) - self.check_loops({'getarrayitem_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, 'int_add': 1, - 'int_lt': 1, 'guard_true': 1, 'jump': 1}) + """ + + def test_add(self): + result = self.run("add") + self.check_simple_loop({'getarrayitem_raw': 2, 'float_add': 1, + 'setarrayitem_raw': 1, 'int_add': 3, + 'int_ge': 1, 'guard_false': 1, 'jump': 1}) assert result == 3 + 3 - def test_floatadd(self): - result = self.run(""" + def define_float_add(): + return """ a = |30| + 3 a -> 3 - """) + """ + + def test_floatadd(self): + result = self.run("float_add") assert result == 3 + 3 - self.check_loops({"getarrayitem_raw": 1, "float_add": 1, - "setarrayitem_raw": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getarrayitem_raw": 1, "float_add": 1, + "setarrayitem_raw": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) - def test_sum(self): - result = self.run(""" + def define_sum(): + return """ a = |30| b = a + a sum(b) - """) + """ + + def test_sum(self): + result = self.run("sum") assert result == 2 * sum(range(30)) - self.check_loops({"getarrayitem_raw": 2, "float_add": 2, - "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 2, + "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) - def test_prod(self): - result = self.run(""" + def define_prod(): + return """ a = |30| b = a + a prod(b) - """) + """ + + def test_prod(self): + result = self.run("prod") expected = 1 for i in range(30): expected *= i * 2 assert result == expected - self.check_loops({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) def test_max(self): py.test.skip("broken, investigate") @@ -100,7 +138,7 @@ max(b) """) assert result == 256 - self.check_loops({"getarrayitem_raw": 2, "float_add": 1, + self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, "float_mul": 1, "int_add": 1, "int_lt": 1, "guard_true": 1, "jump": 1}) @@ -113,54 +151,67 @@ min(b) """) assert result == -24 - self.check_loops({"getarrayitem_raw": 2, "float_add": 1, + self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, "float_mul": 1, "int_add": 1, "int_lt": 1, "guard_true": 1, "jump": 1}) - def test_any(self): - result = self.run(""" + def define_any(): + return """ a = [0,0,0,0,0,0,0,0,0,0,0] a[8] = -12 b = a + a any(b) - """) + """ + + def test_any(self): + result = self.run("any") assert result == 1 - self.check_loops({"getarrayitem_raw": 2, "float_add": 1, - "float_ne": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1, - "guard_false": 1}) + self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, + "float_ne": 1, "int_add": 2, + "int_ge": 1, "jump": 1, + "guard_false": 2}) - def test_already_forced(self): - result = self.run(""" + def define_already_forced(): + return """ a 
= |30| b = a + 4.5 b -> 5 # forces c = b * 8 c -> 5 - """) + """ + + def test_already_forced(self): + result = self.run("already_forced") assert result == (5 + 4.5) * 8 # This is the sum of the ops for both loops, however if you remove the # optimization then you end up with 2 float_adds, so we can still be # sure it was optimized correctly. - self.check_loops({"getarrayitem_raw": 2, "float_mul": 1, "float_add": 1, - "setarrayitem_raw": 2, "int_add": 2, - "int_lt": 2, "guard_true": 2, "jump": 2}) + # XXX the comment above is wrong now. We need preferrably a way to + # count the two loops separately + self.check_resops({'setarrayitem_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 35, + 'guard_class': 22, 'int_add': 8, 'float_mul': 2, + 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, + 'getarrayitem_raw': 4, 'float_add': 2, 'guard_false': 4, + 'guard_value': 2}) - def test_ufunc(self): - result = self.run(""" + def define_ufunc(): + return """ a = |30| b = a + a c = unegative(b) c -> 3 - """) + """ + + def test_ufunc(self): + result = self.run("ufunc") assert result == -6 - self.check_loops({"getarrayitem_raw": 2, "float_add": 1, "float_neg": 1, - "setarrayitem_raw": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1, + self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, "float_neg": 1, + "setarrayitem_raw": 1, "int_add": 3, + "int_ge": 1, "guard_false": 1, "jump": 1, }) - def test_specialization(self): - self.run(""" + def define_specialization(): + return """ a = |30| b = a + a c = unegative(b) @@ -177,77 +228,119 @@ d = a * a unegative(d) d -> 3 - """) + """ + + def test_specialization(self): + self.run("specialization") # This is 3, not 2 because there is a bridge for the exit. self.check_loop_count(3) + def define_slice(): + return """ + a = |30| + b = a -> ::3 + c = b + b + c -> 3 + """ + + def test_slice(self): + result = self.run("slice") + assert result == 18 + self.check_simple_loop({'getarrayitem_raw': 2, + 'float_add': 1, + 'setarrayitem_raw': 1, + 'int_add': 3, + 'int_ge': 1, 'guard_false': 1, + 'jump': 1}) + + def define_slice2(): + return """ + a = |30| + s1 = a -> :20:2 + s2 = a -> :30:3 + b = s1 + s2 + b -> 3 + """ + + def test_slice2(self): + result = self.run("slice2") + assert result == 15 + self.check_simple_loop({'getarrayitem_raw': 2, 'float_add': 1, + 'setarrayitem_raw': 1, 'int_add': 3, + 'int_ge': 1, 'guard_false': 1, 'jump': 1}) + + def define_multidim(): + return """ + a = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + b = a + a + b -> 1 -> 1 + """ + + def test_multidim(self): + result = self.run('multidim') + assert result == 8 + self.check_simple_loop({'float_add': 1, 'getarrayitem_raw': 2, + 'guard_false': 1, 'int_add': 3, 'int_ge': 1, + 'jump': 1, 'setarrayitem_raw': 1}) + # int_add might be 1 here if we try slightly harder with + # reusing indexes or some optimization + + def define_multidim_slice(): + return """ + a = [[1, 2, 3, 4], [3, 4, 5, 6], [5, 6, 7, 8], [7, 8, 9, 10], [9, 10, 11, 12], [11, 12, 13, 14], [13, 14, 15, 16], [16, 17, 18, 19]] + b = a -> ::2 + c = b + b + c -> 1 -> 1 + """ + + def test_multidim_slice(self): + result = self.run('multidim_slice') + assert result == 12 + py.test.skip("improve") + # XXX the bridge here is scary. 
Hopefully jit-targets will fix that, + # otherwise it looks kind of good + self.check_simple_loop({}) + + def define_broadcast(): + return """ + a = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] + b = [1, 2, 3, 4] + c = a + b + c -> 1 -> 2 + """ + + def test_broadcast(self): + result = self.run("broadcast") + assert result == 10 + py.test.skip("improve") + self.check_simple_loop({}) + + def define_setslice(): + return """ + a = |30| + b = |10| + b[1] = 5.5 + c = b + b + a[0:30:3] = c + a -> 3 + """ + + def test_setslice(self): + result = self.run("setslice") + assert result == 11.0 + self.check_loop_count(1) + self.check_simple_loop({'getarrayitem_raw': 2, 'float_add' : 1, + 'setarrayitem_raw': 1, 'int_add': 3, + 'int_eq': 1, 'guard_false': 1, 'jump': 1}) class TestNumpyOld(LLJitMixin): def setup_class(cls): + py.test.skip("old") from pypy.module.micronumpy.compile import FakeSpace from pypy.module.micronumpy.interp_dtype import W_Float64Dtype - + cls.space = FakeSpace() cls.float64_dtype = cls.space.fromcache(W_Float64Dtype) - - def test_slice(self): - def f(i): - step = 3 - ar = SingleDimArray(step*i, dtype=self.float64_dtype) - new_sig = signature.Signature.find_sig([ - SingleDimSlice.signature, ar.signature - ]) - s = SingleDimSlice(0, step*i, step, i, ar, new_sig) - v = interp_ufuncs.get(self.space).add.call(self.space, [s, s]) - return v.get_concrete().eval(3).val - - result = self.meta_interp(f, [5], listops=True, backendopt=True) - self.check_loops({'int_mul': 1, 'getarrayitem_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, 'int_add': 1, - 'int_lt': 1, 'guard_true': 1, 'jump': 1}) - assert result == f(5) - - def test_slice2(self): - def f(i): - step1 = 2 - step2 = 3 - ar = SingleDimArray(step2*i, dtype=self.float64_dtype) - new_sig = signature.Signature.find_sig([ - SingleDimSlice.signature, ar.signature - ]) - s1 = SingleDimSlice(0, step1*i, step1, i, ar, new_sig) - new_sig = signature.Signature.find_sig([ - SingleDimSlice.signature, s1.signature - ]) - s2 = SingleDimSlice(0, step2*i, step2, i, ar, new_sig) - v = interp_ufuncs.get(self.space).add.call(self.space, [s1, s2]) - return v.get_concrete().eval(3).val - - result = self.meta_interp(f, [5], listops=True, backendopt=True) - self.check_loops({'int_mul': 2, 'getarrayitem_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, 'int_add': 1, - 'int_lt': 1, 'guard_true': 1, 'jump': 1}) - assert result == f(5) - - def test_setslice(self): - space = self.space - float64_dtype = self.float64_dtype - - def f(i): - step = NonConstant(3) - ar = SingleDimArray(step*i, dtype=float64_dtype) - ar2 = SingleDimArray(i, dtype=float64_dtype) - ar2.get_concrete().setitem(1, float64_dtype.box(5.5)) - arg = ar2.descr_add(space, ar2) - ar.setslice(space, 0, step*i, step, i, arg) - return ar.get_concrete().eval(3).val - - result = self.meta_interp(f, [5], listops=True, backendopt=True) - self.check_loops({'getarrayitem_raw': 2, - 'float_add' : 1, - 'setarrayitem_raw': 1, 'int_add': 2, - 'int_lt': 1, 'guard_true': 1, 'jump': 1}) - assert result == 11.0 def test_int32_sum(self): py.test.skip("pypy/jit/backend/llimpl.py needs to be changed to " @@ -262,7 +355,7 @@ dtype = float64_dtype else: dtype = int32_dtype - ar = SingleDimArray(n, dtype=dtype) + ar = NDimArray(n, [n], dtype=dtype) i = 0 while i < n: ar.get_concrete().setitem(i, int32_dtype.box(7)) diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -3,7 +3,7 @@ from pypy.interpreter.typedef 
import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped from pypy.rlib import rmmap -from pypy.rlib.rmmap import RValueError, RTypeError, ROverflowError +from pypy.rlib.rmmap import RValueError, RTypeError class W_MMap(Wrappable): @@ -212,8 +212,6 @@ raise OperationError(space.w_ValueError, space.wrap(e.message)) except RTypeError, e: raise OperationError(space.w_TypeError, space.wrap(e.message)) - except ROverflowError, e: - raise OperationError(space.w_OverflowError, space.wrap(e.message)) return space.wrap(self) elif rmmap._MS_WINDOWS: @@ -233,8 +231,6 @@ raise OperationError(space.w_ValueError, space.wrap(e.message)) except RTypeError, e: raise OperationError(space.w_TypeError, space.wrap(e.message)) - except ROverflowError, e: - raise OperationError(space.w_OverflowError, space.wrap(e.message)) return space.wrap(self) W_MMap.typedef = TypeDef("mmap", diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -271,7 +271,7 @@ thread_ticker_check = """ guard_not_invalidated? ticker0 = getfield_raw(ticker_address, descr=) - ticker1 = int_sub(ticker0, 1) + ticker1 = int_sub(ticker0, _) setfield_raw(ticker_address, ticker1, descr=) ticker_cond0 = int_lt(ticker1, 0) guard_false(ticker_cond0, descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -334,26 +334,27 @@ log = self.run(main, [1000]) assert log.result == (1000, 998) loop, = log.loops_by_filename(self.filepath) + # the int strategy is used here assert loop.match_by_id('append', """ i13 = getfield_gc(p8, descr=) i15 = int_add(i13, 1) # Will be killed by the backend - i17 = arraylen_gc(p7, descr=) - call(ConstClass(_ll_list_resize_ge), p8, i15, descr=) + p15 = getfield_gc(p8, descr=) + i17 = arraylen_gc(p15, descr=) + call(_, p8, i15, descr=) # this is a call to _ll_list_resize_ge_trampoline__... guard_no_exception(descr=...) p17 = getfield_gc(p8, descr=) - p19 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p19, i12, descr=) - setarrayitem_gc(p17, i13, p19, descr=) + setarrayitem_gc(p17, i13, i12, descr=) """) def test_blockstack_virtualizable(self): def main(n): from pypyjit import residual_call + l = len i = 0 while i < n: try: - residual_call(len, []) # ID: call + residual_call(l, []) # ID: call except: pass i += 1 @@ -369,11 +370,8 @@ p22 = new_with_vtable(19511408) p24 = new_array(1, descr=) p26 = new_with_vtable(ConstClass(W_ListObject)) - p27 = new(descr=) - p29 = new_array(0, descr=) setfield_gc(p0, i20, descr=) - setfield_gc(p27, p29, descr=) - setfield_gc(p26, p27, descr=<.* .*W_ListObject.inst_wrappeditems .*>) + setfield_gc(p26, ConstPtr(ptr22), descr=) setarrayitem_gc(p24, 0, p26, descr=) setfield_gc(p22, p24, descr=) p32 = call_may_force(11376960, p18, p22, descr=) @@ -486,4 +484,4 @@ i4 = int_add(i0, 1) --TICK-- jump(..., descr=...) 
- """) \ No newline at end of file + """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -201,9 +201,11 @@ assert log.result == 1000000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - i16 = int_ge(i12, i13) + i14 = getfield_gc(p12, descr=) + i16 = uint_ge(i12, i14) guard_false(i16, descr=...) - p17 = getarrayitem_gc(p15, i12, descr=) + p16 = getfield_gc(p12, descr=) + p17 = getarrayitem_gc(p16, i12, descr=) i19 = int_add(i12, 1) setfield_gc(p9, i19, descr=) guard_nonnull_class(p17, 146982464, descr=...) @@ -217,7 +219,7 @@ i28 = int_add_ovf(i10, i25) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p3, p4, p5, p6, i28, i25, p9, p10, p11, i19, i13, p14, p15, descr=) + jump(p0, p1, p2, p3, p4, p5, p6, i28, i25, p9, p10, p11, p12, i19, descr=) """) @@ -337,7 +339,7 @@ a = compile('x+x+x+x+x+x', 'eval', 'eval') b = {'x': 7} while i < 1000: - y = eval(a,b,b) # ID: eval + y = eval(a, b, b) # ID: eval i += 1 return y diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py --- a/pypy/module/rctime/interp_time.py +++ b/pypy/module/rctime/interp_time.py @@ -3,7 +3,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec from pypy.rpython.lltypesystem import lltype -from pypy.rlib.rarithmetic import ovfcheck_float_to_int +from pypy.rlib.rarithmetic import ovfcheck_float_to_int, intmask from pypy.rlib import rposix from pypy.translator.tool.cbuild import ExternalCompilationInfo import os @@ -585,7 +585,7 @@ # More likely, the format yields an empty result, # e.g. an empty format, or %Z when the timezone # is unknown. 
- result = rffi.charp2strn(outbuf, buflen) + result = rffi.charp2strn(outbuf, intmask(buflen)) return space.wrap(result) finally: lltype.free(outbuf, flavor='raw') diff --git a/pypy/objspace/flow/objspace.py b/pypy/objspace/flow/objspace.py --- a/pypy/objspace/flow/objspace.py +++ b/pypy/objspace/flow/objspace.py @@ -96,6 +96,12 @@ self.executioncontext.recorder = previous_recorder self.concrete_mode -= 1 + def is_w(self, w_one, w_two): + return self.is_true(self.is_(w_one, w_two)) + + is_ = None # real version added by add_operations() + id = None # real version added by add_operations() + def newdict(self): if self.concrete_mode: return Constant({}) diff --git a/pypy/objspace/flow/operation.py b/pypy/objspace/flow/operation.py --- a/pypy/objspace/flow/operation.py +++ b/pypy/objspace/flow/operation.py @@ -322,7 +322,7 @@ del _add_exceptions, _add_except_ovf def make_op(fs, name, symbol, arity, specialnames): - if hasattr(fs, name): + if getattr(fs, name, None) is not None: return op = None diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -9,10 +9,7 @@ from pypy.rlib.debug import check_annotation from pypy.objspace.std import stringobject from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.listobject import ( - _delitem_slice_helper, _setitem_slice_helper, - get_positive_index -) +from pypy.objspace.std.listobject import get_positive_index from pypy.objspace.std.listtype import get_list_index from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice from pypy.objspace.std.stringobject import W_StringObject @@ -541,7 +538,7 @@ oldsize = len(w_bytearray.data) start, stop, step, slicelength = w_slice.indices4(space, oldsize) sequence2 = makebytesdata_w(space, w_other) - setitem_slice_helper(space, w_bytearray.data, start, step, slicelength, sequence2, empty_elem='\x00') + _setitem_slice_helper(space, w_bytearray.data, start, step, slicelength, sequence2, empty_elem='\x00') def delitem__Bytearray_ANY(space, w_bytearray, w_idx): idx = get_list_index(space, w_idx) @@ -555,13 +552,84 @@ def delitem__Bytearray_Slice(space, w_bytearray, w_slice): start, stop, step, slicelength = w_slice.indices4(space, len(w_bytearray.data)) - delitem_slice_helper(space, w_bytearray.data, start, step, slicelength) + _delitem_slice_helper(space, w_bytearray.data, start, step, slicelength) -# create new helper functions with different list type specialisation -delitem_slice_helper = func_with_new_name(_delitem_slice_helper, - 'delitem_slice_helper') -setitem_slice_helper = func_with_new_name(_setitem_slice_helper, - 'setitem_slice_helper') +#XXX share the code again with the stuff in listobject.py +def _delitem_slice_helper(space, items, start, step, slicelength): + if slicelength==0: + return + + if step < 0: + start = start + step * (slicelength-1) + step = -step + + if step == 1: + assert start >= 0 + assert slicelength >= 0 + del items[start:start+slicelength] + else: + n = len(items) + i = start + + for discard in range(1, slicelength): + j = i+1 + i += step + while j < i: + items[j-discard] = items[j] + j += 1 + + j = i+1 + while j < n: + items[j-slicelength] = items[j] + j += 1 + start = n - slicelength + assert start >= 0 # annotator hint + del items[start:] + +def _setitem_slice_helper(space, items, start, step, slicelength, sequence2, + empty_elem): + assert slicelength >= 0 + oldsize = len(items) + len2 = len(sequence2) + if step == 
1: # Support list resizing for non-extended slices + delta = slicelength - len2 + if delta < 0: + delta = -delta + newsize = oldsize + delta + # XXX support this in rlist! + items += [empty_elem] * delta + lim = start+len2 + i = newsize - 1 + while i >= lim: + items[i] = items[i-delta] + i -= 1 + elif start >= 0: + del items[start:start+delta] + else: + assert delta==0 # start<0 is only possible with slicelength==0 + elif len2 != slicelength: # No resize for extended slices + raise operationerrfmt(space.w_ValueError, "attempt to " + "assign sequence of size %d to extended slice of size %d", + len2, slicelength) + + if sequence2 is items: + if step > 0: + # Always copy starting from the right to avoid + # having to make a shallow copy in the case where + # the source and destination lists are the same list. + i = len2 - 1 + start += i*step + while i >= 0: + items[start] = sequence2[i] + start -= step + i -= 1 + return + else: + # Make a shallow copy to more easily handle the reversal case + sequence2 = list(sequence2) + for i in range(len2): + items[start] = sequence2[i] + start += step def _strip(space, w_bytearray, u_chars, left, right): # note: mostly copied from stringobject._strip diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -65,11 +65,11 @@ if isinstance(cell, ModuleCell): cell.w_value = w_value return - # If the new value and the current value are the same, don't create a - # level of indirection, or mutate are version. - if self.space.is_w(w_value, cell): - return if cell is not None: + # If the new value and the current value are the same, don't create a + # level of indirection, or mutate the version. + if self.space.is_w(w_value, cell): + return w_value = ModuleCell(w_value) self.mutated() self.unerase(w_dict.dstorage)[key] = w_value diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -5,13 +5,47 @@ from pypy.objspace.std.register_all import register_all from pypy.objspace.std.floatobject import W_FloatObject, _hash_float from pypy.objspace.std.longobject import W_LongObject +from pypy.rlib.rbigint import rbigint from pypy.rlib.rfloat import ( formatd, DTSF_STR_PRECISION, isinf, isnan, copysign) from pypy.rlib.objectmodel import HASH_IMAG import math -class W_ComplexObject(W_Object): + +class W_AbstractComplexObject(W_Object): + __slots__ = () + + def is_w(self, space, w_other): + from pypy.rlib.longlong2float import float2longlong + if not isinstance(w_other, W_AbstractComplexObject): + return False + if self.user_overridden_class or w_other.user_overridden_class: + return self is w_other + real1 = space.float_w(space.getattr(self, space.wrap("real"))) + real2 = space.float_w(space.getattr(w_other, space.wrap("real"))) + imag1 = space.float_w(space.getattr(self, space.wrap("imag"))) + imag2 = space.float_w(space.getattr(w_other, space.wrap("imag"))) + real1 = float2longlong(real1) + real2 = float2longlong(real2) + imag1 = float2longlong(imag1) + imag2 = float2longlong(imag2) + return real1 == real2 and imag1 == imag2 + + def unique_id(self, space): + if self.user_overridden_class: + return W_Object.unique_id(self, space) + from pypy.rlib.longlong2float import float2longlong + from pypy.objspace.std.model import IDTAG_COMPLEX as tag + real = space.float_w(space.getattr(self, space.wrap("real"))) + imag = space.float_w(space.getattr(self, space.wrap("imag"))) + 
real_b = rbigint.fromrarith_int(float2longlong(real)) + imag_b = rbigint.fromrarith_int(float2longlong(imag)) + val = real_b.lshift(64).or_(imag_b).lshift(3).or_(rbigint.fromint(tag)) + return space.newlong_from_rbigint(val) + + +class W_ComplexObject(W_AbstractComplexObject): """This is a reimplementation of the CPython "PyComplexObject" """ from pypy.objspace.std.complextype import complex_typedef as typedef diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -22,10 +22,33 @@ import math from pypy.objspace.std.intobject import W_IntObject -class W_FloatObject(W_Object): - """This is a reimplementation of the CPython "PyFloatObject" - it is assumed that the constructor takes a real Python float as - an argument""" +class W_AbstractFloatObject(W_Object): + __slots__ = () + + def is_w(self, space, w_other): + from pypy.rlib.longlong2float import float2longlong + if not isinstance(w_other, W_AbstractFloatObject): + return False + if self.user_overridden_class or w_other.user_overridden_class: + return self is w_other + one = float2longlong(space.float_w(self)) + two = float2longlong(space.float_w(w_other)) + return one == two + + def unique_id(self, space): + if self.user_overridden_class: + return W_Object.unique_id(self, space) + from pypy.rlib.longlong2float import float2longlong + from pypy.objspace.std.model import IDTAG_FLOAT as tag + val = float2longlong(space.float_w(self)) + b = rbigint.fromrarith_int(val) + b = b.lshift(3).or_(rbigint.fromint(tag)) + return space.newlong_from_rbigint(b) + + +class W_FloatObject(W_AbstractFloatObject): + """This is a implementation of the app-level 'float' type. + The constructor takes an RPython float as an argument.""" from pypy.objspace.std.floattype import float_typedef as typedef _immutable_fields_ = ['floatval'] diff --git a/pypy/objspace/std/frame.py b/pypy/objspace/std/frame.py --- a/pypy/objspace/std/frame.py +++ b/pypy/objspace/std/frame.py @@ -3,13 +3,11 @@ import operator from pypy.rlib.unroll import unrolling_iterable -from pypy.interpreter import pyopcode, function +from pypy.interpreter import pyopcode from pypy.interpreter.pyframe import PyFrame -from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.module.__builtin__ import Module +from pypy.interpreter.error import OperationError from pypy.objspace.std import intobject, smallintobject from pypy.objspace.std.multimethod import FailedToImplement -from pypy.objspace.std.dictmultiobject import W_DictMultiObject from pypy.objspace.std.listobject import W_ListObject @@ -58,7 +56,7 @@ w_1 = f.popvalue() if type(w_1) is W_ListObject and type(w_2) is intobject.W_IntObject: try: - w_result = w_1.wrappeditems[w_2.intval] + w_result = w_1.getitem(w_2.intval) except IndexError: raise OperationError(f.space.w_IndexError, f.space.wrap("list index out of range")) diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -19,6 +19,22 @@ class W_AbstractIntObject(W_Object): __slots__ = () + def is_w(self, space, w_other): + if not isinstance(w_other, W_AbstractIntObject): + return False + if self.user_overridden_class or w_other.user_overridden_class: + return self is w_other + return space.int_w(self) == space.int_w(w_other) + + def unique_id(self, space): + if self.user_overridden_class: + return W_Object.unique_id(self, space) + from pypy.objspace.std.model import 
IDTAG_INT as tag + b = space.bigint_w(self) + b = b.lshift(3).or_(rbigint.fromint(tag)) + return space.newlong_from_rbigint(b) + + class W_IntObject(W_AbstractIntObject): __slots__ = 'intval' _immutable_fields_ = ['intval'] diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -33,9 +33,9 @@ """Sequence iterator specialized for lists, accessing directly their RPython-level list of wrapped objects. """ - def __init__(w_self, w_seq, wrappeditems): + def __init__(w_self, w_seq): W_AbstractSeqIterObject.__init__(w_self, w_seq) - w_self.listitems = wrappeditems + w_self.w_seq = w_seq class W_FastTupleIterObject(W_AbstractSeqIterObject): """Sequence iterator specialized for tuples, accessing @@ -105,13 +105,15 @@ return w_seqiter def next__FastListIter(space, w_seqiter): - if w_seqiter.listitems is None: + from pypy.objspace.std.listobject import W_ListObject + w_seq = w_seqiter.w_seq + if w_seq is None: raise OperationError(space.w_StopIteration, space.w_None) + assert isinstance(w_seq, W_ListObject) index = w_seqiter.index try: - w_item = w_seqiter.listitems[index] + w_item = w_seq.getitem(index) except IndexError: - w_seqiter.listitems = None w_seqiter.w_seq = None raise OperationError(space.w_StopIteration, space.w_None) w_seqiter.index = index + 1 diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -5,35 +5,937 @@ from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.listtype import get_list_index from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice - from pypy.objspace.std import slicetype from pypy.interpreter import gateway, baseobjspace +from pypy.rlib.objectmodel import instantiate, specialize from pypy.rlib.listsort import make_timsort_class +from pypy.rlib import rerased, jit from pypy.interpreter.argument import Signature +UNROLL_CUTOFF = 5 + class W_AbstractListObject(W_Object): __slots__ = () +def make_range_list(space, start, step, length): + if length <= 0: + strategy = space.fromcache(EmptyListStrategy) + storage = strategy.erase(None) + else: + strategy = space.fromcache(RangeListStrategy) + storage = strategy.erase((start, step, length)) + return W_ListObject.from_storage_and_strategy(space, storage, strategy) + +def make_empty_list(space): + strategy = space.fromcache(EmptyListStrategy) + storage = strategy.erase(None) + return W_ListObject.from_storage_and_strategy(space, storage, strategy) + + at jit.look_inside_iff(lambda space, list_w: jit.isconstant(len(list_w)) and len(list_w) < UNROLL_CUTOFF) +def get_strategy_from_list_objects(space, list_w): + if not list_w: + return space.fromcache(EmptyListStrategy) + + # check for ints + for w_obj in list_w: + if not is_W_IntObject(w_obj): + break + else: + return space.fromcache(IntegerListStrategy) + + # check for strings + for w_obj in list_w: + if not is_W_StringObject(w_obj): + break + else: + return space.fromcache(StringListStrategy) + + return space.fromcache(ObjectListStrategy) + +def is_W_IntObject(w_object): + from pypy.objspace.std.intobject import W_IntObject + return type(w_object) is W_IntObject + +def is_W_StringObject(w_object): + from pypy.objspace.std.stringobject import W_StringObject + return type(w_object) is W_StringObject + + + class W_ListObject(W_AbstractListObject): from pypy.objspace.std.listtype import list_typedef as typedef - def __init__(w_self, 
wrappeditems): - w_self.wrappeditems = wrappeditems + def __init__(w_self, space, wrappeditems): + assert isinstance(wrappeditems, list) + w_self.space = space + if space.config.objspace.std.withliststrategies: + w_self.strategy = get_strategy_from_list_objects(space, wrappeditems) + else: + w_self.strategy = space.fromcache(ObjectListStrategy) + w_self.init_from_list_w(wrappeditems) + + @staticmethod + def from_storage_and_strategy(space, storage, strategy): + w_self = instantiate(W_ListObject) + w_self.space = space + w_self.strategy = strategy + w_self.lstorage = storage + if not space.config.objspace.std.withliststrategies: + w_self.switch_to_object_strategy() + return w_self + + @staticmethod + def newlist_str(space, list_s): + strategy = space.fromcache(StringListStrategy) + storage = strategy.erase(list_s) + return W_ListObject.from_storage_and_strategy(space, storage, strategy) def __repr__(w_self): """ representation for debugging purposes """ - return "%s(%s)" % (w_self.__class__.__name__, w_self.wrappeditems) + return "%s(%s, %s)" % (w_self.__class__.__name__, w_self.strategy, w_self.lstorage._x) def unwrap(w_list, space): - items = [space.unwrap(w_item) for w_item in w_list.wrappeditems]# XXX generic mixed types unwrap + # for tests only! + items = [space.unwrap(w_item) for w_item in w_list.getitems()] return list(items) + def switch_to_object_strategy(self): + list_w = self.getitems() + self.strategy = self.space.fromcache(ObjectListStrategy) + # XXX this is quite indirect + self.init_from_list_w(list_w) + + def _temporarily_as_objects(self): + if self.strategy is self.space.fromcache(ObjectListStrategy): + return self + list_w = self.getitems() + strategy = self.space.fromcache(ObjectListStrategy) + storage = strategy.erase(list_w) + w_objectlist = W_ListObject.from_storage_and_strategy(self.space, storage, strategy) + return w_objectlist + + # ___________________________________________________ + + def init_from_list_w(self, list_w): + """Initializes listobject by iterating through the given list of + wrapped items, unwrapping them if neccessary and creating a + new erased object as storage""" + self.strategy.init_from_list_w(self, list_w) + + def clone(self): + """Returns a clone by creating a new listobject + with the same strategy and a copy of the storage""" + return self.strategy.clone(self) + + def copy_into(self, other): + """Used only when extending an EmptyList. Sets the EmptyLists + strategy and storage according to the other W_List""" + self.strategy.copy_into(self, other) + + def contains(self, w_obj): + """Returns unwrapped boolean, saying wether w_obj exists + in the list.""" + return self.strategy.contains(self, w_obj) + def append(w_list, w_item): - w_list.wrappeditems.append(w_item) + """Appends the wrapped item to the end of the list.""" + w_list.strategy.append(w_list, w_item) + + def length(self): + return self.strategy.length(self) + + def getitem(self, index): + """Returns the wrapped object that is found in the + list at the given index. The index must be unwrapped. + May raise IndexError.""" + return self.strategy.getitem(self, index) + + def getslice(self, start, stop, step, length): + """Returns a slice of the list defined by the arguments. Arguments must be + normalized (i.e. using normalize_simple_slice or W_Slice.indices4). + May raise IndexError.""" + return self.strategy.getslice(self, start, stop, step, length) + + def getitems(self): + """Returns a list of all items after wrapping them. 
The result can + share with the storage, if possible.""" + return self.strategy.getitems(self) + + def getitems_copy(self): + """Returns a copy of all items in the list. Same as getitems except for + ObjectListStrategy.""" + return self.strategy.getitems_copy(self) + + def getitems_str(self): + """ Return the items in the list as unwrapped strings. If the list does + not use the list strategy, return None. """ + return self.strategy.getitems_str(self) + # ___________________________________________________ + + + def mul(self, times): + """Returns a copy of the list, multiplied by times. + Argument must be unwrapped.""" + return self.strategy.mul(self, times) + + def inplace_mul(self, times): + """Alters the list by multiplying its content by times.""" + self.strategy.inplace_mul(self, times) + + def deleteslice(self, start, step, length): + """Deletes a slice from the list. Used in delitem and delslice. + Arguments must be normalized (see getslice).""" + self.strategy.deleteslice(self, start, step, length) + + def pop(self, index): + """Pops an item from the list. Index must be normalized. + May raise IndexError.""" + return self.strategy.pop(self, index) + + def pop_end(self): + """ Pop the last element from the list.""" + return self.strategy.pop_end(self) + + def setitem(self, index, w_item): + """Inserts a wrapped item at the given (unwrapped) index. + May raise IndexError.""" + self.strategy.setitem(self, index, w_item) + + def setslice(self, start, step, slicelength, sequence_w): + """Sets the slice of the list from start to start+step*slicelength to + the sequence sequence_w. + Used by setslice and setitem.""" + self.strategy.setslice(self, start, step, slicelength, sequence_w) + + def insert(self, index, w_item): + """Inserts an item at the given position. Item must be wrapped, + index not.""" + self.strategy.insert(self, index, w_item) + + def extend(self, items_w): + """Appends the given list of wrapped items.""" + self.strategy.extend(self, items_w) + + def reverse(self): + """Reverses the list.""" + self.strategy.reverse(self) + + def sort(self, reverse): + """Sorts the list ascending or descending depending on + argument reverse. Argument must be unwrapped.""" + self.strategy.sort(self, reverse) registerimplementation(W_ListObject) +class ListStrategy(object): + + def __init__(self, space): + self.space = space + + def init_from_list_w(self, w_list, list_w): + raise NotImplementedError + + def clone(self, w_list): + raise NotImplementedError + + def copy_into(self, w_list, w_other): + raise NotImplementedError + + def contains(self, w_list, w_obj): + # needs to be safe against eq_w() mutating the w_list behind our back + i = 0 + while i < w_list.length(): # intentionally always calling len! 
+ if self.space.eq_w(w_list.getitem(i), w_obj): + return True + i += 1 + return False + + def length(self, w_list): + raise NotImplementedError + + def getitem(self, w_list, index): + raise NotImplementedError + + def getslice(self, w_list, start, stop, step, length): + raise NotImplementedError + + def getitems(self, w_list): + return self.getitems_copy(w_list) + + def getitems_copy(self, w_list): + raise NotImplementedError + + def getitems_str(self, w_list): + return None + + def getstorage_copy(self, w_list): + raise NotImplementedError + + def append(self, w_list, w_item): + raise NotImplementedError + + def mul(self, w_list, times): + w_newlist = w_list.clone() + w_newlist.inplace_mul(times) + return w_newlist + + def inplace_mul(self, w_list, times): + raise NotImplementedError + + def deleteslice(self, w_list, start, step, slicelength): + raise NotImplementedError + + def pop(self, w_list, index): + raise NotImplementedError + + def pop_end(self, w_list): + return self.pop(w_list, self.length(w_list) - 1) + + def setitem(self, w_list, index, w_item): + raise NotImplementedError + + def setslice(self, w_list, start, step, slicelength, sequence_w): + raise NotImplementedError + + def insert(self, w_list, index, w_item): + raise NotImplementedError + + def extend(self, w_list, items_w): + raise NotImplementedError + + def reverse(self, w_list): + raise NotImplementedError + + def sort(self, w_list, reverse): + raise NotImplementedError + +class EmptyListStrategy(ListStrategy): + """EmptyListStrategy is used when a W_List withouth elements is created. + The storage is None. When items are added to the W_List a new RPython list + is created and the strategy and storage of the W_List are changed depending + to the added item. + W_Lists do not switch back to EmptyListStrategy when becoming empty again.""" + + def __init__(self, space): + ListStrategy.__init__(self, space) + # cache an empty list that is used whenever getitems is called (i.e. 
sorting) + self.cached_emptylist_w = [] + + def init_from_list_w(self, w_list, list_w): + assert len(list_w) == 0 + w_list.lstorage = self.erase(None) + + erase, unerase = rerased.new_erasing_pair("empty") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def clone(self, w_list): + return W_ListObject.from_storage_and_strategy(self.space, w_list.lstorage, self) + + def copy_into(self, w_list, w_other): + pass + + def contains(self, w_list, w_obj): + return False + + def length(self, w_list): + return 0 + + def getitem(self, w_list, index): + raise IndexError + + def getslice(self, w_list, start, stop, step, length): + # will never be called because the empty list case is already caught in + # getslice__List_ANY_ANY and getitem__List_Slice + return W_ListObject(self.space, self.cached_emptylist_w) + + def getitems(self, w_list): + return self.cached_emptylist_w + + def getitems_copy(self, w_list): + return [] + + def getstorage_copy(self, w_list): + return self.erase(None) + + def switch_to_correct_strategy(self, w_list, w_item): + if is_W_IntObject(w_item): + strategy = self.space.fromcache(IntegerListStrategy) + elif is_W_StringObject(w_item): + strategy = self.space.fromcache(StringListStrategy) + else: + strategy = self.space.fromcache(ObjectListStrategy) + + storage = strategy.get_empty_storage() + w_list.strategy = strategy + w_list.lstorage = storage + + def append(self, w_list, w_item): + self.switch_to_correct_strategy(w_list, w_item) + w_list.append(w_item) + + def inplace_mul(self, w_list, times): + return + + def deleteslice(self, w_list, start, step, slicelength): + pass + + def pop(self, w_list, index): + # will not be called because IndexError was already raised in + # list_pop__List_ANY + raise IndexError + + def setitem(self, w_list, index, w_item): + raise IndexError + + def setslice(self, w_list, start, step, slicelength, w_other): + strategy = w_other.strategy + storage = strategy.getstorage_copy(w_other) + w_list.strategy = strategy + w_list.lstorage = storage + + def sort(self, w_list, reverse): + return + + def insert(self, w_list, index, w_item): + assert index == 0 + self.append(w_list, w_item) + + def extend(self, w_list, w_other): + w_other.copy_into(w_list) + + def reverse(self, w_list): + pass + +class RangeListStrategy(ListStrategy): + """RangeListStrategy is used when a list is created using the range method. + The storage is a tuple containing only three integers start, step and length + and elements are calculated based on these values. 
+ On any operation destroying the range (inserting, appending non-ints) + the strategy is switched to IntegerListStrategy.""" + + def switch_to_integer_strategy(self, w_list): + items = self._getitems_range(w_list, False) + strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) + w_list.lstorage = strategy.erase(items) + + def wrap(self, intval): + return self.space.wrap(intval) + + def unwrap(self, w_int): + return self.space.int_w(w_int) + + def init_from_list_w(self, w_list, list_w): + raise NotImplementedError + + erase, unerase = rerased.new_erasing_pair("range") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def clone(self, w_list): + storage = w_list.lstorage # lstorage is tuple, no need to clone + w_clone = W_ListObject.from_storage_and_strategy(self.space, storage, self) + return w_clone + + def copy_into(self, w_list, w_other): + w_other.strategy = self + w_other.lstorage = w_list.lstorage + + def contains(self, w_list, w_obj): + if is_W_IntObject(w_obj): + start, step, length = self.unerase(w_list.lstorage) + obj = self.unwrap(w_obj) + i = start + if step > 0 and start <= obj <= start + (length - 1) * step and (start - obj) % step == 0: + return True + elif step < 0 and start + (length -1) * step <= obj <= start and (start - obj) % step == 0: + return True + else: + return False + return ListStrategy.contains(self, w_list, w_obj) + + def length(self, w_list): + return self.unerase(w_list.lstorage)[2] + + def _getitem_unwrapped(self, w_list, i): + v = self.unerase(w_list.lstorage) + start = v[0] + step = v[1] + length = v[2] + if i < 0: + i += length + if i < 0: + raise IndexError + elif i >= length: + raise IndexError + return start + i * step + + def getitem(self, w_list, i): + return self.wrap(self._getitem_unwrapped(w_list, i)) + + def getitems_copy(self, w_list): + return self._getitems_range(w_list, True) + + def getstorage_copy(self, w_list): + # tuple is unmutable + return w_list.lstorage + + + @specialize.arg(2) + def _getitems_range(self, w_list, wrap_items): + l = self.unerase(w_list.lstorage) + start = l[0] + step = l[1] + length = l[2] + if wrap_items: + r = [None] * length + else: + r = [0] * length + i = start + n = 0 + while n < length: + if wrap_items: + r[n] = self.wrap(i) + else: + r[n] = i + i += step + n += 1 + + return r + + def getslice(self, w_list, start, stop, step, length): + v = self.unerase(w_list.lstorage) + old_start = v[0] + old_step = v[1] + old_length = v[2] + + new_start = self._getitem_unwrapped(w_list, start) + new_step = old_step * step + return make_range_list(self.space, new_start, new_step, length) + + def append(self, w_list, w_item): + if is_W_IntObject(w_item): + l = self.unerase(w_list.lstorage) + step = l[1] + last_in_range = self._getitem_unwrapped(w_list, -1) + if self.unwrap(w_item) - step == last_in_range: + new = self.erase((l[0],l[1],l[2]+1)) + w_list.lstorage = new + return + + self.switch_to_integer_strategy(w_list) + else: + w_list.switch_to_object_strategy() + w_list.append(w_item) + + def inplace_mul(self, w_list, times): + self.switch_to_integer_strategy(w_list) + w_list.inplace_mul(times) + + def deleteslice(self, w_list, start, step, slicelength): + self.switch_to_integer_strategy(w_list) + w_list.deleteslice(start, step, slicelength) + + def pop_end(self, w_list): + start, step, length = self.unerase(w_list.lstorage) + w_result = self.wrap(start + (length - 1) * step) + new = self.erase((start, step, length - 1)) + w_list.lstorage = new + return w_result + + def pop(self, w_list, 
index): + l = self.unerase(w_list.lstorage) + start = l[0] + step = l[1] + length = l[2] + if index == 0: + w_result = self.wrap(start) + new = self.erase((start + step, step, length - 1)) + w_list.lstorage = new + return w_result + elif index == length - 1: + return self.pop_end(w_list) + else: + self.switch_to_integer_strategy(w_list) + return w_list.pop(index) + + def setitem(self, w_list, index, w_item): + self.switch_to_integer_strategy(w_list) + w_list.setitem(index, w_item) + + def setslice(self, w_list, start, step, slicelength, sequence_w): + self.switch_to_integer_strategy(w_list) + w_list.setslice(start, step, slicelength, sequence_w) + + def sort(self, w_list, reverse): + start, step, length = self.unerase(w_list.lstorage) + if step > 0 and reverse or step < 0 and not reverse: + start = start + step * (length - 1) + step = step * (-1) + else: + return + w_list.lstorage = self.erase((start, step, length)) + + def insert(self, w_list, index, w_item): + self.switch_to_integer_strategy(w_list) + w_list.insert(index, w_item) + + def extend(self, w_list, items_w): + self.switch_to_integer_strategy(w_list) + w_list.extend(items_w) + + def reverse(self, w_list): + v = self.unerase(w_list.lstorage) + last = self._getitem_unwrapped(w_list, -1) + length = v[2] + skip = v[1] + new = self.erase((last, -skip, length)) + w_list.lstorage = new + +class AbstractUnwrappedStrategy(object): + _mixin_ = True + + def wrap(self, unwrapped): + raise NotImplementedError + + def unwrap(self, wrapped): + raise NotImplementedError + + @staticmethod + def unerase(storage): + raise NotImplementedError("abstract base class") + + @staticmethod + def erase(obj): + raise NotImplementedError("abstract base class") + + def is_correct_type(self, w_obj): + raise NotImplementedError("abstract base class") + + def list_is_correct_type(self, w_list): + raise NotImplementedError("abstract base class") + + @jit.look_inside_iff(lambda space, w_list, list_w: + jit.isconstant(len(list_w)) and len(list_w) < UNROLL_CUTOFF) + def init_from_list_w(self, w_list, list_w): + l = [self.unwrap(w_item) for w_item in list_w] + w_list.lstorage = self.erase(l) + + def get_empty_storage(self): + return self.erase([]) + + def clone(self, w_list): + l = self.unerase(w_list.lstorage) + storage = self.erase(l[:]) + w_clone = W_ListObject.from_storage_and_strategy(self.space, storage, self) + return w_clone + + def copy_into(self, w_list, w_other): + w_other.strategy = self + items = self.unerase(w_list.lstorage)[:] + w_other.lstorage = self.erase(items) + + def contains(self, w_list, w_obj): + if self.is_correct_type(w_obj): + obj = self.unwrap(w_obj) + l = self.unerase(w_list.lstorage) + for i in l: + if i == obj: + return True + return ListStrategy.contains(self, w_list, w_obj) + + def length(self, w_list): + return len(self.unerase(w_list.lstorage)) + + def getitem(self, w_list, index): + l = self.unerase(w_list.lstorage) + try: + r = l[index] + except IndexError: # make RPython raise the exception + raise + return self.wrap(r) + + @jit.look_inside_iff(lambda self, w_list: + jit.isconstant(w_list.length()) and w_list.length() < UNROLL_CUTOFF) + def getitems_copy(self, w_list): + return [self.wrap(item) for item in self.unerase(w_list.lstorage)] + + def getstorage_copy(self, w_list): + items = self.unerase(w_list.lstorage)[:] + return self.erase(items) + + + def getslice(self, w_list, start, stop, step, length): + if step == 1 and 0 <= start <= stop: + l = self.unerase(w_list.lstorage) + assert start >= 0 + assert stop >= 0 + sublist = 
l[start:stop] + storage = self.erase(sublist) + return W_ListObject.from_storage_and_strategy(self.space, storage, self) + else: + subitems_w = [self._none_value] * length + l = self.unerase(w_list.lstorage) + for i in range(length): + try: + subitems_w[i] = l[start] + start += step + except IndexError: + raise + storage = self.erase(subitems_w) + return W_ListObject.from_storage_and_strategy(self.space, storage, self) + + def append(self, w_list, w_item): + + if self.is_correct_type(w_item): + self.unerase(w_list.lstorage).append(self.unwrap(w_item)) + return + + w_list.switch_to_object_strategy() + w_list.append(w_item) + + def insert(self, w_list, index, w_item): + l = self.unerase(w_list.lstorage) + + if self.is_correct_type(w_item): + l.insert(index, self.unwrap(w_item)) + return + + w_list.switch_to_object_strategy() + w_list.insert(index, w_item) + + def extend(self, w_list, w_other): + l = self.unerase(w_list.lstorage) + if self.list_is_correct_type(w_other): + l += self.unerase(w_other.lstorage) + return + elif w_other.strategy is self.space.fromcache(EmptyListStrategy): + return + + w_other = w_other._temporarily_as_objects() + w_list.switch_to_object_strategy() + w_list.extend(w_other) + + def setitem(self, w_list, index, w_item): + l = self.unerase(w_list.lstorage) + + if self.is_correct_type(w_item): + try: + l[index] = self.unwrap(w_item) + except IndexError: + raise + return + + w_list.switch_to_object_strategy() + w_list.setitem(index, w_item) + + def setslice(self, w_list, start, step, slicelength, w_other): + assert slicelength >= 0 + items = self.unerase(w_list.lstorage) + + if self is self.space.fromcache(ObjectListStrategy): + w_other = w_other._temporarily_as_objects() + elif (not self.list_is_correct_type(w_other) and + w_other.length() != 0): + w_list.switch_to_object_strategy() + w_other_as_object = w_other._temporarily_as_objects() + assert w_other_as_object.strategy is self.space.fromcache(ObjectListStrategy) + w_list.setslice(start, step, slicelength, w_other_as_object) + return + + oldsize = len(items) + len2 = w_other.length() + if step == 1: # Support list resizing for non-extended slices + delta = slicelength - len2 + if delta < 0: + delta = -delta + newsize = oldsize + delta + # XXX support this in rlist! + items += [self._none_value] * delta + lim = start+len2 + i = newsize - 1 + while i >= lim: + items[i] = items[i-delta] + i -= 1 + elif start >= 0: + del items[start:start+delta] + else: + assert delta==0 # start<0 is only possible with slicelength==0 + elif len2 != slicelength: # No resize for extended slices + raise operationerrfmt(self.space.w_ValueError, "attempt to " + "assign sequence of size %d to extended slice of size %d", + len2, slicelength) + + if w_other.strategy is self.space.fromcache(EmptyListStrategy): + other_items = [] + else: + # at this point both w_list and w_other have the same type, so + # self.unerase is valid for both of them + other_items = self.unerase(w_other.lstorage) + if other_items is items: + if step > 0: + # Always copy starting from the right to avoid + # having to make a shallow copy in the case where + # the source and destination lists are the same list. 
+ i = len2 - 1 + start += i*step + while i >= 0: + items[start] = other_items[i] + start -= step + i -= 1 + return + else: + # Make a shallow copy to more easily handle the reversal case + w_list.reverse() + return + #other_items = list(other_items) + for i in range(len2): + items[start] = other_items[i] + start += step + + def deleteslice(self, w_list, start, step, slicelength): + items = self.unerase(w_list.lstorage) + if slicelength==0: + return + + if step < 0: + start = start + step * (slicelength-1) + step = -step + + if step == 1: + assert start >= 0 + assert slicelength >= 0 + del items[start:start+slicelength] + else: + n = len(items) + i = start + + for discard in range(1, slicelength): + j = i+1 + i += step + while j < i: + items[j-discard] = items[j] + j += 1 + + j = i+1 + while j < n: + items[j-slicelength] = items[j] + j += 1 + start = n - slicelength + assert start >= 0 # annotator hint + del items[start:] + + def pop_end(self, w_list): + l = self.unerase(w_list.lstorage) + return self.wrap(l.pop()) + + def pop(self, w_list, index): + l = self.unerase(w_list.lstorage) + # not sure if RPython raises IndexError on pop + # so check again here + if index < 0: + raise IndexError + try: + item = l.pop(index) + except IndexError: + raise + + w_item = self.wrap(item) + return w_item + + def inplace_mul(self, w_list, times): + l = self.unerase(w_list.lstorage) + l *= times + + def reverse(self, w_list): + self.unerase(w_list.lstorage).reverse() + +class ObjectListStrategy(AbstractUnwrappedStrategy, ListStrategy): + _none_value = None + + def unwrap(self, w_obj): + return w_obj + + def wrap(self, item): + return item + + erase, unerase = rerased.new_erasing_pair("object") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def is_correct_type(self, w_obj): + return True + + def list_is_correct_type(self, w_list): + return w_list.strategy is self.space.fromcache(ObjectListStrategy) + + def init_from_list_w(self, w_list, list_w): + w_list.lstorage = self.erase(list_w) + + def contains(self, w_list, w_obj): + return ListStrategy.contains(self, w_list, w_obj) + + def getitems(self, w_list): + return self.unerase(w_list.lstorage) + +class IntegerListStrategy(AbstractUnwrappedStrategy, ListStrategy): + _none_value = 0 + + def wrap(self, intval): + return self.space.wrap(intval) + + def unwrap(self, w_int): + return self.space.int_w(w_int) + + erase, unerase = rerased.new_erasing_pair("integer") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def is_correct_type(self, w_obj): + return is_W_IntObject(w_obj) + + def list_is_correct_type(self, w_list): + return w_list.strategy is self.space.fromcache(IntegerListStrategy) + + def sort(self, w_list, reverse): + l = self.unerase(w_list.lstorage) + sorter = IntSort(l, len(l)) + sorter.sort() + if reverse: + l.reverse() + +class StringListStrategy(AbstractUnwrappedStrategy, ListStrategy): + _none_value = None + + def wrap(self, stringval): + return self.space.wrap(stringval) + + def unwrap(self, w_string): + return self.space.str_w(w_string) + + erase, unerase = rerased.new_erasing_pair("string") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def is_correct_type(self, w_obj): + return is_W_StringObject(w_obj) + + def list_is_correct_type(self, w_list): + return w_list.strategy is self.space.fromcache(StringListStrategy) + + def sort(self, w_list, reverse): + l = self.unerase(w_list.lstorage) + sorter = StringSort(l, len(l)) + sorter.sort() + if reverse: + l.reverse() + + def getitems_str(self, 
w_list): + return self.unerase(w_list.lstorage) + +# _______________________________________________________ + init_signature = Signature(['sequence'], None, None) init_defaults = [None] @@ -42,25 +944,24 @@ # this is on the silly side w_iterable, = __args__.parse_obj( None, 'list', init_signature, init_defaults) - items_w = w_list.wrappeditems - del items_w[:] + w_list.__init__(space, []) if w_iterable is not None: # unfortunately this is duplicating space.unpackiterable to avoid # assigning a new RPython list to 'wrappeditems', which defeats the # W_FastListIterObject optimization. if isinstance(w_iterable, W_ListObject): - items_w.extend(w_iterable.wrappeditems) + w_list.extend(w_iterable) elif isinstance(w_iterable, W_TupleObject): - items_w.extend(w_iterable.wrappeditems) + w_list.extend(W_ListObject(space, w_iterable.wrappeditems[:])) else: - _init_from_iterable(space, items_w, w_iterable) + _init_from_iterable(space, w_list, w_iterable) -def _init_from_iterable(space, items_w, w_iterable): +def _init_from_iterable(space, w_list, w_iterable): # in its own function to make the JIT look into init__List # xxx special hack for speed from pypy.interpreter.generator import GeneratorIterator if isinstance(w_iterable, GeneratorIterator): - w_iterable.unpack_into(items_w) + w_iterable.unpack_into_w(w_list) return # /xxx w_iterator = space.iter(w_iterable) @@ -71,51 +972,39 @@ if not e.match(space, space.w_StopIteration): raise break # done - items_w.append(w_item) + w_list.append(w_item) def len__List(space, w_list): - result = len(w_list.wrappeditems) + result = w_list.length() return wrapint(space, result) def getitem__List_ANY(space, w_list, w_index): try: - return w_list.wrappeditems[get_list_index(space, w_index)] + return w_list.getitem(get_list_index(space, w_index)) except IndexError: raise OperationError(space.w_IndexError, space.wrap("list index out of range")) def getitem__List_Slice(space, w_list, w_slice): # XXX consider to extend rlist's functionality? - length = len(w_list.wrappeditems) + length = w_list.length() start, stop, step, slicelength = w_slice.indices4(space, length) assert slicelength >= 0 - if step == 1 and 0 <= start <= stop: - return W_ListObject(w_list.wrappeditems[start:stop]) - w_res = W_ListObject([None] * slicelength) - items_w = w_list.wrappeditems - subitems_w = w_res.wrappeditems - for i in range(slicelength): - subitems_w[i] = items_w[start] - start += step - return w_res + if slicelength == 0: + return make_empty_list(space) + return w_list.getslice(start, stop, step, slicelength) def contains__List_ANY(space, w_list, w_obj): - # needs to be safe against eq_w() mutating the w_list behind our back - i = 0 - items_w = w_list.wrappeditems - while i < len(items_w): # intentionally always calling len! 
- if space.eq_w(items_w[i], w_obj): - return space.w_True - i += 1 - return space.w_False + return space.wrap(w_list.contains(w_obj)) def iter__List(space, w_list): from pypy.objspace.std import iterobject - return iterobject.W_FastListIterObject(w_list, w_list.wrappeditems) + return iterobject.W_FastListIterObject(w_list) def add__List_List(space, w_list1, w_list2): - return W_ListObject(w_list1.wrappeditems + w_list2.wrappeditems) - + w_clone = w_list1.clone() + w_clone.extend(w_list2) + return w_clone def inplace_add__List_ANY(space, w_list1, w_iterable2): try: @@ -137,7 +1026,7 @@ if e.match(space, space.w_TypeError): raise FailedToImplement raise - return W_ListObject(w_list.wrappeditems * times) + return w_list.mul(times) def mul__List_ANY(space, w_list, w_times): return mul_list_times(space, w_list, w_times) @@ -152,63 +1041,65 @@ if e.match(space, space.w_TypeError): raise FailedToImplement raise - w_list.wrappeditems *= times + w_list.inplace_mul(times) return w_list def eq__List_List(space, w_list1, w_list2): # needs to be safe against eq_w() mutating the w_lists behind our back - items1_w = w_list1.wrappeditems - items2_w = w_list2.wrappeditems - return equal_wrappeditems(space, items1_w, items2_w) + if w_list1.length() != w_list2.length(): + return space.w_False -def equal_wrappeditems(space, items1_w, items2_w): - if len(items1_w) != len(items2_w): - return space.w_False + # XXX in theory, this can be implemented more efficiently as well. let's + # not care for now i = 0 - while i < len(items1_w) and i < len(items2_w): - if not space.eq_w(items1_w[i], items2_w[i]): + while i < w_list1.length() and i < w_list2.length(): + if not space.eq_w(w_list1.getitem(i), w_list2.getitem(i)): return space.w_False i += 1 return space.w_True -def lessthan_unwrappeditems(space, items1_w, items2_w): +def lessthan_unwrappeditems(space, w_list1, w_list2): # needs to be safe against eq_w() mutating the w_lists behind our back # Search for the first index where items are different i = 0 - while i < len(items1_w) and i < len(items2_w): - w_item1 = items1_w[i] - w_item2 = items2_w[i] + # XXX in theory, this can be implemented more efficiently as well. let's + # not care for now + while i < w_list1.length() and i < w_list2.length(): + w_item1 = w_list1.getitem(i) + w_item2 = w_list2.getitem(i) if not space.eq_w(w_item1, w_item2): return space.lt(w_item1, w_item2) i += 1 # No more items to compare -- compare sizes - return space.newbool(len(items1_w) < len(items2_w)) + return space.newbool(w_list1.length() < w_list2.length()) -def greaterthan_unwrappeditems(space, items1_w, items2_w): +def greaterthan_unwrappeditems(space, w_list1, w_list2): # needs to be safe against eq_w() mutating the w_lists behind our back # Search for the first index where items are different i = 0 - while i < len(items1_w) and i < len(items2_w): - w_item1 = items1_w[i] - w_item2 = items2_w[i] + # XXX in theory, this can be implemented more efficiently as well. 
let's + # not care for now + while i < w_list1.length() and i < w_list2.length(): + w_item1 = w_list1.getitem(i) + w_item2 = w_list2.getitem(i) if not space.eq_w(w_item1, w_item2): return space.gt(w_item1, w_item2) i += 1 # No more items to compare -- compare sizes - return space.newbool(len(items1_w) > len(items2_w)) + return space.newbool(w_list1.length() > w_list2.length()) def lt__List_List(space, w_list1, w_list2): - return lessthan_unwrappeditems(space, w_list1.wrappeditems, - w_list2.wrappeditems) + return lessthan_unwrappeditems(space, w_list1, w_list2) def gt__List_List(space, w_list1, w_list2): - return greaterthan_unwrappeditems(space, w_list1.wrappeditems, - w_list2.wrappeditems) + return greaterthan_unwrappeditems(space, w_list1, w_list2) def delitem__List_ANY(space, w_list, w_idx): idx = get_list_index(space, w_idx) + if idx < 0: + idx += w_list.length() try: - del w_list.wrappeditems[idx] + w_list.pop(idx) except IndexError: raise OperationError(space.w_IndexError, space.wrap("list deletion index out of range")) @@ -216,103 +1107,29 @@ def delitem__List_Slice(space, w_list, w_slice): - start, stop, step, slicelength = w_slice.indices4(space, - len(w_list.wrappeditems)) - _delitem_slice_helper(space, w_list.wrappeditems, start, step, slicelength) - -def _delitem_slice_helper(space, items, start, step, slicelength): - if slicelength==0: - return - - if step < 0: - start = start + step * (slicelength-1) - step = -step - - if step == 1: - assert start >= 0 - assert slicelength >= 0 - del items[start:start+slicelength] - else: - n = len(items) - i = start - - for discard in range(1, slicelength): - j = i+1 - i += step - while j < i: - items[j-discard] = items[j] - j += 1 - - j = i+1 - while j < n: - items[j-slicelength] = items[j] - j += 1 - start = n - slicelength - assert start >= 0 # annotator hint - del items[start:] + start, stop, step, slicelength = w_slice.indices4(space, w_list.length()) + w_list.deleteslice(start, step, slicelength) def setitem__List_ANY_ANY(space, w_list, w_index, w_any): idx = get_list_index(space, w_index) try: - w_list.wrappeditems[idx] = w_any + w_list.setitem(idx, w_any) except IndexError: raise OperationError(space.w_IndexError, space.wrap("list index out of range")) return space.w_None +def setitem__List_Slice_List(space, w_list, w_slice, w_other): + oldsize = w_list.length() + start, stop, step, slicelength = w_slice.indices4(space, oldsize) + w_list.setslice(start, step, slicelength, w_other) + def setitem__List_Slice_ANY(space, w_list, w_slice, w_iterable): - oldsize = len(w_list.wrappeditems) + oldsize = w_list.length() start, stop, step, slicelength = w_slice.indices4(space, oldsize) - - sequence2 = space.listview(w_iterable) - items = w_list.wrappeditems - _setitem_slice_helper(space, items, start, step, slicelength, sequence2, - empty_elem=None) - -def _setitem_slice_helper(space, items, start, step, slicelength, sequence2, - empty_elem): - assert slicelength >= 0 - oldsize = len(items) - len2 = len(sequence2) - if step == 1: # Support list resizing for non-extended slices - delta = slicelength - len2 - if delta < 0: - delta = -delta - newsize = oldsize + delta - # XXX support this in rlist! 
- items += [empty_elem] * delta - lim = start+len2 - i = newsize - 1 - while i >= lim: - items[i] = items[i-delta] - i -= 1 - elif start >= 0: - del items[start:start+delta] - else: - assert delta==0 # start<0 is only possible with slicelength==0 - elif len2 != slicelength: # No resize for extended slices - raise operationerrfmt(space.w_ValueError, "attempt to " - "assign sequence of size %d to extended slice of size %d", - len2, slicelength) - - if sequence2 is items: - if step > 0: - # Always copy starting from the right to avoid - # having to make a shallow copy in the case where - # the source and destination lists are the same list. - i = len2 - 1 - start += i*step - while i >= 0: - items[start] = sequence2[i] - start -= step - i -= 1 - return - else: - # Make a shallow copy to more easily handle the reversal case - sequence2 = list(sequence2) - for i in range(len2): - items[start] = sequence2[i] - start += step + sequence_w = space.listview(w_iterable) + w_other = W_ListObject(space, sequence_w) + w_list.setslice(start, step, slicelength, w_other) app = gateway.applevel(""" def listrepr(currently_in_repr, l): @@ -333,7 +1150,7 @@ listrepr = app.interphook("listrepr") def repr__List(space, w_list): - if len(w_list.wrappeditems) == 0: + if w_list.length() == 0: return space.wrap('[]') ec = space.getexecutioncontext() w_currently_in_repr = ec._py_repr @@ -343,9 +1160,9 @@ def list_insert__List_ANY_ANY(space, w_list, w_where, w_any): where = space.int_w(w_where) - length = len(w_list.wrappeditems) + length = w_list.length() index = get_positive_index(where, length) - w_list.wrappeditems.insert(index, w_any) + w_list.insert(index, w_any) return space.w_None def get_positive_index(where, length): @@ -355,45 +1172,51 @@ where = 0 elif where > length: where = length + assert where >= 0 return where def list_append__List_ANY(space, w_list, w_any): - w_list.wrappeditems.append(w_any) + w_list.append(w_any) return space.w_None def list_extend__List_List(space, w_list, w_other): - w_list.wrappeditems += w_other.wrappeditems + w_list.extend(w_other) return space.w_None def list_extend__List_ANY(space, w_list, w_any): - w_list.wrappeditems += space.listview(w_any) + w_other = W_ListObject(space, space.listview(w_any)) + w_list.extend(w_other) return space.w_None -# note that the default value will come back wrapped!!! 
-def list_pop__List_ANY(space, w_list, w_idx=-1): - items = w_list.wrappeditems - if len(items)== 0: +# default of w_idx is space.w_None (see listtype.py) +def list_pop__List_ANY(space, w_list, w_idx): + length = w_list.length() + if length == 0: raise OperationError(space.w_IndexError, space.wrap("pop from empty list")) + # clearly differentiate between list.pop() and list.pop(index) + if space.is_w(w_idx, space.w_None): + return w_list.pop_end() # cannot raise because list is not empty if space.isinstance_w(w_idx, space.w_float): raise OperationError(space.w_TypeError, space.wrap("integer argument expected, got float") ) idx = space.int_w(space.int(w_idx)) + if idx < 0: + idx += length try: - return items.pop(idx) + return w_list.pop(idx) except IndexError: raise OperationError(space.w_IndexError, space.wrap("pop index out of range")) def list_remove__List_ANY(space, w_list, w_any): # needs to be safe against eq_w() mutating the w_list behind our back - items = w_list.wrappeditems i = 0 - while i < len(items): - if space.eq_w(items[i], w_any): - if i < len(items): # if this is wrong the list was changed - del items[i] + while i < w_list.length(): + if space.eq_w(w_list.getitem(i), w_any): + if i < w_list.length(): # if this is wrong the list was changed + w_list.pop(i) return space.w_None i += 1 raise OperationError(space.w_ValueError, @@ -401,12 +1224,11 @@ def list_index__List_ANY_ANY_ANY(space, w_list, w_any, w_start, w_stop): # needs to be safe against eq_w() mutating the w_list behind our back - items = w_list.wrappeditems - size = len(items) + size = w_list.length() i, stop = slicetype.unwrap_start_stop( space, size, w_start, w_stop, True) - while i < stop and i < len(items): - if space.eq_w(items[i], w_any): + while i < stop and i < w_list.length(): + if space.eq_w(w_list.getitem(i), w_any): return space.wrap(i) i += 1 raise OperationError(space.w_ValueError, @@ -416,15 +1238,14 @@ # needs to be safe against eq_w() mutating the w_list behind our back count = 0 i = 0 - items = w_list.wrappeditems - while i < len(items): - if space.eq_w(items[i], w_any): + while i < w_list.length(): + if space.eq_w(w_list.getitem(i), w_any): count += 1 i += 1 return space.wrap(count) def list_reverse__List(space, w_list): - w_list.wrappeditems.reverse() + w_list.reverse() return space.w_None # ____________________________________________________________ @@ -433,12 +1254,15 @@ # Reverse a slice of a list in place, from lo up to (exclusive) hi. # (used in sort) +TimSort = make_timsort_class() +IntBaseTimSort = make_timsort_class() +StringBaseTimSort = make_timsort_class() + class KeyContainer(baseobjspace.W_Root): def __init__(self, w_key, w_item): self.w_key = w_key self.w_item = w_item -TimSort = make_timsort_class() # NOTE: all the subclasses of TimSort should inherit from a common subclass, # so make sure that only SimpleSort inherits directly from TimSort. 
# This is necessary to hide the parent method TimSort.lt() from the @@ -448,6 +1272,14 @@ space = self.space return space.is_true(space.lt(a, b)) +class IntSort(IntBaseTimSort): + def lt(self, a, b): + return a < b + +class StringSort(StringBaseTimSort): + def lt(self, a, b): + return a < b + class CustomCompareSort(SimpleSort): def lt(self, a, b): space = self.space @@ -476,6 +1308,7 @@ return CustomCompareSort.lt(self, a.w_key, b.w_key) def list_sort__List_ANY_ANY_ANY(space, w_list, w_cmp, w_keyfunc, w_reverse): + has_cmp = not space.is_w(w_cmp, space.w_None) has_key = not space.is_w(w_keyfunc, space.w_None) has_reverse = space.is_true(w_reverse) @@ -490,9 +1323,13 @@ if has_key: sorterclass = CustomKeySort else: - sorterclass = SimpleSort - items = w_list.wrappeditems - sorter = sorterclass(items, len(items)) + if w_list.strategy is space.fromcache(ObjectListStrategy): + sorterclass = SimpleSort + else: + w_list.sort(has_reverse) + return space.w_None + + sorter = sorterclass(w_list.getitems(), w_list.length()) sorter.space = space sorter.w_cmp = w_cmp @@ -500,8 +1337,8 @@ # The list is temporarily made empty, so that mutations performed # by comparison functions can't affect the slice of memory we're # sorting (allowing mutations during sorting is an IndexError or - # core-dump factory, since wrappeditems may change). - w_list.wrappeditems = [] + # core-dump factory, since the storage may change). + w_list.__init__(space, []) # wrap each item in a KeyContainer if needed if has_key: @@ -531,10 +1368,10 @@ sorter.list[i] = w_obj.w_item # check if the user mucked with the list during the sort - mucked = len(w_list.wrappeditems) > 0 + mucked = w_list.length() > 0 # put the items back into the list - w_list.wrappeditems = sorter.list + w_list.__init__(space, sorter.list) if mucked: raise OperationError(space.w_ValueError, diff --git a/pypy/objspace/std/listtype.py b/pypy/objspace/std/listtype.py --- a/pypy/objspace/std/listtype.py +++ b/pypy/objspace/std/listtype.py @@ -11,7 +11,7 @@ list_extend = SMM('extend', 2, doc='L.extend(iterable) -- extend list by appending' ' elements from the iterable') -list_pop = SMM('pop', 2, defaults=(-1,), +list_pop = SMM('pop', 2, defaults=(None,), doc='L.pop([index]) -> item -- remove and return item at' ' index (default last)') list_remove = SMM('remove', 2, @@ -43,7 +43,7 @@ def descr__new__(space, w_listtype, __args__): from pypy.objspace.std.listobject import W_ListObject w_obj = space.allocate_instance(W_ListObject, w_listtype) - W_ListObject.__init__(w_obj, []) + W_ListObject.__init__(w_obj, space, []) return w_obj # ____________________________________________________________ diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -11,6 +11,22 @@ class W_AbstractLongObject(W_Object): __slots__ = () + def is_w(self, space, w_other): + if not isinstance(w_other, W_AbstractLongObject): + return False + if self.user_overridden_class or w_other.user_overridden_class: + return self is w_other + return space.bigint_w(self).eq(space.bigint_w(w_other)) + + def unique_id(self, space): + if self.user_overridden_class: + return W_Object.unique_id(self, space) + from pypy.objspace.std.model import IDTAG_LONG as tag + b = space.bigint_w(self) + b = b.lshift(3).or_(rbigint.fromint(tag)) + return space.newlong_from_rbigint(b) + + class W_LongObject(W_AbstractLongObject): """This is a wrapper of rbigint.""" from pypy.objspace.std.longtype import long_typedef as 
typedef diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py --- a/pypy/objspace/std/marshal_impl.py +++ b/pypy/objspace/std/marshal_impl.py @@ -270,7 +270,7 @@ register(TYPE_TUPLE, unmarshal_Tuple) def marshal_w__List(space, w_list, m): - items = w_list.wrappeditems[:] + items = w_list.getitems()[:] m.put_tuple_w(TYPE_LIST, items) def unmarshal_List(space, u, tc): diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -29,6 +29,11 @@ "proxyobject.W_TransparentDict"], } +IDTAG_INT = 1 +IDTAG_LONG = 3 +IDTAG_FLOAT = 5 +IDTAG_COMPLEX = 7 + class StdTypeModel: def __init__(self, config): diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -309,7 +309,10 @@ return wraptuple(self, list_w) def newlist(self, list_w): - return W_ListObject(list_w) + return W_ListObject(self, list_w) + + def newlist_str(self, list_s): + return W_ListObject.newlist_str(self, list_s) def newdict(self, module=False, instance=False, classofinstance=None, strdict=False): @@ -399,7 +402,7 @@ if isinstance(w_obj, W_TupleObject): t = w_obj.wrappeditems[:] elif isinstance(w_obj, W_ListObject): - t = w_obj.wrappeditems[:] + t = w_obj.getitems_copy() else: return ObjSpace.unpackiterable(self, w_obj, expected_length) if expected_length != -1 and len(t) != expected_length: @@ -413,7 +416,8 @@ if isinstance(w_obj, W_TupleObject): t = w_obj.wrappeditems elif isinstance(w_obj, W_ListObject): - t = w_obj.wrappeditems[:] + # XXX this can copy twice + t = w_obj.getitems()[:] else: if unroll: return make_sure_not_resized(ObjSpace.unpackiterable_unroll( @@ -431,7 +435,7 @@ def listview(self, w_obj, expected_length=-1): if isinstance(w_obj, W_ListObject): - t = w_obj.wrappeditems + t = w_obj.getitems() elif isinstance(w_obj, W_TupleObject): t = w_obj.wrappeditems[:] else: @@ -440,6 +444,11 @@ raise self._wrap_expected_length(expected_length, len(t)) return t + def listview_str(self, w_obj): + if isinstance(w_obj, W_ListObject): + return w_obj.getitems_str() + return None + def sliceindices(self, w_slice, w_length): if isinstance(w_slice, W_SliceObject): a, b, c = w_slice.indices3(self, self.int_w(w_length)) @@ -452,15 +461,6 @@ self.wrap("Expected tuple of length 3")) return self.int_w(l_w[0]), self.int_w(l_w[1]), self.int_w(l_w[2]) - def is_(self, w_one, w_two): - if w_one is w_two: - return self.w_True - return self.w_False - - # short-cut - def is_w(self, w_one, w_two): - return w_one is w_two - def is_true(self, w_obj): # a shortcut for performance # NOTE! this method is typically overridden by builtinshortcut.py. 
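The newlist_str() / listview_str() pair added to objspace.py just above is the hook behind the string-strategy fast paths used elsewhere in this changeset (for example the str.join shortcut in stringobject.py further down): when a W_ListObject stores all of its items with the string strategy, listview_str() hands back the underlying unwrapped list of strings, and None otherwise, so a caller can take a no-wrapping fast path and fall back to the generic code when the strategy does not apply. The following is a toy, self-contained sketch of that calling pattern in plain Python; ToyListObject and join_fast are invented names for illustration only and are not part of the patch.

    # Toy illustration of the listview_str() idea (plain Python, not RPython).
    # A list object exposes its raw string storage only when every element
    # really is a string; otherwise callers must use the generic path.

    class ToyListObject(object):
        def __init__(self, items):
            self.items = items

        def listview_str(self):
            # Return the unwrapped string list, or None if the "strategy"
            # does not apply (i.e. some element is not a str).
            if all(isinstance(x, str) for x in self.items):
                return self.items
            return None

    def join_fast(sep, w_list):
        l = w_list.listview_str()
        if l is not None:
            if len(l) == 1:
                return l[0]          # single item: no join needed
            return sep.join(l)       # fast path: no per-item unwrapping
        # generic fallback for mixed-type lists
        return sep.join(str(x) for x in w_list.items)

    print(join_fast("-", ToyListObject(["a", "b", "c"])))   # "a-b-c", fast path
    print(join_fast("-", ToyListObject(["a", 1, "c"])))     # generic fallback

The point of the real change is the same as in the sketch: the fast path never has to create one wrapped string object per element before joining, which is what the listview_str() check in the str_join code of this changeset relies on.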
diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -453,12 +453,12 @@ multi = r_uint(1822399083) + r_uint(1822399083) + 1 if w_set.hash != 0: return space.wrap(w_set.hash) - hash = 1927868237 - hash *= (len(w_set.setdata) + 1) + hash = r_uint(1927868237) + hash *= r_uint(len(w_set.setdata) + 1) for w_item in w_set.setdata: h = space.hash_w(w_item) - value = ((h ^ (h << 16) ^ 89869747) * multi) - hash = intmask(hash ^ value) + value = (r_uint(h ^ (h << 16) ^ 89869747) * multi) + hash = hash ^ value hash = hash * 69069 + 907133923 if hash == 0: hash = 590923713 diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -5,6 +5,7 @@ from pypy.interpreter import gateway from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.objectmodel import we_are_translated, compute_hash, specialize +from pypy.rlib.objectmodel import compute_unique_id from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice from pypy.objspace.std import slicetype, newformat @@ -12,7 +13,7 @@ from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.tupleobject import W_TupleObject -from pypy.rlib.rstring import StringBuilder +from pypy.rlib.rstring import StringBuilder, split from pypy.interpreter.buffer import StringBuffer from pypy.objspace.std.stringtype import sliced, wrapstr, wrapchar, \ @@ -21,6 +22,21 @@ class W_AbstractStringObject(W_Object): __slots__ = () + def is_w(self, space, w_other): + if not isinstance(w_other, W_AbstractStringObject): + return False + if self is w_other: + return True + if self.user_overridden_class or w_other.user_overridden_class: + return False + return space.str_w(self) is space.str_w(w_other) + + def unique_id(self, space): + if self.user_overridden_class: + return W_Object.unique_id(self, space) + return space.wrap(compute_unique_id(space.str_w(self))) + + class W_StringObject(W_AbstractStringObject): from pypy.objspace.std.stringtype import str_typedef as typedef _immutable_fields_ = ['_value'] @@ -220,7 +236,7 @@ def str_split__String_None_ANY(space, w_self, w_none, w_maxsplit=-1): maxsplit = space.int_w(w_maxsplit) - res_w = [] + res = [] value = w_self._value length = len(value) i = 0 @@ -243,12 +259,12 @@ maxsplit -= 1 # NB. 
if it's already < 0, it stays < 0 # the word is value[i:j] - res_w.append(sliced(space, value, i, j, w_self)) + res.append(value[i:j]) # continue to look from the character following the space after the word i = j + 1 - return space.newlist(res_w) + return space.newlist_str(res) def str_split__String_ANY_ANY(space, w_self, w_by, w_maxsplit=-1): maxsplit = space.int_w(w_maxsplit) @@ -258,33 +274,26 @@ if bylen == 0: raise OperationError(space.w_ValueError, space.wrap("empty separator")) - res_w = [] - start = 0 if bylen == 1 and maxsplit < 0: + res = [] + start = 0 # fast path: uses str.rfind(character) and str.count(character) by = by[0] # annotator hack: string -> char count = value.count(by) - res_w = [None] * (count + 1) + res = [None] * (count + 1) end = len(value) while count >= 0: assert end >= 0 prev = value.rfind(by, 0, end) start = prev + 1 assert start >= 0 - res_w[count] = sliced(space, value, start, end, w_self) + res[count] = value[start:end] count -= 1 end = prev else: - while maxsplit != 0: - next = value.find(by, start) - if next < 0: - break - res_w.append(sliced(space, value, start, next, w_self)) - start = next + bylen - maxsplit -= 1 # NB. if it's already < 0, it stays < 0 - res_w.append(sliced(space, value, start, len(value), w_self)) + res = split(value, by, maxsplit) - return space.newlist(res_w) + return space.newlist_str(res) def str_rsplit__String_None_ANY(space, w_self, w_none, w_maxsplit=-1): maxsplit = space.int_w(w_maxsplit) @@ -359,6 +368,11 @@ 'str_rsplit__String_ANY_ANY', sliced) def str_join__String_ANY(space, w_self, w_list): + l = space.listview_str(w_list) + if l is not None: + if len(l) == 1: + return space.wrap(l[0]) + return space.wrap(w_self._value.join(l)) list_w = space.listview(w_list) size = len(list_w) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -572,15 +572,17 @@ assert getattr(a, s) == 42 def test_setattr_string_identify(self): - attrs = [] + class StrHolder(object): + pass + holder = StrHolder() class A(object): def __setattr__(self, attr, value): - attrs.append(attr) + holder.seen = attr a = A() s = "abc" setattr(a, s, 123) - assert attrs[0] is s + assert holder.seen is s class AppTestDictViews: def test_dictview(self): diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1,3 +1,4 @@ +# coding: iso-8859-15 import random from pypy.objspace.std.listobject import W_ListObject from pypy.interpreter.error import OperationError @@ -8,25 +9,25 @@ class TestW_ListObject(object): def test_is_true(self): w = self.space.wrap - w_list = W_ListObject([]) + w_list = W_ListObject(self.space, []) assert self.space.is_true(w_list) == False - w_list = W_ListObject([w(5)]) + w_list = W_ListObject(self.space, [w(5)]) assert self.space.is_true(w_list) == True - w_list = W_ListObject([w(5), w(3)]) + w_list = W_ListObject(self.space, [w(5), w(3)]) assert self.space.is_true(w_list) == True def test_len(self): w = self.space.wrap - w_list = W_ListObject([]) + w_list = W_ListObject(self.space, []) assert self.space.eq_w(self.space.len(w_list), w(0)) - w_list = W_ListObject([w(5)]) + w_list = W_ListObject(self.space, [w(5)]) assert self.space.eq_w(self.space.len(w_list), w(1)) - w_list = W_ListObject([w(5), w(3), w(99)]*111) + w_list = 
W_ListObject(self.space, [w(5), w(3), w(99)]*111) assert self.space.eq_w(self.space.len(w_list), w(333)) - + def test_getitem(self): w = self.space.wrap - w_list = W_ListObject([w(5), w(3)]) + w_list = W_ListObject(self.space, [w(5), w(3)]) assert self.space.eq_w(self.space.getitem(w_list, w(0)), w(5)) assert self.space.eq_w(self.space.getitem(w_list, w(1)), w(3)) assert self.space.eq_w(self.space.getitem(w_list, w(-2)), w(5)) @@ -38,10 +39,19 @@ self.space.raises_w(self.space.w_IndexError, self.space.getitem, w_list, w(-3)) + def test_getitems(self): + w = self.space.wrap + from pypy.objspace.std.listobject import make_range_list + r = make_range_list(self.space, 1,1,7) + l = [w(1),w(2),w(3),w(4),w(5),w(6),w(7)] + l2 = r.getitems() + for i in range(7): + assert self.space.eq_w(l[i], l2[i]) + def test_random_getitem(self): w = self.space.wrap s = list('qedx387tn3uixhvt 7fh387fymh3dh238 dwd-wq.dwq9') - w_list = W_ListObject(map(w, s)) + w_list = W_ListObject(self.space, map(w, s)) keys = range(-len(s)-5, len(s)+5) choices = keys + [None]*12 stepchoices = [None, None, None, 1, 1, -1, -1, 2, -2, @@ -64,7 +74,7 @@ def test_iter(self): w = self.space.wrap - w_list = W_ListObject([w(5), w(3), w(99)]) + w_list = W_ListObject(self.space, [w(5), w(3), w(99)]) w_iter = self.space.iter(w_list) assert self.space.eq_w(self.space.next(w_iter), w(5)) assert self.space.eq_w(self.space.next(w_iter), w(3)) @@ -74,7 +84,7 @@ def test_contains(self): w = self.space.wrap - w_list = W_ListObject([w(5), w(3), w(99)]) + w_list = W_ListObject(self.space, [w(5), w(3), w(99)]) assert self.space.eq_w(self.space.contains(w_list, w(5)), self.space.w_True) assert self.space.eq_w(self.space.contains(w_list, w(99)), @@ -89,10 +99,10 @@ def test1(testlist, start, stop, step, expected): w_slice = self.space.newslice(w(start), w(stop), w(step)) - w_list = W_ListObject([w(i) for i in testlist]) + w_list = W_ListObject(self.space, [w(i) for i in testlist]) w_result = self.space.getitem(w_list, w_slice) assert self.space.unwrap(w_result) == expected - + for testlist in [[], [5,3,99]]: for start in [-2, 0, 1, 10]: for end in [-1, 2, 999]: @@ -110,11 +120,11 @@ def test1(lhslist, start, stop, rhslist, expected): w_slice = self.space.newslice(w(start), w(stop), w(1)) - w_lhslist = W_ListObject([w(i) for i in lhslist]) - w_rhslist = W_ListObject([w(i) for i in rhslist]) + w_lhslist = W_ListObject(self.space, [w(i) for i in lhslist]) + w_rhslist = W_ListObject(self.space, [w(i) for i in rhslist]) self.space.setitem(w_lhslist, w_slice, w_rhslist) assert self.space.unwrap(w_lhslist) == expected - + test1([5,7,1,4], 1, 3, [9,8], [5,9,8,4]) test1([5,7,1,4], 1, 3, [9], [5,9,4]) @@ -125,14 +135,14 @@ def test_add(self): w = self.space.wrap - w_list0 = W_ListObject([]) - w_list1 = W_ListObject([w(5), w(3), w(99)]) - w_list2 = W_ListObject([w(-7)] * 111) + w_list0 = W_ListObject(self.space, []) + w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list2 = W_ListObject(self.space, [w(-7)] * 111) assert self.space.eq_w(self.space.add(w_list1, w_list1), - W_ListObject([w(5), w(3), w(99), + W_ListObject(self.space, [w(5), w(3), w(99), w(5), w(3), w(99)])) assert self.space.eq_w(self.space.add(w_list1, w_list2), - W_ListObject([w(5), w(3), w(99)] + + W_ListObject(self.space, [w(5), w(3), w(99)] + [w(-7)] * 111)) assert self.space.eq_w(self.space.add(w_list1, w_list0), w_list1) assert self.space.eq_w(self.space.add(w_list0, w_list2), w_list2) @@ -142,8 +152,8 @@ w = self.space.wrap arg = w(2) n = 3 - w_lis = W_ListObject([arg]) - w_lis3 = 
W_ListObject([arg]*n) + w_lis = W_ListObject(self.space, [arg]) + w_lis3 = W_ListObject(self.space, [arg]*n) w_res = self.space.mul(w_lis, w(n)) assert self.space.eq_w(w_lis3, w_res) # commute @@ -152,9 +162,9 @@ def test_setitem(self): w = self.space.wrap - w_list = W_ListObject([w(5), w(3)]) - w_exp1 = W_ListObject([w(5), w(7)]) - w_exp2 = W_ListObject([w(8), w(7)]) + w_list = W_ListObject(self.space, [w(5), w(3)]) + w_exp1 = W_ListObject(self.space, [w(5), w(7)]) + w_exp2 = W_ListObject(self.space, [w(8), w(7)]) self.space.setitem(w_list, w(1), w(7)) assert self.space.eq_w(w_exp1, w_list) self.space.setitem(w_list, w(-2), w(8)) @@ -167,7 +177,7 @@ def test_random_setitem_delitem(self): w = self.space.wrap s = range(39) - w_list = W_ListObject(map(w, s)) + w_list = W_ListObject(self.space, map(w, s)) expected = list(s) keys = range(-len(s)-5, len(s)+5) choices = keys + [None]*12 @@ -183,7 +193,7 @@ for key in keys: if random.random() < 0.15: random.shuffle(s) - w_list = W_ListObject(map(w, s)) + w_list = W_ListObject(self.space, map(w, s)) expected = list(s) try: value = expected[key] @@ -218,11 +228,11 @@ def test_eq(self): w = self.space.wrap - - w_list0 = W_ListObject([]) - w_list1 = W_ListObject([w(5), w(3), w(99)]) - w_list2 = W_ListObject([w(5), w(3), w(99)]) - w_list3 = W_ListObject([w(5), w(3), w(99), w(-1)]) + + w_list0 = W_ListObject(self.space, []) + w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) assert self.space.eq_w(self.space.eq(w_list0, w_list1), self.space.w_False) @@ -236,11 +246,11 @@ self.space.w_False) def test_ne(self): w = self.space.wrap - - w_list0 = W_ListObject([]) - w_list1 = W_ListObject([w(5), w(3), w(99)]) - w_list2 = W_ListObject([w(5), w(3), w(99)]) - w_list3 = W_ListObject([w(5), w(3), w(99), w(-1)]) + + w_list0 = W_ListObject(self.space, []) + w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) assert self.space.eq_w(self.space.ne(w_list0, w_list1), self.space.w_True) @@ -254,12 +264,12 @@ self.space.w_True) def test_lt(self): w = self.space.wrap - - w_list0 = W_ListObject([]) - w_list1 = W_ListObject([w(5), w(3), w(99)]) - w_list2 = W_ListObject([w(5), w(3), w(99)]) - w_list3 = W_ListObject([w(5), w(3), w(99), w(-1)]) - w_list4 = W_ListObject([w(5), w(3), w(9), w(-1)]) + + w_list0 = W_ListObject(self.space, []) + w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) + w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)]) assert self.space.eq_w(self.space.lt(w_list0, w_list1), self.space.w_True) @@ -273,15 +283,15 @@ self.space.w_True) assert self.space.eq_w(self.space.lt(w_list4, w_list3), self.space.w_True) - + def test_ge(self): w = self.space.wrap - - w_list0 = W_ListObject([]) - w_list1 = W_ListObject([w(5), w(3), w(99)]) - w_list2 = W_ListObject([w(5), w(3), w(99)]) - w_list3 = W_ListObject([w(5), w(3), w(99), w(-1)]) - w_list4 = W_ListObject([w(5), w(3), w(9), w(-1)]) + + w_list0 = W_ListObject(self.space, []) + w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) + w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)]) assert 
self.space.eq_w(self.space.ge(w_list0, w_list1), self.space.w_False) @@ -295,15 +305,15 @@ self.space.w_False) assert self.space.eq_w(self.space.ge(w_list4, w_list3), self.space.w_False) - + def test_gt(self): w = self.space.wrap - - w_list0 = W_ListObject([]) - w_list1 = W_ListObject([w(5), w(3), w(99)]) - w_list2 = W_ListObject([w(5), w(3), w(99)]) - w_list3 = W_ListObject([w(5), w(3), w(99), w(-1)]) - w_list4 = W_ListObject([w(5), w(3), w(9), w(-1)]) + + w_list0 = W_ListObject(self.space, []) + w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) + w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)]) assert self.space.eq_w(self.space.gt(w_list0, w_list1), self.space.w_False) @@ -317,15 +327,15 @@ self.space.w_False) assert self.space.eq_w(self.space.gt(w_list4, w_list3), self.space.w_False) - + def test_le(self): w = self.space.wrap - - w_list0 = W_ListObject([]) - w_list1 = W_ListObject([w(5), w(3), w(99)]) - w_list2 = W_ListObject([w(5), w(3), w(99)]) - w_list3 = W_ListObject([w(5), w(3), w(99), w(-1)]) - w_list4 = W_ListObject([w(5), w(3), w(9), w(-1)]) + + w_list0 = W_ListObject(self.space, []) + w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) + w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)]) assert self.space.eq_w(self.space.le(w_list0, w_list1), self.space.w_True) @@ -346,8 +356,62 @@ import sys on_cpython = (option.runappdirect and not hasattr(sys, 'pypy_translation_info')) + cls.w_on_cpython = cls.space.wrap(on_cpython) - cls.w_on_cpython = cls.space.wrap(on_cpython) + def test_getstrategyfromlist_w(self): + l0 = ["a", "2", "a", True] + # this raised TypeError on ListStrategies + l1 = ["a", "2", True, "a"] + l2 = [1, "2", "a", "a"] + assert sorted(l1) == sorted(l2) + + def test_notequals(self): + assert [1,2,3,4] != [1,2,5,4] + + def test_contains(self): + l = [] + assert not l.__contains__(2) + + l = [1,2,3] + assert l.__contains__(2) + assert not l.__contains__("2") + assert l.__contains__(1.0) + + l = ["1","2","3"] + assert l.__contains__("2") + assert not l.__contains__(2) + + l = range(4) + assert l.__contains__(2) + assert not l.__contains__("2") + + l = [1,2,"3"] + assert l.__contains__(2) + assert not l.__contains__("2") + + l = range(2, 20, 3) # = [2, 5, 8, 11, 14, 17] + assert l.__contains__(2) + assert l.__contains__(5) + assert l.__contains__(8) + assert l.__contains__(11) + assert l.__contains__(14) + assert l.__contains__(17) + assert not l.__contains__(3) + assert not l.__contains__(4) + assert not l.__contains__(7) + assert not l.__contains__(13) + assert not l.__contains__(20) + + l = range(2, -20, -3) # [2, -1, -4, -7, -10, -13, -16, -19] + assert l.__contains__(2) + assert l.__contains__(-4) + assert l.__contains__(-13) + assert l.__contains__(-16) + assert l.__contains__(-19) + assert not l.__contains__(-17) + assert not l.__contains__(-3) + assert not l.__contains__(-20) + assert not l.__contains__(-21) def test_call_list(self): assert list('') == [] @@ -385,6 +449,13 @@ l.extend([10]) assert l == range(11) + l = [] + m = [1,2,3] + l.extend(m) + m[0] = 5 + assert m == [5,2,3] + assert l == [1,2,3] + def test_extend_tuple(self): l = l0 = [1] l.extend((2,)) @@ -418,6 +489,10 @@ assert l is l0 assert l == [1] + l = ["c", "a", "d", "b"] + l.sort(reverse=True) + assert l == ["d", "c", 
"b", "a"] + def test_sort_cmp(self): def lencmp(a,b): return cmp(len(a), len(b)) l = [ 'a', 'fiver', 'tre', '' ] @@ -459,6 +534,11 @@ l.sort(reverse = True, key = lower) assert l == ['C', 'b', 'a'] + def test_sort_simple_string(self): + l = ["a", "d", "c", "b"] + l.sort() + assert l == ["a", "b", "c", "d"] + def test_getitem(self): l = [1, 2, 3, 4, 5, 6, 9] assert l[0] == 1 @@ -471,6 +551,21 @@ assert l[-1] == 'c' assert l[-2] == 'b' raises(IndexError, "l[len(l)]") + l = [] + raises(IndexError, "l[1]") + + def test_setitem(self): + + l = [] + raises(IndexError, "l[1] = 2") + + l = [5,3] + l[0] = 2 + assert l == [2,3] + + l = [5,3] + l[0] = "2" + assert l == ["2",3] def test_delitem(self): l = [1, 2, 3, 4, 5, 6, 9] @@ -482,7 +577,7 @@ assert l == [2, 3, 4, 6] raises(IndexError, "del l[len(l)]") raises(IndexError, "del l[-len(l)-1]") - + l = l0 = ['a', 'b', 'c'] del l[0] assert l == ['b', 'c'] @@ -513,7 +608,7 @@ assert l[::] == l assert l[0::-2] == l assert l[-1::-5] == l - + l = [''] assert l[1:] == [] assert l[1::2] == [] @@ -523,6 +618,10 @@ l.extend(['a', 'b']) assert l[::-1] == ['b', 'a', ''] + l = [1,2,3,4,5] + assert l[1:0:None] == [] + assert l[1:0] == [] + def test_delall(self): l = l0 = [1,2,3] del l[:] @@ -564,6 +663,16 @@ l1 += bar assert l1 == ('radd', bar, [1,2,3]) + def test_add_lists(self): + l1 = [1,2,3] + l2 = [4,5,6] + l3 = l1 + l2 + assert l3 == [1,2,3,4,5,6] + + l4 = range(3) + l5 = l4 + l2 + assert l5 == [0,1,2,4,5,6] + def test_imul(self): l = l0 = [4,3] l *= 2 @@ -576,7 +685,7 @@ l *= (-1) assert l is l0 assert l == [] - + l = l0 = ['a', 'b'] l *= 2 assert l is l0 @@ -602,7 +711,7 @@ c = range(10) assert c.index(0) == 0 raises(ValueError, c.index, 10) - + c = list('hello world') assert c.index('l') == 2 raises(ValueError, c.index, '!') @@ -650,7 +759,7 @@ assert l == [] assert l is l0 - def test_ass_extended_slice(self): + def test_assign_extended_slice(self): l = l0 = ['a', 'b', 'c'] l[::-1] = ['a', 'b', 'c'] assert l == ['c', 'b', 'a'] @@ -662,6 +771,41 @@ assert l == [0, 'b', 2] assert l is l0 + l = [1,2,3] + raises(ValueError, "l[0:2:2] = [1,2,3,4]") + raises(ValueError, "l[::2] = []") + + l = range(6) + l[::3] = ('a', 'b') + assert l == ['a', 1, 2, 'b', 4, 5] + + def test_setslice_with_self(self): + l = [1,2,3,4] + l[:] = l + assert l == [1,2,3,4] + + l = [1,2,3,4] + l[0:2] = l + assert l == [1,2,3,4,3,4] + + l = [1,2,3,4] + l[0:2] = l + assert l == [1,2,3,4,3,4] + + l = [1,2,3,4,5,6,7,8,9,10] + raises(ValueError, "l[5::-1] = l") + + l = [1,2,3,4,5,6,7,8,9,10] + raises(ValueError, "l[::2] = l") + + l = [1,2,3,4,5,6,7,8,9,10] + l[5:] = l + assert l == [1,2,3,4,5,1,2,3,4,5,6,7,8,9,10] + + l = [1,2,3,4,5,6] + l[::-1] = l + assert l == [6,5,4,3,2,1] + def test_recursive_repr(self): l = [] assert repr(l) == '[]' @@ -687,6 +831,10 @@ l.append(4) assert l == range(5) + l = [1,2,3] + l.append("a") + assert l == [1,2,3,"a"] + def test_count(self): c = list('hello') assert c.count('l') == 2 @@ -706,6 +854,14 @@ ls.insert(0, i) assert len(ls) == 12 + l = [] + l.insert(4,2) + assert l == [2] + + l = [1,2,3] + l.insert(0,"a") + assert l == ["a", 1, 2, 3] + def test_pop(self): c = list('hello world') s = '' @@ -719,6 +875,9 @@ l.pop() assert l == range(9) + l = [] + raises(IndexError, l.pop, 0) + def test_pop_custom_int(self): class A(object): def __init__(self, x): @@ -733,6 +892,22 @@ assert l == range(9) raises(TypeError, range(10).pop, 1.0) + def test_pop_negative(self): + l1 = [1,2,3,4] + l2 = ["1", "2", "3", "4"] + l3 = range(5) + l4 = [1, 2, 3, "4"] + + 
raises(IndexError, l1.pop, -5) + raises(IndexError, l2.pop, -5) + raises(IndexError, l3.pop, -6) + raises(IndexError, l4.pop, -5) + + assert l1.pop(-2) == 3 + assert l2.pop(-2) == "3" + assert l3.pop(-2) == 3 + assert l4.pop(-2) == 3 + def test_remove(self): c = list('hello world') c.remove('l') @@ -783,6 +958,20 @@ l.remove(5) assert l[10:] == [0, 1, 2, 3, 4, 6, 7, 8, 9] + def test_mutate_while_contains(self): + class Mean(object): + def __init__(self, i): + self.i = i + def __eq__(self, other): + if self.i == 9 == other: + del l[0] + return True + else: + return False + l = [Mean(i) for i in range(10)] + assert l.__contains__(9) + assert not l.__contains__(2) + def test_mutate_while_extend(self): # this used to segfault pypy-c (with py.test -A) import sys @@ -805,16 +994,36 @@ res = l.__getslice__(0, 2) assert res == [1, 2] + l = [] + assert l.__getslice__(0,2) == [] + def test___setslice__(self): l = [1,2,3,4] l.__setslice__(0, 2, [5, 6]) assert l == [5, 6, 3, 4] + l = [] + l.__setslice__(0,0,[3,4,5]) + assert l == [3,4,5] + def test___delslice__(self): l = [1,2,3,4] l.__delslice__(0, 2) assert l == [3, 4] + def test_unicode(self): + s = u"\ufffd\ufffd\ufffd" + assert s.encode("ascii", "replace") == "???" + assert s.encode("ascii", "ignore") == "" + l1 = [s.encode("ascii", "replace")] + assert l1[0] == "???" + + l2 = [s.encode("ascii", "ignore")] + assert l2[0] == "" + + l3 = [s] + assert l1[0].encode("ascii", "replace") == "???" + def test_list_from_set(self): l = ['a'] l.__init__(set('b')) @@ -829,6 +1038,96 @@ assert l == [] assert list(g) == [] +class AppTestForRangeLists(AppTestW_ListObject): + + def setup_class(cls): + cls.space = gettestobjspace(**{"objspace.std.withrangelist" : + True}) + + def test_range_simple_backwards(self): + x = range(5,1) + assert x == [] + + def test_range_big_start(self): + x = range(1,10) + x[22:0:-1] == range(1,10) + + def test_range_list_invalid_slice(self): + x = [1,2,3,4] + assert x[10:0] == [] + assert x[10:0:None] == [] + + x = range(1,5) + assert x[10:0] == [] + assert x[10:0:None] == [] + + assert x[0:22] == [1,2,3,4] + assert x[-1:10] == [4] + + assert x[0:22:None] == [1,2,3,4] + assert x[-1:10:None] == [4] + + def test_range_backwards(self): + x = range(1,10) + assert x[22:-10] == [] + assert x[22:-10:-1] == [9,8,7,6,5,4,3,2,1] + assert x[10:3:-1] == [9,8,7,6,5] + assert x[10:3:-2] == [9,7,5] + assert x[1:5:-1] == [] + + def test_sort_range(self): + l = range(3,10,3) + l.sort() + assert l == [3, 6, 9] + l.sort(reverse = True) + assert l == [9, 6, 3] + l.sort(reverse = True) + assert l == [9, 6, 3] + l.sort() + assert l == [3, 6, 9] + + def test_slice(self): + l = [] + l2 = range(3) + l.__setslice__(0,3,l2) + assert l == [0,1,2] + + def test_getitem(self): + l = range(5) + raises(IndexError, "l[-10]") + + def test_append(self): + l = range(5) + l.append(26) + assert l == [0,1,2,3,4,26] + + l = range(5) + l.append("a") + assert l == [0,1,2,3,4,"a"] + + l = range(5) + l.append(5) + assert l == [0,1,2,3,4,5] + + def test_pop(self): + l = range(3) + assert l.pop(0) == 0 + + def test_setitem(self): + l = range(3) + l[0] = 1 + assert l == [1,1,2] + + def test_inset(self): + l = range(3) + l.insert(1,5) + assert l == [0,5,1,2] From noreply at buildbot.pypy.org Sun Dec 4 20:59:21 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Dec 2011 20:59:21 +0100 (CET) Subject: [pypy-commit] pypy py3k: Fixes after merge Message-ID: <20111204195921.48A278205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: 
r50150:015df90d2965 Date: 2011-12-04 17:17 +0100 http://bitbucket.org/pypy/pypy/changeset/015df90d2965/ Log: Fixes after merge diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -29,12 +29,12 @@ return True if self.user_overridden_class or w_other.user_overridden_class: return False - return space.str_w(self) is space.str_w(w_other) + return space.bytes_w(self) is space.bytes_w(w_other) def unique_id(self, space): if self.user_overridden_class: return W_Object.unique_id(self, space) - return space.wrap(compute_unique_id(space.str_w(self))) + return space.wrap(compute_unique_id(space.bytes_w(self))) class W_StringObject(W_AbstractStringObject): From noreply at buildbot.pypy.org Sun Dec 4 20:59:22 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Dec 2011 20:59:22 +0100 (CET) Subject: [pypy-commit] pypy py3k: Convert StringListStrategy into UnicodeListStrategy Message-ID: <20111204195922.94C848205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50151:94f777e3c69a Date: 2011-12-04 19:30 +0100 http://bitbucket.org/pypy/pypy/changeset/94f777e3c69a/ Log: Convert StringListStrategy into UnicodeListStrategy diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -45,10 +45,10 @@ # check for strings for w_obj in list_w: - if not is_W_StringObject(w_obj): + if not is_W_UnicodeObject(w_obj): break else: - return space.fromcache(StringListStrategy) + return space.fromcache(UnicodeListStrategy) return space.fromcache(ObjectListStrategy) @@ -56,9 +56,9 @@ from pypy.objspace.std.intobject import W_IntObject return type(w_object) is W_IntObject -def is_W_StringObject(w_object): - from pypy.objspace.std.stringobject import W_StringObject - return type(w_object) is W_StringObject +def is_W_UnicodeObject(w_object): + from pypy.objspace.std.unicodeobject import W_UnicodeObject + return type(w_object) is W_UnicodeObject @@ -86,7 +86,7 @@ @staticmethod def newlist_str(space, list_s): - strategy = space.fromcache(StringListStrategy) + strategy = space.fromcache(UnicodeListStrategy) storage = strategy.erase(list_s) return W_ListObject.from_storage_and_strategy(space, storage, strategy) @@ -362,8 +362,8 @@ def switch_to_correct_strategy(self, w_list, w_item): if is_W_IntObject(w_item): strategy = self.space.fromcache(IntegerListStrategy) - elif is_W_StringObject(w_item): - strategy = self.space.fromcache(StringListStrategy) + elif is_W_UnicodeObject(w_item): + strategy = self.space.fromcache(UnicodeListStrategy) else: strategy = self.space.fromcache(ObjectListStrategy) @@ -905,28 +905,28 @@ if reverse: l.reverse() -class StringListStrategy(AbstractUnwrappedStrategy, ListStrategy): +class UnicodeListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None def wrap(self, stringval): return self.space.wrap(stringval) def unwrap(self, w_string): - return self.space.str_w(w_string) + return self.space.unicode_w(w_string) - erase, unerase = rerased.new_erasing_pair("string") + erase, unerase = rerased.new_erasing_pair("unicode") erase = staticmethod(erase) unerase = staticmethod(unerase) def is_correct_type(self, w_obj): - return is_W_StringObject(w_obj) + return is_W_UnicodeObject(w_obj) def list_is_correct_type(self, w_list): - return w_list.strategy is self.space.fromcache(StringListStrategy) + return w_list.strategy is 
self.space.fromcache(UnicodeListStrategy) def sort(self, w_list, reverse): l = self.unerase(w_list.lstorage) - sorter = StringSort(l, len(l)) + sorter = UnicodeSort(l, len(l)) sorter.sort() if reverse: l.reverse() @@ -1256,7 +1256,7 @@ TimSort = make_timsort_class() IntBaseTimSort = make_timsort_class() -StringBaseTimSort = make_timsort_class() +UnicodeBaseTimSort = make_timsort_class() class KeyContainer(baseobjspace.W_Root): def __init__(self, w_key, w_item): @@ -1276,7 +1276,7 @@ def lt(self, a, b): return a < b -class StringSort(StringBaseTimSort): +class UnicodeSort(UnicodeBaseTimSort): def lt(self, a, b): return a < b diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -236,7 +236,7 @@ def str_split__String_None_ANY(space, w_self, w_none, w_maxsplit=-1): maxsplit = space.int_w(w_maxsplit) - res = [] + res_w = [] value = w_self._value length = len(value) i = 0 @@ -259,12 +259,12 @@ maxsplit -= 1 # NB. if it's already < 0, it stays < 0 # the word is value[i:j] - res.append(value[i:j]) + res_w.append(sliced(space, value, i, j, w_self)) # continue to look from the character following the space after the word i = j + 1 - return space.newlist_str(res) + return space.newlist(res_w) def str_split__String_ANY_ANY(space, w_self, w_by, w_maxsplit=-1): maxsplit = space.int_w(w_maxsplit) @@ -275,25 +275,32 @@ raise OperationError(space.w_ValueError, space.wrap("empty separator")) if bylen == 1 and maxsplit < 0: - res = [] start = 0 # fast path: uses str.rfind(character) and str.count(character) by = by[0] # annotator hack: string -> char count = value.count(by) - res = [None] * (count + 1) + res_w = [None] * (count + 1) end = len(value) while count >= 0: assert end >= 0 prev = value.rfind(by, 0, end) start = prev + 1 assert start >= 0 - res[count] = value[start:end] + res_w[count] = sliced(space, value, start, end, w_self) count -= 1 end = prev else: - res = split(value, by, maxsplit) + res_w = [] + while maxsplit != 0: + next = value.find(by, start) + if next < 0: + break + res_w.append(sliced(space, value, start, next, w_self)) + start = next + bylen + maxsplit -= 1 # NB. 
if it's already < 0, it stays < 0 + res_w.append(sliced(space, value, start, len(value), w_self)) - return space.newlist_str(res) + return space.newlist(res_w) def str_rsplit__String_None_ANY(space, w_self, w_none, w_maxsplit=-1): maxsplit = space.int_w(w_maxsplit) @@ -368,11 +375,6 @@ 'str_rsplit__String_ANY_ANY', sliced) def str_join__String_ANY(space, w_self, w_list): - l = space.listview_str(w_list) - if l is not None: - if len(l) == 1: - return space.wrap(l[0]) - return space.wrap(w_self._value.join(l)) list_w = space.listview(w_list) size = len(list_w) @@ -382,8 +384,7 @@ if size == 1: w_s = list_w[0] # only one item, return it if it's not a subclass of str - if (space.is_w(space.type(w_s), space.w_str) or - space.is_w(space.type(w_s), space.w_unicode)): + if space.is_w(space.type(w_s), space.w_str): return w_s return _str_join_many_items(space, w_self, list_w, size) diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,4 +1,4 @@ -from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, StringListStrategy, RangeListStrategy, make_range_list +from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, UnicodeListStrategy, RangeListStrategy, make_range_list from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject @@ -10,7 +10,7 @@ assert isinstance(W_ListObject(self.space, []).strategy, EmptyListStrategy) assert isinstance(W_ListObject(self.space, [self.space.wrap(1),self.space.wrap('a')]).strategy, ObjectListStrategy) assert isinstance(W_ListObject(self.space, [self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]).strategy, IntegerListStrategy) - assert isinstance(W_ListObject(self.space, [self.space.wrap('a'), self.space.wrap('b')]).strategy, StringListStrategy) + assert isinstance(W_ListObject(self.space, [self.space.wrap('a'), self.space.wrap('b')]).strategy, UnicodeListStrategy) def test_empty_to_any(self): l = W_ListObject(self.space, []) @@ -26,7 +26,7 @@ l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) l.append(self.space.wrap('a')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, UnicodeListStrategy) def test_int_to_any(self): l = W_ListObject(self.space, [self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) @@ -38,9 +38,9 @@ def test_string_to_any(self): l = W_ListObject(self.space, [self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, UnicodeListStrategy) l.append(self.space.wrap('d')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, UnicodeListStrategy) l.append(self.space.wrap(3)) assert isinstance(l.strategy, ObjectListStrategy) @@ -51,7 +51,7 @@ l.setitem(0, self.space.wrap('d')) assert self.space.eq_w(l.getitem(0), self.space.wrap('d')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, UnicodeListStrategy) # IntStrategy to ObjectStrategy l = W_ListObject(self.space, [self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) @@ -59,9 +59,9 @@ l.setitem(0, self.space.wrap('d')) assert isinstance(l.strategy, ObjectListStrategy) - # StringStrategy to ObjectStrategy + # UnicodeStrategy to ObjectStrategy l = 
W_ListObject(self.space, [self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, UnicodeListStrategy) l.setitem(0, self.space.wrap(2)) assert isinstance(l.strategy, ObjectListStrategy) @@ -72,9 +72,9 @@ l.insert(3, self.space.wrap(4)) assert isinstance(l.strategy, IntegerListStrategy) - # StringStrategy + # UnicodeStrategy l = W_ListObject(self.space, [self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, UnicodeListStrategy) l.insert(3, self.space.wrap(2)) assert isinstance(l.strategy, ObjectListStrategy) @@ -88,7 +88,7 @@ l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) l.insert(0, self.space.wrap('a')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, UnicodeListStrategy) l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) @@ -158,7 +158,7 @@ l = W_ListObject(self.space, wrapitems(["a","b","c","d","e"])) other = W_ListObject(self.space, wrapitems(["a", "b", "c"])) keep_other_strategy(l, 0, 2, other.length(), other) - assert l.strategy is self.space.fromcache(StringListStrategy) + assert l.strategy is self.space.fromcache(UnicodeListStrategy) l = W_ListObject(self.space, wrapitems(["a",3,"c",4,"e"])) other = W_ListObject(self.space, wrapitems(["a", "b", "c"])) @@ -203,7 +203,7 @@ empty = W_ListObject(self.space, []) assert isinstance(empty.strategy, EmptyListStrategy) empty.extend(W_ListObject(self.space, [self.space.wrap("a"), self.space.wrap("b"), self.space.wrap("c")])) - assert isinstance(empty.strategy, StringListStrategy) + assert isinstance(empty.strategy, UnicodeListStrategy) empty = W_ListObject(self.space, []) assert isinstance(empty.strategy, EmptyListStrategy) @@ -341,9 +341,11 @@ def test_weird_rangelist_bug(self): l = make_range_list(self.space, 1, 1, 3) - from pypy.objspace.std.listobject import getslice__List_ANY_ANY + from pypy.objspace.std.listobject import getitem__List_Slice + w_slice = self.space.newslice( + self.space.wrap(15), self.space.wrap(2222), self.space.w_None) # should not raise - assert getslice__List_ANY_ANY(self.space, l, self.space.wrap(15), self.space.wrap(2222)).strategy == self.space.fromcache(EmptyListStrategy) + assert getitem__List_Slice(self.space, l, w_slice).strategy == self.space.fromcache(EmptyListStrategy) def test_add_to_rangelist(self): @@ -353,12 +355,12 @@ l3 = add__List_List(self.space, l1, l2) assert self.space.eq_w(l3, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3), self.space.wrap(4), self.space.wrap(5)])) - def test_unicode(self): + def test_bytes(self): l1 = W_ListObject(self.space, [self.space.wrap("eins"), self.space.wrap("zwei")]) - assert isinstance(l1.strategy, StringListStrategy) - l2 = W_ListObject(self.space, [self.space.wrap(u"eins"), self.space.wrap(u"zwei")]) + assert isinstance(l1.strategy, UnicodeListStrategy) + l2 = W_ListObject(self.space, [self.space.wrapbytes("eins"), self.space.wrapbytes("zwei")]) assert isinstance(l2.strategy, ObjectListStrategy) - l3 = W_ListObject(self.space, [self.space.wrap("eins"), self.space.wrap(u"zwei")]) + l3 = W_ListObject(self.space, [self.space.wrap("eins"), self.space.wrapbytes("zwei")]) assert isinstance(l3.strategy, ObjectListStrategy) def test_listview_str(self): @@ -384,7 +386,7 @@ space = self.space l = ['a', 'b'] w_l = self.space.newlist_str(l) - assert 
isinstance(w_l.strategy, StringListStrategy) + assert isinstance(w_l.strategy, UnicodeListStrategy) assert space.listview_str(w_l) is l def test_string_uses_newlist_str(self): diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -123,6 +123,12 @@ return space.newbool(container.find(item) != -1) def unicode_join__Unicode_ANY(space, w_self, w_list): + l = space.listview_str(w_list) + if l is not None: + if len(l) == 1: + return space.wrap(l[0]) + return space.wrap(w_self._value.join(l)) + list_w = space.unpackiterable(w_list) size = len(list_w) @@ -555,7 +561,7 @@ def unicode_split__Unicode_None_ANY(space, w_self, w_none, w_maxsplit): maxsplit = space.int_w(w_maxsplit) - res_w = [] + res = [] value = w_self._value length = len(value) i = 0 @@ -578,12 +584,12 @@ maxsplit -= 1 # NB. if it's already < 0, it stays < 0 # the word is value[i:j] - res_w.append(W_UnicodeObject(value[i:j])) + res.append(value[i:j]) # continue to look from the character following the space after the word i = j + 1 - return space.newlist(res_w) + return space.newlist_str(res) def unicode_split__Unicode_Unicode_ANY(space, w_self, w_delim, w_maxsplit): self = w_self._value @@ -594,7 +600,7 @@ raise OperationError(space.w_ValueError, space.wrap('empty separator')) parts = _split_with(self, delim, maxsplit) - return space.newlist([W_UnicodeObject(part) for part in parts]) + return space.newlist_str(parts) def unicode_rsplit__Unicode_None_ANY(space, w_self, w_none, w_maxsplit): From noreply at buildbot.pypy.org Sun Dec 4 20:59:24 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Dec 2011 20:59:24 +0100 (CET) Subject: [pypy-commit] pypy py3k: hg merge default Message-ID: <20111204195924.7632C8205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50152:16620429e5fe Date: 2011-12-04 19:34 +0100 http://bitbucket.org/pypy/pypy/changeset/16620429e5fe/ Log: hg merge default diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -25,7 +25,7 @@ __all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee'] + 'takewhile', 'tee', 'compress', 'product'] try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f diff --git a/pypy/annotation/specialize.py b/pypy/annotation/specialize.py --- a/pypy/annotation/specialize.py +++ b/pypy/annotation/specialize.py @@ -36,9 +36,7 @@ newtup = SpaceOperation('newtuple', starargs, argscopy[-1]) newstartblock.operations.append(newtup) newstartblock.closeblock(Link(argscopy, graph.startblock)) - graph.startblock.isstartblock = False graph.startblock = newstartblock - newstartblock.isstartblock = True argnames = argnames + ['.star%d' % i for i in range(nb_extra_args)] graph.signature = Signature(argnames) # note that we can mostly ignore defaults: if nb_extra_args > 0, diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.6`: http://pypy.org/download.html +.. 
_`Release 1.7`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. Windows is supported, but platform-specific bugs tend to take longer before we notice and fix diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -20,7 +20,7 @@ from pypy.jit.backend.llgraph import symbolic from pypy.jit.codewriter import longlong -from pypy.rlib import libffi +from pypy.rlib import libffi, clibffi from pypy.rlib.objectmodel import ComputedIntSymbolic, we_are_translated from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint @@ -1432,6 +1432,10 @@ res = _getinteriorfield_raw(libffi.types.slong, array, index, width, ofs) return res +def do_getinteriorfield_raw_float(array, index, width, ofs): + res = _getinteriorfield_raw(libffi.types.double, array, index, width, ofs) + return res + def _getfield_raw(struct, fieldnum): STRUCT, fieldname = symbolic.TokenToField[fieldnum] ptr = cast_from_int(lltype.Ptr(STRUCT), struct) @@ -1510,12 +1514,17 @@ do_setinteriorfield_gc_float = new_setinteriorfield_gc(cast_from_floatstorage) do_setinteriorfield_gc_ptr = new_setinteriorfield_gc(cast_from_ptr) -def new_setinteriorfield_raw(ffitype): +def new_setinteriorfield_raw(cast_func, ffitype): def do_setinteriorfield_raw(array, index, newvalue, width, ofs): addr = rffi.cast(rffi.VOIDP, array) + for TYPE, ffitype2 in clibffi.ffitype_map: + if ffitype2 is ffitype: + newvalue = cast_func(TYPE, newvalue) + break return libffi.array_setitem(ffitype, width, addr, index, ofs, newvalue) return do_setinteriorfield_raw -do_setinteriorfield_raw_int = new_setinteriorfield_raw(libffi.types.slong) +do_setinteriorfield_raw_int = new_setinteriorfield_raw(cast_from_int, libffi.types.slong) +do_setinteriorfield_raw_float = new_setinteriorfield_raw(cast_from_floatstorage, libffi.types.double) def do_setfield_raw_int(struct, fieldnum, newvalue): STRUCT, fieldname = symbolic.TokenToField[fieldnum] diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -2,6 +2,8 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan +from pypy.jit.tool.oparser import parse +from pypy.jit.backend.detect_cpu import getcpuclass def newboxes(*values): return [BoxInt(v) for v in values] diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -167,26 +167,22 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity = self._compute_vars_longevity(inputargs, operations) + longevity, useful = 
self._compute_vars_longevity(inputargs, operations) self.longevity = longevity self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) - return operations + return operations, useful def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - jump = operations[-1] - loop_consts = self._compute_loop_consts(inputargs, jump, looptoken) - self.loop_consts = loop_consts - return self._process_inputargs(inputargs), operations + operations, useful = self._prepare(inputargs, operations, allgcrefs) + return self._process_inputargs(inputargs, useful), operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - self.loop_consts = {} + operations, _ = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] @@ -195,7 +191,7 @@ def reserve_param(self, n): self.param_depth = max(self.param_depth, n) - def _process_inputargs(self, inputargs): + def _process_inputargs(self, inputargs, useful): # XXX we can sort out here by longevity if we need something # more optimal floatlocs = [None] * len(inputargs) @@ -211,7 +207,7 @@ arg = inputargs[i] assert not isinstance(arg, Const) reg = None - if arg not in self.loop_consts and self.longevity[arg][1] > -1: + if self.longevity[arg][1] > -1 and arg in useful: if arg.type == FLOAT: # xxx is it really a good idea? at the first CALL they # will all be flushed anyway @@ -287,15 +283,15 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: - loop_consts = {} - else: - loop_consts = {} - for i in range(len(inputargs)): - if inputargs[i] is jump.getarg(i): - loop_consts[inputargs[i]] = i - return loop_consts + #def _compute_loop_consts(self, inputargs, jump, looptoken): + # if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: + # loop_consts = {} + # else: + # loop_consts = {} + # for i in range(len(inputargs)): + # if inputargs[i] is jump.getarg(i): + # loop_consts[inputargs[i]] = i + # return loop_consts def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py @@ -450,8 +446,14 @@ def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" + + # returns a pair longevity/useful. Non-useful variables are ones that + # never appear in the assembler or it does not matter if they appear on + # stack or in registers. 
Main example is loop arguments that go + # only to guard operations or to jump or to finish produced = {} last_used = {} + useful = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -459,8 +461,11 @@ continue assert op.result not in produced produced[op.result] = i + opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) + if opnum != rop.JUMP and opnum != rop.FINISH: + useful[arg] = None if isinstance(arg, Box) and arg not in last_used: last_used[arg] = i if op.is_guard(): @@ -486,7 +491,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity + return longevity, useful def loc(self, v): if v is None: # xxx kludgy diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -149,6 +149,13 @@ self.cpu.execute_token(loop.token) return loop + def prepare_loop(self, ops): + loop = self.parse(ops) + regalloc = RegAlloc(self.cpu.assembler, False) + regalloc.prepare_loop(loop.inputargs, loop.operations, + loop.token, []) + return regalloc + def getint(self, index): return self.cpu.get_latest_value_int(index) @@ -422,6 +429,35 @@ self.run(loop) assert self.getints(9) == range(9) + def test_loopargs(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_2(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + finish(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_3(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + guard_true(i4) [i0, i1, i2, i3, i4] + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + class TestRegallocCompOps(BaseTestRegalloc): def test_cmp_op_0(self): diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -457,6 +457,46 @@ def test_compile_framework_7(self): self.run('compile_framework_7') + def define_compile_framework_7_interior(cls): + # Array of structs containing pointers (test the write barrier + # for setinteriorfield_gc) + S = lltype.GcStruct('S', ('i', lltype.Signed)) + A = lltype.GcArray(lltype.Struct('entry', ('x', lltype.Ptr(S)), + ('y', lltype.Ptr(S)), + ('z', lltype.Ptr(S)))) + class Glob: + a = lltype.nullptr(A) + glob = Glob() + # + def make_s(i): + s = lltype.malloc(S) + s.i = i + return s + # + @unroll_safe + def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): + a = glob.a + if not a: + a = glob.a = lltype.malloc(A, 10) + i = 0 + while i < 10: + a[i].x = make_s(n + i * 100 + 1) + a[i].y = make_s(n + i * 100 + 2) + a[i].z = make_s(n + i * 100 + 3) + i += 1 + i = 0 + while i < 10: + check(a[i].x.i == n + i * 100 + 1) + check(a[i].y.i == n + i * 100 + 2) + check(a[i].z.i == n + i * 100 + 3) + i += 1 + return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s + f(123, *[None]*11) # check that the check() are ok + return None, f, None + + def test_compile_framework_7_interior(self): + self.run('compile_framework_7_interior') + def define_compile_framework_8(cls): # Array of pointers, of unknown length (test write_barrier_from_array) def before(n, x): diff --git a/pypy/jit/codewriter/effectinfo.py 
b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -241,12 +241,15 @@ return op.opname == 'jit_force_quasi_immutable' class RandomEffectsAnalyzer(BoolGraphAnalyzer): - def analyze_direct_call(self, graph, seen=None): - if hasattr(graph, "func") and hasattr(graph.func, "_ptr"): - if graph.func._ptr._obj.random_effects_on_gcobjs: + def analyze_external_call(self, op, seen=None): + try: + funcobj = op.args[0].value._obj + if funcobj.random_effects_on_gcobjs: return True - return super(RandomEffectsAnalyzer, self).analyze_direct_call(graph, - seen) + except (AttributeError, lltype.DelayedPointer): + return True # better safe than sorry + return super(RandomEffectsAnalyzer, self).analyze_external_call( + op, seen) def analyze_simple_operation(self, op, graphinfo): return False diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -15,6 +15,8 @@ from pypy.translator.simplify import get_funcobj from pypy.translator.unsimplify import varoftype +class UnsupportedMallocFlags(Exception): + pass def transform_graph(graph, cpu=None, callcontrol=None, portal_jd=None): """Transform a control flow graph to make it suitable for @@ -205,7 +207,24 @@ if op.args[0] in self.vable_array_vars: self.vable_array_vars[op.result]= self.vable_array_vars[op.args[0]] - rewrite_op_cast_pointer = rewrite_op_same_as + def rewrite_op_cast_pointer(self, op): + newop = self.rewrite_op_same_as(op) + assert newop is None + return + # disabled for now + if (self._is_rclass_instance(op.args[0]) and + self._is_rclass_instance(op.result)): + FROM = op.args[0].concretetype.TO + TO = op.result.concretetype.TO + if lltype._castdepth(TO, FROM) > 0: + vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, TO) + const_vtable = Constant(vtable, lltype.typeOf(vtable)) + return [None, # hack, do the right renaming from op.args[0] to op.result + SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + + def rewrite_op_jit_record_known_class(self, op): + return SpaceOperation("record_known_class", [op.args[0], op.args[1]], None) + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass @@ -481,8 +500,22 @@ def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) ARRAY = op.args[0].value - return self._do_builtin_call(op, 'raw_malloc', + name = 'raw_malloc' + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[2]], extra = (ARRAY,), extrakey = ARRAY) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -599,10 +599,21 @@ return p return _ll_0_alloc_with_del - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, flavor='raw') - return _ll_1_raw_malloc + def build_raw_malloc_builder(zero=False, add_memory_pressure=False, track_allocation=True): + def build_ll_1_raw_malloc(ARRAY): + def _ll_1_raw_malloc(n): + return 
lltype.malloc(ARRAY, n, flavor='raw', zero=zero, add_memory_pressure=add_memory_pressure) + return _ll_1_raw_malloc + return build_ll_1_raw_malloc + + build_ll_1_raw_malloc = build_raw_malloc_builder() + build_ll_1_raw_malloc_zero = build_raw_malloc_builder(zero=True) + build_ll_1_raw_malloc_zero_add_memory_pressure = build_raw_malloc_builder(zero=True, add_memory_pressure=True) + build_ll_1_raw_malloc_add_memory_pressure = build_raw_malloc_builder(add_memory_pressure=True) + build_ll_1_raw_malloc_no_track_allocation = build_raw_malloc_builder(track_allocation=False) + build_ll_1_raw_malloc_zero_no_track_allocation = build_raw_malloc_builder(zero=True, track_allocation=False) + build_ll_1_raw_malloc_zero_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(zero=True, add_memory_pressure=True, track_allocation=False) + build_ll_1_raw_malloc_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(add_memory_pressure=True, track_allocation=False) def build_ll_1_raw_free(ARRAY): def _ll_1_raw_free(p): diff --git a/pypy/jit/codewriter/test/test_call.py b/pypy/jit/codewriter/test/test_call.py --- a/pypy/jit/codewriter/test/test_call.py +++ b/pypy/jit/codewriter/test/test_call.py @@ -192,3 +192,21 @@ [op] = block.operations call_descr = cc.getcalldescr(op) assert call_descr.extrainfo.has_random_effects() + +def test_random_effects_on_stacklet_switch(): + from pypy.jit.backend.llgraph.runner import LLtypeCPU + from pypy.rlib._rffi_stacklet import switch, thread_handle, handle + @jit.dont_look_inside + def f(): + switch(rffi.cast(thread_handle, 0), rffi.cast(handle, 0)) + + rtyper = support.annotate(f, []) + jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0]) + cc = CallControl(LLtypeCPU(rtyper), jitdrivers_sd=[jitdriver_sd]) + res = cc.find_all_graphs(FakePolicy()) + + [f_graph] = [x for x in res if x.func is f] + [block, _] = list(f_graph.iterblocks()) + op = block.operations[-1] + call_descr = cc.getcalldescr(op) + assert call_descr.extrainfo.has_random_effects() diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -1,3 +1,5 @@ + +import py import random try: from itertools import product @@ -15,12 +17,12 @@ from pypy.objspace.flow.model import FunctionGraph, Block, Link from pypy.objspace.flow.model import SpaceOperation, Variable, Constant -from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.lltypesystem.module import ll_math from pypy.translator.unsimplify import varoftype from pypy.jit.codewriter import heaptracker, effectinfo from pypy.jit.codewriter.flatten import ListOfKind -from pypy.jit.codewriter.jtransform import Transformer +from pypy.jit.codewriter.jtransform import Transformer, UnsupportedMallocFlags from pypy.jit.metainterp.history import getkind def const(x): @@ -538,6 +540,44 @@ assert op1.opname == '-live-' assert op1.args == [] +def test_raw_malloc(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw'}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc' # pseudo-function 
as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_zero(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc_zero' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_unsupported_flag(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'unsupported_flag': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address @@ -1140,4 +1180,4 @@ assert op1.opname == 'mark_opaque_ptr' assert op1.args == [v1] assert op1.result is None - assert op2 is None \ No newline at end of file + assert op2 is None diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py --- a/pypy/jit/codewriter/test/test_longlong.py +++ b/pypy/jit/codewriter/test/test_longlong.py @@ -78,7 +78,7 @@ oplist = tr.rewrite_operation(op) assert len(oplist) == 2 assert oplist[0].opname == 'residual_call_irf_f' - assert oplist[0].args[0].value == 'llong_from_int' + assert oplist[0].args[0].value == opname.split('_')[0]+'_from_int' assert oplist[0].args[1] == 'calldescr-84' assert list(oplist[0].args[2]) == [const(0)] assert list(oplist[0].args[3]) == [] diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -518,6 +518,9 @@ @arguments("r") def bhimpl_mark_opaque_ptr(a): pass + @arguments("r", "i") + def bhimpl_record_known_class(a, b): + pass @arguments("i", returns="i") def bhimpl_int_copy(a): diff --git a/pypy/jit/metainterp/gc.py b/pypy/jit/metainterp/gc.py --- a/pypy/jit/metainterp/gc.py +++ b/pypy/jit/metainterp/gc.py @@ -7,6 +7,9 @@ self.config = config +class GC_none(GcDescription): + malloc_zero_filled = True + class GC_boehm(GcDescription): malloc_zero_filled = True diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -260,6 +260,16 @@ def optimize_GUARD_FALSE(self, op): self.optimize_guard(op, CONST_0) + def optimize_RECORD_KNOWN_CLASS(self, op): + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) + assert isinstance(expectedclassbox, Const) + realclassbox = value.get_constant_class(self.optimizer.cpu) + if realclassbox is not None: + assert realclassbox.same_constant(expectedclassbox) + return + value.make_constant_class(expectedclassbox, None) + def optimize_GUARD_CLASS(self, op): value = self.getvalue(op.getarg(0)) expectedclassbox = op.getarg(1) @@ -481,6 +491,9 @@ self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) self.emit_operation(op) + def optimize_SAME_AS(self, op): + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) + dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_', 
default=OptRewrite.emit_operation) optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD') diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -28,6 +28,9 @@ def optimize_MARK_OPAQUE_PTR(self, op): pass + def optimize_RECORD_KNOWN_CLASS(self, op): + pass + dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -6482,6 +6482,21 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() + def test_record_known_class(self): + ops = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + record_known_class(p1, ConstClass(node_vtable)) + guard_class(p1, ConstClass(node_vtable)) [] + jump(p1) + """ + expected = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + jump(p1) + """ + self.optimize_loop(ops, expected) + def test_quasi_immut(self): ops = """ [p0, p1, i0] diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -243,6 +243,18 @@ def opimpl_mark_opaque_ptr(self, box): return self.execute(rop.MARK_OPAQUE_PTR, box) + @arguments("box", "box") + def opimpl_record_known_class(self, box, clsbox): + from pypy.rpython.lltypesystem import llmemory + if self.metainterp.heapcache.is_class_known(box): + return + adr = clsbox.getaddr() + bounding_class = llmemory.cast_adr_to_ptr(adr, rclass.CLASSTYPE) + if bounding_class.subclassrange_max - bounding_class.subclassrange_min == 1: + # precise class knowledge, this can be used + self.execute(rop.RECORD_KNOWN_CLASS, box, clsbox) + self.metainterp.heapcache.class_now_known(box) + @arguments("box") def _opimpl_any_return(self, box): self.metainterp.finishframe(box) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -494,6 +494,7 @@ 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr + 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -14,7 +14,7 @@ from pypy.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside, loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, - isconstant, isvirtual, promote_string, set_param) + isconstant, isvirtual, promote_string, set_param, record_known_class) from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -3585,6 +3585,132 @@ self.interp_operations(f, [5], translationoptions=translationoptions) + def test_annotation_gives_class_knowledge_to_tracer(self): + py.test.skip("disabled") + class Base(object): + pass + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + 
class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + assert isinstance(a, A) + z = a.f() + elif x < 0: + assert isinstance(a, B) + z = a.f() + else: + assert isinstance(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + + def test_give_class_knowledge_to_tracer_explicitly(self): + from pypy.rpython.lltypesystem.lloperation import llop + class Base(object): + def f(self): + raise NotImplementedError + def g(self): + raise NotImplementedError + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + record_known_class(a, A) + z = a.f() + elif x < 0: + record_known_class(a, B) + z = a.f() + else: + record_known_class(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): from pypy.rlib.objectmodel import UnboxedValue diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -612,7 +612,7 @@ return node.value res = self.meta_interp(f, [48, 3], policy=StopAtXPolicy(externfn)) assert res == f(48, 3) - self.check_loop_count(3) + self.check_loop_count(5) res = self.meta_interp(f, [40, 3], policy=StopAtXPolicy(externfn)) assert res == f(40, 3) self.check_loop_count(3) @@ -761,6 +761,27 @@ res = self.meta_interp(f, [0x1F, 0x11]) assert res == f(0x1F, 0x11) + def test_duplicated_virtual(self): + myjitdriver = JitDriver(greens = [], reds = ['n', 'node1', 'node2']) + def f(n): + node1 = self._new() + node1.value = 0 + node2 = self._new() + node2.value = 1 + while n > 0: + myjitdriver.jit_merge_point(n=n, node1=node1, node2=node2) + next = self._new() + next.value = node1.value + node2.value + n + node1 = next + node2 = next + n -= 1 + return node1.value + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_resops(new_with_vtable=0, 
new=0) + + + class VirtualMiscTests: def test_multiple_equal_virtuals(self): diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -255,10 +255,8 @@ s_binding = self.translator.annotator.binding jd._portal_args_s = [s_binding(v) for v in args] graph = copygraph(graph) - graph.startblock.isstartblock = False [jmpp] = find_jit_merge_points([graph]) graph.startblock = support.split_before_jit_merge_point(*jmpp) - graph.startblock.isstartblock = True # a crash in the following checkgraph() means that you forgot # to list some variable in greens=[] or reds=[] in JitDriver, # or that a jit_merge_point() takes a constant as an argument. diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -27,6 +27,7 @@ 'builtinify' : 'interp_magic.builtinify', 'lookup_special' : 'interp_magic.lookup_special', 'do_what_I_mean' : 'interp_magic.do_what_I_mean', + 'list_strategy' : 'interp_magic.list_strategy', } submodules = { diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -5,7 +5,6 @@ from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import IndexCache - def internal_repr(space, w_object): return space.wrap('%r' % (w_object,)) @@ -73,3 +72,11 @@ def do_what_I_mean(space): return space.wrap(42) + +def list_strategy(space, w_list): + from pypy.objspace.std.listobject import W_ListObject + if isinstance(w_list, W_ListObject): + return space.wrap(w_list.strategy._applevel_repr) + else: + w_msg = space.wrap("Can only get the list strategy of a list") + raise OperationError(space.w_TypeError, w_msg) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -5,7 +5,7 @@ def setup_class(cls): if option.runappdirect: py.test.skip("does not make sense on pypy-c") - cls.space = gettestobjspace(**{"objspace.usemodules.select": False}) + cls.space = gettestobjspace(**{"objspace.usemodules.select": False, "objspace.std.withrangelist": True}) def test__isfake(self): from __pypy__ import isfake @@ -54,3 +54,21 @@ from __pypy__ import do_what_I_mean x = do_what_I_mean() assert x == 42 + + def test_list_strategy(self): + from __pypy__ import list_strategy + + l = [1, 2, 3] + assert list_strategy(l) == "int" + l = ["a", "b", "c"] + assert list_strategy(l) == "str" + l = [1.1, 2.2, 3.3] + assert list_strategy(l) == "float" + l = range(3) + assert list_strategy(l) == "range" + l = [1, "b", 3] + assert list_strategy(l) == "object" + l = [] + assert list_strategy(l) == "empty" + o = 5 + raises(TypeError, list_strategy, 5) diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -71,9 +71,7 @@ if self.unicodedata_handler: return self.unicodedata_handler try: - w_import = space.getattr(space.builtin, space.wrap("__import__")) - w_unicodedata = space.call_function(w_import, - space.wrap("unicodedata")) + w_unicodedata = space.getbuiltinmodule("unicodedata") w_getcode = space.getattr(w_unicodedata, space.wrap("_get_code")) except OperationError: return None diff --git 
a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -100,7 +100,6 @@ res, newbuf = self.do_recv_string( space, self.BUFFER_SIZE, maxlength) - res = intmask(res) # XXX why? try: if newbuf: return space.wrap(rffi.charpsize2str(newbuf, res)) @@ -117,7 +116,6 @@ res, newbuf = self.do_recv_string( space, length - offset, PY_SSIZE_T_MAX) - res = intmask(res) # XXX why? try: if newbuf: raise BufferTooShort(space, space.wrap( @@ -148,7 +146,6 @@ res, newbuf = self.do_recv_string( space, self.BUFFER_SIZE, PY_SSIZE_T_MAX) - res = intmask(res) # XXX why? try: if newbuf: w_received = space.wrap(rffi.charpsize2str(newbuf, res)) @@ -413,7 +410,7 @@ self.buffer, min(self.BUFFER_SIZE, buflength), read_ptr, rffi.NULL) if result: - return read_ptr[0], lltype.nullptr(rffi.CCHARP.TO) + return intmask(read_ptr[0]), lltype.nullptr(rffi.CCHARP.TO) err = rwin32.GetLastError() if err == ERROR_BROKEN_PIPE: diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py --- a/pypy/module/_multiprocessing/interp_semaphore.py +++ b/pypy/module/_multiprocessing/interp_semaphore.py @@ -235,7 +235,7 @@ elif timeout >= 0.5 * rwin32.INFINITE: # 25 days raise OperationError(space.w_OverflowError, space.wrap("timeout is too large")) - full_msecs = int(timeout + 0.5) + full_msecs = r_uint(int(timeout + 0.5)) # check whether we can acquire without blocking res = rwin32.WaitForSingleObject(self.handle, 0) @@ -243,7 +243,7 @@ if res != rwin32.WAIT_TIMEOUT: return True - msecs = r_uint(full_msecs) + msecs = full_msecs start = _GetTickCount() while True: @@ -269,7 +269,7 @@ ticks = _GetTickCount() if r_uint(ticks - start) >= full_msecs: return False - msecs = r_uint(full_msecs - (ticks - start)) + msecs = full_msecs - r_uint(ticks - start) # handle result if res != rwin32.WAIT_TIMEOUT: diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -543,6 +543,7 @@ from pypy.rlib.rwin32 import GetLastError return space.wrap(GetLastError()) -def set_last_error(space, w_error): + at unwrap_spec(error=int) +def set_last_error(space, error): from pypy.rlib.rwin32 import SetLastError - SetLastError(space.uint_w(w_error)) + SetLastError(error) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -5,10 +5,11 @@ applevel_name = 'numpypy' interpleveldefs = { - 'array': 'interp_numarray.NDimArray', + 'ndarray': 'interp_numarray.W_NDimArray', 'dtype': 'interp_dtype.W_Dtype', 'ufunc': 'interp_ufuncs.W_Ufunc', + 'array': 'interp_numarray.array', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', 'ones': 'interp_numarray.ones', @@ -16,8 +17,22 @@ 'fromstring': 'interp_support.fromstring', 'flatiter': 'interp_numarray.W_FlatIterator', - 'True_': 'space.w_True', - 'False_': 'space.w_False', + 'True_': 'types.Bool.True', + 'False_': 'types.Bool.False', + + 'generic': 'interp_boxes.W_GenericBox', + 'number': 'interp_boxes.W_NumberBox', + 'integer': 'interp_boxes.W_IntegerBox', + 'signedinteger': 'interp_boxes.W_SignedIntegerBox', + 'bool_': 'interp_boxes.W_BoolBox', + 'int8': 'interp_boxes.W_Int8Box', + 'int16': 'interp_boxes.W_Int16Box', + 'int32': 'interp_boxes.W_Int32Box', + 'int64': 
'interp_boxes.W_Int64Box', + 'int_': 'interp_boxes.W_LongBox', + 'inexact': 'interp_boxes.W_InexactBox', + 'floating': 'interp_boxes.W_FloatingBox', + 'float64': 'interp_boxes.W_Float64Box', } # ufuncs diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -3,13 +3,16 @@ It should not be imported by the module itself """ +import re + from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root -from pypy.module.micronumpy.interp_dtype import W_Float64Dtype, W_BoolDtype +from pypy.module.micronumpy import interp_boxes +from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.module.micronumpy.interp_numarray import (Scalar, BaseArray, - descr_new_array, scalar_w, NDimArray) + scalar_w, W_NDimArray, array) from pypy.module.micronumpy import interp_ufuncs -from pypy.rlib.objectmodel import specialize -import re +from pypy.rlib.objectmodel import specialize, instantiate + class BogusBytecode(Exception): pass @@ -48,15 +51,12 @@ def __init__(self): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild - self.w_float64dtype = W_Float64Dtype(self) def issequence_w(self, w_obj): - return isinstance(w_obj, ListObject) or isinstance(w_obj, NDimArray) + return isinstance(w_obj, ListObject) or isinstance(w_obj, W_NDimArray) def isinstance_w(self, w_obj, w_tp): - if w_obj.tp == w_tp: - return True - return False + return w_obj.tp == w_tp def decode_index4(self, w_idx, size): if isinstance(w_idx, IntObject): @@ -97,8 +97,10 @@ fixedview = listview def float(self, w_obj): - assert isinstance(w_obj, FloatObject) - return w_obj + if isinstance(w_obj, FloatObject): + return w_obj + assert isinstance(w_obj, interp_boxes.W_GenericBox) + return self.float(w_obj.descr_float(self)) def float_w(self, w_obj): assert isinstance(w_obj, FloatObject) @@ -112,7 +114,10 @@ raise NotImplementedError def int(self, w_obj): - return w_obj + if isinstance(w_obj, IntObject): + return w_obj + assert isinstance(w_obj, interp_boxes.W_GenericBox) + return self.int(w_obj.descr_int(self)) def is_true(self, w_obj): assert isinstance(w_obj, BoolObject) @@ -135,6 +140,9 @@ assert isinstance(what, tp) return what + def allocate_instance(self, klass, w_subtype): + return instantiate(klass) + def len_w(self, w_obj): if isinstance(w_obj, ListObject): return len(w_obj.items) @@ -247,7 +255,7 @@ w_rhs = self.rhs.execute(interp) if not isinstance(w_lhs, BaseArray): # scalar - dtype = interp.space.fromcache(W_Float64Dtype) + dtype = get_dtype_cache(interp.space).w_float64dtype w_lhs = scalar_w(interp.space, dtype, w_lhs) assert isinstance(w_lhs, BaseArray) if self.name == '+': @@ -264,8 +272,9 @@ w_res = w_lhs.descr_getitem(interp.space, w_rhs) else: raise NotImplementedError - if not isinstance(w_res, BaseArray): - dtype = interp.space.fromcache(W_Float64Dtype) + if (not isinstance(w_res, BaseArray) and + not isinstance(w_res, interp_boxes.W_GenericBox)): + dtype = get_dtype_cache(interp.space).w_float64dtype w_res = scalar_w(interp.space, dtype, w_res) return w_res @@ -283,7 +292,7 @@ return space.wrap(self.v) def execute(self, interp): - return FloatObject(self.v) + return interp.space.wrap(self.v) class RangeConstant(Node): def __init__(self, v): @@ -291,10 +300,10 @@ def execute(self, interp): w_list = interp.space.newlist( - [interp.space.wrap(float(i)) for i in range(self.v)]) - dtype = interp.space.fromcache(W_Float64Dtype) - return descr_new_array(interp.space, None, w_list, 
w_dtype=dtype, - w_order=None) + [interp.space.wrap(float(i)) for i in range(self.v)] + ) + dtype = get_dtype_cache(interp.space).w_float64dtype + return array(interp.space, w_list, w_dtype=dtype, w_order=None) def __repr__(self): return 'Range(%s)' % self.v @@ -315,9 +324,8 @@ def execute(self, interp): w_list = self.wrap(interp.space) - dtype = interp.space.fromcache(W_Float64Dtype) - return descr_new_array(interp.space, None, w_list, w_dtype=dtype, - w_order=None) + dtype = get_dtype_cache(interp.space).w_float64dtype + return array(interp.space, w_list, w_dtype=dtype, w_order=None) def __repr__(self): return "[" + ", ".join([repr(item) for item in self.items]) + "]" @@ -384,9 +392,11 @@ if isinstance(w_res, BaseArray): return w_res if isinstance(w_res, FloatObject): - dtype = interp.space.fromcache(W_Float64Dtype) + dtype = get_dtype_cache(interp.space).w_float64dtype elif isinstance(w_res, BoolObject): - dtype = interp.space.fromcache(W_BoolDtype) + dtype = get_dtype_cache(interp.space).w_booldtype + elif isinstance(w_res, interp_boxes.W_GenericBox): + dtype = w_res.get_dtype(interp.space) else: dtype = None return scalar_w(interp.space, dtype, w_res) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_boxes.py @@ -0,0 +1,267 @@ +from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import TypeDef +from pypy.objspace.std.floattype import float_typedef +from pypy.objspace.std.inttype import int_typedef +from pypy.objspace.std.typeobject import W_TypeObject +from pypy.rlib.rarithmetic import LONG_BIT +from pypy.tool.sourcetools import func_with_new_name + + +MIXIN_64 = (int_typedef,) if LONG_BIT == 64 else () + +def new_dtype_getter(name): + def get_dtype(space): + from pypy.module.micronumpy.interp_dtype import get_dtype_cache + return getattr(get_dtype_cache(space), "w_%sdtype" % name) + def new(space, w_subtype, w_value): + dtype = get_dtype(space) + return dtype.itemtype.coerce_subtype(space, w_subtype, w_value) + return func_with_new_name(new, name + "_box_new"), staticmethod(get_dtype) + +class PrimitiveBox(object): + _mixin_ = True + + def __init__(self, value): + self.value = value + + def convert_to(self, dtype): + return dtype.box(self.value) + +class W_GenericBox(Wrappable): + _attrs_ = () + + def descr__new__(space, w_subtype, __args__): + assert isinstance(w_subtype, W_TypeObject) + raise operationerrfmt(space.w_TypeError, "cannot create '%s' instances", + w_subtype.get_module_type_name() + ) + + def descr_str(self, space): + return self.descr_repr(space) + + def descr_repr(self, space): + return space.wrap(self.get_dtype(space).itemtype.str_format(self)) + + def descr_int(self, space): + box = self.convert_to(W_LongBox.get_dtype(space)) + assert isinstance(box, W_LongBox) + return space.wrap(box.value) + + def descr_float(self, space): + box = self.convert_to(W_Float64Box.get_dtype(space)) + assert isinstance(box, W_Float64Box) + return space.wrap(box.value) + + def descr_nonzero(self, space): + dtype = self.get_dtype(space) + return space.wrap(dtype.itemtype.bool(self)) + + def _binop_impl(ufunc_name): + def impl(self, space, w_other): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) + + 
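The _binop_impl helper above (and the right-hand and unary variants that follow) build the scalar boxes' arithmetic methods from a ufunc name: each generated method looks the ufunc up lazily at call time and delegates to it. A minimal standalone sketch of that factory pattern follows; UFUNCS and Box are made-up stand-ins for illustration, not the real numpypy objects:

    # Sketch of the "generate methods from a ufunc name" pattern used by
    # W_GenericBox above.  UFUNCS and Box are hypothetical stand-ins.
    UFUNCS = {
        "add": lambda a, b: a + b,
        "multiply": lambda a, b: a * b,
    }

    def _binop_impl(ufunc_name):
        def impl(self, other):
            # look the ufunc up by name at call time (cf. interp_ufuncs.get)
            rhs = other.value if isinstance(other, Box) else other
            return Box(UFUNCS[ufunc_name](self.value, rhs))
        impl.__name__ = "binop_%s_impl" % ufunc_name   # cf. func_with_new_name
        return impl

    class Box(object):
        def __init__(self, value):
            self.value = value
        __add__ = _binop_impl("add")
        __mul__ = _binop_impl("multiply")

    print((Box(2) + Box(3)).value)   # 5
    print((Box(2) * 4).value)        # 8

Generating the methods this way gives every box class the full operator set from one table of ufunc names, without importing the ufunc module at class-definition time.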
def _binop_right_impl(ufunc_name): + def impl(self, space, w_other): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) + + def _unaryop_impl(ufunc_name): + def impl(self, space): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) + + descr_add = _binop_impl("add") + descr_sub = _binop_impl("subtract") + descr_mul = _binop_impl("multiply") + descr_div = _binop_impl("divide") + descr_eq = _binop_impl("equal") + descr_ne = _binop_impl("not_equal") + descr_lt = _binop_impl("less") + descr_le = _binop_impl("less_equal") + descr_gt = _binop_impl("greater") + descr_ge = _binop_impl("greater_equal") + + descr_radd = _binop_right_impl("add") + descr_rmul = _binop_right_impl("multiply") + + descr_neg = _unaryop_impl("negative") + descr_abs = _unaryop_impl("absolute") + + +class W_BoolBox(W_GenericBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("bool") + +class W_NumberBox(W_GenericBox): + _attrs_ = () + +class W_IntegerBox(W_NumberBox): + pass + +class W_SignedIntegerBox(W_IntegerBox): + pass + +class W_UnsignedIntgerBox(W_IntegerBox): + pass + +class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int8") + +class W_UInt8Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint8") + +class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int16") + +class W_UInt16Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint16") + +class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int32") + +class W_UInt32Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint32") + +class W_LongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("long") + +class W_ULongBox(W_UnsignedIntgerBox, PrimitiveBox): + pass + +class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int64") + +class W_UInt64Box(W_UnsignedIntgerBox, PrimitiveBox): + pass + +class W_InexactBox(W_NumberBox): + _attrs_ = () + +class W_FloatingBox(W_InexactBox): + _attrs_ = () + +class W_Float32Box(W_FloatingBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("float32") + +class W_Float64Box(W_FloatingBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("float64") + + + +W_GenericBox.typedef = TypeDef("generic", + __module__ = "numpypy", + + __new__ = interp2app(W_GenericBox.descr__new__.im_func), + + __str__ = interp2app(W_GenericBox.descr_str), + __repr__ = interp2app(W_GenericBox.descr_repr), + __int__ = interp2app(W_GenericBox.descr_int), + __float__ = interp2app(W_GenericBox.descr_float), + __nonzero__ = interp2app(W_GenericBox.descr_nonzero), + + __add__ = interp2app(W_GenericBox.descr_add), + __sub__ = interp2app(W_GenericBox.descr_sub), + __mul__ = interp2app(W_GenericBox.descr_mul), + __div__ = interp2app(W_GenericBox.descr_div), + + __radd__ = interp2app(W_GenericBox.descr_add), + __rmul__ = interp2app(W_GenericBox.descr_rmul), + + __eq__ = interp2app(W_GenericBox.descr_eq), + __ne__ = interp2app(W_GenericBox.descr_ne), + __lt__ = interp2app(W_GenericBox.descr_lt), + __le__ = 
interp2app(W_GenericBox.descr_le), + __gt__ = interp2app(W_GenericBox.descr_gt), + __ge__ = interp2app(W_GenericBox.descr_ge), + + __neg__ = interp2app(W_GenericBox.descr_neg), + __abs__ = interp2app(W_GenericBox.descr_abs), +) + +W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_BoolBox.descr__new__.im_func), +) + +W_NumberBox.typedef = TypeDef("number", W_GenericBox.typedef, + __module__ = "numpypy", +) + +W_IntegerBox.typedef = TypeDef("integer", W_NumberBox.typedef, + __module__ = "numpypy", +) + +W_SignedIntegerBox.typedef = TypeDef("signedinteger", W_IntegerBox.typedef, + __module__ = "numpypy", +) + +W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int8Box.descr__new__.im_func), +) + +W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int16Box.descr__new__.im_func), +) + +W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int32Box.typedef = TypeDef("int32", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int32Box.descr__new__.im_func), +) + +W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +if LONG_BIT == 32: + long_name = "int32" +elif LONG_BIT == 64: + long_name = "int64" +W_LongBox.typedef = TypeDef(long_name, (W_SignedIntegerBox.typedef, int_typedef,), + __module__ = "numpypy", +) + +W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, + __module__ = "numpypy", + __new__ = interp2app(W_Int64Box.descr__new__.im_func), +) + +W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, + __module__ = "numpypy", +) + +W_FloatingBox.typedef = TypeDef("floating", W_InexactBox.typedef, + __module__ = "numpypy", +) + +W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, + __module__ = "numpypy", +) + +W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), + __module__ = "numpypy", + + __new__ = interp2app(W_Float64Box.descr__new__.im_func), +) \ No newline at end of file diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -1,16 +1,11 @@ -import functools -import math - from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app -from pypy.interpreter.typedef import TypeDef, interp_attrproperty, GetSetProperty -from pypy.module.micronumpy import signature -from pypy.objspace.std.floatobject import float2string -from pypy.rlib import rarithmetic, rfloat -from pypy.rlib.rarithmetic import LONG_BIT, widen -from pypy.rlib.objectmodel import specialize, enforceargs -from pypy.rlib.unroll import unrolling_iterable +from pypy.interpreter.typedef import (TypeDef, GetSetProperty, + interp_attrproperty, interp_attrproperty_w) +from pypy.module.micronumpy import types, signature, interp_boxes +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import LONG_BIT from 
pypy.rpython.lltypesystem import lltype, rffi @@ -19,521 +14,218 @@ BOOLLTR = "b" FLOATINGLTR = "f" + +VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True}) + class W_Dtype(Wrappable): - def __init__(self, space): - pass + _immuable_fields_ = ["itemtype", "num", "kind"] + + def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[]): + self.signature = signature.BaseSignature() + self.itemtype = itemtype + self.num = num + self.kind = kind + self.name = name + self.char = char + self.w_box_type = w_box_type + self.alternate_constructors = alternate_constructors + + def malloc(self, length): + # XXX find out why test_zjit explodes with tracking of allocations + return lltype.malloc(VOID_STORAGE, self.itemtype.get_element_size() * length, + zero=True, flavor="raw", + track_allocation=False, add_memory_pressure=True + ) + + @specialize.argtype(1) + def box(self, value): + return self.itemtype.box(value) + + def coerce(self, space, w_item): + return self.itemtype.coerce(space, w_item) + + def getitem(self, storage, i): + return self.itemtype.read(storage, self.itemtype.get_element_size(), i, 0) + + def setitem(self, storage, i, box): + self.itemtype.store(storage, self.itemtype.get_element_size(), i, 0, box) + + def fill(self, storage, box, start, stop): + self.itemtype.fill(storage, self.itemtype.get_element_size(), box, start, stop, 0) def descr__new__(space, w_subtype, w_dtype): + cache = get_dtype_cache(space) + if space.is_w(w_dtype, space.w_None): - return space.fromcache(W_Float64Dtype) + return cache.w_float64dtype + elif space.isinstance_w(w_dtype, w_subtype): + return w_dtype elif space.isinstance_w(w_dtype, space.w_str): - dtype = space.str_w(w_dtype) - for alias, dtype_class in dtypes_by_alias: - if alias == dtype: - return space.fromcache(dtype_class) - elif isinstance(space.interpclass_w(w_dtype), W_Dtype): - return w_dtype - elif space.isinstance_w(w_dtype, space.w_type): - for typename, dtype_class in dtypes_by_apptype: - if space.is_w(getattr(space, "w_%s" % typename), w_dtype): - return space.fromcache(dtype_class) + name = space.str_w(w_dtype) + for dtype in cache.builtin_dtypes: + if dtype.name == name or dtype.char == name: + return dtype + else: + for dtype in cache.builtin_dtypes: + if w_dtype in dtype.alternate_constructors: + return dtype + if w_dtype is dtype.w_box_type: + return dtype raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + def descr_str(self, space): + return space.wrap(self.name) + def descr_repr(self, space): return space.wrap("dtype('%s')" % self.name) - def descr_str(self, space): - return space.wrap(self.name) + def descr_get_itemsize(self, space): + return space.wrap(self.itemtype.get_element_size()) def descr_get_shape(self, space): return space.newtuple([]) - -class BaseBox(object): - pass - -VOID_TP = lltype.Ptr(lltype.Array(lltype.Void, hints={'nolength': True, "uncast_on_llgraph": True})) - -def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype, - expected_size=None): - - class Box(BaseBox): - def __init__(self, val): - self.val = val - - def wrap(self, space): - val = self.val - if valtype is rarithmetic.r_singlefloat: - val = float(val) - return space.wrap(val) - - def convert_to(self, dtype): - return dtype.adapt_val(self.val) - Box.__name__ = "%sBox" % T._name - - TP = lltype.Ptr(lltype.Array(T, hints={'nolength': True})) - class W_LowLevelDtype(W_Dtype): - signature = signature.BaseSignature() - - def erase(self, 
storage): - return rffi.cast(VOID_TP, storage) - - def unerase(self, storage): - return rffi.cast(TP, storage) - - @enforceargs(None, valtype) - def box(self, value): - return Box(value) - - def unbox(self, box): - assert isinstance(box, Box) - return box.val - - def unwrap(self, space, w_item): - raise NotImplementedError - - def malloc(self, size): - # XXX find out why test_zjit explodes with tracking of allocations - return self.erase(lltype.malloc(TP.TO, size, - zero=True, flavor="raw", - track_allocation=False, add_memory_pressure=True - )) - - def getitem(self, storage, i): - return Box(self.unerase(storage)[i]) - - def setitem(self, storage, i, item): - self.unerase(storage)[i] = self.unbox(item) - - def setitem_w(self, space, storage, i, w_item): - self.setitem(storage, i, self.unwrap(space, w_item)) - - def fill(self, storage, item, start, stop): - storage = self.unerase(storage) - item = self.unbox(item) - for i in xrange(start, stop): - storage[i] = item - - @specialize.argtype(1) - def adapt_val(self, val): - return self.box(rffi.cast(TP.TO.OF, val)) - - W_LowLevelDtype.__name__ = "W_%sDtype" % name.capitalize() - W_LowLevelDtype.num = num - W_LowLevelDtype.kind = kind - W_LowLevelDtype.name = name - W_LowLevelDtype.aliases = aliases - W_LowLevelDtype.applevel_types = applevel_types - W_LowLevelDtype.num_bytes = rffi.sizeof(T) - if expected_size is not None: - assert W_LowLevelDtype.num_bytes == expected_size - return W_LowLevelDtype - - -def binop(func): - func._annspecialcase_ = "specialize:call_location" - @functools.wraps(func) - def impl(self, v1, v2): - return self.adapt_val(func(self, - self.for_computation(self.unbox(v1)), - self.for_computation(self.unbox(v2)), - )) - return impl - -def raw_binop(func): - # Returns the result unwrapped. 
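The block being removed here is the old per-dtype arithmetic machinery: binop, raw_binop and unaryop are decorators that unbox their Box arguments, convert them to a computation-friendly representation, run the plain arithmetic, and (for binop/unaryop) re-box the result via adapt_val. A simplified, standalone sketch of that unbox/compute/rebox decorator pattern, using illustrative Box and SimpleDtype classes rather than the real ones:

    import functools

    class Box(object):
        def __init__(self, val):
            self.val = val

    def binop(func):
        # unbox both operands, compute on plain values, re-box the result
        @functools.wraps(func)
        def impl(self, v1, v2):
            return self.box(func(self,
                                 self.for_computation(self.unbox(v1)),
                                 self.for_computation(self.unbox(v2))))
        return impl

    class SimpleDtype(object):
        def box(self, value):
            return Box(value)
        def unbox(self, box):
            return box.val
        def for_computation(self, v):
            return float(v)

        @binop
        def add(self, v1, v2):
            return v1 + v2

    print(SimpleDtype().add(Box(1), Box(2)).val)   # 3.0

In this changeset that logic moves out of the dtype classes and into the separate item types (the new types module), with W_Dtype delegating to self.itemtype.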
- @functools.wraps(func) - def impl(self, v1, v2): - return func(self, - self.for_computation(self.unbox(v1)), - self.for_computation(self.unbox(v2)) - ) - return impl - -def unaryop(func): - @functools.wraps(func) - def impl(self, v): - return self.adapt_val(func(self, self.for_computation(self.unbox(v)))) - return impl - -class ArithmeticTypeMixin(object): - _mixin_ = True - - @binop - def add(self, v1, v2): - return v1 + v2 - @binop - def sub(self, v1, v2): - return v1 - v2 - @binop - def mul(self, v1, v2): - return v1 * v2 - - @unaryop - def pos(self, v): - return +v - @unaryop - def neg(self, v): - return -v - @unaryop - def abs(self, v): - return abs(v) - - @binop - def max(self, v1, v2): - return max(v1, v2) - @binop - def min(self, v1, v2): - return min(v1, v2) - - def bool(self, v): - return bool(self.for_computation(self.unbox(v))) - @raw_binop - def eq(self, v1, v2): - return v1 == v2 - @raw_binop - def ne(self, v1, v2): - return v1 != v2 - @raw_binop - def lt(self, v1, v2): - return v1 < v2 - @raw_binop - def le(self, v1, v2): - return v1 <= v2 - @raw_binop - def gt(self, v1, v2): - return v1 > v2 - @raw_binop - def ge(self, v1, v2): - return v1 >= v2 - - -class FloatArithmeticDtype(ArithmeticTypeMixin): - _mixin_ = True - - def unwrap(self, space, w_item): - return self.adapt_val(space.float_w(space.float(w_item))) - - def for_computation(self, v): - return float(v) - - def str_format(self, item): - return float2string(self.for_computation(self.unbox(item)), 'g', rfloat.DTSF_STR_PRECISION) - - @binop - def div(self, v1, v2): - # XXX this won't work after translation, probably requires ovfcheck - try: - return v1 / v2 - except ZeroDivisionError: - if v1 == v2 == 0.0: - return rfloat.NAN - return rfloat.copysign(rfloat.INFINITY, v1 * v2) - @binop - def mod(self, v1, v2): - return math.fmod(v1, v2) - @binop - def pow(self, v1, v2): - return math.pow(v1, v2) - - @unaryop - def sign(self, v): - if v == 0.0: - return 0.0 - return rfloat.copysign(1.0, v) - @unaryop - def reciprocal(self, v): - if v == 0.0: - return rfloat.copysign(rfloat.INFINITY, v) - return 1.0 / v - @unaryop - def fabs(self, v): - return math.fabs(v) - @unaryop - def floor(self, v): - return math.floor(v) - - @binop - def copysign(self, v1, v2): - return math.copysign(v1, v2) - @unaryop - def exp(self, v): - try: - return math.exp(v) - except OverflowError: - return rfloat.INFINITY - @unaryop - def sin(self, v): - return math.sin(v) - @unaryop - def cos(self, v): - return math.cos(v) - @unaryop - def tan(self, v): - return math.tan(v) - @unaryop - def arcsin(self, v): - if not -1.0 <= v <= 1.0: - return rfloat.NAN - return math.asin(v) - @unaryop - def arccos(self, v): - if not -1.0 <= v <= 1.0: - return rfloat.NAN - return math.acos(v) - @unaryop - def arctan(self, v): - return math.atan(v) - @unaryop - def arcsinh(self, v): - return math.asinh(v) - @unaryop - def arctanh(self, v): - if v == 1.0 or v == -1.0: - return math.copysign(rfloat.INFINITY, v) - if not -1.0 < v < 1.0: - return rfloat.NAN - return math.atanh(v) - @unaryop - def sqrt(self, v): - try: - return math.sqrt(v) - except ValueError: - return rfloat.NAN - -class IntegerArithmeticDtype(ArithmeticTypeMixin): - _mixin_ = True - - def unwrap(self, space, w_item): - return self.adapt_val(space.int_w(space.int(w_item))) - - def for_computation(self, v): - return widen(v) - - def str_format(self, item): - return str(widen(self.unbox(item))) - - @binop - def div(self, v1, v2): - if v2 == 0: - return 0 - return v1 / v2 - @binop - def mod(self, v1, v2): - 
return v1 % v2 - @binop - def pow(self, v1, v2): - res = 1 - while v2 > 0: - if v2 & 1: - res *= v1 - v2 >>= 1 - if v2 == 0: - break - v1 *= v1 - return res - - -class SignedIntegerArithmeticDtype(IntegerArithmeticDtype): - _mixin_ = True - - @unaryop - def sign(self, v): - if v > 0: - return 1 - elif v < 0: - return -1 - else: - assert v == 0 - return 0 - -class UnsignedIntegerArithmeticDtype(IntegerArithmeticDtype): - _mixin_ = True - - @unaryop - def sign(self, v): - return int(v != 0) - - -W_BoolDtype = create_low_level_dtype( - num = 0, kind = BOOLLTR, name = "bool", - aliases = ["?", "bool", "bool8"], - applevel_types = ["bool"], - T = lltype.Bool, - valtype = bool, -) -class W_BoolDtype(SignedIntegerArithmeticDtype, W_BoolDtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.is_true(w_item)) - - def str_format(self, item): - v = self.unbox(item) - return "True" if v else "False" - - def for_computation(self, v): - return int(v) - -W_Int8Dtype = create_low_level_dtype( - num = 1, kind = SIGNEDLTR, name = "int8", - aliases = ["b", "int8", "i1"], - applevel_types = [], - T = rffi.SIGNEDCHAR, - valtype = rffi.SIGNEDCHAR._type, - expected_size = 1, -) -class W_Int8Dtype(SignedIntegerArithmeticDtype, W_Int8Dtype): - pass - -W_UInt8Dtype = create_low_level_dtype( - num = 2, kind = UNSIGNEDLTR, name = "uint8", - aliases = ["B", "uint8", "I1"], - applevel_types = [], - T = rffi.UCHAR, - valtype = rffi.UCHAR._type, - expected_size = 1, -) -class W_UInt8Dtype(UnsignedIntegerArithmeticDtype, W_UInt8Dtype): - pass - -W_Int16Dtype = create_low_level_dtype( - num = 3, kind = SIGNEDLTR, name = "int16", - aliases = ["h", "int16", "i2"], - applevel_types = [], - T = rffi.SHORT, - valtype = rffi.SHORT._type, - expected_size = 2, -) -class W_Int16Dtype(SignedIntegerArithmeticDtype, W_Int16Dtype): - pass - -W_UInt16Dtype = create_low_level_dtype( - num = 4, kind = UNSIGNEDLTR, name = "uint16", - aliases = ["H", "uint16", "I2"], - applevel_types = [], - T = rffi.USHORT, - valtype = rffi.USHORT._type, - expected_size = 2, -) -class W_UInt16Dtype(UnsignedIntegerArithmeticDtype, W_UInt16Dtype): - pass - -W_Int32Dtype = create_low_level_dtype( - num = 5, kind = SIGNEDLTR, name = "int32", - aliases = ["i", "int32", "i4"], - applevel_types = [], - T = rffi.INT, - valtype = rffi.INT._type, - expected_size = 4, -) -class W_Int32Dtype(SignedIntegerArithmeticDtype, W_Int32Dtype): - pass - -W_UInt32Dtype = create_low_level_dtype( - num = 6, kind = UNSIGNEDLTR, name = "uint32", - aliases = ["I", "uint32", "I4"], - applevel_types = [], - T = rffi.UINT, - valtype = rffi.UINT._type, - expected_size = 4, -) -class W_UInt32Dtype(UnsignedIntegerArithmeticDtype, W_UInt32Dtype): - pass - -W_Int64Dtype = create_low_level_dtype( - num = 9, kind = SIGNEDLTR, name = "int64", - aliases = ["q", "int64", "i8"], - applevel_types = ["long"], - T = rffi.LONGLONG, - valtype = rffi.LONGLONG._type, - expected_size = 8, -) -class W_Int64Dtype(SignedIntegerArithmeticDtype, W_Int64Dtype): - pass - -W_UInt64Dtype = create_low_level_dtype( - num = 10, kind = UNSIGNEDLTR, name = "uint64", - aliases = ["Q", "uint64", "I8"], - applevel_types = [], - T = rffi.ULONGLONG, - valtype = rffi.ULONGLONG._type, - expected_size = 8, -) -class W_UInt64Dtype(UnsignedIntegerArithmeticDtype, W_UInt64Dtype): - pass - -if LONG_BIT == 32: - long_dtype = W_Int32Dtype - ulong_dtype = W_UInt32Dtype -elif LONG_BIT == 64: - long_dtype = W_Int64Dtype - ulong_dtype = W_UInt64Dtype -else: - assert False - -class W_LongDtype(long_dtype): - num = 7 - 
aliases = ["l"] - applevel_types = ["int"] - -class W_ULongDtype(ulong_dtype): - num = 8 - aliases = ["L"] - -W_Float32Dtype = create_low_level_dtype( - num = 11, kind = FLOATINGLTR, name = "float32", - aliases = ["f", "float32", "f4"], - applevel_types = [], - T = lltype.SingleFloat, - valtype = rarithmetic.r_singlefloat, - expected_size = 4, -) -class W_Float32Dtype(FloatArithmeticDtype, W_Float32Dtype): - pass - -W_Float64Dtype = create_low_level_dtype( - num = 12, kind = FLOATINGLTR, name = "float64", - aliases = ["d", "float64", "f8"], - applevel_types = ["float"], - T = lltype.Float, - valtype = float, - expected_size = 8, -) -class W_Float64Dtype(FloatArithmeticDtype, W_Float64Dtype): - pass - -ALL_DTYPES = [ - W_BoolDtype, - W_Int8Dtype, W_UInt8Dtype, W_Int16Dtype, W_UInt16Dtype, - W_Int32Dtype, W_UInt32Dtype, W_LongDtype, W_ULongDtype, - W_Int64Dtype, W_UInt64Dtype, - W_Float32Dtype, W_Float64Dtype, -] - -dtypes_by_alias = unrolling_iterable([ - (alias, dtype) - for dtype in ALL_DTYPES - for alias in dtype.aliases -]) -dtypes_by_apptype = unrolling_iterable([ - (apptype, dtype) - for dtype in ALL_DTYPES - for apptype in dtype.applevel_types -]) -dtypes_by_num_bytes = unrolling_iterable(sorted([ - (dtype.num_bytes, dtype) - for dtype in ALL_DTYPES -])) - W_Dtype.typedef = TypeDef("dtype", - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_Dtype.descr__new__.im_func), + __str__= interp2app(W_Dtype.descr_str), __repr__ = interp2app(W_Dtype.descr_repr), - __str__ = interp2app(W_Dtype.descr_str), num = interp_attrproperty("num", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), - itemsize = interp_attrproperty("num_bytes", cls=W_Dtype), + type = interp_attrproperty_w("w_box_type", cls=W_Dtype), + itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), shape = GetSetProperty(W_Dtype.descr_get_shape), ) W_Dtype.typedef.acceptable_as_base_class = False + +class DtypeCache(object): + def __init__(self, space): + self.w_booldtype = W_Dtype( + types.Bool(), + num=0, + kind=BOOLLTR, + name="bool", + char="?", + w_box_type = space.gettypefor(interp_boxes.W_BoolBox), + alternate_constructors=[space.w_bool], + ) + self.w_int8dtype = W_Dtype( + types.Int8(), + num=1, + kind=SIGNEDLTR, + name="int8", + char="b", + w_box_type = space.gettypefor(interp_boxes.W_Int8Box) + ) + self.w_uint8dtype = W_Dtype( + types.UInt8(), + num=2, + kind=UNSIGNEDLTR, + name="uint8", + char="B", + w_box_type = space.gettypefor(interp_boxes.W_UInt8Box), + ) + self.w_int16dtype = W_Dtype( + types.Int16(), + num=3, + kind=SIGNEDLTR, + name="int16", + char="h", + w_box_type = space.gettypefor(interp_boxes.W_Int16Box), + ) + self.w_uint16dtype = W_Dtype( + types.UInt16(), + num=4, + kind=UNSIGNEDLTR, + name="uint16", + char="H", + w_box_type = space.gettypefor(interp_boxes.W_UInt16Box), + ) + self.w_int32dtype = W_Dtype( + types.Int32(), + num=5, + kind=SIGNEDLTR, + name="int32", + char="i", + w_box_type = space.gettypefor(interp_boxes.W_Int32Box), + ) + self.w_uint32dtype = W_Dtype( + types.UInt32(), + num=6, + kind=UNSIGNEDLTR, + name="uint32", + char="I", + w_box_type = space.gettypefor(interp_boxes.W_UInt32Box), + ) + if LONG_BIT == 32: + name = "int32" + elif LONG_BIT == 64: + name = "int64" + self.w_longdtype = W_Dtype( + types.Long(), + num=7, + kind=SIGNEDLTR, + name=name, + char="l", + w_box_type = space.gettypefor(interp_boxes.W_LongBox), + alternate_constructors=[space.w_int], + ) + self.w_ulongdtype = W_Dtype( + types.ULong(), + num=8, + kind=UNSIGNEDLTR, + name="u" + name, + 
char="L", + w_box_type = space.gettypefor(interp_boxes.W_ULongBox), + ) + self.w_int64dtype = W_Dtype( + types.Int64(), + num=9, + kind=SIGNEDLTR, + name="int64", + char="q", + w_box_type = space.gettypefor(interp_boxes.W_Int64Box), + alternate_constructors=[space.w_long], + ) + self.w_uint64dtype = W_Dtype( + types.UInt64(), + num=10, + kind=UNSIGNEDLTR, + name="uint64", + char="Q", + w_box_type = space.gettypefor(interp_boxes.W_UInt64Box), + ) + self.w_float32dtype = W_Dtype( + types.Float32(), + num=11, + kind=FLOATINGLTR, + name="float32", + char="f", + w_box_type = space.gettypefor(interp_boxes.W_Float32Box), + ) + self.w_float64dtype = W_Dtype( + types.Float64(), + num=12, + kind=FLOATINGLTR, + name="float64", + char="d", + w_box_type = space.gettypefor(interp_boxes.W_Float64Box), + alternate_constructors=[space.w_float], + ) + + self.builtin_dtypes = [ + self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, + self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype, + self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype, + self.w_int64dtype, self.w_uint64dtype, self.w_float32dtype, + self.w_float64dtype + ] + self.dtypes_by_num_bytes = sorted( + (dtype.itemtype.get_element_size(), dtype) + for dtype in self.builtin_dtypes + ) + +def get_dtype_cache(space): + return space.fromcache(DtypeCache) \ No newline at end of file diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -98,47 +98,6 @@ endshape[i] = remainder[i] return endshape -def descr_new_array(space, w_subtype, w_item_or_iterable, w_dtype=None, - w_order=NoneNotWrapped): - # find scalar - if not space.issequence_w(w_item_or_iterable): - if space.is_w(w_dtype, space.w_None): - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, - w_item_or_iterable) - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - return scalar_w(space, dtype, w_item_or_iterable) - if w_order is None: - order = 'C' - else: - order = space.str_w(w_order) - if order != 'C': # or order != 'F': - raise operationerrfmt(space.w_ValueError, "Unknown order: %s", - order) - shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) - # they come back in C order - size = len(elems_w) - if space.is_w(w_dtype, space.w_None): - w_dtype = None - for w_elem in elems_w: - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, - w_dtype) - if w_dtype is space.fromcache(interp_dtype.W_Float64Dtype): - break - if w_dtype is None: - w_dtype = space.w_None - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - arr = NDimArray(size, shape[:], dtype=dtype, order=order) - shapelen = len(shape) - arr_iter = arr.start_iter(arr.shape) - for i in range(len(elems_w)): - w_elem = elems_w[i] - dtype.setitem_w(space, arr.storage, arr_iter.offset, w_elem) - arr_iter = arr_iter.next(shapelen) - return arr # Iterators for arrays # -------------------- @@ -378,6 +337,13 @@ def add_invalidates(self, other): self.invalidates.append(other) + def descr__new__(space, w_subtype, w_size, w_dtype=None): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + size, shape = _find_size_and_shape(space, w_size) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + def _unaryop_impl(ufunc_name): def impl(self, space): return 
getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) @@ -451,8 +417,8 @@ self=self, dtype=dtype, i=i, result=result, idx=idx, cur_best=cur_best) - new_best = getattr(dtype, op_name)(cur_best, self.eval(i)) - if dtype.ne(new_best, cur_best): + new_best = getattr(dtype.itemtype, op_name)(cur_best, self.eval(i)) + if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best i = i.next(shapelen) @@ -462,8 +428,7 @@ size = self.find_size() if size == 0: raise OperationError(space.w_ValueError, - space.wrap("Can't call %s on zero-size arrays" \ - % op_name)) + space.wrap("Can't call %s on zero-size arrays" % op_name)) return space.wrap(loop(self)) return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) @@ -475,7 +440,7 @@ all_driver.jit_merge_point(signature=self.signature, shapelen=shapelen, self=self, dtype=dtype, i=i) - if not dtype.bool(self.eval(i)): + if not dtype.itemtype.bool(self.eval(i)): return False i = i.next(shapelen) return True @@ -490,7 +455,7 @@ any_driver.jit_merge_point(signature=self.signature, shapelen=shapelen, self=self, dtype=dtype, i=i) - if dtype.bool(self.eval(i)): + if dtype.itemtype.bool(self.eval(i)): return True i = i.next(shapelen) return False @@ -542,8 +507,8 @@ res.append(')') else: concrete.to_str(space, 1, res, indent=' ') - if (dtype is not space.fromcache(interp_dtype.W_Float64Dtype) and - dtype is not space.fromcache(interp_dtype.W_Int64Dtype)) or \ + if (dtype is not interp_dtype.get_dtype_cache(space).w_float64dtype and + dtype is not interp_dtype.get_dtype_cache(space).w_int64dtype) or \ not self.find_size(): res.append(", dtype=" + dtype.name) res.append(")") @@ -612,7 +577,7 @@ start = False else: builder.append(spacer) - builder.append(dtype.str_format(self.getitem(item))) + builder.append(dtype.itemtype.str_format(self.getitem(item))) item += self.strides[0] # Add a comma only if comma is False - this prevents adding two # commas @@ -625,7 +590,7 @@ start = False else: builder.append(spacer) - builder.append(dtype.str_format(self.getitem(item))) + builder.append(dtype.itemtype.str_format(self.getitem(item))) item += self.strides[0] i += 1 else: @@ -712,7 +677,7 @@ raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) item = concrete._index_of_single_item(space, w_idx) - return concrete.getitem(item).wrap(space) + return concrete.getitem(item) chunks = self._prepare_slice_args(space, w_idx) return space.wrap(self.create_slice(space, chunks)) @@ -771,14 +736,15 @@ shape[:]) def descr_mean(self, space): - return space.wrap(space.float_w(self.descr_sum(space)) / self.find_size()) + return space.div(self.descr_sum(space), space.wrap(self.find_size())) def descr_nonzero(self, space): if self.find_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) - return space.wrap(space.is_true(self.get_concrete().eval( - self.start_iter(self.shape)).wrap(space))) + return space.wrap(space.is_true( + self.get_concrete().eval(self.start_iter(self.shape)) + )) def descr_get_transpose(self, space): concrete = self.get_concrete() @@ -814,17 +780,14 @@ return w_obj elif space.issequence_w(w_obj): # Convert to array. 
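(With the descr__new__ added above, the application-level array type -- renamed to ndarray further down -- can be instantiated directly from a shape. A small usage sketch, mirroring the new test_ndarray test later in this diff and assuming a PyPy build with the micronumpy module enabled:

    from numpypy import ndarray, array, dtype

    a = ndarray((2, 3))        # allocate by shape; default dtype is float64
    assert a.shape == (2, 3)
    assert a.dtype == dtype(float)

    b = ndarray(3, dtype=int)  # 1-d allocation with an explicit dtype
    assert b.shape == (3,)
    assert b.dtype is dtype(int)

    c = array([1, 2, 3])       # array() still builds from a sequence
    assert c.shape == (3,)
)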
- w_obj = space.call_function(space.gettypefor(BaseArray), w_obj) - assert isinstance(w_obj, BaseArray) - return w_obj + return array(space, w_obj, w_order=None) else: # If it's a scalar dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) return scalar_w(space, dtype, w_obj) def scalar_w(space, dtype, w_obj): - assert isinstance(dtype, interp_dtype.W_Dtype) - return Scalar(dtype, dtype.unwrap(space, w_obj)) + return Scalar(dtype, dtype.coerce(space, w_obj)) class Scalar(BaseArray): """ @@ -835,6 +798,7 @@ _attrs_ = ["dtype", "value", "shape"] def __init__(self, dtype, value): + self.shape = self.strides = [] BaseArray.__init__(self, [], 'C') self.dtype = dtype self.value = value @@ -858,7 +822,7 @@ return ConstantIterator() def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): - builder.append(self.dtype.str_format(self.value)) + builder.append(self.dtype.itemtype.str_format(self.value)) def copy(self): return Scalar(self.dtype, self.value) @@ -884,7 +848,7 @@ i = 0 signature = self.signature result_size = self.find_size() - result = NDimArray(result_size, self.shape, self.find_dtype()) + result = W_NDimArray(result_size, self.shape, self.find_dtype()) shapelen = len(self.shape) i = self.start_iter() ri = result.start_iter() @@ -1110,7 +1074,15 @@ def debug_repr(self): return 'Slice(%s)' % self.parent.debug_repr() -class NDimArray(BaseArray): + def copy(self): + array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) + iter = self.start_iter() + while not iter.done(): + array.setitem(iter.offset, self.getitem(iter.offset)) + iter = iter.next(len(self.shape)) + return array + +class W_NDimArray(BaseArray): """ A class representing contiguous array. We know that each iteration by say ufunc will increase the data index by one """ @@ -1137,11 +1109,11 @@ return self.dtype.getitem(self.storage, iter.get_offset()) def copy(self): - array = NDimArray(self.size, self.shape[:], self.dtype, self.order) + array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) rffi.c_memcpy( - rffi.cast(rffi.VOIDP, array.storage), - rffi.cast(rffi.VOIDP, self.storage), - self.size * self.dtype.num_bytes + array.storage, + self.storage, + self.size * self.dtype.itemtype.get_element_size() ) return array @@ -1152,8 +1124,7 @@ "len() of unsized object")) def setitem_w(self, space, item, w_value): - self.invalidated() - self.dtype.setitem_w(space, self.storage, item, w_value) + return self.setitem(item, self.dtype.coerce(space, w_value)) def setitem(self, item, value): self.invalidated() @@ -1185,20 +1156,62 @@ shape.append(item) return size, shape +def array(space, w_item_or_iterable, w_dtype=None, w_order=NoneNotWrapped): + # find scalar + if not space.issequence_w(w_item_or_iterable): + if space.is_w(w_dtype, space.w_None): + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, + w_item_or_iterable) + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + return scalar_w(space, dtype, w_item_or_iterable) + if w_order is None: + order = 'C' + else: + order = space.str_w(w_order) + if order != 'C': # or order != 'F': + raise operationerrfmt(space.w_ValueError, "Unknown order: %s", + order) + shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) + # they come back in C order + size = len(elems_w) + if space.is_w(w_dtype, space.w_None): + w_dtype = None + for w_elem in elems_w: + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, + w_dtype) + if w_dtype is 
interp_dtype.get_dtype_cache(space).w_float64dtype: + break + if w_dtype is None: + w_dtype = space.w_None + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) + shapelen = len(shape) + arr_iter = arr.start_iter(arr.shape) + for i in range(len(elems_w)): + w_elem = elems_w[i] + dtype.setitem(arr.storage, arr_iter.offset, dtype.coerce(space, w_elem)) + arr_iter = arr_iter.next(shapelen) + return arr + def zeros(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) size, shape = _find_size_and_shape(space, w_size) - return space.wrap(NDimArray(size, shape[:], dtype=dtype)) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) def ones(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) + size, shape = _find_size_and_shape(space, w_size) - arr = NDimArray(size, shape[:], dtype=dtype) - one = dtype.adapt_val(1) + arr = W_NDimArray(size, shape[:], dtype=dtype) + one = dtype.box(1) arr.dtype.fill(arr.storage, one, 0, size) return space.wrap(arr) @@ -1209,9 +1222,9 @@ return w_arr.descr_dot(space, w_obj2) BaseArray.typedef = TypeDef( - 'numarray', - __new__ = interp2app(descr_new_array), - + 'ndarray', + __module__ = "numpypy", + __new__ = interp2app(BaseArray.descr__new__.im_func), __len__ = interp2app(BaseArray.descr_len), __getitem__ = interp2app(BaseArray.descr_getitem), @@ -1300,10 +1313,10 @@ def descr_next(self, space): if self.iter.done(): - raise OperationError(space.w_StopIteration, space.wrap('')) + raise OperationError(space.w_StopIteration, space.w_None) result = self.eval(self.iter) self.iter = self.iter.next(self.shapelen) - return result.wrap(space) + return result def descr_iter(self): return self diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,6 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.interp_dtype import W_Float64Dtype +from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.rlib.rstruct.runpack import runpack from pypy.rpython.lltypesystem import lltype, rffi @@ -9,7 +9,7 @@ @unwrap_spec(s=str) def fromstring(space, s): - from pypy.module.micronumpy.interp_numarray import NDimArray + from pypy.module.micronumpy.interp_numarray import W_NDimArray length = len(s) if length % FLOAT_SIZE == 0: @@ -18,8 +18,8 @@ raise OperationError(space.w_ValueError, space.wrap( "string length %d not divisable by %d" % (length, FLOAT_SIZE))) - dtype = space.fromcache(W_Float64Dtype) - a = NDimArray(number, [number], dtype=dtype) + dtype = get_dtype_cache(space).w_float64dtype + a = W_NDimArray(number, [number], dtype=dtype) start = 0 end = FLOAT_SIZE diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import interp_dtype, signature +from 
pypy.module.micronumpy import interp_boxes, interp_dtype, signature, types from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name @@ -15,6 +15,7 @@ class W_Ufunc(Wrappable): _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"] + _immutable_fields_ = ["promote_to_float", "promote_bools"] def __init__(self, name, promote_to_float, promote_bools, identity): self.name = name @@ -29,7 +30,7 @@ def descr_get_identity(self, space): if self.identity is None: return space.w_None - return self.identity.wrap(space) + return self.identity def descr_call(self, space, __args__): if __args__.keywords or len(__args__.arguments_w) < self.argcount: @@ -80,8 +81,7 @@ new_sig = signature.Signature.find_sig([ self.reduce_signature, obj.signature ]) - return self.reduce_loop(new_sig, shapelen, start, value, obj, - dtype).wrap(space) + return self.reduce_loop(new_sig, shapelen, start, value, obj, dtype) def reduce_loop(self, signature, shapelen, i, value, obj, dtype): while not i.done(): @@ -115,7 +115,7 @@ promote_bools=self.promote_bools, ) if isinstance(w_obj, Scalar): - return self.func(res_dtype, w_obj.value.convert_to(res_dtype)).wrap(space) + return self.func(res_dtype, w_obj.value.convert_to(res_dtype)) new_sig = signature.Signature.find_sig([self.signature, w_obj.signature]) w_res = Call1(new_sig, w_obj.shape, res_dtype, w_obj, w_obj.order) @@ -124,6 +124,7 @@ class W_Ufunc2(W_Ufunc): + _immutable_fields_ = ["comparison_func", "func"] argcount = 2 def __init__(self, func, name, promote_to_float=False, promote_bools=False, @@ -148,14 +149,14 @@ promote_bools=self.promote_bools, ) if self.comparison_func: - res_dtype = space.fromcache(interp_dtype.W_BoolDtype) + res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar): return self.func(calc_dtype, w_lhs.value.convert_to(calc_dtype), w_rhs.value.convert_to(calc_dtype) - ).wrap(space) + ) new_sig = signature.Signature.find_sig([ self.signature, w_lhs.signature, w_rhs.signature @@ -169,7 +170,7 @@ W_Ufunc.typedef = TypeDef("ufunc", - __module__ = "numpy", + __module__ = "numpypy", __call__ = interp2app(W_Ufunc.descr_call), __repr__ = interp2app(W_Ufunc.descr_repr), @@ -187,7 +188,7 @@ dt1, dt2 = dt2, dt1 # Some operations promote op(bool, bool) to return int8, rather than bool if promote_bools and (dt1.kind == dt2.kind == interp_dtype.BOOLLTR): - return space.fromcache(interp_dtype.W_Int8Dtype) + return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: return find_unaryop_result_dtype(space, dt2, promote_to_float=True) # If they're the same kind, choose the greater one. @@ -197,14 +198,14 @@ # Everything promotes to float, and bool promotes to everything. 
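(Restated outside the interpreter, the promotion logic in find_binop_result_dtype behaves roughly like the toy below for the simple cases -- same-kind operands and bool handling; the mixed signed/unsigned branch that follows adds further rules not captured here. Dtype names stand in for the real W_Dtype objects:

    def binop_result(dt1, dt2, promote_bools=False):
        # one-dimensional "ladder"; later entries win for same-kind operands
        order = ["bool", "int8", "uint8", "int16", "uint16", "int32",
                 "uint32", "int64", "uint64", "float32", "float64"]
        if promote_bools and dt1 == dt2 == "bool":
            return "int8"
        return max(dt1, dt2, key=order.index)

    assert binop_result("bool", "bool") == "bool"
    assert binop_result("bool", "bool", promote_bools=True) == "int8"
    assert binop_result("int8", "float64") == "float64"
)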
if dt2.kind == interp_dtype.FLOATINGLTR or dt1.kind == interp_dtype.BOOLLTR: # Float32 + 8-bit int = Float64 - if dt2.num == 11 and dt1.num_bytes >= 4: - return space.fromcache(interp_dtype.W_Float64Dtype) + if dt2.num == 11 and dt1.itemtype.get_element_size() >= 4: + return interp_dtype.get_dtype_cache(space).w_float64dtype return dt2 # for now this means mixing signed and unsigned if dt2.kind == interp_dtype.SIGNEDLTR: # if dt2 has a greater number of bytes, then just go with it - if dt1.num_bytes < dt2.num_bytes: + if dt1.itemtype.get_element_size() < dt2.itemtype.get_element_size(): return dt2 # we need to promote both dtypes dtypenum = dt2.num + 2 @@ -214,10 +215,11 @@ # UInt64 + signed = Float64 if dt2.num == 10: dtypenum += 1 - newdtype = interp_dtype.ALL_DTYPES[dtypenum] + newdtype = interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum] - if newdtype.num_bytes > dt2.num_bytes or newdtype.kind == interp_dtype.FLOATINGLTR: - return space.fromcache(newdtype) + if (newdtype.itemtype.get_element_size() > dt2.itemtype.get_element_size() or + newdtype.kind == interp_dtype.FLOATINGLTR): + return newdtype else: # we only promoted to long on 32-bit or to longlong on 64-bit # this is really for dealing with the Long and Ulong dtypes @@ -225,35 +227,42 @@ dtypenum += 2 else: dtypenum += 3 - return space.fromcache(interp_dtype.ALL_DTYPES[dtypenum]) + return interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum] def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): if promote_bools and (dt.kind == interp_dtype.BOOLLTR): - return space.fromcache(interp_dtype.W_Int8Dtype) + return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: if dt.kind == interp_dtype.FLOATINGLTR: return dt if dt.num >= 5: - return space.fromcache(interp_dtype.W_Float64Dtype) - for bytes, dtype in interp_dtype.dtypes_by_num_bytes: - if dtype.kind == interp_dtype.FLOATINGLTR and dtype.num_bytes > dt.num_bytes: - return space.fromcache(dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype + for bytes, dtype in interp_dtype.get_dtype_cache(space).dtypes_by_num_bytes: + if (dtype.kind == interp_dtype.FLOATINGLTR and + dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): + return dtype if promote_to_largest: if dt.kind == interp_dtype.BOOLLTR or dt.kind == interp_dtype.SIGNEDLTR: - return space.fromcache(interp_dtype.W_Int64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype elif dt.kind == interp_dtype.FLOATINGLTR: - return space.fromcache(interp_dtype.W_Float64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype elif dt.kind == interp_dtype.UNSIGNEDLTR: - return space.fromcache(interp_dtype.W_UInt64Dtype) + return interp_dtype.get_dtype_cache(space).w_uint64dtype else: assert False return dt def find_dtype_for_scalar(space, w_obj, current_guess=None): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - long_dtype = space.fromcache(interp_dtype.W_LongDtype) - int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype) + bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype + long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype + int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype + + if isinstance(w_obj, interp_boxes.W_GenericBox): + dtype = w_obj.get_dtype(space) + if current_guess is None: + return dtype + return find_binop_result_dtype(space, dtype, current_guess) if space.isinstance_w(w_obj, space.w_bool): if current_guess is None or current_guess is 
bool_dtype: @@ -269,20 +278,19 @@ current_guess is long_dtype or current_guess is int64_dtype): return int64_dtype return current_guess - return space.fromcache(interp_dtype.W_Float64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func): if argcount == 1: def impl(res_dtype, value): - return getattr(res_dtype, op_name)(value) + return getattr(res_dtype.itemtype, op_name)(value) elif argcount == 2: + dtype_cache = interp_dtype.get_dtype_cache(space) def impl(res_dtype, lvalue, rvalue): - res = getattr(res_dtype, op_name)(lvalue, rvalue) + res = getattr(res_dtype.itemtype, op_name)(lvalue, rvalue) if comparison_func: - booldtype = space.fromcache(interp_dtype.W_BoolDtype) - assert isinstance(booldtype, interp_dtype.W_BoolDtype) - res = booldtype.box(res) + return dtype_cache.w_booldtype.box(res) return res return func_with_new_name(impl, ufunc_name) @@ -338,7 +346,7 @@ identity = extra_kwargs.get("identity") if identity is not None: - identity = space.fromcache(interp_dtype.W_LongDtype).adapt_val(identity) + identity = interp_dtype.get_dtype_cache(space).w_longdtype.box(identity) extra_kwargs["identity"] = identity func = ufunc_dtype_caller(space, ufunc_name, op_name, argcount, diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -1,6 +1,6 @@ from pypy.conftest import gettestobjspace -from pypy.module.micronumpy import interp_dtype -from pypy.module.micronumpy.interp_numarray import NDimArray, Scalar +from pypy.module.micronumpy.interp_dtype import get_dtype_cache +from pypy.module.micronumpy.interp_numarray import W_NDimArray, Scalar from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) @@ -11,9 +11,10 @@ class TestSignature(object): def test_binop_signature(self, space): - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + float64_dtype = get_dtype_cache(space).w_float64dtype + bool_dtype = get_dtype_cache(space).w_booldtype - ar = NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) assert v1.signature is not v2.signature @@ -22,7 +23,7 @@ v4 = ar.descr_add(space, ar) assert v1.signature is v4.signature - bool_ar = NDimArray(10, [10], dtype=space.fromcache(interp_dtype.W_BoolDtype)) + bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) assert v5.signature is not v1.signature assert v5.signature is not v2.signature @@ -30,7 +31,9 @@ assert v5.signature is v6.signature def test_slice_signature(self, space): - ar = NDimArray(10, [10], dtype=space.fromcache(interp_dtype.W_Float64Dtype)) + float64_dtype = get_dtype_cache(space).w_float64dtype + + ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) assert v1.signature is v2.signature @@ -41,10 +44,10 @@ class TestUfuncCoerscion(object): def test_binops(self, space): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype) - int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype) - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + bool_dtype = get_dtype_cache(space).w_booldtype + int8_dtype = get_dtype_cache(space).w_int8dtype + 
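(The tests here switch from space.fromcache(W_Float64Dtype) to get_dtype_cache(space).w_float64dtype; both rest on the same per-space caching idea, since get_dtype_cache() is itself just space.fromcache(DtypeCache). A toy version of that fromcache() pattern -- simplified, not the real ObjSpace API:

    class Space(object):
        def __init__(self):
            self._caches = {}
        def fromcache(self, cls):
            # lazily create one instance of cls per space, then reuse it
            if cls not in self._caches:
                self._caches[cls] = cls(self)
            return self._caches[cls]

    class DtypeCache(object):
        def __init__(self, space):
            self.w_float64dtype = object()   # stands in for the real W_Dtype

    def get_dtype_cache(space):
        return space.fromcache(DtypeCache)

    space = Space()
    assert get_dtype_cache(space) is get_dtype_cache(space)
    assert (get_dtype_cache(space).w_float64dtype
            is get_dtype_cache(space).w_float64dtype)
)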
int32_dtype = get_dtype_cache(space).w_int32dtype + float64_dtype = get_dtype_cache(space).w_float64dtype # Basic pairing assert find_binop_result_dtype(space, bool_dtype, bool_dtype) is bool_dtype @@ -62,19 +65,19 @@ assert find_binop_result_dtype(space, bool_dtype, float64_dtype, promote_to_float=True) is float64_dtype def test_unaryops(self, space): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype) - uint8_dtype = space.fromcache(interp_dtype.W_UInt8Dtype) - int16_dtype = space.fromcache(interp_dtype.W_Int16Dtype) - uint16_dtype = space.fromcache(interp_dtype.W_UInt16Dtype) - int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype) - uint32_dtype = space.fromcache(interp_dtype.W_UInt32Dtype) - long_dtype = space.fromcache(interp_dtype.W_LongDtype) - ulong_dtype = space.fromcache(interp_dtype.W_ULongDtype) - int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype) - uint64_dtype = space.fromcache(interp_dtype.W_UInt64Dtype) - float32_dtype = space.fromcache(interp_dtype.W_Float32Dtype) - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + bool_dtype = get_dtype_cache(space).w_booldtype + int8_dtype = get_dtype_cache(space).w_int8dtype + uint8_dtype = get_dtype_cache(space).w_uint8dtype + int16_dtype = get_dtype_cache(space).w_int16dtype + uint16_dtype = get_dtype_cache(space).w_uint16dtype + int32_dtype = get_dtype_cache(space).w_int32dtype + uint32_dtype = get_dtype_cache(space).w_uint32dtype + long_dtype = get_dtype_cache(space).w_longdtype + ulong_dtype = get_dtype_cache(space).w_ulongdtype + int64_dtype = get_dtype_cache(space).w_int64dtype + uint64_dtype = get_dtype_cache(space).w_uint64dtype + float32_dtype = get_dtype_cache(space).w_float32dtype + float64_dtype = get_dtype_cache(space).w_float64dtype # Normal rules, everything returns itself assert find_unaryop_result_dtype(space, bool_dtype) is bool_dtype diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -1,6 +1,9 @@ +import py -import py -from pypy.module.micronumpy.compile import * +from pypy.module.micronumpy.compile import (numpy_compile, Assignment, + ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, + FunctionCall, FakeSpace) + class TestCompiler(object): def compile(self, code): @@ -106,7 +109,7 @@ c -> 3 """ interp = self.run(code) - assert interp.results[-1].value.val == 9 + assert interp.results[-1].value == 9 def test_array_getitem(self): code = """ @@ -115,7 +118,7 @@ a + b -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 3 + 6 + assert interp.results[0].value == 3 + 6 def test_range_getitem(self): code = """ @@ -123,7 +126,7 @@ r -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_sum(self): code = """ @@ -132,7 +135,7 @@ r """ interp = self.run(code) - assert interp.results[0].value.val == 15 + assert interp.results[0].value.value == 15 def test_array_write(self): code = """ @@ -141,7 +144,7 @@ a -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 15 + assert interp.results[0].value == 15 def test_min(self): interp = self.run(""" @@ -150,7 +153,7 @@ b = a + a min(b) """) - assert interp.results[0].value.val == -24 + assert interp.results[0].value.value == -24 def test_max(self): interp = self.run(""" @@ -159,7 +162,7 @@ b = a + a max(b) """) - assert 
interp.results[0].value.val == 256 + assert interp.results[0].value.value == 256 def test_slice(self): interp = self.run(""" @@ -167,7 +170,7 @@ b = a -> : b -> 3 """) - assert interp.results[0].value.val == 4 + assert interp.results[0].value == 4 def test_slice_step(self): interp = self.run(""" @@ -175,7 +178,7 @@ b = a -> ::2 b -> 3 """) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_setslice(self): interp = self.run(""" @@ -185,7 +188,7 @@ a[::3] = b a -> 3 """) - assert interp.results[0].value.val == 5 + assert interp.results[0].value == 5 def test_slice2(self): @@ -196,14 +199,14 @@ b = s1 + s2 b -> 3 """) - assert interp.results[0].value.val == 15 + assert interp.results[0].value == 15 def test_multidim_getitem(self): interp = self.run(""" a = [[1,2]] a -> 0 -> 1 """) - assert interp.results[0].value.val == 2 + assert interp.results[0].value == 2 def test_multidim_getitem_2(self): interp = self.run(""" @@ -211,7 +214,7 @@ b = a + a b -> 1 -> 1 """) - assert interp.results[0].value.val == 8 + assert interp.results[0].value == 8 def test_set_slice(self): interp = self.run(""" @@ -220,7 +223,7 @@ b[:] = a + a b -> 3 """) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_set_slice2(self): interp = self.run(""" @@ -231,4 +234,4 @@ a[0:30:3] = c a -> 3 """) - assert interp.results[0].value.val == 11 + assert interp.results[0].value == 11 diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -30,7 +30,7 @@ def test_repr_str(self): from numpypy import dtype - assert repr(dtype) == "" + assert repr(dtype) == "" d = dtype('?') assert repr(d) == "dtype('bool')" assert str(d) == "bool" @@ -44,13 +44,13 @@ assert a[i] is True_ def test_copy_array_with_dtype(self): - from numpypy import array, False_, True_ + from numpypy import array, False_, True_, int64 a = array([0, 1, 2, 3], dtype=long) # int on 64-bit, long in 32-bit - assert isinstance(a[0], (int, long)) + assert isinstance(a[0], int64) b = a.copy() - assert isinstance(b[0], (int, long)) + assert isinstance(b[0], int64) a = array([0, 1, 2, 3], dtype=bool) assert a[0] is False_ @@ -72,17 +72,17 @@ assert a[i] is True_ def test_zeros_long(self): - from numpypy import zeros + from numpypy import zeros, int64 a = zeros(10, dtype=long) for i in range(10): - assert isinstance(a[i], (int, long)) + assert isinstance(a[i], int64) assert a[1] == 0 def test_ones_long(self): - from numpypy import ones + from numpypy import ones, int64 a = ones(10, dtype=long) for i in range(10): - assert isinstance(a[i], (int, long)) + assert isinstance(a[i], int64) assert a[1] == 1 def test_overflow(self): @@ -165,3 +165,99 @@ # You can't subclass dtype raises(TypeError, type, "Foo", (dtype,), {}) + +class AppTestTypes(BaseNumpyAppTest): + def test_abstract_types(self): + import numpypy as numpy + raises(TypeError, numpy.generic, 0) + raises(TypeError, numpy.number, 0) + raises(TypeError, numpy.integer, 0) + exc = raises(TypeError, numpy.signedinteger, 0) + assert str(exc.value) == "cannot create 'numpypy.signedinteger' instances" + + raises(TypeError, numpy.floating, 0) + raises(TypeError, numpy.inexact, 0) + + def test_bool(self): + import numpypy as numpy + + assert numpy.bool_.mro() == [numpy.bool_, numpy.generic, object] + assert numpy.bool_(3) is numpy.True_ + assert numpy.bool_("") is numpy.False_ + assert type(numpy.True_) is 
type(numpy.False_) is numpy.bool_ + + class X(numpy.bool_): + pass + + assert type(X(True)) is numpy.bool_ + assert X(True) is numpy.True_ + + def test_int8(self): + import numpypy as numpy + + assert numpy.int8.mro() == [numpy.int8, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object] + + a = numpy.array([1, 2, 3], numpy.int8) + assert type(a[1]) is numpy.int8 + assert numpy.dtype("int8").type is numpy.int8 + + x = numpy.int8(128) + assert x == -128 + assert x != 128 + assert type(x) is numpy.int8 + assert repr(x) == "-128" + + assert type(int(x)) is int + assert int(x) == -128 + + def test_int16(self): + import numpypy as numpy + + x = numpy.int16(3) + assert x == 3 + + def test_int32(self): + import numpypy as numpy + + x = numpy.int32(23) + assert x == 23 + + def test_int_(self): + import numpypy as numpy + + assert numpy.int_ is numpy.dtype(int).type + assert numpy.int_.mro() == [numpy.int_, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object] + + def test_int64(self): + import sys + import numpypy as numpy + + if sys.maxint == 2 ** 63 -1: + assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object] + else: + assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object] + + assert numpy.dtype(numpy.int64).type is numpy.int64 + assert numpy.int64(3) == 3 + + def test_float64(self): + import numpypy as numpy + + assert numpy.float64.mro() == [numpy.float64, numpy.floating, numpy.inexact, numpy.number, numpy.generic, float, object] + + a = numpy.array([1, 2, 3], numpy.float64) + assert type(a[1]) is numpy.float64 + assert numpy.dtype(float).type is numpy.float64 + + assert numpy.float64(2.0) == 2.0 + + def test_subclass_type(self): + import numpypy as numpy + + class X(numpy.float64): + def m(self): + return self + 2 + + b = X(10) + assert type(b) is X + assert b.m() == 12 diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1,7 +1,7 @@ import py from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest -from pypy.module.micronumpy.interp_numarray import NDimArray, shape_agreement +from pypy.module.micronumpy.interp_numarray import W_NDimArray, shape_agreement from pypy.module.micronumpy import signature from pypy.interpreter.error import OperationError from pypy.conftest import gettestobjspace @@ -28,18 +28,18 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = NDimArray(100, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'F') assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = NDimArray(100, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'C') assert a.strides == [15, 3, 1] assert a.backstrides == [135, 12, 2] def test_create_slice_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] @@ -58,7 +58,7 @@ def test_create_slice_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') s = a.create_slice(space, [(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] @@ 
-78,7 +78,7 @@ def test_slice_of_slice_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(5, 0, 0, 1)]) assert s.start == 5 s2 = s.create_slice(space, [(3, 0, 0, 1)]) @@ -96,7 +96,7 @@ def test_slice_of_slice_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') s = a.create_slice(space, [(5, 0, 0, 1)]) assert s.start == 15 * 5 s2 = s.create_slice(space, [(3, 0, 0, 1)]) @@ -114,7 +114,7 @@ def test_negative_step_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] @@ -122,14 +122,14 @@ def test_negative_step_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') s = a.create_slice(space, [(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] def test_index_of_single_item_f(self): - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 + 2 * 10 + 2 * 50 s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) @@ -139,7 +139,7 @@ assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 1)) def test_index_of_single_item_c(self): - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 * 3 * 5 + 2 * 3 + 2 s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) @@ -160,6 +160,21 @@ class AppTestNumArray(BaseNumpyAppTest): + def test_ndarray(self): + from numpypy import ndarray, array, dtype + + assert type(ndarray) is type + assert type(array) is not type + a = ndarray((2, 3)) + assert a.shape == (2, 3) + assert a.dtype == dtype(float) + + raises(TypeError, ndarray, [[1], [2], [3]]) + + a = ndarray(3, dtype=int) + assert a.shape == (3,) + assert a.dtype is dtype(int) + def test_type(self): from numpypy import array ar = array(range(5)) @@ -359,11 +374,11 @@ assert r[i] == i + 3 def test_add_list(self): - from numpypy import array + from numpypy import array, ndarray a = array(range(5)) b = list(reversed(range(5))) c = a + b - assert isinstance(c, array) + assert isinstance(c, ndarray) for i in range(5): assert c[i] == 4 @@ -709,7 +724,7 @@ assert b[i] == 2.5 * a[i] def test_dtype_guessing(self): - from numpypy import array, dtype + from numpypy import array, dtype, float64, int8, bool_ assert array([True]).dtype is dtype(bool) assert array([True, False]).dtype is dtype(bool) @@ -719,6 +734,10 @@ assert array([1.2, True]).dtype is dtype(float) assert array([1.2, 5]).dtype is dtype(float) assert array([]).dtype is dtype(float) + assert array([float64(2)]).dtype is dtype(float) + assert array([int8(3)]).dtype is dtype("int8") + assert array([bool_(True)]).dtype is dtype(bool) + assert array([bool_(True), 3.0]).dtype is dtype(float) def test_comparison(self): import operator @@ -760,6 +779,19 @@ a[::-1] = a + a assert (a == [8, 6, 4, 2, 0]).all() + def test_debug_repr(self): + from numpypy import zeros, sin + a = zeros(1) + assert 
a.__debug_repr__() == 'Array' + assert (a + a).__debug_repr__() == 'Call2(add, Array, Array)' + assert (a[::2]).__debug_repr__() == 'Slice(Array)' + assert (a + 2).__debug_repr__() == 'Call2(add, Array, Scalar)' + assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, FlatIter(Array))' + assert sin(a).__debug_repr__() == 'Call1(sin, Array)' + b = a + a + b[0] = 3 + assert b.__debug_repr__() == 'Call2(add, forced=Array)' + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy @@ -989,23 +1021,16 @@ a = array([1, 2, 3]) assert dot(a.flat, a.flat) == 14 - def test_debug_repr(self): - from numpypy import zeros, sin - a = zeros(1) - assert a.__debug_repr__() == 'Array' - assert (a + a).__debug_repr__() == 'Call2(add, Array, Array)' - assert (a[::2]).__debug_repr__() == 'Slice(Array)' - assert (a + 2).__debug_repr__() == 'Call2(add, Array, Scalar)' - assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, FlatIter(Array))' - assert sin(a).__debug_repr__() == 'Call1(sin, Array)' - b = a + a - b[0] = 3 - assert b.__debug_repr__() == 'Call2(add, forced=Array)' + def test_slice_copy(self): + from numpypy import zeros + a = zeros((10, 10)) + b = a[0].copy() + assert (b == zeros(10)).all() -class AppTestSupport(object): +class AppTestSupport(BaseNumpyAppTest): def setup_class(cls): import struct - cls.space = gettestobjspace(usemodules=('micronumpy',)) + BaseNumpyAppTest.setup_class.im_func(cls) cls.w_data = cls.space.wrap(struct.pack('dddd', 1, 2, 3, 4)) def test_fromstring(self): diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -8,7 +8,7 @@ assert isinstance(add, ufunc) assert repr(add) == "" - assert repr(ufunc) == "" + assert repr(ufunc) == "" def test_ufunc_attrs(self): from numpypy import add, multiply, sin @@ -37,36 +37,36 @@ assert minimum(2.0, 3.0) == 2.0 def test_sequence(self): - from numpypy import array, negative, minimum + from numpypy import array, ndarray, negative, minimum a = array(range(3)) b = [2.0, 1.0, 0.0] c = 1.0 b_neg = negative(b) - assert isinstance(b_neg, array) + assert isinstance(b_neg, ndarray) for i in range(3): assert b_neg[i] == -b[i] min_a_b = minimum(a, b) - assert isinstance(min_a_b, array) + assert isinstance(min_a_b, ndarray) for i in range(3): assert min_a_b[i] == min(a[i], b[i]) min_b_a = minimum(b, a) - assert isinstance(min_b_a, array) + assert isinstance(min_b_a, ndarray) for i in range(3): assert min_b_a[i] == min(a[i], b[i]) min_a_c = minimum(a, c) - assert isinstance(min_a_c, array) + assert isinstance(min_a_c, ndarray) for i in range(3): assert min_a_c[i] == min(a[i], c) min_c_a = minimum(c, a) - assert isinstance(min_c_a, array) + assert isinstance(min_c_a, ndarray) for i in range(3): assert min_c_a[i] == min(a[i], c) min_b_c = minimum(b, c) - assert isinstance(min_b_c, array) + assert isinstance(min_b_c, ndarray) for i in range(3): assert min_b_c[i] == min(b[i], c) min_c_b = minimum(c, b) - assert isinstance(min_c_b, array) + assert isinstance(min_c_b, ndarray) for i in range(3): assert min_c_b[i] == min(b[i], c) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -8,11 +8,11 @@ from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.metainterp.warmspot import reset_stats -from 
pypy.module.micronumpy import interp_ufuncs, signature +from pypy.module.micronumpy import interp_boxes, interp_ufuncs, signature from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, FloatObject, IntObject, BoolObject, Parser, InterpreterState) -from pypy.module.micronumpy.interp_numarray import NDimArray, NDimSlice,\ - BaseArray +from pypy.module.micronumpy.interp_numarray import (W_NDimArray, NDimSlice, + BaseArray) from pypy.rlib.nonconst import NonConstant from pypy.rpython.annlowlevel import llstr, hlstr @@ -48,17 +48,15 @@ def f(i): interp = InterpreterState(codes[i]) interp.run(space) - res = interp.results[-1] - assert isinstance(res, BaseArray) - w_res = res.eval(res.start_iter()).wrap(interp.space) - if isinstance(w_res, BoolObject): - return float(w_res.boolval) - elif isinstance(w_res, FloatObject): - return w_res.floatval - elif isinstance(w_res, IntObject): - return w_res.intval - else: - return -42. + w_res = interp.results[-1] + if isinstance(w_res, BaseArray): + w_res = w_res.eval(w_res.start_iter()) + + if isinstance(w_res, interp_boxes.W_Float64Box): + return w_res.value + elif isinstance(w_res, interp_boxes.W_BoolBox): + return float(w_res.value) + raise TypeError(w_res) if self.graph is None: interp, graph = self.meta_interp(f, [i], @@ -80,9 +78,9 @@ def test_add(self): result = self.run("add") - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, 'int_add': 3, - 'int_ge': 1, 'guard_false': 1, 'jump': 1}) + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, + 'int_ge': 1, 'guard_false': 1, 'jump': 1}) assert result == 3 + 3 def define_float_add(): @@ -94,9 +92,9 @@ def test_floatadd(self): result = self.run("float_add") assert result == 3 + 3 - self.check_simple_loop({"getarrayitem_raw": 1, "float_add": 1, - "setarrayitem_raw": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 1, "float_add": 1, + "setinteriorfield_raw": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) def define_sum(): return """ @@ -108,9 +106,9 @@ def test_sum(self): result = self.run("sum") assert result == 2 * sum(range(30)) - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 2, - "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 2, + "int_add": 2, "int_ge": 1, "guard_false": 1, + "jump": 1}) def define_prod(): return """ @@ -125,9 +123,9 @@ for i in range(30): expected *= i * 2 assert result == expected - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) def test_max(self): py.test.skip("broken, investigate") @@ -138,9 +136,9 @@ max(b) """) assert result == 256 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 1, + "int_lt": 1, "guard_true": 1, "jump": 1}) def test_min(self): py.test.skip("broken, investigate") @@ -151,9 +149,9 @@ min(b) """) assert result == -24 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + 
self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 1, + "int_lt": 1, "guard_true": 1, "jump": 1}) def define_any(): return """ @@ -166,10 +164,10 @@ def test_any(self): result = self.run("any") assert result == 1 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_ne": 1, "int_add": 2, - "int_ge": 1, "jump": 1, - "guard_false": 2}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_ne": 1, "int_add": 2, + "int_ge": 1, "jump": 1, + "guard_false": 2}) def define_already_forced(): return """ @@ -188,10 +186,10 @@ # sure it was optimized correctly. # XXX the comment above is wrong now. We need preferrably a way to # count the two loops separately - self.check_resops({'setarrayitem_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 35, + self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, - 'getarrayitem_raw': 4, 'float_add': 2, 'guard_false': 4, + 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, 'guard_value': 2}) def define_ufunc(): @@ -205,10 +203,9 @@ def test_ufunc(self): result = self.run("ufunc") assert result == -6 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, "float_neg": 1, - "setarrayitem_raw": 1, "int_add": 3, - "int_ge": 1, "guard_false": 1, "jump": 1, - }) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_neg": 1, + "setinteriorfield_raw": 1, "int_add": 3, + "int_ge": 1, "guard_false": 1, "jump": 1}) def define_specialization(): return """ @@ -246,9 +243,9 @@ def test_slice(self): result = self.run("slice") assert result == 18 - self.check_simple_loop({'getarrayitem_raw': 2, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_ge': 1, 'guard_false': 1, 'jump': 1}) @@ -265,8 +262,8 @@ def test_slice2(self): result = self.run("slice2") assert result == 15 - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, 'int_add': 3, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_ge': 1, 'guard_false': 1, 'jump': 1}) def define_multidim(): @@ -279,11 +276,11 @@ def test_multidim(self): result = self.run('multidim') assert result == 8 - self.check_simple_loop({'float_add': 1, 'getarrayitem_raw': 2, - 'guard_false': 1, 'int_add': 3, 'int_ge': 1, - 'jump': 1, 'setarrayitem_raw': 1}) # int_add might be 1 here if we try slightly harder with # reusing indexes or some optimization + self.check_simple_loop({'float_add': 1, 'getinteriorfield_raw': 2, + 'guard_false': 1, 'int_add': 3, 'int_ge': 1, + 'jump': 1, 'setinteriorfield_raw': 1}) def define_multidim_slice(): return """ @@ -329,18 +326,18 @@ result = self.run("setslice") assert result == 11.0 self.check_loop_count(1) - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add' : 1, - 'setarrayitem_raw': 1, 'int_add': 3, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_eq': 1, 'guard_false': 1, 'jump': 1}) class TestNumpyOld(LLJitMixin): def setup_class(cls): py.test.skip("old") from pypy.module.micronumpy.compile import FakeSpace - from pypy.module.micronumpy.interp_dtype import W_Float64Dtype + from pypy.module.micronumpy.interp_dtype import get_dtype_cache cls.space = FakeSpace() - cls.float64_dtype = 
cls.space.fromcache(W_Float64Dtype) + cls.float64_dtype = get_dtype_cache(cls.space).w_float64dtype def test_int32_sum(self): py.test.skip("pypy/jit/backend/llimpl.py needs to be changed to " @@ -355,7 +352,7 @@ dtype = float64_dtype else: dtype = int32_dtype - ar = NDimArray(n, [n], dtype=dtype) + ar = W_NDimArray(n, [n], dtype=dtype) i = 0 while i < n: ar.get_concrete().setitem(i, int32_dtype.box(7)) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/types.py @@ -0,0 +1,389 @@ +import functools +import math + +from pypy.module.micronumpy import interp_boxes +from pypy.objspace.std.floatobject import float2string +from pypy.rlib import rfloat, libffi, clibffi +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import LONG_BIT, widen +from pypy.rpython.lltypesystem import lltype, rffi + + +def simple_unary_op(func): + specialize.argtype(1)(func) + @functools.wraps(func) + def dispatcher(self, v): + return self.box( + func( + self, + self.for_computation(self.unbox(v)) + ) + ) + return dispatcher + +def simple_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return self.box( + func( + self, + self.for_computation(self.unbox(v1)), + self.for_computation(self.unbox(v2)), + ) + ) + return dispatcher + +def raw_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return func(self, + self.for_computation(self.unbox(v1)), + self.for_computation(self.unbox(v2)) + ) + return dispatcher + +class BaseType(object): + def _unimplemented_ufunc(self, *args): + raise NotImplementedError + # add = sub = mul = div = mod = pow = eq = ne = lt = le = gt = ge = max = \ + # min = copysign = pos = neg = abs = sign = reciprocal = fabs = floor = \ + # exp = sin = cos = tan = arcsin = arccos = arctan = arcsinh = \ + # arctanh = _unimplemented_ufunc + +class Primitive(object): + _mixin_ = True + def get_element_size(self): + return rffi.sizeof(self.T) + + @specialize.argtype(1) + def box(self, value): + return self.BoxType(rffi.cast(self.T, value)) + + def unbox(self, box): + assert isinstance(box, self.BoxType) + return box.value + + def coerce(self, space, w_item): + if isinstance(w_item, self.BoxType): + return w_item + return self.coerce_subtype(space, space.gettypefor(self.BoxType), w_item) + + def coerce_subtype(self, space, w_subtype, w_item): + # XXX: ugly + w_obj = space.allocate_instance(self.BoxType, w_subtype) + assert isinstance(w_obj, self.BoxType) + w_obj.__init__(self._coerce(space, w_item).value) + return w_obj + + def _coerce(self, space, w_item): + raise NotImplementedError + + def read(self, storage, width, i, offset): + return self.box(libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset + )) + + def store(self, storage, width, i, offset, box): + value = self.unbox(box) + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value + ) + + def fill(self, storage, width, box, start, stop, offset): + value = self.unbox(box) + for i in xrange(start, stop): + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value + ) + + @simple_binary_op + def add(self, v1, v2): + return v1 + v2 + + @simple_binary_op + def sub(self, v1, v2): + return v1 - v2 + + @simple_binary_op + def mul(self, v1, v2): + return v1 * v2 + + @simple_unary_op + def pos(self, v): + return +v + + 
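(The decorators at the top of the new types.py -- simple_unary_op, simple_binary_op, raw_binary_op -- all follow the same shape: unbox the operands, run the raw operation on the computation values, and box the result again. A stripped-down sketch of that dispatch, without the RPython specialization hints and for_computation() conversion:

    import functools

    def simple_binary_op(func):
        @functools.wraps(func)
        def dispatcher(self, v1, v2):
            # unbox both operands, apply the raw op, box the result
            return self.box(func(self, self.unbox(v1), self.unbox(v2)))
        return dispatcher

    class ToyFloat64(object):
        # stand-in for Float64(BaseType, Float); boxes are tagged tuples
        def box(self, value):
            return ("box", float(value))
        def unbox(self, box):
            return box[1]

        @simple_binary_op
        def add(self, v1, v2):
            return v1 + v2

    t = ToyFloat64()
    assert t.add(t.box(1.5), t.box(2.0)) == ("box", 3.5)
)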
@simple_unary_op + def neg(self, v): + return -v + + @simple_unary_op + def abs(self, v): + return abs(v) + + @raw_binary_op + def eq(self, v1, v2): + return v1 == v2 + + @raw_binary_op + def ne(self, v1, v2): + return v1 != v2 + + @raw_binary_op + def lt(self, v1, v2): + return v1 < v2 + + @raw_binary_op + def le(self, v1, v2): + return v1 <= v2 + + @raw_binary_op + def gt(self, v1, v2): + return v1 > v2 + + @raw_binary_op + def ge(self, v1, v2): + return v1 >= v2 + + def bool(self, v): + return bool(self.for_computation(self.unbox(v))) + + @simple_binary_op + def max(self, v1, v2): + return max(v1, v2) + + @simple_binary_op + def min(self, v1, v2): + return min(v1, v2) + +class Bool(BaseType, Primitive): + T = lltype.Bool + BoxType = interp_boxes.W_BoolBox + + True = BoxType(True) + False = BoxType(False) + + @specialize.argtype(1) + def box(self, value): + box = Primitive.box(self, value) + if box.value: + return self.True + else: + return self.False + + def coerce_subtype(self, space, w_subtype, w_item): + # Doesn't return subclasses so it can return the constants. + return self._coerce(space, w_item) + + def _coerce(self, space, w_item): + return self.box(space.is_true(w_item)) + + def str_format(self, box): + value = self.unbox(box) + return "True" if value else "False" + + def for_computation(self, v): + return int(v) + +class Integer(Primitive): + _mixin_ = True + + def _coerce(self, space, w_item): + return self.box(space.int_w(space.int(w_item))) + + def str_format(self, box): + value = self.unbox(box) + return str(self.for_computation(value)) + + def for_computation(self, v): + return widen(v) + + @simple_binary_op + def div(self, v1, v2): + if v2 == 0: + return 0 + return v1 / v2 + + @simple_binary_op + def mod(self, v1, v2): + return v1 % v2 + + @simple_binary_op + def pow(self, v1, v2): + res = 1 + while v2 > 0: + if v2 & 1: + res *= v1 + v2 >>= 1 + if v2 == 0: + break + v1 *= v1 + return res + + @simple_unary_op + def sign(self, v): + if v > 0: + return 1 + elif v < 0: + return -1 + else: + assert v == 0 + return 0 + +class Int8(BaseType, Integer): + T = rffi.SIGNEDCHAR + BoxType = interp_boxes.W_Int8Box + +class UInt8(BaseType, Integer): + T = rffi.UCHAR + BoxType = interp_boxes.W_UInt8Box + +class Int16(BaseType, Integer): + T = rffi.SHORT + BoxType = interp_boxes.W_Int16Box + +class UInt16(BaseType, Integer): + T = rffi.USHORT + BoxType = interp_boxes.W_UInt16Box + +class Int32(BaseType, Integer): + T = rffi.INT + BoxType = interp_boxes.W_Int32Box + +class UInt32(BaseType, Integer): + T = rffi.UINT + BoxType = interp_boxes.W_UInt32Box + +class Long(BaseType, Integer): + T = rffi.LONG + BoxType = interp_boxes.W_LongBox + +class ULong(BaseType, Integer): + T = rffi.ULONG + BoxType = interp_boxes.W_ULongBox + +class Int64(BaseType, Integer): + T = rffi.LONGLONG + BoxType = interp_boxes.W_Int64Box + +class UInt64(BaseType, Integer): + T = rffi.ULONGLONG + BoxType = interp_boxes.W_UInt64Box + +class Float(Primitive): + _mixin_ = True + + def _coerce(self, space, w_item): + return self.box(space.float_w(space.float(w_item))) + + def str_format(self, box): + value = self.unbox(box) + return float2string(self.for_computation(value), "g", rfloat.DTSF_STR_PRECISION) + + def for_computation(self, v): + return float(v) + + @simple_binary_op + def div(self, v1, v2): + try: + return v1 / v2 + except ZeroDivisionError: + if v1 == v2 == 0.0: + return rfloat.NAN + return rfloat.copysign(rfloat.INFINITY, v1 * v2) + + @simple_binary_op + def mod(self, v1, v2): + return math.fmod(v1, v2) + + 
@simple_binary_op + def pow(self, v1, v2): + return math.pow(v1, v2) + + @simple_binary_op + def copysign(self, v1, v2): + return math.copysign(v1, v2) + + @simple_unary_op + def sign(self, v): + if v == 0.0: + return 0.0 + return rfloat.copysign(1.0, v) + + @simple_unary_op + def fabs(self, v): + return math.fabs(v) + + @simple_unary_op + def reciprocal(self, v): + if v == 0.0: + return rfloat.copysign(rfloat.INFINITY, v) + return 1.0 / v + + @simple_unary_op + def floor(self, v): + return math.floor(v) + + @simple_unary_op + def exp(self, v): + try: + return math.exp(v) + except OverflowError: + return rfloat.INFINITY + + @simple_unary_op + def sin(self, v): + return math.sin(v) + + @simple_unary_op + def cos(self, v): + return math.cos(v) + + @simple_unary_op + def tan(self, v): + return math.tan(v) + + @simple_unary_op + def arcsin(self, v): + if not -1.0 <= v <= 1.0: + return rfloat.NAN + return math.asin(v) + + @simple_unary_op + def arccos(self, v): + if not -1.0 <= v <= 1.0: + return rfloat.NAN + return math.acos(v) + + @simple_unary_op + def arctan(self, v): + return math.atan(v) + + @simple_unary_op + def arcsinh(self, v): + return math.asinh(v) + + @simple_unary_op + def arctanh(self, v): + if v == 1.0 or v == -1.0: + return math.copysign(rfloat.INFINITY, v) + if not -1.0 < v < 1.0: + return rfloat.NAN + return math.atanh(v) + + @simple_unary_op + def sqrt(self, v): + try: + return math.sqrt(v) + except ValueError: + return rfloat.NAN + + +class Float32(BaseType, Float): + T = rffi.FLOAT + BoxType = interp_boxes.W_Float32Box + +class Float64(BaseType, Float): + T = rffi.DOUBLE + BoxType = interp_boxes.W_Float64Box \ No newline at end of file diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -38,7 +38,6 @@ def __init__(self, name, startblock, return_var=None): self.name = name # function name (possibly mangled already) self.startblock = startblock - self.startblock.isstartblock = True # build default returnblock self.returnblock = Block([return_var or Variable()]) self.returnblock.operations = () @@ -171,11 +170,10 @@ class Block(object): - __slots__ = """isstartblock inputargs operations exitswitch + __slots__ = """inputargs operations exitswitch exits blockcolor""".split() def __init__(self, inputargs): - self.isstartblock = False self.inputargs = list(inputargs) # mixed list of variable/const XXX self.operations = [] # list of SpaceOperation(s) self.exitswitch = None # a variable or @@ -452,7 +450,6 @@ newblock.closeblock(*newlinks) newstartblock = blockmap[graph.startblock] - newstartblock.isstartblock = True newgraph = FunctionGraph(graph.name, newstartblock) newgraph.returnblock = blockmap[graph.returnblock] newgraph.exceptblock = blockmap[graph.exceptblock] @@ -490,7 +487,6 @@ for block in graph.iterblocks(): - assert bool(block.isstartblock) == (block is graph.startblock) assert type(block.exits) is tuple, ( "block.exits is a %s (closeblock() or recloseblock() missing?)" % (type(block.exits).__name__,)) diff --git a/pypy/objspace/flow/test/test_checkgraph.py b/pypy/objspace/flow/test/test_checkgraph.py --- a/pypy/objspace/flow/test/test_checkgraph.py +++ b/pypy/objspace/flow/test/test_checkgraph.py @@ -13,20 +13,6 @@ py.test.raises(AssertionError, checkgraph, g) -def test_nostartblock(): - g = FunctionGraph("g", Block([])) - g.startblock.closeblock(Link([Constant(1)], g.returnblock)) - g.startblock.isstartblock = False - py.test.raises(AssertionError, checkgraph, g) - -def 
test_twostartblocks(): - g = FunctionGraph("g", Block([])) - b = Block([]) - b.isstartblock = True - g.startblock.closeblock(Link([], b)) - b.closeblock(Link([Constant(1)], g.returnblock)) - py.test.raises(AssertionError, checkgraph, g) - def test_exitlessblocknotexitblock(): g = FunctionGraph("g", Block([])) py.test.raises(AssertionError, checkgraph, g) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -50,6 +50,13 @@ else: return space.fromcache(UnicodeListStrategy) + # check for floats + for w_obj in list_w: + if not is_W_FloatObject(w_obj): + break + else: + return space.fromcache(FloatListStrategy) + return space.fromcache(ObjectListStrategy) def is_W_IntObject(w_object): @@ -60,7 +67,9 @@ from pypy.objspace.std.unicodeobject import W_UnicodeObject return type(w_object) is W_UnicodeObject - +def is_W_FloatObject(w_object): + from pypy.objspace.std.floatobject import W_FloatObject + return type(w_object) is W_FloatObject class W_ListObject(W_AbstractListObject): from pypy.objspace.std.listtype import list_typedef as typedef @@ -317,6 +326,8 @@ to the added item. W_Lists do not switch back to EmptyListStrategy when becoming empty again.""" + _applevel_repr = "empty" + def __init__(self, space): ListStrategy.__init__(self, space) # cache an empty list that is used whenever getitems is called (i.e. sorting) @@ -364,6 +375,8 @@ strategy = self.space.fromcache(IntegerListStrategy) elif is_W_UnicodeObject(w_item): strategy = self.space.fromcache(UnicodeListStrategy) + elif is_W_FloatObject(w_item): + strategy = self.space.fromcache(FloatListStrategy) else: strategy = self.space.fromcache(ObjectListStrategy) @@ -415,6 +428,8 @@ On any operation destroying the range (inserting, appending non-ints) the strategy is switched to IntegerListStrategy.""" + _applevel_repr = "range" + def switch_to_integer_strategy(self, w_list): items = self._getitems_range(w_list, False) strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) @@ -853,6 +868,7 @@ class ObjectListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None + _applevel_repr = "object" def unwrap(self, w_obj): return w_obj @@ -881,6 +897,7 @@ class IntegerListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0 + _applevel_repr = "int" def wrap(self, intval): return self.space.wrap(intval) @@ -905,8 +922,40 @@ if reverse: l.reverse() +<<<<<<< local class UnicodeListStrategy(AbstractUnwrappedStrategy, ListStrategy): +======= +class FloatListStrategy(AbstractUnwrappedStrategy, ListStrategy): + _none_value = 0.0 + _applevel_repr = "float" + + def wrap(self, floatval): + return self.space.wrap(floatval) + + def unwrap(self, w_float): + return self.space.float_w(w_float) + + erase, unerase = rerased.new_erasing_pair("float") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def is_correct_type(self, w_obj): + return is_W_FloatObject(w_obj) + + def list_is_correct_type(self, w_list): + return w_list.strategy is self.space.fromcache(FloatListStrategy) + + def sort(self, w_list, reverse): + l = self.unerase(w_list.lstorage) + sorter = FloatSort(l, len(l)) + sorter.sort() + if reverse: + l.reverse() + +class StringListStrategy(AbstractUnwrappedStrategy, ListStrategy): +>>>>>>> other _none_value = None + _applevel_repr = "str" def wrap(self, stringval): return self.space.wrap(stringval) @@ -934,6 +983,7 @@ def getitems_str(self, w_list): return self.unerase(w_list.lstorage) + # 
_______________________________________________________ init_signature = Signature(['sequence'], None, None) @@ -1256,7 +1306,12 @@ TimSort = make_timsort_class() IntBaseTimSort = make_timsort_class() +<<<<<<< local UnicodeBaseTimSort = make_timsort_class() +======= +FloatBaseTimSort = make_timsort_class() +StringBaseTimSort = make_timsort_class() +>>>>>>> other class KeyContainer(baseobjspace.W_Root): def __init__(self, w_key, w_item): @@ -1276,7 +1331,15 @@ def lt(self, a, b): return a < b +<<<<<<< local class UnicodeSort(UnicodeBaseTimSort): +======= +class FloatSort(FloatBaseTimSort): + def lt(self, a, b): + return a < b + +class StringSort(StringBaseTimSort): +>>>>>>> other def lt(self, a, b): return a < b diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -470,11 +470,17 @@ l.extend(iter([1, 2, 3, 4])) assert l is l0 assert l == [1, 1, 2, 3, 4] + l = l0 = ['a'] l.extend(iter(['b', 'c', 'd'])) assert l == ['a', 'b', 'c', 'd'] assert l is l0 + l = l0 = [1.2] + l.extend(iter([2.3, 3.4, 4.5])) + assert l == [1.2, 2.3, 3.4, 4.5] + assert l is l0 + def test_sort(self): l = l0 = [1, 5, 3, 0] l.sort() @@ -493,6 +499,10 @@ l.sort(reverse=True) assert l == ["d", "c", "b", "a"] + l = [3.3, 2.2, 4.4, 1.1, 3.1, 5.5] + l.sort() + assert l == [1.1, 2.2, 3.1, 3.3, 4.4, 5.5] + def test_sort_cmp(self): def lencmp(a,b): return cmp(len(a), len(b)) l = [ 'a', 'fiver', 'tre', '' ] @@ -546,11 +556,19 @@ assert l[-2] == 6 raises(IndexError, "l[len(l)]") raises(IndexError, "l[-len(l)-1]") + l = ['a', 'b', 'c'] assert l[0] == 'a' assert l[-1] == 'c' assert l[-2] == 'b' raises(IndexError, "l[len(l)]") + + l = [1.1, 2.2, 3.3] + assert l[0] == 1.1 + assert l[-1] == 3.3 + assert l[-2] == 2.2 + raises(IndexError, "l[len(l)]") + l = [] raises(IndexError, "l[1]") @@ -588,6 +606,16 @@ assert l is l0 raises(IndexError, "del l[0]") + l = l0 = [1.1, 2.2, 3.3] + del l[0] + assert l == [2.2, 3.3] + del l[-1] + assert l == [2.2] + del l[-1] + assert l == [] + assert l is l0 + raises(IndexError, "del l[0]") + l = range(10) del l[5] assert l == [0, 1, 2, 3, 4, 6, 7, 8, 9] @@ -627,9 +655,15 @@ del l[:] assert l is l0 assert l == [] + l = ['a', 'b'] del l[:] assert l == [] + + l = [1.1, 2.2] + del l[:] + assert l == [] + l = range(5) del l[:] assert l == [] @@ -640,6 +674,11 @@ assert l is l0 assert l == [1,2,3,4,5] + l = l0 = [1.1,2.2,3.3] + l += [4.4,5.5] + assert l is l0 + assert l == [1.1,2.2,3.3,4.4,5.5] + l = l0 = ['a', 'b', 'c'] l1 = l[:] l += ['d'] @@ -697,6 +736,11 @@ l *= -5 assert l == [] + l = l0 = [1.1, 2.2] + l *= 2 + assert l is l0 + assert l == [1.1, 2.2, 1.1, 2.2] + l = range(2) l *= 2 assert l == [0, 1, 0, 1] @@ -731,6 +775,10 @@ assert c.index(0) == 0 raises(ValueError, c.index, 3) + c = [0.0, 2.2, 4.4] + assert c.index(0) == 0.0 + raises(ValueError, c.index, 3) + def test_index_cpython_bug(self): if self.on_cpython: skip("cpython has a bug here") @@ -779,6 +827,10 @@ l[::3] = ('a', 'b') assert l == ['a', 1, 2, 'b', 4, 5] + l = [0.0, 1.1, 2.2, 3.3, 4.4, 5.5] + l[::3] = ('a', 'b') + assert l == ['a', 1.1, 2.2, 'b', 4.4, 5.5] + def test_setslice_with_self(self): l = [1,2,3,4] l[:] = l @@ -835,6 +887,10 @@ l.append("a") assert l == [1,2,3,"a"] + l = [1.1, 2.2, 3.3] + l.append(4.4) + assert l == [1.1, 2.2, 3.3, 4.4] + def test_count(self): c = list('hello') assert c.count('l') == 2 @@ -875,6 +931,10 @@ l.pop() assert l == range(9) + l = [1.1, 2.2, 3.3] + l.pop() + 
assert l == [1.1, 2.2] + l = [] raises(IndexError, l.pop, 0) @@ -897,16 +957,19 @@ l2 = ["1", "2", "3", "4"] l3 = range(5) l4 = [1, 2, 3, "4"] + l5 = [1.1, 2.2, 3.3, 4.4] raises(IndexError, l1.pop, -5) raises(IndexError, l2.pop, -5) raises(IndexError, l3.pop, -6) raises(IndexError, l4.pop, -5) + raises(IndexError, l5.pop, -5) assert l1.pop(-2) == 3 assert l2.pop(-2) == "3" assert l3.pop(-2) == 3 assert l4.pop(-2) == 3 + assert l5.pop(-2) == 3.3 def test_remove(self): c = list('hello world') @@ -925,6 +988,13 @@ l = [0, 3, 5] raises(ValueError, c.remove, 2) + l = [0.0, 1.1, 2.2, 3.3, 4.4] + l.remove(2.2) + assert l == [0.0, 1.1, 3.3, 4.4] + l = [0.0, 3.3, 5.5] + raises(ValueError, c.remove, 2) + raises(ValueError, c.remove, 2.2) + def test_reverse(self): c = list('hello world') c.reverse() diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,4 +1,4 @@ -from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, UnicodeListStrategy, RangeListStrategy, make_range_list +from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, UnicodeListStrategy, RangeListStrategy, make_range_list from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject @@ -15,7 +15,7 @@ def test_empty_to_any(self): l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) - l.append(self.space.wrap(1.)) + l.append(self.space.wrap((1,3))) assert isinstance(l.strategy, ObjectListStrategy) l = W_ListObject(self.space, []) @@ -28,6 +28,11 @@ l.append(self.space.wrap('a')) assert isinstance(l.strategy, UnicodeListStrategy) + l = W_ListObject(self.space, []) + assert isinstance(l.strategy, EmptyListStrategy) + l.append(self.space.wrap(1.2)) + assert isinstance(l.strategy, FloatListStrategy) + def test_int_to_any(self): l = W_ListObject(self.space, [self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) @@ -44,6 +49,14 @@ l.append(self.space.wrap(3)) assert isinstance(l.strategy, ObjectListStrategy) + def test_float_to_any(self): + l = W_ListObject(self.space, [self.space.wrap(1.1),self.space.wrap(2.2),self.space.wrap(3.3)]) + assert isinstance(l.strategy, FloatListStrategy) + l.append(self.space.wrap(4.4)) + assert isinstance(l.strategy, FloatListStrategy) + l.append(self.space.wrap("a")) + assert isinstance(l.strategy, ObjectListStrategy) + def test_setitem(self): # This should work if test_listobject.py passes l = W_ListObject(self.space, [self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) @@ -65,6 +78,12 @@ l.setitem(0, self.space.wrap(2)) assert isinstance(l.strategy, ObjectListStrategy) + # FloatStrategy to ObjectStrategy + l = W_ListObject(self.space, [self.space.wrap(1.2),self.space.wrap(2.3),self.space.wrap(3.4)]) + assert isinstance(l.strategy, FloatListStrategy) + l.setitem(0, self.space.wrap("a")) + assert isinstance(l.strategy, ObjectListStrategy) + def test_insert(self): # no change l = W_ListObject(self.space, [self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) @@ -84,6 +103,12 @@ l.insert(3, self.space.wrap('d')) assert isinstance(l.strategy, ObjectListStrategy) + # FloatStrategy + l = W_ListObject(self.space, [self.space.wrap(1.1),self.space.wrap(2.2),self.space.wrap(3.3)]) + 
assert isinstance(l.strategy, FloatListStrategy) + l.insert(3, self.space.wrap('d')) + assert isinstance(l.strategy, ObjectListStrategy) + # EmptyStrategy l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) @@ -95,7 +120,9 @@ l.insert(0, self.space.wrap(2)) assert isinstance(l.strategy, IntegerListStrategy) - def notest_list_empty_after_delete(self): + def test_list_empty_after_delete(self): + import py + py.test.skip("return to emptyliststrategy is not supported anymore") l = W_ListObject(self.space, [self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.deleteitem(0) @@ -117,21 +144,36 @@ l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) assert isinstance(l.strategy, IntegerListStrategy) + # IntegerStrategy to IntegerStrategy l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)])) assert isinstance(l.strategy, IntegerListStrategy) + # ObjectStrategy to ObjectStrategy l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap('b'), self.space.wrap(3)]) assert isinstance(l.strategy, ObjectListStrategy) l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) assert isinstance(l.strategy, ObjectListStrategy) + # IntegerStrategy to ObjectStrategy l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap('a'), self.space.wrap('b'), self.space.wrap('c')])) assert isinstance(l.strategy, ObjectListStrategy) + # StringStrategy to ObjectStrategy + l = W_ListObject(self.space, [self.space.wrap('a'), self.space.wrap('b'), self.space.wrap('c')]) + assert isinstance(l.strategy, StringListStrategy) + l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) + assert isinstance(l.strategy, ObjectListStrategy) + + # FloatStrategy to ObjectStrategy + l = W_ListObject(self.space, [self.space.wrap(1.1), self.space.wrap(2.2), self.space.wrap(3.3)]) + assert isinstance(l.strategy, FloatListStrategy) + l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap('a'), self.space.wrap(2), self.space.wrap(3)])) + assert isinstance(l.strategy, ObjectListStrategy) + def test_setslice_List(self): def wrapitems(items): @@ -160,6 +202,11 @@ keep_other_strategy(l, 0, 2, other.length(), other) assert l.strategy is self.space.fromcache(UnicodeListStrategy) + l = W_ListObject(self.space, wrapitems([1.1, 2.2, 3.3, 4.4, 5.5])) + other = W_ListObject(self.space, []) + keep_other_strategy(l, 0, 1, l.length(), other) + assert l.strategy is self.space.fromcache(FloatListStrategy) + l = W_ListObject(self.space, wrapitems(["a",3,"c",4,"e"])) other = W_ListObject(self.space, wrapitems(["a", "b", "c"])) keep_other_strategy(l, 0, 2, other.length(), other) @@ -194,6 +241,11 @@ l.extend(W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)])) assert isinstance(l.strategy, IntegerListStrategy) + l = W_ListObject(self.space, [self.space.wrap(1.1), self.space.wrap(2.2), self.space.wrap(3.3)]) + assert isinstance(l.strategy, FloatListStrategy) + l.extend(W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)])) + assert 
isinstance(l.strategy, ObjectListStrategy) + def test_empty_extend_with_any(self): empty = W_ListObject(self.space, []) assert isinstance(empty.strategy, EmptyListStrategy) @@ -220,6 +272,11 @@ empty = W_ListObject(self.space, []) assert isinstance(empty.strategy, EmptyListStrategy) + empty.extend(W_ListObject(self.space, [self.space.wrap(1.1), self.space.wrap(2.2), self.space.wrap(3.3)])) + assert isinstance(empty.strategy, FloatListStrategy) + + empty = W_ListObject(self.space, []) + assert isinstance(empty.strategy, EmptyListStrategy) empty.extend(W_ListObject(self.space, [])) assert isinstance(empty.strategy, EmptyListStrategy) @@ -293,12 +350,13 @@ l.setslice(0, 1, 3, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) assert isinstance(l.strategy, IntegerListStrategy) - def test_get_items_copy(self): + def test_copy_list(self): l1 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) - l2 = l1.getitems() + l2 = l1.clone() l2.append(self.space.wrap(4)) assert not l2 == l1.getitems() + def test_getitems_does_not_copy_object_list(self): l1 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap("two"), self.space.wrap(3)]) l2 = l1.getitems() l2.append(self.space.wrap("four")) @@ -347,7 +405,6 @@ # should not raise assert getitem__List_Slice(self.space, l, w_slice).strategy == self.space.fromcache(EmptyListStrategy) - def test_add_to_rangelist(self): l1 = make_range_list(self.space, 1, 1, 3) l2 = W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5)]) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -102,6 +102,7 @@ 'instancetypedef', 'terminator', '_version_tag?', + 'name?', ] # for config.objspace.std.getattributeshortcut diff --git a/pypy/rlib/_stacklet_n_a.py b/pypy/rlib/_stacklet_n_a.py --- a/pypy/rlib/_stacklet_n_a.py +++ b/pypy/rlib/_stacklet_n_a.py @@ -1,4 +1,5 @@ from pypy.rlib import _rffi_stacklet as _c +from pypy.rlib import objectmodel, debug from pypy.rpython.annlowlevel import llhelper from pypy.tool.staticmethods import StaticMethods @@ -21,6 +22,9 @@ def destroy(thrd, h): _c.destroy(thrd._thrd, h) + if objectmodel.we_are_translated(): + debug.debug_print("not using a framework GC: " + "stacklet_destroy() may leak") is_empty_handle = _c.is_empty_handle diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -176,7 +176,6 @@ return decorator @oopspec("jit.isconstant(value)") - at specialize.ll() def isconstant(value): """ While tracing, returns whether or not the value is currently known to be @@ -186,9 +185,9 @@ This is for advanced usage only. """ return NonConstant(False) +isconstant._annspecialcase_ = "specialize:call_location" @oopspec("jit.isvirtual(value)") - at specialize.ll() def isvirtual(value): """ Returns if this value is virtual, while tracing, it's relatively @@ -197,6 +196,7 @@ This is for advanced usage only. """ return NonConstant(False) +isvirtual._annspecialcase_ = "specialize:call_location" class Entry(ExtRegistryEntry): _about_ = hint @@ -738,3 +738,29 @@ return hop.genop('jit_marker', vlist, resulttype=lltype.Void) +def record_known_class(value, cls): + """ + Assure the JIT that value is an instance of cls. This is not a precise + class check, unlike a guard_class. 
+ """ + assert isinstance(value, cls) + + +class Entry(ExtRegistryEntry): + _about_ = record_known_class + + def compute_result_annotation(self, s_inst, s_cls): + from pypy.annotation import model as annmodel + assert s_cls.is_constant() + assert not s_inst.can_be_none() + assert isinstance(s_inst, annmodel.SomeInstance) + + def specialize_call(self, hop): + from pypy.rpython.lltypesystem import lltype, rclass + classrepr = rclass.get_type_repr(hop.rtyper) + + hop.exception_cannot_occur() + v_inst = hop.inputarg(hop.args_r[0], arg=0) + v_cls = hop.inputarg(classrepr, arg=1) + return hop.genop('jit_record_known_class', [v_inst, v_cls], + resulttype=lltype.Void) diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -411,6 +411,10 @@ def getaddressindll(self, name): return dlsym(self.lib, name) +# These specialize.call_location's should really be specialize.arg(0), however +# you can't hash a pointer obj, which the specialize machinery wants to do. +# Given the present usage of these functions, it's good enough. + at specialize.call_location() @jit.oopspec("libffi_array_getitem(ffitype, width, addr, index, offset)") def array_getitem(ffitype, width, addr, index, offset): for TYPE, ffitype2 in clibffi.ffitype_map: @@ -420,6 +424,7 @@ return rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] assert False + at specialize.call_location() @jit.oopspec("libffi_array_setitem(ffitype, width, addr, index, offset, value)") def array_setitem(ffitype, width, addr, index, offset, value): for TYPE, ffitype2 in clibffi.ffitype_map: @@ -428,4 +433,4 @@ addr = rffi.ptradd(addr, offset) rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] = value return - assert False \ No newline at end of file + assert False diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -91,9 +91,18 @@ return decorated_func + def call_location(self): + """ Specializes the function for each call site. 
+ """ + def decorated_func(func): + func._annspecialcase_ = "specialize:call_location" + return func + + return decorated_func + def _wrap(self, args): return "("+','.join([repr(arg) for arg in args]) +")" - + specialize = _Specialize() def enforceargs(*args): @@ -125,7 +134,7 @@ def __hash__(self): raise TypeError("Symbolics are not hashable!") - + def __nonzero__(self): raise TypeError("Symbolics are not comparable") @@ -155,7 +164,7 @@ def lltype(self): from pypy.rpython.lltypesystem import lltype return lltype.Signed - + malloc_zero_filled = CDefinedIntSymbolic('MALLOC_ZERO_FILLED', default=0) running_on_llinterp = CDefinedIntSymbolic('RUNNING_ON_LLINTERP', default=1) # running_on_llinterp is meant to have the value 0 in all backends @@ -221,7 +230,7 @@ def compute_result_annotation(self, s_sizehint): from pypy.annotation.model import SomeInteger - + assert isinstance(s_sizehint, SomeInteger) return self.bookkeeper.newlist() diff --git a/pypy/rlib/rwin32.py b/pypy/rlib/rwin32.py --- a/pypy/rlib/rwin32.py +++ b/pypy/rlib/rwin32.py @@ -98,8 +98,13 @@ INVALID_HANDLE_VALUE = rffi.cast(HANDLE, -1) PFILETIME = rffi.CArrayPtr(FILETIME) - GetLastError = winexternal('GetLastError', [], DWORD, threadsafe=False) - SetLastError = winexternal('SetLastError', [DWORD], lltype.Void) + _GetLastError = winexternal('GetLastError', [], DWORD, threadsafe=False) + _SetLastError = winexternal('SetLastError', [DWORD], lltype.Void) + + def GetLastError(): + return rffi.cast(lltype.Signed, _GetLastError()) + def SetLastError(err): + _SetLastError(rffi.cast(DWORD, err)) # In tests, the first call to GetLastError is always wrong, because error # is hidden by operations in ll2ctypes. Call it now. @@ -184,12 +189,12 @@ msglen = FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, None, - code, + rffi.cast(DWORD, code), DEFAULT_LANGUAGE, rffi.cast(rffi.CCHARP, buf), 0, None) - if msglen <= 2 or msglen > sys.maxint: + if msglen <= 2: # includes the case msglen < 0 return fake_FormatError(code) # FormatMessage always appends \r\n. 
diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -548,6 +548,9 @@ def op_jit_marker(self, *args): pass + def op_jit_record_known_class(self, *args): + pass + def op_get_exception_addr(self, *args): raise NotImplementedError diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -430,6 +430,7 @@ 'jit_force_virtual': LLOp(canrun=True), 'jit_is_virtual': LLOp(canrun=True), 'jit_force_quasi_immutable': LLOp(canrun=True), + 'jit_record_known_class' : LLOp(canrun=True), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canmallocgc=True), diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -548,6 +548,9 @@ def op_jit_force_quasi_immutable(*args): pass +def op_jit_record_known_class(x, y): + pass + def op_get_group_member(TYPE, grpptr, memberoffset): from pypy.rpython.lltypesystem import llgroup assert isinstance(memberoffset, llgroup.GroupMemberOffset) diff --git a/pypy/rpython/memory/gctransform/asmgcroot.py b/pypy/rpython/memory/gctransform/asmgcroot.py --- a/pypy/rpython/memory/gctransform/asmgcroot.py +++ b/pypy/rpython/memory/gctransform/asmgcroot.py @@ -92,7 +92,6 @@ # make a copy of the graph that will reload the values graph2 = copygraph(fnptr._obj.graph) block2 = graph2.startblock - block2.isstartblock = False block1 = Block([]) reloadedvars = [] for v, c_p in zip(block2.inputargs, sra): @@ -109,7 +108,6 @@ [w], v)) reloadedvars.append(v) block1.closeblock(Link(reloadedvars, block2)) - block1.isstartblock = True graph2.startblock = block1 FUNC2 = lltype.FuncType([], FUNC1.RESULT) fnptr2 = lltype.functionptr(FUNC2, diff --git a/pypy/rpython/memory/gctransform/test/test_transform.py b/pypy/rpython/memory/gctransform/test/test_transform.py --- a/pypy/rpython/memory/gctransform/test/test_transform.py +++ b/pypy/rpython/memory/gctransform/test/test_transform.py @@ -102,12 +102,12 @@ llops.genop("gc_pop_alive", [var]) -def checkblock(block, is_borrowed): +def checkblock(block, is_borrowed, is_start_block): if block.operations == (): # a return/exception block -- don't want to think about them # (even though the test passes for somewhat accidental reasons) return - if block.isstartblock: + if is_start_block: refs_in = 0 else: refs_in = len([v for v in block.inputargs if isinstance(v, Variable) @@ -167,7 +167,7 @@ if check: for graph, is_borrowed in graphs_borrowed.iteritems(): for block in graph.iterblocks(): - checkblock(block, is_borrowed) + checkblock(block, is_borrowed, block is graph.startblock) return t, transformer def getops(graph): diff --git a/pypy/rpython/memory/gctransform/transform.py b/pypy/rpython/memory/gctransform/transform.py --- a/pypy/rpython/memory/gctransform/transform.py +++ b/pypy/rpython/memory/gctransform/transform.py @@ -263,9 +263,7 @@ # still be empty (but let's check) if starts_with_empty_block(graph) and inserted_empty_startblock: old_startblock = graph.startblock - graph.startblock.isstartblock = False graph.startblock = graph.startblock.exits[0].target - graph.startblock.isstartblock = True checkgraph(graph) diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -1773,7 +1773,7 @@ 
@registering(rwin32.FormatError) def register_rwin32_FormatError(self): - return extdef([rwin32.DWORD], str, + return extdef([lltype.Signed], str, "rwin32_FormatError", llimpl=rwin32.llimpl_FormatError, ooimpl=rwin32.fake_FormatError) diff --git a/pypy/rpython/module/ll_os_stat.py b/pypy/rpython/module/ll_os_stat.py --- a/pypy/rpython/module/ll_os_stat.py +++ b/pypy/rpython/module/ll_os_stat.py @@ -12,6 +12,7 @@ from pypy.rpython.tool import rffi_platform as platform from pypy.rpython.lltypesystem.rtupletype import TUPLE_TYPE from pypy.rlib import rposix +from pypy.rlib.rarithmetic import intmask from pypy.rlib.objectmodel import specialize from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.annlowlevel import hlstr @@ -442,20 +443,19 @@ # Helper functions for win32 def make_longlong(high, low): - return (lltype.r_longlong(high) << 32) + lltype.r_longlong(low) + return (rffi.r_longlong(high) << 32) + rffi.r_longlong(low) # Seconds between 1.1.1601 and 1.1.1970 -secs_between_epochs = lltype.r_longlong(11644473600) +secs_between_epochs = rffi.r_longlong(11644473600) def FILE_TIME_to_time_t_nsec(filetime): ft = make_longlong(filetime.c_dwHighDateTime, filetime.c_dwLowDateTime) # FILETIME is in units of 100 nsec nsec = (ft % 10000000) * 100 time = (ft / 10000000) - secs_between_epochs - return time, nsec + return intmask(time), intmask(nsec) def time_t_to_FILE_TIME(time, filetime): - ft = lltype.r_longlong((time + secs_between_epochs) * 10000000) - filetime.c_dwHighDateTime = lltype.r_uint(ft >> 32) - filetime.c_dwLowDateTime = lltype.r_uint(ft & lltype.r_uint(-1)) - + ft = (rffi.r_longlong(time) + secs_between_epochs) * 10000000 + filetime.c_dwHighDateTime = rffi.r_uint(ft >> 32) + filetime.c_dwLowDateTime = rffi.r_uint(ft) # masking off high bits diff --git a/pypy/rpython/normalizecalls.py b/pypy/rpython/normalizecalls.py --- a/pypy/rpython/normalizecalls.py +++ b/pypy/rpython/normalizecalls.py @@ -116,8 +116,6 @@ v = Constant(default) outlist.append(v) newblock.closeblock(Link(outlist, oldblock)) - oldblock.isstartblock = False - newblock.isstartblock = True graph.startblock = newblock for i in range(len(newdefaults)-1,-1,-1): if newdefaults[i] is NODEFAULT: @@ -171,8 +169,6 @@ # prepare the output args of newblock and link outlist = inlist[:] newblock.closeblock(Link(outlist, oldblock)) - oldblock.isstartblock = False - newblock.isstartblock = True graph.startblock = newblock # finished checkgraph(graph) diff --git a/pypy/tool/nullpath.py b/pypy/tool/nullpath.py --- a/pypy/tool/nullpath.py +++ b/pypy/tool/nullpath.py @@ -1,17 +1,12 @@ import py, os -if os.name <> 'nt': - NULLPATHNAME = '/dev/null' -else: - NULLPATHNAME = 'NUL' - class NullPyPathLocal(py.path.local): def join(self, *args): return self.__class__(py.path.local.join(self, *args)) def open(self, mode): - return open(NULLPATHNAME, mode) + return open(os.devnull, mode) def __repr__(self): return py.path.local.__repr__(self) + ' [fake]' diff --git a/pypy/tool/test/test_nullpath.py b/pypy/tool/test/test_nullpath.py --- a/pypy/tool/test/test_nullpath.py +++ b/pypy/tool/test/test_nullpath.py @@ -1,7 +1,6 @@ -import sys +import sys, os import py -from pypy.tool.nullpath import NullPyPathLocal, NULLPATHNAME - +from pypy.tool.nullpath import NullPyPathLocal def test_nullpath(tmpdir): path = NullPyPathLocal(tmpdir) @@ -10,4 +9,4 @@ assert isinstance(foo_txt, NullPyPathLocal) # f = foo_txt.open('w') - assert f.name == NULLPATHNAME + assert f.name == os.devnull diff --git 
a/pypy/translator/backendopt/constfold.py b/pypy/translator/backendopt/constfold.py --- a/pypy/translator/backendopt/constfold.py +++ b/pypy/translator/backendopt/constfold.py @@ -37,8 +37,9 @@ except (KeyboardInterrupt, SystemExit): raise except Exception, e: - log.WARNING('constant-folding %r:' % (spaceop,)) - log.WARNING(' %s: %s' % (e.__class__.__name__, e)) + pass # turn off reporting these as warnings: useless + #log.WARNING('constant-folding %r:' % (spaceop,)) + #log.WARNING(' %s: %s' % (e.__class__.__name__, e)) else: # success in folding this space operation if spaceop.opname in fixup_op_result: diff --git a/pypy/translator/backendopt/inline.py b/pypy/translator/backendopt/inline.py --- a/pypy/translator/backendopt/inline.py +++ b/pypy/translator/backendopt/inline.py @@ -453,7 +453,6 @@ #vars that need to be passed through the blocks of the inlined function linktoinlined = splitlink copiedstartblock = self.copy_block(self.graph_to_inline.startblock) - copiedstartblock.isstartblock = False #find args passed to startblock of inlined function passon_args = [] for arg in self.op.args[1:]: diff --git a/pypy/translator/backendopt/mallocv.py b/pypy/translator/backendopt/mallocv.py --- a/pypy/translator/backendopt/mallocv.py +++ b/pypy/translator/backendopt/mallocv.py @@ -391,7 +391,6 @@ virtualframe = VirtualFrame(graph2.startblock, 0, nodelist) graphbuilder = GraphBuilder(self, graph2) specblock = graphbuilder.start_from_virtualframe(virtualframe) - specblock.isstartblock = True specgraph = graph2 specgraph.name += '_mallocv' specgraph.startblock = specblock diff --git a/pypy/translator/backendopt/test/test_malloc.py b/pypy/translator/backendopt/test/test_malloc.py --- a/pypy/translator/backendopt/test/test_malloc.py +++ b/pypy/translator/backendopt/test/test_malloc.py @@ -50,7 +50,8 @@ # we do the loop ourselves instead of calling remove_simple_mallocs() while True: progress = remover.remove_mallocs_once(graph) - simplify.transform_dead_op_vars_in_blocks(list(graph.iterblocks())) + simplify.transform_dead_op_vars_in_blocks(list(graph.iterblocks()), + [graph]) if progress and option.view: t.view() if expected_result is not Ellipsis: diff --git a/pypy/translator/c/src/support.h b/pypy/translator/c/src/support.h --- a/pypy/translator/c/src/support.h +++ b/pypy/translator/c/src/support.h @@ -11,6 +11,7 @@ #endif /* MIN */ #define RUNNING_ON_LLINTERP 0 +#define OP_JIT_RECORD_KNOWN_CLASS(i, c, r) /* nothing */ #define FAIL_EXCEPTION(exc, msg) \ { \ diff --git a/pypy/translator/c/test/test_refcount.py b/pypy/translator/c/test/test_refcount.py --- a/pypy/translator/c/test/test_refcount.py +++ b/pypy/translator/c/test/test_refcount.py @@ -229,7 +229,6 @@ graph = t.buildflowgraph(g) assert graph.startblock.operations == [] graph.startblock = graph.startblock.exits[0].target - graph.startblock.isstartblock = True from pypy.objspace.flow.model import checkgraph checkgraph(graph) t._prebuilt_graphs[g] = graph diff --git a/pypy/translator/simplify.py b/pypy/translator/simplify.py --- a/pypy/translator/simplify.py +++ b/pypy/translator/simplify.py @@ -397,7 +397,8 @@ def transform_dead_op_vars(graph, translator=None): """Remove dead operations and variables that are passed over a link but not used in the target block. 
Input is a graph.""" - return transform_dead_op_vars_in_blocks(list(graph.iterblocks()), translator) + return transform_dead_op_vars_in_blocks(list(graph.iterblocks()), + [graph], translator) # the set of operations that can safely be removed # (they have no side effects, at least in R-Python) @@ -419,11 +420,19 @@ hasattr: True, } -def transform_dead_op_vars_in_blocks(blocks, translator=None): +def find_start_blocks(graphs): + start_blocks = set() + for graph in graphs: + start_blocks.add(graph.startblock) + return start_blocks + +def transform_dead_op_vars_in_blocks(blocks, graphs, translator=None): """Remove dead operations and variables that are passed over a link but not used in the target block. Input is a set of blocks""" read_vars = {} # set of variables really used variable_flow = {} # map {Var: list-of-Vars-it-depends-on} + set_of_blocks = set(blocks) + start_blocks = find_start_blocks(graphs) def canremove(op, block): if op.opname not in CanRemove: @@ -451,7 +460,7 @@ if block.exits: for link in block.exits: - if link.target not in blocks: + if link.target not in set_of_blocks: for arg, targetarg in zip(link.args, link.target.inputargs): read_vars[arg] = True read_vars[targetarg] = True @@ -465,7 +474,7 @@ read_vars[arg] = True # an input block's inputargs should not be modified, even if some # of the function's input arguments are not actually used - if block.isstartblock: + if block in start_blocks: for arg in block.inputargs: read_vars[arg] = True diff --git a/pypy/translator/transform.py b/pypy/translator/transform.py --- a/pypy/translator/transform.py +++ b/pypy/translator/transform.py @@ -115,7 +115,7 @@ # to kill dead (never-followed) links, # which can possibly remove more variables. from pypy.translator.simplify import transform_dead_op_vars_in_blocks - transform_dead_op_vars_in_blocks(block_subset) + transform_dead_op_vars_in_blocks(block_subset, self.translator.graphs) def transform_dead_code(self, block_subset): """Remove dead code: these are the blocks that are not annotated at all diff --git a/pypy/translator/unsimplify.py b/pypy/translator/unsimplify.py --- a/pypy/translator/unsimplify.py +++ b/pypy/translator/unsimplify.py @@ -42,9 +42,7 @@ vars = [copyvar(annotator, v) for v in graph.startblock.inputargs] newblock = Block(vars) newblock.closeblock(Link(vars, graph.startblock)) - graph.startblock.isstartblock = False graph.startblock = newblock - graph.startblock.isstartblock = True def starts_with_empty_block(graph): return (not graph.startblock.operations @@ -151,9 +149,7 @@ newop = SpaceOperation('direct_call', [c_initial_func], v_none) extrablock.operations = [newop] extrablock.closeblock(Link(args, entry_point.startblock)) - entry_point.startblock.isstartblock = False entry_point.startblock = extrablock - entry_point.startblock.isstartblock = True checkgraph(entry_point) def call_final_function(translator, final_func, annhelper=None): From noreply at buildbot.pypy.org Sun Dec 4 20:59:25 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Dec 2011 20:59:25 +0100 (CET) Subject: [pypy-commit] pypy py3k: Merge glitch Message-ID: <20111204195925.C6FDB8205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50153:0cf341cfa0f7 Date: 2011-12-04 19:38 +0100 http://bitbucket.org/pypy/pypy/changeset/0cf341cfa0f7/ Log: Merge glitch diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -922,9 +922,6 @@ if reverse: 
l.reverse() -<<<<<<< local -class UnicodeListStrategy(AbstractUnwrappedStrategy, ListStrategy): -======= class FloatListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0.0 _applevel_repr = "float" @@ -952,8 +949,7 @@ if reverse: l.reverse() -class StringListStrategy(AbstractUnwrappedStrategy, ListStrategy): ->>>>>>> other +class UnicodeListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None _applevel_repr = "str" @@ -1306,12 +1302,8 @@ TimSort = make_timsort_class() IntBaseTimSort = make_timsort_class() -<<<<<<< local +FloatBaseTimSort = make_timsort_class() UnicodeBaseTimSort = make_timsort_class() -======= -FloatBaseTimSort = make_timsort_class() -StringBaseTimSort = make_timsort_class() ->>>>>>> other class KeyContainer(baseobjspace.W_Root): def __init__(self, w_key, w_item): @@ -1331,15 +1323,11 @@ def lt(self, a, b): return a < b -<<<<<<< local -class UnicodeSort(UnicodeBaseTimSort): -======= class FloatSort(FloatBaseTimSort): def lt(self, a, b): return a < b -class StringSort(StringBaseTimSort): ->>>>>>> other +class UnicodeSort(UnicodeBaseTimSort): def lt(self, a, b): return a < b From noreply at buildbot.pypy.org Sun Dec 4 20:59:27 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Dec 2011 20:59:27 +0100 (CET) Subject: [pypy-commit] pypy py3k: Fix translation Message-ID: <20111204195927.145CA8205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50154:d7e4343ce017 Date: 2011-12-04 20:58 +0100 http://bitbucket.org/pypy/pypy/changeset/d7e4343ce017/ Log: Fix translation diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -274,8 +274,8 @@ if bylen == 0: raise OperationError(space.w_ValueError, space.wrap("empty separator")) + start = 0 if bylen == 1 and maxsplit < 0: - start = 0 # fast path: uses str.rfind(character) and str.count(character) by = by[0] # annotator hack: string -> char count = value.count(by) From pullrequests-noreply at bitbucket.org Sun Dec 4 21:22:53 2011 From: pullrequests-noreply at bitbucket.org (Bitbucket) Date: Sun, 04 Dec 2011 20:22:53 -0000 Subject: [pypy-commit] [OPEN] Pull request #14 for pypy/pypy: py3k fixes In-Reply-To: References: Message-ID: <20111204202253.23103.92965@bitbucket01.managed.contegix.com> Pull request #14 has been updated by chronitis to include new changes. https://bitbucket.org/pypy/pypy/pull-request/14/py3k-fixes Title: py3k fixes Creator: chronitis Some work on __builtins__ Updated list of changes: 39344e68a597 by chronitis: "Merge" fa576535dacc by chronitis: "Fix for annotation fails." b3840c3c8440 by chronitis: "Make round() return int when ndigits is None." 14369c3d47f1 by chronitis: "Allow pow(negative, fractional) by delegation to complex." 5b95e7c08e92 by chronitis: "Proper handling of large __len__ values." 6a78810d702d by chronitis: "Fix hex, oct (underlying 0123 -> 0o123)." fa0c7e5b1feb by chronitis: "Merge" f0afd64eac48 by chronitis: "Correct mapiterator pickle support function." 050613e81bba by chronitis: "Add interp-level implementations (in py3k iterator style) for zip and map. (Also?" cbc1a1861a21 by chronitis: "Update __builtin__ tests, updating some (but not all) behaviour to py3k. Fix ind?" 95ca73a3be3e by chronitis: "Merge." e3ea662db1a7 by chronitis: "Remove list __(get,set,del)slice__ tests not used in python3." 3b752508ba56 by chronitis: "Fix print for python3 in tests." 
3e17ca1ea704 by chronitis: "Fix xrange/range issues in python3 code." d3c50446897f by chronitis: "Bool and len both use _check_len_result for identical error messages." 09f985a90b5d by chronitis: "Fix descroperation.is_true to require __bool__ returns bool and __len__ returns ?" de954d9fe3cd by chronitis: "Fix binascii.Error, for test_base64 and test_binascii." 9b4d52021d2a by chronitis: "Fix stringobject so annotation succeeds." 342485ca402a by chronitis: "Fix module.struct so that annotation succeeds." c3020e56c5bc by chronitis: "Merge." 4a241f725714 by chronitis: "Fix struct module for py3k, fixing unicode/bytes issues, raise struct.error inst?" 8114ec2ff7b1 by chronitis: "Add a couple more string test cases, and fix contains and expandtabs appropriate?" 7cf1f39c9f60 by chronitis: "Fix string methods so that all stdobjspace tests pass." 59ee5b2d25bc by chronitis: "Update stdobjspace string tests with bytestrings, where necessary." 63cd13767ec9 by chronitis: "Add py3k sys.hash_info struct." -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. From noreply at buildbot.pypy.org Sun Dec 4 22:06:00 2011 From: noreply at buildbot.pypy.org (ned) Date: Sun, 4 Dec 2011 22:06:00 +0100 (CET) Subject: [pypy-commit] pypy nedbat-sandbox: A new test: try opening too many files in the sandbox, see that you can't Message-ID: <20111204210600.827CE8205C@wyvern.cs.uni-duesseldorf.de> Author: Ned Batchelder Branch: nedbat-sandbox Changeset: r50155:6f823f8ecd00 Date: 2011-12-03 22:23 -0500 http://bitbucket.org/pypy/pypy/changeset/6f823f8ecd00/ Log: A new test: try opening too many files in the sandbox, see that you can't diff --git a/pypy/translator/sandbox/test/test_sandlib.py b/pypy/translator/sandbox/test/test_sandlib.py --- a/pypy/translator/sandbox/test/test_sandlib.py +++ b/pypy/translator/sandbox/test/test_sandlib.py @@ -1,14 +1,17 @@ import py -import os, StringIO +import errno, os, StringIO from pypy.tool.sourcetools import func_with_new_name from pypy.rpython.lltypesystem import rffi from pypy.translator.sandbox.sandlib import SandboxedProc from pypy.translator.sandbox.sandlib import SimpleIOSandboxedProc +from pypy.translator.sandbox.sandlib import VirtualizedSandboxedProc from pypy.translator.sandbox.sandlib import VirtualizedSocketProc from pypy.translator.sandbox.test.test_sandbox import compile +from pypy.translator.sandbox.vfs import Dir, File -class MySandboxedProc(SandboxedProc): +class MockSandboxedProc(SandboxedProc): + """A sandbox process wrapper that replays expected syscalls.""" def __init__(self, args, expected): SandboxedProc.__init__(self, args) @@ -48,7 +51,7 @@ return 0 exe = compile(entry_point) - proc = MySandboxedProc([exe, 'x1', 'y2'], expected = [ + proc = MockSandboxedProc([exe, 'x1', 'y2'], expected = [ ("open", ("/tmp/foobar", os.O_RDONLY, 0777), 77), ("read", (77, 123), "he\x00llo"), ("write", (77, "world\x00!\x00"), 42), @@ -69,7 +72,7 @@ return n exe = compile(entry_point) - proc = MySandboxedProc([exe, 'spam', 'egg'], expected = [ + proc = MockSandboxedProc([exe, 'spam', 'egg'], expected = [ ("foobar", ("spam",), 2), ("foobar", ("egg",), 0), ]) @@ -122,9 +125,45 @@ return 0 exe = compile(entry_point) - proc = MySandboxedProc([exe], expected = [ + proc = MockSandboxedProc([exe], expected = [ ("open", ("/tmp/foobar", os.O_RDONLY, 0777), OSError(-42, "baz")), ("close", (-42,), None), ]) proc.handle_forever() assert proc.seen == len(proc.expected) + + +class 
SandboxedProcWithFiles(VirtualizedSandboxedProc, SimpleIOSandboxedProc): + """A sandboxed process with a simple virtualized filesystem. + + For testing file operations. + + """ + def build_virtual_root(self): + return Dir({ + 'hi.txt': File("Hello, world!\n"), + }) + +def test_too_many_opens(): + def entry_point(argv): + try: + open_files = [] + for i in range(500): + fd = os.open('/hi.txt', os.O_RDONLY, 0777) + open_files.append(fd) + txt = os.read(fd, 100) + if txt != "Hello, world!\n": + print "Wrong content: %s" % txt + except OSError, e: + if e.errno != errno.EMFILE: + print "OSError: %s!" % (e.errno,) + else: + print "We opened 500 files! Shouldn't have been able to." + print "All ok!" + return 0 + exe = compile(entry_point) + + proc = SandboxedProcWithFiles([exe]) + output, error = proc.communicate("") + assert output == "All ok!\n" + assert error == "" From noreply at buildbot.pypy.org Sun Dec 4 22:06:01 2011 From: noreply at buildbot.pypy.org (ned) Date: Sun, 4 Dec 2011 22:06:01 +0100 (CET) Subject: [pypy-commit] pypy nedbat-sandbox: A correct implementation of os.fstat, with a test. Message-ID: <20111204210601.A6B438205C@wyvern.cs.uni-duesseldorf.de> Author: Ned Batchelder Branch: nedbat-sandbox Changeset: r50156:e4704f598176 Date: 2011-12-04 16:05 -0500 http://bitbucket.org/pypy/pypy/changeset/e4704f598176/ Log: A correct implementation of os.fstat, with a test. diff --git a/pypy/translator/sandbox/sandlib.py b/pypy/translator/sandbox/sandlib.py --- a/pypy/translator/sandbox/sandlib.py +++ b/pypy/translator/sandbox/sandlib.py @@ -409,7 +409,7 @@ def __init__(self, *args, **kwds): super(VirtualizedSandboxedProc, self).__init__(*args, **kwds) self.virtual_root = self.build_virtual_root() - self.open_fds = {} # {virtual_fd: real_file_object} + self.open_fds = {} # {virtual_fd: (real_file_object, node)} def build_virtual_root(self): raise NotImplementedError("must be overridden") @@ -451,19 +451,32 @@ def do_ll_os__ll_os_isatty(self, fd): return self.virtual_console_isatty and fd in (0, 1, 2) - def allocate_fd(self, f): + def allocate_fd(self, f, node=None): for fd in self.virtual_fd_range: if fd not in self.open_fds: - self.open_fds[fd] = f + self.open_fds[fd] = (f, node) return fd else: raise OSError(errno.EMFILE, "trying to open too many files") - def get_file(self, fd): + def get_fd(self, fd, throw=True): + """Get the objects implementing file descriptor `fd`. + + Returns a pair, (open file, vfs node) + + `throw`: if true, raise OSError for bad fd, else return (None, None). 
+ """ try: - return self.open_fds[fd] + f, node = self.open_fds[fd] except KeyError: - raise OSError(errno.EBADF, "bad file descriptor") + if throw: + raise OSError(errno.EBADF, "bad file descriptor") + return None, None + return f, node + + def get_file(self, fd, throw=True): + """Return the open file for file descriptor `fd`.""" + return self.get_fd(fd, throw)[0] def do_ll_os__ll_os_open(self, vpathname, flags, mode): node = self.get_node(vpathname) @@ -471,7 +484,7 @@ raise OSError(errno.EPERM, "write access denied") # all other flags are ignored f = node.open() - return self.allocate_fd(f) + return self.allocate_fd(f, node) def do_ll_os__ll_os_close(self, fd): f = self.get_file(fd) @@ -479,9 +492,8 @@ f.close() def do_ll_os__ll_os_read(self, fd, size): - try: - f = self.open_fds[fd] - except KeyError: + f = self.get_file(fd, throw=False) + if f is None: return super(VirtualizedSandboxedProc, self).do_ll_os__ll_os_read( fd, size) else: @@ -491,12 +503,8 @@ return f.read(min(size, 256*1024)) def do_ll_os__ll_os_fstat(self, fd): - try: - f = self.open_fds[fd] - except KeyError: - return super(VirtualizedSandboxedProc, self).do_ll_os__ll_os_fstat(fd) - else: - return os.stat(f.name) # Isn't there a better way to do this? + f, node = self.get_fd(fd) + return node.stat() do_ll_os__ll_os_fstat.resulttype = s_StatResult def do_ll_os__ll_os_lseek(self, fd, pos, how): @@ -539,13 +547,13 @@ def do_ll_os__ll_os_read(self, fd, size): if fd in self.sockets: - return self.open_fds[fd].recv(size) + return self.get_file(fd).recv(size) return super(VirtualizedSocketProc, self).do_ll_os__ll_os_read( fd, size) def do_ll_os__ll_os_write(self, fd, data): if fd in self.sockets: - return self.open_fds[fd].send(data) + return self.get_file(fd).send(data) return super(VirtualizedSocketProc, self).do_ll_os__ll_os_write( fd, data) diff --git a/pypy/translator/sandbox/test/test_sandlib.py b/pypy/translator/sandbox/test/test_sandlib.py --- a/pypy/translator/sandbox/test/test_sandlib.py +++ b/pypy/translator/sandbox/test/test_sandlib.py @@ -167,3 +167,36 @@ output, error = proc.communicate("") assert output == "All ok!\n" assert error == "" + +def test_fstat(): + def compare(a, b, i): + if a != b: + print "stat and fstat differ @%d: %s != %s" % (i, a, b) + + def entry_point(argv): + try: + # Open a file, and compare stat and fstat + fd = os.open('/hi.txt', os.O_RDONLY, 0777) + st = os.stat('/hi.txt') + fs = os.fstat(fd) + compare(st[0], fs[0], 0) + compare(st[1], fs[1], 1) + compare(st[2], fs[2], 2) + compare(st[3], fs[3], 3) + compare(st[4], fs[4], 4) + compare(st[5], fs[5], 5) + compare(st[6], fs[6], 6) + compare(st[7], fs[7], 7) + compare(st[8], fs[8], 8) + compare(st[9], fs[9], 9) + except OSError, e: + print "OSError: %s" % (e.errno,) + print "All ok!" + return 0 + exe = compile(entry_point) + + proc = SandboxedProcWithFiles([exe]) + output, error = proc.communicate("") + assert output == "All ok!\n" + assert error == "" + From noreply at buildbot.pypy.org Sun Dec 4 23:45:51 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 4 Dec 2011 23:45:51 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-refactor-complex: fix for segfault, thanks to amaury! Message-ID: <20111204224551.830268205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-dtype-refactor-complex Changeset: r50157:7d6deb08502d Date: 2011-12-04 17:45 -0500 http://bitbucket.org/pypy/pypy/changeset/7d6deb08502d/ Log: fix for segfault, thanks to amaury! 
diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -408,11 +408,11 @@ def store(self, storage, width, i, offset, box): subboxes = self.unbox(box) - i = 0 + item_idx = 0 for box in subboxes: - self.itemtypes[i].store(storage, width, i, offset, box) - offset += self.itemtypes[i].get_element_size() - i += 1 + self.itemtypes[item_idx].store(storage, width, i, offset, box) + offset += self.itemtypes[item_idx].get_element_size() + item_idx += 1 def read(self, storage, width, i, offset): boxes = [] From noreply at buildbot.pypy.org Mon Dec 5 00:13:55 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Mon, 5 Dec 2011 00:13:55 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: Hey, this is the first working pypy win64 ever !!! Message-ID: <20111204231355.D46EC8205C@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50158:5dd74223444d Date: 2011-12-04 23:50 +0100 http://bitbucket.org/pypy/pypy/changeset/5dd74223444d/ Log: Hey, this is the first working pypy win64 ever !!! -O1 --no-allworkingmodules but I think it is the first Milestone diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -228,7 +228,7 @@ def extdef_for_os_function_returning_int(self, name, **kwds): c_func = self.llexternal(name, [], rffi.INT, **kwds) def c_func_llimpl(): - res = rffi.cast(rffi.LONG, c_func()) + res = rffi.cast(rffi.SIGNED, c_func()) if res == -1: raise OSError(rposix.get_errno(), "%s failed" % name) return res @@ -240,7 +240,7 @@ def extdef_for_os_function_accepting_int(self, name, **kwds): c_func = self.llexternal(name, [rffi.INT], rffi.INT, **kwds) def c_func_llimpl(arg): - res = rffi.cast(rffi.LONG, c_func(arg)) + res = rffi.cast(rffi.SIGNED, c_func(arg)) if res == -1: raise OSError(rposix.get_errno(), "%s failed" % name) @@ -252,7 +252,7 @@ def extdef_for_os_function_accepting_2int(self, name, **kwds): c_func = self.llexternal(name, [rffi.INT, rffi.INT], rffi.INT, **kwds) def c_func_llimpl(arg, arg2): - res = rffi.cast(rffi.LONG, c_func(arg, arg2)) + res = rffi.cast(rffi.SIGNED, c_func(arg, arg2)) if res == -1: raise OSError(rposix.get_errno(), "%s failed" % name) @@ -264,7 +264,7 @@ def extdef_for_os_function_accepting_0int(self, name, **kwds): c_func = self.llexternal(name, [], rffi.INT, **kwds) def c_func_llimpl(): - res = rffi.cast(rffi.LONG, c_func()) + res = rffi.cast(rffi.SIGNED, c_func()) if res == -1: raise OSError(rposix.get_errno(), "%s failed" % name) @@ -276,7 +276,7 @@ def extdef_for_os_function_int_to_int(self, name, **kwds): c_func = self.llexternal(name, [rffi.INT], rffi.INT, **kwds) def c_func_llimpl(arg): - res = rffi.cast(rffi.LONG, c_func(arg)) + res = rffi.cast(rffi.SIGNED, c_func(arg)) if res == -1: raise OSError(rposix.get_errno(), "%s failed" % name) return res @@ -754,7 +754,7 @@ if self.GETPGRP_HAVE_ARG: c_func = self.llexternal(name, [rffi.INT], rffi.INT) def c_func_llimpl(): - res = rffi.cast(rffi.LONG, c_func(0)) + res = rffi.cast(rffi.SIGNED, c_func(0)) if res == -1: raise OSError(rposix.get_errno(), "%s failed" % name) return res @@ -772,7 +772,7 @@ if self.SETPGRP_HAVE_ARG: c_func = self.llexternal(name, [rffi.INT, rffi.INT], rffi.INT) def c_func_llimpl(): - res = rffi.cast(rffi.LONG, c_func(0, 0)) + res = rffi.cast(rffi.SIGNED, c_func(0, 0)) if res == -1: raise OSError(rposix.get_errno(), "%s failed" % name) @@ -1008,7 +1008,7 @@ os_fsync 
= self.llexternal('_commit', [rffi.INT], rffi.INT) def fsync_llimpl(fd): - res = rffi.cast(rffi.LONG, os_fsync(rffi.cast(rffi.INT, fd))) + res = rffi.cast(rffi.SIGNED, os_fsync(rffi.cast(rffi.INT, fd))) if res < 0: raise OSError(rposix.get_errno(), "fsync failed") return extdef([int], s_None, @@ -1020,7 +1020,7 @@ os_fdatasync = self.llexternal('fdatasync', [rffi.INT], rffi.INT) def fdatasync_llimpl(fd): - res = rffi.cast(rffi.LONG, os_fdatasync(rffi.cast(rffi.INT, fd))) + res = rffi.cast(rffi.SIGNED, os_fdatasync(rffi.cast(rffi.INT, fd))) if res < 0: raise OSError(rposix.get_errno(), "fdatasync failed") return extdef([int], s_None, @@ -1032,7 +1032,7 @@ os_fchdir = self.llexternal('fchdir', [rffi.INT], rffi.INT) def fchdir_llimpl(fd): - res = rffi.cast(rffi.LONG, os_fchdir(rffi.cast(rffi.INT, fd))) + res = rffi.cast(rffi.SIGNED, os_fchdir(rffi.cast(rffi.INT, fd))) if res < 0: raise OSError(rposix.get_errno(), "fchdir failed") return extdef([int], s_None, @@ -1312,7 +1312,9 @@ result = os__cwait(status_p, pid, options) # shift the status left a byte so this is more # like the POSIX waitpid - status_p[0] <<= 8 + tmp = rffi.cast(rffi.SIGNED, status_p[0]) + tmp <<= 8 + status_p[0] = rffi.cast(rffi.INT, tmp) return result else: # Posix diff --git a/pypy/rpython/module/ll_os_stat.py b/pypy/rpython/module/ll_os_stat.py --- a/pypy/rpython/module/ll_os_stat.py +++ b/pypy/rpython/module/ll_os_stat.py @@ -321,6 +321,7 @@ def attributes_to_mode(attributes): m = 0 + attributes = intmask(attributes) if attributes & win32traits.FILE_ATTRIBUTE_DIRECTORY: m |= win32traits._S_IFDIR | 0111 # IFEXEC for user,group,other else: From noreply at buildbot.pypy.org Mon Dec 5 01:38:03 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 5 Dec 2011 01:38:03 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-refactor-complex: progress, can now see if 2 complexes are equal and read them out of a value Message-ID: <20111205003803.964958205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-dtype-refactor-complex Changeset: r50159:2d7dd6a415a4 Date: 2011-12-04 19:37 -0500 http://bitbucket.org/pypy/pypy/changeset/2d7dd6a415a4/ Log: progress, can now see if 2 complexes are equal and read them out of a value diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -164,6 +164,11 @@ class W_Complex128Box(W_ComplexFloatingBox, CompositeBox): descr__new__, get_dtype = new_dtype_getter("complex128") + def convert_to(self, dtype): + if dtype.itemtype.is_correct_box(self): + return self + raise NotImplementedError + W_GenericBox.typedef = TypeDef("generic", __module__ = "numpypy", diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -52,6 +52,9 @@ # exp = sin = cos = tan = arcsin = arccos = arctan = arcsinh = \ # arctanh = _unimplemented_ufunc + def is_correct_box(self, box): + return isinstance(box, self.BoxType) + class Primitive(object): _mixin_ = True def get_element_size(self): @@ -432,4 +435,14 @@ if isinstance(w_item, self.BoxType): return w_item real, imag = space.unpackcomplex(w_item) - return self.box([self.real.box(real), self.imag.box(imag)]) \ No newline at end of file + return self.box([self.real.box(real), self.imag.box(imag)]) + + def for_computation(self, (real, imag)): + return [ + self.real.for_computation(self.real.unbox(real)), + 
self.imag.for_computation(self.imag.unbox(imag)), + ] + + @raw_binary_op + def eq(self, (real1, imag1), (real2, imag2)): + return real1 == real2 and imag1 == imag2 From notifications-noreply at bitbucket.org Mon Dec 5 03:29:32 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Mon, 05 Dec 2011 02:29:32 -0000 Subject: [pypy-commit] Notification: pypy Message-ID: <20111205022932.32480.38675@bitbucket13.managed.contegix.com> You have received a notification from tankasaurus. Hi, I forked pypy. My fork is at https://bitbucket.org/tankasaurus/pypy. -- Disable notifications at https://bitbucket.org/account/notifications/ From noreply at buildbot.pypy.org Mon Dec 5 05:09:45 2011 From: noreply at buildbot.pypy.org (ned) Date: Mon, 5 Dec 2011 05:09:45 +0100 (CET) Subject: [pypy-commit] pypy nedbat-sandbox: Add a test of opening lots of real files also. Message-ID: <20111205040945.60C4E8205C@wyvern.cs.uni-duesseldorf.de> Author: Ned Batchelder Branch: nedbat-sandbox Changeset: r50160:d06f77ba878f Date: 2011-12-04 18:42 -0500 http://bitbucket.org/pypy/pypy/changeset/d06f77ba878f/ Log: Add a test of opening lots of real files also. diff --git a/pypy/translator/sandbox/test/test_sandlib.py b/pypy/translator/sandbox/test/test_sandlib.py --- a/pypy/translator/sandbox/test/test_sandlib.py +++ b/pypy/translator/sandbox/test/test_sandlib.py @@ -7,7 +7,7 @@ from pypy.translator.sandbox.sandlib import VirtualizedSandboxedProc from pypy.translator.sandbox.sandlib import VirtualizedSocketProc from pypy.translator.sandbox.test.test_sandbox import compile -from pypy.translator.sandbox.vfs import Dir, File +from pypy.translator.sandbox.vfs import Dir, File, RealDir, RealFile class MockSandboxedProc(SandboxedProc): @@ -142,6 +142,7 @@ def build_virtual_root(self): return Dir({ 'hi.txt': File("Hello, world!\n"), + 'this.pyc': RealFile(__file__), }) def test_too_many_opens(): @@ -155,10 +156,27 @@ if txt != "Hello, world!\n": print "Wrong content: %s" % txt except OSError, e: + # We expect to get EMFILE, for opening too many files. if e.errno != errno.EMFILE: print "OSError: %s!" % (e.errno,) else: - print "We opened 500 files! Shouldn't have been able to." + print "We opened 500 fake files! Shouldn't have been able to." + + for fd in open_files: + os.close(fd) + + try: + open_files = [] + for i in range(500): + fd = os.open('/this.pyc', os.O_RDONLY, 0777) + open_files.append(fd) + except OSError, e: + # We expect to get EMFILE, for opening too many files. + if e.errno != errno.EMFILE: + print "OSError: %s!" % (e.errno,) + else: + print "We opened 500 real files! Shouldn't have been able to." + print "All ok!" return 0 exe = compile(entry_point) @@ -179,6 +197,7 @@ fd = os.open('/hi.txt', os.O_RDONLY, 0777) st = os.stat('/hi.txt') fs = os.fstat(fd) + # RPython requires the index for stat to be a constant.. 
:( compare(st[0], fs[0], 0) compare(st[1], fs[1], 1) compare(st[2], fs[2], 2) @@ -199,4 +218,3 @@ output, error = proc.communicate("") assert output == "All ok!\n" assert error == "" - From noreply at buildbot.pypy.org Mon Dec 5 05:09:46 2011 From: noreply at buildbot.pypy.org (ned) Date: Mon, 5 Dec 2011 05:09:46 +0100 (CET) Subject: [pypy-commit] pypy nedbat-sandbox: Add a sandlib test for lseek Message-ID: <20111205040946.81F7A8205C@wyvern.cs.uni-duesseldorf.de> Author: Ned Batchelder Branch: nedbat-sandbox Changeset: r50161:ba523ca5ff05 Date: 2011-12-04 23:08 -0500 http://bitbucket.org/pypy/pypy/changeset/ba523ca5ff05/ Log: Add a sandlib test for lseek diff --git a/pypy/translator/sandbox/test/test_sandlib.py b/pypy/translator/sandbox/test/test_sandlib.py --- a/pypy/translator/sandbox/test/test_sandlib.py +++ b/pypy/translator/sandbox/test/test_sandlib.py @@ -218,3 +218,32 @@ output, error = proc.communicate("") assert output == "All ok!\n" assert error == "" + +def test_lseek(): + def char_should_be(c, should): + if c != should: + print "Wrong char: '%s' should be '%s'" % (c, should) + + def entry_point(argv): + fd = os.open('/hi.txt', os.O_RDONLY, 0777) + char_should_be(os.read(fd, 1), "H") + new = os.lseek(fd, 3, os.SEEK_CUR) + if new != 4: + print "Wrong offset, %d should be 4" % new + char_should_be(os.read(fd, 1), "o") + new = os.lseek(fd, -3, os.SEEK_END) + if new != 11: + print "Wrong offset, %d should be 11" % new + char_should_be(os.read(fd, 1), "d") + new = os.lseek(fd, 7, os.SEEK_SET) + if new != 7: + print "Wrong offset, %d should be 7" % new + char_should_be(os.read(fd, 1), "w") + print "All ok!" + return 0 + exe = compile(entry_point) + + proc = SandboxedProcWithFiles([exe]) + output, error = proc.communicate("") + assert output == "All ok!\n" + assert error == "" From notifications-noreply at bitbucket.org Mon Dec 5 07:04:54 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Mon, 05 Dec 2011 06:04:54 -0000 Subject: [pypy-commit] Notification: pypy Message-ID: <20111205060454.7264.58808@bitbucket01.managed.contegix.com> You have received a notification from pypyja. Hi, I forked pypy. My fork is at https://bitbucket.org/pypyja/pypy. -- Disable notifications at https://bitbucket.org/account/notifications/ From notifications-noreply at bitbucket.org Mon Dec 5 07:13:29 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Mon, 05 Dec 2011 06:13:29 -0000 Subject: [pypy-commit] Notification: Your access to pypy has been revoked. Message-ID: <20111205061329.3383.36534@bitbucket01.managed.contegix.com> You have received a notification from pypyja. You no longer have access to the source of pypy. 
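As a side note on the sandbox lseek test in changeset r50161 above: it depends on /hi.txt holding "Hello, world!\n" (14 bytes), so the SEEK_CUR, SEEK_END and SEEK_SET seeks land exactly on the characters it checks. The same offsets can be sanity-checked outside the sandbox with a throwaway snippet like the one below; it is only an illustration (the temporary file is invented) and is not part of any changeset.

import os, tempfile

fd, path = tempfile.mkstemp()
try:
    os.write(fd, "Hello, world!\n")              # same 14-byte content as /hi.txt
    os.lseek(fd, 0, os.SEEK_SET)
    assert os.read(fd, 1) == "H"                 # position is now 1
    assert os.lseek(fd, 3, os.SEEK_CUR) == 4     # 1 + 3
    assert os.read(fd, 1) == "o"
    assert os.lseek(fd, -3, os.SEEK_END) == 11   # 14 - 3
    assert os.read(fd, 1) == "d"
    assert os.lseek(fd, 7, os.SEEK_SET) == 7
    assert os.read(fd, 1) == "w"
finally:
    os.close(fd)
    os.unlink(path)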
-- Disable notifications at https://bitbucket.org/account/notifications/ From noreply at buildbot.pypy.org Mon Dec 5 08:20:31 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Mon, 5 Dec 2011 08:20:31 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: fix test Message-ID: <20111205072031.5691B8205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50162:16988785671d Date: 2011-12-05 08:20 +0100 http://bitbucket.org/pypy/pypy/changeset/16988785671d/ Log: fix test diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -157,7 +157,7 @@ loop = self.parse(ops) regalloc = RegAlloc(self.cpu.assembler, False) regalloc.prepare_loop(loop.inputargs, loop.operations, - loop.token, []) + loop.original_jitcell_token, []) return regalloc def getint(self, index): From noreply at buildbot.pypy.org Mon Dec 5 10:05:42 2011 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 5 Dec 2011 10:05:42 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: (arigo, bivab): move this hack to rmmap and make it a bit cleaner Message-ID: <20111205090542.D760F8205C@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50163:7873941fd23b Date: 2011-12-02 12:23 +0100 http://bitbucket.org/pypy/pypy/changeset/7873941fd23b/ Log: (arigo, bivab): move this hack to rmmap and make it a bit cleaner diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -14,6 +14,8 @@ _MS_WINDOWS = os.name == "nt" _LINUX = "linux" in sys.platform _64BIT = "64bit" in platform.architecture()[0] +_ARM = platform.machine().startswith('arm') +_PPC = platform.machine().startswith('ppc') class RValueError(Exception): def __init__(self, message): @@ -112,7 +114,11 @@ if _POSIX: has_mremap = cConfig['has_mremap'] - c_mmap, c_mmap_safe = external('mmap', [PTR, size_t, rffi.INT, rffi.INT, + if _ARM or _PPC: + funcname = 'mmap64' + else: + funcname = 'mmap' + c_mmap, c_mmap_safe = external(funcname, [PTR, size_t, rffi.INT, rffi.INT, rffi.INT, off_t], PTR) _, c_munmap_safe = external('munmap', [PTR, size_t], rffi.INT) c_msync, _ = external('msync', [PTR, size_t, rffi.INT], rffi.INT) diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -1024,9 +1024,6 @@ old_eci = funcptr._obj.compilation_info funcname = funcptr._obj._name - #XXX Fix this, hack for ARM - if funcname == 'mmap': - funcname = 'mmap64' if hasattr(old_eci, '_with_ctypes'): old_eci = old_eci._with_ctypes From noreply at buildbot.pypy.org Mon Dec 5 10:05:44 2011 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 5 Dec 2011 10:05:44 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: (arigo, bivab): test the presence of INSTANCE_PTR_NE and INSTANCE_PTR_EQ in the backend Message-ID: <20111205090544.0E0958205C@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50164:b485870748d3 Date: 2011-12-02 12:24 +0100 http://bitbucket.org/pypy/pypy/changeset/b485870748d3/ Log: (arigo, bivab): test the presence of INSTANCE_PTR_NE and INSTANCE_PTR_EQ in the backend diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -761,6 +761,13 @@ r = 
self.execute_operation(rop.PTR_NE, [null_box, u1_box], 'int') assert r.value == 1 + # These operations are supposed to be the same as PTR_EQ/PTR_NE + # just checking that the operations are defined in the backend. + r = self.execute_operation(rop.INSTANCE_PTR_EQ, [u1_box, u2_box], 'int') + assert r.value == 0 + r = self.execute_operation(rop.INSTANCE_PTR_NE, [u2_box, u1_box], 'int') + assert r.value == 1 + def test_array_basic(self): a_box, A = self.alloc_array_of(rffi.SHORT, 342) arraydescr = self.cpu.arraydescrof(A) From noreply at buildbot.pypy.org Mon Dec 5 10:05:45 2011 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 5 Dec 2011 10:05:45 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: (arigo, bivab): add instance_ptr_... also in the register allocator Message-ID: <20111205090545.33DCF8205C@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50165:83649b8410c7 Date: 2011-12-02 12:31 +0100 http://bitbucket.org/pypy/pypy/changeset/83649b8410c7/ Log: (arigo, bivab): add instance_ptr_... also in the register allocator diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -439,8 +439,8 @@ prepare_op_uint_lt = prepare_cmp_op('uint_lt') prepare_op_uint_ge = prepare_cmp_op('uint_ge') - prepare_op_ptr_eq = prepare_op_int_eq - prepare_op_ptr_ne = prepare_op_int_ne + prepare_op_ptr_eq = prepare_op_instance_ptr_eq = prepare_op_int_eq + prepare_op_ptr_ne = prepare_op_instance_ptr_ne = prepare_op_int_ne prepare_guard_int_lt = prepare_cmp_op('guard_int_lt') prepare_guard_int_le = prepare_cmp_op('guard_int_le') @@ -455,8 +455,8 @@ prepare_guard_uint_lt = prepare_cmp_op('guard_uint_lt') prepare_guard_uint_ge = prepare_cmp_op('guard_uint_ge') - prepare_guard_ptr_eq = prepare_guard_int_eq - prepare_guard_ptr_ne = prepare_guard_int_ne + prepare_guard_ptr_eq = prepare_guard_instance_ptr_eq = prepare_guard_int_eq + prepare_guard_ptr_ne = prepare_guard_instance_ptr_ne = prepare_guard_int_ne prepare_op_int_add_ovf = prepare_op_int_add prepare_op_int_sub_ovf = prepare_op_int_sub From noreply at buildbot.pypy.org Mon Dec 5 10:05:46 2011 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 5 Dec 2011 10:05:46 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: (arigo, bivab): clear CPU cache everty time instructions are written to memory and put breakpoints at the locations that are going to be patched Message-ID: <20111205090546.59A0F8205C@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50166:8fbef9af526f Date: 2011-12-02 23:04 +0100 http://bitbucket.org/pypy/pypy/changeset/8fbef9af526f/ Log: (arigo, bivab): clear CPU cache everty time instructions are written to memory and put breakpoints at the locations that are going to be patched diff --git a/pypy/jit/backend/arm/codebuilder.py b/pypy/jit/backend/arm/codebuilder.py --- a/pypy/jit/backend/arm/codebuilder.py +++ b/pypy/jit/backend/arm/codebuilder.py @@ -6,12 +6,19 @@ from pypy.rlib.rmmap import alloc, PTR from pypy.rpython.annlowlevel import llhelper -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.lltypesystem import lltype, rffi, llmemory from pypy.jit.metainterp.history import ConstInt, BoxInt, AbstractFailDescr from pypy.rlib.objectmodel import we_are_translated from pypy.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin from pypy.tool.udir import udir +__clear_cache = rffi.llexternal( + "__clear_cache", + [llmemory.Address, 
llmemory.Address], + lltype.Void, + _nowrapper=True, + sandboxsafe=True) + def binary_helper_call(name): signature = getattr(arch, 'arm_%s_sign' % name) function = getattr(arch, 'arm_%s' % name) @@ -284,8 +291,14 @@ gcrootmap.put(rawstart + pos, mark) return rawstart + def clear_cache(self, addr): + startaddr = rffi.cast(llmemory.Address, addr) + endaddr = rffi.cast(llmemory.Address, addr + self.get_relative_pos()) + __clear_cache(startaddr, endaddr) + def copy_to_raw_memory(self, addr): self._copy_to_raw_memory(addr) + self.clear_cache(addr) self._dump(addr, "jit-backend-dump", 'arm') def currpos(self): diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -199,7 +199,7 @@ print 'Failargs: ', op.getfailargs() pos = self.mc.currpos() - self.mc.NOP() + self.mc.BKPT() self.pending_guards.append(GuardToken(descr, failargs=op.getfailargs(), faillocs=arglocs, @@ -495,7 +495,7 @@ self.mc.TST_ri(r.ip.value, imm=ofs) jz_location = self.mc.currpos() - self.mc.NOP() + self.mc.BKPT() # the following is supposed to be the slow path, so whenever possible # we choose the most compact encoding over the most efficient one. @@ -958,7 +958,7 @@ regalloc.possibly_free_var(resbox) fast_jmp_pos = self.mc.currpos() - self.mc.NOP() + self.mc.BKPT() # Path A: use assembler helper #if values are equal we take the fast path @@ -981,7 +981,7 @@ # jump to merge point jmp_pos = self.mc.currpos() #jmp_location = self.mc.curraddr() - self.mc.NOP() + self.mc.BKPT() # Path B: load return value and reset token # Fast Path using result boxes From noreply at buildbot.pypy.org Mon Dec 5 11:57:32 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Dec 2011 11:57:32 +0100 (CET) Subject: [pypy-commit] pypy default: Oups, sorry. Message-ID: <20111205105732.D9AA18205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50167:1f76b745f839 Date: 2011-12-05 11:57 +0100 http://bitbucket.org/pypy/pypy/changeset/1f76b745f839/ Log: Oups, sorry. 
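Changeset r50166 above shows a pattern any JIT backend on ARM needs: once freshly assembled instructions have been copied into executable memory, the instruction cache must be invalidated for exactly that address range before the code is run, otherwise stale icache lines may be executed. The following is only a schematic sketch; AssemblerBufferSketch, raw_write and flush_icache are invented stand-ins for the real block builder, the raw copy into executable memory, and the __clear_cache primitive used in the diff above.

class AssemblerBufferSketch(object):
    def __init__(self, raw_write, flush_icache):
        self.raw_write = raw_write        # copies bytes into executable memory
        self.flush_icache = flush_icache  # invalidates the icache over a range
        self.chunks = []

    def emit(self, machine_code):
        self.chunks.append(machine_code)

    def copy_to_raw_memory(self, addr):
        code = "".join(self.chunks)
        self.raw_write(addr, code)
        # flush from the start address to the end of the newly written code,
        # mirroring clear_cache() in the ARM codebuilder diff above
        self.flush_icache(addr, addr + len(code))
        return addr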
diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -490,6 +490,7 @@ check(a[i].y.i == n + i * 100 + 2) check(a[i].z.i == n + i * 100 + 3) i += 1 + n -= x.foo return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s f(123, *[None]*11) # check that the check() are ok return None, f, None From noreply at buildbot.pypy.org Mon Dec 5 12:09:52 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 5 Dec 2011 12:09:52 +0100 (CET) Subject: [pypy-commit] pypy default: A test and a fix Message-ID: <20111205110952.7736B8205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50168:6b51800a7ece Date: 2011-12-05 13:09 +0200 http://bitbucket.org/pypy/pypy/changeset/6b51800a7ece/ Log: A test and a fix diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -823,6 +823,15 @@ bool(v.value)): # store a non-NULL self._gen_write_barrier(newops, op.getarg(0), v) op = op.copy_and_change(rop.SETFIELD_RAW) + # ---------- write barrier for SETINTERIORFIELD_GC ------ + if op.getopnum() == rop.SETINTERIORFIELD_GC: + val = op.getarg(0) + if val is not last_malloc: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self._gen_write_barrier(newops, op.getarg(0), v) + op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) # ---------- write barrier for SETARRAYITEM_GC ---------- if op.getopnum() == rop.SETARRAYITEM_GC: val = op.getarg(0) diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -570,6 +570,28 @@ assert operations[1].getarg(2) == v_value assert operations[1].getdescr() == array_descr + def test_rewrite_assembler_5(self): + S = lltype.GcStruct('S') + A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) + interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, + A.OF, 'x') + wbdescr = self.gc_ll_descr.write_barrier_descr + ops = parse(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + expected = parse(""" + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + operations = get_deep_immutable_oplist(ops.operations) + operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, + operations, []) + equaloplists(operations, expected.operations) + def test_rewrite_assembler_initialization_store(self): S = lltype.GcStruct('S', ('parent', OBJECT), ('x', lltype.Signed)) diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -492,7 +492,6 @@ i += 1 n -= x.foo return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s - f(123, *[None]*11) # check that the check() are ok return None, f, None def test_compile_framework_7_interior(self): From noreply at buildbot.pypy.org Mon Dec 5 12:33:11 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Mon, 5 Dec 2011 12:33:11 +0100 (CET) Subject: [pypy-commit] pypy type-specialized-instances: fixed mapdict tests Message-ID: <20111205113311.BD2638205C@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann 
Branch: type-specialized-instances Changeset: r50169:591a337d364f Date: 2011-12-05 12:32 +0100 http://bitbucket.org/pypy/pypy/changeset/591a337d364f/ Log: fixed mapdict tests diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -25,17 +25,17 @@ class typedef: hasdict = False -def erase_storage_items(items): - return [IntAttribute.erase_item(item) for item in items] +def erase_storage_items(items, eraser=PlainAttribute): + return [eraser.erase_item(item) for item in items] -def unerase_storage_items(storage, uneraser=IntAttribute): +def unerase_storage_items(storage, uneraser=PlainAttribute): return [uneraser.unerase_item(item) for item in storage] def test_plain_attribute(): w_cls = "class" - aa = IntAttribute(("b", DICT), - IntAttribute(("a", DICT), + aa = PlainAttribute(("b", DICT), + PlainAttribute(("a", DICT), Terminator(space, w_cls))) assert aa.space is space assert aa.terminator.w_cls is w_cls @@ -81,7 +81,8 @@ def test_add_attribute(): cls = Class() obj = cls.instantiate() - obj.setdictvalue(space, "a", 10) + obj.setdictvalue(space, "a", space.wrap(10)) + print obj.map assert unerase_storage_items(obj.storage) == [10] assert obj.getdictvalue(space, "a") == 10 assert obj.getdictvalue(space, "b") is None @@ -159,9 +160,7 @@ assert obj.getdictvalue(space, "a") == 50 assert obj.getdictvalue(space, "b") == 60 assert obj.getdictvalue(space, "c") == 70 - #assert unerase_storage_items(obj.storage) == [50, 60, 70, lifeline1] - assert unerase_storage_items(obj.storage[:-1], IntAttribute) == [50, 60, 70] - assert unerase_storage_items(obj.storage[-1:], PlainAttribute) == [lifeline1] + assert unerase_storage_items(obj.storage) == [50, 60, 70, lifeline1] assert obj.getweakref() is lifeline1 obj2 = c.instantiate() @@ -169,9 +168,7 @@ obj2.setdictvalue(space, "b", 160) obj2.setdictvalue(space, "c", 170) obj2.setweakref(space, lifeline2) - #assert unerase_storage_items(obj2.storage) == [150, 160, 170, lifeline2] - assert unerase_storage_items(obj2.storage[:-1], IntAttribute) == [150, 160, 170] - assert unerase_storage_items(obj2.storage[-1:], PlainAttribute) == [lifeline2] + assert unerase_storage_items(obj2.storage) == [150, 160, 170, lifeline2] assert obj2.getweakref() is lifeline2 assert obj2.map is obj.map @@ -282,9 +279,7 @@ assert flag materialize_r_dict(space, obj, d) assert d == {"a": 5, "b": 6, "c": 7} - #assert unerase_storage_items(obj.storage) == [50, 60, 70, w_d] - assert unerase_storage_items(obj.storage[:-1], IntAttribute) == [50, 60, 70] - assert unerase_storage_items(obj.storage[-1:], PlainAttribute) == [w_d] + assert unerase_storage_items(obj.storage) == [50, 60, 70, w_d] def test_size_prediction(): @@ -766,12 +761,21 @@ def test_delete_slot(self): class A(object): __slots__ = ['x'] - + a = A() a.x = 42 del a.x raises(AttributeError, "a.x") + def test_subclassed_int(self): + class Integer(int): + pass + + a = Integer() + a.x = 5 + + assert a.x == 5 + class AppTestWithMapDictAndCounters(object): def setup_class(cls): from pypy.interpreter import gateway From noreply at buildbot.pypy.org Mon Dec 5 14:49:26 2011 From: noreply at buildbot.pypy.org (ned) Date: Mon, 5 Dec 2011 14:49:26 +0100 (CET) Subject: [pypy-commit] pypy nedbat-sandbox: Remove some pypy dependencies from sandlib. 
Message-ID: <20111205134926.E553A8205C@wyvern.cs.uni-duesseldorf.de> Author: Ned Batchelder Branch: nedbat-sandbox Changeset: r50170:4fe097ff1a9b Date: 2011-12-05 08:49 -0500 http://bitbucket.org/pypy/pypy/changeset/4fe097ff1a9b/ Log: Remove some pypy dependencies from sandlib. diff --git a/pypy/translator/sandbox/sandlib.py b/pypy/translator/sandbox/sandlib.py --- a/pypy/translator/sandbox/sandlib.py +++ b/pypy/translator/sandbox/sandlib.py @@ -6,9 +6,7 @@ import py import sys, os, posixpath, errno, stat, time -from pypy.rpython.module.ll_os_stat import s_StatResult from pypy.tool.ansi_print import AnsiLog -from pypy.rlib.rarithmetic import r_longlong import subprocess from pypy.tool.killsubprocess import killsubprocess @@ -34,6 +32,9 @@ from pypy.tool.lib_pypy import import_from_lib_pypy marshal = import_from_lib_pypy('marshal') +# Non-marshal result types +RESULTTYPE_STATRESULT, RESULTTYPE_LONGLONG = range(2) + def read_message(f, timeout=None): # warning: 'timeout' is not really reliable and should only be used # for testing. Also, it doesn't work if the file f does any buffering. @@ -50,7 +51,7 @@ marshal.dump(msg, g) else: marshal.dump(msg, g, 0) - elif resulttype is s_StatResult: + elif resulttype is RESULTTYPE_STATRESULT: # Hand-coded marshal for stat results that mimics what rmarshal expects. # marshal.dump(tuple(msg)) would have been too easy. rmarshal insists # on 64-bit ints at places, even when the value fits in 32 bits. @@ -69,12 +70,11 @@ buf.append(struct.pack(" Author: hager Branch: ppc-jit-backend Changeset: r50171:22a892ccc25e Date: 2011-12-05 17:24 +0100 http://bitbucket.org/pypy/pypy/changeset/22a892ccc25e/ Log: Add cashe flushing diff --git a/pypy/jit/backend/ppc/ppcgen/codebuilder.py b/pypy/jit/backend/ppc/ppcgen/codebuilder.py --- a/pypy/jit/backend/ppc/ppcgen/codebuilder.py +++ b/pypy/jit/backend/ppc/ppcgen/codebuilder.py @@ -18,7 +18,7 @@ compute_vars_longevity) from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.model import CompiledLoopToken -from pypy.rpython.lltypesystem import lltype, rffi, rstr +from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp.history import (BoxInt, ConstInt, ConstPtr, ConstFloat, Box, INT, REF, FLOAT) @@ -26,6 +26,8 @@ from pypy.tool.udir import udir from pypy.rlib.objectmodel import we_are_translated +from pypy.translator.tool.cbuild import ExternalCompilationInfo + A = Form("frD", "frA", "frB", "XO3", "Rc") A1 = Form("frD", "frB", "XO3", "Rc") A2 = Form("frD", "frA", "frC", "XO3", "Rc") @@ -918,6 +920,20 @@ def high(w): return (w >> 16) & 0x0000FFFF +# XXX check this +if we_are_translated(): + eci = ExternalCompilationInfo(includes = ['asm_ppc.h']) + + flush_icache = rffi.llexternal( + "LL_flush_icache", + [lltype.Signed, lltype.Signed], + lltype.Void, + compilation_info=eci, + _nowrapper=True, + sandboxsafe=True) +else: + def flush_icache(x, y): pass + class GuardToken(object): def __init__(self, descr, failargs, faillocs, offset, save_exc=False, is_invalidate=False): @@ -1039,8 +1055,14 @@ def currpos(self): return self.get_rel_pos() + def flush_cache(self, addr): + startaddr = rffi.cast(lltype.Signed, addr) + size = rffi.cast(lltype.Signed, self.get_relative_pos()) + flush_icache(startaddr, size) + def copy_to_raw_memory(self, addr): self._copy_to_raw_memory(addr) + self.flush_cache(addr) def cmp_op(self, block, a, b, imm=False, signed=True): if IS_PPC_32: From noreply at buildbot.pypy.org Mon Dec 5 17:36:18 2011 From: noreply 
at buildbot.pypy.org (hakanardo) Date: Mon, 5 Dec 2011 17:36:18 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: hg merge default Message-ID: <20111205163618.D6B478205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50172:d638586fc5b1 Date: 2011-12-05 16:16 +0100 http://bitbucket.org/pypy/pypy/changeset/d638586fc5b1/ Log: hg merge default diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -823,6 +823,15 @@ bool(v.value)): # store a non-NULL self._gen_write_barrier(newops, op.getarg(0), v) op = op.copy_and_change(rop.SETFIELD_RAW) + # ---------- write barrier for SETINTERIORFIELD_GC ------ + if op.getopnum() == rop.SETINTERIORFIELD_GC: + val = op.getarg(0) + if val is not last_malloc: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self._gen_write_barrier(newops, op.getarg(0), v) + op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) # ---------- write barrier for SETARRAYITEM_GC ---------- if op.getopnum() == rop.SETARRAYITEM_GC: val = op.getarg(0) diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -570,6 +570,28 @@ assert operations[1].getarg(2) == v_value assert operations[1].getdescr() == array_descr + def test_rewrite_assembler_5(self): + S = lltype.GcStruct('S') + A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) + interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, + A.OF, 'x') + wbdescr = self.gc_ll_descr.write_barrier_descr + ops = parse(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + expected = parse(""" + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + operations = get_deep_immutable_oplist(ops.operations) + operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, + operations, []) + equaloplists(operations, expected.operations) + def test_rewrite_assembler_initialization_store(self): S = lltype.GcStruct('S', ('parent', OBJECT), ('x', lltype.Signed)) diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -490,8 +490,8 @@ check(a[i].y.i == n + i * 100 + 2) check(a[i].z.i == n + i * 100 + 3) i += 1 + n -= x.foo return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s - f(123, *[None]*11) # check that the check() are ok return None, f, None def test_compile_framework_7_interior(self): From noreply at buildbot.pypy.org Mon Dec 5 17:36:20 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Mon, 5 Dec 2011 17:36:20 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: dont crash if two equal virtuals become not equal Message-ID: <20111205163620.0C26A8205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50173:9f840860f2cb Date: 2011-12-05 17:35 +0100 http://bitbucket.org/pypy/pypy/changeset/9f840860f2cb/ Log: dont crash if two equal virtuals become not equal diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ 
b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -172,6 +172,27 @@ with raises(InvalidLoop): self.optimize_loop(ops, ops) + def test_virtual_turns_constant(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + guard_value(p3, ConstPtr(myptr)) [] + jump(p3) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtuals_turns_not_equal(self): + ops = """ + [p1, p2] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3, p3) + p4 = new_with_vtable(ConstClass(node_vtable)) + jump(p3, p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) class TestLLtype(BaseTestMultiLabel, LLtypeMixin): diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -281,6 +281,13 @@ # Inline the short preamble at the end of the loop jmp_to_short_args = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True) + assert len(short_inputargs) == len(jmp_to_short_args) + args = {} + for i in range(len(short_inputargs)): + if short_inputargs[i] in args: + if args[short_inputargs[i]] != jmp_to_short_args[i]: + raise InvalidLoop + args[short_inputargs[i]] = jmp_to_short_args[i] self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) for box, const in constant_inputargs.items(): self.short_inliner.argmap[box] = const From noreply at buildbot.pypy.org Mon Dec 5 19:52:21 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Dec 2011 19:52:21 +0100 (CET) Subject: [pypy-commit] pypy default: Add a failing test. Message-ID: <20111205185221.04CEF8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50174:0d1f6b514b53 Date: 2011-12-05 18:22 +0100 http://bitbucket.org/pypy/pypy/changeset/0d1f6b514b53/ Log: Add a failing test. diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -1238,6 +1238,30 @@ self.meta_interp(portal, [0, 0, 0], inline=True) self.check_resops(call_may_force=0, call=0) + def test_dont_repeatedly_trace_from_the_same_guard(self): + driver = JitDriver(greens = [], reds = ['level', 'i']) + + def portal(level): + if level == 0: + i = -10 + else: + i = 0 + # + while True: + driver.jit_merge_point(level=level, i=i) + if level == 25: + return 42 + i += 1 + if i <= 0: # <- guard + continue # first make a loop + else: + # then we fail the guard above, doing a recursive call + return portal(level + 1) + + self.meta_interp(portal, [0]) + assert self.check_loop_count_at_most(3) # and not, e.g., 24 + + class TestLLtype(RecursiveTests, LLJitMixin): pass From noreply at buildbot.pypy.org Mon Dec 5 19:52:22 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Dec 2011 19:52:22 +0100 (CET) Subject: [pypy-commit] pypy default: Fix the test. Message-ID: <20111205185222.3644A82ABA@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50175:5abc8457062e Date: 2011-12-05 19:05 +0100 http://bitbucket.org/pypy/pypy/changeset/5abc8457062e/ Log: Fix the test. 
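The fix that follows (r50175) changes how often a failing guard may start tracing a new bridge. A stripped-down model of the counter scheme, in plain Python with invented names rather than the actual ResumeGuardDescr, looks roughly like this: a busy bit is set while a bridge is being traced from the guard, so re-entering the same guard in the meantime (as the recursive test added in r50174 above does) does not start tracing again, and the counter is reset once compilation is done.

CNT_BASE_MASK = 0x0FFFFFFF    # plain counter bits
CNT_BUSY_FLAG = 0x10000000    # set while already tracing from this guard

class GuardCounterSketch(object):
    def __init__(self, trace_eagerness):
        self.trace_eagerness = trace_eagerness
        self._counter = 0

    def must_compile(self):
        if self._counter & CNT_BUSY_FLAG:
            return False                  # busy: don't trace from here again yet
        self._counter += 1
        return (self._counter & CNT_BASE_MASK) >= self.trace_eagerness

    def start_compiling(self):
        self._counter |= CNT_BUSY_FLAG

    def done_compiling(self):
        self._counter = 0                 # reset, wait a bit before retrying

    def handle_fail(self, trace_and_compile, resume_in_blackhole):
        if self.must_compile():
            self.start_compiling()
            try:
                return trace_and_compile()
            finally:
                self.done_compiling()
        else:
            return resume_in_blackhole()

The per-value statistics kept for GUARD_VALUE (the CNT_INT/CNT_REF/CNT_FLOAT tags in the real diff) are left out of this sketch for brevity.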
diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -298,7 +298,7 @@ pass class ResumeGuardDescr(ResumeDescr): - _counter = 0 # if < 0, there is one counter per value; + _counter = 0 # on a GUARD_VALUE, there is one counter per value; _counters = None # they get stored in _counters then. # this class also gets the following attributes stored by resume.py code @@ -309,10 +309,13 @@ rd_virtuals = None rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) - CNT_INT = -0x20000000 - CNT_REF = -0x40000000 - CNT_FLOAT = -0x60000000 - CNT_MASK = 0x1FFFFFFF + CNT_BASE_MASK = 0x0FFFFFFF # the base counter value + CNT_BUSY_FLAG = 0x10000000 # if set, busy tracing from the guard + CNT_TYPE_MASK = 0x60000000 # mask for the type + + CNT_INT = 0x20000000 + CNT_REF = 0x40000000 + CNT_FLOAT = 0x60000000 def store_final_boxes(self, guard_op, boxes): guard_op.setfailargs(boxes) @@ -326,6 +329,8 @@ except ValueError: return # xxx probably very rare else: + if i > self.CNT_BASE_MASK: + return # probably never, but better safe than sorry if box.type == history.INT: cnt = self.CNT_INT elif box.type == history.REF: @@ -334,14 +339,17 @@ cnt = self.CNT_FLOAT else: assert 0, box.type - # we build the following value for _counter, which is always - # a negative value + assert cnt > self.CNT_BASE_MASK self._counter = cnt | i def handle_fail(self, metainterp_sd, jitdriver_sd): if self.must_compile(metainterp_sd, jitdriver_sd): - return self._trace_and_compile_from_bridge(metainterp_sd, - jitdriver_sd) + self.start_compiling() + try: + return self._trace_and_compile_from_bridge(metainterp_sd, + jitdriver_sd) + finally: + self.done_compiling() else: from pypy.jit.metainterp.blackhole import resume_in_blackhole resume_in_blackhole(metainterp_sd, jitdriver_sd, self) @@ -359,12 +367,22 @@ def must_compile(self, metainterp_sd, jitdriver_sd): trace_eagerness = jitdriver_sd.warmstate.trace_eagerness - if self._counter >= 0: + # + if self._counter <= self.CNT_BASE_MASK: + # simple case: just counting from 0 to trace_eagerness self._counter += 1 return self._counter >= trace_eagerness - else: - index = self._counter & self.CNT_MASK - typetag = self._counter & ~ self.CNT_MASK + # + # do we have the BUSY flag? If so, we're tracing right now, e.g. in an + # outer invocation of the same function, so don't trace again for now. + elif self._counter & self.CNT_BUSY_FLAG: + return False + # + else: # we have a GUARD_VALUE that fails. Make a _counters instance + # (only now, when the guard is actually failing at least once), + # and use it to record some statistics about the failing values. + index = self._counter & self.CNT_BASE_MASK + typetag = self._counter & self.CNT_TYPE_MASK counters = self._counters if typetag == self.CNT_INT: intvalue = metainterp_sd.cpu.get_latest_value_int(index) @@ -391,7 +409,16 @@ assert 0, typetag return counter >= trace_eagerness - def reset_counter_from_failure(self): + def start_compiling(self): + # start tracing and compiling from this guard. + self._counter |= self.CNT_BUSY_FLAG + + def done_compiling(self): + # done tracing and compiling from this guard. Either the bridge has + # been successfully compiled, in which case whatever value we store + # in self._counter will not be seen any more, or not, in which case + # we should reset the counter to 0, in order to wait a bit until the + # next attempt. 
if self._counter >= 0: self._counter = 0 self._counters = None @@ -608,9 +635,6 @@ metainterp.set_compiled_merge_points(self.original_greenkey, old_loop_tokens) - def reset_counter_from_failure(self): - pass - def compile_new_bridge(metainterp, old_loop_tokens, resumekey, retraced=False): """Try to compile a new bridge leading from the beginning of the history diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1790,7 +1790,6 @@ self.staticdata.profiler.count(reason) debug_print('~~~ ABORTING TRACING') self.staticdata.stats.aborted() - self.resumekey.reset_counter_from_failure() def blackhole_if_trace_too_long(self): warmrunnerstate = self.jitdriver_sd.warmstate diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -1255,11 +1255,12 @@ if i <= 0: # <- guard continue # first make a loop else: - # then we fail the guard above, doing a recursive call + # then we fail the guard above, doing a recursive call, + # which will itself fail the same guard above, and so on return portal(level + 1) self.meta_interp(portal, [0]) - assert self.check_loop_count_at_most(3) # and not, e.g., 24 + self.check_loop_count_at_most(2) # and not, e.g., 24 class TestLLtype(RecursiveTests, LLJitMixin): From noreply at buildbot.pypy.org Mon Dec 5 19:57:55 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 5 Dec 2011 19:57:55 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-refactor-complex: added real and imag descriptors to complex objs Message-ID: <20111205185755.5BEA58205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-dtype-refactor-complex Changeset: r50176:7a810c2bdf0d Date: 2011-12-05 13:51 -0500 http://bitbucket.org/pypy/pypy/changeset/7a810c2bdf0d/ Log: added real and imag descriptors to complex objs diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -1,7 +1,7 @@ from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import operationerrfmt from pypy.interpreter.gateway import interp2app -from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.objspace.std.complextype import complex_typedef from pypy.objspace.std.floattype import float_typedef from pypy.objspace.std.inttype import int_typedef @@ -65,6 +65,14 @@ dtype = self.get_dtype(space) return space.wrap(dtype.itemtype.bool(self)) + def descr_get_real(self, space): + dtype = self.get_dtype(space) + return dtype.itemtype.real(self) + + def descr_get_imag(self, space): + dtype = self.get_dtype(space) + return dtype.itemtype.imag(self) + def _binop_impl(ufunc_name): def impl(self, space, w_other): from pypy.module.micronumpy import interp_ufuncs @@ -197,6 +205,9 @@ __neg__ = interp2app(W_GenericBox.descr_neg), __abs__ = interp2app(W_GenericBox.descr_abs), + + real = GetSetProperty(W_GenericBox.descr_get_real), + imag = GetSetProperty(W_GenericBox.descr_get_imag), ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -34,6 +34,15 @@ ) return dispatcher +def raw_unary_op(func): + specialize.argtype(1) 
+ @functools.wraps(func) + def dispatcher(self, v): + return func(self, + self.for_computation(self.unbox(v)) + ) + return dispatcher + def raw_binary_op(func): specialize.argtype(1, 2)(func) @functools.wraps(func) @@ -429,20 +438,28 @@ def __init__(self, itemtypes): BaseCompositeType.__init__(self, itemtypes) - [self.real, self.imag] = self.itemtypes + [self.real_type, self.imag_type] = self.itemtypes def coerce(self, space, w_item): if isinstance(w_item, self.BoxType): return w_item real, imag = space.unpackcomplex(w_item) - return self.box([self.real.box(real), self.imag.box(imag)]) - + return self.box([self.real_type.box(real), self.imag_type.box(imag)]) + def for_computation(self, (real, imag)): return [ - self.real.for_computation(self.real.unbox(real)), - self.imag.for_computation(self.imag.unbox(imag)), + self.real_type.for_computation(self.real_type.unbox(real)), + self.imag_type.for_computation(self.imag_type.unbox(imag)), ] @raw_binary_op def eq(self, (real1, imag1), (real2, imag2)): return real1 == real2 and imag1 == imag2 + + @raw_unary_op + def real(self, (real, imag)): + return self.real_type.box(real) + + @raw_unary_op + def imag(self, (real, imag)): + return self.imag_type.box(imag) \ No newline at end of file From noreply at buildbot.pypy.org Mon Dec 5 19:57:56 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 5 Dec 2011 19:57:56 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-refactor-complex: merged default Message-ID: <20111205185756.919FD8205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-dtype-refactor-complex Changeset: r50177:006352005df1 Date: 2011-12-05 13:53 -0500 http://bitbucket.org/pypy/pypy/changeset/006352005df1/ Log: merged default diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -823,6 +823,15 @@ bool(v.value)): # store a non-NULL self._gen_write_barrier(newops, op.getarg(0), v) op = op.copy_and_change(rop.SETFIELD_RAW) + # ---------- write barrier for SETINTERIORFIELD_GC ------ + if op.getopnum() == rop.SETINTERIORFIELD_GC: + val = op.getarg(0) + if val is not last_malloc: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self._gen_write_barrier(newops, op.getarg(0), v) + op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) # ---------- write barrier for SETARRAYITEM_GC ---------- if op.getopnum() == rop.SETARRAYITEM_GC: val = op.getarg(0) diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -570,6 +570,28 @@ assert operations[1].getarg(2) == v_value assert operations[1].getdescr() == array_descr + def test_rewrite_assembler_5(self): + S = lltype.GcStruct('S') + A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) + interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, + A.OF, 'x') + wbdescr = self.gc_ll_descr.write_barrier_descr + ops = parse(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + expected = parse(""" + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + operations = get_deep_immutable_oplist(ops.operations) + operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, + operations, []) + 
equaloplists(operations, expected.operations) + def test_rewrite_assembler_initialization_store(self): S = lltype.GcStruct('S', ('parent', OBJECT), ('x', lltype.Signed)) diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -490,8 +490,8 @@ check(a[i].y.i == n + i * 100 + 2) check(a[i].z.i == n + i * 100 + 3) i += 1 + n -= x.foo return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s - f(123, *[None]*11) # check that the check() are ok return None, f, None def test_compile_framework_7_interior(self): diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -298,7 +298,7 @@ pass class ResumeGuardDescr(ResumeDescr): - _counter = 0 # if < 0, there is one counter per value; + _counter = 0 # on a GUARD_VALUE, there is one counter per value; _counters = None # they get stored in _counters then. # this class also gets the following attributes stored by resume.py code @@ -309,10 +309,13 @@ rd_virtuals = None rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) - CNT_INT = -0x20000000 - CNT_REF = -0x40000000 - CNT_FLOAT = -0x60000000 - CNT_MASK = 0x1FFFFFFF + CNT_BASE_MASK = 0x0FFFFFFF # the base counter value + CNT_BUSY_FLAG = 0x10000000 # if set, busy tracing from the guard + CNT_TYPE_MASK = 0x60000000 # mask for the type + + CNT_INT = 0x20000000 + CNT_REF = 0x40000000 + CNT_FLOAT = 0x60000000 def store_final_boxes(self, guard_op, boxes): guard_op.setfailargs(boxes) @@ -326,6 +329,8 @@ except ValueError: return # xxx probably very rare else: + if i > self.CNT_BASE_MASK: + return # probably never, but better safe than sorry if box.type == history.INT: cnt = self.CNT_INT elif box.type == history.REF: @@ -334,14 +339,17 @@ cnt = self.CNT_FLOAT else: assert 0, box.type - # we build the following value for _counter, which is always - # a negative value + assert cnt > self.CNT_BASE_MASK self._counter = cnt | i def handle_fail(self, metainterp_sd, jitdriver_sd): if self.must_compile(metainterp_sd, jitdriver_sd): - return self._trace_and_compile_from_bridge(metainterp_sd, - jitdriver_sd) + self.start_compiling() + try: + return self._trace_and_compile_from_bridge(metainterp_sd, + jitdriver_sd) + finally: + self.done_compiling() else: from pypy.jit.metainterp.blackhole import resume_in_blackhole resume_in_blackhole(metainterp_sd, jitdriver_sd, self) @@ -359,12 +367,22 @@ def must_compile(self, metainterp_sd, jitdriver_sd): trace_eagerness = jitdriver_sd.warmstate.trace_eagerness - if self._counter >= 0: + # + if self._counter <= self.CNT_BASE_MASK: + # simple case: just counting from 0 to trace_eagerness self._counter += 1 return self._counter >= trace_eagerness - else: - index = self._counter & self.CNT_MASK - typetag = self._counter & ~ self.CNT_MASK + # + # do we have the BUSY flag? If so, we're tracing right now, e.g. in an + # outer invocation of the same function, so don't trace again for now. + elif self._counter & self.CNT_BUSY_FLAG: + return False + # + else: # we have a GUARD_VALUE that fails. Make a _counters instance + # (only now, when the guard is actually failing at least once), + # and use it to record some statistics about the failing values. 
+ index = self._counter & self.CNT_BASE_MASK + typetag = self._counter & self.CNT_TYPE_MASK counters = self._counters if typetag == self.CNT_INT: intvalue = metainterp_sd.cpu.get_latest_value_int(index) @@ -391,7 +409,16 @@ assert 0, typetag return counter >= trace_eagerness - def reset_counter_from_failure(self): + def start_compiling(self): + # start tracing and compiling from this guard. + self._counter |= self.CNT_BUSY_FLAG + + def done_compiling(self): + # done tracing and compiling from this guard. Either the bridge has + # been successfully compiled, in which case whatever value we store + # in self._counter will not be seen any more, or not, in which case + # we should reset the counter to 0, in order to wait a bit until the + # next attempt. if self._counter >= 0: self._counter = 0 self._counters = None @@ -608,9 +635,6 @@ metainterp.set_compiled_merge_points(self.original_greenkey, old_loop_tokens) - def reset_counter_from_failure(self): - pass - def compile_new_bridge(metainterp, old_loop_tokens, resumekey, retraced=False): """Try to compile a new bridge leading from the beginning of the history diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1790,7 +1790,6 @@ self.staticdata.profiler.count(reason) debug_print('~~~ ABORTING TRACING') self.staticdata.stats.aborted() - self.resumekey.reset_counter_from_failure() def blackhole_if_trace_too_long(self): warmrunnerstate = self.jitdriver_sd.warmstate diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -1238,6 +1238,31 @@ self.meta_interp(portal, [0, 0, 0], inline=True) self.check_resops(call_may_force=0, call=0) + def test_dont_repeatedly_trace_from_the_same_guard(self): + driver = JitDriver(greens = [], reds = ['level', 'i']) + + def portal(level): + if level == 0: + i = -10 + else: + i = 0 + # + while True: + driver.jit_merge_point(level=level, i=i) + if level == 25: + return 42 + i += 1 + if i <= 0: # <- guard + continue # first make a loop + else: + # then we fail the guard above, doing a recursive call, + # which will itself fail the same guard above, and so on + return portal(level + 1) + + self.meta_interp(portal, [0]) + self.check_loop_count_at_most(2) # and not, e.g., 24 + + class TestLLtype(RecursiveTests, LLJitMixin): pass From noreply at buildbot.pypy.org Mon Dec 5 19:57:57 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 5 Dec 2011 19:57:57 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-refactor-complex: fix, make this numpypy Message-ID: <20111205185757.B78FF8205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-dtype-refactor-complex Changeset: r50178:22862d325dba Date: 2011-12-05 13:57 -0500 http://bitbucket.org/pypy/pypy/changeset/22862d325dba/ Log: fix, make this numpypy diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -294,9 +294,9 @@ ) W_ComplexFloatingBox.typedef = TypeDef("complexfloating", W_InexactBox.typedef, - __module__ = "numpy", + __module__ = "numpypy", ) W_Complex128Box.typedef = TypeDef("complex128", (W_ComplexFloatingBox.typedef, complex_typedef), - __module__ = "numpy", -) \ No newline at end of file + __module__ = "numpypy", +) diff --git a/pypy/module/micronumpy/types.py 
b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -462,4 +462,4 @@ @raw_unary_op def imag(self, (real, imag)): - return self.imag_type.box(imag) \ No newline at end of file + return self.imag_type.box(imag) From noreply at buildbot.pypy.org Mon Dec 5 20:07:39 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Dec 2011 20:07:39 +0100 (CET) Subject: [pypy-commit] pypy default: Add a test that I wrote long ago, but apparently forgot to check in. Message-ID: <20111205190739.76F888205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50179:ed550a1d0c11 Date: 2011-12-05 20:07 +0100 http://bitbucket.org/pypy/pypy/changeset/ed550a1d0c11/ Log: Add a test that I wrote long ago, but apparently forgot to check in. diff --git a/pypy/jit/metainterp/test/test_math.py b/pypy/jit/metainterp/test/test_math.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/test_math.py @@ -0,0 +1,47 @@ +import math +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from pypy.rlib.rfloat import isinf, isnan, INFINITY, NAN + +class MathTests: + + def test_math_sqrt(self): + def f(x): + try: + return math.sqrt(x) + except ValueError: + return -INFINITY + + res = self.interp_operations(f, [0.0]) + assert res == 0.0 + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [25.0]) + assert res == 5.0 + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [-0.0]) + assert str(res) == '-0.0' + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [1000000.0]) + assert res == 1000.0 + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [-1.0]) + assert res == -INFINITY + self.check_operations_history(call_pure=0) + # + res = self.interp_operations(f, [INFINITY]) + assert isinf(res) and not isnan(res) and res > 0.0 + self.check_operations_history(call_pure=0) + # + res = self.interp_operations(f, [NAN]) + assert isnan(res) and not isinf(res) + self.check_operations_history(call_pure=0) + + +class TestOOtype(MathTests, OOJitMixin): + pass + +class TestLLtype(MathTests, LLJitMixin): + pass From noreply at buildbot.pypy.org Mon Dec 5 20:32:30 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Dec 2011 20:32:30 +0100 (CET) Subject: [pypy-commit] pypy default: Test and fix. Message-ID: <20111205193230.DFFCC8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50180:92d4f8f7bcfb Date: 2011-12-05 20:31 +0100 http://bitbucket.org/pypy/pypy/changeset/92d4f8f7bcfb/ Log: Test and fix. 
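The diff below tightens how defaultdict treats its arguments: only a leading positional argument can be the default_factory, it must be callable or None, and everything else, including a literal 'default_factory' keyword, is passed straight through to dict(). A self-contained model of the intended behaviour (DefaultDictSketch is an illustrative name, not the patched class) is:

class DefaultDictSketch(dict):
    def __init__(self, *args, **kwds):
        if len(args) > 0:
            default_factory = args[0]
            args = args[1:]
            if not callable(default_factory) and default_factory is not None:
                raise TypeError("first argument must be callable")
        else:
            default_factory = None
        self.default_factory = default_factory
        super(DefaultDictSketch, self).__init__(*args, **kwds)

    def __missing__(self, key):
        if self.default_factory is None:
            raise KeyError(key)
        value = self[key] = self.default_factory()
        return value

d = DefaultDictSketch(list, [("a", [1])])
d["b"].append(2)
assert d["a"] == [1] and d["b"] == [2]
assert DefaultDictSketch(default_factory=5).keys() == ["default_factory"]

The last assertion mirrors test_kwds from the new tests and assumes Python 2 semantics for dict.keys(), matching the codebase of the time.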
diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -13,12 +13,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -19,11 +19,22 @@ def test_keyerror_without_factory(self): from _collections import defaultdict - d1 = defaultdict() - for key in ['foo', (1,)]: - try: - d1[key] - except KeyError, err: - assert err.args[0] == key - else: - assert 0, "expected KeyError" + for d1 in [defaultdict(), defaultdict(None)]: + for key in ['foo', (1,)]: + try: + d1[key] + except KeyError, err: + assert err.args[0] == key + else: + assert 0, "expected KeyError" + + def test_noncallable(self): + from _collections import defaultdict + raises(TypeError, defaultdict, [('a', 5)]) + d = defaultdict(None, [('a', 5)]) + assert d.items() == [('a', 5)] + + def test_kwds(self): + from _collections import defaultdict + d = defaultdict(default_factory=5) + assert d.keys() == ['default_factory'] From noreply at buildbot.pypy.org Mon Dec 5 20:40:12 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Dec 2011 20:40:12 +0100 (CET) Subject: [pypy-commit] pypy default: Forgot this version of the file. Thanks amaury. Message-ID: <20111205194012.8C78E8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50181:0732486f6a76 Date: 2011-12-05 20:39 +0100 http://bitbucket.org/pypy/pypy/changeset/0732486f6a76/ Log: Forgot this version of the file. Thanks amaury. 
diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -379,12 +379,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): From noreply at buildbot.pypy.org Mon Dec 5 22:11:40 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Mon, 5 Dec 2011 22:11:40 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: Dont rgc.collect() more than we have too (not working when run in a separate process by test_all) Message-ID: <20111205211140.435918205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50182:417d2384375b Date: 2011-12-05 22:10 +0100 http://bitbucket.org/pypy/pypy/changeset/417d2384375b/ Log: Dont rgc.collect() more than we have too (not working when run in a separate process by test_all) diff --git a/pypy/jit/metainterp/test/test_memmgr.py b/pypy/jit/metainterp/test/test_memmgr.py --- a/pypy/jit/metainterp/test/test_memmgr.py +++ b/pypy/jit/metainterp/test/test_memmgr.py @@ -15,6 +15,8 @@ from pypy.jit.metainterp.test.support import LLJitMixin from pypy.rlib.jit import JitDriver, dont_look_inside from pypy.jit.metainterp.warmspot import get_stats +from pypy.jit.metainterp.warmstate import JitCell +from pypy.rlib import rgc class FakeLoopToken: generation = 0 @@ -81,6 +83,20 @@ # See comments in TestMemoryManager. To get temporarily the normal # behavior just rename this class to TestIntegration. + # We need an extra rgc.collect in get_procedure_token() for some of + # these tests to pass. But we dont want it there always since that will + # make all other tests take forever. 
+ def setup_class(cls): + original_get_procedure_token = JitCell.get_procedure_token + def get_procedure_token(self): + rgc.collect(); + return original_get_procedure_token(self) + JitCell.get_procedure_token = get_procedure_token + cls.original_get_procedure_token = original_get_procedure_token + + def teardown_class(cls): + JitCell.get_procedure_token = cls.original_get_procedure_token + def test_loop_kept_alive(self): myjitdriver = JitDriver(greens=[], reds=['n']) def g(): diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -173,9 +173,6 @@ wref_procedure_token = None def get_procedure_token(self): - if not we_are_translated(): - from pypy.rlib import rgc - rgc.collect(); if self.wref_procedure_token is not None: token = self.wref_procedure_token() if token and not token.invalidated: From noreply at buildbot.pypy.org Mon Dec 5 22:42:02 2011 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 Dec 2011 22:42:02 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: Merge with default Message-ID: <20111205214202.22C108205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50183:9160acf08843 Date: 2011-12-04 21:28 +0200 http://bitbucket.org/pypy/pypy/changeset/9160acf08843/ Log: Merge with default diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.6`: http://pypy.org/download.html +.. _`Release 1.7`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. 
Windows is supported, but platform-specific bugs tend to take longer before we notice and fix diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -20,7 +20,7 @@ from pypy.jit.backend.llgraph import symbolic from pypy.jit.codewriter import longlong -from pypy.rlib import libffi +from pypy.rlib import libffi, clibffi from pypy.rlib.objectmodel import ComputedIntSymbolic, we_are_translated from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint @@ -1432,6 +1432,10 @@ res = _getinteriorfield_raw(libffi.types.slong, array, index, width, ofs) return res +def do_getinteriorfield_raw_float(array, index, width, ofs): + res = _getinteriorfield_raw(libffi.types.double, array, index, width, ofs) + return res + def _getfield_raw(struct, fieldnum): STRUCT, fieldname = symbolic.TokenToField[fieldnum] ptr = cast_from_int(lltype.Ptr(STRUCT), struct) @@ -1510,12 +1514,17 @@ do_setinteriorfield_gc_float = new_setinteriorfield_gc(cast_from_floatstorage) do_setinteriorfield_gc_ptr = new_setinteriorfield_gc(cast_from_ptr) -def new_setinteriorfield_raw(ffitype): +def new_setinteriorfield_raw(cast_func, ffitype): def do_setinteriorfield_raw(array, index, newvalue, width, ofs): addr = rffi.cast(rffi.VOIDP, array) + for TYPE, ffitype2 in clibffi.ffitype_map: + if ffitype2 is ffitype: + newvalue = cast_func(TYPE, newvalue) + break return libffi.array_setitem(ffitype, width, addr, index, ofs, newvalue) return do_setinteriorfield_raw -do_setinteriorfield_raw_int = new_setinteriorfield_raw(libffi.types.slong) +do_setinteriorfield_raw_int = new_setinteriorfield_raw(cast_from_int, libffi.types.slong) +do_setinteriorfield_raw_float = new_setinteriorfield_raw(cast_from_floatstorage, libffi.types.double) def do_setfield_raw_int(struct, fieldnum, newvalue): STRUCT, fieldname = symbolic.TokenToField[fieldnum] diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -2,6 +2,8 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan +from pypy.jit.tool.oparser import parse +from pypy.jit.backend.detect_cpu import getcpuclass def newboxes(*values): return [BoxInt(v) for v in values] diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -167,26 +167,22 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity = self._compute_vars_longevity(inputargs, operations) + longevity, useful = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) - return operations + return operations, useful def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - jump = operations[-1] - loop_consts = self._compute_loop_consts(inputargs, jump, looptoken) - self.loop_consts = loop_consts - return 
self._process_inputargs(inputargs), operations + operations, useful = self._prepare(inputargs, operations, allgcrefs) + return self._process_inputargs(inputargs, useful), operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - self.loop_consts = {} + operations, _ = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] @@ -195,7 +191,7 @@ def reserve_param(self, n): self.param_depth = max(self.param_depth, n) - def _process_inputargs(self, inputargs): + def _process_inputargs(self, inputargs, useful): # XXX we can sort out here by longevity if we need something # more optimal floatlocs = [None] * len(inputargs) @@ -211,7 +207,7 @@ arg = inputargs[i] assert not isinstance(arg, Const) reg = None - if arg not in self.loop_consts and self.longevity[arg][1] > -1: + if self.longevity[arg][1] > -1 and arg in useful: if arg.type == FLOAT: # xxx is it really a good idea? at the first CALL they # will all be flushed anyway @@ -287,15 +283,15 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: - loop_consts = {} - else: - loop_consts = {} - for i in range(len(inputargs)): - if inputargs[i] is jump.getarg(i): - loop_consts[inputargs[i]] = i - return loop_consts + #def _compute_loop_consts(self, inputargs, jump, looptoken): + # if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: + # loop_consts = {} + # else: + # loop_consts = {} + # for i in range(len(inputargs)): + # if inputargs[i] is jump.getarg(i): + # loop_consts[inputargs[i]] = i + # return loop_consts def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py @@ -450,8 +446,14 @@ def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" + + # returns a pair longevity/useful. Non-useful variables are ones that + # never appear in the assembler or it does not matter if they appear on + # stack or in registers. 
Main example is loop arguments that go + # only to guard operations or to jump or to finish produced = {} last_used = {} + useful = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -459,8 +461,11 @@ continue assert op.result not in produced produced[op.result] = i + opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) + if opnum != rop.JUMP and opnum != rop.FINISH: + useful[arg] = None if isinstance(arg, Box) and arg not in last_used: last_used[arg] = i if op.is_guard(): @@ -486,7 +491,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity + return longevity, useful def loc(self, v): if v is None: # xxx kludgy diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -149,6 +149,13 @@ self.cpu.execute_token(loop.token) return loop + def prepare_loop(self, ops): + loop = self.parse(ops) + regalloc = RegAlloc(self.cpu.assembler, False) + regalloc.prepare_loop(loop.inputargs, loop.operations, + loop.token, []) + return regalloc + def getint(self, index): return self.cpu.get_latest_value_int(index) @@ -422,6 +429,35 @@ self.run(loop) assert self.getints(9) == range(9) + def test_loopargs(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_2(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + finish(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_3(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + guard_true(i4) [i0, i1, i2, i3, i4] + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + class TestRegallocCompOps(BaseTestRegalloc): def test_cmp_op_0(self): diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -457,6 +457,46 @@ def test_compile_framework_7(self): self.run('compile_framework_7') + def define_compile_framework_7_interior(cls): + # Array of structs containing pointers (test the write barrier + # for setinteriorfield_gc) + S = lltype.GcStruct('S', ('i', lltype.Signed)) + A = lltype.GcArray(lltype.Struct('entry', ('x', lltype.Ptr(S)), + ('y', lltype.Ptr(S)), + ('z', lltype.Ptr(S)))) + class Glob: + a = lltype.nullptr(A) + glob = Glob() + # + def make_s(i): + s = lltype.malloc(S) + s.i = i + return s + # + @unroll_safe + def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): + a = glob.a + if not a: + a = glob.a = lltype.malloc(A, 10) + i = 0 + while i < 10: + a[i].x = make_s(n + i * 100 + 1) + a[i].y = make_s(n + i * 100 + 2) + a[i].z = make_s(n + i * 100 + 3) + i += 1 + i = 0 + while i < 10: + check(a[i].x.i == n + i * 100 + 1) + check(a[i].y.i == n + i * 100 + 2) + check(a[i].z.i == n + i * 100 + 3) + i += 1 + return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s + f(123, *[None]*11) # check that the check() are ok + return None, f, None + + def test_compile_framework_7_interior(self): + self.run('compile_framework_7_interior') + def define_compile_framework_8(cls): # Array of pointers, of unknown length (test write_barrier_from_array) def before(n, x): diff --git a/pypy/jit/codewriter/effectinfo.py 
b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -247,7 +247,7 @@ if funcobj.random_effects_on_gcobjs: return True except (AttributeError, lltype.DelayedPointer): - pass + return True # better safe than sorry return super(RandomEffectsAnalyzer, self).analyze_external_call( op, seen) diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -207,7 +207,24 @@ if op.args[0] in self.vable_array_vars: self.vable_array_vars[op.result]= self.vable_array_vars[op.args[0]] - rewrite_op_cast_pointer = rewrite_op_same_as + def rewrite_op_cast_pointer(self, op): + newop = self.rewrite_op_same_as(op) + assert newop is None + return + # disabled for now + if (self._is_rclass_instance(op.args[0]) and + self._is_rclass_instance(op.result)): + FROM = op.args[0].concretetype.TO + TO = op.result.concretetype.TO + if lltype._castdepth(TO, FROM) > 0: + vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, TO) + const_vtable = Constant(vtable, lltype.typeOf(vtable)) + return [None, # hack, do the right renaming from op.args[0] to op.result + SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + + def rewrite_op_jit_record_known_class(self, op): + return SpaceOperation("record_known_class", [op.args[0], op.args[1]], None) + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass @@ -487,6 +504,7 @@ d.pop('flavor') add_memory_pressure = d.pop('add_memory_pressure', False) zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) if d: raise UnsupportedMallocFlags(d) ARRAY = op.args[0].value @@ -495,6 +513,8 @@ name += '_zero' if add_memory_pressure: name += '_add_memory_pressure' + if not track_allocation: + name += '_no_track_allocation' return self._do_builtin_call(op, name, [op.args[2]], extra = (ARRAY,), diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -599,27 +599,21 @@ return p return _ll_0_alloc_with_del - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, flavor='raw') - return _ll_1_raw_malloc + def build_raw_malloc_builder(zero=False, add_memory_pressure=False, track_allocation=True): + def build_ll_1_raw_malloc(ARRAY): + def _ll_1_raw_malloc(n): + return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, add_memory_pressure=add_memory_pressure) + return _ll_1_raw_malloc + return build_ll_1_raw_malloc - def build_ll_1_raw_malloc_zero(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, flavor='raw', zero=True) - return _ll_1_raw_malloc - - def build_ll_1_raw_malloc_zero_add_memory_pressure(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, flavor='raw', zero=True, - add_memory_pressure=True) - return _ll_1_raw_malloc - - def build_ll_1_raw_malloc_add_memory_pressure(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, flavor='raw', - add_memory_pressure=True) - return _ll_1_raw_malloc + build_ll_1_raw_malloc = build_raw_malloc_builder() + build_ll_1_raw_malloc_zero = build_raw_malloc_builder(zero=True) + build_ll_1_raw_malloc_zero_add_memory_pressure = build_raw_malloc_builder(zero=True, add_memory_pressure=True) + build_ll_1_raw_malloc_add_memory_pressure = build_raw_malloc_builder(add_memory_pressure=True) + 
build_ll_1_raw_malloc_no_track_allocation = build_raw_malloc_builder(track_allocation=False) + build_ll_1_raw_malloc_zero_no_track_allocation = build_raw_malloc_builder(zero=True, track_allocation=False) + build_ll_1_raw_malloc_zero_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(zero=True, add_memory_pressure=True, track_allocation=False) + build_ll_1_raw_malloc_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(add_memory_pressure=True, track_allocation=False) def build_ll_1_raw_free(ARRAY): def _ll_1_raw_free(p): diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -518,6 +518,9 @@ @arguments("r") def bhimpl_mark_opaque_ptr(a): pass + @arguments("r", "i") + def bhimpl_record_known_class(a, b): + pass @arguments("i", returns="i") def bhimpl_int_copy(a): diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -260,6 +260,16 @@ def optimize_GUARD_FALSE(self, op): self.optimize_guard(op, CONST_0) + def optimize_RECORD_KNOWN_CLASS(self, op): + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) + assert isinstance(expectedclassbox, Const) + realclassbox = value.get_constant_class(self.optimizer.cpu) + if realclassbox is not None: + assert realclassbox.same_constant(expectedclassbox) + return + value.make_constant_class(expectedclassbox, None) + def optimize_GUARD_CLASS(self, op): value = self.getvalue(op.getarg(0)) expectedclassbox = op.getarg(1) @@ -481,6 +491,9 @@ self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) self.emit_operation(op) + def optimize_SAME_AS(self, op): + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) + dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_', default=OptRewrite.emit_operation) optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD') diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -28,6 +28,9 @@ def optimize_MARK_OPAQUE_PTR(self, op): pass + def optimize_RECORD_KNOWN_CLASS(self, op): + pass + dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -6482,6 +6482,21 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() + def test_record_known_class(self): + ops = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + record_known_class(p1, ConstClass(node_vtable)) + guard_class(p1, ConstClass(node_vtable)) [] + jump(p1) + """ + expected = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + jump(p1) + """ + self.optimize_loop(ops, expected) + def test_quasi_immut(self): ops = """ [p0, p1, i0] diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -243,6 +243,18 @@ def opimpl_mark_opaque_ptr(self, box): return self.execute(rop.MARK_OPAQUE_PTR, box) + @arguments("box", "box") + def opimpl_record_known_class(self, box, clsbox): + from 
pypy.rpython.lltypesystem import llmemory + if self.metainterp.heapcache.is_class_known(box): + return + adr = clsbox.getaddr() + bounding_class = llmemory.cast_adr_to_ptr(adr, rclass.CLASSTYPE) + if bounding_class.subclassrange_max - bounding_class.subclassrange_min == 1: + # precise class knowledge, this can be used + self.execute(rop.RECORD_KNOWN_CLASS, box, clsbox) + self.metainterp.heapcache.class_now_known(box) + @arguments("box") def _opimpl_any_return(self, box): self.metainterp.finishframe(box) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -494,6 +494,7 @@ 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr + 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -14,7 +14,7 @@ from pypy.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside, loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, - isconstant, isvirtual, promote_string, set_param) + isconstant, isvirtual, promote_string, set_param, record_known_class) from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -3585,6 +3585,132 @@ self.interp_operations(f, [5], translationoptions=translationoptions) + def test_annotation_gives_class_knowledge_to_tracer(self): + py.test.skip("disabled") + class Base(object): + pass + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + assert isinstance(a, A) + z = a.f() + elif x < 0: + assert isinstance(a, B) + z = a.f() + else: + assert isinstance(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + + def test_give_class_knowledge_to_tracer_explicitly(self): + from pypy.rpython.lltypesystem.lloperation import llop + class Base(object): + def f(self): + raise NotImplementedError + def g(self): + raise NotImplementedError + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + 
else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + record_known_class(a, A) + z = a.f() + elif x < 0: + record_known_class(a, B) + z = a.f() + else: + record_known_class(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): from pypy.rlib.objectmodel import UnboxedValue diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -612,7 +612,7 @@ return node.value res = self.meta_interp(f, [48, 3], policy=StopAtXPolicy(externfn)) assert res == f(48, 3) - self.check_loop_count(3) + self.check_loop_count(5) res = self.meta_interp(f, [40, 3], policy=StopAtXPolicy(externfn)) assert res == f(40, 3) self.check_loop_count(3) @@ -761,6 +761,27 @@ res = self.meta_interp(f, [0x1F, 0x11]) assert res == f(0x1F, 0x11) + def test_duplicated_virtual(self): + myjitdriver = JitDriver(greens = [], reds = ['n', 'node1', 'node2']) + def f(n): + node1 = self._new() + node1.value = 0 + node2 = self._new() + node2.value = 1 + while n > 0: + myjitdriver.jit_merge_point(n=n, node1=node1, node2=node2) + next = self._new() + next.value = node1.value + node2.value + n + node1 = next + node2 = next + n -= 1 + return node1.value + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_resops(new_with_vtable=0, new=0) + + + class VirtualMiscTests: def test_multiple_equal_virtuals(self): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -27,6 +27,7 @@ 'builtinify' : 'interp_magic.builtinify', 'lookup_special' : 'interp_magic.lookup_special', 'do_what_I_mean' : 'interp_magic.do_what_I_mean', + 'list_strategy' : 'interp_magic.list_strategy', } submodules = { diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -5,7 +5,6 @@ from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import IndexCache - def internal_repr(space, w_object): return space.wrap('%r' % (w_object,)) @@ -73,3 +72,11 @@ def do_what_I_mean(space): return space.wrap(42) + +def list_strategy(space, w_list): + from pypy.objspace.std.listobject import W_ListObject + if isinstance(w_list, W_ListObject): + return space.wrap(w_list.strategy._applevel_repr) + else: + w_msg = space.wrap("Can only get the list strategy of a list") + raise OperationError(space.w_TypeError, w_msg) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -5,7 +5,7 @@ def setup_class(cls): if option.runappdirect: py.test.skip("does not make sense on pypy-c") - cls.space = gettestobjspace(**{"objspace.usemodules.select": False}) + cls.space = 
gettestobjspace(**{"objspace.usemodules.select": False, "objspace.std.withrangelist": True}) def test__isfake(self): from __pypy__ import isfake @@ -54,3 +54,21 @@ from __pypy__ import do_what_I_mean x = do_what_I_mean() assert x == 42 + + def test_list_strategy(self): + from __pypy__ import list_strategy + + l = [1, 2, 3] + assert list_strategy(l) == "int" + l = ["a", "b", "c"] + assert list_strategy(l) == "str" + l = [1.1, 2.2, 3.3] + assert list_strategy(l) == "float" + l = range(3) + assert list_strategy(l) == "range" + l = [1, "b", 3] + assert list_strategy(l) == "object" + l = [] + assert list_strategy(l) == "empty" + o = 5 + raises(TypeError, list_strategy, 5) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -5,10 +5,11 @@ applevel_name = 'numpypy' interpleveldefs = { - 'array': 'interp_numarray.NDimArray', + 'ndarray': 'interp_numarray.W_NDimArray', 'dtype': 'interp_dtype.W_Dtype', 'ufunc': 'interp_ufuncs.W_Ufunc', + 'array': 'interp_numarray.array', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', 'ones': 'interp_numarray.ones', @@ -16,8 +17,22 @@ 'fromstring': 'interp_support.fromstring', 'flatiter': 'interp_numarray.W_FlatIterator', - 'True_': 'space.w_True', - 'False_': 'space.w_False', + 'True_': 'types.Bool.True', + 'False_': 'types.Bool.False', + + 'generic': 'interp_boxes.W_GenericBox', + 'number': 'interp_boxes.W_NumberBox', + 'integer': 'interp_boxes.W_IntegerBox', + 'signedinteger': 'interp_boxes.W_SignedIntegerBox', + 'bool_': 'interp_boxes.W_BoolBox', + 'int8': 'interp_boxes.W_Int8Box', + 'int16': 'interp_boxes.W_Int16Box', + 'int32': 'interp_boxes.W_Int32Box', + 'int64': 'interp_boxes.W_Int64Box', + 'int_': 'interp_boxes.W_LongBox', + 'inexact': 'interp_boxes.W_InexactBox', + 'floating': 'interp_boxes.W_FloatingBox', + 'float64': 'interp_boxes.W_Float64Box', } # ufuncs diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_boxes.py @@ -0,0 +1,267 @@ +from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import TypeDef +from pypy.objspace.std.floattype import float_typedef +from pypy.objspace.std.inttype import int_typedef +from pypy.objspace.std.typeobject import W_TypeObject +from pypy.rlib.rarithmetic import LONG_BIT +from pypy.tool.sourcetools import func_with_new_name + + +MIXIN_64 = (int_typedef,) if LONG_BIT == 64 else () + +def new_dtype_getter(name): + def get_dtype(space): + from pypy.module.micronumpy.interp_dtype import get_dtype_cache + return getattr(get_dtype_cache(space), "w_%sdtype" % name) + def new(space, w_subtype, w_value): + dtype = get_dtype(space) + return dtype.itemtype.coerce_subtype(space, w_subtype, w_value) + return func_with_new_name(new, name + "_box_new"), staticmethod(get_dtype) + +class PrimitiveBox(object): + _mixin_ = True + + def __init__(self, value): + self.value = value + + def convert_to(self, dtype): + return dtype.box(self.value) + +class W_GenericBox(Wrappable): + _attrs_ = () + + def descr__new__(space, w_subtype, __args__): + assert isinstance(w_subtype, W_TypeObject) + raise operationerrfmt(space.w_TypeError, "cannot create '%s' instances", + w_subtype.get_module_type_name() + ) + + def descr_str(self, space): + return self.descr_repr(space) + + def 
descr_repr(self, space): + return space.wrap(self.get_dtype(space).itemtype.str_format(self)) + + def descr_int(self, space): + box = self.convert_to(W_LongBox.get_dtype(space)) + assert isinstance(box, W_LongBox) + return space.wrap(box.value) + + def descr_float(self, space): + box = self.convert_to(W_Float64Box.get_dtype(space)) + assert isinstance(box, W_Float64Box) + return space.wrap(box.value) + + def descr_nonzero(self, space): + dtype = self.get_dtype(space) + return space.wrap(dtype.itemtype.bool(self)) + + def _binop_impl(ufunc_name): + def impl(self, space, w_other): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) + + def _binop_right_impl(ufunc_name): + def impl(self, space, w_other): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) + + def _unaryop_impl(ufunc_name): + def impl(self, space): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) + + descr_add = _binop_impl("add") + descr_sub = _binop_impl("subtract") + descr_mul = _binop_impl("multiply") + descr_div = _binop_impl("divide") + descr_eq = _binop_impl("equal") + descr_ne = _binop_impl("not_equal") + descr_lt = _binop_impl("less") + descr_le = _binop_impl("less_equal") + descr_gt = _binop_impl("greater") + descr_ge = _binop_impl("greater_equal") + + descr_radd = _binop_right_impl("add") + descr_rmul = _binop_right_impl("multiply") + + descr_neg = _unaryop_impl("negative") + descr_abs = _unaryop_impl("absolute") + + +class W_BoolBox(W_GenericBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("bool") + +class W_NumberBox(W_GenericBox): + _attrs_ = () + +class W_IntegerBox(W_NumberBox): + pass + +class W_SignedIntegerBox(W_IntegerBox): + pass + +class W_UnsignedIntgerBox(W_IntegerBox): + pass + +class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int8") + +class W_UInt8Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint8") + +class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int16") + +class W_UInt16Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint16") + +class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int32") + +class W_UInt32Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint32") + +class W_LongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("long") + +class W_ULongBox(W_UnsignedIntgerBox, PrimitiveBox): + pass + +class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int64") + +class W_UInt64Box(W_UnsignedIntgerBox, PrimitiveBox): + pass + +class W_InexactBox(W_NumberBox): + _attrs_ = () + +class W_FloatingBox(W_InexactBox): + _attrs_ = () + +class W_Float32Box(W_FloatingBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("float32") + +class W_Float64Box(W_FloatingBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("float64") + + + +W_GenericBox.typedef = TypeDef("generic", + __module__ = "numpypy", + + 
__new__ = interp2app(W_GenericBox.descr__new__.im_func), + + __str__ = interp2app(W_GenericBox.descr_str), + __repr__ = interp2app(W_GenericBox.descr_repr), + __int__ = interp2app(W_GenericBox.descr_int), + __float__ = interp2app(W_GenericBox.descr_float), + __nonzero__ = interp2app(W_GenericBox.descr_nonzero), + + __add__ = interp2app(W_GenericBox.descr_add), + __sub__ = interp2app(W_GenericBox.descr_sub), + __mul__ = interp2app(W_GenericBox.descr_mul), + __div__ = interp2app(W_GenericBox.descr_div), + + __radd__ = interp2app(W_GenericBox.descr_add), + __rmul__ = interp2app(W_GenericBox.descr_rmul), + + __eq__ = interp2app(W_GenericBox.descr_eq), + __ne__ = interp2app(W_GenericBox.descr_ne), + __lt__ = interp2app(W_GenericBox.descr_lt), + __le__ = interp2app(W_GenericBox.descr_le), + __gt__ = interp2app(W_GenericBox.descr_gt), + __ge__ = interp2app(W_GenericBox.descr_ge), + + __neg__ = interp2app(W_GenericBox.descr_neg), + __abs__ = interp2app(W_GenericBox.descr_abs), +) + +W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_BoolBox.descr__new__.im_func), +) + +W_NumberBox.typedef = TypeDef("number", W_GenericBox.typedef, + __module__ = "numpypy", +) + +W_IntegerBox.typedef = TypeDef("integer", W_NumberBox.typedef, + __module__ = "numpypy", +) + +W_SignedIntegerBox.typedef = TypeDef("signedinteger", W_IntegerBox.typedef, + __module__ = "numpypy", +) + +W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int8Box.descr__new__.im_func), +) + +W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int16Box.descr__new__.im_func), +) + +W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int32Box.typedef = TypeDef("int32", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int32Box.descr__new__.im_func), +) + +W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +if LONG_BIT == 32: + long_name = "int32" +elif LONG_BIT == 64: + long_name = "int64" +W_LongBox.typedef = TypeDef(long_name, (W_SignedIntegerBox.typedef, int_typedef,), + __module__ = "numpypy", +) + +W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, + __module__ = "numpypy", + __new__ = interp2app(W_Int64Box.descr__new__.im_func), +) + +W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, + __module__ = "numpypy", +) + +W_FloatingBox.typedef = TypeDef("floating", W_InexactBox.typedef, + __module__ = "numpypy", +) + +W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, + __module__ = "numpypy", +) + +W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), + __module__ = "numpypy", + + __new__ = interp2app(W_Float64Box.descr__new__.im_func), +) \ No newline at end of file diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -1,16 +1,11 @@ -import functools -import math - from pypy.interpreter.baseobjspace import Wrappable 
from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app -from pypy.interpreter.typedef import TypeDef, interp_attrproperty, GetSetProperty -from pypy.module.micronumpy import signature -from pypy.objspace.std.floatobject import float2string -from pypy.rlib import rarithmetic, rfloat -from pypy.rlib.rarithmetic import LONG_BIT, widen -from pypy.rlib.objectmodel import specialize, enforceargs -from pypy.rlib.unroll import unrolling_iterable +from pypy.interpreter.typedef import (TypeDef, GetSetProperty, + interp_attrproperty, interp_attrproperty_w) +from pypy.module.micronumpy import types, signature, interp_boxes +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import LONG_BIT from pypy.rpython.lltypesystem import lltype, rffi @@ -19,523 +14,218 @@ BOOLLTR = "b" FLOATINGLTR = "f" + +VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True}) + class W_Dtype(Wrappable): - def __init__(self, space): - pass + _immuable_fields_ = ["itemtype", "num", "kind"] + + def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[]): + self.signature = signature.BaseSignature() + self.itemtype = itemtype + self.num = num + self.kind = kind + self.name = name + self.char = char + self.w_box_type = w_box_type + self.alternate_constructors = alternate_constructors + + def malloc(self, length): + # XXX find out why test_zjit explodes with tracking of allocations + return lltype.malloc(VOID_STORAGE, self.itemtype.get_element_size() * length, + zero=True, flavor="raw", + track_allocation=False, add_memory_pressure=True + ) + + @specialize.argtype(1) + def box(self, value): + return self.itemtype.box(value) + + def coerce(self, space, w_item): + return self.itemtype.coerce(space, w_item) + + def getitem(self, storage, i): + return self.itemtype.read(storage, self.itemtype.get_element_size(), i, 0) + + def setitem(self, storage, i, box): + self.itemtype.store(storage, self.itemtype.get_element_size(), i, 0, box) + + def fill(self, storage, box, start, stop): + self.itemtype.fill(storage, self.itemtype.get_element_size(), box, start, stop, 0) def descr__new__(space, w_subtype, w_dtype): + cache = get_dtype_cache(space) + if space.is_w(w_dtype, space.w_None): - return space.fromcache(W_Float64Dtype) + return cache.w_float64dtype + elif space.isinstance_w(w_dtype, w_subtype): + return w_dtype elif space.isinstance_w(w_dtype, space.w_str): - dtype = space.str_w(w_dtype) - for alias, dtype_class in dtypes_by_alias: - if alias == dtype: - return space.fromcache(dtype_class) - elif isinstance(space.interpclass_w(w_dtype), W_Dtype): - return w_dtype - elif space.isinstance_w(w_dtype, space.w_type): - for typename, dtype_class in dtypes_by_apptype: - if space.is_w(getattr(space, "w_%s" % typename), w_dtype): - return space.fromcache(dtype_class) + name = space.str_w(w_dtype) + for dtype in cache.builtin_dtypes: + if dtype.name == name or dtype.char == name: + return dtype + else: + for dtype in cache.builtin_dtypes: + if w_dtype in dtype.alternate_constructors: + return dtype + if w_dtype is dtype.w_box_type: + return dtype raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + def descr_str(self, space): + return space.wrap(self.name) + def descr_repr(self, space): return space.wrap("dtype('%s')" % self.name) - def descr_str(self, space): - return space.wrap(self.name) + def descr_get_itemsize(self, space): + return space.wrap(self.itemtype.get_element_size()) def 
descr_get_shape(self, space): return space.newtuple([]) - -class BaseBox(object): - pass - -VOID_TP = lltype.Ptr(lltype.Array(lltype.Void, hints={'nolength': True, "uncast_on_llgraph": True})) - -def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype, - expected_size=None): - - class Box(BaseBox): - def __init__(self, val): - self.val = val - - def wrap(self, space): - val = self.val - if valtype is rarithmetic.r_singlefloat: - val = float(val) - return space.wrap(val) - - def convert_to(self, dtype): - return dtype.adapt_val(self.val) - Box.__name__ = "%sBox" % T._name - - TP = lltype.Ptr(lltype.Array(T, hints={'nolength': True})) - class W_LowLevelDtype(W_Dtype): - signature = signature.BaseSignature() - - def erase(self, storage): - return rffi.cast(VOID_TP, storage) - - def unerase(self, storage): - return rffi.cast(TP, storage) - - @enforceargs(None, valtype) - def box(self, value): - return Box(value) - - def unbox(self, box): - assert isinstance(box, Box) - return box.val - - def unwrap(self, space, w_item): - raise NotImplementedError - - def malloc(self, size): - # XXX find out why test_zjit explodes with tracking of allocations - return self.erase(lltype.malloc(TP.TO, size, - zero=True, flavor="raw", - track_allocation=False, add_memory_pressure=True - )) - - def getitem(self, storage, i): - return Box(self.unerase(storage)[i]) - - def setitem(self, storage, i, item): - self.unerase(storage)[i] = self.unbox(item) - - def setitem_w(self, space, storage, i, w_item): - self.setitem(storage, i, self.unwrap(space, w_item)) - - def fill(self, storage, item, start, stop): - storage = self.unerase(storage) - item = self.unbox(item) - for i in xrange(start, stop): - storage[i] = item - - @specialize.argtype(1) - def adapt_val(self, val): - return self.box(rffi.cast(TP.TO.OF, val)) - - W_LowLevelDtype.__name__ = "W_%sDtype" % name.capitalize() - W_LowLevelDtype.num = num - W_LowLevelDtype.kind = kind - W_LowLevelDtype.name = name - W_LowLevelDtype.aliases = aliases - W_LowLevelDtype.applevel_types = applevel_types - W_LowLevelDtype.num_bytes = rffi.sizeof(T) - if expected_size is not None: - assert W_LowLevelDtype.num_bytes == expected_size - return W_LowLevelDtype - - -def binop(func): - specialize.argtype(1, 2)(func) - @functools.wraps(func) - def impl(self, v1, v2): - return self.adapt_val(func(self, - self.for_computation(self.unbox(v1)), - self.for_computation(self.unbox(v2)), - )) - return impl - -def raw_binop(func): - specialize.argtype(1, 2)(func) - # Returns the result unwrapped. 
- @functools.wraps(func) - def impl(self, v1, v2): - return func(self, - self.for_computation(self.unbox(v1)), - self.for_computation(self.unbox(v2)) - ) - return impl - -def unaryop(func): - specialize.argtype(1)(func) - @functools.wraps(func) - def impl(self, v): - return self.adapt_val(func(self, self.for_computation(self.unbox(v)))) - return impl - -class ArithmeticTypeMixin(object): - _mixin_ = True - - @binop - def add(self, v1, v2): - return v1 + v2 - @binop - def sub(self, v1, v2): - return v1 - v2 - @binop - def mul(self, v1, v2): - return v1 * v2 - - @unaryop - def pos(self, v): - return +v - @unaryop - def neg(self, v): - return -v - @unaryop - def abs(self, v): - return abs(v) - - @binop - def max(self, v1, v2): - return max(v1, v2) - @binop - def min(self, v1, v2): - return min(v1, v2) - - def bool(self, v): - return bool(self.for_computation(self.unbox(v))) - @raw_binop - def eq(self, v1, v2): - return v1 == v2 - @raw_binop - def ne(self, v1, v2): - return v1 != v2 - @raw_binop - def lt(self, v1, v2): - return v1 < v2 - @raw_binop - def le(self, v1, v2): - return v1 <= v2 - @raw_binop - def gt(self, v1, v2): - return v1 > v2 - @raw_binop - def ge(self, v1, v2): - return v1 >= v2 - - -class FloatArithmeticDtype(ArithmeticTypeMixin): - _mixin_ = True - - def unwrap(self, space, w_item): - return self.adapt_val(space.float_w(space.float(w_item))) - - def for_computation(self, v): - return float(v) - - def str_format(self, item): - return float2string(self.for_computation(self.unbox(item)), 'g', rfloat.DTSF_STR_PRECISION) - - @binop - def div(self, v1, v2): - # XXX this won't work after translation, probably requires ovfcheck - try: - return v1 / v2 - except ZeroDivisionError: - if v1 == v2 == 0.0: - return rfloat.NAN - return rfloat.copysign(rfloat.INFINITY, v1 * v2) - @binop - def mod(self, v1, v2): - return math.fmod(v1, v2) - @binop - def pow(self, v1, v2): - return math.pow(v1, v2) - - @unaryop - def sign(self, v): - if v == 0.0: - return 0.0 - return rfloat.copysign(1.0, v) - @unaryop - def reciprocal(self, v): - if v == 0.0: - return rfloat.copysign(rfloat.INFINITY, v) - return 1.0 / v - @unaryop - def fabs(self, v): - return math.fabs(v) - @unaryop - def floor(self, v): - return math.floor(v) - - @binop - def copysign(self, v1, v2): - return math.copysign(v1, v2) - @unaryop - def exp(self, v): - try: - return math.exp(v) - except OverflowError: - return rfloat.INFINITY - @unaryop - def sin(self, v): - return math.sin(v) - @unaryop - def cos(self, v): - return math.cos(v) - @unaryop - def tan(self, v): - return math.tan(v) - @unaryop - def arcsin(self, v): - if not -1.0 <= v <= 1.0: - return rfloat.NAN - return math.asin(v) - @unaryop - def arccos(self, v): - if not -1.0 <= v <= 1.0: - return rfloat.NAN - return math.acos(v) - @unaryop - def arctan(self, v): - return math.atan(v) - @unaryop - def arcsinh(self, v): - return math.asinh(v) - @unaryop - def arctanh(self, v): - if v == 1.0 or v == -1.0: - return math.copysign(rfloat.INFINITY, v) - if not -1.0 < v < 1.0: - return rfloat.NAN - return math.atanh(v) - @unaryop - def sqrt(self, v): - try: - return math.sqrt(v) - except ValueError: - return rfloat.NAN - -class IntegerArithmeticDtype(ArithmeticTypeMixin): - _mixin_ = True - - def unwrap(self, space, w_item): - return self.adapt_val(space.int_w(space.int(w_item))) - - def for_computation(self, v): - return widen(v) - - def str_format(self, item): - return str(widen(self.unbox(item))) - - @binop - def div(self, v1, v2): - if v2 == 0: - return 0 - return v1 / v2 - @binop 
- def mod(self, v1, v2): - return v1 % v2 - @binop - def pow(self, v1, v2): - res = 1 - while v2 > 0: - if v2 & 1: - res *= v1 - v2 >>= 1 - if v2 == 0: - break - v1 *= v1 - return res - - -class SignedIntegerArithmeticDtype(IntegerArithmeticDtype): - _mixin_ = True - - @unaryop - def sign(self, v): - if v > 0: - return 1 - elif v < 0: - return -1 - else: - assert v == 0 - return 0 - -class UnsignedIntegerArithmeticDtype(IntegerArithmeticDtype): - _mixin_ = True - - @unaryop - def sign(self, v): - return int(v != 0) - - -W_BoolDtype = create_low_level_dtype( - num = 0, kind = BOOLLTR, name = "bool", - aliases = ["?", "bool", "bool8"], - applevel_types = ["bool"], - T = lltype.Bool, - valtype = bool, -) -class W_BoolDtype(SignedIntegerArithmeticDtype, W_BoolDtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.is_true(w_item)) - - def str_format(self, item): - v = self.unbox(item) - return "True" if v else "False" - - def for_computation(self, v): - return int(v) - -W_Int8Dtype = create_low_level_dtype( - num = 1, kind = SIGNEDLTR, name = "int8", - aliases = ["b", "int8", "i1"], - applevel_types = [], - T = rffi.SIGNEDCHAR, - valtype = rffi.SIGNEDCHAR._type, - expected_size = 1, -) -class W_Int8Dtype(SignedIntegerArithmeticDtype, W_Int8Dtype): - pass - -W_UInt8Dtype = create_low_level_dtype( - num = 2, kind = UNSIGNEDLTR, name = "uint8", - aliases = ["B", "uint8", "I1"], - applevel_types = [], - T = rffi.UCHAR, - valtype = rffi.UCHAR._type, - expected_size = 1, -) -class W_UInt8Dtype(UnsignedIntegerArithmeticDtype, W_UInt8Dtype): - pass - -W_Int16Dtype = create_low_level_dtype( - num = 3, kind = SIGNEDLTR, name = "int16", - aliases = ["h", "int16", "i2"], - applevel_types = [], - T = rffi.SHORT, - valtype = rffi.SHORT._type, - expected_size = 2, -) -class W_Int16Dtype(SignedIntegerArithmeticDtype, W_Int16Dtype): - pass - -W_UInt16Dtype = create_low_level_dtype( - num = 4, kind = UNSIGNEDLTR, name = "uint16", - aliases = ["H", "uint16", "I2"], - applevel_types = [], - T = rffi.USHORT, - valtype = rffi.USHORT._type, - expected_size = 2, -) -class W_UInt16Dtype(UnsignedIntegerArithmeticDtype, W_UInt16Dtype): - pass - -W_Int32Dtype = create_low_level_dtype( - num = 5, kind = SIGNEDLTR, name = "int32", - aliases = ["i", "int32", "i4"], - applevel_types = [], - T = rffi.INT, - valtype = rffi.INT._type, - expected_size = 4, -) -class W_Int32Dtype(SignedIntegerArithmeticDtype, W_Int32Dtype): - pass - -W_UInt32Dtype = create_low_level_dtype( - num = 6, kind = UNSIGNEDLTR, name = "uint32", - aliases = ["I", "uint32", "I4"], - applevel_types = [], - T = rffi.UINT, - valtype = rffi.UINT._type, - expected_size = 4, -) -class W_UInt32Dtype(UnsignedIntegerArithmeticDtype, W_UInt32Dtype): - pass - -W_Int64Dtype = create_low_level_dtype( - num = 9, kind = SIGNEDLTR, name = "int64", - aliases = ["q", "int64", "i8"], - applevel_types = ["long"], - T = rffi.LONGLONG, - valtype = rffi.LONGLONG._type, - expected_size = 8, -) -class W_Int64Dtype(SignedIntegerArithmeticDtype, W_Int64Dtype): - pass - -W_UInt64Dtype = create_low_level_dtype( - num = 10, kind = UNSIGNEDLTR, name = "uint64", - aliases = ["Q", "uint64", "I8"], - applevel_types = [], - T = rffi.ULONGLONG, - valtype = rffi.ULONGLONG._type, - expected_size = 8, -) -class W_UInt64Dtype(UnsignedIntegerArithmeticDtype, W_UInt64Dtype): - pass - -if LONG_BIT == 32: - long_dtype = W_Int32Dtype - ulong_dtype = W_UInt32Dtype -elif LONG_BIT == 64: - long_dtype = W_Int64Dtype - ulong_dtype = W_UInt64Dtype -else: - assert False - -class 
W_LongDtype(long_dtype): - num = 7 - aliases = ["l"] - applevel_types = ["int"] - -class W_ULongDtype(ulong_dtype): - num = 8 - aliases = ["L"] - -W_Float32Dtype = create_low_level_dtype( - num = 11, kind = FLOATINGLTR, name = "float32", - aliases = ["f", "float32", "f4"], - applevel_types = [], - T = lltype.SingleFloat, - valtype = rarithmetic.r_singlefloat, - expected_size = 4, -) -class W_Float32Dtype(FloatArithmeticDtype, W_Float32Dtype): - pass - -W_Float64Dtype = create_low_level_dtype( - num = 12, kind = FLOATINGLTR, name = "float64", - aliases = ["d", "float64", "f8"], - applevel_types = ["float"], - T = lltype.Float, - valtype = float, - expected_size = 8, -) -class W_Float64Dtype(FloatArithmeticDtype, W_Float64Dtype): - pass - -ALL_DTYPES = [ - W_BoolDtype, - W_Int8Dtype, W_UInt8Dtype, W_Int16Dtype, W_UInt16Dtype, - W_Int32Dtype, W_UInt32Dtype, W_LongDtype, W_ULongDtype, - W_Int64Dtype, W_UInt64Dtype, - W_Float32Dtype, W_Float64Dtype, -] - -dtypes_by_alias = unrolling_iterable([ - (alias, dtype) - for dtype in ALL_DTYPES - for alias in dtype.aliases -]) -dtypes_by_apptype = unrolling_iterable([ - (apptype, dtype) - for dtype in ALL_DTYPES - for apptype in dtype.applevel_types -]) -dtypes_by_num_bytes = unrolling_iterable(sorted([ - (dtype.num_bytes, dtype) - for dtype in ALL_DTYPES -])) - W_Dtype.typedef = TypeDef("dtype", - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_Dtype.descr__new__.im_func), + __str__= interp2app(W_Dtype.descr_str), __repr__ = interp2app(W_Dtype.descr_repr), - __str__ = interp2app(W_Dtype.descr_str), num = interp_attrproperty("num", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), - itemsize = interp_attrproperty("num_bytes", cls=W_Dtype), + type = interp_attrproperty_w("w_box_type", cls=W_Dtype), + itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), shape = GetSetProperty(W_Dtype.descr_get_shape), ) W_Dtype.typedef.acceptable_as_base_class = False + +class DtypeCache(object): + def __init__(self, space): + self.w_booldtype = W_Dtype( + types.Bool(), + num=0, + kind=BOOLLTR, + name="bool", + char="?", + w_box_type = space.gettypefor(interp_boxes.W_BoolBox), + alternate_constructors=[space.w_bool], + ) + self.w_int8dtype = W_Dtype( + types.Int8(), + num=1, + kind=SIGNEDLTR, + name="int8", + char="b", + w_box_type = space.gettypefor(interp_boxes.W_Int8Box) + ) + self.w_uint8dtype = W_Dtype( + types.UInt8(), + num=2, + kind=UNSIGNEDLTR, + name="uint8", + char="B", + w_box_type = space.gettypefor(interp_boxes.W_UInt8Box), + ) + self.w_int16dtype = W_Dtype( + types.Int16(), + num=3, + kind=SIGNEDLTR, + name="int16", + char="h", + w_box_type = space.gettypefor(interp_boxes.W_Int16Box), + ) + self.w_uint16dtype = W_Dtype( + types.UInt16(), + num=4, + kind=UNSIGNEDLTR, + name="uint16", + char="H", + w_box_type = space.gettypefor(interp_boxes.W_UInt16Box), + ) + self.w_int32dtype = W_Dtype( + types.Int32(), + num=5, + kind=SIGNEDLTR, + name="int32", + char="i", + w_box_type = space.gettypefor(interp_boxes.W_Int32Box), + ) + self.w_uint32dtype = W_Dtype( + types.UInt32(), + num=6, + kind=UNSIGNEDLTR, + name="uint32", + char="I", + w_box_type = space.gettypefor(interp_boxes.W_UInt32Box), + ) + if LONG_BIT == 32: + name = "int32" + elif LONG_BIT == 64: + name = "int64" + self.w_longdtype = W_Dtype( + types.Long(), + num=7, + kind=SIGNEDLTR, + name=name, + char="l", + w_box_type = space.gettypefor(interp_boxes.W_LongBox), + alternate_constructors=[space.w_int], + ) + self.w_ulongdtype = W_Dtype( + types.ULong(), + num=8, + 
kind=UNSIGNEDLTR, + name="u" + name, + char="L", + w_box_type = space.gettypefor(interp_boxes.W_ULongBox), + ) + self.w_int64dtype = W_Dtype( + types.Int64(), + num=9, + kind=SIGNEDLTR, + name="int64", + char="q", + w_box_type = space.gettypefor(interp_boxes.W_Int64Box), + alternate_constructors=[space.w_long], + ) + self.w_uint64dtype = W_Dtype( + types.UInt64(), + num=10, + kind=UNSIGNEDLTR, + name="uint64", + char="Q", + w_box_type = space.gettypefor(interp_boxes.W_UInt64Box), + ) + self.w_float32dtype = W_Dtype( + types.Float32(), + num=11, + kind=FLOATINGLTR, + name="float32", + char="f", + w_box_type = space.gettypefor(interp_boxes.W_Float32Box), + ) + self.w_float64dtype = W_Dtype( + types.Float64(), + num=12, + kind=FLOATINGLTR, + name="float64", + char="d", + w_box_type = space.gettypefor(interp_boxes.W_Float64Box), + alternate_constructors=[space.w_float], + ) + + self.builtin_dtypes = [ + self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, + self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype, + self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype, + self.w_int64dtype, self.w_uint64dtype, self.w_float32dtype, + self.w_float64dtype + ] + self.dtypes_by_num_bytes = sorted( + (dtype.itemtype.get_element_size(), dtype) + for dtype in self.builtin_dtypes + ) + +def get_dtype_cache(space): + return space.fromcache(DtypeCache) \ No newline at end of file diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -98,47 +98,6 @@ endshape[i] = remainder[i] return endshape -def descr_new_array(space, w_subtype, w_item_or_iterable, w_dtype=None, - w_order=NoneNotWrapped): - # find scalar - if not space.issequence_w(w_item_or_iterable): - if space.is_w(w_dtype, space.w_None): - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, - w_item_or_iterable) - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - return scalar_w(space, dtype, w_item_or_iterable) - if w_order is None: - order = 'C' - else: - order = space.str_w(w_order) - if order != 'C': # or order != 'F': - raise operationerrfmt(space.w_ValueError, "Unknown order: %s", - order) - shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) - # they come back in C order - size = len(elems_w) - if space.is_w(w_dtype, space.w_None): - w_dtype = None - for w_elem in elems_w: - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, - w_dtype) - if w_dtype is space.fromcache(interp_dtype.W_Float64Dtype): - break - if w_dtype is None: - w_dtype = space.w_None - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - arr = NDimArray(size, shape[:], dtype=dtype, order=order) - shapelen = len(shape) - arr_iter = arr.start_iter(arr.shape) - for i in range(len(elems_w)): - w_elem = elems_w[i] - dtype.setitem_w(space, arr.storage, arr_iter.offset, w_elem) - arr_iter = arr_iter.next(shapelen) - return arr # Iterators for arrays # -------------------- @@ -378,6 +337,13 @@ def add_invalidates(self, other): self.invalidates.append(other) + def descr__new__(space, w_subtype, w_size, w_dtype=None): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + size, shape = _find_size_and_shape(space, w_size) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + def 
_unaryop_impl(ufunc_name): def impl(self, space): return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) @@ -451,8 +417,8 @@ self=self, dtype=dtype, i=i, result=result, idx=idx, cur_best=cur_best) - new_best = getattr(dtype, op_name)(cur_best, self.eval(i)) - if dtype.ne(new_best, cur_best): + new_best = getattr(dtype.itemtype, op_name)(cur_best, self.eval(i)) + if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best i = i.next(shapelen) @@ -462,8 +428,7 @@ size = self.find_size() if size == 0: raise OperationError(space.w_ValueError, - space.wrap("Can't call %s on zero-size arrays" \ - % op_name)) + space.wrap("Can't call %s on zero-size arrays" % op_name)) return space.wrap(loop(self)) return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) @@ -475,7 +440,7 @@ all_driver.jit_merge_point(signature=self.signature, shapelen=shapelen, self=self, dtype=dtype, i=i) - if not dtype.bool(self.eval(i)): + if not dtype.itemtype.bool(self.eval(i)): return False i = i.next(shapelen) return True @@ -490,7 +455,7 @@ any_driver.jit_merge_point(signature=self.signature, shapelen=shapelen, self=self, dtype=dtype, i=i) - if dtype.bool(self.eval(i)): + if dtype.itemtype.bool(self.eval(i)): return True i = i.next(shapelen) return False @@ -586,8 +551,8 @@ res.append(')') else: concrete.to_str(space, 1, res, indent=' ') - if (dtype is not space.fromcache(interp_dtype.W_Float64Dtype) and - dtype is not space.fromcache(interp_dtype.W_Int64Dtype)) or \ + if (dtype is not interp_dtype.get_dtype_cache(space).w_float64dtype and + dtype is not interp_dtype.get_dtype_cache(space).w_int64dtype) or \ not self.find_size(): res.append(", dtype=" + dtype.name) res.append(")") @@ -656,7 +621,7 @@ start = False else: builder.append(spacer) - builder.append(dtype.str_format(self.getitem(item))) + builder.append(dtype.itemtype.str_format(self.getitem(item))) item += self.strides[0] # Add a comma only if comma is False - this prevents adding two # commas @@ -669,7 +634,7 @@ start = False else: builder.append(spacer) - builder.append(dtype.str_format(self.getitem(item))) + builder.append(dtype.itemtype.str_format(self.getitem(item))) item += self.strides[0] i += 1 else: @@ -756,7 +721,7 @@ raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) item = concrete._index_of_single_item(space, w_idx) - return concrete.getitem(item).wrap(space) + return concrete.getitem(item) chunks = self._prepare_slice_args(space, w_idx) return space.wrap(self.create_slice(space, chunks)) @@ -815,14 +780,15 @@ shape[:]) def descr_mean(self, space): - return space.wrap(space.float_w(self.descr_sum(space)) / self.find_size()) + return space.div(self.descr_sum(space), space.wrap(self.find_size())) def descr_nonzero(self, space): if self.find_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) - return space.wrap(space.is_true(self.get_concrete().eval( - self.start_iter(self.shape)).wrap(space))) + return space.wrap(space.is_true( + self.get_concrete().eval(self.start_iter(self.shape)) + )) def descr_get_transpose(self, space): concrete = self.get_concrete() @@ -858,17 +824,14 @@ return w_obj elif space.issequence_w(w_obj): # Convert to array. 
- w_obj = space.call_function(space.gettypefor(BaseArray), w_obj) - assert isinstance(w_obj, BaseArray) - return w_obj + return array(space, w_obj, w_order=None) else: # If it's a scalar dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) return scalar_w(space, dtype, w_obj) def scalar_w(space, dtype, w_obj): - assert isinstance(dtype, interp_dtype.W_Dtype) - return Scalar(dtype, dtype.unwrap(space, w_obj)) + return Scalar(dtype, dtype.coerce(space, w_obj)) class Scalar(BaseArray): """ @@ -879,6 +842,7 @@ _attrs_ = ["dtype", "value", "shape"] def __init__(self, dtype, value): + self.shape = self.strides = [] BaseArray.__init__(self, [], 'C') self.dtype = dtype self.value = value @@ -902,7 +866,7 @@ return ConstantIterator() def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): - builder.append(self.dtype.str_format(self.value)) + builder.append(self.dtype.itemtype.str_format(self.value)) def copy(self): return Scalar(self.dtype, self.value) @@ -928,7 +892,7 @@ i = 0 signature = self.signature result_size = self.find_size() - result = NDimArray(result_size, self.shape, self.find_dtype()) + result = W_NDimArray(result_size, self.shape, self.find_dtype()) shapelen = len(self.shape) i = self.start_iter() ri = result.start_iter() @@ -1155,14 +1119,14 @@ return 'Slice(%s)' % self.parent.debug_repr() def copy(self): - array = NDimArray(self.size, self.shape[:], self.find_dtype()) + array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) iter = self.start_iter() while not iter.done(): array.setitem(iter.offset, self.getitem(iter.offset)) iter = iter.next(len(self.shape)) return array -class NDimArray(BaseArray): +class W_NDimArray(BaseArray): """ A class representing contiguous array. We know that each iteration by say ufunc will increase the data index by one """ @@ -1189,11 +1153,11 @@ return self.dtype.getitem(self.storage, iter.get_offset()) def copy(self): - array = NDimArray(self.size, self.shape[:], self.dtype, self.order) + array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) rffi.c_memcpy( - rffi.cast(rffi.VOIDP, array.storage), - rffi.cast(rffi.VOIDP, self.storage), - self.size * self.dtype.num_bytes + array.storage, + self.storage, + self.size * self.dtype.itemtype.get_element_size() ) return array @@ -1204,8 +1168,7 @@ "len() of unsized object")) def setitem_w(self, space, item, w_value): - self.invalidated() - self.dtype.setitem_w(space, self.storage, item, w_value) + return self.setitem(item, self.dtype.coerce(space, w_value)) def setitem(self, item, value): self.invalidated() @@ -1237,20 +1200,62 @@ shape.append(item) return size, shape +def array(space, w_item_or_iterable, w_dtype=None, w_order=NoneNotWrapped): + # find scalar + if not space.issequence_w(w_item_or_iterable): + if space.is_w(w_dtype, space.w_None): + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, + w_item_or_iterable) + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + return scalar_w(space, dtype, w_item_or_iterable) + if w_order is None: + order = 'C' + else: + order = space.str_w(w_order) + if order != 'C': # or order != 'F': + raise operationerrfmt(space.w_ValueError, "Unknown order: %s", + order) + shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) + # they come back in C order + size = len(elems_w) + if space.is_w(w_dtype, space.w_None): + w_dtype = None + for w_elem in elems_w: + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, + w_dtype) + if w_dtype is 
interp_dtype.get_dtype_cache(space).w_float64dtype: + break + if w_dtype is None: + w_dtype = space.w_None + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) + shapelen = len(shape) + arr_iter = arr.start_iter(arr.shape) + for i in range(len(elems_w)): + w_elem = elems_w[i] + dtype.setitem(arr.storage, arr_iter.offset, dtype.coerce(space, w_elem)) + arr_iter = arr_iter.next(shapelen) + return arr + def zeros(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) size, shape = _find_size_and_shape(space, w_size) - return space.wrap(NDimArray(size, shape[:], dtype=dtype)) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) def ones(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) + size, shape = _find_size_and_shape(space, w_size) - arr = NDimArray(size, shape[:], dtype=dtype) - one = dtype.adapt_val(1) + arr = W_NDimArray(size, shape[:], dtype=dtype) + one = dtype.box(1) arr.dtype.fill(arr.storage, one, 0, size) return space.wrap(arr) @@ -1261,9 +1266,9 @@ return w_arr.descr_dot(space, w_obj2) BaseArray.typedef = TypeDef( - 'numarray', - __new__ = interp2app(descr_new_array), - + 'ndarray', + __module__ = "numpypy", + __new__ = interp2app(BaseArray.descr__new__.im_func), __len__ = interp2app(BaseArray.descr_len), __getitem__ = interp2app(BaseArray.descr_getitem), @@ -1352,10 +1357,10 @@ def descr_next(self, space): if self.iter.done(): - raise OperationError(space.w_StopIteration, space.wrap('')) + raise OperationError(space.w_StopIteration, space.w_None) result = self.eval(self.iter) self.iter = self.iter.next(self.shapelen) - return result.wrap(space) + return result def descr_iter(self): return self diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,6 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.interp_dtype import W_Float64Dtype +from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.rlib.rstruct.runpack import runpack from pypy.rpython.lltypesystem import lltype, rffi @@ -9,7 +9,7 @@ @unwrap_spec(s=str) def fromstring(space, s): - from pypy.module.micronumpy.interp_numarray import NDimArray + from pypy.module.micronumpy.interp_numarray import W_NDimArray length = len(s) if length % FLOAT_SIZE == 0: @@ -18,8 +18,8 @@ raise OperationError(space.w_ValueError, space.wrap( "string length %d not divisable by %d" % (length, FLOAT_SIZE))) - dtype = space.fromcache(W_Float64Dtype) - a = NDimArray(number, [number], dtype=dtype) + dtype = get_dtype_cache(space).w_float64dtype + a = W_NDimArray(number, [number], dtype=dtype) start = 0 end = FLOAT_SIZE diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import interp_dtype, signature +from 
pypy.module.micronumpy import interp_boxes, interp_dtype, signature, types from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name @@ -15,6 +15,7 @@ class W_Ufunc(Wrappable): _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"] + _immutable_fields_ = ["promote_to_float", "promote_bools"] def __init__(self, name, promote_to_float, promote_bools, identity): self.name = name @@ -29,7 +30,7 @@ def descr_get_identity(self, space): if self.identity is None: return space.w_None - return self.identity.wrap(space) + return self.identity def descr_call(self, space, __args__): if __args__.keywords or len(__args__.arguments_w) < self.argcount: @@ -80,8 +81,7 @@ new_sig = signature.Signature.find_sig([ self.reduce_signature, obj.signature ]) - return self.reduce_loop(new_sig, shapelen, start, value, obj, - dtype).wrap(space) + return self.reduce_loop(new_sig, shapelen, start, value, obj, dtype) def reduce_loop(self, signature, shapelen, i, value, obj, dtype): while not i.done(): @@ -115,7 +115,7 @@ promote_bools=self.promote_bools, ) if isinstance(w_obj, Scalar): - return self.func(res_dtype, w_obj.value.convert_to(res_dtype)).wrap(space) + return self.func(res_dtype, w_obj.value.convert_to(res_dtype)) new_sig = signature.Signature.find_sig([self.signature, w_obj.signature]) w_res = Call1(new_sig, w_obj.shape, res_dtype, w_obj, w_obj.order) @@ -124,6 +124,7 @@ class W_Ufunc2(W_Ufunc): + _immutable_fields_ = ["comparison_func", "func"] argcount = 2 def __init__(self, func, name, promote_to_float=False, promote_bools=False, @@ -148,14 +149,14 @@ promote_bools=self.promote_bools, ) if self.comparison_func: - res_dtype = space.fromcache(interp_dtype.W_BoolDtype) + res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar): return self.func(calc_dtype, w_lhs.value.convert_to(calc_dtype), w_rhs.value.convert_to(calc_dtype) - ).wrap(space) + ) new_sig = signature.Signature.find_sig([ self.signature, w_lhs.signature, w_rhs.signature @@ -169,7 +170,7 @@ W_Ufunc.typedef = TypeDef("ufunc", - __module__ = "numpy", + __module__ = "numpypy", __call__ = interp2app(W_Ufunc.descr_call), __repr__ = interp2app(W_Ufunc.descr_repr), @@ -187,7 +188,7 @@ dt1, dt2 = dt2, dt1 # Some operations promote op(bool, bool) to return int8, rather than bool if promote_bools and (dt1.kind == dt2.kind == interp_dtype.BOOLLTR): - return space.fromcache(interp_dtype.W_Int8Dtype) + return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: return find_unaryop_result_dtype(space, dt2, promote_to_float=True) # If they're the same kind, choose the greater one. @@ -197,14 +198,14 @@ # Everything promotes to float, and bool promotes to everything. 
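
The promotion logic in find_binop_result_dtype above follows a simple idea: bool promotes to everything, same-kind operands pick the larger one, and mixing integers with floats yields a float. A rough, self-contained sketch of that rule, using an illustrative rank table rather than pypy's real dtype objects and element-size comparisons:

    PROMOTION_RANK = {"bool": 0, "int8": 1, "int32": 2, "int64": 3, "float64": 4}

    def promote(dt1, dt2):
        # Result dtype of a binary op: the higher-ranked operand wins (simplified).
        return dt1 if PROMOTION_RANK[dt1] >= PROMOTION_RANK[dt2] else dt2

    assert promote("bool", "int8") == "int8"          # bool promotes to everything
    assert promote("int64", "float64") == "float64"   # ints mixed with floats give float
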
if dt2.kind == interp_dtype.FLOATINGLTR or dt1.kind == interp_dtype.BOOLLTR: # Float32 + 8-bit int = Float64 - if dt2.num == 11 and dt1.num_bytes >= 4: - return space.fromcache(interp_dtype.W_Float64Dtype) + if dt2.num == 11 and dt1.itemtype.get_element_size() >= 4: + return interp_dtype.get_dtype_cache(space).w_float64dtype return dt2 # for now this means mixing signed and unsigned if dt2.kind == interp_dtype.SIGNEDLTR: # if dt2 has a greater number of bytes, then just go with it - if dt1.num_bytes < dt2.num_bytes: + if dt1.itemtype.get_element_size() < dt2.itemtype.get_element_size(): return dt2 # we need to promote both dtypes dtypenum = dt2.num + 2 @@ -214,10 +215,11 @@ # UInt64 + signed = Float64 if dt2.num == 10: dtypenum += 1 - newdtype = interp_dtype.ALL_DTYPES[dtypenum] + newdtype = interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum] - if newdtype.num_bytes > dt2.num_bytes or newdtype.kind == interp_dtype.FLOATINGLTR: - return space.fromcache(newdtype) + if (newdtype.itemtype.get_element_size() > dt2.itemtype.get_element_size() or + newdtype.kind == interp_dtype.FLOATINGLTR): + return newdtype else: # we only promoted to long on 32-bit or to longlong on 64-bit # this is really for dealing with the Long and Ulong dtypes @@ -225,35 +227,42 @@ dtypenum += 2 else: dtypenum += 3 - return space.fromcache(interp_dtype.ALL_DTYPES[dtypenum]) + return interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum] def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): if promote_bools and (dt.kind == interp_dtype.BOOLLTR): - return space.fromcache(interp_dtype.W_Int8Dtype) + return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: if dt.kind == interp_dtype.FLOATINGLTR: return dt if dt.num >= 5: - return space.fromcache(interp_dtype.W_Float64Dtype) - for bytes, dtype in interp_dtype.dtypes_by_num_bytes: - if dtype.kind == interp_dtype.FLOATINGLTR and dtype.num_bytes > dt.num_bytes: - return space.fromcache(dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype + for bytes, dtype in interp_dtype.get_dtype_cache(space).dtypes_by_num_bytes: + if (dtype.kind == interp_dtype.FLOATINGLTR and + dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): + return dtype if promote_to_largest: if dt.kind == interp_dtype.BOOLLTR or dt.kind == interp_dtype.SIGNEDLTR: - return space.fromcache(interp_dtype.W_Int64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype elif dt.kind == interp_dtype.FLOATINGLTR: - return space.fromcache(interp_dtype.W_Float64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype elif dt.kind == interp_dtype.UNSIGNEDLTR: - return space.fromcache(interp_dtype.W_UInt64Dtype) + return interp_dtype.get_dtype_cache(space).w_uint64dtype else: assert False return dt def find_dtype_for_scalar(space, w_obj, current_guess=None): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - long_dtype = space.fromcache(interp_dtype.W_LongDtype) - int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype) + bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype + long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype + int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype + + if isinstance(w_obj, interp_boxes.W_GenericBox): + dtype = w_obj.get_dtype(space) + if current_guess is None: + return dtype + return find_binop_result_dtype(space, dtype, current_guess) if space.isinstance_w(w_obj, space.w_bool): if current_guess is None or current_guess is 
bool_dtype: @@ -269,20 +278,19 @@ current_guess is long_dtype or current_guess is int64_dtype): return int64_dtype return current_guess - return space.fromcache(interp_dtype.W_Float64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func): if argcount == 1: def impl(res_dtype, value): - return getattr(res_dtype, op_name)(value) + return getattr(res_dtype.itemtype, op_name)(value) elif argcount == 2: + dtype_cache = interp_dtype.get_dtype_cache(space) def impl(res_dtype, lvalue, rvalue): - res = getattr(res_dtype, op_name)(lvalue, rvalue) + res = getattr(res_dtype.itemtype, op_name)(lvalue, rvalue) if comparison_func: - booldtype = space.fromcache(interp_dtype.W_BoolDtype) - assert isinstance(booldtype, interp_dtype.W_BoolDtype) - res = booldtype.box(res) + return dtype_cache.w_booldtype.box(res) return res return func_with_new_name(impl, ufunc_name) @@ -338,7 +346,7 @@ identity = extra_kwargs.get("identity") if identity is not None: - identity = space.fromcache(interp_dtype.W_LongDtype).adapt_val(identity) + identity = interp_dtype.get_dtype_cache(space).w_longdtype.box(identity) extra_kwargs["identity"] = identity func = ufunc_dtype_caller(space, ufunc_name, op_name, argcount, diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -1,6 +1,6 @@ from pypy.conftest import gettestobjspace -from pypy.module.micronumpy import interp_dtype -from pypy.module.micronumpy.interp_numarray import NDimArray, Scalar +from pypy.module.micronumpy.interp_dtype import get_dtype_cache +from pypy.module.micronumpy.interp_numarray import W_NDimArray, Scalar from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) @@ -11,9 +11,10 @@ class TestSignature(object): def test_binop_signature(self, space): - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + float64_dtype = get_dtype_cache(space).w_float64dtype + bool_dtype = get_dtype_cache(space).w_booldtype - ar = NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) assert v1.signature is not v2.signature @@ -22,7 +23,7 @@ v4 = ar.descr_add(space, ar) assert v1.signature is v4.signature - bool_ar = NDimArray(10, [10], dtype=space.fromcache(interp_dtype.W_BoolDtype)) + bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) assert v5.signature is not v1.signature assert v5.signature is not v2.signature @@ -30,7 +31,9 @@ assert v5.signature is v6.signature def test_slice_signature(self, space): - ar = NDimArray(10, [10], dtype=space.fromcache(interp_dtype.W_Float64Dtype)) + float64_dtype = get_dtype_cache(space).w_float64dtype + + ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) assert v1.signature is v2.signature @@ -41,10 +44,10 @@ class TestUfuncCoerscion(object): def test_binops(self, space): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype) - int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype) - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + bool_dtype = get_dtype_cache(space).w_booldtype + int8_dtype = get_dtype_cache(space).w_int8dtype + 
int32_dtype = get_dtype_cache(space).w_int32dtype + float64_dtype = get_dtype_cache(space).w_float64dtype # Basic pairing assert find_binop_result_dtype(space, bool_dtype, bool_dtype) is bool_dtype @@ -62,19 +65,19 @@ assert find_binop_result_dtype(space, bool_dtype, float64_dtype, promote_to_float=True) is float64_dtype def test_unaryops(self, space): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype) - uint8_dtype = space.fromcache(interp_dtype.W_UInt8Dtype) - int16_dtype = space.fromcache(interp_dtype.W_Int16Dtype) - uint16_dtype = space.fromcache(interp_dtype.W_UInt16Dtype) - int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype) - uint32_dtype = space.fromcache(interp_dtype.W_UInt32Dtype) - long_dtype = space.fromcache(interp_dtype.W_LongDtype) - ulong_dtype = space.fromcache(interp_dtype.W_ULongDtype) - int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype) - uint64_dtype = space.fromcache(interp_dtype.W_UInt64Dtype) - float32_dtype = space.fromcache(interp_dtype.W_Float32Dtype) - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + bool_dtype = get_dtype_cache(space).w_booldtype + int8_dtype = get_dtype_cache(space).w_int8dtype + uint8_dtype = get_dtype_cache(space).w_uint8dtype + int16_dtype = get_dtype_cache(space).w_int16dtype + uint16_dtype = get_dtype_cache(space).w_uint16dtype + int32_dtype = get_dtype_cache(space).w_int32dtype + uint32_dtype = get_dtype_cache(space).w_uint32dtype + long_dtype = get_dtype_cache(space).w_longdtype + ulong_dtype = get_dtype_cache(space).w_ulongdtype + int64_dtype = get_dtype_cache(space).w_int64dtype + uint64_dtype = get_dtype_cache(space).w_uint64dtype + float32_dtype = get_dtype_cache(space).w_float32dtype + float64_dtype = get_dtype_cache(space).w_float64dtype # Normal rules, everything returns itself assert find_unaryop_result_dtype(space, bool_dtype) is bool_dtype diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -30,7 +30,7 @@ def test_repr_str(self): from numpypy import dtype - assert repr(dtype) == "" + assert repr(dtype) == "" d = dtype('?') assert repr(d) == "dtype('bool')" assert str(d) == "bool" @@ -44,13 +44,13 @@ assert a[i] is True_ def test_copy_array_with_dtype(self): - from numpypy import array, False_, True_ + from numpypy import array, False_, True_, int64 a = array([0, 1, 2, 3], dtype=long) # int on 64-bit, long in 32-bit - assert isinstance(a[0], (int, long)) + assert isinstance(a[0], int64) b = a.copy() - assert isinstance(b[0], (int, long)) + assert isinstance(b[0], int64) a = array([0, 1, 2, 3], dtype=bool) assert a[0] is False_ @@ -72,17 +72,17 @@ assert a[i] is True_ def test_zeros_long(self): - from numpypy import zeros + from numpypy import zeros, int64 a = zeros(10, dtype=long) for i in range(10): - assert isinstance(a[i], (int, long)) + assert isinstance(a[i], int64) assert a[1] == 0 def test_ones_long(self): - from numpypy import ones + from numpypy import ones, int64 a = ones(10, dtype=long) for i in range(10): - assert isinstance(a[i], (int, long)) + assert isinstance(a[i], int64) assert a[1] == 1 def test_overflow(self): @@ -165,3 +165,99 @@ # You can't subclass dtype raises(TypeError, type, "Foo", (dtype,), {}) + +class AppTestTypes(BaseNumpyAppTest): + def test_abstract_types(self): + import numpypy as numpy + raises(TypeError, numpy.generic, 0) + raises(TypeError, numpy.number, 0) + 
raises(TypeError, numpy.integer, 0) + exc = raises(TypeError, numpy.signedinteger, 0) + assert str(exc.value) == "cannot create 'numpypy.signedinteger' instances" + + raises(TypeError, numpy.floating, 0) + raises(TypeError, numpy.inexact, 0) + + def test_bool(self): + import numpypy as numpy + + assert numpy.bool_.mro() == [numpy.bool_, numpy.generic, object] + assert numpy.bool_(3) is numpy.True_ + assert numpy.bool_("") is numpy.False_ + assert type(numpy.True_) is type(numpy.False_) is numpy.bool_ + + class X(numpy.bool_): + pass + + assert type(X(True)) is numpy.bool_ + assert X(True) is numpy.True_ + + def test_int8(self): + import numpypy as numpy + + assert numpy.int8.mro() == [numpy.int8, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object] + + a = numpy.array([1, 2, 3], numpy.int8) + assert type(a[1]) is numpy.int8 + assert numpy.dtype("int8").type is numpy.int8 + + x = numpy.int8(128) + assert x == -128 + assert x != 128 + assert type(x) is numpy.int8 + assert repr(x) == "-128" + + assert type(int(x)) is int + assert int(x) == -128 + + def test_int16(self): + import numpypy as numpy + + x = numpy.int16(3) + assert x == 3 + + def test_int32(self): + import numpypy as numpy + + x = numpy.int32(23) + assert x == 23 + + def test_int_(self): + import numpypy as numpy + + assert numpy.int_ is numpy.dtype(int).type + assert numpy.int_.mro() == [numpy.int_, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object] + + def test_int64(self): + import sys + import numpypy as numpy + + if sys.maxint == 2 ** 63 -1: + assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object] + else: + assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object] + + assert numpy.dtype(numpy.int64).type is numpy.int64 + assert numpy.int64(3) == 3 + + def test_float64(self): + import numpypy as numpy + + assert numpy.float64.mro() == [numpy.float64, numpy.floating, numpy.inexact, numpy.number, numpy.generic, float, object] + + a = numpy.array([1, 2, 3], numpy.float64) + assert type(a[1]) is numpy.float64 + assert numpy.dtype(float).type is numpy.float64 + + assert numpy.float64(2.0) == 2.0 + + def test_subclass_type(self): + import numpypy as numpy + + class X(numpy.float64): + def m(self): + return self + 2 + + b = X(10) + assert type(b) is X + assert b.m() == 12 diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1,7 +1,7 @@ import py from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest -from pypy.module.micronumpy.interp_numarray import NDimArray, shape_agreement +from pypy.module.micronumpy.interp_numarray import W_NDimArray, shape_agreement from pypy.module.micronumpy import signature from pypy.interpreter.error import OperationError from pypy.conftest import gettestobjspace @@ -28,18 +28,18 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = NDimArray(100, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'F') assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = NDimArray(100, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'C') assert a.strides == [15, 3, 1] assert a.backstrides == [135, 12, 2] def test_create_slice_f(self): space = self.space - a = NDimArray(10 
* 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] @@ -58,7 +58,7 @@ def test_create_slice_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') s = a.create_slice(space, [(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] @@ -78,7 +78,7 @@ def test_slice_of_slice_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(5, 0, 0, 1)]) assert s.start == 5 s2 = s.create_slice(space, [(3, 0, 0, 1)]) @@ -96,7 +96,7 @@ def test_slice_of_slice_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') s = a.create_slice(space, [(5, 0, 0, 1)]) assert s.start == 15 * 5 s2 = s.create_slice(space, [(3, 0, 0, 1)]) @@ -114,7 +114,7 @@ def test_negative_step_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] @@ -122,14 +122,14 @@ def test_negative_step_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') s = a.create_slice(space, [(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] def test_index_of_single_item_f(self): - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 + 2 * 10 + 2 * 50 s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) @@ -139,7 +139,7 @@ assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 1)) def test_index_of_single_item_c(self): - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 * 3 * 5 + 2 * 3 + 2 s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) @@ -160,6 +160,21 @@ class AppTestNumArray(BaseNumpyAppTest): + def test_ndarray(self): + from numpypy import ndarray, array, dtype + + assert type(ndarray) is type + assert type(array) is not type + a = ndarray((2, 3)) + assert a.shape == (2, 3) + assert a.dtype == dtype(float) + + raises(TypeError, ndarray, [[1], [2], [3]]) + + a = ndarray(3, dtype=int) + assert a.shape == (3,) + assert a.dtype is dtype(int) + def test_type(self): from numpypy import array ar = array(range(5)) @@ -359,11 +374,11 @@ assert r[i] == i + 3 def test_add_list(self): - from numpypy import array + from numpypy import array, ndarray a = array(range(5)) b = list(reversed(range(5))) c = a + b - assert isinstance(c, array) + assert isinstance(c, ndarray) for i in range(5): assert c[i] == 4 @@ -719,7 +734,7 @@ assert b[i] == 2.5 * a[i] def test_dtype_guessing(self): - from numpypy import array, dtype + from numpypy import array, dtype, float64, int8, bool_ assert array([True]).dtype is dtype(bool) assert array([True, False]).dtype is dtype(bool) @@ -729,6 +744,10 @@ assert array([1.2, True]).dtype is dtype(float) assert array([1.2, 5]).dtype is 
dtype(float) assert array([]).dtype is dtype(float) + assert array([float64(2)]).dtype is dtype(float) + assert array([int8(3)]).dtype is dtype("int8") + assert array([bool_(True)]).dtype is dtype(bool) + assert array([bool_(True), 3.0]).dtype is dtype(float) def test_comparison(self): import operator @@ -1018,10 +1037,10 @@ b = a[0].copy() assert (b == zeros(10)).all() -class AppTestSupport(object): +class AppTestSupport(BaseNumpyAppTest): def setup_class(cls): import struct - cls.space = gettestobjspace(usemodules=('micronumpy',)) + BaseNumpyAppTest.setup_class.im_func(cls) cls.w_data = cls.space.wrap(struct.pack('dddd', 1, 2, 3, 4)) def test_fromstring(self): diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -8,7 +8,7 @@ assert isinstance(add, ufunc) assert repr(add) == "" - assert repr(ufunc) == "" + assert repr(ufunc) == "" def test_ufunc_attrs(self): from numpypy import add, multiply, sin @@ -37,36 +37,36 @@ assert minimum(2.0, 3.0) == 2.0 def test_sequence(self): - from numpypy import array, negative, minimum + from numpypy import array, ndarray, negative, minimum a = array(range(3)) b = [2.0, 1.0, 0.0] c = 1.0 b_neg = negative(b) - assert isinstance(b_neg, array) + assert isinstance(b_neg, ndarray) for i in range(3): assert b_neg[i] == -b[i] min_a_b = minimum(a, b) - assert isinstance(min_a_b, array) + assert isinstance(min_a_b, ndarray) for i in range(3): assert min_a_b[i] == min(a[i], b[i]) min_b_a = minimum(b, a) - assert isinstance(min_b_a, array) + assert isinstance(min_b_a, ndarray) for i in range(3): assert min_b_a[i] == min(a[i], b[i]) min_a_c = minimum(a, c) - assert isinstance(min_a_c, array) + assert isinstance(min_a_c, ndarray) for i in range(3): assert min_a_c[i] == min(a[i], c) min_c_a = minimum(c, a) - assert isinstance(min_c_a, array) + assert isinstance(min_c_a, ndarray) for i in range(3): assert min_c_a[i] == min(a[i], c) min_b_c = minimum(b, c) - assert isinstance(min_b_c, array) + assert isinstance(min_b_c, ndarray) for i in range(3): assert min_b_c[i] == min(b[i], c) min_c_b = minimum(c, b) - assert isinstance(min_c_b, array) + assert isinstance(min_c_b, ndarray) for i in range(3): assert min_c_b[i] == min(b[i], c) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -8,11 +8,11 @@ from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.metainterp.warmspot import reset_stats -from pypy.module.micronumpy import interp_ufuncs, signature +from pypy.module.micronumpy import interp_boxes, interp_ufuncs, signature from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, FloatObject, IntObject, BoolObject, Parser, InterpreterState) -from pypy.module.micronumpy.interp_numarray import NDimArray, NDimSlice,\ - BaseArray +from pypy.module.micronumpy.interp_numarray import (W_NDimArray, NDimSlice, + BaseArray) from pypy.rlib.nonconst import NonConstant from pypy.rpython.annlowlevel import llstr, hlstr @@ -48,17 +48,15 @@ def f(i): interp = InterpreterState(codes[i]) interp.run(space) - res = interp.results[-1] - assert isinstance(res, BaseArray) - w_res = res.eval(res.start_iter()).wrap(interp.space) - if isinstance(w_res, BoolObject): - return float(w_res.boolval) - elif isinstance(w_res, FloatObject): - return 
w_res.floatval - elif isinstance(w_res, IntObject): - return w_res.intval - else: - return -42. + w_res = interp.results[-1] + if isinstance(w_res, BaseArray): + w_res = w_res.eval(w_res.start_iter()) + + if isinstance(w_res, interp_boxes.W_Float64Box): + return w_res.value + elif isinstance(w_res, interp_boxes.W_BoolBox): + return float(w_res.value) + raise TypeError(w_res) if self.graph is None: interp, graph = self.meta_interp(f, [i], @@ -80,9 +78,9 @@ def test_add(self): result = self.run("add") - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, 'int_add': 3, - 'int_ge': 1, 'guard_false': 1, 'jump': 1}) + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, + 'int_ge': 1, 'guard_false': 1, 'jump': 1}) assert result == 3 + 3 def define_float_add(): @@ -94,9 +92,9 @@ def test_floatadd(self): result = self.run("float_add") assert result == 3 + 3 - self.check_simple_loop({"getarrayitem_raw": 1, "float_add": 1, - "setarrayitem_raw": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 1, "float_add": 1, + "setinteriorfield_raw": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) def define_sum(): return """ @@ -108,9 +106,9 @@ def test_sum(self): result = self.run("sum") assert result == 2 * sum(range(30)) - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 2, - "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 2, + "int_add": 2, "int_ge": 1, "guard_false": 1, + "jump": 1}) def define_prod(): return """ @@ -125,9 +123,9 @@ for i in range(30): expected *= i * 2 assert result == expected - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) def test_max(self): py.test.skip("broken, investigate") @@ -138,9 +136,9 @@ max(b) """) assert result == 256 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 1, + "int_lt": 1, "guard_true": 1, "jump": 1}) def test_min(self): py.test.skip("broken, investigate") @@ -151,9 +149,9 @@ min(b) """) assert result == -24 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 1, + "int_lt": 1, "guard_true": 1, "jump": 1}) def define_any(): return """ @@ -166,10 +164,10 @@ def test_any(self): result = self.run("any") assert result == 1 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_ne": 1, "int_add": 2, - "int_ge": 1, "jump": 1, - "guard_false": 2}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_ne": 1, "int_add": 2, + "int_ge": 1, "jump": 1, + "guard_false": 2}) def define_already_forced(): return """ @@ -188,10 +186,10 @@ # sure it was optimized correctly. # XXX the comment above is wrong now. 
We need preferrably a way to # count the two loops separately - self.check_resops({'setarrayitem_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 35, + self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, - 'getarrayitem_raw': 4, 'float_add': 2, 'guard_false': 4, + 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, 'guard_value': 2}) def define_ufunc(): @@ -205,10 +203,9 @@ def test_ufunc(self): result = self.run("ufunc") assert result == -6 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, "float_neg": 1, - "setarrayitem_raw": 1, "int_add": 3, - "int_ge": 1, "guard_false": 1, "jump": 1, - }) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_neg": 1, + "setinteriorfield_raw": 1, "int_add": 3, + "int_ge": 1, "guard_false": 1, "jump": 1}) def define_specialization(): return """ @@ -246,9 +243,9 @@ def test_slice(self): result = self.run("slice") assert result == 18 - self.check_simple_loop({'getarrayitem_raw': 2, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_ge': 1, 'guard_false': 1, 'jump': 1}) @@ -265,8 +262,8 @@ def test_slice2(self): result = self.run("slice2") assert result == 15 - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, 'int_add': 3, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_ge': 1, 'guard_false': 1, 'jump': 1}) def define_multidim(): @@ -279,11 +276,11 @@ def test_multidim(self): result = self.run('multidim') assert result == 8 - self.check_simple_loop({'float_add': 1, 'getarrayitem_raw': 2, - 'guard_false': 1, 'int_add': 3, 'int_ge': 1, - 'jump': 1, 'setarrayitem_raw': 1}) # int_add might be 1 here if we try slightly harder with # reusing indexes or some optimization + self.check_simple_loop({'float_add': 1, 'getinteriorfield_raw': 2, + 'guard_false': 1, 'int_add': 3, 'int_ge': 1, + 'jump': 1, 'setinteriorfield_raw': 1}) def define_multidim_slice(): return """ @@ -329,18 +326,18 @@ result = self.run("setslice") assert result == 11.0 self.check_loop_count(1) - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add' : 1, - 'setarrayitem_raw': 1, 'int_add': 3, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_eq': 1, 'guard_false': 1, 'jump': 1}) class TestNumpyOld(LLJitMixin): def setup_class(cls): py.test.skip("old") from pypy.module.micronumpy.compile import FakeSpace - from pypy.module.micronumpy.interp_dtype import W_Float64Dtype + from pypy.module.micronumpy.interp_dtype import get_dtype_cache cls.space = FakeSpace() - cls.float64_dtype = cls.space.fromcache(W_Float64Dtype) + cls.float64_dtype = get_dtype_cache(cls.space).w_float64dtype def test_int32_sum(self): py.test.skip("pypy/jit/backend/llimpl.py needs to be changed to " @@ -355,7 +352,7 @@ dtype = float64_dtype else: dtype = int32_dtype - ar = NDimArray(n, [n], dtype=dtype) + ar = W_NDimArray(n, [n], dtype=dtype) i = 0 while i < n: ar.get_concrete().setitem(i, int32_dtype.box(7)) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/types.py @@ -0,0 +1,389 @@ +import functools +import math + +from pypy.module.micronumpy import interp_boxes +from pypy.objspace.std.floatobject import 
float2string +from pypy.rlib import rfloat, libffi, clibffi +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import LONG_BIT, widen +from pypy.rpython.lltypesystem import lltype, rffi + + +def simple_unary_op(func): + specialize.argtype(1)(func) + @functools.wraps(func) + def dispatcher(self, v): + return self.box( + func( + self, + self.for_computation(self.unbox(v)) + ) + ) + return dispatcher + +def simple_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return self.box( + func( + self, + self.for_computation(self.unbox(v1)), + self.for_computation(self.unbox(v2)), + ) + ) + return dispatcher + +def raw_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return func(self, + self.for_computation(self.unbox(v1)), + self.for_computation(self.unbox(v2)) + ) + return dispatcher + +class BaseType(object): + def _unimplemented_ufunc(self, *args): + raise NotImplementedError + # add = sub = mul = div = mod = pow = eq = ne = lt = le = gt = ge = max = \ + # min = copysign = pos = neg = abs = sign = reciprocal = fabs = floor = \ + # exp = sin = cos = tan = arcsin = arccos = arctan = arcsinh = \ + # arctanh = _unimplemented_ufunc + +class Primitive(object): + _mixin_ = True + def get_element_size(self): + return rffi.sizeof(self.T) + + @specialize.argtype(1) + def box(self, value): + return self.BoxType(rffi.cast(self.T, value)) + + def unbox(self, box): + assert isinstance(box, self.BoxType) + return box.value + + def coerce(self, space, w_item): + if isinstance(w_item, self.BoxType): + return w_item + return self.coerce_subtype(space, space.gettypefor(self.BoxType), w_item) + + def coerce_subtype(self, space, w_subtype, w_item): + # XXX: ugly + w_obj = space.allocate_instance(self.BoxType, w_subtype) + assert isinstance(w_obj, self.BoxType) + w_obj.__init__(self._coerce(space, w_item).value) + return w_obj + + def _coerce(self, space, w_item): + raise NotImplementedError + + def read(self, storage, width, i, offset): + return self.box(libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset + )) + + def store(self, storage, width, i, offset, box): + value = self.unbox(box) + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value + ) + + def fill(self, storage, width, box, start, stop, offset): + value = self.unbox(box) + for i in xrange(start, stop): + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value + ) + + @simple_binary_op + def add(self, v1, v2): + return v1 + v2 + + @simple_binary_op + def sub(self, v1, v2): + return v1 - v2 + + @simple_binary_op + def mul(self, v1, v2): + return v1 * v2 + + @simple_unary_op + def pos(self, v): + return +v + + @simple_unary_op + def neg(self, v): + return -v + + @simple_unary_op + def abs(self, v): + return abs(v) + + @raw_binary_op + def eq(self, v1, v2): + return v1 == v2 + + @raw_binary_op + def ne(self, v1, v2): + return v1 != v2 + + @raw_binary_op + def lt(self, v1, v2): + return v1 < v2 + + @raw_binary_op + def le(self, v1, v2): + return v1 <= v2 + + @raw_binary_op + def gt(self, v1, v2): + return v1 > v2 + + @raw_binary_op + def ge(self, v1, v2): + return v1 >= v2 + + def bool(self, v): + return bool(self.for_computation(self.unbox(v))) + + @simple_binary_op + def max(self, v1, v2): + return max(v1, v2) + + @simple_binary_op + def min(self, v1, v2): + return min(v1, v2) + +class Bool(BaseType, 
Primitive): + T = lltype.Bool + BoxType = interp_boxes.W_BoolBox + + True = BoxType(True) + False = BoxType(False) + + @specialize.argtype(1) + def box(self, value): + box = Primitive.box(self, value) + if box.value: + return self.True + else: + return self.False + + def coerce_subtype(self, space, w_subtype, w_item): + # Doesn't return subclasses so it can return the constants. + return self._coerce(space, w_item) + + def _coerce(self, space, w_item): + return self.box(space.is_true(w_item)) + + def str_format(self, box): + value = self.unbox(box) + return "True" if value else "False" + + def for_computation(self, v): + return int(v) + +class Integer(Primitive): + _mixin_ = True + + def _coerce(self, space, w_item): + return self.box(space.int_w(space.int(w_item))) + + def str_format(self, box): + value = self.unbox(box) + return str(self.for_computation(value)) + + def for_computation(self, v): + return widen(v) + + @simple_binary_op + def div(self, v1, v2): + if v2 == 0: + return 0 + return v1 / v2 + + @simple_binary_op + def mod(self, v1, v2): + return v1 % v2 + + @simple_binary_op + def pow(self, v1, v2): + res = 1 + while v2 > 0: + if v2 & 1: + res *= v1 + v2 >>= 1 + if v2 == 0: + break + v1 *= v1 + return res + + @simple_unary_op + def sign(self, v): + if v > 0: + return 1 + elif v < 0: + return -1 + else: + assert v == 0 + return 0 + +class Int8(BaseType, Integer): + T = rffi.SIGNEDCHAR + BoxType = interp_boxes.W_Int8Box + +class UInt8(BaseType, Integer): + T = rffi.UCHAR + BoxType = interp_boxes.W_UInt8Box + +class Int16(BaseType, Integer): + T = rffi.SHORT + BoxType = interp_boxes.W_Int16Box + +class UInt16(BaseType, Integer): + T = rffi.USHORT + BoxType = interp_boxes.W_UInt16Box + +class Int32(BaseType, Integer): + T = rffi.INT + BoxType = interp_boxes.W_Int32Box + +class UInt32(BaseType, Integer): + T = rffi.UINT + BoxType = interp_boxes.W_UInt32Box + +class Long(BaseType, Integer): + T = rffi.LONG + BoxType = interp_boxes.W_LongBox + +class ULong(BaseType, Integer): + T = rffi.ULONG + BoxType = interp_boxes.W_ULongBox + +class Int64(BaseType, Integer): + T = rffi.LONGLONG + BoxType = interp_boxes.W_Int64Box + +class UInt64(BaseType, Integer): + T = rffi.ULONGLONG + BoxType = interp_boxes.W_UInt64Box + +class Float(Primitive): + _mixin_ = True + + def _coerce(self, space, w_item): + return self.box(space.float_w(space.float(w_item))) + + def str_format(self, box): + value = self.unbox(box) + return float2string(self.for_computation(value), "g", rfloat.DTSF_STR_PRECISION) + + def for_computation(self, v): + return float(v) + + @simple_binary_op + def div(self, v1, v2): + try: + return v1 / v2 + except ZeroDivisionError: + if v1 == v2 == 0.0: + return rfloat.NAN + return rfloat.copysign(rfloat.INFINITY, v1 * v2) + + @simple_binary_op + def mod(self, v1, v2): + return math.fmod(v1, v2) + + @simple_binary_op + def pow(self, v1, v2): + return math.pow(v1, v2) + + @simple_binary_op + def copysign(self, v1, v2): + return math.copysign(v1, v2) + + @simple_unary_op + def sign(self, v): + if v == 0.0: + return 0.0 + return rfloat.copysign(1.0, v) + + @simple_unary_op + def fabs(self, v): + return math.fabs(v) + + @simple_unary_op + def reciprocal(self, v): + if v == 0.0: + return rfloat.copysign(rfloat.INFINITY, v) + return 1.0 / v + + @simple_unary_op + def floor(self, v): + return math.floor(v) + + @simple_unary_op + def exp(self, v): + try: + return math.exp(v) + except OverflowError: + return rfloat.INFINITY + + @simple_unary_op + def sin(self, v): + return math.sin(v) + + 
@simple_unary_op + def cos(self, v): + return math.cos(v) + + @simple_unary_op + def tan(self, v): + return math.tan(v) + + @simple_unary_op + def arcsin(self, v): + if not -1.0 <= v <= 1.0: + return rfloat.NAN + return math.asin(v) + + @simple_unary_op + def arccos(self, v): + if not -1.0 <= v <= 1.0: + return rfloat.NAN + return math.acos(v) + + @simple_unary_op + def arctan(self, v): + return math.atan(v) + + @simple_unary_op + def arcsinh(self, v): + return math.asinh(v) + + @simple_unary_op + def arctanh(self, v): + if v == 1.0 or v == -1.0: + return math.copysign(rfloat.INFINITY, v) + if not -1.0 < v < 1.0: + return rfloat.NAN + return math.atanh(v) + + @simple_unary_op + def sqrt(self, v): + try: + return math.sqrt(v) + except ValueError: + return rfloat.NAN + + +class Float32(BaseType, Float): + T = rffi.FLOAT + BoxType = interp_boxes.W_Float32Box + +class Float64(BaseType, Float): + T = rffi.DOUBLE + BoxType = interp_boxes.W_Float64Box \ No newline at end of file diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -326,6 +326,8 @@ to the added item. W_Lists do not switch back to EmptyListStrategy when becoming empty again.""" + _applevel_repr = "empty" + def __init__(self, space): ListStrategy.__init__(self, space) # cache an empty list that is used whenever getitems is called (i.e. sorting) @@ -426,6 +428,8 @@ On any operation destroying the range (inserting, appending non-ints) the strategy is switched to IntegerListStrategy.""" + _applevel_repr = "range" + def switch_to_integer_strategy(self, w_list): items = self._getitems_range(w_list, False) strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) @@ -864,6 +868,7 @@ class ObjectListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None + _applevel_repr = "object" def unwrap(self, w_obj): return w_obj @@ -892,6 +897,7 @@ class IntegerListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0 + _applevel_repr = "int" def wrap(self, intval): return self.space.wrap(intval) @@ -918,6 +924,7 @@ class FloatListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0.0 + _applevel_repr = "float" def wrap(self, floatval): return self.space.wrap(floatval) @@ -944,6 +951,7 @@ class StringListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None + _applevel_repr = "str" def wrap(self, stringval): return self.space.wrap(stringval) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -102,6 +102,7 @@ 'instancetypedef', 'terminator', '_version_tag?', + 'name?', ] # for config.objspace.std.getattributeshortcut diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -738,3 +738,29 @@ return hop.genop('jit_marker', vlist, resulttype=lltype.Void) +def record_known_class(value, cls): + """ + Assure the JIT that value is an instance of cls. This is not a precise + class check, unlike a guard_class. 
+ """ + assert isinstance(value, cls) + + +class Entry(ExtRegistryEntry): + _about_ = record_known_class + + def compute_result_annotation(self, s_inst, s_cls): + from pypy.annotation import model as annmodel + assert s_cls.is_constant() + assert not s_inst.can_be_none() + assert isinstance(s_inst, annmodel.SomeInstance) + + def specialize_call(self, hop): + from pypy.rpython.lltypesystem import lltype, rclass + classrepr = rclass.get_type_repr(hop.rtyper) + + hop.exception_cannot_occur() + v_inst = hop.inputarg(hop.args_r[0], arg=0) + v_cls = hop.inputarg(classrepr, arg=1) + return hop.genop('jit_record_known_class', [v_inst, v_cls], + resulttype=lltype.Void) diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -411,6 +411,10 @@ def getaddressindll(self, name): return dlsym(self.lib, name) +# These specialize.call_location's should really be specialize.arg(0), however +# you can't hash a pointer obj, which the specialize machinery wants to do. +# Given the present usage of these functions, it's good enough. + at specialize.call_location() @jit.oopspec("libffi_array_getitem(ffitype, width, addr, index, offset)") def array_getitem(ffitype, width, addr, index, offset): for TYPE, ffitype2 in clibffi.ffitype_map: @@ -420,6 +424,7 @@ return rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] assert False + at specialize.call_location() @jit.oopspec("libffi_array_setitem(ffitype, width, addr, index, offset, value)") def array_setitem(ffitype, width, addr, index, offset, value): for TYPE, ffitype2 in clibffi.ffitype_map: @@ -428,4 +433,4 @@ addr = rffi.ptradd(addr, offset) rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] = value return - assert False \ No newline at end of file + assert False diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -91,9 +91,18 @@ return decorated_func + def call_location(self): + """ Specializes the function for each call site. 
+ """ + def decorated_func(func): + func._annspecialcase_ = "specialize:call_location" + return func + + return decorated_func + def _wrap(self, args): return "("+','.join([repr(arg) for arg in args]) +")" - + specialize = _Specialize() def enforceargs(*args): @@ -125,7 +134,7 @@ def __hash__(self): raise TypeError("Symbolics are not hashable!") - + def __nonzero__(self): raise TypeError("Symbolics are not comparable") @@ -155,7 +164,7 @@ def lltype(self): from pypy.rpython.lltypesystem import lltype return lltype.Signed - + malloc_zero_filled = CDefinedIntSymbolic('MALLOC_ZERO_FILLED', default=0) running_on_llinterp = CDefinedIntSymbolic('RUNNING_ON_LLINTERP', default=1) # running_on_llinterp is meant to have the value 0 in all backends @@ -221,7 +230,7 @@ def compute_result_annotation(self, s_sizehint): from pypy.annotation.model import SomeInteger - + assert isinstance(s_sizehint, SomeInteger) return self.bookkeeper.newlist() diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -548,6 +548,9 @@ def op_jit_marker(self, *args): pass + def op_jit_record_known_class(self, *args): + pass + def op_get_exception_addr(self, *args): raise NotImplementedError diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -430,6 +430,7 @@ 'jit_force_virtual': LLOp(canrun=True), 'jit_is_virtual': LLOp(canrun=True), 'jit_force_quasi_immutable': LLOp(canrun=True), + 'jit_record_known_class' : LLOp(canrun=True), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canmallocgc=True), diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -548,6 +548,9 @@ def op_jit_force_quasi_immutable(*args): pass +def op_jit_record_known_class(x, y): + pass + def op_get_group_member(TYPE, grpptr, memberoffset): from pypy.rpython.lltypesystem import llgroup assert isinstance(memberoffset, llgroup.GroupMemberOffset) diff --git a/pypy/translator/c/src/support.h b/pypy/translator/c/src/support.h --- a/pypy/translator/c/src/support.h +++ b/pypy/translator/c/src/support.h @@ -11,6 +11,7 @@ #endif /* MIN */ #define RUNNING_ON_LLINTERP 0 +#define OP_JIT_RECORD_KNOWN_CLASS(i, c, r) /* nothing */ #define FAIL_EXCEPTION(exc, msg) \ { \ From noreply at buildbot.pypy.org Mon Dec 5 22:42:03 2011 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 Dec 2011 22:42:03 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: fix for renamed class Message-ID: <20111205214203.492D98205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50184:0f00b31fe95a Date: 2011-12-04 21:48 +0200 http://bitbucket.org/pypy/pypy/changeset/0f00b31fe95a/ Log: fix for renamed class diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -500,7 +500,7 @@ dtype = interp_ufuncs.find_binop_result_dtype(space, self.find_dtype(), w_other.find_dtype()) #TODO: what should the order be? C or F? 
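
For context on the descr_dot hunk that continues just below: a 2-D dot product allocates an output whose shape combines the rows of the left operand with the columns of the right one, with the result dtype picked by find_binop_result_dtype. A plain-Python sketch of that logic, not the pypy implementation:

    def naive_dot(a, b):
        # a is n x m, b is m x k; the output is n x k.
        n, m = len(a), len(a[0])
        m2, k = len(b), len(b[0])
        assert m == m2, "inner dimensions must agree"
        out = [[0] * k for _ in range(n)]
        for i in range(n):
            for j in range(k):
                for t in range(m):
                    out[i][j] += a[i][t] * b[t][j]
        return out

    # Matches the value the test_compile dot test further down expects at [0][0]:
    assert naive_dot([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [[19, 22], [43, 50]]
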
-        arr = NDimArray(out_size, out_shape, dtype=dtype)
+        arr = W_NDimArray(out_size, out_shape, dtype=dtype)
         out_iter = ArrayIterator(out_size)
         #TODO: invalidate self, w_other with arr
         me_iter = BroadcastIterator(self,self.shape[:-1] + [1])

From noreply at buildbot.pypy.org  Mon Dec 5 22:42:04 2011
From: noreply at buildbot.pypy.org (mattip)
Date: Mon, 5 Dec 2011 22:42:04 +0100 (CET)
Subject: [pypy-commit] pypy matrixmath-dot: Merge with default
Message-ID: <20111205214204.A1E198205C@wyvern.cs.uni-duesseldorf.de>

Author: mattip
Branch: matrixmath-dot
Changeset: r50185:55693d8b2b89
Date: 2011-12-04 21:49 +0200
http://bitbucket.org/pypy/pypy/changeset/55693d8b2b89/

Log: Merge with default

diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py
--- a/lib_pypy/_sha.py
+++ b/lib_pypy/_sha.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-# -*- coding: iso-8859-1
+# -*- coding: iso-8859-1 -*-
 # Note that PyPy contains also a built-in module 'sha' which will hide
 # this one if compiled in.
diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py
--- a/lib_pypy/itertools.py
+++ b/lib_pypy/itertools.py
@@ -25,7 +25,7 @@
 __all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter',
            'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap',
-           'takewhile', 'tee']
+           'takewhile', 'tee', 'compress', 'product']
 try: from __pypy__ import builtinify
 except ImportError: builtinify = lambda f: f
diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py
--- a/pypy/module/_codecs/interp_codecs.py
+++ b/pypy/module/_codecs/interp_codecs.py
@@ -67,10 +67,7 @@
         if self.unicodedata_handler:
             return self.unicodedata_handler
         try:
-            w_builtin = space.getbuiltinmodule('__builtin__')
-            w_import = space.getattr(w_builtin, space.wrap("__import__"))
-            w_unicodedata = space.call_function(w_import,
-                space.wrap("unicodedata"))
+            w_unicodedata = space.getbuiltinmodule("unicodedata")
             w_getcode = space.getattr(w_unicodedata, space.wrap("_get_code"))
         except OperationError:
             return None
diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py
--- a/pypy/module/_multiprocessing/interp_connection.py
+++ b/pypy/module/_multiprocessing/interp_connection.py
@@ -100,7 +100,6 @@
         res, newbuf = self.do_recv_string(
             space, self.BUFFER_SIZE, maxlength)
-        res = intmask(res)    # XXX why?
try: if newbuf: w_received = space.wrap(rffi.charpsize2str(newbuf, res)) @@ -413,7 +410,7 @@ self.buffer, min(self.BUFFER_SIZE, buflength), read_ptr, rffi.NULL) if result: - return read_ptr[0], lltype.nullptr(rffi.CCHARP.TO) + return intmask(read_ptr[0]), lltype.nullptr(rffi.CCHARP.TO) err = rwin32.GetLastError() if err == ERROR_BROKEN_PIPE: @@ -476,7 +473,7 @@ block = timeout < 0 if not block: # XXX does not check for overflow - deadline = _GetTickCount() + int(1000 * timeout + 0.5) + deadline = intmask(_GetTickCount()) + int(1000 * timeout + 0.5) else: deadline = 0 @@ -500,7 +497,7 @@ return True if not block: - now = _GetTickCount() + now = intmask(_GetTickCount()) if now > deadline: return False diff = deadline - now diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py --- a/pypy/module/_multiprocessing/interp_semaphore.py +++ b/pypy/module/_multiprocessing/interp_semaphore.py @@ -235,7 +235,7 @@ elif timeout >= 0.5 * rwin32.INFINITE: # 25 days raise OperationError(space.w_OverflowError, space.wrap("timeout is too large")) - full_msecs = int(timeout + 0.5) + full_msecs = r_uint(int(timeout + 0.5)) # check whether we can acquire without blocking res = rwin32.WaitForSingleObject(self.handle, 0) @@ -243,7 +243,7 @@ if res != rwin32.WAIT_TIMEOUT: return True - msecs = r_uint(full_msecs) + msecs = full_msecs start = _GetTickCount() while True: @@ -269,7 +269,7 @@ ticks = _GetTickCount() if r_uint(ticks - start) >= full_msecs: return False - msecs = r_uint(full_msecs - (ticks - start)) + msecs = full_msecs - r_uint(ticks - start) # handle result if res != rwin32.WAIT_TIMEOUT: diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -543,6 +543,7 @@ from pypy.rlib.rwin32 import GetLastError return space.wrap(GetLastError()) -def set_last_error(space, w_error): + at unwrap_spec(error=int) +def set_last_error(space, error): from pypy.rlib.rwin32 import SetLastError - SetLastError(space.uint_w(w_error)) + SetLastError(error) diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -418,7 +418,7 @@ if _MSVC: def invalid_socket(fd): return fd == INVALID_SOCKET - INVALID_SOCKET = cConfig.INVALID_SOCKET + INVALID_SOCKET = intmask(cConfig.INVALID_SOCKET) else: def invalid_socket(fd): return fd < 0 diff --git a/pypy/rlib/rwin32.py b/pypy/rlib/rwin32.py --- a/pypy/rlib/rwin32.py +++ b/pypy/rlib/rwin32.py @@ -98,8 +98,13 @@ INVALID_HANDLE_VALUE = rffi.cast(HANDLE, -1) PFILETIME = rffi.CArrayPtr(FILETIME) - GetLastError = winexternal('GetLastError', [], DWORD, threadsafe=False) - SetLastError = winexternal('SetLastError', [DWORD], lltype.Void) + _GetLastError = winexternal('GetLastError', [], DWORD, threadsafe=False) + _SetLastError = winexternal('SetLastError', [DWORD], lltype.Void) + + def GetLastError(): + return rffi.cast(lltype.Signed, _GetLastError()) + def SetLastError(err): + _SetLastError(rffi.cast(DWORD, err)) # In tests, the first call to GetLastError is always wrong, because error # is hidden by operations in ll2ctypes. Call it now. 
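
The changes above, intmask(...), r_uint(...), and the _GetLastError/_SetLastError wrappers that cast their DWORD results, all apply one rule for win64: convert unsigned C values to signed machine integers exactly once, at the FFI boundary, so the surrounding code can keep working with signed ints. A rough plain-Python illustration of what that masking does; intmask32 here is a stand-in, not the rlib helper:

    def intmask32(value):
        # Reinterpret an unsigned 32-bit value as a signed one.
        value &= 0xFFFFFFFF
        return value - 0x100000000 if value >= 0x80000000 else value

    # An error code of 0xFFFFFFFF comes back as the -1 callers expect,
    # while small codes pass through unchanged.
    assert intmask32(0xFFFFFFFF) == -1
    assert intmask32(5) == 5
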
@@ -184,12 +189,12 @@ msglen = FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, None, - code, + rffi.cast(DWORD, code), DEFAULT_LANGUAGE, rffi.cast(rffi.CCHARP, buf), 0, None) - if msglen <= 2 or msglen > sys.maxint: + if msglen <= 2: # includes the case msglen < 0 return fake_FormatError(code) # FormatMessage always appends \r\n. diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -1773,7 +1773,7 @@ @registering(rwin32.FormatError) def register_rwin32_FormatError(self): - return extdef([rwin32.DWORD], str, + return extdef([lltype.Signed], str, "rwin32_FormatError", llimpl=rwin32.llimpl_FormatError, ooimpl=rwin32.fake_FormatError) diff --git a/pypy/rpython/module/ll_os_stat.py b/pypy/rpython/module/ll_os_stat.py --- a/pypy/rpython/module/ll_os_stat.py +++ b/pypy/rpython/module/ll_os_stat.py @@ -12,6 +12,7 @@ from pypy.rpython.tool import rffi_platform as platform from pypy.rpython.lltypesystem.rtupletype import TUPLE_TYPE from pypy.rlib import rposix +from pypy.rlib.rarithmetic import intmask from pypy.rlib.objectmodel import specialize from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.annlowlevel import hlstr @@ -442,20 +443,19 @@ # Helper functions for win32 def make_longlong(high, low): - return (lltype.r_longlong(high) << 32) + lltype.r_longlong(low) + return (rffi.r_longlong(high) << 32) + rffi.r_longlong(low) # Seconds between 1.1.1601 and 1.1.1970 -secs_between_epochs = lltype.r_longlong(11644473600) +secs_between_epochs = rffi.r_longlong(11644473600) def FILE_TIME_to_time_t_nsec(filetime): ft = make_longlong(filetime.c_dwHighDateTime, filetime.c_dwLowDateTime) # FILETIME is in units of 100 nsec nsec = (ft % 10000000) * 100 time = (ft / 10000000) - secs_between_epochs - return time, nsec + return intmask(time), intmask(nsec) def time_t_to_FILE_TIME(time, filetime): - ft = lltype.r_longlong((time + secs_between_epochs) * 10000000) - filetime.c_dwHighDateTime = lltype.r_uint(ft >> 32) - filetime.c_dwLowDateTime = lltype.r_uint(ft & lltype.r_uint(-1)) - + ft = (rffi.r_longlong(time) + secs_between_epochs) * 10000000 + filetime.c_dwHighDateTime = rffi.r_uint(ft >> 32) + filetime.c_dwLowDateTime = rffi.r_uint(ft) # masking off high bits From noreply at buildbot.pypy.org Mon Dec 5 22:42:05 2011 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 Dec 2011 22:42:05 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: bad merge Message-ID: <20111205214205.C6B468205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50186:55f149559e25 Date: 2011-12-04 22:06 +0200 http://bitbucket.org/pypy/pypy/changeset/55f149559e25/ Log: bad merge diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -3,13 +3,16 @@ It should not be imported by the module itself """ +import re + from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root -from pypy.module.micronumpy.interp_dtype import W_Float64Dtype, W_BoolDtype +from pypy.module.micronumpy import interp_boxes +from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.module.micronumpy.interp_numarray import (Scalar, BaseArray, - descr_new_array, scalar_w, NDimArray) + scalar_w, W_NDimArray, array) from pypy.module.micronumpy import interp_ufuncs -from pypy.rlib.objectmodel import specialize -import re +from pypy.rlib.objectmodel import 
specialize, instantiate + class BogusBytecode(Exception): pass @@ -49,15 +52,12 @@ def __init__(self): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild - self.w_float64dtype = W_Float64Dtype(self) def issequence_w(self, w_obj): - return isinstance(w_obj, ListObject) or isinstance(w_obj, NDimArray) + return isinstance(w_obj, ListObject) or isinstance(w_obj, W_NDimArray) def isinstance_w(self, w_obj, w_tp): - if w_obj.tp == w_tp: - return True - return False + return w_obj.tp == w_tp def decode_index4(self, w_idx, size): if isinstance(w_idx, IntObject): @@ -98,8 +98,10 @@ fixedview = listview def float(self, w_obj): - assert isinstance(w_obj, FloatObject) - return w_obj + if isinstance(w_obj, FloatObject): + return w_obj + assert isinstance(w_obj, interp_boxes.W_GenericBox) + return self.float(w_obj.descr_float(self)) def float_w(self, w_obj): assert isinstance(w_obj, FloatObject) @@ -113,7 +115,10 @@ raise NotImplementedError def int(self, w_obj): - return w_obj + if isinstance(w_obj, IntObject): + return w_obj + assert isinstance(w_obj, interp_boxes.W_GenericBox) + return self.int(w_obj.descr_int(self)) def is_true(self, w_obj): assert isinstance(w_obj, BoolObject) @@ -136,6 +141,9 @@ assert isinstance(what, tp) return what + def allocate_instance(self, klass, w_subtype): + return instantiate(klass) + def len_w(self, w_obj): if isinstance(w_obj, ListObject): return len(w_obj.items) @@ -248,7 +256,7 @@ w_rhs = self.rhs.execute(interp) if not isinstance(w_lhs, BaseArray): # scalar - dtype = interp.space.fromcache(W_Float64Dtype) + dtype = get_dtype_cache(interp.space).w_float64dtype w_lhs = scalar_w(interp.space, dtype, w_lhs) assert isinstance(w_lhs, BaseArray) if self.name == '+': @@ -265,8 +273,9 @@ w_res = w_lhs.descr_getitem(interp.space, w_rhs) else: raise NotImplementedError - if not isinstance(w_res, BaseArray): - dtype = interp.space.fromcache(W_Float64Dtype) + if (not isinstance(w_res, BaseArray) and + not isinstance(w_res, interp_boxes.W_GenericBox)): + dtype = get_dtype_cache(interp.space).w_float64dtype w_res = scalar_w(interp.space, dtype, w_res) return w_res @@ -284,7 +293,7 @@ return space.wrap(self.v) def execute(self, interp): - return FloatObject(self.v) + return interp.space.wrap(self.v) class RangeConstant(Node): def __init__(self, v): @@ -292,10 +301,10 @@ def execute(self, interp): w_list = interp.space.newlist( - [interp.space.wrap(float(i)) for i in range(self.v)]) - dtype = interp.space.fromcache(W_Float64Dtype) - return descr_new_array(interp.space, None, w_list, w_dtype=dtype, - w_order=None) + [interp.space.wrap(float(i)) for i in range(self.v)] + ) + dtype = get_dtype_cache(interp.space).w_float64dtype + return array(interp.space, w_list, w_dtype=dtype, w_order=None) def __repr__(self): return 'Range(%s)' % self.v @@ -316,9 +325,8 @@ def execute(self, interp): w_list = self.wrap(interp.space) - dtype = interp.space.fromcache(W_Float64Dtype) - return descr_new_array(interp.space, None, w_list, w_dtype=dtype, - w_order=None) + dtype = get_dtype_cache(interp.space).w_float64dtype + return array(interp.space, w_list, w_dtype=dtype, w_order=None) def __repr__(self): return "[" + ", ".join([repr(item) for item in self.items]) + "]" @@ -398,9 +406,11 @@ if isinstance(w_res, BaseArray): return w_res if isinstance(w_res, FloatObject): - dtype = interp.space.fromcache(W_Float64Dtype) + dtype = get_dtype_cache(interp.space).w_float64dtype elif isinstance(w_res, BoolObject): - dtype = interp.space.fromcache(W_BoolDtype) + dtype = 
get_dtype_cache(interp.space).w_booldtype + elif isinstance(w_res, interp_boxes.W_GenericBox): + dtype = w_res.get_dtype(interp.space) else: dtype = None return scalar_w(interp.space, dtype, w_res) diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -1,6 +1,8 @@ import py -from pypy.module.micronumpy.compile import * +from pypy.module.micronumpy.compile import (numpy_compile, Assignment, + ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, + FunctionCall, FakeSpace) class TestCompiler(object): def compile(self, code): @@ -106,7 +108,7 @@ c -> 3 """ interp = self.run(code) - assert interp.results[-1].value.val == 9 + assert interp.results[-1].value == 9 def test_array_getitem(self): code = """ @@ -115,7 +117,7 @@ a + b -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 3 + 6 + assert interp.results[0].value == 3 + 6 def test_range_getitem(self): code = """ @@ -123,7 +125,7 @@ r -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_sum(self): code = """ @@ -132,7 +134,7 @@ r """ interp = self.run(code) - assert interp.results[0].value.val == 15 + assert interp.results[0].value.value == 15 def test_array_write(self): code = """ @@ -141,7 +143,7 @@ a -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 15 + assert interp.results[0].value == 15 def test_min(self): interp = self.run(""" @@ -150,7 +152,7 @@ b = a + a min(b) """) - assert interp.results[0].value.val == -24 + assert interp.results[0].value.value == -24 def test_max(self): interp = self.run(""" @@ -159,7 +161,7 @@ b = a + a max(b) """) - assert interp.results[0].value.val == 256 + assert interp.results[0].value.value == 256 def test_slice(self): interp = self.run(""" @@ -167,7 +169,7 @@ b = a -> : b -> 3 """) - assert interp.results[0].value.val == 4 + assert interp.results[0].value == 4 def test_slice_step(self): interp = self.run(""" @@ -175,7 +177,7 @@ b = a -> ::2 b -> 3 """) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_setslice(self): interp = self.run(""" @@ -185,7 +187,7 @@ a[::3] = b a -> 3 """) - assert interp.results[0].value.val == 5 + assert interp.results[0].value == 5 def test_slice2(self): @@ -196,14 +198,14 @@ b = s1 + s2 b -> 3 """) - assert interp.results[0].value.val == 15 + assert interp.results[0].value == 15 def test_multidim_getitem(self): interp = self.run(""" a = [[1,2]] a -> 0 -> 1 """) - assert interp.results[0].value.val == 2 + assert interp.results[0].value == 2 def test_multidim_getitem_2(self): interp = self.run(""" @@ -211,7 +213,7 @@ b = a + a b -> 1 -> 1 """) - assert interp.results[0].value.val == 8 + assert interp.results[0].value == 8 def test_set_slice(self): interp = self.run(""" @@ -220,7 +222,7 @@ b[:] = a + a b -> 3 """) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_set_slice2(self): interp = self.run(""" @@ -231,7 +233,7 @@ a[0:30:3] = c a -> 3 """) - assert interp.results[0].value.val == 11 + assert interp.results[0].value == 11 def test_dot(self): interp = self.run(""" a = [[1, 2], [3, 4]] @@ -239,4 +241,4 @@ c = dot(a, b) c -> 0 -> 0 """) - assert interp.results[0].value.val == 19 + assert interp.results[0].value == 19 From noreply at buildbot.pypy.org Mon Dec 5 22:42:06 2011 From: noreply at buildbot.pypy.org (mattip) 
Date: Mon, 5 Dec 2011 22:42:06 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: bad merge Message-ID: <20111205214206.E7F6C8205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50187:09f8917ecaf3 Date: 2011-12-04 22:09 +0200 http://bitbucket.org/pypy/pypy/changeset/09f8917ecaf3/ Log: bad merge diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -1,9 +1,10 @@ +import py -import py from pypy.module.micronumpy.compile import (numpy_compile, Assignment, ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, FunctionCall, FakeSpace) + class TestCompiler(object): def compile(self, code): return numpy_compile(code) From noreply at buildbot.pypy.org Mon Dec 5 22:42:08 2011 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 Dec 2011 22:42:08 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: experimental approach to dot iterator problem Message-ID: <20111205214208.18D558205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50188:885d36165f89 Date: 2011-12-04 23:22 +0200 http://bitbucket.org/pypy/pypy/changeset/885d36165f89/ Log: experimental approach to dot iterator problem diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -121,6 +121,11 @@ def get_offset(self): raise NotImplementedError +class DummyIterator(object): + '''Dummy placeholder + ''' + pass + class ArrayIterator(BaseIterator): def __init__(self, size): self.offset = 0 @@ -354,8 +359,10 @@ descr_abs = _unaryop_impl("absolute") def _binop_impl(ufunc_name): - def impl(self, space, w_other): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + def impl(self, space, w_other, w_selfiter=DummyIterator(), + w_otheriter=DummyIterator()): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_other, w_selfiter, w_otheriter]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") @@ -980,12 +987,15 @@ """ Intermediate class for performing binary operations. 
""" - def __init__(self, signature, shape, calc_dtype, res_dtype, left, right): + def __init__(self, signature, shape, calc_dtype, res_dtype, left, right, + liter = DummyIterator(), riter = DummyIterator()): # XXX do something if left.order != right.order VirtualArray.__init__(self, signature, shape, res_dtype, left.order) self.left = left self.right = right self.calc_dtype = calc_dtype + self.liter = liter + self.riter = riter self.size = 1 for s in self.shape: self.size *= s @@ -1002,8 +1012,15 @@ return self.forced_result.start_iter(res_shape) if res_shape is None: res_shape = self.shape # we still force the shape on children - return Call2Iterator(self.left.start_iter(res_shape), - self.right.start_iter(res_shape)) + if not getattr(self.liter, 'get_offest', ''): + _liter = self.left.start_iter(res_shape) + else: + _liter = self.liter + if not getattr(self.riter, 'get_offest', ''): + _riter = self.right.start_iter(res_shape) + else: + _riter = self.riter + return Call2Iterator(_liter, _riter) def _eval(self, iter): assert isinstance(iter, Call2Iterator) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -139,8 +139,10 @@ def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, convert_to_array, Scalar, shape_agreement) - - [w_lhs, w_rhs] = args_w + if len(args_w)>2: + [w_lhs, w_rhs, w_liter, w_riter] = args_w + else: + [w_lhs, w_rhs] = args_w w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) calc_dtype = find_binop_result_dtype(space, @@ -162,8 +164,12 @@ self.signature, w_lhs.signature, w_rhs.signature ]) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) - w_res = Call2(new_sig, new_shape, calc_dtype, - res_dtype, w_lhs, w_rhs) + if len(args_w)>2: + w_res = Call2(new_sig, new_shape, calc_dtype, + res_dtype, w_lhs, w_rhs, w_liter, w_riter) + else: + w_res = Call2(new_sig, new_shape, calc_dtype, + res_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) return w_res From noreply at buildbot.pypy.org Mon Dec 5 22:42:09 2011 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 Dec 2011 22:42:09 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: clean compile a bit Message-ID: <20111205214209.3C9EA8205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50189:dc6c3373f647 Date: 2011-12-05 09:02 +0200 http://bitbucket.org/pypy/pypy/changeset/dc6c3373f647/ Log: clean compile a bit diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -399,8 +399,10 @@ raise ArgumentNotAnArray if not isinstance(arr1, BaseArray): raise ArgumentNotAnArray - elif self.name == "dot": + if self.name == "dot": w_res = arr0.descr_dot(interp.space, arr1) + else: + assert False # unreachable code else: raise WrongFunctionName if isinstance(w_res, BaseArray): diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -235,6 +235,7 @@ a -> 3 """) assert interp.results[0].value == 11 + def test_dot(self): interp = self.run(""" a = [[1, 2], [3, 4]] From noreply at buildbot.pypy.org Mon Dec 5 22:42:10 2011 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 Dec 2011 22:42:10 +0100 (CET) Subject: 
[pypy-commit] pypy matrixmath-dot: Backed out changeset: 885d36165f89 Message-ID: <20111205214210.6276A8205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50190:1ae4feb97953 Date: 2011-12-05 09:03 +0200 http://bitbucket.org/pypy/pypy/changeset/1ae4feb97953/ Log: Backed out changeset: 885d36165f89 diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -121,11 +121,6 @@ def get_offset(self): raise NotImplementedError -class DummyIterator(object): - '''Dummy placeholder - ''' - pass - class ArrayIterator(BaseIterator): def __init__(self, size): self.offset = 0 @@ -359,10 +354,8 @@ descr_abs = _unaryop_impl("absolute") def _binop_impl(ufunc_name): - def impl(self, space, w_other, w_selfiter=DummyIterator(), - w_otheriter=DummyIterator()): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, - [self, w_other, w_selfiter, w_otheriter]) + def impl(self, space, w_other): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") @@ -987,15 +980,12 @@ """ Intermediate class for performing binary operations. """ - def __init__(self, signature, shape, calc_dtype, res_dtype, left, right, - liter = DummyIterator(), riter = DummyIterator()): + def __init__(self, signature, shape, calc_dtype, res_dtype, left, right): # XXX do something if left.order != right.order VirtualArray.__init__(self, signature, shape, res_dtype, left.order) self.left = left self.right = right self.calc_dtype = calc_dtype - self.liter = liter - self.riter = riter self.size = 1 for s in self.shape: self.size *= s @@ -1012,15 +1002,8 @@ return self.forced_result.start_iter(res_shape) if res_shape is None: res_shape = self.shape # we still force the shape on children - if not getattr(self.liter, 'get_offest', ''): - _liter = self.left.start_iter(res_shape) - else: - _liter = self.liter - if not getattr(self.riter, 'get_offest', ''): - _riter = self.right.start_iter(res_shape) - else: - _riter = self.riter - return Call2Iterator(_liter, _riter) + return Call2Iterator(self.left.start_iter(res_shape), + self.right.start_iter(res_shape)) def _eval(self, iter): assert isinstance(iter, Call2Iterator) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -139,10 +139,8 @@ def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, convert_to_array, Scalar, shape_agreement) - if len(args_w)>2: - [w_lhs, w_rhs, w_liter, w_riter] = args_w - else: - [w_lhs, w_rhs] = args_w + + [w_lhs, w_rhs] = args_w w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) calc_dtype = find_binop_result_dtype(space, @@ -164,12 +162,8 @@ self.signature, w_lhs.signature, w_rhs.signature ]) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) - if len(args_w)>2: - w_res = Call2(new_sig, new_shape, calc_dtype, - res_dtype, w_lhs, w_rhs, w_liter, w_riter) - else: - w_res = Call2(new_sig, new_shape, calc_dtype, - res_dtype, w_lhs, w_rhs) + w_res = Call2(new_sig, new_shape, calc_dtype, + res_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) return w_res From noreply at buildbot.pypy.org Mon Dec 5 22:42:11 2011 From: noreply at buildbot.pypy.org 
(mattip) Date: Mon, 5 Dec 2011 22:42:11 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: merge Message-ID: <20111205214211.86BC28205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50191:5a5cfa32fe70 Date: 2011-12-05 09:04 +0200 http://bitbucket.org/pypy/pypy/changeset/5a5cfa32fe70/ Log: merge diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -121,11 +121,6 @@ def get_offset(self): raise NotImplementedError -class DummyIterator(object): - '''Dummy placeholder - ''' - pass - class ArrayIterator(BaseIterator): def __init__(self, size): self.offset = 0 @@ -359,10 +354,8 @@ descr_abs = _unaryop_impl("absolute") def _binop_impl(ufunc_name): - def impl(self, space, w_other, w_selfiter=DummyIterator(), - w_otheriter=DummyIterator()): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, - [self, w_other, w_selfiter, w_otheriter]) + def impl(self, space, w_other): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") @@ -987,15 +980,12 @@ """ Intermediate class for performing binary operations. """ - def __init__(self, signature, shape, calc_dtype, res_dtype, left, right, - liter = DummyIterator(), riter = DummyIterator()): + def __init__(self, signature, shape, calc_dtype, res_dtype, left, right): # XXX do something if left.order != right.order VirtualArray.__init__(self, signature, shape, res_dtype, left.order) self.left = left self.right = right self.calc_dtype = calc_dtype - self.liter = liter - self.riter = riter self.size = 1 for s in self.shape: self.size *= s @@ -1012,15 +1002,8 @@ return self.forced_result.start_iter(res_shape) if res_shape is None: res_shape = self.shape # we still force the shape on children - if not getattr(self.liter, 'get_offest', ''): - _liter = self.left.start_iter(res_shape) - else: - _liter = self.liter - if not getattr(self.riter, 'get_offest', ''): - _riter = self.right.start_iter(res_shape) - else: - _riter = self.riter - return Call2Iterator(_liter, _riter) + return Call2Iterator(self.left.start_iter(res_shape), + self.right.start_iter(res_shape)) def _eval(self, iter): assert isinstance(iter, Call2Iterator) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -139,10 +139,8 @@ def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, convert_to_array, Scalar, shape_agreement) - if len(args_w)>2: - [w_lhs, w_rhs, w_liter, w_riter] = args_w - else: - [w_lhs, w_rhs] = args_w + + [w_lhs, w_rhs] = args_w w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) calc_dtype = find_binop_result_dtype(space, @@ -164,12 +162,8 @@ self.signature, w_lhs.signature, w_rhs.signature ]) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) - if len(args_w)>2: - w_res = Call2(new_sig, new_shape, calc_dtype, - res_dtype, w_lhs, w_rhs, w_liter, w_riter) - else: - w_res = Call2(new_sig, new_shape, calc_dtype, - res_dtype, w_lhs, w_rhs) + w_res = Call2(new_sig, new_shape, calc_dtype, + res_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) return w_res From noreply at buildbot.pypy.org Mon Dec 5 22:42:12 2011 From: noreply at 
buildbot.pypy.org (mattip) Date: Mon, 5 Dec 2011 22:42:12 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: translation fixes: avoid negative slices Message-ID: <20111205214212.AC4E68205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50192:26e6b2238d4a Date: 2011-12-05 10:30 +0200 http://bitbucket.org/pypy/pypy/changeset/26e6b2238d4a/ Log: translation fixes: avoid negative slices diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -482,30 +482,43 @@ w_res = self.descr_mul(space, w_other) assert isinstance(w_res, BaseArray) return w_res.descr_sum(space) + dtype = interp_ufuncs.find_binop_result_dtype(space, + self.find_dtype(), w_other.find_dtype()) + if self.find_size() < 1 and w_other.find_size() <1: + #numpy compatability + return scalar_w(space, dtype,space.wrap(0)) #Do the dims match? my_critical_dim_size = self.shape[-1] other_critical_dim_size = w_other.shape[0] + other_critical_dim = 0 other_critical_dim_stride = w_other.strides[0] - if len(w_other.shape) > 2: - other_critical_dim_size = w_other.shape[-2] - other_critical_dim_stride = w_other.strides[-2] + out_shape = [] + if len(w_other.shape) > 1: + other_critical_dim = len(w_other.shape)-1 + other_critical_dim_size = w_other.shape[other_critical_dim] + other_critical_dim_stride = w_other.strides[other_critical_dim] + assert other_critical_dim >= 0 + out_shape += self.shape[:-1] + w_other.shape[0:other_critical_dim] + w_other.shape[other_critical_dim:] + elif len(w_other.shape) > 0: + #dot does not reduce + out_shape += self.shape[:-1] if my_critical_dim_size != other_critical_dim_size: raise OperationError(space.w_ValueError, space.wrap( "objects are not aligned")) - out_shape = self.shape[:-1] + w_other.shape[0:-2] + w_other.shape[-1:] out_size = 1 for os in out_shape: out_size *= os out_ndims = len(out_shape) - dtype = interp_ufuncs.find_binop_result_dtype(space, - self.find_dtype(), w_other.find_dtype()) #TODO: what should the order be? C or F? arr = W_NDimArray(out_size, out_shape, dtype=dtype) out_iter = ArrayIterator(out_size) - #TODO: invalidate self, w_other with arr + #TODO: invalidate self, w_other with arr ? 
+ me_iter = BroadcastIterator(self,self.shape[:-1] + [1]) + assert other_critical_dim >= 0 other_iter = BroadcastIterator(self, - w_other.shape[:-2] + [1] + w_other.shape[-1:]) + w_other.shape[:other_critical_dim] + [1] + \ + w_other.shape[other_critical_dim:]) while not out_iter.done(): i = OneDimIterator(me_iter.get_offset(), self.strides[-1], self.shape[-1]) j = OneDimIterator(other_iter.get_offset(), other_critical_dim_stride, other_critical_dim_size) From noreply at buildbot.pypy.org Mon Dec 5 22:42:13 2011 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 Dec 2011 22:42:13 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: add bin_impl_one_dim, would be nice to have some tests Message-ID: <20111205214213.D2E158205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50193:beba9400c9dd Date: 2011-12-05 15:41 +0200 http://bitbucket.org/pypy/pypy/changeset/beba9400c9dd/ Log: add bin_impl_one_dim, would be nice to have some tests diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -403,6 +403,7 @@ greens=['shapelen', 'signature'], reds=['result', 'idx', 'i', 'self', 'cur_best', 'dtype'] ) + def loop(self): i = self.start_iter() cur_best = self.eval(i) @@ -424,6 +425,7 @@ i = i.next(shapelen) idx += 1 return result + def impl(self, space): size = self.find_size() if size == 0: @@ -444,6 +446,7 @@ return False i = i.next(shapelen) return True + def descr_all(self, space): return space.wrap(self._all()) @@ -459,22 +462,39 @@ return True i = i.next(shapelen) return False + def descr_any(self, space): return space.wrap(self._any()) descr_argmax = _reduce_argmax_argmin_impl("max") descr_argmin = _reduce_argmax_argmin_impl("min") + def _binop_impl_one_dim(ufunc_name): + #The third and fourth arguments allow the operator to proceed on a + #single dimension starting at a particular index + #i.e. ssd => self start, dimension; osd => other start, dimension + def impl(self, space, w_other, w_ssd, w_osd): + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + [self, w_other, w_ssd, w_osd]) + return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) + + descr_add1d = _binop_impl_one_dim("add") + descr_sub1d = _binop_impl_one_dim("subtract") + descr_mul1d = _binop_impl_one_dim("multiply") + descr_div1d = _binop_impl_one_dim("divide") + descr_pow1d = _binop_impl_one_dim("power") + descr_mod1d = _binop_impl_one_dim("mod") + def descr_dot(self, space, w_other): '''Dot product of two arrays. - + For 2-D arrays it is equivalent to matrix multiplication, and for 1-D arrays to inner product of vectors (without complex conjugation). 
For N dimensions it is a sum product over the last axis of `a` and the second-to-last of `b`:: - + dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])''' - #numpy's doc string :) + #numpy's doc string :) w_other = convert_to_array(space, w_other) if isinstance(w_other, Scalar): return self.descr_mul(space, w_other) @@ -482,23 +502,25 @@ w_res = self.descr_mul(space, w_other) assert isinstance(w_res, BaseArray) return w_res.descr_sum(space) - dtype = interp_ufuncs.find_binop_result_dtype(space, + dtype = interp_ufuncs.find_binop_result_dtype(space, self.find_dtype(), w_other.find_dtype()) - if self.find_size() < 1 and w_other.find_size() <1: + if self.find_size() < 1 and w_other.find_size() < 1: #numpy compatability - return scalar_w(space, dtype,space.wrap(0)) + return scalar_w(space, dtype, space.wrap(0)) #Do the dims match? my_critical_dim_size = self.shape[-1] other_critical_dim_size = w_other.shape[0] other_critical_dim = 0 - other_critical_dim_stride = w_other.strides[0] + other_critical_dim_stride = w_other.strides[0] out_shape = [] if len(w_other.shape) > 1: - other_critical_dim = len(w_other.shape)-1 + other_critical_dim = len(w_other.shape) - 1 other_critical_dim_size = w_other.shape[other_critical_dim] other_critical_dim_stride = w_other.strides[other_critical_dim] assert other_critical_dim >= 0 - out_shape += self.shape[:-1] + w_other.shape[0:other_critical_dim] + w_other.shape[other_critical_dim:] + out_shape += self.shape[:-1] + \ + w_other.shape[0:other_critical_dim] + \ + w_other.shape[other_critical_dim:] elif len(w_other.shape) > 0: #dot does not reduce out_shape += self.shape[:-1] @@ -513,15 +535,16 @@ arr = W_NDimArray(out_size, out_shape, dtype=dtype) out_iter = ArrayIterator(out_size) #TODO: invalidate self, w_other with arr ? - - me_iter = BroadcastIterator(self,self.shape[:-1] + [1]) + me_iter = BroadcastIterator(self, self.shape[:-1] + [1]) assert other_critical_dim >= 0 - other_iter = BroadcastIterator(self, + other_iter = BroadcastIterator(self, w_other.shape[:other_critical_dim] + [1] + \ w_other.shape[other_critical_dim:]) while not out_iter.done(): - i = OneDimIterator(me_iter.get_offset(), self.strides[-1], self.shape[-1]) - j = OneDimIterator(other_iter.get_offset(), other_critical_dim_stride, other_critical_dim_size) + i = OneDimIterator(me_iter.get_offset(), + self.strides[-1], self.shape[-1]) + j = OneDimIterator(other_iter.get_offset(), + other_critical_dim_stride, other_critical_dim_size) #Heres what I would like to do, but how? #value = sum(mult_with_iters(self, i, w_other, j)) #arr.setitem(out_iter, value) @@ -529,7 +552,6 @@ me_iter = me_iter.next(0) other_iter = other_iter.next(0) return arr - def get_concrete(self): raise NotImplementedError @@ -898,7 +920,8 @@ self.res_dtype = res_dtype def _del_sources(self): - # Function for deleting references to source arrays, to allow garbage-collecting them + # Function for deleting references to source arrays, + #to allow garbage-collecting them raise NotImplementedError def compute(self): @@ -993,11 +1016,14 @@ """ Intermediate class for performing binary operations. 
""" - def __init__(self, signature, shape, calc_dtype, res_dtype, left, right): + def __init__(self, signature, shape, calc_dtype, res_dtype, left, right, + left_start_dim=[-1, -1], right_start_dim=[-1, -1]): # XXX do something if left.order != right.order VirtualArray.__init__(self, signature, shape, res_dtype, left.order) self.left = left self.right = right + self.left_start_dim = left_start_dim + self.right_start_dim = right_start_dim self.calc_dtype = calc_dtype self.size = 1 for s in self.shape: @@ -1015,6 +1041,7 @@ return self.forced_result.start_iter(res_shape) if res_shape is None: res_shape = self.shape # we still force the shape on children + #TODO: use left_start_dim, right_start_dim if they are not [-1, -1] return Call2Iterator(self.left.start_iter(res_shape), self.right.start_iter(res_shape)) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -139,8 +139,12 @@ def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, convert_to_array, Scalar, shape_agreement) - - [w_lhs, w_rhs] = args_w + if len(args_w)<4: + [w_lhs, w_rhs] = args_w + w_ssd = space.newlist([space.wrap(-1)]*2) + w_osd = space.newlist([space.wrap(-1)]*2) + else: + [w_lhs, w_rhs, w_ssd, w_osd] = args_w w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) calc_dtype = find_binop_result_dtype(space, @@ -163,7 +167,7 @@ ]) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) w_res = Call2(new_sig, new_shape, calc_dtype, - res_dtype, w_lhs, w_rhs) + res_dtype, w_lhs, w_rhs, w_ssd, w_osd) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) return w_res diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -716,14 +716,14 @@ assert dot(range(5), range(5)) == 30 assert (dot(5, [1, 2, 3]) == [5, 10, 15]).all() a = array([[range(4), range(4, 8), range(8, 12)], - [range(12, 16),range(16, 20),range(20, 24)]]) - raises(ValueError,"a.dot(a)") + [range(12, 16), range(16, 20), range(20, 24)]]) + raises(ValueError, "a.dot(a)") b = a[0, :, :].T #Superfluous shape test makes the intention of the test clearer - assert a.shape == (2, 3, 4) + assert a.shape == (2, 3, 4) assert b.shape == (4, 3) c = a.dot(b) - assert (c == [[[14, 38,62], [38, 126, 214], [62, 214, 366]], + assert (c == [[[14, 38, 62], [38, 126, 214], [62, 214, 366]], [[86, 302, 518], [110, 390, 670], [134, 478, 822]]]).all() def test_dot_constant(self): From noreply at buildbot.pypy.org Mon Dec 5 22:42:15 2011 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 Dec 2011 22:42:15 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: add failing test for sum return value Message-ID: <20111205214215.022088205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50194:699511d4d5d5 Date: 2011-12-05 16:36 +0200 http://bitbucket.org/pypy/pypy/changeset/699511d4d5d5/ Log: add failing test for sum return value diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -411,6 +411,7 @@ b = a * a for i in range(5): assert b[i] == i * i + assert b.dtype is numpypy.dtype(int) a = numpypy.array(range(5), dtype=bool) b = a * a @@ -629,8 
+630,10 @@ def test_sum(self): from numpypy import array a = array(range(5)) - assert a.sum() == 10.0 - assert a[:4].sum() == 6.0 + b = a.sum() + assert b == 10 + assert isinstance(b,int) + assert a[:4].sum() == 6 a = array([True] * 5, bool) assert a.sum() == 5 From noreply at buildbot.pypy.org Mon Dec 5 22:42:16 2011 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 Dec 2011 22:42:16 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: need to fix failing sum test before continuing Message-ID: <20111205214216.25A778205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50195:ee2362c0cec8 Date: 2011-12-05 16:37 +0200 http://bitbucket.org/pypy/pypy/changeset/ee2362c0cec8/ Log: need to fix failing sum test before continuing diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -514,7 +514,7 @@ other_critical_dim_stride = w_other.strides[0] out_shape = [] if len(w_other.shape) > 1: - other_critical_dim = len(w_other.shape) - 1 + other_critical_dim = len(w_other.shape) - 2 other_critical_dim_size = w_other.shape[other_critical_dim] other_critical_dim_stride = w_other.strides[other_critical_dim] assert other_critical_dim >= 0 @@ -541,13 +541,14 @@ w_other.shape[:other_critical_dim] + [1] + \ w_other.shape[other_critical_dim:]) while not out_iter.done(): - i = OneDimIterator(me_iter.get_offset(), - self.strides[-1], self.shape[-1]) - j = OneDimIterator(other_iter.get_offset(), - other_critical_dim_stride, other_critical_dim_size) - #Heres what I would like to do, but how? - #value = sum(mult_with_iters(self, i, w_other, j)) - #arr.setitem(out_iter, value) + w_ssd = space.newlist([space.wrap(me_iter.get_offset()), + space.wrap(len(self.shape)-1)]) + w_osd = space.newlist([space.wrap(other_iter.get_offset()), + space.wrap(other_critical_dim)]) + w_res = self.descr_mul1d(space, w_other, w_ssd, w_osd) + value = w_res.descr_sum(space) + abc=hgk + arr.setitem(out_iter, value) out_iter = out_iter.next(out_ndims) me_iter = me_iter.next(0) other_iter = other_iter.next(0) From noreply at buildbot.pypy.org Mon Dec 5 22:42:17 2011 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 Dec 2011 22:42:17 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: expose promote_to_largest to reduce functions, fixes sum() prod() bug without ruining mean() Message-ID: <20111205214217.4A8208205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50196:924460a509cd Date: 2011-12-05 17:36 +0200 http://bitbucket.org/pypy/pypy/changeset/924460a509cd/ Log: expose promote_to_largest to reduce functions, fixes sum() prod() bug without ruining mean() diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -388,15 +388,17 @@ descr_rpow = _binop_right_impl("power") descr_rmod = _binop_right_impl("mod") - def _reduce_ufunc_impl(ufunc_name): + def _reduce_ufunc_impl(ufunc_name, promote_to_largest): def impl(self, space): - return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, self, multidim=True) + return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, + self, multidim=True, promote_to_largest=promote_to_largest) return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) - descr_sum = _reduce_ufunc_impl("add") - descr_prod = 
_reduce_ufunc_impl("multiply") - descr_max = _reduce_ufunc_impl("maximum") - descr_min = _reduce_ufunc_impl("minimum") + descr_sum = _reduce_ufunc_impl("add", False) + descr_prod = _reduce_ufunc_impl("multiply", False) + descr_max = _reduce_ufunc_impl("maximum", False) + descr_min = _reduce_ufunc_impl("minimum", False) + descr_sumpromote = _reduce_ufunc_impl("add", True) def _reduce_argmax_argmin_impl(op_name): reduce_driver = jit.JitDriver( @@ -816,7 +818,7 @@ shape[:]) def descr_mean(self, space): - return space.div(self.descr_sum(space), space.wrap(self.find_size())) + return space.div(self.descr_sumpromote(space), space.wrap(self.find_size())) def descr_nonzero(self, space): if self.find_size() > 1: diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -46,9 +46,9 @@ return self.call(space, __args__.arguments_w) def descr_reduce(self, space, w_obj): - return self.reduce(space, w_obj, multidim=False) + return self.reduce(space, w_obj, multidim=False, promote_to_largest=False) - def reduce(self, space, w_obj, multidim): + def reduce(self, space, w_obj, multidim, promote_to_largest): from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " @@ -63,7 +63,8 @@ size = obj.find_size() dtype = find_unaryop_result_dtype( space, obj.find_dtype(), - promote_to_largest=True + promote_to_largest = promote_to_largest, + promote_bools = True, ) start = obj.start_iter(obj.shape) shapelen = len(obj.shape) From noreply at buildbot.pypy.org Mon Dec 5 22:42:18 2011 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 Dec 2011 22:42:18 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: dot works Message-ID: <20111205214218.72E848205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50197:d28b98fc74ed Date: 2011-12-05 23:17 +0200 http://bitbucket.org/pypy/pypy/changeset/d28b98fc74ed/ Log: dot works diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -152,7 +152,7 @@ return arr def done(self): - return self.offset == self.size + return self.offset >= self.size def get_offset(self): return self.offset @@ -197,7 +197,7 @@ class BroadcastIterator(BaseIterator): '''Like a view iterator, but will repeatedly access values for all iterations across a res_shape, folding the offset - using mod() arithmetic + using stride = backstride = 0 ''' def __init__(self, arr, res_shape): self.indices = [0] * len(res_shape) @@ -522,7 +522,7 @@ assert other_critical_dim >= 0 out_shape += self.shape[:-1] + \ w_other.shape[0:other_critical_dim] + \ - w_other.shape[other_critical_dim:] + w_other.shape[other_critical_dim + 1:] elif len(w_other.shape) > 0: #dot does not reduce out_shape += self.shape[:-1] @@ -535,25 +535,28 @@ out_ndims = len(out_shape) #TODO: what should the order be? C or F? arr = W_NDimArray(out_size, out_shape, dtype=dtype) - out_iter = ArrayIterator(out_size) + out_iter = ViewIterator(arr) #TODO: invalidate self, w_other with arr ? 
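        # Illustration only, not part of this changeset: the loop below walks
        # out_iter over every position of the result and, following the
        # docstring formula dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m]),
        # pairs the last axis of self with the second-to-last axis of w_other.
        # For example, with self.shape == [3, 4] and w_other.shape == [4, 2],
        # out_shape is [3, 2] and each output element sums four products.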
- me_iter = BroadcastIterator(self, self.shape[:-1] + [1]) - assert other_critical_dim >= 0 - other_iter = BroadcastIterator(self, - w_other.shape[:other_critical_dim] + [1] + \ - w_other.shape[other_critical_dim:]) while not out_iter.done(): - w_ssd = space.newlist([space.wrap(me_iter.get_offset()), - space.wrap(len(self.shape)-1)]) - w_osd = space.newlist([space.wrap(other_iter.get_offset()), + my_index = self.start + other_index = w_other.start + i = 0 + while i < len(self.shape) - 1: + my_index += out_iter.indices[i] * self.strides[i] + i += 1 + for j in range(len(w_other.shape) - 2): + other_index += out_iter.indices[i] * w_other.strides[j] + other_index += out_iter.indices[-1] * w_other.strides[-1] + w_ssd = space.newlist([space.wrap(my_index), + space.wrap(len(self.shape) - 1)]) + w_osd = space.newlist([space.wrap(other_index), space.wrap(other_critical_dim)]) w_res = self.descr_mul1d(space, w_other, w_ssd, w_osd) + assert isinstance(w_res, BaseArray) value = w_res.descr_sum(space) - abc=hgk - arr.setitem(out_iter, value) + arr.setitem(out_iter.get_offset(), value) out_iter = out_iter.next(out_ndims) - me_iter = me_iter.next(0) - other_iter = other_iter.next(0) + ii += 1 return arr def get_concrete(self): @@ -818,7 +821,8 @@ shape[:]) def descr_mean(self, space): - return space.div(self.descr_sumpromote(space), space.wrap(self.find_size())) + return space.div(self.descr_sumpromote(space), + space.wrap(self.find_size())) def descr_nonzero(self, space): if self.find_size() > 1: @@ -940,7 +944,7 @@ shapelen=shapelen, result_size=result_size, i=i, ri=ri, self=self, result=result) - result.dtype.setitem(result.storage, ri.offset, self.eval(i)) + result.dtype.setitem(result.storage, ri.get_offset(), self.eval(i)) i = i.next(shapelen) ri = ri.next(shapelen) return result @@ -1045,6 +1049,14 @@ if res_shape is None: res_shape = self.shape # we still force the shape on children #TODO: use left_start_dim, right_start_dim if they are not [-1, -1] + if self.left_start_dim[0] >= 0: + ldim = self.left_start_dim[1] + rdim = self.right_start_dim[1] + left_iter = OneDimIterator(self.left_start_dim[0], + self.left.strides[ldim], self.left.shape[ldim]) + right_iter = OneDimIterator(self.right_start_dim[0], + self.right.strides[rdim], self.right.shape[rdim]) + return Call2Iterator(left_iter, right_iter) return Call2Iterator(self.left.start_iter(res_shape), self.right.start_iter(res_shape)) @@ -1143,7 +1155,7 @@ self=self, source=source, res_iter=res_iter, source_iter=source_iter) - self.setitem(res_iter.offset, source.eval(source_iter).convert_to( + self.setitem(res_iter.get_offset(), source.eval(source_iter).convert_to( self.find_dtype())) source_iter = source_iter.next(shapelen) res_iter = res_iter.next(shapelen) @@ -1165,7 +1177,7 @@ array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) iter = self.start_iter() while not iter.done(): - array.setitem(iter.offset, self.getitem(iter.offset)) + array.setitem(iter.get_offset(), self.getitem(iter.get_offset())) iter = iter.next(len(self.shape)) return array @@ -1280,7 +1292,7 @@ arr_iter = arr.start_iter(arr.shape) for i in range(len(elems_w)): w_elem = elems_w[i] - dtype.setitem(arr.storage, arr_iter.offset, dtype.coerce(space, w_elem)) + dtype.setitem(arr.storage, arr_iter.get_offset(), dtype.coerce(space, w_elem)) arr_iter = arr_iter.next(shapelen) return arr diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ 
b/pypy/module/micronumpy/interp_ufuncs.py @@ -140,6 +140,7 @@ def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, convert_to_array, Scalar, shape_agreement) + #TODO: use of w_ssd, w_osd can be optimized. if len(args_w)<4: [w_lhs, w_rhs] = args_w w_ssd = space.newlist([space.wrap(-1)]*2) @@ -166,9 +167,17 @@ new_sig = signature.Signature.find_sig([ self.signature, w_lhs.signature, w_rhs.signature ]) - new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) + new_shape = [] + ssd = [space.int_w(s) for s in space.listview(w_ssd)] + osd = [space.int_w(s) for s in space.listview(w_osd)] + if ssd[0]<0: + new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) + else: + #Assumption (should have been checked in call): + #w_lhs.shape[ssd[1]] == w_rhs.shape[osd[1]] + new_shape = [w_lhs.shape[ssd[1]]] w_res = Call2(new_sig, new_shape, calc_dtype, - res_dtype, w_lhs, w_rhs, w_ssd, w_osd) + res_dtype, w_lhs, w_rhs, ssd, osd) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) return w_res diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -718,6 +718,12 @@ assert a.dot(range(5)) == 30 assert dot(range(5), range(5)) == 30 assert (dot(5, [1, 2, 3]) == [5, 10, 15]).all() + + a = array([range(4), range(4, 8), range(8, 12)]) + b = array([range(3), range(3, 6), range(6, 9), range(9, 12)]) + c = a.dot(b) + assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() + a = array([[range(4), range(4, 8), range(8, 12)], [range(12, 16), range(16, 20), range(20, 24)]]) raises(ValueError, "a.dot(a)") From noreply at buildbot.pypy.org Mon Dec 5 22:42:19 2011 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 Dec 2011 22:42:19 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: dot seems to work, more of a proof-of-concept than usable Message-ID: <20111205214219.95AF68205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50198:8ee1a24557b8 Date: 2011-12-05 23:31 +0200 http://bitbucket.org/pypy/pypy/changeset/8ee1a24557b8/ Log: dot seems to work, more of a proof-of-concept than usable diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -152,7 +152,7 @@ return arr def done(self): - return self.offset >= self.size + return self.offset == self.size def get_offset(self): return self.offset @@ -556,7 +556,6 @@ value = w_res.descr_sum(space) arr.setitem(out_iter.get_offset(), value) out_iter = out_iter.next(out_ndims) - ii += 1 return arr def get_concrete(self): From noreply at buildbot.pypy.org Tue Dec 6 00:12:13 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 6 Dec 2011 00:12:13 +0100 (CET) Subject: [pypy-commit] pypy default: expose numpy.float32, thanks to jterrance for pointing it out Message-ID: <20111205231213.DFD478205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50199:24a17a8610e1 Date: 2011-12-05 18:11 -0500 http://bitbucket.org/pypy/pypy/changeset/24a17a8610e1/ Log: expose numpy.float32, thanks to jterrance for pointing it out diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -32,6 +32,7 @@ 'int_': 'interp_boxes.W_LongBox', 'inexact': 
'interp_boxes.W_InexactBox', 'floating': 'interp_boxes.W_FloatingBox', + 'float32': 'interp_boxes.W_Float32Box', 'float64': 'interp_boxes.W_Float64Box', } diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -258,6 +258,8 @@ W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, __module__ = "numpypy", + + __new__ = interp2app(W_Float32Box.descr__new__.im_func), ) W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -240,6 +240,13 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 + def test_float32(self): + import numpypy as numpy + + assert numpy.float32.mro() == [numpy.float32, numpy.floating, numpy.inexact, numpy.number, numpy.generic, object] + + assert numpy.float32(12) == numpy.float64(12) + def test_float64(self): import numpypy as numpy From noreply at buildbot.pypy.org Tue Dec 6 08:34:28 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Tue, 6 Dec 2011 08:34:28 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: suport for setup_class and teardown_class Message-ID: <20111206073428.D9DE08205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50200:4c76bc680c2f Date: 2011-12-06 08:34 +0100 http://bitbucket.org/pypy/pypy/changeset/4c76bc680c2f/ Log: suport for setup_class and teardown_class diff --git a/pypy/jit/metainterp/test/test_memmgr.py b/pypy/jit/metainterp/test/test_memmgr.py --- a/pypy/jit/metainterp/test/test_memmgr.py +++ b/pypy/jit/metainterp/test/test_memmgr.py @@ -241,10 +241,17 @@ if __name__ == '__main__': # occurs in the subprocess for test in [_TestMemoryManager(), _TestIntegration()]: - for name in dir(test): - if name.startswith('test_'): - print - print '-'*79 - print '----- Now running test', name, '-----' - print - getattr(test, name)() + if hasattr(test, 'setup_class'): + test.setup_class() + try: + for name in dir(test): + if name.startswith('test_'): + print + print '-'*79 + print '----- Now running test', name, '-----' + print + getattr(test, name)() + finally: + if hasattr(test, 'teardown_class'): + test.teardown_class() + From noreply at buildbot.pypy.org Tue Dec 6 09:49:02 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Dec 2011 09:49:02 +0100 (CET) Subject: [pypy-commit] pypy default: merge matrix-reshape-merge branch. Thanks mattip for doing that. Message-ID: <20111206084902.BA4898205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50201:ea547b8be18f Date: 2011-12-06 10:48 +0200 http://bitbucket.org/pypy/pypy/changeset/ea547b8be18f/ Log: merge matrix-reshape-merge branch. Thanks mattip for doing that. Adds settable shape of an array as well as reshape method/function. 
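A minimal sketch of what the merged branch exposes at application level, assuming an interpreter built with the numpypy module (the calls mirror the tests in the diff below):

    from numpypy import arange, reshape
    a = arange(12)
    a.shape = (3, 4)                 # shape is now a settable attribute
    b = a.reshape((2, 6))            # method form; one dimension may be -1
    c = reshape(range(12), (3, 4))   # module-level helper, also accepts plain lists
    assert b.shape == (2, 6) and c.shape == (3, 4)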
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -77,4 +77,5 @@ 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', 'arange': 'app_numpy.arange', + 'reshape': 'app_numpy.reshape', } diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -36,3 +36,39 @@ j += 1 i += step return arr + +def reshape(a, shape): + '''reshape(a, newshape) + Gives a new shape to an array without changing its data. + + Parameters + ---------- + a : array_like + Array to be reshaped. + newshape : int or tuple of ints + The new shape should be compatible with the original shape. If + an integer, then the result will be a 1-D array of that length. + One shape dimension can be -1. In this case, the value is inferred + from the length of the array and remaining dimensions. + + Returns + ------- + reshaped_array : ndarray + This will be a new view object if possible; otherwise, it will + be a copy. + + + See Also + -------- + ndarray.reshape : Equivalent method. + + Notes + ----- + + It is not always possible to change the shape of an array without + copying the data. If you want an error to be raise if the data is copied, + you should assign the new shape to the shape attribute of the array +''' + if not hasattr(a, 'reshape'): + a = numpypy.array(a) + return a.reshape(shape) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -98,6 +98,107 @@ endshape[i] = remainder[i] return endshape +def get_shape_from_iterable(space, old_size, w_iterable): + new_size = 0 + new_shape = [] + if space.isinstance_w(w_iterable, space.w_int): + new_size = space.int_w(w_iterable) + if new_size < 0: + new_size = old_size + new_shape = [new_size, ] + else: + neg_dim = -1 + batch = space.listview(w_iterable) + #Allow for shape = (1,2,3) or shape = ((1,2,3)) + if len(batch) > 1 and space.issequence_w(batch[0]): + batch = space.listview(batch[0]) + new_size = 1 + if len(batch) < 1: + if old_size == 1: + #Scalars can have an empty size. + new_size = 1 + else: + new_size = 0 + new_shape = [] + i = 0 + for elem in batch: + s = space.int_w(elem) + if s < 0: + if neg_dim >= 0: + raise OperationError(space.w_ValueError, space.wrap( + "can only specify one unknown dimension")) + s = 1 + neg_dim = i + new_size *= s + new_shape.append(s) + i += 1 + if neg_dim >= 0: + new_shape[neg_dim] = old_size / new_size + new_size *= new_shape[neg_dim] + if new_size != old_size: + raise OperationError(space.w_ValueError, + space.wrap("total size of new array must be unchanged")) + return new_shape + +#Recalculating strides. Find the steps that the iteration does for each +#dimension, given the stride and shape. Then try to create a new stride that +#fits the new shape, using those steps. If there is a shape/step mismatch +#(meaning that the realignment of elements crosses from one step into another) +#return None so that the caller can raise an exception. 
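#For illustration (not part of the patch), using cases from the unit tests
#below: calc_new_strides([2, 4], [4, 2], [4, 2]) can express the new shape
#over the old data and returns [8, 2], while calc_new_strides([2, 3, 4],
#[8, 3], [1, 16]) crosses a stepping boundary and returns None, in which
#case descr_reshape falls back to building a contiguous copy.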
+def calc_new_strides(new_shape, old_shape, old_strides): + #Return the proper strides for new_shape, or None + # if the mapping crosses stepping boundaries + + #Assumes that prod(old_shape) ==prod(new_shape), len(old_shape) > 1 and + # len(new_shape) > 0 + steps = [] + last_step = 1 + oldI = 0 + new_strides = [] + if old_strides[0] < old_strides[-1]: + for i in range(len(old_shape)): + steps.append(old_strides[i] / last_step) + last_step *= old_shape[i] + cur_step = steps[0] + n_new_elems_used = 1 + n_old_elems_to_use = old_shape[0] + for s in new_shape: + new_strides.append(cur_step * n_new_elems_used) + n_new_elems_used *= s + while n_new_elems_used > n_old_elems_to_use: + oldI += 1 + if steps[oldI] != steps[oldI - 1]: + return None + n_old_elems_to_use *= old_shape[oldI] + if n_new_elems_used == n_old_elems_to_use: + oldI += 1 + if oldI >= len(old_shape): + break + cur_step = steps[oldI] + n_old_elems_to_use *= old_shape[oldI] + else: + for i in range(len(old_shape) - 1, -1, -1): + steps.insert(0, old_strides[i] / last_step) + last_step *= old_shape[i] + cur_step = steps[-1] + n_new_elems_used = 1 + oldI = -1 + n_old_elems_to_use = old_shape[-1] + for s in new_shape[::-1]: + new_strides.insert(0, cur_step * n_new_elems_used) + n_new_elems_used *= s + while n_new_elems_used > n_old_elems_to_use: + oldI -= 1 + if steps[oldI] != steps[oldI + 1]: + return None + n_old_elems_to_use *= old_shape[oldI] + if n_new_elems_used == n_old_elems_to_use: + oldI -= 1 + if oldI < -len(old_shape): + break + cur_step = steps[oldI] + n_old_elems_to_use *= old_shape[oldI] + return new_strides # Iterators for arrays # -------------------- @@ -444,6 +545,7 @@ return False i = i.next(shapelen) return True + def descr_all(self, space): return space.wrap(self._all()) @@ -459,6 +561,7 @@ return True i = i.next(shapelen) return False + def descr_any(self, space): return space.wrap(self._any()) @@ -483,6 +586,12 @@ def descr_get_shape(self, space): return space.newtuple([space.wrap(i) for i in self.shape]) + def descr_set_shape(self, space, w_iterable): + concrete = self.get_concrete() + new_shape = get_shape_from_iterable(space, + concrete.find_size(), w_iterable) + concrete.setshape(space, new_shape) + def descr_get_size(self, space): return space.wrap(self.find_size()) @@ -735,6 +844,40 @@ return NDimSlice(self, new_sig, start, strides[:], backstrides[:], shape[:]) + def descr_reshape(self, space, w_args): + """reshape(...) + a.reshape(shape) + + Returns an array containing the same data with a new shape. + + Refer to `%s.reshape` for full documentation. + + See Also + -------- + numpy.reshape : equivalent function +""" % 'numpypy' + concrete = self.get_concrete() + new_shape = get_shape_from_iterable(space, + concrete.find_size(), w_args) + #Since we got to here, prod(new_shape) == self.size + new_strides = calc_new_strides(new_shape, + concrete.shape, concrete.strides) + if new_strides: + #We can create a view, strides somehow match up. 
+ new_sig = signature.Signature.find_sig([ + NDimSlice.signature, self.signature, ]) + ndims = len(new_shape) + new_backstrides = [0] * ndims + for nd in range(ndims): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + arr = NDimSlice(self, new_sig, self.start, new_strides, + new_backstrides, new_shape) + else: + #Create copy with contiguous data + arr = concrete.copy() + arr.setshape(space, new_shape) + return arr + def descr_mean(self, space): return space.div(self.descr_sum(space), space.wrap(self.find_size())) @@ -830,6 +973,11 @@ def debug_repr(self): return 'Scalar' + def setshape(self, space, new_shape): + # In order to get here, we already checked that prod(new_shape)==1, + # so in order to have a consistent API, let it go through. + pass + class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs @@ -1022,6 +1170,39 @@ return space.wrap(self.shape[0]) return space.wrap(1) + def setshape(self, space, new_shape): + if len(self.shape) < 1: + return + elif len(self.shape) < 2: + #TODO: this code could be refactored into calc_strides + #but then calc_strides would have to accept a stepping factor + strides = [] + backstrides = [] + s = self.strides[0] + if self.order == 'C': + new_shape.reverse() + for sh in new_shape: + strides.append(s) + backstrides.append(s * (sh - 1)) + s *= sh + if self.order == 'C': + strides.reverse() + backstrides.reverse() + new_shape.reverse() + self.strides = strides[:] + self.backstrides = backstrides[:] + self.shape = new_shape[:] + return + new_strides = calc_new_strides(new_shape, self.shape, self.strides) + if new_strides is None: + raise OperationError(space.w_AttributeError, space.wrap( + "incompatible shape for a non-contiguous array")) + new_backstrides = [0] * len(new_shape) + for nd in range(len(new_shape)): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + self.strides = new_strides[:] + self.backstrides = new_backstrides[:] + self.shape = new_shape[:] class NDimSlice(ViewArray): signature = signature.BaseSignature() @@ -1077,9 +1258,11 @@ def copy(self): array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) iter = self.start_iter() + a_iter = array.start_iter() while not iter.done(): - array.setitem(iter.offset, self.getitem(iter.offset)) + array.setitem(a_iter.offset, self.getitem(iter.offset)) iter = iter.next(len(self.shape)) + a_iter = a_iter.next(len(array.shape)) return array class W_NDimArray(BaseArray): @@ -1137,6 +1320,10 @@ return ArrayIterator(self.size) raise NotImplementedError # use ViewIterator simply, test it + def setshape(self, space, new_shape): + self.shape = new_shape + self.calc_strides(new_shape) + def debug_repr(self): return 'Array' @@ -1261,7 +1448,8 @@ __debug_repr__ = interp2app(BaseArray.descr_debug_repr), dtype = GetSetProperty(BaseArray.descr_get_dtype), - shape = GetSetProperty(BaseArray.descr_get_shape), + shape = GetSetProperty(BaseArray.descr_get_shape, + BaseArray.descr_set_shape), size = GetSetProperty(BaseArray.descr_get_size), T = GetSetProperty(BaseArray.descr_get_transpose), @@ -1279,6 +1467,7 @@ dot = interp2app(BaseArray.descr_dot), copy = interp2app(BaseArray.descr_copy), + reshape = interp2app(BaseArray.descr_reshape), ) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -158,6 +158,13 @@ assert shape_agreement(self.space, [5, 2], [4, 3, 5, 2]) == [4, 3, 5, 2] + 
def test_calc_new_strides(self): + from pypy.module.micronumpy.interp_numarray import calc_new_strides + assert calc_new_strides([2, 4], [4, 2], [4, 2]) == [8, 2] + assert calc_new_strides([2, 4, 3], [8, 3], [1, 16]) == [1, 2, 16] + assert calc_new_strides([2, 3, 4], [8, 3], [1, 16]) is None + assert calc_new_strides([24], [2, 4, 3], [48, 6, 1]) is None + assert calc_new_strides([24], [2, 4, 3], [24, 6, 2]) == [2] class AppTestNumArray(BaseNumpyAppTest): def test_ndarray(self): @@ -216,8 +223,8 @@ assert a[2] == 4 def test_copy(self): - from numpypy import array - a = array(range(5)) + from numpypy import arange, array + a = arange(5) b = a.copy() for i in xrange(5): assert b[i] == a[i] @@ -227,6 +234,11 @@ a = array(1) assert a.copy() == a + a = arange(8) + b = a[::2] + c = b.copy() + assert (c == b).all() + def test_iterator_init(self): from numpypy import array a = array(range(5)) @@ -339,6 +351,76 @@ c = a[:3] assert c.shape == (3,) + def test_set_shape(self): + from numpypy import array, zeros + a = array([]) + a.shape = [] + a = array(range(12)) + a.shape = (3, 4) + assert (a == [range(4), range(4, 8), range(8, 12)]).all() + a.shape = (3, 2, 2) + assert a[1, 1, 1] == 7 + a.shape = (3, -1, 2) + assert a.shape == (3, 2, 2) + a.shape = 12 + assert a.shape == (12, ) + exc = raises(ValueError, "a.shape = 10") + assert str(exc.value) == "total size of new array must be unchanged" + a = array(3) + a.shape = () + #numpy allows this + a.shape = (1,) + + def test_reshape(self): + from numpypy import array, zeros + a = array(range(12)) + exc = raises(ValueError, "b = a.reshape((3, 10))") + assert str(exc.value) == "total size of new array must be unchanged" + b = a.reshape((3, 4)) + assert b.shape == (3, 4) + assert (b == [range(4), range(4, 8), range(8, 12)]).all() + b[:, 0] = 1000 + assert (a == [1000, 1, 2, 3, 1000, 5, 6, 7, 1000, 9, 10, 11]).all() + a = zeros((4, 2, 3)) + a.shape = (12, 2) + + def test_slice_reshape(self): + from numpypy import zeros, arange + a = zeros((4, 2, 3)) + b = a[::2, :, :] + b.shape = (2, 6) + exc = raises(AttributeError, "b.shape = 12") + assert str(exc.value) == \ + "incompatible shape for a non-contiguous array" + b = a[::2, :, :].reshape((2, 6)) + assert b.shape == (2, 6) + b = arange(20)[1:17:2] + b.shape = (4, 2) + assert (b == [[1, 3], [5, 7], [9, 11], [13, 15]]).all() + c = b.reshape((2, 4)) + assert (c == [[1, 3, 5, 7], [9, 11, 13, 15]]).all() + + z = arange(96).reshape((12, -1)) + assert z.shape == (12, 8) + y = z.reshape((4, 3, 8)) + v = y[:, ::2, :] + w = y.reshape(96) + u = v.reshape(64) + assert y[1, 2, 1] == z[5, 1] + y[1, 2, 1] = 1000 + #z, y, w, v are views of each other + assert z[5, 1] == 1000 + assert v[1, 1, 1] == 1000 + assert w[41] == 1000 + #u is not a view, it is a copy! + assert u[25] == 41 + + def test_reshape_varargs(self): + skip("How do I do varargs in rpython? 
reshape should accept a" + " variable number of arguments") + z = arange(96).reshape(12, -1) + y = z.reshape(4, 3, 8) + def test_add(self): from numpypy import array a = array(range(5)) @@ -1155,3 +1237,14 @@ a = arange(0, 0.8, 0.1) assert len(a) == 8 assert arange(False, True, True).dtype is dtype(int) + + +class AppTestRanges(BaseNumpyAppTest): + def test_app_reshape(self): + from numpypy import arange, array, dtype, reshape + a = arange(12) + b = reshape(a, (3, 4)) + assert b.shape == (3, 4) + a = range(12) + b = reshape(a, (3, 4)) + assert b.shape == (3, 4) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -186,7 +186,8 @@ # sure it was optimized correctly. # XXX the comment above is wrong now. We need preferrably a way to # count the two loops separately - self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, + py.test.skip("counting exact number of classes is nonsense") + self.check_resops({'setarrayitem_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 35, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, From noreply at buildbot.pypy.org Tue Dec 6 09:49:43 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Dec 2011 09:49:43 +0100 (CET) Subject: [pypy-commit] pypy default: close merged branch Message-ID: <20111206084943.DF5098205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50202:e36a33ee5710 Date: 2011-12-06 10:48 +0200 http://bitbucket.org/pypy/pypy/changeset/e36a33ee5710/ Log: close merged branch From noreply at buildbot.pypy.org Tue Dec 6 09:49:45 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Dec 2011 09:49:45 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-reshape: close merged branch Message-ID: <20111206084945.1EEDF8205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: matrixmath-reshape Changeset: r50203:c8b9ca8d5c25 Date: 2011-12-06 10:49 +0200 http://bitbucket.org/pypy/pypy/changeset/c8b9ca8d5c25/ Log: close merged branch From noreply at buildbot.pypy.org Tue Dec 6 09:52:40 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Dec 2011 09:52:40 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-reshape-merge: close merged branch Message-ID: <20111206085240.EE30D8205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: matrixmath-reshape-merge Changeset: r50204:83b5695339e8 Date: 2011-12-06 10:52 +0200 http://bitbucket.org/pypy/pypy/changeset/83b5695339e8/ Log: close merged branch From noreply at buildbot.pypy.org Tue Dec 6 09:56:17 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Dec 2011 09:56:17 +0100 (CET) Subject: [pypy-commit] pypy default: rename NDimSlice to W_NDimSlice, kill some unused imports Message-ID: <20111206085617.ACFE68205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50205:ea4102860b98 Date: 2011-12-06 10:55 +0200 http://bitbucket.org/pypy/pypy/changeset/ea4102860b98/ Log: rename NDimSlice to W_NDimSlice, kill some unused imports diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -839,10 +839,10 @@ strides += self.strides[s:] backstrides += self.backstrides[s:] new_sig = signature.Signature.find_sig([ - 
NDimSlice.signature, self.signature, + W_NDimSlice.signature, self.signature, ]) - return NDimSlice(self, new_sig, start, strides[:], backstrides[:], - shape[:]) + return W_NDimSlice(self, new_sig, start, strides[:], backstrides[:], + shape[:]) def descr_reshape(self, space, w_args): """reshape(...) @@ -865,13 +865,13 @@ if new_strides: #We can create a view, strides somehow match up. new_sig = signature.Signature.find_sig([ - NDimSlice.signature, self.signature, ]) + W_NDimSlice.signature, self.signature, ]) ndims = len(new_shape) new_backstrides = [0] * ndims for nd in range(ndims): new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] - arr = NDimSlice(self, new_sig, self.start, new_strides, - new_backstrides, new_shape) + arr = W_NDimSlice(self, new_sig, self.start, new_strides, + new_backstrides, new_shape) else: #Create copy with contiguous data arr = concrete.copy() @@ -894,7 +894,7 @@ if len(concrete.shape) < 2: return space.wrap(self) new_sig = signature.Signature.find_sig([ - NDimSlice.signature, self.signature + W_NDimSlice.signature, self.signature ]) strides = [] backstrides = [] @@ -903,8 +903,8 @@ strides.append(concrete.strides[i]) backstrides.append(concrete.backstrides[i]) shape.append(concrete.shape[i]) - return space.wrap(NDimSlice(concrete, new_sig, self.start, strides[:], - backstrides[:], shape[:])) + return space.wrap(W_NDimSlice(concrete, new_sig, self.start, strides[:], + backstrides[:], shape[:])) def descr_get_flatiter(self, space): return space.wrap(W_FlatIterator(self)) @@ -1204,12 +1204,12 @@ self.backstrides = new_backstrides[:] self.shape = new_shape[:] -class NDimSlice(ViewArray): +class W_NDimSlice(ViewArray): signature = signature.BaseSignature() def __init__(self, parent, signature, start, strides, backstrides, shape): - if isinstance(parent, NDimSlice): + if isinstance(parent, W_NDimSlice): parent = parent.parent ViewArray.__init__(self, parent, signature, strides, backstrides, shape) self.start = start diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -8,13 +8,12 @@ from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.metainterp.warmspot import reset_stats -from pypy.module.micronumpy import interp_boxes, interp_ufuncs, signature -from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, - FloatObject, IntObject, BoolObject, Parser, InterpreterState) -from pypy.module.micronumpy.interp_numarray import (W_NDimArray, NDimSlice, +from pypy.module.micronumpy import interp_boxes +from pypy.module.micronumpy.compile import (FakeSpace, + IntObject, Parser, InterpreterState) +from pypy.module.micronumpy.interp_numarray import (W_NDimArray, BaseArray) from pypy.rlib.nonconst import NonConstant -from pypy.rpython.annlowlevel import llstr, hlstr class TestNumpyJIt(LLJitMixin): From noreply at buildbot.pypy.org Tue Dec 6 11:04:21 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Tue, 6 Dec 2011 11:04:21 +0100 (CET) Subject: [pypy-commit] pypy default: fixed _socket int/uint matters Message-ID: <20111206100421.0E2148205C@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: Changeset: r50206:f6023488fc52 Date: 2011-12-06 11:03 +0100 http://bitbucket.org/pypy/pypy/changeset/f6023488fc52/ Log: fixed _socket int/uint matters diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- 
a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -1,7 +1,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.module._socket.interp_socket import converted_error, W_RSocket from pypy.rlib import rsocket -from pypy.rlib.rsocket import SocketError +from pypy.rlib.rsocket import SocketError, INVALID_SOCKET from pypy.interpreter.error import OperationError def gethostname(space): @@ -284,7 +284,7 @@ space.wrap(socktype), space.wrap(protocol), space.wrap(canonname), - addr.as_object(-1, space)]) # -1 as per cpython + addr.as_object(INVALID_SOCKET, space)]) # -1 as per cpython for (family, socktype, protocol, canonname, addr) in lst] return space.newlist(lst1) diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -418,7 +418,7 @@ if _MSVC: def invalid_socket(fd): return fd == INVALID_SOCKET - INVALID_SOCKET = intmask(cConfig.INVALID_SOCKET) + INVALID_SOCKET = r_uint(cConfig.INVALID_SOCKET) else: def invalid_socket(fd): return fd < 0 diff --git a/pypy/rlib/rsocket.py b/pypy/rlib/rsocket.py --- a/pypy/rlib/rsocket.py +++ b/pypy/rlib/rsocket.py @@ -20,6 +20,7 @@ from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.rffi import sizeof, offsetof +INVALID_SOCKET = _c.INVALID_SOCKET def mallocbuf(buffersize): return lltype.malloc(rffi.CCHARP.TO, buffersize, flavor='raw') From noreply at buildbot.pypy.org Tue Dec 6 14:34:22 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Tue, 6 Dec 2011 14:34:22 +0100 (CET) Subject: [pypy-commit] pypy type-specialized-instances: some fixes for type-specialized-attributes Message-ID: <20111206133422.EC0CA8205C@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: type-specialized-instances Changeset: r50207:940ed396c2d9 Date: 2011-12-06 14:34 +0100 http://bitbucket.org/pypy/pypy/changeset/940ed396c2d9/ Log: some fixes for type-specialized-attributes diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -42,7 +42,9 @@ try: attr.write_attr(obj, w_value) #obj._mapdict_write_storage(index, w_value) except OperationError: - firstattr = obj.map + if not e.match(self.space, self.space.w_TypeError): + raise + firstattr = obj._get_mapdict_map() firstattr.delete(obj, selector) firstattr.add_attr(obj, selector, w_value) return True @@ -158,7 +160,7 @@ oldattr._size_estimate = size_est if attr.length() > obj._mapdict_storage_length(): # note that attr.size_estimate() is always at least attr.length() - new_storage = [None] * attr.size_estimate() + new_storage = [PlainAttribute.erase_item(None)] * attr.size_estimate() for i in range(obj._mapdict_storage_length()): new_storage[i] = obj._mapdict_read_storage(i) obj._set_mapdict_storage_and_map(new_storage, attr) @@ -375,6 +377,7 @@ attrclass = PlainAttribute if is_taggable_int(space, w_value): attrclass = IntAttribute + return attrclass def _become(w_obj, new_obj): @@ -517,7 +520,7 @@ def _init_empty(self, map): from pypy.rlib.debug import make_sure_not_resized self.map = map - self.storage = make_sure_not_resized([None] * map.size_estimate()) + self.storage = make_sure_not_resized([PlainAttribute.erase_item(None)] * map.size_estimate()) def _mapdict_read_storage(self, index): assert index >= 0 @@ -918,7 +921,7 @@ version_tag) if w_method is None or isinstance(w_method, TypeCell): return - _fill_cache(pycode, nameindex, map, 
version_tag, -1, w_method) + _fill_cache(pycode, nameindex, map, version_tag, None, w_method) # XXX fix me: if a function contains a loop with both LOAD_ATTR and # XXX LOOKUP_METHOD on the same attribute name, it keeps trashing and From noreply at buildbot.pypy.org Tue Dec 6 14:38:17 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Tue, 6 Dec 2011 14:38:17 +0100 (CET) Subject: [pypy-commit] pypy type-specialized-instances: merge with default Message-ID: <20111206133817.56D5B8205C@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: type-specialized-instances Changeset: r50208:68bb4e39d162 Date: 2011-12-06 14:37 +0100 http://bitbucket.org/pypy/pypy/changeset/68bb4e39d162/ Log: merge with default diff too long, truncating to 10000 out of 39790 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -1,3 +1,4 @@ b590cf6de4190623aad9aa698694c22e614d67b9 release-1.5 b48df0bf4e75b81d98f19ce89d4a7dc3e1dab5e5 benchmarked d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6 +ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7 diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -74,7 +74,8 @@ self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file - open(name, "w") + f = open(name, "w") + f.close() self.files.append(name) def test_tempnam(self): diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -201,7 +201,7 @@ RegrTest('test_difflib.py'), RegrTest('test_dircache.py', core=True), RegrTest('test_dis.py'), - RegrTest('test_distutils.py'), + RegrTest('test_distutils.py', skip=True), RegrTest('test_dl.py', skip=True), RegrTest('test_doctest.py', usemodules="thread"), RegrTest('test_doctest2.py'), diff --git a/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py b/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py --- a/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py +++ b/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py @@ -1,6 +1,5 @@ import unittest from ctypes import * -from ctypes.test import xfail class MyInt(c_int): def __cmp__(self, other): @@ -27,7 +26,6 @@ self.assertEqual(None, cb()) - @xfail def test_int_callback(self): args = [] def func(arg): diff --git a/lib-python/modified-2.7/heapq.py b/lib-python/modified-2.7/heapq.py new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7/heapq.py @@ -0,0 +1,442 @@ +# -*- coding: latin-1 -*- + +"""Heap queue algorithm (a.k.a. priority queue). + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +Usage: + +heap = [] # creates an empty heap +heappush(heap, item) # pushes a new item on the heap +item = heappop(heap) # pops the smallest item from the heap +item = heap[0] # smallest item on the heap without popping it +heapify(x) # transforms list into a heap, in-place, in linear time +item = heapreplace(heap, item) # pops and returns smallest item, and adds + # new item; the heap size is unchanged + +Our API differs from textbook heap algorithms as follows: + +- We use 0-based indexing. 
This makes the relationship between the + index for a node and the indexes for its children slightly less + obvious, but is more suitable since Python uses 0-based indexing. + +- Our heappop() method returns the smallest item, not the largest. + +These two make it possible to view the heap as a regular Python list +without surprises: heap[0] is the smallest item, and heap.sort() +maintains the heap invariant! +""" + +# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger + +__about__ = """Heap queues + +[explanation by François Pinard] + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +The strange invariant above is meant to be an efficient memory +representation for a tournament. The numbers below are `k', not a[k]: + + 0 + + 1 2 + + 3 4 5 6 + + 7 8 9 10 11 12 13 14 + + 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 + + +In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In +an usual binary tournament we see in sports, each cell is the winner +over the two cells it tops, and we can trace the winner down the tree +to see all opponents s/he had. However, in many computer applications +of such tournaments, we do not need to trace the history of a winner. +To be more memory efficient, when a winner is promoted, we try to +replace it by something else at a lower level, and the rule becomes +that a cell and the two cells it tops contain three different items, +but the top cell "wins" over the two topped cells. + +If this heap invariant is protected at all time, index 0 is clearly +the overall winner. The simplest algorithmic way to remove it and +find the "next" winner is to move some loser (let's say cell 30 in the +diagram above) into the 0 position, and then percolate this new 0 down +the tree, exchanging values, until the invariant is re-established. +This is clearly logarithmic on the total number of items in the tree. +By iterating over all items, you get an O(n ln n) sort. + +A nice feature of this sort is that you can efficiently insert new +items while the sort is going on, provided that the inserted items are +not "better" than the last 0'th element you extracted. This is +especially useful in simulation contexts, where the tree holds all +incoming events, and the "win" condition means the smallest scheduled +time. When an event schedule other events for execution, they are +scheduled into the future, so they can easily go into the heap. So, a +heap is a good structure for implementing schedulers (this is what I +used for my MIDI sequencer :-). + +Various structures for implementing schedulers have been extensively +studied, and heaps are good for this, as they are reasonably speedy, +the speed is almost constant, and the worst case is not much different +than the average case. However, there are other representations which +are more efficient overall, yet the worst cases might be terrible. + +Heaps are also very useful in big disk sorts. You most probably all +know that a big sort implies producing "runs" (which are pre-sorted +sequences, which size is usually related to the amount of CPU memory), +followed by a merging passes for these runs, which merging is often +very cleverly organised[1]. It is very important that the initial +sort produces the longest runs possible. Tournaments are a good way +to that. 
If, using all the memory available to hold a tournament, you +replace and percolate items that happen to fit the current run, you'll +produce runs which are twice the size of the memory for random input, +and much better for input fuzzily ordered. + +Moreover, if you output the 0'th item on disk and get an input which +may not fit in the current tournament (because the value "wins" over +the last output value), it cannot fit in the heap, so the size of the +heap decreases. The freed memory could be cleverly reused immediately +for progressively building a second heap, which grows at exactly the +same rate the first heap is melting. When the first heap completely +vanishes, you switch heaps and start a new run. Clever and quite +effective! + +In a word, heaps are useful memory structures to know. I use them in +a few applications, and I think it is good to keep a `heap' module +around. :-) + +-------------------- +[1] The disk balancing algorithms which are current, nowadays, are +more annoying than clever, and this is a consequence of the seeking +capabilities of the disks. On devices which cannot seek, like big +tape drives, the story was quite different, and one had to be very +clever to ensure (far in advance) that each tape movement will be the +most effective possible (that is, will best participate at +"progressing" the merge). Some tapes were even able to read +backwards, and this was also used to avoid the rewinding time. +Believe me, real good tape sorts were quite spectacular to watch! +From all times, sorting has always been a Great Art! :-) +""" + +__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge', + 'nlargest', 'nsmallest', 'heappushpop'] + +from itertools import islice, repeat, count, imap, izip, tee, chain +from operator import itemgetter +import bisect + +def heappush(heap, item): + """Push item onto heap, maintaining the heap invariant.""" + heap.append(item) + _siftdown(heap, 0, len(heap)-1) + +def heappop(heap): + """Pop the smallest item off the heap, maintaining the heap invariant.""" + lastelt = heap.pop() # raises appropriate IndexError if heap is empty + if heap: + returnitem = heap[0] + heap[0] = lastelt + _siftup(heap, 0) + else: + returnitem = lastelt + return returnitem + +def heapreplace(heap, item): + """Pop and return the current smallest value, and add the new item. + + This is more efficient than heappop() followed by heappush(), and can be + more appropriate when using a fixed-size heap. Note that the value + returned may be larger than item! That constrains reasonable uses of + this routine unless written as part of a conditional replacement: + + if item > heap[0]: + item = heapreplace(heap, item) + """ + returnitem = heap[0] # raises appropriate IndexError if heap is empty + heap[0] = item + _siftup(heap, 0) + return returnitem + +def heappushpop(heap, item): + """Fast version of a heappush followed by a heappop.""" + if heap and heap[0] < item: + item, heap[0] = heap[0], item + _siftup(heap, 0) + return item + +def heapify(x): + """Transform list into a heap, in-place, in O(len(heap)) time.""" + n = len(x) + # Transform bottom-up. The largest index there's any point to looking at + # is the largest with a child index in-range, so must have 2*i + 1 < n, + # or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so + # j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is + # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1. 
+ for i in reversed(xrange(n//2)): + _siftup(x, i) + +def nlargest(n, iterable): + """Find the n largest elements in a dataset. + + Equivalent to: sorted(iterable, reverse=True)[:n] + """ + if n < 0: # for consistency with the c impl + return [] + it = iter(iterable) + result = list(islice(it, n)) + if not result: + return result + heapify(result) + _heappushpop = heappushpop + for elem in it: + _heappushpop(result, elem) + result.sort(reverse=True) + return result + +def nsmallest(n, iterable): + """Find the n smallest elements in a dataset. + + Equivalent to: sorted(iterable)[:n] + """ + if n < 0: # for consistency with the c impl + return [] + if hasattr(iterable, '__len__') and n * 10 <= len(iterable): + # For smaller values of n, the bisect method is faster than a minheap. + # It is also memory efficient, consuming only n elements of space. + it = iter(iterable) + result = sorted(islice(it, 0, n)) + if not result: + return result + insort = bisect.insort + pop = result.pop + los = result[-1] # los --> Largest of the nsmallest + for elem in it: + if los <= elem: + continue + insort(result, elem) + pop() + los = result[-1] + return result + # An alternative approach manifests the whole iterable in memory but + # saves comparisons by heapifying all at once. Also, saves time + # over bisect.insort() which has O(n) data movement time for every + # insertion. Finding the n smallest of an m length iterable requires + # O(m) + O(n log m) comparisons. + h = list(iterable) + heapify(h) + return map(heappop, repeat(h, min(n, len(h)))) + +# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos +# is the index of a leaf with a possibly out-of-order value. Restore the +# heap invariant. +def _siftdown(heap, startpos, pos): + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. + while pos > startpos: + parentpos = (pos - 1) >> 1 + parent = heap[parentpos] + if newitem < parent: + heap[pos] = parent + pos = parentpos + continue + break + heap[pos] = newitem + +# The child indices of heap index pos are already heaps, and we want to make +# a heap at index pos too. We do this by bubbling the smaller child of +# pos up (and so on with that child's children, etc) until hitting a leaf, +# then using _siftdown to move the oddball originally at index pos into place. +# +# We *could* break out of the loop as soon as we find a pos where newitem <= +# both its children, but turns out that's not a good idea, and despite that +# many books write the algorithm that way. During a heap pop, the last array +# element is sifted in, and that tends to be large, so that comparing it +# against values starting from the root usually doesn't pay (= usually doesn't +# get us out of the loop early). See Knuth, Volume 3, where this is +# explained and quantified in an exercise. +# +# Cutting the # of comparisons is important, since these routines have no +# way to extract "the priority" from an array element, so that intelligence +# is likely to be hiding in custom __cmp__ methods, or in array elements +# storing (priority, record) tuples. Comparisons are thus potentially +# expensive. +# +# On random arrays of length 1000, making this change cut the number of +# comparisons made by heapify() a little, and those made by exhaustive +# heappop() a lot, in accord with theory. 
Here are typical results from 3 +# runs (3 just to demonstrate how small the variance is): +# +# Compares needed by heapify Compares needed by 1000 heappops +# -------------------------- -------------------------------- +# 1837 cut to 1663 14996 cut to 8680 +# 1855 cut to 1659 14966 cut to 8678 +# 1847 cut to 1660 15024 cut to 8703 +# +# Building the heap by using heappush() 1000 times instead required +# 2198, 2148, and 2219 compares: heapify() is more efficient, when +# you can use it. +# +# The total compares needed by list.sort() on the same lists were 8627, +# 8627, and 8632 (this should be compared to the sum of heapify() and +# heappop() compares): list.sort() is (unsurprisingly!) more efficient +# for sorting. + +def _siftup(heap, pos): + endpos = len(heap) + startpos = pos + newitem = heap[pos] + # Bubble up the smaller child until hitting a leaf. + childpos = 2*pos + 1 # leftmost child position + while childpos < endpos: + # Set childpos to index of smaller child. + rightpos = childpos + 1 + if rightpos < endpos and not heap[childpos] < heap[rightpos]: + childpos = rightpos + # Move the smaller child up. + heap[pos] = heap[childpos] + pos = childpos + childpos = 2*pos + 1 + # The leaf at pos is empty now. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). + heap[pos] = newitem + _siftdown(heap, startpos, pos) + +# If available, use C implementation +try: + from _heapq import * +except ImportError: + pass + +def merge(*iterables): + '''Merge multiple sorted inputs into a single sorted output. + + Similar to sorted(itertools.chain(*iterables)) but returns a generator, + does not pull the data into memory all at once, and assumes that each of + the input streams is already sorted (smallest to largest). + + >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) + [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] + + ''' + _heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration + + h = [] + h_append = h.append + for itnum, it in enumerate(map(iter, iterables)): + try: + next = it.next + h_append([next(), itnum, next]) + except _StopIteration: + pass + heapify(h) + + while 1: + try: + while 1: + v, itnum, next = s = h[0] # raises IndexError when h is empty + yield v + s[0] = next() # raises StopIteration when exhausted + _heapreplace(h, s) # restore heap condition + except _StopIteration: + _heappop(h) # remove empty iterator + except IndexError: + return + +# Extend the implementations of nsmallest and nlargest to use a key= argument +_nsmallest = nsmallest +def nsmallest(n, iterable, key=None): + """Find the n smallest elements in a dataset. 
+ + Equivalent to: sorted(iterable, key=key)[:n] + """ + # Short-cut for n==1 is to use min() when len(iterable)>0 + if n == 1: + it = iter(iterable) + head = list(islice(it, 1)) + if not head: + return [] + if key is None: + return [min(chain(head, it))] + return [min(chain(head, it), key=key)] + + # When n>=size, it's faster to use sort() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key)[:n] + + # When key is none, use simpler decoration + if key is None: + it = izip(iterable, count()) # decorate + result = _nsmallest(n, it) + return map(itemgetter(0), result) # undecorate + + # General case, slowest method + in1, in2 = tee(iterable) + it = izip(imap(key, in1), count(), in2) # decorate + result = _nsmallest(n, it) + return map(itemgetter(2), result) # undecorate + +_nlargest = nlargest +def nlargest(n, iterable, key=None): + """Find the n largest elements in a dataset. + + Equivalent to: sorted(iterable, key=key, reverse=True)[:n] + """ + + # Short-cut for n==1 is to use max() when len(iterable)>0 + if n == 1: + it = iter(iterable) + head = list(islice(it, 1)) + if not head: + return [] + if key is None: + return [max(chain(head, it))] + return [max(chain(head, it), key=key)] + + # When n>=size, it's faster to use sort() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key, reverse=True)[:n] + + # When key is none, use simpler decoration + if key is None: + it = izip(iterable, count(0,-1)) # decorate + result = _nlargest(n, it) + return map(itemgetter(0), result) # undecorate + + # General case, slowest method + in1, in2 = tee(iterable) + it = izip(imap(key, in1), count(0,-1), in2) # decorate + result = _nlargest(n, it) + return map(itemgetter(2), result) # undecorate + +if __name__ == "__main__": + # Simple sanity test + heap = [] + data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] + for item in data: + heappush(heap, item) + sort = [] + while heap: + sort.append(heappop(heap)) + print sort + + import doctest + doctest.testmod() diff --git a/lib-python/modified-2.7/httplib.py b/lib-python/modified-2.7/httplib.py new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7/httplib.py @@ -0,0 +1,1377 @@ +"""HTTP/1.1 client library + + + + +HTTPConnection goes through a number of "states", which define when a client +may legally make another request or fetch the response for a particular +request. This diagram details these state transitions: + + (null) + | + | HTTPConnection() + v + Idle + | + | putrequest() + v + Request-started + | + | ( putheader() )* endheaders() + v + Request-sent + | + | response = getresponse() + v + Unread-response [Response-headers-read] + |\____________________ + | | + | response.read() | putrequest() + v v + Idle Req-started-unread-response + ______/| + / | + response.read() | | ( putheader() )* endheaders() + v v + Request-started Req-sent-unread-response + | + | response.read() + v + Request-sent + +This diagram presents the following rules: + -- a second request may not be started until {response-headers-read} + -- a response [object] cannot be retrieved until {request-sent} + -- there is no differentiation between an unread response body and a + partially read response body + +Note: this enforcement is applied by the HTTPConnection class. The + HTTPResponse class does not enforce this state machine, which + implies sophisticated clients may accelerate the request/response + pipeline. 
Caution should be taken, though: accelerating the states + beyond the above pattern may imply knowledge of the server's + connection-close behavior for certain requests. For example, it + is impossible to tell whether the server will close the connection + UNTIL the response headers have been read; this means that further + requests cannot be placed into the pipeline until it is known that + the server will NOT be closing the connection. + +Logical State __state __response +------------- ------- ---------- +Idle _CS_IDLE None +Request-started _CS_REQ_STARTED None +Request-sent _CS_REQ_SENT None +Unread-response _CS_IDLE +Req-started-unread-response _CS_REQ_STARTED +Req-sent-unread-response _CS_REQ_SENT +""" + +from array import array +import os +import socket +from sys import py3kwarning +from urlparse import urlsplit +import warnings +with warnings.catch_warnings(): + if py3kwarning: + warnings.filterwarnings("ignore", ".*mimetools has been removed", + DeprecationWarning) + import mimetools + +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + +__all__ = ["HTTP", "HTTPResponse", "HTTPConnection", + "HTTPException", "NotConnected", "UnknownProtocol", + "UnknownTransferEncoding", "UnimplementedFileMode", + "IncompleteRead", "InvalidURL", "ImproperConnectionState", + "CannotSendRequest", "CannotSendHeader", "ResponseNotReady", + "BadStatusLine", "error", "responses"] + +HTTP_PORT = 80 +HTTPS_PORT = 443 + +_UNKNOWN = 'UNKNOWN' + +# connection states +_CS_IDLE = 'Idle' +_CS_REQ_STARTED = 'Request-started' +_CS_REQ_SENT = 'Request-sent' + +# status codes +# informational +CONTINUE = 100 +SWITCHING_PROTOCOLS = 101 +PROCESSING = 102 + +# successful +OK = 200 +CREATED = 201 +ACCEPTED = 202 +NON_AUTHORITATIVE_INFORMATION = 203 +NO_CONTENT = 204 +RESET_CONTENT = 205 +PARTIAL_CONTENT = 206 +MULTI_STATUS = 207 +IM_USED = 226 + +# redirection +MULTIPLE_CHOICES = 300 +MOVED_PERMANENTLY = 301 +FOUND = 302 +SEE_OTHER = 303 +NOT_MODIFIED = 304 +USE_PROXY = 305 +TEMPORARY_REDIRECT = 307 + +# client error +BAD_REQUEST = 400 +UNAUTHORIZED = 401 +PAYMENT_REQUIRED = 402 +FORBIDDEN = 403 +NOT_FOUND = 404 +METHOD_NOT_ALLOWED = 405 +NOT_ACCEPTABLE = 406 +PROXY_AUTHENTICATION_REQUIRED = 407 +REQUEST_TIMEOUT = 408 +CONFLICT = 409 +GONE = 410 +LENGTH_REQUIRED = 411 +PRECONDITION_FAILED = 412 +REQUEST_ENTITY_TOO_LARGE = 413 +REQUEST_URI_TOO_LONG = 414 +UNSUPPORTED_MEDIA_TYPE = 415 +REQUESTED_RANGE_NOT_SATISFIABLE = 416 +EXPECTATION_FAILED = 417 +UNPROCESSABLE_ENTITY = 422 +LOCKED = 423 +FAILED_DEPENDENCY = 424 +UPGRADE_REQUIRED = 426 + +# server error +INTERNAL_SERVER_ERROR = 500 +NOT_IMPLEMENTED = 501 +BAD_GATEWAY = 502 +SERVICE_UNAVAILABLE = 503 +GATEWAY_TIMEOUT = 504 +HTTP_VERSION_NOT_SUPPORTED = 505 +INSUFFICIENT_STORAGE = 507 +NOT_EXTENDED = 510 + +# Mapping status codes to official W3C names +responses = { + 100: 'Continue', + 101: 'Switching Protocols', + + 200: 'OK', + 201: 'Created', + 202: 'Accepted', + 203: 'Non-Authoritative Information', + 204: 'No Content', + 205: 'Reset Content', + 206: 'Partial Content', + + 300: 'Multiple Choices', + 301: 'Moved Permanently', + 302: 'Found', + 303: 'See Other', + 304: 'Not Modified', + 305: 'Use Proxy', + 306: '(Unused)', + 307: 'Temporary Redirect', + + 400: 'Bad Request', + 401: 'Unauthorized', + 402: 'Payment Required', + 403: 'Forbidden', + 404: 'Not Found', + 405: 'Method Not Allowed', + 406: 'Not Acceptable', + 407: 'Proxy Authentication Required', + 408: 'Request Timeout', + 409: 'Conflict', + 410: 'Gone', + 411: 
'Length Required', + 412: 'Precondition Failed', + 413: 'Request Entity Too Large', + 414: 'Request-URI Too Long', + 415: 'Unsupported Media Type', + 416: 'Requested Range Not Satisfiable', + 417: 'Expectation Failed', + + 500: 'Internal Server Error', + 501: 'Not Implemented', + 502: 'Bad Gateway', + 503: 'Service Unavailable', + 504: 'Gateway Timeout', + 505: 'HTTP Version Not Supported', +} + +# maximal amount of data to read at one time in _safe_read +MAXAMOUNT = 1048576 + +class HTTPMessage(mimetools.Message): + + def addheader(self, key, value): + """Add header for field key handling repeats.""" + prev = self.dict.get(key) + if prev is None: + self.dict[key] = value + else: + combined = ", ".join((prev, value)) + self.dict[key] = combined + + def addcontinue(self, key, more): + """Add more field data from a continuation line.""" + prev = self.dict[key] + self.dict[key] = prev + "\n " + more + + def readheaders(self): + """Read header lines. + + Read header lines up to the entirely blank line that terminates them. + The (normally blank) line that ends the headers is skipped, but not + included in the returned list. If a non-header line ends the headers, + (which is an error), an attempt is made to backspace over it; it is + never included in the returned list. + + The variable self.status is set to the empty string if all went well, + otherwise it is an error message. The variable self.headers is a + completely uninterpreted list of lines contained in the header (so + printing them will reproduce the header exactly as it appears in the + file). + + If multiple header fields with the same name occur, they are combined + according to the rules in RFC 2616 sec 4.2: + + Appending each subsequent field-value to the first, each separated + by a comma. The order in which header fields with the same field-name + are received is significant to the interpretation of the combined + field value. + """ + # XXX The implementation overrides the readheaders() method of + # rfc822.Message. The base class design isn't amenable to + # customized behavior here so the method here is a copy of the + # base class code with a few small changes. + + self.dict = {} + self.unixfrom = '' + self.headers = hlist = [] + self.status = '' + headerseen = "" + firstline = 1 + startofline = unread = tell = None + if hasattr(self.fp, 'unread'): + unread = self.fp.unread + elif self.seekable: + tell = self.fp.tell + while True: + if tell: + try: + startofline = tell() + except IOError: + startofline = tell = None + self.seekable = 0 + line = self.fp.readline() + if not line: + self.status = 'EOF in headers' + break + # Skip unix From name time lines + if firstline and line.startswith('From '): + self.unixfrom = self.unixfrom + line + continue + firstline = 0 + if headerseen and line[0] in ' \t': + # XXX Not sure if continuation lines are handled properly + # for http and/or for repeating headers + # It's a continuation line. + hlist.append(line) + self.addcontinue(headerseen, line.strip()) + continue + elif self.iscomment(line): + # It's a comment. Ignore it. + continue + elif self.islast(line): + # Note! No pushback here! The delimiter line gets eaten. + break + headerseen = self.isheader(line) + if headerseen: + # It's a legal header line, save it. + hlist.append(line) + self.addheader(headerseen, line[len(headerseen)+1:].strip()) + continue + else: + # It's not a header line; throw it back and stop here. 
+ if not self.dict: + self.status = 'No headers' + else: + self.status = 'Non-header line where header expected' + # Try to undo the read. + if unread: + unread(line) + elif tell: + self.fp.seek(startofline) + else: + self.status = self.status + '; bad seek' + break + +class HTTPResponse: + + # strict: If true, raise BadStatusLine if the status line can't be + # parsed as a valid HTTP/1.0 or 1.1 status line. By default it is + # false because it prevents clients from talking to HTTP/0.9 + # servers. Note that a response with a sufficiently corrupted + # status line will look like an HTTP/0.9 response. + + # See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details. + + def __init__(self, sock, debuglevel=0, strict=0, method=None, buffering=False): + if buffering: + # The caller won't be using any sock.recv() calls, so buffering + # is fine and recommended for performance. + self.fp = sock.makefile('rb') + else: + # The buffer size is specified as zero, because the headers of + # the response are read with readline(). If the reads were + # buffered the readline() calls could consume some of the + # response, which make be read via a recv() on the underlying + # socket. + self.fp = sock.makefile('rb', 0) + self.debuglevel = debuglevel + self.strict = strict + self._method = method + + self.msg = None + + # from the Status-Line of the response + self.version = _UNKNOWN # HTTP-Version + self.status = _UNKNOWN # Status-Code + self.reason = _UNKNOWN # Reason-Phrase + + self.chunked = _UNKNOWN # is "chunked" being used? + self.chunk_left = _UNKNOWN # bytes left to read in current chunk + self.length = _UNKNOWN # number of bytes left in response + self.will_close = _UNKNOWN # conn will close at end of response + + def _read_status(self): + # Initialize with Simple-Response defaults + line = self.fp.readline() + if self.debuglevel > 0: + print "reply:", repr(line) + if not line: + # Presumably, the server closed the connection before + # sending a valid response. + raise BadStatusLine(line) + try: + [version, status, reason] = line.split(None, 2) + except ValueError: + try: + [version, status] = line.split(None, 1) + reason = "" + except ValueError: + # empty version will cause next test to fail and status + # will be treated as 0.9 response. 
+ version = "" + if not version.startswith('HTTP/'): + if self.strict: + self.close() + raise BadStatusLine(line) + else: + # assume it's a Simple-Response from an 0.9 server + self.fp = LineAndFileWrapper(line, self.fp) + return "HTTP/0.9", 200, "" + + # The status code is a three-digit number + try: + status = int(status) + if status < 100 or status > 999: + raise BadStatusLine(line) + except ValueError: + raise BadStatusLine(line) + return version, status, reason + + def begin(self): + if self.msg is not None: + # we've already started reading the response + return + + # read until we get a non-100 response + while True: + version, status, reason = self._read_status() + if status != CONTINUE: + break + # skip the header from the 100 response + while True: + skip = self.fp.readline().strip() + if not skip: + break + if self.debuglevel > 0: + print "header:", skip + + self.status = status + self.reason = reason.strip() + if version == 'HTTP/1.0': + self.version = 10 + elif version.startswith('HTTP/1.'): + self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1 + elif version == 'HTTP/0.9': + self.version = 9 + else: + raise UnknownProtocol(version) + + if self.version == 9: + self.length = None + self.chunked = 0 + self.will_close = 1 + self.msg = HTTPMessage(StringIO()) + return + + self.msg = HTTPMessage(self.fp, 0) + if self.debuglevel > 0: + for hdr in self.msg.headers: + print "header:", hdr, + + # don't let the msg keep an fp + self.msg.fp = None + + # are we using the chunked-style of transfer encoding? + tr_enc = self.msg.getheader('transfer-encoding') + if tr_enc and tr_enc.lower() == "chunked": + self.chunked = 1 + self.chunk_left = None + else: + self.chunked = 0 + + # will the connection close at the end of the response? + self.will_close = self._check_close() + + # do we have a Content-Length? + # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked" + length = self.msg.getheader('content-length') + if length and not self.chunked: + try: + self.length = int(length) + except ValueError: + self.length = None + else: + if self.length < 0: # ignore nonsensical negative lengths + self.length = None + else: + self.length = None + + # does the body have a fixed length? (of zero) + if (status == NO_CONTENT or status == NOT_MODIFIED or + 100 <= status < 200 or # 1xx codes + self._method == 'HEAD'): + self.length = 0 + + # if the connection remains open, and we aren't using chunked, and + # a content-length was not provided, then assume that the connection + # WILL close. + if not self.will_close and \ + not self.chunked and \ + self.length is None: + self.will_close = 1 + + def _check_close(self): + conn = self.msg.getheader('connection') + if self.version == 11: + # An HTTP/1.1 proxy is assumed to stay open unless + # explicitly closed. + conn = self.msg.getheader('connection') + if conn and "close" in conn.lower(): + return True + return False + + # Some HTTP/1.0 implementations have support for persistent + # connections, using rules different than HTTP/1.1. + + # For older HTTP, Keep-Alive indicates persistent connection. + if self.msg.getheader('keep-alive'): + return False + + # At least Akamai returns a "Connection: Keep-Alive" header, + # which was supposed to be sent by the client. + if conn and "keep-alive" in conn.lower(): + return False + + # Proxy-Connection is a netscape hack. 
+ pconn = self.msg.getheader('proxy-connection') + if pconn and "keep-alive" in pconn.lower(): + return False + + # otherwise, assume it will close + return True + + def close(self): + if self.fp: + self.fp.close() + self.fp = None + + def isclosed(self): + # NOTE: it is possible that we will not ever call self.close(). This + # case occurs when will_close is TRUE, length is None, and we + # read up to the last byte, but NOT past it. + # + # IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be + # called, meaning self.isclosed() is meaningful. + return self.fp is None + + # XXX It would be nice to have readline and __iter__ for this, too. + + def read(self, amt=None): + if self.fp is None: + return '' + + if self._method == 'HEAD': + self.close() + return '' + + if self.chunked: + return self._read_chunked(amt) + + if amt is None: + # unbounded read + if self.length is None: + s = self.fp.read() + else: + s = self._safe_read(self.length) + self.length = 0 + self.close() # we read everything + return s + + if self.length is not None: + if amt > self.length: + # clip the read to the "end of response" + amt = self.length + + # we do not use _safe_read() here because this may be a .will_close + # connection, and the user is reading more bytes than will be provided + # (for example, reading in 1k chunks) + s = self.fp.read(amt) + if self.length is not None: + self.length -= len(s) + if not self.length: + self.close() + return s + + def _read_chunked(self, amt): + assert self.chunked != _UNKNOWN + chunk_left = self.chunk_left + value = [] + while True: + if chunk_left is None: + line = self.fp.readline() + i = line.find(';') + if i >= 0: + line = line[:i] # strip chunk-extensions + try: + chunk_left = int(line, 16) + except ValueError: + # close the connection as protocol synchronisation is + # probably lost + self.close() + raise IncompleteRead(''.join(value)) + if chunk_left == 0: + break + if amt is None: + value.append(self._safe_read(chunk_left)) + elif amt < chunk_left: + value.append(self._safe_read(amt)) + self.chunk_left = chunk_left - amt + return ''.join(value) + elif amt == chunk_left: + value.append(self._safe_read(amt)) + self._safe_read(2) # toss the CRLF at the end of the chunk + self.chunk_left = None + return ''.join(value) + else: + value.append(self._safe_read(chunk_left)) + amt -= chunk_left + + # we read the whole chunk, get another + self._safe_read(2) # toss the CRLF at the end of the chunk + chunk_left = None + + # read and discard trailer up to the CRLF terminator + ### note: we shouldn't have any trailers! + while True: + line = self.fp.readline() + if not line: + # a vanishingly small number of sites EOF without + # sending the trailer + break + if line == '\r\n': + break + + # we read everything; close the "file" + self.close() + + return ''.join(value) + + def _safe_read(self, amt): + """Read the number of bytes requested, compensating for partial reads. + + Normally, we have a blocking socket, but a read() can be interrupted + by a signal (resulting in a partial read). + + Note that we cannot distinguish between EOF and an interrupt when zero + bytes have been read. IncompleteRead() will be raised in this + situation. + + This function should be used when bytes "should" be present for + reading. If the bytes are truly not available (due to EOF), then the + IncompleteRead exception can be used to detect the problem. + """ + # NOTE(gps): As of svn r74426 socket._fileobject.read(x) will never + # return less than x bytes unless EOF is encountered. 
It now handles + # signal interruptions (socket.error EINTR) internally. This code + # never caught that exception anyways. It seems largely pointless. + # self.fp.read(amt) will work fine. + s = [] + while amt > 0: + chunk = self.fp.read(min(amt, MAXAMOUNT)) + if not chunk: + raise IncompleteRead(''.join(s), amt) + s.append(chunk) + amt -= len(chunk) + return ''.join(s) + + def fileno(self): + return self.fp.fileno() + + def getheader(self, name, default=None): + if self.msg is None: + raise ResponseNotReady() + return self.msg.getheader(name, default) + + def getheaders(self): + """Return list of (header, value) tuples.""" + if self.msg is None: + raise ResponseNotReady() + return self.msg.items() + + +class HTTPConnection: + + _http_vsn = 11 + _http_vsn_str = 'HTTP/1.1' + + response_class = HTTPResponse + default_port = HTTP_PORT + auto_open = 1 + debuglevel = 0 + strict = 0 + + def __init__(self, host, port=None, strict=None, + timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None): + self.timeout = timeout + self.source_address = source_address + self.sock = None + self._buffer = [] + self.__response = None + self.__state = _CS_IDLE + self._method = None + self._tunnel_host = None + self._tunnel_port = None + self._tunnel_headers = {} + + self._set_hostport(host, port) + if strict is not None: + self.strict = strict + + def set_tunnel(self, host, port=None, headers=None): + """ Sets up the host and the port for the HTTP CONNECT Tunnelling. + + The headers argument should be a mapping of extra HTTP headers + to send with the CONNECT request. + """ + self._tunnel_host = host + self._tunnel_port = port + if headers: + self._tunnel_headers = headers + else: + self._tunnel_headers.clear() + + def _set_hostport(self, host, port): + if port is None: + i = host.rfind(':') + j = host.rfind(']') # ipv6 addresses have [...] + if i > j: + try: + port = int(host[i+1:]) + except ValueError: + raise InvalidURL("nonnumeric port: '%s'" % host[i+1:]) + host = host[:i] + else: + port = self.default_port + if host and host[0] == '[' and host[-1] == ']': + host = host[1:-1] + self.host = host + self.port = port + + def set_debuglevel(self, level): + self.debuglevel = level + + def _tunnel(self): + self._set_hostport(self._tunnel_host, self._tunnel_port) + self.send("CONNECT %s:%d HTTP/1.0\r\n" % (self.host, self.port)) + for header, value in self._tunnel_headers.iteritems(): + self.send("%s: %s\r\n" % (header, value)) + self.send("\r\n") + response = self.response_class(self.sock, strict = self.strict, + method = self._method) + (version, code, message) = response._read_status() + + if code != 200: + self.close() + raise socket.error("Tunnel connection failed: %d %s" % (code, + message.strip())) + while True: + line = response.fp.readline() + if line == '\r\n': break + + + def connect(self): + """Connect to the host and port specified in __init__.""" + self.sock = socket.create_connection((self.host,self.port), + self.timeout, self.source_address) + + if self._tunnel_host: + self._tunnel() + + def close(self): + """Close the connection to the HTTP server.""" + if self.sock: + self.sock.close() # close it manually... 
there may be other refs + self.sock = None + if self.__response: + self.__response.close() + self.__response = None + self.__state = _CS_IDLE + + def send(self, data): + """Send `data' to the server.""" + if self.sock is None: + if self.auto_open: + self.connect() + else: + raise NotConnected() + + if self.debuglevel > 0: + print "send:", repr(data) + blocksize = 8192 + if hasattr(data,'read') and not isinstance(data, array): + if self.debuglevel > 0: print "sendIng a read()able" + datablock = data.read(blocksize) + while datablock: + self.sock.sendall(datablock) + datablock = data.read(blocksize) + else: + self.sock.sendall(data) + + def _output(self, s): + """Add a line of output to the current request buffer. + + Assumes that the line does *not* end with \\r\\n. + """ + self._buffer.append(s) + + def _send_output(self, message_body=None): + """Send the currently buffered request and clear the buffer. + + Appends an extra \\r\\n to the buffer. + A message_body may be specified, to be appended to the request. + """ + self._buffer.extend(("", "")) + msg = "\r\n".join(self._buffer) + del self._buffer[:] + # If msg and message_body are sent in a single send() call, + # it will avoid performance problems caused by the interaction + # between delayed ack and the Nagle algorithim. + if isinstance(message_body, str): + msg += message_body + message_body = None + self.send(msg) + if message_body is not None: + #message_body was not a string (i.e. it is a file) and + #we must run the risk of Nagle + self.send(message_body) + + def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0): + """Send a request to the server. + + `method' specifies an HTTP request method, e.g. 'GET'. + `url' specifies the object being requested, e.g. '/index.html'. + `skip_host' if True does not add automatically a 'Host:' header + `skip_accept_encoding' if True does not add automatically an + 'Accept-Encoding:' header + """ + + # if a prior response has been completed, then forget about it. + if self.__response and self.__response.isclosed(): + self.__response = None + + + # in certain cases, we cannot issue another request on this connection. + # this occurs when: + # 1) we are in the process of sending a request. (_CS_REQ_STARTED) + # 2) a response to a previous request has signalled that it is going + # to close the connection upon completion. + # 3) the headers for the previous response have not been read, thus + # we cannot determine whether point (2) is true. (_CS_REQ_SENT) + # + # if there is no prior response, then we can request at will. + # + # if point (2) is true, then we will have passed the socket to the + # response (effectively meaning, "there is no prior response"), and + # will open a new one when a new request is made. + # + # Note: if a prior response exists, then we *can* start a new request. + # We are not allowed to begin fetching the response to this new + # request, however, until that prior response is complete. + # + if self.__state == _CS_IDLE: + self.__state = _CS_REQ_STARTED + else: + raise CannotSendRequest() + + # Save the method we use, we need it later in the response phase + self._method = method + if not url: + url = '/' + hdr = '%s %s %s' % (method, url, self._http_vsn_str) + + self._output(hdr) + + if self._http_vsn == 11: + # Issue some standard headers for better HTTP/1.1 compliance + + if not skip_host: + # this header is issued *only* for HTTP/1.1 + # connections. 
more specifically, this means it is + # only issued when the client uses the new + # HTTPConnection() class. backwards-compat clients + # will be using HTTP/1.0 and those clients may be + # issuing this header themselves. we should NOT issue + # it twice; some web servers (such as Apache) barf + # when they see two Host: headers + + # If we need a non-standard port,include it in the + # header. If the request is going through a proxy, + # but the host of the actual URL, not the host of the + # proxy. + + netloc = '' + if url.startswith('http'): + nil, netloc, nil, nil, nil = urlsplit(url) + + if netloc: + try: + netloc_enc = netloc.encode("ascii") + except UnicodeEncodeError: + netloc_enc = netloc.encode("idna") + self.putheader('Host', netloc_enc) + else: + try: + host_enc = self.host.encode("ascii") + except UnicodeEncodeError: + host_enc = self.host.encode("idna") + # Wrap the IPv6 Host Header with [] (RFC 2732) + if host_enc.find(':') >= 0: + host_enc = "[" + host_enc + "]" + if self.port == self.default_port: + self.putheader('Host', host_enc) + else: + self.putheader('Host', "%s:%s" % (host_enc, self.port)) + + # note: we are assuming that clients will not attempt to set these + # headers since *this* library must deal with the + # consequences. this also means that when the supporting + # libraries are updated to recognize other forms, then this + # code should be changed (removed or updated). + + # we only want a Content-Encoding of "identity" since we don't + # support encodings such as x-gzip or x-deflate. + if not skip_accept_encoding: + self.putheader('Accept-Encoding', 'identity') + + # we can accept "chunked" Transfer-Encodings, but no others + # NOTE: no TE header implies *only* "chunked" + #self.putheader('TE', 'chunked') + + # if TE is supplied in the header, then it must appear in a + # Connection header. + #self.putheader('Connection', 'TE') + + else: + # For HTTP/1.0, the server will assume "not chunked" + pass + + def putheader(self, header, *values): + """Send a request header line to the server. + + For example: h.putheader('Accept', 'text/html') + """ + if self.__state != _CS_REQ_STARTED: + raise CannotSendHeader() + + hdr = '%s: %s' % (header, '\r\n\t'.join([str(v) for v in values])) + self._output(hdr) + + def endheaders(self, message_body=None): + """Indicate that the last header line has been sent to the server. + + This method sends the request to the server. The optional + message_body argument can be used to pass message body + associated with the request. The message body will be sent in + the same packet as the message headers if possible. The + message_body should be a string. + """ + if self.__state == _CS_REQ_STARTED: + self.__state = _CS_REQ_SENT + else: + raise CannotSendHeader() + self._send_output(message_body) + + def request(self, method, url, body=None, headers={}): + """Send a complete request to the server.""" + self._send_request(method, url, body, headers) + + def _set_content_length(self, body): + # Set the content-length based on the body. + thelen = None + try: + thelen = str(len(body)) + except TypeError, te: + # If this is a file-like object, try to + # fstat its file descriptor + try: + thelen = str(os.fstat(body.fileno()).st_size) + except (AttributeError, OSError): + # Don't send a length if this failed + if self.debuglevel > 0: print "Cannot stat!!" 
+ + if thelen is not None: + self.putheader('Content-Length', thelen) + + def _send_request(self, method, url, body, headers): + # Honor explicitly requested Host: and Accept-Encoding: headers. + header_names = dict.fromkeys([k.lower() for k in headers]) + skips = {} + if 'host' in header_names: + skips['skip_host'] = 1 + if 'accept-encoding' in header_names: + skips['skip_accept_encoding'] = 1 + + self.putrequest(method, url, **skips) + + if body and ('content-length' not in header_names): + self._set_content_length(body) + for hdr, value in headers.iteritems(): + self.putheader(hdr, value) + self.endheaders(body) + + def getresponse(self, buffering=False): + "Get the response from the server." + + # if a prior response has been completed, then forget about it. + if self.__response and self.__response.isclosed(): + self.__response = None + + # + # if a prior response exists, then it must be completed (otherwise, we + # cannot read this response's header to determine the connection-close + # behavior) + # + # note: if a prior response existed, but was connection-close, then the + # socket and response were made independent of this HTTPConnection + # object since a new request requires that we open a whole new + # connection + # + # this means the prior response had one of two states: + # 1) will_close: this connection was reset and the prior socket and + # response operate independently + # 2) persistent: the response was retained and we await its + # isclosed() status to become true. + # + if self.__state != _CS_REQ_SENT or self.__response: + raise ResponseNotReady() + + args = (self.sock,) + kwds = {"strict":self.strict, "method":self._method} + if self.debuglevel > 0: + args += (self.debuglevel,) + if buffering: + #only add this keyword if non-default, for compatibility with + #other response_classes. + kwds["buffering"] = True; + response = self.response_class(*args, **kwds) + + try: + response.begin() + except: + response.close() + raise + assert response.will_close != _UNKNOWN + self.__state = _CS_IDLE + + if response.will_close: + # this effectively passes the connection to the response + self.close() + else: + # remember this, so we can tell when it is complete + self.__response = response + + return response + + +class HTTP: + "Compatibility class with httplib.py from 1.5." + + _http_vsn = 10 + _http_vsn_str = 'HTTP/1.0' + + debuglevel = 0 + + _connection_class = HTTPConnection + + def __init__(self, host='', port=None, strict=None): + "Provide a default host, since the superclass requires one." + + # some joker passed 0 explicitly, meaning default port + if port == 0: + port = None + + # Note that we may pass an empty string as the host; this will throw + # an error when we attempt to connect. Presumably, the client code + # will call connect before then, with a proper host. + self._setup(self._connection_class(host, port, strict)) + + def _setup(self, conn): + self._conn = conn + + # set up delegation to flesh out interface + self.send = conn.send + self.putrequest = conn.putrequest + self.putheader = conn.putheader + self.endheaders = conn.endheaders + self.set_debuglevel = conn.set_debuglevel + + conn._http_vsn = self._http_vsn + conn._http_vsn_str = self._http_vsn_str + + self.file = None + + def connect(self, host=None, port=None): + "Accept arguments to set the host/port, since the superclass doesn't." 
+ + if host is not None: + self._conn._set_hostport(host, port) + self._conn.connect() + + def getfile(self): + "Provide a getfile, since the superclass' does not use this concept." + return self.file + + def getreply(self, buffering=False): + """Compat definition since superclass does not define it. + + Returns a tuple consisting of: + - server status code (e.g. '200' if all goes well) + - server "reason" corresponding to status code + - any RFC822 headers in the response from the server + """ + try: + if not buffering: + response = self._conn.getresponse() + else: + #only add this keyword if non-default for compatibility + #with other connection classes + response = self._conn.getresponse(buffering) + except BadStatusLine, e: + ### hmm. if getresponse() ever closes the socket on a bad request, + ### then we are going to have problems with self.sock + + ### should we keep this behavior? do people use it? + # keep the socket open (as a file), and return it + self.file = self._conn.sock.makefile('rb', 0) + + # close our socket -- we want to restart after any protocol error + self.close() + + self.headers = None + return -1, e.line, None + + self.headers = response.msg + self.file = response.fp + return response.status, response.reason, response.msg + + def close(self): + self._conn.close() + + # note that self.file == response.fp, which gets closed by the + # superclass. just clear the object ref here. + ### hmm. messy. if status==-1, then self.file is owned by us. + ### well... we aren't explicitly closing, but losing this ref will + ### do it + self.file = None + +try: + import ssl +except ImportError: + pass +else: + class HTTPSConnection(HTTPConnection): + "This class allows communication via SSL." + + default_port = HTTPS_PORT + + def __init__(self, host, port=None, key_file=None, cert_file=None, + strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + source_address=None): + HTTPConnection.__init__(self, host, port, strict, timeout, + source_address) + self.key_file = key_file + self.cert_file = cert_file + + def connect(self): + "Connect to a host on a given (SSL) port." + + sock = socket.create_connection((self.host, self.port), + self.timeout, self.source_address) + if self._tunnel_host: + self.sock = sock + self._tunnel() + self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file) + + __all__.append("HTTPSConnection") + + class HTTPS(HTTP): + """Compatibility with 1.5 httplib interface + + Python 1.5.2 did not have an HTTPS class, but it defined an + interface for sending http requests that is also useful for + https. + """ + + _connection_class = HTTPSConnection + + def __init__(self, host='', port=None, key_file=None, cert_file=None, + strict=None): + # provide a default host, pass the X509 cert info + + # urf. compensate for bad input. + if port == 0: + port = None + self._setup(self._connection_class(host, port, key_file, + cert_file, strict)) + + # we never actually use these for anything, but we keep them + # here for compatibility with post-1.5.2 CVS. + self.key_file = key_file + self.cert_file = cert_file + + + def FakeSocket (sock, sslobj): + warnings.warn("FakeSocket is deprecated, and won't be in 3.x. " + + "Use the result of ssl.wrap_socket() directly instead.", + DeprecationWarning, stacklevel=2) + return sslobj + + +class HTTPException(Exception): + # Subclasses that define an __init__ must call Exception.__init__ + # or define self.args. Otherwise, str() will fail. 
+ pass + +class NotConnected(HTTPException): + pass + +class InvalidURL(HTTPException): + pass + +class UnknownProtocol(HTTPException): + def __init__(self, version): + self.args = version, + self.version = version + +class UnknownTransferEncoding(HTTPException): + pass + +class UnimplementedFileMode(HTTPException): + pass + +class IncompleteRead(HTTPException): + def __init__(self, partial, expected=None): + self.args = partial, + self.partial = partial + self.expected = expected + def __repr__(self): + if self.expected is not None: + e = ', %i more expected' % self.expected + else: + e = '' + return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), e) + def __str__(self): + return repr(self) + +class ImproperConnectionState(HTTPException): + pass + +class CannotSendRequest(ImproperConnectionState): + pass + +class CannotSendHeader(ImproperConnectionState): + pass + +class ResponseNotReady(ImproperConnectionState): + pass + +class BadStatusLine(HTTPException): + def __init__(self, line): + if not line: + line = repr(line) + self.args = line, + self.line = line + +# for backwards compatibility +error = HTTPException + +class LineAndFileWrapper: + """A limited file-like object for HTTP/0.9 responses.""" + + # The status-line parsing code calls readline(), which normally + # get the HTTP status line. For a 0.9 response, however, this is + # actually the first line of the body! Clients need to get a + # readable file object that contains that line. + + def __init__(self, line, file): + self._line = line + self._file = file + self._line_consumed = 0 + self._line_offset = 0 + self._line_left = len(line) + + def __getattr__(self, attr): + return getattr(self._file, attr) + + def _done(self): + # called when the last byte is read from the line. After the + # call, all read methods are delegated to the underlying file + # object. + self._line_consumed = 1 + self.read = self._file.read + self.readline = self._file.readline + self.readlines = self._file.readlines + + def read(self, amt=None): + if self._line_consumed: + return self._file.read(amt) + assert self._line_left + if amt is None or amt > self._line_left: + s = self._line[self._line_offset:] + self._done() + if amt is None: + return s + self._file.read() + else: + return s + self._file.read(amt - len(s)) + else: + assert amt <= self._line_left + i = self._line_offset + j = i + amt + s = self._line[i:j] + self._line_offset = j + self._line_left -= amt + if self._line_left == 0: + self._done() + return s + + def readline(self): + if self._line_consumed: + return self._file.readline() + assert self._line_left + s = self._line[self._line_offset:] + self._done() + return s + + def readlines(self, size=None): + if self._line_consumed: + return self._file.readlines(size) + assert self._line_left + L = [self._line[self._line_offset:]] + self._done() + if size is None: + return L + self._file.readlines() + else: + return L + self._file.readlines(size) + +def test(): + """Test this module. + + A hodge podge of tests collected here, because they have too many + external dependencies for the regular test suite. 
+ """ + + import sys + import getopt + opts, args = getopt.getopt(sys.argv[1:], 'd') + dl = 0 + for o, a in opts: + if o == '-d': dl = dl + 1 + host = 'www.python.org' + selector = '/' + if args[0:]: host = args[0] + if args[1:]: selector = args[1] + h = HTTP() + h.set_debuglevel(dl) + h.connect(host) + h.putrequest('GET', selector) + h.endheaders() + status, reason, headers = h.getreply() + print 'status =', status + print 'reason =', reason + print "read", len(h.getfile().read()) + print + if headers: + for header in headers.headers: print header.strip() + print + + # minimal test that code to extract host from url works + class HTTP11(HTTP): + _http_vsn = 11 + _http_vsn_str = 'HTTP/1.1' + + h = HTTP11('www.python.org') + h.putrequest('GET', 'http://www.python.org/~jeremy/') + h.endheaders() + h.getreply() + h.close() + + try: + import ssl + except ImportError: + pass + else: + + for host, selector in (('sourceforge.net', '/projects/python'), + ): + print "https://%s%s" % (host, selector) + hs = HTTPS() + hs.set_debuglevel(dl) + hs.connect(host) + hs.putrequest('GET', selector) + hs.endheaders() + status, reason, headers = hs.getreply() + print 'status =', status + print 'reason =', reason + print "read", len(hs.getfile().read()) + print + if headers: + for header in headers.headers: print header.strip() + print + +if __name__ == '__main__': + test() diff --git a/lib-python/modified-2.7/json/encoder.py b/lib-python/modified-2.7/json/encoder.py --- a/lib-python/modified-2.7/json/encoder.py +++ b/lib-python/modified-2.7/json/encoder.py @@ -2,14 +2,7 @@ """ import re -try: - from _json import encode_basestring_ascii as c_encode_basestring_ascii -except ImportError: - c_encode_basestring_ascii = None -try: - from _json import make_encoder as c_make_encoder -except ImportError: - c_make_encoder = None +from __pypy__.builders import StringBuilder, UnicodeBuilder ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') @@ -24,8 +17,7 @@ '\t': '\\t', } for i in range(0x20): - ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) - #ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) + ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) # Assume this produces an infinity on all machines (probably not guaranteed) INFINITY = float('1e66666') @@ -37,10 +29,9 @@ """ def replace(match): return ESCAPE_DCT[match.group(0)] - return '"' + ESCAPE.sub(replace, s) + '"' + return ESCAPE.sub(replace, s) - -def py_encode_basestring_ascii(s): +def encode_basestring_ascii(s): """Return an ASCII-only JSON representation of a Python string """ @@ -53,20 +44,18 @@ except KeyError: n = ord(s) if n < 0x10000: - return '\\u{0:04x}'.format(n) - #return '\\u%04x' % (n,) + return '\\u%04x' % (n,) else: # surrogate pair n -= 0x10000 s1 = 0xd800 | ((n >> 10) & 0x3ff) s2 = 0xdc00 | (n & 0x3ff) - return '\\u{0:04x}\\u{1:04x}'.format(s1, s2) - #return '\\u%04x\\u%04x' % (s1, s2) - return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' - - -encode_basestring_ascii = ( - c_encode_basestring_ascii or py_encode_basestring_ascii) + return '\\u%04x\\u%04x' % (s1, s2) + if ESCAPE_ASCII.search(s): + return str(ESCAPE_ASCII.sub(replace, s)) + return s +py_encode_basestring_ascii = lambda s: '"' + encode_basestring_ascii(s) + '"' +c_encode_basestring_ascii = None class JSONEncoder(object): """Extensible JSON encoder for Python data structures. 
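The hunk above drops the _json C accelerators and rewrites encode_basestring_ascii in pure Python on top of PyPy's string builders. A minimal sketch of the builder pattern the new encoder relies on; the __pypy__.builders import is the one used in the diff, while the fallback class is only an assumed stand-in so the snippet also runs on other interpreters:

    try:
        from __pypy__.builders import StringBuilder   # PyPy's append-only builder
    except ImportError:
        class StringBuilder(object):                  # assumed stand-in, illustration only
            def __init__(self):
                self._parts = []
            def append(self, s):
                self._parts.append(s)
            def build(self):
                return ''.join(self._parts)

    builder = StringBuilder()
    for piece in ('{', '"spam"', ': ', '1', '}'):
        builder.append(piece)
    print builder.build()                             # prints {"spam": 1}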
@@ -147,6 +136,17 @@ self.skipkeys = skipkeys self.ensure_ascii = ensure_ascii + if ensure_ascii: + self.encoder = encode_basestring_ascii + else: + self.encoder = encode_basestring + if encoding != 'utf-8': + orig_encoder = self.encoder + def encoder(o): + if isinstance(o, str): + o = o.decode(encoding) + return orig_encoder(o) + self.encoder = encoder self.check_circular = check_circular self.allow_nan = allow_nan self.sort_keys = sort_keys @@ -184,24 +184,126 @@ '{"foo": ["bar", "baz"]}' """ - # This is for extremely simple cases and benchmarks. + if self.check_circular: + markers = {} + else: + markers = None + if self.ensure_ascii: + builder = StringBuilder() + else: + builder = UnicodeBuilder() + self._encode(o, markers, builder, 0) + return builder.build() + + def _emit_indent(self, builder, _current_indent_level): + if self.indent is not None: + _current_indent_level += 1 + newline_indent = '\n' + (' ' * (self.indent * + _current_indent_level)) + separator = self.item_separator + newline_indent + builder.append(newline_indent) + else: + separator = self.item_separator + return separator, _current_indent_level + + def _emit_unindent(self, builder, _current_indent_level): + if self.indent is not None: + builder.append('\n') + builder.append(' ' * (self.indent * (_current_indent_level - 1))) + + def _encode(self, o, markers, builder, _current_indent_level): if isinstance(o, basestring): - if isinstance(o, str): - _encoding = self.encoding - if (_encoding is not None - and not (_encoding == 'utf-8')): - o = o.decode(_encoding) - if self.ensure_ascii: - return encode_basestring_ascii(o) + builder.append('"') + builder.append(self.encoder(o)) + builder.append('"') + elif o is None: + builder.append('null') + elif o is True: + builder.append('true') + elif o is False: + builder.append('false') + elif isinstance(o, (int, long)): + builder.append(str(o)) + elif isinstance(o, float): + builder.append(self._floatstr(o)) + elif isinstance(o, (list, tuple)): + if not o: + builder.append('[]') + return + self._encode_list(o, markers, builder, _current_indent_level) + elif isinstance(o, dict): + if not o: + builder.append('{}') + return + self._encode_dict(o, markers, builder, _current_indent_level) + else: + self._mark_markers(markers, o) + res = self.default(o) + self._encode(res, markers, builder, _current_indent_level) + self._remove_markers(markers, o) + return res + + def _encode_list(self, l, markers, builder, _current_indent_level): + self._mark_markers(markers, l) + builder.append('[') + first = True + separator, _current_indent_level = self._emit_indent(builder, + _current_indent_level) + for elem in l: + if first: + first = False else: - return encode_basestring(o) - # This doesn't pass the iterator directly to ''.join() because the - # exceptions aren't as detailed. The list call should be roughly - # equivalent to the PySequence_Fast that ''.join() would do. 
- chunks = self.iterencode(o, _one_shot=True) - if not isinstance(chunks, (list, tuple)): - chunks = list(chunks) - return ''.join(chunks) + builder.append(separator) + self._encode(elem, markers, builder, _current_indent_level) + del elem # XXX grumble + self._emit_unindent(builder, _current_indent_level) + builder.append(']') + self._remove_markers(markers, l) + + def _encode_dict(self, d, markers, builder, _current_indent_level): + self._mark_markers(markers, d) + first = True + builder.append('{') + separator, _current_indent_level = self._emit_indent(builder, + _current_indent_level) + if self.sort_keys: + items = sorted(d.items(), key=lambda kv: kv[0]) + else: + items = d.iteritems() + + for key, v in items: + if first: + first = False + else: + builder.append(separator) + if isinstance(key, basestring): + pass + # JavaScript is weakly typed for these, so it makes sense to + # also allow them. Many encoders seem to do something like this. + elif isinstance(key, float): + key = self._floatstr(key) + elif key is True: + key = 'true' + elif key is False: + key = 'false' + elif key is None: + key = 'null' + elif isinstance(key, (int, long)): + key = str(key) + elif self.skipkeys: + continue + else: + raise TypeError("key " + repr(key) + " is not a string") + builder.append('"') + builder.append(self.encoder(key)) + builder.append('"') + builder.append(self.key_separator) + self._encode(v, markers, builder, _current_indent_level) + del key + del v # XXX grumble + self._emit_unindent(builder, _current_indent_level) + builder.append('}') + self._remove_markers(markers, d) def iterencode(self, o, _one_shot=False): """Encode the given object and yield each string @@ -217,86 +319,54 @@ markers = {} else: markers = None - if self.ensure_ascii: - _encoder = encode_basestring_ascii + return self._iterencode(o, markers, 0) + + def _floatstr(self, o): + # Check for specials. Note that this type of test is processor + # and/or platform-specific, so do tests which don't depend on the + # internals. + + if o != o: + text = 'NaN' + elif o == INFINITY: + text = 'Infinity' + elif o == -INFINITY: + text = '-Infinity' else: - _encoder = encode_basestring - if self.encoding != 'utf-8': - def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding): - if isinstance(o, str): - o = o.decode(_encoding) - return _orig_encoder(o) + return FLOAT_REPR(o) - def floatstr(o, allow_nan=self.allow_nan, - _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY): - # Check for specials. Note that this type of test is processor - # and/or platform-specific, so do tests which don't depend on the - # internals. 
+ if not self.allow_nan: + raise ValueError( + "Out of range float values are not JSON compliant: " + + repr(o)) - if o != o: - text = 'NaN' - elif o == _inf: - text = 'Infinity' - elif o == _neginf: - text = '-Infinity' - else: - return _repr(o) + return text - if not allow_nan: - raise ValueError( - "Out of range float values are not JSON compliant: " + - repr(o)) + def _mark_markers(self, markers, o): + if markers is not None: + if id(o) in markers: + raise ValueError("Circular reference detected") + markers[id(o)] = None - return text + def _remove_markers(self, markers, o): + if markers is not None: + del markers[id(o)] - - if (_one_shot and c_make_encoder is not None - and not self.indent and not self.sort_keys): - _iterencode = c_make_encoder( - markers, self.default, _encoder, self.indent, - self.key_separator, self.item_separator, self.sort_keys, - self.skipkeys, self.allow_nan) - else: - _iterencode = _make_iterencode( - markers, self.default, _encoder, self.indent, floatstr, - self.key_separator, self.item_separator, self.sort_keys, - self.skipkeys, _one_shot) - return _iterencode(o, 0) - -def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, - _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot, - ## HACK: hand-optimized bytecode; turn globals into locals - ValueError=ValueError, - basestring=basestring, - dict=dict, - float=float, - id=id, - int=int, - isinstance=isinstance, - list=list, - long=long, - str=str, - tuple=tuple, - ): - - def _iterencode_list(lst, _current_indent_level): + def _iterencode_list(self, lst, markers, _current_indent_level): if not lst: yield '[]' return - if markers is not None: - markerid = id(lst) - if markerid in markers: - raise ValueError("Circular reference detected") - markers[markerid] = lst + self._mark_markers(markers, lst) buf = '[' - if _indent is not None: + if self.indent is not None: _current_indent_level += 1 - newline_indent = '\n' + (' ' * (_indent * _current_indent_level)) - separator = _item_separator + newline_indent + newline_indent = '\n' + (' ' * (self.indent * + _current_indent_level)) + separator = self.item_separator + newline_indent buf += newline_indent else: newline_indent = None - separator = _item_separator + separator = self.item_separator first = True for value in lst: if first: @@ -304,7 +374,7 @@ else: buf = separator if isinstance(value, basestring): - yield buf + _encoder(value) + yield buf + '"' + self.encoder(value) + '"' elif value is None: yield buf + 'null' elif value is True: @@ -314,44 +384,43 @@ elif isinstance(value, (int, long)): yield buf + str(value) elif isinstance(value, float): - yield buf + _floatstr(value) + yield buf + self._floatstr(value) else: yield buf if isinstance(value, (list, tuple)): - chunks = _iterencode_list(value, _current_indent_level) + chunks = self._iterencode_list(value, markers, + _current_indent_level) elif isinstance(value, dict): - chunks = _iterencode_dict(value, _current_indent_level) + chunks = self._iterencode_dict(value, markers, + _current_indent_level) else: - chunks = _iterencode(value, _current_indent_level) + chunks = self._iterencode(value, markers, + _current_indent_level) for chunk in chunks: yield chunk if newline_indent is not None: _current_indent_level -= 1 - yield '\n' + (' ' * (_indent * _current_indent_level)) + yield '\n' + (' ' * (self.indent * _current_indent_level)) yield ']' - if markers is not None: - del markers[markerid] + self._remove_markers(markers, lst) - def _iterencode_dict(dct, _current_indent_level): + def 
_iterencode_dict(self, dct, markers, _current_indent_level): if not dct: yield '{}' return - if markers is not None: - markerid = id(dct) - if markerid in markers: - raise ValueError("Circular reference detected") - markers[markerid] = dct + self._mark_markers(markers, dct) yield '{' - if _indent is not None: + if self.indent is not None: _current_indent_level += 1 - newline_indent = '\n' + (' ' * (_indent * _current_indent_level)) - item_separator = _item_separator + newline_indent + newline_indent = '\n' + (' ' * (self.indent * + _current_indent_level)) + item_separator = self.item_separator + newline_indent yield newline_indent else: newline_indent = None - item_separator = _item_separator + item_separator = self.item_separator first = True - if _sort_keys: + if self.sort_keys: items = sorted(dct.items(), key=lambda kv: kv[0]) else: items = dct.iteritems() @@ -361,7 +430,7 @@ # JavaScript is weakly typed for these, so it makes sense to # also allow them. Many encoders seem to do something like this. elif isinstance(key, float): - key = _floatstr(key) + key = self._floatstr(key) elif key is True: key = 'true' elif key is False: @@ -370,7 +439,7 @@ key = 'null' elif isinstance(key, (int, long)): key = str(key) - elif _skipkeys: + elif self.skipkeys: continue else: raise TypeError("key " + repr(key) + " is not a string") @@ -378,10 +447,10 @@ first = False else: yield item_separator - yield _encoder(key) - yield _key_separator + yield '"' + self.encoder(key) + '"' + yield self.key_separator if isinstance(value, basestring): - yield _encoder(value) + yield '"' + self.encoder(value) + '"' elif value is None: yield 'null' elif value is True: @@ -391,26 +460,28 @@ elif isinstance(value, (int, long)): yield str(value) elif isinstance(value, float): - yield _floatstr(value) + yield self._floatstr(value) else: if isinstance(value, (list, tuple)): - chunks = _iterencode_list(value, _current_indent_level) + chunks = self._iterencode_list(value, markers, + _current_indent_level) elif isinstance(value, dict): - chunks = _iterencode_dict(value, _current_indent_level) + chunks = self._iterencode_dict(value, markers, + _current_indent_level) else: - chunks = _iterencode(value, _current_indent_level) + chunks = self._iterencode(value, markers, + _current_indent_level) for chunk in chunks: yield chunk if newline_indent is not None: _current_indent_level -= 1 - yield '\n' + (' ' * (_indent * _current_indent_level)) + yield '\n' + (' ' * (self.indent * _current_indent_level)) yield '}' - if markers is not None: - del markers[markerid] + self._remove_markers(markers, dct) - def _iterencode(o, _current_indent_level): + def _iterencode(self, o, markers, _current_indent_level): if isinstance(o, basestring): - yield _encoder(o) + yield '"' + self.encoder(o) + '"' elif o is None: yield 'null' elif o is True: @@ -420,23 +491,19 @@ elif isinstance(o, (int, long)): yield str(o) elif isinstance(o, float): - yield _floatstr(o) + yield self._floatstr(o) elif isinstance(o, (list, tuple)): - for chunk in _iterencode_list(o, _current_indent_level): + for chunk in self._iterencode_list(o, markers, + _current_indent_level): yield chunk elif isinstance(o, dict): - for chunk in _iterencode_dict(o, _current_indent_level): + for chunk in self._iterencode_dict(o, markers, + _current_indent_level): yield chunk else: - if markers is not None: - markerid = id(o) - if markerid in markers: - raise ValueError("Circular reference detected") - markers[markerid] = o - o = _default(o) - for chunk in _iterencode(o, _current_indent_level): 
+ self._mark_markers(markers, o) + obj = self.default(o) + for chunk in self._iterencode(obj, markers, + _current_indent_level): yield chunk - if markers is not None: - del markers[markerid] - - return _iterencode + self._remove_markers(markers, o) diff --git a/lib-python/modified-2.7/json/tests/test_unicode.py b/lib-python/modified-2.7/json/tests/test_unicode.py --- a/lib-python/modified-2.7/json/tests/test_unicode.py +++ b/lib-python/modified-2.7/json/tests/test_unicode.py @@ -80,3 +80,9 @@ self.assertEqual(type(json.loads(u'["a"]')[0]), unicode) # Issue 10038. self.assertEqual(type(json.loads('"foo"')), unicode) + + def test_encode_not_utf_8(self): + self.assertEqual(json.dumps('\xb1\xe6', encoding='iso8859-2'), + '"\\u0105\\u0107"') + self.assertEqual(json.dumps(['\xb1\xe6'], encoding='iso8859-2'), + '["\\u0105\\u0107"]') diff --git a/lib-python/2.7/pkgutil.py b/lib-python/modified-2.7/pkgutil.py copy from lib-python/2.7/pkgutil.py copy to lib-python/modified-2.7/pkgutil.py --- a/lib-python/2.7/pkgutil.py +++ b/lib-python/modified-2.7/pkgutil.py @@ -244,7 +244,8 @@ return mod def get_data(self, pathname): - return open(pathname, "rb").read() + with open(pathname, "rb") as f: + return f.read() def _reopen(self): if self.file and self.file.closed: diff --git a/lib-python/modified-2.7/test/test_array.py b/lib-python/modified-2.7/test/test_array.py --- a/lib-python/modified-2.7/test/test_array.py +++ b/lib-python/modified-2.7/test/test_array.py @@ -295,9 +295,10 @@ ) b = array.array(self.badtypecode()) - self.assertRaises(TypeError, "a + b") - - self.assertRaises(TypeError, "a + 'bad'") + with self.assertRaises(TypeError): + a + b + with self.assertRaises(TypeError): + a + 'bad' def test_iadd(self): a = array.array(self.typecode, self.example[::-1]) @@ -316,9 +317,10 @@ ) b = array.array(self.badtypecode()) - self.assertRaises(TypeError, "a += b") - - self.assertRaises(TypeError, "a += 'bad'") + with self.assertRaises(TypeError): + a += b + with self.assertRaises(TypeError): + a += 'bad' def test_mul(self): a = 5*array.array(self.typecode, self.example) @@ -345,7 +347,8 @@ array.array(self.typecode) ) - self.assertRaises(TypeError, "a * 'bad'") + with self.assertRaises(TypeError): + a * 'bad' def test_imul(self): a = array.array(self.typecode, self.example) @@ -374,7 +377,8 @@ a *= -1 self.assertEqual(a, array.array(self.typecode)) - self.assertRaises(TypeError, "a *= 'bad'") + with self.assertRaises(TypeError): + a *= 'bad' def test_getitem(self): a = array.array(self.typecode, self.example) diff --git a/lib-python/modified-2.7/test/test_heapq.py b/lib-python/modified-2.7/test/test_heapq.py --- a/lib-python/modified-2.7/test/test_heapq.py +++ b/lib-python/modified-2.7/test/test_heapq.py @@ -186,6 +186,11 @@ self.assertFalse(sys.modules['heapq'] is self.module) self.assertTrue(hasattr(self.module.heapify, 'func_code')) + def test_islice_protection(self): + m = self.module + self.assertFalse(m.nsmallest(-1, [1])) + self.assertFalse(m.nlargest(-1, [1])) + class TestHeapC(TestHeap): module = c_heapq diff --git a/lib-python/modified-2.7/test/test_import.py b/lib-python/modified-2.7/test/test_import.py --- a/lib-python/modified-2.7/test/test_import.py +++ b/lib-python/modified-2.7/test/test_import.py @@ -64,6 +64,7 @@ except ImportError, err: self.fail("import from %s failed: %s" % (ext, err)) else: + # XXX importing .pyw is missing on Windows self.assertEqual(mod.a, a, "module loaded (%s) but contents invalid" % mod) self.assertEqual(mod.b, b, diff --git 
a/lib-python/modified-2.7/test/test_repr.py b/lib-python/modified-2.7/test/test_repr.py --- a/lib-python/modified-2.7/test/test_repr.py +++ b/lib-python/modified-2.7/test/test_repr.py @@ -254,8 +254,14 @@ eq = self.assertEqual touch(os.path.join(self.subpkgname, self.pkgname + os.extsep + 'py')) from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import areallylongpackageandmodulenametotestreprtruncation - eq(repr(areallylongpackageandmodulenametotestreprtruncation), - "" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__)) + # On PyPy, we use %r to format the file name; on CPython it is done + # with '%s'. It seems to me that %r is safer . + if '__pypy__' in sys.builtin_module_names: + eq(repr(areallylongpackageandmodulenametotestreprtruncation), + "" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__)) + else: + eq(repr(areallylongpackageandmodulenametotestreprtruncation), + "" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__)) eq(repr(sys), "") def test_type(self): diff --git a/lib-python/2.7/test/test_subprocess.py b/lib-python/modified-2.7/test/test_subprocess.py copy from lib-python/2.7/test/test_subprocess.py copy to lib-python/modified-2.7/test/test_subprocess.py --- a/lib-python/2.7/test/test_subprocess.py +++ b/lib-python/modified-2.7/test/test_subprocess.py @@ -16,11 +16,11 @@ # Depends on the following external programs: Python # -if mswindows: - SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), ' - 'os.O_BINARY);') -else: - SETBINARY = '' +#if mswindows: +# SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), ' +# 'os.O_BINARY);') +#else: +# SETBINARY = '' try: @@ -420,8 +420,9 @@ self.assertStderrEqual(stderr, "") def test_universal_newlines(self): - p = subprocess.Popen([sys.executable, "-c", - 'import sys,os;' + SETBINARY + + # NB. replaced SETBINARY with the -u flag + p = subprocess.Popen([sys.executable, "-u", "-c", + 'import sys,os;' + #SETBINARY + 'sys.stdout.write("line1\\n");' 'sys.stdout.flush();' 'sys.stdout.write("line2\\r");' @@ -448,8 +449,9 @@ def test_universal_newlines_communicate(self): # universal newlines through communicate() - p = subprocess.Popen([sys.executable, "-c", - 'import sys,os;' + SETBINARY + + # NB. 
replaced SETBINARY with the -u flag + p = subprocess.Popen([sys.executable, "-u", "-c", + 'import sys,os;' + #SETBINARY + 'sys.stdout.write("line1\\n");' 'sys.stdout.flush();' 'sys.stdout.write("line2\\r");' diff --git a/lib-python/modified-2.7/test/test_sys_settrace.py b/lib-python/modified-2.7/test/test_sys_settrace.py --- a/lib-python/modified-2.7/test/test_sys_settrace.py +++ b/lib-python/modified-2.7/test/test_sys_settrace.py @@ -286,11 +286,11 @@ self.compare_events(func.func_code.co_firstlineno, tracer.events, func.events) - def set_and_retrieve_none(self): + def test_set_and_retrieve_none(self): sys.settrace(None) assert sys.gettrace() is None - def set_and_retrieve_func(self): + def test_set_and_retrieve_func(self): def fn(*args): pass diff --git a/lib-python/modified-2.7/test/test_urllib2.py b/lib-python/modified-2.7/test/test_urllib2.py --- a/lib-python/modified-2.7/test/test_urllib2.py +++ b/lib-python/modified-2.7/test/test_urllib2.py @@ -307,6 +307,9 @@ def getresponse(self): return MockHTTPResponse(MockFile(), {}, 200, "OK") + def close(self): + pass + class MockHandler: # useful for testing handler machinery # see add_ordered_mock_handlers() docstring diff --git a/lib-python/modified-2.7/urllib2.py b/lib-python/modified-2.7/urllib2.py new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7/urllib2.py @@ -0,0 +1,1436 @@ +"""An extensible library for opening URLs using a variety of protocols + +The simplest way to use this module is to call the urlopen function, +which accepts a string containing a URL or a Request object (described +below). It opens the URL and returns the results as file-like +object; the returned object has some extra methods described below. + +The OpenerDirector manages a collection of Handler objects that do +all the actual work. Each Handler implements a particular protocol or +option. The OpenerDirector is a composite object that invokes the +Handlers needed to open the requested URL. For example, the +HTTPHandler performs HTTP GET and POST requests and deals with +non-error returns. The HTTPRedirectHandler automatically deals with +HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler +deals with digest authentication. + +urlopen(url, data=None) -- Basic usage is the same as original +urllib. pass the url and optionally data to post to an HTTP URL, and +get a file-like object back. One difference is that you can also pass +a Request instance instead of URL. Raises a URLError (subclass of +IOError); for HTTP errors, raises an HTTPError, which can also be +treated as a valid response. + +build_opener -- Function that creates a new OpenerDirector instance. +Will install the default handlers. Accepts one or more Handlers as +arguments, either instances or Handler classes that it will +instantiate. If one of the argument is a subclass of the default +handler, the argument will be installed instead of the default. + +install_opener -- Installs a new opener as the default opener. + +objects of interest: + +OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages +the Handler classes, while dealing with requests and responses. + +Request -- An object that encapsulates the state of a request. The +state can be as simple as the URL. It can also include extra HTTP +headers, e.g. a User-Agent. + +BaseHandler -- + +exceptions: +URLError -- A subclass of IOError, individual protocols have their own +specific subclass. 
+ +HTTPError -- Also a valid HTTP response, so you can treat an HTTP error +as an exceptional event or valid response. + +internals: +BaseHandler and parent +_call_chain conventions + +Example usage: + +import urllib2 + +# set up authentication info +authinfo = urllib2.HTTPBasicAuthHandler() +authinfo.add_password(realm='PDQ Application', + uri='https://mahler:8092/site-updates.py', + user='klem', + passwd='geheim$parole') + +proxy_support = urllib2.ProxyHandler({"http" : "http://ahad-haam:3128"}) + +# build a new opener that adds authentication and caching FTP handlers +opener = urllib2.build_opener(proxy_support, authinfo, urllib2.CacheFTPHandler) + +# install it +urllib2.install_opener(opener) + +f = urllib2.urlopen('http://www.python.org/') + + +""" + +# XXX issues: +# If an authentication error handler that tries to perform +# authentication for some reason but fails, how should the error be +# signalled? The client needs to know the HTTP error code. But if +# the handler knows that the problem was, e.g., that it didn't know +# that hash algo that requested in the challenge, it would be good to +# pass that information along to the client, too. +# ftp errors aren't handled cleanly +# check digest against correct (i.e. non-apache) implementation + +# Possible extensions: +# complex proxies XXX not sure what exactly was meant by this +# abstract factory for opener + +import base64 +import hashlib +import httplib +import mimetools +import os +import posixpath +import random +import re +import socket +import sys +import time +import urlparse +import bisect + +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + +from urllib import (unwrap, unquote, splittype, splithost, quote, + addinfourl, splitport, splittag, + splitattr, ftpwrapper, splituser, splitpasswd, splitvalue) + +# support for FileHandler, proxies via environment variables +from urllib import localhost, url2pathname, getproxies, proxy_bypass + +# used in User-Agent header sent +__version__ = sys.version[:3] + +_opener = None +def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): + global _opener + if _opener is None: + _opener = build_opener() + return _opener.open(url, data, timeout) + +def install_opener(opener): + global _opener + _opener = opener + +# do these error classes make sense? +# make sure all of the IOError stuff is overridden. we just want to be +# subtypes. + +class URLError(IOError): + # URLError is a sub-type of IOError, but it doesn't share any of + # the implementation. need to override __init__ and __str__. + # It sets self.args for compatibility with other EnvironmentError + # subclasses, but args doesn't have the typical format with errno in + # slot 0 and strerror in slot 1. This may be better than nothing. + def __init__(self, reason): + self.args = reason, + self.reason = reason + + def __str__(self): + return '' % self.reason + +class HTTPError(URLError, addinfourl): + """Raised when HTTP error occurs, but also acts like non-error return""" + __super_init = addinfourl.__init__ + + def __init__(self, url, code, msg, hdrs, fp): + self.code = code + self.msg = msg + self.hdrs = hdrs + self.fp = fp + self.filename = url + # The addinfourl classes depend on fp being a valid file + # object. In some cases, the HTTPError may not have a valid + # file object. If this happens, the simplest workaround is to + # not initialize the base classes. 
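        # For illustration, the usual way callers meet these exceptions
        # (the URL below is only an example value):
        #
        #     import urllib2
        #     try:
        #         f = urllib2.urlopen('http://www.python.org/')
        #     except urllib2.HTTPError, e:
        #         print e.code, e.msg     # HTTPError doubles as a response
        #     except urllib2.URLError, e:
        #         print e.reason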
+ if fp is not None: + self.__super_init(fp, hdrs, url, code) + + def __str__(self): + return 'HTTP Error %s: %s' % (self.code, self.msg) + +# copied from cookielib.py +_cut_port_re = re.compile(r":\d+$") +def request_host(request): + """Return request-host, as defined by RFC 2965. + + Variation from RFC: returned value is lowercased, for convenient + comparison. + + """ + url = request.get_full_url() + host = urlparse.urlparse(url)[1] + if host == "": + host = request.get_header("Host", "") + + # remove port, if present + host = _cut_port_re.sub("", host, 1) + return host.lower() + +class Request: + + def __init__(self, url, data=None, headers={}, + origin_req_host=None, unverifiable=False): + # unwrap('') --> 'type://host/path' + self.__original = unwrap(url) + self.__original, fragment = splittag(self.__original) + self.type = None + # self.__r_type is what's left after doing the splittype + self.host = None + self.port = None + self._tunnel_host = None + self.data = data + self.headers = {} + for key, value in headers.items(): + self.add_header(key, value) + self.unredirected_hdrs = {} + if origin_req_host is None: + origin_req_host = request_host(self) + self.origin_req_host = origin_req_host + self.unverifiable = unverifiable + + def __getattr__(self, attr): + # XXX this is a fallback mechanism to guard against these + # methods getting called in a non-standard order. this may be + # too complicated and/or unnecessary. + # XXX should the __r_XXX attributes be public? + if attr[:12] == '_Request__r_': + name = attr[12:] + if hasattr(Request, 'get_' + name): + getattr(self, 'get_' + name)() + return getattr(self, attr) + raise AttributeError, attr + + def get_method(self): + if self.has_data(): + return "POST" + else: + return "GET" + + # XXX these helper methods are lame + + def add_data(self, data): + self.data = data + + def has_data(self): + return self.data is not None + + def get_data(self): + return self.data + + def get_full_url(self): + return self.__original + + def get_type(self): + if self.type is None: + self.type, self.__r_type = splittype(self.__original) + if self.type is None: + raise ValueError, "unknown url type: %s" % self.__original + return self.type + + def get_host(self): + if self.host is None: + self.host, self.__r_host = splithost(self.__r_type) + if self.host: + self.host = unquote(self.host) + return self.host + + def get_selector(self): + return self.__r_host + + def set_proxy(self, host, type): + if self.type == 'https' and not self._tunnel_host: + self._tunnel_host = self.host + else: + self.type = type + self.__r_host = self.__original + + self.host = host + + def has_proxy(self): + return self.__r_host == self.__original + + def get_origin_req_host(self): + return self.origin_req_host + + def is_unverifiable(self): + return self.unverifiable + + def add_header(self, key, val): + # useful for something like authentication + self.headers[key.capitalize()] = val + + def add_unredirected_header(self, key, val): + # will not be added to a redirected request + self.unredirected_hdrs[key.capitalize()] = val + + def has_header(self, header_name): + return (header_name in self.headers or + header_name in self.unredirected_hdrs) + + def get_header(self, header_name, default=None): + return self.headers.get( + header_name, + self.unredirected_hdrs.get(header_name, default)) + + def header_items(self): + hdrs = self.unredirected_hdrs.copy() + hdrs.update(self.headers) + return hdrs.items() + +class OpenerDirector: + def __init__(self): + client_version = 
"Python-urllib/%s" % __version__ + self.addheaders = [('User-agent', client_version)] + # manage the individual handlers + self.handlers = [] + self.handle_open = {} + self.handle_error = {} + self.process_response = {} + self.process_request = {} + + def add_handler(self, handler): + if not hasattr(handler, "add_parent"): + raise TypeError("expected BaseHandler instance, got %r" % + type(handler)) + + added = False + for meth in dir(handler): + if meth in ["redirect_request", "do_open", "proxy_open"]: + # oops, coincidental match + continue + + i = meth.find("_") + protocol = meth[:i] + condition = meth[i+1:] + + if condition.startswith("error"): + j = condition.find("_") + i + 1 + kind = meth[j+1:] + try: + kind = int(kind) + except ValueError: + pass + lookup = self.handle_error.get(protocol, {}) + self.handle_error[protocol] = lookup + elif condition == "open": + kind = protocol + lookup = self.handle_open + elif condition == "response": + kind = protocol + lookup = self.process_response + elif condition == "request": + kind = protocol + lookup = self.process_request + else: + continue + + handlers = lookup.setdefault(kind, []) + if handlers: + bisect.insort(handlers, handler) + else: + handlers.append(handler) + added = True + + if added: + # the handlers must work in an specific order, the order + # is specified in a Handler attribute + bisect.insort(self.handlers, handler) + handler.add_parent(self) + + def close(self): + # Only exists for backwards compatibility. + pass + + def _call_chain(self, chain, kind, meth_name, *args): + # Handlers raise an exception if no one else should try to handle + # the request, or return None if they can't but another handler + # could. Otherwise, they return the response. + handlers = chain.get(kind, ()) + for handler in handlers: + func = getattr(handler, meth_name) + + result = func(*args) + if result is not None: + return result + + def open(self, fullurl, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): + # accept a URL or a Request object + if isinstance(fullurl, basestring): + req = Request(fullurl, data) + else: + req = fullurl + if data is not None: + req.add_data(data) + + req.timeout = timeout + protocol = req.get_type() + + # pre-process request + meth_name = protocol+"_request" + for processor in self.process_request.get(protocol, []): + meth = getattr(processor, meth_name) + req = meth(req) + + response = self._open(req, data) + + # post-process response + meth_name = protocol+"_response" + for processor in self.process_response.get(protocol, []): + meth = getattr(processor, meth_name) + response = meth(req, response) + + return response + + def _open(self, req, data=None): + result = self._call_chain(self.handle_open, 'default', + 'default_open', req) + if result: + return result + + protocol = req.get_type() + result = self._call_chain(self.handle_open, protocol, protocol + + '_open', req) + if result: + return result + + return self._call_chain(self.handle_open, 'unknown', + 'unknown_open', req) + + def error(self, proto, *args): + if proto in ('http', 'https'): + # XXX http[s] protocols are special-cased + dict = self.handle_error['http'] # https is not different than http + proto = args[2] # YUCK! 
+ meth_name = 'http_error_%s' % proto + http_err = 1 + orig_args = args + else: + dict = self.handle_error + meth_name = proto + '_error' + http_err = 0 + args = (dict, proto, meth_name) + args + result = self._call_chain(*args) + if result: + return result + + if http_err: + args = (dict, 'default', 'http_error_default') + orig_args + return self._call_chain(*args) + +# XXX probably also want an abstract factory that knows when it makes +# sense to skip a superclass in favor of a subclass and when it might +# make sense to include both + +def build_opener(*handlers): + """Create an opener object from a list of handlers. + + The opener will use several default handlers, including support + for HTTP, FTP and when applicable, HTTPS. + + If any of the handlers passed as arguments are subclasses of the + default handlers, the default handlers will not be used. + """ + import types + def isclass(obj): + return isinstance(obj, (types.ClassType, type)) + + opener = OpenerDirector() + default_classes = [ProxyHandler, UnknownHandler, HTTPHandler, + HTTPDefaultErrorHandler, HTTPRedirectHandler, + FTPHandler, FileHandler, HTTPErrorProcessor] + if hasattr(httplib, 'HTTPS'): + default_classes.append(HTTPSHandler) + skip = set() + for klass in default_classes: + for check in handlers: + if isclass(check): + if issubclass(check, klass): + skip.add(klass) + elif isinstance(check, klass): + skip.add(klass) + for klass in skip: + default_classes.remove(klass) + + for klass in default_classes: + opener.add_handler(klass()) + + for h in handlers: + if isclass(h): + h = h() + opener.add_handler(h) + return opener + +class BaseHandler: + handler_order = 500 + + def add_parent(self, parent): + self.parent = parent + + def close(self): + # Only exists for backwards compatibility + pass + + def __lt__(self, other): + if not hasattr(other, "handler_order"): + # Try to preserve the old behavior of having custom classes + # inserted after default ones (works only for custom user + # classes which are not aware of handler_order). + return True + return self.handler_order < other.handler_order + + +class HTTPErrorProcessor(BaseHandler): + """Process HTTP error responses.""" + handler_order = 1000 # after all other processing + + def http_response(self, request, response): + code, msg, hdrs = response.code, response.msg, response.info() + + # According to RFC 2616, "2xx" code indicates that the client's + # request was successfully received, understood, and accepted. + if not (200 <= code < 300): + response = self.parent.error( + 'http', request, response, code, msg, hdrs) + + return response + + https_response = http_response + +class HTTPDefaultErrorHandler(BaseHandler): + def http_error_default(self, req, fp, code, msg, hdrs): + raise HTTPError(req.get_full_url(), code, msg, hdrs, fp) + +class HTTPRedirectHandler(BaseHandler): + # maximum number of redirections to any single URL + # this is needed because of the state that cookies introduce + max_repeats = 4 + # maximum total number of redirections (regardless of URL) before + # assuming we're in a loop + max_redirections = 10 + + def redirect_request(self, req, fp, code, msg, headers, newurl): + """Return a Request or None in response to a redirect. + + This is called by the http_error_30x methods when a + redirection response is received. If a redirection should + take place, return a new Request to allow http_error_30x to + perform the redirect. Otherwise, raise HTTPError if no-one + else should try to handle this url. 
Return None if you can't + but another Handler might. + """ + m = req.get_method() + if (code in (301, 302, 303, 307) and m in ("GET", "HEAD") + or code in (301, 302, 303) and m == "POST"): + # Strictly (according to RFC 2616), 301 or 302 in response + # to a POST MUST NOT cause a redirection without confirmation + # from the user (of urllib2, in this case). In practice, + # essentially all clients do redirect in this case, so we + # do the same. + # be conciliant with URIs containing a space + newurl = newurl.replace(' ', '%20') + newheaders = dict((k,v) for k,v in req.headers.items() + if k.lower() not in ("content-length", "content-type") + ) + return Request(newurl, + headers=newheaders, + origin_req_host=req.get_origin_req_host(), + unverifiable=True) + else: + raise HTTPError(req.get_full_url(), code, msg, headers, fp) + + # Implementation note: To avoid the server sending us into an + # infinite loop, the request object needs to track what URLs we + # have already seen. Do this by adding a handler-specific + # attribute to the Request object. + def http_error_302(self, req, fp, code, msg, headers): + # Some servers (incorrectly) return multiple Location headers + # (so probably same goes for URI). Use first header. + if 'location' in headers: + newurl = headers.getheaders('location')[0] + elif 'uri' in headers: + newurl = headers.getheaders('uri')[0] + else: + return + + # fix a possible malformed URL + urlparts = urlparse.urlparse(newurl) + if not urlparts.path: + urlparts = list(urlparts) + urlparts[2] = "/" + newurl = urlparse.urlunparse(urlparts) + + newurl = urlparse.urljoin(req.get_full_url(), newurl) + + # XXX Probably want to forget about the state of the current + # request, although that might interact poorly with other + # handlers that also use handler-specific request attributes + new = self.redirect_request(req, fp, code, msg, headers, newurl) + if new is None: + return + + # loop detection + # .redirect_dict has a key url if url was previously visited. + if hasattr(req, 'redirect_dict'): + visited = new.redirect_dict = req.redirect_dict + if (visited.get(newurl, 0) >= self.max_repeats or + len(visited) >= self.max_redirections): + raise HTTPError(req.get_full_url(), code, + self.inf_msg + msg, headers, fp) + else: + visited = new.redirect_dict = req.redirect_dict = {} + visited[newurl] = visited.get(newurl, 0) + 1 + + # Don't close the fp until we are sure that we won't use it + # with HTTPError. + fp.read() + fp.close() + + return self.parent.open(new, timeout=req.timeout) + + http_error_301 = http_error_303 = http_error_307 = http_error_302 + + inf_msg = "The HTTP server returned a redirect error that would " \ + "lead to an infinite loop.\n" \ + "The last 30x error message was:\n" + + +def _parse_proxy(proxy): + """Return (scheme, user, password, host/port) given a URL or an authority. + + If a URL is supplied, it must have an authority (host:port) component. + According to RFC 3986, having an authority component means the URL must + have two slashes after the scheme: + + >>> _parse_proxy('file:/ftp.example.com/') + Traceback (most recent call last): + ValueError: proxy URL with no authority: 'file:/ftp.example.com/' + + The first three items of the returned tuple may be None. 
+ + Examples of authority parsing: + + >>> _parse_proxy('proxy.example.com') + (None, None, None, 'proxy.example.com') + >>> _parse_proxy('proxy.example.com:3128') + (None, None, None, 'proxy.example.com:3128') + + The authority component may optionally include userinfo (assumed to be + username:password): + + >>> _parse_proxy('joe:password at proxy.example.com') + (None, 'joe', 'password', 'proxy.example.com') + >>> _parse_proxy('joe:password at proxy.example.com:3128') + (None, 'joe', 'password', 'proxy.example.com:3128') + + Same examples, but with URLs instead: + + >>> _parse_proxy('http://proxy.example.com/') + ('http', None, None, 'proxy.example.com') + >>> _parse_proxy('http://proxy.example.com:3128/') + ('http', None, None, 'proxy.example.com:3128') + >>> _parse_proxy('http://joe:password at proxy.example.com/') + ('http', 'joe', 'password', 'proxy.example.com') + >>> _parse_proxy('http://joe:password at proxy.example.com:3128') + ('http', 'joe', 'password', 'proxy.example.com:3128') + + Everything after the authority is ignored: + + >>> _parse_proxy('ftp://joe:password at proxy.example.com/rubbish:3128') + ('ftp', 'joe', 'password', 'proxy.example.com') + + Test for no trailing '/' case: + + >>> _parse_proxy('http://joe:password at proxy.example.com') + ('http', 'joe', 'password', 'proxy.example.com') + + """ + scheme, r_scheme = splittype(proxy) + if not r_scheme.startswith("/"): + # authority + scheme = None + authority = proxy + else: + # URL + if not r_scheme.startswith("//"): + raise ValueError("proxy URL with no authority: %r" % proxy) + # We have an authority, so for RFC 3986-compliant URLs (by ss 3. + # and 3.3.), path is empty or starts with '/' + end = r_scheme.find("/", 2) + if end == -1: + end = None + authority = r_scheme[2:end] + userinfo, hostport = splituser(authority) + if userinfo is not None: + user, password = splitpasswd(userinfo) + else: + user = password = None + return scheme, user, password, hostport + +class ProxyHandler(BaseHandler): + # Proxies must be in front + handler_order = 100 + + def __init__(self, proxies=None): + if proxies is None: + proxies = getproxies() + assert hasattr(proxies, 'has_key'), "proxies must be a mapping" + self.proxies = proxies + for type, url in proxies.items(): + setattr(self, '%s_open' % type, + lambda r, proxy=url, type=type, meth=self.proxy_open: \ + meth(r, proxy, type)) + + def proxy_open(self, req, proxy, type): + orig_type = req.get_type() + proxy_type, user, password, hostport = _parse_proxy(proxy) + + if proxy_type is None: + proxy_type = orig_type + + if req.host and proxy_bypass(req.host): + return None + + if user and password: + user_pass = '%s:%s' % (unquote(user), unquote(password)) + creds = base64.b64encode(user_pass).strip() + req.add_header('Proxy-authorization', 'Basic ' + creds) + hostport = unquote(hostport) + req.set_proxy(hostport, proxy_type) + + if orig_type == proxy_type or orig_type == 'https': + # let other handlers take care of it + return None + else: + # need to start over, because the other handlers don't + # grok the proxy's URL type + # e.g. 
if we have a constructor arg proxies like so: + # {'http': 'ftp://proxy.example.com'}, we may end up turning + # a request for http://acme.example.com/a into one for + # ftp://proxy.example.com/a + return self.parent.open(req, timeout=req.timeout) + +class HTTPPasswordMgr: + + def __init__(self): + self.passwd = {} + + def add_password(self, realm, uri, user, passwd): + # uri could be a single URI or a sequence + if isinstance(uri, basestring): + uri = [uri] + if not realm in self.passwd: + self.passwd[realm] = {} + for default_port in True, False: + reduced_uri = tuple( + [self.reduce_uri(u, default_port) for u in uri]) + self.passwd[realm][reduced_uri] = (user, passwd) + + def find_user_password(self, realm, authuri): + domains = self.passwd.get(realm, {}) + for default_port in True, False: + reduced_authuri = self.reduce_uri(authuri, default_port) + for uris, authinfo in domains.iteritems(): + for uri in uris: + if self.is_suburi(uri, reduced_authuri): + return authinfo + return None, None + + def reduce_uri(self, uri, default_port=True): + """Accept authority or URI and extract only the authority and path.""" + # note HTTP URLs do not have a userinfo component + parts = urlparse.urlsplit(uri) + if parts[1]: + # URI + scheme = parts[0] + authority = parts[1] + path = parts[2] or '/' + else: + # host or host:port + scheme = None + authority = uri + path = '/' + host, port = splitport(authority) + if default_port and port is None and scheme is not None: + dport = {"http": 80, + "https": 443, + }.get(scheme) + if dport is not None: + authority = "%s:%d" % (host, dport) + return authority, path + + def is_suburi(self, base, test): + """Check if test is below base in a URI tree + + Both args must be URIs in reduced form. + """ + if base == test: + return True + if base[0] != test[0]: + return False + common = posixpath.commonprefix((base[1], test[1])) + if len(common) == len(base[1]): + return True + return False + + +class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr): + + def find_user_password(self, realm, authuri): + user, password = HTTPPasswordMgr.find_user_password(self, realm, + authuri) + if user is not None: + return user, password + return HTTPPasswordMgr.find_user_password(self, None, authuri) + + +class AbstractBasicAuthHandler: + + # XXX this allows for multiple auth-schemes, but will stupidly pick + # the last one with a realm specified. + + # allow for double- and single-quoted realm values + # (single quotes are a violation of the RFC, but appear in the wild) + rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+' + 'realm=(["\'])(.*?)\\2', re.I) + + # XXX could pre-emptively send auth info already accepted (RFC 2617, + # end of section 2, and section 1.2 immediately after "credentials" + # production). + + def __init__(self, password_mgr=None): + if password_mgr is None: + password_mgr = HTTPPasswordMgr() + self.passwd = password_mgr + self.add_password = self.passwd.add_password + self.retried = 0 + + def reset_retry_count(self): + self.retried = 0 + + def http_error_auth_reqed(self, authreq, host, req, headers): + # host may be an authority (without userinfo) or a URL with an + # authority + # XXX could be multiple headers + authreq = headers.get(authreq, None) + + if self.retried > 5: + # retry sending the username:password 5 times before failing. 
+ raise HTTPError(req.get_full_url(), 401, "basic auth failed", + headers, None) + else: + self.retried += 1 + + if authreq: + mo = AbstractBasicAuthHandler.rx.search(authreq) + if mo: + scheme, quote, realm = mo.groups() + if scheme.lower() == 'basic': + response = self.retry_http_basic_auth(host, req, realm) + if response and response.code != 401: + self.retried = 0 + return response + + def retry_http_basic_auth(self, host, req, realm): + user, pw = self.passwd.find_user_password(realm, host) + if pw is not None: + raw = "%s:%s" % (user, pw) + auth = 'Basic %s' % base64.b64encode(raw).strip() + if req.headers.get(self.auth_header, None) == auth: + return None + req.add_unredirected_header(self.auth_header, auth) + return self.parent.open(req, timeout=req.timeout) + else: + return None + + +class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): + + auth_header = 'Authorization' + + def http_error_401(self, req, fp, code, msg, headers): + url = req.get_full_url() + response = self.http_error_auth_reqed('www-authenticate', + url, req, headers) + self.reset_retry_count() + return response + + +class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): + + auth_header = 'Proxy-authorization' + + def http_error_407(self, req, fp, code, msg, headers): + # http_error_auth_reqed requires that there is no userinfo component in + # authority. Assume there isn't one, since urllib2 does not (and + # should not, RFC 3986 s. 3.2.1) support requests for URLs containing + # userinfo. + authority = req.get_host() + response = self.http_error_auth_reqed('proxy-authenticate', + authority, req, headers) + self.reset_retry_count() + return response + + +def randombytes(n): + """Return n random bytes.""" + # Use /dev/urandom if it is available. Fall back to random module + # if not. It might be worthwhile to extend this function to use + # other platform-specific mechanisms for getting random bytes. + if os.path.exists("/dev/urandom"): + f = open("/dev/urandom") + s = f.read(n) + f.close() + return s + else: + L = [chr(random.randrange(0, 256)) for i in range(n)] + return "".join(L) + +class AbstractDigestAuthHandler: + # Digest authentication is specified in RFC 2617. + + # XXX The client does not inspect the Authentication-Info header + # in a successful response. + + # XXX It should be possible to test this implementation against + # a mock server that just generates a static set of challenges. + + # XXX qop="auth-int" supports is shaky + + def __init__(self, passwd=None): + if passwd is None: + passwd = HTTPPasswordMgr() + self.passwd = passwd + self.add_password = self.passwd.add_password + self.retried = 0 + self.nonce_count = 0 + self.last_nonce = None + + def reset_retry_count(self): + self.retried = 0 + + def http_error_auth_reqed(self, auth_header, host, req, headers): + authreq = headers.get(auth_header, None) + if self.retried > 5: + # Don't fail endlessly - if we failed once, we'll probably + # fail a second time. Hm. Unless the Password Manager is + # prompting for the information. Crap. 
This isn't great + # but it's better than the current 'repeat until recursion + # depth exceeded' approach + raise HTTPError(req.get_full_url(), 401, "digest auth failed", + headers, None) + else: + self.retried += 1 + if authreq: + scheme = authreq.split()[0] + if scheme.lower() == 'digest': + return self.retry_http_digest_auth(req, authreq) + + def retry_http_digest_auth(self, req, auth): + token, challenge = auth.split(' ', 1) + chal = parse_keqv_list(parse_http_list(challenge)) + auth = self.get_authorization(req, chal) + if auth: + auth_val = 'Digest %s' % auth + if req.headers.get(self.auth_header, None) == auth_val: + return None + req.add_unredirected_header(self.auth_header, auth_val) + resp = self.parent.open(req, timeout=req.timeout) + return resp + + def get_cnonce(self, nonce): + # The cnonce-value is an opaque + # quoted string value provided by the client and used by both client + # and server to avoid chosen plaintext attacks, to provide mutual + # authentication, and to provide some message integrity protection. + # This isn't a fabulous effort, but it's probably Good Enough. + dig = hashlib.sha1("%s:%s:%s:%s" % (self.nonce_count, nonce, time.ctime(), + randombytes(8))).hexdigest() + return dig[:16] + + def get_authorization(self, req, chal): + try: + realm = chal['realm'] + nonce = chal['nonce'] + qop = chal.get('qop') + algorithm = chal.get('algorithm', 'MD5') + # mod_digest doesn't send an opaque, even though it isn't + # supposed to be optional + opaque = chal.get('opaque', None) + except KeyError: + return None + + H, KD = self.get_algorithm_impls(algorithm) + if H is None: + return None + + user, pw = self.passwd.find_user_password(realm, req.get_full_url()) + if user is None: + return None + + # XXX not implemented yet + if req.has_data(): + entdig = self.get_entity_digest(req.get_data(), chal) + else: + entdig = None + + A1 = "%s:%s:%s" % (user, realm, pw) + A2 = "%s:%s" % (req.get_method(), + # XXX selector: what about proxies and full urls + req.get_selector()) + if qop == 'auth': + if nonce == self.last_nonce: + self.nonce_count += 1 + else: + self.nonce_count = 1 + self.last_nonce = nonce + + ncvalue = '%08x' % self.nonce_count + cnonce = self.get_cnonce(nonce) + noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2)) + respdig = KD(H(A1), noncebit) + elif qop is None: + respdig = KD(H(A1), "%s:%s" % (nonce, H(A2))) + else: + # XXX handle auth-int. + raise URLError("qop '%s' is not supported." % qop) + + # XXX should the partial digests be encoded too? 
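        # For illustration only (the field values below are invented, not
        # computed): with a qop="auth" challenge, the credentials string
        # assembled next goes out roughly as
        #
        #   Authorization: Digest username="joe", realm="secret",
        #       nonce="dcd98b7102dd2f0e", uri="/protected",
        #       response="<32 hex digits>", algorithm="MD5",
        #       qop=auth, nc=00000001, cnonce="<16 hex chars>"
        #
        # with the "Digest " prefix and the header name added by
        # retry_http_digest_auth().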
+ + base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ + 'response="%s"' % (user, realm, nonce, req.get_selector(), + respdig) + if opaque: + base += ', opaque="%s"' % opaque + if entdig: + base += ', digest="%s"' % entdig + base += ', algorithm="%s"' % algorithm + if qop: + base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce) + return base + + def get_algorithm_impls(self, algorithm): + # algorithm should be case-insensitive according to RFC2617 + algorithm = algorithm.upper() + # lambdas assume digest modules are imported at the top level + if algorithm == 'MD5': + H = lambda x: hashlib.md5(x).hexdigest() + elif algorithm == 'SHA': + H = lambda x: hashlib.sha1(x).hexdigest() + # XXX MD5-sess + KD = lambda s, d: H("%s:%s" % (s, d)) + return H, KD + + def get_entity_digest(self, data, chal): + # XXX not implemented yet + return None + + +class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): + """An authentication protocol defined by RFC 2069 + + Digest authentication improves on basic authentication because it + does not transmit passwords in the clear. + """ + + auth_header = 'Authorization' + handler_order = 490 # before Basic auth + + def http_error_401(self, req, fp, code, msg, headers): + host = urlparse.urlparse(req.get_full_url())[1] + retry = self.http_error_auth_reqed('www-authenticate', + host, req, headers) + self.reset_retry_count() + return retry + + +class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): + + auth_header = 'Proxy-Authorization' + handler_order = 490 # before Basic auth + + def http_error_407(self, req, fp, code, msg, headers): + host = req.get_host() + retry = self.http_error_auth_reqed('proxy-authenticate', + host, req, headers) + self.reset_retry_count() + return retry + +class AbstractHTTPHandler(BaseHandler): + + def __init__(self, debuglevel=0): + self._debuglevel = debuglevel + + def set_http_debuglevel(self, level): + self._debuglevel = level + + def do_request_(self, request): + host = request.get_host() + if not host: + raise URLError('no host given') + + if request.has_data(): # POST + data = request.get_data() + if not request.has_header('Content-type'): + request.add_unredirected_header( + 'Content-type', + 'application/x-www-form-urlencoded') + if not request.has_header('Content-length'): + request.add_unredirected_header( + 'Content-length', '%d' % len(data)) + + sel_host = host + if request.has_proxy(): + scheme, sel = splittype(request.get_selector()) + sel_host, sel_path = splithost(sel) + + if not request.has_header('Host'): + request.add_unredirected_header('Host', sel_host) + for name, value in self.parent.addheaders: + name = name.capitalize() + if not request.has_header(name): + request.add_unredirected_header(name, value) + + return request + + def do_open(self, http_class, req): + """Return an addinfourl object for the request, using http_class. + + http_class must implement the HTTPConnection API from httplib. + The addinfourl return value is a file-like object. 
It also + has methods and attributes including: + - info(): return a mimetools.Message object for the headers + - geturl(): return the original request URL + - code: HTTP status code + """ + host = req.get_host() + if not host: + raise URLError('no host given') + + h = http_class(host, timeout=req.timeout) # will parse host:port + h.set_debuglevel(self._debuglevel) + + headers = dict(req.unredirected_hdrs) + headers.update(dict((k, v) for k, v in req.headers.items() + if k not in headers)) + + # We want to make an HTTP/1.1 request, but the addinfourl + # class isn't prepared to deal with a persistent connection. + # It will try to read all remaining data from the socket, + # which will block while the server waits for the next request. + # So make sure the connection gets closed after the (only) + # request. + headers["Connection"] = "close" + headers = dict( + (name.title(), val) for name, val in headers.items()) + + if req._tunnel_host: + tunnel_headers = {} + proxy_auth_hdr = "Proxy-Authorization" + if proxy_auth_hdr in headers: + tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr] + # Proxy-Authorization should not be sent to origin + # server. + del headers[proxy_auth_hdr] + h.set_tunnel(req._tunnel_host, headers=tunnel_headers) + + try: + h.request(req.get_method(), req.get_selector(), req.data, headers) + try: + r = h.getresponse(buffering=True) + except TypeError: #buffering kw not supported + r = h.getresponse() + except socket.error, err: # XXX what error? + h.close() + raise URLError(err) + + # Pick apart the HTTPResponse object to get the addinfourl + # object initialized properly. + + # Wrap the HTTPResponse object in socket's file object adapter + # for Windows. That adapter calls recv(), so delegate recv() + # to read(). This weird wrapping allows the returned object to + # have readline() and readlines() methods. + + # XXX It might be better to extract the read buffering code + # out of socket._fileobject() and into a base class. + + r.recv = r.read + fp = socket._fileobject(r, close=True) + + resp = addinfourl(fp, r.msg, req.get_full_url()) + resp.code = r.status + resp.msg = r.reason + return resp + + +class HTTPHandler(AbstractHTTPHandler): + + def http_open(self, req): + return self.do_open(httplib.HTTPConnection, req) + + http_request = AbstractHTTPHandler.do_request_ + +if hasattr(httplib, 'HTTPS'): + class HTTPSHandler(AbstractHTTPHandler): + + def https_open(self, req): + return self.do_open(httplib.HTTPSConnection, req) + + https_request = AbstractHTTPHandler.do_request_ + +class HTTPCookieProcessor(BaseHandler): + def __init__(self, cookiejar=None): + import cookielib + if cookiejar is None: + cookiejar = cookielib.CookieJar() + self.cookiejar = cookiejar + + def http_request(self, request): + self.cookiejar.add_cookie_header(request) + return request + + def http_response(self, request, response): + self.cookiejar.extract_cookies(response, request) + return response + + https_request = http_request + https_response = http_response + +class UnknownHandler(BaseHandler): + def unknown_open(self, req): + type = req.get_type() + raise URLError('unknown url type: %s' % type) + +def parse_keqv_list(l): + """Parse list of key=value strings where keys are not duplicated.""" + parsed = {} + for elt in l: + k, v = elt.split('=', 1) + if v[0] == '"' and v[-1] == '"': + v = v[1:-1] + parsed[k] = v + return parsed + +def parse_http_list(s): + """Parse lists as described by RFC 2068 Section 2. 
+ + In particular, parse comma-separated lists where the elements of + the list may include quoted-strings. A quoted-string could + contain a comma. A non-quoted string could have quotes in the + middle. Neither commas nor quotes count if they are escaped. + Only double-quotes count, not single-quotes. + """ + res = [] + part = '' + + escape = quote = False + for cur in s: + if escape: + part += cur + escape = False + continue + if quote: + if cur == '\\': + escape = True + continue + elif cur == '"': + quote = False + part += cur + continue + + if cur == ',': + res.append(part) + part = '' + continue + + if cur == '"': + quote = True + + part += cur + + # append last part + if part: + res.append(part) + + return [part.strip() for part in res] + +def _safe_gethostbyname(host): + try: + return socket.gethostbyname(host) + except socket.gaierror: + return None + +class FileHandler(BaseHandler): + # Use local file or FTP depending on form of URL + def file_open(self, req): + url = req.get_selector() + if url[:2] == '//' and url[2:3] != '/' and (req.host and + req.host != 'localhost'): + req.type = 'ftp' + return self.parent.open(req) + else: + return self.open_local_file(req) + + # names for the localhost + names = None + def get_names(self): + if FileHandler.names is None: + try: + FileHandler.names = tuple( + socket.gethostbyname_ex('localhost')[2] + + socket.gethostbyname_ex(socket.gethostname())[2]) + except socket.gaierror: + FileHandler.names = (socket.gethostbyname('localhost'),) + return FileHandler.names + + # not entirely sure what the rules are here + def open_local_file(self, req): + import email.utils + import mimetypes + host = req.get_host() + filename = req.get_selector() + localfile = url2pathname(filename) + try: + stats = os.stat(localfile) + size = stats.st_size + modified = email.utils.formatdate(stats.st_mtime, usegmt=True) + mtype = mimetypes.guess_type(filename)[0] + headers = mimetools.Message(StringIO( + 'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' % + (mtype or 'text/plain', size, modified))) + if host: + host, port = splitport(host) + if not host or \ + (not port and _safe_gethostbyname(host) in self.get_names()): + if host: + origurl = 'file://' + host + filename + else: + origurl = 'file://' + filename + return addinfourl(open(localfile, 'rb'), headers, origurl) + except OSError, msg: + # urllib2 users shouldn't expect OSErrors coming from urlopen() + raise URLError(msg) + raise URLError('file not on local host') + +class FTPHandler(BaseHandler): + def ftp_open(self, req): + import ftplib + import mimetypes + host = req.get_host() + if not host: + raise URLError('ftp error: no host given') + host, port = splitport(host) + if port is None: + port = ftplib.FTP_PORT + else: + port = int(port) + + # username/password handling + user, host = splituser(host) + if user: + user, passwd = splitpasswd(user) + else: + passwd = None + host = unquote(host) + user = user or '' + passwd = passwd or '' + + try: + host = socket.gethostbyname(host) + except socket.error, msg: + raise URLError(msg) + path, attrs = splitattr(req.get_selector()) + dirs = path.split('/') + dirs = map(unquote, dirs) + dirs, file = dirs[:-1], dirs[-1] + if dirs and not dirs[0]: + dirs = dirs[1:] + try: + fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout) + type = file and 'I' or 'D' + for attr in attrs: + attr, value = splitvalue(attr) + if attr.lower() == 'type' and \ + value in ('a', 'A', 'i', 'I', 'd', 'D'): + type = value.upper() + fp, retrlen = fw.retrfile(file, type) 
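            # Illustrative mapping from selector to transfer type (hosts and
            # paths are made up):
            #   ftp://host/pub/file.bin           -> 'I'  (binary/image)
            #   ftp://host/pub/readme.txt;type=a  -> 'A'  (forced by ;type=)
            #   ftp://host/pub/                   -> 'D'  (empty file part:
            #                                             directory listing)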
+ headers = "" + mtype = mimetypes.guess_type(req.get_full_url())[0] + if mtype: + headers += "Content-type: %s\n" % mtype + if retrlen is not None and retrlen >= 0: + headers += "Content-length: %d\n" % retrlen + sf = StringIO(headers) + headers = mimetools.Message(sf) + return addinfourl(fp, headers, req.get_full_url()) + except ftplib.all_errors, msg: + raise URLError, ('ftp error: %s' % msg), sys.exc_info()[2] + + def connect_ftp(self, user, passwd, host, port, dirs, timeout): + fw = ftpwrapper(user, passwd, host, port, dirs, timeout) +## fw.ftp.set_debuglevel(1) + return fw + +class CacheFTPHandler(FTPHandler): + # XXX would be nice to have pluggable cache strategies + # XXX this stuff is definitely not thread safe + def __init__(self): + self.cache = {} + self.timeout = {} + self.soonest = 0 + self.delay = 60 + self.max_conns = 16 + + def setTimeout(self, t): + self.delay = t + + def setMaxConns(self, m): + self.max_conns = m + + def connect_ftp(self, user, passwd, host, port, dirs, timeout): + key = user, host, port, '/'.join(dirs), timeout + if key in self.cache: + self.timeout[key] = time.time() + self.delay + else: + self.cache[key] = ftpwrapper(user, passwd, host, port, dirs, timeout) + self.timeout[key] = time.time() + self.delay + self.check_cache() + return self.cache[key] + + def check_cache(self): + # first check for old ones + t = time.time() + if self.soonest <= t: + for k, v in self.timeout.items(): + if v < t: + self.cache[k].close() + del self.cache[k] + del self.timeout[k] + self.soonest = min(self.timeout.values()) + + # then check the size + if len(self.cache) == self.max_conns: + for k, v in self.timeout.items(): + if v == self.soonest: + del self.cache[k] + del self.timeout[k] + break + self.soonest = min(self.timeout.values()) diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -379,12 +379,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -124,7 +124,8 @@ # for now, we always allow types.pointer, else a lot of tests # break. We need to rethink how pointers are represented, though if my_ffitype is not ffitype and ffitype is not _ffi.types.void_p: - raise ArgumentError, "expected %s instance, got %s" % (type(value), ffitype) + raise ArgumentError("expected %s instance, got %s" % (type(value), + ffitype)) return value._get_buffer_value() def _cast_addr(obj, _, tp): diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -17,7 +17,7 @@ if len(f) == 3: if (not hasattr(tp, '_type_') or not isinstance(tp._type_, str) - or tp._type_ not in "iIhHbBlL"): + or tp._type_ not in "iIhHbBlLqQ"): #XXX: are those all types? 
# we just dont get the type name # in the interp levle thrown TypeError diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -1,117 +1,6 @@ -"""qvfgbcvna naq hgbcvna punvef -qlfgbcvna naq hgbcvna punvef -V'z fbeel, pbhyq lbh cyrnfr abg nterr jvgu gur png nf jryy? -V'z fbeel, pbhyq lbh cyrnfr abg nterr jvgu gur punve nf jryy? -jr cnffrq gur RH erivrj -cbfg RhebClguba fcevag fgnegf 12.IVV.2007, 10nz -RhebClguba raqrq -n Pyrna Ragrecevfrf cebqhpgvba -npnqrzl vf n pbzcyvpngrq ebyr tnzr -npnqrzvn vf n pbzcyvpngrq ebyr tnzr -jbexvat pbqr vf crn fbhc -abg lbhe snhyg, zber yvxr vg'f n zbivat gnetrg -guvf fragrapr vf snyfr -abguvat vf gehr -Yncfnat Fbhpubat -Oenpunzhgnaqn -fbeel, V'yy grnpu gur pnpghf ubj gb fjvz yngre -Jul fb znal znal znal znal znal ivbyvaf? -Jul fb znal znal znal znal znal bowrpgf? -"eha njnl naq yvir ba n snez" nccebnpu gb fbsgjner qrirybczrag -"va snpg, lbh zvtug xabj zber nobhg gur genafyngvba gbbypunva nsgre znfgrevat eclguba guna fbzr angvir fcrnxre xabjf nobhg uvf zbgure gbathr" - kbeNkNk -"jurer qvq nyy gur ivbyvaf tb?" -- ClCl fgnghf oybt: uggc://zberclcl.oybtfcbg.pbz/ -uggc://kxpq.pbz/353/ -pnfhnyvgl ivbyngvbaf naq sylvat -wrgmg abpu fpubxbynqvtre -R09 2X @PNN:85? -vs lbh'er gelvat gb oybj hc fghss, jub pnerf? -vs fghss oybjf hc, lbh pner -2008 jvyy or gur lrne bs clcl ba gur qrfxgbc -2008 jvyy or gur lrne bs gur qrfxgbc ba #clcl -2008 jvyy or gur lrne bs gur qrfxgbc ba #clcl, Wnahnel jvyy or gur zbagu bs gur nyc gbcf -lrf, ohg jung'g gur frafr bs 0 < "qhena qhena" -eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF +"""eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF pglcrf unf n fcva bs 1/3 ' ' vf n fcnpr gbb -2009 jvyy or gur lrne bs WVG ba gur qrfxgbc -N ynathntr vf n qvnyrpg jvgu na nezl naq anil -gbcvpf ner sbe gur srroyr zvaqrq -2009 vf gur lrne bs ersyrpgvba ba gur qrfxgbc -gur tybor vf bhe cbal, gur pbfzbf bhe erny ubefr -jub nz V naq vs lrf, ubj znal? -cebtenzzvat va orq vf n cresrpgyl svar npgvivgl -zbber'f ynj vf n qeht jvgu gur jbefg pbzr qbja -EClguba: jr hfr vg fb lbh qba'g unir gb -Zbber'f ynj vf n qeht jvgu gur jbefg pbzr qbja. EClguba: haqrpvqrq. -guvatf jvyy or avpr naq fghss -qba'g cbfg yvaxf gb cngragf urer -Abg lbhe hfhny nanylfrf. -Gur Neg bs gur Punaary -Clguba 300 -V fhccbfr ZO bs UGZY cre frpbaq vf abg gur hfhny fcrrq zrnfher crbcyr jbhyq rkcrpg sbe n wvg -gur fha arire frgf ba gur ClCl rzcver -ghegyrf ner snfgre guna lbh guvax -cebtenzzvat vf na nrfgrguvp raqrnibhe -P vf tbbq sbe fbzrguvat, whfg abg sbe jevgvat fbsgjner -trezna vf tbbq sbe fbzrguvat, whfg abg sbe jevgvat fbsgjner -trezna vf tbbq sbe artngvbaf, whfg abg sbe jevgvat fbsgjner -# nffreg qvq abg penfu -lbh fubhyq fgneg n cresrpg fbsgjner zbirzrag -lbh fubhyq fgneg n cresrpg punaary gbcvp zbirzrag -guvf vf n cresrpg punaary gbcvp -guvf vf n frys-ersreragvny punaary gbcvp -crrcubcr bcgvzvmngvbaf ner jung n Fhssvpvragyl Fzneg Pbzcvyre hfrf -"crrcubcr" bcgvzvmngvbaf ner jung na bcgvzvfgvp Pbzcvyre hfrf -pubbfr lbhe unpx -gur 'fhcre' xrljbeq vf abg gung uhttnoyr -wlguba cngpurf ner abg rabhtu sbe clcl -- qb lbh xabj oreyva? - nyy bs vg? - jryy, whfg oreyva -- ubj jvyy gur snpg gung gurl ner hfrq va bhe ercy punatr bhe gbcvpf? -- ubj pna vg rire unir jbexrq? -- jurer fubhyq gur unpx or fgberq? 
-- Vg'f uneq gb fnl rknpgyl jung pbafgvghgrf erfrnepu va gur pbzchgre jbeyq, ohg nf n svefg nccebkvzngvba, vg'f fbsgjner gung qbrfa'g unir hfref. -- Cebtenzzvat vf nyy nobhg xabjvat jura gb obvy gur benatr fcbatr qbaxrl npebff gur cuvyyvcvarf -- Jul fb znal, znal, znal, znal, znal, znal qhpxyvatf? -- ab qrgnvy vf bofpher rabhtu gb abg unir fbzr pbqr qrcraqvat ba vg. -- jung V trarenyyl jnag vf serr fcrrqhcf -- nyy bs ClCl vf kv-dhnyvgl -"lbh pna nyjnlf xvyy -9 be bf._rkvg() vs lbh'er va n uheel" -Ohernhpengf ohvyq npnqrzvp rzcverf juvpu puhea bhg zrnavatyrff fbyhgvbaf gb veeryrinag ceboyrzf. -vg'f abg n unpx, vg'f n jbexnebhaq -ClCl qbrfa'g unir pbcbylinevnqvp qrcraqragyl-zbabzbecurq ulcresyhknqf -ClCl qbrfa'g punatr gur shaqnzragny culfvpf pbafgnagf -Qnapr bs gur Fhtnecyhz Snvel -Wnin vf whfg tbbq rabhtu gb or cenpgvpny, ohg abg tbbq rabhtu gb or hfnoyr. -RhebClguba vf unccravat, qba'g rkcrpg nal dhvpx erfcbafr gvzrf. -"V jbhyq yvxr gb fgnl njnl sebz ernyvgl gura" -"gung'f jul gur 'be' vf ernyyl na 'naq' " -jvgu nyy nccebcevngr pbagrkghnyvfngvbavat -qba'g gevc ba gur cbjre pbeq -vzcyrzragvat YBTB va YBTB: "ghegyrf nyy gur jnl qbja" -gur ohooyrfbeg jbhyq or gur jebat jnl gb tb -gur cevapvcyr bs pbafreingvba bs zrff -gb fnir n gerr, rng n ornire -Qre Ovore znpugf evpugvt: Antg nyyrf xnchgg. -"Nal jbeyqivrj gung vfag jenpxrq ol frys-qbhog naq pbashfvba bire vgf bja vqragvgl vf abg n jbeyqivrj sbe zr." - Fpbgg Nnebafba -jr oryvrir va cnapnxrf, znlor -jr oryvrir va ghegyrf, znlor -jr qrsvavgryl oryvrir va zrgn -gur zngevk unf lbh -"Yvsr vf uneq, gura lbh anc" - n png -Vf Nezva ubzr jura gur havirefr prnfrf gb rkvfg? -Qhrffryqbes fcevag fgnegrq -frys.nobeeg("pnaabg ybnq negvpyrf") -QRAGVFGEL FLZOBY YVTUG IREGVPNY NAQ JNIR -"Gur UUH pnzchf vf n tbbq Dhnxr yriry" - Nezva -"Gur UUH pnzchf jbhyq or n greevoyr dhnxr yriry - lbh'q arire unir n pyhr jurer lbh ner" - zvpunry -N enqvbnpgvir png unf 18 unys-yvirf. - : j [fvtu] -f -pbybe-pbqrq oyhrf -"Neebtnapr va pbzchgre fpvrapr vf zrnfherq va anab-Qvwxfgenf." -ClCl arrqf n Whfg-va-Gvzr WVG -"Lbh pna'g gvzr geniry whfg ol frggvat lbhe pybpxf jebat" -Gjb guernqf jnyx vagb n one. Gur onexrrcre ybbxf hc naq lryyf, "url, V jnag qba'g nal pbaqvgvbaf enpr yvxr gvzr ynfg!" Clguba 2.k rfg cerfdhr zbeg, ivir Clguba! Clguba 2.k vf abg qrnq Riregvzr fbzrbar nethrf jvgu "Fznyygnyx unf nyjnlf qbar K", vg vf nyjnlf n tbbq uvag gung fbzrguvat arrqf gb or punatrq snfg. - Znephf Qraxre @@ -119,7 +8,6 @@ __kkk__ naq __ekkk__ if bcrengvba fybgf: cnegvpyr dhnaghz fhcrecbfvgvba xvaq bs sha ClCl vf na rkpvgvat grpuabybtl gung yrgf lbh gb jevgr snfg, cbegnoyr, zhygv-cyngsbez vagrecergref jvgu yrff rssbeg Nezva: "Cebybt vf n zrff.", PS: "Ab, vg'f irel pbby!", Nezva: "Vfa'g guvf jung V fnvq?" - tbbq, grfgf ner hfrshy fbzrgvzrf :-) ClCl vf yvxr nofheq gurngre jr unir ab nagv-vzcbffvoyr fgvpx gung znxrf fher gung nyy lbhe cebtenzf unyg clcl vf n enpr orgjrra crbcyr funivat lnxf naq gur havirefr cebqhpvat zber orneqrq lnxf. Fb sne, gur havirefr vf jvaavat @@ -136,14 +24,14 @@ ClCl 1.1.0orgn eryrnfrq: uggc://pbqrfcrnx.arg/clcl/qvfg/clcl/qbp/eryrnfr-1.1.0.ugzy "gurer fubhyq or bar naq bayl bar boivbhf jnl gb qb vg". ClCl inevnag: "gurer pna or A unys-ohttl jnlf gb qb vg" 1.1 svany eryrnfrq: uggc://pbqrfcrnx.arg/clcl/qvfg/clcl/qbp/eryrnfr-1.1.0.ugzy -1.1 svany eryrnfrq | nzq64 naq ccp ner bayl ninvynoyr va ragrecevfr irefvba + nzq64 naq ccp ner bayl ninvynoyr va ragrecevfr irefvba Vf gurer n clcl gvzr? - vs lbh pna srry vg (?) 
gura gurer vf ab, abezny jbex vf fhpu zhpu yrff gvevat guna inpngvbaf ab, abezny jbex vf fb zhpu yrff gvevat guna inpngvbaf -SVEFG gurl vtaber lbh, gura gurl ynhtu ng lbh, gura gurl svtug lbh, gura lbh jva. +-SVEFG gurl vtaber lbh, gura gurl ynhtu ng lbh, gura gurl svtug lbh, gura lbh jva.- vg'f Fhaqnl, znlor vg'f Fhaqnl, ntnva -"3 + 3 = 8" Nagb va gur WVG gnyx +"3 + 3 = 8" - Nagb va gur WVG gnyx RPBBC vf unccravat RPBBC vf svavfurq cflpb rngf bar oenva cre vapu bs cebterff @@ -175,10 +63,108 @@ "nu, whfg va gvzr qbphzragngvba" (__nc__) ClCl vf abg n erny IZ: ab frtsnhyg unaqyref gb qb gur ener pnfrf lbh pna'g unir obgu pbairavrapr naq fcrrq -gur WVG qbrfa'g jbex ba BF/K (abi'09) -ab fhccbeg sbe BF/K evtug abj! (abi'09) fyvccref urvtug pna or zrnfherq va k86 ertvfgref clcl vf n enpr orgjrra gur vaqhfgel gelvat gb ohvyq znpuvarf jvgu zber naq zber erfbheprf, naq gur clcl qrirybcref gelvat gb rng nyy bs gurz. Fb sne, gur jvaare vf fgvyy hapyrne +"znl pbagnva ahgf naq/be lbhat cbvagref" +vg'f nyy irel fvzcyr, yvxr gur ubyvqnlf +unccl ClCl'f lrne 2010! +fnzhryr fnlf gung jr ybfg n enmbe. fb jr pna'g funir lnxf +"yrg'f abg or bofpher, hayrff jr ernyyl arrq gb" + (abg guernq-fnsr, ohg jryy, abguvat vf) +clcl unf znal ceboyrzf, ohg rnpu bar unf znal fbyhgvbaf +whfg nabgure vgrz (1.333...) ba bhe erny-ahzorerq gbqb yvfg +ClCl vf Fuveg Bevtnzv erfrnepu + nafjrevat n dhrfgvba: "ab -- sbe ng yrnfg bar cbffvoyr vagrecergngvba bs lbhe fragrapr" +eryrnfr 1.2 hcpbzvat +ClCl 1.2 eryrnfrq - uggc://clcl.bet/ +AB IPF QVFPHFFVBAF +EClguba vf n svar pnzry unve oehfu +ClCl vf n npghnyyl n ivfhnyvmngvba cebwrpg, jr whfg ohvyq vagrecergref gb unir vagrerfgvat qngn gb ivfhnyvmr +clcl vf yvxr fnhfntrf +naq abj sbe fbzrguvat pbzcyrgryl qvssrerag +n 10gu bs sberire vf 1u45 +pbeerpg pbqr qbrfag arrq nal grfgf +cbfgfgehpghenyvfz rgp. +clcl UVG trarengbe +gur arj clcl fcbeg vf gb cnff clcl ohtf nf pclguba ohtf +jr unir zhpu zber vagrecergref guna hfref +ClCl 1.3 njnvgvat eryrnfr +ClCl 1.3 eryrnfrq +vg frrzf gb zr gung bapr lbh frggyr ba na rkrphgvba / bowrpg zbqry naq / be olgrpbqr sbezng, lbh'ir nyernql qrpvqrq jung ynathntrf (jurer gur 'f' frrzf fhcresyhbhf) fhccbeg vf tbvat gb or svefg pynff sbe +"Nyy ceboyrzf va ClCl pna or fbyirq ol nabgure yriry bs vagrecergngvba" +ClCl 1.3 eryrnfrq (jvaqbjf ovanevrf vapyhqrq) +jul qvq lbh thlf unir gb znxr gur ohvygva sbeghar zber vagrerfgvat guna npghny jbex? v whfg pngpurq zlfrys erfgnegvat clcl 20 gvzrf +"jr hfrq gb unir n zrff jvgu na bofpher vagresnpr, abj jr unir zrff urer naq bofpher vagresnpr gurer. cebterff" crqebavf ba n clcl fcevag +"phcf bs pbssrr ner yvxr nanybtvrf va gung V'z znxvat bar evtug abj" +"vg'f nyjnlf hc gb hf, va n jnl be gur bgure" +ClCl vf infg, naq pbagnvaf zhygvghqrf +qravny vf eneryl n tbbq qrohttvat grpuavdhr +"Yrg'f tb." - "Jr pna'g" - "Jul abg?" - "Jr'er jnvgvat sbe n Genafyngvba." - (qrfcnvevatyl) "Nu!" +'gung'f qrsvavgryl n pnfr bs "hu????"' +va gurbel gurer vf gur Ybbc, va cenpgvpr gurer ner oevqtrf +gur uneqqevir - pbafgnag qngn cvytevzntr +ClCl vf n gbby gb xrrc bgurejvfr qnatrebhf zvaqf fnsryl bpphcvrq. 
+jr ner n trareny senzrjbex ohvyg ba pbafvfgrag nccyvpngvba bs nqubp-arff +gur jnl gb nibvq n jbexnebhaq vf gb vagebqhpr n fgebatre jbexnebhaq fbzrjurer ryfr +pnyyvat gur genafyngvba gbby punva n 'fpevcg' vf xvaq bs bssrafvir +ehaavat clcl-p genafyngr.cl vf n ovg yvxr jngpuvat n guevyyre zbivr, vg pbhyq pbafhzr nyy gur zrzbel ng nal gvzr +ehaavat clcl-p genafyngr.cl vf n ovg yvxr jngpuvat n guevyyre zbivr, vg pbhyq qvr ng nal gvzr orpnhfr bs gur 32-ovg 4TO yvzvg bs ENZ +Qh jvefg rora tranh qnf reervpura, jbena xrvare tynhog +vs fjvgmreynaq jrer jurer terrpr vf (ba vfynaqf) jbhyq gurl nyy or pbaarpgrq ol oevqtrf? +genafyngvat clcl jvgu pclguba vf fbbbbbb fybj +ClCl 1.4 eryrnfrq! +Jr ner abg urebrf, whfg irel cngvrag. +QBAR zrnaf vg'f qbar +jul gurer vf ab "ClCl 1.4 eryrnfrq" va gbcvp nal zber? +fabj! fabj! +svanyyl, zrephevny zvtengvba vf unccravat! +Gur zvtengvba gb zrephevny vf pbzcyrgrq! uggc://ovgohpxrg.bet/clcl/clcl +fabj! fabj! (gre) +unccl arj lrne +naq anaanaw gb lbh nf jryy +Frrvat nf gur ynjf bs culfvpf ner ntnvafg lbh, lbh unir gb pnershyyl pbafvqre lbhe fpbcr fb gung lbhe tbnyf ner ernfbanoyr. +nf hfhny va clcl, gur fbyhgvba nccrnef pbzcyrgryl qvfcebcbegvbangr gb gur ceboyrz naq vafgrnq jr'yy tb sbe n pbzcyrgryl qvssrerag fvzcyre nccebnpu gb gur bevtvany ceboyrz +fabj, fabj! +va clcl lbh ner nyjnlf ng gur jebat yriry, va bar jnl be gur bgure +jryy, vg'f jebat ohg abg fb "irel jebat" nf vg ybbxrq + V ybir clcl +ynmvarff vzcngvrapr naq uhoevf +fabj, fabj +EClguba: guvatf lbh jbhyqa'g qb va Clguba, naq pna'g qb va P. +vg vf gur rkcrpgrq orunivbe, rkprcg jura lbh qba'g rkcrpg vg +erqrsvavat lryybj frrzf yvxr n orggre vqrn +"gung'f ubjrire whfg ratvarrevat" (svwny) +"[vg] whfg fubjf ntnva gung eclguba vf bofpher" (psobym) +"naljnl, clguba vf n infg ynathntr" (svwny) +bhg-bs-yvr-thneqf +"gurer ner qnlf ba juvpu lbh ybbx nebhaq naq abguvat fubhyq unir rire jbexrq" (svwny) +clcl vf n orggre xvaq bs sbbyvfuarff - ynp +ehaavat grfgf vf rffragvny sbe qrirybcvat clcl -- hu? qvq V oernx gur grfg? (svwny) +V'ir tbg guvf sybbe jnk gung'f nyfb n TERNG qrffreg gbccvat!! +rknexha: "gur cneg gung V gubhtug jnf tbvat gb or uneq jnf gevivny, fb abj V whfg unir guvf cneg gung V qvqa'g rira guvax bs gung vf uneq" +V fhccbfr jr pna yvir jvgu gur bofphevgl, nf ybat nf gurer vf n pbzzrag znxvat vg yvtugre +V nz n ovt oryvrire va ernfbaf. ohg gur nccnerag xvaq ner zl snibevgr. +clcl: trg n WVG sbe serr (jryy gur svefg qnl lbh jba'g znantr naq vg jvyy or irel sehfgengvat) + thgjbegu: bu, jr fubhyq znxr gur WVG zntvpnyyl orggre, jvgu qrpbengbef naq fghss +vg'f n pbzcyrgr unpx, ohg n irel zvavzny bar (nevtngb) +svefg gurl ynhtu ng lbh, gura gurl vtaber lbh, gura gurl svtug lbh, gura lbh jva +ClCl vf snzvyl sevraqyl +jr yvxr pbzcynvagf +gbqnl jr'er snfgre guna lrfgreqnl (hfhnyyl) +ClCl naq PClguba: gurl ner zbegny rarzvrf vagrag ba xvyyvat rnpu bgure +nethnoyl, rirelguvat vf n avpur +clcl unf ynlref yvxr bavbaf: crryvat gurz onpx jvyy znxr lbh pel +EClguba zntvpnyyl znxrf lbh evpu naq snzbhf (fnlf fb ba gur gva) +Vf evtbobg nebhaq jura gur havirefr prnfrf gb rkvfg? +ClCl vf gbb pbby sbe dhrelfgevatf. +< nevtngb> gura jung bpphef? < svwny> tbbq fghss V oryvrir +ClCl 1.6 eryrnfrq! + jurer ner gur grfgf? +uggc://gjvgcvp.pbz/52nr8s +N enaqbz dhbgr +Nyy rkprcgoybpxf frrz fnar. 
+N cvax tyvggrel ebgngvat ynzoqn +"vg'f yvxryl grzcbenel hagvy sberire" nevtb """ def some_topic(): diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# -*- coding: iso-8859-1 +# -*- coding: iso-8859-1 -*- # Note that PyPy contains also a built-in module 'sha' which will hide # this one if compiled in. diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -231,6 +231,9 @@ sqlite.sqlite3_result_text.argtypes = [c_void_p, c_char_p, c_int, c_void_p] sqlite.sqlite3_result_text.restype = None +sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] +sqlite.sqlite3_enable_load_extension.restype = c_int + ########################################## # END Wrapped SQLite C API and constants ########################################## @@ -705,6 +708,14 @@ from sqlite3.dump import _iterdump return _iterdump(self) + def enable_load_extension(self, enabled): + self._check_thread() + self._check_closed() + + rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) + if rc != SQLITE_OK: + raise OperationalError("Error enabling load extension") + DML, DQL, DDL = range(3) class Cursor(object): diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -25,7 +25,7 @@ __all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee'] + 'takewhile', 'tee', 'compress', 'product'] try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f diff --git a/lib_pypy/pyrepl/commands.py b/lib_pypy/pyrepl/commands.py --- a/lib_pypy/pyrepl/commands.py +++ b/lib_pypy/pyrepl/commands.py @@ -33,10 +33,9 @@ class Command(object): finish = 0 kills_digit_arg = 1 - def __init__(self, reader, (event_name, event)): + def __init__(self, reader, cmd): self.reader = reader - self.event = event - self.event_name = event_name + self.event_name, self.event = cmd def do(self): pass diff --git a/lib_pypy/pyrepl/pygame_console.py b/lib_pypy/pyrepl/pygame_console.py --- a/lib_pypy/pyrepl/pygame_console.py +++ b/lib_pypy/pyrepl/pygame_console.py @@ -130,7 +130,7 @@ s.fill(c, [0, 600 - bmargin, 800, bmargin]) s.fill(c, [800 - rmargin, 0, lmargin, 600]) - def refresh(self, screen, (cx, cy)): + def refresh(self, screen, cxy): self.screen = screen self.pygame_screen.fill(colors.bg, [0, tmargin + self.cur_top + self.scroll, @@ -139,8 +139,8 @@ line_top = self.cur_top width, height = self.fontsize - self.cxy = (cx, cy) - cp = self.char_pos(cx, cy) + self.cxy = cxy + cp = self.char_pos(*cxy) if cp[1] < tmargin: self.scroll = - (cy*self.fh + self.cur_top) self.repaint() @@ -148,7 +148,7 @@ self.scroll += (600 - bmargin) - (cp[1] + self.fh) self.repaint() if self.curs_vis: - self.pygame_screen.blit(self.cursor, self.char_pos(cx, cy)) + self.pygame_screen.blit(self.cursor, self.char_pos(*cxy)) for line in screen: if 0 <= line_top + self.scroll <= (600 - bmargin - tmargin - self.fh): if line: diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py --- a/lib_pypy/pyrepl/readline.py +++ b/lib_pypy/pyrepl/readline.py @@ -231,7 +231,11 @@ return ''.join(chars) def _histline(self, line): - return unicode(line.rstrip('\n'), ENCODING) + line = line.rstrip('\n') + try: + return unicode(line, ENCODING) + except UnicodeDecodeError: # bah, silently fall back... 
+ return unicode(line, 'utf-8') def get_history_length(self): return self.saved_history_length @@ -268,7 +272,10 @@ f = open(os.path.expanduser(filename), 'w') for entry in history: if isinstance(entry, unicode): - entry = entry.encode(ENCODING) + try: + entry = entry.encode(ENCODING) + except UnicodeEncodeError: # bah, silently fall back... + entry = entry.encode('utf-8') entry = entry.replace('\n', '\r\n') # multiline history support f.write(entry + '\n') f.close() @@ -395,9 +402,21 @@ _wrapper.f_in = f_in _wrapper.f_out = f_out - if hasattr(sys, '__raw_input__'): # PyPy - _old_raw_input = sys.__raw_input__ + if '__pypy__' in sys.builtin_module_names: # PyPy + + def _old_raw_input(prompt=''): + # sys.__raw_input__() is only called when stdin and stdout are + # as expected and are ttys. If it is the case, then get_reader() + # should not really fail in _wrapper.raw_input(). If it still + # does, then we will just cancel the redirection and call again + # the built-in raw_input(). + try: + del sys.__raw_input__ + except AttributeError: + pass + return raw_input(prompt) sys.__raw_input__ = _wrapper.raw_input + else: # this is not really what readline.c does. Better than nothing I guess import __builtin__ diff --git a/lib_pypy/pyrepl/unix_console.py b/lib_pypy/pyrepl/unix_console.py --- a/lib_pypy/pyrepl/unix_console.py +++ b/lib_pypy/pyrepl/unix_console.py @@ -163,7 +163,7 @@ def change_encoding(self, encoding): self.encoding = encoding - def refresh(self, screen, (cx, cy)): + def refresh(self, screen, cxy): # this function is still too long (over 90 lines) if not self.__gone_tall: @@ -198,6 +198,7 @@ # we make sure the cursor is on the screen, and that we're # using all of the screen if we can + cx, cy = cxy if cy < offset: offset = cy elif cy >= offset + height: @@ -411,7 +412,12 @@ e.args[4] == 'unexpected end of data': pass else: - raise + # was: "raise". But it crashes pyrepl, and by extension the + # pypy currently running, in which we are e.g. in the middle + # of some debugging session. Argh. Instead just print an + # error message to stderr and continue running, for now. 
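                    # For illustration (the exact message text varies): a
                    # stray, undecodable byte in the terminal input now just
                    # writes a line like
                    #   UnicodeDecodeError: 'utf8' codec can't decode byte 0xff ...
                    # to stderr, and the pyrepl session keeps running.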
+ self.partial_char = '' + sys.stderr.write('\n%s: %s\n' % (e.__class__.__name__, e)) else: self.partial_char = '' self.event_queue.push(c) diff --git a/lib_pypy/resource.py b/lib_pypy/resource.py --- a/lib_pypy/resource.py +++ b/lib_pypy/resource.py @@ -7,7 +7,7 @@ from ctypes_support import standard_c_lib as libc from ctypes_support import get_errno -from ctypes import Structure, c_int, c_long, byref, sizeof, POINTER +from ctypes import Structure, c_int, c_long, byref, POINTER from errno import EINVAL, EPERM import _structseq @@ -165,7 +165,6 @@ @builtinify def getpagesize(): - pagesize = 0 if _getpagesize: return _getpagesize() else: diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py --- a/lib_pypy/syslog.py +++ b/lib_pypy/syslog.py @@ -38,9 +38,27 @@ _setlogmask.argtypes = (c_int,) _setlogmask.restype = c_int +_S_log_open = False +_S_ident_o = None + +def _get_argv(): + try: + import sys + script = sys.argv[0] + if isinstance(script, str): + return script[script.rfind('/')+1:] or None + except Exception: + pass + return None + @builtinify -def openlog(ident, option, facility): - _openlog(ident, option, facility) +def openlog(ident=None, logoption=0, facility=LOG_USER): + global _S_ident_o, _S_log_open + if ident is None: + ident = _get_argv() + _S_ident_o = c_char_p(ident) # keepalive + _openlog(_S_ident_o, logoption, facility) + _S_log_open = True @builtinify def syslog(arg1, arg2=None): @@ -48,11 +66,18 @@ priority, message = arg1, arg2 else: priority, message = LOG_INFO, arg1 + # if log is not opened, open it now + if not _S_log_open: + openlog() _syslog(priority, "%s", message) @builtinify def closelog(): - _closelog() + global _S_log_open, S_ident_o + if _S_log_open: + _closelog() + _S_log_open = False + _S_ident_o = None @builtinify def setlogmask(mask): diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -307,7 +307,7 @@ self._striptext = 'AssertionError: ' self._excinfo = tup self.type, self.value, tb = self._excinfo - self.typename = self.type.__name__ + self.typename = getattr(self.type, "__name__", "???") self.traceback = py.code.Traceback(tb) def __repr__(self): diff --git a/pypy/annotation/binaryop.py b/pypy/annotation/binaryop.py --- a/pypy/annotation/binaryop.py +++ b/pypy/annotation/binaryop.py @@ -252,7 +252,26 @@ # unsignedness is considered a rare and contagious disease def union((int1, int2)): - knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + if int1.unsigned == int2.unsigned: + knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + else: + t1 = int1.knowntype + if t1 is bool: + t1 = int + t2 = int2.knowntype + if t2 is bool: + t2 = int + + if t2 is int: + if int2.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t1 + knowntype = t1 + elif t1 is int: + if int1.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t2 + knowntype = t2 + else: + raise UnionError, "Merging these types (%s, %s) is not supported" % (t1, t2) return SomeInteger(nonneg=int1.nonneg and int2.nonneg, knowntype=knowntype) diff --git a/pypy/annotation/classdef.py b/pypy/annotation/classdef.py --- a/pypy/annotation/classdef.py +++ b/pypy/annotation/classdef.py @@ -276,8 +276,8 @@ # create the Attribute and do the generalization asked for newattr = Attribute(attr, self.bookkeeper) if s_value: - if newattr.name == 'intval' and getattr(s_value, 'unsigned', False): - import pdb; pdb.set_trace() + #if newattr.name == 'intval' 
and getattr(s_value, 'unsigned', False): + # import pdb; pdb.set_trace() newattr.s_value = s_value # keep all subattributes' values diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -591,13 +591,11 @@ immutable = True def __init__(self, method): self.method = method - -NUMBER = object() + annotation_to_ll_map = [ (SomeSingleFloat(), lltype.SingleFloat), (s_None, lltype.Void), # also matches SomeImpossibleValue() (s_Bool, lltype.Bool), - (SomeInteger(knowntype=r_ulonglong), NUMBER), (SomeFloat(), lltype.Float), (SomeLongFloat(), lltype.LongFloat), (SomeChar(), lltype.Char), @@ -623,10 +621,11 @@ return lltype.Ptr(p.PARENTTYPE) if isinstance(s_val, SomePtr): return s_val.ll_ptrtype + if type(s_val) is SomeInteger: + return lltype.build_number(None, s_val.knowntype) + for witness, T in annotation_to_ll_map: if witness.contains(s_val): - if T is NUMBER: - return lltype.build_number(None, s_val.knowntype) return T if info is None: info = '' @@ -635,7 +634,7 @@ raise ValueError("%sshould return a low-level type,\ngot instead %r" % ( info, s_val)) -ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map if ll is not NUMBER]) +ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map]) def lltype_to_annotation(T): try: diff --git a/pypy/annotation/specialize.py b/pypy/annotation/specialize.py --- a/pypy/annotation/specialize.py +++ b/pypy/annotation/specialize.py @@ -36,9 +36,7 @@ newtup = SpaceOperation('newtuple', starargs, argscopy[-1]) newstartblock.operations.append(newtup) newstartblock.closeblock(Link(argscopy, graph.startblock)) - graph.startblock.isstartblock = False graph.startblock = newstartblock - newstartblock.isstartblock = True argnames = argnames + ['.star%d' % i for i in range(nb_extra_args)] graph.signature = Signature(argnames) # note that we can mostly ignore defaults: if nb_extra_args > 0, diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py --- a/pypy/annotation/test/test_annrpython.py +++ b/pypy/annotation/test/test_annrpython.py @@ -856,6 +856,46 @@ py.test.raises(Exception, a.build_types, f, []) # if you want to get a r_uint, you have to be explicit about it + def test_add_different_ints(self): + def f(a, b): + return a + b + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_different_ints(self): + def f(a, b): + if a: + c = a + else: + c = b + return c + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_ruint_zero(self): + def f(a): + if a: + c = a + else: + c = 0 + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_merge_ruint_nonneg_signed(self): + def f(a, b): + if a: + c = a + else: + assert b >= 0 + c = b + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint, int]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_prebuilt_long_that_is_not_too_long(self): small_constant = 12L def f(): @@ -3029,7 +3069,7 @@ if g(x, y): g(x, r_uint(y)) a = self.RPythonAnnotator() - a.build_types(f, [int, int]) + py.test.raises(Exception, a.build_types, f, [int, int]) def test_compare_with_zero(self): def g(): diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -72,6 +72,7 @@ del 
working_modules['fcntl'] # LOCK_NB not defined del working_modules["_minimal_curses"] del working_modules["termios"] + del working_modules["_multiprocessing"] # depends on rctime @@ -91,7 +92,7 @@ module_import_dependencies = { # no _rawffi if importing pypy.rlib.clibffi raises ImportError - # or CompilationError + # or CompilationError or py.test.skip.Exception "_rawffi" : ["pypy.rlib.clibffi"], "_ffi" : ["pypy.rlib.clibffi"], @@ -112,7 +113,7 @@ try: for name in modlist: __import__(name) - except (ImportError, CompilationError), e: + except (ImportError, CompilationError, py.test.skip.Exception), e: errcls = e.__class__.__name__ config.add_warning( "The module %r is disabled\n" % (modname,) + @@ -127,7 +128,7 @@ pypy_optiondescription = OptionDescription("objspace", "Object Space Options", [ ChoiceOption("name", "Object Space name", - ["std", "flow", "thunk", "dump", "taint"], + ["std", "flow", "thunk", "dump"], "std", cmdline='--objspace -o'), diff --git a/pypy/config/test/test_translationoption.py b/pypy/config/test/test_translationoption.py new file mode 100644 --- /dev/null +++ b/pypy/config/test/test_translationoption.py @@ -0,0 +1,10 @@ +import py +from pypy.config.translationoption import get_combined_translation_config +from pypy.config.translationoption import set_opt_level +from pypy.config.config import ConflictConfigError + + +def test_no_gcrootfinder_with_boehm(): + config = get_combined_translation_config() + config.translation.gcrootfinder = "shadowstack" + py.test.raises(ConflictConfigError, set_opt_level, config, '0') diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -69,8 +69,8 @@ "statistics": [("translation.gctransformer", "framework")], "generation": [("translation.gctransformer", "framework")], "hybrid": [("translation.gctransformer", "framework")], - "boehm": [("translation.gctransformer", "boehm"), - ("translation.continuation", False)], # breaks + "boehm": [("translation.continuation", False), # breaks + ("translation.gctransformer", "boehm")], "markcompact": [("translation.gctransformer", "framework")], "minimark": [("translation.gctransformer", "framework")], }, @@ -398,6 +398,10 @@ # make_sure_not_resized often relies on it, so we always enable them config.translation.suggest(list_comprehension_operations=True) + # finally, make the choice of the gc definitive. This will fail + # if we have specified strange inconsistent settings. + config.translation.gc = config.translation.gc + # ---------------------------------------------------------------- def set_platform(config): diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -37,29 +37,6 @@ .. _`thunk object space docs`: objspace-proxies.html#thunk .. _`interface section of the thunk object space docs`: objspace-proxies.html#thunk-interface -.. broken: - - Taint Object Space Functionality - ================================ - - When the taint object space is used (choose with :config:`objspace.name`), - the following names are put into ``__pypy__``: - - - ``taint`` - - ``is_tainted`` - - ``untaint`` - - ``taint_atomic`` - - ``_taint_debug`` - - ``_taint_look`` - - ``TaintError`` - - Those are all described in the `interface section of the taint object space - docs`_. - - For more detailed explanations and examples see the `taint object space docs`_. - - .. 
_`taint object space docs`: objspace-proxies.html#taint - .. _`interface section of the taint object space docs`: objspace-proxies.html#taint-interface Transparent Proxy Functionality =============================== diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -270,7 +270,12 @@ - *slicing*: the slice start must be within bounds. The stop doesn't need to, but it must not be smaller than the start. All negative indexes are disallowed, except for - the [:-1] special case. No step. + the [:-1] special case. No step. Slice deletion follows the same rules. + + - *slice assignment*: + only supports ``lst[x:y] = sublist``, if ``len(sublist) == y - x``. + In other words, slice assignment cannot change the total length of the list, + but just replace items. - *other operators*: ``+``, ``+=``, ``in``, ``*``, ``*=``, ``==``, ``!=`` work as expected. diff --git a/pypy/doc/config/objspace.name.txt b/pypy/doc/config/objspace.name.txt --- a/pypy/doc/config/objspace.name.txt +++ b/pypy/doc/config/objspace.name.txt @@ -4,7 +4,6 @@ for normal usage): * thunk_: The thunk object space adds lazy evaluation to PyPy. - * taint_: The taint object space adds soft security features. * dump_: Using this object spaces results in the dumpimp of all operations to a log. @@ -12,5 +11,4 @@ .. _`Object Space Proxies`: ../objspace-proxies.html .. _`Standard Object Space`: ../objspace.html#standard-object-space .. _thunk: ../objspace-proxies.html#thunk -.. _taint: ../objspace-proxies.html#taint .. _dump: ../objspace-proxies.html#dump diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -304,5 +304,14 @@ never a dictionary as it sometimes is in CPython. Assigning to ``__builtins__`` has no effect. +* directly calling the internal magic methods of a few built-in types + with invalid arguments may have a slightly different result. For + example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return + ``NotImplemented`` on PyPy; on CPython, only the later does, and the + former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` + both raise ``TypeError`` everywhere.) This difference is an + implementation detail that shows up because of internal C-level slots + that PyPy does not have. + .. include:: _ref.txt diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -1,6 +1,3 @@ -.. include:: needswork.txt - -.. needs work, it talks about svn. also, it is not really user documentation Making a PyPy Release ======================= @@ -12,11 +9,8 @@ forgetting things. A set of todo files may also work. Check and prioritize all issues for the release, postpone some if necessary, -create new issues also as necessary. A meeting (or meetings) should be -organized to decide what things are priorities, should go in and work for -the release. - -An important thing is to get the documentation into an up-to-date state! +create new issues also as necessary. An important thing is to get +the documentation into an up-to-date state! Release Steps ---------------- diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. 
-* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.6`: http://pypy.org/download.html +.. _`Release 1.7`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. Windows is supported, but platform-specific bugs tend to take longer before we notice and fix @@ -309,7 +309,6 @@ .. _`object space`: objspace.html .. _FlowObjSpace: objspace.html#the-flow-object-space .. _`trace object space`: objspace.html#the-trace-object-space -.. _`taint object space`: objspace-proxies.html#taint .. _`thunk object space`: objspace-proxies.html#thunk .. _`transparent proxies`: objspace-proxies.html#tproxy .. _`Differences between PyPy and CPython`: cpython_differences.html diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -129,297 +129,6 @@ function behaves lazily: all calls to it return a thunk object. -.. broken right now: - - .. _taint: - - The Taint Object Space - ====================== - - Motivation - ---------- - - The Taint Object Space provides a form of security: "tainted objects", - inspired by various sources, see [D12.1]_ for a more detailed discussion. - - The basic idea of this kind of security is not to protect against - malicious code but to help with handling and boxing sensitive data. - It covers two kinds of sensitive data: secret data which should not leak, - and untrusted data coming from an external source and that must be - validated before it is used. - - The idea is that, considering a large application that handles these - kinds of sensitive data, there are typically only a small number of - places that need to explicitly manipulate that sensitive data; all the - other places merely pass it around, or do entirely unrelated things. - - Nevertheless, if a large application needs to be reviewed for security, - it must be entirely carefully checked, because it is possible that a - bug at some apparently unrelated place could lead to a leak of sensitive - information in a way that an external attacker could exploit. For - example, if any part of the application provides web services, an - attacker might be able to issue unexpected requests with a regular web - browser and deduce secret information from the details of the answers he - gets. Another example is the common CGI attack where an attacker sends - malformed inputs and causes the CGI script to do unintended things. - - An approach like that of the Taint Object Space allows the small parts - of the program that manipulate sensitive data to be explicitly marked. - The effect of this is that although these small parts still need a - careful security review, the rest of the application no longer does, - because even a bug would be unable to leak the information. 
- - We have implemented a simple two-level model: objects are either - regular (untainted), or sensitive (tainted). Objects are marked as - sensitive if they are secret or untrusted, and only declassified at - carefully-checked positions (e.g. where the secret data is needed, or - after the untrusted data has been fully validated). - - It would be simple to extend the code for more fine-grained scales of - secrecy. For example it is typical in the literature to consider - user-specified lattices of secrecy levels, corresponding to multiple - "owners" that cannot access data belonging to another "owner" unless - explicitly authorized to do so. - - Tainting and untainting - ----------------------- - - Start a py.py with the Taint Object Space and try the following example:: - - $ py.py -o taint - >>>> from __pypy__ import taint - >>>> x = taint(6) - - # x is hidden from now on. We can pass it around and - # even operate on it, but not inspect it. Taintness - # is propagated to operation results. - - >>>> x - TaintError - - >>>> if x > 5: y = 2 # see below - TaintError - - >>>> y = x + 5 # ok - >>>> lst = [x, y] - >>>> z = lst.pop() - >>>> t = type(z) # type() works too, tainted answer - >>>> t - TaintError - >>>> u = t is int # even 'is' works - >>>> u - TaintError - - Notice that using a tainted boolean like ``x > 5`` in an ``if`` - statement is forbidden. This is because knowing which path is followed - would give away a hint about ``x``; in the example above, if the - statement ``if x > 5: y = 2`` was allowed to run, we would know - something about the value of ``x`` by looking at the (untainted) value - in the variable ``y``. - - Of course, there is a way to inspect tainted objects. The basic way is - to explicitly "declassify" it with the ``untaint()`` function. In an - application, the places that use ``untaint()`` are the places that need - careful security review. To avoid unexpected objects showing up, the - ``untaint()`` function must be called with the exact type of the object - to declassify. It will raise ``TaintError`` if the type doesn't match:: - - >>>> from __pypy__ import taint - >>>> untaint(int, x) - 6 - >>>> untaint(int, z) - 11 - >>>> untaint(bool, x > 5) - True - >>>> untaint(int, x > 5) - TaintError - - - Taint Bombs - ----------- - - In this area, a common problem is what to do about failing operations. - If an operation raises an exception when manipulating a tainted object, - then the very presence of the exception can leak information about the - tainted object itself. Consider:: - - >>>> 5 / (x-6) - - By checking if this raises ``ZeroDivisionError`` or not, we would know - if ``x`` was equal to 6 or not. The solution to this problem in the - Taint Object Space is to introduce *Taint Bombs*. They are a kind of - tainted object that doesn't contain a real object, but a pending - exception. Taint Bombs are indistinguishable from normal tainted - objects to unprivileged code. See:: - - >>>> x = taint(6) - >>>> i = 5 / (x-6) # no exception here - >>>> j = i + 1 # nor here - >>>> k = j + 5 # nor here - >>>> untaint(int, k) - TaintError - - In the above example, all of ``i``, ``j`` and ``k`` contain a Taint - Bomb. Trying to untaint it raises an exception - a generic - ``TaintError``. What we win is that the exception gives little away, - and most importantly it occurs at the point where ``untaint()`` is - called, not where the operation failed. 
This means that all calls to - ``untaint()`` - but not the rest of the code - must be carefully - reviewed for what occurs if they receive a Taint Bomb; they might catch - the ``TaintError`` and give the user a generic message that something - went wrong, if we are reasonably careful that the message or even its - presence doesn't give information away. This might be a - problem by itself, but there is no satisfying general solution here: - it must be considered on a case-by-case basis. Again, what the - Taint Object Space approach achieves is not solving these problems, but - localizing them to well-defined small parts of the application - namely, - around calls to ``untaint()``. - - The ``TaintError`` exception deliberately does not include any - useful error messages, because they might give information away. - Of course, this makes debugging quite a bit harder; a difficult - problem to solve properly. So far we have implemented a way to peek in a Taint - Box or Bomb, ``__pypy__._taint_look(x)``, and a "debug mode" that - prints the exception as soon as a Bomb is created - both write - information to the low-level stderr of the application, where we hope - that it is unlikely to be seen by anyone but the application - developer. - - - Taint Atomic functions - ---------------------- - - Occasionally, a more complicated computation must be performed on a - tainted object. This requires first untainting the object, performing the - computations, and then carefully tainting the result again (including - hiding all exceptions into Bombs). - - There is a built-in decorator that does this for you:: - - >>>> @__pypy__.taint_atomic - >>>> def myop(x, y): - .... while x > 0: - .... x -= y - .... return x - .... - >>>> myop(42, 10) - -8 - >>>> z = myop(taint(42), 10) - >>>> z - TaintError - >>>> untaint(int, z) - -8 - - The decorator makes a whole function behave like a built-in operation. - If no tainted argument is passed in, the function behaves normally. But - if any of the arguments is tainted, it is automatically untainted - so - the function body always sees untainted arguments - and the eventual - result is tainted again (possibly in a Taint Bomb). - - It is important for the function marked as ``taint_atomic`` to have no - visible side effects, as these could cause information leakage. - This is currently not enforced, which means that all ``taint_atomic`` - functions have to be carefully reviewed for security (but not the - callers of ``taint_atomic`` functions). - - A possible future extension would be to forbid side-effects on - non-tainted objects from all ``taint_atomic`` functions. - - An example of usage: given a tainted object ``passwords_db`` that - references a database of passwords, we can write a function - that checks if a password is valid as follows:: - - @taint_atomic - def validate(passwords_db, username, password): - assert type(passwords_db) is PasswordDatabase - assert type(username) is str - assert type(password) is str - ...load username entry from passwords_db... - return expected_password == password - - It returns a tainted boolean answer, or a Taint Bomb if something - went wrong. A caller can do:: - - ok = validate(passwords_db, 'john', '1234') - ok = untaint(bool, ok) - - This can give three outcomes: ``True``, ``False``, or a ``TaintError`` - exception (with no information on it) if anything went wrong. 
If even - this is considered giving too much information away, the ``False`` case - can be made indistinguishable from the ``TaintError`` case (simply by - raising an exception in ``validate()`` if the password is wrong). - - In the above example, the security results achieved are the following: - as long as ``validate()`` does not leak information, no other part of - the code can obtain more information about a passwords database than a - Yes/No answer to a precise query. - - A possible extension of the ``taint_atomic`` decorator would be to check - the argument types, as ``untaint()`` does, for the same reason: to - prevent bugs where a function like ``validate()`` above is accidentally - called with the wrong kind of tainted object, which would make it - misbehave. For now, all ``taint_atomic`` functions should be - conservative and carefully check all assumptions on their input - arguments. - - - .. _`taint-interface`: - - Interface - --------- - - .. _`like a built-in operation`: - - The basic rule of the Tainted Object Space is that it introduces two new - kinds of objects, Tainted Boxes and Tainted Bombs (which are not types - in the Python sense). Each box internally contains a regular object; - each bomb internally contains an exception object. An operation - involving Tainted Boxes is performed on the objects contained in the - boxes, and gives a Tainted Box or a Tainted Bomb as a result (such an - operation does not let an exception be raised). An operation called - with a Tainted Bomb argument immediately returns the same Tainted Bomb. - - In a PyPy running with (or translated with) the Taint Object Space, - the ``__pypy__`` module exposes the following interface: - - * ``taint(obj)`` - - Return a new Tainted Box wrapping ``obj``. Return ``obj`` itself - if it is already tainted (a Box or a Bomb). - - * ``is_tainted(obj)`` - - Check if ``obj`` is tainted (a Box or a Bomb). - - * ``untaint(type, obj)`` - - Untaints ``obj`` if it is tainted. Raise ``TaintError`` if the type - of the untainted object is not exactly ``type``, or if ``obj`` is a - Bomb. - - * ``taint_atomic(func)`` - - Return a wrapper function around the callable ``func``. The wrapper - behaves `like a built-in operation`_ with respect to untainting the - arguments, tainting the result, and returning a Bomb. - - * ``TaintError`` - - Exception. On purpose, it provides no attribute or error message. - - * ``_taint_debug(level)`` - - Set the debugging level to ``level`` (0=off). At level 1 or above, - all Taint Bombs print a diagnostic message to stderr when they are - created. - - * ``_taint_look(obj)`` - - For debugging purposes: prints (to stderr) the type and address of - the object in a Tainted Box, or prints the exception if ``obj`` is - a Taint Bomb. - - .. _dump: The Dump Object Space diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -17,17 +17,26 @@ projects, or anything else in PyPy, pop up on IRC or write to us on the `mailing list`_. +Make big integers faster +------------------------- + +PyPy's implementation of the Python ``long`` type is slower than CPython's. +Find out why and optimize them. + +Make bytearray type fast +------------------------ + +PyPy's bytearray type is very inefficient. It would be an interesting +task to look into possible optimizations on this. + Numpy improvements ------------------ -This is more of a project-container than a single project. 
Possible ideas: +Numpy support is rapidly progressing in PyPy, so feel free to come to IRC and +ask for a proposed topic. A not necessarily up-to-date `list of topics`_ +is also available. -* experiment with auto-vectorization using SSE or implement vectorization - without automatically detecting it for array operations. -* improve numpy, for example implement memory views. -* interface with fortran/C libraries. +.. _`list of topics`: https://bitbucket.org/pypy/extradoc/src/extradoc/planning/micronumpy.txt Improving the jitviewer ------------------------ diff --git a/pypy/doc/release-1.7.0.rst b/pypy/doc/release-1.7.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-1.7.0.rst @@ -0,0 +1,94 @@ +================================== +PyPy 1.7 - widening the sweet spot +================================== + +We're pleased to announce the 1.7 release of PyPy. As has become a habit, this +release brings a lot of bugfixes and performance improvements over the 1.6 +release. However, unlike the previous releases, the focus has been on widening +the "sweet spot" of PyPy. That is, classes of Python code that PyPy can greatly +speed up should be vastly improved with this release. You can download the 1.7 +release here: + + http://pypy.org/download.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 1.7 and cpython 2.7.1`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 32/64 or +Windows 32. Windows 64 work is ongoing, but not yet natively supported. + +The main topic of this release is widening the range of code which PyPy +can greatly speed up. On average on +our benchmark suite, PyPy 1.7 is around **30%** faster than PyPy 1.6 and up +to **20 times** faster on some benchmarks. + +.. _`pypy 1.7 and cpython 2.7.1`: http://speed.pypy.org + + +Highlights +========== + +* Numerous performance improvements. There are too many examples of Python + constructs that now behave faster to list them all. + +* Bugfixes and compatibility fixes with CPython. + +* Windows fixes. + +* PyPy now comes with stackless features enabled by default. However, + any loop using stackless features will interrupt the JIT for now, so there is no real + performance improvement for stackless-based programs. Contact pypy-dev for + info on how to help remove this restriction. + +* The NumPy effort in PyPy was renamed numpypy. In order to try using it, simply + write:: + + import numpypy as numpy + + at the beginning of your program. There has been huge progress on numpy in PyPy + since 1.6, the main feature being the implementation of dtypes. + +* The JSON encoder (but not the decoder) has been replaced with a new one. This one + is written in pure Python, but is known to outperform CPython's C extension + up to **2 times** in some cases. It's about **20 times** faster than + the one that we had in 1.6. + +* The memory footprint of some of our RPython modules has been drastically + improved. This should benefit any applications using, for example, cryptography, + like tornado. + +* There was some progress in exposing even more of the CPython C API via cpyext. + +Things that didn't make it, expect in 1.8 soon +============================================== + +There is ongoing work which, while it didn't make it into the release, is +probably worth mentioning here. This is what you should probably expect in +1.8 some time soon: + +* Specialized list implementation.
There is a branch that implements lists of + integers/floats/strings as compactly as array.array. This should drastically + improve performance/memory impact of some applications + +* NumPy effort is progressing forward, with multi-dimensional arrays coming + soon. + +* There are two brand new JIT assembler backends, notably for the PowerPC and + ARM processors. + +Fundraising +=========== + +It's maybe worth mentioning that we're running fundraising campaigns for +NumPy effort in PyPy and for Python 3 in PyPy. In case you want to see any +of those happen faster, we urge you to donate to `numpy proposal`_ or +`py3k proposal`_. In case you want PyPy to progress, but you trust us with +the general direction, you can always donate to the `general pot`_. + +.. _`numpy proposal`: http://pypy.org/numpydonate.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`general pot`: http://pypy.org diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -2,7 +2,7 @@ from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter import typedef from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt from pypy.rlib.unroll import unrolling_iterable from pypy.tool.pairtype import extendabletype from pypy.tool.sourcetools import func_with_new_name @@ -2925,14 +2925,13 @@ def Module_get_body(space, w_self): if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'body'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: if w_self.body is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.body] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_body = w_list return w_self.w_body @@ -2968,14 +2967,13 @@ def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'body'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: if w_self.body is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.body] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_body = w_list return w_self.w_body @@ -3015,8 +3013,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'body'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) def Expression_set_body(space, w_self, w_new_value): @@ -3057,14 +3054,13 @@ def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'body'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute 
'%s'", typename, 'body') if w_self.w_body is None: if w_self.body is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.body] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_body = w_list return w_self.w_body @@ -3104,8 +3100,7 @@ return w_obj if not w_self.initialization_state & w_self._lineno_mask: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'lineno'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) def stmt_set_lineno(space, w_self, w_new_value): @@ -3126,8 +3121,7 @@ return w_obj if not w_self.initialization_state & w_self._col_offset_mask: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'col_offset'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) def stmt_set_col_offset(space, w_self, w_new_value): @@ -3157,8 +3151,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'name'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) def FunctionDef_set_name(space, w_self, w_new_value): @@ -3179,8 +3172,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'args'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) def FunctionDef_set_args(space, w_self, w_new_value): @@ -3197,14 +3189,13 @@ def FunctionDef_get_body(space, w_self): if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'body'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: if w_self.body is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.body] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_body = w_list return w_self.w_body @@ -3215,14 +3206,13 @@ def FunctionDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'decorator_list'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.decorator_list] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_decorator_list = w_list return w_self.w_decorator_list @@ -3266,8 +3256,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) 
- w_err = space.wrap("'%s' object has no attribute 'name'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) def ClassDef_set_name(space, w_self, w_new_value): @@ -3284,14 +3273,13 @@ def ClassDef_get_bases(space, w_self): if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'bases'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases') if w_self.w_bases is None: if w_self.bases is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.bases] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_bases = w_list return w_self.w_bases @@ -3302,14 +3290,13 @@ def ClassDef_get_body(space, w_self): if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'body'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: if w_self.body is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.body] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_body = w_list return w_self.w_body @@ -3320,14 +3307,13 @@ def ClassDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'decorator_list'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.decorator_list] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_decorator_list = w_list return w_self.w_decorator_list @@ -3372,8 +3358,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'value'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) def Return_set_value(space, w_self, w_new_value): @@ -3414,14 +3399,13 @@ def Delete_get_targets(space, w_self): if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'targets'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: if w_self.targets is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.targets] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_targets = w_list return w_self.w_targets @@ -3457,14 +3441,13 @@ def Assign_get_targets(space, w_self): if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = 
space.wrap("'%s' object has no attribute 'targets'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: if w_self.targets is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.targets] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_targets = w_list return w_self.w_targets @@ -3479,8 +3462,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'value'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) def Assign_set_value(space, w_self, w_new_value): @@ -3527,8 +3509,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'target'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) def AugAssign_set_target(space, w_self, w_new_value): @@ -3549,8 +3530,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'op'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() def AugAssign_set_op(space, w_self, w_new_value): @@ -3573,8 +3553,7 @@ return w_obj if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'value'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) def AugAssign_set_value(space, w_self, w_new_value): @@ -3621,8 +3600,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'dest'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dest') return space.wrap(w_self.dest) def Print_set_dest(space, w_self, w_new_value): @@ -3639,14 +3617,13 @@ def Print_get_values(space, w_self): if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'values'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: if w_self.values is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.values] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_values = w_list return w_self.w_values @@ -3661,8 +3638,7 @@ return w_obj if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'nl'" % typename) - raise OperationError(space.w_AttributeError, 
w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl') return space.wrap(w_self.nl) def Print_set_nl(space, w_self, w_new_value): @@ -3710,8 +3686,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'target'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) def For_set_target(space, w_self, w_new_value): @@ -3732,8 +3707,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'iter'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'iter') return space.wrap(w_self.iter) def For_set_iter(space, w_self, w_new_value): @@ -3750,14 +3724,13 @@ def For_get_body(space, w_self): if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'body'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: if w_self.body is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.body] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_body = w_list return w_self.w_body @@ -3768,14 +3741,13 @@ def For_get_orelse(space, w_self): if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'orelse'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.orelse] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_orelse = w_list return w_self.w_orelse @@ -3819,8 +3791,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'test'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) def While_set_test(space, w_self, w_new_value): @@ -3837,14 +3808,13 @@ def While_get_body(space, w_self): if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'body'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: if w_self.body is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.body] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_body = w_list return w_self.w_body @@ -3855,14 +3825,13 @@ def While_get_orelse(space, w_self): if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' 
object has no attribute 'orelse'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.orelse] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_orelse = w_list return w_self.w_orelse @@ -3905,8 +3874,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'test'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) def If_set_test(space, w_self, w_new_value): @@ -3923,14 +3891,13 @@ def If_get_body(space, w_self): if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'body'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: if w_self.body is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.body] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_body = w_list return w_self.w_body @@ -3941,14 +3908,13 @@ def If_get_orelse(space, w_self): if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'orelse'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.orelse] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_orelse = w_list return w_self.w_orelse @@ -3991,8 +3957,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'context_expr'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr') return space.wrap(w_self.context_expr) def With_set_context_expr(space, w_self, w_new_value): @@ -4013,8 +3978,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'optional_vars'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars') return space.wrap(w_self.optional_vars) def With_set_optional_vars(space, w_self, w_new_value): @@ -4031,14 +3995,13 @@ def With_get_body(space, w_self): if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'body'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: if w_self.body is None: - w_list = space.newlist([]) + list_w 
= [] else: list_w = [space.wrap(node) for node in w_self.body] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_body = w_list return w_self.w_body @@ -4080,8 +4043,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'type'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) def Raise_set_type(space, w_self, w_new_value): @@ -4102,8 +4064,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'inst'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst') return space.wrap(w_self.inst) def Raise_set_inst(space, w_self, w_new_value): @@ -4124,8 +4085,7 @@ return w_obj if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'tback'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'tback') return space.wrap(w_self.tback) def Raise_set_tback(space, w_self, w_new_value): @@ -4168,14 +4128,13 @@ def TryExcept_get_body(space, w_self): if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'body'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: if w_self.body is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.body] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_body = w_list return w_self.w_body @@ -4186,14 +4145,13 @@ def TryExcept_get_handlers(space, w_self): if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'handlers'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers') if w_self.w_handlers is None: if w_self.handlers is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.handlers] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_handlers = w_list return w_self.w_handlers @@ -4204,14 +4162,13 @@ def TryExcept_get_orelse(space, w_self): if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'orelse'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.orelse] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_orelse = w_list return w_self.w_orelse @@ -4251,14 +4208,13 @@ def TryFinally_get_body(space, w_self): if not w_self.initialization_state & 1: typename = 
space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'body'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: if w_self.body is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.body] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_body = w_list return w_self.w_body @@ -4269,14 +4225,13 @@ def TryFinally_get_finalbody(space, w_self): if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'finalbody'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody') if w_self.w_finalbody is None: if w_self.finalbody is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.finalbody] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_finalbody = w_list return w_self.w_finalbody @@ -4318,8 +4273,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'test'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) def Assert_set_test(space, w_self, w_new_value): @@ -4340,8 +4294,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'msg'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg') return space.wrap(w_self.msg) def Assert_set_msg(space, w_self, w_new_value): @@ -4383,14 +4336,13 @@ def Import_get_names(space, w_self): if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'names'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: if w_self.names is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.names] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_names = w_list return w_self.w_names @@ -4430,8 +4382,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'module'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module') return space.wrap(w_self.module) def ImportFrom_set_module(space, w_self, w_new_value): @@ -4451,14 +4402,13 @@ def ImportFrom_get_names(space, w_self): if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'names'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: if 
w_self.names is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.names] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_names = w_list return w_self.w_names @@ -4473,8 +4423,7 @@ return w_obj if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'level'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level') return space.wrap(w_self.level) def ImportFrom_set_level(space, w_self, w_new_value): @@ -4522,8 +4471,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'body'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) def Exec_set_body(space, w_self, w_new_value): @@ -4544,8 +4492,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'globals'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals') return space.wrap(w_self.globals) def Exec_set_globals(space, w_self, w_new_value): @@ -4566,8 +4513,7 @@ return w_obj if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'locals'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals') return space.wrap(w_self.locals) def Exec_set_locals(space, w_self, w_new_value): @@ -4610,14 +4556,13 @@ def Global_get_names(space, w_self): if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'names'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: if w_self.names is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.names] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_names = w_list return w_self.w_names @@ -4657,8 +4602,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'value'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) def Expr_set_value(space, w_self, w_new_value): @@ -4754,8 +4698,7 @@ return w_obj if not w_self.initialization_state & w_self._lineno_mask: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'lineno'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) def expr_set_lineno(space, w_self, w_new_value): @@ -4776,8 +4719,7 @@ return w_obj if not 
w_self.initialization_state & w_self._col_offset_mask: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'col_offset'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) def expr_set_col_offset(space, w_self, w_new_value): @@ -4807,8 +4749,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'op'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return boolop_to_class[w_self.op - 1]() def BoolOp_set_op(space, w_self, w_new_value): @@ -4827,14 +4768,13 @@ def BoolOp_get_values(space, w_self): if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'values'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: if w_self.values is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.values] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_values = w_list return w_self.w_values @@ -4875,8 +4815,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'left'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) def BinOp_set_left(space, w_self, w_new_value): @@ -4897,8 +4836,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'op'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() def BinOp_set_op(space, w_self, w_new_value): @@ -4921,8 +4859,7 @@ return w_obj if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'right'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right') return space.wrap(w_self.right) def BinOp_set_right(space, w_self, w_new_value): @@ -4969,8 +4906,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'op'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return unaryop_to_class[w_self.op - 1]() def UnaryOp_set_op(space, w_self, w_new_value): @@ -4993,8 +4929,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'operand'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no 
attribute '%s'", typename, 'operand') return space.wrap(w_self.operand) def UnaryOp_set_operand(space, w_self, w_new_value): @@ -5040,8 +4975,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'args'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) def Lambda_set_args(space, w_self, w_new_value): @@ -5062,8 +4996,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'body'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) def Lambda_set_body(space, w_self, w_new_value): @@ -5109,8 +5042,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'test'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) def IfExp_set_test(space, w_self, w_new_value): @@ -5131,8 +5063,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'body'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) def IfExp_set_body(space, w_self, w_new_value): @@ -5153,8 +5084,7 @@ return w_obj if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'orelse'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') return space.wrap(w_self.orelse) def IfExp_set_orelse(space, w_self, w_new_value): @@ -5197,14 +5127,13 @@ def Dict_get_keys(space, w_self): if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'keys'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys') if w_self.w_keys is None: if w_self.keys is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.keys] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_keys = w_list return w_self.w_keys @@ -5215,14 +5144,13 @@ def Dict_get_values(space, w_self): if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'values'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: if w_self.values is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.values] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_values = w_list return w_self.w_values @@ 
-5260,14 +5188,13 @@ def Set_get_elts(space, w_self): if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'elts'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: if w_self.elts is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.elts] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_elts = w_list return w_self.w_elts @@ -5307,8 +5234,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'elt'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) def ListComp_set_elt(space, w_self, w_new_value): @@ -5325,14 +5251,13 @@ def ListComp_get_generators(space, w_self): if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'generators'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: if w_self.generators is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.generators] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_generators = w_list return w_self.w_generators @@ -5373,8 +5298,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'elt'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) def SetComp_set_elt(space, w_self, w_new_value): @@ -5391,14 +5315,13 @@ def SetComp_get_generators(space, w_self): if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'generators'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: if w_self.generators is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.generators] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_generators = w_list return w_self.w_generators @@ -5439,8 +5362,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'key'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key') return space.wrap(w_self.key) def DictComp_set_key(space, w_self, w_new_value): @@ -5461,8 +5383,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'value'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) def DictComp_set_value(space, w_self, w_new_value): @@ -5479,14 +5400,13 @@ def DictComp_get_generators(space, w_self): if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'generators'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: if w_self.generators is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.generators] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_generators = w_list return w_self.w_generators @@ -5528,8 +5448,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'elt'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) def GeneratorExp_set_elt(space, w_self, w_new_value): @@ -5546,14 +5465,13 @@ def GeneratorExp_get_generators(space, w_self): if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'generators'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: if w_self.generators is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.generators] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_generators = w_list return w_self.w_generators @@ -5594,8 +5512,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'value'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) def Yield_set_value(space, w_self, w_new_value): @@ -5640,8 +5557,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'left'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) def Compare_set_left(space, w_self, w_new_value): @@ -5658,14 +5574,13 @@ def Compare_get_ops(space, w_self): if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'ops'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops') if w_self.w_ops is None: if w_self.ops is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [cmpop_to_class[node - 1]() for node in w_self.ops] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_ops = w_list return w_self.w_ops @@ -5676,14 +5591,13 @@ def Compare_get_comparators(space, w_self): if not 
w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'comparators'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators') if w_self.w_comparators is None: if w_self.comparators is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.comparators] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_comparators = w_list return w_self.w_comparators @@ -5726,8 +5640,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'func'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func') return space.wrap(w_self.func) def Call_set_func(space, w_self, w_new_value): @@ -5744,14 +5657,13 @@ def Call_get_args(space, w_self): if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'args'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') if w_self.w_args is None: if w_self.args is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.args] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_args = w_list return w_self.w_args @@ -5762,14 +5674,13 @@ def Call_get_keywords(space, w_self): if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'keywords'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') if w_self.w_keywords is None: if w_self.keywords is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.keywords] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_keywords = w_list return w_self.w_keywords @@ -5784,8 +5695,7 @@ return w_obj if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'starargs'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') return space.wrap(w_self.starargs) def Call_set_starargs(space, w_self, w_new_value): @@ -5806,8 +5716,7 @@ return w_obj if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'kwargs'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') return space.wrap(w_self.kwargs) def Call_set_kwargs(space, w_self, w_new_value): @@ -5858,8 +5767,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'value'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", 
typename, 'value') return space.wrap(w_self.value) def Repr_set_value(space, w_self, w_new_value): @@ -5904,8 +5812,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'n'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n') return w_self.n def Num_set_n(space, w_self, w_new_value): @@ -5950,8 +5857,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 's'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's') return w_self.s def Str_set_s(space, w_self, w_new_value): @@ -5996,8 +5902,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'value'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) def Attribute_set_value(space, w_self, w_new_value): @@ -6018,8 +5923,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'attr'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr') return space.wrap(w_self.attr) def Attribute_set_attr(space, w_self, w_new_value): @@ -6040,8 +5944,7 @@ return w_obj if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'ctx'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Attribute_set_ctx(space, w_self, w_new_value): @@ -6090,8 +5993,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'value'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) def Subscript_set_value(space, w_self, w_new_value): @@ -6112,8 +6014,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'slice'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice') return space.wrap(w_self.slice) def Subscript_set_slice(space, w_self, w_new_value): @@ -6134,8 +6035,7 @@ return w_obj if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'ctx'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Subscript_set_ctx(space, w_self, w_new_value): @@ -6184,8 +6084,7 @@ return 
w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'id'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id') return space.wrap(w_self.id) def Name_set_id(space, w_self, w_new_value): @@ -6206,8 +6105,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'ctx'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Name_set_ctx(space, w_self, w_new_value): @@ -6251,14 +6149,13 @@ def List_get_elts(space, w_self): if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'elts'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: if w_self.elts is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.elts] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_elts = w_list return w_self.w_elts @@ -6273,8 +6170,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'ctx'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def List_set_ctx(space, w_self, w_new_value): @@ -6319,14 +6215,13 @@ def Tuple_get_elts(space, w_self): if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'elts'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: if w_self.elts is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.elts] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_elts = w_list return w_self.w_elts @@ -6341,8 +6236,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'ctx'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Tuple_set_ctx(space, w_self, w_new_value): @@ -6391,8 +6285,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'value'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return w_self.value def Const_set_value(space, w_self, w_new_value): @@ -6510,8 +6403,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no 
attribute 'lower'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lower') return space.wrap(w_self.lower) def Slice_set_lower(space, w_self, w_new_value): @@ -6532,8 +6424,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'upper'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'upper') return space.wrap(w_self.upper) def Slice_set_upper(space, w_self, w_new_value): @@ -6554,8 +6445,7 @@ return w_obj if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'step'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'step') return space.wrap(w_self.step) def Slice_set_step(space, w_self, w_new_value): @@ -6598,14 +6488,13 @@ def ExtSlice_get_dims(space, w_self): if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'dims'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dims') if w_self.w_dims is None: if w_self.dims is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.dims] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_dims = w_list return w_self.w_dims @@ -6645,8 +6534,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'value'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) def Index_set_value(space, w_self, w_new_value): @@ -6915,8 +6803,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'target'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) def comprehension_set_target(space, w_self, w_new_value): @@ -6937,8 +6824,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'iter'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'iter') return space.wrap(w_self.iter) def comprehension_set_iter(space, w_self, w_new_value): @@ -6955,14 +6841,13 @@ def comprehension_get_ifs(space, w_self): if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'ifs'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ifs') if w_self.w_ifs is None: if w_self.ifs is None: - w_list = space.newlist([]) + list_w = [] else: list_w 
= [space.wrap(node) for node in w_self.ifs] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_ifs = w_list return w_self.w_ifs @@ -7004,8 +6889,7 @@ return w_obj if not w_self.initialization_state & w_self._lineno_mask: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'lineno'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) def excepthandler_set_lineno(space, w_self, w_new_value): @@ -7026,8 +6910,7 @@ return w_obj if not w_self.initialization_state & w_self._col_offset_mask: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'col_offset'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) def excepthandler_set_col_offset(space, w_self, w_new_value): @@ -7057,8 +6940,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'type'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) def ExceptHandler_set_type(space, w_self, w_new_value): @@ -7079,8 +6961,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'name'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) def ExceptHandler_set_name(space, w_self, w_new_value): @@ -7097,14 +6978,13 @@ def ExceptHandler_get_body(space, w_self): if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'body'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: if w_self.body is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.body] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_body = w_list return w_self.w_body @@ -7142,14 +7022,13 @@ def arguments_get_args(space, w_self): if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'args'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') if w_self.w_args is None: if w_self.args is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.args] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_args = w_list return w_self.w_args @@ -7164,8 +7043,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'vararg'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has 
no attribute '%s'", typename, 'vararg') return space.wrap(w_self.vararg) def arguments_set_vararg(space, w_self, w_new_value): @@ -7189,8 +7067,7 @@ return w_obj if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'kwarg'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwarg') return space.wrap(w_self.kwarg) def arguments_set_kwarg(space, w_self, w_new_value): @@ -7210,14 +7087,13 @@ def arguments_get_defaults(space, w_self): if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'defaults'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'defaults') if w_self.w_defaults is None: if w_self.defaults is None: - w_list = space.newlist([]) + list_w = [] else: list_w = [space.wrap(node) for node in w_self.defaults] - w_list = space.newlist(list_w) + w_list = space.newlist(list_w) w_self.w_defaults = w_list return w_self.w_defaults @@ -7261,8 +7137,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'arg'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'arg') return space.wrap(w_self.arg) def keyword_set_arg(space, w_self, w_new_value): @@ -7283,8 +7158,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'value'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) def keyword_set_value(space, w_self, w_new_value): @@ -7330,8 +7204,7 @@ return w_obj if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'name'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) def alias_set_name(space, w_self, w_new_value): @@ -7352,8 +7225,7 @@ return w_obj if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) - w_err = space.wrap("'%s' object has no attribute 'asname'" % typename) - raise OperationError(space.w_AttributeError, w_err) + raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'asname') return space.wrap(w_self.asname) def alias_set_asname(space, w_self, w_new_value): diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -414,13 +414,12 @@ self.emit(" return w_obj", 1) self.emit("if not w_self.initialization_state & %s:" % (flag,), 1) self.emit("typename = space.type(w_self).getname(space)", 2) - self.emit("w_err = space.wrap(\"'%%s' object has no attribute '%s'\" %% typename)" % + self.emit("raise operationerrfmt(space.w_AttributeError, \"'%%s' object has no attribute '%%s'\", typename, '%s')" % 
(field.name,), 2) - self.emit("raise OperationError(space.w_AttributeError, w_err)", 2) if field.seq: self.emit("if w_self.w_%s is None:" % (field.name,), 1) self.emit("if w_self.%s is None:" % (field.name,), 2) - self.emit("w_list = space.newlist([])", 3) + self.emit("list_w = []", 3) self.emit("else:", 2) if field.type.value in self.data.simple_types: wrapper = "%s_to_class[node - 1]()" % (field.type,) @@ -428,7 +427,7 @@ wrapper = "space.wrap(node)" self.emit("list_w = [%s for node in w_self.%s]" % (wrapper, field.name), 3) - self.emit("w_list = space.newlist(list_w)", 3) + self.emit("w_list = space.newlist(list_w)", 2) self.emit("w_self.w_%s = w_list" % (field.name,), 2) self.emit("return w_self.w_%s" % (field.name,), 1) elif field.type.value in self.data.simple_types: @@ -540,7 +539,7 @@ from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter import typedef from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt from pypy.rlib.unroll import unrolling_iterable from pypy.tool.pairtype import extendabletype from pypy.tool.sourcetools import func_with_new_name @@ -639,9 +638,7 @@ missing = required[i] if missing is not None: err = "required field \\"%s\\" missing from %s" - err = err % (missing, host) - w_err = space.wrap(err) - raise OperationError(space.w_TypeError, w_err) + raise operationerrfmt(space.w_TypeError, err, missing, host) raise AssertionError("should not reach here") diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -188,6 +188,12 @@ # ------------------------------------------------------------------- + def is_w(self, space, w_other): + return self is w_other + + def unique_id(self, space): + return space.wrap(compute_unique_id(self)) + def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) raise OperationError(space.w_TypeError, w_msg) @@ -681,9 +687,17 @@ """shortcut for space.is_true(space.eq(w_obj1, w_obj2))""" return self.is_w(w_obj1, w_obj2) or self.is_true(self.eq(w_obj1, w_obj2)) - def is_w(self, w_obj1, w_obj2): - """shortcut for space.is_true(space.is_(w_obj1, w_obj2))""" - return self.is_true(self.is_(w_obj1, w_obj2)) + def is_(self, w_one, w_two): + return self.newbool(self.is_w(w_one, w_two)) + + def is_w(self, w_one, w_two): + # done by a method call on w_two (and not on w_one, because of the + # expected programming style where we say "if x is None" or + # "if x is object"). + return w_two.is_w(self, w_one) + + def id(self, w_obj): + return w_obj.unique_id(self) def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" @@ -777,22 +791,63 @@ """Unpack an iterable object into a real (interpreter-level) list. Raise an OperationError(w_ValueError) if the length is wrong.""" w_iterator = self.iter(w_iterable) - # If we know the expected length we can preallocate. 
if expected_length == -1: + # xxx special hack for speed + from pypy.interpreter.generator import GeneratorIterator + if isinstance(w_iterator, GeneratorIterator): + lst_w = [] + w_iterator.unpack_into(lst_w) + return lst_w + # /xxx + return self._unpackiterable_unknown_length(w_iterator, w_iterable) + else: + lst_w = self._unpackiterable_known_length(w_iterator, + expected_length) + return lst_w[:] # make the resulting list resizable + + @jit.dont_look_inside + def _unpackiterable_unknown_length(self, w_iterator, w_iterable): + # Unpack a variable-size list of unknown length. + # The JIT does not look inside this function because it + # contains a loop (made explicit with the decorator above). + # + # If we can guess the expected length we can preallocate. + try: + lgt_estimate = self.len_w(w_iterable) + except OperationError, o: + if (not o.match(self, self.w_AttributeError) and + not o.match(self, self.w_TypeError)): + raise + items = [] + else: try: - lgt_estimate = self.len_w(w_iterable) - except OperationError, o: - if (not o.match(self, self.w_AttributeError) and - not o.match(self, self.w_TypeError)): + items = newlist(lgt_estimate) + except MemoryError: + items = [] # it might have lied + # + while True: + try: + w_item = self.next(w_iterator) + except OperationError, e: + if not e.match(self, self.w_StopIteration): raise - items = [] - else: - try: - items = newlist(lgt_estimate) - except MemoryError: - items = [] # it might have lied - else: - items = [None] * expected_length + break # done + items.append(w_item) + # + return items + + @jit.dont_look_inside + def _unpackiterable_known_length(self, w_iterator, expected_length): + # Unpack a known length list, without letting the JIT look inside. + # Implemented by just calling the @jit.unroll_safe version, but + # the JIT stopped looking inside already. + return self._unpackiterable_known_length_jitlook(w_iterator, + expected_length) + + @jit.unroll_safe + def _unpackiterable_known_length_jitlook(self, w_iterator, + expected_length): + items = [None] * expected_length idx = 0 while True: try: @@ -801,26 +856,29 @@ if not e.match(self, self.w_StopIteration): raise break # done - if expected_length != -1 and idx == expected_length: + if idx == expected_length: raise OperationError(self.w_ValueError, - self.wrap("too many values to unpack")) - if expected_length == -1: - items.append(w_item) - else: - items[idx] = w_item + self.wrap("too many values to unpack")) + items[idx] = w_item idx += 1 - if expected_length != -1 and idx < expected_length: + if idx < expected_length: if idx == 1: plural = "" else: plural = "s" - raise OperationError(self.w_ValueError, - self.wrap("need more than %d value%s to unpack" % - (idx, plural))) + raise operationerrfmt(self.w_ValueError, + "need more than %d value%s to unpack", + idx, plural) return items - unpackiterable_unroll = jit.unroll_safe(func_with_new_name(unpackiterable, - 'unpackiterable_unroll')) + def unpackiterable_unroll(self, w_iterable, expected_length): + # Like unpackiterable(), but for the cases where we have + # an expected_length and want to unroll when JITted. + # Returns a fixed-size list. + w_iterator = self.iter(w_iterable) + assert expected_length != -1 + return self._unpackiterable_known_length_jitlook(w_iterator, + expected_length) def fixedview(self, w_iterable, expected_length=-1): """ A fixed list view of w_iterable. 
Don't modify the result @@ -979,9 +1037,6 @@ def isinstance_w(self, w_obj, w_type): return self.is_true(self.isinstance(w_obj, w_type)) - def id(self, w_obj): - return self.wrap(compute_unique_id(w_obj)) - # The code below only works # for the simple case (new-style instance). # These methods are patched with the full logic by the __builtin__ diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -175,6 +175,9 @@ self.w_tracefunc = w_func self.space.frame_trace_action.fire() + def gettrace(self): + return self.w_tracefunc + def setprofile(self, w_func): """Set the global trace function.""" if self.space.is_w(w_func, self.space.w_None): @@ -388,8 +391,11 @@ def decrement_ticker(self, by): value = self._ticker if self.has_bytecode_counter: # this 'if' is constant-folded - value -= by - self._ticker = value + if jit.isconstant(by) and by == 0: + pass # normally constant-folded too + else: + value -= by + self._ticker = value return value diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -1,14 +1,15 @@ +from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.gateway import NoneNotWrapped +from pypy.interpreter.pyopcode import LoopBlock from pypy.rlib import jit -from pypy.interpreter.pyopcode import LoopBlock +from pypy.rlib.objectmodel import specialize class GeneratorIterator(Wrappable): "An iterator created by a generator." _immutable_fields_ = ['pycode'] - + def __init__(self, frame): self.space = frame.space self.frame = frame # turned into None when frame_finished_execution @@ -81,7 +82,7 @@ # if the frame is now marked as finished, it was RETURNed from if frame.frame_finished_execution: self.frame = None - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) else: return w_result # YIELDed finally: @@ -97,21 +98,21 @@ def throw(self, w_type, w_val, w_tb): from pypy.interpreter.pytraceback import check_traceback space = self.space - + msg = "throw() third argument must be a traceback object" if space.is_w(w_tb, space.w_None): tb = None else: tb = check_traceback(space, w_tb, msg) - + operr = OperationError(w_type, w_val, tb) operr.normalize_exception(space) return self.send_ex(space.w_None, operr) - + def descr_next(self): """x.next() -> the next value, or raise StopIteration""" return self.send_ex(self.space.w_None) - + def descr_close(self): """x.close(arg) -> raise GeneratorExit inside generator.""" assert isinstance(self, GeneratorIterator) @@ -124,7 +125,7 @@ e.match(space, space.w_GeneratorExit): return space.w_None raise - + if w_retval is not None: msg = "generator ignored GeneratorExit" raise OperationError(space.w_RuntimeError, space.wrap(msg)) @@ -155,3 +156,44 @@ "interrupting generator of ") break block = block.previous + + # Results can be either an RPython list of W_Root, or it can be an + # app-level W_ListObject, which also has an append() method, that's why we + # generate 2 versions of the function and 2 jit drivers. 
+ def _create_unpack_into(): + jitdriver = jit.JitDriver(greens=['pycode'], + reds=['self', 'frame', 'results']) + def unpack_into(self, results): + """This is a hack for performance: runs the generator and collects + all produced items in a list.""" + # XXX copied and simplified version of send_ex() + space = self.space + if self.running: + raise OperationError(space.w_ValueError, + space.wrap('generator already executing')) + frame = self.frame + if frame is None: # already finished + return + self.running = True + try: + pycode = self.pycode + while True: + jitdriver.jit_merge_point(self=self, frame=frame, + results=results, pycode=pycode) + try: + w_result = frame.execute_frame(space.w_None) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break + # if the frame is now marked as finished, it was RETURNed from + if frame.frame_finished_execution: + break + results.append(w_result) # YIELDed + finally: + frame.f_backref = jit.vref_None + self.running = False + self.frame = None + return unpack_into + unpack_into = _create_unpack_into() + unpack_into_w = _create_unpack_into() \ No newline at end of file diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated, instantiate from pypy.rlib.jit import hint from pypy.rlib.debug import make_sure_not_resized, check_nonneg -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rlib import jit from pypy.tool import stdlib_opcode from pypy.tool.stdlib_opcode import host_bytecode_spec @@ -167,7 +167,7 @@ # Execution starts just after the last_instr. Initially, # last_instr is -1. After a generator suspends it points to # the YIELD_VALUE instruction. 
- next_instr = self.last_instr + 1 + next_instr = r_uint(self.last_instr + 1) if next_instr != 0: self.pushvalue(w_inputvalue) # @@ -691,6 +691,7 @@ handlerposition = space.int_w(w_handlerposition) valuestackdepth = space.int_w(w_valuestackdepth) assert valuestackdepth >= 0 + assert handlerposition >= 0 blk = instantiate(get_block_class(opname)) blk.handlerposition = handlerposition blk.valuestackdepth = valuestackdepth diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -837,6 +837,7 @@ raise Yield def jump_absolute(self, jumpto, next_instr, ec): + check_nonneg(jumpto) return jumpto def JUMP_FORWARD(self, jumpby, next_instr): @@ -1278,7 +1279,7 @@ def handle(self, frame, unroller): next_instr = self.really_handle(frame, unroller) # JIT hack - return next_instr + return r_uint(next_instr) def really_handle(self, frame, unroller): """ Purely abstract method diff --git a/pypy/interpreter/pyparser/pytokenizer.py b/pypy/interpreter/pyparser/pytokenizer.py --- a/pypy/interpreter/pyparser/pytokenizer.py +++ b/pypy/interpreter/pyparser/pytokenizer.py @@ -226,7 +226,7 @@ parenlev = parenlev - 1 if parenlev < 0: raise TokenError("unmatched '%s'" % initial, line, - lnum-1, 0, token_list) + lnum, start + 1, token_list) if token in python_opmap: punct = python_opmap[token] else: diff --git a/pypy/interpreter/pyparser/test/test_pyparse.py b/pypy/interpreter/pyparser/test/test_pyparse.py --- a/pypy/interpreter/pyparser/test/test_pyparse.py +++ b/pypy/interpreter/pyparser/test/test_pyparse.py @@ -87,6 +87,10 @@ assert exc.lineno == 1 assert exc.offset == 5 assert exc.lastlineno == 5 + exc = py.test.raises(SyntaxError, parse, "abc)").value + assert exc.msg == "unmatched ')'" + assert exc.lineno == 1 + assert exc.offset == 4 def test_is(self): self.parse("x is y") diff --git a/pypy/interpreter/test/test_executioncontext.py b/pypy/interpreter/test/test_executioncontext.py --- a/pypy/interpreter/test/test_executioncontext.py +++ b/pypy/interpreter/test/test_executioncontext.py @@ -292,7 +292,7 @@ import os, sys print sys.executable, self.tmpfile if sys.platform == "win32": - cmdformat = '""%s" "%s""' # excellent! tons of "! 
+ cmdformat = '"%s" "%s"' else: cmdformat = "'%s' '%s'" g = os.popen(cmdformat % (sys.executable, self.tmpfile), 'r') diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -117,7 +117,7 @@ g = f() raises(NameError, g.throw, NameError, "Error", None) - + def test_throw_fail(self): def f(): yield 1 @@ -129,7 +129,7 @@ yield 1 g = f() raises(TypeError, g.throw, list()) - + def test_throw_fail3(self): def f(): yield 1 @@ -188,7 +188,7 @@ g = f() g.next() raises(NameError, g.close) - + def test_close_fail(self): def f(): try: @@ -267,3 +267,15 @@ assert r.startswith("= self.min_fragment: - self.total_mallocs -= (stop - middle) + self.total_mallocs -= r_uint(stop - middle) self._add_free_block(middle, stop) return True else: @@ -77,7 +77,7 @@ # Hack to make sure that mcs are not within 32-bits of one # another for testing purposes rmmap.hint.pos += 0x80000000 - size - self.total_memory_allocated += size + self.total_memory_allocated += r_uint(size) data = rffi.cast(lltype.Signed, data) return self._add_free_block(data, data + size) diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -1,13 +1,10 @@ import py -from pypy.rpython.lltypesystem import lltype, rffi, llmemory, rclass +from pypy.rpython.lltypesystem import lltype, rffi, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.jit.backend.llsupport import symbolic, support -from pypy.jit.metainterp.history import AbstractDescr, getkind, BoxInt, BoxPtr -from pypy.jit.metainterp.history import BasicFailDescr, LoopToken, BoxFloat +from pypy.jit.metainterp.history import AbstractDescr, getkind from pypy.jit.metainterp import history -from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.codewriter import heaptracker, longlong -from pypy.rlib.rarithmetic import r_longlong, r_ulonglong # The point of the class organization in this file is to make instances # as compact as possible. 
This is done by not storing the field size or @@ -23,6 +20,7 @@ self._cache_field = {} self._cache_array = {} self._cache_call = {} + self._cache_interiorfield = {} def init_size_descr(self, STRUCT, sizedescr): assert isinstance(STRUCT, lltype.GcStruct) @@ -113,6 +111,16 @@ def repr_of_descr(self): return '<%s %s %s>' % (self._clsname, self.name, self.offset) +class DynamicFieldDescr(BaseFieldDescr): + def __init__(self, offset, fieldsize, is_pointer, is_float, is_signed): + self.offset = offset + self._fieldsize = fieldsize + self._is_pointer_field = is_pointer + self._is_float_field = is_float + self._is_field_signed = is_signed + + def get_field_size(self, translate_support_code): + return self._fieldsize class NonGcPtrFieldDescr(BaseFieldDescr): _clsname = 'NonGcPtrFieldDescr' @@ -142,7 +150,6 @@ cachedict[fieldname] = fielddescr return fielddescr - # ____________________________________________________________ # ArrayDescrs @@ -167,6 +174,7 @@ _is_array_of_pointers = False # unless overridden by GcPtrArrayDescr _is_array_of_floats = False # unless overridden by FloatArrayDescr + _is_array_of_structs = False # unless overridden by StructArrayDescr _is_item_signed = False # unless overridden by XxxArrayDescr def is_array_of_pointers(self): @@ -175,12 +183,16 @@ def is_array_of_floats(self): return self._is_array_of_floats + def is_array_of_structs(self): + return self._is_array_of_structs + def is_item_signed(self): return self._is_item_signed def repr_of_descr(self): return '<%s>' % self._clsname + class NonGcPtrArrayDescr(BaseArrayDescr): _clsname = 'NonGcPtrArrayDescr' def get_item_size(self, translate_support_code): @@ -199,6 +211,10 @@ def get_item_size(self, translate_support_code): return symbolic.get_size(lltype.Float, translate_support_code) +class StructArrayDescr(BaseArrayDescr): + _clsname = 'StructArrayDescr' + _is_array_of_structs = True + class BaseArrayNoLengthDescr(BaseArrayDescr): def get_base_size(self, translate_support_code): return 0 @@ -206,6 +222,13 @@ def get_ofs_length(self, translate_support_code): return -1 +class DynamicArrayNoLengthDescr(BaseArrayNoLengthDescr): + def __init__(self, itemsize): + self.itemsize = itemsize + + def get_item_size(self, translate_support_code): + return self.itemsize + class NonGcPtrArrayNoLengthDescr(BaseArrayNoLengthDescr): _clsname = 'NonGcPtrArrayNoLengthDescr' def get_item_size(self, translate_support_code): @@ -218,6 +241,13 @@ def getArrayDescrClass(ARRAY): if ARRAY.OF is lltype.Float: return FloatArrayDescr + elif isinstance(ARRAY.OF, lltype.Struct): + class Descr(StructArrayDescr): + _clsname = '%sArrayDescr' % ARRAY.OF._name + def get_item_size(self, translate_support_code): + return symbolic.get_size(ARRAY.OF, translate_support_code) + Descr.__name__ = Descr._clsname + return Descr return getDescrClass(ARRAY.OF, BaseArrayDescr, GcPtrArrayDescr, NonGcPtrArrayDescr, 'Array', 'get_item_size', '_is_array_of_floats', '_is_item_signed') @@ -252,6 +282,39 @@ cache[ARRAY] = arraydescr return arraydescr +# ____________________________________________________________ +# InteriorFieldDescr + +class InteriorFieldDescr(AbstractDescr): + arraydescr = BaseArrayDescr() # workaround for the annotator + fielddescr = BaseFieldDescr('', 0) + + def __init__(self, arraydescr, fielddescr): + self.arraydescr = arraydescr + self.fielddescr = fielddescr + + def is_pointer_field(self): + return self.fielddescr.is_pointer_field() + + def is_float_field(self): + return self.fielddescr.is_float_field() + + def sort_key(self): + return 
self.fielddescr.sort_key() + + def repr_of_descr(self): + return '' % self.fielddescr.repr_of_descr() + +def get_interiorfield_descr(gc_ll_descr, ARRAY, FIELDTP, name): + cache = gc_ll_descr._cache_interiorfield + try: + return cache[(ARRAY, FIELDTP, name)] + except KeyError: + arraydescr = get_array_descr(gc_ll_descr, ARRAY) + fielddescr = get_field_descr(gc_ll_descr, FIELDTP, name) + descr = InteriorFieldDescr(arraydescr, fielddescr) + cache[(ARRAY, FIELDTP, name)] = descr + return descr # ____________________________________________________________ # CallDescrs @@ -260,12 +323,16 @@ _clsname = '' loop_token = None arg_classes = '' # <-- annotation hack - ffi_flags = 0 + ffi_flags = 1 - def __init__(self, arg_classes, extrainfo=None, ffi_flags=0): + def __init__(self, arg_classes, extrainfo=None, ffi_flags=1): self.arg_classes = arg_classes # string of "r" and "i" (ref/int) self.extrainfo = extrainfo self.ffi_flags = ffi_flags + # NB. the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which + # makes sense on Windows as it's the one for all the C functions + # we are compiling together with the JIT. On non-Windows platforms + # it is just ignored anyway. def __repr__(self): res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) @@ -306,6 +373,10 @@ return False # unless overridden def create_call_stub(self, rtyper, RESULT): + from pypy.rlib.clibffi import FFI_DEFAULT_ABI + assert self.get_call_conv() == FFI_DEFAULT_ABI, ( + "%r: create_call_stub() with a non-default call ABI" % (self,)) + def process(c): if c == 'L': assert longlong.supports_longlong @@ -400,7 +471,7 @@ """ _clsname = 'DynamicIntCallDescr' - def __init__(self, arg_classes, result_size, result_sign, extrainfo=None, ffi_flags=0): + def __init__(self, arg_classes, result_size, result_sign, extrainfo, ffi_flags): BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) assert isinstance(result_sign, bool) self._result_size = chr(result_size) @@ -525,7 +596,8 @@ # if TYPE is lltype.Float or is_longlong(TYPE): setattr(Descr, floatattrname, True) - elif TYPE is not lltype.Bool and rffi.cast(TYPE, -1) == -1: + elif (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and + rffi.cast(TYPE, -1) == -1): setattr(Descr, signedattrname, True) # _cache[nameprefix, TYPE] = Descr diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -8,7 +8,7 @@ class UnsupportedKind(Exception): pass -def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo=None, ffi_flags=0): +def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo, ffi_flags): """Get a call descr: the types of result and args are represented by rlib.libffi.types.*""" try: diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -45,6 +45,22 @@ def freeing_block(self, start, stop): pass + def get_funcptr_for_newarray(self): + return llhelper(self.GC_MALLOC_ARRAY, self.malloc_array) + def get_funcptr_for_newstr(self): + return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_str) + def get_funcptr_for_newunicode(self): + return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_unicode) + + + def record_constptrs(self, op, gcrefs_output_list): + for i in range(op.numargs()): + v = op.getarg(i) + if isinstance(v, ConstPtr) and bool(v.value): + p = v.value + rgc._make_sure_does_not_move(p) + 
gcrefs_output_list.append(p) + # ____________________________________________________________ class GcLLDescr_boehm(GcLLDescription): @@ -88,6 +104,39 @@ malloc_fn_ptr = self.configure_boehm_once() self.funcptr_for_new = malloc_fn_ptr + def malloc_array(basesize, itemsize, ofs_length, num_elem): + try: + size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + except OverflowError: + return lltype.nullptr(llmemory.GCREF.TO) + res = self.funcptr_for_new(size) + if not res: + return res + rffi.cast(rffi.CArrayPtr(lltype.Signed), res)[ofs_length/WORD] = num_elem + return res + self.malloc_array = malloc_array + self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( + [lltype.Signed] * 4, llmemory.GCREF)) + + + (str_basesize, str_itemsize, str_ofs_length + ) = symbolic.get_array_token(rstr.STR, self.translate_support_code) + (unicode_basesize, unicode_itemsize, unicode_ofs_length + ) = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code) + def malloc_str(length): + return self.malloc_array( + str_basesize, str_itemsize, str_ofs_length, length + ) + def malloc_unicode(length): + return self.malloc_array( + unicode_basesize, unicode_itemsize, unicode_ofs_length, length + ) + self.malloc_str = malloc_str + self.malloc_unicode = malloc_unicode + self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( + [lltype.Signed], llmemory.GCREF)) + + # on some platform GC_init is required before any other # GC_* functions, call it here for the benefit of tests # XXX move this to tests @@ -108,38 +157,34 @@ ofs_length = arraydescr.get_ofs_length(self.translate_support_code) basesize = arraydescr.get_base_size(self.translate_support_code) itemsize = arraydescr.get_item_size(self.translate_support_code) - size = basesize + itemsize * num_elem - res = self.funcptr_for_new(size) - rffi.cast(rffi.CArrayPtr(lltype.Signed), res)[ofs_length/WORD] = num_elem - return res + return self.malloc_array(basesize, itemsize, ofs_length, num_elem) def gc_malloc_str(self, num_elem): - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.translate_support_code) - assert itemsize == 1 - size = basesize + num_elem - res = self.funcptr_for_new(size) - rffi.cast(rffi.CArrayPtr(lltype.Signed), res)[ofs_length/WORD] = num_elem - return res + return self.malloc_str(num_elem) def gc_malloc_unicode(self, num_elem): - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.translate_support_code) - size = basesize + num_elem * itemsize - res = self.funcptr_for_new(size) - rffi.cast(rffi.CArrayPtr(lltype.Signed), res)[ofs_length/WORD] = num_elem - return res + return self.malloc_unicode(num_elem) def args_for_new(self, sizedescr): assert isinstance(sizedescr, BaseSizeDescr) return [sizedescr.size] + def args_for_new_array(self, arraydescr): + ofs_length = arraydescr.get_ofs_length(self.translate_support_code) + basesize = arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + return [basesize, itemsize, ofs_length] + def get_funcptr_for_new(self): return self.funcptr_for_new - get_funcptr_for_newarray = None - get_funcptr_for_newstr = None - get_funcptr_for_newunicode = None + def rewrite_assembler(self, cpu, operations, gcrefs_output_list): + # record all GCREFs too, because Boehm cannot see them and keep them + # alive if they end up as constants in the assembler + for op in operations: + self.record_constptrs(op, gcrefs_output_list) + return GcLLDescription.rewrite_assembler(self, cpu, operations, + 
gcrefs_output_list) # ____________________________________________________________ @@ -603,11 +648,10 @@ # make a malloc function, with two arguments def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - has_finalizer = bool(tid & (1<' # - cache = {} descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) assert 'GcPtrCallDescr' in descr4.repr_of_descr() # @@ -412,10 +413,10 @@ ARGS = [lltype.Float, lltype.Ptr(ARRAY)] RES = lltype.Float - def f(a, b): + def f2(a, b): return float(b[0]) + a - fnptr = llhelper(lltype.Ptr(lltype.FuncType(ARGS, RES)), f) + fnptr = llhelper(lltype.Ptr(lltype.FuncType(ARGS, RES)), f2) descr2 = get_call_descr(c0, ARGS, RES) a = lltype.malloc(ARRAY, 3) opaquea = lltype.cast_opaque_ptr(llmemory.GCREF, a) diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -13,44 +13,46 @@ def test_call_descr_dynamic(): args = [types.sint, types.pointer] - descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, ffi_flags=42) + descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, None, + ffi_flags=42) assert isinstance(descr, DynamicIntCallDescr) assert descr.arg_classes == 'ii' assert descr.get_ffi_flags() == 42 args = [types.sint, types.double, types.pointer] - descr = get_call_descr_dynamic(FakeCPU(), args, types.void) + descr = get_call_descr_dynamic(FakeCPU(), args, types.void, None, 42) assert descr is None # missing floats descr = get_call_descr_dynamic(FakeCPU(supports_floats=True), - args, types.void, ffi_flags=43) + args, types.void, None, ffi_flags=43) assert isinstance(descr, VoidCallDescr) assert descr.arg_classes == 'ifi' assert descr.get_ffi_flags() == 43 - descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8) + descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8, None, 42) assert isinstance(descr, DynamicIntCallDescr) assert descr.get_result_size(False) == 1 assert descr.is_result_signed() == True - descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8) + descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8, None, 42) assert isinstance(descr, DynamicIntCallDescr) assert descr.get_result_size(False) == 1 assert descr.is_result_signed() == False if not is_64_bit: - descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong) + descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong, + None, 42) assert descr is None # missing longlongs descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True), - [], types.slonglong, ffi_flags=43) + [], types.slonglong, None, ffi_flags=43) assert isinstance(descr, LongLongCallDescr) assert descr.get_ffi_flags() == 43 else: assert types.slonglong is types.slong - descr = get_call_descr_dynamic(FakeCPU(), [], types.float) + descr = get_call_descr_dynamic(FakeCPU(), [], types.float, None, 42) assert descr is None # missing singlefloats descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True), - [], types.float, ffi_flags=44) + [], types.float, None, ffi_flags=44) SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) assert isinstance(descr, SingleFloatCallDescr) assert descr.get_ffi_flags() == 44 diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -247,12 +247,14 @@ self.record = [] def do_malloc_fixedsize_clear(self, 
RESTYPE, type_id, size, - has_finalizer, contains_weakptr): + has_finalizer, has_light_finalizer, + contains_weakptr): assert not contains_weakptr + assert not has_finalizer # in these tests + assert not has_light_finalizer # in these tests p = llmemory.raw_malloc(size) p = llmemory.cast_adr_to_ptr(p, RESTYPE) - flags = int(has_finalizer) << 16 - tid = llop.combine_ushort(lltype.Signed, type_id, flags) + tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("fixedsize", repr(size), tid, p)) return p @@ -568,6 +570,28 @@ assert operations[1].getarg(2) == v_value assert operations[1].getdescr() == array_descr + def test_rewrite_assembler_5(self): + S = lltype.GcStruct('S') + A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) + interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, + A.OF, 'x') + wbdescr = self.gc_ll_descr.write_barrier_descr + ops = parse(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + expected = parse(""" + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + operations = get_deep_immutable_oplist(ops.operations) + operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, + operations, []) + equaloplists(operations, expected.operations) + def test_rewrite_assembler_initialization_store(self): S = lltype.GcStruct('S', ('parent', OBJECT), ('x', lltype.Signed)) diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -2,6 +2,8 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan +from pypy.jit.tool.oparser import parse +from pypy.jit.backend.detect_cpu import getcpuclass def newboxes(*values): return [BoxInt(v) for v in values] diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -1,5 +1,5 @@ from pypy.rlib.debug import debug_start, debug_print, debug_stop -from pypy.jit.metainterp import history, compile +from pypy.jit.metainterp import history class AbstractCPU(object): @@ -183,34 +183,35 @@ lst[n] = None self.fail_descr_free_list.extend(faildescr_indices) - @staticmethod - def sizeof(S): + def sizeof(self, S): raise NotImplementedError - @staticmethod - def fielddescrof(S, fieldname): + def fielddescrof(self, S, fieldname): """Return the Descr corresponding to field 'fieldname' on the structure 'S'. 
It is important that this function (at least) caches the results.""" raise NotImplementedError - @staticmethod - def arraydescrof(A): + def interiorfielddescrof(self, A, fieldname): raise NotImplementedError - @staticmethod - def calldescrof(FUNC, ARGS, RESULT): + def interiorfielddescrof_dynamic(self, offset, width, fieldsize, is_pointer, + is_float, is_signed): + raise NotImplementedError + + def arraydescrof(self, A): + raise NotImplementedError + + def calldescrof(self, FUNC, ARGS, RESULT): # FUNC is the original function type, but ARGS is a list of types # with Voids removed raise NotImplementedError - @staticmethod - def methdescrof(SELFTYPE, methname): + def methdescrof(self, SELFTYPE, methname): # must return a subclass of history.AbstractMethDescr raise NotImplementedError - @staticmethod - def typedescrof(TYPE): + def typedescrof(self, TYPE): raise NotImplementedError # ---------- the backend-dependent operations ---------- diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -5,7 +5,7 @@ BoxInt, Box, BoxPtr, LoopToken, ConstInt, ConstPtr, - BoxObj, Const, + BoxObj, ConstObj, BoxFloat, ConstFloat) from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.typesystem import deref @@ -111,7 +111,7 @@ self.cpu.set_future_value_int(0, 2) fail = self.cpu.execute_token(looptoken) res = self.cpu.get_latest_value_int(0) - assert res == 3 + assert res == 3 assert fail.identifier == 1 def test_compile_loop(self): @@ -127,7 +127,7 @@ ] inputargs = [i0] operations[2].setfailargs([i1]) - + self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) fail = self.cpu.execute_token(looptoken) @@ -148,7 +148,7 @@ ] inputargs = [i0] operations[2].setfailargs([None, None, i1, None]) - + self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) fail = self.cpu.execute_token(looptoken) @@ -372,7 +372,7 @@ for opnum, boxargs, retvalue in get_int_tests(): res = self.execute_operation(opnum, boxargs, 'int') assert res.value == retvalue - + def test_float_operations(self): from pypy.jit.metainterp.test.test_executor import get_float_tests for opnum, boxargs, rettype, retvalue in get_float_tests(self.cpu): @@ -438,7 +438,7 @@ def test_ovf_operations_reversed(self): self.test_ovf_operations(reversed=True) - + def test_bh_call(self): cpu = self.cpu # @@ -503,7 +503,7 @@ [funcbox, BoxInt(num), BoxInt(num)], 'int', descr=dyn_calldescr) assert res.value == 2 * num - + if cpu.supports_floats: def func(f0, f1, f2, f3, f4, f5, f6, i0, i1, f7, f8, f9): @@ -543,7 +543,7 @@ funcbox = self.get_funcbox(self.cpu, func_ptr) res = self.execute_operation(rop.CALL, [funcbox] + map(BoxInt, args), 'int', descr=calldescr) assert res.value == func(*args) - + def test_call_stack_alignment(self): # test stack alignment issues, notably for Mac OS/X. # also test the ordering of the arguments. 
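The new interiorfielddescrof() entry point pairs an array descr with a field descr, so the backend can address one field of one struct item inside a GcArray of structs. A minimal sketch of the addressing such a combined descr stands for, assuming made-up byte offsets rather than the real values computed by the symbolic helpers:

    # Hypothetical illustration only: the real offsets and item sizes come from
    # symbolic.get_size()/get_array_token(); the numbers below are invented.
    def interior_field_address(array_base, index, item_size, base_ofs, field_ofs):
        # address of item `index` in the array payload, then the field inside it
        return array_base + base_ofs + index * item_size + field_ofs

    # e.g. 16-byte struct items, payload starting 8 bytes into the array object,
    # and a field stored 4 bytes into each item:
    assert interior_field_address(0x1000, 3, 16, 8, 4) == 4156
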
@@ -615,7 +615,7 @@ res = self.execute_operation(rop.GETFIELD_GC, [t_box], 'int', descr=shortdescr) assert res.value == 1331 - + # u_box, U_box = self.alloc_instance(self.U) fielddescr2 = self.cpu.fielddescrof(self.S, 'next') @@ -695,7 +695,7 @@ def test_failing_guard_class(self): t_box, T_box = self.alloc_instance(self.T) - u_box, U_box = self.alloc_instance(self.U) + u_box, U_box = self.alloc_instance(self.U) null_box = self.null_instance() for opname, args in [(rop.GUARD_CLASS, [t_box, U_box]), (rop.GUARD_CLASS, [u_box, T_box]), @@ -787,7 +787,7 @@ r = self.execute_operation(rop.GETARRAYITEM_GC, [a_box, BoxInt(3)], 'int', descr=arraydescr) assert r.value == 160 - + # if isinstance(A, lltype.GcArray): A = lltype.Ptr(A) @@ -880,6 +880,73 @@ 'int', descr=arraydescr) assert r.value == 7441 + def test_array_of_structs(self): + TP = lltype.GcStruct('x') + ITEM = lltype.Struct('x', + ('vs', lltype.Signed), + ('vu', lltype.Unsigned), + ('vsc', rffi.SIGNEDCHAR), + ('vuc', rffi.UCHAR), + ('vss', rffi.SHORT), + ('vus', rffi.USHORT), + ('vsi', rffi.INT), + ('vui', rffi.UINT), + ('k', lltype.Float), + ('p', lltype.Ptr(TP))) + a_box, A = self.alloc_array_of(ITEM, 15) + s_box, S = self.alloc_instance(TP) + kdescr = self.cpu.interiorfielddescrof(A, 'k') + pdescr = self.cpu.interiorfielddescrof(A, 'p') + self.execute_operation(rop.SETINTERIORFIELD_GC, [a_box, BoxInt(3), + boxfloat(1.5)], + 'void', descr=kdescr) + f = self.cpu.bh_getinteriorfield_gc_f(a_box.getref_base(), 3, kdescr) + assert longlong.getrealfloat(f) == 1.5 + self.cpu.bh_setinteriorfield_gc_f(a_box.getref_base(), 3, kdescr, longlong.getfloatstorage(2.5)) + r = self.execute_operation(rop.GETINTERIORFIELD_GC, [a_box, BoxInt(3)], + 'float', descr=kdescr) + assert r.getfloat() == 2.5 + # + NUMBER_FIELDS = [('vs', lltype.Signed), + ('vu', lltype.Unsigned), + ('vsc', rffi.SIGNEDCHAR), + ('vuc', rffi.UCHAR), + ('vss', rffi.SHORT), + ('vus', rffi.USHORT), + ('vsi', rffi.INT), + ('vui', rffi.UINT)] + for name, TYPE in NUMBER_FIELDS[::-1]: + vdescr = self.cpu.interiorfielddescrof(A, name) + self.execute_operation(rop.SETINTERIORFIELD_GC, [a_box, BoxInt(3), + BoxInt(-15)], + 'void', descr=vdescr) + for name, TYPE in NUMBER_FIELDS: + vdescr = self.cpu.interiorfielddescrof(A, name) + i = self.cpu.bh_getinteriorfield_gc_i(a_box.getref_base(), 3, + vdescr) + assert i == rffi.cast(lltype.Signed, rffi.cast(TYPE, -15)) + for name, TYPE in NUMBER_FIELDS[::-1]: + vdescr = self.cpu.interiorfielddescrof(A, name) + self.cpu.bh_setinteriorfield_gc_i(a_box.getref_base(), 3, + vdescr, -25) + for name, TYPE in NUMBER_FIELDS: + vdescr = self.cpu.interiorfielddescrof(A, name) + r = self.execute_operation(rop.GETINTERIORFIELD_GC, + [a_box, BoxInt(3)], + 'int', descr=vdescr) + assert r.getint() == rffi.cast(lltype.Signed, rffi.cast(TYPE, -25)) + # + self.execute_operation(rop.SETINTERIORFIELD_GC, [a_box, BoxInt(4), + s_box], + 'void', descr=pdescr) + r = self.cpu.bh_getinteriorfield_gc_r(a_box.getref_base(), 4, pdescr) + assert r == s_box.getref_base() + self.cpu.bh_setinteriorfield_gc_r(a_box.getref_base(), 3, pdescr, + s_box.getref_base()) + r = self.execute_operation(rop.GETINTERIORFIELD_GC, [a_box, BoxInt(3)], + 'ref', descr=pdescr) + assert r.getref_base() == s_box.getref_base() + def test_string_basic(self): s_box = self.alloc_string("hello\xfe") r = self.execute_operation(rop.STRLEN, [s_box], 'int') @@ -1402,7 +1469,7 @@ addr = llmemory.cast_ptr_to_adr(func_ptr) return ConstInt(heaptracker.adr2int(addr)) - + MY_VTABLE = rclass.OBJECT_VTABLE # for tests only S = 
lltype.GcForwardReference() @@ -1439,7 +1506,6 @@ return BoxPtr(lltype.nullptr(llmemory.GCREF.TO)) def alloc_array_of(self, ITEM, length): - cpu = self.cpu A = lltype.GcArray(ITEM) a = lltype.malloc(A, length) a_box = BoxPtr(lltype.cast_opaque_ptr(llmemory.GCREF, a)) @@ -1468,20 +1534,16 @@ return u''.join(u.chars) - def test_casts(self): - py.test.skip("xxx fix or kill") - from pypy.rpython.lltypesystem import lltype, llmemory - TP = lltype.GcStruct('x') - x = lltype.malloc(TP) - x = lltype.cast_opaque_ptr(llmemory.GCREF, x) + def test_cast_int_to_ptr(self): + res = self.execute_operation(rop.CAST_INT_TO_PTR, + [BoxInt(-17)], 'ref').value + assert lltype.cast_ptr_to_int(res) == -17 + + def test_cast_ptr_to_int(self): + x = lltype.cast_int_to_ptr(llmemory.GCREF, -19) res = self.execute_operation(rop.CAST_PTR_TO_INT, - [BoxPtr(x)], 'int').value - expected = self.cpu.cast_adr_to_int(llmemory.cast_ptr_to_adr(x)) - assert rffi.get_real_int(res) == rffi.get_real_int(expected) - res = self.execute_operation(rop.CAST_PTR_TO_INT, - [ConstPtr(x)], 'int').value - expected = self.cpu.cast_adr_to_int(llmemory.cast_ptr_to_adr(x)) - assert rffi.get_real_int(res) == rffi.get_real_int(expected) + [BoxPtr(x)], 'int').value + assert res == -19 def test_ooops_non_gc(self): x = lltype.malloc(lltype.Struct('x'), flavor='raw') @@ -2299,13 +2361,6 @@ # cpu.bh_strsetitem(x, 4, ord('/')) assert str.chars[4] == '/' - # -## x = cpu.bh_newstr(5) -## y = cpu.bh_cast_ptr_to_int(x) -## z = cpu.bh_cast_ptr_to_int(x) -## y = rffi.get_real_int(y) -## z = rffi.get_real_int(z) -## assert type(y) == type(z) == int and y == z def test_sorting_of_fields(self): S = self.S @@ -2329,7 +2384,7 @@ for opname, arg, res in ops: self.execute_operation(opname, [arg], 'void') assert self.guard_failed == res - + lltype.free(x, flavor='raw') def test_assembler_call(self): @@ -2409,7 +2464,7 @@ FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, EffectInfo.MOST_GENERAL) - + ops = ''' [f0, f1] f2 = float_add(f0, f1) @@ -2500,7 +2555,7 @@ FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, EffectInfo.MOST_GENERAL) - + ops = ''' [f0, f1] f2 = float_add(f0, f1) @@ -2951,4 +3006,4 @@ def alloc_unicode(self, unicode): py.test.skip("implement me") - + diff --git a/pypy/jit/backend/test/test_ll_random.py b/pypy/jit/backend/test/test_ll_random.py --- a/pypy/jit/backend/test/test_ll_random.py +++ b/pypy/jit/backend/test/test_ll_random.py @@ -28,16 +28,27 @@ fork.structure_types_and_vtables = self.structure_types_and_vtables return fork - def get_structptr_var(self, r, must_have_vtable=False, type=lltype.Struct): + def _choose_ptr_vars(self, from_, type, array_of_structs): + ptrvars = [] + for i in range(len(from_)): + v, S = from_[i][:2] + if not isinstance(S, type): + continue + if ((isinstance(S, lltype.Array) and + isinstance(S.OF, lltype.Struct)) == array_of_structs): + ptrvars.append((v, S)) + return ptrvars + + def get_structptr_var(self, r, must_have_vtable=False, type=lltype.Struct, + array_of_structs=False): while True: - ptrvars = [(v, S) for (v, S) in self.ptrvars - if isinstance(S, type)] + ptrvars = self._choose_ptr_vars(self.ptrvars, type, + array_of_structs) if ptrvars and r.random() < 0.8: v, S = r.choice(ptrvars) else: - prebuilt_ptr_consts = [(v, S) - for (v, S, _) in self.prebuilt_ptr_consts - if isinstance(S, type)] + prebuilt_ptr_consts = self._choose_ptr_vars( + self.prebuilt_ptr_consts, type, array_of_structs) if 
prebuilt_ptr_consts and r.random() < 0.7: v, S = r.choice(prebuilt_ptr_consts) else: @@ -48,7 +59,8 @@ has_vtable=must_have_vtable) else: # create a new constant array - p = self.get_random_array(r) + p = self.get_random_array(r, + must_be_array_of_structs=array_of_structs) S = lltype.typeOf(p).TO v = ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, p)) self.prebuilt_ptr_consts.append((v, S, @@ -74,7 +86,8 @@ TYPE = lltype.Signed return TYPE - def get_random_structure_type(self, r, with_vtable=None, cache=True): + def get_random_structure_type(self, r, with_vtable=None, cache=True, + type=lltype.GcStruct): if cache and self.structure_types and r.random() < 0.5: return r.choice(self.structure_types) fields = [] @@ -85,7 +98,7 @@ for i in range(r.randrange(1, 5)): TYPE = self.get_random_primitive_type(r) fields.append(('f%d' % i, TYPE)) - S = lltype.GcStruct('S%d' % self.counter, *fields, **kwds) + S = type('S%d' % self.counter, *fields, **kwds) self.counter += 1 if cache: self.structure_types.append(S) @@ -125,17 +138,29 @@ setattr(p, fieldname, rffi.cast(TYPE, r.random_integer())) return p - def get_random_array_type(self, r): - TYPE = self.get_random_primitive_type(r) + def get_random_array_type(self, r, can_be_array_of_struct=False, + must_be_array_of_structs=False): + if ((can_be_array_of_struct and r.random() < 0.1) or + must_be_array_of_structs): + TYPE = self.get_random_structure_type(r, cache=False, + type=lltype.Struct) + else: + TYPE = self.get_random_primitive_type(r) return lltype.GcArray(TYPE) - def get_random_array(self, r): - A = self.get_random_array_type(r) + def get_random_array(self, r, must_be_array_of_structs=False): + A = self.get_random_array_type(r, + must_be_array_of_structs=must_be_array_of_structs) length = (r.random_integer() // 15) % 300 # length: between 0 and 299 # likely to be small p = lltype.malloc(A, length) - for i in range(length): - p[i] = rffi.cast(A.OF, r.random_integer()) + if isinstance(A.OF, lltype.Primitive): + for i in range(length): + p[i] = rffi.cast(A.OF, r.random_integer()) + else: + for i in range(length): + for fname, TP in A.OF._flds.iteritems(): + setattr(p[i], fname, rffi.cast(TP, r.random_integer())) return p def get_index(self, length, r): @@ -155,8 +180,16 @@ dic[fieldname] = getattr(p, fieldname) else: assert isinstance(S, lltype.Array) - for i in range(len(p)): - dic[i] = p[i] + if isinstance(S.OF, lltype.Struct): + for i in range(len(p)): + item = p[i] + s1 = {} + for fieldname in S.OF._names: + s1[fieldname] = getattr(item, fieldname) + dic[i] = s1 + else: + for i in range(len(p)): + dic[i] = p[i] return dic def print_loop_prebuilt(self, names, writevar, s): @@ -220,7 +253,7 @@ class GetFieldOperation(test_random.AbstractOperation): def field_descr(self, builder, r): - v, S = builder.get_structptr_var(r) + v, S = builder.get_structptr_var(r, ) names = S._names if names[0] == 'parent': names = names[1:] @@ -239,6 +272,28 @@ continue break +class GetInteriorFieldOperation(test_random.AbstractOperation): + def field_descr(self, builder, r): + v, A = builder.get_structptr_var(r, type=lltype.Array, + array_of_structs=True) + array = v.getref(lltype.Ptr(A)) + v_index = builder.get_index(len(array), r) + name = r.choice(A.OF._names) + descr = builder.cpu.interiorfielddescrof(A, name) + descr._random_info = 'cpu.interiorfielddescrof(%s, %r)' % (A.OF._name, + name) + TYPE = getattr(A.OF, name) + return v, v_index, descr, TYPE + + def produce_into(self, builder, r): + while True: + try: + v, v_index, descr, _ = self.field_descr(builder, r) + 
self.put(builder, [v, v_index], descr) + except lltype.UninitializedMemoryAccess: + continue + break + class SetFieldOperation(GetFieldOperation): def produce_into(self, builder, r): v, descr, TYPE = self.field_descr(builder, r) @@ -251,6 +306,18 @@ break builder.do(self.opnum, [v, w], descr) +class SetInteriorFieldOperation(GetInteriorFieldOperation): + def produce_into(self, builder, r): + v, v_index, descr, TYPE = self.field_descr(builder, r) + while True: + if r.random() < 0.3: + w = ConstInt(r.random_integer()) + else: + w = r.choice(builder.intvars) + if rffi.cast(lltype.Signed, rffi.cast(TYPE, w.value)) == w.value: + break + builder.do(self.opnum, [v, v_index, w], descr) + class NewOperation(test_random.AbstractOperation): def size_descr(self, builder, S): descr = builder.cpu.sizeof(S) @@ -306,7 +373,7 @@ class NewArrayOperation(ArrayOperation): def produce_into(self, builder, r): - A = builder.get_random_array_type(r) + A = builder.get_random_array_type(r, can_be_array_of_struct=True) v_size = builder.get_index(300, r) v_ptr = builder.do(self.opnum, [v_size], self.array_descr(builder, A)) builder.ptrvars.append((v_ptr, A)) @@ -586,7 +653,9 @@ for i in range(4): # make more common OPERATIONS.append(GetFieldOperation(rop.GETFIELD_GC)) OPERATIONS.append(GetFieldOperation(rop.GETFIELD_GC)) + OPERATIONS.append(GetInteriorFieldOperation(rop.GETINTERIORFIELD_GC)) OPERATIONS.append(SetFieldOperation(rop.SETFIELD_GC)) + OPERATIONS.append(SetInteriorFieldOperation(rop.SETINTERIORFIELD_GC)) OPERATIONS.append(NewOperation(rop.NEW)) OPERATIONS.append(NewOperation(rop.NEW_WITH_VTABLE)) diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -495,9 +495,9 @@ if pytest.config.option.backend == 'llgraph': from pypy.jit.backend.llgraph.runner import LLtypeCPU return LLtypeCPU(None) - elif pytest.config.option.backend == 'x86': - from pypy.jit.backend.x86.runner import CPU386 - return CPU386(None, None) + elif pytest.config.option.backend == 'cpu': + from pypy.jit.backend.detect_cpu import getcpuclass + return getcpuclass()(None, None) else: assert 0, "unknown backend %r" % pytest.config.option.backend @@ -595,6 +595,10 @@ for name, value in fields.items(): if isinstance(name, str): setattr(container, name, value) + elif isinstance(value, dict): + item = container.getitem(name) + for key1, value1 in value.items(): + setattr(item, key1, value1) else: container.setitem(name, value) diff --git a/pypy/jit/backend/x86/test/test_zll_random.py b/pypy/jit/backend/test/test_zll_stress.py rename from pypy/jit/backend/x86/test/test_zll_random.py rename to pypy/jit/backend/test/test_zll_stress.py diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -1,15 +1,15 @@ import sys, os from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper -from pypy.jit.metainterp.history import Const, Box, BoxInt, BoxPtr, BoxFloat +from pypy.jit.metainterp.history import Const, Box, BoxInt, ConstInt from pypy.jit.metainterp.history import (AbstractFailDescr, INT, REF, FLOAT, LoopToken) from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.jit.backend.model import CompiledLoopToken -from pypy.jit.backend.x86.regalloc 
import (RegAlloc, get_ebp_ofs, - _get_scale, gpr_reg_mgr_cls) +from pypy.jit.backend.x86.regalloc import (RegAlloc, get_ebp_ofs, _get_scale, + gpr_reg_mgr_cls, _valid_addressing_size) from pypy.jit.backend.x86.arch import (FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, IS_X86_32, IS_X86_64) @@ -36,7 +36,6 @@ from pypy.rlib import rgc from pypy.rlib.clibffi import FFI_DEFAULT_ABI from pypy.jit.backend.x86.jump import remap_frame_layout -from pypy.jit.metainterp.history import ConstInt, BoxInt from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter import longlong @@ -729,8 +728,8 @@ # Also, make sure this is consistent with FRAME_FIXED_SIZE. self.mc.PUSH_r(ebp.value) self.mc.MOV_rr(ebp.value, esp.value) - for regloc in self.cpu.CALLEE_SAVE_REGISTERS: - self.mc.PUSH_r(regloc.value) + for loc in self.cpu.CALLEE_SAVE_REGISTERS: + self.mc.PUSH_r(loc.value) gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: @@ -994,7 +993,7 @@ effectinfo = op.getdescr().get_extra_info() oopspecindex = effectinfo.oopspecindex genop_llong_list[oopspecindex](self, op, arglocs, resloc) - + def regalloc_perform_math(self, op, arglocs, resloc): effectinfo = op.getdescr().get_extra_info() oopspecindex = effectinfo.oopspecindex @@ -1277,8 +1276,8 @@ genop_int_ne = _cmpop("NE", "NE") genop_int_gt = _cmpop("G", "L") genop_int_ge = _cmpop("GE", "LE") - genop_ptr_eq = genop_int_eq - genop_ptr_ne = genop_int_ne + genop_ptr_eq = genop_instance_ptr_eq = genop_int_eq + genop_ptr_ne = genop_instance_ptr_ne = genop_int_ne genop_float_lt = _cmpop_float('B', 'A') genop_float_le = _cmpop_float('BE', 'AE') @@ -1298,8 +1297,8 @@ genop_guard_int_ne = _cmpop_guard("NE", "NE", "E", "E") genop_guard_int_gt = _cmpop_guard("G", "L", "LE", "GE") genop_guard_int_ge = _cmpop_guard("GE", "LE", "L", "G") - genop_guard_ptr_eq = genop_guard_int_eq - genop_guard_ptr_ne = genop_guard_int_ne + genop_guard_ptr_eq = genop_guard_instance_ptr_eq = genop_guard_int_eq + genop_guard_ptr_ne = genop_guard_instance_ptr_ne = genop_guard_int_ne genop_guard_uint_gt = _cmpop_guard("A", "B", "BE", "AE") genop_guard_uint_lt = _cmpop_guard("B", "A", "AE", "BE") @@ -1311,7 +1310,7 @@ genop_guard_float_eq = _cmpop_guard_float("E", "E", "NE","NE") genop_guard_float_gt = _cmpop_guard_float("A", "B", "BE","AE") genop_guard_float_ge = _cmpop_guard_float("AE","BE", "B", "A") - + def genop_math_sqrt(self, op, arglocs, resloc): self.mc.SQRTSD(arglocs[0], resloc) @@ -1387,7 +1386,8 @@ def genop_same_as(self, op, arglocs, resloc): self.mov(arglocs[0], resloc) - #genop_cast_ptr_to_int = genop_same_as + genop_cast_ptr_to_int = genop_same_as + genop_cast_int_to_ptr = genop_same_as def genop_int_mod(self, op, arglocs, resloc): if IS_X86_32: @@ -1596,12 +1596,50 @@ genop_getarrayitem_gc_pure = genop_getarrayitem_gc genop_getarrayitem_raw = genop_getarrayitem_gc + def _get_interiorfield_addr(self, temp_loc, index_loc, itemsize_loc, + base_loc, ofs_loc): + assert isinstance(itemsize_loc, ImmedLoc) + if isinstance(index_loc, ImmedLoc): + temp_loc = imm(index_loc.value * itemsize_loc.value) + elif _valid_addressing_size(itemsize_loc.value): + return AddressLoc(base_loc, index_loc, _get_scale(itemsize_loc.value), ofs_loc.value) + else: + # XXX should not use IMUL in more cases, it can use a clever LEA + assert isinstance(temp_loc, RegLoc) + assert isinstance(index_loc, RegLoc) + assert not temp_loc.is_xmm + self.mc.IMUL_rri(temp_loc.value, index_loc.value, + itemsize_loc.value) + assert isinstance(ofs_loc, ImmedLoc) + return 
AddressLoc(base_loc, temp_loc, 0, ofs_loc.value) + + def genop_getinteriorfield_gc(self, op, arglocs, resloc): + (base_loc, ofs_loc, itemsize_loc, fieldsize_loc, + index_loc, temp_loc, sign_loc) = arglocs + src_addr = self._get_interiorfield_addr(temp_loc, index_loc, + itemsize_loc, base_loc, + ofs_loc) + self.load_from_mem(resloc, src_addr, fieldsize_loc, sign_loc) + + genop_getinteriorfield_raw = genop_getinteriorfield_gc + + def genop_discard_setfield_gc(self, op, arglocs): base_loc, ofs_loc, size_loc, value_loc = arglocs assert isinstance(size_loc, ImmedLoc) dest_addr = AddressLoc(base_loc, ofs_loc) self.save_into_mem(dest_addr, value_loc, size_loc) + def genop_discard_setinteriorfield_gc(self, op, arglocs): + (base_loc, ofs_loc, itemsize_loc, fieldsize_loc, + index_loc, temp_loc, value_loc) = arglocs + dest_addr = self._get_interiorfield_addr(temp_loc, index_loc, + itemsize_loc, base_loc, + ofs_loc) + self.save_into_mem(dest_addr, value_loc, fieldsize_loc) + + genop_discard_setinteriorfield_raw = genop_discard_setinteriorfield_gc + def genop_discard_setarrayitem_gc(self, op, arglocs): base_loc, ofs_loc, value_loc, size_loc, baseofs = arglocs assert isinstance(baseofs, ImmedLoc) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -7,7 +7,7 @@ ResOperation, BoxPtr, ConstFloat, BoxFloat, LoopToken, INT, REF, FLOAT) from pypy.jit.backend.x86.regloc import * -from pypy.rpython.lltypesystem import lltype, ll2ctypes, rffi, rstr +from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.rlib.objectmodel import we_are_translated from pypy.rlib import rgc from pypy.jit.backend.llsupport import symbolic @@ -17,11 +17,12 @@ from pypy.jit.metainterp.resoperation import rop from pypy.jit.backend.llsupport.descr import BaseFieldDescr, BaseArrayDescr from pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr +from pypy.jit.backend.llsupport.descr import InteriorFieldDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64, MY_COPY_OF_REGS -from pypy.rlib.rarithmetic import r_longlong, r_uint +from pypy.rlib.rarithmetic import r_longlong class X86RegisterManager(RegisterManager): @@ -166,26 +167,22 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity = self._compute_vars_longevity(inputargs, operations) + longevity, useful = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) - return operations + return operations, useful def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - jump = operations[-1] - loop_consts = self._compute_loop_consts(inputargs, jump, looptoken) - self.loop_consts = loop_consts - return self._process_inputargs(inputargs), operations + operations, useful = self._prepare(inputargs, operations, allgcrefs) + return self._process_inputargs(inputargs, useful), operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - self.loop_consts = {} + operations, 
_ = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] @@ -194,7 +191,7 @@ def reserve_param(self, n): self.param_depth = max(self.param_depth, n) - def _process_inputargs(self, inputargs): + def _process_inputargs(self, inputargs, useful): # XXX we can sort out here by longevity if we need something # more optimal floatlocs = [None] * len(inputargs) @@ -210,7 +207,7 @@ arg = inputargs[i] assert not isinstance(arg, Const) reg = None - if arg not in self.loop_consts and self.longevity[arg][1] > -1: + if self.longevity[arg][1] > -1 and arg in useful: if arg.type == FLOAT: # xxx is it really a good idea? at the first CALL they # will all be flushed anyway @@ -286,15 +283,15 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: - loop_consts = {} - else: - loop_consts = {} - for i in range(len(inputargs)): - if inputargs[i] is jump.getarg(i): - loop_consts[inputargs[i]] = i - return loop_consts + #def _compute_loop_consts(self, inputargs, jump, looptoken): + # if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: + # loop_consts = {} + # else: + # loop_consts = {} + # for i in range(len(inputargs)): + # if inputargs[i] is jump.getarg(i): + # loop_consts[inputargs[i]] = i + # return loop_consts def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py @@ -433,7 +430,7 @@ if self.can_merge_with_next_guard(op, i, operations): oplist_with_guard[op.getopnum()](self, op, operations[i + 1]) i += 1 - elif not we_are_translated() and op.getopnum() == -124: + elif not we_are_translated() and op.getopnum() == -124: self._consider_force_spill(op) else: oplist[op.getopnum()](self, op) @@ -449,8 +446,14 @@ def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" + + # returns a pair longevity/useful. Non-useful variables are ones that + # never appear in the assembler or it does not matter if they appear on + # stack or in registers. 
Main example is loop arguments that go + # only to guard operations or to jump or to finish produced = {} last_used = {} + useful = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -458,8 +461,11 @@ continue assert op.result not in produced produced[op.result] = i + opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) + if opnum != rop.JUMP and opnum != rop.FINISH: + useful[arg] = None if isinstance(arg, Box) and arg not in last_used: last_used[arg] = i if op.is_guard(): @@ -485,7 +491,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity + return longevity, useful def loc(self, v): if v is None: # xxx kludgy @@ -650,8 +656,8 @@ consider_uint_lt = _consider_compop consider_uint_le = _consider_compop consider_uint_ge = _consider_compop - consider_ptr_eq = _consider_compop - consider_ptr_ne = _consider_compop + consider_ptr_eq = consider_instance_ptr_eq = _consider_compop + consider_ptr_ne = consider_instance_ptr_ne = _consider_compop def _consider_float_op(self, op): loc1 = self.xrm.loc(op.getarg(1)) @@ -815,7 +821,7 @@ save_all_regs = guard_not_forced_op is not None self.xrm.before_call(force_store, save_all_regs=save_all_regs) if not save_all_regs: - gcrootmap = gc_ll_descr = self.assembler.cpu.gc_ll_descr.gcrootmap + gcrootmap = self.assembler.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: save_all_regs = 2 self.rm.before_call(force_store, save_all_regs=save_all_regs) @@ -972,74 +978,27 @@ return self._call(op, arglocs) def consider_newstr(self, op): - gc_ll_descr = self.assembler.cpu.gc_ll_descr - if gc_ll_descr.get_funcptr_for_newstr is not None: - # framework GC - loc = self.loc(op.getarg(0)) - return self._call(op, [loc]) - # boehm GC (XXX kill the following code at some point) - ofs_items, itemsize, ofs = symbolic.get_array_token(rstr.STR, self.translate_support_code) - assert itemsize == 1 - return self._malloc_varsize(ofs_items, ofs, 0, op.getarg(0), - op.result) + loc = self.loc(op.getarg(0)) + return self._call(op, [loc]) def consider_newunicode(self, op): - gc_ll_descr = self.assembler.cpu.gc_ll_descr - if gc_ll_descr.get_funcptr_for_newunicode is not None: - # framework GC - loc = self.loc(op.getarg(0)) - return self._call(op, [loc]) - # boehm GC (XXX kill the following code at some point) - ofs_items, _, ofs = symbolic.get_array_token(rstr.UNICODE, - self.translate_support_code) - scale = self._get_unicode_item_scale() - return self._malloc_varsize(ofs_items, ofs, scale, op.getarg(0), - op.result) - - def _malloc_varsize(self, ofs_items, ofs_length, scale, v, res_v): - # XXX kill this function at some point - if isinstance(v, Box): - loc = self.rm.make_sure_var_in_reg(v, [v]) - tempbox = TempBox() - other_loc = self.rm.force_allocate_reg(tempbox, [v]) - self.assembler.load_effective_addr(loc, ofs_items,scale, other_loc) - else: - tempbox = None - other_loc = imm(ofs_items + (v.getint() << scale)) - self._call(ResOperation(rop.NEW, [], res_v), - [other_loc], [v]) - loc = self.rm.make_sure_var_in_reg(v, [res_v]) - assert self.loc(res_v) == eax - # now we have to reload length to some reasonable place - self.rm.possibly_free_var(v) - if tempbox is not None: - self.rm.possibly_free_var(tempbox) - self.PerformDiscard(ResOperation(rop.SETFIELD_GC, [None, None], None), - [eax, imm(ofs_length), imm(WORD), loc]) + loc = self.loc(op.getarg(0)) + return self._call(op, [loc]) def consider_new_array(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr - if 
gc_ll_descr.get_funcptr_for_newarray is not None: - # framework GC - box_num_elem = op.getarg(0) - if isinstance(box_num_elem, ConstInt): - num_elem = box_num_elem.value - if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), - num_elem): - self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) - return - args = self.assembler.cpu.gc_ll_descr.args_for_new_array( - op.getdescr()) - arglocs = [imm(x) for x in args] - arglocs.append(self.loc(box_num_elem)) - self._call(op, arglocs) - return - # boehm GC (XXX kill the following code at some point) - itemsize, basesize, ofs_length, _, _ = ( - self._unpack_arraydescr(op.getdescr())) - scale_of_field = _get_scale(itemsize) - self._malloc_varsize(basesize, ofs_length, scale_of_field, - op.getarg(0), op.result) + box_num_elem = op.getarg(0) + if isinstance(box_num_elem, ConstInt): + num_elem = box_num_elem.value + if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), + num_elem): + self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) + return + args = self.assembler.cpu.gc_ll_descr.args_for_new_array( + op.getdescr()) + arglocs = [imm(x) for x in args] + arglocs.append(self.loc(box_num_elem)) + self._call(op, arglocs) def _unpack_arraydescr(self, arraydescr): assert isinstance(arraydescr, BaseArrayDescr) @@ -1058,6 +1017,16 @@ sign = fielddescr.is_field_signed() return imm(ofs), imm(size), ptr, sign + def _unpack_interiorfielddescr(self, descr): + assert isinstance(descr, InteriorFieldDescr) + arraydescr = descr.arraydescr + ofs = arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + fieldsize = descr.fielddescr.get_field_size(self.translate_support_code) + sign = descr.fielddescr.is_field_signed() + ofs += descr.fielddescr.offset + return imm(ofs), imm(itemsize), imm(fieldsize), sign + def consider_setfield_gc(self, op): ofs_loc, size_loc, _, _ = self._unpack_fielddescr(op.getdescr()) assert isinstance(size_loc, ImmedLoc) @@ -1074,6 +1043,37 @@ consider_setfield_raw = consider_setfield_gc + def consider_setinteriorfield_gc(self, op): + t = self._unpack_interiorfielddescr(op.getdescr()) + ofs, itemsize, fieldsize, _ = t + args = op.getarglist() + if fieldsize.value == 1: + need_lower_byte = True + else: + need_lower_byte = False + box_base, box_index, box_value = args + base_loc = self.rm.make_sure_var_in_reg(box_base, args) + index_loc = self.rm.make_sure_var_in_reg(box_index, args) + value_loc = self.make_sure_var_in_reg(box_value, args, + need_lower_byte=need_lower_byte) + # If 'index_loc' is not an immediate, then we need a 'temp_loc' that + # is a register whose value will be destroyed. It's fine to destroy + # the same register as 'index_loc', but not the other ones. 
+ self.rm.possibly_free_var(box_index) + if not isinstance(index_loc, ImmedLoc): + tempvar = TempBox() + temp_loc = self.rm.force_allocate_reg(tempvar, [box_base, + box_value]) + self.rm.possibly_free_var(tempvar) + else: + temp_loc = None + self.rm.possibly_free_var(box_base) + self.possibly_free_var(box_value) + self.PerformDiscard(op, [base_loc, ofs, itemsize, fieldsize, + index_loc, temp_loc, value_loc]) + + consider_setinteriorfield_raw = consider_setinteriorfield_gc + def consider_strsetitem(self, op): args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) @@ -1135,6 +1135,38 @@ consider_getarrayitem_raw = consider_getarrayitem_gc consider_getarrayitem_gc_pure = consider_getarrayitem_gc + def consider_getinteriorfield_gc(self, op): + t = self._unpack_interiorfielddescr(op.getdescr()) + ofs, itemsize, fieldsize, sign = t + if sign: + sign_loc = imm1 + else: From noreply at buildbot.pypy.org Tue Dec 6 14:47:35 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Tue, 6 Dec 2011 14:47:35 +0100 (CET) Subject: [pypy-commit] pypy type-specialized-instances: oups Message-ID: <20111206134735.3C3CE8205C@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: type-specialized-instances Changeset: r50209:0c894ad8e9b5 Date: 2011-12-06 14:47 +0100 http://bitbucket.org/pypy/pypy/changeset/0c894ad8e9b5/ Log: oups diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -41,7 +41,7 @@ return self.terminator._write_terminator(obj, selector, w_value) try: attr.write_attr(obj, w_value) #obj._mapdict_write_storage(index, w_value) - except OperationError: + except OperationError, e: if not e.match(self.space, self.space.w_TypeError): raise firstattr = obj._get_mapdict_map() From noreply at buildbot.pypy.org Tue Dec 6 17:08:44 2011 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 6 Dec 2011 17:08:44 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: (arigo, bivab): aahhh. Fix an issue with floats that are spilled in a loop and Message-ID: <20111206160844.B1C438205C@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50210:63e21292fab4 Date: 2011-12-06 17:06 +0100 http://bitbucket.org/pypy/pypy/changeset/63e21292fab4/ Log: (arigo, bivab): aahhh. Fix an issue with floats that are spilled in a loop and later read in a bridge. 
Due to an off by one issue the reading was broken in the bridge diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -244,7 +244,7 @@ stack_loc = decode32(enc, i+1) i += 4 if group == self.FLOAT_TYPE: - value = decode64(stack, frame_depth - stack_loc*WORD) + value = decode64(stack, frame_depth - (stack_loc+1)*WORD) fvalue = rffi.cast(longlong.FLOATSTORAGE, value) self.fail_boxes_float.setitem(fail_index, fvalue) continue @@ -408,7 +408,13 @@ else: assert loc.is_stack() mem[j] = self.STACK_LOC - encode32(mem, j+1, loc.position) + if arg.type == FLOAT: + # Float locs store the location number with an offset + # of 1 -.- so we need to take this into account here + # when generating the encoding + encode32(mem, j+1, loc.position-1) + else: + encode32(mem, j+1, loc.position) j += 5 else: mem[j] = self.EMPTY_LOC diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -53,6 +53,9 @@ def frame_pos(loc, type): num_words = ARMFrameManager.frame_size(type) if type == FLOAT: + # Make sure that loc is an even value + # the frame layout requires loc to be even!! + assert (loc & 1) == 0 return locations.StackLocation(loc+1, num_words=num_words, type=type) return locations.StackLocation(loc, num_words=num_words, type=type) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -1212,6 +1212,51 @@ got = longlong.getrealfloat(self.cpu.get_latest_value_float(i)) assert got == 13.5 + 6.73 * i + def test_compile_bridge_spilled_float(self): + if not self.cpu.supports_floats: + py.test.skip("requires floats") + fboxes = [BoxFloat() for i in range(3)] + faildescr1 = BasicFailDescr(100) + loopops = """ + [i0,f1, f2] + f3 = float_add(f1, f2) + force_spill(f3) + force_spill(f1) + force_spill(f2) + guard_false(i0) [f1, f2, f3] + finish()""" + loop = parse(loopops) + looptoken = LoopToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.set_future_value_int(0, 1) + self.cpu.set_future_value_float(1, longlong.getfloatstorage(132.25)) + self.cpu.set_future_value_float(2, longlong.getfloatstorage(0.75)) + fail = self.cpu.execute_token(looptoken) + assert loop.operations[-2].getdescr() == fail + f1 = self.cpu.get_latest_value_float(0) + f2 = self.cpu.get_latest_value_float(1) + f3 = self.cpu.get_latest_value_float(2) + assert longlong.getrealfloat(f1) == 132.25 + assert longlong.getrealfloat(f2) == 0.75 + assert longlong.getrealfloat(f3) == 133.0 + + bridgeops = [ + ResOperation(rop.FINISH, fboxes, None, descr=faildescr1), + ] + self.cpu.compile_bridge(loop.operations[-2].getdescr(), fboxes, + bridgeops, looptoken) + self.cpu.set_future_value_int(0, 1) + self.cpu.set_future_value_float(1, longlong.getfloatstorage(132.25)) + self.cpu.set_future_value_float(2, longlong.getfloatstorage(0.75)) + fail = self.cpu.execute_token(looptoken) + assert fail.identifier == 100 + f1 = self.cpu.get_latest_value_float(0) + f2 = self.cpu.get_latest_value_float(1) + f3 = self.cpu.get_latest_value_float(2) + assert longlong.getrealfloat(f1) == 132.25 + assert longlong.getrealfloat(f2) == 0.75 + assert longlong.getrealfloat(f3) == 133.0 + def test_integers_and_guards2(self): for opname, compare in [ (rop.INT_IS_TRUE, lambda x: bool(x)), From noreply at buildbot.pypy.org Tue 
Dec 6 17:08:45 2011 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 6 Dec 2011 17:08:45 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: remove some unused imports Message-ID: <20111206160845.D89AD8205C@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50211:5140a209b50a Date: 2011-12-06 17:07 +0100 http://bitbucket.org/pypy/pypy/changeset/5140a209b50a/ Log: remove some unused imports diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -3,9 +3,7 @@ from pypy.jit.backend.arm import locations from pypy.jit.backend.arm import registers as r from pypy.jit.backend.arm import shift -from pypy.jit.backend.arm.arch import (WORD, FUNC_ALIGN, arm_int_div, - arm_int_div_sign, arm_int_mod_sign, - arm_int_mod, PC_OFFSET) +from pypy.jit.backend.arm.arch import WORD, PC_OFFSET from pypy.jit.backend.arm.helper.assembler import (gen_emit_op_by_helper_call, gen_emit_op_unary_cmp, From noreply at buildbot.pypy.org Tue Dec 6 17:08:47 2011 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 6 Dec 2011 17:08:47 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: fix an issue with the private attribute renaming going on when reading a field Message-ID: <20111206160847.0950A8205C@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50212:9e1351914eb6 Date: 2011-12-06 17:08 +0100 http://bitbucket.org/pypy/pypy/changeset/9e1351914eb6/ Log: fix an issue with the private attribute renaming going on when reading a field that starts with '__' and does not end with '__' in the context of a class diff --git a/pypy/jit/backend/arm/codebuilder.py b/pypy/jit/backend/arm/codebuilder.py --- a/pypy/jit/backend/arm/codebuilder.py +++ b/pypy/jit/backend/arm/codebuilder.py @@ -3,16 +3,16 @@ from pypy.jit.backend.arm import registers as reg from pypy.jit.backend.arm.arch import (WORD, FUNC_ALIGN) from pypy.jit.backend.arm.instruction_builder import define_instructions - +from pypy.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin +from pypy.jit.metainterp.history import ConstInt, BoxInt, AbstractFailDescr +from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.rmmap import alloc, PTR from pypy.rpython.annlowlevel import llhelper from pypy.rpython.lltypesystem import lltype, rffi, llmemory -from pypy.jit.metainterp.history import ConstInt, BoxInt, AbstractFailDescr -from pypy.rlib.objectmodel import we_are_translated -from pypy.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin from pypy.tool.udir import udir +from pypy.translator.tool.cbuild import ExternalCompilationInfo -__clear_cache = rffi.llexternal( +clear_cache = rffi.llexternal( "__clear_cache", [llmemory.Address, llmemory.Address], lltype.Void, @@ -292,9 +292,10 @@ return rawstart def clear_cache(self, addr): - startaddr = rffi.cast(llmemory.Address, addr) - endaddr = rffi.cast(llmemory.Address, addr + self.get_relative_pos()) - __clear_cache(startaddr, endaddr) + if we_are_translated(): + startaddr = rffi.cast(llmemory.Address, addr) + endaddr = rffi.cast(llmemory.Address, addr + self.get_relative_pos()) + clear_cache(startaddr, endaddr) def copy_to_raw_memory(self, addr): self._copy_to_raw_memory(addr) From noreply at buildbot.pypy.org Tue Dec 6 17:40:32 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Tue, 6 Dec 2011 17:40:32 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: merge default Message-ID: 
<20111206164032.E6CDB8205C@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50213:b853ad20830d Date: 2011-12-06 17:39 +0100 http://bitbucket.org/pypy/pypy/changeset/b853ad20830d/ Log: merge default diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -379,12 +379,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -823,6 +823,15 @@ bool(v.value)): # store a non-NULL self._gen_write_barrier(newops, op.getarg(0), v) op = op.copy_and_change(rop.SETFIELD_RAW) + # ---------- write barrier for SETINTERIORFIELD_GC ------ + if op.getopnum() == rop.SETINTERIORFIELD_GC: + val = op.getarg(0) + if val is not last_malloc: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self._gen_write_barrier(newops, op.getarg(0), v) + op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) # ---------- write barrier for SETARRAYITEM_GC ---------- if op.getopnum() == rop.SETARRAYITEM_GC: val = op.getarg(0) diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -571,6 +571,28 @@ assert operations[1].getarg(2) == v_value assert operations[1].getdescr() == array_descr + def test_rewrite_assembler_5(self): + S = lltype.GcStruct('S') + A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) + interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, + A.OF, 'x') + wbdescr = self.gc_ll_descr.write_barrier_descr + ops = parse(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + expected = parse(""" + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + operations = get_deep_immutable_oplist(ops.operations) + operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, + operations, []) + equaloplists(operations, expected.operations) + def test_rewrite_assembler_initialization_store(self): S = lltype.GcStruct('S', ('parent', OBJECT), ('x', lltype.Signed)) diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -490,8 +490,8 @@ check(a[i].y.i == n + i * 100 + 2) check(a[i].z.i == n + i * 100 + 3) i += 1 + n -= x.foo return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s - f(123, *[None]*11) # check that the check() are ok return None, f, None def test_compile_framework_7_interior(self): diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py 
+++ b/pypy/jit/metainterp/compile.py @@ -298,7 +298,7 @@ pass class ResumeGuardDescr(ResumeDescr): - _counter = 0 # if < 0, there is one counter per value; + _counter = 0 # on a GUARD_VALUE, there is one counter per value; _counters = None # they get stored in _counters then. # this class also gets the following attributes stored by resume.py code @@ -309,10 +309,13 @@ rd_virtuals = None rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) - CNT_INT = -0x20000000 - CNT_REF = -0x40000000 - CNT_FLOAT = -0x60000000 - CNT_MASK = 0x1FFFFFFF + CNT_BASE_MASK = 0x0FFFFFFF # the base counter value + CNT_BUSY_FLAG = 0x10000000 # if set, busy tracing from the guard + CNT_TYPE_MASK = 0x60000000 # mask for the type + + CNT_INT = 0x20000000 + CNT_REF = 0x40000000 + CNT_FLOAT = 0x60000000 def store_final_boxes(self, guard_op, boxes): guard_op.setfailargs(boxes) @@ -326,6 +329,8 @@ except ValueError: return # xxx probably very rare else: + if i > self.CNT_BASE_MASK: + return # probably never, but better safe than sorry if box.type == history.INT: cnt = self.CNT_INT elif box.type == history.REF: @@ -334,14 +339,17 @@ cnt = self.CNT_FLOAT else: assert 0, box.type - # we build the following value for _counter, which is always - # a negative value + assert cnt > self.CNT_BASE_MASK self._counter = cnt | i def handle_fail(self, metainterp_sd, jitdriver_sd): if self.must_compile(metainterp_sd, jitdriver_sd): - return self._trace_and_compile_from_bridge(metainterp_sd, - jitdriver_sd) + self.start_compiling() + try: + return self._trace_and_compile_from_bridge(metainterp_sd, + jitdriver_sd) + finally: + self.done_compiling() else: from pypy.jit.metainterp.blackhole import resume_in_blackhole resume_in_blackhole(metainterp_sd, jitdriver_sd, self) @@ -359,12 +367,22 @@ def must_compile(self, metainterp_sd, jitdriver_sd): trace_eagerness = jitdriver_sd.warmstate.trace_eagerness - if self._counter >= 0: + # + if self._counter <= self.CNT_BASE_MASK: + # simple case: just counting from 0 to trace_eagerness self._counter += 1 return self._counter >= trace_eagerness - else: - index = self._counter & self.CNT_MASK - typetag = self._counter & ~ self.CNT_MASK + # + # do we have the BUSY flag? If so, we're tracing right now, e.g. in an + # outer invocation of the same function, so don't trace again for now. + elif self._counter & self.CNT_BUSY_FLAG: + return False + # + else: # we have a GUARD_VALUE that fails. Make a _counters instance + # (only now, when the guard is actually failing at least once), + # and use it to record some statistics about the failing values. + index = self._counter & self.CNT_BASE_MASK + typetag = self._counter & self.CNT_TYPE_MASK counters = self._counters if typetag == self.CNT_INT: intvalue = metainterp_sd.cpu.get_latest_value_int(index) @@ -391,7 +409,16 @@ assert 0, typetag return counter >= trace_eagerness - def reset_counter_from_failure(self): + def start_compiling(self): + # start tracing and compiling from this guard. + self._counter |= self.CNT_BUSY_FLAG + + def done_compiling(self): + # done tracing and compiling from this guard. Either the bridge has + # been successfully compiled, in which case whatever value we store + # in self._counter will not be seen any more, or not, in which case + # we should reset the counter to 0, in order to wait a bit until the + # next attempt. 
if self._counter >= 0: self._counter = 0 self._counters = None @@ -608,9 +635,6 @@ metainterp.set_compiled_merge_points(self.original_greenkey, old_loop_tokens) - def reset_counter_from_failure(self): - pass - def compile_new_bridge(metainterp, old_loop_tokens, resumekey, retraced=False): """Try to compile a new bridge leading from the beginning of the history diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1790,7 +1790,6 @@ self.staticdata.profiler.count(reason) debug_print('~~~ ABORTING TRACING') self.staticdata.stats.aborted() - self.resumekey.reset_counter_from_failure() def blackhole_if_trace_too_long(self): warmrunnerstate = self.jitdriver_sd.warmstate diff --git a/pypy/jit/metainterp/test/test_math.py b/pypy/jit/metainterp/test/test_math.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/test_math.py @@ -0,0 +1,47 @@ +import math +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from pypy.rlib.rfloat import isinf, isnan, INFINITY, NAN + +class MathTests: + + def test_math_sqrt(self): + def f(x): + try: + return math.sqrt(x) + except ValueError: + return -INFINITY + + res = self.interp_operations(f, [0.0]) + assert res == 0.0 + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [25.0]) + assert res == 5.0 + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [-0.0]) + assert str(res) == '-0.0' + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [1000000.0]) + assert res == 1000.0 + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [-1.0]) + assert res == -INFINITY + self.check_operations_history(call_pure=0) + # + res = self.interp_operations(f, [INFINITY]) + assert isinf(res) and not isnan(res) and res > 0.0 + self.check_operations_history(call_pure=0) + # + res = self.interp_operations(f, [NAN]) + assert isnan(res) and not isinf(res) + self.check_operations_history(call_pure=0) + + +class TestOOtype(MathTests, OOJitMixin): + pass + +class TestLLtype(MathTests, LLJitMixin): + pass diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -1238,6 +1238,31 @@ self.meta_interp(portal, [0, 0, 0], inline=True) self.check_resops(call_may_force=0, call=0) + def test_dont_repeatedly_trace_from_the_same_guard(self): + driver = JitDriver(greens = [], reds = ['level', 'i']) + + def portal(level): + if level == 0: + i = -10 + else: + i = 0 + # + while True: + driver.jit_merge_point(level=level, i=i) + if level == 25: + return 42 + i += 1 + if i <= 0: # <- guard + continue # first make a loop + else: + # then we fail the guard above, doing a recursive call, + # which will itself fail the same guard above, and so on + return portal(level + 1) + + self.meta_interp(portal, [0]) + self.check_loop_count_at_most(2) # and not, e.g., 24 + + class TestLLtype(RecursiveTests, LLJitMixin): pass diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -13,12 +13,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 
and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -19,11 +19,22 @@ def test_keyerror_without_factory(self): from _collections import defaultdict - d1 = defaultdict() - for key in ['foo', (1,)]: - try: - d1[key] - except KeyError, err: - assert err.args[0] == key - else: - assert 0, "expected KeyError" + for d1 in [defaultdict(), defaultdict(None)]: + for key in ['foo', (1,)]: + try: + d1[key] + except KeyError, err: + assert err.args[0] == key + else: + assert 0, "expected KeyError" + + def test_noncallable(self): + from _collections import defaultdict + raises(TypeError, defaultdict, [('a', 5)]) + d = defaultdict(None, [('a', 5)]) + assert d.items() == [('a', 5)] + + def test_kwds(self): + from _collections import defaultdict + d = defaultdict(default_factory=5) + assert d.keys() == ['default_factory'] diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -1,7 +1,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.module._socket.interp_socket import converted_error, W_RSocket from pypy.rlib import rsocket -from pypy.rlib.rsocket import SocketError +from pypy.rlib.rsocket import SocketError, INVALID_SOCKET from pypy.interpreter.error import OperationError def gethostname(space): @@ -284,7 +284,7 @@ space.wrap(socktype), space.wrap(protocol), space.wrap(canonname), - addr.as_object(-1, space)]) # -1 as per cpython + addr.as_object(INVALID_SOCKET, space)]) # -1 as per cpython for (family, socktype, protocol, canonname, addr) in lst] return space.newlist(lst1) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -32,6 +32,7 @@ 'int_': 'interp_boxes.W_LongBox', 'inexact': 'interp_boxes.W_InexactBox', 'floating': 'interp_boxes.W_FloatingBox', + 'float32': 'interp_boxes.W_Float32Box', 'float64': 'interp_boxes.W_Float64Box', } @@ -76,4 +77,5 @@ 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', 'arange': 'app_numpy.arange', + 'reshape': 'app_numpy.reshape', } diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -36,3 +36,39 @@ j += 1 i += step return arr + +def reshape(a, shape): + '''reshape(a, newshape) + Gives a new shape to an array without changing its data. + + Parameters + ---------- + a : array_like + Array to be reshaped. + newshape : int or tuple of ints + The new shape should be compatible with the original shape. If + an integer, then the result will be a 1-D array of that length. + One shape dimension can be -1. In this case, the value is inferred + from the length of the array and remaining dimensions. + + Returns + ------- + reshaped_array : ndarray + This will be a new view object if possible; otherwise, it will + be a copy. 
+ + + See Also + -------- + ndarray.reshape : Equivalent method. + + Notes + ----- + + It is not always possible to change the shape of an array without + copying the data. If you want an error to be raise if the data is copied, + you should assign the new shape to the shape attribute of the array +''' + if not hasattr(a, 'reshape'): + a = numpypy.array(a) + return a.reshape(shape) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -258,6 +258,8 @@ W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, __module__ = "numpypy", + + __new__ = interp2app(W_Float32Box.descr__new__.im_func), ) W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -98,6 +98,107 @@ endshape[i] = remainder[i] return endshape +def get_shape_from_iterable(space, old_size, w_iterable): + new_size = 0 + new_shape = [] + if space.isinstance_w(w_iterable, space.w_int): + new_size = space.int_w(w_iterable) + if new_size < 0: + new_size = old_size + new_shape = [new_size, ] + else: + neg_dim = -1 + batch = space.listview(w_iterable) + #Allow for shape = (1,2,3) or shape = ((1,2,3)) + if len(batch) > 1 and space.issequence_w(batch[0]): + batch = space.listview(batch[0]) + new_size = 1 + if len(batch) < 1: + if old_size == 1: + #Scalars can have an empty size. + new_size = 1 + else: + new_size = 0 + new_shape = [] + i = 0 + for elem in batch: + s = space.int_w(elem) + if s < 0: + if neg_dim >= 0: + raise OperationError(space.w_ValueError, space.wrap( + "can only specify one unknown dimension")) + s = 1 + neg_dim = i + new_size *= s + new_shape.append(s) + i += 1 + if neg_dim >= 0: + new_shape[neg_dim] = old_size / new_size + new_size *= new_shape[neg_dim] + if new_size != old_size: + raise OperationError(space.w_ValueError, + space.wrap("total size of new array must be unchanged")) + return new_shape + +#Recalculating strides. Find the steps that the iteration does for each +#dimension, given the stride and shape. Then try to create a new stride that +#fits the new shape, using those steps. If there is a shape/step mismatch +#(meaning that the realignment of elements crosses from one step into another) +#return None so that the caller can raise an exception. 
+def calc_new_strides(new_shape, old_shape, old_strides): + #Return the proper strides for new_shape, or None + # if the mapping crosses stepping boundaries + + #Assumes that prod(old_shape) ==prod(new_shape), len(old_shape) > 1 and + # len(new_shape) > 0 + steps = [] + last_step = 1 + oldI = 0 + new_strides = [] + if old_strides[0] < old_strides[-1]: + for i in range(len(old_shape)): + steps.append(old_strides[i] / last_step) + last_step *= old_shape[i] + cur_step = steps[0] + n_new_elems_used = 1 + n_old_elems_to_use = old_shape[0] + for s in new_shape: + new_strides.append(cur_step * n_new_elems_used) + n_new_elems_used *= s + while n_new_elems_used > n_old_elems_to_use: + oldI += 1 + if steps[oldI] != steps[oldI - 1]: + return None + n_old_elems_to_use *= old_shape[oldI] + if n_new_elems_used == n_old_elems_to_use: + oldI += 1 + if oldI >= len(old_shape): + break + cur_step = steps[oldI] + n_old_elems_to_use *= old_shape[oldI] + else: + for i in range(len(old_shape) - 1, -1, -1): + steps.insert(0, old_strides[i] / last_step) + last_step *= old_shape[i] + cur_step = steps[-1] + n_new_elems_used = 1 + oldI = -1 + n_old_elems_to_use = old_shape[-1] + for s in new_shape[::-1]: + new_strides.insert(0, cur_step * n_new_elems_used) + n_new_elems_used *= s + while n_new_elems_used > n_old_elems_to_use: + oldI -= 1 + if steps[oldI] != steps[oldI + 1]: + return None + n_old_elems_to_use *= old_shape[oldI] + if n_new_elems_used == n_old_elems_to_use: + oldI -= 1 + if oldI < -len(old_shape): + break + cur_step = steps[oldI] + n_old_elems_to_use *= old_shape[oldI] + return new_strides # Iterators for arrays # -------------------- @@ -444,6 +545,7 @@ return False i = i.next(shapelen) return True + def descr_all(self, space): return space.wrap(self._all()) @@ -459,6 +561,7 @@ return True i = i.next(shapelen) return False + def descr_any(self, space): return space.wrap(self._any()) @@ -483,6 +586,12 @@ def descr_get_shape(self, space): return space.newtuple([space.wrap(i) for i in self.shape]) + def descr_set_shape(self, space, w_iterable): + concrete = self.get_concrete() + new_shape = get_shape_from_iterable(space, + concrete.find_size(), w_iterable) + concrete.setshape(space, new_shape) + def descr_get_size(self, space): return space.wrap(self.find_size()) @@ -730,10 +839,44 @@ strides += self.strides[s:] backstrides += self.backstrides[s:] new_sig = signature.Signature.find_sig([ - NDimSlice.signature, self.signature, + W_NDimSlice.signature, self.signature, ]) - return NDimSlice(self, new_sig, start, strides[:], backstrides[:], - shape[:]) + return W_NDimSlice(self, new_sig, start, strides[:], backstrides[:], + shape[:]) + + def descr_reshape(self, space, w_args): + """reshape(...) + a.reshape(shape) + + Returns an array containing the same data with a new shape. + + Refer to `%s.reshape` for full documentation. + + See Also + -------- + numpy.reshape : equivalent function +""" % 'numpypy' + concrete = self.get_concrete() + new_shape = get_shape_from_iterable(space, + concrete.find_size(), w_args) + #Since we got to here, prod(new_shape) == self.size + new_strides = calc_new_strides(new_shape, + concrete.shape, concrete.strides) + if new_strides: + #We can create a view, strides somehow match up. 
+ new_sig = signature.Signature.find_sig([ + W_NDimSlice.signature, self.signature, ]) + ndims = len(new_shape) + new_backstrides = [0] * ndims + for nd in range(ndims): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + arr = W_NDimSlice(self, new_sig, self.start, new_strides, + new_backstrides, new_shape) + else: + #Create copy with contiguous data + arr = concrete.copy() + arr.setshape(space, new_shape) + return arr def descr_mean(self, space): return space.div(self.descr_sum(space), space.wrap(self.find_size())) @@ -751,7 +894,7 @@ if len(concrete.shape) < 2: return space.wrap(self) new_sig = signature.Signature.find_sig([ - NDimSlice.signature, self.signature + W_NDimSlice.signature, self.signature ]) strides = [] backstrides = [] @@ -760,8 +903,8 @@ strides.append(concrete.strides[i]) backstrides.append(concrete.backstrides[i]) shape.append(concrete.shape[i]) - return space.wrap(NDimSlice(concrete, new_sig, self.start, strides[:], - backstrides[:], shape[:])) + return space.wrap(W_NDimSlice(concrete, new_sig, self.start, strides[:], + backstrides[:], shape[:])) def descr_get_flatiter(self, space): return space.wrap(W_FlatIterator(self)) @@ -830,6 +973,11 @@ def debug_repr(self): return 'Scalar' + def setshape(self, space, new_shape): + # In order to get here, we already checked that prod(new_shape)==1, + # so in order to have a consistent API, let it go through. + pass + class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs @@ -1022,13 +1170,46 @@ return space.wrap(self.shape[0]) return space.wrap(1) + def setshape(self, space, new_shape): + if len(self.shape) < 1: + return + elif len(self.shape) < 2: + #TODO: this code could be refactored into calc_strides + #but then calc_strides would have to accept a stepping factor + strides = [] + backstrides = [] + s = self.strides[0] + if self.order == 'C': + new_shape.reverse() + for sh in new_shape: + strides.append(s) + backstrides.append(s * (sh - 1)) + s *= sh + if self.order == 'C': + strides.reverse() + backstrides.reverse() + new_shape.reverse() + self.strides = strides[:] + self.backstrides = backstrides[:] + self.shape = new_shape[:] + return + new_strides = calc_new_strides(new_shape, self.shape, self.strides) + if new_strides is None: + raise OperationError(space.w_AttributeError, space.wrap( + "incompatible shape for a non-contiguous array")) + new_backstrides = [0] * len(new_shape) + for nd in range(len(new_shape)): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + self.strides = new_strides[:] + self.backstrides = new_backstrides[:] + self.shape = new_shape[:] -class NDimSlice(ViewArray): +class W_NDimSlice(ViewArray): signature = signature.BaseSignature() def __init__(self, parent, signature, start, strides, backstrides, shape): - if isinstance(parent, NDimSlice): + if isinstance(parent, W_NDimSlice): parent = parent.parent ViewArray.__init__(self, parent, signature, strides, backstrides, shape) self.start = start @@ -1077,9 +1258,11 @@ def copy(self): array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) iter = self.start_iter() + a_iter = array.start_iter() while not iter.done(): - array.setitem(iter.offset, self.getitem(iter.offset)) + array.setitem(a_iter.offset, self.getitem(iter.offset)) iter = iter.next(len(self.shape)) + a_iter = a_iter.next(len(array.shape)) return array class W_NDimArray(BaseArray): @@ -1137,6 +1320,10 @@ return ArrayIterator(self.size) raise NotImplementedError # use ViewIterator simply, test it + def 
setshape(self, space, new_shape): + self.shape = new_shape + self.calc_strides(new_shape) + def debug_repr(self): return 'Array' @@ -1261,7 +1448,8 @@ __debug_repr__ = interp2app(BaseArray.descr_debug_repr), dtype = GetSetProperty(BaseArray.descr_get_dtype), - shape = GetSetProperty(BaseArray.descr_get_shape), + shape = GetSetProperty(BaseArray.descr_get_shape, + BaseArray.descr_set_shape), size = GetSetProperty(BaseArray.descr_get_size), T = GetSetProperty(BaseArray.descr_get_transpose), @@ -1279,6 +1467,7 @@ dot = interp2app(BaseArray.descr_dot), copy = interp2app(BaseArray.descr_copy), + reshape = interp2app(BaseArray.descr_reshape), ) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -241,6 +241,13 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 + def test_float32(self): + import numpypy as numpy + + assert numpy.float32.mro() == [numpy.float32, numpy.floating, numpy.inexact, numpy.number, numpy.generic, object] + + assert numpy.float32(12) == numpy.float64(12) + def test_float64(self): import numpypy as numpy diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -158,6 +158,13 @@ assert shape_agreement(self.space, [5, 2], [4, 3, 5, 2]) == [4, 3, 5, 2] + def test_calc_new_strides(self): + from pypy.module.micronumpy.interp_numarray import calc_new_strides + assert calc_new_strides([2, 4], [4, 2], [4, 2]) == [8, 2] + assert calc_new_strides([2, 4, 3], [8, 3], [1, 16]) == [1, 2, 16] + assert calc_new_strides([2, 3, 4], [8, 3], [1, 16]) is None + assert calc_new_strides([24], [2, 4, 3], [48, 6, 1]) is None + assert calc_new_strides([24], [2, 4, 3], [24, 6, 2]) == [2] class AppTestNumArray(BaseNumpyAppTest): def test_ndarray(self): @@ -216,8 +223,8 @@ assert a[2] == 4 def test_copy(self): - from numpypy import array - a = array(range(5)) + from numpypy import arange, array + a = arange(5) b = a.copy() for i in xrange(5): assert b[i] == a[i] @@ -227,6 +234,11 @@ a = array(1) assert a.copy() == a + a = arange(8) + b = a[::2] + c = b.copy() + assert (c == b).all() + def test_iterator_init(self): from numpypy import array a = array(range(5)) @@ -339,6 +351,76 @@ c = a[:3] assert c.shape == (3,) + def test_set_shape(self): + from numpypy import array, zeros + a = array([]) + a.shape = [] + a = array(range(12)) + a.shape = (3, 4) + assert (a == [range(4), range(4, 8), range(8, 12)]).all() + a.shape = (3, 2, 2) + assert a[1, 1, 1] == 7 + a.shape = (3, -1, 2) + assert a.shape == (3, 2, 2) + a.shape = 12 + assert a.shape == (12, ) + exc = raises(ValueError, "a.shape = 10") + assert str(exc.value) == "total size of new array must be unchanged" + a = array(3) + a.shape = () + #numpy allows this + a.shape = (1,) + + def test_reshape(self): + from numpypy import array, zeros + a = array(range(12)) + exc = raises(ValueError, "b = a.reshape((3, 10))") + assert str(exc.value) == "total size of new array must be unchanged" + b = a.reshape((3, 4)) + assert b.shape == (3, 4) + assert (b == [range(4), range(4, 8), range(8, 12)]).all() + b[:, 0] = 1000 + assert (a == [1000, 1, 2, 3, 1000, 5, 6, 7, 1000, 9, 10, 11]).all() + a = zeros((4, 2, 3)) + a.shape = (12, 2) + + def test_slice_reshape(self): + from numpypy import zeros, arange + a = zeros((4, 2, 3)) + b = a[::2, :, :] + 
b.shape = (2, 6) + exc = raises(AttributeError, "b.shape = 12") + assert str(exc.value) == \ + "incompatible shape for a non-contiguous array" + b = a[::2, :, :].reshape((2, 6)) + assert b.shape == (2, 6) + b = arange(20)[1:17:2] + b.shape = (4, 2) + assert (b == [[1, 3], [5, 7], [9, 11], [13, 15]]).all() + c = b.reshape((2, 4)) + assert (c == [[1, 3, 5, 7], [9, 11, 13, 15]]).all() + + z = arange(96).reshape((12, -1)) + assert z.shape == (12, 8) + y = z.reshape((4, 3, 8)) + v = y[:, ::2, :] + w = y.reshape(96) + u = v.reshape(64) + assert y[1, 2, 1] == z[5, 1] + y[1, 2, 1] = 1000 + #z, y, w, v are views of eachother + assert z[5, 1] == 1000 + assert v[1, 1, 1] == 1000 + assert w[41] == 1000 + #u is not a view, it is a copy! + assert u[25] == 41 + + def test_reshape_varargs(self): + skip("How do I do varargs in rpython? reshape should accept a" + " variable number of arguments") + z = arange(96).reshape(12, -1) + y = z.reshape(4, 3, 8) + def test_add(self): from numpypy import array a = array(range(5)) @@ -1155,3 +1237,14 @@ a = arange(0, 0.8, 0.1) assert len(a) == 8 assert arange(False, True, True).dtype is dtype(int) + + +class AppTestRanges(BaseNumpyAppTest): + def test_app_reshape(self): + from numpypy import arange, array, dtype, reshape + a = arange(12) + b = reshape(a, (3, 4)) + assert b.shape == (3, 4) + a = range(12) + b = reshape(a, (3, 4)) + assert b.shape == (3, 4) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -8,13 +8,12 @@ from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.metainterp.warmspot import reset_stats -from pypy.module.micronumpy import interp_boxes, interp_ufuncs, signature -from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, - FloatObject, IntObject, BoolObject, Parser, InterpreterState) -from pypy.module.micronumpy.interp_numarray import (W_NDimArray, NDimSlice, +from pypy.module.micronumpy import interp_boxes +from pypy.module.micronumpy.compile import (FakeSpace, + IntObject, Parser, InterpreterState) +from pypy.module.micronumpy.interp_numarray import (W_NDimArray, BaseArray) from pypy.rlib.nonconst import NonConstant -from pypy.rpython.annlowlevel import llstr, hlstr class TestNumpyJIt(LLJitMixin): @@ -186,7 +185,8 @@ # sure it was optimized correctly. # XXX the comment above is wrong now. 
We need preferrably a way to # count the two loops separately - self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, + py.test.skip("counting exact number of classes is nonsense") + self.check_resops({'setarrayitem_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 35, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -418,7 +418,7 @@ if _MSVC: def invalid_socket(fd): return fd == INVALID_SOCKET - INVALID_SOCKET = intmask(cConfig.INVALID_SOCKET) + INVALID_SOCKET = r_uint(cConfig.INVALID_SOCKET) else: def invalid_socket(fd): return fd < 0 diff --git a/pypy/rlib/rsocket.py b/pypy/rlib/rsocket.py --- a/pypy/rlib/rsocket.py +++ b/pypy/rlib/rsocket.py @@ -20,6 +20,7 @@ from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.rffi import sizeof, offsetof +INVALID_SOCKET = _c.INVALID_SOCKET def mallocbuf(buffersize): return lltype.malloc(rffi.CCHARP.TO, buffersize, flavor='raw') From noreply at buildbot.pypy.org Tue Dec 6 17:45:19 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Dec 2011 17:45:19 +0100 (CET) Subject: [pypy-commit] pypy default: Test and fix. Message-ID: <20111206164519.055F08205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50214:b0cec017f8a9 Date: 2011-12-06 17:44 +0100 http://bitbucket.org/pypy/pypy/changeset/b0cec017f8a9/ Log: Test and fix. diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -406,7 +406,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -38,7 +38,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -38,3 +38,22 @@ from _collections import defaultdict d = defaultdict(default_factory=5) assert d.keys() == ['default_factory'] + + def test_copy(self): + import _collections + def f(): + return 42 + d = _collections.defaultdict(f, {2: 3}) + # + d1 = d.copy() + assert type(d1) is _collections.defaultdict + assert len(d1) == 1 + assert d1[2] == 3 + assert d1[3] == 42 + # + import copy + d2 = copy.deepcopy(d) + assert type(d2) is _collections.defaultdict + assert len(d2) == 1 + assert d2[2] == 3 + assert d2[3] == 42 diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -0,0 +1,27 @@ + +""" +Extra tests for the pure Python PyPy _collections module +(not used in normal PyPy's) +""" + +from pypy.conftest import gettestobjspace + +class AppTestcStringIO: + def 
test_copy(self): + import _collections + def f(): + return 42 + d = _collections.defaultdict(f, {2: 3}) + # + d1 = d.copy() + assert type(d1) is _collections.defaultdict + assert len(d1) == 1 + assert d1[2] == 3 + assert d1[3] == 42 + # + import copy + d2 = copy.deepcopy(d) + assert type(d2) is _collections.defaultdict + assert len(d2) == 1 + assert d2[2] == 3 + assert d2[3] == 42 From noreply at buildbot.pypy.org Tue Dec 6 18:07:13 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Dec 2011 18:07:13 +0100 (CET) Subject: [pypy-commit] pypy default: Translation fix. No cookie Message-ID: <20111206170713.57CA58205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50215:a88a15a5ccb7 Date: 2011-12-06 18:08 +0100 http://bitbucket.org/pypy/pypy/changeset/a88a15a5ccb7/ Log: Translation fix. No cookie diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -184,7 +184,8 @@ n_new_elems_used = 1 oldI = -1 n_old_elems_to_use = old_shape[-1] - for s in new_shape[::-1]: + for i in range(len(new_shape) - 1, -1, -1): + s = new_shape[i] new_strides.insert(0, cur_step * n_new_elems_used) n_new_elems_used *= s while n_new_elems_used > n_old_elems_to_use: From noreply at buildbot.pypy.org Tue Dec 6 18:27:26 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Tue, 6 Dec 2011 18:27:26 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: Merge with default Message-ID: <20111206172726.EF7E38205C@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50216:7c2b76526385 Date: 2011-12-06 18:26 +0100 http://bitbucket.org/pypy/pypy/changeset/7c2b76526385/ Log: Merge with default diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -406,7 +406,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -38,7 +38,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -38,3 +38,22 @@ from _collections import defaultdict d = defaultdict(default_factory=5) assert d.keys() == ['default_factory'] + + def test_copy(self): + import _collections + def f(): + return 42 + d = _collections.defaultdict(f, {2: 3}) + # + d1 = d.copy() + assert type(d1) is _collections.defaultdict + assert len(d1) == 1 + assert d1[2] == 3 + assert d1[3] == 42 + # + import copy + d2 = copy.deepcopy(d) + assert type(d2) is _collections.defaultdict + assert len(d2) == 1 + assert d2[2] == 3 + assert d2[3] == 42 diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -184,7 +184,8 @@ n_new_elems_used = 1 oldI = -1 n_old_elems_to_use 
= old_shape[-1] - for s in new_shape[::-1]: + for i in range(len(new_shape) - 1, -1, -1): + s = new_shape[i] new_strides.insert(0, cur_step * n_new_elems_used) n_new_elems_used *= s while n_new_elems_used > n_old_elems_to_use: diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -0,0 +1,27 @@ + +""" +Extra tests for the pure Python PyPy _collections module +(not used in normal PyPy's) +""" + +from pypy.conftest import gettestobjspace + +class AppTestcStringIO: + def test_copy(self): + import _collections + def f(): + return 42 + d = _collections.defaultdict(f, {2: 3}) + # + d1 = d.copy() + assert type(d1) is _collections.defaultdict + assert len(d1) == 1 + assert d1[2] == 3 + assert d1[3] == 42 + # + import copy + d2 = copy.deepcopy(d) + assert type(d2) is _collections.defaultdict + assert len(d2) == 1 + assert d2[2] == 3 + assert d2[3] == 42 From noreply at buildbot.pypy.org Tue Dec 6 19:38:36 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 6 Dec 2011 19:38:36 +0100 (CET) Subject: [pypy-commit] pypy default: whitespace and other small codecleanups Message-ID: <20111206183836.375B48205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50217:1a419a40ed62 Date: 2011-12-06 13:38 -0500 http://bitbucket.org/pypy/pypy/changeset/1a419a40ed62/ Log: whitespace and other small codecleanups diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -37,10 +37,11 @@ i += step return arr + def reshape(a, shape): '''reshape(a, newshape) Gives a new shape to an array without changing its data. - + Parameters ---------- a : array_like @@ -50,21 +51,21 @@ an integer, then the result will be a 1-D array of that length. One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions. - + Returns ------- reshaped_array : ndarray This will be a new view object if possible; otherwise, it will be a copy. - - + + See Also -------- ndarray.reshape : Equivalent method. - + Notes ----- - + It is not always possible to change the shape of an array without copying the data. If you want an error to be raise if the data is copied, you should assign the new shape to the shape attribute of the array diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -105,17 +105,17 @@ new_size = space.int_w(w_iterable) if new_size < 0: new_size = old_size - new_shape = [new_size, ] + new_shape = [new_size] else: neg_dim = -1 batch = space.listview(w_iterable) - #Allow for shape = (1,2,3) or shape = ((1,2,3)) + # Allow for shape = (1,2,3) or shape = ((1,2,3),) if len(batch) > 1 and space.issequence_w(batch[0]): batch = space.listview(batch[0]) new_size = 1 if len(batch) < 1: if old_size == 1: - #Scalars can have an empty size. + # Scalars can have an empty size. new_size = 1 else: new_size = 0 @@ -140,16 +140,16 @@ space.wrap("total size of new array must be unchanged")) return new_shape -#Recalculating strides. Find the steps that the iteration does for each -#dimension, given the stride and shape. Then try to create a new stride that -#fits the new shape, using those steps. 
If there is a shape/step mismatch -#(meaning that the realignment of elements crosses from one step into another) -#return None so that the caller can raise an exception. +# Recalculating strides. Find the steps that the iteration does for each +# dimension, given the stride and shape. Then try to create a new stride that +# fits the new shape, using those steps. If there is a shape/step mismatch +# (meaning that the realignment of elements crosses from one step into another) +# return None so that the caller can raise an exception. def calc_new_strides(new_shape, old_shape, old_strides): - #Return the proper strides for new_shape, or None - # if the mapping crosses stepping boundaries + # Return the proper strides for new_shape, or None if the mapping crosses + # stepping boundaries - #Assumes that prod(old_shape) ==prod(new_shape), len(old_shape) > 1 and + # Assumes that prod(old_shape) == prod(new_shape), len(old_shape) > 1, and # len(new_shape) > 0 steps = [] last_step = 1 @@ -589,7 +589,7 @@ def descr_set_shape(self, space, w_iterable): concrete = self.get_concrete() - new_shape = get_shape_from_iterable(space, + new_shape = get_shape_from_iterable(space, concrete.find_size(), w_iterable) concrete.setshape(space, new_shape) @@ -848,25 +848,26 @@ def descr_reshape(self, space, w_args): """reshape(...) a.reshape(shape) - + Returns an array containing the same data with a new shape. - - Refer to `%s.reshape` for full documentation. - + + Refer to `numpypy.reshape` for full documentation. + See Also -------- - numpy.reshape : equivalent function -""" % 'numpypy' + numpypy.reshape : equivalent function +""" concrete = self.get_concrete() - new_shape = get_shape_from_iterable(space, + new_shape = get_shape_from_iterable(space, concrete.find_size(), w_args) - #Since we got to here, prod(new_shape) == self.size - new_strides = calc_new_strides(new_shape, + # Since we got to here, prod(new_shape) == self.size + new_strides = calc_new_strides(new_shape, concrete.shape, concrete.strides) if new_strides: - #We can create a view, strides somehow match up. + # We can create a view, strides somehow match up. new_sig = signature.Signature.find_sig([ - W_NDimSlice.signature, self.signature, ]) + W_NDimSlice.signature, self.signature + ]) ndims = len(new_shape) new_backstrides = [0] * ndims for nd in range(ndims): @@ -874,7 +875,7 @@ arr = W_NDimSlice(self, new_sig, self.start, new_strides, new_backstrides, new_shape) else: - #Create copy with contiguous data + # Create copy with contiguous data arr = concrete.copy() arr.setshape(space, new_shape) return arr @@ -975,7 +976,7 @@ return 'Scalar' def setshape(self, space, new_shape): - # In order to get here, we already checked that prod(new_shape)==1, + # In order to get here, we already checked that prod(new_shape) == 1, # so in order to have a consistent API, let it go through. 
pass @@ -1175,8 +1176,8 @@ if len(self.shape) < 1: return elif len(self.shape) < 2: - #TODO: this code could be refactored into calc_strides - #but then calc_strides would have to accept a stepping factor + # TODO: this code could be refactored into calc_strides + # but then calc_strides would have to accept a stepping factor strides = [] backstrides = [] s = self.strides[0] diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -408,18 +408,19 @@ u = v.reshape(64) assert y[1, 2, 1] == z[5, 1] y[1, 2, 1] = 1000 - #z, y, w, v are views of eachother + # z, y, w, v are views of eachother assert z[5, 1] == 1000 assert v[1, 1, 1] == 1000 assert w[41] == 1000 - #u is not a view, it is a copy! + # u is not a view, it is a copy! assert u[25] == 41 def test_reshape_varargs(self): - skip("How do I do varargs in rpython? reshape should accept a" - " variable number of arguments") + skip("unimplemented yet") + from numpypy import arange z = arange(96).reshape(12, -1) y = z.reshape(4, 3, 8) + assert y.shape == (4, 3, 8) def test_add(self): from numpypy import array diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -185,8 +185,7 @@ # sure it was optimized correctly. # XXX the comment above is wrong now. We need preferrably a way to # count the two loops separately - py.test.skip("counting exact number of classes is nonsense") - self.check_resops({'setarrayitem_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 35, + self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, From noreply at buildbot.pypy.org Tue Dec 6 19:53:45 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 6 Dec 2011 19:53:45 +0100 (CET) Subject: [pypy-commit] pypy default: Implement varargs for ndarray.reshape and add a test for a missing error case. Message-ID: <20111206185345.9C9628205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50218:0ccb237a9de2 Date: 2011-12-06 13:53 -0500 http://bitbucket.org/pypy/pypy/changeset/0ccb237a9de2/ Log: Implement varargs for ndarray.reshape and add a test for a missing error case. diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -109,9 +109,6 @@ else: neg_dim = -1 batch = space.listview(w_iterable) - # Allow for shape = (1,2,3) or shape = ((1,2,3),) - if len(batch) > 1 and space.issequence_w(batch[0]): - batch = space.listview(batch[0]) new_size = 1 if len(batch) < 1: if old_size == 1: @@ -845,7 +842,7 @@ return W_NDimSlice(self, new_sig, start, strides[:], backstrides[:], shape[:]) - def descr_reshape(self, space, w_args): + def descr_reshape(self, space, args_w): """reshape(...) 
a.reshape(shape) @@ -857,9 +854,13 @@ -------- numpypy.reshape : equivalent function """ + if len(args_w) == 1: + w_shape = args_w[0] + else: + w_shape = space.newlist(args_w) concrete = self.get_concrete() new_shape = get_shape_from_iterable(space, - concrete.find_size(), w_args) + concrete.find_size(), w_shape) # Since we got to here, prod(new_shape) == self.size new_strides = calc_new_strides(new_shape, concrete.shape, concrete.strides) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -415,8 +415,9 @@ # u is not a view, it is a copy! assert u[25] == 41 + raises(ValueError, arange(10).reshape, (5, -1, -1)) + def test_reshape_varargs(self): - skip("unimplemented yet") from numpypy import arange z = arange(96).reshape(12, -1) y = z.reshape(4, 3, 8) From noreply at buildbot.pypy.org Tue Dec 6 20:06:50 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 6 Dec 2011 20:06:50 +0100 (CET) Subject: [pypy-commit] pypy default: remove some dead code, and added missing tests Message-ID: <20111206190650.774DE8205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50219:d62de66be4aa Date: 2011-12-06 14:06 -0500 http://bitbucket.org/pypy/pypy/changeset/d62de66be4aa/ Log: remove some dead code, and added missing tests diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -714,11 +714,6 @@ def _index_of_single_item(self, space, w_idx): if space.isinstance_w(w_idx, space.w_int): idx = space.int_w(w_idx) - if not self.shape: - if idx != 0: - raise OperationError(space.w_IndexError, - space.wrap("index out of range")) - return 0 if idx < 0: idx = self.shape[0] + idx if idx < 0 or idx >= self.shape[0]: diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -330,8 +330,8 @@ def test_scalar(self): from numpypy import array, dtype a = array(3) - #assert a[0] == 3 raises(IndexError, "a[0]") + raises(IndexError, "a[0] = 5") assert a.size == 1 assert a.shape == () assert a.dtype is dtype(int) @@ -415,6 +415,9 @@ # u is not a view, it is a copy! 
assert u[25] == 41 + a = zeros((5, 2)) + assert a.reshape(-1).shape == (10,) + raises(ValueError, arange(10).reshape, (5, -1, -1)) def test_reshape_varargs(self): From noreply at buildbot.pypy.org Tue Dec 6 20:28:20 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 6 Dec 2011 20:28:20 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-refactor-complex: merged default Message-ID: <20111206192820.61E0A8205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-dtype-refactor-complex Changeset: r50220:e4002f73fb25 Date: 2011-12-06 14:12 -0500 http://bitbucket.org/pypy/pypy/changeset/e4002f73fb25/ Log: merged default diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -379,12 +379,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): @@ -404,7 +406,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/pypy/jit/metainterp/test/test_math.py b/pypy/jit/metainterp/test/test_math.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/test_math.py @@ -0,0 +1,47 @@ +import math +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from pypy.rlib.rfloat import isinf, isnan, INFINITY, NAN + +class MathTests: + + def test_math_sqrt(self): + def f(x): + try: + return math.sqrt(x) + except ValueError: + return -INFINITY + + res = self.interp_operations(f, [0.0]) + assert res == 0.0 + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [25.0]) + assert res == 5.0 + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [-0.0]) + assert str(res) == '-0.0' + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [1000000.0]) + assert res == 1000.0 + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [-1.0]) + assert res == -INFINITY + self.check_operations_history(call_pure=0) + # + res = self.interp_operations(f, [INFINITY]) + assert isinf(res) and not isnan(res) and res > 0.0 + self.check_operations_history(call_pure=0) + # + res = self.interp_operations(f, [NAN]) + assert isnan(res) and not isinf(res) + self.check_operations_history(call_pure=0) + + +class TestOOtype(MathTests, OOJitMixin): + pass + +class TestLLtype(MathTests, LLJitMixin): + pass diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -13,12 +13,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + 
default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): @@ -36,7 +38,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -19,11 +19,41 @@ def test_keyerror_without_factory(self): from _collections import defaultdict - d1 = defaultdict() - for key in ['foo', (1,)]: - try: - d1[key] - except KeyError, err: - assert err.args[0] == key - else: - assert 0, "expected KeyError" + for d1 in [defaultdict(), defaultdict(None)]: + for key in ['foo', (1,)]: + try: + d1[key] + except KeyError, err: + assert err.args[0] == key + else: + assert 0, "expected KeyError" + + def test_noncallable(self): + from _collections import defaultdict + raises(TypeError, defaultdict, [('a', 5)]) + d = defaultdict(None, [('a', 5)]) + assert d.items() == [('a', 5)] + + def test_kwds(self): + from _collections import defaultdict + d = defaultdict(default_factory=5) + assert d.keys() == ['default_factory'] + + def test_copy(self): + import _collections + def f(): + return 42 + d = _collections.defaultdict(f, {2: 3}) + # + d1 = d.copy() + assert type(d1) is _collections.defaultdict + assert len(d1) == 1 + assert d1[2] == 3 + assert d1[3] == 42 + # + import copy + d2 = copy.deepcopy(d) + assert type(d2) is _collections.defaultdict + assert len(d2) == 1 + assert d2[2] == 3 + assert d2[3] == 42 diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -1,7 +1,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.module._socket.interp_socket import converted_error, W_RSocket from pypy.rlib import rsocket -from pypy.rlib.rsocket import SocketError +from pypy.rlib.rsocket import SocketError, INVALID_SOCKET from pypy.interpreter.error import OperationError def gethostname(space): @@ -284,7 +284,7 @@ space.wrap(socktype), space.wrap(protocol), space.wrap(canonname), - addr.as_object(-1, space)]) # -1 as per cpython + addr.as_object(INVALID_SOCKET, space)]) # -1 as per cpython for (family, socktype, protocol, canonname, addr) in lst] return space.newlist(lst1) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -32,6 +32,7 @@ 'int_': 'interp_boxes.W_LongBox', 'inexact': 'interp_boxes.W_InexactBox', 'floating': 'interp_boxes.W_FloatingBox', + 'float32': 'interp_boxes.W_Float32Box', 'float64': 'interp_boxes.W_Float64Box', 'complexfloating': 'interp_boxes.W_ComplexFloatingBox', 'complex128': 'interp_boxes.W_Complex128Box', @@ -78,4 +79,5 @@ 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', 'arange': 'app_numpy.arange', + 'reshape': 'app_numpy.reshape', } diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -36,3 +36,40 @@ j += 1 i += step return arr + + +def 
reshape(a, shape): + '''reshape(a, newshape) + Gives a new shape to an array without changing its data. + + Parameters + ---------- + a : array_like + Array to be reshaped. + newshape : int or tuple of ints + The new shape should be compatible with the original shape. If + an integer, then the result will be a 1-D array of that length. + One shape dimension can be -1. In this case, the value is inferred + from the length of the array and remaining dimensions. + + Returns + ------- + reshaped_array : ndarray + This will be a new view object if possible; otherwise, it will + be a copy. + + + See Also + -------- + ndarray.reshape : Equivalent method. + + Notes + ----- + + It is not always possible to change the shape of an array without + copying the data. If you want an error to be raise if the data is copied, + you should assign the new shape to the shape attribute of the array +''' + if not hasattr(a, 'reshape'): + a = numpypy.array(a) + return a.reshape(shape) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -285,6 +285,8 @@ W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, __module__ = "numpypy", + + __new__ = interp2app(W_Float32Box.descr__new__.im_func), ) W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -98,6 +98,105 @@ endshape[i] = remainder[i] return endshape +def get_shape_from_iterable(space, old_size, w_iterable): + new_size = 0 + new_shape = [] + if space.isinstance_w(w_iterable, space.w_int): + new_size = space.int_w(w_iterable) + if new_size < 0: + new_size = old_size + new_shape = [new_size] + else: + neg_dim = -1 + batch = space.listview(w_iterable) + new_size = 1 + if len(batch) < 1: + if old_size == 1: + # Scalars can have an empty size. + new_size = 1 + else: + new_size = 0 + new_shape = [] + i = 0 + for elem in batch: + s = space.int_w(elem) + if s < 0: + if neg_dim >= 0: + raise OperationError(space.w_ValueError, space.wrap( + "can only specify one unknown dimension")) + s = 1 + neg_dim = i + new_size *= s + new_shape.append(s) + i += 1 + if neg_dim >= 0: + new_shape[neg_dim] = old_size / new_size + new_size *= new_shape[neg_dim] + if new_size != old_size: + raise OperationError(space.w_ValueError, + space.wrap("total size of new array must be unchanged")) + return new_shape + +# Recalculating strides. Find the steps that the iteration does for each +# dimension, given the stride and shape. Then try to create a new stride that +# fits the new shape, using those steps. If there is a shape/step mismatch +# (meaning that the realignment of elements crosses from one step into another) +# return None so that the caller can raise an exception. 
+def calc_new_strides(new_shape, old_shape, old_strides): + # Return the proper strides for new_shape, or None if the mapping crosses + # stepping boundaries + + # Assumes that prod(old_shape) == prod(new_shape), len(old_shape) > 1, and + # len(new_shape) > 0 + steps = [] + last_step = 1 + oldI = 0 + new_strides = [] + if old_strides[0] < old_strides[-1]: + for i in range(len(old_shape)): + steps.append(old_strides[i] / last_step) + last_step *= old_shape[i] + cur_step = steps[0] + n_new_elems_used = 1 + n_old_elems_to_use = old_shape[0] + for s in new_shape: + new_strides.append(cur_step * n_new_elems_used) + n_new_elems_used *= s + while n_new_elems_used > n_old_elems_to_use: + oldI += 1 + if steps[oldI] != steps[oldI - 1]: + return None + n_old_elems_to_use *= old_shape[oldI] + if n_new_elems_used == n_old_elems_to_use: + oldI += 1 + if oldI >= len(old_shape): + break + cur_step = steps[oldI] + n_old_elems_to_use *= old_shape[oldI] + else: + for i in range(len(old_shape) - 1, -1, -1): + steps.insert(0, old_strides[i] / last_step) + last_step *= old_shape[i] + cur_step = steps[-1] + n_new_elems_used = 1 + oldI = -1 + n_old_elems_to_use = old_shape[-1] + for i in range(len(new_shape) - 1, -1, -1): + s = new_shape[i] + new_strides.insert(0, cur_step * n_new_elems_used) + n_new_elems_used *= s + while n_new_elems_used > n_old_elems_to_use: + oldI -= 1 + if steps[oldI] != steps[oldI + 1]: + return None + n_old_elems_to_use *= old_shape[oldI] + if n_new_elems_used == n_old_elems_to_use: + oldI -= 1 + if oldI < -len(old_shape): + break + cur_step = steps[oldI] + n_old_elems_to_use *= old_shape[oldI] + return new_strides # Iterators for arrays # -------------------- @@ -444,6 +543,7 @@ return False i = i.next(shapelen) return True + def descr_all(self, space): return space.wrap(self._all()) @@ -459,6 +559,7 @@ return True i = i.next(shapelen) return False + def descr_any(self, space): return space.wrap(self._any()) @@ -483,6 +584,12 @@ def descr_get_shape(self, space): return space.newtuple([space.wrap(i) for i in self.shape]) + def descr_set_shape(self, space, w_iterable): + concrete = self.get_concrete() + new_shape = get_shape_from_iterable(space, + concrete.find_size(), w_iterable) + concrete.setshape(space, new_shape) + def descr_get_size(self, space): return space.wrap(self.find_size()) @@ -607,11 +714,6 @@ def _index_of_single_item(self, space, w_idx): if space.isinstance_w(w_idx, space.w_int): idx = space.int_w(w_idx) - if not self.shape: - if idx != 0: - raise OperationError(space.w_IndexError, - space.wrap("index out of range")) - return 0 if idx < 0: idx = self.shape[0] + idx if idx < 0 or idx >= self.shape[0]: @@ -730,10 +832,49 @@ strides += self.strides[s:] backstrides += self.backstrides[s:] new_sig = signature.Signature.find_sig([ - NDimSlice.signature, self.signature, + W_NDimSlice.signature, self.signature, ]) - return NDimSlice(self, new_sig, start, strides[:], backstrides[:], - shape[:]) + return W_NDimSlice(self, new_sig, start, strides[:], backstrides[:], + shape[:]) + + def descr_reshape(self, space, args_w): + """reshape(...) + a.reshape(shape) + + Returns an array containing the same data with a new shape. + + Refer to `numpypy.reshape` for full documentation. 
+ + See Also + -------- + numpypy.reshape : equivalent function +""" + if len(args_w) == 1: + w_shape = args_w[0] + else: + w_shape = space.newlist(args_w) + concrete = self.get_concrete() + new_shape = get_shape_from_iterable(space, + concrete.find_size(), w_shape) + # Since we got to here, prod(new_shape) == self.size + new_strides = calc_new_strides(new_shape, + concrete.shape, concrete.strides) + if new_strides: + # We can create a view, strides somehow match up. + new_sig = signature.Signature.find_sig([ + W_NDimSlice.signature, self.signature + ]) + ndims = len(new_shape) + new_backstrides = [0] * ndims + for nd in range(ndims): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + arr = W_NDimSlice(self, new_sig, self.start, new_strides, + new_backstrides, new_shape) + else: + # Create copy with contiguous data + arr = concrete.copy() + arr.setshape(space, new_shape) + return arr def descr_mean(self, space): return space.div(self.descr_sum(space), space.wrap(self.find_size())) @@ -751,7 +892,7 @@ if len(concrete.shape) < 2: return space.wrap(self) new_sig = signature.Signature.find_sig([ - NDimSlice.signature, self.signature + W_NDimSlice.signature, self.signature ]) strides = [] backstrides = [] @@ -760,8 +901,8 @@ strides.append(concrete.strides[i]) backstrides.append(concrete.backstrides[i]) shape.append(concrete.shape[i]) - return space.wrap(NDimSlice(concrete, new_sig, self.start, strides[:], - backstrides[:], shape[:])) + return space.wrap(W_NDimSlice(concrete, new_sig, self.start, strides[:], + backstrides[:], shape[:])) def descr_get_flatiter(self, space): return space.wrap(W_FlatIterator(self)) @@ -830,6 +971,11 @@ def debug_repr(self): return 'Scalar' + def setshape(self, space, new_shape): + # In order to get here, we already checked that prod(new_shape) == 1, + # so in order to have a consistent API, let it go through. 
+ pass + class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs @@ -1022,13 +1168,46 @@ return space.wrap(self.shape[0]) return space.wrap(1) + def setshape(self, space, new_shape): + if len(self.shape) < 1: + return + elif len(self.shape) < 2: + # TODO: this code could be refactored into calc_strides + # but then calc_strides would have to accept a stepping factor + strides = [] + backstrides = [] + s = self.strides[0] + if self.order == 'C': + new_shape.reverse() + for sh in new_shape: + strides.append(s) + backstrides.append(s * (sh - 1)) + s *= sh + if self.order == 'C': + strides.reverse() + backstrides.reverse() + new_shape.reverse() + self.strides = strides[:] + self.backstrides = backstrides[:] + self.shape = new_shape[:] + return + new_strides = calc_new_strides(new_shape, self.shape, self.strides) + if new_strides is None: + raise OperationError(space.w_AttributeError, space.wrap( + "incompatible shape for a non-contiguous array")) + new_backstrides = [0] * len(new_shape) + for nd in range(len(new_shape)): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + self.strides = new_strides[:] + self.backstrides = new_backstrides[:] + self.shape = new_shape[:] -class NDimSlice(ViewArray): +class W_NDimSlice(ViewArray): signature = signature.BaseSignature() def __init__(self, parent, signature, start, strides, backstrides, shape): - if isinstance(parent, NDimSlice): + if isinstance(parent, W_NDimSlice): parent = parent.parent ViewArray.__init__(self, parent, signature, strides, backstrides, shape) self.start = start @@ -1077,9 +1256,11 @@ def copy(self): array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) iter = self.start_iter() + a_iter = array.start_iter() while not iter.done(): - array.setitem(iter.offset, self.getitem(iter.offset)) + array.setitem(a_iter.offset, self.getitem(iter.offset)) iter = iter.next(len(self.shape)) + a_iter = a_iter.next(len(array.shape)) return array class W_NDimArray(BaseArray): @@ -1137,6 +1318,10 @@ return ArrayIterator(self.size) raise NotImplementedError # use ViewIterator simply, test it + def setshape(self, space, new_shape): + self.shape = new_shape + self.calc_strides(new_shape) + def debug_repr(self): return 'Array' @@ -1261,7 +1446,8 @@ __debug_repr__ = interp2app(BaseArray.descr_debug_repr), dtype = GetSetProperty(BaseArray.descr_get_dtype), - shape = GetSetProperty(BaseArray.descr_get_shape), + shape = GetSetProperty(BaseArray.descr_get_shape, + BaseArray.descr_set_shape), size = GetSetProperty(BaseArray.descr_get_size), T = GetSetProperty(BaseArray.descr_get_transpose), @@ -1279,6 +1465,7 @@ dot = interp2app(BaseArray.descr_dot), copy = interp2app(BaseArray.descr_copy), + reshape = interp2app(BaseArray.descr_reshape), ) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -251,6 +251,13 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 + def test_float32(self): + import numpypy as numpy + + assert numpy.float32.mro() == [numpy.float32, numpy.floating, numpy.inexact, numpy.number, numpy.generic, object] + + assert numpy.float32(12) == numpy.float64(12) + def test_float64(self): import numpypy as numpy diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ 
b/pypy/module/micronumpy/test/test_numarray.py @@ -158,6 +158,13 @@ assert shape_agreement(self.space, [5, 2], [4, 3, 5, 2]) == [4, 3, 5, 2] + def test_calc_new_strides(self): + from pypy.module.micronumpy.interp_numarray import calc_new_strides + assert calc_new_strides([2, 4], [4, 2], [4, 2]) == [8, 2] + assert calc_new_strides([2, 4, 3], [8, 3], [1, 16]) == [1, 2, 16] + assert calc_new_strides([2, 3, 4], [8, 3], [1, 16]) is None + assert calc_new_strides([24], [2, 4, 3], [48, 6, 1]) is None + assert calc_new_strides([24], [2, 4, 3], [24, 6, 2]) == [2] class AppTestNumArray(BaseNumpyAppTest): def test_ndarray(self): @@ -216,8 +223,8 @@ assert a[2] == 4 def test_copy(self): - from numpypy import array - a = array(range(5)) + from numpypy import arange, array + a = arange(5) b = a.copy() for i in xrange(5): assert b[i] == a[i] @@ -227,6 +234,11 @@ a = array(1) assert a.copy() == a + a = arange(8) + b = a[::2] + c = b.copy() + assert (c == b).all() + def test_iterator_init(self): from numpypy import array a = array(range(5)) @@ -318,8 +330,8 @@ def test_scalar(self): from numpypy import array, dtype a = array(3) - #assert a[0] == 3 raises(IndexError, "a[0]") + raises(IndexError, "a[0] = 5") assert a.size == 1 assert a.shape == () assert a.dtype is dtype(int) @@ -339,6 +351,81 @@ c = a[:3] assert c.shape == (3,) + def test_set_shape(self): + from numpypy import array, zeros + a = array([]) + a.shape = [] + a = array(range(12)) + a.shape = (3, 4) + assert (a == [range(4), range(4, 8), range(8, 12)]).all() + a.shape = (3, 2, 2) + assert a[1, 1, 1] == 7 + a.shape = (3, -1, 2) + assert a.shape == (3, 2, 2) + a.shape = 12 + assert a.shape == (12, ) + exc = raises(ValueError, "a.shape = 10") + assert str(exc.value) == "total size of new array must be unchanged" + a = array(3) + a.shape = () + #numpy allows this + a.shape = (1,) + + def test_reshape(self): + from numpypy import array, zeros + a = array(range(12)) + exc = raises(ValueError, "b = a.reshape((3, 10))") + assert str(exc.value) == "total size of new array must be unchanged" + b = a.reshape((3, 4)) + assert b.shape == (3, 4) + assert (b == [range(4), range(4, 8), range(8, 12)]).all() + b[:, 0] = 1000 + assert (a == [1000, 1, 2, 3, 1000, 5, 6, 7, 1000, 9, 10, 11]).all() + a = zeros((4, 2, 3)) + a.shape = (12, 2) + + def test_slice_reshape(self): + from numpypy import zeros, arange + a = zeros((4, 2, 3)) + b = a[::2, :, :] + b.shape = (2, 6) + exc = raises(AttributeError, "b.shape = 12") + assert str(exc.value) == \ + "incompatible shape for a non-contiguous array" + b = a[::2, :, :].reshape((2, 6)) + assert b.shape == (2, 6) + b = arange(20)[1:17:2] + b.shape = (4, 2) + assert (b == [[1, 3], [5, 7], [9, 11], [13, 15]]).all() + c = b.reshape((2, 4)) + assert (c == [[1, 3, 5, 7], [9, 11, 13, 15]]).all() + + z = arange(96).reshape((12, -1)) + assert z.shape == (12, 8) + y = z.reshape((4, 3, 8)) + v = y[:, ::2, :] + w = y.reshape(96) + u = v.reshape(64) + assert y[1, 2, 1] == z[5, 1] + y[1, 2, 1] = 1000 + # z, y, w, v are views of eachother + assert z[5, 1] == 1000 + assert v[1, 1, 1] == 1000 + assert w[41] == 1000 + # u is not a view, it is a copy! 
+ assert u[25] == 41 + + a = zeros((5, 2)) + assert a.reshape(-1).shape == (10,) + + raises(ValueError, arange(10).reshape, (5, -1, -1)) + + def test_reshape_varargs(self): + from numpypy import arange + z = arange(96).reshape(12, -1) + y = z.reshape(4, 3, 8) + assert y.shape == (4, 3, 8) + def test_add(self): from numpypy import array a = array(range(5)) @@ -1168,3 +1255,14 @@ a = arange(0, 0.8, 0.1) assert len(a) == 8 assert arange(False, True, True).dtype is dtype(int) + + +class AppTestRanges(BaseNumpyAppTest): + def test_app_reshape(self): + from numpypy import arange, array, dtype, reshape + a = arange(12) + b = reshape(a, (3, 4)) + assert b.shape == (3, 4) + a = range(12) + b = reshape(a, (3, 4)) + assert b.shape == (3, 4) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -8,13 +8,12 @@ from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.metainterp.warmspot import reset_stats -from pypy.module.micronumpy import interp_boxes, interp_ufuncs, signature -from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, - FloatObject, IntObject, BoolObject, Parser, InterpreterState) -from pypy.module.micronumpy.interp_numarray import (W_NDimArray, NDimSlice, +from pypy.module.micronumpy import interp_boxes +from pypy.module.micronumpy.compile import (FakeSpace, + IntObject, Parser, InterpreterState) +from pypy.module.micronumpy.interp_numarray import (W_NDimArray, BaseArray) from pypy.rlib.nonconst import NonConstant -from pypy.rpython.annlowlevel import llstr, hlstr class TestNumpyJIt(LLJitMixin): diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -0,0 +1,27 @@ + +""" +Extra tests for the pure Python PyPy _collections module +(not used in normal PyPy's) +""" + +from pypy.conftest import gettestobjspace + +class AppTestcStringIO: + def test_copy(self): + import _collections + def f(): + return 42 + d = _collections.defaultdict(f, {2: 3}) + # + d1 = d.copy() + assert type(d1) is _collections.defaultdict + assert len(d1) == 1 + assert d1[2] == 3 + assert d1[3] == 42 + # + import copy + d2 = copy.deepcopy(d) + assert type(d2) is _collections.defaultdict + assert len(d2) == 1 + assert d2[2] == 3 + assert d2[3] == 42 diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -418,7 +418,7 @@ if _MSVC: def invalid_socket(fd): return fd == INVALID_SOCKET - INVALID_SOCKET = intmask(cConfig.INVALID_SOCKET) + INVALID_SOCKET = r_uint(cConfig.INVALID_SOCKET) else: def invalid_socket(fd): return fd < 0 diff --git a/pypy/rlib/rsocket.py b/pypy/rlib/rsocket.py --- a/pypy/rlib/rsocket.py +++ b/pypy/rlib/rsocket.py @@ -20,6 +20,7 @@ from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.rffi import sizeof, offsetof +INVALID_SOCKET = _c.INVALID_SOCKET def mallocbuf(buffersize): return lltype.malloc(rffi.CCHARP.TO, buffersize, flavor='raw') From noreply at buildbot.pypy.org Tue Dec 6 20:28:21 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 6 Dec 2011 20:28:21 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-refactor-complex: make things translate Message-ID: 
<20111206192821.ACECF8205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-dtype-refactor-complex Changeset: r50221:894a554a8053 Date: 2011-12-06 14:25 -0500 http://bitbucket.org/pypy/pypy/changeset/894a554a8053/ Log: make things translate diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -120,6 +120,9 @@ assert isinstance(w_obj, interp_boxes.W_GenericBox) return self.int(w_obj.descr_int(self)) + def unpackcomplex(self, w_obj): + raise NotImplementedError + def is_true(self, w_obj): assert isinstance(w_obj, BoolObject) return w_obj.boolval diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -61,9 +61,6 @@ # exp = sin = cos = tan = arcsin = arccos = arctan = arcsinh = \ # arctanh = _unimplemented_ufunc - def is_correct_box(self, box): - return isinstance(box, self.BoxType) - class Primitive(object): _mixin_ = True def get_element_size(self): @@ -411,7 +408,9 @@ s += itemtype.get_element_size() return s + @specialize.argtype(1) def box(self, value): + assert isinstance(value, list) return self.BoxType(value) def unbox(self, box): @@ -433,6 +432,9 @@ offset += itemtype.get_element_size() return self.box(boxes) + def is_correct_box(self, box): + return isinstance(box, self.BoxType) + class Complex(BaseCompositeType): BoxType = interp_boxes.W_Complex128Box From noreply at buildbot.pypy.org Tue Dec 6 21:02:07 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Tue, 6 Dec 2011 21:02:07 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: more places where signed_defn.h is missing. Made it easier to add it more often Message-ID: <20111206200207.DEA128205C@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50222:ccaf375ee24b Date: 2011-12-06 21:01 +0100 http://bitbucket.org/pypy/pypy/changeset/ccaf375ee24b/ Log: more places where signed_defn.h is missing. 
Made it easier to add it more often diff --git a/pypy/translator/c/src/signals.h b/pypy/translator/c/src/signals.h --- a/pypy/translator/c/src/signals.h +++ b/pypy/translator/c/src/signals.h @@ -7,6 +7,7 @@ #include #include +#include "stc/signed_defn.h" #ifdef _WIN32 #include diff --git a/pypy/translator/c/src/signed_defn.h b/pypy/translator/c/src/signed_defn.h --- a/pypy/translator/c/src/signed_defn.h +++ b/pypy/translator/c/src/signed_defn.h @@ -1,5 +1,8 @@ /* this file defines Signed and Unsigned */ +#ifndef SIGNED_DEFN_H +#define SIGNED_DEFN_H + #ifdef _WIN64 typedef __int64 Signed; typedef unsigned __int64 Unsigned; @@ -10,4 +13,6 @@ # define SIGNED_MIN LONG_MIN #endif +#endif + /* end of signed_def.h */ From noreply at buildbot.pypy.org Tue Dec 6 21:16:26 2011 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 6 Dec 2011 21:16:26 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: hg merge with default Message-ID: <20111206201626.ECD2A8205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50223:c1c81fb495b9 Date: 2011-12-06 17:39 +0200 http://bitbucket.org/pypy/pypy/changeset/c1c81fb495b9/ Log: hg merge with default diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -379,12 +379,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -823,6 +823,15 @@ bool(v.value)): # store a non-NULL self._gen_write_barrier(newops, op.getarg(0), v) op = op.copy_and_change(rop.SETFIELD_RAW) + # ---------- write barrier for SETINTERIORFIELD_GC ------ + if op.getopnum() == rop.SETINTERIORFIELD_GC: + val = op.getarg(0) + if val is not last_malloc: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self._gen_write_barrier(newops, op.getarg(0), v) + op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) # ---------- write barrier for SETARRAYITEM_GC ---------- if op.getopnum() == rop.SETARRAYITEM_GC: val = op.getarg(0) diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -570,6 +570,28 @@ assert operations[1].getarg(2) == v_value assert operations[1].getdescr() == array_descr + def test_rewrite_assembler_5(self): + S = lltype.GcStruct('S') + A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) + interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, + A.OF, 'x') + wbdescr = self.gc_ll_descr.write_barrier_descr + ops = parse(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + expected = parse(""" + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiordescr) 
+ jump(p1, p2) + """, namespace=locals()) + operations = get_deep_immutable_oplist(ops.operations) + operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, + operations, []) + equaloplists(operations, expected.operations) + def test_rewrite_assembler_initialization_store(self): S = lltype.GcStruct('S', ('parent', OBJECT), ('x', lltype.Signed)) diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -490,8 +490,8 @@ check(a[i].y.i == n + i * 100 + 2) check(a[i].z.i == n + i * 100 + 3) i += 1 + n -= x.foo return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s - f(123, *[None]*11) # check that the check() are ok return None, f, None def test_compile_framework_7_interior(self): diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -298,7 +298,7 @@ pass class ResumeGuardDescr(ResumeDescr): - _counter = 0 # if < 0, there is one counter per value; + _counter = 0 # on a GUARD_VALUE, there is one counter per value; _counters = None # they get stored in _counters then. # this class also gets the following attributes stored by resume.py code @@ -309,10 +309,13 @@ rd_virtuals = None rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) - CNT_INT = -0x20000000 - CNT_REF = -0x40000000 - CNT_FLOAT = -0x60000000 - CNT_MASK = 0x1FFFFFFF + CNT_BASE_MASK = 0x0FFFFFFF # the base counter value + CNT_BUSY_FLAG = 0x10000000 # if set, busy tracing from the guard + CNT_TYPE_MASK = 0x60000000 # mask for the type + + CNT_INT = 0x20000000 + CNT_REF = 0x40000000 + CNT_FLOAT = 0x60000000 def store_final_boxes(self, guard_op, boxes): guard_op.setfailargs(boxes) @@ -326,6 +329,8 @@ except ValueError: return # xxx probably very rare else: + if i > self.CNT_BASE_MASK: + return # probably never, but better safe than sorry if box.type == history.INT: cnt = self.CNT_INT elif box.type == history.REF: @@ -334,14 +339,17 @@ cnt = self.CNT_FLOAT else: assert 0, box.type - # we build the following value for _counter, which is always - # a negative value + assert cnt > self.CNT_BASE_MASK self._counter = cnt | i def handle_fail(self, metainterp_sd, jitdriver_sd): if self.must_compile(metainterp_sd, jitdriver_sd): - return self._trace_and_compile_from_bridge(metainterp_sd, - jitdriver_sd) + self.start_compiling() + try: + return self._trace_and_compile_from_bridge(metainterp_sd, + jitdriver_sd) + finally: + self.done_compiling() else: from pypy.jit.metainterp.blackhole import resume_in_blackhole resume_in_blackhole(metainterp_sd, jitdriver_sd, self) @@ -359,12 +367,22 @@ def must_compile(self, metainterp_sd, jitdriver_sd): trace_eagerness = jitdriver_sd.warmstate.trace_eagerness - if self._counter >= 0: + # + if self._counter <= self.CNT_BASE_MASK: + # simple case: just counting from 0 to trace_eagerness self._counter += 1 return self._counter >= trace_eagerness - else: - index = self._counter & self.CNT_MASK - typetag = self._counter & ~ self.CNT_MASK + # + # do we have the BUSY flag? If so, we're tracing right now, e.g. in an + # outer invocation of the same function, so don't trace again for now. + elif self._counter & self.CNT_BUSY_FLAG: + return False + # + else: # we have a GUARD_VALUE that fails. Make a _counters instance + # (only now, when the guard is actually failing at least once), + # and use it to record some statistics about the failing values. 
+ index = self._counter & self.CNT_BASE_MASK + typetag = self._counter & self.CNT_TYPE_MASK counters = self._counters if typetag == self.CNT_INT: intvalue = metainterp_sd.cpu.get_latest_value_int(index) @@ -391,7 +409,16 @@ assert 0, typetag return counter >= trace_eagerness - def reset_counter_from_failure(self): + def start_compiling(self): + # start tracing and compiling from this guard. + self._counter |= self.CNT_BUSY_FLAG + + def done_compiling(self): + # done tracing and compiling from this guard. Either the bridge has + # been successfully compiled, in which case whatever value we store + # in self._counter will not be seen any more, or not, in which case + # we should reset the counter to 0, in order to wait a bit until the + # next attempt. if self._counter >= 0: self._counter = 0 self._counters = None @@ -608,9 +635,6 @@ metainterp.set_compiled_merge_points(self.original_greenkey, old_loop_tokens) - def reset_counter_from_failure(self): - pass - def compile_new_bridge(metainterp, old_loop_tokens, resumekey, retraced=False): """Try to compile a new bridge leading from the beginning of the history diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1790,7 +1790,6 @@ self.staticdata.profiler.count(reason) debug_print('~~~ ABORTING TRACING') self.staticdata.stats.aborted() - self.resumekey.reset_counter_from_failure() def blackhole_if_trace_too_long(self): warmrunnerstate = self.jitdriver_sd.warmstate diff --git a/pypy/jit/metainterp/test/test_math.py b/pypy/jit/metainterp/test/test_math.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/test_math.py @@ -0,0 +1,47 @@ +import math +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from pypy.rlib.rfloat import isinf, isnan, INFINITY, NAN + +class MathTests: + + def test_math_sqrt(self): + def f(x): + try: + return math.sqrt(x) + except ValueError: + return -INFINITY + + res = self.interp_operations(f, [0.0]) + assert res == 0.0 + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [25.0]) + assert res == 5.0 + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [-0.0]) + assert str(res) == '-0.0' + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [1000000.0]) + assert res == 1000.0 + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [-1.0]) + assert res == -INFINITY + self.check_operations_history(call_pure=0) + # + res = self.interp_operations(f, [INFINITY]) + assert isinf(res) and not isnan(res) and res > 0.0 + self.check_operations_history(call_pure=0) + # + res = self.interp_operations(f, [NAN]) + assert isnan(res) and not isinf(res) + self.check_operations_history(call_pure=0) + + +class TestOOtype(MathTests, OOJitMixin): + pass + +class TestLLtype(MathTests, LLJitMixin): + pass diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -1238,6 +1238,31 @@ self.meta_interp(portal, [0, 0, 0], inline=True) self.check_resops(call_may_force=0, call=0) + def test_dont_repeatedly_trace_from_the_same_guard(self): + driver = JitDriver(greens = [], reds = ['level', 'i']) + + def portal(level): + if level == 0: + i = -10 + else: + i = 0 + # + while True: + driver.jit_merge_point(level=level, i=i) + if level == 25: + return 42 + i += 1 + if i 
<= 0: # <- guard + continue # first make a loop + else: + # then we fail the guard above, doing a recursive call, + # which will itself fail the same guard above, and so on + return portal(level + 1) + + self.meta_interp(portal, [0]) + self.check_loop_count_at_most(2) # and not, e.g., 24 + + class TestLLtype(RecursiveTests, LLJitMixin): pass diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -13,12 +13,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -19,11 +19,22 @@ def test_keyerror_without_factory(self): from _collections import defaultdict - d1 = defaultdict() - for key in ['foo', (1,)]: - try: - d1[key] - except KeyError, err: - assert err.args[0] == key - else: - assert 0, "expected KeyError" + for d1 in [defaultdict(), defaultdict(None)]: + for key in ['foo', (1,)]: + try: + d1[key] + except KeyError, err: + assert err.args[0] == key + else: + assert 0, "expected KeyError" + + def test_noncallable(self): + from _collections import defaultdict + raises(TypeError, defaultdict, [('a', 5)]) + d = defaultdict(None, [('a', 5)]) + assert d.items() == [('a', 5)] + + def test_kwds(self): + from _collections import defaultdict + d = defaultdict(default_factory=5) + assert d.keys() == ['default_factory'] diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -1,7 +1,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.module._socket.interp_socket import converted_error, W_RSocket from pypy.rlib import rsocket -from pypy.rlib.rsocket import SocketError +from pypy.rlib.rsocket import SocketError, INVALID_SOCKET from pypy.interpreter.error import OperationError def gethostname(space): @@ -284,7 +284,7 @@ space.wrap(socktype), space.wrap(protocol), space.wrap(canonname), - addr.as_object(-1, space)]) # -1 as per cpython + addr.as_object(INVALID_SOCKET, space)]) # -1 as per cpython for (family, socktype, protocol, canonname, addr) in lst] return space.newlist(lst1) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -32,6 +32,7 @@ 'int_': 'interp_boxes.W_LongBox', 'inexact': 'interp_boxes.W_InexactBox', 'floating': 'interp_boxes.W_FloatingBox', + 'float32': 'interp_boxes.W_Float32Box', 'float64': 'interp_boxes.W_Float64Box', } @@ -76,4 +77,5 @@ 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', 'arange': 'app_numpy.arange', + 'reshape': 'app_numpy.reshape', } diff --git a/pypy/module/micronumpy/app_numpy.py 
b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -36,3 +36,39 @@ j += 1 i += step return arr + +def reshape(a, shape): + '''reshape(a, newshape) + Gives a new shape to an array without changing its data. + + Parameters + ---------- + a : array_like + Array to be reshaped. + newshape : int or tuple of ints + The new shape should be compatible with the original shape. If + an integer, then the result will be a 1-D array of that length. + One shape dimension can be -1. In this case, the value is inferred + from the length of the array and remaining dimensions. + + Returns + ------- + reshaped_array : ndarray + This will be a new view object if possible; otherwise, it will + be a copy. + + + See Also + -------- + ndarray.reshape : Equivalent method. + + Notes + ----- + + It is not always possible to change the shape of an array without + copying the data. If you want an error to be raise if the data is copied, + you should assign the new shape to the shape attribute of the array +''' + if not hasattr(a, 'reshape'): + a = numpypy.array(a) + return a.reshape(shape) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -258,6 +258,8 @@ W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, __module__ = "numpypy", + + __new__ = interp2app(W_Float32Box.descr__new__.im_func), ) W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -98,6 +98,107 @@ endshape[i] = remainder[i] return endshape +def get_shape_from_iterable(space, old_size, w_iterable): + new_size = 0 + new_shape = [] + if space.isinstance_w(w_iterable, space.w_int): + new_size = space.int_w(w_iterable) + if new_size < 0: + new_size = old_size + new_shape = [new_size, ] + else: + neg_dim = -1 + batch = space.listview(w_iterable) + #Allow for shape = (1,2,3) or shape = ((1,2,3)) + if len(batch) > 1 and space.issequence_w(batch[0]): + batch = space.listview(batch[0]) + new_size = 1 + if len(batch) < 1: + if old_size == 1: + #Scalars can have an empty size. + new_size = 1 + else: + new_size = 0 + new_shape = [] + i = 0 + for elem in batch: + s = space.int_w(elem) + if s < 0: + if neg_dim >= 0: + raise OperationError(space.w_ValueError, space.wrap( + "can only specify one unknown dimension")) + s = 1 + neg_dim = i + new_size *= s + new_shape.append(s) + i += 1 + if neg_dim >= 0: + new_shape[neg_dim] = old_size / new_size + new_size *= new_shape[neg_dim] + if new_size != old_size: + raise OperationError(space.w_ValueError, + space.wrap("total size of new array must be unchanged")) + return new_shape + +#Recalculating strides. Find the steps that the iteration does for each +#dimension, given the stride and shape. Then try to create a new stride that +#fits the new shape, using those steps. If there is a shape/step mismatch +#(meaning that the realignment of elements crosses from one step into another) +#return None so that the caller can raise an exception. 
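The comment block above describes when a reshape can keep the old storage. For a freshly allocated C-ordered array the answer is always yes, because its strides are fully regular; the helper below (ours, for illustration only, with strides counted in elements) shows what those regular strides look like, which is the easy case that calc_new_strides below reproduces. For a strided slice there may be no strides that fit the requested shape, and the caller then has to fall back to copying.

    def c_strides(shape):
        # element strides of a C-ordered array: the rightmost axis moves fastest
        strides = []
        step = 1
        for length in reversed(shape):
            strides.insert(0, step)
            step *= length
        return strides

    assert c_strides([2, 4, 3]) == [12, 3, 1]   # 24 elements laid out row-major
    assert c_strides([24]) == [1]               # a flat (24,) view only needs stride 1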
+def calc_new_strides(new_shape, old_shape, old_strides): + #Return the proper strides for new_shape, or None + # if the mapping crosses stepping boundaries + + #Assumes that prod(old_shape) ==prod(new_shape), len(old_shape) > 1 and + # len(new_shape) > 0 + steps = [] + last_step = 1 + oldI = 0 + new_strides = [] + if old_strides[0] < old_strides[-1]: + for i in range(len(old_shape)): + steps.append(old_strides[i] / last_step) + last_step *= old_shape[i] + cur_step = steps[0] + n_new_elems_used = 1 + n_old_elems_to_use = old_shape[0] + for s in new_shape: + new_strides.append(cur_step * n_new_elems_used) + n_new_elems_used *= s + while n_new_elems_used > n_old_elems_to_use: + oldI += 1 + if steps[oldI] != steps[oldI - 1]: + return None + n_old_elems_to_use *= old_shape[oldI] + if n_new_elems_used == n_old_elems_to_use: + oldI += 1 + if oldI >= len(old_shape): + break + cur_step = steps[oldI] + n_old_elems_to_use *= old_shape[oldI] + else: + for i in range(len(old_shape) - 1, -1, -1): + steps.insert(0, old_strides[i] / last_step) + last_step *= old_shape[i] + cur_step = steps[-1] + n_new_elems_used = 1 + oldI = -1 + n_old_elems_to_use = old_shape[-1] + for s in new_shape[::-1]: + new_strides.insert(0, cur_step * n_new_elems_used) + n_new_elems_used *= s + while n_new_elems_used > n_old_elems_to_use: + oldI -= 1 + if steps[oldI] != steps[oldI + 1]: + return None + n_old_elems_to_use *= old_shape[oldI] + if n_new_elems_used == n_old_elems_to_use: + oldI -= 1 + if oldI < -len(old_shape): + break + cur_step = steps[oldI] + n_old_elems_to_use *= old_shape[oldI] + return new_strides # Iterators for arrays # -------------------- @@ -567,6 +668,12 @@ def descr_get_shape(self, space): return space.newtuple([space.wrap(i) for i in self.shape]) + def descr_set_shape(self, space, w_iterable): + concrete = self.get_concrete() + new_shape = get_shape_from_iterable(space, + concrete.find_size(), w_iterable) + concrete.setshape(space, new_shape) + def descr_get_size(self, space): return space.wrap(self.find_size()) @@ -814,10 +921,44 @@ strides += self.strides[s:] backstrides += self.backstrides[s:] new_sig = signature.Signature.find_sig([ - NDimSlice.signature, self.signature, + W_NDimSlice.signature, self.signature, ]) - return NDimSlice(self, new_sig, start, strides[:], backstrides[:], - shape[:]) + return W_NDimSlice(self, new_sig, start, strides[:], backstrides[:], + shape[:]) + + def descr_reshape(self, space, w_args): + """reshape(...) + a.reshape(shape) + + Returns an array containing the same data with a new shape. + + Refer to `%s.reshape` for full documentation. + + See Also + -------- + numpy.reshape : equivalent function +""" % 'numpypy' + concrete = self.get_concrete() + new_shape = get_shape_from_iterable(space, + concrete.find_size(), w_args) + #Since we got to here, prod(new_shape) == self.size + new_strides = calc_new_strides(new_shape, + concrete.shape, concrete.strides) + if new_strides: + #We can create a view, strides somehow match up. 
+ new_sig = signature.Signature.find_sig([ + W_NDimSlice.signature, self.signature, ]) + ndims = len(new_shape) + new_backstrides = [0] * ndims + for nd in range(ndims): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + arr = W_NDimSlice(self, new_sig, self.start, new_strides, + new_backstrides, new_shape) + else: + #Create copy with contiguous data + arr = concrete.copy() + arr.setshape(space, new_shape) + return arr def descr_mean(self, space): return space.div(self.descr_sumpromote(space), @@ -836,7 +977,7 @@ if len(concrete.shape) < 2: return space.wrap(self) new_sig = signature.Signature.find_sig([ - NDimSlice.signature, self.signature + W_NDimSlice.signature, self.signature ]) strides = [] backstrides = [] @@ -845,8 +986,8 @@ strides.append(concrete.strides[i]) backstrides.append(concrete.backstrides[i]) shape.append(concrete.shape[i]) - return space.wrap(NDimSlice(concrete, new_sig, self.start, strides[:], - backstrides[:], shape[:])) + return space.wrap(W_NDimSlice(concrete, new_sig, self.start, strides[:], + backstrides[:], shape[:])) def descr_get_flatiter(self, space): return space.wrap(W_FlatIterator(self)) @@ -915,6 +1056,11 @@ def debug_repr(self): return 'Scalar' + def setshape(self, space, new_shape): + # In order to get here, we already checked that prod(new_shape)==1, + # so in order to have a consistent API, let it go through. + pass + class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs @@ -1047,7 +1193,6 @@ return self.forced_result.start_iter(res_shape) if res_shape is None: res_shape = self.shape # we still force the shape on children - #TODO: use left_start_dim, right_start_dim if they are not [-1, -1] if self.left_start_dim[0] >= 0: ldim = self.left_start_dim[1] rdim = self.right_start_dim[1] @@ -1120,13 +1265,46 @@ return space.wrap(self.shape[0]) return space.wrap(1) + def setshape(self, space, new_shape): + if len(self.shape) < 1: + return + elif len(self.shape) < 2: + #TODO: this code could be refactored into calc_strides + #but then calc_strides would have to accept a stepping factor + strides = [] + backstrides = [] + s = self.strides[0] + if self.order == 'C': + new_shape.reverse() + for sh in new_shape: + strides.append(s) + backstrides.append(s * (sh - 1)) + s *= sh + if self.order == 'C': + strides.reverse() + backstrides.reverse() + new_shape.reverse() + self.strides = strides[:] + self.backstrides = backstrides[:] + self.shape = new_shape[:] + return + new_strides = calc_new_strides(new_shape, self.shape, self.strides) + if new_strides is None: + raise OperationError(space.w_AttributeError, space.wrap( + "incompatible shape for a non-contiguous array")) + new_backstrides = [0] * len(new_shape) + for nd in range(len(new_shape)): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + self.strides = new_strides[:] + self.backstrides = new_backstrides[:] + self.shape = new_shape[:] -class NDimSlice(ViewArray): +class W_NDimSlice(ViewArray): signature = signature.BaseSignature() def __init__(self, parent, signature, start, strides, backstrides, shape): - if isinstance(parent, NDimSlice): + if isinstance(parent, W_NDimSlice): parent = parent.parent ViewArray.__init__(self, parent, signature, strides, backstrides, shape) self.start = start @@ -1175,9 +1353,11 @@ def copy(self): array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) iter = self.start_iter() + a_iter = array.start_iter() while not iter.done(): - array.setitem(iter.get_offset(), self.getitem(iter.get_offset())) + 
array.setitem(a_iter.offset, self.getitem(iter.offset)) iter = iter.next(len(self.shape)) + a_iter = a_iter.next(len(array.shape)) return array class W_NDimArray(BaseArray): @@ -1235,6 +1415,10 @@ return ArrayIterator(self.size) raise NotImplementedError # use ViewIterator simply, test it + def setshape(self, space, new_shape): + self.shape = new_shape + self.calc_strides(new_shape) + def debug_repr(self): return 'Array' @@ -1359,7 +1543,8 @@ __debug_repr__ = interp2app(BaseArray.descr_debug_repr), dtype = GetSetProperty(BaseArray.descr_get_dtype), - shape = GetSetProperty(BaseArray.descr_get_shape), + shape = GetSetProperty(BaseArray.descr_get_shape, + BaseArray.descr_set_shape), size = GetSetProperty(BaseArray.descr_get_size), T = GetSetProperty(BaseArray.descr_get_transpose), @@ -1377,6 +1562,7 @@ dot = interp2app(BaseArray.descr_dot), copy = interp2app(BaseArray.descr_copy), + reshape = interp2app(BaseArray.descr_reshape), ) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -240,6 +240,13 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 + def test_float32(self): + import numpypy as numpy + + assert numpy.float32.mro() == [numpy.float32, numpy.floating, numpy.inexact, numpy.number, numpy.generic, object] + + assert numpy.float32(12) == numpy.float64(12) + def test_float64(self): import numpypy as numpy diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -158,6 +158,13 @@ assert shape_agreement(self.space, [5, 2], [4, 3, 5, 2]) == [4, 3, 5, 2] + def test_calc_new_strides(self): + from pypy.module.micronumpy.interp_numarray import calc_new_strides + assert calc_new_strides([2, 4], [4, 2], [4, 2]) == [8, 2] + assert calc_new_strides([2, 4, 3], [8, 3], [1, 16]) == [1, 2, 16] + assert calc_new_strides([2, 3, 4], [8, 3], [1, 16]) is None + assert calc_new_strides([24], [2, 4, 3], [48, 6, 1]) is None + assert calc_new_strides([24], [2, 4, 3], [24, 6, 2]) == [2] class AppTestNumArray(BaseNumpyAppTest): def test_ndarray(self): @@ -216,8 +223,8 @@ assert a[2] == 4 def test_copy(self): - from numpypy import array - a = array(range(5)) + from numpypy import arange, array + a = arange(5) b = a.copy() for i in xrange(5): assert b[i] == a[i] @@ -227,6 +234,11 @@ a = array(1) assert a.copy() == a + a = arange(8) + b = a[::2] + c = b.copy() + assert (c == b).all() + def test_iterator_init(self): from numpypy import array a = array(range(5)) @@ -339,6 +351,76 @@ c = a[:3] assert c.shape == (3,) + def test_set_shape(self): + from numpypy import array, zeros + a = array([]) + a.shape = [] + a = array(range(12)) + a.shape = (3, 4) + assert (a == [range(4), range(4, 8), range(8, 12)]).all() + a.shape = (3, 2, 2) + assert a[1, 1, 1] == 7 + a.shape = (3, -1, 2) + assert a.shape == (3, 2, 2) + a.shape = 12 + assert a.shape == (12, ) + exc = raises(ValueError, "a.shape = 10") + assert str(exc.value) == "total size of new array must be unchanged" + a = array(3) + a.shape = () + #numpy allows this + a.shape = (1,) + + def test_reshape(self): + from numpypy import array, zeros + a = array(range(12)) + exc = raises(ValueError, "b = a.reshape((3, 10))") + assert str(exc.value) == "total size of new array must be unchanged" + b = a.reshape((3, 4)) + assert b.shape == (3, 4) + 
assert (b == [range(4), range(4, 8), range(8, 12)]).all() + b[:, 0] = 1000 + assert (a == [1000, 1, 2, 3, 1000, 5, 6, 7, 1000, 9, 10, 11]).all() + a = zeros((4, 2, 3)) + a.shape = (12, 2) + + def test_slice_reshape(self): + from numpypy import zeros, arange + a = zeros((4, 2, 3)) + b = a[::2, :, :] + b.shape = (2, 6) + exc = raises(AttributeError, "b.shape = 12") + assert str(exc.value) == \ + "incompatible shape for a non-contiguous array" + b = a[::2, :, :].reshape((2, 6)) + assert b.shape == (2, 6) + b = arange(20)[1:17:2] + b.shape = (4, 2) + assert (b == [[1, 3], [5, 7], [9, 11], [13, 15]]).all() + c = b.reshape((2, 4)) + assert (c == [[1, 3, 5, 7], [9, 11, 13, 15]]).all() + + z = arange(96).reshape((12, -1)) + assert z.shape == (12, 8) + y = z.reshape((4, 3, 8)) + v = y[:, ::2, :] + w = y.reshape(96) + u = v.reshape(64) + assert y[1, 2, 1] == z[5, 1] + y[1, 2, 1] = 1000 + #z, y, w, v are views of eachother + assert z[5, 1] == 1000 + assert v[1, 1, 1] == 1000 + assert w[41] == 1000 + #u is not a view, it is a copy! + assert u[25] == 41 + + def test_reshape_varargs(self): + skip("How do I do varargs in rpython? reshape should accept a" + " variable number of arguments") + z = arange(96).reshape(12, -1) + y = z.reshape(4, 3, 8) + def test_add(self): from numpypy import array a = array(range(5)) @@ -1174,3 +1256,14 @@ a = arange(0, 0.8, 0.1) assert len(a) == 8 assert arange(False, True, True).dtype is dtype(int) + + +class AppTestRanges(BaseNumpyAppTest): + def test_app_reshape(self): + from numpypy import arange, array, dtype, reshape + a = arange(12) + b = reshape(a, (3, 4)) + assert b.shape == (3, 4) + a = range(12) + b = reshape(a, (3, 4)) + assert b.shape == (3, 4) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -8,13 +8,12 @@ from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.metainterp.warmspot import reset_stats -from pypy.module.micronumpy import interp_boxes, interp_ufuncs, signature -from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, - FloatObject, IntObject, BoolObject, Parser, InterpreterState) -from pypy.module.micronumpy.interp_numarray import (W_NDimArray, NDimSlice, +from pypy.module.micronumpy import interp_boxes +from pypy.module.micronumpy.compile import (FakeSpace, + IntObject, Parser, InterpreterState) +from pypy.module.micronumpy.interp_numarray import (W_NDimArray, BaseArray) from pypy.rlib.nonconst import NonConstant -from pypy.rpython.annlowlevel import llstr, hlstr class TestNumpyJIt(LLJitMixin): @@ -186,7 +185,8 @@ # sure it was optimized correctly. # XXX the comment above is wrong now. 
We need preferrably a way to # count the two loops separately - self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, + py.test.skip("counting exact number of classes is nonsense") + self.check_resops({'setarrayitem_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 35, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -418,7 +418,7 @@ if _MSVC: def invalid_socket(fd): return fd == INVALID_SOCKET - INVALID_SOCKET = intmask(cConfig.INVALID_SOCKET) + INVALID_SOCKET = r_uint(cConfig.INVALID_SOCKET) else: def invalid_socket(fd): return fd < 0 diff --git a/pypy/rlib/rsocket.py b/pypy/rlib/rsocket.py --- a/pypy/rlib/rsocket.py +++ b/pypy/rlib/rsocket.py @@ -20,6 +20,7 @@ from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.rffi import sizeof, offsetof +INVALID_SOCKET = _c.INVALID_SOCKET def mallocbuf(buffersize): return lltype.malloc(rffi.CCHARP.TO, buffersize, flavor='raw') From noreply at buildbot.pypy.org Tue Dec 6 21:16:28 2011 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 6 Dec 2011 21:16:28 +0100 (CET) Subject: [pypy-commit] pypy numpypy-is_contiguous: refactor calc_steps Message-ID: <20111206201628.5E70C8205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-is_contiguous Changeset: r50224:9166c3425257 Date: 2011-12-06 18:39 +0200 http://bitbucket.org/pypy/pypy/changeset/9166c3425257/ Log: refactor calc_steps diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -140,6 +140,21 @@ space.wrap("total size of new array must be unchanged")) return new_shape +def calc_steps(shape, strides): + steps = [] + last_step = 1 + #Irregardless of order, the strides can itterate faster left to right + # or right to left. Take each case seperately. + if strides[0] < strides[-1]: + for i in range(len(shape)): + steps.append(strides[i] / last_step) + last_step *= shape[i] + else: + for i in range(len(shape) - 1, -1, -1): + steps.insert(0, strides[i] / last_step) + last_step *= shape[i] + return steps + #Recalculating strides. Find the steps that the iteration does for each #dimension, given the stride and shape. Then try to create a new stride that #fits the new shape, using those steps. 
If there is a shape/step mismatch @@ -151,14 +166,10 @@ #Assumes that prod(old_shape) ==prod(new_shape), len(old_shape) > 1 and # len(new_shape) > 0 - steps = [] - last_step = 1 + steps = calc_steps(old_shape, old_strides) oldI = 0 new_strides = [] if old_strides[0] < old_strides[-1]: - for i in range(len(old_shape)): - steps.append(old_strides[i] / last_step) - last_step *= old_shape[i] cur_step = steps[0] n_new_elems_used = 1 n_old_elems_to_use = old_shape[0] @@ -177,9 +188,6 @@ cur_step = steps[oldI] n_old_elems_to_use *= old_shape[oldI] else: - for i in range(len(old_shape) - 1, -1, -1): - steps.insert(0, old_strides[i] / last_step) - last_step *= old_shape[i] cur_step = steps[-1] n_new_elems_used = 1 oldI = -1 From noreply at buildbot.pypy.org Tue Dec 6 21:16:29 2011 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 6 Dec 2011 21:16:29 +0100 (CET) Subject: [pypy-commit] pypy numpypy-is_contiguous: test, implement is_contiguous for C order Message-ID: <20111206201629.C103B8205C@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-is_contiguous Changeset: r50225:b8c87c4cf664 Date: 2011-12-06 22:14 +0200 http://bitbucket.org/pypy/pypy/changeset/b8c87c4cf664/ Log: test, implement is_contiguous for C order diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -155,6 +155,27 @@ last_step *= shape[i] return steps +def is_contiguous(arr): + #Only views can be non-contiguous + if isinstance(arr, ViewArray): + steps = calc_steps(arr.shape, arr.strides) + for i in range(1, len(steps)): + if steps[i] != steps[0]: + return False + return True + +def is_contiguous_lr(arr): + if arr.strides[-1] < arr.strides[0]: + #rl, not lr + return False + return is_contiguous(arr) + +def is_contiguous_rl(self): + if arr.strides[-1] > arr.strides[0]: + #lr, not rl + return False + return is_contiguous(arr) + #Recalculating strides. Find the steps that the iteration does for each #dimension, given the stride and shape. Then try to create a new stride that #fits the new shape, using those steps. 
If there is a shape/step mismatch @@ -192,7 +213,8 @@ n_new_elems_used = 1 oldI = -1 n_old_elems_to_use = old_shape[-1] - for s in new_shape[::-1]: + for i in range(len(new_shape) - 1, -1, -1): + s = new_shape[i] new_strides.insert(0, cur_step * n_new_elems_used) n_new_elems_used *= s while n_new_elems_used > n_old_elems_to_use: diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -166,6 +166,23 @@ assert calc_new_strides([24], [2, 4, 3], [48, 6, 1]) is None assert calc_new_strides([24], [2, 4, 3], [24, 6, 2]) == [2] + def test_contiguousC(self): + from pypy.module.micronumpy.interp_numarray import is_contiguous, + is_contiguous_rl, is_contiguous_lr + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + assert is_contiguous(a) == True + assert is_contiguous_lr(a) == False + assert is_contiguous_rl(a) == True + b = a.descr_get_transpose(self.space) + assert is_contiguous(b) == True + assert is_contiguous_lr(b) == True + assert is_contiguous_rl(b) == False + b = a.create_slice(self.space, [(0, 10, 2, 5)]) + assert is_contiguous(b) == False + assert is_contiguous_lr(b) == False + assert is_contiguous_rl(b) == False + + class AppTestNumArray(BaseNumpyAppTest): def test_ndarray(self): from numpypy import ndarray, array, dtype From noreply at buildbot.pypy.org Tue Dec 6 21:40:29 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Tue, 6 Dec 2011 21:40:29 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: typo :-( Message-ID: <20111206204029.915238205C@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50226:41a380cec3d1 Date: 2011-12-06 21:39 +0100 http://bitbucket.org/pypy/pypy/changeset/41a380cec3d1/ Log: typo :-( diff --git a/pypy/translator/c/src/signals.h b/pypy/translator/c/src/signals.h --- a/pypy/translator/c/src/signals.h +++ b/pypy/translator/c/src/signals.h @@ -7,7 +7,7 @@ #include #include -#include "stc/signed_defn.h" +#include "src/signed_defn.h" #ifdef _WIN32 #include From noreply at buildbot.pypy.org Tue Dec 6 22:02:03 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 6 Dec 2011 22:02:03 +0100 (CET) Subject: [pypy-commit] pypy default: fix translation Message-ID: <20111206210203.756568205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50227:4d94de61c725 Date: 2011-12-06 16:01 -0500 http://bitbucket.org/pypy/pypy/changeset/4d94de61c725/ Log: fix translation diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -852,7 +852,7 @@ if len(args_w) == 1: w_shape = args_w[0] else: - w_shape = space.newlist(args_w) + w_shape = space.newtuple(args_w) concrete = self.get_concrete() new_shape = get_shape_from_iterable(space, concrete.find_size(), w_shape) From noreply at buildbot.pypy.org Tue Dec 6 22:58:29 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Tue, 6 Dec 2011 22:58:29 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: merge default Message-ID: <20111206215829.587448205C@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50228:2e5389e1972f Date: 2011-12-06 22:34 +0100 http://bitbucket.org/pypy/pypy/changeset/2e5389e1972f/ Log: merge default diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- 
a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -37,10 +37,11 @@ i += step return arr + def reshape(a, shape): '''reshape(a, newshape) Gives a new shape to an array without changing its data. - + Parameters ---------- a : array_like @@ -50,21 +51,21 @@ an integer, then the result will be a 1-D array of that length. One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions. - + Returns ------- reshaped_array : ndarray This will be a new view object if possible; otherwise, it will be a copy. - - + + See Also -------- ndarray.reshape : Equivalent method. - + Notes ----- - + It is not always possible to change the shape of an array without copying the data. If you want an error to be raise if the data is copied, you should assign the new shape to the shape attribute of the array diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -105,17 +105,14 @@ new_size = space.int_w(w_iterable) if new_size < 0: new_size = old_size - new_shape = [new_size, ] + new_shape = [new_size] else: neg_dim = -1 batch = space.listview(w_iterable) - #Allow for shape = (1,2,3) or shape = ((1,2,3)) - if len(batch) > 1 and space.issequence_w(batch[0]): - batch = space.listview(batch[0]) new_size = 1 if len(batch) < 1: if old_size == 1: - #Scalars can have an empty size. + # Scalars can have an empty size. new_size = 1 else: new_size = 0 @@ -140,16 +137,16 @@ space.wrap("total size of new array must be unchanged")) return new_shape -#Recalculating strides. Find the steps that the iteration does for each -#dimension, given the stride and shape. Then try to create a new stride that -#fits the new shape, using those steps. If there is a shape/step mismatch -#(meaning that the realignment of elements crosses from one step into another) -#return None so that the caller can raise an exception. +# Recalculating strides. Find the steps that the iteration does for each +# dimension, given the stride and shape. Then try to create a new stride that +# fits the new shape, using those steps. If there is a shape/step mismatch +# (meaning that the realignment of elements crosses from one step into another) +# return None so that the caller can raise an exception. 
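The get_shape_from_iterable() hunk above keeps the rule that at most one dimension may be given as -1 and is then inferred from the total size. A compact stand-alone version of that rule, reusing the same error messages (illustrative only, not the interpreter code):

    def resolve_shape(old_size, new_shape):
        known = 1
        unknown = -1
        for i, dim in enumerate(new_shape):
            if dim < 0:
                if unknown >= 0:
                    raise ValueError("can only specify one unknown dimension")
                unknown = i
            else:
                known *= dim
        shape = list(new_shape)
        if unknown >= 0:
            shape[unknown] = old_size // known
            known *= shape[unknown]
        if known != old_size:
            raise ValueError("total size of new array must be unchanged")
        return shape

    assert resolve_shape(96, [12, -1]) == [12, 8]
    assert resolve_shape(12, [3, 4]) == [3, 4]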
def calc_new_strides(new_shape, old_shape, old_strides): - #Return the proper strides for new_shape, or None - # if the mapping crosses stepping boundaries + # Return the proper strides for new_shape, or None if the mapping crosses + # stepping boundaries - #Assumes that prod(old_shape) ==prod(new_shape), len(old_shape) > 1 and + # Assumes that prod(old_shape) == prod(new_shape), len(old_shape) > 1, and # len(new_shape) > 0 steps = [] last_step = 1 @@ -589,7 +586,7 @@ def descr_set_shape(self, space, w_iterable): concrete = self.get_concrete() - new_shape = get_shape_from_iterable(space, + new_shape = get_shape_from_iterable(space, concrete.find_size(), w_iterable) concrete.setshape(space, new_shape) @@ -717,11 +714,6 @@ def _index_of_single_item(self, space, w_idx): if space.isinstance_w(w_idx, space.w_int): idx = space.int_w(w_idx) - if not self.shape: - if idx != 0: - raise OperationError(space.w_IndexError, - space.wrap("index out of range")) - return 0 if idx < 0: idx = self.shape[0] + idx if idx < 0 or idx >= self.shape[0]: @@ -845,28 +837,33 @@ return W_NDimSlice(self, new_sig, start, strides[:], backstrides[:], shape[:]) - def descr_reshape(self, space, w_args): + def descr_reshape(self, space, args_w): """reshape(...) a.reshape(shape) - + Returns an array containing the same data with a new shape. - - Refer to `%s.reshape` for full documentation. - + + Refer to `numpypy.reshape` for full documentation. + See Also -------- - numpy.reshape : equivalent function -""" % 'numpypy' + numpypy.reshape : equivalent function +""" + if len(args_w) == 1: + w_shape = args_w[0] + else: + w_shape = space.newlist(args_w) concrete = self.get_concrete() - new_shape = get_shape_from_iterable(space, - concrete.find_size(), w_args) - #Since we got to here, prod(new_shape) == self.size - new_strides = calc_new_strides(new_shape, + new_shape = get_shape_from_iterable(space, + concrete.find_size(), w_shape) + # Since we got to here, prod(new_shape) == self.size + new_strides = calc_new_strides(new_shape, concrete.shape, concrete.strides) if new_strides: - #We can create a view, strides somehow match up. + # We can create a view, strides somehow match up. new_sig = signature.Signature.find_sig([ - W_NDimSlice.signature, self.signature, ]) + W_NDimSlice.signature, self.signature + ]) ndims = len(new_shape) new_backstrides = [0] * ndims for nd in range(ndims): @@ -874,7 +871,7 @@ arr = W_NDimSlice(self, new_sig, self.start, new_strides, new_backstrides, new_shape) else: - #Create copy with contiguous data + # Create copy with contiguous data arr = concrete.copy() arr.setshape(space, new_shape) return arr @@ -975,7 +972,7 @@ return 'Scalar' def setshape(self, space, new_shape): - # In order to get here, we already checked that prod(new_shape)==1, + # In order to get here, we already checked that prod(new_shape) == 1, # so in order to have a consistent API, let it go through. 
pass @@ -1175,8 +1172,8 @@ if len(self.shape) < 1: return elif len(self.shape) < 2: - #TODO: this code could be refactored into calc_strides - #but then calc_strides would have to accept a stepping factor + # TODO: this code could be refactored into calc_strides + # but then calc_strides would have to accept a stepping factor strides = [] backstrides = [] s = self.strides[0] diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -330,8 +330,8 @@ def test_scalar(self): from numpypy import array, dtype a = array(3) - #assert a[0] == 3 raises(IndexError, "a[0]") + raises(IndexError, "a[0] = 5") assert a.size == 1 assert a.shape == () assert a.dtype is dtype(int) @@ -408,18 +408,23 @@ u = v.reshape(64) assert y[1, 2, 1] == z[5, 1] y[1, 2, 1] = 1000 - #z, y, w, v are views of eachother + # z, y, w, v are views of eachother assert z[5, 1] == 1000 assert v[1, 1, 1] == 1000 assert w[41] == 1000 - #u is not a view, it is a copy! + # u is not a view, it is a copy! assert u[25] == 41 + a = zeros((5, 2)) + assert a.reshape(-1).shape == (10,) + + raises(ValueError, arange(10).reshape, (5, -1, -1)) + def test_reshape_varargs(self): - skip("How do I do varargs in rpython? reshape should accept a" - " variable number of arguments") + from numpypy import arange z = arange(96).reshape(12, -1) y = z.reshape(4, 3, 8) + assert y.shape == (4, 3, 8) def test_add(self): from numpypy import array diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -185,8 +185,7 @@ # sure it was optimized correctly. # XXX the comment above is wrong now. We need preferrably a way to # count the two loops separately - py.test.skip("counting exact number of classes is nonsense") - self.check_resops({'setarrayitem_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 35, + self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, From noreply at buildbot.pypy.org Tue Dec 6 22:58:30 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Tue, 6 Dec 2011 22:58:30 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: merge Message-ID: <20111206215830.7D49F82ABA@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50229:e3ea42ebc83e Date: 2011-12-06 22:35 +0100 http://bitbucket.org/pypy/pypy/changeset/e3ea42ebc83e/ Log: merge diff --git a/pypy/translator/c/src/signals.h b/pypy/translator/c/src/signals.h --- a/pypy/translator/c/src/signals.h +++ b/pypy/translator/c/src/signals.h @@ -7,7 +7,7 @@ #include #include -#include "stc/signed_defn.h" +#include "src/signed_defn.h" #ifdef _WIN32 #include From noreply at buildbot.pypy.org Tue Dec 6 23:05:33 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Dec 2011 23:05:33 +0100 (CET) Subject: [pypy-commit] pypy default: "Clarify." Quotes intended. Message-ID: <20111206220533.E40D88205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50230:245a08e9eb96 Date: 2011-12-06 23:05 +0100 http://bitbucket.org/pypy/pypy/changeset/245a08e9eb96/ Log: "Clarify." Quotes intended. 
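The reshape tests above pin down when a reshape may stay a view of the same data and when it silently becomes a copy. The same numbers can be checked against CPython's numpy (assuming numpy is available), which is the behaviour those tests are mirroring:

    import numpy as np

    y = np.arange(96).reshape(4, 3, 8)
    v = y[:, ::2, :]          # strided view, no longer contiguous
    w = y.reshape(96)         # contiguous source: the reshape stays a view
    u = v.reshape(64)         # no strides fit this shape, so numpy copies

    y[1, 2, 1] = 1000
    assert w[41] == 1000      # w shares y's buffer and sees the write
    assert u[25] == 41        # u was copied before the write and keeps 41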
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1,4 +1,3 @@ -import itertools import pypy from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction @@ -519,8 +518,8 @@ exception_types_w = self.export_builtin_exceptions() # initialize with "bootstrap types" from objspace (e.g. w_None) - types_w = itertools.chain(self.get_builtin_types().iteritems(), - exception_types_w.iteritems()) + types_w = (self.get_builtin_types().items() + + exception_types_w.items()) for name, w_type in types_w: self.setitem(self.builtin.w_dict, self.wrap(name), w_type) From noreply at buildbot.pypy.org Tue Dec 6 23:14:17 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Dec 2011 23:14:17 +0100 (CET) Subject: [pypy-commit] pypy default: Hum, I'll revert this if I'm wrong, but I don't see how Message-ID: <20111206221417.739BD8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50231:9873bcdab29c Date: 2011-12-06 23:13 +0100 http://bitbucket.org/pypy/pypy/changeset/9873bcdab29c/ Log: Hum, I'll revert this if I'm wrong, but I don't see how w_iterables can ever be None. diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -439,9 +439,6 @@ self.w_it = self.space.iter(self.space.next(self.w_iterables)) def next_w(self): - if not self.w_iterables: - # already stopped - raise OperationError(self.space.w_StopIteration, self.space.w_None) if not self.w_it: self._advance() try: From noreply at buildbot.pypy.org Tue Dec 6 23:21:02 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Dec 2011 23:21:02 +0100 (CET) Subject: [pypy-commit] pypy default: Kill! Kill! Arrr arr arr. Message-ID: <20111206222102.7C0DE8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50232:bb527bdf9535 Date: 2011-12-06 23:20 +0100 http://bitbucket.org/pypy/pypy/changeset/bb527bdf9535/ Log: Kill! Kill! Arrr arr arr. 
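The interp_itertools hunk above trims next_w(): the iterator simply advances to the next iterable and retries when the current one is exhausted, and running out of iterables is what ends the iteration. A plain-Python analogue of that advance-and-retry pattern (a sketch of the idea only, not PyPy's implementation):

    class Chain(object):
        def __init__(self, *iterables):
            self.iterables = iter(iterables)
            self.it = None

        def _advance(self):
            # StopIteration from the outer iterator ends the whole chain
            self.it = iter(next(self.iterables))

        def __iter__(self):
            return self

        def __next__(self):
            if self.it is None:
                self._advance()
            while True:
                try:
                    return next(self.it)
                except StopIteration:
                    self._advance()

        next = __next__   # Python 2 spelling of the same protocol

    assert list(Chain([1, 2], [], [3])) == [1, 2, 3]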
diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -1,6 +1,5 @@ """codegen helpers and AST constant folding.""" import sys -import itertools from pypy.interpreter.astcompiler import ast, consts, misc from pypy.tool import stdlib_opcode as ops @@ -146,8 +145,7 @@ } unrolling_unary_folders = unrolling_iterable(unary_folders.items()) -for folder in itertools.chain(binary_folders.itervalues(), - unary_folders.itervalues()): +for folder in binary_folders.values() + unary_folders.values(): folder._always_inline_ = True del folder From noreply at buildbot.pypy.org Tue Dec 6 23:50:05 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 6 Dec 2011 23:50:05 +0100 (CET) Subject: [pypy-commit] pypy default: typo fix Message-ID: <20111206225005.01BD98205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50233:648537e5b016 Date: 2011-12-06 17:49 -0500 http://bitbucket.org/pypy/pypy/changeset/648537e5b016/ Log: typo fix diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -18,7 +18,7 @@ VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True}) class W_Dtype(Wrappable): - _immuable_fields_ = ["itemtype", "num", "kind"] + _immutable_fields_ = ["itemtype", "num", "kind"] def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[]): self.signature = signature.BaseSignature() From noreply at buildbot.pypy.org Tue Dec 6 23:50:06 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 6 Dec 2011 23:50:06 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20111206225006.2B3E38205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50234:33ec28c6d811 Date: 2011-12-06 17:49 -0500 http://bitbucket.org/pypy/pypy/changeset/33ec28c6d811/ Log: merged upstream diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -1,6 +1,5 @@ """codegen helpers and AST constant folding.""" import sys -import itertools from pypy.interpreter.astcompiler import ast, consts, misc from pypy.tool import stdlib_opcode as ops @@ -146,8 +145,7 @@ } unrolling_unary_folders = unrolling_iterable(unary_folders.items()) -for folder in itertools.chain(binary_folders.itervalues(), - unary_folders.itervalues()): +for folder in binary_folders.values() + unary_folders.values(): folder._always_inline_ = True del folder diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1,4 +1,3 @@ -import itertools import pypy from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction @@ -519,8 +518,8 @@ exception_types_w = self.export_builtin_exceptions() # initialize with "bootstrap types" from objspace (e.g. 
w_None) - types_w = itertools.chain(self.get_builtin_types().iteritems(), - exception_types_w.iteritems()) + types_w = (self.get_builtin_types().items() + + exception_types_w.items()) for name, w_type in types_w: self.setitem(self.builtin.w_dict, self.wrap(name), w_type) diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -439,9 +439,6 @@ self.w_it = self.space.iter(self.space.next(self.w_iterables)) def next_w(self): - if not self.w_iterables: - # already stopped - raise OperationError(self.space.w_StopIteration, self.space.w_None) if not self.w_it: self._advance() try: From noreply at buildbot.pypy.org Tue Dec 6 23:51:32 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 6 Dec 2011 23:51:32 +0100 (CET) Subject: [pypy-commit] pypy py3k: Return bytes for Linux abstract namespace sockets Message-ID: <20111206225132.37DB78205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50235:f65121a60b37 Date: 2011-12-05 00:21 +0100 http://bitbucket.org/pypy/pypy/changeset/f65121a60b37/ Log: Return bytes for Linux abstract namespace sockets diff --git a/pypy/rlib/rsocket.py b/pypy/rlib/rsocket.py --- a/pypy/rlib/rsocket.py +++ b/pypy/rlib/rsocket.py @@ -504,7 +504,12 @@ self.get_path() == other.get_path()) def as_object(self, fd, space): - return space.wrap(self.get_path()) + path = self.get_path() + if _c.linux and len(path) > 0 and path[0] == '\x00': + # Linux abstract namespace + return space.wrapbytes(path) + else: + return space.wrap(path) def from_object(space, w_address): return UNIXAddress(space.str_w(w_address)) From noreply at buildbot.pypy.org Tue Dec 6 23:51:33 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 6 Dec 2011 23:51:33 +0100 (CET) Subject: [pypy-commit] pypy py3k: socket.inet_aton() should return bytes Message-ID: <20111206225133.64CB88205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50236:fc9f0d9b77db Date: 2011-12-05 22:08 +0100 http://bitbucket.org/pypy/pypy/changeset/fc9f0d9b77db/ Log: socket.inet_aton() should return bytes diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -202,9 +202,9 @@ buf = rsocket.inet_aton(ip) except SocketError, e: raise converted_error(space, e) - return space.wrap(buf) + return space.wrapbytes(buf) - at unwrap_spec(packed=str) + at unwrap_spec(packed="bufferstr") def inet_ntoa(space, packed): """inet_ntoa(packed_ip) -> ip_address_string diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -147,10 +147,10 @@ packed = socket.inet_aton(ip) w_p = space.appexec([w_socket, space.wrap(ip)], "(_socket, ip): return _socket.inet_aton(ip)") - assert space.unwrap(w_p) == packed - w_ip = space.appexec([w_socket, space.wrap(packed)], + assert space.bytes_w(w_p) == packed + w_ip = space.appexec([w_socket, w_p], "(_socket, p): return _socket.inet_ntoa(p)") - assert space.unwrap(w_ip) == ip + assert space.unicode_w(w_ip) == ip def test_pton_ntop_ipv4(): if not hasattr(socket, 'inet_pton'): From noreply at buildbot.pypy.org Tue Dec 6 23:51:34 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 6 Dec 2011 23:51:34 +0100 (CET) Subject: [pypy-commit] pypy py3k: 
socket.getaddrinfo() accept strings for the 'port' parameter Message-ID: <20111206225134.9148E8205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50237:df2edb52b29e Date: 2011-12-05 22:48 +0100 http://bitbucket.org/pypy/pypy/changeset/df2edb52b29e/ Log: socket.getaddrinfo() accept strings for the 'port' parameter diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -270,7 +270,9 @@ port = None elif space.is_true(space.isinstance(w_port, space.w_int)): port = str(space.int_w(w_port)) - elif space.is_true(space.isinstance(w_port, space.w_str)): + elif space.is_true(space.isinstance(w_port, space.w_bytes)): + port = space.bytes_w(w_port) + elif space.is_true(space.isinstance(w_port, space.w_unicode)): port = space.str_w(w_port) else: raise OperationError(space.w_TypeError, diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -223,10 +223,12 @@ w_l = space.appexec([w_socket, space.wrapbytes(host), space.wrap(port)], "(_socket, host, port): return _socket.getaddrinfo(host, port)") assert space.unwrap(w_l) == info - py.test.skip("Unicode conversion is too slow") w_l = space.appexec([w_socket, space.wrap(host), space.wrap(port)], "(_socket, host, port): return _socket.getaddrinfo(host, port)") assert space.unwrap(w_l) == info + w_l = space.appexec([w_socket, space.wrapbytes(host), space.wrap('smtp')], + "(_socket, host, port): return _socket.getaddrinfo(host, port)") + assert space.unwrap(w_l) == socket.getaddrinfo(host, 'smtp') def test_unknown_addr_as_object(): c_addr = lltype.malloc(rsocket._c.sockaddr, flavor='raw') From noreply at buildbot.pypy.org Tue Dec 6 23:51:35 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 6 Dec 2011 23:51:35 +0100 (CET) Subject: [pypy-commit] pypy py3k: Fixes in _ssl module Message-ID: <20111206225135.BD6078205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50238:c0288af60001 Date: 2011-12-06 01:35 +0100 http://bitbucket.org/pypy/pypy/changeset/c0288af60001/ Log: Fixes in _ssl module diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -1,6 +1,6 @@ from __future__ import with_statement from pypy.rpython.lltypesystem import rffi, lltype -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, wrap_oserror from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -8,6 +8,7 @@ from pypy.rlib.rarithmetic import intmask from pypy.rlib import rpoll, rsocket from pypy.rlib.ropenssl import * +from pypy.rlib.rposix import get_errno from pypy.module._socket import interp_socket import weakref @@ -186,17 +187,19 @@ errno = get_errno() if errno: libssl_ERR_clear_error() - raise_from_errno(space.w_IOError, errno) + raise wrap_oserror(space, OSError(errno, ''), + exception_name = 'w_IOError') else: raise _ssl_seterror(space, None, -1) - ret = libssl_SSL_CTX_use_PrivateKey_file(ss.ctx, key_file, + ret = libssl_SSL_CTX_use_PrivateKey_file(self.ctx, keyfile, SSL_FILETYPE_PEM) if ret != 1: errno = get_errno() if errno: libssl_ERR_clear_error() - 
raise_from_errno(space.w_IOError, errno) + raise wrap_oserror(space, OSError(errno, ''), + exception_name = 'w_IOError') else: raise _ssl_seterror(space, None, -1) @@ -222,7 +225,8 @@ errno = get_errno() if errno: libssl_ERR_clear_error() - raise_from_errno(space.w_IOError, errno) + raise wrap_oserror(space, OSError(errno, ''), + exception_name = 'w_IOError') else: raise _ssl_seterror(space, None, -1) @@ -249,10 +253,10 @@ w_stats = space.newdict() for name, ssl_func in SSL_CTX_STATS: w_value = space.wrap(ssl_func(self.ctx)) - space.setitem_str(w_stats, attr, w_value) + space.setitem_str(w_stats, name, w_value) return w_stats - def set_default_verify_paths_w(self): + def set_default_verify_paths_w(self, space): ret = libssl_SSL_CTX_set_default_verify_paths(self.ctx) if ret != 1: raise _ssl_seterror(space, None, -1) @@ -264,6 +268,11 @@ verify_mode = GetSetProperty(SSLContext.get_verify_mode_w, SSLContext.set_verify_mode_w), _wrap_socket = interp2app(SSLContext.wrap_socket_w), + set_ciphers = interp2app(SSLContext.set_ciphers_w), + load_cert_chain = interp2app(SSLContext.load_cert_chain_w), + load_verify_locations = interp2app(SSLContext.load_verify_locations_w), + session_stats = interp2app(SSLContext.session_stats_w), + set_default_verify_paths=interp2app(SSLContext.set_default_verify_paths_w), ) diff --git a/pypy/rlib/ropenssl.py b/pypy/rlib/ropenssl.py --- a/pypy/rlib/ropenssl.py +++ b/pypy/rlib/ropenssl.py @@ -189,11 +189,13 @@ ssl_external('SSL_CTX_ctrl', [SSL_CTX, rffi.INT, rffi.INT, rffi.VOIDP], rffi.INT) ssl_external('SSL_CTX_set_verify', [SSL_CTX, rffi.INT, rffi.VOIDP], lltype.Void) ssl_external('SSL_CTX_get_verify_mode', [SSL_CTX], rffi.INT) +ssl_external('SSL_CTX_set_default_verify_paths', [SSL_CTX], rffi.INT) ssl_external('SSL_CTX_set_cipher_list', [SSL_CTX, rffi.CCHARP], rffi.INT) ssl_external('SSL_CTX_load_verify_locations', [SSL_CTX, rffi.CCHARP, rffi.CCHARP], rffi.INT) +ssl_external('SSL_CTX_check_private_key', [SSL_CTX], rffi.INT) ssl_external('SSL_CTX_set_session_id_context', [SSL_CTX, rffi.CCHARP, rffi.UINT], rffi.INT) SSL_CTX_STATS_NAMES = """ - number connect connect_good connect_renegotiate accept accept_god + number connect connect_good connect_renegotiate accept accept_good accept_renegotiate hits misses timeouts cache_full""".split() SSL_CTX_STATS = unrolling_iterable( (name, external('SSL_CTX_sess_' + name, [SSL_CTX], rffi.LONG)) @@ -259,6 +261,7 @@ ssl_external('ERR_get_error', [], rffi.INT) ssl_external('ERR_error_string', [rffi.ULONG, rffi.CCHARP], rffi.CCHARP) +ssl_external('ERR_clear_error', [], lltype.Void) ssl_external('SSL_free', [SSL], lltype.Void, threadsafe=False) ssl_external('SSL_CTX_free', [SSL_CTX], lltype.Void, threadsafe=False) From noreply at buildbot.pypy.org Wed Dec 7 11:32:20 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Dec 2011 11:32:20 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: start refactoring signature. not yet rpython Message-ID: <20111207103220.B0FD18205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50239:1f2faa79c08d Date: 2011-12-07 12:21 +0200 http://bitbucket.org/pypy/pypy/changeset/1f2faa79c08d/ Log: start refactoring signature. 
not yet rpython diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -21,7 +21,6 @@ _immutable_fields_ = ["itemtype", "num", "kind"] def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[]): - self.signature = signature.BaseSignature() self.itemtype = itemtype self.num = num self.kind = kind @@ -29,6 +28,10 @@ self.char = char self.w_box_type = w_box_type self.alternate_constructors = alternate_constructors + self.array_signature = signature.ArraySignature() + self.scalar_signature = signature.ScalarSignature() + #self.flatiter_signature = signature.FlatiterSignature() + #self.view_signature = signature.ViewSignature() def malloc(self, length): # XXX find out why test_zjit explodes with tracking of allocations @@ -228,4 +231,4 @@ ) def get_dtype_cache(space): - return space.fromcache(DtypeCache) \ No newline at end of file + return space.fromcache(DtypeCache) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -831,10 +831,7 @@ shape += self.shape[s:] strides += self.strides[s:] backstrides += self.backstrides[s:] - new_sig = signature.Signature.find_sig([ - W_NDimSlice.signature, self.signature, - ]) - return W_NDimSlice(self, new_sig, start, strides[:], backstrides[:], + return W_NDimSlice(self, start, strides[:], backstrides[:], shape[:]) def descr_reshape(self, space, args_w): @@ -861,14 +858,11 @@ concrete.shape, concrete.strides) if new_strides: # We can create a view, strides somehow match up. - new_sig = signature.Signature.find_sig([ - W_NDimSlice.signature, self.signature - ]) ndims = len(new_shape) new_backstrides = [0] * ndims for nd in range(ndims): new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] - arr = W_NDimSlice(self, new_sig, self.start, new_strides, + arr = W_NDimSlice(self, self.start, new_strides, new_backstrides, new_shape) else: # Create copy with contiguous data @@ -891,9 +885,6 @@ concrete = self.get_concrete() if len(concrete.shape) < 2: return space.wrap(self) - new_sig = signature.Signature.find_sig([ - W_NDimSlice.signature, self.signature - ]) strides = [] backstrides = [] shape = [] @@ -901,7 +892,7 @@ strides.append(concrete.strides[i]) backstrides.append(concrete.backstrides[i]) shape.append(concrete.shape[i]) - return space.wrap(W_NDimSlice(concrete, new_sig, self.start, strides[:], + return space.wrap(W_NDimSlice(concrete, self.start, strides[:], backstrides[:], shape[:])) def descr_get_flatiter(self, space): @@ -914,7 +905,7 @@ raise NotImplementedError def descr_debug_repr(self, space): - return space.wrap(self.debug_repr()) + return space.wrap(self.signature.debug_repr()) def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): @@ -934,8 +925,6 @@ """ Intermediate class representing a literal. 
""" - signature = signature.BaseSignature() - _attrs_ = ["dtype", "value", "shape"] def __init__(self, dtype, value): @@ -943,6 +932,7 @@ BaseArray.__init__(self, [], 'C') self.dtype = dtype self.value = value + self.signature = dtype.scalar_signature def find_size(self): return 1 @@ -968,9 +958,6 @@ def copy(self): return Scalar(self.dtype, self.value) - def debug_repr(self): - return 'Scalar' - def setshape(self, space, new_shape): # In order to get here, we already checked that prod(new_shape) == 1, # so in order to have a consistent API, let it go through. @@ -1054,30 +1041,18 @@ return self.res_dtype def _eval(self, iter): + # XXX deal with forced args assert isinstance(iter, Call1Iterator) val = self.values.eval(iter.child).convert_to(self.res_dtype) sig = jit.promote(self.signature) - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call1) - return call_sig.func(self.res_dtype, val) + assert isinstance(sig, signature.Call1) + return sig.func(self.res_dtype, val) def start_iter(self, res_shape=None): if self.forced_result is not None: return self.forced_result.start_iter(res_shape) return Call1Iterator(self.values.start_iter(res_shape)) - def debug_repr(self): - sig = self.signature - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call1) - if self.forced_result is not None: - return 'Call1(%s, forced=%s)' % (call_sig.name, - self.forced_result.debug_repr()) - return 'Call1(%s, %s)' % (call_sig.name, - self.values.debug_repr()) - class Call2(VirtualArray): """ Intermediate class for performing binary operations. @@ -1112,12 +1087,11 @@ lhs = self.left.eval(iter.left).convert_to(self.calc_dtype) rhs = self.right.eval(iter.right).convert_to(self.calc_dtype) sig = jit.promote(self.signature) - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call2) - return call_sig.func(self.calc_dtype, lhs, rhs) + assert isinstance(sig, signature.Call2) + return sig.func(self.calc_dtype, lhs, rhs) def debug_repr(self): + xxx sig = self.signature assert isinstance(sig, signature.Signature) call_sig = sig.components[0] @@ -1134,11 +1108,10 @@ Class for representing views of arrays, they will reflect changes of parent arrays. 
Example: slices """ - def __init__(self, parent, signature, strides, backstrides, shape): + def __init__(self, parent, strides, backstrides, shape): self.strides = strides self.backstrides = backstrides BaseArray.__init__(self, shape, parent.order) - self.signature = signature self.parent = parent self.invalidates = parent.invalidates @@ -1203,13 +1176,11 @@ self.shape = new_shape[:] class W_NDimSlice(ViewArray): - signature = signature.BaseSignature() - - def __init__(self, parent, signature, start, strides, backstrides, - shape): + def __init__(self, parent, start, strides, backstrides, shape): if isinstance(parent, W_NDimSlice): parent = parent.parent - ViewArray.__init__(self, parent, signature, strides, backstrides, shape) + ViewArray.__init__(self, parent, strides, backstrides, shape) + self.signature = signature.find_sig(signature.ViewSignature(parent.signature)) self.start = start self.size = 1 for sh in shape: @@ -1272,7 +1243,7 @@ self.size = size self.dtype = dtype self.storage = dtype.malloc(size) - self.signature = dtype.signature + self.signature = dtype.array_signature def get_concrete(self): return self @@ -1470,21 +1441,19 @@ class W_FlatIterator(ViewArray): - signature = signature.BaseSignature() @jit.unroll_safe def __init__(self, arr): size = 1 for sh in arr.shape: size *= sh - new_sig = signature.Signature.find_sig([ - W_FlatIterator.signature, arr.signature - ]) - ViewArray.__init__(self, arr, new_sig, [arr.strides[-1]], + ViewArray.__init__(self, arr, [arr.strides[-1]], [arr.backstrides[-1]], [size]) self.shapelen = len(arr.shape) self.arr = arr self.iter = self.start_iter() + self.signature = signature.find_sig(signature.FlatiterSignature( + arr.signature)) def start_iter(self, res_shape=None): if res_shape is not None and res_shape != self.shape: diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -78,9 +78,8 @@ start = start.next(shapelen) else: value = self.identity.convert_to(dtype) - new_sig = signature.Signature.find_sig([ - self.reduce_signature, obj.signature - ]) + new_sig = signature.find_sig( + signature.ReduceSignature(self.func, obj.signature)) return self.reduce_loop(new_sig, shapelen, start, value, obj, dtype) def reduce_loop(self, signature, shapelen, i, value, obj, dtype): @@ -101,7 +100,6 @@ W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity) self.func = func - self.signature = signature.Call1(func) def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call1, @@ -117,7 +115,8 @@ if isinstance(w_obj, Scalar): return self.func(res_dtype, w_obj.value.convert_to(res_dtype)) - new_sig = signature.Signature.find_sig([self.signature, w_obj.signature]) + new_sig = signature.find_sig(signature.Call1(self.func, + w_obj.signature)) w_res = Call1(new_sig, w_obj.shape, res_dtype, w_obj, w_obj.order) w_obj.add_invalidates(w_res) return w_res @@ -133,8 +132,6 @@ W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity) self.func = func self.comparison_func = comparison_func - self.signature = signature.Call2(func) - self.reduce_signature = signature.BaseSignature() def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, @@ -158,9 +155,9 @@ w_rhs.value.convert_to(calc_dtype) ) - new_sig = signature.Signature.find_sig([ - self.signature, w_lhs.signature, w_rhs.signature - ]) + new_sig = signature.find_sig(signature.Call2(self.func, + 
w_lhs.signature, + w_rhs.signature)) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) w_res = Call2(new_sig, new_shape, calc_dtype, res_dtype, w_lhs, w_rhs) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -2,53 +2,129 @@ from pypy.rlib.rarithmetic import intmask -def components_eq(lhs, rhs): - if len(lhs) != len(rhs): - return False - for i in range(len(lhs)): - v1, v2 = lhs[i], rhs[i] - if type(v1) is not type(v2) or not v1.eq(v2): - return False - return True +# def components_eq(lhs, rhs): +# if len(lhs) != len(rhs): +# return False +# for i in range(len(lhs)): +# v1, v2 = lhs[i], rhs[i] +# if type(v1) is not type(v2) or not v1.eq(v2): +# return False +# return True -def components_hash(components): - res = 0x345678 - for component in components: - res = intmask((1000003 * res) ^ component.hash()) - return res +# def components_hash(components): +# res = 0x345678 +# for component in components: +# res = intmask((1000003 * res) ^ component.hash()) +# return res -class BaseSignature(object): - _attrs_ = [] +def sigeq(one, two): + return one.eq(two) +def sighash(sig): + return sig.hash() + +known_sigs = r_dict(sigeq, sighash) + +def find_sig(sig): + return known_sigs.setdefault(sig, sig) + +class Signature(object): def eq(self, other): return self is other def hash(self): return compute_identity_hash(self) -class Signature(BaseSignature): - _known_sigs = r_dict(components_eq, components_hash) +class ViewSignature(Signature): + def __init__(self, child): + self.child = child + + def eq(self, other): + if type(self) != type(other): + return False + return self.child.eq(other.child) - _attrs_ = ["components"] - _immutable_fields_ = ["components[*]"] + def hash(self): + return self.child.hash() ^ 0x12345 - def __init__(self, components): - self.components = components + def debug_repr(self): + return 'Slice(%s)' % self.child.debug_repr() - @staticmethod - def find_sig(components): - return Signature._known_sigs.setdefault(components, Signature(components)) +class ArraySignature(Signature): + def debug_repr(self): + return 'Array' -class Call1(BaseSignature): - _immutable_fields_ = ["func", "name"] +class ScalarSignature(Signature): + def debug_repr(self): + return 'Scalar' - def __init__(self, func): +class FlatiterSignature(ViewSignature): + def debug_repr(self): + return 'FlatIter(%s)' % self.child.debug_repr() + +class Call1(Signature): + def __init__(self, func, child): self.func = func - self.name = func.func_name + self.child = child -class Call2(BaseSignature): - _immutable_fields_ = ["func", "name"] + def hash(self): + return compute_identity_hash(self.func) ^ (self.child.hash() << 1) - def __init__(self, func): + def eq(self, other): + if type(other) != type(self): + return False + return self.child.eq(other.child) + + def debug_repr(self): + return 'Call1(%s, %s)' % (self.func.func_name, + self.child.debug_repr()) + +class Call2(Signature): + def __init__(self, func, left, right): self.func = func - self.name = func.func_name + self.left = left + self.right = right + + def hash(self): + return (compute_identity_hash(self.func) ^ (self.left.hash() << 1) ^ + (self.right.hash() << 2)) + + def eq(self, other): + if type(other) != type(self): + return False + return self.left.eq(other.left) and self.right.eq(other.right) + + def debug_repr(self): + return 'Call2(%s, %s, %s)' % (self.func.func_name, + self.left.debug_repr(), + self.right.debug_repr()) + 
+class ReduceSignature(Call1): + pass + +# class Signature(BaseSignature): +# _known_sigs = r_dict(components_eq, components_hash) + +# _attrs_ = ["components"] +# _immutable_fields_ = ["components[*]"] + +# def __init__(self, components): +# self.components = components + +# @staticmethod +# def find_sig(components): +# return Signature._known_sigs.setdefault(components, Signature(components)) + +# class Call1(BaseSignature): +# _immutable_fields_ = ["func", "name"] + +# def __init__(self, func): +# self.func = func +# self.name = func.func_name + +# class Call2(BaseSignature): +# _immutable_fields_ = ["func", "name"] + +# def __init__(self, func): +# self.func = func +# self.name = func.func_name diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -8,7 +8,8 @@ class MockDtype(object): - signature = signature.BaseSignature() + array_signature = signature.ArraySignature() + scalar_signature = signature.ScalarSignature() def malloc(self, size): return None @@ -877,6 +878,7 @@ assert sin(a).__debug_repr__() == 'Call1(sin, Array)' b = a + a b[0] = 3 + skip("not there") assert b.__debug_repr__() == 'Call2(add, forced=Array)' class AppTestMultiDim(BaseNumpyAppTest): From noreply at buildbot.pypy.org Wed Dec 7 12:02:11 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Dec 2011 12:02:11 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: make this rpython Message-ID: <20111207110211.7B1658205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50240:5ccd3a82d52d Date: 2011-12-07 13:01 +0200 http://bitbucket.org/pypy/pypy/changeset/5ccd3a82d52d/ Log: make this rpython diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1041,12 +1041,11 @@ return self.res_dtype def _eval(self, iter): - # XXX deal with forced args assert isinstance(iter, Call1Iterator) val = self.values.eval(iter.child).convert_to(self.res_dtype) sig = jit.promote(self.signature) assert isinstance(sig, signature.Call1) - return sig.func(self.res_dtype, val) + return sig.unfunc(self.res_dtype, val) def start_iter(self, res_shape=None): if self.forced_result is not None: @@ -1088,20 +1087,7 @@ rhs = self.right.eval(iter.right).convert_to(self.calc_dtype) sig = jit.promote(self.signature) assert isinstance(sig, signature.Call2) - return sig.func(self.calc_dtype, lhs, rhs) - - def debug_repr(self): - xxx - sig = self.signature - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call2) - if self.forced_result is not None: - return 'Call2(%s, forced=%s)' % (call_sig.name, - self.forced_result.debug_repr()) - return 'Call2(%s, %s, %s)' % (call_sig.name, - self.left.debug_repr(), - self.right.debug_repr()) + return sig.binfunc(self.calc_dtype, lhs, rhs) class ViewArray(BaseArray): """ @@ -1221,9 +1207,6 @@ def setitem(self, item, value): self.parent.setitem(item, value) - def debug_repr(self): - return 'Slice(%s)' % self.parent.debug_repr() - def copy(self): array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) iter = self.start_iter() @@ -1293,9 +1276,6 @@ self.shape = new_shape self.calc_strides(new_shape) - def debug_repr(self): - return 'Array' - def __del__(self): lltype.free(self.storage, 
flavor='raw', track_allocation=False) @@ -1477,9 +1457,6 @@ def descr_iter(self): return self - def debug_repr(self): - return 'FlatIter(%s)' % self.arr.debug_repr() - W_FlatIterator.typedef = TypeDef( 'flatiter', diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -79,7 +79,9 @@ else: value = self.identity.convert_to(dtype) new_sig = signature.find_sig( - signature.ReduceSignature(self.func, obj.signature)) + signature.ReduceSignature(self.func, self.name, + dtype.scalar_signature, + obj.signature)) return self.reduce_loop(new_sig, shapelen, start, value, obj, dtype) def reduce_loop(self, signature, shapelen, i, value, obj, dtype): @@ -116,6 +118,7 @@ return self.func(res_dtype, w_obj.value.convert_to(res_dtype)) new_sig = signature.find_sig(signature.Call1(self.func, + self.name, w_obj.signature)) w_res = Call1(new_sig, w_obj.shape, res_dtype, w_obj, w_obj.order) w_obj.add_invalidates(w_res) @@ -156,6 +159,7 @@ ) new_sig = signature.find_sig(signature.Call2(self.func, + self.name, w_lhs.signature, w_rhs.signature)) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -1,4 +1,4 @@ -from pypy.rlib.objectmodel import r_dict, compute_identity_hash +from pypy.rlib.objectmodel import r_dict, compute_identity_hash, compute_hash from pypy.rlib.rarithmetic import intmask @@ -33,14 +33,17 @@ return self is other def hash(self): - return compute_identity_hash(self) + return compute_hash(self) + + def _freeze_(self): + self._hash = id(self) class ViewSignature(Signature): def __init__(self, child): self.child = child def eq(self, other): - if type(self) != type(other): + if type(self) is not type(other): return False return self.child.eq(other.child) @@ -63,43 +66,49 @@ return 'FlatIter(%s)' % self.child.debug_repr() class Call1(Signature): - def __init__(self, func, child): - self.func = func + def __init__(self, func, name, child): + self.unfunc = func + self.name = name self.child = child def hash(self): - return compute_identity_hash(self.func) ^ (self.child.hash() << 1) + return compute_hash(self.name) ^ self.child.hash() << 1 def eq(self, other): - if type(other) != type(self): + if type(other) is not type(self): return False - return self.child.eq(other.child) + return self.unfunc is other.unfunc and self.child.eq(other.child) def debug_repr(self): - return 'Call1(%s, %s)' % (self.func.func_name, + return 'Call1(%s, %s)' % (self.name, self.child.debug_repr()) class Call2(Signature): - def __init__(self, func, left, right): - self.func = func + def __init__(self, func, name, left, right): + self.binfunc = func + self.name = name self.left = left self.right = right def hash(self): - return (compute_identity_hash(self.func) ^ (self.left.hash() << 1) ^ + return (compute_hash(self.name) ^ (self.left.hash() << 1) ^ (self.right.hash() << 2)) def eq(self, other): - if type(other) != type(self): + if type(other) is not type(self): return False - return self.left.eq(other.left) and self.right.eq(other.right) + return (self.binfunc is other.binfunc and + self.left.eq(other.left) and self.right.eq(other.right)) def debug_repr(self): - return 'Call2(%s, %s, %s)' % (self.func.func_name, + return 'Call2(%s, %s, %s)' % (self.name, self.left.debug_repr(), self.right.debug_repr()) -class 
ReduceSignature(Call1): +class ForcedSignature(Signature): + pass + +class ReduceSignature(Call2): pass # class Signature(BaseSignature): diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -185,6 +185,7 @@ # sure it was optimized correctly. # XXX the comment above is wrong now. We need preferrably a way to # count the two loops separately + py.test.skip(":(") self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, From noreply at buildbot.pypy.org Wed Dec 7 12:13:44 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Dec 2011 12:13:44 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: fix forced signature Message-ID: <20111207111344.5ECF78205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50241:00c7a228329b Date: 2011-12-07 13:13 +0200 http://bitbucket.org/pypy/pypy/changeset/00c7a228329b/ Log: fix forced signature diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -30,6 +30,7 @@ self.alternate_constructors = alternate_constructors self.array_signature = signature.ArraySignature() self.scalar_signature = signature.ScalarSignature() + self.forced_signature = signature.ForcedSignature() #self.flatiter_signature = signature.FlatiterSignature() #self.view_signature = signature.ViewSignature() diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -998,6 +998,7 @@ def force_if_needed(self): if self.forced_result is None: self.forced_result = self.compute() + self.signature = self.find_dtype().forced_signature self._del_sources() def get_concrete(self): diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -106,7 +106,8 @@ self.right.debug_repr()) class ForcedSignature(Signature): - pass + def debug_repr(self): + return 'Forced' class ReduceSignature(Call2): pass diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -878,8 +878,7 @@ assert sin(a).__debug_repr__() == 'Call1(sin, Array)' b = a + a b[0] = 3 - skip("not there") - assert b.__debug_repr__() == 'Call2(add, forced=Array)' + assert b.__debug_repr__() == 'Forced' class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): From noreply at buildbot.pypy.org Wed Dec 7 12:15:41 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Dec 2011 12:15:41 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: reindent Message-ID: <20111207111541.E6BE88205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50242:e08b8dbb9fb8 Date: 2011-12-07 13:13 +0200 http://bitbucket.org/pypy/pypy/changeset/e08b8dbb9fb8/ Log: reindent diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -836,16 
+836,16 @@ def descr_reshape(self, space, args_w): """reshape(...) - a.reshape(shape) + a.reshape(shape) - Returns an array containing the same data with a new shape. - - Refer to `numpypy.reshape` for full documentation. - - See Also - -------- - numpypy.reshape : equivalent function -""" + Returns an array containing the same data with a new shape. + + Refer to `numpypy.reshape` for full documentation. + + See Also + -------- + numpypy.reshape : equivalent function + """ if len(args_w) == 1: w_shape = args_w[0] else: From noreply at buildbot.pypy.org Wed Dec 7 12:15:43 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Dec 2011 12:15:43 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: oops, forgot to remove Message-ID: <20111207111543.1614A8205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50243:6d4692380e8f Date: 2011-12-07 13:14 +0200 http://bitbucket.org/pypy/pypy/changeset/6d4692380e8f/ Log: oops, forgot to remove diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -35,9 +35,6 @@ def hash(self): return compute_hash(self) - def _freeze_(self): - self._hash = id(self) - class ViewSignature(Signature): def __init__(self, child): self.child = child From noreply at buildbot.pypy.org Wed Dec 7 13:55:35 2011 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 7 Dec 2011 13:55:35 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: fix for GUARD_NOT_INVALIDATED Message-ID: <20111207125535.B1DC98205C@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50244:93473354e34c Date: 2011-12-07 13:55 +0100 http://bitbucket.org/pypy/pypy/changeset/93473354e34c/ Log: fix for GUARD_NOT_INVALIDATED diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -197,7 +197,14 @@ print 'Failargs: ', op.getfailargs() pos = self.mc.currpos() - self.mc.BKPT() + # For all guards that are not GUARD_NOT_INVALIDATED we emit a + # breakpoint to ensure the location is patched correctly. In the case + # of GUARD_NOT_INVALIDATED we use just a NOP, because it is only + # eventually patched at a later point. 
+ if is_guard_not_invalidated: + self.mc.NOP() + else: + self.mc.BKPT() self.pending_guards.append(GuardToken(descr, failargs=op.getfailargs(), faillocs=arglocs, From noreply at buildbot.pypy.org Wed Dec 7 15:42:41 2011 From: noreply at buildbot.pypy.org (hager) Date: Wed, 7 Dec 2011 15:42:41 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: float not supported yet Message-ID: <20111207144241.3A12C82ABA@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50246:120ce67e8e79 Date: 2011-12-07 15:40 +0100 http://bitbucket.org/pypy/pypy/changeset/120ce67e8e79/ Log: float not supported yet diff --git a/pypy/jit/backend/ppc/ppcgen/regalloc.py b/pypy/jit/backend/ppc/ppcgen/regalloc.py --- a/pypy/jit/backend/ppc/ppcgen/regalloc.py +++ b/pypy/jit/backend/ppc/ppcgen/regalloc.py @@ -145,7 +145,7 @@ if loc.is_reg(): self.rm.reg_bindings[arg] = loc elif loc.is_vfp_reg(): - self.vfprm.reg_bindings[arg] = loc + assert 0, "not implemented yet" else: assert loc.is_stack() self.frame_manager.frame_bindings[arg] = loc From noreply at buildbot.pypy.org Wed Dec 7 15:42:42 2011 From: noreply at buildbot.pypy.org (hager) Date: Wed, 7 Dec 2011 15:42:42 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: correct offset computations due to new stack frames and handle holes in register array more properly Message-ID: <20111207144242.6531782ABB@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50247:60ddbddadec5 Date: 2011-12-07 15:41 +0100 http://bitbucket.org/pypy/pypy/changeset/60ddbddadec5/ Log: correct offset computations due to new stack frames and handle holes in register array more properly diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -237,7 +237,7 @@ assert spilling_depth >= 0 assert spp_loc > stack_loc - regs = rffi.cast(rffi.CCHARP, stack_loc + BACKCHAIN_SIZE) + regs = rffi.cast(rffi.CCHARP, spp_loc) i = -1 fail_index = -1 while(True): @@ -258,12 +258,7 @@ value = decode32(enc, i+1) i += 4 else: - assert group == self.FLOAT_TYPE - adr = decode32(enc, i+1) - value = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), adr)[0] - self.fail_boxes_float.setitem(fail_index, value) - i += 4 - continue + assert 0, "not implemented yet" elif res == self.STACK_LOC: stack_location = decode32(enc, i+1) i += 4 @@ -276,12 +271,11 @@ if group == self.FLOAT_TYPE: assert 0, "not implemented yet" else: - # XXX dirty, fix - #sub = r.managed_regs_sub(reg) + regindex = r.get_managed_reg_index(reg) if IS_PPC_32: - value = decode32(regs, (reg - 3) * WORD) + value = decode32(regs, regindex * WORD) else: - value = decode64(regs, (reg - 3) * WORD) + value = decode64(regs, regindex * WORD) if group == self.INT_TYPE: self.fail_boxes_int.setitem(fail_index, value) @@ -323,9 +317,7 @@ j += 4 else: # REG_LOC reg = ord(res) - # XXX dirty, fix - sub = r.managed_regs_sub(reg) - loc = r.MANAGED_REGS[reg - sub] + loc = r.MANAGED_REGS[r.get_managed_reg_index(reg)] j += 1 locs.append(loc) return locs @@ -656,8 +648,7 @@ mem[j] = self.REF_TYPE j += 1 elif arg.type == FLOAT: - mem[j] = self.FLOAT_TYPE - j += 1 + assert 0, "not implemented yet" else: assert 0, 'unknown type' @@ -678,7 +669,6 @@ mem[j] = self.EMPTY_LOC j += 1 i += 1 - # XXX 64 bit adjustment needed mem[j] = chr(0xFF) @@ -909,7 +899,7 @@ assert 0, "not implemented yet" # XXX this code has to be verified assert not self.stack_in_use - target = 
StackLocation(0) # write to force index field + target = StackLocation(self.ENCODING_AREA) # write to force index field self.regalloc_mov(loc, target) self.stack_in_use = True elif loc.is_reg(): @@ -934,7 +924,7 @@ assert 0, "not implemented yet" # XXX this code has to be verified assert self.stack_in_use - from_loc = StackLocation(0) + from_loc = StackLocation(self.ENCODING_AREA) self.regalloc_mov(from_loc, loc) self.stack_in_use = False elif loc.is_reg(): @@ -996,9 +986,9 @@ def _write_fail_index(self, fail_index): self.mc.load_imm(r.r0, fail_index) if IS_PPC_32: - self.mc.stw(r.r0.value, r.SPP.value, 0) + self.mc.stw(r.r0.value, r.SPP.value, self.ENCODING_AREA) else: - self.mc.std(r.r0.value, r.SPP.value, 0) + self.mc.std(r.r0.value, r.SPP.value, self.ENCODING_AREA) def load(self, loc, value): assert loc.is_reg() and value.is_imm() diff --git a/pypy/jit/backend/ppc/ppcgen/register.py b/pypy/jit/backend/ppc/ppcgen/register.py --- a/pypy/jit/backend/ppc/ppcgen/register.py +++ b/pypy/jit/backend/ppc/ppcgen/register.py @@ -32,8 +32,7 @@ PARAM_REGS = [r3, r4, r5, r6, r7, r8, r9, r10] -# XXX fix this at some point -def managed_regs_sub(reg): +def get_managed_reg_index(reg): if reg > r13.value: - return 4 - return 3 + return reg - 4 + return reg - 3 From noreply at buildbot.pypy.org Wed Dec 7 15:42:40 2011 From: noreply at buildbot.pypy.org (hager) Date: Wed, 7 Dec 2011 15:42:40 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: Update stack frame sketch Message-ID: <20111207144240.130998205C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50245:141b01d305d7 Date: 2011-12-07 15:38 +0100 http://bitbucket.org/pypy/pypy/changeset/141b01d305d7/ Log: Update stack frame sketch diff --git a/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py b/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py --- a/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py +++ b/pypy/jit/backend/ppc/ppcgen/test/test_stackframe.py @@ -6,7 +6,7 @@ | BACK CHAIN | - - - - - --------------------------- - - - - -- - - - - - - - - - | | | CURRENT FRAME - | FPR SAVE AREA | |>> len(NONVOLATILES_FPR) * WORD + | FPR SAVE AREA | |>> len(NONVOLATILES_FPR) * DOUBLEWORD | | | --------------------------- -- | | | @@ -17,7 +17,7 @@ | FLOAT/INT CONVERSION | |>> 1 * WORD | | | --------------------------- -- - | FORCE INDEX | WORD | 1 WORD + | FORCE INDEX | WORD |>> 1 WORD --------------------------- -- | | | | ENCODING AREA | |>> len(MANAGED_REGS) * WORD @@ -30,16 +30,12 @@ | | | | PARAMETER SAVE AREA | |>> max_stack_params * WORD | | | - ---------------------------a -- - | TOC POINTER | WORD | + --------------------------- -- + (64 Bit) | TOC POINTER | WORD | --------------------------- | - | < RESERVED > | WORD | - --------------------------- | - | < RESERVED > | WORD | - --------------------------- |>> 6 WORDS | SAVED LR | WORD | - --------------------------- | - | SAVED CR | WORD | + --------------------------- |>> 4 WORDS (64 Bit) + (64 Bit) | SAVED CR | WORD | 2 WORDS (32 Bit) --------------------------- | | BACK CHAIN | WORD | SP -> --------------------------- -- From noreply at buildbot.pypy.org Wed Dec 7 15:44:16 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Dec 2011 15:44:16 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: in-progress Message-ID: <20111207144416.29A9C8205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50248:ee9bb45f0923 Date: 2011-12-07 16:33 +0200 http://bitbucket.org/pypy/pypy/changeset/ee9bb45f0923/ 
Log: in-progress diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_iter.py @@ -0,0 +1,196 @@ + +from pypy.rlib import jit +from pypy.rlib.objectmodel import instantiate + +# Iterators for arrays +# -------------------- +# all those iterators with the exception of BroadcastIterator iterate over the +# entire array in C order (the last index changes the fastest). This will +# yield all elements. Views iterate over indices and look towards strides and +# backstrides to find the correct position. Notably the offset between +# x[..., i + 1] and x[..., i] will be strides[-1]. Offset between +# x[..., k + 1, 0] and x[..., k, i_max] will be backstrides[-2] etc. + +# BroadcastIterator works like that, but for indexes that don't change source +# in the original array, strides[i] == backstrides[i] == 0 + +class BaseIterator(object): + def next(self, shapelen): + raise NotImplementedError + + def done(self): + raise NotImplementedError + + def get_offset(self): + raise NotImplementedError + +class ArrayIterator(BaseIterator): + def __init__(self, size): + self.offset = 0 + self.size = size + + def next(self, shapelen): + arr = instantiate(ArrayIterator) + arr.size = self.size + arr.offset = self.offset + 1 + return arr + + def done(self): + return self.offset >= self.size + + def get_offset(self): + return self.offset + +class OneDimIterator(BaseIterator): + def __init__(self, start, step, stop): + self.offset = start + self.step = step + self.size = stop * step + start + + def next(self, shapelen): + arr = instantiate(OneDimIterator) + arr.size = self.size + arr.step = self.step + arr.offset = self.offset + self.step + return arr + + def done(self): + return self.offset == self.size + + def get_offset(self): + return self.offset + +class ViewIterator(BaseIterator): + def __init__(self, arr): + self.indices = [0] * len(arr.shape) + self.offset = arr.start + self.arr = arr + self._done = False + + @jit.unroll_safe + def next(self, shapelen): + offset = self.offset + indices = [0] * shapelen + for i in range(shapelen): + indices[i] = self.indices[i] + done = False + for i in range(shapelen - 1, -1, -1): + if indices[i] < self.arr.shape[i] - 1: + indices[i] += 1 + offset += self.arr.strides[i] + break + else: + indices[i] = 0 + offset -= self.arr.backstrides[i] + else: + done = True + res = instantiate(ViewIterator) + res.offset = offset + res.indices = indices + res.arr = self.arr + res._done = done + return res + + def done(self): + return self._done + + def get_offset(self): + return self.offset + +class BroadcastIterator(BaseIterator): + '''Like a view iterator, but will repeatedly access values + for all iterations across a res_shape, folding the offset + using mod() arithmetic + ''' + def __init__(self, arr, res_shape): + self.indices = [0] * len(res_shape) + self.offset = arr.start + #strides are 0 where original shape==1 + self.strides = [] + self.backstrides = [] + for i in range(len(arr.shape)): + if arr.shape[i] == 1: + self.strides.append(0) + self.backstrides.append(0) + else: + self.strides.append(arr.strides[i]) + self.backstrides.append(arr.backstrides[i]) + self.res_shape = res_shape + self.strides = [0] * (len(res_shape) - len(arr.shape)) + self.strides + self.backstrides = [0] * (len(res_shape) - len(arr.shape)) + self.backstrides + self._done = False + + @jit.unroll_safe + def next(self, shapelen): + offset = self.offset + indices = [0] * shapelen + _done = False + for i in 
range(shapelen): + indices[i] = self.indices[i] + for i in range(shapelen - 1, -1, -1): + if indices[i] < self.res_shape[i] - 1: + indices[i] += 1 + offset += self.strides[i] + break + else: + indices[i] = 0 + offset -= self.backstrides[i] + else: + _done = True + res = instantiate(BroadcastIterator) + res.indices = indices + res.offset = offset + res._done = _done + res.strides = self.strides + res.backstrides = self.backstrides + res.res_shape = self.res_shape + return res + + def done(self): + return self._done + + def get_offset(self): + return self.offset + +class Call2Iterator(BaseIterator): + def __init__(self, left, right): + self.left = left + self.right = right + + def next(self, shapelen): + return Call2Iterator(self.left.next(shapelen), + self.right.next(shapelen)) + + def done(self): + if isinstance(self.left, ConstantIterator): + return self.right.done() + return self.left.done() + + def get_offset(self): + if isinstance(self.left, ConstantIterator): + return self.right.get_offset() + return self.left.get_offset() + +class Call1Iterator(BaseIterator): + def __init__(self, child): + self.child = child + + def next(self, shapelen): + return Call1Iterator(self.child.next(shapelen)) + + def done(self): + return self.child.done() + + def get_offset(self): + return self.child.get_offset() + +class ConstantIterator(BaseIterator): + def next(self, shapelen): + return self + + def done(self): + return False + + def get_offset(self): + return 0 + diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -198,199 +198,6 @@ n_old_elems_to_use *= old_shape[oldI] return new_strides -# Iterators for arrays -# -------------------- -# all those iterators with the exception of BroadcastIterator iterate over the -# entire array in C order (the last index changes the fastest). This will -# yield all elements. Views iterate over indices and look towards strides and -# backstrides to find the correct position. Notably the offset between -# x[..., i + 1] and x[..., i] will be strides[-1]. Offset between -# x[..., k + 1, 0] and x[..., k, i_max] will be backstrides[-2] etc. 
- -# BroadcastIterator works like that, but for indexes that don't change source -# in the original array, strides[i] == backstrides[i] == 0 - -class BaseIterator(object): - def next(self, shapelen): - raise NotImplementedError - - def done(self): - raise NotImplementedError - - def get_offset(self): - raise NotImplementedError - -class ArrayIterator(BaseIterator): - def __init__(self, size): - self.offset = 0 - self.size = size - - def next(self, shapelen): - arr = instantiate(ArrayIterator) - arr.size = self.size - arr.offset = self.offset + 1 - return arr - - def done(self): - return self.offset >= self.size - - def get_offset(self): - return self.offset - -class OneDimIterator(BaseIterator): - def __init__(self, start, step, stop): - self.offset = start - self.step = step - self.size = stop * step + start - - def next(self, shapelen): - arr = instantiate(OneDimIterator) - arr.size = self.size - arr.step = self.step - arr.offset = self.offset + self.step - return arr - - def done(self): - return self.offset == self.size - - def get_offset(self): - return self.offset - -class ViewIterator(BaseIterator): - def __init__(self, arr): - self.indices = [0] * len(arr.shape) - self.offset = arr.start - self.arr = arr - self._done = False - - @jit.unroll_safe - def next(self, shapelen): - offset = self.offset - indices = [0] * shapelen - for i in range(shapelen): - indices[i] = self.indices[i] - done = False - for i in range(shapelen - 1, -1, -1): - if indices[i] < self.arr.shape[i] - 1: - indices[i] += 1 - offset += self.arr.strides[i] - break - else: - indices[i] = 0 - offset -= self.arr.backstrides[i] - else: - done = True - res = instantiate(ViewIterator) - res.offset = offset - res.indices = indices - res.arr = self.arr - res._done = done - return res - - def done(self): - return self._done - - def get_offset(self): - return self.offset - -class BroadcastIterator(BaseIterator): - '''Like a view iterator, but will repeatedly access values - for all iterations across a res_shape, folding the offset - using mod() arithmetic - ''' - def __init__(self, arr, res_shape): - self.indices = [0] * len(res_shape) - self.offset = arr.start - #strides are 0 where original shape==1 - self.strides = [] - self.backstrides = [] - for i in range(len(arr.shape)): - if arr.shape[i] == 1: - self.strides.append(0) - self.backstrides.append(0) - else: - self.strides.append(arr.strides[i]) - self.backstrides.append(arr.backstrides[i]) - self.res_shape = res_shape - self.strides = [0] * (len(res_shape) - len(arr.shape)) + self.strides - self.backstrides = [0] * (len(res_shape) - len(arr.shape)) + self.backstrides - self._done = False - - @jit.unroll_safe - def next(self, shapelen): - offset = self.offset - indices = [0] * shapelen - _done = False - for i in range(shapelen): - indices[i] = self.indices[i] - for i in range(shapelen - 1, -1, -1): - if indices[i] < self.res_shape[i] - 1: - indices[i] += 1 - offset += self.strides[i] - break - else: - indices[i] = 0 - offset -= self.backstrides[i] - else: - _done = True - res = instantiate(BroadcastIterator) - res.indices = indices - res.offset = offset - res._done = _done - res.strides = self.strides - res.backstrides = self.backstrides - res.res_shape = self.res_shape - return res - - def done(self): - return self._done - - def get_offset(self): - return self.offset - -class Call2Iterator(BaseIterator): - def __init__(self, left, right): - self.left = left - self.right = right - - def next(self, shapelen): - return Call2Iterator(self.left.next(shapelen), - 
self.right.next(shapelen)) - - def done(self): - if isinstance(self.left, ConstantIterator): - return self.right.done() - return self.left.done() - - def get_offset(self): - if isinstance(self.left, ConstantIterator): - return self.right.get_offset() - return self.left.get_offset() - -class Call1Iterator(BaseIterator): - def __init__(self, child): - self.child = child - - def next(self, shapelen): - return Call1Iterator(self.child.next(shapelen)) - - def done(self): - return self.child.done() - - def get_offset(self): - return self.child.get_offset() - -class ConstantIterator(BaseIterator): - def next(self, shapelen): - return self - - def done(self): - return False - - def get_offset(self): - return 0 - - class BaseArray(Wrappable): _attrs_ = ["invalidates", "signature", "shape", "strides", "backstrides", "start", 'order'] @@ -1327,6 +1134,7 @@ ) arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) shapelen = len(shape) + iters = arr.signature.create_iterator() arr_iter = arr.start_iter(arr.shape) for i in range(len(elems_w)): w_elem = elems_w[i] diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -1,5 +1,7 @@ from pypy.rlib.objectmodel import r_dict, compute_identity_hash, compute_hash from pypy.rlib.rarithmetic import intmask +from pypy.module.micronumpy.interp_iter import ViewIterator, ArrayIterator, \ + BroadcastIterator, OneDimIterator # def components_eq(lhs, rhs): @@ -35,6 +37,9 @@ def hash(self): return compute_hash(self) + def create_iter(self, array, cache): + raise NotImplementedError + class ViewSignature(Signature): def __init__(self, child): self.child = child @@ -50,10 +55,16 @@ def debug_repr(self): return 'Slice(%s)' % self.child.debug_repr() + def create_iter(self, array, cache): + xxxx + class ArraySignature(Signature): def debug_repr(self): return 'Array' + def create_iter(self, array, cache): + xxx + class ScalarSignature(Signature): def debug_repr(self): return 'Scalar' From noreply at buildbot.pypy.org Wed Dec 7 15:44:17 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Dec 2011 15:44:17 +0100 (CET) Subject: [pypy-commit] pypy default: implement some rudimentary __array_interface__ Message-ID: <20111207144417.5134D8205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50249:591be18ef52c Date: 2011-12-07 16:42 +0200 http://bitbucket.org/pypy/pypy/changeset/591be18ef52c/ Log: implement some rudimentary __array_interface__ diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -916,6 +916,14 @@ def descr_debug_repr(self, space): return space.wrap(self.debug_repr()) + def descr_array_iface(self, space): + concrete = self.get_concrete() + addr = rffi.cast(lltype.Signed, concrete.storage) + w_d = space.newdict() + space.setitem_str(w_d, 'data', space.newtuple([space.wrap(addr), + space.w_False])) + return w_d + def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): return w_obj @@ -1444,6 +1452,7 @@ __repr__ = interp2app(BaseArray.descr_repr), __str__ = interp2app(BaseArray.descr_str), __debug_repr__ = interp2app(BaseArray.descr_debug_repr), + __array_interface__ = GetSetProperty(BaseArray.descr_array_iface), dtype = GetSetProperty(BaseArray.descr_get_dtype), shape = GetSetProperty(BaseArray.descr_get_shape, diff --git 
a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1114,6 +1114,12 @@ b = a[0].copy() assert (b == zeros(10)).all() + def test_array_interface(self): + from numpypy import array + a = array([1, 2, 3]) + i = a.__array_interface__ + assert isinstance(i['data'][0], int) + class AppTestSupport(BaseNumpyAppTest): def setup_class(cls): import struct From noreply at buildbot.pypy.org Wed Dec 7 15:58:04 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Dec 2011 15:58:04 +0100 (CET) Subject: [pypy-commit] pypy default: fixes Message-ID: <20111207145804.22AEA8205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50250:a23933bd7963 Date: 2011-12-07 16:57 +0200 http://bitbucket.org/pypy/pypy/changeset/a23933bd7963/ Log: fixes diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -918,7 +918,8 @@ def descr_array_iface(self, space): concrete = self.get_concrete() - addr = rffi.cast(lltype.Signed, concrete.storage) + storage = concrete.get_storage(space) + addr = rffi.cast(lltype.Signed, storage) w_d = space.newdict() space.setitem_str(w_d, 'data', space.newtuple([space.wrap(addr), space.w_False])) @@ -984,6 +985,9 @@ # so in order to have a consistent API, let it go through. pass + def get_storage(self, space): + raise OperationError(space.w_TypeError, space.wrap("Cannot get array interface on scalars in pypy")) + class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs @@ -1271,6 +1275,9 @@ a_iter = a_iter.next(len(array.shape)) return array + def get_storage(self, space): + return self.parent.storage + class W_NDimArray(BaseArray): """ A class representing contiguous array. We know that each iteration by say ufunc will increase the data index by one @@ -1333,6 +1340,9 @@ def debug_repr(self): return 'Array' + def get_storage(self, space): + return self.storage + def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1119,6 +1119,10 @@ a = array([1, 2, 3]) i = a.__array_interface__ assert isinstance(i['data'][0], int) + a = a[::2] + i = a.__array_interface__ + assert isinstance(i['data'][0], int) + raises(TypeError, getattr, array(3), '__array_interface__') class AppTestSupport(BaseNumpyAppTest): def setup_class(cls): From noreply at buildbot.pypy.org Wed Dec 7 16:12:26 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Dec 2011 16:12:26 +0100 (CET) Subject: [pypy-commit] pypy default: another fix. I don't think views of virtual arrays are well supported Message-ID: <20111207151226.367978205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50251:060a7ce40fa6 Date: 2011-12-07 17:11 +0200 http://bitbucket.org/pypy/pypy/changeset/060a7ce40fa6/ Log: another fix. 
I don't think views of virtual arrays are well supported diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1276,7 +1276,7 @@ return array def get_storage(self, space): - return self.parent.storage + return self.parent.get_storage() class W_NDimArray(BaseArray): """ A class representing contiguous array. We know that each iteration From noreply at buildbot.pypy.org Wed Dec 7 16:22:04 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 7 Dec 2011 16:22:04 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: update Message-ID: <20111207152204.BCD558205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r3972:38bf58fc834d Date: 2011-12-07 10:21 -0500 http://bitbucket.org/pypy/extradoc/changeset/38bf58fc834d/ Log: update diff --git a/planning/micronumpy.txt b/planning/micronumpy.txt --- a/planning/micronumpy.txt +++ b/planning/micronumpy.txt @@ -1,10 +1,6 @@ NEW TASKS --------- -- add in numpy.generic and the various subclasses, use them in returning - instances from subscripting (and possibly internally), also make them valid - for the dtype arguments (numpy-dtype-refactor branch) - - astype - a good sort function @@ -13,14 +9,13 @@ - endianness -- scalar types like numpy.int8 (numpy-dtype-refacotr branch) - - frompyfunc to create ufuncs from python functions - more ufuncs - linspace/other ranges -- change numpy.array into numpy.array and numpy.ndarray +- more attributes/methods on numpy.flatiter -- more attributes/methods on numpy.flatiter +- axis= parameter to various methods + From noreply at buildbot.pypy.org Wed Dec 7 16:26:16 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Dec 2011 16:26:16 +0100 (CET) Subject: [pypy-commit] pypy default: how-did-it-even-work? Message-ID: <20111207152616.A2D2C8205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50252:b5fbdffb1026 Date: 2011-12-07 17:25 +0200 http://bitbucket.org/pypy/pypy/changeset/b5fbdffb1026/ Log: how-did-it-even-work? diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1276,7 +1276,7 @@ return array def get_storage(self, space): - return self.parent.get_storage() + return self.parent.get_storage(space) class W_NDimArray(BaseArray): """ A class representing contiguous array. We know that each iteration From noreply at buildbot.pypy.org Wed Dec 7 17:32:03 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Wed, 7 Dec 2011 17:32:03 +0100 (CET) Subject: [pypy-commit] pypy default: rmmap is fixed. Signed/unsigned problem, with weird effects in the annotator. Message-ID: <20111207163203.D5B008205C@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: Changeset: r50253:588a7a11455b Date: 2011-12-07 17:06 +0100 http://bitbucket.org/pypy/pypy/changeset/588a7a11455b/ Log: rmmap is fixed. Signed/unsigned problem, with weird effects in the annotator. had a hard time to understand this. 
diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -420,7 +420,11 @@ low, high = _get_file_size(self.file_handle) if not high and low <= sys.maxint: return low + # not so sure if the signed/unsigned strictness is a good idea: + high = rffi.cast(lltype.Unsigned, high) + low = rffi.cast(lltype.Unsigned, low) size = (high << 32) + low + size = rffi.cast(lltype.Signed, size) elif _POSIX: st = os.fstat(self.fd) size = st[stat.ST_SIZE] From noreply at buildbot.pypy.org Wed Dec 7 17:32:05 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Wed, 7 Dec 2011 17:32:05 +0100 (CET) Subject: [pypy-commit] pypy default: Merge Message-ID: <20111207163205.0ACB18205C@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: Changeset: r50254:469c4a362d73 Date: 2011-12-07 17:07 +0100 http://bitbucket.org/pypy/pypy/changeset/469c4a362d73/ Log: Merge diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -916,6 +916,15 @@ def descr_debug_repr(self, space): return space.wrap(self.debug_repr()) + def descr_array_iface(self, space): + concrete = self.get_concrete() + storage = concrete.get_storage(space) + addr = rffi.cast(lltype.Signed, storage) + w_d = space.newdict() + space.setitem_str(w_d, 'data', space.newtuple([space.wrap(addr), + space.w_False])) + return w_d + def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): return w_obj @@ -976,6 +985,9 @@ # so in order to have a consistent API, let it go through. pass + def get_storage(self, space): + raise OperationError(space.w_TypeError, space.wrap("Cannot get array interface on scalars in pypy")) + class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs @@ -1263,6 +1275,9 @@ a_iter = a_iter.next(len(array.shape)) return array + def get_storage(self, space): + return self.parent.get_storage(space) + class W_NDimArray(BaseArray): """ A class representing contiguous array. 
We know that each iteration by say ufunc will increase the data index by one @@ -1325,6 +1340,9 @@ def debug_repr(self): return 'Array' + def get_storage(self, space): + return self.storage + def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) @@ -1444,6 +1462,7 @@ __repr__ = interp2app(BaseArray.descr_repr), __str__ = interp2app(BaseArray.descr_str), __debug_repr__ = interp2app(BaseArray.descr_debug_repr), + __array_interface__ = GetSetProperty(BaseArray.descr_array_iface), dtype = GetSetProperty(BaseArray.descr_get_dtype), shape = GetSetProperty(BaseArray.descr_get_shape, diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1114,6 +1114,16 @@ b = a[0].copy() assert (b == zeros(10)).all() + def test_array_interface(self): + from numpypy import array + a = array([1, 2, 3]) + i = a.__array_interface__ + assert isinstance(i['data'][0], int) + a = a[::2] + i = a.__array_interface__ + assert isinstance(i['data'][0], int) + raises(TypeError, getattr, array(3), '__array_interface__') + class AppTestSupport(BaseNumpyAppTest): def setup_class(cls): import struct From noreply at buildbot.pypy.org Wed Dec 7 17:39:58 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:39:58 +0100 (CET) Subject: [pypy-commit] pypy default: Throw away and restart another attempt at pypy/bin/checkmodule. Message-ID: <20111207163958.6D20B8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50255:0cec1f036691 Date: 2011-12-07 12:48 +0100 http://bitbucket.org/pypy/pypy/changeset/0cec1f036691/ Log: Throw away and restart another attempt at pypy/bin/checkmodule. diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py --- a/pypy/bin/checkmodule.py +++ b/pypy/bin/checkmodule.py @@ -1,15 +1,10 @@ #! /usr/bin/env python """ -Usage: checkmodule.py [-b backend] +Usage: checkmodule.py -Compiles the PyPy extension module from pypy/module// -into a fake program which does nothing. Useful for testing whether a -modules compiles without doing a full translation. Default backend is cli. - -WARNING: this is still incomplete: there are chances that the -compilation fails with strange errors not due to the module. If a -module is known to compile during a translation but don't pass -checkmodule.py, please report the bug (or, better, correct it :-). +Check annotation and rtyping of the PyPy extension module from +pypy/module//. Useful for testing whether a +modules compiles without doing a full translation. """ import autopath import sys @@ -17,27 +12,19 @@ from pypy.objspace.fake.checkmodule import checkmodule def main(argv): - try: - assert len(argv) in (2, 4) - if len(argv) == 2: - backend = 'cli' - modname = argv[1] - if modname in ('-h', '--help'): - print >> sys.stderr, __doc__ - sys.exit(0) - if modname.startswith('-'): - print >> sys.stderr, "Bad command line" - print >> sys.stderr, __doc__ - sys.exit(1) - else: - _, b, backend, modname = argv - assert b == '-b' - except AssertionError: + if len(argv) != 2: print >> sys.stderr, __doc__ sys.exit(2) - else: - checkmodule(modname, backend, interactive=True) - print 'Module compiled succesfully' + modname = argv[1] + if modname in ('-h', '--help'): + print >> sys.stderr, __doc__ + sys.exit(0) + if modname.startswith('-'): + print >> sys.stderr, "Bad command line" + print >> sys.stderr, __doc__ + sys.exit(1) + checkmodule(modname) + print 'Passed.' 
if __name__ == '__main__': main(sys.argv) diff --git a/pypy/objspace/fake/__init__.py b/pypy/objspace/fake/__init__.py --- a/pypy/objspace/fake/__init__.py +++ b/pypy/objspace/fake/__init__.py @@ -1,2 +0,0 @@ -from objspace import FakeObjSpace -Space = FakeObjSpace diff --git a/pypy/objspace/fake/checkmodule.py b/pypy/objspace/fake/checkmodule.py --- a/pypy/objspace/fake/checkmodule.py +++ b/pypy/objspace/fake/checkmodule.py @@ -1,108 +1,7 @@ -import re -from copy import copy -from pypy.tool.error import debug -from pypy.interpreter.argument import Arguments -from pypy.interpreter.gateway import interp2app -from pypy.rlib.nonconst import NonConstant +from pypy.objspace.fake.objspace import FakeObjSpace +from pypy.translator.driver import TranslationDriver -def my_import(name): - mod = __import__(name) - components = name.split('.') - for comp in components[1:]: - mod = getattr(mod, comp) - return mod -def find_gateways(modname, basepath, module): - identifier = r'[a-zA-Z0-9][a-zA-Z0-9_]*' - r_simplename = re.compile(r'(%s)[.](%s)$' % (identifier, identifier)) - res = [] - for name in module.interpleveldefs.values(): - match = r_simplename.match(name) - if match: - submod_name, obj_name = match.groups() - submod_name = '%s.%s.%s' % (basepath, modname, submod_name) - submod = my_import(submod_name) - obj = getattr(submod, obj_name) - res += find_gw_in_obj(obj) - return res - -def find_gw_in_obj(obj): - if hasattr(obj, 'typedef'): - typedef = obj.typedef - return [gw for gw in typedef.rawdict.values() - if isinstance(gw, interp2app)] - elif hasattr(obj, 'func_code'): - return [interp2app(obj)] - else: - assert False - -## Since the fake objspace is more a hack than a real object space, it -## happens that the annotator complains about operations that cannot -## succeed because it knows too much about the objects involved. For -## example, if it knows that a list is always empty, it will block -## each operations that tries to access that list. This is not what we -## want, because we know that with real objectspaces that operations -## will succeed. - -## As a workaround, we insert dummy rpython code (the function -## dummy_rpython) that manipulates the variables in order to give -## them a more sensible annotation. This is the preferred way to solve -## the problems so far. - -## If the solution above doesn't work, the alternative is to -## substitute the interpreter code with something that doesn't hurt -## the annotator. It's a very ugly hack, better solutions are welcome -## :-) - - -# dummy rpython code to give some variables more sensible annotations -def dummy_rpython(dummy_function): - # to make the annotator flow-in without executing the code - if NonConstant(False): - dummy_function.defs_w = [None] # else the annotator would see an always empty list - -def patch_pypy(): - from pypy.interpreter.baseobjspace import W_Root - - def descr_call_mismatch(self, space, opname, RequiredClass, args): - from pypy.interpreter.error import OperationError - msg = 'This message will never be displayed :-)' - raise OperationError(space.w_TypeError, space.wrap(msg)) - W_Root.descr_call_mismatch = descr_call_mismatch - - -def checkmodule(modname, backend, interactive=False, basepath='pypy.module'): - "Compile a fake PyPy module." 
- from pypy.objspace.fake.objspace import FakeObjSpace, W_Object - from pypy.translator.driver import TranslationDriver - +def checkmodule(modname): space = FakeObjSpace() - space.config.translating = True - ModuleClass = __import__(basepath + '.%s' % modname, - None, None, ['Module']).Module - module = ModuleClass(space, space.wrap(modname)) - w_moduledict = module.getdict(space) - - gateways = find_gateways(modname, basepath, module) - functions = [gw.__spacebind__(space) for gw in gateways] - arguments = Arguments.frompacked(space, W_Object(), W_Object()) - dummy_function = copy(functions[0]) - - def main(argv): # use the standalone mode not to allow SomeObject - dummy_rpython(dummy_function) - for func in functions: - func.call_args(arguments) - return 0 - - patch_pypy() - driver = TranslationDriver() - driver.setup(main, None) - try: - driver.proceed(['compile_' + backend]) - except SystemExit: - raise - except: - if not interactive: - raise - debug(driver) - raise SystemExit(1) + xxx diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -1,147 +1,39 @@ -from pypy.interpreter.baseobjspace import ObjSpace, Wrappable, W_Root -from pypy.interpreter.typedef import TypeDef -from pypy.rlib.nonconst import NonConstant -from pypy.rlib.rarithmetic import r_uint -from pypy.rlib.rbigint import rbigint +from pypy.interpreter.baseobjspace import W_Root, ObjSpace +from pypy.translator.driver import TranslationDriver +from pypy.annotation.model import SomeInstance, s_None +from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.rpython.lltypesystem import lltype -#class W_Type(W_Root): -# _attrs_ = () -class W_Object(W_Root): - _attrs_ = () -W_Object.typedef = TypeDef('foobar') +def is_root(w_obj): + assert isinstance(w_obj, W_Root) -def make_dummy(a=W_Object(), b=W_Object()): - def fn(*args): - if NonConstant(True): - return a - else: - return b - return fn +class Entry(ExtRegistryEntry): + _about_ = is_root -int_dummy = make_dummy(42, 43) -float_dummy = make_dummy(42.0, 42.1) -uint_dummy = make_dummy(r_uint(42), r_uint(43)) -str_dummy = make_dummy('foo', 'bar') -bool_dummy = make_dummy(True, False) -unicode_dummy = make_dummy(u'abc', u'cde') -bigint_dummy = make_dummy(rbigint.fromint(0), rbigint.fromint(1)) + def compute_result_annotation(self, s_w_obj): + s_inst = SomeInstance(self.bookkeeper.getuniqueclassdef(W_Root)) + assert s_inst.contains(s_w_obj) + return s_None + + def specialize_call(self, hop): + return hop.inputconst(lltype.Void, None) + +# ____________________________________________________________ + class FakeObjSpace(ObjSpace): - w_None = W_Object() - w_False = W_Object() - w_True = W_Object() - w_Ellipsis = W_Object() - w_NotImplemented = W_Object() - w_int = W_Object() - w_dict = W_Object() - w_float = W_Object() - w_long = W_Object() - w_tuple = W_Object() - w_str = W_Object() - w_basestring = W_Object() - w_unicode = W_Object() - w_type = W_Object() - w_instance = W_Object() - w_slice = W_Object() - w_hex = W_Object() - w_oct = W_Object() - - def initialize(self): - self.config.objspace.geninterp = False - self.config.objspace.disable_call_speedhacks = True - self.wrap_cache = {} - self.make_builtins() - def _freeze_(self): - return True + def translates(self, func, argtypes=None): + if argtypes is None: + nb_args = func.func_code.co_argcount + argtypes = [W_Root] * nb_args + # + driver = TranslationDriver() + driver.setup(func, argtypes) + 
driver.proceed(['rtype_lltype']) - def wrap(self, x): - if isinstance(x, Wrappable): - w_result = x.__spacebind__(self) - return w_result - return W_Object() - wrap._annspecialcase_ = "specialize:argtype(1)" - - def unwrap(self, w_obj): - assert isinstance(w_obj, W_Object) - return None - - lookup = make_dummy() - allocate_instance = make_dummy() - getattr = make_dummy() - setattr = make_dummy() - getitem = make_dummy() - setitem = make_dummy() - delitem = make_dummy() - int_w = int_dummy - uint_w = uint_dummy - float_w = float_dummy - unicode_w = unicode_dummy - bigint_w = bigint_dummy - iter = make_dummy() - type = make_dummy() - str = make_dummy() - int = make_dummy() - float = make_dummy() - repr = make_dummy() - id = make_dummy() - len = make_dummy() - str_w = str_dummy - call_args = make_dummy() - new_interned_str = make_dummy() - newint = make_dummy() - newlong = make_dummy() - newfloat = make_dummy() - def newdict(self, module=False): - return self.newfloat() - newlist = make_dummy() - emptylist = make_dummy() - newtuple = make_dummy() - newslice = make_dummy() - lt = make_dummy() - le = make_dummy() - eq = make_dummy() - ne = make_dummy() - gt = make_dummy() - ge = make_dummy() - lt_w = bool_dummy - le_w = bool_dummy - eq_w = bool_dummy - ne_w = bool_dummy - gt_w = bool_dummy - ge_w = bool_dummy - is_w = bool_dummy - is_ = make_dummy() - next = make_dummy() - is_true = bool_dummy - nonzero = make_dummy() - issubtype = make_dummy() - ord = make_dummy() - hash = make_dummy() - delattr = make_dummy() # should return None? - contains = make_dummy() - hex = make_dummy() - oct = make_dummy() - pow = make_dummy() - inplace_pow = make_dummy() - cmp = make_dummy() - - # XXsX missing operations - def coerce(self, *args): raise NotImplementedError("space.coerce()") - def get(self, *args): raise NotImplementedError("space.get()") - def set(self, *args): raise NotImplementedError("space.set()") - def delete(self, *args): raise NotImplementedError("space.delete()") - def userdel(self, *args): raise NotImplementedError("space.userdel()") - def marshal_w(self, *args):raise NotImplementedError("space.marshal_w()") - - gettypefor = make_dummy() - gettypeobject = make_dummy() - unpackiterable = make_dummy([W_Object()], [W_Object()]) - - -## Register all exceptions -import exceptions -for name in ObjSpace.ExceptionTable: - exc = getattr(exceptions, name) - setattr(FakeObjSpace, 'w_' + name, W_Object()) + def add(self, w_x, w_y): + is_root(w_x) + is_root(w_y) + return W_Root() diff --git a/pypy/objspace/fake/test/__init__.py b/pypy/objspace/fake/test/__init__.py deleted file mode 100644 diff --git a/pypy/objspace/fake/test/test_checkmodule.py b/pypy/objspace/fake/test/test_checkmodule.py deleted file mode 100644 --- a/pypy/objspace/fake/test/test_checkmodule.py +++ /dev/null @@ -1,7 +0,0 @@ -import py -from pypy.objspace.fake.checkmodule import checkmodule - -def test_dotnet(): - # the only module known to pass checkmodule is _dotnet so far - py.test.skip('fixme') - checkmodule('_dotnet', 'cli') diff --git a/pypy/objspace/fake/test/test_objspace.py b/pypy/objspace/fake/test/test_objspace.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/fake/test/test_objspace.py @@ -0,0 +1,13 @@ +from pypy.objspace.fake.objspace import FakeObjSpace + +def test_create(): + FakeObjSpace() + + +class TestTranslate: + def setup_method(self, meth): + self.space = FakeObjSpace() + + def test_simple(self): + space = self.space + space.translates(lambda w_x, w_y: space.add(w_x, w_y)) From noreply at buildbot.pypy.org 
Wed Dec 7 17:39:59 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:39:59 +0100 (CET) Subject: [pypy-commit] pypy default: Progress. Message-ID: <20111207163959.936468205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50256:ac541142ddcd Date: 2011-12-07 12:57 +0100 http://bitbucket.org/pypy/pypy/changeset/ac541142ddcd/ Log: Progress. diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -3,6 +3,7 @@ from pypy.annotation.model import SomeInstance, s_None from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rpython.lltypesystem import lltype +from pypy.tool.sourcetools import compile2, func_with_new_name def is_root(w_obj): @@ -12,7 +13,8 @@ _about_ = is_root def compute_result_annotation(self, s_w_obj): - s_inst = SomeInstance(self.bookkeeper.getuniqueclassdef(W_Root)) + s_inst = SomeInstance(self.bookkeeper.getuniqueclassdef(W_Root), + can_be_None=True) assert s_inst.contains(s_w_obj) return s_None @@ -33,7 +35,19 @@ driver.setup(func, argtypes) driver.proceed(['rtype_lltype']) - def add(self, w_x, w_y): - is_root(w_x) - is_root(w_y) - return W_Root() + +def setup(): + for (name, _, arity, _) in ObjSpace.MethodTable: + args = ['w_%d' % i for i in range(arity)] + d = {'is_root': is_root, + 'W_Root': W_Root} + exec compile2("""\ + def meth(self, %s): + %s + return W_Root() + """ % (', '.join(args), + '; '.join(['is_root(%s)' % arg for arg in args]))) in d + meth = func_with_new_name(d['meth'], name) + setattr(FakeObjSpace, name, meth) + +setup() diff --git a/pypy/objspace/fake/test/test_objspace.py b/pypy/objspace/fake/test/test_objspace.py --- a/pypy/objspace/fake/test/test_objspace.py +++ b/pypy/objspace/fake/test/test_objspace.py @@ -1,4 +1,5 @@ -from pypy.objspace.fake.objspace import FakeObjSpace +from pypy.objspace.fake.objspace import FakeObjSpace, W_Root +from pypy.rlib.unroll import unrolling_iterable def test_create(): FakeObjSpace() @@ -11,3 +12,18 @@ def test_simple(self): space = self.space space.translates(lambda w_x, w_y: space.add(w_x, w_y)) + + def test_methodtable(self): + space = self.space + for fixed_arity in [1, 2, 3, 4]: + # + methodtable = [name for (name, _, arity, _) in space.MethodTable + if arity == fixed_arity] + methodtable = unrolling_iterable(methodtable) + args_w = (W_Root(),) * fixed_arity + # + def f(): + for name in methodtable: + getattr(space, name)(*args_w) + # + space.translates(f) From noreply at buildbot.pypy.org Wed Dec 7 17:40:00 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:40:00 +0100 (CET) Subject: [pypy-commit] pypy default: Progress. Message-ID: <20111207164000.B8D618205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50257:65c9c70b487b Date: 2011-12-07 14:29 +0100 http://bitbucket.org/pypy/pypy/changeset/65c9c70b487b/ Log: Progress. 
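The setup() loop in the changeset above builds one stub per entry of ObjSpace.MethodTable by exec-ing a small template through compile2. As a rough illustration (a hand-written equivalent of one generated stub, not the actual generated source), the two-argument 'add' entry behaves like:

    def add(self, w_0, w_1):
        is_root(w_0)        # annotation-time check: each argument is a W_Root
        is_root(w_1)
        return W_Root()     # result is just "some wrapped object"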
diff --git a/pypy/objspace/fake/checkmodule.py b/pypy/objspace/fake/checkmodule.py --- a/pypy/objspace/fake/checkmodule.py +++ b/pypy/objspace/fake/checkmodule.py @@ -1,7 +1,50 @@ -from pypy.objspace.fake.objspace import FakeObjSpace -from pypy.translator.driver import TranslationDriver +from pypy.objspace.fake.objspace import FakeObjSpace, W_Root, is_root +from pypy.interpreter import gateway + + +class ModuleChecker(object): + + def __init__(self): + self.space = FakeObjSpace() + + def load_module(self, modname): + space = self.space + mod = __import__('pypy.module.%s' % modname, None, None, ['__doc__']) + # force computation and record what we wrap + del space.seen_wrap[:] + module = mod.Module(space, W_Root()) + for name in module.loaders: + module._load_lazily(space, name) + self.seen = space.seen_wrap[:] + + def collect_entry_points(self): + self.entry_points = [] + for value in self.seen: + if isinstance(value, gateway.interp2app): + self.collect_interp2app(value) + + def collect_interp2app(self, interp2app): + space = self.space + activation = interp2app._code.activation + scopelen = interp2app._code.sig.scope_length() + scope_w = [W_Root()] * scopelen + # + def check(): + w_result = activation._run(space, scope_w) + is_root(w_result) + # + self.entry_points.append(check) + + def check_translates(self): + def entry_point(): + for fn in entry_points: + fn() + entry_points = self.entry_points + self.space.translates(entry_point) def checkmodule(modname): - space = FakeObjSpace() - xxx + checker = ModuleChecker() + checker.load_module(modname) + checker.collect_entry_points() + checker.check_translates() diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -1,19 +1,29 @@ from pypy.interpreter.baseobjspace import W_Root, ObjSpace +from pypy.interpreter import argument from pypy.translator.driver import TranslationDriver from pypy.annotation.model import SomeInstance, s_None from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rpython.lltypesystem import lltype from pypy.tool.sourcetools import compile2, func_with_new_name +from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.objectmodel import we_are_translated def is_root(w_obj): assert isinstance(w_obj, W_Root) +is_root.expecting = W_Root + +def is_arguments(arg): + assert isinstance(arg, argument.Arguments) +is_arguments.expecting = argument.Arguments + class Entry(ExtRegistryEntry): - _about_ = is_root + _about_ = is_root, is_arguments def compute_result_annotation(self, s_w_obj): - s_inst = SomeInstance(self.bookkeeper.getuniqueclassdef(W_Root), + cls = self.instance.expecting + s_inst = SomeInstance(self.bookkeeper.getuniqueclassdef(cls), can_be_None=True) assert s_inst.contains(s_w_obj) return s_None @@ -26,6 +36,35 @@ class FakeObjSpace(ObjSpace): + def __init__(self): + self.seen_wrap = [] + ObjSpace.__init__(self) + + w_None = W_Root() + w_False = W_Root() + w_True = W_Root() + + def newdict(self, module=False, instance=False, classofinstance=None, + strdict=False): + return W_Root() + + def wrap(self, x): + if not we_are_translated(): + self.seen_wrap.append(x) + return W_Root() + wrap._annspecialcase_ = "specialize:argtype(1)" + + def call_args(self, w_func, args): + is_root(w_func) + is_arguments(args) + return W_Root() + + def gettypefor(self, cls): + assert issubclass(cls, W_Root) + return W_Root() + + # ---------- + def translates(self, func, argtypes=None): if argtypes is None: nb_args = 
func.func_code.co_argcount diff --git a/pypy/objspace/fake/test/test_checkmodule.py b/pypy/objspace/fake/test/test_checkmodule.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/fake/test/test_checkmodule.py @@ -0,0 +1,5 @@ +from pypy.objspace.fake.checkmodule import checkmodule + + +def test_itertools_module(): + checkmodule('itertools') diff --git a/pypy/objspace/fake/test/test_objspace.py b/pypy/objspace/fake/test/test_objspace.py --- a/pypy/objspace/fake/test/test_objspace.py +++ b/pypy/objspace/fake/test/test_objspace.py @@ -27,3 +27,17 @@ getattr(space, name)(*args_w) # space.translates(f) + + def test_newdict(self): + space = self.space + space.translates(lambda: (space.newdict(), + space.newdict(strdict=True))) + + def test_constants(self): + space = self.space + space.translates(lambda: (space.w_None, space.w_True, space.w_False)) + + def test_wrap(self): + space = self.space + space.translates(lambda: (space.wrap(42), space.wrap(42.5), + space.wrap("foo"))) From noreply at buildbot.pypy.org Wed Dec 7 17:40:01 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:40:01 +0100 (CET) Subject: [pypy-commit] pypy default: extra tests. Message-ID: <20111207164001.DBAD58205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50258:914208fc6836 Date: 2011-12-07 14:30 +0100 http://bitbucket.org/pypy/pypy/changeset/914208fc6836/ Log: extra tests. diff --git a/pypy/objspace/fake/test/test_objspace.py b/pypy/objspace/fake/test/test_objspace.py --- a/pypy/objspace/fake/test/test_objspace.py +++ b/pypy/objspace/fake/test/test_objspace.py @@ -1,4 +1,5 @@ from pypy.objspace.fake.objspace import FakeObjSpace, W_Root +from pypy.interpreter.argument import Arguments from pypy.rlib.unroll import unrolling_iterable def test_create(): @@ -41,3 +42,12 @@ space = self.space space.translates(lambda: (space.wrap(42), space.wrap(42.5), space.wrap("foo"))) + + def test_call_args(self): + space = self.space + args = Arguments(space, [W_Root()]) + space.translates(lambda: space.call_args(W_Root(), args)) + + def test_gettypefor(self): + space = self.space + space.translates(lambda: space.gettypefor(W_Root)) From noreply at buildbot.pypy.org Wed Dec 7 17:40:03 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:40:03 +0100 (CET) Subject: [pypy-commit] pypy default: Progress. Message-ID: <20111207164003.0A6718205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50259:1a471ba2aa1d Date: 2011-12-07 14:36 +0100 http://bitbucket.org/pypy/pypy/changeset/1a471ba2aa1d/ Log: Progress. 
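Several of the stubs in the changeset below lean on pypy.rlib.nonconst.NonConstant: untranslated it simply returns its argument, but it stops the annotator from treating the value as a compile-time constant, so is_true() is seen as returning "any bool" rather than the literal False. A minimal sketch of the idea (simplified, not the rlib implementation):

    from pypy.rlib.nonconst import NonConstant

    def is_true_stub(w_obj):
        is_root(w_obj)
        return NonConstant(False)   # annotated as a general bool, never folded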
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -7,6 +7,7 @@ from pypy.tool.sourcetools import compile2, func_with_new_name from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.nonconst import NonConstant def is_root(w_obj): @@ -40,9 +41,9 @@ self.seen_wrap = [] ObjSpace.__init__(self) - w_None = W_Root() - w_False = W_Root() - w_True = W_Root() + def is_true(self, w_obj): + is_root(w_obj) + return NonConstant(False) def newdict(self, module=False, instance=False, classofinstance=None, strdict=False): @@ -76,6 +77,11 @@ def setup(): + for name in (ObjSpace.ConstantTable + + ObjSpace.ExceptionTable + + ['int', 'str', 'float', 'long', 'tuple', 'list', 'dict']): + setattr(FakeObjSpace, 'w_' + name, W_Root()) + # for (name, _, arity, _) in ObjSpace.MethodTable: args = ['w_%d' % i for i in range(arity)] d = {'is_root': is_root, diff --git a/pypy/objspace/fake/test/test_objspace.py b/pypy/objspace/fake/test/test_objspace.py --- a/pypy/objspace/fake/test/test_objspace.py +++ b/pypy/objspace/fake/test/test_objspace.py @@ -1,3 +1,4 @@ +import py from pypy.objspace.fake.objspace import FakeObjSpace, W_Root from pypy.interpreter.argument import Arguments from pypy.rlib.unroll import unrolling_iterable @@ -36,7 +37,9 @@ def test_constants(self): space = self.space - space.translates(lambda: (space.w_None, space.w_True, space.w_False)) + space.translates(lambda: (space.w_None, space.w_True, space.w_False, + space.w_int, space.w_str, + space.w_TypeError)) def test_wrap(self): space = self.space @@ -51,3 +54,9 @@ def test_gettypefor(self): space = self.space space.translates(lambda: space.gettypefor(W_Root)) + + def test_is_true(self): + space = self.space + space.translates(lambda: space.is_true(W_Root())) + py.test.raises(AssertionError, + space.translates, lambda: space.is_true(42)) From noreply at buildbot.pypy.org Wed Dec 7 17:40:04 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:40:04 +0100 (CET) Subject: [pypy-commit] pypy default: Progress. Message-ID: <20111207164004.2D7758205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50260:548656579bd4 Date: 2011-12-07 14:40 +0100 http://bitbucket.org/pypy/pypy/changeset/548656579bd4/ Log: Progress. diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -64,6 +64,12 @@ assert issubclass(cls, W_Root) return W_Root() + def unpackiterable(self, w_iterable, expected_length=-1): + is_root(w_iterable) + if expected_length < 0: + expected_length = 3 + return [W_Root()] * expected_length + # ---------- def translates(self, func, argtypes=None): diff --git a/pypy/objspace/fake/test/test_objspace.py b/pypy/objspace/fake/test/test_objspace.py --- a/pypy/objspace/fake/test/test_objspace.py +++ b/pypy/objspace/fake/test/test_objspace.py @@ -60,3 +60,8 @@ space.translates(lambda: space.is_true(W_Root())) py.test.raises(AssertionError, space.translates, lambda: space.is_true(42)) + + def test_unpackiterable(self): + space = self.space + space.translates(lambda: (space.unpackiterable(W_Root()), + space.unpackiterable(W_Root(), 42))) From noreply at buildbot.pypy.org Wed Dec 7 17:40:05 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:40:05 +0100 (CET) Subject: [pypy-commit] pypy default: Progress. 
Message-ID: <20111207164005.533068205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50261:090ec341827b Date: 2011-12-07 14:55 +0100 http://bitbucket.org/pypy/pypy/changeset/090ec341827b/ Log: Progress. diff --git a/pypy/objspace/fake/checkmodule.py b/pypy/objspace/fake/checkmodule.py --- a/pypy/objspace/fake/checkmodule.py +++ b/pypy/objspace/fake/checkmodule.py @@ -1,50 +1,12 @@ -from pypy.objspace.fake.objspace import FakeObjSpace, W_Root, is_root -from pypy.interpreter import gateway - - -class ModuleChecker(object): - - def __init__(self): - self.space = FakeObjSpace() - - def load_module(self, modname): - space = self.space - mod = __import__('pypy.module.%s' % modname, None, None, ['__doc__']) - # force computation and record what we wrap - del space.seen_wrap[:] - module = mod.Module(space, W_Root()) - for name in module.loaders: - module._load_lazily(space, name) - self.seen = space.seen_wrap[:] - - def collect_entry_points(self): - self.entry_points = [] - for value in self.seen: - if isinstance(value, gateway.interp2app): - self.collect_interp2app(value) - - def collect_interp2app(self, interp2app): - space = self.space - activation = interp2app._code.activation - scopelen = interp2app._code.sig.scope_length() - scope_w = [W_Root()] * scopelen - # - def check(): - w_result = activation._run(space, scope_w) - is_root(w_result) - # - self.entry_points.append(check) - - def check_translates(self): - def entry_point(): - for fn in entry_points: - fn() - entry_points = self.entry_points - self.space.translates(entry_point) +from pypy.objspace.fake.objspace import FakeObjSpace, W_Root def checkmodule(modname): - checker = ModuleChecker() - checker.load_module(modname) - checker.collect_entry_points() - checker.check_translates() + space = FakeObjSpace() + mod = __import__('pypy.module.%s' % modname, None, None, ['__doc__']) + # force computation and record what we wrap + module = mod.Module(space, W_Root()) + for name in module.loaders: + module._load_lazily(space, name) + # + space.translates() diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -1,6 +1,5 @@ from pypy.interpreter.baseobjspace import W_Root, ObjSpace -from pypy.interpreter import argument -from pypy.translator.driver import TranslationDriver +from pypy.interpreter import argument, gateway from pypy.annotation.model import SomeInstance, s_None from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rpython.lltypesystem import lltype @@ -8,6 +7,7 @@ from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.nonconst import NonConstant +from pypy.translator.translator import TranslationContext def is_root(w_obj): @@ -38,7 +38,7 @@ class FakeObjSpace(ObjSpace): def __init__(self): - self.seen_wrap = [] + self._seen_extras = [] ObjSpace.__init__(self) def is_true(self, w_obj): @@ -50,11 +50,21 @@ return W_Root() def wrap(self, x): - if not we_are_translated(): - self.seen_wrap.append(x) + if isinstance(x, gateway.interp2app): + self._see_interp2app(x) return W_Root() wrap._annspecialcase_ = "specialize:argtype(1)" + def _see_interp2app(self, interp2app): + "NOT_RPYTHON" + activation = interp2app._code.activation + scopelen = interp2app._code.sig.scope_length() + scope_w = [W_Root()] * scopelen + def check(): + w_result = activation._run(self, scope_w) + is_root(w_result) + self._seen_extras.append(check) + def call_args(self, w_func, 
args): is_root(w_func) is_arguments(args) @@ -72,14 +82,20 @@ # ---------- - def translates(self, func, argtypes=None): - if argtypes is None: - nb_args = func.func_code.co_argcount - argtypes = [W_Root] * nb_args + def translates(self, func=None, argtypes=None): + if func is not None: + if argtypes is None: + nb_args = func.func_code.co_argcount + argtypes = [W_Root] * nb_args # - driver = TranslationDriver() - driver.setup(func, argtypes) - driver.proceed(['rtype_lltype']) + t = TranslationContext() + ann = t.buildannotator() + if func is not None: + ann.build_types(func, argtypes) + for check in self._seen_extras: + ann.build_types(check, []) + t.buildrtyper().specialize() + t.checkgraphs() def setup(): diff --git a/pypy/objspace/fake/test/test_checkmodule.py b/pypy/objspace/fake/test/test_checkmodule.py --- a/pypy/objspace/fake/test/test_checkmodule.py +++ b/pypy/objspace/fake/test/test_checkmodule.py @@ -1,5 +1,25 @@ -from pypy.objspace.fake.checkmodule import checkmodule +from pypy.objspace.fake.checkmodule import checkmodule, FakeObjSpace +from pypy.interpreter.gateway import interp2app +def make_checker(): + check = [] + def see(): + check.append(True) + see._annspecialcase_ = 'specialize:memo' + return see, check + + +def test_wrap_interp2app(): + see, check = make_checker() + space = FakeObjSpace() + assert len(space._seen_extras) == 0 + assert len(check) == 0 + space.wrap(interp2app(lambda space: see())) + assert len(space._seen_extras) == 1 + assert len(check) == 0 + space.translates(lambda: None) + assert len(check) == 1 + def test_itertools_module(): checkmodule('itertools') From noreply at buildbot.pypy.org Wed Dec 7 17:40:06 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:40:06 +0100 (CET) Subject: [pypy-commit] pypy default: Progress. Message-ID: <20111207164006.788648205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50262:6e9276536c74 Date: 2011-12-07 15:17 +0100 http://bitbucket.org/pypy/pypy/changeset/6e9276536c74/ Log: Progress. 
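The important property of the checkmodule rewrite above is that space.wrap() only ever runs untranslated, while the module is being built: each interp2app it sees is turned into a small check() closure, and translates() later hands every recorded closure to the annotator. A condensed sketch of the pattern (illustrative names only; the real closure runs the interp2app's activation with dummy wrapped arguments):

    class DeferredChecks(object):
        def __init__(self):
            self._seen_extras = []            # closures to annotate later

        def see(self, interp_function):
            def check():
                interp_function()             # exercised with dummy arguments
            self._seen_extras.append(check)

        def annotate_all(self, annotator):
            for check in self._seen_extras:
                annotator.build_types(check, [])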
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -7,6 +7,7 @@ from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.nonconst import NonConstant +from pypy.rlib.rarithmetic import r_uint from pypy.translator.translator import TranslationContext @@ -41,14 +42,61 @@ self._seen_extras = [] ObjSpace.__init__(self) + def str_w(self, w_obj): + is_root(w_obj) + return NonConstant("foobar") + + def int_w(self, w_obj): + is_root(w_obj) + return NonConstant(-42) + + def float_w(self, w_obj): + is_root(w_obj) + return NonConstant(42.5) + + def uint_w(self, w_obj): + is_root(w_obj) + return r_uint(NonConstant(42)) + + def bigint_w(self, w_obj): + from pypy.rlib.rbigint import rbigint + is_root(w_obj) + return rbigint.fromint(NonConstant(42)) + + def unicode_w(self, w_obj): + is_root(w_obj) + return NonConstant(u"foobar") + def is_true(self, w_obj): is_root(w_obj) return NonConstant(False) + def unwrap(self, w_obj): + "NOT_RPYTHON" + raise NotImplementedError + def newdict(self, module=False, instance=False, classofinstance=None, strdict=False): return W_Root() + def newtuple(self, list_w): + is_root(list_w[NonConstant(0)]) + return W_Root() + + def newlist(self, list_w): + is_root(list_w[NonConstant(0)]) + return W_Root() + + def newslice(self, w_start, w_end, w_step): + is_root(w_start) + is_root(w_end) + is_root(w_step) + return W_Root() + + def marshal_w(self, w_obj): + "NOT_RPYTHON" + raise NotImplementedError + def wrap(self, x): if isinstance(x, gateway.interp2app): self._see_interp2app(x) @@ -94,6 +142,7 @@ ann.build_types(func, argtypes) for check in self._seen_extras: ann.build_types(check, []) + #t.viewcg() t.buildrtyper().specialize() t.checkgraphs() @@ -116,5 +165,8 @@ '; '.join(['is_root(%s)' % arg for arg in args]))) in d meth = func_with_new_name(d['meth'], name) setattr(FakeObjSpace, name, meth) + # + for name in ObjSpace.IrregularOpTable: + assert hasattr(FakeObjSpace, name) # missing? 
setup() diff --git a/pypy/objspace/fake/test/test_checkmodule.py b/pypy/objspace/fake/test/test_checkmodule.py --- a/pypy/objspace/fake/test/test_checkmodule.py +++ b/pypy/objspace/fake/test/test_checkmodule.py @@ -1,5 +1,9 @@ -from pypy.objspace.fake.checkmodule import checkmodule, FakeObjSpace -from pypy.interpreter.gateway import interp2app +import py +from pypy.objspace.fake.checkmodule import checkmodule +from pypy.objspace.fake.objspace import FakeObjSpace, is_root +from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.gateway import interp2app, W_Root, ObjSpace def make_checker(): @@ -9,7 +13,6 @@ see._annspecialcase_ = 'specialize:memo' return see, check - def test_wrap_interp2app(): see, check = make_checker() space = FakeObjSpace() @@ -18,8 +21,31 @@ space.wrap(interp2app(lambda space: see())) assert len(space._seen_extras) == 1 assert len(check) == 0 - space.translates(lambda: None) + space.translates() assert len(check) == 1 +def test_wrap_interp2app_int(): + see, check = make_checker() + def foobar(space, x, w_y, z): + is_root(w_y) + see() + return space.wrap(x - z) + space = FakeObjSpace() + space.wrap(interp2app(foobar, unwrap_spec=[ObjSpace, int, W_Root, int])) + space.translates() + assert check + + +def test_gettypefor_untranslated(): + py.test.skip("in-progress") + class W_Foo(Wrappable): + pass + W_Foo.typedef = TypeDef('foo', + __module__ = 'barmod', + do_it = interp2app(W_Foo.do_it)) + see, check = make_checker() + space = FakeObjSpace() + space.gettypefor(W_Foo) + def test_itertools_module(): checkmodule('itertools') From noreply at buildbot.pypy.org Wed Dec 7 17:40:07 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:40:07 +0100 (CET) Subject: [pypy-commit] pypy default: Still more progress. Message-ID: <20111207164007.9DE278205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50263:391271fcc7f9 Date: 2011-12-07 15:27 +0100 http://bitbucket.org/pypy/pypy/changeset/391271fcc7f9/ Log: Still more progress. 
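In the changeset below, gettypefor() stops being a plain stub: it goes through a TypeCache that wraps every value in the TypeDef's rawdict, so interp2app methods attached to app-level types get recorded and checked as well. A hypothetical example of what that buys (W_Point and descr_norm are invented for illustration):

    from pypy.objspace.fake.objspace import FakeObjSpace
    from pypy.interpreter.baseobjspace import Wrappable
    from pypy.interpreter.typedef import TypeDef
    from pypy.interpreter.gateway import interp2app

    class W_Point(Wrappable):
        def descr_norm(self, space):
            return space.wrap(0.0)

    W_Point.typedef = TypeDef('point',
        norm=interp2app(W_Point.descr_norm))

    space = FakeObjSpace()
    space.gettypefor(W_Point)   # wraps the interp2app, recording a check
    space.translates()          # ...which then gets annotated and rtyped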
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -1,5 +1,6 @@ -from pypy.interpreter.baseobjspace import W_Root, ObjSpace +from pypy.interpreter.baseobjspace import W_Root, ObjSpace, SpaceCache from pypy.interpreter import argument, gateway +from pypy.interpreter.typedef import TypeDef from pypy.annotation.model import SomeInstance, s_None from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rpython.lltypesystem import lltype @@ -106,11 +107,11 @@ def _see_interp2app(self, interp2app): "NOT_RPYTHON" activation = interp2app._code.activation - scopelen = interp2app._code.sig.scope_length() - scope_w = [W_Root()] * scopelen def check(): + scope_w = [W_Root()] * NonConstant(42) w_result = activation._run(self, scope_w) is_root(w_result) + check = func_with_new_name(check, 'check__' + interp2app.name) self._seen_extras.append(check) def call_args(self, w_func, args): @@ -119,8 +120,10 @@ return W_Root() def gettypefor(self, cls): - assert issubclass(cls, W_Root) - return W_Root() + return self.gettypeobject(cls.typedef) + + def gettypeobject(self, typedef): + return self.fromcache(TypeCache).getorbuild(typedef) def unpackiterable(self, w_iterable, expected_length=-1): is_root(w_iterable) @@ -137,11 +140,17 @@ argtypes = [W_Root] * nb_args # t = TranslationContext() + self.t = t # for debugging ann = t.buildannotator() if func is not None: ann.build_types(func, argtypes) - for check in self._seen_extras: - ann.build_types(check, []) + # annotate all _seen_extras, knowing that annotating some may + # grow the list + i = 0 + while i < len(self._seen_extras): + print self._seen_extras + ann.build_types(self._seen_extras[i], []) + i += 1 #t.viewcg() t.buildrtyper().specialize() t.checkgraphs() @@ -170,3 +179,12 @@ assert hasattr(FakeObjSpace, name) # missing? 
setup() + +# ____________________________________________________________ + +class TypeCache(SpaceCache): + def build(cache, typedef): + assert isinstance(typedef, TypeDef) + for value in typedef.rawdict.values(): + cache.space.wrap(value) + return W_Root() diff --git a/pypy/objspace/fake/test/test_checkmodule.py b/pypy/objspace/fake/test/test_checkmodule.py --- a/pypy/objspace/fake/test/test_checkmodule.py +++ b/pypy/objspace/fake/test/test_checkmodule.py @@ -37,15 +37,20 @@ def test_gettypefor_untranslated(): - py.test.skip("in-progress") + see, check = make_checker() class W_Foo(Wrappable): - pass + def do_it(self, space, w_x): + is_root(w_x) + see() + return W_Root() W_Foo.typedef = TypeDef('foo', __module__ = 'barmod', do_it = interp2app(W_Foo.do_it)) - see, check = make_checker() space = FakeObjSpace() space.gettypefor(W_Foo) + assert not check + space.translates() + assert check def test_itertools_module(): checkmodule('itertools') From noreply at buildbot.pypy.org Wed Dec 7 17:40:08 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:40:08 +0100 (CET) Subject: [pypy-commit] pypy default: Fix the multiple calls to build_types() by passing complete_now=False Message-ID: <20111207164008.C0B788205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50264:30e84d345b46 Date: 2011-12-07 15:36 +0100 http://bitbucket.org/pypy/pypy/changeset/30e84d345b46/ Log: Fix the multiple calls to build_types() by passing complete_now=False diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -131,6 +131,10 @@ expected_length = 3 return [W_Root()] * expected_length + def allocate_instance(self, cls, w_subtype): + "NOT_RPYTHON" + xxx + # ---------- def translates(self, func=None, argtypes=None): @@ -138,19 +142,28 @@ if argtypes is None: nb_args = func.func_code.co_argcount argtypes = [W_Root] * nb_args + else: + func = lambda: None + argtypes = [] # t = TranslationContext() self.t = t # for debugging ann = t.buildannotator() - if func is not None: - ann.build_types(func, argtypes) - # annotate all _seen_extras, knowing that annotating some may - # grow the list - i = 0 - while i < len(self._seen_extras): - print self._seen_extras - ann.build_types(self._seen_extras[i], []) - i += 1 + # + done = 0 + while True: + # annotate all _seen_extras, knowing that annotating some may + # grow the list + while done < len(self._seen_extras): + print self._seen_extras + ann.build_types(self._seen_extras[done], [], + complete_now=False) + done += 1 + # when the list stops growing, really complete + ann.build_types(func, argtypes, complete_now=True) + # if the list did not grow because of completion, we are done + if done == len(self._seen_extras): + break #t.viewcg() t.buildrtyper().specialize() t.checkgraphs() From noreply at buildbot.pypy.org Wed Dec 7 17:40:09 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:40:09 +0100 (CET) Subject: [pypy-commit] pypy default: Some progress. Message-ID: <20111207164009.EC1228205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50265:90f0debd374c Date: 2011-12-07 16:00 +0100 http://bitbucket.org/pypy/pypy/changeset/90f0debd374c/ Log: Some progress. 
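The w_some_obj() helper introduced below is the standard trick for giving the annotator a general type while still giving the rtyper something concrete to build: the NonConstant(False) branch is never taken, but it is seen, so the result is annotated as "any W_Root", while the live branch returns W_MyObject, a real instantiable class. Spelled out with comments (the same helper as in the diff, sketched here for emphasis):

    def w_some_obj():
        if NonConstant(False):
            return W_Root()       # dead branch, but it generalizes the
                                  # annotation to "instance of W_Root"
        return W_MyObject()       # live branch: a concrete, buildable class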
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -1,4 +1,5 @@ -from pypy.interpreter.baseobjspace import W_Root, ObjSpace, SpaceCache +from pypy.interpreter.baseobjspace import W_Root, ObjSpace +from pypy.interpreter.baseobjspace import Wrappable, SpaceCache from pypy.interpreter import argument, gateway from pypy.interpreter.typedef import TypeDef from pypy.annotation.model import SomeInstance, s_None @@ -6,12 +7,64 @@ from pypy.rpython.lltypesystem import lltype from pypy.tool.sourcetools import compile2, func_with_new_name from pypy.rlib.unroll import unrolling_iterable -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import instantiate, we_are_translated from pypy.rlib.nonconst import NonConstant from pypy.rlib.rarithmetic import r_uint from pypy.translator.translator import TranslationContext +class W_MyObject(Wrappable): + typedef = None + + def getdict(self, space): + return w_obj_or_none() + + def getdictvalue(self, space, attr): + attr + "xx" # check that it's a string + return w_obj_or_none() + + def setdictvalue(self, space, attr, w_value): + attr + "xx" # check that it's a string + is_root(w_value) + return NonConstant(True) + + def deldictvalue(self, space, attr): + attr + "xx" # check that it's a string + return NonConstant(True) + + def setdict(self, space, w_dict): + is_root(w_dict) + + def setclass(self, space, w_subtype): + is_root(w_subtype) + + def str_w(self, space): + return NonConstant("foobar") + + def unicode_w(self, space): + return NonConstant(u"foobar") + + def int_w(self, space): + return NonConstant(-42) + + def uint_w(self, space): + return r_uint(NonConstant(42)) + + def bigint_w(self, space): + from pypy.rlib.rbigint import rbigint + return rbigint.fromint(NonConstant(42)) + + +def w_some_obj(): + if NonConstant(False): + return W_Root() + return W_MyObject() + +def w_obj_or_none(): + if NonConstant(False): + return None + return w_some_obj() + def is_root(w_obj): assert isinstance(w_obj, W_Root) is_root.expecting = W_Root @@ -43,31 +96,10 @@ self._seen_extras = [] ObjSpace.__init__(self) - def str_w(self, w_obj): - is_root(w_obj) - return NonConstant("foobar") - - def int_w(self, w_obj): - is_root(w_obj) - return NonConstant(-42) - def float_w(self, w_obj): is_root(w_obj) return NonConstant(42.5) - def uint_w(self, w_obj): - is_root(w_obj) - return r_uint(NonConstant(42)) - - def bigint_w(self, w_obj): - from pypy.rlib.rbigint import rbigint - is_root(w_obj) - return rbigint.fromint(NonConstant(42)) - - def unicode_w(self, w_obj): - is_root(w_obj) - return NonConstant(u"foobar") - def is_true(self, w_obj): is_root(w_obj) return NonConstant(False) @@ -78,21 +110,21 @@ def newdict(self, module=False, instance=False, classofinstance=None, strdict=False): - return W_Root() + return w_some_obj() def newtuple(self, list_w): is_root(list_w[NonConstant(0)]) - return W_Root() + return w_some_obj() def newlist(self, list_w): is_root(list_w[NonConstant(0)]) - return W_Root() + return w_some_obj() def newslice(self, w_start, w_end, w_step): is_root(w_start) is_root(w_end) is_root(w_step) - return W_Root() + return w_some_obj() def marshal_w(self, w_obj): "NOT_RPYTHON" @@ -101,14 +133,14 @@ def wrap(self, x): if isinstance(x, gateway.interp2app): self._see_interp2app(x) - return W_Root() + return w_some_obj() wrap._annspecialcase_ = "specialize:argtype(1)" def _see_interp2app(self, interp2app): "NOT_RPYTHON" activation = 
interp2app._code.activation def check(): - scope_w = [W_Root()] * NonConstant(42) + scope_w = [w_some_obj()] * NonConstant(42) w_result = activation._run(self, scope_w) is_root(w_result) check = func_with_new_name(check, 'check__' + interp2app.name) @@ -117,23 +149,25 @@ def call_args(self, w_func, args): is_root(w_func) is_arguments(args) - return W_Root() + return w_some_obj() def gettypefor(self, cls): return self.gettypeobject(cls.typedef) def gettypeobject(self, typedef): + assert typedef is not None return self.fromcache(TypeCache).getorbuild(typedef) def unpackiterable(self, w_iterable, expected_length=-1): is_root(w_iterable) if expected_length < 0: expected_length = 3 - return [W_Root()] * expected_length + return [w_some_obj()] * expected_length def allocate_instance(self, cls, w_subtype): - "NOT_RPYTHON" - xxx + is_root(w_subtype) + return instantiate(cls) + allocate_instance._annspecialcase_ = "specialize:arg(1)" # ---------- @@ -173,16 +207,16 @@ for name in (ObjSpace.ConstantTable + ObjSpace.ExceptionTable + ['int', 'str', 'float', 'long', 'tuple', 'list', 'dict']): - setattr(FakeObjSpace, 'w_' + name, W_Root()) + setattr(FakeObjSpace, 'w_' + name, w_some_obj()) # for (name, _, arity, _) in ObjSpace.MethodTable: args = ['w_%d' % i for i in range(arity)] d = {'is_root': is_root, - 'W_Root': W_Root} + 'w_some_obj': w_some_obj} exec compile2("""\ def meth(self, %s): %s - return W_Root() + return w_some_obj() """ % (', '.join(args), '; '.join(['is_root(%s)' % arg for arg in args]))) in d meth = func_with_new_name(d['meth'], name) @@ -200,4 +234,4 @@ assert isinstance(typedef, TypeDef) for value in typedef.rawdict.values(): cache.space.wrap(value) - return W_Root() + return w_some_obj() diff --git a/pypy/objspace/fake/test/test_checkmodule.py b/pypy/objspace/fake/test/test_checkmodule.py --- a/pypy/objspace/fake/test/test_checkmodule.py +++ b/pypy/objspace/fake/test/test_checkmodule.py @@ -52,5 +52,6 @@ space.translates() assert check + def test_itertools_module(): checkmodule('itertools') From noreply at buildbot.pypy.org Wed Dec 7 17:40:11 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:40:11 +0100 (CET) Subject: [pypy-commit] pypy default: Fixes. Message-ID: <20111207164011.262C98205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50266:2db948e2fa60 Date: 2011-12-07 16:38 +0100 http://bitbucket.org/pypy/pypy/changeset/2db948e2fa60/ Log: Fixes. 
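A small idiom from the W_MyObject stubs above deserves a note: concatenating an argument with a string literal is a free, annotation-time assertion that the argument is an RPython string. The result is discarded, but annotation fails if anything other than a string can flow in. Sketch:

    def setdictvalue_stub(space, attr, w_value):
        attr + "xx"               # annotates only if attr is a string
        is_root(w_value)          # ...and w_value is some wrapped object
        return NonConstant(True)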
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1607,6 +1607,8 @@ 'UnicodeError', 'ValueError', 'ZeroDivisionError', + 'UnicodeEncodeError', + 'UnicodeDecodeError', ] ## Irregular part of the interface: diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -146,6 +146,24 @@ check = func_with_new_name(check, 'check__' + interp2app.name) self._seen_extras.append(check) + def call_obj_args(self, w_callable, w_obj, args): + is_root(w_callable) + is_root(w_obj) + is_arguments(args) + return w_some_obj() + + def call(self, w_callable, w_args, w_kwds=None): + is_root(w_callable) + is_root(w_args) + is_root(w_kwds) + return w_some_obj() + + def call_function(self, w_func, *args_w): + is_root(w_func) + for w_arg in list(args_w): + is_root(w_arg) + return w_some_obj() + def call_args(self, w_func, args): is_root(w_func) is_arguments(args) @@ -206,7 +224,8 @@ def setup(): for name in (ObjSpace.ConstantTable + ObjSpace.ExceptionTable + - ['int', 'str', 'float', 'long', 'tuple', 'list', 'dict']): + ['int', 'str', 'float', 'long', 'tuple', 'list', + 'dict', 'unicode']): setattr(FakeObjSpace, 'w_' + name, w_some_obj()) # for (name, _, arity, _) in ObjSpace.MethodTable: From noreply at buildbot.pypy.org Wed Dec 7 17:40:12 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:40:12 +0100 (CET) Subject: [pypy-commit] pypy default: fix test Message-ID: <20111207164012.483E08205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50267:70d0240ac95a Date: 2011-12-07 16:41 +0100 http://bitbucket.org/pypy/pypy/changeset/70d0240ac95a/ Log: fix test diff --git a/pypy/objspace/fake/test/test_objspace.py b/pypy/objspace/fake/test/test_objspace.py --- a/pypy/objspace/fake/test/test_objspace.py +++ b/pypy/objspace/fake/test/test_objspace.py @@ -1,6 +1,8 @@ import py from pypy.objspace.fake.objspace import FakeObjSpace, W_Root from pypy.interpreter.argument import Arguments +from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.baseobjspace import Wrappable from pypy.rlib.unroll import unrolling_iterable def test_create(): @@ -53,7 +55,9 @@ def test_gettypefor(self): space = self.space - space.translates(lambda: space.gettypefor(W_Root)) + class W_Foo(Wrappable): + typedef = TypeDef("foo") + space.translates(lambda: space.gettypefor(W_Foo)) def test_is_true(self): space = self.space From noreply at buildbot.pypy.org Wed Dec 7 17:40:13 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:40:13 +0100 (CET) Subject: [pypy-commit] pypy default: Kill a dependency to the stdobjspace. Message-ID: <20111207164013.6E3008205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50268:93c1adc1f2e5 Date: 2011-12-07 17:07 +0100 http://bitbucket.org/pypy/pypy/changeset/93c1adc1f2e5/ Log: Kill a dependency to the stdobjspace. 
diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -4,7 +4,6 @@ from pypy.interpreter.typedef import TypeDef from pypy.objspace.std.floattype import float_typedef from pypy.objspace.std.inttype import int_typedef -from pypy.objspace.std.typeobject import W_TypeObject from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name @@ -33,9 +32,8 @@ _attrs_ = () def descr__new__(space, w_subtype, __args__): - assert isinstance(w_subtype, W_TypeObject) raise operationerrfmt(space.w_TypeError, "cannot create '%s' instances", - w_subtype.get_module_type_name() + w_subtype.getname(space, '?') ) def descr_str(self, space): @@ -266,4 +264,4 @@ __module__ = "numpypy", __new__ = interp2app(W_Float64Box.descr__new__.im_func), -) \ No newline at end of file +) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -173,7 +173,7 @@ raises(TypeError, numpy.number, 0) raises(TypeError, numpy.integer, 0) exc = raises(TypeError, numpy.signedinteger, 0) - assert str(exc.value) == "cannot create 'numpypy.signedinteger' instances" + assert str(exc.value) == "cannot create 'signedinteger' instances" raises(TypeError, numpy.floating, 0) raises(TypeError, numpy.inexact, 0) From noreply at buildbot.pypy.org Wed Dec 7 17:40:14 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:40:14 +0100 (CET) Subject: [pypy-commit] pypy default: Fixes. Now micronumpy passes :-) Message-ID: <20111207164014.937B58205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50269:b1a1441f2980 Date: 2011-12-07 17:18 +0100 http://bitbucket.org/pypy/pypy/changeset/b1a1441f2980/ Log: Fixes. 
Now micronumpy passes :-) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -1,7 +1,7 @@ from pypy.interpreter.baseobjspace import W_Root, ObjSpace from pypy.interpreter.baseobjspace import Wrappable, SpaceCache from pypy.interpreter import argument, gateway -from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.annotation.model import SomeInstance, s_None from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rpython.lltypesystem import lltype @@ -113,11 +113,13 @@ return w_some_obj() def newtuple(self, list_w): - is_root(list_w[NonConstant(0)]) + for w_x in list_w: + is_root(w_x) return w_some_obj() def newlist(self, list_w): - is_root(list_w[NonConstant(0)]) + for w_x in list_w: + is_root(w_x) return w_some_obj() def newslice(self, w_start, w_end, w_step): @@ -131,8 +133,11 @@ raise NotImplementedError def wrap(self, x): - if isinstance(x, gateway.interp2app): - self._see_interp2app(x) + if not we_are_translated(): + if isinstance(x, gateway.interp2app): + self._see_interp2app(x) + if isinstance(x, GetSetProperty): + self._see_getsetproperty(x) return w_some_obj() wrap._annspecialcase_ = "specialize:argtype(1)" @@ -146,6 +151,21 @@ check = func_with_new_name(check, 'check__' + interp2app.name) self._seen_extras.append(check) + def _see_getsetproperty(self, getsetproperty): + "NOT_RPYTHON" + space = self + def checkprop(): + getsetproperty.fget(getsetproperty, space, w_some_obj()) + if getsetproperty.fset is not None: + getsetproperty.fset(getsetproperty, space, w_some_obj(), + w_some_obj()) + if getsetproperty.fdel is not None: + getsetproperty.fdel(getsetproperty, space, w_some_obj()) + if not getsetproperty.name.startswith('<'): + checkprop = func_with_new_name(checkprop, + 'checkprop__' + getsetproperty.name) + self._seen_extras.append(checkprop) + def call_obj_args(self, w_callable, w_obj, args): is_root(w_callable) is_root(w_obj) @@ -187,6 +207,18 @@ return instantiate(cls) allocate_instance._annspecialcase_ = "specialize:arg(1)" + def decode_index(self, w_index_or_slice, seqlength): + is_root(w_index_or_slice) + return (NonConstant(42), NonConstant(42), NonConstant(42)) + + def decode_index4(self, w_index_or_slice, seqlength): + is_root(w_index_or_slice) + return (NonConstant(42), NonConstant(42), + NonConstant(42), NonConstant(42)) + + def exec_(self, *args, **kwds): + pass + # ---------- def translates(self, func=None, argtypes=None): @@ -225,7 +257,8 @@ for name in (ObjSpace.ConstantTable + ObjSpace.ExceptionTable + ['int', 'str', 'float', 'long', 'tuple', 'list', - 'dict', 'unicode']): + 'dict', 'unicode', 'complex', 'slice', 'bool', + 'type']): setattr(FakeObjSpace, 'w_' + name, w_some_obj()) # for (name, _, arity, _) in ObjSpace.MethodTable: @@ -254,3 +287,13 @@ for value in typedef.rawdict.values(): cache.space.wrap(value) return w_some_obj() + +class FakeCompiler(object): + pass +FakeObjSpace.default_compiler = FakeCompiler() + +class FakeModule(object): + def get(self, name): + name + "xx" # check that it's a string + return w_some_obj() +FakeObjSpace.sys = FakeModule() diff --git a/pypy/objspace/fake/test/test_checkmodule.py b/pypy/objspace/fake/test/test_checkmodule.py --- a/pypy/objspace/fake/test/test_checkmodule.py +++ b/pypy/objspace/fake/test/test_checkmodule.py @@ -1,8 +1,7 @@ import py -from pypy.objspace.fake.checkmodule import checkmodule from pypy.objspace.fake.objspace import 
FakeObjSpace, is_root from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, W_Root, ObjSpace @@ -35,6 +34,17 @@ space.translates() assert check +def test_wrap_GetSetProperty(): + see, check = make_checker() + def foobar(w_obj, space): + is_root(w_obj) + see() + return space.w_None + space = FakeObjSpace() + space.wrap(GetSetProperty(foobar)) + space.translates() + assert check + def test_gettypefor_untranslated(): see, check = make_checker() @@ -51,7 +61,3 @@ assert not check space.translates() assert check - - -def test_itertools_module(): - checkmodule('itertools') diff --git a/pypy/objspace/fake/test/test_objspace.py b/pypy/objspace/fake/test/test_objspace.py --- a/pypy/objspace/fake/test/test_objspace.py +++ b/pypy/objspace/fake/test/test_objspace.py @@ -69,3 +69,6 @@ space = self.space space.translates(lambda: (space.unpackiterable(W_Root()), space.unpackiterable(W_Root(), 42))) + + def test_newlist(self): + self.space.newlist([W_Root(), W_Root()]) diff --git a/pypy/objspace/fake/test/test_zmodules.py b/pypy/objspace/fake/test/test_zmodules.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/fake/test/test_zmodules.py @@ -0,0 +1,10 @@ +from pypy.config.pypyoption import working_modules +from pypy.objspace.fake.checkmodule import checkmodule +from pypy.tool.sourcetools import compile2 + + +for name in sorted(working_modules): + exec compile2("""\ + def test_module_%s(): + checkmodule(%r) + """ % (name, name)) diff --git a/pypy/translator/translator.py b/pypy/translator/translator.py --- a/pypy/translator/translator.py +++ b/pypy/translator/translator.py @@ -150,11 +150,11 @@ from pypy.translator.tool.graphpage import FlowGraphPage FlowGraphPage(self).display() - def viewcg(self, center_graph=None): + def viewcg(self, center_graph=None, huge=100): """Shows the whole call graph and the class hierarchy, based on the computed annotations.""" from pypy.translator.tool.graphpage import TranslatorPage - TranslatorPage(self, center_graph=center_graph).display() + TranslatorPage(self, center_graph=center_graph, huge=huge).display() From noreply at buildbot.pypy.org Wed Dec 7 17:40:15 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:40:15 +0100 (CET) Subject: [pypy-commit] pypy default: List explicitly some of the modules for which it is known to work. Message-ID: <20111207164015.B62218205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50270:139fdb431fef Date: 2011-12-07 17:29 +0100 http://bitbucket.org/pypy/pypy/changeset/139fdb431fef/ Log: List explicitly some of the modules for which it is known to work. It doesn't work out of the box on any module. 
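Each of the per-module tests added below boils down to a single call, and the same check can be run by hand (a sketch, assuming an importable PyPy source checkout):

    from pypy.objspace.fake.checkmodule import checkmodule

    checkmodule('_bisect')   # builds pypy/module/_bisect against the fake
                             # space, then annotates and rtypes it; raises
                             # on failure

The pypy/bin/checkmodule.py script reworked earlier in this series is a thin command-line wrapper around the same function.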
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -128,6 +128,12 @@ is_root(w_step) return w_some_obj() + def newint(self, x): + return w_some_obj() + + def newfloat(self, x): + return w_some_obj() + def marshal_w(self, w_obj): "NOT_RPYTHON" raise NotImplementedError diff --git a/pypy/objspace/fake/test/test_zmodules.py b/pypy/objspace/fake/test/test_zmodules.py --- a/pypy/objspace/fake/test/test_zmodules.py +++ b/pypy/objspace/fake/test/test_zmodules.py @@ -1,10 +1,14 @@ -from pypy.config.pypyoption import working_modules from pypy.objspace.fake.checkmodule import checkmodule -from pypy.tool.sourcetools import compile2 -for name in sorted(working_modules): - exec compile2("""\ - def test_module_%s(): - checkmodule(%r) - """ % (name, name)) +def test__bisect(): + checkmodule('_bisect') + +def test__random(): + checkmodule('_random') + +def test_itertools(): + checkmodule('itertools') + +def test_micronumpy(): + checkmodule('micronumpy') From noreply at buildbot.pypy.org Wed Dec 7 17:40:16 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:40:16 +0100 (CET) Subject: [pypy-commit] pypy default: Wrong logic. Must only call ann.complete() once at the end. Message-ID: <20111207164016.D8E148205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50271:ea6c0a78b1e6 Date: 2011-12-07 17:39 +0100 http://bitbucket.org/pypy/pypy/changeset/ea6c0a78b1e6/ Log: Wrong logic. Must only call ann.complete() once at the end. diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -232,28 +232,22 @@ if argtypes is None: nb_args = func.func_code.co_argcount argtypes = [W_Root] * nb_args - else: - func = lambda: None - argtypes = [] # t = TranslationContext() self.t = t # for debugging ann = t.buildannotator() + if func is not None: + ann.build_types(func, argtypes, complete_now=False) # + # annotate all _seen_extras, knowing that annotating some may + # grow the list done = 0 - while True: - # annotate all _seen_extras, knowing that annotating some may - # grow the list - while done < len(self._seen_extras): - print self._seen_extras - ann.build_types(self._seen_extras[done], [], - complete_now=False) - done += 1 - # when the list stops growing, really complete - ann.build_types(func, argtypes, complete_now=True) - # if the list did not grow because of completion, we are done - if done == len(self._seen_extras): - break + while done < len(self._seen_extras): + print self._seen_extras + ann.build_types(self._seen_extras[done], [], + complete_now=False) + done += 1 + ann.complete() #t.viewcg() t.buildrtyper().specialize() t.checkgraphs() diff --git a/pypy/objspace/fake/test/test_zmodules.py b/pypy/objspace/fake/test/test_zmodules.py --- a/pypy/objspace/fake/test/test_zmodules.py +++ b/pypy/objspace/fake/test/test_zmodules.py @@ -7,6 +7,9 @@ def test__random(): checkmodule('_random') +def test_cStringIO(): + checkmodule('cStringIO') + def test_itertools(): checkmodule('itertools') From noreply at buildbot.pypy.org Wed Dec 7 17:40:18 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:40:18 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20111207164018.0DB408205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50272:3d44c9c53444 Date: 2011-12-07 17:39 +0100 http://bitbucket.org/pypy/pypy/changeset/3d44c9c53444/ 
Log: merge heads diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -916,6 +916,15 @@ def descr_debug_repr(self, space): return space.wrap(self.debug_repr()) + def descr_array_iface(self, space): + concrete = self.get_concrete() + storage = concrete.get_storage(space) + addr = rffi.cast(lltype.Signed, storage) + w_d = space.newdict() + space.setitem_str(w_d, 'data', space.newtuple([space.wrap(addr), + space.w_False])) + return w_d + def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): return w_obj @@ -976,6 +985,9 @@ # so in order to have a consistent API, let it go through. pass + def get_storage(self, space): + raise OperationError(space.w_TypeError, space.wrap("Cannot get array interface on scalars in pypy")) + class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs @@ -1263,6 +1275,9 @@ a_iter = a_iter.next(len(array.shape)) return array + def get_storage(self, space): + return self.parent.get_storage(space) + class W_NDimArray(BaseArray): """ A class representing contiguous array. We know that each iteration by say ufunc will increase the data index by one @@ -1325,6 +1340,9 @@ def debug_repr(self): return 'Array' + def get_storage(self, space): + return self.storage + def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) @@ -1444,6 +1462,7 @@ __repr__ = interp2app(BaseArray.descr_repr), __str__ = interp2app(BaseArray.descr_str), __debug_repr__ = interp2app(BaseArray.descr_debug_repr), + __array_interface__ = GetSetProperty(BaseArray.descr_array_iface), dtype = GetSetProperty(BaseArray.descr_get_dtype), shape = GetSetProperty(BaseArray.descr_get_shape, diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1114,6 +1114,16 @@ b = a[0].copy() assert (b == zeros(10)).all() + def test_array_interface(self): + from numpypy import array + a = array([1, 2, 3]) + i = a.__array_interface__ + assert isinstance(i['data'][0], int) + a = a[::2] + i = a.__array_interface__ + assert isinstance(i['data'][0], int) + raises(TypeError, getattr, array(3), '__array_interface__') + class AppTestSupport(BaseNumpyAppTest): def setup_class(cls): import struct diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -420,7 +420,11 @@ low, high = _get_file_size(self.file_handle) if not high and low <= sys.maxint: return low + # not so sure if the signed/unsigned strictness is a good idea: + high = rffi.cast(lltype.Unsigned, high) + low = rffi.cast(lltype.Unsigned, low) size = (high << 32) + low + size = rffi.cast(lltype.Signed, size) elif _POSIX: st = os.fstat(self.fd) size = st[stat.ST_SIZE] From noreply at buildbot.pypy.org Wed Dec 7 17:52:11 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 17:52:11 +0100 (CET) Subject: [pypy-commit] pypy default: A passing test for widening casts from a short unsigned value. Message-ID: <20111207165211.C82DD8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50273:bb4fc57d2649 Date: 2011-12-07 17:51 +0100 http://bitbucket.org/pypy/pypy/changeset/bb4fc57d2649/ Log: A passing test for widening casts from a short unsigned value. 
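The test added below pins down the expected semantics: casting a signed word to rffi.USHORT keeps only the low 16 bits, and widening it back with intmask() must zero-extend, never sign-extend. In plain-Python terms, the property the C backend has to preserve is (sketch):

    def upcast_from_ushort(v):
        return v & 0xFFFF                        # truncate to 16 bits

    assert upcast_from_ushort(0x1234CDEF) == 0xCDEF
    assert upcast_from_ushort(-1) == 0xFFFF      # no sign-extension on the way back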
diff --git a/pypy/translator/c/test/test_typed.py b/pypy/translator/c/test/test_typed.py --- a/pypy/translator/c/test/test_typed.py +++ b/pypy/translator/c/test/test_typed.py @@ -275,6 +275,14 @@ fn = self.getcompiled(f, [r_longlong]) assert fn(0) == 0 + def test_upcast_int(self): + from pypy.rpython.lltypesystem import rffi + def f(v): + v = rffi.cast(rffi.USHORT, v) + return intmask(v) + fn = self.getcompiled(f, [int]) + assert fn(0x1234CDEF) == 0xCDEF + def test_function_ptr(self): def f1(): return 1 From noreply at buildbot.pypy.org Wed Dec 7 18:18:18 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 18:18:18 +0100 (CET) Subject: [pypy-commit] pypy default: Tweaks. Message-ID: <20111207171818.6527B8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50274:831d216b57f0 Date: 2011-12-07 18:06 +0100 http://bitbucket.org/pypy/pypy/changeset/831d216b57f0/ Log: Tweaks. diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py --- a/pypy/bin/checkmodule.py +++ b/pypy/bin/checkmodule.py @@ -23,8 +23,16 @@ print >> sys.stderr, "Bad command line" print >> sys.stderr, __doc__ sys.exit(1) - checkmodule(modname) - print 'Passed.' + try: + checkmodule(modname) + except Exception, e: + import traceback, pdb + traceback.print_exc() + pdb.post_mortem(sys.exc_info()[2]) + return 1 + else: + print 'Passed.' + return 0 if __name__ == '__main__': - main(sys.argv) + sys.exit(main(sys.argv)) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -258,7 +258,7 @@ ObjSpace.ExceptionTable + ['int', 'str', 'float', 'long', 'tuple', 'list', 'dict', 'unicode', 'complex', 'slice', 'bool', - 'type']): + 'type', 'basestring']): setattr(FakeObjSpace, 'w_' + name, w_some_obj()) # for (name, _, arity, _) in ObjSpace.MethodTable: @@ -297,3 +297,4 @@ name + "xx" # check that it's a string return w_some_obj() FakeObjSpace.sys = FakeModule() +FakeObjSpace.sys.filesystemencoding = 'foobar' From noreply at buildbot.pypy.org Wed Dec 7 18:18:19 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 18:18:19 +0100 (CET) Subject: [pypy-commit] pypy default: Also accept a full directory name, for easier tab-completion. Message-ID: <20111207171819.87E738205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50275:aeb5a474cce9 Date: 2011-12-07 18:17 +0100 http://bitbucket.org/pypy/pypy/changeset/aeb5a474cce9/ Log: Also accept a full directory name, for easier tab-completion. diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py --- a/pypy/bin/checkmodule.py +++ b/pypy/bin/checkmodule.py @@ -7,7 +7,7 @@ modules compiles without doing a full translation. """ import autopath -import sys +import sys, os from pypy.objspace.fake.checkmodule import checkmodule @@ -23,6 +23,13 @@ print >> sys.stderr, "Bad command line" print >> sys.stderr, __doc__ sys.exit(1) + if os.path.sep in modname: + if os.path.basename(modname) == '': + modname = os.path.dirname(modname) + if os.path.basename(os.path.dirname(modname)) != 'module': + print >> sys.stderr, "Must give '../module/xxx', or just 'xxx'." 
+ sys.exit(1) + modname = os.path.basename(modname) try: checkmodule(modname) except Exception, e: From noreply at buildbot.pypy.org Wed Dec 7 18:27:42 2011 From: noreply at buildbot.pypy.org (hager) Date: Wed, 7 Dec 2011 18:27:42 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: resize float/int conversion area Message-ID: <20111207172742.868BF8205C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50276:c79efdc9a57f Date: 2011-12-07 18:23 +0100 http://bitbucket.org/pypy/pypy/changeset/c79efdc9a57f/ Log: resize float/int conversion area diff --git a/pypy/jit/backend/ppc/ppcgen/arch.py b/pypy/jit/backend/ppc/ppcgen/arch.py --- a/pypy/jit/backend/ppc/ppcgen/arch.py +++ b/pypy/jit/backend/ppc/ppcgen/arch.py @@ -20,5 +20,5 @@ FORCE_INDEX = WORD GPR_SAVE_AREA = len(NONVOLATILES) * WORD FPR_SAVE_AREA = len(NONVOLATILES_FLOAT) * DWORD -FLOAT_INT_CONVERSION = 4 * WORD +FLOAT_INT_CONVERSION = WORD MAX_REG_PARAMS = 8 From noreply at buildbot.pypy.org Wed Dec 7 18:27:43 2011 From: noreply at buildbot.pypy.org (hager) Date: Wed, 7 Dec 2011 18:27:43 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: add sanity check to bl_abs Message-ID: <20111207172743.AB9FB8205C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50277:e20c4260d119 Date: 2011-12-07 18:23 +0100 http://bitbucket.org/pypy/pypy/changeset/e20c4260d119/ Log: add sanity check to bl_abs diff --git a/pypy/jit/backend/ppc/ppcgen/codebuilder.py b/pypy/jit/backend/ppc/ppcgen/codebuilder.py --- a/pypy/jit/backend/ppc/ppcgen/codebuilder.py +++ b/pypy/jit/backend/ppc/ppcgen/codebuilder.py @@ -1015,8 +1015,9 @@ self.bctr() def bl_abs(self, address): - self.load_imm(r.r0, address) + self.alloc_scratch_reg(address) self.mtctr(r.r0.value) + self.free_scratch_reg() self.bctrl() def prepare_insts_blocks(self, show=False): From noreply at buildbot.pypy.org Wed Dec 7 18:27:44 2011 From: noreply at buildbot.pypy.org (hager) Date: Wed, 7 Dec 2011 18:27:44 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: remove bug in computation of frame size Message-ID: <20111207172744.D207E8205C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50278:ba02345dd112 Date: 2011-12-07 18:26 +0100 http://bitbucket.org/pypy/pypy/changeset/ba02345dd112/ Log: remove bug in computation of frame size diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -756,7 +756,7 @@ + self.ENCODING_AREA + regalloc.frame_manager.frame_depth * WORD + self.max_stack_params * WORD - + BACKCHAIN_SIZE) + + BACKCHAIN_SIZE * WORD) return frame_depth From noreply at buildbot.pypy.org Wed Dec 7 18:27:46 2011 From: noreply at buildbot.pypy.org (hager) Date: Wed, 7 Dec 2011 18:27:46 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: implement CALL according to new frame layout for 32 bit, 64 bit is disabled right now Message-ID: <20111207172746.04FD68205C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50279:d69f7c5cc1e7 Date: 2011-12-07 18:27 +0100 http://bitbucket.org/pypy/pypy/changeset/d69f7c5cc1e7/ Log: implement CALL according to new frame layout for 32 bit, 64 bit is disabled right now diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -348,6 +348,7 @@ n 
+= WORD stack_args.append(None) + """ # adjust SP and compute size of parameter save area if IS_PPC_32: stack_space = BACKCHAIN_SIZE + len(stack_args) * WORD @@ -364,10 +365,13 @@ self.mc.stdu(r.SP.value, r.SP.value, -stack_space) self.mc.mflr(r.r0.value) self.mc.std(r.r0.value, r.SP.value, stack_space + 2 * WORD) + """ + # compute maximum of parameters passed + self.max_stack_params = max(self.max_stack_params, len(stack_args)) + + """ # then we push everything on the stack - self.max_stack_params = max(self.max_stack_params, len(stack_args))\ - + MAX_REG_PARAMS for i, arg in enumerate(stack_args): if IS_PPC_32: abi = 2 @@ -380,6 +384,25 @@ self.mc.stw(r.r0.value, r.SP.value, offset) else: self.mc.std(r.r0.value, r.SP.value, offset) + """ + + # compute offset at which parameters are stored + if IS_PPC_32: + param_offset = BACKCHAIN_SIZE * WORD + else: + param_offset = ((BACKCHAIN_SIZE + MAX_REG_PARAMS) + * WORD) # space for first 8 parameters + + self.mc.alloc_scratch_reg() + for i, arg in enumerate(stack_args): + offset = param_offset + i * WORD + if arg is not None: + self.mc.load_imm(r.r0, arg.value) + if IS_PPC_32: + self.mc.stw(r.r0.value, r.SP.value, offset) + else: + self.mc.std(r.r0.value, r.SP.value, offset) + self.mc.free_scratch_reg() # collect variables that need to go in registers # and the registers they will be stored in @@ -411,6 +434,7 @@ # remap values stored in core registers remap_frame_layout(self, non_float_locs, non_float_regs, r.r0) + """ #the actual call if IS_PPC_32: self.mc.bl_abs(adr) @@ -426,6 +450,13 @@ self.mc.ld(r.r0.value, r.SP.value, stack_space + 2 * WORD) self.mc.mtlr(r.r0.value) self.mc.addi(r.SP.value, r.SP.value, stack_space) + """ + + # the actual call + if IS_PPC_32: + self.mc.bl_abs(adr) + else: + assert 0 self.mark_gc_roots(force_index) regalloc.possibly_free_vars(args) From noreply at buildbot.pypy.org Wed Dec 7 18:29:19 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Wed, 7 Dec 2011 18:29:19 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: merge default Message-ID: <20111207172919.C10C98205C@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50280:da31cc0f6eee Date: 2011-12-07 18:28 +0100 http://bitbucket.org/pypy/pypy/changeset/da31cc0f6eee/ Log: merge default diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py --- a/pypy/bin/checkmodule.py +++ b/pypy/bin/checkmodule.py @@ -1,43 +1,45 @@ #! /usr/bin/env python """ -Usage: checkmodule.py [-b backend] +Usage: checkmodule.py -Compiles the PyPy extension module from pypy/module// -into a fake program which does nothing. Useful for testing whether a -modules compiles without doing a full translation. Default backend is cli. - -WARNING: this is still incomplete: there are chances that the -compilation fails with strange errors not due to the module. If a -module is known to compile during a translation but don't pass -checkmodule.py, please report the bug (or, better, correct it :-). +Check annotation and rtyping of the PyPy extension module from +pypy/module//. Useful for testing whether a +modules compiles without doing a full translation. 
""" import autopath -import sys +import sys, os from pypy.objspace.fake.checkmodule import checkmodule def main(argv): - try: - assert len(argv) in (2, 4) - if len(argv) == 2: - backend = 'cli' - modname = argv[1] - if modname in ('-h', '--help'): - print >> sys.stderr, __doc__ - sys.exit(0) - if modname.startswith('-'): - print >> sys.stderr, "Bad command line" - print >> sys.stderr, __doc__ - sys.exit(1) - else: - _, b, backend, modname = argv - assert b == '-b' - except AssertionError: + if len(argv) != 2: print >> sys.stderr, __doc__ sys.exit(2) + modname = argv[1] + if modname in ('-h', '--help'): + print >> sys.stderr, __doc__ + sys.exit(0) + if modname.startswith('-'): + print >> sys.stderr, "Bad command line" + print >> sys.stderr, __doc__ + sys.exit(1) + if os.path.sep in modname: + if os.path.basename(modname) == '': + modname = os.path.dirname(modname) + if os.path.basename(os.path.dirname(modname)) != 'module': + print >> sys.stderr, "Must give '../module/xxx', or just 'xxx'." + sys.exit(1) + modname = os.path.basename(modname) + try: + checkmodule(modname) + except Exception, e: + import traceback, pdb + traceback.print_exc() + pdb.post_mortem(sys.exc_info()[2]) + return 1 else: - checkmodule(modname, backend, interactive=True) - print 'Module compiled succesfully' + print 'Passed.' + return 0 if __name__ == '__main__': - main(sys.argv) + sys.exit(main(sys.argv)) diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -1,6 +1,5 @@ """codegen helpers and AST constant folding.""" import sys -import itertools from pypy.interpreter.astcompiler import ast, consts, misc from pypy.tool import stdlib_opcode as ops @@ -146,8 +145,7 @@ } unrolling_unary_folders = unrolling_iterable(unary_folders.items()) -for folder in itertools.chain(binary_folders.itervalues(), - unary_folders.itervalues()): +for folder in binary_folders.values() + unary_folders.values(): folder._always_inline_ = True del folder diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1,4 +1,3 @@ -import itertools import pypy from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction @@ -519,8 +518,8 @@ exception_types_w = self.export_builtin_exceptions() # initialize with "bootstrap types" from objspace (e.g. 
w_None) - types_w = itertools.chain(self.get_builtin_types().iteritems(), - exception_types_w.iteritems()) + types_w = (self.get_builtin_types().items() + + exception_types_w.items()) for name, w_type in types_w: self.setitem(self.builtin.w_dict, self.wrap(name), w_type) @@ -1608,6 +1607,8 @@ 'UnicodeError', 'ValueError', 'ZeroDivisionError', + 'UnicodeEncodeError', + 'UnicodeDecodeError', ] ## Irregular part of the interface: diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -439,9 +439,6 @@ self.w_it = self.space.iter(self.space.next(self.w_iterables)) def next_w(self): - if not self.w_iterables: - # already stopped - raise OperationError(self.space.w_StopIteration, self.space.w_None) if not self.w_it: self._advance() try: diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -4,7 +4,6 @@ from pypy.interpreter.typedef import TypeDef from pypy.objspace.std.floattype import float_typedef from pypy.objspace.std.inttype import int_typedef -from pypy.objspace.std.typeobject import W_TypeObject from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name @@ -33,9 +32,8 @@ _attrs_ = () def descr__new__(space, w_subtype, __args__): - assert isinstance(w_subtype, W_TypeObject) raise operationerrfmt(space.w_TypeError, "cannot create '%s' instances", - w_subtype.get_module_type_name() + w_subtype.getname(space, '?') ) def descr_str(self, space): @@ -266,4 +264,4 @@ __module__ = "numpypy", __new__ = interp2app(W_Float64Box.descr__new__.im_func), -) \ No newline at end of file +) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -18,7 +18,7 @@ VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True}) class W_Dtype(Wrappable): - _immuable_fields_ = ["itemtype", "num", "kind"] + _immutable_fields_ = ["itemtype", "num", "kind"] def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[]): self.signature = signature.BaseSignature() diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -852,7 +852,7 @@ if len(args_w) == 1: w_shape = args_w[0] else: - w_shape = space.newlist(args_w) + w_shape = space.newtuple(args_w) concrete = self.get_concrete() new_shape = get_shape_from_iterable(space, concrete.find_size(), w_shape) @@ -916,6 +916,15 @@ def descr_debug_repr(self, space): return space.wrap(self.debug_repr()) + def descr_array_iface(self, space): + concrete = self.get_concrete() + storage = concrete.get_storage(space) + addr = rffi.cast(lltype.Signed, storage) + w_d = space.newdict() + space.setitem_str(w_d, 'data', space.newtuple([space.wrap(addr), + space.w_False])) + return w_d + def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): return w_obj @@ -976,6 +985,9 @@ # so in order to have a consistent API, let it go through. 
pass + def get_storage(self, space): + raise OperationError(space.w_TypeError, space.wrap("Cannot get array interface on scalars in pypy")) + class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs @@ -1263,6 +1275,9 @@ a_iter = a_iter.next(len(array.shape)) return array + def get_storage(self, space): + return self.parent.get_storage(space) + class W_NDimArray(BaseArray): """ A class representing contiguous array. We know that each iteration by say ufunc will increase the data index by one @@ -1325,6 +1340,9 @@ def debug_repr(self): return 'Array' + def get_storage(self, space): + return self.storage + def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) @@ -1444,6 +1462,7 @@ __repr__ = interp2app(BaseArray.descr_repr), __str__ = interp2app(BaseArray.descr_str), __debug_repr__ = interp2app(BaseArray.descr_debug_repr), + __array_interface__ = GetSetProperty(BaseArray.descr_array_iface), dtype = GetSetProperty(BaseArray.descr_get_dtype), shape = GetSetProperty(BaseArray.descr_get_shape, diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -174,7 +174,7 @@ raises(TypeError, numpy.number, 0) raises(TypeError, numpy.integer, 0) exc = raises(TypeError, numpy.signedinteger, 0) - assert str(exc.value) == "cannot create 'numpypy.signedinteger' instances" + assert str(exc.value) == "cannot create 'signedinteger' instances" raises(TypeError, numpy.floating, 0) raises(TypeError, numpy.inexact, 0) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1114,6 +1114,16 @@ b = a[0].copy() assert (b == zeros(10)).all() + def test_array_interface(self): + from numpypy import array + a = array([1, 2, 3]) + i = a.__array_interface__ + assert isinstance(i['data'][0], int) + a = a[::2] + i = a.__array_interface__ + assert isinstance(i['data'][0], int) + raises(TypeError, getattr, array(3), '__array_interface__') + class AppTestSupport(BaseNumpyAppTest): def setup_class(cls): import struct diff --git a/pypy/objspace/fake/__init__.py b/pypy/objspace/fake/__init__.py --- a/pypy/objspace/fake/__init__.py +++ b/pypy/objspace/fake/__init__.py @@ -1,2 +0,0 @@ -from objspace import FakeObjSpace -Space = FakeObjSpace diff --git a/pypy/objspace/fake/checkmodule.py b/pypy/objspace/fake/checkmodule.py --- a/pypy/objspace/fake/checkmodule.py +++ b/pypy/objspace/fake/checkmodule.py @@ -1,108 +1,12 @@ -import re -from copy import copy -from pypy.tool.error import debug -from pypy.interpreter.argument import Arguments -from pypy.interpreter.gateway import interp2app -from pypy.rlib.nonconst import NonConstant +from pypy.objspace.fake.objspace import FakeObjSpace, W_Root -def my_import(name): - mod = __import__(name) - components = name.split('.') - for comp in components[1:]: - mod = getattr(mod, comp) - return mod -def find_gateways(modname, basepath, module): - identifier = r'[a-zA-Z0-9][a-zA-Z0-9_]*' - r_simplename = re.compile(r'(%s)[.](%s)$' % (identifier, identifier)) - res = [] - for name in module.interpleveldefs.values(): - match = r_simplename.match(name) - if match: - submod_name, obj_name = match.groups() - submod_name = '%s.%s.%s' % (basepath, modname, submod_name) - submod = my_import(submod_name) - obj = getattr(submod, obj_name) - res 
+= find_gw_in_obj(obj) - return res - -def find_gw_in_obj(obj): - if hasattr(obj, 'typedef'): - typedef = obj.typedef - return [gw for gw in typedef.rawdict.values() - if isinstance(gw, interp2app)] - elif hasattr(obj, 'func_code'): - return [interp2app(obj)] - else: - assert False - -## Since the fake objspace is more a hack than a real object space, it -## happens that the annotator complains about operations that cannot -## succeed because it knows too much about the objects involved. For -## example, if it knows that a list is always empty, it will block -## each operations that tries to access that list. This is not what we -## want, because we know that with real objectspaces that operations -## will succeed. - -## As a workaround, we insert dummy rpython code (the function -## dummy_rpython) that manipulates the variables in order to give -## them a more sensible annotation. This is the preferred way to solve -## the problems so far. - -## If the solution above doesn't work, the alternative is to -## substitute the interpreter code with something that doesn't hurt -## the annotator. It's a very ugly hack, better solutions are welcome -## :-) - - -# dummy rpython code to give some variables more sensible annotations -def dummy_rpython(dummy_function): - # to make the annotator flow-in without executing the code - if NonConstant(False): - dummy_function.defs_w = [None] # else the annotator would see an always empty list - -def patch_pypy(): - from pypy.interpreter.baseobjspace import W_Root - - def descr_call_mismatch(self, space, opname, RequiredClass, args): - from pypy.interpreter.error import OperationError - msg = 'This message will never be displayed :-)' - raise OperationError(space.w_TypeError, space.wrap(msg)) - W_Root.descr_call_mismatch = descr_call_mismatch - - -def checkmodule(modname, backend, interactive=False, basepath='pypy.module'): - "Compile a fake PyPy module." 
- from pypy.objspace.fake.objspace import FakeObjSpace, W_Object - from pypy.translator.driver import TranslationDriver - +def checkmodule(modname): space = FakeObjSpace() - space.config.translating = True - ModuleClass = __import__(basepath + '.%s' % modname, - None, None, ['Module']).Module - module = ModuleClass(space, space.wrap(modname)) - w_moduledict = module.getdict(space) - - gateways = find_gateways(modname, basepath, module) - functions = [gw.__spacebind__(space) for gw in gateways] - arguments = Arguments.frompacked(space, W_Object(), W_Object()) - dummy_function = copy(functions[0]) - - def main(argv): # use the standalone mode not to allow SomeObject - dummy_rpython(dummy_function) - for func in functions: - func.call_args(arguments) - return 0 - - patch_pypy() - driver = TranslationDriver() - driver.setup(main, None) - try: - driver.proceed(['compile_' + backend]) - except SystemExit: - raise - except: - if not interactive: - raise - debug(driver) - raise SystemExit(1) + mod = __import__('pypy.module.%s' % modname, None, None, ['__doc__']) + # force computation and record what we wrap + module = mod.Module(space, W_Root()) + for name in module.loaders: + module._load_lazily(space, name) + # + space.translates() diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -1,147 +1,300 @@ -from pypy.interpreter.baseobjspace import ObjSpace, Wrappable, W_Root -from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.baseobjspace import W_Root, ObjSpace +from pypy.interpreter.baseobjspace import Wrappable, SpaceCache +from pypy.interpreter import argument, gateway +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.annotation.model import SomeInstance, s_None +from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.rpython.lltypesystem import lltype +from pypy.tool.sourcetools import compile2, func_with_new_name +from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.objectmodel import instantiate, we_are_translated from pypy.rlib.nonconst import NonConstant from pypy.rlib.rarithmetic import r_uint -from pypy.rlib.rbigint import rbigint +from pypy.translator.translator import TranslationContext -#class W_Type(W_Root): -# _attrs_ = () -class W_Object(W_Root): - _attrs_ = () -W_Object.typedef = TypeDef('foobar') +class W_MyObject(Wrappable): + typedef = None -def make_dummy(a=W_Object(), b=W_Object()): - def fn(*args): - if NonConstant(True): - return a - else: - return b - return fn + def getdict(self, space): + return w_obj_or_none() -int_dummy = make_dummy(42, 43) -float_dummy = make_dummy(42.0, 42.1) -uint_dummy = make_dummy(r_uint(42), r_uint(43)) -str_dummy = make_dummy('foo', 'bar') -bool_dummy = make_dummy(True, False) -unicode_dummy = make_dummy(u'abc', u'cde') -bigint_dummy = make_dummy(rbigint.fromint(0), rbigint.fromint(1)) + def getdictvalue(self, space, attr): + attr + "xx" # check that it's a string + return w_obj_or_none() + + def setdictvalue(self, space, attr, w_value): + attr + "xx" # check that it's a string + is_root(w_value) + return NonConstant(True) + + def deldictvalue(self, space, attr): + attr + "xx" # check that it's a string + return NonConstant(True) + + def setdict(self, space, w_dict): + is_root(w_dict) + + def setclass(self, space, w_subtype): + is_root(w_subtype) + + def str_w(self, space): + return NonConstant("foobar") + + def unicode_w(self, space): + return NonConstant(u"foobar") + + def int_w(self, 
space): + return NonConstant(-42) + + def uint_w(self, space): + return r_uint(NonConstant(42)) + + def bigint_w(self, space): + from pypy.rlib.rbigint import rbigint + return rbigint.fromint(NonConstant(42)) + + +def w_some_obj(): + if NonConstant(False): + return W_Root() + return W_MyObject() + +def w_obj_or_none(): + if NonConstant(False): + return None + return w_some_obj() + +def is_root(w_obj): + assert isinstance(w_obj, W_Root) +is_root.expecting = W_Root + +def is_arguments(arg): + assert isinstance(arg, argument.Arguments) +is_arguments.expecting = argument.Arguments + + +class Entry(ExtRegistryEntry): + _about_ = is_root, is_arguments + + def compute_result_annotation(self, s_w_obj): + cls = self.instance.expecting + s_inst = SomeInstance(self.bookkeeper.getuniqueclassdef(cls), + can_be_None=True) + assert s_inst.contains(s_w_obj) + return s_None + + def specialize_call(self, hop): + return hop.inputconst(lltype.Void, None) + +# ____________________________________________________________ + class FakeObjSpace(ObjSpace): - w_None = W_Object() - w_False = W_Object() - w_True = W_Object() - w_Ellipsis = W_Object() - w_NotImplemented = W_Object() - w_int = W_Object() - w_dict = W_Object() - w_float = W_Object() - w_long = W_Object() - w_tuple = W_Object() - w_str = W_Object() - w_basestring = W_Object() - w_unicode = W_Object() - w_type = W_Object() - w_instance = W_Object() - w_slice = W_Object() - w_hex = W_Object() - w_oct = W_Object() - - def initialize(self): - self.config.objspace.geninterp = False - self.config.objspace.disable_call_speedhacks = True - self.wrap_cache = {} - self.make_builtins() - def _freeze_(self): - return True + def __init__(self): + self._seen_extras = [] + ObjSpace.__init__(self) + + def float_w(self, w_obj): + is_root(w_obj) + return NonConstant(42.5) + + def is_true(self, w_obj): + is_root(w_obj) + return NonConstant(False) + + def unwrap(self, w_obj): + "NOT_RPYTHON" + raise NotImplementedError + + def newdict(self, module=False, instance=False, classofinstance=None, + strdict=False): + return w_some_obj() + + def newtuple(self, list_w): + for w_x in list_w: + is_root(w_x) + return w_some_obj() + + def newlist(self, list_w): + for w_x in list_w: + is_root(w_x) + return w_some_obj() + + def newslice(self, w_start, w_end, w_step): + is_root(w_start) + is_root(w_end) + is_root(w_step) + return w_some_obj() + + def newint(self, x): + return w_some_obj() + + def newfloat(self, x): + return w_some_obj() + + def marshal_w(self, w_obj): + "NOT_RPYTHON" + raise NotImplementedError def wrap(self, x): - if isinstance(x, Wrappable): - w_result = x.__spacebind__(self) - return w_result - return W_Object() + if not we_are_translated(): + if isinstance(x, gateway.interp2app): + self._see_interp2app(x) + if isinstance(x, GetSetProperty): + self._see_getsetproperty(x) + return w_some_obj() wrap._annspecialcase_ = "specialize:argtype(1)" - def unwrap(self, w_obj): - assert isinstance(w_obj, W_Object) - return None + def _see_interp2app(self, interp2app): + "NOT_RPYTHON" + activation = interp2app._code.activation + def check(): + scope_w = [w_some_obj()] * NonConstant(42) + w_result = activation._run(self, scope_w) + is_root(w_result) + check = func_with_new_name(check, 'check__' + interp2app.name) + self._seen_extras.append(check) - lookup = make_dummy() - allocate_instance = make_dummy() - getattr = make_dummy() - setattr = make_dummy() - getitem = make_dummy() - setitem = make_dummy() - delitem = make_dummy() - int_w = int_dummy - uint_w = uint_dummy - float_w = 
float_dummy - unicode_w = unicode_dummy - bigint_w = bigint_dummy - iter = make_dummy() - type = make_dummy() - str = make_dummy() - int = make_dummy() - float = make_dummy() - repr = make_dummy() - id = make_dummy() - len = make_dummy() - str_w = str_dummy - call_args = make_dummy() - new_interned_str = make_dummy() - newint = make_dummy() - newlong = make_dummy() - newfloat = make_dummy() - def newdict(self, module=False): - return self.newfloat() - newlist = make_dummy() - emptylist = make_dummy() - newtuple = make_dummy() - newslice = make_dummy() - lt = make_dummy() - le = make_dummy() - eq = make_dummy() - ne = make_dummy() - gt = make_dummy() - ge = make_dummy() - lt_w = bool_dummy - le_w = bool_dummy - eq_w = bool_dummy - ne_w = bool_dummy - gt_w = bool_dummy - ge_w = bool_dummy - is_w = bool_dummy - is_ = make_dummy() - next = make_dummy() - is_true = bool_dummy - nonzero = make_dummy() - issubtype = make_dummy() - ord = make_dummy() - hash = make_dummy() - delattr = make_dummy() # should return None? - contains = make_dummy() - hex = make_dummy() - oct = make_dummy() - pow = make_dummy() - inplace_pow = make_dummy() - cmp = make_dummy() + def _see_getsetproperty(self, getsetproperty): + "NOT_RPYTHON" + space = self + def checkprop(): + getsetproperty.fget(getsetproperty, space, w_some_obj()) + if getsetproperty.fset is not None: + getsetproperty.fset(getsetproperty, space, w_some_obj(), + w_some_obj()) + if getsetproperty.fdel is not None: + getsetproperty.fdel(getsetproperty, space, w_some_obj()) + if not getsetproperty.name.startswith('<'): + checkprop = func_with_new_name(checkprop, + 'checkprop__' + getsetproperty.name) + self._seen_extras.append(checkprop) - # XXsX missing operations - def coerce(self, *args): raise NotImplementedError("space.coerce()") - def get(self, *args): raise NotImplementedError("space.get()") - def set(self, *args): raise NotImplementedError("space.set()") - def delete(self, *args): raise NotImplementedError("space.delete()") - def userdel(self, *args): raise NotImplementedError("space.userdel()") - def marshal_w(self, *args):raise NotImplementedError("space.marshal_w()") + def call_obj_args(self, w_callable, w_obj, args): + is_root(w_callable) + is_root(w_obj) + is_arguments(args) + return w_some_obj() - gettypefor = make_dummy() - gettypeobject = make_dummy() - unpackiterable = make_dummy([W_Object()], [W_Object()]) + def call(self, w_callable, w_args, w_kwds=None): + is_root(w_callable) + is_root(w_args) + is_root(w_kwds) + return w_some_obj() + def call_function(self, w_func, *args_w): + is_root(w_func) + for w_arg in list(args_w): + is_root(w_arg) + return w_some_obj() -## Register all exceptions -import exceptions -for name in ObjSpace.ExceptionTable: - exc = getattr(exceptions, name) - setattr(FakeObjSpace, 'w_' + name, W_Object()) + def call_args(self, w_func, args): + is_root(w_func) + is_arguments(args) + return w_some_obj() + + def gettypefor(self, cls): + return self.gettypeobject(cls.typedef) + + def gettypeobject(self, typedef): + assert typedef is not None + return self.fromcache(TypeCache).getorbuild(typedef) + + def unpackiterable(self, w_iterable, expected_length=-1): + is_root(w_iterable) + if expected_length < 0: + expected_length = 3 + return [w_some_obj()] * expected_length + + def allocate_instance(self, cls, w_subtype): + is_root(w_subtype) + return instantiate(cls) + allocate_instance._annspecialcase_ = "specialize:arg(1)" + + def decode_index(self, w_index_or_slice, seqlength): + is_root(w_index_or_slice) + return 
(NonConstant(42), NonConstant(42), NonConstant(42)) + + def decode_index4(self, w_index_or_slice, seqlength): + is_root(w_index_or_slice) + return (NonConstant(42), NonConstant(42), + NonConstant(42), NonConstant(42)) + + def exec_(self, *args, **kwds): + pass + + # ---------- + + def translates(self, func=None, argtypes=None): + if func is not None: + if argtypes is None: + nb_args = func.func_code.co_argcount + argtypes = [W_Root] * nb_args + # + t = TranslationContext() + self.t = t # for debugging + ann = t.buildannotator() + if func is not None: + ann.build_types(func, argtypes, complete_now=False) + # + # annotate all _seen_extras, knowing that annotating some may + # grow the list + done = 0 + while done < len(self._seen_extras): + print self._seen_extras + ann.build_types(self._seen_extras[done], [], + complete_now=False) + done += 1 + ann.complete() + #t.viewcg() + t.buildrtyper().specialize() + t.checkgraphs() + + +def setup(): + for name in (ObjSpace.ConstantTable + + ObjSpace.ExceptionTable + + ['int', 'str', 'float', 'long', 'tuple', 'list', + 'dict', 'unicode', 'complex', 'slice', 'bool', + 'type', 'basestring']): + setattr(FakeObjSpace, 'w_' + name, w_some_obj()) + # + for (name, _, arity, _) in ObjSpace.MethodTable: + args = ['w_%d' % i for i in range(arity)] + d = {'is_root': is_root, + 'w_some_obj': w_some_obj} + exec compile2("""\ + def meth(self, %s): + %s + return w_some_obj() + """ % (', '.join(args), + '; '.join(['is_root(%s)' % arg for arg in args]))) in d + meth = func_with_new_name(d['meth'], name) + setattr(FakeObjSpace, name, meth) + # + for name in ObjSpace.IrregularOpTable: + assert hasattr(FakeObjSpace, name) # missing? + +setup() + +# ____________________________________________________________ + +class TypeCache(SpaceCache): + def build(cache, typedef): + assert isinstance(typedef, TypeDef) + for value in typedef.rawdict.values(): + cache.space.wrap(value) + return w_some_obj() + +class FakeCompiler(object): + pass +FakeObjSpace.default_compiler = FakeCompiler() + +class FakeModule(object): + def get(self, name): + name + "xx" # check that it's a string + return w_some_obj() +FakeObjSpace.sys = FakeModule() +FakeObjSpace.sys.filesystemencoding = 'foobar' diff --git a/pypy/objspace/fake/test/__init__.py b/pypy/objspace/fake/test/__init__.py deleted file mode 100644 diff --git a/pypy/objspace/fake/test/test_checkmodule.py b/pypy/objspace/fake/test/test_checkmodule.py --- a/pypy/objspace/fake/test/test_checkmodule.py +++ b/pypy/objspace/fake/test/test_checkmodule.py @@ -1,7 +1,63 @@ import py -from pypy.objspace.fake.checkmodule import checkmodule +from pypy.objspace.fake.objspace import FakeObjSpace, is_root +from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter.gateway import interp2app, W_Root, ObjSpace -def test_dotnet(): - # the only module known to pass checkmodule is _dotnet so far - py.test.skip('fixme') - checkmodule('_dotnet', 'cli') + +def make_checker(): + check = [] + def see(): + check.append(True) + see._annspecialcase_ = 'specialize:memo' + return see, check + +def test_wrap_interp2app(): + see, check = make_checker() + space = FakeObjSpace() + assert len(space._seen_extras) == 0 + assert len(check) == 0 + space.wrap(interp2app(lambda space: see())) + assert len(space._seen_extras) == 1 + assert len(check) == 0 + space.translates() + assert len(check) == 1 + +def test_wrap_interp2app_int(): + see, check = make_checker() + def foobar(space, x, w_y, z): + 
is_root(w_y) + see() + return space.wrap(x - z) + space = FakeObjSpace() + space.wrap(interp2app(foobar, unwrap_spec=[ObjSpace, int, W_Root, int])) + space.translates() + assert check + +def test_wrap_GetSetProperty(): + see, check = make_checker() + def foobar(w_obj, space): + is_root(w_obj) + see() + return space.w_None + space = FakeObjSpace() + space.wrap(GetSetProperty(foobar)) + space.translates() + assert check + + +def test_gettypefor_untranslated(): + see, check = make_checker() + class W_Foo(Wrappable): + def do_it(self, space, w_x): + is_root(w_x) + see() + return W_Root() + W_Foo.typedef = TypeDef('foo', + __module__ = 'barmod', + do_it = interp2app(W_Foo.do_it)) + space = FakeObjSpace() + space.gettypefor(W_Foo) + assert not check + space.translates() + assert check diff --git a/pypy/objspace/fake/test/test_objspace.py b/pypy/objspace/fake/test/test_objspace.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/fake/test/test_objspace.py @@ -0,0 +1,74 @@ +import py +from pypy.objspace.fake.objspace import FakeObjSpace, W_Root +from pypy.interpreter.argument import Arguments +from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.baseobjspace import Wrappable +from pypy.rlib.unroll import unrolling_iterable + +def test_create(): + FakeObjSpace() + + +class TestTranslate: + def setup_method(self, meth): + self.space = FakeObjSpace() + + def test_simple(self): + space = self.space + space.translates(lambda w_x, w_y: space.add(w_x, w_y)) + + def test_methodtable(self): + space = self.space + for fixed_arity in [1, 2, 3, 4]: + # + methodtable = [name for (name, _, arity, _) in space.MethodTable + if arity == fixed_arity] + methodtable = unrolling_iterable(methodtable) + args_w = (W_Root(),) * fixed_arity + # + def f(): + for name in methodtable: + getattr(space, name)(*args_w) + # + space.translates(f) + + def test_newdict(self): + space = self.space + space.translates(lambda: (space.newdict(), + space.newdict(strdict=True))) + + def test_constants(self): + space = self.space + space.translates(lambda: (space.w_None, space.w_True, space.w_False, + space.w_int, space.w_str, + space.w_TypeError)) + + def test_wrap(self): + space = self.space + space.translates(lambda: (space.wrap(42), space.wrap(42.5), + space.wrap("foo"))) + + def test_call_args(self): + space = self.space + args = Arguments(space, [W_Root()]) + space.translates(lambda: space.call_args(W_Root(), args)) + + def test_gettypefor(self): + space = self.space + class W_Foo(Wrappable): + typedef = TypeDef("foo") + space.translates(lambda: space.gettypefor(W_Foo)) + + def test_is_true(self): + space = self.space + space.translates(lambda: space.is_true(W_Root())) + py.test.raises(AssertionError, + space.translates, lambda: space.is_true(42)) + + def test_unpackiterable(self): + space = self.space + space.translates(lambda: (space.unpackiterable(W_Root()), + space.unpackiterable(W_Root(), 42))) + + def test_newlist(self): + self.space.newlist([W_Root(), W_Root()]) diff --git a/pypy/objspace/fake/test/test_zmodules.py b/pypy/objspace/fake/test/test_zmodules.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/fake/test/test_zmodules.py @@ -0,0 +1,17 @@ +from pypy.objspace.fake.checkmodule import checkmodule + + +def test__bisect(): + checkmodule('_bisect') + +def test__random(): + checkmodule('_random') + +def test_cStringIO(): + checkmodule('cStringIO') + +def test_itertools(): + checkmodule('itertools') + +def test_micronumpy(): + checkmodule('micronumpy') diff --git a/pypy/rlib/rmmap.py 
b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -421,7 +421,11 @@ low, high = _get_file_size(self.file_handle) if not high and low <= sys.maxint: return low + # not so sure if the signed/unsigned strictness is a good idea: + high = rffi.cast(lltype.Unsigned, high) + low = rffi.cast(lltype.Unsigned, low) size = (high << 32) + low + size = rffi.cast(lltype.Signed, size) elif _POSIX: st = os.fstat(self.fd) size = st[stat.ST_SIZE] diff --git a/pypy/translator/c/test/test_typed.py b/pypy/translator/c/test/test_typed.py --- a/pypy/translator/c/test/test_typed.py +++ b/pypy/translator/c/test/test_typed.py @@ -275,6 +275,14 @@ fn = self.getcompiled(f, [r_longlong]) assert fn(0) == 0 + def test_upcast_int(self): + from pypy.rpython.lltypesystem import rffi + def f(v): + v = rffi.cast(rffi.USHORT, v) + return intmask(v) + fn = self.getcompiled(f, [int]) + assert fn(0x1234CDEF) == 0xCDEF + def test_function_ptr(self): def f1(): return 1 diff --git a/pypy/translator/translator.py b/pypy/translator/translator.py --- a/pypy/translator/translator.py +++ b/pypy/translator/translator.py @@ -150,11 +150,11 @@ from pypy.translator.tool.graphpage import FlowGraphPage FlowGraphPage(self).display() - def viewcg(self, center_graph=None): + def viewcg(self, center_graph=None, huge=100): """Shows the whole call graph and the class hierarchy, based on the computed annotations.""" from pypy.translator.tool.graphpage import TranslatorPage - TranslatorPage(self, center_graph=center_graph).display() + TranslatorPage(self, center_graph=center_graph, huge=huge).display() From noreply at buildbot.pypy.org Wed Dec 7 19:22:25 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Dec 2011 19:22:25 +0100 (CET) Subject: [pypy-commit] pypy default: Add list-comprehension-operation to checkconfig by default, have a test that Message-ID: <20111207182225.C300B8205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50281:f8b238a469f4 Date: 2011-12-07 20:21 +0200 http://bitbucket.org/pypy/pypy/changeset/f8b238a469f4/ Log: Add list-comprehension-operation to checkconfig by default, have a test that checks if micronumpy translates diff --git a/pypy/module/micronumpy/test/test_ztranslation.py b/pypy/module/micronumpy/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_ztranslation.py @@ -0,0 +1,5 @@ + +from pypy.objspace.fake.checkmodule import checkmodule + +def test_numpy_translates(): + checkmodule('micronumpy') diff --git a/pypy/objspace/fake/checkmodule.py b/pypy/objspace/fake/checkmodule.py --- a/pypy/objspace/fake/checkmodule.py +++ b/pypy/objspace/fake/checkmodule.py @@ -9,4 +9,4 @@ for name in module.loaders: module._load_lazily(space, name) # - space.translates() + space.translates(**{'translation.list_comprehension_operations':True}) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -11,6 +11,7 @@ from pypy.rlib.nonconst import NonConstant from pypy.rlib.rarithmetic import r_uint from pypy.translator.translator import TranslationContext +from pypy.tool.option import make_config class W_MyObject(Wrappable): @@ -227,13 +228,14 @@ # ---------- - def translates(self, func=None, argtypes=None): + def translates(self, func=None, argtypes=None, **kwds): + config = make_config(None, **kwds) if func is not None: if argtypes is None: nb_args = func.func_code.co_argcount argtypes = [W_Root] * nb_args # - t = 
TranslationContext() + t = TranslationContext(config=config) self.t = t # for debugging ann = t.buildannotator() if func is not None: From noreply at buildbot.pypy.org Wed Dec 7 20:26:28 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 20:26:28 +0100 (CET) Subject: [pypy-commit] pypy default: Remove it from there then. Message-ID: <20111207192628.021A18205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50282:1ef0414a0491 Date: 2011-12-07 20:20 +0100 http://bitbucket.org/pypy/pypy/changeset/1ef0414a0491/ Log: Remove it from there then. diff --git a/pypy/objspace/fake/test/test_zmodules.py b/pypy/objspace/fake/test/test_zmodules.py --- a/pypy/objspace/fake/test/test_zmodules.py +++ b/pypy/objspace/fake/test/test_zmodules.py @@ -12,6 +12,3 @@ def test_itertools(): checkmodule('itertools') - -def test_micronumpy(): - checkmodule('micronumpy') From noreply at buildbot.pypy.org Wed Dec 7 20:26:29 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 20:26:29 +0100 (CET) Subject: [pypy-commit] pypy default: Distribute test_zmodules's content to the various modules. Message-ID: <20111207192629.3A7D582ABA@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50283:38243eb2f78b Date: 2011-12-07 20:25 +0100 http://bitbucket.org/pypy/pypy/changeset/38243eb2f78b/ Log: Distribute test_zmodules's content to the various modules. diff --git a/pypy/module/_bisect/test/test_ztranslation.py b/pypy/module/_bisect/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/_bisect/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_checkmodule(): + checkmodule('_bisect') diff --git a/pypy/module/_random/test/test_ztranslation.py b/pypy/module/_random/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/_random/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_checkmodule(): + checkmodule('_random') diff --git a/pypy/module/cStringIO/test/test_ztranslation.py b/pypy/module/cStringIO/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/cStringIO/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_checkmodule(): + checkmodule('cStringIO') diff --git a/pypy/module/itertools/test/test_ztranslation.py b/pypy/module/itertools/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/itertools/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_checkmodule(): + checkmodule('itertools') diff --git a/pypy/objspace/fake/test/test_zmodules.py b/pypy/objspace/fake/test/test_zmodules.py deleted file mode 100644 --- a/pypy/objspace/fake/test/test_zmodules.py +++ /dev/null @@ -1,14 +0,0 @@ -from pypy.objspace.fake.checkmodule import checkmodule - - -def test__bisect(): - checkmodule('_bisect') - -def test__random(): - checkmodule('_random') - -def test_cStringIO(): - checkmodule('cStringIO') - -def test_itertools(): - checkmodule('itertools') From noreply at buildbot.pypy.org Wed Dec 7 20:30:15 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Dec 2011 20:30:15 +0100 (CET) Subject: [pypy-commit] pypy default: add a simple wrapper and some tests Message-ID: <20111207193015.353618205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50284:bc01389b6c4c Date: 2011-12-07 21:29 +0200 
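[Editorial note] The "Distribute test_zmodules's content" change above replaces the central list with one tiny file per module, so each module's own test directory carries its translation check. A hypothetical new module 'foo' would follow the same pattern:

    # pypy/module/foo/test/test_ztranslation.py   ('foo' is a made-up name)
    from pypy.objspace.fake.checkmodule import checkmodule

    def test_checkmodule():
        checkmodule('foo')
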
http://bitbucket.org/pypy/pypy/changeset/bc01389b6c4c/ Log: add a simple wrapper and some tests diff --git a/pypy/rpython/tool/rffi_platform.py b/pypy/rpython/tool/rffi_platform.py --- a/pypy/rpython/tool/rffi_platform.py +++ b/pypy/rpython/tool/rffi_platform.py @@ -56,6 +56,12 @@ DEFINED = DefinedConstantDouble(macro) return configure(CConfig)['DEFINED'] +def getdefinedinteger(macro, c_header_source): + class CConfig: + _compilation_info_ = eci_from_header(c_header_source) + DEFINED = DefinedConstantInteger(macro) + return configure(CConfig)['DEFINED'] + def has(name, c_header_source, include_dirs=None): class CConfig: _compilation_info_ = eci_from_header(c_header_source, include_dirs) diff --git a/pypy/rpython/tool/test/test_rffi_platform.py b/pypy/rpython/tool/test/test_rffi_platform.py --- a/pypy/rpython/tool/test/test_rffi_platform.py +++ b/pypy/rpython/tool/test/test_rffi_platform.py @@ -108,6 +108,12 @@ '#define ALFKJLKJFLKJFKLEJDLKEWMECEE') assert res +def test_defined_constant(): + res = rffi_platform.getdefineddouble('ABCDFGH', '#define ABCDFGH 2.0') + assert res == 2.0 + res = rffi_platform.getdefinedinteger('ABCDFGH', '#define ABCDFGH 2') + assert res == 2 + def test_defined_constant_float(): value = rffi_platform.getdefineddouble('BLAH', '#define BLAH 1.0') assert value == 1.0 From noreply at buildbot.pypy.org Wed Dec 7 20:35:42 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 20:35:42 +0100 (CET) Subject: [pypy-commit] pypy default: Fix an occasional IndexError. Message-ID: <20111207193542.EE5038205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50285:80bbb425d34a Date: 2011-12-07 20:34 +0100 http://bitbucket.org/pypy/pypy/changeset/80bbb425d34a/ Log: Fix an occasional IndexError. (I should try to figure out if the trunk py lib still has the same issue, and write a test for it if it does, but I don't really follow the trunk py lib, so I'm just checking this here and it can be ignored if we update the py lib.) diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -164,6 +164,7 @@ # if something: # assume this causes a NameError # # _this_ lines and the one # below we don't want from entry.getsource() + end = min(end, len(source)) for i in range(self.lineno, end): if source[i].rstrip().endswith(':'): end = i + 1 From noreply at buildbot.pypy.org Wed Dec 7 20:35:44 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Dec 2011 20:35:44 +0100 (CET) Subject: [pypy-commit] pypy default: Fix for ctypes on Python 2.5: "c_byte" is a better approximation of Message-ID: <20111207193544.3ADA48205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50286:34288a8ab7fd Date: 2011-12-07 20:35 +0100 http://bitbucket.org/pypy/pypy/changeset/34288a8ab7fd/ Log: Fix for ctypes on Python 2.5: "c_byte" is a better approximation of the missing "c_bool", although still not perfect. 
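[Editorial note] The point of the one-line change below is the width of the fallback type: the fallback has to stand in for a char-sized C bool, which c_byte does and the previous c_long fallback did not, so field sizes and struct layouts stay consistent on a Python whose ctypes lacks c_bool (2.5). Roughly, with sizes as on the usual ABIs:

    import ctypes
    c_bool_like = getattr(ctypes, "c_bool", ctypes.c_byte)   # same idiom as the diff
    assert ctypes.sizeof(c_bool_like) == 1          # holds for both c_bool and c_byte
    assert ctypes.sizeof(ctypes.c_long) in (4, 8)   # the old fallback was too wide
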
diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -112,7 +112,7 @@ rffi.LONGLONG: ctypes.c_longlong, rffi.ULONGLONG: ctypes.c_ulonglong, rffi.SIZE_T: ctypes.c_size_t, - lltype.Bool: getattr(ctypes, "c_bool", ctypes.c_long), + lltype.Bool: getattr(ctypes, "c_bool", ctypes.c_byte), llmemory.Address: ctypes.c_void_p, llmemory.GCREF: ctypes.c_void_p, llmemory.WeakRef: ctypes.c_void_p, # XXX From noreply at buildbot.pypy.org Wed Dec 7 20:37:21 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Dec 2011 20:37:21 +0100 (CET) Subject: [pypy-commit] pypy default: Pass mode around, everyone supports it, it's just about passing it Message-ID: <20111207193721.4B7608205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50287:b4badea8f26f Date: 2011-12-07 21:35 +0200 http://bitbucket.org/pypy/pypy/changeset/b4badea8f26f/ Log: Pass mode around, everyone supports it, it's just about passing it diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -639,11 +639,11 @@ return dlsym(self.lib, name) class CDLL(RawCDLL): - def __init__(self, libname): + def __init__(self, libname, mode=-1): """Load the library, or raises DLOpenError.""" RawCDLL.__init__(self, rffi.cast(DLLHANDLE, -1)) with rffi.scoped_str2charp(libname) as ll_libname: - self.lib = dlopen(ll_libname) + self.lib = dlopen(ll_libname, mode) def __del__(self): if self.lib != rffi.cast(DLLHANDLE, -1): diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -393,11 +393,11 @@ # XXX: it partially duplicate the code in clibffi.py class CDLL(object): - def __init__(self, libname): + def __init__(self, libname, mode=-1): """Load the library, or raises DLOpenError.""" self.lib = rffi.cast(DLLHANDLE, 0) with rffi.scoped_str2charp(libname) as ll_libname: - self.lib = dlopen(ll_libname) + self.lib = dlopen(ll_libname, mode) def __del__(self): if self.lib: diff --git a/pypy/rlib/test/test_libffi.py b/pypy/rlib/test/test_libffi.py --- a/pypy/rlib/test/test_libffi.py +++ b/pypy/rlib/test/test_libffi.py @@ -443,3 +443,4 @@ assert p[1] == 34 lltype.free(p, flavor='raw') lltype.free(ffi_point_struct, flavor='raw') + From noreply at buildbot.pypy.org Wed Dec 7 20:37:22 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Dec 2011 20:37:22 +0100 (CET) Subject: [pypy-commit] pypy default: Pass mode Message-ID: <20111207193722.8DC7D8205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50288:cfff5b5f0b13 Date: 2011-12-07 21:35 +0200 http://bitbucket.org/pypy/pypy/changeset/cfff5b5f0b13/ Log: Pass mode diff --git a/pypy/module/_ffi/interp_ffi.py b/pypy/module/_ffi/interp_ffi.py --- a/pypy/module/_ffi/interp_ffi.py +++ b/pypy/module/_ffi/interp_ffi.py @@ -457,14 +457,14 @@ # ======================================================================== class W_CDLL(Wrappable): - def __init__(self, space, name): + def __init__(self, space, name, mode): self.space = space if name is None: self.name = "" else: self.name = name try: - self.cdll = libffi.CDLL(name) + self.cdll = libffi.CDLL(name, mode) except DLOpenError, e: raise operationerrfmt(space.w_OSError, '%s: %s', self.name, e.msg or 'unspecified error') @@ -492,9 +492,9 @@ "No symbol %s found in library %s", name, self.name) return space.wrap(address_as_uint) - at unwrap_spec(name='str_or_None') -def 
descr_new_cdll(space, w_type, name): - return space.wrap(W_CDLL(space, name)) + at unwrap_spec(name='str_or_None', mode=int) +def descr_new_cdll(space, w_type, name, mode=-1): + return space.wrap(W_CDLL(space, name, mode)) W_CDLL.typedef = TypeDef( From noreply at buildbot.pypy.org Wed Dec 7 20:37:23 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Dec 2011 20:37:23 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20111207193723.CE2008205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50289:3e5a23d598b4 Date: 2011-12-07 21:36 +0200 http://bitbucket.org/pypy/pypy/changeset/3e5a23d598b4/ Log: merge diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -164,6 +164,7 @@ # if something: # assume this causes a NameError # # _this_ lines and the one # below we don't want from entry.getsource() + end = min(end, len(source)) for i in range(self.lineno, end): if source[i].rstrip().endswith(':'): end = i + 1 diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -112,7 +112,7 @@ rffi.LONGLONG: ctypes.c_longlong, rffi.ULONGLONG: ctypes.c_ulonglong, rffi.SIZE_T: ctypes.c_size_t, - lltype.Bool: getattr(ctypes, "c_bool", ctypes.c_long), + lltype.Bool: getattr(ctypes, "c_bool", ctypes.c_byte), llmemory.Address: ctypes.c_void_p, llmemory.GCREF: ctypes.c_void_p, llmemory.WeakRef: ctypes.c_void_p, # XXX From noreply at buildbot.pypy.org Wed Dec 7 20:41:31 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Dec 2011 20:41:31 +0100 (CET) Subject: [pypy-commit] pypy default: pass a mode here Message-ID: <20111207194131.13AB58205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50290:315da4da5ab0 Date: 2011-12-07 21:41 +0200 http://bitbucket.org/pypy/pypy/changeset/315da4da5ab0/ Log: pass a mode here diff --git a/pypy/module/_ffi/interp_ffi.py b/pypy/module/_ffi/interp_ffi.py --- a/pypy/module/_ffi/interp_ffi.py +++ b/pypy/module/_ffi/interp_ffi.py @@ -509,6 +509,6 @@ def get_libc(space): from pypy.rlib.clibffi import get_libc_name try: - return space.wrap(W_CDLL(space, get_libc_name())) + return space.wrap(W_CDLL(space, get_libc_name(), -1)) except OSError, e: raise wrap_oserror(space, e) From notifications-noreply at bitbucket.org Wed Dec 7 20:42:06 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Wed, 07 Dec 2011 19:42:06 -0000 Subject: [pypy-commit] Notification: pypy Message-ID: <20111207194206.21156.6216@bitbucket03.managed.contegix.com> You have received a notification from Jeff Terrace. Hi, I forked pypy. My fork is at https://bitbucket.org/jterrace/pypy. -- Disable notifications at https://bitbucket.org/account/notifications/ From noreply at buildbot.pypy.org Wed Dec 7 21:31:27 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Dec 2011 21:31:27 +0100 (CET) Subject: [pypy-commit] pypy default: we *must* specify RTLD_NOW. Message-ID: <20111207203127.A665C8205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50291:e37e4e6e97b8 Date: 2011-12-07 22:30 +0200 http://bitbucket.org/pypy/pypy/changeset/e37e4e6e97b8/ Log: we *must* specify RTLD_NOW. 
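[Editorial note] The diff below makes two coordinated changes: ctypes now forwards its mode argument down to _ffi.CDLL, and the low-level dlopen wrapper ORs RTLD_NOW into whatever mode it receives, presumably because dlopen() wants an explicit binding policy even when the caller only asked for something like RTLD_GLOBAL. Schematically (constant values are the typical Linux ones, for illustration only):

    RTLD_LOCAL, RTLD_NOW = 0, 2          # illustrative; real values are platform-specific

    def effective_dlopen_mode(mode):
        if mode == -1:                   # sentinel: caller expressed no preference
            mode = RTLD_LOCAL
        return mode | RTLD_NOW           # always resolve symbols eagerly
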
diff --git a/lib-python/modified-2.7/ctypes/__init__.py b/lib-python/modified-2.7/ctypes/__init__.py --- a/lib-python/modified-2.7/ctypes/__init__.py +++ b/lib-python/modified-2.7/ctypes/__init__.py @@ -351,7 +351,7 @@ self._FuncPtr = _FuncPtr if handle is None: - self._handle = _ffi.CDLL(name) + self._handle = _ffi.CDLL(name, mode) else: self._handle = handle diff --git a/pypy/rlib/rdynload.py b/pypy/rlib/rdynload.py --- a/pypy/rlib/rdynload.py +++ b/pypy/rlib/rdynload.py @@ -87,9 +87,10 @@ """ if mode == -1: if RTLD_LOCAL is not None: - mode = RTLD_LOCAL | RTLD_NOW + mode = RTLD_LOCAL else: - mode = RTLD_NOW + mode = 0 + mode |= RTLD_NOW res = c_dlopen(name, rffi.cast(rffi.INT, mode)) if not res: err = dlerror() From noreply at buildbot.pypy.org Thu Dec 8 10:23:50 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Dec 2011 10:23:50 +0100 (CET) Subject: [pypy-commit] pypy default: Comment out this hack. I guess it was used for "checkmodule", Message-ID: <20111208092350.2CB998205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50292:104e7a656174 Date: 2011-12-08 10:23 +0100 http://bitbucket.org/pypy/pypy/changeset/104e7a656174/ Log: Comment out this hack. I guess it was used for "checkmodule", which likely doesn't work any more with the clr module. diff --git a/pypy/module/clr/boxing_rules.py b/pypy/module/clr/boxing_rules.py --- a/pypy/module/clr/boxing_rules.py +++ b/pypy/module/clr/boxing_rules.py @@ -43,11 +43,11 @@ def tocli(self): return box(self._value) -from pypy.objspace.fake.objspace import W_Object as W_Object_Fake -from pypy.rlib.nonconst import NonConstant +##from pypy.objspace.fake.objspace import W_Object as W_Object_Fake +##from pypy.rlib.nonconst import NonConstant -class __extend__(W_Object_Fake): - __metaclass__ = extendabletype +##class __extend__(W_Object_Fake): +## __metaclass__ = extendabletype - def tocli(self): - return NonConstant(None) +## def tocli(self): +## return NonConstant(None) From noreply at buildbot.pypy.org Thu Dec 8 11:42:22 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 8 Dec 2011 11:42:22 +0100 (CET) Subject: [pypy-commit] pypy default: make the dynamic interiorfield optimization work correctly with unsigned values Message-ID: <20111208104222.B57C48205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50293:617e1464542c Date: 2011-12-08 05:41 -0500 http://bitbucket.org/pypy/pypy/changeset/617e1464542c/ Log: make the dynamic interiorfield optimization work correctly with unsigned values diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -234,6 +234,9 @@ # longlongs are treated as floats, see # e.g. 
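[Editorial note] Returning to the getinteriorfield_raw change a few commits above: the optimization classifies libffi type kinds, and for unsigned kinds the descriptor flags must all stay False so that a value such as an unsigned byte of 200 is not sign-extended when read back. The new test exercises exactly that; pulled out of the JIT harness it is essentially this sketch (import locations are assumed to match those used by the test):

    from pypy.rlib.libffi import array_getitem, types
    from pypy.rpython.lltypesystem import lltype, rffi

    with lltype.scoped_alloc(rffi.CArray(rffi.UCHAR), 1) as data:
        data[0] = rffi.cast(rffi.UCHAR, 200)
        value = rffi.cast(lltype.Signed,
                          array_getitem(types.uchar, 1, data, 0, 0))
        assert value == 200              # and not the sign-extended -56
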
llsupport/descr.py:getDescrClass is_float = True + elif kind == 'u': + # they're all False + pass else: assert False, "unsupported ffitype or kind" # diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -147,6 +147,29 @@ self.check_resops({'jump': 2, 'int_lt': 2, 'setinteriorfield_raw': 4, 'getinteriorfield_raw': 8, 'int_add': 6, 'guard_true': 2}) + def test_array_getitem_uint8(self): + myjitdriver = JitDriver( + greens = [], + reds = ["n", "i", "s", "data"], + ) + def f(data, n): + i = s = 0 + while i < n: + myjitdriver.jit_merge_point(n=n, i=i, s=s, data=data) + s += rffi.cast(lltype.Signed, array_getitem(types.uchar, 1, data, 0, 0)) + i += 1 + return s + + def main(n): + with lltype.scoped_alloc(rffi.CArray(rffi.UCHAR), 1) as data: + data[0] = rffi.cast(rffi.UCHAR, 200) + return f(data, n) + + assert self.meta_interp(main, [10]) == 2000 + self.check_resops({'jump': 2, 'int_lt': 2, 'getinteriorfield_raw': 2, + 'guard_true': 2, 'int_add': 4}) + + class TestFfiCall(FfiCallTests, LLJitMixin): supports_all = False From noreply at buildbot.pypy.org Thu Dec 8 11:42:23 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 8 Dec 2011 11:42:23 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20111208104223.DAEFB8205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50294:26bac7f772ca Date: 2011-12-08 05:42 -0500 http://bitbucket.org/pypy/pypy/changeset/26bac7f772ca/ Log: merged upstream diff --git a/pypy/module/clr/boxing_rules.py b/pypy/module/clr/boxing_rules.py --- a/pypy/module/clr/boxing_rules.py +++ b/pypy/module/clr/boxing_rules.py @@ -43,11 +43,11 @@ def tocli(self): return box(self._value) -from pypy.objspace.fake.objspace import W_Object as W_Object_Fake -from pypy.rlib.nonconst import NonConstant +##from pypy.objspace.fake.objspace import W_Object as W_Object_Fake +##from pypy.rlib.nonconst import NonConstant -class __extend__(W_Object_Fake): - __metaclass__ = extendabletype +##class __extend__(W_Object_Fake): +## __metaclass__ = extendabletype - def tocli(self): - return NonConstant(None) +## def tocli(self): +## return NonConstant(None) From noreply at buildbot.pypy.org Thu Dec 8 12:07:39 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Dec 2011 12:07:39 +0100 (CET) Subject: [pypy-commit] pypy default: Add a failing test for fixedview() on a list of floats Message-ID: <20111208110739.5A0458205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50295:15d89540a5c3 Date: 2011-12-08 11:07 +0000 http://bitbucket.org/pypy/pypy/changeset/15d89540a5c3/ Log: Add a failing test for fixedview() on a list of floats that expands to too much stuff for now. 
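Roughly, the behaviour that the new (currently failing) test pins down, restated at application level; this is a sketch only — the real test is test_floatlist_unpack_without_calls in the diff below:

def fn(n):
    # unpacking a fixed-size list of floats in a hot loop; the real test
    # asserts that the traced loop contains no residual 'call' operations
    # for the unpacking
    l = [2.3, 3.4, 4.5]
    for i in range(n):
        x, y, z = l

fn(1000)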
diff --git a/pypy/jit/codewriter/codewriter.py b/pypy/jit/codewriter/codewriter.py --- a/pypy/jit/codewriter/codewriter.py +++ b/pypy/jit/codewriter/codewriter.py @@ -13,7 +13,7 @@ class CodeWriter(object): callcontrol = None # for tests - debug = False + debug = True def __init__(self, cpu=None, jitdrivers_sd=[]): self.cpu = cpu diff --git a/pypy/module/_multiprocessing/test/test_semaphore.py b/pypy/module/_multiprocessing/test/test_semaphore.py --- a/pypy/module/_multiprocessing/test/test_semaphore.py +++ b/pypy/module/_multiprocessing/test/test_semaphore.py @@ -70,8 +70,10 @@ maxvalue = 1 sem = SemLock(kind, value, maxvalue) - assert sem.acquire() - assert not sem.acquire(timeout=0.1) + res = sem.acquire() + assert res == True + res = sem.acquire(timeout=0.1) + assert res == False def test_semaphore_rebuild(self): from _multiprocessing import SemLock diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -116,4 +116,15 @@ guard_no_overflow(descr=...) --TICK-- jump(p0, p1, p2, p3, p4, i35, p13, i7, descr=) - """) \ No newline at end of file + """) + + def test_floatlist_unpack_without_calls(self): + def fn(n): + l = [2.3, 3.4, 4.5] + for i in range(n): + x, y, z = l # ID: look + # + log = self.run(fn, [1000]) + loop, = log.loops_by_filename(self.filepath) + ops = loop.ops_by_id('look') + assert 'call' not in log.opnames(ops) From noreply at buildbot.pypy.org Thu Dec 8 12:18:04 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Dec 2011 12:18:04 +0100 (CET) Subject: [pypy-commit] pypy default: Revert this, committed by accident. (The other accidental commit is Message-ID: <20111208111804.6A0498205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50296:3ff9c88dca39 Date: 2011-12-08 12:17 +0100 http://bitbucket.org/pypy/pypy/changeset/3ff9c88dca39/ Log: Revert this, committed by accident. (The other accidental commit is actually good and a complete detail, so I'm not going to uncommit it just to commit it again...) diff --git a/pypy/jit/codewriter/codewriter.py b/pypy/jit/codewriter/codewriter.py --- a/pypy/jit/codewriter/codewriter.py +++ b/pypy/jit/codewriter/codewriter.py @@ -13,7 +13,7 @@ class CodeWriter(object): callcontrol = None # for tests - debug = True + debug = False def __init__(self, cpu=None, jitdrivers_sd=[]): self.cpu = cpu From noreply at buildbot.pypy.org Thu Dec 8 14:37:38 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Dec 2011 14:37:38 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: Rework the source code. Kill a few features that don't really make Message-ID: <20111208133738.D38508205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50297:363bac62f6dc Date: 2011-12-08 14:23 +0100 http://bitbucket.org/pypy/pypy/changeset/363bac62f6dc/ Log: Rework the source code. Kill a few features that don't really make sense to have; write by hand a fast decision tree to pick which tuple to specialize for; general fixes left and right. 
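The "fast decision tree" amounts to nested type checks that select a specialised storage class for 2-element tuples; a condensed pure-Python sketch of the idea follows (the class names here are simplified; the real code in the diff below works on wrapped objects and also covers strings, mixed cases and a 3-tuple fallback):

class TupleII(object):          # two ints, kept unboxed
    def __init__(self, a, b):
        self.a, self.b = a, b

class TupleFF(object):          # two floats, kept unboxed
    def __init__(self, a, b):
        self.a, self.b = a, b

class TupleOO(object):          # fallback: two arbitrary objects
    def __init__(self, a, b):
        self.a, self.b = a, b

def make_specialised(a, b):
    # hand-written decision tree: dispatch on the element types directly
    if type(a) is int:
        if type(b) is int:
            return TupleII(a, b)
    elif type(a) is float:
        if type(b) is float:
            return TupleFF(a, b)
    return TupleOO(a, b)

assert isinstance(make_specialised(1, 2), TupleII)
assert isinstance(make_specialised(1.5, 2.5), TupleFF)
assert isinstance(make_specialised(1, "x"), TupleOO)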
diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -4,35 +4,27 @@ from pypy.objspace.std.multimethod import FailedToImplement from pypy.objspace.std.tupleobject import W_TupleObject from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice -from pypy.objspace.std.floatobject import _hash_float from pypy.rlib.rarithmetic import intmask from pypy.rlib.objectmodel import compute_hash from pypy.rlib.unroll import unrolling_iterable -class Any(object): +class NotSpecialised(Exception): pass -class NotSpecialised(Exception): - pass - -_specialisations = [] - -def makespecialisedtuple(space, list_w): - for specialisedClass in unrolling_iterable(_specialisations): - try: - return specialisedClass.try_specialisation(space, list_w) - except NotSpecialised: - pass - raise NotSpecialised - class W_SpecialisedTupleObject(W_Object): from pypy.objspace.std.tupletype import tuple_typedef as typedef __slots__ = [] + def __repr__(self): + """ representation for debugging purposes """ + reprlist = [repr(item) for item in self._to_unwrapped_list()] + return "%s(%s)" % (self.__class__.__name__, ', '.join(reprlist)) + def tolist(self): raise NotImplementedError - def _tolistunwrapped(self): + def _to_unwrapped_list(self): + "NOT_RPYTHON" raise NotImplementedError def length(self): @@ -52,7 +44,7 @@ def unwrap(self, space): return tuple(self._to_unwrapped_list()) - + def make_specialised_class(typetuple): assert type(typetuple) == tuple @@ -61,79 +53,68 @@ iter_n = unrolling_iterable(range(nValues)) class cls(W_SpecialisedTupleObject): - def __init__(self, space, values): - #print cls,cls.__class__, values + def __init__(self, space, *values): + self.space = space assert len(values) == nValues for i in iter_n: - if typetuple[i] != Any: + if typetuple[i] != object: assert isinstance(values[i], typetuple[i]) - self.space = space + setattr(self, 'value%s' % i, values[i]) + + @classmethod + def make(cls, space, *values_w): + unwrappedparams = () for i in iter_n: - setattr(self, 'value%s' % i, values[i]) - - - @classmethod - def try_specialisation(cls, space, paramlist): - if len(paramlist) != nValues: - raise NotSpecialised - for param,val_type in unrolling_iterable(zip(paramlist, typetuple)): + w_obj = values_w[i] + val_type = typetuple[i] if val_type == int: - if space.type(param) != space.w_int: - raise NotSpecialised + unwrapped = space.int_w(w_obj) elif val_type == float: - if space.type(param) != space.w_float: - raise NotSpecialised + unwrapped = space.float_w(w_obj) elif val_type == str: - if space.type(param) != space.w_str: - raise NotSpecialised - elif val_type == Any: - pass + unwrapped = space.str_w(w_obj) + elif val_type == object: + unwrapped = w_obj else: - raise NotSpecialised - unwrappedparams = [None] * nValues - for i in iter_n: - if typetuple[i] == int: - unwrappedparams[i] = space.int_w(paramlist[i]) - elif typetuple[i] == float: - unwrappedparams[i] = space.float_w(paramlist[i]) - elif typetuple[i] == str: - unwrappedparams[i] = space.str_w(paramlist[i]) - elif typetuple[i] == Any: - unwrappedparams[i] = paramlist[i] - else: - raise NotSpecialised - return cls(space, unwrappedparams) - + raise AssertionError + unwrappedparams += (unwrapped,) + return cls(space, *unwrappedparams) + def length(self): return nValues - + def tolist(self): list_w = [None] * nValues for i in iter_n: - if typetuple[i] == Any: - list_w[i] = 
getattr(self, 'value%s' % i) - else: - list_w[i] = self.space.wrap(getattr(self, 'value%s' % i)) + value = getattr(self, 'value%s' % i) + if typetuple[i] != object: + value = self.space.wrap(value) + list_w[i] = value return list_w - + def _to_unwrapped_list(self): - list_w = [None] * nValues + "NOT_RPYTHON" + list_w = [None] * nValues for i in iter_n: - if typetuple[i] == Any: - list_w[i] = space.unwrap(getattr(self, 'value%s' % i))#xxx - else: - list_w[i] = getattr(self, 'value%s' % i) + value = getattr(self, 'value%s' % i) + if typetuple[i] == object: + value = self.space.unwrap(value) + list_w[i] = value return list_w - + def hash(self, space): + # XXX duplicate logic from tupleobject.py mult = 1000003 x = 0x345678 z = 2 for i in iter_n: value = getattr(self, 'value%s' % i) - if typetuple[i] == Any: - y = space.int_w(space.hash(value)) - elif typetuple[i] == float: # get correct hash for float which is an integer & other less frequent cases + if typetuple[i] == object: + y = space.int_w(space.hash(value)) + elif typetuple[i] == float: + # get the correct hash for float which is an + # integer & other less frequent cases + from pypy.objspace.std.floatobject import _hash_float y = _hash_float(space, value) else: y = compute_hash(value) @@ -142,57 +123,109 @@ mult += 82520 + z + z x += 97531 return space.wrap(intmask(x)) - + def _eq(self, w_other): - if not isinstance(w_other, cls): #so we will be sure we are comparing same types + if not isinstance(w_other, cls): + # if we are not comparing same types, give up raise FailedToImplement for i in iter_n: - if typetuple[i] == Any: - if not self.space.is_true(self.space.eq(getattr(self, 'value%s' % i), getattr(w_other, 'value%s' % i))): - return False + myval = getattr(self, 'value%s' % i) + otherval = getattr(w_other, 'value%s' % i) + if typetuple[i] == object: + if not self.space.eq_w(myval, otherval): + return False else: - if getattr(self, 'value%s' % i) != getattr(w_other, 'value%s' % i): - return False + if myval != otherval: + return False else: return True - + def eq(self, space, w_other): return space.newbool(self._eq(w_other)) - + def ne(self, space, w_other): return space.newbool(not self._eq(w_other)) - - def _compare(self, compare_op, w_other): - if not isinstance(w_other, cls): - raise FailedToImplement - ncmp = min(self.length(), w_other.length()) - for i in iter_n: - if typetuple[i] == Any:#like space.eq on wrapped or two params? - raise FailedToImplement - if ncmp > i: - l_val = getattr(self, 'value%s' % i) - r_val = getattr(w_other, 'value%s' % i) - if l_val != r_val: - return compare_op(l_val, r_val) - return compare_op(self.length(), w_other.length()) - + +## def _compare(self, compare_op, w_other): +## if not isinstance(w_other, cls): +## raise FailedToImplement +## ncmp = min(self.length(), w_other.length()) +## for i in iter_n: +## if typetuple[i] == Any:#like space.eq on wrapped or two params? 
+## raise FailedToImplement +## if ncmp > i: +## l_val = getattr(self, 'value%s' % i) +## r_val = getattr(w_other, 'value%s' % i) +## if l_val != r_val: +## return compare_op(l_val, r_val) +## return compare_op(self.length(), w_other.length()) + def getitem(self, index): for i in iter_n: if index == i: - if typetuple[i] == Any: - return getattr(self, 'value%s' % i) - else: - return self.space.wrap(getattr(self, 'value%s' % i)) + value = getattr(self, 'value%s' % i) + if typetuple[i] != object: + value = self.space.wrap(value) + return value raise IndexError - cls.__name__ = 'W_SpecialisedTupleObject' + ''.join([t.__name__.capitalize() for t in typetuple]) + cls.__name__ = ('W_SpecialisedTupleObject_' + + ''.join([t.__name__[0] for t in typetuple])) _specialisations.append(cls) return cls -make_specialised_class((float, float)) -for _typ1 in [int, str, Any]: - for _typ2 in [int, str, Any]: - make_specialised_class((_typ1, _typ2)) +# ---------- current specialized versions ---------- + +_specialisations = [] +Cls_ii = make_specialised_class((int, int)) +Cls_is = make_specialised_class((int, str)) +Cls_io = make_specialised_class((int, object)) +Cls_si = make_specialised_class((str, int)) +Cls_ss = make_specialised_class((str, str)) +Cls_so = make_specialised_class((str, object)) +Cls_oi = make_specialised_class((object, int)) +Cls_os = make_specialised_class((object, str)) +Cls_oo = make_specialised_class((object, object)) +Cls_ff = make_specialised_class((float, float)) +Cls_ooo = make_specialised_class((object, object, object)) + +def makespecialisedtuple(space, list_w): + if len(list_w) == 2: + w_arg1, w_arg2 = list_w + w_type1 = space.type(w_arg1) + w_type2 = space.type(w_arg2) + # + if w_type1 is space.w_int: + if w_type2 is space.w_int: + return Cls_ii.make(space, w_arg1, w_arg2) + elif w_type2 is space.w_str: + return Cls_is.make(space, w_arg1, w_arg2) + else: + return Cls_io.make(space, w_arg1, w_arg2) + # + elif w_type1 is space.w_str: + if w_type2 is space.w_int: + return Cls_si.make(space, w_arg1, w_arg2) + elif w_type2 is space.w_str: + return Cls_ss.make(space, w_arg1, w_arg2) + else: + return Cls_so.make(space, w_arg1, w_arg2) + # + elif w_type1 is space.w_float and w_type2 is space.w_float: + return Cls_ff.make(space, w_arg1, w_arg2) + # + else: + if w_type2 is space.w_int: + return Cls_oi.make(space, w_arg1, w_arg2) + elif w_type2 is space.w_str: + return Cls_os.make(space, w_arg1, w_arg2) + else: + return Cls_oo.make(space, w_arg1, w_arg2) + # + elif len(list_w) == 3: + return Cls_ooo.make(space, list_w[0], list_w[1], list_w[2]) + else: + raise NotSpecialised # ____________________________________________________________ @@ -224,23 +257,23 @@ start += step return space.newtuple(subitems) -def mul_specialisedtuple_times(space, w_tuple, w_times): - try: - times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: - if e.match(space, space.w_TypeError): - raise FailedToImplement - raise - if times == 1 and space.type(w_tuple) == space.w_tuple: - return w_tuple - items = w_tuple.tolist() - return space.newtuple(items * times) +##def mul_specialisedtuple_times(space, w_tuple, w_times): +## try: +## times = space.getindex_w(w_times, space.w_OverflowError) +## except OperationError, e: +## if e.match(space, space.w_TypeError): +## raise FailedToImplement +## raise +## if times == 1 and space.type(w_tuple) == space.w_tuple: +## return w_tuple +## items = w_tuple.tolist() +## return space.newtuple(items * times) -def mul__SpecialisedTuple_ANY(space, w_tuple, 
w_times): - return mul_specialisedtuple_times(space, w_tuple, w_times) +##def mul__SpecialisedTuple_ANY(space, w_tuple, w_times): +## return mul_specialisedtuple_times(space, w_tuple, w_times) -def mul__ANY_SpecialisedTuple(space, w_times, w_tuple): - return mul_specialisedtuple_times(space, w_tuple, w_times) +##def mul__ANY_SpecialisedTuple(space, w_times, w_tuple): +## return mul_specialisedtuple_times(space, w_tuple, w_times) def eq__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): return w_tuple1.eq(space, w_tuple2) @@ -248,19 +281,19 @@ def ne__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): return w_tuple1.ne(space, w_tuple2) -from operator import lt, le, ge, gt +##from operator import lt, le, ge, gt -def lt__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): - return space.newbool(w_tuple1._compare(lt, w_tuple2)) +##def lt__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(lt, w_tuple2)) -def le__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): - return space.newbool(w_tuple1._compare(le, w_tuple2)) +##def le__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(le, w_tuple2)) -def ge__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): - return space.newbool(w_tuple1._compare(ge, w_tuple2)) +##def ge__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(ge, w_tuple2)) -def gt__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): - return space.newbool(w_tuple1._compare(gt, w_tuple2)) +##def gt__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(gt, w_tuple2)) def hash__SpecialisedTuple(space, w_tuple): return w_tuple.hash(space) diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py --- a/pypy/objspace/std/test/test_specialisedtupleobject.py +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -1,10 +1,10 @@ -import py +import py, sys from pypy.objspace.std.tupleobject import W_TupleObject from pypy.objspace.std.specialisedtupleobject import W_SpecialisedTupleObject from pypy.objspace.std.specialisedtupleobject import _specialisations from pypy.interpreter.error import OperationError from pypy.conftest import gettestobjspace -from pypy.objspace.std.test.test_tupleobject import AppTestW_TupleObject +from pypy.objspace.std.test import test_tupleobject from pypy.interpreter import gateway @@ -19,7 +19,7 @@ def test_isspecialisedtupleobjectintint(self): w_tuple = self.space.newtuple([self.space.wrap(1), self.space.wrap(2)]) - assert isinstance(w_tuple, W_SpecialisedTupleObjectIntInt) + assert isinstance(w_tuple, W_SpecialisedTupleObject_ii) def test_isnotspecialisedtupleobject(self): w_tuple = self.space.newtuple([self.space.wrap({})]) @@ -27,8 +27,8 @@ def test_specialisedtupleclassname(self): w_tuple = self.space.newtuple([self.space.wrap(1), self.space.wrap(2)]) - assert w_tuple.__class__.__name__ == 'W_SpecialisedTupleObjectIntInt' - + assert w_tuple.__class__.__name__ == 'W_SpecialisedTupleObject_ii' + def test_hash_against_normal_tuple(self): N_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": False}) S_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) @@ -62,13 +62,17 @@ assert len(list_w) == 1 assert self.space.eq_w(list_w[0], self.space.wrap(5)) -class 
AppTestW_SpecialisedTupleObject(AppTestW_TupleObject): +class AppTestW_SpecialisedTupleObject: def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) def forbid_delegation(space, w_tuple): def delegation_forbidden(): - raise NotImplementedError + # haaaack + if sys._getframe(2).f_code.co_name == '_mm_repr_tupleS0': + return old_tolist() + raise NotImplementedError, w_tuple + old_tolist = w_tuple.tolist w_tuple.tolist = delegation_forbidden return w_tuple cls.w_forbid_delegation = cls.space.wrap(gateway.interp2app(forbid_delegation)) @@ -80,20 +84,23 @@ return ("SpecialisedTupleObject" + expected) in r def test_createspecialisedtuple(self): - spec = {int: 'Int', - float: 'Float', - str: 'Str', - list: 'Any'} + spec = {int: 'i', + float: 'f', + str: 's', + list: 'o'} # for x in [42, 4.2, "foo", []]: for y in [43, 4.3, "bar", []]: expected1 = spec[type(x)] expected2 = spec[type(y)] - if (expected1 == 'Float') ^ (expected2 == 'Float'): - if expected1 == 'Float': expected1 = 'Any' - if expected2 == 'Float': expected2 = 'Any' + if (expected1 == 'f') ^ (expected2 == 'f'): + if expected1 == 'f': expected1 = 'o' + if expected2 == 'f': expected2 = 'o' obj = (x, y) - assert self.isspecialised(obj, expected1 + expected2) + assert self.isspecialised(obj, '_' + expected1 + expected2) + # + obj = (1, 2, 3) + assert self.isspecialised(obj, '_ooo') def test_len(self): t = self.forbid_delegation((42,43)) @@ -104,31 +111,37 @@ assert not self.isspecialised((1.5,)) def test_slicing_to_specialised(self): - assert self.isspecialised((1, 2, 3)[0:2]) - assert self.isspecialised((1, '2', 3)[0:5:2]) + t = (1, 2, 3) + assert self.isspecialised(t[0:2]) + t = (1, '2', 3) + assert self.isspecialised(t[0:5:2]) def test_adding_to_specialised(self): - assert self.isspecialised((1,)+(2,)) + t = (1,) + assert self.isspecialised(t + (2,)) def test_multiply_to_specialised(self): - assert self.isspecialised((1,)*2) + t = (1,) + assert self.isspecialised(t * 2) def test_slicing_from_specialised(self): - assert (1,2,3)[0:2:1] == (1,2) + t = (1, 2, 3) + assert t[0:2:1] == (1, 2) def test_eq_no_delegation(self): - a = self.forbid_delegation((1,2)) - b = (1,2) + t = (1,) + a = self.forbid_delegation(t + (2,)) + b = (1, 2) assert a == b - - c = (2,1) + + c = (2, 1) assert not a == c - + def test_eq_can_delegate(self): a = (1,2) b = (1,3,2) assert not a == b - + values = [2, 2L, 2.0, 1, 1L, 1.0] for x in values: for y in values: @@ -144,11 +157,11 @@ assert a != c def test_ordering(self): - a = self.forbid_delegation((1,2)) + a = (1,2) #self.forbid_delegation((1,2)) --- code commented out assert a < (2,2) assert a < (1,3) assert not a < (1,2) - + assert a <= (2,2) assert a <= (1,2) assert not a <= (1,1) @@ -160,15 +173,34 @@ assert a > (0,2) assert a > (1,1) assert not a > (1,3) + + assert (2,2) > a + assert (1,3) > a + assert not (1,2) > a + + assert (2,2) >= a + assert (1,2) >= a + assert not (1,1) >= a + + assert (0,2) <= a + assert (1,2) <= a + assert not (1,3) <= a + assert (0,2) < a + assert (1,1) < a + assert not (1,3) < a + def test_hash(self): a = (1,2) - b = (1,) + (2,) # else a and b refer to same constant + b = (1,) + b += (2,) # else a and b refer to same constant assert hash(a) == hash(b) c = (2,4) assert hash(a) != hash(c) + assert hash(a) == hash((1L, 2L)) == hash((1.0, 2.0)) == hash((1.0, 2L)) + def test_getitem(self): t = self.forbid_delegation((5,3)) assert (t)[0] == 5 @@ -176,34 +208,25 @@ assert (t)[-1] == 3 assert (t)[-2] == 5 raises(IndexError, "t[2]") - + 
raises(IndexError, "t[-3]") + def test_three_tuples(self): - if not self.isspecialised((1,2,3)): - skip('3-tuples of ints are not specialised, so skip specific tests on them') - b = self.forbid_delegation((1,2,3)) + b = self.forbid_delegation((1, 2, 3)) c = (1,) - d = c + (2,3) + d = c + (2, 3) assert self.isspecialised(d) assert b == d - assert b <= d - + def test_mongrel(self): a = self.forbid_delegation((1, 2.2, '333')) - if not self.isspecialised(a): - skip('my chosen kind of mixed type tuple is not specialised, so skip specific tests on them') + assert self.isspecialised(a) assert len(a) == 3 assert a[0] == 1 and a[1] == 2.2 and a[2] == '333' - assert a == (1,) + (2.2,) + ('333',) - assert a < (1, 2.2, '334') - - def test_mongrel_with_any(self): - a = self.forbid_delegation((1, 2.2, '333',[])) - b = (1, 2.2) + ('333', []) - if not self.isspecialised(a): - skip('my chosen kind of mixed type tuple is not specialised, so skip specific tests on them') - assert len(a) == 4 - assert a[0] == 1 and a[1] == 2.2 and a[2] == '333' and a[3] == [] - assert a != (1, 2.2, '334', []) -# assert b == a -# assert a == (1,) + (2.2,) + ('333',) + ([],) -# assert a < (1, 2.2, '334', {}) + b = ('333',) + assert a == (1, 2.2,) + b + assert not a != (1, 2.2) + b + + +class AppTestAll(test_tupleobject.AppTestW_TupleObject): + def test_mul_identity(self): + skip("not working with specialisedtuple") diff --git a/pypy/objspace/std/test/test_tupleobject.py b/pypy/objspace/std/test/test_tupleobject.py --- a/pypy/objspace/std/test/test_tupleobject.py +++ b/pypy/objspace/std/test/test_tupleobject.py @@ -280,6 +280,8 @@ assert () * 10 == () assert (5,) * 3 == (5,5,5) assert (5,2) * 2 == (5,2,5,2) + + def test_mul_identity(self): t = (1,2,3) assert (t * 1) is t From noreply at buildbot.pypy.org Thu Dec 8 14:37:40 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Dec 2011 14:37:40 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: translation fix Message-ID: <20111208133740.082D98205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50298:38ecd0cebaa2 Date: 2011-12-08 14:37 +0100 http://bitbucket.org/pypy/pypy/changeset/38ecd0cebaa2/ Log: translation fix diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -53,17 +53,9 @@ iter_n = unrolling_iterable(range(nValues)) class cls(W_SpecialisedTupleObject): - def __init__(self, space, *values): + def __init__(self, space, *values_w): self.space = space - assert len(values) == nValues - for i in iter_n: - if typetuple[i] != object: - assert isinstance(values[i], typetuple[i]) - setattr(self, 'value%s' % i, values[i]) - - @classmethod - def make(cls, space, *values_w): - unwrappedparams = () + assert len(values_w) == nValues for i in iter_n: w_obj = values_w[i] val_type = typetuple[i] @@ -77,8 +69,7 @@ unwrapped = w_obj else: raise AssertionError - unwrappedparams += (unwrapped,) - return cls(space, *unwrappedparams) + setattr(self, 'value%s' % i, unwrapped) def length(self): return nValues @@ -197,33 +188,33 @@ # if w_type1 is space.w_int: if w_type2 is space.w_int: - return Cls_ii.make(space, w_arg1, w_arg2) + return Cls_ii(space, w_arg1, w_arg2) elif w_type2 is space.w_str: - return Cls_is.make(space, w_arg1, w_arg2) + return Cls_is(space, w_arg1, w_arg2) else: - return Cls_io.make(space, w_arg1, w_arg2) + return Cls_io(space, w_arg1, w_arg2) # elif w_type1 is 
space.w_str: if w_type2 is space.w_int: - return Cls_si.make(space, w_arg1, w_arg2) + return Cls_si(space, w_arg1, w_arg2) elif w_type2 is space.w_str: - return Cls_ss.make(space, w_arg1, w_arg2) + return Cls_ss(space, w_arg1, w_arg2) else: - return Cls_so.make(space, w_arg1, w_arg2) + return Cls_so(space, w_arg1, w_arg2) # elif w_type1 is space.w_float and w_type2 is space.w_float: - return Cls_ff.make(space, w_arg1, w_arg2) + return Cls_ff(space, w_arg1, w_arg2) # else: if w_type2 is space.w_int: - return Cls_oi.make(space, w_arg1, w_arg2) + return Cls_oi(space, w_arg1, w_arg2) elif w_type2 is space.w_str: - return Cls_os.make(space, w_arg1, w_arg2) + return Cls_os(space, w_arg1, w_arg2) else: - return Cls_oo.make(space, w_arg1, w_arg2) + return Cls_oo(space, w_arg1, w_arg2) # elif len(list_w) == 3: - return Cls_ooo.make(space, list_w[0], list_w[1], list_w[2]) + return Cls_ooo(space, list_w[0], list_w[1], list_w[2]) else: raise NotSpecialised From noreply at buildbot.pypy.org Thu Dec 8 14:47:43 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Dec 2011 14:47:43 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: Add 'withspecialisedtuple' by default, at least for testing the result. Message-ID: <20111208134743.060588205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50299:1f3ae2d48981 Date: 2011-12-08 14:47 +0100 http://bitbucket.org/pypy/pypy/changeset/1f3ae2d48981/ Log: Add 'withspecialisedtuple' by default, at least for testing the result. diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -366,6 +366,7 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) + config.objspace.std.suggest(withspecialisedtuple=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) From noreply at buildbot.pypy.org Thu Dec 8 15:02:03 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Dec 2011 15:02:03 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: hg merge default Message-ID: <20111208140203.D19DD8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50300:4f1c5b700152 Date: 2011-12-08 14:59 +0100 http://bitbucket.org/pypy/pypy/changeset/4f1c5b700152/ Log: hg merge default diff too long, truncating to 10000 out of 28312 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -1,3 +1,4 @@ b590cf6de4190623aad9aa698694c22e614d67b9 release-1.5 b48df0bf4e75b81d98f19ce89d4a7dc3e1dab5e5 benchmarked d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6 +ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7 diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -74,7 +74,8 @@ self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file - open(name, "w") + f = open(name, "w") + f.close() self.files.append(name) def test_tempnam(self): diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -201,7 +201,7 @@ RegrTest('test_difflib.py'), RegrTest('test_dircache.py', core=True), RegrTest('test_dis.py'), - RegrTest('test_distutils.py'), + RegrTest('test_distutils.py', skip=True), RegrTest('test_dl.py', skip=True), RegrTest('test_doctest.py', usemodules="thread"), 
RegrTest('test_doctest2.py'), diff --git a/lib-python/modified-2.7/ctypes/__init__.py b/lib-python/modified-2.7/ctypes/__init__.py --- a/lib-python/modified-2.7/ctypes/__init__.py +++ b/lib-python/modified-2.7/ctypes/__init__.py @@ -351,7 +351,7 @@ self._FuncPtr = _FuncPtr if handle is None: - self._handle = _ffi.CDLL(name) + self._handle = _ffi.CDLL(name, mode) else: self._handle = handle diff --git a/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py b/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py --- a/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py +++ b/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py @@ -1,6 +1,5 @@ import unittest from ctypes import * -from ctypes.test import xfail class MyInt(c_int): def __cmp__(self, other): @@ -27,7 +26,6 @@ self.assertEqual(None, cb()) - @xfail def test_int_callback(self): args = [] def func(arg): diff --git a/lib-python/2.7/pkgutil.py b/lib-python/modified-2.7/pkgutil.py copy from lib-python/2.7/pkgutil.py copy to lib-python/modified-2.7/pkgutil.py --- a/lib-python/2.7/pkgutil.py +++ b/lib-python/modified-2.7/pkgutil.py @@ -244,7 +244,8 @@ return mod def get_data(self, pathname): - return open(pathname, "rb").read() + with open(pathname, "rb") as f: + return f.read() def _reopen(self): if self.file and self.file.closed: diff --git a/lib-python/modified-2.7/test/test_import.py b/lib-python/modified-2.7/test/test_import.py --- a/lib-python/modified-2.7/test/test_import.py +++ b/lib-python/modified-2.7/test/test_import.py @@ -64,6 +64,7 @@ except ImportError, err: self.fail("import from %s failed: %s" % (ext, err)) else: + # XXX importing .pyw is missing on Windows self.assertEqual(mod.a, a, "module loaded (%s) but contents invalid" % mod) self.assertEqual(mod.b, b, diff --git a/lib-python/modified-2.7/test/test_repr.py b/lib-python/modified-2.7/test/test_repr.py --- a/lib-python/modified-2.7/test/test_repr.py +++ b/lib-python/modified-2.7/test/test_repr.py @@ -254,8 +254,14 @@ eq = self.assertEqual touch(os.path.join(self.subpkgname, self.pkgname + os.extsep + 'py')) from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import areallylongpackageandmodulenametotestreprtruncation - eq(repr(areallylongpackageandmodulenametotestreprtruncation), - "" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__)) + # On PyPy, we use %r to format the file name; on CPython it is done + # with '%s'. It seems to me that %r is safer . 
+ if '__pypy__' in sys.builtin_module_names: + eq(repr(areallylongpackageandmodulenametotestreprtruncation), + "" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__)) + else: + eq(repr(areallylongpackageandmodulenametotestreprtruncation), + "" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__)) eq(repr(sys), "") def test_type(self): diff --git a/lib-python/2.7/test/test_subprocess.py b/lib-python/modified-2.7/test/test_subprocess.py copy from lib-python/2.7/test/test_subprocess.py copy to lib-python/modified-2.7/test/test_subprocess.py --- a/lib-python/2.7/test/test_subprocess.py +++ b/lib-python/modified-2.7/test/test_subprocess.py @@ -16,11 +16,11 @@ # Depends on the following external programs: Python # -if mswindows: - SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), ' - 'os.O_BINARY);') -else: - SETBINARY = '' +#if mswindows: +# SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), ' +# 'os.O_BINARY);') +#else: +# SETBINARY = '' try: @@ -420,8 +420,9 @@ self.assertStderrEqual(stderr, "") def test_universal_newlines(self): - p = subprocess.Popen([sys.executable, "-c", - 'import sys,os;' + SETBINARY + + # NB. replaced SETBINARY with the -u flag + p = subprocess.Popen([sys.executable, "-u", "-c", + 'import sys,os;' + #SETBINARY + 'sys.stdout.write("line1\\n");' 'sys.stdout.flush();' 'sys.stdout.write("line2\\r");' @@ -448,8 +449,9 @@ def test_universal_newlines_communicate(self): # universal newlines through communicate() - p = subprocess.Popen([sys.executable, "-c", - 'import sys,os;' + SETBINARY + + # NB. replaced SETBINARY with the -u flag + p = subprocess.Popen([sys.executable, "-u", "-c", + 'import sys,os;' + #SETBINARY + 'sys.stdout.write("line1\\n");' 'sys.stdout.flush();' 'sys.stdout.write("line2\\r");' diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -379,12 +379,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): @@ -404,7 +406,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -124,7 +124,8 @@ # for now, we always allow types.pointer, else a lot of tests # break. 
We need to rethink how pointers are represented, though if my_ffitype is not ffitype and ffitype is not _ffi.types.void_p: - raise ArgumentError, "expected %s instance, got %s" % (type(value), ffitype) + raise ArgumentError("expected %s instance, got %s" % (type(value), + ffitype)) return value._get_buffer_value() def _cast_addr(obj, _, tp): diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -17,7 +17,7 @@ if len(f) == 3: if (not hasattr(tp, '_type_') or not isinstance(tp._type_, str) - or tp._type_ not in "iIhHbBlL"): + or tp._type_ not in "iIhHbBlLqQ"): #XXX: are those all types? # we just dont get the type name # in the interp levle thrown TypeError diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -1,117 +1,6 @@ -"""qvfgbcvna naq hgbcvna punvef -qlfgbcvna naq hgbcvna punvef -V'z fbeel, pbhyq lbh cyrnfr abg nterr jvgu gur png nf jryy? -V'z fbeel, pbhyq lbh cyrnfr abg nterr jvgu gur punve nf jryy? -jr cnffrq gur RH erivrj -cbfg RhebClguba fcevag fgnegf 12.IVV.2007, 10nz -RhebClguba raqrq -n Pyrna Ragrecevfrf cebqhpgvba -npnqrzl vf n pbzcyvpngrq ebyr tnzr -npnqrzvn vf n pbzcyvpngrq ebyr tnzr -jbexvat pbqr vf crn fbhc -abg lbhe snhyg, zber yvxr vg'f n zbivat gnetrg -guvf fragrapr vf snyfr -abguvat vf gehr -Yncfnat Fbhpubat -Oenpunzhgnaqn -fbeel, V'yy grnpu gur pnpghf ubj gb fjvz yngre -Jul fb znal znal znal znal znal ivbyvaf? -Jul fb znal znal znal znal znal bowrpgf? -"eha njnl naq yvir ba n snez" nccebnpu gb fbsgjner qrirybczrag -"va snpg, lbh zvtug xabj zber nobhg gur genafyngvba gbbypunva nsgre znfgrevat eclguba guna fbzr angvir fcrnxre xabjf nobhg uvf zbgure gbathr" - kbeNkNk -"jurer qvq nyy gur ivbyvaf tb?" -- ClCl fgnghf oybt: uggc://zberclcl.oybtfcbg.pbz/ -uggc://kxpq.pbz/353/ -pnfhnyvgl ivbyngvbaf naq sylvat -wrgmg abpu fpubxbynqvtre -R09 2X @PNN:85? -vs lbh'er gelvat gb oybj hc fghss, jub pnerf? -vs fghss oybjf hc, lbh pner -2008 jvyy or gur lrne bs clcl ba gur qrfxgbc -2008 jvyy or gur lrne bs gur qrfxgbc ba #clcl -2008 jvyy or gur lrne bs gur qrfxgbc ba #clcl, Wnahnel jvyy or gur zbagu bs gur nyc gbcf -lrf, ohg jung'g gur frafr bs 0 < "qhena qhena" -eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF +"""eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF pglcrf unf n fcva bs 1/3 ' ' vf n fcnpr gbb -2009 jvyy or gur lrne bs WVG ba gur qrfxgbc -N ynathntr vf n qvnyrpg jvgu na nezl naq anil -gbcvpf ner sbe gur srroyr zvaqrq -2009 vf gur lrne bs ersyrpgvba ba gur qrfxgbc -gur tybor vf bhe cbal, gur pbfzbf bhe erny ubefr -jub nz V naq vs lrf, ubj znal? -cebtenzzvat va orq vf n cresrpgyl svar npgvivgl -zbber'f ynj vf n qeht jvgu gur jbefg pbzr qbja -EClguba: jr hfr vg fb lbh qba'g unir gb -Zbber'f ynj vf n qeht jvgu gur jbefg pbzr qbja. EClguba: haqrpvqrq. -guvatf jvyy or avpr naq fghss -qba'g cbfg yvaxf gb cngragf urer -Abg lbhe hfhny nanylfrf. 
-Gur Neg bs gur Punaary -Clguba 300 -V fhccbfr ZO bs UGZY cre frpbaq vf abg gur hfhny fcrrq zrnfher crbcyr jbhyq rkcrpg sbe n wvg -gur fha arire frgf ba gur ClCl rzcver -ghegyrf ner snfgre guna lbh guvax -cebtenzzvat vf na nrfgrguvp raqrnibhe -P vf tbbq sbe fbzrguvat, whfg abg sbe jevgvat fbsgjner -trezna vf tbbq sbe fbzrguvat, whfg abg sbe jevgvat fbsgjner -trezna vf tbbq sbe artngvbaf, whfg abg sbe jevgvat fbsgjner -# nffreg qvq abg penfu -lbh fubhyq fgneg n cresrpg fbsgjner zbirzrag -lbh fubhyq fgneg n cresrpg punaary gbcvp zbirzrag -guvf vf n cresrpg punaary gbcvp -guvf vf n frys-ersreragvny punaary gbcvp -crrcubcr bcgvzvmngvbaf ner jung n Fhssvpvragyl Fzneg Pbzcvyre hfrf -"crrcubcr" bcgvzvmngvbaf ner jung na bcgvzvfgvp Pbzcvyre hfrf -pubbfr lbhe unpx -gur 'fhcre' xrljbeq vf abg gung uhttnoyr -wlguba cngpurf ner abg rabhtu sbe clcl -- qb lbh xabj oreyva? - nyy bs vg? - jryy, whfg oreyva -- ubj jvyy gur snpg gung gurl ner hfrq va bhe ercy punatr bhe gbcvpf? -- ubj pna vg rire unir jbexrq? -- jurer fubhyq gur unpx or fgberq? -- Vg'f uneq gb fnl rknpgyl jung pbafgvghgrf erfrnepu va gur pbzchgre jbeyq, ohg nf n svefg nccebkvzngvba, vg'f fbsgjner gung qbrfa'g unir hfref. -- Cebtenzzvat vf nyy nobhg xabjvat jura gb obvy gur benatr fcbatr qbaxrl npebff gur cuvyyvcvarf -- Jul fb znal, znal, znal, znal, znal, znal qhpxyvatf? -- ab qrgnvy vf bofpher rabhtu gb abg unir fbzr pbqr qrcraqvat ba vg. -- jung V trarenyyl jnag vf serr fcrrqhcf -- nyy bs ClCl vf kv-dhnyvgl -"lbh pna nyjnlf xvyy -9 be bf._rkvg() vs lbh'er va n uheel" -Ohernhpengf ohvyq npnqrzvp rzcverf juvpu puhea bhg zrnavatyrff fbyhgvbaf gb veeryrinag ceboyrzf. -vg'f abg n unpx, vg'f n jbexnebhaq -ClCl qbrfa'g unir pbcbylinevnqvp qrcraqragyl-zbabzbecurq ulcresyhknqf -ClCl qbrfa'g punatr gur shaqnzragny culfvpf pbafgnagf -Qnapr bs gur Fhtnecyhz Snvel -Wnin vf whfg tbbq rabhtu gb or cenpgvpny, ohg abg tbbq rabhtu gb or hfnoyr. -RhebClguba vf unccravat, qba'g rkcrpg nal dhvpx erfcbafr gvzrf. -"V jbhyq yvxr gb fgnl njnl sebz ernyvgl gura" -"gung'f jul gur 'be' vf ernyyl na 'naq' " -jvgu nyy nccebcevngr pbagrkghnyvfngvbavat -qba'g gevc ba gur cbjre pbeq -vzcyrzragvat YBTB va YBTB: "ghegyrf nyy gur jnl qbja" -gur ohooyrfbeg jbhyq or gur jebat jnl gb tb -gur cevapvcyr bs pbafreingvba bs zrff -gb fnir n gerr, rng n ornire -Qre Ovore znpugf evpugvt: Antg nyyrf xnchgg. -"Nal jbeyqivrj gung vfag jenpxrq ol frys-qbhog naq pbashfvba bire vgf bja vqragvgl vf abg n jbeyqivrj sbe zr." - Fpbgg Nnebafba -jr oryvrir va cnapnxrf, znlor -jr oryvrir va ghegyrf, znlor -jr qrsvavgryl oryvrir va zrgn -gur zngevk unf lbh -"Yvsr vf uneq, gura lbh anc" - n png -Vf Nezva ubzr jura gur havirefr prnfrf gb rkvfg? -Qhrffryqbes fcevag fgnegrq -frys.nobeeg("pnaabg ybnq negvpyrf") -QRAGVFGEL FLZOBY YVTUG IREGVPNY NAQ JNIR -"Gur UUH pnzchf vf n tbbq Dhnxr yriry" - Nezva -"Gur UUH pnzchf jbhyq or n greevoyr dhnxr yriry - lbh'q arire unir n pyhr jurer lbh ner" - zvpunry -N enqvbnpgvir png unf 18 unys-yvirf. - : j [fvtu] -f -pbybe-pbqrq oyhrf -"Neebtnapr va pbzchgre fpvrapr vf zrnfherq va anab-Qvwxfgenf." -ClCl arrqf n Whfg-va-Gvzr WVG -"Lbh pna'g gvzr geniry whfg ol frggvat lbhe pybpxf jebat" -Gjb guernqf jnyx vagb n one. Gur onexrrcre ybbxf hc naq lryyf, "url, V jnag qba'g nal pbaqvgvbaf enpr yvxr gvzr ynfg!" Clguba 2.k rfg cerfdhr zbeg, ivir Clguba! Clguba 2.k vf abg qrnq Riregvzr fbzrbar nethrf jvgu "Fznyygnyx unf nyjnlf qbar K", vg vf nyjnlf n tbbq uvag gung fbzrguvat arrqf gb or punatrq snfg. 
- Znephf Qraxre @@ -119,7 +8,6 @@ __kkk__ naq __ekkk__ if bcrengvba fybgf: cnegvpyr dhnaghz fhcrecbfvgvba xvaq bs sha ClCl vf na rkpvgvat grpuabybtl gung yrgf lbh gb jevgr snfg, cbegnoyr, zhygv-cyngsbez vagrecergref jvgu yrff rssbeg Nezva: "Cebybt vf n zrff.", PS: "Ab, vg'f irel pbby!", Nezva: "Vfa'g guvf jung V fnvq?" - tbbq, grfgf ner hfrshy fbzrgvzrf :-) ClCl vf yvxr nofheq gurngre jr unir ab nagv-vzcbffvoyr fgvpx gung znxrf fher gung nyy lbhe cebtenzf unyg clcl vf n enpr orgjrra crbcyr funivat lnxf naq gur havirefr cebqhpvat zber orneqrq lnxf. Fb sne, gur havirefr vf jvaavat @@ -136,14 +24,14 @@ ClCl 1.1.0orgn eryrnfrq: uggc://pbqrfcrnx.arg/clcl/qvfg/clcl/qbp/eryrnfr-1.1.0.ugzy "gurer fubhyq or bar naq bayl bar boivbhf jnl gb qb vg". ClCl inevnag: "gurer pna or A unys-ohttl jnlf gb qb vg" 1.1 svany eryrnfrq: uggc://pbqrfcrnx.arg/clcl/qvfg/clcl/qbp/eryrnfr-1.1.0.ugzy -1.1 svany eryrnfrq | nzq64 naq ccp ner bayl ninvynoyr va ragrecevfr irefvba + nzq64 naq ccp ner bayl ninvynoyr va ragrecevfr irefvba Vf gurer n clcl gvzr? - vs lbh pna srry vg (?) gura gurer vf ab, abezny jbex vf fhpu zhpu yrff gvevat guna inpngvbaf ab, abezny jbex vf fb zhpu yrff gvevat guna inpngvbaf -SVEFG gurl vtaber lbh, gura gurl ynhtu ng lbh, gura gurl svtug lbh, gura lbh jva. +-SVEFG gurl vtaber lbh, gura gurl ynhtu ng lbh, gura gurl svtug lbh, gura lbh jva.- vg'f Fhaqnl, znlor vg'f Fhaqnl, ntnva -"3 + 3 = 8" Nagb va gur WVG gnyx +"3 + 3 = 8" - Nagb va gur WVG gnyx RPBBC vf unccravat RPBBC vf svavfurq cflpb rngf bar oenva cre vapu bs cebterff @@ -175,10 +63,108 @@ "nu, whfg va gvzr qbphzragngvba" (__nc__) ClCl vf abg n erny IZ: ab frtsnhyg unaqyref gb qb gur ener pnfrf lbh pna'g unir obgu pbairavrapr naq fcrrq -gur WVG qbrfa'g jbex ba BF/K (abi'09) -ab fhccbeg sbe BF/K evtug abj! (abi'09) fyvccref urvtug pna or zrnfherq va k86 ertvfgref clcl vf n enpr orgjrra gur vaqhfgel gelvat gb ohvyq znpuvarf jvgu zber naq zber erfbheprf, naq gur clcl qrirybcref gelvat gb rng nyy bs gurz. Fb sne, gur jvaare vf fgvyy hapyrne +"znl pbagnva ahgf naq/be lbhat cbvagref" +vg'f nyy irel fvzcyr, yvxr gur ubyvqnlf +unccl ClCl'f lrne 2010! +fnzhryr fnlf gung jr ybfg n enmbe. fb jr pna'g funir lnxf +"yrg'f abg or bofpher, hayrff jr ernyyl arrq gb" + (abg guernq-fnsr, ohg jryy, abguvat vf) +clcl unf znal ceboyrzf, ohg rnpu bar unf znal fbyhgvbaf +whfg nabgure vgrz (1.333...) ba bhe erny-ahzorerq gbqb yvfg +ClCl vf Fuveg Bevtnzv erfrnepu + nafjrevat n dhrfgvba: "ab -- sbe ng yrnfg bar cbffvoyr vagrecergngvba bs lbhe fragrapr" +eryrnfr 1.2 hcpbzvat +ClCl 1.2 eryrnfrq - uggc://clcl.bet/ +AB IPF QVFPHFFVBAF +EClguba vf n svar pnzry unve oehfu +ClCl vf n npghnyyl n ivfhnyvmngvba cebwrpg, jr whfg ohvyq vagrecergref gb unir vagrerfgvat qngn gb ivfhnyvmr +clcl vf yvxr fnhfntrf +naq abj sbe fbzrguvat pbzcyrgryl qvssrerag +n 10gu bs sberire vf 1u45 +pbeerpg pbqr qbrfag arrq nal grfgf +cbfgfgehpghenyvfz rgp. +clcl UVG trarengbe +gur arj clcl fcbeg vf gb cnff clcl ohtf nf pclguba ohtf +jr unir zhpu zber vagrecergref guna hfref +ClCl 1.3 njnvgvat eryrnfr +ClCl 1.3 eryrnfrq +vg frrzf gb zr gung bapr lbh frggyr ba na rkrphgvba / bowrpg zbqry naq / be olgrpbqr sbezng, lbh'ir nyernql qrpvqrq jung ynathntrf (jurer gur 'f' frrzf fhcresyhbhf) fhccbeg vf tbvat gb or svefg pynff sbe +"Nyy ceboyrzf va ClCl pna or fbyirq ol nabgure yriry bs vagrecergngvba" +ClCl 1.3 eryrnfrq (jvaqbjf ovanevrf vapyhqrq) +jul qvq lbh thlf unir gb znxr gur ohvygva sbeghar zber vagrerfgvat guna npghny jbex? 
v whfg pngpurq zlfrys erfgnegvat clcl 20 gvzrf +"jr hfrq gb unir n zrff jvgu na bofpher vagresnpr, abj jr unir zrff urer naq bofpher vagresnpr gurer. cebterff" crqebavf ba n clcl fcevag +"phcf bs pbssrr ner yvxr nanybtvrf va gung V'z znxvat bar evtug abj" +"vg'f nyjnlf hc gb hf, va n jnl be gur bgure" +ClCl vf infg, naq pbagnvaf zhygvghqrf +qravny vf eneryl n tbbq qrohttvat grpuavdhr +"Yrg'f tb." - "Jr pna'g" - "Jul abg?" - "Jr'er jnvgvat sbe n Genafyngvba." - (qrfcnvevatyl) "Nu!" +'gung'f qrsvavgryl n pnfr bs "hu????"' +va gurbel gurer vf gur Ybbc, va cenpgvpr gurer ner oevqtrf +gur uneqqevir - pbafgnag qngn cvytevzntr +ClCl vf n gbby gb xrrc bgurejvfr qnatrebhf zvaqf fnsryl bpphcvrq. +jr ner n trareny senzrjbex ohvyg ba pbafvfgrag nccyvpngvba bs nqubp-arff +gur jnl gb nibvq n jbexnebhaq vf gb vagebqhpr n fgebatre jbexnebhaq fbzrjurer ryfr +pnyyvat gur genafyngvba gbby punva n 'fpevcg' vf xvaq bs bssrafvir +ehaavat clcl-p genafyngr.cl vf n ovg yvxr jngpuvat n guevyyre zbivr, vg pbhyq pbafhzr nyy gur zrzbel ng nal gvzr +ehaavat clcl-p genafyngr.cl vf n ovg yvxr jngpuvat n guevyyre zbivr, vg pbhyq qvr ng nal gvzr orpnhfr bs gur 32-ovg 4TO yvzvg bs ENZ +Qh jvefg rora tranh qnf reervpura, jbena xrvare tynhog +vs fjvgmreynaq jrer jurer terrpr vf (ba vfynaqf) jbhyq gurl nyy or pbaarpgrq ol oevqtrf? +genafyngvat clcl jvgu pclguba vf fbbbbbb fybj +ClCl 1.4 eryrnfrq! +Jr ner abg urebrf, whfg irel cngvrag. +QBAR zrnaf vg'f qbar +jul gurer vf ab "ClCl 1.4 eryrnfrq" va gbcvp nal zber? +fabj! fabj! +svanyyl, zrephevny zvtengvba vf unccravat! +Gur zvtengvba gb zrephevny vf pbzcyrgrq! uggc://ovgohpxrg.bet/clcl/clcl +fabj! fabj! (gre) +unccl arj lrne +naq anaanaw gb lbh nf jryy +Frrvat nf gur ynjf bs culfvpf ner ntnvafg lbh, lbh unir gb pnershyyl pbafvqre lbhe fpbcr fb gung lbhe tbnyf ner ernfbanoyr. +nf hfhny va clcl, gur fbyhgvba nccrnef pbzcyrgryl qvfcebcbegvbangr gb gur ceboyrz naq vafgrnq jr'yy tb sbe n pbzcyrgryl qvssrerag fvzcyre nccebnpu gb gur bevtvany ceboyrz +fabj, fabj! +va clcl lbh ner nyjnlf ng gur jebat yriry, va bar jnl be gur bgure +jryy, vg'f jebat ohg abg fb "irel jebat" nf vg ybbxrq + V ybir clcl +ynmvarff vzcngvrapr naq uhoevf +fabj, fabj +EClguba: guvatf lbh jbhyqa'g qb va Clguba, naq pna'g qb va P. +vg vf gur rkcrpgrq orunivbe, rkprcg jura lbh qba'g rkcrpg vg +erqrsvavat lryybj frrzf yvxr n orggre vqrn +"gung'f ubjrire whfg ratvarrevat" (svwny) +"[vg] whfg fubjf ntnva gung eclguba vf bofpher" (psobym) +"naljnl, clguba vf n infg ynathntr" (svwny) +bhg-bs-yvr-thneqf +"gurer ner qnlf ba juvpu lbh ybbx nebhaq naq abguvat fubhyq unir rire jbexrq" (svwny) +clcl vf n orggre xvaq bs sbbyvfuarff - ynp +ehaavat grfgf vf rffragvny sbe qrirybcvat clcl -- hu? qvq V oernx gur grfg? (svwny) +V'ir tbg guvf sybbe jnk gung'f nyfb n TERNG qrffreg gbccvat!! +rknexha: "gur cneg gung V gubhtug jnf tbvat gb or uneq jnf gevivny, fb abj V whfg unir guvf cneg gung V qvqa'g rira guvax bs gung vf uneq" +V fhccbfr jr pna yvir jvgu gur bofphevgl, nf ybat nf gurer vf n pbzzrag znxvat vg yvtugre +V nz n ovt oryvrire va ernfbaf. ohg gur nccnerag xvaq ner zl snibevgr. 
+clcl: trg n WVG sbe serr (jryy gur svefg qnl lbh jba'g znantr naq vg jvyy or irel sehfgengvat) + thgjbegu: bu, jr fubhyq znxr gur WVG zntvpnyyl orggre, jvgu qrpbengbef naq fghss +vg'f n pbzcyrgr unpx, ohg n irel zvavzny bar (nevtngb) +svefg gurl ynhtu ng lbh, gura gurl vtaber lbh, gura gurl svtug lbh, gura lbh jva +ClCl vf snzvyl sevraqyl +jr yvxr pbzcynvagf +gbqnl jr'er snfgre guna lrfgreqnl (hfhnyyl) +ClCl naq PClguba: gurl ner zbegny rarzvrf vagrag ba xvyyvat rnpu bgure +nethnoyl, rirelguvat vf n avpur +clcl unf ynlref yvxr bavbaf: crryvat gurz onpx jvyy znxr lbh pel +EClguba zntvpnyyl znxrf lbh evpu naq snzbhf (fnlf fb ba gur gva) +Vf evtbobg nebhaq jura gur havirefr prnfrf gb rkvfg? +ClCl vf gbb pbby sbe dhrelfgevatf. +< nevtngb> gura jung bpphef? < svwny> tbbq fghss V oryvrir +ClCl 1.6 eryrnfrq! + jurer ner gur grfgf? +uggc://gjvgcvp.pbz/52nr8s +N enaqbz dhbgr +Nyy rkprcgoybpxf frrz fnar. +N cvax tyvggrel ebgngvat ynzoqn +"vg'f yvxryl grzcbenel hagvy sberire" nevtb """ def some_topic(): diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# -*- coding: iso-8859-1 +# -*- coding: iso-8859-1 -*- # Note that PyPy contains also a built-in module 'sha' which will hide # this one if compiled in. diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -231,6 +231,9 @@ sqlite.sqlite3_result_text.argtypes = [c_void_p, c_char_p, c_int, c_void_p] sqlite.sqlite3_result_text.restype = None +sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] +sqlite.sqlite3_enable_load_extension.restype = c_int + ########################################## # END Wrapped SQLite C API and constants ########################################## @@ -705,6 +708,14 @@ from sqlite3.dump import _iterdump return _iterdump(self) + def enable_load_extension(self, enabled): + self._check_thread() + self._check_closed() + + rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) + if rc != SQLITE_OK: + raise OperationalError("Error enabling load extension") + DML, DQL, DDL = range(3) class Cursor(object): diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -25,7 +25,7 @@ __all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee'] + 'takewhile', 'tee', 'compress', 'product'] try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f diff --git a/lib_pypy/pyrepl/commands.py b/lib_pypy/pyrepl/commands.py --- a/lib_pypy/pyrepl/commands.py +++ b/lib_pypy/pyrepl/commands.py @@ -33,10 +33,9 @@ class Command(object): finish = 0 kills_digit_arg = 1 - def __init__(self, reader, (event_name, event)): + def __init__(self, reader, cmd): self.reader = reader - self.event = event - self.event_name = event_name + self.event_name, self.event = cmd def do(self): pass diff --git a/lib_pypy/pyrepl/pygame_console.py b/lib_pypy/pyrepl/pygame_console.py --- a/lib_pypy/pyrepl/pygame_console.py +++ b/lib_pypy/pyrepl/pygame_console.py @@ -130,7 +130,7 @@ s.fill(c, [0, 600 - bmargin, 800, bmargin]) s.fill(c, [800 - rmargin, 0, lmargin, 600]) - def refresh(self, screen, (cx, cy)): + def refresh(self, screen, cxy): self.screen = screen self.pygame_screen.fill(colors.bg, [0, tmargin + self.cur_top + self.scroll, @@ -139,8 +139,8 @@ line_top = self.cur_top width, height = self.fontsize - self.cxy = (cx, cy) - cp = 
self.char_pos(cx, cy) + self.cxy = cxy + cp = self.char_pos(*cxy) if cp[1] < tmargin: self.scroll = - (cy*self.fh + self.cur_top) self.repaint() @@ -148,7 +148,7 @@ self.scroll += (600 - bmargin) - (cp[1] + self.fh) self.repaint() if self.curs_vis: - self.pygame_screen.blit(self.cursor, self.char_pos(cx, cy)) + self.pygame_screen.blit(self.cursor, self.char_pos(*cxy)) for line in screen: if 0 <= line_top + self.scroll <= (600 - bmargin - tmargin - self.fh): if line: diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py --- a/lib_pypy/pyrepl/readline.py +++ b/lib_pypy/pyrepl/readline.py @@ -231,7 +231,11 @@ return ''.join(chars) def _histline(self, line): - return unicode(line.rstrip('\n'), ENCODING) + line = line.rstrip('\n') + try: + return unicode(line, ENCODING) + except UnicodeDecodeError: # bah, silently fall back... + return unicode(line, 'utf-8') def get_history_length(self): return self.saved_history_length @@ -268,7 +272,10 @@ f = open(os.path.expanduser(filename), 'w') for entry in history: if isinstance(entry, unicode): - entry = entry.encode(ENCODING) + try: + entry = entry.encode(ENCODING) + except UnicodeEncodeError: # bah, silently fall back... + entry = entry.encode('utf-8') entry = entry.replace('\n', '\r\n') # multiline history support f.write(entry + '\n') f.close() diff --git a/lib_pypy/pyrepl/unix_console.py b/lib_pypy/pyrepl/unix_console.py --- a/lib_pypy/pyrepl/unix_console.py +++ b/lib_pypy/pyrepl/unix_console.py @@ -163,7 +163,7 @@ def change_encoding(self, encoding): self.encoding = encoding - def refresh(self, screen, (cx, cy)): + def refresh(self, screen, cxy): # this function is still too long (over 90 lines) if not self.__gone_tall: @@ -198,6 +198,7 @@ # we make sure the cursor is on the screen, and that we're # using all of the screen if we can + cx, cy = cxy if cy < offset: offset = cy elif cy >= offset + height: @@ -411,7 +412,12 @@ e.args[4] == 'unexpected end of data': pass else: - raise + # was: "raise". But it crashes pyrepl, and by extension the + # pypy currently running, in which we are e.g. in the middle + # of some debugging session. Argh. Instead just print an + # error message to stderr and continue running, for now. 
+ self.partial_char = '' + sys.stderr.write('\n%s: %s\n' % (e.__class__.__name__, e)) else: self.partial_char = '' self.event_queue.push(c) diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py --- a/lib_pypy/syslog.py +++ b/lib_pypy/syslog.py @@ -38,9 +38,27 @@ _setlogmask.argtypes = (c_int,) _setlogmask.restype = c_int +_S_log_open = False +_S_ident_o = None + +def _get_argv(): + try: + import sys + script = sys.argv[0] + if isinstance(script, str): + return script[script.rfind('/')+1:] or None + except Exception: + pass + return None + @builtinify -def openlog(ident, option, facility): - _openlog(ident, option, facility) +def openlog(ident=None, logoption=0, facility=LOG_USER): + global _S_ident_o, _S_log_open + if ident is None: + ident = _get_argv() + _S_ident_o = c_char_p(ident) # keepalive + _openlog(_S_ident_o, logoption, facility) + _S_log_open = True @builtinify def syslog(arg1, arg2=None): @@ -48,11 +66,18 @@ priority, message = arg1, arg2 else: priority, message = LOG_INFO, arg1 + # if log is not opened, open it now + if not _S_log_open: + openlog() _syslog(priority, "%s", message) @builtinify def closelog(): - _closelog() + global _S_log_open, S_ident_o + if _S_log_open: + _closelog() + _S_log_open = False + _S_ident_o = None @builtinify def setlogmask(mask): diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -164,6 +164,7 @@ # if something: # assume this causes a NameError # # _this_ lines and the one # below we don't want from entry.getsource() + end = min(end, len(source)) for i in range(self.lineno, end): if source[i].rstrip().endswith(':'): end = i + 1 @@ -307,7 +308,7 @@ self._striptext = 'AssertionError: ' self._excinfo = tup self.type, self.value, tb = self._excinfo - self.typename = self.type.__name__ + self.typename = getattr(self.type, "__name__", "???") self.traceback = py.code.Traceback(tb) def __repr__(self): diff --git a/pypy/annotation/binaryop.py b/pypy/annotation/binaryop.py --- a/pypy/annotation/binaryop.py +++ b/pypy/annotation/binaryop.py @@ -252,7 +252,26 @@ # unsignedness is considered a rare and contagious disease def union((int1, int2)): - knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + if int1.unsigned == int2.unsigned: + knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + else: + t1 = int1.knowntype + if t1 is bool: + t1 = int + t2 = int2.knowntype + if t2 is bool: + t2 = int + + if t2 is int: + if int2.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t1 + knowntype = t1 + elif t1 is int: + if int1.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t2 + knowntype = t2 + else: + raise UnionError, "Merging these types (%s, %s) is not supported" % (t1, t2) return SomeInteger(nonneg=int1.nonneg and int2.nonneg, knowntype=knowntype) diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -591,13 +591,11 @@ immutable = True def __init__(self, method): self.method = method - -NUMBER = object() + annotation_to_ll_map = [ (SomeSingleFloat(), lltype.SingleFloat), (s_None, lltype.Void), # also matches SomeImpossibleValue() (s_Bool, lltype.Bool), - (SomeInteger(knowntype=r_ulonglong), NUMBER), (SomeFloat(), lltype.Float), (SomeLongFloat(), lltype.LongFloat), (SomeChar(), lltype.Char), @@ -623,10 +621,11 @@ return lltype.Ptr(p.PARENTTYPE) if isinstance(s_val, SomePtr): return s_val.ll_ptrtype + if type(s_val) 
is SomeInteger: + return lltype.build_number(None, s_val.knowntype) + for witness, T in annotation_to_ll_map: if witness.contains(s_val): - if T is NUMBER: - return lltype.build_number(None, s_val.knowntype) return T if info is None: info = '' @@ -635,7 +634,7 @@ raise ValueError("%sshould return a low-level type,\ngot instead %r" % ( info, s_val)) -ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map if ll is not NUMBER]) +ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map]) def lltype_to_annotation(T): try: diff --git a/pypy/annotation/specialize.py b/pypy/annotation/specialize.py --- a/pypy/annotation/specialize.py +++ b/pypy/annotation/specialize.py @@ -36,9 +36,7 @@ newtup = SpaceOperation('newtuple', starargs, argscopy[-1]) newstartblock.operations.append(newtup) newstartblock.closeblock(Link(argscopy, graph.startblock)) - graph.startblock.isstartblock = False graph.startblock = newstartblock - newstartblock.isstartblock = True argnames = argnames + ['.star%d' % i for i in range(nb_extra_args)] graph.signature = Signature(argnames) # note that we can mostly ignore defaults: if nb_extra_args > 0, diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py --- a/pypy/annotation/test/test_annrpython.py +++ b/pypy/annotation/test/test_annrpython.py @@ -856,6 +856,46 @@ py.test.raises(Exception, a.build_types, f, []) # if you want to get a r_uint, you have to be explicit about it + def test_add_different_ints(self): + def f(a, b): + return a + b + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_different_ints(self): + def f(a, b): + if a: + c = a + else: + c = b + return c + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_ruint_zero(self): + def f(a): + if a: + c = a + else: + c = 0 + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_merge_ruint_nonneg_signed(self): + def f(a, b): + if a: + c = a + else: + assert b >= 0 + c = b + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint, int]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_prebuilt_long_that_is_not_too_long(self): small_constant = 12L def f(): @@ -3029,7 +3069,7 @@ if g(x, y): g(x, r_uint(y)) a = self.RPythonAnnotator() - a.build_types(f, [int, int]) + py.test.raises(Exception, a.build_types, f, [int, int]) def test_compare_with_zero(self): def g(): diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py --- a/pypy/bin/checkmodule.py +++ b/pypy/bin/checkmodule.py @@ -1,43 +1,45 @@ #! /usr/bin/env python """ -Usage: checkmodule.py [-b backend] +Usage: checkmodule.py -Compiles the PyPy extension module from pypy/module// -into a fake program which does nothing. Useful for testing whether a -modules compiles without doing a full translation. Default backend is cli. - -WARNING: this is still incomplete: there are chances that the -compilation fails with strange errors not due to the module. If a -module is known to compile during a translation but don't pass -checkmodule.py, please report the bug (or, better, correct it :-). +Check annotation and rtyping of the PyPy extension module from +pypy/module//. Useful for testing whether a +modules compiles without doing a full translation. 
""" import autopath -import sys +import sys, os from pypy.objspace.fake.checkmodule import checkmodule def main(argv): - try: - assert len(argv) in (2, 4) - if len(argv) == 2: - backend = 'cli' - modname = argv[1] - if modname in ('-h', '--help'): - print >> sys.stderr, __doc__ - sys.exit(0) - if modname.startswith('-'): - print >> sys.stderr, "Bad command line" - print >> sys.stderr, __doc__ - sys.exit(1) - else: - _, b, backend, modname = argv - assert b == '-b' - except AssertionError: + if len(argv) != 2: print >> sys.stderr, __doc__ sys.exit(2) + modname = argv[1] + if modname in ('-h', '--help'): + print >> sys.stderr, __doc__ + sys.exit(0) + if modname.startswith('-'): + print >> sys.stderr, "Bad command line" + print >> sys.stderr, __doc__ + sys.exit(1) + if os.path.sep in modname: + if os.path.basename(modname) == '': + modname = os.path.dirname(modname) + if os.path.basename(os.path.dirname(modname)) != 'module': + print >> sys.stderr, "Must give '../module/xxx', or just 'xxx'." + sys.exit(1) + modname = os.path.basename(modname) + try: + checkmodule(modname) + except Exception, e: + import traceback, pdb + traceback.print_exc() + pdb.post_mortem(sys.exc_info()[2]) + return 1 else: - checkmodule(modname, backend, interactive=True) - print 'Module compiled succesfully' + print 'Passed.' + return 0 if __name__ == '__main__': - main(sys.argv) + sys.exit(main(sys.argv)) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -92,7 +92,7 @@ module_import_dependencies = { # no _rawffi if importing pypy.rlib.clibffi raises ImportError - # or CompilationError + # or CompilationError or py.test.skip.Exception "_rawffi" : ["pypy.rlib.clibffi"], "_ffi" : ["pypy.rlib.clibffi"], @@ -113,7 +113,7 @@ try: for name in modlist: __import__(name) - except (ImportError, CompilationError), e: + except (ImportError, CompilationError, py.test.skip.Exception), e: errcls = e.__class__.__name__ config.add_warning( "The module %r is disabled\n" % (modname,) + @@ -285,6 +285,9 @@ "actually create the full list until the resulting " "list is mutated", default=False), + BoolOption("withliststrategies", + "enable optimized ways to store lists of primitives ", + default=True), BoolOption("withtypeversion", "version type objects when changing them", diff --git a/pypy/config/test/test_translationoption.py b/pypy/config/test/test_translationoption.py new file mode 100644 --- /dev/null +++ b/pypy/config/test/test_translationoption.py @@ -0,0 +1,10 @@ +import py +from pypy.config.translationoption import get_combined_translation_config +from pypy.config.translationoption import set_opt_level +from pypy.config.config import ConflictConfigError + + +def test_no_gcrootfinder_with_boehm(): + config = get_combined_translation_config() + config.translation.gcrootfinder = "shadowstack" + py.test.raises(ConflictConfigError, set_opt_level, config, '0') diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -69,8 +69,8 @@ "statistics": [("translation.gctransformer", "framework")], "generation": [("translation.gctransformer", "framework")], "hybrid": [("translation.gctransformer", "framework")], - "boehm": [("translation.gctransformer", "boehm"), - ("translation.continuation", False)], # breaks + "boehm": [("translation.continuation", False), # breaks + ("translation.gctransformer", "boehm")], "markcompact": [("translation.gctransformer", 
"framework")], "minimark": [("translation.gctransformer", "framework")], }, @@ -398,6 +398,10 @@ # make_sure_not_resized often relies on it, so we always enable them config.translation.suggest(list_comprehension_operations=True) + # finally, make the choice of the gc definitive. This will fail + # if we have specified strange inconsistent settings. + config.translation.gc = config.translation.gc + # ---------------------------------------------------------------- def set_platform(config): diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -270,7 +270,12 @@ - *slicing*: the slice start must be within bounds. The stop doesn't need to, but it must not be smaller than the start. All negative indexes are disallowed, except for - the [:-1] special case. No step. + the [:-1] special case. No step. Slice deletion follows the same rules. + + - *slice assignment*: + only supports ``lst[x:y] = sublist``, if ``len(sublist) == y - x``. + In other words, slice assignment cannot change the total length of the list, + but just replace items. - *other operators*: ``+``, ``+=``, ``in``, ``*``, ``*=``, ``==``, ``!=`` work as expected. diff --git a/pypy/doc/config/objspace.std.withliststrategies.txt b/pypy/doc/config/objspace.std.withliststrategies.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withliststrategies.txt @@ -0,0 +1,2 @@ +Enable list strategies: Use specialized representations for lists of primitive +objects, such as ints. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -262,6 +262,26 @@ documented as such (as e.g. for hasattr()), in most cases PyPy lets the exception propagate instead. +Object Identity of Primitive Values, ``is`` and ``id`` +------------------------------------------------------- + +Object identity of primitive values works by value equality, not by identity of +the wrapper. This means that ``x + 1 is x + 1`` is always true, for arbitrary +integers ``x``. The rule applies for the following types: + + - ``int`` + + - ``float`` + + - ``long`` + + - ``complex`` + +This change requires some changes to ``id`` as well. ``id`` fulfills the +following condition: ``x is y <=> id(x) == id(y)``. Therefore ``id`` of the +above types will return a value that is computed from the argument, and can +thus be larger than ``sys.maxint`` (i.e. it can be an arbitrary long). + Miscellaneous ------------- @@ -284,14 +304,14 @@ never a dictionary as it sometimes is in CPython. Assigning to ``__builtins__`` has no effect. -* Do not compare immutable objects with ``is``. For example on CPython - it is true that ``x is 0`` works, i.e. does the same as ``type(x) is - int and x == 0``, but it is so by accident. If you do instead - ``x is 1000``, then it stops working, because 1000 is too large and - doesn't come from the internal cache. In PyPy it fails to work in - both cases, because we have no need for a cache at all. +* directly calling the internal magic methods of a few built-in types + with invalid arguments may have a slightly different result. For + example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return + ``NotImplemented`` on PyPy; on CPython, only the later does, and the + former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` + both raise ``TypeError`` everywhere.) 
This difference is an + implementation detail that shows up because of internal C-level slots + that PyPy does not have. -* Also, object identity of immutable keys in dictionaries is not necessarily - preserved. .. include:: _ref.txt diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -1,6 +1,3 @@ -.. include:: needswork.txt - -.. needs work, it talks about svn. also, it is not really user documentation Making a PyPy Release ======================= @@ -12,11 +9,8 @@ forgetting things. A set of todo files may also work. Check and prioritize all issues for the release, postpone some if necessary, -create new issues also as necessary. A meeting (or meetings) should be -organized to decide what things are priorities, should go in and work for -the release. - -An important thing is to get the documentation into an up-to-date state! +create new issues also as necessary. An important thing is to get +the documentation into an up-to-date state! Release Steps ---------------- diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.6`: http://pypy.org/download.html +.. _`Release 1.7`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. Windows is supported, but platform-specific bugs tend to take longer before we notice and fix diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -23,17 +23,20 @@ PyPy's implementation of the Python ``long`` type is slower than CPython's. Find out why and optimize them. +Make bytearray type fast +------------------------ + +PyPy's bytearray type is very inefficient. It would be an interesting +task to look into possible optimizations on this. + Numpy improvements ------------------ -This is more of a project-container than a single project. Possible ideas: +The numpy is rapidly progressing in pypy, so feel free to come to IRC and +ask for proposed topic. A not necesarilly up-to-date `list of topics`_ +is also available. -* experiment with auto-vectorization using SSE or implement vectorization - without automatically detecting it for array operations. - -* improve numpy, for example implement memory views. - -* interface with fortran/C libraries. +.. 
_`list of topics`: https://bitbucket.org/pypy/extradoc/src/extradoc/planning/micronumpy.txt Improving the jitviewer ------------------------ diff --git a/pypy/doc/release-1.7.0.rst b/pypy/doc/release-1.7.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-1.7.0.rst @@ -0,0 +1,94 @@ +================================== +PyPy 1.7 - widening the sweet spot +================================== + +We're pleased to announce the 1.7 release of PyPy. As became a habit, this +release brings a lot of bugfixes and performance improvements over the 1.6 +release. However, unlike the previous releases, the focus has been on widening +the "sweet spot" of PyPy. That is, classes of Python code that PyPy can greatly +speed up should be vastly improved with this release. You can download the 1.7 +release here: + + http://pypy.org/download.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 1.7 and cpython 2.7.1`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 32/64 or +Windows 32. Windows 64 work is ongoing, but not yet natively supported. + +The main topic of this release is widening the range of code which PyPy +can greatly speed up. On average on +our benchmark suite, PyPy 1.7 is around **30%** faster than PyPy 1.6 and up +to **20 times** faster on some benchmarks. + +.. _`pypy 1.7 and cpython 2.7.1`: http://speed.pypy.org + + +Highlights +========== + +* Numerous performance improvements. There are too many examples which python + constructs now should behave faster to list them. + +* Bugfixes and compatibility fixes with CPython. + +* Windows fixes. + +* PyPy now comes with stackless features enabled by default. However, + any loop using stackless features will interrupt the JIT for now, so no real + performance improvement for stackless-based programs. Contact pypy-dev for + info how to help on removing this restriction. + +* NumPy effort in PyPy was renamed numpypy. In order to try using it, simply + write:: + + import numpypy as numpy + + at the beginning of your program. There is a huge progress on numpy in PyPy + since 1.6, the main feature being implementation of dtypes. + +* JSON encoder (but not decoder) has been replaced with a new one. This one + is written in pure Python, but is known to outperform CPython's C extension + up to **2 times** in some cases. It's about **20 times** faster than + the one that we had in 1.6. + +* The memory footprint of some of our RPython modules has been drastically + improved. This should impact any applications using for example cryptography, + like tornado. + +* There was some progress in exposing even more CPython C API via cpyext. + +Things that didn't make it, expect in 1.8 soon +============================================== + +There is an ongoing work, which while didn't make it to the release, is +probably worth mentioning here. This is what you should probably expect in +1.8 some time soon: + +* Specialized list implementation. There is a branch that implements lists of + integers/floats/strings as compactly as array.array. This should drastically + improve performance/memory impact of some applications + +* NumPy effort is progressing forward, with multi-dimensional arrays coming + soon. + +* There are two brand new JIT assembler backends, notably for the PowerPC and + ARM processors. 
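The JSON-encoder claim above is the sort of statement that is easy to probe with a small, machine-dependent micro-benchmark. A minimal sketch, using an arbitrary made-up data set and iteration count, so the absolute numbers mean nothing beyond a rough comparison between interpreters:

    import json, time

    data = [{"id": i, "name": "item-%d" % i, "value": i * 0.5}
            for i in range(1000)]
    start = time.time()
    for _ in range(200):
        json.dumps(data)          # the pure-Python encoder on PyPy 1.7
    print("200 x json.dumps: %.2f s" % (time.time() - start))
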
+ +Fundraising +=========== + +It's maybe worth mentioning that we're running fundraising campaigns for +NumPy effort in PyPy and for Python 3 in PyPy. In case you want to see any +of those happen faster, we urge you to donate to `numpy proposal`_ or +`py3k proposal`_. In case you want PyPy to progress, but you trust us with +the general direction, you can always donate to the `general pot`_. + +.. _`numpy proposal`: http://pypy.org/numpydonate.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`general pot`: http://pypy.org diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -1,6 +1,5 @@ """codegen helpers and AST constant folding.""" import sys -import itertools from pypy.interpreter.astcompiler import ast, consts, misc from pypy.tool import stdlib_opcode as ops @@ -146,8 +145,7 @@ } unrolling_unary_folders = unrolling_iterable(unary_folders.items()) -for folder in itertools.chain(binary_folders.itervalues(), - unary_folders.itervalues()): +for folder in binary_folders.values() + unary_folders.values(): folder._always_inline_ = True del folder diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1,4 +1,3 @@ -import itertools import pypy from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction @@ -188,6 +187,12 @@ # ------------------------------------------------------------------- + def is_w(self, space, w_other): + return self is w_other + + def unique_id(self, space): + return space.wrap(compute_unique_id(self)) + def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) raise OperationError(space.w_TypeError, w_msg) @@ -513,8 +518,8 @@ exception_types_w = self.export_builtin_exceptions() # initialize with "bootstrap types" from objspace (e.g. w_None) - types_w = itertools.chain(self.get_builtin_types().iteritems(), - exception_types_w.iteritems()) + types_w = (self.get_builtin_types().items() + + exception_types_w.items()) for name, w_type in types_w: self.setitem(self.builtin.w_dict, self.wrap(name), w_type) @@ -681,9 +686,17 @@ """shortcut for space.is_true(space.eq(w_obj1, w_obj2))""" return self.is_w(w_obj1, w_obj2) or self.is_true(self.eq(w_obj1, w_obj2)) - def is_w(self, w_obj1, w_obj2): - """shortcut for space.is_true(space.is_(w_obj1, w_obj2))""" - return self.is_true(self.is_(w_obj1, w_obj2)) + def is_(self, w_one, w_two): + return self.newbool(self.is_w(w_one, w_two)) + + def is_w(self, w_one, w_two): + # done by a method call on w_two (and not on w_one, because of the + # expected programming style where we say "if x is None" or + # "if x is object"). + return w_two.is_w(self, w_one) + + def id(self, w_obj): + return w_obj.unique_id(self) def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" @@ -777,22 +790,63 @@ """Unpack an iterable object into a real (interpreter-level) list. Raise an OperationError(w_ValueError) if the length is wrong.""" w_iterator = self.iter(w_iterable) - # If we know the expected length we can preallocate. 
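The hunk below splits unpackiterable() into a known-length path and an unknown-length path. A rough pure-Python rendering of the known-length logic, with an invented name and without the JIT annotations:

    def unpack_known_length(iterator, expected_length):
        # fixed-size result; complain if the iterator yields too many
        # or too few items
        items = [None] * expected_length
        idx = 0
        for item in iterator:
            if idx == expected_length:
                raise ValueError("too many values to unpack")
            items[idx] = item
            idx += 1
        if idx < expected_length:
            plural = "" if idx == 1 else "s"
            raise ValueError("need more than %d value%s to unpack"
                             % (idx, plural))
        return items
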
if expected_length == -1: + # xxx special hack for speed + from pypy.interpreter.generator import GeneratorIterator + if isinstance(w_iterator, GeneratorIterator): + lst_w = [] + w_iterator.unpack_into(lst_w) + return lst_w + # /xxx + return self._unpackiterable_unknown_length(w_iterator, w_iterable) + else: + lst_w = self._unpackiterable_known_length(w_iterator, + expected_length) + return lst_w[:] # make the resulting list resizable + + @jit.dont_look_inside + def _unpackiterable_unknown_length(self, w_iterator, w_iterable): + # Unpack a variable-size list of unknown length. + # The JIT does not look inside this function because it + # contains a loop (made explicit with the decorator above). + # + # If we can guess the expected length we can preallocate. + try: + lgt_estimate = self.len_w(w_iterable) + except OperationError, o: + if (not o.match(self, self.w_AttributeError) and + not o.match(self, self.w_TypeError)): + raise + items = [] + else: try: - lgt_estimate = self.len_w(w_iterable) - except OperationError, o: - if (not o.match(self, self.w_AttributeError) and - not o.match(self, self.w_TypeError)): + items = newlist(lgt_estimate) + except MemoryError: + items = [] # it might have lied + # + while True: + try: + w_item = self.next(w_iterator) + except OperationError, e: + if not e.match(self, self.w_StopIteration): raise - items = [] - else: - try: - items = newlist(lgt_estimate) - except MemoryError: - items = [] # it might have lied - else: - items = [None] * expected_length + break # done + items.append(w_item) + # + return items + + @jit.dont_look_inside + def _unpackiterable_known_length(self, w_iterator, expected_length): + # Unpack a known length list, without letting the JIT look inside. + # Implemented by just calling the @jit.unroll_safe version, but + # the JIT stopped looking inside already. + return self._unpackiterable_known_length_jitlook(w_iterator, + expected_length) + + @jit.unroll_safe + def _unpackiterable_known_length_jitlook(self, w_iterator, + expected_length): + items = [None] * expected_length idx = 0 while True: try: @@ -801,26 +855,29 @@ if not e.match(self, self.w_StopIteration): raise break # done - if expected_length != -1 and idx == expected_length: + if idx == expected_length: raise OperationError(self.w_ValueError, - self.wrap("too many values to unpack")) - if expected_length == -1: - items.append(w_item) - else: - items[idx] = w_item + self.wrap("too many values to unpack")) + items[idx] = w_item idx += 1 - if expected_length != -1 and idx < expected_length: + if idx < expected_length: if idx == 1: plural = "" else: plural = "s" - raise OperationError(self.w_ValueError, - self.wrap("need more than %d value%s to unpack" % - (idx, plural))) + raise operationerrfmt(self.w_ValueError, + "need more than %d value%s to unpack", + idx, plural) return items - unpackiterable_unroll = jit.unroll_safe(func_with_new_name(unpackiterable, - 'unpackiterable_unroll')) + def unpackiterable_unroll(self, w_iterable, expected_length): + # Like unpackiterable(), but for the cases where we have + # an expected_length and want to unroll when JITted. + # Returns a fixed-size list. + w_iterator = self.iter(w_iterable) + assert expected_length != -1 + return self._unpackiterable_known_length_jitlook(w_iterator, + expected_length) def fixedview(self, w_iterable, expected_length=-1): """ A fixed list view of w_iterable. 
Don't modify the result @@ -835,6 +892,16 @@ """ return self.unpackiterable(w_iterable, expected_length) + def listview_str(self, w_list): + """ Return a list of unwrapped strings out of a list of strings. If the + argument is not a list or does not contain only strings, return None. + May return None anyway. + """ + return None + + def newlist_str(self, list_s): + return self.newlist([self.wrap(s) for s in list_s]) + @jit.unroll_safe def exception_match(self, w_exc_type, w_check_class): """Checks if the given exception type matches 'w_check_class'.""" @@ -969,9 +1036,6 @@ def isinstance_w(self, w_obj, w_type): return self.is_true(self.isinstance(w_obj, w_type)) - def id(self, w_obj): - return self.wrap(compute_unique_id(w_obj)) - # The code below only works # for the simple case (new-style instance). # These methods are patched with the full logic by the __builtin__ @@ -1543,6 +1607,8 @@ 'UnicodeError', 'ValueError', 'ZeroDivisionError', + 'UnicodeEncodeError', + 'UnicodeDecodeError', ] ## Irregular part of the interface: diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -1,14 +1,15 @@ +from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.gateway import NoneNotWrapped +from pypy.interpreter.pyopcode import LoopBlock from pypy.rlib import jit -from pypy.interpreter.pyopcode import LoopBlock +from pypy.rlib.objectmodel import specialize class GeneratorIterator(Wrappable): "An iterator created by a generator." _immutable_fields_ = ['pycode'] - + def __init__(self, frame): self.space = frame.space self.frame = frame # turned into None when frame_finished_execution @@ -81,7 +82,7 @@ # if the frame is now marked as finished, it was RETURNed from if frame.frame_finished_execution: self.frame = None - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) else: return w_result # YIELDed finally: @@ -97,21 +98,21 @@ def throw(self, w_type, w_val, w_tb): from pypy.interpreter.pytraceback import check_traceback space = self.space - + msg = "throw() third argument must be a traceback object" if space.is_w(w_tb, space.w_None): tb = None else: tb = check_traceback(space, w_tb, msg) - + operr = OperationError(w_type, w_val, tb) operr.normalize_exception(space) return self.send_ex(space.w_None, operr) - + def descr_next(self): """x.next() -> the next value, or raise StopIteration""" return self.send_ex(self.space.w_None) - + def descr_close(self): """x.close(arg) -> raise GeneratorExit inside generator.""" assert isinstance(self, GeneratorIterator) @@ -124,7 +125,7 @@ e.match(space, space.w_GeneratorExit): return space.w_None raise - + if w_retval is not None: msg = "generator ignored GeneratorExit" raise OperationError(space.w_RuntimeError, space.wrap(msg)) @@ -155,3 +156,44 @@ "interrupting generator of ") break block = block.previous + + # Results can be either an RPython list of W_Root, or it can be an + # app-level W_ListObject, which also has an append() method, that's why we + # generate 2 versions of the function and 2 jit drivers. 
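The comment above describes a small factory trick: calling the inner helper twice produces two independent function objects, and in the real code each copy also gets its own JitDriver. A stripped-down sketch of the pattern, with made-up names:

    def _create_drain():
        # every call returns a fresh copy of the function; the JIT then
        # specializes each copy for the kind of 'results' it receives
        def drain(generator, results):
            for value in generator:
                results.append(value)
            return results
        return drain

    drain_into_list = _create_drain()      # plain RPython list of W_Root
    drain_into_w_list = _create_drain()    # app-level W_ListObject
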
+ def _create_unpack_into(): + jitdriver = jit.JitDriver(greens=['pycode'], + reds=['self', 'frame', 'results']) + def unpack_into(self, results): + """This is a hack for performance: runs the generator and collects + all produced items in a list.""" + # XXX copied and simplified version of send_ex() + space = self.space + if self.running: + raise OperationError(space.w_ValueError, + space.wrap('generator already executing')) + frame = self.frame + if frame is None: # already finished + return + self.running = True + try: + pycode = self.pycode + while True: + jitdriver.jit_merge_point(self=self, frame=frame, + results=results, pycode=pycode) + try: + w_result = frame.execute_frame(space.w_None) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break + # if the frame is now marked as finished, it was RETURNed from + if frame.frame_finished_execution: + break + results.append(w_result) # YIELDed + finally: + frame.f_backref = jit.vref_None + self.running = False + self.frame = None + return unpack_into + unpack_into = _create_unpack_into() + unpack_into_w = _create_unpack_into() \ No newline at end of file diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated, instantiate from pypy.rlib.jit import hint from pypy.rlib.debug import make_sure_not_resized, check_nonneg -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rlib import jit from pypy.tool import stdlib_opcode from pypy.tool.stdlib_opcode import host_bytecode_spec @@ -167,7 +167,7 @@ # Execution starts just after the last_instr. Initially, # last_instr is -1. After a generator suspends it points to # the YIELD_VALUE instruction. - next_instr = self.last_instr + 1 + next_instr = r_uint(self.last_instr + 1) if next_instr != 0: self.pushvalue(w_inputvalue) # @@ -691,6 +691,7 @@ handlerposition = space.int_w(w_handlerposition) valuestackdepth = space.int_w(w_valuestackdepth) assert valuestackdepth >= 0 + assert handlerposition >= 0 blk = instantiate(get_block_class(opname)) blk.handlerposition = handlerposition blk.valuestackdepth = valuestackdepth diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -837,6 +837,7 @@ raise Yield def jump_absolute(self, jumpto, next_instr, ec): + check_nonneg(jumpto) return jumpto def JUMP_FORWARD(self, jumpby, next_instr): @@ -1278,7 +1279,7 @@ def handle(self, frame, unroller): next_instr = self.really_handle(frame, unroller) # JIT hack - return next_instr + return r_uint(next_instr) def really_handle(self, frame, unroller): """ Purely abstract method diff --git a/pypy/interpreter/test/test_executioncontext.py b/pypy/interpreter/test/test_executioncontext.py --- a/pypy/interpreter/test/test_executioncontext.py +++ b/pypy/interpreter/test/test_executioncontext.py @@ -292,7 +292,7 @@ import os, sys print sys.executable, self.tmpfile if sys.platform == "win32": - cmdformat = '""%s" "%s""' # excellent! tons of "! 
+ cmdformat = '"%s" "%s"' else: cmdformat = "'%s' '%s'" g = os.popen(cmdformat % (sys.executable, self.tmpfile), 'r') diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -587,7 +587,7 @@ assert isinstance(meth2, Method) assert meth2.call_args(args) == obj1 # Check method returned from unbound_method.__get__() - w_meth3 = descr_function_get(space, func, None, space.type(obj2)) + w_meth3 = descr_function_get(space, func, space.w_None, space.type(obj2)) meth3 = space.unwrap(w_meth3) w_meth4 = meth3.descr_method_get(obj2, space.w_None) meth4 = space.unwrap(w_meth4) diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -117,7 +117,7 @@ g = f() raises(NameError, g.throw, NameError, "Error", None) - + def test_throw_fail(self): def f(): yield 1 @@ -129,7 +129,7 @@ yield 1 g = f() raises(TypeError, g.throw, list()) - + def test_throw_fail3(self): def f(): yield 1 @@ -188,7 +188,7 @@ g = f() g.next() raises(NameError, g.close) - + def test_close_fail(self): def f(): try: @@ -267,3 +267,15 @@ assert r.startswith("= self.min_fragment: - self.total_mallocs -= (stop - middle) + self.total_mallocs -= r_uint(stop - middle) self._add_free_block(middle, stop) return True else: @@ -77,7 +77,7 @@ # Hack to make sure that mcs are not within 32-bits of one # another for testing purposes rmmap.hint.pos += 0x80000000 - size - self.total_memory_allocated += size + self.total_memory_allocated += r_uint(size) data = rffi.cast(lltype.Signed, data) return self._add_free_block(data, data + size) diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -111,6 +111,16 @@ def repr_of_descr(self): return '<%s %s %s>' % (self._clsname, self.name, self.offset) +class DynamicFieldDescr(BaseFieldDescr): + def __init__(self, offset, fieldsize, is_pointer, is_float, is_signed): + self.offset = offset + self._fieldsize = fieldsize + self._is_pointer_field = is_pointer + self._is_float_field = is_float + self._is_field_signed = is_signed + + def get_field_size(self, translate_support_code): + return self._fieldsize class NonGcPtrFieldDescr(BaseFieldDescr): _clsname = 'NonGcPtrFieldDescr' @@ -182,6 +192,7 @@ def repr_of_descr(self): return '<%s>' % self._clsname + class NonGcPtrArrayDescr(BaseArrayDescr): _clsname = 'NonGcPtrArrayDescr' def get_item_size(self, translate_support_code): @@ -211,6 +222,13 @@ def get_ofs_length(self, translate_support_code): return -1 +class DynamicArrayNoLengthDescr(BaseArrayNoLengthDescr): + def __init__(self, itemsize): + self.itemsize = itemsize + + def get_item_size(self, translate_support_code): + return self.itemsize + class NonGcPtrArrayNoLengthDescr(BaseArrayNoLengthDescr): _clsname = 'NonGcPtrArrayNoLengthDescr' def get_item_size(self, translate_support_code): @@ -305,12 +323,16 @@ _clsname = '' loop_token = None arg_classes = '' # <-- annotation hack - ffi_flags = 0 + ffi_flags = 1 - def __init__(self, arg_classes, extrainfo=None, ffi_flags=0): + def __init__(self, arg_classes, extrainfo=None, ffi_flags=1): self.arg_classes = arg_classes # string of "r" and "i" (ref/int) self.extrainfo = extrainfo self.ffi_flags = ffi_flags + # NB. 
the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which + # makes sense on Windows as it's the one for all the C functions + # we are compiling together with the JIT. On non-Windows platforms + # it is just ignored anyway. def __repr__(self): res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) @@ -351,6 +373,10 @@ return False # unless overridden def create_call_stub(self, rtyper, RESULT): + from pypy.rlib.clibffi import FFI_DEFAULT_ABI + assert self.get_call_conv() == FFI_DEFAULT_ABI, ( + "%r: create_call_stub() with a non-default call ABI" % (self,)) + def process(c): if c == 'L': assert longlong.supports_longlong @@ -445,7 +471,7 @@ """ _clsname = 'DynamicIntCallDescr' - def __init__(self, arg_classes, result_size, result_sign, extrainfo=None, ffi_flags=0): + def __init__(self, arg_classes, result_size, result_sign, extrainfo, ffi_flags): BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) assert isinstance(result_sign, bool) self._result_size = chr(result_size) diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -8,7 +8,7 @@ class UnsupportedKind(Exception): pass -def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo=None, ffi_flags=0): +def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo, ffi_flags): """Get a call descr: the types of result and args are represented by rlib.libffi.types.*""" try: diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -648,14 +648,10 @@ # make a malloc function, with two arguments def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - has_finalizer = bool(tid & (1< -1: + if self.longevity[arg][1] > -1 and arg in useful: if arg.type == FLOAT: # xxx is it really a good idea? at the first CALL they # will all be flushed anyway @@ -287,15 +283,15 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: - loop_consts = {} - else: - loop_consts = {} - for i in range(len(inputargs)): - if inputargs[i] is jump.getarg(i): - loop_consts[inputargs[i]] = i - return loop_consts + #def _compute_loop_consts(self, inputargs, jump, looptoken): + # if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: + # loop_consts = {} + # else: + # loop_consts = {} + # for i in range(len(inputargs)): + # if inputargs[i] is jump.getarg(i): + # loop_consts[inputargs[i]] = i + # return loop_consts def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py @@ -450,8 +446,14 @@ def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" + + # returns a pair longevity/useful. Non-useful variables are ones that + # never appear in the assembler or it does not matter if they appear on + # stack or in registers. 
Main example is loop arguments that go + # only to guard operations or to jump or to finish produced = {} last_used = {} + useful = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -459,8 +461,11 @@ continue assert op.result not in produced produced[op.result] = i + opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) + if opnum != rop.JUMP and opnum != rop.FINISH: + useful[arg] = None if isinstance(arg, Box) and arg not in last_used: last_used[arg] = i if op.is_guard(): @@ -486,7 +491,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity + return longevity, useful def loc(self, v): if v is None: # xxx kludgy @@ -1067,6 +1072,8 @@ self.PerformDiscard(op, [base_loc, ofs, itemsize, fieldsize, index_loc, temp_loc, value_loc]) + consider_setinteriorfield_raw = consider_setinteriorfield_gc + def consider_strsetitem(self, op): args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) @@ -1143,9 +1150,22 @@ # 'index' but must be in a different register than 'base'. self.rm.possibly_free_var(op.getarg(1)) result_loc = self.force_allocate_reg(op.result, [op.getarg(0)]) + assert isinstance(result_loc, RegLoc) + # two cases: 1) if result_loc is a normal register, use it as temp_loc + if not result_loc.is_xmm: + temp_loc = result_loc + else: + # 2) if result_loc is an xmm register, we (likely) need another + # temp_loc that is a normal register. It can be in the same + # register as 'index' but not 'base'. + tempvar = TempBox() + temp_loc = self.rm.force_allocate_reg(tempvar, [op.getarg(0)]) + self.rm.possibly_free_var(tempvar) self.rm.possibly_free_var(op.getarg(0)) self.Perform(op, [base_loc, ofs, itemsize, fieldsize, - index_loc, sign_loc], result_loc) + index_loc, temp_loc, sign_loc], result_loc) + + consider_getinteriorfield_raw = consider_getinteriorfield_gc def consider_int_is_true(self, op, guard_op): # doesn't need arg to be in a register @@ -1419,8 +1439,11 @@ # i.e. the n'th word beyond the fixed frame size. return -WORD * (FRAME_FIXED_SIZE + position) +def _valid_addressing_size(size): + return size == 1 or size == 2 or size == 4 or size == 8 + def _get_scale(size): - assert size == 1 or size == 2 or size == 4 or size == 8 + assert _valid_addressing_size(size) if size < 4: return size - 1 # 1, 2 => 0, 1 else: diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -17,7 +17,7 @@ class AssemblerLocation(object): # XXX: Is adding "width" here correct? 
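The regloc.py part of this change stores the location code letter as a _location_code attribute on the class (or instance) instead of returning it from one location_code() method per class. Schematically, with class names invented for illustration:

    class LocationSketch(object):
        _location_code = '?'               # overridden by subclasses

        def location_code(self):
            return self._location_code     # one shared accessor

    class StackLocSketch(LocationSketch):
        _location_code = 'b'

    class RegLocSketch(LocationSketch):
        def __init__(self, is_xmm):
            # xmm registers report a different code, so here it is
            # set per instance rather than per class
            self._location_code = 'x' if is_xmm else 'r'
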
- __slots__ = ('value', 'width') + _attrs_ = ('value', 'width', '_location_code') _immutable_ = True def _getregkey(self): return self.value @@ -25,6 +25,9 @@ def is_memory_reference(self): return self.location_code() in ('b', 's', 'j', 'a', 'm') + def location_code(self): + return self._location_code + def value_r(self): return self.value def value_b(self): return self.value def value_s(self): return self.value @@ -38,6 +41,8 @@ class StackLoc(AssemblerLocation): _immutable_ = True + _location_code = 'b' + def __init__(self, position, ebp_offset, num_words, type): assert ebp_offset < 0 # so no confusion with RegLoc.value self.position = position @@ -49,9 +54,6 @@ def __repr__(self): return '%d(%%ebp)' % (self.value,) - def location_code(self): - return 'b' - def assembler(self): return repr(self) @@ -63,8 +65,10 @@ self.is_xmm = is_xmm if self.is_xmm: self.width = 8 + self._location_code = 'x' else: self.width = WORD + self._location_code = 'r' def __repr__(self): if self.is_xmm: return rx86.R.xmmnames[self.value] @@ -79,12 +83,6 @@ assert not self.is_xmm return RegLoc(rx86.high_byte(self.value), False) - def location_code(self): - if self.is_xmm: - return 'x' - else: - return 'r' - def assembler(self): return '%' + repr(self) @@ -97,14 +95,13 @@ class ImmedLoc(AssemblerLocation): _immutable_ = True width = WORD + _location_code = 'i' + def __init__(self, value): from pypy.rpython.lltypesystem import rffi, lltype # force as a real int self.value = rffi.cast(lltype.Signed, value) - def location_code(self): - return 'i' - def getint(self): return self.value @@ -149,9 +146,6 @@ info = getattr(self, attr, '?') return '' % (self._location_code, info) - def location_code(self): - return self._location_code - def value_a(self): return self.loc_a @@ -191,6 +185,7 @@ # we want a width of 8 (... I think. Check this!) _immutable_ = True width = 8 + _location_code = 'j' def __init__(self, address): self.value = address @@ -198,9 +193,6 @@ def __repr__(self): return '' % (self.value,) - def location_code(self): - return 'j' - if IS_X86_32: class FloatImmedLoc(AssemblerLocation): # This stands for an immediate float. It cannot be directly used in @@ -209,6 +201,7 @@ # instead; see below. _immutable_ = True width = 8 + _location_code = '#' # don't use me def __init__(self, floatstorage): self.aslonglong = floatstorage @@ -229,9 +222,6 @@ floatvalue = longlong.getrealfloat(self.aslonglong) return '' % (floatvalue,) - def location_code(self): - raise NotImplementedError - if IS_X86_64: def FloatImmedLoc(floatstorage): from pypy.rlib.longlong2float import float2longlong @@ -270,6 +260,11 @@ else: raise AssertionError(methname + " undefined") +def _missing_binary_insn(name, code1, code2): + raise AssertionError(name + "_" + code1 + code2 + " missing") +_missing_binary_insn._dont_inline_ = True + + class LocationCodeBuilder(object): _mixin_ = True @@ -303,6 +298,8 @@ else: # For this case, we should not need the scratch register more than here. self._load_scratch(val2) + if name == 'MOV' and loc1 is X86_64_SCRATCH_REG: + return # don't need a dummy "MOV r11, r11" INSN(self, loc1, X86_64_SCRATCH_REG) def invoke(self, codes, val1, val2): @@ -310,6 +307,23 @@ _rx86_getattr(self, methname)(val1, val2) invoke._annspecialcase_ = 'specialize:arg(1)' + def has_implementation_for(loc1, loc2): + # A memo function that returns True if there is any NAME_xy that could match. + # If it returns False we know the whole subcase can be omitted from translated + # code. 
Without this hack, the size of most _binaryop INSN functions ends up + # quite large in C code. + if loc1 == '?': + return any([has_implementation_for(loc1, loc2) + for loc1 in unrolling_location_codes]) + methname = name + "_" + loc1 + loc2 + if not hasattr(rx86.AbstractX86CodeBuilder, methname): + return False + # any NAME_j should have a NAME_m as a fallback, too. Check it + if loc1 == 'j': assert has_implementation_for('m', loc2), methname + if loc2 == 'j': assert has_implementation_for(loc1, 'm'), methname + return True + has_implementation_for._annspecialcase_ = 'specialize:memo' + def INSN(self, loc1, loc2): code1 = loc1.location_code() code2 = loc2.location_code() @@ -325,6 +339,8 @@ assert code2 not in ('j', 'i') for possible_code2 in unrolling_location_codes: + if not has_implementation_for('?', possible_code2): + continue if code2 == possible_code2: val2 = getattr(loc2, "value_" + possible_code2)() # @@ -335,28 +351,32 @@ # # Regular case for possible_code1 in unrolling_location_codes: + if not has_implementation_for(possible_code1, + possible_code2): + continue if code1 == possible_code1: val1 = getattr(loc1, "value_" + possible_code1)() # More faking out of certain operations for x86_64 - if possible_code1 == 'j' and not rx86.fits_in_32bits(val1): + fits32 = rx86.fits_in_32bits + if possible_code1 == 'j' and not fits32(val1): val1 = self._addr_as_reg_offset(val1) invoke(self, "m" + possible_code2, val1, val2) - elif possible_code2 == 'j' and not rx86.fits_in_32bits(val2): + return + if possible_code2 == 'j' and not fits32(val2): val2 = self._addr_as_reg_offset(val2) invoke(self, possible_code1 + "m", val1, val2) - elif possible_code1 == 'm' and not rx86.fits_in_32bits(val1[1]): + return + if possible_code1 == 'm' and not fits32(val1[1]): val1 = self._fix_static_offset_64_m(val1) - invoke(self, "a" + possible_code2, val1, val2) - elif possible_code2 == 'm' and not rx86.fits_in_32bits(val2[1]): + if possible_code2 == 'm' and not fits32(val2[1]): val2 = self._fix_static_offset_64_m(val2) - invoke(self, possible_code1 + "a", val1, val2) - else: - if possible_code1 == 'a' and not rx86.fits_in_32bits(val1[3]): - val1 = self._fix_static_offset_64_a(val1) - if possible_code2 == 'a' and not rx86.fits_in_32bits(val2[3]): - val2 = self._fix_static_offset_64_a(val2) - invoke(self, possible_code1 + possible_code2, val1, val2) + if possible_code1 == 'a' and not fits32(val1[3]): + val1 = self._fix_static_offset_64_a(val1) + if possible_code2 == 'a' and not fits32(val2[3]): + val2 = self._fix_static_offset_64_a(val2) + invoke(self, possible_code1 + possible_code2, val1, val2) return + _missing_binary_insn(name, code1, code2) return func_with_new_name(INSN, "INSN_" + name) @@ -431,12 +451,14 @@ def _fix_static_offset_64_m(self, (basereg, static_offset)): # For cases where an AddressLoc has the location_code 'm', but # where the static offset does not fit in 32-bits. We have to fall - # back to the X86_64_SCRATCH_REG. Note that this returns a location - # encoded as mode 'a'. These are all possibly rare cases; don't try + # back to the X86_64_SCRATCH_REG. Returns a new location encoded + # as mode 'm' too. These are all possibly rare cases; don't try # to reuse a past value of the scratch register at all. 
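The scratch-register fallback below is only taken when a displacement cannot be encoded in the instruction directly; the fits_in_32bits() checks earlier in the hunk make that decision, roughly as in this sketch (assuming a signed 32-bit displacement range):

    def fits_in_32bits_sketch(value):
        return -2 ** 31 <= value < 2 ** 31

    print(fits_in_32bits_sketch(0x12345678))          # True: encoded inline
    print(fits_in_32bits_sketch(0xFEDCBA9876543210))  # False: go through r11
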
self._scratch_register_known = False self.MOV_ri(X86_64_SCRATCH_REG.value, static_offset) - return (basereg, X86_64_SCRATCH_REG.value, 0, 0) + self.LEA_ra(X86_64_SCRATCH_REG.value, + (basereg, X86_64_SCRATCH_REG.value, 0, 0)) + return (X86_64_SCRATCH_REG.value, 0) def _fix_static_offset_64_a(self, (basereg, scalereg, scale, static_offset)): diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -745,6 +745,7 @@ assert insnname_template.count('*') == 1 add_insn('x', register(2), '\xC0') add_insn('j', abs_, immediate(2)) + add_insn('m', mem_reg_plus_const(2)) define_pxmm_insn('PADDQ_x*', '\xD4') define_pxmm_insn('PSUBQ_x*', '\xFB') diff --git a/pypy/jit/backend/x86/test/test_fficall.py b/pypy/jit/backend/x86/test/test_fficall.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/x86/test/test_fficall.py @@ -0,0 +1,8 @@ +import py +from pypy.jit.metainterp.test import test_fficall +from pypy.jit.backend.x86.test.test_basic import Jit386Mixin + +class TestFfiLookups(Jit386Mixin, test_fficall.FfiLookupTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_fficall.py + supports_all = True diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -149,6 +149,13 @@ self.cpu.execute_token(loop.token) return loop + def prepare_loop(self, ops): + loop = self.parse(ops) + regalloc = RegAlloc(self.cpu.assembler, False) + regalloc.prepare_loop(loop.inputargs, loop.operations, + loop.token, []) + return regalloc + def getint(self, index): return self.cpu.get_latest_value_int(index) @@ -422,6 +429,35 @@ self.run(loop) assert self.getints(9) == range(9) + def test_loopargs(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_2(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + finish(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_3(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + guard_true(i4) [i0, i1, i2, i3, i4] + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + class TestRegallocCompOps(BaseTestRegalloc): def test_cmp_op_0(self): diff --git a/pypy/jit/backend/x86/test/test_regloc.py b/pypy/jit/backend/x86/test/test_regloc.py --- a/pypy/jit/backend/x86/test/test_regloc.py +++ b/pypy/jit/backend/x86/test/test_regloc.py @@ -146,8 +146,10 @@ expected_instructions = ( # mov r11, 0xFEDCBA9876543210 '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' - # mov rcx, [rdx+r11] - '\x4A\x8B\x0C\x1A' + # lea r11, [rdx+r11] + '\x4E\x8D\x1C\x1A' + # mov rcx, [r11] + '\x49\x8B\x0B' ) assert cb.getvalue() == expected_instructions @@ -174,6 +176,30 @@ # ------------------------------------------------------------ + def test_MOV_64bit_constant_into_r11(self): + base_constant = 0xFEDCBA9876543210 + cb = LocationCodeBuilder64() + cb.MOV(r11, imm(base_constant)) + + expected_instructions = ( + # mov r11, 0xFEDCBA9876543210 + '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + ) + assert cb.getvalue() == expected_instructions + + def test_MOV_64bit_address_into_r11(self): + base_addr = 0xFEDCBA9876543210 + cb = LocationCodeBuilder64() + cb.MOV(r11, heap(base_addr)) + + 
expected_instructions = ( + # mov r11, 0xFEDCBA9876543210 + '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + + # mov r11, [r11] + '\x4D\x8B\x1B' + ) + assert cb.getvalue() == expected_instructions + def test_MOV_immed32_into_64bit_address_1(self): immed = -0x01234567 base_addr = 0xFEDCBA9876543210 @@ -217,8 +243,10 @@ expected_instructions = ( # mov r11, 0xFEDCBA9876543210 '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' - # mov [rdx+r11], -0x01234567 - '\x4A\xC7\x04\x1A\x99\xBA\xDC\xFE' + # lea r11, [rdx+r11] + '\x4E\x8D\x1C\x1A' + # mov [r11], -0x01234567 + '\x49\xC7\x03\x99\xBA\xDC\xFE' ) assert cb.getvalue() == expected_instructions @@ -300,8 +328,10 @@ '\x48\xBA\xEF\xCD\xAB\x89\x67\x45\x23\x01' # mov r11, 0xFEDCBA9876543210 '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' - # mov [rax+r11], rdx - '\x4A\x89\x14\x18' + # lea r11, [rax+r11] + '\x4E\x8D\x1C\x18' + # mov [r11], rdx + '\x49\x89\x13' # pop rdx '\x5A' ) diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -455,6 +455,9 @@ EffectInfo.MOST_GENERAL, ffi_flags=-1) calldescr.get_call_conv = lambda: ffi # <==== hack + # ^^^ we patch get_call_conv() so that the test also makes sense + # on Linux, because clibffi.get_call_conv() would always + # return FFI_DEFAULT_ABI on non-Windows platforms. funcbox = ConstInt(rawstart) i1 = BoxInt() i2 = BoxInt() diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -457,6 +457,46 @@ def test_compile_framework_7(self): self.run('compile_framework_7') + def define_compile_framework_7_interior(cls): + # Array of structs containing pointers (test the write barrier + # for setinteriorfield_gc) + S = lltype.GcStruct('S', ('i', lltype.Signed)) + A = lltype.GcArray(lltype.Struct('entry', ('x', lltype.Ptr(S)), + ('y', lltype.Ptr(S)), + ('z', lltype.Ptr(S)))) + class Glob: + a = lltype.nullptr(A) + glob = Glob() + # + def make_s(i): + s = lltype.malloc(S) + s.i = i + return s + # + @unroll_safe + def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): + a = glob.a + if not a: + a = glob.a = lltype.malloc(A, 10) + i = 0 + while i < 10: + a[i].x = make_s(n + i * 100 + 1) + a[i].y = make_s(n + i * 100 + 2) + a[i].z = make_s(n + i * 100 + 3) + i += 1 + i = 0 + while i < 10: + check(a[i].x.i == n + i * 100 + 1) + check(a[i].y.i == n + i * 100 + 2) + check(a[i].z.i == n + i * 100 + 3) + i += 1 + n -= x.foo + return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s + return None, f, None + + def test_compile_framework_7_interior(self): + self.run('compile_framework_7_interior') + def define_compile_framework_8(cls): # Array of pointers, of unknown length (test write_barrier_from_array) def before(n, x): diff --git a/pypy/jit/backend/x86/test/test_ztranslation.py b/pypy/jit/backend/x86/test/test_ztranslation.py --- a/pypy/jit/backend/x86/test/test_ztranslation.py +++ b/pypy/jit/backend/x86/test/test_ztranslation.py @@ -1,6 +1,6 @@ import py, os, sys from pypy.tool.udir import udir -from pypy.rlib.jit import JitDriver, unroll_parameters +from pypy.rlib.jit import JitDriver, unroll_parameters, set_param from pypy.rlib.jit import PARAMETERS, dont_look_inside from pypy.rlib.jit import promote from pypy.jit.metainterp.jitprof import Profiler @@ -47,9 +47,9 @@ def f(i, j): for param, _ in unroll_parameters: defl = PARAMETERS[param] - jitdriver.set_param(param, defl) - 
jitdriver.set_param("threshold", 3) - jitdriver.set_param("trace_eagerness", 2) + set_param(jitdriver, param, defl) + set_param(jitdriver, "threshold", 3) + set_param(jitdriver, "trace_eagerness", 2) total = 0 frame = Frame(i) while frame.i > 3: @@ -213,8 +213,8 @@ else: return Base() def myportal(i): - jitdriver.set_param("threshold", 3) - jitdriver.set_param("trace_eagerness", 2) + set_param(jitdriver, "threshold", 3) + set_param(jitdriver, "trace_eagerness", 2) total = 0 n = i while True: diff --git a/pypy/jit/backend/x86/tool/viewcode.py b/pypy/jit/backend/x86/tool/viewcode.py --- a/pypy/jit/backend/x86/tool/viewcode.py +++ b/pypy/jit/backend/x86/tool/viewcode.py @@ -58,7 +58,7 @@ assert not p.returncode, ('Encountered an error running objdump: %s' % stderr) # drop some objdump cruft - lines = stdout.splitlines()[6:] + lines = stdout.splitlines(True)[6:] # drop some objdump cruft return format_code_dump_with_labels(originaddr, lines, label_list) def format_code_dump_with_labels(originaddr, lines, label_list): @@ -97,7 +97,7 @@ stdout, stderr = p.communicate() assert not p.returncode, ('Encountered an error running nm: %s' % stderr) - for line in stdout.splitlines(): + for line in stdout.splitlines(True): match = re_symbolentry.match(line) if match: addr = long(match.group(1), 16) diff --git a/pypy/jit/codewriter/call.py b/pypy/jit/codewriter/call.py --- a/pypy/jit/codewriter/call.py +++ b/pypy/jit/codewriter/call.py @@ -212,7 +212,10 @@ elidable = False loopinvariant = False if op.opname == "direct_call": - func = getattr(get_funcobj(op.args[0].value), '_callable', None) + funcobj = get_funcobj(op.args[0].value) + assert getattr(funcobj, 'calling_conv', 'c') == 'c', ( + "%r: getcalldescr() with a non-default call ABI" % (op,)) + func = getattr(funcobj, '_callable', None) elidable = getattr(func, "_elidable_function_", False) loopinvariant = getattr(func, "_jit_loop_invariant_", False) if loopinvariant: diff --git a/pypy/jit/codewriter/codewriter.py b/pypy/jit/codewriter/codewriter.py --- a/pypy/jit/codewriter/codewriter.py +++ b/pypy/jit/codewriter/codewriter.py @@ -104,6 +104,8 @@ else: name = 'unnamed' % id(ssarepr) i = 1 + # escape names for windows + name = name.replace('', '_(lambda)_') extra = '' while name+extra in self._seen_files: i += 1 diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -48,6 +48,8 @@ OS_LIBFFI_PREPARE = 60 OS_LIBFFI_PUSH_ARG = 61 OS_LIBFFI_CALL = 62 + OS_LIBFFI_GETARRAYITEM = 63 + OS_LIBFFI_SETARRAYITEM = 64 # OS_LLONG_INVERT = 69 OS_LLONG_ADD = 70 @@ -78,6 +80,9 @@ # OS_MATH_SQRT = 100 + # for debugging: + _OS_CANRAISE = set([OS_NONE, OS_STR2UNICODE, OS_LIBFFI_CALL]) + def __new__(cls, readonly_descrs_fields, readonly_descrs_arrays, write_descrs_fields, write_descrs_arrays, extraeffect=EF_CAN_RAISE, @@ -116,6 +121,8 @@ result.extraeffect = extraeffect result.can_invalidate = can_invalidate result.oopspecindex = oopspecindex + if result.check_can_raise(): + assert oopspecindex in cls._OS_CANRAISE cls._cache[key] = result return result @@ -125,6 +132,10 @@ def check_can_invalidate(self): return self.can_invalidate + def check_is_elidable(self): + return (self.extraeffect == self.EF_ELIDABLE_CAN_RAISE or + self.extraeffect == self.EF_ELIDABLE_CANNOT_RAISE) + def check_forces_virtual_or_virtualizable(self): return self.extraeffect >= self.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE @@ -230,12 +241,15 @@ return op.opname == 'jit_force_quasi_immutable' class 
RandomEffectsAnalyzer(BoolGraphAnalyzer): - def analyze_direct_call(self, graph, seen=None): - if hasattr(graph, "func") and hasattr(graph.func, "_ptr"): - if graph.func._ptr._obj.random_effects_on_gcobjs: + def analyze_external_call(self, op, seen=None): + try: + funcobj = op.args[0].value._obj + if funcobj.random_effects_on_gcobjs: return True - return super(RandomEffectsAnalyzer, self).analyze_direct_call(graph, - seen) + except (AttributeError, lltype.DelayedPointer): + return True # better safe than sorry + return super(RandomEffectsAnalyzer, self).analyze_external_call( + op, seen) def analyze_simple_operation(self, op, graphinfo): return False diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -15,6 +15,8 @@ from pypy.translator.simplify import get_funcobj from pypy.translator.unsimplify import varoftype +class UnsupportedMallocFlags(Exception): + pass def transform_graph(graph, cpu=None, callcontrol=None, portal_jd=None): """Transform a control flow graph to make it suitable for @@ -205,7 +207,24 @@ if op.args[0] in self.vable_array_vars: self.vable_array_vars[op.result]= self.vable_array_vars[op.args[0]] - rewrite_op_cast_pointer = rewrite_op_same_as + def rewrite_op_cast_pointer(self, op): + newop = self.rewrite_op_same_as(op) + assert newop is None + return + # disabled for now + if (self._is_rclass_instance(op.args[0]) and + self._is_rclass_instance(op.result)): + FROM = op.args[0].concretetype.TO + TO = op.result.concretetype.TO + if lltype._castdepth(TO, FROM) > 0: + vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, TO) + const_vtable = Constant(vtable, lltype.typeOf(vtable)) + return [None, # hack, do the right renaming from op.args[0] to op.result + SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + + def rewrite_op_jit_record_known_class(self, op): + return SpaceOperation("record_known_class", [op.args[0], op.args[1]], None) + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass @@ -481,8 +500,22 @@ def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) ARRAY = op.args[0].value - return self._do_builtin_call(op, 'raw_malloc', + name = 'raw_malloc' + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[2]], extra = (ARRAY,), extrakey = ARRAY) @@ -1053,35 +1086,20 @@ # jit.codewriter.support. 
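The malloc_varsize rewrite above maps the raw-malloc flags dict onto one of the new _ll_1_raw_malloc_* support helpers by appending one name suffix per flag. A minimal stand-alone sketch of that naming scheme follows; the helper name raw_malloc_funcname is invented purely for this illustration and is not part of the diff:

    class UnsupportedMallocFlags(Exception):
        pass

    def raw_malloc_funcname(flags):
        d = dict(flags)
        d.pop('flavor')                              # always 'raw' when we get here
        add_memory_pressure = d.pop('add_memory_pressure', False)
        zero = d.pop('zero', False)
        track_allocation = d.pop('track_allocation', True)
        if d:
            raise UnsupportedMallocFlags(d)          # any leftover flag is rejected
        name = 'raw_malloc'
        if zero:
            name += '_zero'
        if add_memory_pressure:
            name += '_add_memory_pressure'
        if not track_allocation:
            name += '_no_track_allocation'
        return name

    assert raw_malloc_funcname({'flavor': 'raw'}) == 'raw_malloc'
    assert raw_malloc_funcname({'flavor': 'raw', 'zero': True}) == 'raw_malloc_zero'

This mirrors the eight build_ll_1_raw_malloc_* variants added to support.py and the new test_raw_malloc* tests further down.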
for _op, _oopspec in [('llong_invert', 'INVERT'), - ('ullong_invert', 'INVERT'), ('llong_lt', 'LT'), ('llong_le', 'LE'), ('llong_eq', 'EQ'), ('llong_ne', 'NE'), ('llong_gt', 'GT'), ('llong_ge', 'GE'), - ('ullong_lt', 'ULT'), - ('ullong_le', 'ULE'), - ('ullong_eq', 'EQ'), - ('ullong_ne', 'NE'), - ('ullong_gt', 'UGT'), - ('ullong_ge', 'UGE'), ('llong_add', 'ADD'), ('llong_sub', 'SUB'), ('llong_mul', 'MUL'), ('llong_and', 'AND'), ('llong_or', 'OR'), ('llong_xor', 'XOR'), - ('ullong_add', 'ADD'), - ('ullong_sub', 'SUB'), - ('ullong_mul', 'MUL'), - ('ullong_and', 'AND'), - ('ullong_or', 'OR'), - ('ullong_xor', 'XOR'), ('llong_lshift', 'LSHIFT'), ('llong_rshift', 'RSHIFT'), - ('ullong_lshift', 'LSHIFT'), - ('ullong_rshift', 'URSHIFT'), ('cast_int_to_longlong', 'FROM_INT'), ('truncate_longlong_to_int', 'TO_INT'), ('cast_float_to_longlong', 'FROM_FLOAT'), @@ -1104,6 +1122,21 @@ ('cast_uint_to_ulonglong', 'FROM_UINT'), ('cast_float_to_ulonglong', 'FROM_FLOAT'), ('cast_ulonglong_to_float', 'U_TO_FLOAT'), + ('ullong_invert', 'INVERT'), + ('ullong_lt', 'ULT'), + ('ullong_le', 'ULE'), + ('ullong_eq', 'EQ'), + ('ullong_ne', 'NE'), + ('ullong_gt', 'UGT'), + ('ullong_ge', 'UGE'), + ('ullong_add', 'ADD'), + ('ullong_sub', 'SUB'), + ('ullong_mul', 'MUL'), + ('ullong_and', 'AND'), + ('ullong_or', 'OR'), + ('ullong_xor', 'XOR'), + ('ullong_lshift', 'LSHIFT'), + ('ullong_rshift', 'URSHIFT'), ]: exec py.code.Source(''' def rewrite_op_%s(self, op): @@ -1134,7 +1167,7 @@ def rewrite_op_llong_is_true(self, op): v = varoftype(op.args[0].concretetype) - op0 = SpaceOperation('cast_int_to_longlong', + op0 = SpaceOperation('cast_primitive', [Constant(0, lltype.Signed)], v) args = [op.args[0], v] @@ -1615,6 +1648,12 @@ elif oopspec_name.startswith('libffi_call_'): oopspecindex = EffectInfo.OS_LIBFFI_CALL extraeffect = EffectInfo.EF_RANDOM_EFFECTS + elif oopspec_name == 'libffi_array_getitem': + oopspecindex = EffectInfo.OS_LIBFFI_GETARRAYITEM + extraeffect = EffectInfo.EF_CANNOT_RAISE + elif oopspec_name == 'libffi_array_setitem': + oopspecindex = EffectInfo.OS_LIBFFI_SETARRAYITEM + extraeffect = EffectInfo.EF_CANNOT_RAISE else: assert False, 'unsupported oopspec: %s' % oopspec_name return self._handle_oopspec_call(op, args, oopspecindex, extraeffect) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -37,9 +37,11 @@ return a.typeannotation(t) def annotate(func, values, inline=None, backendoptimize=True, - type_system="lltype"): + type_system="lltype", translationoptions={}): # build the normal ll graphs for ll_function t = TranslationContext() + for key, value in translationoptions.items(): + setattr(t.config.translation, key, value) annpolicy = AnnotatorPolicy() annpolicy.allow_someobjects = False a = t.buildannotator(policy=annpolicy) @@ -256,6 +258,9 @@ y = ~r_ulonglong(xll) return u_to_longlong(y) +def _ll_1_ullong_invert(xull): + return ~xull + def _ll_2_llong_lt(xll, yll): return xll < yll @@ -274,16 +279,22 @@ def _ll_2_llong_ge(xll, yll): return xll >= yll -def _ll_2_llong_ult(xull, yull): +def _ll_2_ullong_eq(xull, yull): + return xull == yull + +def _ll_2_ullong_ne(xull, yull): + return xull != yull + +def _ll_2_ullong_ult(xull, yull): return xull < yull -def _ll_2_llong_ule(xull, yull): +def _ll_2_ullong_ule(xull, yull): return xull <= yull -def _ll_2_llong_ugt(xull, yull): +def _ll_2_ullong_ugt(xull, yull): return xull > yull -def _ll_2_llong_uge(xull, yull): +def _ll_2_ullong_uge(xull, yull): return xull 
>= yull def _ll_2_llong_add(xll, yll): @@ -310,14 +321,41 @@ z = r_ulonglong(xll) ^ r_ulonglong(yll) return u_to_longlong(z) +def _ll_2_ullong_add(xull, yull): + z = (xull) + (yull) + return (z) + +def _ll_2_ullong_sub(xull, yull): + z = (xull) - (yull) + return (z) + +def _ll_2_ullong_mul(xull, yull): + z = (xull) * (yull) + return (z) + +def _ll_2_ullong_and(xull, yull): + z = (xull) & (yull) + return (z) + +def _ll_2_ullong_or(xull, yull): + z = (xull) | (yull) + return (z) + +def _ll_2_ullong_xor(xull, yull): + z = (xull) ^ (yull) + return (z) + def _ll_2_llong_lshift(xll, y): z = r_ulonglong(xll) << y return u_to_longlong(z) +def _ll_2_ullong_lshift(xull, y): + return xull << y + def _ll_2_llong_rshift(xll, y): return xll >> y -def _ll_2_llong_urshift(xull, y): +def _ll_2_ullong_urshift(xull, y): return xull >> y def _ll_1_llong_from_int(x): @@ -561,10 +599,21 @@ return p return _ll_0_alloc_with_del - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, flavor='raw') - return _ll_1_raw_malloc + def build_raw_malloc_builder(zero=False, add_memory_pressure=False, track_allocation=True): + def build_ll_1_raw_malloc(ARRAY): + def _ll_1_raw_malloc(n): + return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, add_memory_pressure=add_memory_pressure) + return _ll_1_raw_malloc + return build_ll_1_raw_malloc + + build_ll_1_raw_malloc = build_raw_malloc_builder() + build_ll_1_raw_malloc_zero = build_raw_malloc_builder(zero=True) + build_ll_1_raw_malloc_zero_add_memory_pressure = build_raw_malloc_builder(zero=True, add_memory_pressure=True) + build_ll_1_raw_malloc_add_memory_pressure = build_raw_malloc_builder(add_memory_pressure=True) + build_ll_1_raw_malloc_no_track_allocation = build_raw_malloc_builder(track_allocation=False) + build_ll_1_raw_malloc_zero_no_track_allocation = build_raw_malloc_builder(zero=True, track_allocation=False) + build_ll_1_raw_malloc_zero_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(zero=True, add_memory_pressure=True, track_allocation=False) + build_ll_1_raw_malloc_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(add_memory_pressure=True, track_allocation=False) def build_ll_1_raw_free(ARRAY): def _ll_1_raw_free(p): diff --git a/pypy/jit/codewriter/test/test_call.py b/pypy/jit/codewriter/test/test_call.py --- a/pypy/jit/codewriter/test/test_call.py +++ b/pypy/jit/codewriter/test/test_call.py @@ -192,3 +192,21 @@ [op] = block.operations call_descr = cc.getcalldescr(op) assert call_descr.extrainfo.has_random_effects() + +def test_random_effects_on_stacklet_switch(): + from pypy.jit.backend.llgraph.runner import LLtypeCPU + from pypy.rlib._rffi_stacklet import switch, thread_handle, handle + @jit.dont_look_inside + def f(): + switch(rffi.cast(thread_handle, 0), rffi.cast(handle, 0)) + + rtyper = support.annotate(f, []) + jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0]) + cc = CallControl(LLtypeCPU(rtyper), jitdrivers_sd=[jitdriver_sd]) + res = cc.find_all_graphs(FakePolicy()) + + [f_graph] = [x for x in res if x.func is f] + [block, _] = list(f_graph.iterblocks()) + op = block.operations[-1] + call_descr = cc.getcalldescr(op) + assert call_descr.extrainfo.has_random_effects() diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ b/pypy/jit/codewriter/test/test_flatten.py @@ -5,7 +5,7 @@ from pypy.jit.codewriter.format import assert_format from pypy.jit.codewriter import 
longlong from pypy.jit.metainterp.history import AbstractDescr -from pypy.rpython.lltypesystem import lltype, rclass, rstr +from pypy.rpython.lltypesystem import lltype, rclass, rstr, rffi from pypy.objspace.flow.model import SpaceOperation, Variable, Constant from pypy.translator.unsimplify import varoftype from pypy.rlib.rarithmetic import ovfcheck, r_uint, r_longlong, r_ulonglong @@ -743,7 +743,6 @@ """, transform=True) def test_force_cast(self): - from pypy.rpython.lltypesystem import rffi # NB: we don't need to test for INT here, the logic in jtransform is # general enough so that if we have the below cases it should # generalize also to INT @@ -849,7 +848,6 @@ transform=True) def test_force_cast_pointer(self): - from pypy.rpython.lltypesystem import rffi def h(p): return rffi.cast(rffi.VOIDP, p) self.encoding_test(h, [lltype.nullptr(rffi.CCHARP.TO)], """ @@ -857,7 +855,6 @@ """, transform=True) def test_force_cast_floats(self): - from pypy.rpython.lltypesystem import rffi # Caststs to lltype.Float def f(n): return rffi.cast(lltype.Float, n) @@ -964,7 +961,6 @@ """, transform=True) def test_direct_ptradd(self): - from pypy.rpython.lltypesystem import rffi def f(p, n): return lltype.direct_ptradd(p, n) self.encoding_test(f, [lltype.nullptr(rffi.CCHARP.TO), 123], """ @@ -975,7 +971,6 @@ def check_force_cast(FROM, TO, operations, value): """Check that the test is correctly written...""" - from pypy.rpython.lltypesystem import rffi import re r = re.compile('(\w+) \%i\d, \$(-?\d+)') # diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -1,3 +1,5 @@ + +import py import random try: from itertools import product @@ -15,12 +17,12 @@ from pypy.objspace.flow.model import FunctionGraph, Block, Link from pypy.objspace.flow.model import SpaceOperation, Variable, Constant -from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.lltypesystem.module import ll_math from pypy.translator.unsimplify import varoftype from pypy.jit.codewriter import heaptracker, effectinfo from pypy.jit.codewriter.flatten import ListOfKind -from pypy.jit.codewriter.jtransform import Transformer +from pypy.jit.codewriter.jtransform import Transformer, UnsupportedMallocFlags from pypy.jit.metainterp.history import getkind def const(x): @@ -538,6 +540,44 @@ assert op1.opname == '-live-' assert op1.args == [] +def test_raw_malloc(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw'}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_zero(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 
'raw_malloc_zero' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_unsupported_flag(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'unsupported_flag': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address @@ -1140,4 +1180,4 @@ assert op1.opname == 'mark_opaque_ptr' assert op1.args == [v1] assert op1.result is None - assert op2 is None \ No newline at end of file + assert op2 is None diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py --- a/pypy/jit/codewriter/test/test_longlong.py +++ b/pypy/jit/codewriter/test/test_longlong.py @@ -78,7 +78,7 @@ oplist = tr.rewrite_operation(op) assert len(oplist) == 2 assert oplist[0].opname == 'residual_call_irf_f' - assert oplist[0].args[0].value == 'llong_from_int' + assert oplist[0].args[0].value == opname.split('_')[0]+'_from_int' assert oplist[0].args[1] == 'calldescr-84' assert list(oplist[0].args[2]) == [const(0)] assert list(oplist[0].args[3]) == [] diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -518,6 +518,9 @@ @arguments("r") def bhimpl_mark_opaque_ptr(a): pass + @arguments("r", "i") + def bhimpl_record_known_class(a, b): + pass @arguments("i", returns="i") def bhimpl_int_copy(a): diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -298,7 +298,7 @@ pass class ResumeGuardDescr(ResumeDescr): - _counter = 0 # if < 0, there is one counter per value; + _counter = 0 # on a GUARD_VALUE, there is one counter per value; _counters = None # they get stored in _counters then. 
# this class also gets the following attributes stored by resume.py code @@ -309,10 +309,13 @@ rd_virtuals = None rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) - CNT_INT = -0x20000000 - CNT_REF = -0x40000000 - CNT_FLOAT = -0x60000000 - CNT_MASK = 0x1FFFFFFF + CNT_BASE_MASK = 0x0FFFFFFF # the base counter value + CNT_BUSY_FLAG = 0x10000000 # if set, busy tracing from the guard + CNT_TYPE_MASK = 0x60000000 # mask for the type + + CNT_INT = 0x20000000 + CNT_REF = 0x40000000 + CNT_FLOAT = 0x60000000 def store_final_boxes(self, guard_op, boxes): guard_op.setfailargs(boxes) @@ -326,6 +329,8 @@ except ValueError: return # xxx probably very rare else: + if i > self.CNT_BASE_MASK: + return # probably never, but better safe than sorry if box.type == history.INT: cnt = self.CNT_INT elif box.type == history.REF: @@ -334,14 +339,17 @@ cnt = self.CNT_FLOAT else: assert 0, box.type - # we build the following value for _counter, which is always - # a negative value + assert cnt > self.CNT_BASE_MASK self._counter = cnt | i def handle_fail(self, metainterp_sd, jitdriver_sd): if self.must_compile(metainterp_sd, jitdriver_sd): - return self._trace_and_compile_from_bridge(metainterp_sd, - jitdriver_sd) + self.start_compiling() + try: + return self._trace_and_compile_from_bridge(metainterp_sd, + jitdriver_sd) + finally: + self.done_compiling() else: from pypy.jit.metainterp.blackhole import resume_in_blackhole resume_in_blackhole(metainterp_sd, jitdriver_sd, self) @@ -359,12 +367,22 @@ def must_compile(self, metainterp_sd, jitdriver_sd): trace_eagerness = jitdriver_sd.warmstate.trace_eagerness - if self._counter >= 0: + # + if self._counter <= self.CNT_BASE_MASK: + # simple case: just counting from 0 to trace_eagerness self._counter += 1 return self._counter >= trace_eagerness - else: - index = self._counter & self.CNT_MASK - typetag = self._counter & ~ self.CNT_MASK + # + # do we have the BUSY flag? If so, we're tracing right now, e.g. in an + # outer invocation of the same function, so don't trace again for now. + elif self._counter & self.CNT_BUSY_FLAG: + return False + # + else: # we have a GUARD_VALUE that fails. Make a _counters instance + # (only now, when the guard is actually failing at least once), + # and use it to record some statistics about the failing values. + index = self._counter & self.CNT_BASE_MASK + typetag = self._counter & self.CNT_TYPE_MASK counters = self._counters if typetag == self.CNT_INT: intvalue = metainterp_sd.cpu.get_latest_value_int(index) @@ -391,7 +409,16 @@ assert 0, typetag return counter >= trace_eagerness - def reset_counter_from_failure(self): + def start_compiling(self): + # start tracing and compiling from this guard. + self._counter |= self.CNT_BUSY_FLAG + + def done_compiling(self): + # done tracing and compiling from this guard. Either the bridge has + # been successfully compiled, in which case whatever value we store + # in self._counter will not be seen any more, or not, in which case + # we should reset the counter to 0, in order to wait a bit until the + # next attempt. 
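The new _counter encoding above packs all of a guard's profiling state into a single word: the low 28 bits (CNT_BASE_MASK) hold either the plain failure counter or, for a failing GUARD_VALUE, the index of the value being profiled; CNT_BUSY_FLAG marks a guard whose bridge is currently being traced; the two CNT_TYPE_MASK bits record whether that value is an int, a ref or a float. A small sketch of the packing, with the constants copied from the diff and the pack/unpack helpers invented only for this illustration:

    CNT_BASE_MASK = 0x0FFFFFFF   # plain counter, or index of the profiled failarg
    CNT_BUSY_FLAG = 0x10000000   # set while a bridge is being traced from this guard
    CNT_TYPE_MASK = 0x60000000   # type of the profiled value, for GUARD_VALUE
    CNT_INT, CNT_REF, CNT_FLOAT = 0x20000000, 0x40000000, 0x60000000

    def pack(typetag, index):
        assert 0 <= index <= CNT_BASE_MASK
        assert typetag in (CNT_INT, CNT_REF, CNT_FLOAT)
        return typetag | index       # what store_final_boxes() stores in _counter

    def unpack(counter):
        return counter & CNT_TYPE_MASK, counter & CNT_BASE_MASK

    c = pack(CNT_INT, 42)
    assert unpack(c) == (CNT_INT, 42)
    assert c > CNT_BASE_MASK         # so must_compile() takes the GUARD_VALUE path
    assert not (c & CNT_BUSY_FLAG)   # no bridge is being compiled right now

Because any packed value is larger than CNT_BASE_MASK, must_compile() can tell a plain counter from a GUARD_VALUE profile with a single comparison.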
if self._counter >= 0: self._counter = 0 self._counters = None @@ -608,9 +635,6 @@ metainterp.set_compiled_merge_points(self.original_greenkey, old_loop_tokens) - def reset_counter_from_failure(self): - pass - def compile_new_bridge(metainterp, old_loop_tokens, resumekey, retraced=False): """Try to compile a new bridge leading from the beginning of the history diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -340,6 +340,8 @@ rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, rop.SETARRAYITEM_RAW, + rop.GETINTERIORFIELD_RAW, + rop.SETINTERIORFIELD_RAW, rop.CALL_RELEASE_GIL, rop.QUASIIMMUT_FIELD, ): # list of opcodes never executed by pyjitpl diff --git a/pypy/jit/metainterp/gc.py b/pypy/jit/metainterp/gc.py --- a/pypy/jit/metainterp/gc.py +++ b/pypy/jit/metainterp/gc.py @@ -7,6 +7,9 @@ self.config = config +class GC_none(GcDescription): + malloc_zero_filled = True + class GC_boehm(GcDescription): malloc_zero_filled = True diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -999,13 +999,13 @@ "found %d %r, expected %d" % (found, insn, expected_count)) return insns - def check_loops(self, expected=None, everywhere=False, **check): + def check_resops(self, expected=None, **check): insns = {} for loop in self.loops: - if not everywhere: - if getattr(loop, '_ignore_during_counting', False): - continue insns = loop.summary(adding_insns=insns) + return self._check_insns(insns, expected, check) + + def _check_insns(self, insns, expected, check): if expected is not None: insns.pop('debug_merge_point', None) assert insns == expected @@ -1016,6 +1016,25 @@ "found %d %r, expected %d" % (found, insn, expected_count)) return insns + def check_simple_loop(self, expected=None, **check): + # Usefull in the simplest case when we have only one trace ending with + # a jump back to itself and possibly a few bridges ending with finnish. + # Only the operations within the loop formed by that single jump will + # be counted. + + # XXX hacked version, ignore and remove me when jit-targets is merged. + loops = self.get_all_loops() + loops = [loop for loop in loops if 'Preamble' not in repr(loop)] #XXX + assert len(loops) == 1 + loop, = loops + jumpop = loop.operations[-1] + assert jumpop.getopnum() == rop.JUMP + insns = {} + for op in loop.operations: + opname = op.getopname() + insns[opname] = insns.get(opname, 0) + 1 + return self._check_insns(insns, expected, check) + def check_consistency(self): "NOT_RPYTHON" for loop in self.loops: diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -55,7 +55,7 @@ def optimize_loop_1(metainterp_sd, loop, enable_opts, - inline_short_preamble=True, retraced=False, bridge=False): + inline_short_preamble=True, retraced=False): """Optimize loop.operations to remove internal overheadish operations. 
""" @@ -64,7 +64,7 @@ if unroll: optimize_unroll(metainterp_sd, loop, optimizations) else: - optimizer = Optimizer(metainterp_sd, loop, optimizations, bridge) + optimizer = Optimizer(metainterp_sd, loop, optimizations) optimizer.propagate_all_forward() def optimize_bridge_1(metainterp_sd, bridge, enable_opts, @@ -76,7 +76,7 @@ except KeyError: pass optimize_loop_1(metainterp_sd, bridge, enable_opts, - inline_short_preamble, retraced, bridge=True) + inline_short_preamble, retraced) if __name__ == '__main__': print ALL_OPTS_NAMES diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -1,11 +1,13 @@ +from pypy.jit.codewriter.effectinfo import EffectInfo +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization +from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method +from pypy.jit.metainterp.resoperation import rop, ResOperation +from pypy.rlib import clibffi, libffi +from pypy.rlib.debug import debug_print +from pypy.rlib.libffi import Func +from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.annlowlevel import cast_base_ptr_to_instance -from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.libffi import Func -from pypy.rlib.debug import debug_print -from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.metainterp.resoperation import rop, ResOperation -from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method -from pypy.jit.metainterp.optimizeopt.optimizer import Optimization +from pypy.rpython.lltypesystem import llmemory, rffi class FuncInfo(object): @@ -78,7 +80,7 @@ def new(self): return OptFfiCall() - + def begin_optimization(self, funcval, op): self.rollback_maybe('begin_optimization', op) self.funcinfo = FuncInfo(funcval, self.optimizer.cpu, op) @@ -116,6 +118,9 @@ ops = self.do_push_arg(op) elif oopspec == EffectInfo.OS_LIBFFI_CALL: ops = self.do_call(op) + elif (oopspec == EffectInfo.OS_LIBFFI_GETARRAYITEM or + oopspec == EffectInfo.OS_LIBFFI_SETARRAYITEM): + ops = self.do_getsetarrayitem(op, oopspec) # for op in ops: self.emit_operation(op) @@ -190,6 +195,56 @@ ops.append(newop) return ops + def do_getsetarrayitem(self, op, oopspec): + ffitypeval = self.getvalue(op.getarg(1)) + widthval = self.getvalue(op.getarg(2)) + offsetval = self.getvalue(op.getarg(5)) + if not ffitypeval.is_constant() or not widthval.is_constant() or not offsetval.is_constant(): + return [op] + + ffitypeaddr = ffitypeval.box.getaddr() + ffitype = llmemory.cast_adr_to_ptr(ffitypeaddr, clibffi.FFI_TYPE_P) + offset = offsetval.box.getint() + width = widthval.box.getint() + descr = self._get_interior_descr(ffitype, width, offset) + + arglist = [ + self.getvalue(op.getarg(3)).force_box(self.optimizer), + self.getvalue(op.getarg(4)).force_box(self.optimizer), + ] + if oopspec == EffectInfo.OS_LIBFFI_GETARRAYITEM: + opnum = rop.GETINTERIORFIELD_RAW + elif oopspec == EffectInfo.OS_LIBFFI_SETARRAYITEM: + opnum = rop.SETINTERIORFIELD_RAW + arglist.append(self.getvalue(op.getarg(6)).force_box(self.optimizer)) + else: + assert False + return [ + ResOperation(opnum, arglist, op.result, descr=descr), + ] + + def _get_interior_descr(self, ffitype, width, offset): + kind = libffi.types.getkind(ffitype) + is_pointer = is_float = is_signed = False + if ffitype is libffi.types.pointer: + is_pointer = True + elif kind == 'i': + is_signed = True + elif kind == 'f' or kind == 'I' or kind == 'U': + # 
longlongs are treated as floats, see + # e.g. llsupport/descr.py:getDescrClass + is_float = True + elif kind == 'u': + # they're all False + pass + else: + assert False, "unsupported ffitype or kind" + # + fieldsize = rffi.getintfield(ffitype, 'c_size') + return self.optimizer.cpu.interiorfielddescrof_dynamic( + offset, width, fieldsize, is_pointer, is_float, is_signed + ) + def propagate_forward(self, op): if self.logops is not None: debug_print(self.logops.repr_of_resop(op)) diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -43,7 +43,7 @@ optheap.optimizer.ensure_imported(cached_fieldvalue) cached_fieldvalue = self._cached_fields.get(structvalue, None) - if cached_fieldvalue is not fieldvalue: + if not fieldvalue.same_value(cached_fieldvalue): # common case: store the 'op' as lazy_setfield, and register # myself in the optheap's _lazy_setfields_and_arrayitems list self._lazy_setfield = op @@ -140,6 +140,15 @@ getop = ResOperation(rop.GETFIELD_GC, [op.getarg(0)], result, op.getdescr()) shortboxes.add_potential(getop, synthetic=True) + if op.getopnum() == rop.SETARRAYITEM_GC: + result = op.getarg(2) + if isinstance(result, Const): + newresult = result.clonebox() + optimizer.make_constant(newresult, result) + result = newresult + getop = ResOperation(rop.GETARRAYITEM_GC, [op.getarg(0), op.getarg(1)], + result, op.getdescr()) + shortboxes.add_potential(getop, synthetic=True) elif op.result is not None: shortboxes.add_potential(op) @@ -225,7 +234,7 @@ or op.is_ovf()): self.posponedop = op else: - self.next_optimization.propagate_forward(op) + Optimization.emit_operation(self, op) def emitting_operation(self, op): if op.has_no_side_effect(): diff --git a/pypy/jit/metainterp/optimizeopt/intbounds.py b/pypy/jit/metainterp/optimizeopt/intbounds.py --- a/pypy/jit/metainterp/optimizeopt/intbounds.py +++ b/pypy/jit/metainterp/optimizeopt/intbounds.py @@ -1,3 +1,4 @@ +import sys from pypy.jit.metainterp.optimizeopt.optimizer import Optimization, CONST_1, CONST_0, \ MODE_ARRAY, MODE_STR, MODE_UNICODE from pypy.jit.metainterp.history import ConstInt @@ -5,36 +6,18 @@ IntUpperBound) from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method from pypy.jit.metainterp.resoperation import rop +from pypy.jit.metainterp.optimize import InvalidLoop +from pypy.rlib.rarithmetic import LONG_BIT class OptIntBounds(Optimization): """Keeps track of the bounds placed on integers by guards and remove redundant guards""" - def setup(self): - self.posponedop = None - self.nextop = None - def new(self): - assert self.posponedop is None return OptIntBounds() - - def flush(self): - assert self.posponedop is None - - def setup(self): - self.posponedop = None - self.nextop = None def propagate_forward(self, op): - if op.is_ovf(): - self.posponedop = op - return - if self.posponedop: - self.nextop = op - op = self.posponedop - self.posponedop = None - dispatch_opt(self, op) def opt_default(self, op): @@ -126,14 +109,29 @@ r.intbound.intersect(v1.intbound.div_bound(v2.intbound)) def optimize_INT_MOD(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + known_nonneg = (v1.intbound.known_ge(IntBound(0, 0)) and + v2.intbound.known_ge(IntBound(0, 0))) + if known_nonneg and v2.is_constant(): + val = v2.box.getint() + if (val & (val-1)) == 0: + # nonneg % power-of-two ==> nonneg & (power-of-two - 1) + arg1 = op.getarg(0) + arg2 = ConstInt(val-1) + op = 
op.copy_and_change(rop.INT_AND, args=[arg1, arg2]) self.emit_operation(op) - v2 = self.getvalue(op.getarg(1)) if v2.is_constant(): val = v2.box.getint() r = self.getvalue(op.result) if val < 0: + if val == -sys.maxint-1: + return # give up val = -val - r.intbound.make_gt(IntBound(-val, -val)) + if known_nonneg: + r.intbound.make_ge(IntBound(0, 0)) + else: + r.intbound.make_gt(IntBound(-val, -val)) r.intbound.make_lt(IntBound(val, val)) def optimize_INT_LSHIFT(self, op): @@ -153,72 +151,84 @@ def optimize_INT_RSHIFT(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) + b = v1.intbound.rshift_bound(v2.intbound) + if b.has_lower and b.has_upper and b.lower == b.upper: + # constant result (likely 0, for rshifts that kill all bits) + self.make_constant_int(op.result, b.lower) + else: + self.emit_operation(op) + r = self.getvalue(op.result) + r.intbound.intersect(b) + + def optimize_GUARD_NO_OVERFLOW(self, op): + lastop = self.last_emitted_operation + if lastop is not None: + opnum = lastop.getopnum() + args = lastop.getarglist() + result = lastop.result + # If the INT_xxx_OVF was replaced with INT_xxx, then we can kill + # the GUARD_NO_OVERFLOW. + if (opnum == rop.INT_ADD or + opnum == rop.INT_SUB or + opnum == rop.INT_MUL): + return + # Else, synthesize the non overflowing op for optimize_default to + # reuse, as well as the reverse op + elif opnum == rop.INT_ADD_OVF: + self.pure(rop.INT_ADD, args[:], result) + self.pure(rop.INT_SUB, [result, args[1]], args[0]) + self.pure(rop.INT_SUB, [result, args[0]], args[1]) + elif opnum == rop.INT_SUB_OVF: + self.pure(rop.INT_SUB, args[:], result) + self.pure(rop.INT_ADD, [result, args[1]], args[0]) + self.pure(rop.INT_SUB, [args[0], result], args[1]) + elif opnum == rop.INT_MUL_OVF: + self.pure(rop.INT_MUL, args[:], result) self.emit_operation(op) - r = self.getvalue(op.result) - r.intbound.intersect(v1.intbound.rshift_bound(v2.intbound)) + + def optimize_GUARD_OVERFLOW(self, op): + # If INT_xxx_OVF was replaced by INT_xxx, *but* we still see + # GUARD_OVERFLOW, then the loop is invalid. + lastop = self.last_emitted_operation + if lastop is None: + raise InvalidLoop + opnum = lastop.getopnum() + if opnum not in (rop.INT_ADD_OVF, rop.INT_SUB_OVF, rop.INT_MUL_OVF): + raise InvalidLoop + self.emit_operation(op) def optimize_INT_ADD_OVF(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.add_bound(v2.intbound) - if resbound.has_lower and resbound.has_upper and \ - self.nextop.getopnum() == rop.GUARD_NO_OVERFLOW: - # Transform into INT_ADD and remove guard + if resbound.bounded(): + # Transform into INT_ADD. The following guard will be killed + # by optimize_GUARD_NO_OVERFLOW; if we see instead an + # optimize_GUARD_OVERFLOW, then InvalidLoop. 
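optimize_INT_ADD_OVF, optimize_INT_SUB_OVF and optimize_INT_MUL_OVF below all follow the pattern the comment above describes: when interval arithmetic proves the result bounded, the overflow-checking operation is replaced by its plain variant and the guard that follows becomes dead (or, if a GUARD_OVERFLOW follows instead, the loop is invalid). A tiny model of that bounded() check, assuming a 64-bit word purely for the illustration:

    MAXINT = 2**63 - 1
    MININT = -2**63

    def add_is_bounded(lo1, hi1, lo2, hi2):
        # mirrors v1.intbound.add_bound(v2.intbound).bounded()
        lo, hi = lo1 + lo2, hi1 + hi2
        return MININT <= lo and hi <= MAXINT

    # i0 in [0, 100] and i1 in [0, 10]: the sum lies in [0, 110], so
    # INT_ADD_OVF(i0, i1) cannot overflow and GUARD_NO_OVERFLOW is dropped.
    assert add_is_bounded(0, 100, 0, 10)
    # i0 in [0, MAXINT] and i1 == 1: the sum may overflow, keep INT_ADD_OVF.
    assert not add_is_bounded(0, MAXINT, 1, 1)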
op = op.copy_and_change(rop.INT_ADD) - self.optimize_INT_ADD(op) # emit the op - else: - self.emit_operation(op) - r = self.getvalue(op.result) - r.intbound.intersect(resbound) - self.emit_operation(self.nextop) - if self.nextop.getopnum() == rop.GUARD_NO_OVERFLOW: - # Synthesize the non overflowing op for optimize_default to reuse - self.pure(rop.INT_ADD, op.getarglist()[:], op.result) - # Synthesize the reverse op for optimize_default to reuse - self.pure(rop.INT_SUB, [op.result, op.getarg(1)], op.getarg(0)) - self.pure(rop.INT_SUB, [op.result, op.getarg(0)], op.getarg(1)) - + self.emit_operation(op) # emit the op + r = self.getvalue(op.result) + r.intbound.intersect(resbound) def optimize_INT_SUB_OVF(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.sub_bound(v2.intbound) - if resbound.has_lower and resbound.has_upper and \ - self.nextop.getopnum() == rop.GUARD_NO_OVERFLOW: - # Transform into INT_SUB and remove guard + if resbound.bounded(): op = op.copy_and_change(rop.INT_SUB) - self.optimize_INT_SUB(op) # emit the op - else: - self.emit_operation(op) - r = self.getvalue(op.result) - r.intbound.intersect(resbound) - self.emit_operation(self.nextop) - if self.nextop.getopnum() == rop.GUARD_NO_OVERFLOW: - # Synthesize the non overflowing op for optimize_default to reuse - self.pure(rop.INT_SUB, op.getarglist()[:], op.result) - # Synthesize the reverse ops for optimize_default to reuse - self.pure(rop.INT_ADD, [op.result, op.getarg(1)], op.getarg(0)) - self.pure(rop.INT_SUB, [op.getarg(0), op.result], op.getarg(1)) - + self.emit_operation(op) # emit the op + r = self.getvalue(op.result) + r.intbound.intersect(resbound) def optimize_INT_MUL_OVF(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.mul_bound(v2.intbound) - if resbound.has_lower and resbound.has_upper and \ - self.nextop.getopnum() == rop.GUARD_NO_OVERFLOW: - # Transform into INT_MUL and remove guard + if resbound.bounded(): op = op.copy_and_change(rop.INT_MUL) - self.optimize_INT_MUL(op) # emit the op - else: - self.emit_operation(op) - r = self.getvalue(op.result) - r.intbound.intersect(resbound) - self.emit_operation(self.nextop) - if self.nextop.getopnum() == rop.GUARD_NO_OVERFLOW: - # Synthesize the non overflowing op for optimize_default to reuse - self.pure(rop.INT_MUL, op.getarglist()[:], op.result) - + self.emit_operation(op) + r = self.getvalue(op.result) + r.intbound.intersect(resbound) def optimize_INT_LT(self, op): v1 = self.getvalue(op.getarg(0)) diff --git a/pypy/jit/metainterp/optimizeopt/intutils.py b/pypy/jit/metainterp/optimizeopt/intutils.py --- a/pypy/jit/metainterp/optimizeopt/intutils.py +++ b/pypy/jit/metainterp/optimizeopt/intutils.py @@ -1,4 +1,5 @@ -from pypy.rlib.rarithmetic import ovfcheck, ovfcheck_lshift, LONG_BIT +from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT +from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.history import BoxInt, ConstInt import sys @@ -13,6 +14,10 @@ self.has_lower = True self.upper = upper self.lower = lower + # check for unexpected overflows: + if not we_are_translated(): + assert type(upper) is not long + assert type(lower) is not long # Returns True if the bound was updated def make_le(self, other): @@ -169,10 +174,10 @@ other.known_ge(IntBound(0, 0)) and \ other.known_lt(IntBound(LONG_BIT, LONG_BIT)): try: - vals = (ovfcheck_lshift(self.upper, other.upper), - 
ovfcheck_lshift(self.upper, other.lower), - ovfcheck_lshift(self.lower, other.upper), - ovfcheck_lshift(self.lower, other.lower)) + vals = (ovfcheck(self.upper << other.upper), + ovfcheck(self.upper << other.lower), + ovfcheck(self.lower << other.upper), + ovfcheck(self.lower << other.lower)) return IntBound(min4(vals), max4(vals)) except (OverflowError, ValueError): return IntUnbounded() diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -1,12 +1,12 @@ from pypy.jit.metainterp import jitprof, resume, compile from pypy.jit.metainterp.executor import execute_nonspec -from pypy.jit.metainterp.history import BoxInt, BoxFloat, Const, ConstInt, REF +from pypy.jit.metainterp.history import BoxInt, BoxFloat, Const, ConstInt, REF, INT from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded, \ ImmutableIntUnbounded, \ IntLowerBound, MININT, MAXINT from pypy.jit.metainterp.optimizeopt.util import (make_dispatcher_method, args_dict) -from pypy.jit.metainterp.resoperation import rop, ResOperation +from pypy.jit.metainterp.resoperation import rop, ResOperation, AbstractResOp from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.tool.pairtype import extendabletype from pypy.rlib.debug import debug_start, debug_stop, debug_print @@ -95,6 +95,10 @@ return guards def import_from(self, other, optimizer): + if self.level == LEVEL_CONSTANT: + assert other.level == LEVEL_CONSTANT + assert other.box.same_constant(self.box) + return assert self.level <= LEVEL_NONNULL if other.level == LEVEL_CONSTANT: self.make_constant(other.get_key_box()) @@ -141,6 +145,13 @@ return not box.nonnull() return False + def same_value(self, other): + if not other: + return False + if self.is_constant() and other.is_constant(): + return self.box.same_constant(other.box) + return self is other + def make_constant(self, constbox): """Replace 'self.box' with a Const box.""" assert isinstance(constbox, Const) @@ -236,9 +247,10 @@ CONST_1 = ConstInt(1) CVAL_ZERO = ConstantValue(CONST_0) CVAL_ZERO_FLOAT = ConstantValue(Const._new(0.0)) -CVAL_UNINITIALIZED_ZERO = ConstantValue(CONST_0) llhelper.CVAL_NULLREF = ConstantValue(llhelper.CONST_NULL) oohelper.CVAL_NULLREF = ConstantValue(oohelper.CONST_NULL) +REMOVED = AbstractResOp(None) + class Optimization(object): next_optimization = None @@ -250,6 +262,7 @@ raise NotImplementedError def emit_operation(self, op): + self.last_emitted_operation = op self.next_optimization.propagate_forward(op) # FIXME: Move some of these here? 
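The last_emitted_operation attribute added to emit_operation above, together with the REMOVED marker, is the hook used by the pure.py and rewrite.py hunks further down: when an optimization folds a CALL_PURE or CALL_LOOPINVARIANT away, it leaves REMOVED behind, and the next optimize_GUARD_NO_EXCEPTION drops the now-pointless guard. A toy model of that protocol, with the class and the operation names invented here for illustration only:

    REMOVED = object()

    class TinyChain(object):
        def __init__(self):
            self.last_emitted_operation = None
            self.emitted = []

        def emit_operation(self, op):
            self.last_emitted_operation = op
            self.emitted.append(op)

        def propagate_forward(self, op):
            if op == 'call_pure':                 # pretend it was folded away
                self.last_emitted_operation = REMOVED
                return
            if (op == 'guard_no_exception' and
                    self.last_emitted_operation is REMOVED):
                return                            # guard of the removed call
            self.emit_operation(op)

    chain = TinyChain()
    for op in ['call_pure', 'guard_no_exception', 'int_add']:
        chain.propagate_forward(op)
    assert chain.emitted == ['int_add']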
@@ -317,24 +330,25 @@ def forget_numberings(self, box): self.optimizer.forget_numberings(box) + class Optimizer(Optimization): - def __init__(self, metainterp_sd, loop, optimizations=None, bridge=False): + def __init__(self, metainterp_sd, loop, optimizations=None): self.metainterp_sd = metainterp_sd self.cpu = metainterp_sd.cpu self.loop = loop - self.bridge = bridge self.values = {} self.interned_refs = self.cpu.ts.new_ref_dict() + self.interned_ints = {} self.resumedata_memo = resume.ResumeDataLoopMemo(metainterp_sd) self.bool_boxes = {} self.producer = {} self.pendingfields = [] - self.exception_might_have_happened = False self.quasi_immutable_deps = None self.opaque_pointers = {} self.replaces_guard = {} self._newoperations = [] + self.seen_results = {} self.optimizer = self self.optpure = None self.optearlyforce = None @@ -352,6 +366,7 @@ optimizations[-1].next_optimization = self for o in optimizations: o.optimizer = self + o.last_emitted_operation = None o.setup() else: optimizations = [] @@ -398,6 +413,9 @@ if not value: return box return self.interned_refs.setdefault(value, box) + #elif constbox.type == INT: + # value = constbox.getint() + # return self.interned_ints.setdefault(value, box) else: return box @@ -483,7 +501,6 @@ return CVAL_ZERO def propagate_all_forward(self): - self.exception_might_have_happened = self.bridge self.clear_newoperations() for op in self.loop.operations: self.first_optimization.propagate_forward(op) @@ -526,6 +543,10 @@ op = self.store_final_boxes_in_guard(op) elif op.can_raise(): self.exception_might_have_happened = True + if op.result: + if op.result in self.seen_results: + raise ValueError, "invalid optimization" + self.seen_results[op.result] = None self._newoperations.append(op) def replace_op(self, old_op, new_op): @@ -543,9 +564,12 @@ descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) - newboxes = modifier.finish(self.values, self.pendingfields) - if len(newboxes) > self.metainterp_sd.options.failargs_limit: # XXX be careful here - compile.giveup() + try: + newboxes = modifier.finish(self.values, self.pendingfields) + if len(newboxes) > self.metainterp_sd.options.failargs_limit: + raise resume.TagOverflow + except resume.TagOverflow: + raise compile.giveup() descr.store_final_boxes(op, newboxes) # if op.getopnum() == rop.GUARD_VALUE: diff --git a/pypy/jit/metainterp/optimizeopt/pure.py b/pypy/jit/metainterp/optimizeopt/pure.py --- a/pypy/jit/metainterp/optimizeopt/pure.py +++ b/pypy/jit/metainterp/optimizeopt/pure.py @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.optimizeopt.optimizer import Optimization +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization, REMOVED from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.optimizeopt.util import (make_dispatcher_method, args_dict) @@ -61,7 +61,10 @@ oldop = self.pure_operations.get(args, None) if oldop is not None and oldop.getdescr() is op.getdescr(): assert oldop.getopnum() == op.getopnum() + # this removes a CALL_PURE that has the same (non-constant) + # arguments as a previous CALL_PURE. 
self.make_equal_to(op.result, self.getvalue(oldop.result)) + self.last_emitted_operation = REMOVED return else: self.pure_operations[args] = op @@ -72,6 +75,13 @@ self.emit_operation(ResOperation(rop.CALL, args, op.result, op.getdescr())) + def optimize_GUARD_NO_EXCEPTION(self, op): + if self.last_emitted_operation is REMOVED: + # it was a CALL_PURE that was killed; so we also kill the + # following GUARD_NO_EXCEPTION + return + self.emit_operation(op) + def flush(self): assert self.posponedop is None diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -260,6 +260,16 @@ def optimize_GUARD_FALSE(self, op): self.optimize_guard(op, CONST_0) + def optimize_RECORD_KNOWN_CLASS(self, op): + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) + assert isinstance(expectedclassbox, Const) + realclassbox = value.get_constant_class(self.optimizer.cpu) + if realclassbox is not None: + assert realclassbox.same_constant(expectedclassbox) + return + value.make_constant_class(expectedclassbox, None) + def optimize_GUARD_CLASS(self, op): value = self.getvalue(op.getarg(0)) expectedclassbox = op.getarg(1) @@ -294,12 +304,6 @@ raise InvalidLoop self.optimize_GUARD_CLASS(op) - def optimize_GUARD_NO_EXCEPTION(self, op): - if not self.optimizer.exception_might_have_happened: - return - self.emit_operation(op) - self.optimizer.exception_might_have_happened = False - def optimize_CALL_LOOPINVARIANT(self, op): arg = op.getarg(0) # 'arg' must be a Const, because residual_call in codewriter @@ -310,6 +314,7 @@ resvalue = self.loop_invariant_results.get(key, None) if resvalue is not None: self.make_equal_to(op.result, resvalue) + self.last_emitted_operation = REMOVED return # change the op to be a normal call, from the backend's point of view # there is no reason to have a separate operation for this @@ -444,10 +449,19 @@ except KeyError: pass else: + # this removes a CALL_PURE with all constant arguments. 
self.make_constant(op.result, result) + self.last_emitted_operation = REMOVED return self.emit_operation(op) + def optimize_GUARD_NO_EXCEPTION(self, op): + if self.last_emitted_operation is REMOVED: + # it was a CALL_PURE or a CALL_LOOPINVARIANT that was killed; + # so we also kill the following GUARD_NO_EXCEPTION + return + self.emit_operation(op) + def optimize_INT_FLOORDIV(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) @@ -477,6 +491,9 @@ self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) self.emit_operation(op) + def optimize_SAME_AS(self, op): + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) + dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_', default=OptRewrite.emit_operation) optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD') diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -28,6 +28,9 @@ def optimize_MARK_OPAQUE_PTR(self, op): pass + def optimize_RECORD_KNOWN_CLASS(self, op): + pass + dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -9,6 +9,7 @@ from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt from pypy.jit.metainterp import executor, compile, resume, history from pypy.jit.metainterp.resoperation import rop, opname, ResOperation +from pypy.rlib.rarithmetic import LONG_BIT def test_store_final_boxes_in_guard(): @@ -680,25 +681,60 @@ # ---------- - def test_fold_guard_no_exception(self): - ops = """ - [i] - guard_no_exception() [] - i1 = int_add(i, 3) - guard_no_exception() [] + def test_keep_guard_no_exception(self): + ops = """ + [i1] i2 = call(i1, descr=nonwritedescr) guard_no_exception() [i1, i2] - guard_no_exception() [] - i3 = call(i2, descr=nonwritedescr) - jump(i1) # the exception is considered lost when we loop back - """ - expected = """ - [i] - i1 = int_add(i, 3) - i2 = call(i1, descr=nonwritedescr) + jump(i2) + """ + self.optimize_loop(ops, ops) + + def test_keep_guard_no_exception_with_call_pure_that_is_not_folded(self): + ops = """ + [i1] + i2 = call_pure(123456, i1, descr=nonwritedescr) guard_no_exception() [i1, i2] - i3 = call(i2, descr=nonwritedescr) - jump(i1) + jump(i2) + """ + expected = """ + [i1] + i2 = call(123456, i1, descr=nonwritedescr) + guard_no_exception() [i1, i2] + jump(i2) + """ + self.optimize_loop(ops, expected) + + def test_remove_guard_no_exception_with_call_pure_on_constant_args(self): + arg_consts = [ConstInt(i) for i in (123456, 81)] + call_pure_results = {tuple(arg_consts): ConstInt(5)} + ops = """ + [i1] + i3 = same_as(81) + i2 = call_pure(123456, i3, descr=nonwritedescr) + guard_no_exception() [i1, i2] + jump(i2) + """ + expected = """ + [i1] + jump(5) + """ + self.optimize_loop(ops, expected, call_pure_results) + + def test_remove_guard_no_exception_with_duplicated_call_pure(self): + ops = """ + [i1] + i2 = call_pure(123456, i1, descr=nonwritedescr) + guard_no_exception() [i1, i2] + i3 = call_pure(123456, i1, descr=nonwritedescr) + guard_no_exception() [i1, i2, i3] + jump(i3) + """ + expected = """ + [i1] + i2 = call(123456, i1, descr=nonwritedescr) + guard_no_exception() [i1, i2] + 
jump(i2) """ self.optimize_loop(ops, expected) @@ -976,6 +1012,29 @@ """ self.optimize_loop(ops, expected) + def test_virtual_array_of_struct_forced(self): + ops = """ + [f0, f1] + p0 = new_array(1, descr=complexarraydescr) + setinteriorfield_gc(p0, 0, f0, descr=complexrealdescr) + setinteriorfield_gc(p0, 0, f1, descr=compleximagdescr) + f2 = getinteriorfield_gc(p0, 0, descr=complexrealdescr) + f3 = getinteriorfield_gc(p0, 0, descr=compleximagdescr) + f4 = float_mul(f2, f3) + i0 = escape(f4, p0) + finish(i0) + """ + expected = """ + [f0, f1] + f2 = float_mul(f0, f1) + p0 = new_array(1, descr=complexarraydescr) + setinteriorfield_gc(p0, 0, f0, descr=complexrealdescr) + setinteriorfield_gc(p0, 0, f1, descr=compleximagdescr) + i0 = escape(f2, p0) + finish(i0) + """ + self.optimize_loop(ops, expected) + def test_nonvirtual_1(self): ops = """ [i] @@ -4099,6 +4158,38 @@ """ self.optimize_strunicode_loop(ops, expected) + def test_str_concat_constant_lengths(self): + ops = """ + [i0] + p0 = newstr(1) + strsetitem(p0, 0, i0) + p1 = newstr(0) + p2 = call(0, p0, p1, descr=strconcatdescr) + i1 = call(0, p2, p0, descr=strequaldescr) + finish(i1) + """ + expected = """ + [i0] + finish(1) + """ + self.optimize_strunicode_loop(ops, expected) + + def test_str_concat_constant_lengths_2(self): + ops = """ + [i0] + p0 = newstr(0) + p1 = newstr(1) + strsetitem(p1, 0, i0) + p2 = call(0, p0, p1, descr=strconcatdescr) + i1 = call(0, p2, p1, descr=strequaldescr) + finish(i1) + """ + expected = """ + [i0] + finish(1) + """ + self.optimize_strunicode_loop(ops, expected) + def test_str_slice_1(self): ops = """ [p1, i1, i2] @@ -4201,6 +4292,27 @@ """ self.optimize_strunicode_loop(ops, expected) + def test_str_slice_plain_virtual(self): + ops = """ + [] + p0 = newstr(11) + copystrcontent(s"hello world", p0, 0, 0, 11) + p1 = call(0, p0, 0, 5, descr=strslicedescr) + finish(p1) + """ + expected = """ + [] + p0 = newstr(11) + copystrcontent(s"hello world", p0, 0, 0, 11) + # Eventually this should just return s"hello", but ATM this test is + # just verifying that it doesn't return "\0\0\0\0\0", so being + # slightly underoptimized is ok. + p1 = newstr(5) + copystrcontent(p0, p1, 0, 0, 5) + finish(p1) + """ + self.optimize_strunicode_loop(ops, expected) + # ---------- def optimize_strunicode_loop_extradescrs(self, ops, optops): class FakeCallInfoCollection: @@ -4691,11 +4803,11 @@ i5 = int_ge(i0, 0) guard_true(i5) [] i1 = int_mod(i0, 42) - i2 = int_rshift(i1, 63) + i2 = int_rshift(i1, %d) i3 = int_and(42, i2) i4 = int_add(i1, i3) finish(i4) - """ + """ % (LONG_BIT-1) expected = """ [i0] i5 = int_ge(i0, 0) @@ -4703,21 +4815,41 @@ i1 = int_mod(i0, 42) finish(i1) """ - py.test.skip("in-progress") self.optimize_loop(ops, expected) - # Also, 'n % power-of-two' can be turned into int_and(), - # but that's a bit harder to detect here because it turns into - # several operations, and of course it is wrong to just turn + # 'n % power-of-two' can be turned into int_and(); at least that's + # easy to do now if n is known to be non-negative. 
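The comment just above relies on the same identity that the new optimize_INT_MOD code in intbounds.py exploits: for a non-negative n and a power-of-two m, n % m equals n & (m - 1), so the whole mod-plus-sign-fixup sequence collapses to a single int_and. A quick spot-check of the identity:

    for n in range(200):
        for m in (1, 2, 4, 8, 16, 64, 1024):
            assert n % m == (n & (m - 1))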
+ ops = """ + [i0] + i5 = int_ge(i0, 0) + guard_true(i5) [] + i1 = int_mod(i0, 8) + i2 = int_rshift(i1, %d) + i3 = int_and(42, i2) + i4 = int_add(i1, i3) + finish(i4) + """ % (LONG_BIT-1) + expected = """ + [i0] + i5 = int_ge(i0, 0) + guard_true(i5) [] + i1 = int_and(i0, 7) + finish(i1) + """ + self.optimize_loop(ops, expected) + + # Of course any 'maybe-negative % power-of-two' can be turned into + # int_and(), but that's a bit harder to detect here because it turns + # into several operations, and of course it is wrong to just turn # int_mod(i0, 16) into int_and(i0, 15). ops = """ [i0] i1 = int_mod(i0, 16) - i2 = int_rshift(i1, 63) + i2 = int_rshift(i1, %d) i3 = int_and(16, i2) i4 = int_add(i1, i3) finish(i4) - """ + """ % (LONG_BIT-1) expected = """ [i0] i4 = int_and(i0, 15) @@ -4726,6 +4858,16 @@ py.test.skip("harder") self.optimize_loop(ops, expected) + def test_intmod_bounds_bug1(self): + ops = """ + [i0] + i1 = int_mod(i0, %d) + i2 = int_eq(i1, 0) + guard_false(i2) [] + finish() + """ % (-(1<<(LONG_BIT-1)),) + self.optimize_loop(ops, ops) + def test_bounded_lazy_setfield(self): ops = """ [p0, i0] @@ -4808,6 +4950,27 @@ def test_plain_virtual_string_copy_content(self): ops = """ + [i1] + p0 = newstr(6) + copystrcontent(s"hello!", p0, 0, 0, 6) + p1 = call(0, p0, s"abc123", descr=strconcatdescr) + i0 = strgetitem(p1, i1) + finish(i0) + """ + expected = """ + [i1] + p0 = newstr(6) + copystrcontent(s"hello!", p0, 0, 0, 6) + p1 = newstr(12) + copystrcontent(p0, p1, 0, 0, 6) + copystrcontent(s"abc123", p1, 0, 6, 6) + i0 = strgetitem(p1, i1) + finish(i0) + """ + self.optimize_strunicode_loop(ops, expected) + + def test_plain_virtual_string_copy_content_2(self): + ops = """ [] p0 = newstr(6) copystrcontent(s"hello!", p0, 0, 0, 6) @@ -4819,10 +4982,7 @@ [] p0 = newstr(6) copystrcontent(s"hello!", p0, 0, 0, 6) - p1 = newstr(12) - copystrcontent(p0, p1, 0, 0, 6) - copystrcontent(s"abc123", p1, 0, 6, 6) - i0 = strgetitem(p1, 0) + i0 = strgetitem(p0, 0) finish(i0) """ self.optimize_strunicode_loop(ops, expected) @@ -4839,6 +4999,34 @@ """ self.optimize_loop(ops, expected) + def test_known_equal_ints(self): + py.test.skip("in-progress") + ops = """ + [i0, i1, i2, p0] + i3 = int_eq(i0, i1) + guard_true(i3) [] + + i4 = int_lt(i2, i0) + guard_true(i4) [] + i5 = int_lt(i2, i1) + guard_true(i5) [] + + i6 = getarrayitem_gc(p0, i2) + finish(i6) + """ + expected = """ + [i0, i1, i2, p0] + i3 = int_eq(i0, i1) + guard_true(i3) [] + + i4 = int_lt(i2, i0) + guard_true(i4) [] + + i6 = getarrayitem_gc(p0, i3) + finish(i6) + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -931,17 +931,14 @@ [i] guard_no_exception() [] i1 = int_add(i, 3) - guard_no_exception() [] i2 = call(i1, descr=nonwritedescr) guard_no_exception() [i1, i2] - guard_no_exception() [] i3 = call(i2, descr=nonwritedescr) jump(i1) # the exception is considered lost when we loop back """ - # note that 'guard_no_exception' at the very start is kept around - # for bridges, but not for loops preamble = """ [i] + guard_no_exception() [] # occurs at the start of bridges, so keep it i1 = int_add(i, 3) i2 = call(i1, descr=nonwritedescr) guard_no_exception() [i1, i2] @@ -950,6 +947,7 @@ """ expected = """ [i] + guard_no_exception() [] # occurs at the 
start of bridges, so keep it i1 = int_add(i, 3) i2 = call(i1, descr=nonwritedescr) guard_no_exception() [i1, i2] @@ -958,6 +956,23 @@ """ self.optimize_loop(ops, expected, preamble) + def test_bug_guard_no_exception(self): + ops = """ + [] + i0 = call(123, descr=nonwritedescr) + p0 = call(0, "xy", descr=s2u_descr) # string -> unicode + guard_no_exception() [] + escape(p0) + jump() + """ + expected = """ + [] + i0 = call(123, descr=nonwritedescr) + escape(u"xy") + jump() + """ + self.optimize_loop(ops, expected) + # ---------- def test_call_loopinvariant(self): @@ -1176,6 +1191,75 @@ """ self.optimize_loop(ops, expected, preamble) + def test_virtual_recursive(self): + ops = """ + [p0] + p41 = getfield_gc(p0, descr=nextdescr) + i0 = getfield_gc(p41, descr=valuedescr) + p1 = new_with_vtable(ConstClass(node_vtable2)) + p2 = new_with_vtable(ConstClass(node_vtable2)) + setfield_gc(p2, p1, descr=nextdescr) + setfield_gc(p1, p2, descr=nextdescr) + i1 = int_add(i0, 1) + setfield_gc(p2, i1, descr=valuedescr) + jump(p1) + """ + preamble = """ + [p0] + p41 = getfield_gc(p0, descr=nextdescr) + i0 = getfield_gc(p41, descr=valuedescr) + i3 = int_add(i0, 1) + jump(i3) + """ + expected = """ + [i0] + i1 = int_add(i0, 1) + jump(i1) + """ + self.optimize_loop(ops, expected, preamble) + + def test_virtual_recursive_forced(self): + ops = """ + [p0] + p41 = getfield_gc(p0, descr=nextdescr) + i0 = getfield_gc(p41, descr=valuedescr) + p1 = new_with_vtable(ConstClass(node_vtable2)) + p2 = new_with_vtable(ConstClass(node_vtable2)) + setfield_gc(p2, p1, descr=nextdescr) + setfield_gc(p1, p2, descr=nextdescr) + i1 = int_add(i0, 1) + setfield_gc(p2, i1, descr=valuedescr) + setfield_gc(p0, p1, descr=nextdescr) + jump(p1) + """ + preamble = """ + [p0] + p41 = getfield_gc(p0, descr=nextdescr) + i0 = getfield_gc(p41, descr=valuedescr) + i1 = int_add(i0, 1) + p1 = new_with_vtable(ConstClass(node_vtable2)) + p2 = new_with_vtable(ConstClass(node_vtable2)) + setfield_gc(p2, i1, descr=valuedescr) + setfield_gc(p2, p1, descr=nextdescr) + setfield_gc(p1, p2, descr=nextdescr) + setfield_gc(p0, p1, descr=nextdescr) + jump(p1) + """ + loop = """ + [p0] + p41 = getfield_gc(p0, descr=nextdescr) + i0 = getfield_gc(p41, descr=valuedescr) + i1 = int_add(i0, 1) + p1 = new_with_vtable(ConstClass(node_vtable2)) + p2 = new_with_vtable(ConstClass(node_vtable2)) + setfield_gc(p0, p1, descr=nextdescr) + setfield_gc(p2, p1, descr=nextdescr) + setfield_gc(p1, p2, descr=nextdescr) + setfield_gc(p2, i1, descr=valuedescr) + jump(p1) + """ + self.optimize_loop(ops, loop, preamble) + def test_virtual_constant_isnull(self): ops = """ [i0] @@ -2168,13 +2252,13 @@ ops = """ [p0, i0, p1, i1, i2] setfield_gc(p0, i1, descr=valuedescr) - copystrcontent(p0, i0, p1, i1, i2) + copystrcontent(p0, p1, i0, i1, i2) escape() jump(p0, i0, p1, i1, i2) """ expected = """ [p0, i0, p1, i1, i2] - copystrcontent(p0, i0, p1, i1, i2) + copystrcontent(p0, p1, i0, i1, i2) setfield_gc(p0, i1, descr=valuedescr) escape() jump(p0, i0, p1, i1, i2) @@ -4783,6 +4867,52 @@ """ self.optimize_loop(ops, expected) + + def test_division_nonneg(self): + py.test.skip("harder") + # this is how an app-level division turns into right now + ops = """ + [i4] + i1 = int_ge(i4, 0) + guard_true(i1) [] + i16 = int_floordiv(i4, 3) + i18 = int_mul(i16, 3) + i19 = int_sub(i4, i18) + i21 = int_rshift(i19, %d) + i22 = int_add(i16, i21) + finish(i22) + """ % (LONG_BIT-1) + expected = """ + [i4] + i1 = int_ge(i4, 0) + guard_true(i1) [] + i16 = int_floordiv(i4, 3) + finish(i16) + """ + 
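test_division_nonneg above spells out how an app-level division currently expands: a floordiv, the remainder, and a sign-correction built from int_rshift(i19, LONG_BIT-1). For a non-negative dividend that correction term is always zero, which is why the expected trace reduces the whole sequence to the single int_floordiv. A small model of the expansion, assuming LONG_BIT == 64 purely for the illustration (int_floordiv truncates toward zero, which agrees with Python's // for the non-negative case checked here):

    LONG_BIT = 64

    def expanded_division(n, d):
        q = n // d                    # i16 = int_floordiv(i4, 3)
        r = n - q * d                 # i18 = int_mul, i19 = int_sub
        corr = r >> (LONG_BIT - 1)    # i21 = int_rshift(i19, 63): 0 when r >= 0
        return q + corr               # i22 = int_add(i16, i21)

    for n in range(100):
        assert expanded_division(n, 3) == n // 3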
self.optimize_loop(ops, expected) + + def test_division_by_2(self): + py.test.skip("harder") + ops = """ + [i4] + i1 = int_ge(i4, 0) + guard_true(i1) [] + i16 = int_floordiv(i4, 2) + i18 = int_mul(i16, 2) + i19 = int_sub(i4, i18) + i21 = int_rshift(i19, %d) + i22 = int_add(i16, i21) + finish(i22) + """ % (LONG_BIT-1) + expected = """ + [i4] + i1 = int_ge(i4, 0) + guard_true(i1) [] + i16 = int_rshift(i4, 1) + finish(i16) + """ + self.optimize_loop(ops, expected) + def test_subsub_ovf(self): ops = """ [i0] @@ -5377,6 +5507,96 @@ jump() """ self.optimize_loop(ops, expected) + # ---------- + ops = """ + [p1] + p0 = new_with_vtable(ConstClass(ptrobj_immut_vtable)) + setfield_gc(p0, p1, descr=immut_ptrval) + escape(p0) + jump(p1) + """ + self.optimize_loop(ops, ops) + # ---------- + ops = """ + [] + p0 = new_with_vtable(ConstClass(ptrobj_immut_vtable)) + p1 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p1, 1242, descr=immut_intval) + setfield_gc(p0, p1, descr=immut_ptrval) + escape(p0) + jump() + """ + class PtrObj1242(object): + _TYPE = llmemory.GCREF.TO + def __eq__(slf, other): + if slf is other: + return 1 + p1 = other.container.ptrval + p1cast = lltype.cast_pointer(lltype.Ptr(self.INTOBJ_IMMUT), p1) + return p1cast.intval == 1242 + self.namespace['ptrobj1242'] = lltype._ptr(llmemory.GCREF, + PtrObj1242()) + expected = """ + [] + escape(ConstPtr(ptrobj1242)) + jump() + """ + self.optimize_loop(ops, expected) + + def test_immutable_constantfold_recursive(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(ptrobj_immut_vtable)) + setfield_gc(p0, p0, descr=immut_ptrval) + escape(p0) + jump() + """ + from pypy.rpython.lltypesystem import lltype, llmemory + class PtrObjSelf(object): + _TYPE = llmemory.GCREF.TO + def __eq__(slf, other): + if slf is other: + return 1 + p1 = other.container.ptrval + p1cast = lltype.cast_pointer(lltype.Ptr(self.PTROBJ_IMMUT), p1) + return p1cast.ptrval == p1 + self.namespace['ptrobjself'] = lltype._ptr(llmemory.GCREF, + PtrObjSelf()) + expected = """ + [] + escape(ConstPtr(ptrobjself)) + jump() + """ + self.optimize_loop(ops, expected) + # + ops = """ + [] + p0 = new_with_vtable(ConstClass(ptrobj_immut_vtable)) + p1 = new_with_vtable(ConstClass(ptrobj_immut_vtable)) + setfield_gc(p0, p1, descr=immut_ptrval) + setfield_gc(p1, p0, descr=immut_ptrval) + escape(p0) + jump() + """ + class PtrObjSelf2(object): + _TYPE = llmemory.GCREF.TO + def __eq__(slf, other): + if slf is other: + return 1 + p1 = other.container.ptrval + p1cast = lltype.cast_pointer(lltype.Ptr(self.PTROBJ_IMMUT), p1) + p2 = p1cast.ptrval + assert p2 != p1 + p2cast = lltype.cast_pointer(lltype.Ptr(self.PTROBJ_IMMUT), p2) + return p2cast.ptrval == p1 + self.namespace['ptrobjself2'] = lltype._ptr(llmemory.GCREF, + PtrObjSelf2()) + expected = """ + [] + escape(ConstPtr(ptrobjself2)) + jump() + """ + self.optimize_loop(ops, expected) # ---------- def optimize_strunicode_loop(self, ops, optops, preamble): @@ -6235,12 +6455,15 @@ def test_str2unicode_constant(self): ops = """ [] + escape(1213) p0 = call(0, "xy", descr=s2u_descr) # string -> unicode + guard_no_exception() [] escape(p0) jump() """ expected = """ [] + escape(1213) escape(u"xy") jump() """ @@ -6250,6 +6473,7 @@ ops = """ [p0] p1 = call(0, p0, descr=s2u_descr) # string -> unicode + guard_no_exception() [] escape(p1) jump(p1) """ @@ -6258,6 +6482,21 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() + def test_record_known_class(self): + ops = """ + [p0] + p1 = 
getfield_gc(p0, descr=nextdescr) + record_known_class(p1, ConstClass(node_vtable)) + guard_class(p1, ConstClass(node_vtable)) [] + jump(p1) + """ + expected = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + jump(p1) + """ + self.optimize_loop(ops, expected) + def test_quasi_immut(self): ops = """ [p0, p1, i0] @@ -7309,6 +7548,150 @@ """ self.optimize_loop(ops, expected) + def test_repeated_constant_setfield_mixed_with_guard(self): + ops = """ + [p22, p18] + setfield_gc(p22, 2, descr=valuedescr) + guard_nonnull_class(p18, ConstClass(node_vtable)) [] + setfield_gc(p22, 2, descr=valuedescr) + jump(p22, p18) + """ + preamble = """ + [p22, p18] + setfield_gc(p22, 2, descr=valuedescr) + guard_nonnull_class(p18, ConstClass(node_vtable)) [] + jump(p22, p18) + """ + short = """ + [p22, p18] + i1 = getfield_gc(p22, descr=valuedescr) + guard_value(i1, 2) [] + jump(p22, p18) + """ + expected = """ + [p22, p18] + jump(p22, p18) + """ + self.optimize_loop(ops, expected, preamble, expected_short=short) + + def test_repeated_setfield_mixed_with_guard(self): + ops = """ + [p22, p18, i1] + i2 = getfield_gc(p22, descr=valuedescr) + call(i2, descr=nonwritedescr) + setfield_gc(p22, i1, descr=valuedescr) + guard_nonnull_class(p18, ConstClass(node_vtable)) [] + setfield_gc(p22, i1, descr=valuedescr) + jump(p22, p18, i1) + """ + preamble = """ + [p22, p18, i1] + i2 = getfield_gc(p22, descr=valuedescr) + call(i2, descr=nonwritedescr) + setfield_gc(p22, i1, descr=valuedescr) + guard_nonnull_class(p18, ConstClass(node_vtable)) [] + jump(p22, p18, i1, i1) + """ + short = """ + [p22, p18, i1] + i2 = getfield_gc(p22, descr=valuedescr) + jump(p22, p18, i1, i2) + """ + expected = """ + [p22, p18, i1, i2] + call(i2, descr=nonwritedescr) + setfield_gc(p22, i1, descr=valuedescr) + jump(p22, p18, i1, i1) + """ + self.optimize_loop(ops, expected, preamble, expected_short=short) + + def test_cache_setfield_across_loop_boundaries(self): + ops = """ + [p1] + p2 = getfield_gc(p1, descr=valuedescr) + guard_nonnull_class(p2, ConstClass(node_vtable)) [] + call(p2, descr=nonwritedescr) + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p1, p3, descr=valuedescr) + jump(p1) + """ + expected = """ + [p1, p2] + call(p2, descr=nonwritedescr) + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p1, p3, descr=valuedescr) + jump(p1, p3) + """ + self.optimize_loop(ops, expected) + + def test_cache_setarrayitem_across_loop_boundaries(self): + ops = """ + [p1] + p2 = getarrayitem_gc(p1, 3, descr=arraydescr) + guard_nonnull_class(p2, ConstClass(node_vtable)) [] + call(p2, descr=nonwritedescr) + p3 = new_with_vtable(ConstClass(node_vtable)) + setarrayitem_gc(p1, 3, p3, descr=arraydescr) + jump(p1) + """ + expected = """ + [p1, p2] + call(p2, descr=nonwritedescr) + p3 = new_with_vtable(ConstClass(node_vtable)) + setarrayitem_gc(p1, 3, p3, descr=arraydescr) + jump(p1, p3) + """ + self.optimize_loop(ops, expected) + + def test_setarrayitem_p0_p0(self): + ops = """ + [i0, i1] + p0 = escape() + setarrayitem_gc(p0, 2, p0, descr=arraydescr) + jump(i0, i1) + """ + expected = """ + [i0, i1] + p0 = escape() + setarrayitem_gc(p0, 2, p0, descr=arraydescr) + jump(i0, i1) + """ + self.optimize_loop(ops, expected) + + def test_setfield_p0_p0(self): + ops = """ + [i0, i1] + p0 = escape() + setfield_gc(p0, p0, descr=arraydescr) + jump(i0, i1) + """ + expected = """ + [i0, i1] + p0 = escape() + setfield_gc(p0, p0, descr=arraydescr) + jump(i0, i1) + """ + self.optimize_loop(ops, expected) + + def test_setfield_p0_p1_p0(self): + ops = """ + 
[i0, i1] + p0 = escape() + p1 = escape() + setfield_gc(p0, p1, descr=adescr) + setfield_gc(p1, p0, descr=bdescr) + jump(i0, i1) + """ + expected = """ + [i0, i1] + p0 = escape() + p1 = escape() + setfield_gc(p0, p1, descr=adescr) + setfield_gc(p1, p0, descr=bdescr) + jump(i0, i1) + """ + self.optimize_loop(ops, expected) + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/test/test_util.py b/pypy/jit/metainterp/optimizeopt/test/test_util.py --- a/pypy/jit/metainterp/optimizeopt/test/test_util.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_util.py @@ -139,6 +139,12 @@ noimmut_intval = cpu.fielddescrof(INTOBJ_NOIMMUT, 'intval') immut_intval = cpu.fielddescrof(INTOBJ_IMMUT, 'intval') + PTROBJ_IMMUT = lltype.GcStruct('PTROBJ_IMMUT', ('parent', OBJECT), + ('ptrval', lltype.Ptr(OBJECT)), + hints={'immutable': True}) + ptrobj_immut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + immut_ptrval = cpu.fielddescrof(PTROBJ_IMMUT, 'ptrval') + arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed)) floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) @@ -183,6 +189,7 @@ can_invalidate=True)) arraycopydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo([], [arraydescr], [], [arraydescr], + EffectInfo.EF_CANNOT_RAISE, oopspecindex=EffectInfo.OS_ARRAYCOPY)) @@ -212,12 +219,14 @@ _oopspecindex = getattr(EffectInfo, _os) locals()[_name] = \ cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], oopspecindex=_oopspecindex)) + EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + oopspecindex=_oopspecindex)) # _oopspecindex = getattr(EffectInfo, _os.replace('STR', 'UNI')) locals()[_name.replace('str', 'unicode')] = \ cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], oopspecindex=_oopspecindex)) + EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + oopspecindex=_oopspecindex)) s2u_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo([], [], [], [], oopspecindex=EffectInfo.OS_STR2UNICODE)) @@ -243,6 +252,7 @@ register_known_gctype(cpu, jit_virtual_ref_vtable,vrefinfo.JIT_VIRTUAL_REF) register_known_gctype(cpu, intobj_noimmut_vtable, INTOBJ_NOIMMUT) register_known_gctype(cpu, intobj_immut_vtable, INTOBJ_IMMUT) + register_known_gctype(cpu, ptrobj_immut_vtable, PTROBJ_IMMUT) namespace = locals() diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py --- a/pypy/jit/metainterp/optimizeopt/virtualize.py +++ b/pypy/jit/metainterp/optimizeopt/virtualize.py @@ -87,14 +87,36 @@ def _get_descr(self): raise NotImplementedError - def _is_immutable_and_filled_with_constants(self, optforce): + def _is_immutable_and_filled_with_constants(self, memo=None): + # check if it is possible to force the given structure into a + # compile-time constant: this is allowed only if it is declared + # immutable, if all fields are already filled, and if each field + # is either a compile-time constant or (recursively) a structure + # which also answers True to the same question. + # + # check that all fields are filled. The following equality check + # also fails if count == -1, meaning "not an immutable at all". 
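The memo argument introduced in this method is what makes the recursive check safe on cyclic virtual structures: a structure that is currently being checked is provisionally assumed to be constant-foldable, which terminates the recursion. A minimal stand-alone sketch of the same pattern, with illustrative names only (this is not PyPy code):

    class Node(object):
        def __init__(self, fields):
            self.fields = fields              # list of ints or other Nodes

    def all_constants(node, memo=None):
        if memo is None:
            memo = set()
        if node in memo:
            return True                       # recursive case: assume yes
        memo.add(node)
        for f in node.fields:
            if isinstance(f, Node):
                if not all_constants(f, memo):
                    return False
            elif not isinstance(f, int):      # stands in for "is a constant"
                return False
        return True

    a = Node([1]); b = Node([2, a]); a.fields.append(b)   # a cycle
    assert all_constants(a)
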
count = self._get_descr().count_fields_if_immutable() - if count != len(self._fields): # always the case if count == -1 + if count != len(self._fields): return False + # + # initialize 'memo' + if memo is None: + memo = {} + elif self in memo: + return True # recursive case: assume yes + memo[self] = None + # for value in self._fields.itervalues(): - subbox = value.force_box(optforce) - if not isinstance(subbox, Const): - return False + if value.is_constant(): + pass # it is a constant value: ok + elif (isinstance(value, AbstractVirtualStructValue) + and value.is_virtual()): + # recursive check + if not value._is_immutable_and_filled_with_constants(memo): + return False + else: + return False # not a constant at all return True def force_at_end_of_preamble(self, already_forced, optforce): @@ -114,7 +136,7 @@ if not we_are_translated(): op.name = 'FORCE ' + self.source_op.name - if self._is_immutable_and_filled_with_constants(optforce): + if self._is_immutable_and_filled_with_constants(): box = optforce.optimizer.constant_fold(op) self.make_constant(box) for ofs, value in self._fields.iteritems(): @@ -294,7 +316,12 @@ optforce.emit_operation(self.source_op) self.box = box = self.source_op.result for index in range(len(self._items)): - for descr, value in self._items[index].iteritems(): + iteritems = self._items[index].iteritems() + # random order is fine, except for tests + if not we_are_translated(): + iteritems = list(iteritems) + iteritems.sort(key = lambda (x, y): x.sort_key()) + for descr, value in iteritems: subbox = value.force_box(optforce) op = ResOperation(rop.SETINTERIORFIELD_GC, [box, ConstInt(index), subbox], None, descr=descr diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -551,6 +551,7 @@ optimizer.produce_potential_short_preamble_ops(self) self.short_boxes = {} + self.short_boxes_in_production = {} for box in self.potential_ops.keys(): try: @@ -606,6 +607,10 @@ return if isinstance(box, Const): return + if box in self.short_boxes_in_production: + raise BoxNotProducable + self.short_boxes_in_production[box] = True + if box in self.potential_ops: ops = self.prioritized_alternatives(box) produced_one = False diff --git a/pypy/jit/metainterp/optimizeopt/vstring.py b/pypy/jit/metainterp/optimizeopt/vstring.py --- a/pypy/jit/metainterp/optimizeopt/vstring.py +++ b/pypy/jit/metainterp/optimizeopt/vstring.py @@ -1,8 +1,9 @@ from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.metainterp.history import (BoxInt, Const, ConstInt, ConstPtr, - get_const_ptr_for_string, get_const_ptr_for_unicode) + get_const_ptr_for_string, get_const_ptr_for_unicode, BoxPtr, REF, INT) from pypy.jit.metainterp.optimizeopt import optimizer, virtualize -from pypy.jit.metainterp.optimizeopt.optimizer import CONST_0, CONST_1, llhelper +from pypy.jit.metainterp.optimizeopt.optimizer import CONST_0, CONST_1 +from pypy.jit.metainterp.optimizeopt.optimizer import llhelper, REMOVED from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.rlib.objectmodel import specialize, we_are_translated @@ -106,7 +107,12 @@ if not we_are_translated(): op.name = 'FORCE' optforce.emit_operation(op) - self.string_copy_parts(optforce, box, CONST_0, self.mode) + self.initialize_forced_string(optforce, box, CONST_0, self.mode) + + def 
initialize_forced_string(self, string_optimizer, targetbox, + offsetbox, mode): + return self.string_copy_parts(string_optimizer, targetbox, + offsetbox, mode) class VStringPlainValue(VAbstractStringValue): @@ -114,11 +120,20 @@ _lengthbox = None # cache only def setup(self, size): - self._chars = [optimizer.CVAL_UNINITIALIZED_ZERO] * size + # in this list, None means: "it's probably uninitialized so far, + # but maybe it was actually filled." So to handle this case, + # strgetitem cannot be virtual-ized and must be done as a residual + # operation. By contrast, any non-None value means: we know it + # is initialized to this value; strsetitem() there makes no sense. + # Also, as long as self.is_virtual(), then we know that no-one else + # could have written to the string, so we know that in this case + # "None" corresponds to "really uninitialized". + self._chars = [None] * size def setup_slice(self, longerlist, start, stop): assert 0 <= start <= stop <= len(longerlist) self._chars = longerlist[start:stop] + # slice the 'longerlist', which may also contain Nones def getstrlen(self, _, mode): if self._lengthbox is None: @@ -126,42 +141,66 @@ return self._lengthbox def getitem(self, index): - return self._chars[index] + return self._chars[index] # may return None! def setitem(self, index, charvalue): assert isinstance(charvalue, optimizer.OptValue) + assert self._chars[index] is None, ( + "setitem() on an already-initialized location") self._chars[index] = charvalue + def is_completely_initialized(self): + for c in self._chars: + if c is None: + return False + return True + @specialize.arg(1) def get_constant_string_spec(self, mode): for c in self._chars: - if c is optimizer.CVAL_UNINITIALIZED_ZERO or not c.is_constant(): + if c is None or not c.is_constant(): return None return mode.emptystr.join([mode.chr(c.box.getint()) for c in self._chars]) def string_copy_parts(self, string_optimizer, targetbox, offsetbox, mode): - if not self.is_virtual() and targetbox is not self.box: - lengthbox = self.getstrlen(string_optimizer, mode) - srcbox = self.force_box(string_optimizer) - return copy_str_content(string_optimizer, srcbox, targetbox, - CONST_0, offsetbox, lengthbox, mode) + if not self.is_virtual() and not self.is_completely_initialized(): + return VAbstractStringValue.string_copy_parts( + self, string_optimizer, targetbox, offsetbox, mode) + else: + return self.initialize_forced_string(string_optimizer, targetbox, + offsetbox, mode) + + def initialize_forced_string(self, string_optimizer, targetbox, + offsetbox, mode): for i in range(len(self._chars)): - charbox = self._chars[i].force_box(string_optimizer) - if not (isinstance(charbox, Const) and charbox.same_constant(CONST_0)): - string_optimizer.emit_operation(ResOperation(mode.STRSETITEM, [targetbox, - offsetbox, - charbox], - None)) + assert isinstance(targetbox, BoxPtr) # ConstPtr never makes sense + charvalue = self.getitem(i) + if charvalue is not None: + charbox = charvalue.force_box(string_optimizer) + if not (isinstance(charbox, Const) and + charbox.same_constant(CONST_0)): + op = ResOperation(mode.STRSETITEM, [targetbox, + offsetbox, + charbox], + None) + string_optimizer.emit_operation(op) offsetbox = _int_add(string_optimizer, offsetbox, CONST_1) return offsetbox def get_args_for_fail(self, modifier): if self.box is None and not modifier.already_seen_virtual(self.keybox): - charboxes = [value.get_key_box() for value in self._chars] + charboxes = [] + for value in self._chars: + if value is not None: + box = value.get_key_box() + 
else: + box = None + charboxes.append(box) modifier.register_virtual_fields(self.keybox, charboxes) for value in self._chars: - value.get_args_for_fail(modifier) + if value is not None: + value.get_args_for_fail(modifier) def _make_virtual(self, modifier): return modifier.make_vstrplain(self.mode is mode_unicode) @@ -169,6 +208,7 @@ class VStringConcatValue(VAbstractStringValue): """The concatenation of two other strings.""" + _attrs_ = ('left', 'right', 'lengthbox') lengthbox = None # or the computed length @@ -277,6 +317,7 @@ for i in range(lengthbox.value): charbox = _strgetitem(string_optimizer, srcbox, srcoffsetbox, mode) srcoffsetbox = _int_add(string_optimizer, srcoffsetbox, CONST_1) + assert isinstance(targetbox, BoxPtr) # ConstPtr never makes sense string_optimizer.emit_operation(ResOperation(mode.STRSETITEM, [targetbox, offsetbox, charbox], @@ -287,6 +328,7 @@ nextoffsetbox = _int_add(string_optimizer, offsetbox, lengthbox) else: nextoffsetbox = None + assert isinstance(targetbox, BoxPtr) # ConstPtr never makes sense op = ResOperation(mode.COPYSTRCONTENT, [srcbox, targetbox, srcoffsetbox, offsetbox, lengthbox], None) @@ -373,6 +415,7 @@ def optimize_STRSETITEM(self, op): value = self.getvalue(op.getarg(0)) + assert not value.is_constant() # strsetitem(ConstPtr) never makes sense if value.is_virtual() and isinstance(value, VStringPlainValue): indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: @@ -406,11 +449,20 @@ # if isinstance(value, VStringPlainValue): # even if no longer virtual if vindex.is_constant(): - res = value.getitem(vindex.box.getint()) - # If it is uninitialized we can't return it, it was set by a - # COPYSTRCONTENT, not a STRSETITEM - if res is not optimizer.CVAL_UNINITIALIZED_ZERO: - return res + result = value.getitem(vindex.box.getint()) + if result is not None: + return result + # + if isinstance(value, VStringConcatValue) and vindex.is_constant(): + len1box = value.left.getstrlen(self, mode) + if isinstance(len1box, ConstInt): + index = vindex.box.getint() + len1 = len1box.getint() + if index < len1: + return self.strgetitem(value.left, vindex, mode) + else: + vindex = optimizer.ConstantValue(ConstInt(index - len1)) + return self.strgetitem(value.right, vindex, mode) # resbox = _strgetitem(self, value.force_box(self), vindex.force_box(self), mode) return self.getvalue(resbox) @@ -432,6 +484,11 @@ def _optimize_COPYSTRCONTENT(self, op, mode): # args: src dst srcstart dststart length + assert op.getarg(0).type == REF + assert op.getarg(1).type == REF + assert op.getarg(2).type == INT + assert op.getarg(3).type == INT + assert op.getarg(4).type == INT src = self.getvalue(op.getarg(0)) dst = self.getvalue(op.getarg(1)) srcstart = self.getvalue(op.getarg(2)) @@ -473,6 +530,11 @@ optimize_CALL_PURE = optimize_CALL + def optimize_GUARD_NO_EXCEPTION(self, op): + if self.last_emitted_operation is REMOVED: + return + self.emit_operation(op) + def opt_call_str_STR2UNICODE(self, op): # Constant-fold unicode("constant string"). 
# More generally, supporting non-constant but virtual cases is @@ -487,6 +549,7 @@ except UnicodeDecodeError: return False self.make_constant(op.result, get_const_ptr_for_unicode(u)) + self.last_emitted_operation = REMOVED return True def opt_call_stroruni_STR_CONCAT(self, op, mode): @@ -503,13 +566,12 @@ vstart = self.getvalue(op.getarg(2)) vstop = self.getvalue(op.getarg(3)) # - if (isinstance(vstr, VStringPlainValue) and vstart.is_constant() - and vstop.is_constant()): - # slicing with constant bounds of a VStringPlainValue - value = self.make_vstring_plain(op.result, op, mode) - value.setup_slice(vstr._chars, vstart.box.getint(), - vstop.box.getint()) - return True + #if (isinstance(vstr, VStringPlainValue) and vstart.is_constant() + # and vstop.is_constant()): + # value = self.make_vstring_plain(op.result, op, mode) + # value.setup_slice(vstr._chars, vstart.box.getint(), + # vstop.box.getint()) + # return True # vstr.ensure_nonnull() lengthbox = _int_sub(self, vstop.force_box(self), diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -243,6 +243,18 @@ def opimpl_mark_opaque_ptr(self, box): return self.execute(rop.MARK_OPAQUE_PTR, box) + @arguments("box", "box") + def opimpl_record_known_class(self, box, clsbox): + from pypy.rpython.lltypesystem import llmemory + if self.metainterp.heapcache.is_class_known(box): + return + adr = clsbox.getaddr() + bounding_class = llmemory.cast_adr_to_ptr(adr, rclass.CLASSTYPE) + if bounding_class.subclassrange_max - bounding_class.subclassrange_min == 1: + # precise class knowledge, this can be used + self.execute(rop.RECORD_KNOWN_CLASS, box, clsbox) + self.metainterp.heapcache.class_now_known(box) + @arguments("box") def _opimpl_any_return(self, box): self.metainterp.finishframe(box) @@ -1345,10 +1357,8 @@ if effect == effectinfo.EF_LOOPINVARIANT: return self.execute_varargs(rop.CALL_LOOPINVARIANT, allboxes, descr, False, False) - exc = (effect != effectinfo.EF_CANNOT_RAISE and - effect != effectinfo.EF_ELIDABLE_CANNOT_RAISE) - pure = (effect == effectinfo.EF_ELIDABLE_CAN_RAISE or - effect == effectinfo.EF_ELIDABLE_CANNOT_RAISE) + exc = effectinfo.check_can_raise() + pure = effectinfo.check_is_elidable() return self.execute_varargs(rop.CALL, allboxes, descr, exc, pure) def do_residual_or_indirect_call(self, funcbox, calldescr, argboxes): @@ -1780,7 +1790,6 @@ self.staticdata.profiler.count(reason) debug_print('~~~ ABORTING TRACING') self.staticdata.stats.aborted() - self.resumekey.reset_counter_from_failure() def blackhole_if_trace_too_long(self): warmrunnerstate = self.jitdriver_sd.warmstate diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -90,7 +90,10 @@ return op def __repr__(self): - return self.repr() + try: + return self.repr() + except NotImplementedError: + return object.__repr__(self) def repr(self, graytext=False): # RPython-friendly version @@ -458,6 +461,7 @@ 'GETARRAYITEM_GC/2d', 'GETARRAYITEM_RAW/2d', 'GETINTERIORFIELD_GC/2d', + 'GETINTERIORFIELD_RAW/2d', 'GETFIELD_GC/1d', 'GETFIELD_RAW/1d', '_MALLOC_FIRST', @@ -476,6 +480,7 @@ 'SETARRAYITEM_GC/3d', 'SETARRAYITEM_RAW/3d', 'SETINTERIORFIELD_GC/3d', + 'SETINTERIORFIELD_RAW/3d', 'SETFIELD_GC/2d', 'SETFIELD_RAW/2d', 'STRSETITEM/3', @@ -489,6 +494,7 @@ 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', 'QUASIIMMUT_FIELD/1d', # [objptr], 
descr=SlowMutateDescr + 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/jit/metainterp/resume.py b/pypy/jit/metainterp/resume.py --- a/pypy/jit/metainterp/resume.py +++ b/pypy/jit/metainterp/resume.py @@ -93,12 +93,14 @@ TAGMASK = 3 +class TagOverflow(Exception): + pass + def tag(value, tagbits): - if tagbits >> 2: - raise ValueError + assert 0 <= tagbits <= 3 sx = value >> 13 if sx != 0 and sx != -1: - raise ValueError + raise TagOverflow return rffi.r_short(value<<2|tagbits) def untag(value): @@ -126,6 +128,7 @@ UNASSIGNED = tag(-1<<13, TAGBOX) UNASSIGNEDVIRTUAL = tag(-1<<13, TAGVIRTUAL) NULLREF = tag(-1, TAGCONST) +UNINITIALIZED = tag(-2, TAGCONST) # used for uninitialized string characters class ResumeDataLoopMemo(object): @@ -152,7 +155,7 @@ return self._newconst(const) try: return tag(val, TAGINT) - except ValueError: + except TagOverflow: pass tagged = self.large_ints.get(val, UNASSIGNED) if not tagged_eq(tagged, UNASSIGNED): @@ -428,8 +431,7 @@ fieldnum = self._gettagged(fieldbox) # the index is limited to 2147483647 (64-bit machines only) if itemindex > 2147483647: - from pypy.jit.metainterp import compile - compile.giveup() + raise TagOverflow itemindex = rffi.cast(rffi.INT, itemindex) # rd_pendingfields[i].lldescr = lldescr @@ -439,6 +441,8 @@ self.storage.rd_pendingfields = rd_pendingfields def _gettagged(self, box): + if box is None: + return UNINITIALIZED if isinstance(box, Const): return self.memo.getconst(box) else: @@ -572,7 +576,9 @@ string = decoder.allocate_string(length) decoder.virtuals_cache[index] = string for i in range(length): - decoder.string_setitem(string, i, self.fieldnums[i]) + charnum = self.fieldnums[i] + if not tagged_eq(charnum, UNINITIALIZED): + decoder.string_setitem(string, i, charnum) return string def debug_prints(self): @@ -625,7 +631,9 @@ string = decoder.allocate_unicode(length) decoder.virtuals_cache[index] = string for i in range(length): - decoder.unicode_setitem(string, i, self.fieldnums[i]) + charnum = self.fieldnums[i] + if not tagged_eq(charnum, UNINITIALIZED): + decoder.unicode_setitem(string, i, charnum) return string def debug_prints(self): diff --git a/pypy/jit/metainterp/test/support.py b/pypy/jit/metainterp/test/support.py --- a/pypy/jit/metainterp/test/support.py +++ b/pypy/jit/metainterp/test/support.py @@ -12,7 +12,7 @@ from pypy.rlib.rfloat import isnan def _get_jitcodes(testself, CPUClass, func, values, type_system, - supports_longlong=False, **kwds): + supports_longlong=False, translationoptions={}, **kwds): from pypy.jit.codewriter import support class FakeJitCell(object): @@ -42,7 +42,8 @@ enable_opts = ALL_OPTS_DICT func._jit_unroll_safe_ = True - rtyper = support.annotate(func, values, type_system=type_system) + rtyper = support.annotate(func, values, type_system=type_system, + translationoptions=translationoptions) graphs = rtyper.annotator.translator.graphs testself.all_graphs = graphs result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0] @@ -154,9 +155,11 @@ class JitMixin: basic = True - def check_loops(self, expected=None, everywhere=False, **check): - get_stats().check_loops(expected=expected, everywhere=everywhere, - **check) + def check_resops(self, expected=None, **check): + get_stats().check_resops(expected=expected, **check) + def check_simple_loop(self, expected=None, **check): + get_stats().check_simple_loop(expected=expected, **check) + def check_loop_count(self, count): """NB. 
This is a hack; use check_tree_loop_count() or check_enter_count() for the real thing. diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -14,7 +14,7 @@ from pypy.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside, loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, - isconstant, isvirtual, promote_string) + isconstant, isvirtual, promote_string, set_param, record_known_class) from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -79,9 +79,8 @@ res = self.meta_interp(f, [6, 7]) assert res == 42 self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) + self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, 'guard_true': 2, 'int_sub': 2}) + if self.basic: found = 0 for op in get_stats().loops[0]._all_operations(): @@ -108,7 +107,7 @@ res = self.meta_interp(f, [6, 7]) assert res == 1323 self.check_loop_count(1) - self.check_loops(int_mul=1) + self.check_simple_loop(int_mul=1) def test_loop_variant_mul_ovf(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -125,7 +124,7 @@ res = self.meta_interp(f, [6, 7]) assert res == 1323 self.check_loop_count(1) - self.check_loops(int_mul_ovf=1) + self.check_simple_loop(int_mul_ovf=1) def test_loop_invariant_mul1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -140,9 +139,10 @@ res = self.meta_interp(f, [6, 7]) assert res == 252 self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) + self.check_simple_loop(int_mul=0) + self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, + 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) + def test_loop_invariant_mul_ovf(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -158,10 +158,11 @@ res = self.meta_interp(f, [6, 7]) assert res == 308 self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 2, 'int_sub': 1, 'int_gt': 1, - 'int_lshift': 1, - 'jump': 1}) + self.check_simple_loop(int_mul_ovf=0) + self.check_resops({'jump': 2, 'int_lshift': 2, 'int_gt': 2, + 'int_mul_ovf': 1, 'int_add': 4, + 'guard_true': 2, 'guard_no_overflow': 1, + 'int_sub': 2}) def test_loop_invariant_mul_bridge1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -194,11 +195,9 @@ res = self.meta_interp(f, [6, 32]) assert res == 1167 self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) - + self.check_resops({'int_lt': 3, 'int_gt': 2, 'int_add': 5, + 'guard_true': 3, 'int_sub': 4, 'jump': 4, + 'int_mul': 2, 'guard_false': 2}) def test_loop_invariant_mul_bridge_maintaining2(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -216,10 +215,9 @@ res = self.meta_interp(f, [6, 32]) assert res == 1692 self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) + self.check_resops({'int_lt': 3, 'int_gt': 2, 'int_add': 5, + 'guard_true': 3, 'int_sub': 4, 'jump': 4, + 'int_mul': 2, 'guard_false': 2}) def test_loop_invariant_mul_bridge_maintaining3(self): myjitdriver = JitDriver(greens = [], reds = 
['y', 'res', 'x', 'm']) @@ -237,10 +235,9 @@ res = self.meta_interp(f, [6, 32, 16]) assert res == 1692 self.check_loop_count(3) - self.check_loops({'int_add': 2, 'int_lt': 1, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, 'int_mul': 1, - 'int_gt': 2, 'guard_true': 2}) + self.check_resops({'int_lt': 2, 'int_gt': 4, 'guard_false': 2, + 'guard_true': 4, 'int_sub': 4, 'jump': 4, + 'int_mul': 3, 'int_add': 4}) def test_loop_invariant_intbox(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -261,9 +258,9 @@ res = self.meta_interp(f, [6, 7]) assert res == 252 self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) + self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, + 'getfield_gc_pure': 1, 'int_mul': 1, + 'guard_true': 2, 'int_sub': 2}) def test_loops_are_transient(self): import gc, weakref @@ -381,7 +378,7 @@ assert res == 0 # CALL_PURE is recorded in the history, but turned into a CALL # by optimizeopt.py - self.check_loops(int_sub=0, call=1, call_pure=0) + self.check_resops(call_pure=0, call=2, int_sub=0) def test_constfold_call_elidable(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) @@ -397,7 +394,7 @@ res = self.meta_interp(f, [21, 5]) assert res == -1 # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) + self.check_resops(call_pure=0, call=0, int_sub=2) def test_constfold_call_elidable_2(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) @@ -417,7 +414,7 @@ res = self.meta_interp(f, [21, 5]) assert res == -1 # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) + self.check_resops(call_pure=0, call=0, int_sub=2) def test_elidable_function_returning_object(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) @@ -442,7 +439,7 @@ res = self.meta_interp(f, [21, 5]) assert res == -1 # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0, getfield_gc=0) + self.check_resops(call_pure=0, call=0, getfield_gc=1, int_sub=2) def test_elidable_raising(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) @@ -463,12 +460,12 @@ res = self.meta_interp(f, [22, 6]) assert res == -3 # the CALL_PURE is constant-folded away during tracing - self.check_loops(int_sub=1, call=0, call_pure=0) + self.check_resops(call_pure=0, call=0, int_sub=2) # res = self.meta_interp(f, [22, -5]) assert res == 0 # raises: becomes CALL and is not constant-folded away - self.check_loops(int_sub=1, call=1, call_pure=0) + self.check_resops(call_pure=0, call=2, int_sub=2) def test_elidable_raising_2(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) @@ -489,12 +486,12 @@ res = self.meta_interp(f, [22, 6]) assert res == -3 # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) + self.check_resops(call_pure=0, call=0, int_sub=2) # res = self.meta_interp(f, [22, -5]) assert res == 0 # raises: becomes CALL and is not constant-folded away - self.check_loops(int_sub=1, call=1, call_pure=0) + self.check_resops(call_pure=0, call=2, int_sub=2) def test_constant_across_mp(self): myjitdriver = JitDriver(greens = [], reds = ['n']) @@ -533,7 +530,7 @@ policy = StopAtXPolicy(externfn) res = self.meta_interp(f, [31], policy=policy) assert res == 42 - self.check_loops(int_mul=1, int_mod=0) + self.check_resops(int_mul=2, int_mod=0) def test_we_are_jitted(self): myjitdriver = JitDriver(greens = [], 
reds = ['y']) @@ -835,7 +832,7 @@ return n res = self.meta_interp(f, [20, 1, 2]) assert res == 0 - self.check_loops(call=0) + self.check_resops(call=0) def test_abs(self): myjitdriver = JitDriver(greens = [], reds = ['i', 't']) @@ -865,9 +862,8 @@ res = self.meta_interp(f, [6, 7]) assert res == 42.0 self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'float_add': 1, 'float_sub': 1, 'float_gt': 1, - 'jump': 1}) + self.check_resops({'jump': 2, 'float_gt': 2, 'float_add': 2, + 'float_sub': 2, 'guard_true': 2}) def test_print(self): myjitdriver = JitDriver(greens = [], reds = ['n']) @@ -1038,7 +1034,7 @@ return x res = self.meta_interp(f, [20], enable_opts='') assert res == f(20) - self.check_loops(call=0) + self.check_resops(call=0) def test_zerodivisionerror(self): # test the case of exception-raising operation that is not delegated @@ -1256,15 +1252,18 @@ n -= 1 x += n return x - def f(n, threshold): - myjitdriver.set_param('threshold', threshold) + def f(n, threshold, arg): + if arg: + set_param(myjitdriver, 'threshold', threshold) + else: + set_param(None, 'threshold', threshold) return g(n) - res = self.meta_interp(f, [10, 3]) + res = self.meta_interp(f, [10, 3, 1]) assert res == 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 + 0 self.check_tree_loop_count(2) - res = self.meta_interp(f, [10, 13]) + res = self.meta_interp(f, [10, 13, 0]) assert res == 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 + 0 self.check_tree_loop_count(0) @@ -1348,7 +1347,7 @@ res = self.meta_interp(f, [6, 7]) assert res == 42 self.check_loop_count(1) - self.check_loops(call=1) + self.check_resops(call=2) def test_merge_guardclass_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -1375,8 +1374,7 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - self.check_loops(guard_class=0, guard_value=3) - self.check_loops(guard_class=0, guard_value=6, everywhere=True) + self.check_resops(guard_class=0, guard_value=6) def test_merge_guardnonnull_guardclass(self): from pypy.rlib.objectmodel import instantiate @@ -1404,11 +1402,9 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=2, - guard_nonnull_class=2, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=4, - guard_nonnull_class=4, guard_isnull=2, - everywhere=True) + self.check_resops(guard_class=0, guard_nonnull=4, + guard_nonnull_class=4, guard_isnull=2) + def test_merge_guardnonnull_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -1435,11 +1431,9 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=2, guard_value=2, - guard_nonnull_class=0, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=4, guard_value=4, - guard_nonnull_class=0, guard_isnull=2, - everywhere=True) + self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4, + guard_nonnull_class=0, guard_isnull=2) + def test_merge_guardnonnull_guardvalue_2(self): from pypy.rlib.objectmodel import instantiate @@ -1466,11 +1460,9 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=2, guard_value=2, - guard_nonnull_class=0, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=4, guard_value=4, - guard_nonnull_class=0, guard_isnull=2, - everywhere=True) + self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4, + guard_nonnull_class=0, guard_isnull=2) + def 
test_merge_guardnonnull_guardclass_guardvalue(self): from pypy.rlib.objectmodel import instantiate @@ -1500,11 +1492,9 @@ return x res = self.meta_interp(f, [399], listops=True) assert res == f(399) - self.check_loops(guard_class=0, guard_nonnull=3, guard_value=3, - guard_nonnull_class=0, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=6, guard_value=6, - guard_nonnull_class=0, guard_isnull=2, - everywhere=True) + self.check_resops(guard_class=0, guard_nonnull=6, guard_value=6, + guard_nonnull_class=0, guard_isnull=2) + def test_residual_call_doesnt_lose_info(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'l']) @@ -1530,8 +1520,7 @@ y.v = g(y.v) - y.v/y.v + lc/l[0] - 1 return y.v res = self.meta_interp(f, [20], listops=True) - self.check_loops(getfield_gc=0, getarrayitem_gc=0) - self.check_loops(getfield_gc=1, getarrayitem_gc=0, everywhere=True) + self.check_resops(getarrayitem_gc=0, getfield_gc=1) def test_guard_isnull_nonnull(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) @@ -1559,7 +1548,7 @@ return res res = self.meta_interp(f, [21]) assert res == 42 - self.check_loops(guard_nonnull=1, guard_isnull=1) + self.check_resops(guard_nonnull=2, guard_isnull=2) def test_loop_invariant1(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) @@ -1586,8 +1575,7 @@ return res res = self.meta_interp(g, [21]) assert res == 3 * 21 - self.check_loops(call=0) - self.check_loops(call=1, everywhere=True) + self.check_resops(call=1) def test_bug_optimizeopt_mutates_ops(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'res', 'const', 'a']) @@ -1707,7 +1695,7 @@ return x res = self.meta_interp(f, [8]) assert res == 0 - self.check_loops(jit_debug=2) + self.check_resops(jit_debug=4) def test_assert_green(self): def f(x, promote_flag): @@ -1749,9 +1737,10 @@ res = self.meta_interp(g, [6, 7]) assert res == 6*8 + 6**8 self.check_loop_count(5) - self.check_loops({'guard_true': 2, - 'int_add': 1, 'int_mul': 1, 'int_sub': 2, - 'int_gt': 2, 'jump': 2}) + self.check_resops({'guard_class': 2, 'int_gt': 4, + 'getfield_gc': 4, 'guard_true': 4, + 'int_sub': 4, 'jump': 4, 'int_mul': 2, + 'int_add': 2}) def test_multiple_specialied_versions_array(self): myjitdriver = JitDriver(greens = [], reds = ['idx', 'y', 'x', 'res', @@ -1792,7 +1781,7 @@ res = self.meta_interp(g, [6, 14]) assert res == g(6, 14) self.check_loop_count(9) - self.check_loops(getarrayitem_gc=8, everywhere=True) + self.check_resops(getarrayitem_gc=8) def test_multiple_specialied_versions_bridge(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) @@ -1980,8 +1969,8 @@ res = self.meta_interp(g, [3, 23]) assert res == 7068153 self.check_loop_count(7) - self.check_loops(guard_true=4, guard_class=0, int_add=2, int_mul=2, - guard_false=2) + self.check_resops(guard_true=6, guard_class=2, int_mul=3, + int_add=3, guard_false=3) def test_dont_trace_every_iteration(self): myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'i', 'sa']) @@ -2225,27 +2214,27 @@ return sa assert self.meta_interp(f1, [5, 5]) == 50 - self.check_loops(int_rshift=0, everywhere=True) + self.check_resops(int_rshift=0) for f in (f1, f2): assert self.meta_interp(f, [5, 6]) == 50 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) assert self.meta_interp(f, [10, 5]) == 100 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) assert self.meta_interp(f, [10, 6]) == 100 - self.check_loops(int_rshift=3, everywhere=True) + 
self.check_resops(int_rshift=3) assert self.meta_interp(f, [5, 31]) == 0 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) bigval = 1 while (bigval << 3).__class__ is int: bigval = bigval << 1 assert self.meta_interp(f, [bigval, 5]) == 0 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) def test_overflowing_shift_neg(self): myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'n', 'sa']) @@ -2270,27 +2259,27 @@ return sa assert self.meta_interp(f1, [-5, 5]) == -50 - self.check_loops(int_rshift=0, everywhere=True) + self.check_resops(int_rshift=0) for f in (f1, f2): assert self.meta_interp(f, [-5, 6]) == -50 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) assert self.meta_interp(f, [-10, 5]) == -100 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) assert self.meta_interp(f, [-10, 6]) == -100 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) assert self.meta_interp(f, [-5, 31]) == 0 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) bigval = 1 while (bigval << 3).__class__ is int: bigval = bigval << 1 assert self.meta_interp(f, [bigval, 5]) == 0 - self.check_loops(int_rshift=3, everywhere=True) + self.check_resops(int_rshift=3) def test_pure_op_not_to_be_propagated(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'sa']) @@ -2328,8 +2317,8 @@ get_printable_location=get_printable_location) bytecode = "0j10jc20a3" def f(): - myjitdriver.set_param('threshold', 7) - myjitdriver.set_param('trace_eagerness', 1) + set_param(myjitdriver, 'threshold', 7) + set_param(myjitdriver, 'trace_eagerness', 1) i = j = c = a = 1 while True: myjitdriver.jit_merge_point(i=i, j=j, c=c, a=a) @@ -2430,8 +2419,7 @@ if counter > 10: return 7 assert self.meta_interp(build, []) == 7 - self.check_loops(getfield_gc_pure=0) - self.check_loops(getfield_gc_pure=2, everywhere=True) + self.check_resops(getfield_gc_pure=2) def test_args_becomming_equal(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a', 'b']) @@ -2564,7 +2552,7 @@ i += 1 return sa assert self.meta_interp(f, [20]) == f(20) - self.check_loops(int_gt=1, int_lt=2, int_ge=0, int_le=0) + self.check_resops(int_lt=4, int_le=0, int_ge=0, int_gt=2) def test_intbounds_not_generalized1(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa']) @@ -2581,7 +2569,8 @@ i += 1 return sa assert self.meta_interp(f, [20]) == f(20) - self.check_loops(int_gt=1, int_lt=3, int_ge=2, int_le=1) + self.check_resops(int_lt=6, int_le=2, int_ge=4, int_gt=3) + def test_intbounds_not_generalized2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'node']) @@ -2601,13 +2590,13 @@ i += 1 return sa assert self.meta_interp(f, [20]) == f(20) - self.check_loops(int_gt=1, int_lt=2, int_ge=1, int_le=1) + self.check_resops(int_lt=4, int_le=3, int_ge=3, int_gt=2) def test_retrace_limit1(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a']) def f(n, limit): - myjitdriver.set_param('retrace_limit', limit) + set_param(myjitdriver, 'retrace_limit', limit) sa = i = a = 0 while i < n: myjitdriver.jit_merge_point(n=n, i=i, sa=sa, a=a) @@ -2625,8 +2614,8 @@ myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a']) def f(n, limit): - myjitdriver.set_param('retrace_limit', 3) - myjitdriver.set_param('max_retrace_guards', limit) + set_param(myjitdriver, 'retrace_limit', 3) + set_param(myjitdriver, 'max_retrace_guards', limit) 
sa = i = a = 0 while i < n: myjitdriver.jit_merge_point(n=n, i=i, sa=sa, a=a) @@ -2645,7 +2634,7 @@ myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a', 'node']) def f(n, limit): - myjitdriver.set_param('retrace_limit', limit) + set_param(myjitdriver, 'retrace_limit', limit) sa = i = a = 0 node = [1, 2, 3] node[1] = n @@ -2668,10 +2657,10 @@ myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'i', 'sa']) bytecode = "0+sI0+SI" def f(n): - myjitdriver.set_param('threshold', 3) - myjitdriver.set_param('trace_eagerness', 1) - myjitdriver.set_param('retrace_limit', 5) - myjitdriver.set_param('function_threshold', -1) + set_param(None, 'threshold', 3) + set_param(None, 'trace_eagerness', 1) + set_param(None, 'retrace_limit', 5) + set_param(None, 'function_threshold', -1) pc = sa = i = 0 while pc < len(bytecode): myjitdriver.jit_merge_point(pc=pc, n=n, sa=sa, i=i) @@ -2728,9 +2717,9 @@ myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'a', 'i', 'j', 'sa']) bytecode = "ij+Jj+JI" def f(n, a): - myjitdriver.set_param('threshold', 5) - myjitdriver.set_param('trace_eagerness', 1) - myjitdriver.set_param('retrace_limit', 2) + set_param(None, 'threshold', 5) + set_param(None, 'trace_eagerness', 1) + set_param(None, 'retrace_limit', 2) pc = sa = i = j = 0 while pc < len(bytecode): myjitdriver.jit_merge_point(pc=pc, n=n, sa=sa, i=i, j=j, a=a) @@ -2793,8 +2782,8 @@ return B(self.val + 1) myjitdriver = JitDriver(greens = [], reds = ['sa', 'a']) def f(): - myjitdriver.set_param('threshold', 3) - myjitdriver.set_param('trace_eagerness', 2) + set_param(None, 'threshold', 3) + set_param(None, 'trace_eagerness', 2) a = A(0) sa = 0 while a.val < 8: @@ -2824,8 +2813,8 @@ return B(self.val + 1) myjitdriver = JitDriver(greens = [], reds = ['sa', 'b', 'a']) def f(b): - myjitdriver.set_param('threshold', 6) - myjitdriver.set_param('trace_eagerness', 4) + set_param(None, 'threshold', 6) + set_param(None, 'trace_eagerness', 4) a = A(0) sa = 0 while a.val < 15: @@ -2855,17 +2844,17 @@ return a[0].intvalue res = self.meta_interp(f, [100]) assert res == -2 - #self.check_loops(getarrayitem_gc=0, setarrayitem_gc=0) -- xxx? 
+ self.check_resops(setarrayitem_gc=2, getarrayitem_gc=1) def test_retrace_ending_up_retracing_another_loop(self): myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'i', 'sa']) bytecode = "0+sI0+SI" def f(n): - myjitdriver.set_param('threshold', 3) - myjitdriver.set_param('trace_eagerness', 1) - myjitdriver.set_param('retrace_limit', 5) - myjitdriver.set_param('function_threshold', -1) + set_param(None, 'threshold', 3) + set_param(None, 'trace_eagerness', 1) + set_param(None, 'retrace_limit', 5) + set_param(None, 'function_threshold', -1) pc = sa = i = 0 while pc < len(bytecode): myjitdriver.jit_merge_point(pc=pc, n=n, sa=sa, i=i) @@ -2955,7 +2944,7 @@ i += 1 res = self.meta_interp(f, [32]) assert res == f(32) - self.check_loops(arraylen_gc=2) + self.check_resops(arraylen_gc=3) def test_ulonglong_mod(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'i']) @@ -3142,9 +3131,9 @@ a = A(a.i + 1) self.meta_interp(f, []) - self.check_loops(new_with_vtable=0) + self.check_resops(new_with_vtable=0) self.meta_interp(f, [], enable_opts='') - self.check_loops(new_with_vtable=1) + self.check_resops(new_with_vtable=1) def test_two_loopinvariant_arrays1(self): from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -3236,7 +3225,7 @@ return sa res = self.meta_interp(f, [32]) assert res == f(32) - self.check_loops(arraylen_gc=2, everywhere=True) + self.check_resops(arraylen_gc=2) def test_release_gil_flush_heap_cache(self): if sys.platform == "win32": @@ -3273,7 +3262,7 @@ lock.release() return n res = self.meta_interp(f, [10, 1]) - self.check_loops(getfield_gc=2) + self.check_resops(getfield_gc=4) assert res == f(10, 1) def test_jit_merge_point_with_raw_pointer(self): @@ -3337,10 +3326,10 @@ res = self.meta_interp(main, [0, 10, 2], enable_opts='') assert res == main(0, 10, 2) - self.check_loops(call=1) + self.check_resops(call=1) res = self.meta_interp(main, [1, 10, 2], enable_opts='') assert res == main(1, 10, 2) - self.check_loops(call=0) + self.check_resops(call=0) def test_look_inside_iff_virtual(self): # There's no good reason for this to be look_inside_iff, but it's a test! 
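Two conversions recur throughout these test diffs: the method-style myjitdriver.set_param(name, value) calls become the free function set_param(driver, name, value) imported from pypy.rlib.jit (passing None instead of a driver appears to apply the setting to every driver, as in the threshold test above), and the check_loops()/everywhere=True assertions become check_resops(), which, judging by the updated numbers, counts operations in all generated loops including the preamble, so most expected counts roughly double. A minimal usage sketch of the new parameter API, based only on the calls visible in this diff:

    from pypy.rlib.jit import JitDriver, set_param

    myjitdriver = JitDriver(greens=[], reds=['n'])

    def f(n):
        set_param(myjitdriver, 'threshold', 3)   # tune this driver only
        set_param(None, 'trace_eagerness', 1)    # with None: every driver
        while n > 0:
            myjitdriver.jit_merge_point(n=n)
            n -= 1
        return n
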
@@ -3365,10 +3354,10 @@ i += f(A(2), n) res = self.meta_interp(main, [0], enable_opts='') assert res == main(0) - self.check_loops(call=1, getfield_gc=0) + self.check_resops(call=1, getfield_gc=0) res = self.meta_interp(main, [1], enable_opts='') assert res == main(1) - self.check_loops(call=0, getfield_gc=0) + self.check_resops(call=0, getfield_gc=0) def test_reuse_elidable_result(self): driver = JitDriver(reds=['n', 's'], greens = []) @@ -3381,10 +3370,9 @@ return s res = self.meta_interp(main, [10]) assert res == main(10) - self.check_loops({ - 'call': 1, 'guard_no_exception': 1, 'guard_true': 1, 'int_add': 2, - 'int_gt': 1, 'int_sub': 1, 'strlen': 1, 'jump': 1, - }) + self.check_resops({'int_gt': 2, 'strlen': 2, 'guard_true': 2, + 'int_sub': 2, 'jump': 2, 'call': 2, + 'guard_no_exception': 2, 'int_add': 4}) def test_look_inside_iff_const_getarrayitem_gc_pure(self): driver = JitDriver(greens=['unroll'], reds=['s', 'n']) @@ -3416,10 +3404,10 @@ res = self.meta_interp(main, [0, 10]) assert res == main(0, 10) # 2 calls, one for f() and one for char_mul - self.check_loops(call=2) + self.check_resops(call=4) res = self.meta_interp(main, [1, 10]) assert res == main(1, 10) - self.check_loops(call=0) + self.check_resops(call=0) def test_setarrayitem_followed_by_arraycopy(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'x', 'y']) @@ -3513,12 +3501,15 @@ def f(n): while n > 0: myjitdriver.jit_merge_point(n=n) - n = g({"key": n}) + x = {"key": n} + n = g(x) + del x["key"] return n res = self.meta_interp(f, [10]) assert res == 0 - self.check_loops({"int_sub": 1, "int_gt": 1, "guard_true": 1, "jump": 1}) + self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + 'int_sub': 2}) def test_virtual_opaque_ptr(self): myjitdriver = JitDriver(greens = [], reds = ["n"]) @@ -3537,7 +3528,9 @@ return n res = self.meta_interp(f, [10]) assert res == 0 - self.check_loops({"int_sub": 1, "int_gt": 1, "guard_true": 1, "jump": 1}) + self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + 'int_sub': 2}) + def test_virtual_opaque_dict(self): myjitdriver = JitDriver(greens = [], reds = ["n"]) @@ -3557,8 +3550,165 @@ return n res = self.meta_interp(f, [10]) assert res == 0 - self.check_loops({"int_sub": 1, "int_gt": 1, "guard_true": 1, "jump": 1}) - + self.check_resops({'int_gt': 2, 'getfield_gc': 1, 'int_eq': 1, + 'guard_true': 2, 'int_sub': 2, 'jump': 2, + 'guard_false': 1}) + + + def test_convert_from_SmallFunctionSetPBCRepr_to_FunctionsPBCRepr(self): + f1 = lambda n: n+1 + f2 = lambda n: n+2 + f3 = lambda n: n+3 + f4 = lambda n: n+4 + f5 = lambda n: n+5 + f6 = lambda n: n+6 + f7 = lambda n: n+7 + f8 = lambda n: n+8 + def h(n, x): + return x(n) + h._dont_inline = True + def g(n, x): + return h(n, x) + g._dont_inline = True + def f(n): + n = g(n, f1) + n = g(n, f2) + n = h(n, f3) + n = h(n, f4) + n = h(n, f5) + n = h(n, f6) + n = h(n, f7) + n = h(n, f8) + return n + assert f(5) == 41 + translationoptions = {'withsmallfuncsets': 3} + self.interp_operations(f, [5], translationoptions=translationoptions) + + + def test_annotation_gives_class_knowledge_to_tracer(self): + py.test.skip("disabled") + class Base(object): + pass + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = 
C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + assert isinstance(a, A) + z = a.f() + elif x < 0: + assert isinstance(a, B) + z = a.f() + else: + assert isinstance(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + + def test_give_class_knowledge_to_tracer_explicitly(self): + from pypy.rpython.lltypesystem.lloperation import llop + class Base(object): + def f(self): + raise NotImplementedError + def g(self): + raise NotImplementedError + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + record_known_class(a, A) + z = a.f() + elif x < 0: + record_known_class(a, B) + z = a.f() + else: + record_known_class(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) class TestLLtype(BaseLLtypeTests, LLJitMixin): @@ -3613,7 +3763,9 @@ o = o.dec() pc += 1 return pc - res = self.meta_interp(main, [False, 100, True], taggedpointers=True) + topt = {'taggedpointers': True} + res = self.meta_interp(main, [False, 100, True], + translationoptions=topt) def test_rerased(self): eraseX, uneraseX = rerased.new_erasing_pair("X") @@ -3638,10 +3790,24 @@ else: return rerased.unerase_int(e) # - x = self.interp_operations(f, [-128, 0], taggedpointers=True) + topt = {'taggedpointers': True} + x = self.interp_operations(f, [-128, 0], translationoptions=topt) assert x == -128 bigint = sys.maxint//2 + 1 - x = self.interp_operations(f, [bigint, 0], taggedpointers=True) + x = self.interp_operations(f, [bigint, 0], translationoptions=topt) assert x == -42 - x = self.interp_operations(f, [1000, 1], taggedpointers=True) + x = self.interp_operations(f, [1000, 1], translationoptions=topt) assert x == 999 + + def test_ll_arraycopy(self): + from pypy.rlib import rgc + A = lltype.GcArray(lltype.Char) + a = lltype.malloc(A, 10) + for i in range(10): a[i] = chr(i) + b = lltype.malloc(A, 10) + # + def f(c, d, e): + rgc.ll_arraycopy(a, b, c, d, e) + return 42 + self.interp_operations(f, [1, 2, 3]) + self.check_operations_history(call=1, guard_no_exception=0) diff --git a/pypy/jit/metainterp/test/test_del.py b/pypy/jit/metainterp/test/test_del.py --- a/pypy/jit/metainterp/test/test_del.py +++ b/pypy/jit/metainterp/test/test_del.py @@ -20,12 +20,12 @@ n -= 1 return 
42 self.meta_interp(f, [20]) - self.check_loops({'call': 2, # calls to a helper function - 'guard_no_exception': 2, # follows the calls - 'int_sub': 1, - 'int_gt': 1, - 'guard_true': 1, - 'jump': 1}) + self.check_resops({'call': 4, # calls to a helper function + 'guard_no_exception': 4, # follows the calls + 'int_sub': 2, + 'int_gt': 2, + 'guard_true': 2, + 'jump': 2}) def test_class_of_allocated(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) @@ -78,7 +78,7 @@ return 1 res = self.meta_interp(f, [20], enable_opts='') assert res == 1 - self.check_loops(call=1) # for the case B(), but not for the case A() + self.check_resops(call=1) # for the case B(), but not for the case A() class TestLLtype(DelTests, LLJitMixin): @@ -103,7 +103,7 @@ break return 42 self.meta_interp(f, [20]) - self.check_loops(getfield_raw=1, setfield_raw=1, call=0, call_pure=0) + self.check_resops(call_pure=0, setfield_raw=2, call=0, getfield_raw=2) class TestOOtype(DelTests, OOJitMixin): def setup_class(cls): diff --git a/pypy/jit/metainterp/test/test_dict.py b/pypy/jit/metainterp/test/test_dict.py --- a/pypy/jit/metainterp/test/test_dict.py +++ b/pypy/jit/metainterp/test/test_dict.py @@ -91,7 +91,7 @@ res1 = f(100) res2 = self.meta_interp(f, [100], listops=True) assert res1 == res2 - self.check_loops(int_mod=1) # the hash was traced and eq, but cached + self.check_resops(int_mod=2) # the hash was traced and eq, but cached def test_dict_setdefault(self): myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) @@ -107,7 +107,7 @@ assert f(100) == 50 res = self.meta_interp(f, [100], listops=True) assert res == 50 - self.check_loops(new=0, new_with_vtable=0) + self.check_resops(new=0, new_with_vtable=0) def test_dict_as_counter(self): myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) @@ -128,7 +128,7 @@ assert f(100) == 50 res = self.meta_interp(f, [100], listops=True) assert res == 50 - self.check_loops(int_mod=1) # key + eq, but cached + self.check_resops(int_mod=2) # key + eq, but cached def test_repeated_lookup(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'd']) @@ -153,12 +153,13 @@ res = self.meta_interp(f, [100], listops=True) assert res == f(50) - self.check_loops({"call": 5, "getfield_gc": 1, "getinteriorfield_gc": 1, - "guard_false": 1, "guard_no_exception": 4, - "guard_true": 1, "int_and": 1, "int_gt": 1, - "int_is_true": 1, "int_sub": 1, "jump": 1, - "new_with_vtable": 1, "new": 1, "new_array": 1, - "setfield_gc": 3, }) + self.check_resops({'new_array': 2, 'getfield_gc': 2, + 'guard_true': 2, 'jump': 2, + 'new_with_vtable': 2, 'getinteriorfield_gc': 2, + 'setfield_gc': 6, 'int_gt': 2, 'int_sub': 2, + 'call': 10, 'int_and': 2, + 'guard_no_exception': 8, 'new': 2, + 'guard_false': 2, 'int_is_true': 2}) class TestOOtype(DictTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_exception.py b/pypy/jit/metainterp/test/test_exception.py --- a/pypy/jit/metainterp/test/test_exception.py +++ b/pypy/jit/metainterp/test/test_exception.py @@ -35,10 +35,8 @@ return n res = self.meta_interp(f, [10]) assert res == 0 - self.check_loops({'jump': 1, - 'int_gt': 1, 'guard_true': 1, - 'int_sub': 1}) - + self.check_resops({'jump': 2, 'guard_true': 2, + 'int_gt': 2, 'int_sub': 2}) def test_bridge_from_guard_exception(self): myjitdriver = JitDriver(greens = [], reds = ['n']) diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -1,19 +1,18 @@ 
+import py -import py +from pypy.jit.metainterp.test.support import LLJitMixin +from pypy.rlib.jit import JitDriver, promote, dont_look_inside +from pypy.rlib.libffi import (ArgChain, IS_32_BIT, array_getitem, array_setitem, + types) +from pypy.rlib.objectmodel import specialize from pypy.rlib.rarithmetic import r_singlefloat, r_longlong, r_ulonglong -from pypy.rlib.jit import JitDriver, promote, dont_look_inside +from pypy.rlib.test.test_libffi import TestLibffiCall as _TestLibffiCall from pypy.rlib.unroll import unrolling_iterable -from pypy.rlib.libffi import ArgChain -from pypy.rlib.libffi import IS_32_BIT -from pypy.rlib.test.test_libffi import TestLibffiCall as _TestLibffiCall from pypy.rpython.lltypesystem import lltype, rffi -from pypy.rlib.objectmodel import specialize from pypy.tool.sourcetools import func_with_new_name -from pypy.jit.metainterp.test.support import LLJitMixin -class TestFfiCall(LLJitMixin, _TestLibffiCall): - supports_all = False # supports_{floats,longlong,singlefloats} +class FfiCallTests(_TestLibffiCall): # ===> ../../../rlib/test/test_libffi.py def call(self, funcspec, args, RESULT, is_struct=False, jitif=[]): @@ -68,23 +67,23 @@ 'byval': False} supported = all(d[check] for check in jitif) if supported: - self.check_loops( - call_release_gil=1, # a CALL_RELEASE_GIL, and no other CALLs + self.check_resops( + call_release_gil=2, # a CALL_RELEASE_GIL, and no other CALLs call=0, call_may_force=0, - guard_no_exception=1, - guard_not_forced=1, - int_add=1, - int_lt=1, - guard_true=1, - jump=1) + guard_no_exception=2, + guard_not_forced=2, + int_add=2, + int_lt=2, + guard_true=2, + jump=2) else: - self.check_loops( + self.check_resops( call_release_gil=0, # no CALL_RELEASE_GIL - int_add=1, - int_lt=1, - guard_true=1, - jump=1) + int_add=2, + int_lt=2, + guard_true=2, + jump=2) return res def test_byval_result(self): @@ -92,6 +91,90 @@ test_byval_result.__doc__ = _TestLibffiCall.test_byval_result.__doc__ test_byval_result.dont_track_allocations = True +class FfiLookupTests(object): + def test_array_fields(self): + myjitdriver = JitDriver( + greens = [], + reds = ["n", "i", "points", "result_point"], + ) -class TestFfiCallSupportAll(TestFfiCall): + POINT = lltype.Struct("POINT", + ("x", lltype.Signed), + ("y", lltype.Signed), + ) + def f(points, result_point, n): + i = 0 + while i < n: + myjitdriver.jit_merge_point(i=i, points=points, n=n, + result_point=result_point) + x = array_getitem( + types.slong, rffi.sizeof(lltype.Signed) * 2, points, i, 0 + ) + y = array_getitem( + types.slong, rffi.sizeof(lltype.Signed) * 2, points, i, rffi.sizeof(lltype.Signed) + ) + + cur_x = array_getitem( + types.slong, rffi.sizeof(lltype.Signed) * 2, result_point, 0, 0 + ) + cur_y = array_getitem( + types.slong, rffi.sizeof(lltype.Signed) * 2, result_point, 0, rffi.sizeof(lltype.Signed) + ) + + array_setitem( + types.slong, rffi.sizeof(lltype.Signed) * 2, result_point, 0, 0, cur_x + x + ) + array_setitem( + types.slong, rffi.sizeof(lltype.Signed) * 2, result_point, 0, rffi.sizeof(lltype.Signed), cur_y + y + ) + i += 1 + + def main(n): + with lltype.scoped_alloc(rffi.CArray(POINT), n) as points: + with lltype.scoped_alloc(rffi.CArray(POINT), 1) as result_point: + for i in xrange(n): + points[i].x = i * 2 + points[i].y = i * 2 + 1 + points = rffi.cast(rffi.CArrayPtr(lltype.Char), points) + result_point[0].x = 0 + result_point[0].y = 0 + result_point = rffi.cast(rffi.CArrayPtr(lltype.Char), result_point) + f(points, result_point, n) + result_point = rffi.cast(rffi.CArrayPtr(POINT), 
result_point) + return result_point[0].x * result_point[0].y + + assert self.meta_interp(main, [10]) == main(10) == 9000 + self.check_resops({'jump': 2, 'int_lt': 2, 'setinteriorfield_raw': 4, + 'getinteriorfield_raw': 8, 'int_add': 6, 'guard_true': 2}) + + def test_array_getitem_uint8(self): + myjitdriver = JitDriver( + greens = [], + reds = ["n", "i", "s", "data"], + ) + def f(data, n): + i = s = 0 + while i < n: + myjitdriver.jit_merge_point(n=n, i=i, s=s, data=data) + s += rffi.cast(lltype.Signed, array_getitem(types.uchar, 1, data, 0, 0)) + i += 1 + return s + + def main(n): + with lltype.scoped_alloc(rffi.CArray(rffi.UCHAR), 1) as data: + data[0] = rffi.cast(rffi.UCHAR, 200) + return f(data, n) + + assert self.meta_interp(main, [10]) == 2000 + self.check_resops({'jump': 2, 'int_lt': 2, 'getinteriorfield_raw': 2, + 'guard_true': 2, 'int_add': 4}) + + +class TestFfiCall(FfiCallTests, LLJitMixin): + supports_all = False + +class TestFfiCallSupportAll(FfiCallTests, LLJitMixin): supports_all = True # supports_{floats,longlong,singlefloats} + +class TestFfiLookup(FfiLookupTests, LLJitMixin): + pass diff --git a/pypy/jit/metainterp/test/test_greenfield.py b/pypy/jit/metainterp/test/test_greenfield.py --- a/pypy/jit/metainterp/test/test_greenfield.py +++ b/pypy/jit/metainterp/test/test_greenfield.py @@ -25,7 +25,7 @@ res = self.meta_interp(g, [7]) assert res == -2 self.check_loop_count(2) - self.check_loops(guard_value=0) + self.check_resops(guard_value=0) def test_green_field_2(self): myjitdriver = JitDriver(greens=['ctx.x'], reds=['ctx']) @@ -50,7 +50,7 @@ res = self.meta_interp(g, [7]) assert res == -22 self.check_loop_count(6) - self.check_loops(guard_value=0) + self.check_resops(guard_value=0) class TestLLtypeGreenFieldsTests(GreenFieldsTests, LLJitMixin): diff --git a/pypy/jit/metainterp/test/test_jitdriver.py b/pypy/jit/metainterp/test/test_jitdriver.py --- a/pypy/jit/metainterp/test/test_jitdriver.py +++ b/pypy/jit/metainterp/test/test_jitdriver.py @@ -1,5 +1,5 @@ """Tests for multiple JitDrivers.""" -from pypy.rlib.jit import JitDriver, unroll_safe +from pypy.rlib.jit import JitDriver, unroll_safe, set_param from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.warmspot import get_stats @@ -88,7 +88,7 @@ assert res == loop2(4, 40) # we expect only one int_sub, corresponding to the single # compiled instance of loop1() - self.check_loops(int_sub=1) + self.check_resops(int_sub=2) # the following numbers are not really expectations of the test # itself, but just the numbers that we got after looking carefully # at the generated machine code @@ -113,7 +113,7 @@ return n # def loop2(g, r): - myjitdriver1.set_param('function_threshold', 0) + set_param(None, 'function_threshold', 0) while r > 0: myjitdriver2.can_enter_jit(g=g, r=r) myjitdriver2.jit_merge_point(g=g, r=r) @@ -154,7 +154,7 @@ res = self.meta_interp(loop2, [4, 40], repeat=7, inline=True) assert res == loop2(4, 40) # we expect no int_sub, but a residual call - self.check_loops(int_sub=0, call=1) + self.check_resops(call=2, int_sub=0) def test_multiple_jits_trace_too_long(self): myjitdriver1 = JitDriver(greens=["n"], reds=["i", "box"]) diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py --- a/pypy/jit/metainterp/test/test_list.py +++ b/pypy/jit/metainterp/test/test_list.py @@ -6,8 +6,8 @@ class ListTests: def check_all_virtualized(self): - self.check_loops(new_array=0, setarrayitem_gc=0, getarrayitem_gc=0, - arraylen_gc=0) + 
self.check_resops(setarrayitem_gc=0, new_array=0, arraylen_gc=0, + getarrayitem_gc=0) def test_simple_array(self): jitdriver = JitDriver(greens = [], reds = ['n']) @@ -20,7 +20,7 @@ return n res = self.meta_interp(f, [10], listops=True) assert res == 0 - self.check_loops(int_sub=1) + self.check_resops(int_sub=2) self.check_all_virtualized() def test_list_pass_around(self): @@ -56,7 +56,8 @@ res = self.meta_interp(f, [10], listops=True) assert res == f(10) # one setitem should be gone by now - self.check_loops(call=1, setarrayitem_gc=2, getarrayitem_gc=1) + self.check_resops(setarrayitem_gc=4, getarrayitem_gc=2, call=2) + def test_ll_fixed_setitem_fast(self): jitdriver = JitDriver(greens = [], reds = ['n', 'l']) @@ -93,7 +94,7 @@ res = self.meta_interp(f, [10], listops=True) assert res == f(10) - self.check_loops(setarrayitem_gc=0, getarrayitem_gc=0, call=0) + self.check_resops(setarrayitem_gc=0, call=0, getarrayitem_gc=0) def test_vlist_alloc_and_set(self): # the check_loops fails, because [non-null] * n is not supported yet @@ -141,7 +142,7 @@ res = self.meta_interp(f, [5], listops=True) assert res == 7 - self.check_loops(call=0) + self.check_resops(call=0) def test_fold_getitem_1(self): jitdriver = JitDriver(greens = ['pc', 'n', 'l'], reds = ['total']) @@ -161,7 +162,7 @@ res = self.meta_interp(f, [4], listops=True) assert res == f(4) - self.check_loops(call=0) + self.check_resops(call=0) def test_fold_getitem_2(self): jitdriver = JitDriver(greens = ['pc', 'n', 'l'], reds = ['total', 'x']) @@ -186,7 +187,7 @@ res = self.meta_interp(f, [4], listops=True) assert res == f(4) - self.check_loops(call=0, getfield_gc=0) + self.check_resops(call=0, getfield_gc=0) def test_fold_indexerror(self): jitdriver = JitDriver(greens = [], reds = ['total', 'n', 'lst']) @@ -206,7 +207,7 @@ res = self.meta_interp(f, [15], listops=True) assert res == f(15) - self.check_loops(guard_exception=0) + self.check_resops(guard_exception=0) def test_virtual_resize(self): jitdriver = JitDriver(greens = [], reds = ['n', 's']) @@ -224,9 +225,8 @@ return s res = self.meta_interp(f, [15], listops=True) assert res == f(15) - self.check_loops({"int_add": 1, "int_sub": 1, "int_gt": 1, - "guard_true": 1, "jump": 1}) - + self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, + 'guard_true': 2, 'int_sub': 2}) class TestOOtype(ListTests, OOJitMixin): pass @@ -258,4 +258,4 @@ assert res == f(37) # There is the one actual field on a, plus several fields on the list # itself - self.check_loops(getfield_gc=10, everywhere=True) + self.check_resops(getfield_gc=10) diff --git a/pypy/jit/metainterp/test/test_loop.py b/pypy/jit/metainterp/test/test_loop.py --- a/pypy/jit/metainterp/test/test_loop.py +++ b/pypy/jit/metainterp/test/test_loop.py @@ -1,5 +1,5 @@ import py -from pypy.rlib.jit import JitDriver, hint +from pypy.rlib.jit import JitDriver, hint, set_param from pypy.rlib.objectmodel import compute_hash from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin @@ -60,7 +60,8 @@ assert res == f(6, 13) self.check_loop_count(1) if self.enable_opts: - self.check_loops(getfield_gc = 0, setfield_gc = 1) + self.check_resops(setfield_gc=2, getfield_gc=0) + def test_loop_with_two_paths(self): from pypy.rpython.lltypesystem import lltype @@ -180,7 +181,10 @@ assert res == 42 self.check_loop_count(1) # the 'int_eq' and following 'guard' should be constant-folded - self.check_loops(int_eq=0, guard_true=1, guard_false=0) + if 'unroll' in self.enable_opts: + 
self.check_resops(int_eq=0, guard_true=2, guard_false=0)
+        else:
+            self.check_resops(int_eq=0, guard_true=1, guard_false=0)
         if self.basic:
             found = 0
             for op in get_stats().loops[0]._all_operations():
@@ -364,7 +368,7 @@
         myjitdriver = JitDriver(greens = ['pos'], reds = ['i', 'j', 'n', 'x'])
         bytecode = "IzJxji"
         def f(n, threshold):
-            myjitdriver.set_param('threshold', threshold)
+            set_param(myjitdriver, 'threshold', threshold)
             i = j = x = 0
             pos = 0
             op = '-'
@@ -411,7 +415,7 @@
         myjitdriver = JitDriver(greens = ['pos'], reds = ['i', 'j', 'n', 'x'])
         bytecode = "IzJxji"
         def f(nval, threshold):
-            myjitdriver.set_param('threshold', threshold)
+            set_param(myjitdriver, 'threshold', threshold)
             i, j, x = A(0), A(0), A(0)
             n = A(nval)
             pos = 0
@@ -643,8 +647,12 @@
         res = self.meta_interp(main_interpreter_loop, [1])
         assert res == 102
         self.check_loop_count(1)
-        self.check_loops({'int_add' : 3, 'int_gt' : 1,
-                          'guard_false' : 1, 'jump' : 1})
+        if 'unroll' in self.enable_opts:
+            self.check_resops({'int_add' : 6, 'int_gt' : 2,
+                               'guard_false' : 2, 'jump' : 2})
+        else:
+            self.check_resops({'int_add' : 3, 'int_gt' : 1,
+                               'guard_false' : 1, 'jump' : 1})

     def test_automatic_promotion(self):
         myjitdriver = JitDriver(greens = ['i'],
@@ -686,7 +694,7 @@
         self.check_loop_count(1)
         # These loops do different numbers of ops based on which optimizer we
         # are testing with.
-        self.check_loops(self.automatic_promotion_result)
+        self.check_resops(self.automatic_promotion_result)

     def test_can_enter_jit_outside_main_loop(self):
         myjitdriver = JitDriver(greens=[], reds=['i', 'j', 'a'])
diff --git a/pypy/jit/metainterp/test/test_loop_unroll.py b/pypy/jit/metainterp/test/test_loop_unroll.py
--- a/pypy/jit/metainterp/test/test_loop_unroll.py
+++ b/pypy/jit/metainterp/test/test_loop_unroll.py
@@ -8,7 +8,8 @@
     enable_opts = ALL_OPTS_NAMES

     automatic_promotion_result = {
-        'int_add' : 3, 'int_gt' : 1, 'guard_false' : 1, 'jump' : 1,
+        'int_gt': 2, 'guard_false': 2, 'jump': 2, 'int_add': 6,
+        'guard_value': 1
     }

     # ====> test_loop.py
diff --git a/pypy/jit/metainterp/test/test_math.py b/pypy/jit/metainterp/test/test_math.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/metainterp/test/test_math.py
@@ -0,0 +1,47 @@
+import math
+from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin
+from pypy.rlib.rfloat import isinf, isnan, INFINITY, NAN
+
+class MathTests:
+
+    def test_math_sqrt(self):
+        def f(x):
+            try:
+                return math.sqrt(x)
+            except ValueError:
+                return -INFINITY
+
+        res = self.interp_operations(f, [0.0])
+        assert res == 0.0
+        self.check_operations_history(call_pure=1)
+        #
+        res = self.interp_operations(f, [25.0])
+        assert res == 5.0
+        self.check_operations_history(call_pure=1)
+        #
+        res = self.interp_operations(f, [-0.0])
+        assert str(res) == '-0.0'
+        self.check_operations_history(call_pure=1)
+        #
+        res = self.interp_operations(f, [1000000.0])
+        assert res == 1000.0
+        self.check_operations_history(call_pure=1)
+        #
+        res = self.interp_operations(f, [-1.0])
+        assert res == -INFINITY
+        self.check_operations_history(call_pure=0)
+        #
+        res = self.interp_operations(f, [INFINITY])
+        assert isinf(res) and not isnan(res) and res > 0.0
+        self.check_operations_history(call_pure=0)
+        #
+        res = self.interp_operations(f, [NAN])
+        assert isnan(res) and not isinf(res)
+        self.check_operations_history(call_pure=0)
+
+
+class TestOOtype(MathTests, OOJitMixin):
+    pass
+
+class TestLLtype(MathTests, LLJitMixin):
+    pass
diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py
---
a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -73,8 +73,7 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_loops(guard_not_invalidated=2, getfield_gc=0, - everywhere=True) + self.check_resops(guard_not_invalidated=2, getfield_gc=0) # from pypy.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -103,7 +102,7 @@ assert f(100, 7) == 721 res = self.meta_interp(f, [100, 7]) assert res == 721 - self.check_loops(guard_not_invalidated=0, getfield_gc=1) + self.check_resops(guard_not_invalidated=0, getfield_gc=3) # from pypy.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -134,8 +133,7 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_loops(guard_not_invalidated=2, getfield_gc=0, - everywhere=True) + self.check_resops(guard_not_invalidated=2, getfield_gc=0) def test_change_during_tracing_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -160,7 +158,7 @@ assert f(100, 7) == 721 res = self.meta_interp(f, [100, 7]) assert res == 721 - self.check_loops(guard_not_invalidated=0, getfield_gc=1) + self.check_resops(guard_not_invalidated=0, getfield_gc=2) def test_change_during_tracing_2(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -186,7 +184,7 @@ assert f(100, 7) == 700 res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_loops(guard_not_invalidated=0, getfield_gc=1) + self.check_resops(guard_not_invalidated=0, getfield_gc=2) def test_change_invalidate_reentering(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -212,7 +210,7 @@ assert g(100, 7) == 700707 res = self.meta_interp(g, [100, 7]) assert res == 700707 - self.check_loops(guard_not_invalidated=2, getfield_gc=0) + self.check_resops(guard_not_invalidated=4, getfield_gc=0) def test_invalidate_while_running(self): jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) @@ -324,8 +322,8 @@ assert f(100, 15) == 3009 res = self.meta_interp(f, [100, 15]) assert res == 3009 - self.check_loops(guard_not_invalidated=4, getfield_gc=0, - call_may_force=0, guard_not_forced=0) + self.check_resops(guard_not_invalidated=8, guard_not_forced=0, + call_may_force=0, getfield_gc=0) def test_list_simple_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -347,9 +345,8 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_loops(guard_not_invalidated=2, getfield_gc=0, - getarrayitem_gc=0, getarrayitem_gc_pure=0, - everywhere=True) + self.check_resops(getarrayitem_gc_pure=0, guard_not_invalidated=2, + getarrayitem_gc=0, getfield_gc=0) # from pypy.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -385,9 +382,8 @@ # res = self.meta_interp(f, [100, 7]) assert res == 714 - self.check_loops(guard_not_invalidated=2, getfield_gc=0, - getarrayitem_gc=0, getarrayitem_gc_pure=0, - arraylen_gc=0, everywhere=True) + self.check_resops(getarrayitem_gc_pure=0, guard_not_invalidated=2, + arraylen_gc=0, getarrayitem_gc=0, getfield_gc=0) # from pypy.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -421,9 +417,8 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_loops(guard_not_invalidated=2, getfield_gc=0, - getarrayitem_gc=0, getarrayitem_gc_pure=0, - everywhere=True) + self.check_resops(guard_not_invalidated=2, getfield_gc=0, + getarrayitem_gc=0, getarrayitem_gc_pure=0) # from pypy.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -460,9 +455,9 @@ 
assert f(100, 15) == 3009 res = self.meta_interp(f, [100, 15]) assert res == 3009 - self.check_loops(guard_not_invalidated=4, getfield_gc=0, - getarrayitem_gc=0, getarrayitem_gc_pure=0, - call_may_force=0, guard_not_forced=0) + self.check_resops(call_may_force=0, getfield_gc=0, + getarrayitem_gc_pure=0, guard_not_forced=0, + getarrayitem_gc=0, guard_not_invalidated=8) def test_invalidated_loop_is_not_used_any_more_as_target(self): myjitdriver = JitDriver(greens=['foo'], reds=['x']) diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -1,5 +1,5 @@ import py -from pypy.rlib.jit import JitDriver, we_are_jitted, hint +from pypy.rlib.jit import JitDriver, hint, set_param from pypy.rlib.jit import unroll_safe, dont_look_inside, promote from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror @@ -143,11 +143,11 @@ f = self.get_interpreter(codes) assert self.meta_interp(f, [0, 0, 0], enable_opts='') == 42 - self.check_loops(int_add = 1, call_may_force = 1, call = 0) + self.check_resops(call_may_force=1, int_add=1, call=0) assert self.meta_interp(f, [0, 0, 0], enable_opts='', inline=True) == 42 - self.check_loops(int_add = 2, call_may_force = 0, call = 0, - guard_no_exception = 0) + self.check_resops(call=0, int_add=2, call_may_force=0, + guard_no_exception=0) def test_inline_jitdriver_check(self): code = "021" @@ -160,7 +160,7 @@ inline=True) == 42 # the call is fully inlined, because we jump to subcode[1], thus # skipping completely the JUMP_BACK in subcode[0] - self.check_loops(call_may_force = 0, call_assembler = 0, call = 0) + self.check_resops(call=0, call_may_force=0, call_assembler=0) def test_guard_failure_in_inlined_function(self): def p(pc, code): @@ -308,8 +308,8 @@ pc += 1 return n def main(n): - myjitdriver.set_param('threshold', 3) - myjitdriver.set_param('trace_eagerness', 5) + set_param(None, 'threshold', 3) + set_param(None, 'trace_eagerness', 5) return f("c-l", n) expected = main(100) res = self.meta_interp(main, [100], enable_opts='', inline=True) @@ -329,7 +329,7 @@ return recursive(n - 1) + 1 return 0 def loop(n): - myjitdriver.set_param("threshold", 10) + set_param(myjitdriver, "threshold", 10) pc = 0 while n: myjitdriver.can_enter_jit(n=n) @@ -351,8 +351,8 @@ return 0 myjitdriver = JitDriver(greens=[], reds=['n']) def loop(n): - myjitdriver.set_param("threshold", 4) - myjitdriver.set_param("trace_eagerness", 2) + set_param(None, "threshold", 4) + set_param(None, "trace_eagerness", 2) while n: myjitdriver.can_enter_jit(n=n) myjitdriver.jit_merge_point(n=n) @@ -482,19 +482,19 @@ TRACE_LIMIT = 66 def main(inline): - myjitdriver.set_param("threshold", 10) - myjitdriver.set_param('function_threshold', 60) + set_param(None, "threshold", 10) + set_param(None, 'function_threshold', 60) if inline: - myjitdriver.set_param('inlining', True) + set_param(None, 'inlining', True) else: - myjitdriver.set_param('inlining', False) + set_param(None, 'inlining', False) return loop(100) res = self.meta_interp(main, [0], enable_opts='', trace_limit=TRACE_LIMIT) - self.check_loops(call_may_force=1, call=0) + self.check_resops(call=0, call_may_force=1) res = self.meta_interp(main, [1], enable_opts='', trace_limit=TRACE_LIMIT) - self.check_loops(call_may_force=0, call=0) + self.check_resops(call=0, call_may_force=0) def test_trace_from_start(self): def p(pc, code): @@ -564,11 +564,11 @@ pc += 1 return n def g(m): - 
myjitdriver.set_param('inlining', True) + set_param(None, 'inlining', True) # carefully chosen threshold to make sure that the inner function # cannot be inlined, but the inner function on its own is small # enough - myjitdriver.set_param('trace_limit', 40) + set_param(None, 'trace_limit', 40) if m > 1000000: f('', 0) result = 0 @@ -576,7 +576,7 @@ result += f('-c-----------l-', i+100) self.meta_interp(g, [10], backendopt=True) self.check_aborted_count(1) - self.check_loops(call_assembler=1, call=0) + self.check_resops(call=0, call_assembler=2) self.check_tree_loop_count(3) def test_directly_call_assembler(self): @@ -625,8 +625,7 @@ try: compile.compile_tmp_callback = my_ctc self.meta_interp(portal, [2, 5], inline=True) - self.check_loops(call_assembler=2, call_may_force=0, - everywhere=True) + self.check_resops(call_may_force=0, call_assembler=2) finally: compile.compile_tmp_callback = original_ctc # check that we made a temporary callback @@ -681,8 +680,7 @@ try: compile.compile_tmp_callback = my_ctc self.meta_interp(main, [2, 5], inline=True) - self.check_loops(call_assembler=2, call_may_force=0, - everywhere=True) + self.check_resops(call_may_force=0, call_assembler=2) finally: compile.compile_tmp_callback = original_ctc # check that we made a temporary callback @@ -1021,7 +1019,7 @@ res = self.meta_interp(portal, [2, 0], inline=True, policy=StopAtXPolicy(residual)) assert res == portal(2, 0) - self.check_loops(call_assembler=4, everywhere=True) + self.check_resops(call_assembler=4) def test_inline_without_hitting_the_loop(self): driver = JitDriver(greens = ['codeno'], reds = ['i'], @@ -1045,7 +1043,7 @@ assert portal(0) == 70 res = self.meta_interp(portal, [0], inline=True) assert res == 70 - self.check_loops(call_assembler=0) + self.check_resops(call_assembler=0) def test_inline_with_hitting_the_loop_sometimes(self): driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'], @@ -1071,7 +1069,7 @@ assert portal(0, 1) == 2095 res = self.meta_interp(portal, [0, 1], inline=True) assert res == 2095 - self.check_loops(call_assembler=12, everywhere=True) + self.check_resops(call_assembler=12) def test_inline_with_hitting_the_loop_sometimes_exc(self): driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'], @@ -1109,7 +1107,7 @@ assert main(0, 1) == 2095 res = self.meta_interp(main, [0, 1], inline=True) assert res == 2095 - self.check_loops(call_assembler=12, everywhere=True) + self.check_resops(call_assembler=12) def test_handle_jitexception_in_portal(self): # a test for _handle_jitexception_in_portal in blackhole.py @@ -1207,9 +1205,9 @@ driver.can_enter_jit(c=c, i=i, v=v) break - def main(c, i, set_param, v): - if set_param: - driver.set_param('function_threshold', 0) + def main(c, i, _set_param, v): + if _set_param: + set_param(driver, 'function_threshold', 0) portal(c, i, v) self.meta_interp(main, [10, 10, False, False], inline=True) @@ -1238,7 +1236,32 @@ i += 1 self.meta_interp(portal, [0, 0, 0], inline=True) - self.check_loops(call=0, call_may_force=0) + self.check_resops(call_may_force=0, call=0) + + def test_dont_repeatedly_trace_from_the_same_guard(self): + driver = JitDriver(greens = [], reds = ['level', 'i']) + + def portal(level): + if level == 0: + i = -10 + else: + i = 0 + # + while True: + driver.jit_merge_point(level=level, i=i) + if level == 25: + return 42 + i += 1 + if i <= 0: # <- guard + continue # first make a loop + else: + # then we fail the guard above, doing a recursive call, + # which will itself fail the same guard above, and so on + return portal(level + 1) + 
+ self.meta_interp(portal, [0]) + self.check_loop_count_at_most(2) # and not, e.g., 24 + class TestLLtype(RecursiveTests, LLJitMixin): pass diff --git a/pypy/jit/metainterp/test/test_resume.py b/pypy/jit/metainterp/test/test_resume.py --- a/pypy/jit/metainterp/test/test_resume.py +++ b/pypy/jit/metainterp/test/test_resume.py @@ -23,11 +23,11 @@ assert tag(-3, 2) == rffi.r_short(-3<<2|2) assert tag((1<<13)-1, 3) == rffi.r_short(((1<<15)-1)|3) assert tag(-1<<13, 3) == rffi.r_short((-1<<15)|3) - py.test.raises(ValueError, tag, 3, 5) - py.test.raises(ValueError, tag, 1<<13, 0) - py.test.raises(ValueError, tag, (1<<13)+1, 0) - py.test.raises(ValueError, tag, (-1<<13)-1, 0) - py.test.raises(ValueError, tag, (-1<<13)-5, 0) + py.test.raises(AssertionError, tag, 3, 5) + py.test.raises(TagOverflow, tag, 1<<13, 0) + py.test.raises(TagOverflow, tag, (1<<13)+1, 0) + py.test.raises(TagOverflow, tag, (-1<<13)-1, 0) + py.test.raises(TagOverflow, tag, (-1<<13)-5, 0) def test_untag(): assert untag(tag(3, 1)) == (3, 1) @@ -1135,16 +1135,11 @@ assert ptr2.parent.next == ptr class CompareableConsts(object): - def __init__(self): - self.oldeq = None - def __enter__(self): - assert self.oldeq is None - self.oldeq = Const.__eq__ Const.__eq__ = Const.same_box - + def __exit__(self, type, value, traceback): - Const.__eq__ = self.oldeq + del Const.__eq__ def test_virtual_adder_make_varray(): b2s, b4s = [BoxPtr(), BoxInt(4)] @@ -1323,8 +1318,7 @@ assert rffi.cast(lltype.Signed, pf[1].fieldnum) == 1062 assert rffi.cast(lltype.Signed, pf[1].itemindex) == 2147483647 # - from pypy.jit.metainterp.pyjitpl import SwitchToBlackhole - py.test.raises(SwitchToBlackhole, modifier._add_pending_fields, + py.test.raises(TagOverflow, modifier._add_pending_fields, [(array_a, 42, 63, 2147483648)]) def test_resume_reader_fields_and_arrayitems(): diff --git a/pypy/jit/metainterp/test/test_send.py b/pypy/jit/metainterp/test/test_send.py --- a/pypy/jit/metainterp/test/test_send.py +++ b/pypy/jit/metainterp/test/test_send.py @@ -20,9 +20,8 @@ return c res = self.meta_interp(f, [1]) assert res == 2 - self.check_loops({'jump': 1, - 'int_sub': 1, 'int_gt' : 1, - 'guard_true': 1}) # all folded away + self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + 'int_sub': 2}) # all folded away def test_red_builtin_send(self): myjitdriver = JitDriver(greens = [], reds = ['i', 'counter']) @@ -41,12 +40,9 @@ return res res = self.meta_interp(f, [1], policy=StopAtXPolicy(externfn)) assert res == 2 - if self.type_system == 'ootype': - self.check_loops(call=1, oosend=1) # 'len' remains - else: - # 'len' becomes a getfield('num_items') for now in lltype, - # which is itself encoded as a 'getfield_gc' - self.check_loops(call=1, getfield_gc=1) + # 'len' becomes a getfield('num_items') for now in lltype, + # which is itself encoded as a 'getfield_gc' + self.check_resops(call=2, getfield_gc=2) def test_send_to_single_target_method(self): myjitdriver = JitDriver(greens = [], reds = ['i', 'counter']) @@ -70,11 +66,10 @@ res = self.meta_interp(f, [1], policy=StopAtXPolicy(externfn), backendopt=True) assert res == 43 - self.check_loops({'call': 1, 'guard_no_exception': 1, - 'getfield_gc': 1, - 'int_add': 1, - 'jump': 1, 'int_gt' : 1, 'guard_true' : 1, - 'int_sub' : 1}) + self.check_resops({'int_gt': 2, 'getfield_gc': 2, + 'guard_true': 2, 'int_sub': 2, 'jump': 2, + 'call': 2, 'guard_no_exception': 2, + 'int_add': 2}) def test_red_send_to_green_receiver(self): myjitdriver = JitDriver(greens = ['i'], reds = ['counter', 'j']) @@ -97,7 +92,7 @@ return res res = 
self.meta_interp(f, [4, -1]) assert res == 145 - self.check_loops(int_add = 1, everywhere=True) + self.check_resops(int_add=1) def test_oosend_base(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'w']) @@ -132,7 +127,7 @@ assert res == 17 res = self.meta_interp(f, [4, 14]) assert res == 1404 - self.check_loops(guard_class=0, new_with_vtable=0, new=0) + self.check_resops(guard_class=1, new=0, new_with_vtable=0) def test_three_receivers(self): myjitdriver = JitDriver(greens = [], reds = ['y']) @@ -205,8 +200,7 @@ # of the body in a single bigger loop with no failing guard except # the final one. self.check_loop_count(1) - self.check_loops(guard_class=0, - int_add=2, int_sub=2) + self.check_resops(guard_class=1, int_add=4, int_sub=4) self.check_jumps(14) def test_oosend_guard_failure_2(self): @@ -247,8 +241,7 @@ res = self.meta_interp(f, [4, 28]) assert res == f(4, 28) self.check_loop_count(1) - self.check_loops(guard_class=0, - int_add=2, int_sub=2) + self.check_resops(guard_class=1, int_add=4, int_sub=4) self.check_jumps(14) def test_oosend_different_initial_class(self): @@ -285,8 +278,8 @@ # However, this doesn't match the initial value of 'w'. # XXX This not completely easy to check... self.check_loop_count(1) - self.check_loops(int_add=0, int_lshift=1, guard_class=0, - new_with_vtable=0, new=0) + self.check_resops(guard_class=1, new_with_vtable=0, int_lshift=2, + int_add=0, new=0) def test_indirect_call_unknown_object_1(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y']) @@ -566,10 +559,7 @@ policy = StopAtXPolicy(new, A.foo.im_func, B.foo.im_func) res = self.meta_interp(fn, [0, 20], policy=policy) assert res == 42 - if self.type_system == 'ootype': - self.check_loops(oosend=1) - else: - self.check_loops(call=1) + self.check_resops(call=2) def test_residual_oosend_with_void(self): @@ -597,10 +587,7 @@ policy = StopAtXPolicy(new, A.foo.im_func) res = self.meta_interp(fn, [1, 20], policy=policy) assert res == 41 - if self.type_system == 'ootype': - self.check_loops(oosend=1) - else: - self.check_loops(call=1) + self.check_resops(call=2) def test_constfold_pure_oosend(self): myjitdriver = JitDriver(greens=[], reds = ['i', 'obj']) @@ -621,10 +608,7 @@ policy = StopAtXPolicy(A.foo.im_func) res = self.meta_interp(fn, [1, 20], policy=policy) assert res == 42 - if self.type_system == 'ootype': - self.check_loops(oosend=0) - else: - self.check_loops(call=0) + self.check_resops(call=0) def test_generalize_loop(self): myjitdriver = JitDriver(greens=[], reds = ['i', 'obj']) diff --git a/pypy/jit/metainterp/test/test_slist.py b/pypy/jit/metainterp/test/test_slist.py --- a/pypy/jit/metainterp/test/test_slist.py +++ b/pypy/jit/metainterp/test/test_slist.py @@ -76,7 +76,7 @@ return lst[i] res = self.meta_interp(f, [21], listops=True) assert res == f(21) - self.check_loops(call=0) + self.check_resops(call=0) def test_getitem_neg(self): myjitdriver = JitDriver(greens = [], reds = ['i', 'n']) @@ -92,7 +92,7 @@ return x res = self.meta_interp(f, [-2], listops=True) assert res == 41 - self.check_loops(call=0, guard_value=0) + self.check_resops(call=0, guard_value=0) # we don't support resizable lists on ootype #class TestOOtype(ListTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_string.py b/pypy/jit/metainterp/test/test_string.py --- a/pypy/jit/metainterp/test/test_string.py +++ b/pypy/jit/metainterp/test/test_string.py @@ -30,7 +30,7 @@ return i res = self.meta_interp(f, [10, True, _str('h')], listops=True) assert res == 5 - self.check_loops(**{self.CALL: 1, 
self.CALL_PURE: 0, 'everywhere': True}) + self.check_resops(**{self.CALL: 1, self.CALL_PURE: 0}) def test_eq_folded(self): _str = self._str @@ -50,7 +50,7 @@ return i res = self.meta_interp(f, [10, True, _str('h')], listops=True) assert res == 5 - self.check_loops(**{self.CALL: 0, self.CALL_PURE: 0}) + self.check_resops(**{self.CALL: 0, self.CALL_PURE: 0}) def test_newstr(self): _str, _chr = self._str, self._chr @@ -85,7 +85,7 @@ n -= 1 return 42 self.meta_interp(f, [6]) - self.check_loops(newstr=0, strsetitem=0, strlen=0, + self.check_resops(newstr=0, strsetitem=0, strlen=0, newunicode=0, unicodesetitem=0, unicodelen=0) def test_char2string_escape(self): @@ -126,7 +126,7 @@ return total res = self.meta_interp(f, [6]) assert res == 21 - self.check_loops(newstr=0, strgetitem=0, strsetitem=0, strlen=0, + self.check_resops(newstr=0, strgetitem=0, strsetitem=0, strlen=0, newunicode=0, unicodegetitem=0, unicodesetitem=0, unicodelen=0) @@ -147,7 +147,7 @@ m -= 1 return 42 self.meta_interp(f, [6, 7]) - self.check_loops(newstr=0, strsetitem=0, + self.check_resops(newstr=0, strsetitem=0, newunicode=0, unicodesetitem=0, call=0, call_pure=0) @@ -168,12 +168,11 @@ return 42 self.meta_interp(f, [6, 7]) if _str is str: - self.check_loops(newstr=1, strsetitem=0, copystrcontent=2, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, copystrcontent=4, + strsetitem=0, call=2, newstr=2) else: - self.check_loops(newunicode=1, unicodesetitem=0, - copyunicodecontent=2, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, unicodesetitem=0, call=2, + copyunicodecontent=4, newunicode=2) def test_strconcat_escape_str_char(self): _str, _chr = self._str, self._chr @@ -192,12 +191,11 @@ return 42 self.meta_interp(f, [6, 7]) if _str is str: - self.check_loops(newstr=1, strsetitem=1, copystrcontent=1, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, copystrcontent=2, strsetitem=2, + call=2, newstr=2) else: - self.check_loops(newunicode=1, unicodesetitem=1, - copyunicodecontent=1, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, unicodesetitem=2, call=2, + copyunicodecontent=2, newunicode=2) def test_strconcat_escape_char_str(self): _str, _chr = self._str, self._chr @@ -216,12 +214,11 @@ return 42 self.meta_interp(f, [6, 7]) if _str is str: - self.check_loops(newstr=1, strsetitem=1, copystrcontent=1, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, copystrcontent=2, + strsetitem=2, call=2, newstr=2) else: - self.check_loops(newunicode=1, unicodesetitem=1, - copyunicodecontent=1, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, unicodesetitem=2, call=2, + copyunicodecontent=2, newunicode=2) def test_strconcat_escape_char_char(self): _str, _chr = self._str, self._chr @@ -239,12 +236,11 @@ return 42 self.meta_interp(f, [6, 7]) if _str is str: - self.check_loops(newstr=1, strsetitem=2, copystrcontent=0, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, copystrcontent=0, + strsetitem=4, call=2, newstr=2) else: - self.check_loops(newunicode=1, unicodesetitem=2, - copyunicodecontent=0, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, unicodesetitem=4, call=2, + copyunicodecontent=0, newunicode=2) def test_strconcat_escape_str_char_str(self): _str, _chr = self._str, self._chr @@ -263,12 +259,11 @@ return 42 self.meta_interp(f, [6, 7]) if _str is str: - self.check_loops(newstr=1, strsetitem=1, copystrcontent=2, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, copystrcontent=4, strsetitem=2, + 
call=2, newstr=2) else: - self.check_loops(newunicode=1, unicodesetitem=1, - copyunicodecontent=2, - call=1, call_pure=0) # escape + self.check_resops(call_pure=0, unicodesetitem=2, call=2, + copyunicodecontent=4, newunicode=2) def test_strconcat_guard_fail(self): _str = self._str @@ -325,7 +320,7 @@ m -= 1 return 42 self.meta_interp(f, [6, 7]) - self.check_loops(newstr=0, newunicode=0) + self.check_resops(newunicode=0, newstr=0) def test_str_slice_len_surviving(self): _str = self._str @@ -491,7 +486,7 @@ def __init__(self, s): self.defaultencoding = s _str = self._str - sys = Sys(_str('ascii')) + sys = Sys(_str('ascii')) mydriver = JitDriver(reds = ['n', 'sa'], greens = []) def f(n): sa = 0 @@ -504,13 +499,13 @@ sys.defaultencoding = _str('utf-8') return sa assert self.meta_interp(f, [8]) == f(8) - self.check_loops({'int_add': 1, 'guard_true': 1, 'int_sub': 1, - 'jump': 1, 'int_is_true': 1, - 'guard_not_invalidated': 1}) + self.check_resops({'jump': 2, 'int_is_true': 2, 'int_add': 2, + 'guard_true': 2, 'guard_not_invalidated': 2, + 'int_sub': 2}) def test_promote_string(self): driver = JitDriver(greens = [], reds = ['n']) - + def f(n): while n < 21: driver.jit_merge_point(n=n) @@ -519,7 +514,7 @@ return 0 self.meta_interp(f, [0]) - self.check_loops(call=3 + 1) # one for int2str + self.check_resops(call=7) #class TestOOtype(StringTests, OOJitMixin): # CALL = "oosend" @@ -552,9 +547,8 @@ m -= 1 return 42 self.meta_interp(f, [6, 7]) - self.check_loops(call=1, # escape() - newunicode=1, unicodegetitem=0, - unicodesetitem=1, copyunicodecontent=1) + self.check_resops(unicodesetitem=2, newunicode=2, call=4, + copyunicodecontent=2, unicodegetitem=0) def test_str2unicode_fold(self): _str = self._str @@ -572,9 +566,9 @@ m -= 1 return 42 self.meta_interp(f, [6, 7]) - self.check_loops(call_pure=0, call=1, - newunicode=0, unicodegetitem=0, - unicodesetitem=0, copyunicodecontent=0) + self.check_resops(call_pure=0, unicodesetitem=0, call=2, + newunicode=0, unicodegetitem=0, + copyunicodecontent=0) def test_join_chars(self): jitdriver = JitDriver(reds=['a', 'b', 'c', 'i'], greens=[]) @@ -596,9 +590,8 @@ # The "".join should be unrolled, since the length of x is known since # it is virtual, ensure there are no calls to ll_join_chars, or # allocations. 
- self.check_loops({ - "guard_true": 5, "int_is_true": 3, "int_lt": 2, "int_add": 2, "jump": 2, - }, everywhere=True) + self.check_resops({'jump': 2, 'guard_true': 5, 'int_lt': 2, + 'int_add': 2, 'int_is_true': 3}) def test_virtual_copystringcontent(self): jitdriver = JitDriver(reds=['n', 'result'], greens=[]) diff --git a/pypy/jit/metainterp/test/test_tl.py b/pypy/jit/metainterp/test/test_tl.py --- a/pypy/jit/metainterp/test/test_tl.py +++ b/pypy/jit/metainterp/test/test_tl.py @@ -72,16 +72,16 @@ res = self.meta_interp(main, [0, 6], listops=True, backendopt=True) assert res == 5040 - self.check_loops({'int_mul':1, 'jump':1, - 'int_sub':1, 'int_le':1, 'guard_false':1}) + self.check_resops({'jump': 2, 'int_le': 2, 'guard_value': 1, + 'int_mul': 2, 'guard_false': 2, 'int_sub': 2}) def test_tl_2(self): main = self._get_main() res = self.meta_interp(main, [1, 10], listops=True, backendopt=True) assert res == main(1, 10) - self.check_loops({'int_sub':1, 'int_le':1, - 'guard_false':1, 'jump':1}) + self.check_resops({'int_le': 2, 'int_sub': 2, 'jump': 2, + 'guard_false': 2, 'guard_value': 1}) def test_tl_call(self, listops=True, policy=None): from pypy.jit.tl.tl import interp diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py --- a/pypy/jit/metainterp/test/test_tracingopts.py +++ b/pypy/jit/metainterp/test/test_tracingopts.py @@ -593,6 +593,32 @@ res = self.interp_operations(fn, [sys.maxint]) assert res == 12 + def test_opaque_list(self): + from pypy.rlib.rerased import new_erasing_pair + erase, unerase = new_erasing_pair("test_opaque_list") + def fn(n, ca, cb): + l1 = [n] + l2 = [n] + a1 = erase(l1) + a2 = erase(l1) + a = a1 + if ca: + a = a2 + if n < -100: + unerase(a).append(5) + b = a1 + if cb: + b = a + return unerase(a)[0] + unerase(b)[0] + res = self.interp_operations(fn, [7, 0, 1]) + assert res == 7 * 2 + self.check_operations_history(getarrayitem_gc=0, + getfield_gc=0) + res = self.interp_operations(fn, [-7, 1, 1]) + assert res == -7 * 2 + self.check_operations_history(getarrayitem_gc=0, + getfield_gc=0) + def test_copy_str_content(self): def fn(n): a = StringBuilder() @@ -601,4 +627,4 @@ return x[0] res = self.interp_operations(fn, [0]) assert res == 1 - self.check_operations_history(getarrayitem_gc=0, getarrayitem_gc_pure=0 ) \ No newline at end of file + self.check_operations_history(getarrayitem_gc=0, getarrayitem_gc_pure=0) diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -31,8 +31,9 @@ res = self.meta_interp(f, [10]) assert res == 55 * 10 self.check_loop_count(1) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, + getfield_gc=2, new=0) + def test_virtualized2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node1', 'node2']) @@ -53,8 +54,8 @@ n -= 1 return node1.value * node2.value assert f(10) == self.meta_interp(f, [10]) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=2, + new=0) def test_virtualized_circular1(self): class MyNode(): @@ -79,8 +80,8 @@ res = self.meta_interp(f, [10]) assert res == 55 * 10 self.check_loop_count(1) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, + getfield_gc=3, new=0) def 
test_virtualized_float(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -97,7 +98,7 @@ res = self.meta_interp(f, [10]) assert res == f(10) self.check_loop_count(1) - self.check_loops(new=0, float_add=0) + self.check_resops(new=0, float_add=1) def test_virtualized_float2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -115,7 +116,8 @@ res = self.meta_interp(f, [10]) assert res == f(10) self.check_loop_count(1) - self.check_loops(new=0, float_add=1) + self.check_resops(new=0, float_add=2) + def test_virtualized_2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -139,8 +141,8 @@ res = self.meta_interp(f, [10]) assert res == 55 * 30 self.check_loop_count(1) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=2, + new=0) def test_nonvirtual_obj_delays_loop(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -160,8 +162,8 @@ res = self.meta_interp(f, [500]) assert res == 640 self.check_loop_count(1) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, + getfield_gc=1, new=0) def test_two_loops_with_virtual(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -184,8 +186,9 @@ res = self.meta_interp(f, [18]) assert res == f(18) self.check_loop_count(2) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, + getfield_gc=2, new=0) + def test_two_loops_with_escaping_virtual(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) @@ -212,8 +215,8 @@ res = self.meta_interp(f, [20], policy=StopAtXPolicy(externfn)) assert res == f(20) self.check_loop_count(3) - self.check_loops(**{self._new_op: 1}) - self.check_loops(int_mul=0, call=1) + self.check_resops(**{self._new_op: 1}) + self.check_resops(int_mul=0, call=1) def test_two_virtuals(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'prev']) @@ -236,7 +239,7 @@ res = self.meta_interp(f, [12]) assert res == 78 - self.check_loops(new_with_vtable=0, new=0) + self.check_resops(new_with_vtable=0, new=0) def test_specialied_bridge(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) @@ -281,7 +284,7 @@ res = self.meta_interp(f, [20]) assert res == 9 - self.check_loops(new_with_vtable=0, new=0) + self.check_resops(new_with_vtable=0, new=0) def test_immutable_constant_getfield(self): myjitdriver = JitDriver(greens = ['stufflist'], reds = ['n', 'i']) @@ -307,7 +310,7 @@ res = self.meta_interp(f, [10, 1, 0], listops=True) assert res == 0 - self.check_loops(getfield_gc=0) + self.check_resops(getfield_gc=0) def test_escapes(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'parent']) @@ -336,7 +339,7 @@ res = self.meta_interp(f, [10], policy=StopAtXPolicy(g)) assert res == 3 - self.check_loops(**{self._new_op: 1}) + self.check_resops(**{self._new_op: 1}) def test_virtual_on_virtual(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'parent']) @@ -366,7 +369,7 @@ res = self.meta_interp(f, [10]) assert res == 2 - self.check_loops(new=0, new_with_vtable=0) + self.check_resops(new=0, new_with_vtable=0) def test_bridge_from_interpreter(self): mydriver = JitDriver(reds = ['n', 'f'], greens = []) @@ -609,7 +612,7 @@ return node.value res = self.meta_interp(f, [48, 3], policy=StopAtXPolicy(externfn)) assert res == f(48, 3) - self.check_loop_count(3) + self.check_loop_count(5) res = 
self.meta_interp(f, [40, 3], policy=StopAtXPolicy(externfn)) assert res == f(40, 3) self.check_loop_count(3) @@ -758,6 +761,27 @@ res = self.meta_interp(f, [0x1F, 0x11]) assert res == f(0x1F, 0x11) + def test_duplicated_virtual(self): + myjitdriver = JitDriver(greens = [], reds = ['n', 'node1', 'node2']) + def f(n): + node1 = self._new() + node1.value = 0 + node2 = self._new() + node2.value = 1 + while n > 0: + myjitdriver.jit_merge_point(n=n, node1=node1, node2=node2) + next = self._new() + next.value = node1.value + node2.value + n + node1 = next + node2 = next + n -= 1 + return node1.value + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_resops(new_with_vtable=0, new=0) + + + class VirtualMiscTests: def test_multiple_equal_virtuals(self): @@ -841,7 +865,7 @@ del t2 return i assert self.meta_interp(f, []) == 10 - self.check_loops(new_array=0) + self.check_resops(new_array=0) def test_virtual_streq_bug(self): mydriver = JitDriver(reds = ['i', 's', 'a'], greens = []) @@ -942,8 +966,8 @@ res = self.meta_interp(f, [16]) assert res == f(16) - self.check_loops(getfield_gc=2) - + self.check_resops(getfield_gc=7) + # ____________________________________________________________ # Run 1: all the tests instantiate a real RPython class @@ -985,10 +1009,8 @@ res = self.meta_interp(f, [10]) assert res == 20 self.check_loop_count(1) - self.check_loops(new=0, new_with_vtable=0, - getfield_gc=0, setfield_gc=0) - - + self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=0, + new=0) class TestOOtype_Instance(VirtualTests, OOJitMixin): _new_op = 'new_with_vtable' diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -77,7 +77,7 @@ return xy.inst_x res = self.meta_interp(f, [20]) assert res == 30 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_preexisting_access_2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -102,7 +102,7 @@ assert f(5) == 185 res = self.meta_interp(f, [5]) assert res == 185 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_two_paths_access(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -124,7 +124,7 @@ return xy.inst_x res = self.meta_interp(f, [18]) assert res == 10118 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_synchronize_in_return(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -146,7 +146,7 @@ return xy.inst_x res = self.meta_interp(f, [18]) assert res == 10180 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_virtualizable_and_greens(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n', 'xy'], @@ -174,7 +174,7 @@ return res res = self.meta_interp(f, [40]) assert res == 50 * 4 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_double_frame(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy', 'other'], @@ -197,8 +197,7 @@ return xy.inst_x res = self.meta_interp(f, [20]) assert res == 134 - self.check_loops(getfield_gc=0, setfield_gc=1) - self.check_loops(getfield_gc=1, setfield_gc=2, everywhere=True) + self.check_resops(setfield_gc=2, getfield_gc=1) # ------------------------------ @@ -248,8 +247,8 @@ return xy2.inst_l1[2] res = 
self.meta_interp(f, [16]) assert res == 3001 + 16 * 80 - self.check_loops(getfield_gc=0, setfield_gc=0, - getarrayitem_gc=0, setarrayitem_gc=0) + self.check_resops(setarrayitem_gc=0, setfield_gc=0, + getarrayitem_gc=0, getfield_gc=0) def test_synchronize_arrays_in_return(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -279,8 +278,7 @@ assert f(18) == 10360 res = self.meta_interp(f, [18]) assert res == 10360 - self.check_loops(getfield_gc=0, setfield_gc=0, - getarrayitem_gc=0) + self.check_resops(setfield_gc=0, getarrayitem_gc=0, getfield_gc=0) def test_array_length(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -306,8 +304,8 @@ return xy2.inst_l1[1] res = self.meta_interp(f, [18]) assert res == 2941309 + 18 - self.check_loops(getfield_gc=0, setfield_gc=0, - getarrayitem_gc=0, arraylen_gc=0) + self.check_resops(setfield_gc=0, getarrayitem_gc=0, + arraylen_gc=0, getfield_gc=0) def test_residual_function(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -340,8 +338,8 @@ return xy2.inst_l1[1] res = self.meta_interp(f, [18]) assert res == 2941309 + 18 - self.check_loops(getfield_gc=0, setfield_gc=0, - getarrayitem_gc=0, arraylen_gc=1, call=1) + self.check_resops(call=2, setfield_gc=0, getarrayitem_gc=0, + arraylen_gc=2, getfield_gc=0) def test_double_frame_array(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2', 'other'], @@ -377,8 +375,8 @@ expected = f(20) res = self.meta_interp(f, [20], enable_opts='') assert res == expected - self.check_loops(getfield_gc=1, setfield_gc=0, - arraylen_gc=1, getarrayitem_gc=1, setarrayitem_gc=1) + self.check_resops(setarrayitem_gc=1, setfield_gc=0, + getarrayitem_gc=1, arraylen_gc=1, getfield_gc=1) # ------------------------------ @@ -425,8 +423,7 @@ assert f(18) == 10360 res = self.meta_interp(f, [18]) assert res == 10360 - self.check_loops(getfield_gc=0, setfield_gc=0, - getarrayitem_gc=0) + self.check_resops(setfield_gc=0, getarrayitem_gc=0, getfield_gc=0) # ------------------------------ @@ -460,8 +457,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_loops(getfield_gc=0, setfield_gc=0) - + self.check_resops(setfield_gc=0, getfield_gc=0) def test_virtualizable_with_array(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'x', 'frame'], @@ -495,8 +491,7 @@ res = self.meta_interp(f, [10, 1], listops=True) assert res == f(10, 1) - self.check_loops(getarrayitem_gc=0) - + self.check_resops(getarrayitem_gc=0) def test_subclass_of_virtualizable(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -524,8 +519,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_loops(getfield_gc=0, setfield_gc=0) - + self.check_resops(setfield_gc=0, getfield_gc=0) def test_external_pass(self): jitdriver = JitDriver(greens = [], reds = ['n', 'z', 'frame'], @@ -1011,8 +1005,8 @@ res = self.meta_interp(f, [70], listops=True) assert res == intmask(42 ** 70) - self.check_loops(int_add=0, - int_sub=1) # for 'n -= 1' only + self.check_resops(int_add=0, + int_sub=2) # for 'n -= 1' only def test_simple_access_directly(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -1043,7 +1037,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) from pypy.jit.backend.test.support import BaseCompiledMixin if isinstance(self, BaseCompiledMixin): @@ -1098,42 +1092,42 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_loops(new_with_vtable=0) + 
self.check_resops(new_with_vtable=0) def test_check_for_nonstandardness_only_once(self): - myjitdriver = JitDriver(greens = [], reds = ['frame'], - virtualizables = ['frame']) + myjitdriver = JitDriver(greens = [], reds = ['frame'], + virtualizables = ['frame']) - class Frame(object): - _virtualizable2_ = ['x', 'y', 'z'] + class Frame(object): + _virtualizable2_ = ['x', 'y', 'z'] - def __init__(self, x, y, z=1): - self = hint(self, access_directly=True) - self.x = x - self.y = y - self.z = z + def __init__(self, x, y, z=1): + self = hint(self, access_directly=True) + self.x = x + self.y = y + self.z = z - class SomewhereElse: - pass - somewhere_else = SomewhereElse() + class SomewhereElse: + pass + somewhere_else = SomewhereElse() - def f(n): - frame = Frame(n, 0) - somewhere_else.top_frame = frame # escapes - frame = hint(frame, access_directly=True) - while frame.x > 0: - myjitdriver.can_enter_jit(frame=frame) - myjitdriver.jit_merge_point(frame=frame) - top_frame = somewhere_else.top_frame - child_frame = Frame(frame.x, top_frame.z, 17) - frame.y += child_frame.x - frame.x -= top_frame.z - return somewhere_else.top_frame.y - - res = self.meta_interp(f, [10]) - assert res == 55 - self.check_loops(new_with_vtable=0, ptr_eq=1, everywhere=True) - self.check_history(ptr_eq=2) + def f(n): + frame = Frame(n, 0) + somewhere_else.top_frame = frame # escapes + frame = hint(frame, access_directly=True) + while frame.x > 0: + myjitdriver.can_enter_jit(frame=frame) + myjitdriver.jit_merge_point(frame=frame) + top_frame = somewhere_else.top_frame + child_frame = Frame(frame.x, top_frame.z, 17) + frame.y += child_frame.x + frame.x -= top_frame.z + return somewhere_else.top_frame.y + + res = self.meta_interp(f, [10]) + assert res == 55 + self.check_resops(new_with_vtable=0, ptr_eq=1) + self.check_history(ptr_eq=2) def test_virtual_child_frame_with_arrays(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -1165,7 +1159,7 @@ res = self.meta_interp(f, [10], listops=True) assert res == 55 - self.check_loops(new_with_vtable=0) + self.check_resops(new_with_vtable=0) def test_blackhole_should_not_pay_attention(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -1203,7 +1197,7 @@ res = self.meta_interp(f, [10]) assert res == 155 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_blackhole_should_synchronize(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -1239,7 +1233,7 @@ res = self.meta_interp(f, [10]) assert res == 155 - self.check_loops(getfield_gc=0, setfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=0) def test_blackhole_should_not_reenter(self): if not self.basic: diff --git a/pypy/jit/metainterp/test/test_virtualref.py b/pypy/jit/metainterp/test/test_virtualref.py --- a/pypy/jit/metainterp/test/test_virtualref.py +++ b/pypy/jit/metainterp/test/test_virtualref.py @@ -171,7 +171,7 @@ return 1 # self.meta_interp(f, [10]) - self.check_loops(new_with_vtable=1) # the vref + self.check_resops(new_with_vtable=2) # the vref self.check_aborted_count(0) def test_simple_all_removed(self): @@ -205,8 +205,7 @@ virtual_ref_finish(vref, xy) # self.meta_interp(f, [15]) - self.check_loops(new_with_vtable=0, # all virtualized - new_array=0) + self.check_resops(new_with_vtable=0, new_array=0) self.check_aborted_count(0) def test_simple_no_access(self): @@ -242,7 +241,7 @@ virtual_ref_finish(vref, xy) # self.meta_interp(f, [15]) - self.check_loops(new_with_vtable=1, # the vref: xy doesn't need to be forced + 
self.check_resops(new_with_vtable=2, # the vref: xy doesn't need to be forced new_array=0) # and neither xy.next1/2/3 self.check_aborted_count(0) @@ -280,8 +279,8 @@ exctx.topframeref = vref_None # self.meta_interp(f, [15]) - self.check_loops(new_with_vtable=2, # XY(), the vref - new_array=3) # next1/2/3 + self.check_resops(new_with_vtable=4, # XY(), the vref + new_array=6) # next1/2/3 self.check_aborted_count(0) def test_simple_force_sometimes(self): @@ -320,8 +319,8 @@ # res = self.meta_interp(f, [30]) assert res == 13 - self.check_loops(new_with_vtable=1, # the vref, but not XY() - new_array=0) # and neither next1/2/3 + self.check_resops(new_with_vtable=2, # the vref, but not XY() + new_array=0) # and neither next1/2/3 self.check_loop_count(1) self.check_aborted_count(0) @@ -362,7 +361,7 @@ # res = self.meta_interp(f, [30]) assert res == 13 - self.check_loops(new_with_vtable=0, # all virtualized in the n!=13 loop + self.check_resops(new_with_vtable=0, # all virtualized in the n!=13 loop new_array=0) self.check_loop_count(1) self.check_aborted_count(0) @@ -412,7 +411,7 @@ res = self.meta_interp(f, [72]) assert res == 6 self.check_loop_count(2) # the loop and the bridge - self.check_loops(new_with_vtable=2, # loop: nothing; bridge: vref, xy + self.check_resops(new_with_vtable=2, # loop: nothing; bridge: vref, xy new_array=2) # bridge: next4, next5 self.check_aborted_count(0) @@ -442,8 +441,8 @@ # res = self.meta_interp(f, [15]) assert res == 1 - self.check_loops(new_with_vtable=2, # vref, xy - new_array=1) # next1 + self.check_resops(new_with_vtable=4, # vref, xy + new_array=2) # next1 self.check_aborted_count(0) def test_recursive_call_1(self): @@ -543,7 +542,7 @@ # res = self.meta_interp(f, [15]) assert res == 1 - self.check_loops(new_with_vtable=2) # vref, xy + self.check_resops(new_with_vtable=4) # vref, xy def test_cannot_use_invalid_virtualref(self): myjitdriver = JitDriver(greens = [], reds = ['n']) diff --git a/pypy/jit/metainterp/test/test_virtualstate.py b/pypy/jit/metainterp/test/test_virtualstate.py --- a/pypy/jit/metainterp/test/test_virtualstate.py +++ b/pypy/jit/metainterp/test/test_virtualstate.py @@ -847,7 +847,8 @@ i5 = arraylen_gc(p2, descr=arraydescr) i6 = int_ge(i5, 1) guard_true(i6) [] - jump(p0, p1, p2) + p3 = getarrayitem_gc(p2, 0, descr=arraydescr) + jump(p0, p1, p3, p2) """ self.optimize_bridge(loop, bridge, expected, p0=self.myptr) diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -1,10 +1,7 @@ import py -from pypy.jit.metainterp.warmspot import ll_meta_interp from pypy.jit.metainterp.warmspot import get_stats -from pypy.rlib.jit import JitDriver -from pypy.rlib.jit import unroll_safe +from pypy.rlib.jit import JitDriver, set_param, unroll_safe from pypy.jit.backend.llgraph import runner -from pypy.jit.metainterp.history import BoxInt from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES @@ -97,18 +94,18 @@ n = A().m(n) return n def f(n, enable_opts): - myjitdriver.set_param('enable_opts', hlstr(enable_opts)) + set_param(None, 'enable_opts', hlstr(enable_opts)) return g(n) # check that the set_param will override the default res = self.meta_interp(f, [10, llstr('')]) assert res == 0 - self.check_loops(new_with_vtable=1) + self.check_resops(new_with_vtable=1) res = self.meta_interp(f, [10, llstr(ALL_OPTS_NAMES)], enable_opts='') assert res == 0 - 
self.check_loops(new_with_vtable=0) + self.check_resops(new_with_vtable=0) def test_unwanted_loops(self): mydriver = JitDriver(reds = ['n', 'total', 'm'], greens = []) @@ -163,7 +160,7 @@ return n self.meta_interp(f, [50], backendopt=True) self.check_enter_count_at_most(2) - self.check_loops(call=0) + self.check_resops(call=0) def test_loop_header(self): # artificial test: we enter into the JIT only when can_enter_jit() @@ -187,7 +184,7 @@ assert f(15) == 1 res = self.meta_interp(f, [15], backendopt=True) assert res == 1 - self.check_loops(int_add=1) # I get 13 without the loop_header() + self.check_resops(int_add=2) # I get 13 without the loop_header() def test_omit_can_enter_jit(self): # Simple test comparing the effects of always giving a can_enter_jit(), @@ -249,8 +246,8 @@ m = m - 1 self.meta_interp(f1, [8]) self.check_loop_count(1) - self.check_loops({'int_sub': 1, 'int_gt': 1, 'guard_true': 1, - 'jump': 1}) + self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + 'int_sub': 2}) def test_void_red_variable(self): mydriver = JitDriver(greens=[], reds=['a', 'm']) diff --git a/pypy/jit/metainterp/test/test_ztranslation.py b/pypy/jit/metainterp/test/test_ztranslation.py --- a/pypy/jit/metainterp/test/test_ztranslation.py +++ b/pypy/jit/metainterp/test/test_ztranslation.py @@ -1,7 +1,7 @@ import py from pypy.jit.metainterp.warmspot import rpython_ll_meta_interp, ll_meta_interp from pypy.jit.backend.llgraph import runner -from pypy.rlib.jit import JitDriver, unroll_parameters +from pypy.rlib.jit import JitDriver, unroll_parameters, set_param from pypy.rlib.jit import PARAMETERS, dont_look_inside, hint from pypy.jit.metainterp.jitprof import Profiler from pypy.rpython.lltypesystem import lltype, llmemory @@ -57,9 +57,9 @@ get_printable_location=get_printable_location) def f(i): for param, defl in unroll_parameters: - jitdriver.set_param(param, defl) - jitdriver.set_param("threshold", 3) - jitdriver.set_param("trace_eagerness", 2) + set_param(jitdriver, param, defl) + set_param(jitdriver, "threshold", 3) + set_param(jitdriver, "trace_eagerness", 2) total = 0 frame = Frame(i) while frame.l[0] > 3: @@ -117,8 +117,8 @@ raise ValueError return 2 def main(i): - jitdriver.set_param("threshold", 3) - jitdriver.set_param("trace_eagerness", 2) + set_param(jitdriver, "threshold", 3) + set_param(jitdriver, "trace_eagerness", 2) total = 0 n = i while n > 3: diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -48,13 +48,13 @@ translator.warmrunnerdesc = warmrunnerdesc # for later debugging def ll_meta_interp(function, args, backendopt=False, type_system='lltype', - listcomp=False, **kwds): + listcomp=False, translationoptions={}, **kwds): if listcomp: extraconfigopts = {'translation.list_comprehension_operations': True} else: extraconfigopts = {} - if kwds.pop("taggedpointers", False): - extraconfigopts["translation.taggedpointers"] = True + for key, value in translationoptions.items(): + extraconfigopts['translation.' 
+ key] = value interp, graph = get_interpreter(function, args, backendopt=False, # will be done below type_system=type_system, @@ -120,7 +120,8 @@ op = block.operations[i] if (op.opname == 'jit_marker' and op.args[0].value == marker_name and - op.args[1].value.active): # the jitdriver + (op.args[1].value is None or + op.args[1].value.active)): # the jitdriver results.append((graph, block, i)) return results @@ -254,10 +255,8 @@ s_binding = self.translator.annotator.binding jd._portal_args_s = [s_binding(v) for v in args] graph = copygraph(graph) - graph.startblock.isstartblock = False [jmpp] = find_jit_merge_points([graph]) graph.startblock = support.split_before_jit_merge_point(*jmpp) - graph.startblock.isstartblock = True # a crash in the following checkgraph() means that you forgot # to list some variable in greens=[] or reds=[] in JitDriver, # or that a jit_merge_point() takes a constant as an argument. @@ -846,11 +845,18 @@ _, PTR_SET_PARAM_STR_FUNCTYPE = self.cpu.ts.get_FuncType( [lltype.Ptr(STR)], lltype.Void) def make_closure(jd, fullfuncname, is_string): - state = jd.warmstate - def closure(i): - if is_string: - i = hlstr(i) - getattr(state, fullfuncname)(i) + if jd is None: + def closure(i): + if is_string: + i = hlstr(i) + for jd in self.jitdrivers_sd: + getattr(jd.warmstate, fullfuncname)(i) + else: + state = jd.warmstate + def closure(i): + if is_string: + i = hlstr(i) + getattr(state, fullfuncname)(i) if is_string: TP = PTR_SET_PARAM_STR_FUNCTYPE else: @@ -859,12 +865,16 @@ return Constant(funcptr, TP) # for graph, block, i in find_set_param(graphs): + op = block.operations[i] - for jd in self.jitdrivers_sd: - if jd.jitdriver is op.args[1].value: - break + if op.args[1].value is not None: + for jd in self.jitdrivers_sd: + if jd.jitdriver is op.args[1].value: + break + else: + assert 0, "jitdriver of set_param() not found" else: - assert 0, "jitdriver of set_param() not found" + jd = None funcname = op.args[2].value key = jd, funcname if key not in closures: diff --git a/pypy/jit/tl/spli/test/test_jit.py b/pypy/jit/tl/spli/test/test_jit.py --- a/pypy/jit/tl/spli/test/test_jit.py +++ b/pypy/jit/tl/spli/test/test_jit.py @@ -36,7 +36,7 @@ i = i + 1 return i self.interpret(f, []) - self.check_loops(new_with_vtable=0) + self.check_resops(new_with_vtable=0) def test_bridge(self): py.test.skip('We currently cant virtualize across bridges') @@ -52,7 +52,7 @@ return total self.interpret(f, [1, 10]) - self.check_loops(new_with_vtable=0) + self.check_resops(new_with_vtable=0) def test_bridge_bad_case(self): py.test.skip('We currently cant virtualize across bridges') @@ -67,7 +67,7 @@ return a + b self.interpret(f, [1, 10]) - self.check_loops(new_with_vtable=1) # XXX should eventually be 0? + self.check_resops(new_with_vtable=1) # XXX should eventually be 0? # I think it should be either 0 or 2, 1 makes little sense # If the loop after entering goes first time to the bridge, a # is rewrapped again, without preserving the identity. 
I'm not diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -95,17 +95,17 @@ return space.newlist(res_w) -def range_withspecialized_implementation(space, start, step, howmany): +def range_withspecialized_implementation(space, start, step, length): assert space.config.objspace.std.withrangelist - from pypy.objspace.std.rangeobject import W_RangeListObject - return W_RangeListObject(start, step, howmany) + from pypy.objspace.std.listobject import make_range_list + return make_range_list(space, start, step, length) bigint_one = rbigint.fromint(1) def range_with_longs(space, w_start, w_stop, w_step): start = lo = space.bigint_w(w_start) - stop = hi = space.bigint_w(w_stop) + hi = space.bigint_w(w_stop) step = st = space.bigint_w(w_step) if not step.tobool(): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -27,6 +27,7 @@ 'builtinify' : 'interp_magic.builtinify', 'lookup_special' : 'interp_magic.lookup_special', 'do_what_I_mean' : 'interp_magic.do_what_I_mean', + 'list_strategy' : 'interp_magic.list_strategy', } submodules = { diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -5,7 +5,6 @@ from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import IndexCache - def internal_repr(space, w_object): return space.wrap('%r' % (w_object,)) @@ -73,3 +72,11 @@ def do_what_I_mean(space): return space.wrap(42) + +def list_strategy(space, w_list): + from pypy.objspace.std.listobject import W_ListObject + if isinstance(w_list, W_ListObject): + return space.wrap(w_list.strategy._applevel_repr) + else: + w_msg = space.wrap("Can only get the list strategy of a list") + raise OperationError(space.w_TypeError, w_msg) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -5,7 +5,7 @@ def setup_class(cls): if option.runappdirect: py.test.skip("does not make sense on pypy-c") - cls.space = gettestobjspace(**{"objspace.usemodules.select": False}) + cls.space = gettestobjspace(**{"objspace.usemodules.select": False, "objspace.std.withrangelist": True}) def test__isfake(self): from __pypy__ import isfake @@ -54,3 +54,21 @@ from __pypy__ import do_what_I_mean x = do_what_I_mean() assert x == 42 + + def test_list_strategy(self): + from __pypy__ import list_strategy + + l = [1, 2, 3] + assert list_strategy(l) == "int" + l = ["a", "b", "c"] + assert list_strategy(l) == "str" + l = [1.1, 2.2, 3.3] + assert list_strategy(l) == "float" + l = range(3) + assert list_strategy(l) == "range" + l = [1, "b", 3] + assert list_strategy(l) == "object" + l = [] + assert list_strategy(l) == "empty" + o = 5 + raises(TypeError, list_strategy, 5) diff --git a/pypy/module/_bisect/test/test_ztranslation.py b/pypy/module/_bisect/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/_bisect/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_checkmodule(): + checkmodule('_bisect') diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ 
b/pypy/module/_codecs/interp_codecs.py @@ -67,10 +67,7 @@ if self.unicodedata_handler: return self.unicodedata_handler try: - w_builtin = space.getbuiltinmodule('__builtin__') - w_import = space.getattr(w_builtin, space.wrap("__import__")) - w_unicodedata = space.call_function(w_import, - space.wrap("unicodedata")) + w_unicodedata = space.getbuiltinmodule("unicodedata") w_getcode = space.getattr(w_unicodedata, space.wrap("_get_code")) except OperationError: return None diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -13,12 +13,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): @@ -36,7 +38,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -19,11 +19,41 @@ def test_keyerror_without_factory(self): from _collections import defaultdict - d1 = defaultdict() - for key in ['foo', (1,)]: - try: - d1[key] - except KeyError, err: - assert err.args[0] == key - else: - assert 0, "expected KeyError" + for d1 in [defaultdict(), defaultdict(None)]: + for key in ['foo', (1,)]: + try: + d1[key] + except KeyError, err: + assert err.args[0] == key + else: + assert 0, "expected KeyError" + + def test_noncallable(self): + from _collections import defaultdict + raises(TypeError, defaultdict, [('a', 5)]) + d = defaultdict(None, [('a', 5)]) + assert d.items() == [('a', 5)] + + def test_kwds(self): + from _collections import defaultdict + d = defaultdict(default_factory=5) + assert d.keys() == ['default_factory'] + + def test_copy(self): + import _collections + def f(): + return 42 + d = _collections.defaultdict(f, {2: 3}) + # + d1 = d.copy() + assert type(d1) is _collections.defaultdict + assert len(d1) == 1 + assert d1[2] == 3 + assert d1[3] == 42 + # + import copy + d2 = copy.deepcopy(d) + assert type(d2) is _collections.defaultdict + assert len(d2) == 1 + assert d2[2] == 3 + assert d2[3] == 42 diff --git a/pypy/module/_ffi/interp_ffi.py b/pypy/module/_ffi/interp_ffi.py --- a/pypy/module/_ffi/interp_ffi.py +++ b/pypy/module/_ffi/interp_ffi.py @@ -457,14 +457,14 @@ # ======================================================================== class W_CDLL(Wrappable): - def __init__(self, space, name): + def __init__(self, space, name, mode): self.space = space if name is None: self.name = "" else: self.name = name try: - self.cdll = libffi.CDLL(name) + self.cdll = libffi.CDLL(name, mode) except DLOpenError, e: raise operationerrfmt(space.w_OSError, '%s: %s', self.name, e.msg or 'unspecified error') @@ -492,9 +492,9 @@ "No symbol %s found 
in library %s", name, self.name) return space.wrap(address_as_uint) - at unwrap_spec(name='str_or_None') -def descr_new_cdll(space, w_type, name): - return space.wrap(W_CDLL(space, name)) + at unwrap_spec(name='str_or_None', mode=int) +def descr_new_cdll(space, w_type, name, mode=-1): + return space.wrap(W_CDLL(space, name, mode)) W_CDLL.typedef = TypeDef( @@ -509,6 +509,6 @@ def get_libc(space): from pypy.rlib.clibffi import get_libc_name try: - return space.wrap(W_CDLL(space, get_libc_name())) + return space.wrap(W_CDLL(space, get_libc_name(), -1)) except OSError, e: raise wrap_oserror(space, e) diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -206,24 +206,28 @@ @unwrap_spec(size=int) def direct_readlines(self, size=0): stream = self.getstream() - # NB. this implementation is very inefficient for unbuffered - # streams, but ok if stream.readline() is efficient. + # this is implemented as: .read().split('\n') + # except that it keeps the \n in the resulting strings if size <= 0: - result = [] - while True: - line = stream.readline() - if not line: - break - result.append(line) - size -= len(line) + data = stream.readall() else: - result = [] - while size > 0: - line = stream.readline() - if not line: - break - result.append(line) - size -= len(line) + data = stream.read(size) + result = [] + splitfrom = 0 + for i in range(len(data)): + if data[i] == '\n': + result.append(data[splitfrom : i + 1]) + splitfrom = i + 1 + # + if splitfrom < len(data): + # there is a partial line at the end. If size > 0, it is likely + # to be because the 'read(size)' returned data up to the middle + # of a line. In that case, use 'readline()' to read until the + # end of the current line. + data = data[splitfrom:] + if size > 0: + data += stream.readline() + result.append(data) return result @unwrap_spec(offset=r_longlong, whence=int) diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py --- a/pypy/module/_hashlib/interp_hashlib.py +++ b/pypy/module/_hashlib/interp_hashlib.py @@ -4,32 +4,44 @@ from pypy.interpreter.error import OperationError from pypy.tool.sourcetools import func_renamer from pypy.interpreter.baseobjspace import Wrappable -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.lltypesystem import lltype, llmemory, rffi +from pypy.rlib import rgc, ropenssl from pypy.rlib.objectmodel import keepalive_until_here -from pypy.rlib import ropenssl from pypy.rlib.rstring import StringBuilder from pypy.module.thread.os_lock import Lock algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512') +# HASH_MALLOC_SIZE is the size of EVP_MD, EVP_MD_CTX plus their points +# Used for adding memory pressure. Last number is an (under?)estimate of +# EVP_PKEY_CTX's size. +# XXX: Make a better estimate here +HASH_MALLOC_SIZE = ropenssl.EVP_MD_SIZE + ropenssl.EVP_MD_CTX_SIZE \ + + rffi.sizeof(ropenssl.EVP_MD) * 2 + 208 + class W_Hash(Wrappable): ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO) + _block_size = -1 def __init__(self, space, name): self.name = name + self.digest_size = self.compute_digest_size() # Allocate a lock for each HASH object. # An optimization would be to not release the GIL on small requests, # and use a custom lock only when needed. 
self.lock = Lock(space) + ctx = lltype.malloc(ropenssl.EVP_MD_CTX.TO, flavor='raw') + rgc.add_memory_pressure(HASH_MALLOC_SIZE + self.digest_size) + self.ctx = ctx + + def initdigest(self, space, name): digest = ropenssl.EVP_get_digestbyname(name) if not digest: raise OperationError(space.w_ValueError, space.wrap("unknown hash function")) - ctx = lltype.malloc(ropenssl.EVP_MD_CTX.TO, flavor='raw') - ropenssl.EVP_DigestInit(ctx, digest) - self.ctx = ctx + ropenssl.EVP_DigestInit(self.ctx, digest) def __del__(self): # self.lock.free() @@ -65,33 +77,29 @@ "Return the digest value as a string of hexadecimal digits." digest = self._digest(space) hexdigits = '0123456789abcdef' - result = StringBuilder(self._digest_size() * 2) + result = StringBuilder(self.digest_size * 2) for c in digest: result.append(hexdigits[(ord(c) >> 4) & 0xf]) result.append(hexdigits[ ord(c) & 0xf]) return space.wrap(result.build()) def get_digest_size(self, space): - return space.wrap(self._digest_size()) + return space.wrap(self.digest_size) def get_block_size(self, space): - return space.wrap(self._block_size()) + return space.wrap(self.compute_block_size()) def _digest(self, space): - copy = self.copy(space) - ctx = copy.ctx - digest_size = self._digest_size() - digest = lltype.malloc(rffi.CCHARP.TO, digest_size, flavor='raw') + with lltype.scoped_alloc(ropenssl.EVP_MD_CTX.TO) as ctx: + with self.lock: + ropenssl.EVP_MD_CTX_copy(ctx, self.ctx) + digest_size = self.digest_size + with lltype.scoped_alloc(rffi.CCHARP.TO, digest_size) as digest: + ropenssl.EVP_DigestFinal(ctx, digest, None) + ropenssl.EVP_MD_CTX_cleanup(ctx) + return rffi.charpsize2str(digest, digest_size) - try: - ropenssl.EVP_DigestFinal(ctx, digest, None) - return rffi.charpsize2str(digest, digest_size) - finally: - keepalive_until_here(copy) - lltype.free(digest, flavor='raw') - - - def _digest_size(self): + def compute_digest_size(self): # XXX This isn't the nicest way, but the EVP_MD_size OpenSSL # XXX function is defined as a C macro on OS X and would be # XXX significantly harder to implement in another way. @@ -105,12 +113,14 @@ 'sha512': 64, 'SHA512': 64, }.get(self.name, 0) - def _block_size(self): + def compute_block_size(self): + if self._block_size != -1: + return self._block_size # XXX This isn't the nicest way, but the EVP_MD_CTX_block_size # XXX OpenSSL function is defined as a C macro on some systems # XXX and would be significantly harder to implement in # XXX another way. 
- return { + self._block_size = { 'md5': 64, 'MD5': 64, 'sha1': 64, 'SHA1': 64, 'sha224': 64, 'SHA224': 64, @@ -118,6 +128,7 @@ 'sha384': 128, 'SHA384': 128, 'sha512': 128, 'SHA512': 128, }.get(self.name, 0) + return self._block_size W_Hash.typedef = TypeDef( 'HASH', @@ -135,6 +146,7 @@ @unwrap_spec(name=str, string='bufferstr') def new(space, name, string=''): w_hash = W_Hash(space, name) + w_hash.initdigest(space, name) w_hash.update(space, string) return space.wrap(w_hash) diff --git a/pypy/module/_minimal_curses/__init__.py b/pypy/module/_minimal_curses/__init__.py --- a/pypy/module/_minimal_curses/__init__.py +++ b/pypy/module/_minimal_curses/__init__.py @@ -4,7 +4,8 @@ try: import _minimal_curses as _curses # when running on top of pypy-c except ImportError: - raise ImportError("no _curses or _minimal_curses module") # no _curses at all + import py + py.test.skip("no _curses or _minimal_curses module") #no _curses at all from pypy.interpreter.mixedmodule import MixedModule from pypy.module._minimal_curses import fficurses diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -100,7 +100,6 @@ res, newbuf = self.do_recv_string( space, self.BUFFER_SIZE, maxlength) - res = intmask(res) # XXX why? try: if newbuf: return space.wrap(rffi.charpsize2str(newbuf, res)) @@ -117,7 +116,6 @@ res, newbuf = self.do_recv_string( space, length - offset, PY_SSIZE_T_MAX) - res = intmask(res) # XXX why? try: if newbuf: raise BufferTooShort(space, space.wrap( @@ -148,7 +146,6 @@ res, newbuf = self.do_recv_string( space, self.BUFFER_SIZE, PY_SSIZE_T_MAX) - res = intmask(res) # XXX why? try: if newbuf: w_received = space.wrap(rffi.charpsize2str(newbuf, res)) @@ -413,7 +410,7 @@ self.buffer, min(self.BUFFER_SIZE, buflength), read_ptr, rffi.NULL) if result: - return read_ptr[0], lltype.nullptr(rffi.CCHARP.TO) + return intmask(read_ptr[0]), lltype.nullptr(rffi.CCHARP.TO) err = rwin32.GetLastError() if err == ERROR_BROKEN_PIPE: @@ -476,7 +473,7 @@ block = timeout < 0 if not block: # XXX does not check for overflow - deadline = _GetTickCount() + int(1000 * timeout + 0.5) + deadline = intmask(_GetTickCount()) + int(1000 * timeout + 0.5) else: deadline = 0 @@ -500,7 +497,7 @@ return True if not block: - now = _GetTickCount() + now = intmask(_GetTickCount()) if now > deadline: return False diff = deadline - now diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py --- a/pypy/module/_multiprocessing/interp_semaphore.py +++ b/pypy/module/_multiprocessing/interp_semaphore.py @@ -4,6 +4,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import wrap_oserror, OperationError from pypy.rpython.lltypesystem import rffi, lltype +from pypy.rlib import rgc from pypy.rlib.rarithmetic import r_uint from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.tool import rffi_platform as platform @@ -23,6 +24,8 @@ _CreateSemaphore = rwin32.winexternal( 'CreateSemaphoreA', [rffi.VOIDP, rffi.LONG, rffi.LONG, rwin32.LPCSTR], rwin32.HANDLE) + _CloseHandle = rwin32.winexternal('CloseHandle', [rwin32.HANDLE], + rwin32.BOOL, threadsafe=False) _ReleaseSemaphore = rwin32.winexternal( 'ReleaseSemaphore', [rwin32.HANDLE, rffi.LONG, rffi.LONGP], rwin32.BOOL) @@ -51,6 +54,7 @@ SEM_FAILED = platform.ConstantInteger('SEM_FAILED') SEM_VALUE_MAX = 
platform.ConstantInteger('SEM_VALUE_MAX') SEM_TIMED_WAIT = platform.Has('sem_timedwait') + SEM_T_SIZE = platform.SizeOf('sem_t') config = platform.configure(CConfig) TIMEVAL = config['TIMEVAL'] @@ -61,18 +65,21 @@ SEM_FAILED = config['SEM_FAILED'] # rffi.cast(SEM_T, config['SEM_FAILED']) SEM_VALUE_MAX = config['SEM_VALUE_MAX'] SEM_TIMED_WAIT = config['SEM_TIMED_WAIT'] + SEM_T_SIZE = config['SEM_T_SIZE'] if sys.platform == 'darwin': HAVE_BROKEN_SEM_GETVALUE = True else: HAVE_BROKEN_SEM_GETVALUE = False - def external(name, args, result): + def external(name, args, result, **kwargs): return rffi.llexternal(name, args, result, - compilation_info=eci) + compilation_info=eci, **kwargs) _sem_open = external('sem_open', [rffi.CCHARP, rffi.INT, rffi.INT, rffi.UINT], SEM_T) + # tread sem_close as not threadsafe for now to be able to use the __del__ + _sem_close = external('sem_close', [SEM_T], rffi.INT, threadsafe=False) _sem_unlink = external('sem_unlink', [rffi.CCHARP], rffi.INT) _sem_wait = external('sem_wait', [SEM_T], rffi.INT) _sem_trywait = external('sem_trywait', [SEM_T], rffi.INT) @@ -90,6 +97,11 @@ raise OSError(rposix.get_errno(), "sem_open failed") return res + def sem_close(handle): + res = _sem_close(handle) + if res < 0: + raise OSError(rposix.get_errno(), "sem_close failed") + def sem_unlink(name): res = _sem_unlink(name) if res < 0: @@ -205,6 +217,11 @@ raise WindowsError(err, "CreateSemaphore") return handle + def delete_semaphore(handle): + if not _CloseHandle(handle): + err = rwin32.GetLastError() + raise WindowsError(err, "CloseHandle") + def semlock_acquire(self, space, block, w_timeout): if not block: full_msecs = 0 @@ -218,7 +235,7 @@ elif timeout >= 0.5 * rwin32.INFINITE: # 25 days raise OperationError(space.w_OverflowError, space.wrap("timeout is too large")) - full_msecs = int(timeout + 0.5) + full_msecs = r_uint(int(timeout + 0.5)) # check whether we can acquire without blocking res = rwin32.WaitForSingleObject(self.handle, 0) @@ -226,7 +243,7 @@ if res != rwin32.WAIT_TIMEOUT: return True - msecs = r_uint(full_msecs) + msecs = full_msecs start = _GetTickCount() while True: @@ -252,7 +269,7 @@ ticks = _GetTickCount() if r_uint(ticks - start) >= full_msecs: return False - msecs = r_uint(full_msecs - (ticks - start)) + msecs = full_msecs - r_uint(ticks - start) # handle result if res != rwin32.WAIT_TIMEOUT: @@ -291,8 +308,13 @@ sem_unlink(name) except OSError: pass + else: + rgc.add_memory_pressure(SEM_T_SIZE) return sem + def delete_semaphore(handle): + sem_close(handle) + def semlock_acquire(self, space, block, w_timeout): if not block: deadline = lltype.nullptr(TIMESPECP.TO) @@ -483,6 +505,9 @@ def exit(self, space, __args__): self.release(space) + def __del__(self): + delete_semaphore(self.handle) + @unwrap_spec(kind=int, value=int, maxvalue=int) def descr_new(space, w_subtype, kind, value, maxvalue): if kind != RECURSIVE_MUTEX and kind != SEMAPHORE: diff --git a/pypy/module/_multiprocessing/test/test_semaphore.py b/pypy/module/_multiprocessing/test/test_semaphore.py --- a/pypy/module/_multiprocessing/test/test_semaphore.py +++ b/pypy/module/_multiprocessing/test/test_semaphore.py @@ -70,8 +70,10 @@ maxvalue = 1 sem = SemLock(kind, value, maxvalue) - assert sem.acquire() - assert not sem.acquire(timeout=0.1) + res = sem.acquire() + assert res == True + res = sem.acquire(timeout=0.1) + assert res == False def test_semaphore_rebuild(self): from _multiprocessing import SemLock diff --git a/pypy/module/_random/test/test_ztranslation.py 
b/pypy/module/_random/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/_random/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_checkmodule(): + checkmodule('_random') diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -543,6 +543,7 @@ from pypy.rlib.rwin32 import GetLastError return space.wrap(GetLastError()) -def set_last_error(space, w_error): + at unwrap_spec(error=int) +def set_last_error(space, error): from pypy.rlib.rwin32 import SetLastError - SetLastError(space.uint_w(w_error)) + SetLastError(error) diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -212,6 +212,8 @@ while count + basic_size <= total_size: fieldtypes.append(basic_ffi_type) count += basic_size + if basic_size == 0: # corner case. get out of this infinite + break # loop after 1 iteration ("why not") self.ffi_struct = clibffi.make_struct_ffitype_e(self.size, self.alignment, fieldtypes) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1022,6 +1022,12 @@ assert ret.y == 1234500, "ret.y == %d" % (ret.y,) s.free() + def test_ffi_type(self): + import _rawffi + EMPTY = _rawffi.Structure([]) + S2E = _rawffi.Structure([('bah', (EMPTY, 1))]) + S2E.get_ffi_type() # does not hang + class AppTestAutoFree: def setup_class(cls): space = gettestobjspace(usemodules=('_rawffi', 'struct')) diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -1,7 +1,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.module._socket.interp_socket import converted_error, W_RSocket from pypy.rlib import rsocket -from pypy.rlib.rsocket import SocketError +from pypy.rlib.rsocket import SocketError, INVALID_SOCKET from pypy.interpreter.error import OperationError def gethostname(space): @@ -284,7 +284,7 @@ space.wrap(socktype), space.wrap(protocol), space.wrap(canonname), - addr.as_object(-1, space)]) # -1 as per cpython + addr.as_object(INVALID_SOCKET, space)]) # -1 as per cpython for (family, socktype, protocol, canonname, addr) in lst] return space.newlist(lst1) diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ b/pypy/module/_weakref/test/test_weakref.py @@ -324,6 +324,7 @@ class A(object): pass a = A() assert _weakref.proxy(a) is _weakref.proxy(a) + assert _weakref.proxy(a) is _weakref.proxy(a, None) def test_callable_proxy(self): import _weakref, gc diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -835,7 +835,7 @@ a.append(3.0) r = weakref.ref(a, lambda a: l.append(a())) del a - gc.collect() + gc.collect(); gc.collect() # XXX needs two of them right now... 
assert l assert l[0] is None or len(l[0]) == 0 diff --git a/pypy/module/bz2/test/test_large.py b/pypy/module/bz2/test/test_large.py --- a/pypy/module/bz2/test/test_large.py +++ b/pypy/module/bz2/test/test_large.py @@ -8,7 +8,7 @@ py.test.skip("skipping this very slow test; try 'pypy-c -A'") cls.space = gettestobjspace(usemodules=('bz2',)) largetest_bz2 = py.path.local(__file__).dirpath().join("largetest.bz2") - cls.w_compressed_data = cls.space.wrap(largetest_bz2.read()) + cls.w_compressed_data = cls.space.wrap(largetest_bz2.read('rb')) def test_decompress(self): from bz2 import decompress diff --git a/pypy/module/cStringIO/test/test_ztranslation.py b/pypy/module/cStringIO/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/cStringIO/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_checkmodule(): + checkmodule('cStringIO') diff --git a/pypy/module/clr/boxing_rules.py b/pypy/module/clr/boxing_rules.py --- a/pypy/module/clr/boxing_rules.py +++ b/pypy/module/clr/boxing_rules.py @@ -43,11 +43,11 @@ def tocli(self): return box(self._value) -from pypy.objspace.fake.objspace import W_Object as W_Object_Fake -from pypy.rlib.nonconst import NonConstant +##from pypy.objspace.fake.objspace import W_Object as W_Object_Fake +##from pypy.rlib.nonconst import NonConstant -class __extend__(W_Object_Fake): - __metaclass__ = extendabletype +##class __extend__(W_Object_Fake): +## __metaclass__ = extendabletype - def tocli(self): - return NonConstant(None) +## def tocli(self): +## return NonConstant(None) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -392,6 +392,7 @@ 'Slice': 'space.gettypeobject(W_SliceObject.typedef)', 'StaticMethod': 'space.gettypeobject(StaticMethod.typedef)', 'CFunction': 'space.gettypeobject(cpyext.methodobject.W_PyCFunctionObject.typedef)', + 'WrapperDescr': 'space.gettypeobject(cpyext.methodobject.W_PyCMethodObject.typedef)' }.items(): GLOBALS['Py%s_Type#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) diff --git a/pypy/module/cpyext/include/eval.h b/pypy/module/cpyext/include/eval.h --- a/pypy/module/cpyext/include/eval.h +++ b/pypy/module/cpyext/include/eval.h @@ -14,8 +14,8 @@ PyObject * PyEval_CallFunction(PyObject *obj, const char *format, ...); PyObject * PyEval_CallMethod(PyObject *obj, const char *name, const char *format, ...); -PyObject * PyObject_CallFunction(PyObject *obj, char *format, ...); -PyObject * PyObject_CallMethod(PyObject *obj, char *name, char *format, ...); +PyObject * PyObject_CallFunction(PyObject *obj, const char *format, ...); +PyObject * PyObject_CallMethod(PyObject *obj, const char *name, const char *format, ...); PyObject * PyObject_CallFunctionObjArgs(PyObject *callable, ...); PyObject * PyObject_CallMethodObjArgs(PyObject *callable, PyObject *name, ...); diff --git a/pypy/module/cpyext/include/modsupport.h b/pypy/module/cpyext/include/modsupport.h --- a/pypy/module/cpyext/include/modsupport.h +++ b/pypy/module/cpyext/include/modsupport.h @@ -48,7 +48,11 @@ /* * This is from pyport.h. Perhaps it belongs elsewhere. 
*/ +#ifdef __cplusplus +#define PyMODINIT_FUNC extern "C" void +#else #define PyMODINIT_FUNC void +#endif #ifdef __cplusplus diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.1" /* PyPy version as a string */ -#define PYPY_VERSION "1.6.1" +#define PYPY_VERSION "1.7.1" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. */ diff --git a/pypy/module/cpyext/include/pycobject.h b/pypy/module/cpyext/include/pycobject.h --- a/pypy/module/cpyext/include/pycobject.h +++ b/pypy/module/cpyext/include/pycobject.h @@ -33,7 +33,7 @@ PyAPI_FUNC(void *) PyCObject_GetDesc(PyObject *); /* Import a pointer to a C object from a module using a PyCObject. */ -PyAPI_FUNC(void *) PyCObject_Import(char *module_name, char *cobject_name); +PyAPI_FUNC(void *) PyCObject_Import(const char *module_name, const char *cobject_name); /* Modify a C object. Fails (==0) if object has a destructor. */ PyAPI_FUNC(int) PyCObject_SetVoidPtr(PyObject *self, void *cobj); diff --git a/pypy/module/cpyext/include/pyerrors.h b/pypy/module/cpyext/include/pyerrors.h --- a/pypy/module/cpyext/include/pyerrors.h +++ b/pypy/module/cpyext/include/pyerrors.h @@ -11,8 +11,8 @@ (PyClass_Check((x)) || (PyType_Check((x)) && \ PyObject_IsSubclass((x), PyExc_BaseException))) -PyObject *PyErr_NewException(char *name, PyObject *base, PyObject *dict); -PyObject *PyErr_NewExceptionWithDoc(char *name, char *doc, PyObject *base, PyObject *dict); +PyObject *PyErr_NewException(const char *name, PyObject *base, PyObject *dict); +PyObject *PyErr_NewExceptionWithDoc(const char *name, const char *doc, PyObject *base, PyObject *dict); PyObject *PyErr_Format(PyObject *exception, const char *format, ...); /* These APIs aren't really part of the error implementation, but diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ b/pypy/module/cpyext/listobject.py @@ -32,7 +32,7 @@ Py_DecRef(space, w_item) if not isinstance(w_list, W_ListObject): PyErr_BadInternalCall(space) - wrappeditems = w_list.wrappeditems + wrappeditems = w_list.getitems() if index < 0 or index >= len(wrappeditems): raise OperationError(space.w_IndexError, space.wrap( "list assignment index out of range")) @@ -47,7 +47,7 @@ IndexError exception.""" if not isinstance(w_list, W_ListObject): PyErr_BadInternalCall(space) - wrappeditems = w_list.wrappeditems + wrappeditems = w_list.getitems() if index < 0 or index >= len(wrappeditems): raise OperationError(space.w_IndexError, space.wrap( "list index out of range")) @@ -74,7 +74,7 @@ """Macro form of PyList_Size() without error checking. 
""" assert isinstance(w_list, W_ListObject) - return len(w_list.wrappeditems) + return len(w_list.getitems()) @cpython_api([PyObject], Py_ssize_t, error=-1) diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -240,6 +240,7 @@ def PyStaticMethod_New(space, w_func): return space.wrap(StaticMethod(w_func)) + at cpython_api([PyObject, lltype.Ptr(PyMethodDef)], PyObject) def PyDescr_NewMethod(space, w_type, method): return space.wrap(W_PyCMethodObject(space, method, w_type)) diff --git a/pypy/module/cpyext/modsupport.py b/pypy/module/cpyext/modsupport.py --- a/pypy/module/cpyext/modsupport.py +++ b/pypy/module/cpyext/modsupport.py @@ -54,9 +54,15 @@ modname = rffi.charp2str(name) state = space.fromcache(State) f_name, f_path = state.package_context - w_mod = PyImport_AddModule(space, f_name) + if f_name is not None: + modname = f_name + w_mod = PyImport_AddModule(space, modname) + state.package_context = None, None From noreply at buildbot.pypy.org Thu Dec 8 15:02:05 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Dec 2011 15:02:05 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: clean-up. Message-ID: <20111208140205.0B1B98205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50301:24d1d9d80c96 Date: 2011-12-08 15:01 +0100 http://bitbucket.org/pypy/pypy/changeset/24d1d9d80c96/ Log: clean-up. diff --git a/pypy/objspace/std/tupletype.py b/pypy/objspace/std/tupletype.py --- a/pypy/objspace/std/tupletype.py +++ b/pypy/objspace/std/tupletype.py @@ -2,8 +2,6 @@ from pypy.interpreter import gateway from pypy.objspace.std.register_all import register_all from pypy.objspace.std.stdtypedef import StdTypeDef, SMM -from types import IntType, FloatType, StringType - def wraptuple(space, list_w): from pypy.objspace.std.tupleobject import W_TupleObject From noreply at buildbot.pypy.org Thu Dec 8 15:23:16 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 8 Dec 2011 15:23:16 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: refactor FINISH to directly store all values to the failboxes. Message-ID: <20111208142316.308108205C@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50302:e70ac50eb3a3 Date: 2011-12-07 15:52 +0100 http://bitbucket.org/pypy/pypy/changeset/e70ac50eb3a3/ Log: refactor FINISH to directly store all values to the failboxes. 
diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -318,7 +318,48 @@ return fcond def emit_op_finish(self, op, arglocs, regalloc, fcond): - self._gen_path_to_exit_path(op.getdescr(), op.getarglist(), arglocs, c.AL) + for i in range(len(arglocs) -1): + loc = arglocs[i] + box = op.getarg(i) + if loc is None: + continue + if loc.is_reg(): + if box.type == REF: + adr = self.fail_boxes_ptr.get_addr_for_num(i) + elif box.type == INT: + adr = self.fail_boxes_int.get_addr_for_num(i) + else: + assert 0 + self.mc.gen_load_int(r.ip.value, adr) + self.mc.STR_ri(loc.value, r.ip.value) + elif loc.is_vfp_reg(): + assert box.type == FLOAT + adr = self.fail_boxes_float.get_addr_for_num(i) + self.mc.gen_load_int(r.ip.value, adr) + self.mc.VSTR(loc.value, r.ip.value) + elif loc.is_stack() or loc.is_imm() or loc.is_imm_float(): + if box.type == FLOAT: + adr = self.fail_boxes_float.get_addr_for_num(i) + self.mov_loc_loc(loc, r.vfp_ip) + self.mc.gen_load_int(r.ip.value, adr) + self.mc.VSTR(r.vfp_ip.value, r.ip.value) + elif box.type == REF or box.type == INT: + if box.type == REF: + adr = self.fail_boxes_ptr.get_addr_for_num(i) + elif box.type == INT: + adr = self.fail_boxes_int.get_addr_for_num(i) + self.mov_loc_loc(loc, r.ip) + self.mc.gen_load_int(r.lr.value, adr) + self.mc.STR_ri(r.ip.value, r.lr.value) + else: + assert 0 + # note: no exception should currently be set in llop.get_exception_addr + # even if this finish may be an exit_frame_with_exception (in this case + # the exception instance is in arglocs[0]). + addr = self.cpu.get_on_leave_jitted_int(save_exception=False) + self.mc.BL(addr) + self.mc.gen_load_int(r.r0.value, arglocs[-1].value) + self.gen_func_epilog() return fcond def emit_op_call(self, op, args, regalloc, fcond, force_index=-1): diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -503,14 +503,14 @@ return args def prepare_op_finish(self, op, fcond): - args = [imm(self.frame_manager.frame_depth)] + args = [None] * (op.numargs() + 1) for i in range(op.numargs()): arg = op.getarg(i) if arg: - args.append(self.loc(arg)) + args[i] = self.loc(arg) self.possibly_free_var(arg) - else: - args.append(None) + n = self.cpu.get_fail_descr_number(op.getdescr()) + args[-1] = imm(n) return args def prepare_op_guard_true(self, op, fcond): From noreply at buildbot.pypy.org Thu Dec 8 15:23:17 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 8 Dec 2011 15:23:17 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: translation fix, missing case Message-ID: <20111208142317.581BC8205C@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50303:10bf505d8a69 Date: 2011-12-08 15:13 +0100 http://bitbucket.org/pypy/pypy/changeset/10bf505d8a69/ Log: translation fix, missing case diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -348,6 +348,8 @@ adr = self.fail_boxes_ptr.get_addr_for_num(i) elif box.type == INT: adr = self.fail_boxes_int.get_addr_for_num(i) + else: + assert 0 self.mov_loc_loc(loc, r.ip) self.mc.gen_load_int(r.lr.value, adr) self.mc.STR_ri(r.ip.value, r.lr.value) From noreply at buildbot.pypy.org Thu Dec 8 15:23:18 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 8 Dec 2011 15:23:18 +0100 (CET) 
Subject: [pypy-commit] pypy arm-backend-2: Reverse this BKPT back to a NOP here, because this is not always patched Message-ID: <20111208142318.800FC8205C@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50304:9512dcdd4967 Date: 2011-12-08 15:15 +0100 http://bitbucket.org/pypy/pypy/changeset/9512dcdd4967/ Log: Reverse this BKPT back to a NOP here, because this is not always patched diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -1028,8 +1028,9 @@ # jump to merge point jmp_pos = self.mc.currpos() - #jmp_location = self.mc.curraddr() - self.mc.BKPT() + # This location is not necessarily patched later, depending on how many + # instructions we emit from here to the merge point below. + self.mc.NOP() # Path B: load return value and reset token # Fast Path using result boxes From noreply at buildbot.pypy.org Thu Dec 8 16:02:59 2011 From: noreply at buildbot.pypy.org (hager) Date: Thu, 8 Dec 2011 16:02:59 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: begin reimplementation of CALL on PPC64 Message-ID: <20111208150259.811588205C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50305:103beedd8034 Date: 2011-12-08 07:02 -0800 http://bitbucket.org/pypy/pypy/changeset/103beedd8034/ Log: begin reimplementation of CALL on PPC64 diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -456,7 +456,12 @@ if IS_PPC_32: self.mc.bl_abs(adr) else: - assert 0 + self.mc.load_from_addr(r.r0, adr) + self.mc.load_from_addr(r.r2, adr + WORD) + self.mc.load_from_addr(r.r11, adr + 2 * WORD) + self.mc.mtctr(r.r0.value) + self.mc.bctrl() + self.mark_gc_roots(force_index) regalloc.possibly_free_vars(args) diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -10,7 +10,7 @@ from pypy.jit.backend.ppc.ppcgen.codebuilder import PPCBuilder from pypy.jit.backend.ppc.ppcgen.jump import remap_frame_layout from pypy.jit.backend.ppc.ppcgen.arch import (IS_PPC_32, IS_PPC_64, WORD, - NONVOLATILES, + NONVOLATILES, MAX_REG_PARAMS, GPR_SAVE_AREA, BACKCHAIN_SIZE, FPR_SAVE_AREA, FLOAT_INT_CONVERSION, FORCE_INDEX) @@ -80,40 +80,6 @@ EMPTY_LOC = '\xFE' END_OF_LOCS = '\xFF' - - ''' - PyPy's PPC stack frame layout - ============================= - - . . - . . - ---------------------------- - | BACKCHAIN | OLD FRAME - ------------------------------------------------------ - | | PyPy Frame - | GPR SAVE AREA | - | | - ---------------------------- - | FORCE INDEX | - ---------------------------- <- Spilling Pointer (SPP) - | | - | SPILLING AREA | - | | - ---------------------------- <- Stack Pointer (SP) - - The size of the GPR save area and the force index area fixed: - - GPR SAVE AREA: len(NONVOLATILES) * WORD - FORCE INDEX : WORD - - - The size of the spilling area is known when the trace operations - have been generated. 
- ''' - - #GPR_SAVE_AREA_AND_FORCE_INDEX = GPR_SAVE_AREA + WORD - # ^^^^^^^^^^^^^ ^^^^ - # save GRP regs force index ENCODING_AREA = len(r.MANAGED_REGS) * WORD OFFSET_SPP_TO_GPR_SAVE_AREA = (FORCE_INDEX + FLOAT_INT_CONVERSION + ENCODING_AREA) @@ -229,6 +195,7 @@ stack_loc : pointer to top of the stack spp_loc : pointer to begin of the spilling area ''' + enc = rffi.cast(rffi.CCHARP, mem_loc) managed_size = WORD * len(r.MANAGED_REGS) # XXX do some sanity considerations @@ -346,6 +313,7 @@ # - jump back to the calling code def _gen_exit_path(self): mc = PPCBuilder() + mc.mr(r.r6.value, r.r3.value) self._save_managed_regs(mc) decode_func_addr = llhelper(self.recovery_func_sign, self.failure_recovery_func) @@ -358,6 +326,8 @@ r2_value = descr[1] r11_value = descr[2] + + # load parameters into parameter registers if IS_PPC_32: mc.lwz(r.r3.value, r.SPP.value, self.ENCODING_AREA) # address of state encoding @@ -369,6 +339,7 @@ # load address of decoding function into r0 mc.alloc_scratch_reg(addr) if IS_PPC_64: + mc.li(r.r2.value, 99) mc.std(r.r2.value, r.SP.value, 3 * WORD) # load TOC pointer and environment pointer mc.load_imm(r.r2, r2_value) @@ -749,13 +720,22 @@ self.cpu.gc_ll_descr.gcrootmap) def compute_frame_depth(self, regalloc): + PARAMETER_AREA = self.max_stack_params * WORD + if IS_PPC_64: + PARAMETER_AREA += MAX_REG_PARAMS * WORD + SPILLING_AREA = regalloc.frame_manager.frame_depth * WORD + + print "PARAMETER SAVE AREA = %d" % PARAMETER_AREA + print "SPILLING AREA = %d" % SPILLING_AREA + print "OFFSET TO ENCODING = %d" % (PARAMETER_AREA + SPILLING_AREA) + frame_depth = ( GPR_SAVE_AREA + FPR_SAVE_AREA + FLOAT_INT_CONVERSION + FORCE_INDEX + self.ENCODING_AREA - + regalloc.frame_manager.frame_depth * WORD - + self.max_stack_params * WORD + + SPILLING_AREA + + PARAMETER_AREA + BACKCHAIN_SIZE * WORD) return frame_depth From noreply at buildbot.pypy.org Thu Dec 8 17:27:14 2011 From: noreply at buildbot.pypy.org (hager) Date: Thu, 8 Dec 2011 17:27:14 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: remove comments Message-ID: <20111208162714.39B718205C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50306:e83a924bb5d8 Date: 2011-12-08 08:24 -0800 http://bitbucket.org/pypy/pypy/changeset/e83a924bb5d8/ Log: remove comments diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -348,44 +348,9 @@ n += WORD stack_args.append(None) - """ - # adjust SP and compute size of parameter save area - if IS_PPC_32: - stack_space = BACKCHAIN_SIZE + len(stack_args) * WORD - while stack_space % (4 * WORD) != 0: - stack_space += 1 - self.mc.stwu(r.SP.value, r.SP.value, -stack_space) - self.mc.mflr(r.r0.value) - self.mc.stw(r.r0.value, r.SP.value, stack_space + WORD) - else: - # ABI fixed frame + 8 GPRs + arguments - stack_space = (6 + MAX_REG_PARAMS + len(stack_args)) * WORD - while stack_space % (2 * WORD) != 0: - stack_space += 1 - self.mc.stdu(r.SP.value, r.SP.value, -stack_space) - self.mc.mflr(r.r0.value) - self.mc.std(r.r0.value, r.SP.value, stack_space + 2 * WORD) - """ - # compute maximum of parameters passed self.max_stack_params = max(self.max_stack_params, len(stack_args)) - """ - # then we push everything on the stack - for i, arg in enumerate(stack_args): - if IS_PPC_32: - abi = 2 - else: - abi = 6 + MAX_REG_PARAMS - offset = (abi + i) * WORD - if arg is not None: - self.mc.load_imm(r.r0, arg.value) - if IS_PPC_32: 
- self.mc.stw(r.r0.value, r.SP.value, offset) - else: - self.mc.std(r.r0.value, r.SP.value, offset) - """ - # compute offset at which parameters are stored if IS_PPC_32: param_offset = BACKCHAIN_SIZE * WORD @@ -434,24 +399,6 @@ # remap values stored in core registers remap_frame_layout(self, non_float_locs, non_float_regs, r.r0) - """ - #the actual call - if IS_PPC_32: - self.mc.bl_abs(adr) - self.mc.lwz(r.r0.value, r.SP.value, stack_space + WORD) - else: - self.mc.std(r.r2.value, r.SP.value, 3 * WORD) - self.mc.load_from_addr(r.r0, adr) - self.mc.load_from_addr(r.r2, adr + WORD) - self.mc.load_from_addr(r.r11, adr + 2 * WORD) - self.mc.mtctr(r.r0.value) - self.mc.bctrl() - self.mc.ld(r.r2.value, r.SP.value, 3 * WORD) - self.mc.ld(r.r0.value, r.SP.value, stack_space + 2 * WORD) - self.mc.mtlr(r.r0.value) - self.mc.addi(r.SP.value, r.SP.value, stack_space) - """ - # the actual call if IS_PPC_32: self.mc.bl_abs(adr) From noreply at buildbot.pypy.org Thu Dec 8 17:27:15 2011 From: noreply at buildbot.pypy.org (hager) Date: Thu, 8 Dec 2011 17:27:15 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: remove debug stuff Message-ID: <20111208162715.5F32D8205C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50307:976ed819b3c4 Date: 2011-12-08 08:26 -0800 http://bitbucket.org/pypy/pypy/changeset/976ed819b3c4/ Log: remove debug stuff diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -195,7 +195,6 @@ stack_loc : pointer to top of the stack spp_loc : pointer to begin of the spilling area ''' - enc = rffi.cast(rffi.CCHARP, mem_loc) managed_size = WORD * len(r.MANAGED_REGS) # XXX do some sanity considerations @@ -339,7 +338,6 @@ # load address of decoding function into r0 mc.alloc_scratch_reg(addr) if IS_PPC_64: - mc.li(r.r2.value, 99) mc.std(r.r2.value, r.SP.value, 3 * WORD) # load TOC pointer and environment pointer mc.load_imm(r.r2, r2_value) From noreply at buildbot.pypy.org Thu Dec 8 17:38:57 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Dec 2011 17:38:57 +0100 (CET) Subject: [pypy-commit] pypy default: Hack: record at least some partial information about which frame Message-ID: <20111208163857.6F4CC8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50308:062e9d06c908 Date: 2011-12-08 16:40 +0100 http://bitbucket.org/pypy/pypy/changeset/062e9d06c908/ Log: Hack: record at least some partial information about which frame locations are freed, and if a hint is present, try to allocate the spilled boxes into these free locations. The hints are initialized based on looking ahead at the final JUMP operation. The goal is to reduce the number of frame-to-frame MOVs that must be emitted in the JUMP, by spilling boxes directly to the correct location, when possible. 
diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -23,9 +23,12 @@ return self.frame_bindings.get(box, None) def loc(self, box): - res = self.get(box) - if res is not None: - return res + try: + return self.frame_bindings[box] + except KeyError: + return self.get_new_loc(box) + + def get_new_loc(self, box): size = self.frame_size(box.type) self.frame_depth += ((-self.frame_depth) & (size-1)) # ^^^ frame_depth is rounded up to a multiple of 'size', assuming @@ -67,8 +70,17 @@ self.position = -1 self.frame_manager = frame_manager self.assembler = assembler + self.hint_frame_locations = {} # {Box: StackLoc} + self.freed_frame_locations = {} # {StackLoc: None} + + def is_still_alive(self, v): + # Check if 'v' is alive at the current position. + # Return False if the last usage is strictly before. + return self.longevity[v][1] >= self.position def stays_alive(self, v): + # Check if 'v' stays alive after the current position. + # Return False if the last usage is before or at position. return self.longevity[v][1] > self.position def next_instruction(self, incr=1): @@ -84,11 +96,16 @@ point for all variables that might be in registers. """ self._check_type(v) - if isinstance(v, Const) or v not in self.reg_bindings: + if isinstance(v, Const): return if v not in self.longevity or self.longevity[v][1] <= self.position: - self.free_regs.append(self.reg_bindings[v]) - del self.reg_bindings[v] + if v in self.reg_bindings: + self.free_regs.append(self.reg_bindings[v]) + del self.reg_bindings[v] + if self.frame_manager is not None: + if v in self.frame_manager.frame_bindings: + loc = self.frame_manager.frame_bindings[v] + self.freed_frame_locations[loc] = None def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. @@ -160,6 +177,23 @@ self.reg_bindings[v] = loc return loc + def _frame_loc(self, v): + # first check if it's already in the frame_manager + try: + return self.frame_manager.frame_bindings[v] + except KeyError: + pass + # check if we have a hint for this box + if v in self.hint_frame_locations: + # if we do, check that the hinted location is known to be free + loc = self.hint_frame_locations[v] + if loc in self.freed_frame_locations: + del self.freed_frame_locations[loc] + self.frame_manager.frame_bindings[v] = loc + return loc + # no valid hint. make up a new free location + return self.frame_manager.get_new_loc(v) + def _spill_var(self, v, forbidden_vars, selected_reg, need_lower_byte=False): v_to_spill = self._pick_variable_to_spill(v, forbidden_vars, @@ -167,7 +201,7 @@ loc = self.reg_bindings[v_to_spill] del self.reg_bindings[v_to_spill] if self.frame_manager.get(v_to_spill) is None: - newloc = self.frame_manager.loc(v_to_spill) + newloc = self._frame_loc(v_to_spill) self.assembler.regalloc_mov(loc, newloc) return loc @@ -244,7 +278,7 @@ except KeyError: if box in self.bindings_to_frame_reg: return self.frame_reg - return self.frame_manager.loc(box) + return self._frame_loc(box) def return_constant(self, v, forbidden_vars=[], selected_reg=None): """ Return the location of the constant v. 
If 'selected_reg' is @@ -292,7 +326,7 @@ self.reg_bindings[v] = loc self.assembler.regalloc_mov(prev_loc, loc) else: - loc = self.frame_manager.loc(v) + loc = self._frame_loc(v) self.assembler.regalloc_mov(prev_loc, loc) def force_result_in_reg(self, result_v, v, forbidden_vars=[]): @@ -311,7 +345,7 @@ self.reg_bindings[result_v] = loc return loc if v not in self.reg_bindings: - prev_loc = self.frame_manager.loc(v) + prev_loc = self._frame_loc(v) loc = self.force_allocate_reg(v, forbidden_vars) self.assembler.regalloc_mov(prev_loc, loc) assert v in self.reg_bindings @@ -331,7 +365,7 @@ def _sync_var(self, v): if not self.frame_manager.get(v): reg = self.reg_bindings[v] - to = self.frame_manager.loc(v) + to = self._frame_loc(v) self.assembler.regalloc_mov(reg, to) # otherwise it's clean diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -348,3 +348,50 @@ spilled2 = rm.force_allocate_reg(b5) assert spilled2 is loc rm._check_invariants() + + + def test_hint_frame_locations_1(self): + b0, b1 = newboxes(0, 1) + longevity = {b0: (0, 1), b1: (0, 1)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.hint_frame_locations[b0] = "some_stack_loc" + rm.freed_frame_locations["some_stack_loc"] = None + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_spill_var(b0) + rm.force_spill_var(b1) + assert rm.loc(b0) == "some_stack_loc" + assert isinstance(rm.loc(b1), FakeFramePos) + rm._check_invariants() + + def test_hint_frame_locations_2(self): + b0, b1, b2 = newboxes(0, 1, 2) + longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_allocate_reg(b2) + rm.force_spill_var(b0) + loc = rm.loc(b0) + assert isinstance(loc, FakeFramePos) + rm.position = 1 + assert loc not in rm.freed_frame_locations + rm.possibly_free_var(b0) + assert loc in rm.freed_frame_locations + # + rm.hint_frame_locations[b1] = loc + rm.force_spill_var(b1) + loc1 = rm.loc(b1) + assert loc1 is loc + assert rm.freed_frame_locations == {} + # + rm.hint_frame_locations[b2] = loc + rm.force_spill_var(b2) + loc2 = rm.loc(b2) + assert loc2 is not loc1 # because it's not in freed_frame_locations + # + rm._check_invariants() diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -690,6 +690,7 @@ def _assemble(self, regalloc, operations): self._regalloc = regalloc + regalloc.compute_hint_frame_locations(operations) regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -1318,6 +1318,29 @@ self.rm.possibly_free_var(tmpbox_low) self.rm.possibly_free_var(tmpbox_high) + def compute_hint_frame_locations(self, operations): + # optimization only: fill in the 'hint_frame_locations' dictionary + # of rm and xrm based on the JUMP at the end of the loop, by looking + # at where we would like the boxes to be after the jump. 
+ op = operations[-1] + if op.getopnum() != rop.JUMP: + return + descr = op.getdescr() + assert isinstance(descr, LoopToken) + nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr) + for i in range(op.numargs()): + box = op.getarg(i) + if isinstance(box, Box): + loc = nonfloatlocs[i] + if isinstance(loc, StackLoc): + assert box.type != FLOAT + self.rm.hint_frame_locations[box] = loc + else: + loc = floatlocs[i] + if isinstance(loc, StackLoc): + assert box.type == FLOAT + self.xrm.hint_frame_locations[box] = loc + def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None From noreply at buildbot.pypy.org Thu Dec 8 17:43:23 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Dec 2011 17:43:23 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: Fix. Previously, we would risk getting a specialised tuple if enabled. Message-ID: <20111208164323.5FFEE8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50309:7ad77a6f050c Date: 2011-12-08 17:43 +0100 http://bitbucket.org/pypy/pypy/changeset/7ad77a6f050c/ Log: Fix. Previously, we would risk getting a specialised tuple if enabled. That makes little sense: we can only handle setting arbitrary items in a generic W_TupleObject. diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -6,13 +6,12 @@ borrow_from, make_ref, from_ref) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.objspace.std.tupleobject import W_TupleObject -from pypy.objspace.std.smalltupleobject import W_SmallTupleObject PyTuple_Check, PyTuple_CheckExact = build_type_checkers("Tuple") @cpython_api([Py_ssize_t], PyObject) def PyTuple_New(space, size): - return space.newtuple([space.w_None] * size) + return W_TupleObject([space.w_None] * size) @cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=-1) def PyTuple_SetItem(space, w_t, pos, w_obj): @@ -24,12 +23,12 @@ return 0 def _setitem_tuple(w_t, pos, w_obj): - if isinstance(w_t, W_TupleObject): - w_t.wrappeditems[pos] = w_obj - elif isinstance(w_t, W_SmallTupleObject): - w_t.setitem(pos, w_obj) - else: - assert False + # this function checks that w_t is really a W_TupleObject. It + # should only ever be called with a freshly built tuple from + # PyTuple_New(), which always return a W_TupleObject, even if there + # are also other implementations of tuples. + assert isinstance(w_t, W_TupleObject) + w_t.wrappeditems[pos] = w_obj @cpython_api([PyObject, Py_ssize_t], PyObject) def PyTuple_GetItem(space, w_t, pos): From noreply at buildbot.pypy.org Thu Dec 8 17:55:21 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Thu, 8 Dec 2011 17:55:21 +0100 (CET) Subject: [pypy-commit] pypy default: (l.diekmann, cfbolz): Be more careful about unrolling getitems. also make one less copy Message-ID: <20111208165521.D65F78205C@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: Changeset: r50310:f19d06a5972d Date: 2011-12-08 17:27 +0100 http://bitbucket.org/pypy/pypy/changeset/f19d06a5972d/ Log: (l.diekmann, cfbolz): Be more careful about unrolling getitems. 
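The constraint can be shown with a small stand-alone sketch before the diff: the C API pattern is PyTuple_New() followed by PyTuple_SetItem(), i.e. a freshly created tuple is mutated in place, and that only works if the fresh tuple is the generic implementation whose items sit in a plain list. The classes below are simplified stand-ins for the real interpreter-level ones, not the actual PyPy code.

    # Illustration only; W_TupleObject here is a stand-in for the real class.
    class W_TupleObject(object):
        def __init__(self, wrappeditems):
            self.wrappeditems = wrappeditems      # plain, settable list of items

    class W_SpecialisedTupleII(object):
        # e.g. a tuple specialised for two ints: no generic item storage,
        # so there is no sane way to "set item 0" on it after the fact
        def __init__(self, a, b):
            self.val0 = a
            self.val1 = b

    def pytuple_new(size):
        # must always build the generic implementation, never a specialised
        # one, because the caller fills the items in afterwards
        return W_TupleObject([None] * size)

    def pytuple_setitem(w_t, pos, w_obj):
        assert isinstance(w_t, W_TupleObject)     # the invariant the fix relies on
        w_t.wrappeditems[pos] = w_obj

    w_t = pytuple_new(2)
    pytuple_setitem(w_t, 0, "a")
    pytuple_setitem(w_t, 1, "b")
    assert w_t.wrappeditems == ["a", "b"]

Returning space.newtuple([w_None] * size) would instead be free to pick a specialised (or small-tuple) implementation when those options are enabled, which is exactly the case the assert above would reject.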
also make one less copy diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -9,8 +9,9 @@ from pypy.interpreter import gateway, baseobjspace from pypy.rlib.objectmodel import instantiate, specialize from pypy.rlib.listsort import make_timsort_class -from pypy.rlib import rerased, jit +from pypy.rlib import rerased, jit, debug from pypy.interpreter.argument import Signature +from pypy.tool.sourcetools import func_with_new_name UNROLL_CUTOFF = 5 @@ -170,6 +171,19 @@ share with the storage, if possible.""" return self.strategy.getitems(self) + def getitems_fixedsize(self): + """Returns a fixed-size list of all items after wrapping them.""" + l = self.strategy.getitems_fixedsize(self) + debug.make_sure_not_resized(l) + return l + + def getitems_unroll(self): + """Returns a fixed-size list of all items after wrapping them. The JIT + will fully unroll this function. """ + l = self.strategy.getitems_unroll(self) + debug.make_sure_not_resized(l) + return l + def getitems_copy(self): """Returns a copy of all items in the list. Same as getitems except for ObjectListStrategy.""" @@ -366,6 +380,8 @@ def getitems_copy(self, w_list): return [] + getitems_fixedsize = func_with_new_name(getitems_copy, "getitems_fixedsize") + getitems_unroll = getitems_fixedsize def getstorage_copy(self, w_list): return self.erase(None) @@ -496,7 +512,6 @@ # tuple is unmutable return w_list.lstorage - @specialize.arg(2) def _getitems_range(self, w_list, wrap_items): l = self.unerase(w_list.lstorage) @@ -519,6 +534,13 @@ return r + @jit.dont_look_inside + def getitems_fixedsize(self, w_list): + return self._getitems_range_unroll(w_list, True) + def getitems_unroll(self, w_list): + return self._getitems_range_unroll(w_list, True) + _getitems_range_unroll = jit.unroll_safe(func_with_new_name(_getitems_range, "_getitems_range_unroll")) + def getslice(self, w_list, start, stop, step, length): v = self.unerase(w_list.lstorage) old_start = v[0] @@ -676,6 +698,13 @@ def getitems_copy(self, w_list): return [self.wrap(item) for item in self.unerase(w_list.lstorage)] + @jit.unroll_safe + def getitems_unroll(self, w_list): + return [self.wrap(item) for item in self.unerase(w_list.lstorage)] + @jit.dont_look_inside + def getitems_fixedsize(self, w_list): + return self.getitems_unroll(w_list) + def getstorage_copy(self, w_list): items = self.unerase(w_list.lstorage)[:] return self.erase(items) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -408,8 +408,10 @@ if isinstance(w_obj, W_TupleObject): t = w_obj.wrappeditems elif isinstance(w_obj, W_ListObject): - # XXX this can copy twice - t = w_obj.getitems()[:] + if unroll: + t = w_obj.getitems_unroll() + else: + t = w_obj.getitems_fixedsize() else: if unroll: return make_sure_not_resized(ObjSpace.unpackiterable_unroll( diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -48,6 +48,46 @@ for i in range(7): assert self.space.eq_w(l[i], l2[i]) + def test_getitems_fixedsize(self): + w = self.space.wrap + from pypy.objspace.std.listobject import make_range_list + rangelist = make_range_list(self.space, 1,1,7) + emptylist = W_ListObject(self.space, []) + intlist = W_ListObject(self.space, [w(1),w(2),w(3),w(4),w(5),w(6),w(7)]) + strlist = 
W_ListObject(self.space, [w('1'),w('2'),w('3'),w('4'),w('5'),w('6'),w('7')]) + floatlist = W_ListObject(self.space, [w(1.0),w(2.0),w(3.0),w(4.0),w(5.0),w(6.0),w(7.0)]) + objlist = W_ListObject(self.space, [w(1),w('2'),w(3.0),w(4),w(5),w(6),w(7)]) + + emptylist_copy = emptylist.getitems_fixedsize() + assert emptylist_copy == [] + + rangelist_copy = rangelist.getitems_fixedsize() + intlist_copy = intlist.getitems_fixedsize() + strlist_copy = strlist.getitems_fixedsize() + floatlist_copy = floatlist.getitems_fixedsize() + objlist_copy = objlist.getitems_fixedsize() + for i in range(7): + assert self.space.eq_w(rangelist_copy[i], rangelist.getitem(i)) + assert self.space.eq_w(intlist_copy[i], intlist.getitem(i)) + assert self.space.eq_w(strlist_copy[i], strlist.getitem(i)) + assert self.space.eq_w(floatlist_copy[i], floatlist.getitem(i)) + assert self.space.eq_w(objlist_copy[i], objlist.getitem(i)) + + emptylist_copy = emptylist.getitems_unroll() + assert emptylist_copy == [] + + rangelist_copy = rangelist.getitems_unroll() + intlist_copy = intlist.getitems_unroll() + strlist_copy = strlist.getitems_unroll() + floatlist_copy = floatlist.getitems_unroll() + objlist_copy = objlist.getitems_unroll() + for i in range(7): + assert self.space.eq_w(rangelist_copy[i], rangelist.getitem(i)) + assert self.space.eq_w(intlist_copy[i], intlist.getitem(i)) + assert self.space.eq_w(strlist_copy[i], strlist.getitem(i)) + assert self.space.eq_w(floatlist_copy[i], floatlist.getitem(i)) + assert self.space.eq_w(objlist_copy[i], objlist.getitem(i)) + def test_random_getitem(self): w = self.space.wrap s = list('qedx387tn3uixhvt 7fh387fymh3dh238 dwd-wq.dwq9') From noreply at buildbot.pypy.org Thu Dec 8 17:55:23 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Thu, 8 Dec 2011 17:55:23 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20111208165523.1322F8205C@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: Changeset: r50311:6acad4874739 Date: 2011-12-08 17:47 +0100 http://bitbucket.org/pypy/pypy/changeset/6acad4874739/ Log: merge diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -23,9 +23,12 @@ return self.frame_bindings.get(box, None) def loc(self, box): - res = self.get(box) - if res is not None: - return res + try: + return self.frame_bindings[box] + except KeyError: + return self.get_new_loc(box) + + def get_new_loc(self, box): size = self.frame_size(box.type) self.frame_depth += ((-self.frame_depth) & (size-1)) # ^^^ frame_depth is rounded up to a multiple of 'size', assuming @@ -67,8 +70,17 @@ self.position = -1 self.frame_manager = frame_manager self.assembler = assembler + self.hint_frame_locations = {} # {Box: StackLoc} + self.freed_frame_locations = {} # {StackLoc: None} + + def is_still_alive(self, v): + # Check if 'v' is alive at the current position. + # Return False if the last usage is strictly before. + return self.longevity[v][1] >= self.position def stays_alive(self, v): + # Check if 'v' stays alive after the current position. + # Return False if the last usage is before or at position. return self.longevity[v][1] > self.position def next_instruction(self, incr=1): @@ -84,11 +96,16 @@ point for all variables that might be in registers. 
""" self._check_type(v) - if isinstance(v, Const) or v not in self.reg_bindings: + if isinstance(v, Const): return if v not in self.longevity or self.longevity[v][1] <= self.position: - self.free_regs.append(self.reg_bindings[v]) - del self.reg_bindings[v] + if v in self.reg_bindings: + self.free_regs.append(self.reg_bindings[v]) + del self.reg_bindings[v] + if self.frame_manager is not None: + if v in self.frame_manager.frame_bindings: + loc = self.frame_manager.frame_bindings[v] + self.freed_frame_locations[loc] = None def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. @@ -160,6 +177,23 @@ self.reg_bindings[v] = loc return loc + def _frame_loc(self, v): + # first check if it's already in the frame_manager + try: + return self.frame_manager.frame_bindings[v] + except KeyError: + pass + # check if we have a hint for this box + if v in self.hint_frame_locations: + # if we do, check that the hinted location is known to be free + loc = self.hint_frame_locations[v] + if loc in self.freed_frame_locations: + del self.freed_frame_locations[loc] + self.frame_manager.frame_bindings[v] = loc + return loc + # no valid hint. make up a new free location + return self.frame_manager.get_new_loc(v) + def _spill_var(self, v, forbidden_vars, selected_reg, need_lower_byte=False): v_to_spill = self._pick_variable_to_spill(v, forbidden_vars, @@ -167,7 +201,7 @@ loc = self.reg_bindings[v_to_spill] del self.reg_bindings[v_to_spill] if self.frame_manager.get(v_to_spill) is None: - newloc = self.frame_manager.loc(v_to_spill) + newloc = self._frame_loc(v_to_spill) self.assembler.regalloc_mov(loc, newloc) return loc @@ -244,7 +278,7 @@ except KeyError: if box in self.bindings_to_frame_reg: return self.frame_reg - return self.frame_manager.loc(box) + return self._frame_loc(box) def return_constant(self, v, forbidden_vars=[], selected_reg=None): """ Return the location of the constant v. 
If 'selected_reg' is @@ -292,7 +326,7 @@ self.reg_bindings[v] = loc self.assembler.regalloc_mov(prev_loc, loc) else: - loc = self.frame_manager.loc(v) + loc = self._frame_loc(v) self.assembler.regalloc_mov(prev_loc, loc) def force_result_in_reg(self, result_v, v, forbidden_vars=[]): @@ -311,7 +345,7 @@ self.reg_bindings[result_v] = loc return loc if v not in self.reg_bindings: - prev_loc = self.frame_manager.loc(v) + prev_loc = self._frame_loc(v) loc = self.force_allocate_reg(v, forbidden_vars) self.assembler.regalloc_mov(prev_loc, loc) assert v in self.reg_bindings @@ -331,7 +365,7 @@ def _sync_var(self, v): if not self.frame_manager.get(v): reg = self.reg_bindings[v] - to = self.frame_manager.loc(v) + to = self._frame_loc(v) self.assembler.regalloc_mov(reg, to) # otherwise it's clean diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -348,3 +348,50 @@ spilled2 = rm.force_allocate_reg(b5) assert spilled2 is loc rm._check_invariants() + + + def test_hint_frame_locations_1(self): + b0, b1 = newboxes(0, 1) + longevity = {b0: (0, 1), b1: (0, 1)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.hint_frame_locations[b0] = "some_stack_loc" + rm.freed_frame_locations["some_stack_loc"] = None + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_spill_var(b0) + rm.force_spill_var(b1) + assert rm.loc(b0) == "some_stack_loc" + assert isinstance(rm.loc(b1), FakeFramePos) + rm._check_invariants() + + def test_hint_frame_locations_2(self): + b0, b1, b2 = newboxes(0, 1, 2) + longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_allocate_reg(b2) + rm.force_spill_var(b0) + loc = rm.loc(b0) + assert isinstance(loc, FakeFramePos) + rm.position = 1 + assert loc not in rm.freed_frame_locations + rm.possibly_free_var(b0) + assert loc in rm.freed_frame_locations + # + rm.hint_frame_locations[b1] = loc + rm.force_spill_var(b1) + loc1 = rm.loc(b1) + assert loc1 is loc + assert rm.freed_frame_locations == {} + # + rm.hint_frame_locations[b2] = loc + rm.force_spill_var(b2) + loc2 = rm.loc(b2) + assert loc2 is not loc1 # because it's not in freed_frame_locations + # + rm._check_invariants() diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -690,6 +690,7 @@ def _assemble(self, regalloc, operations): self._regalloc = regalloc + regalloc.compute_hint_frame_locations(operations) regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -1318,6 +1318,29 @@ self.rm.possibly_free_var(tmpbox_low) self.rm.possibly_free_var(tmpbox_high) + def compute_hint_frame_locations(self, operations): + # optimization only: fill in the 'hint_frame_locations' dictionary + # of rm and xrm based on the JUMP at the end of the loop, by looking + # at where we would like the boxes to be after the jump. 
+ op = operations[-1] + if op.getopnum() != rop.JUMP: + return + descr = op.getdescr() + assert isinstance(descr, LoopToken) + nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr) + for i in range(op.numargs()): + box = op.getarg(i) + if isinstance(box, Box): + loc = nonfloatlocs[i] + if isinstance(loc, StackLoc): + assert box.type != FLOAT + self.rm.hint_frame_locations[box] = loc + else: + loc = floatlocs[i] + if isinstance(loc, StackLoc): + assert box.type == FLOAT + self.xrm.hint_frame_locations[box] = loc + def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None From noreply at buildbot.pypy.org Thu Dec 8 17:57:00 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Thu, 8 Dec 2011 17:57:00 +0100 (CET) Subject: [pypy-commit] pypy type-specialized-instances: fixes for type-specialized-attributes Message-ID: <20111208165700.560F68205C@wyvern.cs.uni-duesseldorf.de> Author: l.diekmann Branch: type-specialized-instances Changeset: r50312:e481a093e056 Date: 2011-12-08 16:35 +0000 http://bitbucket.org/pypy/pypy/changeset/e481a093e056/ Log: fixes for type-specialized-attributes diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -134,7 +134,7 @@ @jit.elidable def _get_new_attr(self, name, index, attrclass): - key = name, index, attrclass + key = name, index, attrclass.key_for_attr_cache cache = self.cache_attrs if cache is None: cache = self.cache_attrs = {} @@ -334,6 +334,7 @@ return "" % (self.selector, self.position, self.back) class PlainAttribute(AbstractStoredAttribute): + key_for_attr_cache = 0 erase_item, unerase_item = rerased.new_erasing_pair("mapdict storage object item") erase_item = staticmethod(erase_item) @@ -349,6 +350,7 @@ obj._mapdict_write_storage(self.position, erased) class IntAttribute(AbstractStoredAttribute): + key_for_attr_cache = 1 erase_item, unerase_item = rerased.erase_int, rerased.unerase_int erase_item = staticmethod(erase_item) From noreply at buildbot.pypy.org Thu Dec 8 18:06:26 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Dec 2011 18:06:26 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: Kill. Message-ID: <20111208170626.4C2E18205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50313:526f61d70b8f Date: 2011-12-08 18:01 +0100 http://bitbucket.org/pypy/pypy/changeset/526f61d70b8f/ Log: Kill. diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py --- a/pypy/objspace/std/test/test_specialisedtupleobject.py +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -53,14 +53,7 @@ hash_test([1,(1,2)]) hash_test([1,('a',2)]) hash_test([1,()]) - - def test_setitem(self): - py.test.skip('skip for now, only needed for cpyext') - w_specialisedtuple = self.space.newtuple([self.space.wrap(1)]) - w_specialisedtuple.setitem(0, self.space.wrap(5)) - list_w = w_specialisedtuple.tolist() - assert len(list_w) == 1 - assert self.space.eq_w(list_w[0], self.space.wrap(5)) + class AppTestW_SpecialisedTupleObject: From noreply at buildbot.pypy.org Thu Dec 8 18:06:27 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Dec 2011 18:06:27 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: Test and fix. 
Message-ID: <20111208170627.721918205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50314:f0cc6a3ff8e9 Date: 2011-12-08 18:06 +0100 http://bitbucket.org/pypy/pypy/changeset/f0cc6a3ff8e9/ Log: Test and fix. diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -98,7 +98,7 @@ # XXX duplicate logic from tupleobject.py mult = 1000003 x = 0x345678 - z = 2 + z = nValues for i in iter_n: value = getattr(self, 'value%s' % i) if typetuple[i] == object: diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py --- a/pypy/objspace/std/test/test_specialisedtupleobject.py +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -53,6 +53,7 @@ hash_test([1,(1,2)]) hash_test([1,('a',2)]) hash_test([1,()]) + hash_test([1,2,3]) class AppTestW_SpecialisedTupleObject: From noreply at buildbot.pypy.org Thu Dec 8 18:18:07 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Dec 2011 18:18:07 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: Re-add and review this code. Needed to ensure that "tup*1 is tup", Message-ID: <20111208171807.533398205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50315:d896533b28af Date: 2011-12-08 18:15 +0100 http://bitbucket.org/pypy/pypy/changeset/d896533b28af/ Log: Re-add and review this code. Needed to ensure that "tup*1 is tup", in case obscure code depends on it. diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -249,23 +249,23 @@ start += step return space.newtuple(subitems) -##def mul_specialisedtuple_times(space, w_tuple, w_times): -## try: -## times = space.getindex_w(w_times, space.w_OverflowError) -## except OperationError, e: -## if e.match(space, space.w_TypeError): -## raise FailedToImplement -## raise -## if times == 1 and space.type(w_tuple) == space.w_tuple: -## return w_tuple -## items = w_tuple.tolist() -## return space.newtuple(items * times) +def mul_specialisedtuple_times(space, w_tuple, w_times): + try: + times = space.getindex_w(w_times, space.w_OverflowError) + except OperationError, e: + if e.match(space, space.w_TypeError): + raise FailedToImplement + raise + if times == 1 and space.type(w_tuple) == space.w_tuple: + return w_tuple + items = w_tuple.tolist() + return space.newtuple(items * times) -##def mul__SpecialisedTuple_ANY(space, w_tuple, w_times): -## return mul_specialisedtuple_times(space, w_tuple, w_times) +def mul__SpecialisedTuple_ANY(space, w_tuple, w_times): + return mul_specialisedtuple_times(space, w_tuple, w_times) -##def mul__ANY_SpecialisedTuple(space, w_times, w_tuple): -## return mul_specialisedtuple_times(space, w_tuple, w_times) +def mul__ANY_SpecialisedTuple(space, w_times, w_tuple): + return mul_specialisedtuple_times(space, w_tuple, w_times) def eq__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): return w_tuple1.eq(space, w_tuple2) diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py --- a/pypy/objspace/std/test/test_specialisedtupleobject.py +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -222,5 +222,4 @@ class AppTestAll(test_tupleobject.AppTestW_TupleObject): - 
def test_mul_identity(self): - skip("not working with specialisedtuple") + pass From noreply at buildbot.pypy.org Thu Dec 8 18:18:08 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Dec 2011 18:18:08 +0100 (CET) Subject: [pypy-commit] pypy default: Kill this __init__, which nowadays contains just a duplicate of Message-ID: <20111208171808.7858B8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50316:8de6f245c959 Date: 2011-12-08 18:17 +0100 http://bitbucket.org/pypy/pypy/changeset/8de6f245c959/ Log: Kill this __init__, which nowadays contains just a duplicate of setting to w_seq --- already done in the parent __init__. diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -33,9 +33,6 @@ """Sequence iterator specialized for lists, accessing directly their RPython-level list of wrapped objects. """ - def __init__(w_self, w_seq): - W_AbstractSeqIterObject.__init__(w_self, w_seq) - w_self.w_seq = w_seq class W_FastTupleIterObject(W_AbstractSeqIterObject): """Sequence iterator specialized for tuples, accessing From pullrequests-noreply at bitbucket.org Thu Dec 8 20:15:33 2011 From: pullrequests-noreply at bitbucket.org (Bitbucket) Date: Thu, 08 Dec 2011 19:15:33 -0000 Subject: [pypy-commit] [OPEN] Pull request #15 for pypy/pypy: Added tolist() function to numpypy's scalars and multidim arrays Message-ID: A new pull request has been opened by Jeff Terrace. jterrace/pypy has changes to be pulled into pypy/pypy. https://bitbucket.org/pypy/pypy/pull-request/15/added-tolist-function-to-numpypys-scalars Title: Added tolist() function to numpypy's scalars and multidim arrays Includes tests Changes to be pulled: b7f67fbd9e44 by Jeff Terrace: "Fixed merge conflict" b2e4afae62e5 by Jeff Terrace: "Added tolist() function to numpypy's scalars and multidim arrays" -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. 
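The pull request above only lists its changesets by id, so the patch itself does not appear in this digest. For readers unfamiliar with the API being added: NumPy's tolist() converts an N-dimensional array into nested Python lists, recursing until it reaches scalar elements. A minimal, purely illustrative sketch of that behaviour (not the numpypy implementation from the pull request, whose internals such as W_GenericBox and interp_dtype are only mentioned in the review further down) could look like this::

    def tolist_like(arr):
        # Illustrative sketch only: turn an array-like object into nested
        # Python lists, leaving scalar elements unchanged.
        if not hasattr(arr, '__len__'):
            return arr
        return [tolist_like(item) for item in arr]

    assert tolist_like(5) == 5
    assert tolist_like([[1, 2], [3, 4]]) == [[1, 2], [3, 4]]

The recursive formulation is also what the review below suggests: no special case for 1-dimensional arrays is needed, since the recursion bottoms out at the individual element boxes by itself.
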
From noreply at buildbot.pypy.org Thu Dec 8 20:40:39 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Dec 2011 20:40:39 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: - fix space.fixedlist/unpackiterable/listview to handle directly tuples Message-ID: <20111208194039.125A68205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50317:5f96cb15c116 Date: 2011-12-08 20:40 +0100 http://bitbucket.org/pypy/pypy/changeset/5f96cb15c116/ Log: - fix space.fixedlist/unpackiterable/listview to handle directly tuples of any kind - fix the test to raise an OperationError, printing nicer tracebacks diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -29,7 +29,7 @@ from pypy.objspace.std.sliceobject import W_SliceObject from pypy.objspace.std.smallintobject import W_SmallIntObject from pypy.objspace.std.stringobject import W_StringObject -from pypy.objspace.std.tupleobject import W_TupleObject +from pypy.objspace.std.tupleobject import W_AbstractTupleObject from pypy.objspace.std.typeobject import W_TypeObject # types @@ -391,8 +391,8 @@ self.wrap("expected length %d, got %d" % (expected, got))) def unpackiterable(self, w_obj, expected_length=-1): - if isinstance(w_obj, W_TupleObject): - t = w_obj.wrappeditems[:] + if isinstance(w_obj, W_AbstractTupleObject): + t = w_obj.getitems_copy() elif isinstance(w_obj, W_ListObject): t = w_obj.getitems_copy() else: @@ -405,8 +405,8 @@ def fixedview(self, w_obj, expected_length=-1, unroll=False): """ Fast paths """ - if isinstance(w_obj, W_TupleObject): - t = w_obj.wrappeditems + if isinstance(w_obj, W_AbstractTupleObject): + t = w_obj.tolist() elif isinstance(w_obj, W_ListObject): # XXX this can copy twice t = w_obj.getitems()[:] @@ -428,8 +428,8 @@ def listview(self, w_obj, expected_length=-1): if isinstance(w_obj, W_ListObject): t = w_obj.getitems() - elif isinstance(w_obj, W_TupleObject): - t = w_obj.wrappeditems[:] + elif isinstance(w_obj, W_AbstractTupleObject): + t = w_obj.getitems_copy() else: return ObjSpace.unpackiterable(self, w_obj, expected_length) if expected_length != -1 and len(t) != expected_length: diff --git a/pypy/objspace/std/smalltupleobject.py b/pypy/objspace/std/smalltupleobject.py --- a/pypy/objspace/std/smalltupleobject.py +++ b/pypy/objspace/std/smalltupleobject.py @@ -9,13 +9,14 @@ from pypy.interpreter import gateway from pypy.rlib.debug import make_sure_not_resized from pypy.rlib.unroll import unrolling_iterable +from pypy.tool.sourcetools import func_with_new_name from pypy.objspace.std.tupleobject import W_AbstractTupleObject, W_TupleObject class W_SmallTupleObject(W_AbstractTupleObject): from pypy.objspace.std.tupletype import tuple_typedef as typedef - def tolist(self): - raise NotImplementedError + #def tolist(self): --- inherited from W_AbstractTupleObject + # raise NotImplementedError def length(self): raise NotImplementedError @@ -51,6 +52,9 @@ l[i] = getattr(self, 'w_value%s' % i) return l + # same source code, but builds and returns a resizable list + getitems_copy = func_with_new_name(tolist, 'getitems_copy') + def length(self): return n diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -8,6 +8,7 @@ from pypy.rlib.rarithmetic import intmask from pypy.rlib.objectmodel import compute_hash from pypy.rlib.unroll import 
unrolling_iterable +from pypy.tool.sourcetools import func_with_new_name class NotSpecialised(Exception): pass @@ -21,8 +22,8 @@ reprlist = [repr(item) for item in self._to_unwrapped_list()] return "%s(%s)" % (self.__class__.__name__, ', '.join(reprlist)) - def tolist(self): - raise NotImplementedError + #def tolist(self): --- inherited from W_AbstractTupleObject + # raise NotImplementedError def _to_unwrapped_list(self): "NOT_RPYTHON" @@ -46,6 +47,9 @@ def unwrap(self, space): return tuple(self._to_unwrapped_list()) + def delegating(self): + pass # for tests only + def make_specialised_class(typetuple): assert type(typetuple) == tuple @@ -84,6 +88,9 @@ list_w[i] = value return list_w + # same source code, but builds and returns a resizable list + getitems_copy = func_with_new_name(tolist, 'getitems_copy') + def _to_unwrapped_list(self): "NOT_RPYTHON" list_w = [None] * nValues @@ -224,6 +231,7 @@ registerimplementation(W_SpecialisedTupleObject) def delegate_SpecialisedTuple2Tuple(space, w_specialised): + w_specialised.delegating() return W_TupleObject(w_specialised.tolist()) def len__SpecialisedTuple(space, w_tuple): diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py --- a/pypy/objspace/std/test/test_specialisedtupleobject.py +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -63,11 +63,11 @@ def forbid_delegation(space, w_tuple): def delegation_forbidden(): # haaaack - if sys._getframe(2).f_code.co_name == '_mm_repr_tupleS0': - return old_tolist() - raise NotImplementedError, w_tuple - old_tolist = w_tuple.tolist - w_tuple.tolist = delegation_forbidden + co = sys._getframe(2).f_code + if co.co_name.startswith('_mm_repr_tuple'): + return + raise OperationError(space.w_ReferenceError, w_tuple) + w_tuple.delegating = delegation_forbidden return w_tuple cls.w_forbid_delegation = cls.space.wrap(gateway.interp2app(forbid_delegation)) @@ -96,6 +96,10 @@ obj = (1, 2, 3) assert self.isspecialised(obj, '_ooo') + def test_delegation(self): + t = self.forbid_delegation((42, 43)) + raises(ReferenceError, t.__getslice__, 0, 1) + def test_len(self): t = self.forbid_delegation((42,43)) assert len(t) == 2 diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -12,6 +12,15 @@ class W_AbstractTupleObject(W_Object): __slots__ = () + def tolist(self): + "Returns the items, as a fixed-size list." + raise NotImplementedError + + def getitems_copy(self): + "Returns a copy of the items, as a resizable list." 
+ raise NotImplementedError + + class W_TupleObject(W_AbstractTupleObject): from pypy.objspace.std.tupletype import tuple_typedef as typedef _immutable_fields_ = ['wrappeditems[*]'] @@ -29,6 +38,12 @@ items = [space.unwrap(w_item) for w_item in w_tuple.wrappeditems] return tuple(items) + def tolist(self): + return self.wrappeditems + + def getitems_copy(self): + return self.wrappeditems[:] # returns a resizable list + registerimplementation(W_TupleObject) From notifications-noreply at bitbucket.org Thu Dec 8 21:04:22 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Thu, 08 Dec 2011 20:04:22 -0000 Subject: [pypy-commit] [COMMENT] Pull request #15 for pypy/pypy: Added tolist() function to numpypy's scalars and multidim arrays Message-ID: <20111208200422.6435.22167@bitbucket13.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/15/added-tolist-function-to-numpypys-scalars#comment-1329 Alex Gaynor (alex_gaynor) said: Couple comments: # You can't use self.value in W_GenericBox, as it doesn't have a specific type, the right way to go is probably to check self.get_dtype(space).kind and call eitehr __int__ or __float__ on it. # tolist in numarray is a bit messy, a) it should always return a wrapped result, b) it probably doesn't need a special case for 1-dim arrays, it can just keep recursively calling tolist() since eventually it'll get down to a box. # the tests for scalar can just directly instantiate a box object (e.g. numpy.int32()), they don't need to read it out of an array # there should be a test for a 0-d array (whic are different than scalars) Thanks for working on this! -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. From pullrequests-noreply at bitbucket.org Thu Dec 8 21:52:14 2011 From: pullrequests-noreply at bitbucket.org (Bitbucket) Date: Thu, 08 Dec 2011 20:52:14 -0000 Subject: [pypy-commit] [OPEN] Pull request #15 for pypy/pypy: Added tolist() function to numpypy's scalars and multidim arrays In-Reply-To: References: Message-ID: <20111208205214.2672.87740@bitbucket03.managed.contegix.com> Pull request #15 has been updated by Jeff Terrace to include new changes. https://bitbucket.org/pypy/pypy/pull-request/15/added-tolist-function-to-numpypys-scalars Title: Added tolist() function to numpypy's scalars and multidim arrays Creator: Jeff Terrace Includes tests Updated list of changes: c904560ff1b3 by Jeff Terrace: "Added 0-d array test, changed scalar test to use type objects instead of arrays,?" b7f67fbd9e44 by Jeff Terrace: "Fixed merge conflict" b2e4afae62e5 by Jeff Terrace: "Added tolist() function to numpypy's scalars and multidim arrays" -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. From notifications-noreply at bitbucket.org Thu Dec 8 21:54:00 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Thu, 08 Dec 2011 20:54:00 -0000 Subject: [pypy-commit] [COMMENT] Pull request #15 for pypy/pypy: Added tolist() function to numpypy's scalars and multidim arrays Message-ID: <20111208205400.1855.72579@bitbucket12.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/15/added-tolist-function-to-numpypys-scalars#comment-1331 Jeff Terrace (jterrace) said: 1. 
I think I did this right, although I couldn't figure out how to import the LTR names from interp_dtype, so they are hard coded. It's also not very robust to future type additions, so not sure what the best approach is to make sure it gets updated later? 2. Looks much nicer now 3. Done 4. Done -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. From noreply at buildbot.pypy.org Thu Dec 8 22:13:08 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Dec 2011 22:13:08 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: Fixes for runappdirect on top of pypy. Message-ID: <20111208211308.CF80B8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50318:bbaae4e2d252 Date: 2011-12-08 22:12 +0100 http://bitbucket.org/pypy/pypy/changeset/bbaae4e2d252/ Log: Fixes for runappdirect on top of pypy. diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py --- a/pypy/objspace/std/test/test_specialisedtupleobject.py +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -3,7 +3,7 @@ from pypy.objspace.std.specialisedtupleobject import W_SpecialisedTupleObject from pypy.objspace.std.specialisedtupleobject import _specialisations from pypy.interpreter.error import OperationError -from pypy.conftest import gettestobjspace +from pypy.conftest import gettestobjspace, option from pypy.objspace.std.test import test_tupleobject from pypy.interpreter import gateway @@ -69,7 +69,12 @@ raise OperationError(space.w_ReferenceError, w_tuple) w_tuple.delegating = delegation_forbidden return w_tuple - cls.w_forbid_delegation = cls.space.wrap(gateway.interp2app(forbid_delegation)) + if option.runappdirect: + cls.w_forbid_delegation = lambda self, x: x + cls.test_delegation = lambda self: skip("runappdirect") + else: + cls.w_forbid_delegation = cls.space.wrap( + gateway.interp2app(forbid_delegation)) def w_isspecialised(self, obj, expected=''): import __pypy__ From noreply at buildbot.pypy.org Thu Dec 8 23:15:15 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 8 Dec 2011 23:15:15 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: a draft Message-ID: <20111208221515.7BAE38205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3973:f6959a758880 Date: 2011-12-09 00:14 +0200 http://bitbucket.org/pypy/extradoc/changeset/f6959a758880/ Log: a draft diff --git a/blog/draft/matplotlib.rst b/blog/draft/matplotlib.rst new file mode 100644 --- /dev/null +++ b/blog/draft/matplotlib.rst @@ -0,0 +1,80 @@ +=================================== +Plotting using matplotlib from PyPy +=================================== + +**Big fat warning** This is just a proof of concept. It actually barely works. +There are missing pieces left and right there were replaced with hacks so +I can get this to run and show it's possible. Don't try that at home, +especially your home. You have been warned. + +There was a lot of talking about PyPy not integrating well with the current +scientific python ecosystem and numpypy (a numpy reimplementation on top +of pypy) was dubbed "a fancy array library". I'm going to show it is possible. 
+ +First, `the demo`_:: + + #!/usr/bin/env pypy + + # numpy, pypy version + import numpypy as numpy + # DRAGONS LIVE THERE (fortunately hidden) + from embed.emb import import_mod + + pylab = import_mod('matplotlib.pylab') + + if __name__ == '__main__': + a = numpy.arange(100, dtype=int) + b = numpy.sin(a) + pylab.plot(a, b) + pylab.show() + +And you get: + + XXX pic + +Now, how to reproduce it: + +* You need a PyPy without cpyext, I did not find a linker that would support + overriding symbols. Right now there are no nightlies like this, so you have + to compile it yourself, like:: + + ./translate.py -Ojit targetpypystandalone.py --withoutmod-cpyext + + That would give you a PyPy that's unable to load some libraries like PIL, but + perfectly working otherwise. + +* Speaking of which, you need a reasonably recent PyPy. + +* The approach is generally portable, however the implementation is not. Works + on 64bit linux, would not bet for anything else. + +* You need to install python2.6, python2.6 development headers and have numpy + and matplotlib installed on that python. + +* You need a checkout of my `hacks directory`_ and put embedded on your + ``PYTHONPATH``, pypy checkout also has to be on the ``PYTHONPATH``. + +Er wait, what happened? +----------------------- + +What didn't happen is we did not reimplement matplotlib on top of PyPy. What +did happen is we run a CPython instance in PyPy using ctypes. We instantiate +it and nicely follow `embedding`_ tutorial for CPython. Since numpy arrays +are not movable, we're able to pass around an integer that's a pointer to array +data and reconstruct it in the embedded interpreter. Hence with a relatively +little effort we managed to reuse the sama array data on both sides to +plot at array. Easy, no? + +This approach can be extended to support anything that's not too tied with +python objects. SciPy and matplotlib both fall into the same category +but probably the same strategy can be applied to anything, like GTK or QT. +It's just a matter of extending a hack into a working library. + +To summarize, while we're busy making numpypy better and faster, it seems +that all heavy lifting on the C side can be done using an embedded Python +interpreter with relatively little effort. To get to that point, I spent +a day and a half to learn how to embed CPython, with very little prior +experience in the CPython APIs. 
+ +Cheers, +fijal From noreply at buildbot.pypy.org Thu Dec 8 23:15:16 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 8 Dec 2011 23:15:16 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: merge Message-ID: <20111208221516.9886E8205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3974:37901e468764 Date: 2011-12-09 00:14 +0200 http://bitbucket.org/pypy/extradoc/changeset/37901e468764/ Log: merge diff --git a/planning/micronumpy.txt b/planning/micronumpy.txt --- a/planning/micronumpy.txt +++ b/planning/micronumpy.txt @@ -1,10 +1,6 @@ NEW TASKS --------- -- add in numpy.generic and the various subclasses, use them in returning - instances from subscripting (and possibly internally), also make them valid - for the dtype arguments - - astype - a good sort function @@ -13,16 +9,13 @@ - endianness -- scalar types like numpy.int8 - -- add multi-dim arrays - - - will need to refactor some functions - - frompyfunc to create ufuncs from python functions - more ufuncs -- arange/linspace/other ranges +- linspace/other ranges -- numpy.flatiter array.flat and friends +- more attributes/methods on numpy.flatiter + +- axis= parameter to various methods + From noreply at buildbot.pypy.org Thu Dec 8 23:21:11 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 8 Dec 2011 23:21:11 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: some review Message-ID: <20111208222111.C1D5E8205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r3975:2d85277632f9 Date: 2011-12-08 17:21 -0500 http://bitbucket.org/pypy/extradoc/changeset/2d85277632f9/ Log: some review diff --git a/blog/draft/matplotlib.rst b/blog/draft/matplotlib.rst --- a/blog/draft/matplotlib.rst +++ b/blog/draft/matplotlib.rst @@ -2,14 +2,15 @@ Plotting using matplotlib from PyPy =================================== -**Big fat warning** This is just a proof of concept. It actually barely works. -There are missing pieces left and right there were replaced with hacks so -I can get this to run and show it's possible. Don't try that at home, -especially your home. You have been warned. +**Big fat warning** This is just a proof of concept. It barely works. There are +missing pieces left and right, which were replaced with hacks so I can get this +to run and prove it's possible. Don't try this at home, especially your home. +You have been warned. -There was a lot of talking about PyPy not integrating well with the current -scientific python ecosystem and numpypy (a numpy reimplementation on top -of pypy) was dubbed "a fancy array library". I'm going to show it is possible. +There has been a lot of talking about PyPy not integrating well with the +current scientific Python ecosystem, and ``numpypy`` (a NumPy reimplementation +on top of pypy) was dubbed "a fancy array library". I'm going to show that +integration with this ecosystem is possible with our design. First, `the demo`_:: @@ -45,25 +46,25 @@ * Speaking of which, you need a reasonably recent PyPy. -* The approach is generally portable, however the implementation is not. Works +* The approach is generally portable (XXX: why not?), however the implementation is not. Works on 64bit linux, would not bet for anything else. -* You need to install python2.6, python2.6 development headers and have numpy - and matplotlib installed on that python. +* You need to install python2.6, the python2.6 development headers, and have + numpy and matplotlib installed on that python. 
* You need a checkout of my `hacks directory`_ and put embedded on your - ``PYTHONPATH``, pypy checkout also has to be on the ``PYTHONPATH``. + ``PYTHONPATH``, your pypy checkout also has to be on the ``PYTHONPATH``. Er wait, what happened? ----------------------- What didn't happen is we did not reimplement matplotlib on top of PyPy. What -did happen is we run a CPython instance in PyPy using ctypes. We instantiate -it and nicely follow `embedding`_ tutorial for CPython. Since numpy arrays -are not movable, we're able to pass around an integer that's a pointer to array -data and reconstruct it in the embedded interpreter. Hence with a relatively -little effort we managed to reuse the sama array data on both sides to -plot at array. Easy, no? +did happen is we embed CPython inside of PyPy using ctypes. We instantiate it. +and follow the `embedding`_ tutorial for CPython. Since numpy arrays are not +movable, we're able to pass around an integer that's represents the memory +address of the array data and reconstruct it in the embedded interpreter. Hence +with a relatively little effort we managed to reuse the same array data on both +sides to plot at array. Easy, no? This approach can be extended to support anything that's not too tied with python objects. SciPy and matplotlib both fall into the same category @@ -74,7 +75,7 @@ that all heavy lifting on the C side can be done using an embedded Python interpreter with relatively little effort. To get to that point, I spent a day and a half to learn how to embed CPython, with very little prior -experience in the CPython APIs. +experience in the CPython APIs. (XXX: this should make clear that you can use it for integration, but for speed you should keep stuff all in PyPy) Cheers, fijal From noreply at buildbot.pypy.org Thu Dec 8 23:23:40 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 8 Dec 2011 23:23:40 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: clarrify Message-ID: <20111208222340.BCF248205C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r3976:82c71fa181e5 Date: 2011-12-08 17:23 -0500 http://bitbucket.org/pypy/extradoc/changeset/82c71fa181e5/ Log: clarrify diff --git a/blog/draft/matplotlib.rst b/blog/draft/matplotlib.rst --- a/blog/draft/matplotlib.rst +++ b/blog/draft/matplotlib.rst @@ -46,8 +46,9 @@ * Speaking of which, you need a reasonably recent PyPy. -* The approach is generally portable (XXX: why not?), however the implementation is not. Works - on 64bit linux, would not bet for anything else. +* The approach is generally portable, however the implementation has not been + tested on any platforms other than 64-bit Linux. Try anything else at your + own peril. * You need to install python2.6, the python2.6 development headers, and have numpy and matplotlib installed on that python. From noreply at buildbot.pypy.org Thu Dec 8 23:25:37 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 8 Dec 2011 23:25:37 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: address the review Message-ID: <20111208222537.330E48205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3977:3977810e2b86 Date: 2011-12-09 00:24 +0200 http://bitbucket.org/pypy/extradoc/changeset/3977810e2b86/ Log: address the review diff --git a/blog/draft/matplotlib.rst b/blog/draft/matplotlib.rst --- a/blog/draft/matplotlib.rst +++ b/blog/draft/matplotlib.rst @@ -46,8 +46,8 @@ * Speaking of which, you need a reasonably recent PyPy. 
-* The approach is generally portable (XXX: why not?), however the implementation is not. Works - on 64bit linux, would not bet for anything else. +* The approach is generally portable, however the implementation has been + tested only on 64bit linux. Few tweaks might be required. * You need to install python2.6, the python2.6 development headers, and have numpy and matplotlib installed on that python. @@ -72,10 +72,11 @@ It's just a matter of extending a hack into a working library. To summarize, while we're busy making numpypy better and faster, it seems -that all heavy lifting on the C side can be done using an embedded Python +that all external libraries on the C side can be done using an embedded Python interpreter with relatively little effort. To get to that point, I spent a day and a half to learn how to embed CPython, with very little prior -experience in the CPython APIs. (XXX: this should make clear that you can use it for integration, but for speed you should keep stuff all in PyPy) +experience in the CPython APIs. Of course you should still keep as much as +possible in PyPy to make it nice and fast :) Cheers, fijal From noreply at buildbot.pypy.org Thu Dec 8 23:25:38 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 8 Dec 2011 23:25:38 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: merge Message-ID: <20111208222538.4C54E8205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3978:84a45e78b80d Date: 2011-12-09 00:25 +0200 http://bitbucket.org/pypy/extradoc/changeset/84a45e78b80d/ Log: merge From noreply at buildbot.pypy.org Thu Dec 8 23:28:06 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 8 Dec 2011 23:28:06 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: add a screenshot Message-ID: <20111208222806.9B9EE8205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3979:3e9cbd0c0d54 Date: 2011-12-09 00:27 +0200 http://bitbucket.org/pypy/extradoc/changeset/3e9cbd0c0d54/ Log: add a screenshot diff --git a/blog/draft/matplotlib.rst b/blog/draft/matplotlib.rst --- a/blog/draft/matplotlib.rst +++ b/blog/draft/matplotlib.rst @@ -80,3 +80,6 @@ Cheers, fijal + +.. _`hacks directory`: https://bitbucket.org/fijal/hack2 +.. _`the demo`: https://bitbucket.org/fijal/hack2/src/default/embed/embed/matplotwrapper.py diff --git a/blog/draft/screen0.png b/blog/draft/screen0.png new file mode 100644 index 0000000000000000000000000000000000000000..0f6d5f2e8bdb961c6fc893e6d745910f98792684 GIT binary patch [cut] From noreply at buildbot.pypy.org Thu Dec 8 23:28:46 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 8 Dec 2011 23:28:46 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: links Message-ID: <20111208222846.585C28205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3980:89893594fd04 Date: 2011-12-09 00:28 +0200 http://bitbucket.org/pypy/extradoc/changeset/89893594fd04/ Log: links diff --git a/blog/draft/matplotlib.rst b/blog/draft/matplotlib.rst --- a/blog/draft/matplotlib.rst +++ b/blog/draft/matplotlib.rst @@ -83,3 +83,4 @@ .. _`hacks directory`: https://bitbucket.org/fijal/hack2 .. _`the demo`: https://bitbucket.org/fijal/hack2/src/default/embed/embed/matplotwrapper.py +.. 
_`embedding`: http://docs.python.org/extending/embedding.html From noreply at buildbot.pypy.org Fri Dec 9 08:36:52 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Dec 2011 08:36:52 +0100 (CET) Subject: [pypy-commit] pypy default: A failing test: keepalive_until_there is not really Message-ID: <20111209073652.431238205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50319:1edf7af77df6 Date: 2011-12-09 08:36 +0100 http://bitbucket.org/pypy/pypy/changeset/1edf7af77df6/ Log: A failing test: keepalive_until_there is not really strong enough. diff --git a/pypy/jit/metainterp/test/test_del.py b/pypy/jit/metainterp/test/test_del.py --- a/pypy/jit/metainterp/test/test_del.py +++ b/pypy/jit/metainterp/test/test_del.py @@ -1,5 +1,7 @@ import py -from pypy.rlib.jit import JitDriver +from pypy.rlib.jit import JitDriver, dont_look_inside +from pypy.rlib.objectmodel import keepalive_until_here +from pypy.rlib import rgc from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin @@ -80,6 +82,55 @@ assert res == 1 self.check_resops(call=1) # for the case B(), but not for the case A() + def test_keepalive(self): + # Fails for now, bcause the keepalive operation doesn't become + # anything more than a '-live-' in the jitcodes. We end up with + # operations that are reordered as follows: + # - x = ll_alloc_with_del() + # - setfield(x.state, state) + # - setfield(state.num, 1000) + # but when run on CPython with reference counting, __del__ is + # invoked between the 2nd and the 3rd line, i.e. too early. + py.test.skip("XXX fails") + # + mydriver = JitDriver(reds = ['n', 'states'], greens = []) + class State: + num = 1 + class X: + def __init__(self, state): + self.state = state + def __del__(self): + self.state.num += 1 + @dont_look_inside + def do_stuff(): + pass + def f(n): + states = [] + while n > 0: + mydriver.jit_merge_point(n=n, states=states) + state = State() + states.append(state) + x = X(state) + do_stuff() + state.num *= 1000 + do_stuff() + keepalive_until_here(x) + n -= 1 + return states + def main(n): + states = f(n) + rgc.collect() + rgc.collect() + err = 1001 + for state in states: + if state.num != 1001: + err = state.num + print 'ERROR:', err + return err + assert main(20) == 1001 + res = self.meta_interp(main, [20]) + assert res == 1001 + class TestLLtype(DelTests, LLJitMixin): def test_signal_action(self): From noreply at buildbot.pypy.org Fri Dec 9 09:19:21 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Dec 2011 09:19:21 +0100 (CET) Subject: [pypy-commit] pypy default: bah. Message-ID: <20111209081921.EE0478205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50320:5a510c5ffc55 Date: 2011-12-09 09:18 +0100 http://bitbucket.org/pypy/pypy/changeset/5a510c5ffc55/ Log: bah. diff --git a/pypy/jit/metainterp/test/test_del.py b/pypy/jit/metainterp/test/test_del.py --- a/pypy/jit/metainterp/test/test_del.py +++ b/pypy/jit/metainterp/test/test_del.py @@ -83,15 +83,7 @@ self.check_resops(call=1) # for the case B(), but not for the case A() def test_keepalive(self): - # Fails for now, bcause the keepalive operation doesn't become - # anything more than a '-live-' in the jitcodes. We end up with - # operations that are reordered as follows: - # - x = ll_alloc_with_del() - # - setfield(x.state, state) - # - setfield(state.num, 1000) - # but when run on CPython with reference counting, __del__ is - # invoked between the 2nd and the 3rd line, i.e. too early. 
- py.test.skip("XXX fails") + py.test.skip("XXX fails") # hum, I think the test itself is broken # mydriver = JitDriver(reds = ['n', 'states'], greens = []) class State: From noreply at buildbot.pypy.org Fri Dec 9 10:26:38 2011 From: noreply at buildbot.pypy.org (bivab) Date: Fri, 9 Dec 2011 10:26:38 +0100 (CET) Subject: [pypy-commit] benchmarks default: sort benchmark names Message-ID: <20111209092638.8D6CF8205C@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r154:dece54731283 Date: 2011-12-09 10:26 +0100 http://bitbucket.org/pypy/benchmarks/changeset/dece54731283/ Log: sort benchmark names diff --git a/runner.py b/runner.py --- a/runner.py +++ b/runner.py @@ -80,7 +80,7 @@ default=','.join(BENCHMARK_SET), help=("Comma-separated list of benchmarks to run" " Valid benchmarks are: " + - ", ".join(BENCHMARK_SET))) + ", ".join(sorted(BENCHMARK_SET)))) parser.add_option('-p', '--pypy-c', default=sys.executable, help='pypy-c or other modified python to run against') parser.add_option('-r', '--revision', default=0, action="store", From noreply at buildbot.pypy.org Fri Dec 9 12:00:06 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Dec 2011 12:00:06 +0100 (CET) Subject: [pypy-commit] pypy temp2: hg backout 062e9d06c908 Message-ID: <20111209110006.EB39182ABC@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: temp2 Changeset: r50321:03796662a8a0 Date: 2011-12-09 11:59 +0100 http://bitbucket.org/pypy/pypy/changeset/03796662a8a0/ Log: hg backout 062e9d06c908 diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -23,12 +23,9 @@ return self.frame_bindings.get(box, None) def loc(self, box): - try: - return self.frame_bindings[box] - except KeyError: - return self.get_new_loc(box) - - def get_new_loc(self, box): + res = self.get(box) + if res is not None: + return res size = self.frame_size(box.type) self.frame_depth += ((-self.frame_depth) & (size-1)) # ^^^ frame_depth is rounded up to a multiple of 'size', assuming @@ -70,17 +67,8 @@ self.position = -1 self.frame_manager = frame_manager self.assembler = assembler - self.hint_frame_locations = {} # {Box: StackLoc} - self.freed_frame_locations = {} # {StackLoc: None} - - def is_still_alive(self, v): - # Check if 'v' is alive at the current position. - # Return False if the last usage is strictly before. - return self.longevity[v][1] >= self.position def stays_alive(self, v): - # Check if 'v' stays alive after the current position. - # Return False if the last usage is before or at position. return self.longevity[v][1] > self.position def next_instruction(self, incr=1): @@ -96,16 +84,11 @@ point for all variables that might be in registers. """ self._check_type(v) - if isinstance(v, Const): + if isinstance(v, Const) or v not in self.reg_bindings: return if v not in self.longevity or self.longevity[v][1] <= self.position: - if v in self.reg_bindings: - self.free_regs.append(self.reg_bindings[v]) - del self.reg_bindings[v] - if self.frame_manager is not None: - if v in self.frame_manager.frame_bindings: - loc = self.frame_manager.frame_bindings[v] - self.freed_frame_locations[loc] = None + self.free_regs.append(self.reg_bindings[v]) + del self.reg_bindings[v] def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. 
@@ -177,23 +160,6 @@ self.reg_bindings[v] = loc return loc - def _frame_loc(self, v): - # first check if it's already in the frame_manager - try: - return self.frame_manager.frame_bindings[v] - except KeyError: - pass - # check if we have a hint for this box - if v in self.hint_frame_locations: - # if we do, check that the hinted location is known to be free - loc = self.hint_frame_locations[v] - if loc in self.freed_frame_locations: - del self.freed_frame_locations[loc] - self.frame_manager.frame_bindings[v] = loc - return loc - # no valid hint. make up a new free location - return self.frame_manager.get_new_loc(v) - def _spill_var(self, v, forbidden_vars, selected_reg, need_lower_byte=False): v_to_spill = self._pick_variable_to_spill(v, forbidden_vars, @@ -201,7 +167,7 @@ loc = self.reg_bindings[v_to_spill] del self.reg_bindings[v_to_spill] if self.frame_manager.get(v_to_spill) is None: - newloc = self._frame_loc(v_to_spill) + newloc = self.frame_manager.loc(v_to_spill) self.assembler.regalloc_mov(loc, newloc) return loc @@ -278,7 +244,7 @@ except KeyError: if box in self.bindings_to_frame_reg: return self.frame_reg - return self._frame_loc(box) + return self.frame_manager.loc(box) def return_constant(self, v, forbidden_vars=[], selected_reg=None): """ Return the location of the constant v. If 'selected_reg' is @@ -326,7 +292,7 @@ self.reg_bindings[v] = loc self.assembler.regalloc_mov(prev_loc, loc) else: - loc = self._frame_loc(v) + loc = self.frame_manager.loc(v) self.assembler.regalloc_mov(prev_loc, loc) def force_result_in_reg(self, result_v, v, forbidden_vars=[]): @@ -345,7 +311,7 @@ self.reg_bindings[result_v] = loc return loc if v not in self.reg_bindings: - prev_loc = self._frame_loc(v) + prev_loc = self.frame_manager.loc(v) loc = self.force_allocate_reg(v, forbidden_vars) self.assembler.regalloc_mov(prev_loc, loc) assert v in self.reg_bindings @@ -365,7 +331,7 @@ def _sync_var(self, v): if not self.frame_manager.get(v): reg = self.reg_bindings[v] - to = self._frame_loc(v) + to = self.frame_manager.loc(v) self.assembler.regalloc_mov(reg, to) # otherwise it's clean diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -348,50 +348,3 @@ spilled2 = rm.force_allocate_reg(b5) assert spilled2 is loc rm._check_invariants() - - - def test_hint_frame_locations_1(self): - b0, b1 = newboxes(0, 1) - longevity = {b0: (0, 1), b1: (0, 1)} - fm = TFrameManager() - asm = MockAsm() - rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) - rm.hint_frame_locations[b0] = "some_stack_loc" - rm.freed_frame_locations["some_stack_loc"] = None - rm.force_allocate_reg(b0) - rm.force_allocate_reg(b1) - rm.force_spill_var(b0) - rm.force_spill_var(b1) - assert rm.loc(b0) == "some_stack_loc" - assert isinstance(rm.loc(b1), FakeFramePos) - rm._check_invariants() - - def test_hint_frame_locations_2(self): - b0, b1, b2 = newboxes(0, 1, 2) - longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)} - fm = TFrameManager() - asm = MockAsm() - rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) - rm.force_allocate_reg(b0) - rm.force_allocate_reg(b1) - rm.force_allocate_reg(b2) - rm.force_spill_var(b0) - loc = rm.loc(b0) - assert isinstance(loc, FakeFramePos) - rm.position = 1 - assert loc not in rm.freed_frame_locations - rm.possibly_free_var(b0) - assert loc in rm.freed_frame_locations - # - rm.hint_frame_locations[b1] = loc - 
rm.force_spill_var(b1) - loc1 = rm.loc(b1) - assert loc1 is loc - assert rm.freed_frame_locations == {} - # - rm.hint_frame_locations[b2] = loc - rm.force_spill_var(b2) - loc2 = rm.loc(b2) - assert loc2 is not loc1 # because it's not in freed_frame_locations - # - rm._check_invariants() diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -690,7 +690,6 @@ def _assemble(self, regalloc, operations): self._regalloc = regalloc - regalloc.compute_hint_frame_locations(operations) regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -1318,29 +1318,6 @@ self.rm.possibly_free_var(tmpbox_low) self.rm.possibly_free_var(tmpbox_high) - def compute_hint_frame_locations(self, operations): - # optimization only: fill in the 'hint_frame_locations' dictionary - # of rm and xrm based on the JUMP at the end of the loop, by looking - # at where we would like the boxes to be after the jump. - op = operations[-1] - if op.getopnum() != rop.JUMP: - return - descr = op.getdescr() - assert isinstance(descr, LoopToken) - nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr) - for i in range(op.numargs()): - box = op.getarg(i) - if isinstance(box, Box): - loc = nonfloatlocs[i] - if isinstance(loc, StackLoc): - assert box.type != FLOAT - self.rm.hint_frame_locations[box] = loc - else: - loc = floatlocs[i] - if isinstance(loc, StackLoc): - assert box.type == FLOAT - self.xrm.hint_frame_locations[box] = loc - def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None From noreply at buildbot.pypy.org Fri Dec 9 12:01:53 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Dec 2011 12:01:53 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: hg merge default Message-ID: <20111209110153.E25B082ABC@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50322:4efbd07c3e55 Date: 2011-12-09 12:01 +0100 http://bitbucket.org/pypy/pypy/changeset/4efbd07c3e55/ Log: hg merge default diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -23,9 +23,12 @@ return self.frame_bindings.get(box, None) def loc(self, box): - res = self.get(box) - if res is not None: - return res + try: + return self.frame_bindings[box] + except KeyError: + return self.get_new_loc(box) + + def get_new_loc(self, box): size = self.frame_size(box.type) self.frame_depth += ((-self.frame_depth) & (size-1)) # ^^^ frame_depth is rounded up to a multiple of 'size', assuming @@ -67,8 +70,17 @@ self.position = -1 self.frame_manager = frame_manager self.assembler = assembler + self.hint_frame_locations = {} # {Box: StackLoc} + self.freed_frame_locations = {} # {StackLoc: None} + + def is_still_alive(self, v): + # Check if 'v' is alive at the current position. + # Return False if the last usage is strictly before. + return self.longevity[v][1] >= self.position def stays_alive(self, v): + # Check if 'v' stays alive after the current position. + # Return False if the last usage is before or at position. 
return self.longevity[v][1] > self.position def next_instruction(self, incr=1): @@ -84,11 +96,16 @@ point for all variables that might be in registers. """ self._check_type(v) - if isinstance(v, Const) or v not in self.reg_bindings: + if isinstance(v, Const): return if v not in self.longevity or self.longevity[v][1] <= self.position: - self.free_regs.append(self.reg_bindings[v]) - del self.reg_bindings[v] + if v in self.reg_bindings: + self.free_regs.append(self.reg_bindings[v]) + del self.reg_bindings[v] + if self.frame_manager is not None: + if v in self.frame_manager.frame_bindings: + loc = self.frame_manager.frame_bindings[v] + self.freed_frame_locations[loc] = None def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. @@ -160,6 +177,23 @@ self.reg_bindings[v] = loc return loc + def _frame_loc(self, v): + # first check if it's already in the frame_manager + try: + return self.frame_manager.frame_bindings[v] + except KeyError: + pass + # check if we have a hint for this box + if v in self.hint_frame_locations: + # if we do, check that the hinted location is known to be free + loc = self.hint_frame_locations[v] + if loc in self.freed_frame_locations: + del self.freed_frame_locations[loc] + self.frame_manager.frame_bindings[v] = loc + return loc + # no valid hint. make up a new free location + return self.frame_manager.get_new_loc(v) + def _spill_var(self, v, forbidden_vars, selected_reg, need_lower_byte=False): v_to_spill = self._pick_variable_to_spill(v, forbidden_vars, @@ -167,7 +201,7 @@ loc = self.reg_bindings[v_to_spill] del self.reg_bindings[v_to_spill] if self.frame_manager.get(v_to_spill) is None: - newloc = self.frame_manager.loc(v_to_spill) + newloc = self._frame_loc(v_to_spill) self.assembler.regalloc_mov(loc, newloc) return loc @@ -244,7 +278,7 @@ except KeyError: if box in self.bindings_to_frame_reg: return self.frame_reg - return self.frame_manager.loc(box) + return self._frame_loc(box) def return_constant(self, v, forbidden_vars=[], selected_reg=None): """ Return the location of the constant v. 
If 'selected_reg' is @@ -292,7 +326,7 @@ self.reg_bindings[v] = loc self.assembler.regalloc_mov(prev_loc, loc) else: - loc = self.frame_manager.loc(v) + loc = self._frame_loc(v) self.assembler.regalloc_mov(prev_loc, loc) def force_result_in_reg(self, result_v, v, forbidden_vars=[]): @@ -311,7 +345,7 @@ self.reg_bindings[result_v] = loc return loc if v not in self.reg_bindings: - prev_loc = self.frame_manager.loc(v) + prev_loc = self._frame_loc(v) loc = self.force_allocate_reg(v, forbidden_vars) self.assembler.regalloc_mov(prev_loc, loc) assert v in self.reg_bindings @@ -331,7 +365,7 @@ def _sync_var(self, v): if not self.frame_manager.get(v): reg = self.reg_bindings[v] - to = self.frame_manager.loc(v) + to = self._frame_loc(v) self.assembler.regalloc_mov(reg, to) # otherwise it's clean diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -348,3 +348,50 @@ spilled2 = rm.force_allocate_reg(b5) assert spilled2 is loc rm._check_invariants() + + + def test_hint_frame_locations_1(self): + b0, b1 = newboxes(0, 1) + longevity = {b0: (0, 1), b1: (0, 1)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.hint_frame_locations[b0] = "some_stack_loc" + rm.freed_frame_locations["some_stack_loc"] = None + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_spill_var(b0) + rm.force_spill_var(b1) + assert rm.loc(b0) == "some_stack_loc" + assert isinstance(rm.loc(b1), FakeFramePos) + rm._check_invariants() + + def test_hint_frame_locations_2(self): + b0, b1, b2 = newboxes(0, 1, 2) + longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_allocate_reg(b2) + rm.force_spill_var(b0) + loc = rm.loc(b0) + assert isinstance(loc, FakeFramePos) + rm.position = 1 + assert loc not in rm.freed_frame_locations + rm.possibly_free_var(b0) + assert loc in rm.freed_frame_locations + # + rm.hint_frame_locations[b1] = loc + rm.force_spill_var(b1) + loc1 = rm.loc(b1) + assert loc1 is loc + assert rm.freed_frame_locations == {} + # + rm.hint_frame_locations[b2] = loc + rm.force_spill_var(b2) + loc2 = rm.loc(b2) + assert loc2 is not loc1 # because it's not in freed_frame_locations + # + rm._check_invariants() diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -690,6 +690,7 @@ def _assemble(self, regalloc, operations): self._regalloc = regalloc + regalloc.compute_hint_frame_locations(operations) regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -1318,6 +1318,29 @@ self.rm.possibly_free_var(tmpbox_low) self.rm.possibly_free_var(tmpbox_high) + def compute_hint_frame_locations(self, operations): + # optimization only: fill in the 'hint_frame_locations' dictionary + # of rm and xrm based on the JUMP at the end of the loop, by looking + # at where we would like the boxes to be after the jump. 
+ op = operations[-1] + if op.getopnum() != rop.JUMP: + return + descr = op.getdescr() + assert isinstance(descr, LoopToken) + nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr) + for i in range(op.numargs()): + box = op.getarg(i) + if isinstance(box, Box): + loc = nonfloatlocs[i] + if isinstance(loc, StackLoc): + assert box.type != FLOAT + self.rm.hint_frame_locations[box] = loc + else: + loc = floatlocs[i] + if isinstance(loc, StackLoc): + assert box.type == FLOAT + self.xrm.hint_frame_locations[box] = loc + def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -33,9 +33,6 @@ """Sequence iterator specialized for lists, accessing directly their RPython-level list of wrapped objects. """ - def __init__(w_self, w_seq): - W_AbstractSeqIterObject.__init__(w_self, w_seq) - w_self.w_seq = w_seq class W_FastTupleIterObject(W_AbstractSeqIterObject): """Sequence iterator specialized for tuples, accessing diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -9,8 +9,9 @@ from pypy.interpreter import gateway, baseobjspace from pypy.rlib.objectmodel import instantiate, specialize from pypy.rlib.listsort import make_timsort_class -from pypy.rlib import rerased, jit +from pypy.rlib import rerased, jit, debug from pypy.interpreter.argument import Signature +from pypy.tool.sourcetools import func_with_new_name UNROLL_CUTOFF = 5 @@ -170,6 +171,19 @@ share with the storage, if possible.""" return self.strategy.getitems(self) + def getitems_fixedsize(self): + """Returns a fixed-size list of all items after wrapping them.""" + l = self.strategy.getitems_fixedsize(self) + debug.make_sure_not_resized(l) + return l + + def getitems_unroll(self): + """Returns a fixed-size list of all items after wrapping them. The JIT + will fully unroll this function. """ + l = self.strategy.getitems_unroll(self) + debug.make_sure_not_resized(l) + return l + def getitems_copy(self): """Returns a copy of all items in the list. 
Same as getitems except for ObjectListStrategy.""" @@ -366,6 +380,8 @@ def getitems_copy(self, w_list): return [] + getitems_fixedsize = func_with_new_name(getitems_copy, "getitems_fixedsize") + getitems_unroll = getitems_fixedsize def getstorage_copy(self, w_list): return self.erase(None) @@ -496,7 +512,6 @@ # tuple is unmutable return w_list.lstorage - @specialize.arg(2) def _getitems_range(self, w_list, wrap_items): l = self.unerase(w_list.lstorage) @@ -519,6 +534,13 @@ return r + @jit.dont_look_inside + def getitems_fixedsize(self, w_list): + return self._getitems_range_unroll(w_list, True) + def getitems_unroll(self, w_list): + return self._getitems_range_unroll(w_list, True) + _getitems_range_unroll = jit.unroll_safe(func_with_new_name(_getitems_range, "_getitems_range_unroll")) + def getslice(self, w_list, start, stop, step, length): v = self.unerase(w_list.lstorage) old_start = v[0] @@ -676,6 +698,13 @@ def getitems_copy(self, w_list): return [self.wrap(item) for item in self.unerase(w_list.lstorage)] + @jit.unroll_safe + def getitems_unroll(self, w_list): + return [self.wrap(item) for item in self.unerase(w_list.lstorage)] + @jit.dont_look_inside + def getitems_fixedsize(self, w_list): + return self.getitems_unroll(w_list) + def getstorage_copy(self, w_list): items = self.unerase(w_list.lstorage)[:] return self.erase(items) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -408,8 +408,10 @@ if isinstance(w_obj, W_AbstractTupleObject): t = w_obj.tolist() elif isinstance(w_obj, W_ListObject): - # XXX this can copy twice - t = w_obj.getitems()[:] + if unroll: + t = w_obj.getitems_unroll() + else: + t = w_obj.getitems_fixedsize() else: if unroll: return make_sure_not_resized(ObjSpace.unpackiterable_unroll( diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -48,6 +48,46 @@ for i in range(7): assert self.space.eq_w(l[i], l2[i]) + def test_getitems_fixedsize(self): + w = self.space.wrap + from pypy.objspace.std.listobject import make_range_list + rangelist = make_range_list(self.space, 1,1,7) + emptylist = W_ListObject(self.space, []) + intlist = W_ListObject(self.space, [w(1),w(2),w(3),w(4),w(5),w(6),w(7)]) + strlist = W_ListObject(self.space, [w('1'),w('2'),w('3'),w('4'),w('5'),w('6'),w('7')]) + floatlist = W_ListObject(self.space, [w(1.0),w(2.0),w(3.0),w(4.0),w(5.0),w(6.0),w(7.0)]) + objlist = W_ListObject(self.space, [w(1),w('2'),w(3.0),w(4),w(5),w(6),w(7)]) + + emptylist_copy = emptylist.getitems_fixedsize() + assert emptylist_copy == [] + + rangelist_copy = rangelist.getitems_fixedsize() + intlist_copy = intlist.getitems_fixedsize() + strlist_copy = strlist.getitems_fixedsize() + floatlist_copy = floatlist.getitems_fixedsize() + objlist_copy = objlist.getitems_fixedsize() + for i in range(7): + assert self.space.eq_w(rangelist_copy[i], rangelist.getitem(i)) + assert self.space.eq_w(intlist_copy[i], intlist.getitem(i)) + assert self.space.eq_w(strlist_copy[i], strlist.getitem(i)) + assert self.space.eq_w(floatlist_copy[i], floatlist.getitem(i)) + assert self.space.eq_w(objlist_copy[i], objlist.getitem(i)) + + emptylist_copy = emptylist.getitems_unroll() + assert emptylist_copy == [] + + rangelist_copy = rangelist.getitems_unroll() + intlist_copy = intlist.getitems_unroll() + strlist_copy = strlist.getitems_unroll() + floatlist_copy = 
floatlist.getitems_unroll() + objlist_copy = objlist.getitems_unroll() + for i in range(7): + assert self.space.eq_w(rangelist_copy[i], rangelist.getitem(i)) + assert self.space.eq_w(intlist_copy[i], intlist.getitem(i)) + assert self.space.eq_w(strlist_copy[i], strlist.getitem(i)) + assert self.space.eq_w(floatlist_copy[i], floatlist.getitem(i)) + assert self.space.eq_w(objlist_copy[i], objlist.getitem(i)) + def test_random_getitem(self): w = self.space.wrap s = list('qedx387tn3uixhvt 7fh387fymh3dh238 dwd-wq.dwq9') From noreply at buildbot.pypy.org Fri Dec 9 14:16:47 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Dec 2011 14:16:47 +0100 (CET) Subject: [pypy-commit] pypy default: For minimark, use by default 4MB for the nursery size, instead of Message-ID: <20111209131647.0BF9982ABC@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50323:4c10a84a7da0 Date: 2011-12-09 14:14 +0100 http://bitbucket.org/pypy/pypy/changeset/4c10a84a7da0/ Log: For minimark, use by default 4MB for the nursery size, instead of half the number found by poking in /proc/cpuinfo, which may be a bit bogus. Also, nowadays, that rule seems wrong, in the sense that we need at least a few MBs to get good results, and increasing it doesn't change a lot. diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py --- a/pypy/rpython/memory/gc/minimark.py +++ b/pypy/rpython/memory/gc/minimark.py @@ -2,8 +2,7 @@ Environment variables can be used to fine-tune the following parameters: - PYPY_GC_NURSERY The nursery size. Defaults to half the size of - the L2 cache. Try values like '1.2MB'. Small values + PYPY_GC_NURSERY The nursery size. Defaults to '4MB'. Small values (like 1 or 1KB) are useful for debugging. PYPY_GC_MAJOR_COLLECT Major collection memory factor. Default is '1.82', @@ -61,7 +60,7 @@ # # * young objects: allocated in the nursery if they are not too large, or # raw-malloced otherwise. The nursery is a fixed-size memory buffer of -# half the size of the L2 cache. When full, we do a minor collection; +# 4MB by default. When full, we do a minor collection; # the surviving objects from the nursery are moved outside, and the # non-surviving raw-malloced objects are freed. All surviving objects # become old. @@ -329,7 +328,8 @@ # size (needed to handle mallocs just below 'large_objects') but # hacking at the current nursery position in collect_and_reserve(). 
if newsize <= 0: - newsize = env.estimate_best_nursery_size() + newsize = 4*1024*1024 # fixed to 4MB by default + # (it was env.estimate_best_nursery_size()) if newsize <= 0: newsize = defaultsize if newsize < minsize: From noreply at buildbot.pypy.org Fri Dec 9 14:44:10 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Fri, 9 Dec 2011 14:44:10 +0100 (CET) Subject: [pypy-commit] pypy default: windows fix (mattip, stakkars) Message-ID: <20111209134410.C356282ABC@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: Changeset: r50324:8abfe57358d8 Date: 2011-12-09 14:20 +0100 http://bitbucket.org/pypy/pypy/changeset/8abfe57358d8/ Log: windows fix (mattip, stakkars) diff --git a/pypy/rlib/rdynload.py b/pypy/rlib/rdynload.py --- a/pypy/rlib/rdynload.py +++ b/pypy/rlib/rdynload.py @@ -115,7 +115,8 @@ if _WIN32: DLLHANDLE = rwin32.HMODULE - def dlopen(name): + def dlopen(name, mode=-1): + # mode is unused on windows, but a consistant signature res = rwin32.LoadLibrary(name) if not res: err = rwin32.GetLastError() From noreply at buildbot.pypy.org Fri Dec 9 16:15:36 2011 From: noreply at buildbot.pypy.org (bivab) Date: Fri, 9 Dec 2011 16:15:36 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: (bivab, arigo) use constants here Message-ID: <20111209151536.0F52B82ABD@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50325:a94098254217 Date: 2011-12-09 16:14 +0100 http://bitbucket.org/pypy/pypy/changeset/a94098254217/ Log: (bivab, arigo) use constants here diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -1113,7 +1113,7 @@ regs_to_save.append(reg) assert gcrootmap.is_shadow_stack with saved_registers(self.mc, regs_to_save): - self._emit_call(-1, self.releasegil_addr, [], self._regalloc, fcond) + self._emit_call(NO_FORCE_INDEX, self.releasegil_addr, [], self._regalloc, fcond) def call_reacquire_gil(self, gcrootmap, save_loc, fcond): # save the previous result into the stack temporarily. @@ -1127,7 +1127,7 @@ regs_to_save.append(r.ip) # for alingment assert gcrootmap.is_shadow_stack with saved_registers(self.mc, regs_to_save): - self._emit_call(-1, self.reacqgil_addr, [], self._regalloc, fcond) + self._emit_call(NO_FORCE_INDEX, self.reacqgil_addr, [], self._regalloc, fcond) def write_new_force_index(self): # for shadowstack only: get a new, unused force_index number and From noreply at buildbot.pypy.org Fri Dec 9 16:15:37 2011 From: noreply at buildbot.pypy.org (bivab) Date: Fri, 9 Dec 2011 16:15:37 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: (arigo, bivab): write the correct value at the force index in the frame Message-ID: <20111209151537.3763282ABD@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50326:0cde4d93fe16 Date: 2011-12-09 16:15 +0100 http://bitbucket.org/pypy/pypy/changeset/0cde4d93fe16/ Log: (arigo, bivab): write the correct value at the force index in the frame diff --git a/pypy/jit/backend/arm/runner.py b/pypy/jit/backend/arm/runner.py --- a/pypy/jit/backend/arm/runner.py +++ b/pypy/jit/backend/arm/runner.py @@ -114,7 +114,7 @@ fail_index = rffi.cast(TP, addr_of_force_index)[0] assert fail_index >= 0, "already forced!" faildescr = self.get_fail_descr_from_number(fail_index) - rffi.cast(TP, addr_of_force_index)[0] = -1 + rffi.cast(TP, addr_of_force_index)[0] = ~fail_index # start of "no gc operation!" 
block frame_depth = faildescr._arm_frame_depth*WORD addr_end_of_frame = (addr_of_force_index - From noreply at buildbot.pypy.org Fri Dec 9 16:20:18 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Fri, 9 Dec 2011 16:20:18 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: Quite a simplification and improvement: Message-ID: <20111209152018.139E782ABD@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50327:ffe08320d6bc Date: 2011-12-09 16:18 +0100 http://bitbucket.org/pypy/pypy/changeset/ffe08320d6bc/ Log: Quite a simplification and improvement: removed the extra mess for signed_defn.h and put it into the standard header diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -35,8 +35,6 @@ # define Py_LOCAL_INLINE(type) static __inline type __fastcall #endif -#include "signed_defn.h" - /* Deprecated DL_IMPORT and DL_EXPORT macros */ #ifdef _WIN32 # if defined(Py_BUILD_CORE) diff --git a/pypy/rlib/test/test_libffi.py b/pypy/rlib/test/test_libffi.py --- a/pypy/rlib/test/test_libffi.py +++ b/pypy/rlib/test/test_libffi.py @@ -1,6 +1,6 @@ import sys -import py, autopath +import py from pypy.rlib.libffi import (CDLL, Func, get_libc_name, ArgChain, types, IS_32_BIT, array_getitem, array_setitem) @@ -100,6 +100,7 @@ def setup_class(cls): from pypy.tool.udir import udir from pypy.translator.tool.cbuild import ExternalCompilationInfo + from pypy.translator.tool.cbuild import STANDARD_HEADER from pypy.translator.platform import platform BaseFfiTest.setup_class() @@ -120,10 +121,8 @@ for match in re.finditer(" ([a-z_]+)\(", meth.__doc__): exports.append(match.group(1)) # - c_file.write(py.code.Source('\n'.join(snippets))) - eci = ExternalCompilationInfo( - export_symbols=exports, - include_dirs=[str(py.path.local(autopath.pypydir).join('translator', 'c'))]) + c_file.write(STANDARD_HEADER + py.code.Source('\n'.join(snippets))) + eci = ExternalCompilationInfo(export_symbols=exports) cls.libfoo_name = str(platform.compile([c_file], eci, 'x', standalone=False)) @@ -158,8 +157,7 @@ # ------------------------------------------------------------------------ def test_very_simple(self): - """ #include "src/signed_defn.h" - + """ int diff_xy(int x, Signed y) { return x - y; @@ -238,8 +236,6 @@ def test_pointer_as_argument(self): """#include - #include "src/signed_defn.h" - Signed inc(Signed* x) { Signed oldval; @@ -276,8 +272,7 @@ lltype.free(ptr_result, flavor='raw') def test_return_pointer(self): - """ #include "src/signed_defn.h" - + """ struct pair { Signed a; Signed b; @@ -395,8 +390,7 @@ def test_byval_argument(self): - """ #include "src/signed_defn.h" - + """ struct Point { Signed x; Signed y; @@ -426,8 +420,7 @@ lltype.free(ffi_point_struct, flavor='raw') def test_byval_result(self): - """ #include "src/signed_defn.h" - + """ struct Point make_point(Signed x, Signed y) { struct Point p; p.x = x; diff --git a/pypy/translator/c/src/g_prerequisite.h b/pypy/translator/c/src/g_prerequisite.h --- a/pypy/translator/c/src/g_prerequisite.h +++ b/pypy/translator/c/src/g_prerequisite.h @@ -8,8 +8,6 @@ # include "Python.h" #endif -#include "signed_defn.h" - #ifdef _WIN32 # include /* needed, otherwise _lseeki64 truncates to 32-bits (??) 
*/ #endif diff --git a/pypy/translator/c/src/signals.h b/pypy/translator/c/src/signals.h --- a/pypy/translator/c/src/signals.h +++ b/pypy/translator/c/src/signals.h @@ -7,7 +7,6 @@ #include #include -#include "src/signed_defn.h" #ifdef _WIN32 #include diff --git a/pypy/translator/c/src/signed_defn.h b/pypy/translator/c/src/signed_defn.h deleted file mode 100644 --- a/pypy/translator/c/src/signed_defn.h +++ /dev/null @@ -1,18 +0,0 @@ -/* this file defines Signed and Unsigned */ - -#ifndef SIGNED_DEFN_H -#define SIGNED_DEFN_H - -#ifdef _WIN64 - typedef __int64 Signed; - typedef unsigned __int64 Unsigned; -# define SIGNED_MIN LLONG_MIN -#else - typedef long Signed; - typedef unsigned long Unsigned; -# define SIGNED_MIN LONG_MIN -#endif - -#endif - -/* end of signed_def.h */ diff --git a/pypy/translator/tool/cbuild.py b/pypy/translator/tool/cbuild.py --- a/pypy/translator/tool/cbuild.py +++ b/pypy/translator/tool/cbuild.py @@ -320,4 +320,13 @@ #define __XSI_VISIBLE 700 /* Windows: winsock/winsock2 mess */ #define WIN32_LEAN_AND_MEAN +#ifdef _WIN64 + typedef __int64 Signed; + typedef unsigned __int64 Unsigned; +# define SIGNED_MIN LLONG_MIN +#else + typedef long Signed; + typedef unsigned long Unsigned; +# define SIGNED_MIN LONG_MIN +#endif ''' From noreply at buildbot.pypy.org Fri Dec 9 18:19:06 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Dec 2011 18:19:06 +0100 (CET) Subject: [pypy-commit] pypy default: Write a hack to distinguish between "true built-in modules" and Message-ID: <20111209171906.CC2C182ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50328:a72429e0e0ed Date: 2011-12-09 18:18 +0100 http://bitbucket.org/pypy/pypy/changeset/a72429e0e0ed/ Log: Write a hack to distinguish between "true built-in modules" and "pseudo-extension built-in modules". The latters are the ones that are extension modules in CPython. We list the formers explicitly. True built-in modules are treated like CPython treats built-in modules, i.e. they always shadow any xx.py. The pseudo-extension built-in modules are treated like CPython treats extension modules, and are only found in sys.path order for the fake entry '.../lib_pypy/__extensions__' which we put just before 'lib_pypy' and the 'lib-python' entries. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -487,6 +487,16 @@ 'parser', 'fcntl', '_codecs', 'binascii' ] + # These modules are treated like CPython treats built-in modules, + # i.e. they always shadow any xx.py. The other modules are treated + # like CPython treats extension modules, and are loaded in sys.path + # order by the fake entry '.../lib_pypy/__extensions__'. + MODULES_THAT_ALWAYS_SHADOW = dict.fromkeys([ + '__builtin__', '__pypy__', '_ast', '_codecs', '_sre', '_warnings', + '_weakref', 'errno', 'exceptions', 'gc', 'imp', 'marshal', + 'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport', + ], None) + def make_builtins(self): "NOT_RPYTHON: only for initializing the space." diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -483,10 +483,20 @@ # XXX Check for frozen modules? 
# when w_path is a string + delayed_builtin = None + w_lib_extensions = None + if w_path is None: # check the builtin modules if modulename in space.builtin_modules: - return FindInfo(C_BUILTIN, modulename, None) + delayed_builtin = FindInfo(C_BUILTIN, modulename, None) + # a "real builtin module xx" shadows every file "xx.py" there + # could possibly be; a "pseudo-extension module" does not, and + # is only loaded at the point in sys.path where we find + # '.../lib_pypy/__extensions__'. + if modulename in space.MODULES_THAT_ALWAYS_SHADOW: + return delayed_builtin + w_lib_extensions = space.sys.get_state(space).w_lib_extensions w_path = space.sys.get('path') # XXX check frozen modules? @@ -495,6 +505,9 @@ if w_path is not None: for w_pathitem in space.unpackiterable(w_path): # sys.path_hooks import hook + if (w_lib_extensions is not None and + space.is_w(w_pathitem, w_lib_extensions)): + return delayed_builtin if use_loader: w_loader = find_in_path_hooks(space, w_modulename, w_pathitem) if w_loader: @@ -527,7 +540,7 @@ # Out of file descriptors. # not found - return None + return delayed_builtin def _prepare_module(space, w_mod, filename, pkgdir): w = space.wrap diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -38,6 +38,8 @@ test_reload = "def test():\n raise ValueError\n", infinite_reload = "import infinite_reload; reload(infinite_reload)", del_sys_module = "import sys\ndel sys.modules['del_sys_module']\n", + itertools = "hello_world = 42\n", + gc = "should_never_be_seen = 42\n", ) root.ensure("notapackage", dir=1) # empty, no __init__.py setuppkg("pkg", @@ -147,6 +149,7 @@ class AppTestImport: def setup_class(cls): # interpreter-level + cls.space = gettestobjspace(usemodules=['itertools']) cls.saved_modules = _setup(cls.space) #XXX Compile class @@ -571,6 +574,47 @@ else: assert False, 'should not work' + def test_shadow_builtin(self): + # 'import gc' is supposed to always find the built-in module; + # like CPython, it is a built-in module, so it shadows everything, + # even though there is a gc.py. + import sys + assert 'gc' not in sys.modules + import gc + assert not hasattr(gc, 'should_never_be_seen') + assert '(built-in)' in repr(gc) + del sys.modules['gc'] + + def test_shadow_extension_1(self): + # 'import itertools' is supposed to find itertools.py if there is + # one in sys.path. + import sys + assert 'itertools' not in sys.modules + import itertools + assert hasattr(itertools, 'hello_world') + assert not hasattr(itertools, 'count') + assert '(built-in)' not in repr(itertools) + del sys.modules['itertools'] + + def test_shadow_extension_2(self): + # 'import itertools' is supposed to find the built-in module even + # if there is also one in sys.path as long as it is *after* the + # special entry '.../lib_pypy/__extensions__'. (Note that for now + # there is one in lib_pypy/itertools.py, which should not be seen + # either; hence the (built-in) test below.) 
+ import sys + assert 'itertools' not in sys.modules + sys.path.append(sys.path.pop(0)) + try: + import itertools + assert not hasattr(itertools, 'hello_world') + assert hasattr(itertools, 'izip') + assert '(built-in)' in repr(itertools) + finally: + sys.path.insert(0, sys.path.pop()) + del sys.modules['itertools'] + + class TestAbi: def test_abi_tag(self): space1 = gettestobjspace(soabi='TEST') diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -170,3 +170,7 @@ def get_flag(self, name): space = self.space return space.int_w(space.getattr(self.get('flags'), space.wrap(name))) + + def get_state(self, space): + from pypy.module.sys import state + return state.get(space) diff --git a/pypy/module/sys/state.py b/pypy/module/sys/state.py --- a/pypy/module/sys/state.py +++ b/pypy/module/sys/state.py @@ -24,7 +24,7 @@ # Initialize the default path pypydir = os.path.dirname(os.path.abspath(pypy.__file__)) srcdir = os.path.dirname(pypydir) - path = getinitialpath(srcdir) + path = getinitialpath(self, srcdir) self.w_path = space.newlist([space.wrap(p) for p in path]) def checkdir(path): @@ -35,7 +35,7 @@ platform = sys.platform -def getinitialpath(prefix): +def getinitialpath(state, prefix): from pypy.module.sys.version import CPYTHON_VERSION dirname = '%d.%d' % (CPYTHON_VERSION[0], CPYTHON_VERSION[1]) @@ -49,6 +49,12 @@ checkdir(lib_pypy) importlist = [] + # + if state is not None: # 'None' for testing only + lib_extensions = os.path.join(lib_pypy, '__extensions__') + state.w_lib_extensions = state.space.wrap(lib_extensions) + importlist.append(lib_extensions) + # importlist.append(lib_pypy) importlist.append(python_std_lib_modified) importlist.append(python_std_lib) @@ -71,7 +77,7 @@ @unwrap_spec(srcdir=str) def pypy_initial_path(space, srcdir): try: - path = getinitialpath(srcdir) + path = getinitialpath(get(space), srcdir) except OSError: return space.w_None else: diff --git a/pypy/module/sys/test/test_initialpath.py b/pypy/module/sys/test/test_initialpath.py --- a/pypy/module/sys/test/test_initialpath.py +++ b/pypy/module/sys/test/test_initialpath.py @@ -13,7 +13,7 @@ def test_stdlib_in_prefix(tmpdir): dirs = build_hierarchy(tmpdir) - path = getinitialpath(str(tmpdir)) + path = getinitialpath(None, str(tmpdir)) # we get at least 'dirs', and maybe more (e.g. plat-linux2) assert path[:len(dirs)] == map(str, dirs) @@ -21,7 +21,7 @@ lib_pypy, lib_python_modified, lib_python = build_hierarchy(tmpdir) lib_tk_modified = lib_python_modified.join('lib-tk') lib_tk = lib_python.join('lib-tk') - path = getinitialpath(str(tmpdir)) + path = getinitialpath(None, str(tmpdir)) i = path.index(str(lib_tk_modified)) j = path.index(str(lib_tk)) assert i < j From noreply at buildbot.pypy.org Fri Dec 9 18:35:03 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Dec 2011 18:35:03 +0100 (CET) Subject: [pypy-commit] buildbot default: Give it a bit more time before timing out. Useful for Message-ID: <20111209173503.36F2182ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r599:83d6d146e64f Date: 2011-12-09 18:34 +0100 http://bitbucket.org/pypy/buildbot/changeset/83d6d146e64f/ Log: Give it a bit more time before timing out. Useful for very slow final "make" runs. 
diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -83,7 +83,7 @@ add_args = {'translationArgs': translationArgs, 'targetArgs': targetArgs, 'interpreter': interpreter} - kw['timeout'] = 5400 + kw['timeout'] = 7200 ShellCmd.__init__(self, workdir, *a, **kw) self.addFactoryArguments(**add_args) self.command = ([interpreter] + self.command + translationArgs + From noreply at buildbot.pypy.org Fri Dec 9 19:16:16 2011 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 9 Dec 2011 19:16:16 +0100 (CET) Subject: [pypy-commit] pypy default: add -g so debugger can be used Message-ID: <20111209181616.0565782ABD@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50329:be6e2485ed4a Date: 2011-12-09 20:15 +0200 http://bitbucket.org/pypy/pypy/changeset/be6e2485ed4a/ Log: add -g so debugger can be used diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -864,6 +864,7 @@ elif sys.platform.startswith('linux'): compile_extra.append("-Werror=implicit-function-declaration") export_symbols_eci.append('pypyAPI') + compile_extra.append('-g') else: kwds["includes"] = ['Python.h'] # this is our Python.h From noreply at buildbot.pypy.org Fri Dec 9 23:46:35 2011 From: noreply at buildbot.pypy.org (ctismer) Date: Fri, 9 Dec 2011 23:46:35 +0100 (CET) Subject: [pypy-commit] pypy win64-stage1: small fix to test_libffi Message-ID: <20111209224635.ABC8A82ABD@wyvern.cs.uni-duesseldorf.de> Author: Christian Tismer Branch: win64-stage1 Changeset: r50330:91bdfa626831 Date: 2011-12-09 23:46 +0100 http://bitbucket.org/pypy/pypy/changeset/91bdfa626831/ Log: small fix to test_libffi diff --git a/pypy/rlib/test/test_libffi.py b/pypy/rlib/test/test_libffi.py --- a/pypy/rlib/test/test_libffi.py +++ b/pypy/rlib/test/test_libffi.py @@ -100,7 +100,7 @@ def setup_class(cls): from pypy.tool.udir import udir from pypy.translator.tool.cbuild import ExternalCompilationInfo - from pypy.translator.tool.cbuild import STANDARD_HEADER + from pypy.translator.tool.cbuild import STANDARD_DEFINES from pypy.translator.platform import platform BaseFfiTest.setup_class() @@ -121,7 +121,7 @@ for match in re.finditer(" ([a-z_]+)\(", meth.__doc__): exports.append(match.group(1)) # - c_file.write(STANDARD_HEADER + py.code.Source('\n'.join(snippets))) + c_file.write(STANDARD_DEFINES + str(py.code.Source('\n'.join(snippets)))) eci = ExternalCompilationInfo(export_symbols=exports) cls.libfoo_name = str(platform.compile([c_file], eci, 'x', standalone=False)) From noreply at buildbot.pypy.org Sat Dec 10 00:09:54 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Dec 2011 00:09:54 +0100 (CET) Subject: [pypy-commit] pypy default: Python 2.5 compat Message-ID: <20111209230954.8886E82ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50331:eff9db9ac0c8 Date: 2011-12-09 22:23 +0100 http://bitbucket.org/pypy/pypy/changeset/eff9db9ac0c8/ Log: Python 2.5 compat diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -1,3 +1,4 @@ +from __future__ import with_statement import py from pypy.jit.metainterp.test.support import LLJitMixin From noreply at buildbot.pypy.org Sat Dec 10 00:09:55 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Dec 2011 00:09:55 +0100 (CET) Subject: [pypy-commit] pypy 
default: Improve the hack 062e9d06c908: revert the changes done in the Message-ID: <20111209230955.B5CE182ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50332:9b45755e2c2b Date: 2011-12-09 22:58 +0100 http://bitbucket.org/pypy/pypy/changeset/9b45755e2c2b/ Log: Improve the hack 062e9d06c908: revert the changes done in the RegisterManager, and instead write a more involved but cleaner solution in the FrameManager. As a bonus you get tests too. This solution should work even when assembling bridges, not just loops. diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -16,35 +16,101 @@ """ Manage frame positions """ def __init__(self): - self.frame_bindings = {} - self.frame_depth = 0 + self.bindings = {} + self.used = [] # list of bools + self.hint_frame_locations = {} + + frame_depth = property(lambda:xxx, lambda:xxx) # XXX kill me + + def get_frame_depth(self): + return len(self.used) def get(self, box): - return self.frame_bindings.get(box, None) + return self.bindings.get(box, None) def loc(self, box): + """Return or create the frame location associated with 'box'.""" + # first check if it's already in the frame_manager try: - return self.frame_bindings[box] + return self.bindings[box] except KeyError: - return self.get_new_loc(box) + pass + # check if we have a hint for this box + if box in self.hint_frame_locations: + # if we do, try to reuse the location for this box + loc = self.hint_frame_locations[box] + if self.try_to_reuse_location(box, loc): + return loc + # no valid hint. make up a new free location + return self.get_new_loc(box) def get_new_loc(self, box): size = self.frame_size(box.type) - self.frame_depth += ((-self.frame_depth) & (size-1)) - # ^^^ frame_depth is rounded up to a multiple of 'size', assuming + # frame_depth is rounded up to a multiple of 'size', assuming # that 'size' is a power of two. The reason for doing so is to # avoid obscure issues in jump.py with stack locations that try # to move from position (6,7) to position (7,8). 
- newloc = self.frame_pos(self.frame_depth, box.type) - self.frame_bindings[box] = newloc - self.frame_depth += size + while self.get_frame_depth() & (size - 1): + self.used.append(False) + # + index = self.get_frame_depth() + newloc = self.frame_pos(index, box.type) + for i in range(size): + self.used.append(True) + # + if not we_are_translated(): # extra testing + testindex = self.get_loc_index(newloc) + assert testindex == index + # + self.bindings[box] = newloc return newloc + def set_binding(self, box, loc): + self.bindings[box] = loc + # + index = self.get_loc_index(loc) + endindex = index + self.frame_size(box) + while len(self.used) < endindex: + self.used.append(False) + while index < endindex: + self.used[index] = True + index += 1 + def reserve_location_in_frame(self, size): - frame_depth = self.frame_depth - self.frame_depth += size + frame_depth = self.get_frame_depth() + for i in range(size): + self.used.append(True) return frame_depth + def mark_as_free(self, box): + try: + loc = self.bindings[box] + except KeyError: + return # already gone + del self.bindings[box] + # + size = self.frame_size(box.type) + baseindex = self.get_loc_index(loc) + for i in range(size): + index = baseindex + i + assert 0 <= index < len(self.used) + self.used[index] = False + + def try_to_reuse_location(self, box, loc): + index = self.get_loc_index(loc) + assert index >= 0 + size = self.frame_size(box.type) + for i in range(size): + while (index + i) >= len(self.used): + self.used.append(False) + if self.used[index + i]: + return False # already in use + # good, we can reuse the location + for i in range(size): + self.used[index + i] = True + self.bindings[box] = loc + return True + # abstract methods that need to be overwritten for specific assemblers @staticmethod def frame_pos(loc, type): @@ -52,6 +118,10 @@ @staticmethod def frame_size(type): return 1 + @staticmethod + def get_loc_index(loc): + raise NotImplementedError("Purely abstract") + class RegisterManager(object): """ Class that keeps track of register allocations @@ -70,8 +140,6 @@ self.position = -1 self.frame_manager = frame_manager self.assembler = assembler - self.hint_frame_locations = {} # {Box: StackLoc} - self.freed_frame_locations = {} # {StackLoc: None} def is_still_alive(self, v): # Check if 'v' is alive at the current position. @@ -103,9 +171,7 @@ self.free_regs.append(self.reg_bindings[v]) del self.reg_bindings[v] if self.frame_manager is not None: - if v in self.frame_manager.frame_bindings: - loc = self.frame_manager.frame_bindings[v] - self.freed_frame_locations[loc] = None + self.frame_manager.mark_as_free(v) def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. @@ -177,23 +243,6 @@ self.reg_bindings[v] = loc return loc - def _frame_loc(self, v): - # first check if it's already in the frame_manager - try: - return self.frame_manager.frame_bindings[v] - except KeyError: - pass - # check if we have a hint for this box - if v in self.hint_frame_locations: - # if we do, check that the hinted location is known to be free - loc = self.hint_frame_locations[v] - if loc in self.freed_frame_locations: - del self.freed_frame_locations[loc] - self.frame_manager.frame_bindings[v] = loc - return loc - # no valid hint. 
make up a new free location - return self.frame_manager.get_new_loc(v) - def _spill_var(self, v, forbidden_vars, selected_reg, need_lower_byte=False): v_to_spill = self._pick_variable_to_spill(v, forbidden_vars, @@ -201,7 +250,7 @@ loc = self.reg_bindings[v_to_spill] del self.reg_bindings[v_to_spill] if self.frame_manager.get(v_to_spill) is None: - newloc = self._frame_loc(v_to_spill) + newloc = self.frame_manager.loc(v_to_spill) self.assembler.regalloc_mov(loc, newloc) return loc @@ -278,7 +327,7 @@ except KeyError: if box in self.bindings_to_frame_reg: return self.frame_reg - return self._frame_loc(box) + return self.frame_manager.loc(box) def return_constant(self, v, forbidden_vars=[], selected_reg=None): """ Return the location of the constant v. If 'selected_reg' is @@ -326,7 +375,7 @@ self.reg_bindings[v] = loc self.assembler.regalloc_mov(prev_loc, loc) else: - loc = self._frame_loc(v) + loc = self.frame_manager.loc(v) self.assembler.regalloc_mov(prev_loc, loc) def force_result_in_reg(self, result_v, v, forbidden_vars=[]): @@ -345,7 +394,7 @@ self.reg_bindings[result_v] = loc return loc if v not in self.reg_bindings: - prev_loc = self._frame_loc(v) + prev_loc = self.frame_manager.loc(v) loc = self.force_allocate_reg(v, forbidden_vars) self.assembler.regalloc_mov(prev_loc, loc) assert v in self.reg_bindings @@ -365,7 +414,7 @@ def _sync_var(self, v): if not self.frame_manager.get(v): reg = self.reg_bindings[v] - to = self._frame_loc(v) + to = self.frame_manager.loc(v) self.assembler.regalloc_mov(reg, to) # otherwise it's clean diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -44,6 +44,9 @@ return 2 else: return 1 + def get_loc_index(self, loc): + assert isinstance(loc, FakeFramePos) + return loc.pos class MockAsm(object): def __init__(self): @@ -282,7 +285,7 @@ rm.force_allocate_reg(b) rm.before_call() assert len(rm.reg_bindings) == 2 - assert fm.frame_depth == 2 + assert fm.get_frame_depth() == 2 assert len(asm.moves) == 2 rm._check_invariants() rm.after_call(boxes[-1]) @@ -305,7 +308,7 @@ rm.force_allocate_reg(b) rm.before_call(save_all_regs=True) assert len(rm.reg_bindings) == 0 - assert fm.frame_depth == 4 + assert fm.get_frame_depth() == 4 assert len(asm.moves) == 4 rm._check_invariants() rm.after_call(boxes[-1]) @@ -327,7 +330,7 @@ xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm) xrm.loc(f0) rm.loc(b0) - assert fm.frame_depth == 3 + assert fm.get_frame_depth() == 3 @@ -351,20 +354,14 @@ def test_hint_frame_locations_1(self): - b0, b1 = newboxes(0, 1) - longevity = {b0: (0, 1), b1: (0, 1)} + b0, = newboxes(0) fm = TFrameManager() - asm = MockAsm() - rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) - rm.hint_frame_locations[b0] = "some_stack_loc" - rm.freed_frame_locations["some_stack_loc"] = None - rm.force_allocate_reg(b0) - rm.force_allocate_reg(b1) - rm.force_spill_var(b0) - rm.force_spill_var(b1) - assert rm.loc(b0) == "some_stack_loc" - assert isinstance(rm.loc(b1), FakeFramePos) - rm._check_invariants() + loc123 = FakeFramePos(123, INT) + fm.hint_frame_locations[b0] = loc123 + assert fm.get_frame_depth() == 0 + loc = fm.loc(b0) + assert loc == loc123 + assert fm.get_frame_depth() == 124 def test_hint_frame_locations_2(self): b0, b1, b2 = newboxes(0, 1, 2) @@ -378,20 +375,99 @@ rm.force_spill_var(b0) loc = rm.loc(b0) assert isinstance(loc, FakeFramePos) + assert 
fm.get_loc_index(loc) == 0 rm.position = 1 - assert loc not in rm.freed_frame_locations + assert fm.used == [True] rm.possibly_free_var(b0) - assert loc in rm.freed_frame_locations + assert fm.used == [False] # - rm.hint_frame_locations[b1] = loc + fm.hint_frame_locations[b1] = loc rm.force_spill_var(b1) loc1 = rm.loc(b1) - assert loc1 is loc - assert rm.freed_frame_locations == {} + assert loc1 == loc + assert fm.used == [True] # - rm.hint_frame_locations[b2] = loc + fm.hint_frame_locations[b2] = loc rm.force_spill_var(b2) loc2 = rm.loc(b2) - assert loc2 is not loc1 # because it's not in freed_frame_locations + assert loc2 != loc1 # because it was not free + assert fm.used == [True, True] # rm._check_invariants() + + def test_frame_manager_basic(self): + b0, b1 = newboxes(0, 1) + fm = TFrameManager() + loc0 = fm.loc(b0) + assert fm.get_loc_index(loc0) == 0 + # + assert fm.get(b1) is None + loc1 = fm.loc(b1) + assert fm.get_loc_index(loc1) == 1 + assert fm.get(b1) == loc1 + # + loc0b = fm.loc(b0) + assert loc0b == loc0 + # + fm.loc(BoxInt()) + assert fm.get_frame_depth() == 3 + # + f0 = BoxFloat() + locf0 = fm.loc(f0) + assert fm.get_loc_index(locf0) == 4 + assert fm.get_frame_depth() == 6 + # + f1 = BoxFloat() + locf1 = fm.loc(f1) + assert fm.get_loc_index(locf1) == 6 + assert fm.get_frame_depth() == 8 + assert fm.used == [True, True, True, False, True, True, True, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(f1) + assert fm.used == [False, True, True, False, True, True, False, False] + # + fm.reserve_location_in_frame(1) + assert fm.get_frame_depth() == 9 + assert fm.used == [False, True, True, False, True, True, False, False, True] + # + assert b0 not in fm.bindings + fm.set_binding(b0, loc0) + assert b0 in fm.bindings + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + b3 = BoxInt() + assert not fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) # already free + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b3) + assert fm.used == [False, True, True, False, True, True, False, False, True] + f3 = BoxFloat() + assert not fm.try_to_reuse_location(f3, fm.frame_pos(0, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(1, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(2, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(3, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(4, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(5, FLOAT)) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(f3, fm.frame_pos(6, FLOAT)) + assert fm.used == [False, True, True, False, True, True, True, True, True] + # + fm.used = [False] + assert fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True, True] + # + fm.used = [True] + assert not fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True] diff --git a/pypy/jit/backend/x86/assembler.py 
b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -694,7 +694,7 @@ regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging - frame_depth = regalloc.fm.frame_depth + frame_depth = regalloc.fm.get_frame_depth() param_depth = regalloc.param_depth jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -138,6 +138,10 @@ return 2 else: return 1 + @staticmethod + def get_loc_index(loc): + assert isinstance(loc, StackLoc) + return loc.position if WORD == 4: gpr_reg_mgr_cls = X86RegisterManager @@ -184,7 +188,6 @@ allgcrefs): operations, _ = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) - self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] return operations @@ -307,7 +310,7 @@ self.xrm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) else: if isinstance(loc, RegLoc): if loc is ebp: @@ -316,7 +319,7 @@ self.rm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) self.rm.free_regs = [] for reg in self.rm.all_regs: if reg not in used: @@ -352,7 +355,7 @@ def get_current_depth(self): # return (self.fm.frame_depth, self.param_depth), but trying to share # the resulting tuple among several calls - arg0 = self.fm.frame_depth + arg0 = self.fm.get_frame_depth() arg1 = self.param_depth result = self.assembler._current_depths_cache if result[0] != arg0 or result[1] != arg1: @@ -1334,12 +1337,12 @@ loc = nonfloatlocs[i] if isinstance(loc, StackLoc): assert box.type != FLOAT - self.rm.hint_frame_locations[box] = loc + self.fm.hint_frame_locations[box] = loc else: loc = floatlocs[i] if isinstance(loc, StackLoc): assert box.type == FLOAT - self.xrm.hint_frame_locations[box] = loc + self.fm.hint_frame_locations[box] = loc def consider_jump(self, op): assembler = self.assembler @@ -1385,7 +1388,7 @@ def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): shape = gcrootmap.get_basic_shape(IS_X86_64) - for v, val in self.fm.frame_bindings.items(): + for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) gcrootmap.add_frame_offset(shape, get_ebp_ofs(val.position)) diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -42,6 +42,7 @@ i5 = int_add(i4, 1) i6 = int_add(i5, 1) i7 = int_add(i5, i4) + force_spill(i5) i8 = int_add(i7, 1) i9 = int_add(i8, 1) finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2) @@ -49,10 +50,9 @@ bridge = self.attach_bridge(ops, loop, -2) descr = loop.operations[2].getdescr() new = descr._x86_bridge_frame_depth - assert descr._x86_bridge_param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? 
- if IS_X86_32: - assert new > previous + assert descr._x86_bridge_param_depth == 0 + # the force_spill() forces the stack to grow + assert new > previous self.cpu.set_future_value_int(0, 0) fail = self.run(loop) assert fail.identifier == 2 @@ -104,6 +104,9 @@ i8 = int_add(i3, 1) i6 = int_add(i8, i10) i7 = int_add(i3, i6) + force_spill(i6) + force_spill(i7) + force_spill(i8) i12 = int_add(i7, i8) i11 = int_add(i12, i6) jump(i3, i12, i11, i10, i6, i7, descr=looptoken) @@ -112,9 +115,8 @@ guard_op = loop.operations[5] loop_frame_depth = loop.token._x86_frame_depth assert loop.token._x86_param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? - if IS_X86_32: - assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth + # the force_spill() forces the stack to grow + assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth assert guard_op.getdescr()._x86_bridge_param_depth == 0 self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 0) From noreply at buildbot.pypy.org Sat Dec 10 00:09:56 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Dec 2011 00:09:56 +0100 (CET) Subject: [pypy-commit] pypy default: test and fix Message-ID: <20111209230956.DBD8182ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50333:94737f156c30 Date: 2011-12-09 23:04 +0100 http://bitbucket.org/pypy/pypy/changeset/94737f156c30/ Log: test and fix diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -69,7 +69,7 @@ self.bindings[box] = loc # index = self.get_loc_index(loc) - endindex = index + self.frame_size(box) + endindex = index + self.frame_size(box.type) while len(self.used) < endindex: self.used.append(False) while index < endindex: diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -42,8 +42,10 @@ def frame_size(self, box_type): if box_type == FLOAT: return 2 + elif box_type == INT: + return 1 else: - return 1 + raise ValueError(box_type) def get_loc_index(self, loc): assert isinstance(loc, FakeFramePos) return loc.pos From notifications-noreply at bitbucket.org Sat Dec 10 08:23:14 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Sat, 10 Dec 2011 07:23:14 -0000 Subject: [pypy-commit] [COMMENT] Pull request #15 for pypy/pypy: Added tolist() function to numpypy's scalars and multidim arrays Message-ID: <20111210072314.22431.3038@bitbucket01.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/15/added-tolist-function-to-numpypys-scalars#comment-1354 Alex Gaynor (alex_gaynor) said: # You still can't use self.value here, you need to call space.float(self) (and friends). # an RPython list of wrapped vars is usually named l_w (specifically the trailing _w), I'm not sure how these tests are passing with the unwrap call, but it's definitely not correct. -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. 
From notifications-noreply at bitbucket.org Sat Dec 10 08:23:33 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Sat, 10 Dec 2011 07:23:33 -0000 Subject: [pypy-commit] [COMMENT] Pull request #15 for pypy/pypy: Added tolist() function to numpypy's scalars and multidim arrays Message-ID: <20111210072333.6508.35993@bitbucket03.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/15/added-tolist-function-to-numpypys-scalars#comment-1355 Alex Gaynor (alex_gaynor) said: ah, and the scalar test is missing some asserts -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. From noreply at buildbot.pypy.org Sat Dec 10 10:41:57 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Dec 2011 10:41:57 +0100 (CET) Subject: [pypy-commit] pypy default: Baaaaah. Don't rely on "is" being true for two independently Message-ID: <20111210094157.AAFE382ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50335:b8e86a5f522b Date: 2011-12-10 10:41 +0100 http://bitbucket.org/pypy/pypy/changeset/b8e86a5f522b/ Log: Baaaaah. Don't rely on "is" being true for two independently wrapped strings. It fails on py.py, for example. diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -506,7 +506,7 @@ for w_pathitem in space.unpackiterable(w_path): # sys.path_hooks import hook if (w_lib_extensions is not None and - space.is_w(w_pathitem, w_lib_extensions)): + space.eq_w(w_pathitem, w_lib_extensions)): return delayed_builtin if use_loader: w_loader = find_in_path_hooks(space, w_modulename, w_pathitem) From noreply at buildbot.pypy.org Sat Dec 10 10:41:56 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Dec 2011 10:41:56 +0100 (CET) Subject: [pypy-commit] pypy temp2: close temporary branch Message-ID: <20111210094156.813AD82ABC@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: temp2 Changeset: r50334:2db53dd9c46e Date: 2011-12-09 23:08 +0100 http://bitbucket.org/pypy/pypy/changeset/2db53dd9c46e/ Log: close temporary branch From noreply at buildbot.pypy.org Sat Dec 10 10:46:47 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Dec 2011 10:46:47 +0100 (CET) Subject: [pypy-commit] pypy default: fixes for test_app_main. Message-ID: <20111210094647.2FFA682ABC@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50336:998ae4038c2f Date: 2011-12-10 10:46 +0100 http://bitbucket.org/pypy/pypy/changeset/998ae4038c2f/ Log: fixes for test_app_main. 
diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py +++ b/pypy/translator/goal/app_main.py @@ -672,7 +672,7 @@ def pypy_initial_path(s): from pypy.module.sys.state import getinitialpath try: - return getinitialpath(s) + return getinitialpath(None, s) except OSError: return None diff --git a/pypy/translator/goal/test2/test_app_main.py b/pypy/translator/goal/test2/test_app_main.py --- a/pypy/translator/goal/test2/test_app_main.py +++ b/pypy/translator/goal/test2/test_app_main.py @@ -821,6 +821,8 @@ newpath = app_main.get_library_path('/tmp/pypy-c') # stdlib not found assert newpath == sys.path newpath = app_main.get_library_path(self.fake_exe) + if newpath[0].endswith('__extensions__'): + newpath = newpath[1:] # we get at least 'expected_path', and maybe more (e.g.plat-linux2) assert newpath[:len(self.expected_path)] == self.expected_path finally: From notifications-noreply at bitbucket.org Sat Dec 10 11:02:28 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Sat, 10 Dec 2011 10:02:28 -0000 Subject: [pypy-commit] [COMMENT] Pull request #15 for pypy/pypy: Added tolist() function to numpypy's scalars and multidim arrays Message-ID: <20111210100228.2157.29467@bitbucket01.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/15/added-tolist-function-to-numpypys-scalars#comment-1356 arigo said: jterrace: space.unwrap() is there only for tests and is not RPython. -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. From noreply at buildbot.pypy.org Sat Dec 10 12:13:24 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Dec 2011 12:13:24 +0100 (CET) Subject: [pypy-commit] pypy default: Skip these tests in py.test -A. Message-ID: <20111210111324.9AEB582ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50337:386488dbe96d Date: 2011-12-10 11:12 +0000 http://bitbucket.org/pypy/pypy/changeset/386488dbe96d/ Log: Skip these tests in py.test -A. diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -150,6 +150,7 @@ def setup_class(cls): # interpreter-level cls.space = gettestobjspace(usemodules=['itertools']) + cls.w_runappdirect = cls.space.wrap(conftest.option.runappdirect) cls.saved_modules = _setup(cls.space) #XXX Compile class @@ -575,6 +576,7 @@ assert False, 'should not work' def test_shadow_builtin(self): + if self.runappdirect: skip("hard to test: module is already imported") # 'import gc' is supposed to always find the built-in module; # like CPython, it is a built-in module, so it shadows everything, # even though there is a gc.py. @@ -586,6 +588,7 @@ del sys.modules['gc'] def test_shadow_extension_1(self): + if self.runappdirect: skip("hard to test: module is already imported") # 'import itertools' is supposed to find itertools.py if there is # one in sys.path. import sys @@ -597,6 +600,7 @@ del sys.modules['itertools'] def test_shadow_extension_2(self): + if self.runappdirect: skip("hard to test: module is already imported") # 'import itertools' is supposed to find the built-in module even # if there is also one in sys.path as long as it is *after* the # special entry '.../lib_pypy/__extensions__'. 
(Note that for now From noreply at buildbot.pypy.org Sat Dec 10 12:20:33 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 10 Dec 2011 12:20:33 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: fix test Message-ID: <20111210112033.9CEE782ABD@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50338:51117c91db41 Date: 2011-12-10 11:43 +0100 http://bitbucket.org/pypy/pypy/changeset/51117c91db41/ Log: fix test diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -438,7 +438,7 @@ i33 = int_add_ovf(i15, i32) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p5, i33, i32, p23, p30, p24, descr=...) + p39 = same_as(...) # Should be killed by backend """) def test_local_closure_is_virtual(self): From noreply at buildbot.pypy.org Sat Dec 10 12:20:34 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 10 Dec 2011 12:20:34 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: at bit more logging Message-ID: <20111210112034.C06D582ABD@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50339:b63f055c491d Date: 2011-12-10 12:05 +0100 http://bitbucket.org/pypy/pypy/changeset/b63f055c491d/ Log: at bit more logging diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -50,7 +50,7 @@ cmdline.append(str(self.filepath)) # print cmdline, logfile - env={'PYPYLOG': 'jit-log-opt,jit-log-noopt,jit-summary:' + str(logfile)} + env={'PYPYLOG': 'jit-log-opt,jit-log-noopt,jit-log-virtualstate,jit-summary:' + str(logfile)} pipe = subprocess.Popen(cmdline, env=env, stdout=subprocess.PIPE, From noreply at buildbot.pypy.org Sat Dec 10 12:20:37 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 10 Dec 2011 12:20:37 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: hg merge 8de6f245c959 Message-ID: <20111210112037.ACBD782ABD@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50340:38465568087c Date: 2011-12-10 12:20 +0100 http://bitbucket.org/pypy/pypy/changeset/38465568087c/ Log: hg merge 8de6f245c959 diff --git a/lib-python/modified-2.7/ctypes/__init__.py b/lib-python/modified-2.7/ctypes/__init__.py --- a/lib-python/modified-2.7/ctypes/__init__.py +++ b/lib-python/modified-2.7/ctypes/__init__.py @@ -351,7 +351,7 @@ self._FuncPtr = _FuncPtr if handle is None: - self._handle = _ffi.CDLL(name) + self._handle = _ffi.CDLL(name, mode) else: self._handle = handle diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -379,12 +379,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): @@ -404,7 +406,7 @@ recurse.remove(id(self)) def copy(self): - 
return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -164,6 +164,7 @@ # if something: # assume this causes a NameError # # _this_ lines and the one # below we don't want from entry.getsource() + end = min(end, len(source)) for i in range(self.lineno, end): if source[i].rstrip().endswith(':'): end = i + 1 diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py --- a/pypy/bin/checkmodule.py +++ b/pypy/bin/checkmodule.py @@ -1,43 +1,45 @@ #! /usr/bin/env python """ -Usage: checkmodule.py [-b backend] +Usage: checkmodule.py -Compiles the PyPy extension module from pypy/module// -into a fake program which does nothing. Useful for testing whether a -modules compiles without doing a full translation. Default backend is cli. - -WARNING: this is still incomplete: there are chances that the -compilation fails with strange errors not due to the module. If a -module is known to compile during a translation but don't pass -checkmodule.py, please report the bug (or, better, correct it :-). +Check annotation and rtyping of the PyPy extension module from +pypy/module//. Useful for testing whether a +modules compiles without doing a full translation. """ import autopath -import sys +import sys, os from pypy.objspace.fake.checkmodule import checkmodule def main(argv): - try: - assert len(argv) in (2, 4) - if len(argv) == 2: - backend = 'cli' - modname = argv[1] - if modname in ('-h', '--help'): - print >> sys.stderr, __doc__ - sys.exit(0) - if modname.startswith('-'): - print >> sys.stderr, "Bad command line" - print >> sys.stderr, __doc__ - sys.exit(1) - else: - _, b, backend, modname = argv - assert b == '-b' - except AssertionError: + if len(argv) != 2: print >> sys.stderr, __doc__ sys.exit(2) + modname = argv[1] + if modname in ('-h', '--help'): + print >> sys.stderr, __doc__ + sys.exit(0) + if modname.startswith('-'): + print >> sys.stderr, "Bad command line" + print >> sys.stderr, __doc__ + sys.exit(1) + if os.path.sep in modname: + if os.path.basename(modname) == '': + modname = os.path.dirname(modname) + if os.path.basename(os.path.dirname(modname)) != 'module': + print >> sys.stderr, "Must give '../module/xxx', or just 'xxx'." + sys.exit(1) + modname = os.path.basename(modname) + try: + checkmodule(modname) + except Exception, e: + import traceback, pdb + traceback.print_exc() + pdb.post_mortem(sys.exc_info()[2]) + return 1 else: - checkmodule(modname, backend, interactive=True) - print 'Module compiled succesfully' + print 'Passed.' 
+ return 0 if __name__ == '__main__': - main(sys.argv) + sys.exit(main(sys.argv)) diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -1,6 +1,5 @@ """codegen helpers and AST constant folding.""" import sys -import itertools from pypy.interpreter.astcompiler import ast, consts, misc from pypy.tool import stdlib_opcode as ops @@ -146,8 +145,7 @@ } unrolling_unary_folders = unrolling_iterable(unary_folders.items()) -for folder in itertools.chain(binary_folders.itervalues(), - unary_folders.itervalues()): +for folder in binary_folders.values() + unary_folders.values(): folder._always_inline_ = True del folder diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1,4 +1,3 @@ -import itertools import pypy from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction @@ -519,8 +518,8 @@ exception_types_w = self.export_builtin_exceptions() # initialize with "bootstrap types" from objspace (e.g. w_None) - types_w = itertools.chain(self.get_builtin_types().iteritems(), - exception_types_w.iteritems()) + types_w = (self.get_builtin_types().items() + + exception_types_w.items()) for name, w_type in types_w: self.setitem(self.builtin.w_dict, self.wrap(name), w_type) @@ -1608,6 +1607,8 @@ 'UnicodeError', 'ValueError', 'ZeroDivisionError', + 'UnicodeEncodeError', + 'UnicodeDecodeError', ] ## Irregular part of the interface: diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -23,9 +23,12 @@ return self.frame_bindings.get(box, None) def loc(self, box): - res = self.get(box) - if res is not None: - return res + try: + return self.frame_bindings[box] + except KeyError: + return self.get_new_loc(box) + + def get_new_loc(self, box): size = self.frame_size(box.type) self.frame_depth += ((-self.frame_depth) & (size-1)) # ^^^ frame_depth is rounded up to a multiple of 'size', assuming @@ -73,8 +76,17 @@ self.position = -1 self.frame_manager = frame_manager self.assembler = assembler + self.hint_frame_locations = {} # {Box: StackLoc} + self.freed_frame_locations = {} # {StackLoc: None} + + def is_still_alive(self, v): + # Check if 'v' is alive at the current position. + # Return False if the last usage is strictly before. + return self.longevity[v][1] >= self.position def stays_alive(self, v): + # Check if 'v' stays alive after the current position. + # Return False if the last usage is before or at position. return self.longevity[v][1] > self.position def next_instruction(self, incr=1): @@ -90,11 +102,16 @@ point for all variables that might be in registers. 
""" self._check_type(v) - if isinstance(v, Const) or v not in self.reg_bindings: + if isinstance(v, Const): return if v not in self.longevity or self.longevity[v][1] <= self.position: - self.free_regs.append(self.reg_bindings[v]) - del self.reg_bindings[v] + if v in self.reg_bindings: + self.free_regs.append(self.reg_bindings[v]) + del self.reg_bindings[v] + if self.frame_manager is not None: + if v in self.frame_manager.frame_bindings: + loc = self.frame_manager.frame_bindings[v] + self.freed_frame_locations[loc] = None def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. @@ -166,6 +183,23 @@ self.reg_bindings[v] = loc return loc + def _frame_loc(self, v): + # first check if it's already in the frame_manager + try: + return self.frame_manager.frame_bindings[v] + except KeyError: + pass + # check if we have a hint for this box + if v in self.hint_frame_locations: + # if we do, check that the hinted location is known to be free + loc = self.hint_frame_locations[v] + if loc in self.freed_frame_locations: + del self.freed_frame_locations[loc] + self.frame_manager.frame_bindings[v] = loc + return loc + # no valid hint. make up a new free location + return self.frame_manager.get_new_loc(v) + def _spill_var(self, v, forbidden_vars, selected_reg, need_lower_byte=False): v_to_spill = self._pick_variable_to_spill(v, forbidden_vars, @@ -173,7 +207,7 @@ loc = self.reg_bindings[v_to_spill] del self.reg_bindings[v_to_spill] if self.frame_manager.get(v_to_spill) is None: - newloc = self.frame_manager.loc(v_to_spill) + newloc = self._frame_loc(v_to_spill) self.assembler.regalloc_mov(loc, newloc) return loc @@ -250,7 +284,7 @@ except KeyError: if box in self.bindings_to_frame_reg: return self.frame_reg - return self.frame_manager.loc(box) + return self._frame_loc(box) def return_constant(self, v, forbidden_vars=[], selected_reg=None): """ Return the location of the constant v. 
If 'selected_reg' is @@ -298,7 +332,7 @@ self.reg_bindings[v] = loc self.assembler.regalloc_mov(prev_loc, loc) else: - loc = self.frame_manager.loc(v) + loc = self._frame_loc(v) self.assembler.regalloc_mov(prev_loc, loc) def force_result_in_reg(self, result_v, v, forbidden_vars=[]): @@ -317,7 +351,7 @@ self.reg_bindings[result_v] = loc return loc if v not in self.reg_bindings: - prev_loc = self.frame_manager.loc(v) + prev_loc = self._frame_loc(v) loc = self.force_allocate_reg(v, forbidden_vars) self.assembler.regalloc_mov(prev_loc, loc) assert v in self.reg_bindings @@ -337,7 +371,7 @@ def _sync_var(self, v): if not self.frame_manager.get(v): reg = self.reg_bindings[v] - to = self.frame_manager.loc(v) + to = self._frame_loc(v) self.assembler.regalloc_mov(reg, to) # otherwise it's clean diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -348,3 +348,50 @@ spilled2 = rm.force_allocate_reg(b5) assert spilled2 is loc rm._check_invariants() + + + def test_hint_frame_locations_1(self): + b0, b1 = newboxes(0, 1) + longevity = {b0: (0, 1), b1: (0, 1)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.hint_frame_locations[b0] = "some_stack_loc" + rm.freed_frame_locations["some_stack_loc"] = None + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_spill_var(b0) + rm.force_spill_var(b1) + assert rm.loc(b0) == "some_stack_loc" + assert isinstance(rm.loc(b1), FakeFramePos) + rm._check_invariants() + + def test_hint_frame_locations_2(self): + b0, b1, b2 = newboxes(0, 1, 2) + longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_allocate_reg(b2) + rm.force_spill_var(b0) + loc = rm.loc(b0) + assert isinstance(loc, FakeFramePos) + rm.position = 1 + assert loc not in rm.freed_frame_locations + rm.possibly_free_var(b0) + assert loc in rm.freed_frame_locations + # + rm.hint_frame_locations[b1] = loc + rm.force_spill_var(b1) + loc1 = rm.loc(b1) + assert loc1 is loc + assert rm.freed_frame_locations == {} + # + rm.hint_frame_locations[b2] = loc + rm.force_spill_var(b2) + loc2 = rm.loc(b2) + assert loc2 is not loc1 # because it's not in freed_frame_locations + # + rm._check_invariants() diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -698,6 +698,7 @@ def _assemble(self, regalloc, operations): self._regalloc = regalloc + regalloc.compute_hint_frame_locations(operations) regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -1319,6 +1319,29 @@ self.rm.possibly_free_var(tmpbox_low) self.rm.possibly_free_var(tmpbox_high) + def compute_hint_frame_locations(self, operations): + # optimization only: fill in the 'hint_frame_locations' dictionary + # of rm and xrm based on the JUMP at the end of the loop, by looking + # at where we would like the boxes to be after the jump. 
+ op = operations[-1] + if op.getopnum() != rop.JUMP: + return + descr = op.getdescr() + assert isinstance(descr, LoopToken) + nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr) + for i in range(op.numargs()): + box = op.getarg(i) + if isinstance(box, Box): + loc = nonfloatlocs[i] + if isinstance(loc, StackLoc): + assert box.type != FLOAT + self.rm.hint_frame_locations[box] = loc + else: + loc = floatlocs[i] + if isinstance(loc, StackLoc): + assert box.type == FLOAT + self.xrm.hint_frame_locations[box] = loc + def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -371,7 +371,7 @@ pass class ResumeGuardDescr(ResumeDescr): - _counter = 0 # if < 0, there is one counter per value; + _counter = 0 # on a GUARD_VALUE, there is one counter per value; _counters = None # they get stored in _counters then. # this class also gets the following attributes stored by resume.py code @@ -382,10 +382,13 @@ rd_virtuals = None rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) - CNT_INT = -0x20000000 - CNT_REF = -0x40000000 - CNT_FLOAT = -0x60000000 - CNT_MASK = 0x1FFFFFFF + CNT_BASE_MASK = 0x0FFFFFFF # the base counter value + CNT_BUSY_FLAG = 0x10000000 # if set, busy tracing from the guard + CNT_TYPE_MASK = 0x60000000 # mask for the type + + CNT_INT = 0x20000000 + CNT_REF = 0x40000000 + CNT_FLOAT = 0x60000000 def store_final_boxes(self, guard_op, boxes): guard_op.setfailargs(boxes) @@ -399,6 +402,8 @@ except ValueError: return # xxx probably very rare else: + if i > self.CNT_BASE_MASK: + return # probably never, but better safe than sorry if box.type == history.INT: cnt = self.CNT_INT elif box.type == history.REF: @@ -407,14 +412,17 @@ cnt = self.CNT_FLOAT else: assert 0, box.type - # we build the following value for _counter, which is always - # a negative value + assert cnt > self.CNT_BASE_MASK self._counter = cnt | i def handle_fail(self, metainterp_sd, jitdriver_sd): if self.must_compile(metainterp_sd, jitdriver_sd): - return self._trace_and_compile_from_bridge(metainterp_sd, - jitdriver_sd) + self.start_compiling() + try: + return self._trace_and_compile_from_bridge(metainterp_sd, + jitdriver_sd) + finally: + self.done_compiling() else: from pypy.jit.metainterp.blackhole import resume_in_blackhole resume_in_blackhole(metainterp_sd, jitdriver_sd, self) @@ -432,12 +440,22 @@ def must_compile(self, metainterp_sd, jitdriver_sd): trace_eagerness = jitdriver_sd.warmstate.trace_eagerness - if self._counter >= 0: + # + if self._counter <= self.CNT_BASE_MASK: + # simple case: just counting from 0 to trace_eagerness self._counter += 1 return self._counter >= trace_eagerness - else: - index = self._counter & self.CNT_MASK - typetag = self._counter & ~ self.CNT_MASK + # + # do we have the BUSY flag? If so, we're tracing right now, e.g. in an + # outer invocation of the same function, so don't trace again for now. + elif self._counter & self.CNT_BUSY_FLAG: + return False + # + else: # we have a GUARD_VALUE that fails. Make a _counters instance + # (only now, when the guard is actually failing at least once), + # and use it to record some statistics about the failing values. 
+ index = self._counter & self.CNT_BASE_MASK + typetag = self._counter & self.CNT_TYPE_MASK counters = self._counters if typetag == self.CNT_INT: intvalue = metainterp_sd.cpu.get_latest_value_int(index) @@ -464,7 +482,16 @@ assert 0, typetag return counter >= trace_eagerness - def reset_counter_from_failure(self): + def start_compiling(self): + # start tracing and compiling from this guard. + self._counter |= self.CNT_BUSY_FLAG + + def done_compiling(self): + # done tracing and compiling from this guard. Either the bridge has + # been successfully compiled, in which case whatever value we store + # in self._counter will not be seen any more, or not, in which case + # we should reset the counter to 0, in order to wait a bit until the + # next attempt. if self._counter >= 0: self._counter = 0 self._counters = None @@ -670,9 +697,6 @@ self.original_greenkey, jitcell_token) metainterp_sd.stats.add_jitcell_token(jitcell_token) - def reset_counter_from_failure(self): - pass - def compile_trace(metainterp, resumekey, start_resumedescr=None): """Try to compile a new bridge leading from the beginning of the history diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -234,6 +234,9 @@ # longlongs are treated as floats, see # e.g. llsupport/descr.py:getDescrClass is_float = True + elif kind == 'u': + # they're all False + pass else: assert False, "unsupported ffitype or kind" # diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1796,7 +1796,6 @@ self.staticdata.profiler.count(reason) debug_print('~~~ ABORTING TRACING') self.staticdata.stats.aborted() - self.resumekey.reset_counter_from_failure() def blackhole_if_trace_too_long(self): warmrunnerstate = self.jitdriver_sd.warmstate diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -147,6 +147,29 @@ self.check_resops({'jump': 1, 'int_lt': 2, 'setinteriorfield_raw': 4, 'getinteriorfield_raw': 8, 'int_add': 6, 'guard_true': 2}) + def test_array_getitem_uint8(self): + myjitdriver = JitDriver( + greens = [], + reds = ["n", "i", "s", "data"], + ) + def f(data, n): + i = s = 0 + while i < n: + myjitdriver.jit_merge_point(n=n, i=i, s=s, data=data) + s += rffi.cast(lltype.Signed, array_getitem(types.uchar, 1, data, 0, 0)) + i += 1 + return s + + def main(n): + with lltype.scoped_alloc(rffi.CArray(rffi.UCHAR), 1) as data: + data[0] = rffi.cast(rffi.UCHAR, 200) + return f(data, n) + + assert self.meta_interp(main, [10]) == 2000 + self.check_resops({'jump': 2, 'int_lt': 2, 'getinteriorfield_raw': 2, + 'guard_true': 2, 'int_add': 4}) + + class TestFfiCall(FfiCallTests, LLJitMixin): supports_all = False diff --git a/pypy/jit/metainterp/test/test_math.py b/pypy/jit/metainterp/test/test_math.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/test_math.py @@ -0,0 +1,47 @@ +import math +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from pypy.rlib.rfloat import isinf, isnan, INFINITY, NAN + +class MathTests: + + def test_math_sqrt(self): + def f(x): + try: + return math.sqrt(x) + except ValueError: + return -INFINITY + + res = self.interp_operations(f, [0.0]) + assert res == 0.0 + self.check_operations_history(call_pure=1) + # + res = 
self.interp_operations(f, [25.0]) + assert res == 5.0 + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [-0.0]) + assert str(res) == '-0.0' + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [1000000.0]) + assert res == 1000.0 + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [-1.0]) + assert res == -INFINITY + self.check_operations_history(call_pure=0) + # + res = self.interp_operations(f, [INFINITY]) + assert isinf(res) and not isnan(res) and res > 0.0 + self.check_operations_history(call_pure=0) + # + res = self.interp_operations(f, [NAN]) + assert isnan(res) and not isinf(res) + self.check_operations_history(call_pure=0) + + +class TestOOtype(MathTests, OOJitMixin): + pass + +class TestLLtype(MathTests, LLJitMixin): + pass diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -1238,6 +1238,31 @@ self.meta_interp(portal, [0, 0, 0], inline=True) self.check_resops(call_may_force=0, call=0) + def test_dont_repeatedly_trace_from_the_same_guard(self): + driver = JitDriver(greens = [], reds = ['level', 'i']) + + def portal(level): + if level == 0: + i = -10 + else: + i = 0 + # + while True: + driver.jit_merge_point(level=level, i=i) + if level == 25: + return 42 + i += 1 + if i <= 0: # <- guard + continue # first make a loop + else: + # then we fail the guard above, doing a recursive call, + # which will itself fail the same guard above, and so on + return portal(level + 1) + + self.meta_interp(portal, [0]) + self.check_loop_count_at_most(2) # and not, e.g., 24 + + class TestLLtype(RecursiveTests, LLJitMixin): pass diff --git a/pypy/module/_bisect/test/test_ztranslation.py b/pypy/module/_bisect/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/_bisect/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_checkmodule(): + checkmodule('_bisect') diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -13,12 +13,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): @@ -36,7 +38,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -19,11 +19,41 @@ def test_keyerror_without_factory(self): from _collections import defaultdict - d1 = defaultdict() - for key in ['foo', (1,)]: - try: - d1[key] - except KeyError, 
err: - assert err.args[0] == key - else: - assert 0, "expected KeyError" + for d1 in [defaultdict(), defaultdict(None)]: + for key in ['foo', (1,)]: + try: + d1[key] + except KeyError, err: + assert err.args[0] == key + else: + assert 0, "expected KeyError" + + def test_noncallable(self): + from _collections import defaultdict + raises(TypeError, defaultdict, [('a', 5)]) + d = defaultdict(None, [('a', 5)]) + assert d.items() == [('a', 5)] + + def test_kwds(self): + from _collections import defaultdict + d = defaultdict(default_factory=5) + assert d.keys() == ['default_factory'] + + def test_copy(self): + import _collections + def f(): + return 42 + d = _collections.defaultdict(f, {2: 3}) + # + d1 = d.copy() + assert type(d1) is _collections.defaultdict + assert len(d1) == 1 + assert d1[2] == 3 + assert d1[3] == 42 + # + import copy + d2 = copy.deepcopy(d) + assert type(d2) is _collections.defaultdict + assert len(d2) == 1 + assert d2[2] == 3 + assert d2[3] == 42 diff --git a/pypy/module/_ffi/interp_ffi.py b/pypy/module/_ffi/interp_ffi.py --- a/pypy/module/_ffi/interp_ffi.py +++ b/pypy/module/_ffi/interp_ffi.py @@ -457,14 +457,14 @@ # ======================================================================== class W_CDLL(Wrappable): - def __init__(self, space, name): + def __init__(self, space, name, mode): self.space = space if name is None: self.name = "" else: self.name = name try: - self.cdll = libffi.CDLL(name) + self.cdll = libffi.CDLL(name, mode) except DLOpenError, e: raise operationerrfmt(space.w_OSError, '%s: %s', self.name, e.msg or 'unspecified error') @@ -492,9 +492,9 @@ "No symbol %s found in library %s", name, self.name) return space.wrap(address_as_uint) - at unwrap_spec(name='str_or_None') -def descr_new_cdll(space, w_type, name): - return space.wrap(W_CDLL(space, name)) + at unwrap_spec(name='str_or_None', mode=int) +def descr_new_cdll(space, w_type, name, mode=-1): + return space.wrap(W_CDLL(space, name, mode)) W_CDLL.typedef = TypeDef( @@ -509,6 +509,6 @@ def get_libc(space): from pypy.rlib.clibffi import get_libc_name try: - return space.wrap(W_CDLL(space, get_libc_name())) + return space.wrap(W_CDLL(space, get_libc_name(), -1)) except OSError, e: raise wrap_oserror(space, e) diff --git a/pypy/module/_multiprocessing/test/test_semaphore.py b/pypy/module/_multiprocessing/test/test_semaphore.py --- a/pypy/module/_multiprocessing/test/test_semaphore.py +++ b/pypy/module/_multiprocessing/test/test_semaphore.py @@ -70,8 +70,10 @@ maxvalue = 1 sem = SemLock(kind, value, maxvalue) - assert sem.acquire() - assert not sem.acquire(timeout=0.1) + res = sem.acquire() + assert res == True + res = sem.acquire(timeout=0.1) + assert res == False def test_semaphore_rebuild(self): from _multiprocessing import SemLock diff --git a/pypy/module/_random/test/test_ztranslation.py b/pypy/module/_random/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/_random/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_checkmodule(): + checkmodule('_random') diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -1,7 +1,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.module._socket.interp_socket import converted_error, W_RSocket from pypy.rlib import rsocket -from pypy.rlib.rsocket import SocketError +from pypy.rlib.rsocket import SocketError, INVALID_SOCKET from pypy.interpreter.error 
import OperationError def gethostname(space): @@ -284,7 +284,7 @@ space.wrap(socktype), space.wrap(protocol), space.wrap(canonname), - addr.as_object(-1, space)]) # -1 as per cpython + addr.as_object(INVALID_SOCKET, space)]) # -1 as per cpython for (family, socktype, protocol, canonname, addr) in lst] return space.newlist(lst1) diff --git a/pypy/module/cStringIO/test/test_ztranslation.py b/pypy/module/cStringIO/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/cStringIO/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_checkmodule(): + checkmodule('cStringIO') diff --git a/pypy/module/clr/boxing_rules.py b/pypy/module/clr/boxing_rules.py --- a/pypy/module/clr/boxing_rules.py +++ b/pypy/module/clr/boxing_rules.py @@ -43,11 +43,11 @@ def tocli(self): return box(self._value) -from pypy.objspace.fake.objspace import W_Object as W_Object_Fake -from pypy.rlib.nonconst import NonConstant +##from pypy.objspace.fake.objspace import W_Object as W_Object_Fake +##from pypy.rlib.nonconst import NonConstant -class __extend__(W_Object_Fake): - __metaclass__ = extendabletype +##class __extend__(W_Object_Fake): +## __metaclass__ = extendabletype - def tocli(self): - return NonConstant(None) +## def tocli(self): +## return NonConstant(None) diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -439,9 +439,6 @@ self.w_it = self.space.iter(self.space.next(self.w_iterables)) def next_w(self): - if not self.w_iterables: - # already stopped - raise OperationError(self.space.w_StopIteration, self.space.w_None) if not self.w_it: self._advance() try: diff --git a/pypy/module/itertools/test/test_ztranslation.py b/pypy/module/itertools/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/itertools/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_checkmodule(): + checkmodule('itertools') diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -32,6 +32,7 @@ 'int_': 'interp_boxes.W_LongBox', 'inexact': 'interp_boxes.W_InexactBox', 'floating': 'interp_boxes.W_FloatingBox', + 'float32': 'interp_boxes.W_Float32Box', 'float64': 'interp_boxes.W_Float64Box', } @@ -76,4 +77,5 @@ 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', 'arange': 'app_numpy.arange', + 'reshape': 'app_numpy.reshape', } diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -36,3 +36,40 @@ j += 1 i += step return arr + + +def reshape(a, shape): + '''reshape(a, newshape) + Gives a new shape to an array without changing its data. + + Parameters + ---------- + a : array_like + Array to be reshaped. + newshape : int or tuple of ints + The new shape should be compatible with the original shape. If + an integer, then the result will be a 1-D array of that length. + One shape dimension can be -1. In this case, the value is inferred + from the length of the array and remaining dimensions. + + Returns + ------- + reshaped_array : ndarray + This will be a new view object if possible; otherwise, it will + be a copy. + + + See Also + -------- + ndarray.reshape : Equivalent method. 
+ + Notes + ----- + + It is not always possible to change the shape of an array without + copying the data. If you want an error to be raise if the data is copied, + you should assign the new shape to the shape attribute of the array +''' + if not hasattr(a, 'reshape'): + a = numpypy.array(a) + return a.reshape(shape) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -4,7 +4,6 @@ from pypy.interpreter.typedef import TypeDef from pypy.objspace.std.floattype import float_typedef from pypy.objspace.std.inttype import int_typedef -from pypy.objspace.std.typeobject import W_TypeObject from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name @@ -33,9 +32,8 @@ _attrs_ = () def descr__new__(space, w_subtype, __args__): - assert isinstance(w_subtype, W_TypeObject) raise operationerrfmt(space.w_TypeError, "cannot create '%s' instances", - w_subtype.get_module_type_name() + w_subtype.getname(space, '?') ) def descr_str(self, space): @@ -258,10 +256,12 @@ W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, __module__ = "numpypy", + + __new__ = interp2app(W_Float32Box.descr__new__.im_func), ) W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), __module__ = "numpypy", __new__ = interp2app(W_Float64Box.descr__new__.im_func), -) \ No newline at end of file +) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -18,7 +18,7 @@ VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True}) class W_Dtype(Wrappable): - _immuable_fields_ = ["itemtype", "num", "kind"] + _immutable_fields_ = ["itemtype", "num", "kind"] def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[]): self.signature = signature.BaseSignature() diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -98,6 +98,105 @@ endshape[i] = remainder[i] return endshape +def get_shape_from_iterable(space, old_size, w_iterable): + new_size = 0 + new_shape = [] + if space.isinstance_w(w_iterable, space.w_int): + new_size = space.int_w(w_iterable) + if new_size < 0: + new_size = old_size + new_shape = [new_size] + else: + neg_dim = -1 + batch = space.listview(w_iterable) + new_size = 1 + if len(batch) < 1: + if old_size == 1: + # Scalars can have an empty size. + new_size = 1 + else: + new_size = 0 + new_shape = [] + i = 0 + for elem in batch: + s = space.int_w(elem) + if s < 0: + if neg_dim >= 0: + raise OperationError(space.w_ValueError, space.wrap( + "can only specify one unknown dimension")) + s = 1 + neg_dim = i + new_size *= s + new_shape.append(s) + i += 1 + if neg_dim >= 0: + new_shape[neg_dim] = old_size / new_size + new_size *= new_shape[neg_dim] + if new_size != old_size: + raise OperationError(space.w_ValueError, + space.wrap("total size of new array must be unchanged")) + return new_shape + +# Recalculating strides. Find the steps that the iteration does for each +# dimension, given the stride and shape. Then try to create a new stride that +# fits the new shape, using those steps. 
If there is a shape/step mismatch +# (meaning that the realignment of elements crosses from one step into another) +# return None so that the caller can raise an exception. +def calc_new_strides(new_shape, old_shape, old_strides): + # Return the proper strides for new_shape, or None if the mapping crosses + # stepping boundaries + + # Assumes that prod(old_shape) == prod(new_shape), len(old_shape) > 1, and + # len(new_shape) > 0 + steps = [] + last_step = 1 + oldI = 0 + new_strides = [] + if old_strides[0] < old_strides[-1]: + for i in range(len(old_shape)): + steps.append(old_strides[i] / last_step) + last_step *= old_shape[i] + cur_step = steps[0] + n_new_elems_used = 1 + n_old_elems_to_use = old_shape[0] + for s in new_shape: + new_strides.append(cur_step * n_new_elems_used) + n_new_elems_used *= s + while n_new_elems_used > n_old_elems_to_use: + oldI += 1 + if steps[oldI] != steps[oldI - 1]: + return None + n_old_elems_to_use *= old_shape[oldI] + if n_new_elems_used == n_old_elems_to_use: + oldI += 1 + if oldI >= len(old_shape): + break + cur_step = steps[oldI] + n_old_elems_to_use *= old_shape[oldI] + else: + for i in range(len(old_shape) - 1, -1, -1): + steps.insert(0, old_strides[i] / last_step) + last_step *= old_shape[i] + cur_step = steps[-1] + n_new_elems_used = 1 + oldI = -1 + n_old_elems_to_use = old_shape[-1] + for i in range(len(new_shape) - 1, -1, -1): + s = new_shape[i] + new_strides.insert(0, cur_step * n_new_elems_used) + n_new_elems_used *= s + while n_new_elems_used > n_old_elems_to_use: + oldI -= 1 + if steps[oldI] != steps[oldI + 1]: + return None + n_old_elems_to_use *= old_shape[oldI] + if n_new_elems_used == n_old_elems_to_use: + oldI -= 1 + if oldI < -len(old_shape): + break + cur_step = steps[oldI] + n_old_elems_to_use *= old_shape[oldI] + return new_strides # Iterators for arrays # -------------------- @@ -444,6 +543,7 @@ return False i = i.next(shapelen) return True + def descr_all(self, space): return space.wrap(self._all()) @@ -459,6 +559,7 @@ return True i = i.next(shapelen) return False + def descr_any(self, space): return space.wrap(self._any()) @@ -483,6 +584,12 @@ def descr_get_shape(self, space): return space.newtuple([space.wrap(i) for i in self.shape]) + def descr_set_shape(self, space, w_iterable): + concrete = self.get_concrete() + new_shape = get_shape_from_iterable(space, + concrete.find_size(), w_iterable) + concrete.setshape(space, new_shape) + def descr_get_size(self, space): return space.wrap(self.find_size()) @@ -607,11 +714,6 @@ def _index_of_single_item(self, space, w_idx): if space.isinstance_w(w_idx, space.w_int): idx = space.int_w(w_idx) - if not self.shape: - if idx != 0: - raise OperationError(space.w_IndexError, - space.wrap("index out of range")) - return 0 if idx < 0: idx = self.shape[0] + idx if idx < 0 or idx >= self.shape[0]: @@ -730,10 +832,49 @@ strides += self.strides[s:] backstrides += self.backstrides[s:] new_sig = signature.Signature.find_sig([ - NDimSlice.signature, self.signature, + W_NDimSlice.signature, self.signature, ]) - return NDimSlice(self, new_sig, start, strides[:], backstrides[:], - shape[:]) + return W_NDimSlice(self, new_sig, start, strides[:], backstrides[:], + shape[:]) + + def descr_reshape(self, space, args_w): + """reshape(...) + a.reshape(shape) + + Returns an array containing the same data with a new shape. + + Refer to `numpypy.reshape` for full documentation. 
+ + See Also + -------- + numpypy.reshape : equivalent function +""" + if len(args_w) == 1: + w_shape = args_w[0] + else: + w_shape = space.newtuple(args_w) + concrete = self.get_concrete() + new_shape = get_shape_from_iterable(space, + concrete.find_size(), w_shape) + # Since we got to here, prod(new_shape) == self.size + new_strides = calc_new_strides(new_shape, + concrete.shape, concrete.strides) + if new_strides: + # We can create a view, strides somehow match up. + new_sig = signature.Signature.find_sig([ + W_NDimSlice.signature, self.signature + ]) + ndims = len(new_shape) + new_backstrides = [0] * ndims + for nd in range(ndims): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + arr = W_NDimSlice(self, new_sig, self.start, new_strides, + new_backstrides, new_shape) + else: + # Create copy with contiguous data + arr = concrete.copy() + arr.setshape(space, new_shape) + return arr def descr_mean(self, space): return space.div(self.descr_sum(space), space.wrap(self.find_size())) @@ -751,7 +892,7 @@ if len(concrete.shape) < 2: return space.wrap(self) new_sig = signature.Signature.find_sig([ - NDimSlice.signature, self.signature + W_NDimSlice.signature, self.signature ]) strides = [] backstrides = [] @@ -760,8 +901,8 @@ strides.append(concrete.strides[i]) backstrides.append(concrete.backstrides[i]) shape.append(concrete.shape[i]) - return space.wrap(NDimSlice(concrete, new_sig, self.start, strides[:], - backstrides[:], shape[:])) + return space.wrap(W_NDimSlice(concrete, new_sig, self.start, strides[:], + backstrides[:], shape[:])) def descr_get_flatiter(self, space): return space.wrap(W_FlatIterator(self)) @@ -775,6 +916,15 @@ def descr_debug_repr(self, space): return space.wrap(self.debug_repr()) + def descr_array_iface(self, space): + concrete = self.get_concrete() + storage = concrete.get_storage(space) + addr = rffi.cast(lltype.Signed, storage) + w_d = space.newdict() + space.setitem_str(w_d, 'data', space.newtuple([space.wrap(addr), + space.w_False])) + return w_d + def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): return w_obj @@ -830,6 +980,14 @@ def debug_repr(self): return 'Scalar' + def setshape(self, space, new_shape): + # In order to get here, we already checked that prod(new_shape) == 1, + # so in order to have a consistent API, let it go through. 
+ pass + + def get_storage(self, space): + raise OperationError(space.w_TypeError, space.wrap("Cannot get array interface on scalars in pypy")) + class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs @@ -1022,13 +1180,46 @@ return space.wrap(self.shape[0]) return space.wrap(1) + def setshape(self, space, new_shape): + if len(self.shape) < 1: + return + elif len(self.shape) < 2: + # TODO: this code could be refactored into calc_strides + # but then calc_strides would have to accept a stepping factor + strides = [] + backstrides = [] + s = self.strides[0] + if self.order == 'C': + new_shape.reverse() + for sh in new_shape: + strides.append(s) + backstrides.append(s * (sh - 1)) + s *= sh + if self.order == 'C': + strides.reverse() + backstrides.reverse() + new_shape.reverse() + self.strides = strides[:] + self.backstrides = backstrides[:] + self.shape = new_shape[:] + return + new_strides = calc_new_strides(new_shape, self.shape, self.strides) + if new_strides is None: + raise OperationError(space.w_AttributeError, space.wrap( + "incompatible shape for a non-contiguous array")) + new_backstrides = [0] * len(new_shape) + for nd in range(len(new_shape)): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + self.strides = new_strides[:] + self.backstrides = new_backstrides[:] + self.shape = new_shape[:] -class NDimSlice(ViewArray): +class W_NDimSlice(ViewArray): signature = signature.BaseSignature() def __init__(self, parent, signature, start, strides, backstrides, shape): - if isinstance(parent, NDimSlice): + if isinstance(parent, W_NDimSlice): parent = parent.parent ViewArray.__init__(self, parent, signature, strides, backstrides, shape) self.start = start @@ -1077,11 +1268,16 @@ def copy(self): array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) iter = self.start_iter() + a_iter = array.start_iter() while not iter.done(): - array.setitem(iter.offset, self.getitem(iter.offset)) + array.setitem(a_iter.offset, self.getitem(iter.offset)) iter = iter.next(len(self.shape)) + a_iter = a_iter.next(len(array.shape)) return array + def get_storage(self, space): + return self.parent.get_storage(space) + class W_NDimArray(BaseArray): """ A class representing contiguous array. 
We know that each iteration by say ufunc will increase the data index by one @@ -1137,9 +1333,16 @@ return ArrayIterator(self.size) raise NotImplementedError # use ViewIterator simply, test it + def setshape(self, space, new_shape): + self.shape = new_shape + self.calc_strides(new_shape) + def debug_repr(self): return 'Array' + def get_storage(self, space): + return self.storage + def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) @@ -1259,9 +1462,11 @@ __repr__ = interp2app(BaseArray.descr_repr), __str__ = interp2app(BaseArray.descr_str), __debug_repr__ = interp2app(BaseArray.descr_debug_repr), + __array_interface__ = GetSetProperty(BaseArray.descr_array_iface), dtype = GetSetProperty(BaseArray.descr_get_dtype), - shape = GetSetProperty(BaseArray.descr_get_shape), + shape = GetSetProperty(BaseArray.descr_get_shape, + BaseArray.descr_set_shape), size = GetSetProperty(BaseArray.descr_get_size), T = GetSetProperty(BaseArray.descr_get_transpose), @@ -1279,6 +1484,7 @@ dot = interp2app(BaseArray.descr_dot), copy = interp2app(BaseArray.descr_copy), + reshape = interp2app(BaseArray.descr_reshape), ) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -173,7 +173,7 @@ raises(TypeError, numpy.number, 0) raises(TypeError, numpy.integer, 0) exc = raises(TypeError, numpy.signedinteger, 0) - assert str(exc.value) == "cannot create 'numpypy.signedinteger' instances" + assert str(exc.value) == "cannot create 'signedinteger' instances" raises(TypeError, numpy.floating, 0) raises(TypeError, numpy.inexact, 0) @@ -240,6 +240,13 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 + def test_float32(self): + import numpypy as numpy + + assert numpy.float32.mro() == [numpy.float32, numpy.floating, numpy.inexact, numpy.number, numpy.generic, object] + + assert numpy.float32(12) == numpy.float64(12) + def test_float64(self): import numpypy as numpy diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -158,6 +158,13 @@ assert shape_agreement(self.space, [5, 2], [4, 3, 5, 2]) == [4, 3, 5, 2] + def test_calc_new_strides(self): + from pypy.module.micronumpy.interp_numarray import calc_new_strides + assert calc_new_strides([2, 4], [4, 2], [4, 2]) == [8, 2] + assert calc_new_strides([2, 4, 3], [8, 3], [1, 16]) == [1, 2, 16] + assert calc_new_strides([2, 3, 4], [8, 3], [1, 16]) is None + assert calc_new_strides([24], [2, 4, 3], [48, 6, 1]) is None + assert calc_new_strides([24], [2, 4, 3], [24, 6, 2]) == [2] class AppTestNumArray(BaseNumpyAppTest): def test_ndarray(self): @@ -216,8 +223,8 @@ assert a[2] == 4 def test_copy(self): - from numpypy import array - a = array(range(5)) + from numpypy import arange, array + a = arange(5) b = a.copy() for i in xrange(5): assert b[i] == a[i] @@ -227,6 +234,11 @@ a = array(1) assert a.copy() == a + a = arange(8) + b = a[::2] + c = b.copy() + assert (c == b).all() + def test_iterator_init(self): from numpypy import array a = array(range(5)) @@ -318,8 +330,8 @@ def test_scalar(self): from numpypy import array, dtype a = array(3) - #assert a[0] == 3 raises(IndexError, "a[0]") + raises(IndexError, "a[0] = 5") assert a.size == 1 assert a.shape == () assert a.dtype is dtype(int) @@ -339,6 +351,81 @@ c = a[:3] assert c.shape 
== (3,) + def test_set_shape(self): + from numpypy import array, zeros + a = array([]) + a.shape = [] + a = array(range(12)) + a.shape = (3, 4) + assert (a == [range(4), range(4, 8), range(8, 12)]).all() + a.shape = (3, 2, 2) + assert a[1, 1, 1] == 7 + a.shape = (3, -1, 2) + assert a.shape == (3, 2, 2) + a.shape = 12 + assert a.shape == (12, ) + exc = raises(ValueError, "a.shape = 10") + assert str(exc.value) == "total size of new array must be unchanged" + a = array(3) + a.shape = () + #numpy allows this + a.shape = (1,) + + def test_reshape(self): + from numpypy import array, zeros + a = array(range(12)) + exc = raises(ValueError, "b = a.reshape((3, 10))") + assert str(exc.value) == "total size of new array must be unchanged" + b = a.reshape((3, 4)) + assert b.shape == (3, 4) + assert (b == [range(4), range(4, 8), range(8, 12)]).all() + b[:, 0] = 1000 + assert (a == [1000, 1, 2, 3, 1000, 5, 6, 7, 1000, 9, 10, 11]).all() + a = zeros((4, 2, 3)) + a.shape = (12, 2) + + def test_slice_reshape(self): + from numpypy import zeros, arange + a = zeros((4, 2, 3)) + b = a[::2, :, :] + b.shape = (2, 6) + exc = raises(AttributeError, "b.shape = 12") + assert str(exc.value) == \ + "incompatible shape for a non-contiguous array" + b = a[::2, :, :].reshape((2, 6)) + assert b.shape == (2, 6) + b = arange(20)[1:17:2] + b.shape = (4, 2) + assert (b == [[1, 3], [5, 7], [9, 11], [13, 15]]).all() + c = b.reshape((2, 4)) + assert (c == [[1, 3, 5, 7], [9, 11, 13, 15]]).all() + + z = arange(96).reshape((12, -1)) + assert z.shape == (12, 8) + y = z.reshape((4, 3, 8)) + v = y[:, ::2, :] + w = y.reshape(96) + u = v.reshape(64) + assert y[1, 2, 1] == z[5, 1] + y[1, 2, 1] = 1000 + # z, y, w, v are views of eachother + assert z[5, 1] == 1000 + assert v[1, 1, 1] == 1000 + assert w[41] == 1000 + # u is not a view, it is a copy! 
+ assert u[25] == 41 + + a = zeros((5, 2)) + assert a.reshape(-1).shape == (10,) + + raises(ValueError, arange(10).reshape, (5, -1, -1)) + + def test_reshape_varargs(self): + from numpypy import arange + z = arange(96).reshape(12, -1) + y = z.reshape(4, 3, 8) + assert y.shape == (4, 3, 8) + def test_add(self): from numpypy import array a = array(range(5)) @@ -1027,6 +1114,16 @@ b = a[0].copy() assert (b == zeros(10)).all() + def test_array_interface(self): + from numpypy import array + a = array([1, 2, 3]) + i = a.__array_interface__ + assert isinstance(i['data'][0], int) + a = a[::2] + i = a.__array_interface__ + assert isinstance(i['data'][0], int) + raises(TypeError, getattr, array(3), '__array_interface__') + class AppTestSupport(BaseNumpyAppTest): def setup_class(cls): import struct @@ -1155,3 +1252,14 @@ a = arange(0, 0.8, 0.1) assert len(a) == 8 assert arange(False, True, True).dtype is dtype(int) + + +class AppTestRanges(BaseNumpyAppTest): + def test_app_reshape(self): + from numpypy import arange, array, dtype, reshape + a = arange(12) + b = reshape(a, (3, 4)) + assert b.shape == (3, 4) + a = range(12) + b = reshape(a, (3, 4)) + assert b.shape == (3, 4) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -8,13 +8,12 @@ from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.metainterp.warmspot import reset_stats -from pypy.module.micronumpy import interp_boxes, interp_ufuncs, signature -from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, - FloatObject, IntObject, BoolObject, Parser, InterpreterState) -from pypy.module.micronumpy.interp_numarray import (W_NDimArray, NDimSlice, +from pypy.module.micronumpy import interp_boxes +from pypy.module.micronumpy.compile import (FakeSpace, + IntObject, Parser, InterpreterState) +from pypy.module.micronumpy.interp_numarray import (W_NDimArray, BaseArray) from pypy.rlib.nonconst import NonConstant -from pypy.rpython.annlowlevel import llstr, hlstr class TestNumpyJIt(LLJitMixin): diff --git a/pypy/module/micronumpy/test/test_ztranslation.py b/pypy/module/micronumpy/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_ztranslation.py @@ -0,0 +1,5 @@ + +from pypy.objspace.fake.checkmodule import checkmodule + +def test_numpy_translates(): + checkmodule('micronumpy') diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -117,3 +117,14 @@ --TICK-- jump(p0, p1, p2, p3, p4, i35, p13, i7, descr=...) 
""") + + def test_floatlist_unpack_without_calls(self): + def fn(n): + l = [2.3, 3.4, 4.5] + for i in range(n): + x, y, z = l # ID: look + # + log = self.run(fn, [1000]) + loop, = log.loops_by_filename(self.filepath) + ops = loop.ops_by_id('look') + assert 'call' not in log.opnames(ops) diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -0,0 +1,27 @@ + +""" +Extra tests for the pure Python PyPy _collections module +(not used in normal PyPy's) +""" + +from pypy.conftest import gettestobjspace + +class AppTestcStringIO: + def test_copy(self): + import _collections + def f(): + return 42 + d = _collections.defaultdict(f, {2: 3}) + # + d1 = d.copy() + assert type(d1) is _collections.defaultdict + assert len(d1) == 1 + assert d1[2] == 3 + assert d1[3] == 42 + # + import copy + d2 = copy.deepcopy(d) + assert type(d2) is _collections.defaultdict + assert len(d2) == 1 + assert d2[2] == 3 + assert d2[3] == 42 diff --git a/pypy/objspace/fake/__init__.py b/pypy/objspace/fake/__init__.py --- a/pypy/objspace/fake/__init__.py +++ b/pypy/objspace/fake/__init__.py @@ -1,2 +0,0 @@ -from objspace import FakeObjSpace -Space = FakeObjSpace diff --git a/pypy/objspace/fake/checkmodule.py b/pypy/objspace/fake/checkmodule.py --- a/pypy/objspace/fake/checkmodule.py +++ b/pypy/objspace/fake/checkmodule.py @@ -1,108 +1,12 @@ -import re -from copy import copy -from pypy.tool.error import debug -from pypy.interpreter.argument import Arguments -from pypy.interpreter.gateway import interp2app -from pypy.rlib.nonconst import NonConstant +from pypy.objspace.fake.objspace import FakeObjSpace, W_Root -def my_import(name): - mod = __import__(name) - components = name.split('.') - for comp in components[1:]: - mod = getattr(mod, comp) - return mod -def find_gateways(modname, basepath, module): - identifier = r'[a-zA-Z0-9][a-zA-Z0-9_]*' - r_simplename = re.compile(r'(%s)[.](%s)$' % (identifier, identifier)) - res = [] - for name in module.interpleveldefs.values(): - match = r_simplename.match(name) - if match: - submod_name, obj_name = match.groups() - submod_name = '%s.%s.%s' % (basepath, modname, submod_name) - submod = my_import(submod_name) - obj = getattr(submod, obj_name) - res += find_gw_in_obj(obj) - return res - -def find_gw_in_obj(obj): - if hasattr(obj, 'typedef'): - typedef = obj.typedef - return [gw for gw in typedef.rawdict.values() - if isinstance(gw, interp2app)] - elif hasattr(obj, 'func_code'): - return [interp2app(obj)] - else: - assert False - -## Since the fake objspace is more a hack than a real object space, it -## happens that the annotator complains about operations that cannot -## succeed because it knows too much about the objects involved. For -## example, if it knows that a list is always empty, it will block -## each operations that tries to access that list. This is not what we -## want, because we know that with real objectspaces that operations -## will succeed. - -## As a workaround, we insert dummy rpython code (the function -## dummy_rpython) that manipulates the variables in order to give -## them a more sensible annotation. This is the preferred way to solve -## the problems so far. - -## If the solution above doesn't work, the alternative is to -## substitute the interpreter code with something that doesn't hurt -## the annotator. 
It's a very ugly hack, better solutions are welcome -## :-) - - -# dummy rpython code to give some variables more sensible annotations -def dummy_rpython(dummy_function): - # to make the annotator flow-in without executing the code - if NonConstant(False): - dummy_function.defs_w = [None] # else the annotator would see an always empty list - -def patch_pypy(): - from pypy.interpreter.baseobjspace import W_Root - - def descr_call_mismatch(self, space, opname, RequiredClass, args): - from pypy.interpreter.error import OperationError - msg = 'This message will never be displayed :-)' - raise OperationError(space.w_TypeError, space.wrap(msg)) - W_Root.descr_call_mismatch = descr_call_mismatch - - -def checkmodule(modname, backend, interactive=False, basepath='pypy.module'): - "Compile a fake PyPy module." - from pypy.objspace.fake.objspace import FakeObjSpace, W_Object - from pypy.translator.driver import TranslationDriver - +def checkmodule(modname): space = FakeObjSpace() - space.config.translating = True - ModuleClass = __import__(basepath + '.%s' % modname, - None, None, ['Module']).Module - module = ModuleClass(space, space.wrap(modname)) - w_moduledict = module.getdict(space) - - gateways = find_gateways(modname, basepath, module) - functions = [gw.__spacebind__(space) for gw in gateways] - arguments = Arguments.frompacked(space, W_Object(), W_Object()) - dummy_function = copy(functions[0]) - - def main(argv): # use the standalone mode not to allow SomeObject - dummy_rpython(dummy_function) - for func in functions: - func.call_args(arguments) - return 0 - - patch_pypy() - driver = TranslationDriver() - driver.setup(main, None) - try: - driver.proceed(['compile_' + backend]) - except SystemExit: - raise - except: - if not interactive: - raise - debug(driver) - raise SystemExit(1) + mod = __import__('pypy.module.%s' % modname, None, None, ['__doc__']) + # force computation and record what we wrap + module = mod.Module(space, W_Root()) + for name in module.loaders: + module._load_lazily(space, name) + # + space.translates(**{'translation.list_comprehension_operations':True}) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -1,147 +1,302 @@ -from pypy.interpreter.baseobjspace import ObjSpace, Wrappable, W_Root -from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.baseobjspace import W_Root, ObjSpace +from pypy.interpreter.baseobjspace import Wrappable, SpaceCache +from pypy.interpreter import argument, gateway +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.annotation.model import SomeInstance, s_None +from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.rpython.lltypesystem import lltype +from pypy.tool.sourcetools import compile2, func_with_new_name +from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.objectmodel import instantiate, we_are_translated from pypy.rlib.nonconst import NonConstant from pypy.rlib.rarithmetic import r_uint -from pypy.rlib.rbigint import rbigint +from pypy.translator.translator import TranslationContext +from pypy.tool.option import make_config -#class W_Type(W_Root): -# _attrs_ = () -class W_Object(W_Root): - _attrs_ = () -W_Object.typedef = TypeDef('foobar') +class W_MyObject(Wrappable): + typedef = None -def make_dummy(a=W_Object(), b=W_Object()): - def fn(*args): - if NonConstant(True): - return a - else: - return b - return fn + def getdict(self, space): + return w_obj_or_none() -int_dummy = 
make_dummy(42, 43) -float_dummy = make_dummy(42.0, 42.1) -uint_dummy = make_dummy(r_uint(42), r_uint(43)) -str_dummy = make_dummy('foo', 'bar') -bool_dummy = make_dummy(True, False) -unicode_dummy = make_dummy(u'abc', u'cde') -bigint_dummy = make_dummy(rbigint.fromint(0), rbigint.fromint(1)) + def getdictvalue(self, space, attr): + attr + "xx" # check that it's a string + return w_obj_or_none() + + def setdictvalue(self, space, attr, w_value): + attr + "xx" # check that it's a string + is_root(w_value) + return NonConstant(True) + + def deldictvalue(self, space, attr): + attr + "xx" # check that it's a string + return NonConstant(True) + + def setdict(self, space, w_dict): + is_root(w_dict) + + def setclass(self, space, w_subtype): + is_root(w_subtype) + + def str_w(self, space): + return NonConstant("foobar") + + def unicode_w(self, space): + return NonConstant(u"foobar") + + def int_w(self, space): + return NonConstant(-42) + + def uint_w(self, space): + return r_uint(NonConstant(42)) + + def bigint_w(self, space): + from pypy.rlib.rbigint import rbigint + return rbigint.fromint(NonConstant(42)) + + +def w_some_obj(): + if NonConstant(False): + return W_Root() + return W_MyObject() + +def w_obj_or_none(): + if NonConstant(False): + return None + return w_some_obj() + +def is_root(w_obj): + assert isinstance(w_obj, W_Root) +is_root.expecting = W_Root + +def is_arguments(arg): + assert isinstance(arg, argument.Arguments) +is_arguments.expecting = argument.Arguments + + +class Entry(ExtRegistryEntry): + _about_ = is_root, is_arguments + + def compute_result_annotation(self, s_w_obj): + cls = self.instance.expecting + s_inst = SomeInstance(self.bookkeeper.getuniqueclassdef(cls), + can_be_None=True) + assert s_inst.contains(s_w_obj) + return s_None + + def specialize_call(self, hop): + return hop.inputconst(lltype.Void, None) + +# ____________________________________________________________ + class FakeObjSpace(ObjSpace): - w_None = W_Object() - w_False = W_Object() - w_True = W_Object() - w_Ellipsis = W_Object() - w_NotImplemented = W_Object() - w_int = W_Object() - w_dict = W_Object() - w_float = W_Object() - w_long = W_Object() - w_tuple = W_Object() - w_str = W_Object() - w_basestring = W_Object() - w_unicode = W_Object() - w_type = W_Object() - w_instance = W_Object() - w_slice = W_Object() - w_hex = W_Object() - w_oct = W_Object() - - def initialize(self): - self.config.objspace.geninterp = False - self.config.objspace.disable_call_speedhacks = True - self.wrap_cache = {} - self.make_builtins() - def _freeze_(self): - return True + def __init__(self): + self._seen_extras = [] + ObjSpace.__init__(self) + + def float_w(self, w_obj): + is_root(w_obj) + return NonConstant(42.5) + + def is_true(self, w_obj): + is_root(w_obj) + return NonConstant(False) + + def unwrap(self, w_obj): + "NOT_RPYTHON" + raise NotImplementedError + + def newdict(self, module=False, instance=False, classofinstance=None, + strdict=False): + return w_some_obj() + + def newtuple(self, list_w): + for w_x in list_w: + is_root(w_x) + return w_some_obj() + + def newlist(self, list_w): + for w_x in list_w: + is_root(w_x) + return w_some_obj() + + def newslice(self, w_start, w_end, w_step): + is_root(w_start) + is_root(w_end) + is_root(w_step) + return w_some_obj() + + def newint(self, x): + return w_some_obj() + + def newfloat(self, x): + return w_some_obj() + + def marshal_w(self, w_obj): + "NOT_RPYTHON" + raise NotImplementedError def wrap(self, x): - if isinstance(x, Wrappable): - w_result = x.__spacebind__(self) - 
return w_result - return W_Object() + if not we_are_translated(): + if isinstance(x, gateway.interp2app): + self._see_interp2app(x) + if isinstance(x, GetSetProperty): + self._see_getsetproperty(x) + return w_some_obj() wrap._annspecialcase_ = "specialize:argtype(1)" - def unwrap(self, w_obj): - assert isinstance(w_obj, W_Object) - return None + def _see_interp2app(self, interp2app): + "NOT_RPYTHON" + activation = interp2app._code.activation + def check(): + scope_w = [w_some_obj()] * NonConstant(42) + w_result = activation._run(self, scope_w) + is_root(w_result) + check = func_with_new_name(check, 'check__' + interp2app.name) + self._seen_extras.append(check) - lookup = make_dummy() - allocate_instance = make_dummy() - getattr = make_dummy() - setattr = make_dummy() - getitem = make_dummy() - setitem = make_dummy() - delitem = make_dummy() - int_w = int_dummy - uint_w = uint_dummy - float_w = float_dummy - unicode_w = unicode_dummy - bigint_w = bigint_dummy - iter = make_dummy() - type = make_dummy() - str = make_dummy() - int = make_dummy() - float = make_dummy() - repr = make_dummy() - id = make_dummy() - len = make_dummy() - str_w = str_dummy - call_args = make_dummy() - new_interned_str = make_dummy() - newint = make_dummy() - newlong = make_dummy() - newfloat = make_dummy() - def newdict(self, module=False): - return self.newfloat() - newlist = make_dummy() - emptylist = make_dummy() - newtuple = make_dummy() - newslice = make_dummy() - lt = make_dummy() - le = make_dummy() - eq = make_dummy() - ne = make_dummy() - gt = make_dummy() - ge = make_dummy() - lt_w = bool_dummy - le_w = bool_dummy - eq_w = bool_dummy - ne_w = bool_dummy - gt_w = bool_dummy - ge_w = bool_dummy - is_w = bool_dummy - is_ = make_dummy() - next = make_dummy() - is_true = bool_dummy - nonzero = make_dummy() - issubtype = make_dummy() - ord = make_dummy() - hash = make_dummy() - delattr = make_dummy() # should return None? 
- contains = make_dummy() - hex = make_dummy() - oct = make_dummy() - pow = make_dummy() - inplace_pow = make_dummy() - cmp = make_dummy() + def _see_getsetproperty(self, getsetproperty): + "NOT_RPYTHON" + space = self + def checkprop(): + getsetproperty.fget(getsetproperty, space, w_some_obj()) + if getsetproperty.fset is not None: + getsetproperty.fset(getsetproperty, space, w_some_obj(), + w_some_obj()) + if getsetproperty.fdel is not None: + getsetproperty.fdel(getsetproperty, space, w_some_obj()) + if not getsetproperty.name.startswith('<'): + checkprop = func_with_new_name(checkprop, + 'checkprop__' + getsetproperty.name) + self._seen_extras.append(checkprop) - # XXsX missing operations - def coerce(self, *args): raise NotImplementedError("space.coerce()") - def get(self, *args): raise NotImplementedError("space.get()") - def set(self, *args): raise NotImplementedError("space.set()") - def delete(self, *args): raise NotImplementedError("space.delete()") - def userdel(self, *args): raise NotImplementedError("space.userdel()") - def marshal_w(self, *args):raise NotImplementedError("space.marshal_w()") + def call_obj_args(self, w_callable, w_obj, args): + is_root(w_callable) + is_root(w_obj) + is_arguments(args) + return w_some_obj() - gettypefor = make_dummy() - gettypeobject = make_dummy() - unpackiterable = make_dummy([W_Object()], [W_Object()]) + def call(self, w_callable, w_args, w_kwds=None): + is_root(w_callable) + is_root(w_args) + is_root(w_kwds) + return w_some_obj() + def call_function(self, w_func, *args_w): + is_root(w_func) + for w_arg in list(args_w): + is_root(w_arg) + return w_some_obj() -## Register all exceptions -import exceptions -for name in ObjSpace.ExceptionTable: - exc = getattr(exceptions, name) - setattr(FakeObjSpace, 'w_' + name, W_Object()) + def call_args(self, w_func, args): + is_root(w_func) + is_arguments(args) + return w_some_obj() + + def gettypefor(self, cls): + return self.gettypeobject(cls.typedef) + + def gettypeobject(self, typedef): + assert typedef is not None + return self.fromcache(TypeCache).getorbuild(typedef) + + def unpackiterable(self, w_iterable, expected_length=-1): + is_root(w_iterable) + if expected_length < 0: + expected_length = 3 + return [w_some_obj()] * expected_length + + def allocate_instance(self, cls, w_subtype): + is_root(w_subtype) + return instantiate(cls) + allocate_instance._annspecialcase_ = "specialize:arg(1)" + + def decode_index(self, w_index_or_slice, seqlength): + is_root(w_index_or_slice) + return (NonConstant(42), NonConstant(42), NonConstant(42)) + + def decode_index4(self, w_index_or_slice, seqlength): + is_root(w_index_or_slice) + return (NonConstant(42), NonConstant(42), + NonConstant(42), NonConstant(42)) + + def exec_(self, *args, **kwds): + pass + + # ---------- + + def translates(self, func=None, argtypes=None, **kwds): + config = make_config(None, **kwds) + if func is not None: + if argtypes is None: + nb_args = func.func_code.co_argcount + argtypes = [W_Root] * nb_args + # + t = TranslationContext(config=config) + self.t = t # for debugging + ann = t.buildannotator() + if func is not None: + ann.build_types(func, argtypes, complete_now=False) + # + # annotate all _seen_extras, knowing that annotating some may + # grow the list + done = 0 + while done < len(self._seen_extras): + print self._seen_extras + ann.build_types(self._seen_extras[done], [], + complete_now=False) + done += 1 + ann.complete() + #t.viewcg() + t.buildrtyper().specialize() + t.checkgraphs() + + +def setup(): + for name in 
(ObjSpace.ConstantTable + + ObjSpace.ExceptionTable + + ['int', 'str', 'float', 'long', 'tuple', 'list', + 'dict', 'unicode', 'complex', 'slice', 'bool', + 'type', 'basestring']): + setattr(FakeObjSpace, 'w_' + name, w_some_obj()) + # + for (name, _, arity, _) in ObjSpace.MethodTable: + args = ['w_%d' % i for i in range(arity)] + d = {'is_root': is_root, + 'w_some_obj': w_some_obj} + exec compile2("""\ + def meth(self, %s): + %s + return w_some_obj() + """ % (', '.join(args), + '; '.join(['is_root(%s)' % arg for arg in args]))) in d + meth = func_with_new_name(d['meth'], name) + setattr(FakeObjSpace, name, meth) + # + for name in ObjSpace.IrregularOpTable: + assert hasattr(FakeObjSpace, name) # missing? + +setup() + +# ____________________________________________________________ + +class TypeCache(SpaceCache): + def build(cache, typedef): + assert isinstance(typedef, TypeDef) + for value in typedef.rawdict.values(): + cache.space.wrap(value) + return w_some_obj() + +class FakeCompiler(object): + pass +FakeObjSpace.default_compiler = FakeCompiler() + +class FakeModule(object): + def get(self, name): + name + "xx" # check that it's a string + return w_some_obj() +FakeObjSpace.sys = FakeModule() +FakeObjSpace.sys.filesystemencoding = 'foobar' diff --git a/pypy/objspace/fake/test/__init__.py b/pypy/objspace/fake/test/__init__.py deleted file mode 100644 diff --git a/pypy/objspace/fake/test/test_checkmodule.py b/pypy/objspace/fake/test/test_checkmodule.py --- a/pypy/objspace/fake/test/test_checkmodule.py +++ b/pypy/objspace/fake/test/test_checkmodule.py @@ -1,7 +1,63 @@ import py -from pypy.objspace.fake.checkmodule import checkmodule +from pypy.objspace.fake.objspace import FakeObjSpace, is_root +from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter.gateway import interp2app, W_Root, ObjSpace -def test_dotnet(): - # the only module known to pass checkmodule is _dotnet so far - py.test.skip('fixme') - checkmodule('_dotnet', 'cli') + +def make_checker(): + check = [] + def see(): + check.append(True) + see._annspecialcase_ = 'specialize:memo' + return see, check + +def test_wrap_interp2app(): + see, check = make_checker() + space = FakeObjSpace() + assert len(space._seen_extras) == 0 + assert len(check) == 0 + space.wrap(interp2app(lambda space: see())) + assert len(space._seen_extras) == 1 + assert len(check) == 0 + space.translates() + assert len(check) == 1 + +def test_wrap_interp2app_int(): + see, check = make_checker() + def foobar(space, x, w_y, z): + is_root(w_y) + see() + return space.wrap(x - z) + space = FakeObjSpace() + space.wrap(interp2app(foobar, unwrap_spec=[ObjSpace, int, W_Root, int])) + space.translates() + assert check + +def test_wrap_GetSetProperty(): + see, check = make_checker() + def foobar(w_obj, space): + is_root(w_obj) + see() + return space.w_None + space = FakeObjSpace() + space.wrap(GetSetProperty(foobar)) + space.translates() + assert check + + +def test_gettypefor_untranslated(): + see, check = make_checker() + class W_Foo(Wrappable): + def do_it(self, space, w_x): + is_root(w_x) + see() + return W_Root() + W_Foo.typedef = TypeDef('foo', + __module__ = 'barmod', + do_it = interp2app(W_Foo.do_it)) + space = FakeObjSpace() + space.gettypefor(W_Foo) + assert not check + space.translates() + assert check diff --git a/pypy/objspace/fake/test/test_objspace.py b/pypy/objspace/fake/test/test_objspace.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/fake/test/test_objspace.py @@ 
-0,0 +1,74 @@ +import py +from pypy.objspace.fake.objspace import FakeObjSpace, W_Root +from pypy.interpreter.argument import Arguments +from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.baseobjspace import Wrappable +from pypy.rlib.unroll import unrolling_iterable + +def test_create(): + FakeObjSpace() + + +class TestTranslate: + def setup_method(self, meth): + self.space = FakeObjSpace() + + def test_simple(self): + space = self.space + space.translates(lambda w_x, w_y: space.add(w_x, w_y)) + + def test_methodtable(self): + space = self.space + for fixed_arity in [1, 2, 3, 4]: + # + methodtable = [name for (name, _, arity, _) in space.MethodTable + if arity == fixed_arity] + methodtable = unrolling_iterable(methodtable) + args_w = (W_Root(),) * fixed_arity + # + def f(): + for name in methodtable: + getattr(space, name)(*args_w) + # + space.translates(f) + + def test_newdict(self): + space = self.space + space.translates(lambda: (space.newdict(), + space.newdict(strdict=True))) + + def test_constants(self): + space = self.space + space.translates(lambda: (space.w_None, space.w_True, space.w_False, + space.w_int, space.w_str, + space.w_TypeError)) + + def test_wrap(self): + space = self.space + space.translates(lambda: (space.wrap(42), space.wrap(42.5), + space.wrap("foo"))) + + def test_call_args(self): + space = self.space + args = Arguments(space, [W_Root()]) + space.translates(lambda: space.call_args(W_Root(), args)) + + def test_gettypefor(self): + space = self.space + class W_Foo(Wrappable): + typedef = TypeDef("foo") + space.translates(lambda: space.gettypefor(W_Foo)) + + def test_is_true(self): + space = self.space + space.translates(lambda: space.is_true(W_Root())) + py.test.raises(AssertionError, + space.translates, lambda: space.is_true(42)) + + def test_unpackiterable(self): + space = self.space + space.translates(lambda: (space.unpackiterable(W_Root()), + space.unpackiterable(W_Root(), 42))) + + def test_newlist(self): + self.space.newlist([W_Root(), W_Root()]) diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -33,9 +33,6 @@ """Sequence iterator specialized for lists, accessing directly their RPython-level list of wrapped objects. """ - def __init__(w_self, w_seq): - W_AbstractSeqIterObject.__init__(w_self, w_seq) - w_self.w_seq = w_seq class W_FastTupleIterObject(W_AbstractSeqIterObject): """Sequence iterator specialized for tuples, accessing diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -9,8 +9,9 @@ from pypy.interpreter import gateway, baseobjspace from pypy.rlib.objectmodel import instantiate, specialize from pypy.rlib.listsort import make_timsort_class -from pypy.rlib import rerased, jit +from pypy.rlib import rerased, jit, debug from pypy.interpreter.argument import Signature +from pypy.tool.sourcetools import func_with_new_name UNROLL_CUTOFF = 5 @@ -170,6 +171,19 @@ share with the storage, if possible.""" return self.strategy.getitems(self) + def getitems_fixedsize(self): + """Returns a fixed-size list of all items after wrapping them.""" + l = self.strategy.getitems_fixedsize(self) + debug.make_sure_not_resized(l) + return l + + def getitems_unroll(self): + """Returns a fixed-size list of all items after wrapping them. The JIT + will fully unroll this function. 
""" + l = self.strategy.getitems_unroll(self) + debug.make_sure_not_resized(l) + return l + def getitems_copy(self): """Returns a copy of all items in the list. Same as getitems except for ObjectListStrategy.""" @@ -366,6 +380,8 @@ def getitems_copy(self, w_list): return [] + getitems_fixedsize = func_with_new_name(getitems_copy, "getitems_fixedsize") + getitems_unroll = getitems_fixedsize def getstorage_copy(self, w_list): return self.erase(None) @@ -496,7 +512,6 @@ # tuple is unmutable return w_list.lstorage - @specialize.arg(2) def _getitems_range(self, w_list, wrap_items): l = self.unerase(w_list.lstorage) @@ -519,6 +534,13 @@ return r + @jit.dont_look_inside + def getitems_fixedsize(self, w_list): + return self._getitems_range_unroll(w_list, True) + def getitems_unroll(self, w_list): + return self._getitems_range_unroll(w_list, True) + _getitems_range_unroll = jit.unroll_safe(func_with_new_name(_getitems_range, "_getitems_range_unroll")) + def getslice(self, w_list, start, stop, step, length): v = self.unerase(w_list.lstorage) old_start = v[0] @@ -676,6 +698,13 @@ def getitems_copy(self, w_list): return [self.wrap(item) for item in self.unerase(w_list.lstorage)] + @jit.unroll_safe + def getitems_unroll(self, w_list): + return [self.wrap(item) for item in self.unerase(w_list.lstorage)] + @jit.dont_look_inside + def getitems_fixedsize(self, w_list): + return self.getitems_unroll(w_list) + def getstorage_copy(self, w_list): items = self.unerase(w_list.lstorage)[:] return self.erase(items) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -408,8 +408,10 @@ if isinstance(w_obj, W_TupleObject): t = w_obj.wrappeditems elif isinstance(w_obj, W_ListObject): - # XXX this can copy twice - t = w_obj.getitems()[:] + if unroll: + t = w_obj.getitems_unroll() + else: + t = w_obj.getitems_fixedsize() else: if unroll: return make_sure_not_resized(ObjSpace.unpackiterable_unroll( diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -48,6 +48,46 @@ for i in range(7): assert self.space.eq_w(l[i], l2[i]) + def test_getitems_fixedsize(self): + w = self.space.wrap + from pypy.objspace.std.listobject import make_range_list + rangelist = make_range_list(self.space, 1,1,7) + emptylist = W_ListObject(self.space, []) + intlist = W_ListObject(self.space, [w(1),w(2),w(3),w(4),w(5),w(6),w(7)]) + strlist = W_ListObject(self.space, [w('1'),w('2'),w('3'),w('4'),w('5'),w('6'),w('7')]) + floatlist = W_ListObject(self.space, [w(1.0),w(2.0),w(3.0),w(4.0),w(5.0),w(6.0),w(7.0)]) + objlist = W_ListObject(self.space, [w(1),w('2'),w(3.0),w(4),w(5),w(6),w(7)]) + + emptylist_copy = emptylist.getitems_fixedsize() + assert emptylist_copy == [] + + rangelist_copy = rangelist.getitems_fixedsize() + intlist_copy = intlist.getitems_fixedsize() + strlist_copy = strlist.getitems_fixedsize() + floatlist_copy = floatlist.getitems_fixedsize() + objlist_copy = objlist.getitems_fixedsize() + for i in range(7): + assert self.space.eq_w(rangelist_copy[i], rangelist.getitem(i)) + assert self.space.eq_w(intlist_copy[i], intlist.getitem(i)) + assert self.space.eq_w(strlist_copy[i], strlist.getitem(i)) + assert self.space.eq_w(floatlist_copy[i], floatlist.getitem(i)) + assert self.space.eq_w(objlist_copy[i], objlist.getitem(i)) + + emptylist_copy = emptylist.getitems_unroll() + assert emptylist_copy == [] + + 
rangelist_copy = rangelist.getitems_unroll() + intlist_copy = intlist.getitems_unroll() + strlist_copy = strlist.getitems_unroll() + floatlist_copy = floatlist.getitems_unroll() + objlist_copy = objlist.getitems_unroll() + for i in range(7): + assert self.space.eq_w(rangelist_copy[i], rangelist.getitem(i)) + assert self.space.eq_w(intlist_copy[i], intlist.getitem(i)) + assert self.space.eq_w(strlist_copy[i], strlist.getitem(i)) + assert self.space.eq_w(floatlist_copy[i], floatlist.getitem(i)) + assert self.space.eq_w(objlist_copy[i], objlist.getitem(i)) + def test_random_getitem(self): w = self.space.wrap s = list('qedx387tn3uixhvt 7fh387fymh3dh238 dwd-wq.dwq9') diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -418,7 +418,7 @@ if _MSVC: def invalid_socket(fd): return fd == INVALID_SOCKET - INVALID_SOCKET = intmask(cConfig.INVALID_SOCKET) + INVALID_SOCKET = r_uint(cConfig.INVALID_SOCKET) else: def invalid_socket(fd): return fd < 0 diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -639,11 +639,11 @@ return dlsym(self.lib, name) class CDLL(RawCDLL): - def __init__(self, libname): + def __init__(self, libname, mode=-1): """Load the library, or raises DLOpenError.""" RawCDLL.__init__(self, rffi.cast(DLLHANDLE, -1)) with rffi.scoped_str2charp(libname) as ll_libname: - self.lib = dlopen(ll_libname) + self.lib = dlopen(ll_libname, mode) def __del__(self): if self.lib != rffi.cast(DLLHANDLE, -1): diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -393,11 +393,11 @@ # XXX: it partially duplicate the code in clibffi.py class CDLL(object): - def __init__(self, libname): + def __init__(self, libname, mode=-1): """Load the library, or raises DLOpenError.""" self.lib = rffi.cast(DLLHANDLE, 0) with rffi.scoped_str2charp(libname) as ll_libname: - self.lib = dlopen(ll_libname) + self.lib = dlopen(ll_libname, mode) def __del__(self): if self.lib: diff --git a/pypy/rlib/rdynload.py b/pypy/rlib/rdynload.py --- a/pypy/rlib/rdynload.py +++ b/pypy/rlib/rdynload.py @@ -87,9 +87,10 @@ """ if mode == -1: if RTLD_LOCAL is not None: - mode = RTLD_LOCAL | RTLD_NOW + mode = RTLD_LOCAL else: - mode = RTLD_NOW + mode = 0 + mode |= RTLD_NOW res = c_dlopen(name, rffi.cast(rffi.INT, mode)) if not res: err = dlerror() diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -420,7 +420,11 @@ low, high = _get_file_size(self.file_handle) if not high and low <= sys.maxint: return low + # not so sure if the signed/unsigned strictness is a good idea: + high = rffi.cast(lltype.Unsigned, high) + low = rffi.cast(lltype.Unsigned, low) size = (high << 32) + low + size = rffi.cast(lltype.Signed, size) elif _POSIX: st = os.fstat(self.fd) size = st[stat.ST_SIZE] diff --git a/pypy/rlib/rsocket.py b/pypy/rlib/rsocket.py --- a/pypy/rlib/rsocket.py +++ b/pypy/rlib/rsocket.py @@ -20,6 +20,7 @@ from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.rffi import sizeof, offsetof +INVALID_SOCKET = _c.INVALID_SOCKET def mallocbuf(buffersize): return lltype.malloc(rffi.CCHARP.TO, buffersize, flavor='raw') diff --git a/pypy/rlib/test/test_libffi.py b/pypy/rlib/test/test_libffi.py --- a/pypy/rlib/test/test_libffi.py +++ b/pypy/rlib/test/test_libffi.py @@ -443,3 +443,4 @@ assert p[1] == 34 lltype.free(p, flavor='raw') 
lltype.free(ffi_point_struct, flavor='raw') + diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -112,7 +112,7 @@ rffi.LONGLONG: ctypes.c_longlong, rffi.ULONGLONG: ctypes.c_ulonglong, rffi.SIZE_T: ctypes.c_size_t, - lltype.Bool: getattr(ctypes, "c_bool", ctypes.c_long), + lltype.Bool: getattr(ctypes, "c_bool", ctypes.c_byte), llmemory.Address: ctypes.c_void_p, llmemory.GCREF: ctypes.c_void_p, llmemory.WeakRef: ctypes.c_void_p, # XXX diff --git a/pypy/rpython/tool/rffi_platform.py b/pypy/rpython/tool/rffi_platform.py --- a/pypy/rpython/tool/rffi_platform.py +++ b/pypy/rpython/tool/rffi_platform.py @@ -56,6 +56,12 @@ DEFINED = DefinedConstantDouble(macro) return configure(CConfig)['DEFINED'] +def getdefinedinteger(macro, c_header_source): + class CConfig: + _compilation_info_ = eci_from_header(c_header_source) + DEFINED = DefinedConstantInteger(macro) + return configure(CConfig)['DEFINED'] + def has(name, c_header_source, include_dirs=None): class CConfig: _compilation_info_ = eci_from_header(c_header_source, include_dirs) diff --git a/pypy/rpython/tool/test/test_rffi_platform.py b/pypy/rpython/tool/test/test_rffi_platform.py --- a/pypy/rpython/tool/test/test_rffi_platform.py +++ b/pypy/rpython/tool/test/test_rffi_platform.py @@ -108,6 +108,12 @@ '#define ALFKJLKJFLKJFKLEJDLKEWMECEE') assert res +def test_defined_constant(): + res = rffi_platform.getdefineddouble('ABCDFGH', '#define ABCDFGH 2.0') + assert res == 2.0 + res = rffi_platform.getdefinedinteger('ABCDFGH', '#define ABCDFGH 2') + assert res == 2 + def test_defined_constant_float(): value = rffi_platform.getdefineddouble('BLAH', '#define BLAH 1.0') assert value == 1.0 diff --git a/pypy/translator/c/test/test_typed.py b/pypy/translator/c/test/test_typed.py --- a/pypy/translator/c/test/test_typed.py +++ b/pypy/translator/c/test/test_typed.py @@ -275,6 +275,14 @@ fn = self.getcompiled(f, [r_longlong]) assert fn(0) == 0 + def test_upcast_int(self): + from pypy.rpython.lltypesystem import rffi + def f(v): + v = rffi.cast(rffi.USHORT, v) + return intmask(v) + fn = self.getcompiled(f, [int]) + assert fn(0x1234CDEF) == 0xCDEF + def test_function_ptr(self): def f1(): return 1 diff --git a/pypy/translator/translator.py b/pypy/translator/translator.py --- a/pypy/translator/translator.py +++ b/pypy/translator/translator.py @@ -150,11 +150,11 @@ from pypy.translator.tool.graphpage import FlowGraphPage FlowGraphPage(self).display() - def viewcg(self, center_graph=None): + def viewcg(self, center_graph=None, huge=100): """Shows the whole call graph and the class hierarchy, based on the computed annotations.""" from pypy.translator.tool.graphpage import TranslatorPage - TranslatorPage(self, center_graph=center_graph).display() + TranslatorPage(self, center_graph=center_graph, huge=huge).display() From noreply at buildbot.pypy.org Sat Dec 10 12:53:02 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 10 Dec 2011 12:53:02 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: hg merge default Message-ID: <20111210115302.4AD0682ABD@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50341:8474a7603bbe Date: 2011-12-10 12:52 +0100 http://bitbucket.org/pypy/pypy/changeset/8474a7603bbe/ Log: hg merge default diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -487,6 +487,16 @@ 
'parser', 'fcntl', '_codecs', 'binascii' ] + # These modules are treated like CPython treats built-in modules, + # i.e. they always shadow any xx.py. The other modules are treated + # like CPython treats extension modules, and are loaded in sys.path + # order by the fake entry '.../lib_pypy/__extensions__'. + MODULES_THAT_ALWAYS_SHADOW = dict.fromkeys([ + '__builtin__', '__pypy__', '_ast', '_codecs', '_sre', '_warnings', + '_weakref', 'errno', 'exceptions', 'gc', 'imp', 'marshal', + 'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport', + ], None) + def make_builtins(self): "NOT_RPYTHON: only for initializing the space." diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -16,28 +16,53 @@ """ Manage frame positions """ def __init__(self): - self.frame_bindings = {} - self.frame_depth = 0 + self.bindings = {} + self.used = [] # list of bools + self.hint_frame_locations = {} + + frame_depth = property(lambda:xxx, lambda:xxx) # XXX kill me + + def get_frame_depth(self): + return len(self.used) def get(self, box): - return self.frame_bindings.get(box, None) + return self.bindings.get(box, None) def loc(self, box): + """Return or create the frame location associated with 'box'.""" + # first check if it's already in the frame_manager try: - return self.frame_bindings[box] + return self.bindings[box] except KeyError: - return self.get_new_loc(box) + pass + # check if we have a hint for this box + if box in self.hint_frame_locations: + # if we do, try to reuse the location for this box + loc = self.hint_frame_locations[box] + if self.try_to_reuse_location(box, loc): + return loc + # no valid hint. make up a new free location + return self.get_new_loc(box) def get_new_loc(self, box): size = self.frame_size(box.type) - self.frame_depth += ((-self.frame_depth) & (size-1)) - # ^^^ frame_depth is rounded up to a multiple of 'size', assuming + # frame_depth is rounded up to a multiple of 'size', assuming # that 'size' is a power of two. The reason for doing so is to # avoid obscure issues in jump.py with stack locations that try # to move from position (6,7) to position (7,8). 
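# (Note on the rounding: with 'size' a power of two, 'depth & (size - 1)'
# equals 'depth % size', so the new loop below simply pads self.used with
# unused slots until the frame depth is a multiple of 'size'; e.g. a FLOAT
# of size 2 then always starts at an even index.)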
- newloc = self.frame_pos(self.frame_depth, box.type) - self.frame_bindings[box] = newloc - self.frame_depth += size + while self.get_frame_depth() & (size - 1): + self.used.append(False) + # + index = self.get_frame_depth() + newloc = self.frame_pos(index, box.type) + for i in range(size): + self.used.append(True) + # + if not we_are_translated(): # extra testing + testindex = self.get_loc_index(newloc) + assert testindex == index + # + self.bindings[box] = newloc return newloc def forget_frame_allocation(self, box): @@ -46,11 +71,52 @@ except KeyError: pass + def set_binding(self, box, loc): + self.bindings[box] = loc + # + index = self.get_loc_index(loc) + endindex = index + self.frame_size(box.type) + while len(self.used) < endindex: + self.used.append(False) + while index < endindex: + self.used[index] = True + index += 1 + def reserve_location_in_frame(self, size): - frame_depth = self.frame_depth - self.frame_depth += size + frame_depth = self.get_frame_depth() + for i in range(size): + self.used.append(True) return frame_depth + def mark_as_free(self, box): + try: + loc = self.bindings[box] + except KeyError: + return # already gone + del self.bindings[box] + # + size = self.frame_size(box.type) + baseindex = self.get_loc_index(loc) + for i in range(size): + index = baseindex + i + assert 0 <= index < len(self.used) + self.used[index] = False + + def try_to_reuse_location(self, box, loc): + index = self.get_loc_index(loc) + assert index >= 0 + size = self.frame_size(box.type) + for i in range(size): + while (index + i) >= len(self.used): + self.used.append(False) + if self.used[index + i]: + return False # already in use + # good, we can reuse the location + for i in range(size): + self.used[index + i] = True + self.bindings[box] = loc + return True + # abstract methods that need to be overwritten for specific assemblers @staticmethod def frame_pos(loc, type): @@ -58,6 +124,10 @@ @staticmethod def frame_size(type): return 1 + @staticmethod + def get_loc_index(loc): + raise NotImplementedError("Purely abstract") + class RegisterManager(object): """ Class that keeps track of register allocations @@ -76,8 +146,6 @@ self.position = -1 self.frame_manager = frame_manager self.assembler = assembler - self.hint_frame_locations = {} # {Box: StackLoc} - self.freed_frame_locations = {} # {StackLoc: None} def is_still_alive(self, v): # Check if 'v' is alive at the current position. @@ -109,9 +177,7 @@ self.free_regs.append(self.reg_bindings[v]) del self.reg_bindings[v] if self.frame_manager is not None: - if v in self.frame_manager.frame_bindings: - loc = self.frame_manager.frame_bindings[v] - self.freed_frame_locations[loc] = None + self.frame_manager.mark_as_free(v) def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. @@ -183,23 +249,6 @@ self.reg_bindings[v] = loc return loc - def _frame_loc(self, v): - # first check if it's already in the frame_manager - try: - return self.frame_manager.frame_bindings[v] - except KeyError: - pass - # check if we have a hint for this box - if v in self.hint_frame_locations: - # if we do, check that the hinted location is known to be free - loc = self.hint_frame_locations[v] - if loc in self.freed_frame_locations: - del self.freed_frame_locations[loc] - self.frame_manager.frame_bindings[v] = loc - return loc - # no valid hint. 
make up a new free location - return self.frame_manager.get_new_loc(v) - def _spill_var(self, v, forbidden_vars, selected_reg, need_lower_byte=False): v_to_spill = self._pick_variable_to_spill(v, forbidden_vars, @@ -207,7 +256,7 @@ loc = self.reg_bindings[v_to_spill] del self.reg_bindings[v_to_spill] if self.frame_manager.get(v_to_spill) is None: - newloc = self._frame_loc(v_to_spill) + newloc = self.frame_manager.loc(v_to_spill) self.assembler.regalloc_mov(loc, newloc) return loc @@ -284,7 +333,7 @@ except KeyError: if box in self.bindings_to_frame_reg: return self.frame_reg - return self._frame_loc(box) + return self.frame_manager.loc(box) def return_constant(self, v, forbidden_vars=[], selected_reg=None): """ Return the location of the constant v. If 'selected_reg' is @@ -332,7 +381,7 @@ self.reg_bindings[v] = loc self.assembler.regalloc_mov(prev_loc, loc) else: - loc = self._frame_loc(v) + loc = self.frame_manager.loc(v) self.assembler.regalloc_mov(prev_loc, loc) def force_result_in_reg(self, result_v, v, forbidden_vars=[]): @@ -351,7 +400,7 @@ self.reg_bindings[result_v] = loc return loc if v not in self.reg_bindings: - prev_loc = self._frame_loc(v) + prev_loc = self.frame_manager.loc(v) loc = self.force_allocate_reg(v, forbidden_vars) self.assembler.regalloc_mov(prev_loc, loc) assert v in self.reg_bindings @@ -371,7 +420,7 @@ def _sync_var(self, v): if not self.frame_manager.get(v): reg = self.reg_bindings[v] - to = self._frame_loc(v) + to = self.frame_manager.loc(v) self.assembler.regalloc_mov(reg, to) # otherwise it's clean diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -42,8 +42,13 @@ def frame_size(self, box_type): if box_type == FLOAT: return 2 + elif box_type == INT: + return 1 else: - return 1 + raise ValueError(box_type) + def get_loc_index(self, loc): + assert isinstance(loc, FakeFramePos) + return loc.pos class MockAsm(object): def __init__(self): @@ -282,7 +287,7 @@ rm.force_allocate_reg(b) rm.before_call() assert len(rm.reg_bindings) == 2 - assert fm.frame_depth == 2 + assert fm.get_frame_depth() == 2 assert len(asm.moves) == 2 rm._check_invariants() rm.after_call(boxes[-1]) @@ -305,7 +310,7 @@ rm.force_allocate_reg(b) rm.before_call(save_all_regs=True) assert len(rm.reg_bindings) == 0 - assert fm.frame_depth == 4 + assert fm.get_frame_depth() == 4 assert len(asm.moves) == 4 rm._check_invariants() rm.after_call(boxes[-1]) @@ -327,7 +332,7 @@ xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm) xrm.loc(f0) rm.loc(b0) - assert fm.frame_depth == 3 + assert fm.get_frame_depth() == 3 @@ -351,20 +356,14 @@ def test_hint_frame_locations_1(self): - b0, b1 = newboxes(0, 1) - longevity = {b0: (0, 1), b1: (0, 1)} + b0, = newboxes(0) fm = TFrameManager() - asm = MockAsm() - rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) - rm.hint_frame_locations[b0] = "some_stack_loc" - rm.freed_frame_locations["some_stack_loc"] = None - rm.force_allocate_reg(b0) - rm.force_allocate_reg(b1) - rm.force_spill_var(b0) - rm.force_spill_var(b1) - assert rm.loc(b0) == "some_stack_loc" - assert isinstance(rm.loc(b1), FakeFramePos) - rm._check_invariants() + loc123 = FakeFramePos(123, INT) + fm.hint_frame_locations[b0] = loc123 + assert fm.get_frame_depth() == 0 + loc = fm.loc(b0) + assert loc == loc123 + assert fm.get_frame_depth() == 124 def test_hint_frame_locations_2(self): b0, b1, b2 = 
newboxes(0, 1, 2) @@ -378,20 +377,99 @@ rm.force_spill_var(b0) loc = rm.loc(b0) assert isinstance(loc, FakeFramePos) + assert fm.get_loc_index(loc) == 0 rm.position = 1 - assert loc not in rm.freed_frame_locations + assert fm.used == [True] rm.possibly_free_var(b0) - assert loc in rm.freed_frame_locations + assert fm.used == [False] # - rm.hint_frame_locations[b1] = loc + fm.hint_frame_locations[b1] = loc rm.force_spill_var(b1) loc1 = rm.loc(b1) - assert loc1 is loc - assert rm.freed_frame_locations == {} + assert loc1 == loc + assert fm.used == [True] # - rm.hint_frame_locations[b2] = loc + fm.hint_frame_locations[b2] = loc rm.force_spill_var(b2) loc2 = rm.loc(b2) - assert loc2 is not loc1 # because it's not in freed_frame_locations + assert loc2 != loc1 # because it was not free + assert fm.used == [True, True] # rm._check_invariants() + + def test_frame_manager_basic(self): + b0, b1 = newboxes(0, 1) + fm = TFrameManager() + loc0 = fm.loc(b0) + assert fm.get_loc_index(loc0) == 0 + # + assert fm.get(b1) is None + loc1 = fm.loc(b1) + assert fm.get_loc_index(loc1) == 1 + assert fm.get(b1) == loc1 + # + loc0b = fm.loc(b0) + assert loc0b == loc0 + # + fm.loc(BoxInt()) + assert fm.get_frame_depth() == 3 + # + f0 = BoxFloat() + locf0 = fm.loc(f0) + assert fm.get_loc_index(locf0) == 4 + assert fm.get_frame_depth() == 6 + # + f1 = BoxFloat() + locf1 = fm.loc(f1) + assert fm.get_loc_index(locf1) == 6 + assert fm.get_frame_depth() == 8 + assert fm.used == [True, True, True, False, True, True, True, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(f1) + assert fm.used == [False, True, True, False, True, True, False, False] + # + fm.reserve_location_in_frame(1) + assert fm.get_frame_depth() == 9 + assert fm.used == [False, True, True, False, True, True, False, False, True] + # + assert b0 not in fm.bindings + fm.set_binding(b0, loc0) + assert b0 in fm.bindings + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + b3 = BoxInt() + assert not fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) # already free + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b3) + assert fm.used == [False, True, True, False, True, True, False, False, True] + f3 = BoxFloat() + assert not fm.try_to_reuse_location(f3, fm.frame_pos(0, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(1, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(2, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(3, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(4, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(5, FLOAT)) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(f3, fm.frame_pos(6, FLOAT)) + assert fm.used == [False, True, True, False, True, True, True, True, True] + # + fm.used = [False] + assert fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True, True] + # + fm.used = [True] + assert not fm.try_to_reuse_location(BoxFloat(), 
fm.frame_pos(0, FLOAT)) + assert fm.used == [True] diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -702,7 +702,7 @@ regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging - frame_depth = regalloc.fm.frame_depth + frame_depth = regalloc.fm.get_frame_depth() param_depth = regalloc.param_depth jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -139,6 +139,10 @@ return 2 else: return 1 + @staticmethod + def get_loc_index(loc): + assert isinstance(loc, StackLoc) + return loc.position if WORD == 4: gpr_reg_mgr_cls = X86RegisterManager @@ -185,7 +189,6 @@ allgcrefs): operations, _ = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) - self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] return operations @@ -308,7 +311,7 @@ self.xrm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) else: if isinstance(loc, RegLoc): if loc is ebp: @@ -317,7 +320,7 @@ self.rm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) self.rm.free_regs = [] for reg in self.rm.all_regs: if reg not in used: @@ -353,7 +356,7 @@ def get_current_depth(self): # return (self.fm.frame_depth, self.param_depth), but trying to share # the resulting tuple among several calls - arg0 = self.fm.frame_depth + arg0 = self.fm.get_frame_depth() arg1 = self.param_depth result = self.assembler._current_depths_cache if result[0] != arg0 or result[1] != arg1: @@ -1335,12 +1338,12 @@ loc = nonfloatlocs[i] if isinstance(loc, StackLoc): assert box.type != FLOAT - self.rm.hint_frame_locations[box] = loc + self.fm.hint_frame_locations[box] = loc else: loc = floatlocs[i] if isinstance(loc, StackLoc): assert box.type == FLOAT - self.xrm.hint_frame_locations[box] = loc + self.fm.hint_frame_locations[box] = loc def consider_jump(self, op): assembler = self.assembler @@ -1386,7 +1389,7 @@ def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): shape = gcrootmap.get_basic_shape(IS_X86_64) - for v, val in self.fm.frame_bindings.items(): + for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) gcrootmap.add_frame_offset(shape, get_ebp_ofs(val.position)) diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -44,6 +44,7 @@ i5 = int_add(i4, 1) i6 = int_add(i5, 1) i7 = int_add(i5, i4) + force_spill(i5) i8 = int_add(i7, 1) i9 = int_add(i8, 1) finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2) @@ -51,10 +52,9 @@ bridge = self.attach_bridge(ops, loop, -2) descr = loop.operations[3].getdescr() new = descr._x86_bridge_frame_depth - assert descr._x86_bridge_param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? 
- if IS_X86_32: - assert new > previous + assert descr._x86_bridge_param_depth == 0 + # the force_spill() forces the stack to grow + assert new > previous self.cpu.set_future_value_int(0, 0) fail = self.run(loop) assert fail.identifier == 2 @@ -109,6 +109,9 @@ i8 = int_add(i3, 1) i6 = int_add(i8, i10) i7 = int_add(i3, i6) + force_spill(i6) + force_spill(i7) + force_spill(i8) i12 = int_add(i7, i8) i11 = int_add(i12, i6) jump(i3, i12, i11, i10, i6, i7, descr=targettoken) @@ -117,9 +120,8 @@ guard_op = loop.operations[6] loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? - if IS_X86_32: - assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth + # the force_spill() forces the stack to grow + assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth assert guard_op.getdescr()._x86_bridge_param_depth == 0 self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 0) diff --git a/pypy/jit/metainterp/test/test_del.py b/pypy/jit/metainterp/test/test_del.py --- a/pypy/jit/metainterp/test/test_del.py +++ b/pypy/jit/metainterp/test/test_del.py @@ -1,5 +1,7 @@ import py -from pypy.rlib.jit import JitDriver +from pypy.rlib.jit import JitDriver, dont_look_inside +from pypy.rlib.objectmodel import keepalive_until_here +from pypy.rlib import rgc from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin @@ -80,6 +82,47 @@ assert res == 1 self.check_resops(call=1) # for the case B(), but not for the case A() + def test_keepalive(self): + py.test.skip("XXX fails") # hum, I think the test itself is broken + # + mydriver = JitDriver(reds = ['n', 'states'], greens = []) + class State: + num = 1 + class X: + def __init__(self, state): + self.state = state + def __del__(self): + self.state.num += 1 + @dont_look_inside + def do_stuff(): + pass + def f(n): + states = [] + while n > 0: + mydriver.jit_merge_point(n=n, states=states) + state = State() + states.append(state) + x = X(state) + do_stuff() + state.num *= 1000 + do_stuff() + keepalive_until_here(x) + n -= 1 + return states + def main(n): + states = f(n) + rgc.collect() + rgc.collect() + err = 1001 + for state in states: + if state.num != 1001: + err = state.num + print 'ERROR:', err + return err + assert main(20) == 1001 + res = self.meta_interp(main, [20]) + assert res == 1001 + class TestLLtype(DelTests, LLJitMixin): def test_signal_action(self): diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -1,3 +1,4 @@ +from __future__ import with_statement import py from pypy.jit.metainterp.test.support import LLJitMixin diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -864,6 +864,7 @@ elif sys.platform.startswith('linux'): compile_extra.append("-Werror=implicit-function-declaration") export_symbols_eci.append('pypyAPI') + compile_extra.append('-g') else: kwds["includes"] = ['Python.h'] # this is our Python.h diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -483,10 +483,20 @@ # XXX Check for frozen modules? 
# when w_path is a string + delayed_builtin = None + w_lib_extensions = None + if w_path is None: # check the builtin modules if modulename in space.builtin_modules: - return FindInfo(C_BUILTIN, modulename, None) + delayed_builtin = FindInfo(C_BUILTIN, modulename, None) + # a "real builtin module xx" shadows every file "xx.py" there + # could possibly be; a "pseudo-extension module" does not, and + # is only loaded at the point in sys.path where we find + # '.../lib_pypy/__extensions__'. + if modulename in space.MODULES_THAT_ALWAYS_SHADOW: + return delayed_builtin + w_lib_extensions = space.sys.get_state(space).w_lib_extensions w_path = space.sys.get('path') # XXX check frozen modules? @@ -495,6 +505,9 @@ if w_path is not None: for w_pathitem in space.unpackiterable(w_path): # sys.path_hooks import hook + if (w_lib_extensions is not None and + space.eq_w(w_pathitem, w_lib_extensions)): + return delayed_builtin if use_loader: w_loader = find_in_path_hooks(space, w_modulename, w_pathitem) if w_loader: @@ -527,7 +540,7 @@ # Out of file descriptors. # not found - return None + return delayed_builtin def _prepare_module(space, w_mod, filename, pkgdir): w = space.wrap diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -38,6 +38,8 @@ test_reload = "def test():\n raise ValueError\n", infinite_reload = "import infinite_reload; reload(infinite_reload)", del_sys_module = "import sys\ndel sys.modules['del_sys_module']\n", + itertools = "hello_world = 42\n", + gc = "should_never_be_seen = 42\n", ) root.ensure("notapackage", dir=1) # empty, no __init__.py setuppkg("pkg", @@ -147,6 +149,8 @@ class AppTestImport: def setup_class(cls): # interpreter-level + cls.space = gettestobjspace(usemodules=['itertools']) + cls.w_runappdirect = cls.space.wrap(conftest.option.runappdirect) cls.saved_modules = _setup(cls.space) #XXX Compile class @@ -571,6 +575,50 @@ else: assert False, 'should not work' + def test_shadow_builtin(self): + if self.runappdirect: skip("hard to test: module is already imported") + # 'import gc' is supposed to always find the built-in module; + # like CPython, it is a built-in module, so it shadows everything, + # even though there is a gc.py. + import sys + assert 'gc' not in sys.modules + import gc + assert not hasattr(gc, 'should_never_be_seen') + assert '(built-in)' in repr(gc) + del sys.modules['gc'] + + def test_shadow_extension_1(self): + if self.runappdirect: skip("hard to test: module is already imported") + # 'import itertools' is supposed to find itertools.py if there is + # one in sys.path. + import sys + assert 'itertools' not in sys.modules + import itertools + assert hasattr(itertools, 'hello_world') + assert not hasattr(itertools, 'count') + assert '(built-in)' not in repr(itertools) + del sys.modules['itertools'] + + def test_shadow_extension_2(self): + if self.runappdirect: skip("hard to test: module is already imported") + # 'import itertools' is supposed to find the built-in module even + # if there is also one in sys.path as long as it is *after* the + # special entry '.../lib_pypy/__extensions__'. (Note that for now + # there is one in lib_pypy/itertools.py, which should not be seen + # either; hence the (built-in) test below.) 
+ import sys + assert 'itertools' not in sys.modules + sys.path.append(sys.path.pop(0)) + try: + import itertools + assert not hasattr(itertools, 'hello_world') + assert hasattr(itertools, 'izip') + assert '(built-in)' in repr(itertools) + finally: + sys.path.insert(0, sys.path.pop()) + del sys.modules['itertools'] + + class TestAbi: def test_abi_tag(self): space1 = gettestobjspace(soabi='TEST') diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -170,3 +170,7 @@ def get_flag(self, name): space = self.space return space.int_w(space.getattr(self.get('flags'), space.wrap(name))) + + def get_state(self, space): + from pypy.module.sys import state + return state.get(space) diff --git a/pypy/module/sys/state.py b/pypy/module/sys/state.py --- a/pypy/module/sys/state.py +++ b/pypy/module/sys/state.py @@ -24,7 +24,7 @@ # Initialize the default path pypydir = os.path.dirname(os.path.abspath(pypy.__file__)) srcdir = os.path.dirname(pypydir) - path = getinitialpath(srcdir) + path = getinitialpath(self, srcdir) self.w_path = space.newlist([space.wrap(p) for p in path]) def checkdir(path): @@ -35,7 +35,7 @@ platform = sys.platform -def getinitialpath(prefix): +def getinitialpath(state, prefix): from pypy.module.sys.version import CPYTHON_VERSION dirname = '%d.%d' % (CPYTHON_VERSION[0], CPYTHON_VERSION[1]) @@ -49,6 +49,12 @@ checkdir(lib_pypy) importlist = [] + # + if state is not None: # 'None' for testing only + lib_extensions = os.path.join(lib_pypy, '__extensions__') + state.w_lib_extensions = state.space.wrap(lib_extensions) + importlist.append(lib_extensions) + # importlist.append(lib_pypy) importlist.append(python_std_lib_modified) importlist.append(python_std_lib) @@ -71,7 +77,7 @@ @unwrap_spec(srcdir=str) def pypy_initial_path(space, srcdir): try: - path = getinitialpath(srcdir) + path = getinitialpath(get(space), srcdir) except OSError: return space.w_None else: diff --git a/pypy/module/sys/test/test_initialpath.py b/pypy/module/sys/test/test_initialpath.py --- a/pypy/module/sys/test/test_initialpath.py +++ b/pypy/module/sys/test/test_initialpath.py @@ -13,7 +13,7 @@ def test_stdlib_in_prefix(tmpdir): dirs = build_hierarchy(tmpdir) - path = getinitialpath(str(tmpdir)) + path = getinitialpath(None, str(tmpdir)) # we get at least 'dirs', and maybe more (e.g. plat-linux2) assert path[:len(dirs)] == map(str, dirs) @@ -21,7 +21,7 @@ lib_pypy, lib_python_modified, lib_python = build_hierarchy(tmpdir) lib_tk_modified = lib_python_modified.join('lib-tk') lib_tk = lib_python.join('lib-tk') - path = getinitialpath(str(tmpdir)) + path = getinitialpath(None, str(tmpdir)) i = path.index(str(lib_tk_modified)) j = path.index(str(lib_tk)) assert i < j diff --git a/pypy/rlib/rdynload.py b/pypy/rlib/rdynload.py --- a/pypy/rlib/rdynload.py +++ b/pypy/rlib/rdynload.py @@ -115,7 +115,8 @@ if _WIN32: DLLHANDLE = rwin32.HMODULE - def dlopen(name): + def dlopen(name, mode=-1): + # mode is unused on windows, but a consistant signature res = rwin32.LoadLibrary(name) if not res: err = rwin32.GetLastError() diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py --- a/pypy/rpython/memory/gc/minimark.py +++ b/pypy/rpython/memory/gc/minimark.py @@ -2,8 +2,7 @@ Environment variables can be used to fine-tune the following parameters: - PYPY_GC_NURSERY The nursery size. Defaults to half the size of - the L2 cache. Try values like '1.2MB'. Small values + PYPY_GC_NURSERY The nursery size. Defaults to '4MB'. 
Small values (like 1 or 1KB) are useful for debugging. PYPY_GC_MAJOR_COLLECT Major collection memory factor. Default is '1.82', @@ -61,7 +60,7 @@ # # * young objects: allocated in the nursery if they are not too large, or # raw-malloced otherwise. The nursery is a fixed-size memory buffer of -# half the size of the L2 cache. When full, we do a minor collection; +# 4MB by default. When full, we do a minor collection; # the surviving objects from the nursery are moved outside, and the # non-surviving raw-malloced objects are freed. All surviving objects # become old. @@ -329,7 +328,8 @@ # size (needed to handle mallocs just below 'large_objects') but # hacking at the current nursery position in collect_and_reserve(). if newsize <= 0: - newsize = env.estimate_best_nursery_size() + newsize = 4*1024*1024 # fixed to 4MB by default + # (it was env.estimate_best_nursery_size()) if newsize <= 0: newsize = defaultsize if newsize < minsize: diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py +++ b/pypy/translator/goal/app_main.py @@ -672,7 +672,7 @@ def pypy_initial_path(s): from pypy.module.sys.state import getinitialpath try: - return getinitialpath(s) + return getinitialpath(None, s) except OSError: return None diff --git a/pypy/translator/goal/test2/test_app_main.py b/pypy/translator/goal/test2/test_app_main.py --- a/pypy/translator/goal/test2/test_app_main.py +++ b/pypy/translator/goal/test2/test_app_main.py @@ -821,6 +821,8 @@ newpath = app_main.get_library_path('/tmp/pypy-c') # stdlib not found assert newpath == sys.path newpath = app_main.get_library_path(self.fake_exe) + if newpath[0].endswith('__extensions__'): + newpath = newpath[1:] # we get at least 'expected_path', and maybe more (e.g.plat-linux2) assert newpath[:len(self.expected_path)] == self.expected_path finally: From noreply at buildbot.pypy.org Sat Dec 10 12:54:59 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 10 Dec 2011 12:54:59 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: fix Message-ID: <20111210115459.D4F9382ABD@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50342:b08cf0eb7990 Date: 2011-12-10 12:54 +0100 http://bitbucket.org/pypy/pypy/changeset/b08cf0eb7990/ Log: fix diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -1330,7 +1330,7 @@ if op.getopnum() != rop.JUMP: return descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, TargetToken) nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr) for i in range(op.numargs()): box = op.getarg(i) From notifications-noreply at bitbucket.org Sat Dec 10 14:15:43 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Sat, 10 Dec 2011 13:15:43 -0000 Subject: [pypy-commit] [COMMENT] Pull request #15 for pypy/pypy: Added tolist() function to numpypy's scalars and multidim arrays Message-ID: <20111210131543.21987.87109@bitbucket01.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/15/added-tolist-function-to-numpypys-scalars#comment-1358 Jeff Terrace (jterrace) said: Yikes! Bear with me while I get the hang of RPython. I'll fix this stuff and try again. -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. 
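The llsupport/regalloc.py part of the "hg merge default" changeset above replaces the old freed_frame_locations side-dictionary with bookkeeping inside the FrameManager itself: a plain list of booleans ('used') records which frame indices are currently occupied, so freed slots can be reused and hinted stack locations can be checked for availability before being handed out again. The snippet below is only a minimal standalone sketch of that idea, not PyPy's actual FrameManager (which also tracks per-type slot sizes, frame-location hints and the register managers); the SlotTracker name and the simplified signatures are invented for illustration.

class SlotTracker(object):
    """Toy model of frame-slot bookkeeping; not PyPy's real FrameManager."""

    def __init__(self):
        self.used = []        # used[i] is True while frame slot i is occupied
        self.bindings = {}    # box -> index of its first slot

    def loc(self, box, size=1):
        # Return the slot already bound to 'box', or allocate 'size'
        # contiguous slots at the end of the frame.
        if box in self.bindings:
            return self.bindings[box]
        while len(self.used) % size:   # pad so the index is a multiple of 'size'
            self.used.append(False)    # (the real code uses a bitmask, since
                                       #  'size' is a power of two)
        index = len(self.used)
        self.used.extend([True] * size)
        self.bindings[box] = index
        return index

    def mark_as_free(self, box, size=1):
        # Forget 'box' and mark its slots as reusable.
        index = self.bindings.pop(box, None)
        if index is not None:
            for i in range(size):
                self.used[index + i] = False

    def try_to_reuse_location(self, box, index, size=1):
        # Reuse a hinted location only if all of its slots are free.
        while len(self.used) < index + size:
            self.used.append(False)
        if any(self.used[index:index + size]):
            return False
        for i in range(size):
            self.used[index + i] = True
        self.bindings[box] = index
        return True

tracker = SlotTracker()
slot_a = tracker.loc('a')                             # slot 0
slot_b = tracker.loc('b', size=2)                     # slots 2-3, even-aligned
tracker.mark_as_free('a')
assert tracker.try_to_reuse_location('c', slot_a)             # slot 0 reused
assert not tracker.try_to_reuse_location('d', slot_b, size=2) # 2-3 still busy

A hinted location is handed out again only when every slot it covers is currently free, which is exactly the check that lets the real code drop the old freed_frame_locations dictionary.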
From noreply at buildbot.pypy.org Sat Dec 10 14:27:55 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 10 Dec 2011 14:27:55 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: fall back on jumping to preamble if retracing fails Message-ID: <20111210132755.91A4B82ABD@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50343:f78f16899afb Date: 2011-12-10 14:27 +0100 http://bitbucket.org/pypy/pypy/changeset/f78f16899afb/ Log: fall back on jumping to preamble if retracing fails diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -209,7 +209,18 @@ try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: - return None + # Fall back on jumping to preamble + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + assert target_token.exported_state + part.operations = [label] + \ + [ResOperation(rop.JUMP, target_token.exported_state.original_jump_args, + None, descr=loop_jitcell_token)] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts, + inline_short_preamble=False) + except InvalidLoop: + return None assert part.operations[-1].getopnum() != rop.LABEL target_token = label.getdescr() assert isinstance(target_token, TargetToken) diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -171,7 +171,7 @@ target_token.start_resumedescr = start_resumedescr target_token.exported_state = ExportedState(constant_inputargs, short_boxes, inputarg_setup_ops, self.optimizer, - aliased_vrituals) + aliased_vrituals, original_jump_args) def import_state(self, targetop): self.did_import = False @@ -275,7 +275,6 @@ try: jumpargs = virtual_state.make_inputargs(values, self.optimizer) except BadVirtualState: - # FIXME: Produce jump to preamble instead (see test_retrace_not_matching_bridge) raise InvalidLoop jumpop.initarglist(jumpargs) @@ -340,13 +339,11 @@ if not virtual_state.generalization_of(final_virtual_state, bad): # We ended up with a virtual state that is not compatible # and we are thus unable to jump to the start of the loop - # XXX Is it possible to end up here? If so, consider: - # - Fallback on having the preamble jump to itself? - # - Would virtual_state.generate_guards make sense here? 
final_virtual_state.debug_print("Bad virtual state at end of loop, ", bad) debug_stop('jit-log-virtualstate') raise InvalidLoop + debug_stop('jit-log-virtualstate') maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards @@ -595,9 +592,11 @@ class ExportedState(object): def __init__(self, constant_inputargs, - short_boxes, inputarg_setup_ops, optimizer, aliased_vrituals): + short_boxes, inputarg_setup_ops, optimizer, aliased_vrituals, + original_jump_args): self.constant_inputargs = constant_inputargs self.short_boxes = short_boxes self.inputarg_setup_ops = inputarg_setup_ops self.optimizer = optimizer self.aliased_vrituals = aliased_vrituals + self.original_jump_args = original_jump_args diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -820,6 +820,91 @@ return node.value res = self.meta_interp(f, [10], repeat=10) assert res == f(10) + self.check_resops(jump=2) + + def test_nested_loops(self): + class Int(object): + def __init__(self, val): + self.val = val + myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'sa', 'i', 'j']) + bytecode = "iajb+JI" + def f(n): + pc = sa = 0 + i = j = Int(0) + while pc < len(bytecode): + myjitdriver.jit_merge_point(pc=pc, n=n, sa=sa, i=i, j=j) + op = bytecode[pc] + if op == 'i': + i = Int(0) + elif op == 'j': + j = Int(0) + elif op == '+': + sa += i.val * j.val + elif op == 'a': + i = Int(i.val + 1) + elif op == 'b': + j = Int(j.val + 1) + elif op == 'J': + if j.val < n: + pc -= 2 + myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i, j=j) + continue + elif op == 'I': + if i.val < n: + pc -= 5 + myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i, j=j) + continue + pc += 1 + return sa + + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_aborted_count(0) + self.check_target_token_count(3) + + def test_nested_loops_bridge(self): + class Int(object): + def __init__(self, val): + self.val = val + myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'sa', 'i', 'j']) + bytecode = "iajb+JI" + def f(n): + pc = sa = 0 + i = j = Int(0) + while pc < len(bytecode): + myjitdriver.jit_merge_point(pc=pc, n=n, sa=sa, i=i, j=j) + op = bytecode[pc] + if op == 'i': + i = Int(0) + elif op == 'j': + j = Int(0) + elif op == '+': + if i.val < n-8: + sa += 7 + if j.val < n-16: + sa += 42 + sa += i.val * j.val + elif op == 'a': + i = Int(i.val + 1) + elif op == 'b': + j = Int(j.val + 1) + elif op == 'J': + if j.val < n: + pc -= 2 + myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i, j=j) + continue + elif op == 'I': + if i.val < n: + pc -= 5 + myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i, j=j) + continue + pc += 1 + return sa + + res = self.meta_interp(f, [32]) + assert res == f(32) + self.check_aborted_count(0) + self.check_target_token_count(3) class VirtualMiscTests: From noreply at buildbot.pypy.org Sat Dec 10 15:26:02 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 10 Dec 2011 15:26:02 +0100 (CET) Subject: [pypy-commit] pypy default: Fail slightly bettter than a SyntaxError Message-ID: <20111210142602.7465682ABD@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50344:22d20cc8666b Date: 2011-12-10 16:25 +0200 http://bitbucket.org/pypy/pypy/changeset/22d20cc8666b/ Log: Fail slightly bettter than a SyntaxError diff --git a/lib_pypy/distributed/socklayer.py b/lib_pypy/distributed/socklayer.py --- a/lib_pypy/distributed/socklayer.py +++ 
b/lib_pypy/distributed/socklayer.py @@ -2,7 +2,7 @@ import py from socket import socket -XXX needs import adaptation as 'green' is removed from py lib for years +raise ImportError("XXX needs import adaptation as 'green' is removed from py lib for years") from py.impl.green.msgstruct import decodemessage, message from socket import socket, AF_INET, SOCK_STREAM import marshal From noreply at buildbot.pypy.org Sat Dec 10 16:03:29 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Dec 2011 16:03:29 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: Fix. Message-ID: <20111210150329.04E0B82ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-targets Changeset: r50345:9addc6b0c977 Date: 2011-12-10 16:03 +0100 http://bitbucket.org/pypy/pypy/changeset/9addc6b0c977/ Log: Fix. diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -65,12 +65,6 @@ self.bindings[box] = newloc return newloc - def forget_frame_allocation(self, box): - try: - del self.frame_bindings[box] - except KeyError: - pass - def set_binding(self, box, loc): self.bindings[box] = loc # diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -164,6 +164,7 @@ # to be read/used by the assembler too self.jump_target_descr = None self.close_stack_struct = 0 + self.final_jump_op = None def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() @@ -1329,11 +1330,19 @@ op = operations[-1] if op.getopnum() != rop.JUMP: return + self.final_jump_op = op descr = op.getdescr() assert isinstance(descr, TargetToken) - nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr) - for i in range(op.numargs()): - box = op.getarg(i) + if descr._x86_loop_code != -1: + # if the target LABEL was already compiled, i.e. if it belongs + # to some already-compiled piece of code + self._compute_hint_frame_locations_from_descr(descr) + + def _compute_hint_frame_locations_from_descr(self, descr): + nonfloatlocs, floatlocs = descr._x86_arglocs + jump_op = self.final_jump_op + for i in range(jump_op.numargs()): + box = jump_op.getarg(i) if isinstance(box, Box): loc = nonfloatlocs[i] if isinstance(loc, StackLoc): @@ -1460,12 +1469,20 @@ else: nonfloatlocs[i] = loc if isinstance(loc, RegLoc): - self.fm.forget_frame_allocation(arg) + self.fm.mark_as_free(arg) descr._x86_arglocs = nonfloatlocs, floatlocs descr._x86_loop_code = self.assembler.mc.get_relative_pos() descr._x86_clt = self.assembler.current_clt self.assembler.target_tokens_currently_compiling[descr] = None self.possibly_free_vars_for_op(op) + # + # if the LABEL's descr is precisely the target of the JUMP at the + # end of the same loop, i.e. if what we are compiling is a single + # loop that ends up jumping to this LABEL, then we can now provide + # the hints about the expected position of the spilled variables. + jump_op = self.final_jump_op + if jump_op is not None and jump_op.getdescr() is descr: + self._compute_hint_frame_locations_from_descr(descr) ## from pypy.rpython.annlowlevel import llhelper ## def fn(addr): @@ -1539,3 +1556,7 @@ def not_implemented(msg): os.write(2, '[x86/regalloc] %s\n' % msg) raise NotImplementedError(msg) + +# xxx hack: set a default value for TargetToken._x86_loop_code. +# If -1, we know that it is a LABEL that was not compiled yet. 
+TargetToken._x86_loop_code = -1 From noreply at buildbot.pypy.org Sat Dec 10 16:23:29 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Dec 2011 16:23:29 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: Reset the targettoken's _x86_loop_code attribute between tests. Message-ID: <20111210152329.5ACA882ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-targets Changeset: r50346:95089d049fe7 Date: 2011-12-10 16:22 +0100 http://bitbucket.org/pypy/pypy/changeset/95089d049fe7/ Log: Reset the targettoken's _x86_loop_code attribute between tests. diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -1333,7 +1333,7 @@ self.final_jump_op = op descr = op.getdescr() assert isinstance(descr, TargetToken) - if descr._x86_loop_code != -1: + if descr._x86_loop_code != 0: # if the target LABEL was already compiled, i.e. if it belongs # to some already-compiled piece of code self._compute_hint_frame_locations_from_descr(descr) @@ -1341,6 +1341,7 @@ def _compute_hint_frame_locations_from_descr(self, descr): nonfloatlocs, floatlocs = descr._x86_arglocs jump_op = self.final_jump_op + assert len(nonfloatlocs) == jump_op.numargs() for i in range(jump_op.numargs()): box = jump_op.getarg(i) if isinstance(box, Box): @@ -1559,4 +1560,4 @@ # xxx hack: set a default value for TargetToken._x86_loop_code. # If -1, we know that it is a LABEL that was not compiled yet. -TargetToken._x86_loop_code = -1 +TargetToken._x86_loop_code = 0 diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -102,6 +102,10 @@ fdescr2 = BasicFailDescr(2) fdescr3 = BasicFailDescr(3) + def setup_method(self, meth): + self.targettoken._x86_loop_code = 0 + self.targettoken2._x86_loop_code = 0 + def f1(x): return x+1 From noreply at buildbot.pypy.org Sat Dec 10 16:25:30 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Dec 2011 16:25:30 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: Update comment. Message-ID: <20111210152530.4637182ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-targets Changeset: r50347:6c2822188033 Date: 2011-12-10 16:25 +0100 http://bitbucket.org/pypy/pypy/changeset/6c2822188033/ Log: Update comment. diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -1559,5 +1559,5 @@ raise NotImplementedError(msg) # xxx hack: set a default value for TargetToken._x86_loop_code. -# If -1, we know that it is a LABEL that was not compiled yet. +# If 0, we know that it is a LABEL that was not compiled yet. 
TargetToken._x86_loop_code = 0 From noreply at buildbot.pypy.org Sat Dec 10 16:30:17 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 10 Dec 2011 16:30:17 +0100 (CET) Subject: [pypy-commit] pypy default: some of these generate getfield_gc_pures now Message-ID: <20111210153017.3573C82ABD@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50348:6c4b9c3bc0d6 Date: 2011-12-10 10:30 -0500 http://bitbucket.org/pypy/pypy/changeset/6c4b9c3bc0d6/ Log: some of these generate getfield_gc_pures now diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -185,7 +185,8 @@ # sure it was optimized correctly. # XXX the comment above is wrong now. We need preferrably a way to # count the two loops separately - self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, + self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, + 'getfield_gc': 35, 'getfield_gc_pure': 6, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, From noreply at buildbot.pypy.org Sat Dec 10 16:32:27 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Dec 2011 16:32:27 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: Comments. Message-ID: <20111210153227.41C1C82ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-targets Changeset: r50349:959af14f07a4 Date: 2011-12-10 16:32 +0100 http://bitbucket.org/pypy/pypy/changeset/959af14f07a4/ Log: Comments. diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -1325,7 +1325,7 @@ def compute_hint_frame_locations(self, operations): # optimization only: fill in the 'hint_frame_locations' dictionary - # of rm and xrm based on the JUMP at the end of the loop, by looking + # of 'fm' based on the JUMP at the end of the loop, by looking # at where we would like the boxes to be after the jump. op = operations[-1] if op.getopnum() != rop.JUMP: @@ -1337,6 +1337,11 @@ # if the target LABEL was already compiled, i.e. if it belongs # to some already-compiled piece of code self._compute_hint_frame_locations_from_descr(descr) + #else: + # The loop ends in a JUMP going back to a LABEL in the same loop. + # We cannot fill 'hint_frame_locations' immediately, but we can + # wait until the corresponding consider_label() to know where the + # we would like the boxes to be after the jump. def _compute_hint_frame_locations_from_descr(self, descr): nonfloatlocs, floatlocs = descr._x86_arglocs From noreply at buildbot.pypy.org Sat Dec 10 16:33:51 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 10 Dec 2011 16:33:51 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: fix+ Message-ID: <20111210153351.04EFB82ABD@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50350:a1c0dc4d1391 Date: 2011-12-10 15:16 +0100 http://bitbucket.org/pypy/pypy/changeset/a1c0dc4d1391/ Log: fix+ diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2695,8 +2695,10 @@ # The attempts of retracing first loop will end up retracing the # second and thus fail 5 times, saturating the retrace_count. 
Instead a - # bridge back to the preamble of the first loop is produced. - self.check_trace_count(6) + # bridge back to the preamble of the first loop is produced. A guard in + # this bridge is later traced resulting in a failed attempt of retracing + # the second loop. + self.check_trace_count(8) # FIXME: Add a gloabl retrace counter and test that we are not trying more than 5 times. From noreply at buildbot.pypy.org Sat Dec 10 16:33:52 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 10 Dec 2011 16:33:52 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: port tests Message-ID: <20111210153352.2A65682ABD@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50351:f95dbc2efd61 Date: 2011-12-10 15:20 +0100 http://bitbucket.org/pypy/pypy/changeset/f95dbc2efd61/ Log: port tests diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -167,7 +167,7 @@ return f(data, n) assert self.meta_interp(main, [10]) == 2000 - self.check_resops({'jump': 2, 'int_lt': 2, 'getinteriorfield_raw': 2, + self.check_resops({'jump': 1, 'int_lt': 2, 'getinteriorfield_raw': 2, 'guard_true': 2, 'int_add': 4}) diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -1260,7 +1260,7 @@ return portal(level + 1) self.meta_interp(portal, [0]) - self.check_loop_count_at_most(2) # and not, e.g., 24 + self.check_trace_count_at_most(2) # and not, e.g., 24 class TestLLtype(RecursiveTests, LLJitMixin): From noreply at buildbot.pypy.org Sat Dec 10 16:33:53 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 10 Dec 2011 16:33:53 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: hg merge Message-ID: <20111210153353.514A182ABD@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50352:7c782d5ad65b Date: 2011-12-10 16:32 +0100 http://bitbucket.org/pypy/pypy/changeset/7c782d5ad65b/ Log: hg merge diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -65,12 +65,6 @@ self.bindings[box] = newloc return newloc - def forget_frame_allocation(self, box): - try: - del self.frame_bindings[box] - except KeyError: - pass - def set_binding(self, box, loc): self.bindings[box] = loc # diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -164,6 +164,7 @@ # to be read/used by the assembler too self.jump_target_descr = None self.close_stack_struct = 0 + self.final_jump_op = None def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() @@ -1329,11 +1330,20 @@ op = operations[-1] if op.getopnum() != rop.JUMP: return + self.final_jump_op = op descr = op.getdescr() assert isinstance(descr, TargetToken) - nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr) - for i in range(op.numargs()): - box = op.getarg(i) + if descr._x86_loop_code != 0: + # if the target LABEL was already compiled, i.e. 
if it belongs + # to some already-compiled piece of code + self._compute_hint_frame_locations_from_descr(descr) + + def _compute_hint_frame_locations_from_descr(self, descr): + nonfloatlocs, floatlocs = descr._x86_arglocs + jump_op = self.final_jump_op + assert len(nonfloatlocs) == jump_op.numargs() + for i in range(jump_op.numargs()): + box = jump_op.getarg(i) if isinstance(box, Box): loc = nonfloatlocs[i] if isinstance(loc, StackLoc): @@ -1460,12 +1470,20 @@ else: nonfloatlocs[i] = loc if isinstance(loc, RegLoc): - self.fm.forget_frame_allocation(arg) + self.fm.mark_as_free(arg) descr._x86_arglocs = nonfloatlocs, floatlocs descr._x86_loop_code = self.assembler.mc.get_relative_pos() descr._x86_clt = self.assembler.current_clt self.assembler.target_tokens_currently_compiling[descr] = None self.possibly_free_vars_for_op(op) + # + # if the LABEL's descr is precisely the target of the JUMP at the + # end of the same loop, i.e. if what we are compiling is a single + # loop that ends up jumping to this LABEL, then we can now provide + # the hints about the expected position of the spilled variables. + jump_op = self.final_jump_op + if jump_op is not None and jump_op.getdescr() is descr: + self._compute_hint_frame_locations_from_descr(descr) ## from pypy.rpython.annlowlevel import llhelper ## def fn(addr): @@ -1539,3 +1557,7 @@ def not_implemented(msg): os.write(2, '[x86/regalloc] %s\n' % msg) raise NotImplementedError(msg) + +# xxx hack: set a default value for TargetToken._x86_loop_code. +# If 0, we know that it is a LABEL that was not compiled yet. +TargetToken._x86_loop_code = 0 diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -102,6 +102,10 @@ fdescr2 = BasicFailDescr(2) fdescr3 = BasicFailDescr(3) + def setup_method(self, meth): + self.targettoken._x86_loop_code = 0 + self.targettoken2._x86_loop_code = 0 + def f1(x): return x+1 From noreply at buildbot.pypy.org Sat Dec 10 16:33:54 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 10 Dec 2011 16:33:54 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: hg merge Message-ID: <20111210153354.7D2BC82ABD@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50353:8164cef19074 Date: 2011-12-10 16:33 +0100 http://bitbucket.org/pypy/pypy/changeset/8164cef19074/ Log: hg merge diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -1325,7 +1325,7 @@ def compute_hint_frame_locations(self, operations): # optimization only: fill in the 'hint_frame_locations' dictionary - # of rm and xrm based on the JUMP at the end of the loop, by looking + # of 'fm' based on the JUMP at the end of the loop, by looking # at where we would like the boxes to be after the jump. op = operations[-1] if op.getopnum() != rop.JUMP: @@ -1337,6 +1337,11 @@ # if the target LABEL was already compiled, i.e. if it belongs # to some already-compiled piece of code self._compute_hint_frame_locations_from_descr(descr) + #else: + # The loop ends in a JUMP going back to a LABEL in the same loop. + # We cannot fill 'hint_frame_locations' immediately, but we can + # wait until the corresponding consider_label() to know where the + # we would like the boxes to be after the jump. 
def _compute_hint_frame_locations_from_descr(self, descr): nonfloatlocs, floatlocs = descr._x86_arglocs From noreply at buildbot.pypy.org Sat Dec 10 16:40:31 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Dec 2011 16:40:31 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: Fix: read 'frame_depth' before assembling the bridge. Message-ID: <20111210154031.D242582ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-targets Changeset: r50354:0da687824062 Date: 2011-12-10 16:40 +0100 http://bitbucket.org/pypy/pypy/changeset/0da687824062/ Log: Fix: read 'frame_depth' before assembling the bridge. diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -116,9 +116,9 @@ i11 = int_add(i12, i6) jump(i3, i12, i11, i10, i6, i7, descr=targettoken) ''' + loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth bridge = self.attach_bridge(ops, loop, 6) guard_op = loop.operations[6] - loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 # the force_spill() forces the stack to grow assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth From noreply at buildbot.pypy.org Sat Dec 10 16:59:13 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 10 Dec 2011 16:59:13 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: cleaner to use jump_args Message-ID: <20111210155913.06B3282ABD@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50355:3c35f57a7f67 Date: 2011-12-10 16:56 +0100 http://bitbucket.org/pypy/pypy/changeset/3c35f57a7f67/ Log: cleaner to use jump_args diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -214,7 +214,7 @@ assert isinstance(target_token, TargetToken) assert target_token.exported_state part.operations = [label] + \ - [ResOperation(rop.JUMP, target_token.exported_state.original_jump_args, + [ResOperation(rop.JUMP, target_token.exported_state.jump_args, None, descr=loop_jitcell_token)] try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts, diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -171,7 +171,7 @@ target_token.start_resumedescr = start_resumedescr target_token.exported_state = ExportedState(constant_inputargs, short_boxes, inputarg_setup_ops, self.optimizer, - aliased_vrituals, original_jump_args) + aliased_vrituals, jump_args) def import_state(self, targetop): self.did_import = False @@ -593,10 +593,10 @@ class ExportedState(object): def __init__(self, constant_inputargs, short_boxes, inputarg_setup_ops, optimizer, aliased_vrituals, - original_jump_args): + jump_args): self.constant_inputargs = constant_inputargs self.short_boxes = short_boxes self.inputarg_setup_ops = inputarg_setup_ops self.optimizer = optimizer self.aliased_vrituals = aliased_vrituals - self.original_jump_args = original_jump_args + self.jump_args = jump_args From noreply at buildbot.pypy.org Sat Dec 10 16:59:14 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 10 Dec 2011 16:59:14 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: hg merge Message-ID: 
<20111210155914.2577482ABD@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50356:6c6cf4df068d Date: 2011-12-10 16:58 +0100 http://bitbucket.org/pypy/pypy/changeset/6c6cf4df068d/ Log: hg merge diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -116,9 +116,9 @@ i11 = int_add(i12, i6) jump(i3, i12, i11, i10, i6, i7, descr=targettoken) ''' + loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth bridge = self.attach_bridge(ops, loop, 6) guard_op = loop.operations[6] - loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 # the force_spill() forces the stack to grow assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth From noreply at buildbot.pypy.org Sat Dec 10 17:18:14 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Dec 2011 17:18:14 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: Fixes for metainterp/test/test_ztranslation. Unsure if it's really Message-ID: <20111210161814.116A082ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-targets Changeset: r50357:195866ee2672 Date: 2011-12-10 17:17 +0100 http://bitbucket.org/pypy/pypy/changeset/195866ee2672/ Log: Fixes for metainterp/test/test_ztranslation. Unsure if it's really worth the burden to maintain this mess just for this test... diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -49,6 +49,11 @@ value._the_opaque_pointer = op return op +def _normalize(value): + if isinstance(value, lltype._ptr): + value = lltype.top_container(value._obj) + return value + def from_opaque_string(s): if isinstance(s, str): return s @@ -348,13 +353,13 @@ op = loop.operations[-1] op.descr = weakref.ref(descr) +TARGET_TOKENS = weakref.WeakKeyDictionary() + def compile_add_target_token(loop, descr): - compiled_version = loop loop = _from_opaque(loop) op = loop.operations[-1] - descr.compiled_version = compiled_version - descr.target_opindex = len(loop.operations) - descr.target_arguments = op.args + descrobj = _normalize(descr) + TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args def compile_add_var(loop, intvar): loop = _from_opaque(loop) @@ -392,18 +397,9 @@ def compile_add_jump_target(loop, targettoken): loop = _from_opaque(loop) - if isinstance(targettoken, history.JitCellToken): - assert False - loop_target = _from_opaque(targettoken.compiled_loop_token.compiled_version) - target_opindex = 0 - target_inputargs = loop_target.inputargs - elif isinstance(targettoken, history.TargetToken): - loop_target = _from_opaque(targettoken.compiled_version) - target_opindex = targettoken.target_opindex - target_inputargs = targettoken.target_arguments - else: - assert False - + descrobj = _normalize(targettoken) + loop_target, target_opindex, target_inputargs = TARGET_TOKENS[descrobj] + # op = loop.operations[-1] op.jump_target = loop_target op.jump_target_opindex = target_opindex @@ -1823,6 +1819,7 @@ setannotation(compile_add, annmodel.s_None) setannotation(compile_add_descr, annmodel.s_None) setannotation(compile_add_descr_arg, annmodel.s_None) +setannotation(compile_add_target_token, annmodel.s_None) setannotation(compile_add_var, annmodel.s_None) setannotation(compile_add_int_const, annmodel.s_None) 
setannotation(compile_add_ref_const, annmodel.s_None) From noreply at buildbot.pypy.org Sat Dec 10 18:42:49 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Dec 2011 18:42:49 +0100 (CET) Subject: [pypy-commit] pypy default: Forbid SomeObject there. Message-ID: <20111210174249.A5E5082ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50358:969865e9cb30 Date: 2011-12-10 18:42 +0100 http://bitbucket.org/pypy/pypy/changeset/969865e9cb30/ Log: Forbid SomeObject there. diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -238,6 +238,7 @@ t = TranslationContext(config=config) self.t = t # for debugging ann = t.buildannotator() + ann.policy.allow_someobjects = False if func is not None: ann.build_types(func, argtypes, complete_now=False) # From noreply at buildbot.pypy.org Sat Dec 10 20:35:08 2011 From: noreply at buildbot.pypy.org (ned) Date: Sat, 10 Dec 2011 20:35:08 +0100 (CET) Subject: [pypy-commit] pypy nedbat-sandbox: Implement from os: getuid, geteuid, getgid, getegid; in an attempt to get 'import site' working, but the problems go deeper. Message-ID: <20111210193508.189AD82ABD@wyvern.cs.uni-duesseldorf.de> Author: Ned Batchelder Branch: nedbat-sandbox Changeset: r50359:7fa920a29974 Date: 2011-12-10 14:33 -0500 http://bitbucket.org/pypy/pypy/changeset/7fa920a29974/ Log: Implement from os: getuid, geteuid, getgid, getegid; in an attempt to get 'import site' working, but the problems go deeper. diff --git a/pypy/translator/sandbox/sandlib.py b/pypy/translator/sandbox/sandlib.py --- a/pypy/translator/sandbox/sandlib.py +++ b/pypy/translator/sandbox/sandlib.py @@ -9,6 +9,7 @@ from pypy.tool.ansi_print import AnsiLog import subprocess from pypy.tool.killsubprocess import killsubprocess +from pypy.translator.sandbox.vfs import UID, GID class MyAnsiLog(AnsiLog): KW_TO_COLOR = { @@ -524,6 +525,14 @@ node = self.get_node(vpathname) return node.keys() + def do_ll_os__ll_os_getuid(self): + return UID + do_ll_os__ll_os_geteuid = do_ll_os__ll_os_getuid + + def do_ll_os__ll_os_getgid(self): + return GID + do_ll_os__ll_os_getegid = do_ll_os__ll_os_getgid + class VirtualizedSocketProc(VirtualizedSandboxedProc): """ Extends VirtualizedSandboxProc with socket diff --git a/pypy/translator/sandbox/test/test_sandlib.py b/pypy/translator/sandbox/test/test_sandlib.py --- a/pypy/translator/sandbox/test/test_sandlib.py +++ b/pypy/translator/sandbox/test/test_sandlib.py @@ -247,3 +247,18 @@ output, error = proc.communicate("") assert output == "All ok!\n" assert error == "" + +def test_getuid(): + def entry_point(argv): + import os + print "uid is %s" % os.getuid() + print "euid is %s" % os.geteuid() + print "gid is %s" % os.getgid() + print "egid is %s" % os.getegid() + return 0 + exe = compile(entry_point) + + proc = SandboxedProcWithFiles([exe]) + output, error = proc.communicate("") + assert output == "uid is 1000\neuid is 1000\ngid is 1000\negid is 1000\n" + assert error == "" From pullrequests-noreply at bitbucket.org Sat Dec 10 21:22:18 2011 From: pullrequests-noreply at bitbucket.org (Bitbucket) Date: Sat, 10 Dec 2011 20:22:18 -0000 Subject: [pypy-commit] [OPEN] Pull request #15 for pypy/pypy: Added tolist() function to numpypy's scalars and multidim arrays In-Reply-To: References: Message-ID: <20111210202218.29330.8988@bitbucket02.managed.contegix.com> Pull request #15 has been updated by Jeff Terrace to include new changes. 
https://bitbucket.org/pypy/pypy/pull-request/15/added-tolist-function-to-numpypys-scalars Title: Added tolist() function to numpypy's scalars and multidim arrays Creator: Jeff Terrace Includes tests Updated list of changes: 4e8130835a43 by Jeff Terrace: "Fixed the numarray tolist to call eval with a correct iterator and changed scala?" aca40ab92929 by Jeff Terrace: "Third try at adding tolist() to numpypy scalars and arrays" c904560ff1b3 by Jeff Terrace: "Added 0-d array test, changed scalar test to use type objects instead of arrays,?" b7f67fbd9e44 by Jeff Terrace: "Fixed merge conflict" b2e4afae62e5 by Jeff Terrace: "Added tolist() function to numpypy's scalars and multidim arrays" -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. From pullrequests-noreply at bitbucket.org Sat Dec 10 23:33:43 2011 From: pullrequests-noreply at bitbucket.org (Bitbucket) Date: Sat, 10 Dec 2011 22:33:43 -0000 Subject: [pypy-commit] [OPEN] Pull request #16 for pypy/pypy: Adds full fromstring support and exposes uint types Message-ID: A new pull request has been opened by Jeff Terrace. jterrace/pypy has changes to be pulled into pypy/pypy. https://bitbucket.org/pypy/pypy/pull-request/16/adds-full-fromstring-support-and-exposes Title: Adds full fromstring support and exposes uint types 1. Fixed typo in W_UnsignedIntegerBox 2. Updated dtype tests to check for proper overflows on int types 3. Exposed uint8, uint16, uint32 4. Wrote tests for uint8, uint16, uint32, and uint64 but the uint64 test is set to be skipped because it's broken. Did not expose uint64 since the tests weren't passing. 5. Updated fromstring() function to support the full range of parameters 6. Added lots of tests for fromstring 7. Added str/w_str to the numpypy FakeSpace 8. Fixed dtype coerce functions to handle strings so that numpy.int32('3') works properly (and also used in fromstring). Added tests for it. Changes to be pulled: a677a3081191 by Jeff Terrace: "Moved unpacking down to the type level and fixed non-negative slice inference so?" 0e06e4517539 by Jeff Terrace: "Add StringObject to FakeSpace and revert hacked isinstance_w workaround for w_st?" 37a67252257e by Jeff Terrace: "Add full support for the rest of fromstring() functionality" 1b487f5ec10c by Jeff Terrace: "Fixes for string special case to fix compiling tests" 800e9e2e1a90 by Jeff Terrace: "Treat strings as a special case in coerce to fix failing tests" 794f05b409c7 by Jeff Terrace: "Changed coerce methods to work with strings. Added tests for it." 1063e28fec01 by Jeff Terrace: "Fixed typo in W_UnsignedIntegerBox, exposed and wrote tests for uint8, uint16, u?" -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. From notifications-noreply at bitbucket.org Sat Dec 10 23:36:44 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Sat, 10 Dec 2011 22:36:44 -0000 Subject: [pypy-commit] [COMMENT] Pull request #16 for pypy/pypy: Adds full fromstring support and exposes uint types Message-ID: <20111210223644.9466.52040@bitbucket01.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/16/adds-full-fromstring-support-and-exposes#comment-1365 Jeff Terrace (jterrace) said: Adding a comment with nicer formatted list since I can't seem to edit my description: 1. Fixed typo in W_UnsignedIntegerBox 2. 
Updated dtype tests to check for proper overflows on int types 3. Exposed uint8, uint16, uint32 4. Wrote tests for uint8, uint16, uint32, and uint64 but the uint64 test is set to be skipped because it's broken. Did not expose uint64 since the tests weren't passing. 5. Updated fromstring() function to support the full range of parameters 6. Added lots of tests for fromstring 7. Added str/w_str to the numpypy FakeSpace 8. Fixed dtype coerce functions to handle strings so that numpy.int32('3') works properly (and also used in fromstring). Added tests for it. -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. From notifications-noreply at bitbucket.org Sun Dec 11 00:29:42 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Sat, 10 Dec 2011 23:29:42 -0000 Subject: [pypy-commit] [COMMENT] Pull request #16 for pypy/pypy: Adds full fromstring support and exposes uint types Message-ID: <20111210232942.4865.36658@bitbucket05.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/16/adds-full-fromstring-support-and-exposes#comment-1366 Alex Gaynor (alex_gaynor) said: Can this be split into multiple matches, ideally 3 I think: # the unsigned stuff # the fromstring stuff # numpy.int32("str") -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. From notifications-noreply at bitbucket.org Sun Dec 11 00:53:22 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Sat, 10 Dec 2011 23:53:22 -0000 Subject: [pypy-commit] [COMMENT] Pull request #16 for pypy/pypy: Adds full fromstring support and exposes uint types Message-ID: <20111210235322.26293.24108@bitbucket13.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/16/adds-full-fromstring-support-and-exposes#comment-1367 Jeff Terrace (jterrace) said: #2 depends on #3, and the tests for #2 depend on #1, so they could be decomposed, but it would take some extra work. How come? Just to make it easier to review? -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. From notifications-noreply at bitbucket.org Sun Dec 11 01:00:06 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Sun, 11 Dec 2011 00:00:06 -0000 Subject: [pypy-commit] [COMMENT] Pull request #16 for pypy/pypy: Adds full fromstring support and exposes uint types Message-ID: <20111211000006.31014.27917@bitbucket05.managed.contegix.com> New comment on pull request: https://bitbucket.org/pypy/pypy/pull-request/16/adds-full-fromstring-support-and-exposes#comment-1368 Alex Gaynor (alex_gaynor) said: Yup, exactly. -- This is a pull request comment notification from bitbucket.org. You are receiving this either because you are participating in a pull request, or you are following it. 
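The pull request under discussion bundles three things (the unsigned types, fromstring(), and string coercion such as numpy.int32('3')). At the interpreter the intended behaviour would look roughly like this; a sketch of the numpy-compatible API being targeted, not output from a finished implementation:

    import numpypy as numpy

    numpy.int32('3')                        # string coercion, same result as numpy.int32(3)
    numpy.uint8(255), numpy.uint8(256)      # unsigned types wrap around: 255, 0
    numpy.fromstring('\x01\x02\x03', dtype=numpy.uint8)      # binary mode
    numpy.fromstring('1 2 3', dtype=numpy.int32, sep=' ')    # text mode with a separator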
From noreply at buildbot.pypy.org Sun Dec 11 01:06:11 2011
From: noreply at buildbot.pypy.org (alex_gaynor)
Date: Sun, 11 Dec 2011 01:06:11 +0100 (CET)
Subject: [pypy-commit] pypy numpy-dtype-refactor-complex: update the version number (technically we're 1.8-pre or something now, but this is at least *more* accurate)
Message-ID: <20111211000611.A17B382ABD@wyvern.cs.uni-duesseldorf.de>

Author: Alex Gaynor
Branch: numpy-dtype-refactor-complex
Changeset: r50360:4c4c94b41e24
Date: 2011-12-10 19:05 -0500
http://bitbucket.org/pypy/pypy/changeset/4c4c94b41e24/

Log: update the version number (technically we're 1.8-pre or something now, but this is at least *more* accurate)

diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
--- a/pypy/doc/conf.py
+++ b/pypy/doc/conf.py
@@ -45,9 +45,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = '1.6'
+version = '1.7'
 # The full version, including alpha/beta/rc tags.
-release = '1.6'
+release = '1.7'
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

From noreply at buildbot.pypy.org Sun Dec 11 01:10:33 2011
From: noreply at buildbot.pypy.org (alex_gaynor)
Date: Sun, 11 Dec 2011 01:10:33 +0100 (CET)
Subject: [pypy-commit] pypy default: update the version number (technically we're 1.8-pre or something now, but this is at least *more* accurate)
Message-ID: <20111211001033.AC6F582ABD@wyvern.cs.uni-duesseldorf.de>

Author: Alex Gaynor
Branch: 
Changeset: r50361:16f29d272911
Date: 2011-12-10 19:05 -0500
http://bitbucket.org/pypy/pypy/changeset/16f29d272911/

Log: update the version number (technically we're 1.8-pre or something now, but this is at least *more* accurate)

diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
--- a/pypy/doc/conf.py
+++ b/pypy/doc/conf.py
@@ -45,9 +45,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = '1.6'
+version = '1.7'
 # The full version, including alpha/beta/rc tags.
-release = '1.6'
+release = '1.7'
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
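The two commits above only touch the Sphinx configuration. The convention in pypy/doc/conf.py is that version carries the short X.Y string while release may carry a longer tag; with hypothetical values, it can look like:

    # pypy/doc/conf.py
    version = '1.7'     # short X.Y, used in headings
    release = '1.7'     # full string; could also be something like '1.8-beta0'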
From pullrequests-noreply at bitbucket.org Sun Dec 11 02:03:09 2011
From: pullrequests-noreply at bitbucket.org (Bitbucket)
Date: Sun, 11 Dec 2011 01:03:09 -0000
Subject: [pypy-commit] [OPEN] Pull request #17 for pypy/pypy: Updates for numpy uint types
Message-ID: 

A new pull request has been opened by Jeff Terrace.

jterrace/pypy has changes to be pulled into pypy/pypy.

https://bitbucket.org/pypy/pypy/pull-request/17/updates-for-numpy-uint-types

Title: Updates for numpy uint types

# Fixed typo in W_UnsignedIntegerBox
# Updated dtype tests to check for proper overflows on int types
# Exposed uint8, uint16, uint32
# Wrote tests for uint8, uint16, uint32, and uint64 but the uint64 test is set to be skipped because it's broken. Did not expose uint64 since the tests weren't passing.

Changes to be pulled:

1fd47b3fd2c3 by Jeff Terrace: "Fixed typo in W_UnsignedIntegerBox, updated dtype int tests to check for overflo?"

--
This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it.

From pullrequests-noreply at bitbucket.org Sun Dec 11 02:03:56 2011
From: pullrequests-noreply at bitbucket.org (Bitbucket)
Date: Sun, 11 Dec 2011 01:03:56 -0000
Subject: [pypy-commit] [REJECTED] Pull request #16 for pypy/pypy: Adds full fromstring support and exposes uint types
In-Reply-To: 
References: 
Message-ID: <20111211010356.6350.50347@bitbucket13.managed.contegix.com>

Pull request #16 has been rejected by Alex Gaynor.

jterrace/pypy had changes to be pulled into pypy/pypy.

https://bitbucket.org/pypy/pypy/pull-request/16/adds-full-fromstring-support-and-exposes

So it can be split into several smaller ones.

Rejected changes:

a677a3081191 by Jeff Terrace: "Moved unpacking down to the type level and fixed non-negative slice inference so?"
0e06e4517539 by Jeff Terrace: "Add StringObject to FakeSpace and revert hacked isinstance_w workaround for w_st?"
37a67252257e by Jeff Terrace: "Add full support for the rest of fromstring() functionality"
1b487f5ec10c by Jeff Terrace: "Fixes for string special case to fix compiling tests"
800e9e2e1a90 by Jeff Terrace: "Treat strings as a special case in coerce to fix failing tests"
794f05b409c7 by Jeff Terrace: "Changed coerce methods to work with strings. Added tests for it."
1063e28fec01 by Jeff Terrace: "Fixed typo in W_UnsignedIntegerBox, exposed and wrote tests for uint8, uint16, u?"

The pull request has been closed.

--
This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it.

From noreply at buildbot.pypy.org Sun Dec 11 06:14:08 2011
From: noreply at buildbot.pypy.org (alex_gaynor)
Date: Sun, 11 Dec 2011 06:14:08 +0100 (CET)
Subject: [pypy-commit] pypy default: (jterrace) added tolist to various objects in numpy.
Message-ID: <20111211051408.1CEC182ABD@wyvern.cs.uni-duesseldorf.de>

Author: Alex Gaynor
Branch: 
Changeset: r50362:3eba2ed546ad
Date: 2011-12-11 00:13 -0500
http://bitbucket.org/pypy/pypy/changeset/3eba2ed546ad/

Log: (jterrace) added tolist to various objects in numpy.
diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -91,6 +91,9 @@ descr_neg = _unaryop_impl("negative") descr_abs = _unaryop_impl("absolute") + def descr_tolist(self, space): + return self.get_dtype(space).itemtype.to_builtin_type(space, self) + class W_BoolBox(W_GenericBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("bool") @@ -179,6 +182,8 @@ __neg__ = interp2app(W_GenericBox.descr_neg), __abs__ = interp2app(W_GenericBox.descr_abs), + + tolist = interp2app(W_GenericBox.descr_tolist), ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -876,6 +876,17 @@ arr.setshape(space, new_shape) return arr + def descr_tolist(self, space): + if len(self.shape) == 0: + assert isinstance(self, Scalar) + return self.value.descr_tolist(space) + w_result = space.newlist([]) + for i in range(self.shape[0]): + space.call_method(w_result, "append", + space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist") + ) + return w_result + def descr_mean(self, space): return space.div(self.descr_sum(space), space.wrap(self.find_size())) @@ -1485,6 +1496,7 @@ copy = interp2app(BaseArray.descr_copy), reshape = interp2app(BaseArray.descr_reshape), + tolist = interp2app(BaseArray.descr_tolist), ) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -879,6 +879,45 @@ b[0] = 3 assert b.__debug_repr__() == 'Call2(add, forced=Array)' + def test_tolist_scalar(self): + from numpypy import int32, bool_ + x = int32(23) + assert x.tolist() == 23 + assert type(x.tolist()) is int + y = bool_(True) + assert y.tolist() is True + + def test_tolist_zerodim(self): + from numpypy import array + x = array(3) + assert x.tolist() == 3 + assert type(x.tolist()) is int + + def test_tolist_singledim(self): + from numpypy import array + a = array(range(5)) + assert a.tolist() == [0, 1, 2, 3, 4] + assert type(a.tolist()[0]) is int + b = array([0.2, 0.4, 0.6]) + assert b.tolist() == [0.2, 0.4, 0.6] + + def test_tolist_multidim(self): + from numpypy import array + a = array([[1, 2], [3, 4]]) + assert a.tolist() == [[1, 2], [3, 4]] + + def test_tolist_view(self): + from numpypy import array + a = array([[1,2],[3,4]]) + assert (a + a).tolist() == [[2, 4], [6, 8]] + + def test_tolist_slice(self): + from numpypy import array + a = array([[17.1, 27.2], [40.3, 50.3]]) + assert a[:,0].tolist() == [17.1, 40.3] + assert a[0].tolist() == [17.1, 27.2] + + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -77,6 +77,9 @@ w_obj.__init__(self._coerce(space, w_item).value) return w_obj + def to_builtin_type(self, space, box): + return space.wrap(self.unbox(box)) + def _coerce(self, space, w_item): raise NotImplementedError From pullrequests-noreply at bitbucket.org Sun Dec 11 06:14:56 2011 From: pullrequests-noreply at bitbucket.org (Bitbucket) Date: Sun, 11 Dec 2011 05:14:56 -0000 Subject: [pypy-commit] [REJECTED] Pull request #15 for pypy/pypy: Added tolist() 
function to numpypy's scalars and multidim arrays In-Reply-To: References: Message-ID: <20111211051456.29434.74736@bitbucket03.managed.contegix.com> Pull request #15 has been rejected by Alex Gaynor. jterrace/pypy had changes to be pulled into pypy/pypy. https://bitbucket.org/pypy/pypy/pull-request/15/added-tolist-function-to-numpypys-scalars Added in 3eba2ed546ad, I couldn't find a way to do the pull request dance and make a few small changes, so this is "rejected" but really merged. Thanks! Rejected changes: 4e8130835a43 by Jeff Terrace: "Fixed the numarray tolist to call eval with a correct iterator and changed scala?" aca40ab92929 by Jeff Terrace: "Third try at adding tolist() to numpypy scalars and arrays" c904560ff1b3 by Jeff Terrace: "Added 0-d array test, changed scalar test to use type objects instead of arrays,?" b7f67fbd9e44 by Jeff Terrace: "Fixed merge conflict" b2e4afae62e5 by Jeff Terrace: "Added tolist() function to numpypy's scalars and multidim arrays" The pull request has been closed. -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. From noreply at buildbot.pypy.org Sun Dec 11 06:49:55 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 11 Dec 2011 06:49:55 +0100 (CET) Subject: [pypy-commit] pypy default: (jterrace, alex): Expose unsigned integer types in numpy and fix a typo. Fixed up the handling of unsigned64s which are larger in value than a long. Message-ID: <20111211054955.B5B6D82210@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50363:716c084b46a1 Date: 2011-12-11 00:49 -0500 http://bitbucket.org/pypy/pypy/changeset/716c084b46a1/ Log: (jterrace, alex): Expose unsigned integer types in numpy and fix a typo. Fixed up the handling of unsigned64s which are larger in value than a long. 
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -24,11 +24,16 @@ 'number': 'interp_boxes.W_NumberBox', 'integer': 'interp_boxes.W_IntegerBox', 'signedinteger': 'interp_boxes.W_SignedIntegerBox', + 'unsignedinteger': 'interp_boxes.W_UnsignedIntegerBox', 'bool_': 'interp_boxes.W_BoolBox', 'int8': 'interp_boxes.W_Int8Box', + 'uint8': 'interp_boxes.W_UInt8Box', 'int16': 'interp_boxes.W_Int16Box', + 'uint16': 'interp_boxes.W_UInt16Box', 'int32': 'interp_boxes.W_Int32Box', + 'uint32': 'interp_boxes.W_UInt32Box', 'int64': 'interp_boxes.W_Int64Box', + 'uint64': 'interp_boxes.W_UInt64Box', 'int_': 'interp_boxes.W_LongBox', 'inexact': 'interp_boxes.W_InexactBox', 'floating': 'interp_boxes.W_FloatingBox', diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -38,6 +38,7 @@ w_ValueError = None w_TypeError = None w_IndexError = None + w_OverflowError = None w_None = None w_bool = "bool" @@ -149,6 +150,10 @@ # XXX array probably assert False + def exception_match(self, w_exc_type, w_check_class): + # Good enough for now + raise NotImplementedError + class FloatObject(W_Root): tp = FakeSpace.w_float def __init__(self, floatval): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -107,38 +107,38 @@ class W_SignedIntegerBox(W_IntegerBox): pass -class W_UnsignedIntgerBox(W_IntegerBox): +class W_UnsignedIntegerBox(W_IntegerBox): pass class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int8") -class W_UInt8Box(W_UnsignedIntgerBox, PrimitiveBox): +class W_UInt8Box(W_UnsignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("uint8") class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int16") -class W_UInt16Box(W_UnsignedIntgerBox, PrimitiveBox): +class W_UInt16Box(W_UnsignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("uint16") class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int32") -class W_UInt32Box(W_UnsignedIntgerBox, PrimitiveBox): +class W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("uint32") class W_LongBox(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("long") -class W_ULongBox(W_UnsignedIntgerBox, PrimitiveBox): +class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): pass class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int64") -class W_UInt64Box(W_UnsignedIntgerBox, PrimitiveBox): - pass +class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint64") class W_InexactBox(W_NumberBox): _attrs_ = () @@ -203,13 +203,18 @@ __module__ = "numpypy", ) +W_UnsignedIntegerBox.typedef = TypeDef("unsignedinteger", W_IntegerBox.typedef, + __module__ = "numpypy", +) + W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_Int8Box.descr__new__.im_func), ) -W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntgerBox.typedef, +W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = 
interp2app(W_UInt8Box.descr__new__.im_func), ) W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, @@ -217,8 +222,9 @@ __new__ = interp2app(W_Int16Box.descr__new__.im_func), ) -W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntgerBox.typedef, +W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = interp2app(W_UInt16Box.descr__new__.im_func), ) W_Int32Box.typedef = TypeDef("int32", W_SignedIntegerBox.typedef, @@ -226,8 +232,9 @@ __new__ = interp2app(W_Int32Box.descr__new__.im_func), ) -W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntgerBox.typedef, +W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = interp2app(W_UInt32Box.descr__new__.im_func), ) if LONG_BIT == 32: @@ -238,7 +245,7 @@ __module__ = "numpypy", ) -W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntgerBox.typedef, +W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntegerBox.typedef, __module__ = "numpypy", ) @@ -247,8 +254,9 @@ __new__ = interp2app(W_Int64Box.descr__new__.im_func), ) -W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntgerBox.typedef, +W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = interp2app(W_UInt64Box.descr__new__.im_func), ) W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -174,6 +174,8 @@ raises(TypeError, numpy.integer, 0) exc = raises(TypeError, numpy.signedinteger, 0) assert str(exc.value) == "cannot create 'signedinteger' instances" + exc = raises(TypeError, numpy.unsignedinteger, 0) + assert str(exc.value) == "cannot create 'unsignedinteger' instances" raises(TypeError, numpy.floating, 0) raises(TypeError, numpy.inexact, 0) @@ -210,17 +212,54 @@ assert type(int(x)) is int assert int(x) == -128 + def test_uint8(self): + import numpypy as numpy + + assert numpy.uint8.mro() == [numpy.uint8, numpy.unsignedinteger, numpy.integer, numpy.number, numpy.generic, object] + + a = numpy.array([1, 2, 3], numpy.uint8) + assert type(a[1]) is numpy.uint8 + assert numpy.dtype("uint8").type is numpy.uint8 + + x = numpy.uint8(128) + assert x == 128 + assert x != -128 + assert type(x) is numpy.uint8 + assert repr(x) == "128" + + assert type(int(x)) is int + assert int(x) == 128 + + assert numpy.uint8(255) == 255 + assert numpy.uint8(256) == 0 + def test_int16(self): import numpypy as numpy x = numpy.int16(3) assert x == 3 + assert numpy.int16(32767) == 32767 + assert numpy.int16(32768) == -32768 + + def test_uint16(self): + import numpypy as numpy + + assert numpy.uint16(65535) == 65535 + assert numpy.uint16(65536) == 0 def test_int32(self): import numpypy as numpy x = numpy.int32(23) assert x == 23 + assert numpy.int32(2147483647) == 2147483647 + assert numpy.int32(2147483648) == -2147483648 + + def test_uint32(self): + import numpypy as numpy + + assert numpy.uint32(4294967295) == 4294967295 + assert numpy.uint32(4294967296) == 0 def test_int_(self): import numpypy as numpy @@ -240,6 +279,25 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 + assert numpy.int64(9223372036854775807) == 9223372036854775807 + raises(OverflowError, numpy.int64, 9223372036854775808) + + def test_uint64(self): + import sys + import numpypy as numpy + + assert numpy.uint64.mro() == [numpy.uint64, 
numpy.unsignedinteger, numpy.integer, numpy.number, numpy.generic, object] + + assert numpy.dtype(numpy.uint64).type is numpy.uint64 + skip("see comment") + # These tests pass "by chance" on numpy, things that are larger than + # platform long (i.e. a python int), don't get put in a normal box, + # instead they become an object array containing a long, we don't have + # yet, so these can't pass. + assert numpy.uint64(9223372036854775808) == 9223372036854775808 + assert numpy.uint64(18446744073709551615) == 18446744073709551615 + raises(OverflowError, numpy.uint64(18446744073709551616)) + def test_float32(self): import numpypy as numpy diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1,6 +1,7 @@ import functools import math +from pypy.interpreter.error import OperationError from pypy.module.micronumpy import interp_boxes from pypy.objspace.std.floatobject import float2string from pypy.rlib import rfloat, libffi, clibffi @@ -274,6 +275,19 @@ T = rffi.ULONGLONG BoxType = interp_boxes.W_UInt64Box + def _coerce(self, space, w_item): + try: + return Integer._coerce(self, space, w_item) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + bigint = space.bigint_w(w_item) + try: + value = bigint.toulonglong() + except OverflowError: + raise OperationError(space.w_OverflowError, space.w_None) + return self.box(value) + class Float(Primitive): _mixin_ = True From pullrequests-noreply at bitbucket.org Sun Dec 11 06:51:41 2011 From: pullrequests-noreply at bitbucket.org (Bitbucket) Date: Sun, 11 Dec 2011 05:51:41 -0000 Subject: [pypy-commit] [REJECTED] Pull request #17 for pypy/pypy: Updates for numpy uint types In-Reply-To: References: Message-ID: <20111211055141.5007.84820@bitbucket13.managed.contegix.com> Pull request #17 has been rejected by Alex Gaynor. jterrace/pypy had changes to be pulled into pypy/pypy. https://bitbucket.org/pypy/pypy/pull-request/17/updates-for-numpy-uint-types Merged in 716c084b46a1, once again I don't know how to get this to show as "accepted" and still have changes, I fixed up the uint64 stuff, except for some special numpy weirdness. Rejected changes: 1fd47b3fd2c3 by Jeff Terrace: "Fixed typo in W_UnsignedIntegerBox, updated dtype int tests to check for overflo?" The pull request has been closed. -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. 
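The UInt64 _coerce hunk above is the technical core of this exchange: the generic signed coercion is tried first, and only an OverflowError falls through to an explicit unsigned 64-bit conversion via the bigint. A minimal plain-Python sketch of that pattern (the helper names are invented for illustration; this is not pypy code):

UNSIGNED64_MAX = 2 ** 64 - 1

def signed_coerce(value):
    # stands in for Integer._coerce(): accepts only values that fit in a
    # signed 64-bit word and stores them as an unsigned bit pattern
    if not (-2 ** 63 <= value <= 2 ** 63 - 1):
        raise OverflowError(value)
    return value & UNSIGNED64_MAX

def uint64_coerce(value):
    try:
        return signed_coerce(value)
    except OverflowError:
        # fallback path, the analogue of bigint.toulonglong() in the diff
        if 0 <= value <= UNSIGNED64_MAX:
            return value
        raise

print(uint64_coerce(3))          # small values take the normal signed path
print(uint64_coerce(2 ** 63))    # too big for int64, still fits in uint64
try:
    uint64_coerce(2 ** 64)       # mirrors the OverflowError in test_uint64
except OverflowError:
    print("OverflowError, as the test expects")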
From noreply at buildbot.pypy.org Sun Dec 11 10:32:57 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 11 Dec 2011 10:32:57 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: dissable fallback for now Message-ID: <20111211093257.A7DF382210@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50364:cb17a24f8acd Date: 2011-12-11 10:32 +0100 http://bitbucket.org/pypy/pypy/changeset/cb17a24f8acd/ Log: dissable fallback for now diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -209,6 +209,7 @@ try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: + return None # XXX: Dissable for now # Fall back on jumping to preamble target_token = label.getdescr() assert isinstance(target_token, TargetToken) From noreply at buildbot.pypy.org Sun Dec 11 10:32:59 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 11 Dec 2011 10:32:59 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: hg merge default Message-ID: <20111211093259.240BE82210@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50365:ba23d85f0f16 Date: 2011-12-11 10:32 +0100 http://bitbucket.org/pypy/pypy/changeset/ba23d85f0f16/ Log: hg merge default diff --git a/lib_pypy/distributed/socklayer.py b/lib_pypy/distributed/socklayer.py --- a/lib_pypy/distributed/socklayer.py +++ b/lib_pypy/distributed/socklayer.py @@ -2,7 +2,7 @@ import py from socket import socket -XXX needs import adaptation as 'green' is removed from py lib for years +raise ImportError("XXX needs import adaptation as 'green' is removed from py lib for years") from py.impl.green.msgstruct import decodemessage, message from socket import socket, AF_INET, SOCK_STREAM import marshal diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.6' +version = '1.7' # The full version, including alpha/beta/rc tags. -release = '1.6' +release = '1.7' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
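The socklayer.py hunk in the merge above replaces a stray prose line (which made the module die with a SyntaxError the moment it was imported) with an explicit raise ImportError, so the failure becomes catchable like any missing optional dependency. A small sketch of the caller side of that pattern; outside a pypy checkout the ImportError simply comes from the package not being found, which exercises the same handling:

try:
    from distributed import socklayer
except ImportError as e:
    socklayer = None
    print("distributed.socklayer unavailable: %s" % e)

if socklayer is None:
    print("remote-object support disabled, continuing without it")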
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -24,11 +24,16 @@ 'number': 'interp_boxes.W_NumberBox', 'integer': 'interp_boxes.W_IntegerBox', 'signedinteger': 'interp_boxes.W_SignedIntegerBox', + 'unsignedinteger': 'interp_boxes.W_UnsignedIntegerBox', 'bool_': 'interp_boxes.W_BoolBox', 'int8': 'interp_boxes.W_Int8Box', + 'uint8': 'interp_boxes.W_UInt8Box', 'int16': 'interp_boxes.W_Int16Box', + 'uint16': 'interp_boxes.W_UInt16Box', 'int32': 'interp_boxes.W_Int32Box', + 'uint32': 'interp_boxes.W_UInt32Box', 'int64': 'interp_boxes.W_Int64Box', + 'uint64': 'interp_boxes.W_UInt64Box', 'int_': 'interp_boxes.W_LongBox', 'inexact': 'interp_boxes.W_InexactBox', 'floating': 'interp_boxes.W_FloatingBox', diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -38,6 +38,7 @@ w_ValueError = None w_TypeError = None w_IndexError = None + w_OverflowError = None w_None = None w_bool = "bool" @@ -149,6 +150,10 @@ # XXX array probably assert False + def exception_match(self, w_exc_type, w_check_class): + # Good enough for now + raise NotImplementedError + class FloatObject(W_Root): tp = FakeSpace.w_float def __init__(self, floatval): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -91,6 +91,9 @@ descr_neg = _unaryop_impl("negative") descr_abs = _unaryop_impl("absolute") + def descr_tolist(self, space): + return self.get_dtype(space).itemtype.to_builtin_type(space, self) + class W_BoolBox(W_GenericBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("bool") @@ -104,38 +107,38 @@ class W_SignedIntegerBox(W_IntegerBox): pass -class W_UnsignedIntgerBox(W_IntegerBox): +class W_UnsignedIntegerBox(W_IntegerBox): pass class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int8") -class W_UInt8Box(W_UnsignedIntgerBox, PrimitiveBox): +class W_UInt8Box(W_UnsignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("uint8") class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int16") -class W_UInt16Box(W_UnsignedIntgerBox, PrimitiveBox): +class W_UInt16Box(W_UnsignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("uint16") class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int32") -class W_UInt32Box(W_UnsignedIntgerBox, PrimitiveBox): +class W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("uint32") class W_LongBox(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("long") -class W_ULongBox(W_UnsignedIntgerBox, PrimitiveBox): +class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): pass class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int64") -class W_UInt64Box(W_UnsignedIntgerBox, PrimitiveBox): - pass +class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint64") class W_InexactBox(W_NumberBox): _attrs_ = () @@ -179,6 +182,8 @@ __neg__ = interp2app(W_GenericBox.descr_neg), __abs__ = interp2app(W_GenericBox.descr_abs), + + tolist = interp2app(W_GenericBox.descr_tolist), ) W_BoolBox.typedef = TypeDef("bool_", 
W_GenericBox.typedef, @@ -198,13 +203,18 @@ __module__ = "numpypy", ) +W_UnsignedIntegerBox.typedef = TypeDef("unsignedinteger", W_IntegerBox.typedef, + __module__ = "numpypy", +) + W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_Int8Box.descr__new__.im_func), ) -W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntgerBox.typedef, +W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = interp2app(W_UInt8Box.descr__new__.im_func), ) W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, @@ -212,8 +222,9 @@ __new__ = interp2app(W_Int16Box.descr__new__.im_func), ) -W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntgerBox.typedef, +W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = interp2app(W_UInt16Box.descr__new__.im_func), ) W_Int32Box.typedef = TypeDef("int32", W_SignedIntegerBox.typedef, @@ -221,8 +232,9 @@ __new__ = interp2app(W_Int32Box.descr__new__.im_func), ) -W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntgerBox.typedef, +W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = interp2app(W_UInt32Box.descr__new__.im_func), ) if LONG_BIT == 32: @@ -233,7 +245,7 @@ __module__ = "numpypy", ) -W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntgerBox.typedef, +W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntegerBox.typedef, __module__ = "numpypy", ) @@ -242,8 +254,9 @@ __new__ = interp2app(W_Int64Box.descr__new__.im_func), ) -W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntgerBox.typedef, +W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = interp2app(W_UInt64Box.descr__new__.im_func), ) W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -876,6 +876,17 @@ arr.setshape(space, new_shape) return arr + def descr_tolist(self, space): + if len(self.shape) == 0: + assert isinstance(self, Scalar) + return self.value.descr_tolist(space) + w_result = space.newlist([]) + for i in range(self.shape[0]): + space.call_method(w_result, "append", + space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist") + ) + return w_result + def descr_mean(self, space): return space.div(self.descr_sum(space), space.wrap(self.find_size())) @@ -1485,6 +1496,7 @@ copy = interp2app(BaseArray.descr_copy), reshape = interp2app(BaseArray.descr_reshape), + tolist = interp2app(BaseArray.descr_tolist), ) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -174,6 +174,8 @@ raises(TypeError, numpy.integer, 0) exc = raises(TypeError, numpy.signedinteger, 0) assert str(exc.value) == "cannot create 'signedinteger' instances" + exc = raises(TypeError, numpy.unsignedinteger, 0) + assert str(exc.value) == "cannot create 'unsignedinteger' instances" raises(TypeError, numpy.floating, 0) raises(TypeError, numpy.inexact, 0) @@ -210,17 +212,54 @@ assert type(int(x)) is int assert int(x) == -128 + def test_uint8(self): + import numpypy as numpy + + assert numpy.uint8.mro() == [numpy.uint8, numpy.unsignedinteger, numpy.integer, numpy.number, numpy.generic, object] + + a 
= numpy.array([1, 2, 3], numpy.uint8) + assert type(a[1]) is numpy.uint8 + assert numpy.dtype("uint8").type is numpy.uint8 + + x = numpy.uint8(128) + assert x == 128 + assert x != -128 + assert type(x) is numpy.uint8 + assert repr(x) == "128" + + assert type(int(x)) is int + assert int(x) == 128 + + assert numpy.uint8(255) == 255 + assert numpy.uint8(256) == 0 + def test_int16(self): import numpypy as numpy x = numpy.int16(3) assert x == 3 + assert numpy.int16(32767) == 32767 + assert numpy.int16(32768) == -32768 + + def test_uint16(self): + import numpypy as numpy + + assert numpy.uint16(65535) == 65535 + assert numpy.uint16(65536) == 0 def test_int32(self): import numpypy as numpy x = numpy.int32(23) assert x == 23 + assert numpy.int32(2147483647) == 2147483647 + assert numpy.int32(2147483648) == -2147483648 + + def test_uint32(self): + import numpypy as numpy + + assert numpy.uint32(4294967295) == 4294967295 + assert numpy.uint32(4294967296) == 0 def test_int_(self): import numpypy as numpy @@ -240,6 +279,25 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 + assert numpy.int64(9223372036854775807) == 9223372036854775807 + raises(OverflowError, numpy.int64, 9223372036854775808) + + def test_uint64(self): + import sys + import numpypy as numpy + + assert numpy.uint64.mro() == [numpy.uint64, numpy.unsignedinteger, numpy.integer, numpy.number, numpy.generic, object] + + assert numpy.dtype(numpy.uint64).type is numpy.uint64 + skip("see comment") + # These tests pass "by chance" on numpy, things that are larger than + # platform long (i.e. a python int), don't get put in a normal box, + # instead they become an object array containing a long, we don't have + # yet, so these can't pass. + assert numpy.uint64(9223372036854775808) == 9223372036854775808 + assert numpy.uint64(18446744073709551615) == 18446744073709551615 + raises(OverflowError, numpy.uint64(18446744073709551616)) + def test_float32(self): import numpypy as numpy diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -879,6 +879,45 @@ b[0] = 3 assert b.__debug_repr__() == 'Call2(add, forced=Array)' + def test_tolist_scalar(self): + from numpypy import int32, bool_ + x = int32(23) + assert x.tolist() == 23 + assert type(x.tolist()) is int + y = bool_(True) + assert y.tolist() is True + + def test_tolist_zerodim(self): + from numpypy import array + x = array(3) + assert x.tolist() == 3 + assert type(x.tolist()) is int + + def test_tolist_singledim(self): + from numpypy import array + a = array(range(5)) + assert a.tolist() == [0, 1, 2, 3, 4] + assert type(a.tolist()[0]) is int + b = array([0.2, 0.4, 0.6]) + assert b.tolist() == [0.2, 0.4, 0.6] + + def test_tolist_multidim(self): + from numpypy import array + a = array([[1, 2], [3, 4]]) + assert a.tolist() == [[1, 2], [3, 4]] + + def test_tolist_view(self): + from numpypy import array + a = array([[1,2],[3,4]]) + assert (a + a).tolist() == [[2, 4], [6, 8]] + + def test_tolist_slice(self): + from numpypy import array + a = array([[17.1, 27.2], [40.3, 50.3]]) + assert a[:,0].tolist() == [17.1, 40.3] + assert a[0].tolist() == [17.1, 27.2] + + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ 
b/pypy/module/micronumpy/test/test_zjit.py @@ -185,7 +185,8 @@ # sure it was optimized correctly. # XXX the comment above is wrong now. We need preferrably a way to # count the two loops separately - self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, + self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, + 'getfield_gc': 35, 'getfield_gc_pure': 6, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 2, 'int_ge': 4, 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1,6 +1,7 @@ import functools import math +from pypy.interpreter.error import OperationError from pypy.module.micronumpy import interp_boxes from pypy.objspace.std.floatobject import float2string from pypy.rlib import rfloat, libffi, clibffi @@ -77,6 +78,9 @@ w_obj.__init__(self._coerce(space, w_item).value) return w_obj + def to_builtin_type(self, space, box): + return space.wrap(self.unbox(box)) + def _coerce(self, space, w_item): raise NotImplementedError @@ -271,6 +275,19 @@ T = rffi.ULONGLONG BoxType = interp_boxes.W_UInt64Box + def _coerce(self, space, w_item): + try: + return Integer._coerce(self, space, w_item) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + bigint = space.bigint_w(w_item) + try: + value = bigint.toulonglong() + except OverflowError: + raise OperationError(space.w_OverflowError, space.w_None) + return self.box(value) + class Float(Primitive): _mixin_ = True diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -238,6 +238,7 @@ t = TranslationContext(config=config) self.t = t # for debugging ann = t.buildannotator() + ann.policy.allow_someobjects = False if func is not None: ann.build_types(func, argtypes, complete_now=False) # From noreply at buildbot.pypy.org Sun Dec 11 11:21:55 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 11:21:55 +0100 (CET) Subject: [pypy-commit] pypy default: Add a test for some of the nonstandard hashes (issue957) Message-ID: <20111211102155.1E5E182210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50366:9de8b8b018cb Date: 2011-12-11 11:21 +0100 http://bitbucket.org/pypy/pypy/changeset/9de8b8b018cb/ Log: Add a test for some of the nonstandard hashes (issue957) diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py --- a/pypy/module/_hashlib/test/test_hashlib.py +++ b/pypy/module/_hashlib/test/test_hashlib.py @@ -79,3 +79,29 @@ assert h.digest() == _hashlib.openssl_md5('x' * 20).digest() _hashlib.openssl_sha1(b).digest() + def test_extra_algorithms(self): + import _hashlib + test_string = "Nobody inspects the spammish repetition" + expected_results = { + "md5": "bb649c83dd1ea5c9d9dec9a18df0ffe9", + "md4": "c275b8454684ea416b93d7a418b43176", + "mdc2": None, # XXX find the correct expected value + "sha": "e2b0a8609b47c58e5d984c9ccfe69f9b654b032b", + "ripemd160": "cc4a5ce1b3df48aec5d22d1f16b894a0b894eccc", + "whirlpool": "1a22b79fe5afda02c63a25927193ed01dc718b74" + "026e597608ce431f9c3d2c9e74a7350b7fbb7c5d" + "4effe5d7a31879b8b7a10fd2f544c4ca268ecc6793923583", + } + def extracheck(hash_name, expected): + try: + m = _hashlib.new(hash_name) + except ValueError, e: + skip('%s: %s' % (hash_name, e)) + m.update(test_string) + got = 
m.hexdigest() + assert got and type(got) is str and len(got) % 2 == 0 + got.decode('hex') + if expected is not None: + assert got == expected + for hash_name, expected in sorted(expected_results.items()): + yield extracheck, hash_name, expected From noreply at buildbot.pypy.org Sun Dec 11 11:31:09 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 11 Dec 2011 11:31:09 +0100 (CET) Subject: [pypy-commit] pypy default: Merge nedbat-sandbox branch, with one tweak, don't realy on identity of one, Message-ID: <20111211103109.1B9AA82210@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50367:1b1197dcc86d Date: 2011-12-11 12:30 +0200 http://bitbucket.org/pypy/pypy/changeset/1b1197dcc86d/ Log: Merge nedbat-sandbox branch, with one tweak, don't realy on identity of one, just use object() for identitiy checks. diff --git a/pypy/translator/sandbox/sandlib.py b/pypy/translator/sandbox/sandlib.py --- a/pypy/translator/sandbox/sandlib.py +++ b/pypy/translator/sandbox/sandlib.py @@ -6,11 +6,10 @@ import py import sys, os, posixpath, errno, stat, time -from pypy.rpython.module.ll_os_stat import s_StatResult from pypy.tool.ansi_print import AnsiLog -from pypy.rlib.rarithmetic import r_longlong import subprocess from pypy.tool.killsubprocess import killsubprocess +from pypy.translator.sandbox.vfs import UID, GID class MyAnsiLog(AnsiLog): KW_TO_COLOR = { @@ -34,6 +33,10 @@ from pypy.tool.lib_pypy import import_from_lib_pypy marshal = import_from_lib_pypy('marshal') +# Non-marshal result types +RESULTTYPE_STATRESULT = object() +RESULTTYPE_LONGLONG = object() + def read_message(f, timeout=None): # warning: 'timeout' is not really reliable and should only be used # for testing. Also, it doesn't work if the file f does any buffering. @@ -50,12 +53,30 @@ marshal.dump(msg, g) else: marshal.dump(msg, g, 0) + elif resulttype is RESULTTYPE_STATRESULT: + # Hand-coded marshal for stat results that mimics what rmarshal expects. + # marshal.dump(tuple(msg)) would have been too easy. rmarshal insists + # on 64-bit ints at places, even when the value fits in 32 bits. 
+ import struct + st = tuple(msg) + fmt = "iIIiiiIfff" + buf = [] + buf.append(struct.pack(" Author: Maciej Fijalkowski Branch: nedbat-sandbox Changeset: r50368:2eb520555de5 Date: 2011-12-11 12:30 +0200 http://bitbucket.org/pypy/pypy/changeset/2eb520555de5/ Log: close merged branch From noreply at buildbot.pypy.org Sun Dec 11 11:37:29 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 11:37:29 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: Dead import Message-ID: <20111211103729.1784982210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50369:1c9dc407966c Date: 2011-12-11 11:28 +0100 http://bitbucket.org/pypy/pypy/changeset/1c9dc407966c/ Log: Dead import diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -14,7 +14,6 @@ METH_VARARGS, build_type_checkers, PyObjectFields, bootstrap_function) from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.rlib.objectmodel import we_are_translated -from pypy.objspace.std.tupleobject import W_TupleObject PyCFunction_typedef = rffi.COpaquePtr(typedef='PyCFunction') PyCFunction = lltype.Ptr(lltype.FuncType([PyObject, PyObject], PyObject)) From noreply at buildbot.pypy.org Sun Dec 11 11:37:30 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 11:37:30 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: Fix: make sure that PySequence_Fast() returns a W_ListObject Message-ID: <20111211103730.33EBB82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50370:5e1080705d7c Date: 2011-12-11 11:29 +0100 http://bitbucket.org/pypy/pypy/changeset/5e1080705d7c/ Log: Fix: make sure that PySequence_Fast() returns a W_ListObject or a W_TupleObject, not just some object of type list or tuple. The reason is that PySequence_Fast_GET_xxx() expects these exact interp- level types. diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -42,11 +42,11 @@ which case o is returned. Use PySequence_Fast_GET_ITEM() to access the members of the result. Returns NULL on failure. If the object is not a sequence, raises TypeError with m as the message text.""" - if (space.is_true(space.isinstance(w_obj, space.w_list)) or - space.is_true(space.isinstance(w_obj, space.w_tuple))): + if (isinstance(w_obj, listobject.W_ListObject) or + isinstance(w_obj, listobject.W_TupleObject)): return w_obj try: - return space.newtuple(space.fixedview(w_obj)) + return listobject.W_TupleObject(space.fixedview(w_obj)) except OperationError: raise OperationError(space.w_TypeError, space.wrap(rffi.charp2str(m))) From noreply at buildbot.pypy.org Sun Dec 11 11:37:31 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 11:37:31 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: Typo. Message-ID: <20111211103731.540C882210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50371:0b382030a9e9 Date: 2011-12-11 11:36 +0100 http://bitbucket.org/pypy/pypy/changeset/0b382030a9e9/ Log: Typo. diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -43,10 +43,10 @@ members of the result. Returns NULL on failure. 
If the object is not a sequence, raises TypeError with m as the message text.""" if (isinstance(w_obj, listobject.W_ListObject) or - isinstance(w_obj, listobject.W_TupleObject)): + isinstance(w_obj, tupleobject.W_TupleObject)): return w_obj try: - return listobject.W_TupleObject(space.fixedview(w_obj)) + return tupleobject.W_TupleObject(space.fixedview(w_obj)) except OperationError: raise OperationError(space.w_TypeError, space.wrap(rffi.charp2str(m))) From noreply at buildbot.pypy.org Sun Dec 11 12:43:00 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 11 Dec 2011 12:43:00 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: clone the short_boxes to allow it to be reused in some fallabck if the first athempt to optimize fails Message-ID: <20111211114300.9416F82210@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50372:3b4f0126fae2 Date: 2011-12-11 12:41 +0100 http://bitbucket.org/pypy/pypy/changeset/3b4f0126fae2/ Log: clone the short_boxes to allow it to be reused in some fallabck if the first athempt to optimize fails diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -175,6 +175,8 @@ for label in all_target_tokens: assert isinstance(label, TargetToken) label.original_jitcell_token = jitcell_token + if label.virtual_state and label.short_preamble: + metainterp_sd.logger_ops.log_short_preamble([], label.short_preamble) jitcell_token.target_tokens = all_target_tokens send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, "loop") record_loop_or_bridge(metainterp_sd, loop) @@ -209,7 +211,7 @@ try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: - return None # XXX: Dissable for now + #return None # XXX: Dissable for now # Fall back on jumping to preamble target_token = label.getdescr() assert isinstance(target_token, TargetToken) diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -190,7 +190,7 @@ self.short = target_token.short_preamble self.short_seen = {} - self.short_boxes = exported_state.short_boxes + self.short_boxes = exported_state.short_boxes.clone() for box, const in exported_state.constant_inputargs.items(): self.short_seen[box] = True self.imported_state = exported_state @@ -216,6 +216,7 @@ for op in exported_state.inputarg_setup_ops: self.optimizer.send_extra_operation(op) seen = {} + for op in self.short_boxes.operations(): self.ensure_short_op_emitted(op, self.optimizer, seen) if op and op.result: diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -566,18 +566,27 @@ self.aliases = {} self.rename = {} self.optimizer = optimizer - for box in surviving_boxes: - self.potential_ops[box] = None - optimizer.produce_potential_short_preamble_ops(self) - self.short_boxes = {} - self.short_boxes_in_production = {} + if surviving_boxes is not None: + for box in surviving_boxes: + self.potential_ops[box] = None + optimizer.produce_potential_short_preamble_ops(self) - for box in self.potential_ops.keys(): - try: - self.produce_short_preamble_box(box) - except BoxNotProducable: - pass + self.short_boxes = {} + self.short_boxes_in_production = {} + + for box in 
self.potential_ops.keys(): + try: + self.produce_short_preamble_box(box) + except BoxNotProducable: + pass + + def clone(self): + sb = ShortBoxes(self.optimizer, None) + sb.aliases.update(self.aliases) + sb.short_boxes = {} + sb.short_boxes.update(self.short_boxes) + return sb def prioritized_alternatives(self, box): if box not in self.alternatives: diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -822,6 +822,44 @@ assert res == f(10) self.check_resops(jump=2) + def test_retrace_not_matching_bridge_str(self): + @dont_look_inside + def external(node): + return node.value + 1 + myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'node', 'node2', 's']) + class A(): + def new(self): + return A() + def val(self, i): + return i + 7 + class B(A): + def new(self): + return B() + def val(self, i): + return i + 42 + def f(n): + s = '*' * n + node = self._new() + node2 = A() + node.value = 0 + i = 0 + while i < n: + myjitdriver.jit_merge_point(n=n, i=i, node=node, node2=node2, s=s) + next = self._new() + next.value = node.value + n + node2.val(i) + if i != 7: + next.value += external(next) + else: + node2 = B() + node = next + node2 = node2.new() + node.value += len(s) + i += 1 + return node.value + res = self.meta_interp(f, [10], repeat=10) + assert res == f(10) + self.check_resops(jump=2) + def test_nested_loops(self): class Int(object): def __init__(self, val): From noreply at buildbot.pypy.org Sun Dec 11 12:43:01 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 11 Dec 2011 12:43:01 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: hg merge default Message-ID: <20111211114301.BBF1C82210@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50373:ebf413fc4faa Date: 2011-12-11 12:42 +0100 http://bitbucket.org/pypy/pypy/changeset/ebf413fc4faa/ Log: hg merge default diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py --- a/pypy/module/_hashlib/test/test_hashlib.py +++ b/pypy/module/_hashlib/test/test_hashlib.py @@ -79,3 +79,29 @@ assert h.digest() == _hashlib.openssl_md5('x' * 20).digest() _hashlib.openssl_sha1(b).digest() + def test_extra_algorithms(self): + import _hashlib + test_string = "Nobody inspects the spammish repetition" + expected_results = { + "md5": "bb649c83dd1ea5c9d9dec9a18df0ffe9", + "md4": "c275b8454684ea416b93d7a418b43176", + "mdc2": None, # XXX find the correct expected value + "sha": "e2b0a8609b47c58e5d984c9ccfe69f9b654b032b", + "ripemd160": "cc4a5ce1b3df48aec5d22d1f16b894a0b894eccc", + "whirlpool": "1a22b79fe5afda02c63a25927193ed01dc718b74" + "026e597608ce431f9c3d2c9e74a7350b7fbb7c5d" + "4effe5d7a31879b8b7a10fd2f544c4ca268ecc6793923583", + } + def extracheck(hash_name, expected): + try: + m = _hashlib.new(hash_name) + except ValueError, e: + skip('%s: %s' % (hash_name, e)) + m.update(test_string) + got = m.hexdigest() + assert got and type(got) is str and len(got) % 2 == 0 + got.decode('hex') + if expected is not None: + assert got == expected + for hash_name, expected in sorted(expected_results.items()): + yield extracheck, hash_name, expected diff --git a/pypy/translator/sandbox/sandlib.py b/pypy/translator/sandbox/sandlib.py --- a/pypy/translator/sandbox/sandlib.py +++ b/pypy/translator/sandbox/sandlib.py @@ -6,11 +6,10 @@ import py import sys, os, posixpath, errno, stat, time -from pypy.rpython.module.ll_os_stat import s_StatResult from 
pypy.tool.ansi_print import AnsiLog -from pypy.rlib.rarithmetic import r_longlong import subprocess from pypy.tool.killsubprocess import killsubprocess +from pypy.translator.sandbox.vfs import UID, GID class MyAnsiLog(AnsiLog): KW_TO_COLOR = { @@ -34,6 +33,10 @@ from pypy.tool.lib_pypy import import_from_lib_pypy marshal = import_from_lib_pypy('marshal') +# Non-marshal result types +RESULTTYPE_STATRESULT = object() +RESULTTYPE_LONGLONG = object() + def read_message(f, timeout=None): # warning: 'timeout' is not really reliable and should only be used # for testing. Also, it doesn't work if the file f does any buffering. @@ -50,12 +53,30 @@ marshal.dump(msg, g) else: marshal.dump(msg, g, 0) + elif resulttype is RESULTTYPE_STATRESULT: + # Hand-coded marshal for stat results that mimics what rmarshal expects. + # marshal.dump(tuple(msg)) would have been too easy. rmarshal insists + # on 64-bit ints at places, even when the value fits in 32 bits. + import struct + st = tuple(msg) + fmt = "iIIiiiIfff" + buf = [] + buf.append(struct.pack(" Author: Armin Rigo Branch: Changeset: r50374:0d499a1ce192 Date: 2011-12-11 12:24 +0100 http://bitbucket.org/pypy/pypy/changeset/0d499a1ce192/ Log: Fix. The testing framework was happily accepting app-level methods that are generators, and then running the yielded functions at interp-level :-( Add a hack to conftest.py to prevent that, and rewrite the test in a non-generator style. Now it fails, good. diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -496,6 +496,17 @@ def setup(self): super(AppClassCollector, self).setup() cls = self.obj + # + # + for name in dir(cls): + if name.startswith('test_'): + func = getattr(cls, name, None) + code = getattr(func, 'func_code', None) + if code and code.co_flags & 32: + raise AssertionError("unsupported: %r is a generator " + "app-level test method" % (name,)) + # + # space = cls.space clsname = cls.__name__ if self.config.option.runappdirect: diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py --- a/pypy/module/_hashlib/test/test_hashlib.py +++ b/pypy/module/_hashlib/test/test_hashlib.py @@ -80,28 +80,27 @@ _hashlib.openssl_sha1(b).digest() def test_extra_algorithms(self): - import _hashlib - test_string = "Nobody inspects the spammish repetition" expected_results = { "md5": "bb649c83dd1ea5c9d9dec9a18df0ffe9", "md4": "c275b8454684ea416b93d7a418b43176", "mdc2": None, # XXX find the correct expected value "sha": "e2b0a8609b47c58e5d984c9ccfe69f9b654b032b", "ripemd160": "cc4a5ce1b3df48aec5d22d1f16b894a0b894eccc", - "whirlpool": "1a22b79fe5afda02c63a25927193ed01dc718b74" - "026e597608ce431f9c3d2c9e74a7350b7fbb7c5d" - "4effe5d7a31879b8b7a10fd2f544c4ca268ecc6793923583", + "whirlpool": ("1a22b79fe5afda02c63a25927193ed01dc718b74" + "026e597608ce431f9c3d2c9e74a7350b7fbb7c5d" + "4effe5d7a31879b8b7a10fd2f544c4ca268ecc6793923583"), } - def extracheck(hash_name, expected): + import _hashlib + test_string = "Nobody inspects the spammish repetition" + for hash_name, expected in sorted(expected_results.items()): try: m = _hashlib.new(hash_name) except ValueError, e: - skip('%s: %s' % (hash_name, e)) + print 'skipped %s: %s' % (hash_name, e) + continue m.update(test_string) got = m.hexdigest() assert got and type(got) is str and len(got) % 2 == 0 got.decode('hex') if expected is not None: assert got == expected - for hash_name, expected in sorted(expected_results.items()): - yield extracheck, hash_name, expected From noreply at buildbot.pypy.org 
Sun Dec 11 12:48:39 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 12:48:39 +0100 (CET) Subject: [pypy-commit] pypy default: Fix: _hashlib was just broken for any non-standard hash function. Message-ID: <20111211114839.0524282210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50375:2e5b6dce5753 Date: 2011-12-11 12:48 +0100 http://bitbucket.org/pypy/pypy/changeset/2e5b6dce5753/ Log: Fix: _hashlib was just broken for any non-standard hash function. Small extra clean-ups. diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py --- a/pypy/module/_hashlib/interp_hashlib.py +++ b/pypy/module/_hashlib/interp_hashlib.py @@ -21,11 +21,11 @@ class W_Hash(Wrappable): ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO) - _block_size = -1 def __init__(self, space, name): self.name = name - self.digest_size = self.compute_digest_size() + digest_type = self.digest_type_by_name(space) + self.digest_size = rffi.getintfield(digest_type, 'c_md_size') # Allocate a lock for each HASH object. # An optimization would be to not release the GIL on small requests, @@ -34,21 +34,22 @@ ctx = lltype.malloc(ropenssl.EVP_MD_CTX.TO, flavor='raw') rgc.add_memory_pressure(HASH_MALLOC_SIZE + self.digest_size) + ropenssl.EVP_DigestInit(ctx, digest_type) self.ctx = ctx - def initdigest(self, space, name): - digest = ropenssl.EVP_get_digestbyname(name) - if not digest: - raise OperationError(space.w_ValueError, - space.wrap("unknown hash function")) - ropenssl.EVP_DigestInit(self.ctx, digest) - def __del__(self): # self.lock.free() if self.ctx: ropenssl.EVP_MD_CTX_cleanup(self.ctx) lltype.free(self.ctx, flavor='raw') + def digest_type_by_name(self, space): + digest_type = ropenssl.EVP_get_digestbyname(self.name) + if not digest_type: + raise OperationError(space.w_ValueError, + space.wrap("unknown hash function")) + return digest_type + def descr_repr(self, space): addrstring = self.getaddrstring(space) return space.wrap("<%s HASH object at 0x%s>" % ( @@ -87,7 +88,9 @@ return space.wrap(self.digest_size) def get_block_size(self, space): - return space.wrap(self.compute_block_size()) + digest_type = self.digest_type_by_name(space) + block_size = rffi.getintfield(digest_type, 'c_block_size') + return space.wrap(block_size) def _digest(self, space): with lltype.scoped_alloc(ropenssl.EVP_MD_CTX.TO) as ctx: @@ -99,36 +102,6 @@ ropenssl.EVP_MD_CTX_cleanup(ctx) return rffi.charpsize2str(digest, digest_size) - def compute_digest_size(self): - # XXX This isn't the nicest way, but the EVP_MD_size OpenSSL - # XXX function is defined as a C macro on OS X and would be - # XXX significantly harder to implement in another way. - # Values are digest sizes in bytes - return { - 'md5': 16, 'MD5': 16, - 'sha1': 20, 'SHA1': 20, - 'sha224': 28, 'SHA224': 28, - 'sha256': 32, 'SHA256': 32, - 'sha384': 48, 'SHA384': 48, - 'sha512': 64, 'SHA512': 64, - }.get(self.name, 0) - - def compute_block_size(self): - if self._block_size != -1: - return self._block_size - # XXX This isn't the nicest way, but the EVP_MD_CTX_block_size - # XXX OpenSSL function is defined as a C macro on some systems - # XXX and would be significantly harder to implement in - # XXX another way. 
- self._block_size = { - 'md5': 64, 'MD5': 64, - 'sha1': 64, 'SHA1': 64, - 'sha224': 64, 'SHA224': 64, - 'sha256': 64, 'SHA256': 64, - 'sha384': 128, 'SHA384': 128, - 'sha512': 128, 'SHA512': 128, - }.get(self.name, 0) - return self._block_size W_Hash.typedef = TypeDef( 'HASH', @@ -142,11 +115,11 @@ digestsize=GetSetProperty(W_Hash.get_digest_size), block_size=GetSetProperty(W_Hash.get_block_size), ) +W_Hash.acceptable_as_base_class = False @unwrap_spec(name=str, string='bufferstr') def new(space, name, string=''): w_hash = W_Hash(space, name) - w_hash.initdigest(space, name) w_hash.update(space, string) return space.wrap(w_hash) @@ -158,6 +131,6 @@ return new(space, name, string) return new_hash -for name in algorithms: - newname = 'new_%s' % (name,) - globals()[newname] = make_new_hash(name, newname) +for _name in algorithms: + _newname = 'new_%s' % (_name,) + globals()[_newname] = make_new_hash(_name, _newname) diff --git a/pypy/rlib/ropenssl.py b/pypy/rlib/ropenssl.py --- a/pypy/rlib/ropenssl.py +++ b/pypy/rlib/ropenssl.py @@ -110,6 +110,10 @@ 'struct GENERAL_NAME_st', [('type', rffi.INT), ]) + EVP_MD_st = rffi_platform.Struct( + 'EVP_MD', + [('md_size', rffi.INT), + ('block_size', rffi.INT)]) EVP_MD_SIZE = rffi_platform.SizeOf('EVP_MD') EVP_MD_CTX_SIZE = rffi_platform.SizeOf('EVP_MD_CTX') @@ -258,7 +262,7 @@ [BIO, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], X509) EVP_MD_CTX = rffi.COpaquePtr('EVP_MD_CTX', compilation_info=eci) -EVP_MD = rffi.COpaquePtr('EVP_MD', compilation_info=eci) +EVP_MD = lltype.Ptr(EVP_MD_st) OpenSSL_add_all_digests = external( 'OpenSSL_add_all_digests', [], lltype.Void) From noreply at buildbot.pypy.org Sun Dec 11 13:23:15 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 13:23:15 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: Update the demo. Message-ID: <20111211122315.E3E8A82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50376:f07d5e9e9d7a Date: 2011-12-11 12:50 +0100 http://bitbucket.org/pypy/pypy/changeset/f07d5e9e9d7a/ Log: Update the demo. 
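Before the demo diff below, a cross-check of the _hashlib commit above: the hard-coded digest/block-size tables are gone and the numbers are now read from OpenSSL's EVP_MD (the md_size/block_size fields declared by the new EVP_MD_st in ropenssl.py). CPython's hashlib exposes the same two attributes, so the removed table values can be verified with a throwaway snippet (not pypy code):

import hashlib

for name in ("md5", "sha1", "sha224", "sha256", "sha384", "sha512"):
    h = hashlib.new(name)
    # prints e.g. "md5 digest_size=16 block_size=64", matching the deleted table
    print("%-6s digest_size=%-3d block_size=%d"
          % (name, h.digest_size, h.block_size))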
diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -2,13 +2,15 @@ pypyjit.set_param(threshold=200) +def g(*args): + return len(args) + def f(n): - pairs = [(0.0, 1.0), (2.0, 3.0)] * n - mag = 0 - for (x1, x2) in pairs: - dx = x1 - x2 - mag += ((dx * dx ) ** (-1.5)) - return n + s = 0 + for i in range(n): + l = [i, n, 2] + s += g(*l) + return s try: print f(301) From noreply at buildbot.pypy.org Sun Dec 11 13:23:17 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 13:23:17 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: hg merge default Message-ID: <20111211122317.DC21E82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50377:b70295020ce8 Date: 2011-12-11 12:50 +0100 http://bitbucket.org/pypy/pypy/changeset/b70295020ce8/ Log: hg merge default diff --git a/lib_pypy/distributed/socklayer.py b/lib_pypy/distributed/socklayer.py --- a/lib_pypy/distributed/socklayer.py +++ b/lib_pypy/distributed/socklayer.py @@ -2,7 +2,7 @@ import py from socket import socket -XXX needs import adaptation as 'green' is removed from py lib for years +raise ImportError("XXX needs import adaptation as 'green' is removed from py lib for years") from py.impl.green.msgstruct import decodemessage, message from socket import socket, AF_INET, SOCK_STREAM import marshal diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -496,6 +496,17 @@ def setup(self): super(AppClassCollector, self).setup() cls = self.obj + # + # + for name in dir(cls): + if name.startswith('test_'): + func = getattr(cls, name, None) + code = getattr(func, 'func_code', None) + if code and code.co_flags & 32: + raise AssertionError("unsupported: %r is a generator " + "app-level test method" % (name,)) + # + # space = cls.space clsname = cls.__name__ if self.config.option.runappdirect: diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.6' +version = '1.7' # The full version, including alpha/beta/rc tags. -release = '1.6' +release = '1.7' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -487,6 +487,16 @@ 'parser', 'fcntl', '_codecs', 'binascii' ] + # These modules are treated like CPython treats built-in modules, + # i.e. they always shadow any xx.py. The other modules are treated + # like CPython treats extension modules, and are loaded in sys.path + # order by the fake entry '.../lib_pypy/__extensions__'. + MODULES_THAT_ALWAYS_SHADOW = dict.fromkeys([ + '__builtin__', '__pypy__', '_ast', '_codecs', '_sre', '_warnings', + '_weakref', 'errno', 'exceptions', 'gc', 'imp', 'marshal', + 'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport', + ], None) + def make_builtins(self): "NOT_RPYTHON: only for initializing the space." 
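The conftest.py hunk merged above hinges on one detail: bit 32 in co_flags is CO_GENERATOR (the same bit inspect.isgeneratorfunction() tests), so the check rejects app-level test methods accidentally written as generators before the framework silently misruns them. A standalone sketch of the same check, using an invented test class rather than pypy's test suite:

CO_GENERATOR = 32   # co_flags bit set on generator functions

class FakeTests(object):             # illustrative only
    def test_plain(self):
        assert 1 + 1 == 2
    def test_oops(self):             # a generator "test": its body never runs
        yield 1

for name in dir(FakeTests):
    if name.startswith('test_'):
        func = getattr(FakeTests, name)
        code = getattr(func, 'func_code', None) or getattr(func, '__code__', None)
        if code is not None and code.co_flags & CO_GENERATOR:
            print("rejecting generator test method: %s" % name)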
diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -16,35 +16,101 @@ """ Manage frame positions """ def __init__(self): - self.frame_bindings = {} - self.frame_depth = 0 + self.bindings = {} + self.used = [] # list of bools + self.hint_frame_locations = {} + + frame_depth = property(lambda:xxx, lambda:xxx) # XXX kill me + + def get_frame_depth(self): + return len(self.used) def get(self, box): - return self.frame_bindings.get(box, None) + return self.bindings.get(box, None) def loc(self, box): + """Return or create the frame location associated with 'box'.""" + # first check if it's already in the frame_manager try: - return self.frame_bindings[box] + return self.bindings[box] except KeyError: - return self.get_new_loc(box) + pass + # check if we have a hint for this box + if box in self.hint_frame_locations: + # if we do, try to reuse the location for this box + loc = self.hint_frame_locations[box] + if self.try_to_reuse_location(box, loc): + return loc + # no valid hint. make up a new free location + return self.get_new_loc(box) def get_new_loc(self, box): size = self.frame_size(box.type) - self.frame_depth += ((-self.frame_depth) & (size-1)) - # ^^^ frame_depth is rounded up to a multiple of 'size', assuming + # frame_depth is rounded up to a multiple of 'size', assuming # that 'size' is a power of two. The reason for doing so is to # avoid obscure issues in jump.py with stack locations that try # to move from position (6,7) to position (7,8). - newloc = self.frame_pos(self.frame_depth, box.type) - self.frame_bindings[box] = newloc - self.frame_depth += size + while self.get_frame_depth() & (size - 1): + self.used.append(False) + # + index = self.get_frame_depth() + newloc = self.frame_pos(index, box.type) + for i in range(size): + self.used.append(True) + # + if not we_are_translated(): # extra testing + testindex = self.get_loc_index(newloc) + assert testindex == index + # + self.bindings[box] = newloc return newloc + def set_binding(self, box, loc): + self.bindings[box] = loc + # + index = self.get_loc_index(loc) + endindex = index + self.frame_size(box.type) + while len(self.used) < endindex: + self.used.append(False) + while index < endindex: + self.used[index] = True + index += 1 + def reserve_location_in_frame(self, size): - frame_depth = self.frame_depth - self.frame_depth += size + frame_depth = self.get_frame_depth() + for i in range(size): + self.used.append(True) return frame_depth + def mark_as_free(self, box): + try: + loc = self.bindings[box] + except KeyError: + return # already gone + del self.bindings[box] + # + size = self.frame_size(box.type) + baseindex = self.get_loc_index(loc) + for i in range(size): + index = baseindex + i + assert 0 <= index < len(self.used) + self.used[index] = False + + def try_to_reuse_location(self, box, loc): + index = self.get_loc_index(loc) + assert index >= 0 + size = self.frame_size(box.type) + for i in range(size): + while (index + i) >= len(self.used): + self.used.append(False) + if self.used[index + i]: + return False # already in use + # good, we can reuse the location + for i in range(size): + self.used[index + i] = True + self.bindings[box] = loc + return True + # abstract methods that need to be overwritten for specific assemblers @staticmethod def frame_pos(loc, type): @@ -52,6 +118,10 @@ @staticmethod def frame_size(type): return 1 + @staticmethod + def get_loc_index(loc): + raise 
NotImplementedError("Purely abstract") + class RegisterManager(object): """ Class that keeps track of register allocations @@ -70,8 +140,6 @@ self.position = -1 self.frame_manager = frame_manager self.assembler = assembler - self.hint_frame_locations = {} # {Box: StackLoc} - self.freed_frame_locations = {} # {StackLoc: None} def is_still_alive(self, v): # Check if 'v' is alive at the current position. @@ -103,9 +171,7 @@ self.free_regs.append(self.reg_bindings[v]) del self.reg_bindings[v] if self.frame_manager is not None: - if v in self.frame_manager.frame_bindings: - loc = self.frame_manager.frame_bindings[v] - self.freed_frame_locations[loc] = None + self.frame_manager.mark_as_free(v) def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. @@ -177,23 +243,6 @@ self.reg_bindings[v] = loc return loc - def _frame_loc(self, v): - # first check if it's already in the frame_manager - try: - return self.frame_manager.frame_bindings[v] - except KeyError: - pass - # check if we have a hint for this box - if v in self.hint_frame_locations: - # if we do, check that the hinted location is known to be free - loc = self.hint_frame_locations[v] - if loc in self.freed_frame_locations: - del self.freed_frame_locations[loc] - self.frame_manager.frame_bindings[v] = loc - return loc - # no valid hint. make up a new free location - return self.frame_manager.get_new_loc(v) - def _spill_var(self, v, forbidden_vars, selected_reg, need_lower_byte=False): v_to_spill = self._pick_variable_to_spill(v, forbidden_vars, @@ -201,7 +250,7 @@ loc = self.reg_bindings[v_to_spill] del self.reg_bindings[v_to_spill] if self.frame_manager.get(v_to_spill) is None: - newloc = self._frame_loc(v_to_spill) + newloc = self.frame_manager.loc(v_to_spill) self.assembler.regalloc_mov(loc, newloc) return loc @@ -278,7 +327,7 @@ except KeyError: if box in self.bindings_to_frame_reg: return self.frame_reg - return self._frame_loc(box) + return self.frame_manager.loc(box) def return_constant(self, v, forbidden_vars=[], selected_reg=None): """ Return the location of the constant v. 
If 'selected_reg' is @@ -326,7 +375,7 @@ self.reg_bindings[v] = loc self.assembler.regalloc_mov(prev_loc, loc) else: - loc = self._frame_loc(v) + loc = self.frame_manager.loc(v) self.assembler.regalloc_mov(prev_loc, loc) def force_result_in_reg(self, result_v, v, forbidden_vars=[]): @@ -345,7 +394,7 @@ self.reg_bindings[result_v] = loc return loc if v not in self.reg_bindings: - prev_loc = self._frame_loc(v) + prev_loc = self.frame_manager.loc(v) loc = self.force_allocate_reg(v, forbidden_vars) self.assembler.regalloc_mov(prev_loc, loc) assert v in self.reg_bindings @@ -365,7 +414,7 @@ def _sync_var(self, v): if not self.frame_manager.get(v): reg = self.reg_bindings[v] - to = self._frame_loc(v) + to = self.frame_manager.loc(v) self.assembler.regalloc_mov(reg, to) # otherwise it's clean diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -42,8 +42,13 @@ def frame_size(self, box_type): if box_type == FLOAT: return 2 + elif box_type == INT: + return 1 else: - return 1 + raise ValueError(box_type) + def get_loc_index(self, loc): + assert isinstance(loc, FakeFramePos) + return loc.pos class MockAsm(object): def __init__(self): @@ -282,7 +287,7 @@ rm.force_allocate_reg(b) rm.before_call() assert len(rm.reg_bindings) == 2 - assert fm.frame_depth == 2 + assert fm.get_frame_depth() == 2 assert len(asm.moves) == 2 rm._check_invariants() rm.after_call(boxes[-1]) @@ -305,7 +310,7 @@ rm.force_allocate_reg(b) rm.before_call(save_all_regs=True) assert len(rm.reg_bindings) == 0 - assert fm.frame_depth == 4 + assert fm.get_frame_depth() == 4 assert len(asm.moves) == 4 rm._check_invariants() rm.after_call(boxes[-1]) @@ -327,7 +332,7 @@ xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm) xrm.loc(f0) rm.loc(b0) - assert fm.frame_depth == 3 + assert fm.get_frame_depth() == 3 @@ -351,20 +356,14 @@ def test_hint_frame_locations_1(self): - b0, b1 = newboxes(0, 1) - longevity = {b0: (0, 1), b1: (0, 1)} + b0, = newboxes(0) fm = TFrameManager() - asm = MockAsm() - rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) - rm.hint_frame_locations[b0] = "some_stack_loc" - rm.freed_frame_locations["some_stack_loc"] = None - rm.force_allocate_reg(b0) - rm.force_allocate_reg(b1) - rm.force_spill_var(b0) - rm.force_spill_var(b1) - assert rm.loc(b0) == "some_stack_loc" - assert isinstance(rm.loc(b1), FakeFramePos) - rm._check_invariants() + loc123 = FakeFramePos(123, INT) + fm.hint_frame_locations[b0] = loc123 + assert fm.get_frame_depth() == 0 + loc = fm.loc(b0) + assert loc == loc123 + assert fm.get_frame_depth() == 124 def test_hint_frame_locations_2(self): b0, b1, b2 = newboxes(0, 1, 2) @@ -378,20 +377,99 @@ rm.force_spill_var(b0) loc = rm.loc(b0) assert isinstance(loc, FakeFramePos) + assert fm.get_loc_index(loc) == 0 rm.position = 1 - assert loc not in rm.freed_frame_locations + assert fm.used == [True] rm.possibly_free_var(b0) - assert loc in rm.freed_frame_locations + assert fm.used == [False] # - rm.hint_frame_locations[b1] = loc + fm.hint_frame_locations[b1] = loc rm.force_spill_var(b1) loc1 = rm.loc(b1) - assert loc1 is loc - assert rm.freed_frame_locations == {} + assert loc1 == loc + assert fm.used == [True] # - rm.hint_frame_locations[b2] = loc + fm.hint_frame_locations[b2] = loc rm.force_spill_var(b2) loc2 = rm.loc(b2) - assert loc2 is not loc1 # because it's not in freed_frame_locations + assert loc2 != loc1 # 
because it was not free + assert fm.used == [True, True] # rm._check_invariants() + + def test_frame_manager_basic(self): + b0, b1 = newboxes(0, 1) + fm = TFrameManager() + loc0 = fm.loc(b0) + assert fm.get_loc_index(loc0) == 0 + # + assert fm.get(b1) is None + loc1 = fm.loc(b1) + assert fm.get_loc_index(loc1) == 1 + assert fm.get(b1) == loc1 + # + loc0b = fm.loc(b0) + assert loc0b == loc0 + # + fm.loc(BoxInt()) + assert fm.get_frame_depth() == 3 + # + f0 = BoxFloat() + locf0 = fm.loc(f0) + assert fm.get_loc_index(locf0) == 4 + assert fm.get_frame_depth() == 6 + # + f1 = BoxFloat() + locf1 = fm.loc(f1) + assert fm.get_loc_index(locf1) == 6 + assert fm.get_frame_depth() == 8 + assert fm.used == [True, True, True, False, True, True, True, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(f1) + assert fm.used == [False, True, True, False, True, True, False, False] + # + fm.reserve_location_in_frame(1) + assert fm.get_frame_depth() == 9 + assert fm.used == [False, True, True, False, True, True, False, False, True] + # + assert b0 not in fm.bindings + fm.set_binding(b0, loc0) + assert b0 in fm.bindings + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + b3 = BoxInt() + assert not fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) # already free + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b3) + assert fm.used == [False, True, True, False, True, True, False, False, True] + f3 = BoxFloat() + assert not fm.try_to_reuse_location(f3, fm.frame_pos(0, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(1, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(2, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(3, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(4, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(5, FLOAT)) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(f3, fm.frame_pos(6, FLOAT)) + assert fm.used == [False, True, True, False, True, True, True, True, True] + # + fm.used = [False] + assert fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True, True] + # + fm.used = [True] + assert not fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True] diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -694,7 +694,7 @@ regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging - frame_depth = regalloc.fm.frame_depth + frame_depth = regalloc.fm.get_frame_depth() param_depth = regalloc.param_depth jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -138,6 +138,10 @@ 
return 2 else: return 1 + @staticmethod + def get_loc_index(loc): + assert isinstance(loc, StackLoc) + return loc.position if WORD == 4: gpr_reg_mgr_cls = X86RegisterManager @@ -184,7 +188,6 @@ allgcrefs): operations, _ = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) - self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] return operations @@ -307,7 +310,7 @@ self.xrm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) else: if isinstance(loc, RegLoc): if loc is ebp: @@ -316,7 +319,7 @@ self.rm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) self.rm.free_regs = [] for reg in self.rm.all_regs: if reg not in used: @@ -352,7 +355,7 @@ def get_current_depth(self): # return (self.fm.frame_depth, self.param_depth), but trying to share # the resulting tuple among several calls - arg0 = self.fm.frame_depth + arg0 = self.fm.get_frame_depth() arg1 = self.param_depth result = self.assembler._current_depths_cache if result[0] != arg0 or result[1] != arg1: @@ -1334,12 +1337,12 @@ loc = nonfloatlocs[i] if isinstance(loc, StackLoc): assert box.type != FLOAT - self.rm.hint_frame_locations[box] = loc + self.fm.hint_frame_locations[box] = loc else: loc = floatlocs[i] if isinstance(loc, StackLoc): assert box.type == FLOAT - self.xrm.hint_frame_locations[box] = loc + self.fm.hint_frame_locations[box] = loc def consider_jump(self, op): assembler = self.assembler @@ -1385,7 +1388,7 @@ def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): shape = gcrootmap.get_basic_shape(IS_X86_64) - for v, val in self.fm.frame_bindings.items(): + for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) gcrootmap.add_frame_offset(shape, get_ebp_ofs(val.position)) diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -42,6 +42,7 @@ i5 = int_add(i4, 1) i6 = int_add(i5, 1) i7 = int_add(i5, i4) + force_spill(i5) i8 = int_add(i7, 1) i9 = int_add(i8, 1) finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2) @@ -49,10 +50,9 @@ bridge = self.attach_bridge(ops, loop, -2) descr = loop.operations[2].getdescr() new = descr._x86_bridge_frame_depth - assert descr._x86_bridge_param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? - if IS_X86_32: - assert new > previous + assert descr._x86_bridge_param_depth == 0 + # the force_spill() forces the stack to grow + assert new > previous self.cpu.set_future_value_int(0, 0) fail = self.run(loop) assert fail.identifier == 2 @@ -104,6 +104,9 @@ i8 = int_add(i3, 1) i6 = int_add(i8, i10) i7 = int_add(i3, i6) + force_spill(i6) + force_spill(i7) + force_spill(i8) i12 = int_add(i7, i8) i11 = int_add(i12, i6) jump(i3, i12, i11, i10, i6, i7, descr=looptoken) @@ -112,9 +115,8 @@ guard_op = loop.operations[5] loop_frame_depth = loop.token._x86_frame_depth assert loop.token._x86_param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? 
- if IS_X86_32: - assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth + # the force_spill() forces the stack to grow + assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth assert guard_op.getdescr()._x86_bridge_param_depth == 0 self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 0) diff --git a/pypy/jit/metainterp/test/test_del.py b/pypy/jit/metainterp/test/test_del.py --- a/pypy/jit/metainterp/test/test_del.py +++ b/pypy/jit/metainterp/test/test_del.py @@ -1,5 +1,7 @@ import py -from pypy.rlib.jit import JitDriver +from pypy.rlib.jit import JitDriver, dont_look_inside +from pypy.rlib.objectmodel import keepalive_until_here +from pypy.rlib import rgc from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin @@ -80,6 +82,47 @@ assert res == 1 self.check_resops(call=1) # for the case B(), but not for the case A() + def test_keepalive(self): + py.test.skip("XXX fails") # hum, I think the test itself is broken + # + mydriver = JitDriver(reds = ['n', 'states'], greens = []) + class State: + num = 1 + class X: + def __init__(self, state): + self.state = state + def __del__(self): + self.state.num += 1 + @dont_look_inside + def do_stuff(): + pass + def f(n): + states = [] + while n > 0: + mydriver.jit_merge_point(n=n, states=states) + state = State() + states.append(state) + x = X(state) + do_stuff() + state.num *= 1000 + do_stuff() + keepalive_until_here(x) + n -= 1 + return states + def main(n): + states = f(n) + rgc.collect() + rgc.collect() + err = 1001 + for state in states: + if state.num != 1001: + err = state.num + print 'ERROR:', err + return err + assert main(20) == 1001 + res = self.meta_interp(main, [20]) + assert res == 1001 + class TestLLtype(DelTests, LLJitMixin): def test_signal_action(self): diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -1,3 +1,4 @@ +from __future__ import with_statement import py from pypy.jit.metainterp.test.support import LLJitMixin diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py --- a/pypy/module/_hashlib/interp_hashlib.py +++ b/pypy/module/_hashlib/interp_hashlib.py @@ -21,11 +21,11 @@ class W_Hash(Wrappable): ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO) - _block_size = -1 def __init__(self, space, name): self.name = name - self.digest_size = self.compute_digest_size() + digest_type = self.digest_type_by_name(space) + self.digest_size = rffi.getintfield(digest_type, 'c_md_size') # Allocate a lock for each HASH object. 
# An optimization would be to not release the GIL on small requests, @@ -34,21 +34,22 @@ ctx = lltype.malloc(ropenssl.EVP_MD_CTX.TO, flavor='raw') rgc.add_memory_pressure(HASH_MALLOC_SIZE + self.digest_size) + ropenssl.EVP_DigestInit(ctx, digest_type) self.ctx = ctx - def initdigest(self, space, name): - digest = ropenssl.EVP_get_digestbyname(name) - if not digest: - raise OperationError(space.w_ValueError, - space.wrap("unknown hash function")) - ropenssl.EVP_DigestInit(self.ctx, digest) - def __del__(self): # self.lock.free() if self.ctx: ropenssl.EVP_MD_CTX_cleanup(self.ctx) lltype.free(self.ctx, flavor='raw') + def digest_type_by_name(self, space): + digest_type = ropenssl.EVP_get_digestbyname(self.name) + if not digest_type: + raise OperationError(space.w_ValueError, + space.wrap("unknown hash function")) + return digest_type + def descr_repr(self, space): addrstring = self.getaddrstring(space) return space.wrap("<%s HASH object at 0x%s>" % ( @@ -87,7 +88,9 @@ return space.wrap(self.digest_size) def get_block_size(self, space): - return space.wrap(self.compute_block_size()) + digest_type = self.digest_type_by_name(space) + block_size = rffi.getintfield(digest_type, 'c_block_size') + return space.wrap(block_size) def _digest(self, space): with lltype.scoped_alloc(ropenssl.EVP_MD_CTX.TO) as ctx: @@ -99,36 +102,6 @@ ropenssl.EVP_MD_CTX_cleanup(ctx) return rffi.charpsize2str(digest, digest_size) - def compute_digest_size(self): - # XXX This isn't the nicest way, but the EVP_MD_size OpenSSL - # XXX function is defined as a C macro on OS X and would be - # XXX significantly harder to implement in another way. - # Values are digest sizes in bytes - return { - 'md5': 16, 'MD5': 16, - 'sha1': 20, 'SHA1': 20, - 'sha224': 28, 'SHA224': 28, - 'sha256': 32, 'SHA256': 32, - 'sha384': 48, 'SHA384': 48, - 'sha512': 64, 'SHA512': 64, - }.get(self.name, 0) - - def compute_block_size(self): - if self._block_size != -1: - return self._block_size - # XXX This isn't the nicest way, but the EVP_MD_CTX_block_size - # XXX OpenSSL function is defined as a C macro on some systems - # XXX and would be significantly harder to implement in - # XXX another way. 
- self._block_size = { - 'md5': 64, 'MD5': 64, - 'sha1': 64, 'SHA1': 64, - 'sha224': 64, 'SHA224': 64, - 'sha256': 64, 'SHA256': 64, - 'sha384': 128, 'SHA384': 128, - 'sha512': 128, 'SHA512': 128, - }.get(self.name, 0) - return self._block_size W_Hash.typedef = TypeDef( 'HASH', @@ -142,11 +115,11 @@ digestsize=GetSetProperty(W_Hash.get_digest_size), block_size=GetSetProperty(W_Hash.get_block_size), ) +W_Hash.acceptable_as_base_class = False @unwrap_spec(name=str, string='bufferstr') def new(space, name, string=''): w_hash = W_Hash(space, name) - w_hash.initdigest(space, name) w_hash.update(space, string) return space.wrap(w_hash) @@ -158,6 +131,6 @@ return new(space, name, string) return new_hash -for name in algorithms: - newname = 'new_%s' % (name,) - globals()[newname] = make_new_hash(name, newname) +for _name in algorithms: + _newname = 'new_%s' % (_name,) + globals()[_newname] = make_new_hash(_name, _newname) diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py --- a/pypy/module/_hashlib/test/test_hashlib.py +++ b/pypy/module/_hashlib/test/test_hashlib.py @@ -79,3 +79,28 @@ assert h.digest() == _hashlib.openssl_md5('x' * 20).digest() _hashlib.openssl_sha1(b).digest() + def test_extra_algorithms(self): + expected_results = { + "md5": "bb649c83dd1ea5c9d9dec9a18df0ffe9", + "md4": "c275b8454684ea416b93d7a418b43176", + "mdc2": None, # XXX find the correct expected value + "sha": "e2b0a8609b47c58e5d984c9ccfe69f9b654b032b", + "ripemd160": "cc4a5ce1b3df48aec5d22d1f16b894a0b894eccc", + "whirlpool": ("1a22b79fe5afda02c63a25927193ed01dc718b74" + "026e597608ce431f9c3d2c9e74a7350b7fbb7c5d" + "4effe5d7a31879b8b7a10fd2f544c4ca268ecc6793923583"), + } + import _hashlib + test_string = "Nobody inspects the spammish repetition" + for hash_name, expected in sorted(expected_results.items()): + try: + m = _hashlib.new(hash_name) + except ValueError, e: + print 'skipped %s: %s' % (hash_name, e) + continue + m.update(test_string) + got = m.hexdigest() + assert got and type(got) is str and len(got) % 2 == 0 + got.decode('hex') + if expected is not None: + assert got == expected diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -864,6 +864,7 @@ elif sys.platform.startswith('linux'): compile_extra.append("-Werror=implicit-function-declaration") export_symbols_eci.append('pypyAPI') + compile_extra.append('-g') else: kwds["includes"] = ['Python.h'] # this is our Python.h diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -483,10 +483,20 @@ # XXX Check for frozen modules? # when w_path is a string + delayed_builtin = None + w_lib_extensions = None + if w_path is None: # check the builtin modules if modulename in space.builtin_modules: - return FindInfo(C_BUILTIN, modulename, None) + delayed_builtin = FindInfo(C_BUILTIN, modulename, None) + # a "real builtin module xx" shadows every file "xx.py" there + # could possibly be; a "pseudo-extension module" does not, and + # is only loaded at the point in sys.path where we find + # '.../lib_pypy/__extensions__'. + if modulename in space.MODULES_THAT_ALWAYS_SHADOW: + return delayed_builtin + w_lib_extensions = space.sys.get_state(space).w_lib_extensions w_path = space.sys.get('path') # XXX check frozen modules? 
@@ -495,6 +505,9 @@ if w_path is not None: for w_pathitem in space.unpackiterable(w_path): # sys.path_hooks import hook + if (w_lib_extensions is not None and + space.eq_w(w_pathitem, w_lib_extensions)): + return delayed_builtin if use_loader: w_loader = find_in_path_hooks(space, w_modulename, w_pathitem) if w_loader: @@ -527,7 +540,7 @@ # Out of file descriptors. # not found - return None + return delayed_builtin def _prepare_module(space, w_mod, filename, pkgdir): w = space.wrap diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -38,6 +38,8 @@ test_reload = "def test():\n raise ValueError\n", infinite_reload = "import infinite_reload; reload(infinite_reload)", del_sys_module = "import sys\ndel sys.modules['del_sys_module']\n", + itertools = "hello_world = 42\n", + gc = "should_never_be_seen = 42\n", ) root.ensure("notapackage", dir=1) # empty, no __init__.py setuppkg("pkg", @@ -147,6 +149,8 @@ class AppTestImport: def setup_class(cls): # interpreter-level + cls.space = gettestobjspace(usemodules=['itertools']) + cls.w_runappdirect = cls.space.wrap(conftest.option.runappdirect) cls.saved_modules = _setup(cls.space) #XXX Compile class @@ -571,6 +575,50 @@ else: assert False, 'should not work' + def test_shadow_builtin(self): + if self.runappdirect: skip("hard to test: module is already imported") + # 'import gc' is supposed to always find the built-in module; + # like CPython, it is a built-in module, so it shadows everything, + # even though there is a gc.py. + import sys + assert 'gc' not in sys.modules + import gc + assert not hasattr(gc, 'should_never_be_seen') + assert '(built-in)' in repr(gc) + del sys.modules['gc'] + + def test_shadow_extension_1(self): + if self.runappdirect: skip("hard to test: module is already imported") + # 'import itertools' is supposed to find itertools.py if there is + # one in sys.path. + import sys + assert 'itertools' not in sys.modules + import itertools + assert hasattr(itertools, 'hello_world') + assert not hasattr(itertools, 'count') + assert '(built-in)' not in repr(itertools) + del sys.modules['itertools'] + + def test_shadow_extension_2(self): + if self.runappdirect: skip("hard to test: module is already imported") + # 'import itertools' is supposed to find the built-in module even + # if there is also one in sys.path as long as it is *after* the + # special entry '.../lib_pypy/__extensions__'. (Note that for now + # there is one in lib_pypy/itertools.py, which should not be seen + # either; hence the (built-in) test below.) 
+ import sys + assert 'itertools' not in sys.modules + sys.path.append(sys.path.pop(0)) + try: + import itertools + assert not hasattr(itertools, 'hello_world') + assert hasattr(itertools, 'izip') + assert '(built-in)' in repr(itertools) + finally: + sys.path.insert(0, sys.path.pop()) + del sys.modules['itertools'] + + class TestAbi: def test_abi_tag(self): space1 = gettestobjspace(soabi='TEST') diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -24,11 +24,16 @@ 'number': 'interp_boxes.W_NumberBox', 'integer': 'interp_boxes.W_IntegerBox', 'signedinteger': 'interp_boxes.W_SignedIntegerBox', + 'unsignedinteger': 'interp_boxes.W_UnsignedIntegerBox', 'bool_': 'interp_boxes.W_BoolBox', 'int8': 'interp_boxes.W_Int8Box', + 'uint8': 'interp_boxes.W_UInt8Box', 'int16': 'interp_boxes.W_Int16Box', + 'uint16': 'interp_boxes.W_UInt16Box', 'int32': 'interp_boxes.W_Int32Box', + 'uint32': 'interp_boxes.W_UInt32Box', 'int64': 'interp_boxes.W_Int64Box', + 'uint64': 'interp_boxes.W_UInt64Box', 'int_': 'interp_boxes.W_LongBox', 'inexact': 'interp_boxes.W_InexactBox', 'floating': 'interp_boxes.W_FloatingBox', diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -38,6 +38,7 @@ w_ValueError = None w_TypeError = None w_IndexError = None + w_OverflowError = None w_None = None w_bool = "bool" @@ -149,6 +150,10 @@ # XXX array probably assert False + def exception_match(self, w_exc_type, w_check_class): + # Good enough for now + raise NotImplementedError + class FloatObject(W_Root): tp = FakeSpace.w_float def __init__(self, floatval): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -91,6 +91,9 @@ descr_neg = _unaryop_impl("negative") descr_abs = _unaryop_impl("absolute") + def descr_tolist(self, space): + return self.get_dtype(space).itemtype.to_builtin_type(space, self) + class W_BoolBox(W_GenericBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("bool") @@ -104,38 +107,38 @@ class W_SignedIntegerBox(W_IntegerBox): pass -class W_UnsignedIntgerBox(W_IntegerBox): +class W_UnsignedIntegerBox(W_IntegerBox): pass class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int8") -class W_UInt8Box(W_UnsignedIntgerBox, PrimitiveBox): +class W_UInt8Box(W_UnsignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("uint8") class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int16") -class W_UInt16Box(W_UnsignedIntgerBox, PrimitiveBox): +class W_UInt16Box(W_UnsignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("uint16") class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int32") -class W_UInt32Box(W_UnsignedIntgerBox, PrimitiveBox): +class W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("uint32") class W_LongBox(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("long") -class W_ULongBox(W_UnsignedIntgerBox, PrimitiveBox): +class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): pass class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int64") -class 
W_UInt64Box(W_UnsignedIntgerBox, PrimitiveBox): - pass +class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint64") class W_InexactBox(W_NumberBox): _attrs_ = () @@ -179,6 +182,8 @@ __neg__ = interp2app(W_GenericBox.descr_neg), __abs__ = interp2app(W_GenericBox.descr_abs), + + tolist = interp2app(W_GenericBox.descr_tolist), ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, @@ -198,13 +203,18 @@ __module__ = "numpypy", ) +W_UnsignedIntegerBox.typedef = TypeDef("unsignedinteger", W_IntegerBox.typedef, + __module__ = "numpypy", +) + W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_Int8Box.descr__new__.im_func), ) -W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntgerBox.typedef, +W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = interp2app(W_UInt8Box.descr__new__.im_func), ) W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, @@ -212,8 +222,9 @@ __new__ = interp2app(W_Int16Box.descr__new__.im_func), ) -W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntgerBox.typedef, +W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = interp2app(W_UInt16Box.descr__new__.im_func), ) W_Int32Box.typedef = TypeDef("int32", W_SignedIntegerBox.typedef, @@ -221,8 +232,9 @@ __new__ = interp2app(W_Int32Box.descr__new__.im_func), ) -W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntgerBox.typedef, +W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = interp2app(W_UInt32Box.descr__new__.im_func), ) if LONG_BIT == 32: @@ -233,7 +245,7 @@ __module__ = "numpypy", ) -W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntgerBox.typedef, +W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntegerBox.typedef, __module__ = "numpypy", ) @@ -242,8 +254,9 @@ __new__ = interp2app(W_Int64Box.descr__new__.im_func), ) -W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntgerBox.typedef, +W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = interp2app(W_UInt64Box.descr__new__.im_func), ) W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -876,6 +876,17 @@ arr.setshape(space, new_shape) return arr + def descr_tolist(self, space): + if len(self.shape) == 0: + assert isinstance(self, Scalar) + return self.value.descr_tolist(space) + w_result = space.newlist([]) + for i in range(self.shape[0]): + space.call_method(w_result, "append", + space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist") + ) + return w_result + def descr_mean(self, space): return space.div(self.descr_sum(space), space.wrap(self.find_size())) @@ -1485,6 +1496,7 @@ copy = interp2app(BaseArray.descr_copy), reshape = interp2app(BaseArray.descr_reshape), + tolist = interp2app(BaseArray.descr_tolist), ) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -174,6 +174,8 @@ raises(TypeError, numpy.integer, 0) exc = raises(TypeError, numpy.signedinteger, 0) assert str(exc.value) == "cannot create 'signedinteger' instances" + exc = raises(TypeError, 
numpy.unsignedinteger, 0) + assert str(exc.value) == "cannot create 'unsignedinteger' instances" raises(TypeError, numpy.floating, 0) raises(TypeError, numpy.inexact, 0) @@ -210,17 +212,54 @@ assert type(int(x)) is int assert int(x) == -128 + def test_uint8(self): + import numpypy as numpy + + assert numpy.uint8.mro() == [numpy.uint8, numpy.unsignedinteger, numpy.integer, numpy.number, numpy.generic, object] + + a = numpy.array([1, 2, 3], numpy.uint8) + assert type(a[1]) is numpy.uint8 + assert numpy.dtype("uint8").type is numpy.uint8 + + x = numpy.uint8(128) + assert x == 128 + assert x != -128 + assert type(x) is numpy.uint8 + assert repr(x) == "128" + + assert type(int(x)) is int + assert int(x) == 128 + + assert numpy.uint8(255) == 255 + assert numpy.uint8(256) == 0 + def test_int16(self): import numpypy as numpy x = numpy.int16(3) assert x == 3 + assert numpy.int16(32767) == 32767 + assert numpy.int16(32768) == -32768 + + def test_uint16(self): + import numpypy as numpy + + assert numpy.uint16(65535) == 65535 + assert numpy.uint16(65536) == 0 def test_int32(self): import numpypy as numpy x = numpy.int32(23) assert x == 23 + assert numpy.int32(2147483647) == 2147483647 + assert numpy.int32(2147483648) == -2147483648 + + def test_uint32(self): + import numpypy as numpy + + assert numpy.uint32(4294967295) == 4294967295 + assert numpy.uint32(4294967296) == 0 def test_int_(self): import numpypy as numpy @@ -240,6 +279,25 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 + assert numpy.int64(9223372036854775807) == 9223372036854775807 + raises(OverflowError, numpy.int64, 9223372036854775808) + + def test_uint64(self): + import sys + import numpypy as numpy + + assert numpy.uint64.mro() == [numpy.uint64, numpy.unsignedinteger, numpy.integer, numpy.number, numpy.generic, object] + + assert numpy.dtype(numpy.uint64).type is numpy.uint64 + skip("see comment") + # These tests pass "by chance" on numpy, things that are larger than + # platform long (i.e. a python int), don't get put in a normal box, + # instead they become an object array containing a long, we don't have + # yet, so these can't pass. 
+ assert numpy.uint64(9223372036854775808) == 9223372036854775808 + assert numpy.uint64(18446744073709551615) == 18446744073709551615 + raises(OverflowError, numpy.uint64(18446744073709551616)) + def test_float32(self): import numpypy as numpy diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -879,6 +879,45 @@ b[0] = 3 assert b.__debug_repr__() == 'Call2(add, forced=Array)' + def test_tolist_scalar(self): + from numpypy import int32, bool_ + x = int32(23) + assert x.tolist() == 23 + assert type(x.tolist()) is int + y = bool_(True) + assert y.tolist() is True + + def test_tolist_zerodim(self): + from numpypy import array + x = array(3) + assert x.tolist() == 3 + assert type(x.tolist()) is int + + def test_tolist_singledim(self): + from numpypy import array + a = array(range(5)) + assert a.tolist() == [0, 1, 2, 3, 4] + assert type(a.tolist()[0]) is int + b = array([0.2, 0.4, 0.6]) + assert b.tolist() == [0.2, 0.4, 0.6] + + def test_tolist_multidim(self): + from numpypy import array + a = array([[1, 2], [3, 4]]) + assert a.tolist() == [[1, 2], [3, 4]] + + def test_tolist_view(self): + from numpypy import array + a = array([[1,2],[3,4]]) + assert (a + a).tolist() == [[2, 4], [6, 8]] + + def test_tolist_slice(self): + from numpypy import array + a = array([[17.1, 27.2], [40.3, 50.3]]) + assert a[:,0].tolist() == [17.1, 40.3] + assert a[0].tolist() == [17.1, 27.2] + + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -185,7 +185,8 @@ # sure it was optimized correctly. # XXX the comment above is wrong now. 
We need preferrably a way to # count the two loops separately - self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, + self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, + 'getfield_gc': 35, 'getfield_gc_pure': 6, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1,6 +1,7 @@ import functools import math +from pypy.interpreter.error import OperationError from pypy.module.micronumpy import interp_boxes from pypy.objspace.std.floatobject import float2string from pypy.rlib import rfloat, libffi, clibffi @@ -77,6 +78,9 @@ w_obj.__init__(self._coerce(space, w_item).value) return w_obj + def to_builtin_type(self, space, box): + return space.wrap(self.unbox(box)) + def _coerce(self, space, w_item): raise NotImplementedError @@ -271,6 +275,19 @@ T = rffi.ULONGLONG BoxType = interp_boxes.W_UInt64Box + def _coerce(self, space, w_item): + try: + return Integer._coerce(self, space, w_item) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + bigint = space.bigint_w(w_item) + try: + value = bigint.toulonglong() + except OverflowError: + raise OperationError(space.w_OverflowError, space.w_None) + return self.box(value) + class Float(Primitive): _mixin_ = True diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -170,3 +170,7 @@ def get_flag(self, name): space = self.space return space.int_w(space.getattr(self.get('flags'), space.wrap(name))) + + def get_state(self, space): + from pypy.module.sys import state + return state.get(space) diff --git a/pypy/module/sys/state.py b/pypy/module/sys/state.py --- a/pypy/module/sys/state.py +++ b/pypy/module/sys/state.py @@ -24,7 +24,7 @@ # Initialize the default path pypydir = os.path.dirname(os.path.abspath(pypy.__file__)) srcdir = os.path.dirname(pypydir) - path = getinitialpath(srcdir) + path = getinitialpath(self, srcdir) self.w_path = space.newlist([space.wrap(p) for p in path]) def checkdir(path): @@ -35,7 +35,7 @@ platform = sys.platform -def getinitialpath(prefix): +def getinitialpath(state, prefix): from pypy.module.sys.version import CPYTHON_VERSION dirname = '%d.%d' % (CPYTHON_VERSION[0], CPYTHON_VERSION[1]) @@ -49,6 +49,12 @@ checkdir(lib_pypy) importlist = [] + # + if state is not None: # 'None' for testing only + lib_extensions = os.path.join(lib_pypy, '__extensions__') + state.w_lib_extensions = state.space.wrap(lib_extensions) + importlist.append(lib_extensions) + # importlist.append(lib_pypy) importlist.append(python_std_lib_modified) importlist.append(python_std_lib) @@ -71,7 +77,7 @@ @unwrap_spec(srcdir=str) def pypy_initial_path(space, srcdir): try: - path = getinitialpath(srcdir) + path = getinitialpath(get(space), srcdir) except OSError: return space.w_None else: diff --git a/pypy/module/sys/test/test_initialpath.py b/pypy/module/sys/test/test_initialpath.py --- a/pypy/module/sys/test/test_initialpath.py +++ b/pypy/module/sys/test/test_initialpath.py @@ -13,7 +13,7 @@ def test_stdlib_in_prefix(tmpdir): dirs = build_hierarchy(tmpdir) - path = getinitialpath(str(tmpdir)) + path = getinitialpath(None, str(tmpdir)) # we get at least 'dirs', and maybe more (e.g. 
plat-linux2) assert path[:len(dirs)] == map(str, dirs) @@ -21,7 +21,7 @@ lib_pypy, lib_python_modified, lib_python = build_hierarchy(tmpdir) lib_tk_modified = lib_python_modified.join('lib-tk') lib_tk = lib_python.join('lib-tk') - path = getinitialpath(str(tmpdir)) + path = getinitialpath(None, str(tmpdir)) i = path.index(str(lib_tk_modified)) j = path.index(str(lib_tk)) assert i < j diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -238,6 +238,7 @@ t = TranslationContext(config=config) self.t = t # for debugging ann = t.buildannotator() + ann.policy.allow_someobjects = False if func is not None: ann.build_types(func, argtypes, complete_now=False) # diff --git a/pypy/rlib/rdynload.py b/pypy/rlib/rdynload.py --- a/pypy/rlib/rdynload.py +++ b/pypy/rlib/rdynload.py @@ -115,7 +115,8 @@ if _WIN32: DLLHANDLE = rwin32.HMODULE - def dlopen(name): + def dlopen(name, mode=-1): + # mode is unused on windows, but a consistant signature res = rwin32.LoadLibrary(name) if not res: err = rwin32.GetLastError() diff --git a/pypy/rlib/ropenssl.py b/pypy/rlib/ropenssl.py --- a/pypy/rlib/ropenssl.py +++ b/pypy/rlib/ropenssl.py @@ -110,6 +110,10 @@ 'struct GENERAL_NAME_st', [('type', rffi.INT), ]) + EVP_MD_st = rffi_platform.Struct( + 'EVP_MD', + [('md_size', rffi.INT), + ('block_size', rffi.INT)]) EVP_MD_SIZE = rffi_platform.SizeOf('EVP_MD') EVP_MD_CTX_SIZE = rffi_platform.SizeOf('EVP_MD_CTX') @@ -258,7 +262,7 @@ [BIO, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], X509) EVP_MD_CTX = rffi.COpaquePtr('EVP_MD_CTX', compilation_info=eci) -EVP_MD = rffi.COpaquePtr('EVP_MD', compilation_info=eci) +EVP_MD = lltype.Ptr(EVP_MD_st) OpenSSL_add_all_digests = external( 'OpenSSL_add_all_digests', [], lltype.Void) diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py --- a/pypy/rpython/memory/gc/minimark.py +++ b/pypy/rpython/memory/gc/minimark.py @@ -2,8 +2,7 @@ Environment variables can be used to fine-tune the following parameters: - PYPY_GC_NURSERY The nursery size. Defaults to half the size of - the L2 cache. Try values like '1.2MB'. Small values + PYPY_GC_NURSERY The nursery size. Defaults to '4MB'. Small values (like 1 or 1KB) are useful for debugging. PYPY_GC_MAJOR_COLLECT Major collection memory factor. Default is '1.82', @@ -61,7 +60,7 @@ # # * young objects: allocated in the nursery if they are not too large, or # raw-malloced otherwise. The nursery is a fixed-size memory buffer of -# half the size of the L2 cache. When full, we do a minor collection; +# 4MB by default. When full, we do a minor collection; # the surviving objects from the nursery are moved outside, and the # non-surviving raw-malloced objects are freed. All surviving objects # become old. @@ -329,7 +328,8 @@ # size (needed to handle mallocs just below 'large_objects') but # hacking at the current nursery position in collect_and_reserve(). 
if newsize <= 0: - newsize = env.estimate_best_nursery_size() + newsize = 4*1024*1024 # fixed to 4MB by default + # (it was env.estimate_best_nursery_size()) if newsize <= 0: newsize = defaultsize if newsize < minsize: diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py +++ b/pypy/translator/goal/app_main.py @@ -672,7 +672,7 @@ def pypy_initial_path(s): from pypy.module.sys.state import getinitialpath try: - return getinitialpath(s) + return getinitialpath(None, s) except OSError: return None diff --git a/pypy/translator/goal/test2/test_app_main.py b/pypy/translator/goal/test2/test_app_main.py --- a/pypy/translator/goal/test2/test_app_main.py +++ b/pypy/translator/goal/test2/test_app_main.py @@ -821,6 +821,8 @@ newpath = app_main.get_library_path('/tmp/pypy-c') # stdlib not found assert newpath == sys.path newpath = app_main.get_library_path(self.fake_exe) + if newpath[0].endswith('__extensions__'): + newpath = newpath[1:] # we get at least 'expected_path', and maybe more (e.g.plat-linux2) assert newpath[:len(self.expected_path)] == self.expected_path finally: diff --git a/pypy/translator/sandbox/sandlib.py b/pypy/translator/sandbox/sandlib.py --- a/pypy/translator/sandbox/sandlib.py +++ b/pypy/translator/sandbox/sandlib.py @@ -6,11 +6,10 @@ import py import sys, os, posixpath, errno, stat, time -from pypy.rpython.module.ll_os_stat import s_StatResult from pypy.tool.ansi_print import AnsiLog -from pypy.rlib.rarithmetic import r_longlong import subprocess from pypy.tool.killsubprocess import killsubprocess +from pypy.translator.sandbox.vfs import UID, GID class MyAnsiLog(AnsiLog): KW_TO_COLOR = { @@ -34,6 +33,10 @@ from pypy.tool.lib_pypy import import_from_lib_pypy marshal = import_from_lib_pypy('marshal') +# Non-marshal result types +RESULTTYPE_STATRESULT = object() +RESULTTYPE_LONGLONG = object() + def read_message(f, timeout=None): # warning: 'timeout' is not really reliable and should only be used # for testing. Also, it doesn't work if the file f does any buffering. @@ -50,12 +53,30 @@ marshal.dump(msg, g) else: marshal.dump(msg, g, 0) + elif resulttype is RESULTTYPE_STATRESULT: + # Hand-coded marshal for stat results that mimics what rmarshal expects. + # marshal.dump(tuple(msg)) would have been too easy. rmarshal insists + # on 64-bit ints at places, even when the value fits in 32 bits. + import struct + st = tuple(msg) + fmt = "iIIiiiIfff" + buf = [] + buf.append(struct.pack(" Author: Armin Rigo Branch: Changeset: r50378:0808b0899c34 Date: 2011-12-11 13:22 +0100 http://bitbucket.org/pypy/pypy/changeset/0808b0899c34/ Log: Merge the SpecialisedTuples branch, started by Mark W. P. Add a number of interp-level classes for 'tuple', specialized to some small number of items, e.g. (int, int), (int, object), etc. It should be a win both memory-wise and speed-wise, as it reduces a lot the number of reads and checks done to access tuple elements. 
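The idea described in this log can be shown with a minimal stand-alone sketch in plain Python (this is not the RPython code in the diff that follows; the names Tuple_ii, Tuple_oo and maketuple are made up for illustration only): a factory builds one class per combination of element types, the values are stored directly in typed fields, and reading an item is a plain attribute access instead of a generic, type-checked list read.

def make_specialised_class(typetuple):
    n = len(typetuple)

    class Specialised(object):
        # one slot per element, so the values live directly on the instance
        __slots__ = ['value%d' % i for i in range(n)]

        def __init__(self, *values):
            assert len(values) == n
            for i in range(n):
                # the sketch only checks the type; the real code stores the
                # value unwrapped (e.g. a machine integer, not a W_IntObject)
                assert isinstance(values[i], typetuple[i])
                setattr(self, 'value%d' % i, values[i])

        def length(self):
            return n

        def getitem(self, index):
            # direct field read, no per-item type dispatch
            return getattr(self, 'value%d' % index)

    Specialised.__name__ = 'Tuple_' + ''.join(t.__name__[0] for t in typetuple)
    return Specialised

Tuple_ii = make_specialised_class((int, int))        # specialised (int, int) pair
Tuple_oo = make_specialised_class((object, object))  # generic two-element fallback

def maketuple(a, b):
    if type(a) is int and type(b) is int:
        return Tuple_ii(a, b)
    return Tuple_oo(a, b)

t = maketuple(1, 2)
assert type(t) is Tuple_ii
assert t.length() == 2 and t.getitem(0) == 1 and t.getitem(1) == 2

A small fixed set of such classes (int/str/float/object pairs plus a three-element (object, object, object) case, as in the diff below) keeps the dispatch in maketuple short while still covering the common shapes of tuples.
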
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -252,6 +252,10 @@ "use small tuples", default=False), + BoolOption("withspecialisedtuple", + "use specialised tuples", + default=False), + BoolOption("withrope", "use ropes as the string implementation", default=False, requires=[("objspace.std.withstrslice", False), @@ -365,6 +369,7 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) + config.objspace.std.suggest(withspecialisedtuple=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -2,13 +2,15 @@ pypyjit.set_param(threshold=200) +def g(*args): + return len(args) + def f(n): - pairs = [(0.0, 1.0), (2.0, 3.0)] * n - mag = 0 - for (x1, x2) in pairs: - dx = x1 - x2 - mag += ((dx * dx ) ** (-1.5)) - return n + s = 0 + for i in range(n): + l = [i, n, 2] + s += g(*l) + return s try: print f(301) diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -14,7 +14,6 @@ METH_VARARGS, build_type_checkers, PyObjectFields, bootstrap_function) from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.rlib.objectmodel import we_are_translated -from pypy.objspace.std.tupleobject import W_TupleObject PyCFunction_typedef = rffi.COpaquePtr(typedef='PyCFunction') PyCFunction = lltype.Ptr(lltype.FuncType([PyObject, PyObject], PyObject)) diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -42,11 +42,11 @@ which case o is returned. Use PySequence_Fast_GET_ITEM() to access the members of the result. Returns NULL on failure. 
If the object is not a sequence, raises TypeError with m as the message text.""" - if (space.is_true(space.isinstance(w_obj, space.w_list)) or - space.is_true(space.isinstance(w_obj, space.w_tuple))): + if (isinstance(w_obj, listobject.W_ListObject) or + isinstance(w_obj, tupleobject.W_TupleObject)): return w_obj try: - return space.newtuple(space.fixedview(w_obj)) + return tupleobject.W_TupleObject(space.fixedview(w_obj)) except OperationError: raise OperationError(space.w_TypeError, space.wrap(rffi.charp2str(m))) diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -6,13 +6,12 @@ borrow_from, make_ref, from_ref) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.objspace.std.tupleobject import W_TupleObject -from pypy.objspace.std.smalltupleobject import W_SmallTupleObject PyTuple_Check, PyTuple_CheckExact = build_type_checkers("Tuple") @cpython_api([Py_ssize_t], PyObject) def PyTuple_New(space, size): - return space.newtuple([space.w_None] * size) + return W_TupleObject([space.w_None] * size) @cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=-1) def PyTuple_SetItem(space, w_t, pos, w_obj): @@ -24,12 +23,12 @@ return 0 def _setitem_tuple(w_t, pos, w_obj): - if isinstance(w_t, W_TupleObject): - w_t.wrappeditems[pos] = w_obj - elif isinstance(w_t, W_SmallTupleObject): - w_t.setitem(pos, w_obj) - else: - assert False + # this function checks that w_t is really a W_TupleObject. It + # should only ever be called with a freshly built tuple from + # PyTuple_New(), which always return a W_TupleObject, even if there + # are also other implementations of tuples. + assert isinstance(w_t, W_TupleObject) + w_t.wrappeditems[pos] = w_obj @cpython_api([PyObject, Py_ssize_t], PyObject) def PyTuple_GetItem(space, w_t, pos): diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -15,6 +15,7 @@ _registered_implementations.add(implcls) option_to_typename = { + "withspecialisedtuple" : ["specialisedtupleobject.W_SpecialisedTupleObject"], "withsmalltuple" : ["smalltupleobject.W_SmallTupleObject"], "withsmallint" : ["smallintobject.W_SmallIntObject"], "withsmalllong" : ["smalllongobject.W_SmallLongObject"], @@ -261,6 +262,11 @@ self.typeorder[smalltupleobject.W_SmallTupleObject] += [ (tupleobject.W_TupleObject, smalltupleobject.delegate_SmallTuple2Tuple)] + if config.objspace.std.withspecialisedtuple: + from pypy.objspace.std import specialisedtupleobject + self.typeorder[specialisedtupleobject.W_SpecialisedTupleObject] += [ + (tupleobject.W_TupleObject, specialisedtupleobject.delegate_SpecialisedTuple2Tuple)] + # put W_Root everywhere self.typeorder[W_Root] = [] for type in self.typeorder: diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -29,7 +29,7 @@ from pypy.objspace.std.sliceobject import W_SliceObject from pypy.objspace.std.smallintobject import W_SmallIntObject from pypy.objspace.std.stringobject import W_StringObject -from pypy.objspace.std.tupleobject import W_TupleObject +from pypy.objspace.std.tupleobject import W_AbstractTupleObject from pypy.objspace.std.typeobject import W_TypeObject # types @@ -391,8 +391,8 @@ self.wrap("expected length %d, got %d" % (expected, got))) def unpackiterable(self, w_obj, expected_length=-1): - if isinstance(w_obj, W_TupleObject): - t = 
w_obj.wrappeditems[:] + if isinstance(w_obj, W_AbstractTupleObject): + t = w_obj.getitems_copy() elif isinstance(w_obj, W_ListObject): t = w_obj.getitems_copy() else: @@ -405,8 +405,8 @@ def fixedview(self, w_obj, expected_length=-1, unroll=False): """ Fast paths """ - if isinstance(w_obj, W_TupleObject): - t = w_obj.wrappeditems + if isinstance(w_obj, W_AbstractTupleObject): + t = w_obj.tolist() elif isinstance(w_obj, W_ListObject): if unroll: t = w_obj.getitems_unroll() @@ -430,8 +430,8 @@ def listview(self, w_obj, expected_length=-1): if isinstance(w_obj, W_ListObject): t = w_obj.getitems() - elif isinstance(w_obj, W_TupleObject): - t = w_obj.wrappeditems[:] + elif isinstance(w_obj, W_AbstractTupleObject): + t = w_obj.getitems_copy() else: return ObjSpace.unpackiterable(self, w_obj, expected_length) if expected_length != -1 and len(t) != expected_length: diff --git a/pypy/objspace/std/smalltupleobject.py b/pypy/objspace/std/smalltupleobject.py --- a/pypy/objspace/std/smalltupleobject.py +++ b/pypy/objspace/std/smalltupleobject.py @@ -9,13 +9,14 @@ from pypy.interpreter import gateway from pypy.rlib.debug import make_sure_not_resized from pypy.rlib.unroll import unrolling_iterable +from pypy.tool.sourcetools import func_with_new_name from pypy.objspace.std.tupleobject import W_AbstractTupleObject, W_TupleObject class W_SmallTupleObject(W_AbstractTupleObject): from pypy.objspace.std.tupletype import tuple_typedef as typedef - def tolist(self): - raise NotImplementedError + #def tolist(self): --- inherited from W_AbstractTupleObject + # raise NotImplementedError def length(self): raise NotImplementedError @@ -51,6 +52,9 @@ l[i] = getattr(self, 'w_value%s' % i) return l + # same source code, but builds and returns a resizable list + getitems_copy = func_with_new_name(tolist, 'getitems_copy') + def length(self): return n diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -0,0 +1,302 @@ +from pypy.interpreter.error import OperationError +from pypy.objspace.std.model import registerimplementation +from pypy.objspace.std.register_all import register_all +from pypy.objspace.std.multimethod import FailedToImplement +from pypy.objspace.std.tupleobject import W_AbstractTupleObject +from pypy.objspace.std.tupleobject import W_TupleObject +from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice +from pypy.rlib.rarithmetic import intmask +from pypy.rlib.objectmodel import compute_hash +from pypy.rlib.unroll import unrolling_iterable +from pypy.tool.sourcetools import func_with_new_name + +class NotSpecialised(Exception): + pass + +class W_SpecialisedTupleObject(W_AbstractTupleObject): + from pypy.objspace.std.tupletype import tuple_typedef as typedef + __slots__ = [] + + def __repr__(self): + """ representation for debugging purposes """ + reprlist = [repr(item) for item in self._to_unwrapped_list()] + return "%s(%s)" % (self.__class__.__name__, ', '.join(reprlist)) + + #def tolist(self): --- inherited from W_AbstractTupleObject + # raise NotImplementedError + + def _to_unwrapped_list(self): + "NOT_RPYTHON" + raise NotImplementedError + + def length(self): + raise NotImplementedError + + def getitem(self, index): + raise NotImplementedError + + def hash(self, space): + raise NotImplementedError + + def eq(self, space, w_other): + raise NotImplementedError + + def setitem(self, index, w_item): + raise NotImplementedError + + def unwrap(self, 
space): + return tuple(self._to_unwrapped_list()) + + def delegating(self): + pass # for tests only + + +def make_specialised_class(typetuple): + assert type(typetuple) == tuple + + nValues = len(typetuple) + iter_n = unrolling_iterable(range(nValues)) + + class cls(W_SpecialisedTupleObject): + def __init__(self, space, *values_w): + self.space = space + assert len(values_w) == nValues + for i in iter_n: + w_obj = values_w[i] + val_type = typetuple[i] + if val_type == int: + unwrapped = space.int_w(w_obj) + elif val_type == float: + unwrapped = space.float_w(w_obj) + elif val_type == str: + unwrapped = space.str_w(w_obj) + elif val_type == object: + unwrapped = w_obj + else: + raise AssertionError + setattr(self, 'value%s' % i, unwrapped) + + def length(self): + return nValues + + def tolist(self): + list_w = [None] * nValues + for i in iter_n: + value = getattr(self, 'value%s' % i) + if typetuple[i] != object: + value = self.space.wrap(value) + list_w[i] = value + return list_w + + # same source code, but builds and returns a resizable list + getitems_copy = func_with_new_name(tolist, 'getitems_copy') + + def _to_unwrapped_list(self): + "NOT_RPYTHON" + list_w = [None] * nValues + for i in iter_n: + value = getattr(self, 'value%s' % i) + if typetuple[i] == object: + value = self.space.unwrap(value) + list_w[i] = value + return list_w + + def hash(self, space): + # XXX duplicate logic from tupleobject.py + mult = 1000003 + x = 0x345678 + z = nValues + for i in iter_n: + value = getattr(self, 'value%s' % i) + if typetuple[i] == object: + y = space.int_w(space.hash(value)) + elif typetuple[i] == float: + # get the correct hash for float which is an + # integer & other less frequent cases + from pypy.objspace.std.floatobject import _hash_float + y = _hash_float(space, value) + else: + y = compute_hash(value) + x = (x ^ y) * mult + z -= 1 + mult += 82520 + z + z + x += 97531 + return space.wrap(intmask(x)) + + def _eq(self, w_other): + if not isinstance(w_other, cls): + # if we are not comparing same types, give up + raise FailedToImplement + for i in iter_n: + myval = getattr(self, 'value%s' % i) + otherval = getattr(w_other, 'value%s' % i) + if typetuple[i] == object: + if not self.space.eq_w(myval, otherval): + return False + else: + if myval != otherval: + return False + else: + return True + + def eq(self, space, w_other): + return space.newbool(self._eq(w_other)) + + def ne(self, space, w_other): + return space.newbool(not self._eq(w_other)) + +## def _compare(self, compare_op, w_other): +## if not isinstance(w_other, cls): +## raise FailedToImplement +## ncmp = min(self.length(), w_other.length()) +## for i in iter_n: +## if typetuple[i] == Any:#like space.eq on wrapped or two params? 
+## raise FailedToImplement +## if ncmp > i: +## l_val = getattr(self, 'value%s' % i) +## r_val = getattr(w_other, 'value%s' % i) +## if l_val != r_val: +## return compare_op(l_val, r_val) +## return compare_op(self.length(), w_other.length()) + + def getitem(self, index): + for i in iter_n: + if index == i: + value = getattr(self, 'value%s' % i) + if typetuple[i] != object: + value = self.space.wrap(value) + return value + raise IndexError + + cls.__name__ = ('W_SpecialisedTupleObject_' + + ''.join([t.__name__[0] for t in typetuple])) + _specialisations.append(cls) + return cls + +# ---------- current specialized versions ---------- + +_specialisations = [] +Cls_ii = make_specialised_class((int, int)) +Cls_is = make_specialised_class((int, str)) +Cls_io = make_specialised_class((int, object)) +Cls_si = make_specialised_class((str, int)) +Cls_ss = make_specialised_class((str, str)) +Cls_so = make_specialised_class((str, object)) +Cls_oi = make_specialised_class((object, int)) +Cls_os = make_specialised_class((object, str)) +Cls_oo = make_specialised_class((object, object)) +Cls_ff = make_specialised_class((float, float)) +Cls_ooo = make_specialised_class((object, object, object)) + +def makespecialisedtuple(space, list_w): + if len(list_w) == 2: + w_arg1, w_arg2 = list_w + w_type1 = space.type(w_arg1) + w_type2 = space.type(w_arg2) + # + if w_type1 is space.w_int: + if w_type2 is space.w_int: + return Cls_ii(space, w_arg1, w_arg2) + elif w_type2 is space.w_str: + return Cls_is(space, w_arg1, w_arg2) + else: + return Cls_io(space, w_arg1, w_arg2) + # + elif w_type1 is space.w_str: + if w_type2 is space.w_int: + return Cls_si(space, w_arg1, w_arg2) + elif w_type2 is space.w_str: + return Cls_ss(space, w_arg1, w_arg2) + else: + return Cls_so(space, w_arg1, w_arg2) + # + elif w_type1 is space.w_float and w_type2 is space.w_float: + return Cls_ff(space, w_arg1, w_arg2) + # + else: + if w_type2 is space.w_int: + return Cls_oi(space, w_arg1, w_arg2) + elif w_type2 is space.w_str: + return Cls_os(space, w_arg1, w_arg2) + else: + return Cls_oo(space, w_arg1, w_arg2) + # + elif len(list_w) == 3: + return Cls_ooo(space, list_w[0], list_w[1], list_w[2]) + else: + raise NotSpecialised + +# ____________________________________________________________ + +registerimplementation(W_SpecialisedTupleObject) + +def delegate_SpecialisedTuple2Tuple(space, w_specialised): + w_specialised.delegating() + return W_TupleObject(w_specialised.tolist()) + +def len__SpecialisedTuple(space, w_tuple): + return space.wrap(w_tuple.length()) + +def getitem__SpecialisedTuple_ANY(space, w_tuple, w_index): + index = space.getindex_w(w_index, space.w_IndexError, "tuple index") + if index < 0: + index += w_tuple.length() + try: + return w_tuple.getitem(index) + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("tuple index out of range")) + +def getitem__SpecialisedTuple_Slice(space, w_tuple, w_slice): + length = w_tuple.length() + start, stop, step, slicelength = w_slice.indices4(space, length) + assert slicelength >= 0 + subitems = [None] * slicelength + for i in range(slicelength): + subitems[i] = w_tuple.getitem(start) + start += step + return space.newtuple(subitems) + +def mul_specialisedtuple_times(space, w_tuple, w_times): + try: + times = space.getindex_w(w_times, space.w_OverflowError) + except OperationError, e: + if e.match(space, space.w_TypeError): + raise FailedToImplement + raise + if times == 1 and space.type(w_tuple) == space.w_tuple: + return w_tuple + items = w_tuple.tolist() + return 
space.newtuple(items * times) + +def mul__SpecialisedTuple_ANY(space, w_tuple, w_times): + return mul_specialisedtuple_times(space, w_tuple, w_times) + +def mul__ANY_SpecialisedTuple(space, w_times, w_tuple): + return mul_specialisedtuple_times(space, w_tuple, w_times) + +def eq__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): + return w_tuple1.eq(space, w_tuple2) + +def ne__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): + return w_tuple1.ne(space, w_tuple2) + +##from operator import lt, le, ge, gt + +##def lt__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(lt, w_tuple2)) + +##def le__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(le, w_tuple2)) + +##def ge__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(ge, w_tuple2)) + +##def gt__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(gt, w_tuple2)) + +def hash__SpecialisedTuple(space, w_tuple): + return w_tuple.hash(space) + +from pypy.objspace.std import tupletype +register_all(vars(), tupletype) diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -0,0 +1,234 @@ +import py, sys +from pypy.objspace.std.tupleobject import W_TupleObject +from pypy.objspace.std.specialisedtupleobject import W_SpecialisedTupleObject +from pypy.objspace.std.specialisedtupleobject import _specialisations +from pypy.interpreter.error import OperationError +from pypy.conftest import gettestobjspace, option +from pypy.objspace.std.test import test_tupleobject +from pypy.interpreter import gateway + + +for cls in _specialisations: + globals()[cls.__name__] = cls + + +class TestW_SpecialisedTupleObject(): + + def setup_class(cls): + cls.space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) + + def test_isspecialisedtupleobjectintint(self): + w_tuple = self.space.newtuple([self.space.wrap(1), self.space.wrap(2)]) + assert isinstance(w_tuple, W_SpecialisedTupleObject_ii) + + def test_isnotspecialisedtupleobject(self): + w_tuple = self.space.newtuple([self.space.wrap({})]) + assert not isinstance(w_tuple, W_SpecialisedTupleObject) + + def test_specialisedtupleclassname(self): + w_tuple = self.space.newtuple([self.space.wrap(1), self.space.wrap(2)]) + assert w_tuple.__class__.__name__ == 'W_SpecialisedTupleObject_ii' + + def test_hash_against_normal_tuple(self): + N_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": False}) + S_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) + + def hash_test(values): + N_values_w = [N_space.wrap(value) for value in values] + S_values_w = [S_space.wrap(value) for value in values] + N_w_tuple = N_space.newtuple(N_values_w) + S_w_tuple = S_space.newtuple(S_values_w) + + assert isinstance(S_w_tuple, W_SpecialisedTupleObject) + assert isinstance(N_w_tuple, W_TupleObject) + assert not N_space.is_true(N_space.eq(N_w_tuple, S_w_tuple)) + assert S_space.is_true(S_space.eq(N_w_tuple, S_w_tuple)) + assert S_space.is_true(S_space.eq(N_space.hash(N_w_tuple), S_space.hash(S_w_tuple))) + + hash_test([1,2]) + hash_test([1.5,2.8]) + hash_test([1.0,2.0]) + hash_test(['arbitrary','strings']) + hash_test([1,(1,2,3,4)]) + hash_test([1,(1,2)]) + hash_test([1,('a',2)]) + hash_test([1,()]) + 
hash_test([1,2,3]) + + +class AppTestW_SpecialisedTupleObject: + + def setup_class(cls): + cls.space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) + def forbid_delegation(space, w_tuple): + def delegation_forbidden(): + # haaaack + co = sys._getframe(2).f_code + if co.co_name.startswith('_mm_repr_tuple'): + return + raise OperationError(space.w_ReferenceError, w_tuple) + w_tuple.delegating = delegation_forbidden + return w_tuple + if option.runappdirect: + cls.w_forbid_delegation = lambda self, x: x + cls.test_delegation = lambda self: skip("runappdirect") + else: + cls.w_forbid_delegation = cls.space.wrap( + gateway.interp2app(forbid_delegation)) + + def w_isspecialised(self, obj, expected=''): + import __pypy__ + r = __pypy__.internal_repr(obj) + print obj, '==>', r, ' (expected: %r)' % expected + return ("SpecialisedTupleObject" + expected) in r + + def test_createspecialisedtuple(self): + spec = {int: 'i', + float: 'f', + str: 's', + list: 'o'} + # + for x in [42, 4.2, "foo", []]: + for y in [43, 4.3, "bar", []]: + expected1 = spec[type(x)] + expected2 = spec[type(y)] + if (expected1 == 'f') ^ (expected2 == 'f'): + if expected1 == 'f': expected1 = 'o' + if expected2 == 'f': expected2 = 'o' + obj = (x, y) + assert self.isspecialised(obj, '_' + expected1 + expected2) + # + obj = (1, 2, 3) + assert self.isspecialised(obj, '_ooo') + + def test_delegation(self): + t = self.forbid_delegation((42, 43)) + raises(ReferenceError, t.__getslice__, 0, 1) + + def test_len(self): + t = self.forbid_delegation((42,43)) + assert len(t) == 2 + + def test_notspecialisedtuple(self): + assert not self.isspecialised((42,43,44,45)) + assert not self.isspecialised((1.5,)) + + def test_slicing_to_specialised(self): + t = (1, 2, 3) + assert self.isspecialised(t[0:2]) + t = (1, '2', 3) + assert self.isspecialised(t[0:5:2]) + + def test_adding_to_specialised(self): + t = (1,) + assert self.isspecialised(t + (2,)) + + def test_multiply_to_specialised(self): + t = (1,) + assert self.isspecialised(t * 2) + + def test_slicing_from_specialised(self): + t = (1, 2, 3) + assert t[0:2:1] == (1, 2) + + def test_eq_no_delegation(self): + t = (1,) + a = self.forbid_delegation(t + (2,)) + b = (1, 2) + assert a == b + + c = (2, 1) + assert not a == c + + def test_eq_can_delegate(self): + a = (1,2) + b = (1,3,2) + assert not a == b + + values = [2, 2L, 2.0, 1, 1L, 1.0] + for x in values: + for y in values: + assert ((1,2) == (x,y)) == (1 == x and 2 == y) + + def test_neq(self): + a = self.forbid_delegation((1,2)) + b = (1,) + b = b+(2,) + assert not a != b + + c = (1,3) + assert a != c + + def test_ordering(self): + a = (1,2) #self.forbid_delegation((1,2)) --- code commented out + assert a < (2,2) + assert a < (1,3) + assert not a < (1,2) + + assert a <= (2,2) + assert a <= (1,2) + assert not a <= (1,1) + + assert a >= (0,2) + assert a >= (1,2) + assert not a >= (1,3) + + assert a > (0,2) + assert a > (1,1) + assert not a > (1,3) + + assert (2,2) > a + assert (1,3) > a + assert not (1,2) > a + + assert (2,2) >= a + assert (1,2) >= a + assert not (1,1) >= a + + assert (0,2) <= a + assert (1,2) <= a + assert not (1,3) <= a + + assert (0,2) < a + assert (1,1) < a + assert not (1,3) < a + + def test_hash(self): + a = (1,2) + b = (1,) + b += (2,) # else a and b refer to same constant + assert hash(a) == hash(b) + + c = (2,4) + assert hash(a) != hash(c) + + assert hash(a) == hash((1L, 2L)) == hash((1.0, 2.0)) == hash((1.0, 2L)) + + def test_getitem(self): + t = self.forbid_delegation((5,3)) + assert (t)[0] == 5 + 
assert (t)[1] == 3 + assert (t)[-1] == 3 + assert (t)[-2] == 5 + raises(IndexError, "t[2]") + raises(IndexError, "t[-3]") + + def test_three_tuples(self): + b = self.forbid_delegation((1, 2, 3)) + c = (1,) + d = c + (2, 3) + assert self.isspecialised(d) + assert b == d + + def test_mongrel(self): + a = self.forbid_delegation((1, 2.2, '333')) + assert self.isspecialised(a) + assert len(a) == 3 + assert a[0] == 1 and a[1] == 2.2 and a[2] == '333' + b = ('333',) + assert a == (1, 2.2,) + b + assert not a != (1, 2.2) + b + + +class AppTestAll(test_tupleobject.AppTestW_TupleObject): + pass diff --git a/pypy/objspace/std/test/test_tupleobject.py b/pypy/objspace/std/test/test_tupleobject.py --- a/pypy/objspace/std/test/test_tupleobject.py +++ b/pypy/objspace/std/test/test_tupleobject.py @@ -280,6 +280,8 @@ assert () * 10 == () assert (5,) * 3 == (5,5,5) assert (5,2) * 2 == (5,2,5,2) + + def test_mul_identity(self): t = (1,2,3) assert (t * 1) is t diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -12,6 +12,15 @@ class W_AbstractTupleObject(W_Object): __slots__ = () + def tolist(self): + "Returns the items, as a fixed-size list." + raise NotImplementedError + + def getitems_copy(self): + "Returns a copy of the items, as a resizable list." + raise NotImplementedError + + class W_TupleObject(W_AbstractTupleObject): from pypy.objspace.std.tupletype import tuple_typedef as typedef _immutable_fields_ = ['wrappeditems[*]'] @@ -29,6 +38,12 @@ items = [space.unwrap(w_item) for w_item in w_tuple.wrappeditems] return tuple(items) + def tolist(self): + return self.wrappeditems + + def getitems_copy(self): + return self.wrappeditems[:] # returns a resizable list + registerimplementation(W_TupleObject) diff --git a/pypy/objspace/std/tupletype.py b/pypy/objspace/std/tupletype.py --- a/pypy/objspace/std/tupletype.py +++ b/pypy/objspace/std/tupletype.py @@ -5,6 +5,14 @@ def wraptuple(space, list_w): from pypy.objspace.std.tupleobject import W_TupleObject + + if space.config.objspace.std.withspecialisedtuple: + from specialisedtupleobject import makespecialisedtuple, NotSpecialised + try: + return makespecialisedtuple(space, list_w) + except NotSpecialised: + pass + if space.config.objspace.std.withsmalltuple: from pypy.objspace.std.smalltupleobject import W_SmallTupleObject2 from pypy.objspace.std.smalltupleobject import W_SmallTupleObject3 diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -63,7 +63,10 @@ exec_ = eval def repr(self, w_value): - return self.space.unwrap(self.space.repr(w_value)) + try: + return self.space.unwrap(self.space.repr(w_value)) + except Exception, e: + return ""%e def is_true(self, w_value): return self.space.is_true(w_value) From noreply at buildbot.pypy.org Sun Dec 11 13:34:11 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 13:34:11 +0100 (CET) Subject: [pypy-commit] pypy default: hg backout 3eba2ed546ad Message-ID: <20111211123411.5BBE682210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50379:1c29f19cb8d7 Date: 2011-12-11 13:33 +0100 http://bitbucket.org/pypy/pypy/changeset/1c29f19cb8d7/ Log: hg backout 3eba2ed546ad Does not translate. 
The issue is that to_builtin_type() is not RPython: the result of self.unbox() is going to be promoted to the widest possible integer, meaning that tests like "assert y.tolist() is True" would fail after translation (if it did translate). diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -91,9 +91,6 @@ descr_neg = _unaryop_impl("negative") descr_abs = _unaryop_impl("absolute") - def descr_tolist(self, space): - return self.get_dtype(space).itemtype.to_builtin_type(space, self) - class W_BoolBox(W_GenericBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("bool") @@ -182,8 +179,6 @@ __neg__ = interp2app(W_GenericBox.descr_neg), __abs__ = interp2app(W_GenericBox.descr_abs), - - tolist = interp2app(W_GenericBox.descr_tolist), ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -876,17 +876,6 @@ arr.setshape(space, new_shape) return arr - def descr_tolist(self, space): - if len(self.shape) == 0: - assert isinstance(self, Scalar) - return self.value.descr_tolist(space) - w_result = space.newlist([]) - for i in range(self.shape[0]): - space.call_method(w_result, "append", - space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist") - ) - return w_result - def descr_mean(self, space): return space.div(self.descr_sum(space), space.wrap(self.find_size())) @@ -1496,7 +1485,6 @@ copy = interp2app(BaseArray.descr_copy), reshape = interp2app(BaseArray.descr_reshape), - tolist = interp2app(BaseArray.descr_tolist), ) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -879,45 +879,6 @@ b[0] = 3 assert b.__debug_repr__() == 'Call2(add, forced=Array)' - def test_tolist_scalar(self): - from numpypy import int32, bool_ - x = int32(23) - assert x.tolist() == 23 - assert type(x.tolist()) is int - y = bool_(True) - assert y.tolist() is True - - def test_tolist_zerodim(self): - from numpypy import array - x = array(3) - assert x.tolist() == 3 - assert type(x.tolist()) is int - - def test_tolist_singledim(self): - from numpypy import array - a = array(range(5)) - assert a.tolist() == [0, 1, 2, 3, 4] - assert type(a.tolist()[0]) is int - b = array([0.2, 0.4, 0.6]) - assert b.tolist() == [0.2, 0.4, 0.6] - - def test_tolist_multidim(self): - from numpypy import array - a = array([[1, 2], [3, 4]]) - assert a.tolist() == [[1, 2], [3, 4]] - - def test_tolist_view(self): - from numpypy import array - a = array([[1,2],[3,4]]) - assert (a + a).tolist() == [[2, 4], [6, 8]] - - def test_tolist_slice(self): - from numpypy import array - a = array([[17.1, 27.2], [40.3, 50.3]]) - assert a[:,0].tolist() == [17.1, 40.3] - assert a[0].tolist() == [17.1, 27.2] - - class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -78,9 +78,6 @@ w_obj.__init__(self._coerce(space, w_item).value) return w_obj - def to_builtin_type(self, space, box): - return space.wrap(self.unbox(box)) - def _coerce(self, space, w_item): raise NotImplementedError From 
noreply at buildbot.pypy.org Sun Dec 11 13:50:10 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 13:50:10 +0100 (CET) Subject: [pypy-commit] pypy default: Hopefully fix test_pypy_c:test_call:test_stararg_virtual, by forcing Message-ID: <20111211125010.90CC582210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50380:f0e6f9c06870 Date: 2011-12-11 13:49 +0100 http://bitbucket.org/pypy/pypy/changeset/f0e6f9c06870/ Log: Hopefully fix test_pypy_c:test_call:test_stararg_virtual, by forcing getitems_fixedsize() to unroll small constant-length lists. diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -694,14 +694,16 @@ return self.wrap(r) @jit.look_inside_iff(lambda self, w_list: - jit.isconstant(w_list.length()) and w_list.length() < UNROLL_CUTOFF) + jit.isconstant(w_list.length()) and w_list.length() < UNROLL_CUTOFF) def getitems_copy(self, w_list): return [self.wrap(item) for item in self.unerase(w_list.lstorage)] @jit.unroll_safe def getitems_unroll(self, w_list): return [self.wrap(item) for item in self.unerase(w_list.lstorage)] - @jit.dont_look_inside + + @jit.look_inside_iff(lambda self, w_list: + jit.isconstant(w_list.length()) and w_list.length() < UNROLL_CUTOFF) def getitems_fixedsize(self, w_list): return self.getitems_unroll(w_list) From noreply at buildbot.pypy.org Sun Dec 11 15:09:53 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 15:09:53 +0100 (CET) Subject: [pypy-commit] pypy default: Wrap(r_singlefloat) is not supported by the std obj space either. Message-ID: <20111211140953.25E3382210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50381:20cf388f70b8 Date: 2011-12-11 14:11 +0100 http://bitbucket.org/pypy/pypy/changeset/20cf388f70b8/ Log: Wrap(r_singlefloat) is not supported by the std obj space either. 
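(Editorial sketch, not part of the changeset: the practical consequence of this restriction is that interpreter-level code has to widen a single-precision value to a regular float before handing it to wrap(). Assuming the pypy.rlib.rarithmetic API used here, the pattern looks roughly like the lines below; the diff that follows makes the fake objspace raise for such values instead of silently accepting them.)

    from pypy.rlib.rarithmetic import r_singlefloat

    sf = r_singlefloat(1.5)       # value stored with single precision
    assert float(sf) == 1.5       # widen back to a regular (double) float
    # interp-level code then wraps the widened value:
    #     w_obj = space.wrap(float(sf))
    # passing sf itself to space.wrap() is not supported by the std
    # objspace, and after this change not by the fake objspace either.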
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -9,7 +9,7 @@ from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.objectmodel import instantiate, we_are_translated from pypy.rlib.nonconst import NonConstant -from pypy.rlib.rarithmetic import r_uint +from pypy.rlib.rarithmetic import r_uint, r_singlefloat from pypy.translator.translator import TranslationContext from pypy.tool.option import make_config @@ -145,9 +145,15 @@ self._see_interp2app(x) if isinstance(x, GetSetProperty): self._see_getsetproperty(x) + if isinstance(x, r_singlefloat): + self._wrap_not_rpython(x) return w_some_obj() wrap._annspecialcase_ = "specialize:argtype(1)" + def _wrap_not_rpython(self, x): + "NOT_RPYTHON" + raise NotImplementedError + def _see_interp2app(self, interp2app): "NOT_RPYTHON" activation = interp2app._code.activation From noreply at buildbot.pypy.org Sun Dec 11 15:09:54 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 15:09:54 +0100 (CET) Subject: [pypy-commit] pypy default: fix Message-ID: <20111211140954.4E3D082210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50382:b56d8ea76b88 Date: 2011-12-11 15:09 +0100 http://bitbucket.org/pypy/pypy/changeset/b56d8ea76b88/ Log: fix diff --git a/pypy/module/_continuation/test/test_translated.py b/pypy/module/_continuation/test/test_translated.py --- a/pypy/module/_continuation/test/test_translated.py +++ b/pypy/module/_continuation/test/test_translated.py @@ -93,13 +93,20 @@ if not option.runappdirect: py.test.skip("meant only for -A run") - def test_single_threaded(self): - for i in range(20): - yield Runner().run_test, - - def test_multi_threaded(self): - for i in range(5): - yield multithreaded_test, +def _setup(): + for _i in range(20): + def test_single_threaded(self): + Runner().run_test() + test_single_threaded.func_name = 'test_single_threaded_%d' % _i + setattr(AppTestWrapper, test_single_threaded.func_name, + test_single_threaded) + for _i in range(5): + def test_multi_threaded(self): + multithreaded_test() + test_multi_threaded.func_name = 'test_multi_threaded_%d' % _i + setattr(AppTestWrapper, test_multi_threaded.func_name, + test_multi_threaded) +_setup() class ThreadTest(object): def __init__(self, lock): From noreply at buildbot.pypy.org Sun Dec 11 15:11:14 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 15:11:14 +0100 (CET) Subject: [pypy-commit] pypy SpecialisedTuples: close branch Message-ID: <20111211141114.4CEFA82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: SpecialisedTuples Changeset: r50383:a48a35a5c618 Date: 2011-12-11 15:10 +0100 http://bitbucket.org/pypy/pypy/changeset/a48a35a5c618/ Log: close branch From noreply at buildbot.pypy.org Sun Dec 11 15:19:59 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 11 Dec 2011 15:19:59 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: a bit more cloning to get a short_preamble for the fallback that does not contain ops from the failed optimization attempt Message-ID: <20111211141959.E31B282210@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50384:4c3b1c0a071a Date: 2011-12-11 14:23 +0100 http://bitbucket.org/pypy/pypy/changeset/4c3b1c0a071a/ Log: a bit more cloning to get a short_preamble for the fallback that does not contain ops from the failed optimization attempt diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- 
a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -207,6 +207,7 @@ [h_ops[i].clone() for i in range(start, len(h_ops))] + \ [ResOperation(rop.JUMP, jumpargs, None, descr=loop_jitcell_token)] label = part.operations[0] + orignial_label = label.clone() assert label.getopnum() == rop.LABEL try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) @@ -216,12 +217,13 @@ target_token = label.getdescr() assert isinstance(target_token, TargetToken) assert target_token.exported_state - part.operations = [label] + \ + part.operations = [orignial_label] + \ [ResOperation(rop.JUMP, target_token.exported_state.jump_args, None, descr=loop_jitcell_token)] try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts, inline_short_preamble=False) + except InvalidLoop: return None assert part.operations[-1].getopnum() != rop.LABEL diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -188,7 +188,7 @@ return self.did_import = True - self.short = target_token.short_preamble + self.short = target_token.short_preamble[:] self.short_seen = {} self.short_boxes = exported_state.short_boxes.clone() for box, const in exported_state.constant_inputargs.items(): From noreply at buildbot.pypy.org Sun Dec 11 15:20:01 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 11 Dec 2011 15:20:01 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: hg merge default Message-ID: <20111211142001.374AC82ABD@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50385:988827261d3c Date: 2011-12-11 14:25 +0100 http://bitbucket.org/pypy/pypy/changeset/988827261d3c/ Log: hg merge default diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -252,6 +252,10 @@ "use small tuples", default=False), + BoolOption("withspecialisedtuple", + "use specialised tuples", + default=False), + BoolOption("withrope", "use ropes as the string implementation", default=False, requires=[("objspace.std.withstrslice", False), @@ -365,6 +369,7 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) + config.objspace.std.suggest(withspecialisedtuple=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -496,6 +496,17 @@ def setup(self): super(AppClassCollector, self).setup() cls = self.obj + # + # + for name in dir(cls): + if name.startswith('test_'): + func = getattr(cls, name, None) + code = getattr(func, 'func_code', None) + if code and code.co_flags & 32: + raise AssertionError("unsupported: %r is a generator " + "app-level test method" % (name,)) + # + # space = cls.space clsname = cls.__name__ if self.config.option.runappdirect: diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -2,13 +2,15 @@ pypyjit.set_param(threshold=200) +def g(*args): + return len(args) + def f(n): - pairs = [(0.0, 1.0), (2.0, 3.0)] * n - mag = 0 - for (x1, x2) in pairs: - dx = x1 - x2 - mag += ((dx * dx ) ** (-1.5)) - return n + s = 0 + for i in range(n): + l = [i, n, 2] + s += g(*l) + return s try: print f(301) diff --git 
a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py --- a/pypy/module/_hashlib/interp_hashlib.py +++ b/pypy/module/_hashlib/interp_hashlib.py @@ -21,11 +21,11 @@ class W_Hash(Wrappable): ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO) - _block_size = -1 def __init__(self, space, name): self.name = name - self.digest_size = self.compute_digest_size() + digest_type = self.digest_type_by_name(space) + self.digest_size = rffi.getintfield(digest_type, 'c_md_size') # Allocate a lock for each HASH object. # An optimization would be to not release the GIL on small requests, @@ -34,21 +34,22 @@ ctx = lltype.malloc(ropenssl.EVP_MD_CTX.TO, flavor='raw') rgc.add_memory_pressure(HASH_MALLOC_SIZE + self.digest_size) + ropenssl.EVP_DigestInit(ctx, digest_type) self.ctx = ctx - def initdigest(self, space, name): - digest = ropenssl.EVP_get_digestbyname(name) - if not digest: - raise OperationError(space.w_ValueError, - space.wrap("unknown hash function")) - ropenssl.EVP_DigestInit(self.ctx, digest) - def __del__(self): # self.lock.free() if self.ctx: ropenssl.EVP_MD_CTX_cleanup(self.ctx) lltype.free(self.ctx, flavor='raw') + def digest_type_by_name(self, space): + digest_type = ropenssl.EVP_get_digestbyname(self.name) + if not digest_type: + raise OperationError(space.w_ValueError, + space.wrap("unknown hash function")) + return digest_type + def descr_repr(self, space): addrstring = self.getaddrstring(space) return space.wrap("<%s HASH object at 0x%s>" % ( @@ -87,7 +88,9 @@ return space.wrap(self.digest_size) def get_block_size(self, space): - return space.wrap(self.compute_block_size()) + digest_type = self.digest_type_by_name(space) + block_size = rffi.getintfield(digest_type, 'c_block_size') + return space.wrap(block_size) def _digest(self, space): with lltype.scoped_alloc(ropenssl.EVP_MD_CTX.TO) as ctx: @@ -99,36 +102,6 @@ ropenssl.EVP_MD_CTX_cleanup(ctx) return rffi.charpsize2str(digest, digest_size) - def compute_digest_size(self): - # XXX This isn't the nicest way, but the EVP_MD_size OpenSSL - # XXX function is defined as a C macro on OS X and would be - # XXX significantly harder to implement in another way. - # Values are digest sizes in bytes - return { - 'md5': 16, 'MD5': 16, - 'sha1': 20, 'SHA1': 20, - 'sha224': 28, 'SHA224': 28, - 'sha256': 32, 'SHA256': 32, - 'sha384': 48, 'SHA384': 48, - 'sha512': 64, 'SHA512': 64, - }.get(self.name, 0) - - def compute_block_size(self): - if self._block_size != -1: - return self._block_size - # XXX This isn't the nicest way, but the EVP_MD_CTX_block_size - # XXX OpenSSL function is defined as a C macro on some systems - # XXX and would be significantly harder to implement in - # XXX another way. 
- self._block_size = { - 'md5': 64, 'MD5': 64, - 'sha1': 64, 'SHA1': 64, - 'sha224': 64, 'SHA224': 64, - 'sha256': 64, 'SHA256': 64, - 'sha384': 128, 'SHA384': 128, - 'sha512': 128, 'SHA512': 128, - }.get(self.name, 0) - return self._block_size W_Hash.typedef = TypeDef( 'HASH', @@ -142,11 +115,11 @@ digestsize=GetSetProperty(W_Hash.get_digest_size), block_size=GetSetProperty(W_Hash.get_block_size), ) +W_Hash.acceptable_as_base_class = False @unwrap_spec(name=str, string='bufferstr') def new(space, name, string=''): w_hash = W_Hash(space, name) - w_hash.initdigest(space, name) w_hash.update(space, string) return space.wrap(w_hash) @@ -158,6 +131,6 @@ return new(space, name, string) return new_hash -for name in algorithms: - newname = 'new_%s' % (name,) - globals()[newname] = make_new_hash(name, newname) +for _name in algorithms: + _newname = 'new_%s' % (_name,) + globals()[_newname] = make_new_hash(_name, _newname) diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py --- a/pypy/module/_hashlib/test/test_hashlib.py +++ b/pypy/module/_hashlib/test/test_hashlib.py @@ -80,28 +80,27 @@ _hashlib.openssl_sha1(b).digest() def test_extra_algorithms(self): - import _hashlib - test_string = "Nobody inspects the spammish repetition" expected_results = { "md5": "bb649c83dd1ea5c9d9dec9a18df0ffe9", "md4": "c275b8454684ea416b93d7a418b43176", "mdc2": None, # XXX find the correct expected value "sha": "e2b0a8609b47c58e5d984c9ccfe69f9b654b032b", "ripemd160": "cc4a5ce1b3df48aec5d22d1f16b894a0b894eccc", - "whirlpool": "1a22b79fe5afda02c63a25927193ed01dc718b74" - "026e597608ce431f9c3d2c9e74a7350b7fbb7c5d" - "4effe5d7a31879b8b7a10fd2f544c4ca268ecc6793923583", + "whirlpool": ("1a22b79fe5afda02c63a25927193ed01dc718b74" + "026e597608ce431f9c3d2c9e74a7350b7fbb7c5d" + "4effe5d7a31879b8b7a10fd2f544c4ca268ecc6793923583"), } - def extracheck(hash_name, expected): + import _hashlib + test_string = "Nobody inspects the spammish repetition" + for hash_name, expected in sorted(expected_results.items()): try: m = _hashlib.new(hash_name) except ValueError, e: - skip('%s: %s' % (hash_name, e)) + print 'skipped %s: %s' % (hash_name, e) + continue m.update(test_string) got = m.hexdigest() assert got and type(got) is str and len(got) % 2 == 0 got.decode('hex') if expected is not None: assert got == expected - for hash_name, expected in sorted(expected_results.items()): - yield extracheck, hash_name, expected diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -14,7 +14,6 @@ METH_VARARGS, build_type_checkers, PyObjectFields, bootstrap_function) from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.rlib.objectmodel import we_are_translated -from pypy.objspace.std.tupleobject import W_TupleObject PyCFunction_typedef = rffi.COpaquePtr(typedef='PyCFunction') PyCFunction = lltype.Ptr(lltype.FuncType([PyObject, PyObject], PyObject)) diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -42,11 +42,11 @@ which case o is returned. Use PySequence_Fast_GET_ITEM() to access the members of the result. Returns NULL on failure. 
If the object is not a sequence, raises TypeError with m as the message text.""" - if (space.is_true(space.isinstance(w_obj, space.w_list)) or - space.is_true(space.isinstance(w_obj, space.w_tuple))): + if (isinstance(w_obj, listobject.W_ListObject) or + isinstance(w_obj, tupleobject.W_TupleObject)): return w_obj try: - return space.newtuple(space.fixedview(w_obj)) + return tupleobject.W_TupleObject(space.fixedview(w_obj)) except OperationError: raise OperationError(space.w_TypeError, space.wrap(rffi.charp2str(m))) diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -6,13 +6,12 @@ borrow_from, make_ref, from_ref) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.objspace.std.tupleobject import W_TupleObject -from pypy.objspace.std.smalltupleobject import W_SmallTupleObject PyTuple_Check, PyTuple_CheckExact = build_type_checkers("Tuple") @cpython_api([Py_ssize_t], PyObject) def PyTuple_New(space, size): - return space.newtuple([space.w_None] * size) + return W_TupleObject([space.w_None] * size) @cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=-1) def PyTuple_SetItem(space, w_t, pos, w_obj): @@ -24,12 +23,12 @@ return 0 def _setitem_tuple(w_t, pos, w_obj): - if isinstance(w_t, W_TupleObject): - w_t.wrappeditems[pos] = w_obj - elif isinstance(w_t, W_SmallTupleObject): - w_t.setitem(pos, w_obj) - else: - assert False + # this function checks that w_t is really a W_TupleObject. It + # should only ever be called with a freshly built tuple from + # PyTuple_New(), which always return a W_TupleObject, even if there + # are also other implementations of tuples. + assert isinstance(w_t, W_TupleObject) + w_t.wrappeditems[pos] = w_obj @cpython_api([PyObject, Py_ssize_t], PyObject) def PyTuple_GetItem(space, w_t, pos): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -91,9 +91,6 @@ descr_neg = _unaryop_impl("negative") descr_abs = _unaryop_impl("absolute") - def descr_tolist(self, space): - return self.get_dtype(space).itemtype.to_builtin_type(space, self) - class W_BoolBox(W_GenericBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("bool") @@ -182,8 +179,6 @@ __neg__ = interp2app(W_GenericBox.descr_neg), __abs__ = interp2app(W_GenericBox.descr_abs), - - tolist = interp2app(W_GenericBox.descr_tolist), ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -876,17 +876,6 @@ arr.setshape(space, new_shape) return arr - def descr_tolist(self, space): - if len(self.shape) == 0: - assert isinstance(self, Scalar) - return self.value.descr_tolist(space) - w_result = space.newlist([]) - for i in range(self.shape[0]): - space.call_method(w_result, "append", - space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist") - ) - return w_result - def descr_mean(self, space): return space.div(self.descr_sum(space), space.wrap(self.find_size())) @@ -1496,7 +1485,6 @@ copy = interp2app(BaseArray.descr_copy), reshape = interp2app(BaseArray.descr_reshape), - tolist = interp2app(BaseArray.descr_tolist), ) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- 
a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -879,45 +879,6 @@ b[0] = 3 assert b.__debug_repr__() == 'Call2(add, forced=Array)' - def test_tolist_scalar(self): - from numpypy import int32, bool_ - x = int32(23) - assert x.tolist() == 23 - assert type(x.tolist()) is int - y = bool_(True) - assert y.tolist() is True - - def test_tolist_zerodim(self): - from numpypy import array - x = array(3) - assert x.tolist() == 3 - assert type(x.tolist()) is int - - def test_tolist_singledim(self): - from numpypy import array - a = array(range(5)) - assert a.tolist() == [0, 1, 2, 3, 4] - assert type(a.tolist()[0]) is int - b = array([0.2, 0.4, 0.6]) - assert b.tolist() == [0.2, 0.4, 0.6] - - def test_tolist_multidim(self): - from numpypy import array - a = array([[1, 2], [3, 4]]) - assert a.tolist() == [[1, 2], [3, 4]] - - def test_tolist_view(self): - from numpypy import array - a = array([[1,2],[3,4]]) - assert (a + a).tolist() == [[2, 4], [6, 8]] - - def test_tolist_slice(self): - from numpypy import array - a = array([[17.1, 27.2], [40.3, 50.3]]) - assert a[:,0].tolist() == [17.1, 40.3] - assert a[0].tolist() == [17.1, 27.2] - - class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -78,9 +78,6 @@ w_obj.__init__(self._coerce(space, w_item).value) return w_obj - def to_builtin_type(self, space, box): - return space.wrap(self.unbox(box)) - def _coerce(self, space, w_item): raise NotImplementedError diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -694,14 +694,16 @@ return self.wrap(r) @jit.look_inside_iff(lambda self, w_list: - jit.isconstant(w_list.length()) and w_list.length() < UNROLL_CUTOFF) + jit.isconstant(w_list.length()) and w_list.length() < UNROLL_CUTOFF) def getitems_copy(self, w_list): return [self.wrap(item) for item in self.unerase(w_list.lstorage)] @jit.unroll_safe def getitems_unroll(self, w_list): return [self.wrap(item) for item in self.unerase(w_list.lstorage)] - @jit.dont_look_inside + + @jit.look_inside_iff(lambda self, w_list: + jit.isconstant(w_list.length()) and w_list.length() < UNROLL_CUTOFF) def getitems_fixedsize(self, w_list): return self.getitems_unroll(w_list) diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -15,6 +15,7 @@ _registered_implementations.add(implcls) option_to_typename = { + "withspecialisedtuple" : ["specialisedtupleobject.W_SpecialisedTupleObject"], "withsmalltuple" : ["smalltupleobject.W_SmallTupleObject"], "withsmallint" : ["smallintobject.W_SmallIntObject"], "withsmalllong" : ["smalllongobject.W_SmallLongObject"], @@ -261,6 +262,11 @@ self.typeorder[smalltupleobject.W_SmallTupleObject] += [ (tupleobject.W_TupleObject, smalltupleobject.delegate_SmallTuple2Tuple)] + if config.objspace.std.withspecialisedtuple: + from pypy.objspace.std import specialisedtupleobject + self.typeorder[specialisedtupleobject.W_SpecialisedTupleObject] += [ + (tupleobject.W_TupleObject, specialisedtupleobject.delegate_SpecialisedTuple2Tuple)] + # put W_Root everywhere self.typeorder[W_Root] = [] for type in self.typeorder: diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ 
b/pypy/objspace/std/objspace.py @@ -29,7 +29,7 @@ from pypy.objspace.std.sliceobject import W_SliceObject from pypy.objspace.std.smallintobject import W_SmallIntObject from pypy.objspace.std.stringobject import W_StringObject -from pypy.objspace.std.tupleobject import W_TupleObject +from pypy.objspace.std.tupleobject import W_AbstractTupleObject from pypy.objspace.std.typeobject import W_TypeObject # types @@ -391,8 +391,8 @@ self.wrap("expected length %d, got %d" % (expected, got))) def unpackiterable(self, w_obj, expected_length=-1): - if isinstance(w_obj, W_TupleObject): - t = w_obj.wrappeditems[:] + if isinstance(w_obj, W_AbstractTupleObject): + t = w_obj.getitems_copy() elif isinstance(w_obj, W_ListObject): t = w_obj.getitems_copy() else: @@ -405,8 +405,8 @@ def fixedview(self, w_obj, expected_length=-1, unroll=False): """ Fast paths """ - if isinstance(w_obj, W_TupleObject): - t = w_obj.wrappeditems + if isinstance(w_obj, W_AbstractTupleObject): + t = w_obj.tolist() elif isinstance(w_obj, W_ListObject): if unroll: t = w_obj.getitems_unroll() @@ -430,8 +430,8 @@ def listview(self, w_obj, expected_length=-1): if isinstance(w_obj, W_ListObject): t = w_obj.getitems() - elif isinstance(w_obj, W_TupleObject): - t = w_obj.wrappeditems[:] + elif isinstance(w_obj, W_AbstractTupleObject): + t = w_obj.getitems_copy() else: return ObjSpace.unpackiterable(self, w_obj, expected_length) if expected_length != -1 and len(t) != expected_length: diff --git a/pypy/objspace/std/smalltupleobject.py b/pypy/objspace/std/smalltupleobject.py --- a/pypy/objspace/std/smalltupleobject.py +++ b/pypy/objspace/std/smalltupleobject.py @@ -9,13 +9,14 @@ from pypy.interpreter import gateway from pypy.rlib.debug import make_sure_not_resized from pypy.rlib.unroll import unrolling_iterable +from pypy.tool.sourcetools import func_with_new_name from pypy.objspace.std.tupleobject import W_AbstractTupleObject, W_TupleObject class W_SmallTupleObject(W_AbstractTupleObject): from pypy.objspace.std.tupletype import tuple_typedef as typedef - def tolist(self): - raise NotImplementedError + #def tolist(self): --- inherited from W_AbstractTupleObject + # raise NotImplementedError def length(self): raise NotImplementedError @@ -51,6 +52,9 @@ l[i] = getattr(self, 'w_value%s' % i) return l + # same source code, but builds and returns a resizable list + getitems_copy = func_with_new_name(tolist, 'getitems_copy') + def length(self): return n diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -0,0 +1,302 @@ +from pypy.interpreter.error import OperationError +from pypy.objspace.std.model import registerimplementation +from pypy.objspace.std.register_all import register_all +from pypy.objspace.std.multimethod import FailedToImplement +from pypy.objspace.std.tupleobject import W_AbstractTupleObject +from pypy.objspace.std.tupleobject import W_TupleObject +from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice +from pypy.rlib.rarithmetic import intmask +from pypy.rlib.objectmodel import compute_hash +from pypy.rlib.unroll import unrolling_iterable +from pypy.tool.sourcetools import func_with_new_name + +class NotSpecialised(Exception): + pass + +class W_SpecialisedTupleObject(W_AbstractTupleObject): + from pypy.objspace.std.tupletype import tuple_typedef as typedef + __slots__ = [] + + def __repr__(self): + """ representation for debugging purposes """ + reprlist = [repr(item) 
for item in self._to_unwrapped_list()] + return "%s(%s)" % (self.__class__.__name__, ', '.join(reprlist)) + + #def tolist(self): --- inherited from W_AbstractTupleObject + # raise NotImplementedError + + def _to_unwrapped_list(self): + "NOT_RPYTHON" + raise NotImplementedError + + def length(self): + raise NotImplementedError + + def getitem(self, index): + raise NotImplementedError + + def hash(self, space): + raise NotImplementedError + + def eq(self, space, w_other): + raise NotImplementedError + + def setitem(self, index, w_item): + raise NotImplementedError + + def unwrap(self, space): + return tuple(self._to_unwrapped_list()) + + def delegating(self): + pass # for tests only + + +def make_specialised_class(typetuple): + assert type(typetuple) == tuple + + nValues = len(typetuple) + iter_n = unrolling_iterable(range(nValues)) + + class cls(W_SpecialisedTupleObject): + def __init__(self, space, *values_w): + self.space = space + assert len(values_w) == nValues + for i in iter_n: + w_obj = values_w[i] + val_type = typetuple[i] + if val_type == int: + unwrapped = space.int_w(w_obj) + elif val_type == float: + unwrapped = space.float_w(w_obj) + elif val_type == str: + unwrapped = space.str_w(w_obj) + elif val_type == object: + unwrapped = w_obj + else: + raise AssertionError + setattr(self, 'value%s' % i, unwrapped) + + def length(self): + return nValues + + def tolist(self): + list_w = [None] * nValues + for i in iter_n: + value = getattr(self, 'value%s' % i) + if typetuple[i] != object: + value = self.space.wrap(value) + list_w[i] = value + return list_w + + # same source code, but builds and returns a resizable list + getitems_copy = func_with_new_name(tolist, 'getitems_copy') + + def _to_unwrapped_list(self): + "NOT_RPYTHON" + list_w = [None] * nValues + for i in iter_n: + value = getattr(self, 'value%s' % i) + if typetuple[i] == object: + value = self.space.unwrap(value) + list_w[i] = value + return list_w + + def hash(self, space): + # XXX duplicate logic from tupleobject.py + mult = 1000003 + x = 0x345678 + z = nValues + for i in iter_n: + value = getattr(self, 'value%s' % i) + if typetuple[i] == object: + y = space.int_w(space.hash(value)) + elif typetuple[i] == float: + # get the correct hash for float which is an + # integer & other less frequent cases + from pypy.objspace.std.floatobject import _hash_float + y = _hash_float(space, value) + else: + y = compute_hash(value) + x = (x ^ y) * mult + z -= 1 + mult += 82520 + z + z + x += 97531 + return space.wrap(intmask(x)) + + def _eq(self, w_other): + if not isinstance(w_other, cls): + # if we are not comparing same types, give up + raise FailedToImplement + for i in iter_n: + myval = getattr(self, 'value%s' % i) + otherval = getattr(w_other, 'value%s' % i) + if typetuple[i] == object: + if not self.space.eq_w(myval, otherval): + return False + else: + if myval != otherval: + return False + else: + return True + + def eq(self, space, w_other): + return space.newbool(self._eq(w_other)) + + def ne(self, space, w_other): + return space.newbool(not self._eq(w_other)) + +## def _compare(self, compare_op, w_other): +## if not isinstance(w_other, cls): +## raise FailedToImplement +## ncmp = min(self.length(), w_other.length()) +## for i in iter_n: +## if typetuple[i] == Any:#like space.eq on wrapped or two params? 
+## raise FailedToImplement +## if ncmp > i: +## l_val = getattr(self, 'value%s' % i) +## r_val = getattr(w_other, 'value%s' % i) +## if l_val != r_val: +## return compare_op(l_val, r_val) +## return compare_op(self.length(), w_other.length()) + + def getitem(self, index): + for i in iter_n: + if index == i: + value = getattr(self, 'value%s' % i) + if typetuple[i] != object: + value = self.space.wrap(value) + return value + raise IndexError + + cls.__name__ = ('W_SpecialisedTupleObject_' + + ''.join([t.__name__[0] for t in typetuple])) + _specialisations.append(cls) + return cls + +# ---------- current specialized versions ---------- + +_specialisations = [] +Cls_ii = make_specialised_class((int, int)) +Cls_is = make_specialised_class((int, str)) +Cls_io = make_specialised_class((int, object)) +Cls_si = make_specialised_class((str, int)) +Cls_ss = make_specialised_class((str, str)) +Cls_so = make_specialised_class((str, object)) +Cls_oi = make_specialised_class((object, int)) +Cls_os = make_specialised_class((object, str)) +Cls_oo = make_specialised_class((object, object)) +Cls_ff = make_specialised_class((float, float)) +Cls_ooo = make_specialised_class((object, object, object)) + +def makespecialisedtuple(space, list_w): + if len(list_w) == 2: + w_arg1, w_arg2 = list_w + w_type1 = space.type(w_arg1) + w_type2 = space.type(w_arg2) + # + if w_type1 is space.w_int: + if w_type2 is space.w_int: + return Cls_ii(space, w_arg1, w_arg2) + elif w_type2 is space.w_str: + return Cls_is(space, w_arg1, w_arg2) + else: + return Cls_io(space, w_arg1, w_arg2) + # + elif w_type1 is space.w_str: + if w_type2 is space.w_int: + return Cls_si(space, w_arg1, w_arg2) + elif w_type2 is space.w_str: + return Cls_ss(space, w_arg1, w_arg2) + else: + return Cls_so(space, w_arg1, w_arg2) + # + elif w_type1 is space.w_float and w_type2 is space.w_float: + return Cls_ff(space, w_arg1, w_arg2) + # + else: + if w_type2 is space.w_int: + return Cls_oi(space, w_arg1, w_arg2) + elif w_type2 is space.w_str: + return Cls_os(space, w_arg1, w_arg2) + else: + return Cls_oo(space, w_arg1, w_arg2) + # + elif len(list_w) == 3: + return Cls_ooo(space, list_w[0], list_w[1], list_w[2]) + else: + raise NotSpecialised + +# ____________________________________________________________ + +registerimplementation(W_SpecialisedTupleObject) + +def delegate_SpecialisedTuple2Tuple(space, w_specialised): + w_specialised.delegating() + return W_TupleObject(w_specialised.tolist()) + +def len__SpecialisedTuple(space, w_tuple): + return space.wrap(w_tuple.length()) + +def getitem__SpecialisedTuple_ANY(space, w_tuple, w_index): + index = space.getindex_w(w_index, space.w_IndexError, "tuple index") + if index < 0: + index += w_tuple.length() + try: + return w_tuple.getitem(index) + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("tuple index out of range")) + +def getitem__SpecialisedTuple_Slice(space, w_tuple, w_slice): + length = w_tuple.length() + start, stop, step, slicelength = w_slice.indices4(space, length) + assert slicelength >= 0 + subitems = [None] * slicelength + for i in range(slicelength): + subitems[i] = w_tuple.getitem(start) + start += step + return space.newtuple(subitems) + +def mul_specialisedtuple_times(space, w_tuple, w_times): + try: + times = space.getindex_w(w_times, space.w_OverflowError) + except OperationError, e: + if e.match(space, space.w_TypeError): + raise FailedToImplement + raise + if times == 1 and space.type(w_tuple) == space.w_tuple: + return w_tuple + items = w_tuple.tolist() + return 
space.newtuple(items * times) + +def mul__SpecialisedTuple_ANY(space, w_tuple, w_times): + return mul_specialisedtuple_times(space, w_tuple, w_times) + +def mul__ANY_SpecialisedTuple(space, w_times, w_tuple): + return mul_specialisedtuple_times(space, w_tuple, w_times) + +def eq__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): + return w_tuple1.eq(space, w_tuple2) + +def ne__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): + return w_tuple1.ne(space, w_tuple2) + +##from operator import lt, le, ge, gt + +##def lt__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(lt, w_tuple2)) + +##def le__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(le, w_tuple2)) + +##def ge__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(ge, w_tuple2)) + +##def gt__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(gt, w_tuple2)) + +def hash__SpecialisedTuple(space, w_tuple): + return w_tuple.hash(space) + +from pypy.objspace.std import tupletype +register_all(vars(), tupletype) diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -0,0 +1,234 @@ +import py, sys +from pypy.objspace.std.tupleobject import W_TupleObject +from pypy.objspace.std.specialisedtupleobject import W_SpecialisedTupleObject +from pypy.objspace.std.specialisedtupleobject import _specialisations +from pypy.interpreter.error import OperationError +from pypy.conftest import gettestobjspace, option +from pypy.objspace.std.test import test_tupleobject +from pypy.interpreter import gateway + + +for cls in _specialisations: + globals()[cls.__name__] = cls + + +class TestW_SpecialisedTupleObject(): + + def setup_class(cls): + cls.space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) + + def test_isspecialisedtupleobjectintint(self): + w_tuple = self.space.newtuple([self.space.wrap(1), self.space.wrap(2)]) + assert isinstance(w_tuple, W_SpecialisedTupleObject_ii) + + def test_isnotspecialisedtupleobject(self): + w_tuple = self.space.newtuple([self.space.wrap({})]) + assert not isinstance(w_tuple, W_SpecialisedTupleObject) + + def test_specialisedtupleclassname(self): + w_tuple = self.space.newtuple([self.space.wrap(1), self.space.wrap(2)]) + assert w_tuple.__class__.__name__ == 'W_SpecialisedTupleObject_ii' + + def test_hash_against_normal_tuple(self): + N_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": False}) + S_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) + + def hash_test(values): + N_values_w = [N_space.wrap(value) for value in values] + S_values_w = [S_space.wrap(value) for value in values] + N_w_tuple = N_space.newtuple(N_values_w) + S_w_tuple = S_space.newtuple(S_values_w) + + assert isinstance(S_w_tuple, W_SpecialisedTupleObject) + assert isinstance(N_w_tuple, W_TupleObject) + assert not N_space.is_true(N_space.eq(N_w_tuple, S_w_tuple)) + assert S_space.is_true(S_space.eq(N_w_tuple, S_w_tuple)) + assert S_space.is_true(S_space.eq(N_space.hash(N_w_tuple), S_space.hash(S_w_tuple))) + + hash_test([1,2]) + hash_test([1.5,2.8]) + hash_test([1.0,2.0]) + hash_test(['arbitrary','strings']) + hash_test([1,(1,2,3,4)]) + hash_test([1,(1,2)]) + hash_test([1,('a',2)]) + hash_test([1,()]) + 
hash_test([1,2,3]) + + +class AppTestW_SpecialisedTupleObject: + + def setup_class(cls): + cls.space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) + def forbid_delegation(space, w_tuple): + def delegation_forbidden(): + # haaaack + co = sys._getframe(2).f_code + if co.co_name.startswith('_mm_repr_tuple'): + return + raise OperationError(space.w_ReferenceError, w_tuple) + w_tuple.delegating = delegation_forbidden + return w_tuple + if option.runappdirect: + cls.w_forbid_delegation = lambda self, x: x + cls.test_delegation = lambda self: skip("runappdirect") + else: + cls.w_forbid_delegation = cls.space.wrap( + gateway.interp2app(forbid_delegation)) + + def w_isspecialised(self, obj, expected=''): + import __pypy__ + r = __pypy__.internal_repr(obj) + print obj, '==>', r, ' (expected: %r)' % expected + return ("SpecialisedTupleObject" + expected) in r + + def test_createspecialisedtuple(self): + spec = {int: 'i', + float: 'f', + str: 's', + list: 'o'} + # + for x in [42, 4.2, "foo", []]: + for y in [43, 4.3, "bar", []]: + expected1 = spec[type(x)] + expected2 = spec[type(y)] + if (expected1 == 'f') ^ (expected2 == 'f'): + if expected1 == 'f': expected1 = 'o' + if expected2 == 'f': expected2 = 'o' + obj = (x, y) + assert self.isspecialised(obj, '_' + expected1 + expected2) + # + obj = (1, 2, 3) + assert self.isspecialised(obj, '_ooo') + + def test_delegation(self): + t = self.forbid_delegation((42, 43)) + raises(ReferenceError, t.__getslice__, 0, 1) + + def test_len(self): + t = self.forbid_delegation((42,43)) + assert len(t) == 2 + + def test_notspecialisedtuple(self): + assert not self.isspecialised((42,43,44,45)) + assert not self.isspecialised((1.5,)) + + def test_slicing_to_specialised(self): + t = (1, 2, 3) + assert self.isspecialised(t[0:2]) + t = (1, '2', 3) + assert self.isspecialised(t[0:5:2]) + + def test_adding_to_specialised(self): + t = (1,) + assert self.isspecialised(t + (2,)) + + def test_multiply_to_specialised(self): + t = (1,) + assert self.isspecialised(t * 2) + + def test_slicing_from_specialised(self): + t = (1, 2, 3) + assert t[0:2:1] == (1, 2) + + def test_eq_no_delegation(self): + t = (1,) + a = self.forbid_delegation(t + (2,)) + b = (1, 2) + assert a == b + + c = (2, 1) + assert not a == c + + def test_eq_can_delegate(self): + a = (1,2) + b = (1,3,2) + assert not a == b + + values = [2, 2L, 2.0, 1, 1L, 1.0] + for x in values: + for y in values: + assert ((1,2) == (x,y)) == (1 == x and 2 == y) + + def test_neq(self): + a = self.forbid_delegation((1,2)) + b = (1,) + b = b+(2,) + assert not a != b + + c = (1,3) + assert a != c + + def test_ordering(self): + a = (1,2) #self.forbid_delegation((1,2)) --- code commented out + assert a < (2,2) + assert a < (1,3) + assert not a < (1,2) + + assert a <= (2,2) + assert a <= (1,2) + assert not a <= (1,1) + + assert a >= (0,2) + assert a >= (1,2) + assert not a >= (1,3) + + assert a > (0,2) + assert a > (1,1) + assert not a > (1,3) + + assert (2,2) > a + assert (1,3) > a + assert not (1,2) > a + + assert (2,2) >= a + assert (1,2) >= a + assert not (1,1) >= a + + assert (0,2) <= a + assert (1,2) <= a + assert not (1,3) <= a + + assert (0,2) < a + assert (1,1) < a + assert not (1,3) < a + + def test_hash(self): + a = (1,2) + b = (1,) + b += (2,) # else a and b refer to same constant + assert hash(a) == hash(b) + + c = (2,4) + assert hash(a) != hash(c) + + assert hash(a) == hash((1L, 2L)) == hash((1.0, 2.0)) == hash((1.0, 2L)) + + def test_getitem(self): + t = self.forbid_delegation((5,3)) + assert (t)[0] == 5 + 
assert (t)[1] == 3 + assert (t)[-1] == 3 + assert (t)[-2] == 5 + raises(IndexError, "t[2]") + raises(IndexError, "t[-3]") + + def test_three_tuples(self): + b = self.forbid_delegation((1, 2, 3)) + c = (1,) + d = c + (2, 3) + assert self.isspecialised(d) + assert b == d + + def test_mongrel(self): + a = self.forbid_delegation((1, 2.2, '333')) + assert self.isspecialised(a) + assert len(a) == 3 + assert a[0] == 1 and a[1] == 2.2 and a[2] == '333' + b = ('333',) + assert a == (1, 2.2,) + b + assert not a != (1, 2.2) + b + + +class AppTestAll(test_tupleobject.AppTestW_TupleObject): + pass diff --git a/pypy/objspace/std/test/test_tupleobject.py b/pypy/objspace/std/test/test_tupleobject.py --- a/pypy/objspace/std/test/test_tupleobject.py +++ b/pypy/objspace/std/test/test_tupleobject.py @@ -280,6 +280,8 @@ assert () * 10 == () assert (5,) * 3 == (5,5,5) assert (5,2) * 2 == (5,2,5,2) + + def test_mul_identity(self): t = (1,2,3) assert (t * 1) is t diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -12,6 +12,15 @@ class W_AbstractTupleObject(W_Object): __slots__ = () + def tolist(self): + "Returns the items, as a fixed-size list." + raise NotImplementedError + + def getitems_copy(self): + "Returns a copy of the items, as a resizable list." + raise NotImplementedError + + class W_TupleObject(W_AbstractTupleObject): from pypy.objspace.std.tupletype import tuple_typedef as typedef _immutable_fields_ = ['wrappeditems[*]'] @@ -29,6 +38,12 @@ items = [space.unwrap(w_item) for w_item in w_tuple.wrappeditems] return tuple(items) + def tolist(self): + return self.wrappeditems + + def getitems_copy(self): + return self.wrappeditems[:] # returns a resizable list + registerimplementation(W_TupleObject) diff --git a/pypy/objspace/std/tupletype.py b/pypy/objspace/std/tupletype.py --- a/pypy/objspace/std/tupletype.py +++ b/pypy/objspace/std/tupletype.py @@ -5,6 +5,14 @@ def wraptuple(space, list_w): from pypy.objspace.std.tupleobject import W_TupleObject + + if space.config.objspace.std.withspecialisedtuple: + from specialisedtupleobject import makespecialisedtuple, NotSpecialised + try: + return makespecialisedtuple(space, list_w) + except NotSpecialised: + pass + if space.config.objspace.std.withsmalltuple: from pypy.objspace.std.smalltupleobject import W_SmallTupleObject2 from pypy.objspace.std.smalltupleobject import W_SmallTupleObject3 diff --git a/pypy/rlib/ropenssl.py b/pypy/rlib/ropenssl.py --- a/pypy/rlib/ropenssl.py +++ b/pypy/rlib/ropenssl.py @@ -110,6 +110,10 @@ 'struct GENERAL_NAME_st', [('type', rffi.INT), ]) + EVP_MD_st = rffi_platform.Struct( + 'EVP_MD', + [('md_size', rffi.INT), + ('block_size', rffi.INT)]) EVP_MD_SIZE = rffi_platform.SizeOf('EVP_MD') EVP_MD_CTX_SIZE = rffi_platform.SizeOf('EVP_MD_CTX') @@ -258,7 +262,7 @@ [BIO, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], X509) EVP_MD_CTX = rffi.COpaquePtr('EVP_MD_CTX', compilation_info=eci) -EVP_MD = rffi.COpaquePtr('EVP_MD', compilation_info=eci) +EVP_MD = lltype.Ptr(EVP_MD_st) OpenSSL_add_all_digests = external( 'OpenSSL_add_all_digests', [], lltype.Void) diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -63,7 +63,10 @@ exec_ = eval def repr(self, w_value): - return self.space.unwrap(self.space.repr(w_value)) + try: + return self.space.unwrap(self.space.repr(w_value)) + except Exception, e: + return ""%e def is_true(self, w_value): return 
self.space.is_true(w_value) From noreply at buildbot.pypy.org Sun Dec 11 15:20:02 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 11 Dec 2011 15:20:02 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: hg merge default Message-ID: <20111211142002.576D582210@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50386:853be60223ac Date: 2011-12-11 15:19 +0100 http://bitbucket.org/pypy/pypy/changeset/853be60223ac/ Log: hg merge default diff --git a/pypy/module/_continuation/test/test_translated.py b/pypy/module/_continuation/test/test_translated.py --- a/pypy/module/_continuation/test/test_translated.py +++ b/pypy/module/_continuation/test/test_translated.py @@ -93,13 +93,20 @@ if not option.runappdirect: py.test.skip("meant only for -A run") - def test_single_threaded(self): - for i in range(20): - yield Runner().run_test, - - def test_multi_threaded(self): - for i in range(5): - yield multithreaded_test, +def _setup(): + for _i in range(20): + def test_single_threaded(self): + Runner().run_test() + test_single_threaded.func_name = 'test_single_threaded_%d' % _i + setattr(AppTestWrapper, test_single_threaded.func_name, + test_single_threaded) + for _i in range(5): + def test_multi_threaded(self): + multithreaded_test() + test_multi_threaded.func_name = 'test_multi_threaded_%d' % _i + setattr(AppTestWrapper, test_multi_threaded.func_name, + test_multi_threaded) +_setup() class ThreadTest(object): def __init__(self, lock): diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -9,7 +9,7 @@ from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.objectmodel import instantiate, we_are_translated from pypy.rlib.nonconst import NonConstant -from pypy.rlib.rarithmetic import r_uint +from pypy.rlib.rarithmetic import r_uint, r_singlefloat from pypy.translator.translator import TranslationContext from pypy.tool.option import make_config @@ -145,9 +145,15 @@ self._see_interp2app(x) if isinstance(x, GetSetProperty): self._see_getsetproperty(x) + if isinstance(x, r_singlefloat): + self._wrap_not_rpython(x) return w_some_obj() wrap._annspecialcase_ = "specialize:argtype(1)" + def _wrap_not_rpython(self, x): + "NOT_RPYTHON" + raise NotImplementedError + def _see_interp2app(self, interp2app): "NOT_RPYTHON" activation = interp2app._code.activation From pullrequests-noreply at bitbucket.org Sun Dec 11 16:21:41 2011 From: pullrequests-noreply at bitbucket.org (Bitbucket) Date: Sun, 11 Dec 2011 15:21:41 -0000 Subject: [pypy-commit] [OPEN] Pull request #18 for pypy/pypy: Adds string constructors to ints and floats Message-ID: A new pull request has been opened by Jeff Terrace. jterrace/pypy has changes to be pulled into pypy/pypy. https://bitbucket.org/pypy/pypy/pull-request/18/adds-string-constructors-to-ints-and Title: Adds string constructors to ints and floats # Added tests for (u)int8-(u)int64 and float32/float64 for taking strings as contructors, e.g. int8('50') # Added string type to FakeSpace # Updated int and float _coerce methods to handle strings. I had to special case it because space.int(space.wrap('50')) does not work. Changes to be pulled: d0fa1bba8dd6 by Jeff Terrace: "Add string to FakeSpace to fix failing compile tests" aaa9b6a48bbb by Jeff Terrace: "Updated int and float types to take strings in their constructors so things like?" -- This is an issue notification from bitbucket.org. 
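(Editorial note on the pull request above: space.int() on a wrapped string does not parse it, which is why the description says the _coerce methods special-case strings. The sketch below is only an illustration of that kind of special-casing, not the actual patch; the helper name and exact calls are hypothetical.)

    def coerce_int_like(space, w_item):
        # hypothetical helper: parse strings explicitly instead of relying
        # on space.int(), which does not handle a wrapped str here
        if space.is_true(space.isinstance(w_item, space.w_str)):
            return space.int_w(space.call_function(space.w_int, w_item))
        return space.int_w(space.int(w_item))

With something along these lines in place, application-level code such as int8('50') can build the scalar from a string, as the new tests in the pull request exercise.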
You are receiving this either because you are the participating in a pull request, or you are following it. From noreply at buildbot.pypy.org Sun Dec 11 17:33:47 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 17:33:47 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: hg merge jit-targets Message-ID: <20111211163347.128D482210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50387:11ca1186b82f Date: 2011-12-11 17:33 +0100 http://bitbucket.org/pypy/pypy/changeset/11ca1186b82f/ Log: hg merge jit-targets diff too long, truncating to 10000 out of 40345 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -1,3 +1,4 @@ b590cf6de4190623aad9aa698694c22e614d67b9 release-1.5 b48df0bf4e75b81d98f19ce89d4a7dc3e1dab5e5 benchmarked d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6 +ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7 diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -74,7 +74,8 @@ self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file - open(name, "w") + f = open(name, "w") + f.close() self.files.append(name) def test_tempnam(self): diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -201,7 +201,7 @@ RegrTest('test_difflib.py'), RegrTest('test_dircache.py', core=True), RegrTest('test_dis.py'), - RegrTest('test_distutils.py'), + RegrTest('test_distutils.py', skip=True), RegrTest('test_dl.py', skip=True), RegrTest('test_doctest.py', usemodules="thread"), RegrTest('test_doctest2.py'), diff --git a/lib-python/modified-2.7/ctypes/__init__.py b/lib-python/modified-2.7/ctypes/__init__.py --- a/lib-python/modified-2.7/ctypes/__init__.py +++ b/lib-python/modified-2.7/ctypes/__init__.py @@ -351,7 +351,7 @@ self._FuncPtr = _FuncPtr if handle is None: - self._handle = _ffi.CDLL(name) + self._handle = _ffi.CDLL(name, mode) else: self._handle = handle diff --git a/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py b/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py --- a/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py +++ b/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py @@ -1,6 +1,5 @@ import unittest from ctypes import * -from ctypes.test import xfail class MyInt(c_int): def __cmp__(self, other): @@ -27,7 +26,6 @@ self.assertEqual(None, cb()) - @xfail def test_int_callback(self): args = [] def func(arg): diff --git a/lib-python/modified-2.7/heapq.py b/lib-python/modified-2.7/heapq.py new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7/heapq.py @@ -0,0 +1,442 @@ +# -*- coding: latin-1 -*- + +"""Heap queue algorithm (a.k.a. priority queue). + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. 
+ +Usage: + +heap = [] # creates an empty heap +heappush(heap, item) # pushes a new item on the heap +item = heappop(heap) # pops the smallest item from the heap +item = heap[0] # smallest item on the heap without popping it +heapify(x) # transforms list into a heap, in-place, in linear time +item = heapreplace(heap, item) # pops and returns smallest item, and adds + # new item; the heap size is unchanged + +Our API differs from textbook heap algorithms as follows: + +- We use 0-based indexing. This makes the relationship between the + index for a node and the indexes for its children slightly less + obvious, but is more suitable since Python uses 0-based indexing. + +- Our heappop() method returns the smallest item, not the largest. + +These two make it possible to view the heap as a regular Python list +without surprises: heap[0] is the smallest item, and heap.sort() +maintains the heap invariant! +""" + +# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger + +__about__ = """Heap queues + +[explanation by Fran�ois Pinard] + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +The strange invariant above is meant to be an efficient memory +representation for a tournament. The numbers below are `k', not a[k]: + + 0 + + 1 2 + + 3 4 5 6 + + 7 8 9 10 11 12 13 14 + + 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 + + +In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In +an usual binary tournament we see in sports, each cell is the winner +over the two cells it tops, and we can trace the winner down the tree +to see all opponents s/he had. However, in many computer applications +of such tournaments, we do not need to trace the history of a winner. +To be more memory efficient, when a winner is promoted, we try to +replace it by something else at a lower level, and the rule becomes +that a cell and the two cells it tops contain three different items, +but the top cell "wins" over the two topped cells. + +If this heap invariant is protected at all time, index 0 is clearly +the overall winner. The simplest algorithmic way to remove it and +find the "next" winner is to move some loser (let's say cell 30 in the +diagram above) into the 0 position, and then percolate this new 0 down +the tree, exchanging values, until the invariant is re-established. +This is clearly logarithmic on the total number of items in the tree. +By iterating over all items, you get an O(n ln n) sort. + +A nice feature of this sort is that you can efficiently insert new +items while the sort is going on, provided that the inserted items are +not "better" than the last 0'th element you extracted. This is +especially useful in simulation contexts, where the tree holds all +incoming events, and the "win" condition means the smallest scheduled +time. When an event schedule other events for execution, they are +scheduled into the future, so they can easily go into the heap. So, a +heap is a good structure for implementing schedulers (this is what I +used for my MIDI sequencer :-). + +Various structures for implementing schedulers have been extensively +studied, and heaps are good for this, as they are reasonably speedy, +the speed is almost constant, and the worst case is not much different +than the average case. 
However, there are other representations which +are more efficient overall, yet the worst cases might be terrible. + +Heaps are also very useful in big disk sorts. You most probably all +know that a big sort implies producing "runs" (which are pre-sorted +sequences, which size is usually related to the amount of CPU memory), +followed by a merging passes for these runs, which merging is often +very cleverly organised[1]. It is very important that the initial +sort produces the longest runs possible. Tournaments are a good way +to that. If, using all the memory available to hold a tournament, you +replace and percolate items that happen to fit the current run, you'll +produce runs which are twice the size of the memory for random input, +and much better for input fuzzily ordered. + +Moreover, if you output the 0'th item on disk and get an input which +may not fit in the current tournament (because the value "wins" over +the last output value), it cannot fit in the heap, so the size of the +heap decreases. The freed memory could be cleverly reused immediately +for progressively building a second heap, which grows at exactly the +same rate the first heap is melting. When the first heap completely +vanishes, you switch heaps and start a new run. Clever and quite +effective! + +In a word, heaps are useful memory structures to know. I use them in +a few applications, and I think it is good to keep a `heap' module +around. :-) + +-------------------- +[1] The disk balancing algorithms which are current, nowadays, are +more annoying than clever, and this is a consequence of the seeking +capabilities of the disks. On devices which cannot seek, like big +tape drives, the story was quite different, and one had to be very +clever to ensure (far in advance) that each tape movement will be the +most effective possible (that is, will best participate at +"progressing" the merge). Some tapes were even able to read +backwards, and this was also used to avoid the rewinding time. +Believe me, real good tape sorts were quite spectacular to watch! +From all times, sorting has always been a Great Art! :-) +""" + +__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge', + 'nlargest', 'nsmallest', 'heappushpop'] + +from itertools import islice, repeat, count, imap, izip, tee, chain +from operator import itemgetter +import bisect + +def heappush(heap, item): + """Push item onto heap, maintaining the heap invariant.""" + heap.append(item) + _siftdown(heap, 0, len(heap)-1) + +def heappop(heap): + """Pop the smallest item off the heap, maintaining the heap invariant.""" + lastelt = heap.pop() # raises appropriate IndexError if heap is empty + if heap: + returnitem = heap[0] + heap[0] = lastelt + _siftup(heap, 0) + else: + returnitem = lastelt + return returnitem + +def heapreplace(heap, item): + """Pop and return the current smallest value, and add the new item. + + This is more efficient than heappop() followed by heappush(), and can be + more appropriate when using a fixed-size heap. Note that the value + returned may be larger than item! 
That constrains reasonable uses of + this routine unless written as part of a conditional replacement: + + if item > heap[0]: + item = heapreplace(heap, item) + """ + returnitem = heap[0] # raises appropriate IndexError if heap is empty + heap[0] = item + _siftup(heap, 0) + return returnitem + +def heappushpop(heap, item): + """Fast version of a heappush followed by a heappop.""" + if heap and heap[0] < item: + item, heap[0] = heap[0], item + _siftup(heap, 0) + return item + +def heapify(x): + """Transform list into a heap, in-place, in O(len(heap)) time.""" + n = len(x) + # Transform bottom-up. The largest index there's any point to looking at + # is the largest with a child index in-range, so must have 2*i + 1 < n, + # or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so + # j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is + # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1. + for i in reversed(xrange(n//2)): + _siftup(x, i) + +def nlargest(n, iterable): + """Find the n largest elements in a dataset. + + Equivalent to: sorted(iterable, reverse=True)[:n] + """ + if n < 0: # for consistency with the c impl + return [] + it = iter(iterable) + result = list(islice(it, n)) + if not result: + return result + heapify(result) + _heappushpop = heappushpop + for elem in it: + _heappushpop(result, elem) + result.sort(reverse=True) + return result + +def nsmallest(n, iterable): + """Find the n smallest elements in a dataset. + + Equivalent to: sorted(iterable)[:n] + """ + if n < 0: # for consistency with the c impl + return [] + if hasattr(iterable, '__len__') and n * 10 <= len(iterable): + # For smaller values of n, the bisect method is faster than a minheap. + # It is also memory efficient, consuming only n elements of space. + it = iter(iterable) + result = sorted(islice(it, 0, n)) + if not result: + return result + insort = bisect.insort + pop = result.pop + los = result[-1] # los --> Largest of the nsmallest + for elem in it: + if los <= elem: + continue + insort(result, elem) + pop() + los = result[-1] + return result + # An alternative approach manifests the whole iterable in memory but + # saves comparisons by heapifying all at once. Also, saves time + # over bisect.insort() which has O(n) data movement time for every + # insertion. Finding the n smallest of an m length iterable requires + # O(m) + O(n log m) comparisons. + h = list(iterable) + heapify(h) + return map(heappop, repeat(h, min(n, len(h)))) + +# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos +# is the index of a leaf with a possibly out-of-order value. Restore the +# heap invariant. +def _siftdown(heap, startpos, pos): + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. + while pos > startpos: + parentpos = (pos - 1) >> 1 + parent = heap[parentpos] + if newitem < parent: + heap[pos] = parent + pos = parentpos + continue + break + heap[pos] = newitem + +# The child indices of heap index pos are already heaps, and we want to make +# a heap at index pos too. We do this by bubbling the smaller child of +# pos up (and so on with that child's children, etc) until hitting a leaf, +# then using _siftdown to move the oddball originally at index pos into place. +# +# We *could* break out of the loop as soon as we find a pos where newitem <= +# both its children, but turns out that's not a good idea, and despite that +# many books write the algorithm that way. 
During a heap pop, the last array +# element is sifted in, and that tends to be large, so that comparing it +# against values starting from the root usually doesn't pay (= usually doesn't +# get us out of the loop early). See Knuth, Volume 3, where this is +# explained and quantified in an exercise. +# +# Cutting the # of comparisons is important, since these routines have no +# way to extract "the priority" from an array element, so that intelligence +# is likely to be hiding in custom __cmp__ methods, or in array elements +# storing (priority, record) tuples. Comparisons are thus potentially +# expensive. +# +# On random arrays of length 1000, making this change cut the number of +# comparisons made by heapify() a little, and those made by exhaustive +# heappop() a lot, in accord with theory. Here are typical results from 3 +# runs (3 just to demonstrate how small the variance is): +# +# Compares needed by heapify Compares needed by 1000 heappops +# -------------------------- -------------------------------- +# 1837 cut to 1663 14996 cut to 8680 +# 1855 cut to 1659 14966 cut to 8678 +# 1847 cut to 1660 15024 cut to 8703 +# +# Building the heap by using heappush() 1000 times instead required +# 2198, 2148, and 2219 compares: heapify() is more efficient, when +# you can use it. +# +# The total compares needed by list.sort() on the same lists were 8627, +# 8627, and 8632 (this should be compared to the sum of heapify() and +# heappop() compares): list.sort() is (unsurprisingly!) more efficient +# for sorting. + +def _siftup(heap, pos): + endpos = len(heap) + startpos = pos + newitem = heap[pos] + # Bubble up the smaller child until hitting a leaf. + childpos = 2*pos + 1 # leftmost child position + while childpos < endpos: + # Set childpos to index of smaller child. + rightpos = childpos + 1 + if rightpos < endpos and not heap[childpos] < heap[rightpos]: + childpos = rightpos + # Move the smaller child up. + heap[pos] = heap[childpos] + pos = childpos + childpos = 2*pos + 1 + # The leaf at pos is empty now. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). + heap[pos] = newitem + _siftdown(heap, startpos, pos) + +# If available, use C implementation +try: + from _heapq import * +except ImportError: + pass + +def merge(*iterables): + '''Merge multiple sorted inputs into a single sorted output. + + Similar to sorted(itertools.chain(*iterables)) but returns a generator, + does not pull the data into memory all at once, and assumes that each of + the input streams is already sorted (smallest to largest). + + >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) + [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] + + ''' + _heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration + + h = [] + h_append = h.append + for itnum, it in enumerate(map(iter, iterables)): + try: + next = it.next + h_append([next(), itnum, next]) + except _StopIteration: + pass + heapify(h) + + while 1: + try: + while 1: + v, itnum, next = s = h[0] # raises IndexError when h is empty + yield v + s[0] = next() # raises StopIteration when exhausted + _heapreplace(h, s) # restore heap condition + except _StopIteration: + _heappop(h) # remove empty iterator + except IndexError: + return + +# Extend the implementations of nsmallest and nlargest to use a key= argument +_nsmallest = nsmallest +def nsmallest(n, iterable, key=None): + """Find the n smallest elements in a dataset. 
+ + Equivalent to: sorted(iterable, key=key)[:n] + """ + # Short-cut for n==1 is to use min() when len(iterable)>0 + if n == 1: + it = iter(iterable) + head = list(islice(it, 1)) + if not head: + return [] + if key is None: + return [min(chain(head, it))] + return [min(chain(head, it), key=key)] + + # When n>=size, it's faster to use sort() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key)[:n] + + # When key is none, use simpler decoration + if key is None: + it = izip(iterable, count()) # decorate + result = _nsmallest(n, it) + return map(itemgetter(0), result) # undecorate + + # General case, slowest method + in1, in2 = tee(iterable) + it = izip(imap(key, in1), count(), in2) # decorate + result = _nsmallest(n, it) + return map(itemgetter(2), result) # undecorate + +_nlargest = nlargest +def nlargest(n, iterable, key=None): + """Find the n largest elements in a dataset. + + Equivalent to: sorted(iterable, key=key, reverse=True)[:n] + """ + + # Short-cut for n==1 is to use max() when len(iterable)>0 + if n == 1: + it = iter(iterable) + head = list(islice(it, 1)) + if not head: + return [] + if key is None: + return [max(chain(head, it))] + return [max(chain(head, it), key=key)] + + # When n>=size, it's faster to use sort() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key, reverse=True)[:n] + + # When key is none, use simpler decoration + if key is None: + it = izip(iterable, count(0,-1)) # decorate + result = _nlargest(n, it) + return map(itemgetter(0), result) # undecorate + + # General case, slowest method + in1, in2 = tee(iterable) + it = izip(imap(key, in1), count(0,-1), in2) # decorate + result = _nlargest(n, it) + return map(itemgetter(2), result) # undecorate + +if __name__ == "__main__": + # Simple sanity test + heap = [] + data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] + for item in data: + heappush(heap, item) + sort = [] + while heap: + sort.append(heappop(heap)) + print sort + + import doctest + doctest.testmod() diff --git a/lib-python/2.7/pkgutil.py b/lib-python/modified-2.7/pkgutil.py copy from lib-python/2.7/pkgutil.py copy to lib-python/modified-2.7/pkgutil.py --- a/lib-python/2.7/pkgutil.py +++ b/lib-python/modified-2.7/pkgutil.py @@ -244,7 +244,8 @@ return mod def get_data(self, pathname): - return open(pathname, "rb").read() + with open(pathname, "rb") as f: + return f.read() def _reopen(self): if self.file and self.file.closed: diff --git a/lib-python/modified-2.7/test/test_heapq.py b/lib-python/modified-2.7/test/test_heapq.py --- a/lib-python/modified-2.7/test/test_heapq.py +++ b/lib-python/modified-2.7/test/test_heapq.py @@ -186,6 +186,11 @@ self.assertFalse(sys.modules['heapq'] is self.module) self.assertTrue(hasattr(self.module.heapify, 'func_code')) + def test_islice_protection(self): + m = self.module + self.assertFalse(m.nsmallest(-1, [1])) + self.assertFalse(m.nlargest(-1, [1])) + class TestHeapC(TestHeap): module = c_heapq diff --git a/lib-python/modified-2.7/test/test_import.py b/lib-python/modified-2.7/test/test_import.py --- a/lib-python/modified-2.7/test/test_import.py +++ b/lib-python/modified-2.7/test/test_import.py @@ -64,6 +64,7 @@ except ImportError, err: self.fail("import from %s failed: %s" % (ext, err)) else: + # XXX importing .pyw is missing on Windows self.assertEqual(mod.a, a, "module loaded (%s) but contents invalid" % mod) self.assertEqual(mod.b, b, diff --git 
a/lib-python/modified-2.7/test/test_repr.py b/lib-python/modified-2.7/test/test_repr.py --- a/lib-python/modified-2.7/test/test_repr.py +++ b/lib-python/modified-2.7/test/test_repr.py @@ -254,8 +254,14 @@ eq = self.assertEqual touch(os.path.join(self.subpkgname, self.pkgname + os.extsep + 'py')) from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import areallylongpackageandmodulenametotestreprtruncation - eq(repr(areallylongpackageandmodulenametotestreprtruncation), - "" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__)) + # On PyPy, we use %r to format the file name; on CPython it is done + # with '%s'. It seems to me that %r is safer . + if '__pypy__' in sys.builtin_module_names: + eq(repr(areallylongpackageandmodulenametotestreprtruncation), + "" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__)) + else: + eq(repr(areallylongpackageandmodulenametotestreprtruncation), + "" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__)) eq(repr(sys), "") def test_type(self): diff --git a/lib-python/2.7/test/test_subprocess.py b/lib-python/modified-2.7/test/test_subprocess.py copy from lib-python/2.7/test/test_subprocess.py copy to lib-python/modified-2.7/test/test_subprocess.py --- a/lib-python/2.7/test/test_subprocess.py +++ b/lib-python/modified-2.7/test/test_subprocess.py @@ -16,11 +16,11 @@ # Depends on the following external programs: Python # -if mswindows: - SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), ' - 'os.O_BINARY);') -else: - SETBINARY = '' +#if mswindows: +# SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), ' +# 'os.O_BINARY);') +#else: +# SETBINARY = '' try: @@ -420,8 +420,9 @@ self.assertStderrEqual(stderr, "") def test_universal_newlines(self): - p = subprocess.Popen([sys.executable, "-c", - 'import sys,os;' + SETBINARY + + # NB. replaced SETBINARY with the -u flag + p = subprocess.Popen([sys.executable, "-u", "-c", + 'import sys,os;' + #SETBINARY + 'sys.stdout.write("line1\\n");' 'sys.stdout.flush();' 'sys.stdout.write("line2\\r");' @@ -448,8 +449,9 @@ def test_universal_newlines_communicate(self): # universal newlines through communicate() - p = subprocess.Popen([sys.executable, "-c", - 'import sys,os;' + SETBINARY + + # NB. 
replaced SETBINARY with the -u flag + p = subprocess.Popen([sys.executable, "-u", "-c", + 'import sys,os;' + #SETBINARY + 'sys.stdout.write("line1\\n");' 'sys.stdout.flush();' 'sys.stdout.write("line2\\r");' diff --git a/lib-python/modified-2.7/urllib2.py b/lib-python/modified-2.7/urllib2.py --- a/lib-python/modified-2.7/urllib2.py +++ b/lib-python/modified-2.7/urllib2.py @@ -395,11 +395,7 @@ meth_name = protocol+"_response" for processor in self.process_response.get(protocol, []): meth = getattr(processor, meth_name) - try: - response = meth(req, response) - except: - response.close() - raise + response = meth(req, response) return response diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -379,12 +379,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): @@ -404,7 +406,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -124,7 +124,8 @@ # for now, we always allow types.pointer, else a lot of tests # break. We need to rethink how pointers are represented, though if my_ffitype is not ffitype and ffitype is not _ffi.types.void_p: - raise ArgumentError, "expected %s instance, got %s" % (type(value), ffitype) + raise ArgumentError("expected %s instance, got %s" % (type(value), + ffitype)) return value._get_buffer_value() def _cast_addr(obj, _, tp): diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -17,7 +17,7 @@ if len(f) == 3: if (not hasattr(tp, '_type_') or not isinstance(tp._type_, str) - or tp._type_ not in "iIhHbBlL"): + or tp._type_ not in "iIhHbBlLqQ"): #XXX: are those all types? # we just dont get the type name # in the interp levle thrown TypeError diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -1,117 +1,6 @@ -"""qvfgbcvna naq hgbcvna punvef -qlfgbcvna naq hgbcvna punvef -V'z fbeel, pbhyq lbh cyrnfr abg nterr jvgu gur png nf jryy? -V'z fbeel, pbhyq lbh cyrnfr abg nterr jvgu gur punve nf jryy? -jr cnffrq gur RH erivrj -cbfg RhebClguba fcevag fgnegf 12.IVV.2007, 10nz -RhebClguba raqrq -n Pyrna Ragrecevfrf cebqhpgvba -npnqrzl vf n pbzcyvpngrq ebyr tnzr -npnqrzvn vf n pbzcyvpngrq ebyr tnzr -jbexvat pbqr vf crn fbhc -abg lbhe snhyg, zber yvxr vg'f n zbivat gnetrg -guvf fragrapr vf snyfr -abguvat vf gehr -Yncfnat Fbhpubat -Oenpunzhgnaqn -fbeel, V'yy grnpu gur pnpghf ubj gb fjvz yngre -Jul fb znal znal znal znal znal ivbyvaf? -Jul fb znal znal znal znal znal bowrpgf? 
-"eha njnl naq yvir ba n snez" nccebnpu gb fbsgjner qrirybczrag -"va snpg, lbh zvtug xabj zber nobhg gur genafyngvba gbbypunva nsgre znfgrevat eclguba guna fbzr angvir fcrnxre xabjf nobhg uvf zbgure gbathr" - kbeNkNk -"jurer qvq nyy gur ivbyvaf tb?" -- ClCl fgnghf oybt: uggc://zberclcl.oybtfcbg.pbz/ -uggc://kxpq.pbz/353/ -pnfhnyvgl ivbyngvbaf naq sylvat -wrgmg abpu fpubxbynqvtre -R09 2X @PNN:85? -vs lbh'er gelvat gb oybj hc fghss, jub pnerf? -vs fghss oybjf hc, lbh pner -2008 jvyy or gur lrne bs clcl ba gur qrfxgbc -2008 jvyy or gur lrne bs gur qrfxgbc ba #clcl -2008 jvyy or gur lrne bs gur qrfxgbc ba #clcl, Wnahnel jvyy or gur zbagu bs gur nyc gbcf -lrf, ohg jung'g gur frafr bs 0 < "qhena qhena" -eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF +"""eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF pglcrf unf n fcva bs 1/3 ' ' vf n fcnpr gbb -2009 jvyy or gur lrne bs WVG ba gur qrfxgbc -N ynathntr vf n qvnyrpg jvgu na nezl naq anil -gbcvpf ner sbe gur srroyr zvaqrq -2009 vf gur lrne bs ersyrpgvba ba gur qrfxgbc -gur tybor vf bhe cbal, gur pbfzbf bhe erny ubefr -jub nz V naq vs lrf, ubj znal? -cebtenzzvat va orq vf n cresrpgyl svar npgvivgl -zbber'f ynj vf n qeht jvgu gur jbefg pbzr qbja -EClguba: jr hfr vg fb lbh qba'g unir gb -Zbber'f ynj vf n qeht jvgu gur jbefg pbzr qbja. EClguba: haqrpvqrq. -guvatf jvyy or avpr naq fghss -qba'g cbfg yvaxf gb cngragf urer -Abg lbhe hfhny nanylfrf. -Gur Neg bs gur Punaary -Clguba 300 -V fhccbfr ZO bs UGZY cre frpbaq vf abg gur hfhny fcrrq zrnfher crbcyr jbhyq rkcrpg sbe n wvg -gur fha arire frgf ba gur ClCl rzcver -ghegyrf ner snfgre guna lbh guvax -cebtenzzvat vf na nrfgrguvp raqrnibhe -P vf tbbq sbe fbzrguvat, whfg abg sbe jevgvat fbsgjner -trezna vf tbbq sbe fbzrguvat, whfg abg sbe jevgvat fbsgjner -trezna vf tbbq sbe artngvbaf, whfg abg sbe jevgvat fbsgjner -# nffreg qvq abg penfu -lbh fubhyq fgneg n cresrpg fbsgjner zbirzrag -lbh fubhyq fgneg n cresrpg punaary gbcvp zbirzrag -guvf vf n cresrpg punaary gbcvp -guvf vf n frys-ersreragvny punaary gbcvp -crrcubcr bcgvzvmngvbaf ner jung n Fhssvpvragyl Fzneg Pbzcvyre hfrf -"crrcubcr" bcgvzvmngvbaf ner jung na bcgvzvfgvp Pbzcvyre hfrf -pubbfr lbhe unpx -gur 'fhcre' xrljbeq vf abg gung uhttnoyr -wlguba cngpurf ner abg rabhtu sbe clcl -- qb lbh xabj oreyva? - nyy bs vg? - jryy, whfg oreyva -- ubj jvyy gur snpg gung gurl ner hfrq va bhe ercy punatr bhe gbcvpf? -- ubj pna vg rire unir jbexrq? -- jurer fubhyq gur unpx or fgberq? -- Vg'f uneq gb fnl rknpgyl jung pbafgvghgrf erfrnepu va gur pbzchgre jbeyq, ohg nf n svefg nccebkvzngvba, vg'f fbsgjner gung qbrfa'g unir hfref. -- Cebtenzzvat vf nyy nobhg xabjvat jura gb obvy gur benatr fcbatr qbaxrl npebff gur cuvyyvcvarf -- Jul fb znal, znal, znal, znal, znal, znal qhpxyvatf? -- ab qrgnvy vf bofpher rabhtu gb abg unir fbzr pbqr qrcraqvat ba vg. -- jung V trarenyyl jnag vf serr fcrrqhcf -- nyy bs ClCl vf kv-dhnyvgl -"lbh pna nyjnlf xvyy -9 be bf._rkvg() vs lbh'er va n uheel" -Ohernhpengf ohvyq npnqrzvp rzcverf juvpu puhea bhg zrnavatyrff fbyhgvbaf gb veeryrinag ceboyrzf. -vg'f abg n unpx, vg'f n jbexnebhaq -ClCl qbrfa'g unir pbcbylinevnqvp qrcraqragyl-zbabzbecurq ulcresyhknqf -ClCl qbrfa'g punatr gur shaqnzragny culfvpf pbafgnagf -Qnapr bs gur Fhtnecyhz Snvel -Wnin vf whfg tbbq rabhtu gb or cenpgvpny, ohg abg tbbq rabhtu gb or hfnoyr. -RhebClguba vf unccravat, qba'g rkcrpg nal dhvpx erfcbafr gvzrf. 
-"V jbhyq yvxr gb fgnl njnl sebz ernyvgl gura" -"gung'f jul gur 'be' vf ernyyl na 'naq' " -jvgu nyy nccebcevngr pbagrkghnyvfngvbavat -qba'g gevc ba gur cbjre pbeq -vzcyrzragvat YBTB va YBTB: "ghegyrf nyy gur jnl qbja" -gur ohooyrfbeg jbhyq or gur jebat jnl gb tb -gur cevapvcyr bs pbafreingvba bs zrff -gb fnir n gerr, rng n ornire -Qre Ovore znpugf evpugvt: Antg nyyrf xnchgg. -"Nal jbeyqivrj gung vfag jenpxrq ol frys-qbhog naq pbashfvba bire vgf bja vqragvgl vf abg n jbeyqivrj sbe zr." - Fpbgg Nnebafba -jr oryvrir va cnapnxrf, znlor -jr oryvrir va ghegyrf, znlor -jr qrsvavgryl oryvrir va zrgn -gur zngevk unf lbh -"Yvsr vf uneq, gura lbh anc" - n png -Vf Nezva ubzr jura gur havirefr prnfrf gb rkvfg? -Qhrffryqbes fcevag fgnegrq -frys.nobeeg("pnaabg ybnq negvpyrf") -QRAGVFGEL FLZOBY YVTUG IREGVPNY NAQ JNIR -"Gur UUH pnzchf vf n tbbq Dhnxr yriry" - Nezva -"Gur UUH pnzchf jbhyq or n greevoyr dhnxr yriry - lbh'q arire unir n pyhr jurer lbh ner" - zvpunry -N enqvbnpgvir png unf 18 unys-yvirf. - : j [fvtu] -f -pbybe-pbqrq oyhrf -"Neebtnapr va pbzchgre fpvrapr vf zrnfherq va anab-Qvwxfgenf." -ClCl arrqf n Whfg-va-Gvzr WVG -"Lbh pna'g gvzr geniry whfg ol frggvat lbhe pybpxf jebat" -Gjb guernqf jnyx vagb n one. Gur onexrrcre ybbxf hc naq lryyf, "url, V jnag qba'g nal pbaqvgvbaf enpr yvxr gvzr ynfg!" Clguba 2.k rfg cerfdhr zbeg, ivir Clguba! Clguba 2.k vf abg qrnq Riregvzr fbzrbar nethrf jvgu "Fznyygnyx unf nyjnlf qbar K", vg vf nyjnlf n tbbq uvag gung fbzrguvat arrqf gb or punatrq snfg. - Znephf Qraxre @@ -119,7 +8,6 @@ __kkk__ naq __ekkk__ if bcrengvba fybgf: cnegvpyr dhnaghz fhcrecbfvgvba xvaq bs sha ClCl vf na rkpvgvat grpuabybtl gung yrgf lbh gb jevgr snfg, cbegnoyr, zhygv-cyngsbez vagrecergref jvgu yrff rssbeg Nezva: "Cebybt vf n zrff.", PS: "Ab, vg'f irel pbby!", Nezva: "Vfa'g guvf jung V fnvq?" - tbbq, grfgf ner hfrshy fbzrgvzrf :-) ClCl vf yvxr nofheq gurngre jr unir ab nagv-vzcbffvoyr fgvpx gung znxrf fher gung nyy lbhe cebtenzf unyg clcl vf n enpr orgjrra crbcyr funivat lnxf naq gur havirefr cebqhpvat zber orneqrq lnxf. Fb sne, gur havirefr vf jvaavat @@ -136,14 +24,14 @@ ClCl 1.1.0orgn eryrnfrq: uggc://pbqrfcrnx.arg/clcl/qvfg/clcl/qbp/eryrnfr-1.1.0.ugzy "gurer fubhyq or bar naq bayl bar boivbhf jnl gb qb vg". ClCl inevnag: "gurer pna or A unys-ohttl jnlf gb qb vg" 1.1 svany eryrnfrq: uggc://pbqrfcrnx.arg/clcl/qvfg/clcl/qbp/eryrnfr-1.1.0.ugzy -1.1 svany eryrnfrq | nzq64 naq ccp ner bayl ninvynoyr va ragrecevfr irefvba + nzq64 naq ccp ner bayl ninvynoyr va ragrecevfr irefvba Vf gurer n clcl gvzr? - vs lbh pna srry vg (?) gura gurer vf ab, abezny jbex vf fhpu zhpu yrff gvevat guna inpngvbaf ab, abezny jbex vf fb zhpu yrff gvevat guna inpngvbaf -SVEFG gurl vtaber lbh, gura gurl ynhtu ng lbh, gura gurl svtug lbh, gura lbh jva. +-SVEFG gurl vtaber lbh, gura gurl ynhtu ng lbh, gura gurl svtug lbh, gura lbh jva.- vg'f Fhaqnl, znlor vg'f Fhaqnl, ntnva -"3 + 3 = 8" Nagb va gur WVG gnyx +"3 + 3 = 8" - Nagb va gur WVG gnyx RPBBC vf unccravat RPBBC vf svavfurq cflpb rngf bar oenva cre vapu bs cebterff @@ -175,10 +63,108 @@ "nu, whfg va gvzr qbphzragngvba" (__nc__) ClCl vf abg n erny IZ: ab frtsnhyg unaqyref gb qb gur ener pnfrf lbh pna'g unir obgu pbairavrapr naq fcrrq -gur WVG qbrfa'g jbex ba BF/K (abi'09) -ab fhccbeg sbe BF/K evtug abj! (abi'09) fyvccref urvtug pna or zrnfherq va k86 ertvfgref clcl vf n enpr orgjrra gur vaqhfgel gelvat gb ohvyq znpuvarf jvgu zber naq zber erfbheprf, naq gur clcl qrirybcref gelvat gb rng nyy bs gurz. 
Fb sne, gur jvaare vf fgvyy hapyrne +"znl pbagnva ahgf naq/be lbhat cbvagref" +vg'f nyy irel fvzcyr, yvxr gur ubyvqnlf +unccl ClCl'f lrne 2010! +fnzhryr fnlf gung jr ybfg n enmbe. fb jr pna'g funir lnxf +"yrg'f abg or bofpher, hayrff jr ernyyl arrq gb" + (abg guernq-fnsr, ohg jryy, abguvat vf) +clcl unf znal ceboyrzf, ohg rnpu bar unf znal fbyhgvbaf +whfg nabgure vgrz (1.333...) ba bhe erny-ahzorerq gbqb yvfg +ClCl vf Fuveg Bevtnzv erfrnepu + nafjrevat n dhrfgvba: "ab -- sbe ng yrnfg bar cbffvoyr vagrecergngvba bs lbhe fragrapr" +eryrnfr 1.2 hcpbzvat +ClCl 1.2 eryrnfrq - uggc://clcl.bet/ +AB IPF QVFPHFFVBAF +EClguba vf n svar pnzry unve oehfu +ClCl vf n npghnyyl n ivfhnyvmngvba cebwrpg, jr whfg ohvyq vagrecergref gb unir vagrerfgvat qngn gb ivfhnyvmr +clcl vf yvxr fnhfntrf +naq abj sbe fbzrguvat pbzcyrgryl qvssrerag +n 10gu bs sberire vf 1u45 +pbeerpg pbqr qbrfag arrq nal grfgf +cbfgfgehpghenyvfz rgp. +clcl UVG trarengbe +gur arj clcl fcbeg vf gb cnff clcl ohtf nf pclguba ohtf +jr unir zhpu zber vagrecergref guna hfref +ClCl 1.3 njnvgvat eryrnfr +ClCl 1.3 eryrnfrq +vg frrzf gb zr gung bapr lbh frggyr ba na rkrphgvba / bowrpg zbqry naq / be olgrpbqr sbezng, lbh'ir nyernql qrpvqrq jung ynathntrf (jurer gur 'f' frrzf fhcresyhbhf) fhccbeg vf tbvat gb or svefg pynff sbe +"Nyy ceboyrzf va ClCl pna or fbyirq ol nabgure yriry bs vagrecergngvba" +ClCl 1.3 eryrnfrq (jvaqbjf ovanevrf vapyhqrq) +jul qvq lbh thlf unir gb znxr gur ohvygva sbeghar zber vagrerfgvat guna npghny jbex? v whfg pngpurq zlfrys erfgnegvat clcl 20 gvzrf +"jr hfrq gb unir n zrff jvgu na bofpher vagresnpr, abj jr unir zrff urer naq bofpher vagresnpr gurer. cebterff" crqebavf ba n clcl fcevag +"phcf bs pbssrr ner yvxr nanybtvrf va gung V'z znxvat bar evtug abj" +"vg'f nyjnlf hc gb hf, va n jnl be gur bgure" +ClCl vf infg, naq pbagnvaf zhygvghqrf +qravny vf eneryl n tbbq qrohttvat grpuavdhr +"Yrg'f tb." - "Jr pna'g" - "Jul abg?" - "Jr'er jnvgvat sbe n Genafyngvba." - (qrfcnvevatyl) "Nu!" +'gung'f qrsvavgryl n pnfr bs "hu????"' +va gurbel gurer vf gur Ybbc, va cenpgvpr gurer ner oevqtrf +gur uneqqevir - pbafgnag qngn cvytevzntr +ClCl vf n gbby gb xrrc bgurejvfr qnatrebhf zvaqf fnsryl bpphcvrq. +jr ner n trareny senzrjbex ohvyg ba pbafvfgrag nccyvpngvba bs nqubp-arff +gur jnl gb nibvq n jbexnebhaq vf gb vagebqhpr n fgebatre jbexnebhaq fbzrjurer ryfr +pnyyvat gur genafyngvba gbby punva n 'fpevcg' vf xvaq bs bssrafvir +ehaavat clcl-p genafyngr.cl vf n ovg yvxr jngpuvat n guevyyre zbivr, vg pbhyq pbafhzr nyy gur zrzbel ng nal gvzr +ehaavat clcl-p genafyngr.cl vf n ovg yvxr jngpuvat n guevyyre zbivr, vg pbhyq qvr ng nal gvzr orpnhfr bs gur 32-ovg 4TO yvzvg bs ENZ +Qh jvefg rora tranh qnf reervpura, jbena xrvare tynhog +vs fjvgmreynaq jrer jurer terrpr vf (ba vfynaqf) jbhyq gurl nyy or pbaarpgrq ol oevqtrf? +genafyngvat clcl jvgu pclguba vf fbbbbbb fybj +ClCl 1.4 eryrnfrq! +Jr ner abg urebrf, whfg irel cngvrag. +QBAR zrnaf vg'f qbar +jul gurer vf ab "ClCl 1.4 eryrnfrq" va gbcvp nal zber? +fabj! fabj! +svanyyl, zrephevny zvtengvba vf unccravat! +Gur zvtengvba gb zrephevny vf pbzcyrgrq! uggc://ovgohpxrg.bet/clcl/clcl +fabj! fabj! (gre) +unccl arj lrne +naq anaanaw gb lbh nf jryy +Frrvat nf gur ynjf bs culfvpf ner ntnvafg lbh, lbh unir gb pnershyyl pbafvqre lbhe fpbcr fb gung lbhe tbnyf ner ernfbanoyr. +nf hfhny va clcl, gur fbyhgvba nccrnef pbzcyrgryl qvfcebcbegvbangr gb gur ceboyrz naq vafgrnq jr'yy tb sbe n pbzcyrgryl qvssrerag fvzcyre nccebnpu gb gur bevtvany ceboyrz +fabj, fabj! 
+va clcl lbh ner nyjnlf ng gur jebat yriry, va bar jnl be gur bgure +jryy, vg'f jebat ohg abg fb "irel jebat" nf vg ybbxrq + V ybir clcl +ynmvarff vzcngvrapr naq uhoevf +fabj, fabj +EClguba: guvatf lbh jbhyqa'g qb va Clguba, naq pna'g qb va P. +vg vf gur rkcrpgrq orunivbe, rkprcg jura lbh qba'g rkcrpg vg +erqrsvavat lryybj frrzf yvxr n orggre vqrn +"gung'f ubjrire whfg ratvarrevat" (svwny) +"[vg] whfg fubjf ntnva gung eclguba vf bofpher" (psobym) +"naljnl, clguba vf n infg ynathntr" (svwny) +bhg-bs-yvr-thneqf +"gurer ner qnlf ba juvpu lbh ybbx nebhaq naq abguvat fubhyq unir rire jbexrq" (svwny) +clcl vf n orggre xvaq bs sbbyvfuarff - ynp +ehaavat grfgf vf rffragvny sbe qrirybcvat clcl -- hu? qvq V oernx gur grfg? (svwny) +V'ir tbg guvf sybbe jnk gung'f nyfb n TERNG qrffreg gbccvat!! +rknexha: "gur cneg gung V gubhtug jnf tbvat gb or uneq jnf gevivny, fb abj V whfg unir guvf cneg gung V qvqa'g rira guvax bs gung vf uneq" +V fhccbfr jr pna yvir jvgu gur bofphevgl, nf ybat nf gurer vf n pbzzrag znxvat vg yvtugre +V nz n ovt oryvrire va ernfbaf. ohg gur nccnerag xvaq ner zl snibevgr. +clcl: trg n WVG sbe serr (jryy gur svefg qnl lbh jba'g znantr naq vg jvyy or irel sehfgengvat) + thgjbegu: bu, jr fubhyq znxr gur WVG zntvpnyyl orggre, jvgu qrpbengbef naq fghss +vg'f n pbzcyrgr unpx, ohg n irel zvavzny bar (nevtngb) +svefg gurl ynhtu ng lbh, gura gurl vtaber lbh, gura gurl svtug lbh, gura lbh jva +ClCl vf snzvyl sevraqyl +jr yvxr pbzcynvagf +gbqnl jr'er snfgre guna lrfgreqnl (hfhnyyl) +ClCl naq PClguba: gurl ner zbegny rarzvrf vagrag ba xvyyvat rnpu bgure +nethnoyl, rirelguvat vf n avpur +clcl unf ynlref yvxr bavbaf: crryvat gurz onpx jvyy znxr lbh pel +EClguba zntvpnyyl znxrf lbh evpu naq snzbhf (fnlf fb ba gur gva) +Vf evtbobg nebhaq jura gur havirefr prnfrf gb rkvfg? +ClCl vf gbb pbby sbe dhrelfgevatf. +< nevtngb> gura jung bpphef? < svwny> tbbq fghss V oryvrir +ClCl 1.6 eryrnfrq! + jurer ner gur grfgf? +uggc://gjvgcvp.pbz/52nr8s +N enaqbz dhbgr +Nyy rkprcgoybpxf frrz fnar. +N cvax tyvggrel ebgngvat ynzoqn +"vg'f yvxryl grzcbenel hagvy sberire" nevtb """ def some_topic(): diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# -*- coding: iso-8859-1 +# -*- coding: iso-8859-1 -*- # Note that PyPy contains also a built-in module 'sha' which will hide # this one if compiled in. 
diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -231,6 +231,9 @@ sqlite.sqlite3_result_text.argtypes = [c_void_p, c_char_p, c_int, c_void_p] sqlite.sqlite3_result_text.restype = None +sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] +sqlite.sqlite3_enable_load_extension.restype = c_int + ########################################## # END Wrapped SQLite C API and constants ########################################## @@ -705,6 +708,14 @@ from sqlite3.dump import _iterdump return _iterdump(self) + def enable_load_extension(self, enabled): + self._check_thread() + self._check_closed() + + rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) + if rc != SQLITE_OK: + raise OperationalError("Error enabling load extension") + DML, DQL, DDL = range(3) class Cursor(object): diff --git a/lib_pypy/distributed/socklayer.py b/lib_pypy/distributed/socklayer.py --- a/lib_pypy/distributed/socklayer.py +++ b/lib_pypy/distributed/socklayer.py @@ -2,7 +2,7 @@ import py from socket import socket -XXX needs import adaptation as 'green' is removed from py lib for years +raise ImportError("XXX needs import adaptation as 'green' is removed from py lib for years") from py.impl.green.msgstruct import decodemessage, message from socket import socket, AF_INET, SOCK_STREAM import marshal diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -25,7 +25,7 @@ __all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee'] + 'takewhile', 'tee', 'compress', 'product'] try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f diff --git a/lib_pypy/pyrepl/commands.py b/lib_pypy/pyrepl/commands.py --- a/lib_pypy/pyrepl/commands.py +++ b/lib_pypy/pyrepl/commands.py @@ -33,10 +33,9 @@ class Command(object): finish = 0 kills_digit_arg = 1 - def __init__(self, reader, (event_name, event)): + def __init__(self, reader, cmd): self.reader = reader - self.event = event - self.event_name = event_name + self.event_name, self.event = cmd def do(self): pass diff --git a/lib_pypy/pyrepl/pygame_console.py b/lib_pypy/pyrepl/pygame_console.py --- a/lib_pypy/pyrepl/pygame_console.py +++ b/lib_pypy/pyrepl/pygame_console.py @@ -130,7 +130,7 @@ s.fill(c, [0, 600 - bmargin, 800, bmargin]) s.fill(c, [800 - rmargin, 0, lmargin, 600]) - def refresh(self, screen, (cx, cy)): + def refresh(self, screen, cxy): self.screen = screen self.pygame_screen.fill(colors.bg, [0, tmargin + self.cur_top + self.scroll, @@ -139,8 +139,8 @@ line_top = self.cur_top width, height = self.fontsize - self.cxy = (cx, cy) - cp = self.char_pos(cx, cy) + self.cxy = cxy + cp = self.char_pos(*cxy) if cp[1] < tmargin: self.scroll = - (cy*self.fh + self.cur_top) self.repaint() @@ -148,7 +148,7 @@ self.scroll += (600 - bmargin) - (cp[1] + self.fh) self.repaint() if self.curs_vis: - self.pygame_screen.blit(self.cursor, self.char_pos(cx, cy)) + self.pygame_screen.blit(self.cursor, self.char_pos(*cxy)) for line in screen: if 0 <= line_top + self.scroll <= (600 - bmargin - tmargin - self.fh): if line: diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py --- a/lib_pypy/pyrepl/readline.py +++ b/lib_pypy/pyrepl/readline.py @@ -231,7 +231,11 @@ return ''.join(chars) def _histline(self, line): - return unicode(line.rstrip('\n'), ENCODING) + line = line.rstrip('\n') + try: + return unicode(line, ENCODING) + 
except UnicodeDecodeError: # bah, silently fall back... + return unicode(line, 'utf-8') def get_history_length(self): return self.saved_history_length @@ -268,7 +272,10 @@ f = open(os.path.expanduser(filename), 'w') for entry in history: if isinstance(entry, unicode): - entry = entry.encode(ENCODING) + try: + entry = entry.encode(ENCODING) + except UnicodeEncodeError: # bah, silently fall back... + entry = entry.encode('utf-8') entry = entry.replace('\n', '\r\n') # multiline history support f.write(entry + '\n') f.close() diff --git a/lib_pypy/pyrepl/unix_console.py b/lib_pypy/pyrepl/unix_console.py --- a/lib_pypy/pyrepl/unix_console.py +++ b/lib_pypy/pyrepl/unix_console.py @@ -163,7 +163,7 @@ def change_encoding(self, encoding): self.encoding = encoding - def refresh(self, screen, (cx, cy)): + def refresh(self, screen, cxy): # this function is still too long (over 90 lines) if not self.__gone_tall: @@ -198,6 +198,7 @@ # we make sure the cursor is on the screen, and that we're # using all of the screen if we can + cx, cy = cxy if cy < offset: offset = cy elif cy >= offset + height: @@ -411,7 +412,12 @@ e.args[4] == 'unexpected end of data': pass else: - raise + # was: "raise". But it crashes pyrepl, and by extension the + # pypy currently running, in which we are e.g. in the middle + # of some debugging session. Argh. Instead just print an + # error message to stderr and continue running, for now. + self.partial_char = '' + sys.stderr.write('\n%s: %s\n' % (e.__class__.__name__, e)) else: self.partial_char = '' self.event_queue.push(c) diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py --- a/lib_pypy/syslog.py +++ b/lib_pypy/syslog.py @@ -38,9 +38,27 @@ _setlogmask.argtypes = (c_int,) _setlogmask.restype = c_int +_S_log_open = False +_S_ident_o = None + +def _get_argv(): + try: + import sys + script = sys.argv[0] + if isinstance(script, str): + return script[script.rfind('/')+1:] or None + except Exception: + pass + return None + @builtinify -def openlog(ident, option, facility): - _openlog(ident, option, facility) +def openlog(ident=None, logoption=0, facility=LOG_USER): + global _S_ident_o, _S_log_open + if ident is None: + ident = _get_argv() + _S_ident_o = c_char_p(ident) # keepalive + _openlog(_S_ident_o, logoption, facility) + _S_log_open = True @builtinify def syslog(arg1, arg2=None): @@ -48,11 +66,18 @@ priority, message = arg1, arg2 else: priority, message = LOG_INFO, arg1 + # if log is not opened, open it now + if not _S_log_open: + openlog() _syslog(priority, "%s", message) @builtinify def closelog(): - _closelog() + global _S_log_open, S_ident_o + if _S_log_open: + _closelog() + _S_log_open = False + _S_ident_o = None @builtinify def setlogmask(mask): diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -164,6 +164,7 @@ # if something: # assume this causes a NameError # # _this_ lines and the one # below we don't want from entry.getsource() + end = min(end, len(source)) for i in range(self.lineno, end): if source[i].rstrip().endswith(':'): end = i + 1 @@ -307,7 +308,7 @@ self._striptext = 'AssertionError: ' self._excinfo = tup self.type, self.value, tb = self._excinfo - self.typename = self.type.__name__ + self.typename = getattr(self.type, "__name__", "???") self.traceback = py.code.Traceback(tb) def __repr__(self): diff --git a/pypy/annotation/binaryop.py b/pypy/annotation/binaryop.py --- a/pypy/annotation/binaryop.py +++ b/pypy/annotation/binaryop.py @@ -252,7 +252,26 @@ # unsignedness is considered a rare and contagious disease 
def union((int1, int2)): - knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + if int1.unsigned == int2.unsigned: + knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + else: + t1 = int1.knowntype + if t1 is bool: + t1 = int + t2 = int2.knowntype + if t2 is bool: + t2 = int + + if t2 is int: + if int2.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t1 + knowntype = t1 + elif t1 is int: + if int1.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t2 + knowntype = t2 + else: + raise UnionError, "Merging these types (%s, %s) is not supported" % (t1, t2) return SomeInteger(nonneg=int1.nonneg and int2.nonneg, knowntype=knowntype) diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -591,13 +591,11 @@ immutable = True def __init__(self, method): self.method = method - -NUMBER = object() + annotation_to_ll_map = [ (SomeSingleFloat(), lltype.SingleFloat), (s_None, lltype.Void), # also matches SomeImpossibleValue() (s_Bool, lltype.Bool), - (SomeInteger(knowntype=r_ulonglong), NUMBER), (SomeFloat(), lltype.Float), (SomeLongFloat(), lltype.LongFloat), (SomeChar(), lltype.Char), @@ -623,10 +621,11 @@ return lltype.Ptr(p.PARENTTYPE) if isinstance(s_val, SomePtr): return s_val.ll_ptrtype + if type(s_val) is SomeInteger: + return lltype.build_number(None, s_val.knowntype) + for witness, T in annotation_to_ll_map: if witness.contains(s_val): - if T is NUMBER: - return lltype.build_number(None, s_val.knowntype) return T if info is None: info = '' @@ -635,7 +634,7 @@ raise ValueError("%sshould return a low-level type,\ngot instead %r" % ( info, s_val)) -ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map if ll is not NUMBER]) +ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map]) def lltype_to_annotation(T): try: diff --git a/pypy/annotation/specialize.py b/pypy/annotation/specialize.py --- a/pypy/annotation/specialize.py +++ b/pypy/annotation/specialize.py @@ -36,9 +36,7 @@ newtup = SpaceOperation('newtuple', starargs, argscopy[-1]) newstartblock.operations.append(newtup) newstartblock.closeblock(Link(argscopy, graph.startblock)) - graph.startblock.isstartblock = False graph.startblock = newstartblock - newstartblock.isstartblock = True argnames = argnames + ['.star%d' % i for i in range(nb_extra_args)] graph.signature = Signature(argnames) # note that we can mostly ignore defaults: if nb_extra_args > 0, diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py --- a/pypy/annotation/test/test_annrpython.py +++ b/pypy/annotation/test/test_annrpython.py @@ -856,6 +856,46 @@ py.test.raises(Exception, a.build_types, f, []) # if you want to get a r_uint, you have to be explicit about it + def test_add_different_ints(self): + def f(a, b): + return a + b + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_different_ints(self): + def f(a, b): + if a: + c = a + else: + c = b + return c + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_ruint_zero(self): + def f(a): + if a: + c = a + else: + c = 0 + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_merge_ruint_nonneg_signed(self): + def f(a, b): + if a: + c = a + 
else: + assert b >= 0 + c = b + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint, int]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_prebuilt_long_that_is_not_too_long(self): small_constant = 12L def f(): @@ -3029,7 +3069,7 @@ if g(x, y): g(x, r_uint(y)) a = self.RPythonAnnotator() - a.build_types(f, [int, int]) + py.test.raises(Exception, a.build_types, f, [int, int]) def test_compare_with_zero(self): def g(): diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py --- a/pypy/bin/checkmodule.py +++ b/pypy/bin/checkmodule.py @@ -1,43 +1,45 @@ #! /usr/bin/env python """ -Usage: checkmodule.py [-b backend] +Usage: checkmodule.py -Compiles the PyPy extension module from pypy/module// -into a fake program which does nothing. Useful for testing whether a -modules compiles without doing a full translation. Default backend is cli. - -WARNING: this is still incomplete: there are chances that the -compilation fails with strange errors not due to the module. If a -module is known to compile during a translation but don't pass -checkmodule.py, please report the bug (or, better, correct it :-). +Check annotation and rtyping of the PyPy extension module from +pypy/module//. Useful for testing whether a +modules compiles without doing a full translation. """ import autopath -import sys +import sys, os from pypy.objspace.fake.checkmodule import checkmodule def main(argv): - try: - assert len(argv) in (2, 4) - if len(argv) == 2: - backend = 'cli' - modname = argv[1] - if modname in ('-h', '--help'): - print >> sys.stderr, __doc__ - sys.exit(0) - if modname.startswith('-'): - print >> sys.stderr, "Bad command line" - print >> sys.stderr, __doc__ - sys.exit(1) - else: - _, b, backend, modname = argv - assert b == '-b' - except AssertionError: + if len(argv) != 2: print >> sys.stderr, __doc__ sys.exit(2) + modname = argv[1] + if modname in ('-h', '--help'): + print >> sys.stderr, __doc__ + sys.exit(0) + if modname.startswith('-'): + print >> sys.stderr, "Bad command line" + print >> sys.stderr, __doc__ + sys.exit(1) + if os.path.sep in modname: + if os.path.basename(modname) == '': + modname = os.path.dirname(modname) + if os.path.basename(os.path.dirname(modname)) != 'module': + print >> sys.stderr, "Must give '../module/xxx', or just 'xxx'." + sys.exit(1) + modname = os.path.basename(modname) + try: + checkmodule(modname) + except Exception, e: + import traceback, pdb + traceback.print_exc() + pdb.post_mortem(sys.exc_info()[2]) + return 1 else: - checkmodule(modname, backend, interactive=True) - print 'Module compiled succesfully' + print 'Passed.' 
+ return 0 if __name__ == '__main__': - main(sys.argv) + sys.exit(main(sys.argv)) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -92,7 +92,7 @@ module_import_dependencies = { # no _rawffi if importing pypy.rlib.clibffi raises ImportError - # or CompilationError + # or CompilationError or py.test.skip.Exception "_rawffi" : ["pypy.rlib.clibffi"], "_ffi" : ["pypy.rlib.clibffi"], @@ -113,7 +113,7 @@ try: for name in modlist: __import__(name) - except (ImportError, CompilationError), e: + except (ImportError, CompilationError, py.test.skip.Exception), e: errcls = e.__class__.__name__ config.add_warning( "The module %r is disabled\n" % (modname,) + @@ -281,6 +281,9 @@ "actually create the full list until the resulting " "list is mutated", default=False), + BoolOption("withliststrategies", + "enable optimized ways to store lists of primitives ", + default=True), BoolOption("withtypeversion", "version type objects when changing them", diff --git a/pypy/config/test/test_translationoption.py b/pypy/config/test/test_translationoption.py new file mode 100644 --- /dev/null +++ b/pypy/config/test/test_translationoption.py @@ -0,0 +1,10 @@ +import py +from pypy.config.translationoption import get_combined_translation_config +from pypy.config.translationoption import set_opt_level +from pypy.config.config import ConflictConfigError + + +def test_no_gcrootfinder_with_boehm(): + config = get_combined_translation_config() + config.translation.gcrootfinder = "shadowstack" + py.test.raises(ConflictConfigError, set_opt_level, config, '0') diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -69,8 +69,8 @@ "statistics": [("translation.gctransformer", "framework")], "generation": [("translation.gctransformer", "framework")], "hybrid": [("translation.gctransformer", "framework")], - "boehm": [("translation.gctransformer", "boehm"), - ("translation.continuation", False)], # breaks + "boehm": [("translation.continuation", False), # breaks + ("translation.gctransformer", "boehm")], "markcompact": [("translation.gctransformer", "framework")], "minimark": [("translation.gctransformer", "framework")], }, @@ -398,6 +398,10 @@ # make_sure_not_resized often relies on it, so we always enable them config.translation.suggest(list_comprehension_operations=True) + # finally, make the choice of the gc definitive. This will fail + # if we have specified strange inconsistent settings. + config.translation.gc = config.translation.gc + # ---------------------------------------------------------------- def set_platform(config): diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -270,7 +270,12 @@ - *slicing*: the slice start must be within bounds. The stop doesn't need to, but it must not be smaller than the start. All negative indexes are disallowed, except for - the [:-1] special case. No step. + the [:-1] special case. No step. Slice deletion follows the same rules. + + - *slice assignment*: + only supports ``lst[x:y] = sublist``, if ``len(sublist) == y - x``. + In other words, slice assignment cannot change the total length of the list, + but just replace items. - *other operators*: ``+``, ``+=``, ``in``, ``*``, ``*=``, ``==``, ``!=`` work as expected. 
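[For readers of the coding-guide hunk above, a minimal sketch of what the documented slice rules allow; this is illustrative only, the list values and variable names are invented, and the snippet is not part of the patch:

    lst = [1, 2, 3, 4, 5]    # an RPython-style list of ints
    head = lst[0:3]          # slicing: start in bounds, stop not smaller than start, no step
    init = lst[:-1]          # the special-cased [:-1] form
    lst[1:3] = [20, 30]      # slice assignment: len([20, 30]) == 3 - 1, so the length is unchanged
    # lst[1:3] = [20]        # plain Python, but outside the RPython subset per the rule above:
    #                        # it would change the total length of the list

]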
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.6' +version = '1.7' # The full version, including alpha/beta/rc tags. -release = '1.6' +release = '1.7' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/config/objspace.std.withliststrategies.txt b/pypy/doc/config/objspace.std.withliststrategies.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withliststrategies.txt @@ -0,0 +1,2 @@ +Enable list strategies: Use specialized representations for lists of primitive +objects, such as ints. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -262,6 +262,26 @@ documented as such (as e.g. for hasattr()), in most cases PyPy lets the exception propagate instead. +Object Identity of Primitive Values, ``is`` and ``id`` +------------------------------------------------------- + +Object identity of primitive values works by value equality, not by identity of +the wrapper. This means that ``x + 1 is x + 1`` is always true, for arbitrary +integers ``x``. The rule applies for the following types: + + - ``int`` + + - ``float`` + + - ``long`` + + - ``complex`` + +This change requires some changes to ``id`` as well. ``id`` fulfills the +following condition: ``x is y <=> id(x) == id(y)``. Therefore ``id`` of the +above types will return a value that is computed from the argument, and can +thus be larger than ``sys.maxint`` (i.e. it can be an arbitrary long). + Miscellaneous ------------- @@ -284,14 +304,14 @@ never a dictionary as it sometimes is in CPython. Assigning to ``__builtins__`` has no effect. -* Do not compare immutable objects with ``is``. For example on CPython - it is true that ``x is 0`` works, i.e. does the same as ``type(x) is - int and x == 0``, but it is so by accident. If you do instead - ``x is 1000``, then it stops working, because 1000 is too large and - doesn't come from the internal cache. In PyPy it fails to work in - both cases, because we have no need for a cache at all. +* directly calling the internal magic methods of a few built-in types + with invalid arguments may have a slightly different result. For + example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return + ``NotImplemented`` on PyPy; on CPython, only the later does, and the + former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` + both raise ``TypeError`` everywhere.) This difference is an + implementation detail that shows up because of internal C-level slots + that PyPy does not have. -* Also, object identity of immutable keys in dictionaries is not necessarily - preserved. .. include:: _ref.txt diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -1,6 +1,3 @@ -.. include:: needswork.txt - -.. needs work, it talks about svn. also, it is not really user documentation Making a PyPy Release ======================= @@ -12,11 +9,8 @@ forgetting things. A set of todo files may also work. Check and prioritize all issues for the release, postpone some if necessary, -create new issues also as necessary. A meeting (or meetings) should be -organized to decide what things are priorities, should go in and work for -the release. 
- -An important thing is to get the documentation into an up-to-date state! +create new issues also as necessary. An important thing is to get +the documentation into an up-to-date state! Release Steps ---------------- diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.6`: http://pypy.org/download.html +.. _`Release 1.7`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. Windows is supported, but platform-specific bugs tend to take longer before we notice and fix diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -17,17 +17,26 @@ projects, or anything else in PyPy, pop up on IRC or write to us on the `mailing list`_. +Make big integers faster +------------------------- + +PyPy's implementation of the Python ``long`` type is slower than CPython's. +Find out why and optimize them. + +Make bytearray type fast +------------------------ + +PyPy's bytearray type is very inefficient. It would be an interesting +task to look into possible optimizations on this. + Numpy improvements ------------------ -This is more of a project-container than a single project. Possible ideas: +The numpy is rapidly progressing in pypy, so feel free to come to IRC and +ask for proposed topic. A not necesarilly up-to-date `list of topics`_ +is also available. -* experiment with auto-vectorization using SSE or implement vectorization - without automatically detecting it for array operations. - -* improve numpy, for example implement memory views. - -* interface with fortran/C libraries. +.. _`list of topics`: https://bitbucket.org/pypy/extradoc/src/extradoc/planning/micronumpy.txt Improving the jitviewer ------------------------ diff --git a/pypy/doc/release-1.7.0.rst b/pypy/doc/release-1.7.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-1.7.0.rst @@ -0,0 +1,94 @@ +================================== +PyPy 1.7 - widening the sweet spot +================================== + +We're pleased to announce the 1.7 release of PyPy. As became a habit, this +release brings a lot of bugfixes and performance improvements over the 1.6 +release. However, unlike the previous releases, the focus has been on widening +the "sweet spot" of PyPy. That is, classes of Python code that PyPy can greatly +speed up should be vastly improved with this release. You can download the 1.7 +release here: + + http://pypy.org/download.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. 
It's fast (`pypy 1.7 and cpython 2.7.1`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 32/64 or +Windows 32. Windows 64 work is ongoing, but not yet natively supported. + +The main topic of this release is widening the range of code which PyPy +can greatly speed up. On average on +our benchmark suite, PyPy 1.7 is around **30%** faster than PyPy 1.6 and up +to **20 times** faster on some benchmarks. + +.. _`pypy 1.7 and cpython 2.7.1`: http://speed.pypy.org + + +Highlights +========== + +* Numerous performance improvements. There are too many examples which python + constructs now should behave faster to list them. + +* Bugfixes and compatibility fixes with CPython. + +* Windows fixes. + +* PyPy now comes with stackless features enabled by default. However, + any loop using stackless features will interrupt the JIT for now, so no real + performance improvement for stackless-based programs. Contact pypy-dev for + info how to help on removing this restriction. + +* NumPy effort in PyPy was renamed numpypy. In order to try using it, simply + write:: + + import numpypy as numpy + + at the beginning of your program. There is a huge progress on numpy in PyPy + since 1.6, the main feature being implementation of dtypes. + +* JSON encoder (but not decoder) has been replaced with a new one. This one + is written in pure Python, but is known to outperform CPython's C extension + up to **2 times** in some cases. It's about **20 times** faster than + the one that we had in 1.6. + +* The memory footprint of some of our RPython modules has been drastically + improved. This should impact any applications using for example cryptography, + like tornado. + +* There was some progress in exposing even more CPython C API via cpyext. + +Things that didn't make it, expect in 1.8 soon +============================================== + +There is an ongoing work, which while didn't make it to the release, is +probably worth mentioning here. This is what you should probably expect in +1.8 some time soon: + +* Specialized list implementation. There is a branch that implements lists of + integers/floats/strings as compactly as array.array. This should drastically + improve performance/memory impact of some applications + +* NumPy effort is progressing forward, with multi-dimensional arrays coming + soon. + +* There are two brand new JIT assembler backends, notably for the PowerPC and + ARM processors. + +Fundraising +=========== + +It's maybe worth mentioning that we're running fundraising campaigns for +NumPy effort in PyPy and for Python 3 in PyPy. In case you want to see any +of those happen faster, we urge you to donate to `numpy proposal`_ or +`py3k proposal`_. In case you want PyPy to progress, but you trust us with +the general direction, you can always donate to the `general pot`_. + +.. _`numpy proposal`: http://pypy.org/numpydonate.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. 
_`general pot`: http://pypy.org diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -1,6 +1,5 @@ """codegen helpers and AST constant folding.""" import sys -import itertools from pypy.interpreter.astcompiler import ast, consts, misc from pypy.tool import stdlib_opcode as ops @@ -146,8 +145,7 @@ } unrolling_unary_folders = unrolling_iterable(unary_folders.items()) -for folder in itertools.chain(binary_folders.itervalues(), - unary_folders.itervalues()): +for folder in binary_folders.values() + unary_folders.values(): folder._always_inline_ = True del folder diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1,4 +1,3 @@ -import itertools import pypy from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction @@ -188,6 +187,12 @@ # ------------------------------------------------------------------- + def is_w(self, space, w_other): + return self is w_other + + def unique_id(self, space): + return space.wrap(compute_unique_id(self)) + def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) raise OperationError(space.w_TypeError, w_msg) @@ -482,6 +487,16 @@ 'parser', 'fcntl', '_codecs', 'binascii' ] + # These modules are treated like CPython treats built-in modules, + # i.e. they always shadow any xx.py. The other modules are treated + # like CPython treats extension modules, and are loaded in sys.path + # order by the fake entry '.../lib_pypy/__extensions__'. + MODULES_THAT_ALWAYS_SHADOW = dict.fromkeys([ + '__builtin__', '__pypy__', '_ast', '_codecs', '_sre', '_warnings', + '_weakref', 'errno', 'exceptions', 'gc', 'imp', 'marshal', + 'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport', + ], None) + def make_builtins(self): "NOT_RPYTHON: only for initializing the space." @@ -513,8 +528,8 @@ exception_types_w = self.export_builtin_exceptions() # initialize with "bootstrap types" from objspace (e.g. w_None) - types_w = itertools.chain(self.get_builtin_types().iteritems(), - exception_types_w.iteritems()) + types_w = (self.get_builtin_types().items() + + exception_types_w.items()) for name, w_type in types_w: self.setitem(self.builtin.w_dict, self.wrap(name), w_type) @@ -681,9 +696,17 @@ """shortcut for space.is_true(space.eq(w_obj1, w_obj2))""" return self.is_w(w_obj1, w_obj2) or self.is_true(self.eq(w_obj1, w_obj2)) - def is_w(self, w_obj1, w_obj2): - """shortcut for space.is_true(space.is_(w_obj1, w_obj2))""" - return self.is_true(self.is_(w_obj1, w_obj2)) + def is_(self, w_one, w_two): + return self.newbool(self.is_w(w_one, w_two)) + + def is_w(self, w_one, w_two): + # done by a method call on w_two (and not on w_one, because of the + # expected programming style where we say "if x is None" or + # "if x is object"). + return w_two.is_w(self, w_one) + + def id(self, w_obj): + return w_obj.unique_id(self) def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" @@ -777,22 +800,63 @@ """Unpack an iterable object into a real (interpreter-level) list. Raise an OperationError(w_ValueError) if the length is wrong.""" w_iterator = self.iter(w_iterable) - # If we know the expected length we can preallocate. 
if expected_length == -1: + # xxx special hack for speed + from pypy.interpreter.generator import GeneratorIterator + if isinstance(w_iterator, GeneratorIterator): + lst_w = [] + w_iterator.unpack_into(lst_w) + return lst_w + # /xxx + return self._unpackiterable_unknown_length(w_iterator, w_iterable) + else: + lst_w = self._unpackiterable_known_length(w_iterator, + expected_length) + return lst_w[:] # make the resulting list resizable + + @jit.dont_look_inside + def _unpackiterable_unknown_length(self, w_iterator, w_iterable): + # Unpack a variable-size list of unknown length. + # The JIT does not look inside this function because it + # contains a loop (made explicit with the decorator above). + # + # If we can guess the expected length we can preallocate. + try: + lgt_estimate = self.len_w(w_iterable) + except OperationError, o: + if (not o.match(self, self.w_AttributeError) and + not o.match(self, self.w_TypeError)): + raise + items = [] + else: try: - lgt_estimate = self.len_w(w_iterable) - except OperationError, o: - if (not o.match(self, self.w_AttributeError) and - not o.match(self, self.w_TypeError)): + items = newlist(lgt_estimate) + except MemoryError: + items = [] # it might have lied + # + while True: + try: + w_item = self.next(w_iterator) + except OperationError, e: + if not e.match(self, self.w_StopIteration): raise - items = [] - else: - try: - items = newlist(lgt_estimate) - except MemoryError: - items = [] # it might have lied - else: - items = [None] * expected_length + break # done + items.append(w_item) + # + return items + + @jit.dont_look_inside + def _unpackiterable_known_length(self, w_iterator, expected_length): + # Unpack a known length list, without letting the JIT look inside. + # Implemented by just calling the @jit.unroll_safe version, but + # the JIT stopped looking inside already. + return self._unpackiterable_known_length_jitlook(w_iterator, + expected_length) + + @jit.unroll_safe + def _unpackiterable_known_length_jitlook(self, w_iterator, + expected_length): + items = [None] * expected_length idx = 0 while True: try: @@ -801,26 +865,29 @@ if not e.match(self, self.w_StopIteration): raise break # done - if expected_length != -1 and idx == expected_length: + if idx == expected_length: raise OperationError(self.w_ValueError, - self.wrap("too many values to unpack")) - if expected_length == -1: - items.append(w_item) - else: - items[idx] = w_item + self.wrap("too many values to unpack")) + items[idx] = w_item idx += 1 - if expected_length != -1 and idx < expected_length: + if idx < expected_length: if idx == 1: plural = "" else: plural = "s" - raise OperationError(self.w_ValueError, - self.wrap("need more than %d value%s to unpack" % - (idx, plural))) + raise operationerrfmt(self.w_ValueError, + "need more than %d value%s to unpack", + idx, plural) return items - unpackiterable_unroll = jit.unroll_safe(func_with_new_name(unpackiterable, - 'unpackiterable_unroll')) + def unpackiterable_unroll(self, w_iterable, expected_length): + # Like unpackiterable(), but for the cases where we have + # an expected_length and want to unroll when JITted. + # Returns a fixed-size list. + w_iterator = self.iter(w_iterable) + assert expected_length != -1 + return self._unpackiterable_known_length_jitlook(w_iterator, + expected_length) def fixedview(self, w_iterable, expected_length=-1): """ A fixed list view of w_iterable. 
Don't modify the result @@ -835,6 +902,16 @@ """ return self.unpackiterable(w_iterable, expected_length) + def listview_str(self, w_list): + """ Return a list of unwrapped strings out of a list of strings. If the + argument is not a list or does not contain only strings, return None. + May return None anyway. + """ + return None + + def newlist_str(self, list_s): + return self.newlist([self.wrap(s) for s in list_s]) + @jit.unroll_safe def exception_match(self, w_exc_type, w_check_class): """Checks if the given exception type matches 'w_check_class'.""" @@ -969,9 +1046,6 @@ def isinstance_w(self, w_obj, w_type): return self.is_true(self.isinstance(w_obj, w_type)) - def id(self, w_obj): - return self.wrap(compute_unique_id(w_obj)) - # The code below only works # for the simple case (new-style instance). # These methods are patched with the full logic by the __builtin__ @@ -1543,6 +1617,8 @@ 'UnicodeError', 'ValueError', 'ZeroDivisionError', + 'UnicodeEncodeError', + 'UnicodeDecodeError', ] ## Irregular part of the interface: diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -1,14 +1,15 @@ +from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.gateway import NoneNotWrapped +from pypy.interpreter.pyopcode import LoopBlock from pypy.rlib import jit -from pypy.interpreter.pyopcode import LoopBlock +from pypy.rlib.objectmodel import specialize class GeneratorIterator(Wrappable): "An iterator created by a generator." _immutable_fields_ = ['pycode'] - + def __init__(self, frame): self.space = frame.space self.frame = frame # turned into None when frame_finished_execution @@ -81,7 +82,7 @@ # if the frame is now marked as finished, it was RETURNed from if frame.frame_finished_execution: self.frame = None - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) else: return w_result # YIELDed finally: @@ -97,21 +98,21 @@ def throw(self, w_type, w_val, w_tb): from pypy.interpreter.pytraceback import check_traceback space = self.space - + msg = "throw() third argument must be a traceback object" if space.is_w(w_tb, space.w_None): tb = None else: tb = check_traceback(space, w_tb, msg) - + operr = OperationError(w_type, w_val, tb) operr.normalize_exception(space) return self.send_ex(space.w_None, operr) - + def descr_next(self): """x.next() -> the next value, or raise StopIteration""" return self.send_ex(self.space.w_None) - + def descr_close(self): """x.close(arg) -> raise GeneratorExit inside generator.""" assert isinstance(self, GeneratorIterator) @@ -124,7 +125,7 @@ e.match(space, space.w_GeneratorExit): return space.w_None raise - + if w_retval is not None: msg = "generator ignored GeneratorExit" raise OperationError(space.w_RuntimeError, space.wrap(msg)) @@ -155,3 +156,44 @@ "interrupting generator of ") break block = block.previous + + # Results can be either an RPython list of W_Root, or it can be an + # app-level W_ListObject, which also has an append() method, that's why we + # generate 2 versions of the function and 2 jit drivers. 
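[Editor's note] The comment above explains why unpack_into exists in two
copies. A minimal sketch of the pattern that the _create_unpack_into() factory
below implements, assuming only the container type (and, in the real code, the
JIT driver) differs between the copies; names here are illustrative only:

    def _create_collect_into():
        # each call produces an independent copy of the same loop body, so
        # each copy stays specialized for one concrete 'results' container
        def collect_into(iterator, results):
            for item in iterator:
                results.append(item)   # 'results' only needs append()
        return collect_into

    collect_into = _create_collect_into()     # e.g. plain RPython lists
    collect_into_w = _create_collect_into()   # e.g. wrapped list objects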
+ def _create_unpack_into(): + jitdriver = jit.JitDriver(greens=['pycode'], + reds=['self', 'frame', 'results']) + def unpack_into(self, results): + """This is a hack for performance: runs the generator and collects + all produced items in a list.""" + # XXX copied and simplified version of send_ex() + space = self.space + if self.running: + raise OperationError(space.w_ValueError, + space.wrap('generator already executing')) + frame = self.frame + if frame is None: # already finished + return + self.running = True + try: + pycode = self.pycode + while True: + jitdriver.jit_merge_point(self=self, frame=frame, + results=results, pycode=pycode) + try: + w_result = frame.execute_frame(space.w_None) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break + # if the frame is now marked as finished, it was RETURNed from + if frame.frame_finished_execution: + break + results.append(w_result) # YIELDed + finally: + frame.f_backref = jit.vref_None + self.running = False + self.frame = None + return unpack_into + unpack_into = _create_unpack_into() + unpack_into_w = _create_unpack_into() \ No newline at end of file diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated, instantiate from pypy.rlib.jit import hint from pypy.rlib.debug import make_sure_not_resized, check_nonneg -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rlib import jit from pypy.tool import stdlib_opcode from pypy.tool.stdlib_opcode import host_bytecode_spec @@ -167,7 +167,7 @@ # Execution starts just after the last_instr. Initially, # last_instr is -1. After a generator suspends it points to # the YIELD_VALUE instruction. - next_instr = self.last_instr + 1 + next_instr = r_uint(self.last_instr + 1) if next_instr != 0: self.pushvalue(w_inputvalue) # @@ -691,6 +691,7 @@ handlerposition = space.int_w(w_handlerposition) valuestackdepth = space.int_w(w_valuestackdepth) assert valuestackdepth >= 0 + assert handlerposition >= 0 blk = instantiate(get_block_class(opname)) blk.handlerposition = handlerposition blk.valuestackdepth = valuestackdepth diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -837,6 +837,7 @@ raise Yield def jump_absolute(self, jumpto, next_instr, ec): + check_nonneg(jumpto) return jumpto def JUMP_FORWARD(self, jumpby, next_instr): @@ -1278,7 +1279,7 @@ def handle(self, frame, unroller): next_instr = self.really_handle(frame, unroller) # JIT hack - return next_instr + return r_uint(next_instr) def really_handle(self, frame, unroller): """ Purely abstract method diff --git a/pypy/interpreter/test/test_executioncontext.py b/pypy/interpreter/test/test_executioncontext.py --- a/pypy/interpreter/test/test_executioncontext.py +++ b/pypy/interpreter/test/test_executioncontext.py @@ -292,7 +292,7 @@ import os, sys print sys.executable, self.tmpfile if sys.platform == "win32": - cmdformat = '""%s" "%s""' # excellent! tons of "! 
+ cmdformat = '"%s" "%s"' else: cmdformat = "'%s' '%s'" g = os.popen(cmdformat % (sys.executable, self.tmpfile), 'r') diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -587,7 +587,7 @@ assert isinstance(meth2, Method) assert meth2.call_args(args) == obj1 # Check method returned from unbound_method.__get__() - w_meth3 = descr_function_get(space, func, None, space.type(obj2)) + w_meth3 = descr_function_get(space, func, space.w_None, space.type(obj2)) meth3 = space.unwrap(w_meth3) w_meth4 = meth3.descr_method_get(obj2, space.w_None) meth4 = space.unwrap(w_meth4) diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -117,7 +117,7 @@ g = f() raises(NameError, g.throw, NameError, "Error", None) - + def test_throw_fail(self): def f(): yield 1 @@ -129,7 +129,7 @@ yield 1 g = f() raises(TypeError, g.throw, list()) - + def test_throw_fail3(self): def f(): yield 1 @@ -188,7 +188,7 @@ g = f() g.next() raises(NameError, g.close) - + def test_close_fail(self): def f(): try: @@ -267,3 +267,15 @@ assert r.startswith("= self.min_fragment: - self.total_mallocs -= (stop - middle) + self.total_mallocs -= r_uint(stop - middle) self._add_free_block(middle, stop) return True else: @@ -77,7 +77,7 @@ # Hack to make sure that mcs are not within 32-bits of one # another for testing purposes rmmap.hint.pos += 0x80000000 - size - self.total_memory_allocated += size + self.total_memory_allocated += r_uint(size) data = rffi.cast(lltype.Signed, data) return self._add_free_block(data, data + size) diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -111,6 +111,16 @@ def repr_of_descr(self): return '<%s %s %s>' % (self._clsname, self.name, self.offset) +class DynamicFieldDescr(BaseFieldDescr): + def __init__(self, offset, fieldsize, is_pointer, is_float, is_signed): + self.offset = offset + self._fieldsize = fieldsize + self._is_pointer_field = is_pointer + self._is_float_field = is_float + self._is_field_signed = is_signed + + def get_field_size(self, translate_support_code): + return self._fieldsize class NonGcPtrFieldDescr(BaseFieldDescr): _clsname = 'NonGcPtrFieldDescr' @@ -182,6 +192,7 @@ def repr_of_descr(self): return '<%s>' % self._clsname + class NonGcPtrArrayDescr(BaseArrayDescr): _clsname = 'NonGcPtrArrayDescr' def get_item_size(self, translate_support_code): @@ -211,6 +222,13 @@ def get_ofs_length(self, translate_support_code): return -1 +class DynamicArrayNoLengthDescr(BaseArrayNoLengthDescr): + def __init__(self, itemsize): + self.itemsize = itemsize + + def get_item_size(self, translate_support_code): + return self.itemsize + class NonGcPtrArrayNoLengthDescr(BaseArrayNoLengthDescr): _clsname = 'NonGcPtrArrayNoLengthDescr' def get_item_size(self, translate_support_code): @@ -281,6 +299,9 @@ def is_float_field(self): return self.fielddescr.is_float_field() + def sort_key(self): + return self.fielddescr.sort_key() + def repr_of_descr(self): return '' % self.fielddescr.repr_of_descr() @@ -302,12 +323,16 @@ _clsname = '' loop_token = None arg_classes = '' # <-- annotation hack - ffi_flags = 0 + ffi_flags = 1 - def __init__(self, arg_classes, extrainfo=None, ffi_flags=0): + def __init__(self, arg_classes, extrainfo=None, 
ffi_flags=1): self.arg_classes = arg_classes # string of "r" and "i" (ref/int) self.extrainfo = extrainfo self.ffi_flags = ffi_flags + # NB. the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which + # makes sense on Windows as it's the one for all the C functions + # we are compiling together with the JIT. On non-Windows platforms + # it is just ignored anyway. def __repr__(self): res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) @@ -348,6 +373,10 @@ return False # unless overridden def create_call_stub(self, rtyper, RESULT): + from pypy.rlib.clibffi import FFI_DEFAULT_ABI + assert self.get_call_conv() == FFI_DEFAULT_ABI, ( + "%r: create_call_stub() with a non-default call ABI" % (self,)) + def process(c): if c == 'L': assert longlong.supports_longlong @@ -442,7 +471,7 @@ """ _clsname = 'DynamicIntCallDescr' - def __init__(self, arg_classes, result_size, result_sign, extrainfo=None, ffi_flags=0): + def __init__(self, arg_classes, result_size, result_sign, extrainfo, ffi_flags): BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) assert isinstance(result_sign, bool) self._result_size = chr(result_size) diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -8,7 +8,7 @@ class UnsupportedKind(Exception): pass -def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo=None, ffi_flags=0): +def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo, ffi_flags): """Get a call descr: the types of result and args are represented by rlib.libffi.types.*""" try: diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -648,11 +648,10 @@ # make a malloc function, with two arguments def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - has_finalizer = bool(tid & (1<= 0 + size = self.frame_size(box.type) + for i in range(size): + while (index + i) >= len(self.used): + self.used.append(False) + if self.used[index + i]: + return False # already in use + # good, we can reuse the location + for i in range(size): + self.used[index + i] = True + self.bindings[box] = loc + return True + # abstract methods that need to be overwritten for specific assemblers @staticmethod def frame_pos(loc, type): @@ -49,6 +118,10 @@ @staticmethod def frame_size(type): return 1 + @staticmethod + def get_loc_index(loc): + raise NotImplementedError("Purely abstract") + class RegisterManager(object): """ Class that keeps track of register allocations @@ -68,7 +141,14 @@ self.frame_manager = frame_manager self.assembler = assembler + def is_still_alive(self, v): + # Check if 'v' is alive at the current position. + # Return False if the last usage is strictly before. + return self.longevity[v][1] >= self.position + def stays_alive(self, v): + # Check if 'v' stays alive after the current position. + # Return False if the last usage is before or at position. return self.longevity[v][1] > self.position def next_instruction(self, incr=1): @@ -84,11 +164,14 @@ point for all variables that might be in registers. 
""" self._check_type(v) - if isinstance(v, Const) or v not in self.reg_bindings: + if isinstance(v, Const): return if v not in self.longevity or self.longevity[v][1] <= self.position: - self.free_regs.append(self.reg_bindings[v]) - del self.reg_bindings[v] + if v in self.reg_bindings: + self.free_regs.append(self.reg_bindings[v]) + del self.reg_bindings[v] + if self.frame_manager is not None: + self.frame_manager.mark_as_free(v) def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -13,44 +13,46 @@ def test_call_descr_dynamic(): args = [types.sint, types.pointer] - descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, ffi_flags=42) + descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, None, + ffi_flags=42) assert isinstance(descr, DynamicIntCallDescr) assert descr.arg_classes == 'ii' assert descr.get_ffi_flags() == 42 args = [types.sint, types.double, types.pointer] - descr = get_call_descr_dynamic(FakeCPU(), args, types.void) + descr = get_call_descr_dynamic(FakeCPU(), args, types.void, None, 42) assert descr is None # missing floats descr = get_call_descr_dynamic(FakeCPU(supports_floats=True), - args, types.void, ffi_flags=43) + args, types.void, None, ffi_flags=43) assert isinstance(descr, VoidCallDescr) assert descr.arg_classes == 'ifi' assert descr.get_ffi_flags() == 43 - descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8) + descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8, None, 42) assert isinstance(descr, DynamicIntCallDescr) assert descr.get_result_size(False) == 1 assert descr.is_result_signed() == True - descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8) + descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8, None, 42) assert isinstance(descr, DynamicIntCallDescr) assert descr.get_result_size(False) == 1 assert descr.is_result_signed() == False if not is_64_bit: - descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong) + descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong, + None, 42) assert descr is None # missing longlongs descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True), - [], types.slonglong, ffi_flags=43) + [], types.slonglong, None, ffi_flags=43) assert isinstance(descr, LongLongCallDescr) assert descr.get_ffi_flags() == 43 else: assert types.slonglong is types.slong - descr = get_call_descr_dynamic(FakeCPU(), [], types.float) + descr = get_call_descr_dynamic(FakeCPU(), [], types.float, None, 42) assert descr is None # missing singlefloats descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True), - [], types.float, ffi_flags=44) + [], types.float, None, ffi_flags=44) SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) assert isinstance(descr, SingleFloatCallDescr) assert descr.get_ffi_flags() == 44 diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -247,12 +247,14 @@ self.record = [] def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size, - has_finalizer, contains_weakptr): + has_finalizer, has_light_finalizer, + contains_weakptr): assert not contains_weakptr + assert not has_finalizer # in these tests + assert not has_light_finalizer # in these tests p = 
llmemory.raw_malloc(size) p = llmemory.cast_adr_to_ptr(p, RESTYPE) - flags = int(has_finalizer) << 16 - tid = llop.combine_ushort(lltype.Signed, type_id, flags) + tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("fixedsize", repr(size), tid, p)) return p @@ -568,6 +570,28 @@ assert operations[1].getarg(2) == v_value assert operations[1].getdescr() == array_descr + def test_rewrite_assembler_5(self): + S = lltype.GcStruct('S') + A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) + interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, + A.OF, 'x') + wbdescr = self.gc_ll_descr.write_barrier_descr + ops = parse(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + expected = parse(""" + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + operations = get_deep_immutable_oplist(ops.operations) + operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, + operations, []) + equaloplists(operations, expected.operations) + def test_rewrite_assembler_initialization_store(self): S = lltype.GcStruct('S', ('parent', OBJECT), ('x', lltype.Signed)) diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -2,6 +2,8 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan +from pypy.jit.tool.oparser import parse +from pypy.jit.backend.detect_cpu import getcpuclass def newboxes(*values): return [BoxInt(v) for v in values] @@ -40,8 +42,13 @@ def frame_size(self, box_type): if box_type == FLOAT: return 2 + elif box_type == INT: + return 1 else: - return 1 + raise ValueError(box_type) + def get_loc_index(self, loc): + assert isinstance(loc, FakeFramePos) + return loc.pos class MockAsm(object): def __init__(self): @@ -280,7 +287,7 @@ rm.force_allocate_reg(b) rm.before_call() assert len(rm.reg_bindings) == 2 - assert fm.frame_depth == 2 + assert fm.get_frame_depth() == 2 assert len(asm.moves) == 2 rm._check_invariants() rm.after_call(boxes[-1]) @@ -303,7 +310,7 @@ rm.force_allocate_reg(b) rm.before_call(save_all_regs=True) assert len(rm.reg_bindings) == 0 - assert fm.frame_depth == 4 + assert fm.get_frame_depth() == 4 assert len(asm.moves) == 4 rm._check_invariants() rm.after_call(boxes[-1]) @@ -325,7 +332,7 @@ xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm) xrm.loc(f0) rm.loc(b0) - assert fm.frame_depth == 3 + assert fm.get_frame_depth() == 3 @@ -346,3 +353,123 @@ spilled2 = rm.force_allocate_reg(b5) assert spilled2 is loc rm._check_invariants() + + + def test_hint_frame_locations_1(self): + b0, = newboxes(0) + fm = TFrameManager() + loc123 = FakeFramePos(123, INT) + fm.hint_frame_locations[b0] = loc123 + assert fm.get_frame_depth() == 0 + loc = fm.loc(b0) + assert loc == loc123 + assert fm.get_frame_depth() == 124 + + def test_hint_frame_locations_2(self): + b0, b1, b2 = newboxes(0, 1, 2) + longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_allocate_reg(b2) + rm.force_spill_var(b0) + loc = 
rm.loc(b0) + assert isinstance(loc, FakeFramePos) + assert fm.get_loc_index(loc) == 0 + rm.position = 1 + assert fm.used == [True] + rm.possibly_free_var(b0) + assert fm.used == [False] + # + fm.hint_frame_locations[b1] = loc + rm.force_spill_var(b1) + loc1 = rm.loc(b1) + assert loc1 == loc + assert fm.used == [True] + # + fm.hint_frame_locations[b2] = loc + rm.force_spill_var(b2) + loc2 = rm.loc(b2) + assert loc2 != loc1 # because it was not free + assert fm.used == [True, True] + # + rm._check_invariants() + + def test_frame_manager_basic(self): + b0, b1 = newboxes(0, 1) + fm = TFrameManager() + loc0 = fm.loc(b0) + assert fm.get_loc_index(loc0) == 0 + # + assert fm.get(b1) is None + loc1 = fm.loc(b1) + assert fm.get_loc_index(loc1) == 1 + assert fm.get(b1) == loc1 + # + loc0b = fm.loc(b0) + assert loc0b == loc0 + # + fm.loc(BoxInt()) + assert fm.get_frame_depth() == 3 + # + f0 = BoxFloat() + locf0 = fm.loc(f0) + assert fm.get_loc_index(locf0) == 4 + assert fm.get_frame_depth() == 6 + # + f1 = BoxFloat() + locf1 = fm.loc(f1) + assert fm.get_loc_index(locf1) == 6 + assert fm.get_frame_depth() == 8 + assert fm.used == [True, True, True, False, True, True, True, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(f1) + assert fm.used == [False, True, True, False, True, True, False, False] + # + fm.reserve_location_in_frame(1) + assert fm.get_frame_depth() == 9 + assert fm.used == [False, True, True, False, True, True, False, False, True] + # + assert b0 not in fm.bindings + fm.set_binding(b0, loc0) + assert b0 in fm.bindings + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + b3 = BoxInt() + assert not fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) # already free + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b3) + assert fm.used == [False, True, True, False, True, True, False, False, True] + f3 = BoxFloat() + assert not fm.try_to_reuse_location(f3, fm.frame_pos(0, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(1, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(2, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(3, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(4, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(5, FLOAT)) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(f3, fm.frame_pos(6, FLOAT)) + assert fm.used == [False, True, True, False, True, True, True, True, True] + # + fm.used = [False] + assert fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True, True] + # + fm.used = [True] + assert not fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True] diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -173,38 +173,35 @@ lst[n] = None self.fail_descr_free_list.extend(faildescr_indices) - @staticmethod - def sizeof(S): + def sizeof(self, S): raise 
NotImplementedError - @staticmethod - def fielddescrof(S, fieldname): + def fielddescrof(self, S, fieldname): """Return the Descr corresponding to field 'fieldname' on the structure 'S'. It is important that this function (at least) caches the results.""" raise NotImplementedError - @staticmethod - def arraydescrof(A): + def interiorfielddescrof(self, A, fieldname): raise NotImplementedError - @staticmethod - def calldescrof(FUNC, ARGS, RESULT): + def interiorfielddescrof_dynamic(self, offset, width, fieldsize, is_pointer, + is_float, is_signed): + raise NotImplementedError + + def arraydescrof(self, A): + raise NotImplementedError + + def calldescrof(self, FUNC, ARGS, RESULT): # FUNC is the original function type, but ARGS is a list of types # with Voids removed raise NotImplementedError - @staticmethod - def methdescrof(SELFTYPE, methname): + def methdescrof(self, SELFTYPE, methname): # must return a subclass of history.AbstractMethDescr raise NotImplementedError - @staticmethod - def typedescrof(TYPE): - raise NotImplementedError - - @staticmethod - def interiorfielddescrof(A, fieldname): + def typedescrof(self, TYPE): raise NotImplementedError # ---------- the backend-dependent operations ---------- diff --git a/pypy/jit/backend/test/calling_convention_test.py b/pypy/jit/backend/test/calling_convention_test.py --- a/pypy/jit/backend/test/calling_convention_test.py +++ b/pypy/jit/backend/test/calling_convention_test.py @@ -2,7 +2,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, ConstInt, ConstPtr, BoxObj, Const, ConstObj, BoxFloat, ConstFloat) @@ -107,7 +107,7 @@ ops += 'finish(f99, %s)\n' % arguments loop = parse(ops, namespace=locals()) - looptoken = LoopToken() + looptoken = JitCellToken() done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) expected_result = self._prepare_args(args, floats, ints) @@ -253,7 +253,7 @@ called_ops += 'finish(f%d, descr=fdescr3)\n' % total_index # compile called loop called_loop = parse(called_ops, namespace=locals()) - called_looptoken = LoopToken() + called_looptoken = JitCellToken() called_looptoken.outermost_jitdriver_sd = FakeJitDriverSD() done_number = self.cpu.get_fail_descr_number(called_loop.operations[-1].getdescr()) self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) @@ -284,7 +284,7 @@ # we want to take the fast path self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # prepare call to called_loop diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -3,7 +3,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, TargetToken, ConstInt, ConstPtr, BoxObj, ConstObj, BoxFloat, ConstFloat) @@ -32,7 +32,7 @@ result_type, valueboxes, descr) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) args = [] for box in inputargs: @@ -103,7 +103,7 @@ ResOperation(rop.FINISH, [i1], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) fail = self.cpu.execute_token(looptoken, 2) res = self.cpu.get_latest_value_int(0) @@ -114,15 +114,17 @@ i0 = BoxInt() i1 = 
BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) fail = self.cpu.execute_token(looptoken, 2) @@ -134,18 +136,22 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - fail = self.cpu.execute_token(looptoken, 2) + fail = self.cpu.execute_token(looptoken, 44) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(2) assert res == 10 @@ -156,15 +162,17 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = weakref.ref(operations[2]) self.cpu.compile_loop(inputargs, operations, looptoken) @@ -184,15 +192,17 @@ i2 = BoxInt() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -200,7 +210,7 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -219,17 +229,21 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() + i3 = BoxInt() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, 
ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -237,7 +251,7 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -253,15 +267,17 @@ i1 = BoxInt() i2 = BoxInt() faildescr1 = BasicFailDescr(1) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + operations[3].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) fail = self.cpu.execute_token(looptoken, 2) @@ -281,7 +297,7 @@ return AbstractFailDescr.__setattr__(self, name, value) py.test.fail("finish descrs should not be touched") faildescr = UntouchableFailDescr() # to check that is not touched - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [i0], None, descr=faildescr) ] @@ -291,7 +307,7 @@ res = self.cpu.get_latest_value_int(0) assert res == 99 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [ConstInt(42)], None, descr=faildescr) ] @@ -301,7 +317,7 @@ res = self.cpu.get_latest_value_int(0) assert res == 42 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [], None, descr=faildescr) ] @@ -310,7 +326,7 @@ assert fail is faildescr if self.cpu.supports_floats: - looptoken = LoopToken() + looptoken = JitCellToken() f0 = BoxFloat() operations = [ ResOperation(rop.FINISH, [f0], None, descr=faildescr) @@ -322,7 +338,7 @@ res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == -61.25 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [constfloat(42.5)], None, descr=faildescr) ] @@ -339,14 +355,16 @@ z = BoxInt(579) t = BoxInt(455) u = BoxInt(0) # False - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [y, x], None, descr=targettoken), ResOperation(rop.INT_ADD, [x, y], z), ResOperation(rop.INT_SUB, [y, ConstInt(1)], t), ResOperation(rop.INT_EQ, [t, ConstInt(0)], u), ResOperation(rop.GUARD_FALSE, [u], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [z, t], None, descr=looptoken), + ResOperation(rop.JUMP, [t, z], None, descr=targettoken), ] operations[-2].setfailargs([t, z]) cpu.compile_loop([x, y], operations, looptoken) @@ -406,7 +424,7 @@ ] ops[1].setfailargs([v_res]) # - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([v1, v2], ops, looptoken) for x, y, z 
in testcases: excvalue = self.cpu.grab_exc_value() @@ -1067,16 +1085,18 @@ inputargs.insert(index_counter, i0) jumpargs.insert(index_counter, i1) # - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() faildescr = BasicFailDescr(15) operations = [ + ResOperation(rop.LABEL, inputargs, None, descr=targettoken), ResOperation(rop.INT_SUB, [i0, ConstInt(1)], i1), ResOperation(rop.INT_GE, [i1, ConstInt(0)], i2), ResOperation(rop.GUARD_TRUE, [i2], None), - ResOperation(rop.JUMP, jumpargs, None, descr=looptoken), + ResOperation(rop.JUMP, jumpargs, None, descr=targettoken), ] - operations[2].setfailargs(inputargs[:]) - operations[2].setdescr(faildescr) + operations[3].setfailargs(inputargs[:]) + operations[3].setdescr(faildescr) # self.cpu.compile_loop(inputargs, operations, looptoken) # @@ -1124,22 +1144,24 @@ py.test.skip("requires floats") fboxes = [BoxFloat() for i in range(12)] i2 = BoxInt() + targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) operations = [ + ResOperation(rop.LABEL, fboxes, None, descr=targettoken), ResOperation(rop.FLOAT_LE, [fboxes[0], constfloat(9.2)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), ResOperation(rop.FINISH, fboxes, None, descr=faildescr2), ] operations[-2].setfailargs(fboxes) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(fboxes, operations, looptoken) fboxes2 = [BoxFloat() for i in range(12)] f3 = BoxFloat() bridge = [ ResOperation(rop.FLOAT_SUB, [fboxes2[0], constfloat(1.0)], f3), - ResOperation(rop.JUMP, [f3] + fboxes2[1:], None, descr=looptoken), + ResOperation(rop.JUMP, [f3]+fboxes2[1:], None, descr=targettoken), ] self.cpu.compile_bridge(faildescr1, fboxes2, bridge, looptoken) @@ -1190,7 +1212,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1245,7 +1267,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1302,7 +1324,7 @@ faildescr = BasicFailDescr(1) operations.append(ResOperation(rop.FINISH, [], None, descr=faildescr)) - looptoken = LoopToken() + looptoken = JitCellToken() # self.cpu.compile_loop(inputargs, operations, looptoken) # @@ -1373,7 +1395,7 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(5))] operations[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) self.cpu.compile_loop(unique_testcase_list, operations, @@ -1647,13 +1669,14 @@ exc_tp = xtp exc_ptr = xptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.execute_token(loop.token, 1) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_ref(1) == xptr excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.execute_token(loop.token, 0) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1670,8 +1693,9 @@ exc_tp = ytp exc_ptr = yptr loop = parse(ops, self.cpu, namespace=locals()) - 
self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.execute_token(loop.token, 1) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == yptr @@ -1687,12 +1711,13 @@ finish(0) ''' loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.execute_token(loop.token, 1) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == xptr - self.cpu.execute_token(loop.token, 0) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 0 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1862,7 +1887,7 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 @@ -1903,7 +1928,7 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 @@ -1945,7 +1970,7 @@ ResOperation(rop.FINISH, [f2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, f2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 @@ -1986,7 +2011,7 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([i1, i2]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1], ops, looptoken) fail = self.cpu.execute_token(looptoken, ord('G')) assert fail.identifier == 0 @@ -2045,7 +2070,7 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1, i2, i3], ops, looptoken) args = [rffi.cast(lltype.Signed, raw), 2, @@ -2101,7 +2126,7 @@ ops += [ ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0)) ] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') @@ -2122,7 +2147,7 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[0].setfailargs([i1]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) fail = self.cpu.execute_token(looptoken, -42, 9) @@ -2360,7 +2385,7 @@ i18 = int_add(i17, i9) finish(i18)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 @@ -2379,7 +2404,7 @@ finish(i11) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) args = [i+1 for i in range(10)] res = self.cpu.execute_token(othertoken, *args) @@ -2414,7 +2439,7 @@ finish(f2)''' loop = parse(ops) 
done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) args = [longlong.getfloatstorage(1.2), @@ -2429,7 +2454,7 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) args = [longlong.getfloatstorage(1.2), longlong.getfloatstorage(3.2)] @@ -2442,7 +2467,7 @@ del called[:] self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) args = [longlong.getfloatstorage(1.2), longlong.getfloatstorage(3.2)] @@ -2504,7 +2529,7 @@ f2 = float_add(f0, f1) finish(f2)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) args = [longlong.getfloatstorage(1.25), @@ -2521,7 +2546,7 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # normal call_assembler: goes to looptoken @@ -2539,7 +2564,7 @@ f2 = float_sub(f0, f1) finish(f2)''' loop = parse(ops) - looptoken2 = LoopToken() + looptoken2 = JitCellToken() looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken2) @@ -2900,12 +2925,139 @@ ResOperation(rop.FINISH, [p0], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # overflowing value: fail = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) assert fail.identifier == excdescr.identifier + def test_compile_loop_with_target(self): + i0 = BoxInt() + i1 = BoxInt() + i2 = BoxInt() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken1 = TargetToken() + targettoken2 = TargetToken() + faildescr = BasicFailDescr(2) + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), + ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr), + ResOperation(rop.LABEL, [i1], None, descr=targettoken2), + ResOperation(rop.INT_GE, [i1, ConstInt(0)], i3), + ResOperation(rop.GUARD_TRUE, [i3], None, descr=BasicFailDescr(3)), + ResOperation(rop.JUMP, [i1], None, descr=targettoken1), + ] + inputargs = [i0] + operations[3].setfailargs([i1]) + operations[6].setfailargs([i1]) + + self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.set_future_value_int(0, 2) + fail = self.cpu.execute_token(looptoken) + assert fail.identifier == 2 + res = self.cpu.get_latest_value_int(0) + assert res == 10 + + inputargs = [i0] + operations = [ + ResOperation(rop.INT_SUB, [i0, ConstInt(20)], i2), + ResOperation(rop.JUMP, [i2], None, descr=targettoken2), + ] + self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) + + self.cpu.set_future_value_int(0, 2) + fail = self.cpu.execute_token(looptoken) + assert fail.identifier == 3 + res = self.cpu.get_latest_value_int(0) + assert res == -10 + + def test_compile_bridge_with_target(self): + # This test creates a loopy piece of code in a bridge, and builds another + # unrelated loop that 
ends in a jump directly to this loopy bit of code. + # It catches a case in which we underestimate the needed frame_depth across + # the cross-loop JUMP, because we estimate it based on the frame_depth stored + # in the original loop. + i0 = BoxInt() + i1 = BoxInt() + looptoken1 = JitCellToken() + targettoken1 = TargetToken() + faildescr1 = BasicFailDescr(2) + inputargs = [i0] + operations = [ + ResOperation(rop.INT_LE, [i0, ConstInt(1)], i1), + ResOperation(rop.GUARD_TRUE, [i1], None, descr=faildescr1), + ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(1234)), + ] + operations[1].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken1) + + def func(a, b, c, d, e, f, g, h, i): + assert a + 2 == b + assert a + 4 == c + assert a + 6 == d + assert a + 8 == e + assert a + 10 == f + assert a + 12 == g + assert a + 14 == h + assert a + 16 == i + FPTR = self.Ptr(self.FuncType([lltype.Signed]*9, lltype.Void)) + func_ptr = llhelper(FPTR, func) + cpu = self.cpu + calldescr = cpu.calldescrof(deref(FPTR), (lltype.Signed,)*9, lltype.Void, + EffectInfo.MOST_GENERAL) + funcbox = self.get_funcbox(cpu, func_ptr) + + i0 = BoxInt(); i1 = BoxInt(); i2 = BoxInt(); i3 = BoxInt(); i4 = BoxInt() + i5 = BoxInt(); i6 = BoxInt(); i7 = BoxInt(); i8 = BoxInt(); i9 = BoxInt() + i10 = BoxInt(); i11 = BoxInt(); i12 = BoxInt(); i13 = BoxInt(); i14 = BoxInt() + i15 = BoxInt(); i16 = BoxInt(); i17 = BoxInt(); i18 = BoxInt(); i19 = BoxInt() + i20 = BoxInt() + inputargs = [i0] + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_ADD, [i1, ConstInt(1)], i2), + ResOperation(rop.INT_ADD, [i2, ConstInt(1)], i3), + ResOperation(rop.INT_ADD, [i3, ConstInt(1)], i4), + ResOperation(rop.INT_ADD, [i4, ConstInt(1)], i5), + ResOperation(rop.INT_ADD, [i5, ConstInt(1)], i6), + ResOperation(rop.INT_ADD, [i6, ConstInt(1)], i7), + ResOperation(rop.INT_ADD, [i7, ConstInt(1)], i8), + ResOperation(rop.INT_ADD, [i8, ConstInt(1)], i9), + ResOperation(rop.INT_ADD, [i9, ConstInt(1)], i10), + ResOperation(rop.INT_ADD, [i10, ConstInt(1)], i11), + ResOperation(rop.INT_ADD, [i11, ConstInt(1)], i12), + ResOperation(rop.INT_ADD, [i12, ConstInt(1)], i13), + ResOperation(rop.INT_ADD, [i13, ConstInt(1)], i14), + ResOperation(rop.INT_ADD, [i14, ConstInt(1)], i15), + ResOperation(rop.INT_ADD, [i15, ConstInt(1)], i16), + ResOperation(rop.INT_ADD, [i16, ConstInt(1)], i17), + ResOperation(rop.INT_ADD, [i17, ConstInt(1)], i18), + ResOperation(rop.INT_ADD, [i18, ConstInt(1)], i19), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.INT_LT, [i19, ConstInt(100)], i20), + ResOperation(rop.GUARD_TRUE, [i20], None, descr=BasicFailDescr(42)), + ResOperation(rop.JUMP, [i19], None, descr=targettoken1), + ] + operations[-2].setfailargs([]) + self.cpu.compile_bridge(faildescr1, inputargs, operations, looptoken1) + + looptoken2 = JitCellToken() + inputargs = [] + operations = [ + ResOperation(rop.JUMP, [ConstInt(0)], None, descr=targettoken1), + ] + self.cpu.compile_loop(inputargs, operations, looptoken2) + + fail = self.cpu.execute_token(looptoken2) + assert fail.identifier == 42 + class OOtypeBackendTest(BaseBackendTest): diff --git a/pypy/jit/backend/test/test_ll_random.py b/pypy/jit/backend/test/test_ll_random.py --- a/pypy/jit/backend/test/test_ll_random.py +++ 
b/pypy/jit/backend/test/test_ll_random.py @@ -28,16 +28,27 @@ fork.structure_types_and_vtables = self.structure_types_and_vtables return fork - def get_structptr_var(self, r, must_have_vtable=False, type=lltype.Struct): + def _choose_ptr_vars(self, from_, type, array_of_structs): + ptrvars = [] + for i in range(len(from_)): + v, S = from_[i][:2] + if not isinstance(S, type): + continue + if ((isinstance(S, lltype.Array) and + isinstance(S.OF, lltype.Struct)) == array_of_structs): + ptrvars.append((v, S)) + return ptrvars + + def get_structptr_var(self, r, must_have_vtable=False, type=lltype.Struct, + array_of_structs=False): while True: - ptrvars = [(v, S) for (v, S) in self.ptrvars - if isinstance(S, type)] + ptrvars = self._choose_ptr_vars(self.ptrvars, type, + array_of_structs) if ptrvars and r.random() < 0.8: v, S = r.choice(ptrvars) else: - prebuilt_ptr_consts = [(v, S) - for (v, S, _) in self.prebuilt_ptr_consts - if isinstance(S, type)] + prebuilt_ptr_consts = self._choose_ptr_vars( + self.prebuilt_ptr_consts, type, array_of_structs) if prebuilt_ptr_consts and r.random() < 0.7: v, S = r.choice(prebuilt_ptr_consts) else: @@ -48,7 +59,8 @@ has_vtable=must_have_vtable) else: # create a new constant array - p = self.get_random_array(r) + p = self.get_random_array(r, + must_be_array_of_structs=array_of_structs) S = lltype.typeOf(p).TO v = ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, p)) self.prebuilt_ptr_consts.append((v, S, @@ -74,7 +86,8 @@ TYPE = lltype.Signed return TYPE - def get_random_structure_type(self, r, with_vtable=None, cache=True): + def get_random_structure_type(self, r, with_vtable=None, cache=True, + type=lltype.GcStruct): if cache and self.structure_types and r.random() < 0.5: return r.choice(self.structure_types) fields = [] @@ -85,7 +98,7 @@ for i in range(r.randrange(1, 5)): TYPE = self.get_random_primitive_type(r) fields.append(('f%d' % i, TYPE)) - S = lltype.GcStruct('S%d' % self.counter, *fields, **kwds) + S = type('S%d' % self.counter, *fields, **kwds) self.counter += 1 if cache: self.structure_types.append(S) @@ -125,17 +138,29 @@ setattr(p, fieldname, rffi.cast(TYPE, r.random_integer())) return p - def get_random_array_type(self, r): - TYPE = self.get_random_primitive_type(r) + def get_random_array_type(self, r, can_be_array_of_struct=False, + must_be_array_of_structs=False): + if ((can_be_array_of_struct and r.random() < 0.1) or + must_be_array_of_structs): + TYPE = self.get_random_structure_type(r, cache=False, + type=lltype.Struct) + else: + TYPE = self.get_random_primitive_type(r) return lltype.GcArray(TYPE) - def get_random_array(self, r): - A = self.get_random_array_type(r) + def get_random_array(self, r, must_be_array_of_structs=False): + A = self.get_random_array_type(r, + must_be_array_of_structs=must_be_array_of_structs) length = (r.random_integer() // 15) % 300 # length: between 0 and 299 # likely to be small p = lltype.malloc(A, length) - for i in range(length): - p[i] = rffi.cast(A.OF, r.random_integer()) + if isinstance(A.OF, lltype.Primitive): + for i in range(length): + p[i] = rffi.cast(A.OF, r.random_integer()) + else: + for i in range(length): + for fname, TP in A.OF._flds.iteritems(): + setattr(p[i], fname, rffi.cast(TP, r.random_integer())) return p def get_index(self, length, r): @@ -155,8 +180,16 @@ dic[fieldname] = getattr(p, fieldname) else: assert isinstance(S, lltype.Array) - for i in range(len(p)): - dic[i] = p[i] + if isinstance(S.OF, lltype.Struct): + for i in range(len(p)): + item = p[i] + s1 = {} + for fieldname in S.OF._names: 
+ s1[fieldname] = getattr(item, fieldname) + dic[i] = s1 + else: + for i in range(len(p)): + dic[i] = p[i] return dic def print_loop_prebuilt(self, names, writevar, s): @@ -220,7 +253,7 @@ class GetFieldOperation(test_random.AbstractOperation): def field_descr(self, builder, r): - v, S = builder.get_structptr_var(r) + v, S = builder.get_structptr_var(r, ) names = S._names if names[0] == 'parent': names = names[1:] @@ -239,6 +272,28 @@ continue break +class GetInteriorFieldOperation(test_random.AbstractOperation): + def field_descr(self, builder, r): + v, A = builder.get_structptr_var(r, type=lltype.Array, + array_of_structs=True) + array = v.getref(lltype.Ptr(A)) + v_index = builder.get_index(len(array), r) + name = r.choice(A.OF._names) + descr = builder.cpu.interiorfielddescrof(A, name) + descr._random_info = 'cpu.interiorfielddescrof(%s, %r)' % (A.OF._name, + name) + TYPE = getattr(A.OF, name) + return v, v_index, descr, TYPE + + def produce_into(self, builder, r): + while True: + try: + v, v_index, descr, _ = self.field_descr(builder, r) + self.put(builder, [v, v_index], descr) + except lltype.UninitializedMemoryAccess: + continue + break + class SetFieldOperation(GetFieldOperation): def produce_into(self, builder, r): v, descr, TYPE = self.field_descr(builder, r) @@ -251,6 +306,18 @@ break builder.do(self.opnum, [v, w], descr) +class SetInteriorFieldOperation(GetInteriorFieldOperation): + def produce_into(self, builder, r): + v, v_index, descr, TYPE = self.field_descr(builder, r) + while True: + if r.random() < 0.3: + w = ConstInt(r.random_integer()) + else: + w = r.choice(builder.intvars) + if rffi.cast(lltype.Signed, rffi.cast(TYPE, w.value)) == w.value: + break + builder.do(self.opnum, [v, v_index, w], descr) + class NewOperation(test_random.AbstractOperation): def size_descr(self, builder, S): descr = builder.cpu.sizeof(S) @@ -306,7 +373,7 @@ class NewArrayOperation(ArrayOperation): def produce_into(self, builder, r): - A = builder.get_random_array_type(r) + A = builder.get_random_array_type(r, can_be_array_of_struct=True) v_size = builder.get_index(300, r) v_ptr = builder.do(self.opnum, [v_size], self.array_descr(builder, A)) builder.ptrvars.append((v_ptr, A)) @@ -586,7 +653,9 @@ for i in range(4): # make more common OPERATIONS.append(GetFieldOperation(rop.GETFIELD_GC)) OPERATIONS.append(GetFieldOperation(rop.GETFIELD_GC)) + OPERATIONS.append(GetInteriorFieldOperation(rop.GETINTERIORFIELD_GC)) OPERATIONS.append(SetFieldOperation(rop.SETFIELD_GC)) + OPERATIONS.append(SetInteriorFieldOperation(rop.SETINTERIORFIELD_GC)) OPERATIONS.append(NewOperation(rop.NEW)) OPERATIONS.append(NewOperation(rop.NEW_WITH_VTABLE)) diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -3,8 +3,8 @@ from pypy.rlib.rarithmetic import intmask, LONG_BIT from pypy.rpython.lltypesystem import llmemory from pypy.jit.metainterp.history import BasicFailDescr, TreeLoop -from pypy.jit.metainterp.history import BoxInt, ConstInt, LoopToken -from pypy.jit.metainterp.history import BoxPtr, ConstPtr +from pypy.jit.metainterp.history import BoxInt, ConstInt, JitCellToken +from pypy.jit.metainterp.history import BoxPtr, ConstPtr, TargetToken from pypy.jit.metainterp.history import BoxFloat, ConstFloat, Const from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.executor import execute_nonspec @@ -179,7 +179,7 @@ #print >>s, ' operations[%d].suboperations = [' % i 
#print >>s, ' ResOperation(rop.FAIL, [%s], None)]' % ( # ', '.join([names[v] for v in op.args])) - print >>s, ' looptoken = LoopToken()' + print >>s, ' looptoken = JitCellToken()' print >>s, ' cpu.compile_loop(inputargs, operations, looptoken)' if hasattr(self.loop, 'inputargs'): for i, v in enumerate(self.loop.inputargs): @@ -495,9 +495,9 @@ if pytest.config.option.backend == 'llgraph': from pypy.jit.backend.llgraph.runner import LLtypeCPU return LLtypeCPU(None) - elif pytest.config.option.backend == 'x86': - from pypy.jit.backend.x86.runner import CPU386 - return CPU386(None, None) + elif pytest.config.option.backend == 'cpu': + from pypy.jit.backend.detect_cpu import getcpuclass + return getcpuclass()(None, None) else: assert 0, "unknown backend %r" % pytest.config.option.backend @@ -525,29 +525,53 @@ startvars.append(BoxFloat(r.random_float_storage())) else: startvars.append(BoxInt(r.random_integer())) + allow_delay = True + else: + allow_delay = False assert len(dict.fromkeys(startvars)) == len(startvars) self.startvars = startvars self.prebuilt_ptr_consts = [] self.r = r - self.build_random_loop(cpu, builder_factory, r, startvars) + self.build_random_loop(cpu, builder_factory, r, startvars, allow_delay) - def build_random_loop(self, cpu, builder_factory, r, startvars): + def build_random_loop(self, cpu, builder_factory, r, startvars, allow_delay): loop = TreeLoop('test_random_function') loop.inputargs = startvars[:] loop.operations = [] - loop.token = LoopToken() - + loop._jitcelltoken = JitCellToken() builder = builder_factory(cpu, loop, startvars[:]) - self.generate_ops(builder, r, loop, startvars) + if allow_delay: + needs_a_label = True + else: + self.insert_label(loop, 0, r) + needs_a_label = False + self.generate_ops(builder, r, loop, startvars, needs_a_label=needs_a_label) self.builder = builder self.loop = loop - cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + dump(loop) + cpu.compile_loop(loop.inputargs, loop.operations, loop._jitcelltoken) - def generate_ops(self, builder, r, loop, startvars): + def insert_label(self, loop, position, r): + assert not hasattr(loop, '_targettoken') + for i in range(position): + op = loop.operations[i] + if (not op.has_no_side_effect() + or not isinstance(op.result, (BoxInt, BoxFloat))): + position = i + break # cannot move the LABEL later + randompos = r.randrange(0, len(self.startvars)+1) + self.startvars.insert(randompos, op.result) + loop._targettoken = TargetToken() + loop.operations.insert(position, ResOperation(rop.LABEL, self.startvars, None, + loop._targettoken)) + + def generate_ops(self, builder, r, loop, startvars, needs_a_label=False): block_length = pytest.config.option.block_length + istart = 0 for i in range(block_length): + istart = len(loop.operations) try: op = r.choice(builder.OPERATIONS) op.filter(builder) @@ -556,6 +580,12 @@ pass if builder.should_fail_by is not None: break + if needs_a_label and r.random() < 0.2: + self.insert_label(loop, istart, r) + needs_a_label = False + if needs_a_label: + self.insert_label(loop, istart, r) + endvars = [] used_later = {} for op in loop.operations: @@ -581,6 +611,17 @@ if pytest.config.option.output: builder.print_loop() + def runjitcelltoken(self): + if self.startvars == self.loop.inputargs: + return self.loop._jitcelltoken + if not hasattr(self, '_initialjumploop_celltoken'): + self._initialjumploop_celltoken = JitCellToken() + self.cpu.compile_loop(self.startvars[:], + [ResOperation(rop.JUMP, self.startvars[:], None, + descr=self.loop._targettoken)], + 
self._initialjumploop_celltoken) + return self._initialjumploop_celltoken + def get_fail_args(self): if self.should_fail_by.is_guard(): assert self.should_fail_by.getfailargs() is not None @@ -595,6 +636,10 @@ for name, value in fields.items(): if isinstance(name, str): setattr(container, name, value) + elif isinstance(value, dict): + item = container.getitem(name) + for key1, value1 in value.items(): + setattr(item, key1, value1) else: container.setitem(name, value) @@ -611,7 +656,7 @@ cpu.set_future_value_float(i, box.value) else: raise NotImplementedError(box) - fail = cpu.execute_token(self.loop.token) + fail = cpu.execute_token(self.runjitcelltoken()) assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): if isinstance(v, (BoxFloat, ConstFloat)): @@ -679,26 +724,37 @@ args = [x.clonebox() for x in subset] rl = RandomLoop(self.builder.cpu, self.builder.fork, r, args) + dump(rl.loop) self.cpu.compile_loop(rl.loop.inputargs, rl.loop.operations, - rl.loop.token) + rl.loop._jitcelltoken) # done self.should_fail_by = rl.should_fail_by self.expected = rl.expected assert len(rl.loop.inputargs) == len(args) # The new bridge's execution will end normally at its FINISH. # Just replace the FINISH with the JUMP to the new loop. - jump_op = ResOperation(rop.JUMP, subset, None, descr=rl.loop.token) + jump_op = ResOperation(rop.JUMP, subset, None, + descr=rl.loop._targettoken) subloop.operations[-1] = jump_op self.guard_op = rl.guard_op self.prebuilt_ptr_consts += rl.prebuilt_ptr_consts - self.loop.token.record_jump_to(rl.loop.token) + self.loop._jitcelltoken.record_jump_to(rl.loop._jitcelltoken) self.dont_generate_more = True if r.random() < .05: return False + dump(subloop) self.builder.cpu.compile_bridge(fail_descr, fail_args, - subloop.operations, self.loop.token) + subloop.operations, + self.loop._jitcelltoken) return True +def dump(loop): + print >> sys.stderr, loop + if hasattr(loop, 'inputargs'): + print >> sys.stderr, '\t', loop.inputargs + for op in loop.operations: + print >> sys.stderr, '\t', op + def check_random_function(cpu, BuilderClass, r, num=None, max=None): loop = RandomLoop(cpu, BuilderClass, r) while True: diff --git a/pypy/jit/backend/x86/test/test_zll_random.py b/pypy/jit/backend/test/test_zll_stress.py rename from pypy/jit/backend/x86/test/test_zll_random.py rename to pypy/jit/backend/test/test_zll_stress.py diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -2,14 +2,14 @@ from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from pypy.jit.metainterp.history import Const, Box, BoxInt, ConstInt -from pypy.jit.metainterp.history import (AbstractFailDescr, INT, REF, FLOAT, - LoopToken) +from pypy.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT +from pypy.jit.metainterp.history import JitCellToken from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.jit.backend.model import CompiledLoopToken -from pypy.jit.backend.x86.regalloc import (RegAlloc, get_ebp_ofs, - _get_scale, gpr_reg_mgr_cls) +from pypy.jit.backend.x86.regalloc import (RegAlloc, get_ebp_ofs, _get_scale, + gpr_reg_mgr_cls, _valid_addressing_size) from pypy.jit.backend.x86.arch import (FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, IS_X86_32, IS_X86_64) @@ -152,14 +152,13 
@@ allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, allblocks) + self.target_tokens_currently_compiling = {} def teardown(self): self.pending_guard_tokens = None if WORD == 8: self.pending_memoryerror_trampoline_from = None self.mc = None - self.looppos = -1 - self.currently_compiling_loop = None self.current_clt = None def finish_once(self): @@ -425,8 +424,6 @@ _x86_loop_code (an integer giving an address) _x86_bootstrap_code (an integer giving an address) _x86_direct_bootstrap_code ( " " " " ) - _x86_frame_depth - _x86_param_depth _x86_arglocs _x86_debug_checksum ''' @@ -443,7 +440,6 @@ assert len(set(inputargs)) == len(inputargs) self.setup(looptoken) - self.currently_compiling_loop = looptoken if log: self._register_counter(False, looptoken.number) operations = self._inject_debugging_code(looptoken, operations) @@ -455,15 +451,16 @@ bootstrappos = self.mc.get_relative_pos() stackadjustpos = self._assemble_bootstrap_code(inputargs, arglocs) - self.looppos = self.mc.get_relative_pos() - looptoken._x86_frame_depth = -1 # temporarily - looptoken._x86_param_depth = -1 # temporarily + looppos = self.mc.get_relative_pos() + looptoken._x86_loop_code = looppos + clt.frame_depth = -1 # temporarily + clt.param_depth = -1 # temporarily frame_depth, param_depth = self._assemble(regalloc, operations) - looptoken._x86_frame_depth = frame_depth - looptoken._x86_param_depth = param_depth + clt.frame_depth = frame_depth + clt.param_depth = param_depth directbootstrappos = self.mc.get_relative_pos() - self._assemble_bootstrap_direct_call(arglocs, self.looppos, + self._assemble_bootstrap_direct_call(arglocs, looppos, frame_depth+param_depth) self.write_pending_failure_recoveries() fullsize = self.mc.get_relative_pos() @@ -472,7 +469,7 @@ debug_start("jit-backend-addr") debug_print("Loop %d (%s) has address %x to %x (bootstrap %x)" % ( looptoken.number, loopname, - rawstart + self.looppos, + rawstart + looppos, rawstart + directbootstrappos, rawstart)) debug_stop("jit-backend-addr") @@ -488,8 +485,8 @@ looptoken._x86_ops_offset = ops_offset looptoken._x86_bootstrap_code = rawstart + bootstrappos - looptoken._x86_loop_code = rawstart + self.looppos looptoken._x86_direct_bootstrap_code = rawstart + directbootstrappos + self.fixup_target_tokens(rawstart) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -548,6 +545,9 @@ # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset + self.fixup_target_tokens(rawstart) + self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth) + self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -668,6 +668,11 @@ mc.copy_to_raw_memory(adr_target) faildescr._x86_adr_jump_offset = 0 # means "patched" + def fixup_target_tokens(self, rawstart): + for targettoken in self.target_tokens_currently_compiling: + targettoken._x86_loop_code += rawstart + self.target_tokens_currently_compiling = None + @specialize.argtype(1) def _inject_debugging_code(self, looptoken, operations): if self._debug: @@ -685,20 +690,24 @@ ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), ResOperation(rop.SETFIELD_RAW, [c_adr, box2], None, descr=self.debug_counter_descr)] - operations = ops + operations + if operations[0].getopnum() == rop.LABEL: + operations = [operations[0]] + ops + operations[1:] + else: + operations = ops + operations 
return operations def _assemble(self, regalloc, operations): self._regalloc = regalloc + regalloc.compute_hint_frame_locations(operations) regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging - frame_depth = regalloc.fm.frame_depth + frame_depth = regalloc.fm.get_frame_depth() param_depth = regalloc.param_depth jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: - target_frame_depth = jump_target_descr._x86_frame_depth - target_param_depth = jump_target_descr._x86_param_depth + target_frame_depth = jump_target_descr._x86_clt.frame_depth + target_param_depth = jump_target_descr._x86_clt.param_depth frame_depth = max(frame_depth, target_frame_depth) param_depth = max(param_depth, target_param_depth) return frame_depth, param_depth @@ -1596,13 +1605,33 @@ genop_getarrayitem_gc_pure = genop_getarrayitem_gc genop_getarrayitem_raw = genop_getarrayitem_gc + def _get_interiorfield_addr(self, temp_loc, index_loc, itemsize_loc, + base_loc, ofs_loc): + assert isinstance(itemsize_loc, ImmedLoc) + if isinstance(index_loc, ImmedLoc): + temp_loc = imm(index_loc.value * itemsize_loc.value) + elif _valid_addressing_size(itemsize_loc.value): + return AddressLoc(base_loc, index_loc, _get_scale(itemsize_loc.value), ofs_loc.value) + else: + # XXX should not use IMUL in more cases, it can use a clever LEA + assert isinstance(temp_loc, RegLoc) + assert isinstance(index_loc, RegLoc) + assert not temp_loc.is_xmm + self.mc.IMUL_rri(temp_loc.value, index_loc.value, + itemsize_loc.value) + assert isinstance(ofs_loc, ImmedLoc) + return AddressLoc(base_loc, temp_loc, 0, ofs_loc.value) + def genop_getinteriorfield_gc(self, op, arglocs, resloc): - base_loc, ofs_loc, itemsize_loc, fieldsize_loc, index_loc, sign_loc = arglocs - # XXX should not use IMUL in most cases - self.mc.IMUL(index_loc, itemsize_loc) - src_addr = AddressLoc(base_loc, index_loc, 0, ofs_loc.value) + (base_loc, ofs_loc, itemsize_loc, fieldsize_loc, + index_loc, temp_loc, sign_loc) = arglocs + src_addr = self._get_interiorfield_addr(temp_loc, index_loc, + itemsize_loc, base_loc, + ofs_loc) self.load_from_mem(resloc, src_addr, fieldsize_loc, sign_loc) + genop_getinteriorfield_raw = genop_getinteriorfield_gc + def genop_discard_setfield_gc(self, op, arglocs): base_loc, ofs_loc, size_loc, value_loc = arglocs @@ -1611,12 +1640,15 @@ self.save_into_mem(dest_addr, value_loc, size_loc) def genop_discard_setinteriorfield_gc(self, op, arglocs): - base_loc, ofs_loc, itemsize_loc, fieldsize_loc, index_loc, value_loc = arglocs - # XXX should not use IMUL in most cases - self.mc.IMUL(index_loc, itemsize_loc) - dest_addr = AddressLoc(base_loc, index_loc, 0, ofs_loc.value) + (base_loc, ofs_loc, itemsize_loc, fieldsize_loc, + index_loc, temp_loc, value_loc) = arglocs + dest_addr = self._get_interiorfield_addr(temp_loc, index_loc, + itemsize_loc, base_loc, + ofs_loc) self.save_into_mem(dest_addr, value_loc, fieldsize_loc) + genop_discard_setinteriorfield_raw = genop_discard_setinteriorfield_gc + def genop_discard_setarrayitem_gc(self, op, arglocs): base_loc, ofs_loc, value_loc, size_loc, baseofs = arglocs assert isinstance(baseofs, ImmedLoc) @@ -2321,7 +2353,7 @@ fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, JitCellToken) assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) # # Write a call to the 
direct_bootstrap_code of the target assembler @@ -2555,15 +2587,13 @@ gcrootmap.put(self.gcrootmap_retaddr_forced, mark) self.gcrootmap_retaddr_forced = -1 - def target_arglocs(self, loop_token): - return loop_token._x86_arglocs - - def closing_jump(self, loop_token): - if loop_token is self.currently_compiling_loop: + def closing_jump(self, target_token): + target = target_token._x86_loop_code + if target_token in self.target_tokens_currently_compiling: curpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(self.looppos - curpos) + self.mc.JMP_l(target - curpos) else: - self.mc.JMP(imm(loop_token._x86_loop_code)) + self.mc.JMP(imm(target)) def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -5,7 +5,8 @@ import os from pypy.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr, ResOperation, BoxPtr, ConstFloat, - BoxFloat, LoopToken, INT, REF, FLOAT) + BoxFloat, INT, REF, FLOAT, + TargetToken, JitCellToken) from pypy.jit.backend.x86.regloc import * from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.rlib.objectmodel import we_are_translated @@ -138,6 +139,10 @@ return 2 else: return 1 + @staticmethod + def get_loc_index(loc): + assert isinstance(loc, StackLoc) + return loc.position if WORD == 4: gpr_reg_mgr_cls = X86RegisterManager @@ -159,6 +164,7 @@ # to be read/used by the assembler too self.jump_target_descr = None self.close_stack_struct = 0 + self.final_jump_op = None def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() @@ -167,35 +173,30 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity = self._compute_vars_longevity(inputargs, operations) + longevity, useful = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) - return operations + return operations, useful def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - jump = operations[-1] - loop_consts = self._compute_loop_consts(inputargs, jump, looptoken) - self.loop_consts = loop_consts - return self._process_inputargs(inputargs), operations + operations, useful = self._prepare(inputargs, operations, allgcrefs) + return self._process_inputargs(inputargs, useful), operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - self.loop_consts = {} + operations, _ = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) - self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] return operations def reserve_param(self, n): self.param_depth = max(self.param_depth, n) - def _process_inputargs(self, inputargs): + def _process_inputargs(self, inputargs, useful): # XXX we can sort out here by longevity if we need something # more optimal floatlocs = [None] * len(inputargs) @@ -211,7 +212,7 @@ arg = inputargs[i] assert not isinstance(arg, Const) reg = None - if arg not in self.loop_consts and self.longevity[arg][1] > -1: + if self.longevity[arg][1] > -1 and 
arg in useful: if arg.type == FLOAT: # xxx is it really a good idea? at the first CALL they # will all be flushed anyway @@ -287,15 +288,15 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: - loop_consts = {} - else: - loop_consts = {} - for i in range(len(inputargs)): - if inputargs[i] is jump.getarg(i): - loop_consts[inputargs[i]] = i - return loop_consts + #def _compute_loop_consts(self, inputargs, jump, looptoken): + # if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: + # loop_consts = {} + # else: + # loop_consts = {} + # for i in range(len(inputargs)): + # if inputargs[i] is jump.getarg(i): + # loop_consts[inputargs[i]] = i + # return loop_consts def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py @@ -311,7 +312,7 @@ self.xrm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) else: if isinstance(loc, RegLoc): if loc is ebp: @@ -320,7 +321,7 @@ self.rm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) self.rm.free_regs = [] for reg in self.rm.all_regs: if reg not in used: @@ -356,7 +357,7 @@ def get_current_depth(self): # return (self.fm.frame_depth, self.param_depth), but trying to share # the resulting tuple among several calls - arg0 = self.fm.frame_depth + arg0 = self.fm.get_frame_depth() arg1 = self.param_depth result = self.assembler._current_depths_cache if result[0] != arg0 or result[1] != arg1: @@ -450,8 +451,14 @@ def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" + + # returns a pair longevity/useful. Non-useful variables are ones that + # never appear in the assembler or it does not matter if they appear on + # stack or in registers. 
Main example is loop arguments that go + # only to guard operations or to jump or to finish produced = {} last_used = {} + useful = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -459,8 +466,11 @@ continue assert op.result not in produced produced[op.result] = i + opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) + if opnum != rop.JUMP and opnum != rop.FINISH: + useful[arg] = None if isinstance(arg, Box) and arg not in last_used: last_used[arg] = i if op.is_guard(): @@ -486,7 +496,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity + return longevity, useful def loc(self, v): if v is None: # xxx kludgy @@ -883,7 +893,7 @@ def consider_call_assembler(self, op, guard_op): descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, JitCellToken) jd = descr.outermost_jitdriver_sd assert jd is not None size = jd.portal_calldescr.get_result_size(self.translate_support_code) @@ -1042,16 +1052,32 @@ t = self._unpack_interiorfielddescr(op.getdescr()) ofs, itemsize, fieldsize, _ = t args = op.getarglist() - tmpvar = TempBox() - base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) - index_loc = self.rm.force_result_in_reg(tmpvar, op.getarg(1), - args) - # we're free to modify index now - value_loc = self.make_sure_var_in_reg(op.getarg(2), args) - self.possibly_free_vars(args) - self.rm.possibly_free_var(tmpvar) + if fieldsize.value == 1: + need_lower_byte = True + else: + need_lower_byte = False + box_base, box_index, box_value = args + base_loc = self.rm.make_sure_var_in_reg(box_base, args) + index_loc = self.rm.make_sure_var_in_reg(box_index, args) + value_loc = self.make_sure_var_in_reg(box_value, args, + need_lower_byte=need_lower_byte) + # If 'index_loc' is not an immediate, then we need a 'temp_loc' that + # is a register whose value will be destroyed. It's fine to destroy + # the same register as 'index_loc', but not the other ones. + self.rm.possibly_free_var(box_index) + if not isinstance(index_loc, ImmedLoc): + tempvar = TempBox() + temp_loc = self.rm.force_allocate_reg(tempvar, [box_base, + box_value]) + self.rm.possibly_free_var(tempvar) + else: + temp_loc = None + self.rm.possibly_free_var(box_base) + self.possibly_free_var(box_value) self.PerformDiscard(op, [base_loc, ofs, itemsize, fieldsize, - index_loc, value_loc]) + index_loc, temp_loc, value_loc]) + + consider_setinteriorfield_raw = consider_setinteriorfield_gc def consider_strsetitem(self, op): args = op.getarglist() @@ -1122,15 +1148,29 @@ else: sign_loc = imm0 args = op.getarglist() - tmpvar = TempBox() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) - index_loc = self.rm.force_result_in_reg(tmpvar, op.getarg(1), - args) - self.rm.possibly_free_vars_for_op(op) - self.rm.possibly_free_var(tmpvar) - result_loc = self.force_allocate_reg(op.result) + index_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) + # 'base' and 'index' are put in two registers (or one if 'index' + # is an immediate). 'result' can be in the same register as + # 'index' but must be in a different register than 'base'. + self.rm.possibly_free_var(op.getarg(1)) + result_loc = self.force_allocate_reg(op.result, [op.getarg(0)]) + assert isinstance(result_loc, RegLoc) + # two cases: 1) if result_loc is a normal register, use it as temp_loc + if not result_loc.is_xmm: + temp_loc = result_loc + else: + # 2) if result_loc is an xmm register, we (likely) need another + # temp_loc that is a normal register. 
It can be in the same + # register as 'index' but not 'base'. + tempvar = TempBox() + temp_loc = self.rm.force_allocate_reg(tempvar, [op.getarg(0)]) + self.rm.possibly_free_var(tempvar) + self.rm.possibly_free_var(op.getarg(0)) self.Perform(op, [base_loc, ofs, itemsize, fieldsize, - index_loc, sign_loc], result_loc) + index_loc, temp_loc, sign_loc], result_loc) + + consider_getinteriorfield_raw = consider_getinteriorfield_gc def consider_int_is_true(self, op, guard_op): # doesn't need arg to be in a register @@ -1283,13 +1323,50 @@ self.rm.possibly_free_var(tmpbox_low) self.rm.possibly_free_var(tmpbox_high) + def compute_hint_frame_locations(self, operations): + # optimization only: fill in the 'hint_frame_locations' dictionary + # of 'fm' based on the JUMP at the end of the loop, by looking + # at where we would like the boxes to be after the jump. + op = operations[-1] + if op.getopnum() != rop.JUMP: + return + self.final_jump_op = op + descr = op.getdescr() + assert isinstance(descr, TargetToken) + if descr._x86_loop_code != 0: + # if the target LABEL was already compiled, i.e. if it belongs + # to some already-compiled piece of code + self._compute_hint_frame_locations_from_descr(descr) + #else: + # The loop ends in a JUMP going back to a LABEL in the same loop. + # We cannot fill 'hint_frame_locations' immediately, but we can + # wait until the corresponding consider_label() to know where the + # we would like the boxes to be after the jump. + + def _compute_hint_frame_locations_from_descr(self, descr): + nonfloatlocs, floatlocs = descr._x86_arglocs + jump_op = self.final_jump_op + assert len(nonfloatlocs) == jump_op.numargs() + for i in range(jump_op.numargs()): + box = jump_op.getarg(i) + if isinstance(box, Box): + loc = nonfloatlocs[i] + if isinstance(loc, StackLoc): + assert box.type != FLOAT + self.fm.hint_frame_locations[box] = loc + else: + loc = floatlocs[i] + if isinstance(loc, StackLoc): + assert box.type == FLOAT + self.fm.hint_frame_locations[box] = loc + def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, TargetToken) + nonfloatlocs, floatlocs = descr._x86_arglocs self.jump_target_descr = descr - nonfloatlocs, floatlocs = assembler.target_arglocs(self.jump_target_descr) # compute 'tmploc' to be all_regs[0] by spilling what is there box = TempBox() box1 = TempBox() @@ -1327,7 +1404,7 @@ def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): shape = gcrootmap.get_basic_shape(IS_X86_64) - for v, val in self.fm.frame_bindings.items(): + for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) gcrootmap.add_frame_offset(shape, get_ebp_ofs(val.position)) @@ -1362,6 +1439,74 @@ # the FORCE_TOKEN operation returns directly 'ebp' self.rm.force_allocate_frame_reg(op.result) + def consider_label(self, op): + # XXX big refactoring needed? 
+ descr = op.getdescr() + assert isinstance(descr, TargetToken) + inputargs = op.getarglist() + floatlocs = [None] * len(inputargs) + nonfloatlocs = [None] * len(inputargs) + # + # we need to make sure that the tmpreg and xmmtmp are free + tmpreg = X86RegisterManager.all_regs[0] + tmpvar = TempBox() + self.rm.force_allocate_reg(tmpvar, selected_reg=tmpreg) + self.rm.possibly_free_var(tmpvar) + # + xmmtmp = X86XMMRegisterManager.all_regs[0] + tmpvar = TempBox() + self.xrm.force_allocate_reg(tmpvar, selected_reg=xmmtmp) + self.xrm.possibly_free_var(tmpvar) + # + # we need to make sure that no variable is stored in ebp + for arg in inputargs: + if self.loc(arg) is ebp: + loc2 = self.fm.loc(arg) + self.assembler.mc.MOV(loc2, ebp) + self.rm.bindings_to_frame_reg.clear() + # + for i in range(len(inputargs)): + arg = inputargs[i] + assert not isinstance(arg, Const) + loc = self.loc(arg) + assert not (loc is tmpreg or loc is xmmtmp or loc is ebp) + if arg.type == FLOAT: + floatlocs[i] = loc + else: + nonfloatlocs[i] = loc + if isinstance(loc, RegLoc): + self.fm.mark_as_free(arg) + descr._x86_arglocs = nonfloatlocs, floatlocs + descr._x86_loop_code = self.assembler.mc.get_relative_pos() + descr._x86_clt = self.assembler.current_clt + self.assembler.target_tokens_currently_compiling[descr] = None + self.possibly_free_vars_for_op(op) + # + # if the LABEL's descr is precisely the target of the JUMP at the + # end of the same loop, i.e. if what we are compiling is a single + # loop that ends up jumping to this LABEL, then we can now provide + # the hints about the expected position of the spilled variables. + jump_op = self.final_jump_op + if jump_op is not None and jump_op.getdescr() is descr: + self._compute_hint_frame_locations_from_descr(descr) + +## from pypy.rpython.annlowlevel import llhelper +## def fn(addr): +## print '...label:', hex(addr), nonfloatlocs +## FUNC = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) +## ll_disp = llhelper(FUNC, fn) +## faddr = rffi.cast(lltype.Signed, ll_disp) +## for i in range(16): +## self.assembler.mc.PUSH_r(i) +## self.assembler.mc.CALL_l(0) +## self.assembler.mc.POP(edi) +## self.assembler.mc.MOV(r11, imm(faddr)) +## self.assembler.mc.CALL(r11) +## for i in range(15, -1, -1): +## if i == esp.value: +## i -= 1 +## self.assembler.mc.POP_r(i) + def not_implemented_op(self, op): not_implemented("not implemented operation: %s" % op.getopname()) @@ -1404,8 +1549,11 @@ # i.e. the n'th word beyond the fixed frame size. return -WORD * (FRAME_FIXED_SIZE + position) +def _valid_addressing_size(size): + return size == 1 or size == 2 or size == 4 or size == 8 + def _get_scale(size): - assert size == 1 or size == 2 or size == 4 or size == 8 + assert _valid_addressing_size(size) if size < 4: return size - 1 # 1, 2 => 0, 1 else: @@ -1414,3 +1562,7 @@ def not_implemented(msg): os.write(2, '[x86/regalloc] %s\n' % msg) raise NotImplementedError(msg) + +# xxx hack: set a default value for TargetToken._x86_loop_code. +# If 0, we know that it is a LABEL that was not compiled yet. +TargetToken._x86_loop_code = 0 diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -17,7 +17,7 @@ class AssemblerLocation(object): # XXX: Is adding "width" here correct? 
- __slots__ = ('value', 'width') + _attrs_ = ('value', 'width', '_location_code') _immutable_ = True def _getregkey(self): return self.value @@ -25,6 +25,9 @@ def is_memory_reference(self): return self.location_code() in ('b', 's', 'j', 'a', 'm') + def location_code(self): + return self._location_code + def value_r(self): return self.value def value_b(self): return self.value def value_s(self): return self.value @@ -38,6 +41,8 @@ class StackLoc(AssemblerLocation): _immutable_ = True + _location_code = 'b' + def __init__(self, position, ebp_offset, num_words, type): assert ebp_offset < 0 # so no confusion with RegLoc.value self.position = position @@ -49,9 +54,6 @@ def __repr__(self): return '%d(%%ebp)' % (self.value,) - def location_code(self): - return 'b' - def assembler(self): return repr(self) @@ -63,8 +65,10 @@ self.is_xmm = is_xmm if self.is_xmm: self.width = 8 + self._location_code = 'x' else: self.width = WORD + self._location_code = 'r' def __repr__(self): if self.is_xmm: return rx86.R.xmmnames[self.value] @@ -79,12 +83,6 @@ assert not self.is_xmm return RegLoc(rx86.high_byte(self.value), False) - def location_code(self): - if self.is_xmm: - return 'x' - else: - return 'r' - def assembler(self): return '%' + repr(self) @@ -97,14 +95,13 @@ class ImmedLoc(AssemblerLocation): _immutable_ = True width = WORD + _location_code = 'i' + def __init__(self, value): from pypy.rpython.lltypesystem import rffi, lltype # force as a real int self.value = rffi.cast(lltype.Signed, value) - def location_code(self): - return 'i' - def getint(self): return self.value @@ -149,9 +146,6 @@ info = getattr(self, attr, '?') return '' % (self._location_code, info) - def location_code(self): - return self._location_code - def value_a(self): return self.loc_a @@ -191,6 +185,7 @@ # we want a width of 8 (... I think. Check this!) _immutable_ = True width = 8 + _location_code = 'j' def __init__(self, address): self.value = address @@ -198,9 +193,6 @@ def __repr__(self): return '' % (self.value,) - def location_code(self): - return 'j' - if IS_X86_32: class FloatImmedLoc(AssemblerLocation): # This stands for an immediate float. It cannot be directly used in @@ -209,6 +201,7 @@ # instead; see below. _immutable_ = True width = 8 + _location_code = '#' # don't use me def __init__(self, floatstorage): self.aslonglong = floatstorage @@ -229,9 +222,6 @@ floatvalue = longlong.getrealfloat(self.aslonglong) return '' % (floatvalue,) - def location_code(self): - raise NotImplementedError - if IS_X86_64: def FloatImmedLoc(floatstorage): from pypy.rlib.longlong2float import float2longlong @@ -270,6 +260,11 @@ else: raise AssertionError(methname + " undefined") +def _missing_binary_insn(name, code1, code2): + raise AssertionError(name + "_" + code1 + code2 + " missing") +_missing_binary_insn._dont_inline_ = True + + class LocationCodeBuilder(object): _mixin_ = True @@ -303,6 +298,8 @@ else: # For this case, we should not need the scratch register more than here. self._load_scratch(val2) + if name == 'MOV' and loc1 is X86_64_SCRATCH_REG: + return # don't need a dummy "MOV r11, r11" INSN(self, loc1, X86_64_SCRATCH_REG) def invoke(self, codes, val1, val2): @@ -310,6 +307,23 @@ _rx86_getattr(self, methname)(val1, val2) invoke._annspecialcase_ = 'specialize:arg(1)' + def has_implementation_for(loc1, loc2): + # A memo function that returns True if there is any NAME_xy that could match. + # If it returns False we know the whole subcase can be omitted from translated + # code. 
Without this hack, the size of most _binaryop INSN functions ends up + # quite large in C code. + if loc1 == '?': + return any([has_implementation_for(loc1, loc2) + for loc1 in unrolling_location_codes]) + methname = name + "_" + loc1 + loc2 + if not hasattr(rx86.AbstractX86CodeBuilder, methname): + return False + # any NAME_j should have a NAME_m as a fallback, too. Check it + if loc1 == 'j': assert has_implementation_for('m', loc2), methname + if loc2 == 'j': assert has_implementation_for(loc1, 'm'), methname + return True + has_implementation_for._annspecialcase_ = 'specialize:memo' + def INSN(self, loc1, loc2): code1 = loc1.location_code() code2 = loc2.location_code() @@ -325,6 +339,8 @@ assert code2 not in ('j', 'i') for possible_code2 in unrolling_location_codes: + if not has_implementation_for('?', possible_code2): + continue if code2 == possible_code2: val2 = getattr(loc2, "value_" + possible_code2)() # @@ -335,28 +351,32 @@ # # Regular case for possible_code1 in unrolling_location_codes: + if not has_implementation_for(possible_code1, + possible_code2): + continue if code1 == possible_code1: val1 = getattr(loc1, "value_" + possible_code1)() # More faking out of certain operations for x86_64 - if possible_code1 == 'j' and not rx86.fits_in_32bits(val1): + fits32 = rx86.fits_in_32bits + if possible_code1 == 'j' and not fits32(val1): val1 = self._addr_as_reg_offset(val1) invoke(self, "m" + possible_code2, val1, val2) - elif possible_code2 == 'j' and not rx86.fits_in_32bits(val2): + return + if possible_code2 == 'j' and not fits32(val2): val2 = self._addr_as_reg_offset(val2) invoke(self, possible_code1 + "m", val1, val2) - elif possible_code1 == 'm' and not rx86.fits_in_32bits(val1[1]): + return + if possible_code1 == 'm' and not fits32(val1[1]): val1 = self._fix_static_offset_64_m(val1) - invoke(self, "a" + possible_code2, val1, val2) - elif possible_code2 == 'm' and not rx86.fits_in_32bits(val2[1]): + if possible_code2 == 'm' and not fits32(val2[1]): val2 = self._fix_static_offset_64_m(val2) - invoke(self, possible_code1 + "a", val1, val2) - else: - if possible_code1 == 'a' and not rx86.fits_in_32bits(val1[3]): - val1 = self._fix_static_offset_64_a(val1) - if possible_code2 == 'a' and not rx86.fits_in_32bits(val2[3]): - val2 = self._fix_static_offset_64_a(val2) - invoke(self, possible_code1 + possible_code2, val1, val2) + if possible_code1 == 'a' and not fits32(val1[3]): + val1 = self._fix_static_offset_64_a(val1) + if possible_code2 == 'a' and not fits32(val2[3]): + val2 = self._fix_static_offset_64_a(val2) + invoke(self, possible_code1 + possible_code2, val1, val2) return + _missing_binary_insn(name, code1, code2) return func_with_new_name(INSN, "INSN_" + name) @@ -431,12 +451,14 @@ def _fix_static_offset_64_m(self, (basereg, static_offset)): # For cases where an AddressLoc has the location_code 'm', but # where the static offset does not fit in 32-bits. We have to fall - # back to the X86_64_SCRATCH_REG. Note that this returns a location - # encoded as mode 'a'. These are all possibly rare cases; don't try + # back to the X86_64_SCRATCH_REG. Returns a new location encoded + # as mode 'm' too. These are all possibly rare cases; don't try # to reuse a past value of the scratch register at all. 
self._scratch_register_known = False self.MOV_ri(X86_64_SCRATCH_REG.value, static_offset) - return (basereg, X86_64_SCRATCH_REG.value, 0, 0) + self.LEA_ra(X86_64_SCRATCH_REG.value, + (basereg, X86_64_SCRATCH_REG.value, 0, 0)) + return (X86_64_SCRATCH_REG.value, 0) def _fix_static_offset_64_a(self, (basereg, scalereg, scale, static_offset)): diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -215,14 +215,3 @@ super(CPU_X86_64, self).__init__(*args, **kwargs) CPU = CPU386 - -# silence warnings -##history.LoopToken._x86_param_depth = 0 -##history.LoopToken._x86_arglocs = (None, None) -##history.LoopToken._x86_frame_depth = 0 -##history.LoopToken._x86_bootstrap_code = 0 -##history.LoopToken._x86_direct_bootstrap_code = 0 -##history.LoopToken._x86_loop_code = 0 -##history.LoopToken._x86_debug_checksum = 0 -##compile.AbstractFailDescr._x86_current_depths = (0, 0) -##compile.AbstractFailDescr._x86_adr_jump_offset = 0 diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -745,6 +745,7 @@ assert insnname_template.count('*') == 1 add_insn('x', register(2), '\xC0') add_insn('j', abs_, immediate(2)) + add_insn('m', mem_reg_plus_const(2)) define_pxmm_insn('PADDQ_x*', '\xD4') define_pxmm_insn('PSUBQ_x*', '\xFB') diff --git a/pypy/jit/backend/x86/test/test_fficall.py b/pypy/jit/backend/x86/test/test_fficall.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/x86/test/test_fficall.py @@ -0,0 +1,8 @@ +import py +from pypy.jit.metainterp.test import test_fficall +from pypy.jit.backend.x86.test.test_basic import Jit386Mixin + +class TestFfiLookups(Jit386Mixin, test_fficall.FfiLookupTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_fficall.py + supports_all = True diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -4,7 +4,7 @@ import py from pypy.jit.metainterp.history import BoxInt, ConstInt,\ - BoxPtr, ConstPtr, TreeLoop + BoxPtr, ConstPtr, TreeLoop, TargetToken from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.codewriter import heaptracker from pypy.jit.codewriter.effectinfo import EffectInfo @@ -113,6 +113,8 @@ descr0 = cpu.fielddescrof(S, 'int') ptr0 = struct_ref + targettoken = TargetToken() + namespace = locals().copy() def test_basic(self): @@ -136,6 +138,7 @@ def test_bug_0(self): ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8] + label(i0, i1, i2, i3, i4, i5, i6, i7, i8, descr=targettoken) guard_value(i2, 1) [i2, i3, i4, i5, i6, i7, i0, i1, i8] guard_class(i4, 138998336) [i4, i5, i6, i7, i0, i1, i8] i11 = getfield_gc(i4, descr=descr0) @@ -163,7 +166,7 @@ guard_false(i32) [i4, i6, i7, i0, i1, i24] i33 = getfield_gc(i0, descr=descr0) guard_value(i33, ConstPtr(ptr0)) [i4, i6, i7, i0, i1, i33, i24] - jump(i0, i1, 1, 17, i4, ConstPtr(ptr0), i6, i7, i24) + jump(i0, i1, 1, 17, i4, ConstPtr(ptr0), i6, i7, i24, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0, 0, 0, 0, 0, 0], run=False) diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -5,10 +5,11 @@ def test_compile_bridge_not_deeper(self): ops = ''' [i0] 
+ label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' loop = self.interpret(ops, [0]) assert self.getint(0) == 20 @@ -26,14 +27,15 @@ def test_compile_bridge_deeper(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' loop = self.interpret(ops, [0]) - previous = loop.token._x86_frame_depth - assert loop.token._x86_param_depth == 0 + previous = loop._jitcelltoken.compiled_loop_token.frame_depth + assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 assert self.getint(0) == 20 ops = ''' [i1] @@ -42,17 +44,17 @@ i5 = int_add(i4, 1) i6 = int_add(i5, 1) i7 = int_add(i5, i4) + force_spill(i5) i8 = int_add(i7, 1) i9 = int_add(i8, 1) finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - descr = loop.operations[2].getdescr() + descr = loop.operations[3].getdescr() new = descr._x86_bridge_frame_depth - assert descr._x86_bridge_param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? - if IS_X86_32: - assert new > previous + assert descr._x86_bridge_param_depth == 0 + # the force_spill() forces the stack to grow + assert new > previous self.cpu.set_future_value_int(0, 0) fail = self.run(loop) assert fail.identifier == 2 @@ -64,21 +66,23 @@ def test_bridge_jump_to_other_loop(self): loop = self.interpret(''' [i0, i10, i11, i12, i13, i14, i15, i16] + label(i0, i10, i11, i12, i13, i14, i15, i16, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1, i10, i11, i12, i13, i14, i15, i16) + jump(i1, i10, i11, i12, i13, i14, i15, i16, descr=targettoken) ''', [0]) other_loop = self.interpret(''' [i3] + label(i3, descr=targettoken2) guard_false(i3, descr=fdescr2) [i3] - jump(i3) + jump(i3, descr=targettoken2) ''', [1]) ops = ''' [i3] - jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=looptoken) + jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=targettoken) ''' - bridge = self.attach_bridge(ops, other_loop, 0, looptoken=loop.token) + bridge = self.attach_bridge(ops, other_loop, 1) self.cpu.set_future_value_int(0, 1) fail = self.run(other_loop) assert fail.identifier == 1 @@ -86,6 +90,7 @@ def test_bridge_jumps_to_self_deeper(self): loop = self.interpret(''' [i0, i1, i2, i31, i32, i33] + label(i0, i1, i2, i31, i32, i33, descr=targettoken) i98 = same_as(0) i99 = same_as(1) i30 = int_add(i1, i2) @@ -94,7 +99,7 @@ guard_false(i4) [i98, i3] i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] - jump(i3, i30, 1, i30, i30, i30) + jump(i3, i30, 1, i30, i30, i30, descr=targettoken) ''', [0]) assert self.getint(0) == 0 assert self.getint(1) == 1 @@ -104,17 +109,19 @@ i8 = int_add(i3, 1) i6 = int_add(i8, i10) i7 = int_add(i3, i6) + force_spill(i6) + force_spill(i7) + force_spill(i8) i12 = int_add(i7, i8) i11 = int_add(i12, i6) - jump(i3, i12, i11, i10, i6, i7, descr=looptoken) + jump(i3, i12, i11, i10, i6, i7, descr=targettoken) ''' - bridge = self.attach_bridge(ops, loop, 5, looptoken=loop.token) - guard_op = loop.operations[5] - loop_frame_depth = loop.token._x86_frame_depth - assert loop.token._x86_param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? 
- if IS_X86_32: - assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth + loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth + bridge = self.attach_bridge(ops, loop, 6) + guard_op = loop.operations[6] + assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 + # the force_spill() forces the stack to grow + assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth assert guard_op.getdescr()._x86_bridge_param_depth == 0 self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 0) @@ -126,6 +133,7 @@ def test_bridge_jumps_to_self_shallower(self): loop = self.interpret(''' [i0, i1, i2] + label(i0, i1, i2, descr=targettoken) i98 = same_as(0) i99 = same_as(1) i3 = int_add(i0, 1) @@ -133,15 +141,15 @@ guard_false(i4) [i98, i3] i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] - jump(i3, i1, i2) + jump(i3, i1, i2, descr=targettoken) ''', [0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' [i97, i3] - jump(i3, 0, 1, descr=looptoken) + jump(i3, 0, 1, descr=targettoken) ''' - bridge = self.attach_bridge(ops, loop, 4, looptoken=loop.token) + bridge = self.attach_bridge(ops, loop, 5) self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 0) self.cpu.set_future_value_int(2, 0) diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -4,7 +4,7 @@ import py from pypy.jit.metainterp.history import BoxInt, ConstInt,\ - BoxPtr, ConstPtr, LoopToken, BasicFailDescr + BoxPtr, ConstPtr, BasicFailDescr, JitCellToken, TargetToken from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.detect_cpu import getcpuclass @@ -96,10 +96,16 @@ raising_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, EffectInfo.MOST_GENERAL) + targettoken = TargetToken() + targettoken2 = TargetToken() fdescr1 = BasicFailDescr(1) fdescr2 = BasicFailDescr(2) fdescr3 = BasicFailDescr(3) + def setup_method(self, meth): + self.targettoken._x86_loop_code = 0 + self.targettoken2._x86_loop_code = 0 + def f1(x): return x+1 @@ -134,7 +140,8 @@ def interpret(self, ops, args, run=True): loop = self.parse(ops) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) for i, arg in enumerate(args): if isinstance(arg, int): self.cpu.set_future_value_int(i, arg) @@ -145,10 +152,18 @@ assert isinstance(lltype.typeOf(arg), lltype.Ptr) llgcref = lltype.cast_opaque_ptr(llmemory.GCREF, arg) self.cpu.set_future_value_ref(i, llgcref) + loop._jitcelltoken = looptoken if run: - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken) return loop + def prepare_loop(self, ops): + loop = self.parse(ops) + regalloc = RegAlloc(self.cpu.assembler, False) + regalloc.prepare_loop(loop.inputargs, loop.operations, + loop.original_jitcell_token, []) + return regalloc + def getint(self, index): return self.cpu.get_latest_value_int(index) @@ -167,10 +182,7 @@ gcref = self.cpu.get_latest_value_ref(index) return lltype.cast_opaque_ptr(T, gcref) - def attach_bridge(self, ops, loop, guard_op_index, looptoken=None, **kwds): - if looptoken is not None: - self.namespace = self.namespace.copy() - self.namespace['looptoken'] = looptoken + def attach_bridge(self, ops, loop, guard_op_index, **kwds): guard_op = 
loop.operations[guard_op_index] assert guard_op.is_guard() bridge = self.parse(ops, **kwds) @@ -178,20 +190,21 @@ [box.type for box in guard_op.getfailargs()]) faildescr = guard_op.getdescr() self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations, - loop.token) + loop._jitcelltoken) return bridge def run(self, loop): - return self.cpu.execute_token(loop.token) + return self.cpu.execute_token(loop._jitcelltoken) class TestRegallocSimple(BaseTestRegalloc): def test_simple_loop(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' self.interpret(ops, [0]) assert self.getint(0) == 20 @@ -199,27 +212,29 @@ def test_two_loops_and_a_bridge(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_add(i0, 1) i5 = int_lt(i4, 20) guard_true(i5) [i4, i1, i2, i3] - jump(i4, i1, i2, i3) + jump(i4, i1, i2, i3, descr=targettoken) ''' loop = self.interpret(ops, [0, 0, 0, 0]) ops2 = ''' [i5] + label(i5, descr=targettoken2) i1 = int_add(i5, 1) i3 = int_add(i1, 1) i4 = int_add(i3, 1) i2 = int_lt(i4, 30) guard_true(i2) [i4] - jump(i4) + jump(i4, descr=targettoken2) ''' loop2 = self.interpret(ops2, [0]) bridge_ops = ''' [i4] - jump(i4, i4, i4, i4, descr=looptoken) + jump(i4, i4, i4, i4, descr=targettoken) ''' - bridge = self.attach_bridge(bridge_ops, loop2, 4, looptoken=loop.token) + bridge = self.attach_bridge(bridge_ops, loop2, 5) self.cpu.set_future_value_int(0, 0) self.run(loop2) assert self.getint(0) == 31 @@ -230,10 +245,11 @@ def test_pointer_arg(self): ops = ''' [i0, p0] + label(i0, p0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 10) guard_true(i2) [p0] - jump(i1, p0) + jump(i1, p0, descr=targettoken) ''' S = lltype.GcStruct('S') ptr = lltype.malloc(S) @@ -311,10 +327,11 @@ def test_spill_for_constant(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_add(3, i1) i5 = int_lt(i4, 30) guard_true(i5) [i0, i4, i2, i3] - jump(1, i4, 3, 4) + jump(1, i4, 3, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1, 30, 3, 4] @@ -322,31 +339,34 @@ def test_spill_for_constant_lshift(self): ops = ''' [i0, i2, i1, i3] + label(i0, i2, i1, i3, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, 3, i5, 4) + jump(i4, 3, i5, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, i5, 3, 4) + jump(i4, i5, 3, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] ops = ''' [i0, i3, i1, i2] + label(i0, i3, i1, i2, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, 4, i5, 3) + jump(i4, 4, i5, 3, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] @@ -354,11 +374,12 @@ def test_result_selected_reg_via_neg(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i6 = int_neg(i2) i7 = int_add(1, i1) i4 = int_lt(i7, 10) guard_true(i4) [i0, i6, i7] - jump(1, i7, i2, i6) + jump(1, i7, i2, i6, descr=targettoken) ''' self.interpret(ops, [0, 0, 3, 0]) assert self.getints(3) == [1, -3, 10] @@ -366,11 +387,12 @@ def 
test_compare_memory_result_survives(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_lt(i0, i1) i5 = int_add(i3, 1) i6 = int_lt(i5, 30) guard_true(i6) [i4] - jump(i0, i1, i4, i5) + jump(i0, i1, i4, i5, descr=targettoken) ''' self.interpret(ops, [0, 10, 0, 0]) assert self.getint(0) == 1 @@ -378,10 +400,11 @@ def test_jump_different_args(self): ops = ''' [i0, i15, i16, i18, i1, i2, i3] + label(i0, i15, i16, i18, i1, i2, i3, descr=targettoken) i4 = int_add(i3, 1) i5 = int_lt(i4, 20) guard_true(i5) [i2, i1] - jump(i0, i18, i15, i16, i2, i1, i4) + jump(i0, i18, i15, i16, i2, i1, i4, descr=targettoken) ''' self.interpret(ops, [0, 1, 2, 3]) @@ -422,6 +445,35 @@ self.run(loop) assert self.getints(9) == range(9) + def test_loopargs(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_2(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + finish(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_3(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + guard_true(i4) [i0, i1, i2, i3, i4] + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + class TestRegallocCompOps(BaseTestRegalloc): def test_cmp_op_0(self): @@ -438,6 +490,7 @@ class TestRegallocMoreRegisters(BaseTestRegalloc): cpu = BaseTestRegalloc.cpu + targettoken = TargetToken() S = lltype.GcStruct('S', ('field', lltype.Char)) fielddescr = cpu.fielddescrof(S, 'field') @@ -510,6 +563,7 @@ def test_division_optimized(self): ops = ''' [i7, i6] + label(i7, i6, descr=targettoken) i18 = int_floordiv(i7, i6) i19 = int_xor(i7, i6) i21 = int_lt(i19, 0) @@ -517,7 +571,7 @@ i23 = int_is_true(i22) i24 = int_eq(i6, 4) guard_false(i24) [i18] - jump(i18, i6) + jump(i18, i6, descr=targettoken) ''' self.interpret(ops, [10, 4]) assert self.getint(0) == 2 @@ -588,7 +642,8 @@ ''' loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] - assert loop.token._x86_param_depth == self.expected_param_depth(1) + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(1) def test_two_calls(self): ops = ''' @@ -599,7 +654,8 @@ ''' loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) assert self.getints(11) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] - assert loop.token._x86_param_depth == self.expected_param_depth(2) + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(2) def test_call_many_arguments(self): # NB: The first and last arguments in the call are constants. 
This @@ -612,7 +668,8 @@ ''' loop = self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9]) assert self.getint(0) == 55 - assert loop.token._x86_param_depth == self.expected_param_depth(10) + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(10) def test_bridge_calls_1(self): ops = ''' diff --git a/pypy/jit/backend/x86/test/test_regalloc2.py b/pypy/jit/backend/x86/test/test_regalloc2.py --- a/pypy/jit/backend/x86/test/test_regalloc2.py +++ b/pypy/jit/backend/x86/test/test_regalloc2.py @@ -1,6 +1,6 @@ import py from pypy.jit.metainterp.history import ResOperation, BoxInt, ConstInt,\ - BoxPtr, ConstPtr, BasicFailDescr, LoopToken + BoxPtr, ConstPtr, BasicFailDescr, JitCellToken from pypy.jit.metainterp.resoperation import rop from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.arch import WORD @@ -20,7 +20,7 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, 9) cpu.execute_token(looptoken) @@ -43,7 +43,7 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, -10) cpu.execute_token(looptoken) @@ -140,7 +140,7 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, -13) cpu.set_future_value_int(1, 10) @@ -255,7 +255,7 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, 17) cpu.set_future_value_int(1, -20) diff --git a/pypy/jit/backend/x86/test/test_regloc.py b/pypy/jit/backend/x86/test/test_regloc.py --- a/pypy/jit/backend/x86/test/test_regloc.py +++ b/pypy/jit/backend/x86/test/test_regloc.py @@ -146,8 +146,10 @@ expected_instructions = ( # mov r11, 0xFEDCBA9876543210 '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' - # mov rcx, [rdx+r11] - '\x4A\x8B\x0C\x1A' + # lea r11, [rdx+r11] + '\x4E\x8D\x1C\x1A' + # mov rcx, [r11] + '\x49\x8B\x0B' ) assert cb.getvalue() == expected_instructions @@ -174,6 +176,30 @@ # ------------------------------------------------------------ + def test_MOV_64bit_constant_into_r11(self): + base_constant = 0xFEDCBA9876543210 + cb = LocationCodeBuilder64() + cb.MOV(r11, imm(base_constant)) + + expected_instructions = ( + # mov r11, 0xFEDCBA9876543210 + '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + ) + assert cb.getvalue() == expected_instructions + + def test_MOV_64bit_address_into_r11(self): + base_addr = 0xFEDCBA9876543210 + cb = LocationCodeBuilder64() + cb.MOV(r11, heap(base_addr)) + + expected_instructions = ( + # mov r11, 0xFEDCBA9876543210 + '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + + # mov r11, [r11] + '\x4D\x8B\x1B' + ) + assert cb.getvalue() == expected_instructions + def test_MOV_immed32_into_64bit_address_1(self): immed = -0x01234567 base_addr = 0xFEDCBA9876543210 @@ -217,8 +243,10 @@ expected_instructions = ( # mov r11, 0xFEDCBA9876543210 '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' - # mov [rdx+r11], -0x01234567 - '\x4A\xC7\x04\x1A\x99\xBA\xDC\xFE' + # lea r11, [rdx+r11] + '\x4E\x8D\x1C\x1A' + # mov [r11], -0x01234567 + '\x49\xC7\x03\x99\xBA\xDC\xFE' ) assert cb.getvalue() == expected_instructions @@ -300,8 +328,10 @@ '\x48\xBA\xEF\xCD\xAB\x89\x67\x45\x23\x01' # mov r11, 0xFEDCBA9876543210 
'\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' - # mov [rax+r11], rdx - '\x4A\x89\x14\x18' + # lea r11, [rax+r11] + '\x4E\x8D\x1C\x18' + # mov [r11], rdx + '\x49\x89\x13' # pop rdx '\x5A' ) diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -1,9 +1,10 @@ import py from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr, rclass from pypy.rpython.annlowlevel import llhelper -from pypy.jit.metainterp.history import ResOperation, LoopToken +from pypy.jit.metainterp.history import ResOperation, TargetToken, JitCellToken from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstFloat, - ConstPtr, Box, BoxFloat, BasicFailDescr) + ConstPtr, Box, BoxFloat, + BasicFailDescr) from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.arch import WORD from pypy.jit.backend.x86.rx86 import fits_in_32bits @@ -279,7 +280,7 @@ descr=BasicFailDescr()), ] ops[-2].setfailargs([i1]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([b], ops, looptoken) if op == rop.INT_IS_TRUE: self.cpu.set_future_value_int(0, b.value) @@ -329,7 +330,7 @@ ] ops[-2].setfailargs([i1]) inputargs = [i for i in (a, b) if isinstance(i, Box)] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, ops, looptoken) for i, box in enumerate(inputargs): self.cpu.set_future_value_int(i, box.value) @@ -353,9 +354,10 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() + targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.number = 17 class FakeString(object): def __init__(self, val): @@ -365,14 +367,15 @@ return self.val operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0], None), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[3].setfailargs([i1]) + operations[-2].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) name, loopaddress, loopsize = agent.functions[0] assert name == "Loop # 17: hello (loop counter 0)" @@ -385,7 +388,7 @@ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0], None), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -408,11 +411,13 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] debug._log = dlog = debug.DebugLog() @@ -455,6 +460,9 @@ EffectInfo.MOST_GENERAL, ffi_flags=-1) calldescr.get_call_conv = lambda: ffi # <==== hack + # ^^^ we patch get_call_conv() so that the test also makes sense + # on Linux, because clibffi.get_call_conv() would 
always + # return FFI_DEFAULT_ABI on non-Windows platforms. funcbox = ConstInt(rawstart) i1 = BoxInt() i2 = BoxInt() @@ -496,7 +504,7 @@ ops[3].setfailargs([]) ops[5].setfailargs([]) ops[7].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) self.cpu.set_future_value_int(0, 123450) @@ -520,19 +528,21 @@ loop = """ [i0] + label(i0, descr=targettoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1) + jump(i1, descr=targettoken) """ - ops = parse(loop) + ops = parse(loop, namespace={'targettoken': TargetToken()}) debug._log = dlog = debug.DebugLog() try: self.cpu.assembler.set_debug(True) - self.cpu.compile_loop(ops.inputargs, ops.operations, ops.token) + looptoken = JitCellToken() + self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(ops.token) + self.cpu.execute_token(looptoken) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] assert struct.i == 10 @@ -544,16 +554,18 @@ def test_debugger_checksum(self): loop = """ [i0] + label(i0, descr=targettoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1) + jump(i1, descr=targettoken) """ - ops = parse(loop) + ops = parse(loop, namespace={'targettoken': TargetToken()}) self.cpu.assembler.set_debug(True) - self.cpu.compile_loop(ops.inputargs, ops.operations, ops.token) + looptoken = JitCellToken() + self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(ops.token) - assert ops.token._x86_debug_checksum == sum([op.getopnum() + self.cpu.execute_token(looptoken) + assert looptoken._x86_debug_checksum == sum([op.getopnum() for op in ops.operations]) diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -457,6 +457,46 @@ def test_compile_framework_7(self): self.run('compile_framework_7') + def define_compile_framework_7_interior(cls): + # Array of structs containing pointers (test the write barrier + # for setinteriorfield_gc) + S = lltype.GcStruct('S', ('i', lltype.Signed)) + A = lltype.GcArray(lltype.Struct('entry', ('x', lltype.Ptr(S)), + ('y', lltype.Ptr(S)), + ('z', lltype.Ptr(S)))) + class Glob: + a = lltype.nullptr(A) + glob = Glob() + # + def make_s(i): + s = lltype.malloc(S) + s.i = i + return s + # + @unroll_safe + def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): + a = glob.a + if not a: + a = glob.a = lltype.malloc(A, 10) + i = 0 + while i < 10: + a[i].x = make_s(n + i * 100 + 1) + a[i].y = make_s(n + i * 100 + 2) + a[i].z = make_s(n + i * 100 + 3) + i += 1 + i = 0 + while i < 10: + check(a[i].x.i == n + i * 100 + 1) + check(a[i].y.i == n + i * 100 + 2) + check(a[i].z.i == n + i * 100 + 3) + i += 1 + n -= x.foo + return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s + return None, f, None + + def test_compile_framework_7_interior(self): + self.run('compile_framework_7_interior') + def define_compile_framework_8(cls): # Array of pointers, of unknown length (test write_barrier_from_array) def before(n, x): diff --git a/pypy/jit/backend/x86/test/test_ztranslation.py b/pypy/jit/backend/x86/test/test_ztranslation.py --- a/pypy/jit/backend/x86/test/test_ztranslation.py +++ b/pypy/jit/backend/x86/test/test_ztranslation.py @@ -1,6 +1,6 @@ import py, os, sys from pypy.tool.udir import 
udir -from pypy.rlib.jit import JitDriver, unroll_parameters +from pypy.rlib.jit import JitDriver, unroll_parameters, set_param from pypy.rlib.jit import PARAMETERS, dont_look_inside from pypy.rlib.jit import promote from pypy.jit.metainterp.jitprof import Profiler @@ -47,9 +47,9 @@ def f(i, j): for param, _ in unroll_parameters: defl = PARAMETERS[param] - jitdriver.set_param(param, defl) - jitdriver.set_param("threshold", 3) - jitdriver.set_param("trace_eagerness", 2) + set_param(jitdriver, param, defl) + set_param(jitdriver, "threshold", 3) + set_param(jitdriver, "trace_eagerness", 2) total = 0 frame = Frame(i) while frame.i > 3: @@ -213,8 +213,8 @@ else: return Base() def myportal(i): - jitdriver.set_param("threshold", 3) - jitdriver.set_param("trace_eagerness", 2) + set_param(jitdriver, "threshold", 3) + set_param(jitdriver, "trace_eagerness", 2) total = 0 n = i while True: diff --git a/pypy/jit/backend/x86/tool/viewcode.py b/pypy/jit/backend/x86/tool/viewcode.py --- a/pypy/jit/backend/x86/tool/viewcode.py +++ b/pypy/jit/backend/x86/tool/viewcode.py @@ -58,7 +58,7 @@ assert not p.returncode, ('Encountered an error running objdump: %s' % stderr) # drop some objdump cruft - lines = stdout.splitlines()[6:] + lines = stdout.splitlines(True)[6:] # drop some objdump cruft return format_code_dump_with_labels(originaddr, lines, label_list) def format_code_dump_with_labels(originaddr, lines, label_list): @@ -97,7 +97,7 @@ stdout, stderr = p.communicate() assert not p.returncode, ('Encountered an error running nm: %s' % stderr) - for line in stdout.splitlines(): + for line in stdout.splitlines(True): match = re_symbolentry.match(line) if match: addr = long(match.group(1), 16) diff --git a/pypy/jit/codewriter/call.py b/pypy/jit/codewriter/call.py --- a/pypy/jit/codewriter/call.py +++ b/pypy/jit/codewriter/call.py @@ -212,7 +212,10 @@ elidable = False loopinvariant = False if op.opname == "direct_call": - func = getattr(get_funcobj(op.args[0].value), '_callable', None) + funcobj = get_funcobj(op.args[0].value) + assert getattr(funcobj, 'calling_conv', 'c') == 'c', ( + "%r: getcalldescr() with a non-default call ABI" % (op,)) + func = getattr(funcobj, '_callable', None) elidable = getattr(func, "_elidable_function_", False) loopinvariant = getattr(func, "_jit_loop_invariant_", False) if loopinvariant: diff --git a/pypy/jit/codewriter/codewriter.py b/pypy/jit/codewriter/codewriter.py --- a/pypy/jit/codewriter/codewriter.py +++ b/pypy/jit/codewriter/codewriter.py @@ -104,6 +104,8 @@ else: name = 'unnamed' % id(ssarepr) i = 1 + # escape <lambda> names for windows + name = name.replace('<lambda>', '_(lambda)_') extra = '' while name+extra in self._seen_files: i += 1 diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -48,6 +48,8 @@ OS_LIBFFI_PREPARE = 60 OS_LIBFFI_PUSH_ARG = 61 OS_LIBFFI_CALL = 62 + OS_LIBFFI_GETARRAYITEM = 63 + OS_LIBFFI_SETARRAYITEM = 64 # OS_LLONG_INVERT = 69 OS_LLONG_ADD = 70 @@ -78,6 +80,9 @@ # OS_MATH_SQRT = 100 + # for debugging: + _OS_CANRAISE = set([OS_NONE, OS_STR2UNICODE, OS_LIBFFI_CALL]) + def __new__(cls, readonly_descrs_fields, readonly_descrs_arrays, write_descrs_fields, write_descrs_arrays, extraeffect=EF_CAN_RAISE, @@ -116,6 +121,8 @@ result.extraeffect = extraeffect result.can_invalidate = can_invalidate result.oopspecindex = oopspecindex + if result.check_can_raise(): + assert oopspecindex in cls._OS_CANRAISE cls._cache[key] = result return result @@ -125,6 +132,10 @@ def 
check_can_invalidate(self): return self.can_invalidate + def check_is_elidable(self): + return (self.extraeffect == self.EF_ELIDABLE_CAN_RAISE or + self.extraeffect == self.EF_ELIDABLE_CANNOT_RAISE) + def check_forces_virtual_or_virtualizable(self): return self.extraeffect >= self.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE @@ -230,12 +241,15 @@ return op.opname == 'jit_force_quasi_immutable' class RandomEffectsAnalyzer(BoolGraphAnalyzer): - def analyze_direct_call(self, graph, seen=None): - if hasattr(graph, "func") and hasattr(graph.func, "_ptr"): - if graph.func._ptr._obj.random_effects_on_gcobjs: + def analyze_external_call(self, op, seen=None): + try: + funcobj = op.args[0].value._obj + if funcobj.random_effects_on_gcobjs: return True - return super(RandomEffectsAnalyzer, self).analyze_direct_call(graph, - seen) + except (AttributeError, lltype.DelayedPointer): + return True # better safe than sorry + return super(RandomEffectsAnalyzer, self).analyze_external_call( + op, seen) def analyze_simple_operation(self, op, graphinfo): return False diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -15,6 +15,8 @@ from pypy.translator.simplify import get_funcobj from pypy.translator.unsimplify import varoftype +class UnsupportedMallocFlags(Exception): + pass def transform_graph(graph, cpu=None, callcontrol=None, portal_jd=None): """Transform a control flow graph to make it suitable for @@ -205,7 +207,24 @@ if op.args[0] in self.vable_array_vars: self.vable_array_vars[op.result]= self.vable_array_vars[op.args[0]] - rewrite_op_cast_pointer = rewrite_op_same_as + def rewrite_op_cast_pointer(self, op): + newop = self.rewrite_op_same_as(op) + assert newop is None + return + # disabled for now + if (self._is_rclass_instance(op.args[0]) and + self._is_rclass_instance(op.result)): + FROM = op.args[0].concretetype.TO + TO = op.result.concretetype.TO + if lltype._castdepth(TO, FROM) > 0: + vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, TO) + const_vtable = Constant(vtable, lltype.typeOf(vtable)) + return [None, # hack, do the right renaming from op.args[0] to op.result + SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + + def rewrite_op_jit_record_known_class(self, op): + return SpaceOperation("record_known_class", [op.args[0], op.args[1]], None) + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass @@ -481,8 +500,22 @@ def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) ARRAY = op.args[0].value - return self._do_builtin_call(op, 'raw_malloc', + name = 'raw_malloc' + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[2]], extra = (ARRAY,), extrakey = ARRAY) @@ -844,6 +877,10 @@ if self._is_gc(op.args[0]): return op + def rewrite_op_cast_opaque_ptr(self, op): + # None causes the result of this op to get aliased to op.args[0] + return [SpaceOperation('mark_opaque_ptr', op.args, None), None] + def rewrite_op_force_cast(self, op): v_arg = op.args[0] v_result = op.result @@ -1049,35 
+1086,20 @@ # jit.codewriter.support. for _op, _oopspec in [('llong_invert', 'INVERT'), - ('ullong_invert', 'INVERT'), ('llong_lt', 'LT'), ('llong_le', 'LE'), ('llong_eq', 'EQ'), ('llong_ne', 'NE'), ('llong_gt', 'GT'), ('llong_ge', 'GE'), - ('ullong_lt', 'ULT'), - ('ullong_le', 'ULE'), - ('ullong_eq', 'EQ'), - ('ullong_ne', 'NE'), - ('ullong_gt', 'UGT'), - ('ullong_ge', 'UGE'), ('llong_add', 'ADD'), ('llong_sub', 'SUB'), ('llong_mul', 'MUL'), ('llong_and', 'AND'), ('llong_or', 'OR'), ('llong_xor', 'XOR'), - ('ullong_add', 'ADD'), - ('ullong_sub', 'SUB'), - ('ullong_mul', 'MUL'), - ('ullong_and', 'AND'), - ('ullong_or', 'OR'), - ('ullong_xor', 'XOR'), ('llong_lshift', 'LSHIFT'), ('llong_rshift', 'RSHIFT'), - ('ullong_lshift', 'LSHIFT'), - ('ullong_rshift', 'URSHIFT'), ('cast_int_to_longlong', 'FROM_INT'), ('truncate_longlong_to_int', 'TO_INT'), ('cast_float_to_longlong', 'FROM_FLOAT'), @@ -1100,6 +1122,21 @@ ('cast_uint_to_ulonglong', 'FROM_UINT'), ('cast_float_to_ulonglong', 'FROM_FLOAT'), ('cast_ulonglong_to_float', 'U_TO_FLOAT'), + ('ullong_invert', 'INVERT'), + ('ullong_lt', 'ULT'), + ('ullong_le', 'ULE'), + ('ullong_eq', 'EQ'), + ('ullong_ne', 'NE'), + ('ullong_gt', 'UGT'), + ('ullong_ge', 'UGE'), + ('ullong_add', 'ADD'), + ('ullong_sub', 'SUB'), + ('ullong_mul', 'MUL'), + ('ullong_and', 'AND'), + ('ullong_or', 'OR'), + ('ullong_xor', 'XOR'), + ('ullong_lshift', 'LSHIFT'), + ('ullong_rshift', 'URSHIFT'), ]: exec py.code.Source(''' def rewrite_op_%s(self, op): @@ -1130,7 +1167,7 @@ def rewrite_op_llong_is_true(self, op): v = varoftype(op.args[0].concretetype) - op0 = SpaceOperation('cast_int_to_longlong', + op0 = SpaceOperation('cast_primitive', [Constant(0, lltype.Signed)], v) args = [op.args[0], v] @@ -1611,6 +1648,12 @@ elif oopspec_name.startswith('libffi_call_'): oopspecindex = EffectInfo.OS_LIBFFI_CALL extraeffect = EffectInfo.EF_RANDOM_EFFECTS + elif oopspec_name == 'libffi_array_getitem': + oopspecindex = EffectInfo.OS_LIBFFI_GETARRAYITEM + extraeffect = EffectInfo.EF_CANNOT_RAISE + elif oopspec_name == 'libffi_array_setitem': + oopspecindex = EffectInfo.OS_LIBFFI_SETARRAYITEM + extraeffect = EffectInfo.EF_CANNOT_RAISE else: assert False, 'unsupported oopspec: %s' % oopspec_name return self._handle_oopspec_call(op, args, oopspecindex, extraeffect) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -37,9 +37,11 @@ return a.typeannotation(t) def annotate(func, values, inline=None, backendoptimize=True, - type_system="lltype"): + type_system="lltype", translationoptions={}): # build the normal ll graphs for ll_function t = TranslationContext() + for key, value in translationoptions.items(): + setattr(t.config.translation, key, value) annpolicy = AnnotatorPolicy() annpolicy.allow_someobjects = False a = t.buildannotator(policy=annpolicy) @@ -256,6 +258,9 @@ y = ~r_ulonglong(xll) return u_to_longlong(y) +def _ll_1_ullong_invert(xull): + return ~xull + def _ll_2_llong_lt(xll, yll): return xll < yll @@ -274,16 +279,22 @@ def _ll_2_llong_ge(xll, yll): return xll >= yll -def _ll_2_llong_ult(xull, yull): +def _ll_2_ullong_eq(xull, yull): + return xull == yull + +def _ll_2_ullong_ne(xull, yull): + return xull != yull + +def _ll_2_ullong_ult(xull, yull): return xull < yull -def _ll_2_llong_ule(xull, yull): +def _ll_2_ullong_ule(xull, yull): return xull <= yull -def _ll_2_llong_ugt(xull, yull): +def _ll_2_ullong_ugt(xull, yull): return xull > yull -def _ll_2_llong_uge(xull, yull): +def 
_ll_2_ullong_uge(xull, yull): return xull >= yull def _ll_2_llong_add(xll, yll): @@ -310,14 +321,41 @@ z = r_ulonglong(xll) ^ r_ulonglong(yll) return u_to_longlong(z) +def _ll_2_ullong_add(xull, yull): + z = (xull) + (yull) + return (z) + +def _ll_2_ullong_sub(xull, yull): + z = (xull) - (yull) + return (z) + +def _ll_2_ullong_mul(xull, yull): + z = (xull) * (yull) + return (z) + +def _ll_2_ullong_and(xull, yull): + z = (xull) & (yull) + return (z) + +def _ll_2_ullong_or(xull, yull): + z = (xull) | (yull) + return (z) + +def _ll_2_ullong_xor(xull, yull): + z = (xull) ^ (yull) + return (z) + def _ll_2_llong_lshift(xll, y): z = r_ulonglong(xll) << y return u_to_longlong(z) +def _ll_2_ullong_lshift(xull, y): + return xull << y + def _ll_2_llong_rshift(xll, y): return xll >> y -def _ll_2_llong_urshift(xull, y): +def _ll_2_ullong_urshift(xull, y): return xull >> y def _ll_1_llong_from_int(x): @@ -561,10 +599,21 @@ return p return _ll_0_alloc_with_del - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, flavor='raw') - return _ll_1_raw_malloc + def build_raw_malloc_builder(zero=False, add_memory_pressure=False, track_allocation=True): + def build_ll_1_raw_malloc(ARRAY): + def _ll_1_raw_malloc(n): + return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, add_memory_pressure=add_memory_pressure) + return _ll_1_raw_malloc + return build_ll_1_raw_malloc + + build_ll_1_raw_malloc = build_raw_malloc_builder() + build_ll_1_raw_malloc_zero = build_raw_malloc_builder(zero=True) + build_ll_1_raw_malloc_zero_add_memory_pressure = build_raw_malloc_builder(zero=True, add_memory_pressure=True) + build_ll_1_raw_malloc_add_memory_pressure = build_raw_malloc_builder(add_memory_pressure=True) + build_ll_1_raw_malloc_no_track_allocation = build_raw_malloc_builder(track_allocation=False) + build_ll_1_raw_malloc_zero_no_track_allocation = build_raw_malloc_builder(zero=True, track_allocation=False) + build_ll_1_raw_malloc_zero_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(zero=True, add_memory_pressure=True, track_allocation=False) + build_ll_1_raw_malloc_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(add_memory_pressure=True, track_allocation=False) def build_ll_1_raw_free(ARRAY): def _ll_1_raw_free(p): diff --git a/pypy/jit/codewriter/test/test_call.py b/pypy/jit/codewriter/test/test_call.py --- a/pypy/jit/codewriter/test/test_call.py +++ b/pypy/jit/codewriter/test/test_call.py @@ -192,3 +192,21 @@ [op] = block.operations call_descr = cc.getcalldescr(op) assert call_descr.extrainfo.has_random_effects() + +def test_random_effects_on_stacklet_switch(): + from pypy.jit.backend.llgraph.runner import LLtypeCPU + from pypy.rlib._rffi_stacklet import switch, thread_handle, handle + @jit.dont_look_inside + def f(): + switch(rffi.cast(thread_handle, 0), rffi.cast(handle, 0)) + + rtyper = support.annotate(f, []) + jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0]) + cc = CallControl(LLtypeCPU(rtyper), jitdrivers_sd=[jitdriver_sd]) + res = cc.find_all_graphs(FakePolicy()) + + [f_graph] = [x for x in res if x.func is f] + [block, _] = list(f_graph.iterblocks()) + op = block.operations[-1] + call_descr = cc.getcalldescr(op) + assert call_descr.extrainfo.has_random_effects() diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ b/pypy/jit/codewriter/test/test_flatten.py @@ -5,7 +5,7 @@ from pypy.jit.codewriter.format import 
assert_format from pypy.jit.codewriter import longlong from pypy.jit.metainterp.history import AbstractDescr -from pypy.rpython.lltypesystem import lltype, rclass, rstr +from pypy.rpython.lltypesystem import lltype, rclass, rstr, rffi from pypy.objspace.flow.model import SpaceOperation, Variable, Constant from pypy.translator.unsimplify import varoftype from pypy.rlib.rarithmetic import ovfcheck, r_uint, r_longlong, r_ulonglong @@ -743,7 +743,6 @@ """, transform=True) def test_force_cast(self): - from pypy.rpython.lltypesystem import rffi # NB: we don't need to test for INT here, the logic in jtransform is # general enough so that if we have the below cases it should # generalize also to INT @@ -849,7 +848,6 @@ transform=True) def test_force_cast_pointer(self): - from pypy.rpython.lltypesystem import rffi def h(p): return rffi.cast(rffi.VOIDP, p) self.encoding_test(h, [lltype.nullptr(rffi.CCHARP.TO)], """ @@ -857,7 +855,6 @@ """, transform=True) def test_force_cast_floats(self): - from pypy.rpython.lltypesystem import rffi # Caststs to lltype.Float def f(n): return rffi.cast(lltype.Float, n) @@ -964,7 +961,6 @@ """, transform=True) def test_direct_ptradd(self): - from pypy.rpython.lltypesystem import rffi def f(p, n): return lltype.direct_ptradd(p, n) self.encoding_test(f, [lltype.nullptr(rffi.CCHARP.TO), 123], """ @@ -975,7 +971,6 @@ def check_force_cast(FROM, TO, operations, value): """Check that the test is correctly written...""" - from pypy.rpython.lltypesystem import rffi import re r = re.compile('(\w+) \%i\d, \$(-?\d+)') # diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -1,3 +1,5 @@ + +import py import random try: from itertools import product @@ -15,12 +17,12 @@ from pypy.objspace.flow.model import FunctionGraph, Block, Link from pypy.objspace.flow.model import SpaceOperation, Variable, Constant -from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.lltypesystem.module import ll_math from pypy.translator.unsimplify import varoftype from pypy.jit.codewriter import heaptracker, effectinfo from pypy.jit.codewriter.flatten import ListOfKind -from pypy.jit.codewriter.jtransform import Transformer +from pypy.jit.codewriter.jtransform import Transformer, UnsupportedMallocFlags from pypy.jit.metainterp.history import getkind def const(x): @@ -538,6 +540,44 @@ assert op1.opname == '-live-' assert op1.args == [] +def test_raw_malloc(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw'}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_zero(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' 
+ assert op0.args[0].value == 'raw_malloc_zero' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_unsupported_flag(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'unsupported_flag': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address @@ -1128,3 +1168,16 @@ varoftype(lltype.Signed)) tr = Transformer(None, None) raises(NotImplementedError, tr.rewrite_operation, op) + +def test_cast_opaque_ptr(): + S = lltype.GcStruct("S", ("x", lltype.Signed)) + v1 = varoftype(lltype.Ptr(S)) + v2 = varoftype(lltype.Ptr(rclass.OBJECT)) + + op = SpaceOperation('cast_opaque_ptr', [v1], v2) + tr = Transformer() + [op1, op2] = tr.rewrite_operation(op) + assert op1.opname == 'mark_opaque_ptr' + assert op1.args == [v1] + assert op1.result is None + assert op2 is None diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py --- a/pypy/jit/codewriter/test/test_longlong.py +++ b/pypy/jit/codewriter/test/test_longlong.py @@ -78,7 +78,7 @@ oplist = tr.rewrite_operation(op) assert len(oplist) == 2 assert oplist[0].opname == 'residual_call_irf_f' - assert oplist[0].args[0].value == 'llong_from_int' + assert oplist[0].args[0].value == opname.split('_')[0]+'_from_int' assert oplist[0].args[1] == 'calldescr-84' assert list(oplist[0].args[2]) == [const(0)] assert list(oplist[0].args[3]) == [] diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -505,9 +505,6 @@ @arguments("r", "r", returns="i") def bhimpl_instance_ptr_ne(a, b): return a != b - @arguments("r", returns="r") - def bhimpl_cast_opaque_ptr(a): - return a @arguments("r", returns="i") def bhimpl_cast_ptr_to_int(a): i = lltype.cast_ptr_to_int(a) @@ -518,6 +515,13 @@ ll_assert((i & 1) == 1, "bhimpl_cast_int_to_ptr: not an odd int") return lltype.cast_int_to_ptr(llmemory.GCREF, i) + @arguments("r") + def bhimpl_mark_opaque_ptr(a): + pass + @arguments("r", "i") + def bhimpl_record_known_class(a, b): + pass + @arguments("i", returns="i") def bhimpl_int_copy(a): return a diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -9,12 +9,13 @@ from pypy.tool.sourcetools import func_with_new_name from pypy.jit.metainterp.resoperation import ResOperation, rop, get_deep_immutable_oplist -from pypy.jit.metainterp.history import TreeLoop, Box, History, LoopToken +from pypy.jit.metainterp.history import TreeLoop, Box, History, JitCellToken, TargetToken from pypy.jit.metainterp.history import AbstractFailDescr, BoxInt from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const from pypy.jit.metainterp import history from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.jit.metainterp.optimize import InvalidLoop +from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resume import NUMBERING, PENDINGFIELDSP from pypy.jit.codewriter import heaptracker, longlong @@ -23,7 +24,7 @@ from pypy.jit.metainterp.jitprof import ABORT_BRIDGE raise SwitchToBlackhole(ABORT_BRIDGE) -def 
show_loop(metainterp_sd, loop=None, error=None): +def show_procedures(metainterp_sd, procedure=None, error=None): # debugging if option.view or option.viewloops: if error: @@ -32,11 +33,12 @@ errmsg += ': ' + str(error) else: errmsg = None - if loop is None: # or type(loop) is TerminatingLoop: - extraloops = [] + if procedure is None: + extraprocedures = [] else: - extraloops = [loop] - metainterp_sd.stats.view(errmsg=errmsg, extraloops=extraloops) + extraprocedures = [procedure] + metainterp_sd.stats.view(errmsg=errmsg, + extraprocedures=extraprocedures) def create_empty_loop(metainterp, name_prefix=''): name = metainterp.staticdata.stats.name_for_new_loop() @@ -45,131 +47,222 @@ return loop -def make_loop_token(nb_args, jitdriver_sd): - loop_token = LoopToken() - loop_token.outermost_jitdriver_sd = jitdriver_sd - return loop_token +def make_jitcell_token(jitdriver_sd): + jitcell_token = JitCellToken() + jitcell_token.outermost_jitdriver_sd = jitdriver_sd + return jitcell_token def record_loop_or_bridge(metainterp_sd, loop): """Do post-backend recordings and cleanups on 'loop'. """ - # get the original loop token (corresponding to 'loop', or if that is - # a bridge, to the loop that this bridge belongs to) - looptoken = loop.token - assert looptoken is not None + # get the original jitcell token corresponding to the jitcell from which + # this trace starts + original_jitcell_token = loop.original_jitcell_token + assert original_jitcell_token is not None if metainterp_sd.warmrunnerdesc is not None: # for tests - assert looptoken.generation > 0 # has been registered with memmgr - wref = weakref.ref(looptoken) + assert original_jitcell_token.generation > 0 # has been registered with memmgr + wref = weakref.ref(original_jitcell_token) for op in loop.operations: descr = op.getdescr() if isinstance(descr, ResumeDescr): descr.wref_original_loop_token = wref # stick it there n = descr.index if n >= 0: # we also record the resumedescr number - looptoken.compiled_loop_token.record_faildescr_index(n) - elif isinstance(descr, LoopToken): - # for a JUMP or a CALL_ASSEMBLER: record it as a potential jump. + original_jitcell_token.compiled_loop_token.record_faildescr_index(n) + elif isinstance(descr, JitCellToken): + # for a CALL_ASSEMBLER: record it as a potential jump. + if descr is not original_jitcell_token: + original_jitcell_token.record_jump_to(descr) + descr.exported_state = None + op._descr = None # clear reference, mostly for tests + elif isinstance(descr, TargetToken): + # for a JUMP: record it as a potential jump. # (the following test is not enough to prevent more complicated # cases of cycles, but at least it helps in simple tests of # test_memgr.py) - if descr is not looptoken: - looptoken.record_jump_to(descr) - op._descr = None # clear reference, mostly for tests + if descr.original_jitcell_token is not original_jitcell_token: + assert descr.original_jitcell_token is not None + original_jitcell_token.record_jump_to(descr.original_jitcell_token) + # exported_state is cleared by optimizeopt when the short preamble is + # constructed. 
if that did not happen the label should not show up + # in a trace that will be used + assert descr.exported_state is None if not we_are_translated(): - op._jumptarget_number = descr.number + op._descr_wref = weakref.ref(op._descr) + op._descr = None # clear reference to prevent the history.Stats + # from keeping the loop alive during tests # record this looptoken on the QuasiImmut used in the code if loop.quasi_immutable_deps is not None: for qmut in loop.quasi_immutable_deps: qmut.register_loop_token(wref) # XXX maybe we should clear the dictionary here # mostly for tests: make sure we don't keep a reference to the LoopToken - loop.token = None + loop.original_jitcell_token = None if not we_are_translated(): - loop._looptoken_number = looptoken.number + loop._looptoken_number = original_jitcell_token.number # ____________________________________________________________ -def compile_new_loop(metainterp, old_loop_tokens, greenkey, start, - start_resumedescr, full_preamble_needed=True): - """Try to compile a new loop by closing the current history back +def compile_loop(metainterp, greenkey, start, + inputargs, jumpargs, + start_resumedescr, full_preamble_needed=True): + """Try to compile a new procedure by closing the current history back to the first operation. """ - from pypy.jit.metainterp.optimize import optimize_loop + from pypy.jit.metainterp.optimizeopt import optimize_trace history = metainterp.history - loop = create_empty_loop(metainterp) - loop.inputargs = history.inputargs[:] + metainterp_sd = metainterp.staticdata + jitdriver_sd = metainterp.jitdriver_sd + + if False: + part = partial_trace + assert False + procedur_token = metainterp.get_procedure_token(greenkey) + assert procedure_token + all_target_tokens = [] + else: + jitcell_token = make_jitcell_token(jitdriver_sd) + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + h_ops = history.operations + part.start_resumedescr = start_resumedescr + part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ + [h_ops[i].clone() for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, jumpargs, None, descr=jitcell_token)] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens = [target_token] + + loop = create_empty_loop(metainterp) + loop.inputargs = part.inputargs + loop.operations = part.operations + loop.quasi_immutable_deps = {} + if part.quasi_immutable_deps: + loop.quasi_immutable_deps.update(part.quasi_immutable_deps) + while part.operations[-1].getopnum() == rop.LABEL: + inliner = Inliner(inputargs, jumpargs) + part.quasi_immutable_deps = None + part.operations = [part.operations[-1]] + \ + [inliner.inline_op(h_ops[i]) for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jumpargs], + None, descr=jitcell_token)] + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens.append(target_token) + inputargs = jumpargs + jumpargs = part.operations[-1].getarglist() + + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + + loop.operations = loop.operations[:-1] + part.operations + if part.quasi_immutable_deps: + loop.quasi_immutable_deps.update(part.quasi_immutable_deps) + + if not loop.quasi_immutable_deps: + 
loop.quasi_immutable_deps = None for box in loop.inputargs: assert isinstance(box, Box) - # make a copy, because optimize_loop can mutate the ops and descrs - h_ops = history.operations - loop.operations = [h_ops[i].clone() for i in range(start, len(h_ops))] + + loop.original_jitcell_token = jitcell_token + for label in all_target_tokens: + assert isinstance(label, TargetToken) + label.original_jitcell_token = jitcell_token + if label.virtual_state and label.short_preamble: + metainterp_sd.logger_ops.log_short_preamble([], label.short_preamble) + jitcell_token.target_tokens = all_target_tokens + send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, "loop") + record_loop_or_bridge(metainterp_sd, loop) + return all_target_tokens[0] + +def compile_retrace(metainterp, greenkey, start, + inputargs, jumpargs, + start_resumedescr, partial_trace, resumekey): + """Try to compile a new procedure by closing the current history back + to the first operation. + """ + from pypy.jit.metainterp.optimizeopt import optimize_trace + + history = metainterp.history metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd - loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd) - loop.token = loop_token - loop.operations[-1].setdescr(loop_token) # patch the target of the JUMP - loop.preamble = create_empty_loop(metainterp, 'Preamble ') - loop.preamble.inputargs = loop.inputargs - loop.preamble.token = make_loop_token(len(loop.inputargs), jitdriver_sd) - loop.preamble.start_resumedescr = start_resumedescr + loop_jitcell_token = metainterp.get_procedure_token(greenkey) + assert loop_jitcell_token + assert partial_trace.operations[-1].getopnum() == rop.LABEL + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + part.start_resumedescr = start_resumedescr + h_ops = history.operations + + part.operations = [partial_trace.operations[-1]] + \ + [h_ops[i].clone() for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, jumpargs, None, descr=loop_jitcell_token)] + label = part.operations[0] + assert label.getopnum() == rop.LABEL try: - old_loop_token = optimize_loop(metainterp_sd, old_loop_tokens, loop, - jitdriver_sd.warmstate.enable_opts) + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: - debug_print("compile_new_loop: got an InvalidLoop") - return None - if old_loop_token is not None: - metainterp.staticdata.log("reusing old loop") - return old_loop_token + #return None # XXX: Dissable for now + # Fall back on jumping to preamble + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + assert target_token.exported_state + part.operations = [label] + \ + [ResOperation(rop.JUMP, target_token.exported_state.jump_args, + None, descr=loop_jitcell_token)] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts, + inline_short_preamble=False) + except InvalidLoop: + return None + assert part.operations[-1].getopnum() != rop.LABEL + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + assert loop_jitcell_token.target_tokens + loop_jitcell_token.target_tokens.append(target_token) - if loop.preamble.operations is not None: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, - "loop") - record_loop_or_bridge(metainterp_sd, loop) - token = loop.preamble.token - if full_preamble_needed: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, - loop.preamble, "entry bridge") - insert_loop_token(old_loop_tokens, 
loop.preamble.token) - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - greenkey, loop.preamble.token) - record_loop_or_bridge(metainterp_sd, loop.preamble) - elif token.short_preamble: - short = token.short_preamble[-1] - metainterp_sd.logger_ops.log_short_preamble(short.inputargs, - short.operations) - return token - else: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, - "loop") - insert_loop_token(old_loop_tokens, loop_token) - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - greenkey, loop.token) - record_loop_or_bridge(metainterp_sd, loop) - return loop_token + loop = partial_trace + loop.operations = loop.operations[:-1] + part.operations -def insert_loop_token(old_loop_tokens, loop_token): - # Find where in old_loop_tokens we should insert this new loop_token. - # The following algo means "as late as possible, but before another - # loop token that would be more general and so completely mask off - # the new loop_token". - # XXX do we still need a list? - old_loop_tokens.append(loop_token) + quasi_immutable_deps = {} + if loop.quasi_immutable_deps: + quasi_immutable_deps.update(loop.quasi_immutable_deps) + if part.quasi_immutable_deps: + quasi_immutable_deps.update(part.quasi_immutable_deps) + if quasi_immutable_deps: + loop.quasi_immutable_deps = quasi_immutable_deps + + for box in loop.inputargs: + assert isinstance(box, Box) + + target_token = loop.operations[-1].getdescr() + resumekey.compile_and_attach(metainterp, loop) + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + target_token.original_jitcell_token = loop.original_jitcell_token + record_loop_or_bridge(metainterp_sd, loop) + return target_token def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): - jitdriver_sd.on_compile(metainterp_sd.logger_ops, loop.token, + original_jitcell_token = loop.original_jitcell_token + jitdriver_sd.on_compile(metainterp_sd.logger_ops, original_jitcell_token, loop.operations, type, greenkey) loopname = jitdriver_sd.warmstate.get_location_str(greenkey) globaldata = metainterp_sd.globaldata - loop_token = loop.token - loop_token.number = n = globaldata.loopnumbering + original_jitcell_token.number = n = globaldata.loopnumbering globaldata.loopnumbering += 1 if not we_are_translated(): - show_loop(metainterp_sd, loop) + show_procedures(metainterp_sd, loop) loop.check_consistency() operations = get_deep_immutable_oplist(loop.operations) @@ -177,26 +270,19 @@ debug_start("jit-backend") try: ops_offset = metainterp_sd.cpu.compile_loop(loop.inputargs, operations, - loop.token, name=loopname) + original_jitcell_token, name=loopname) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() metainterp_sd.stats.add_new_loop(loop) if not we_are_translated(): - if type != "entry bridge": - metainterp_sd.stats.compiled() - else: - loop._ignore_during_counting = True + metainterp_sd.stats.compiled() metainterp_sd.log("compiled new " + type) # metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, type, ops_offset) - short = loop.token.short_preamble - if short: - metainterp_sd.logger_ops.log_short_preamble(short[-1].inputargs, - short[-1].operations) # if metainterp_sd.warmrunnerdesc is not None: # for tests - metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(loop.token) + metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(original_jitcell_token) def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputargs, operations, 
original_loop_token): @@ -204,8 +290,9 @@ jitdriver_sd.on_compile_bridge(metainterp_sd.logger_ops, original_loop_token, operations, n) if not we_are_translated(): - show_loop(metainterp_sd) - TreeLoop.check_consistency_of(inputargs, operations) + show_procedures(metainterp_sd) + seen = dict.fromkeys(inputargs) + TreeLoop.check_consistency_of_branch(operations, seen) metainterp_sd.profiler.start_backend() operations = get_deep_immutable_oplist(operations) debug_start("jit-backend") @@ -221,9 +308,9 @@ # metainterp_sd.logger_ops.log_bridge(inputargs, operations, n, ops_offset) # - if metainterp_sd.warmrunnerdesc is not None: # for tests - metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive( - original_loop_token) + #if metainterp_sd.warmrunnerdesc is not None: # for tests + # metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive( + # original_loop_token) # ____________________________________________________________ @@ -263,7 +350,7 @@ raise metainterp_sd.ExitFrameWithExceptionRef(cpu, value) -class TerminatingLoopToken(LoopToken): +class TerminatingLoopToken(JitCellToken): # FIXME: kill? terminating = True def __init__(self, nargs, finishdescr): @@ -298,7 +385,7 @@ pass class ResumeGuardDescr(ResumeDescr): - _counter = 0 # if < 0, there is one counter per value; + _counter = 0 # on a GUARD_VALUE, there is one counter per value; _counters = None # they get stored in _counters then. # this class also gets the following attributes stored by resume.py code @@ -309,10 +396,13 @@ rd_virtuals = None rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) - CNT_INT = -0x20000000 - CNT_REF = -0x40000000 - CNT_FLOAT = -0x60000000 - CNT_MASK = 0x1FFFFFFF + CNT_BASE_MASK = 0x0FFFFFFF # the base counter value + CNT_BUSY_FLAG = 0x10000000 # if set, busy tracing from the guard + CNT_TYPE_MASK = 0x60000000 # mask for the type + + CNT_INT = 0x20000000 + CNT_REF = 0x40000000 + CNT_FLOAT = 0x60000000 def store_final_boxes(self, guard_op, boxes): guard_op.setfailargs(boxes) @@ -326,6 +416,8 @@ except ValueError: return # xxx probably very rare else: + if i > self.CNT_BASE_MASK: + return # probably never, but better safe than sorry if box.type == history.INT: cnt = self.CNT_INT elif box.type == history.REF: @@ -334,13 +426,17 @@ cnt = self.CNT_FLOAT else: assert 0, box.type - # we build the following value for _counter, which is always - # a negative value + assert cnt > self.CNT_BASE_MASK self._counter = cnt | i def handle_fail(self, metainterp_sd, jitdriver_sd): if self.must_compile(metainterp_sd, jitdriver_sd): - self._trace_and_compile_from_bridge(metainterp_sd, jitdriver_sd) + self.start_compiling() + try: + self._trace_and_compile_from_bridge(metainterp_sd, + jitdriver_sd) + finally: + self.done_compiling() else: from pypy.jit.metainterp.blackhole import resume_in_blackhole resume_in_blackhole(metainterp_sd, jitdriver_sd, self) @@ -358,12 +454,22 @@ def must_compile(self, metainterp_sd, jitdriver_sd): trace_eagerness = jitdriver_sd.warmstate.trace_eagerness - if self._counter >= 0: + # + if self._counter <= self.CNT_BASE_MASK: + # simple case: just counting from 0 to trace_eagerness self._counter += 1 return self._counter >= trace_eagerness - else: - index = self._counter & self.CNT_MASK - typetag = self._counter & ~ self.CNT_MASK + # + # do we have the BUSY flag? If so, we're tracing right now, e.g. in an + # outer invocation of the same function, so don't trace again for now. + elif self._counter & self.CNT_BUSY_FLAG: + return False + # + else: # we have a GUARD_VALUE that fails. 
Make a _counters instance + # (only now, when the guard is actually failing at least once), + # and use it to record some statistics about the failing values. + index = self._counter & self.CNT_BASE_MASK + typetag = self._counter & self.CNT_TYPE_MASK counters = self._counters if typetag == self.CNT_INT: intvalue = metainterp_sd.cpu.get_latest_value_int(index) @@ -390,7 +496,16 @@ assert 0, typetag return counter >= trace_eagerness - def reset_counter_from_failure(self): + def start_compiling(self): + # start tracing and compiling from this guard. + self._counter |= self.CNT_BUSY_FLAG + + def done_compiling(self): + # done tracing and compiling from this guard. Either the bridge has + # been successfully compiled, in which case whatever value we store + # in self._counter will not be seen any more, or not, in which case + # we should reset the counter to 0, in order to wait a bit until the + # next attempt. if self._counter >= 0: self._counter = 0 self._counters = None @@ -399,13 +514,13 @@ # We managed to create a bridge. Attach the new operations # to the corresponding guard_op and compile from there assert metainterp.resumekey_original_loop_token is not None - new_loop.token = metainterp.resumekey_original_loop_token + new_loop.original_jitcell_token = metainterp.resumekey_original_loop_token inputargs = metainterp.history.inputargs if not we_are_translated(): self._debug_suboperations = new_loop.operations send_bridge_to_backend(metainterp.jitdriver_sd, metainterp.staticdata, self, inputargs, new_loop.operations, - new_loop.token) + new_loop.original_jitcell_token) def copy_all_attributes_into(self, res): # XXX a bit ugly to have to list them all here @@ -588,44 +703,32 @@ metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd redargs = new_loop.inputargs - # We make a new LoopToken for this entry bridge, and stick it - # to every guard in the loop. - new_loop_token = make_loop_token(len(redargs), jitdriver_sd) - new_loop.token = new_loop_token + new_loop.original_jitcell_token = jitcell_token = make_jitcell_token(jitdriver_sd) send_loop_to_backend(self.original_greenkey, metainterp.jitdriver_sd, metainterp_sd, new_loop, "entry bridge") # send the new_loop to warmspot.py, to be called directly the next time - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - self.original_greenkey, - new_loop_token) - # store the new loop in compiled_merge_points_wref too - old_loop_tokens = metainterp.get_compiled_merge_points( - self.original_greenkey) - # it always goes at the end of the list, as it is the most - # general loop token - old_loop_tokens.append(new_loop_token) - metainterp.set_compiled_merge_points(self.original_greenkey, - old_loop_tokens) + jitdriver_sd.warmstate.attach_procedure_to_interp( + self.original_greenkey, jitcell_token) + metainterp_sd.stats.add_jitcell_token(jitcell_token) - def reset_counter_from_failure(self): - pass - -def compile_new_bridge(metainterp, old_loop_tokens, resumekey, retraced=False): +def compile_trace(metainterp, resumekey, start_resumedescr=None): """Try to compile a new bridge leading from the beginning of the history to some existing place. """ - from pypy.jit.metainterp.optimize import optimize_bridge + from pypy.jit.metainterp.optimizeopt import optimize_trace # The history contains new operations to attach as the code for the # failure of 'resumekey.guard_op'. - # + # # Attempt to use optimize_bridge(). This may return None in case # it does not work -- i.e. none of the existing old_loop_tokens match. 
- new_loop = create_empty_loop(metainterp) - new_loop.inputargs = metainterp.history.inputargs[:] + new_trace = create_empty_loop(metainterp) + new_trace.inputargs = inputargs = metainterp.history.inputargs[:] # clone ops, as optimize_bridge can mutate the ops - new_loop.operations = [op.clone() for op in metainterp.history.operations] + + new_trace.operations = [op.clone() for op in metainterp.history.operations] + new_trace.start_resumedescr = start_resumedescr metainterp_sd = metainterp.staticdata state = metainterp.jitdriver_sd.warmstate if isinstance(resumekey, ResumeAtPositionDescr): @@ -633,38 +736,25 @@ else: inline_short_preamble = True try: - target_loop_token = optimize_bridge(metainterp_sd, old_loop_tokens, - new_loop, state.enable_opts, - inline_short_preamble, retraced) + optimize_trace(metainterp_sd, new_trace, state.enable_opts, inline_short_preamble) except InvalidLoop: debug_print("compile_new_bridge: got an InvalidLoop") # XXX I am fairly convinced that optimize_bridge cannot actually raise # InvalidLoop debug_print('InvalidLoop in compile_new_bridge') return None - # Did it work? - if target_loop_token is not None: - # Yes, we managed to create a bridge. Dispatch to resumekey to + + if new_trace.operations[-1].getopnum() != rop.LABEL: + # We managed to create a bridge. Dispatch to resumekey to # know exactly what we must do (ResumeGuardDescr/ResumeFromInterpDescr) - prepare_last_operation(new_loop, target_loop_token) - resumekey.compile_and_attach(metainterp, new_loop) - record_loop_or_bridge(metainterp_sd, new_loop) - return target_loop_token - -def prepare_last_operation(new_loop, target_loop_token): - op = new_loop.operations[-1] - if not isinstance(target_loop_token, TerminatingLoopToken): - # normal case - #op.setdescr(target_loop_token) # patch the jump target - pass + target_token = new_trace.operations[-1].getdescr() + resumekey.compile_and_attach(metainterp, new_trace) + record_loop_or_bridge(metainterp_sd, new_trace) + return target_token else: - # The target_loop_token is a pseudo loop token, - # e.g. loop_tokens_done_with_this_frame_void[0] - # Replace the operation with the real operation we want, i.e. a FINISH - descr = target_loop_token.finishdescr - args = op.getarglist() - new_op = ResOperation(rop.FINISH, args, None, descr=descr) - new_loop.operations[-1] = new_op + metainterp.retrace_needed(new_trace) + return None + # ____________________________________________________________ @@ -683,7 +773,7 @@ """ # 'redboxes' is only used to know the types of red arguments. inputargs = [box.clonebox() for box in redboxes] - loop_token = make_loop_token(len(inputargs), jitdriver_sd) + jitcell_token = make_jitcell_token(jitdriver_sd) # 'nb_red_args' might be smaller than len(redboxes), # because it doesn't include the virtualizable boxes. 
nb_red_args = jitdriver_sd.num_red_args @@ -716,7 +806,7 @@ ] operations[1].setfailargs([]) operations = get_deep_immutable_oplist(operations) - cpu.compile_loop(inputargs, operations, loop_token, log=False) + cpu.compile_loop(inputargs, operations, jitcell_token, log=False) if memory_manager is not None: # for tests - memory_manager.keep_loop_alive(loop_token) - return loop_token + memory_manager.keep_loop_alive(jitcell_token) + return jitcell_token diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -340,8 +340,11 @@ rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, rop.SETARRAYITEM_RAW, + rop.GETINTERIORFIELD_RAW, + rop.SETINTERIORFIELD_RAW, rop.CALL_RELEASE_GIL, rop.QUASIIMMUT_FIELD, + rop.LABEL, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git a/pypy/jit/metainterp/gc.py b/pypy/jit/metainterp/gc.py --- a/pypy/jit/metainterp/gc.py +++ b/pypy/jit/metainterp/gc.py @@ -7,6 +7,9 @@ self.config = config +class GC_none(GcDescription): + malloc_zero_filled = True + class GC_boehm(GcDescription): malloc_zero_filled = True diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -12,8 +12,9 @@ def get_display_text(self): return None -def display_loops(loops, errmsg=None, highlight_loops={}): - graphs = [(loop, highlight_loops.get(loop, 0)) for loop in loops] +def display_procedures(procedures, errmsg=None, highlight_procedures={}): + graphs = [(procedure, highlight_procedures.get(procedure, 0)) + for procedure in procedures] for graph, highlight in graphs: for op in graph.get_operations(): if is_interesting_guard(op): @@ -25,18 +26,19 @@ def is_interesting_guard(op): return hasattr(op.getdescr(), '_debug_suboperations') +def getdescr(op): + if op._descr is not None: + return op._descr + if hasattr(op, '_descr_wref'): + return op._descr_wref() + return None + class ResOpGraphPage(GraphPage): def compute(self, graphs, errmsg=None): resopgen = ResOpGen() for graph, highlight in graphs: - if getattr(graph, 'token', None) is not None: - resopgen.jumps_to_graphs[graph.token] = graph - if getattr(graph, '_looptoken_number', None) is not None: - resopgen.jumps_to_graphs[graph._looptoken_number] = graph - - for graph, highlight in graphs: resopgen.add_graph(graph, highlight) if errmsg: resopgen.set_errmsg(errmsg) @@ -54,7 +56,7 @@ self.block_starters = {} # {graphindex: {set-of-operation-indices}} self.all_operations = {} self.errmsg = None - self.jumps_to_graphs = {} + self.target_tokens = {} def op_name(self, graphindex, opindex): return 'g%dop%d' % (graphindex, opindex) @@ -73,16 +75,21 @@ for graphindex in range(len(self.graphs)): self.block_starters[graphindex] = {0: True} for graphindex, graph in enumerate(self.graphs): - last_was_mergepoint = False + mergepointblock = None for i, op in enumerate(graph.get_operations()): if is_interesting_guard(op): self.mark_starter(graphindex, i+1) if op.getopnum() == rop.DEBUG_MERGE_POINT: - if not last_was_mergepoint: - last_was_mergepoint = True - self.mark_starter(graphindex, i) + if mergepointblock is None: + mergepointblock = i + elif op.getopnum() == rop.LABEL: + self.mark_starter(graphindex, i) + self.target_tokens[getdescr(op)] = (graphindex, i) + mergepointblock = i else: - last_was_mergepoint = False + if mergepointblock is not None: + self.mark_starter(graphindex, mergepointblock) + 
mergepointblock = None def set_errmsg(self, errmsg): self.errmsg = errmsg @@ -172,24 +179,10 @@ (graphindex, opindex)) break if op.getopnum() == rop.JUMP: - tgt_g = -1 - tgt = None - tgt_number = getattr(op, '_jumptarget_number', None) - if tgt_number is not None: - tgt = self.jumps_to_graphs.get(tgt_number) - else: - tgt_descr = op.getdescr() - if tgt_descr is None: - tgt_g = graphindex - else: - tgt = self.jumps_to_graphs.get(tgt_descr.number) - if tgt is None: - tgt = self.jumps_to_graphs.get(tgt_descr) - if tgt is not None: - tgt_g = self.graphs.index(tgt) - if tgt_g != -1: + tgt_descr = getdescr(op) + if tgt_descr is not None and tgt_descr in self.target_tokens: self.genedge((graphindex, opstartindex), - (tgt_g, 0), + self.target_tokens[tgt_descr], weight="0") lines.append("") label = "\\l".join(lines) diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -34,7 +34,6 @@ self.clear_caches(opnum, descr, argboxes) def mark_escaped(self, opnum, argboxes): - idx = 0 if opnum == rop.SETFIELD_GC: assert len(argboxes) == 2 box, valuebox = argboxes @@ -42,8 +41,20 @@ self.dependencies.setdefault(box, []).append(valuebox) else: self._escape(valuebox) - # GETFIELD_GC doesn't escape it's argument - elif opnum != rop.GETFIELD_GC: + elif opnum == rop.SETARRAYITEM_GC: + assert len(argboxes) == 3 + box, indexbox, valuebox = argboxes + if self.is_unescaped(box) and self.is_unescaped(valuebox): + self.dependencies.setdefault(box, []).append(valuebox) + else: + self._escape(valuebox) + # GETFIELD_GC, MARK_OPAQUE_PTR, PTR_EQ, and PTR_NE don't escape their + # arguments + elif (opnum != rop.GETFIELD_GC and + opnum != rop.MARK_OPAQUE_PTR and + opnum != rop.PTR_EQ and + opnum != rop.PTR_NE): + idx = 0 for box in argboxes: # setarrayitem_gc don't escape its first argument if not (idx == 0 and opnum in [rop.SETARRAYITEM_GC]): @@ -60,13 +71,13 @@ self._escape(dep) def clear_caches(self, opnum, descr, argboxes): - if opnum == rop.SETFIELD_GC: - return - if opnum == rop.SETARRAYITEM_GC: - return - if opnum == rop.SETFIELD_RAW: - return - if opnum == rop.SETARRAYITEM_RAW: + if (opnum == rop.SETFIELD_GC or + opnum == rop.SETARRAYITEM_GC or + opnum == rop.SETFIELD_RAW or + opnum == rop.SETARRAYITEM_RAW or + opnum == rop.SETINTERIORFIELD_GC or + opnum == rop.COPYSTRCONTENT or + opnum == rop.COPYUNICODECONTENT): return if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: return @@ -75,9 +86,9 @@ if opnum == rop.CALL or opnum == rop.CALL_LOOPINVARIANT: effectinfo = descr.get_extra_info() ef = effectinfo.extraeffect - if ef == effectinfo.EF_LOOPINVARIANT or \ - ef == effectinfo.EF_ELIDABLE_CANNOT_RAISE or \ - ef == effectinfo.EF_ELIDABLE_CAN_RAISE: + if (ef == effectinfo.EF_LOOPINVARIANT or + ef == effectinfo.EF_ELIDABLE_CANNOT_RAISE or + ef == effectinfo.EF_ELIDABLE_CAN_RAISE): return # A special case for ll_arraycopy, because it is so common, and its # effects are so well defined. 
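The heapcache.py hunk above extends the escape tracking already done for SETFIELD_GC to SETARRAYITEM_GC: a store into a still-unescaped box only records a dependency, and the stored value escapes only once the container itself escapes. A minimal sketch of that pattern, with simplified hypothetical names rather than the real HeapCache interface:

    # Simplified sketch of the escape-tracking idea; not the actual
    # pypy/jit/metainterp/heapcache.py code.
    class EscapeTracker(object):
        def __init__(self):
            self.new_boxes = {}      # freshly allocated boxes -> still unescaped?
            self.dependencies = {}   # unescaped container -> values stored into it

        def is_unescaped(self, box):
            return self.new_boxes.get(box, False)

        def record_store(self, containerbox, valuebox):
            # e.g. SETFIELD_GC / SETARRAYITEM_GC: storing into an unescaped
            # container does not escape the value, it only records a dependency
            if self.is_unescaped(containerbox) and self.is_unescaped(valuebox):
                self.dependencies.setdefault(containerbox, []).append(valuebox)
            else:
                self.escape(valuebox)

        def escape(self, box):
            if self.new_boxes.get(box, False):
                self.new_boxes[box] = False
                # everything stored into an escaping container escapes as well
                for dep in self.dependencies.pop(box, []):
                    self.escape(dep)

In the real code the same idea is keyed on operation numbers (rop.SETFIELD_GC, rop.SETARRAYITEM_GC, rop.MARK_OPAQUE_PTR, ...), as the hunk shows.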
diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -10,6 +10,7 @@ from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.codewriter import heaptracker, longlong from pypy.rlib.objectmodel import compute_identity_hash +import weakref # ____________________________________________________________ @@ -692,18 +693,17 @@ # ____________________________________________________________ -# The TreeLoop class contains a loop or a generalized loop, i.e. a tree -# of operations. Each branch ends in a jump which can go either to -# the top of the same loop, or to another TreeLoop; or it ends in a FINISH. +# The JitCellToken class is the root of a tree of traces. Each branch ends +# in a jump which goes to a LABEL operation; or it ends in a FINISH. -class LoopToken(AbstractDescr): +class JitCellToken(AbstractDescr): """Used for rop.JUMP, giving the target of the jump. This is different from TreeLoop: the TreeLoop class contains the whole loop, including 'operations', and goes away after the loop was compiled; but the LoopDescr remains alive and points to the generated assembler. """ - short_preamble = None + target_tokens = None failed_states = None retraced_count = 0 terminating = False # see TerminatingLoopToken in compile.py @@ -720,10 +720,11 @@ def __init__(self): # For memory management of assembled loops - self._keepalive_target_looktokens = {} # set of other LoopTokens + self._keepalive_jitcell_tokens = {} # set of other JitCellToken - def record_jump_to(self, target_loop_token): - self._keepalive_target_looktokens[target_loop_token] = None + def record_jump_to(self, jitcell_token): + assert isinstance(jitcell_token, JitCellToken) + self._keepalive_jitcell_tokens[jitcell_token] = None def __repr__(self): return '<Loop %d, gen=%d>' % (self.number, self.generation) @@ -734,17 +735,36 @@ def dump(self): self.compiled_loop_token.cpu.dump_loop_token(self) +class TargetToken(AbstractDescr): + def __init__(self, targeting_jitcell_token=None): + # The jitcell to which jumps might result in a jump to this label + self.targeting_jitcell_token = targeting_jitcell_token + + # The jitcell where the trace containing the label with this TargetToken begins + self.original_jitcell_token = None + + self.virtual_state = None + self.exported_state = None + class TreeLoop(object): inputargs = None operations = None - token = None call_pure_results = None logops = None quasi_immutable_deps = None + start_resumedescr = None + + def _token(*args): + raise Exception("TreeLoop.token is killed") + token = property(_token, _token) + + # This is the jitcell where the trace starts. Labels within the trace might + # belong to some other jitcells in the sense that jumping to this other + # jitcell will result in a jump to the label. + original_jitcell_token = None def __init__(self, name): self.name = name - # self.inputargs = list of distinct Boxes # self.operations = list of ResOperations # ops of the kind 'guard_xxx' contain a further list of operations, # which may itself contain 'guard_xxx' and so on, making a tree. 
@@ -777,6 +797,10 @@ def check_consistency(self): # for testing "NOT_RPYTHON" self.check_consistency_of(self.inputargs, self.operations) + for op in self.operations: + descr = op.getdescr() + if op.getopnum() == rop.LABEL and isinstance(descr, TargetToken): + assert descr.original_jitcell_token is self.original_jitcell_token @staticmethod def check_consistency_of(inputargs, operations): @@ -811,15 +835,23 @@ assert isinstance(box, Box) assert box not in seen seen[box] = True + if op.getopnum() == rop.LABEL: + inputargs = op.getarglist() + for box in inputargs: + assert isinstance(box, Box), "LABEL contains %r" % (box,) + seen = dict.fromkeys(inputargs) + assert len(seen) == len(inputargs), ( + "duplicate Box in the LABEL arguments") + assert operations[-1].is_final() if operations[-1].getopnum() == rop.JUMP: target = operations[-1].getdescr() if target is not None: - assert isinstance(target, LoopToken) + assert isinstance(target, TargetToken) def dump(self): # RPython-friendly - print '%r: inputargs =' % self, self._dump_args(self.inputargs) + print '%r: inputargs =' % self, self._dump_args(self.inputargs) for op in self.operations: args = op.getarglist() print '\t', op.getopname(), self._dump_args(args), \ @@ -898,6 +930,12 @@ def view(self, **kwds): pass + def clear(self): + pass + + def add_jitcell_token(self, token): + pass + class Stats(object): """For tests.""" @@ -910,8 +948,22 @@ self.loops = [] self.locations = [] self.aborted_keys = [] - self.invalidated_token_numbers = set() + self.invalidated_token_numbers = set() # <- not RPython + self.jitcell_token_wrefs = [] + def clear(self): + del self.loops[:] + del self.locations[:] + del self.aborted_keys[:] + self.invalidated_token_numbers.clear() + self.compiled_count = 0 + self.enter_count = 0 + self.aborted_count = 0 + + def add_jitcell_token(self, token): + assert isinstance(token, JitCellToken) + self.jitcell_token_wrefs.append(weakref.ref(token)) + def set_history(self, history): self.operations = history.operations @@ -941,6 +993,15 @@ def get_all_loops(self): return self.loops + def get_all_jitcell_tokens(self): + tokens = [t() for t in self.jitcell_token_wrefs] + if None in tokens: + assert False, "get_all_jitcell_tokens will not work as "+\ + "loops have been freed" + return tokens + + + def check_history(self, expected=None, **check): insns = {} for op in self.operations: @@ -956,16 +1017,90 @@ "found %d %r, expected %d" % (found, insn, expected_count)) return insns + def check_resops(self, expected=None, **check): + insns = {} + for loop in self.get_all_loops(): + insns = loop.summary(adding_insns=insns) + return self._check_insns(insns, expected, check) + + def _check_insns(self, insns, expected, check): + if expected is not None: + insns.pop('debug_merge_point', None) + insns.pop('label', None) + assert insns == expected + for insn, expected_count in check.items(): + getattr(rop, insn.upper()) # fails if 'rop.INSN' does not exist + found = insns.get(insn, 0) + assert found == expected_count, ( + "found %d %r, expected %d" % (found, insn, expected_count)) + return insns + + def check_simple_loop(self, expected=None, **check): + # Usefull in the simplest case when we have only one trace ending with + # a jump back to itself and possibly a few bridges ending with finnish. + # Only the operations within the loop formed by that single jump will + # be counted. + + # XXX hacked version, ignore and remove me when jit-targets is merged. 
+ loops = self.get_all_loops() + loops = [loop for loop in loops if 'Preamble' not in repr(loop)] #XXX + assert len(loops) == 1 + loop, = loops + jumpop = loop.operations[-1] + assert jumpop.getopnum() == rop.JUMP + insns = {} + for op in loop.operations: + opname = op.getopname() + insns[opname] = insns.get(opname, 0) + 1 + return self._check_insns(insns, expected, check) + + def check_simple_loop(self, expected=None, **check): + # Usefull in the simplest case when we have only one trace ending with + # a jump back to itself and possibly a few bridges ending with finnish. + # Only the operations within the loop formed by that single jump will + # be counted. + loops = self.get_all_loops() + assert len(loops) == 1 + loop = loops[0] + jumpop = loop.operations[-1] + assert jumpop.getopnum() == rop.JUMP + assert self.check_resops(jump=1) + labels = [op for op in loop.operations if op.getopnum() == rop.LABEL] + targets = [op._descr_wref() for op in labels] + assert None not in targets # TargetToken was freed, give up + target = jumpop._descr_wref() + assert target + assert targets.count(target) == 1 + i = loop.operations.index(labels[targets.index(target)]) + insns = {} + for op in loop.operations[i:]: + opname = op.getopname() + insns[opname] = insns.get(opname, 0) + 1 + return self._check_insns(insns, expected, check) + def check_loops(self, expected=None, everywhere=False, **check): insns = {} - for loop in self.loops: - if not everywhere: - if getattr(loop, '_ignore_during_counting', False): - continue + for loop in self.get_all_loops(): + #if not everywhere: + # if getattr(loop, '_ignore_during_counting', False): + # continue insns = loop.summary(adding_insns=insns) if expected is not None: insns.pop('debug_merge_point', None) - assert insns == expected + print + print + print " self.check_resops(%s)" % str(insns) + print + import pdb; pdb.set_trace() + else: + chk = ['%s=%d' % (i, insns.get(i, 0)) for i in check] + print + print + print " self.check_resops(%s)" % ', '.join(chk) + print + import pdb; pdb.set_trace() + return + for insn, expected_count in check.items(): getattr(rop, insn.upper()) # fails if 'rop.INSN' does not exist found = insns.get(insn, 0) @@ -975,26 +1110,26 @@ def check_consistency(self): "NOT_RPYTHON" - for loop in self.loops: + for loop in self.get_all_loops(): loop.check_consistency() def maybe_view(self): if option.view: self.view() - def view(self, errmsg=None, extraloops=[]): - from pypy.jit.metainterp.graphpage import display_loops - loops = self.get_all_loops()[:] - for loop in extraloops: - if loop in loops: - loops.remove(loop) - loops.append(loop) - highlight_loops = dict.fromkeys(extraloops, 1) - for loop in loops: - if hasattr(loop, '_looptoken_number') and ( - loop._looptoken_number in self.invalidated_token_numbers): - highlight_loops.setdefault(loop, 2) - display_loops(loops, errmsg, highlight_loops) + def view(self, errmsg=None, extraprocedures=[]): + from pypy.jit.metainterp.graphpage import display_procedures + procedures = self.get_all_loops()[:] + for procedure in extraprocedures: + if procedure in procedures: + procedures.remove(procedure) + procedures.append(procedure) + highlight_procedures = dict.fromkeys(extraprocedures, 1) + for procedure in procedures: + if hasattr(procedure, '_looptoken_number') and ( + procedure._looptoken_number in self.invalidated_token_numbers): + highlight_procedures.setdefault(procedure, 2) + display_procedures(procedures, errmsg, highlight_procedures) # 
---------------------------------------------------------------- diff --git a/pypy/jit/metainterp/inliner.py b/pypy/jit/metainterp/inliner.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/inliner.py @@ -0,0 +1,57 @@ +from pypy.jit.metainterp.history import Const +from pypy.jit.metainterp.resume import Snapshot + +class Inliner(object): + def __init__(self, inputargs, jump_args): + assert len(inputargs) == len(jump_args) + self.argmap = {} + for i in range(len(inputargs)): + if inputargs[i] in self.argmap: + assert self.argmap[inputargs[i]] == jump_args[i] + else: + self.argmap[inputargs[i]] = jump_args[i] + self.snapshot_map = {None: None} + + def inline_op(self, newop, ignore_result=False, clone=True, + ignore_failargs=False): + if clone: + newop = newop.clone() + args = newop.getarglist() + newop.initarglist([self.inline_arg(a) for a in args]) + + if newop.is_guard(): + args = newop.getfailargs() + if args and not ignore_failargs: + newop.setfailargs([self.inline_arg(a) for a in args]) + else: + newop.setfailargs([]) + + if newop.result and not ignore_result: + old_result = newop.result + newop.result = newop.result.clonebox() + self.argmap[old_result] = newop.result + + self.inline_descr_inplace(newop.getdescr()) + + return newop + + def inline_descr_inplace(self, descr): + from pypy.jit.metainterp.compile import ResumeGuardDescr + if isinstance(descr, ResumeGuardDescr): + descr.rd_snapshot = self.inline_snapshot(descr.rd_snapshot) + + def inline_arg(self, arg): + if arg is None: + return None + if isinstance(arg, Const): + return arg + return self.argmap[arg] + + def inline_snapshot(self, snapshot): + if snapshot in self.snapshot_map: + return self.snapshot_map[snapshot] + boxes = [self.inline_arg(a) for a in snapshot.boxes] + new_snapshot = Snapshot(self.inline_snapshot(snapshot.prev), boxes) + self.snapshot_map[snapshot] = new_snapshot + return new_snapshot + diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -4,13 +4,15 @@ from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize from pypy.jit.metainterp.optimizeopt.heap import OptHeap from pypy.jit.metainterp.optimizeopt.vstring import OptString -from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll, OptInlineShortPreamble +from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll from pypy.jit.metainterp.optimizeopt.fficall import OptFfiCall from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify from pypy.jit.metainterp.optimizeopt.pure import OptPure from pypy.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce from pypy.rlib.jit import PARAMETERS from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.debug import debug_start, debug_stop, debug_print + ALL_OPTS = [('intbounds', OptIntBounds), ('rewrite', OptRewrite), @@ -28,8 +30,7 @@ ALL_OPTS_LIST = [name for name, _ in ALL_OPTS] ALL_OPTS_NAMES = ':'.join([name for name, _ in ALL_OPTS]) -def build_opt_chain(metainterp_sd, enable_opts, - inline_short_preamble=True, retraced=False): +def build_opt_chain(metainterp_sd, enable_opts): config = metainterp_sd.config optimizations = [] unroll = 'unroll' in enable_opts # 'enable_opts' is normally a dict @@ -45,17 +46,14 @@ optimizations.append(OptFfiCall()) if ('rewrite' not in enable_opts or 'virtualize' not in enable_opts - or 'heap' not in enable_opts): + or 'heap' not in enable_opts or 'unroll' not in 
enable_opts): optimizations.append(OptSimplify()) - if inline_short_preamble: - optimizations = [OptInlineShortPreamble(retraced)] + optimizations - return optimizations, unroll def optimize_loop_1(metainterp_sd, loop, enable_opts, - inline_short_preamble=True, retraced=False, bridge=False): + inline_short_preamble=True, retraced=False): """Optimize loop.operations to remove internal overheadish operations. """ @@ -64,7 +62,7 @@ if unroll: optimize_unroll(metainterp_sd, loop, optimizations) else: - optimizer = Optimizer(metainterp_sd, loop, optimizations, bridge) + optimizer = Optimizer(metainterp_sd, loop, optimizations) optimizer.propagate_all_forward() def optimize_bridge_1(metainterp_sd, bridge, enable_opts, @@ -76,7 +74,25 @@ except KeyError: pass optimize_loop_1(metainterp_sd, bridge, enable_opts, - inline_short_preamble, retraced, bridge=True) + inline_short_preamble, retraced) if __name__ == '__main__': print ALL_OPTS_NAMES + +def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True): + """Optimize loop.operations to remove internal overheadish operations. + """ + + debug_start("jit-optimize") + try: + loop.logops = metainterp_sd.logger_noopt.log_loop(loop.inputargs, + loop.operations) + optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts) + if unroll: + optimize_unroll(metainterp_sd, loop, optimizations, inline_short_preamble) + else: + optimizer = Optimizer(metainterp_sd, loop, optimizations) + optimizer.propagate_all_forward() + finally: + debug_stop("jit-optimize") + diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -1,11 +1,13 @@ +from pypy.jit.codewriter.effectinfo import EffectInfo +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization +from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method +from pypy.jit.metainterp.resoperation import rop, ResOperation +from pypy.rlib import clibffi, libffi +from pypy.rlib.debug import debug_print +from pypy.rlib.libffi import Func +from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.annlowlevel import cast_base_ptr_to_instance -from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.libffi import Func -from pypy.rlib.debug import debug_print -from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.metainterp.resoperation import rop, ResOperation -from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method -from pypy.jit.metainterp.optimizeopt.optimizer import Optimization +from pypy.rpython.lltypesystem import llmemory, rffi class FuncInfo(object): @@ -78,7 +80,7 @@ def new(self): return OptFfiCall() - + def begin_optimization(self, funcval, op): self.rollback_maybe('begin_optimization', op) self.funcinfo = FuncInfo(funcval, self.optimizer.cpu, op) @@ -116,6 +118,9 @@ ops = self.do_push_arg(op) elif oopspec == EffectInfo.OS_LIBFFI_CALL: ops = self.do_call(op) + elif (oopspec == EffectInfo.OS_LIBFFI_GETARRAYITEM or + oopspec == EffectInfo.OS_LIBFFI_SETARRAYITEM): + ops = self.do_getsetarrayitem(op, oopspec) # for op in ops: self.emit_operation(op) @@ -190,6 +195,56 @@ ops.append(newop) return ops + def do_getsetarrayitem(self, op, oopspec): + ffitypeval = self.getvalue(op.getarg(1)) + widthval = self.getvalue(op.getarg(2)) + offsetval = self.getvalue(op.getarg(5)) + if not ffitypeval.is_constant() or not widthval.is_constant() or not offsetval.is_constant(): + return 
[op] + + ffitypeaddr = ffitypeval.box.getaddr() + ffitype = llmemory.cast_adr_to_ptr(ffitypeaddr, clibffi.FFI_TYPE_P) + offset = offsetval.box.getint() + width = widthval.box.getint() + descr = self._get_interior_descr(ffitype, width, offset) + + arglist = [ + self.getvalue(op.getarg(3)).force_box(self.optimizer), + self.getvalue(op.getarg(4)).force_box(self.optimizer), + ] + if oopspec == EffectInfo.OS_LIBFFI_GETARRAYITEM: + opnum = rop.GETINTERIORFIELD_RAW + elif oopspec == EffectInfo.OS_LIBFFI_SETARRAYITEM: + opnum = rop.SETINTERIORFIELD_RAW + arglist.append(self.getvalue(op.getarg(6)).force_box(self.optimizer)) + else: + assert False + return [ + ResOperation(opnum, arglist, op.result, descr=descr), + ] + + def _get_interior_descr(self, ffitype, width, offset): + kind = libffi.types.getkind(ffitype) + is_pointer = is_float = is_signed = False + if ffitype is libffi.types.pointer: + is_pointer = True + elif kind == 'i': + is_signed = True + elif kind == 'f' or kind == 'I' or kind == 'U': + # longlongs are treated as floats, see + # e.g. llsupport/descr.py:getDescrClass + is_float = True + elif kind == 'u': + # they're all False + pass + else: + assert False, "unsupported ffitype or kind" + # + fieldsize = rffi.getintfield(ffitype, 'c_size') + return self.optimizer.cpu.interiorfielddescrof_dynamic( + offset, width, fieldsize, is_pointer, is_float, is_signed + ) + def propagate_forward(self, op): if self.logops is not None: debug_print(self.logops.repr_of_resop(op)) diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -43,7 +43,7 @@ optheap.optimizer.ensure_imported(cached_fieldvalue) cached_fieldvalue = self._cached_fields.get(structvalue, None) - if cached_fieldvalue is not fieldvalue: + if not fieldvalue.same_value(cached_fieldvalue): # common case: store the 'op' as lazy_setfield, and register # myself in the optheap's _lazy_setfields_and_arrayitems list self._lazy_setfield = op @@ -140,6 +140,15 @@ getop = ResOperation(rop.GETFIELD_GC, [op.getarg(0)], result, op.getdescr()) shortboxes.add_potential(getop, synthetic=True) + if op.getopnum() == rop.SETARRAYITEM_GC: + result = op.getarg(2) + if isinstance(result, Const): + newresult = result.clonebox() + optimizer.make_constant(newresult, result) + result = newresult + getop = ResOperation(rop.GETARRAYITEM_GC, [op.getarg(0), op.getarg(1)], + result, op.getdescr()) + shortboxes.add_potential(getop, synthetic=True) elif op.result is not None: shortboxes.add_potential(op) @@ -225,7 +234,7 @@ or op.is_ovf()): self.posponedop = op else: - self.next_optimization.propagate_forward(op) + Optimization.emit_operation(self, op) def emitting_operation(self, op): if op.has_no_side_effect(): diff --git a/pypy/jit/metainterp/optimizeopt/intbounds.py b/pypy/jit/metainterp/optimizeopt/intbounds.py --- a/pypy/jit/metainterp/optimizeopt/intbounds.py +++ b/pypy/jit/metainterp/optimizeopt/intbounds.py @@ -1,3 +1,4 @@ +import sys from pypy.jit.metainterp.optimizeopt.optimizer import Optimization, CONST_1, CONST_0, \ MODE_ARRAY, MODE_STR, MODE_UNICODE from pypy.jit.metainterp.history import ConstInt @@ -5,36 +6,18 @@ IntUpperBound) from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method from pypy.jit.metainterp.resoperation import rop +from pypy.jit.metainterp.optimize import InvalidLoop +from pypy.rlib.rarithmetic import LONG_BIT class OptIntBounds(Optimization): """Keeps track of the bounds placed on 
integers by guards and remove redundant guards""" - def setup(self): - self.posponedop = None - self.nextop = None - def new(self): - assert self.posponedop is None return OptIntBounds() - - def flush(self): - assert self.posponedop is None - - def setup(self): - self.posponedop = None - self.nextop = None def propagate_forward(self, op): - if op.is_ovf(): - self.posponedop = op - return - if self.posponedop: - self.nextop = op - op = self.posponedop - self.posponedop = None - dispatch_opt(self, op) def opt_default(self, op): @@ -126,14 +109,29 @@ r.intbound.intersect(v1.intbound.div_bound(v2.intbound)) def optimize_INT_MOD(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + known_nonneg = (v1.intbound.known_ge(IntBound(0, 0)) and + v2.intbound.known_ge(IntBound(0, 0))) + if known_nonneg and v2.is_constant(): + val = v2.box.getint() + if (val & (val-1)) == 0: + # nonneg % power-of-two ==> nonneg & (power-of-two - 1) + arg1 = op.getarg(0) + arg2 = ConstInt(val-1) + op = op.copy_and_change(rop.INT_AND, args=[arg1, arg2]) self.emit_operation(op) - v2 = self.getvalue(op.getarg(1)) if v2.is_constant(): val = v2.box.getint() r = self.getvalue(op.result) if val < 0: + if val == -sys.maxint-1: + return # give up val = -val - r.intbound.make_gt(IntBound(-val, -val)) + if known_nonneg: + r.intbound.make_ge(IntBound(0, 0)) + else: + r.intbound.make_gt(IntBound(-val, -val)) r.intbound.make_lt(IntBound(val, val)) def optimize_INT_LSHIFT(self, op): @@ -153,72 +151,84 @@ def optimize_INT_RSHIFT(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) + b = v1.intbound.rshift_bound(v2.intbound) + if b.has_lower and b.has_upper and b.lower == b.upper: + # constant result (likely 0, for rshifts that kill all bits) + self.make_constant_int(op.result, b.lower) + else: + self.emit_operation(op) + r = self.getvalue(op.result) + r.intbound.intersect(b) + + def optimize_GUARD_NO_OVERFLOW(self, op): + lastop = self.last_emitted_operation + if lastop is not None: + opnum = lastop.getopnum() + args = lastop.getarglist() + result = lastop.result + # If the INT_xxx_OVF was replaced with INT_xxx, then we can kill + # the GUARD_NO_OVERFLOW. + if (opnum == rop.INT_ADD or + opnum == rop.INT_SUB or + opnum == rop.INT_MUL): + return + # Else, synthesize the non overflowing op for optimize_default to + # reuse, as well as the reverse op + elif opnum == rop.INT_ADD_OVF: + self.pure(rop.INT_ADD, args[:], result) + self.pure(rop.INT_SUB, [result, args[1]], args[0]) + self.pure(rop.INT_SUB, [result, args[0]], args[1]) + elif opnum == rop.INT_SUB_OVF: + self.pure(rop.INT_SUB, args[:], result) + self.pure(rop.INT_ADD, [result, args[1]], args[0]) + self.pure(rop.INT_SUB, [args[0], result], args[1]) + elif opnum == rop.INT_MUL_OVF: + self.pure(rop.INT_MUL, args[:], result) self.emit_operation(op) - r = self.getvalue(op.result) - r.intbound.intersect(v1.intbound.rshift_bound(v2.intbound)) + + def optimize_GUARD_OVERFLOW(self, op): + # If INT_xxx_OVF was replaced by INT_xxx, *but* we still see + # GUARD_OVERFLOW, then the loop is invalid. 
+ lastop = self.last_emitted_operation + if lastop is None: + raise InvalidLoop + opnum = lastop.getopnum() + if opnum not in (rop.INT_ADD_OVF, rop.INT_SUB_OVF, rop.INT_MUL_OVF): + raise InvalidLoop + self.emit_operation(op) def optimize_INT_ADD_OVF(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.add_bound(v2.intbound) - if resbound.has_lower and resbound.has_upper and \ - self.nextop.getopnum() == rop.GUARD_NO_OVERFLOW: - # Transform into INT_ADD and remove guard + if resbound.bounded(): + # Transform into INT_ADD. The following guard will be killed + # by optimize_GUARD_NO_OVERFLOW; if we see instead an + # optimize_GUARD_OVERFLOW, then InvalidLoop. op = op.copy_and_change(rop.INT_ADD) - self.optimize_INT_ADD(op) # emit the op - else: - self.emit_operation(op) - r = self.getvalue(op.result) - r.intbound.intersect(resbound) - self.emit_operation(self.nextop) - if self.nextop.getopnum() == rop.GUARD_NO_OVERFLOW: - # Synthesize the non overflowing op for optimize_default to reuse - self.pure(rop.INT_ADD, op.getarglist()[:], op.result) - # Synthesize the reverse op for optimize_default to reuse - self.pure(rop.INT_SUB, [op.result, op.getarg(1)], op.getarg(0)) - self.pure(rop.INT_SUB, [op.result, op.getarg(0)], op.getarg(1)) - + self.emit_operation(op) # emit the op + r = self.getvalue(op.result) + r.intbound.intersect(resbound) def optimize_INT_SUB_OVF(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.sub_bound(v2.intbound) - if resbound.has_lower and resbound.has_upper and \ - self.nextop.getopnum() == rop.GUARD_NO_OVERFLOW: - # Transform into INT_SUB and remove guard + if resbound.bounded(): op = op.copy_and_change(rop.INT_SUB) - self.optimize_INT_SUB(op) # emit the op - else: - self.emit_operation(op) - r = self.getvalue(op.result) - r.intbound.intersect(resbound) - self.emit_operation(self.nextop) - if self.nextop.getopnum() == rop.GUARD_NO_OVERFLOW: - # Synthesize the non overflowing op for optimize_default to reuse - self.pure(rop.INT_SUB, op.getarglist()[:], op.result) - # Synthesize the reverse ops for optimize_default to reuse - self.pure(rop.INT_ADD, [op.result, op.getarg(1)], op.getarg(0)) - self.pure(rop.INT_SUB, [op.getarg(0), op.result], op.getarg(1)) - + self.emit_operation(op) # emit the op + r = self.getvalue(op.result) + r.intbound.intersect(resbound) def optimize_INT_MUL_OVF(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) resbound = v1.intbound.mul_bound(v2.intbound) - if resbound.has_lower and resbound.has_upper and \ - self.nextop.getopnum() == rop.GUARD_NO_OVERFLOW: - # Transform into INT_MUL and remove guard + if resbound.bounded(): op = op.copy_and_change(rop.INT_MUL) - self.optimize_INT_MUL(op) # emit the op - else: - self.emit_operation(op) - r = self.getvalue(op.result) - r.intbound.intersect(resbound) - self.emit_operation(self.nextop) - if self.nextop.getopnum() == rop.GUARD_NO_OVERFLOW: - # Synthesize the non overflowing op for optimize_default to reuse - self.pure(rop.INT_MUL, op.getarglist()[:], op.result) - + self.emit_operation(op) + r = self.getvalue(op.result) + r.intbound.intersect(resbound) def optimize_INT_LT(self, op): v1 = self.getvalue(op.getarg(0)) diff --git a/pypy/jit/metainterp/optimizeopt/intutils.py b/pypy/jit/metainterp/optimizeopt/intutils.py --- a/pypy/jit/metainterp/optimizeopt/intutils.py +++ b/pypy/jit/metainterp/optimizeopt/intutils.py @@ -1,4 +1,5 @@ -from pypy.rlib.rarithmetic import ovfcheck, 
ovfcheck_lshift, LONG_BIT +from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT +from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.history import BoxInt, ConstInt import sys @@ -13,6 +14,10 @@ self.has_lower = True self.upper = upper self.lower = lower + # check for unexpected overflows: + if not we_are_translated(): + assert type(upper) is not long + assert type(lower) is not long # Returns True if the bound was updated def make_le(self, other): @@ -169,10 +174,10 @@ other.known_ge(IntBound(0, 0)) and \ other.known_lt(IntBound(LONG_BIT, LONG_BIT)): try: - vals = (ovfcheck_lshift(self.upper, other.upper), - ovfcheck_lshift(self.upper, other.lower), - ovfcheck_lshift(self.lower, other.upper), - ovfcheck_lshift(self.lower, other.lower)) + vals = (ovfcheck(self.upper << other.upper), + ovfcheck(self.upper << other.lower), + ovfcheck(self.lower << other.upper), + ovfcheck(self.lower << other.lower)) return IntBound(min4(vals), max4(vals)) except (OverflowError, ValueError): return IntUnbounded() diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -1,12 +1,12 @@ from pypy.jit.metainterp import jitprof, resume, compile from pypy.jit.metainterp.executor import execute_nonspec -from pypy.jit.metainterp.history import BoxInt, BoxFloat, Const, ConstInt, REF +from pypy.jit.metainterp.history import BoxInt, BoxFloat, Const, ConstInt, REF, INT from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded, \ ImmutableIntUnbounded, \ IntLowerBound, MININT, MAXINT from pypy.jit.metainterp.optimizeopt.util import (make_dispatcher_method, args_dict) -from pypy.jit.metainterp.resoperation import rop, ResOperation +from pypy.jit.metainterp.resoperation import rop, ResOperation, AbstractResOp from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.tool.pairtype import extendabletype from pypy.rlib.debug import debug_start, debug_stop, debug_print @@ -95,6 +95,10 @@ return guards def import_from(self, other, optimizer): + if self.level == LEVEL_CONSTANT: + assert other.level == LEVEL_CONSTANT + assert other.box.same_constant(self.box) + return assert self.level <= LEVEL_NONNULL if other.level == LEVEL_CONSTANT: self.make_constant(other.get_key_box()) @@ -141,6 +145,13 @@ return not box.nonnull() return False + def same_value(self, other): + if not other: + return False + if self.is_constant() and other.is_constant(): + return self.box.same_constant(other.box) + return self is other + def make_constant(self, constbox): """Replace 'self.box' with a Const box.""" assert isinstance(constbox, Const) @@ -209,13 +220,19 @@ def setfield(self, ofs, value): raise NotImplementedError + def getlength(self): + raise NotImplementedError + def getitem(self, index): raise NotImplementedError - def getlength(self): + def setitem(self, index, value): raise NotImplementedError - def setitem(self, index, value): + def getinteriorfield(self, index, ofs, default): + raise NotImplementedError + + def setinteriorfield(self, index, ofs, value): raise NotImplementedError @@ -230,9 +247,10 @@ CONST_1 = ConstInt(1) CVAL_ZERO = ConstantValue(CONST_0) CVAL_ZERO_FLOAT = ConstantValue(Const._new(0.0)) -CVAL_UNINITIALIZED_ZERO = ConstantValue(CONST_0) llhelper.CVAL_NULLREF = ConstantValue(llhelper.CONST_NULL) oohelper.CVAL_NULLREF = ConstantValue(oohelper.CONST_NULL) 
+REMOVED = AbstractResOp(None) + class Optimization(object): next_optimization = None @@ -244,6 +262,7 @@ raise NotImplementedError def emit_operation(self, op): + self.last_emitted_operation = op self.next_optimization.propagate_forward(op) # FIXME: Move some of these here? @@ -283,11 +302,11 @@ return self.optimizer.optpure.has_pure_result(opnum, args, descr) return False - def get_pure_result(self, key): + def get_pure_result(self, key): if self.optimizer.optpure: return self.optimizer.optpure.get_pure_result(key) return None - + def setup(self): pass @@ -311,24 +330,25 @@ def forget_numberings(self, box): self.optimizer.forget_numberings(box) + class Optimizer(Optimization): - def __init__(self, metainterp_sd, loop, optimizations=None, bridge=False): + def __init__(self, metainterp_sd, loop, optimizations=None): self.metainterp_sd = metainterp_sd self.cpu = metainterp_sd.cpu self.loop = loop - self.bridge = bridge self.values = {} self.interned_refs = self.cpu.ts.new_ref_dict() + self.interned_ints = {} self.resumedata_memo = resume.ResumeDataLoopMemo(metainterp_sd) self.bool_boxes = {} self.producer = {} self.pendingfields = [] - self.exception_might_have_happened = False self.quasi_immutable_deps = None self.opaque_pointers = {} self.replaces_guard = {} self._newoperations = [] + self.seen_results = {} self.optimizer = self self.optpure = None self.optearlyforce = None @@ -346,6 +366,7 @@ optimizations[-1].next_optimization = self for o in optimizations: o.optimizer = self + o.last_emitted_operation = None o.setup() else: optimizations = [] @@ -392,6 +413,9 @@ if not value: return box return self.interned_refs.setdefault(value, box) + #elif constbox.type == INT: + # value = constbox.getint() + # return self.interned_ints.setdefault(value, box) else: return box @@ -476,9 +500,9 @@ else: return CVAL_ZERO - def propagate_all_forward(self): - self.exception_might_have_happened = self.bridge - self.clear_newoperations() + def propagate_all_forward(self, clear=True): + if clear: + self.clear_newoperations() for op in self.loop.operations: self.first_optimization.propagate_forward(op) self.loop.operations = self.get_newoperations() @@ -520,11 +544,15 @@ op = self.store_final_boxes_in_guard(op) elif op.can_raise(): self.exception_might_have_happened = True + if op.result: + if op.result in self.seen_results: + raise ValueError, "invalid optimization" + self.seen_results[op.result] = None self._newoperations.append(op) def replace_op(self, old_op, new_op): # XXX: Do we want to cache indexes to prevent search? 
- i = len(self._newoperations) + i = len(self._newoperations) while i > 0: i -= 1 if self._newoperations[i] is old_op: @@ -537,9 +565,12 @@ descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) - newboxes = modifier.finish(self.values, self.pendingfields) - if len(newboxes) > self.metainterp_sd.options.failargs_limit: # XXX be careful here - compile.giveup() + try: + newboxes = modifier.finish(self.values, self.pendingfields) + if len(newboxes) > self.metainterp_sd.options.failargs_limit: + raise resume.TagOverflow + except resume.TagOverflow: + raise compile.giveup() descr.store_final_boxes(op, newboxes) # if op.getopnum() == rop.GUARD_VALUE: diff --git a/pypy/jit/metainterp/optimizeopt/pure.py b/pypy/jit/metainterp/optimizeopt/pure.py --- a/pypy/jit/metainterp/optimizeopt/pure.py +++ b/pypy/jit/metainterp/optimizeopt/pure.py @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.optimizeopt.optimizer import Optimization +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization, REMOVED from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.optimizeopt.util import (make_dispatcher_method, args_dict) @@ -61,7 +61,10 @@ oldop = self.pure_operations.get(args, None) if oldop is not None and oldop.getdescr() is op.getdescr(): assert oldop.getopnum() == op.getopnum() + # this removes a CALL_PURE that has the same (non-constant) + # arguments as a previous CALL_PURE. self.make_equal_to(op.result, self.getvalue(oldop.result)) + self.last_emitted_operation = REMOVED return else: self.pure_operations[args] = op @@ -72,6 +75,13 @@ self.emit_operation(ResOperation(rop.CALL, args, op.result, op.getdescr())) + def optimize_GUARD_NO_EXCEPTION(self, op): + if self.last_emitted_operation is REMOVED: + # it was a CALL_PURE that was killed; so we also kill the + # following GUARD_NO_EXCEPTION + return + self.emit_operation(op) + def flush(self): assert self.posponedop is None diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -260,6 +260,16 @@ def optimize_GUARD_FALSE(self, op): self.optimize_guard(op, CONST_0) + def optimize_RECORD_KNOWN_CLASS(self, op): + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) + assert isinstance(expectedclassbox, Const) + realclassbox = value.get_constant_class(self.optimizer.cpu) + if realclassbox is not None: + assert realclassbox.same_constant(expectedclassbox) + return + value.make_constant_class(expectedclassbox, None) + def optimize_GUARD_CLASS(self, op): value = self.getvalue(op.getarg(0)) expectedclassbox = op.getarg(1) @@ -294,12 +304,6 @@ raise InvalidLoop self.optimize_GUARD_CLASS(op) - def optimize_GUARD_NO_EXCEPTION(self, op): - if not self.optimizer.exception_might_have_happened: - return - self.emit_operation(op) - self.optimizer.exception_might_have_happened = False - def optimize_CALL_LOOPINVARIANT(self, op): arg = op.getarg(0) # 'arg' must be a Const, because residual_call in codewriter @@ -310,6 +314,7 @@ resvalue = self.loop_invariant_results.get(key, None) if resvalue is not None: self.make_equal_to(op.result, resvalue) + self.last_emitted_operation = REMOVED return # change the op to be a normal call, from the backend's point of view # there is no reason to have a separate operation for this @@ -444,10 +449,19 @@ except KeyError: pass else: + # this removes a CALL_PURE 
with all constant arguments. self.make_constant(op.result, result) + self.last_emitted_operation = REMOVED return self.emit_operation(op) + def optimize_GUARD_NO_EXCEPTION(self, op): + if self.last_emitted_operation is REMOVED: + # it was a CALL_PURE or a CALL_LOOPINVARIANT that was killed; + # so we also kill the following GUARD_NO_EXCEPTION + return + self.emit_operation(op) + def optimize_INT_FLOORDIV(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) @@ -465,10 +479,9 @@ args = [op.getarg(0), ConstInt(highest_bit(val))]) self.emit_operation(op) - def optimize_CAST_OPAQUE_PTR(self, op): + def optimize_MARK_OPAQUE_PTR(self, op): value = self.getvalue(op.getarg(0)) self.optimizer.opaque_pointers[value] = True - self.make_equal_to(op.result, value) def optimize_CAST_PTR_TO_INT(self, op): self.pure(rop.CAST_INT_TO_PTR, [op.result], op.getarg(0)) @@ -478,6 +491,9 @@ self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) self.emit_operation(op) + def optimize_SAME_AS(self, op): + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) + dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_', default=OptRewrite.emit_operation) optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD') diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -1,9 +1,12 @@ from pypy.jit.metainterp.optimizeopt.optimizer import Optimization from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method from pypy.jit.metainterp.resoperation import ResOperation, rop - +from pypy.jit.metainterp.history import TargetToken, JitCellToken class OptSimplify(Optimization): + def __init__(self): + self.last_label_descr = None + def optimize_CALL_PURE(self, op): args = op.getarglist() self.emit_operation(ResOperation(rop.CALL, args, op.result, @@ -25,8 +28,29 @@ # but it's a bit hard to implement robustly if heap.py is also run pass - optimize_CAST_OPAQUE_PTR = optimize_VIRTUAL_REF + def optimize_MARK_OPAQUE_PTR(self, op): + pass + def optimize_RECORD_KNOWN_CLASS(self, op): + pass + + def optimize_LABEL(self, op): + self.last_label_descr = op.getdescr() + self.emit_operation(op) + + def optimize_JUMP(self, op): + descr = op.getdescr() + assert isinstance(descr, JitCellToken) + if not descr.target_tokens: + assert self.last_label_descr is not None + target_token = self.last_label_descr + assert isinstance(target_token, TargetToken) + assert target_token.targeting_jitcell_token is descr + op.setdescr(self.last_label_descr) + else: + assert len(descr.target_tokens) == 1 + op.setdescr(descr.target_tokens[0]) + self.emit_operation(op) dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -0,0 +1,200 @@ +from pypy.jit.metainterp.optimizeopt.test.test_util import ( + LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot) +from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken +from pypy.jit.metainterp.resoperation import rop, opname, ResOperation +from pypy.jit.metainterp.optimize import InvalidLoop +from py.test import raises + +class BaseTestMultiLabel(BaseTest): + enable_opts = 
"intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" + + def optimize_loop(self, ops, expected): + loop = self.parse(ops) + if expected != "crash!": + expected = self.parse(expected) + + part = TreeLoop('part') + part.inputargs = loop.inputargs + part.start_resumedescr = FakeDescrWithSnapshot() + token = loop.original_jitcell_token + + optimized = TreeLoop('optimized') + optimized.inputargs = loop.inputargs + optimized.operations = [] + + labels = [i for i, op in enumerate(loop.operations) \ + if op.getopnum()==rop.LABEL] + prv = 0 + last_label = [] + for nxt in labels + [len(loop.operations)]: + assert prv != nxt + operations = last_label + loop.operations[prv:nxt] + if nxt < len(loop.operations): + label = loop.operations[nxt] + assert label.getopnum() == rop.LABEL + jumpop = ResOperation(rop.JUMP, label.getarglist(), + None, descr=token) + operations.append(jumpop) + part.operations = operations + self._do_optimize_loop(part, None) + if part.operations[-1].getopnum() == rop.LABEL: + last_label = [part.operations.pop()] + else: + last_label = [] + optimized.operations.extend(part.operations) + prv = nxt + 1 + + # + print + print "Optimized:" + if optimized.operations: + print '\n'.join([str(o) for o in optimized.operations]) + else: + print 'Failed!' + print + + assert expected != "crash!", "should have raised an exception" + self.assert_equal(optimized, expected) + + return optimized + + def test_simple(self): + ops = """ + [i1] + i2 = int_add(i1, 1) + escape(i2) + label(i1) + i3 = int_add(i1, 1) + escape(i3) + jump(i1) + """ + expected = """ + [i1] + i2 = int_add(i1, 1) + escape(i2) + label(i1, i2) + escape(i2) + jump(i1, i2) + """ + self.optimize_loop(ops, expected) + + def test_forced_virtual(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + escape(p3) + jump(p3) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtuals_with_nonmatching_fields(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3, 1, descr=valuedescr) + label(p3) + p4 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p4, 1, descr=nextdescr) + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtual_arrays_with_nonmatching_lens(self): + ops = """ + [p1] + p2 = new_array(3, descr=arraydescr) + label(p2) + p4 = new_array(2, descr=arraydescr) + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_nonmatching_arraystruct_1(self): + ops = """ + [p1, f0] + p2 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p2, 2, f0, descr=complexrealdescr) + label(p2, f0) + p4 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p4, 2, f0, descr=compleximagdescr) + jump(p4, f0) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_nonmatching_arraystruct_2(self): + ops = """ + [p1, f0] + p2 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p2, 2, f0, descr=complexrealdescr) + label(p2, f0) + p4 = new_array(2, descr=complexarraydescr) + setinteriorfield_gc(p4, 0, f0, descr=complexrealdescr) + jump(p4, f0) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual_array(self): + ops = """ + [p1] + p3 = new_array(3, descr=arraydescr) + label(p3) + p4 = escape() + 
jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual_arraystruct(self): + ops = """ + [p1] + p3 = new_array(3, descr=complexarraydescr) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtual_turns_constant(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + guard_value(p3, ConstPtr(myptr)) [] + jump(p3) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtuals_turns_not_equal(self): + ops = """ + [p1, p2] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3, p3) + p4 = new_with_vtable(ConstClass(node_vtable)) + jump(p3, p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + +class TestLLtype(BaseTestMultiLabel, LLtypeMixin): + pass + diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -1,7 +1,8 @@ import py from pypy.rlib.objectmodel import instantiate from pypy.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, FakeMetaInterpStaticData) + LLtypeMixin, BaseTest, FakeMetaInterpStaticData, convert_old_style_to_targets) +from pypy.jit.metainterp.history import TargetToken, JitCellToken from pypy.jit.metainterp.test.test_compile import FakeLogger import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt import pypy.jit.metainterp.optimizeopt.virtualize as virtualize @@ -9,7 +10,7 @@ from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt from pypy.jit.metainterp import executor, compile, resume, history from pypy.jit.metainterp.resoperation import rop, opname, ResOperation - +from pypy.rlib.rarithmetic import LONG_BIT def test_store_final_boxes_in_guard(): from pypy.jit.metainterp.compile import ResumeGuardDescr @@ -115,9 +116,13 @@ enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap" def optimize_loop(self, ops, optops, call_pure_results=None): - loop = self.parse(ops) - expected = self.parse(optops) + token = JitCellToken() + loop.operations = [ResOperation(rop.LABEL, loop.inputargs, None, descr=TargetToken(token))] + \ + loop.operations + if loop.operations[-1].getopnum() == rop.JUMP: + loop.operations[-1].setdescr(token) + expected = convert_old_style_to_targets(self.parse(optops), jump=True) self._do_optimize_loop(loop, call_pure_results) print '\n'.join([str(o) for o in loop.operations]) self.assert_equal(loop, expected) @@ -680,25 +685,60 @@ # ---------- - def test_fold_guard_no_exception(self): - ops = """ - [i] - guard_no_exception() [] - i1 = int_add(i, 3) - guard_no_exception() [] + def test_keep_guard_no_exception(self): + ops = """ + [i1] i2 = call(i1, descr=nonwritedescr) guard_no_exception() [i1, i2] - guard_no_exception() [] - i3 = call(i2, descr=nonwritedescr) - jump(i1) # the exception is considered lost when we loop back - """ - expected = """ - [i] - i1 = int_add(i, 3) - i2 = call(i1, descr=nonwritedescr) + jump(i2) + """ + self.optimize_loop(ops, ops) + + def test_keep_guard_no_exception_with_call_pure_that_is_not_folded(self): + ops = """ + [i1] + i2 = call_pure(123456, i1, descr=nonwritedescr) guard_no_exception() [i1, i2] - i3 = call(i2, descr=nonwritedescr) - jump(i1) + jump(i2) + """ + expected = """ + [i1] + i2 = call(123456, i1, descr=nonwritedescr) + guard_no_exception() [i1, i2] + 
jump(i2) + """ + self.optimize_loop(ops, expected) + + def test_remove_guard_no_exception_with_call_pure_on_constant_args(self): + arg_consts = [ConstInt(i) for i in (123456, 81)] + call_pure_results = {tuple(arg_consts): ConstInt(5)} + ops = """ + [i1] + i3 = same_as(81) + i2 = call_pure(123456, i3, descr=nonwritedescr) + guard_no_exception() [i1, i2] + jump(i2) + """ + expected = """ + [i1] + jump(5) + """ + self.optimize_loop(ops, expected, call_pure_results) + + def test_remove_guard_no_exception_with_duplicated_call_pure(self): + ops = """ + [i1] + i2 = call_pure(123456, i1, descr=nonwritedescr) + guard_no_exception() [i1, i2] + i3 = call_pure(123456, i1, descr=nonwritedescr) + guard_no_exception() [i1, i2, i3] + jump(i3) + """ + expected = """ + [i1] + i2 = call(123456, i1, descr=nonwritedescr) + guard_no_exception() [i1, i2] + jump(i2) """ self.optimize_loop(ops, expected) @@ -935,7 +975,6 @@ """ self.optimize_loop(ops, expected) - def test_virtual_constant_isnonnull(self): ops = """ [i0] @@ -951,6 +990,55 @@ """ self.optimize_loop(ops, expected) + def test_virtual_array_of_struct(self): + ops = """ + [f0, f1, f2, f3] + p0 = new_array(2, descr=complexarraydescr) + setinteriorfield_gc(p0, 0, f0, descr=complexrealdescr) + setinteriorfield_gc(p0, 0, f1, descr=compleximagdescr) + setinteriorfield_gc(p0, 1, f2, descr=complexrealdescr) + setinteriorfield_gc(p0, 1, f3, descr=compleximagdescr) + f4 = getinteriorfield_gc(p0, 0, descr=complexrealdescr) + f5 = getinteriorfield_gc(p0, 1, descr=complexrealdescr) + f6 = float_mul(f4, f5) + f7 = getinteriorfield_gc(p0, 0, descr=compleximagdescr) + f8 = getinteriorfield_gc(p0, 1, descr=compleximagdescr) + f9 = float_mul(f7, f8) + f10 = float_add(f6, f9) + finish(f10) + """ + expected = """ + [f0, f1, f2, f3] + f4 = float_mul(f0, f2) + f5 = float_mul(f1, f3) + f6 = float_add(f4, f5) + finish(f6) + """ + self.optimize_loop(ops, expected) + + def test_virtual_array_of_struct_forced(self): + ops = """ + [f0, f1] + p0 = new_array(1, descr=complexarraydescr) + setinteriorfield_gc(p0, 0, f0, descr=complexrealdescr) + setinteriorfield_gc(p0, 0, f1, descr=compleximagdescr) + f2 = getinteriorfield_gc(p0, 0, descr=complexrealdescr) + f3 = getinteriorfield_gc(p0, 0, descr=compleximagdescr) + f4 = float_mul(f2, f3) + i0 = escape(f4, p0) + finish(i0) + """ + expected = """ + [f0, f1] + f2 = float_mul(f0, f1) + p0 = new_array(1, descr=complexarraydescr) + setinteriorfield_gc(p0, 0, f0, descr=complexrealdescr) + setinteriorfield_gc(p0, 0, f1, descr=compleximagdescr) + i0 = escape(f2, p0) + finish(i0) + """ + self.optimize_loop(ops, expected) + def test_nonvirtual_1(self): ops = """ [i] @@ -4074,6 +4162,38 @@ """ self.optimize_strunicode_loop(ops, expected) + def test_str_concat_constant_lengths(self): + ops = """ + [i0] + p0 = newstr(1) + strsetitem(p0, 0, i0) + p1 = newstr(0) + p2 = call(0, p0, p1, descr=strconcatdescr) + i1 = call(0, p2, p0, descr=strequaldescr) + finish(i1) + """ + expected = """ + [i0] + finish(1) + """ + self.optimize_strunicode_loop(ops, expected) + + def test_str_concat_constant_lengths_2(self): + ops = """ + [i0] + p0 = newstr(0) + p1 = newstr(1) + strsetitem(p1, 0, i0) + p2 = call(0, p0, p1, descr=strconcatdescr) + i1 = call(0, p2, p1, descr=strequaldescr) + finish(i1) + """ + expected = """ + [i0] + finish(1) + """ + self.optimize_strunicode_loop(ops, expected) + def test_str_slice_1(self): ops = """ [p1, i1, i2] @@ -4176,15 +4296,38 @@ """ self.optimize_strunicode_loop(ops, expected) + def test_str_slice_plain_virtual(self): + ops = """ 
+ [] + p0 = newstr(11) + copystrcontent(s"hello world", p0, 0, 0, 11) + p1 = call(0, p0, 0, 5, descr=strslicedescr) + finish(p1) + """ + expected = """ + [] + p0 = newstr(11) + copystrcontent(s"hello world", p0, 0, 0, 11) + # Eventually this should just return s"hello", but ATM this test is + # just verifying that it doesn't return "\0\0\0\0\0", so being + # slightly underoptimized is ok. + p1 = newstr(5) + copystrcontent(p0, p1, 0, 0, 5) + finish(p1) + """ + self.optimize_strunicode_loop(ops, expected) + # ---------- def optimize_strunicode_loop_extradescrs(self, ops, optops): class FakeCallInfoCollection: def callinfo_for_oopspec(self, oopspecindex): calldescrtype = type(LLtypeMixin.strequaldescr) + effectinfotype = type(LLtypeMixin.strequaldescr.get_extra_info()) for value in LLtypeMixin.__dict__.values(): if isinstance(value, calldescrtype): extra = value.get_extra_info() - if extra and extra.oopspecindex == oopspecindex: + if (extra and isinstance(extra, effectinfotype) and + extra.oopspecindex == oopspecindex): # returns 0 for 'func' in this test return value, 0 raise AssertionError("not found: oopspecindex=%d" % @@ -4664,11 +4807,11 @@ i5 = int_ge(i0, 0) guard_true(i5) [] i1 = int_mod(i0, 42) - i2 = int_rshift(i1, 63) + i2 = int_rshift(i1, %d) i3 = int_and(42, i2) i4 = int_add(i1, i3) finish(i4) - """ + """ % (LONG_BIT-1) expected = """ [i0] i5 = int_ge(i0, 0) @@ -4676,21 +4819,41 @@ i1 = int_mod(i0, 42) finish(i1) """ - py.test.skip("in-progress") self.optimize_loop(ops, expected) - # Also, 'n % power-of-two' can be turned into int_and(), - # but that's a bit harder to detect here because it turns into - # several operations, and of course it is wrong to just turn + # 'n % power-of-two' can be turned into int_and(); at least that's + # easy to do now if n is known to be non-negative. + ops = """ + [i0] + i5 = int_ge(i0, 0) + guard_true(i5) [] + i1 = int_mod(i0, 8) + i2 = int_rshift(i1, %d) + i3 = int_and(42, i2) + i4 = int_add(i1, i3) + finish(i4) + """ % (LONG_BIT-1) + expected = """ + [i0] + i5 = int_ge(i0, 0) + guard_true(i5) [] + i1 = int_and(i0, 7) + finish(i1) + """ + self.optimize_loop(ops, expected) + + # Of course any 'maybe-negative % power-of-two' can be turned into + # int_and(), but that's a bit harder to detect here because it turns + # into several operations, and of course it is wrong to just turn # int_mod(i0, 16) into int_and(i0, 15). 
ops = """ [i0] i1 = int_mod(i0, 16) - i2 = int_rshift(i1, 63) + i2 = int_rshift(i1, %d) i3 = int_and(16, i2) i4 = int_add(i1, i3) finish(i4) - """ + """ % (LONG_BIT-1) expected = """ [i0] i4 = int_and(i0, 15) @@ -4699,6 +4862,16 @@ py.test.skip("harder") self.optimize_loop(ops, expected) + def test_intmod_bounds_bug1(self): + ops = """ + [i0] + i1 = int_mod(i0, %d) + i2 = int_eq(i1, 0) + guard_false(i2) [] + finish() + """ % (-(1<<(LONG_BIT-1)),) + self.optimize_loop(ops, ops) + def test_bounded_lazy_setfield(self): ops = """ [p0, i0] @@ -4781,6 +4954,27 @@ def test_plain_virtual_string_copy_content(self): ops = """ + [i1] + p0 = newstr(6) + copystrcontent(s"hello!", p0, 0, 0, 6) + p1 = call(0, p0, s"abc123", descr=strconcatdescr) + i0 = strgetitem(p1, i1) + finish(i0) + """ + expected = """ + [i1] + p0 = newstr(6) + copystrcontent(s"hello!", p0, 0, 0, 6) + p1 = newstr(12) + copystrcontent(p0, p1, 0, 0, 6) + copystrcontent(s"abc123", p1, 0, 6, 6) + i0 = strgetitem(p1, i1) + finish(i0) + """ + self.optimize_strunicode_loop(ops, expected) + + def test_plain_virtual_string_copy_content_2(self): + ops = """ [] p0 = newstr(6) copystrcontent(s"hello!", p0, 0, 0, 6) @@ -4792,10 +4986,7 @@ [] p0 = newstr(6) copystrcontent(s"hello!", p0, 0, 0, 6) - p1 = newstr(12) - copystrcontent(p0, p1, 0, 0, 6) - copystrcontent(s"abc123", p1, 0, 6, 6) - i0 = strgetitem(p1, 0) + i0 = strgetitem(p0, 0) finish(i0) """ self.optimize_strunicode_loop(ops, expected) @@ -4812,6 +5003,34 @@ """ self.optimize_loop(ops, expected) + def test_known_equal_ints(self): + py.test.skip("in-progress") + ops = """ + [i0, i1, i2, p0] + i3 = int_eq(i0, i1) + guard_true(i3) [] + + i4 = int_lt(i2, i0) + guard_true(i4) [] + i5 = int_lt(i2, i1) + guard_true(i5) [] + + i6 = getarrayitem_gc(p0, i2) + finish(i6) + """ + expected = """ + [i0, i1, i2, p0] + i3 = int_eq(i0, i1) + guard_true(i3) [] + + i4 = int_lt(i2, i0) + guard_true(i4) [] + + i6 = getarrayitem_gc(p0, i3) + finish(i6) + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1,13 +1,13 @@ import py from pypy.rlib.objectmodel import instantiate from pypy.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, Storage, _sortboxes) + LLtypeMixin, BaseTest, Storage, _sortboxes, convert_old_style_to_targets) import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt import pypy.jit.metainterp.optimizeopt.virtualize as virtualize from pypy.jit.metainterp.optimizeopt import optimize_loop_1, ALL_OPTS_DICT, build_opt_chain from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt -from pypy.jit.metainterp.history import TreeLoop, LoopToken +from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken from pypy.jit.metainterp.jitprof import EmptyProfiler from pypy.jit.metainterp import executor, compile, resume, history from pypy.jit.metainterp.resoperation import rop, opname, ResOperation @@ -15,7 +15,7 @@ from pypy.jit.metainterp.optimizeopt.util import args_dict from pypy.jit.metainterp.optimizeopt.test.test_optimizebasic import FakeMetaInterpStaticData from pypy.config.pypyoption import get_pypy_config - +from pypy.jit.metainterp.optimizeopt.unroll import Inliner def 
test_build_opt_chain(): def check(chain, expected_names): @@ -23,49 +23,37 @@ assert names == expected_names # metainterp_sd = FakeMetaInterpStaticData(None) - chain, _ = build_opt_chain(metainterp_sd, "", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "") check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "heap:intbounds") - check(chain, ["OptInlineShortPreamble", "OptIntBounds", "OptHeap", "OptSimplify"]) + check(chain, ["OptIntBounds", "OptHeap", "OptSimplify"]) # chain, unroll = build_opt_chain(metainterp_sd, "unroll") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) assert unroll # - chain, _ = build_opt_chain(metainterp_sd, "aaa:bbb", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "aaa:bbb") check(chain, ["OptSimplify"]) # - chain, _ = build_opt_chain(metainterp_sd, "ffi", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "ffi") check(chain, ["OptFfiCall", "OptSimplify"]) # metainterp_sd.config = get_pypy_config(translating=True) assert not metainterp_sd.config.translation.jit_ffi - chain, _ = build_opt_chain(metainterp_sd, "ffi", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "ffi") check(chain, ["OptSimplify"]) # ____________________________________________________________ -class FakeDescr(compile.ResumeGuardDescr): - class rd_snapshot: - class prev: - prev = None - boxes = [] - boxes = [] - def clone_if_mutable(self): - return FakeDescr() - def __eq__(self, other): - return isinstance(other, Storage) or isinstance(other, FakeDescr) - - class BaseTestWithUnroll(BaseTest): enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" @@ -79,40 +67,41 @@ expected_preamble = self.parse(expected_preamble) if expected_short: expected_short = self.parse(expected_short) - loop.preamble = TreeLoop('preamble') - loop.preamble.inputargs = loop.inputargs - loop.preamble.token = LoopToken() - loop.preamble.start_resumedescr = FakeDescr() - # - self._do_optimize_loop(loop, call_pure_results) + + preamble = self.unroll_and_optimize(loop, call_pure_results) + # print print "Preamble:" - print loop.preamble.inputargs - if loop.preamble.operations: - print '\n'.join([str(o) for o in loop.preamble.operations]) + if preamble.operations: + print '\n'.join([str(o) for o in preamble.operations]) else: print 'Failed!' 
print print "Loop:" - print loop.inputargs print '\n'.join([str(o) for o in loop.operations]) print if expected_short: print "Short Preamble:" - short = loop.preamble.token.short_preamble[0] - print short.inputargs - print '\n'.join([str(o) for o in short.operations]) + short = loop.operations[0].getdescr().short_preamble + print '\n'.join([str(o) for o in short]) print assert expected != "crash!", "should have raised an exception" - self.assert_equal(loop, expected) + self.assert_equal(loop, convert_old_style_to_targets(expected, jump=True)) + assert loop.operations[0].getdescr() == loop.operations[-1].getdescr() if expected_preamble: - self.assert_equal(loop.preamble, expected_preamble, + self.assert_equal(preamble, convert_old_style_to_targets(expected_preamble, jump=False), text_right='expected preamble') + assert preamble.operations[-1].getdescr() == loop.operations[0].getdescr() if expected_short: - self.assert_equal(short, expected_short, + short_preamble = TreeLoop('short preamble') + assert short[0].getopnum() == rop.LABEL + short_preamble.inputargs = short[0].getarglist() + short_preamble.operations = short + self.assert_equal(short_preamble, convert_old_style_to_targets(expected_short, jump=True), text_right='expected short preamble') + assert short[-1].getdescr() == loop.operations[0].getdescr() return loop @@ -234,7 +223,7 @@ """ % expected_value self.optimize_loop(ops, expected) - def test_reverse_of_cast(self): + def test_reverse_of_cast_1(self): ops = """ [i0] p0 = cast_int_to_ptr(i0) @@ -246,6 +235,8 @@ jump(i0) """ self.optimize_loop(ops, expected) + + def test_reverse_of_cast_2(self): ops = """ [p0] i1 = cast_ptr_to_int(p0) @@ -931,17 +922,14 @@ [i] guard_no_exception() [] i1 = int_add(i, 3) - guard_no_exception() [] i2 = call(i1, descr=nonwritedescr) guard_no_exception() [i1, i2] - guard_no_exception() [] i3 = call(i2, descr=nonwritedescr) jump(i1) # the exception is considered lost when we loop back """ - # note that 'guard_no_exception' at the very start is kept around - # for bridges, but not for loops preamble = """ [i] + guard_no_exception() [] # occurs at the start of bridges, so keep it i1 = int_add(i, 3) i2 = call(i1, descr=nonwritedescr) guard_no_exception() [i1, i2] @@ -950,6 +938,7 @@ """ expected = """ [i] + guard_no_exception() [] # occurs at the start of bridges, so keep it i1 = int_add(i, 3) i2 = call(i1, descr=nonwritedescr) guard_no_exception() [i1, i2] @@ -958,6 +947,23 @@ """ self.optimize_loop(ops, expected, preamble) + def test_bug_guard_no_exception(self): + ops = """ + [] + i0 = call(123, descr=nonwritedescr) + p0 = call(0, "xy", descr=s2u_descr) # string -> unicode + guard_no_exception() [] + escape(p0) + jump() + """ + expected = """ + [] + i0 = call(123, descr=nonwritedescr) + escape(u"xy") + jump() + """ + self.optimize_loop(ops, expected) + # ---------- def test_call_loopinvariant(self): @@ -1166,6 +1172,7 @@ i1 = getfield_gc(p0, descr=valuedescr) i2 = int_sub(i1, 1) i3 = int_add(i0, i1) + i4 = same_as(i2) # This same_as should be killed by backend jump(i3, i2, i1) """ expected = """ @@ -1176,6 +1183,75 @@ """ self.optimize_loop(ops, expected, preamble) + def test_virtual_recursive(self): + ops = """ + [p0] + p41 = getfield_gc(p0, descr=nextdescr) + i0 = getfield_gc(p41, descr=valuedescr) + p1 = new_with_vtable(ConstClass(node_vtable2)) + p2 = new_with_vtable(ConstClass(node_vtable2)) + setfield_gc(p2, p1, descr=nextdescr) + setfield_gc(p1, p2, descr=nextdescr) + i1 = int_add(i0, 1) + setfield_gc(p2, i1, descr=valuedescr) + jump(p1) + """ + 
preamble = """ + [p0] + p41 = getfield_gc(p0, descr=nextdescr) + i0 = getfield_gc(p41, descr=valuedescr) + i3 = int_add(i0, 1) + jump(i3) + """ + expected = """ + [i0] + i1 = int_add(i0, 1) + jump(i1) + """ + self.optimize_loop(ops, expected, preamble) + + def test_virtual_recursive_forced(self): + ops = """ + [p0] + p41 = getfield_gc(p0, descr=nextdescr) + i0 = getfield_gc(p41, descr=valuedescr) + p1 = new_with_vtable(ConstClass(node_vtable2)) + p2 = new_with_vtable(ConstClass(node_vtable2)) + setfield_gc(p2, p1, descr=nextdescr) + setfield_gc(p1, p2, descr=nextdescr) + i1 = int_add(i0, 1) + setfield_gc(p2, i1, descr=valuedescr) + setfield_gc(p0, p1, descr=nextdescr) + jump(p1) + """ + preamble = """ + [p0] + p41 = getfield_gc(p0, descr=nextdescr) + i0 = getfield_gc(p41, descr=valuedescr) + i1 = int_add(i0, 1) + p1 = new_with_vtable(ConstClass(node_vtable2)) + p2 = new_with_vtable(ConstClass(node_vtable2)) + setfield_gc(p2, i1, descr=valuedescr) + setfield_gc(p2, p1, descr=nextdescr) + setfield_gc(p1, p2, descr=nextdescr) + setfield_gc(p0, p1, descr=nextdescr) + jump(p1) + """ + loop = """ + [p0] + p41 = getfield_gc(p0, descr=nextdescr) + i0 = getfield_gc(p41, descr=valuedescr) + i1 = int_add(i0, 1) + p1 = new_with_vtable(ConstClass(node_vtable2)) + p2 = new_with_vtable(ConstClass(node_vtable2)) + setfield_gc(p2, i1, descr=valuedescr) + setfield_gc(p2, p1, descr=nextdescr) + setfield_gc(p1, p2, descr=nextdescr) + setfield_gc(p0, p1, descr=nextdescr) + jump(p1) + """ + self.optimize_loop(ops, loop, preamble) + def test_virtual_constant_isnull(self): ops = """ [i0] @@ -1233,6 +1309,7 @@ p30 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p30, i28, descr=nextdescr) setfield_gc(p3, p30, descr=valuedescr) + p46 = same_as(p30) # This same_as should be killed by backend jump(i29, p30, p3) """ expected = """ @@ -1240,8 +1317,8 @@ i28 = int_add(i0, 1) i29 = int_add(i28, 1) p30 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p30, i28, descr=nextdescr) setfield_gc(p3, p30, descr=valuedescr) - setfield_gc(p30, i28, descr=nextdescr) jump(i29, p30, p3) """ self.optimize_loop(ops, expected, preamble) @@ -2034,7 +2111,9 @@ guard_true(i3) [] i4 = int_neg(i2) setfield_gc(p1, i2, descr=valuedescr) - jump(p1, i1, i2, i4, i4) + i7 = same_as(i2) # This same_as should be killed by backend + i6 = same_as(i4) + jump(p1, i1, i2, i4, i6) """ expected = """ [p1, i1, i2, i4, i5] @@ -2064,7 +2143,8 @@ i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) escape() - jump(p1, i2, i4, i4) + i5 = same_as(i4) + jump(p1, i2, i4, i5) """ expected = """ [p1, i2, i4, i5] @@ -2093,7 +2173,8 @@ i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) escape() - jump(p1, i2, i4, i4) + i5 = same_as(i4) + jump(p1, i2, i4, i5) """ expected = """ [p1, i2, i4, i5] @@ -2123,7 +2204,9 @@ guard_true(i5) [] i4 = int_neg(i2) setfield_gc(p1, i2, descr=valuedescr) - jump(p1, i1, i2, i4, i4) + i8 = same_as(i2) # This same_as should be killed by backend + i7 = same_as(i4) + jump(p1, i1, i2, i4, i7) """ expected = """ [p1, i1, i2, i4, i7] @@ -2168,13 +2251,13 @@ ops = """ [p0, i0, p1, i1, i2] setfield_gc(p0, i1, descr=valuedescr) - copystrcontent(p0, i0, p1, i1, i2) + copystrcontent(p0, p1, i0, i1, i2) escape() jump(p0, i0, p1, i1, i2) """ expected = """ [p0, i0, p1, i1, i2] - copystrcontent(p0, i0, p1, i1, i2) + copystrcontent(p0, p1, i0, i1, i2) setfield_gc(p0, i1, descr=valuedescr) escape() jump(p0, i0, p1, i1, i2) @@ -2349,7 +2432,8 @@ p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p4, descr=nextdescr) 
setfield_gc(p1, p2, descr=nextdescr) - jump(p1, i2, i4, p4, i4) + i101 = same_as(i4) + jump(p1, i2, i4, p4, i101) """ expected = """ [p1, i2, i4, p4, i5] @@ -3192,7 +3276,15 @@ setfield_gc(p1, i3, descr=valuedescr) jump(p1, i4, i3) ''' - self.optimize_loop(ops, ops, ops) + preamble = ''' + [p1, i1, i4] From noreply at buildbot.pypy.org Sun Dec 11 18:33:13 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 18:33:13 +0100 (CET) Subject: [pypy-commit] pypy default: Added a FAQ entry. Message-ID: <20111211173313.E218682210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50388:b2c92d4e8383 Date: 2011-12-11 18:32 +0100 http://bitbucket.org/pypy/pypy/changeset/b2c92d4e8383/ Log: Added a FAQ entry. diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -112,10 +112,32 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. +Note that the JIT has a very high warm-up cost, meaning that the +programs are slow at the beginning. If you want to compare the timings +with CPython, even relatively simple programs need to run *at least* one +second, preferrably at least a few seconds. Large, complicated programs +need even more time to warm-up the JIT. + .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +--------------------------------------------------------------- +Couldn't the JIT dump and reload already-compiled machine code? +--------------------------------------------------------------- + +No, we found no way of doing that. The JIT generates machine code +containing a large number of constant addresses --- constant at the time +the machine code is written. The vast majority is probably not at all +constants that you find in the executable, with a nice link name. E.g. +the addresses of Python classes are used all the time, but Python +classes don't come statically from the executable; they are created anew +every time you restart your program. This makes saving and reloading +machine code completely impossible without some very advanced way of +mapping addresses in the old (now-dead) process to addresses in the new +process, including checking that all the previous assumptions about the +(now-dead) object are still true about the new object. + .. _`prolog and javascript`: From noreply at buildbot.pypy.org Sun Dec 11 19:31:56 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 11 Dec 2011 19:31:56 +0100 (CET) Subject: [pypy-commit] pypy default: Recommit the tolist() for numpy stuff, now that it translates. Message-ID: <20111211183156.DB31082210@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50389:cf00150bde7c Date: 2011-12-11 13:31 -0500 http://bitbucket.org/pypy/pypy/changeset/cf00150bde7c/ Log: Recommit the tolist() for numpy stuff, now that it translates. 
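A minimal usage sketch of the re-added feature, distilled from the tests in the changeset below (it assumes an interpreter where the numpypy module from this revision is importable; the variable names are only for illustration):

    from numpypy import array, int32

    a = array([[1, 2], [3, 4]])
    assert a.tolist() == [[1, 2], [3, 4]]      # nested plain Python lists
    assert type(a.tolist()[0][0]) is int       # elements become builtin ints, not boxes
    assert int32(23).tolist() == 23            # scalar boxes unwrap as well
    b = array([[17.1, 27.2], [40.3, 50.3]])
    assert b[0].tolist() == [17.1, 27.2]       # views and slices work too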
diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -91,6 +91,9 @@ descr_neg = _unaryop_impl("negative") descr_abs = _unaryop_impl("absolute") + def descr_tolist(self, space): + return self.get_dtype(space).itemtype.to_builtin_type(space, self) + class W_BoolBox(W_GenericBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("bool") @@ -179,6 +182,8 @@ __neg__ = interp2app(W_GenericBox.descr_neg), __abs__ = interp2app(W_GenericBox.descr_abs), + + tolist = interp2app(W_GenericBox.descr_tolist), ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -876,6 +876,17 @@ arr.setshape(space, new_shape) return arr + def descr_tolist(self, space): + if len(self.shape) == 0: + assert isinstance(self, Scalar) + return self.value.descr_tolist(space) + w_result = space.newlist([]) + for i in range(self.shape[0]): + space.call_method(w_result, "append", + space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist") + ) + return w_result + def descr_mean(self, space): return space.div(self.descr_sum(space), space.wrap(self.find_size())) @@ -1485,6 +1496,7 @@ copy = interp2app(BaseArray.descr_copy), reshape = interp2app(BaseArray.descr_reshape), + tolist = interp2app(BaseArray.descr_tolist), ) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -879,6 +879,45 @@ b[0] = 3 assert b.__debug_repr__() == 'Call2(add, forced=Array)' + def test_tolist_scalar(self): + from numpypy import int32, bool_ + x = int32(23) + assert x.tolist() == 23 + assert type(x.tolist()) is int + y = bool_(True) + assert y.tolist() is True + + def test_tolist_zerodim(self): + from numpypy import array + x = array(3) + assert x.tolist() == 3 + assert type(x.tolist()) is int + + def test_tolist_singledim(self): + from numpypy import array + a = array(range(5)) + assert a.tolist() == [0, 1, 2, 3, 4] + assert type(a.tolist()[0]) is int + b = array([0.2, 0.4, 0.6]) + assert b.tolist() == [0.2, 0.4, 0.6] + + def test_tolist_multidim(self): + from numpypy import array + a = array([[1, 2], [3, 4]]) + assert a.tolist() == [[1, 2], [3, 4]] + + def test_tolist_view(self): + from numpypy import array + a = array([[1,2],[3,4]]) + assert (a + a).tolist() == [[2, 4], [6, 8]] + + def test_tolist_slice(self): + from numpypy import array + a = array([[17.1, 27.2], [40.3, 50.3]]) + assert a[:,0].tolist() == [17.1, 40.3] + assert a[0].tolist() == [17.1, 27.2] + + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -78,6 +78,9 @@ w_obj.__init__(self._coerce(space, w_item).value) return w_obj + def to_builtin_type(self, space, box): + return space.wrap(self.for_computation(self.unbox(box))) + def _coerce(self, space, w_item): raise NotImplementedError @@ -180,6 +183,9 @@ def _coerce(self, space, w_item): return self.box(space.is_true(w_item)) + def to_builtin_type(self, space, w_item): + return space.wrap(self.unbox(w_item)) + def str_format(self, box): value = 
self.unbox(box) return "True" if value else "False" From noreply at buildbot.pypy.org Sun Dec 11 21:04:02 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 11 Dec 2011 21:04:02 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Put a compiled version of this paper in. Message-ID: <20111211200402.B18CE82210@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r3981:dd2f021e2aa1 Date: 2011-12-11 15:03 -0500 http://bitbucket.org/pypy/extradoc/changeset/dd2f021e2aa1/ Log: Put a compiled version of this paper in. diff --git a/talk/iwtc11/licm.pdf b/talk/iwtc11/licm.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ff2a7bf547f542771702ac86ea8531f8ba16cc28 GIT binary patch [cut] From noreply at buildbot.pypy.org Sun Dec 11 21:57:56 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 21:57:56 +0100 (CET) Subject: [pypy-commit] pypy default: fix comment Message-ID: <20111211205756.C077982210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50390:a83bd631dde3 Date: 2011-12-11 19:03 +0100 http://bitbucket.org/pypy/pypy/changeset/a83bd631dde3/ Log: fix comment diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -261,8 +261,10 @@ return fail_index def execute_token(self, loop_token): - """Calls the assembler generated for the given loop. - Returns the ResOperation that failed, of type rop.FAIL. + """Calls the fake 'assembler' generated for the given loop. + Returns the descr of the last executed operation: either the one + attached to the failing guard, or the one attached to the FINISH. + Use set_future_value_xxx() before, and get_latest_value_xxx() after. """ fail_index = self._execute_token(loop_token) return self.get_fail_descr_from_number(fail_index) From noreply at buildbot.pypy.org Sun Dec 11 21:57:57 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 21:57:57 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Fixed virtualizables. Message-ID: <20111211205757.F167982210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50391:ab500e92817c Date: 2011-12-11 21:05 +0100 http://bitbucket.org/pypy/pypy/changeset/ab500e92817c/ Log: Fixed virtualizables. 
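The key piece is the new patch_new_loop_to_load_virtualizable_fields() helper in compile.py (first hunk below): a freshly compiled loop no longer takes the virtualizable's fields as separate input arguments, but reloads them explicitly at the loop header. Roughly, for a virtualizable with one static field and one array field of length 2, the prepended operations have the shape sketched here (written the way the optimizer tests write traces; the descr and box names are invented for illustration and are not produced verbatim by the helper):

    loop_header = """
    [p_vable]                                    # only the red args stay as inputs
    i_x = getfield_gc(p_vable, descr=xdescr)     # one GETFIELD_GC per static field
    p_l = getfield_gc(p_vable, descr=ldescr)     # load the array field itself...
    i_0 = getarrayitem_gc(p_l, 0, descr=adescr)  # ...then one GETARRAYITEM_GC per item
    i_1 = getarrayitem_gc(p_l, 1, descr=adescr)
    # original loop operations follow
    """

This is also why several tests in test_virtualizable.py switch from check_resops() to check_simple_loop(), or start accepting a couple of getfield_gc operations "at the header of the loop".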
diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -11,7 +11,7 @@ from pypy.jit.metainterp.resoperation import ResOperation, rop, get_deep_immutable_oplist from pypy.jit.metainterp.history import TreeLoop, Box, History, JitCellToken, TargetToken from pypy.jit.metainterp.history import AbstractFailDescr, BoxInt -from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const +from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const, ConstInt from pypy.jit.metainterp import history from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.jit.metainterp.optimize import InvalidLoop @@ -252,7 +252,44 @@ record_loop_or_bridge(metainterp_sd, loop) return target_token +def patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd): + vinfo = jitdriver_sd.virtualizable_info + extra_ops = [] + inputargs = loop.inputargs + vable_box = inputargs[jitdriver_sd.index_of_virtualizable] + i = jitdriver_sd.num_red_args + loop.inputargs = inputargs[:i] + for descr in vinfo.static_field_descrs: + assert i < len(inputargs) + box = inputargs[i] + extra_ops.append( + ResOperation(rop.GETFIELD_GC, [vable_box], box, descr)) + i += 1 + arrayindex = 0 + for descr in vinfo.array_field_descrs: + vable = vable_box.getref_base() + arraylen = vinfo.get_array_length(vable, arrayindex) + arraybox = BoxPtr() + extra_ops.append( + ResOperation(rop.GETFIELD_GC, [vable_box], arraybox, descr)) + arraydescr = vinfo.array_descrs[arrayindex] + assert i + arraylen <= len(inputargs) + for index in range(arraylen): + box = inputargs[i] + extra_ops.append( + ResOperation(rop.GETARRAYITEM_GC, + [arraybox, ConstInt(index)], + box, descr=arraydescr)) + i += 1 + arrayindex += 1 + assert i == len(inputargs) + loop.operations = extra_ops + loop.operations + def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): + vinfo = jitdriver_sd.virtualizable_info + if vinfo is not None: + patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd) + original_jitcell_token = loop.original_jitcell_token jitdriver_sd.on_compile(metainterp_sd.logger_ops, original_jitcell_token, loop.operations, type, greenkey) @@ -776,6 +813,7 @@ jitcell_token = make_jitcell_token(jitdriver_sd) # 'nb_red_args' might be smaller than len(redboxes), # because it doesn't include the virtualizable boxes. 
+ XXX # review and fix me :-) nb_red_args = jitdriver_sd.num_red_args k = jitdriver_sd.portal_runner_adr funcbox = history.ConstInt(heaptracker.adr2int(k)) diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -2011,7 +2011,8 @@ "NOT_RPYTHON" args = [] num_green_args = self.jitdriver_sd.num_green_args - for box in live_arg_boxes[num_green_args:]: + num_red_args = self.jitdriver_sd.num_red_args + for box in live_arg_boxes[num_green_args:num_green_args+num_red_args]: if box.type == history.INT: args.append(box.getint()) elif box.type == history.REF: args.append(box.getref_base()) elif box.type == history.FLOAT: args.append(box.getfloatstorage()) diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -77,7 +77,7 @@ return xy.inst_x res = self.meta_interp(f, [20]) assert res == 30 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) def test_preexisting_access_2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -102,7 +102,8 @@ assert f(5) == 185 res = self.meta_interp(f, [5]) assert res == 185 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, + getfield_gc=2) # <= at the header of the loop def test_two_paths_access(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -124,7 +125,7 @@ return xy.inst_x res = self.meta_interp(f, [18]) assert res == 10118 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=2) def test_synchronize_in_return(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -146,7 +147,7 @@ return xy.inst_x res = self.meta_interp(f, [18]) assert res == 10180 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=2) def test_virtualizable_and_greens(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n', 'xy'], @@ -174,7 +175,7 @@ return res res = self.meta_interp(f, [40]) assert res == 50 * 4 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=4) def test_double_frame(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy', 'other'], @@ -197,7 +198,8 @@ return xy.inst_x res = self.meta_interp(f, [20]) assert res == 134 - self.check_resops(setfield_gc=2, getfield_gc=1) + self.check_simple_loop(setfield_gc=1, getfield_gc=0) + self.check_resops(setfield_gc=2, getfield_gc=3) # ------------------------------ @@ -247,8 +249,8 @@ return xy2.inst_l1[2] res = self.meta_interp(f, [16]) assert res == 3001 + 16 * 80 - self.check_resops(setarrayitem_gc=0, setfield_gc=0, - getarrayitem_gc=0, getfield_gc=0) + self.check_simple_loop(setarrayitem_gc=0, setfield_gc=0, + getarrayitem_gc=0, getfield_gc=0) def test_synchronize_arrays_in_return(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -278,7 +280,8 @@ assert f(18) == 10360 res = self.meta_interp(f, [18]) assert res == 10360 - self.check_resops(setfield_gc=0, getarrayitem_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getarrayitem_gc=0, + getfield_gc=0, setarrayitem_gc=0) def test_array_length(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -304,8 +307,8 @@ return xy2.inst_l1[1] res = self.meta_interp(f, [18]) assert res == 2941309 + 18 - 
self.check_resops(setfield_gc=0, getarrayitem_gc=0, - arraylen_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getarrayitem_gc=0, + arraylen_gc=0, getfield_gc=0) def test_residual_function(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -338,8 +341,8 @@ return xy2.inst_l1[1] res = self.meta_interp(f, [18]) assert res == 2941309 + 18 - self.check_resops(call=2, setfield_gc=0, getarrayitem_gc=0, - arraylen_gc=2, getfield_gc=0) + self.check_simple_loop(call=1, setfield_gc=0, getarrayitem_gc=0, + arraylen_gc=1, getfield_gc=0) def test_double_frame_array(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2', 'other'], @@ -375,8 +378,8 @@ expected = f(20) res = self.meta_interp(f, [20], enable_opts='') assert res == expected - self.check_resops(setarrayitem_gc=1, setfield_gc=0, - getarrayitem_gc=1, arraylen_gc=1, getfield_gc=1) + self.check_simple_loop(setarrayitem_gc=1, setfield_gc=0, + getarrayitem_gc=1, arraylen_gc=1, getfield_gc=1) # ------------------------------ @@ -423,7 +426,8 @@ assert f(18) == 10360 res = self.meta_interp(f, [18]) assert res == 10360 - self.check_resops(setfield_gc=0, getarrayitem_gc=0, getfield_gc=0) + self.check_simple_loop(getfield_gc=0, getarrayitem_gc=0, + setfield_gc=0, setarrayitem_gc=0) # ------------------------------ @@ -457,7 +461,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) def test_virtualizable_with_array(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'x', 'frame'], @@ -491,7 +495,8 @@ res = self.meta_interp(f, [10, 1], listops=True) assert res == f(10, 1) - self.check_resops(getarrayitem_gc=0) + self.check_simple_loop(getfield_gc=0, getarrayitem_gc=0) + self.check_resops(getfield_gc=2, getarrayitem_gc=4) def test_subclass_of_virtualizable(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -519,7 +524,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) def test_external_pass(self): jitdriver = JitDriver(greens = [], reds = ['n', 'z', 'frame'], @@ -1037,7 +1042,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) from pypy.jit.backend.test.support import BaseCompiledMixin if isinstance(self, BaseCompiledMixin): @@ -1197,7 +1202,8 @@ res = self.meta_interp(f, [10]) assert res == 155 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=2) def test_blackhole_should_synchronize(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -1233,7 +1239,8 @@ res = self.meta_interp(f, [10]) assert res == 155 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=2) def test_blackhole_should_not_reenter(self): if not self.basic: diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -296,7 +296,7 @@ # 'vable_token' field afterwards if vinfo is not None: virtualizable = args[index_of_virtualizable] - virtualizable = vinfo.cast_to_vtype(virtualizable) + virtualizable = vinfo.cast_gcref_to_vtype(virtualizable) vinfo.reset_vable_token(virtualizable) # # Record in the memmgr that we 
just ran this loop, From noreply at buildbot.pypy.org Sun Dec 11 21:57:59 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 21:57:59 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Fix for compile_tmp_callback(). Message-ID: <20111211205759.1FEC282210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50392:f9e61788fa85 Date: 2011-12-11 21:08 +0100 http://bitbucket.org/pypy/pypy/changeset/f9e61788fa85/ Log: Fix for compile_tmp_callback(). diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -809,15 +809,14 @@ version of the code may end up replacing it. """ # 'redboxes' is only used to know the types of red arguments. - inputargs = [box.clonebox() for box in redboxes] jitcell_token = make_jitcell_token(jitdriver_sd) # 'nb_red_args' might be smaller than len(redboxes), # because it doesn't include the virtualizable boxes. - XXX # review and fix me :-) nb_red_args = jitdriver_sd.num_red_args + inputargs = [box.clonebox() for box in redboxes[:nb_red_args]] k = jitdriver_sd.portal_runner_adr funcbox = history.ConstInt(heaptracker.adr2int(k)) - callargs = [funcbox] + greenboxes + inputargs[:nb_red_args] + callargs = [funcbox] + greenboxes + inputargs # result_type = jitdriver_sd.result_type if result_type == history.INT: diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -55,6 +55,7 @@ warmstate = FakeState() on_compile = staticmethod(lambda *args: None) on_compile_bridge = staticmethod(lambda *args: None) + virtualizable_info = None def test_compile_loop(): cpu = FakeCPU() @@ -175,14 +176,14 @@ [BoxInt(56), ConstInt(78), BoxInt(90)]) # raiseme = None - # arg -190 is passed in, but dropped - fail_descr = cpu.execute_token(loop_token, -156, -178, -190) + # only two arguments must be passed in + fail_descr = cpu.execute_token(loop_token, -156, -178) assert fail_descr is FakeJitDriverSD().portal_finishtoken # EXC = lltype.GcStruct('EXC') llexc = lltype.malloc(EXC) raiseme = LLException("exception class", llexc) - fail_descr = cpu.execute_token(loop_token, -156, -178, -190) + fail_descr = cpu.execute_token(loop_token, -156, -178) assert isinstance(fail_descr, compile.PropagateExceptionDescr) got = cpu.grab_exc_value() assert lltype.cast_opaque_ptr(lltype.Ptr(EXC), got) == llexc @@ -191,7 +192,7 @@ class ExitFrameWithExceptionRef(Exception): pass FakeMetaInterpSD.cpu = cpu - fail_descr = cpu.execute_token(loop_token, -156, -178, -190) + fail_descr = cpu.execute_token(loop_token, -156, -178) try: fail_descr.handle_fail(FakeMetaInterpSD(), None) except FakeMetaInterpSD.ExitFrameWithExceptionRef, e: From noreply at buildbot.pypy.org Sun Dec 11 21:58:00 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 21:58:00 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Fix test. Message-ID: <20111211205800.4796782210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50393:c87a6c079a01 Date: 2011-12-11 21:10 +0100 http://bitbucket.org/pypy/pypy/changeset/c87a6c079a01/ Log: Fix test. 
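The hunk below adapts the test to the calling convention that this branch introduces: red arguments are passed straight to execute_token() instead of going through set_future_value_xxx() first, and results are still read back with get_latest_value_xxx(). A short sketch of the new test idiom (looptoken and the compiled operations are assumed to be set up exactly as in the surrounding test):

    # old style, removed on this branch:
    #   self.cpu.set_future_value_int(0, 2)
    #   fail = self.cpu.execute_token(looptoken)
    # new style: pass the input values directly
    fail = self.cpu.execute_token(looptoken, 2)
    assert fail.identifier == 2                  # descr attached to the failing guard
    res = self.cpu.get_latest_value_int(0)
    assert res == 10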
diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -2955,8 +2955,7 @@ operations[6].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 10 @@ -2968,8 +2967,7 @@ ] self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 3 res = self.cpu.get_latest_value_int(0) assert res == -10 From noreply at buildbot.pypy.org Sun Dec 11 21:58:01 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 21:58:01 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Fix test. Message-ID: <20111211205801.6894382210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50394:6ea88ccf9778 Date: 2011-12-11 21:21 +0100 http://bitbucket.org/pypy/pypy/changeset/6ea88ccf9778/ Log: Fix test. diff --git a/pypy/jit/metainterp/test/test_tl.py b/pypy/jit/metainterp/test/test_tl.py --- a/pypy/jit/metainterp/test/test_tl.py +++ b/pypy/jit/metainterp/test/test_tl.py @@ -72,16 +72,16 @@ res = self.meta_interp(main, [0, 6], listops=True, backendopt=True) assert res == 5040 - self.check_resops({'jump': 1, 'int_le': 2, 'guard_value': 1, - 'int_mul': 2, 'guard_false': 2, 'int_sub': 2}) + self.check_simple_loop({'jump': 1, 'int_le': 1, + 'int_mul': 1, 'guard_false': 1, 'int_sub': 1}) def test_tl_2(self): main = self._get_main() res = self.meta_interp(main, [1, 10], listops=True, backendopt=True) assert res == main(1, 10) - self.check_resops({'int_le': 2, 'int_sub': 2, 'jump': 1, - 'guard_false': 2, 'guard_value': 1}) + self.check_simple_loop({'int_le': 1, 'int_sub': 1, 'jump': 1, + 'guard_false': 1}) def test_tl_call(self, listops=True, policy=None): from pypy.jit.tl.tl import interp From noreply at buildbot.pypy.org Sun Dec 11 21:58:02 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 21:58:02 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Translation fixes (front-end only so far). Message-ID: <20111211205802.8E65F82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50395:874a608a3377 Date: 2011-12-11 21:28 +0100 http://bitbucket.org/pypy/pypy/changeset/874a608a3377/ Log: Translation fixes (front-end only so far). diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -264,8 +264,16 @@ """Calls the assembler generated for the given loop. Returns the ResOperation that failed, of type rop.FAIL. 
""" - # XXX RPythonize me - for index, x in enumerate(args): + if we_are_translated(): + assert len(args) <= 10 + iterator = unrolling_iterable_10 + else: + iterator = range(len(args)) + # + for index in iterator: + if index == len(args): + break + x = args[index] TYPE = lltype.typeOf(x) if TYPE == lltype.Signed: llimpl.set_future_value_int(index, x) @@ -692,6 +700,8 @@ return x +unrolling_iterable_10 = unrolling_iterable(range(10)) + def make_getargs(ARGS): argsiter = unrolling_iterable(ARGS) args_n = len([ARG for ARG in ARGS if ARG is not ootype.Void]) diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -771,16 +771,16 @@ def assembler_call_helper(failindex, virtualizableref): fail_descr = self.cpu.get_fail_descr_from_number(failindex) - while True: - if vinfo is not None: - virtualizable = lltype.cast_opaque_ptr( - vinfo.VTYPEPTR, virtualizableref) - vinfo.reset_vable_token(virtualizable) - try: - loop_token = fail_descr.handle_fail(self.metainterp_sd, jd) - except JitException, e: - return handle_jitexception(e) - fail_descr = self.execute_token(loop_token) + if vinfo is not None: + virtualizable = lltype.cast_opaque_ptr( + vinfo.VTYPEPTR, virtualizableref) + vinfo.reset_vable_token(virtualizable) + try: + fail_descr.handle_fail(self.metainterp_sd, jd) + except JitException, e: + return handle_jitexception(e) + else: + assert 0, "should have raised" jd._assembler_call_helper = assembler_call_helper # for debugging jd._assembler_helper_ptr = self.helper_func( From noreply at buildbot.pypy.org Sun Dec 11 21:58:03 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 21:58:03 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Fix test. Message-ID: <20111211205803.AFA4C82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50396:3481aa4226dc Date: 2011-12-11 21:56 +0100 http://bitbucket.org/pypy/pypy/changeset/3481aa4226dc/ Log: Fix test. 
diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -303,18 +303,11 @@ exc_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) cls.exc_vtable = exc_vtable - class FakeLoopToken: + class FakeFailDescr(object): def __init__(self, no): self.no = no - self.generation = 0 - - class FakeFailDescr(object): - def __init__(self, looptoken): - assert isinstance(looptoken, FakeLoopToken) - self.looptoken = looptoken - def handle_fail(self, metainterp_sd, jitdrivers_sd): - no = self.looptoken.no + no = self.no if no == 0: raise metainterp_sd.warmrunnerdesc.DoneWithThisFrameInt(3) if no == 1: @@ -326,7 +319,7 @@ raise metainterp_sd.warmrunnerdesc.ExitFrameWithExceptionRef( metainterp_sd.cpu, lltype.cast_opaque_ptr(llmemory.GCREF, exc)) - return self.looptoken + assert 0 class FakeDescr: def as_vtable_size_descr(self): @@ -353,11 +346,10 @@ sizeof = nodescr def get_fail_descr_from_number(self, no): - return FakeFailDescr(FakeLoopToken(no)) + return FakeFailDescr(no) - def execute_token(self, token): - assert token.no == 2 - return FakeFailDescr(FakeLoopToken(1)) + def execute_token(self, token, *args): + assert 0 driver = JitDriver(reds = ['red'], greens = ['green']) @@ -381,7 +373,6 @@ [jd] = self.desc.jitdrivers_sd assert jd._assembler_call_helper(0, 0) == 3 assert jd._assembler_call_helper(1, 0) == 10 - assert jd._assembler_call_helper(2, 0) == 10 try: jd._assembler_call_helper(3, 0) except LLException, lle: From noreply at buildbot.pypy.org Sun Dec 11 21:58:04 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 21:58:04 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20111211205804.D6D0B82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50397:6fb87770b5d2 Date: 2011-12-11 21:57 +0100 http://bitbucket.org/pypy/pypy/changeset/6fb87770b5d2/ Log: merge heads diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -91,6 +91,9 @@ descr_neg = _unaryop_impl("negative") descr_abs = _unaryop_impl("absolute") + def descr_tolist(self, space): + return self.get_dtype(space).itemtype.to_builtin_type(space, self) + class W_BoolBox(W_GenericBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("bool") @@ -179,6 +182,8 @@ __neg__ = interp2app(W_GenericBox.descr_neg), __abs__ = interp2app(W_GenericBox.descr_abs), + + tolist = interp2app(W_GenericBox.descr_tolist), ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -876,6 +876,17 @@ arr.setshape(space, new_shape) return arr + def descr_tolist(self, space): + if len(self.shape) == 0: + assert isinstance(self, Scalar) + return self.value.descr_tolist(space) + w_result = space.newlist([]) + for i in range(self.shape[0]): + space.call_method(w_result, "append", + space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist") + ) + return w_result + def descr_mean(self, space): return space.div(self.descr_sum(space), space.wrap(self.find_size())) @@ -1485,6 +1496,7 @@ copy = interp2app(BaseArray.descr_copy), reshape = interp2app(BaseArray.descr_reshape), + tolist = interp2app(BaseArray.descr_tolist), ) diff --git 
a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -879,6 +879,45 @@ b[0] = 3 assert b.__debug_repr__() == 'Call2(add, forced=Array)' + def test_tolist_scalar(self): + from numpypy import int32, bool_ + x = int32(23) + assert x.tolist() == 23 + assert type(x.tolist()) is int + y = bool_(True) + assert y.tolist() is True + + def test_tolist_zerodim(self): + from numpypy import array + x = array(3) + assert x.tolist() == 3 + assert type(x.tolist()) is int + + def test_tolist_singledim(self): + from numpypy import array + a = array(range(5)) + assert a.tolist() == [0, 1, 2, 3, 4] + assert type(a.tolist()[0]) is int + b = array([0.2, 0.4, 0.6]) + assert b.tolist() == [0.2, 0.4, 0.6] + + def test_tolist_multidim(self): + from numpypy import array + a = array([[1, 2], [3, 4]]) + assert a.tolist() == [[1, 2], [3, 4]] + + def test_tolist_view(self): + from numpypy import array + a = array([[1,2],[3,4]]) + assert (a + a).tolist() == [[2, 4], [6, 8]] + + def test_tolist_slice(self): + from numpypy import array + a = array([[17.1, 27.2], [40.3, 50.3]]) + assert a[:,0].tolist() == [17.1, 40.3] + assert a[0].tolist() == [17.1, 27.2] + + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -78,6 +78,9 @@ w_obj.__init__(self._coerce(space, w_item).value) return w_obj + def to_builtin_type(self, space, box): + return space.wrap(self.for_computation(self.unbox(box))) + def _coerce(self, space, w_item): raise NotImplementedError @@ -180,6 +183,9 @@ def _coerce(self, space, w_item): return self.box(space.is_true(w_item)) + def to_builtin_type(self, space, w_item): + return space.wrap(self.unbox(w_item)) + def str_format(self, box): value = self.unbox(box) return "True" if value else "False" From noreply at buildbot.pypy.org Sun Dec 11 22:42:26 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 22:42:26 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Switch to a less magical way of specializing execute_token(), Message-ID: <20111211214226.0DD3482210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50398:82684fd1231e Date: 2011-12-11 22:24 +0100 http://bitbucket.org/pypy/pypy/changeset/82684fd1231e/ Log: Switch to a less magical way of specializing execute_token(), one which has a chance to work in a real backend too. diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -260,32 +260,31 @@ self.latest_frame = frame return fail_index - def execute_token(self, loop_token, *args): - """Calls the assembler generated for the given loop. - Returns the ResOperation that failed, of type rop.FAIL. 
- """ - if we_are_translated(): - assert len(args) <= 10 - iterator = unrolling_iterable_10 - else: - iterator = range(len(args)) + def make_execute_token(self, *argkinds): + nb_args = len(argkinds) + unroll_argkinds = unrolling_iterable(list(enumerate(argkinds))) # - for index in iterator: - if index == len(args): - break - x = args[index] - TYPE = lltype.typeOf(x) - if TYPE == lltype.Signed: - llimpl.set_future_value_int(index, x) - elif TYPE == llmemory.GCREF: - llimpl.set_future_value_ref(index, x) - elif TYPE == longlong.FLOATSTORAGE: - llimpl.set_future_value_float(index, x) - else: - raise ValueError(TYPE) + def execute_token(loop_token, *args): + assert len(args) == nb_args + for index, kind in unroll_argkinds: + x = args[index] + TYPE = lltype.typeOf(x) + if kind == INT: + assert TYPE == lltype.Signed + llimpl.set_future_value_int(index, x) + elif kind == REF: + assert TYPE == llmemory.GCREF + llimpl.set_future_value_ref(index, x) + elif kind == FLOAT: + assert TYPE == longlong.FLOATSTORAGE + llimpl.set_future_value_float(index, x) + else: + assert 0 + # + fail_index = self._execute_token(loop_token) + return self.get_fail_descr_from_number(fail_index) # - fail_index = self._execute_token(loop_token) - return self.get_fail_descr_from_number(fail_index) + return execute_token def get_latest_value_int(self, index): return llimpl.frame_int_getvalue(self.latest_frame, index) @@ -700,8 +699,6 @@ return x -unrolling_iterable_10 = unrolling_iterable(range(10)) - def make_getargs(ARGS): argsiter = unrolling_iterable(ARGS) args_n = len([ARG for ARG in ARGS if ARG is not ootype.Void]) diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -85,12 +85,18 @@ raise NotImplementedError def execute_token(self, looptoken, *args): - """Execute the generated code referenced by the looptoken. + """NOT_RPYTHON (for tests only) + Execute the generated code referenced by the looptoken. Returns the descr of the last executed operation: either the one attached to the failing guard, or the one attached to the FINISH. Use get_latest_value_xxx() afterwards to read the result(s). - (This method is automatically specialized by the front-end if - needed, for various types and numbers of *args.) + """ + execute = self.make_execute_token(*[history.getkind(x) for x in args]) + return execute(looptoken, *args) + + def make_execute_token(self, *argkinds): + """Must make and return an execute_token() function that will be + called with the given argtypes. """ raise NotImplementedError diff --git a/pypy/jit/metainterp/jitdriver.py b/pypy/jit/metainterp/jitdriver.py --- a/pypy/jit/metainterp/jitdriver.py +++ b/pypy/jit/metainterp/jitdriver.py @@ -11,6 +11,7 @@ # self.portal_calldescr ... pypy.jit.metainterp.warmspot # self.num_green_args ... pypy.jit.metainterp.warmspot # self.num_red_args ... pypy.jit.metainterp.warmspot + # self.red_args_types ... pypy.jit.metainterp.warmspot # self.result_type ... pypy.jit.metainterp.warmspot # self.virtualizable_info... pypy.jit.metainterp.warmspot # self.greenfield_info ... 
pypy.jit.metainterp.warmspot diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -522,9 +522,9 @@ greens_v, reds_v = support.decode_hp_hint_args(op) ALLARGS = [v.concretetype for v in (greens_v + reds_v)] jd._green_args_spec = [v.concretetype for v in greens_v] - jd._red_args_types = [history.getkind(v.concretetype) for v in reds_v] + jd.red_args_types = [history.getkind(v.concretetype) for v in reds_v] jd.num_green_args = len(jd._green_args_spec) - jd.num_red_args = len(jd._red_args_types) + jd.num_red_args = len(jd.red_args_types) RESTYPE = graph.getreturnvar().concretetype (jd._JIT_ENTER_FUNCTYPE, jd._PTR_JIT_ENTER_FUNCTYPE) = self.cpu.ts.get_FuncType(ALLARGS, lltype.Void) diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -282,15 +282,14 @@ confirm_enter_jit = self.confirm_enter_jit range_red_args = unrolling_iterable( range(num_green_args, num_green_args + jitdriver_sd.num_red_args)) - # hack: make a new copy of the method - func_execute_token = self.cpu.execute_token.im_func - func_execute_token = func_with_new_name(func_execute_token, - "execute_token_spec") + # get a new specialized copy of the method + func_execute_token = self.cpu.make_execute_token( + *[kind[0] for kind in jitdriver_sd.red_args_types]) def execute_assembler(loop_token, *args): # Call the backend to run the 'looptoken' with the given # input args. - fail_descr = func_execute_token(self.cpu, loop_token, *args) + fail_descr = func_execute_token(loop_token, *args) # # If we have a virtualizable, we have to reset its # 'vable_token' field afterwards From noreply at buildbot.pypy.org Sun Dec 11 22:42:27 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 22:42:27 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: fix Message-ID: <20111211214227.3004482210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50399:c1bad6dee4e2 Date: 2011-12-11 22:27 +0100 http://bitbucket.org/pypy/pypy/changeset/c1bad6dee4e2/ Log: fix diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -1,5 +1,6 @@ from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.jit.metainterp import history +from pypy.rpython.lltypesystem import lltype class AbstractCPU(object): @@ -91,7 +92,8 @@ attached to the failing guard, or the one attached to the FINISH. Use get_latest_value_xxx() afterwards to read the result(s). 
""" - execute = self.make_execute_token(*[history.getkind(x) for x in args]) + argkinds = [history.getkind(lltype.typeOf(x))[0] for x in args] + execute = self.make_execute_token(*argkinds) return execute(looptoken, *args) def make_execute_token(self, *argkinds): From noreply at buildbot.pypy.org Sun Dec 11 22:42:28 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 22:42:28 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: In-progress Message-ID: <20111211214228.589BD82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50400:2bc80fdea1c1 Date: 2011-12-11 22:37 +0100 http://bitbucket.org/pypy/pypy/changeset/2bc80fdea1c1/ Log: In-progress diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -260,23 +260,20 @@ self.latest_frame = frame return fail_index - def make_execute_token(self, *argkinds): - nb_args = len(argkinds) - unroll_argkinds = unrolling_iterable(list(enumerate(argkinds))) + def make_execute_token(self, *argtypes): + nb_args = len(argtypes) + unroll_argtypes = unrolling_iterable(list(enumerate(argtypes))) # def execute_token(loop_token, *args): assert len(args) == nb_args - for index, kind in unroll_argkinds: + for index, TYPE in unroll_argtypes: x = args[index] - TYPE = lltype.typeOf(x) - if kind == INT: - assert TYPE == lltype.Signed + assert TYPE == lltype.typeOf(x) + if TYPE == lltype.Signed: llimpl.set_future_value_int(index, x) - elif kind == REF: - assert TYPE == llmemory.GCREF + elif TYPE == llmemory.GCREF: llimpl.set_future_value_ref(index, x) - elif kind == FLOAT: - assert TYPE == longlong.FLOATSTORAGE + elif TYPE == longlong.FLOATSTORAGE: llimpl.set_future_value_float(index, x) else: assert 0 diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -92,11 +92,11 @@ attached to the failing guard, or the one attached to the FINISH. Use get_latest_value_xxx() afterwards to read the result(s). """ - argkinds = [history.getkind(lltype.typeOf(x))[0] for x in args] - execute = self.make_execute_token(*argkinds) + argtypes = [lltype.typeOf(x) for x in args] + execute = self.make_execute_token(*argtypes) return execute(looptoken, *args) - def make_execute_token(self, *argkinds): + def make_execute_token(self, *argtypes): """Must make and return an execute_token() function that will be called with the given argtypes. """ diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.llinterp import LLInterpreter from pypy.rlib.objectmodel import we_are_translated +from pypy.jit.codewriter import longlong from pypy.jit.metainterp import history, compile from pypy.jit.backend.x86.assembler import Assembler386 from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS @@ -21,7 +22,6 @@ supports_floats = True supports_singlefloats = True - BOOTSTRAP_TP = lltype.FuncType([], lltype.Signed) dont_keepalive_stuff = False # for tests with_threads = False @@ -122,27 +122,35 @@ # the FORCE_TOKEN operation and this helper both return 'ebp'. 
return self.assembler.fail_ebp - def execute_token(self, executable_token): - addr = executable_token._x86_bootstrap_code - #llop.debug_print(lltype.Void, ">>>> Entering", addr) - func = rffi.cast(lltype.Ptr(self.BOOTSTRAP_TP), addr) - fail_index = self._execute_call(func) - #llop.debug_print(lltype.Void, "<<<< Back") - return self.get_fail_descr_from_number(fail_index) - - def _execute_call(self, func): - # help flow objspace - prev_interpreter = None - if not self.translate_support_code: - prev_interpreter = LLInterpreter.current_interpreter - LLInterpreter.current_interpreter = self.debug_ll_interpreter - res = 0 - try: - res = func() - finally: + def make_execute_token(self, *argkinds): + ARGS = [] + for kind in argkinds: + if kind == history.INT: + ARGS.append(lltype.Signed) + elif kind == history.REF: + ARGS.append(llmemory.GCREF) + elif kind == history.FLOAT: + ARGS.append(longlong.FLOATSTORAGE) + else: + assert 0 + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, lltype.Signed)) + # + def execute_token(executable_token, *args): + addr = executable_token._x86_direct_bootstrap_code + func = rffi.cast(FUNCPTR, addr) + #llop.debug_print(lltype.Void, ">>>> Entering", addr) + prev_interpreter = None # help flow space if not self.translate_support_code: - LLInterpreter.current_interpreter = prev_interpreter - return res + prev_interpreter = LLInterpreter.current_interpreter + LLInterpreter.current_interpreter = self.debug_ll_interpreter + try: + fail_index = func(*args) + finally: + if not self.translate_support_code: + LLInterpreter.current_interpreter = prev_interpreter + #llop.debug_print(lltype.Void, "<<<< Back") + return self.get_fail_descr_from_number(fail_index) + return execute_token def cast_ptr_to_int(x): adr = llmemory.cast_ptr_to_adr(x) diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -283,8 +283,17 @@ range_red_args = unrolling_iterable( range(num_green_args, num_green_args + jitdriver_sd.num_red_args)) # get a new specialized copy of the method - func_execute_token = self.cpu.make_execute_token( - *[kind[0] for kind in jitdriver_sd.red_args_types]) + ARGS = [] + for kind in jitdriver_sd.red_args_types: + if kind == 'int': + ARGS.append(lltype.Signed) + elif kind == 'ref': + ARGS.append(llmemory.GCREF) + elif kind == 'float': + ARGS.append(longlong.FLOATSTORAGE) + else: + assert 0, kind + func_execute_token = self.cpu.make_execute_token(*ARGS) def execute_assembler(loop_token, *args): # Call the backend to run the 'looptoken' with the given From noreply at buildbot.pypy.org Sun Dec 11 22:42:29 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 22:42:29 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Kill. Message-ID: <20111211214229.7BDCE82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50401:9677e2ec7aef Date: 2011-12-11 22:41 +0100 http://bitbucket.org/pypy/pypy/changeset/9677e2ec7aef/ Log: Kill. 
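With the per-slot setters gone, the only way to enter a compiled loop is through the specialized entry point returned by make_execute_token(). A minimal sketch of how a caller obtains and uses it, modelled on the warmstate.py hunk in the previous changeset (cpu, looptoken and some_gcref are placeholder names, and the concrete argument types are just an example):

    from pypy.rpython.lltypesystem import lltype, llmemory

    # specialize once, for the red argument types of this particular loop...
    execute = cpu.make_execute_token(lltype.Signed, llmemory.GCREF)
    # ...then call the returned function every time the loop is executed
    fail_descr = execute(looptoken, 42, some_gcref)
    res = cpu.get_latest_value_int(0)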
diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -91,15 +91,6 @@ return self.assembler.assemble_bridge(faildescr, inputargs, operations, original_loop_token, log=log) - def set_future_value_int(self, index, intvalue): - self.assembler.fail_boxes_int.setitem(index, intvalue) - - def set_future_value_float(self, index, floatvalue): - self.assembler.fail_boxes_float.setitem(index, floatvalue) - - def set_future_value_ref(self, index, ptrvalue): - self.assembler.fail_boxes_ptr.setitem(index, ptrvalue) - def get_latest_value_int(self, index): return self.assembler.fail_boxes_int.getitem(index) @@ -122,17 +113,7 @@ # the FORCE_TOKEN operation and this helper both return 'ebp'. return self.assembler.fail_ebp - def make_execute_token(self, *argkinds): - ARGS = [] - for kind in argkinds: - if kind == history.INT: - ARGS.append(lltype.Signed) - elif kind == history.REF: - ARGS.append(llmemory.GCREF) - elif kind == history.FLOAT: - ARGS.append(longlong.FLOATSTORAGE) - else: - assert 0 + def make_execute_token(self, *ARGS): FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, lltype.Signed)) # def execute_token(executable_token, *args): diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -282,11 +282,7 @@ ops[-2].setfailargs([i1]) looptoken = JitCellToken() self.cpu.compile_loop([b], ops, looptoken) - if op == rop.INT_IS_TRUE: - self.cpu.set_future_value_int(0, b.value) - else: - self.cpu.set_future_value_ref(0, b.value) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, b.value) result = self.cpu.get_latest_value_int(0) if guard == rop.GUARD_FALSE: assert result == execute(self.cpu, None, From noreply at buildbot.pypy.org Sun Dec 11 23:42:40 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Dec 2011 23:42:40 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: In-progress. Three complicated and long functions are gone :-) Message-ID: <20111211224240.9C9E582210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50402:a4f5ba4d79b4 Date: 2011-12-11 23:42 +0100 http://bitbucket.org/pypy/pypy/changeset/a4f5ba4d79b4/ Log: In-progress. 
Three complicated and long functions are gone :-) diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -69,6 +69,8 @@ self.bindings[box] = loc # index = self.get_loc_index(loc) + if index < 0: + return endindex = index + self.frame_size(box.type) while len(self.used) < endindex: self.used.append(False) @@ -91,6 +93,8 @@ # size = self.frame_size(box.type) baseindex = self.get_loc_index(loc) + if baseindex < 0: + return for i in range(size): index = baseindex + i assert 0 <= index < len(self.used) @@ -98,7 +102,8 @@ def try_to_reuse_location(self, box, loc): index = self.get_loc_index(loc) - assert index >= 0 + if index < 0: + return False size = self.frame_size(box.type) for i in range(size): while (index + i) >= len(self.used): @@ -158,7 +163,7 @@ if not we_are_translated() and self.box_types is not None: assert isinstance(v, TempBox) or v.type in self.box_types - def possibly_free_var(self, v): + def possibly_free_var(self, v, _hint_dont_reuse_quickly=False): """ If v is stored in a register and v is not used beyond the current position, then free it. Must be called at some point for all variables that might be in registers. @@ -168,7 +173,10 @@ return if v not in self.longevity or self.longevity[v][1] <= self.position: if v in self.reg_bindings: - self.free_regs.append(self.reg_bindings[v]) + if _hint_dont_reuse_quickly: + self.free_regs.insert(0, self.reg_bindings[v]) + else: + self.free_regs.append(self.reg_bindings[v]) del self.reg_bindings[v] if self.frame_manager is not None: self.frame_manager.mark_as_free(v) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -421,10 +421,8 @@ def assemble_loop(self, loopname, inputargs, operations, looptoken, log): '''adds the following attributes to looptoken: - _x86_loop_code (an integer giving an address) - _x86_bootstrap_code (an integer giving an address) - _x86_direct_bootstrap_code ( " " " " ) - _x86_arglocs + _x86_function_addr (address of the generated func, as an int) + _x86_loop_code (debug: addr of the start of the ResOps) _x86_debug_checksum ''' # XXX this function is too longish and contains some code @@ -445,12 +443,11 @@ operations = self._inject_debugging_code(looptoken, operations) regalloc = RegAlloc(self, self.cpu.translate_support_code) - arglocs, operations = regalloc.prepare_loop(inputargs, operations, - looptoken, clt.allgcrefs) - looptoken._x86_arglocs = arglocs - - bootstrappos = self.mc.get_relative_pos() - stackadjustpos = self._assemble_bootstrap_code(inputargs, arglocs) + # + self._call_header_with_stack_check() + stackadjustpos = self._patchable_stackadjust() + operations = regalloc.prepare_loop(inputargs, operations, + looptoken, clt.allgcrefs) looppos = self.mc.get_relative_pos() looptoken._x86_loop_code = looppos clt.frame_depth = -1 # temporarily @@ -458,19 +455,17 @@ frame_depth, param_depth = self._assemble(regalloc, operations) clt.frame_depth = frame_depth clt.param_depth = param_depth - - directbootstrappos = self.mc.get_relative_pos() - self._assemble_bootstrap_direct_call(arglocs, looppos, - frame_depth+param_depth) + # + size_excluding_failure_stuff = self.mc.get_relative_pos() self.write_pending_failure_recoveries() - fullsize = self.mc.get_relative_pos() + full_size = self.mc.get_relative_pos() # rawstart = self.materialize_loop(looptoken) 
debug_start("jit-backend-addr") debug_print("Loop %d (%s) has address %x to %x (bootstrap %x)" % ( looptoken.number, loopname, rawstart + looppos, - rawstart + directbootstrappos, + rawstart + size_excluding_failure_stuff, rawstart)) debug_stop("jit-backend-addr") self._patch_stackadjust(rawstart + stackadjustpos, @@ -481,18 +476,17 @@ if not we_are_translated(): # used only by looptoken.dump() -- useful in tests looptoken._x86_rawstart = rawstart - looptoken._x86_fullsize = fullsize + looptoken._x86_fullsize = full_size looptoken._x86_ops_offset = ops_offset + looptoken._x86_function_addr = rawstart - looptoken._x86_bootstrap_code = rawstart + bootstrappos - looptoken._x86_direct_bootstrap_code = rawstart + directbootstrappos self.fixup_target_tokens(rawstart) self.teardown() # oprofile support if self.cpu.profile_agent is not None: name = "Loop # %s: %s" % (looptoken.number, loopname) self.cpu.profile_agent.native_code_written(name, - rawstart, fullsize) + rawstart, full_size) return ops_offset def assemble_bridge(self, faildescr, inputargs, operations, @@ -802,98 +796,6 @@ self.mc.MOV_ri(ebx.value, rst) # MOV ebx, rootstacktop self.mc.SUB_mi8((ebx.value, 0), 2*WORD) # SUB [ebx], 2*WORD - def _assemble_bootstrap_direct_call(self, arglocs, jmppos, stackdepth): - if IS_X86_64: - return self._assemble_bootstrap_direct_call_64(arglocs, jmppos, stackdepth) - # XXX pushing ebx esi and edi is a bit pointless, since we store - # all regsiters anyway, for the case of guard_not_forced - # XXX this can be improved greatly. Right now it'll behave like - # a normal call - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - offset = 2 * WORD - tmp = eax - xmmtmp = xmm0 - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert not loc.is_xmm - self.mc.MOV_rb(loc.value, offset) - else: - self.mc.MOV_rb(tmp.value, offset) - self.mc.MOV(loc, tmp) - offset += WORD - loc = floatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert loc.is_xmm - self.mc.MOVSD_xb(loc.value, offset) - else: - self.mc.MOVSD_xb(xmmtmp.value, offset) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - offset += 2 * WORD - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - - def _assemble_bootstrap_direct_call_64(self, arglocs, jmppos, stackdepth): - # XXX: Very similar to _emit_call_64 - - src_locs = [] - dst_locs = [] - xmm_src_locs = [] - xmm_dst_locs = [] - get_from_stack = [] - - # In reverse order for use with pop() - unused_gpr = [r9, r8, ecx, edx, esi, edi] - unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] - - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - - # The lists are padded with Nones - assert len(nonfloatlocs) == len(floatlocs) - - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if len(unused_gpr) > 0: - src_locs.append(unused_gpr.pop()) - dst_locs.append(loc) - else: - get_from_stack.append((loc, False)) - - floc = floatlocs[i] - if floc is not None: - if len(unused_xmm) > 0: - xmm_src_locs.append(unused_xmm.pop()) - xmm_dst_locs.append(floc) - else: - get_from_stack.append((floc, True)) - - remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) - remap_frame_layout(self, xmm_src_locs, 
xmm_dst_locs, X86_64_XMM_SCRATCH_REG) - - for i in range(len(get_from_stack)): - loc, is_xmm = get_from_stack[i] - if is_xmm: - self.mc.MOVSD_xb(X86_64_XMM_SCRATCH_REG.value, (2 + i) * WORD) - self.mc.MOVSD(loc, X86_64_XMM_SCRATCH_REG) - else: - self.mc.MOV_rb(X86_64_SCRATCH_REG.value, (2 + i) * WORD) - # XXX: We're assuming that "loc" won't require regloc to - # clobber the scratch register - self.mc.MOV(loc, X86_64_SCRATCH_REG) - - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking oldnonfloatlocs, oldfloatlocs = oldlooptoken._x86_arglocs @@ -909,45 +811,6 @@ mc.JMP(imm(target)) mc.copy_to_raw_memory(oldadr) - def _assemble_bootstrap_code(self, inputargs, arglocs): - nonfloatlocs, floatlocs = arglocs - self._call_header() - stackadjustpos = self._patchable_stackadjust() - tmp = eax - xmmtmp = xmm0 - self.mc.begin_reuse_scratch_register() - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is None: - continue - if isinstance(loc, RegLoc): - target = loc - else: - target = tmp - if inputargs[i].type == REF: - adr = self.fail_boxes_ptr.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - self.mc.MOV(heap(adr), imm0) - else: - adr = self.fail_boxes_int.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - if target is not loc: - assert isinstance(loc, StackLoc) - self.mc.MOV_br(loc.value, target.value) - for i in range(len(floatlocs)): - loc = floatlocs[i] - if loc is None: - continue - adr = self.fail_boxes_float.get_addr_for_num(i) - if isinstance(loc, RegLoc): - self.mc.MOVSD(loc, heap(adr)) - else: - self.mc.MOVSD(xmmtmp, heap(adr)) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - self.mc.end_reuse_scratch_register() - return stackadjustpos - def dump(self, text): if not self.verbose: return @@ -2104,9 +1967,9 @@ # returns in eax the fail_index # now we return from the complete frame, which starts from - # _assemble_bootstrap_code(). The LEA in _call_footer below throws - # away most of the frame, including all the PUSHes that we did just - # above. + # _call_header_with_stack_check(). The LEA in _call_footer below + # throws away most of the frame, including all the PUSHes that we + # did just above. 
self._call_footer() rawstart = mc.materialize(self.cpu.asmmemmgr, []) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -28,7 +28,7 @@ class X86RegisterManager(RegisterManager): box_types = [INT, REF] - all_regs = [eax, ecx, edx, ebx, esi, edi] + all_regs = [ecx, eax, edx, ebx, esi, edi] no_lower_byte_regs = [esi, edi] save_around_call_regs = [eax, edx, ecx] frame_reg = ebp @@ -60,7 +60,7 @@ class X86_64_RegisterManager(X86RegisterManager): # r11 omitted because it's used as scratch - all_regs = [eax, ecx, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15] + all_regs = [ecx, eax, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15] no_lower_byte_regs = [] save_around_call_regs = [eax, ecx, edx, esi, edi, r8, r9, r10] @@ -173,22 +173,26 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity, useful = self._compute_vars_longevity(inputargs, operations) + longevity = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) - return operations, useful + return operations def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): - operations, useful = self._prepare(inputargs, operations, allgcrefs) - return self._process_inputargs(inputargs, useful), operations + operations = self._prepare(inputargs, operations, allgcrefs) + self._set_initial_bindings(inputargs) + # note: we need to make a copy of inputargs because possibly_free_vars + # is also used on op args, which is a non-resizable list + self.possibly_free_vars(list(inputargs)) + return operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, allgcrefs): - operations, _ = self._prepare(inputargs, operations, allgcrefs) + operations = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) self.param_depth = prev_depths[1] return operations @@ -196,46 +200,30 @@ def reserve_param(self, n): self.param_depth = max(self.param_depth, n) - def _process_inputargs(self, inputargs, useful): - # XXX we can sort out here by longevity if we need something - # more optimal - floatlocs = [None] * len(inputargs) - nonfloatlocs = [None] * len(inputargs) - # Don't use all_regs[0] for passing arguments around a loop. - # Must be kept in sync with consider_jump(). - # XXX this should probably go to llsupport/regalloc.py - xmmtmp = self.xrm.free_regs.pop(0) - tmpreg = self.rm.free_regs.pop(0) - assert tmpreg == X86RegisterManager.all_regs[0] - assert xmmtmp == X86XMMRegisterManager.all_regs[0] + def _set_initial_bindings(self, inputargs): + if IS_X86_64: + return self._set_initial_bindings_64(inputargs) + # ... + # stack layout: arg2 + # arg1 + # arg0 + # return address + # saved ebp <-- ebp points here + # ... + cur_frame_pos = - 1 - FRAME_FIXED_SIZE + assert get_ebp_ofs(cur_frame_pos-1) == 2*WORD + assert get_ebp_ofs(cur_frame_pos-2) == 3*WORD + # for i in range(len(inputargs)): - arg = inputargs[i] - assert not isinstance(arg, Const) - reg = None - if self.longevity[arg][1] > -1 and arg in useful: - if arg.type == FLOAT: - # xxx is it really a good idea? 
at the first CALL they - # will all be flushed anyway - reg = self.xrm.try_allocate_reg(arg) - else: - reg = self.rm.try_allocate_reg(arg) - if reg: - loc = reg + box = inputargs[i] + assert isinstance(box, Box) + # + if box.type == FLOAT: + cur_frame_pos -= 2 else: - loc = self.fm.loc(arg) - if arg.type == FLOAT: - floatlocs[i] = loc - else: - nonfloatlocs[i] = loc - # otherwise we have it saved on stack, so no worry - self.rm.free_regs.insert(0, tmpreg) - self.xrm.free_regs.insert(0, xmmtmp) - assert tmpreg not in nonfloatlocs - assert xmmtmp not in floatlocs - # note: we need to make a copy of inputargs because possibly_free_vars - # is also used on op args, which is a non-resizable list - self.possibly_free_vars(list(inputargs)) - return nonfloatlocs, floatlocs + cur_frame_pos -= 1 + loc = self.fm.frame_pos(cur_frame_pos, box.type) + self.fm.set_binding(box, loc) def possibly_free_var(self, var): if var.type == FLOAT: @@ -458,7 +446,7 @@ # only to guard operations or to jump or to finish produced = {} last_used = {} - useful = {} + #useful = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -469,8 +457,8 @@ opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) - if opnum != rop.JUMP and opnum != rop.FINISH: - useful[arg] = None + #if opnum != rop.JUMP and opnum != rop.FINISH: + # useful[arg] = None if isinstance(arg, Box) and arg not in last_used: last_used[arg] = i if op.is_guard(): @@ -496,7 +484,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity, useful + return longevity#, useful def loc(self, v): if v is None: # xxx kludgy @@ -1451,12 +1439,12 @@ tmpreg = X86RegisterManager.all_regs[0] tmpvar = TempBox() self.rm.force_allocate_reg(tmpvar, selected_reg=tmpreg) - self.rm.possibly_free_var(tmpvar) + self.rm.possibly_free_var(tmpvar, _hint_dont_reuse_quickly=True) # xmmtmp = X86XMMRegisterManager.all_regs[0] tmpvar = TempBox() self.xrm.force_allocate_reg(tmpvar, selected_reg=xmmtmp) - self.xrm.possibly_free_var(tmpvar) + self.xrm.possibly_free_var(tmpvar, _hint_dont_reuse_quickly=True) # # we need to make sure that no variable is stored in ebp for arg in inputargs: diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -44,7 +44,6 @@ _location_code = 'b' def __init__(self, position, ebp_offset, num_words, type): - assert ebp_offset < 0 # so no confusion with RegLoc.value self.position = position self.value = ebp_offset self.width = num_words * WORD diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -117,7 +117,7 @@ FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, lltype.Signed)) # def execute_token(executable_token, *args): - addr = executable_token._x86_direct_bootstrap_code + addr = executable_token._x86_function_addr func = rffi.cast(FUNCPTR, addr) #llop.debug_print(lltype.Void, ">>>> Entering", addr) prev_interpreter = None # help flow space diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -328,9 +328,8 @@ inputargs = [i for i in (a, b) if isinstance(i, Box)] looptoken = JitCellToken() self.cpu.compile_loop(inputargs, ops, looptoken) - for i, box in enumerate(inputargs): - self.cpu.set_future_value_int(i, box.value) - 
self.cpu.execute_token(looptoken) + inputvalues = [box.value for box in inputargs] + self.cpu.execute_token(looptoken, *inputvalues) result = self.cpu.get_latest_value_int(0) expected = execute(self.cpu, None, op, None, a, b).value if guard == rop.GUARD_FALSE: @@ -396,8 +395,7 @@ assert address >= loopaddress + loopsize assert size >= 10 # randomish number - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -503,9 +501,7 @@ looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) - self.cpu.set_future_value_int(0, 123450) - self.cpu.set_future_value_int(1, 123408) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 123450, 123408) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 42 assert self.cpu.get_latest_value_int(1) == 42 @@ -537,8 +533,7 @@ self.cpu.assembler.set_debug(True) looptoken = JitCellToken() self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] assert struct.i == 10 @@ -561,7 +556,6 @@ self.cpu.assembler.set_debug(True) looptoken = JitCellToken() self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 0) assert looptoken._x86_debug_checksum == sum([op.getopnum() for op in ops.operations]) From noreply at buildbot.pypy.org Mon Dec 12 09:04:08 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Mon, 12 Dec 2011 09:04:08 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: Retracing does not fail in the same set of situations as before. In test_getattr_promote we previously got an retrace that was never used. Now it is not even passed to the backend Message-ID: <20111212080408.A7CCC82210@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50403:073665c57a62 Date: 2011-12-12 08:52 +0100 http://bitbucket.org/pypy/pypy/changeset/073665c57a62/ Log: Retracing does not fail in the same set of situations as before. In test_getattr_promote we previously got an retrace that was never used. Now it is not even passed to the backend diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -34,7 +34,10 @@ jump(p0, p1, p2, p3, p4, p5, i13, i11, i8, descr=...) """ assert loop0.match(expected) - assert loop1.match(expected) + # XXX: The retracing fails to form a loop since j + # becomes constant 0 after the bridge and constant 1 at the end of the + # loop. A bridge back to the peramble is produced instead. + #assert loop1.match(expected) def test_factorial(self): def fact(n): diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -179,7 +179,7 @@ log = self.run(main, [1000]) assert log.result == main(1000) loops = log.loops_by_filename(self.filepath) - assert len(loops) == 2 + assert len(loops) == 1 for loop in loops: loop.match_by_id('getattr',''' guard_not_invalidated(descr=...) 
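
Taken together, the bootstrap-removal commit above and the jit-targets merge that follows change how a compiled loop is built and entered: loops now begin with an explicit LABEL operation carrying a TargetToken, JUMPs target that token rather than the loop token itself, and execute_token() receives the argument values directly instead of going through set_future_value_int(). The sketch below is only an illustration assembled from the test diffs in these commits; it assumes both changes are applied and that `cpu` is an already-initialized backend CPU instance as in the test harness, so it is not code taken verbatim from either changeset.

# Illustrative sketch only, under the assumptions stated above.
from pypy.jit.metainterp.history import (BoxInt, ConstInt, BasicFailDescr,
                                         JitCellToken, TargetToken)
from pypy.jit.metainterp.resoperation import ResOperation, rop

i0 = BoxInt(); i1 = BoxInt(); i2 = BoxInt()
looptoken = JitCellToken()        # stands for the whole JitCell
targettoken = TargetToken()       # stands for one specific LABEL
operations = [
    ResOperation(rop.LABEL, [i0], None, descr=targettoken),  # explicit loop header
    ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1),
    ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2),
    ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)),
    ResOperation(rop.JUMP, [i1], None, descr=targettoken),   # jump to the LABEL, not to the loop token
]
operations[3].setfailargs([i1])
cpu.compile_loop([i0], operations, looptoken)

# Arguments are now passed to execute_token() directly; previously this took
# cpu.set_future_value_int(0, 2) followed by cpu.execute_token(looptoken).
fail = cpu.execute_token(looptoken, 2)
assert fail.identifier == 2
assert cpu.get_latest_value_int(0) == 10
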
From noreply at buildbot.pypy.org Mon Dec 12 09:04:14 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Mon, 12 Dec 2011 09:04:14 +0100 (CET) Subject: [pypy-commit] pypy default: Merge jit-targets. It introduces an label ResOperation which can be used as a target for jump operations. Currently the optimizer only supports constructs with a single intermediate label (ie a preamble followed by a peeled loop). The LoopToken have been replaced by a JitCellToken that represents a JitCell and a TargetToken that represents a specific label. The frontend works with jumps between JitCellTokens. Each JitCellToken contains a list of TargetTokens that points to labels where different specialized versions of traces starting at the JitCellToken can be found. The optimizer replaces a jump to a JitCellToken with a jump to a specific TargetToken. Message-ID: <20111212080414.42D3582ABF@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r50406:1a9a44331010 Date: 2011-12-12 09:03 +0100 http://bitbucket.org/pypy/pypy/changeset/1a9a44331010/ Log: Merge jit-targets. It introduces an label ResOperation which can be used as a target for jump operations. Currently the optimizer only supports constructs with a single intermediate label (ie a preamble followed by a peeled loop). The LoopToken have been replaced by a JitCellToken that represents a JitCell and a TargetToken that represents a specific label. The frontend works with jumps between JitCellTokens. Each JitCellToken contains a list of TargetTokens that points to labels where different specialized versions of traces starting at the JitCellToken can be found. The optimizer replaces a jump to a JitCellToken with a jump to a specific TargetToken. diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -8,6 +8,7 @@ from pypy.objspace.flow.model import Variable, Constant from pypy.annotation import model as annmodel from pypy.jit.metainterp.history import REF, INT, FLOAT +from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -48,6 +49,11 @@ value._the_opaque_pointer = op return op +def _normalize(value): + if isinstance(value, lltype._ptr): + value = lltype.top_container(value._obj) + return value + def from_opaque_string(s): if isinstance(s, str): return s @@ -347,6 +353,14 @@ op = loop.operations[-1] op.descr = weakref.ref(descr) +TARGET_TOKENS = weakref.WeakKeyDictionary() + +def compile_add_target_token(loop, descr): + loop = _from_opaque(loop) + op = loop.operations[-1] + descrobj = _normalize(descr) + TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args + def compile_add_var(loop, intvar): loop = _from_opaque(loop) op = loop.operations[-1] @@ -381,13 +395,17 @@ _variables.append(v) return r -def compile_add_jump_target(loop, loop_target): +def compile_add_jump_target(loop, targettoken): loop = _from_opaque(loop) - loop_target = _from_opaque(loop_target) + descrobj = _normalize(targettoken) + loop_target, target_opindex, target_inputargs = TARGET_TOKENS[descrobj] + # op = loop.operations[-1] op.jump_target = loop_target + op.jump_target_opindex = target_opindex + op.jump_target_inputargs = target_inputargs assert op.opnum == rop.JUMP - assert len(op.args) == len(loop_target.inputargs) + assert len(op.args) == len(target_inputargs) if loop_target == loop: 
log.info("compiling new loop") else: @@ -521,10 +539,11 @@ self.opindex += 1 continue if op.opnum == rop.JUMP: - assert len(op.jump_target.inputargs) == len(args) - self.env = dict(zip(op.jump_target.inputargs, args)) + inputargs = op.jump_target_inputargs + assert len(inputargs) == len(args) + self.env = dict(zip(inputargs, args)) self.loop = op.jump_target - self.opindex = 0 + self.opindex = op.jump_target_opindex _stats.exec_jumps += 1 elif op.opnum == rop.FINISH: if self.verbose: @@ -617,6 +636,15 @@ # return _op_default_implementation + def op_label(self, _, *args): + op = self.loop.operations[self.opindex] + assert op.opnum == rop.LABEL + assert len(op.args) == len(args) + newenv = {} + for v, value in zip(op.args, args): + newenv[v] = value + self.env = newenv + def op_debug_merge_point(self, _, *args): from pypy.jit.metainterp.warmspot import get_stats try: @@ -1791,6 +1819,7 @@ setannotation(compile_add, annmodel.s_None) setannotation(compile_add_descr, annmodel.s_None) setannotation(compile_add_descr_arg, annmodel.s_None) +setannotation(compile_add_target_token, annmodel.s_None) setannotation(compile_add_var, annmodel.s_None) setannotation(compile_add_int_const, annmodel.s_None) setannotation(compile_add_ref_const, annmodel.s_None) diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -142,17 +142,17 @@ old, oldindex = faildescr._compiled_fail llimpl.compile_redirect_fail(old, oldindex, c) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, inputargs, operations, jitcell_token, log=True, name=''): """In a real assembler backend, this should assemble the given list of operations. Here we just generate a similar CompiledLoop instance. The code here is RPython, whereas the code in llimpl is not. 
""" c = llimpl.compile_start() - clt = model.CompiledLoopToken(self, looptoken.number) + clt = model.CompiledLoopToken(self, jitcell_token.number) clt.loop_and_bridges = [c] clt.compiled_version = c - looptoken.compiled_loop_token = clt + jitcell_token.compiled_loop_token = clt self._compile_loop_or_bridge(c, inputargs, operations) def free_loop_and_bridges(self, compiled_loop_token): @@ -183,9 +183,11 @@ llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo, descr.arg_types, descr.extrainfo, descr.width) - if (isinstance(descr, history.LoopToken) and - op.getopnum() != rop.JUMP): + if isinstance(descr, history.JitCellToken): + assert op.getopnum() != rop.JUMP llimpl.compile_add_loop_token(c, descr) + if isinstance(descr, history.TargetToken) and op.getopnum() == rop.LABEL: + llimpl.compile_add_target_token(c, descr) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython c._obj.externalobj.operations[-1].setdescr(descr) @@ -239,9 +241,7 @@ assert op.is_final() if op.getopnum() == rop.JUMP: targettoken = op.getdescr() - assert isinstance(targettoken, history.LoopToken) - compiled_version = targettoken.compiled_loop_token.compiled_version - llimpl.compile_add_jump_target(c, compiled_version) + llimpl.compile_add_jump_target(c, targettoken) elif op.getopnum() == rop.FINISH: faildescr = op.getdescr() index = self.get_fail_descr_number(faildescr) diff --git a/pypy/jit/backend/test/calling_convention_test.py b/pypy/jit/backend/test/calling_convention_test.py --- a/pypy/jit/backend/test/calling_convention_test.py +++ b/pypy/jit/backend/test/calling_convention_test.py @@ -2,7 +2,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, ConstInt, ConstPtr, BoxObj, Const, ConstObj, BoxFloat, ConstFloat) @@ -107,7 +107,7 @@ ops += 'finish(f99, %s)\n' % arguments loop = parse(ops, namespace=locals()) - looptoken = LoopToken() + looptoken = JitCellToken() done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) expected_result = self._prepare_args(args, floats, ints) @@ -253,7 +253,7 @@ called_ops += 'finish(f%d, descr=fdescr3)\n' % total_index # compile called loop called_loop = parse(called_ops, namespace=locals()) - called_looptoken = LoopToken() + called_looptoken = JitCellToken() called_looptoken.outermost_jitdriver_sd = FakeJitDriverSD() done_number = self.cpu.get_fail_descr_number(called_loop.operations[-1].getdescr()) self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) @@ -284,7 +284,7 @@ # we want to take the fast path self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # prepare call to called_loop diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -3,7 +3,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, TargetToken, ConstInt, ConstPtr, BoxObj, ConstObj, BoxFloat, ConstFloat) @@ -32,7 +32,7 @@ result_type, valueboxes, descr) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) j = 0 for box in inputargs: @@ -106,7 +106,7 @@ ResOperation(rop.FINISH, [i1], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() 
self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) fail = self.cpu.execute_token(looptoken) @@ -118,15 +118,17 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) @@ -139,18 +141,22 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) + self.cpu.set_future_value_int(0, 44) fail = self.cpu.execute_token(looptoken) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(2) @@ -162,15 +168,17 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = weakref.ref(operations[2]) self.cpu.compile_loop(inputargs, operations, looptoken) @@ -190,15 +198,17 @@ i2 = BoxInt() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -206,7 +216,7 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -226,17 +236,21 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() + i3 = BoxInt() faildescr1 = BasicFailDescr(1) 
faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -244,7 +258,7 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -261,15 +275,17 @@ i1 = BoxInt() i2 = BoxInt() faildescr1 = BasicFailDescr(1) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + operations[3].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) @@ -290,7 +306,7 @@ return AbstractFailDescr.__setattr__(self, name, value) py.test.fail("finish descrs should not be touched") faildescr = UntouchableFailDescr() # to check that is not touched - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [i0], None, descr=faildescr) ] @@ -301,7 +317,7 @@ res = self.cpu.get_latest_value_int(0) assert res == 99 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [ConstInt(42)], None, descr=faildescr) ] @@ -311,7 +327,7 @@ res = self.cpu.get_latest_value_int(0) assert res == 42 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [], None, descr=faildescr) ] @@ -320,7 +336,7 @@ assert fail is faildescr if self.cpu.supports_floats: - looptoken = LoopToken() + looptoken = JitCellToken() f0 = BoxFloat() operations = [ ResOperation(rop.FINISH, [f0], None, descr=faildescr) @@ -333,7 +349,7 @@ res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == -61.25 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [constfloat(42.5)], None, descr=faildescr) ] @@ -350,14 +366,16 @@ z = BoxInt(579) t = BoxInt(455) u = BoxInt(0) # False - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [y, x], None, descr=targettoken), ResOperation(rop.INT_ADD, [x, y], z), ResOperation(rop.INT_SUB, [y, ConstInt(1)], t), ResOperation(rop.INT_EQ, [t, ConstInt(0)], u), ResOperation(rop.GUARD_FALSE, [u], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [z, t], None, descr=looptoken), + ResOperation(rop.JUMP, [t, z], None, descr=targettoken), ] operations[-2].setfailargs([t, z]) cpu.compile_loop([x, y], operations, looptoken) @@ -419,7 
+437,7 @@ ] ops[1].setfailargs([v_res]) # - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([v1, v2], ops, looptoken) for x, y, z in testcases: excvalue = self.cpu.grab_exc_value() @@ -1082,16 +1100,18 @@ inputargs.insert(index_counter, i0) jumpargs.insert(index_counter, i1) # - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() faildescr = BasicFailDescr(15) operations = [ + ResOperation(rop.LABEL, inputargs, None, descr=targettoken), ResOperation(rop.INT_SUB, [i0, ConstInt(1)], i1), ResOperation(rop.INT_GE, [i1, ConstInt(0)], i2), ResOperation(rop.GUARD_TRUE, [i2], None), - ResOperation(rop.JUMP, jumpargs, None, descr=looptoken), + ResOperation(rop.JUMP, jumpargs, None, descr=targettoken), ] - operations[2].setfailargs(inputargs[:]) - operations[2].setdescr(faildescr) + operations[3].setfailargs(inputargs[:]) + operations[3].setdescr(faildescr) # self.cpu.compile_loop(inputargs, operations, looptoken) # @@ -1149,22 +1169,24 @@ py.test.skip("requires floats") fboxes = [BoxFloat() for i in range(12)] i2 = BoxInt() + targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) operations = [ + ResOperation(rop.LABEL, fboxes, None, descr=targettoken), ResOperation(rop.FLOAT_LE, [fboxes[0], constfloat(9.2)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), ResOperation(rop.FINISH, fboxes, None, descr=faildescr2), ] operations[-2].setfailargs(fboxes) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(fboxes, operations, looptoken) fboxes2 = [BoxFloat() for i in range(12)] f3 = BoxFloat() bridge = [ ResOperation(rop.FLOAT_SUB, [fboxes2[0], constfloat(1.0)], f3), - ResOperation(rop.JUMP, [f3] + fboxes2[1:], None, descr=looptoken), + ResOperation(rop.JUMP, [f3]+fboxes2[1:], None, descr=targettoken), ] self.cpu.compile_bridge(faildescr1, fboxes2, bridge, looptoken) @@ -1214,7 +1236,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1271,7 +1293,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1330,7 +1352,7 @@ faildescr = BasicFailDescr(1) operations.append(ResOperation(rop.FINISH, [], None, descr=faildescr)) - looptoken = LoopToken() + looptoken = JitCellToken() # self.cpu.compile_loop(inputargs, operations, looptoken) # @@ -1400,7 +1422,7 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(5))] operations[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) self.cpu.compile_loop(unique_testcase_list, operations, @@ -1675,15 +1697,16 @@ exc_tp = xtp exc_ptr = xptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_ref(1) == xptr excvalue = self.cpu.grab_exc_value() assert not excvalue self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + 
self.cpu.execute_token(looptoken) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1700,9 +1723,10 @@ exc_tp = ytp exc_ptr = yptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == yptr @@ -1718,14 +1742,15 @@ finish(0) ''' loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == xptr self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken) assert self.cpu.get_latest_value_int(0) == 0 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1895,7 +1920,7 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) self.cpu.set_future_value_int(1, 0) @@ -1940,7 +1965,7 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) self.cpu.set_future_value_int(1, 0) @@ -1986,7 +2011,7 @@ ResOperation(rop.FINISH, [f2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, f2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) self.cpu.set_future_value_int(1, 0) @@ -2031,7 +2056,7 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([i1, i2]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1], ops, looptoken) self.cpu.set_future_value_int(0, ord('G')) fail = self.cpu.execute_token(looptoken) @@ -2091,7 +2116,7 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1, i2, i3], ops, looptoken) self.cpu.set_future_value_int(0, rffi.cast(lltype.Signed, raw)) self.cpu.set_future_value_int(1, 2) @@ -2147,7 +2172,7 @@ ops += [ ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0)) ] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') @@ -2169,7 +2194,7 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[0].setfailargs([i1]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, -42) @@ -2415,7 +2440,7 @@ i18 = int_add(i17, i9) finish(i18)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) 
ARGS = [lltype.Signed] * 10 @@ -2435,7 +2460,7 @@ finish(i11) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) for i in range(10): self.cpu.set_future_value_int(i, i+1) @@ -2471,7 +2496,7 @@ finish(f2)''' loop = parse(ops) done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) @@ -2486,7 +2511,7 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) @@ -2499,7 +2524,7 @@ del called[:] self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) @@ -2561,7 +2586,7 @@ f2 = float_add(f0, f1) finish(f2)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) @@ -2578,7 +2603,7 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # normal call_assembler: goes to looptoken @@ -2596,7 +2621,7 @@ f2 = float_sub(f0, f1) finish(f2)''' loop = parse(ops) - looptoken2 = LoopToken() + looptoken2 = JitCellToken() looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken2) @@ -2958,13 +2983,140 @@ ResOperation(rop.FINISH, [p0], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # overflowing value: self.cpu.set_future_value_int(0, sys.maxint // 4 + 1) fail = self.cpu.execute_token(looptoken) assert fail.identifier == excdescr.identifier + def test_compile_loop_with_target(self): + i0 = BoxInt() + i1 = BoxInt() + i2 = BoxInt() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken1 = TargetToken() + targettoken2 = TargetToken() + faildescr = BasicFailDescr(2) + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), + ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr), + ResOperation(rop.LABEL, [i1], None, descr=targettoken2), + ResOperation(rop.INT_GE, [i1, ConstInt(0)], i3), + ResOperation(rop.GUARD_TRUE, [i3], None, descr=BasicFailDescr(3)), + ResOperation(rop.JUMP, [i1], None, descr=targettoken1), + ] + inputargs = [i0] + operations[3].setfailargs([i1]) + operations[6].setfailargs([i1]) + + self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.set_future_value_int(0, 2) + fail = self.cpu.execute_token(looptoken) + assert fail.identifier == 2 + res = self.cpu.get_latest_value_int(0) + assert res 
== 10 + + inputargs = [i0] + operations = [ + ResOperation(rop.INT_SUB, [i0, ConstInt(20)], i2), + ResOperation(rop.JUMP, [i2], None, descr=targettoken2), + ] + self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) + + self.cpu.set_future_value_int(0, 2) + fail = self.cpu.execute_token(looptoken) + assert fail.identifier == 3 + res = self.cpu.get_latest_value_int(0) + assert res == -10 + + def test_compile_bridge_with_target(self): + # This test creates a loopy piece of code in a bridge, and builds another + # unrelated loop that ends in a jump directly to this loopy bit of code. + # It catches a case in which we underestimate the needed frame_depth across + # the cross-loop JUMP, because we estimate it based on the frame_depth stored + # in the original loop. + i0 = BoxInt() + i1 = BoxInt() + looptoken1 = JitCellToken() + targettoken1 = TargetToken() + faildescr1 = BasicFailDescr(2) + inputargs = [i0] + operations = [ + ResOperation(rop.INT_LE, [i0, ConstInt(1)], i1), + ResOperation(rop.GUARD_TRUE, [i1], None, descr=faildescr1), + ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(1234)), + ] + operations[1].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken1) + + def func(a, b, c, d, e, f, g, h, i): + assert a + 2 == b + assert a + 4 == c + assert a + 6 == d + assert a + 8 == e + assert a + 10 == f + assert a + 12 == g + assert a + 14 == h + assert a + 16 == i + FPTR = self.Ptr(self.FuncType([lltype.Signed]*9, lltype.Void)) + func_ptr = llhelper(FPTR, func) + cpu = self.cpu + calldescr = cpu.calldescrof(deref(FPTR), (lltype.Signed,)*9, lltype.Void, + EffectInfo.MOST_GENERAL) + funcbox = self.get_funcbox(cpu, func_ptr) + + i0 = BoxInt(); i1 = BoxInt(); i2 = BoxInt(); i3 = BoxInt(); i4 = BoxInt() + i5 = BoxInt(); i6 = BoxInt(); i7 = BoxInt(); i8 = BoxInt(); i9 = BoxInt() + i10 = BoxInt(); i11 = BoxInt(); i12 = BoxInt(); i13 = BoxInt(); i14 = BoxInt() + i15 = BoxInt(); i16 = BoxInt(); i17 = BoxInt(); i18 = BoxInt(); i19 = BoxInt() + i20 = BoxInt() + inputargs = [i0] + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_ADD, [i1, ConstInt(1)], i2), + ResOperation(rop.INT_ADD, [i2, ConstInt(1)], i3), + ResOperation(rop.INT_ADD, [i3, ConstInt(1)], i4), + ResOperation(rop.INT_ADD, [i4, ConstInt(1)], i5), + ResOperation(rop.INT_ADD, [i5, ConstInt(1)], i6), + ResOperation(rop.INT_ADD, [i6, ConstInt(1)], i7), + ResOperation(rop.INT_ADD, [i7, ConstInt(1)], i8), + ResOperation(rop.INT_ADD, [i8, ConstInt(1)], i9), + ResOperation(rop.INT_ADD, [i9, ConstInt(1)], i10), + ResOperation(rop.INT_ADD, [i10, ConstInt(1)], i11), + ResOperation(rop.INT_ADD, [i11, ConstInt(1)], i12), + ResOperation(rop.INT_ADD, [i12, ConstInt(1)], i13), + ResOperation(rop.INT_ADD, [i13, ConstInt(1)], i14), + ResOperation(rop.INT_ADD, [i14, ConstInt(1)], i15), + ResOperation(rop.INT_ADD, [i15, ConstInt(1)], i16), + ResOperation(rop.INT_ADD, [i16, ConstInt(1)], i17), + ResOperation(rop.INT_ADD, [i17, ConstInt(1)], i18), + ResOperation(rop.INT_ADD, [i18, ConstInt(1)], i19), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.INT_LT, [i19, ConstInt(100)], i20), + ResOperation(rop.GUARD_TRUE, [i20], None, descr=BasicFailDescr(42)), + ResOperation(rop.JUMP, [i19], None, descr=targettoken1), + ] + 
operations[-2].setfailargs([]) + self.cpu.compile_bridge(faildescr1, inputargs, operations, looptoken1) + + looptoken2 = JitCellToken() + inputargs = [] + operations = [ + ResOperation(rop.JUMP, [ConstInt(0)], None, descr=targettoken1), + ] + self.cpu.compile_loop(inputargs, operations, looptoken2) + + fail = self.cpu.execute_token(looptoken2) + assert fail.identifier == 42 + class OOtypeBackendTest(BaseBackendTest): diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -3,8 +3,8 @@ from pypy.rlib.rarithmetic import intmask, LONG_BIT from pypy.rpython.lltypesystem import llmemory from pypy.jit.metainterp.history import BasicFailDescr, TreeLoop -from pypy.jit.metainterp.history import BoxInt, ConstInt, LoopToken -from pypy.jit.metainterp.history import BoxPtr, ConstPtr +from pypy.jit.metainterp.history import BoxInt, ConstInt, JitCellToken +from pypy.jit.metainterp.history import BoxPtr, ConstPtr, TargetToken from pypy.jit.metainterp.history import BoxFloat, ConstFloat, Const from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.executor import execute_nonspec @@ -179,7 +179,7 @@ #print >>s, ' operations[%d].suboperations = [' % i #print >>s, ' ResOperation(rop.FAIL, [%s], None)]' % ( # ', '.join([names[v] for v in op.args])) - print >>s, ' looptoken = LoopToken()' + print >>s, ' looptoken = JitCellToken()' print >>s, ' cpu.compile_loop(inputargs, operations, looptoken)' if hasattr(self.loop, 'inputargs'): for i, v in enumerate(self.loop.inputargs): @@ -525,29 +525,53 @@ startvars.append(BoxFloat(r.random_float_storage())) else: startvars.append(BoxInt(r.random_integer())) + allow_delay = True + else: + allow_delay = False assert len(dict.fromkeys(startvars)) == len(startvars) self.startvars = startvars self.prebuilt_ptr_consts = [] self.r = r - self.build_random_loop(cpu, builder_factory, r, startvars) + self.build_random_loop(cpu, builder_factory, r, startvars, allow_delay) - def build_random_loop(self, cpu, builder_factory, r, startvars): + def build_random_loop(self, cpu, builder_factory, r, startvars, allow_delay): loop = TreeLoop('test_random_function') loop.inputargs = startvars[:] loop.operations = [] - loop.token = LoopToken() - + loop._jitcelltoken = JitCellToken() builder = builder_factory(cpu, loop, startvars[:]) - self.generate_ops(builder, r, loop, startvars) + if allow_delay: + needs_a_label = True + else: + self.insert_label(loop, 0, r) + needs_a_label = False + self.generate_ops(builder, r, loop, startvars, needs_a_label=needs_a_label) self.builder = builder self.loop = loop - cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + dump(loop) + cpu.compile_loop(loop.inputargs, loop.operations, loop._jitcelltoken) - def generate_ops(self, builder, r, loop, startvars): + def insert_label(self, loop, position, r): + assert not hasattr(loop, '_targettoken') + for i in range(position): + op = loop.operations[i] + if (not op.has_no_side_effect() + or not isinstance(op.result, (BoxInt, BoxFloat))): + position = i + break # cannot move the LABEL later + randompos = r.randrange(0, len(self.startvars)+1) + self.startvars.insert(randompos, op.result) + loop._targettoken = TargetToken() + loop.operations.insert(position, ResOperation(rop.LABEL, self.startvars, None, + loop._targettoken)) + + def generate_ops(self, builder, r, loop, startvars, needs_a_label=False): block_length = pytest.config.option.block_length + istart = 
0 for i in range(block_length): + istart = len(loop.operations) try: op = r.choice(builder.OPERATIONS) op.filter(builder) @@ -556,6 +580,12 @@ pass if builder.should_fail_by is not None: break + if needs_a_label and r.random() < 0.2: + self.insert_label(loop, istart, r) + needs_a_label = False + if needs_a_label: + self.insert_label(loop, istart, r) + endvars = [] used_later = {} for op in loop.operations: @@ -581,6 +611,17 @@ if pytest.config.option.output: builder.print_loop() + def runjitcelltoken(self): + if self.startvars == self.loop.inputargs: + return self.loop._jitcelltoken + if not hasattr(self, '_initialjumploop_celltoken'): + self._initialjumploop_celltoken = JitCellToken() + self.cpu.compile_loop(self.startvars[:], + [ResOperation(rop.JUMP, self.startvars[:], None, + descr=self.loop._targettoken)], + self._initialjumploop_celltoken) + return self._initialjumploop_celltoken + def get_fail_args(self): if self.should_fail_by.is_guard(): assert self.should_fail_by.getfailargs() is not None @@ -615,7 +656,7 @@ cpu.set_future_value_float(i, box.value) else: raise NotImplementedError(box) - fail = cpu.execute_token(self.loop.token) + fail = cpu.execute_token(self.runjitcelltoken()) assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): if isinstance(v, (BoxFloat, ConstFloat)): @@ -683,26 +724,37 @@ args = [x.clonebox() for x in subset] rl = RandomLoop(self.builder.cpu, self.builder.fork, r, args) + dump(rl.loop) self.cpu.compile_loop(rl.loop.inputargs, rl.loop.operations, - rl.loop.token) + rl.loop._jitcelltoken) # done self.should_fail_by = rl.should_fail_by self.expected = rl.expected assert len(rl.loop.inputargs) == len(args) # The new bridge's execution will end normally at its FINISH. # Just replace the FINISH with the JUMP to the new loop. 
- jump_op = ResOperation(rop.JUMP, subset, None, descr=rl.loop.token) + jump_op = ResOperation(rop.JUMP, subset, None, + descr=rl.loop._targettoken) subloop.operations[-1] = jump_op self.guard_op = rl.guard_op self.prebuilt_ptr_consts += rl.prebuilt_ptr_consts - self.loop.token.record_jump_to(rl.loop.token) + self.loop._jitcelltoken.record_jump_to(rl.loop._jitcelltoken) self.dont_generate_more = True if r.random() < .05: return False + dump(subloop) self.builder.cpu.compile_bridge(fail_descr, fail_args, - subloop.operations, self.loop.token) + subloop.operations, + self.loop._jitcelltoken) return True +def dump(loop): + print >> sys.stderr, loop + if hasattr(loop, 'inputargs'): + print >> sys.stderr, '\t', loop.inputargs + for op in loop.operations: + print >> sys.stderr, '\t', op + def check_random_function(cpu, BuilderClass, r, num=None, max=None): loop = RandomLoop(cpu, BuilderClass, r) while True: diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -2,8 +2,8 @@ from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from pypy.jit.metainterp.history import Const, Box, BoxInt, ConstInt -from pypy.jit.metainterp.history import (AbstractFailDescr, INT, REF, FLOAT, - LoopToken) +from pypy.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT +from pypy.jit.metainterp.history import JitCellToken from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper @@ -152,14 +152,13 @@ allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, allblocks) + self.target_tokens_currently_compiling = {} def teardown(self): self.pending_guard_tokens = None if WORD == 8: self.pending_memoryerror_trampoline_from = None self.mc = None - self.looppos = -1 - self.currently_compiling_loop = None self.current_clt = None def finish_once(self): @@ -425,8 +424,6 @@ _x86_loop_code (an integer giving an address) _x86_bootstrap_code (an integer giving an address) _x86_direct_bootstrap_code ( " " " " ) - _x86_frame_depth - _x86_param_depth _x86_arglocs _x86_debug_checksum ''' @@ -443,7 +440,6 @@ assert len(set(inputargs)) == len(inputargs) self.setup(looptoken) - self.currently_compiling_loop = looptoken if log: self._register_counter(False, looptoken.number) operations = self._inject_debugging_code(looptoken, operations) @@ -455,15 +451,16 @@ bootstrappos = self.mc.get_relative_pos() stackadjustpos = self._assemble_bootstrap_code(inputargs, arglocs) - self.looppos = self.mc.get_relative_pos() - looptoken._x86_frame_depth = -1 # temporarily - looptoken._x86_param_depth = -1 # temporarily + looppos = self.mc.get_relative_pos() + looptoken._x86_loop_code = looppos + clt.frame_depth = -1 # temporarily + clt.param_depth = -1 # temporarily frame_depth, param_depth = self._assemble(regalloc, operations) - looptoken._x86_frame_depth = frame_depth - looptoken._x86_param_depth = param_depth + clt.frame_depth = frame_depth + clt.param_depth = param_depth directbootstrappos = self.mc.get_relative_pos() - self._assemble_bootstrap_direct_call(arglocs, self.looppos, + self._assemble_bootstrap_direct_call(arglocs, looppos, frame_depth+param_depth) self.write_pending_failure_recoveries() fullsize = self.mc.get_relative_pos() @@ -472,7 +469,7 @@ debug_start("jit-backend-addr") 
debug_print("Loop %d (%s) has address %x to %x (bootstrap %x)" % ( looptoken.number, loopname, - rawstart + self.looppos, + rawstart + looppos, rawstart + directbootstrappos, rawstart)) debug_stop("jit-backend-addr") @@ -488,8 +485,8 @@ looptoken._x86_ops_offset = ops_offset looptoken._x86_bootstrap_code = rawstart + bootstrappos - looptoken._x86_loop_code = rawstart + self.looppos looptoken._x86_direct_bootstrap_code = rawstart + directbootstrappos + self.fixup_target_tokens(rawstart) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -548,6 +545,9 @@ # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset + self.fixup_target_tokens(rawstart) + self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth) + self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -668,6 +668,11 @@ mc.copy_to_raw_memory(adr_target) faildescr._x86_adr_jump_offset = 0 # means "patched" + def fixup_target_tokens(self, rawstart): + for targettoken in self.target_tokens_currently_compiling: + targettoken._x86_loop_code += rawstart + self.target_tokens_currently_compiling = None + @specialize.argtype(1) def _inject_debugging_code(self, looptoken, operations): if self._debug: @@ -685,7 +690,10 @@ ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), ResOperation(rop.SETFIELD_RAW, [c_adr, box2], None, descr=self.debug_counter_descr)] - operations = ops + operations + if operations[0].getopnum() == rop.LABEL: + operations = [operations[0]] + ops + operations[1:] + else: + operations = ops + operations return operations def _assemble(self, regalloc, operations): @@ -698,8 +706,8 @@ param_depth = regalloc.param_depth jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: - target_frame_depth = jump_target_descr._x86_frame_depth - target_param_depth = jump_target_descr._x86_param_depth + target_frame_depth = jump_target_descr._x86_clt.frame_depth + target_param_depth = jump_target_descr._x86_clt.param_depth frame_depth = max(frame_depth, target_frame_depth) param_depth = max(param_depth, target_param_depth) return frame_depth, param_depth @@ -2345,7 +2353,7 @@ fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, JitCellToken) assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) # # Write a call to the direct_bootstrap_code of the target assembler @@ -2579,15 +2587,13 @@ gcrootmap.put(self.gcrootmap_retaddr_forced, mark) self.gcrootmap_retaddr_forced = -1 - def target_arglocs(self, loop_token): - return loop_token._x86_arglocs - - def closing_jump(self, loop_token): - if loop_token is self.currently_compiling_loop: + def closing_jump(self, target_token): + target = target_token._x86_loop_code + if target_token in self.target_tokens_currently_compiling: curpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(self.looppos - curpos) + self.mc.JMP_l(target - curpos) else: - self.mc.JMP(imm(loop_token._x86_loop_code)) + self.mc.JMP(imm(target)) def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -5,7 +5,8 @@ import os from 
pypy.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr, ResOperation, BoxPtr, ConstFloat, - BoxFloat, LoopToken, INT, REF, FLOAT) + BoxFloat, INT, REF, FLOAT, + TargetToken, JitCellToken) from pypy.jit.backend.x86.regloc import * from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.rlib.objectmodel import we_are_translated @@ -163,6 +164,7 @@ # to be read/used by the assembler too self.jump_target_descr = None self.close_stack_struct = 0 + self.final_jump_op = None def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() @@ -891,7 +893,7 @@ def consider_call_assembler(self, op, guard_op): descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, JitCellToken) jd = descr.outermost_jitdriver_sd assert jd is not None size = jd.portal_calldescr.get_result_size(self.translate_support_code) @@ -1323,16 +1325,30 @@ def compute_hint_frame_locations(self, operations): # optimization only: fill in the 'hint_frame_locations' dictionary - # of rm and xrm based on the JUMP at the end of the loop, by looking + # of 'fm' based on the JUMP at the end of the loop, by looking # at where we would like the boxes to be after the jump. op = operations[-1] if op.getopnum() != rop.JUMP: return + self.final_jump_op = op descr = op.getdescr() - assert isinstance(descr, LoopToken) - nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr) - for i in range(op.numargs()): - box = op.getarg(i) + assert isinstance(descr, TargetToken) + if descr._x86_loop_code != 0: + # if the target LABEL was already compiled, i.e. if it belongs + # to some already-compiled piece of code + self._compute_hint_frame_locations_from_descr(descr) + #else: + # The loop ends in a JUMP going back to a LABEL in the same loop. + # We cannot fill 'hint_frame_locations' immediately, but we can + # wait until the corresponding consider_label() to know where the + # we would like the boxes to be after the jump. + + def _compute_hint_frame_locations_from_descr(self, descr): + nonfloatlocs, floatlocs = descr._x86_arglocs + jump_op = self.final_jump_op + assert len(nonfloatlocs) == jump_op.numargs() + for i in range(jump_op.numargs()): + box = jump_op.getarg(i) if isinstance(box, Box): loc = nonfloatlocs[i] if isinstance(loc, StackLoc): @@ -1348,9 +1364,9 @@ assembler = self.assembler assert self.jump_target_descr is None descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, TargetToken) + nonfloatlocs, floatlocs = descr._x86_arglocs self.jump_target_descr = descr - nonfloatlocs, floatlocs = assembler.target_arglocs(self.jump_target_descr) # compute 'tmploc' to be all_regs[0] by spilling what is there box = TempBox() box1 = TempBox() @@ -1423,6 +1439,74 @@ # the FORCE_TOKEN operation returns directly 'ebp' self.rm.force_allocate_frame_reg(op.result) + def consider_label(self, op): + # XXX big refactoring needed? 
+ descr = op.getdescr() + assert isinstance(descr, TargetToken) + inputargs = op.getarglist() + floatlocs = [None] * len(inputargs) + nonfloatlocs = [None] * len(inputargs) + # + # we need to make sure that the tmpreg and xmmtmp are free + tmpreg = X86RegisterManager.all_regs[0] + tmpvar = TempBox() + self.rm.force_allocate_reg(tmpvar, selected_reg=tmpreg) + self.rm.possibly_free_var(tmpvar) + # + xmmtmp = X86XMMRegisterManager.all_regs[0] + tmpvar = TempBox() + self.xrm.force_allocate_reg(tmpvar, selected_reg=xmmtmp) + self.xrm.possibly_free_var(tmpvar) + # + # we need to make sure that no variable is stored in ebp + for arg in inputargs: + if self.loc(arg) is ebp: + loc2 = self.fm.loc(arg) + self.assembler.mc.MOV(loc2, ebp) + self.rm.bindings_to_frame_reg.clear() + # + for i in range(len(inputargs)): + arg = inputargs[i] + assert not isinstance(arg, Const) + loc = self.loc(arg) + assert not (loc is tmpreg or loc is xmmtmp or loc is ebp) + if arg.type == FLOAT: + floatlocs[i] = loc + else: + nonfloatlocs[i] = loc + if isinstance(loc, RegLoc): + self.fm.mark_as_free(arg) + descr._x86_arglocs = nonfloatlocs, floatlocs + descr._x86_loop_code = self.assembler.mc.get_relative_pos() + descr._x86_clt = self.assembler.current_clt + self.assembler.target_tokens_currently_compiling[descr] = None + self.possibly_free_vars_for_op(op) + # + # if the LABEL's descr is precisely the target of the JUMP at the + # end of the same loop, i.e. if what we are compiling is a single + # loop that ends up jumping to this LABEL, then we can now provide + # the hints about the expected position of the spilled variables. + jump_op = self.final_jump_op + if jump_op is not None and jump_op.getdescr() is descr: + self._compute_hint_frame_locations_from_descr(descr) + +## from pypy.rpython.annlowlevel import llhelper +## def fn(addr): +## print '...label:', hex(addr), nonfloatlocs +## FUNC = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) +## ll_disp = llhelper(FUNC, fn) +## faddr = rffi.cast(lltype.Signed, ll_disp) +## for i in range(16): +## self.assembler.mc.PUSH_r(i) +## self.assembler.mc.CALL_l(0) +## self.assembler.mc.POP(edi) +## self.assembler.mc.MOV(r11, imm(faddr)) +## self.assembler.mc.CALL(r11) +## for i in range(15, -1, -1): +## if i == esp.value: +## i -= 1 +## self.assembler.mc.POP_r(i) + def not_implemented_op(self, op): not_implemented("not implemented operation: %s" % op.getopname()) @@ -1478,3 +1562,7 @@ def not_implemented(msg): os.write(2, '[x86/regalloc] %s\n' % msg) raise NotImplementedError(msg) + +# xxx hack: set a default value for TargetToken._x86_loop_code. +# If 0, we know that it is a LABEL that was not compiled yet. 
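
The regalloc/assembler changes above rely on a small convention: a TargetToken whose _x86_loop_code is still 0 marks a LABEL that has not been compiled yet; while a piece of code is being assembled, consider_label() stores a buffer-relative offset there, and fixup_target_tokens() turns it into an absolute address once the machine code has been copied to rawstart. Below is a minimal, self-contained sketch of that bookkeeping; the Mini* names are invented for illustration and are not the real PyPy classes.

    class MiniTargetToken(object):
        _loop_code = 0     # class-level default: "this LABEL is not compiled yet"

    class MiniAssembler(object):
        def __init__(self):
            # labels emitted while assembling the current piece of code;
            # their _loop_code is still relative to the start of the buffer
            self.target_tokens_currently_compiling = {}

        def emit_label(self, token, relative_pos):
            token._loop_code = relative_pos
            self.target_tokens_currently_compiling[token] = None

        def fixup_target_tokens(self, rawstart):
            # called once the code has been copied to memory at 'rawstart':
            # turn every buffer-relative label offset into an absolute address
            for token in self.target_tokens_currently_compiling:
                token._loop_code += rawstart
            self.target_tokens_currently_compiling = None

    asm = MiniAssembler()
    tok = MiniTargetToken()
    asm.emit_label(tok, 0x40)          # LABEL sits at offset 0x40 in the buffer
    asm.fixup_target_tokens(0x100000)  # buffer ended up mapped at 0x100000
    assert tok._loop_code == 0x100040

With that convention, compute_hint_frame_locations() above can test descr._x86_loop_code != 0 to tell a jump to already-compiled code apart from a jump back to a LABEL of the trace currently being compiled.
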
+TargetToken._x86_loop_code = 0 diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -215,14 +215,3 @@ super(CPU_X86_64, self).__init__(*args, **kwargs) CPU = CPU386 - -# silence warnings -##history.LoopToken._x86_param_depth = 0 -##history.LoopToken._x86_arglocs = (None, None) -##history.LoopToken._x86_frame_depth = 0 -##history.LoopToken._x86_bootstrap_code = 0 -##history.LoopToken._x86_direct_bootstrap_code = 0 -##history.LoopToken._x86_loop_code = 0 -##history.LoopToken._x86_debug_checksum = 0 -##compile.AbstractFailDescr._x86_current_depths = (0, 0) -##compile.AbstractFailDescr._x86_adr_jump_offset = 0 diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -4,7 +4,7 @@ import py from pypy.jit.metainterp.history import BoxInt, ConstInt,\ - BoxPtr, ConstPtr, TreeLoop + BoxPtr, ConstPtr, TreeLoop, TargetToken from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.codewriter import heaptracker from pypy.jit.codewriter.effectinfo import EffectInfo @@ -113,6 +113,8 @@ descr0 = cpu.fielddescrof(S, 'int') ptr0 = struct_ref + targettoken = TargetToken() + namespace = locals().copy() def test_basic(self): @@ -136,6 +138,7 @@ def test_bug_0(self): ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8] + label(i0, i1, i2, i3, i4, i5, i6, i7, i8, descr=targettoken) guard_value(i2, 1) [i2, i3, i4, i5, i6, i7, i0, i1, i8] guard_class(i4, 138998336) [i4, i5, i6, i7, i0, i1, i8] i11 = getfield_gc(i4, descr=descr0) @@ -163,7 +166,7 @@ guard_false(i32) [i4, i6, i7, i0, i1, i24] i33 = getfield_gc(i0, descr=descr0) guard_value(i33, ConstPtr(ptr0)) [i4, i6, i7, i0, i1, i33, i24] - jump(i0, i1, 1, 17, i4, ConstPtr(ptr0), i6, i7, i24) + jump(i0, i1, 1, 17, i4, ConstPtr(ptr0), i6, i7, i24, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0, 0, 0, 0, 0, 0], run=False) diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -5,10 +5,11 @@ def test_compile_bridge_not_deeper(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' loop = self.interpret(ops, [0]) assert self.getint(0) == 20 @@ -26,14 +27,15 @@ def test_compile_bridge_deeper(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' loop = self.interpret(ops, [0]) - previous = loop.token._x86_frame_depth - assert loop.token._x86_param_depth == 0 + previous = loop._jitcelltoken.compiled_loop_token.frame_depth + assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 assert self.getint(0) == 20 ops = ''' [i1] @@ -48,7 +50,7 @@ finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - descr = loop.operations[2].getdescr() + descr = loop.operations[3].getdescr() new = descr._x86_bridge_frame_depth assert descr._x86_bridge_param_depth == 0 # the force_spill() forces the stack to grow @@ -64,21 +66,23 @@ def test_bridge_jump_to_other_loop(self): loop = self.interpret(''' [i0, i10, i11, i12, i13, i14, i15, i16] + label(i0, i10, i11, 
i12, i13, i14, i15, i16, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1, i10, i11, i12, i13, i14, i15, i16) + jump(i1, i10, i11, i12, i13, i14, i15, i16, descr=targettoken) ''', [0]) other_loop = self.interpret(''' [i3] + label(i3, descr=targettoken2) guard_false(i3, descr=fdescr2) [i3] - jump(i3) + jump(i3, descr=targettoken2) ''', [1]) ops = ''' [i3] - jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=looptoken) + jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=targettoken) ''' - bridge = self.attach_bridge(ops, other_loop, 0, looptoken=loop.token) + bridge = self.attach_bridge(ops, other_loop, 1) self.cpu.set_future_value_int(0, 1) fail = self.run(other_loop) assert fail.identifier == 1 @@ -86,6 +90,7 @@ def test_bridge_jumps_to_self_deeper(self): loop = self.interpret(''' [i0, i1, i2, i31, i32, i33] + label(i0, i1, i2, i31, i32, i33, descr=targettoken) i98 = same_as(0) i99 = same_as(1) i30 = int_add(i1, i2) @@ -94,7 +99,7 @@ guard_false(i4) [i98, i3] i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] - jump(i3, i30, 1, i30, i30, i30) + jump(i3, i30, 1, i30, i30, i30, descr=targettoken) ''', [0]) assert self.getint(0) == 0 assert self.getint(1) == 1 @@ -109,12 +114,12 @@ force_spill(i8) i12 = int_add(i7, i8) i11 = int_add(i12, i6) - jump(i3, i12, i11, i10, i6, i7, descr=looptoken) + jump(i3, i12, i11, i10, i6, i7, descr=targettoken) ''' - bridge = self.attach_bridge(ops, loop, 5, looptoken=loop.token) - guard_op = loop.operations[5] - loop_frame_depth = loop.token._x86_frame_depth - assert loop.token._x86_param_depth == 0 + loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth + bridge = self.attach_bridge(ops, loop, 6) + guard_op = loop.operations[6] + assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 # the force_spill() forces the stack to grow assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth assert guard_op.getdescr()._x86_bridge_param_depth == 0 @@ -128,6 +133,7 @@ def test_bridge_jumps_to_self_shallower(self): loop = self.interpret(''' [i0, i1, i2] + label(i0, i1, i2, descr=targettoken) i98 = same_as(0) i99 = same_as(1) i3 = int_add(i0, 1) @@ -135,15 +141,15 @@ guard_false(i4) [i98, i3] i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] - jump(i3, i1, i2) + jump(i3, i1, i2, descr=targettoken) ''', [0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' [i97, i3] - jump(i3, 0, 1, descr=looptoken) + jump(i3, 0, 1, descr=targettoken) ''' - bridge = self.attach_bridge(ops, loop, 4, looptoken=loop.token) + bridge = self.attach_bridge(ops, loop, 5) self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 0) self.cpu.set_future_value_int(2, 0) diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -4,7 +4,7 @@ import py from pypy.jit.metainterp.history import BoxInt, ConstInt,\ - BoxPtr, ConstPtr, LoopToken, BasicFailDescr + BoxPtr, ConstPtr, BasicFailDescr, JitCellToken, TargetToken from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.detect_cpu import getcpuclass @@ -96,10 +96,16 @@ raising_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, EffectInfo.MOST_GENERAL) + targettoken = TargetToken() + targettoken2 = TargetToken() fdescr1 = BasicFailDescr(1) fdescr2 = BasicFailDescr(2) fdescr3 = BasicFailDescr(3) + def setup_method(self, meth): + 
self.targettoken._x86_loop_code = 0 + self.targettoken2._x86_loop_code = 0 + def f1(x): return x+1 @@ -134,7 +140,8 @@ def interpret(self, ops, args, run=True): loop = self.parse(ops) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) for i, arg in enumerate(args): if isinstance(arg, int): self.cpu.set_future_value_int(i, arg) @@ -145,15 +152,16 @@ assert isinstance(lltype.typeOf(arg), lltype.Ptr) llgcref = lltype.cast_opaque_ptr(llmemory.GCREF, arg) self.cpu.set_future_value_ref(i, llgcref) + loop._jitcelltoken = looptoken if run: - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken) return loop def prepare_loop(self, ops): loop = self.parse(ops) regalloc = RegAlloc(self.cpu.assembler, False) regalloc.prepare_loop(loop.inputargs, loop.operations, - loop.token, []) + loop.original_jitcell_token, []) return regalloc def getint(self, index): @@ -174,10 +182,7 @@ gcref = self.cpu.get_latest_value_ref(index) return lltype.cast_opaque_ptr(T, gcref) - def attach_bridge(self, ops, loop, guard_op_index, looptoken=None, **kwds): - if looptoken is not None: - self.namespace = self.namespace.copy() - self.namespace['looptoken'] = looptoken + def attach_bridge(self, ops, loop, guard_op_index, **kwds): guard_op = loop.operations[guard_op_index] assert guard_op.is_guard() bridge = self.parse(ops, **kwds) @@ -185,20 +190,21 @@ [box.type for box in guard_op.getfailargs()]) faildescr = guard_op.getdescr() self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations, - loop.token) + loop._jitcelltoken) return bridge def run(self, loop): - return self.cpu.execute_token(loop.token) + return self.cpu.execute_token(loop._jitcelltoken) class TestRegallocSimple(BaseTestRegalloc): def test_simple_loop(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' self.interpret(ops, [0]) assert self.getint(0) == 20 @@ -206,27 +212,29 @@ def test_two_loops_and_a_bridge(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_add(i0, 1) i5 = int_lt(i4, 20) guard_true(i5) [i4, i1, i2, i3] - jump(i4, i1, i2, i3) + jump(i4, i1, i2, i3, descr=targettoken) ''' loop = self.interpret(ops, [0, 0, 0, 0]) ops2 = ''' [i5] + label(i5, descr=targettoken2) i1 = int_add(i5, 1) i3 = int_add(i1, 1) i4 = int_add(i3, 1) i2 = int_lt(i4, 30) guard_true(i2) [i4] - jump(i4) + jump(i4, descr=targettoken2) ''' loop2 = self.interpret(ops2, [0]) bridge_ops = ''' [i4] - jump(i4, i4, i4, i4, descr=looptoken) + jump(i4, i4, i4, i4, descr=targettoken) ''' - bridge = self.attach_bridge(bridge_ops, loop2, 4, looptoken=loop.token) + bridge = self.attach_bridge(bridge_ops, loop2, 5) self.cpu.set_future_value_int(0, 0) self.run(loop2) assert self.getint(0) == 31 @@ -237,10 +245,11 @@ def test_pointer_arg(self): ops = ''' [i0, p0] + label(i0, p0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 10) guard_true(i2) [p0] - jump(i1, p0) + jump(i1, p0, descr=targettoken) ''' S = lltype.GcStruct('S') ptr = lltype.malloc(S) @@ -318,10 +327,11 @@ def test_spill_for_constant(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_add(3, i1) i5 = int_lt(i4, 30) guard_true(i5) [i0, i4, i2, i3] - jump(1, i4, 3, 4) + jump(1, i4, 3, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1, 30, 3, 4] @@ -329,31 +339,34 @@ def 
test_spill_for_constant_lshift(self): ops = ''' [i0, i2, i1, i3] + label(i0, i2, i1, i3, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, 3, i5, 4) + jump(i4, 3, i5, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, i5, 3, 4) + jump(i4, i5, 3, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] ops = ''' [i0, i3, i1, i2] + label(i0, i3, i1, i2, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, 4, i5, 3) + jump(i4, 4, i5, 3, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] @@ -361,11 +374,12 @@ def test_result_selected_reg_via_neg(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i6 = int_neg(i2) i7 = int_add(1, i1) i4 = int_lt(i7, 10) guard_true(i4) [i0, i6, i7] - jump(1, i7, i2, i6) + jump(1, i7, i2, i6, descr=targettoken) ''' self.interpret(ops, [0, 0, 3, 0]) assert self.getints(3) == [1, -3, 10] @@ -373,11 +387,12 @@ def test_compare_memory_result_survives(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_lt(i0, i1) i5 = int_add(i3, 1) i6 = int_lt(i5, 30) guard_true(i6) [i4] - jump(i0, i1, i4, i5) + jump(i0, i1, i4, i5, descr=targettoken) ''' self.interpret(ops, [0, 10, 0, 0]) assert self.getint(0) == 1 @@ -385,10 +400,11 @@ def test_jump_different_args(self): ops = ''' [i0, i15, i16, i18, i1, i2, i3] + label(i0, i15, i16, i18, i1, i2, i3, descr=targettoken) i4 = int_add(i3, 1) i5 = int_lt(i4, 20) guard_true(i5) [i2, i1] - jump(i0, i18, i15, i16, i2, i1, i4) + jump(i0, i18, i15, i16, i2, i1, i4, descr=targettoken) ''' self.interpret(ops, [0, 1, 2, 3]) @@ -474,6 +490,7 @@ class TestRegallocMoreRegisters(BaseTestRegalloc): cpu = BaseTestRegalloc.cpu + targettoken = TargetToken() S = lltype.GcStruct('S', ('field', lltype.Char)) fielddescr = cpu.fielddescrof(S, 'field') @@ -546,6 +563,7 @@ def test_division_optimized(self): ops = ''' [i7, i6] + label(i7, i6, descr=targettoken) i18 = int_floordiv(i7, i6) i19 = int_xor(i7, i6) i21 = int_lt(i19, 0) @@ -553,7 +571,7 @@ i23 = int_is_true(i22) i24 = int_eq(i6, 4) guard_false(i24) [i18] - jump(i18, i6) + jump(i18, i6, descr=targettoken) ''' self.interpret(ops, [10, 4]) assert self.getint(0) == 2 @@ -624,7 +642,8 @@ ''' loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] - assert loop.token._x86_param_depth == self.expected_param_depth(1) + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(1) def test_two_calls(self): ops = ''' @@ -635,7 +654,8 @@ ''' loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) assert self.getints(11) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] - assert loop.token._x86_param_depth == self.expected_param_depth(2) + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(2) def test_call_many_arguments(self): # NB: The first and last arguments in the call are constants. 
This @@ -648,7 +668,8 @@ ''' loop = self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9]) assert self.getint(0) == 55 - assert loop.token._x86_param_depth == self.expected_param_depth(10) + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(10) def test_bridge_calls_1(self): ops = ''' diff --git a/pypy/jit/backend/x86/test/test_regalloc2.py b/pypy/jit/backend/x86/test/test_regalloc2.py --- a/pypy/jit/backend/x86/test/test_regalloc2.py +++ b/pypy/jit/backend/x86/test/test_regalloc2.py @@ -1,6 +1,6 @@ import py from pypy.jit.metainterp.history import ResOperation, BoxInt, ConstInt,\ - BoxPtr, ConstPtr, BasicFailDescr, LoopToken + BoxPtr, ConstPtr, BasicFailDescr, JitCellToken from pypy.jit.metainterp.resoperation import rop from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.arch import WORD @@ -20,7 +20,7 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, 9) cpu.execute_token(looptoken) @@ -43,7 +43,7 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, -10) cpu.execute_token(looptoken) @@ -140,7 +140,7 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, -13) cpu.set_future_value_int(1, 10) @@ -255,7 +255,7 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, 17) cpu.set_future_value_int(1, -20) diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -1,9 +1,10 @@ import py from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr, rclass from pypy.rpython.annlowlevel import llhelper -from pypy.jit.metainterp.history import ResOperation, LoopToken +from pypy.jit.metainterp.history import ResOperation, TargetToken, JitCellToken from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstFloat, - ConstPtr, Box, BoxFloat, BasicFailDescr) + ConstPtr, Box, BoxFloat, + BasicFailDescr) from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.arch import WORD from pypy.jit.backend.x86.rx86 import fits_in_32bits @@ -279,7 +280,7 @@ descr=BasicFailDescr()), ] ops[-2].setfailargs([i1]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([b], ops, looptoken) if op == rop.INT_IS_TRUE: self.cpu.set_future_value_int(0, b.value) @@ -329,7 +330,7 @@ ] ops[-2].setfailargs([i1]) inputargs = [i for i in (a, b) if isinstance(i, Box)] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, ops, looptoken) for i, box in enumerate(inputargs): self.cpu.set_future_value_int(i, box.value) @@ -353,9 +354,10 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() + targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.number = 17 class FakeString(object): def __init__(self, val): @@ -365,14 +367,15 @@ return self.val operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.DEBUG_MERGE_POINT, 
[FakeString("hello"), 0], None), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[3].setfailargs([i1]) + operations[-2].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) name, loopaddress, loopsize = agent.functions[0] assert name == "Loop # 17: hello (loop counter 0)" @@ -385,7 +388,7 @@ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0], None), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -408,11 +411,13 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] debug._log = dlog = debug.DebugLog() @@ -499,7 +504,7 @@ ops[3].setfailargs([]) ops[5].setfailargs([]) ops[7].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) self.cpu.set_future_value_int(0, 123450) @@ -523,19 +528,21 @@ loop = """ [i0] + label(i0, descr=targettoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1) + jump(i1, descr=targettoken) """ - ops = parse(loop) + ops = parse(loop, namespace={'targettoken': TargetToken()}) debug._log = dlog = debug.DebugLog() try: self.cpu.assembler.set_debug(True) - self.cpu.compile_loop(ops.inputargs, ops.operations, ops.token) + looptoken = JitCellToken() + self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(ops.token) + self.cpu.execute_token(looptoken) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] assert struct.i == 10 @@ -547,16 +554,18 @@ def test_debugger_checksum(self): loop = """ [i0] + label(i0, descr=targettoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1) + jump(i1, descr=targettoken) """ - ops = parse(loop) + ops = parse(loop, namespace={'targettoken': TargetToken()}) self.cpu.assembler.set_debug(True) - self.cpu.compile_loop(ops.inputargs, ops.operations, ops.token) + looptoken = JitCellToken() + self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(ops.token) - assert ops.token._x86_debug_checksum == sum([op.getopnum() + self.cpu.execute_token(looptoken) + assert looptoken._x86_debug_checksum == sum([op.getopnum() for op in ops.operations]) diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -9,12 +9,13 @@ from pypy.tool.sourcetools import func_with_new_name from pypy.jit.metainterp.resoperation import ResOperation, rop, get_deep_immutable_oplist -from pypy.jit.metainterp.history import TreeLoop, Box, History, LoopToken +from pypy.jit.metainterp.history import TreeLoop, Box, 
History, JitCellToken, TargetToken from pypy.jit.metainterp.history import AbstractFailDescr, BoxInt from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const from pypy.jit.metainterp import history from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.jit.metainterp.optimize import InvalidLoop +from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resume import NUMBERING, PENDINGFIELDSP from pypy.jit.codewriter import heaptracker, longlong @@ -23,7 +24,7 @@ from pypy.jit.metainterp.jitprof import ABORT_BRIDGE raise SwitchToBlackhole(ABORT_BRIDGE) -def show_loop(metainterp_sd, loop=None, error=None): +def show_procedures(metainterp_sd, procedure=None, error=None): # debugging if option.view or option.viewloops: if error: @@ -32,11 +33,12 @@ errmsg += ': ' + str(error) else: errmsg = None - if loop is None: # or type(loop) is TerminatingLoop: - extraloops = [] + if procedure is None: + extraprocedures = [] else: - extraloops = [loop] - metainterp_sd.stats.view(errmsg=errmsg, extraloops=extraloops) + extraprocedures = [procedure] + metainterp_sd.stats.view(errmsg=errmsg, + extraprocedures=extraprocedures) def create_empty_loop(metainterp, name_prefix=''): name = metainterp.staticdata.stats.name_for_new_loop() @@ -45,131 +47,224 @@ return loop -def make_loop_token(nb_args, jitdriver_sd): - loop_token = LoopToken() - loop_token.outermost_jitdriver_sd = jitdriver_sd - return loop_token +def make_jitcell_token(jitdriver_sd): + jitcell_token = JitCellToken() + jitcell_token.outermost_jitdriver_sd = jitdriver_sd + return jitcell_token def record_loop_or_bridge(metainterp_sd, loop): """Do post-backend recordings and cleanups on 'loop'. """ - # get the original loop token (corresponding to 'loop', or if that is - # a bridge, to the loop that this bridge belongs to) - looptoken = loop.token - assert looptoken is not None + # get the original jitcell token corresponding to jitcell form which + # this trace starts + original_jitcell_token = loop.original_jitcell_token + assert original_jitcell_token is not None if metainterp_sd.warmrunnerdesc is not None: # for tests - assert looptoken.generation > 0 # has been registered with memmgr - wref = weakref.ref(looptoken) + assert original_jitcell_token.generation > 0 # has been registered with memmgr + wref = weakref.ref(original_jitcell_token) for op in loop.operations: descr = op.getdescr() if isinstance(descr, ResumeDescr): descr.wref_original_loop_token = wref # stick it there n = descr.index if n >= 0: # we also record the resumedescr number - looptoken.compiled_loop_token.record_faildescr_index(n) - elif isinstance(descr, LoopToken): - # for a JUMP or a CALL_ASSEMBLER: record it as a potential jump. + original_jitcell_token.compiled_loop_token.record_faildescr_index(n) + elif isinstance(descr, JitCellToken): + # for a CALL_ASSEMBLER: record it as a potential jump. + if descr is not original_jitcell_token: + original_jitcell_token.record_jump_to(descr) + descr.exported_state = None + op._descr = None # clear reference, mostly for tests + elif isinstance(descr, TargetToken): + # for a JUMP: record it as a potential jump. 
# (the following test is not enough to prevent more complicated # cases of cycles, but at least it helps in simple tests of # test_memgr.py) - if descr is not looptoken: - looptoken.record_jump_to(descr) - op._descr = None # clear reference, mostly for tests + if descr.original_jitcell_token is not original_jitcell_token: + assert descr.original_jitcell_token is not None + original_jitcell_token.record_jump_to(descr.original_jitcell_token) + # exported_state is clear by optimizeopt when the short preamble is + # constrcucted. if that did not happen the label should not show up + # in a trace that will be used + assert descr.exported_state is None if not we_are_translated(): - op._jumptarget_number = descr.number + op._descr_wref = weakref.ref(op._descr) + op._descr = None # clear reference to prevent the history.Stats + # from keeping the loop alive during tests # record this looptoken on the QuasiImmut used in the code if loop.quasi_immutable_deps is not None: for qmut in loop.quasi_immutable_deps: qmut.register_loop_token(wref) # XXX maybe we should clear the dictionary here # mostly for tests: make sure we don't keep a reference to the LoopToken - loop.token = None + loop.original_jitcell_token = None if not we_are_translated(): - loop._looptoken_number = looptoken.number + loop._looptoken_number = original_jitcell_token.number # ____________________________________________________________ -def compile_new_loop(metainterp, old_loop_tokens, greenkey, start, - start_resumedescr, full_preamble_needed=True): - """Try to compile a new loop by closing the current history back +def compile_loop(metainterp, greenkey, start, + inputargs, jumpargs, + start_resumedescr, full_preamble_needed=True): + """Try to compile a new procedure by closing the current history back to the first operation. 
""" - from pypy.jit.metainterp.optimize import optimize_loop + from pypy.jit.metainterp.optimizeopt import optimize_trace history = metainterp.history - loop = create_empty_loop(metainterp) - loop.inputargs = history.inputargs[:] + metainterp_sd = metainterp.staticdata + jitdriver_sd = metainterp.jitdriver_sd + + if False: + part = partial_trace + assert False + procedur_token = metainterp.get_procedure_token(greenkey) + assert procedure_token + all_target_tokens = [] + else: + jitcell_token = make_jitcell_token(jitdriver_sd) + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + h_ops = history.operations + part.start_resumedescr = start_resumedescr + part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ + [h_ops[i].clone() for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, jumpargs, None, descr=jitcell_token)] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens = [target_token] + + loop = create_empty_loop(metainterp) + loop.inputargs = part.inputargs + loop.operations = part.operations + loop.quasi_immutable_deps = {} + if part.quasi_immutable_deps: + loop.quasi_immutable_deps.update(part.quasi_immutable_deps) + while part.operations[-1].getopnum() == rop.LABEL: + inliner = Inliner(inputargs, jumpargs) + part.quasi_immutable_deps = None + part.operations = [part.operations[-1]] + \ + [inliner.inline_op(h_ops[i]) for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jumpargs], + None, descr=jitcell_token)] + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens.append(target_token) + inputargs = jumpargs + jumpargs = part.operations[-1].getarglist() + + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + + loop.operations = loop.operations[:-1] + part.operations + if part.quasi_immutable_deps: + loop.quasi_immutable_deps.update(part.quasi_immutable_deps) + + if not loop.quasi_immutable_deps: + loop.quasi_immutable_deps = None for box in loop.inputargs: assert isinstance(box, Box) - # make a copy, because optimize_loop can mutate the ops and descrs - h_ops = history.operations - loop.operations = [h_ops[i].clone() for i in range(start, len(h_ops))] + + loop.original_jitcell_token = jitcell_token + for label in all_target_tokens: + assert isinstance(label, TargetToken) + label.original_jitcell_token = jitcell_token + if label.virtual_state and label.short_preamble: + metainterp_sd.logger_ops.log_short_preamble([], label.short_preamble) + jitcell_token.target_tokens = all_target_tokens + send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, "loop") + record_loop_or_bridge(metainterp_sd, loop) + return all_target_tokens[0] + +def compile_retrace(metainterp, greenkey, start, + inputargs, jumpargs, + start_resumedescr, partial_trace, resumekey): + """Try to compile a new procedure by closing the current history back + to the first operation. 
+ """ + from pypy.jit.metainterp.optimizeopt import optimize_trace + + history = metainterp.history metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd - loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd) - loop.token = loop_token - loop.operations[-1].setdescr(loop_token) # patch the target of the JUMP - loop.preamble = create_empty_loop(metainterp, 'Preamble ') - loop.preamble.inputargs = loop.inputargs - loop.preamble.token = make_loop_token(len(loop.inputargs), jitdriver_sd) - loop.preamble.start_resumedescr = start_resumedescr + loop_jitcell_token = metainterp.get_procedure_token(greenkey) + assert loop_jitcell_token + assert partial_trace.operations[-1].getopnum() == rop.LABEL + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + part.start_resumedescr = start_resumedescr + h_ops = history.operations + + part.operations = [partial_trace.operations[-1]] + \ + [h_ops[i].clone() for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, jumpargs, None, descr=loop_jitcell_token)] + label = part.operations[0] + orignial_label = label.clone() + assert label.getopnum() == rop.LABEL try: - old_loop_token = optimize_loop(metainterp_sd, old_loop_tokens, loop, - jitdriver_sd.warmstate.enable_opts) + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: - debug_print("compile_new_loop: got an InvalidLoop") - return None - if old_loop_token is not None: - metainterp.staticdata.log("reusing old loop") - return old_loop_token + #return None # XXX: Dissable for now + # Fall back on jumping to preamble + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + assert target_token.exported_state + part.operations = [orignial_label] + \ + [ResOperation(rop.JUMP, target_token.exported_state.jump_args, + None, descr=loop_jitcell_token)] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts, + inline_short_preamble=False) + + except InvalidLoop: + return None + assert part.operations[-1].getopnum() != rop.LABEL + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + assert loop_jitcell_token.target_tokens + loop_jitcell_token.target_tokens.append(target_token) - if loop.preamble.operations is not None: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, - "loop") - record_loop_or_bridge(metainterp_sd, loop) - token = loop.preamble.token - if full_preamble_needed: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, - loop.preamble, "entry bridge") - insert_loop_token(old_loop_tokens, loop.preamble.token) - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - greenkey, loop.preamble.token) - record_loop_or_bridge(metainterp_sd, loop.preamble) - elif token.short_preamble: - short = token.short_preamble[-1] - metainterp_sd.logger_ops.log_short_preamble(short.inputargs, - short.operations) - return token - else: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, - "loop") - insert_loop_token(old_loop_tokens, loop_token) - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - greenkey, loop.token) - record_loop_or_bridge(metainterp_sd, loop) - return loop_token + loop = partial_trace + loop.operations = loop.operations[:-1] + part.operations -def insert_loop_token(old_loop_tokens, loop_token): - # Find where in old_loop_tokens we should insert this new loop_token. 
- # The following algo means "as late as possible, but before another - # loop token that would be more general and so completely mask off - # the new loop_token". - # XXX do we still need a list? - old_loop_tokens.append(loop_token) + quasi_immutable_deps = {} + if loop.quasi_immutable_deps: + quasi_immutable_deps.update(loop.quasi_immutable_deps) + if part.quasi_immutable_deps: + quasi_immutable_deps.update(part.quasi_immutable_deps) + if quasi_immutable_deps: + loop.quasi_immutable_deps = quasi_immutable_deps + + for box in loop.inputargs: + assert isinstance(box, Box) + + target_token = loop.operations[-1].getdescr() + resumekey.compile_and_attach(metainterp, loop) + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + target_token.original_jitcell_token = loop.original_jitcell_token + record_loop_or_bridge(metainterp_sd, loop) + return target_token def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): - jitdriver_sd.on_compile(metainterp_sd.logger_ops, loop.token, + original_jitcell_token = loop.original_jitcell_token + jitdriver_sd.on_compile(metainterp_sd.logger_ops, original_jitcell_token, loop.operations, type, greenkey) loopname = jitdriver_sd.warmstate.get_location_str(greenkey) globaldata = metainterp_sd.globaldata - loop_token = loop.token - loop_token.number = n = globaldata.loopnumbering + original_jitcell_token.number = n = globaldata.loopnumbering globaldata.loopnumbering += 1 if not we_are_translated(): - show_loop(metainterp_sd, loop) + show_procedures(metainterp_sd, loop) loop.check_consistency() operations = get_deep_immutable_oplist(loop.operations) @@ -177,26 +272,19 @@ debug_start("jit-backend") try: ops_offset = metainterp_sd.cpu.compile_loop(loop.inputargs, operations, - loop.token, name=loopname) + original_jitcell_token, name=loopname) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() metainterp_sd.stats.add_new_loop(loop) if not we_are_translated(): - if type != "entry bridge": - metainterp_sd.stats.compiled() - else: - loop._ignore_during_counting = True + metainterp_sd.stats.compiled() metainterp_sd.log("compiled new " + type) # metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, type, ops_offset) - short = loop.token.short_preamble - if short: - metainterp_sd.logger_ops.log_short_preamble(short[-1].inputargs, - short[-1].operations) # if metainterp_sd.warmrunnerdesc is not None: # for tests - metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(loop.token) + metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(original_jitcell_token) def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputargs, operations, original_loop_token): @@ -204,8 +292,9 @@ jitdriver_sd.on_compile_bridge(metainterp_sd.logger_ops, original_loop_token, operations, n) if not we_are_translated(): - show_loop(metainterp_sd) - TreeLoop.check_consistency_of(inputargs, operations) + show_procedures(metainterp_sd) + seen = dict.fromkeys(inputargs) + TreeLoop.check_consistency_of_branch(operations, seen) metainterp_sd.profiler.start_backend() operations = get_deep_immutable_oplist(operations) debug_start("jit-backend") @@ -221,9 +310,9 @@ # metainterp_sd.logger_ops.log_bridge(inputargs, operations, n, ops_offset) # - if metainterp_sd.warmrunnerdesc is not None: # for tests - metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive( - original_loop_token) + #if metainterp_sd.warmrunnerdesc is not None: # for tests + # 
metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive( + # original_loop_token) # ____________________________________________________________ @@ -263,7 +352,7 @@ raise metainterp_sd.ExitFrameWithExceptionRef(cpu, value) -class TerminatingLoopToken(LoopToken): +class TerminatingLoopToken(JitCellToken): # FIXME: kill? terminating = True def __init__(self, nargs, finishdescr): @@ -427,13 +516,13 @@ # We managed to create a bridge. Attach the new operations # to the corresponding guard_op and compile from there assert metainterp.resumekey_original_loop_token is not None - new_loop.token = metainterp.resumekey_original_loop_token + new_loop.original_jitcell_token = metainterp.resumekey_original_loop_token inputargs = metainterp.history.inputargs if not we_are_translated(): self._debug_suboperations = new_loop.operations send_bridge_to_backend(metainterp.jitdriver_sd, metainterp.staticdata, self, inputargs, new_loop.operations, - new_loop.token) + new_loop.original_jitcell_token) def copy_all_attributes_into(self, res): # XXX a bit ugly to have to list them all here @@ -616,41 +705,32 @@ metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd redargs = new_loop.inputargs - # We make a new LoopToken for this entry bridge, and stick it - # to every guard in the loop. - new_loop_token = make_loop_token(len(redargs), jitdriver_sd) - new_loop.token = new_loop_token + new_loop.original_jitcell_token = jitcell_token = make_jitcell_token(jitdriver_sd) send_loop_to_backend(self.original_greenkey, metainterp.jitdriver_sd, metainterp_sd, new_loop, "entry bridge") # send the new_loop to warmspot.py, to be called directly the next time - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - self.original_greenkey, - new_loop_token) - # store the new loop in compiled_merge_points_wref too - old_loop_tokens = metainterp.get_compiled_merge_points( - self.original_greenkey) - # it always goes at the end of the list, as it is the most - # general loop token - old_loop_tokens.append(new_loop_token) - metainterp.set_compiled_merge_points(self.original_greenkey, - old_loop_tokens) + jitdriver_sd.warmstate.attach_procedure_to_interp( + self.original_greenkey, jitcell_token) + metainterp_sd.stats.add_jitcell_token(jitcell_token) -def compile_new_bridge(metainterp, old_loop_tokens, resumekey, retraced=False): +def compile_trace(metainterp, resumekey, start_resumedescr=None): """Try to compile a new bridge leading from the beginning of the history to some existing place. """ - from pypy.jit.metainterp.optimize import optimize_bridge + from pypy.jit.metainterp.optimizeopt import optimize_trace # The history contains new operations to attach as the code for the # failure of 'resumekey.guard_op'. - # + # # Attempt to use optimize_bridge(). This may return None in case # it does not work -- i.e. none of the existing old_loop_tokens match. 
- new_loop = create_empty_loop(metainterp) - new_loop.inputargs = metainterp.history.inputargs[:] + new_trace = create_empty_loop(metainterp) + new_trace.inputargs = inputargs = metainterp.history.inputargs[:] # clone ops, as optimize_bridge can mutate the ops - new_loop.operations = [op.clone() for op in metainterp.history.operations] + + new_trace.operations = [op.clone() for op in metainterp.history.operations] + new_trace.start_resumedescr = start_resumedescr metainterp_sd = metainterp.staticdata state = metainterp.jitdriver_sd.warmstate if isinstance(resumekey, ResumeAtPositionDescr): @@ -658,38 +738,25 @@ else: inline_short_preamble = True try: - target_loop_token = optimize_bridge(metainterp_sd, old_loop_tokens, - new_loop, state.enable_opts, - inline_short_preamble, retraced) + optimize_trace(metainterp_sd, new_trace, state.enable_opts, inline_short_preamble) except InvalidLoop: debug_print("compile_new_bridge: got an InvalidLoop") # XXX I am fairly convinced that optimize_bridge cannot actually raise # InvalidLoop debug_print('InvalidLoop in compile_new_bridge') return None - # Did it work? - if target_loop_token is not None: - # Yes, we managed to create a bridge. Dispatch to resumekey to + + if new_trace.operations[-1].getopnum() != rop.LABEL: + # We managed to create a bridge. Dispatch to resumekey to # know exactly what we must do (ResumeGuardDescr/ResumeFromInterpDescr) - prepare_last_operation(new_loop, target_loop_token) - resumekey.compile_and_attach(metainterp, new_loop) - record_loop_or_bridge(metainterp_sd, new_loop) - return target_loop_token - -def prepare_last_operation(new_loop, target_loop_token): - op = new_loop.operations[-1] - if not isinstance(target_loop_token, TerminatingLoopToken): - # normal case - #op.setdescr(target_loop_token) # patch the jump target - pass + target_token = new_trace.operations[-1].getdescr() + resumekey.compile_and_attach(metainterp, new_trace) + record_loop_or_bridge(metainterp_sd, new_trace) + return target_token else: - # The target_loop_token is a pseudo loop token, - # e.g. loop_tokens_done_with_this_frame_void[0] - # Replace the operation with the real operation we want, i.e. a FINISH - descr = target_loop_token.finishdescr - args = op.getarglist() - new_op = ResOperation(rop.FINISH, args, None, descr=descr) - new_loop.operations[-1] = new_op + metainterp.retrace_needed(new_trace) + return None + # ____________________________________________________________ @@ -708,7 +775,7 @@ """ # 'redboxes' is only used to know the types of red arguments. inputargs = [box.clonebox() for box in redboxes] - loop_token = make_loop_token(len(inputargs), jitdriver_sd) + jitcell_token = make_jitcell_token(jitdriver_sd) # 'nb_red_args' might be smaller than len(redboxes), # because it doesn't include the virtualizable boxes. 
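
The tail of compile_trace() above replaces the old optimize_bridge bookkeeping with a single test on the optimized trace: if it no longer ends in a LABEL, the optimizer managed to close it into a real bridge; otherwise a retrace is requested. A runnable toy version of that dispatch, using stub classes and invented names rather than the real metainterp objects:

    class StubResumeKey(object):
        def __init__(self):
            self.attached = None
        def compile_and_attach(self, metainterp, trace):
            self.attached = trace            # pretend to compile the bridge

    class StubMetaInterp(object):
        def __init__(self):
            self.needs_retrace = False
        def retrace_needed(self, trace):
            self.needs_retrace = True

    def dispatch_optimized_trace(trace_ops, resumekey, metainterp):
        opnum, args, descr = trace_ops[-1]
        if opnum != 'LABEL':
            # a closed bridge: let the guard's descr compile and attach it,
            # and return the TargetToken that its final JUMP goes to
            resumekey.compile_and_attach(metainterp, trace_ops)
            return descr
        # still ends in a LABEL: ask for another iteration to be traced
        metainterp.retrace_needed(trace_ops)
        return None

    key, mi = StubResumeKey(), StubMetaInterp()
    closed = [('INT_ADD', ['i0', 1], None), ('JUMP', ['i1'], 'target-token')]
    assert dispatch_optimized_trace(closed, key, mi) == 'target-token'
    open_ = [('LABEL', ['i0'], 'target-token')]
    assert dispatch_optimized_trace(open_, key, mi) is None and mi.needs_retrace
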
nb_red_args = jitdriver_sd.num_red_args @@ -741,7 +808,7 @@ ] operations[1].setfailargs([]) operations = get_deep_immutable_oplist(operations) - cpu.compile_loop(inputargs, operations, loop_token, log=False) + cpu.compile_loop(inputargs, operations, jitcell_token, log=False) if memory_manager is not None: # for tests - memory_manager.keep_loop_alive(loop_token) - return loop_token + memory_manager.keep_loop_alive(jitcell_token) + return jitcell_token diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -344,6 +344,7 @@ rop.SETINTERIORFIELD_RAW, rop.CALL_RELEASE_GIL, rop.QUASIIMMUT_FIELD, + rop.LABEL, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -12,8 +12,9 @@ def get_display_text(self): return None -def display_loops(loops, errmsg=None, highlight_loops={}): - graphs = [(loop, highlight_loops.get(loop, 0)) for loop in loops] +def display_procedures(procedures, errmsg=None, highlight_procedures={}): + graphs = [(procedure, highlight_procedures.get(procedure, 0)) + for procedure in procedures] for graph, highlight in graphs: for op in graph.get_operations(): if is_interesting_guard(op): @@ -25,18 +26,19 @@ def is_interesting_guard(op): return hasattr(op.getdescr(), '_debug_suboperations') +def getdescr(op): + if op._descr is not None: + return op._descr + if hasattr(op, '_descr_wref'): + return op._descr_wref() + return None + class ResOpGraphPage(GraphPage): def compute(self, graphs, errmsg=None): resopgen = ResOpGen() for graph, highlight in graphs: - if getattr(graph, 'token', None) is not None: - resopgen.jumps_to_graphs[graph.token] = graph - if getattr(graph, '_looptoken_number', None) is not None: - resopgen.jumps_to_graphs[graph._looptoken_number] = graph - - for graph, highlight in graphs: resopgen.add_graph(graph, highlight) if errmsg: resopgen.set_errmsg(errmsg) @@ -54,7 +56,7 @@ self.block_starters = {} # {graphindex: {set-of-operation-indices}} self.all_operations = {} self.errmsg = None - self.jumps_to_graphs = {} + self.target_tokens = {} def op_name(self, graphindex, opindex): return 'g%dop%d' % (graphindex, opindex) @@ -73,16 +75,21 @@ for graphindex in range(len(self.graphs)): self.block_starters[graphindex] = {0: True} for graphindex, graph in enumerate(self.graphs): - last_was_mergepoint = False + mergepointblock = None for i, op in enumerate(graph.get_operations()): if is_interesting_guard(op): self.mark_starter(graphindex, i+1) if op.getopnum() == rop.DEBUG_MERGE_POINT: - if not last_was_mergepoint: - last_was_mergepoint = True - self.mark_starter(graphindex, i) + if mergepointblock is None: + mergepointblock = i + elif op.getopnum() == rop.LABEL: + self.mark_starter(graphindex, i) + self.target_tokens[getdescr(op)] = (graphindex, i) + mergepointblock = i else: - last_was_mergepoint = False + if mergepointblock is not None: + self.mark_starter(graphindex, mergepointblock) + mergepointblock = None def set_errmsg(self, errmsg): self.errmsg = errmsg @@ -172,24 +179,10 @@ (graphindex, opindex)) break if op.getopnum() == rop.JUMP: - tgt_g = -1 - tgt = None - tgt_number = getattr(op, '_jumptarget_number', None) - if tgt_number is not None: - tgt = self.jumps_to_graphs.get(tgt_number) - else: - tgt_descr = op.getdescr() - if tgt_descr is None: - tgt_g = 
graphindex - else: - tgt = self.jumps_to_graphs.get(tgt_descr.number) - if tgt is None: - tgt = self.jumps_to_graphs.get(tgt_descr) - if tgt is not None: - tgt_g = self.graphs.index(tgt) - if tgt_g != -1: + tgt_descr = getdescr(op) + if tgt_descr is not None and tgt_descr in self.target_tokens: self.genedge((graphindex, opstartindex), - (tgt_g, 0), + self.target_tokens[tgt_descr], weight="0") lines.append("") label = "\\l".join(lines) diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -10,6 +10,7 @@ from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.codewriter import heaptracker, longlong from pypy.rlib.objectmodel import compute_identity_hash +import weakref # ____________________________________________________________ @@ -723,18 +724,17 @@ # ____________________________________________________________ -# The TreeLoop class contains a loop or a generalized loop, i.e. a tree -# of operations. Each branch ends in a jump which can go either to -# the top of the same loop, or to another TreeLoop; or it ends in a FINISH. +# The JitCellToken class is the root of a tree of traces. Each branch ends +# in a jump which goes to a LABEL operation; or it ends in a FINISH. -class LoopToken(AbstractDescr): +class JitCellToken(AbstractDescr): """Used for rop.JUMP, giving the target of the jump. This is different from TreeLoop: the TreeLoop class contains the whole loop, including 'operations', and goes away after the loop was compiled; but the LoopDescr remains alive and points to the generated assembler. """ - short_preamble = None + target_tokens = None failed_states = None retraced_count = 0 terminating = False # see TerminatingLoopToken in compile.py @@ -751,10 +751,11 @@ def __init__(self): # For memory management of assembled loops - self._keepalive_target_looktokens = {} # set of other LoopTokens + self._keepalive_jitcell_tokens = {} # set of other JitCellToken - def record_jump_to(self, target_loop_token): - self._keepalive_target_looktokens[target_loop_token] = None + def record_jump_to(self, jitcell_token): + assert isinstance(jitcell_token, JitCellToken) + self._keepalive_jitcell_tokens[jitcell_token] = None def __repr__(self): return '' % (self.number, self.generation) @@ -765,17 +766,36 @@ def dump(self): self.compiled_loop_token.cpu.dump_loop_token(self) +class TargetToken(AbstractDescr): + def __init__(self, targeting_jitcell_token=None): + # The jitcell to which jumps might result in a jump to this label + self.targeting_jitcell_token = targeting_jitcell_token + + # The jitcell where the trace containing the label with this TargetToken begins + self.original_jitcell_token = None + + self.virtual_state = None + self.exported_state = None + class TreeLoop(object): inputargs = None operations = None - token = None call_pure_results = None logops = None quasi_immutable_deps = None + start_resumedescr = None + + def _token(*args): + raise Exception("TreeLoop.token is killed") + token = property(_token, _token) + + # This is the jitcell where the trace starts. Labels within the trace might + # belong to some other jitcells in the sens that jumping to this other + # jitcell will result in a jump to the label. 
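
The history.py changes above split the old LoopToken into two descr kinds: a long-lived JitCellToken per jitcell (the root of a tree of compiled traces) and a TargetToken per LABEL inside those traces. A stripped-down model of their relationship, ignoring all the backend-specific fields; the Mini* names are illustrative only:

    class MiniJitCellToken(object):
        def __init__(self):
            self.target_tokens = []   # the LABELs reachable in this cell's traces
            self._keepalive = {}      # other jitcells whose assembler we may jump to

        def record_jump_to(self, other_cell):
            # memory management: keep alive any assembler still reachable from here
            self._keepalive[other_cell] = None

    class MiniTargetToken(object):
        def __init__(self, targeting_jitcell_token):
            # jumps aimed at this jitcell may be redirected to this label
            self.targeting_jitcell_token = targeting_jitcell_token
            # the jitcell whose compiled trace actually contains the LABEL
            self.original_jitcell_token = None

    cell = MiniJitCellToken()
    label = MiniTargetToken(cell)
    label.original_jitcell_token = cell
    cell.target_tokens.append(label)
    assert label.targeting_jitcell_token is cell
    assert label in cell.target_tokens

record_loop_or_bridge() above then keeps only jitcell-to-jitcell edges (via record_jump_to) for the memory manager, while the JUMPs in the emitted code point at TargetTokens.
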
+ original_jitcell_token = None def __init__(self, name): self.name = name - # self.inputargs = list of distinct Boxes # self.operations = list of ResOperations # ops of the kind 'guard_xxx' contain a further list of operations, # which may itself contain 'guard_xxx' and so on, making a tree. @@ -808,6 +828,10 @@ def check_consistency(self): # for testing "NOT_RPYTHON" self.check_consistency_of(self.inputargs, self.operations) + for op in self.operations: + descr = op.getdescr() + if op.getopnum() == rop.LABEL and isinstance(descr, TargetToken): + assert descr.original_jitcell_token is self.original_jitcell_token @staticmethod def check_consistency_of(inputargs, operations): @@ -842,15 +866,23 @@ assert isinstance(box, Box) assert box not in seen seen[box] = True + if op.getopnum() == rop.LABEL: + inputargs = op.getarglist() + for box in inputargs: + assert isinstance(box, Box), "LABEL contains %r" % (box,) + seen = dict.fromkeys(inputargs) + assert len(seen) == len(inputargs), ( + "duplicate Box in the LABEL arguments") + assert operations[-1].is_final() if operations[-1].getopnum() == rop.JUMP: target = operations[-1].getdescr() if target is not None: - assert isinstance(target, LoopToken) + assert isinstance(target, TargetToken) def dump(self): # RPython-friendly - print '%r: inputargs =' % self, self._dump_args(self.inputargs) + print '%r: inputargs =' % self, self._dump_args(self.inputargs) for op in self.operations: args = op.getarglist() print '\t', op.getopname(), self._dump_args(args), \ @@ -932,6 +964,9 @@ def clear(self): pass + def add_jitcell_token(self, token): + pass + class Stats(object): """For tests.""" @@ -944,7 +979,8 @@ self.loops = [] self.locations = [] self.aborted_keys = [] - self.invalidated_token_numbers = set() + self.invalidated_token_numbers = set() # <- not RPython + self.jitcell_token_wrefs = [] def clear(self): del self.loops[:] @@ -955,6 +991,10 @@ self.enter_count = 0 self.aborted_count = 0 + def add_jitcell_token(self, token): + assert isinstance(token, JitCellToken) + self.jitcell_token_wrefs.append(weakref.ref(token)) + def set_history(self, history): self.operations = history.operations @@ -984,6 +1024,15 @@ def get_all_loops(self): return self.loops + def get_all_jitcell_tokens(self): + tokens = [t() for t in self.jitcell_token_wrefs] + if None in tokens: + assert False, "get_all_jitcell_tokens will not work as "+\ + "loops have been freed" + return tokens + + + def check_history(self, expected=None, **check): insns = {} for op in self.operations: @@ -1001,13 +1050,14 @@ def check_resops(self, expected=None, **check): insns = {} - for loop in self.loops: + for loop in self.get_all_loops(): insns = loop.summary(adding_insns=insns) return self._check_insns(insns, expected, check) def _check_insns(self, insns, expected, check): if expected is not None: insns.pop('debug_merge_point', None) + insns.pop('label', None) assert insns == expected for insn, expected_count in check.items(): getattr(rop, insn.upper()) # fails if 'rop.INSN' does not exist @@ -1034,29 +1084,83 @@ opname = op.getopname() insns[opname] = insns.get(opname, 0) + 1 return self._check_insns(insns, expected, check) + + def check_simple_loop(self, expected=None, **check): + # Usefull in the simplest case when we have only one trace ending with + # a jump back to itself and possibly a few bridges ending with finnish. + # Only the operations within the loop formed by that single jump will + # be counted. 
+ loops = self.get_all_loops() + assert len(loops) == 1 + loop = loops[0] + jumpop = loop.operations[-1] + assert jumpop.getopnum() == rop.JUMP + assert self.check_resops(jump=1) + labels = [op for op in loop.operations if op.getopnum() == rop.LABEL] + targets = [op._descr_wref() for op in labels] + assert None not in targets # TargetToken was freed, give up + target = jumpop._descr_wref() + assert target + assert targets.count(target) == 1 + i = loop.operations.index(labels[targets.index(target)]) + insns = {} + for op in loop.operations[i:]: + opname = op.getopname() + insns[opname] = insns.get(opname, 0) + 1 + return self._check_insns(insns, expected, check) + def check_loops(self, expected=None, everywhere=False, **check): + insns = {} + for loop in self.get_all_loops(): + #if not everywhere: + # if getattr(loop, '_ignore_during_counting', False): + # continue + insns = loop.summary(adding_insns=insns) + if expected is not None: + insns.pop('debug_merge_point', None) + print + print + print " self.check_resops(%s)" % str(insns) + print + import pdb; pdb.set_trace() + else: + chk = ['%s=%d' % (i, insns.get(i, 0)) for i in check] + print + print + print " self.check_resops(%s)" % ', '.join(chk) + print + import pdb; pdb.set_trace() + return + + for insn, expected_count in check.items(): + getattr(rop, insn.upper()) # fails if 'rop.INSN' does not exist + found = insns.get(insn, 0) + assert found == expected_count, ( + "found %d %r, expected %d" % (found, insn, expected_count)) + return insns + def check_consistency(self): "NOT_RPYTHON" - for loop in self.loops: + for loop in self.get_all_loops(): loop.check_consistency() def maybe_view(self): if option.view: self.view() - def view(self, errmsg=None, extraloops=[]): - from pypy.jit.metainterp.graphpage import display_loops - loops = self.get_all_loops()[:] - for loop in extraloops: - if loop in loops: - loops.remove(loop) - loops.append(loop) - highlight_loops = dict.fromkeys(extraloops, 1) - for loop in loops: - if hasattr(loop, '_looptoken_number') and ( - loop._looptoken_number in self.invalidated_token_numbers): - highlight_loops.setdefault(loop, 2) - display_loops(loops, errmsg, highlight_loops) + def view(self, errmsg=None, extraprocedures=[]): + from pypy.jit.metainterp.graphpage import display_procedures + procedures = self.get_all_loops()[:] + for procedure in extraprocedures: + if procedure in procedures: + procedures.remove(procedure) + procedures.append(procedure) + highlight_procedures = dict.fromkeys(extraprocedures, 1) + for procedure in procedures: + if hasattr(procedure, '_looptoken_number') and ( + procedure._looptoken_number in self.invalidated_token_numbers): + highlight_procedures.setdefault(procedure, 2) + display_procedures(procedures, errmsg, highlight_procedures) # ---------------------------------------------------------------- diff --git a/pypy/jit/metainterp/inliner.py b/pypy/jit/metainterp/inliner.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/inliner.py @@ -0,0 +1,57 @@ +from pypy.jit.metainterp.history import Const +from pypy.jit.metainterp.resume import Snapshot + +class Inliner(object): + def __init__(self, inputargs, jump_args): + assert len(inputargs) == len(jump_args) + self.argmap = {} + for i in range(len(inputargs)): + if inputargs[i] in self.argmap: + assert self.argmap[inputargs[i]] == jump_args[i] + else: + self.argmap[inputargs[i]] = jump_args[i] + self.snapshot_map = {None: None} + + def inline_op(self, newop, ignore_result=False, clone=True, + ignore_failargs=False): + if clone: 
+ newop = newop.clone() + args = newop.getarglist() + newop.initarglist([self.inline_arg(a) for a in args]) + + if newop.is_guard(): + args = newop.getfailargs() + if args and not ignore_failargs: + newop.setfailargs([self.inline_arg(a) for a in args]) + else: + newop.setfailargs([]) + + if newop.result and not ignore_result: + old_result = newop.result + newop.result = newop.result.clonebox() + self.argmap[old_result] = newop.result + + self.inline_descr_inplace(newop.getdescr()) + + return newop + + def inline_descr_inplace(self, descr): + from pypy.jit.metainterp.compile import ResumeGuardDescr + if isinstance(descr, ResumeGuardDescr): + descr.rd_snapshot = self.inline_snapshot(descr.rd_snapshot) + + def inline_arg(self, arg): + if arg is None: + return None + if isinstance(arg, Const): + return arg + return self.argmap[arg] + + def inline_snapshot(self, snapshot): + if snapshot in self.snapshot_map: + return self.snapshot_map[snapshot] + boxes = [self.inline_arg(a) for a in snapshot.boxes] + new_snapshot = Snapshot(self.inline_snapshot(snapshot.prev), boxes) + self.snapshot_map[snapshot] = new_snapshot + return new_snapshot + diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -4,13 +4,15 @@ from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize from pypy.jit.metainterp.optimizeopt.heap import OptHeap from pypy.jit.metainterp.optimizeopt.vstring import OptString -from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll, OptInlineShortPreamble +from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll from pypy.jit.metainterp.optimizeopt.fficall import OptFfiCall from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify from pypy.jit.metainterp.optimizeopt.pure import OptPure from pypy.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce from pypy.rlib.jit import PARAMETERS from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.debug import debug_start, debug_stop, debug_print + ALL_OPTS = [('intbounds', OptIntBounds), ('rewrite', OptRewrite), @@ -28,8 +30,7 @@ ALL_OPTS_LIST = [name for name, _ in ALL_OPTS] ALL_OPTS_NAMES = ':'.join([name for name, _ in ALL_OPTS]) -def build_opt_chain(metainterp_sd, enable_opts, - inline_short_preamble=True, retraced=False): +def build_opt_chain(metainterp_sd, enable_opts): config = metainterp_sd.config optimizations = [] unroll = 'unroll' in enable_opts # 'enable_opts' is normally a dict @@ -45,12 +46,9 @@ optimizations.append(OptFfiCall()) if ('rewrite' not in enable_opts or 'virtualize' not in enable_opts - or 'heap' not in enable_opts): + or 'heap' not in enable_opts or 'unroll' not in enable_opts): optimizations.append(OptSimplify()) - if inline_short_preamble: - optimizations = [OptInlineShortPreamble(retraced)] + optimizations - return optimizations, unroll @@ -80,3 +78,21 @@ if __name__ == '__main__': print ALL_OPTS_NAMES + +def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True): + """Optimize loop.operations to remove internal overheadish operations. 
+ """ + + debug_start("jit-optimize") + try: + loop.logops = metainterp_sd.logger_noopt.log_loop(loop.inputargs, + loop.operations) + optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts) + if unroll: + optimize_unroll(metainterp_sd, loop, optimizations, inline_short_preamble) + else: + optimizer = Optimizer(metainterp_sd, loop, optimizations) + optimizer.propagate_all_forward() + finally: + debug_stop("jit-optimize") + diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -500,8 +500,9 @@ else: return CVAL_ZERO - def propagate_all_forward(self): - self.clear_newoperations() + def propagate_all_forward(self, clear=True): + if clear: + self.clear_newoperations() for op in self.loop.operations: self.first_optimization.propagate_forward(op) self.loop.operations = self.get_newoperations() diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -1,9 +1,12 @@ from pypy.jit.metainterp.optimizeopt.optimizer import Optimization from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method from pypy.jit.metainterp.resoperation import ResOperation, rop - +from pypy.jit.metainterp.history import TargetToken, JitCellToken class OptSimplify(Optimization): + def __init__(self): + self.last_label_descr = None + def optimize_CALL_PURE(self, op): args = op.getarglist() self.emit_operation(ResOperation(rop.CALL, args, op.result, @@ -31,6 +34,23 @@ def optimize_RECORD_KNOWN_CLASS(self, op): pass + def optimize_LABEL(self, op): + self.last_label_descr = op.getdescr() + self.emit_operation(op) + + def optimize_JUMP(self, op): + descr = op.getdescr() + assert isinstance(descr, JitCellToken) + if not descr.target_tokens: + assert self.last_label_descr is not None + target_token = self.last_label_descr + assert isinstance(target_token, TargetToken) + assert target_token.targeting_jitcell_token is descr + op.setdescr(self.last_label_descr) + else: + assert len(descr.target_tokens) == 1 + op.setdescr(descr.target_tokens[0]) + self.emit_operation(op) dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -0,0 +1,200 @@ +from pypy.jit.metainterp.optimizeopt.test.test_util import ( + LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot) +from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken +from pypy.jit.metainterp.resoperation import rop, opname, ResOperation +from pypy.jit.metainterp.optimize import InvalidLoop +from py.test import raises + +class BaseTestMultiLabel(BaseTest): + enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" + + def optimize_loop(self, ops, expected): + loop = self.parse(ops) + if expected != "crash!": + expected = self.parse(expected) + + part = TreeLoop('part') + part.inputargs = loop.inputargs + part.start_resumedescr = FakeDescrWithSnapshot() + token = loop.original_jitcell_token + + optimized = TreeLoop('optimized') + optimized.inputargs = loop.inputargs + optimized.operations = [] + + labels = [i for i, op in enumerate(loop.operations) 
\ + if op.getopnum()==rop.LABEL] + prv = 0 + last_label = [] + for nxt in labels + [len(loop.operations)]: + assert prv != nxt + operations = last_label + loop.operations[prv:nxt] + if nxt < len(loop.operations): + label = loop.operations[nxt] + assert label.getopnum() == rop.LABEL + jumpop = ResOperation(rop.JUMP, label.getarglist(), + None, descr=token) + operations.append(jumpop) + part.operations = operations + self._do_optimize_loop(part, None) + if part.operations[-1].getopnum() == rop.LABEL: + last_label = [part.operations.pop()] + else: + last_label = [] + optimized.operations.extend(part.operations) + prv = nxt + 1 + + # + print + print "Optimized:" + if optimized.operations: + print '\n'.join([str(o) for o in optimized.operations]) + else: + print 'Failed!' + print + + assert expected != "crash!", "should have raised an exception" + self.assert_equal(optimized, expected) + + return optimized + + def test_simple(self): + ops = """ + [i1] + i2 = int_add(i1, 1) + escape(i2) + label(i1) + i3 = int_add(i1, 1) + escape(i3) + jump(i1) + """ + expected = """ + [i1] + i2 = int_add(i1, 1) + escape(i2) + label(i1, i2) + escape(i2) + jump(i1, i2) + """ + self.optimize_loop(ops, expected) + + def test_forced_virtual(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + escape(p3) + jump(p3) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtuals_with_nonmatching_fields(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3, 1, descr=valuedescr) + label(p3) + p4 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p4, 1, descr=nextdescr) + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtual_arrays_with_nonmatching_lens(self): + ops = """ + [p1] + p2 = new_array(3, descr=arraydescr) + label(p2) + p4 = new_array(2, descr=arraydescr) + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_nonmatching_arraystruct_1(self): + ops = """ + [p1, f0] + p2 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p2, 2, f0, descr=complexrealdescr) + label(p2, f0) + p4 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p4, 2, f0, descr=compleximagdescr) + jump(p4, f0) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_nonmatching_arraystruct_2(self): + ops = """ + [p1, f0] + p2 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p2, 2, f0, descr=complexrealdescr) + label(p2, f0) + p4 = new_array(2, descr=complexarraydescr) + setinteriorfield_gc(p4, 0, f0, descr=complexrealdescr) + jump(p4, f0) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual_array(self): + ops = """ + [p1] + p3 = new_array(3, descr=arraydescr) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual_arraystruct(self): + ops = """ + [p1] + p3 = new_array(3, descr=complexarraydescr) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtual_turns_constant(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + guard_value(p3, ConstPtr(myptr)) [] + jump(p3) + """ + with raises(InvalidLoop): + 
self.optimize_loop(ops, ops) + + def test_virtuals_turns_not_equal(self): + ops = """ + [p1, p2] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3, p3) + p4 = new_with_vtable(ConstClass(node_vtable)) + jump(p3, p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + +class TestLLtype(BaseTestMultiLabel, LLtypeMixin): + pass + diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -1,7 +1,8 @@ import py from pypy.rlib.objectmodel import instantiate from pypy.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, FakeMetaInterpStaticData) + LLtypeMixin, BaseTest, FakeMetaInterpStaticData, convert_old_style_to_targets) +from pypy.jit.metainterp.history import TargetToken, JitCellToken from pypy.jit.metainterp.test.test_compile import FakeLogger import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt import pypy.jit.metainterp.optimizeopt.virtualize as virtualize @@ -11,7 +12,6 @@ from pypy.jit.metainterp.resoperation import rop, opname, ResOperation from pypy.rlib.rarithmetic import LONG_BIT - def test_store_final_boxes_in_guard(): from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.resume import tag, TAGBOX @@ -116,9 +116,13 @@ enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap" def optimize_loop(self, ops, optops, call_pure_results=None): - loop = self.parse(ops) - expected = self.parse(optops) + token = JitCellToken() + loop.operations = [ResOperation(rop.LABEL, loop.inputargs, None, descr=TargetToken(token))] + \ + loop.operations + if loop.operations[-1].getopnum() == rop.JUMP: + loop.operations[-1].setdescr(token) + expected = convert_old_style_to_targets(self.parse(optops), jump=True) self._do_optimize_loop(loop, call_pure_results) print '\n'.join([str(o) for o in loop.operations]) self.assert_equal(loop, expected) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1,13 +1,13 @@ import py from pypy.rlib.objectmodel import instantiate from pypy.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, Storage, _sortboxes) + LLtypeMixin, BaseTest, Storage, _sortboxes, convert_old_style_to_targets) import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt import pypy.jit.metainterp.optimizeopt.virtualize as virtualize from pypy.jit.metainterp.optimizeopt import optimize_loop_1, ALL_OPTS_DICT, build_opt_chain from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt -from pypy.jit.metainterp.history import TreeLoop, LoopToken +from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken from pypy.jit.metainterp.jitprof import EmptyProfiler from pypy.jit.metainterp import executor, compile, resume, history from pypy.jit.metainterp.resoperation import rop, opname, ResOperation @@ -15,7 +15,7 @@ from pypy.jit.metainterp.optimizeopt.util import args_dict from pypy.jit.metainterp.optimizeopt.test.test_optimizebasic import FakeMetaInterpStaticData from pypy.config.pypyoption import get_pypy_config - +from pypy.jit.metainterp.optimizeopt.unroll import Inliner def 
test_build_opt_chain(): def check(chain, expected_names): @@ -23,49 +23,37 @@ assert names == expected_names # metainterp_sd = FakeMetaInterpStaticData(None) - chain, _ = build_opt_chain(metainterp_sd, "", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "") check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "heap:intbounds") - check(chain, ["OptInlineShortPreamble", "OptIntBounds", "OptHeap", "OptSimplify"]) + check(chain, ["OptIntBounds", "OptHeap", "OptSimplify"]) # chain, unroll = build_opt_chain(metainterp_sd, "unroll") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) assert unroll # - chain, _ = build_opt_chain(metainterp_sd, "aaa:bbb", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "aaa:bbb") check(chain, ["OptSimplify"]) # - chain, _ = build_opt_chain(metainterp_sd, "ffi", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "ffi") check(chain, ["OptFfiCall", "OptSimplify"]) # metainterp_sd.config = get_pypy_config(translating=True) assert not metainterp_sd.config.translation.jit_ffi - chain, _ = build_opt_chain(metainterp_sd, "ffi", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "ffi") check(chain, ["OptSimplify"]) # ____________________________________________________________ -class FakeDescr(compile.ResumeGuardDescr): - class rd_snapshot: - class prev: - prev = None - boxes = [] - boxes = [] - def clone_if_mutable(self): - return FakeDescr() - def __eq__(self, other): - return isinstance(other, Storage) or isinstance(other, FakeDescr) - - class BaseTestWithUnroll(BaseTest): enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" @@ -79,40 +67,41 @@ expected_preamble = self.parse(expected_preamble) if expected_short: expected_short = self.parse(expected_short) - loop.preamble = TreeLoop('preamble') - loop.preamble.inputargs = loop.inputargs - loop.preamble.token = LoopToken() - loop.preamble.start_resumedescr = FakeDescr() - # - self._do_optimize_loop(loop, call_pure_results) + + preamble = self.unroll_and_optimize(loop, call_pure_results) + # print print "Preamble:" - print loop.preamble.inputargs - if loop.preamble.operations: - print '\n'.join([str(o) for o in loop.preamble.operations]) + if preamble.operations: + print '\n'.join([str(o) for o in preamble.operations]) else: print 'Failed!' 
print print "Loop:" - print loop.inputargs print '\n'.join([str(o) for o in loop.operations]) print if expected_short: print "Short Preamble:" - short = loop.preamble.token.short_preamble[0] - print short.inputargs - print '\n'.join([str(o) for o in short.operations]) + short = loop.operations[0].getdescr().short_preamble + print '\n'.join([str(o) for o in short]) print assert expected != "crash!", "should have raised an exception" - self.assert_equal(loop, expected) + self.assert_equal(loop, convert_old_style_to_targets(expected, jump=True)) + assert loop.operations[0].getdescr() == loop.operations[-1].getdescr() if expected_preamble: - self.assert_equal(loop.preamble, expected_preamble, + self.assert_equal(preamble, convert_old_style_to_targets(expected_preamble, jump=False), text_right='expected preamble') + assert preamble.operations[-1].getdescr() == loop.operations[0].getdescr() if expected_short: - self.assert_equal(short, expected_short, + short_preamble = TreeLoop('short preamble') + assert short[0].getopnum() == rop.LABEL + short_preamble.inputargs = short[0].getarglist() + short_preamble.operations = short + self.assert_equal(short_preamble, convert_old_style_to_targets(expected_short, jump=True), text_right='expected short preamble') + assert short[-1].getdescr() == loop.operations[0].getdescr() return loop @@ -234,7 +223,7 @@ """ % expected_value self.optimize_loop(ops, expected) - def test_reverse_of_cast(self): + def test_reverse_of_cast_1(self): ops = """ [i0] p0 = cast_int_to_ptr(i0) @@ -246,6 +235,8 @@ jump(i0) """ self.optimize_loop(ops, expected) + + def test_reverse_of_cast_2(self): ops = """ [p0] i1 = cast_ptr_to_int(p0) @@ -1181,6 +1172,7 @@ i1 = getfield_gc(p0, descr=valuedescr) i2 = int_sub(i1, 1) i3 = int_add(i0, i1) + i4 = same_as(i2) # This same_as should be killed by backend jump(i3, i2, i1) """ expected = """ @@ -1252,10 +1244,10 @@ i1 = int_add(i0, 1) p1 = new_with_vtable(ConstClass(node_vtable2)) p2 = new_with_vtable(ConstClass(node_vtable2)) - setfield_gc(p0, p1, descr=nextdescr) + setfield_gc(p2, i1, descr=valuedescr) setfield_gc(p2, p1, descr=nextdescr) setfield_gc(p1, p2, descr=nextdescr) - setfield_gc(p2, i1, descr=valuedescr) + setfield_gc(p0, p1, descr=nextdescr) jump(p1) """ self.optimize_loop(ops, loop, preamble) @@ -1317,6 +1309,7 @@ p30 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p30, i28, descr=nextdescr) setfield_gc(p3, p30, descr=valuedescr) + p46 = same_as(p30) # This same_as should be killed by backend jump(i29, p30, p3) """ expected = """ @@ -1324,8 +1317,8 @@ i28 = int_add(i0, 1) i29 = int_add(i28, 1) p30 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p30, i28, descr=nextdescr) setfield_gc(p3, p30, descr=valuedescr) - setfield_gc(p30, i28, descr=nextdescr) jump(i29, p30, p3) """ self.optimize_loop(ops, expected, preamble) @@ -2118,7 +2111,9 @@ guard_true(i3) [] i4 = int_neg(i2) setfield_gc(p1, i2, descr=valuedescr) - jump(p1, i1, i2, i4, i4) + i7 = same_as(i2) # This same_as should be killed by backend + i6 = same_as(i4) + jump(p1, i1, i2, i4, i6) """ expected = """ [p1, i1, i2, i4, i5] @@ -2148,7 +2143,8 @@ i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) escape() - jump(p1, i2, i4, i4) + i5 = same_as(i4) + jump(p1, i2, i4, i5) """ expected = """ [p1, i2, i4, i5] @@ -2177,7 +2173,8 @@ i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) escape() - jump(p1, i2, i4, i4) + i5 = same_as(i4) + jump(p1, i2, i4, i5) """ expected = """ [p1, i2, i4, i5] @@ -2207,7 +2204,9 @@ guard_true(i5) [] i4 = int_neg(i2) 
setfield_gc(p1, i2, descr=valuedescr) - jump(p1, i1, i2, i4, i4) + i8 = same_as(i2) # This same_as should be killed by backend + i7 = same_as(i4) + jump(p1, i1, i2, i4, i7) """ expected = """ [p1, i1, i2, i4, i7] @@ -2433,7 +2432,8 @@ p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p4, descr=nextdescr) setfield_gc(p1, p2, descr=nextdescr) - jump(p1, i2, i4, p4, i4) + i101 = same_as(i4) + jump(p1, i2, i4, p4, i101) """ expected = """ [p1, i2, i4, p4, i5] @@ -3276,7 +3276,15 @@ setfield_gc(p1, i3, descr=valuedescr) jump(p1, i4, i3) ''' - self.optimize_loop(ops, ops, ops) + preamble = ''' + [p1, i1, i4] + setfield_gc(p1, i1, descr=valuedescr) + i3 = call_assembler(i1, descr=asmdescr) + setfield_gc(p1, i3, descr=valuedescr) + i143 = same_as(i3) # Should be killed by backend + jump(p1, i4, i3) + ''' + self.optimize_loop(ops, ops, preamble) def test_call_assembler_invalidates_heap_knowledge(self): ops = ''' @@ -3307,7 +3315,9 @@ setfield_gc(p1, i1, descr=valuedescr) i3 = call(p1, descr=plaincalldescr) setfield_gc(p1, i3, descr=valuedescr) - jump(p1, i4, i3, i3) + i148 = same_as(i3) + i147 = same_as(i3) + jump(p1, i4, i3, i148) ''' self.optimize_loop(ops, expected, preamble) @@ -3330,7 +3340,8 @@ setfield_gc(p1, i1, descr=valuedescr) i3 = call(p1, descr=plaincalldescr) setfield_gc(p1, i1, descr=valuedescr) - jump(p1, i4, i3, i3) + i151 = same_as(i3) + jump(p1, i4, i3, i151) ''' self.optimize_loop(ops, expected, preamble) @@ -3350,7 +3361,8 @@ escape(i1) escape(i2) i4 = call(123456, 4, i0, 6, descr=plaincalldescr) - jump(i0, i4, i4) + i153 = same_as(i4) + jump(i0, i4, i153) ''' expected = ''' [i0, i4, i5] @@ -3380,7 +3392,8 @@ escape(i2) i4 = call(123456, 4, i0, 6, descr=plaincalldescr) guard_no_exception() [] - jump(i0, i4, i4) + i155 = same_as(i4) + jump(i0, i4, i155) ''' expected = ''' [i0, i2, i3] @@ -4198,6 +4211,7 @@ preamble = """ [p0] i0 = strlen(p0) + i3 = same_as(i0) # Should be killed by backend jump(p0) """ expected = """ @@ -5418,6 +5432,7 @@ [p0] p1 = getfield_gc(p0, descr=valuedescr) setfield_gc(p0, p0, descr=valuedescr) + p4450 = same_as(p0) # Should be killed by backend jump(p0) """ expected = """ @@ -5653,7 +5668,8 @@ p3 = newstr(i3) copystrcontent(p1, p3, 0, 0, i1) copystrcontent(p2, p3, 0, i1, i2) - jump(p2, p3, i2) + i7 = same_as(i2) + jump(p2, p3, i7) """ expected = """ [p1, p2, i1] @@ -5728,7 +5744,9 @@ copystrcontent(p1, p5, 0, 0, i1) copystrcontent(p2, p5, 0, i1, i2) copystrcontent(p3, p5, 0, i12, i3) - jump(p2, p3, p5, i2, i3) + i129 = same_as(i2) + i130 = same_as(i3) + jump(p2, p3, p5, i129, i130) """ expected = """ [p1, p2, p3, i1, i2] @@ -5788,7 +5806,8 @@ [p1, i1, i2, i3] escape(i3) i4 = int_sub(i2, i1) - jump(p1, i1, i2, i4, i4) + i5 = same_as(i4) + jump(p1, i1, i2, i4, i5) """ expected = """ [p1, i1, i2, i3, i4] @@ -5813,7 +5832,8 @@ escape(i5) i4 = int_sub(i2, i1) setfield_gc(p2, i4, descr=valuedescr) - jump(p1, i1, i2, p2, i4, i4) + i8 = same_as(i4) + jump(p1, i1, i2, p2, i8, i4) """ expected = """ [p1, i1, i2, p2, i5, i6] @@ -5939,7 +5959,8 @@ p4 = newstr(i5) copystrcontent(p1, p4, i1, 0, i3) copystrcontent(p2, p4, 0, i3, i4) - jump(p4, i1, i2, p2, i5, i3, i4) + i9 = same_as(i4) + jump(p4, i1, i2, p2, i5, i3, i9) """ expected = """ [p1, i1, i2, p2, i5, i3, i4] @@ -6061,7 +6082,9 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, p3, p4, descr=strequaldescr) escape(i0) - jump(p1, p2, p3, i3, i1, i2) + i11 = same_as(i1) + i12 = same_as(i2) + jump(p1, p2, p3, i3, i11, i12) """ expected = """ [p1, p2, p3, i3, i1, i2] @@ -6281,6 +6304,7 @@ i1 = strlen(p1) 
i0 = int_eq(i1, 0) escape(i0) + i3 = same_as(i1) jump(p1, i0) """ self.optimize_strunicode_loop_extradescrs(ops, expected, preamble) @@ -6326,7 +6350,9 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, s"hello world", p4, descr=streq_nonnull_descr) escape(i0) - jump(p1, p2, i3, i1, i2) + i11 = same_as(i1) + i12 = same_as(i2) + jump(p1, p2, i3, i11, i12) """ expected = """ [p1, p2, i3, i1, i2] @@ -6629,7 +6655,8 @@ p188 = getarrayitem_gc(p187, 42, descr=) guard_value(p188, ConstPtr(myptr)) [] p25 = getfield_gc(ConstPtr(myptr), descr=otherdescr) - jump(p25, p187, i184, p25) + p26 = same_as(p25) + jump(p25, p187, i184, p26) """ short = """ [p1, p187, i184] @@ -6898,7 +6925,8 @@ [p9] i843 = strlen(p9) call(i843, descr=nonwritedescr) - jump(p9, i843) + i0 = same_as(i843) + jump(p9, i0) """ short = """ [p9] @@ -7014,6 +7042,40 @@ """ self.optimize_loop(ops, expected) + def test_duplicated_aliased_virtual(self): + ops = """ + [p1, p2] + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3, p3, descr=nextdescr) + p4 = getfield_gc(p3, descr=nextdescr) + jump(p3, p4) + """ + expected = """ + [] + jump() + """ + self.optimize_loop(ops, expected) + + def test_imported_aliased_virtual_in_failargs(self): + ops = """ + [p1, p2, i0] + i2 = int_lt(i0, 10) + guard_true(i2) [p1, p2] + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3, p3, descr=nextdescr) + p4 = getfield_gc(p3, descr=nextdescr) + i1 = int_add(i0, 1) + jump(p3, p4, i1) + """ + expected = """ + [i0] + i2 = int_lt(i0, 10) + guard_true(i2) [] + i1 = int_add(i0, 1) + jump(i1) + """ + self.optimize_loop(ops, expected) + def test_chained_virtuals(self): ops = """ [p0, p1] @@ -7590,7 +7652,8 @@ call(i2, descr=nonwritedescr) setfield_gc(p22, i1, descr=valuedescr) guard_nonnull_class(p18, ConstClass(node_vtable)) [] - jump(p22, p18, i1, i1) + i10 = same_as(i1) + jump(p22, p18, i1, i10) """ short = """ [p22, p18, i1] diff --git a/pypy/jit/metainterp/optimizeopt/test/test_util.py b/pypy/jit/metainterp/optimizeopt/test/test_util.py --- a/pypy/jit/metainterp/optimizeopt/test/test_util.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_util.py @@ -8,7 +8,8 @@ from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstPtr, Const, TreeLoop, BoxObj, - ConstObj, AbstractDescr) + ConstObj, AbstractDescr, + JitCellToken, TargetToken) from pypy.jit.metainterp.optimizeopt.util import sort_descrs, equaloplists from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.codewriter.effectinfo import EffectInfo @@ -18,6 +19,8 @@ from pypy.jit.metainterp import compile, resume, history from pypy.jit.metainterp.jitprof import EmptyProfiler from pypy.config.pypyoption import get_pypy_config +from pypy.jit.metainterp.resoperation import rop, opname, ResOperation +from pypy.jit.metainterp.optimizeopt.unroll import Inliner def test_sort_descrs(): class PseudoDescr(AbstractDescr): @@ -344,6 +347,11 @@ self.config = get_pypy_config(translating=True) self.config.translation.jit_ffi = True + class logger_noopt: + @classmethod + def log_loop(*args): + pass + class warmrunnerdesc: class memory_manager: retrace_limit = 5 @@ -394,7 +402,7 @@ expected.operations, False, remap, text_right) def _do_optimize_loop(self, loop, call_pure_results): - from pypy.jit.metainterp.optimizeopt import optimize_loop_1 + from pypy.jit.metainterp.optimizeopt import optimize_trace from pypy.jit.metainterp.optimizeopt.util import args_dict self.loop = loop @@ -408,7 +416,83 @@ if hasattr(self, 'callinfocollection'): 
metainterp_sd.callinfocollection = self.callinfocollection # - optimize_loop_1(metainterp_sd, loop, self.enable_opts) + optimize_trace(metainterp_sd, loop, self.enable_opts) + + def unroll_and_optimize(self, loop, call_pure_results=None): + operations = loop.operations + jumpop = operations[-1] + assert jumpop.getopnum() == rop.JUMP + inputargs = loop.inputargs + + jump_args = jumpop.getarglist()[:] + operations = operations[:-1] + cloned_operations = [op.clone() for op in operations] + + preamble = TreeLoop('preamble') + preamble.inputargs = inputargs + preamble.start_resumedescr = FakeDescrWithSnapshot() + + token = JitCellToken() + preamble.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(token))] + \ + operations + \ + [ResOperation(rop.JUMP, jump_args, None, descr=token)] + self._do_optimize_loop(preamble, call_pure_results) + + assert preamble.operations[-1].getopnum() == rop.LABEL + + inliner = Inliner(inputargs, jump_args) + loop.start_resumedescr = preamble.start_resumedescr + loop.operations = [preamble.operations[-1]] + \ + [inliner.inline_op(op, clone=False) for op in cloned_operations] + \ + [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jump_args], + None, descr=token)] + #[inliner.inline_op(jumpop)] + assert loop.operations[-1].getopnum() == rop.JUMP + assert loop.operations[0].getopnum() == rop.LABEL + loop.inputargs = loop.operations[0].getarglist() + + self._do_optimize_loop(loop, call_pure_results) + extra_same_as = [] + while loop.operations[0].getopnum() != rop.LABEL: + extra_same_as.append(loop.operations[0]) + del loop.operations[0] + + # Hack to prevent random order of same_as ops + extra_same_as.sort(key=lambda op: str(preamble.operations).find(str(op.getarg(0)))) + + for op in extra_same_as: + preamble.operations.insert(-1, op) + + return preamble + + +class FakeDescr(compile.ResumeGuardDescr): + def clone_if_mutable(self): + return FakeDescr() + def __eq__(self, other): + return isinstance(other, FakeDescr) + +class FakeDescrWithSnapshot(compile.ResumeGuardDescr): + class rd_snapshot: + class prev: + prev = None + boxes = [] + boxes = [] + def clone_if_mutable(self): + return FakeDescrWithSnapshot() + def __eq__(self, other): + return isinstance(other, Storage) or isinstance(other, FakeDescrWithSnapshot) + + +def convert_old_style_to_targets(loop, jump): + newloop = TreeLoop(loop.name) + newloop.inputargs = loop.inputargs + newloop.operations = [ResOperation(rop.LABEL, loop.inputargs, None, descr=FakeDescr())] + \ + loop.operations + if not jump: + assert newloop.operations[-1].getopnum() == rop.JUMP + newloop.operations[-1] = ResOperation(rop.LABEL, newloop.operations[-1].getarglist(), None, descr=FakeDescr()) + return newloop # ____________________________________________________________ diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -1,11 +1,12 @@ from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.metainterp.optimizeopt.virtualstate import VirtualStateAdder, ShortBoxes +from pypy.jit.metainterp.optimizeopt.virtualstate import VirtualStateAdder, ShortBoxes, BadVirtualState from pypy.jit.metainterp.compile import ResumeGuardDescr -from pypy.jit.metainterp.history import TreeLoop, LoopToken +from pypy.jit.metainterp.history import TreeLoop, TargetToken, JitCellToken from pypy.jit.metainterp.jitexc import JitException from pypy.jit.metainterp.optimize import 
InvalidLoop, RetraceLoop from pypy.jit.metainterp.optimizeopt.optimizer import * from pypy.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds +from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.resume import Snapshot from pypy.rlib.debug import debug_print @@ -13,63 +14,11 @@ # FIXME: Introduce some VirtualOptimizer super class instead -def optimize_unroll(metainterp_sd, loop, optimizations): +def optimize_unroll(metainterp_sd, loop, optimizations, inline_short_preamble=True): opt = UnrollOptimizer(metainterp_sd, loop, optimizations) + opt.inline_short_preamble = inline_short_preamble opt.propagate_all_forward() -class Inliner(object): - def __init__(self, inputargs, jump_args): - assert len(inputargs) == len(jump_args) - self.argmap = {} - for i in range(len(inputargs)): - if inputargs[i] in self.argmap: - assert self.argmap[inputargs[i]] == jump_args[i] - else: - self.argmap[inputargs[i]] = jump_args[i] - self.snapshot_map = {None: None} - - def inline_op(self, newop, ignore_result=False, clone=True, - ignore_failargs=False): - if clone: - newop = newop.clone() - args = newop.getarglist() - newop.initarglist([self.inline_arg(a) for a in args]) - - if newop.is_guard(): - args = newop.getfailargs() - if args and not ignore_failargs: - newop.setfailargs([self.inline_arg(a) for a in args]) - else: - newop.setfailargs([]) - - if newop.result and not ignore_result: - old_result = newop.result - newop.result = newop.result.clonebox() - self.argmap[old_result] = newop.result - - self.inline_descr_inplace(newop.getdescr()) - - return newop - - def inline_descr_inplace(self, descr): - if isinstance(descr, ResumeGuardDescr): - descr.rd_snapshot = self.inline_snapshot(descr.rd_snapshot) - - def inline_arg(self, arg): - if arg is None: - return None - if isinstance(arg, Const): - return arg - return self.argmap[arg] - - def inline_snapshot(self, snapshot): - if snapshot in self.snapshot_map: - return self.snapshot_map[snapshot] - boxes = [self.inline_arg(a) for a in snapshot.boxes] - new_snapshot = Snapshot(self.inline_snapshot(snapshot.prev), boxes) - self.snapshot_map[snapshot] = new_snapshot - return new_snapshot - class UnrollableOptimizer(Optimizer): def setup(self): self.importable_values = {} @@ -101,14 +50,13 @@ become the preamble or entry bridge (don't think there is a distinction anymore)""" + inline_short_preamble = True + did_import = False + def __init__(self, metainterp_sd, loop, optimizations): self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations) - self.cloned_operations = [] - for op in self.optimizer.loop.operations: - newop = op.clone() - self.cloned_operations.append(newop) - def fix_snapshot(self, loop, jump_args, snapshot): + def fix_snapshot(self, jump_args, snapshot): if snapshot is None: return None snapshot_args = snapshot.boxes @@ -116,116 +64,348 @@ for a in snapshot_args: a = self.getvalue(a).get_key_box() new_snapshot_args.append(a) - prev = self.fix_snapshot(loop, jump_args, snapshot.prev) + prev = self.fix_snapshot(jump_args, snapshot.prev) return Snapshot(prev, new_snapshot_args) def propagate_all_forward(self): loop = self.optimizer.loop + self.optimizer.clear_newoperations() + + + start_label = loop.operations[0] + if start_label.getopnum() == rop.LABEL: + loop.operations = loop.operations[1:] + # We need to emit the label op before import_state() as emitting it + # will clear heap caches + self.optimizer.send_extra_operation(start_label) + else: + 
start_label = None + jumpop = loop.operations[-1] if jumpop.getopnum() == rop.JUMP: loop.operations = loop.operations[:-1] else: - loopop = None + jumpop = None - self.optimizer.propagate_all_forward() + self.import_state(start_label) + self.optimizer.propagate_all_forward(clear=False) + if not jumpop: + return + if self.jump_to_already_compiled_trace(jumpop): + # Found a compiled trace to jump to + if self.did_import: - if jumpop: - assert jumpop.getdescr() is loop.token - jump_args = jumpop.getarglist() - jumpop.initarglist([]) + self.close_bridge(start_label) + self.finilize_short_preamble(start_label) + return + + cell_token = jumpop.getdescr() + assert isinstance(cell_token, JitCellToken) + stop_label = ResOperation(rop.LABEL, jumpop.getarglist(), None, TargetToken(cell_token)) + + if not self.did_import: # Enforce the previous behaviour of always peeling exactly one iteration (for now) self.optimizer.flush() + KillHugeIntBounds(self.optimizer).apply() - KillHugeIntBounds(self.optimizer).apply() + loop.operations = self.optimizer.get_newoperations() + self.export_state(stop_label) + loop.operations.append(stop_label) + else: + assert stop_label + assert start_label + stop_target = stop_label.getdescr() + start_target = start_label.getdescr() + assert isinstance(stop_target, TargetToken) + assert isinstance(start_target, TargetToken) + assert stop_target.targeting_jitcell_token is start_target.targeting_jitcell_token + jumpop = ResOperation(rop.JUMP, stop_label.getarglist(), None, descr=start_label.getdescr()) + + self.close_loop(jumpop) + self.finilize_short_preamble(start_label) + + def export_state(self, targetop): + original_jump_args = targetop.getarglist() + jump_args = [self.getvalue(a).get_key_box() for a in original_jump_args] + + assert self.optimizer.loop.start_resumedescr + start_resumedescr = self.optimizer.loop.start_resumedescr.clone_if_mutable() + assert isinstance(start_resumedescr, ResumeGuardDescr) + start_resumedescr.rd_snapshot = self.fix_snapshot(jump_args, start_resumedescr.rd_snapshot) + # FIXME: I dont thnik we need fix_snapshot anymore + + modifier = VirtualStateAdder(self.optimizer) + virtual_state = modifier.get_virtual_state(jump_args) - loop.preamble.operations = self.optimizer.get_newoperations() - jump_args = [self.getvalue(a).get_key_box() for a in jump_args] + values = [self.getvalue(arg) for arg in jump_args] + inputargs = virtual_state.make_inputargs(values, self.optimizer) + short_inputargs = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True) - start_resumedescr = loop.preamble.start_resumedescr.clone_if_mutable() - self.start_resumedescr = start_resumedescr - assert isinstance(start_resumedescr, ResumeGuardDescr) - start_resumedescr.rd_snapshot = self.fix_snapshot(loop, jump_args, - start_resumedescr.rd_snapshot) + constant_inputargs = {} + for box in jump_args: + const = self.get_constant_box(box) + if const: + constant_inputargs[box] = const - modifier = VirtualStateAdder(self.optimizer) - virtual_state = modifier.get_virtual_state(jump_args) + short_boxes = ShortBoxes(self.optimizer, inputargs + constant_inputargs.keys()) + aliased_vrituals = {} + for i in range(len(original_jump_args)): + if original_jump_args[i] is not jump_args[i]: + if values[i].is_virtual(): + aliased_vrituals[original_jump_args[i]] = jump_args[i] + else: + short_boxes.alias(original_jump_args[i], jump_args[i]) + + self.optimizer.clear_newoperations() + for box in short_inputargs: + value = self.getvalue(box) + if value.is_virtual(): + 
value.force_box(self.optimizer) + inputarg_setup_ops = self.optimizer.get_newoperations() + + target_token = targetop.getdescr() + assert isinstance(target_token, TargetToken) + targetop.initarglist(inputargs) + target_token.virtual_state = virtual_state + target_token.short_preamble = [ResOperation(rop.LABEL, short_inputargs, None)] + target_token.start_resumedescr = start_resumedescr + target_token.exported_state = ExportedState(constant_inputargs, short_boxes, + inputarg_setup_ops, self.optimizer, + aliased_vrituals, jump_args) + + def import_state(self, targetop): + self.did_import = False + if not targetop: + # FIXME: Set up some sort of empty state with no virtuals? + return + target_token = targetop.getdescr() + if not target_token: + return + assert isinstance(target_token, TargetToken) + exported_state = target_token.exported_state + if not exported_state: + # FIXME: Set up some sort of empty state with no virtuals + return + self.did_import = True + + self.short = target_token.short_preamble[:] + self.short_seen = {} + self.short_boxes = exported_state.short_boxes.clone() + for box, const in exported_state.constant_inputargs.items(): + self.short_seen[box] = True + self.imported_state = exported_state + self.inputargs = targetop.getarglist() + self.initial_virtual_state = target_token.virtual_state + self.start_resumedescr = target_token.start_resumedescr + + seen = {} + for box in self.inputargs: + if box in seen: + continue + seen[box] = True + preamble_value = exported_state.optimizer.getvalue(box) + value = self.optimizer.getvalue(box) + value.import_from(preamble_value, self.optimizer) + + for newbox, oldbox in self.short_boxes.aliases.items(): + self.optimizer.make_equal_to(newbox, self.optimizer.getvalue(oldbox)) + + # Setup the state of the new optimizer by emiting the + # short operations and discarding the result + self.optimizer.emitting_dissabled = True + for op in exported_state.inputarg_setup_ops: + self.optimizer.send_extra_operation(op) + seen = {} + + for op in self.short_boxes.operations(): + self.ensure_short_op_emitted(op, self.optimizer, seen) + if op and op.result: + preamble_value = exported_state.optimizer.getvalue(op.result) + value = self.optimizer.getvalue(op.result) + if not value.is_virtual(): + imp = ValueImporter(self, preamble_value, op) + self.optimizer.importable_values[value] = imp + newvalue = self.optimizer.getvalue(op.result) + newresult = newvalue.get_key_box() + if newresult is not op.result and not newvalue.is_constant(): + self.short_boxes.alias(newresult, op.result) + op = ResOperation(rop.SAME_AS, [op.result], newresult) + self.optimizer._newoperations = [op] + self.optimizer._newoperations # XXX + #self.optimizer.getvalue(op.result).box = op.result # FIXME: HACK!!! 
+ self.optimizer.flush() + self.optimizer.emitting_dissabled = False + + for box, key_box in exported_state.aliased_vrituals.items(): + self.optimizer.make_equal_to(box, self.getvalue(key_box)) + + def close_bridge(self, start_label): + inputargs = self.inputargs + short_jumpargs = inputargs[:] + + # We dont need to inline the short preamble we are creating as we are conneting + # the bridge to a different trace with a different short preamble + self.short_inliner = None + + newoperations = self.optimizer.get_newoperations() + self.boxes_created_this_iteration = {} + i = 0 + while newoperations[i].getopnum() != rop.LABEL: + i += 1 + while i < len(newoperations): + op = newoperations[i] + self.boxes_created_this_iteration[op.result] = True + args = op.getarglist() + if op.is_guard(): + args = args + op.getfailargs() + for a in args: + self.import_box(a, inputargs, short_jumpargs, []) + i += 1 + newoperations = self.optimizer.get_newoperations() + self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=start_label.getdescr())) + + def close_loop(self, jumpop): + virtual_state = self.initial_virtual_state + short_inputargs = self.short[0].getarglist() + constant_inputargs = self.imported_state.constant_inputargs + inputargs = self.inputargs + short_jumpargs = inputargs[:] + + # Construct jumpargs from the virtual state + original_jumpargs = jumpop.getarglist()[:] + values = [self.getvalue(arg) for arg in jumpop.getarglist()] + try: + jumpargs = virtual_state.make_inputargs(values, self.optimizer) + except BadVirtualState: + raise InvalidLoop + jumpop.initarglist(jumpargs) + + # Inline the short preamble at the end of the loop + jmp_to_short_args = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True) + assert len(short_inputargs) == len(jmp_to_short_args) + args = {} + for i in range(len(short_inputargs)): + if short_inputargs[i] in args: + if args[short_inputargs[i]] != jmp_to_short_args[i]: + raise InvalidLoop + args[short_inputargs[i]] = jmp_to_short_args[i] + self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) + for box, const in constant_inputargs.items(): + self.short_inliner.argmap[box] = const + for op in self.short[1:]: + newop = self.short_inliner.inline_op(op) + self.optimizer.send_extra_operation(newop) + + # Import boxes produced in the preamble but used in the loop + newoperations = self.optimizer.get_newoperations() + self.boxes_created_this_iteration = {} + i = j = 0 + while newoperations[i].getopnum() != rop.LABEL: + i += 1 + while i < len(newoperations) or j < len(jumpargs): + if i == len(newoperations): + while j < len(jumpargs): + a = jumpargs[j] + if self.optimizer.loop.logops: + debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) + self.import_box(a, inputargs, short_jumpargs, jumpargs) + j += 1 + else: + op = newoperations[i] + + self.boxes_created_this_iteration[op.result] = True + args = op.getarglist() + if op.is_guard(): + args = args + op.getfailargs() + + if self.optimizer.loop.logops: + debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) + for a in args: + if self.optimizer.loop.logops: + debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) + self.import_box(a, inputargs, short_jumpargs, jumpargs) + i += 1 + newoperations = self.optimizer.get_newoperations() + + jumpop.initarglist(jumpargs) + self.optimizer.send_extra_operation(jumpop) + self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=jumpop.getdescr())) + + # Verify that the virtual state at the end of the loop is 
one + # that is compatible with the virtual state at the start of the loop + modifier = VirtualStateAdder(self.optimizer) + final_virtual_state = modifier.get_virtual_state(original_jumpargs) + debug_start('jit-log-virtualstate') + virtual_state.debug_print('Closed loop with ') + bad = {} + if not virtual_state.generalization_of(final_virtual_state, bad): + # We ended up with a virtual state that is not compatible + # and we are thus unable to jump to the start of the loop + final_virtual_state.debug_print("Bad virtual state at end of loop, ", + bad) + debug_stop('jit-log-virtualstate') + raise InvalidLoop - values = [self.getvalue(arg) for arg in jump_args] - inputargs = virtual_state.make_inputargs(values, self.optimizer) - short_inputargs = virtual_state.make_inputargs(values, self.optimizer, - keyboxes=True) + debug_stop('jit-log-virtualstate') - self.constant_inputargs = {} - for box in jump_args: - const = self.get_constant_box(box) - if const: - self.constant_inputargs[box] = const + maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards + if self.optimizer.emitted_guards > maxguards: + target_token = jumpop.getdescr() + assert isinstance(target_token, TargetToken) + target_token.targeting_jitcell_token.retraced_count = sys.maxint + + def finilize_short_preamble(self, start_label): + short = self.short + assert short[-1].getopnum() == rop.JUMP + target_token = start_label.getdescr() + assert isinstance(target_token, TargetToken) - sb = ShortBoxes(self.optimizer, inputargs + self.constant_inputargs.keys()) - self.short_boxes = sb + # Turn guards into conditional jumps to the preamble + for i in range(len(short)): + op = short[i] + if op.is_guard(): + op = op.clone() + op.setfailargs(None) + descr = target_token.start_resumedescr.clone_if_mutable() + op.setdescr(descr) + short[i] = op + + # Clone ops and boxes to get private versions and + short_inputargs = short[0].getarglist() + boxmap = {} + newargs = [None] * len(short_inputargs) + for i in range(len(short_inputargs)): + a = short_inputargs[i] + if a in boxmap: + newargs[i] = boxmap[a] + else: + newargs[i] = a.clonebox() + boxmap[a] = newargs[i] + inliner = Inliner(short_inputargs, newargs) + for box, const in self.imported_state.constant_inputargs.items(): + inliner.argmap[box] = const + for i in range(len(short)): + short[i] = inliner.inline_op(short[i]) + + target_token.start_resumedescr = self.start_resumedescr.clone_if_mutable() + inliner.inline_descr_inplace(target_token.start_resumedescr) + + # Forget the values to allow them to be freed + for box in short[0].getarglist(): + box.forget_value() + for op in short: + if op.result: + op.result.forget_value() + target_token.short_preamble = self.short + target_token.exported_state = None + + + def FIXME_old_stuff(): preamble_optimizer = self.optimizer loop.preamble.quasi_immutable_deps = ( self.optimizer.quasi_immutable_deps) self.optimizer = self.optimizer.new() loop.quasi_immutable_deps = self.optimizer.quasi_immutable_deps - logops = self.optimizer.loop.logops - if logops: - args = ", ".join([logops.repr_of_arg(arg) for arg in inputargs]) - debug_print('inputargs: ' + args) - args = ", ".join([logops.repr_of_arg(arg) for arg in short_inputargs]) - debug_print('short inputargs: ' + args) - self.short_boxes.debug_print(logops) - - - # Force virtuals amoung the jump_args of the preamble to get the - # operations needed to setup the proper state of those virtuals - # in the peeled loop - inputarg_setup_ops = [] - 
preamble_optimizer.clear_newoperations() - seen = {} - for box in inputargs: - if box in seen: - continue - seen[box] = True - preamble_value = preamble_optimizer.getvalue(box) - value = self.optimizer.getvalue(box) - value.import_from(preamble_value, self.optimizer) - for box in short_inputargs: - if box in seen: - continue - seen[box] = True - value = preamble_optimizer.getvalue(box) - value.force_box(preamble_optimizer) - inputarg_setup_ops += preamble_optimizer.get_newoperations() - - # Setup the state of the new optimizer by emiting the - # short preamble operations and discarding the result - self.optimizer.emitting_dissabled = True - for op in inputarg_setup_ops: - self.optimizer.send_extra_operation(op) - seen = {} - for op in self.short_boxes.operations(): - self.ensure_short_op_emitted(op, self.optimizer, seen) - if op and op.result: - preamble_value = preamble_optimizer.getvalue(op.result) - value = self.optimizer.getvalue(op.result) - if not value.is_virtual(): - imp = ValueImporter(self, preamble_value, op) - self.optimizer.importable_values[value] = imp - newresult = self.optimizer.getvalue(op.result).get_key_box() - if newresult is not op.result: - self.short_boxes.alias(newresult, op.result) - self.optimizer.flush() - self.optimizer.emitting_dissabled = False - - initial_inputargs_len = len(inputargs) - self.inliner = Inliner(loop.inputargs, jump_args) - - - short = self.inline(inputargs, self.cloned_operations, - loop.inputargs, short_inputargs, - virtual_state) loop.inputargs = inputargs args = [preamble_optimizer.getvalue(self.short_boxes.original(a)).force_box(preamble_optimizer)\ @@ -241,149 +421,7 @@ loop.preamble.token.retraced_count = sys.maxint if short: - assert short[-1].getopnum() == rop.JUMP - short[-1].setdescr(loop.token) - - # Turn guards into conditional jumps to the preamble - for i in range(len(short)): - op = short[i] - if op.is_guard(): - op = op.clone() - op.setfailargs(None) - descr = self.start_resumedescr.clone_if_mutable() - op.setdescr(descr) - short[i] = op - - short_loop = TreeLoop('short preamble') - short_loop.inputargs = short_inputargs - short_loop.operations = short - - # Clone ops and boxes to get private versions and - boxmap = {} - newargs = [None] * len(short_loop.inputargs) - for i in range(len(short_loop.inputargs)): - a = short_loop.inputargs[i] - if a in boxmap: - newargs[i] = boxmap[a] - else: - newargs[i] = a.clonebox() - boxmap[a] = newargs[i] - inliner = Inliner(short_loop.inputargs, newargs) - for box, const in self.constant_inputargs.items(): - inliner.argmap[box] = const - short_loop.inputargs = newargs - ops = [inliner.inline_op(op) for op in short_loop.operations] - short_loop.operations = ops - descr = self.start_resumedescr.clone_if_mutable() - inliner.inline_descr_inplace(descr) - short_loop.start_resumedescr = descr - - assert isinstance(loop.preamble.token, LoopToken) - if loop.preamble.token.short_preamble: - loop.preamble.token.short_preamble.append(short_loop) - else: - loop.preamble.token.short_preamble = [short_loop] - short_loop.virtual_state = virtual_state - - # Forget the values to allow them to be freed - for box in short_loop.inputargs: - box.forget_value() - for op in short_loop.operations: - if op.result: - op.result.forget_value() - - def inline(self, inputargs, loop_operations, loop_args, short_inputargs, virtual_state): - inliner = self.inliner - - short_jumpargs = inputargs[:] - - short = self.short = [] - short_seen = self.short_seen = {} - for box, const in self.constant_inputargs.items(): - 
short_seen[box] = True - - # This loop is equivalent to the main optimization loop in - # Optimizer.propagate_all_forward - jumpop = None - for newop in loop_operations: - newop = inliner.inline_op(newop, clone=False) - if newop.getopnum() == rop.JUMP: - jumpop = newop - break - - #self.optimizer.first_optimization.propagate_forward(newop) - self.optimizer.send_extra_operation(newop) - - self.boxes_created_this_iteration = {} - - assert jumpop - original_jumpargs = jumpop.getarglist()[:] - values = [self.getvalue(arg) for arg in jumpop.getarglist()] - jumpargs = virtual_state.make_inputargs(values, self.optimizer) - jumpop.initarglist(jumpargs) - jmp_to_short_args = virtual_state.make_inputargs(values, self.optimizer, - keyboxes=True) - self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) - - for box, const in self.constant_inputargs.items(): - self.short_inliner.argmap[box] = const - - for op in short: - newop = self.short_inliner.inline_op(op) - self.optimizer.send_extra_operation(newop) - - newoperations = self.optimizer.get_newoperations() - - i = j = 0 - while i < len(newoperations) or j < len(jumpargs): - if i == len(newoperations): - while j < len(jumpargs): - a = jumpargs[j] - if self.optimizer.loop.logops: - debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) - self.import_box(a, inputargs, short, short_jumpargs, - jumpargs, short_seen) - j += 1 - else: - op = newoperations[i] - - self.boxes_created_this_iteration[op.result] = True - args = op.getarglist() - if op.is_guard(): - args = args + op.getfailargs() - - if self.optimizer.loop.logops: - debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) - for a in args: - if self.optimizer.loop.logops: - debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) - self.import_box(a, inputargs, short, short_jumpargs, - jumpargs, short_seen) - i += 1 - newoperations = self.optimizer.get_newoperations() - - jumpop.initarglist(jumpargs) - self.optimizer.send_extra_operation(jumpop) - short.append(ResOperation(rop.JUMP, short_jumpargs, None)) - - modifier = VirtualStateAdder(self.optimizer) - final_virtual_state = modifier.get_virtual_state(original_jumpargs) - debug_start('jit-log-virtualstate') - virtual_state.debug_print('Closed loop with ') - bad = {} - if not virtual_state.generalization_of(final_virtual_state, bad): - # We ended up with a virtual state that is not compatible - # and we are thus unable to jump to the start of the loop - # XXX Is it possible to end up here? If so, consider: - # - Fallback on having the preamble jump to itself? - # - Would virtual_state.generate_guards make sense here? 
- final_virtual_state.debug_print("Bad virtual state at end of loop, ", - bad) - debug_stop('jit-log-virtualstate') - raise InvalidLoop - debug_stop('jit-log-virtualstate') - - return short + pass def ensure_short_op_emitted(self, op, optimizer, seen): if op is None: @@ -399,19 +437,18 @@ guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) optimizer.send_extra_operation(guard) - def add_op_to_short(self, op, short, short_seen, emit=True, guards_needed=False): + def add_op_to_short(self, op, emit=True, guards_needed=False): if op is None: return None - if op.result is not None and op.result in short_seen: - if emit: + if op.result is not None and op.result in self.short_seen: + if emit and self.short_inliner: return self.short_inliner.inline_arg(op.result) else: return None for a in op.getarglist(): - if not isinstance(a, Const) and a not in short_seen: - self.add_op_to_short(self.short_boxes.producer(a), short, short_seen, - emit, guards_needed) + if not isinstance(a, Const) and a not in self.short_seen: + self.add_op_to_short(self.short_boxes.producer(a), emit, guards_needed) if op.is_guard(): descr = self.start_resumedescr.clone_if_mutable() op.setdescr(descr) @@ -421,9 +458,9 @@ else: value_guards = [] - short.append(op) - short_seen[op.result] = True - if emit: + self.short.append(op) + self.short_seen[op.result] = True + if emit and self.short_inliner: newop = self.short_inliner.inline_op(op) self.optimizer.send_extra_operation(newop) else: @@ -432,23 +469,22 @@ if op.is_ovf(): # FIXME: ensure that GUARD_OVERFLOW:ed ops not end up here guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) - self.add_op_to_short(guard, short, short_seen, emit, guards_needed) + self.add_op_to_short(guard, emit, guards_needed) for guard in value_guards: - self.add_op_to_short(guard, short, short_seen, emit, guards_needed) + self.add_op_to_short(guard, emit, guards_needed) if newop: return newop.result return None - def import_box(self, box, inputargs, short, short_jumpargs, - jumpargs, short_seen): + def import_box(self, box, inputargs, short_jumpargs, jumpargs): if isinstance(box, Const) or box in inputargs: return if box in self.boxes_created_this_iteration: return short_op = self.short_boxes.producer(box) - newresult = self.add_op_to_short(short_op, short, short_seen) + newresult = self.add_op_to_short(short_op) short_jumpargs.append(short_op.result) inputargs.append(box) @@ -456,98 +492,94 @@ if box in self.optimizer.values: box = self.optimizer.values[box].force_box(self.optimizer) jumpargs.append(box) - -class OptInlineShortPreamble(Optimization): - def __init__(self, retraced): - self.retraced = retraced + def jump_to_already_compiled_trace(self, jumpop): + assert jumpop.getopnum() == rop.JUMP + cell_token = jumpop.getdescr() - def new(self): - return OptInlineShortPreamble(self.retraced) + assert isinstance(cell_token, JitCellToken) + if not cell_token.target_tokens: + return False - def propagate_forward(self, op): - if op.getopnum() == rop.JUMP: - loop_token = op.getdescr() - assert isinstance(loop_token, LoopToken) - short = loop_token.short_preamble - if short: - args = op.getarglist() - modifier = VirtualStateAdder(self.optimizer) - virtual_state = modifier.get_virtual_state(args) - debug_start('jit-log-virtualstate') - virtual_state.debug_print("Looking for ") + if not self.inline_short_preamble: + assert cell_token.target_tokens[0].virtual_state is None + jumpop.setdescr(cell_token.target_tokens[0]) + self.optimizer.send_extra_operation(jumpop) + return True - for sh in short: - ok = 
False - extra_guards = [] + args = jumpop.getarglist() + modifier = VirtualStateAdder(self.optimizer) + virtual_state = modifier.get_virtual_state(args) + debug_start('jit-log-virtualstate') + virtual_state.debug_print("Looking for ") - bad = {} - debugmsg = 'Did not match ' - if sh.virtual_state.generalization_of(virtual_state, bad): - ok = True - debugmsg = 'Matched ' - else: - try: - cpu = self.optimizer.cpu - sh.virtual_state.generate_guards(virtual_state, - args, cpu, - extra_guards) + for target in cell_token.target_tokens: + if not target.virtual_state: + continue + ok = False + extra_guards = [] - ok = True - debugmsg = 'Guarded to match ' - except InvalidLoop: - pass - sh.virtual_state.debug_print(debugmsg, bad) - - if ok: - debug_stop('jit-log-virtualstate') + bad = {} + debugmsg = 'Did not match ' + if target.virtual_state.generalization_of(virtual_state, bad): + ok = True + debugmsg = 'Matched ' + else: + try: + cpu = self.optimizer.cpu + target.virtual_state.generate_guards(virtual_state, + args, cpu, + extra_guards) - values = [self.getvalue(arg) - for arg in op.getarglist()] - args = sh.virtual_state.make_inputargs(values, self.optimizer, - keyboxes=True) - inliner = Inliner(sh.inputargs, args) - - for guard in extra_guards: - if guard.is_guard(): - descr = sh.start_resumedescr.clone_if_mutable() - inliner.inline_descr_inplace(descr) - guard.setdescr(descr) - self.emit_operation(guard) - - try: - for shop in sh.operations: - newop = inliner.inline_op(shop) - self.emit_operation(newop) - except InvalidLoop: - debug_print("Inlining failed unexpectedly", - "jumping to preamble instead") - self.emit_operation(op) - return + ok = True + debugmsg = 'Guarded to match ' + except InvalidLoop: + pass + target.virtual_state.debug_print(debugmsg, bad) + + if ok: debug_stop('jit-log-virtualstate') - retraced_count = loop_token.retraced_count - limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit - if not self.retraced and retraced_count self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -180,10 +188,15 @@ self.arraydescr is other.arraydescr) def enum_forced_boxes(self, boxes, value, optimizer): - assert isinstance(value, virtualize.VArrayValue) - assert value.is_virtual() + if not isinstance(value, virtualize.VArrayValue): + raise BadVirtualState + if not value.is_virtual(): + raise BadVirtualState for i in range(len(self.fieldstate)): - v = value._items[i] + try: + v = value._items[i] + except IndexError: + raise BadVirtualState s = self.fieldstate[i] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -248,12 +261,19 @@ s.enum(virtual_state) def enum_forced_boxes(self, boxes, value, optimizer): - assert isinstance(value, virtualize.VArrayStructValue) - assert value.is_virtual() + if not isinstance(value, virtualize.VArrayStructValue): + raise BadVirtualState + if not value.is_virtual(): + raise BadVirtualState p = 0 for i in range(len(self.fielddescrs)): for j in range(len(self.fielddescrs[i])): - v = value._items[i][self.fielddescrs[i][j]] + try: + v = value._items[i][self.fielddescrs[i][j]] + except IndexError: + raise BadVirtualState + except KeyError: + raise BadVirtualState s = self.fieldstate[p] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -546,18 +566,27 @@ self.aliases = {} self.rename = {} self.optimizer = optimizer - for box in surviving_boxes: - self.potential_ops[box] = None - optimizer.produce_potential_short_preamble_ops(self) - self.short_boxes = {} - self.short_boxes_in_production = 
{} + if surviving_boxes is not None: + for box in surviving_boxes: + self.potential_ops[box] = None + optimizer.produce_potential_short_preamble_ops(self) - for box in self.potential_ops.keys(): - try: - self.produce_short_preamble_box(box) - except BoxNotProducable: - pass + self.short_boxes = {} + self.short_boxes_in_production = {} + + for box in self.potential_ops.keys(): + try: + self.produce_short_preamble_box(box) + except BoxNotProducable: + pass + + def clone(self): + sb = ShortBoxes(self.optimizer, None) + sb.aliases.update(self.aliases) + sb.short_boxes = {} + sb.short_boxes.update(self.short_boxes) + return sb def prioritized_alternatives(self, box): if box not in self.alternatives: @@ -598,6 +627,7 @@ newbox = newop.result = op.result.clonebox() self.short_boxes[newop.result] = newop value = self.optimizer.getvalue(box) + self.optimizer.emit_operation(ResOperation(rop.SAME_AS, [box], newbox)) self.optimizer.make_equal_to(newbox, value) else: self.short_boxes[box] = op diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -8,7 +8,7 @@ from pypy.jit.metainterp import history, compile, resume from pypy.jit.metainterp.history import Const, ConstInt, ConstPtr, ConstFloat -from pypy.jit.metainterp.history import Box +from pypy.jit.metainterp.history import Box, TargetToken from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp import executor from pypy.jit.metainterp.logger import Logger @@ -22,7 +22,6 @@ from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr from pypy.jit.codewriter import heaptracker from pypy.jit.metainterp.optimizeopt.util import args_dict_box -from pypy.jit.metainterp.optimize import RetraceLoop # ____________________________________________________________ @@ -1567,10 +1566,17 @@ self.portal_trace_positions = [] self.free_frames_list = [] self.last_exc_value_box = None - self.retracing_loop_from = None + self.partial_trace = None + self.retracing_from = -1 self.call_pure_results = args_dict_box() self.heapcache = HeapCache() + def retrace_needed(self, trace): + self.partial_trace = trace + self.retracing_from = len(self.history.operations) - 1 + self.heapcache.reset() + + def perform_call(self, jitcode, boxes, greenkey=None): # causes the metainterp to enter the given subfunction f = self.newframe(jitcode, greenkey) @@ -1937,14 +1943,9 @@ # that failed; # - if self.resumekey is a ResumeFromInterpDescr, it starts directly # from the interpreter. - if not self.retracing_loop_from: - try: - self.compile_bridge(live_arg_boxes) - except RetraceLoop: - start = len(self.history.operations) - self.current_merge_points.append((live_arg_boxes, start)) - self.retracing_loop_from = RetraceState(self, live_arg_boxes) - return + if not self.partial_trace: + # FIXME: Support a retrace to be a bridge as well as a loop + self.compile_trace(live_arg_boxes, resumedescr) # raises in case it works -- which is the common case, hopefully, # at least for bridges starting from a guard. @@ -1966,14 +1967,10 @@ else: # Found! Compile it as a loop. 
# raises in case it works -- which is the common case - if self.retracing_loop_from and \ - self.retracing_loop_from.merge_point == j: - bridge_arg_boxes = self.retracing_loop_from.live_arg_boxes - self.compile_bridge_and_loop(original_boxes, \ - live_arg_boxes, start, - bridge_arg_boxes, resumedescr) - else: - self.compile(original_boxes, live_arg_boxes, start, resumedescr) + if self.partial_trace: + if start != self.retracing_from: + raise SwitchToBlackhole(ABORT_BAD_LOOP) # For now + self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr) # creation of the loop was cancelled! self.staticdata.log('cancelled, tracing more...') #self.staticdata.log('cancelled, stopping tracing') @@ -2029,54 +2026,59 @@ from pypy.jit.metainterp.resoperation import opname raise NotImplementedError(opname[opnum]) - def get_compiled_merge_points(self, greenkey): - """Get the list of looptokens corresponding to the greenkey. - Turns the (internal) list of weakrefs into regular refs. - """ + def get_procedure_token(self, greenkey): cell = self.jitdriver_sd.warmstate.jit_cell_at_key(greenkey) - return cell.get_compiled_merge_points() + return cell.get_procedure_token() + + def compile_loop(self, original_boxes, live_arg_boxes, start, start_resumedescr): + num_green_args = self.jitdriver_sd.num_green_args + greenkey = original_boxes[:num_green_args] + if not self.partial_trace: + assert self.get_procedure_token(greenkey) is None or \ + self.get_procedure_token(greenkey).target_tokens is None + if self.partial_trace: + target_token = compile.compile_retrace(self, greenkey, start, + original_boxes[num_green_args:], + live_arg_boxes[num_green_args:], + start_resumedescr, self.partial_trace, + self.resumekey) + else: + target_token = compile.compile_loop(self, greenkey, start, + original_boxes[num_green_args:], + live_arg_boxes[num_green_args:], + start_resumedescr) + if target_token is not None: + assert isinstance(target_token, TargetToken) + self.jitdriver_sd.warmstate.attach_procedure_to_interp(greenkey, target_token.targeting_jitcell_token) + self.staticdata.stats.add_jitcell_token(target_token.targeting_jitcell_token) - def set_compiled_merge_points(self, greenkey, looptokens): - cell = self.jitdriver_sd.warmstate.jit_cell_at_key(greenkey) - cell.set_compiled_merge_points(looptokens) - def compile(self, original_boxes, live_arg_boxes, start, start_resumedescr): - num_green_args = self.jitdriver_sd.num_green_args - original_inputargs = self.history.inputargs - self.history.inputargs = original_boxes[num_green_args:] - greenkey = original_boxes[:num_green_args] - old_loop_tokens = self.get_compiled_merge_points(greenkey) - self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None) - loop_token = compile.compile_new_loop(self, old_loop_tokens, - greenkey, start, start_resumedescr) - if loop_token is not None: # raise if it *worked* correctly - self.set_compiled_merge_points(greenkey, old_loop_tokens) + if target_token is not None: # raise if it *worked* correctly self.history.inputargs = None self.history.operations = None - raise GenerateMergePoint(live_arg_boxes, loop_token) + assert isinstance(target_token, TargetToken) + raise GenerateMergePoint(live_arg_boxes, target_token.targeting_jitcell_token) - self.history.inputargs = original_inputargs - self.history.operations.pop() # remove the JUMP - - def compile_bridge(self, live_arg_boxes): + def compile_trace(self, live_arg_boxes, start_resumedescr): num_green_args = self.jitdriver_sd.num_green_args greenkey = 
live_arg_boxes[:num_green_args] - old_loop_tokens = self.get_compiled_merge_points(greenkey) - if len(old_loop_tokens) == 0: + target_jitcell_token = self.get_procedure_token(greenkey) + if not target_jitcell_token: return - #if self.resumekey.guard_opnum == rop.GUARD_CLASS: - # return # Kepp tracing for another iteration - self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None) + if not target_jitcell_token.target_tokens: + return + + self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None, + descr=target_jitcell_token) try: - target_loop_token = compile.compile_new_bridge(self, - old_loop_tokens, - self.resumekey) + target_token = compile.compile_trace(self, self.resumekey, start_resumedescr) finally: self.history.operations.pop() # remove the JUMP - if target_loop_token is not None: # raise if it *worked* correctly + if target_token is not None: # raise if it *worked* correctly self.history.inputargs = None self.history.operations = None - raise GenerateMergePoint(live_arg_boxes, target_loop_token) + assert isinstance(target_token, TargetToken) + raise GenerateMergePoint(live_arg_boxes, target_token.targeting_jitcell_token) def compile_bridge_and_loop(self, original_boxes, live_arg_boxes, start, bridge_arg_boxes, start_resumedescr): @@ -2137,21 +2139,21 @@ loop_tokens = sd.loop_tokens_done_with_this_frame_float else: assert False - self.history.record(rop.JUMP, exits, None) - target_loop_token = compile.compile_new_bridge(self, loop_tokens, - self.resumekey) - if target_loop_token is not loop_tokens[0]: + # FIXME: kill TerminatingLoopToken? + # FIXME: can we call compile_trace? + token = loop_tokens[0].finishdescr + self.history.record(rop.FINISH, exits, None, descr=token) + target_token = compile.compile_trace(self, self.resumekey) + if target_token is not token: compile.giveup() def compile_exit_frame_with_exception(self, valuebox): self.gen_store_back_in_virtualizable() - # temporarily put a JUMP to a pseudo-loop - self.history.record(rop.JUMP, [valuebox], None) sd = self.staticdata - loop_tokens = sd.loop_tokens_exit_frame_with_exception_ref - target_loop_token = compile.compile_new_bridge(self, loop_tokens, - self.resumekey) - if target_loop_token is not loop_tokens[0]: + token = sd.loop_tokens_exit_frame_with_exception_ref[0].finishdescr + self.history.record(rop.FINISH, [valuebox], None, descr=token) + target_token = compile.compile_trace(self, self.resumekey) + if target_token is not token: compile.giveup() @specialize.arg(1) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -369,6 +369,8 @@ 'FINISH/*d', '_FINAL_LAST', + 'LABEL/*d', + '_GUARD_FIRST', '_GUARD_FOLDABLE_FIRST', 'GUARD_TRUE/1d', diff --git a/pypy/jit/metainterp/test/support.py b/pypy/jit/metainterp/test/support.py --- a/pypy/jit/metainterp/test/support.py +++ b/pypy/jit/metainterp/test/support.py @@ -16,15 +16,16 @@ from pypy.jit.codewriter import support class FakeJitCell(object): - __compiled_merge_points = [] - def get_compiled_merge_points(self): - return self.__compiled_merge_points[:] - def set_compiled_merge_points(self, lst): - self.__compiled_merge_points = lst + __product_token = None + def get_procedure_token(self): + return self.__product_token + def set_procedure_token(self, token): + self.__product_token = token class FakeWarmRunnerState(object): - def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): - pass + def 
attach_procedure_to_interp(self, greenkey, procedure_token): + cell = self.jit_cell_at_key(greenkey) + cell.set_procedure_token(procedure_token) def helper_func(self, FUNCPTR, func): from pypy.rpython.annlowlevel import llhelper @@ -132,16 +133,14 @@ def _run_with_machine_code(testself, args): metainterp = testself.metainterp num_green_args = metainterp.jitdriver_sd.num_green_args - loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) - if len(loop_tokens) != 1: - return NotImplemented + procedure_token = metainterp.get_procedure_token(args[:num_green_args]) # a loop was successfully created by _run_with_pyjitpl(); call it cpu = metainterp.cpu for i in range(len(args) - num_green_args): x = args[num_green_args + i] typecode = history.getkind(lltype.typeOf(x)) set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(loop_tokens[0]) + faildescr = cpu.execute_token(procedure_token) assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') if metainterp.jitdriver_sd.result_type == history.INT: return cpu.get_latest_value_int(0) @@ -160,23 +159,31 @@ def check_simple_loop(self, expected=None, **check): get_stats().check_simple_loop(expected=expected, **check) - def check_loop_count(self, count): - """NB. This is a hack; use check_tree_loop_count() or - check_enter_count() for the real thing. - This counts as 1 every bridge in addition to every loop; and it does - not count at all the entry bridges from interpreter, although they - are TreeLoops as well.""" + + + def check_trace_count(self, count): # was check_loop_count + # The number of traces compiled assert get_stats().compiled_count == count - def check_tree_loop_count(self, count): - assert len(get_stats().loops) == count - def check_loop_count_at_most(self, count): + def check_trace_count_at_most(self, count): assert get_stats().compiled_count <= count + + def check_jitcell_token_count(self, count): # was check_tree_loop_count + assert len(get_stats().jitcell_token_wrefs) == count + + def check_target_token_count(self, count): + tokens = get_stats().get_all_jitcell_tokens() + n = sum ([len(t.target_tokens) for t in tokens]) + assert n == count + def check_enter_count(self, count): assert get_stats().enter_count == count def check_enter_count_at_most(self, count): assert get_stats().enter_count <= count + def check_jumps(self, maxcount): + return # FIXME assert get_stats().exec_jumps <= maxcount + def check_aborted_count(self, count): assert get_stats().aborted_count == count def check_aborted_count_at_least(self, count): @@ -219,7 +226,7 @@ # this can be used after interp_operations if expected is not None: expected = dict(expected) - expected['jump'] = 1 + expected['finish'] = 1 self.metainterp.staticdata.stats.check_history(expected, **isns) diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -66,7 +66,7 @@ res = self.interp_operations(f, [8, 98]) assert res == 110 - def test_loop(self): + def test_loop_1(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) def f(x, y): res = 0 @@ -78,19 +78,20 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 42 - self.check_loop_count(1) - self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, 'guard_true': 2, 'int_sub': 2}) + self.check_trace_count(1) + self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, + 'guard_true': 2, 'int_sub': 2}) if self.basic: found = 0 - for op in 
get_stats().loops[0]._all_operations(): + for op in get_stats().get_all_loops()[0]._all_operations(): if op.getopname() == 'guard_true': liveboxes = op.getfailargs() assert len(liveboxes) == 3 for box in liveboxes: assert isinstance(box, history.BoxInt) found += 1 - assert found == 1 + assert found == 2 def test_loop_variant_mul1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -106,7 +107,7 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 1323 - self.check_loop_count(1) + self.check_trace_count(1) self.check_simple_loop(int_mul=1) def test_loop_variant_mul_ovf(self): @@ -123,7 +124,7 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 1323 - self.check_loop_count(1) + self.check_trace_count(1) self.check_simple_loop(int_mul_ovf=1) def test_loop_invariant_mul1(self): @@ -138,9 +139,9 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 252 - self.check_loop_count(1) + self.check_trace_count(1) self.check_simple_loop(int_mul=0) - self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, + self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) @@ -157,67 +158,63 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 308 - self.check_loop_count(1) + self.check_trace_count(1) self.check_simple_loop(int_mul_ovf=0) - self.check_resops({'jump': 2, 'int_lshift': 2, 'int_gt': 2, + self.check_resops({'jump': 1, 'int_lshift': 2, 'int_gt': 2, 'int_mul_ovf': 1, 'int_add': 4, 'guard_true': 2, 'guard_no_overflow': 1, 'int_sub': 2}) def test_loop_invariant_mul_bridge1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x', 'n']) + def f(x, y, n): res = 0 while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) + myjitdriver.can_enter_jit(x=x, y=y, n=n, res=res) + myjitdriver.jit_merge_point(x=x, y=y, n=n, res=res) res += x * x - if y<16: + if y 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) + myjitdriver.can_enter_jit(x=x, y=y, res=res, n=n) + myjitdriver.jit_merge_point(x=x, y=y, res=res, n=n) res += x * x - if y<16: + if y 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) + myjitdriver.can_enter_jit(x=x, y=y, res=res, n=n) + myjitdriver.jit_merge_point(x=x, y=y, res=res, n=n) z = x * x res += z - if y<16: + if y>10]) assert res == 11 - self.check_tree_loop_count(2) + self.check_jitcell_token_count(1) def test_wrap_around_sub(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) @@ -2086,7 +2083,7 @@ return n res = self.meta_interp(f, [10-sys.maxint]) assert res == 12 - self.check_tree_loop_count(2) + self.check_jitcell_token_count(1) def test_caching_setfield(self): myjitdriver = JitDriver(greens = [], reds = ['sa', 'i', 'n', 'a', 'node']) @@ -2606,10 +2603,12 @@ i += 1 return sa assert self.meta_interp(f, [20, 2]) == f(20, 2) - self.check_tree_loop_count(4) + self.check_jitcell_token_count(1) + self.check_target_token_count(4) assert self.meta_interp(f, [20, 3]) == f(20, 3) - self.check_tree_loop_count(5) - + self.check_jitcell_token_count(1) + self.check_target_token_count(5) + def test_max_retrace_guards(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a']) @@ -2625,10 +2624,11 @@ i += 1 return sa assert self.meta_interp(f, [20, 1]) == f(20, 1) - self.check_tree_loop_count(2) + self.check_jitcell_token_count(1) + 
self.check_target_token_count(2) assert self.meta_interp(f, [20, 10]) == f(20, 10) - self.check_tree_loop_count(5) - + self.check_jitcell_token_count(1) + self.check_target_token_count(5) def test_retrace_limit_with_extra_guards(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a', @@ -2648,11 +2648,13 @@ i += 1 return sa assert self.meta_interp(f, [20, 2]) == f(20, 2) - self.check_tree_loop_count(4) + self.check_jitcell_token_count(1) + self.check_target_token_count(4) assert self.meta_interp(f, [20, 3]) == f(20, 3) - self.check_tree_loop_count(5) - - def test_retrace_ending_up_retrazing_another_loop(self): + self.check_jitcell_token_count(1) + self.check_target_token_count(5) + + def test_retrace_ending_up_retracing_another_loop(self): myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'i', 'sa']) bytecode = "0+sI0+SI" @@ -2694,11 +2696,9 @@ # The attempts of retracing first loop will end up retracing the # second and thus fail 5 times, saturating the retrace_count. Instead a # bridge back to the preamble of the first loop is produced. A guard in - # this bridge is later traced resulting in a retrace of the second loop. - # Thus we end up with: - # 1 preamble and 1 specialized version of first loop - # 1 preamble and 2 specialized version of second loop - self.check_tree_loop_count(2 + 3) + # this bridge is later traced resulting in a failed attempt of retracing + # the second loop. + self.check_trace_count(8) # FIXME: Add a gloabl retrace counter and test that we are not trying more than 5 times. @@ -2709,9 +2709,12 @@ res = self.meta_interp(g, [10]) assert res == g(10) - # 1 preamble and 6 speciealized versions of each loop - self.check_tree_loop_count(2*(1 + 6)) - + + self.check_jitcell_token_count(2) + for cell in get_stats().get_all_jitcell_tokens(): + # Initialal trace with two labels and 5 retraces + assert len(cell.target_tokens) <= 7 + def test_nested_retrace(self): myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'a', 'i', 'j', 'sa']) @@ -2748,22 +2751,33 @@ res = self.meta_interp(f, [10, 7]) assert res == f(10, 7) - self.check_tree_loop_count(4) + self.check_jitcell_token_count(2) + for cell in get_stats().get_all_jitcell_tokens(): + assert len(cell.target_tokens) == 2 def g(n): return f(n, 2) + f(n, 3) res = self.meta_interp(g, [10]) assert res == g(10) - self.check_tree_loop_count(6) - + self.check_jitcell_token_count(2) + for cell in get_stats().get_all_jitcell_tokens(): + assert len(cell.target_tokens) <= 3 def g(n): return f(n, 2) + f(n, 3) + f(n, 4) + f(n, 5) + f(n, 6) + f(n, 7) res = self.meta_interp(g, [10]) assert res == g(10) - self.check_tree_loop_count(8) + # 2 loops and one function + self.check_jitcell_token_count(3) + cnt = 0 + for cell in get_stats().get_all_jitcell_tokens(): + if cell.target_tokens is None: + cnt += 1 + else: + assert len(cell.target_tokens) <= 4 + assert cnt == 1 def test_frame_finished_during_retrace(self): class Base(object): @@ -2846,66 +2860,6 @@ assert res == -2 self.check_resops(setarrayitem_gc=2, getarrayitem_gc=1) - def test_retrace_ending_up_retracing_another_loop(self): - - myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'i', 'sa']) - bytecode = "0+sI0+SI" - def f(n): - set_param(None, 'threshold', 3) - set_param(None, 'trace_eagerness', 1) - set_param(None, 'retrace_limit', 5) - set_param(None, 'function_threshold', -1) - pc = sa = i = 0 - while pc < len(bytecode): - myjitdriver.jit_merge_point(pc=pc, n=n, sa=sa, i=i) - n = hint(n, promote=True) - op = bytecode[pc] - if op == '0': - i = 0 - elif op 
== '+': - i += 1 - elif op == 's': - sa += i - elif op == 'S': - sa += 2 - elif op == 'I': - if i < n: - pc -= 2 - myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i) - continue - pc += 1 - return sa - - def g(n1, n2): - for i in range(10): - f(n1) - for i in range(10): - f(n2) - - nn = [10, 3] - assert self.meta_interp(g, nn) == g(*nn) - - # The attempts of retracing first loop will end up retracing the - # second and thus fail 5 times, saturating the retrace_count. Instead a - # bridge back to the preamble of the first loop is produced. A guard in - # this bridge is later traced resulting in a retrace of the second loop. - # Thus we end up with: - # 1 preamble and 1 specialized version of first loop - # 1 preamble and 2 specialized version of second loop - self.check_tree_loop_count(2 + 3) - - # FIXME: Add a gloabl retrace counter and test that we are not trying more than 5 times. - - def g(n): - for i in range(n): - for j in range(10): - f(n-i) - - res = self.meta_interp(g, [10]) - assert res == g(10) - # 1 preamble and 6 speciealized versions of each loop - self.check_tree_loop_count(2*(1 + 6)) - def test_continue_tracing_with_boxes_in_start_snapshot_replaced_by_optimizer(self): myjitdriver = JitDriver(greens = [], reds = ['sa', 'n', 'a', 'b']) def f(n): @@ -3153,7 +3107,7 @@ return sa res = self.meta_interp(f, [32]) assert res == f(32) - self.check_tree_loop_count(3) + self.check_trace_count(2) def test_two_loopinvariant_arrays2(self): from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -3176,7 +3130,7 @@ return sa res = self.meta_interp(f, [32]) assert res == f(32) - self.check_tree_loop_count(3) + self.check_trace_count(2) def test_two_loopinvariant_arrays3(self): from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -3200,7 +3154,7 @@ return sa res = self.meta_interp(f, [32]) assert res == f(32) - self.check_tree_loop_count(2) + self.check_trace_count(3) def test_two_loopinvariant_arrays_boxed(self): class A(object): @@ -3371,7 +3325,7 @@ res = self.meta_interp(main, [10]) assert res == main(10) self.check_resops({'int_gt': 2, 'strlen': 2, 'guard_true': 2, - 'int_sub': 2, 'jump': 2, 'call': 2, + 'int_sub': 2, 'jump': 1, 'call': 2, 'guard_no_exception': 2, 'int_add': 4}) def test_look_inside_iff_const_getarrayitem_gc_pure(self): @@ -3508,7 +3462,7 @@ res = self.meta_interp(f, [10]) assert res == 0 - self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + self.check_resops({'jump': 1, 'guard_true': 2, 'int_gt': 2, 'int_sub': 2}) def test_virtual_opaque_ptr(self): @@ -3528,7 +3482,7 @@ return n res = self.meta_interp(f, [10]) assert res == 0 - self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + self.check_resops({'jump': 1, 'guard_true': 2, 'int_gt': 2, 'int_sub': 2}) @@ -3551,7 +3505,7 @@ res = self.meta_interp(f, [10]) assert res == 0 self.check_resops({'int_gt': 2, 'getfield_gc': 1, 'int_eq': 1, - 'guard_true': 2, 'int_sub': 2, 'jump': 2, + 'guard_true': 2, 'int_sub': 2, 'jump': 1, 'guard_false': 1}) @@ -3799,6 +3753,31 @@ x = self.interp_operations(f, [1000, 1], translationoptions=topt) assert x == 999 + def test_retracing_bridge_from_interpreter_to_finnish(self): + myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa']) + def f(n): + sa = i = 0 + while i < n: + myjitdriver.jit_merge_point(n=n, i=i, sa=sa) + n = hint(n, promote=True) + sa += 2*n + i += 1 + return sa + def g(n): + return f(n) + f(n) + f(n) + f(n) + f(10*n) + f(11*n) + res = self.meta_interp(g, [1], repeat=3) + assert res == g(1) + #self.check_jitcell_token_count(1) + 
self.check_jitcell_token_count(2) + # XXX A bridge from the interpreter to a finish is first + # constructed for n=1. It is later replaced with a trace for + # the case n=10 which is extended with a retrace for n=11 and + # finnaly a new bridge to finnish is again traced and created + # for the case n=1. We were not able to reuse the orignial n=1 + # bridge as a preamble since it does not start with a + # label. The alternative would be to have all such bridges + # start with labels. I dont know which is better... + def test_ll_arraycopy(self): from pypy.rlib import rgc A = lltype.GcArray(lltype.Char) diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -1,7 +1,7 @@ from pypy.config.pypyoption import get_pypy_config -from pypy.jit.metainterp.history import LoopToken, ConstInt, History, Stats +from pypy.jit.metainterp.history import TargetToken, ConstInt, History, Stats from pypy.jit.metainterp.history import BoxInt, INT -from pypy.jit.metainterp.compile import insert_loop_token, compile_new_loop +from pypy.jit.metainterp.compile import compile_loop from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.compile import ResumeGuardCountersInt from pypy.jit.metainterp.compile import compile_tmp_callback @@ -10,23 +10,6 @@ from pypy.jit.tool.oparser import parse from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT -def test_insert_loop_token(): - # XXX this test is a bit useless now that there are no specnodes - lst = [] - # - tok1 = LoopToken() - insert_loop_token(lst, tok1) - assert lst == [tok1] - # - tok2 = LoopToken() - insert_loop_token(lst, tok2) - assert lst == [tok1, tok2] - # - tok3 = LoopToken() - insert_loop_token(lst, tok3) - assert lst == [tok1, tok2, tok3] - - class FakeCPU(object): ts = typesystem.llhelper def __init__(self): @@ -73,7 +56,7 @@ on_compile = staticmethod(lambda *args: None) on_compile_bridge = staticmethod(lambda *args: None) -def test_compile_new_loop(): +def test_compile_loop(): cpu = FakeCPU() staticdata = FakeMetaInterpStaticData() staticdata.cpu = cpu @@ -93,34 +76,26 @@ metainterp.staticdata = staticdata metainterp.cpu = cpu metainterp.history = History() - metainterp.history.operations = loop.operations[:] + metainterp.history.operations = loop.operations[:-1] metainterp.history.inputargs = loop.inputargs[:] cpu._all_size_descrs_with_vtable = ( LLtypeMixin.cpu._all_size_descrs_with_vtable) # - loop_tokens = [] - loop_token = compile_new_loop(metainterp, loop_tokens, [], 0, None) - assert loop_tokens == [loop_token] - assert loop_token.number == 1 + greenkey = 'faked' + target_token = compile_loop(metainterp, greenkey, 0, + loop.inputargs, + loop.operations[-1].getarglist(), + None) + jitcell_token = target_token.targeting_jitcell_token + assert jitcell_token == target_token.original_jitcell_token + assert jitcell_token.target_tokens == [target_token] + assert jitcell_token.number == 1 assert staticdata.globaldata.loopnumbering == 2 # assert len(cpu.seen) == 1 - assert cpu.seen[0][2] == loop_token + assert cpu.seen[0][2] == jitcell_token # del cpu.seen[:] - metainterp = FakeMetaInterp() - metainterp.staticdata = staticdata - metainterp.cpu = cpu - metainterp.history = History() - metainterp.history.operations = loop.operations[:] - metainterp.history.inputargs = loop.inputargs[:] - # - loop_token_2 = compile_new_loop(metainterp, loop_tokens, [], 0, None) - assert loop_token_2 is 
loop_token - assert loop_tokens == [loop_token] - assert len(cpu.seen) == 0 - assert staticdata.globaldata.loopnumbering == 2 - def test_resume_guard_counters(): rgc = ResumeGuardCountersInt() diff --git a/pypy/jit/metainterp/test/test_del.py b/pypy/jit/metainterp/test/test_del.py --- a/pypy/jit/metainterp/test/test_del.py +++ b/pypy/jit/metainterp/test/test_del.py @@ -27,7 +27,7 @@ 'int_sub': 2, 'int_gt': 2, 'guard_true': 2, - 'jump': 2}) + 'jump': 1}) def test_class_of_allocated(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) diff --git a/pypy/jit/metainterp/test/test_dict.py b/pypy/jit/metainterp/test/test_dict.py --- a/pypy/jit/metainterp/test/test_dict.py +++ b/pypy/jit/metainterp/test/test_dict.py @@ -154,7 +154,7 @@ res = self.meta_interp(f, [100], listops=True) assert res == f(50) self.check_resops({'new_array': 2, 'getfield_gc': 2, - 'guard_true': 2, 'jump': 2, + 'guard_true': 2, 'jump': 1, 'new_with_vtable': 2, 'getinteriorfield_gc': 2, 'setfield_gc': 6, 'int_gt': 2, 'int_sub': 2, 'call': 10, 'int_and': 2, diff --git a/pypy/jit/metainterp/test/test_exception.py b/pypy/jit/metainterp/test/test_exception.py --- a/pypy/jit/metainterp/test/test_exception.py +++ b/pypy/jit/metainterp/test/test_exception.py @@ -35,7 +35,7 @@ return n res = self.meta_interp(f, [10]) assert res == 0 - self.check_resops({'jump': 2, 'guard_true': 2, + self.check_resops({'jump': 1, 'guard_true': 2, 'int_gt': 2, 'int_sub': 2}) def test_bridge_from_guard_exception(self): @@ -512,7 +512,7 @@ res = self.meta_interp(main, [41], repeat=7) assert res == -1 - self.check_tree_loop_count(2) # the loop and the entry path + self.check_target_token_count(2) # the loop and the entry path # we get: # ENTER - compile the new loop and the entry bridge # ENTER - compile the leaving path (raising MyError) diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -77,14 +77,14 @@ int_add=2, int_lt=2, guard_true=2, - jump=2) + jump=1) else: self.check_resops( call_release_gil=0, # no CALL_RELEASE_GIL int_add=2, int_lt=2, guard_true=2, - jump=2) + jump=1) return res def test_byval_result(self): @@ -145,7 +145,7 @@ return result_point[0].x * result_point[0].y assert self.meta_interp(main, [10]) == main(10) == 9000 - self.check_resops({'jump': 2, 'int_lt': 2, 'setinteriorfield_raw': 4, + self.check_resops({'jump': 1, 'int_lt': 2, 'setinteriorfield_raw': 4, 'getinteriorfield_raw': 8, 'int_add': 6, 'guard_true': 2}) def test_array_getitem_uint8(self): @@ -167,7 +167,7 @@ return f(data, n) assert self.meta_interp(main, [10]) == 2000 - self.check_resops({'jump': 2, 'int_lt': 2, 'getinteriorfield_raw': 2, + self.check_resops({'jump': 1, 'int_lt': 2, 'getinteriorfield_raw': 2, 'guard_true': 2, 'int_add': 4}) diff --git a/pypy/jit/metainterp/test/test_greenfield.py b/pypy/jit/metainterp/test/test_greenfield.py --- a/pypy/jit/metainterp/test/test_greenfield.py +++ b/pypy/jit/metainterp/test/test_greenfield.py @@ -24,7 +24,7 @@ # res = self.meta_interp(g, [7]) assert res == -2 - self.check_loop_count(2) + self.check_trace_count(2) self.check_resops(guard_value=0) def test_green_field_2(self): @@ -49,7 +49,7 @@ # res = self.meta_interp(g, [7]) assert res == -22 - self.check_loop_count(6) + self.check_trace_count(6) self.check_resops(guard_value=0) diff --git a/pypy/jit/metainterp/test/test_jitdriver.py b/pypy/jit/metainterp/test/test_jitdriver.py --- 
a/pypy/jit/metainterp/test/test_jitdriver.py +++ b/pypy/jit/metainterp/test/test_jitdriver.py @@ -28,10 +28,10 @@ i += 1 self.meta_interp(loop, [1, 4]) - assert sorted(called.keys()) == [(4, 1, "entry bridge"), (4, 1, "loop")] + assert sorted(called.keys()) == [(4, 1, "loop")] self.meta_interp(loop, [2, 4]) - assert sorted(called.keys()) == [(4, 1, "entry bridge"), (4, 1, "loop"), - (4, 2, "entry bridge"), (4, 2, "loop")] + assert sorted(called.keys()) == [(4, 1, "loop"), + (4, 2, "loop")] def test_on_compile_bridge(self): called = {} @@ -55,8 +55,7 @@ i += 1 self.meta_interp(loop, [1, 10]) - assert sorted(called.keys()) == ['bridge', (10, 1, "entry bridge"), - (10, 1, "loop")] + assert sorted(called.keys()) == ['bridge', (10, 1, "loop")] class TestLLtypeSingle(JitDriverTests, LLJitMixin): @@ -92,8 +91,9 @@ # the following numbers are not really expectations of the test # itself, but just the numbers that we got after looking carefully # at the generated machine code - self.check_loop_count(5) - self.check_tree_loop_count(4) # 2 x loop, 2 x enter bridge + self.check_trace_count(5) + self.check_jitcell_token_count(2) # 2 x loop including enter bridge + self.check_target_token_count(4) # 2 x loop, 2 x enter bridge self.check_enter_count(5) def test_inline(self): @@ -125,7 +125,7 @@ # we expect no loop at all for 'loop1': it should always be inlined # we do however get several version of 'loop2', all of which contains # at least one int_add, while there are no int_add's in 'loop1' - self.check_tree_loop_count(5) + self.check_jitcell_token_count(1) for loop in get_stats().loops: assert loop.summary()['int_add'] >= 1 diff --git a/pypy/jit/metainterp/test/test_jitprof.py b/pypy/jit/metainterp/test/test_jitprof.py --- a/pypy/jit/metainterp/test/test_jitprof.py +++ b/pypy/jit/metainterp/test/test_jitprof.py @@ -55,8 +55,6 @@ TRACING, BACKEND, ~ BACKEND, - BACKEND, - ~ BACKEND, ~ TRACING, RUNNING, ~ RUNNING, @@ -64,8 +62,8 @@ ~ BLACKHOLE ] assert profiler.events == expected - assert profiler.times == [3, 2, 1, 1] - assert profiler.counters == [1, 2, 1, 1, 3, 3, 1, 13, 2, 0, 0, 0, 0, + assert profiler.times == [2, 1, 1, 1] + assert profiler.counters == [1, 1, 1, 1, 3, 3, 1, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0] def test_simple_loop_with_call(self): diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py --- a/pypy/jit/metainterp/test/test_list.py +++ b/pypy/jit/metainterp/test/test_list.py @@ -225,7 +225,7 @@ return s res = self.meta_interp(f, [15], listops=True) assert res == f(15) - self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, + self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, 'guard_true': 2, 'int_sub': 2}) class TestOOtype(ListTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_logger.py b/pypy/jit/metainterp/test/test_logger.py --- a/pypy/jit/metainterp/test/test_logger.py +++ b/pypy/jit/metainterp/test/test_logger.py @@ -5,7 +5,7 @@ from pypy.jit.metainterp.typesystem import llhelper from StringIO import StringIO from pypy.jit.metainterp.optimizeopt.util import equaloplists -from pypy.jit.metainterp.history import AbstractDescr, LoopToken, BasicFailDescr +from pypy.jit.metainterp.history import AbstractDescr, JitCellToken, BasicFailDescr from pypy.jit.backend.model import AbstractCPU @@ -131,7 +131,7 @@ equaloplists(loop.operations, oloop.operations) def test_jump(self): - namespace = {'target': LoopToken()} + namespace = {'target': JitCellToken()} namespace['target'].number = 3 inp = ''' [i0] diff --git 
a/pypy/jit/metainterp/test/test_loop.py b/pypy/jit/metainterp/test/test_loop.py --- a/pypy/jit/metainterp/test/test_loop.py +++ b/pypy/jit/metainterp/test/test_loop.py @@ -36,7 +36,7 @@ return res * 2 res = self.meta_interp(f, [6, 7]) assert res == 84 - self.check_loop_count(1) + self.check_trace_count(1) def test_loop_with_delayed_setfield(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res', 'a']) @@ -58,7 +58,7 @@ return res * 2 res = self.meta_interp(f, [6, 13]) assert res == f(6, 13) - self.check_loop_count(1) + self.check_trace_count(1) if self.enable_opts: self.check_resops(setfield_gc=2, getfield_gc=0) @@ -90,9 +90,9 @@ res = self.meta_interp(f, [6, 33], policy=StopAtXPolicy(l)) assert res == f(6, 33) if self.enable_opts: - self.check_loop_count(3) + self.check_trace_count(2) else: - self.check_loop_count(2) + self.check_trace_count(2) def test_alternating_loops(self): myjitdriver = JitDriver(greens = [], reds = ['pattern']) @@ -108,9 +108,9 @@ return 42 self.meta_interp(f, [0xF0F0F0]) if self.enable_opts: - self.check_loop_count(3) + self.check_trace_count(3) else: - self.check_loop_count(2) + self.check_trace_count(2) def test_interp_simple(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'y']) @@ -135,7 +135,7 @@ return x res = self.meta_interp(f, [100, 30]) assert res == 42 - self.check_loop_count(0) + self.check_trace_count(0) def test_green_prevents_loop(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'y']) @@ -154,7 +154,7 @@ return x res = self.meta_interp(f, [100, 5]) assert res == f(100, 5) - self.check_loop_count(0) + self.check_trace_count(0) def test_interp_single_loop(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'y']) @@ -179,7 +179,7 @@ return x res = self.meta_interp(f, [5, 8]) assert res == 42 - self.check_loop_count(1) + self.check_trace_count(1) # the 'int_eq' and following 'guard' should be constant-folded if 'unroll' in self.enable_opts: self.check_resops(int_eq=0, guard_true=2, guard_false=0) @@ -194,7 +194,10 @@ assert isinstance(liveboxes[0], history.BoxInt) assert isinstance(liveboxes[1], history.BoxInt) found += 1 - assert found == 1 + if 'unroll' in self.enable_opts: + assert found == 2 + else: + assert found == 1 def test_interp_many_paths(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'node']) @@ -229,7 +232,7 @@ expected = f(node1) res = self.meta_interp(f, [node1]) assert res == expected - self.check_loop_count_at_most(19) + self.check_trace_count_at_most(19) def test_interp_many_paths_2(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'node']) @@ -268,7 +271,7 @@ expected = f(node1) res = self.meta_interp(f, [node1]) assert res == expected - self.check_loop_count_at_most(19) + self.check_trace_count_at_most(19) def test_nested_loops(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'y']) @@ -601,11 +604,11 @@ assert res == expected if self.enable_opts: - self.check_loop_count(2) - self.check_tree_loop_count(2) # 1 loop, 1 bridge from interp + self.check_trace_count(2) + self.check_jitcell_token_count(1) # 1 loop with bridge from interp else: - self.check_loop_count(2) - self.check_tree_loop_count(1) # 1 loop, callable from the interp + self.check_trace_count(2) + self.check_jitcell_token_count(1) # 1 loop, callable from the interp def test_example(self): myjitdriver = JitDriver(greens = ['i'], @@ -646,10 +649,10 @@ res = self.meta_interp(main_interpreter_loop, [1]) assert res == 102 - self.check_loop_count(1) + self.check_trace_count(1) if 'unroll' in 
self.enable_opts: self.check_resops({'int_add' : 6, 'int_gt' : 2, - 'guard_false' : 2, 'jump' : 2}) + 'guard_false' : 2, 'jump' : 1}) else: self.check_resops({'int_add' : 3, 'int_gt' : 1, 'guard_false' : 1, 'jump' : 1}) @@ -691,7 +694,7 @@ res = self.meta_interp(main_interpreter_loop, [1]) assert res == main_interpreter_loop(1) - self.check_loop_count(1) + self.check_trace_count(1) # These loops do different numbers of ops based on which optimizer we # are testing with. self.check_resops(self.automatic_promotion_result) @@ -753,7 +756,7 @@ res = self.meta_interp(interpret, [1]) assert res == interpret(1) # XXX it's unsure how many loops should be there - self.check_loop_count(3) + self.check_trace_count(3) def test_path_with_operations_not_from_start(self): jitdriver = JitDriver(greens = ['k'], reds = ['n', 'z']) diff --git a/pypy/jit/metainterp/test/test_loop_unroll.py b/pypy/jit/metainterp/test/test_loop_unroll.py --- a/pypy/jit/metainterp/test/test_loop_unroll.py +++ b/pypy/jit/metainterp/test/test_loop_unroll.py @@ -8,7 +8,7 @@ enable_opts = ALL_OPTS_NAMES automatic_promotion_result = { - 'int_gt': 2, 'guard_false': 2, 'jump': 2, 'int_add': 6, + 'int_gt': 2, 'guard_false': 2, 'jump': 1, 'int_add': 6, 'guard_value': 1 } diff --git a/pypy/jit/metainterp/test/test_memmgr.py b/pypy/jit/metainterp/test/test_memmgr.py --- a/pypy/jit/metainterp/test/test_memmgr.py +++ b/pypy/jit/metainterp/test/test_memmgr.py @@ -14,7 +14,9 @@ from pypy.jit.metainterp.memmgr import MemoryManager from pypy.jit.metainterp.test.support import LLJitMixin from pypy.rlib.jit import JitDriver, dont_look_inside - +from pypy.jit.metainterp.warmspot import get_stats +from pypy.jit.metainterp.warmstate import JitCell +from pypy.rlib import rgc class FakeLoopToken: generation = 0 @@ -81,6 +83,20 @@ # See comments in TestMemoryManager. To get temporarily the normal # behavior just rename this class to TestIntegration. + # We need an extra rgc.collect in get_procedure_token() for some of + # these tests to pass. But we dont want it there always since that will + # make all other tests take forever. + def setup_class(cls): + original_get_procedure_token = JitCell.get_procedure_token + def get_procedure_token(self): + rgc.collect(); + return original_get_procedure_token(self) + JitCell.get_procedure_token = get_procedure_token + cls.original_get_procedure_token = original_get_procedure_token + + def teardown_class(cls): + JitCell.get_procedure_token = cls.original_get_procedure_token + def test_loop_kept_alive(self): myjitdriver = JitDriver(greens=[], reds=['n']) def g(): @@ -99,7 +115,7 @@ assert res == 42 # we should see only the loop and the entry bridge - self.check_tree_loop_count(2) + self.check_target_token_count(2) def test_target_loop_kept_alive_or_not(self): myjitdriver = JitDriver(greens=['m'], reds=['n']) @@ -114,6 +130,8 @@ # Depending on loop_longevity, either: # A. create the loop and the entry bridge for 'g(5)' # B. 
create 8 loops (and throw them away at each iteration) + # Actually, it's 4 loops and 4 exit bridges thrown away + # every second iteration for i in range(8): g(5) # create another loop and another entry bridge for 'g(7)', @@ -132,14 +150,15 @@ # case A res = self.meta_interp(f, [], loop_longevity=3) assert res == 42 - # we should see only the loop and the entry bridge for g(5) and g(7) - self.check_tree_loop_count(4) + # we should see only the loop with preamble and the exit bridge + # for g(5) and g(7) + self.check_enter_count(4) # case B, with a lower longevity res = self.meta_interp(f, [], loop_longevity=1) assert res == 42 # we should see a loop for each call to g() - self.check_tree_loop_count(8 + 20*2*2) + self.check_enter_count(8 + 20*2) def test_throw_away_old_loops(self): myjitdriver = JitDriver(greens=['m'], reds=['n']) @@ -152,9 +171,9 @@ return 21 def f(): for i in range(10): - g(1) # g(1) gets a loop and an entry bridge, stays alive - g(2) # (and an exit bridge, which does not count in - g(1) # check_tree_loop_count) + g(1) # g(1) gets a loop with an entry bridge + g(2) # and an exit bridge, stays alive + g(1) g(3) g(1) g(4) # g(2), g(3), g(4), g(5) are thrown away every iteration @@ -164,7 +183,7 @@ res = self.meta_interp(f, [], loop_longevity=3) assert res == 42 - self.check_tree_loop_count(2 + 10*4*2) + self.check_enter_count(2 + 10*4) def test_call_assembler_keep_alive(self): myjitdriver1 = JitDriver(greens=['m'], reds=['n']) @@ -187,7 +206,7 @@ return 21 def f(u): for i in range(8): - h(u, 32) # make a loop and an entry bridge for h(u) + h(u, 32) # make a loop and an exit bridge for h(u) g(u, 8) # make a loop for g(u) with a call_assembler g(u, 0); g(u+1, 0) # \ g(u, 0); g(u+2, 0) # \ make more loops for g(u+1) to g(u+4), @@ -198,7 +217,12 @@ res = self.meta_interp(f, [1], loop_longevity=4, inline=True) assert res == 42 - self.check_tree_loop_count(12) + self.check_jitcell_token_count(6) + tokens = [t() for t in get_stats().jitcell_token_wrefs] + # Some loops have been freed + assert None in tokens + # Loop with number 0, h(), has not been freed + assert 0 in [t.number for t in tokens if t] # ____________________________________________________________ @@ -217,10 +241,17 @@ if __name__ == '__main__': # occurs in the subprocess for test in [_TestMemoryManager(), _TestIntegration()]: - for name in dir(test): - if name.startswith('test_'): - print - print '-'*79 - print '----- Now running test', name, '-----' - print - getattr(test, name)() + if hasattr(test, 'setup_class'): + test.setup_class() + try: + for name in dir(test): + if name.startswith('test_'): + print + print '-'*79 + print '----- Now running test', name, '-----' + print + getattr(test, name)() + finally: + if hasattr(test, 'teardown_class'): + test.teardown_class() + diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -294,7 +294,8 @@ return total res = self.meta_interp(main, []) - self.check_tree_loop_count(6) + self.check_trace_count(6) + self.check_jitcell_token_count(3) assert res == main() def test_change_during_running(self): @@ -305,7 +306,7 @@ self.a = a @dont_look_inside def residual_call(foo, x): - if x == 5: + if x == 10: foo.a += 1 def f(a, x): foo = Foo(a) @@ -319,9 +320,9 @@ x -= 1 return total # - assert f(100, 15) == 3009 - res = self.meta_interp(f, [100, 15]) - assert res == 3009 + assert f(100, 30) == 6019 + res = self.meta_interp(f, [100, 
30]) + assert res == 6019 self.check_resops(guard_not_invalidated=8, guard_not_forced=0, call_may_force=0, getfield_gc=0) @@ -434,7 +435,7 @@ self.lst = lst @dont_look_inside def residual_call(foo, x): - if x == 5: + if x == 10: lst2 = [0, 0] lst2[1] = foo.lst[1] + 1 foo.lst = lst2 @@ -452,9 +453,9 @@ x -= 1 return total # - assert f(100, 15) == 3009 - res = self.meta_interp(f, [100, 15]) - assert res == 3009 + assert f(100, 30) == 6019 + res = self.meta_interp(f, [100, 30]) + assert res == 6019 self.check_resops(call_may_force=0, getfield_gc=0, getarrayitem_gc_pure=0, guard_not_forced=0, getarrayitem_gc=0, guard_not_invalidated=8) @@ -477,7 +478,7 @@ return foo.step res = self.meta_interp(f, [60]) assert res == 1 - self.check_tree_loop_count(4) # at least not 2 like before + self.check_jitcell_token_count(2) class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -530,8 +530,8 @@ result = 0 for i in range(m): result += f('+-cl--', i) - g(50) - self.meta_interp(g, [50], backendopt=True) + res = self.meta_interp(g, [50], backendopt=True) + assert res == g(50) py.test.skip("tracing from start is by now only longer enabled " "if a trace gets too big") self.check_tree_loop_count(3) @@ -577,7 +577,7 @@ self.meta_interp(g, [10], backendopt=True) self.check_aborted_count(1) self.check_resops(call=0, call_assembler=2) - self.check_tree_loop_count(3) + self.check_jitcell_token_count(2) def test_directly_call_assembler(self): driver = JitDriver(greens = ['codeno'], reds = ['i'], @@ -1211,11 +1211,11 @@ portal(c, i, v) self.meta_interp(main, [10, 10, False, False], inline=True) - self.check_tree_loop_count(1) - self.check_loop_count(0) + self.check_jitcell_token_count(1) + self.check_trace_count(1) self.meta_interp(main, [3, 10, True, False], inline=True) - self.check_tree_loop_count(0) - self.check_loop_count(0) + self.check_jitcell_token_count(0) + self.check_trace_count(0) def test_trace_from_start_does_not_prevent_inlining(self): driver = JitDriver(greens = ['c', 'bc'], reds = ['i']) @@ -1260,7 +1260,7 @@ return portal(level + 1) self.meta_interp(portal, [0]) - self.check_loop_count_at_most(2) # and not, e.g., 24 + self.check_trace_count_at_most(2) # and not, e.g., 24 class TestLLtype(RecursiveTests, LLJitMixin): diff --git a/pypy/jit/metainterp/test/test_send.py b/pypy/jit/metainterp/test/test_send.py --- a/pypy/jit/metainterp/test/test_send.py +++ b/pypy/jit/metainterp/test/test_send.py @@ -20,7 +20,7 @@ return c res = self.meta_interp(f, [1]) assert res == 2 - self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + self.check_resops({'jump': 1, 'guard_true': 2, 'int_gt': 2, 'int_sub': 2}) # all folded away def test_red_builtin_send(self): @@ -67,7 +67,7 @@ backendopt=True) assert res == 43 self.check_resops({'int_gt': 2, 'getfield_gc': 2, - 'guard_true': 2, 'int_sub': 2, 'jump': 2, + 'guard_true': 2, 'int_sub': 2, 'jump': 1, 'call': 2, 'guard_no_exception': 2, 'int_add': 2}) @@ -160,7 +160,7 @@ res = self.meta_interp(f, [j], policy=policy) assert res == 42 self.check_enter_count_at_most(5) - self.check_loop_count_at_most(5) + self.check_trace_count_at_most(5) def test_oosend_guard_failure(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'w']) @@ -199,7 +199,7 @@ # InvalidLoop condition, and was then unrolled, giving two copies # of the body in a single bigger loop with 
no failing guard except # the final one. - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(guard_class=1, int_add=4, int_sub=4) self.check_jumps(14) @@ -240,7 +240,7 @@ assert res == f(3, 28) res = self.meta_interp(f, [4, 28]) assert res == f(4, 28) - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(guard_class=1, int_add=4, int_sub=4) self.check_jumps(14) @@ -277,7 +277,7 @@ # looking only at the loop, we deduce that the class of 'w' is 'W2'. # However, this doesn't match the initial value of 'w'. # XXX This not completely easy to check... - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(guard_class=1, new_with_vtable=0, int_lshift=2, int_add=0, new=0) @@ -306,7 +306,7 @@ return x res = self.meta_interp(f, [198], policy=StopAtXPolicy(externfn)) assert res == f(198) - self.check_loop_count(4) + self.check_trace_count(4) def test_indirect_call_unknown_object_2(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'state']) @@ -340,9 +340,9 @@ res = self.meta_interp(f, [198], policy=StopAtXPolicy(State.externfn.im_func)) assert res == f(198) - # we get two TreeLoops: an initial one, and one entering from - # the interpreter - self.check_tree_loop_count(2) + # we get two TargetTokens, one for the loop and one for the preamble + self.check_jitcell_token_count(1) + self.check_target_token_count(2) def test_indirect_call_unknown_object_3(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'z', 'state']) @@ -377,9 +377,10 @@ res = self.meta_interp(f, [198], policy=StopAtXPolicy(State.externfn.im_func)) assert res == f(198) - # we get four TreeLoops: one for each of the 3 getvalue functions, - # and one entering from the interpreter - self.check_tree_loop_count(4) + # we get four TargetTokens: one for each of the 3 getvalue functions, + # and one entering from the interpreter (the preamble) + self.check_jitcell_token_count(1) + self.check_target_token_count(4) def test_two_behaviors(self): py.test.skip("XXX fix me!!!!!!! problem in optimize.py") @@ -403,7 +404,7 @@ # is true if we replace "if cases[y]" above with "if not cases[y]" # -- so there is no good reason that it fails. 
self.check_loops(new_with_vtable=0) - self.check_loop_count(2) + self.check_trace_count(2) def test_behavior_change_after_a_while(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'x']) @@ -431,9 +432,10 @@ assert res == 200 # we expect 2 versions of the loop, 1 entry bridge, # and 1 bridge going from the - # loop back to the start of the entry bridge - self.check_loop_count(3) # 2 loop + 1 bridge - self.check_tree_loop_count(3) # 2 loop + 1 entry bridge (argh) + # loop back to the loop + self.check_trace_count(2) # preamble/loop and 1 bridge + self.check_jitcell_token_count(1) + self.check_target_token_count(3) # preamble, Int1, Int2 self.check_aborted_count(0) def test_three_cases(self): @@ -454,7 +456,7 @@ return node.x res = self.meta_interp(f, [55]) assert res == f(55) - self.check_tree_loop_count(4) + self.check_trace_count(3) def test_three_classes(self): class Base: @@ -484,7 +486,7 @@ return n res = self.meta_interp(f, [55], policy=StopAtXPolicy(extern)) assert res == f(55) - self.check_tree_loop_count(2) + self.check_jitcell_token_count(1) def test_bug1(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) diff --git a/pypy/jit/metainterp/test/test_string.py b/pypy/jit/metainterp/test/test_string.py --- a/pypy/jit/metainterp/test/test_string.py +++ b/pypy/jit/metainterp/test/test_string.py @@ -499,7 +499,7 @@ sys.defaultencoding = _str('utf-8') return sa assert self.meta_interp(f, [8]) == f(8) - self.check_resops({'jump': 2, 'int_is_true': 2, 'int_add': 2, + self.check_resops({'jump': 1, 'int_is_true': 2, 'int_add': 2, 'guard_true': 2, 'guard_not_invalidated': 2, 'int_sub': 2}) @@ -590,7 +590,7 @@ # The "".join should be unrolled, since the length of x is known since # it is virtual, ensure there are no calls to ll_join_chars, or # allocations. 
- self.check_resops({'jump': 2, 'guard_true': 5, 'int_lt': 2, + self.check_resops({'jump': 1, 'guard_true': 5, 'int_lt': 2, 'int_add': 2, 'int_is_true': 3}) def test_virtual_copystringcontent(self): diff --git a/pypy/jit/metainterp/test/test_tl.py b/pypy/jit/metainterp/test/test_tl.py --- a/pypy/jit/metainterp/test/test_tl.py +++ b/pypy/jit/metainterp/test/test_tl.py @@ -72,7 +72,7 @@ res = self.meta_interp(main, [0, 6], listops=True, backendopt=True) assert res == 5040 - self.check_resops({'jump': 2, 'int_le': 2, 'guard_value': 1, + self.check_resops({'jump': 1, 'int_le': 2, 'guard_value': 1, 'int_mul': 2, 'guard_false': 2, 'int_sub': 2}) def test_tl_2(self): @@ -80,7 +80,7 @@ res = self.meta_interp(main, [1, 10], listops=True, backendopt=True) assert res == main(1, 10) - self.check_resops({'int_le': 2, 'int_sub': 2, 'jump': 2, + self.check_resops({'int_le': 2, 'int_sub': 2, 'jump': 1, 'guard_false': 2, 'guard_value': 1}) def test_tl_call(self, listops=True, policy=None): diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -1,5 +1,5 @@ import py -from pypy.rlib.jit import JitDriver, promote +from pypy.rlib.jit import JitDriver, promote, dont_look_inside from pypy.rlib.objectmodel import compute_unique_id from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin @@ -30,7 +30,7 @@ assert f(10) == 55 * 10 res = self.meta_interp(f, [10]) assert res == 55 * 10 - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=2, new=0) @@ -79,7 +79,7 @@ assert f(10) == 55 * 10 res = self.meta_interp(f, [10]) assert res == 55 * 10 - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=3, new=0) @@ -97,7 +97,7 @@ return node.floatval res = self.meta_interp(f, [10]) assert res == f(10) - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(new=0, float_add=1) def test_virtualized_float2(self): @@ -115,7 +115,7 @@ return node.floatval res = self.meta_interp(f, [10]) assert res == f(10) - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(new=0, float_add=2) @@ -140,7 +140,7 @@ return node.value * node.extra res = self.meta_interp(f, [10]) assert res == 55 * 30 - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=2, new=0) @@ -161,7 +161,7 @@ return node.value res = self.meta_interp(f, [500]) assert res == 640 - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=1, new=0) @@ -185,7 +185,7 @@ return node.value res = self.meta_interp(f, [18]) assert res == f(18) - self.check_loop_count(2) + self.check_trace_count(2) self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=2, new=0) @@ -214,7 +214,7 @@ return node.value res = self.meta_interp(f, [20], policy=StopAtXPolicy(externfn)) assert res == f(20) - self.check_loop_count(3) + self.check_trace_count(2) self.check_resops(**{self._new_op: 1}) self.check_resops(int_mul=0, call=1) @@ -391,7 +391,7 @@ fieldname = self._field_prefix + 'value' assert getattr(res, fieldname, -100) == f(21).value - self.check_tree_loop_count(2) # the loop and the entry path + self.check_jitcell_token_count(1) # the loop and the entry path # we get: # ENTER - compile the new loop and 
entry bridge # ENTER - compile the leaving path @@ -565,7 +565,10 @@ n -= 1 return node1.value + node2.value assert self.meta_interp(f, [40, 3]) == f(40, 3) - self.check_loop_count(6) + # We get 4 versions of this loop: + # preamble (no virtuals), node1 virtual, node2 virtual, both virtual + self.check_target_token_count(4) + self.check_resops(new=0, new_with_vtable=0) def test_single_virtual_forced_in_bridge(self): myjitdriver = JitDriver(greens = [], reds = ['n', 's', 'node']) @@ -612,10 +615,10 @@ return node.value res = self.meta_interp(f, [48, 3], policy=StopAtXPolicy(externfn)) assert res == f(48, 3) - self.check_loop_count(5) + self.check_trace_count(4) res = self.meta_interp(f, [40, 3], policy=StopAtXPolicy(externfn)) assert res == f(40, 3) - self.check_loop_count(3) + self.check_trace_count(3) def test_forced_virtual_assigned_different_class_in_bridge(self): myjitdriver = JitDriver(greens = [], reds = ['n', 's', 'node', 'node2']) @@ -782,6 +785,165 @@ + def test_retrace_not_matching_bridge(self): + @dont_look_inside + def external(node): + return node.value + 1 + myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'node', 'node2']) + class A(): + def new(self): + return A() + def val(self, i): + return i + 7 + class B(A): + def new(self): + return B() + def val(self, i): + return i + 42 + def f(n): + node = self._new() + node2 = A() + node.value = 0 + i = 0 + while i < n: + myjitdriver.jit_merge_point(n=n, i=i, node=node, node2=node2) + next = self._new() + next.value = node.value + n + node2.val(i) + if i != 7: + next.value += external(next) + else: + node2 = B() + node = next + node2 = node2.new() + + i += 1 + return node.value + res = self.meta_interp(f, [10], repeat=10) + assert res == f(10) + self.check_resops(jump=2) + + def test_retrace_not_matching_bridge_str(self): + @dont_look_inside + def external(node): + return node.value + 1 + myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'node', 'node2', 's']) + class A(): + def new(self): + return A() + def val(self, i): + return i + 7 + class B(A): + def new(self): + return B() + def val(self, i): + return i + 42 + def f(n): + s = '*' * n + node = self._new() + node2 = A() + node.value = 0 + i = 0 + while i < n: + myjitdriver.jit_merge_point(n=n, i=i, node=node, node2=node2, s=s) + next = self._new() + next.value = node.value + n + node2.val(i) + if i != 7: + next.value += external(next) + else: + node2 = B() + node = next + node2 = node2.new() + node.value += len(s) + i += 1 + return node.value + res = self.meta_interp(f, [10], repeat=10) + assert res == f(10) + self.check_resops(jump=2) + + def test_nested_loops(self): + class Int(object): + def __init__(self, val): + self.val = val + myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'sa', 'i', 'j']) + bytecode = "iajb+JI" + def f(n): + pc = sa = 0 + i = j = Int(0) + while pc < len(bytecode): + myjitdriver.jit_merge_point(pc=pc, n=n, sa=sa, i=i, j=j) + op = bytecode[pc] + if op == 'i': + i = Int(0) + elif op == 'j': + j = Int(0) + elif op == '+': + sa += i.val * j.val + elif op == 'a': + i = Int(i.val + 1) + elif op == 'b': + j = Int(j.val + 1) + elif op == 'J': + if j.val < n: + pc -= 2 + myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i, j=j) + continue + elif op == 'I': + if i.val < n: + pc -= 5 + myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i, j=j) + continue + pc += 1 + return sa + + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_aborted_count(0) + self.check_target_token_count(3) + + def test_nested_loops_bridge(self): + class 
Int(object): + def __init__(self, val): + self.val = val + myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'sa', 'i', 'j']) + bytecode = "iajb+JI" + def f(n): + pc = sa = 0 + i = j = Int(0) + while pc < len(bytecode): + myjitdriver.jit_merge_point(pc=pc, n=n, sa=sa, i=i, j=j) + op = bytecode[pc] + if op == 'i': + i = Int(0) + elif op == 'j': + j = Int(0) + elif op == '+': + if i.val < n-8: + sa += 7 + if j.val < n-16: + sa += 42 + sa += i.val * j.val + elif op == 'a': + i = Int(i.val + 1) + elif op == 'b': + j = Int(j.val + 1) + elif op == 'J': + if j.val < n: + pc -= 2 + myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i, j=j) + continue + elif op == 'I': + if i.val < n: + pc -= 5 + myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i, j=j) + continue + pc += 1 + return sa + + res = self.meta_interp(f, [32]) + assert res == f(32) + self.check_aborted_count(0) + self.check_target_token_count(3) + class VirtualMiscTests: def test_multiple_equal_virtuals(self): @@ -1008,7 +1170,7 @@ assert f(10) == 20 res = self.meta_interp(f, [10]) assert res == 20 - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=0, new=0) diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -582,7 +582,7 @@ res = self.meta_interp(f, [123], policy=StopAtXPolicy(g)) assert res == f(123) self.check_aborted_count(2) - self.check_tree_loop_count(0) + self.check_jitcell_token_count(0) def test_external_read_with_exception(self): jitdriver = JitDriver(greens = [], reds = ['frame'], @@ -621,7 +621,7 @@ res = self.meta_interp(f, [123], policy=StopAtXPolicy(g)) assert res == f(123) self.check_aborted_count(2) - self.check_tree_loop_count(0) + self.check_jitcell_token_count(0) def test_external_write(self): jitdriver = JitDriver(greens = [], reds = ['frame'], @@ -653,7 +653,7 @@ res = self.meta_interp(f, [240], policy=StopAtXPolicy(g)) assert res == f(240) self.check_aborted_count(3) - self.check_tree_loop_count(0) + self.check_jitcell_token_count(0) def test_external_read_sometimes(self): jitdriver = JitDriver(greens = [], reds = ['frame'], diff --git a/pypy/jit/metainterp/test/test_virtualref.py b/pypy/jit/metainterp/test/test_virtualref.py --- a/pypy/jit/metainterp/test/test_virtualref.py +++ b/pypy/jit/metainterp/test/test_virtualref.py @@ -321,7 +321,7 @@ assert res == 13 self.check_resops(new_with_vtable=2, # the vref, but not XY() new_array=0) # and neither next1/2/3 - self.check_loop_count(1) + self.check_trace_count(1) self.check_aborted_count(0) def test_blackhole_forces(self): @@ -363,7 +363,7 @@ assert res == 13 self.check_resops(new_with_vtable=0, # all virtualized in the n!=13 loop new_array=0) - self.check_loop_count(1) + self.check_trace_count(1) self.check_aborted_count(0) def test_bridge_forces(self): @@ -410,7 +410,7 @@ # res = self.meta_interp(f, [72]) assert res == 6 - self.check_loop_count(2) # the loop and the bridge + self.check_trace_count(2) # the loop and the bridge self.check_resops(new_with_vtable=2, # loop: nothing; bridge: vref, xy new_array=2) # bridge: next4, next5 self.check_aborted_count(0) diff --git a/pypy/jit/metainterp/test/test_virtualstate.py b/pypy/jit/metainterp/test/test_virtualstate.py --- a/pypy/jit/metainterp/test/test_virtualstate.py +++ b/pypy/jit/metainterp/test/test_virtualstate.py @@ -6,10 +6,11 @@ from pypy.jit.metainterp.optimizeopt.optimizer import 
OptValue from pypy.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr, ConstInt, ConstPtr from pypy.rpython.lltypesystem import lltype -from pypy.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin, BaseTest, equaloplists +from pypy.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin, BaseTest, \ + equaloplists, FakeDescrWithSnapshot from pypy.jit.metainterp.optimizeopt.intutils import IntBound -from pypy.jit.metainterp.history import TreeLoop, LoopToken -from pypy.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeDescr, FakeMetaInterpStaticData +from pypy.jit.metainterp.history import TreeLoop, JitCellToken +from pypy.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeMetaInterpStaticData from pypy.jit.metainterp.optimize import RetraceLoop from pypy.jit.metainterp.resoperation import ResOperation, rop @@ -434,7 +435,7 @@ enable_opts = "intbounds:rewrite:virtualize:string:pure:heap:unroll" def _do_optimize_bridge(self, bridge, call_pure_results): - from pypy.jit.metainterp.optimizeopt import optimize_bridge_1, build_opt_chain + from pypy.jit.metainterp.optimizeopt import optimize_trace from pypy.jit.metainterp.optimizeopt.util import args_dict self.bridge = bridge @@ -448,10 +449,9 @@ if hasattr(self, 'callinfocollection'): metainterp_sd.callinfocollection = self.callinfocollection # - d = {} - for name in self.enable_opts.split(":"): - d[name] = None - optimize_bridge_1(metainterp_sd, bridge, d) + bridge.start_resumedescr = FakeDescrWithSnapshot() + optimize_trace(metainterp_sd, bridge, self.enable_opts) + def optimize_bridge(self, loops, bridge, expected, expected_target='Loop', **boxvalues): if isinstance(loops, str): @@ -459,24 +459,19 @@ loops = [self.parse(loop) for loop in loops] bridge = self.parse(bridge) for loop in loops: - loop.preamble = TreeLoop('preamble') - loop.preamble.inputargs = loop.inputargs - loop.preamble.token = LoopToken() - loop.preamble.start_resumedescr = FakeDescr() - self._do_optimize_loop(loop, None) + loop.preamble = self.unroll_and_optimize(loop) preamble = loops[0].preamble - for loop in loops[1:]: - preamble.token.short_preamble.extend(loop.preamble.token.short_preamble) + token = JitCellToken() + token.target_tokens = [l.operations[0].getdescr() for l in [preamble] + loops] boxes = {} for b in bridge.inputargs + [op.result for op in bridge.operations]: boxes[str(b)] = b for b, v in boxvalues.items(): boxes[b].value = v - bridge.operations[-1].setdescr(preamble.token) - try: - self._do_optimize_bridge(bridge, None) - except RetraceLoop: + bridge.operations[-1].setdescr(token) + self._do_optimize_bridge(bridge, None) + if bridge.operations[-1].getopnum() == rop.LABEL: assert expected == 'RETRACE' return @@ -485,13 +480,13 @@ self.assert_equal(bridge, expected) if expected_target == 'Preamble': - assert bridge.operations[-1].getdescr() is preamble.token + assert bridge.operations[-1].getdescr() is preamble.operations[0].getdescr() elif expected_target == 'Loop': assert len(loops) == 1 - assert bridge.operations[-1].getdescr() is loops[0].token + assert bridge.operations[-1].getdescr() is loops[0].operations[0].getdescr() elif expected_target.startswith('Loop'): n = int(expected_target[4:]) - assert bridge.operations[-1].getdescr() is loops[n].token + assert bridge.operations[-1].getdescr() is loops[n].operations[0].getdescr() else: assert False @@ -918,6 +913,9 @@ pass def getvalue(*args): pass + def emit_operation(*args): + pass + class TestShortBoxes: p1 = BoxPtr() diff --git 
a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -200,7 +200,7 @@ m -= 1 self.meta_interp(f2, [i2]) try: - self.check_tree_loop_count(1) + self.check_jitcell_token_count(1) break except AssertionError: print "f2: no loop generated for i2==%d" % i2 @@ -215,7 +215,7 @@ m -= 1 self.meta_interp(f1, [i1]) try: - self.check_tree_loop_count(1) + self.check_jitcell_token_count(1) break except AssertionError: print "f1: no loop generated for i1==%d" % i1 @@ -235,8 +235,8 @@ self.meta_interp(f1, [8]) # it should generate one "loop" only, which ends in a FINISH # corresponding to the return from f2. - self.check_tree_loop_count(1) - self.check_loop_count(0) + self.check_trace_count(1) + self.check_resops(jump=0) def test_simple_loop(self): mydriver = JitDriver(greens=[], reds=['m']) @@ -245,8 +245,8 @@ mydriver.jit_merge_point(m=m) m = m - 1 self.meta_interp(f1, [8]) - self.check_loop_count(1) - self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + self.check_trace_count(1) + self.check_resops({'jump': 1, 'guard_true': 2, 'int_gt': 2, 'int_sub': 2}) def test_void_red_variable(self): diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -190,14 +190,14 @@ state = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = state.make_jitcell_getter() class FakeLoopToken(object): - pass + invalidated = False looptoken = FakeLoopToken() - state.attach_unoptimized_bridge_from_interp([ConstInt(5), - constfloat(2.25)], - looptoken) + state.attach_procedure_to_interp([ConstInt(5), + constfloat(2.25)], + looptoken) cell1 = get_jitcell(True, 5, 2.25) assert cell1.counter < 0 - assert cell1.get_entry_loop_token() is looptoken + assert cell1.get_procedure_token() is looptoken def test_make_jitdriver_callbacks_1(): class FakeWarmRunnerDesc: diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -169,34 +169,22 @@ # counter == -1: there is an entry bridge for this cell # counter == -2: tracing is currently going on for this cell counter = 0 - compiled_merge_points_wref = None # list of weakrefs to LoopToken dont_trace_here = False - wref_entry_loop_token = None # (possibly) one weakref to LoopToken + wref_procedure_token = None - def get_compiled_merge_points(self): - result = [] - if self.compiled_merge_points_wref is not None: - for wref in self.compiled_merge_points_wref: - looptoken = wref() - if looptoken is not None and not looptoken.invalidated: - result.append(looptoken) - return result - - def set_compiled_merge_points(self, looptokens): - self.compiled_merge_points_wref = [self._makeref(token) - for token in looptokens] - - def get_entry_loop_token(self): - if self.wref_entry_loop_token is not None: - return self.wref_entry_loop_token() + def get_procedure_token(self): + if self.wref_procedure_token is not None: + token = self.wref_procedure_token() + if token and not token.invalidated: + return token return None - def set_entry_loop_token(self, looptoken): - self.wref_entry_loop_token = self._makeref(looptoken) + def set_procedure_token(self, token): + self.wref_procedure_token = self._makeref(token) - def _makeref(self, looptoken): - assert looptoken is not None - return weakref.ref(looptoken) + def _makeref(self, 
token): + assert token is not None + return weakref.ref(token) # ____________________________________________________________ @@ -283,18 +271,17 @@ debug_print("disabled inlining", loc) debug_stop("jit-disableinlining") - def attach_unoptimized_bridge_from_interp(self, greenkey, - entry_loop_token): + def attach_procedure_to_interp(self, greenkey, procedure_token): cell = self.jit_cell_at_key(greenkey) - old_token = cell.get_entry_loop_token() - cell.set_entry_loop_token(entry_loop_token) - cell.counter = -1 # valid entry bridge attached + old_token = cell.get_procedure_token() + cell.set_procedure_token(procedure_token) + cell.counter = -1 # valid procedure bridge attached if old_token is not None: - self.cpu.redirect_call_assembler(old_token, entry_loop_token) - # entry_loop_token is also kept alive by any loop that used + self.cpu.redirect_call_assembler(old_token, procedure_token) + # procedure_token is also kept alive by any loop that used # to point to old_token. Actually freeing old_token early # is a pointless optimization (it is tiny). - old_token.record_jump_to(entry_loop_token) + old_token.record_jump_to(procedure_token) # ---------- @@ -343,7 +330,7 @@ # set counter to -2, to mean "tracing in effect" cell.counter = -2 try: - loop_token = metainterp.compile_and_run_once(jitdriver_sd, + procedure_token = metainterp.compile_and_run_once(jitdriver_sd, *args) finally: if cell.counter == -2: @@ -356,8 +343,8 @@ assert cell.counter == -1 if not confirm_enter_jit(*args): return - loop_token = cell.get_entry_loop_token() - if loop_token is None: # it was a weakref that has been freed + procedure_token = cell.get_procedure_token() + if procedure_token is None: # it was a weakref that has been freed cell.counter = 0 return # machine code was already compiled for these greenargs @@ -368,14 +355,14 @@ while True: # until interrupted by an exception metainterp_sd.profiler.start_running() #debug_start("jit-running") - fail_descr = warmrunnerdesc.execute_token(loop_token) + fail_descr = warmrunnerdesc.execute_token(procedure_token) #debug_stop("jit-running") metainterp_sd.profiler.end_running() - loop_token = None # for test_memmgr + procedure_token = None # for test_memmgr if vinfo is not None: vinfo.reset_vable_token(virtualizable) - loop_token = fail_descr.handle_fail(metainterp_sd, - jitdriver_sd) + procedure_token = fail_descr.handle_fail(metainterp_sd, + jitdriver_sd) maybe_compile_and_run._dont_inline_ = True self.maybe_compile_and_run = maybe_compile_and_run @@ -617,16 +604,16 @@ def get_assembler_token(greenkey, redboxes): # 'redboxes' is only used to know the types of red arguments cell = self.jit_cell_at_key(greenkey) - entry_loop_token = cell.get_entry_loop_token() - if entry_loop_token is None: + procedure_token = cell.get_procedure_token() + if procedure_token is None: from pypy.jit.metainterp.compile import compile_tmp_callback if cell.counter == -1: # used to be a valid entry bridge, cell.counter = 0 # but was freed in the meantime. 
memmgr = warmrunnerdesc.memory_manager - entry_loop_token = compile_tmp_callback(cpu, jd, greenkey, - redboxes, memmgr) - cell.set_entry_loop_token(entry_loop_token) - return entry_loop_token + procedure_token = compile_tmp_callback(cpu, jd, greenkey, + redboxes, memmgr) + cell.set_procedure_token(procedure_token) + return procedure_token self.get_assembler_token = get_assembler_token # diff --git a/pypy/jit/tool/oparser.py b/pypy/jit/tool/oparser.py --- a/pypy/jit/tool/oparser.py +++ b/pypy/jit/tool/oparser.py @@ -70,7 +70,7 @@ self.invent_fail_descr = invent_fail_descr self.nonstrict = nonstrict self.model = get_model(self.use_mock_model) - self.looptoken = self.model.LoopToken() + self.original_jitcell_token = self.model.JitCellToken() def get_const(self, name, typ): if self._consts is None: @@ -243,7 +243,8 @@ descr = self.invent_fail_descr(self.model, fail_args) elif opnum == rop.JUMP: if descr is None and self.invent_fail_descr: - descr = self.looptoken + descr = self.original_jitcell_token + return opnum, args, descr, fail_args def create_op(self, opnum, args, result, descr): @@ -307,7 +308,7 @@ raise ParseError("unexpected dedent at line: %s" % newlines[num]) loop = self.model.ExtendedTreeLoop("loop") loop.comment = first_comment - loop.token = self.looptoken + loop.original_jitcell_token = self.original_jitcell_token loop.operations = ops loop.inputargs = inpargs loop.last_offset = last_offset diff --git a/pypy/jit/tool/oparser_model.py b/pypy/jit/tool/oparser_model.py --- a/pypy/jit/tool/oparser_model.py +++ b/pypy/jit/tool/oparser_model.py @@ -3,7 +3,7 @@ def get_real_model(): class LoopModel(object): - from pypy.jit.metainterp.history import TreeLoop, LoopToken + from pypy.jit.metainterp.history import TreeLoop, JitCellToken from pypy.jit.metainterp.history import Box, BoxInt, BoxFloat from pypy.jit.metainterp.history import ConstInt, ConstObj, ConstPtr, ConstFloat from pypy.jit.metainterp.history import BasicFailDescr @@ -33,13 +33,13 @@ return LoopModel def get_mock_model(): - class LoopModel(object): + class MockLoopModel(object): class TreeLoop(object): def __init__(self, name): self.name = name - class LoopToken(object): + class JitCellToken(object): I_am_a_descr = True class BasicFailDescr(object): @@ -107,9 +107,9 @@ class llhelper(object): pass - LoopModel.llhelper.BoxRef = LoopModel.BoxRef + MockLoopModel.llhelper.BoxRef = MockLoopModel.BoxRef - return LoopModel + return MockLoopModel def get_model(use_mock): diff --git a/pypy/jit/tool/test/test_jitoutput.py b/pypy/jit/tool/test/test_jitoutput.py --- a/pypy/jit/tool/test/test_jitoutput.py +++ b/pypy/jit/tool/test/test_jitoutput.py @@ -36,12 +36,12 @@ assert info.tracing_no == 1 assert info.asm_no == 1 assert info.blackhole_no == 1 - assert info.backend_no == 2 + assert info.backend_no == 1 assert info.ops.total == 2 assert info.recorded_ops.total == 2 assert info.recorded_ops.calls == 0 assert info.guards == 1 - assert info.opt_ops == 11 + assert info.opt_ops == 13 assert info.opt_guards == 2 assert info.forcings == 0 diff --git a/pypy/jit/tool/test/test_oparser.py b/pypy/jit/tool/test/test_oparser.py --- a/pypy/jit/tool/test/test_oparser.py +++ b/pypy/jit/tool/test/test_oparser.py @@ -4,7 +4,7 @@ from pypy.jit.tool.oparser import parse, OpParser from pypy.jit.metainterp.resoperation import rop -from pypy.jit.metainterp.history import AbstractDescr, BoxInt, LoopToken +from pypy.jit.metainterp.history import AbstractDescr, BoxInt, JitCellToken class BaseTestOparser(object): @@ -119,10 +119,10 @@ jump() ''' loop = 
self.parse(x) - assert loop.operations[0].getdescr() is loop.token + assert loop.operations[0].getdescr() is loop.original_jitcell_token def test_jump_target_other(self): - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.I_am_a_descr = True # for the mock case x = ''' [] diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -188,7 +188,7 @@ self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 35, 'getfield_gc_pure': 6, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, - 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, + 'guard_isnull': 2, 'jump': 2, 'int_ge': 4, 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, 'guard_value': 2}) @@ -230,7 +230,7 @@ def test_specialization(self): self.run("specialization") # This is 3, not 2 because there is a bridge for the exit. - self.check_loop_count(3) + self.check_trace_count(3) def define_slice(): return """ @@ -325,7 +325,7 @@ def test_setslice(self): result = self.run("setslice") assert result == 11.0 - self.check_loop_count(1) + self.check_trace_count(1) self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, 'setinteriorfield_raw': 1, 'int_add': 3, 'int_eq': 1, 'guard_false': 1, 'jump': 1}) diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -3,7 +3,7 @@ from pypy.conftest import gettestobjspace, option from pypy.interpreter.pycode import PyCode from pypy.interpreter.gateway import interp2app -from pypy.jit.metainterp.history import LoopToken +from pypy.jit.metainterp.history import JitCellToken from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.logger import Logger from pypy.rpython.annlowlevel import (cast_instance_to_base_ptr, @@ -41,11 +41,11 @@ """, namespace={'ptr0': code_gcref}).operations def interp_on_compile(): - pypyjitdriver.on_compile(logger, LoopToken(), oplist, 'loop', + pypyjitdriver.on_compile(logger, JitCellToken(), oplist, 'loop', 0, False, ll_code) def interp_on_compile_bridge(): - pypyjitdriver.on_compile_bridge(logger, LoopToken(), oplist, 0) + pypyjitdriver.on_compile_bridge(logger, JitCellToken(), oplist, 0) cls.w_on_compile = space.wrap(interp2app(interp_on_compile)) cls.w_on_compile_bridge = space.wrap(interp2app(interp_on_compile_bridge)) diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -47,32 +47,33 @@ storage = LoopStorage() traces = [SimpleParser.parse_from_input(rawtrace) for rawtrace in rawtraces] traces = storage.reconnect_loops(traces) - self.loops = [LoopWithIds.from_trace(trace, storage) for trace in traces] + self.loops = [TraceWithIds.from_trace(trace, storage) for trace in traces] def _filter(self, loop, is_entry_bridge=False): - return is_entry_bridge == '*' or loop.is_entry_bridge == is_entry_bridge + if is_entry_bridge == '*': + return loop + assert is_entry_bridge in (True, False) + return PartialTraceWithIds(loop, is_entry_bridge) def loops_by_filename(self, filename, **kwds): """ Return all loops which start in the file ``filename`` """ - return [loop for loop in self.loops - if loop.filename == filename and self._filter(loop, **kwds)] + return [self._filter(loop, **kwds) for loop in 
self.loops + if loop.filename == filename] def loops_by_id(self, id, **kwds): """ Return all loops which contain the ID ``id`` """ - return [loop for loop in self.loops - if loop.has_id(id) and self._filter(loop, **kwds)] + return [self._filter(loop, **kwds) for loop in self.loops + if loop.has_id(id)] @classmethod def opnames(self, oplist): return [op.name for op in oplist] -class LoopWithIds(Function): - - is_entry_bridge = False +class TraceWithIds(Function): def __init__(self, *args, **kwds): Function.__init__(self, *args, **kwds) @@ -88,7 +89,6 @@ @classmethod def from_trace(cls, trace, storage): res = cls.from_operations(trace.operations, storage) - res.is_entry_bridge = 'entry bridge' in trace.comment return res def flatten_chunks(self): @@ -117,7 +117,7 @@ # # 2. compute the ids of all the inlined functions for chunk in self.chunks: - if isinstance(chunk, LoopWithIds): + if isinstance(chunk, TraceWithIds): chunk.compute_ids(ids) def get_set_of_opcodes(self): @@ -144,6 +144,10 @@ (opcode and opcode.__class__.__name__ == opcode_name): for op in self._ops_for_chunk(chunk, include_debug_merge_points): yield op + else: + for op in chunk.operations: + if op.name == 'label': + yield op def allops(self, *args, **kwds): return list(self._allops(*args, **kwds)) @@ -161,26 +165,72 @@ def _ops_by_id(self, id, include_debug_merge_points=False, opcode=None): opcode_name = opcode target_opcodes = self.ids[id] + loop_ops = self.allops(include_debug_merge_points, opcode) for chunk in self.flatten_chunks(): opcode = chunk.getopcode() if opcode in target_opcodes and (opcode_name is None or opcode.__class__.__name__ == opcode_name): for op in self._ops_for_chunk(chunk, include_debug_merge_points): - yield op + if op in loop_ops: + yield op def ops_by_id(self, *args, **kwds): return list(self._ops_by_id(*args, **kwds)) def match(self, expected_src, **kwds): - ops = list(self.allops()) - matcher = OpMatcher(ops, src=self.format_ops()) + ops = self.allops() + matcher = OpMatcher(ops) return matcher.match(expected_src, **kwds) def match_by_id(self, id, expected_src, **kwds): ops = list(self.ops_by_id(id, **kwds)) - matcher = OpMatcher(ops, src=self.format_ops(id)) + matcher = OpMatcher(ops) return matcher.match(expected_src) +class PartialTraceWithIds(TraceWithIds): + def __init__(self, trace, is_entry_bridge=False): + self.trace = trace + self.is_entry_bridge = is_entry_bridge + + def allops(self, *args, **kwds): + if self.is_entry_bridge: + return self.entry_bridge_ops(*args, **kwds) + else: + return self.simple_loop_ops(*args, **kwds) + + def simple_loop_ops(self, *args, **kwds): + ops = list(self._allops(*args, **kwds)) + labels = [op for op in ops if op.name == 'label'] + jumpop = self.chunks[-1].operations[-1] + assert jumpop.name == 'jump' + assert jumpop.getdescr() == labels[-1].getdescr() + i = ops.index(labels[-1]) + return ops[i+1:] + + def entry_bridge_ops(self, *args, **kwds): + ops = list(self._allops(*args, **kwds)) + labels = [op for op in ops if op.name == 'label'] + assert ops.index(labels[0]) == 0 + i = ops.index(labels[1]) + return ops[1:i] + + @property + def chunks(self): + return self.trace.chunks + + @property + def ids(self): + return self.trace.ids + + @property + def filename(self): + return self.trace.filename + + @property + def code(self): + return self.trace.code + + class InvalidMatch(Exception): opindex = None @@ -210,9 +260,9 @@ class OpMatcher(object): - def __init__(self, ops, src=None): + def __init__(self, ops): self.ops = ops - self.src = src + self.src = 
'\n'.join(map(str, ops)) self.alpha_map = {} @classmethod diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -8,7 +8,7 @@ from pypy.tool import logparser from pypy.jit.tool.jitoutput import parse_prof from pypy.module.pypyjit.test_pypy_c.model import Log, find_ids_range, find_ids, \ - LoopWithIds, OpMatcher + TraceWithIds, OpMatcher class BaseTestPyPyC(object): def setup_class(cls): @@ -50,7 +50,7 @@ cmdline.append(str(self.filepath)) # print cmdline, logfile - env={'PYPYLOG': 'jit-log-opt,jit-log-noopt,jit-summary:' + str(logfile)} + env={'PYPYLOG': 'jit-log-opt,jit-log-noopt,jit-log-virtualstate,jit-summary:' + str(logfile)} pipe = subprocess.Popen(cmdline, env=env, stdout=subprocess.PIPE, @@ -118,7 +118,7 @@ def match(self, src1, src2, **kwds): from pypy.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) - matcher = OpMatcher(loop.operations, src=src1) + matcher = OpMatcher(loop.operations) return matcher.match(src2, **kwds) def test_match_var(self): @@ -317,14 +317,17 @@ loops = log.loops_by_filename(self.filepath) assert len(loops) == 1 assert loops[0].filename == self.filepath - assert not loops[0].is_entry_bridge + assert len([op for op in loops[0].allops() if op.name == 'label']) == 0 + assert len([op for op in loops[0].allops() if op.name == 'guard_nonnull_class']) == 0 # loops = log.loops_by_filename(self.filepath, is_entry_bridge=True) assert len(loops) == 1 - assert loops[0].is_entry_bridge + assert len([op for op in loops[0].allops() if op.name == 'label']) == 0 + assert len([op for op in loops[0].allops() if op.name == 'guard_nonnull_class']) > 0 # loops = log.loops_by_filename(self.filepath, is_entry_bridge='*') - assert len(loops) == 2 + assert len(loops) == 1 + assert len([op for op in loops[0].allops() if op.name == 'label']) == 2 def test_loops_by_id(self): def f(): diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -22,7 +22,7 @@ guard_true(i7, descr=...) i9 = int_add(i5, 1) --TICK-- - jump(p0, p1, p2, p3, p4, i9, i6, descr=) + jump(p0, p1, p2, p3, p4, i9, i6, descr=...) """) def test_array_sum(self): @@ -47,7 +47,7 @@ guard_no_overflow(descr=...) i18 = int_add(i7, 1) --TICK-- - jump(p0, p1, p2, p3, p4, p5, i18, i16, p8, i9, i10, descr=) + jump(p0, p1, p2, p3, p4, p5, i18, i16, p8, i9, i10, descr=...) """) def test_array_intimg(self): @@ -85,7 +85,7 @@ setarrayitem_raw(i11, i8, _, descr=<.*ArrayNoLengthDescr>) i28 = int_add(i8, 1) --TICK-- - jump(p0, p1, p2, p3, p4, p5, p6, i28, i15, p9, i10, i11, descr=) + jump(p0, p1, p2, p3, p4, p5, p6, i28, i15, p9, i10, i11, descr=...) """) def test_array_of_doubles(self): @@ -115,7 +115,7 @@ guard_true(i18, descr=...) i20 = int_add(i6, 1) --TICK-- - jump(..., descr=) + jump(..., descr=...) """) def test_array_of_floats(self): @@ -152,7 +152,7 @@ guard_true(i21, descr=...) i23 = int_add(i6, 1) --TICK-- - jump(..., descr=) + jump(..., descr=...) """ % (arraydescr, arraydescr, arraydescr)) diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -99,7 +99,7 @@ i15 = int_add_ovf(i12, 1) guard_no_overflow(descr=...) 
--TICK-- - jump(p0, p1, p2, p3, p4, i15, i6, p7, p8, descr=) + jump(p0, p1, p2, p3, p4, i15, i6, p7, p8, descr=...) """) def test_method_call(self): @@ -142,7 +142,7 @@ i19 = int_add_ovf(i10, i17) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p3, p4, p5, i19, p7, i17, i9, i10, p11, p12, p13, descr=) + jump(p0, p1, p2, p3, p4, p5, i19, p7, i17, i9, i10, p11, p12, p13, descr=...) """) def test_static_classmethod_call(self): @@ -174,7 +174,7 @@ guard_no_overflow(descr=...) i18 = force_token() --TICK-- - jump(..., descr=) + jump(..., descr=...) """) def test_default_and_kw(self): @@ -394,7 +394,7 @@ guard_not_invalidated(descr=...) i120 = int_add(i5, 1) --TICK-- - jump(..., descr=) + jump(..., descr=...) """) def test_global_closure_has_constant_cells(self): @@ -438,7 +438,7 @@ i33 = int_add_ovf(i15, i32) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p5, i33, i32, p23, p30, p24, descr=) + p39 = same_as(...) # Should be killed by backend """) def test_local_closure_is_virtual(self): @@ -461,7 +461,7 @@ p22 = new_with_vtable(ConstClass(W_IntObject)) setfield_gc(p22, i13, descr=) setfield_gc(p4, p22, descr=) - jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=) + jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=...) """) def test_kwargs_virtual(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -115,7 +115,7 @@ i35 = int_add_ovf(i5, i34) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p3, p4, i35, p13, i7, descr=) + jump(p0, p1, p2, p3, p4, i35, p13, i7, descr=...) """) def test_floatlist_unpack_without_calls(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_exception.py b/pypy/module/pypyjit/test_pypy_c/test_exception.py --- a/pypy/module/pypyjit/test_pypy_c/test_exception.py +++ b/pypy/module/pypyjit/test_pypy_c/test_exception.py @@ -42,7 +42,7 @@ i12 = int_sub_ovf(i3, 1) guard_no_overflow(descr=...) --TICK-- - jump(..., descr=) + jump(..., descr=...) """) def test_exception_inside_loop_2(self): @@ -89,5 +89,5 @@ --EXC-TICK-- i14 = int_add(i4, 1) --TICK-- - jump(..., descr=) + jump(..., descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -15,12 +15,14 @@ g() log = self.run(main, [500]) - loop, = log.loops_by_filename(self.filepath) + loop, = log.loops_by_filename(self.filepath, is_entry_bridge='*') assert loop.match_by_id("generator", """ + ... + label(..., descr=...) i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) + setfield_gc(p45, i29, descr=) + setarrayitem_gc(p8, 0, p45, descr=) i47 = arraylen_gc(p8, descr=) # Should be removed by backend - setarrayitem_gc(p8, 0, p45, descr=) - setfield_gc(p45, i29, descr=) jump(..., descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -27,7 +27,7 @@ i9 = int_add_ovf(i5, 2) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p3, p4, i9, i6, descr=) + jump(p0, p1, p2, p3, p4, i9, i6, descr=...) """) def test_load_attr(self): @@ -52,7 +52,7 @@ i10 = int_add_ovf(i5, i7) guard_no_overflow(descr=...) 
--TICK-- - jump(p0, p1, p2, p3, p4, i10, i6, i7, p8, descr=) + jump(p0, p1, p2, p3, p4, i10, i6, i7, p8, descr=...) """) def test_getattr_with_dynamic_attribute(self): @@ -125,9 +125,9 @@ i12 = force_token() --TICK-- p20 = new_with_vtable(ConstClass(W_IntObject)) + setfield_gc(p20, i11, descr=) setfield_gc(ConstPtr(ptr21), p20, descr=) - setfield_gc(p20, i11, descr=) - jump(p0, p1, p2, p3, p4, p20, p6, i7, p20, descr=) + jump(p0, p1, p2, p3, p4, p20, p6, i7, p20, descr=...) """) def test_oldstyle_newstyle_mix(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_intbound.py b/pypy/module/pypyjit/test_pypy_c/test_intbound.py --- a/pypy/module/pypyjit/test_pypy_c/test_intbound.py +++ b/pypy/module/pypyjit/test_pypy_c/test_intbound.py @@ -97,7 +97,7 @@ guard_no_overflow(descr=...) i17 = int_add(i8, 1) --TICK-- - jump(p0, p1, p2, p3, p4, i14, i12, i17, p8, i9, descr=) + jump(p0, p1, p2, p3, p4, i14, i12, i17, p8, i9, descr=...) """) def test_intbound_sub_lt(self): @@ -121,7 +121,7 @@ guard_no_overflow(descr=...) i13 = int_add(i5, 1) --TICK-- - jump(p0, p1, p2, p3, i11, i13, descr=) + jump(p0, p1, p2, p3, i11, i13, descr=...) """) def test_intbound_addsub_ge(self): @@ -150,7 +150,7 @@ guard_no_overflow(descr=...) i19 = int_add(i8, 1) --TICK-- - jump(p0, p1, p2, p3, p4, i16, i14, i19, p8, i9, descr=) + jump(p0, p1, p2, p3, p4, i16, i14, i19, p8, i9, descr=...) """) def test_intbound_addmul_ge(self): @@ -178,7 +178,7 @@ guard_no_overflow(descr=...) i21 = int_add(i8, 1) --TICK-- - jump(p0, p1, p2, p3, p4, i18, i14, i21, p8, descr=) + jump(p0, p1, p2, p3, p4, i18, i14, i21, p8, descr=...) """) def test_intbound_eq(self): @@ -210,7 +210,7 @@ guard_no_overflow(descr=...) i16 = int_add(i8, 1) --TICK-- - jump(p0, p1, p2, p3, p4, p6, i14, i16, p8, descr=) + jump(p0, p1, p2, p3, p4, p6, i14, i16, p8, descr=...) """) def test_intbound_mul(self): @@ -236,7 +236,7 @@ guard_no_overflow(descr=...) i14 = int_add(i6, 1) --TICK-- - jump(p0, p1, p2, p3, p4, i12, i14, descr=) + jump(p0, p1, p2, p3, p4, i12, i14, descr=...) """) def test_assert(self): @@ -257,7 +257,7 @@ guard_no_overflow(descr=...) i12 = int_add(i6, 1) --TICK-- - jump(p0, p1, p2, p3, p4, i10, i12, descr=) + jump(p0, p1, p2, p3, p4, i10, i12, descr=...) """) def test_xor(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -29,7 +29,7 @@ f5 = float_add(f0, f4) i4 = int_add(i0, 1) --TICK-- - jump(..., descr=) + jump(..., descr=...) """) def test_sin_cos(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_min_max.py b/pypy/module/pypyjit/test_pypy_c/test_min_max.py --- a/pypy/module/pypyjit/test_pypy_c/test_min_max.py +++ b/pypy/module/pypyjit/test_pypy_c/test_min_max.py @@ -22,7 +22,7 @@ guard_no_overflow(descr=...) i11 = int_add(i4, 1) --TICK-- - jump(p0, p1, p2, p3, i11, i9, descr=) + jump(p0, p1, p2, p3, i11, i9, descr=...) """) def test_silly_max(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -34,7 +34,10 @@ jump(p0, p1, p2, p3, p4, p5, i13, i11, i8, descr=...) """ assert loop0.match(expected) - assert loop1.match(expected) + # XXX: The retracing fails to form a loop since j + # becomes constant 0 after the bridge and constant 1 at the end of the + # loop. A bridge back to the peramble is produced instead. 
+ #assert loop1.match(expected) def test_factorial(self): def fact(n): @@ -88,7 +91,7 @@ guard_true(i9, descr=...) f10 = float_add(f8, f5) --TICK-- - jump(p0, p1, p2, p3, p4, f10, p6, f7, f8, descr=) + jump(p0, p1, p2, p3, p4, f10, p6, f7, f8, descr=...) """) @@ -159,7 +162,7 @@ i27 = int_add_ovf(i7, i18) guard_no_overflow(descr=...) --TICK-- - jump(..., descr=) + jump(..., descr=...) """) @@ -219,7 +222,7 @@ i28 = int_add_ovf(i10, i25) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p3, p4, p5, p6, i28, i25, p9, p10, p11, p12, i19, descr=) + jump(p0, p1, p2, p3, p4, p5, p6, i28, i25, p9, p10, p11, p12, i19, descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -156,7 +156,7 @@ guard_no_overflow(descr=...) i40 = int_sub(i4, 1) --TICK-- - jump(p0, p1, p2, p3, i40, i38, descr=) + jump(p0, p1, p2, p3, i40, i38, descr=...) """) def test_getattr_promote(self): @@ -179,7 +179,7 @@ log = self.run(main, [1000]) assert log.result == main(1000) loops = log.loops_by_filename(self.filepath) - assert len(loops) == 2 + assert len(loops) == 1 for loop in loops: loop.match_by_id('getattr',''' guard_not_invalidated(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_thread.py b/pypy/module/pypyjit/test_pypy_c/test_thread.py --- a/pypy/module/pypyjit/test_pypy_c/test_thread.py +++ b/pypy/module/pypyjit/test_pypy_c/test_thread.py @@ -24,5 +24,5 @@ guard_true(i2, descr=...) i3 = int_add(i0, 1) --THREAD-TICK-- - jump(..., descr=) + jump(..., descr=...) """) diff --git a/pypy/rlib/rsre/test/test_zjit.py b/pypy/rlib/rsre/test/test_zjit.py --- a/pypy/rlib/rsre/test/test_zjit.py +++ b/pypy/rlib/rsre/test/test_zjit.py @@ -70,7 +70,8 @@ def test_simple_match_repeated(self): res = self.meta_interp_match(r"abcdef", "abcdef", repeat=10) assert res == 6 - self.check_tree_loop_count(1) + self.check_trace_count(1) + self.check_jitcell_token_count(1) def test_match_minrepeat_1(self): res = self.meta_interp_match(r".*?abc", "xxxxxxxxxxxxxxabc") From noreply at buildbot.pypy.org Mon Dec 12 09:04:10 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Mon, 12 Dec 2011 09:04:10 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: hg merge default Message-ID: <20111212080410.244DF82ABD@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50404:d3d7d07ab453 Date: 2011-12-12 08:53 +0100 http://bitbucket.org/pypy/pypy/changeset/d3d7d07ab453/ Log: hg merge default diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -112,10 +112,32 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. +Note that the JIT has a very high warm-up cost, meaning that the +programs are slow at the beginning. If you want to compare the timings +with CPython, even relatively simple programs need to run *at least* one +second, preferrably at least a few seconds. Large, complicated programs +need even more time to warm-up the JIT. + .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +--------------------------------------------------------------- +Couldn't the JIT dump and reload already-compiled machine code? +--------------------------------------------------------------- + +No, we found no way of doing that. 
The JIT generates machine code +containing a large number of constant addresses --- constant at the time +the machine code is written. The vast majority is probably not at all +constants that you find in the executable, with a nice link name. E.g. +the addresses of Python classes are used all the time, but Python +classes don't come statically from the executable; they are created anew +every time you restart your program. This makes saving and reloading +machine code completely impossible without some very advanced way of +mapping addresses in the old (now-dead) process to addresses in the new +process, including checking that all the previous assumptions about the +(now-dead) object are still true about the new object. + .. _`prolog and javascript`: diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -261,8 +261,10 @@ return fail_index def execute_token(self, loop_token): - """Calls the assembler generated for the given loop. - Returns the ResOperation that failed, of type rop.FAIL. + """Calls the fake 'assembler' generated for the given loop. + Returns the descr of the last executed operation: either the one + attached to the failing guard, or the one attached to the FINISH. + Use set_future_value_xxx() before, and get_latest_value_xxx() after. """ fail_index = self._execute_token(loop_token) return self.get_fail_descr_from_number(fail_index) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -91,6 +91,9 @@ descr_neg = _unaryop_impl("negative") descr_abs = _unaryop_impl("absolute") + def descr_tolist(self, space): + return self.get_dtype(space).itemtype.to_builtin_type(space, self) + class W_BoolBox(W_GenericBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("bool") @@ -179,6 +182,8 @@ __neg__ = interp2app(W_GenericBox.descr_neg), __abs__ = interp2app(W_GenericBox.descr_abs), + + tolist = interp2app(W_GenericBox.descr_tolist), ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -876,6 +876,17 @@ arr.setshape(space, new_shape) return arr + def descr_tolist(self, space): + if len(self.shape) == 0: + assert isinstance(self, Scalar) + return self.value.descr_tolist(space) + w_result = space.newlist([]) + for i in range(self.shape[0]): + space.call_method(w_result, "append", + space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist") + ) + return w_result + def descr_mean(self, space): return space.div(self.descr_sum(space), space.wrap(self.find_size())) @@ -1485,6 +1496,7 @@ copy = interp2app(BaseArray.descr_copy), reshape = interp2app(BaseArray.descr_reshape), + tolist = interp2app(BaseArray.descr_tolist), ) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -879,6 +879,45 @@ b[0] = 3 assert b.__debug_repr__() == 'Call2(add, forced=Array)' + def test_tolist_scalar(self): + from numpypy import int32, bool_ + x = int32(23) + assert x.tolist() == 23 + assert type(x.tolist()) is int + y = bool_(True) + assert y.tolist() is True + + def test_tolist_zerodim(self): 
+ from numpypy import array + x = array(3) + assert x.tolist() == 3 + assert type(x.tolist()) is int + + def test_tolist_singledim(self): + from numpypy import array + a = array(range(5)) + assert a.tolist() == [0, 1, 2, 3, 4] + assert type(a.tolist()[0]) is int + b = array([0.2, 0.4, 0.6]) + assert b.tolist() == [0.2, 0.4, 0.6] + + def test_tolist_multidim(self): + from numpypy import array + a = array([[1, 2], [3, 4]]) + assert a.tolist() == [[1, 2], [3, 4]] + + def test_tolist_view(self): + from numpypy import array + a = array([[1,2],[3,4]]) + assert (a + a).tolist() == [[2, 4], [6, 8]] + + def test_tolist_slice(self): + from numpypy import array + a = array([[17.1, 27.2], [40.3, 50.3]]) + assert a[:,0].tolist() == [17.1, 40.3] + assert a[0].tolist() == [17.1, 27.2] + + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -78,6 +78,9 @@ w_obj.__init__(self._coerce(space, w_item).value) return w_obj + def to_builtin_type(self, space, box): + return space.wrap(self.for_computation(self.unbox(box))) + def _coerce(self, space, w_item): raise NotImplementedError @@ -180,6 +183,9 @@ def _coerce(self, space, w_item): return self.box(space.is_true(w_item)) + def to_builtin_type(self, space, w_item): + return space.wrap(self.unbox(w_item)) + def str_format(self, box): value = self.unbox(box) return "True" if value else "False" From noreply at buildbot.pypy.org Mon Dec 12 09:04:11 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Mon, 12 Dec 2011 09:04:11 +0100 (CET) Subject: [pypy-commit] pypy jit-targets: closing to be merged branch Message-ID: <20111212080411.4134C82ABE@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-targets Changeset: r50405:cccccb4a9c72 Date: 2011-12-12 08:54 +0100 http://bitbucket.org/pypy/pypy/changeset/cccccb4a9c72/ Log: closing to be merged branch From noreply at buildbot.pypy.org Mon Dec 12 10:01:13 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 10:01:13 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Fix for StackLocs with a negative 'position' (corresponding to Message-ID: <20111212090113.BB9A582210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50407:793032bf57e5 Date: 2011-12-12 01:10 +0100 http://bitbucket.org/pypy/pypy/changeset/793032bf57e5/ Log: Fix for StackLocs with a negative 'position' (corresponding to a positive offset from %ebp). diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -1754,10 +1754,10 @@ DESCR_INT = 0x01 DESCR_FLOAT = 0x02 DESCR_SPECIAL = 0x03 - # XXX: 4*8 works on i386, should we optimize for that case? 
- CODE_FROMSTACK = 4*16 + CODE_FROMSTACK = 4 * (8 + 8*IS_X86_64) CODE_STOP = 0 | DESCR_SPECIAL CODE_HOLE = 4 | DESCR_SPECIAL + CODE_INPUTARG = 8 | DESCR_SPECIAL def write_failure_recovery_description(self, mc, failargs, locs): for i in range(len(failargs)): @@ -1773,7 +1773,11 @@ raise AssertionError("bogus kind") loc = locs[i] if isinstance(loc, StackLoc): - n = self.CODE_FROMSTACK//4 + loc.position + pos = loc.position + if pos < 0: + mc.writechar(chr(self.CODE_INPUTARG)) + pos = ~pos + n = self.CODE_FROMSTACK//4 + pos else: assert isinstance(loc, RegLoc) n = loc.value @@ -1793,6 +1797,7 @@ descr_to_box_type = [REF, INT, FLOAT] bytecode = rffi.cast(rffi.UCHARP, bytecode) arglocs = [] + code_inputarg = False while 1: # decode the next instruction from the bytecode code = rffi.cast(lltype.Signed, bytecode[0]) @@ -1811,11 +1816,17 @@ break kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 + if code_inputarg: + code = ~code + code_inputarg = False loc = X86FrameManager.frame_pos(code, descr_to_box_type[kind]) elif code == self.CODE_STOP: break elif code == self.CODE_HOLE: continue + elif code == self.CODE_INPUTARG: + code_inputarg = True + continue else: # 'code' identifies a register kind = code & 3 @@ -1831,6 +1842,7 @@ def grab_frame_values(self, bytecode, frame_addr, allregisters): # no malloc allowed here!! self.fail_ebp = allregisters[16 + ebp.value] + code_inputarg = False num = 0 value_hi = 0 while 1: @@ -1851,6 +1863,9 @@ # load the value from the stack kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 + if code_inputarg: + code = ~code + code_inputarg = False stackloc = frame_addr + get_ebp_ofs(code) value = rffi.cast(rffi.LONGP, stackloc)[0] if kind == self.DESCR_FLOAT and WORD == 4: @@ -1863,6 +1878,9 @@ if code == self.CODE_HOLE: num += 1 continue + if code == self.CODE_INPUTARG: + code_inputarg = True + continue assert code == self.CODE_STOP break code >>= 2 From noreply at buildbot.pypy.org Mon Dec 12 10:01:14 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 10:01:14 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: 64-bit support. Message-ID: <20111212090114.DF00282210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50408:7ae9b2f7c938 Date: 2011-12-12 09:19 +0100 http://bitbucket.org/pypy/pypy/changeset/7ae9b2f7c938/ Log: 64-bit support. diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -202,7 +202,7 @@ def _set_initial_bindings(self, inputargs): if IS_X86_64: - return self._set_initial_bindings_64(inputargs) + inputargs = self._set_initial_bindings_regs_64(inputargs) # ... 
# stack layout: arg2 # arg1 @@ -214,17 +214,43 @@ assert get_ebp_ofs(cur_frame_pos-1) == 2*WORD assert get_ebp_ofs(cur_frame_pos-2) == 3*WORD # - for i in range(len(inputargs)): - box = inputargs[i] + for box in inputargs: assert isinstance(box, Box) # - if box.type == FLOAT: + if IS_X86_32 and box.type == FLOAT: cur_frame_pos -= 2 else: cur_frame_pos -= 1 loc = self.fm.frame_pos(cur_frame_pos, box.type) self.fm.set_binding(box, loc) + def _set_initial_bindings_regs_64(self, inputargs): + # In reverse order for use with pop() + unused_gpr = [r9, r8, ecx, edx, esi, edi] + unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] + # + pass_on_stack = [] + # + for box in inputargs: + assert isinstance(box, Box) + # + if box.type == FLOAT: + if len(unused_xmm) > 0: + ask = unused_xmm.pop() + got = self.xrm.try_allocate_reg(box, selected_reg=ask) + assert ask == got + else: + pass_on_stack.append(box) + else: + if len(unused_gpr) > 0: + ask = unused_gpr.pop() + got = self.rm.try_allocate_reg(box, selected_reg=ask) + assert ask == got + else: + pass_on_stack.append(box) + # + return pass_on_stack + def possibly_free_var(self, var): if var.type == FLOAT: self.xrm.possibly_free_var(var) From noreply at buildbot.pypy.org Mon Dec 12 10:01:16 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 10:01:16 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Fix call_assembler. Message-ID: <20111212090116.100D582210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50409:3a5a45bf3039 Date: 2011-12-12 09:24 +0100 http://bitbucket.org/pypy/pypy/changeset/3a5a45bf3039/ Log: Fix call_assembler. diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -423,6 +423,7 @@ '''adds the following attributes to looptoken: _x86_function_addr (address of the generated func, as an int) _x86_loop_code (debug: addr of the start of the ResOps) + _x86_debug_nbargs (debug: total # of args) _x86_debug_checksum ''' # XXX this function is too longish and contains some code @@ -450,6 +451,7 @@ looptoken, clt.allgcrefs) looppos = self.mc.get_relative_pos() looptoken._x86_loop_code = looppos + looptoken._x86_debug_nbargs = len(inputargs) clt.frame_depth = -1 # temporarily clt.param_depth = -1 # temporarily frame_depth, param_depth = self._assemble(regalloc, operations) @@ -798,10 +800,7 @@ def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking - oldnonfloatlocs, oldfloatlocs = oldlooptoken._x86_arglocs - newnonfloatlocs, newfloatlocs = newlooptoken._x86_arglocs - assert len(oldnonfloatlocs) == len(newnonfloatlocs) - assert len(oldfloatlocs) == len(newfloatlocs) + assert oldlooptoken._x86_debug_nbargs == newlooptoken._x86_debug_nbargs # we overwrite the instructions at the old _x86_direct_bootstrap_code # to start with a JMP to the new _x86_direct_bootstrap_code. # Ideally we should rather patch all existing CALLs, but well. 
@@ -2235,10 +2234,10 @@ self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) descr = op.getdescr() assert isinstance(descr, JitCellToken) - assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) + assert len(arglocs) - 2 == descr._x86_debug_nbargs # - # Write a call to the direct_bootstrap_code of the target assembler - self._emit_call(fail_index, imm(descr._x86_direct_bootstrap_code), + # Write a call to the target assembler + self._emit_call(fail_index, imm(descr._x86_function_addr), arglocs, 2, tmp=eax) if op.result is None: assert result_loc is None From noreply at buildbot.pypy.org Mon Dec 12 10:01:17 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 10:01:17 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: redirect_call_assembler() fix. Message-ID: <20111212090117.384E982210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50410:dbe437dbf368 Date: 2011-12-12 09:33 +0100 http://bitbucket.org/pypy/pypy/changeset/dbe437dbf368/ Log: redirect_call_assembler() fix. diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -804,10 +804,11 @@ # we overwrite the instructions at the old _x86_direct_bootstrap_code # to start with a JMP to the new _x86_direct_bootstrap_code. # Ideally we should rather patch all existing CALLs, but well. - oldadr = oldlooptoken._x86_direct_bootstrap_code - target = newlooptoken._x86_direct_bootstrap_code + oldadr = oldlooptoken._x86_function_addr + target = newlooptoken._x86_function_addr mc = codebuf.MachineCodeBlockWrapper() mc.JMP(imm(target)) + assert mc.get_relative_pos() <= 13 #keep in sync with consider_label() mc.copy_to_raw_memory(oldadr) def dump(self, text): diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -1490,6 +1490,12 @@ nonfloatlocs[i] = loc if isinstance(loc, RegLoc): self.fm.mark_as_free(arg) + # + # if we are too close to the start of the loop, the label's target may + # get overridden by redirect_call_assembler(). (rare case) + while self.assembler.mc.get_relative_pos() < 13: + self.assembler.mc.NOP() + # descr._x86_arglocs = nonfloatlocs, floatlocs descr._x86_loop_code = self.assembler.mc.get_relative_pos() descr._x86_clt = self.assembler.current_clt From noreply at buildbot.pypy.org Mon Dec 12 10:01:18 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 10:01:18 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Fixes. Message-ID: <20111212090118.6ABD882210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50411:b9daf080154c Date: 2011-12-12 09:59 +0100 http://bitbucket.org/pypy/pypy/changeset/b9daf080154c/ Log: Fixes. 
diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -3047,13 +3047,13 @@ self.cpu.compile_bridge(faildescr1, inputargs, operations, looptoken1) looptoken2 = JitCellToken() - inputargs = [] + inputargs = [BoxInt()] operations = [ ResOperation(rop.JUMP, [ConstInt(0)], None, descr=targettoken1), ] self.cpu.compile_loop(inputargs, operations, looptoken2) - fail = self.cpu.execute_token(looptoken2) + fail = self.cpu.execute_token(looptoken2, -9) assert fail.identifier == 42 diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -423,7 +423,6 @@ '''adds the following attributes to looptoken: _x86_function_addr (address of the generated func, as an int) _x86_loop_code (debug: addr of the start of the ResOps) - _x86_debug_nbargs (debug: total # of args) _x86_debug_checksum ''' # XXX this function is too longish and contains some code @@ -447,11 +446,11 @@ # self._call_header_with_stack_check() stackadjustpos = self._patchable_stackadjust() + clt._debug_nbargs = len(inputargs) operations = regalloc.prepare_loop(inputargs, operations, looptoken, clt.allgcrefs) looppos = self.mc.get_relative_pos() looptoken._x86_loop_code = looppos - looptoken._x86_debug_nbargs = len(inputargs) clt.frame_depth = -1 # temporarily clt.param_depth = -1 # temporarily frame_depth, param_depth = self._assemble(regalloc, operations) @@ -800,7 +799,9 @@ def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking - assert oldlooptoken._x86_debug_nbargs == newlooptoken._x86_debug_nbargs + old_nbargs = oldlooptoken.compiled_loop_token._debug_nbargs + new_nbargs = newlooptoken.compiled_loop_token._debug_nbargs + assert old_nbargs == new_nbargs # we overwrite the instructions at the old _x86_direct_bootstrap_code # to start with a JMP to the new _x86_direct_bootstrap_code. # Ideally we should rather patch all existing CALLs, but well. @@ -808,7 +809,7 @@ target = newlooptoken._x86_function_addr mc = codebuf.MachineCodeBlockWrapper() mc.JMP(imm(target)) - assert mc.get_relative_pos() <= 13 #keep in sync with consider_label() + assert mc.get_relative_pos() <= 13 # keep in sync with prepare_loop() mc.copy_to_raw_memory(oldadr) def dump(self, text): @@ -2235,7 +2236,7 @@ self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) descr = op.getdescr() assert isinstance(descr, JitCellToken) - assert len(arglocs) - 2 == descr._x86_debug_nbargs + assert len(arglocs) - 2 == descr.compiled_loop_token._debug_nbargs # # Write a call to the target assembler self._emit_call(fail_index, imm(descr._x86_function_addr), @@ -2469,6 +2470,14 @@ self.gcrootmap_retaddr_forced = -1 def closing_jump(self, target_token): + # The backend's logic assumes that the target code is in a piece of + # assembler that was also called with the same number of arguments, + # so that the locations [ebp+8..] of the input arguments are valid + # stack locations both before and after the jump. 
+ my_nbargs = self.current_clt._debug_nbargs + target_nbargs = target_token._x86_clt._debug_nbargs + assert my_nbargs == target_nbargs + # target = target_token._x86_loop_code if target_token in self.target_tokens_currently_compiling: curpos = self.mc.get_relative_pos() + 5 diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -165,6 +165,7 @@ self.jump_target_descr = None self.close_stack_struct = 0 self.final_jump_op = None + self.min_bytes_before_label = 0 def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() @@ -188,6 +189,7 @@ # note: we need to make a copy of inputargs because possibly_free_vars # is also used on op args, which is a non-resizable list self.possibly_free_vars(list(inputargs)) + self.min_bytes_before_label = 13 return operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, @@ -460,8 +462,15 @@ i += 1 assert not self.rm.reg_bindings assert not self.xrm.reg_bindings + self.flush_loop() self.assembler.mc.mark_op(None) # end of the loop + def flush_loop(self): + # rare case: if the loop is too short, pad with NOPs + mc = self.assembler.mc + while mc.get_relative_pos() < self.min_bytes_before_label: + mc.NOP() + def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" @@ -1493,8 +1502,7 @@ # # if we are too close to the start of the loop, the label's target may # get overridden by redirect_call_assembler(). (rare case) - while self.assembler.mc.get_relative_pos() < 13: - self.assembler.mc.NOP() + self.flush_loop() # descr._x86_arglocs = nonfloatlocs, floatlocs descr._x86_loop_code = self.assembler.mc.get_relative_pos() diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -117,6 +117,9 @@ FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, lltype.Signed)) # def execute_token(executable_token, *args): + clt = executable_token.compiled_loop_token + assert len(args) == clt._debug_nbargs + # addr = executable_token._x86_function_addr func = rffi.cast(FUNCPTR, addr) #llop.debug_print(lltype.Void, ">>>> Entering", addr) From noreply at buildbot.pypy.org Mon Dec 12 10:27:51 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 10:27:51 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Fix test Message-ID: <20111212092751.5BEDD82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50412:8564866c0e68 Date: 2011-12-12 10:04 +0100 http://bitbucket.org/pypy/pypy/changeset/8564866c0e68/ Log: Fix test diff --git a/pypy/jit/backend/x86/test/test_assembler.py b/pypy/jit/backend/x86/test/test_assembler.py --- a/pypy/jit/backend/x86/test/test_assembler.py +++ b/pypy/jit/backend/x86/test/test_assembler.py @@ -46,12 +46,13 @@ xmm2] assert len(failargs) == len(locs) assembler.write_failure_recovery_description(mc, failargs, locs) - nums = [Assembler386.DESCR_INT + 4*(16+0), - Assembler386.DESCR_REF + 4*(16+1), - Assembler386.DESCR_FLOAT + 4*(16+10), - Assembler386.DESCR_INT + 4*(16+100), - Assembler386.DESCR_REF + 4*(16+101), - Assembler386.DESCR_FLOAT + 4*(16+110), + base = 8 + 8*IS_X86_64 + nums = [Assembler386.DESCR_INT + 4*(base+0), + Assembler386.DESCR_REF + 4*(base+1), + Assembler386.DESCR_FLOAT + 4*(base+10), + Assembler386.DESCR_INT + 4*(base+100), + 
Assembler386.DESCR_REF + 4*(base+101), + Assembler386.DESCR_FLOAT + 4*(base+110), Assembler386.CODE_HOLE, Assembler386.CODE_HOLE, Assembler386.DESCR_INT + 4*ebx.value, From noreply at buildbot.pypy.org Mon Dec 12 10:27:52 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 10:27:52 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Fix test Message-ID: <20111212092752.7BCE782210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50413:73f9a638b3f9 Date: 2011-12-12 10:11 +0100 http://bitbucket.org/pypy/pypy/changeset/73f9a638b3f9/ Log: Fix test diff --git a/pypy/jit/backend/test/calling_convention_test.py b/pypy/jit/backend/test/calling_convention_test.py --- a/pypy/jit/backend/test/calling_convention_test.py +++ b/pypy/jit/backend/test/calling_convention_test.py @@ -40,17 +40,18 @@ local_floats = list(floats) local_ints = list(ints) expected_result = 0.0 + arguments = [] for i in range(len(args)): x = args[i] if x[0] == 'f': x = local_floats.pop() t = longlong.getfloatstorage(x) - self.cpu.set_future_value_float(i, t) + arguments.append(t) else: x = local_ints.pop() - self.cpu.set_future_value_int(i, x) + arguments.append(x) expected_result += x - return expected_result + return arguments, expected_result @classmethod def get_funcbox(cls, cpu, func_ptr): @@ -110,9 +111,9 @@ looptoken = JitCellToken() done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - expected_result = self._prepare_args(args, floats, ints) + argvals, expected_result = self._prepare_args(args, floats, ints) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(x - expected_result) < 0.0001 @@ -258,8 +259,8 @@ done_number = self.cpu.get_fail_descr_number(called_loop.operations[-1].getdescr()) self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) - expected_result = self._prepare_args(args, floats, ints) - res = cpu.execute_token(called_looptoken) + argvals, expected_result = self._prepare_args(args, floats, ints) + res = cpu.execute_token(called_looptoken, *argvals) assert res.identifier == 3 t = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(t - expected_result) < 0.0001 @@ -288,8 +289,8 @@ self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # prepare call to called_loop - self._prepare_args(args, floats, ints) - res = cpu.execute_token(othertoken) + argvals, _ = self._prepare_args(args, floats, ints) + res = cpu.execute_token(othertoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert res.identifier == 4 assert abs(x - expected_result) < 0.0001 From noreply at buildbot.pypy.org Mon Dec 12 10:27:53 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 10:27:53 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Fix tests Message-ID: <20111212092753.A130E82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50414:70bc31a4262f Date: 2011-12-12 10:23 +0100 http://bitbucket.org/pypy/pypy/changeset/70bc31a4262f/ Log: Fix tests diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -649,14 +649,8 @@ exc = cpu.grab_exc_value() assert not exc - for i, box in 
enumerate(self.startvars): - if isinstance(box, BoxInt): - cpu.set_future_value_int(i, box.value) - elif isinstance(box, BoxFloat): - cpu.set_future_value_float(i, box.value) - else: - raise NotImplementedError(box) - fail = cpu.execute_token(self.runjitcelltoken()) + arguments = [box.value for box in self.startvars] + fail = cpu.execute_token(self.runjitcelltoken(), *arguments) assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): if isinstance(v, (BoxFloat, ConstFloat)): diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -142,19 +142,20 @@ loop = self.parse(ops) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - for i, arg in enumerate(args): + arguments = [] + for arg in args: if isinstance(arg, int): - self.cpu.set_future_value_int(i, arg) + arguments.append(arg) elif isinstance(arg, float): arg = longlong.getfloatstorage(arg) - self.cpu.set_future_value_float(i, arg) + arguments.append(arg) else: assert isinstance(lltype.typeOf(arg), lltype.Ptr) llgcref = lltype.cast_opaque_ptr(llmemory.GCREF, arg) - self.cpu.set_future_value_ref(i, llgcref) + arguments.append(llgcref) loop._jitcelltoken = looptoken if run: - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, *arguments) return loop def prepare_loop(self, ops): @@ -193,8 +194,8 @@ loop._jitcelltoken) return bridge - def run(self, loop): - return self.cpu.execute_token(loop._jitcelltoken) + def run(self, loop, *arguments): + return self.cpu.execute_token(loop._jitcelltoken, *arguments) class TestRegallocSimple(BaseTestRegalloc): def test_simple_loop(self): @@ -220,7 +221,7 @@ ''' loop = self.interpret(ops, [0, 0, 0, 0]) ops2 = ''' - [i5] + [i5, i6, i7, i8] label(i5, descr=targettoken2) i1 = int_add(i5, 1) i3 = int_add(i1, 1) @@ -229,14 +230,13 @@ guard_true(i2) [i4] jump(i4, descr=targettoken2) ''' - loop2 = self.interpret(ops2, [0]) + loop2 = self.interpret(ops2, [0, 0, 0, 0]) bridge_ops = ''' [i4] jump(i4, i4, i4, i4, descr=targettoken) ''' bridge = self.attach_bridge(bridge_ops, loop2, 5) - self.cpu.set_future_value_int(0, 0) - self.run(loop2) + self.run(loop2, 0, 0, 0, 0) assert self.getint(0) == 31 assert self.getint(1) == 30 assert self.getint(2) == 30 @@ -274,8 +274,7 @@ loop = self.interpret(ops, [0]) assert self.getint(0) == 1 bridge = self.attach_bridge(bridge_ops, loop, 2) - self.cpu.set_future_value_int(0, 0) - self.run(loop) + self.run(loop, 0) assert self.getint(0) == 1 def test_inputarg_unused(self): @@ -301,9 +300,7 @@ assert self.getint(0) == 0 assert self.getint(1) == 10 bridge = self.attach_bridge(bridge_ops, loop, 0) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - self.run(loop) + self.run(loop, 0, 10) assert self.getint(0) == 0 assert self.getint(1) == 10 @@ -320,9 +317,7 @@ finish(1, 2) ''' self.attach_bridge(bridge_ops, loop, 0) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 1) - self.run(loop) + self.run(loop, 0, 1) def test_spill_for_constant(self): ops = ''' @@ -406,7 +401,7 @@ guard_true(i5) [i2, i1] jump(i0, i18, i15, i16, i2, i1, i4, descr=targettoken) ''' - self.interpret(ops, [0, 1, 2, 3]) + self.interpret(ops, [0, 1, 2, 3, 0, 0, 0]) def test_op_result_unused(self): ops = ''' @@ -440,9 +435,7 @@ finish(i0, i1, i2, i3, i4, i5, i6, i7, i8) ''' self.attach_bridge(bridge_ops, loop, 1) - for i in range(9): - 
self.cpu.set_future_value_int(i, i) - self.run(loop) + self.run(loop, 0, 1, 2, 3, 4, 5, 6, 7, 8) assert self.getints(9) == range(9) def test_loopargs(self): @@ -452,27 +445,13 @@ jump(i4, i1, i2, i3) """ regalloc = self.prepare_loop(ops) - assert len(regalloc.rm.reg_bindings) == 2 + if IS_X86_64: + assert len(regalloc.rm.reg_bindings) == 4 + assert len(regalloc.fm.bindings) == 0 + else: + assert len(regalloc.rm.reg_bindings) == 0 + assert len(regalloc.fm.bindings) == 4 - def test_loopargs_2(self): - ops = """ - [i0, i1, i2, i3] - i4 = int_add(i0, i1) - finish(i4, i1, i2, i3) - """ - regalloc = self.prepare_loop(ops) - assert len(regalloc.rm.reg_bindings) == 2 - - def test_loopargs_3(self): - ops = """ - [i0, i1, i2, i3] - i4 = int_add(i0, i1) - guard_true(i4) [i0, i1, i2, i3, i4] - jump(i4, i1, i2, i3) - """ - regalloc = self.prepare_loop(ops) - assert len(regalloc.rm.reg_bindings) == 2 - class TestRegallocCompOps(BaseTestRegalloc): @@ -640,8 +619,8 @@ i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) - assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) + assert self.getints(10) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9] clt = loop._jitcelltoken.compiled_loop_token assert clt.param_depth == self.expected_param_depth(1) @@ -652,8 +631,8 @@ i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) finish(i11, i1, i2, i3, i4, i5, i6, i7, i8, i9) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) - assert self.getints(11) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) + assert self.getints(10) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9] clt = loop._jitcelltoken.compiled_loop_token assert clt.param_depth == self.expected_param_depth(2) @@ -689,9 +668,7 @@ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) - self.cpu.set_future_value_int(0, 4) - self.cpu.set_future_value_int(1, 7) - self.run(loop) + self.run(loop, 4, 7) assert self.getint(0) == 5*7 def test_bridge_calls_2(self): @@ -712,8 +689,6 @@ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) - self.cpu.set_future_value_int(0, 4) - self.cpu.set_future_value_int(1, 7) - self.run(loop) + self.run(loop, 4, 7) assert self.getint(0) == 29 diff --git a/pypy/jit/backend/x86/test/test_regalloc2.py b/pypy/jit/backend/x86/test/test_regalloc2.py --- a/pypy/jit/backend/x86/test/test_regalloc2.py +++ b/pypy/jit/backend/x86/test/test_regalloc2.py @@ -22,8 +22,7 @@ cpu.setup_once() looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 9) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, 9) assert cpu.get_latest_value_int(0) == (9 >> 3) assert cpu.get_latest_value_int(1) == (~18) @@ -45,8 +44,7 @@ cpu.setup_once() looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, -10) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, -10) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == -1000 assert cpu.get_latest_value_int(2) == 1 @@ -142,17 +140,7 @@ cpu.setup_once() looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, -13) - cpu.set_future_value_int(1, 10) - cpu.set_future_value_int(2, 10) - cpu.set_future_value_int(3, 8) - 
cpu.set_future_value_int(4, -8) - cpu.set_future_value_int(5, -16) - cpu.set_future_value_int(6, -18) - cpu.set_future_value_int(7, 46) - cpu.set_future_value_int(8, -12) - cpu.set_future_value_int(9, 26) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, -13, 10, 10, 8, -8, -16, -18, 46, -12, 26) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == 0 assert cpu.get_latest_value_int(2) == 0 @@ -257,17 +245,7 @@ cpu.setup_once() looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 17) - cpu.set_future_value_int(1, -20) - cpu.set_future_value_int(2, -6) - cpu.set_future_value_int(3, 6) - cpu.set_future_value_int(4, 1) - cpu.set_future_value_int(5, 13) - cpu.set_future_value_int(6, 13) - cpu.set_future_value_int(7, 9) - cpu.set_future_value_int(8, 49) - cpu.set_future_value_int(9, 8) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, 17, -20, -6, 6, 1, 13, 13, 9, 49, 8) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == 8 assert cpu.get_latest_value_int(2) == 1 From noreply at buildbot.pypy.org Mon Dec 12 10:27:54 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 10:27:54 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Fix test Message-ID: <20111212092754.C180082210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50415:a7c8b3b608d5 Date: 2011-12-12 10:27 +0100 http://bitbucket.org/pypy/pypy/changeset/a7c8b3b608d5/ Log: Fix test diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -19,8 +19,7 @@ finish(i3, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - self.cpu.set_future_value_int(0, 0) - fail = self.run(loop) + fail = self.run(loop, 0) assert fail.identifier == 2 assert self.getint(0) == 21 @@ -55,8 +54,7 @@ assert descr._x86_bridge_param_depth == 0 # the force_spill() forces the stack to grow assert new > previous - self.cpu.set_future_value_int(0, 0) - fail = self.run(loop) + fail = self.run(loop, 0) assert fail.identifier == 2 assert self.getint(0) == 21 assert self.getint(1) == 22 @@ -71,20 +69,19 @@ i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] jump(i1, i10, i11, i12, i13, i14, i15, i16, descr=targettoken) - ''', [0]) + ''', [0, 0, 0, 0, 0, 0, 0, 0]) other_loop = self.interpret(''' - [i3] + [i3, i10, i11, i12, i13, i14, i15, i16] label(i3, descr=targettoken2) guard_false(i3, descr=fdescr2) [i3] jump(i3, descr=targettoken2) - ''', [1]) + ''', [1, 0, 0, 0, 0, 0, 0, 0]) ops = ''' [i3] jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=targettoken) ''' bridge = self.attach_bridge(ops, other_loop, 1) - self.cpu.set_future_value_int(0, 1) - fail = self.run(other_loop) + fail = self.run(other_loop, 1, 0, 0, 0, 0, 0, 0, 0) assert fail.identifier == 1 def test_bridge_jumps_to_self_deeper(self): @@ -100,7 +97,7 @@ i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] jump(i3, i30, 1, i30, i30, i30, descr=targettoken) - ''', [0]) + ''', [0, 0, 0, 0, 0, 0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' @@ -123,10 +120,7 @@ # the force_spill() forces the stack to grow assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth assert guard_op.getdescr()._x86_bridge_param_depth == 0 - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 0) - self.cpu.set_future_value_int(2, 0) 
- self.run(loop) + self.run(loop, 0, 0, 0, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 @@ -142,7 +136,7 @@ i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] jump(i3, i1, i2, descr=targettoken) - ''', [0]) + ''', [0, 0, 0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' @@ -150,10 +144,7 @@ jump(i3, 0, 1, descr=targettoken) ''' bridge = self.attach_bridge(ops, loop, 5) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 0) - self.cpu.set_future_value_int(2, 0) - self.run(loop) + self.run(loop, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 From noreply at buildbot.pypy.org Mon Dec 12 10:37:57 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 10:37:57 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Yay. More code is killed. Killing this was actually Message-ID: <20111212093757.03EA782210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50416:bdc61f3c13cb Date: 2011-12-12 10:36 +0100 http://bitbucket.org/pypy/pypy/changeset/bdc61f3c13cb/ Log: Yay. More code is killed. Killing this was actually the goal of the branch, somehow. diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -987,6 +987,7 @@ self._may_force = self.opindex try: inpargs = _from_opaque(ctl.compiled_version).inputargs + assert len(inpargs) == len(args) for i, inparg in enumerate(inpargs): TYPE = inparg.concretetype if TYPE is lltype.Signed: diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -802,18 +802,22 @@ assert exception, "PropagateExceptionDescr: no exception??" raise metainterp_sd.ExitFrameWithExceptionRef(cpu, exception) -def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redboxes, +def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redargtypes, memory_manager=None): """Make a LoopToken that corresponds to assembler code that just calls back the interpreter. Used temporarily: a fully compiled version of the code may end up replacing it. """ - # 'redboxes' is only used to know the types of red arguments. jitcell_token = make_jitcell_token(jitdriver_sd) - # 'nb_red_args' might be smaller than len(redboxes), - # because it doesn't include the virtualizable boxes. 
nb_red_args = jitdriver_sd.num_red_args - inputargs = [box.clonebox() for box in redboxes[:nb_red_args]] + assert len(redargtypes) == nb_red_args + inputargs = [] + for kind in redargtypes: + if kind == history.INT: box = BoxInt() + elif kind == history.REF: box = BoxPtr() + elif kind == history.FLOAT: box = BoxFloat() + else: raise AssertionError + inputargs.append(box) k = jitdriver_sd.portal_runner_adr funcbox = history.ConstInt(heaptracker.adr2int(k)) callargs = [funcbox] + greenboxes + inputargs diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -2423,22 +2423,6 @@ abox, ConstInt(j), itembox) assert i + 1 == len(self.virtualizable_boxes) - def gen_load_from_other_virtualizable(self, vinfo, vbox): - boxes = [] - assert vinfo is not None - for i in range(vinfo.num_static_extra_boxes): - descr = vinfo.static_field_descrs[i] - boxes.append(self.execute_and_record(rop.GETFIELD_GC, descr, vbox)) - virtualizable = vinfo.unwrap_virtualizable_box(vbox) - for k in range(vinfo.num_arrays): - descr = vinfo.array_field_descrs[k] - abox = self.execute_and_record(rop.GETFIELD_GC, descr, vbox) - descr = vinfo.array_descrs[k] - for j in range(vinfo.get_array_length(virtualizable, k)): - boxes.append(self.execute_and_record(rop.GETARRAYITEM_GC, descr, - abox, ConstInt(j))) - return boxes - def replace_box(self, oldbox, newbox): assert isinstance(oldbox, Box) for frame in self.framestack: @@ -2510,14 +2494,8 @@ greenargs = arglist[1:num_green_args+1] args = arglist[num_green_args+1:] assert len(args) == targetjitdriver_sd.num_red_args - vinfo = targetjitdriver_sd.virtualizable_info - if vinfo is not None: - index = targetjitdriver_sd.index_of_virtualizable - vbox = args[index] - args = args + self.gen_load_from_other_virtualizable(vinfo, vbox) - # ^^^ and not "+=", which makes 'args' a resizable list warmrunnerstate = targetjitdriver_sd.warmstate - token = warmrunnerstate.get_assembler_token(greenargs, args) + token = warmrunnerstate.get_assembler_token(greenargs) op = op.copy_and_change(rop.CALL_ASSEMBLER, args=args, descr=token) self.history.operations.append(op) diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -554,8 +554,9 @@ jd.on_compile = lambda *args: None jd.on_compile_bridge = lambda *args: None - def get_assembler_token(greenkey, redboxes): - # 'redboxes' is only used to know the types of red arguments + redargtypes = ''.join([kind[0] for kind in jd.red_args_types]) + + def get_assembler_token(greenkey): cell = self.jit_cell_at_key(greenkey) procedure_token = cell.get_procedure_token() if procedure_token is None: @@ -564,7 +565,7 @@ cell.counter = 0 # but was freed in the meantime. memmgr = warmrunnerdesc.memory_manager procedure_token = compile_tmp_callback(cpu, jd, greenkey, - redboxes, memmgr) + redargtypes, memmgr) cell.set_procedure_token(procedure_token) return procedure_token self.get_assembler_token = get_assembler_token From noreply at buildbot.pypy.org Mon Dec 12 11:53:05 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 11:53:05 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Add an extra assertion in the llgraph backend. 
Message-ID: <20111212105305.7D44D82ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50418:cd85bd047d43 Date: 2011-12-12 11:15 +0100 http://bitbucket.org/pypy/pypy/changeset/cd85bd047d43/ Log: Add an extra assertion in the llgraph backend. diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -328,6 +328,10 @@ _variables.append(v) return r +def compile_started_vars(clt): + if not hasattr(clt, '_debug_argtypes'): # only when compiling the loop + clt._debug_argtypes = [v.concretetype for v in _variables] + def compile_add(loop, opnum): loop = _from_opaque(loop) loop.operations.append(Operation(opnum)) @@ -355,11 +359,13 @@ TARGET_TOKENS = weakref.WeakKeyDictionary() -def compile_add_target_token(loop, descr): +def compile_add_target_token(loop, descr, clt): + # here, 'clt' is the compiled_loop_token of the original loop that + # we are compiling loop = _from_opaque(loop) op = loop.operations[-1] descrobj = _normalize(descr) - TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args + TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args, clt def compile_add_var(loop, intvar): loop = _from_opaque(loop) @@ -395,10 +401,13 @@ _variables.append(v) return r -def compile_add_jump_target(loop, targettoken): +def compile_add_jump_target(loop, targettoken, source_clt): loop = _from_opaque(loop) descrobj = _normalize(targettoken) - loop_target, target_opindex, target_inputargs = TARGET_TOKENS[descrobj] + (loop_target, target_opindex, target_inputargs, target_clt + ) = TARGET_TOKENS[descrobj] + # + assert source_clt._debug_argtypes == target_clt._debug_argtypes # op = loop.operations[-1] op.jump_target = loop_target @@ -406,6 +415,7 @@ op.jump_target_inputargs = target_inputargs assert op.opnum == rop.JUMP assert len(op.args) == len(target_inputargs) + # if loop_target == loop: log.info("compiling new loop") else: diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -138,11 +138,12 @@ clt = original_loop_token.compiled_loop_token clt.loop_and_bridges.append(c) clt.compiling_a_bridge() - self._compile_loop_or_bridge(c, inputargs, operations) + self._compile_loop_or_bridge(c, inputargs, operations, clt) old, oldindex = faildescr._compiled_fail llimpl.compile_redirect_fail(old, oldindex, c) - def compile_loop(self, inputargs, operations, jitcell_token, log=True, name=''): + def compile_loop(self, inputargs, operations, jitcell_token, + log=True, name=''): """In a real assembler backend, this should assemble the given list of operations. Here we just generate a similar CompiledLoop instance. 
The code here is RPython, whereas the code in llimpl @@ -153,14 +154,14 @@ clt.loop_and_bridges = [c] clt.compiled_version = c jitcell_token.compiled_loop_token = clt - self._compile_loop_or_bridge(c, inputargs, operations) + self._compile_loop_or_bridge(c, inputargs, operations, clt) def free_loop_and_bridges(self, compiled_loop_token): for c in compiled_loop_token.loop_and_bridges: llimpl.mark_as_free(c) model.AbstractCPU.free_loop_and_bridges(self, compiled_loop_token) - def _compile_loop_or_bridge(self, c, inputargs, operations): + def _compile_loop_or_bridge(self, c, inputargs, operations, clt): var2index = {} for box in inputargs: if isinstance(box, history.BoxInt): @@ -172,10 +173,11 @@ var2index[box] = llimpl.compile_start_float_var(c) else: raise Exception("box is: %r" % (box,)) - self._compile_operations(c, operations, var2index) + llimpl.compile_started_vars(clt) + self._compile_operations(c, operations, var2index, clt) return c - def _compile_operations(self, c, operations, var2index): + def _compile_operations(self, c, operations, var2index, clt): for op in operations: llimpl.compile_add(c, op.getopnum()) descr = op.getdescr() @@ -187,7 +189,7 @@ assert op.getopnum() != rop.JUMP llimpl.compile_add_loop_token(c, descr) if isinstance(descr, history.TargetToken) and op.getopnum() == rop.LABEL: - llimpl.compile_add_target_token(c, descr) + llimpl.compile_add_target_token(c, descr, clt) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython c._obj.externalobj.operations[-1].setdescr(descr) @@ -241,7 +243,7 @@ assert op.is_final() if op.getopnum() == rop.JUMP: targettoken = op.getdescr() - llimpl.compile_add_jump_target(c, targettoken) + llimpl.compile_add_jump_target(c, targettoken, clt) elif op.getopnum() == rop.FINISH: faildescr = op.getdescr() index = self.get_fail_descr_number(faildescr) From noreply at buildbot.pypy.org Mon Dec 12 11:53:04 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 11:53:04 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Fix tests Message-ID: <20111212105304.58BE582210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50417:5960a81ae802 Date: 2011-12-12 10:13 +0000 http://bitbucket.org/pypy/pypy/changeset/5960a81ae802/ Log: Fix tests diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -172,8 +172,7 @@ result_type = INT # loop_token = compile_tmp_callback(cpu, FakeJitDriverSD(), - [ConstInt(12), ConstInt(34)], - [BoxInt(56), ConstInt(78), BoxInt(90)]) + [ConstInt(12), ConstInt(34)], "ii") # raiseme = None # only two arguments must be passed in diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -348,8 +348,8 @@ def get_fail_descr_from_number(self, no): return FakeFailDescr(no) - def execute_token(self, token, *args): - assert 0 + def make_execute_token(self, *ARGS): + return "not callable" driver = JitDriver(reds = ['red'], greens = ['green']) diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -187,6 +187,7 @@ _confirm_enter_jit_ptr = None _can_never_inline_ptr = None _should_unroll_one_iteration_ptr = None + 
red_args_types = [] class FakeCell: dont_trace_here = False state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) @@ -216,6 +217,7 @@ _can_never_inline_ptr = None _get_jitcell_at_ptr = None _should_unroll_one_iteration_ptr = None + red_args_types = [] state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) state.make_jitdriver_callbacks() res = state.get_location_str([ConstInt(5), constfloat(42.5)]) @@ -241,6 +243,7 @@ _can_never_inline_ptr = None _get_jitcell_at_ptr = None _should_unroll_one_iteration_ptr = None + red_args_types = [] state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) state.make_jitdriver_callbacks() @@ -266,6 +269,7 @@ _can_never_inline_ptr = llhelper(CAN_NEVER_INLINE, can_never_inline) _get_jitcell_at_ptr = None _should_unroll_one_iteration_ptr = None + red_args_types = [] state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) state.make_jitdriver_callbacks() From noreply at buildbot.pypy.org Mon Dec 12 11:53:06 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 11:53:06 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Fix Message-ID: <20111212105306.9DFEE82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50419:bcdbd1bc9618 Date: 2011-12-12 11:15 +0100 http://bitbucket.org/pypy/pypy/changeset/bcdbd1bc9618/ Log: Fix diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -6,6 +6,7 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, JitCellToken from pypy.jit.metainterp.history import BoxPtr, ConstPtr, TargetToken from pypy.jit.metainterp.history import BoxFloat, ConstFloat, Const +from pypy.jit.metainterp.history import INT, FLOAT from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.executor import execute_nonspec from pypy.jit.metainterp.resoperation import opname @@ -711,10 +712,21 @@ # to build_bridge().) # First make up the other loop... - subset = bridge_builder.subset_of_intvars(r) - subset = [i for i in subset if i in fail_args] - if len(subset) == 0: - return False + # + # New restriction: must have the same argument count and types + # as the original loop + subset = [] + for box in self.startvars: + srcbox = r.choice(fail_args) + if srcbox.type != box.type: + if box.type == INT: + srcbox = ConstInt(r.random_integer()) + elif box.type == FLOAT: + srcbox = ConstFloat(r.random_float_storage()) + else: + raise AssertionError(box.type) + subset.append(srcbox) + # args = [x.clonebox() for x in subset] rl = RandomLoop(self.builder.cpu, self.builder.fork, r, args) From noreply at buildbot.pypy.org Mon Dec 12 11:53:07 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 11:53:07 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Expand the comment. Message-ID: <20111212105307.BF10382210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50420:6e71614fa5d3 Date: 2011-12-12 11:29 +0100 http://bitbucket.org/pypy/pypy/changeset/6e71614fa5d3/ Log: Expand the comment. 
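(Editor's note: several changesets in this series -- the _debug_nbargs assertion in closing_jump(), the _debug_argtypes check added to the llgraph backend, and the test_random subset fix just above -- enforce the same invariant: a JUMP may only target a label whose input arguments match, in number and type, the arguments the assembler was originally entered with, so that the stack locations [ebp+8..] stay valid across the jump. A rough plain-Python paraphrase of that check, not the actual RPython code:

    def check_jump_compatible(source_argtypes, target_argtypes):
        # both are lists of type tags, e.g. ['int', 'int', 'float'];
        # a mismatch would mean the argument slots get reinterpreted
        # with the wrong kind after the jump
        assert len(source_argtypes) == len(target_argtypes)
        for src, tgt in zip(source_argtypes, target_argtypes):
            assert src == tgt

    check_jump_compatible(['int', 'float'], ['int', 'float'])   # fine
    # check_jump_compatible(['int'], ['int', 'int'])            # would assert
)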
diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -737,10 +737,23 @@ class TargetToken(AbstractDescr): def __init__(self, targeting_jitcell_token=None): - # The jitcell to which jumps might result in a jump to this label + # Warning, two different jitcell_tokens here! + # + # * 'targeting_jitcell_token' is only useful for the front-end, + # and it means: consider the LABEL that uses this TargetToken. + # At this position, the state is logically the one given + # by targeting_jitcell_token. So e.g. if we want to enter the + # JIT with some given green args, if the jitcell matches, then + # we can jump to this LABEL. + # + # * 'original_jitcell_token' is information from the backend's + # point of view: it means that this TargetToken is used in + # a LABEL that belongs to either: + # - a loop; then 'original_jitcell_token' is this loop + # - or a bridge; then 'original_jitcell_token' is the loop + # out of which we made this bridge + # self.targeting_jitcell_token = targeting_jitcell_token - - # The jitcell where the trace containing the label with this TargetToken begins self.original_jitcell_token = None self.virtual_state = None From noreply at buildbot.pypy.org Mon Dec 12 11:53:08 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 11:53:08 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Fix test_random. Message-ID: <20111212105308.E318782210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50421:ee8cf5779f0c Date: 2011-12-12 11:43 +0100 http://bitbucket.org/pypy/pypy/changeset/ee8cf5779f0c/ Log: Fix test_random. diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -414,7 +414,8 @@ op.jump_target_opindex = target_opindex op.jump_target_inputargs = target_inputargs assert op.opnum == rop.JUMP - assert len(op.args) == len(target_inputargs) + assert [v.concretetype for v in op.args] == ( + [v.concretetype for v in target_inputargs]) # if loop_target == loop: log.info("compiling new loop") diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -617,8 +617,13 @@ return self.loop._jitcelltoken if not hasattr(self, '_initialjumploop_celltoken'): self._initialjumploop_celltoken = JitCellToken() - self.cpu.compile_loop(self.startvars[:], - [ResOperation(rop.JUMP, self.startvars[:], None, + args = [] + for box in self.startvars: + if box not in self.loop.inputargs: + box = box.constbox() + args.append(box) + self.cpu.compile_loop(self.loop.inputargs, + [ResOperation(rop.JUMP, args, None, descr=self.loop._targettoken)], self._initialjumploop_celltoken) return self._initialjumploop_celltoken @@ -650,7 +655,7 @@ exc = cpu.grab_exc_value() assert not exc - arguments = [box.value for box in self.startvars] + arguments = [box.value for box in self.loop.inputargs] fail = cpu.execute_token(self.runjitcelltoken(), *arguments) assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): @@ -716,7 +721,7 @@ # New restriction: must have the same argument count and types # as the original loop subset = [] - for box in self.startvars: + for box in self.loop.inputargs: srcbox = r.choice(fail_args) if srcbox.type != box.type: if box.type == INT: From noreply at 
buildbot.pypy.org Mon Dec 12 12:05:47 2011 From: noreply at buildbot.pypy.org (hager) Date: Mon, 12 Dec 2011 12:05:47 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: fix wrong computation of stack offset Message-ID: <20111212110547.8DFD082ABD@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50422:f4618a1f60ba Date: 2011-12-12 12:04 +0100 http://bitbucket.org/pypy/pypy/changeset/f4618a1f60ba/ Log: fix wrong computation of stack offset in gen_direct_bootstrap_code diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -436,7 +436,7 @@ # load values passed on the stack to the corresponding locations stack_position = self.OFFSET_SPP_TO_OLD_BACKCHAIN\ - + BACKCHAIN_SIZE + + BACKCHAIN_SIZE * WORD count = 0 for i in range(reg_args, len(inputargs)): From noreply at buildbot.pypy.org Mon Dec 12 12:05:48 2011 From: noreply at buildbot.pypy.org (hager) Date: Mon, 12 Dec 2011 12:05:48 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: remove comments and debug stuff Message-ID: <20111212110548.AF07382ABD@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50423:9130b26b5bad Date: 2011-12-12 12:05 +0100 http://bitbucket.org/pypy/pypy/changeset/9130b26b5bad/ Log: remove comments and debug stuff diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -251,7 +251,6 @@ else: assert 0, 'unknown type' - assert enc[i] == self.END_OF_LOCS descr = decode32(enc, i+1) self.fail_boxes_count = fail_index @@ -290,12 +289,6 @@ def _gen_leave_jitted_hook_code(self, save_exc=False): mc = PPCBuilder() - - # PLAN: - # ===== - # save caller save registers AND(!) r0 - # (r0 contains address of state encoding) - mc.b_abs(self.exit_code_adr) mc.prepare_insts_blocks() return mc.materialize(self.cpu.asmmemmgr, [], @@ -312,7 +305,6 @@ # - jump back to the calling code def _gen_exit_path(self): mc = PPCBuilder() - mc.mr(r.r6.value, r.r3.value) self._save_managed_regs(mc) decode_func_addr = llhelper(self.recovery_func_sign, self.failure_recovery_func) @@ -325,8 +317,6 @@ r2_value = descr[1] r11_value = descr[2] - - # load parameters into parameter registers if IS_PPC_32: mc.lwz(r.r3.value, r.SPP.value, self.ENCODING_AREA) # address of state encoding From notifications-noreply at bitbucket.org Mon Dec 12 12:58:23 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Mon, 12 Dec 2011 11:58:23 -0000 Subject: [pypy-commit] Notification: benchmarks Message-ID: <20111212115823.3349.98671@bitbucket03.managed.contegix.com> You have received a notification from pypyja. Hi, I forked benchmarks. My fork is at https://bitbucket.org/pypyja/benchmarks. -- Disable notifications at https://bitbucket.org/account/notifications/ From notifications-noreply at bitbucket.org Mon Dec 12 12:59:14 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Mon, 12 Dec 2011 11:59:14 -0000 Subject: [pypy-commit] Notification: Your access to benchmarks has been revoked. Message-ID: <20111212115914.13394.56818@bitbucket02.managed.contegix.com> You have received a notification from pypyja. You no longer have access to the source of benchmarks. 
-- Disable notifications at https://bitbucket.org/account/notifications/ From noreply at buildbot.pypy.org Mon Dec 12 14:28:29 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 14:28:29 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Fix. This is also a bug on trunk, actually, but because the Message-ID: <20111212132829.2E71B82ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50424:7e98ea1a1c5e Date: 2011-12-12 12:20 +0100 http://bitbucket.org/pypy/pypy/changeset/7e98ea1a1c5e/ Log: Fix. This is also a bug on trunk, actually, but because the stack checking is only done after CALL_ASSEMBLER, then %ebp has still a not-too-incorrect value... diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -309,12 +309,11 @@ mc.MOVSD_sx(8*i, i) # xmm0 to xmm7 # if IS_X86_32: - mc.LEA_rb(eax.value, +8) stack_size += 2*WORD mc.PUSH_r(eax.value) # alignment - mc.PUSH_r(eax.value) + mc.PUSH_r(esp.value) elif IS_X86_64: - mc.LEA_rb(edi.value, +16) + mc.MOV_rr(edi.value, esp.value) # # esp is now aligned to a multiple of 16 again mc.CALL(imm(slowpathaddr)) @@ -325,7 +324,7 @@ jnz_location = mc.get_relative_pos() # if IS_X86_32: - mc.ADD_ri(esp.value, 2*WORD) + mc.ADD_ri(esp.value, 2*WORD) # cancel the two PUSHes above elif IS_X86_64: # restore the registers for i in range(7, -1, -1): From noreply at buildbot.pypy.org Mon Dec 12 15:34:34 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 15:34:34 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Fix test Message-ID: <20111212143434.9902882ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50425:cc11be2ea8a9 Date: 2011-12-12 15:01 +0100 http://bitbucket.org/pypy/pypy/changeset/cc11be2ea8a9/ Log: Fix test diff --git a/pypy/jit/tool/jitoutput.py b/pypy/jit/tool/jitoutput.py --- a/pypy/jit/tool/jitoutput.py +++ b/pypy/jit/tool/jitoutput.py @@ -10,9 +10,6 @@ REGEXES = [ (('tracing_no', 'tracing_time'), '^Tracing:\s+([\d.]+)\s+([\d.]+)$'), (('backend_no', 'backend_time'), '^Backend:\s+([\d.]+)\s+([\d.]+)$'), - (('asm_no',), '^Running asm:\s+([\d.]+)$'), - (('blackhole_no',), - '^Blackhole:\s+([\d.]+)$'), (None, '^TOTAL.*$'), (('ops.total',), '^ops:\s+(\d+)$'), (('recorded_ops.total',), '^recorded ops:\s+(\d+)$'), diff --git a/pypy/jit/tool/test/test_jitoutput.py b/pypy/jit/tool/test/test_jitoutput.py --- a/pypy/jit/tool/test/test_jitoutput.py +++ b/pypy/jit/tool/test/test_jitoutput.py @@ -34,8 +34,6 @@ # assert did not crash # asserts below are a bit delicate, possibly they might be deleted assert info.tracing_no == 1 - assert info.asm_no == 1 - assert info.blackhole_no == 1 assert info.backend_no == 1 assert info.ops.total == 2 assert info.recorded_ops.total == 2 @@ -47,8 +45,6 @@ DATA = '''Tracing: 1 0.006992 Backend: 1 0.000525 -Running asm: 1 -Blackhole: 1 TOTAL: 0.025532 ops: 2 recorded ops: 6 @@ -75,8 +71,6 @@ info = parse_prof(DATA) assert info.tracing_no == 1 assert info.tracing_time == 0.006992 - assert info.asm_no == 1 - assert info.blackhole_no == 1 assert info.backend_no == 1 assert info.backend_time == 0.000525 assert info.ops.total == 2 From noreply at buildbot.pypy.org Mon Dec 12 15:34:35 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 15:34:35 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Translation fix. 
Message-ID: <20111212143435.BC4E382ABE@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50426:b43f4ac19a1b Date: 2011-12-12 15:17 +0100 http://bitbucket.org/pypy/pypy/changeset/b43f4ac19a1b/ Log: Translation fix. diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -330,7 +330,11 @@ def compile_started_vars(clt): if not hasattr(clt, '_debug_argtypes'): # only when compiling the loop - clt._debug_argtypes = [v.concretetype for v in _variables] + argtypes = [v.concretetype for v in _variables] + try: + clt._debug_argtypes = argtypes + except AttributeError: # when translated + pass def compile_add(loop, opnum): loop = _from_opaque(loop) @@ -407,7 +411,10 @@ (loop_target, target_opindex, target_inputargs, target_clt ) = TARGET_TOKENS[descrobj] # - assert source_clt._debug_argtypes == target_clt._debug_argtypes + try: + assert source_clt._debug_argtypes == target_clt._debug_argtypes + except AttributeError: # when translated + pass # op = loop.operations[-1] op.jump_target = loop_target @@ -1828,6 +1835,7 @@ setannotation(compile_start_int_var, annmodel.SomeInteger()) setannotation(compile_start_ref_var, annmodel.SomeInteger()) setannotation(compile_start_float_var, annmodel.SomeInteger()) +setannotation(compile_started_vars, annmodel.s_None) setannotation(compile_add, annmodel.s_None) setannotation(compile_add_descr, annmodel.s_None) setannotation(compile_add_descr_arg, annmodel.s_None) From noreply at buildbot.pypy.org Mon Dec 12 15:34:36 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 15:34:36 +0100 (CET) Subject: [pypy-commit] pypy default: Tests and fix for ``for c in string'' when the string turns out Message-ID: <20111212143436.E053982ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50427:5ba9f567b515 Date: 2011-12-12 15:17 +0100 http://bitbucket.org/pypy/pypy/changeset/5ba9f567b515/ Log: Tests and fix for ``for c in string'' when the string turns out to be always a single character. 
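(Editor's note: the ``for c in string'' fix in changeset 5ba9f567b515 concerns RPython code where the string being iterated over is known to always be a single character, which RPython apparently specializes differently from a general string; the one-line fix makes make_iterator_repr() return self.repr.iterator_repr instead of self.iterator_repr. Under plain CPython the pattern is trivially fine -- the breakage only showed up when translating. The shape of code exercised by the new tests:

    def fn(i):
        # the iterated string is a one-character constant, the case
        # that previously broke during rtyping
        for c in 'a':
            i += ord(c) + 10000
        return i

    assert fn(0) == ord('a') + 10000

The second new test covers the same pattern when the single-character string is not a compile-time constant.)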
diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -99,7 +99,7 @@ return p def make_iterator_repr(self): - return self.iterator_repr + return self.repr.iterator_repr def can_ll_be_null(self, s_value): # XXX unicode diff --git a/pypy/rpython/test/test_rstr.py b/pypy/rpython/test/test_rstr.py --- a/pypy/rpython/test/test_rstr.py +++ b/pypy/rpython/test/test_rstr.py @@ -89,6 +89,28 @@ res = self.interpret(fn, [i]) assert res is True + def test_iter_over_char(self): + const = self.const + def fn(i): + for c in const('a'): + i += ord(c) + 10000 + return i + res = self.interpret(fn, [0]) + assert res == ord('a') + 10000 + + def test_iter_over_nonconst_char(self): + const = self.const + def fn(i): + if i > 0: + c = const('a') + else: + c = const('A') + for c in c: + i += ord(c) + 10000 + return i + res = self.interpret(fn, [1]) + assert res == 1 + ord('a') + 10000 + def test_char_constant(self): const = self.const def fn(s): From noreply at buildbot.pypy.org Mon Dec 12 15:34:38 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 15:34:38 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: hg merge default Message-ID: <20111212143438.A546482ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50428:aff416c812ea Date: 2011-12-12 15:19 +0100 http://bitbucket.org/pypy/pypy/changeset/aff416c812ea/ Log: hg merge default diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -252,6 +252,10 @@ "use small tuples", default=False), + BoolOption("withspecialisedtuple", + "use specialised tuples", + default=False), + BoolOption("withrope", "use ropes as the string implementation", default=False, requires=[("objspace.std.withstrslice", False), @@ -365,6 +369,7 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) + config.objspace.std.suggest(withspecialisedtuple=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -496,6 +496,17 @@ def setup(self): super(AppClassCollector, self).setup() cls = self.obj + # + # + for name in dir(cls): + if name.startswith('test_'): + func = getattr(cls, name, None) + code = getattr(func, 'func_code', None) + if code and code.co_flags & 32: + raise AssertionError("unsupported: %r is a generator " + "app-level test method" % (name,)) + # + # space = cls.space clsname = cls.__name__ if self.config.option.runappdirect: diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -112,10 +112,32 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. +Note that the JIT has a very high warm-up cost, meaning that the +programs are slow at the beginning. If you want to compare the timings +with CPython, even relatively simple programs need to run *at least* one +second, preferrably at least a few seconds. Large, complicated programs +need even more time to warm-up the JIT. + .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +--------------------------------------------------------------- +Couldn't the JIT dump and reload already-compiled machine code? 
+--------------------------------------------------------------- + +No, we found no way of doing that. The JIT generates machine code +containing a large number of constant addresses --- constant at the time +the machine code is written. The vast majority is probably not at all +constants that you find in the executable, with a nice link name. E.g. +the addresses of Python classes are used all the time, but Python +classes don't come statically from the executable; they are created anew +every time you restart your program. This makes saving and reloading +machine code completely impossible without some very advanced way of +mapping addresses in the old (now-dead) process to addresses in the new +process, including checking that all the previous assumptions about the +(now-dead) object are still true about the new object. + .. _`prolog and javascript`: diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -207,6 +207,7 @@ [h_ops[i].clone() for i in range(start, len(h_ops))] + \ [ResOperation(rop.JUMP, jumpargs, None, descr=loop_jitcell_token)] label = part.operations[0] + orignial_label = label.clone() assert label.getopnum() == rop.LABEL try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) @@ -216,12 +217,13 @@ target_token = label.getdescr() assert isinstance(target_token, TargetToken) assert target_token.exported_state - part.operations = [label] + \ + part.operations = [orignial_label] + \ [ResOperation(rop.JUMP, target_token.exported_state.jump_args, None, descr=loop_jitcell_token)] try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts, inline_short_preamble=False) + except InvalidLoop: return None assert part.operations[-1].getopnum() != rop.LABEL diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -188,7 +188,7 @@ return self.did_import = True - self.short = target_token.short_preamble + self.short = target_token.short_preamble[:] self.short_seen = {} self.short_boxes = exported_state.short_boxes.clone() for box, const in exported_state.constant_inputargs.items(): diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -2,13 +2,15 @@ pypyjit.set_param(threshold=200) +def g(*args): + return len(args) + def f(n): - pairs = [(0.0, 1.0), (2.0, 3.0)] * n - mag = 0 - for (x1, x2) in pairs: - dx = x1 - x2 - mag += ((dx * dx ) ** (-1.5)) - return n + s = 0 + for i in range(n): + l = [i, n, 2] + s += g(*l) + return s try: print f(301) diff --git a/pypy/module/_continuation/test/test_translated.py b/pypy/module/_continuation/test/test_translated.py --- a/pypy/module/_continuation/test/test_translated.py +++ b/pypy/module/_continuation/test/test_translated.py @@ -93,13 +93,20 @@ if not option.runappdirect: py.test.skip("meant only for -A run") - def test_single_threaded(self): - for i in range(20): - yield Runner().run_test, - - def test_multi_threaded(self): - for i in range(5): - yield multithreaded_test, +def _setup(): + for _i in range(20): + def test_single_threaded(self): + Runner().run_test() + test_single_threaded.func_name = 'test_single_threaded_%d' % _i + setattr(AppTestWrapper, test_single_threaded.func_name, + test_single_threaded) + for _i in range(5): + def test_multi_threaded(self): + multithreaded_test() + 
test_multi_threaded.func_name = 'test_multi_threaded_%d' % _i + setattr(AppTestWrapper, test_multi_threaded.func_name, + test_multi_threaded) +_setup() class ThreadTest(object): def __init__(self, lock): diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py --- a/pypy/module/_hashlib/interp_hashlib.py +++ b/pypy/module/_hashlib/interp_hashlib.py @@ -21,11 +21,11 @@ class W_Hash(Wrappable): ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO) - _block_size = -1 def __init__(self, space, name): self.name = name - self.digest_size = self.compute_digest_size() + digest_type = self.digest_type_by_name(space) + self.digest_size = rffi.getintfield(digest_type, 'c_md_size') # Allocate a lock for each HASH object. # An optimization would be to not release the GIL on small requests, @@ -34,21 +34,22 @@ ctx = lltype.malloc(ropenssl.EVP_MD_CTX.TO, flavor='raw') rgc.add_memory_pressure(HASH_MALLOC_SIZE + self.digest_size) + ropenssl.EVP_DigestInit(ctx, digest_type) self.ctx = ctx - def initdigest(self, space, name): - digest = ropenssl.EVP_get_digestbyname(name) - if not digest: - raise OperationError(space.w_ValueError, - space.wrap("unknown hash function")) - ropenssl.EVP_DigestInit(self.ctx, digest) - def __del__(self): # self.lock.free() if self.ctx: ropenssl.EVP_MD_CTX_cleanup(self.ctx) lltype.free(self.ctx, flavor='raw') + def digest_type_by_name(self, space): + digest_type = ropenssl.EVP_get_digestbyname(self.name) + if not digest_type: + raise OperationError(space.w_ValueError, + space.wrap("unknown hash function")) + return digest_type + def descr_repr(self, space): addrstring = self.getaddrstring(space) return space.wrap("<%s HASH object at 0x%s>" % ( @@ -87,7 +88,9 @@ return space.wrap(self.digest_size) def get_block_size(self, space): - return space.wrap(self.compute_block_size()) + digest_type = self.digest_type_by_name(space) + block_size = rffi.getintfield(digest_type, 'c_block_size') + return space.wrap(block_size) def _digest(self, space): with lltype.scoped_alloc(ropenssl.EVP_MD_CTX.TO) as ctx: @@ -99,36 +102,6 @@ ropenssl.EVP_MD_CTX_cleanup(ctx) return rffi.charpsize2str(digest, digest_size) - def compute_digest_size(self): - # XXX This isn't the nicest way, but the EVP_MD_size OpenSSL - # XXX function is defined as a C macro on OS X and would be - # XXX significantly harder to implement in another way. - # Values are digest sizes in bytes - return { - 'md5': 16, 'MD5': 16, - 'sha1': 20, 'SHA1': 20, - 'sha224': 28, 'SHA224': 28, - 'sha256': 32, 'SHA256': 32, - 'sha384': 48, 'SHA384': 48, - 'sha512': 64, 'SHA512': 64, - }.get(self.name, 0) - - def compute_block_size(self): - if self._block_size != -1: - return self._block_size - # XXX This isn't the nicest way, but the EVP_MD_CTX_block_size - # XXX OpenSSL function is defined as a C macro on some systems - # XXX and would be significantly harder to implement in - # XXX another way. 
- self._block_size = { - 'md5': 64, 'MD5': 64, - 'sha1': 64, 'SHA1': 64, - 'sha224': 64, 'SHA224': 64, - 'sha256': 64, 'SHA256': 64, - 'sha384': 128, 'SHA384': 128, - 'sha512': 128, 'SHA512': 128, - }.get(self.name, 0) - return self._block_size W_Hash.typedef = TypeDef( 'HASH', @@ -142,11 +115,11 @@ digestsize=GetSetProperty(W_Hash.get_digest_size), block_size=GetSetProperty(W_Hash.get_block_size), ) +W_Hash.acceptable_as_base_class = False @unwrap_spec(name=str, string='bufferstr') def new(space, name, string=''): w_hash = W_Hash(space, name) - w_hash.initdigest(space, name) w_hash.update(space, string) return space.wrap(w_hash) @@ -158,6 +131,6 @@ return new(space, name, string) return new_hash -for name in algorithms: - newname = 'new_%s' % (name,) - globals()[newname] = make_new_hash(name, newname) +for _name in algorithms: + _newname = 'new_%s' % (_name,) + globals()[_newname] = make_new_hash(_name, _newname) diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py --- a/pypy/module/_hashlib/test/test_hashlib.py +++ b/pypy/module/_hashlib/test/test_hashlib.py @@ -80,28 +80,27 @@ _hashlib.openssl_sha1(b).digest() def test_extra_algorithms(self): - import _hashlib - test_string = "Nobody inspects the spammish repetition" expected_results = { "md5": "bb649c83dd1ea5c9d9dec9a18df0ffe9", "md4": "c275b8454684ea416b93d7a418b43176", "mdc2": None, # XXX find the correct expected value "sha": "e2b0a8609b47c58e5d984c9ccfe69f9b654b032b", "ripemd160": "cc4a5ce1b3df48aec5d22d1f16b894a0b894eccc", - "whirlpool": "1a22b79fe5afda02c63a25927193ed01dc718b74" - "026e597608ce431f9c3d2c9e74a7350b7fbb7c5d" - "4effe5d7a31879b8b7a10fd2f544c4ca268ecc6793923583", + "whirlpool": ("1a22b79fe5afda02c63a25927193ed01dc718b74" + "026e597608ce431f9c3d2c9e74a7350b7fbb7c5d" + "4effe5d7a31879b8b7a10fd2f544c4ca268ecc6793923583"), } - def extracheck(hash_name, expected): + import _hashlib + test_string = "Nobody inspects the spammish repetition" + for hash_name, expected in sorted(expected_results.items()): try: m = _hashlib.new(hash_name) except ValueError, e: - skip('%s: %s' % (hash_name, e)) + print 'skipped %s: %s' % (hash_name, e) + continue m.update(test_string) got = m.hexdigest() assert got and type(got) is str and len(got) % 2 == 0 got.decode('hex') if expected is not None: assert got == expected - for hash_name, expected in sorted(expected_results.items()): - yield extracheck, hash_name, expected diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -14,7 +14,6 @@ METH_VARARGS, build_type_checkers, PyObjectFields, bootstrap_function) from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.rlib.objectmodel import we_are_translated -from pypy.objspace.std.tupleobject import W_TupleObject PyCFunction_typedef = rffi.COpaquePtr(typedef='PyCFunction') PyCFunction = lltype.Ptr(lltype.FuncType([PyObject, PyObject], PyObject)) diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -42,11 +42,11 @@ which case o is returned. Use PySequence_Fast_GET_ITEM() to access the members of the result. Returns NULL on failure. 
If the object is not a sequence, raises TypeError with m as the message text.""" - if (space.is_true(space.isinstance(w_obj, space.w_list)) or - space.is_true(space.isinstance(w_obj, space.w_tuple))): + if (isinstance(w_obj, listobject.W_ListObject) or + isinstance(w_obj, tupleobject.W_TupleObject)): return w_obj try: - return space.newtuple(space.fixedview(w_obj)) + return tupleobject.W_TupleObject(space.fixedview(w_obj)) except OperationError: raise OperationError(space.w_TypeError, space.wrap(rffi.charp2str(m))) diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -6,13 +6,12 @@ borrow_from, make_ref, from_ref) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.objspace.std.tupleobject import W_TupleObject -from pypy.objspace.std.smalltupleobject import W_SmallTupleObject PyTuple_Check, PyTuple_CheckExact = build_type_checkers("Tuple") @cpython_api([Py_ssize_t], PyObject) def PyTuple_New(space, size): - return space.newtuple([space.w_None] * size) + return W_TupleObject([space.w_None] * size) @cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=-1) def PyTuple_SetItem(space, w_t, pos, w_obj): @@ -24,12 +23,12 @@ return 0 def _setitem_tuple(w_t, pos, w_obj): - if isinstance(w_t, W_TupleObject): - w_t.wrappeditems[pos] = w_obj - elif isinstance(w_t, W_SmallTupleObject): - w_t.setitem(pos, w_obj) - else: - assert False + # this function checks that w_t is really a W_TupleObject. It + # should only ever be called with a freshly built tuple from + # PyTuple_New(), which always return a W_TupleObject, even if there + # are also other implementations of tuples. + assert isinstance(w_t, W_TupleObject) + w_t.wrappeditems[pos] = w_obj @cpython_api([PyObject, Py_ssize_t], PyObject) def PyTuple_GetItem(space, w_t, pos): diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -79,7 +79,7 @@ return w_obj def to_builtin_type(self, space, box): - return space.wrap(self.unbox(box)) + return space.wrap(self.for_computation(self.unbox(box))) def _coerce(self, space, w_item): raise NotImplementedError @@ -183,6 +183,9 @@ def _coerce(self, space, w_item): return self.box(space.is_true(w_item)) + def to_builtin_type(self, space, w_item): + return space.wrap(self.unbox(w_item)) + def str_format(self, box): value = self.unbox(box) return "True" if value else "False" diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -34,7 +34,10 @@ jump(p0, p1, p2, p3, p4, p5, i13, i11, i8, descr=...) """ assert loop0.match(expected) - assert loop1.match(expected) + # XXX: The retracing fails to form a loop since j + # becomes constant 0 after the bridge and constant 1 at the end of the + # loop. A bridge back to the peramble is produced instead. 
+ #assert loop1.match(expected) def test_factorial(self): def fact(n): diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -179,7 +179,7 @@ log = self.run(main, [1000]) assert log.result == main(1000) loops = log.loops_by_filename(self.filepath) - assert len(loops) == 2 + assert len(loops) == 1 for loop in loops: loop.match_by_id('getattr',''' guard_not_invalidated(descr=...) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -9,7 +9,7 @@ from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.objectmodel import instantiate, we_are_translated from pypy.rlib.nonconst import NonConstant -from pypy.rlib.rarithmetic import r_uint +from pypy.rlib.rarithmetic import r_uint, r_singlefloat from pypy.translator.translator import TranslationContext from pypy.tool.option import make_config @@ -145,9 +145,15 @@ self._see_interp2app(x) if isinstance(x, GetSetProperty): self._see_getsetproperty(x) + if isinstance(x, r_singlefloat): + self._wrap_not_rpython(x) return w_some_obj() wrap._annspecialcase_ = "specialize:argtype(1)" + def _wrap_not_rpython(self, x): + "NOT_RPYTHON" + raise NotImplementedError + def _see_interp2app(self, interp2app): "NOT_RPYTHON" activation = interp2app._code.activation diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -694,14 +694,16 @@ return self.wrap(r) @jit.look_inside_iff(lambda self, w_list: - jit.isconstant(w_list.length()) and w_list.length() < UNROLL_CUTOFF) + jit.isconstant(w_list.length()) and w_list.length() < UNROLL_CUTOFF) def getitems_copy(self, w_list): return [self.wrap(item) for item in self.unerase(w_list.lstorage)] @jit.unroll_safe def getitems_unroll(self, w_list): return [self.wrap(item) for item in self.unerase(w_list.lstorage)] - @jit.dont_look_inside + + @jit.look_inside_iff(lambda self, w_list: + jit.isconstant(w_list.length()) and w_list.length() < UNROLL_CUTOFF) def getitems_fixedsize(self, w_list): return self.getitems_unroll(w_list) diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -15,6 +15,7 @@ _registered_implementations.add(implcls) option_to_typename = { + "withspecialisedtuple" : ["specialisedtupleobject.W_SpecialisedTupleObject"], "withsmalltuple" : ["smalltupleobject.W_SmallTupleObject"], "withsmallint" : ["smallintobject.W_SmallIntObject"], "withsmalllong" : ["smalllongobject.W_SmallLongObject"], @@ -261,6 +262,11 @@ self.typeorder[smalltupleobject.W_SmallTupleObject] += [ (tupleobject.W_TupleObject, smalltupleobject.delegate_SmallTuple2Tuple)] + if config.objspace.std.withspecialisedtuple: + from pypy.objspace.std import specialisedtupleobject + self.typeorder[specialisedtupleobject.W_SpecialisedTupleObject] += [ + (tupleobject.W_TupleObject, specialisedtupleobject.delegate_SpecialisedTuple2Tuple)] + # put W_Root everywhere self.typeorder[W_Root] = [] for type in self.typeorder: diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -29,7 +29,7 @@ from pypy.objspace.std.sliceobject import W_SliceObject from pypy.objspace.std.smallintobject import W_SmallIntObject from 
pypy.objspace.std.stringobject import W_StringObject -from pypy.objspace.std.tupleobject import W_TupleObject +from pypy.objspace.std.tupleobject import W_AbstractTupleObject from pypy.objspace.std.typeobject import W_TypeObject # types @@ -391,8 +391,8 @@ self.wrap("expected length %d, got %d" % (expected, got))) def unpackiterable(self, w_obj, expected_length=-1): - if isinstance(w_obj, W_TupleObject): - t = w_obj.wrappeditems[:] + if isinstance(w_obj, W_AbstractTupleObject): + t = w_obj.getitems_copy() elif isinstance(w_obj, W_ListObject): t = w_obj.getitems_copy() else: @@ -405,8 +405,8 @@ def fixedview(self, w_obj, expected_length=-1, unroll=False): """ Fast paths """ - if isinstance(w_obj, W_TupleObject): - t = w_obj.wrappeditems + if isinstance(w_obj, W_AbstractTupleObject): + t = w_obj.tolist() elif isinstance(w_obj, W_ListObject): if unroll: t = w_obj.getitems_unroll() @@ -430,8 +430,8 @@ def listview(self, w_obj, expected_length=-1): if isinstance(w_obj, W_ListObject): t = w_obj.getitems() - elif isinstance(w_obj, W_TupleObject): - t = w_obj.wrappeditems[:] + elif isinstance(w_obj, W_AbstractTupleObject): + t = w_obj.getitems_copy() else: return ObjSpace.unpackiterable(self, w_obj, expected_length) if expected_length != -1 and len(t) != expected_length: diff --git a/pypy/objspace/std/smalltupleobject.py b/pypy/objspace/std/smalltupleobject.py --- a/pypy/objspace/std/smalltupleobject.py +++ b/pypy/objspace/std/smalltupleobject.py @@ -9,13 +9,14 @@ from pypy.interpreter import gateway from pypy.rlib.debug import make_sure_not_resized from pypy.rlib.unroll import unrolling_iterable +from pypy.tool.sourcetools import func_with_new_name from pypy.objspace.std.tupleobject import W_AbstractTupleObject, W_TupleObject class W_SmallTupleObject(W_AbstractTupleObject): from pypy.objspace.std.tupletype import tuple_typedef as typedef - def tolist(self): - raise NotImplementedError + #def tolist(self): --- inherited from W_AbstractTupleObject + # raise NotImplementedError def length(self): raise NotImplementedError @@ -51,6 +52,9 @@ l[i] = getattr(self, 'w_value%s' % i) return l + # same source code, but builds and returns a resizable list + getitems_copy = func_with_new_name(tolist, 'getitems_copy') + def length(self): return n diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -0,0 +1,302 @@ +from pypy.interpreter.error import OperationError +from pypy.objspace.std.model import registerimplementation +from pypy.objspace.std.register_all import register_all +from pypy.objspace.std.multimethod import FailedToImplement +from pypy.objspace.std.tupleobject import W_AbstractTupleObject +from pypy.objspace.std.tupleobject import W_TupleObject +from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice +from pypy.rlib.rarithmetic import intmask +from pypy.rlib.objectmodel import compute_hash +from pypy.rlib.unroll import unrolling_iterable +from pypy.tool.sourcetools import func_with_new_name + +class NotSpecialised(Exception): + pass + +class W_SpecialisedTupleObject(W_AbstractTupleObject): + from pypy.objspace.std.tupletype import tuple_typedef as typedef + __slots__ = [] + + def __repr__(self): + """ representation for debugging purposes """ + reprlist = [repr(item) for item in self._to_unwrapped_list()] + return "%s(%s)" % (self.__class__.__name__, ', '.join(reprlist)) + + #def tolist(self): --- inherited from W_AbstractTupleObject + # 
raise NotImplementedError + + def _to_unwrapped_list(self): + "NOT_RPYTHON" + raise NotImplementedError + + def length(self): + raise NotImplementedError + + def getitem(self, index): + raise NotImplementedError + + def hash(self, space): + raise NotImplementedError + + def eq(self, space, w_other): + raise NotImplementedError + + def setitem(self, index, w_item): + raise NotImplementedError + + def unwrap(self, space): + return tuple(self._to_unwrapped_list()) + + def delegating(self): + pass # for tests only + + +def make_specialised_class(typetuple): + assert type(typetuple) == tuple + + nValues = len(typetuple) + iter_n = unrolling_iterable(range(nValues)) + + class cls(W_SpecialisedTupleObject): + def __init__(self, space, *values_w): + self.space = space + assert len(values_w) == nValues + for i in iter_n: + w_obj = values_w[i] + val_type = typetuple[i] + if val_type == int: + unwrapped = space.int_w(w_obj) + elif val_type == float: + unwrapped = space.float_w(w_obj) + elif val_type == str: + unwrapped = space.str_w(w_obj) + elif val_type == object: + unwrapped = w_obj + else: + raise AssertionError + setattr(self, 'value%s' % i, unwrapped) + + def length(self): + return nValues + + def tolist(self): + list_w = [None] * nValues + for i in iter_n: + value = getattr(self, 'value%s' % i) + if typetuple[i] != object: + value = self.space.wrap(value) + list_w[i] = value + return list_w + + # same source code, but builds and returns a resizable list + getitems_copy = func_with_new_name(tolist, 'getitems_copy') + + def _to_unwrapped_list(self): + "NOT_RPYTHON" + list_w = [None] * nValues + for i in iter_n: + value = getattr(self, 'value%s' % i) + if typetuple[i] == object: + value = self.space.unwrap(value) + list_w[i] = value + return list_w + + def hash(self, space): + # XXX duplicate logic from tupleobject.py + mult = 1000003 + x = 0x345678 + z = nValues + for i in iter_n: + value = getattr(self, 'value%s' % i) + if typetuple[i] == object: + y = space.int_w(space.hash(value)) + elif typetuple[i] == float: + # get the correct hash for float which is an + # integer & other less frequent cases + from pypy.objspace.std.floatobject import _hash_float + y = _hash_float(space, value) + else: + y = compute_hash(value) + x = (x ^ y) * mult + z -= 1 + mult += 82520 + z + z + x += 97531 + return space.wrap(intmask(x)) + + def _eq(self, w_other): + if not isinstance(w_other, cls): + # if we are not comparing same types, give up + raise FailedToImplement + for i in iter_n: + myval = getattr(self, 'value%s' % i) + otherval = getattr(w_other, 'value%s' % i) + if typetuple[i] == object: + if not self.space.eq_w(myval, otherval): + return False + else: + if myval != otherval: + return False + else: + return True + + def eq(self, space, w_other): + return space.newbool(self._eq(w_other)) + + def ne(self, space, w_other): + return space.newbool(not self._eq(w_other)) + +## def _compare(self, compare_op, w_other): +## if not isinstance(w_other, cls): +## raise FailedToImplement +## ncmp = min(self.length(), w_other.length()) +## for i in iter_n: +## if typetuple[i] == Any:#like space.eq on wrapped or two params? 
+## raise FailedToImplement +## if ncmp > i: +## l_val = getattr(self, 'value%s' % i) +## r_val = getattr(w_other, 'value%s' % i) +## if l_val != r_val: +## return compare_op(l_val, r_val) +## return compare_op(self.length(), w_other.length()) + + def getitem(self, index): + for i in iter_n: + if index == i: + value = getattr(self, 'value%s' % i) + if typetuple[i] != object: + value = self.space.wrap(value) + return value + raise IndexError + + cls.__name__ = ('W_SpecialisedTupleObject_' + + ''.join([t.__name__[0] for t in typetuple])) + _specialisations.append(cls) + return cls + +# ---------- current specialized versions ---------- + +_specialisations = [] +Cls_ii = make_specialised_class((int, int)) +Cls_is = make_specialised_class((int, str)) +Cls_io = make_specialised_class((int, object)) +Cls_si = make_specialised_class((str, int)) +Cls_ss = make_specialised_class((str, str)) +Cls_so = make_specialised_class((str, object)) +Cls_oi = make_specialised_class((object, int)) +Cls_os = make_specialised_class((object, str)) +Cls_oo = make_specialised_class((object, object)) +Cls_ff = make_specialised_class((float, float)) +Cls_ooo = make_specialised_class((object, object, object)) + +def makespecialisedtuple(space, list_w): + if len(list_w) == 2: + w_arg1, w_arg2 = list_w + w_type1 = space.type(w_arg1) + w_type2 = space.type(w_arg2) + # + if w_type1 is space.w_int: + if w_type2 is space.w_int: + return Cls_ii(space, w_arg1, w_arg2) + elif w_type2 is space.w_str: + return Cls_is(space, w_arg1, w_arg2) + else: + return Cls_io(space, w_arg1, w_arg2) + # + elif w_type1 is space.w_str: + if w_type2 is space.w_int: + return Cls_si(space, w_arg1, w_arg2) + elif w_type2 is space.w_str: + return Cls_ss(space, w_arg1, w_arg2) + else: + return Cls_so(space, w_arg1, w_arg2) + # + elif w_type1 is space.w_float and w_type2 is space.w_float: + return Cls_ff(space, w_arg1, w_arg2) + # + else: + if w_type2 is space.w_int: + return Cls_oi(space, w_arg1, w_arg2) + elif w_type2 is space.w_str: + return Cls_os(space, w_arg1, w_arg2) + else: + return Cls_oo(space, w_arg1, w_arg2) + # + elif len(list_w) == 3: + return Cls_ooo(space, list_w[0], list_w[1], list_w[2]) + else: + raise NotSpecialised + +# ____________________________________________________________ + +registerimplementation(W_SpecialisedTupleObject) + +def delegate_SpecialisedTuple2Tuple(space, w_specialised): + w_specialised.delegating() + return W_TupleObject(w_specialised.tolist()) + +def len__SpecialisedTuple(space, w_tuple): + return space.wrap(w_tuple.length()) + +def getitem__SpecialisedTuple_ANY(space, w_tuple, w_index): + index = space.getindex_w(w_index, space.w_IndexError, "tuple index") + if index < 0: + index += w_tuple.length() + try: + return w_tuple.getitem(index) + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("tuple index out of range")) + +def getitem__SpecialisedTuple_Slice(space, w_tuple, w_slice): + length = w_tuple.length() + start, stop, step, slicelength = w_slice.indices4(space, length) + assert slicelength >= 0 + subitems = [None] * slicelength + for i in range(slicelength): + subitems[i] = w_tuple.getitem(start) + start += step + return space.newtuple(subitems) + +def mul_specialisedtuple_times(space, w_tuple, w_times): + try: + times = space.getindex_w(w_times, space.w_OverflowError) + except OperationError, e: + if e.match(space, space.w_TypeError): + raise FailedToImplement + raise + if times == 1 and space.type(w_tuple) == space.w_tuple: + return w_tuple + items = w_tuple.tolist() + return 
space.newtuple(items * times) + +def mul__SpecialisedTuple_ANY(space, w_tuple, w_times): + return mul_specialisedtuple_times(space, w_tuple, w_times) + +def mul__ANY_SpecialisedTuple(space, w_times, w_tuple): + return mul_specialisedtuple_times(space, w_tuple, w_times) + +def eq__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): + return w_tuple1.eq(space, w_tuple2) + +def ne__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): + return w_tuple1.ne(space, w_tuple2) + +##from operator import lt, le, ge, gt + +##def lt__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(lt, w_tuple2)) + +##def le__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(le, w_tuple2)) + +##def ge__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(ge, w_tuple2)) + +##def gt__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(gt, w_tuple2)) + +def hash__SpecialisedTuple(space, w_tuple): + return w_tuple.hash(space) + +from pypy.objspace.std import tupletype +register_all(vars(), tupletype) diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -0,0 +1,234 @@ +import py, sys +from pypy.objspace.std.tupleobject import W_TupleObject +from pypy.objspace.std.specialisedtupleobject import W_SpecialisedTupleObject +from pypy.objspace.std.specialisedtupleobject import _specialisations +from pypy.interpreter.error import OperationError +from pypy.conftest import gettestobjspace, option +from pypy.objspace.std.test import test_tupleobject +from pypy.interpreter import gateway + + +for cls in _specialisations: + globals()[cls.__name__] = cls + + +class TestW_SpecialisedTupleObject(): + + def setup_class(cls): + cls.space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) + + def test_isspecialisedtupleobjectintint(self): + w_tuple = self.space.newtuple([self.space.wrap(1), self.space.wrap(2)]) + assert isinstance(w_tuple, W_SpecialisedTupleObject_ii) + + def test_isnotspecialisedtupleobject(self): + w_tuple = self.space.newtuple([self.space.wrap({})]) + assert not isinstance(w_tuple, W_SpecialisedTupleObject) + + def test_specialisedtupleclassname(self): + w_tuple = self.space.newtuple([self.space.wrap(1), self.space.wrap(2)]) + assert w_tuple.__class__.__name__ == 'W_SpecialisedTupleObject_ii' + + def test_hash_against_normal_tuple(self): + N_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": False}) + S_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) + + def hash_test(values): + N_values_w = [N_space.wrap(value) for value in values] + S_values_w = [S_space.wrap(value) for value in values] + N_w_tuple = N_space.newtuple(N_values_w) + S_w_tuple = S_space.newtuple(S_values_w) + + assert isinstance(S_w_tuple, W_SpecialisedTupleObject) + assert isinstance(N_w_tuple, W_TupleObject) + assert not N_space.is_true(N_space.eq(N_w_tuple, S_w_tuple)) + assert S_space.is_true(S_space.eq(N_w_tuple, S_w_tuple)) + assert S_space.is_true(S_space.eq(N_space.hash(N_w_tuple), S_space.hash(S_w_tuple))) + + hash_test([1,2]) + hash_test([1.5,2.8]) + hash_test([1.0,2.0]) + hash_test(['arbitrary','strings']) + hash_test([1,(1,2,3,4)]) + hash_test([1,(1,2)]) + hash_test([1,('a',2)]) + hash_test([1,()]) + 
hash_test([1,2,3]) + + +class AppTestW_SpecialisedTupleObject: + + def setup_class(cls): + cls.space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) + def forbid_delegation(space, w_tuple): + def delegation_forbidden(): + # haaaack + co = sys._getframe(2).f_code + if co.co_name.startswith('_mm_repr_tuple'): + return + raise OperationError(space.w_ReferenceError, w_tuple) + w_tuple.delegating = delegation_forbidden + return w_tuple + if option.runappdirect: + cls.w_forbid_delegation = lambda self, x: x + cls.test_delegation = lambda self: skip("runappdirect") + else: + cls.w_forbid_delegation = cls.space.wrap( + gateway.interp2app(forbid_delegation)) + + def w_isspecialised(self, obj, expected=''): + import __pypy__ + r = __pypy__.internal_repr(obj) + print obj, '==>', r, ' (expected: %r)' % expected + return ("SpecialisedTupleObject" + expected) in r + + def test_createspecialisedtuple(self): + spec = {int: 'i', + float: 'f', + str: 's', + list: 'o'} + # + for x in [42, 4.2, "foo", []]: + for y in [43, 4.3, "bar", []]: + expected1 = spec[type(x)] + expected2 = spec[type(y)] + if (expected1 == 'f') ^ (expected2 == 'f'): + if expected1 == 'f': expected1 = 'o' + if expected2 == 'f': expected2 = 'o' + obj = (x, y) + assert self.isspecialised(obj, '_' + expected1 + expected2) + # + obj = (1, 2, 3) + assert self.isspecialised(obj, '_ooo') + + def test_delegation(self): + t = self.forbid_delegation((42, 43)) + raises(ReferenceError, t.__getslice__, 0, 1) + + def test_len(self): + t = self.forbid_delegation((42,43)) + assert len(t) == 2 + + def test_notspecialisedtuple(self): + assert not self.isspecialised((42,43,44,45)) + assert not self.isspecialised((1.5,)) + + def test_slicing_to_specialised(self): + t = (1, 2, 3) + assert self.isspecialised(t[0:2]) + t = (1, '2', 3) + assert self.isspecialised(t[0:5:2]) + + def test_adding_to_specialised(self): + t = (1,) + assert self.isspecialised(t + (2,)) + + def test_multiply_to_specialised(self): + t = (1,) + assert self.isspecialised(t * 2) + + def test_slicing_from_specialised(self): + t = (1, 2, 3) + assert t[0:2:1] == (1, 2) + + def test_eq_no_delegation(self): + t = (1,) + a = self.forbid_delegation(t + (2,)) + b = (1, 2) + assert a == b + + c = (2, 1) + assert not a == c + + def test_eq_can_delegate(self): + a = (1,2) + b = (1,3,2) + assert not a == b + + values = [2, 2L, 2.0, 1, 1L, 1.0] + for x in values: + for y in values: + assert ((1,2) == (x,y)) == (1 == x and 2 == y) + + def test_neq(self): + a = self.forbid_delegation((1,2)) + b = (1,) + b = b+(2,) + assert not a != b + + c = (1,3) + assert a != c + + def test_ordering(self): + a = (1,2) #self.forbid_delegation((1,2)) --- code commented out + assert a < (2,2) + assert a < (1,3) + assert not a < (1,2) + + assert a <= (2,2) + assert a <= (1,2) + assert not a <= (1,1) + + assert a >= (0,2) + assert a >= (1,2) + assert not a >= (1,3) + + assert a > (0,2) + assert a > (1,1) + assert not a > (1,3) + + assert (2,2) > a + assert (1,3) > a + assert not (1,2) > a + + assert (2,2) >= a + assert (1,2) >= a + assert not (1,1) >= a + + assert (0,2) <= a + assert (1,2) <= a + assert not (1,3) <= a + + assert (0,2) < a + assert (1,1) < a + assert not (1,3) < a + + def test_hash(self): + a = (1,2) + b = (1,) + b += (2,) # else a and b refer to same constant + assert hash(a) == hash(b) + + c = (2,4) + assert hash(a) != hash(c) + + assert hash(a) == hash((1L, 2L)) == hash((1.0, 2.0)) == hash((1.0, 2L)) + + def test_getitem(self): + t = self.forbid_delegation((5,3)) + assert (t)[0] == 5 + 
assert (t)[1] == 3 + assert (t)[-1] == 3 + assert (t)[-2] == 5 + raises(IndexError, "t[2]") + raises(IndexError, "t[-3]") + + def test_three_tuples(self): + b = self.forbid_delegation((1, 2, 3)) + c = (1,) + d = c + (2, 3) + assert self.isspecialised(d) + assert b == d + + def test_mongrel(self): + a = self.forbid_delegation((1, 2.2, '333')) + assert self.isspecialised(a) + assert len(a) == 3 + assert a[0] == 1 and a[1] == 2.2 and a[2] == '333' + b = ('333',) + assert a == (1, 2.2,) + b + assert not a != (1, 2.2) + b + + +class AppTestAll(test_tupleobject.AppTestW_TupleObject): + pass diff --git a/pypy/objspace/std/test/test_tupleobject.py b/pypy/objspace/std/test/test_tupleobject.py --- a/pypy/objspace/std/test/test_tupleobject.py +++ b/pypy/objspace/std/test/test_tupleobject.py @@ -280,6 +280,8 @@ assert () * 10 == () assert (5,) * 3 == (5,5,5) assert (5,2) * 2 == (5,2,5,2) + + def test_mul_identity(self): t = (1,2,3) assert (t * 1) is t diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -12,6 +12,15 @@ class W_AbstractTupleObject(W_Object): __slots__ = () + def tolist(self): + "Returns the items, as a fixed-size list." + raise NotImplementedError + + def getitems_copy(self): + "Returns a copy of the items, as a resizable list." + raise NotImplementedError + + class W_TupleObject(W_AbstractTupleObject): from pypy.objspace.std.tupletype import tuple_typedef as typedef _immutable_fields_ = ['wrappeditems[*]'] @@ -29,6 +38,12 @@ items = [space.unwrap(w_item) for w_item in w_tuple.wrappeditems] return tuple(items) + def tolist(self): + return self.wrappeditems + + def getitems_copy(self): + return self.wrappeditems[:] # returns a resizable list + registerimplementation(W_TupleObject) diff --git a/pypy/objspace/std/tupletype.py b/pypy/objspace/std/tupletype.py --- a/pypy/objspace/std/tupletype.py +++ b/pypy/objspace/std/tupletype.py @@ -5,6 +5,14 @@ def wraptuple(space, list_w): from pypy.objspace.std.tupleobject import W_TupleObject + + if space.config.objspace.std.withspecialisedtuple: + from specialisedtupleobject import makespecialisedtuple, NotSpecialised + try: + return makespecialisedtuple(space, list_w) + except NotSpecialised: + pass + if space.config.objspace.std.withsmalltuple: from pypy.objspace.std.smalltupleobject import W_SmallTupleObject2 from pypy.objspace.std.smalltupleobject import W_SmallTupleObject3 diff --git a/pypy/rlib/ropenssl.py b/pypy/rlib/ropenssl.py --- a/pypy/rlib/ropenssl.py +++ b/pypy/rlib/ropenssl.py @@ -110,6 +110,10 @@ 'struct GENERAL_NAME_st', [('type', rffi.INT), ]) + EVP_MD_st = rffi_platform.Struct( + 'EVP_MD', + [('md_size', rffi.INT), + ('block_size', rffi.INT)]) EVP_MD_SIZE = rffi_platform.SizeOf('EVP_MD') EVP_MD_CTX_SIZE = rffi_platform.SizeOf('EVP_MD_CTX') @@ -258,7 +262,7 @@ [BIO, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], X509) EVP_MD_CTX = rffi.COpaquePtr('EVP_MD_CTX', compilation_info=eci) -EVP_MD = rffi.COpaquePtr('EVP_MD', compilation_info=eci) +EVP_MD = lltype.Ptr(EVP_MD_st) OpenSSL_add_all_digests = external( 'OpenSSL_add_all_digests', [], lltype.Void) diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -99,7 +99,7 @@ return p def make_iterator_repr(self): - return self.iterator_repr + return self.repr.iterator_repr def can_ll_be_null(self, s_value): # XXX unicode diff --git a/pypy/rpython/test/test_rstr.py 
b/pypy/rpython/test/test_rstr.py --- a/pypy/rpython/test/test_rstr.py +++ b/pypy/rpython/test/test_rstr.py @@ -89,6 +89,28 @@ res = self.interpret(fn, [i]) assert res is True + def test_iter_over_char(self): + const = self.const + def fn(i): + for c in const('a'): + i += ord(c) + 10000 + return i + res = self.interpret(fn, [0]) + assert res == ord('a') + 10000 + + def test_iter_over_nonconst_char(self): + const = self.const + def fn(i): + if i > 0: + c = const('a') + else: + c = const('A') + for c in c: + i += ord(c) + 10000 + return i + res = self.interpret(fn, [1]) + assert res == 1 + ord('a') + 10000 + def test_char_constant(self): const = self.const def fn(s): diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -63,7 +63,10 @@ exec_ = eval def repr(self, w_value): - return self.space.unwrap(self.space.repr(w_value)) + try: + return self.space.unwrap(self.space.repr(w_value)) + except Exception, e: + return ""%e def is_true(self, w_value): return self.space.is_true(w_value) From noreply at buildbot.pypy.org Mon Dec 12 16:37:53 2011 From: noreply at buildbot.pypy.org (exarkun) Date: Mon, 12 Dec 2011 16:37:53 +0100 (CET) Subject: [pypy-commit] pypy pyarg-parsetuple-s-star-buffer: fiddle around a bit - add a unit test that might exercise the desired functionality; play with Py_buffer definition and with the getargs.c implementation of s* for buffers. Broken. Message-ID: <20111212153753.AB80182ABD@wyvern.cs.uni-duesseldorf.de> Author: Jean-Paul Calderone Branch: pyarg-parsetuple-s-star-buffer Changeset: r50429:0561684806ca Date: 2011-12-12 10:37 -0500 http://bitbucket.org/pypy/pypy/changeset/0561684806ca/ Log: fiddle around a bit - add a unit test that might exercise the desired functionality; play with Py_buffer definition and with the getargs.c implementation of s* for buffers. Broken. 
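As background (an illustrative sketch only, not part of this changeset): the "s*" format unit being wired up here fills a caller-supplied Py_buffer from a string or from any object exposing the buffer interface, and the caller is responsible for releasing it afterwards. The hypothetical extension function below (the name sum_bytes is made up for illustration) shows the parse / use / release pattern that the new unit test in test_getargs.py also exercises: parse with "s*", read view.buf and view.len, then call PyBuffer_Release().

#include <Python.h>

/* Hypothetical helper: sum the bytes of any string- or buffer-like
   argument.  Demonstrates consuming "s*" and releasing the view. */
static PyObject *
sum_bytes(PyObject *self, PyObject *args)
{
    Py_buffer view;
    Py_ssize_t i;
    long total = 0;

    if (!PyArg_ParseTuple(args, "s*", &view))
        return NULL;                  /* argument was not a string/buffer */

    for (i = 0; i < view.len; i++)
        total += ((unsigned char *)view.buf)[i];

    PyBuffer_Release(&view);          /* required after a successful "s*" */
    return PyInt_FromLong(total);
}
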
diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -45,6 +45,7 @@ import pypy.module.cpyext.longobject import pypy.module.cpyext.listobject import pypy.module.cpyext.sequence +import pypy.module.cpyext.buffer import pypy.module.cpyext.eval import pypy.module.cpyext.import_ import pypy.module.cpyext.mapping diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1055,6 +1055,7 @@ boxed_args = () to_decref = [] assert len(args) == len(FT.ARGS) + import pdb; pdb.set_trace() for i, ARG in unrolling_arg_types: arg = args[i] if is_PyObject(ARG): diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -234,7 +234,7 @@ writebufferproc bf_getwritebuffer; segcountproc bf_getsegcount; charbufferproc bf_getcharbuffer; - getbufferproc bf_getbuffer; + getbufferproc bf_getbuffer; releasebufferproc bf_releasebuffer; } PyBufferProcs; diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -777,18 +777,14 @@ Py_buffer *p = (Py_buffer *)va_arg(*p_va, Py_buffer *); if (PyString_Check(arg)) { + fflush(stdout); PyBuffer_FillInfo(p, arg, PyString_AS_STRING(arg), PyString_GET_SIZE(arg), 1, 0); - } else { - PyErr_SetString( - PyExc_NotImplementedError, - "s* not implemented for non-string values"); - return NULL; - } -#if 0 + } #ifdef Py_USING_UNICODE else if (PyUnicode_Check(arg)) { +#if 0 uarg = UNICODE_DEFAULT_ENCODING(arg); if (uarg == NULL) return converterr(CONV_UNICODE, @@ -796,6 +792,9 @@ PyBuffer_FillInfo(p, arg, PyString_AS_STRING(uarg), PyString_GET_SIZE(uarg), 1, 0); +#else + return converterr("string or buffer", arg, msgbuf, bufsize); +#endif } #endif else { /* any buffer-like object */ @@ -803,7 +802,6 @@ if (getbuffer(arg, p, &buf) < 0) return converterr(buf, arg, msgbuf, bufsize); } -#endif if (addcleanup(p, freelist, cleanup_buffer)) { return converterr( "(cleanup problem)", @@ -1342,7 +1340,6 @@ return count; } -#if 0 //YYY static int getbuffer(PyObject *arg, Py_buffer *view, char **errmsg) { @@ -1373,7 +1370,6 @@ PyBuffer_FillInfo(view, NULL, buf, count, 1, 0); return 0; } -#endif /* Support for keyword arguments donated by Geoff Philbrick */ diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -1,5 +1,5 @@ from pypy.module.cpyext.api import ( - cpython_api, PyObject, PyObjectP, CANNOT_FAIL + cpython_api, PyObject, PyObjectP, CANNOT_FAIL, Py_buffer ) from pypy.module.cpyext.complexobject import Py_complex_ptr as Py_complex from pypy.rpython.lltypesystem import rffi, lltype @@ -10,7 +10,6 @@ PyMethodDef = rffi.VOIDP PyGetSetDef = rffi.VOIDP PyMemberDef = rffi.VOIDP -Py_buffer = rffi.VOIDP va_list = rffi.VOIDP PyDateTime_Date = rffi.VOIDP PyDateTime_DateTime = rffi.VOIDP @@ -178,13 +177,6 @@ ~Py_buffer.format.""" raise NotImplementedError - at cpython_api([Py_buffer, lltype.Char], rffi.INT_real, error=CANNOT_FAIL) -def PyBuffer_IsContiguous(space, view, fortran): - """Return 1 if the memory defined by the view is C-style (fortran is - 'C') or Fortran-style (fortran is 'F') contiguous or either one - (fortran is 'A'). 
Return 0 otherwise.""" - raise NotImplementedError - @cpython_api([rffi.INT_real, Py_ssize_t, Py_ssize_t, Py_ssize_t, lltype.Char], lltype.Void) def PyBuffer_FillContiguousStrides(space, ndim, shape, strides, itemsize, fortran): """Fill the strides array with byte-strides of a contiguous (C-style if diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -129,6 +129,23 @@ assert 'foo\0bar\0baz' == pybuffer('foo\0bar\0baz') + def test_pyarg_parse_string_old_buffer(self): + pybuffer = self.import_parser( + ''' + Py_buffer buf; + PyObject *result; + if (!PyArg_ParseTuple(args, "s*", &buf)) { + return NULL; + } + printf("OH NO %s %d\\n", buf.buf, buf.len); + fflush(stdout); + result = PyString_FromStringAndSize(buf.buf, buf.len); + PyBuffer_Release(&buf); + return result; + ''') + assert buffer('foo\0bar\0baz') == pybuffer(buffer('foo\0bar\0baz')) + + def test_pyarg_parse_charbuf_and_length(self): """ The `t#` format specifier can be used to parse a read-only 8-bit From noreply at buildbot.pypy.org Mon Dec 12 16:45:27 2011 From: noreply at buildbot.pypy.org (exarkun) Date: Mon, 12 Dec 2011 16:45:27 +0100 (CET) Subject: [pypy-commit] pypy pyarg-parsetuple-s-star-buffer: Add trivial buffer implementation module Message-ID: <20111212154527.A2DB382ABD@wyvern.cs.uni-duesseldorf.de> Author: Jean-Paul Calderone Branch: pyarg-parsetuple-s-star-buffer Changeset: r50430:421fe5b137ea Date: 2011-12-12 10:45 -0500 http://bitbucket.org/pypy/pypy/changeset/421fe5b137ea/ Log: Add trivial buffer implementation module diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/buffer.py @@ -0,0 +1,11 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, CANNOT_FAIL, Py_buffer) + + at cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL) +def PyBuffer_IsContiguous(space, view, fortran): + """Return 1 if the memory defined by the view is C-style (fortran is + 'C') or Fortran-style (fortran is 'F') contiguous or either one + (fortran is 'A'). Return 0 otherwise.""" + # PyPy only supports contiguous Py_buffers for now. + return space.wrap(1) From noreply at buildbot.pypy.org Mon Dec 12 16:59:00 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 16:59:00 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: (sven, arigo) Fix a bug, and add a print that is useful for Message-ID: <20111212155900.C803F82ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: ppc-jit-backend Changeset: r50431:7ef8482ed95b Date: 2011-12-12 16:58 +0100 http://bitbucket.org/pypy/pypy/changeset/7ef8482ed95b/ Log: (sven, arigo) Fix a bug, and add a print that is useful for debugging with gdb. 
diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -162,12 +162,15 @@ self._save_nonvolatiles() # save r31, use r30 as scratch register # this is safe because r30 has been saved already + assert NONVOLATILES[-1] == r.SPP + ofs_to_r31 = (self.OFFSET_SPP_TO_GPR_SAVE_AREA + + WORD * (len(NONVOLATILES)-1)) if IS_PPC_32: self.mc.lwz(r.r30.value, r.SP.value, WORD) - self.mc.stw(r.r30.value, r.SPP.value, WORD * len(NONVOLATILES)) + self.mc.stw(r.r30.value, r.SPP.value, ofs_to_r31) else: self.mc.ld(r.r30.value, r.SP.value, WORD) - self.mc.std(r.r30.value, r.SPP.value, WORD * len(NONVOLATILES)) + self.mc.std(r.r30.value, r.SPP.value, ofs_to_r31) def setup_failure_recovery(self): @@ -733,8 +736,11 @@ self.datablockwrapper.done() self.datablockwrapper = None allblocks = self.get_asmmemmgr_blocks(looptoken) - return self.mc.materialize(self.cpu.asmmemmgr, allblocks, - self.cpu.gc_ll_descr.gcrootmap) + start = self.mc.materialize(self.cpu.asmmemmgr, allblocks, + self.cpu.gc_ll_descr.gcrootmap) + from pypy.rlib.rarithmetic import r_uint + print "=== Loop start is at %s ===" % hex(r_uint(start)) + return start def write_pending_failure_recoveries(self): for tok in self.pending_guards: From noreply at buildbot.pypy.org Mon Dec 12 17:12:19 2011 From: noreply at buildbot.pypy.org (hager) Date: Mon, 12 Dec 2011 17:12:19 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: (bivab, hager): do stack pointer alignment Message-ID: <20111212161219.1B57782ABD@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50432:55b2c881ec01 Date: 2011-12-12 17:11 +0100 http://bitbucket.org/pypy/pypy/changeset/55b2c881ec01/ Log: (bivab, hager): do stack pointer alignment diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -729,6 +729,10 @@ + PARAMETER_AREA + BACKCHAIN_SIZE * WORD) + # align stack pointer + while frame_depth % (4 * WORD) != 0: + frame_depth += WORD + return frame_depth def materialize_loop(self, looptoken, show): From noreply at buildbot.pypy.org Mon Dec 12 17:30:53 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Dec 2011 17:30:53 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: mention the default build contains stackless, remove the stackless build Message-ID: <20111212163053.8A60582ABD@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r297:ba6be317c363 Date: 2011-12-12 18:29 +0200 http://bitbucket.org/pypy/pypy.org/changeset/ba6be317c363/ Log: mention the default build contains stackless, remove the stackless build instruction diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -65,7 +65,8 @@

“JIT Compiler” version

These binaries include a Just-in-Time compiler. They only work on x86 CPUs that have the SSE2 instruction set (most of -them do, nowadays), or on x86-64 CPUs. +them do, nowadays), or on x86-64 CPUs. They also contain stackless +extensions, like greenlets. (This is the official release 1.7; for the most up-to-date version see below.)

    @@ -84,8 +85,6 @@ release is too old for what you want to do.
  • No JIT: A version without the JIT. Consumes a bit less memory and may be faster on short-running scripts.
  • -
  • Stackless: Provides Stackless extensions, as well as greenlets. -It is not possible right now to combine Stackless features with the JIT.
  • Sandboxing: A special safe version. Read the docs about sandboxing. (It is also possible to translate a version that includes both sandboxing and the JIT compiler, although as the JIT is relatively @@ -134,7 +133,6 @@ pypy translate.py -Ojit # get the JIT version pypy translate.py -O2 # get the no-jit version pypy translate.py -O2 --sandbox # get the sandbox version -pypy translate.py -O2 --stackless # get the stackless version pypy translate.py -Ojit --backend=cli # only for branch/cli-jit
  • diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -34,7 +34,8 @@ These binaries include a Just-in-Time compiler. They only work on x86 CPUs that have the SSE2_ instruction set (most of -them do, nowadays), or on x86-64 CPUs. +them do, nowadays), or on x86-64 CPUs. They also contain `stackless`_ +extensions, like `greenlets`_. (This is the official release 1.7; for the most up-to-date version see below.) @@ -65,9 +66,6 @@ * No JIT: A version without the JIT. Consumes a bit less memory and may be faster on short-running scripts. -* Stackless: Provides Stackless_ extensions, as well as greenlets_. - It is not possible right now to combine Stackless features with the JIT. - * Sandboxing: A special safe version. Read the docs about sandboxing_. (It is also possible to translate_ a version that includes both sandboxing and the JIT compiler, although as the JIT is relatively @@ -126,7 +124,6 @@ pypy translate.py -Ojit # get the JIT version pypy translate.py -O2 # get the no-jit version pypy translate.py -O2 --sandbox # get the sandbox version - pypy translate.py -O2 --stackless # get the stackless version pypy translate.py -Ojit --backend=cli # only for branch/cli-jit 5. Enjoy Mandelbrot ``:-)`` It takes on the order of half an hour to From noreply at buildbot.pypy.org Mon Dec 12 17:30:54 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Dec 2011 17:30:54 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: merge Message-ID: <20111212163054.900AC82ABD@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r298:86a7edc8b73e Date: 2011-12-12 18:30 +0200 http://bitbucket.org/pypy/pypy.org/changeset/86a7edc8b73e/ Log: merge From noreply at buildbot.pypy.org Mon Dec 12 18:14:47 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Mon, 12 Dec 2011 18:14:47 +0100 (CET) Subject: [pypy-commit] pypy numpy-pi-sum-min-max: Added pi, sum, min, and max Message-ID: <20111212171447.2B48982ABD@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: numpy-pi-sum-min-max Changeset: r50433:d4d7f068865e Date: 2011-12-12 11:01 -0500 http://bitbucket.org/pypy/pypy/changeset/d4d7f068865e/ Log: Added pi, sum, min, and max diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -79,8 +79,12 @@ appleveldefs = { 'average': 'app_numpy.average', 'mean': 'app_numpy.mean', + 'sum': 'app_numpy.sum', + 'min': 'app_numpy.min', + 'max': 'app_numpy.max', 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', + 'pi': 'app_numpy.pi', 'arange': 'app_numpy.arange', 'reshape': 'app_numpy.reshape', } diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -5,6 +5,7 @@ inf = float("inf") e = math.e +pi = math.pi def average(a): @@ -18,6 +19,20 @@ a = numpypy.array(a) return a.mean() +def sum(a): + if not hasattr(a, "sum"): + a = numpypy.array(a) + return a.sum() + +def min(a): + if not hasattr(a, "min"): + a = numpypy.array(a) + return a.min() + +def max(a): + if not hasattr(a, "max"): + a = numpypy.array(a) + return a.max() def arange(start, stop=None, step=1, dtype=None): '''arange([start], stop[, step], dtype=None) diff --git a/pypy/module/micronumpy/test/test_module.py b/pypy/module/micronumpy/test/test_module.py --- a/pypy/module/micronumpy/test/test_module.py +++ 
b/pypy/module/micronumpy/test/test_module.py @@ -11,11 +11,28 @@ from numpypy import array, average assert average(range(10)) == 4.5 assert average(array(range(10))) == 4.5 + + def test_sum(self): + from numpypy import array, sum + assert sum(range(10)) == 45 + assert sum(array(range(10))) == 45 + + def test_min(self): + from numpypy import array, min + assert min(range(10)) == 0 + assert min(array(range(10))) == 0 + + def test_max(self): + from numpypy import array, max + assert max(range(10)) == 9 + assert max(array(range(10))) == 9 def test_constants(self): import math - from numpypy import inf, e + from numpypy import inf, e, pi assert type(inf) is float assert inf == float("inf") assert e == math.e assert type(e) is float + assert pi == math.pi + assert type(pi) is float From noreply at buildbot.pypy.org Mon Dec 12 18:21:40 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 18:21:40 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: Another simplification: _x86_arglocs can be just one list, instead Message-ID: <20111212172140.AA94882ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50434:3c385108e3da Date: 2011-12-12 18:21 +0100 http://bitbucket.org/pypy/pypy/changeset/3c385108e3da/ Log: Another simplification: _x86_arglocs can be just one list, instead of a tuple of two lists. diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -1367,51 +1367,54 @@ # we would like the boxes to be after the jump. def _compute_hint_frame_locations_from_descr(self, descr): - nonfloatlocs, floatlocs = descr._x86_arglocs + arglocs = descr._x86_arglocs jump_op = self.final_jump_op - assert len(nonfloatlocs) == jump_op.numargs() + assert len(arglocs) == jump_op.numargs() for i in range(jump_op.numargs()): box = jump_op.getarg(i) if isinstance(box, Box): - loc = nonfloatlocs[i] + loc = arglocs[i] if isinstance(loc, StackLoc): - assert box.type != FLOAT self.fm.hint_frame_locations[box] = loc - else: - loc = floatlocs[i] - if isinstance(loc, StackLoc): - assert box.type == FLOAT - self.fm.hint_frame_locations[box] = loc def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None descr = op.getdescr() assert isinstance(descr, TargetToken) - nonfloatlocs, floatlocs = descr._x86_arglocs + arglocs = descr._x86_arglocs self.jump_target_descr = descr # compute 'tmploc' to be all_regs[0] by spilling what is there - box = TempBox() - box1 = TempBox() + tmpbox1 = TempBox() + tmpbox2 = TempBox() tmpreg = X86RegisterManager.all_regs[0] - tmploc = self.rm.force_allocate_reg(box, selected_reg=tmpreg) + self.rm.force_allocate_reg(tmpbox1, selected_reg=tmpreg) xmmtmp = X86XMMRegisterManager.all_regs[0] - self.xrm.force_allocate_reg(box1, selected_reg=xmmtmp) + self.xrm.force_allocate_reg(tmpbox2, selected_reg=xmmtmp) # Part about non-floats - # XXX we don't need a copy, we only just the original list - src_locations1 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type != FLOAT] - assert tmploc not in nonfloatlocs - dst_locations1 = [loc for loc in nonfloatlocs if loc is not None] + src_locations1 = [] + dst_locations1 = [] # Part about floats - src_locations2 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type == FLOAT] - dst_locations2 = [loc for loc in floatlocs if loc is not None] + src_locations2 = [] + dst_locations2 = [] + # Build the four 
lists + for i in range(op.numargs()): + box = op.getarg(i) + src_loc = self.loc(box) + dst_loc = arglocs[i] + assert dst_loc != tmpreg and dst_loc != xmmtmp + if box.type != FLOAT: + src_locations1.append(src_loc) + dst_locations1.append(dst_loc) + else: + src_locations2.append(src_loc) + dst_locations2.append(dst_loc) + # Do the remapping remap_frame_layout_mixed(assembler, - src_locations1, dst_locations1, tmploc, + src_locations1, dst_locations1, tmpreg, src_locations2, dst_locations2, xmmtmp) - self.rm.possibly_free_var(box) - self.xrm.possibly_free_var(box1) + self.rm.possibly_free_var(tmpbox1) + self.xrm.possibly_free_var(tmpbox2) self.possibly_free_vars_for_op(op) assembler.closing_jump(self.jump_target_descr) @@ -1463,12 +1466,10 @@ self.rm.force_allocate_frame_reg(op.result) def consider_label(self, op): - # XXX big refactoring needed? descr = op.getdescr() assert isinstance(descr, TargetToken) inputargs = op.getarglist() - floatlocs = [None] * len(inputargs) - nonfloatlocs = [None] * len(inputargs) + arglocs = [None] * len(inputargs) # # we need to make sure that the tmpreg and xmmtmp are free tmpreg = X86RegisterManager.all_regs[0] @@ -1493,10 +1494,7 @@ assert not isinstance(arg, Const) loc = self.loc(arg) assert not (loc is tmpreg or loc is xmmtmp or loc is ebp) - if arg.type == FLOAT: - floatlocs[i] = loc - else: - nonfloatlocs[i] = loc + arglocs[i] = loc if isinstance(loc, RegLoc): self.fm.mark_as_free(arg) # @@ -1504,7 +1502,7 @@ # get overridden by redirect_call_assembler(). (rare case) self.flush_loop() # - descr._x86_arglocs = nonfloatlocs, floatlocs + descr._x86_arglocs = arglocs descr._x86_loop_code = self.assembler.mc.get_relative_pos() descr._x86_clt = self.assembler.current_clt self.assembler.target_tokens_currently_compiling[descr] = None @@ -1518,23 +1516,6 @@ if jump_op is not None and jump_op.getdescr() is descr: self._compute_hint_frame_locations_from_descr(descr) -## from pypy.rpython.annlowlevel import llhelper -## def fn(addr): -## print '...label:', hex(addr), nonfloatlocs -## FUNC = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) -## ll_disp = llhelper(FUNC, fn) -## faddr = rffi.cast(lltype.Signed, ll_disp) -## for i in range(16): -## self.assembler.mc.PUSH_r(i) -## self.assembler.mc.CALL_l(0) -## self.assembler.mc.POP(edi) -## self.assembler.mc.MOV(r11, imm(faddr)) -## self.assembler.mc.CALL(r11) -## for i in range(15, -1, -1): -## if i == esp.value: -## i -= 1 -## self.assembler.mc.POP_r(i) - def not_implemented_op(self, op): not_implemented("not implemented operation: %s" % op.getopname()) From noreply at buildbot.pypy.org Mon Dec 12 18:24:41 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 18:24:41 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: fix comment (thanks fijal) Message-ID: <20111212172441.569E482ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-simplify-backendintf Changeset: r50435:d56a7f15aeba Date: 2011-12-12 18:24 +0100 http://bitbucket.org/pypy/pypy/changeset/d56a7f15aeba/ Log: fix comment (thanks fijal) diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -333,8 +333,8 @@ argtypes = [v.concretetype for v in _variables] try: clt._debug_argtypes = argtypes - except AttributeError: # when translated - pass + except AttributeError: # when 'clt' is actually a translated + pass # GcStruct def compile_add(loop, opnum): loop = _from_opaque(loop) From noreply at 
buildbot.pypy.org Mon Dec 12 18:49:12 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Mon, 12 Dec 2011 18:49:12 +0100 (CET) Subject: [pypy-commit] pypy default: Merged numpy-pi-sum-min-max, adding numpypy.pi, .sum, .min, and .max Message-ID: <20111212174912.026D982ABD@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: Changeset: r50436:863190739032 Date: 2011-12-12 12:43 -0500 http://bitbucket.org/pypy/pypy/changeset/863190739032/ Log: Merged numpy-pi-sum-min-max, adding numpypy.pi, .sum, .min, and .max diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -79,8 +79,12 @@ appleveldefs = { 'average': 'app_numpy.average', 'mean': 'app_numpy.mean', + 'sum': 'app_numpy.sum', + 'min': 'app_numpy.min', + 'max': 'app_numpy.max', 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', + 'pi': 'app_numpy.pi', 'arange': 'app_numpy.arange', 'reshape': 'app_numpy.reshape', } diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -5,6 +5,7 @@ inf = float("inf") e = math.e +pi = math.pi def average(a): @@ -18,6 +19,20 @@ a = numpypy.array(a) return a.mean() +def sum(a): + if not hasattr(a, "sum"): + a = numpypy.array(a) + return a.sum() + +def min(a): + if not hasattr(a, "min"): + a = numpypy.array(a) + return a.min() + +def max(a): + if not hasattr(a, "max"): + a = numpypy.array(a) + return a.max() def arange(start, stop=None, step=1, dtype=None): '''arange([start], stop[, step], dtype=None) diff --git a/pypy/module/micronumpy/test/test_module.py b/pypy/module/micronumpy/test/test_module.py --- a/pypy/module/micronumpy/test/test_module.py +++ b/pypy/module/micronumpy/test/test_module.py @@ -11,11 +11,28 @@ from numpypy import array, average assert average(range(10)) == 4.5 assert average(array(range(10))) == 4.5 + + def test_sum(self): + from numpypy import array, sum + assert sum(range(10)) == 45 + assert sum(array(range(10))) == 45 + + def test_min(self): + from numpypy import array, min + assert min(range(10)) == 0 + assert min(array(range(10))) == 0 + + def test_max(self): + from numpypy import array, max + assert max(range(10)) == 9 + assert max(array(range(10))) == 9 def test_constants(self): import math - from numpypy import inf, e + from numpypy import inf, e, pi assert type(inf) is float assert inf == float("inf") assert e == math.e assert type(e) is float + assert pi == math.pi + assert type(pi) is float From noreply at buildbot.pypy.org Mon Dec 12 18:52:09 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Mon, 12 Dec 2011 18:52:09 +0100 (CET) Subject: [pypy-commit] pypy numpy-pi-sum-min-max: Close merged branch Message-ID: <20111212175209.106DD82ABD@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: numpy-pi-sum-min-max Changeset: r50437:53d2c2028ef3 Date: 2011-12-12 12:51 -0500 http://bitbucket.org/pypy/pypy/changeset/53d2c2028ef3/ Log: Close merged branch From noreply at buildbot.pypy.org Mon Dec 12 19:13:21 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Dec 2011 19:13:21 +0100 (CET) Subject: [pypy-commit] pypy default: kill redundant test Message-ID: <20111212181321.47CF082ABD@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50438:770cb2b481e2 Date: 2011-12-12 20:12 +0200 http://bitbucket.org/pypy/pypy/changeset/770cb2b481e2/ Log: kill redundant test diff --git 
a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -250,22 +250,6 @@ 'int_ge': 1, 'guard_false': 1, 'jump': 1}) - def define_slice2(): - return """ - a = |30| - s1 = a -> :20:2 - s2 = a -> :30:3 - b = s1 + s2 - b -> 3 - """ - - def test_slice2(self): - result = self.run("slice2") - assert result == 15 - self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, - 'setinteriorfield_raw': 1, 'int_add': 3, - 'int_ge': 1, 'guard_false': 1, 'jump': 1}) - def define_multidim(): return """ a = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] From noreply at buildbot.pypy.org Mon Dec 12 19:17:49 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 19:17:49 +0100 (CET) Subject: [pypy-commit] pypy default: Fix reset_stats() to clear a bit more things. It's still not Message-ID: <20111212181749.0571F82ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50439:2edda8cbb1b0 Date: 2011-12-12 19:17 +0100 http://bitbucket.org/pypy/pypy/changeset/2edda8cbb1b0/ Log: Fix reset_stats() to clear a bit more things. It's still not perfect, but better... diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -981,15 +981,19 @@ self.aborted_keys = [] self.invalidated_token_numbers = set() # <- not RPython self.jitcell_token_wrefs = [] + self.jitcell_dicts = [] # <- not RPython def clear(self): del self.loops[:] del self.locations[:] del self.aborted_keys[:] + del self.jitcell_token_wrefs[:] self.invalidated_token_numbers.clear() self.compiled_count = 0 self.enter_count = 0 self.aborted_count = 0 + for dict in self.jitcell_dicts: + dict.clear() def add_jitcell_token(self, token): assert isinstance(token, JitCellToken) diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -439,6 +439,10 @@ return x # jitcell_dict = r_dict(comparekey, hashkey) + try: + self.warmrunnerdesc.stats.jitcell_dicts.append(jitcell_dict) + except AttributeError: + pass # def get_jitcell(build, *greenargs): try: From noreply at buildbot.pypy.org Mon Dec 12 19:17:50 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 19:17:50 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20111212181750.3FC0D82ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50440:c805becb84aa Date: 2011-12-12 19:17 +0100 http://bitbucket.org/pypy/pypy/changeset/c805becb84aa/ Log: merge heads diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -79,8 +79,12 @@ appleveldefs = { 'average': 'app_numpy.average', 'mean': 'app_numpy.mean', + 'sum': 'app_numpy.sum', + 'min': 'app_numpy.min', + 'max': 'app_numpy.max', 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', + 'pi': 'app_numpy.pi', 'arange': 'app_numpy.arange', 'reshape': 'app_numpy.reshape', } diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -5,6 +5,7 @@ inf = float("inf") e = math.e +pi = math.pi def average(a): @@ -18,6 +19,20 @@ a = numpypy.array(a) return a.mean() +def sum(a): + if not hasattr(a, "sum"): + a = numpypy.array(a) + return a.sum() + +def 
min(a): + if not hasattr(a, "min"): + a = numpypy.array(a) + return a.min() + +def max(a): + if not hasattr(a, "max"): + a = numpypy.array(a) + return a.max() def arange(start, stop=None, step=1, dtype=None): '''arange([start], stop[, step], dtype=None) diff --git a/pypy/module/micronumpy/test/test_module.py b/pypy/module/micronumpy/test/test_module.py --- a/pypy/module/micronumpy/test/test_module.py +++ b/pypy/module/micronumpy/test/test_module.py @@ -11,11 +11,28 @@ from numpypy import array, average assert average(range(10)) == 4.5 assert average(array(range(10))) == 4.5 + + def test_sum(self): + from numpypy import array, sum + assert sum(range(10)) == 45 + assert sum(array(range(10))) == 45 + + def test_min(self): + from numpypy import array, min + assert min(range(10)) == 0 + assert min(array(range(10))) == 0 + + def test_max(self): + from numpypy import array, max + assert max(range(10)) == 9 + assert max(array(range(10))) == 9 def test_constants(self): import math - from numpypy import inf, e + from numpypy import inf, e, pi assert type(inf) is float assert inf == float("inf") assert e == math.e assert type(e) is float + assert pi == math.pi + assert type(pi) is float diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -250,22 +250,6 @@ 'int_ge': 1, 'guard_false': 1, 'jump': 1}) - def define_slice2(): - return """ - a = |30| - s1 = a -> :20:2 - s2 = a -> :30:3 - b = s1 + s2 - b -> 3 - """ - - def test_slice2(self): - result = self.run("slice2") - assert result == 15 - self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, - 'setinteriorfield_raw': 1, 'int_add': 3, - 'int_ge': 1, 'guard_false': 1, 'jump': 1}) - def define_multidim(): return """ a = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] From noreply at buildbot.pypy.org Mon Dec 12 19:26:22 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 12 Dec 2011 19:26:22 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: anotehr item Message-ID: <20111212182622.91CEB82ABD@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r3982:160ba3f22a89 Date: 2011-12-12 13:26 -0500 http://bitbucket.org/pypy/extradoc/changeset/160ba3f22a89/ Log: anotehr item diff --git a/planning/micronumpy.txt b/planning/micronumpy.txt --- a/planning/micronumpy.txt +++ b/planning/micronumpy.txt @@ -19,3 +19,4 @@ - axis= parameter to various methods +- expose ndarray.ctypes From noreply at buildbot.pypy.org Mon Dec 12 21:24:58 2011 From: noreply at buildbot.pypy.org (exarkun) Date: Mon, 12 Dec 2011 21:24:58 +0100 (CET) Subject: [pypy-commit] pypy pyarg-parsetuple-s-star-buffer: Make PyBufferObject public so cpython_struct can work Message-ID: <20111212202458.6F27C82ABD@wyvern.cs.uni-duesseldorf.de> Author: Jean-Paul Calderone Branch: pyarg-parsetuple-s-star-buffer Changeset: r50441:339b179f2f51 Date: 2011-12-12 15:23 -0500 http://bitbucket.org/pypy/pypy/changeset/339b179f2f51/ Log: Make PyBufferObject public so cpython_struct can work diff --git a/pypy/module/cpyext/include/bufferobject.h b/pypy/module/cpyext/include/bufferobject.h --- a/pypy/module/cpyext/include/bufferobject.h +++ b/pypy/module/cpyext/include/bufferobject.h @@ -9,6 +9,17 @@ extern "C" { #endif +typedef struct { + PyObject_HEAD + PyObject *b_base; + void *b_ptr; + Py_ssize_t b_size; + Py_ssize_t b_offset; + int b_readonly; + long b_hash; +} PyBufferObject; + + PyAPI_DATA(PyTypeObject) PyBuffer_Type; #define 
PyBuffer_Check(op) (((PyObject*)(op))->ob_type == &PyBuffer_Type) diff --git a/pypy/module/cpyext/src/bufferobject.c b/pypy/module/cpyext/src/bufferobject.c --- a/pypy/module/cpyext/src/bufferobject.c +++ b/pypy/module/cpyext/src/bufferobject.c @@ -4,17 +4,6 @@ #include "Python.h" -typedef struct { - PyObject_HEAD - PyObject *b_base; - void *b_ptr; - Py_ssize_t b_size; - Py_ssize_t b_offset; - int b_readonly; - long b_hash; -} PyBufferObject; - - enum buffer_t { READ_BUFFER, WRITE_BUFFER, From noreply at buildbot.pypy.org Mon Dec 12 21:24:59 2011 From: noreply at buildbot.pypy.org (exarkun) Date: Mon, 12 Dec 2011 21:24:59 +0100 (CET) Subject: [pypy-commit] pypy pyarg-parsetuple-s-star-buffer: Try to teach cpyext about buffer objects Message-ID: <20111212202459.942A282ABD@wyvern.cs.uni-duesseldorf.de> Author: Jean-Paul Calderone Branch: pyarg-parsetuple-s-star-buffer Changeset: r50442:1396a5482b12 Date: 2011-12-12 15:24 -0500 http://bitbucket.org/pypy/pypy/changeset/1396a5482b12/ Log: Try to teach cpyext about buffer objects diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -46,6 +46,7 @@ import pypy.module.cpyext.listobject import pypy.module.cpyext.sequence import pypy.module.cpyext.buffer +import pypy.module.cpyext.bufferobject import pypy.module.cpyext.eval import pypy.module.cpyext.import_ import pypy.module.cpyext.mapping diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -317,6 +317,10 @@ INTERPLEVEL_API = {} FUNCTIONS = {} + +# These are C symbols which cpyext will export, but which are defined in .c +# files somewhere in the implementation of cpyext (rather than being defined in +# RPython). SYMBOLS_C = [ 'Py_FatalError', 'PyOS_snprintf', 'PyOS_vsnprintf', 'PyArg_Parse', 'PyArg_ParseTuple', 'PyArg_UnpackTuple', 'PyArg_ParseTupleAndKeywords', @@ -1055,7 +1059,6 @@ boxed_args = () to_decref = [] assert len(args) == len(FT.ARGS) - import pdb; pdb.set_trace() for i, ARG in unrolling_arg_types: arg = args[i] if is_PyObject(ARG): diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/bufferobject.py @@ -0,0 +1,62 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, + PyObjectFields, PyObject) +from pypy.module.cpyext.pyobject import make_typedescr +from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer + + +PyBufferObjectStruct = lltype.ForwardReference() +PyBufferObject = lltype.Ptr(PyBufferObjectStruct) +PyBufferObjectFields = PyObjectFields + ( + ("b_base", PyObject), + ("b_ptr", rffi.VOIDP), + ("b_size", Py_ssize_t), + ("b_offset", Py_ssize_t), + ("b_readonly", rffi.INT), + ("b_hash", rffi.LONG), + ) + +cpython_struct("PyBufferObject", PyBufferObjectFields, PyBufferObjectStruct) + + at bootstrap_function +def init_bufferobject(space): + "Type description of PyBufferObject" + make_typedescr(space.gettypefor(Buffer).instancetypedef, + basestruct=PyBufferObject.TO, + attach=buffer_attach, + # dealloc=buffer_dealloc, + realize=buffer_realize) + +def buffer_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyBufferObject with the given (str) buffer object. 
+ """ + py_buf = rffi.cast(PyBufferObject, py_obj) + py_buf.c_b_offset = 0 + py_buf.c_b_readonly = 1 + py_buf.c_b_hash = -1 + + if isinstance(w_obj, SubBuffer): + py_buf.c_b_offset = w_obj.offset + w_obj = w_obj.buffer + + if isinstance(w_obj, StringBuffer): + py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value) + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.as_str())) + py_buf.c_b_size = w_obj.getlength() + else: + raise Exception("Fail fail fail fail fail") + + +def buffer_realize(space, py_obj): + """ + Creates the buffer in the PyPy interpreter from a cpyext representation. + """ + raise Exception("realize fail fail fail") + + + +# @cpython_api([PyObject], lltype.Void, external=False) +# def buffer_dealloc(space, py_obj): + diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -113,6 +113,8 @@ @specialize.memo() def _get_typedescr_1(typedef): + if typedef.name == "buffer": + import pdb; pdb.set_trace() try: return typedescr_cache[typedef] except KeyError: From noreply at buildbot.pypy.org Mon Dec 12 21:28:39 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Mon, 12 Dec 2011 21:28:39 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: kill constant_inputargs and aliases from the exported state and encode it all in inputarg_setup_ops instead Message-ID: <20111212202839.C65C782ABD@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50443:667ace56adf5 Date: 2011-12-12 19:53 +0100 http://bitbucket.org/pypy/pypy/changeset/667ace56adf5/ Log: kill constant_inputargs and aliases from the exported state and encode it all in inputarg_setup_ops instead diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -141,26 +141,15 @@ inputargs = virtual_state.make_inputargs(values, self.optimizer) short_inputargs = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True) - constant_inputargs = {} - for box in jump_args: - const = self.get_constant_box(box) - if const: - constant_inputargs[box] = const - - short_boxes = ShortBoxes(self.optimizer, inputargs + constant_inputargs.keys()) - aliased_vrituals = {} - for i in range(len(original_jump_args)): - if original_jump_args[i] is not jump_args[i]: - if values[i].is_virtual(): - aliased_vrituals[original_jump_args[i]] = jump_args[i] - else: - short_boxes.alias(original_jump_args[i], jump_args[i]) + short_boxes = ShortBoxes(self.optimizer, inputargs) self.optimizer.clear_newoperations() - for box in short_inputargs: - value = self.getvalue(box) - if value.is_virtual(): - value.force_box(self.optimizer) + for i in range(len(original_jump_args)): + if values[i].is_virtual(): + values[i].force_box(self.optimizer) + if original_jump_args[i] is not jump_args[i]: + op = ResOperation(rop.SAME_AS, [jump_args[i]], original_jump_args[i]) + self.optimizer.emit_operation(op) inputarg_setup_ops = self.optimizer.get_newoperations() target_token = targetop.getdescr() @@ -169,9 +158,9 @@ target_token.virtual_state = virtual_state target_token.short_preamble = [ResOperation(rop.LABEL, short_inputargs, None)] target_token.start_resumedescr = start_resumedescr - target_token.exported_state = ExportedState(constant_inputargs, short_boxes, + target_token.exported_state = ExportedState(short_boxes, inputarg_setup_ops, self.optimizer, - aliased_vrituals, 
jump_args) + jump_args) def import_state(self, targetop): self.did_import = False @@ -191,8 +180,6 @@ self.short = target_token.short_preamble[:] self.short_seen = {} self.short_boxes = exported_state.short_boxes.clone() - for box, const in exported_state.constant_inputargs.items(): - self.short_seen[box] = True self.imported_state = exported_state self.inputargs = targetop.getarglist() self.initial_virtual_state = target_token.virtual_state @@ -207,9 +194,6 @@ value = self.optimizer.getvalue(box) value.import_from(preamble_value, self.optimizer) - for newbox, oldbox in self.short_boxes.aliases.items(): - self.optimizer.make_equal_to(newbox, self.optimizer.getvalue(oldbox)) - # Setup the state of the new optimizer by emiting the # short operations and discarding the result self.optimizer.emitting_dissabled = True @@ -231,13 +215,9 @@ self.short_boxes.alias(newresult, op.result) op = ResOperation(rop.SAME_AS, [op.result], newresult) self.optimizer._newoperations = [op] + self.optimizer._newoperations # XXX - #self.optimizer.getvalue(op.result).box = op.result # FIXME: HACK!!! self.optimizer.flush() self.optimizer.emitting_dissabled = False - for box, key_box in exported_state.aliased_vrituals.items(): - self.optimizer.make_equal_to(box, self.getvalue(key_box)) - def close_bridge(self, start_label): inputargs = self.inputargs short_jumpargs = inputargs[:] @@ -266,7 +246,6 @@ def close_loop(self, jumpop): virtual_state = self.initial_virtual_state short_inputargs = self.short[0].getarglist() - constant_inputargs = self.imported_state.constant_inputargs inputargs = self.inputargs short_jumpargs = inputargs[:] @@ -289,8 +268,6 @@ raise InvalidLoop args[short_inputargs[i]] = jmp_to_short_args[i] self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) - for box, const in constant_inputargs.items(): - self.short_inliner.argmap[box] = const for op in self.short[1:]: newop = self.short_inliner.inline_op(op) self.optimizer.send_extra_operation(newop) @@ -381,8 +358,6 @@ newargs[i] = a.clonebox() boxmap[a] = newargs[i] inliner = Inliner(short_inputargs, newargs) - for box, const in self.imported_state.constant_inputargs.items(): - inliner.argmap[box] = const for i in range(len(short)): short[i] = inliner.inline_op(short[i]) @@ -592,12 +567,9 @@ self.unroll.add_op_to_short(self.op, False, True) class ExportedState(object): - def __init__(self, constant_inputargs, - short_boxes, inputarg_setup_ops, optimizer, aliased_vrituals, + def __init__(self, short_boxes, inputarg_setup_ops, optimizer, jump_args): - self.constant_inputargs = constant_inputargs self.short_boxes = short_boxes self.inputarg_setup_ops = inputarg_setup_ops self.optimizer = optimizer - self.aliased_vrituals = aliased_vrituals self.jump_args = jump_args From noreply at buildbot.pypy.org Mon Dec 12 21:28:40 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Mon, 12 Dec 2011 21:28:40 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: fix Message-ID: <20111212202840.E7DC982ABD@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50444:faa7d41a5f50 Date: 2011-12-12 21:28 +0100 http://bitbucket.org/pypy/pypy/changeset/faa7d41a5f50/ Log: fix diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -453,6 +453,7 @@ def clear_newoperations(self): self._newoperations = [] + self.seen_results = {} def make_equal_to(self, box, value, 
replace=False): assert isinstance(value, OptValue) From noreply at buildbot.pypy.org Mon Dec 12 21:34:16 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Dec 2011 21:34:16 +0100 (CET) Subject: [pypy-commit] pypy default: (arigo) merge jit-simplify-backendintf branch. This branch kills Message-ID: <20111212203416.89CAE82ABD@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50445:04116407532d Date: 2011-12-12 22:31 +0200 http://bitbucket.org/pypy/pypy/changeset/04116407532d/ Log: (arigo) merge jit-simplify-backendintf branch. This branch kills set_future_value_xxx and replaces it with a function accepting normal calling convention. Kills the need for a special prologue (used by execute_token) with the one that's only necessary for execute_assembler diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -328,6 +328,14 @@ _variables.append(v) return r +def compile_started_vars(clt): + if not hasattr(clt, '_debug_argtypes'): # only when compiling the loop + argtypes = [v.concretetype for v in _variables] + try: + clt._debug_argtypes = argtypes + except AttributeError: # when 'clt' is actually a translated + pass # GcStruct + def compile_add(loop, opnum): loop = _from_opaque(loop) loop.operations.append(Operation(opnum)) @@ -355,11 +363,13 @@ TARGET_TOKENS = weakref.WeakKeyDictionary() -def compile_add_target_token(loop, descr): +def compile_add_target_token(loop, descr, clt): + # here, 'clt' is the compiled_loop_token of the original loop that + # we are compiling loop = _from_opaque(loop) op = loop.operations[-1] descrobj = _normalize(descr) - TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args + TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args, clt def compile_add_var(loop, intvar): loop = _from_opaque(loop) @@ -395,17 +405,25 @@ _variables.append(v) return r -def compile_add_jump_target(loop, targettoken): +def compile_add_jump_target(loop, targettoken, source_clt): loop = _from_opaque(loop) descrobj = _normalize(targettoken) - loop_target, target_opindex, target_inputargs = TARGET_TOKENS[descrobj] + (loop_target, target_opindex, target_inputargs, target_clt + ) = TARGET_TOKENS[descrobj] + # + try: + assert source_clt._debug_argtypes == target_clt._debug_argtypes + except AttributeError: # when translated + pass # op = loop.operations[-1] op.jump_target = loop_target op.jump_target_opindex = target_opindex op.jump_target_inputargs = target_inputargs assert op.opnum == rop.JUMP - assert len(op.args) == len(target_inputargs) + assert [v.concretetype for v in op.args] == ( + [v.concretetype for v in target_inputargs]) + # if loop_target == loop: log.info("compiling new loop") else: @@ -987,6 +1005,7 @@ self._may_force = self.opindex try: inpargs = _from_opaque(ctl.compiled_version).inputargs + assert len(inpargs) == len(args) for i, inparg in enumerate(inpargs): TYPE = inparg.concretetype if TYPE is lltype.Signed: @@ -1816,6 +1835,7 @@ setannotation(compile_start_int_var, annmodel.SomeInteger()) setannotation(compile_start_ref_var, annmodel.SomeInteger()) setannotation(compile_start_float_var, annmodel.SomeInteger()) +setannotation(compile_started_vars, annmodel.s_None) setannotation(compile_add, annmodel.s_None) setannotation(compile_add_descr, annmodel.s_None) setannotation(compile_add_descr_arg, annmodel.s_None) diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- 
a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -138,11 +138,12 @@ clt = original_loop_token.compiled_loop_token clt.loop_and_bridges.append(c) clt.compiling_a_bridge() - self._compile_loop_or_bridge(c, inputargs, operations) + self._compile_loop_or_bridge(c, inputargs, operations, clt) old, oldindex = faildescr._compiled_fail llimpl.compile_redirect_fail(old, oldindex, c) - def compile_loop(self, inputargs, operations, jitcell_token, log=True, name=''): + def compile_loop(self, inputargs, operations, jitcell_token, + log=True, name=''): """In a real assembler backend, this should assemble the given list of operations. Here we just generate a similar CompiledLoop instance. The code here is RPython, whereas the code in llimpl @@ -153,14 +154,14 @@ clt.loop_and_bridges = [c] clt.compiled_version = c jitcell_token.compiled_loop_token = clt - self._compile_loop_or_bridge(c, inputargs, operations) + self._compile_loop_or_bridge(c, inputargs, operations, clt) def free_loop_and_bridges(self, compiled_loop_token): for c in compiled_loop_token.loop_and_bridges: llimpl.mark_as_free(c) model.AbstractCPU.free_loop_and_bridges(self, compiled_loop_token) - def _compile_loop_or_bridge(self, c, inputargs, operations): + def _compile_loop_or_bridge(self, c, inputargs, operations, clt): var2index = {} for box in inputargs: if isinstance(box, history.BoxInt): @@ -172,10 +173,11 @@ var2index[box] = llimpl.compile_start_float_var(c) else: raise Exception("box is: %r" % (box,)) - self._compile_operations(c, operations, var2index) + llimpl.compile_started_vars(clt) + self._compile_operations(c, operations, var2index, clt) return c - def _compile_operations(self, c, operations, var2index): + def _compile_operations(self, c, operations, var2index, clt): for op in operations: llimpl.compile_add(c, op.getopnum()) descr = op.getdescr() @@ -187,7 +189,7 @@ assert op.getopnum() != rop.JUMP llimpl.compile_add_loop_token(c, descr) if isinstance(descr, history.TargetToken) and op.getopnum() == rop.LABEL: - llimpl.compile_add_target_token(c, descr) + llimpl.compile_add_target_token(c, descr, clt) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython c._obj.externalobj.operations[-1].setdescr(descr) @@ -241,7 +243,7 @@ assert op.is_final() if op.getopnum() == rop.JUMP: targettoken = op.getdescr() - llimpl.compile_add_jump_target(c, targettoken) + llimpl.compile_add_jump_target(c, targettoken, clt) elif op.getopnum() == rop.FINISH: faildescr = op.getdescr() index = self.get_fail_descr_number(faildescr) @@ -260,23 +262,28 @@ self.latest_frame = frame return fail_index - def execute_token(self, loop_token): - """Calls the fake 'assembler' generated for the given loop. - Returns the descr of the last executed operation: either the one - attached to the failing guard, or the one attached to the FINISH. - Use set_future_value_xxx() before, and get_latest_value_xxx() after. 
- """ - fail_index = self._execute_token(loop_token) - return self.get_fail_descr_from_number(fail_index) - - def set_future_value_int(self, index, intvalue): - llimpl.set_future_value_int(index, intvalue) - - def set_future_value_ref(self, index, objvalue): - llimpl.set_future_value_ref(index, objvalue) - - def set_future_value_float(self, index, floatvalue): - llimpl.set_future_value_float(index, floatvalue) + def make_execute_token(self, *argtypes): + nb_args = len(argtypes) + unroll_argtypes = unrolling_iterable(list(enumerate(argtypes))) + # + def execute_token(loop_token, *args): + assert len(args) == nb_args + for index, TYPE in unroll_argtypes: + x = args[index] + assert TYPE == lltype.typeOf(x) + if TYPE == lltype.Signed: + llimpl.set_future_value_int(index, x) + elif TYPE == llmemory.GCREF: + llimpl.set_future_value_ref(index, x) + elif TYPE == longlong.FLOATSTORAGE: + llimpl.set_future_value_float(index, x) + else: + assert 0 + # + fail_index = self._execute_token(loop_token) + return self.get_fail_descr_from_number(fail_index) + # + return execute_token def get_latest_value_int(self, index): return llimpl.frame_int_getvalue(self.latest_frame, index) diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -69,6 +69,8 @@ self.bindings[box] = loc # index = self.get_loc_index(loc) + if index < 0: + return endindex = index + self.frame_size(box.type) while len(self.used) < endindex: self.used.append(False) @@ -91,6 +93,8 @@ # size = self.frame_size(box.type) baseindex = self.get_loc_index(loc) + if baseindex < 0: + return for i in range(size): index = baseindex + i assert 0 <= index < len(self.used) @@ -98,7 +102,8 @@ def try_to_reuse_location(self, box, loc): index = self.get_loc_index(loc) - assert index >= 0 + if index < 0: + return False size = self.frame_size(box.type) for i in range(size): while (index + i) >= len(self.used): @@ -158,7 +163,7 @@ if not we_are_translated() and self.box_types is not None: assert isinstance(v, TempBox) or v.type in self.box_types - def possibly_free_var(self, v): + def possibly_free_var(self, v, _hint_dont_reuse_quickly=False): """ If v is stored in a register and v is not used beyond the current position, then free it. Must be called at some point for all variables that might be in registers. @@ -168,7 +173,10 @@ return if v not in self.longevity or self.longevity[v][1] <= self.position: if v in self.reg_bindings: - self.free_regs.append(self.reg_bindings[v]) + if _hint_dont_reuse_quickly: + self.free_regs.insert(0, self.reg_bindings[v]) + else: + self.free_regs.append(self.reg_bindings[v]) del self.reg_bindings[v] if self.frame_manager is not None: self.frame_manager.mark_as_free(v) diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -1,5 +1,6 @@ from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.jit.metainterp import history +from pypy.rpython.lltypesystem import lltype class AbstractCPU(object): @@ -84,24 +85,21 @@ """Print a disassembled version of looptoken to stdout""" raise NotImplementedError - def execute_token(self, looptoken): - """Execute the generated code referenced by the looptoken. + def execute_token(self, looptoken, *args): + """NOT_RPYTHON (for tests only) + Execute the generated code referenced by the looptoken. 
Returns the descr of the last executed operation: either the one attached to the failing guard, or the one attached to the FINISH. - Use set_future_value_xxx() before, and get_latest_value_xxx() after. + Use get_latest_value_xxx() afterwards to read the result(s). """ - raise NotImplementedError + argtypes = [lltype.typeOf(x) for x in args] + execute = self.make_execute_token(*argtypes) + return execute(looptoken, *args) - def set_future_value_int(self, index, intvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_float(self, index, floatvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_ref(self, index, objvalue): - """Set the value for the index'th argument for the loop to run.""" + def make_execute_token(self, *argtypes): + """Must make and return an execute_token() function that will be + called with the given argtypes. + """ raise NotImplementedError def get_latest_value_int(self, index): diff --git a/pypy/jit/backend/test/calling_convention_test.py b/pypy/jit/backend/test/calling_convention_test.py --- a/pypy/jit/backend/test/calling_convention_test.py +++ b/pypy/jit/backend/test/calling_convention_test.py @@ -40,17 +40,18 @@ local_floats = list(floats) local_ints = list(ints) expected_result = 0.0 + arguments = [] for i in range(len(args)): x = args[i] if x[0] == 'f': x = local_floats.pop() t = longlong.getfloatstorage(x) - self.cpu.set_future_value_float(i, t) + arguments.append(t) else: x = local_ints.pop() - self.cpu.set_future_value_int(i, x) + arguments.append(x) expected_result += x - return expected_result + return arguments, expected_result @classmethod def get_funcbox(cls, cpu, func_ptr): @@ -110,9 +111,9 @@ looptoken = JitCellToken() done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - expected_result = self._prepare_args(args, floats, ints) + argvals, expected_result = self._prepare_args(args, floats, ints) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(x - expected_result) < 0.0001 @@ -258,8 +259,8 @@ done_number = self.cpu.get_fail_descr_number(called_loop.operations[-1].getdescr()) self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) - expected_result = self._prepare_args(args, floats, ints) - res = cpu.execute_token(called_looptoken) + argvals, expected_result = self._prepare_args(args, floats, ints) + res = cpu.execute_token(called_looptoken, *argvals) assert res.identifier == 3 t = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(t - expected_result) < 0.0001 @@ -288,8 +289,8 @@ self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # prepare call to called_loop - self._prepare_args(args, floats, ints) - res = cpu.execute_token(othertoken) + argvals, _ = self._prepare_args(args, floats, ints) + res = cpu.execute_token(othertoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert res.identifier == 4 assert abs(x - expected_result) < 0.0001 diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -34,20 +34,17 @@ descr) looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - j = 0 
+ args = [] for box in inputargs: if isinstance(box, BoxInt): - self.cpu.set_future_value_int(j, box.getint()) - j += 1 + args.append(box.getint()) elif isinstance(box, (BoxPtr, BoxObj)): - self.cpu.set_future_value_ref(j, box.getref_base()) - j += 1 + args.append(box.getref_base()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(j, box.getfloatstorage()) - j += 1 + args.append(box.getfloatstorage()) else: raise NotImplementedError(box) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *args) if res is operations[-1].getdescr(): self.guard_failed = False else: @@ -108,8 +105,7 @@ inputargs = [i0] looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) res = self.cpu.get_latest_value_int(0) assert res == 3 assert fail.identifier == 1 @@ -131,8 +127,7 @@ operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 10 @@ -156,8 +151,7 @@ operations[4].setfailargs([None, None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 44) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 44) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(2) assert res == 10 @@ -222,8 +216,7 @@ self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -264,8 +257,7 @@ self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -288,8 +280,7 @@ operations[3].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail is faildescr1 count = self.cpu.get_latest_value_count() @@ -311,8 +302,7 @@ ResOperation(rop.FINISH, [i0], None, descr=faildescr) ] self.cpu.compile_loop([i0], operations, looptoken) - self.cpu.set_future_value_int(0, 99) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 99) assert fail is faildescr res = self.cpu.get_latest_value_int(0) assert res == 99 @@ -343,8 +333,7 @@ ] self.cpu.compile_loop([f0], operations, looptoken) value = longlong.getfloatstorage(-61.25) - self.cpu.set_future_value_float(0, value) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, value) assert fail is faildescr res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == -61.25 @@ -379,9 +368,7 @@ ] operations[-2].setfailargs([t, z]) cpu.compile_loop([x, y], operations, looptoken) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, 0, 10) assert self.cpu.get_latest_value_int(0) == 0 assert 
self.cpu.get_latest_value_int(1) == 55 @@ -442,9 +429,7 @@ for x, y, z in testcases: excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, x) - self.cpu.set_future_value_int(1, y) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, x, y) if (z == boom) ^ reversed: assert fail.identifier == 1 else: @@ -1129,17 +1114,7 @@ assert 0 values[index_counter] = 11 # - for i, (box, val) in enumerate(zip(inputargs, values)): - if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, val) - elif isinstance(box, BoxPtr): - self.cpu.set_future_value_ref(i, val) - elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, val) - else: - assert 0 - # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *values) assert fail.identifier == 15 # dstvalues = values[:] @@ -1191,10 +1166,11 @@ self.cpu.compile_bridge(faildescr1, fboxes2, bridge, looptoken) + args = [] for i in range(len(fboxes)): x = 13.5 + 6.73 * i - self.cpu.set_future_value_float(i, longlong.getfloatstorage(x)) - fail = self.cpu.execute_token(looptoken) + args.append(longlong.getfloatstorage(x)) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 2 res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == 8.5 @@ -1244,14 +1220,12 @@ if test1 == -42 or combinaison[0] == 'b': for test2 in [-65, -42, -11]: if test2 == -42 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_int(n, test1) - n += 1 + args.append(test1) if combinaison[1] == 'b': - cpu.set_future_value_int(n, test2) - n += 1 - fail = cpu.execute_token(looptoken) + args.append(test2) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1303,16 +1277,14 @@ if test1 == -4.5 or combinaison[0] == 'b': for test2 in [-6.5, -4.5, -2.5, nan]: if test2 == -4.5 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test1)) - n += 1 + args.append( + longlong.getfloatstorage(test1)) if combinaison[1] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test2)) - n += 1 - fail = cpu.execute_token(looptoken) + args.append( + longlong.getfloatstorage(test2)) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1356,15 +1328,16 @@ # self.cpu.compile_loop(inputargs, operations, looptoken) # - for i, box in enumerate(inputargs): + args = [] + for box in inputargs: if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, box.getint()) + args.append(box.getint()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, box.getfloatstorage()) + args.append(box.getfloatstorage()) else: assert 0 # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 1 def test_nan_and_infinity(self): @@ -1427,10 +1400,9 @@ unique_testcase_list = list(set(testcase)) self.cpu.compile_loop(unique_testcase_list, operations, looptoken) - for i, box in enumerate(unique_testcase_list): - self.cpu.set_future_value_float( - i, box.getfloatstorage()) - fail = self.cpu.execute_token(looptoken) + args = [box.getfloatstorage() + for box in unique_testcase_list] + fail = self.cpu.execute_token(looptoken, *args) if fail.identifier != 5 - (expected_id^expected): if fail.identifier == 4: msg = "was taken" @@ -1699,14 +1671,12 @@ loop = parse(ops, self.cpu, 
namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_ref(1) == xptr excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1725,8 +1695,7 @@ loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == yptr @@ -1744,13 +1713,11 @@ loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == xptr - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 0 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1922,16 +1889,12 @@ ops[2].setfailargs([i1, i0]) looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 20 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 10 @@ -1967,16 +1930,12 @@ ops[2].setfailargs([i1, i2, i0]) looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 42 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 42 @@ -2013,17 +1972,13 @@ ops[2].setfailargs([i1, f2, i0]) looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 42.5 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert 
fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 x = self.cpu.get_latest_value_float(1) @@ -2058,8 +2013,7 @@ ops[1].setfailargs([i1, i2]) looptoken = JitCellToken() self.cpu.compile_loop([i1], ops, looptoken) - self.cpu.set_future_value_int(0, ord('G')) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, ord('G')) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == ord('g') @@ -2118,12 +2072,12 @@ ops[1].setfailargs([]) looptoken = JitCellToken() self.cpu.compile_loop([i0, i1, i2, i3], ops, looptoken) - self.cpu.set_future_value_int(0, rffi.cast(lltype.Signed, raw)) - self.cpu.set_future_value_int(1, 2) - self.cpu.set_future_value_int(2, 4) - self.cpu.set_future_value_int(3, rffi.cast(lltype.Signed, fn)) + args = [rffi.cast(lltype.Signed, raw), + 2, + 4, + rffi.cast(lltype.Signed, fn)] assert glob.lst == [] - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert len(glob.lst) > 0 lltype.free(raw, flavor='raw') @@ -2176,9 +2130,8 @@ self.cpu.compile_loop([i1, i2], ops, looptoken) buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') - self.cpu.set_future_value_int(0, buflen) - self.cpu.set_future_value_int(1, rffi.cast(lltype.Signed, buffer)) - fail = self.cpu.execute_token(looptoken) + args = [buflen, rffi.cast(lltype.Signed, buffer)] + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == len(cwd) assert rffi.charp2strn(buffer, buflen) == cwd @@ -2197,9 +2150,7 @@ looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == -42 print 'step 1 ok' @@ -2208,9 +2159,7 @@ # mark as failing self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr assert self.cpu.get_latest_value_int(0) == 9 print 'step 2 ok' @@ -2226,9 +2175,7 @@ ops[0].setfailargs([]) self.cpu.compile_bridge(faildescr, [i2], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 3 assert self.cpu.get_latest_value_int(0) == 9 print 'step 3 ok' @@ -2237,9 +2184,7 @@ # mark as failing again self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr2 print 'step 4 ok' print '-'*79 @@ -2448,9 +2393,8 @@ FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, EffectInfo.MOST_GENERAL) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(looptoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(looptoken, *args) assert self.cpu.get_latest_value_int(0) == 55 ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] @@ -2462,9 +2406,8 @@ loop = parse(ops, namespace=locals()) othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - for 
i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(othertoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_latest_value_int(0) == 13 assert called @@ -2499,9 +2442,9 @@ looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.3)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(2.3)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 2.3 ops = ''' @@ -2513,9 +2456,9 @@ loop = parse(ops, namespace=locals()) othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2526,9 +2469,9 @@ try: othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 3.2 assert not called @@ -2589,9 +2532,9 @@ looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.35)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(2.35)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.25 + 2.35 assert not called @@ -2607,9 +2550,9 @@ self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # normal call_assembler: goes to looptoken - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.25)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(3.25)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2629,10 +2572,9 @@ self.cpu.redirect_call_assembler(looptoken, looptoken2) # now, our call_assembler should go to looptoken2 - self.cpu.set_future_value_float(0, longlong.getfloatstorage(6.0)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(1.5)) - # 6.0-1.5 == 1.25+3.25 - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(6.0), + longlong.getfloatstorage(1.5)] # 6.0-1.5 == 1.25+3.25 + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 
assert called @@ -2986,8 +2928,7 @@ looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # overflowing value: - self.cpu.set_future_value_int(0, sys.maxint // 4 + 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) assert fail.identifier == excdescr.identifier def test_compile_loop_with_target(self): @@ -3014,8 +2955,7 @@ operations[6].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 10 @@ -3027,8 +2967,7 @@ ] self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 3 res = self.cpu.get_latest_value_int(0) assert res == -10 @@ -3108,13 +3047,13 @@ self.cpu.compile_bridge(faildescr1, inputargs, operations, looptoken1) looptoken2 = JitCellToken() - inputargs = [] + inputargs = [BoxInt()] operations = [ ResOperation(rop.JUMP, [ConstInt(0)], None, descr=targettoken1), ] self.cpu.compile_loop(inputargs, operations, looptoken2) - fail = self.cpu.execute_token(looptoken2) + fail = self.cpu.execute_token(looptoken2, -9) assert fail.identifier == 42 diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -6,6 +6,7 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, JitCellToken from pypy.jit.metainterp.history import BoxPtr, ConstPtr, TargetToken from pypy.jit.metainterp.history import BoxFloat, ConstFloat, Const +from pypy.jit.metainterp.history import INT, FLOAT from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.executor import execute_nonspec from pypy.jit.metainterp.resoperation import opname @@ -616,8 +617,13 @@ return self.loop._jitcelltoken if not hasattr(self, '_initialjumploop_celltoken'): self._initialjumploop_celltoken = JitCellToken() - self.cpu.compile_loop(self.startvars[:], - [ResOperation(rop.JUMP, self.startvars[:], None, + args = [] + for box in self.startvars: + if box not in self.loop.inputargs: + box = box.constbox() + args.append(box) + self.cpu.compile_loop(self.loop.inputargs, + [ResOperation(rop.JUMP, args, None, descr=self.loop._targettoken)], self._initialjumploop_celltoken) return self._initialjumploop_celltoken @@ -649,14 +655,8 @@ exc = cpu.grab_exc_value() assert not exc - for i, box in enumerate(self.startvars): - if isinstance(box, BoxInt): - cpu.set_future_value_int(i, box.value) - elif isinstance(box, BoxFloat): - cpu.set_future_value_float(i, box.value) - else: - raise NotImplementedError(box) - fail = cpu.execute_token(self.runjitcelltoken()) + arguments = [box.value for box in self.loop.inputargs] + fail = cpu.execute_token(self.runjitcelltoken(), *arguments) assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): if isinstance(v, (BoxFloat, ConstFloat)): @@ -717,10 +717,21 @@ # to build_bridge().) # First make up the other loop... 
- subset = bridge_builder.subset_of_intvars(r) - subset = [i for i in subset if i in fail_args] - if len(subset) == 0: - return False + # + # New restriction: must have the same argument count and types + # as the original loop + subset = [] + for box in self.loop.inputargs: + srcbox = r.choice(fail_args) + if srcbox.type != box.type: + if box.type == INT: + srcbox = ConstInt(r.random_integer()) + elif box.type == FLOAT: + srcbox = ConstFloat(r.random_float_storage()) + else: + raise AssertionError(box.type) + subset.append(srcbox) + # args = [x.clonebox() for x in subset] rl = RandomLoop(self.builder.cpu, self.builder.fork, r, args) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -309,12 +309,11 @@ mc.MOVSD_sx(8*i, i) # xmm0 to xmm7 # if IS_X86_32: - mc.LEA_rb(eax.value, +8) stack_size += 2*WORD mc.PUSH_r(eax.value) # alignment - mc.PUSH_r(eax.value) + mc.PUSH_r(esp.value) elif IS_X86_64: - mc.LEA_rb(edi.value, +16) + mc.MOV_rr(edi.value, esp.value) # # esp is now aligned to a multiple of 16 again mc.CALL(imm(slowpathaddr)) @@ -325,7 +324,7 @@ jnz_location = mc.get_relative_pos() # if IS_X86_32: - mc.ADD_ri(esp.value, 2*WORD) + mc.ADD_ri(esp.value, 2*WORD) # cancel the two PUSHes above elif IS_X86_64: # restore the registers for i in range(7, -1, -1): @@ -421,10 +420,8 @@ def assemble_loop(self, loopname, inputargs, operations, looptoken, log): '''adds the following attributes to looptoken: - _x86_loop_code (an integer giving an address) - _x86_bootstrap_code (an integer giving an address) - _x86_direct_bootstrap_code ( " " " " ) - _x86_arglocs + _x86_function_addr (address of the generated func, as an int) + _x86_loop_code (debug: addr of the start of the ResOps) _x86_debug_checksum ''' # XXX this function is too longish and contains some code @@ -445,12 +442,12 @@ operations = self._inject_debugging_code(looptoken, operations) regalloc = RegAlloc(self, self.cpu.translate_support_code) - arglocs, operations = regalloc.prepare_loop(inputargs, operations, - looptoken, clt.allgcrefs) - looptoken._x86_arglocs = arglocs - - bootstrappos = self.mc.get_relative_pos() - stackadjustpos = self._assemble_bootstrap_code(inputargs, arglocs) + # + self._call_header_with_stack_check() + stackadjustpos = self._patchable_stackadjust() + clt._debug_nbargs = len(inputargs) + operations = regalloc.prepare_loop(inputargs, operations, + looptoken, clt.allgcrefs) looppos = self.mc.get_relative_pos() looptoken._x86_loop_code = looppos clt.frame_depth = -1 # temporarily @@ -458,19 +455,17 @@ frame_depth, param_depth = self._assemble(regalloc, operations) clt.frame_depth = frame_depth clt.param_depth = param_depth - - directbootstrappos = self.mc.get_relative_pos() - self._assemble_bootstrap_direct_call(arglocs, looppos, - frame_depth+param_depth) + # + size_excluding_failure_stuff = self.mc.get_relative_pos() self.write_pending_failure_recoveries() - fullsize = self.mc.get_relative_pos() + full_size = self.mc.get_relative_pos() # rawstart = self.materialize_loop(looptoken) debug_start("jit-backend-addr") debug_print("Loop %d (%s) has address %x to %x (bootstrap %x)" % ( looptoken.number, loopname, rawstart + looppos, - rawstart + directbootstrappos, + rawstart + size_excluding_failure_stuff, rawstart)) debug_stop("jit-backend-addr") self._patch_stackadjust(rawstart + stackadjustpos, @@ -481,18 +476,17 @@ if not we_are_translated(): # used only by looptoken.dump() -- useful in tests 
looptoken._x86_rawstart = rawstart - looptoken._x86_fullsize = fullsize + looptoken._x86_fullsize = full_size looptoken._x86_ops_offset = ops_offset + looptoken._x86_function_addr = rawstart - looptoken._x86_bootstrap_code = rawstart + bootstrappos - looptoken._x86_direct_bootstrap_code = rawstart + directbootstrappos self.fixup_target_tokens(rawstart) self.teardown() # oprofile support if self.cpu.profile_agent is not None: name = "Loop # %s: %s" % (looptoken.number, loopname) self.cpu.profile_agent.native_code_written(name, - rawstart, fullsize) + rawstart, full_size) return ops_offset def assemble_bridge(self, faildescr, inputargs, operations, @@ -802,152 +796,21 @@ self.mc.MOV_ri(ebx.value, rst) # MOV ebx, rootstacktop self.mc.SUB_mi8((ebx.value, 0), 2*WORD) # SUB [ebx], 2*WORD - def _assemble_bootstrap_direct_call(self, arglocs, jmppos, stackdepth): - if IS_X86_64: - return self._assemble_bootstrap_direct_call_64(arglocs, jmppos, stackdepth) - # XXX pushing ebx esi and edi is a bit pointless, since we store - # all regsiters anyway, for the case of guard_not_forced - # XXX this can be improved greatly. Right now it'll behave like - # a normal call - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - offset = 2 * WORD - tmp = eax - xmmtmp = xmm0 - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert not loc.is_xmm - self.mc.MOV_rb(loc.value, offset) - else: - self.mc.MOV_rb(tmp.value, offset) - self.mc.MOV(loc, tmp) - offset += WORD - loc = floatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert loc.is_xmm - self.mc.MOVSD_xb(loc.value, offset) - else: - self.mc.MOVSD_xb(xmmtmp.value, offset) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - offset += 2 * WORD - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - - def _assemble_bootstrap_direct_call_64(self, arglocs, jmppos, stackdepth): - # XXX: Very similar to _emit_call_64 - - src_locs = [] - dst_locs = [] - xmm_src_locs = [] - xmm_dst_locs = [] - get_from_stack = [] - - # In reverse order for use with pop() - unused_gpr = [r9, r8, ecx, edx, esi, edi] - unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] - - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - - # The lists are padded with Nones - assert len(nonfloatlocs) == len(floatlocs) - - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if len(unused_gpr) > 0: - src_locs.append(unused_gpr.pop()) - dst_locs.append(loc) - else: - get_from_stack.append((loc, False)) - - floc = floatlocs[i] - if floc is not None: - if len(unused_xmm) > 0: - xmm_src_locs.append(unused_xmm.pop()) - xmm_dst_locs.append(floc) - else: - get_from_stack.append((floc, True)) - - remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) - remap_frame_layout(self, xmm_src_locs, xmm_dst_locs, X86_64_XMM_SCRATCH_REG) - - for i in range(len(get_from_stack)): - loc, is_xmm = get_from_stack[i] - if is_xmm: - self.mc.MOVSD_xb(X86_64_XMM_SCRATCH_REG.value, (2 + i) * WORD) - self.mc.MOVSD(loc, X86_64_XMM_SCRATCH_REG) - else: - self.mc.MOV_rb(X86_64_SCRATCH_REG.value, (2 + i) * WORD) - # XXX: We're assuming that "loc" won't require regloc to - # clobber the scratch register - self.mc.MOV(loc, 
X86_64_SCRATCH_REG) - - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking - oldnonfloatlocs, oldfloatlocs = oldlooptoken._x86_arglocs - newnonfloatlocs, newfloatlocs = newlooptoken._x86_arglocs - assert len(oldnonfloatlocs) == len(newnonfloatlocs) - assert len(oldfloatlocs) == len(newfloatlocs) + old_nbargs = oldlooptoken.compiled_loop_token._debug_nbargs + new_nbargs = newlooptoken.compiled_loop_token._debug_nbargs + assert old_nbargs == new_nbargs # we overwrite the instructions at the old _x86_direct_bootstrap_code # to start with a JMP to the new _x86_direct_bootstrap_code. # Ideally we should rather patch all existing CALLs, but well. - oldadr = oldlooptoken._x86_direct_bootstrap_code - target = newlooptoken._x86_direct_bootstrap_code + oldadr = oldlooptoken._x86_function_addr + target = newlooptoken._x86_function_addr mc = codebuf.MachineCodeBlockWrapper() mc.JMP(imm(target)) + assert mc.get_relative_pos() <= 13 # keep in sync with prepare_loop() mc.copy_to_raw_memory(oldadr) - def _assemble_bootstrap_code(self, inputargs, arglocs): - nonfloatlocs, floatlocs = arglocs - self._call_header() - stackadjustpos = self._patchable_stackadjust() - tmp = eax - xmmtmp = xmm0 - self.mc.begin_reuse_scratch_register() - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is None: - continue - if isinstance(loc, RegLoc): - target = loc - else: - target = tmp - if inputargs[i].type == REF: - adr = self.fail_boxes_ptr.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - self.mc.MOV(heap(adr), imm0) - else: - adr = self.fail_boxes_int.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - if target is not loc: - assert isinstance(loc, StackLoc) - self.mc.MOV_br(loc.value, target.value) - for i in range(len(floatlocs)): - loc = floatlocs[i] - if loc is None: - continue - adr = self.fail_boxes_float.get_addr_for_num(i) - if isinstance(loc, RegLoc): - self.mc.MOVSD(loc, heap(adr)) - else: - self.mc.MOVSD(xmmtmp, heap(adr)) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - self.mc.end_reuse_scratch_register() - return stackadjustpos - def dump(self, text): if not self.verbose: return @@ -1891,10 +1754,10 @@ DESCR_INT = 0x01 DESCR_FLOAT = 0x02 DESCR_SPECIAL = 0x03 - # XXX: 4*8 works on i386, should we optimize for that case? 
- CODE_FROMSTACK = 4*16 + CODE_FROMSTACK = 4 * (8 + 8*IS_X86_64) CODE_STOP = 0 | DESCR_SPECIAL CODE_HOLE = 4 | DESCR_SPECIAL + CODE_INPUTARG = 8 | DESCR_SPECIAL def write_failure_recovery_description(self, mc, failargs, locs): for i in range(len(failargs)): @@ -1910,7 +1773,11 @@ raise AssertionError("bogus kind") loc = locs[i] if isinstance(loc, StackLoc): - n = self.CODE_FROMSTACK//4 + loc.position + pos = loc.position + if pos < 0: + mc.writechar(chr(self.CODE_INPUTARG)) + pos = ~pos + n = self.CODE_FROMSTACK//4 + pos else: assert isinstance(loc, RegLoc) n = loc.value @@ -1930,6 +1797,7 @@ descr_to_box_type = [REF, INT, FLOAT] bytecode = rffi.cast(rffi.UCHARP, bytecode) arglocs = [] + code_inputarg = False while 1: # decode the next instruction from the bytecode code = rffi.cast(lltype.Signed, bytecode[0]) @@ -1948,11 +1816,17 @@ break kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 + if code_inputarg: + code = ~code + code_inputarg = False loc = X86FrameManager.frame_pos(code, descr_to_box_type[kind]) elif code == self.CODE_STOP: break elif code == self.CODE_HOLE: continue + elif code == self.CODE_INPUTARG: + code_inputarg = True + continue else: # 'code' identifies a register kind = code & 3 @@ -1968,6 +1842,7 @@ def grab_frame_values(self, bytecode, frame_addr, allregisters): # no malloc allowed here!! self.fail_ebp = allregisters[16 + ebp.value] + code_inputarg = False num = 0 value_hi = 0 while 1: @@ -1988,6 +1863,9 @@ # load the value from the stack kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 + if code_inputarg: + code = ~code + code_inputarg = False stackloc = frame_addr + get_ebp_ofs(code) value = rffi.cast(rffi.LONGP, stackloc)[0] if kind == self.DESCR_FLOAT and WORD == 4: @@ -2000,6 +1878,9 @@ if code == self.CODE_HOLE: num += 1 continue + if code == self.CODE_INPUTARG: + code_inputarg = True + continue assert code == self.CODE_STOP break code >>= 2 @@ -2104,9 +1985,9 @@ # returns in eax the fail_index # now we return from the complete frame, which starts from - # _assemble_bootstrap_code(). The LEA in _call_footer below throws - # away most of the frame, including all the PUSHes that we did just - # above. + # _call_header_with_stack_check(). The LEA in _call_footer below + # throws away most of the frame, including all the PUSHes that we + # did just above. self._call_footer() rawstart = mc.materialize(self.cpu.asmmemmgr, []) @@ -2354,10 +2235,10 @@ self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) descr = op.getdescr() assert isinstance(descr, JitCellToken) - assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) + assert len(arglocs) - 2 == descr.compiled_loop_token._debug_nbargs # - # Write a call to the direct_bootstrap_code of the target assembler - self._emit_call(fail_index, imm(descr._x86_direct_bootstrap_code), + # Write a call to the target assembler + self._emit_call(fail_index, imm(descr._x86_function_addr), arglocs, 2, tmp=eax) if op.result is None: assert result_loc is None @@ -2588,6 +2469,14 @@ self.gcrootmap_retaddr_forced = -1 def closing_jump(self, target_token): + # The backend's logic assumes that the target code is in a piece of + # assembler that was also called with the same number of arguments, + # so that the locations [ebp+8..] of the input arguments are valid + # stack locations both before and after the jump. 
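
To make the new calling convention work with guard failures, the recovery bytecode above gains a CODE_INPUTARG escape: stack-passed input arguments now sit at negative frame positions (in the caller's argument area above ebp), and a negative position is written as the escape byte followed by the bitwise complement of the position. A minimal standalone sketch of just that part of the encoding (the real code also handles registers, holes and a variable-length integer format):

    DESCR_REF, DESCR_INT, DESCR_FLOAT, DESCR_SPECIAL = 0, 1, 2, 3
    CODE_STOP     = 0 | DESCR_SPECIAL
    CODE_HOLE     = 4 | DESCR_SPECIAL
    CODE_INPUTARG = 8 | DESCR_SPECIAL
    CODE_FROMSTACK = 4 * 8          # 32-bit flavour; 4 * 16 on x86-64

    def encode(stream, kind, pos):
        if pos < 0:                      # new case: an input argument
            stream.append(CODE_INPUTARG)
            pos = ~pos                   # store the complement, which is >= 0
        n = CODE_FROMSTACK // 4 + pos
        stream.append(kind + 4 * n)      # kind != DESCR_SPECIAL, so no clash

    def decode(stream):
        out, inputarg = [], False
        for code in stream:
            if code == CODE_INPUTARG:
                inputarg = True
                continue
            kind = code & 3
            pos = (code - CODE_FROMSTACK) >> 2
            if inputarg:
                pos = ~pos
                inputarg = False
            out.append((kind, pos))
        return out

    data = []
    encode(data, DESCR_INT, 2)           # a spilled variable
    encode(data, DESCR_FLOAT, -1)        # an input argument, negative position
    assert decode(data) == [(DESCR_INT, 2), (DESCR_FLOAT, -1)]
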
+ my_nbargs = self.current_clt._debug_nbargs + target_nbargs = target_token._x86_clt._debug_nbargs + assert my_nbargs == target_nbargs + # target = target_token._x86_loop_code if target_token in self.target_tokens_currently_compiling: curpos = self.mc.get_relative_pos() + 5 diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -28,7 +28,7 @@ class X86RegisterManager(RegisterManager): box_types = [INT, REF] - all_regs = [eax, ecx, edx, ebx, esi, edi] + all_regs = [ecx, eax, edx, ebx, esi, edi] no_lower_byte_regs = [esi, edi] save_around_call_regs = [eax, edx, ecx] frame_reg = ebp @@ -60,7 +60,7 @@ class X86_64_RegisterManager(X86RegisterManager): # r11 omitted because it's used as scratch - all_regs = [eax, ecx, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15] + all_regs = [ecx, eax, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15] no_lower_byte_regs = [] save_around_call_regs = [eax, ecx, edx, esi, edi, r8, r9, r10] @@ -165,6 +165,7 @@ self.jump_target_descr = None self.close_stack_struct = 0 self.final_jump_op = None + self.min_bytes_before_label = 0 def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() @@ -173,22 +174,27 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity, useful = self._compute_vars_longevity(inputargs, operations) + longevity = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) - return operations, useful + return operations def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): - operations, useful = self._prepare(inputargs, operations, allgcrefs) - return self._process_inputargs(inputargs, useful), operations + operations = self._prepare(inputargs, operations, allgcrefs) + self._set_initial_bindings(inputargs) + # note: we need to make a copy of inputargs because possibly_free_vars + # is also used on op args, which is a non-resizable list + self.possibly_free_vars(list(inputargs)) + self.min_bytes_before_label = 13 + return operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, allgcrefs): - operations, _ = self._prepare(inputargs, operations, allgcrefs) + operations = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) self.param_depth = prev_depths[1] return operations @@ -196,46 +202,56 @@ def reserve_param(self, n): self.param_depth = max(self.param_depth, n) - def _process_inputargs(self, inputargs, useful): - # XXX we can sort out here by longevity if we need something - # more optimal - floatlocs = [None] * len(inputargs) - nonfloatlocs = [None] * len(inputargs) - # Don't use all_regs[0] for passing arguments around a loop. - # Must be kept in sync with consider_jump(). - # XXX this should probably go to llsupport/regalloc.py - xmmtmp = self.xrm.free_regs.pop(0) - tmpreg = self.rm.free_regs.pop(0) - assert tmpreg == X86RegisterManager.all_regs[0] - assert xmmtmp == X86XMMRegisterManager.all_regs[0] - for i in range(len(inputargs)): - arg = inputargs[i] - assert not isinstance(arg, Const) - reg = None - if self.longevity[arg][1] > -1 and arg in useful: - if arg.type == FLOAT: - # xxx is it really a good idea? 
at the first CALL they - # will all be flushed anyway - reg = self.xrm.try_allocate_reg(arg) + def _set_initial_bindings(self, inputargs): + if IS_X86_64: + inputargs = self._set_initial_bindings_regs_64(inputargs) + # ... + # stack layout: arg2 + # arg1 + # arg0 + # return address + # saved ebp <-- ebp points here + # ... + cur_frame_pos = - 1 - FRAME_FIXED_SIZE + assert get_ebp_ofs(cur_frame_pos-1) == 2*WORD + assert get_ebp_ofs(cur_frame_pos-2) == 3*WORD + # + for box in inputargs: + assert isinstance(box, Box) + # + if IS_X86_32 and box.type == FLOAT: + cur_frame_pos -= 2 + else: + cur_frame_pos -= 1 + loc = self.fm.frame_pos(cur_frame_pos, box.type) + self.fm.set_binding(box, loc) + + def _set_initial_bindings_regs_64(self, inputargs): + # In reverse order for use with pop() + unused_gpr = [r9, r8, ecx, edx, esi, edi] + unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] + # + pass_on_stack = [] + # + for box in inputargs: + assert isinstance(box, Box) + # + if box.type == FLOAT: + if len(unused_xmm) > 0: + ask = unused_xmm.pop() + got = self.xrm.try_allocate_reg(box, selected_reg=ask) + assert ask == got else: - reg = self.rm.try_allocate_reg(arg) - if reg: - loc = reg + pass_on_stack.append(box) else: - loc = self.fm.loc(arg) - if arg.type == FLOAT: - floatlocs[i] = loc - else: - nonfloatlocs[i] = loc - # otherwise we have it saved on stack, so no worry - self.rm.free_regs.insert(0, tmpreg) - self.xrm.free_regs.insert(0, xmmtmp) - assert tmpreg not in nonfloatlocs - assert xmmtmp not in floatlocs - # note: we need to make a copy of inputargs because possibly_free_vars - # is also used on op args, which is a non-resizable list - self.possibly_free_vars(list(inputargs)) - return nonfloatlocs, floatlocs + if len(unused_gpr) > 0: + ask = unused_gpr.pop() + got = self.rm.try_allocate_reg(box, selected_reg=ask) + assert ask == got + else: + pass_on_stack.append(box) + # + return pass_on_stack def possibly_free_var(self, var): if var.type == FLOAT: @@ -446,8 +462,15 @@ i += 1 assert not self.rm.reg_bindings assert not self.xrm.reg_bindings + self.flush_loop() self.assembler.mc.mark_op(None) # end of the loop + def flush_loop(self): + # rare case: if the loop is too short, pad with NOPs + mc = self.assembler.mc + while mc.get_relative_pos() < self.min_bytes_before_label: + mc.NOP() + def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" @@ -458,7 +481,7 @@ # only to guard operations or to jump or to finish produced = {} last_used = {} - useful = {} + #useful = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -469,8 +492,8 @@ opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) - if opnum != rop.JUMP and opnum != rop.FINISH: - useful[arg] = None + #if opnum != rop.JUMP and opnum != rop.FINISH: + # useful[arg] = None if isinstance(arg, Box) and arg not in last_used: last_used[arg] = i if op.is_guard(): @@ -496,7 +519,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity, useful + return longevity#, useful def loc(self, v): if v is None: # xxx kludgy @@ -1344,51 +1367,54 @@ # we would like the boxes to be after the jump. 
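
The _set_initial_bindings() method above gives each input argument its natural location for a plain function call: on x86-32 everything arrives on the stack just above the saved ebp and the return address, with a float argument taking two words; on x86-64 the first arguments arrive in edi, esi, edx, ecx, r8, r9 and xmm0-xmm7, and only the overflow is looked up on the stack. A small sketch of the 32-bit offset computation (illustrative only, offsets in bytes from ebp):

    WORD = 4                          # x86-32

    def argument_offsets(arg_types):
        # return the ebp-relative byte offset of each incoming argument
        offsets = []
        ofs = 2 * WORD                # skip saved ebp and the return address
        for tp in arg_types:
            offsets.append(ofs)
            ofs += 2 * WORD if tp == 'float' else WORD
        return offsets

    # int, float, int  ->  [ebp+8], [ebp+12..16], [ebp+20]
    assert argument_offsets(['int', 'float', 'int']) == [8, 12, 20]
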
def _compute_hint_frame_locations_from_descr(self, descr): - nonfloatlocs, floatlocs = descr._x86_arglocs + arglocs = descr._x86_arglocs jump_op = self.final_jump_op - assert len(nonfloatlocs) == jump_op.numargs() + assert len(arglocs) == jump_op.numargs() for i in range(jump_op.numargs()): box = jump_op.getarg(i) if isinstance(box, Box): - loc = nonfloatlocs[i] + loc = arglocs[i] if isinstance(loc, StackLoc): - assert box.type != FLOAT self.fm.hint_frame_locations[box] = loc - else: - loc = floatlocs[i] - if isinstance(loc, StackLoc): - assert box.type == FLOAT - self.fm.hint_frame_locations[box] = loc def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None descr = op.getdescr() assert isinstance(descr, TargetToken) - nonfloatlocs, floatlocs = descr._x86_arglocs + arglocs = descr._x86_arglocs self.jump_target_descr = descr # compute 'tmploc' to be all_regs[0] by spilling what is there - box = TempBox() - box1 = TempBox() + tmpbox1 = TempBox() + tmpbox2 = TempBox() tmpreg = X86RegisterManager.all_regs[0] - tmploc = self.rm.force_allocate_reg(box, selected_reg=tmpreg) + self.rm.force_allocate_reg(tmpbox1, selected_reg=tmpreg) xmmtmp = X86XMMRegisterManager.all_regs[0] - self.xrm.force_allocate_reg(box1, selected_reg=xmmtmp) + self.xrm.force_allocate_reg(tmpbox2, selected_reg=xmmtmp) # Part about non-floats - # XXX we don't need a copy, we only just the original list - src_locations1 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type != FLOAT] - assert tmploc not in nonfloatlocs - dst_locations1 = [loc for loc in nonfloatlocs if loc is not None] + src_locations1 = [] + dst_locations1 = [] # Part about floats - src_locations2 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type == FLOAT] - dst_locations2 = [loc for loc in floatlocs if loc is not None] + src_locations2 = [] + dst_locations2 = [] + # Build the four lists + for i in range(op.numargs()): + box = op.getarg(i) + src_loc = self.loc(box) + dst_loc = arglocs[i] + assert dst_loc != tmpreg and dst_loc != xmmtmp + if box.type != FLOAT: + src_locations1.append(src_loc) + dst_locations1.append(dst_loc) + else: + src_locations2.append(src_loc) + dst_locations2.append(dst_loc) + # Do the remapping remap_frame_layout_mixed(assembler, - src_locations1, dst_locations1, tmploc, + src_locations1, dst_locations1, tmpreg, src_locations2, dst_locations2, xmmtmp) - self.rm.possibly_free_var(box) - self.xrm.possibly_free_var(box1) + self.rm.possibly_free_var(tmpbox1) + self.xrm.possibly_free_var(tmpbox2) self.possibly_free_vars_for_op(op) assembler.closing_jump(self.jump_target_descr) @@ -1440,23 +1466,21 @@ self.rm.force_allocate_frame_reg(op.result) def consider_label(self, op): - # XXX big refactoring needed? 
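
The min_bytes_before_label = 13 setting and the flush_loop() NOP padding introduced above exist so that redirect_call_assembler() can later overwrite the very first bytes of an old loop with a JMP to the new one; on x86-64 that JMP can need up to 13 bytes (a 10-byte MOV of the 64-bit target into a scratch register plus an indirect JMP), hence the assert in redirect_call_assembler(). A toy version of the padding over a plain bytearray:

    NOP = 0x90
    MIN_BYTES_BEFORE_LABEL = 13

    def flush_loop(code):
        # pad 'code' (a bytearray of emitted machine code) with NOPs
        while len(code) < MIN_BYTES_BEFORE_LABEL:
            code.append(NOP)

    prologue = bytearray(b'\x55\x89\xe5')          # push ebp; mov ebp, esp
    flush_loop(prologue)
    assert len(prologue) >= MIN_BYTES_BEFORE_LABEL  # safe to patch a JMP here
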
descr = op.getdescr() assert isinstance(descr, TargetToken) inputargs = op.getarglist() - floatlocs = [None] * len(inputargs) - nonfloatlocs = [None] * len(inputargs) + arglocs = [None] * len(inputargs) # # we need to make sure that the tmpreg and xmmtmp are free tmpreg = X86RegisterManager.all_regs[0] tmpvar = TempBox() self.rm.force_allocate_reg(tmpvar, selected_reg=tmpreg) - self.rm.possibly_free_var(tmpvar) + self.rm.possibly_free_var(tmpvar, _hint_dont_reuse_quickly=True) # xmmtmp = X86XMMRegisterManager.all_regs[0] tmpvar = TempBox() self.xrm.force_allocate_reg(tmpvar, selected_reg=xmmtmp) - self.xrm.possibly_free_var(tmpvar) + self.xrm.possibly_free_var(tmpvar, _hint_dont_reuse_quickly=True) # # we need to make sure that no variable is stored in ebp for arg in inputargs: @@ -1470,13 +1494,15 @@ assert not isinstance(arg, Const) loc = self.loc(arg) assert not (loc is tmpreg or loc is xmmtmp or loc is ebp) - if arg.type == FLOAT: - floatlocs[i] = loc - else: - nonfloatlocs[i] = loc + arglocs[i] = loc if isinstance(loc, RegLoc): self.fm.mark_as_free(arg) - descr._x86_arglocs = nonfloatlocs, floatlocs + # + # if we are too close to the start of the loop, the label's target may + # get overridden by redirect_call_assembler(). (rare case) + self.flush_loop() + # + descr._x86_arglocs = arglocs descr._x86_loop_code = self.assembler.mc.get_relative_pos() descr._x86_clt = self.assembler.current_clt self.assembler.target_tokens_currently_compiling[descr] = None @@ -1490,23 +1516,6 @@ if jump_op is not None and jump_op.getdescr() is descr: self._compute_hint_frame_locations_from_descr(descr) -## from pypy.rpython.annlowlevel import llhelper -## def fn(addr): -## print '...label:', hex(addr), nonfloatlocs -## FUNC = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) -## ll_disp = llhelper(FUNC, fn) -## faddr = rffi.cast(lltype.Signed, ll_disp) -## for i in range(16): -## self.assembler.mc.PUSH_r(i) -## self.assembler.mc.CALL_l(0) -## self.assembler.mc.POP(edi) -## self.assembler.mc.MOV(r11, imm(faddr)) -## self.assembler.mc.CALL(r11) -## for i in range(15, -1, -1): -## if i == esp.value: -## i -= 1 -## self.assembler.mc.POP_r(i) - def not_implemented_op(self, op): not_implemented("not implemented operation: %s" % op.getopname()) diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -44,7 +44,6 @@ _location_code = 'b' def __init__(self, position, ebp_offset, num_words, type): - assert ebp_offset < 0 # so no confusion with RegLoc.value self.position = position self.value = ebp_offset self.width = num_words * WORD diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.llinterp import LLInterpreter from pypy.rlib.objectmodel import we_are_translated +from pypy.jit.codewriter import longlong from pypy.jit.metainterp import history, compile from pypy.jit.backend.x86.assembler import Assembler386 from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS @@ -21,7 +22,6 @@ supports_floats = True supports_singlefloats = True - BOOTSTRAP_TP = lltype.FuncType([], lltype.Signed) dont_keepalive_stuff = False # for tests with_threads = False @@ -91,15 +91,6 @@ return self.assembler.assemble_bridge(faildescr, inputargs, operations, original_loop_token, log=log) - def set_future_value_int(self, index, intvalue): - 
self.assembler.fail_boxes_int.setitem(index, intvalue) - - def set_future_value_float(self, index, floatvalue): - self.assembler.fail_boxes_float.setitem(index, floatvalue) - - def set_future_value_ref(self, index, ptrvalue): - self.assembler.fail_boxes_ptr.setitem(index, ptrvalue) - def get_latest_value_int(self, index): return self.assembler.fail_boxes_int.getitem(index) @@ -122,27 +113,28 @@ # the FORCE_TOKEN operation and this helper both return 'ebp'. return self.assembler.fail_ebp - def execute_token(self, executable_token): - addr = executable_token._x86_bootstrap_code - #llop.debug_print(lltype.Void, ">>>> Entering", addr) - func = rffi.cast(lltype.Ptr(self.BOOTSTRAP_TP), addr) - fail_index = self._execute_call(func) - #llop.debug_print(lltype.Void, "<<<< Back") - return self.get_fail_descr_from_number(fail_index) - - def _execute_call(self, func): - # help flow objspace - prev_interpreter = None - if not self.translate_support_code: - prev_interpreter = LLInterpreter.current_interpreter - LLInterpreter.current_interpreter = self.debug_ll_interpreter - res = 0 - try: - res = func() - finally: + def make_execute_token(self, *ARGS): + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, lltype.Signed)) + # + def execute_token(executable_token, *args): + clt = executable_token.compiled_loop_token + assert len(args) == clt._debug_nbargs + # + addr = executable_token._x86_function_addr + func = rffi.cast(FUNCPTR, addr) + #llop.debug_print(lltype.Void, ">>>> Entering", addr) + prev_interpreter = None # help flow space if not self.translate_support_code: - LLInterpreter.current_interpreter = prev_interpreter - return res + prev_interpreter = LLInterpreter.current_interpreter + LLInterpreter.current_interpreter = self.debug_ll_interpreter + try: + fail_index = func(*args) + finally: + if not self.translate_support_code: + LLInterpreter.current_interpreter = prev_interpreter + #llop.debug_print(lltype.Void, "<<<< Back") + return self.get_fail_descr_from_number(fail_index) + return execute_token def cast_ptr_to_int(x): adr = llmemory.cast_ptr_to_adr(x) diff --git a/pypy/jit/backend/x86/test/test_assembler.py b/pypy/jit/backend/x86/test/test_assembler.py --- a/pypy/jit/backend/x86/test/test_assembler.py +++ b/pypy/jit/backend/x86/test/test_assembler.py @@ -46,12 +46,13 @@ xmm2] assert len(failargs) == len(locs) assembler.write_failure_recovery_description(mc, failargs, locs) - nums = [Assembler386.DESCR_INT + 4*(16+0), - Assembler386.DESCR_REF + 4*(16+1), - Assembler386.DESCR_FLOAT + 4*(16+10), - Assembler386.DESCR_INT + 4*(16+100), - Assembler386.DESCR_REF + 4*(16+101), - Assembler386.DESCR_FLOAT + 4*(16+110), + base = 8 + 8*IS_X86_64 + nums = [Assembler386.DESCR_INT + 4*(base+0), + Assembler386.DESCR_REF + 4*(base+1), + Assembler386.DESCR_FLOAT + 4*(base+10), + Assembler386.DESCR_INT + 4*(base+100), + Assembler386.DESCR_REF + 4*(base+101), + Assembler386.DESCR_FLOAT + 4*(base+110), Assembler386.CODE_HOLE, Assembler386.CODE_HOLE, Assembler386.DESCR_INT + 4*ebx.value, diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -19,8 +19,7 @@ finish(i3, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - self.cpu.set_future_value_int(0, 0) - fail = self.run(loop) + fail = self.run(loop, 0) assert fail.identifier == 2 assert self.getint(0) == 21 @@ -55,8 +54,7 @@ assert descr._x86_bridge_param_depth == 0 # the force_spill() forces 
the stack to grow assert new > previous - self.cpu.set_future_value_int(0, 0) - fail = self.run(loop) + fail = self.run(loop, 0) assert fail.identifier == 2 assert self.getint(0) == 21 assert self.getint(1) == 22 @@ -71,20 +69,19 @@ i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] jump(i1, i10, i11, i12, i13, i14, i15, i16, descr=targettoken) - ''', [0]) + ''', [0, 0, 0, 0, 0, 0, 0, 0]) other_loop = self.interpret(''' - [i3] + [i3, i10, i11, i12, i13, i14, i15, i16] label(i3, descr=targettoken2) guard_false(i3, descr=fdescr2) [i3] jump(i3, descr=targettoken2) - ''', [1]) + ''', [1, 0, 0, 0, 0, 0, 0, 0]) ops = ''' [i3] jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=targettoken) ''' bridge = self.attach_bridge(ops, other_loop, 1) - self.cpu.set_future_value_int(0, 1) - fail = self.run(other_loop) + fail = self.run(other_loop, 1, 0, 0, 0, 0, 0, 0, 0) assert fail.identifier == 1 def test_bridge_jumps_to_self_deeper(self): @@ -100,7 +97,7 @@ i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] jump(i3, i30, 1, i30, i30, i30, descr=targettoken) - ''', [0]) + ''', [0, 0, 0, 0, 0, 0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' @@ -123,10 +120,7 @@ # the force_spill() forces the stack to grow assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth assert guard_op.getdescr()._x86_bridge_param_depth == 0 - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 0) - self.cpu.set_future_value_int(2, 0) - self.run(loop) + self.run(loop, 0, 0, 0, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 @@ -142,7 +136,7 @@ i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] jump(i3, i1, i2, descr=targettoken) - ''', [0]) + ''', [0, 0, 0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' @@ -150,10 +144,7 @@ jump(i3, 0, 1, descr=targettoken) ''' bridge = self.attach_bridge(ops, loop, 5) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 0) - self.cpu.set_future_value_int(2, 0) - self.run(loop) + self.run(loop, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -142,19 +142,20 @@ loop = self.parse(ops) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - for i, arg in enumerate(args): + arguments = [] + for arg in args: if isinstance(arg, int): - self.cpu.set_future_value_int(i, arg) + arguments.append(arg) elif isinstance(arg, float): arg = longlong.getfloatstorage(arg) - self.cpu.set_future_value_float(i, arg) + arguments.append(arg) else: assert isinstance(lltype.typeOf(arg), lltype.Ptr) llgcref = lltype.cast_opaque_ptr(llmemory.GCREF, arg) - self.cpu.set_future_value_ref(i, llgcref) + arguments.append(llgcref) loop._jitcelltoken = looptoken if run: - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, *arguments) return loop def prepare_loop(self, ops): @@ -193,8 +194,8 @@ loop._jitcelltoken) return bridge - def run(self, loop): - return self.cpu.execute_token(loop._jitcelltoken) + def run(self, loop, *arguments): + return self.cpu.execute_token(loop._jitcelltoken, *arguments) class TestRegallocSimple(BaseTestRegalloc): def test_simple_loop(self): @@ -220,7 +221,7 @@ ''' loop = self.interpret(ops, [0, 0, 0, 0]) ops2 = ''' - [i5] + [i5, i6, i7, i8] label(i5, descr=targettoken2) i1 = int_add(i5, 1) i3 = int_add(i1, 1) @@ -229,14 +230,13 @@ guard_true(i2) [i4] 
jump(i4, descr=targettoken2) ''' - loop2 = self.interpret(ops2, [0]) + loop2 = self.interpret(ops2, [0, 0, 0, 0]) bridge_ops = ''' [i4] jump(i4, i4, i4, i4, descr=targettoken) ''' bridge = self.attach_bridge(bridge_ops, loop2, 5) - self.cpu.set_future_value_int(0, 0) - self.run(loop2) + self.run(loop2, 0, 0, 0, 0) assert self.getint(0) == 31 assert self.getint(1) == 30 assert self.getint(2) == 30 @@ -274,8 +274,7 @@ loop = self.interpret(ops, [0]) assert self.getint(0) == 1 bridge = self.attach_bridge(bridge_ops, loop, 2) - self.cpu.set_future_value_int(0, 0) - self.run(loop) + self.run(loop, 0) assert self.getint(0) == 1 def test_inputarg_unused(self): @@ -301,9 +300,7 @@ assert self.getint(0) == 0 assert self.getint(1) == 10 bridge = self.attach_bridge(bridge_ops, loop, 0) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - self.run(loop) + self.run(loop, 0, 10) assert self.getint(0) == 0 assert self.getint(1) == 10 @@ -320,9 +317,7 @@ finish(1, 2) ''' self.attach_bridge(bridge_ops, loop, 0) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 1) - self.run(loop) + self.run(loop, 0, 1) def test_spill_for_constant(self): ops = ''' @@ -406,7 +401,7 @@ guard_true(i5) [i2, i1] jump(i0, i18, i15, i16, i2, i1, i4, descr=targettoken) ''' - self.interpret(ops, [0, 1, 2, 3]) + self.interpret(ops, [0, 1, 2, 3, 0, 0, 0]) def test_op_result_unused(self): ops = ''' @@ -440,9 +435,7 @@ finish(i0, i1, i2, i3, i4, i5, i6, i7, i8) ''' self.attach_bridge(bridge_ops, loop, 1) - for i in range(9): - self.cpu.set_future_value_int(i, i) - self.run(loop) + self.run(loop, 0, 1, 2, 3, 4, 5, 6, 7, 8) assert self.getints(9) == range(9) def test_loopargs(self): @@ -452,27 +445,13 @@ jump(i4, i1, i2, i3) """ regalloc = self.prepare_loop(ops) - assert len(regalloc.rm.reg_bindings) == 2 + if IS_X86_64: + assert len(regalloc.rm.reg_bindings) == 4 + assert len(regalloc.fm.bindings) == 0 + else: + assert len(regalloc.rm.reg_bindings) == 0 + assert len(regalloc.fm.bindings) == 4 - def test_loopargs_2(self): - ops = """ - [i0, i1, i2, i3] - i4 = int_add(i0, i1) - finish(i4, i1, i2, i3) - """ - regalloc = self.prepare_loop(ops) - assert len(regalloc.rm.reg_bindings) == 2 - - def test_loopargs_3(self): - ops = """ - [i0, i1, i2, i3] - i4 = int_add(i0, i1) - guard_true(i4) [i0, i1, i2, i3, i4] - jump(i4, i1, i2, i3) - """ - regalloc = self.prepare_loop(ops) - assert len(regalloc.rm.reg_bindings) == 2 - class TestRegallocCompOps(BaseTestRegalloc): @@ -640,8 +619,8 @@ i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) - assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) + assert self.getints(10) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9] clt = loop._jitcelltoken.compiled_loop_token assert clt.param_depth == self.expected_param_depth(1) @@ -652,8 +631,8 @@ i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) finish(i11, i1, i2, i3, i4, i5, i6, i7, i8, i9) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) - assert self.getints(11) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) + assert self.getints(10) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9] clt = loop._jitcelltoken.compiled_loop_token assert clt.param_depth == self.expected_param_depth(2) @@ -689,9 +668,7 @@ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == 
self.expected_param_depth(2) - self.cpu.set_future_value_int(0, 4) - self.cpu.set_future_value_int(1, 7) - self.run(loop) + self.run(loop, 4, 7) assert self.getint(0) == 5*7 def test_bridge_calls_2(self): @@ -712,8 +689,6 @@ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) - self.cpu.set_future_value_int(0, 4) - self.cpu.set_future_value_int(1, 7) - self.run(loop) + self.run(loop, 4, 7) assert self.getint(0) == 29 diff --git a/pypy/jit/backend/x86/test/test_regalloc2.py b/pypy/jit/backend/x86/test/test_regalloc2.py --- a/pypy/jit/backend/x86/test/test_regalloc2.py +++ b/pypy/jit/backend/x86/test/test_regalloc2.py @@ -22,8 +22,7 @@ cpu.setup_once() looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 9) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, 9) assert cpu.get_latest_value_int(0) == (9 >> 3) assert cpu.get_latest_value_int(1) == (~18) @@ -45,8 +44,7 @@ cpu.setup_once() looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, -10) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, -10) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == -1000 assert cpu.get_latest_value_int(2) == 1 @@ -142,17 +140,7 @@ cpu.setup_once() looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, -13) - cpu.set_future_value_int(1, 10) - cpu.set_future_value_int(2, 10) - cpu.set_future_value_int(3, 8) - cpu.set_future_value_int(4, -8) - cpu.set_future_value_int(5, -16) - cpu.set_future_value_int(6, -18) - cpu.set_future_value_int(7, 46) - cpu.set_future_value_int(8, -12) - cpu.set_future_value_int(9, 26) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, -13, 10, 10, 8, -8, -16, -18, 46, -12, 26) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == 0 assert cpu.get_latest_value_int(2) == 0 @@ -257,17 +245,7 @@ cpu.setup_once() looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 17) - cpu.set_future_value_int(1, -20) - cpu.set_future_value_int(2, -6) - cpu.set_future_value_int(3, 6) - cpu.set_future_value_int(4, 1) - cpu.set_future_value_int(5, 13) - cpu.set_future_value_int(6, 13) - cpu.set_future_value_int(7, 9) - cpu.set_future_value_int(8, 49) - cpu.set_future_value_int(9, 8) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, 17, -20, -6, 6, 1, 13, 13, 9, 49, 8) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == 8 assert cpu.get_latest_value_int(2) == 1 diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -282,11 +282,7 @@ ops[-2].setfailargs([i1]) looptoken = JitCellToken() self.cpu.compile_loop([b], ops, looptoken) - if op == rop.INT_IS_TRUE: - self.cpu.set_future_value_int(0, b.value) - else: - self.cpu.set_future_value_ref(0, b.value) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, b.value) result = self.cpu.get_latest_value_int(0) if guard == rop.GUARD_FALSE: assert result == execute(self.cpu, None, @@ -332,9 +328,8 @@ inputargs = [i for i in (a, b) if isinstance(i, Box)] looptoken = JitCellToken() self.cpu.compile_loop(inputargs, ops, looptoken) - for i, box in enumerate(inputargs): - self.cpu.set_future_value_int(i, box.value) - 
self.cpu.execute_token(looptoken) + inputvalues = [box.value for box in inputargs] + self.cpu.execute_token(looptoken, *inputvalues) result = self.cpu.get_latest_value_int(0) expected = execute(self.cpu, None, op, None, a, b).value if guard == rop.GUARD_FALSE: @@ -400,8 +395,7 @@ assert address >= loopaddress + loopsize assert size >= 10 # randomish number - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -507,9 +501,7 @@ looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) - self.cpu.set_future_value_int(0, 123450) - self.cpu.set_future_value_int(1, 123408) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 123450, 123408) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 42 assert self.cpu.get_latest_value_int(1) == 42 @@ -541,8 +533,7 @@ self.cpu.assembler.set_debug(True) looptoken = JitCellToken() self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] assert struct.i == 10 @@ -565,7 +556,6 @@ self.cpu.assembler.set_debug(True) looptoken = JitCellToken() self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 0) assert looptoken._x86_debug_checksum == sum([op.getopnum() for op in ops.operations]) diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1504,7 +1504,6 @@ all_virtuals=None): from pypy.jit.metainterp.resume import blackhole_from_resumedata #debug_start('jit-blackhole') - metainterp_sd.profiler.start_blackhole() blackholeinterp = blackhole_from_resumedata( metainterp_sd.blackholeinterpbuilder, jitdriver_sd, @@ -1518,10 +1517,9 @@ current_exc = blackholeinterp._prepare_resume_from_failure( resumedescr.guard_opnum, dont_change_position) - try: - _run_forever(blackholeinterp, current_exc) - finally: - metainterp_sd.profiler.end_blackhole() + #try: + _run_forever(blackholeinterp, current_exc) + #finally: #debug_stop('jit-blackhole') def convert_and_run_from_pyjitpl(metainterp, raising_exception=False): @@ -1529,7 +1527,6 @@ # 'metainterp.framestack'. 
#debug_start('jit-blackhole') metainterp_sd = metainterp.staticdata - metainterp_sd.profiler.start_blackhole() nextbh = None for frame in metainterp.framestack: curbh = metainterp_sd.blackholeinterpbuilder.acquire_interp() @@ -1546,8 +1543,7 @@ firstbh.exception_last_value = current_exc current_exc = lltype.nullptr(rclass.OBJECTPTR.TO) # - try: - _run_forever(firstbh, current_exc) - finally: - metainterp_sd.profiler.end_blackhole() + #try: + _run_forever(firstbh, current_exc) + #finally: #debug_stop('jit-blackhole') diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -11,7 +11,7 @@ from pypy.jit.metainterp.resoperation import ResOperation, rop, get_deep_immutable_oplist from pypy.jit.metainterp.history import TreeLoop, Box, History, JitCellToken, TargetToken from pypy.jit.metainterp.history import AbstractFailDescr, BoxInt -from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const +from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const, ConstInt from pypy.jit.metainterp import history from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.jit.metainterp.optimize import InvalidLoop @@ -254,7 +254,44 @@ record_loop_or_bridge(metainterp_sd, loop) return target_token +def patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd): + vinfo = jitdriver_sd.virtualizable_info + extra_ops = [] + inputargs = loop.inputargs + vable_box = inputargs[jitdriver_sd.index_of_virtualizable] + i = jitdriver_sd.num_red_args + loop.inputargs = inputargs[:i] + for descr in vinfo.static_field_descrs: + assert i < len(inputargs) + box = inputargs[i] + extra_ops.append( + ResOperation(rop.GETFIELD_GC, [vable_box], box, descr)) + i += 1 + arrayindex = 0 + for descr in vinfo.array_field_descrs: + vable = vable_box.getref_base() + arraylen = vinfo.get_array_length(vable, arrayindex) + arraybox = BoxPtr() + extra_ops.append( + ResOperation(rop.GETFIELD_GC, [vable_box], arraybox, descr)) + arraydescr = vinfo.array_descrs[arrayindex] + assert i + arraylen <= len(inputargs) + for index in range(arraylen): + box = inputargs[i] + extra_ops.append( + ResOperation(rop.GETARRAYITEM_GC, + [arraybox, ConstInt(index)], + box, descr=arraydescr)) + i += 1 + arrayindex += 1 + assert i == len(inputargs) + loop.operations = extra_ops + loop.operations + def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): + vinfo = jitdriver_sd.virtualizable_info + if vinfo is not None: + patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd) + original_jitcell_token = loop.original_jitcell_token jitdriver_sd.on_compile(metainterp_sd.logger_ops, original_jitcell_token, loop.operations, type, greenkey) @@ -435,14 +472,14 @@ if self.must_compile(metainterp_sd, jitdriver_sd): self.start_compiling() try: - return self._trace_and_compile_from_bridge(metainterp_sd, - jitdriver_sd) + self._trace_and_compile_from_bridge(metainterp_sd, + jitdriver_sd) finally: self.done_compiling() else: from pypy.jit.metainterp.blackhole import resume_in_blackhole resume_in_blackhole(metainterp_sd, jitdriver_sd, self) - assert 0, "unreachable" + assert 0, "unreachable" def _trace_and_compile_from_bridge(self, metainterp_sd, jitdriver_sd): # 'jitdriver_sd' corresponds to the outermost one, i.e. the one @@ -451,7 +488,7 @@ # jitdrivers. 
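
The new patch_new_loop_to_load_virtualizable_fields() above supports the reduced argument list: a freshly compiled loop now receives only the red arguments, and the virtualizable's fields are reloaded inside the loop by GETFIELD_GC / GETARRAYITEM_GC operations prepended to its body. A much simplified sketch of the idea (static fields only, toy tuples instead of real ResOperations):

    def patch_loop(inputargs, operations, num_red_args, vable_index, field_names):
        vable = inputargs[vable_index]
        extra = []
        i = num_red_args
        for name in field_names:               # one box per static field
            box = inputargs[i]
            extra.append(('getfield_gc', vable, name, box))
            i += 1
        assert i == len(inputargs)
        # the loop keeps only the red args; the field loads become explicit ops
        return inputargs[:num_red_args], extra + operations

    inputargs = ['frame', 'n', 'frame.x', 'frame.y']   # red args + field boxes
    ops = [('int_add', 'frame.x', 'n', 'i1')]
    newargs, newops = patch_loop(inputargs, ops, num_red_args=2,
                                 vable_index=0, field_names=['x', 'y'])
    assert newargs == ['frame', 'n']
    assert newops[0] == ('getfield_gc', 'frame', 'x', 'frame.x')
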
from pypy.jit.metainterp.pyjitpl import MetaInterp metainterp = MetaInterp(metainterp_sd, jitdriver_sd) - return metainterp.handle_guard_failure(self) + metainterp.handle_guard_failure(self) _trace_and_compile_from_bridge._dont_inline_ = True def must_compile(self, metainterp_sd, jitdriver_sd): @@ -767,21 +804,25 @@ assert exception, "PropagateExceptionDescr: no exception??" raise metainterp_sd.ExitFrameWithExceptionRef(cpu, exception) -def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redboxes, +def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redargtypes, memory_manager=None): """Make a LoopToken that corresponds to assembler code that just calls back the interpreter. Used temporarily: a fully compiled version of the code may end up replacing it. """ - # 'redboxes' is only used to know the types of red arguments. - inputargs = [box.clonebox() for box in redboxes] jitcell_token = make_jitcell_token(jitdriver_sd) - # 'nb_red_args' might be smaller than len(redboxes), - # because it doesn't include the virtualizable boxes. nb_red_args = jitdriver_sd.num_red_args + assert len(redargtypes) == nb_red_args + inputargs = [] + for kind in redargtypes: + if kind == history.INT: box = BoxInt() + elif kind == history.REF: box = BoxPtr() + elif kind == history.FLOAT: box = BoxFloat() + else: raise AssertionError + inputargs.append(box) k = jitdriver_sd.portal_runner_adr funcbox = history.ConstInt(heaptracker.adr2int(k)) - callargs = [funcbox] + greenboxes + inputargs[:nb_red_args] + callargs = [funcbox] + greenboxes + inputargs # result_type = jitdriver_sd.result_type if result_type == history.INT: diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -124,9 +124,6 @@ def sort_key(self): raise NotImplementedError - def set_future_value(self, cpu, j): - raise NotImplementedError - def nonnull(self): raise NotImplementedError @@ -289,9 +286,6 @@ def _get_hash_(self): return make_hashable_int(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_int(j, self.value) - def same_constant(self, other): if isinstance(other, ConstInt): return self.value == other.value @@ -329,9 +323,6 @@ def _get_hash_(self): return longlong.gethash(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_float(j, self.value) - def same_constant(self, other): if isinstance(other, ConstFloat): return self.value == other.value @@ -378,9 +369,6 @@ def getaddr(self): return llmemory.cast_ptr_to_adr(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def same_constant(self, other): if isinstance(other, ConstPtr): return self.value == other.value @@ -432,9 +420,6 @@ else: return 0 - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - ## def getaddr(self): ## # so far this is used only when calling ## # CodeWriter.IndirectCallset.bytecode_for_address. 
We don't need a @@ -540,9 +525,6 @@ def _get_hash_(self): return make_hashable_int(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_int(j, self.value) - def nonnull(self): return self.value != 0 @@ -575,9 +557,6 @@ def _get_hash_(self): return longlong.gethash(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_float(j, self.value) - def nonnull(self): return self.value != longlong.ZEROF @@ -620,9 +599,6 @@ else: return 0 - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def nonnull(self): return bool(self.value) @@ -667,19 +643,12 @@ def nonnull(self): return bool(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def repr_rpython(self): return repr_rpython(self, 'bo') _getrepr_ = repr_object -def set_future_values(cpu, boxes): - for j in range(len(boxes)): - boxes[j].set_future_value(cpu, j) - # ____________________________________________________________ @@ -768,10 +737,23 @@ class TargetToken(AbstractDescr): def __init__(self, targeting_jitcell_token=None): - # The jitcell to which jumps might result in a jump to this label + # Warning, two different jitcell_tokens here! + # + # * 'targeting_jitcell_token' is only useful for the front-end, + # and it means: consider the LABEL that uses this TargetToken. + # At this position, the state is logically the one given + # by targeting_jitcell_token. So e.g. if we want to enter the + # JIT with some given green args, if the jitcell matches, then + # we can jump to this LABEL. + # + # * 'original_jitcell_token' is information from the backend's + # point of view: it means that this TargetToken is used in + # a LABEL that belongs to either: + # - a loop; then 'original_jitcell_token' is this loop + # - or a bridge; then 'original_jitcell_token' is the loop + # out of which we made this bridge + # self.targeting_jitcell_token = targeting_jitcell_token - - # The jitcell where the trace containing the label with this TargetToken begins self.original_jitcell_token = None self.virtual_state = None diff --git a/pypy/jit/metainterp/jitdriver.py b/pypy/jit/metainterp/jitdriver.py --- a/pypy/jit/metainterp/jitdriver.py +++ b/pypy/jit/metainterp/jitdriver.py @@ -11,6 +11,7 @@ # self.portal_calldescr ... pypy.jit.metainterp.warmspot # self.num_green_args ... pypy.jit.metainterp.warmspot # self.num_red_args ... pypy.jit.metainterp.warmspot + # self.red_args_types ... pypy.jit.metainterp.warmspot # self.result_type ... pypy.jit.metainterp.warmspot # self.virtualizable_info... pypy.jit.metainterp.warmspot # self.greenfield_info ... pypy.jit.metainterp.warmspot diff --git a/pypy/jit/metainterp/jitprof.py b/pypy/jit/metainterp/jitprof.py --- a/pypy/jit/metainterp/jitprof.py +++ b/pypy/jit/metainterp/jitprof.py @@ -10,8 +10,6 @@ counters=""" TRACING BACKEND -RUNNING -BLACKHOLE OPS RECORDED_OPS GUARDS @@ -67,18 +65,6 @@ def end_backend(self): pass - def start_running(self): - pass - - def end_running(self): - pass - - def start_blackhole(self): - pass - - def end_blackhole(self): - pass - def count(self, kind, inc=1): pass @@ -134,16 +120,6 @@ def start_backend(self): self._start(BACKEND) def end_backend(self): self._end (BACKEND) - # Don't record times for 'running' and 'blackhole' because there are - # too many of them: calling time.time() is a major blocker. - # If you are interested in these numbers, use 'PYPYLOG=file' and - # look at the resulting file with pypy/tool/logparser.py. 
- def start_running(self): self.count(RUNNING) - def end_running(self): pass - - def start_blackhole(self): self.count(BLACKHOLE) - def end_blackhole(self): pass - def count(self, kind, inc=1): self.counters[kind] += inc @@ -165,8 +141,6 @@ calls = self.calls self._print_line_time("Tracing", cnt[TRACING], tim[TRACING]) self._print_line_time("Backend", cnt[BACKEND], tim[BACKEND]) - self._print_intline("Running asm", cnt[RUNNING]) - self._print_intline("Blackhole", cnt[BLACKHOLE]) line = "TOTAL: \t\t%f" % (self.tk - self.starttime, ) debug_print(line) self._print_intline("ops", cnt[OPS]) diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1810,7 +1810,7 @@ def _interpret(self): # Execute the frames forward until we raise a DoneWithThisFrame, - # a ExitFrameWithException, or a GenerateMergePoint exception. + # a ExitFrameWithException, or a ContinueRunningNormally exception. self.staticdata.stats.entered() while True: self.framestack[-1].run_one_step() @@ -1858,8 +1858,6 @@ self.seen_loop_header_for_jdindex = -1 try: self.interpret() - except GenerateMergePoint, gmp: - return self.designate_target_loop(gmp) except SwitchToBlackhole, stb: self.run_blackhole_interp_to_cancel_tracing(stb) assert False, "should always raise" @@ -1894,8 +1892,6 @@ if self.resumekey_original_loop_token is None: # very rare case raise SwitchToBlackhole(ABORT_BRIDGE) self.interpret() - except GenerateMergePoint, gmp: - return self.designate_target_loop(gmp) except SwitchToBlackhole, stb: self.run_blackhole_interp_to_cancel_tracing(stb) assert False, "should always raise" @@ -1980,12 +1976,48 @@ start = len(self.history.operations) self.current_merge_points.append((live_arg_boxes, start)) - def designate_target_loop(self, gmp): - loop_token = gmp.target_loop_token + def _unpack_boxes(self, boxes, start, stop): + ints = []; refs = []; floats = [] + for i in range(start, stop): + box = boxes[i] + if box.type == history.INT: ints.append(box.getint()) + elif box.type == history.REF: refs.append(box.getref_base()) + elif box.type == history.FLOAT:floats.append(box.getfloatstorage()) + else: assert 0 + return ints[:], refs[:], floats[:] + + def raise_continue_running_normally(self, live_arg_boxes, loop_token): + self.history.inputargs = None + self.history.operations = None + # For simplicity, we just raise ContinueRunningNormally here and + # ignore the loop_token passed in. It means that we go back to + # interpreted mode, but it should come back very quickly to the + # JIT, find probably the same 'loop_token', and execute it. + if we_are_translated(): + num_green_args = self.jitdriver_sd.num_green_args + gi, gr, gf = self._unpack_boxes(live_arg_boxes, 0, num_green_args) + ri, rr, rf = self._unpack_boxes(live_arg_boxes, num_green_args, + len(live_arg_boxes)) + CRN = self.staticdata.ContinueRunningNormally + raise CRN(gi, gr, gf, ri, rr, rf) + else: + # However, in order to keep the existing tests working + # (which are based on the assumption that 'loop_token' is + # directly used here), a bit of custom non-translatable code... 
+ self._nontranslated_run_directly(live_arg_boxes, loop_token) + assert 0, "unreachable" + + def _nontranslated_run_directly(self, live_arg_boxes, loop_token): + "NOT_RPYTHON" + args = [] num_green_args = self.jitdriver_sd.num_green_args - residual_args = gmp.argboxes[num_green_args:] - history.set_future_values(self.cpu, residual_args) - return loop_token + num_red_args = self.jitdriver_sd.num_red_args + for box in live_arg_boxes[num_green_args:num_green_args+num_red_args]: + if box.type == history.INT: args.append(box.getint()) + elif box.type == history.REF: args.append(box.getref_base()) + elif box.type == history.FLOAT: args.append(box.getfloatstorage()) + else: assert 0 + self.jitdriver_sd.warmstate.execute_assembler(loop_token, *args) def prepare_resume_from_failure(self, opnum, dont_change_position=False): frame = self.framestack[-1] @@ -2054,10 +2086,9 @@ if target_token is not None: # raise if it *worked* correctly - self.history.inputargs = None - self.history.operations = None assert isinstance(target_token, TargetToken) - raise GenerateMergePoint(live_arg_boxes, target_token.targeting_jitcell_token) + jitcell_token = target_token.targeting_jitcell_token + self.raise_continue_running_normally(live_arg_boxes, jitcell_token) def compile_trace(self, live_arg_boxes, start_resumedescr): num_green_args = self.jitdriver_sd.num_green_args @@ -2075,10 +2106,9 @@ finally: self.history.operations.pop() # remove the JUMP if target_token is not None: # raise if it *worked* correctly - self.history.inputargs = None - self.history.operations = None assert isinstance(target_token, TargetToken) - raise GenerateMergePoint(live_arg_boxes, target_token.targeting_jitcell_token) + jitcell_token = target_token.targeting_jitcell_token + self.raise_continue_running_normally(live_arg_boxes, jitcell_token) def compile_bridge_and_loop(self, original_boxes, live_arg_boxes, start, bridge_arg_boxes, start_resumedescr): @@ -2114,10 +2144,8 @@ except RetraceLoop: assert False assert target_loop_token is not None - - self.history.inputargs = None - self.history.operations = None - raise GenerateMergePoint(live_arg_boxes, old_loop_tokens[0]) + self.raise_continue_running_normally(live_arg_boxes, + old_loop_tokens[0]) def compile_done_with_this_frame(self, exitbox): self.gen_store_back_in_virtualizable() @@ -2395,22 +2423,6 @@ abox, ConstInt(j), itembox) assert i + 1 == len(self.virtualizable_boxes) - def gen_load_from_other_virtualizable(self, vinfo, vbox): - boxes = [] - assert vinfo is not None - for i in range(vinfo.num_static_extra_boxes): - descr = vinfo.static_field_descrs[i] - boxes.append(self.execute_and_record(rop.GETFIELD_GC, descr, vbox)) - virtualizable = vinfo.unwrap_virtualizable_box(vbox) - for k in range(vinfo.num_arrays): - descr = vinfo.array_field_descrs[k] - abox = self.execute_and_record(rop.GETFIELD_GC, descr, vbox) - descr = vinfo.array_descrs[k] - for j in range(vinfo.get_array_length(virtualizable, k)): - boxes.append(self.execute_and_record(rop.GETARRAYITEM_GC, descr, - abox, ConstInt(j))) - return boxes - def replace_box(self, oldbox, newbox): assert isinstance(oldbox, Box) for frame in self.framestack: @@ -2482,25 +2494,13 @@ greenargs = arglist[1:num_green_args+1] args = arglist[num_green_args+1:] assert len(args) == targetjitdriver_sd.num_red_args - vinfo = targetjitdriver_sd.virtualizable_info - if vinfo is not None: - index = targetjitdriver_sd.index_of_virtualizable - vbox = args[index] - args = args + self.gen_load_from_other_virtualizable(vinfo, vbox) - # ^^^ and not "+=", 
which makes 'args' a resizable list warmrunnerstate = targetjitdriver_sd.warmstate - token = warmrunnerstate.get_assembler_token(greenargs, args) + token = warmrunnerstate.get_assembler_token(greenargs) op = op.copy_and_change(rop.CALL_ASSEMBLER, args=args, descr=token) self.history.operations.append(op) # ____________________________________________________________ -class GenerateMergePoint(JitException): - def __init__(self, args, target_loop_token): - assert target_loop_token is not None - self.argboxes = args - self.target_loop_token = target_loop_token - class ChangeFrame(JitException): """Raised after we mutated metainterp.framestack, in order to force it to reload the current top-of-stack frame that gets interpreted.""" diff --git a/pypy/jit/metainterp/test/support.py b/pypy/jit/metainterp/test/support.py --- a/pypy/jit/metainterp/test/support.py +++ b/pypy/jit/metainterp/test/support.py @@ -4,9 +4,9 @@ from pypy.rpython.ootypesystem import ootype from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats +from pypy.jit.metainterp.warmstate import unspecialize_value from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT from pypy.jit.metainterp import pyjitpl, history -from pypy.jit.metainterp.warmstate import set_future_value from pypy.jit.codewriter.policy import JitPolicy from pypy.jit.codewriter import codewriter, longlong from pypy.rlib.rfloat import isnan @@ -136,11 +136,11 @@ procedure_token = metainterp.get_procedure_token(args[:num_green_args]) # a loop was successfully created by _run_with_pyjitpl(); call it cpu = metainterp.cpu + args1 = [] for i in range(len(args) - num_green_args): x = args[num_green_args + i] - typecode = history.getkind(lltype.typeOf(x)) - set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(procedure_token) + args1.append(unspecialize_value(x)) + faildescr = cpu.execute_token(procedure_token, *args1) assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') if metainterp.jitdriver_sd.result_type == history.INT: return cpu.get_latest_value_int(0) diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -9,7 +9,6 @@ from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin, noConst from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.jit.metainterp.warmspot import get_stats -from pypy.jit.metainterp.warmstate import set_future_value from pypy.rlib import rerased from pypy.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside, loop_invariant, elidable, promote, jit_debug, assert_green, diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -55,6 +55,7 @@ warmstate = FakeState() on_compile = staticmethod(lambda *args: None) on_compile_bridge = staticmethod(lambda *args: None) + virtualizable_info = None def test_compile_loop(): cpu = FakeCPU() @@ -171,23 +172,17 @@ result_type = INT # loop_token = compile_tmp_callback(cpu, FakeJitDriverSD(), - [ConstInt(12), ConstInt(34)], - [BoxInt(56), ConstInt(78), BoxInt(90)]) + [ConstInt(12), ConstInt(34)], "ii") # raiseme = None - cpu.set_future_value_int(0, -156) - cpu.set_future_value_int(1, -178) - cpu.set_future_value_int(2, -190) # passed in, but dropped - fail_descr = cpu.execute_token(loop_token) + # only two 
arguments must be passed in + fail_descr = cpu.execute_token(loop_token, -156, -178) assert fail_descr is FakeJitDriverSD().portal_finishtoken # EXC = lltype.GcStruct('EXC') llexc = lltype.malloc(EXC) raiseme = LLException("exception class", llexc) - cpu.set_future_value_int(0, -156) - cpu.set_future_value_int(1, -178) - cpu.set_future_value_int(2, -190) - fail_descr = cpu.execute_token(loop_token) + fail_descr = cpu.execute_token(loop_token, -156, -178) assert isinstance(fail_descr, compile.PropagateExceptionDescr) got = cpu.grab_exc_value() assert lltype.cast_opaque_ptr(lltype.Ptr(EXC), got) == llexc @@ -196,10 +191,7 @@ class ExitFrameWithExceptionRef(Exception): pass FakeMetaInterpSD.cpu = cpu - cpu.set_future_value_int(0, -156) - cpu.set_future_value_int(1, -178) - cpu.set_future_value_int(2, -190) - fail_descr = cpu.execute_token(loop_token) + fail_descr = cpu.execute_token(loop_token, -156, -178) try: fail_descr.handle_fail(FakeMetaInterpSD(), None) except FakeMetaInterpSD.ExitFrameWithExceptionRef, e: diff --git a/pypy/jit/metainterp/test/test_jitprof.py b/pypy/jit/metainterp/test/test_jitprof.py --- a/pypy/jit/metainterp/test/test_jitprof.py +++ b/pypy/jit/metainterp/test/test_jitprof.py @@ -10,7 +10,7 @@ self.counter = 123456 Profiler.start(self) self.events = [] - self.times = [0, 0, 0, 0] + self.times = [0, 0] def timer(self): self.counter += 1 @@ -24,12 +24,6 @@ Profiler._end(self, event) self.events.append(~event) - def start_running(self): self._start(RUNNING) - def end_running(self): self._end(RUNNING) - - def start_blackhole(self): self._start(BLACKHOLE) - def end_blackhole(self): self._end(BLACKHOLE) - class ProfilerMixin(LLJitMixin): def meta_interp(self, *args, **kwds): kwds = kwds.copy() @@ -56,14 +50,10 @@ BACKEND, ~ BACKEND, ~ TRACING, - RUNNING, - ~ RUNNING, - BLACKHOLE, - ~ BLACKHOLE ] assert profiler.events == expected - assert profiler.times == [2, 1, 1, 1] - assert profiler.counters == [1, 1, 1, 1, 3, 3, 1, 15, 2, 0, 0, 0, 0, + assert profiler.times == [2, 1] + assert profiler.counters == [1, 1, 3, 3, 1, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0] def test_simple_loop_with_call(self): diff --git a/pypy/jit/metainterp/test/test_tl.py b/pypy/jit/metainterp/test/test_tl.py --- a/pypy/jit/metainterp/test/test_tl.py +++ b/pypy/jit/metainterp/test/test_tl.py @@ -72,16 +72,16 @@ res = self.meta_interp(main, [0, 6], listops=True, backendopt=True) assert res == 5040 - self.check_resops({'jump': 1, 'int_le': 2, 'guard_value': 1, - 'int_mul': 2, 'guard_false': 2, 'int_sub': 2}) + self.check_simple_loop({'jump': 1, 'int_le': 1, + 'int_mul': 1, 'guard_false': 1, 'int_sub': 1}) def test_tl_2(self): main = self._get_main() res = self.meta_interp(main, [1, 10], listops=True, backendopt=True) assert res == main(1, 10) - self.check_resops({'int_le': 2, 'int_sub': 2, 'jump': 1, - 'guard_false': 2, 'guard_value': 1}) + self.check_simple_loop({'int_le': 1, 'int_sub': 1, 'jump': 1, + 'guard_false': 1}) def test_tl_call(self, listops=True, policy=None): from pypy.jit.tl.tl import interp diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -77,7 +77,7 @@ return xy.inst_x res = self.meta_interp(f, [20]) assert res == 30 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) def test_preexisting_access_2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -102,7 +102,8 @@ 
assert f(5) == 185 res = self.meta_interp(f, [5]) assert res == 185 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, + getfield_gc=2) # <= at the header of the loop def test_two_paths_access(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -124,7 +125,7 @@ return xy.inst_x res = self.meta_interp(f, [18]) assert res == 10118 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=2) def test_synchronize_in_return(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -146,7 +147,7 @@ return xy.inst_x res = self.meta_interp(f, [18]) assert res == 10180 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=2) def test_virtualizable_and_greens(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n', 'xy'], @@ -174,7 +175,7 @@ return res res = self.meta_interp(f, [40]) assert res == 50 * 4 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=4) def test_double_frame(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy', 'other'], @@ -197,7 +198,8 @@ return xy.inst_x res = self.meta_interp(f, [20]) assert res == 134 - self.check_resops(setfield_gc=2, getfield_gc=1) + self.check_simple_loop(setfield_gc=1, getfield_gc=0) + self.check_resops(setfield_gc=2, getfield_gc=3) # ------------------------------ @@ -247,8 +249,8 @@ return xy2.inst_l1[2] res = self.meta_interp(f, [16]) assert res == 3001 + 16 * 80 - self.check_resops(setarrayitem_gc=0, setfield_gc=0, - getarrayitem_gc=0, getfield_gc=0) + self.check_simple_loop(setarrayitem_gc=0, setfield_gc=0, + getarrayitem_gc=0, getfield_gc=0) def test_synchronize_arrays_in_return(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -278,7 +280,8 @@ assert f(18) == 10360 res = self.meta_interp(f, [18]) assert res == 10360 - self.check_resops(setfield_gc=0, getarrayitem_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getarrayitem_gc=0, + getfield_gc=0, setarrayitem_gc=0) def test_array_length(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -304,8 +307,8 @@ return xy2.inst_l1[1] res = self.meta_interp(f, [18]) assert res == 2941309 + 18 - self.check_resops(setfield_gc=0, getarrayitem_gc=0, - arraylen_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getarrayitem_gc=0, + arraylen_gc=0, getfield_gc=0) def test_residual_function(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -338,8 +341,8 @@ return xy2.inst_l1[1] res = self.meta_interp(f, [18]) assert res == 2941309 + 18 - self.check_resops(call=2, setfield_gc=0, getarrayitem_gc=0, - arraylen_gc=2, getfield_gc=0) + self.check_simple_loop(call=1, setfield_gc=0, getarrayitem_gc=0, + arraylen_gc=1, getfield_gc=0) def test_double_frame_array(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2', 'other'], @@ -375,8 +378,8 @@ expected = f(20) res = self.meta_interp(f, [20], enable_opts='') assert res == expected - self.check_resops(setarrayitem_gc=1, setfield_gc=0, - getarrayitem_gc=1, arraylen_gc=1, getfield_gc=1) + self.check_simple_loop(setarrayitem_gc=1, setfield_gc=0, + getarrayitem_gc=1, arraylen_gc=1, getfield_gc=1) # ------------------------------ @@ -423,7 +426,8 @@ assert f(18) == 10360 res = self.meta_interp(f, [18]) assert res == 10360 - self.check_resops(setfield_gc=0, getarrayitem_gc=0, getfield_gc=0) + self.check_simple_loop(getfield_gc=0, getarrayitem_gc=0, + setfield_gc=0, setarrayitem_gc=0) # 
------------------------------ @@ -457,7 +461,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) def test_virtualizable_with_array(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'x', 'frame'], @@ -491,7 +495,8 @@ res = self.meta_interp(f, [10, 1], listops=True) assert res == f(10, 1) - self.check_resops(getarrayitem_gc=0) + self.check_simple_loop(getfield_gc=0, getarrayitem_gc=0) + self.check_resops(getfield_gc=2, getarrayitem_gc=4) def test_subclass_of_virtualizable(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -519,7 +524,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) def test_external_pass(self): jitdriver = JitDriver(greens = [], reds = ['n', 'z', 'frame'], @@ -1037,7 +1042,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) from pypy.jit.backend.test.support import BaseCompiledMixin if isinstance(self, BaseCompiledMixin): @@ -1197,7 +1202,8 @@ res = self.meta_interp(f, [10]) assert res == 155 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=2) def test_blackhole_should_synchronize(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -1233,7 +1239,8 @@ res = self.meta_interp(f, [10]) assert res == 155 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=2) def test_blackhole_should_not_reenter(self): if not self.basic: diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -303,18 +303,11 @@ exc_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) cls.exc_vtable = exc_vtable - class FakeLoopToken: + class FakeFailDescr(object): def __init__(self, no): self.no = no - self.generation = 0 - - class FakeFailDescr(object): - def __init__(self, looptoken): - assert isinstance(looptoken, FakeLoopToken) - self.looptoken = looptoken - def handle_fail(self, metainterp_sd, jitdrivers_sd): - no = self.looptoken.no + no = self.no if no == 0: raise metainterp_sd.warmrunnerdesc.DoneWithThisFrameInt(3) if no == 1: @@ -326,7 +319,7 @@ raise metainterp_sd.warmrunnerdesc.ExitFrameWithExceptionRef( metainterp_sd.cpu, lltype.cast_opaque_ptr(llmemory.GCREF, exc)) - return self.looptoken + assert 0 class FakeDescr: def as_vtable_size_descr(self): @@ -353,11 +346,10 @@ sizeof = nodescr def get_fail_descr_from_number(self, no): - return FakeFailDescr(FakeLoopToken(no)) + return FakeFailDescr(no) - def execute_token(self, token): - assert token.no == 2 - return FakeFailDescr(FakeLoopToken(1)) + def make_execute_token(self, *ARGS): + return "not callable" driver = JitDriver(reds = ['red'], greens = ['green']) @@ -381,7 +373,6 @@ [jd] = self.desc.jitdrivers_sd assert jd._assembler_call_helper(0, 0) == 3 assert jd._assembler_call_helper(1, 0) == 10 - assert jd._assembler_call_helper(2, 0) == 10 try: jd._assembler_call_helper(3, 0) except LLException, lle: diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ 
b/pypy/jit/metainterp/test/test_warmstate.py @@ -151,29 +151,6 @@ assert get_jitcell(False, 42, 0.25) is cell4 assert cell1 is not cell3 is not cell4 is not cell1 -def test_make_set_future_values(): - future_values = {} - class FakeCPU: - def set_future_value_int(self, j, value): - future_values[j] = "int", value - def set_future_value_float(self, j, value): - future_values[j] = "float", value - class FakeWarmRunnerDesc: - cpu = FakeCPU() - memory_manager = None - class FakeJitDriverSD: - _red_args_types = ["int", "float"] - virtualizable_info = None - # - state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) - set_future_values = state.make_set_future_values() - set_future_values(5, 42.5) - assert future_values == { - 0: ("int", 5), - 1: ("float", longlong.getfloatstorage(42.5)), - } - assert set_future_values is state.make_set_future_values() - def test_make_unwrap_greenkey(): class FakeJitDriverSD: _green_args_spec = [lltype.Signed, lltype.Float] @@ -210,6 +187,7 @@ _confirm_enter_jit_ptr = None _can_never_inline_ptr = None _should_unroll_one_iteration_ptr = None + red_args_types = [] class FakeCell: dont_trace_here = False state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) @@ -239,6 +217,7 @@ _can_never_inline_ptr = None _get_jitcell_at_ptr = None _should_unroll_one_iteration_ptr = None + red_args_types = [] state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) state.make_jitdriver_callbacks() res = state.get_location_str([ConstInt(5), constfloat(42.5)]) @@ -264,6 +243,7 @@ _can_never_inline_ptr = None _get_jitcell_at_ptr = None _should_unroll_one_iteration_ptr = None + red_args_types = [] state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) state.make_jitdriver_callbacks() @@ -289,6 +269,7 @@ _can_never_inline_ptr = llhelper(CAN_NEVER_INLINE, can_never_inline) _get_jitcell_at_ptr = None _should_unroll_one_iteration_ptr = None + red_args_types = [] state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) state.make_jitdriver_callbacks() diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -522,9 +522,9 @@ greens_v, reds_v = support.decode_hp_hint_args(op) ALLARGS = [v.concretetype for v in (greens_v + reds_v)] jd._green_args_spec = [v.concretetype for v in greens_v] - jd._red_args_types = [history.getkind(v.concretetype) for v in reds_v] + jd.red_args_types = [history.getkind(v.concretetype) for v in reds_v] jd.num_green_args = len(jd._green_args_spec) - jd.num_red_args = len(jd._red_args_types) + jd.num_red_args = len(jd.red_args_types) RESTYPE = graph.getreturnvar().concretetype (jd._JIT_ENTER_FUNCTYPE, jd._PTR_JIT_ENTER_FUNCTYPE) = self.cpu.ts.get_FuncType(ALLARGS, lltype.Void) @@ -771,16 +771,16 @@ def assembler_call_helper(failindex, virtualizableref): fail_descr = self.cpu.get_fail_descr_from_number(failindex) - while True: - if vinfo is not None: - virtualizable = lltype.cast_opaque_ptr( - vinfo.VTYPEPTR, virtualizableref) - vinfo.reset_vable_token(virtualizable) - try: - loop_token = fail_descr.handle_fail(self.metainterp_sd, jd) - except JitException, e: - return handle_jitexception(e) - fail_descr = self.execute_token(loop_token) + if vinfo is not None: + virtualizable = lltype.cast_opaque_ptr( + vinfo.VTYPEPTR, virtualizableref) + vinfo.reset_vable_token(virtualizable) + try: + fail_descr.handle_fail(self.metainterp_sd, jd) + except JitException, e: + return handle_jitexception(e) + else: + assert 0, "should have raised" 
jd._assembler_call_helper = assembler_call_helper # for debugging jd._assembler_helper_ptr = self.helper_func( @@ -910,10 +910,3 @@ graphs = self.translator.graphs for graph, block, i in find_force_quasi_immutable(graphs): self.replace_force_quasiimmut_with_direct_call(block.operations[i]) - - # ____________________________________________________________ - - def execute_token(self, loop_token): - fail_descr = self.cpu.execute_token(loop_token) - self.memory_manager.keep_loop_alive(loop_token) - return fail_descr diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -12,6 +12,7 @@ from pypy.rlib.debug import debug_start, debug_stop, debug_print from pypy.jit.metainterp import history from pypy.jit.codewriter import support, heaptracker, longlong +from pypy.tool.sourcetools import func_with_new_name # ____________________________________________________________ @@ -142,26 +143,6 @@ else: return rffi.cast(lltype.Signed, x) - at specialize.ll_and_arg(3) -def set_future_value(cpu, j, value, typecode): - if typecode == 'ref': - refvalue = cpu.ts.cast_to_ref(value) - cpu.set_future_value_ref(j, refvalue) - elif typecode == 'int': - if isinstance(lltype.typeOf(value), lltype.Ptr): - intvalue = llmemory.AddressAsInt(llmemory.cast_ptr_to_adr(value)) - else: - intvalue = lltype.cast_primitive(lltype.Signed, value) - cpu.set_future_value_int(j, intvalue) - elif typecode == 'float': - if lltype.typeOf(value) is lltype.Float: - value = longlong.getfloatstorage(value) - else: - assert longlong.is_longlong(lltype.typeOf(value)) - value = rffi.cast(lltype.SignedLongLong, value) - cpu.set_future_value_float(j, value) - else: - assert False class JitCell(BaseJitCell): # the counter can mean the following things: @@ -297,20 +278,48 @@ index_of_virtualizable = jitdriver_sd.index_of_virtualizable num_green_args = jitdriver_sd.num_green_args get_jitcell = self.make_jitcell_getter() - set_future_values = self.make_set_future_values() self.make_jitdriver_callbacks() confirm_enter_jit = self.confirm_enter_jit + range_red_args = unrolling_iterable( + range(num_green_args, num_green_args + jitdriver_sd.num_red_args)) + # get a new specialized copy of the method + ARGS = [] + for kind in jitdriver_sd.red_args_types: + if kind == 'int': + ARGS.append(lltype.Signed) + elif kind == 'ref': + ARGS.append(llmemory.GCREF) + elif kind == 'float': + ARGS.append(longlong.FLOATSTORAGE) + else: + assert 0, kind + func_execute_token = self.cpu.make_execute_token(*ARGS) + + def execute_assembler(loop_token, *args): + # Call the backend to run the 'looptoken' with the given + # input args. + fail_descr = func_execute_token(loop_token, *args) + # + # If we have a virtualizable, we have to reset its + # 'vable_token' field afterwards + if vinfo is not None: + virtualizable = args[index_of_virtualizable] + virtualizable = vinfo.cast_gcref_to_vtype(virtualizable) + vinfo.reset_vable_token(virtualizable) + # + # Record in the memmgr that we just ran this loop, + # so that it will keep it alive for a longer time + warmrunnerdesc.memory_manager.keep_loop_alive(loop_token) + # + # Handle the failure + fail_descr.handle_fail(metainterp_sd, jitdriver_sd) + # + assert 0, "should have raised" def maybe_compile_and_run(threshold, *args): """Entry point to the JIT. Called at the point with the can_enter_jit() hint. 
""" - if vinfo is not None: - virtualizable = args[num_green_args + index_of_virtualizable] - virtualizable = vinfo.cast_to_vtype(virtualizable) - else: - virtualizable = None - # look for the cell corresponding to the current greenargs greenargs = args[:num_green_args] cell = get_jitcell(True, *greenargs) @@ -330,42 +339,36 @@ # set counter to -2, to mean "tracing in effect" cell.counter = -2 try: - procedure_token = metainterp.compile_and_run_once(jitdriver_sd, - *args) + metainterp.compile_and_run_once(jitdriver_sd, *args) finally: if cell.counter == -2: cell.counter = 0 else: - if cell.counter == -2: + if cell.counter != -1: + assert cell.counter == -2 # tracing already happening in some outer invocation of # this function. don't trace a second time. return - assert cell.counter == -1 if not confirm_enter_jit(*args): return + # machine code was already compiled for these greenargs procedure_token = cell.get_procedure_token() if procedure_token is None: # it was a weakref that has been freed cell.counter = 0 return - # machine code was already compiled for these greenargs - # get the assembler and fill in the boxes - set_future_values(*args[num_green_args:]) - - # ---------- execute assembler ---------- - while True: # until interrupted by an exception - metainterp_sd.profiler.start_running() - #debug_start("jit-running") - fail_descr = warmrunnerdesc.execute_token(procedure_token) - #debug_stop("jit-running") - metainterp_sd.profiler.end_running() - procedure_token = None # for test_memmgr - if vinfo is not None: - vinfo.reset_vable_token(virtualizable) - procedure_token = fail_descr.handle_fail(metainterp_sd, - jitdriver_sd) + # extract and unspecialize the red arguments to pass to + # the assembler + execute_args = () + for i in range_red_args: + execute_args += (unspecialize_value(args[i]), ) + # run it! 
this executes until interrupted by an exception + execute_assembler(procedure_token, *execute_args) + # + assert 0, "should not reach this point" maybe_compile_and_run._dont_inline_ = True self.maybe_compile_and_run = maybe_compile_and_run + self.execute_assembler = execute_assembler return maybe_compile_and_run # ---------- @@ -506,56 +509,6 @@ # ---------- - def make_set_future_values(self): - "NOT_RPYTHON" - if hasattr(self, 'set_future_values'): - return self.set_future_values - - jitdriver_sd = self.jitdriver_sd - cpu = self.cpu - vinfo = jitdriver_sd.virtualizable_info - red_args_types = unrolling_iterable(jitdriver_sd._red_args_types) - # - def set_future_values(*redargs): - i = 0 - for typecode in red_args_types: - set_future_value(cpu, i, redargs[i], typecode) - i = i + 1 - if vinfo is not None: - set_future_values_from_vinfo(*redargs) - # - if vinfo is not None: - i0 = len(jitdriver_sd._red_args_types) - index_of_virtualizable = jitdriver_sd.index_of_virtualizable - vable_static_fields = unrolling_iterable( - zip(vinfo.static_extra_types, vinfo.static_fields)) - vable_array_fields = unrolling_iterable( - zip(vinfo.arrayitem_extra_types, vinfo.array_fields)) - getlength = cpu.ts.getlength - getarrayitem = cpu.ts.getarrayitem - # - def set_future_values_from_vinfo(*redargs): - i = i0 - virtualizable = redargs[index_of_virtualizable] - virtualizable = vinfo.cast_to_vtype(virtualizable) - for typecode, fieldname in vable_static_fields: - x = getattr(virtualizable, fieldname) - set_future_value(cpu, i, x, typecode) - i = i + 1 - for typecode, fieldname in vable_array_fields: - lst = getattr(virtualizable, fieldname) - for j in range(getlength(lst)): - x = getarrayitem(lst, j) - set_future_value(cpu, i, x, typecode) - i = i + 1 - else: - set_future_values_from_vinfo = None - # - self.set_future_values = set_future_values - return set_future_values - - # ---------- - def make_jitdriver_callbacks(self): if hasattr(self, 'get_location_str'): return @@ -605,8 +558,9 @@ jd.on_compile = lambda *args: None jd.on_compile_bridge = lambda *args: None - def get_assembler_token(greenkey, redboxes): - # 'redboxes' is only used to know the types of red arguments + redargtypes = ''.join([kind[0] for kind in jd.red_args_types]) + + def get_assembler_token(greenkey): cell = self.jit_cell_at_key(greenkey) procedure_token = cell.get_procedure_token() if procedure_token is None: @@ -615,7 +569,7 @@ cell.counter = 0 # but was freed in the meantime. 
memmgr = warmrunnerdesc.memory_manager procedure_token = compile_tmp_callback(cpu, jd, greenkey, - redboxes, memmgr) + redargtypes, memmgr) cell.set_procedure_token(procedure_token) return procedure_token self.get_assembler_token = get_assembler_token diff --git a/pypy/jit/tool/jitoutput.py b/pypy/jit/tool/jitoutput.py --- a/pypy/jit/tool/jitoutput.py +++ b/pypy/jit/tool/jitoutput.py @@ -10,9 +10,6 @@ REGEXES = [ (('tracing_no', 'tracing_time'), '^Tracing:\s+([\d.]+)\s+([\d.]+)$'), (('backend_no', 'backend_time'), '^Backend:\s+([\d.]+)\s+([\d.]+)$'), - (('asm_no',), '^Running asm:\s+([\d.]+)$'), - (('blackhole_no',), - '^Blackhole:\s+([\d.]+)$'), (None, '^TOTAL.*$'), (('ops.total',), '^ops:\s+(\d+)$'), (('recorded_ops.total',), '^recorded ops:\s+(\d+)$'), diff --git a/pypy/jit/tool/test/test_jitoutput.py b/pypy/jit/tool/test/test_jitoutput.py --- a/pypy/jit/tool/test/test_jitoutput.py +++ b/pypy/jit/tool/test/test_jitoutput.py @@ -34,8 +34,6 @@ # assert did not crash # asserts below are a bit delicate, possibly they might be deleted assert info.tracing_no == 1 - assert info.asm_no == 1 - assert info.blackhole_no == 1 assert info.backend_no == 1 assert info.ops.total == 2 assert info.recorded_ops.total == 2 @@ -47,8 +45,6 @@ DATA = '''Tracing: 1 0.006992 Backend: 1 0.000525 -Running asm: 1 -Blackhole: 1 TOTAL: 0.025532 ops: 2 recorded ops: 6 @@ -75,8 +71,6 @@ info = parse_prof(DATA) assert info.tracing_no == 1 assert info.tracing_time == 0.006992 - assert info.asm_no == 1 - assert info.blackhole_no == 1 assert info.backend_no == 1 assert info.backend_time == 0.000525 assert info.ops.total == 2 From noreply at buildbot.pypy.org Mon Dec 12 21:34:17 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Dec 2011 21:34:17 +0100 (CET) Subject: [pypy-commit] pypy jit-simplify-backendintf: close merged branch Message-ID: <20111212203417.A652382ABD@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-simplify-backendintf Changeset: r50446:539833042097 Date: 2011-12-12 22:31 +0200 http://bitbucket.org/pypy/pypy/changeset/539833042097/ Log: close merged branch From noreply at buildbot.pypy.org Mon Dec 12 21:44:08 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Dec 2011 21:44:08 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add an abstract Message-ID: <20111212204408.8383582ABD@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3983:a2c53a086126 Date: 2011-12-12 22:42 +0200 http://bitbucket.org/pypy/extradoc/changeset/a2c53a086126/ Log: Add an abstract diff --git a/talk/sea2012/abstract.rst b/talk/sea2012/abstract.rst new file mode 100644 --- /dev/null +++ b/talk/sea2012/abstract.rst @@ -0,0 +1,16 @@ +Fast numeric in Python - NumPy and PyPy +======================================= + +Python has seen a growing adoption as a new scientific processing language +in the past few years. It has been successfully used as a glue language +to drive various simulations implemented in either C, Fortran or the array +manipulation language implemented in the NumPy package. Originally the main +reason why Python was used only as a glue language is because the original +Python implementation is relatively slow. With the recent progress in the PyPy +project, it has been shown that while still not at C speeds, it has been +gaining significant performance improvements the releases, bringing it +closer and closer to C-level speeds. 
In this talk I would like to explore +how to use it right now, in the near future and our plans to provide a very +robust infrastructure for implementing numerical computations. I will also +spend some time exploring the ideas how dynamic compilation can eventually +outperform static compilation and how having a high-level language helps here. From noreply at buildbot.pypy.org Mon Dec 12 21:59:12 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 12 Dec 2011 21:59:12 +0100 (CET) Subject: [pypy-commit] pypy pyarg-parsetuple-s-star-buffer: Fix for 64bit platforms Message-ID: <20111212205912.57AC182ABD@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: pyarg-parsetuple-s-star-buffer Changeset: r50447:ab4be8e09a58 Date: 2011-12-12 21:54 +0100 http://bitbucket.org/pypy/pypy/changeset/ab4be8e09a58/ Log: Fix for 64bit platforms diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -34,8 +34,8 @@ """ py_buf = rffi.cast(PyBufferObject, py_obj) py_buf.c_b_offset = 0 - py_buf.c_b_readonly = 1 - py_buf.c_b_hash = -1 + rffi.setintfield(py_buf, 'c_b_readonly', 1) + rffi.setintfield(py_buf, 'c_b_hash', -1) if isinstance(w_obj, SubBuffer): py_buf.c_b_offset = w_obj.offset From noreply at buildbot.pypy.org Mon Dec 12 21:59:21 2011 From: noreply at buildbot.pypy.org (edelsohn) Date: Mon, 12 Dec 2011 21:59:21 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Improve language in abstract. Message-ID: <20111212205921.9F04182ABD@wyvern.cs.uni-duesseldorf.de> Author: edelsohn Branch: extradoc Changeset: r3984:b91e6cab9b06 Date: 2011-12-12 15:58 -0500 http://bitbucket.org/pypy/extradoc/changeset/b91e6cab9b06/ Log: Improve language in abstract. diff --git a/talk/sea2012/abstract.rst b/talk/sea2012/abstract.rst --- a/talk/sea2012/abstract.rst +++ b/talk/sea2012/abstract.rst @@ -1,16 +1,17 @@ Fast numeric in Python - NumPy and PyPy ======================================= -Python has seen a growing adoption as a new scientific processing language -in the past few years. It has been successfully used as a glue language -to drive various simulations implemented in either C, Fortran or the array -manipulation language implemented in the NumPy package. Originally the main -reason why Python was used only as a glue language is because the original -Python implementation is relatively slow. With the recent progress in the PyPy -project, it has been shown that while still not at C speeds, it has been -gaining significant performance improvements the releases, bringing it -closer and closer to C-level speeds. In this talk I would like to explore -how to use it right now, in the near future and our plans to provide a very -robust infrastructure for implementing numerical computations. I will also -spend some time exploring the ideas how dynamic compilation can eventually -outperform static compilation and how having a high-level language helps here. +Python increasingly is being utilized as a powerful scientific +processing language. It successfully has been used as a glue language +to drive simulations written in C, Fortran or the array +manipulation language provided by the NumPy package. Originally +Python only was used as a glue language because the original Python +implementation was relatively slow. 
With the recent progress in the +PyPy project that is showing significant performance +improvements in each release, Python is nearing performance comparable +to native C language implementations. In this talk I will +describe three stages: how to use it right now, in the near future and +our plans to provide a very robust infrastructure for implementing +numerical computations. I also will spend some time exploring ideas +how dynamic compilation eventually can outperform static compilation +and how a high-level language helps accomplish this. From pullrequests-noreply at bitbucket.org Mon Dec 12 22:31:01 2011 From: pullrequests-noreply at bitbucket.org (Bitbucket) Date: Mon, 12 Dec 2011 21:31:01 -0000 Subject: [pypy-commit] [OPEN] Pull request #18 for pypy/pypy: Adds string constructors to ints and floats In-Reply-To: References: Message-ID: <20111212213101.17459.62373@bitbucket03.managed.contegix.com> Pull request #18 has been updated by Jeff Terrace to include new changes. https://bitbucket.org/pypy/pypy/pull-request/18/adds-string-constructors-to-ints-and Title: Adds string constructors to ints and floats Creator: Jeff Terrace # Added tests for (u)int8-(u)int64 and float32/float64 for taking strings as contructors, e.g. int8('50') # Added string type to FakeSpace # Updated int and float _coerce methods to handle strings. I had to special case it because space.int(space.wrap('50')) does not work. Updated list of changes: ed24df405fd9 by Jeff Terrace: "Change string special case to use call_function instead" d0fa1bba8dd6 by Jeff Terrace: "Add string to FakeSpace to fix failing compile tests" aaa9b6a48bbb by Jeff Terrace: "Updated int and float types to take strings in their constructors so things like?" -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. From noreply at buildbot.pypy.org Mon Dec 12 22:40:44 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Dec 2011 22:40:44 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Confusing first usage of 'our' this late in the abstract Message-ID: <20111212214044.3882882ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r3985:ec0b4da1f5e8 Date: 2011-12-12 22:40 +0100 http://bitbucket.org/pypy/extradoc/changeset/ec0b4da1f5e8/ Log: Confusing first usage of 'our' this late in the abstract diff --git a/talk/sea2012/abstract.rst b/talk/sea2012/abstract.rst --- a/talk/sea2012/abstract.rst +++ b/talk/sea2012/abstract.rst @@ -11,6 +11,7 @@ improvements in each release, Python is nearing performance comparable to native C language implementations. In this talk I will describe three stages: how to use it right now, in the near future and +(xxx give before this line a hint that "we" or "our" means "pypy developers") our plans to provide a very robust infrastructure for implementing numerical computations. I also will spend some time exploring ideas how dynamic compilation eventually can outperform static compilation From pullrequests-noreply at bitbucket.org Mon Dec 12 22:50:51 2011 From: pullrequests-noreply at bitbucket.org (Bitbucket) Date: Mon, 12 Dec 2011 21:50:51 -0000 Subject: [pypy-commit] [OPEN] Pull request #18 for pypy/pypy: Adds string constructors to ints and floats In-Reply-To: References: Message-ID: <20111212215051.25906.30267@bitbucket01.managed.contegix.com> Pull request #18 has been updated by Jeff Terrace to include new changes. 
https://bitbucket.org/pypy/pypy/pull-request/18/adds-string-constructors-to-ints-and Title: Adds string constructors to ints and floats Creator: Jeff Terrace # Added tests for (u)int8-(u)int64 and float32/float64 for taking strings as contructors, e.g. int8('50') # Added string type to FakeSpace # Updated int and float _coerce methods to handle strings. I had to special case it because space.int(space.wrap('50')) does not work. Updated list of changes: cf42a69ca368 by Jeff Terrace: "Remove FakeSpace string code no longer needed" ed24df405fd9 by Jeff Terrace: "Change string special case to use call_function instead" d0fa1bba8dd6 by Jeff Terrace: "Add string to FakeSpace to fix failing compile tests" aaa9b6a48bbb by Jeff Terrace: "Updated int and float types to take strings in their constructors so things like?" -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. From noreply at buildbot.pypy.org Mon Dec 12 22:52:53 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Mon, 12 Dec 2011 22:52:53 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-strings: Updated int and float types to take strings in their constructors so things like int32('34') work Message-ID: <20111212215253.3741F82ABD@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: numpy-dtype-strings Changeset: r50448:aaa9b6a48bbb Date: 2011-12-11 09:08 -0500 http://bitbucket.org/pypy/pypy/changeset/aaa9b6a48bbb/ Log: Updated int and float types to take strings in their constructors so things like int32('34') work diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -193,6 +193,7 @@ assert type(X(True)) is numpy.bool_ assert X(True) is numpy.True_ + assert numpy.bool_("False") is numpy.True_ def test_int8(self): import numpypy as numpy @@ -211,6 +212,10 @@ assert type(int(x)) is int assert int(x) == -128 + assert numpy.int8('50') == numpy.int8(50) + raises(ValueError, numpy.int8, '50.2') + assert numpy.int8('127') == 127 + assert numpy.int8('128') == -128 def test_uint8(self): import numpypy as numpy @@ -232,6 +237,8 @@ assert numpy.uint8(255) == 255 assert numpy.uint8(256) == 0 + assert numpy.uint8('255') == 255 + assert numpy.uint8('256') == 0 def test_int16(self): import numpypy as numpy @@ -240,12 +247,16 @@ assert x == 3 assert numpy.int16(32767) == 32767 assert numpy.int16(32768) == -32768 + assert numpy.int16('32767') == 32767 + assert numpy.int16('32768') == -32768 def test_uint16(self): import numpypy as numpy assert numpy.uint16(65535) == 65535 assert numpy.uint16(65536) == 0 + assert numpy.uint16('65535') == 65535 + assert numpy.uint16('65536') == 0 def test_int32(self): import numpypy as numpy @@ -254,12 +265,16 @@ assert x == 23 assert numpy.int32(2147483647) == 2147483647 assert numpy.int32(2147483648) == -2147483648 + assert numpy.int32('2147483647') == 2147483647 + assert numpy.int32('2147483648') == -2147483648 def test_uint32(self): import numpypy as numpy assert numpy.uint32(4294967295) == 4294967295 assert numpy.uint32(4294967296) == 0 + assert numpy.uint32('4294967295') == 4294967295 + assert numpy.uint32('4294967296') == 0 def test_int_(self): import numpypy as numpy @@ -281,6 +296,9 @@ assert numpy.int64(9223372036854775807) == 9223372036854775807 raises(OverflowError, numpy.int64, 9223372036854775808) + + assert numpy.int64('9223372036854775807') 
== 9223372036854775807 + raises(OverflowError, numpy.int64, '9223372036854775808') def test_uint64(self): import sys @@ -304,6 +322,8 @@ assert numpy.float32.mro() == [numpy.float32, numpy.floating, numpy.inexact, numpy.number, numpy.generic, object] assert numpy.float32(12) == numpy.float64(12) + assert numpy.float32('23.4') == numpy.float32(23.4) + raises(ValueError, numpy.float32, '23.2df') def test_float64(self): import numpypy as numpy @@ -315,6 +335,8 @@ assert numpy.dtype(float).type is numpy.float64 assert numpy.float64(2.0) == 2.0 + assert numpy.float64('23.4') == numpy.float64(23.4) + raises(ValueError, numpy.float64, '23.2df') def test_subclass_type(self): import numpypy as numpy diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -191,7 +191,16 @@ _mixin_ = True def _coerce(self, space, w_item): - return self.box(space.int_w(space.int(w_item))) + if space.isinstance_w(w_item, space.w_str): + try: + val = int(space.str_w(space.str(w_item))) + except ValueError: + raise OperationError(space.w_ValueError, space.wrap("Invalid integer value")) + if not isinstance(val, int): + raise OperationError(space.w_OverflowError, space.wrap("Value out of range")) + else: + val = space.int_w(space.int(w_item)) + return self.box(val) def str_format(self, box): value = self.unbox(box) @@ -289,7 +298,14 @@ _mixin_ = True def _coerce(self, space, w_item): - return self.box(space.float_w(space.float(w_item))) + if space.isinstance_w(w_item, space.w_str): + try: + val = float(space.str_w(space.str(w_item))) + except ValueError: + raise OperationError(space.w_ValueError, space.wrap("Invalid float value")) + else: + val = space.float_w(space.float(w_item)) + return self.box(val) def str_format(self, box): value = self.unbox(box) From noreply at buildbot.pypy.org Mon Dec 12 22:52:54 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Mon, 12 Dec 2011 22:52:54 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-strings: Add string to FakeSpace to fix failing compile tests Message-ID: <20111212215254.5ABEC82ABD@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: numpy-dtype-strings Changeset: r50449:d0fa1bba8dd6 Date: 2011-12-11 10:17 -0500 http://bitbucket.org/pypy/pypy/changeset/d0fa1bba8dd6/ Log: Add string to FakeSpace to fix failing compile tests diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -48,6 +48,7 @@ w_long = "long" w_tuple = 'tuple' w_slice = "slice" + w_str = "str" def __init__(self): """NOT_RPYTHON""" @@ -85,6 +86,8 @@ return BoolObject(obj) elif isinstance(obj, int): return IntObject(obj) + elif isinstance(obj, str): + return StringObject(obj) elif isinstance(obj, W_Root): return obj raise NotImplementedError @@ -120,6 +123,16 @@ assert isinstance(w_obj, interp_boxes.W_GenericBox) return self.int(w_obj.descr_int(self)) + def str_w(self, w_obj): + if isinstance(w_obj, StringObject): + return w_obj._value + raise NotImplementedError + + def str(self, w_obj): + if isinstance(w_obj, StringObject): + return w_obj + raise NotImplementedError + def is_true(self, w_obj): assert isinstance(w_obj, BoolObject) return w_obj.boolval @@ -169,6 +182,11 @@ def __init__(self, intval): self.intval = intval +class StringObject(W_Root): + tp = FakeSpace.w_str + def __init__(self, s): + self._value = s + class ListObject(W_Root): tp = FakeSpace.w_list def __init__(self, items): 
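A minimal usage sketch of the string-constructor support that pull request #18 and the numpy-dtype-strings changesets above add to the numpypy scalar types, assuming a PyPy build that includes these changes; the assertions mirror the tests in the diff:

    import numpypy as numpy

    # integer scalar types accept decimal strings and wrap on overflow,
    # matching the behaviour of the plain integer constructors
    assert numpy.int8('50') == numpy.int8(50)
    assert numpy.int8('128') == -128
    assert numpy.uint16('65536') == 0

    # float scalar types go through float(), so malformed text such as
    # '23.2df' raises ValueError
    assert numpy.float32('23.4') == numpy.float32(23.4)

As the following changeset shows, _coerce ends up simply routing the wrapped item through space.call_function(space.w_int, w_item) (or space.w_float), which gives the applevel int('50') / float('23.4') semantics that a plain space.int() on a wrapped string does not provide.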
From noreply at buildbot.pypy.org Mon Dec 12 22:52:55 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Mon, 12 Dec 2011 22:52:55 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-strings: Change string special case to use call_function instead Message-ID: <20111212215255.7C1D382ABD@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: numpy-dtype-strings Changeset: r50450:ed24df405fd9 Date: 2011-12-12 16:02 -0500 http://bitbucket.org/pypy/pypy/changeset/ed24df405fd9/ Log: Change string special case to use call_function instead diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -191,16 +191,7 @@ _mixin_ = True def _coerce(self, space, w_item): - if space.isinstance_w(w_item, space.w_str): - try: - val = int(space.str_w(space.str(w_item))) - except ValueError: - raise OperationError(space.w_ValueError, space.wrap("Invalid integer value")) - if not isinstance(val, int): - raise OperationError(space.w_OverflowError, space.wrap("Value out of range")) - else: - val = space.int_w(space.int(w_item)) - return self.box(val) + return self.box(space.int_w(space.call_function(space.w_int, w_item))) def str_format(self, box): value = self.unbox(box) @@ -298,14 +289,7 @@ _mixin_ = True def _coerce(self, space, w_item): - if space.isinstance_w(w_item, space.w_str): - try: - val = float(space.str_w(space.str(w_item))) - except ValueError: - raise OperationError(space.w_ValueError, space.wrap("Invalid float value")) - else: - val = space.float_w(space.float(w_item)) - return self.box(val) + return self.box(space.float_w(space.call_function(space.w_float, w_item))) def str_format(self, box): value = self.unbox(box) From noreply at buildbot.pypy.org Mon Dec 12 22:52:56 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Mon, 12 Dec 2011 22:52:56 +0100 (CET) Subject: [pypy-commit] pypy numpy-dtype-strings: Remove FakeSpace string code no longer needed Message-ID: <20111212215256.9E8CF82ABD@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: numpy-dtype-strings Changeset: r50451:cf42a69ca368 Date: 2011-12-12 16:49 -0500 http://bitbucket.org/pypy/pypy/changeset/cf42a69ca368/ Log: Remove FakeSpace string code no longer needed diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -48,7 +48,6 @@ w_long = "long" w_tuple = 'tuple' w_slice = "slice" - w_str = "str" def __init__(self): """NOT_RPYTHON""" @@ -86,8 +85,6 @@ return BoolObject(obj) elif isinstance(obj, int): return IntObject(obj) - elif isinstance(obj, str): - return StringObject(obj) elif isinstance(obj, W_Root): return obj raise NotImplementedError @@ -123,16 +120,6 @@ assert isinstance(w_obj, interp_boxes.W_GenericBox) return self.int(w_obj.descr_int(self)) - def str_w(self, w_obj): - if isinstance(w_obj, StringObject): - return w_obj._value - raise NotImplementedError - - def str(self, w_obj): - if isinstance(w_obj, StringObject): - return w_obj - raise NotImplementedError - def is_true(self, w_obj): assert isinstance(w_obj, BoolObject) return w_obj.boolval @@ -182,11 +169,6 @@ def __init__(self, intval): self.intval = intval -class StringObject(W_Root): - tp = FakeSpace.w_str - def __init__(self, s): - self._value = s - class ListObject(W_Root): tp = FakeSpace.w_list def __init__(self, items): From pullrequests-noreply at bitbucket.org Mon Dec 12 22:53:07 2011 From: pullrequests-noreply at bitbucket.org 
(Bitbucket) Date: Mon, 12 Dec 2011 21:53:07 -0000 Subject: [pypy-commit] [ACCEPTED] Pull request #18 for pypy/pypy: Adds string constructors to ints and floats In-Reply-To: References: Message-ID: <20111212215307.2019.12204@bitbucket13.managed.contegix.com> Pull request #18 has been accepted by Alex Gaynor. Changes in jterrace/pypy have been pulled into pypy/pypy. https://bitbucket.org/pypy/pypy/pull-request/18/adds-string-constructors-to-ints-and -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. From noreply at buildbot.pypy.org Tue Dec 13 04:11:04 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Tue, 13 Dec 2011 04:11:04 +0100 (CET) Subject: [pypy-commit] pypy numpy-full-fromstring: Adds full fromstring support with lots of tests Message-ID: <20111213031104.628DA82ABD@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: numpy-full-fromstring Changeset: r50453:8e0167fbb05a Date: 2011-12-12 17:53 -0500 http://bitbucket.org/pypy/pypy/changeset/8e0167fbb05a/ Log: Adds full fromstring support with lots of tests diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,34 +1,49 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec from pypy.module.micronumpy.interp_dtype import get_dtype_cache -from pypy.rlib.rstruct.runpack import runpack +from pypy.module.micronumpy import interp_dtype from pypy.rpython.lltypesystem import lltype, rffi FLOAT_SIZE = rffi.sizeof(lltype.Float) - at unwrap_spec(s=str) -def fromstring(space, s): + at unwrap_spec(s=str, count=int, sep=str) +def fromstring(space, s, w_dtype=None, count=-1, sep=''): from pypy.module.micronumpy.interp_numarray import W_NDimArray + + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + itemsize = abs(space.int_w(dtype.descr_get_itemsize(space))) length = len(s) - if length % FLOAT_SIZE == 0: - number = length/FLOAT_SIZE - else: + A = [] + num = 0 + ptr = 0 + + while (num < count or count == -1) and ptr < len(s): + if sep == '': + if length - ptr < itemsize: + raise OperationError(space.w_ValueError, space.wrap( + "string length %d not divisable by item size %d" % (length, itemsize))) + val = dtype.itemtype.runpack_str(s[ptr:ptr+itemsize]) + ptr += itemsize + else: + nextptr = s.find(sep, ptr) + if nextptr < 0: + nextptr = length + val = dtype.coerce(space, space.wrap(s[ptr:nextptr])) + ptr = nextptr + 1 + + num += 1 + A.append(val) + + if count > num: raise OperationError(space.w_ValueError, space.wrap( - "string length %d not divisable by %d" % (length, FLOAT_SIZE))) - - dtype = get_dtype_cache(space).w_float64dtype - a = W_NDimArray(number, [number], dtype=dtype) - - start = 0 - end = FLOAT_SIZE - i = 0 - while i < number: - part = s[start:end] - a.dtype.setitem(a.storage, i, dtype.box(runpack('d', part))) - i += 1 - start += FLOAT_SIZE - end += FLOAT_SIZE - + "string is smaller than requested size")) + + a = W_NDimArray(num, [num], dtype=dtype) + for i, val in enumerate(A): + a.dtype.setitem(a.storage, i, val) + return space.wrap(a) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1168,13 +1168,69 
@@ import struct BaseNumpyAppTest.setup_class.im_func(cls) cls.w_data = cls.space.wrap(struct.pack('dddd', 1, 2, 3, 4)) + cls.w_fdata = cls.space.wrap(struct.pack('f', 2.3)) + cls.w_float32val = cls.space.wrap(struct.pack('f', 5.2)) + cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4)) def test_fromstring(self): - from numpypy import fromstring + from numpypy import fromstring, uint8, float32, int32 a = fromstring(self.data) for i in range(4): assert a[i] == i + 1 - raises(ValueError, fromstring, "abc") + b = fromstring('\x01\x02', dtype=uint8) + assert a[0] == 1 + assert a[1] == 2 + c = fromstring(self.fdata, dtype=float32) + assert c[0] == float32(2.3) + d = fromstring("1 2", sep=' ', count=2, dtype=uint8) + assert len(d) == 2 + assert d[0] == 1 + assert d[1] == 2 + e = fromstring('3, 4,5', dtype=uint8, sep=',') + assert len(e) == 3 + assert e[0] == 3 + assert e[1] == 4 + assert e[2] == 5 + f = fromstring('\x01\x02\x03\x04\x05', dtype=uint8, count=3) + assert len(f) == 3 + assert f[0] == 1 + assert f[1] == 2 + assert f[2] == 3 + raises(ValueError, fromstring, "3.4 2.0 3.8 2.2", dtype=int32, sep=" ") + + def test_fromstring_types(self): + from numpypy import fromstring + from numpypy import int8, int16, int32, int64 + from numpypy import uint8, uint16, uint32 + from numpypy import float32, float64 + a = fromstring('\xFF', dtype=int8) + assert a[0] == -1 + b = fromstring('\xFF', dtype=uint8) + assert b[0] == 255 + c = fromstring('\xFF\xFF', dtype=int16) + assert c[0] == -1 + d = fromstring('\xFF\xFF', dtype=uint16) + assert d[0] == 65535 + e = fromstring('\xFF\xFF\xFF\xFF', dtype=int32) + assert e[0] == -1 + f = fromstring('\xFF\xFF\xFF\xFF', dtype=uint32) + assert f[0] == 4294967295 + g = fromstring('\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF', dtype=int64) + assert g[0] == -1 + h = fromstring(self.float32val, dtype=float32) + assert h[0] == float32(5.2) + i = fromstring(self.float64val, dtype=float64) + assert i[0] == float64(300.4) + + + def test_fromstring_invalid(self): + from numpypy import fromstring, uint16, uint8 + #default dtype is 64-bit float, so 3 bytes should fail + raises(ValueError, fromstring, "\x01\x02\x03") + #3 bytes is not modulo 2 bytes (int16) + raises(ValueError, fromstring, "\x01\x03\x03", dtype=uint16) + #5 bytes is larger than 3 bytes + raises(ValueError, fromstring, "\x01\x02\x03", count=5, dtype=uint8) class AppTestRepr(BaseNumpyAppTest): diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -8,6 +8,7 @@ from pypy.rlib.objectmodel import specialize from pypy.rlib.rarithmetic import LONG_BIT, widen from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rlib.rstruct.runpack import runpack def simple_unary_op(func): @@ -55,6 +56,8 @@ class Primitive(object): _mixin_ = True + char = "?" 
+ def get_element_size(self): return rffi.sizeof(self.T) @@ -102,6 +105,11 @@ width, storage, i, offset, value ) + def runpack_str(self, s): + if self.char == "?": + raise NotImplementedError + return self.box(runpack(self.char, s)) + @simple_binary_op def add(self, v1, v2): return v1 + v2 @@ -241,26 +249,32 @@ class Int8(BaseType, Integer): T = rffi.SIGNEDCHAR BoxType = interp_boxes.W_Int8Box + char = "b" class UInt8(BaseType, Integer): T = rffi.UCHAR BoxType = interp_boxes.W_UInt8Box + char = "B" class Int16(BaseType, Integer): T = rffi.SHORT BoxType = interp_boxes.W_Int16Box + char = "h" class UInt16(BaseType, Integer): T = rffi.USHORT BoxType = interp_boxes.W_UInt16Box + char = "H" class Int32(BaseType, Integer): T = rffi.INT BoxType = interp_boxes.W_Int32Box + char = "i" class UInt32(BaseType, Integer): T = rffi.UINT BoxType = interp_boxes.W_UInt32Box + char = "I" class Long(BaseType, Integer): T = rffi.LONG @@ -273,10 +287,12 @@ class Int64(BaseType, Integer): T = rffi.LONGLONG BoxType = interp_boxes.W_Int64Box + char = "q" class UInt64(BaseType, Integer): T = rffi.ULONGLONG BoxType = interp_boxes.W_UInt64Box + char = "Q" def _coerce(self, space, w_item): try: @@ -403,7 +419,9 @@ class Float32(BaseType, Float): T = rffi.FLOAT BoxType = interp_boxes.W_Float32Box + char = "f" class Float64(BaseType, Float): T = rffi.DOUBLE - BoxType = interp_boxes.W_Float64Box \ No newline at end of file + BoxType = interp_boxes.W_Float64Box + char = "d" \ No newline at end of file From noreply at buildbot.pypy.org Tue Dec 13 05:21:14 2011 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 13 Dec 2011 05:21:14 +0100 (CET) Subject: [pypy-commit] pypy numpypy-frompyfunc: tests, implementation of frompyfunc. Needs lazy eval (should inherit from VirtualArray?) Message-ID: <20111213042114.1117C82ABD@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-frompyfunc Changeset: r50454:8f2f51754ac2 Date: 2011-12-13 00:55 +0200 http://bitbucket.org/pypy/pypy/changeset/8f2f51754ac2/ Log: tests, implementation of frompyfunc. Needs lazy eval (should inherit from VirtualArray?) 
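A minimal usage sketch of the interface this changeset adds (the diff follows below), assuming the numpypy-frompyfunc branch: only the one-input, one-output case is supported so far, and evaluation is still eager, hence the note about lazy evaluation. The calls mirror the tests in the diff:

    from numpypy import frompyfunc, array

    # wrap an ordinary Python callable taking one argument and
    # returning one value
    ufunc = frompyfunc(abs, 1, 1)
    assert (ufunc([-1, 0, 3, 15]) == [1, 0, 3, 15]).all()

    def foo(x):
        return x * x + 1
    ufunc2 = frompyfunc(foo, 1, 1)
    assert (ufunc2(range(10)) == array(range(10)) * range(10) + 1).all()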
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -8,6 +8,7 @@ 'ndarray': 'interp_numarray.W_NDimArray', 'dtype': 'interp_dtype.W_Dtype', 'ufunc': 'interp_ufuncs.W_Ufunc', + 'frompyfunc': 'interp_numarray.W_FromPyFunc', 'array': 'interp_numarray.array', 'zeros': 'interp_numarray.zeros', diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1549,3 +1549,42 @@ __iter__ = interp2app(W_FlatIterator.descr_iter), ) W_FlatIterator.acceptable_as_base_class = False + + +class W_FromPyFunc(Wrappable): + def __init__(self, space, w_func, w_nIn, w_nOut): + self.w_func = w_func + if space.int_w(w_nIn) != 1 or space.int_w(w_nOut) != 1: + raise OperationError(space.w_NotImplementedError, space.wrap('')) + self.nIn = space.int_w(w_nIn) + self.nOut = space.int_w(w_nOut) + + def descr__new__(space, w_subtype, w_func, w_nIn, w_nOut): + return space.wrap(W_FromPyFunc(space, w_func, w_nIn, w_nOut)) + + def descr_call(self, space, w_arrlike): + arr = convert_to_array(space, w_arrlike) + result = W_NDimArray(arr.find_size(), arr.shape[:], dtype=arr.find_dtype(), + order=arr.order) + i = arr.start_iter() + ri = result.start_iter() + shapelen = len(arr.shape) + result_size = arr.find_size() + while not ri.done(): + #numpy_driver.jit_merge_point(signature=signature, + # shapelen=shapelen, + # result_size=result_size, i=i, ri=ri, + # self=self, result=result) + result.dtype.setitem(result.storage, ri.offset, + space.call_function(self.w_func, arr.eval(i))) + i = i.next(shapelen) + ri = ri.next(shapelen) + return space.wrap(result) + + +W_FromPyFunc.typedef = TypeDef( + 'frompyfunc', + __module__ = "numpypy", + __new__=interp2app(W_FromPyFunc.descr__new__.im_func), + __call__=interp2app(W_FromPyFunc.descr_call), +) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -253,11 +253,13 @@ x = numpy.int32(23) assert x == 23 assert numpy.int32(2147483647) == 2147483647 + skip('32 bit overflow') assert numpy.int32(2147483648) == -2147483648 def test_uint32(self): import numpypy as numpy + skip('32 bit overflow') assert numpy.uint32(4294967295) == 4294967295 assert numpy.uint32(4294967296) == 0 @@ -278,7 +280,7 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 - + skip('overflow error on 32bit') assert numpy.int64(9223372036854775807) == 9223372036854775807 raises(OverflowError, numpy.int64, 9223372036854775808) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -371,3 +371,17 @@ (3, 3.5), ]: assert ufunc(a, b) == func(a, b) + + +class AppTestFromPyFunc(BaseNumpyAppTest): + def test_frompyfunc_abs(self): + from numpypy import frompyfunc + ufunc = frompyfunc(abs, 1, 1) + assert (ufunc([-1, 0, 3, 15]) == [1, 0, 3, 15]).all() + + def test_frompyfunc_foo(self): + def foo(x): + return x * x + 1 + from numpypy import frompyfunc, array + ufunc = frompyfunc(foo, 1, 1) + assert (ufunc(range(10)) == array(range(10)) * range(10) + 1).all() From noreply at buildbot.pypy.org Tue Dec 13 09:08:21 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: 
Tue, 13 Dec 2011 09:08:21 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: kill old stuff Message-ID: <20111213080821.C709382210@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50455:08f31473c48b Date: 2011-12-13 07:32 +0100 http://bitbucket.org/pypy/pypy/changeset/08f31473c48b/ Log: kill old stuff diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -374,30 +374,6 @@ target_token.exported_state = None - def FIXME_old_stuff(): - preamble_optimizer = self.optimizer - loop.preamble.quasi_immutable_deps = ( - self.optimizer.quasi_immutable_deps) - self.optimizer = self.optimizer.new() - loop.quasi_immutable_deps = self.optimizer.quasi_immutable_deps - - - loop.inputargs = inputargs - args = [preamble_optimizer.getvalue(self.short_boxes.original(a)).force_box(preamble_optimizer)\ - for a in inputargs] - jmp = ResOperation(rop.JUMP, args, None) - jmp.setdescr(loop.token) - loop.preamble.operations.append(jmp) - - loop.operations = self.optimizer.get_newoperations() - maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards - - if self.optimizer.emitted_guards > maxguards: - loop.preamble.token.retraced_count = sys.maxint - - if short: - pass - def ensure_short_op_emitted(self, op, optimizer, seen): if op is None: return From noreply at buildbot.pypy.org Tue Dec 13 09:08:23 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Tue, 13 Dec 2011 09:08:23 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: Dont change the result box of ops as they are emitted. That will cause an unecesarry renaming of the box as it passes a label which complicates unroll.py Message-ID: <20111213080823.06CF682ABD@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50456:1e4c74e007f4 Date: 2011-12-13 09:07 +0100 http://bitbucket.org/pypy/pypy/changeset/1e4c74e007f4/ Log: Dont change the result box of ops as they are emitted. 
That will cause an unecesarry renaming of the box as it passes a label which complicates unroll.py diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -4211,7 +4211,6 @@ preamble = """ [p0] i0 = strlen(p0) - i3 = same_as(i0) # Should be killed by backend jump(p0) """ expected = """ @@ -5668,8 +5667,7 @@ p3 = newstr(i3) copystrcontent(p1, p3, 0, 0, i1) copystrcontent(p2, p3, 0, i1, i2) - i7 = same_as(i2) - jump(p2, p3, i7) + jump(p2, p3, i2) """ expected = """ [p1, p2, i1] @@ -5744,9 +5742,7 @@ copystrcontent(p1, p5, 0, 0, i1) copystrcontent(p2, p5, 0, i1, i2) copystrcontent(p3, p5, 0, i12, i3) - i129 = same_as(i2) - i130 = same_as(i3) - jump(p2, p3, p5, i129, i130) + jump(p2, p3, p5, i2, i3) """ expected = """ [p1, p2, p3, i1, i2] @@ -5959,8 +5955,7 @@ p4 = newstr(i5) copystrcontent(p1, p4, i1, 0, i3) copystrcontent(p2, p4, 0, i3, i4) - i9 = same_as(i4) - jump(p4, i1, i2, p2, i5, i3, i9) + jump(p4, i1, i2, p2, i5, i3, i4) """ expected = """ [p1, i1, i2, p2, i5, i3, i4] @@ -6082,9 +6077,7 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, p3, p4, descr=strequaldescr) escape(i0) - i11 = same_as(i1) - i12 = same_as(i2) - jump(p1, p2, p3, i3, i11, i12) + jump(p1, p2, p3, i3, i1, i2) """ expected = """ [p1, p2, p3, i3, i1, i2] @@ -6304,7 +6297,6 @@ i1 = strlen(p1) i0 = int_eq(i1, 0) escape(i0) - i3 = same_as(i1) jump(p1, i0) """ self.optimize_strunicode_loop_extradescrs(ops, expected, preamble) @@ -6350,9 +6342,7 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, s"hello world", p4, descr=streq_nonnull_descr) escape(i0) - i11 = same_as(i1) - i12 = same_as(i2) - jump(p1, p2, i3, i11, i12) + jump(p1, p2, i3, i1, i2) """ expected = """ [p1, p2, i3, i1, i2] @@ -6925,8 +6915,7 @@ [p9] i843 = strlen(p9) call(i843, descr=nonwritedescr) - i0 = same_as(i843) - jump(p9, i0) + jump(p9, i843) """ short = """ [p9] diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -199,8 +199,8 @@ self.optimizer.emitting_dissabled = True for op in exported_state.inputarg_setup_ops: self.optimizer.send_extra_operation(op) + seen = {} - for op in self.short_boxes.operations(): self.ensure_short_op_emitted(op, self.optimizer, seen) if op and op.result: @@ -211,10 +211,7 @@ self.optimizer.importable_values[value] = imp newvalue = self.optimizer.getvalue(op.result) newresult = newvalue.get_key_box() - if newresult is not op.result and not newvalue.is_constant(): - self.short_boxes.alias(newresult, op.result) - op = ResOperation(rop.SAME_AS, [op.result], newresult) - self.optimizer._newoperations = [op] + self.optimizer._newoperations # XXX + assert newresult is op.result or newvalue.is_constant() self.optimizer.flush() self.optimizer.emitting_dissabled = False @@ -373,7 +370,6 @@ target_token.short_preamble = self.short target_token.exported_state = None - def ensure_short_op_emitted(self, op, optimizer, seen): if op is None: return diff --git a/pypy/jit/metainterp/optimizeopt/vstring.py b/pypy/jit/metainterp/optimizeopt/vstring.py --- a/pypy/jit/metainterp/optimizeopt/vstring.py +++ b/pypy/jit/metainterp/optimizeopt/vstring.py @@ -44,7 +44,7 @@ class __extend__(optimizer.OptValue): """New methods added to the base class OptValue for this file.""" - def getstrlen(self, 
string_optimizer, mode): + def getstrlen(self, string_optimizer, mode, lengthbox=None): if mode is mode_string: s = self.get_constant_string_spec(mode_string) if s is not None: @@ -57,7 +57,8 @@ return None self.ensure_nonnull() box = self.force_box(string_optimizer) - lengthbox = BoxInt() + if lengthbox is None: + lengthbox = BoxInt() string_optimizer.emit_operation(ResOperation(mode.STRLEN, [box], lengthbox)) return lengthbox @@ -135,7 +136,7 @@ self._chars = longerlist[start:stop] # slice the 'longerlist', which may also contain Nones - def getstrlen(self, _, mode): + def getstrlen(self, _, mode, lengthbox=None): if self._lengthbox is None: self._lengthbox = ConstInt(len(self._chars)) return self._lengthbox @@ -216,7 +217,7 @@ self.left = left self.right = right - def getstrlen(self, string_optimizer, mode): + def getstrlen(self, string_optimizer, mode, lengthbox=None): if self.lengthbox is None: len1box = self.left.getstrlen(string_optimizer, mode) if len1box is None: @@ -268,7 +269,7 @@ self.vstart = vstart self.vlength = vlength - def getstrlen(self, optforce, mode): + def getstrlen(self, optforce, mode, lengthbox=None): return self.vlength.force_box(optforce) @specialize.arg(1) @@ -360,7 +361,7 @@ string_optimizer.emit_operation(ResOperation(rop.INT_SUB, [box1, box2], resbox)) return resbox -def _strgetitem(string_optimizer, strbox, indexbox, mode): +def _strgetitem(string_optimizer, strbox, indexbox, mode, resbox=None): if isinstance(strbox, ConstPtr) and isinstance(indexbox, ConstInt): if mode is mode_string: s = strbox.getref(lltype.Ptr(rstr.STR)) @@ -368,7 +369,8 @@ else: s = strbox.getref(lltype.Ptr(rstr.UNICODE)) return ConstInt(ord(s.chars[indexbox.getint()])) - resbox = BoxInt() + if resbox is None: + resbox = BoxInt() string_optimizer.emit_operation(ResOperation(mode.STRGETITEM, [strbox, indexbox], resbox)) return resbox @@ -434,10 +436,13 @@ def _optimize_STRGETITEM(self, op, mode): value = self.getvalue(op.getarg(0)) vindex = self.getvalue(op.getarg(1)) - vresult = self.strgetitem(value, vindex, mode) - self.make_equal_to(op.result, vresult) + vresult = self.strgetitem(value, vindex, mode, op.result) + if op.result in self.optimizer.values: + assert self.getvalue(op.result) is vresult + else: + self.make_equal_to(op.result, vresult) - def strgetitem(self, value, vindex, mode): + def strgetitem(self, value, vindex, mode, resbox=None): value.ensure_nonnull() # if value.is_virtual() and isinstance(value, VStringSliceValue): @@ -464,7 +469,7 @@ vindex = optimizer.ConstantValue(ConstInt(index - len1)) return self.strgetitem(value.right, vindex, mode) # - resbox = _strgetitem(self, value.force_box(self), vindex.force_box(self), mode) + resbox = _strgetitem(self, value.force_box(self), vindex.force_box(self), mode, resbox) return self.getvalue(resbox) def optimize_STRLEN(self, op): @@ -474,8 +479,11 @@ def _optimize_STRLEN(self, op, mode): value = self.getvalue(op.getarg(0)) - lengthbox = value.getstrlen(self, mode) - self.make_equal_to(op.result, self.getvalue(lengthbox)) + lengthbox = value.getstrlen(self, mode, op.result) + if op.result in self.optimizer.values: + assert self.getvalue(op.result) is self.getvalue(lengthbox) + elif op.result is not lengthbox: + self.make_equal_to(op.result, self.getvalue(lengthbox)) def optimize_COPYSTRCONTENT(self, op): self._optimize_COPYSTRCONTENT(op, mode_string) From noreply at buildbot.pypy.org Tue Dec 13 09:18:27 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Tue, 13 Dec 2011 09:18:27 +0100 (CET) Subject: [pypy-commit] pypy 
jit-multilabel: kill ShortBoxes.aliases Message-ID: <20111213081827.ADBD282210@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50457:a155d05decc6 Date: 2011-12-13 09:11 +0100 http://bitbucket.org/pypy/pypy/changeset/a155d05decc6/ Log: kill ShortBoxes.aliases diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -563,7 +563,6 @@ self.potential_ops = {} self.alternatives = {} self.synthetic = {} - self.aliases = {} self.rename = {} self.optimizer = optimizer @@ -583,7 +582,6 @@ def clone(self): sb = ShortBoxes(self.optimizer, None) - sb.aliases.update(self.aliases) sb.short_boxes = {} sb.short_boxes.update(self.short_boxes) return sb @@ -690,13 +688,3 @@ def has_producer(self, box): return box in self.short_boxes - - def alias(self, newbox, oldbox): - if not isinstance(oldbox, Const) and newbox not in self.short_boxes: - self.short_boxes[newbox] = self.short_boxes[oldbox] - self.aliases[newbox] = oldbox - - def original(self, box): - while box in self.aliases: - box = self.aliases[box] - return box From noreply at buildbot.pypy.org Tue Dec 13 09:18:28 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Tue, 13 Dec 2011 09:18:28 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: ShortBoxes is no longer mutated after it is crated so there is no point in cloning it anymore Message-ID: <20111213081828.D39E082210@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50458:fb0f3b8c369a Date: 2011-12-13 09:14 +0100 http://bitbucket.org/pypy/pypy/changeset/fb0f3b8c369a/ Log: ShortBoxes is no longer mutated after it is crated so there is no point in cloning it anymore diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -179,7 +179,7 @@ self.short = target_token.short_preamble[:] self.short_seen = {} - self.short_boxes = exported_state.short_boxes.clone() + self.short_boxes = exported_state.short_boxes self.imported_state = exported_state self.inputargs = targetop.getarglist() self.initial_virtual_state = target_token.virtual_state diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -580,12 +580,6 @@ except BoxNotProducable: pass - def clone(self): - sb = ShortBoxes(self.optimizer, None) - sb.short_boxes = {} - sb.short_boxes.update(self.short_boxes) - return sb - def prioritized_alternatives(self, box): if box not in self.alternatives: return [self.potential_ops[box]] From noreply at buildbot.pypy.org Tue Dec 13 10:53:03 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Tue, 13 Dec 2011 10:53:03 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: Translation fix. Why am I not allowed to use a default value on the lengthbox argument? Message-ID: <20111213095303.D09E082210@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50459:e2479d8f3ce5 Date: 2011-12-13 10:52 +0100 http://bitbucket.org/pypy/pypy/changeset/e2479d8f3ce5/ Log: Translation fix. Why am I not allowed to use a default value on the lengthbox argument? 
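A tiny, generic illustration of the calling convention the patch below switches to (a hypothetical sketch, not PyPy code): 'lengthbox' loses its default value, so every caller must pass either an existing box to reuse or an explicit None.

    class Value(object):
        def getstrlen(self, lengthbox):
            # reuse the caller-supplied box if there is one, else make a fresh one
            if lengthbox is None:
                lengthbox = object()
            return lengthbox

    v = Value()
    print(v.getstrlen(None))        # caller spells out "no box supplied"
    box = object()
    print(v.getstrlen(box) is box)  # a caller-supplied box is reused -> True
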
diff --git a/pypy/jit/metainterp/optimizeopt/vstring.py b/pypy/jit/metainterp/optimizeopt/vstring.py --- a/pypy/jit/metainterp/optimizeopt/vstring.py +++ b/pypy/jit/metainterp/optimizeopt/vstring.py @@ -44,7 +44,7 @@ class __extend__(optimizer.OptValue): """New methods added to the base class OptValue for this file.""" - def getstrlen(self, string_optimizer, mode, lengthbox=None): + def getstrlen(self, string_optimizer, mode, lengthbox): if mode is mode_string: s = self.get_constant_string_spec(mode_string) if s is not None: @@ -74,7 +74,7 @@ # Copies the pointer-to-string 'self' into the target string # given by 'targetbox', at the specified offset. Returns the offset # at the end of the copy. - lengthbox = self.getstrlen(string_optimizer, mode) + lengthbox = self.getstrlen(string_optimizer, mode, None) srcbox = self.force_box(string_optimizer) return copy_str_content(string_optimizer, srcbox, targetbox, CONST_0, offsetbox, lengthbox, mode) @@ -103,7 +103,7 @@ return assert self.source_op is not None self.box = box = self.source_op.result - lengthbox = self.getstrlen(optforce, self.mode) + lengthbox = self.getstrlen(optforce, self.mode, None) op = ResOperation(self.mode.NEWSTR, [lengthbox], box) if not we_are_translated(): op.name = 'FORCE' @@ -136,7 +136,7 @@ self._chars = longerlist[start:stop] # slice the 'longerlist', which may also contain Nones - def getstrlen(self, _, mode, lengthbox=None): + def getstrlen(self, _, mode, lengthbox): if self._lengthbox is None: self._lengthbox = ConstInt(len(self._chars)) return self._lengthbox @@ -217,12 +217,12 @@ self.left = left self.right = right - def getstrlen(self, string_optimizer, mode, lengthbox=None): + def getstrlen(self, string_optimizer, mode, lengthbox): if self.lengthbox is None: - len1box = self.left.getstrlen(string_optimizer, mode) + len1box = self.left.getstrlen(string_optimizer, mode, None) if len1box is None: return None - len2box = self.right.getstrlen(string_optimizer, mode) + len2box = self.right.getstrlen(string_optimizer, mode, None) if len2box is None: return None self.lengthbox = _int_add(string_optimizer, len1box, len2box) @@ -269,7 +269,7 @@ self.vstart = vstart self.vlength = vlength - def getstrlen(self, optforce, mode, lengthbox=None): + def getstrlen(self, optforce, mode, lengthbox): return self.vlength.force_box(optforce) @specialize.arg(1) @@ -286,7 +286,7 @@ return None def string_copy_parts(self, string_optimizer, targetbox, offsetbox, mode): - lengthbox = self.getstrlen(string_optimizer, mode) + lengthbox = self.getstrlen(string_optimizer, mode, None) return copy_str_content(string_optimizer, self.vstr.force_box(string_optimizer), targetbox, self.vstart.force_box(string_optimizer), offsetbox, @@ -459,7 +459,7 @@ return result # if isinstance(value, VStringConcatValue) and vindex.is_constant(): - len1box = value.left.getstrlen(self, mode) + len1box = value.left.getstrlen(self, mode, None) if isinstance(len1box, ConstInt): index = vindex.box.getint() len1 = len1box.getint() @@ -602,8 +602,8 @@ v1 = self.getvalue(op.getarg(1)) v2 = self.getvalue(op.getarg(2)) # - l1box = v1.getstrlen(None, mode) - l2box = v2.getstrlen(None, mode) + l1box = v1.getstrlen(None, mode, None) + l2box = v2.getstrlen(None, mode, None) if (l1box is not None and l2box is not None and isinstance(l1box, ConstInt) and isinstance(l2box, ConstInt) and @@ -632,15 +632,15 @@ return False def handle_str_equal_level1(self, v1, v2, resultbox, mode): - l2box = v2.getstrlen(None, mode) + l2box = v2.getstrlen(None, mode, None) if isinstance(l2box, 
ConstInt): if l2box.value == 0: - lengthbox = v1.getstrlen(self, mode) + lengthbox = v1.getstrlen(self, mode, None) seo = self.optimizer.send_extra_operation seo(ResOperation(rop.INT_EQ, [lengthbox, CONST_0], resultbox)) return True if l2box.value == 1: - l1box = v1.getstrlen(None, mode) + l1box = v1.getstrlen(None, mode, None) if isinstance(l1box, ConstInt) and l1box.value == 1: # comparing two single chars vchar1 = self.strgetitem(v1, optimizer.CVAL_ZERO, mode) @@ -676,7 +676,7 @@ return False def handle_str_equal_level2(self, v1, v2, resultbox, mode): - l2box = v2.getstrlen(None, mode) + l2box = v2.getstrlen(None, mode, None) if isinstance(l2box, ConstInt): if l2box.value == 1: vchar = self.strgetitem(v2, optimizer.CVAL_ZERO, mode) From noreply at buildbot.pypy.org Tue Dec 13 11:33:48 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 13 Dec 2011 11:33:48 +0100 (CET) Subject: [pypy-commit] pypy default: simplify this code by removing some mostly dead code Message-ID: <20111213103348.F16C782210@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50460:22b2d8a07bcd Date: 2011-12-13 05:33 -0500 http://bitbucket.org/pypy/pypy/changeset/22b2d8a07bcd/ Log: simplify this code by removing some mostly dead code diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -791,7 +791,7 @@ raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) item = concrete._index_of_single_item(space, w_idx) - concrete.setitem_w(space, item, w_value) + concrete.setitem(item, concrete.dtype.coerce(space, w_value)) return if not isinstance(w_value, BaseArray): w_value = convert_to_array(space, w_value) @@ -1178,10 +1178,6 @@ def eval(self, iter): return self.parent.getitem(iter.get_offset()) - @unwrap_spec(item=int) - def setitem_w(self, space, item, w_value): - return self.parent.setitem_w(space, item, w_value) - def setitem(self, item, value): # This is currently not possible to be called from anywhere. 
raise NotImplementedError @@ -1330,9 +1326,6 @@ raise OperationError(space.w_TypeError, space.wrap( "len() of unsized object")) - def setitem_w(self, space, item, w_value): - return self.setitem(item, self.dtype.coerce(space, w_value)) - def setitem(self, item, value): self.invalidated() self.dtype.setitem(self.storage, item, value) From noreply at buildbot.pypy.org Tue Dec 13 11:40:05 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 13 Dec 2011 11:40:05 +0100 (CET) Subject: [pypy-commit] pypy default: whoops fix error from previous commit Message-ID: <20111213104005.2AAE382210@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50461:adc2497aaa14 Date: 2011-12-13 05:39 -0500 http://bitbucket.org/pypy/pypy/changeset/adc2497aaa14/ Log: whoops fix error from previous commit diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -791,7 +791,8 @@ raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) item = concrete._index_of_single_item(space, w_idx) - concrete.setitem(item, concrete.dtype.coerce(space, w_value)) + dtype = concrete.find_dtype() + concrete.setitem(item, dtype.coerce(space, w_value)) return if not isinstance(w_value, BaseArray): w_value = convert_to_array(space, w_value) From noreply at buildbot.pypy.org Tue Dec 13 13:55:54 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Dec 2011 13:55:54 +0100 (CET) Subject: [pypy-commit] pypy default: Let the JIT see the app-level 'type(x)' construct. Message-ID: <20111213125554.CBB2682210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50462:7d73e99929bb Date: 2011-12-13 13:55 +0100 http://bitbucket.org/pypy/pypy/changeset/7d73e99929bb/ Log: Let the JIT see the app-level 'type(x)' construct. diff --git a/pypy/objspace/std/typetype.py b/pypy/objspace/std/typetype.py --- a/pypy/objspace/std/typetype.py +++ b/pypy/objspace/std/typetype.py @@ -10,7 +10,6 @@ w_dict=gateway.NoneNotWrapped): "This is used to create user-defined classes only." - from pypy.objspace.std.typeobject import W_TypeObject # XXX check types w_typetype = _precheck_for_new(space, w_typetype) @@ -19,10 +18,18 @@ if (space.is_w(space.type(w_typetype), space.w_type) and w_bases is None and w_dict is None): return space.type(w_name) - elif w_bases is None or w_dict is None: + else: + return _create_new_type(space, w_typetype, w_name, w_bases, w_dict) + + +def _create_new_type(space, w_typetype, w_name, w_bases, w_dict): + # this is in its own function because we want the special case 'type(x)' + # above to be seen by the jit. 
+ from pypy.objspace.std.typeobject import W_TypeObject + + if w_bases is None or w_dict is None: raise OperationError(space.w_TypeError, space.wrap("type() takes 1 or 3 arguments")) - bases_w = space.fixedview(w_bases) w_winner = w_typetype From noreply at buildbot.pypy.org Tue Dec 13 15:17:49 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Dec 2011 15:17:49 +0100 (CET) Subject: [pypy-commit] pypy default: add comments: keep the backend in sync Message-ID: <20111213141749.4D64782210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50463:c260c94f7761 Date: 2011-12-13 15:17 +0100 http://bitbucket.org/pypy/pypy/changeset/c260c94f7761/ Log: add comments: keep the backend in sync diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -381,11 +381,11 @@ 'GUARD_ISNULL/1d', 'GUARD_NONNULL_CLASS/2d', '_GUARD_FOLDABLE_LAST', - 'GUARD_NO_EXCEPTION/0d', - 'GUARD_EXCEPTION/1d', + 'GUARD_NO_EXCEPTION/0d', # may be called with an exception currently set + 'GUARD_EXCEPTION/1d', # may be called with an exception currently set 'GUARD_NO_OVERFLOW/0d', 'GUARD_OVERFLOW/0d', - 'GUARD_NOT_FORCED/0d', + 'GUARD_NOT_FORCED/0d', # may be called with an exception currently set 'GUARD_NOT_INVALIDATED/0d', '_GUARD_LAST', # ----- end of guard operations ----- From noreply at buildbot.pypy.org Tue Dec 13 16:33:30 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Tue, 13 Dec 2011 16:33:30 +0100 (CET) Subject: [pypy-commit] pypy numpy-full-fromstring: Refactor fromstring into text and binary Message-ID: <20111213153330.4C01D82210@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: numpy-full-fromstring Changeset: r50464:06bf54903f53 Date: 2011-12-13 10:33 -0500 http://bitbucket.org/pypy/pypy/changeset/06bf54903f53/ Log: Refactor fromstring into text and binary diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,49 +1,68 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.interp_dtype import get_dtype_cache +from pypy.rpython.lltypesystem import lltype, rffi from pypy.module.micronumpy import interp_dtype -from pypy.rpython.lltypesystem import lltype, rffi FLOAT_SIZE = rffi.sizeof(lltype.Float) +def _fromstring_text(space, s, count, sep, length, dtype): + import string + from pypy.module.micronumpy.interp_numarray import W_NDimArray + + A = [] + num_items = 0 + ptr = 0 + + while (num_items < count or count == -1) and ptr < len(s): + nextptr = s.find(sep, ptr) + if nextptr < 0: + nextptr = length + piece = s[ptr:nextptr] + #FIXME: need to check piece.isspace() also, but does not translate + if len(piece) > 0: + val = dtype.coerce(space, space.wrap(piece)) + A.append(val) + num_items += 1 + ptr = nextptr + 1 + + if count > num_items: + raise OperationError(space.w_ValueError, space.wrap( + "string is smaller than requested size")) + + a = W_NDimArray(num_items, [num_items], dtype=dtype) + for i, val in enumerate(A): + a.dtype.setitem(a.storage, i, val) + + return space.wrap(a) + +def _fromstring_bin(space, s, count, length, dtype): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + + itemsize = dtype.itemtype.get_element_size() + if count == -1: + count = length / itemsize + if length % itemsize != 0: + raise 
OperationError(space.w_ValueError, space.wrap( + "string length %d not divisable by item size %d" % (length, itemsize))) + if count * itemsize > length: + raise OperationError(space.w_ValueError, space.wrap( + "string is smaller than requested size")) + + a = W_NDimArray(count, [count], dtype=dtype) + for i in range(count): + val = dtype.itemtype.runpack_str(s[i*itemsize:i*itemsize + itemsize]) + a.dtype.setitem(a.storage, i, val) + + return space.wrap(a) + @unwrap_spec(s=str, count=int, sep=str) def fromstring(space, s, w_dtype=None, count=-1, sep=''): - from pypy.module.micronumpy.interp_numarray import W_NDimArray - dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) - itemsize = abs(space.int_w(dtype.descr_get_itemsize(space))) length = len(s) - - A = [] - num = 0 - ptr = 0 - - while (num < count or count == -1) and ptr < len(s): - if sep == '': - if length - ptr < itemsize: - raise OperationError(space.w_ValueError, space.wrap( - "string length %d not divisable by item size %d" % (length, itemsize))) - val = dtype.itemtype.runpack_str(s[ptr:ptr+itemsize]) - ptr += itemsize - else: - nextptr = s.find(sep, ptr) - if nextptr < 0: - nextptr = length - val = dtype.coerce(space, space.wrap(s[ptr:nextptr])) - ptr = nextptr + 1 - - num += 1 - A.append(val) - - if count > num: - raise OperationError(space.w_ValueError, space.wrap( - "string is smaller than requested size")) - - a = W_NDimArray(num, [num], dtype=dtype) - for i, val in enumerate(A): - a.dtype.setitem(a.storage, i, val) - - return space.wrap(a) + if sep == '': + return _fromstring_bin(space, s, count, length, dtype) + else: + return _fromstring_text(space, s, count, sep, length, dtype) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1196,7 +1196,17 @@ assert f[0] == 1 assert f[1] == 2 assert f[2] == 3 - raises(ValueError, fromstring, "3.4 2.0 3.8 2.2", dtype=int32, sep=" ") + g = fromstring("1 2 3 ", dtype=uint8, sep=" ") + assert len(g) == 3 + assert g[0] == 1 + assert g[1] == 2 + assert g[2] == 3 + #FIXME: below should work + #h = fromstring("1, , 2, 3", dtype=uint8, sep=",") + #assert len(h) == 3 + #assert h[0] == 1 + #assert h[1] == 2 + #assert h[2] == 3 def test_fromstring_types(self): from numpypy import fromstring @@ -1224,13 +1234,15 @@ def test_fromstring_invalid(self): - from numpypy import fromstring, uint16, uint8 + from numpypy import fromstring, uint16, uint8, int32 #default dtype is 64-bit float, so 3 bytes should fail raises(ValueError, fromstring, "\x01\x02\x03") #3 bytes is not modulo 2 bytes (int16) raises(ValueError, fromstring, "\x01\x03\x03", dtype=uint16) #5 bytes is larger than 3 bytes raises(ValueError, fromstring, "\x01\x02\x03", count=5, dtype=uint8) + #can't cast floats to ints with fromstring + raises(ValueError, fromstring, "3.4 2.0 3.8 2.2", dtype=int32, sep=" ") class AppTestRepr(BaseNumpyAppTest): From noreply at buildbot.pypy.org Tue Dec 13 16:54:02 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Dec 2011 16:54:02 +0100 (CET) Subject: [pypy-commit] pypy default: Checking a lighter variant of SpecialisedTuples with only 'ii', 'ff' and 'oo' Message-ID: <20111213155402.9B6CA82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50465:bbbbff8ac1d3 Date: 2011-12-13 16:53 +0100 http://bitbucket.org/pypy/pypy/changeset/bbbbff8ac1d3/ Log: Checking 
a lighter variant of SpecialisedTuples with only 'ii', 'ff' and 'oo' specialisations. It seems that the previous version causes long warm-up times on the sympy_* benchmarks. We will see how it compares in performance. diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -177,52 +177,55 @@ _specialisations = [] Cls_ii = make_specialised_class((int, int)) -Cls_is = make_specialised_class((int, str)) -Cls_io = make_specialised_class((int, object)) -Cls_si = make_specialised_class((str, int)) -Cls_ss = make_specialised_class((str, str)) -Cls_so = make_specialised_class((str, object)) -Cls_oi = make_specialised_class((object, int)) -Cls_os = make_specialised_class((object, str)) +#Cls_is = make_specialised_class((int, str)) +#Cls_io = make_specialised_class((int, object)) +#Cls_si = make_specialised_class((str, int)) +#Cls_ss = make_specialised_class((str, str)) +#Cls_so = make_specialised_class((str, object)) +#Cls_oi = make_specialised_class((object, int)) +#Cls_os = make_specialised_class((object, str)) Cls_oo = make_specialised_class((object, object)) Cls_ff = make_specialised_class((float, float)) -Cls_ooo = make_specialised_class((object, object, object)) +#Cls_ooo = make_specialised_class((object, object, object)) def makespecialisedtuple(space, list_w): if len(list_w) == 2: w_arg1, w_arg2 = list_w w_type1 = space.type(w_arg1) - w_type2 = space.type(w_arg2) + #w_type2 = space.type(w_arg2) # if w_type1 is space.w_int: + w_type2 = space.type(w_arg2) if w_type2 is space.w_int: return Cls_ii(space, w_arg1, w_arg2) - elif w_type2 is space.w_str: - return Cls_is(space, w_arg1, w_arg2) - else: - return Cls_io(space, w_arg1, w_arg2) + #elif w_type2 is space.w_str: + # return Cls_is(space, w_arg1, w_arg2) + #else: + # return Cls_io(space, w_arg1, w_arg2) # - elif w_type1 is space.w_str: - if w_type2 is space.w_int: - return Cls_si(space, w_arg1, w_arg2) - elif w_type2 is space.w_str: - return Cls_ss(space, w_arg1, w_arg2) - else: - return Cls_so(space, w_arg1, w_arg2) + #elif w_type1 is space.w_str: + # if w_type2 is space.w_int: + # return Cls_si(space, w_arg1, w_arg2) + # elif w_type2 is space.w_str: + # return Cls_ss(space, w_arg1, w_arg2) + # else: + # return Cls_so(space, w_arg1, w_arg2) # - elif w_type1 is space.w_float and w_type2 is space.w_float: - return Cls_ff(space, w_arg1, w_arg2) + elif w_type1 is space.w_float: + w_type2 = space.type(w_arg2) + if w_type2 is space.w_float: + return Cls_ff(space, w_arg1, w_arg2) # - else: - if w_type2 is space.w_int: - return Cls_oi(space, w_arg1, w_arg2) - elif w_type2 is space.w_str: - return Cls_os(space, w_arg1, w_arg2) - else: - return Cls_oo(space, w_arg1, w_arg2) + #else: + # if w_type2 is space.w_int: + # return Cls_oi(space, w_arg1, w_arg2) + # elif w_type2 is space.w_str: + # return Cls_os(space, w_arg1, w_arg2) + # else: + return Cls_oo(space, w_arg1, w_arg2) # - elif len(list_w) == 3: - return Cls_ooo(space, list_w[0], list_w[1], list_w[2]) + #elif len(list_w) == 3: + # return Cls_ooo(space, list_w[0], list_w[1], list_w[2]) else: raise NotSpecialised diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py --- a/pypy/objspace/std/test/test_specialisedtupleobject.py +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -33,15 +33,15 @@ N_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": False}) S_space 
= gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) - def hash_test(values): + def hash_test(values, must_be_specialized=True): N_values_w = [N_space.wrap(value) for value in values] S_values_w = [S_space.wrap(value) for value in values] N_w_tuple = N_space.newtuple(N_values_w) S_w_tuple = S_space.newtuple(S_values_w) - - assert isinstance(S_w_tuple, W_SpecialisedTupleObject) + + if must_be_specialized: + assert isinstance(S_w_tuple, W_SpecialisedTupleObject) assert isinstance(N_w_tuple, W_TupleObject) - assert not N_space.is_true(N_space.eq(N_w_tuple, S_w_tuple)) assert S_space.is_true(S_space.eq(N_w_tuple, S_w_tuple)) assert S_space.is_true(S_space.eq(N_space.hash(N_w_tuple), S_space.hash(S_w_tuple))) @@ -53,7 +53,7 @@ hash_test([1,(1,2)]) hash_test([1,('a',2)]) hash_test([1,()]) - hash_test([1,2,3]) + hash_test([1,2,3], must_be_specialized=False) class AppTestW_SpecialisedTupleObject: @@ -83,6 +83,8 @@ return ("SpecialisedTupleObject" + expected) in r def test_createspecialisedtuple(self): + have = ['ii', 'ff', 'oo'] + # spec = {int: 'i', float: 'f', str: 's', @@ -92,14 +94,14 @@ for y in [43, 4.3, "bar", []]: expected1 = spec[type(x)] expected2 = spec[type(y)] - if (expected1 == 'f') ^ (expected2 == 'f'): - if expected1 == 'f': expected1 = 'o' - if expected2 == 'f': expected2 = 'o' + if expected1 + expected2 not in have: + expected1 = expected2 = 'o' obj = (x, y) assert self.isspecialised(obj, '_' + expected1 + expected2) # - obj = (1, 2, 3) - assert self.isspecialised(obj, '_ooo') + if 'ooo' in have: + obj = (1, 2, 3) + assert self.isspecialised(obj, '_ooo') def test_delegation(self): t = self.forbid_delegation((42, 43)) @@ -214,6 +216,8 @@ raises(IndexError, "t[-3]") def test_three_tuples(self): + if not self.isspecialised((1, 2, 3)): + skip("don't have specialization for 3-tuples") b = self.forbid_delegation((1, 2, 3)) c = (1,) d = c + (2, 3) @@ -221,6 +225,16 @@ assert b == d def test_mongrel(self): + a = self.forbid_delegation((2.2, '333')) + assert self.isspecialised(a) + assert len(a) == 2 + assert a[0] == 2.2 and a[1] == '333' + b = ('333',) + assert a == (2.2,) + b + assert not a != (2.2,) + b + # + if not self.isspecialised((1, 2, 3)): + skip("don't have specialization for 3-tuples") a = self.forbid_delegation((1, 2.2, '333')) assert self.isspecialised(a) assert len(a) == 3 From noreply at buildbot.pypy.org Tue Dec 13 19:44:23 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 13 Dec 2011 19:44:23 +0100 (CET) Subject: [pypy-commit] pypy default: added __rsub__ for numpy boxes Message-ID: <20111213184423.0BA2082210@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50466:c3836e8813ff Date: 2011-12-13 13:44 -0500 http://bitbucket.org/pypy/pypy/changeset/c3836e8813ff/ Log: added __rsub__ for numpy boxes diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -86,6 +86,7 @@ descr_ge = _binop_impl("greater_equal") descr_radd = _binop_right_impl("add") + descr_rsub = _binop_right_impl("subtract") descr_rmul = _binop_right_impl("multiply") descr_neg = _unaryop_impl("negative") @@ -170,7 +171,8 @@ __mul__ = interp2app(W_GenericBox.descr_mul), __div__ = interp2app(W_GenericBox.descr_div), - __radd__ = interp2app(W_GenericBox.descr_add), + __radd__ = interp2app(W_GenericBox.descr_radd), + __rsub__ = interp2app(W_GenericBox.descr_rsub), __rmul__ = interp2app(W_GenericBox.descr_rmul), __eq__ = 
interp2app(W_GenericBox.descr_eq), diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -491,6 +491,11 @@ for i in range(5): assert b[i] == i - 5 + def test_scalar_subtract(self): + from numpypy import int32 + assert int32(2) - 1 == 1 + assert 1 - int32(2) == -1 + def test_mul(self): import numpypy From noreply at buildbot.pypy.org Tue Dec 13 20:50:46 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 13 Dec 2011 20:50:46 +0100 (CET) Subject: [pypy-commit] pypy default: Move __debug_repr__ in micronumpy into a new numpypy.pypy module Message-ID: <20111213195046.757CC82210@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50467:9b76d833f3c4 Date: 2011-12-13 14:50 -0500 http://bitbucket.org/pypy/pypy/changeset/9b76d833f3c4/ Log: Move __debug_repr__ in micronumpy into a new numpypy.pypy module diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -1,9 +1,19 @@ from pypy.interpreter.mixedmodule import MixedModule +class PyPyModule(MixedModule): + interpleveldefs = { + 'debug_repr': 'interp_extras.debug_repr', + } + appleveldefs = {} + class Module(MixedModule): applevel_name = 'numpypy' + submodules = { + 'pypy': PyPyModule + } + interpleveldefs = { 'ndarray': 'interp_numarray.W_NDimArray', 'dtype': 'interp_dtype.W_Dtype', diff --git a/pypy/module/micronumpy/interp_extras.py b/pypy/module/micronumpy/interp_extras.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_extras.py @@ -0,0 +1,7 @@ +from pypy.interpreter.gateway import unwrap_spec +from pypy.module.micronumpy.interp_numarray import BaseArray + + + at unwrap_spec(array=BaseArray) +def debug_repr(space, array): + return space.wrap(array.debug_repr()) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -925,9 +925,6 @@ def start_iter(self, res_shape=None): raise NotImplementedError - def descr_debug_repr(self, space): - return space.wrap(self.debug_repr()) - def descr_array_iface(self, space): concrete = self.get_concrete() storage = concrete.get_storage(space) @@ -1466,7 +1463,6 @@ __repr__ = interp2app(BaseArray.descr_repr), __str__ = interp2app(BaseArray.descr_str), - __debug_repr__ = interp2app(BaseArray.descr_debug_repr), __array_interface__ = GetSetProperty(BaseArray.descr_array_iface), dtype = GetSetProperty(BaseArray.descr_get_dtype), diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -873,16 +873,17 @@ def test_debug_repr(self): from numpypy import zeros, sin + from numpypy.pypy import debug_repr a = zeros(1) - assert a.__debug_repr__() == 'Array' - assert (a + a).__debug_repr__() == 'Call2(add, Array, Array)' - assert (a[::2]).__debug_repr__() == 'Slice(Array)' - assert (a + 2).__debug_repr__() == 'Call2(add, Array, Scalar)' - assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, FlatIter(Array))' - assert sin(a).__debug_repr__() == 'Call1(sin, Array)' + assert debug_repr(a) == 'Array' + assert debug_repr(a + a) == 'Call2(add, Array, Array)' + assert debug_repr(a[::2]) == 'Slice(Array)' + assert 
debug_repr(a + 2) == 'Call2(add, Array, Scalar)' + assert debug_repr(a + a.flat) == 'Call2(add, Array, FlatIter(Array))' + assert debug_repr(sin(a)) == 'Call1(sin, Array)' b = a + a b[0] = 3 - assert b.__debug_repr__() == 'Call2(add, forced=Array)' + assert debug_repr(b) == 'Call2(add, forced=Array)' def test_tolist_scalar(self): from numpypy import int32, bool_ From noreply at buildbot.pypy.org Tue Dec 13 21:01:30 2011 From: noreply at buildbot.pypy.org (hager) Date: Tue, 13 Dec 2011 21:01:30 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: r0 is now SCRATCH reg Message-ID: <20111213200130.B39CA82210@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50468:20d7accd990a Date: 2011-12-13 20:55 +0100 http://bitbucket.org/pypy/pypy/changeset/20d7accd990a/ Log: r0 is now SCRATCH reg diff --git a/pypy/jit/backend/ppc/ppcgen/register.py b/pypy/jit/backend/ppc/ppcgen/register.py --- a/pypy/jit/backend/ppc/ppcgen/register.py +++ b/pypy/jit/backend/ppc/ppcgen/register.py @@ -20,10 +20,11 @@ f24, f25, f26, f27, f28, f29, f30, f31] -SPP = r31 -SP = r1 -TOC = r2 -RES = r3 +SCRATCH = r0 +SP = r1 +TOC = r2 +RES = r3 +SPP = r31 MANAGED_REGS = [r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r14, r15, r16, r17, r18, From noreply at buildbot.pypy.org Tue Dec 13 21:01:31 2011 From: noreply at buildbot.pypy.org (hager) Date: Tue, 13 Dec 2011 21:01:31 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: Adjust OverwritingBuilder to interface of PPC builder Message-ID: <20111213200131.DB8FB82210@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50469:4887e168450b Date: 2011-12-13 20:59 +0100 http://bitbucket.org/pypy/pypy/changeset/4887e168450b/ Log: Adjust OverwritingBuilder to interface of PPC builder diff --git a/pypy/jit/backend/ppc/ppcgen/codebuilder.py b/pypy/jit/backend/ppc/ppcgen/codebuilder.py --- a/pypy/jit/backend/ppc/ppcgen/codebuilder.py +++ b/pypy/jit/backend/ppc/ppcgen/codebuilder.py @@ -945,19 +945,20 @@ self.save_exc = save_exc class OverwritingBuilder(PPCAssembler): - def __init__(self, cb, start, size): + def __init__(self, cb, start, num_insts): PPCAssembler.__init__(self) self.cb = cb self.index = start - self.end = start + size + self.num_insts = num_insts def currpos(self): - return self.index + assert 0, "not implemented" - def writechar(self, char): - assert self.index <= self.end - self.cb.overwrite(self.index, char) - self.index += 1 + def overwrite(self): + assert len(self.insts) <= self.num_insts + startindex = self.index / 4 + for i, new_inst in enumerate(self.insts): + self.cb.insts[i + startindex] = new_inst class PPCBuilder(BlockBuilderMixin, PPCAssembler): def __init__(self, failargs_limit=1000, r0_in_use=False): From noreply at buildbot.pypy.org Tue Dec 13 21:01:33 2011 From: noreply at buildbot.pypy.org (hager) Date: Tue, 13 Dec 2011 21:01:33 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: (bivab, hager): Implementation of CALL_ASSEMBLER seems to work Message-ID: <20111213200133.0D9F182210@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50470:eb94afc2c1cd Date: 2011-12-13 21:01 +0100 http://bitbucket.org/pypy/pypy/changeset/eb94afc2c1cd/ Log: (bivab, hager): Implementation of CALL_ASSEMBLER seems to work diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -362,11 +362,12 @@ for i, arg in enumerate(stack_args): offset = 
param_offset + i * WORD if arg is not None: - self.mc.load_imm(r.r0, arg.value) + #self.mc.load_imm(r.SCRATCH, arg.value) + self.regalloc_mov(regalloc.loc(arg), r.SCRATCH) if IS_PPC_32: - self.mc.stw(r.r0.value, r.SP.value, offset) + self.mc.stw(r.SCRATCH.value, r.SP.value, offset) else: - self.mc.std(r.r0.value, r.SP.value, offset) + self.mc.std(r.SCRATCH.value, r.SP.value, offset) self.mc.free_scratch_reg() # collect variables that need to go in registers @@ -397,16 +398,16 @@ regalloc.before_call(save_all_regs=2) # remap values stored in core registers - remap_frame_layout(self, non_float_locs, non_float_regs, r.r0) + remap_frame_layout(self, non_float_locs, non_float_regs, r.SCRATCH) # the actual call if IS_PPC_32: self.mc.bl_abs(adr) else: - self.mc.load_from_addr(r.r0, adr) - self.mc.load_from_addr(r.r2, adr + WORD) + self.mc.load_from_addr(r.SCRATCH, adr) + self.mc.load_from_addr(r.TOC, adr + WORD) self.mc.load_from_addr(r.r11, adr + 2 * WORD) - self.mc.mtctr(r.r0.value) + self.mc.mtctr(r.SCRATCH.value) self.mc.bctrl() @@ -855,12 +856,12 @@ raise AssertionError(kind) # check value resloc = regalloc.try_allocate_reg(resbox) - assert resloc is r.r3 + assert resloc is r.RES self.mc.alloc_scratch_reg(value) if IS_PPC_32: - self.mc.cmpw(0, resloc.value, r.r0.value) + self.mc.cmpw(0, resloc.value, r.SCRATCH.value) else: - self.mc.cmpd(0, resloc.value, r.r0.value) + self.mc.cmpd(0, resloc.value, r.SCRATCH.value) self.mc.free_scratch_reg() regalloc.possibly_free_var(resbox) @@ -874,21 +875,22 @@ jd = descr.outermost_jitdriver_sd assert jd is not None asm_helper_adr = self.cpu.cast_adr_to_int(jd.assembler_helper_adr) - with saved_registers(self.mc, r.NONVOLATILES + [r.r3]): - # resbox is already in r3 - self.mov_loc_loc(arglocs[1], r.r4) - if IS_PPC_32: - self.mc.bl_abs(asm_helper_adr) - else: - self.mc.load_from_addr(r.r0, asm_helper_adr) - self.mc.load_from_addr(r.r2, asm_helper_adr + WORD) - self.mc.load_from_addr(r.r11, asm_helper_adr + 2 * WORD) - self.mc.mtctr(r.r0.value) - self.mc.bctrl() - if op.result: - resloc = regalloc.after_call(op.result) - if resloc.is_vfp_reg(): - assert 0, "not implemented yet" + + # do call to helper function + self.mov_loc_loc(arglocs[1], r.r4) + if IS_PPC_32: + self.mc.bl_abs(asm_helper_adr) + else: + self.mc.load_from_addr(r.SCRATCH, asm_helper_adr) + self.mc.load_from_addr(r.TOC, asm_helper_adr + WORD) + self.mc.load_from_addr(r.r11, asm_helper_adr + 2 * WORD) + self.mc.mtctr(r.r0.value) + self.mc.bctrl() + + if op.result: + resloc = regalloc.after_call(op.result) + if resloc.is_vfp_reg(): + assert 0, "not implemented yet" # jump to merge point jmp_pos = self.mc.currpos() @@ -898,8 +900,10 @@ # Fast Path using result boxes # patch the jump to the fast path offset = self.mc.currpos() - fast_jmp_pos - pmc = OverwritingBuilder(self.mc, fast_jmp_pos, WORD) - pmc.b(offset) + pmc = OverwritingBuilder(self.mc, fast_jmp_pos, 1) + # 12 and 2 mean: jump if the 3rd bit in CR is set + pmc.bc(12, 2, offset) + pmc.overwrite() # Reset the vable token --- XXX really too much special logic here:-( if jd.index_of_virtualizable >= 0: @@ -909,12 +913,12 @@ ofs = fielddescr.offset resloc = regalloc.force_allocate_reg(resbox) self.alloc_scratch_reg() - self.mov_loc_loc(arglocs[1], r.r0) + self.mov_loc_loc(arglocs[1], r.SCRATCH) self.mc.li(resloc.value, 0) if IS_PPC_32: - self.mc.stwx(resloc.value, 0, r.r0.value) + self.mc.stwx(resloc.value, 0, r.SCRATCH.value) else: - self.mc.stdx(resloc.value, 0, r.r0.value) + self.mc.stdx(resloc.value, 0, r.SCRATCH.value) 
self.free_scratch_reg() regalloc.possibly_free_var(resbox) @@ -936,21 +940,25 @@ assert 0, "not implemented yet" else: if IS_PPC_32: - self.mc.lwzx(resloc.value, 0, r.r0.value) + self.mc.lwzx(resloc.value, 0, r.SCRATCH.value) else: - self.mc.ldx(resloc.value, 0, r.r0.value) + self.mc.ldx(resloc.value, 0, r.SCRATCH.value) self.mc.free_scratch_reg() # merge point offset = self.mc.currpos() - jmp_pos + if offset >= 0: + pmc = OverwritingBuilder(self.mc, jmp_pos, 1) + pmc.b(offset) + pmc.overwrite() self.mc.alloc_scratch_reg() if IS_PPC_32: - self.mc.cmpwi(0, r.r0.value, 0) - self.mc.lwz(r.r0.value, r.SPP.value, 0) + self.mc.cmpwi(0, r.SCRATCH.value, 0) + self.mc.lwz(r.SCRATCH.value, r.SPP.value, 0) else: - self.mc.cmpdi(0, r.r0.value, 0) - self.mc.ld(r.r0.value, r.SPP.value, 0) + self.mc.cmpdi(0, r.SCRATCH.value, 0) + self.mc.ld(r.SCRATCH.value, r.SPP.value, 0) self.mc.cror(2, 1, 2) self.mc.free_scratch_reg() diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -145,15 +145,15 @@ if IS_PPC_32: # save it in previous frame (Backchain) self.mc.stwu(r.SP.value, r.SP.value, -frame_depth) - self.mc.mflr(r.r0.value) # move old link register + self.mc.mflr(r.SCRATCH.value) # move old link register # save old link register in previous frame - self.mc.stw(r.r0.value, r.SP.value, frame_depth + WORD) + self.mc.stw(r.SCRATCH.value, r.SP.value, frame_depth + WORD) # save r31 at the bottom of the stack frame self.mc.stw(r.SPP.value, r.SP.value, WORD) else: self.mc.stdu(r.SP.value, r.SP.value, -frame_depth) - self.mc.mflr(r.r0.value) - self.mc.std(r.r0.value, r.SP.value, frame_depth + 2 * WORD) + self.mc.mflr(r.SCRATCH.value) + self.mc.std(r.SCRATCH.value, r.SP.value, frame_depth + 2 * WORD) self.mc.std(r.SPP.value, r.SP.value, WORD) # compute spilling pointer (SPP) @@ -328,7 +328,7 @@ mc.mr(r.r4.value, r.SP.value) # load stack pointer mc.mr(r.r5.value, r.SPP.value) # load spilling pointer # - # load address of decoding function into r0 + # load address of decoding function into SCRATCH mc.alloc_scratch_reg(addr) if IS_PPC_64: mc.std(r.r2.value, r.SP.value, 3 * WORD) @@ -336,7 +336,7 @@ mc.load_imm(r.r2, r2_value) mc.load_imm(r.r11, r11_value) # ... 
and branch there - mc.mtctr(r.r0.value) + mc.mtctr(r.SCRATCH.value) mc.free_scratch_reg() mc.bctrl() if IS_PPC_64: @@ -386,10 +386,10 @@ if loc.is_reg(): reg = loc else: - reg = r.r0 + reg = r.SCRATCH self.mc.load_from_addr(reg, addr) if loc.is_stack(): - self.regalloc_mov(r.r0, loc) + self.regalloc_mov(r.SCRATCH, loc) def gen_direct_bootstrap_code(self, loophead, looptoken, inputargs, frame_depth): self._make_frame(frame_depth) @@ -424,7 +424,7 @@ # remap values stored in core registers self.mc.alloc_scratch_reg() - remap_frame_layout(self, nonfloat_args, nonfloat_regs, r.r0) + remap_frame_layout(self, nonfloat_args, nonfloat_regs, r.SCRATCH) self.mc.free_scratch_reg() # load values passed on the stack to the corresponding locations @@ -453,10 +453,10 @@ count += 1 self.mc.alloc_scratch_reg() if IS_PPC_32: - self.mc.lwz(r.r0.value, r.SPP.value, stack_position) + self.mc.lwz(r.SCRATCH.value, r.SPP.value, stack_position) else: - self.mc.ld(r.r0.value, r.SPP.value, stack_position) - self.mov_loc_loc(r.r0, loc) + self.mc.ld(r.SCRATCH.value, r.SPP.value, stack_position) + self.mov_loc_loc(r.SCRATCH, loc) self.mc.free_scratch_reg() else: assert 0, 'invalid location' @@ -767,9 +767,9 @@ # store addr in force index field self.mc.alloc_scratch_reg(memaddr) if IS_PPC_32: - self.mc.stw(r.r0.value, r.SPP.value, self.ENCODING_AREA) + self.mc.stw(r.SCRATCH.value, r.SPP.value, self.ENCODING_AREA) else: - self.mc.std(r.r0.value, r.SPP.value, self.ENCODING_AREA) + self.mc.std(r.SCRATCH.value, r.SPP.value, self.ENCODING_AREA) self.mc.free_scratch_reg() if save_exc: @@ -819,12 +819,14 @@ return # move immediate value to memory elif loc.is_stack(): + self.mc.alloc_scratch_reg() offset = loc.as_key() * WORD - WORD - self.mc.load_imm(r.r0.value, value) + self.mc.load_imm(r.SCRATCH.value, value) if IS_PPC_32: - self.mc.stw(r.r0.value, r.SPP.value, offset) + self.mc.stw(r.SCRATCH.value, r.SPP.value, offset) else: - self.mc.std(r.r0.value, r.SPP.value, offset) + self.mc.std(r.SCRATCH.value, r.SPP.value, offset) + self.mc.free_scratch_reg() return assert 0, "not supported location" elif prev_loc.is_stack(): @@ -840,12 +842,14 @@ # move in memory elif loc.is_stack(): target_offset = loc.as_key() * WORD - WORD + self.mc.alloc_scratch_reg() if IS_PPC_32: - self.mc.lwz(r.r0.value, r.SPP.value, offset) - self.mc.stw(r.r0.value, r.SPP.value, target_offset) + self.mc.lwz(r.SCRATCH.value, r.SPP.value, offset) + self.mc.stw(r.SCRATCH.value, r.SPP.value, target_offset) else: - self.mc.ld(r.r0.value, r.SPP.value, offset) - self.mc.std(r.r0.value, r.SPP.value, target_offset) + self.mc.ld(r.SCRATCH.value, r.SPP.value, offset) + self.mc.std(r.SCRATCH.value, r.SPP.value, target_offset) + self.mc.free_scratch_reg() return assert 0, "not supported location" elif prev_loc.is_reg(): @@ -869,7 +873,7 @@ def regalloc_push(self, loc): """Pushes the value stored in loc to the stack - Can trash the current value of r0 when pushing a stack + Can trash the current value of SCRATCH when pushing a stack loc""" if loc.is_stack(): @@ -896,7 +900,7 @@ def regalloc_pop(self, loc): """Pops the value on top of the stack to loc. 
Can trash the current - value of r0 when popping to a stack loc""" + value of SCRATCH when popping to a stack loc""" if loc.is_stack(): if loc.type == FLOAT: assert 0, "not implemented yet" @@ -962,11 +966,11 @@ return 0 def _write_fail_index(self, fail_index): - self.mc.load_imm(r.r0, fail_index) + self.mc.load_imm(r.SCRATCH, fail_index) if IS_PPC_32: - self.mc.stw(r.r0.value, r.SPP.value, self.ENCODING_AREA) + self.mc.stw(r.SCRATCH.value, r.SPP.value, self.ENCODING_AREA) else: - self.mc.std(r.r0.value, r.SPP.value, self.ENCODING_AREA) + self.mc.std(r.SCRATCH.value, r.SPP.value, self.ENCODING_AREA) def load(self, loc, value): assert loc.is_reg() and value.is_imm() From noreply at buildbot.pypy.org Tue Dec 13 21:17:16 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Dec 2011 21:17:16 +0100 (CET) Subject: [pypy-commit] pypy default: Reintroduce the equivalent of 'unused', which was killed during the Message-ID: <20111213201716.AA49C82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50471:28fdd95654de Date: 2011-12-13 18:29 +0100 http://bitbucket.org/pypy/pypy/changeset/28fdd95654de/ Log: Reintroduce the equivalent of 'unused', which was killed during the merge of jit-simplify-backendintf. diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -174,12 +174,11 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity = self._compute_vars_longevity(inputargs, operations) - self.longevity = longevity - self.rm = gpr_reg_mgr_cls(longevity, + self._compute_vars_longevity(inputargs, operations) + self.rm = gpr_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) - self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, + self.xrm = xmm_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) return operations @@ -481,7 +480,7 @@ # only to guard operations or to jump or to finish produced = {} last_used = {} - #useful = {} + last_real_usage = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -492,10 +491,13 @@ opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) - #if opnum != rop.JUMP and opnum != rop.FINISH: - # useful[arg] = None - if isinstance(arg, Box) and arg not in last_used: + if not isinstance(arg, Box): + continue + if arg not in last_used: last_used[arg] = i + if opnum != rop.JUMP and opnum != rop.LABEL: + if arg not in last_real_usage: + last_real_usage[arg] = i if op.is_guard(): for arg in op.getfailargs(): if arg is None: # hole @@ -503,7 +505,8 @@ assert isinstance(arg, Box) if arg not in last_used: last_used[arg] = i - + self.last_real_usage = last_real_usage + # longevity = {} for arg in produced: if arg in last_used: @@ -519,7 +522,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity#, useful + self.longevity = longevity def loc(self, v): if v is None: # xxx kludgy @@ -1471,6 +1474,16 @@ inputargs = op.getarglist() arglocs = [None] * len(inputargs) # + # we use force_spill() on the boxes that are not going to be really + # used any more in the loop, but that are kept alive anyway + # by being in a next LABEL's or a JUMP's argument or fail_args + # of some guard + position = self.rm.position + for arg in inputargs: + assert isinstance(arg, Box) + if self.last_real_usage.get(arg, -1) <= position: + 
self.force_spill_var(arg) + # # we need to make sure that the tmpreg and xmmtmp are free tmpreg = X86RegisterManager.all_regs[0] tmpvar = TempBox() @@ -1491,7 +1504,7 @@ # for i in range(len(inputargs)): arg = inputargs[i] - assert not isinstance(arg, Const) + assert isinstance(arg, Box) loc = self.loc(arg) assert not (loc is tmpreg or loc is xmmtmp or loc is ebp) arglocs[i] = loc From noreply at buildbot.pypy.org Tue Dec 13 21:17:17 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Dec 2011 21:17:17 +0100 (CET) Subject: [pypy-commit] pypy default: Tentative: kill the usage of eax as a reserved register around LABEL and Message-ID: <20111213201717.D2DCC82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50472:3385f481f372 Date: 2011-12-13 18:47 +0100 http://bitbucket.org/pypy/pypy/changeset/3385f481f372/ Log: Tentative: kill the usage of eax as a reserved register around LABEL and JUMPs. On x86-64, we have already r11 for that. On x86-32, too bad, we emit slightly-inefficient code that moves from %ebp-x to %ebp-y by generating "push (%ebp-x); pop (%ebp-y)". The hope is that we should win overall anyway, because 9b45755e2c2b should hopefully have removed most such moves. diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -163,7 +163,7 @@ if not we_are_translated() and self.box_types is not None: assert isinstance(v, TempBox) or v.type in self.box_types - def possibly_free_var(self, v, _hint_dont_reuse_quickly=False): + def possibly_free_var(self, v): """ If v is stored in a register and v is not used beyond the current position, then free it. Must be called at some point for all variables that might be in registers. 
@@ -173,10 +173,7 @@ return if v not in self.longevity or self.longevity[v][1] <= self.position: if v in self.reg_bindings: - if _hint_dont_reuse_quickly: - self.free_regs.insert(0, self.reg_bindings[v]) - else: - self.free_regs.append(self.reg_bindings[v]) + self.free_regs.append(self.reg_bindings[v]) del self.reg_bindings[v] if self.frame_manager is not None: self.frame_manager.mark_as_free(v) diff --git a/pypy/jit/backend/x86/jump.py b/pypy/jit/backend/x86/jump.py --- a/pypy/jit/backend/x86/jump.py +++ b/pypy/jit/backend/x86/jump.py @@ -66,6 +66,10 @@ def _move(assembler, src, dst, tmpreg): if dst.is_memory_reference() and src.is_memory_reference(): + if tmpreg is None: + assembler.regalloc_push(src) + assembler.regalloc_pop(dst) + return assembler.regalloc_mov(src, tmpreg) src = tmpreg assembler.regalloc_mov(src, dst) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -1387,13 +1387,6 @@ assert isinstance(descr, TargetToken) arglocs = descr._x86_arglocs self.jump_target_descr = descr - # compute 'tmploc' to be all_regs[0] by spilling what is there - tmpbox1 = TempBox() - tmpbox2 = TempBox() - tmpreg = X86RegisterManager.all_regs[0] - self.rm.force_allocate_reg(tmpbox1, selected_reg=tmpreg) - xmmtmp = X86XMMRegisterManager.all_regs[0] - self.xrm.force_allocate_reg(tmpbox2, selected_reg=xmmtmp) # Part about non-floats src_locations1 = [] dst_locations1 = [] @@ -1405,19 +1398,23 @@ box = op.getarg(i) src_loc = self.loc(box) dst_loc = arglocs[i] - assert dst_loc != tmpreg and dst_loc != xmmtmp if box.type != FLOAT: src_locations1.append(src_loc) dst_locations1.append(dst_loc) else: src_locations2.append(src_loc) dst_locations2.append(dst_loc) + # Do we have a temp var? 
+ if IS_X86_64: + tmpreg = X86_64_SCRATCH_REG + xmmtmp = X86_64_XMM_SCRATCH_REG + else: + tmpreg = None + xmmtmp = None # Do the remapping remap_frame_layout_mixed(assembler, src_locations1, dst_locations1, tmpreg, src_locations2, dst_locations2, xmmtmp) - self.rm.possibly_free_var(tmpbox1) - self.xrm.possibly_free_var(tmpbox2) self.possibly_free_vars_for_op(op) assembler.closing_jump(self.jump_target_descr) @@ -1484,17 +1481,6 @@ if self.last_real_usage.get(arg, -1) <= position: self.force_spill_var(arg) # - # we need to make sure that the tmpreg and xmmtmp are free - tmpreg = X86RegisterManager.all_regs[0] - tmpvar = TempBox() - self.rm.force_allocate_reg(tmpvar, selected_reg=tmpreg) - self.rm.possibly_free_var(tmpvar, _hint_dont_reuse_quickly=True) - # - xmmtmp = X86XMMRegisterManager.all_regs[0] - tmpvar = TempBox() - self.xrm.force_allocate_reg(tmpvar, selected_reg=xmmtmp) - self.xrm.possibly_free_var(tmpvar, _hint_dont_reuse_quickly=True) - # # we need to make sure that no variable is stored in ebp for arg in inputargs: if self.loc(arg) is ebp: @@ -1506,7 +1492,7 @@ arg = inputargs[i] assert isinstance(arg, Box) loc = self.loc(arg) - assert not (loc is tmpreg or loc is xmmtmp or loc is ebp) + assert loc is not ebp arglocs[i] = loc if isinstance(loc, RegLoc): self.fm.mark_as_free(arg) diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -71,6 +71,18 @@ ('mov', eax, s24), ('mov', s12, edi)] +def test_no_tmp_reg(): + assembler = MockAssembler() + s8 = frame_pos(0, INT) + s12 = frame_pos(13, INT) + s20 = frame_pos(20, INT) + s24 = frame_pos(221, INT) + remap_frame_layout(assembler, [s8, eax, s12], [s20, s24, edi], None) + assert assembler.ops == [('push', s8), + ('pop', s20), + ('mov', eax, s24), + ('mov', s12, edi)] + def test_reordering(): assembler = MockAssembler() s8 = frame_pos(8, INT) From noreply at buildbot.pypy.org Tue Dec 13 21:17:19 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Dec 2011 21:17:19 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20111213201719.0A0F382210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50473:585211322729 Date: 2011-12-13 21:16 +0100 http://bitbucket.org/pypy/pypy/changeset/585211322729/ Log: merge heads diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -1,9 +1,19 @@ from pypy.interpreter.mixedmodule import MixedModule +class PyPyModule(MixedModule): + interpleveldefs = { + 'debug_repr': 'interp_extras.debug_repr', + } + appleveldefs = {} + class Module(MixedModule): applevel_name = 'numpypy' + submodules = { + 'pypy': PyPyModule + } + interpleveldefs = { 'ndarray': 'interp_numarray.W_NDimArray', 'dtype': 'interp_dtype.W_Dtype', diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -86,6 +86,7 @@ descr_ge = _binop_impl("greater_equal") descr_radd = _binop_right_impl("add") + descr_rsub = _binop_right_impl("subtract") descr_rmul = _binop_right_impl("multiply") descr_neg = _unaryop_impl("negative") @@ -170,7 +171,8 @@ __mul__ = interp2app(W_GenericBox.descr_mul), __div__ = interp2app(W_GenericBox.descr_div), - __radd__ = interp2app(W_GenericBox.descr_add), + __radd__ = interp2app(W_GenericBox.descr_radd), + 
__rsub__ = interp2app(W_GenericBox.descr_rsub), __rmul__ = interp2app(W_GenericBox.descr_rmul), __eq__ = interp2app(W_GenericBox.descr_eq), diff --git a/pypy/module/micronumpy/interp_extras.py b/pypy/module/micronumpy/interp_extras.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_extras.py @@ -0,0 +1,7 @@ +from pypy.interpreter.gateway import unwrap_spec +from pypy.module.micronumpy.interp_numarray import BaseArray + + + at unwrap_spec(array=BaseArray) +def debug_repr(space, array): + return space.wrap(array.debug_repr()) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -925,9 +925,6 @@ def start_iter(self, res_shape=None): raise NotImplementedError - def descr_debug_repr(self, space): - return space.wrap(self.debug_repr()) - def descr_array_iface(self, space): concrete = self.get_concrete() storage = concrete.get_storage(space) @@ -1466,7 +1463,6 @@ __repr__ = interp2app(BaseArray.descr_repr), __str__ = interp2app(BaseArray.descr_str), - __debug_repr__ = interp2app(BaseArray.descr_debug_repr), __array_interface__ = GetSetProperty(BaseArray.descr_array_iface), dtype = GetSetProperty(BaseArray.descr_get_dtype), diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -491,6 +491,11 @@ for i in range(5): assert b[i] == i - 5 + def test_scalar_subtract(self): + from numpypy import int32 + assert int32(2) - 1 == 1 + assert 1 - int32(2) == -1 + def test_mul(self): import numpypy @@ -868,16 +873,17 @@ def test_debug_repr(self): from numpypy import zeros, sin + from numpypy.pypy import debug_repr a = zeros(1) - assert a.__debug_repr__() == 'Array' - assert (a + a).__debug_repr__() == 'Call2(add, Array, Array)' - assert (a[::2]).__debug_repr__() == 'Slice(Array)' - assert (a + 2).__debug_repr__() == 'Call2(add, Array, Scalar)' - assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, FlatIter(Array))' - assert sin(a).__debug_repr__() == 'Call1(sin, Array)' + assert debug_repr(a) == 'Array' + assert debug_repr(a + a) == 'Call2(add, Array, Array)' + assert debug_repr(a[::2]) == 'Slice(Array)' + assert debug_repr(a + 2) == 'Call2(add, Array, Scalar)' + assert debug_repr(a + a.flat) == 'Call2(add, Array, FlatIter(Array))' + assert debug_repr(sin(a)) == 'Call1(sin, Array)' b = a + a b[0] = 3 - assert b.__debug_repr__() == 'Call2(add, forced=Array)' + assert debug_repr(b) == 'Call2(add, forced=Array)' def test_tolist_scalar(self): from numpypy import int32, bool_ diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -177,52 +177,55 @@ _specialisations = [] Cls_ii = make_specialised_class((int, int)) -Cls_is = make_specialised_class((int, str)) -Cls_io = make_specialised_class((int, object)) -Cls_si = make_specialised_class((str, int)) -Cls_ss = make_specialised_class((str, str)) -Cls_so = make_specialised_class((str, object)) -Cls_oi = make_specialised_class((object, int)) -Cls_os = make_specialised_class((object, str)) +#Cls_is = make_specialised_class((int, str)) +#Cls_io = make_specialised_class((int, object)) +#Cls_si = make_specialised_class((str, int)) +#Cls_ss = make_specialised_class((str, str)) +#Cls_so = 
make_specialised_class((str, object)) +#Cls_oi = make_specialised_class((object, int)) +#Cls_os = make_specialised_class((object, str)) Cls_oo = make_specialised_class((object, object)) Cls_ff = make_specialised_class((float, float)) -Cls_ooo = make_specialised_class((object, object, object)) +#Cls_ooo = make_specialised_class((object, object, object)) def makespecialisedtuple(space, list_w): if len(list_w) == 2: w_arg1, w_arg2 = list_w w_type1 = space.type(w_arg1) - w_type2 = space.type(w_arg2) + #w_type2 = space.type(w_arg2) # if w_type1 is space.w_int: + w_type2 = space.type(w_arg2) if w_type2 is space.w_int: return Cls_ii(space, w_arg1, w_arg2) - elif w_type2 is space.w_str: - return Cls_is(space, w_arg1, w_arg2) - else: - return Cls_io(space, w_arg1, w_arg2) + #elif w_type2 is space.w_str: + # return Cls_is(space, w_arg1, w_arg2) + #else: + # return Cls_io(space, w_arg1, w_arg2) # - elif w_type1 is space.w_str: - if w_type2 is space.w_int: - return Cls_si(space, w_arg1, w_arg2) - elif w_type2 is space.w_str: - return Cls_ss(space, w_arg1, w_arg2) - else: - return Cls_so(space, w_arg1, w_arg2) + #elif w_type1 is space.w_str: + # if w_type2 is space.w_int: + # return Cls_si(space, w_arg1, w_arg2) + # elif w_type2 is space.w_str: + # return Cls_ss(space, w_arg1, w_arg2) + # else: + # return Cls_so(space, w_arg1, w_arg2) # - elif w_type1 is space.w_float and w_type2 is space.w_float: - return Cls_ff(space, w_arg1, w_arg2) + elif w_type1 is space.w_float: + w_type2 = space.type(w_arg2) + if w_type2 is space.w_float: + return Cls_ff(space, w_arg1, w_arg2) # - else: - if w_type2 is space.w_int: - return Cls_oi(space, w_arg1, w_arg2) - elif w_type2 is space.w_str: - return Cls_os(space, w_arg1, w_arg2) - else: - return Cls_oo(space, w_arg1, w_arg2) + #else: + # if w_type2 is space.w_int: + # return Cls_oi(space, w_arg1, w_arg2) + # elif w_type2 is space.w_str: + # return Cls_os(space, w_arg1, w_arg2) + # else: + return Cls_oo(space, w_arg1, w_arg2) # - elif len(list_w) == 3: - return Cls_ooo(space, list_w[0], list_w[1], list_w[2]) + #elif len(list_w) == 3: + # return Cls_ooo(space, list_w[0], list_w[1], list_w[2]) else: raise NotSpecialised diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py --- a/pypy/objspace/std/test/test_specialisedtupleobject.py +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -33,15 +33,15 @@ N_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": False}) S_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) - def hash_test(values): + def hash_test(values, must_be_specialized=True): N_values_w = [N_space.wrap(value) for value in values] S_values_w = [S_space.wrap(value) for value in values] N_w_tuple = N_space.newtuple(N_values_w) S_w_tuple = S_space.newtuple(S_values_w) - - assert isinstance(S_w_tuple, W_SpecialisedTupleObject) + + if must_be_specialized: + assert isinstance(S_w_tuple, W_SpecialisedTupleObject) assert isinstance(N_w_tuple, W_TupleObject) - assert not N_space.is_true(N_space.eq(N_w_tuple, S_w_tuple)) assert S_space.is_true(S_space.eq(N_w_tuple, S_w_tuple)) assert S_space.is_true(S_space.eq(N_space.hash(N_w_tuple), S_space.hash(S_w_tuple))) @@ -53,7 +53,7 @@ hash_test([1,(1,2)]) hash_test([1,('a',2)]) hash_test([1,()]) - hash_test([1,2,3]) + hash_test([1,2,3], must_be_specialized=False) class AppTestW_SpecialisedTupleObject: @@ -83,6 +83,8 @@ return ("SpecialisedTupleObject" + expected) in r def test_createspecialisedtuple(self): + have = 
['ii', 'ff', 'oo'] + # spec = {int: 'i', float: 'f', str: 's', @@ -92,14 +94,14 @@ for y in [43, 4.3, "bar", []]: expected1 = spec[type(x)] expected2 = spec[type(y)] - if (expected1 == 'f') ^ (expected2 == 'f'): - if expected1 == 'f': expected1 = 'o' - if expected2 == 'f': expected2 = 'o' + if expected1 + expected2 not in have: + expected1 = expected2 = 'o' obj = (x, y) assert self.isspecialised(obj, '_' + expected1 + expected2) # - obj = (1, 2, 3) - assert self.isspecialised(obj, '_ooo') + if 'ooo' in have: + obj = (1, 2, 3) + assert self.isspecialised(obj, '_ooo') def test_delegation(self): t = self.forbid_delegation((42, 43)) @@ -214,6 +216,8 @@ raises(IndexError, "t[-3]") def test_three_tuples(self): + if not self.isspecialised((1, 2, 3)): + skip("don't have specialization for 3-tuples") b = self.forbid_delegation((1, 2, 3)) c = (1,) d = c + (2, 3) @@ -221,6 +225,16 @@ assert b == d def test_mongrel(self): + a = self.forbid_delegation((2.2, '333')) + assert self.isspecialised(a) + assert len(a) == 2 + assert a[0] == 2.2 and a[1] == '333' + b = ('333',) + assert a == (2.2,) + b + assert not a != (2.2,) + b + # + if not self.isspecialised((1, 2, 3)): + skip("don't have specialization for 3-tuples") a = self.forbid_delegation((1, 2.2, '333')) assert self.isspecialised(a) assert len(a) == 3 From noreply at buildbot.pypy.org Tue Dec 13 21:44:07 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Dec 2011 21:44:07 +0100 (CET) Subject: [pypy-commit] pypy default: Add missing doc file. Message-ID: <20111213204407.C4AF382210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50474:2a81fc4ed8d2 Date: 2011-12-13 21:43 +0100 http://bitbucket.org/pypy/pypy/changeset/2a81fc4ed8d2/ Log: Add missing doc file. diff --git a/pypy/doc/config/objspace.std.withspecialisedtuple.txt b/pypy/doc/config/objspace.std.withspecialisedtuple.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withspecialisedtuple.txt @@ -0,0 +1,3 @@ +Use "specialized tuples", a custom implementation for some common kinds +of tuples. Currently limited to tuples of length 2, in three variants: +(int, int), (float, float), and a generic (object, object). 
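Taken together, the specialisedtupleobject changes above reduce the dispatch to the three variants the new doc file describes: (int, int), (float, float) and the generic (object, object). The snippet below is a standalone plain-Python sketch of that dispatch, not the RPython code itself; choose_specialisation and the assert lines are invented for illustration, and only the Cls_ii/Cls_ff/Cls_oo names are taken from the patch.

    def choose_specialisation(a, b):
        # mirrors the exact-type (space.type() identity) checks in
        # makespecialisedtuple: subclasses and mixed pairs fall through
        # to the generic case
        if type(a) is int and type(b) is int:
            return "Cls_ii"
        if type(a) is float and type(b) is float:
            return "Cls_ff"
        return "Cls_oo"

    assert choose_specialisation(1, 2) == "Cls_ii"
    assert choose_specialisation(1.5, 2.5) == "Cls_ff"
    assert choose_specialisation(1, "x") == "Cls_oo"   # (int, str) no longer special-cased
    assert choose_specialisation(1, 2.5) == "Cls_oo"   # mixed int/float pairs get the generic class too

Length-3 tuples now raise NotSpecialised because Cls_ooo is commented out as well, which is why the tests above skip their 3-tuple cases.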
From noreply at buildbot.pypy.org Tue Dec 13 21:51:22 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Tue, 13 Dec 2011 21:51:22 +0100 (CET) Subject: [pypy-commit] pypy numpy-full-fromstring: Wrote some really ugly code to deal with all the corner cases of numpy fromstring error handling Message-ID: <20111213205122.BCAB982210@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: numpy-full-fromstring Changeset: r50475:8c97ceda4c34 Date: 2011-12-13 15:51 -0500 http://bitbucket.org/pypy/pypy/changeset/8c97ceda4c34/ Log: Wrote some really ugly code to deal with all the corner cases of numpy fromstring error handling diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -2,14 +2,17 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.rpython.lltypesystem import lltype, rffi from pypy.module.micronumpy import interp_dtype +from pypy.objspace.std.strutil import strip_spaces FLOAT_SIZE = rffi.sizeof(lltype.Float) def _fromstring_text(space, s, count, sep, length, dtype): - import string from pypy.module.micronumpy.interp_numarray import W_NDimArray + sep_stripped = strip_spaces(sep) + skip_bad_vals = True if len(sep_stripped) == 0 else False + A = [] num_items = 0 ptr = 0 @@ -18,10 +21,28 @@ nextptr = s.find(sep, ptr) if nextptr < 0: nextptr = length - piece = s[ptr:nextptr] - #FIXME: need to check piece.isspace() also, but does not translate - if len(piece) > 0: - val = dtype.coerce(space, space.wrap(piece)) + piece = strip_spaces(s[ptr:nextptr]) + if len(piece) > 0 or not skip_bad_vals: + if len(piece) == 0 and not skip_bad_vals: + val = dtype.itemtype.default_fromstring(space) + else: + try: + val = dtype.coerce(space, space.wrap(piece)) + except OperationError, e: + if not e.match(space, space.w_ValueError): + raise + gotit = False + while not gotit and len(piece) > 0: + piece = piece[:-1] + try: + val = dtype.coerce(space, space.wrap(piece)) + gotit = True + except OperationError, e: + if not e.match(space, space.w_ValueError): + raise + if not gotit: + val = dtype.itemtype.default_fromstring(space) + nextptr = length A.append(val) num_items += 1 ptr = nextptr + 1 diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1173,7 +1173,7 @@ cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4)) def test_fromstring(self): - from numpypy import fromstring, uint8, float32, int32 + from numpypy import fromstring, array, uint8, float32, int32 a = fromstring(self.data) for i in range(4): assert a[i] == i + 1 @@ -1201,12 +1201,28 @@ assert g[0] == 1 assert g[1] == 2 assert g[2] == 3 - #FIXME: below should work - #h = fromstring("1, , 2, 3", dtype=uint8, sep=",") - #assert len(h) == 3 - #assert h[0] == 1 - #assert h[1] == 2 - #assert h[2] == 3 + h = fromstring("1, , 2, 3", dtype=uint8, sep=",") + assert h.tolist() == [1,0,2,3] + i = fromstring("1 2 3", dtype=uint8, sep=" ") + assert i.tolist() == [1,2,3] + j = fromstring("1\t\t\t\t2\t3", dtype=uint8, sep="\t") + assert j.tolist() == [1,2,3] + k = fromstring("1,x,2,3", dtype=uint8, sep=",") + assert k.tolist() == [1,0] + l = fromstring("1,x,2,3", dtype='float32', sep=",") + assert l.tolist() == [1.0,-1.0] + m = fromstring("1,,2,3", sep=",") + assert m.tolist() == [1.0,-1.0,2.0,3.0] + n = fromstring("3.4 2.0 3.8 2.2", dtype=int32, sep=" ") 
+ assert n.tolist() == [3] + o = fromstring("1.0 2f.0f 3.8 2.2", dtype=float32, sep=" ") + assert len(o) == 2 + assert o[0] == 1.0 + assert o[1] == 2.0 + p = fromstring("1.0,,2.0,3.0", sep=",") + assert p.tolist() == [1.0, -1.0, 2.0, 3.0] + q = fromstring("1.0,,2.0,3.0", sep=" ") + assert q.tolist() == [1.0] def test_fromstring_types(self): from numpypy import fromstring @@ -1241,8 +1257,6 @@ raises(ValueError, fromstring, "\x01\x03\x03", dtype=uint16) #5 bytes is larger than 3 bytes raises(ValueError, fromstring, "\x01\x02\x03", count=5, dtype=uint8) - #can't cast floats to ints with fromstring - raises(ValueError, fromstring, "3.4 2.0 3.8 2.2", dtype=int32, sep=" ") class AppTestRepr(BaseNumpyAppTest): diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -87,6 +87,9 @@ def _coerce(self, space, w_item): raise NotImplementedError + def default_fromstring(self, space): + raise NotImplementedError + def read(self, storage, width, i, offset): return self.box(libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), width, storage, i, offset @@ -213,6 +216,9 @@ def for_computation(self, v): return widen(v) + + def default_fromstring(self, space): + return self._coerce(space, space.wrap(0)) @simple_binary_op def div(self, v1, v2): @@ -320,6 +326,9 @@ def for_computation(self, v): return float(v) + def default_fromstring(self, space): + return self._coerce(space, space.wrap(-1.0)) + @simple_binary_op def div(self, v1, v2): try: From noreply at buildbot.pypy.org Tue Dec 13 22:29:35 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Dec 2011 22:29:35 +0100 (CET) Subject: [pypy-commit] pypy default: Improve the test in the pre-jit-targets world. Message-ID: <20111213212935.7D39C82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50476:f6eb69437a26 Date: 2011-12-13 21:24 +0000 http://bitbucket.org/pypy/pypy/changeset/f6eb69437a26/ Log: Improve the test in the pre-jit-targets world. diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -6,6 +6,8 @@ def main(n): def f(): for i in range(10000): + i -= 1 + i -= 42 # ID: subtract yield i def g(): @@ -24,3 +26,8 @@ setfield_gc(p45, i29, descr=) jump(..., descr=...) """) + assert loop.match_by_id("subtract", """ + setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me + i2 = int_sub_ovf(i1, 42) + guard_no_overflow(descr=...) + """) From noreply at buildbot.pypy.org Tue Dec 13 22:29:36 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Dec 2011 22:29:36 +0100 (CET) Subject: [pypy-commit] pypy default: Fix for jit-simplify-backendintf. Message-ID: <20111213212936.A011982210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50477:869dcd3db31a Date: 2011-12-13 21:52 +0100 http://bitbucket.org/pypy/pypy/changeset/869dcd3db31a/ Log: Fix for jit-simplify-backendintf. 
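The model.py hunk that follows stops entry_bridge_ops from assuming that the first label opens the trace: instead of asserting that labels[0] sits at index 0 and slicing from position 1, it now looks up the positions of the first two labels and returns everything strictly between them. A toy rendering of the new slicing, using plain strings where the real code works on operation objects and compares op.name:

    def ops_between_first_two_labels(ops):
        labels = [i for i, op in enumerate(ops) if op == "label"]
        i0, i1 = labels[0], labels[1]
        return ops[i0 + 1:i1]

    trace = ["debug_merge_point", "label", "int_add", "guard_true", "label", "jump"]
    assert ops_between_first_two_labels(trace) == ["int_add", "guard_true"]
    # the previous version asserted that the first label was at index 0,
    # so an operation before it (like the leading debug_merge_point here)
    # would have failed that assert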
diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -210,9 +210,9 @@ def entry_bridge_ops(self, *args, **kwds): ops = list(self._allops(*args, **kwds)) labels = [op for op in ops if op.name == 'label'] - assert ops.index(labels[0]) == 0 - i = ops.index(labels[1]) - return ops[1:i] + i0 = ops.index(labels[0]) + i1 = ops.index(labels[1]) + return ops[i0+1:i1] @property def chunks(self): From noreply at buildbot.pypy.org Tue Dec 13 22:29:37 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Dec 2011 22:29:37 +0100 (CET) Subject: [pypy-commit] pypy default: Fix: if expected_ops contains a '...' line, the resulting line numbers got out of sync. Message-ID: <20111213212937.C2BB082210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50478:4d738293f0a9 Date: 2011-12-13 22:10 +0100 http://bitbucket.org/pypy/pypy/changeset/4d738293f0a9/ Log: Fix: if expected_ops contains a '...' line, the resulting line numbers got out of sync. diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -409,7 +409,7 @@ """ iter_exp_ops = iter(expected_ops) iter_ops = RevertableIterator(self.ops) - for opindex, exp_op in enumerate(iter_exp_ops): + for exp_op in iter_exp_ops: try: if exp_op == '...': # loop until we find an operation which matches @@ -430,7 +430,7 @@ if exp_op[4] is False: # optional operation iter_ops.revert_one() continue # try to match with the next exp_op - e.opindex = opindex + e.opindex = iter_ops.index - 1 raise # # make sure we exhausted iter_ops From noreply at buildbot.pypy.org Tue Dec 13 22:29:38 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Dec 2011 22:29:38 +0100 (CET) Subject: [pypy-commit] pypy default: Merge heads. This new old version of test_generators still fails. Message-ID: <20111213212938.E589182210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50479:a425bfb85325 Date: 2011-12-13 21:28 +0000 http://bitbucket.org/pypy/pypy/changeset/a425bfb85325/ Log: Merge heads. This new old version of test_generators still fails. diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -6,6 +6,8 @@ def main(n): def f(): for i in range(10000): + i -= 1 + i -= 42 # ID: subtract yield i def g(): @@ -26,3 +28,8 @@ i47 = arraylen_gc(p8, descr=) # Should be removed by backend jump(..., descr=...) """) + assert loop.match_by_id("subtract", """ + setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me + i2 = int_sub_ovf(i1, 42) + guard_no_overflow(descr=...) + """) From noreply at buildbot.pypy.org Tue Dec 13 22:47:34 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Dec 2011 22:47:34 +0100 (CET) Subject: [pypy-commit] pypy default: Pass multiple --jit parameters correctly. Message-ID: <20111213214734.5F80682210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50480:fa6d4dc7b545 Date: 2011-12-13 22:44 +0100 http://bitbucket.org/pypy/pypy/changeset/fa6d4dc7b545/ Log: Pass multiple --jit parameters correctly. 
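The hunk below replaces one --jit flag per option with a single --jit flag whose value joins all key=value pairs with commas, which is the usual way to hand several parameters to pypy on one command line (e.g. --jit threshold=200,trace_eagerness=50). A small standalone comparison of the two constructions; the option names and values are only examples:

    import sys

    jitopts = {"threshold": 150, "function_threshold": 300}

    # old construction: one --jit flag per option
    old_cmdline = [sys.executable, "-S"]
    for key, value in jitopts.items():
        old_cmdline += ["--jit", "%s=%s" % (key, value)]

    # new construction: a single --jit flag, pairs joined with commas
    new_cmdline = [sys.executable, "-S"]
    if jitopts:
        pairs = ["%s=%s" % (key, value) for key, value in jitopts.items()]
        new_cmdline += ["--jit", ",".join(pairs)]

    print(old_cmdline)  # [..., '--jit', 'threshold=150', '--jit', 'function_threshold=300']
    print(new_cmdline)  # [..., '--jit', 'threshold=150,function_threshold=300']

The test harness then appends the script path, as before.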
diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -45,8 +45,10 @@ cmdline = [sys.executable] if not import_site: cmdline.append('-S') - for key, value in jitopts.iteritems(): - cmdline += ['--jit', '%s=%s' % (key, value)] + if jitopts: + jitcmdline = ['%s=%s' % (key, value) + for key, value in jitopts.items()] + cmdline += ['--jit', ','.join(jitcmdline)] cmdline.append(str(self.filepath)) # print cmdline, logfile From noreply at buildbot.pypy.org Tue Dec 13 22:47:35 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Dec 2011 22:47:35 +0100 (CET) Subject: [pypy-commit] pypy default: Found out why the test fails, and document it. Message-ID: <20111213214735.89C2682210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50481:b673742c84f1 Date: 2011-12-13 22:47 +0100 http://bitbucket.org/pypy/pypy/changeset/b673742c84f1/ Log: Found out why the test fails, and document it. diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -17,6 +17,13 @@ g() log = self.run(main, [500]) + # XXX XXX this test fails so far because of a detail that + # changed with jit-simplify-backendintf. We should try to + # think of a way to be more resistent against such details. + # The issue is that we now get one Tracing, then go back + # to the interpreter hoping to immediately run the JITted + # code; but instead, we Trace again, just because another + # counter was also about to reach its limit... loop, = log.loops_by_filename(self.filepath, is_entry_bridge='*') assert loop.match_by_id("generator", """ ... From noreply at buildbot.pypy.org Tue Dec 13 23:15:53 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Tue, 13 Dec 2011 23:15:53 +0100 (CET) Subject: [pypy-commit] pypy numpy-identity: Added numpy.identity function Message-ID: <20111213221553.1399A82210@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: numpy-identity Changeset: r50482:2ff54fec76e7 Date: 2011-12-13 17:15 -0500 http://bitbucket.org/pypy/pypy/changeset/2ff54fec76e7/ Log: Added numpy.identity function diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -81,6 +81,7 @@ 'mean': 'app_numpy.mean', 'sum': 'app_numpy.sum', 'min': 'app_numpy.min', + 'identity': 'app_numpy.identity', 'max': 'app_numpy.max', 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -13,6 +13,11 @@ # weighting, just the average part! 
return mean(a) +def identity(n, dtype=None): + a = numpypy.zeros((n,n), dtype=dtype) + for i in range(n): + a[i][i] = 1 + return a def mean(a): if not hasattr(a, "mean"): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -722,6 +722,37 @@ a = array([True] * 5, bool) assert a.sum() == 5 + def test_identity(self): + from numpypy import identity, array + from numpypy import int32, float64 + a = identity(0) + assert len(a) == 0 + assert repr(a.dtype) == "dtype('float64')" + assert a.shape == (0,0) + b = identity(1, dtype=int32) + assert len(b) == 1 + assert b[0][0] == 1 + assert b.shape == (1,1) + assert repr(b.dtype) == "dtype('int32')" + c = identity(2) + assert c.shape == (2,2) + assert c[0][0] == 1.0 + assert c[0][1] == 0.0 + assert c[1][0] == 0.0 + assert c[1][1] == 1.0 + d = identity(3, dtype='int32') + assert d.shape == (3,3) + assert repr(d.dtype) == "dtype('int32')" + assert d[0][0] == 1.0 + assert d[0][1] == 0.0 + assert d[0][2] == 0.0 + assert d[1][0] == 0.0 + assert d[1][1] == 1.0 + assert d[1][2] == 0.0 + assert d[2][0] == 0.0 + assert d[2][1] == 0.0 + assert d[2][2] == 1.0 + def test_prod(self): from numpypy import array a = array(range(1, 6)) From noreply at buildbot.pypy.org Tue Dec 13 23:41:34 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Tue, 13 Dec 2011 23:41:34 +0100 (CET) Subject: [pypy-commit] pypy numpy-identity: Made tests look nicer Message-ID: <20111213224134.118D682210@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: numpy-identity Changeset: r50483:506329de8bc3 Date: 2011-12-13 17:41 -0500 http://bitbucket.org/pypy/pypy/changeset/506329de8bc3/ Log: Made tests look nicer diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -724,34 +724,23 @@ def test_identity(self): from numpypy import identity, array - from numpypy import int32, float64 + from numpypy import int32, float64, dtype a = identity(0) assert len(a) == 0 - assert repr(a.dtype) == "dtype('float64')" + assert a.dtype == dtype('float64') assert a.shape == (0,0) b = identity(1, dtype=int32) assert len(b) == 1 assert b[0][0] == 1 assert b.shape == (1,1) - assert repr(b.dtype) == "dtype('int32')" + assert b.dtype == dtype('int32') c = identity(2) assert c.shape == (2,2) - assert c[0][0] == 1.0 - assert c[0][1] == 0.0 - assert c[1][0] == 0.0 - assert c[1][1] == 1.0 + assert (c == [[1,0],[0,1]]).all() d = identity(3, dtype='int32') assert d.shape == (3,3) - assert repr(d.dtype) == "dtype('int32')" - assert d[0][0] == 1.0 - assert d[0][1] == 0.0 - assert d[0][2] == 0.0 - assert d[1][0] == 0.0 - assert d[1][1] == 1.0 - assert d[1][2] == 0.0 - assert d[2][0] == 0.0 - assert d[2][1] == 0.0 - assert d[2][2] == 1.0 + assert d.dtype == dtype('int32') + assert (d == [[1,0,0],[0,1,0],[0,0,1]]).all() def test_prod(self): from numpypy import array From noreply at buildbot.pypy.org Wed Dec 14 00:44:41 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 14 Dec 2011 00:44:41 +0100 (CET) Subject: [pypy-commit] pypy default: (jterrace) merge numpy-identity branch, adding numpy.identity function Message-ID: <20111213234441.6976082210@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50484:3b81c36d2bd1 Date: 2011-12-14 01:43 +0200 
http://bitbucket.org/pypy/pypy/changeset/3b81c36d2bd1/ Log: (jterrace) merge numpy-identity branch, adding numpy.identity function diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -91,6 +91,7 @@ 'mean': 'app_numpy.mean', 'sum': 'app_numpy.sum', 'min': 'app_numpy.min', + 'identity': 'app_numpy.identity', 'max': 'app_numpy.max', 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -13,6 +13,11 @@ # weighting, just the average part! return mean(a) +def identity(n, dtype=None): + a = numpypy.zeros((n,n), dtype=dtype) + for i in range(n): + a[i][i] = 1 + return a def mean(a): if not hasattr(a, "mean"): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -727,6 +727,26 @@ a = array([True] * 5, bool) assert a.sum() == 5 + def test_identity(self): + from numpypy import identity, array + from numpypy import int32, float64, dtype + a = identity(0) + assert len(a) == 0 + assert a.dtype == dtype('float64') + assert a.shape == (0,0) + b = identity(1, dtype=int32) + assert len(b) == 1 + assert b[0][0] == 1 + assert b.shape == (1,1) + assert b.dtype == dtype('int32') + c = identity(2) + assert c.shape == (2,2) + assert (c == [[1,0],[0,1]]).all() + d = identity(3, dtype='int32') + assert d.shape == (3,3) + assert d.dtype == dtype('int32') + assert (d == [[1,0,0],[0,1,0],[0,0,1]]).all() + def test_prod(self): from numpypy import array a = array(range(1, 6)) From noreply at buildbot.pypy.org Wed Dec 14 00:44:42 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 14 Dec 2011 00:44:42 +0100 (CET) Subject: [pypy-commit] pypy numpy-identity: close branch Message-ID: <20111213234442.86C2682210@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-identity Changeset: r50485:b391b4c1ef21 Date: 2011-12-14 01:44 +0200 http://bitbucket.org/pypy/pypy/changeset/b391b4c1ef21/ Log: close branch From noreply at buildbot.pypy.org Wed Dec 14 03:09:56 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 14 Dec 2011 03:09:56 +0100 (CET) Subject: [pypy-commit] pypy default: fix for numpy tests on <64bit platforms Message-ID: <20111214020956.92C4282210@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50486:238ddc6d667c Date: 2011-12-13 21:09 -0500 http://bitbucket.org/pypy/pypy/changeset/238ddc6d667c/ Log: fix for numpy tests on <64bit platforms diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -259,22 +259,31 @@ assert numpy.uint16('65536') == 0 def test_int32(self): + import sys import numpypy as numpy x = numpy.int32(23) assert x == 23 assert numpy.int32(2147483647) == 2147483647 - assert numpy.int32(2147483648) == -2147483648 assert numpy.int32('2147483647') == 2147483647 - assert numpy.int32('2147483648') == -2147483648 + if sys.maxint > 2 ** 31 - 1: + assert numpy.int32(2147483648) == -2147483648 + assert numpy.int32('2147483648') == -2147483648 + else: + raises(OverflowError, numpy.int32, 2147483648) + raises(OverflowError, numpy.int32, '2147483648') def test_uint32(self): + 
import sys import numpypy as numpy - assert numpy.uint32(4294967295) == 4294967295 - assert numpy.uint32(4294967296) == 0 - assert numpy.uint32('4294967295') == 4294967295 - assert numpy.uint32('4294967296') == 0 + assert numpy.uint32(10) == 10 + + if sys.maxint > 2 ** 31 - 1: + assert numpy.uint32(4294967295) == 4294967295 + assert numpy.uint32(4294967296) == 0 + assert numpy.uint32('4294967295') == 4294967295 + assert numpy.uint32('4294967296') == 0 def test_int_(self): import numpypy as numpy @@ -294,10 +303,14 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 - assert numpy.int64(9223372036854775807) == 9223372036854775807 + if sys.maxint >= 2 ** 63 - 1: + assert numpy.int64(9223372036854775807) == 9223372036854775807 + assert numpy.int64('9223372036854775807') == 9223372036854775807 + else: + raises(OverflowError, numpy.int64, 9223372036854775807) + raises(OverflowError, numpy.int64, '9223372036854775807') + raises(OverflowError, numpy.int64, 9223372036854775808) - - assert numpy.int64('9223372036854775807') == 9223372036854775807 raises(OverflowError, numpy.int64, '9223372036854775808') def test_uint64(self): From noreply at buildbot.pypy.org Wed Dec 14 10:41:27 2011 From: noreply at buildbot.pypy.org (hager) Date: Wed, 14 Dec 2011 10:41:27 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: Added code for GUARD_EXCEPTION Message-ID: <20111214094127.3B9B582210@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50487:c8e31eebcc23 Date: 2011-12-14 10:41 +0100 http://bitbucket.org/pypy/pypy/changeset/c8e31eebcc23/ Log: Added code for GUARD_EXCEPTION diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -310,6 +310,38 @@ emit_cast_ptr_to_int = emit_same_as emit_cast_int_to_ptr = emit_same_as + def emit_guard_exception(self, op, arglocs, regalloc): + loc, loc1, resloc, pos_exc_value, pos_exception = arglocs[:5] + failargs = arglocs[5:] + self.mc.load_imm(loc1, pos_exception.value) + + self.mc.alloc_scratch_reg() + if IS_PPC_32: + self.mc.lwz(r.SCRATCH.value, loc1.value, 0) + self.mc.cmpw(0, r.SCRATCH.value, loc.value) + else: + self.mc.ld(r.SCRATCH.value, loc1.value, 0) + self.mc.cmpd(0, r.SCRATCH.value, loc.value) + self.mc.free_scratch_reg() + + self._emit_guard(op, failargs, c.EQ, save_exc=True) + self.mc.load_imm(loc, pos_exc_value.value) + + if resloc: + if IS_PPC_32: + self.mc.lwz(resloc.value, loc.value, 0) + else: + self.mc.ld(resloc.value, loc.value, 0) + + self.mc.alloc_scratch_reg(0) + if IS_PPC_32: + self.mc.stw(r.SCRATCH.value, loc.value, 0) + self.mc.stw(r.SCRATCH.value, loc1.value, 0) + else: + self.mc.sd(r.SCRATCH.value, loc.value, 0) + self.mc.sd(r.SCRATCH.value, loc1.value, 0) + self.mc.free_scratch_reg() + def emit_call(self, op, args, regalloc, force_index=-1): adr = args[0].value arglist = op.getarglist()[1:] diff --git a/pypy/jit/backend/ppc/ppcgen/regalloc.py b/pypy/jit/backend/ppc/ppcgen/regalloc.py --- a/pypy/jit/backend/ppc/ppcgen/regalloc.py +++ b/pypy/jit/backend/ppc/ppcgen/regalloc.py @@ -337,6 +337,26 @@ prepare_guard_overflow = prepare_guard_no_overflow + def prepare_guard_exception(self, op): + boxes = list(op.getarglist()) + arg0 = ConstInt(rffi.cast(lltype.Signed, op.getarg(0).getint())) + loc, box = self._ensure_value_is_boxed(arg0) + boxes.append(box) + box = TempInt() + loc1 = self.force_allocate_reg(box, boxes) + boxes.append(box) + if 
op.result in self.longevity: + resloc = self.force_allocate_reg(op.result, boxes) + boxes.append(op.result) + else: + resloc = None + pos_exc_value = imm(self.cpu.pos_exc_value()) + pos_exception = imm(self.cpu.pos_exception()) + arglocs = self._prepare_guard(op, [loc, loc1, resloc, pos_exc_value, pos_exception]) + self.possibly_free_vars(boxes) + self.possibly_free_vars(op.getfailargs()) + return arglocs + def prepare_guard_value(self, op): boxes = list(op.getarglist()) b0, b1 = boxes From noreply at buildbot.pypy.org Wed Dec 14 11:02:56 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 14 Dec 2011 11:02:56 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: start refactoring - change test_base and make it pass Message-ID: <20111214100256.4B23E82210@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50488:9d888e0a1e4b Date: 2011-12-14 12:02 +0200 http://bitbucket.org/pypy/pypy/changeset/9d888e0a1e4b/ Log: start refactoring - change test_base and make it pass diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -28,11 +28,6 @@ self.char = char self.w_box_type = w_box_type self.alternate_constructors = alternate_constructors - self.array_signature = signature.ArraySignature() - self.scalar_signature = signature.ScalarSignature() - self.forced_signature = signature.ForcedSignature() - #self.flatiter_signature = signature.FlatiterSignature() - #self.view_signature = signature.ViewSignature() def malloc(self, length): # XXX find out why test_zjit explodes with tracking of allocations diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -2,6 +2,13 @@ from pypy.rlib import jit from pypy.rlib.objectmodel import instantiate +class NumpyEvalFrame(object): + def __init__(self, iterators): + self.iterators = iterators + + def next(self, shapelen): + xxx + # Iterators for arrays # -------------------- # all those iterators with the exception of BroadcastIterator iterate over the diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -7,8 +7,7 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name from pypy.rlib.rstring import StringBuilder -from pypy.rlib.objectmodel import instantiate - +from pypy.module.micronumpy.interp_iter import NumpyEvalFrame, ArrayIterator numpy_driver = jit.JitDriver( greens=['shapelen', 'signature'], @@ -199,7 +198,7 @@ return new_strides class BaseArray(Wrappable): - _attrs_ = ["invalidates", "signature", "shape", "strides", "backstrides", + _attrs_ = ["invalidates", "shape", "strides", "backstrides", "start", 'order'] _immutable_fields_ = ['start', "order"] @@ -310,7 +309,7 @@ reds=['result', 'idx', 'i', 'self', 'cur_best', 'dtype'] ) def loop(self): - i = self.start_iter() + i = self.signature.create_iter(self, {}) cur_best = self.eval(i) shapelen = len(self.shape) i = i.next(shapelen) @@ -709,11 +708,19 @@ raise NotImplementedError def start_iter(self, res_shape=None): - raise NotImplementedError + all_iters = self.signature.create_iter(self, {}, res_shape) + return NumpyEvalFrame(all_iters) def descr_debug_repr(self, space): return 
space.wrap(self.signature.debug_repr()) + def find_sig(self): + """ find a correct signature for the array + """ + sig = self.create_sig() + sig.invent_numbering() + return signature.find_sig(sig) + def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): return w_obj @@ -739,7 +746,6 @@ BaseArray.__init__(self, [], 'C') self.dtype = dtype self.value = value - self.signature = dtype.scalar_signature def find_size(self): return 1 @@ -756,9 +762,6 @@ def eval(self, iter): return self.value - def start_iter(self, res_shape=None): - return ConstantIterator() - def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): builder.append(self.dtype.itemtype.str_format(self.value)) @@ -770,14 +773,16 @@ # so in order to have a consistent API, let it go through. pass + def create_sig(self): + return signature.ScalarSignature(self.dtype) + class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs """ - def __init__(self, signature, shape, res_dtype, order): + def __init__(self, shape, res_dtype, order): BaseArray.__init__(self, shape, order) self.forced_result = None - self.signature = signature self.res_dtype = res_dtype def _del_sources(self): @@ -786,10 +791,10 @@ def compute(self): i = 0 - signature = self.signature result_size = self.find_size() result = W_NDimArray(result_size, self.shape, self.find_dtype()) shapelen = len(self.shape) + xxx i = self.start_iter() ri = result.start_iter() while not ri.done(): @@ -805,7 +810,6 @@ def force_if_needed(self): if self.forced_result is None: self.forced_result = self.compute() - self.signature = self.find_dtype().forced_signature self._del_sources() def get_concrete(self): @@ -834,10 +838,11 @@ class Call1(VirtualArray): - def __init__(self, signature, shape, res_dtype, values, order): - VirtualArray.__init__(self, signature, shape, res_dtype, + def __init__(self, ufunc, shape, res_dtype, values, order): + VirtualArray.__init__(self, shape, res_dtype, values.order) self.values = values + self.ufunc = ufunc def _del_sources(self): self.values = None @@ -855,18 +860,16 @@ assert isinstance(sig, signature.Call1) return sig.unfunc(self.res_dtype, val) - def start_iter(self, res_shape=None): - if self.forced_result is not None: - return self.forced_result.start_iter(res_shape) - return Call1Iterator(self.values.start_iter(res_shape)) + def create_sig(self): + return signature.Call1(self.ufunc, self.values.create_sig()) class Call2(VirtualArray): """ Intermediate class for performing binary operations. 
""" - def __init__(self, signature, shape, calc_dtype, res_dtype, left, right): - # XXX do something if left.order != right.order - VirtualArray.__init__(self, signature, shape, res_dtype, left.order) + def __init__(self, ufunc, shape, calc_dtype, res_dtype, left, right): + VirtualArray.__init__(self, shape, res_dtype, left.order) + self.ufunc = ufunc self.left = left self.right = right self.calc_dtype = calc_dtype @@ -881,14 +884,6 @@ def _find_size(self): return self.size - def start_iter(self, res_shape=None): - if self.forced_result is not None: - return self.forced_result.start_iter(res_shape) - if res_shape is None: - res_shape = self.shape # we still force the shape on children - return Call2Iterator(self.left.start_iter(res_shape), - self.right.start_iter(res_shape)) - def _eval(self, iter): assert isinstance(iter, Call2Iterator) lhs = self.left.eval(iter.left).convert_to(self.calc_dtype) @@ -897,6 +892,10 @@ assert isinstance(sig, signature.Call2) return sig.binfunc(self.calc_dtype, lhs, rhs) + def create_sig(self): + return signature.Call2(self.ufunc, self.left.create_sig(), + self.right.create_sig()) + class ViewArray(BaseArray): """ Class for representing views of arrays, they will reflect changes of parent @@ -974,7 +973,6 @@ if isinstance(parent, W_NDimSlice): parent = parent.parent ViewArray.__init__(self, parent, strides, backstrides, shape) - self.signature = signature.find_sig(signature.ViewSignature(parent.signature)) self.start = start self.size = 1 for sh in shape: @@ -1005,12 +1003,12 @@ source_iter = source_iter.next(shapelen) res_iter = res_iter.next(shapelen) - def start_iter(self, res_shape=None): - if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - if len(self.shape) == 1: - return OneDimIterator(self.start, self.strides[0], self.shape[0]) - return ViewIterator(self) + # def start_iter(self, res_shape=None): + # if res_shape is not None and res_shape != self.shape: + # return BroadcastIterator(self, res_shape) + # if len(self.shape) == 1: + # return OneDimIterator(self.start, self.strides[0], self.shape[0]) + # return ViewIterator(self) def setitem(self, item, value): self.parent.setitem(item, value) @@ -1025,6 +1023,9 @@ a_iter = a_iter.next(len(array.shape)) return array + def create_sig(self): + return signature.ViewSignature(self.parent.create_sig()) + class W_NDimArray(BaseArray): """ A class representing contiguous array. 
We know that each iteration by say ufunc will increase the data index by one @@ -1034,7 +1035,6 @@ self.size = size self.dtype = dtype self.storage = dtype.malloc(size) - self.signature = dtype.array_signature def get_concrete(self): return self @@ -1073,17 +1073,20 @@ self.invalidated() self.dtype.setitem(self.storage, item, value) - def start_iter(self, res_shape=None): - if self.order == 'C': - if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - return ArrayIterator(self.size) - raise NotImplementedError # use ViewIterator simply, test it + # def start_iter(self, res_shape=None): + # if self.order == 'C': + # if res_shape is not None and res_shape != self.shape: + # return BroadcastIterator(self, res_shape) + # return ArrayIterator(self.size) + # raise NotImplementedError # use ViewIterator simply, test it def setshape(self, space, new_shape): self.shape = new_shape self.calc_strides(new_shape) + def create_sig(self): + return signature.ArraySignature(self.dtype) + def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) @@ -1134,11 +1137,11 @@ ) arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) shapelen = len(shape) - iters = arr.signature.create_iterator() - arr_iter = arr.start_iter(arr.shape) + arr_iter = ArrayIterator(arr) for i in range(len(elems_w)): w_elem = elems_w[i] - dtype.setitem(arr.storage, arr_iter.offset, dtype.coerce(space, w_elem)) + dtype.setitem(arr.storage, arr_iter.offset, + dtype.coerce(space, w_elem)) arr_iter = arr_iter.next(shapelen) return arr @@ -1241,14 +1244,12 @@ self.shapelen = len(arr.shape) self.arr = arr self.iter = self.start_iter() - self.signature = signature.find_sig(signature.FlatiterSignature( - arr.signature)) - def start_iter(self, res_shape=None): - if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - return OneDimIterator(self.arr.start, self.strides[0], - self.shape[0]) + # def start_iter(self, res_shape=None): + # if res_shape is not None and res_shape != self.shape: + # return BroadcastIterator(self, res_shape) + # return OneDimIterator(self.arr.start, self.strides[0], + # self.shape[0]) def find_dtype(self): return self.arr.find_dtype() diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -117,10 +117,7 @@ if isinstance(w_obj, Scalar): return self.func(res_dtype, w_obj.value.convert_to(res_dtype)) - new_sig = signature.find_sig(signature.Call1(self.func, - self.name, - w_obj.signature)) - w_res = Call1(new_sig, w_obj.shape, res_dtype, w_obj, w_obj.order) + w_res = Call1(self.func, w_obj.shape, res_dtype, w_obj, w_obj.order) w_obj.add_invalidates(w_res) return w_res @@ -158,12 +155,8 @@ w_rhs.value.convert_to(calc_dtype) ) - new_sig = signature.find_sig(signature.Call2(self.func, - self.name, - w_lhs.signature, - w_rhs.signature)) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) - w_res = Call2(new_sig, new_shape, calc_dtype, + w_res = Call2(self.func, new_shape, calc_dtype, res_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -1,7 +1,7 @@ from pypy.rlib.objectmodel import r_dict, compute_identity_hash, compute_hash from pypy.rlib.rarithmetic import intmask from 
pypy.module.micronumpy.interp_iter import ViewIterator, ArrayIterator, \ - BroadcastIterator, OneDimIterator + BroadcastIterator, OneDimIterator, ConstantIterator # def components_eq(lhs, rhs): @@ -31,14 +31,11 @@ return known_sigs.setdefault(sig, sig) class Signature(object): - def eq(self, other): - return self is other + def create_iter(self, array, cache, res_shape=None): + raise NotImplementedError - def hash(self): - return compute_hash(self) - - def create_iter(self, array, cache): - raise NotImplementedError + def invent_numbering(self): + pass # XXX class ViewSignature(Signature): def __init__(self, child): @@ -55,17 +52,33 @@ def debug_repr(self): return 'Slice(%s)' % self.child.debug_repr() - def create_iter(self, array, cache): - xxxx +class ArraySignature(Signature): + def __init__(self, dtype): + self.dtype = dtype -class ArraySignature(Signature): + def eq(self, other): + if type(self) is not type(other): + return False + return self.dtype is other.dtype + + def hash(self): + return compute_identity_hash(self.dtype) + def debug_repr(self): return 'Array' - def create_iter(self, array, cache): - xxx +class ScalarSignature(Signature): + def __init__(self, dtype): + self.dtype = dtype -class ScalarSignature(Signature): + def eq(self, other): + if type(self) is not type(other): + return False + return self.dtype is other.dtype + + def hash(self): + return compute_identity_hash(self.dtype) + def debug_repr(self): return 'Scalar' @@ -74,16 +87,15 @@ return 'FlatIter(%s)' % self.child.debug_repr() class Call1(Signature): - def __init__(self, func, name, child): + def __init__(self, func, child): self.unfunc = func - self.name = name self.child = child def hash(self): - return compute_hash(self.name) ^ self.child.hash() << 1 + return compute_identity_hash(self.unfunc) ^ self.child.hash() << 1 def eq(self, other): - if type(other) is not type(self): + if type(self) is not type(other): return False return self.unfunc is other.unfunc and self.child.eq(other.child) @@ -92,18 +104,17 @@ self.child.debug_repr()) class Call2(Signature): - def __init__(self, func, name, left, right): + def __init__(self, func, left, right): self.binfunc = func - self.name = name self.left = left self.right = right def hash(self): - return (compute_hash(self.name) ^ (self.left.hash() << 1) ^ + return (compute_identity_hash(self.binfunc) ^ (self.left.hash() << 1) ^ (self.right.hash() << 2)) def eq(self, other): - if type(other) is not type(self): + if type(self) is not type(other): return False return (self.binfunc is other.binfunc and self.left.eq(other.left) and self.right.eq(other.right)) @@ -113,36 +124,5 @@ self.left.debug_repr(), self.right.debug_repr()) -class ForcedSignature(Signature): - def debug_repr(self): - return 'Forced' - class ReduceSignature(Call2): pass - -# class Signature(BaseSignature): -# _known_sigs = r_dict(components_eq, components_hash) - -# _attrs_ = ["components"] -# _immutable_fields_ = ["components[*]"] - -# def __init__(self, components): -# self.components = components - -# @staticmethod -# def find_sig(components): -# return Signature._known_sigs.setdefault(components, Signature(components)) - -# class Call1(BaseSignature): -# _immutable_fields_ = ["func", "name"] - -# def __init__(self, func): -# self.func = func -# self.name = func.func_name - -# class Call2(BaseSignature): -# _immutable_fields_ = ["func", "name"] - -# def __init__(self, func): -# self.func = func -# self.name = func.func_name diff --git a/pypy/module/micronumpy/test/test_base.py 
b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -17,18 +17,18 @@ ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) - assert v1.signature is not v2.signature + assert v1.find_sig() is not v2.find_sig() v3 = ar.descr_add(space, Scalar(float64_dtype, 1.0)) - assert v2.signature is v3.signature + assert v2.find_sig() is v3.find_sig() v4 = ar.descr_add(space, ar) - assert v1.signature is v4.signature + assert v1.find_sig() is v4.find_sig() bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) - assert v5.signature is not v1.signature - assert v5.signature is not v2.signature + assert v5.find_sig() is not v1.find_sig() + assert v5.find_sig() is not v2.find_sig() v6 = ar.descr_add(space, bool_ar) - assert v5.signature is v6.signature + assert v5.find_sig() is v6.find_sig() def test_slice_signature(self, space): float64_dtype = get_dtype_cache(space).w_float64dtype @@ -36,11 +36,11 @@ ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) - assert v1.signature is v2.signature + assert v1.find_sig() is v2.find_sig() v3 = v2.descr_add(space, v1) v4 = v1.descr_add(space, v2) - assert v3.signature is v4.signature + assert v3.find_sig() is v4.find_sig() class TestUfuncCoerscion(object): def test_binops(self, space): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -8,9 +8,6 @@ class MockDtype(object): - array_signature = signature.ArraySignature() - scalar_signature = signature.ScalarSignature() - def malloc(self, size): return None From noreply at buildbot.pypy.org Wed Dec 14 11:28:21 2011 From: noreply at buildbot.pypy.org (hager) Date: Wed, 14 Dec 2011 11:28:21 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: remove debug output from compute_frame_depth Message-ID: <20111214102821.0916382210@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50489:a21c0f06db3e Date: 2011-12-14 11:17 +0100 http://bitbucket.org/pypy/pypy/changeset/a21c0f06db3e/ Log: remove debug output from compute_frame_depth diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -716,10 +716,6 @@ PARAMETER_AREA += MAX_REG_PARAMS * WORD SPILLING_AREA = regalloc.frame_manager.frame_depth * WORD - print "PARAMETER SAVE AREA = %d" % PARAMETER_AREA - print "SPILLING AREA = %d" % SPILLING_AREA - print "OFFSET TO ENCODING = %d" % (PARAMETER_AREA + SPILLING_AREA) - frame_depth = ( GPR_SAVE_AREA + FPR_SAVE_AREA + FLOAT_INT_CONVERSION From noreply at buildbot.pypy.org Wed Dec 14 11:28:22 2011 From: noreply at buildbot.pypy.org (hager) Date: Wed, 14 Dec 2011 11:28:22 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: add r2 to volatile registers Message-ID: <20111214102822.298F682210@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50490:1282dafdf468 Date: 2011-12-14 11:20 +0100 http://bitbucket.org/pypy/pypy/changeset/1282dafdf468/ Log: add r2 to volatile registers diff --git a/pypy/jit/backend/ppc/ppcgen/register.py 
b/pypy/jit/backend/ppc/ppcgen/register.py --- a/pypy/jit/backend/ppc/ppcgen/register.py +++ b/pypy/jit/backend/ppc/ppcgen/register.py @@ -14,7 +14,7 @@ NONVOLATILES = [r14, r15, r16, r17, r18, r19, r20, r21, r22, r23, r24, r25, r26, r27, r28, r29, r30, r31] -VOLATILES = [r0, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13] +VOLATILES = [r0, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13] NONVOLATILES_FLOAT = [f14, f15, f16, f17, f18, f19, f20, f21, f22, f23, f24, f25, f26, f27, f28, f29, f30, f31] From noreply at buildbot.pypy.org Wed Dec 14 11:28:23 2011 From: noreply at buildbot.pypy.org (hager) Date: Wed, 14 Dec 2011 11:28:23 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: make call to function before leave jitted code and save volatile regs Message-ID: <20111214102823.5358682210@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50491:cc34d3b179e4 Date: 2011-12-14 11:27 +0100 http://bitbucket.org/pypy/pypy/changeset/cc34d3b179e4/ Log: make call to function before leave jitted code and save volatile regs diff --git a/pypy/jit/backend/ppc/ppcgen/helper/assembler.py b/pypy/jit/backend/ppc/ppcgen/helper/assembler.py --- a/pypy/jit/backend/ppc/ppcgen/helper/assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/helper/assembler.py @@ -122,3 +122,28 @@ space = (6 + MAX_REG_PARAMS + len(self.regs)) * WORD self.mc.addi(r.SP.value, r.SP.value, space) +class Saved_Volatiles(object): + """ used in _gen_leave_jitted_hook_code to save volatile registers + in ENCODING AREA around calls + """ + + def __init__(self, codebuilder): + self.mc = codebuilder + + def __enter__(self): + """ before a call, volatile registers are saved in ENCODING AREA + """ + for i, reg in enumerate(r.VOLATILES): + if IS_PPC_32: + self.mc.stw(reg.value, r.SPP.value, i * WORD) + else: + self.mc.std(reg.value, r.SPP.value, i * WORD) + + def __exit__(self, *args): + """ after call, volatile registers have to be restored + """ + for i, reg in enumerate(r.VOLATILES): + if IS_PPC_32: + self.mc.lwz(reg.value, r.SPP.value, i * WORD) + else: + self.mc.ld(reg.value, r.SPP.value, i * WORD) diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -17,7 +17,8 @@ from pypy.jit.backend.ppc.ppcgen.helper.assembler import (gen_emit_cmp_op, encode32, decode32, decode64, - count_reg_args) + count_reg_args, + Saved_Volatiles) import pypy.jit.backend.ppc.ppcgen.register as r import pypy.jit.backend.ppc.ppcgen.condition as c from pypy.jit.metainterp.history import (Const, ConstPtr, LoopToken, @@ -292,6 +293,11 @@ def _gen_leave_jitted_hook_code(self, save_exc=False): mc = PPCBuilder() + + with Saved_Volatiles(mc): + addr = self.cpu.get_on_leave_jitted_int(save_exception=save_exc) + mc.bl_abs(addr) + mc.b_abs(self.exit_code_adr) mc.prepare_insts_blocks() return mc.materialize(self.cpu.asmmemmgr, [], @@ -505,7 +511,7 @@ self.memcpy_addr = self.cpu.cast_ptr_to_int(memcpy_fn) self.setup_failure_recovery() self.exit_code_adr = self._gen_exit_path() - #self._leave_jitted_hook_save_exc = self._gen_leave_jitted_hook_code(True) + self._leave_jitted_hook_save_exc = self._gen_leave_jitted_hook_code(True) self._leave_jitted_hook = self._gen_leave_jitted_hook_code(False) def assemble_loop(self, inputargs, operations, looptoken, log): From noreply at buildbot.pypy.org Wed Dec 14 11:28:52 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 14 Dec 2011 11:28:52 +0100 (CET) 
Subject: [pypy-commit] pypy refactor-signature: invent numbering Message-ID: <20111214102852.A4DA082210@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50492:3edbb810b5a0 Date: 2011-12-14 12:28 +0200 http://bitbucket.org/pypy/pypy/changeset/3edbb810b5a0/ Log: invent numbering diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -35,7 +35,36 @@ raise NotImplementedError def invent_numbering(self): - pass # XXX + cache = r_dict(sigeq, sighash) + self._invent_numbering(cache) + + def _invent_numbering(self, cache): + try: + no = cache[self] + except KeyError: + no = len(cache) + cache[self] = no + self.iter_no = no + +class ConcreteSignature(Signature): + def __init__(self, dtype): + self.dtype = dtype + + def eq(self, other): + if type(self) is not type(other): + return False + return self.dtype is other.dtype + + def hash(self): + return compute_identity_hash(self.dtype) + +class ArraySignature(ConcreteSignature): + def debug_repr(self): + return 'Array' + +class ScalarSignature(ConcreteSignature): + def debug_repr(self): + return 'Scalar' class ViewSignature(Signature): def __init__(self, child): @@ -52,36 +81,6 @@ def debug_repr(self): return 'Slice(%s)' % self.child.debug_repr() -class ArraySignature(Signature): - def __init__(self, dtype): - self.dtype = dtype - - def eq(self, other): - if type(self) is not type(other): - return False - return self.dtype is other.dtype - - def hash(self): - return compute_identity_hash(self.dtype) - - def debug_repr(self): - return 'Array' - -class ScalarSignature(Signature): - def __init__(self, dtype): - self.dtype = dtype - - def eq(self, other): - if type(self) is not type(other): - return False - return self.dtype is other.dtype - - def hash(self): - return compute_identity_hash(self.dtype) - - def debug_repr(self): - return 'Scalar' - class FlatiterSignature(ViewSignature): def debug_repr(self): return 'FlatIter(%s)' % self.child.debug_repr() @@ -103,6 +102,9 @@ return 'Call1(%s, %s)' % (self.name, self.child.debug_repr()) + def _invent_numbering(self, cache): + self.values._invent_numbering(cache) + class Call2(Signature): def __init__(self, func, left, right): self.binfunc = func @@ -119,6 +121,10 @@ return (self.binfunc is other.binfunc and self.left.eq(other.left) and self.right.eq(other.right)) + def _invent_numbering(self, cache): + self.left._invent_numbering(cache) + self.right._invent_numbering(cache) + def debug_repr(self): return 'Call2(%s, %s, %s)' % (self.name, self.left.debug_repr(), diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -17,9 +17,14 @@ ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) - assert v1.find_sig() is not v2.find_sig() + sig1 = v1.find_sig() + sig2 = v2.find_sig() + assert v1 is not v2 + assert sig1.left.iter_no == sig1.right.iter_no + assert sig2.left.iter_no != sig2.right.iter_no v3 = ar.descr_add(space, Scalar(float64_dtype, 1.0)) - assert v2.find_sig() is v3.find_sig() + sig3 = v3.find_sig() + assert sig2 is sig3 v4 = ar.descr_add(space, ar) assert v1.find_sig() is v4.find_sig() From noreply at buildbot.pypy.org Wed Dec 14 11:41:55 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 14 Dec 2011 11:41:55 +0100 (CET) 
Subject: [pypy-commit] pypy refactor-signature: some more tests Message-ID: <20111214104155.BF0DF82210@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50493:f2e26827d79c Date: 2011-12-14 12:41 +0200 http://bitbucket.org/pypy/pypy/changeset/f2e26827d79c/ Log: some more tests diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -34,6 +34,11 @@ assert v5.find_sig() is not v2.find_sig() v6 = ar.descr_add(space, bool_ar) assert v5.find_sig() is v6.find_sig() + v7 = v6.descr_add(space, v6) + sig7 = v7.find_sig() + assert sig7.left.left.iter_no == sig7.right.left.iter_no + assert sig7.left.left.iter_no != sig7.right.right.iter_no + assert sig7.left.right.iter_no == sig7.right.right.iter_no def test_slice_signature(self, space): float64_dtype = get_dtype_cache(space).w_float64dtype From noreply at buildbot.pypy.org Wed Dec 14 11:44:07 2011 From: noreply at buildbot.pypy.org (hager) Date: Wed, 14 Dec 2011 11:44:07 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: removed bug in emit_guard_exception Message-ID: <20111214104407.774A382210@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50494:91d134f49a07 Date: 2011-12-14 11:43 +0100 http://bitbucket.org/pypy/pypy/changeset/91d134f49a07/ Log: removed bug in emit_guard_exception diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -324,7 +324,7 @@ self.mc.cmpd(0, r.SCRATCH.value, loc.value) self.mc.free_scratch_reg() - self._emit_guard(op, failargs, c.EQ, save_exc=True) + self._emit_guard(op, failargs, c.NE, save_exc=True) self.mc.load_imm(loc, pos_exc_value.value) if resloc: From noreply at buildbot.pypy.org Wed Dec 14 11:50:58 2011 From: noreply at buildbot.pypy.org (hager) Date: Wed, 14 Dec 2011 11:50:58 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: implemented GUARD_NO_EXCEPTION Message-ID: <20111214105058.DEC7282210@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50495:1828b538ddf7 Date: 2011-12-14 11:50 +0100 http://bitbucket.org/pypy/pypy/changeset/1828b538ddf7/ Log: implemented GUARD_NO_EXCEPTION diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -310,6 +310,19 @@ emit_cast_ptr_to_int = emit_same_as emit_cast_int_to_ptr = emit_same_as + def emit_guard_no_exception(self, op, arglocs, regalloc): + loc = arglocs[0] + failargs = arglocs[1:] + + if IS_PPC_32: + self.mc.lwz(loc.value, loc.value, 0) + self.mc.cmpwi(0, loc.value, 0) + else: + self.mc.ld(loc.value, loc.value, 0) + self.mc.cmpdi(0, loc.value, 0) + + self._emit_guard(op, failargs, c.NE, save_exc=True) + def emit_guard_exception(self, op, arglocs, regalloc): loc, loc1, resloc, pos_exc_value, pos_exception = arglocs[:5] failargs = arglocs[5:] diff --git a/pypy/jit/backend/ppc/ppcgen/regalloc.py b/pypy/jit/backend/ppc/ppcgen/regalloc.py --- a/pypy/jit/backend/ppc/ppcgen/regalloc.py +++ b/pypy/jit/backend/ppc/ppcgen/regalloc.py @@ -357,6 +357,14 @@ self.possibly_free_vars(op.getfailargs()) return arglocs + def prepare_guard_no_exception(self, op): + loc, box = self._ensure_value_is_boxed( + ConstInt(self.cpu.pos_exception())) + 
arglocs = self._prepare_guard(op, [loc]) + self.possibly_free_var(box) + self.possibly_free_vars(op.getfailargs()) + return arglocs + def prepare_guard_value(self, op): boxes = list(op.getarglist()) b0, b1 = boxes From noreply at buildbot.pypy.org Wed Dec 14 12:22:59 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 14 Dec 2011 12:22:59 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: refactor up to a nice point of segmentation fault Message-ID: <20111214112259.6533382210@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50496:8fc2393b390b Date: 2011-12-14 13:22 +0200 http://bitbucket.org/pypy/pypy/changeset/8fc2393b390b/ Log: refactor up to a nice point of segmentation fault diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -2,13 +2,6 @@ from pypy.rlib import jit from pypy.rlib.objectmodel import instantiate -class NumpyEvalFrame(object): - def __init__(self, iterators): - self.iterators = iterators - - def next(self, shapelen): - xxx - # Iterators for arrays # -------------------- # all those iterators with the exception of BroadcastIterator iterate over the @@ -159,38 +152,6 @@ def get_offset(self): return self.offset -class Call2Iterator(BaseIterator): - def __init__(self, left, right): - self.left = left - self.right = right - - def next(self, shapelen): - return Call2Iterator(self.left.next(shapelen), - self.right.next(shapelen)) - - def done(self): - if isinstance(self.left, ConstantIterator): - return self.right.done() - return self.left.done() - - def get_offset(self): - if isinstance(self.left, ConstantIterator): - return self.right.get_offset() - return self.left.get_offset() - -class Call1Iterator(BaseIterator): - def __init__(self, child): - self.child = child - - def next(self, shapelen): - return Call1Iterator(self.child.next(shapelen)) - - def done(self): - return self.child.done() - - def get_offset(self): - return self.child.get_offset() - class ConstantIterator(BaseIterator): def next(self, shapelen): return self diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -7,11 +7,12 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name from pypy.rlib.rstring import StringBuilder -from pypy.module.micronumpy.interp_iter import NumpyEvalFrame, ArrayIterator +from pypy.module.micronumpy.interp_iter import ArrayIterator numpy_driver = jit.JitDriver( greens=['shapelen', 'signature'], - reds=['result_size', 'i', 'ri', 'self', 'result'] + virtualizables=['frame'], + reds=['result_size', 'frame', 'ri', 'self', 'result'] ) all_driver = jit.JitDriver( greens=['shapelen', 'signature'], @@ -759,9 +760,6 @@ def getitem(self, item): raise NotImplementedError - def eval(self, iter): - return self.value - def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): builder.append(self.dtype.itemtype.str_format(self.value)) @@ -790,20 +788,22 @@ raise NotImplementedError def compute(self): - i = 0 result_size = self.find_size() result = W_NDimArray(result_size, self.shape, self.find_dtype()) shapelen = len(self.shape) - xxx - i = self.start_iter() - ri = result.start_iter() + signature = self.find_sig() + frame = signature.create_frame(self) + ri = ArrayIterator(result) while not ri.done(): 
numpy_driver.jit_merge_point(signature=signature, shapelen=shapelen, - result_size=result_size, i=i, ri=ri, + result_size=result_size, + frame=frame, + ri=ri, self=self, result=result) - result.dtype.setitem(result.storage, ri.offset, self.eval(i)) - i = i.next(shapelen) + result.dtype.setitem(result.storage, ri.offset, + signature.eval(frame, self)) + frame.next(shapelen) ri = ri.next(shapelen) return result @@ -816,11 +816,6 @@ self.force_if_needed() return self.forced_result - def eval(self, iter): - if self.forced_result is not None: - return self.forced_result.eval(iter) - return self._eval(iter) - def getitem(self, item): return self.get_concrete().getitem(item) @@ -853,14 +848,9 @@ def _find_dtype(self): return self.res_dtype - def _eval(self, iter): - assert isinstance(iter, Call1Iterator) - val = self.values.eval(iter.child).convert_to(self.res_dtype) - sig = jit.promote(self.signature) - assert isinstance(sig, signature.Call1) - return sig.unfunc(self.res_dtype, val) - def create_sig(self): + if self.forced_result is not None: + return self.forced_result.create_sig() return signature.Call1(self.ufunc, self.values.create_sig()) class Call2(VirtualArray): @@ -884,15 +874,9 @@ def _find_size(self): return self.size - def _eval(self, iter): - assert isinstance(iter, Call2Iterator) - lhs = self.left.eval(iter.left).convert_to(self.calc_dtype) - rhs = self.right.eval(iter.right).convert_to(self.calc_dtype) - sig = jit.promote(self.signature) - assert isinstance(sig, signature.Call2) - return sig.binfunc(self.calc_dtype, lhs, rhs) - def create_sig(self): + if self.forced_result is not None: + return self.forced_result.create_sig() return signature.Call2(self.ufunc, self.left.create_sig(), self.right.create_sig()) @@ -1048,9 +1032,6 @@ def getitem(self, item): return self.dtype.getitem(self.storage, item) - def eval(self, iter): - return self.dtype.getitem(self.storage, iter.get_offset()) - def copy(self): array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) rffi.c_memcpy( diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -1,8 +1,7 @@ -from pypy.rlib.objectmodel import r_dict, compute_identity_hash, compute_hash -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.objectmodel import r_dict, compute_identity_hash from pypy.module.micronumpy.interp_iter import ViewIterator, ArrayIterator, \ BroadcastIterator, OneDimIterator, ConstantIterator - +from pypy.rlib.jit import hint, unroll_safe # def components_eq(lhs, rhs): # if len(lhs) != len(rhs): @@ -30,10 +29,19 @@ def find_sig(sig): return known_sigs.setdefault(sig, sig) +class NumpyEvalFrame(object): + _virtualizable2_ = ['iterators[*]'] + + def __init__(self, iterators): + self = hint(self, access_directly=True) + self.iterators = iterators + + @unroll_safe + def next(self, shapelen): + for i in range(len(self.iterators)): + self.iterators[i] = self.iterators[i].next(shapelen) + class Signature(object): - def create_iter(self, array, cache, res_shape=None): - raise NotImplementedError - def invent_numbering(self): cache = r_dict(sigeq, sighash) self._invent_numbering(cache) @@ -44,7 +52,12 @@ except KeyError: no = len(cache) cache[self] = no - self.iter_no = no + self.iter_no = no + + def create_frame(self, arr, res_shape=None): + iterlist = [] + self._create_iter(iterlist, arr, res_shape) + return NumpyEvalFrame(iterlist) class ConcreteSignature(Signature): def __init__(self, dtype): @@ 
-62,10 +75,27 @@ def debug_repr(self): return 'Array' + def _create_iter(self, iterlist, arr, res_shape): + if self.iter_no >= len(iterlist): + iter = ArrayIterator(arr) + iterlist.append(iter) + + def eval(self, frame, arr): + iter = frame.iterators[self.iter_no] + return arr.dtype.getitem(arr.storage, iter.offset) + class ScalarSignature(ConcreteSignature): def debug_repr(self): return 'Scalar' + def _create_iter(self, iterlist, arr, res_shape): + if self.iter_no >= len(iterlist): + iter = ConstantIterator() + iterlist.append(iter) + + def eval(self, frame, arr): + return arr.value + class ViewSignature(Signature): def __init__(self, child): self.child = child @@ -81,10 +111,18 @@ def debug_repr(self): return 'Slice(%s)' % self.child.debug_repr() + def _create_iter(self, iterlist, arr, res_shape): + if self.iter_no >= len(iterlist): + iter = ViewIterator(arr) + iterlist.append(iter) + class FlatiterSignature(ViewSignature): def debug_repr(self): return 'FlatIter(%s)' % self.child.debug_repr() + def _create_iter(self, iterlist, arr, res_shape): + XXX + class Call1(Signature): def __init__(self, func, child): self.unfunc = func @@ -105,6 +143,13 @@ def _invent_numbering(self, cache): self.values._invent_numbering(cache) + def _create_iter(self, iterlist, arr, res_shape): + self.child._create_iter(iterlist, arr.values, res_shape) + + def eval(self, frame, arr): + v = self.child.eval(frame, arr.values).convert_to(arr.res_dtype) + return self.unfunc(arr.res_dtype, v) + class Call2(Signature): def __init__(self, func, left, right): self.binfunc = func @@ -125,6 +170,15 @@ self.left._invent_numbering(cache) self.right._invent_numbering(cache) + def _create_iter(self, iterlist, arr, res_shape): + self.left._create_iter(iterlist, arr.left, res_shape) + self.right._create_iter(iterlist, arr.right, res_shape) + + def eval(self, frame, arr): + lhs = self.left.eval(frame, arr.left).convert_to(arr.calc_dtype) + rhs = self.right.eval(frame, arr.right).convert_to(arr.calc_dtype) + return self.binfunc(arr.calc_dtype, lhs, rhs) + def debug_repr(self): return 'Call2(%s, %s, %s)' % (self.name, self.left.debug_repr(), diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -39,6 +39,8 @@ assert sig7.left.left.iter_no == sig7.right.left.iter_no assert sig7.left.left.iter_no != sig7.right.right.iter_no assert sig7.left.right.iter_no == sig7.right.right.iter_no + v1.forced_result = ar + assert v1.find_sig() is not sig1 def test_slice_signature(self, space): float64_dtype = get_dtype_cache(space).w_float64dtype From noreply at buildbot.pypy.org Wed Dec 14 12:26:03 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 14 Dec 2011 12:26:03 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: oomph Message-ID: <20111214112603.3A1BA82210@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50497:67cba7342bdb Date: 2011-12-14 13:25 +0200 http://bitbucket.org/pypy/pypy/changeset/67cba7342bdb/ Log: oomph diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -793,7 +793,7 @@ shapelen = len(self.shape) signature = self.find_sig() frame = signature.create_frame(self) - ri = ArrayIterator(result) + ri = ArrayIterator(result_size) while not ri.done(): 
numpy_driver.jit_merge_point(signature=signature, shapelen=shapelen, diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -77,7 +77,7 @@ def _create_iter(self, iterlist, arr, res_shape): if self.iter_no >= len(iterlist): - iter = ArrayIterator(arr) + iter = ArrayIterator(arr.size) iterlist.append(iter) def eval(self, frame, arr): From noreply at buildbot.pypy.org Wed Dec 14 12:35:05 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Dec 2011 12:35:05 +0100 (CET) Subject: [pypy-commit] pypy default: Fix, as shown by test_zll_stress. Message-ID: <20111214113505.E45C482210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50498:0fe404e7740d Date: 2011-12-14 12:17 +0100 http://bitbucket.org/pypy/pypy/changeset/0fe404e7740d/ Log: Fix, as shown by test_zll_stress. diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -38,6 +38,7 @@ from pypy.jit.backend.x86.jump import remap_frame_layout from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter import longlong +from pypy.rlib.rarithmetic import intmask # darwin requires the stack to be 16 bytes aligned on calls. Same for gcc 4.5.0, # better safe than sorry @@ -855,6 +856,18 @@ else: self.mc.POP(loc) + def regalloc_immedmem2mem(self, from_loc, to_loc): + # move a ConstFloatLoc directly to a StackLoc, as two MOVs + # (even on x86-64, because the immediates are encoded as 32 bits) + assert isinstance(from_loc, ConstFloatLoc) + assert isinstance(to_loc, StackLoc) + low_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[0] + high_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[1] + low_part = intmask(low_part) + high_part = intmask(high_part) + self.mc.MOV_bi(to_loc.value, low_part) + self.mc.MOV_bi(to_loc.value + 4, high_part) + def regalloc_perform(self, op, arglocs, resloc): genop_list[op.getopnum()](self, op, arglocs, resloc) diff --git a/pypy/jit/backend/x86/jump.py b/pypy/jit/backend/x86/jump.py --- a/pypy/jit/backend/x86/jump.py +++ b/pypy/jit/backend/x86/jump.py @@ -1,6 +1,6 @@ import sys from pypy.tool.pairtype import extendabletype -from pypy.jit.backend.x86.regloc import ImmedLoc, StackLoc +from pypy.jit.backend.x86.regloc import ImmediateAssemblerLocation, StackLoc def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg): pending_dests = len(dst_locations) @@ -12,7 +12,7 @@ srccount[key] = 0 for i in range(len(dst_locations)): src = src_locations[i] - if isinstance(src, ImmedLoc): + if isinstance(src, ImmediateAssemblerLocation): continue key = src._getregkey() if key in srccount: @@ -31,7 +31,7 @@ srccount[key] = -1 # means "it's done" pending_dests -= 1 src = src_locations[i] - if not isinstance(src, ImmedLoc): + if not isinstance(src, ImmediateAssemblerLocation): key = src._getregkey() if key in srccount: srccount[key] -= 1 @@ -66,6 +66,9 @@ def _move(assembler, src, dst, tmpreg): if dst.is_memory_reference() and src.is_memory_reference(): + if isinstance(src, ImmediateAssemblerLocation): + assembler.regalloc_immedmem2mem(src, dst) + return if tmpreg is None: assembler.regalloc_push(src) assembler.regalloc_pop(dst) diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -44,6 +44,10 @@ _location_code = 'b' def __init__(self, 
position, ebp_offset, num_words, type): + # _getregkey() returns self.value; the value returned must not + # conflict with RegLoc._getregkey(). It doesn't a bit by chance, + # so let it fail the following assert if it no longer does. + assert not (0 <= ebp_offset < 8 + 8 * IS_X86_64) self.position = position self.value = ebp_offset self.width = num_words * WORD @@ -91,7 +95,10 @@ else: return eax -class ImmedLoc(AssemblerLocation): +class ImmediateAssemblerLocation(AssemblerLocation): + _immutable_ = True + +class ImmedLoc(ImmediateAssemblerLocation): _immutable_ = True width = WORD _location_code = 'i' @@ -179,7 +186,7 @@ raise AssertionError(self._location_code) return result -class ConstFloatLoc(AssemblerLocation): +class ConstFloatLoc(ImmediateAssemblerLocation): # XXX: We have to use this class instead of just AddressLoc because # we want a width of 8 (... I think. Check this!) _immutable_ = True @@ -193,7 +200,7 @@ return '' % (self.value,) if IS_X86_32: - class FloatImmedLoc(AssemblerLocation): + class FloatImmedLoc(ImmediateAssemblerLocation): # This stands for an immediate float. It cannot be directly used in # any assembler instruction. Instead, it is meant to be decomposed # in two 32-bit halves. On 64-bit, FloatImmedLoc() is a function From noreply at buildbot.pypy.org Wed Dec 14 12:35:07 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Dec 2011 12:35:07 +0100 (CET) Subject: [pypy-commit] pypy default: Refactor away the 'width' attribute on AssemblerLocation. Message-ID: <20111214113507.2D3DF82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50499:1665fe50f5da Date: 2011-12-14 12:30 +0100 http://bitbucket.org/pypy/pypy/changeset/1665fe50f5da/ Log: Refactor away the 'width' attribute on AssemblerLocation. 
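The refactoring below replaces the stored 'width' attribute with a get_width() method on each location class, so the width is derived from data the location already carries instead of being passed in separately and kept in sync by hand. A minimal sketch of the pattern, with stand-in constants rather than the real x86 backend definitions:

    WORD = 4           # assumption for the sketch: 32-bit word size
    FLOAT = 'f'        # stand-in for the backend's FLOAT type tag

    class StackLoc(object):
        def __init__(self, position, ebp_offset, type):
            self.position = position
            self.value = ebp_offset
            self.type = type            # one of INT, REF, FLOAT

        def get_width(self):
            # a float stack slot is always 8 bytes, everything else one word
            if self.type == FLOAT:
                return 8
            return WORD

    # call sites then ask the location instead of reading an attribute:
    #     p += loc.get_width()     # previously: p += round_up_to_4(loc.width)
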
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -838,7 +838,7 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.SUB_ri(esp.value, 8) # = size of doubles self.mc.MOVSD_sx(0, loc.value) - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.PUSH_b(get_ebp_ofs(loc.position)) self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) @@ -849,7 +849,7 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.MOVSD_xs(loc.value, 0) self.mc.ADD_ri(esp.value, 8) # = size of doubles - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.POP_b(get_ebp_ofs(loc.position + 1)) self.mc.POP_b(get_ebp_ofs(loc.position)) @@ -1019,18 +1019,18 @@ self.mc.MOVSD_sx(p, loc.value) else: self.mc.MOV_sr(p, loc.value) - p += round_up_to_4(loc.width) + p += loc.get_width() p = 0 for i in range(start, n): loc = arglocs[i] if not isinstance(loc, RegLoc): - if loc.width == 8: + if loc.get_width() == 8: self.mc.MOVSD(xmm0, loc) self.mc.MOVSD_sx(p, xmm0.value) else: self.mc.MOV(tmp, loc) self.mc.MOV_sr(p, tmp.value) - p += round_up_to_4(loc.width) + p += loc.get_width() self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) @@ -2083,7 +2083,7 @@ argtypes=op.getdescr().get_arg_types(), callconv=op.getdescr().get_call_conv()) - if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: + if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.type == FLOAT: # a float or a long long return if op.getdescr().get_return_type() == 'L': self.mc.MOV_br(resloc.value, eax.value) # long long @@ -2568,11 +2568,6 @@ num = getattr(rop, opname.upper()) genop_list[num] = value -def round_up_to_4(size): - if size < 4: - return 4 - return size - # XXX: ri386 migration shims: def addr_add(reg_or_imm1, reg_or_imm2, offset=0, scale=0): return AddressLoc(reg_or_imm1, reg_or_imm2, scale, offset) diff --git a/pypy/jit/backend/x86/jump.py b/pypy/jit/backend/x86/jump.py --- a/pypy/jit/backend/x86/jump.py +++ b/pypy/jit/backend/x86/jump.py @@ -94,7 +94,7 @@ dstloc = dst_locations2[i] if isinstance(loc, StackLoc): key = loc._getregkey() - if (key in dst_keys or (loc.width > WORD and + if (key in dst_keys or (loc.get_width() > WORD and (key + WORD) in dst_keys)): assembler.regalloc_push(loc) extrapushes.append(dstloc) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -130,9 +130,9 @@ @staticmethod def frame_pos(i, box_type): if IS_X86_32 and box_type == FLOAT: - return StackLoc(i, get_ebp_ofs(i+1), 2, box_type) + return StackLoc(i, get_ebp_ofs(i+1), box_type) else: - return StackLoc(i, get_ebp_ofs(i), 1, box_type) + return StackLoc(i, get_ebp_ofs(i), box_type) @staticmethod def frame_size(box_type): if IS_X86_32 and box_type == FLOAT: diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -16,8 +16,7 @@ # class AssemblerLocation(object): - # XXX: Is adding "width" here correct? 
- _attrs_ = ('value', 'width', '_location_code') + _attrs_ = ('value', '_location_code') _immutable_ = True def _getregkey(self): return self.value @@ -28,6 +27,9 @@ def location_code(self): return self._location_code + def get_width(self): + raise NotImplementedError + def value_r(self): return self.value def value_b(self): return self.value def value_s(self): return self.value @@ -43,17 +45,21 @@ _immutable_ = True _location_code = 'b' - def __init__(self, position, ebp_offset, num_words, type): + def __init__(self, position, ebp_offset, type): # _getregkey() returns self.value; the value returned must not # conflict with RegLoc._getregkey(). It doesn't a bit by chance, # so let it fail the following assert if it no longer does. assert not (0 <= ebp_offset < 8 + 8 * IS_X86_64) self.position = position self.value = ebp_offset - self.width = num_words * WORD # One of INT, REF, FLOAT self.type = type + def get_width(self): + if self.type == FLOAT: + return 8 + return WORD + def __repr__(self): return '%d(%%ebp)' % (self.value,) @@ -67,10 +73,8 @@ self.value = regnum self.is_xmm = is_xmm if self.is_xmm: - self.width = 8 self._location_code = 'x' else: - self.width = WORD self._location_code = 'r' def __repr__(self): if self.is_xmm: @@ -78,6 +82,11 @@ else: return rx86.R.names[self.value] + def get_width(self): + if self.is_xmm: + return 8 + return WORD + def lowest8bits(self): assert not self.is_xmm return RegLoc(rx86.low_byte(self.value), False) @@ -100,7 +109,6 @@ class ImmedLoc(ImmediateAssemblerLocation): _immutable_ = True - width = WORD _location_code = 'i' def __init__(self, value): @@ -111,6 +119,9 @@ def getint(self): return self.value + def get_width(self): + return WORD + def __repr__(self): return "ImmedLoc(%d)" % (self.value) @@ -123,7 +134,6 @@ class AddressLoc(AssemblerLocation): _immutable_ = True - width = WORD # The address is base_loc + (scaled_loc << scale) + static_offset def __init__(self, base_loc, scaled_loc, scale=0, static_offset=0): assert 0 <= scale < 4 @@ -152,6 +162,9 @@ info = getattr(self, attr, '?') return '' % (self._location_code, info) + def get_width(self): + return WORD + def value_a(self): return self.loc_a @@ -187,15 +200,15 @@ return result class ConstFloatLoc(ImmediateAssemblerLocation): - # XXX: We have to use this class instead of just AddressLoc because - # we want a width of 8 (... I think. Check this!) _immutable_ = True - width = 8 _location_code = 'j' def __init__(self, address): self.value = address + def get_width(self): + return 8 + def __repr__(self): return '' % (self.value,) @@ -206,12 +219,14 @@ # in two 32-bit halves. On 64-bit, FloatImmedLoc() is a function # instead; see below. _immutable_ = True - width = 8 _location_code = '#' # don't use me def __init__(self, floatstorage): self.aslonglong = floatstorage + def get_width(self): + return 8 + def low_part(self): return intmask(self.aslonglong) From noreply at buildbot.pypy.org Wed Dec 14 13:01:27 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 14 Dec 2011 13:01:27 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: enough to make test_sum pass. Reduce seems to nicely work Message-ID: <20111214120127.227A082210@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50500:a3a1fac3df73 Date: 2011-12-14 14:00 +0200 http://bitbucket.org/pypy/pypy/changeset/a3a1fac3df73/ Log: enough to make test_sum pass. 
Reduce seems to nicely work diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -10,7 +10,7 @@ from pypy.module.micronumpy.interp_iter import ArrayIterator numpy_driver = jit.JitDriver( - greens=['shapelen', 'signature'], + greens=['shapelen', 'sig'], virtualizables=['frame'], reds=['result_size', 'frame', 'ri', 'self', 'result'] ) @@ -708,19 +708,13 @@ def getitem(self, item): raise NotImplementedError - def start_iter(self, res_shape=None): - all_iters = self.signature.create_iter(self, {}, res_shape) - return NumpyEvalFrame(all_iters) - def descr_debug_repr(self, space): return space.wrap(self.signature.debug_repr()) def find_sig(self): """ find a correct signature for the array """ - sig = self.create_sig() - sig.invent_numbering() - return signature.find_sig(sig) + return signature.find_sig(self.create_sig()) def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): @@ -791,18 +785,18 @@ result_size = self.find_size() result = W_NDimArray(result_size, self.shape, self.find_dtype()) shapelen = len(self.shape) - signature = self.find_sig() - frame = signature.create_frame(self) + sig = self.find_sig() + frame = sig.create_frame(self) ri = ArrayIterator(result_size) while not ri.done(): - numpy_driver.jit_merge_point(signature=signature, + numpy_driver.jit_merge_point(sig=sig, shapelen=shapelen, result_size=result_size, frame=frame, ri=ri, self=self, result=result) result.dtype.setitem(result.storage, ri.offset, - signature.eval(frame, self)) + sig.eval(frame, self)) frame.next(shapelen) ri = ri.next(shapelen) return result @@ -987,13 +981,6 @@ source_iter = source_iter.next(shapelen) res_iter = res_iter.next(shapelen) - # def start_iter(self, res_shape=None): - # if res_shape is not None and res_shape != self.shape: - # return BroadcastIterator(self, res_shape) - # if len(self.shape) == 1: - # return OneDimIterator(self.start, self.strides[0], self.shape[0]) - # return ViewIterator(self) - def setitem(self, item, value): self.parent.setitem(item, value) @@ -1054,13 +1041,6 @@ self.invalidated() self.dtype.setitem(self.storage, item, value) - # def start_iter(self, res_shape=None): - # if self.order == 'C': - # if res_shape is not None and res_shape != self.shape: - # return BroadcastIterator(self, res_shape) - # return ArrayIterator(self.size) - # raise NotImplementedError # use ViewIterator simply, test it - def setshape(self, space, new_shape): self.shape = new_shape self.calc_strides(new_shape) @@ -1226,12 +1206,6 @@ self.arr = arr self.iter = self.start_iter() - # def start_iter(self, res_shape=None): - # if res_shape is not None and res_shape != self.shape: - # return BroadcastIterator(self, res_shape) - # return OneDimIterator(self.arr.start, self.strides[0], - # self.shape[0]) - def find_dtype(self): return self.arr.find_dtype() diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -2,15 +2,16 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import interp_boxes, interp_dtype, signature, types +from pypy.module.micronumpy import interp_boxes, interp_dtype, types +from pypy.module.micronumpy.signature 
import ReduceSignature, ScalarSignature, find_sig from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name - reduce_driver = jit.JitDriver( - greens = ['shapelen', "signature"], - reds = ["i", "self", "dtype", "value", "obj"] + greens = ['shapelen', "sig"], + virtualizables = ["frame"], + reds = ["frame", "self", "dtype", "value", "obj"] ) class W_Ufunc(Wrappable): @@ -49,7 +50,8 @@ return self.reduce(space, w_obj, multidim=False) def reduce(self, space, w_obj, multidim): - from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar + from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar, Call2 + if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) @@ -65,8 +67,10 @@ space, obj.find_dtype(), promote_to_largest=True ) - start = obj.start_iter(obj.shape) shapelen = len(obj.shape) + sig = find_sig(ReduceSignature(self.func, ScalarSignature(dtype), + obj.create_sig())) + frame = sig.create_frame(obj) if shapelen > 1 and not multidim: raise OperationError(space.w_NotImplementedError, space.wrap("not implemented yet")) @@ -74,24 +78,20 @@ if size == 0: raise operationerrfmt(space.w_ValueError, "zero-size array to " "%s.reduce without identity", self.name) - value = obj.eval(start).convert_to(dtype) - start = start.next(shapelen) + value = sig.eval(frame, obj).convert_to(dtype) + frame.next(shapelen) else: value = self.identity.convert_to(dtype) - new_sig = signature.find_sig( - signature.ReduceSignature(self.func, self.name, - dtype.scalar_signature, - obj.signature)) - return self.reduce_loop(new_sig, shapelen, start, value, obj, dtype) + return self.reduce_loop(shapelen, sig, frame, value, obj, dtype) - def reduce_loop(self, signature, shapelen, i, value, obj, dtype): - while not i.done(): - reduce_driver.jit_merge_point(signature=signature, + def reduce_loop(self, shapelen, sig, frame, value, obj, dtype): + while not frame.done(): + reduce_driver.jit_merge_point(sig=sig, shapelen=shapelen, self=self, - value=value, obj=obj, i=i, + value=value, obj=obj, frame=frame, dtype=dtype) - value = self.func(dtype, value, obj.eval(i).convert_to(dtype)) - i = i.next(shapelen) + value = self.func(dtype, value, sig.eval(frame, obj).convert_to(dtype)) + frame.next(shapelen) return value class W_Ufunc1(W_Ufunc): diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -27,7 +27,12 @@ known_sigs = r_dict(sigeq, sighash) def find_sig(sig): - return known_sigs.setdefault(sig, sig) + try: + return known_sigs[sig] + except KeyError: + sig.invent_numbering() + known_sigs[sig] = sig + return sig class NumpyEvalFrame(object): _virtualizable2_ = ['iterators[*]'] @@ -35,6 +40,16 @@ def __init__(self, iterators): self = hint(self, access_directly=True) self.iterators = iterators + self.final_iter = None + for i, iter in enumerate(self.iterators): + if not isinstance(iter, ConstantIterator) or not isinstance(iter, BroadcastIterator): + self.final_iter = i + break + else: + raise Exception("Cannot find a non-broadcast non-constant iter") + + def done(self): + return self.iterators[self.final_iter].done() @unroll_safe def next(self, shapelen): @@ -185,4 +200,11 @@ self.right.debug_repr()) class ReduceSignature(Call2): - pass + def _create_iter(self, iterlist, arr, res_shape): + self.right._create_iter(iterlist, arr, res_shape) + + def 
_invent_numbering(self, cache): + self.right._invent_numbering(cache) + + def eval(self, frame, arr): + return self.right.eval(frame, arr) diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -4,7 +4,6 @@ from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) - class BaseNumpyAppTest(object): def setup_class(cls): cls.space = gettestobjspace(usemodules=['micronumpy']) From noreply at buildbot.pypy.org Wed Dec 14 13:32:38 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 14 Dec 2011 13:32:38 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: makes tests pass, they're super slow though Message-ID: <20111214123238.1DE9882210@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50501:a2834b804b57 Date: 2011-12-14 14:32 +0200 http://bitbucket.org/pypy/pypy/changeset/a2834b804b57/ Log: makes tests pass, they're super slow though diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -7,7 +7,7 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name from pypy.rlib.rstring import StringBuilder -from pypy.module.micronumpy.interp_iter import ArrayIterator +from pypy.module.micronumpy.interp_iter import ArrayIterator, ViewIterator numpy_driver = jit.JitDriver( greens=['shapelen', 'sig'], @@ -15,16 +15,19 @@ reds=['result_size', 'frame', 'ri', 'self', 'result'] ) all_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['i', 'self', 'dtype'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['frame', 'self', 'dtype'] ) any_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['i', 'self', 'dtype'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['frame', 'self', 'dtype'] ) slice_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['self', 'source', 'source_iter', 'res_iter'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['self', 'frame', 'source', 'res_iter'] ) def _find_shape_and_elems(space, w_iterable): @@ -340,15 +343,16 @@ def _all(self): dtype = self.find_dtype() - i = self.start_iter() + sig = self.find_sig() + frame = sig.create_frame(self) shapelen = len(self.shape) - while not i.done(): - all_driver.jit_merge_point(signature=self.signature, + while not frame.done(): + all_driver.jit_merge_point(sig=sig, shapelen=shapelen, self=self, - dtype=dtype, i=i) - if not dtype.itemtype.bool(self.eval(i)): + dtype=dtype, frame=frame) + if not dtype.itemtype.bool(sig.eval(frame, self)): return False - i = i.next(shapelen) + frame.next(shapelen) return True def descr_all(self, space): @@ -356,15 +360,16 @@ def _any(self): dtype = self.find_dtype() - i = self.start_iter() + sig = self.find_sig() + frame = sig.create_frame(self) shapelen = len(self.shape) - while not i.done(): - any_driver.jit_merge_point(signature=self.signature, + while not frame.done(): + any_driver.jit_merge_point(sig=sig, frame=frame, shapelen=shapelen, self=self, - dtype=dtype, i=i) - if dtype.itemtype.bool(self.eval(i)): + dtype=dtype) + if dtype.itemtype.bool(sig.eval(frame, self)): return True - i = i.next(shapelen) + frame.next(shapelen) return False def descr_any(self, space): @@ -950,6 +955,10 @@ def 
__init__(self, parent, start, strides, backstrides, shape): if isinstance(parent, W_NDimSlice): parent = parent.parent + else: + # XXX this should not force the array, but it did before the + # refactoring anyway, just in a more obscure way + parent = parent.get_concrete() ViewArray.__init__(self, parent, strides, backstrides, shape) self.start = start self.size = 1 @@ -967,18 +976,19 @@ self._sliceloop(w_value, res_shape) def _sliceloop(self, source, res_shape): - source_iter = source.start_iter(res_shape) - res_iter = self.start_iter(res_shape) + sig = source.find_sig() + frame = sig.create_frame(source) + res_iter = ViewIterator(self) shapelen = len(res_shape) while not res_iter.done(): - slice_driver.jit_merge_point(signature=source.signature, + slice_driver.jit_merge_point(sig=sig, + frame=frame, shapelen=shapelen, self=self, source=source, - res_iter=res_iter, - source_iter=source_iter) - self.setitem(res_iter.offset, source.eval(source_iter).convert_to( + res_iter=res_iter) + self.setitem(res_iter.offset, sig.eval(frame, source).convert_to( self.find_dtype())) - source_iter = source_iter.next(shapelen) + frame.next(shapelen) res_iter = res_iter.next(shapelen) def setitem(self, item, value): @@ -986,8 +996,8 @@ def copy(self): array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) - iter = self.start_iter() - a_iter = array.start_iter() + iter = ViewIterator(self) + a_iter = ArrayIterator(array.size) while not iter.done(): array.setitem(a_iter.offset, self.getitem(iter.offset)) iter = iter.next(len(self.shape)) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -35,7 +35,7 @@ return sig class NumpyEvalFrame(object): - _virtualizable2_ = ['iterators[*]'] + _virtualizable2_ = ['iterators[*]', 'final_iter'] def __init__(self, iterators): self = hint(self, access_directly=True) @@ -131,6 +131,10 @@ iter = ViewIterator(arr) iterlist.append(iter) + def eval(self, frame, arr): + iter = frame.iterators[self.iter_no] + return arr.find_dtype().getitem(arr.parent.storage, iter.offset) + class FlatiterSignature(ViewSignature): def debug_repr(self): return 'FlatIter(%s)' % self.child.debug_repr() @@ -156,7 +160,7 @@ self.child.debug_repr()) def _invent_numbering(self, cache): - self.values._invent_numbering(cache) + self.child._invent_numbering(cache) def _create_iter(self, iterlist, arr, res_shape): self.child._create_iter(iterlist, arr.values, res_shape) From noreply at buildbot.pypy.org Wed Dec 14 14:52:09 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Dec 2011 14:52:09 +0100 (CET) Subject: [pypy-commit] pypy default: We decided to not explicitly support the whole ctypes-2.7 Message-ID: <20111214135209.28C2E82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50502:29823c2fcce6 Date: 2011-12-14 14:51 +0100 http://bitbucket.org/pypy/pypy/changeset/29823c2fcce6/ Log: We decided to not explicitly support the whole ctypes-2.7 and instead go for a case-by-case, demand-driven approach. So this test is skipped instead of failing. 
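The skip in the following diff works purely by renaming: unittest's default loader only collects methods whose names start with the prefix "test" (TestLoader.testMethodPrefix), so putting "SKIPPED_" in front keeps the code in the file while guaranteeing it is never run. A tiny standalone illustration, with class and method names invented for the example:

    import unittest

    class Demo(unittest.TestCase):
        def test_collected(self):              # starts with "test", so it runs
            self.assertTrue(True)

        def SKIPPED_test_not_collected(self):  # never picked up by the loader
            raise AssertionError("would fail if it ever ran")

    names = unittest.TestLoader().getTestCaseNames(Demo)
    assert names == ['test_collected']
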
diff --git a/lib-python/modified-2.7/ctypes/test/test_libc.py b/lib-python/modified-2.7/ctypes/test/test_libc.py --- a/lib-python/modified-2.7/ctypes/test/test_libc.py +++ b/lib-python/modified-2.7/ctypes/test/test_libc.py @@ -25,7 +25,10 @@ lib.my_qsort(chars, len(chars)-1, sizeof(c_char), comparefunc(sort)) self.assertEqual(chars.raw, " ,,aaaadmmmnpppsss\x00") - def test_no_more_xfail(self): + def SKIPPED_test_no_more_xfail(self): + # We decided to not explicitly support the whole ctypes-2.7 + # and instead go for a case-by-case, demand-driven approach. + # So this test is skipped instead of failing. import socket import ctypes.test self.assertTrue(not hasattr(ctypes.test, 'xfail'), From noreply at buildbot.pypy.org Wed Dec 14 15:12:27 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Dec 2011 15:12:27 +0100 (CET) Subject: [pypy-commit] pypy default: "xfail" this test too. It has been failing at least since pypy 1.6. Message-ID: <20111214141227.D78BC82210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50503:e6032a99a1a5 Date: 2011-12-14 15:12 +0100 http://bitbucket.org/pypy/pypy/changeset/e6032a99a1a5/ Log: "xfail" this test too. It has been failing at least since pypy 1.6. diff --git a/lib-python/modified-2.7/ctypes/test/test_callbacks.py b/lib-python/modified-2.7/ctypes/test/test_callbacks.py --- a/lib-python/modified-2.7/ctypes/test/test_callbacks.py +++ b/lib-python/modified-2.7/ctypes/test/test_callbacks.py @@ -98,6 +98,7 @@ ## self.check_type(c_char_p, "abc") ## self.check_type(c_char_p, "def") + @xfail def test_pyobject(self): o = () from sys import getrefcount as grc From noreply at buildbot.pypy.org Wed Dec 14 15:13:34 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Dec 2011 15:13:34 +0100 (CET) Subject: [pypy-commit] pypy default: Bah, sorry. Message-ID: <20111214141334.D9E6982210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50504:8a5fb10fc8c4 Date: 2011-12-14 15:13 +0100 http://bitbucket.org/pypy/pypy/changeset/8a5fb10fc8c4/ Log: Bah, sorry. 
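The changeset before this one decorated test_pyobject with @xfail but never imported the name, so merely importing test_callbacks.py would already have failed with something like

    NameError: name 'xfail' is not defined

and the one-line fix below just adds the missing import.
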
diff --git a/lib-python/modified-2.7/ctypes/test/test_callbacks.py b/lib-python/modified-2.7/ctypes/test/test_callbacks.py --- a/lib-python/modified-2.7/ctypes/test/test_callbacks.py +++ b/lib-python/modified-2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test class Callbacks(unittest.TestCase): From noreply at buildbot.pypy.org Wed Dec 14 16:42:07 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 14 Dec 2011 16:42:07 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: make some more tests pass, skip the debug_repr for now Message-ID: <20111214154207.5082382210@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50505:cb2ba9e1d8ad Date: 2011-12-14 17:39 +0200 http://bitbucket.org/pypy/pypy/changeset/cb2ba9e1d8ad/ Log: make some more tests pass, skip the debug_repr for now diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -309,28 +309,30 @@ def _reduce_argmax_argmin_impl(op_name): reduce_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['result', 'idx', 'i', 'self', 'cur_best', 'dtype'] + greens=['shapelen', 'sig'], + reds=['result', 'idx', 'frame', 'self', 'cur_best', 'dtype'] ) def loop(self): - i = self.signature.create_iter(self, {}) - cur_best = self.eval(i) + sig = self.find_sig() + frame = sig.create_frame(self) + cur_best = sig.eval(frame, self) shapelen = len(self.shape) - i = i.next(shapelen) + frame.next(shapelen) dtype = self.find_dtype() result = 0 idx = 1 - while not i.done(): - reduce_driver.jit_merge_point(signature=self.signature, + while not frame.done(): + reduce_driver.jit_merge_point(sig=sig, shapelen=shapelen, self=self, dtype=dtype, - i=i, result=result, idx=idx, + frame=frame, result=result, + idx=idx, cur_best=cur_best) - new_best = getattr(dtype.itemtype, op_name)(cur_best, self.eval(i)) + new_best = getattr(dtype.itemtype, op_name)(cur_best, sig.eval(frame, self)) if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best - i = i.next(shapelen) + frame.next(shapelen) idx += 1 return result def impl(self, space): @@ -689,9 +691,11 @@ if self.find_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. 
Use a.any() or a.all()")) + concr = self.get_concrete() + sig = concr.find_sig() + frame = sig.create_frame(self) return space.wrap(space.is_true( - self.get_concrete().eval(self.start_iter(self.shape)) - )) + sig.eval(frame, concr))) def descr_get_transpose(self, space): concrete = self.get_concrete() @@ -714,7 +718,7 @@ raise NotImplementedError def descr_debug_repr(self, space): - return space.wrap(self.signature.debug_repr()) + return space.wrap(self.find_sig().debug_repr()) def find_sig(self): """ find a correct signature for the array diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -96,6 +96,7 @@ iterlist.append(iter) def eval(self, frame, arr): + arr = arr.get_concrete() iter = frame.iterators[self.iter_no] return arr.dtype.getitem(arr.storage, iter.offset) @@ -132,6 +133,7 @@ iterlist.append(iter) def eval(self, frame, arr): + arr = arr.get_concrete() iter = frame.iterators[self.iter_no] return arr.find_dtype().getitem(arr.parent.storage, iter.offset) @@ -156,8 +158,7 @@ return self.unfunc is other.unfunc and self.child.eq(other.child) def debug_repr(self): - return 'Call1(%s, %s)' % (self.name, - self.child.debug_repr()) + return 'Call1(%s)' % (self.child.debug_repr()) def _invent_numbering(self, cache): self.child._invent_numbering(cache) @@ -199,9 +200,8 @@ return self.binfunc(arr.calc_dtype, lhs, rhs) def debug_repr(self): - return 'Call2(%s, %s, %s)' % (self.name, - self.left.debug_repr(), - self.right.debug_repr()) + return 'Call2(%s, %s)' % (self.left.debug_repr(), + self.right.debug_repr()) class ReduceSignature(Call2): def _create_iter(self, iterlist, arr, res_shape): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -865,6 +865,7 @@ assert (a == [8, 6, 4, 2, 0]).all() def test_debug_repr(self): + skip("for now") from numpypy import zeros, sin a = zeros(1) assert a.__debug_repr__() == 'Array' From noreply at buildbot.pypy.org Wed Dec 14 17:27:02 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 14 Dec 2011 17:27:02 +0100 (CET) Subject: [pypy-commit] pypy numpy-full-fromstring: Added some review notes. Message-ID: <20111214162702.72E0582210@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-full-fromstring Changeset: r50506:44e16832a75d Date: 2011-12-14 11:26 -0500 http://bitbucket.org/pypy/pypy/changeset/44e16832a75d/ Log: Added some review notes. diff --git a/pypy/module/micronumpy/REVIEW b/pypy/module/micronumpy/REVIEW new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/REVIEW @@ -0,0 +1,14 @@ +Review items +============ + +* Rather than `True if len(sep_stripped) == 0 else False` just use: + `len(sep_stripped) == 0`. +* Rather than name the variable `A`, name it `items` or somsething like that. +* Rather than using `ptr`, use `idx`, since it's not really a pointer. +* Rather than doing a string format to raise an error (L67), use + `operationerrfmt`. +* Same comment about comparing array equality as before. +* No need for the `self.char == "?"` default. +* Rather than name the attribute `char`, name it `format_code`. +* `default_fromstring` can do `self.box(-1.0)`, instead of the coerce thing. +* Tests for both bool and long dtypes with this. 
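Two of the review items above, spelled out as code. This is only a sketch with an invented helper and invented variable names, not the branch's actual fromstring code:

    from pypy.interpreter.error import operationerrfmt

    def check_separator(space, sep):
        # hypothetical helper, used here only to carry the two idioms
        sep_stripped = sep.strip()
        # "True if len(sep_stripped) == 0 else False" collapses to the
        # comparison itself, which already yields a bool
        skip_whitespace = len(sep_stripped) == 0
        if not skip_whitespace and sep_stripped != ",":
            # operationerrfmt formats the message and builds the
            # OperationError in one call, instead of %-formatting the
            # string and wrapping it by hand
            raise operationerrfmt(space.w_ValueError,
                                  "unsupported separator: %s", sep)
        return skip_whitespace
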
From noreply at buildbot.pypy.org Wed Dec 14 17:27:04 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 14 Dec 2011 17:27:04 +0100 (CET) Subject: [pypy-commit] pypy numpy-full-fromstring: merged default in Message-ID: <20111214162704.015D482210@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-full-fromstring Changeset: r50507:2fbc0588123e Date: 2011-12-14 11:26 -0500 http://bitbucket.org/pypy/pypy/changeset/2fbc0588123e/ Log: merged default in diff --git a/lib-python/modified-2.7/ctypes/test/test_callbacks.py b/lib-python/modified-2.7/ctypes/test/test_callbacks.py --- a/lib-python/modified-2.7/ctypes/test/test_callbacks.py +++ b/lib-python/modified-2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test class Callbacks(unittest.TestCase): @@ -98,6 +99,7 @@ ## self.check_type(c_char_p, "abc") ## self.check_type(c_char_p, "def") + @xfail def test_pyobject(self): o = () from sys import getrefcount as grc diff --git a/lib-python/modified-2.7/ctypes/test/test_libc.py b/lib-python/modified-2.7/ctypes/test/test_libc.py --- a/lib-python/modified-2.7/ctypes/test/test_libc.py +++ b/lib-python/modified-2.7/ctypes/test/test_libc.py @@ -25,7 +25,10 @@ lib.my_qsort(chars, len(chars)-1, sizeof(c_char), comparefunc(sort)) self.assertEqual(chars.raw, " ,,aaaadmmmnpppsss\x00") - def test_no_more_xfail(self): + def SKIPPED_test_no_more_xfail(self): + # We decided to not explicitly support the whole ctypes-2.7 + # and instead go for a case-by-case, demand-driven approach. + # So this test is skipped instead of failing. import socket import ctypes.test self.assertTrue(not hasattr(ctypes.test, 'xfail'), diff --git a/pypy/doc/config/objspace.std.withspecialisedtuple.txt b/pypy/doc/config/objspace.std.withspecialisedtuple.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withspecialisedtuple.txt @@ -0,0 +1,3 @@ +Use "specialized tuples", a custom implementation for some common kinds +of tuples. Currently limited to tuples of length 2, in three variants: +(int, int), (float, float), and a generic (object, object). diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -163,7 +163,7 @@ if not we_are_translated() and self.box_types is not None: assert isinstance(v, TempBox) or v.type in self.box_types - def possibly_free_var(self, v, _hint_dont_reuse_quickly=False): + def possibly_free_var(self, v): """ If v is stored in a register and v is not used beyond the current position, then free it. Must be called at some point for all variables that might be in registers. 
@@ -173,10 +173,7 @@ return if v not in self.longevity or self.longevity[v][1] <= self.position: if v in self.reg_bindings: - if _hint_dont_reuse_quickly: - self.free_regs.insert(0, self.reg_bindings[v]) - else: - self.free_regs.append(self.reg_bindings[v]) + self.free_regs.append(self.reg_bindings[v]) del self.reg_bindings[v] if self.frame_manager is not None: self.frame_manager.mark_as_free(v) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -38,6 +38,7 @@ from pypy.jit.backend.x86.jump import remap_frame_layout from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter import longlong +from pypy.rlib.rarithmetic import intmask # darwin requires the stack to be 16 bytes aligned on calls. Same for gcc 4.5.0, # better safe than sorry @@ -837,7 +838,7 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.SUB_ri(esp.value, 8) # = size of doubles self.mc.MOVSD_sx(0, loc.value) - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.PUSH_b(get_ebp_ofs(loc.position)) self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) @@ -848,13 +849,25 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.MOVSD_xs(loc.value, 0) self.mc.ADD_ri(esp.value, 8) # = size of doubles - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.POP_b(get_ebp_ofs(loc.position + 1)) self.mc.POP_b(get_ebp_ofs(loc.position)) else: self.mc.POP(loc) + def regalloc_immedmem2mem(self, from_loc, to_loc): + # move a ConstFloatLoc directly to a StackLoc, as two MOVs + # (even on x86-64, because the immediates are encoded as 32 bits) + assert isinstance(from_loc, ConstFloatLoc) + assert isinstance(to_loc, StackLoc) + low_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[0] + high_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[1] + low_part = intmask(low_part) + high_part = intmask(high_part) + self.mc.MOV_bi(to_loc.value, low_part) + self.mc.MOV_bi(to_loc.value + 4, high_part) + def regalloc_perform(self, op, arglocs, resloc): genop_list[op.getopnum()](self, op, arglocs, resloc) @@ -1006,18 +1019,18 @@ self.mc.MOVSD_sx(p, loc.value) else: self.mc.MOV_sr(p, loc.value) - p += round_up_to_4(loc.width) + p += loc.get_width() p = 0 for i in range(start, n): loc = arglocs[i] if not isinstance(loc, RegLoc): - if loc.width == 8: + if loc.get_width() == 8: self.mc.MOVSD(xmm0, loc) self.mc.MOVSD_sx(p, xmm0.value) else: self.mc.MOV(tmp, loc) self.mc.MOV_sr(p, tmp.value) - p += round_up_to_4(loc.width) + p += loc.get_width() self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) @@ -2070,7 +2083,7 @@ argtypes=op.getdescr().get_arg_types(), callconv=op.getdescr().get_call_conv()) - if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: + if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.type == FLOAT: # a float or a long long return if op.getdescr().get_return_type() == 'L': self.mc.MOV_br(resloc.value, eax.value) # long long @@ -2555,11 +2568,6 @@ num = getattr(rop, opname.upper()) genop_list[num] = value -def round_up_to_4(size): - if size < 4: - return 4 - return size - # XXX: ri386 migration shims: def addr_add(reg_or_imm1, reg_or_imm2, offset=0, scale=0): return AddressLoc(reg_or_imm1, reg_or_imm2, scale, offset) diff --git 
a/pypy/jit/backend/x86/jump.py b/pypy/jit/backend/x86/jump.py --- a/pypy/jit/backend/x86/jump.py +++ b/pypy/jit/backend/x86/jump.py @@ -1,6 +1,6 @@ import sys from pypy.tool.pairtype import extendabletype -from pypy.jit.backend.x86.regloc import ImmedLoc, StackLoc +from pypy.jit.backend.x86.regloc import ImmediateAssemblerLocation, StackLoc def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg): pending_dests = len(dst_locations) @@ -12,7 +12,7 @@ srccount[key] = 0 for i in range(len(dst_locations)): src = src_locations[i] - if isinstance(src, ImmedLoc): + if isinstance(src, ImmediateAssemblerLocation): continue key = src._getregkey() if key in srccount: @@ -31,7 +31,7 @@ srccount[key] = -1 # means "it's done" pending_dests -= 1 src = src_locations[i] - if not isinstance(src, ImmedLoc): + if not isinstance(src, ImmediateAssemblerLocation): key = src._getregkey() if key in srccount: srccount[key] -= 1 @@ -66,6 +66,13 @@ def _move(assembler, src, dst, tmpreg): if dst.is_memory_reference() and src.is_memory_reference(): + if isinstance(src, ImmediateAssemblerLocation): + assembler.regalloc_immedmem2mem(src, dst) + return + if tmpreg is None: + assembler.regalloc_push(src) + assembler.regalloc_pop(dst) + return assembler.regalloc_mov(src, tmpreg) src = tmpreg assembler.regalloc_mov(src, dst) @@ -87,7 +94,7 @@ dstloc = dst_locations2[i] if isinstance(loc, StackLoc): key = loc._getregkey() - if (key in dst_keys or (loc.width > WORD and + if (key in dst_keys or (loc.get_width() > WORD and (key + WORD) in dst_keys)): assembler.regalloc_push(loc) extrapushes.append(dstloc) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -130,9 +130,9 @@ @staticmethod def frame_pos(i, box_type): if IS_X86_32 and box_type == FLOAT: - return StackLoc(i, get_ebp_ofs(i+1), 2, box_type) + return StackLoc(i, get_ebp_ofs(i+1), box_type) else: - return StackLoc(i, get_ebp_ofs(i), 1, box_type) + return StackLoc(i, get_ebp_ofs(i), box_type) @staticmethod def frame_size(box_type): if IS_X86_32 and box_type == FLOAT: @@ -174,12 +174,11 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity = self._compute_vars_longevity(inputargs, operations) - self.longevity = longevity - self.rm = gpr_reg_mgr_cls(longevity, + self._compute_vars_longevity(inputargs, operations) + self.rm = gpr_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) - self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, + self.xrm = xmm_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) return operations @@ -481,7 +480,7 @@ # only to guard operations or to jump or to finish produced = {} last_used = {} - #useful = {} + last_real_usage = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -492,10 +491,13 @@ opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) - #if opnum != rop.JUMP and opnum != rop.FINISH: - # useful[arg] = None - if isinstance(arg, Box) and arg not in last_used: + if not isinstance(arg, Box): + continue + if arg not in last_used: last_used[arg] = i + if opnum != rop.JUMP and opnum != rop.LABEL: + if arg not in last_real_usage: + last_real_usage[arg] = i if op.is_guard(): for arg in op.getfailargs(): if arg is None: # hole @@ -503,7 +505,8 @@ assert isinstance(arg, Box) if arg not in last_used: last_used[arg] = i - + self.last_real_usage 
= last_real_usage + # longevity = {} for arg in produced: if arg in last_used: @@ -519,7 +522,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity#, useful + self.longevity = longevity def loc(self, v): if v is None: # xxx kludgy @@ -1384,13 +1387,6 @@ assert isinstance(descr, TargetToken) arglocs = descr._x86_arglocs self.jump_target_descr = descr - # compute 'tmploc' to be all_regs[0] by spilling what is there - tmpbox1 = TempBox() - tmpbox2 = TempBox() - tmpreg = X86RegisterManager.all_regs[0] - self.rm.force_allocate_reg(tmpbox1, selected_reg=tmpreg) - xmmtmp = X86XMMRegisterManager.all_regs[0] - self.xrm.force_allocate_reg(tmpbox2, selected_reg=xmmtmp) # Part about non-floats src_locations1 = [] dst_locations1 = [] @@ -1402,19 +1398,23 @@ box = op.getarg(i) src_loc = self.loc(box) dst_loc = arglocs[i] - assert dst_loc != tmpreg and dst_loc != xmmtmp if box.type != FLOAT: src_locations1.append(src_loc) dst_locations1.append(dst_loc) else: src_locations2.append(src_loc) dst_locations2.append(dst_loc) + # Do we have a temp var? + if IS_X86_64: + tmpreg = X86_64_SCRATCH_REG + xmmtmp = X86_64_XMM_SCRATCH_REG + else: + tmpreg = None + xmmtmp = None # Do the remapping remap_frame_layout_mixed(assembler, src_locations1, dst_locations1, tmpreg, src_locations2, dst_locations2, xmmtmp) - self.rm.possibly_free_var(tmpbox1) - self.xrm.possibly_free_var(tmpbox2) self.possibly_free_vars_for_op(op) assembler.closing_jump(self.jump_target_descr) @@ -1471,16 +1471,15 @@ inputargs = op.getarglist() arglocs = [None] * len(inputargs) # - # we need to make sure that the tmpreg and xmmtmp are free - tmpreg = X86RegisterManager.all_regs[0] - tmpvar = TempBox() - self.rm.force_allocate_reg(tmpvar, selected_reg=tmpreg) - self.rm.possibly_free_var(tmpvar, _hint_dont_reuse_quickly=True) - # - xmmtmp = X86XMMRegisterManager.all_regs[0] - tmpvar = TempBox() - self.xrm.force_allocate_reg(tmpvar, selected_reg=xmmtmp) - self.xrm.possibly_free_var(tmpvar, _hint_dont_reuse_quickly=True) + # we use force_spill() on the boxes that are not going to be really + # used any more in the loop, but that are kept alive anyway + # by being in a next LABEL's or a JUMP's argument or fail_args + # of some guard + position = self.rm.position + for arg in inputargs: + assert isinstance(arg, Box) + if self.last_real_usage.get(arg, -1) <= position: + self.force_spill_var(arg) # # we need to make sure that no variable is stored in ebp for arg in inputargs: @@ -1491,9 +1490,9 @@ # for i in range(len(inputargs)): arg = inputargs[i] - assert not isinstance(arg, Const) + assert isinstance(arg, Box) loc = self.loc(arg) - assert not (loc is tmpreg or loc is xmmtmp or loc is ebp) + assert loc is not ebp arglocs[i] = loc if isinstance(loc, RegLoc): self.fm.mark_as_free(arg) diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -16,8 +16,7 @@ # class AssemblerLocation(object): - # XXX: Is adding "width" here correct? 
- _attrs_ = ('value', 'width', '_location_code') + _attrs_ = ('value', '_location_code') _immutable_ = True def _getregkey(self): return self.value @@ -28,6 +27,9 @@ def location_code(self): return self._location_code + def get_width(self): + raise NotImplementedError + def value_r(self): return self.value def value_b(self): return self.value def value_s(self): return self.value @@ -43,13 +45,21 @@ _immutable_ = True _location_code = 'b' - def __init__(self, position, ebp_offset, num_words, type): + def __init__(self, position, ebp_offset, type): + # _getregkey() returns self.value; the value returned must not + # conflict with RegLoc._getregkey(). It doesn't a bit by chance, + # so let it fail the following assert if it no longer does. + assert not (0 <= ebp_offset < 8 + 8 * IS_X86_64) self.position = position self.value = ebp_offset - self.width = num_words * WORD # One of INT, REF, FLOAT self.type = type + def get_width(self): + if self.type == FLOAT: + return 8 + return WORD + def __repr__(self): return '%d(%%ebp)' % (self.value,) @@ -63,10 +73,8 @@ self.value = regnum self.is_xmm = is_xmm if self.is_xmm: - self.width = 8 self._location_code = 'x' else: - self.width = WORD self._location_code = 'r' def __repr__(self): if self.is_xmm: @@ -74,6 +82,11 @@ else: return rx86.R.names[self.value] + def get_width(self): + if self.is_xmm: + return 8 + return WORD + def lowest8bits(self): assert not self.is_xmm return RegLoc(rx86.low_byte(self.value), False) @@ -91,9 +104,11 @@ else: return eax -class ImmedLoc(AssemblerLocation): +class ImmediateAssemblerLocation(AssemblerLocation): _immutable_ = True - width = WORD + +class ImmedLoc(ImmediateAssemblerLocation): + _immutable_ = True _location_code = 'i' def __init__(self, value): @@ -104,6 +119,9 @@ def getint(self): return self.value + def get_width(self): + return WORD + def __repr__(self): return "ImmedLoc(%d)" % (self.value) @@ -116,7 +134,6 @@ class AddressLoc(AssemblerLocation): _immutable_ = True - width = WORD # The address is base_loc + (scaled_loc << scale) + static_offset def __init__(self, base_loc, scaled_loc, scale=0, static_offset=0): assert 0 <= scale < 4 @@ -145,6 +162,9 @@ info = getattr(self, attr, '?') return '' % (self._location_code, info) + def get_width(self): + return WORD + def value_a(self): return self.loc_a @@ -179,32 +199,34 @@ raise AssertionError(self._location_code) return result -class ConstFloatLoc(AssemblerLocation): - # XXX: We have to use this class instead of just AddressLoc because - # we want a width of 8 (... I think. Check this!) +class ConstFloatLoc(ImmediateAssemblerLocation): _immutable_ = True - width = 8 _location_code = 'j' def __init__(self, address): self.value = address + def get_width(self): + return 8 + def __repr__(self): return '' % (self.value,) if IS_X86_32: - class FloatImmedLoc(AssemblerLocation): + class FloatImmedLoc(ImmediateAssemblerLocation): # This stands for an immediate float. It cannot be directly used in # any assembler instruction. Instead, it is meant to be decomposed # in two 32-bit halves. On 64-bit, FloatImmedLoc() is a function # instead; see below. 
_immutable_ = True - width = 8 _location_code = '#' # don't use me def __init__(self, floatstorage): self.aslonglong = floatstorage + def get_width(self): + return 8 + def low_part(self): return intmask(self.aslonglong) diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -71,6 +71,18 @@ ('mov', eax, s24), ('mov', s12, edi)] +def test_no_tmp_reg(): + assembler = MockAssembler() + s8 = frame_pos(0, INT) + s12 = frame_pos(13, INT) + s20 = frame_pos(20, INT) + s24 = frame_pos(221, INT) + remap_frame_layout(assembler, [s8, eax, s12], [s20, s24, edi], None) + assert assembler.ops == [('push', s8), + ('pop', s20), + ('mov', eax, s24), + ('mov', s12, edi)] + def test_reordering(): assembler = MockAssembler() s8 = frame_pos(8, INT) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -381,11 +381,11 @@ 'GUARD_ISNULL/1d', 'GUARD_NONNULL_CLASS/2d', '_GUARD_FOLDABLE_LAST', - 'GUARD_NO_EXCEPTION/0d', - 'GUARD_EXCEPTION/1d', + 'GUARD_NO_EXCEPTION/0d', # may be called with an exception currently set + 'GUARD_EXCEPTION/1d', # may be called with an exception currently set 'GUARD_NO_OVERFLOW/0d', 'GUARD_OVERFLOW/0d', - 'GUARD_NOT_FORCED/0d', + 'GUARD_NOT_FORCED/0d', # may be called with an exception currently set 'GUARD_NOT_INVALIDATED/0d', '_GUARD_LAST', # ----- end of guard operations ----- diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -1,9 +1,19 @@ from pypy.interpreter.mixedmodule import MixedModule +class PyPyModule(MixedModule): + interpleveldefs = { + 'debug_repr': 'interp_extras.debug_repr', + } + appleveldefs = {} + class Module(MixedModule): applevel_name = 'numpypy' + submodules = { + 'pypy': PyPyModule + } + interpleveldefs = { 'ndarray': 'interp_numarray.W_NDimArray', 'dtype': 'interp_dtype.W_Dtype', @@ -81,6 +91,7 @@ 'mean': 'app_numpy.mean', 'sum': 'app_numpy.sum', 'min': 'app_numpy.min', + 'identity': 'app_numpy.identity', 'max': 'app_numpy.max', 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -13,6 +13,11 @@ # weighting, just the average part! 
return mean(a) +def identity(n, dtype=None): + a = numpypy.zeros((n,n), dtype=dtype) + for i in range(n): + a[i][i] = 1 + return a def mean(a): if not hasattr(a, "mean"): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -86,6 +86,7 @@ descr_ge = _binop_impl("greater_equal") descr_radd = _binop_right_impl("add") + descr_rsub = _binop_right_impl("subtract") descr_rmul = _binop_right_impl("multiply") descr_neg = _unaryop_impl("negative") @@ -170,7 +171,8 @@ __mul__ = interp2app(W_GenericBox.descr_mul), __div__ = interp2app(W_GenericBox.descr_div), - __radd__ = interp2app(W_GenericBox.descr_add), + __radd__ = interp2app(W_GenericBox.descr_radd), + __rsub__ = interp2app(W_GenericBox.descr_rsub), __rmul__ = interp2app(W_GenericBox.descr_rmul), __eq__ = interp2app(W_GenericBox.descr_eq), diff --git a/pypy/module/micronumpy/interp_extras.py b/pypy/module/micronumpy/interp_extras.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_extras.py @@ -0,0 +1,7 @@ +from pypy.interpreter.gateway import unwrap_spec +from pypy.module.micronumpy.interp_numarray import BaseArray + + + at unwrap_spec(array=BaseArray) +def debug_repr(space, array): + return space.wrap(array.debug_repr()) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -791,7 +791,8 @@ raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) item = concrete._index_of_single_item(space, w_idx) - concrete.setitem_w(space, item, w_value) + dtype = concrete.find_dtype() + concrete.setitem(item, dtype.coerce(space, w_value)) return if not isinstance(w_value, BaseArray): w_value = convert_to_array(space, w_value) @@ -924,9 +925,6 @@ def start_iter(self, res_shape=None): raise NotImplementedError - def descr_debug_repr(self, space): - return space.wrap(self.debug_repr()) - def descr_array_iface(self, space): concrete = self.get_concrete() storage = concrete.get_storage(space) @@ -1178,10 +1176,6 @@ def eval(self, iter): return self.parent.getitem(iter.get_offset()) - @unwrap_spec(item=int) - def setitem_w(self, space, item, w_value): - return self.parent.setitem_w(space, item, w_value) - def setitem(self, item, value): # This is currently not possible to be called from anywhere. 
raise NotImplementedError @@ -1330,9 +1324,6 @@ raise OperationError(space.w_TypeError, space.wrap( "len() of unsized object")) - def setitem_w(self, space, item, w_value): - return self.setitem(item, self.dtype.coerce(space, w_value)) - def setitem(self, item, value): self.invalidated() self.dtype.setitem(self.storage, item, value) @@ -1472,7 +1463,6 @@ __repr__ = interp2app(BaseArray.descr_repr), __str__ = interp2app(BaseArray.descr_str), - __debug_repr__ = interp2app(BaseArray.descr_debug_repr), __array_interface__ = GetSetProperty(BaseArray.descr_array_iface), dtype = GetSetProperty(BaseArray.descr_get_dtype), diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -259,22 +259,31 @@ assert numpy.uint16('65536') == 0 def test_int32(self): + import sys import numpypy as numpy x = numpy.int32(23) assert x == 23 assert numpy.int32(2147483647) == 2147483647 - assert numpy.int32(2147483648) == -2147483648 assert numpy.int32('2147483647') == 2147483647 - assert numpy.int32('2147483648') == -2147483648 + if sys.maxint > 2 ** 31 - 1: + assert numpy.int32(2147483648) == -2147483648 + assert numpy.int32('2147483648') == -2147483648 + else: + raises(OverflowError, numpy.int32, 2147483648) + raises(OverflowError, numpy.int32, '2147483648') def test_uint32(self): + import sys import numpypy as numpy - assert numpy.uint32(4294967295) == 4294967295 - assert numpy.uint32(4294967296) == 0 - assert numpy.uint32('4294967295') == 4294967295 - assert numpy.uint32('4294967296') == 0 + assert numpy.uint32(10) == 10 + + if sys.maxint > 2 ** 31 - 1: + assert numpy.uint32(4294967295) == 4294967295 + assert numpy.uint32(4294967296) == 0 + assert numpy.uint32('4294967295') == 4294967295 + assert numpy.uint32('4294967296') == 0 def test_int_(self): import numpypy as numpy @@ -294,10 +303,14 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 - assert numpy.int64(9223372036854775807) == 9223372036854775807 + if sys.maxint >= 2 ** 63 - 1: + assert numpy.int64(9223372036854775807) == 9223372036854775807 + assert numpy.int64('9223372036854775807') == 9223372036854775807 + else: + raises(OverflowError, numpy.int64, 9223372036854775807) + raises(OverflowError, numpy.int64, '9223372036854775807') + raises(OverflowError, numpy.int64, 9223372036854775808) - - assert numpy.int64('9223372036854775807') == 9223372036854775807 raises(OverflowError, numpy.int64, '9223372036854775808') def test_uint64(self): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -491,6 +491,11 @@ for i in range(5): assert b[i] == i - 5 + def test_scalar_subtract(self): + from numpypy import int32 + assert int32(2) - 1 == 1 + assert 1 - int32(2) == -1 + def test_mul(self): import numpypy @@ -722,6 +727,26 @@ a = array([True] * 5, bool) assert a.sum() == 5 + def test_identity(self): + from numpypy import identity, array + from numpypy import int32, float64, dtype + a = identity(0) + assert len(a) == 0 + assert a.dtype == dtype('float64') + assert a.shape == (0,0) + b = identity(1, dtype=int32) + assert len(b) == 1 + assert b[0][0] == 1 + assert b.shape == (1,1) + assert b.dtype == dtype('int32') + c = identity(2) + assert c.shape == (2,2) + assert (c == [[1,0],[0,1]]).all() + d = identity(3, dtype='int32') + assert d.shape 
== (3,3) + assert d.dtype == dtype('int32') + assert (d == [[1,0,0],[0,1,0],[0,0,1]]).all() + def test_prod(self): from numpypy import array a = array(range(1, 6)) @@ -868,16 +893,17 @@ def test_debug_repr(self): from numpypy import zeros, sin + from numpypy.pypy import debug_repr a = zeros(1) - assert a.__debug_repr__() == 'Array' - assert (a + a).__debug_repr__() == 'Call2(add, Array, Array)' - assert (a[::2]).__debug_repr__() == 'Slice(Array)' - assert (a + 2).__debug_repr__() == 'Call2(add, Array, Scalar)' - assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, FlatIter(Array))' - assert sin(a).__debug_repr__() == 'Call1(sin, Array)' + assert debug_repr(a) == 'Array' + assert debug_repr(a + a) == 'Call2(add, Array, Array)' + assert debug_repr(a[::2]) == 'Slice(Array)' + assert debug_repr(a + 2) == 'Call2(add, Array, Scalar)' + assert debug_repr(a + a.flat) == 'Call2(add, Array, FlatIter(Array))' + assert debug_repr(sin(a)) == 'Call1(sin, Array)' b = a + a b[0] = 3 - assert b.__debug_repr__() == 'Call2(add, forced=Array)' + assert debug_repr(b) == 'Call2(add, forced=Array)' def test_tolist_scalar(self): from numpypy import int32, bool_ diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -210,9 +210,9 @@ def entry_bridge_ops(self, *args, **kwds): ops = list(self._allops(*args, **kwds)) labels = [op for op in ops if op.name == 'label'] - assert ops.index(labels[0]) == 0 - i = ops.index(labels[1]) - return ops[1:i] + i0 = ops.index(labels[0]) + i1 = ops.index(labels[1]) + return ops[i0+1:i1] @property def chunks(self): @@ -409,7 +409,7 @@ """ iter_exp_ops = iter(expected_ops) iter_ops = RevertableIterator(self.ops) - for opindex, exp_op in enumerate(iter_exp_ops): + for exp_op in iter_exp_ops: try: if exp_op == '...': # loop until we find an operation which matches @@ -430,7 +430,7 @@ if exp_op[4] is False: # optional operation iter_ops.revert_one() continue # try to match with the next exp_op - e.opindex = opindex + e.opindex = iter_ops.index - 1 raise # # make sure we exhausted iter_ops diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -45,8 +45,10 @@ cmdline = [sys.executable] if not import_site: cmdline.append('-S') - for key, value in jitopts.iteritems(): - cmdline += ['--jit', '%s=%s' % (key, value)] + if jitopts: + jitcmdline = ['%s=%s' % (key, value) + for key, value in jitopts.items()] + cmdline += ['--jit', ','.join(jitcmdline)] cmdline.append(str(self.filepath)) # print cmdline, logfile diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -6,6 +6,8 @@ def main(n): def f(): for i in range(10000): + i -= 1 + i -= 42 # ID: subtract yield i def g(): @@ -15,6 +17,13 @@ g() log = self.run(main, [500]) + # XXX XXX this test fails so far because of a detail that + # changed with jit-simplify-backendintf. We should try to + # think of a way to be more resistent against such details. 
+ # The issue is that we now get one Tracing, then go back + # to the interpreter hoping to immediately run the JITted + # code; but instead, we Trace again, just because another + # counter was also about to reach its limit... loop, = log.loops_by_filename(self.filepath, is_entry_bridge='*') assert loop.match_by_id("generator", """ ... @@ -26,3 +35,8 @@ i47 = arraylen_gc(p8, descr=) # Should be removed by backend jump(..., descr=...) """) + assert loop.match_by_id("subtract", """ + setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me + i2 = int_sub_ovf(i1, 42) + guard_no_overflow(descr=...) + """) diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -177,52 +177,55 @@ _specialisations = [] Cls_ii = make_specialised_class((int, int)) -Cls_is = make_specialised_class((int, str)) -Cls_io = make_specialised_class((int, object)) -Cls_si = make_specialised_class((str, int)) -Cls_ss = make_specialised_class((str, str)) -Cls_so = make_specialised_class((str, object)) -Cls_oi = make_specialised_class((object, int)) -Cls_os = make_specialised_class((object, str)) +#Cls_is = make_specialised_class((int, str)) +#Cls_io = make_specialised_class((int, object)) +#Cls_si = make_specialised_class((str, int)) +#Cls_ss = make_specialised_class((str, str)) +#Cls_so = make_specialised_class((str, object)) +#Cls_oi = make_specialised_class((object, int)) +#Cls_os = make_specialised_class((object, str)) Cls_oo = make_specialised_class((object, object)) Cls_ff = make_specialised_class((float, float)) -Cls_ooo = make_specialised_class((object, object, object)) +#Cls_ooo = make_specialised_class((object, object, object)) def makespecialisedtuple(space, list_w): if len(list_w) == 2: w_arg1, w_arg2 = list_w w_type1 = space.type(w_arg1) - w_type2 = space.type(w_arg2) + #w_type2 = space.type(w_arg2) # if w_type1 is space.w_int: + w_type2 = space.type(w_arg2) if w_type2 is space.w_int: return Cls_ii(space, w_arg1, w_arg2) - elif w_type2 is space.w_str: - return Cls_is(space, w_arg1, w_arg2) - else: - return Cls_io(space, w_arg1, w_arg2) + #elif w_type2 is space.w_str: + # return Cls_is(space, w_arg1, w_arg2) + #else: + # return Cls_io(space, w_arg1, w_arg2) # - elif w_type1 is space.w_str: - if w_type2 is space.w_int: - return Cls_si(space, w_arg1, w_arg2) - elif w_type2 is space.w_str: - return Cls_ss(space, w_arg1, w_arg2) - else: - return Cls_so(space, w_arg1, w_arg2) + #elif w_type1 is space.w_str: + # if w_type2 is space.w_int: + # return Cls_si(space, w_arg1, w_arg2) + # elif w_type2 is space.w_str: + # return Cls_ss(space, w_arg1, w_arg2) + # else: + # return Cls_so(space, w_arg1, w_arg2) # - elif w_type1 is space.w_float and w_type2 is space.w_float: - return Cls_ff(space, w_arg1, w_arg2) + elif w_type1 is space.w_float: + w_type2 = space.type(w_arg2) + if w_type2 is space.w_float: + return Cls_ff(space, w_arg1, w_arg2) # - else: - if w_type2 is space.w_int: - return Cls_oi(space, w_arg1, w_arg2) - elif w_type2 is space.w_str: - return Cls_os(space, w_arg1, w_arg2) - else: - return Cls_oo(space, w_arg1, w_arg2) + #else: + # if w_type2 is space.w_int: + # return Cls_oi(space, w_arg1, w_arg2) + # elif w_type2 is space.w_str: + # return Cls_os(space, w_arg1, w_arg2) + # else: + return Cls_oo(space, w_arg1, w_arg2) # - elif len(list_w) == 3: - return Cls_ooo(space, list_w[0], list_w[1], list_w[2]) + #elif len(list_w) == 3: + # return Cls_ooo(space, 
list_w[0], list_w[1], list_w[2]) else: raise NotSpecialised diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py --- a/pypy/objspace/std/test/test_specialisedtupleobject.py +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -33,15 +33,15 @@ N_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": False}) S_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) - def hash_test(values): + def hash_test(values, must_be_specialized=True): N_values_w = [N_space.wrap(value) for value in values] S_values_w = [S_space.wrap(value) for value in values] N_w_tuple = N_space.newtuple(N_values_w) S_w_tuple = S_space.newtuple(S_values_w) - - assert isinstance(S_w_tuple, W_SpecialisedTupleObject) + + if must_be_specialized: + assert isinstance(S_w_tuple, W_SpecialisedTupleObject) assert isinstance(N_w_tuple, W_TupleObject) - assert not N_space.is_true(N_space.eq(N_w_tuple, S_w_tuple)) assert S_space.is_true(S_space.eq(N_w_tuple, S_w_tuple)) assert S_space.is_true(S_space.eq(N_space.hash(N_w_tuple), S_space.hash(S_w_tuple))) @@ -53,7 +53,7 @@ hash_test([1,(1,2)]) hash_test([1,('a',2)]) hash_test([1,()]) - hash_test([1,2,3]) + hash_test([1,2,3], must_be_specialized=False) class AppTestW_SpecialisedTupleObject: @@ -83,6 +83,8 @@ return ("SpecialisedTupleObject" + expected) in r def test_createspecialisedtuple(self): + have = ['ii', 'ff', 'oo'] + # spec = {int: 'i', float: 'f', str: 's', @@ -92,14 +94,14 @@ for y in [43, 4.3, "bar", []]: expected1 = spec[type(x)] expected2 = spec[type(y)] - if (expected1 == 'f') ^ (expected2 == 'f'): - if expected1 == 'f': expected1 = 'o' - if expected2 == 'f': expected2 = 'o' + if expected1 + expected2 not in have: + expected1 = expected2 = 'o' obj = (x, y) assert self.isspecialised(obj, '_' + expected1 + expected2) # - obj = (1, 2, 3) - assert self.isspecialised(obj, '_ooo') + if 'ooo' in have: + obj = (1, 2, 3) + assert self.isspecialised(obj, '_ooo') def test_delegation(self): t = self.forbid_delegation((42, 43)) @@ -214,6 +216,8 @@ raises(IndexError, "t[-3]") def test_three_tuples(self): + if not self.isspecialised((1, 2, 3)): + skip("don't have specialization for 3-tuples") b = self.forbid_delegation((1, 2, 3)) c = (1,) d = c + (2, 3) @@ -221,6 +225,16 @@ assert b == d def test_mongrel(self): + a = self.forbid_delegation((2.2, '333')) + assert self.isspecialised(a) + assert len(a) == 2 + assert a[0] == 2.2 and a[1] == '333' + b = ('333',) + assert a == (2.2,) + b + assert not a != (2.2,) + b + # + if not self.isspecialised((1, 2, 3)): + skip("don't have specialization for 3-tuples") a = self.forbid_delegation((1, 2.2, '333')) assert self.isspecialised(a) assert len(a) == 3 diff --git a/pypy/objspace/std/typetype.py b/pypy/objspace/std/typetype.py --- a/pypy/objspace/std/typetype.py +++ b/pypy/objspace/std/typetype.py @@ -10,7 +10,6 @@ w_dict=gateway.NoneNotWrapped): "This is used to create user-defined classes only." 
- from pypy.objspace.std.typeobject import W_TypeObject # XXX check types w_typetype = _precheck_for_new(space, w_typetype) @@ -19,10 +18,18 @@ if (space.is_w(space.type(w_typetype), space.w_type) and w_bases is None and w_dict is None): return space.type(w_name) - elif w_bases is None or w_dict is None: + else: + return _create_new_type(space, w_typetype, w_name, w_bases, w_dict) + + +def _create_new_type(space, w_typetype, w_name, w_bases, w_dict): + # this is in its own function because we want the special case 'type(x)' + # above to be seen by the jit. + from pypy.objspace.std.typeobject import W_TypeObject + + if w_bases is None or w_dict is None: raise OperationError(space.w_TypeError, space.wrap("type() takes 1 or 3 arguments")) - bases_w = space.fixedview(w_bases) w_winner = w_typetype From noreply at buildbot.pypy.org Wed Dec 14 17:30:54 2011 From: noreply at buildbot.pypy.org (hager) Date: Wed, 14 Dec 2011 17:30:54 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: (bivab, hager): fix bug in code generator Message-ID: <20111214163054.8516182210@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50508:284314e1eb5f Date: 2011-12-14 17:30 +0100 http://bitbucket.org/pypy/pypy/changeset/284314e1eb5f/ Log: (bivab, hager): fix bug in code generator diff --git a/pypy/jit/backend/ppc/ppcgen/codebuilder.py b/pypy/jit/backend/ppc/ppcgen/codebuilder.py --- a/pypy/jit/backend/ppc/ppcgen/codebuilder.py +++ b/pypy/jit/backend/ppc/ppcgen/codebuilder.py @@ -63,7 +63,7 @@ MI = Form("rA", "rS", "SH", "MB", "ME", "Rc") MB = Form("rA", "rS", "rB", "MB", "ME", "Rc") MDI = Form("rA", "rS", "sh", "mbe", "XO5", "Rc") -MDS = Form("rA", "rS", "rB", "mbe", "XO5", "Rc") +MDS = Form("rA", "rS", "rB", "mbe", "XO7", "Rc") class BasicPPCAssembler(Assembler): @@ -459,10 +459,10 @@ rfid = X(19, XO1=18) - rldcl = MDS(30, XO5=8, Rc=0) - rldclx = MDS(30, XO5=8, Rc=1) - rldcr = MDS(30, XO5=9, Rc=0) - rldcrx = MDS(30, XO5=9, Rc=1) + rldcl = MDS(30, XO7=8, Rc=0) + rldclx = MDS(30, XO7=8, Rc=1) + rldcr = MDS(30, XO7=9, Rc=0) + rldcrx = MDS(30, XO7=9, Rc=1) rldic = MDI(30, XO5=2, Rc=0) rldicx = MDI(30, XO5=2, Rc=1) diff --git a/pypy/jit/backend/ppc/ppcgen/form.py b/pypy/jit/backend/ppc/ppcgen/form.py --- a/pypy/jit/backend/ppc/ppcgen/form.py +++ b/pypy/jit/backend/ppc/ppcgen/form.py @@ -186,7 +186,7 @@ for fname in specializations: field = self.fieldmap[fname] if field not in self.fields: - raise FormException, "no nothin bout '%s'"%k + raise FormException, "no nothin bout '%s'"%fname s[field] = specializations[fname] return IDesc(self.fieldmap, self.fields, s) diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_field.py b/pypy/jit/backend/ppc/ppcgen/ppc_field.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_field.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_field.py @@ -49,6 +49,7 @@ "XO4": (30, 31), "XO5": (27, 29), "XO6": (21, 29), + "XO7": (27, 30) } From noreply at buildbot.pypy.org Wed Dec 14 18:07:53 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Dec 2011 18:07:53 +0100 (CET) Subject: [pypy-commit] pypy default: Complain if we see "lltype.free(x, track_allocation=False)". Should Message-ID: <20111214170753.BD2F682210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50509:b1a9e14a476c Date: 2011-12-14 17:14 +0100 http://bitbucket.org/pypy/pypy/changeset/b1a9e14a476c/ Log: Complain if we see "lltype.free(x, track_allocation=False)". Should be fixed. 
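For illustration, a minimal sketch (not from the changeset) of the kind of RPython code this check is aimed at; the struct and function names are invented, only the flavor/track_allocation flags appear in the diffs of this thread. When the JIT codewriter sees a raw free carrying an extra flag such as track_allocation=False, it now raises UnsupportedMallocFlags instead of silently dropping the flag; the later changeset r50514 below then adds real support for it.

    from pypy.rpython.lltypesystem import lltype

    S = lltype.Struct('S', ('x', lltype.Signed))    # hypothetical struct

    def f():
        # raw allocation that opts out of the leak/allocation tracker
        p = lltype.malloc(S, flavor='raw', track_allocation=False)
        p.x = 42
        n = p.x
        # the extra 'track_allocation' flag is what rewrite_op_free() now detects
        lltype.free(p, flavor='raw', track_allocation=False)
        return n
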
diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -531,8 +531,11 @@ op.result) def rewrite_op_free(self, op): - flags = op.args[1].value - assert flags['flavor'] == 'raw' + d = op.args[1].value.copy() + assert d['flavor'] == 'raw' + d.pop('flavor') + if d: + raise UnsupportedMallocFlags(d) ARRAY = op.args[0].concretetype.TO return self._do_builtin_call(op, 'raw_free', [op.args[0]], extra = (ARRAY,), extrakey = ARRAY) From noreply at buildbot.pypy.org Wed Dec 14 18:07:54 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Dec 2011 18:07:54 +0100 (CET) Subject: [pypy-commit] pypy default: Support "malloc(STRUCT, flavor='raw')" too. Message-ID: <20111214170754.EAC8182ABD@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50510:0af6042f9ef6 Date: 2011-12-14 17:23 +0100 http://bitbucket.org/pypy/pypy/changeset/0af6042f9ef6/ Log: Support "malloc(STRUCT, flavor='raw')" too. diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -498,27 +498,29 @@ else: log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) + def _rewrite_raw_malloc(self, op, name, args): + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) + TYPE = op.args[0].value + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, args, + extra = (TYPE,), + extrakey = TYPE) + def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': - d = op.args[1].value.copy() - d.pop('flavor') - add_memory_pressure = d.pop('add_memory_pressure', False) - zero = d.pop('zero', False) - track_allocation = d.pop('track_allocation', True) - if d: - raise UnsupportedMallocFlags(d) - ARRAY = op.args[0].value - name = 'raw_malloc' - if zero: - name += '_zero' - if add_memory_pressure: - name += '_add_memory_pressure' - if not track_allocation: - name += '_no_track_allocation' - return self._do_builtin_call(op, name, - [op.args[2]], - extra = (ARRAY,), - extrakey = ARRAY) + return self._rewrite_raw_malloc(op, 'raw_malloc_varsize', + [op.args[2]]) if op.args[0].value == rstr.STR: return SpaceOperation('newstr', [op.args[2]], op.result) elif op.args[0].value == rstr.UNICODE: @@ -739,6 +741,9 @@ return [op0, op1] def rewrite_op_malloc(self, op): + if op.args[1].value['flavor'] == 'raw': + return self._rewrite_raw_malloc(op, 'raw_malloc_fixedsize', []) + # assert op.args[1].value == {'flavor': 'gc'} STRUCT = op.args[0].value vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, STRUCT) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -599,21 +599,59 @@ return p return _ll_0_alloc_with_del - def build_raw_malloc_builder(zero=False, add_memory_pressure=False, track_allocation=True): - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, add_memory_pressure=add_memory_pressure) - return _ll_1_raw_malloc - return build_ll_1_raw_malloc + def build_raw_malloc_varsize_builder(zero=False, + add_memory_pressure=False, + 
track_allocation=True): + def build_ll_1_raw_malloc_varsize(ARRAY): + def _ll_1_raw_malloc_varsize(n): + return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, + add_memory_pressure=add_memory_pressure) + return _ll_1_raw_malloc_varsize + return build_ll_1_raw_malloc_varsize - build_ll_1_raw_malloc = build_raw_malloc_builder() - build_ll_1_raw_malloc_zero = build_raw_malloc_builder(zero=True) - build_ll_1_raw_malloc_zero_add_memory_pressure = build_raw_malloc_builder(zero=True, add_memory_pressure=True) - build_ll_1_raw_malloc_add_memory_pressure = build_raw_malloc_builder(add_memory_pressure=True) - build_ll_1_raw_malloc_no_track_allocation = build_raw_malloc_builder(track_allocation=False) - build_ll_1_raw_malloc_zero_no_track_allocation = build_raw_malloc_builder(zero=True, track_allocation=False) - build_ll_1_raw_malloc_zero_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(zero=True, add_memory_pressure=True, track_allocation=False) - build_ll_1_raw_malloc_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(add_memory_pressure=True, track_allocation=False) + build_ll_1_raw_malloc_varsize = ( + build_raw_malloc_varsize_builder()) + build_ll_1_raw_malloc_varsize_zero = ( + build_raw_malloc_varsize_builder(zero=True)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure = ( + build_raw_malloc_varsize_builder(zero=True, add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_add_memory_pressure = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_no_track_allocation = ( + build_raw_malloc_varsize_builder(track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, add_memory_pressure=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True, track_allocation=False)) + + def build_raw_malloc_fixedsize_builder(zero=False, + add_memory_pressure=False, + track_allocation=True): + def build_ll_0_raw_malloc_fixedsize(STRUCT): + def _ll_0_raw_malloc_fixedsize(): + return lltype.malloc(STRUCT, flavor='raw', zero=zero, + add_memory_pressure=add_memory_pressure) + return _ll_0_raw_malloc_fixedsize + return build_ll_0_raw_malloc_fixedsize + + build_ll_0_raw_malloc_fixedsize = ( + build_raw_malloc_fixedsize_builder()) + build_ll_0_raw_malloc_fixedsize_zero = ( + build_raw_malloc_fixedsize_builder(zero=True)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(zero=True, add_memory_pressure=True)) + build_ll_0_raw_malloc_fixedsize_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True)) + build_ll_0_raw_malloc_fixedsize_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, add_memory_pressure=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True, track_allocation=False)) def build_ll_1_raw_free(ARRAY): def 
_ll_1_raw_free(p): diff --git a/pypy/jit/codewriter/test/test_codewriter.py b/pypy/jit/codewriter/test/test_codewriter.py --- a/pypy/jit/codewriter/test/test_codewriter.py +++ b/pypy/jit/codewriter/test/test_codewriter.py @@ -217,7 +217,7 @@ cw.make_jitcodes(verbose=True) # s = jitdriver_sd.mainjitcode.dump() - assert 'residual_call_ir_i $<* fn _ll_1_raw_malloc__Signed>' in s + assert 'residual_call_ir_i $<* fn _ll_1_raw_malloc_varsize__Signed>' in s assert 'setarrayitem_raw_i' in s assert 'getarrayitem_raw_i' in s assert 'residual_call_ir_v $<* fn _ll_1_raw_free__arrayPtr>' in s diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -550,7 +550,7 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) op0, op1 = tr.rewrite_operation(op) assert op0.opname == 'residual_call_ir_i' - assert op0.args[0].value == 'raw_malloc' # pseudo-function as a str + assert op0.args[0].value == 'raw_malloc_varsize' # pseudo-function as a str assert op1.opname == '-live-' assert op1.args == [] @@ -564,7 +564,7 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) op0, op1 = tr.rewrite_operation(op) assert op0.opname == 'residual_call_ir_i' - assert op0.args[0].value == 'raw_malloc_zero' # pseudo-function as a str + assert op0.args[0].value == 'raw_malloc_varsize_zero' # pseudo-fn as a str assert op1.opname == '-live-' assert op1.args == [] @@ -578,6 +578,18 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) +def test_raw_malloc_fixedsize(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc', [Constant(S, lltype.Void), flags], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_r_i' + assert op0.args[0].value == 'raw_malloc_fixedsize_zero' #pseudo-fn as a str + assert op1.opname == '-live-' + assert op1.args == [] + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address diff --git a/pypy/jit/metainterp/test/test_rawmem.py b/pypy/jit/metainterp/test/test_rawmem.py --- a/pypy/jit/metainterp/test/test_rawmem.py +++ b/pypy/jit/metainterp/test/test_rawmem.py @@ -8,7 +8,7 @@ VOID_TP = lltype.Array(lltype.Void, hints={"nolength": True, "uncast_on_llgraph": True}) class A(object): def __init__(self, x): - self.storage = rffi.cast(lltype.Ptr(VOID_TP), x)\ + self.storage = rffi.cast(lltype.Ptr(VOID_TP), x) def f(n): x = lltype.malloc(TP, n, flavor="raw", zero=True) @@ -19,4 +19,14 @@ lltype.free(x, flavor="raw") return s res = self.interp_operations(f, [10]) - assert res == 1.0 \ No newline at end of file + + def test_fixed_size_malloc(self): + TIMEVAL = lltype.Struct('dummy', ('tv_sec', rffi.LONG), ('tv_usec', rffi.LONG)) + def f(): + p = lltype.malloc(TIMEVAL, flavor='raw') + lltype.free(p, flavor='raw') + return 42 + res = self.interp_operations(f, []) + assert res == 42 + self.check_operations_history({'call': 2, 'guard_no_exception': 1, + 'finish': 1}) From noreply at buildbot.pypy.org Wed Dec 14 18:07:56 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Dec 2011 18:07:56 +0100 (CET) Subject: [pypy-commit] pypy default: Changeset by tumbleweed: we are seeing this on Linux too, Message-ID: 
<20111214170756.19D4582210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50511:b907a1252c68 Date: 2011-12-14 18:05 +0100 http://bitbucket.org/pypy/pypy/changeset/b907a1252c68/ Log: Changeset by tumbleweed: we are seeing this on Linux too, not only on Darwin. Obscure. diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -161,11 +161,16 @@ def test_shutdown(self): import socket, ssl, sys, gc - if sys.platform == 'darwin': - skip("get also on CPython: error: [Errno 0]") ss = socket.ssl(self.s) ss.write("hello\n") - assert ss.shutdown() is self.s._sock + try: + result = ss.shutdown() + except socket.error, e: + # xxx obscure case; throwing errno 0 is pretty odd... + if e.errno == 0: + skip("Shutdown raised errno 0. CPython does this too") + raise + assert result is self.s._sock raises(ssl.SSLError, ss.write, "hello\n") del ss; gc.collect() From noreply at buildbot.pypy.org Wed Dec 14 18:15:44 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Dec 2011 18:15:44 +0100 (CET) Subject: [pypy-commit] pypy default: See comments. Thanks tumbleweed Message-ID: <20111214171544.EABA582210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50512:0eea2bcfa892 Date: 2011-12-14 18:15 +0100 http://bitbucket.org/pypy/pypy/changeset/0eea2bcfa892/ Log: See comments. Thanks tumbleweed diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -656,7 +656,11 @@ os.fsync(f) # <- should also work with a file, or anything finally: # with a fileno() method f.close() - raises(OSError, os.fsync, fd) + try: + # May not raise anything with a buggy libc (or eatmydata) + os.fsync(fd) + except OSError: + pass raises(ValueError, os.fsync, -1) if hasattr(os, 'fdatasync'): @@ -668,7 +672,11 @@ os.fdatasync(fd) finally: f.close() - raises(OSError, os.fdatasync, fd) + try: + # May not raise anything with a buggy libc (or eatmydata) + os.fdatasync(fd) + except OSError: + pass raises(ValueError, os.fdatasync, -1) if hasattr(os, 'fchdir'): From noreply at buildbot.pypy.org Wed Dec 14 18:29:50 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 14 Dec 2011 18:29:50 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: skip some tests and make the rest pass. Broadcasting and FlatIter unsupported Message-ID: <20111214172950.C32EA82210@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50513:16065f56db0c Date: 2011-12-14 19:29 +0200 http://bitbucket.org/pypy/pypy/changeset/16065f56db0c/ Log: skip some tests and make the rest pass. 
Broadcasting and FlatIter unsupported so far diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -7,7 +7,8 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name from pypy.rlib.rstring import StringBuilder -from pypy.module.micronumpy.interp_iter import ArrayIterator, ViewIterator +from pypy.module.micronumpy.interp_iter import ArrayIterator, ViewIterator,\ + OneDimIterator numpy_driver = jit.JitDriver( greens=['shapelen', 'sig'], @@ -1214,11 +1215,12 @@ size = 1 for sh in arr.shape: size *= sh - ViewArray.__init__(self, arr, [arr.strides[-1]], + ViewArray.__init__(self, arr.get_concrete(), [arr.strides[-1]], [arr.backstrides[-1]], [size]) self.shapelen = len(arr.shape) self.arr = arr - self.iter = self.start_iter() + self.iter = OneDimIterator(self.arr.start, self.strides[0], + arr.shape[0]) def find_dtype(self): return self.arr.find_dtype() diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -69,9 +69,9 @@ cache[self] = no self.iter_no = no - def create_frame(self, arr, res_shape=None): + def create_frame(self, arr): iterlist = [] - self._create_iter(iterlist, arr, res_shape) + self._create_iter(iterlist, arr) return NumpyEvalFrame(iterlist) class ConcreteSignature(Signature): @@ -90,10 +90,9 @@ def debug_repr(self): return 'Array' - def _create_iter(self, iterlist, arr, res_shape): + def _create_iter(self, iterlist, arr): if self.iter_no >= len(iterlist): - iter = ArrayIterator(arr.size) - iterlist.append(iter) + iterlist.append(ArrayIterator(arr.size)) def eval(self, frame, arr): arr = arr.get_concrete() @@ -104,7 +103,7 @@ def debug_repr(self): return 'Scalar' - def _create_iter(self, iterlist, arr, res_shape): + def _create_iter(self, iterlist, arr): if self.iter_no >= len(iterlist): iter = ConstantIterator() iterlist.append(iter) @@ -127,10 +126,9 @@ def debug_repr(self): return 'Slice(%s)' % self.child.debug_repr() - def _create_iter(self, iterlist, arr, res_shape): + def _create_iter(self, iterlist, arr): if self.iter_no >= len(iterlist): - iter = ViewIterator(arr) - iterlist.append(iter) + iterlist.append(ViewIterator(arr)) def eval(self, frame, arr): arr = arr.get_concrete() @@ -141,7 +139,7 @@ def debug_repr(self): return 'FlatIter(%s)' % self.child.debug_repr() - def _create_iter(self, iterlist, arr, res_shape): + def _create_iter(self, iterlist, arr): XXX class Call1(Signature): @@ -163,8 +161,8 @@ def _invent_numbering(self, cache): self.child._invent_numbering(cache) - def _create_iter(self, iterlist, arr, res_shape): - self.child._create_iter(iterlist, arr.values, res_shape) + def _create_iter(self, iterlist, arr): + self.child._create_iter(iterlist, arr.values) def eval(self, frame, arr): v = self.child.eval(frame, arr.values).convert_to(arr.res_dtype) @@ -190,9 +188,9 @@ self.left._invent_numbering(cache) self.right._invent_numbering(cache) - def _create_iter(self, iterlist, arr, res_shape): - self.left._create_iter(iterlist, arr.left, res_shape) - self.right._create_iter(iterlist, arr.right, res_shape) + def _create_iter(self, iterlist, arr): + self.left._create_iter(iterlist, arr.left) + self.right._create_iter(iterlist, arr.right) def eval(self, frame, arr): lhs = self.left.eval(frame, arr.left).convert_to(arr.calc_dtype) @@ -204,8 +202,8 @@ 
self.right.debug_repr()) class ReduceSignature(Call2): - def _create_iter(self, iterlist, arr, res_shape): - self.right._create_iter(iterlist, arr, res_shape) + def _create_iter(self, iterlist, arr): + self.right._create_iter(iterlist, arr) def _invent_numbering(self, cache): self.right._invent_numbering(cache) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1001,6 +1001,7 @@ assert a[0, 1, 2] == 1.0 def test_broadcast_ufunc(self): + skip("broadcast unsupported") from numpypy import array a = array([[1, 2], [3, 4], [5, 6]]) b = array([5, 6]) @@ -1008,6 +1009,7 @@ assert c.all() def test_broadcast_setslice(self): + skip("broadcast unsupported") from numpypy import zeros, ones a = zeros((100, 100)) b = ones(100) @@ -1015,6 +1017,7 @@ assert a[13, 15] == 1 def test_broadcast_shape_agreement(self): + skip("broadcast unsupported") from numpypy import zeros, array a = zeros((3, 1, 3)) b = array(((10, 11, 12), (20, 21, 22), (30, 31, 32))) @@ -1029,6 +1032,7 @@ assert c.all() def test_broadcast_scalar(self): + skip("broadcast unsupported") from numpypy import zeros a = zeros((4, 5), 'd') a[:, 1] = 3 @@ -1040,6 +1044,7 @@ assert a[3, 2] == 0 def test_broadcast_call2(self): + skip("broadcast unsupported") from numpypy import zeros, ones a = zeros((4, 1, 5)) b = ones((4, 3, 5)) @@ -1088,6 +1093,7 @@ assert(b[:, 0] == a[0, :]).all() def test_flatiter(self): + skip("unsupported") from numpypy import array, flatiter a = array([[10, 30], [40, 60]]) f_iter = a.flat @@ -1103,6 +1109,7 @@ assert s == 140 def test_flatiter_array_conv(self): + skip("unsupported") from numpypy import array, dot a = array([1, 2, 3]) assert dot(a.flat, a.flat) == 14 From noreply at buildbot.pypy.org Wed Dec 14 20:41:09 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Dec 2011 20:41:09 +0100 (CET) Subject: [pypy-commit] pypy default: Fix the missing case of raw_free(track_allocation=False). Message-ID: <20111214194109.A502982210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50514:6e98be6147f6 Date: 2011-12-14 18:17 +0100 http://bitbucket.org/pypy/pypy/changeset/6e98be6147f6/ Log: Fix the missing case of raw_free(track_allocation=False). 
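A minimal sketch, not the real code, of the naming scheme this changeset extends to raw_free: the codewriter maps each combination of malloc/free flags to a distinct pseudo-function name, and support.py has to provide a matching builder for every name that can come out. The helper below only mirrors the string building visible in the diffs; the real _rewrite_raw_malloc() and rewrite_op_free() additionally validate the flags and choose the argument list.

    def _helper_name(base, zero=False, add_memory_pressure=False,
                     track_allocation=True):
        # 'base' is e.g. 'raw_malloc_fixedsize', 'raw_malloc_varsize'
        # or 'raw_free', as in jtransform.py
        name = base
        if zero:
            name += '_zero'
        if add_memory_pressure:
            name += '_add_memory_pressure'
        if not track_allocation:
            name += '_no_track_allocation'
        return name

    assert _helper_name('raw_free', track_allocation=False) == \
                'raw_free_no_track_allocation'
    assert _helper_name('raw_malloc_fixedsize', zero=True) == \
                'raw_malloc_fixedsize_zero'
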
diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -536,11 +536,15 @@ d = op.args[1].value.copy() assert d['flavor'] == 'raw' d.pop('flavor') + track_allocation = d.pop('track_allocation', True) if d: raise UnsupportedMallocFlags(d) - ARRAY = op.args[0].concretetype.TO - return self._do_builtin_call(op, 'raw_free', [op.args[0]], - extra = (ARRAY,), extrakey = ARRAY) + STRUCT = op.args[0].concretetype.TO + name = 'raw_free' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[0]], + extra = (STRUCT,), extrakey = STRUCT) def rewrite_op_getarrayitem(self, op): ARRAY = op.args[0].concretetype.TO diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -605,7 +605,8 @@ def build_ll_1_raw_malloc_varsize(ARRAY): def _ll_1_raw_malloc_varsize(n): return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, - add_memory_pressure=add_memory_pressure) + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) return _ll_1_raw_malloc_varsize return build_ll_1_raw_malloc_varsize @@ -632,7 +633,8 @@ def build_ll_0_raw_malloc_fixedsize(STRUCT): def _ll_0_raw_malloc_fixedsize(): return lltype.malloc(STRUCT, flavor='raw', zero=zero, - add_memory_pressure=add_memory_pressure) + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) return _ll_0_raw_malloc_fixedsize return build_ll_0_raw_malloc_fixedsize @@ -653,10 +655,19 @@ build_ll_0_raw_malloc_fixedsize_add_memory_pressure_no_track_allocation = ( build_raw_malloc_fixedsize_builder(add_memory_pressure=True, track_allocation=False)) - def build_ll_1_raw_free(ARRAY): - def _ll_1_raw_free(p): - lltype.free(p, flavor='raw') - return _ll_1_raw_free + def build_raw_free_builder(track_allocation=True): + def build_ll_1_raw_free(ARRAY): + def _ll_1_raw_free(p): + lltype.free(p, flavor='raw', + track_allocation=track_allocation) + return _ll_1_raw_free + return build_ll_1_raw_free + + build_ll_1_raw_free = ( + build_raw_free_builder()) + build_ll_1_raw_free_no_track_allocation = ( + build_raw_free_builder(track_allocation=False)) + class OOtypeHelpers: diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -590,6 +590,23 @@ assert op1.opname == '-live-' assert op1.args == [] +def test_raw_free(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + for flag in [True, False]: + flags = Constant({'flavor': 'raw', 'track_allocation': flag}, + lltype.Void) + op = SpaceOperation('free', [varoftype(lltype.Ptr(S)), flags], + varoftype(lltype.Void)) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_v' + if flag: + pseudo_op_name = 'raw_free' + else: + pseudo_op_name = 'raw_free_no_track_allocation' + assert op0.args[0].value == pseudo_op_name # pseudo-function as a str + assert op1.opname == '-live-' + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address From noreply at buildbot.pypy.org Wed Dec 14 20:41:10 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Dec 2011 20:41:10 +0100 (CET) Subject: [pypy-commit] pypy default: issue963 resolved Message-ID: 
<20111214194110.DC44882210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50515:46c074e2da29 Date: 2011-12-14 20:37 +0100 http://bitbucket.org/pypy/pypy/changeset/46c074e2da29/ Log: issue963 resolved Similarly to not calling compute_unique_id() on the immutable objects, we should avoid calling compute_identity_hash() on them. Test and fix. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -190,8 +190,8 @@ def is_w(self, space, w_other): return self is w_other - def unique_id(self, space): - return space.wrap(compute_unique_id(self)) + def immutable_unique_id(self, space): + return None def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) @@ -706,7 +706,10 @@ return w_two.is_w(self, w_one) def id(self, w_obj): - return w_obj.unique_id(self) + w_result = w_obj.immutable_unique_id(self) + if w_result is None: + w_result = self.wrap(compute_unique_id(w_obj)) + return w_result def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -54,7 +54,11 @@ # Hash support def default_identity_hash(space, w_obj): - return space.wrap(compute_identity_hash(w_obj)) + w_unique_id = w_obj.immutable_unique_id(space) + if w_unique_id is None: # common case + return space.wrap(compute_identity_hash(w_obj)) + else: + return space.hash(w_unique_id) # ____________________________________________________________ # diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -31,9 +31,9 @@ imag2 = float2longlong(imag2) return real1 == real2 and imag1 == imag2 - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.rlib.longlong2float import float2longlong from pypy.objspace.std.model import IDTAG_COMPLEX as tag real = space.float_w(space.getattr(self, space.wrap("real"))) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -34,9 +34,9 @@ two = float2longlong(space.float_w(w_other)) return one == two - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.rlib.longlong2float import float2longlong from pypy.objspace.std.model import IDTAG_FLOAT as tag val = float2longlong(space.float_w(self)) diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -26,9 +26,9 @@ return self is w_other return space.int_w(self) == space.int_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.objspace.std.model import IDTAG_INT as tag b = space.bigint_w(self) b = b.lshift(3).or_(rbigint.fromint(tag)) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -18,9 +18,9 @@ return self is w_other return space.bigint_w(self).eq(space.bigint_w(w_other)) - def unique_id(self, space): 
+ def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.objspace.std.model import IDTAG_LONG as tag b = space.bigint_w(self) b = b.lshift(3).or_(rbigint.fromint(tag)) diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -32,9 +32,9 @@ return False return space.str_w(self) is space.str_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None return space.wrap(compute_unique_id(space.str_w(self))) diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -253,6 +253,12 @@ y = 2j assert id(x) != id(y) + def test_object_hash_immutable(self): + x = 42 + y = 40 + y += 2 + assert object.__hash__(x) == object.__hash__(y) + def test_isinstance_shortcut(): from pypy.objspace.std import objspace diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -32,9 +32,9 @@ return False return space.unicode_w(self) is space.unicode_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None return space.wrap(compute_unique_id(space.unicode_w(self))) From noreply at buildbot.pypy.org Wed Dec 14 21:31:04 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 14 Dec 2011 21:31:04 +0100 (CET) Subject: [pypy-commit] pypy default: Don't inline rffi.free_nonmovingbuffer in the JIT, it just raise an error during translation. Message-ID: <20111214203104.C35A882210@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50516:0be914199ee6 Date: 2011-12-14 15:30 -0500 http://bitbucket.org/pypy/pypy/changeset/0be914199ee6/ Log: Don't inline rffi.free_nonmovingbuffer in the JIT, it just raise an error during translation. diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -16,6 +16,7 @@ from pypy.rpython.annlowlevel import llhelper from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.rstring import StringBuilder, UnicodeBuilder +from pypy.rlib import jit from pypy.rpython.lltypesystem import llmemory import os, sys @@ -249,8 +250,7 @@ wrapper = func_with_new_name(wrapper, name) if calling_conv != "c": - from pypy.rlib.jit import dont_look_inside - wrapper = dont_look_inside(wrapper) + wrapper = jit.dont_look_inside(wrapper) return wrapper @@ -717,6 +717,8 @@ get_nonmovingbuffer._annenforceargs_ = [strtype] # (str, char*) -> None + # Can't inline this because of the raw address manipulation. + @jit.dont_look_inside def free_nonmovingbuffer(data, buf): """ Either free a non-moving buffer or keep the original storage alive. 
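A hedged sketch of the pattern used in the changeset above; the helper below is made up, only the import and the decorator come from the diff. Code that manipulates raw addresses cannot be traced, so it is marked with @jit.dont_look_inside and ends up in traces as an ordinary residual call rather than failing during translation.

    from pypy.rlib import jit
    from pypy.rpython.lltypesystem import lltype, rffi

    @jit.dont_look_inside
    def make_zeroed_raw_buffer(size):
        # raw buffer manipulation: opaque to the tracer, compiled as a
        # residual call, just like free_nonmovingbuffer() above
        buf = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')
        for i in range(size):
            buf[i] = '\x00'
        return buf
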
From noreply at buildbot.pypy.org Wed Dec 14 21:31:06 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 14 Dec 2011 21:31:06 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20111214203106.2E01B82210@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50517:7907ec2d1927 Date: 2011-12-14 15:30 -0500 http://bitbucket.org/pypy/pypy/changeset/7907ec2d1927/ Log: merged upstream diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -190,8 +190,8 @@ def is_w(self, space, w_other): return self is w_other - def unique_id(self, space): - return space.wrap(compute_unique_id(self)) + def immutable_unique_id(self, space): + return None def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) @@ -706,7 +706,10 @@ return w_two.is_w(self, w_one) def id(self, w_obj): - return w_obj.unique_id(self) + w_result = w_obj.immutable_unique_id(self) + if w_result is None: + w_result = self.wrap(compute_unique_id(w_obj)) + return w_result def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -54,7 +54,11 @@ # Hash support def default_identity_hash(space, w_obj): - return space.wrap(compute_identity_hash(w_obj)) + w_unique_id = w_obj.immutable_unique_id(space) + if w_unique_id is None: # common case + return space.wrap(compute_identity_hash(w_obj)) + else: + return space.hash(w_unique_id) # ____________________________________________________________ # diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -536,11 +536,15 @@ d = op.args[1].value.copy() assert d['flavor'] == 'raw' d.pop('flavor') + track_allocation = d.pop('track_allocation', True) if d: raise UnsupportedMallocFlags(d) - ARRAY = op.args[0].concretetype.TO - return self._do_builtin_call(op, 'raw_free', [op.args[0]], - extra = (ARRAY,), extrakey = ARRAY) + STRUCT = op.args[0].concretetype.TO + name = 'raw_free' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[0]], + extra = (STRUCT,), extrakey = STRUCT) def rewrite_op_getarrayitem(self, op): ARRAY = op.args[0].concretetype.TO diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -605,7 +605,8 @@ def build_ll_1_raw_malloc_varsize(ARRAY): def _ll_1_raw_malloc_varsize(n): return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, - add_memory_pressure=add_memory_pressure) + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) return _ll_1_raw_malloc_varsize return build_ll_1_raw_malloc_varsize @@ -632,7 +633,8 @@ def build_ll_0_raw_malloc_fixedsize(STRUCT): def _ll_0_raw_malloc_fixedsize(): return lltype.malloc(STRUCT, flavor='raw', zero=zero, - add_memory_pressure=add_memory_pressure) + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) return _ll_0_raw_malloc_fixedsize return build_ll_0_raw_malloc_fixedsize @@ -653,10 +655,19 @@ build_ll_0_raw_malloc_fixedsize_add_memory_pressure_no_track_allocation = ( build_raw_malloc_fixedsize_builder(add_memory_pressure=True, track_allocation=False)) - def build_ll_1_raw_free(ARRAY): - def 
_ll_1_raw_free(p): - lltype.free(p, flavor='raw') - return _ll_1_raw_free + def build_raw_free_builder(track_allocation=True): + def build_ll_1_raw_free(ARRAY): + def _ll_1_raw_free(p): + lltype.free(p, flavor='raw', + track_allocation=track_allocation) + return _ll_1_raw_free + return build_ll_1_raw_free + + build_ll_1_raw_free = ( + build_raw_free_builder()) + build_ll_1_raw_free_no_track_allocation = ( + build_raw_free_builder(track_allocation=False)) + class OOtypeHelpers: diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -590,6 +590,23 @@ assert op1.opname == '-live-' assert op1.args == [] +def test_raw_free(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + for flag in [True, False]: + flags = Constant({'flavor': 'raw', 'track_allocation': flag}, + lltype.Void) + op = SpaceOperation('free', [varoftype(lltype.Ptr(S)), flags], + varoftype(lltype.Void)) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_v' + if flag: + pseudo_op_name = 'raw_free' + else: + pseudo_op_name = 'raw_free_no_track_allocation' + assert op0.args[0].value == pseudo_op_name # pseudo-function as a str + assert op1.opname == '-live-' + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -31,9 +31,9 @@ imag2 = float2longlong(imag2) return real1 == real2 and imag1 == imag2 - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.rlib.longlong2float import float2longlong from pypy.objspace.std.model import IDTAG_COMPLEX as tag real = space.float_w(space.getattr(self, space.wrap("real"))) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -34,9 +34,9 @@ two = float2longlong(space.float_w(w_other)) return one == two - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.rlib.longlong2float import float2longlong from pypy.objspace.std.model import IDTAG_FLOAT as tag val = float2longlong(space.float_w(self)) diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -26,9 +26,9 @@ return self is w_other return space.int_w(self) == space.int_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.objspace.std.model import IDTAG_INT as tag b = space.bigint_w(self) b = b.lshift(3).or_(rbigint.fromint(tag)) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -18,9 +18,9 @@ return self is w_other return space.bigint_w(self).eq(space.bigint_w(w_other)) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.objspace.std.model 
import IDTAG_LONG as tag b = space.bigint_w(self) b = b.lshift(3).or_(rbigint.fromint(tag)) diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -32,9 +32,9 @@ return False return space.str_w(self) is space.str_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None return space.wrap(compute_unique_id(space.str_w(self))) diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -253,6 +253,12 @@ y = 2j assert id(x) != id(y) + def test_object_hash_immutable(self): + x = 42 + y = 40 + y += 2 + assert object.__hash__(x) == object.__hash__(y) + def test_isinstance_shortcut(): from pypy.objspace.std import objspace diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -32,9 +32,9 @@ return False return space.unicode_w(self) is space.unicode_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None return space.wrap(compute_unique_id(space.unicode_w(self))) From noreply at buildbot.pypy.org Wed Dec 14 21:48:37 2011 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 14 Dec 2011 21:48:37 +0100 (CET) Subject: [pypy-commit] pypy numpypy-frompyfunc: more tests, and a failing one Message-ID: <20111214204837.7562282210@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-frompyfunc Changeset: r50518:4136f83fdb42 Date: 2011-12-13 21:52 +0200 http://bitbucket.org/pypy/pypy/changeset/4136f83fdb42/ Log: more tests, and a failing one diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -26,6 +26,10 @@ greens=['shapelen', 'signature'], reds=['self', 'source', 'source_iter', 'res_iter'] ) +ufunc_driver = jit.JitDriver( + greens=['shapelen', 'signature'], + reds=['result_size', 'i', 'ri', 'arr', 'result'] +) def _find_shape_and_elems(space, w_iterable): shape = [space.len_w(w_iterable)] @@ -1571,10 +1575,10 @@ shapelen = len(arr.shape) result_size = arr.find_size() while not ri.done(): - #numpy_driver.jit_merge_point(signature=signature, - # shapelen=shapelen, - # result_size=result_size, i=i, ri=ri, - # self=self, result=result) + ufunc_driver.jit_merge_point(signature=signature, + shapelen=shapelen, + result_size=result_size, i=i, ri=ri, + arr=arr, result=result) result.dtype.setitem(result.storage, ri.offset, space.call_function(self.w_func, arr.eval(i))) i = i.next(shapelen) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -380,8 +380,31 @@ assert (ufunc([-1, 0, 3, 15]) == [1, 0, 3, 15]).all() def test_frompyfunc_foo(self): + from numpypy import frompyfunc, array def foo(x): return x * x + 1 + def bar(x): + return x + 1 + ufunc = frompyfunc(foo, 1, 1) + assert (ufunc(range(10)) == array(range(10)) * range(10) + 1).all() + #Make sure the user-visible function does not modify the ufunc + foo = bar + assert (ufunc(range(10)) == array(range(10)) * 
range(10) + 1).all() + #but messing with the func_code WILL change it: numpy is sensitive + #to this in the same way + def foo(x): + return x * x + 1 + def bar(x): + return x + 1 from numpypy import frompyfunc, array ufunc = frompyfunc(foo, 1, 1) assert (ufunc(range(10)) == array(range(10)) * range(10) + 1).all() + foo.func_code = bar.func_code + assert not (ufunc(range(10)) == array(range(10)) * range(10) + 1).all() + def test_frompyfunc_broadcast(self): + from numpypy import frompyfunc, array + def foo(x, y): + return x * y + 1 + ufunc = frompyfunc(foo, 2, 1) + assert (ufunc(range(10),range(10)) == array(range(10)) * range(10) + 1).all() + From noreply at buildbot.pypy.org Wed Dec 14 21:48:38 2011 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 14 Dec 2011 21:48:38 +0100 (CET) Subject: [pypy-commit] pypy numpypy-frompyfunc: not yet rpython Message-ID: <20111214204838.A5A0482210@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-frompyfunc Changeset: r50519:6bb82e4bc325 Date: 2011-12-14 21:16 +0200 http://bitbucket.org/pypy/pypy/changeset/6bb82e4bc325/ Log: not yet rpython diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -26,10 +26,6 @@ greens=['shapelen', 'signature'], reds=['self', 'source', 'source_iter', 'res_iter'] ) -ufunc_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['result_size', 'i', 'ri', 'arr', 'result'] -) def _find_shape_and_elems(space, w_iterable): shape = [space.len_w(w_iterable)] @@ -1557,31 +1553,43 @@ class W_FromPyFunc(Wrappable): def __init__(self, space, w_func, w_nIn, w_nOut): - self.w_func = w_func - if space.int_w(w_nIn) != 1 or space.int_w(w_nOut) != 1: - raise OperationError(space.w_NotImplementedError, space.wrap('')) self.nIn = space.int_w(w_nIn) self.nOut = space.int_w(w_nOut) + if self.nOut != 1: + raise OperationError(space.w_NotImplementedError, space.wrap('')) + self.signature = signature.CallPyFunc(w_func) + # should check that the nIn and nOut match the function signature, + # but how? + self.w_func = w_func def descr__new__(space, w_subtype, w_func, w_nIn, w_nOut): return space.wrap(W_FromPyFunc(space, w_func, w_nIn, w_nOut)) - def descr_call(self, space, w_arrlike): - arr = convert_to_array(space, w_arrlike) - result = W_NDimArray(arr.find_size(), arr.shape[:], dtype=arr.find_dtype(), - order=arr.order) - i = arr.start_iter() + def descr_call(self, space, args_w): + if len(args_w) != self.nIn: + raise OperationError(space.w_ValueError, space.wrap( + 'invalid number of arguments')) + if self.nIn == 0: + return space.wrap(space.call_function(self.w_func)) + arr_s = [convert_to_array(space, a) for a in args_w] + result = W_NDimArray(arr_s[0].find_size(), arr_s[0].shape[:], + dtype=arr_s[0].find_dtype(), order=arr_s[0].order) + i_s = [a.start_iter() for a in arr_s] ri = result.start_iter() - shapelen = len(arr.shape) - result_size = arr.find_size() + shapelen = len(result.shape) + result_size = result.find_size() + signature = self.signature + # TODO: use the signature to return a VirtualArray (lazy eval) + # TODO: what about shape mismatch and broadcasting if nIn > 1? 
while not ri.done(): - ufunc_driver.jit_merge_point(signature=signature, - shapelen=shapelen, - result_size=result_size, i=i, ri=ri, - arr=arr, result=result) - result.dtype.setitem(result.storage, ri.offset, - space.call_function(self.w_func, arr.eval(i))) - i = i.next(shapelen) + if len(arr_s) == 1: + result.dtype.setitem(result.storage, ri.offset, + space.call_function(self.w_func, arr_s[0].eval(i_s[0]))) + else: + result.dtype.setitem(result.storage, ri.offset, + space.call_function(self.w_func, + *[a.eval(i) for a,i in zip(arr_s, i_s)])) + i_s = [i.next(shapelen) for i in i_s] ri = ri.next(shapelen) return space.wrap(result) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -52,3 +52,10 @@ def __init__(self, func): self.func = func self.name = func.func_name + +class CallPyFunc(BaseSignature): + _immutable_fields_ = ["func", "name"] + + def __init__(self, func): + self.func = func + self.name = func.name From noreply at buildbot.pypy.org Wed Dec 14 21:48:39 2011 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 14 Dec 2011 21:48:39 +0100 (CET) Subject: [pypy-commit] pypy numpypy-frompyfunc: tests, translate passes Message-ID: <20111214204839.E07B982210@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-frompyfunc Changeset: r50520:66a6917800a8 Date: 2011-12-14 22:47 +0200 http://bitbucket.org/pypy/pypy/changeset/66a6917800a8/ Log: tests, translate passes diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1552,15 +1552,18 @@ class W_FromPyFunc(Wrappable): + _attrs_ = ['nIn', 'nOut', 'signature'] + _immutable_fields_ = ['nIn', 'nOut', 'signature'] + def __init__(self, space, w_func, w_nIn, w_nOut): self.nIn = space.int_w(w_nIn) self.nOut = space.int_w(w_nOut) if self.nOut != 1: raise OperationError(space.w_NotImplementedError, space.wrap('')) - self.signature = signature.CallPyFunc(w_func) + self.signature = signature.CallPyFunc(space, w_func) + # TODO: once we have lazy eval kill next line # should check that the nIn and nOut match the function signature, # but how? 
- self.w_func = w_func def descr__new__(space, w_subtype, w_func, w_nIn, w_nOut): return space.wrap(W_FromPyFunc(space, w_func, w_nIn, w_nOut)) @@ -1570,9 +1573,9 @@ raise OperationError(space.w_ValueError, space.wrap( 'invalid number of arguments')) if self.nIn == 0: - return space.wrap(space.call_function(self.w_func)) + return space.call(self.signature.w_func, space.newlist([])) arr_s = [convert_to_array(space, a) for a in args_w] - result = W_NDimArray(arr_s[0].find_size(), arr_s[0].shape[:], + result = W_NDimArray(arr_s[0].find_size(), arr_s[0].shape[:], dtype=arr_s[0].find_dtype(), order=arr_s[0].order) i_s = [a.start_iter() for a in arr_s] ri = result.start_iter() @@ -1584,11 +1587,14 @@ while not ri.done(): if len(arr_s) == 1: result.dtype.setitem(result.storage, ri.offset, - space.call_function(self.w_func, arr_s[0].eval(i_s[0]))) + space.call_function(signature.w_func, + arr_s[0].eval(i_s[0]))) else: - result.dtype.setitem(result.storage, ri.offset, - space.call_function(self.w_func, - *[a.eval(i) for a,i in zip(arr_s, i_s)])) + w_fargs = space.newlist([arr_s[0].eval(i_s[0])]) + for j in range(1, len(arr_s)): + space.call_method(w_fargs, "append", arr_s[j].eval(i_s[j])) + result.dtype.setitem(result.storage, ri.offset, space.call( + signature.w_func, w_fargs)) i_s = [i.next(shapelen) for i in i_s] ri = ri.next(shapelen) return space.wrap(result) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -56,6 +56,6 @@ class CallPyFunc(BaseSignature): _immutable_fields_ = ["func", "name"] - def __init__(self, func): - self.func = func - self.name = func.name + def __init__(self, space, w_func): + self.w_func = w_func + self.name = w_func.getname(space) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -378,11 +378,20 @@ from numpypy import frompyfunc ufunc = frompyfunc(abs, 1, 1) assert (ufunc([-1, 0, 3, 15]) == [1, 0, 3, 15]).all() + + def test_frompyfunc_nullfunc(self): + def foo(): + return 165.0 + from numpypy import frompyfunc + ufunc = frompyfunc(foo, 0, 1) + assert ufunc() == 165.0 def test_frompyfunc_foo(self): from numpypy import frompyfunc, array + def foo(x): return x * x + 1 + def bar(x): return x + 1 ufunc = frompyfunc(foo, 1, 1) @@ -392,19 +401,22 @@ assert (ufunc(range(10)) == array(range(10)) * range(10) + 1).all() #but messing with the func_code WILL change it: numpy is sensitive #to this in the same way + def foo(x): return x * x + 1 + def bar(x): return x + 1 - from numpypy import frompyfunc, array ufunc = frompyfunc(foo, 1, 1) assert (ufunc(range(10)) == array(range(10)) * range(10) + 1).all() foo.func_code = bar.func_code assert not (ufunc(range(10)) == array(range(10)) * range(10) + 1).all() + def test_frompyfunc_broadcast(self): from numpypy import frompyfunc, array + def foo(x, y): return x * y + 1 ufunc = frompyfunc(foo, 2, 1) - assert (ufunc(range(10),range(10)) == array(range(10)) * range(10) + 1).all() - + assert (ufunc(range(10), range(10)) == \ + array(range(10)) * range(10) + 1).all() From noreply at buildbot.pypy.org Wed Dec 14 21:53:46 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Wed, 14 Dec 2011 21:53:46 +0100 (CET) Subject: [pypy-commit] pypy default: inline descr_get_shape to prevent a residual call on two dimensional array accesses Message-ID: 
<20111214205346.67D8682210@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r50521:085083392791 Date: 2011-12-14 21:53 +0100 http://bitbucket.org/pypy/pypy/changeset/085083392791/ Log: inline descr_get_shape to prevent a residual call on two dimensional array accesses diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -581,6 +581,7 @@ def descr_get_dtype(self, space): return space.wrap(self.find_dtype()) + @jit.unroll_safe def descr_get_shape(self, space): return space.newtuple([space.wrap(i) for i in self.shape]) From noreply at buildbot.pypy.org Wed Dec 14 22:14:07 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Wed, 14 Dec 2011 22:14:07 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: (Alex_Gaynor, hakanardo): whishlisting Message-ID: <20111214211407.A1AF382210@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: extradoc Changeset: r3986:5176509bd13c Date: 2011-12-14 22:13 +0100 http://bitbucket.org/pypy/extradoc/changeset/5176509bd13c/ Log: (Alex_Gaynor, hakanardo): whishlisting diff --git a/planning/micronumpy.txt b/planning/micronumpy.txt --- a/planning/micronumpy.txt +++ b/planning/micronumpy.txt @@ -20,3 +20,9 @@ - axis= parameter to various methods - expose ndarray.ctypes + +- subclassing ndarray (instantiating subcalsses curently returns the wrong type) + + * keep subclass type when slicing, __array_finalize__ + + * ndarray.view From noreply at buildbot.pypy.org Wed Dec 14 22:17:52 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Wed, 14 Dec 2011 22:17:52 +0100 (CET) Subject: [pypy-commit] pypy numpy-concatenate: Adds numpy.concatenate Message-ID: <20111214211752.61E2982210@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: numpy-concatenate Changeset: r50522:0d81f0701f18 Date: 2011-12-14 16:17 -0500 http://bitbucket.org/pypy/pypy/changeset/0d81f0701f18/ Log: Adds numpy.concatenate diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -97,4 +97,5 @@ 'pi': 'app_numpy.pi', 'arange': 'app_numpy.arange', 'reshape': 'app_numpy.reshape', + 'concatenate': 'app_numpy.concatenate', } diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -34,6 +34,37 @@ a = numpypy.array(a) return a.max() +def concatenate(array_iter, axis=0): + arrays = [] + shape = None + for a in array_iter: + if not hasattr(a, 'shape'): + a = numpypy.array(a) + arrays.append(a) + if len(a.shape) < axis + 1: + raise ValueError("bad axis argument") + if shape is None: + shape = list(a.shape) + else: + for i, axis_size in enumerate(a.shape): + if len(a.shape) != len(shape) or (i != axis and axis_size != shape[i]): + raise ValueError("array dimensions must agree except for axis being concatenated") + elif i == axis: + shape[i] += axis_size + + if len(arrays) == 0: + raise ValueError("concatenation of zero-length sequences is impossible") + + out_array = numpypy.zeros(shape) + slicing_index = [slice(None)] * len(shape) + axis_ptr = 0 + for a in arrays: + slicing_index[axis] = slice(axis_ptr, axis_ptr + a.shape[axis]) + out_array[tuple(slicing_index)] = a + axis_ptr += a.shape[axis] + + return out_array + def arange(start, stop=None, step=1, dtype=None): '''arange([start], stop[, step], 
dtype=None) Generate values in the half-interval [start, stop). diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -923,6 +923,31 @@ assert a[:,0].tolist() == [17.1, 40.3] assert a[0].tolist() == [17.1, 27.2] + def test_concatenate(self): + from numpypy import array, concatenate + a1 = array([0,1,2]) + a2 = array([3,4,5]) + a = concatenate((a1, a2)) + assert len(a) == 6 + assert (a == [0,1,2,3,4,5]).all() + b1 = array([[1, 2], [3, 4]]) + b2 = array([[5, 6]]) + b = concatenate((b1, b2), axis=0) + assert (b == [[1, 2],[3, 4],[5, 6]]).all() + c = concatenate((b1, b2.T), axis=1) + assert (c == [[1, 2, 5],[3, 4, 6]]).all() + + bad_axis = raises(ValueError, concatenate, (a1,a2), axis=1) + assert str(bad_axis.value) == "bad axis argument" + + concat_zero = raises(ValueError, concatenate, ()) + assert str(concat_zero.value) == \ + "concatenation of zero-length sequences is impossible" + + dims_disagree = raises(ValueError, concatenate, (a1, b1), axis=0) + assert str(dims_disagree.value) == \ + "array dimensions must agree except for axis being concatenated" + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): From notifications-noreply at bitbucket.org Wed Dec 14 22:36:40 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Wed, 14 Dec 2011 21:36:40 -0000 Subject: [pypy-commit] Notification: pypy-sandbox-4-pycode Message-ID: <20111214213640.19707.45937@bitbucket12.managed.contegix.com> You have received a notification from trampgeek. Hi, I forked pypy. My fork is at https://bitbucket.org/trampgeek/pypy-sandbox-4-pycode. -- Disable notifications at https://bitbucket.org/account/notifications/ From noreply at buildbot.pypy.org Wed Dec 14 23:01:28 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Wed, 14 Dec 2011 23:01:28 +0100 (CET) Subject: [pypy-commit] pypy numpy-concatenate: Changed some variable names and added a couple more tests Message-ID: <20111214220128.5456882210@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: numpy-concatenate Changeset: r50523:083461b61320 Date: 2011-12-14 17:01 -0500 http://bitbucket.org/pypy/pypy/changeset/083461b61320/ Log: Changed some variable names and added a couple more tests diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -55,13 +55,13 @@ if len(arrays) == 0: raise ValueError("concatenation of zero-length sequences is impossible") - out_array = numpypy.zeros(shape) + out_array = numpypy.empty(shape) slicing_index = [slice(None)] * len(shape) - axis_ptr = 0 + axis_start = 0 for a in arrays: - slicing_index[axis] = slice(axis_ptr, axis_ptr + a.shape[axis]) + slicing_index[axis] = slice(axis_start, axis_start + a.shape[axis]) out_array[tuple(slicing_index)] = a - axis_ptr += a.shape[axis] + axis_start += a.shape[axis] return out_array diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -936,6 +936,14 @@ assert (b == [[1, 2],[3, 4],[5, 6]]).all() c = concatenate((b1, b2.T), axis=1) assert (c == [[1, 2, 5],[3, 4, 6]]).all() + d = concatenate(([0],[1])) + assert (d == [0,1]).all() + e1 = array([[0,1],[2,3]]) + e = concatenate(e1) + assert (e == [0,1,2,3]).all() + f1 = array([0,1]) + f = 
concatenate((f1, [2], f1, [7])) + assert (f == [0,1,2,0,1,7]).all() bad_axis = raises(ValueError, concatenate, (a1,a2), axis=1) assert str(bad_axis.value) == "bad axis argument" From noreply at buildbot.pypy.org Wed Dec 14 23:53:03 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Dec 2011 23:53:03 +0100 (CET) Subject: [pypy-commit] pypy default: I know it contains a loop and thus cannot be inlined at all at the Message-ID: <20111214225303.E22E982210@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50524:5a9a29b9c0ae Date: 2011-12-14 23:50 +0100 http://bitbucket.org/pypy/pypy/changeset/5a9a29b9c0ae/ Log: I know it contains a loop and thus cannot be inlined at all at the moment, but it doesn't hurt to disable it explicitly too diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -697,6 +697,8 @@ return b.build() # str -> char* + # Can't inline this because of the raw address manipulation. + @jit.dont_look_inside def get_nonmovingbuffer(data): """ Either returns a non-moving copy or performs neccessary pointer From noreply at buildbot.pypy.org Thu Dec 15 01:54:11 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Thu, 15 Dec 2011 01:54:11 +0100 (CET) Subject: [pypy-commit] pypy numpy-full-fromstring: Fix most review comments, two have responses in the notes sections of REVIEW Message-ID: <20111215005411.4443182210@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: numpy-full-fromstring Changeset: r50525:d5854741cdef Date: 2011-12-14 19:53 -0500 http://bitbucket.org/pypy/pypy/changeset/d5854741cdef/ Log: Fix most review comments, two have responses in the notes sections of REVIEW diff --git a/pypy/module/micronumpy/REVIEW b/pypy/module/micronumpy/REVIEW --- a/pypy/module/micronumpy/REVIEW +++ b/pypy/module/micronumpy/REVIEW @@ -1,14 +1,25 @@ Review items ============ +Fixed +----- * Rather than `True if len(sep_stripped) == 0 else False` just use: `len(sep_stripped) == 0`. * Rather than name the variable `A`, name it `items` or somsething like that. * Rather than using `ptr`, use `idx`, since it's not really a pointer. +* Same comment about comparing array equality as before. +* Rather than name the attribute `char`, name it `format_code`. +* `default_fromstring` can do `self.box(-1.0)`, instead of the coerce thing. * Rather than doing a string format to raise an error (L67), use `operationerrfmt`. -* Same comment about comparing array equality as before. + +Notes +----- +* Tests for both bool and long dtypes with this. + (jterrace: fromstring with bool segfaults on my numpy 1.5.1. + Supposedly it has been fixed in later versions, but I can't + seem to install it to check to make sure my tests are correct. + There are already some tests for int64. Is that what you meant + by long types?) * No need for the `self.char == "?"` default. -* Rather than name the attribute `char`, name it `format_code`. -* `default_fromstring` can do `self.box(-1.0)`, instead of the coerce thing. -* Tests for both bool and long dtypes with this. 
+ (jterrace: Does not translate without it) \ No newline at end of file diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec from pypy.rpython.lltypesystem import lltype, rffi from pypy.module.micronumpy import interp_dtype @@ -11,17 +11,17 @@ from pypy.module.micronumpy.interp_numarray import W_NDimArray sep_stripped = strip_spaces(sep) - skip_bad_vals = True if len(sep_stripped) == 0 else False + skip_bad_vals = len(sep_stripped) == 0 - A = [] + items = [] num_items = 0 - ptr = 0 + idx = 0 - while (num_items < count or count == -1) and ptr < len(s): - nextptr = s.find(sep, ptr) - if nextptr < 0: - nextptr = length - piece = strip_spaces(s[ptr:nextptr]) + while (num_items < count or count == -1) and idx < len(s): + nextidx = s.find(sep, idx) + if nextidx < 0: + nextidx = length + piece = strip_spaces(s[idx:nextidx]) if len(piece) > 0 or not skip_bad_vals: if len(piece) == 0 and not skip_bad_vals: val = dtype.itemtype.default_fromstring(space) @@ -42,17 +42,17 @@ raise if not gotit: val = dtype.itemtype.default_fromstring(space) - nextptr = length - A.append(val) + nextidx = length + items.append(val) num_items += 1 - ptr = nextptr + 1 + idx = nextidx + 1 if count > num_items: raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) a = W_NDimArray(num_items, [num_items], dtype=dtype) - for i, val in enumerate(A): + for i, val in enumerate(items): a.dtype.setitem(a.storage, i, val) return space.wrap(a) @@ -64,8 +64,9 @@ if count == -1: count = length / itemsize if length % itemsize != 0: - raise OperationError(space.w_ValueError, space.wrap( - "string length %d not divisable by item size %d" % (length, itemsize))) + raise operationerrfmt(space.w_ValueError, + "string length %d not divisable by item size %d", + length, itemsize) if count * itemsize > length: raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1228,27 +1228,31 @@ assert g[1] == 2 assert g[2] == 3 h = fromstring("1, , 2, 3", dtype=uint8, sep=",") - assert h.tolist() == [1,0,2,3] + assert (h == [1,0,2,3]).all() i = fromstring("1 2 3", dtype=uint8, sep=" ") - assert i.tolist() == [1,2,3] + assert (i == [1,2,3]).all() j = fromstring("1\t\t\t\t2\t3", dtype=uint8, sep="\t") - assert j.tolist() == [1,2,3] + assert (j == [1,2,3]).all() k = fromstring("1,x,2,3", dtype=uint8, sep=",") - assert k.tolist() == [1,0] + assert (k == [1,0]).all() l = fromstring("1,x,2,3", dtype='float32', sep=",") - assert l.tolist() == [1.0,-1.0] + assert (l == [1.0,-1.0]).all() m = fromstring("1,,2,3", sep=",") - assert m.tolist() == [1.0,-1.0,2.0,3.0] + assert (m == [1.0,-1.0,2.0,3.0]).all() n = fromstring("3.4 2.0 3.8 2.2", dtype=int32, sep=" ") - assert n.tolist() == [3] + assert (n == [3]).all() o = fromstring("1.0 2f.0f 3.8 2.2", dtype=float32, sep=" ") assert len(o) == 2 assert o[0] == 1.0 assert o[1] == 2.0 p = fromstring("1.0,,2.0,3.0", sep=",") - assert p.tolist() == [1.0, -1.0, 2.0, 3.0] + assert (p == [1.0, -1.0, 2.0, 3.0]).all() q = 
fromstring("1.0,,2.0,3.0", sep=" ") - assert q.tolist() == [1.0] + assert (q == [1.0]).all() + r = fromstring("\x01\x00\x02", dtype='bool') + assert (r == [True, False, True]).all() + s = fromstring("1,2,3,,5", dtype="bool", sep=",") + assert (s == [True, True, True, False, True]).all() def test_fromstring_types(self): from numpypy import fromstring diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -56,7 +56,7 @@ class Primitive(object): _mixin_ = True - char = "?" + format_code = '?' def get_element_size(self): return rffi.sizeof(self.T) @@ -109,9 +109,7 @@ ) def runpack_str(self, s): - if self.char == "?": - raise NotImplementedError - return self.box(runpack(self.char, s)) + return self.box(runpack(self.format_code, s)) @simple_binary_op def add(self, v1, v2): @@ -175,6 +173,7 @@ class Bool(BaseType, Primitive): T = lltype.Bool BoxType = interp_boxes.W_BoolBox + format_code = '?' True = BoxType(True) False = BoxType(False) @@ -203,6 +202,9 @@ def for_computation(self, v): return int(v) + + def default_fromstring(self, space): + return self.box(False) class Integer(Primitive): _mixin_ = True @@ -218,7 +220,7 @@ return widen(v) def default_fromstring(self, space): - return self._coerce(space, space.wrap(0)) + return self.box(0) @simple_binary_op def div(self, v1, v2): @@ -255,32 +257,32 @@ class Int8(BaseType, Integer): T = rffi.SIGNEDCHAR BoxType = interp_boxes.W_Int8Box - char = "b" + format_code = "b" class UInt8(BaseType, Integer): T = rffi.UCHAR BoxType = interp_boxes.W_UInt8Box - char = "B" + format_code = "B" class Int16(BaseType, Integer): T = rffi.SHORT BoxType = interp_boxes.W_Int16Box - char = "h" + format_code = "h" class UInt16(BaseType, Integer): T = rffi.USHORT BoxType = interp_boxes.W_UInt16Box - char = "H" + format_code = "H" class Int32(BaseType, Integer): T = rffi.INT BoxType = interp_boxes.W_Int32Box - char = "i" + format_code = "i" class UInt32(BaseType, Integer): T = rffi.UINT BoxType = interp_boxes.W_UInt32Box - char = "I" + format_code = "I" class Long(BaseType, Integer): T = rffi.LONG @@ -293,12 +295,12 @@ class Int64(BaseType, Integer): T = rffi.LONGLONG BoxType = interp_boxes.W_Int64Box - char = "q" + format_code = "q" class UInt64(BaseType, Integer): T = rffi.ULONGLONG BoxType = interp_boxes.W_UInt64Box - char = "Q" + format_code = "Q" def _coerce(self, space, w_item): try: @@ -327,7 +329,7 @@ return float(v) def default_fromstring(self, space): - return self._coerce(space, space.wrap(-1.0)) + return self.box(-1.0) @simple_binary_op def div(self, v1, v2): @@ -428,9 +430,9 @@ class Float32(BaseType, Float): T = rffi.FLOAT BoxType = interp_boxes.W_Float32Box - char = "f" + format_code = "f" class Float64(BaseType, Float): T = rffi.DOUBLE BoxType = interp_boxes.W_Float64Box - char = "d" \ No newline at end of file + format_code = "d" \ No newline at end of file From noreply at buildbot.pypy.org Thu Dec 15 02:28:56 2011 From: noreply at buildbot.pypy.org (ned) Date: Thu, 15 Dec 2011 02:28:56 +0100 (CET) Subject: [pypy-commit] pypy nedbat-sandbox-2: Make sandlib logging conditional, and only import pypy.tool.ansi_print if we are logging. Message-ID: <20111215012856.D862D82210@wyvern.cs.uni-duesseldorf.de> Author: Ned Batchelder Branch: nedbat-sandbox-2 Changeset: r50526:47a28aeffae1 Date: 2011-12-14 20:16 -0500 http://bitbucket.org/pypy/pypy/changeset/47a28aeffae1/ Log: Make sandlib logging conditional, and only import pypy.tool.ansi_print if we are logging. 
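Besides making the log lazy, the diff below teaches pypy_interact.py a -q/--quiet switch: passing it sets debug=False, so no log object is created and pypy.tool.ansi_print is never imported. Hypothetical invocations (the sandboxed executable and script names are placeholders, not part of the changeset):

    python pypy_interact.py --tmp=/tmp/sandbox pypy-c-sandbox script.py      # default: logs calls/results
    python pypy_interact.py -q --tmp=/tmp/sandbox pypy-c-sandbox script.py   # quiet: skips the logging setup
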
diff --git a/pypy/translator/sandbox/pypy_interact.py b/pypy/translator/sandbox/pypy_interact.py --- a/pypy/translator/sandbox/pypy_interact.py +++ b/pypy/translator/sandbox/pypy_interact.py @@ -29,15 +29,15 @@ from pypy.tool.lib_pypy import LIB_ROOT class PyPySandboxedProc(VirtualizedSandboxedProc, SimpleIOSandboxedProc): - debug = True argv0 = '/bin/pypy-c' virtual_cwd = '/tmp' virtual_env = {} virtual_console_isatty = True - def __init__(self, executable, arguments, tmpdir=None): + def __init__(self, executable, arguments, tmpdir=None, debug=True): self.executable = executable = os.path.abspath(executable) self.tmpdir = tmpdir + self.debug = debug super(PyPySandboxedProc, self).__init__([self.argv0] + arguments, executable=executable) @@ -67,12 +67,13 @@ if __name__ == '__main__': from getopt import getopt # and not gnu_getopt! - options, arguments = getopt(sys.argv[1:], 't:h', + options, arguments = getopt(sys.argv[1:], 't:hq', ['tmp=', 'heapsize=', 'timeout=', 'log=', - 'help']) + 'quiet', 'help']) tmpdir = None timeout = None logfile = None + debug = True extraoptions = [] def help(): @@ -104,6 +105,8 @@ timeout = int(value) elif option == '--log': logfile = value + elif option in ['-q', '--quiet']: + debug = False elif option in ['-h', '--help']: help() else: @@ -113,7 +116,7 @@ help() sandproc = PyPySandboxedProc(arguments[0], extraoptions + arguments[1:], - tmpdir=tmpdir) + tmpdir=tmpdir, debug=debug) if timeout is not None: sandproc.settimeout(timeout, interrupt_main=True) if logfile is not None: diff --git a/pypy/translator/sandbox/sandlib.py b/pypy/translator/sandbox/sandlib.py --- a/pypy/translator/sandbox/sandlib.py +++ b/pypy/translator/sandbox/sandlib.py @@ -6,23 +6,27 @@ import py import sys, os, posixpath, errno, stat, time -from pypy.tool.ansi_print import AnsiLog import subprocess from pypy.tool.killsubprocess import killsubprocess from pypy.translator.sandbox.vfs import UID, GID -class MyAnsiLog(AnsiLog): - KW_TO_COLOR = { - 'call': ((34,), False), - 'result': ((34,), False), - 'exception': ((34,), False), - 'vpath': ((35,), False), - 'timeout': ((1, 31), True), - } +def create_log(): + """Make and return a log for the sandbox to use, if needed.""" + # This import is local to avoid importing pypy if we don't need to. + from pypy.tool.ansi_print import AnsiLog -log = py.log.Producer("sandlib") -py.log.setconsumer("sandlib", MyAnsiLog()) + class MyAnsiLog(AnsiLog): + KW_TO_COLOR = { + 'call': ((34,), False), + 'result': ((34,), False), + 'exception': ((34,), False), + 'vpath': ((35,), False), + 'timeout': ((1, 31), True), + } + log = py.log.Producer("sandlib") + py.log.setconsumer("sandlib", MyAnsiLog()) + return log # Note: we use lib_pypy/marshal.py instead of the built-in marshal # for two reasons. The built-in module could be made to segfault @@ -126,6 +130,7 @@ for the external functions xxx that you want to support. """ debug = False + log = None os_level_sandboxing = False # Linux only: /proc/PID/seccomp def __init__(self, args, executable=None): @@ -142,6 +147,9 @@ self.currenttimeout = None self.currentlyidlefrom = None + if self.debug: + self.log = create_log() + def withlock(self, function, *args, **kwds): lock = self.popenlock if lock is not None: @@ -169,7 +177,8 @@ if delay <= 0.0: break # expired! 
time.sleep(min(delay*1.001, 1)) - log.timeout("timeout!") + if self.log: + self.log.timeout("timeout!") self.kill() #if interrupt_main: # if hasattr(os, 'kill'): @@ -246,22 +255,22 @@ args = read_message(child_stdout) except EOFError, e: break - if self.debug and not self.is_spam(fnname, *args): - log.call('%s(%s)' % (fnname, + if self.log and not self.is_spam(fnname, *args): + self.log.call('%s(%s)' % (fnname, ', '.join([shortrepr(x) for x in args]))) try: answer, resulttype = self.handle_message(fnname, *args) except Exception, e: tb = sys.exc_info()[2] write_exception(child_stdin, e, tb) - if self.debug: + if self.log: if str(e): - log.exception('%s: %s' % (e.__class__.__name__, e)) + self.log.exception('%s: %s' % (e.__class__.__name__, e)) else: - log.exception('%s' % (e.__class__.__name__,)) + self.log.exception('%s' % (e.__class__.__name__,)) else: - if self.debug and not self.is_spam(fnname, *args): - log.result(shortrepr(answer)) + if self.log and not self.is_spam(fnname, *args): + self.log.result(shortrepr(answer)) try: write_message(child_stdin, 0) # error code - 0 for ok write_message(child_stdin, answer, resulttype) @@ -440,7 +449,8 @@ node = dirnode.join(name) else: node = dirnode - log.vpath('%r => %r' % (vpath, node)) + if self.log: + self.log.vpath('%r => %r' % (vpath, node)) return node def do_ll_os__ll_os_stat(self, vpathname): From noreply at buildbot.pypy.org Thu Dec 15 04:59:07 2011 From: noreply at buildbot.pypy.org (ned) Date: Thu, 15 Dec 2011 04:59:07 +0100 (CET) Subject: [pypy-commit] pypy nedbat-sandbox-2: Move another import so we don't require 'import py' Message-ID: <20111215035907.0C6B682210@wyvern.cs.uni-duesseldorf.de> Author: Ned Batchelder Branch: nedbat-sandbox-2 Changeset: r50527:585b46142c3e Date: 2011-12-14 22:58 -0500 http://bitbucket.org/pypy/pypy/changeset/585b46142c3e/ Log: Move another import so we don't require 'import py' diff --git a/pypy/translator/sandbox/sandlib.py b/pypy/translator/sandbox/sandlib.py --- a/pypy/translator/sandbox/sandlib.py +++ b/pypy/translator/sandbox/sandlib.py @@ -4,7 +4,6 @@ for the outer process, which can run CPython or PyPy. """ -import py import sys, os, posixpath, errno, stat, time import subprocess from pypy.tool.killsubprocess import killsubprocess @@ -12,7 +11,8 @@ def create_log(): """Make and return a log for the sandbox to use, if needed.""" - # This import is local to avoid importing pypy if we don't need to. + # These imports are local to avoid importing pypy if we don't need to. 
+ import py from pypy.tool.ansi_print import AnsiLog class MyAnsiLog(AnsiLog): From noreply at buildbot.pypy.org Thu Dec 15 08:53:48 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Dec 2011 08:53:48 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: make viewsignatures always differ Message-ID: <20111215075348.1953582221@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50528:9400300d69d3 Date: 2011-12-15 09:53 +0200 http://bitbucket.org/pypy/pypy/changeset/9400300d69d3/ Log: make viewsignatures always differ diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -116,9 +116,11 @@ self.child = child def eq(self, other): - if type(self) is not type(other): - return False - return self.child.eq(other.child) + return False # two views are not identical for now + # They would be if steps in all dimensions are equal + #if type(self) is not type(other): + # return False + #return self.child.eq(other.child) def hash(self): return self.child.hash() ^ 0x12345 From noreply at buildbot.pypy.org Thu Dec 15 08:56:16 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 15 Dec 2011 08:56:16 +0100 (CET) Subject: [pypy-commit] pypy numpy-full-fromstring: merged default Message-ID: <20111215075616.C726A82221@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-full-fromstring Changeset: r50529:d0c17d270f6c Date: 2011-12-15 02:52 -0500 http://bitbucket.org/pypy/pypy/changeset/d0c17d270f6c/ Log: merged default diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -190,8 +190,8 @@ def is_w(self, space, w_other): return self is w_other - def unique_id(self, space): - return space.wrap(compute_unique_id(self)) + def immutable_unique_id(self, space): + return None def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) @@ -706,7 +706,10 @@ return w_two.is_w(self, w_one) def id(self, w_obj): - return w_obj.unique_id(self) + w_result = w_obj.immutable_unique_id(self) + if w_result is None: + w_result = self.wrap(compute_unique_id(w_obj)) + return w_result def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -54,7 +54,11 @@ # Hash support def default_identity_hash(space, w_obj): - return space.wrap(compute_identity_hash(w_obj)) + w_unique_id = w_obj.immutable_unique_id(space) + if w_unique_id is None: # common case + return space.wrap(compute_identity_hash(w_obj)) + else: + return space.hash(w_unique_id) # ____________________________________________________________ # diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -498,27 +498,29 @@ else: log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) + def _rewrite_raw_malloc(self, op, name, args): + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) + TYPE = op.args[0].value + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not 
track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, args, + extra = (TYPE,), + extrakey = TYPE) + def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': - d = op.args[1].value.copy() - d.pop('flavor') - add_memory_pressure = d.pop('add_memory_pressure', False) - zero = d.pop('zero', False) - track_allocation = d.pop('track_allocation', True) - if d: - raise UnsupportedMallocFlags(d) - ARRAY = op.args[0].value - name = 'raw_malloc' - if zero: - name += '_zero' - if add_memory_pressure: - name += '_add_memory_pressure' - if not track_allocation: - name += '_no_track_allocation' - return self._do_builtin_call(op, name, - [op.args[2]], - extra = (ARRAY,), - extrakey = ARRAY) + return self._rewrite_raw_malloc(op, 'raw_malloc_varsize', + [op.args[2]]) if op.args[0].value == rstr.STR: return SpaceOperation('newstr', [op.args[2]], op.result) elif op.args[0].value == rstr.UNICODE: @@ -531,11 +533,18 @@ op.result) def rewrite_op_free(self, op): - flags = op.args[1].value - assert flags['flavor'] == 'raw' - ARRAY = op.args[0].concretetype.TO - return self._do_builtin_call(op, 'raw_free', [op.args[0]], - extra = (ARRAY,), extrakey = ARRAY) + d = op.args[1].value.copy() + assert d['flavor'] == 'raw' + d.pop('flavor') + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) + STRUCT = op.args[0].concretetype.TO + name = 'raw_free' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[0]], + extra = (STRUCT,), extrakey = STRUCT) def rewrite_op_getarrayitem(self, op): ARRAY = op.args[0].concretetype.TO @@ -736,6 +745,9 @@ return [op0, op1] def rewrite_op_malloc(self, op): + if op.args[1].value['flavor'] == 'raw': + return self._rewrite_raw_malloc(op, 'raw_malloc_fixedsize', []) + # assert op.args[1].value == {'flavor': 'gc'} STRUCT = op.args[0].value vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, STRUCT) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -599,26 +599,75 @@ return p return _ll_0_alloc_with_del - def build_raw_malloc_builder(zero=False, add_memory_pressure=False, track_allocation=True): - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, add_memory_pressure=add_memory_pressure) - return _ll_1_raw_malloc - return build_ll_1_raw_malloc + def build_raw_malloc_varsize_builder(zero=False, + add_memory_pressure=False, + track_allocation=True): + def build_ll_1_raw_malloc_varsize(ARRAY): + def _ll_1_raw_malloc_varsize(n): + return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) + return _ll_1_raw_malloc_varsize + return build_ll_1_raw_malloc_varsize - build_ll_1_raw_malloc = build_raw_malloc_builder() - build_ll_1_raw_malloc_zero = build_raw_malloc_builder(zero=True) - build_ll_1_raw_malloc_zero_add_memory_pressure = build_raw_malloc_builder(zero=True, add_memory_pressure=True) - build_ll_1_raw_malloc_add_memory_pressure = build_raw_malloc_builder(add_memory_pressure=True) - build_ll_1_raw_malloc_no_track_allocation = build_raw_malloc_builder(track_allocation=False) - build_ll_1_raw_malloc_zero_no_track_allocation = build_raw_malloc_builder(zero=True, track_allocation=False) - build_ll_1_raw_malloc_zero_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(zero=True, 
add_memory_pressure=True, track_allocation=False) - build_ll_1_raw_malloc_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(add_memory_pressure=True, track_allocation=False) + build_ll_1_raw_malloc_varsize = ( + build_raw_malloc_varsize_builder()) + build_ll_1_raw_malloc_varsize_zero = ( + build_raw_malloc_varsize_builder(zero=True)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure = ( + build_raw_malloc_varsize_builder(zero=True, add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_add_memory_pressure = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_no_track_allocation = ( + build_raw_malloc_varsize_builder(track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, add_memory_pressure=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True, track_allocation=False)) - def build_ll_1_raw_free(ARRAY): - def _ll_1_raw_free(p): - lltype.free(p, flavor='raw') - return _ll_1_raw_free + def build_raw_malloc_fixedsize_builder(zero=False, + add_memory_pressure=False, + track_allocation=True): + def build_ll_0_raw_malloc_fixedsize(STRUCT): + def _ll_0_raw_malloc_fixedsize(): + return lltype.malloc(STRUCT, flavor='raw', zero=zero, + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) + return _ll_0_raw_malloc_fixedsize + return build_ll_0_raw_malloc_fixedsize + + build_ll_0_raw_malloc_fixedsize = ( + build_raw_malloc_fixedsize_builder()) + build_ll_0_raw_malloc_fixedsize_zero = ( + build_raw_malloc_fixedsize_builder(zero=True)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(zero=True, add_memory_pressure=True)) + build_ll_0_raw_malloc_fixedsize_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True)) + build_ll_0_raw_malloc_fixedsize_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, add_memory_pressure=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True, track_allocation=False)) + + def build_raw_free_builder(track_allocation=True): + def build_ll_1_raw_free(ARRAY): + def _ll_1_raw_free(p): + lltype.free(p, flavor='raw', + track_allocation=track_allocation) + return _ll_1_raw_free + return build_ll_1_raw_free + + build_ll_1_raw_free = ( + build_raw_free_builder()) + build_ll_1_raw_free_no_track_allocation = ( + build_raw_free_builder(track_allocation=False)) + class OOtypeHelpers: diff --git a/pypy/jit/codewriter/test/test_codewriter.py b/pypy/jit/codewriter/test/test_codewriter.py --- a/pypy/jit/codewriter/test/test_codewriter.py +++ b/pypy/jit/codewriter/test/test_codewriter.py @@ -217,7 +217,7 @@ cw.make_jitcodes(verbose=True) # s = jitdriver_sd.mainjitcode.dump() - assert 'residual_call_ir_i $<* fn _ll_1_raw_malloc__Signed>' in s + assert 'residual_call_ir_i $<* fn 
_ll_1_raw_malloc_varsize__Signed>' in s assert 'setarrayitem_raw_i' in s assert 'getarrayitem_raw_i' in s assert 'residual_call_ir_v $<* fn _ll_1_raw_free__arrayPtr>' in s diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -550,7 +550,7 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) op0, op1 = tr.rewrite_operation(op) assert op0.opname == 'residual_call_ir_i' - assert op0.args[0].value == 'raw_malloc' # pseudo-function as a str + assert op0.args[0].value == 'raw_malloc_varsize' # pseudo-function as a str assert op1.opname == '-live-' assert op1.args == [] @@ -564,7 +564,7 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) op0, op1 = tr.rewrite_operation(op) assert op0.opname == 'residual_call_ir_i' - assert op0.args[0].value == 'raw_malloc_zero' # pseudo-function as a str + assert op0.args[0].value == 'raw_malloc_varsize_zero' # pseudo-fn as a str assert op1.opname == '-live-' assert op1.args == [] @@ -578,6 +578,35 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) +def test_raw_malloc_fixedsize(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc', [Constant(S, lltype.Void), flags], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_r_i' + assert op0.args[0].value == 'raw_malloc_fixedsize_zero' #pseudo-fn as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_free(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + for flag in [True, False]: + flags = Constant({'flavor': 'raw', 'track_allocation': flag}, + lltype.Void) + op = SpaceOperation('free', [varoftype(lltype.Ptr(S)), flags], + varoftype(lltype.Void)) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_v' + if flag: + pseudo_op_name = 'raw_free' + else: + pseudo_op_name = 'raw_free_no_track_allocation' + assert op0.args[0].value == pseudo_op_name # pseudo-function as a str + assert op1.opname == '-live-' + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address diff --git a/pypy/jit/metainterp/test/test_rawmem.py b/pypy/jit/metainterp/test/test_rawmem.py --- a/pypy/jit/metainterp/test/test_rawmem.py +++ b/pypy/jit/metainterp/test/test_rawmem.py @@ -8,7 +8,7 @@ VOID_TP = lltype.Array(lltype.Void, hints={"nolength": True, "uncast_on_llgraph": True}) class A(object): def __init__(self, x): - self.storage = rffi.cast(lltype.Ptr(VOID_TP), x)\ + self.storage = rffi.cast(lltype.Ptr(VOID_TP), x) def f(n): x = lltype.malloc(TP, n, flavor="raw", zero=True) @@ -19,4 +19,14 @@ lltype.free(x, flavor="raw") return s res = self.interp_operations(f, [10]) - assert res == 1.0 \ No newline at end of file + + def test_fixed_size_malloc(self): + TIMEVAL = lltype.Struct('dummy', ('tv_sec', rffi.LONG), ('tv_usec', rffi.LONG)) + def f(): + p = lltype.malloc(TIMEVAL, flavor='raw') + lltype.free(p, flavor='raw') + return 42 + res = self.interp_operations(f, []) + assert res == 42 + self.check_operations_history({'call': 2, 'guard_no_exception': 1, + 'finish': 1}) diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- 
a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -161,11 +161,16 @@ def test_shutdown(self): import socket, ssl, sys, gc - if sys.platform == 'darwin': - skip("get also on CPython: error: [Errno 0]") ss = socket.ssl(self.s) ss.write("hello\n") - assert ss.shutdown() is self.s._sock + try: + result = ss.shutdown() + except socket.error, e: + # xxx obscure case; throwing errno 0 is pretty odd... + if e.errno == 0: + skip("Shutdown raised errno 0. CPython does this too") + raise + assert result is self.s._sock raises(ssl.SSLError, ss.write, "hello\n") del ss; gc.collect() diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -581,6 +581,7 @@ def descr_get_dtype(self, space): return space.wrap(self.find_dtype()) + @jit.unroll_safe def descr_get_shape(self, space): return space.newtuple([space.wrap(i) for i in self.shape]) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -656,7 +656,11 @@ os.fsync(f) # <- should also work with a file, or anything finally: # with a fileno() method f.close() - raises(OSError, os.fsync, fd) + try: + # May not raise anything with a buggy libc (or eatmydata) + os.fsync(fd) + except OSError: + pass raises(ValueError, os.fsync, -1) if hasattr(os, 'fdatasync'): @@ -668,7 +672,11 @@ os.fdatasync(fd) finally: f.close() - raises(OSError, os.fdatasync, fd) + try: + # May not raise anything with a buggy libc (or eatmydata) + os.fdatasync(fd) + except OSError: + pass raises(ValueError, os.fdatasync, -1) if hasattr(os, 'fchdir'): diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -31,9 +31,9 @@ imag2 = float2longlong(imag2) return real1 == real2 and imag1 == imag2 - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.rlib.longlong2float import float2longlong from pypy.objspace.std.model import IDTAG_COMPLEX as tag real = space.float_w(space.getattr(self, space.wrap("real"))) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -34,9 +34,9 @@ two = float2longlong(space.float_w(w_other)) return one == two - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.rlib.longlong2float import float2longlong from pypy.objspace.std.model import IDTAG_FLOAT as tag val = float2longlong(space.float_w(self)) diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -26,9 +26,9 @@ return self is w_other return space.int_w(self) == space.int_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.objspace.std.model import IDTAG_INT as tag b = space.bigint_w(self) b = b.lshift(3).or_(rbigint.fromint(tag)) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ 
b/pypy/objspace/std/longobject.py @@ -18,9 +18,9 @@ return self is w_other return space.bigint_w(self).eq(space.bigint_w(w_other)) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.objspace.std.model import IDTAG_LONG as tag b = space.bigint_w(self) b = b.lshift(3).or_(rbigint.fromint(tag)) diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -32,9 +32,9 @@ return False return space.str_w(self) is space.str_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None return space.wrap(compute_unique_id(space.str_w(self))) diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -253,6 +253,12 @@ y = 2j assert id(x) != id(y) + def test_object_hash_immutable(self): + x = 42 + y = 40 + y += 2 + assert object.__hash__(x) == object.__hash__(y) + def test_isinstance_shortcut(): from pypy.objspace.std import objspace diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -32,9 +32,9 @@ return False return space.unicode_w(self) is space.unicode_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None return space.wrap(compute_unique_id(space.unicode_w(self))) diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -16,6 +16,7 @@ from pypy.rpython.annlowlevel import llhelper from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.rstring import StringBuilder, UnicodeBuilder +from pypy.rlib import jit from pypy.rpython.lltypesystem import llmemory import os, sys @@ -249,8 +250,7 @@ wrapper = func_with_new_name(wrapper, name) if calling_conv != "c": - from pypy.rlib.jit import dont_look_inside - wrapper = dont_look_inside(wrapper) + wrapper = jit.dont_look_inside(wrapper) return wrapper @@ -697,6 +697,8 @@ return b.build() # str -> char* + # Can't inline this because of the raw address manipulation. + @jit.dont_look_inside def get_nonmovingbuffer(data): """ Either returns a non-moving copy or performs neccessary pointer @@ -717,6 +719,8 @@ get_nonmovingbuffer._annenforceargs_ = [strtype] # (str, char*) -> None + # Can't inline this because of the raw address manipulation. + @jit.dont_look_inside def free_nonmovingbuffer(data, buf): """ Either free a non-moving buffer or keep the original storage alive. 
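As an aside on the unique_id -> immutable_unique_id diffs above: for the immutable numeric boxes (int, long, float, complex) the id is derived from the value itself, shifted left by 3 bits and OR'ed with a small per-type tag, rather than from the object's address, so two separately computed but equal immutables end up with the same id. That is presumably why the new test_object_hash_immutable test can expect object.__hash__(42) to equal object.__hash__(40 + 2). A minimal pure-Python sketch of the idea, not part of the commit itself; the tag constant below is an assumed placeholder, not the real pypy.objspace.std.model value:

    # Sketch only: IDTAG_INT is an assumed illustrative value.
    IDTAG_INT = 1

    def immutable_unique_id_for_int(value):
        # Mirrors the b.lshift(3).or_(rbigint.fromint(tag)) pattern in the
        # diff: the id depends only on the value, never on object identity.
        return (value << 3) | IDTAG_INT

    x = 42
    y = 40
    y += 2
    assert immutable_unique_id_for_int(x) == immutable_unique_id_for_int(y)
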
From noreply at buildbot.pypy.org Thu Dec 15 08:56:17 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 15 Dec 2011 08:56:17 +0100 (CET) Subject: [pypy-commit] pypy numpy-full-fromstring: remaining review notes, almost there Message-ID: <20111215075617.F3C0182221@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: numpy-full-fromstring Changeset: r50530:69e0e93f244d Date: 2011-12-15 02:56 -0500 http://bitbucket.org/pypy/pypy/changeset/69e0e93f244d/ Log: remaining review notes, almost there diff --git a/pypy/module/micronumpy/REVIEW b/pypy/module/micronumpy/REVIEW --- a/pypy/module/micronumpy/REVIEW +++ b/pypy/module/micronumpy/REVIEW @@ -1,18 +1,6 @@ Review items ============ -Fixed ------ -* Rather than `True if len(sep_stripped) == 0 else False` just use: - `len(sep_stripped) == 0`. -* Rather than name the variable `A`, name it `items` or somsething like that. -* Rather than using `ptr`, use `idx`, since it's not really a pointer. -* Same comment about comparing array equality as before. -* Rather than name the attribute `char`, name it `format_code`. -* `default_fromstring` can do `self.box(-1.0)`, instead of the coerce thing. -* Rather than doing a string format to raise an error (L67), use - `operationerrfmt`. - Notes ----- * Tests for both bool and long dtypes with this. @@ -21,5 +9,9 @@ seem to install it to check to make sure my tests are correct. There are already some tests for int64. Is that what you meant by long types?) -* No need for the `self.char == "?"` default. - (jterrace: Does not translate without it) \ No newline at end of file + * http://bpaste.net/show/20968/ is a log of me playing with bool `fromstring` + in 1.6, I can't seem to make it work with text parsing at all (or I'm an + idiot, I'm open to both interpretations), however binary parsing works so + maybe just do that for now. 
+ * by long I mean `dtype(int)`, which is different from either + `dtype("int32")` or `dtype("int64")` (for reasons beyond my comprehension) From noreply at buildbot.pypy.org Thu Dec 15 09:01:16 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Dec 2011 09:01:16 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: this is what I meant Message-ID: <20111215080116.2498782221@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50531:7cbc688e4439 Date: 2011-12-15 10:00 +0200 http://bitbucket.org/pypy/pypy/changeset/7cbc688e4439/ Log: this is what I meant diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -59,14 +59,16 @@ class Signature(object): def invent_numbering(self): cache = r_dict(sigeq, sighash) - self._invent_numbering(cache) + allnumbers = [] + self._invent_numbering(cache, allnumbers) - def _invent_numbering(self, cache): + def _invent_numbering(self, cache, allnumbers): try: no = cache[self] except KeyError: - no = len(cache) + no = len(allnumbers) cache[self] = no + allnumbers.append(no) self.iter_no = no def create_frame(self, arr): @@ -116,11 +118,9 @@ self.child = child def eq(self, other): - return False # two views are not identical for now - # They would be if steps in all dimensions are equal - #if type(self) is not type(other): - # return False - #return self.child.eq(other.child) + if type(self) is not type(other): + return False + return self.child.eq(other.child) def hash(self): return self.child.hash() ^ 0x12345 @@ -128,6 +128,12 @@ def debug_repr(self): return 'Slice(%s)' % self.child.debug_repr() + def _invent_numbering(self, cache, allnumbers): + # always invent a new number for view + no = len(allnumbers) + allnumbers.append(no) + self.iter_no = no + def _create_iter(self, iterlist, arr): if self.iter_no >= len(iterlist): iterlist.append(ViewIterator(arr)) @@ -160,8 +166,8 @@ def debug_repr(self): return 'Call1(%s)' % (self.child.debug_repr()) - def _invent_numbering(self, cache): - self.child._invent_numbering(cache) + def _invent_numbering(self, cache, allnumbers): + self.child._invent_numbering(cache, allnumbers) def _create_iter(self, iterlist, arr): self.child._create_iter(iterlist, arr.values) @@ -186,9 +192,9 @@ return (self.binfunc is other.binfunc and self.left.eq(other.left) and self.right.eq(other.right)) - def _invent_numbering(self, cache): - self.left._invent_numbering(cache) - self.right._invent_numbering(cache) + def _invent_numbering(self, cache, allnumbers): + self.left._invent_numbering(cache, allnumbers) + self.right._invent_numbering(cache, allnumbers) def _create_iter(self, iterlist, arr): self.left._create_iter(iterlist, arr.left) @@ -207,8 +213,8 @@ def _create_iter(self, iterlist, arr): self.right._create_iter(iterlist, arr) - def _invent_numbering(self, cache): - self.right._invent_numbering(cache) + def _invent_numbering(self, cache, allnumbers): + self.right._invent_numbering(cache, allnumbers) def eval(self, frame, arr): return self.right.eval(frame, arr) From noreply at buildbot.pypy.org Thu Dec 15 09:15:59 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 15 Dec 2011 09:15:59 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: numpy version Message-ID: <20111215081559.B903B82221@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: extradoc Changeset: r3987:f37efa0faa47 Date: 2011-12-15 09:15 +0100 
http://bitbucket.org/pypy/extradoc/changeset/f37efa0faa47/ Log: numpy version diff --git a/talk/iwtc11/benchmarks/convolution/convolution.py b/talk/iwtc11/benchmarks/convolution/convolution.py --- a/talk/iwtc11/benchmarks/convolution/convolution.py +++ b/talk/iwtc11/benchmarks/convolution/convolution.py @@ -57,6 +57,19 @@ self[x, y] = data[y][x] return self +class NumpyArray(Array2D): + def __init__(self, w, h): + self.width = w + self.height = h + import numpypy + self.data = numpypy.zeros([h, w], 'd') + + def __getitem__(self, (x, y)): + return self.data[y, x] + + def __setitem__(self, (x, y), val): + self.data[y, x] = val + def _conv3x3(a, b, k): assert k.width == k.height == 3 for y in xrange(1, a.height-1): @@ -88,6 +101,13 @@ _conv3x3(a, b, Array2D(3,3)) return 'conv3x3(Array2D(%sx%s))' % tuple(args) +def conv3x3_numpy(args): + a = NumpyArray(int(args[0]), int(args[1])) + b = NumpyArray(a.width, a.height) + for i in range(10): + _conv3x3(a, b, NumpyArray(3,3)) + return 'conv3x3(NumpyArray(%sx%s))' % tuple(args) + def dilate3x3(args): a = Array2D(int(args[0]), int(args[1])) b = Array2D(a.width, a.height) From noreply at buildbot.pypy.org Thu Dec 15 09:34:59 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Dec 2011 09:34:59 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: RPythonization Message-ID: <20111215083459.BB67582221@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50532:78db9015b5f0 Date: 2011-12-15 10:14 +0200 http://bitbucket.org/pypy/pypy/changeset/78db9015b5f0/ Log: RPythonization diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -893,6 +893,7 @@ self.strides = strides self.backstrides = backstrides BaseArray.__init__(self, shape, parent.order) + assert isinstance(parent, W_NDimArray) self.parent = parent self.invalidates = parent.invalidates @@ -1113,7 +1114,7 @@ ) arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) shapelen = len(shape) - arr_iter = ArrayIterator(arr) + arr_iter = ArrayIterator(arr.size) for i in range(len(elems_w)): w_elem = elems_w[i] dtype.setitem(arr.storage, arr_iter.offset, diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -40,9 +40,8 @@ def __init__(self, iterators): self = hint(self, access_directly=True) self.iterators = iterators - self.final_iter = None for i, iter in enumerate(self.iterators): - if not isinstance(iter, ConstantIterator) or not isinstance(iter, BroadcastIterator): + if not isinstance(iter, ConstantIterator):# or not isinstance(iter, BroadcastIterator): self.final_iter = i break else: @@ -93,11 +92,16 @@ return 'Array' def _create_iter(self, iterlist, arr): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + arr = arr.get_concrete() + assert isinstance(arr, W_NDimArray) if self.iter_no >= len(iterlist): iterlist.append(ArrayIterator(arr.size)) def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import W_NDimArray arr = arr.get_concrete() + assert isinstance(arr, W_NDimArray) iter = frame.iterators[self.iter_no] return arr.dtype.getitem(arr.storage, iter.offset) @@ -111,6 +115,8 @@ iterlist.append(iter) def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Scalar + assert isinstance(arr, Scalar) return arr.value class 
ViewSignature(Signature): @@ -139,6 +145,8 @@ iterlist.append(ViewIterator(arr)) def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import W_NDimSlice + assert isinstance(arr, W_NDimSlice) arr = arr.get_concrete() iter = frame.iterators[self.iter_no] return arr.find_dtype().getitem(arr.parent.storage, iter.offset) @@ -148,7 +156,7 @@ return 'FlatIter(%s)' % self.child.debug_repr() def _create_iter(self, iterlist, arr): - XXX + raise NotImplementedError class Call1(Signature): def __init__(self, func, child): @@ -170,9 +178,13 @@ self.child._invent_numbering(cache, allnumbers) def _create_iter(self, iterlist, arr): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) self.child._create_iter(iterlist, arr.values) def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) v = self.child.eval(frame, arr.values).convert_to(arr.res_dtype) return self.unfunc(arr.res_dtype, v) @@ -197,10 +209,15 @@ self.right._invent_numbering(cache, allnumbers) def _create_iter(self, iterlist, arr): + from pypy.module.micronumpy.interp_numarray import Call2 + + assert isinstance(arr, Call2) self.left._create_iter(iterlist, arr.left) self.right._create_iter(iterlist, arr.right) def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Call2 + assert isinstance(arr, Call2) lhs = self.left.eval(frame, arr.left).convert_to(arr.calc_dtype) rhs = self.right.eval(frame, arr.right).convert_to(arr.calc_dtype) return self.binfunc(arr.calc_dtype, lhs, rhs) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -48,9 +48,11 @@ interp = InterpreterState(codes[i]) interp.run(space) w_res = interp.results[-1] - if isinstance(w_res, BaseArray): - w_res = w_res.eval(w_res.start_iter()) - + if isinstance(w_res, W_NDimArray): + concr = w_res.get_concrete() + sig = concr.find_sig() + frame = sig.create_frame(w_res) + w_res = sig.eval(frame, concr) if isinstance(w_res, interp_boxes.W_Float64Box): return w_res.value elif isinstance(w_res, interp_boxes.W_BoolBox): From noreply at buildbot.pypy.org Thu Dec 15 09:35:00 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Dec 2011 09:35:00 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: rpythonization, remove one int_add Message-ID: <20111215083500.E602082221@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50533:876091f5df5a Date: 2011-12-15 10:34 +0200 http://bitbucket.org/pypy/pypy/changeset/876091f5df5a/ Log: rpythonization, remove one int_add diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -782,10 +782,11 @@ """ Class for representing virtual arrays, such as binary ops or ufuncs """ - def __init__(self, shape, res_dtype, order): + def __init__(self, name, shape, res_dtype, order): BaseArray.__init__(self, shape, order) self.forced_result = None self.res_dtype = res_dtype + self.name = name def _del_sources(self): # Function for deleting references to source arrays, to allow garbage-collecting them @@ -837,8 +838,8 @@ class Call1(VirtualArray): - def __init__(self, ufunc, shape, res_dtype, values, order): - VirtualArray.__init__(self, shape, res_dtype, + def __init__(self, ufunc, name, shape, 
res_dtype, values, order): + VirtualArray.__init__(self, name, shape, res_dtype, values.order) self.values = values self.ufunc = ufunc @@ -855,14 +856,14 @@ def create_sig(self): if self.forced_result is not None: return self.forced_result.create_sig() - return signature.Call1(self.ufunc, self.values.create_sig()) + return signature.Call1(self.ufunc, self.name, self.values.create_sig()) class Call2(VirtualArray): """ Intermediate class for performing binary operations. """ - def __init__(self, ufunc, shape, calc_dtype, res_dtype, left, right): - VirtualArray.__init__(self, shape, res_dtype, left.order) + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right): + VirtualArray.__init__(self, name, shape, res_dtype, left.order) self.ufunc = ufunc self.left = left self.right = right @@ -881,7 +882,7 @@ def create_sig(self): if self.forced_result is not None: return self.forced_result.create_sig() - return signature.Call2(self.ufunc, self.left.create_sig(), + return signature.Call2(self.ufunc, self.name, self.left.create_sig(), self.right.create_sig()) class ViewArray(BaseArray): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -50,7 +50,7 @@ return self.reduce(space, w_obj, multidim=False) def reduce(self, space, w_obj, multidim): - from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar, Call2 + from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " @@ -68,7 +68,8 @@ promote_to_largest=True ) shapelen = len(obj.shape) - sig = find_sig(ReduceSignature(self.func, ScalarSignature(dtype), + sig = find_sig(ReduceSignature(self.func, self.name, + ScalarSignature(dtype), obj.create_sig())) frame = sig.create_frame(obj) if shapelen > 1 and not multidim: @@ -117,7 +118,8 @@ if isinstance(w_obj, Scalar): return self.func(res_dtype, w_obj.value.convert_to(res_dtype)) - w_res = Call1(self.func, w_obj.shape, res_dtype, w_obj, w_obj.order) + w_res = Call1(self.func, self.name, w_obj.shape, res_dtype, w_obj, + w_obj.order) w_obj.add_invalidates(w_res) return w_res @@ -156,7 +158,7 @@ ) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) - w_res = Call2(self.func, new_shape, calc_dtype, + w_res = Call2(self.func, self.name, new_shape, calc_dtype, res_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -1,7 +1,7 @@ -from pypy.rlib.objectmodel import r_dict, compute_identity_hash +from pypy.rlib.objectmodel import r_dict, compute_identity_hash, compute_hash from pypy.module.micronumpy.interp_iter import ViewIterator, ArrayIterator, \ BroadcastIterator, OneDimIterator, ConstantIterator -from pypy.rlib.jit import hint, unroll_safe +from pypy.rlib.jit import hint, unroll_safe, promote # def components_eq(lhs, rhs): # if len(lhs) != len(rhs): @@ -37,18 +37,23 @@ class NumpyEvalFrame(object): _virtualizable2_ = ['iterators[*]', 'final_iter'] + @unroll_safe def __init__(self, iterators): - self = hint(self, access_directly=True) - self.iterators = iterators - for i, iter in enumerate(self.iterators): + self = hint(self, access_directly=True, fresh_virtualizable=True) + self.iterators = iterators[:] + for i in 
range(len(self.iterators)): + iter = self.iterators[i] if not isinstance(iter, ConstantIterator):# or not isinstance(iter, BroadcastIterator): self.final_iter = i break else: - raise Exception("Cannot find a non-broadcast non-constant iter") + self.final_iter = -1 def done(self): - return self.iterators[self.final_iter].done() + final_iter = promote(self.final_iter) + if final_iter < 0: + return False + return self.iterators[final_iter].done() @unroll_safe def next(self, shapelen): @@ -159,12 +164,13 @@ raise NotImplementedError class Call1(Signature): - def __init__(self, func, child): + def __init__(self, func, name, child): self.unfunc = func self.child = child + self.name = name def hash(self): - return compute_identity_hash(self.unfunc) ^ self.child.hash() << 1 + return compute_hash(self.name) ^ self.child.hash() << 1 def eq(self, other): if type(self) is not type(other): @@ -172,7 +178,7 @@ return self.unfunc is other.unfunc and self.child.eq(other.child) def debug_repr(self): - return 'Call1(%s)' % (self.child.debug_repr()) + return 'Call1(%s, %s)' % (self.name, self.child.debug_repr()) def _invent_numbering(self, cache, allnumbers): self.child._invent_numbering(cache, allnumbers) @@ -189,13 +195,14 @@ return self.unfunc(arr.res_dtype, v) class Call2(Signature): - def __init__(self, func, left, right): + def __init__(self, func, name, left, right): self.binfunc = func self.left = left self.right = right + self.name = name def hash(self): - return (compute_identity_hash(self.binfunc) ^ (self.left.hash() << 1) ^ + return (compute_hash(self.name) ^ (self.left.hash() << 1) ^ (self.right.hash() << 2)) def eq(self, other): diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -80,7 +80,7 @@ def test_add(self): result = self.run("add") self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, - 'setinteriorfield_raw': 1, 'int_add': 3, + 'setinteriorfield_raw': 1, 'int_add': 2, 'int_ge': 1, 'guard_false': 1, 'jump': 1}) assert result == 3 + 3 From noreply at buildbot.pypy.org Thu Dec 15 09:37:03 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Dec 2011 09:37:03 +0100 (CET) Subject: [pypy-commit] jitviewer default: insist a bit more on displayng loops Message-ID: <20111215083703.6180C82221@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r177:ca0dcdbd8036 Date: 2011-12-15 10:36 +0200 http://bitbucket.org/pypy/jitviewer/changeset/ca0dcdbd8036/ Log: insist a bit more on displayng loops diff --git a/bin/jitviewer.py b/bin/jitviewer.py --- a/bin/jitviewer.py +++ b/bin/jitviewer.py @@ -136,7 +136,10 @@ except (IOError, OSError): source = CodeReprNoFile(loop) else: - source = CodeRepr(inspect.getsource(code), code, loop) + try: + source = CodeRepr(inspect.getsource(code), code, loop) + except: + source = CodeReprNoFile(loop) d = {'html': flask.render_template('loop.html', source=source, current_loop=no, From noreply at buildbot.pypy.org Thu Dec 15 09:53:55 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Dec 2011 09:53:55 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: an extra test just because Message-ID: <20111215085355.BC27E82221@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50534:c0dff290b339 Date: 2011-12-15 10:44 +0200 http://bitbucket.org/pypy/pypy/changeset/c0dff290b339/ Log: an extra test just because diff --git 
a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -137,6 +137,16 @@ interp = self.run(code) assert interp.results[0].value.value == 15 + def test_sum2(self): + code = """ + a = |30| + b = a + a + sum(b) + """ + interp = self.run(code) + assert interp.results[0].value.value == 30 * (30 - 1) + + def test_array_write(self): code = """ a = [1,2,3,4,5] From noreply at buildbot.pypy.org Thu Dec 15 09:53:56 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Dec 2011 09:53:56 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: remove some int_add's Message-ID: <20111215085356.E584082221@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50535:cd58a567d17d Date: 2011-12-15 10:53 +0200 http://bitbucket.org/pypy/pypy/changeset/cd58a567d17d/ Log: remove some int_add's diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -48,13 +48,15 @@ interp = InterpreterState(codes[i]) interp.run(space) w_res = interp.results[-1] - if isinstance(w_res, W_NDimArray): + if isinstance(w_res, BaseArray): concr = w_res.get_concrete() sig = concr.find_sig() - frame = sig.create_frame(w_res) + frame = sig.create_frame(concr) w_res = sig.eval(frame, concr) if isinstance(w_res, interp_boxes.W_Float64Box): return w_res.value + if isinstance(w_res, interp_boxes.W_Int64Box): + return float(w_res.value) elif isinstance(w_res, interp_boxes.W_BoolBox): return float(w_res.value) raise TypeError(w_res) @@ -108,7 +110,7 @@ result = self.run("sum") assert result == 2 * sum(range(30)) self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 2, - "int_add": 2, "int_ge": 1, "guard_false": 1, + "int_add": 1, "int_ge": 1, "guard_false": 1, "jump": 1}) def define_prod(): @@ -125,7 +127,7 @@ expected *= i * 2 assert result == expected self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 2, + "float_mul": 1, "int_add": 1, "int_ge": 1, "guard_false": 1, "jump": 1}) def test_max(self): @@ -166,7 +168,7 @@ result = self.run("any") assert result == 1 self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, - "float_ne": 1, "int_add": 2, + "float_ne": 1, "int_add": 1, "int_ge": 1, "jump": 1, "guard_false": 2}) @@ -206,7 +208,7 @@ result = self.run("ufunc") assert result == -6 self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_neg": 1, - "setinteriorfield_raw": 1, "int_add": 3, + "setinteriorfield_raw": 1, "int_add": 2, "int_ge": 1, "guard_false": 1, "jump": 1}) def define_specialization(): From noreply at buildbot.pypy.org Thu Dec 15 10:19:56 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Dec 2011 10:19:56 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: Improve a bit on max() bridge Message-ID: <20111215091956.E3AAE82221@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50536:dfe668607a47 Date: 2011-12-15 11:19 +0200 http://bitbucket.org/pypy/pypy/changeset/dfe668607a47/ Log: Improve a bit on max() bridge diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -882,7 +882,8 @@ def create_sig(self): if 
self.forced_result is not None: return self.forced_result.create_sig() - return signature.Call2(self.ufunc, self.name, self.left.create_sig(), + return signature.Call2(self.ufunc, self.name, self.calc_dtype, + self.left.create_sig(), self.right.create_sig()) class ViewArray(BaseArray): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -68,7 +68,7 @@ promote_to_largest=True ) shapelen = len(obj.shape) - sig = find_sig(ReduceSignature(self.func, self.name, + sig = find_sig(ReduceSignature(self.func, self.name, dtype, ScalarSignature(dtype), obj.create_sig())) frame = sig.create_frame(obj) @@ -98,6 +98,8 @@ class W_Ufunc1(W_Ufunc): argcount = 1 + _immutable_fields_ = ["func", "name"] + def __init__(self, func, name, promote_to_float=False, promote_bools=False, identity=None): @@ -125,7 +127,7 @@ class W_Ufunc2(W_Ufunc): - _immutable_fields_ = ["comparison_func", "func"] + _immutable_fields_ = ["comparison_func", "func", "name"] argcount = 2 def __init__(self, func, name, promote_to_float=False, promote_bools=False, @@ -158,7 +160,8 @@ ) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) - w_res = Call2(self.func, self.name, new_shape, calc_dtype, + w_res = Call2(self.func, self.name, + new_shape, calc_dtype, res_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -61,6 +61,9 @@ self.iterators[i] = self.iterators[i].next(shapelen) class Signature(object): + _attrs_ = ['iter_no'] + _immutable_fields_ = ['iter_no'] + def invent_numbering(self): cache = r_dict(sigeq, sighash) allnumbers = [] @@ -81,12 +84,15 @@ return NumpyEvalFrame(iterlist) class ConcreteSignature(Signature): + _immutable_fields_ = ['dtype'] + def __init__(self, dtype): self.dtype = dtype def eq(self, other): if type(self) is not type(other): return False + assert isinstance(other, ConcreteSignature) return self.dtype is other.dtype def hash(self): @@ -108,7 +114,7 @@ arr = arr.get_concrete() assert isinstance(arr, W_NDimArray) iter = frame.iterators[self.iter_no] - return arr.dtype.getitem(arr.storage, iter.offset) + return self.dtype.getitem(arr.storage, iter.offset) class ScalarSignature(ConcreteSignature): def debug_repr(self): @@ -125,12 +131,15 @@ return arr.value class ViewSignature(Signature): + _immutable_fields_ = ['child'] + def __init__(self, child): self.child = child def eq(self, other): if type(self) is not type(other): return False + assert isinstance(other, ViewSignature) return self.child.eq(other.child) def hash(self): @@ -164,6 +173,8 @@ raise NotImplementedError class Call1(Signature): + _immutable_fields_ = ['unfunc', 'name', 'child'] + def __init__(self, func, name, child): self.unfunc = func self.child = child @@ -175,6 +186,7 @@ def eq(self, other): if type(self) is not type(other): return False + assert isinstance(other, Call1) return self.unfunc is other.unfunc and self.child.eq(other.child) def debug_repr(self): @@ -195,11 +207,14 @@ return self.unfunc(arr.res_dtype, v) class Call2(Signature): - def __init__(self, func, name, left, right): + _immutable_fields_ = ['binfunc', 'name', 'calc_dtype', 'left', 'right'] + + def __init__(self, func, name, calc_dtype, left, right): self.binfunc = func self.left = left self.right = right self.name = name + 
self.calc_dtype = calc_dtype def hash(self): return (compute_hash(self.name) ^ (self.left.hash() << 1) ^ @@ -208,7 +223,9 @@ def eq(self, other): if type(self) is not type(other): return False + assert isinstance(other, Call2) return (self.binfunc is other.binfunc and + self.calc_dtype is other.calc_dtype and self.left.eq(other.left) and self.right.eq(other.right)) def _invent_numbering(self, cache, allnumbers): @@ -225,9 +242,9 @@ def eval(self, frame, arr): from pypy.module.micronumpy.interp_numarray import Call2 assert isinstance(arr, Call2) - lhs = self.left.eval(frame, arr.left).convert_to(arr.calc_dtype) - rhs = self.right.eval(frame, arr.right).convert_to(arr.calc_dtype) - return self.binfunc(arr.calc_dtype, lhs, rhs) + lhs = self.left.eval(frame, arr.left).convert_to(self.calc_dtype) + rhs = self.right.eval(frame, arr.right).convert_to(self.calc_dtype) + return self.binfunc(self.calc_dtype, lhs, rhs) def debug_repr(self): return 'Call2(%s, %s)' % (self.left.debug_repr(), diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -130,15 +130,18 @@ "float_mul": 1, "int_add": 1, "int_ge": 1, "guard_false": 1, "jump": 1}) - def test_max(self): - py.test.skip("broken, investigate") - result = self.run(""" + def define_max(): + return """ a = |30| a[13] = 128 b = a + a max(b) - """) + """ + + def test_max(self): + result = self.run("max") assert result == 256 + py.test.skip("not there yet, getting though") self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_mul": 1, "int_add": 1, "int_lt": 1, "guard_true": 1, "jump": 1}) From noreply at buildbot.pypy.org Thu Dec 15 10:42:12 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Dec 2011 10:42:12 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: have ForcedSignature instead of ArraySignature (it's the same otherwise), Message-ID: <20111215094212.B241782221@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50537:c2da6bb0433f Date: 2011-12-15 11:41 +0200 http://bitbucket.org/pypy/pypy/changeset/c2da6bb0433f/ Log: have ForcedSignature instead of ArraySignature (it's the same otherwise), cut down on number of ops diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -855,7 +855,7 @@ def create_sig(self): if self.forced_result is not None: - return self.forced_result.create_sig() + return signature.ForcedSignature(self.forced_result.dtype) return signature.Call1(self.ufunc, self.name, self.values.create_sig()) class Call2(VirtualArray): @@ -881,7 +881,7 @@ def create_sig(self): if self.forced_result is not None: - return self.forced_result.create_sig() + return signature.ForcedSignature(self.forced_result.dtype) return signature.Call2(self.ufunc, self.name, self.calc_dtype, self.left.create_sig(), self.right.create_sig()) @@ -1019,6 +1019,8 @@ """ A class representing contiguous array. 
We know that each iteration by say ufunc will increase the data index by one """ + _immutable_fields_ = ['storage'] + def __init__(self, size, shape, dtype, order='C'): BaseArray.__init__(self, shape, order) self.size = size diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -91,7 +91,8 @@ shapelen=shapelen, self=self, value=value, obj=obj, frame=frame, dtype=dtype) - value = self.func(dtype, value, sig.eval(frame, obj).convert_to(dtype)) + assert isinstance(sig, ReduceSignature) + value = sig.binfunc(dtype, value, sig.eval(frame, obj).convert_to(dtype)) frame.next(shapelen) return value diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -104,18 +104,34 @@ def _create_iter(self, iterlist, arr): from pypy.module.micronumpy.interp_numarray import W_NDimArray - arr = arr.get_concrete() assert isinstance(arr, W_NDimArray) if self.iter_no >= len(iterlist): iterlist.append(ArrayIterator(arr.size)) def eval(self, frame, arr): from pypy.module.micronumpy.interp_numarray import W_NDimArray - arr = arr.get_concrete() assert isinstance(arr, W_NDimArray) iter = frame.iterators[self.iter_no] return self.dtype.getitem(arr.storage, iter.offset) +class ForcedSignature(ArraySignature): + def debug_repr(self): + return 'ForcedArray' + + def _create_iter(self, iterlist, arr): + from pypy.module.micronumpy.interp_numarray import VirtualArray + assert isinstance(arr, VirtualArray) + arr = arr.forced_result + if self.iter_no >= len(iterlist): + iterlist.append(ArrayIterator(arr.size)) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import VirtualArray + assert isinstance(arr, VirtualArray) + arr = arr.forced_result + iter = frame.iterators[self.iter_no] + return self.dtype.getitem(arr.storage, iter.offset) + class ScalarSignature(ConcreteSignature): def debug_repr(self): return 'Scalar' diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -190,14 +190,12 @@ # This is the sum of the ops for both loops, however if you remove the # optimization then you end up with 2 float_adds, so we can still be # sure it was optimized correctly. - # XXX the comment above is wrong now. We need preferrably a way to - # count the two loops separately - py.test.skip(":(") - self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, - 'guard_class': 22, 'int_add': 8, 'float_mul': 2, - 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, + self.check_resops({'setinteriorfield_raw': 4, 'getfield_gc': 19, + 'getfield_gc_pure': 6, + 'guard_class': 8, 'int_add': 8, 'float_mul': 2, + 'jump': 4, 'int_ge': 4, 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, - 'guard_value': 2}) + }) def define_ufunc(): return """ From noreply at buildbot.pypy.org Thu Dec 15 11:04:33 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 11:04:33 +0100 (CET) Subject: [pypy-commit] pypy counter-decay: Write the parametrisation of decaying counters, and a test. 
Message-ID: <20111215100433.1598D82221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: counter-decay Changeset: r50538:642e93456721 Date: 2011-12-14 17:46 +0100 http://bitbucket.org/pypy/pypy/changeset/642e93456721/ Log: Write the parametrisation of decaying counters, and a test. Missing the actual implementation. diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1557,6 +1557,13 @@ in_recursion = 0 def __init__(self, staticdata, jitdriver_sd): + try: + jitdriver_sd.warmstate.decay_counters + except AttributeError: # for tests + pass + else: + jitdriver_sd.warmstate.decay_counters() + # self.staticdata = staticdata self.cpu = staticdata.cpu self.jitdriver_sd = jitdriver_sd diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2910,6 +2910,27 @@ res = self.meta_interp(f, [32]) assert res == f(32) + def test_decay_counters(self): + myjitdriver = JitDriver(greens = ['m'], reds = ['n']) + def f(m, n): + while n > 0: + myjitdriver.jit_merge_point(m=m, n=n) + n += m + n -= m + n -= 1 + def main(): + f(5, 7) # run 7x with m=5 counter[m=5] = 7 + f(15, 10) # compiles one loop counter[m=5] = 3 (automatic decay) + f(5, 5) # run 5x times with m=5 counter[m=5] = 8 + # + self.meta_interp(main, [], decay_halflife=1, + function_threshold=0, threshold=9, trace_eagerness=99) + self.check_trace_count(1) + # + self.meta_interp(main, [], decay_halflife=1, + function_threshold=0, threshold=8, trace_eagerness=99) + self.check_trace_count(2) + class TestOOtype(BasicTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -275,3 +275,6 @@ state.make_jitdriver_callbacks() res = state.can_never_inline(5, 42.5) assert res is True + +def test_decay_counters(): + xxx diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -64,9 +64,11 @@ def jittify_and_run(interp, graph, args, repeat=1, graph_and_interp_only=False, backendopt=False, trace_limit=sys.maxint, + threshold=3, trace_eagerness=2, inline=False, loop_longevity=0, retrace_limit=5, - function_threshold=4, - enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, **kwds): + function_threshold=4, decay_halflife=0, + enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, + **kwds): from pypy.config.config import ConfigError translator = interp.typer.annotator.translator try: @@ -83,15 +85,16 @@ pass warmrunnerdesc = WarmRunnerDesc(translator, backendopt=backendopt, **kwds) for jd in warmrunnerdesc.jitdrivers_sd: - jd.warmstate.set_param_threshold(3) # for tests + jd.warmstate.set_param_threshold(threshold) jd.warmstate.set_param_function_threshold(function_threshold) - jd.warmstate.set_param_trace_eagerness(2) # for tests + jd.warmstate.set_param_trace_eagerness(trace_eagerness) jd.warmstate.set_param_trace_limit(trace_limit) jd.warmstate.set_param_inlining(inline) jd.warmstate.set_param_loop_longevity(loop_longevity) jd.warmstate.set_param_retrace_limit(retrace_limit) jd.warmstate.set_param_max_retrace_guards(max_retrace_guards) jd.warmstate.set_param_enable_opts(enable_opts) + jd.warmstate.set_param_decay_halflife(decay_halflife) warmrunnerdesc.finish() if 
graph_and_interp_only: return interp, graph diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -213,6 +213,12 @@ def set_param_inlining(self, value): self.inlining = value + def set_param_decay_halflife(self, value): + if value <= 0: # use 0 or -1 to mean "no decay" + self.decay_factor = 1.0 + else: + self.decay_factor = 0.5 ** (1.0 / value) + def set_param_enable_opts(self, value): from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT, ALL_OPTS_NAMES diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -395,6 +395,7 @@ 'retrace_limit': 5, 'max_retrace_guards': 15, 'enable_opts': 'all', + 'decay_halflife': 40, } unroll_parameters = unrolling_iterable(PARAMETERS.items()) DEFAULT = object() From noreply at buildbot.pypy.org Thu Dec 15 11:04:34 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 11:04:34 +0100 (CET) Subject: [pypy-commit] pypy counter-decay: Implement decaying. See comments. Message-ID: <20111215100434.4775482221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: counter-decay Changeset: r50539:b84533eb4350 Date: 2011-12-15 00:39 +0100 http://bitbucket.org/pypy/pypy/changeset/b84533eb4350/ Log: Implement decaying. See comments. diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -1,5 +1,5 @@ import math -from pypy.rlib.rarithmetic import r_int64 +from pypy.rlib.rarithmetic import r_int64, r_uint from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.rlib.objectmodel import we_are_translated @@ -81,3 +81,8 @@ # a single one is not enough for all tests :-( rgc.collect(); rgc.collect(); rgc.collect() debug_stop("jit-mem-collect") + + def get_current_generation_uint(self): + """Return the current generation, possibly truncated to a uint. 
+ To use only as an approximation for decaying counters.""" + return r_uint(self.current_generation) diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1557,13 +1557,6 @@ in_recursion = 0 def __init__(self, staticdata, jitdriver_sd): - try: - jitdriver_sd.warmstate.decay_counters - except AttributeError: # for tests - pass - else: - jitdriver_sd.warmstate.decay_counters() - # self.staticdata = staticdata self.cpu = staticdata.cpu self.jitdriver_sd = jitdriver_sd diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -1,3 +1,4 @@ +import math from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -8,7 +9,7 @@ from pypy.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr from pypy.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr from pypy.jit.codewriter import longlong -from pypy.rlib.rarithmetic import r_singlefloat +from pypy.rlib.rarithmetic import r_singlefloat, r_uint def boxfloat(x): return BoxFloat(longlong.getfloatstorage(x)) @@ -277,4 +278,11 @@ assert res is True def test_decay_counters(): - xxx + cell = JitCell(r_uint(5)) + cell.counter = 100 + cell.adjust_counter(r_uint(5), math.log(0.9)) + assert cell.counter == 100 + cell.adjust_counter(r_uint(6), math.log(0.9)) + assert cell.counter == 90 + cell.adjust_counter(r_uint(9), math.log(0.9)) + assert cell.counter == int(90 * (0.9**3)) diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -1,10 +1,10 @@ -import sys, weakref +import sys, weakref, math from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, cast_base_ptr_to_instance from pypy.rpython.annlowlevel import cast_object_to_ptr from pypy.rlib.objectmodel import specialize, we_are_translated, r_dict -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rlib.nonconst import NonConstant from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.jit import PARAMETERS @@ -153,6 +153,24 @@ dont_trace_here = False wref_procedure_token = None + def __init__(self, generation): + # The stored 'counter' value follows an exponential decay model. + # Conceptually after every generation, it decays by getting + # multiplied by a constant <= 1.0. In practice, decaying occurs + # lazily: the following field records the latest seen generation + # number, and adjustment is done by adjust_counter() when needed. + self.latest_generation_seen = generation + + def adjust_counter(self, generation, log_decay_factor): + if generation != self.latest_generation_seen: + # The latest_generation_seen is older than the current generation. + # Adjust by multiplying self.counter N times by decay_factor, i.e. + # by decay_factor ** N, which is equal to exp(log(decay_factor)*N). 
+ N = generation - self.latest_generation_seen + factor = math.exp(log_decay_factor * N) + self.counter = int(self.counter * factor) + self.latest_generation_seen = generation + def get_procedure_token(self): if self.wref_procedure_token is not None: token = self.wref_procedure_token() @@ -214,10 +232,15 @@ self.inlining = value def set_param_decay_halflife(self, value): - if value <= 0: # use 0 or -1 to mean "no decay" - self.decay_factor = 1.0 + # Use 0 or -1 to mean "no decay". Initialize the internal variable + # 'log_decay_factor'. It is choosen such that by multiplying the + # counter on loops by 'exp(log_decay_factor)' (<= 1.0) every + # generation, then the counter will be divided by two after 'value' + # generations have passed. + if value <= 0: + self.log_decay_factor = 0.0 # log(1.0) else: - self.decay_factor = 0.5 ** (1.0 / value) + self.log_decay_factor = math.log(0.5) / value def set_param_enable_opts(self, value): from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT, ALL_OPTS_NAMES @@ -288,6 +311,11 @@ confirm_enter_jit = self.confirm_enter_jit range_red_args = unrolling_iterable( range(num_green_args, num_green_args + jitdriver_sd.num_red_args)) + memmgr = self.warmrunnerdesc.memory_manager + if memmgr is not None: + get_current_generation = memmgr.get_current_generation_uint + else: + get_current_generation = lambda: r_uint(0) # get a new specialized copy of the method ARGS = [] for kind in jitdriver_sd.red_args_types: @@ -332,6 +360,8 @@ if cell.counter >= 0: # update the profiling counter + cell.adjust_counter(get_current_generation(), + self.log_decay_factor) n = cell.counter + threshold if n <= self.THRESHOLD_LIMIT: # bound not reached cell.counter = n @@ -424,6 +454,15 @@ # return jit_getter + def _new_jitcell(self): + warmrunnerdesc = self.warmrunnerdesc + if (warmrunnerdesc is not None and + warmrunnerdesc.memory_manager is not None): + gen = warmrunnerdesc.memory_manager.get_current_generation_uint() + else: + gen = r_uint(0) + return JitCell(gen) + def _make_jitcell_getter_default(self): "NOT_RPYTHON" jitdriver_sd = self.jitdriver_sd @@ -459,7 +498,7 @@ except KeyError: if not build: return None - cell = JitCell() + cell = self._new_jitcell() jitcell_dict[greenargs] = cell return cell return get_jitcell @@ -491,7 +530,7 @@ if not build: return cell if cell is None: - cell = JitCell() + cell = self._new_jitcell() # if we_are_translated(): cellref = cast_object_to_ptr(BASEJITCELL, cell) diff --git a/pypy/rpython/rint.py b/pypy/rpython/rint.py --- a/pypy/rpython/rint.py +++ b/pypy/rpython/rint.py @@ -126,10 +126,7 @@ rtype_inplace_rshift = rtype_rshift def rtype_pow(_, hop): - raise MissingRTypeOperation("pow(int, int)" - " (use float**float instead; it is too" - " easy to overlook the overflow" - " issues of int**int)") + raise MissingRTypeOperation("'**' not supported in RPython") rtype_pow_ovf = rtype_pow rtype_inplace_pow = rtype_pow From noreply at buildbot.pypy.org Thu Dec 15 11:04:35 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 11:04:35 +0100 (CET) Subject: [pypy-commit] pypy counter-decay: Not tested: clean up the jitcell_dicts that are used internally. This Message-ID: <20111215100435.6FC4A82221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: counter-decay Changeset: r50540:6e5fb0038df9 Date: 2011-12-15 01:01 +0100 http://bitbucket.org/pypy/pypy/changeset/6e5fb0038df9/ Log: Not tested: clean up the jitcell_dicts that are used internally. 
This is probably important to ensure that a long-running process cannot "leak" arbitrary amounts of memory due to having more and more JitCells stored in these big dicts. diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -38,6 +38,7 @@ self.current_generation = r_int64(1) self.next_check = r_int64(-1) self.alive_loops = {} + self._cleanup_jitcell_dicts = lambda: None def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: @@ -53,6 +54,7 @@ self.current_generation += 1 if self.current_generation == self.next_check: self._kill_old_loops_now() + self._cleanup_jitcell_dicts() self.next_check = self.current_generation + self.check_frequency def keep_loop_alive(self, looptoken): @@ -86,3 +88,27 @@ """Return the current generation, possibly truncated to a uint. To use only as an approximation for decaying counters.""" return r_uint(self.current_generation) + + def record_jitcell_dict(self, warmstate, jitcell_dict): + """NOT_RPYTHON. The given jitcell_dict is a dict that needs + occasional clean-ups of old cells. A cell is old if it never + reached the threshold, and its counter decayed to a tiny value.""" + # note that the various jitcell_dicts have different RPython types, + # so we have to make a different function for each one. These + # functions are chained to each other: each calls the previous one. + def cleanup_dict(): + minimum = min(warmstate.increment_threshold, + warmstate.increment_function_threshold) + current = self.get_current_generation_uint() + killme = [] + for key, cell in jitcell_dict.iteritems(): + if cell.counter >= 0: + cell.adjust_counter(current, warmstate.log_decay_factor) + if cell.counter < minimum: + killme.append(key) + for key in killme: + del jitcell_dict[key] + cleanup_previous() + # + cleanup_previous = self._cleanup_jitcell_dicts + self._cleanup_jitcell_dicts = cleanup_dict diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -190,7 +190,6 @@ class WarmEnterState(object): THRESHOLD_LIMIT = sys.maxint // 2 - default_jitcell_dict = None def __init__(self, warmrunnerdesc, jitdriver_sd): "NOT_RPYTHON" @@ -491,6 +490,9 @@ self.warmrunnerdesc.stats.jitcell_dicts.append(jitcell_dict) except AttributeError: pass + memmgr = self.warmrunnerdesc.memory_manager + if memmgr: + memmgr.record_jitcell_dict(self, jitcell_dict) # def get_jitcell(build, *greenargs): try: @@ -509,6 +511,10 @@ get_jitcell_at_ptr = self.jitdriver_sd._get_jitcell_at_ptr set_jitcell_at_ptr = self.jitdriver_sd._set_jitcell_at_ptr lltohlhack = {} + # note that there is no equivalent of record_jitcell_dict() + # in the case of custom getters. We assume that the interpreter + # stores the JitCells on some objects that can go away by GC, + # like the PyCode objects in PyPy. 
# def get_jitcell(build, *greenargs): fn = support.maybe_on_top_of_llinterp(rtyper, get_jitcell_at_ptr) From noreply at buildbot.pypy.org Thu Dec 15 11:04:36 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 11:04:36 +0100 (CET) Subject: [pypy-commit] pypy counter-decay: Fix yet another obscure case in which we can end up with an apparent Message-ID: <20111215100436.9726A82221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: counter-decay Changeset: r50541:ba32c0042356 Date: 2011-12-15 01:43 +0100 http://bitbucket.org/pypy/pypy/changeset/ba32c0042356/ Log: Fix yet another obscure case in which we can end up with an apparent leak. diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -50,9 +50,9 @@ self.check_frequency = check_frequency self.next_check = self.current_generation + 1 - def next_generation(self): + def next_generation(self, do_cleanups_now=True): self.current_generation += 1 - if self.current_generation == self.next_check: + if do_cleanups_now and self.current_generation >= self.next_check: self._kill_old_loops_now() self._cleanup_jitcell_dicts() self.next_check = self.current_generation + self.check_frequency @@ -89,7 +89,7 @@ To use only as an approximation for decaying counters.""" return r_uint(self.current_generation) - def record_jitcell_dict(self, warmstate, jitcell_dict): + def record_jitcell_dict(self, callback): """NOT_RPYTHON. The given jitcell_dict is a dict that needs occasional clean-ups of old cells. A cell is old if it never reached the threshold, and its counter decayed to a tiny value.""" @@ -97,17 +97,7 @@ # so we have to make a different function for each one. These # functions are chained to each other: each calls the previous one. def cleanup_dict(): - minimum = min(warmstate.increment_threshold, - warmstate.increment_function_threshold) - current = self.get_current_generation_uint() - killme = [] - for key, cell in jitcell_dict.iteritems(): - if cell.counter >= 0: - cell.adjust_counter(current, warmstate.log_decay_factor) - if cell.counter < minimum: - killme.append(key) - for key in killme: - del jitcell_dict[key] + callback() cleanup_previous() # cleanup_previous = self._cleanup_jitcell_dicts diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -490,9 +490,38 @@ self.warmrunnerdesc.stats.jitcell_dicts.append(jitcell_dict) except AttributeError: pass - memmgr = self.warmrunnerdesc.memory_manager + # + memmgr = self.warmrunnerdesc and self.warmrunnerdesc.memory_manager if memmgr: - memmgr.record_jitcell_dict(self, jitcell_dict) + def _cleanup_dict(): + minimum = min(self.increment_threshold, + self.increment_function_threshold) + currentgen = memmgr.get_current_generation_uint() + killme = [] + for key, cell in jitcell_dict.iteritems(): + if cell.counter >= 0: + cell.adjust_counter(currentgen, self.log_decay_factor) + if cell.counter < minimum: + killme.append(key) + for key in killme: + del jitcell_dict[key] + # + def _maybe_cleanup_dict(): + # If no tracing goes on at all because the jitcells are + # each time for new greenargs, the dictionary grows forever. + # So every one in a (rare) while, we decide to force an + # artificial next_generation() and _cleanup_dict(). 
+ self._trigger_automatic_cleanup += 1 + if self._trigger_automatic_cleanup > 20000: + self._trigger_automatic_cleanup = 0 + memmgr.next_generation(do_cleanups_now=False) + _cleanup_dict() + # + self._trigger_automatic_cleanup = 0 + memmgr.record_jitcell_dict(_cleanup_dict) + else: + def _maybe_cleanup_dict(): + pass # def get_jitcell(build, *greenargs): try: @@ -500,6 +529,7 @@ except KeyError: if not build: return None + _maybe_cleanup_dict() cell = self._new_jitcell() jitcell_dict[greenargs] = cell return cell From noreply at buildbot.pypy.org Thu Dec 15 11:04:37 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 11:04:37 +0100 (CET) Subject: [pypy-commit] pypy counter-decay: Test and fix. Message-ID: <20111215100437.C4E9E82221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: counter-decay Changeset: r50542:f7182dd71d78 Date: 2011-12-15 10:53 +0100 http://bitbucket.org/pypy/pypy/changeset/f7182dd71d78/ Log: Test and fix. diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -286,3 +286,67 @@ assert cell.counter == 90 cell.adjust_counter(r_uint(9), math.log(0.9)) assert cell.counter == int(90 * (0.9**3)) + +def test_cleanup_jitcell_dict(): + from pypy.jit.metainterp.memmgr import MemoryManager + class FakeWarmRunnerDesc: + memory_manager = MemoryManager() + class cpu: + pass + class FakeJitDriverSD: + _green_args_spec = [lltype.Signed] + # + # Test creating tons of jitcells that remain at 0 + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + cell1 = get_jitcell(True, -1) + assert len(warmstate._jitcell_dict) == 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 1 + # + for i in range(1, 20005): + get_jitcell(True, i) # should trigger a clean-up at 20001 + assert len(warmstate._jitcell_dict) == (i % 20000) + 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 2 + # + # Same test, with one jitcell that has a counter of BASE instead of 0 + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + warmstate.set_param_decay_halflife(2) + warmstate.set_param_threshold(5) + warmstate.set_param_function_threshold(0) + get_jitcell = warmstate._make_jitcell_getter_default() + cell2 = get_jitcell(True, -2) + cell2.counter = BASE = warmstate.increment_threshold * 3 + # + for i in range(0, 20005): + get_jitcell(True, i) + assert len(warmstate._jitcell_dict) == (i % 19999) + 2 + # + assert cell2 in warmstate._jitcell_dict.values() + assert cell2.counter == int(BASE * math.sqrt(0.5)) # decayed once + assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 + # + # Same test, with jitcells that are compiled and free by the memmgr + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + get_jitcell(True, -1) + assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 + # + for i in range(1, 20005): + cell = get_jitcell(True, i) + cell.counter = -1 + cell.wref_procedure_token = None # or a dead weakref, equivalently + assert len(warmstate._jitcell_dict) == (i % 20000) + 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 + # + # Same test, with counter == -2 (rare case, kept alive) + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + cell 
= get_jitcell(True, -1) + cell.counter = -2 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 + # + for i in range(1, 20005): + cell = get_jitcell(True, i) + cell.counter = -2 + assert len(warmstate._jitcell_dict) == i + 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 5 diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -494,8 +494,11 @@ memmgr = self.warmrunnerdesc and self.warmrunnerdesc.memory_manager if memmgr: def _cleanup_dict(): - minimum = min(self.increment_threshold, - self.increment_function_threshold) + minimum = sys.maxint + if self.increment_threshold > 0: + minimum = min(minimum, self.increment_threshold) + if self.increment_function_threshold > 0: + minimum = min(minimum, self.increment_function_threshold) currentgen = memmgr.get_current_generation_uint() killme = [] for key, cell in jitcell_dict.iteritems(): @@ -503,6 +506,9 @@ cell.adjust_counter(currentgen, self.log_decay_factor) if cell.counter < minimum: killme.append(key) + elif (cell.counter == -1 + and cell.get_procedure_token() is None): + killme.append(key) for key in killme: del jitcell_dict[key] # @@ -518,6 +524,7 @@ _cleanup_dict() # self._trigger_automatic_cleanup = 0 + self._jitcell_dict = jitcell_dict # for tests memmgr.record_jitcell_dict(_cleanup_dict) else: def _maybe_cleanup_dict(): From noreply at buildbot.pypy.org Thu Dec 15 12:03:16 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Dec 2011 12:03:16 +0100 (CET) Subject: [pypy-commit] pypy default: improve caching Message-ID: <20111215110316.BB36082221@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50543:976baeae86ed Date: 2011-12-15 13:02 +0200 http://bitbucket.org/pypy/pypy/changeset/976baeae86ed/ Log: improve caching diff --git a/pypy/tool/gcc_cache.py b/pypy/tool/gcc_cache.py --- a/pypy/tool/gcc_cache.py +++ b/pypy/tool/gcc_cache.py @@ -11,6 +11,9 @@ # Import 'platform' every time, the compiler may have been changed from pypy.translator.platform import platform cache_dir = cache_dir_root.join(cachename).ensure(dir=1) + c_files.extend([py.path.local(f) for f in eci.separate_module_files]) + eci = ExternalCompilationInfo(**eci._copy_attributes()) + eci.separate_module_files = () filecontents = [c_file.read() for c_file in c_files] key = repr((filecontents, eci, platform.key())) hash = md5(key).hexdigest() From noreply at buildbot.pypy.org Thu Dec 15 12:49:56 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 15 Dec 2011 12:49:56 +0100 (CET) Subject: [pypy-commit] pypy default: setinteriorfield_should_not_clear_cache Message-ID: <20111215114956.9E6B682221@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r50544:94f53881b7bf Date: 2011-12-15 12:48 +0100 http://bitbucket.org/pypy/pypy/changeset/94f53881b7bf/ Log: setinteriorfield_should_not_clear_cache diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -246,15 +246,16 @@ self.force_lazy_setfields_and_arrayitems_for_guard()) return opnum = op.getopnum() - if (opnum == rop.SETFIELD_GC or # handled specially - opnum == rop.SETFIELD_RAW or # no effect on GC struct/array - opnum == rop.SETARRAYITEM_GC or # handled specially - opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct - opnum == rop.STRSETITEM or # no effect on GC struct/array - opnum == 
rop.UNICODESETITEM or # no effect on GC struct/array - opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever - opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array - opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array + if (opnum == rop.SETFIELD_GC or # handled specially + opnum == rop.SETFIELD_RAW or # no effect on GC struct/array + opnum == rop.SETARRAYITEM_GC or # handled specially + opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct + opnum == rop.SETINTERIORFIELD_RAW or # no effect on GC struct + opnum == rop.STRSETITEM or # no effect on GC struct/array + opnum == rop.UNICODESETITEM or # no effect on GC struct/array + opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever + opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array + opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array return assert opnum != rop.CALL_PURE if (opnum == rop.CALL or diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7755,6 +7755,22 @@ """ self.optimize_loop(ops, expected) + def test_setinteriorfield_should_not_clear_cache(self): + ops = """ + [i0, p0] + i2 = getfield_gc(p0, descr=adescr) + i3 = call(i2, descr=nonwritedescr) + setinteriorfield_raw(i0, i2, i3) + jump(i0, p0) + """ + expected = """ + [i0, p0, i2] + i3 = call(i2, descr=nonwritedescr) + setinteriorfield_raw(i0, i2, i3) + jump(i0, p0, i2) + """ + self.optimize_loop(ops, expected) + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass From noreply at buildbot.pypy.org Thu Dec 15 12:49:57 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 15 Dec 2011 12:49:57 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge Message-ID: <20111215114957.C438882286@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r50545:ee80fd67c26a Date: 2011-12-15 12:49 +0100 http://bitbucket.org/pypy/pypy/changeset/ee80fd67c26a/ Log: hg merge diff --git a/pypy/tool/gcc_cache.py b/pypy/tool/gcc_cache.py --- a/pypy/tool/gcc_cache.py +++ b/pypy/tool/gcc_cache.py @@ -11,6 +11,9 @@ # Import 'platform' every time, the compiler may have been changed from pypy.translator.platform import platform cache_dir = cache_dir_root.join(cachename).ensure(dir=1) + c_files.extend([py.path.local(f) for f in eci.separate_module_files]) + eci = ExternalCompilationInfo(**eci._copy_attributes()) + eci.separate_module_files = () filecontents = [c_file.read() for c_file in c_files] key = repr((filecontents, eci, platform.key())) hash = md5(key).hexdigest() From noreply at buildbot.pypy.org Thu Dec 15 13:23:48 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 13:23:48 +0100 (CET) Subject: [pypy-commit] pypy counter-decay: close branch for merge Message-ID: <20111215122348.1351182221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: counter-decay Changeset: r50546:9df7dbac5802 Date: 2011-12-15 12:15 +0100 http://bitbucket.org/pypy/pypy/changeset/9df7dbac5802/ Log: close branch for merge From noreply at buildbot.pypy.org Thu Dec 15 13:23:49 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 13:23:49 +0100 (CET) Subject: [pypy-commit] pypy default: Merge the counter-decay branch: Message-ID: <20111215122349.4ED6782286@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50547:5309a1389556 Date: 2011-12-15 12:20 +0100 
http://bitbucket.org/pypy/pypy/changeset/5309a1389556/ Log: Merge the counter-decay branch: * add a theoretically useful "exponential decaying" of loop counters * it is useful in practice to decay and kill old JitCells from the jitcell_dict, which is used for all jitdrivers apart from PyPy's main one. diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -1,5 +1,5 @@ import math -from pypy.rlib.rarithmetic import r_int64 +from pypy.rlib.rarithmetic import r_int64, r_uint from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.rlib.objectmodel import we_are_translated @@ -38,6 +38,7 @@ self.current_generation = r_int64(1) self.next_check = r_int64(-1) self.alive_loops = {} + self._cleanup_jitcell_dicts = lambda: None def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: @@ -49,10 +50,11 @@ self.check_frequency = check_frequency self.next_check = self.current_generation + 1 - def next_generation(self): + def next_generation(self, do_cleanups_now=True): self.current_generation += 1 - if self.current_generation == self.next_check: + if do_cleanups_now and self.current_generation >= self.next_check: self._kill_old_loops_now() + self._cleanup_jitcell_dicts() self.next_check = self.current_generation + self.check_frequency def keep_loop_alive(self, looptoken): @@ -81,3 +83,22 @@ # a single one is not enough for all tests :-( rgc.collect(); rgc.collect(); rgc.collect() debug_stop("jit-mem-collect") + + def get_current_generation_uint(self): + """Return the current generation, possibly truncated to a uint. + To use only as an approximation for decaying counters.""" + return r_uint(self.current_generation) + + def record_jitcell_dict(self, callback): + """NOT_RPYTHON. The given jitcell_dict is a dict that needs + occasional clean-ups of old cells. A cell is old if it never + reached the threshold, and its counter decayed to a tiny value.""" + # note that the various jitcell_dicts have different RPython types, + # so we have to make a different function for each one. These + # functions are chained to each other: each calls the previous one. 
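
A minimal sketch of the chaining pattern described in the comment above, in plain Python with illustrative names (this is not the RPython code being patched; each newly recorded callback wraps the previously stored one, so calling the head of the chain runs every registered cleanup):

    _cleanup_chain = lambda: None          # starts out as a no-op

    def record_cleanup(callback):
        global _cleanup_chain
        previous = _cleanup_chain
        def chained():
            callback()                     # run the newly recorded cleanup...
            previous()                     # ...then everything recorded before it
        _cleanup_chain = chained

    calls = []
    record_cleanup(lambda: calls.append("dict A"))
    record_cleanup(lambda: calls.append("dict B"))
    _cleanup_chain()
    assert calls == ["dict B", "dict A"]   # newest hook first, then the older ones
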
+ def cleanup_dict(): + callback() + cleanup_previous() + # + cleanup_previous = self._cleanup_jitcell_dicts + self._cleanup_jitcell_dicts = cleanup_dict diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2910,6 +2910,27 @@ res = self.meta_interp(f, [32]) assert res == f(32) + def test_decay_counters(self): + myjitdriver = JitDriver(greens = ['m'], reds = ['n']) + def f(m, n): + while n > 0: + myjitdriver.jit_merge_point(m=m, n=n) + n += m + n -= m + n -= 1 + def main(): + f(5, 7) # run 7x with m=5 counter[m=5] = 7 + f(15, 10) # compiles one loop counter[m=5] = 3 (automatic decay) + f(5, 5) # run 5x times with m=5 counter[m=5] = 8 + # + self.meta_interp(main, [], decay_halflife=1, + function_threshold=0, threshold=9, trace_eagerness=99) + self.check_trace_count(1) + # + self.meta_interp(main, [], decay_halflife=1, + function_threshold=0, threshold=8, trace_eagerness=99) + self.check_trace_count(2) + class TestOOtype(BasicTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -1,3 +1,4 @@ +import math from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -8,7 +9,7 @@ from pypy.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr from pypy.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr from pypy.jit.codewriter import longlong -from pypy.rlib.rarithmetic import r_singlefloat +from pypy.rlib.rarithmetic import r_singlefloat, r_uint def boxfloat(x): return BoxFloat(longlong.getfloatstorage(x)) @@ -275,3 +276,77 @@ state.make_jitdriver_callbacks() res = state.can_never_inline(5, 42.5) assert res is True + +def test_decay_counters(): + cell = JitCell(r_uint(5)) + cell.counter = 100 + cell.adjust_counter(r_uint(5), math.log(0.9)) + assert cell.counter == 100 + cell.adjust_counter(r_uint(6), math.log(0.9)) + assert cell.counter == 90 + cell.adjust_counter(r_uint(9), math.log(0.9)) + assert cell.counter == int(90 * (0.9**3)) + +def test_cleanup_jitcell_dict(): + from pypy.jit.metainterp.memmgr import MemoryManager + class FakeWarmRunnerDesc: + memory_manager = MemoryManager() + class cpu: + pass + class FakeJitDriverSD: + _green_args_spec = [lltype.Signed] + # + # Test creating tons of jitcells that remain at 0 + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + cell1 = get_jitcell(True, -1) + assert len(warmstate._jitcell_dict) == 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 1 + # + for i in range(1, 20005): + get_jitcell(True, i) # should trigger a clean-up at 20001 + assert len(warmstate._jitcell_dict) == (i % 20000) + 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 2 + # + # Same test, with one jitcell that has a counter of BASE instead of 0 + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + warmstate.set_param_decay_halflife(2) + warmstate.set_param_threshold(5) + warmstate.set_param_function_threshold(0) + get_jitcell = warmstate._make_jitcell_getter_default() + cell2 = get_jitcell(True, -2) + cell2.counter = BASE = warmstate.increment_threshold * 3 + # + for i in range(0, 20005): + get_jitcell(True, i) + assert 
len(warmstate._jitcell_dict) == (i % 19999) + 2 + # + assert cell2 in warmstate._jitcell_dict.values() + assert cell2.counter == int(BASE * math.sqrt(0.5)) # decayed once + assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 + # + # Same test, with jitcells that are compiled and free by the memmgr + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + get_jitcell(True, -1) + assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 + # + for i in range(1, 20005): + cell = get_jitcell(True, i) + cell.counter = -1 + cell.wref_procedure_token = None # or a dead weakref, equivalently + assert len(warmstate._jitcell_dict) == (i % 20000) + 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 + # + # Same test, with counter == -2 (rare case, kept alive) + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + cell = get_jitcell(True, -1) + cell.counter = -2 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 + # + for i in range(1, 20005): + cell = get_jitcell(True, i) + cell.counter = -2 + assert len(warmstate._jitcell_dict) == i + 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 5 diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -64,9 +64,11 @@ def jittify_and_run(interp, graph, args, repeat=1, graph_and_interp_only=False, backendopt=False, trace_limit=sys.maxint, + threshold=3, trace_eagerness=2, inline=False, loop_longevity=0, retrace_limit=5, - function_threshold=4, - enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, **kwds): + function_threshold=4, decay_halflife=0, + enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, + **kwds): from pypy.config.config import ConfigError translator = interp.typer.annotator.translator try: @@ -83,15 +85,16 @@ pass warmrunnerdesc = WarmRunnerDesc(translator, backendopt=backendopt, **kwds) for jd in warmrunnerdesc.jitdrivers_sd: - jd.warmstate.set_param_threshold(3) # for tests + jd.warmstate.set_param_threshold(threshold) jd.warmstate.set_param_function_threshold(function_threshold) - jd.warmstate.set_param_trace_eagerness(2) # for tests + jd.warmstate.set_param_trace_eagerness(trace_eagerness) jd.warmstate.set_param_trace_limit(trace_limit) jd.warmstate.set_param_inlining(inline) jd.warmstate.set_param_loop_longevity(loop_longevity) jd.warmstate.set_param_retrace_limit(retrace_limit) jd.warmstate.set_param_max_retrace_guards(max_retrace_guards) jd.warmstate.set_param_enable_opts(enable_opts) + jd.warmstate.set_param_decay_halflife(decay_halflife) warmrunnerdesc.finish() if graph_and_interp_only: return interp, graph diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -1,10 +1,10 @@ -import sys, weakref +import sys, weakref, math from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, cast_base_ptr_to_instance from pypy.rpython.annlowlevel import cast_object_to_ptr from pypy.rlib.objectmodel import specialize, we_are_translated, r_dict -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rlib.nonconst import NonConstant from pypy.rlib.unroll import unrolling_iterable from 
pypy.rlib.jit import PARAMETERS @@ -153,6 +153,24 @@ dont_trace_here = False wref_procedure_token = None + def __init__(self, generation): + # The stored 'counter' value follows an exponential decay model. + # Conceptually after every generation, it decays by getting + # multiplied by a constant <= 1.0. In practice, decaying occurs + # lazily: the following field records the latest seen generation + # number, and adjustment is done by adjust_counter() when needed. + self.latest_generation_seen = generation + + def adjust_counter(self, generation, log_decay_factor): + if generation != self.latest_generation_seen: + # The latest_generation_seen is older than the current generation. + # Adjust by multiplying self.counter N times by decay_factor, i.e. + # by decay_factor ** N, which is equal to exp(log(decay_factor)*N). + N = generation - self.latest_generation_seen + factor = math.exp(log_decay_factor * N) + self.counter = int(self.counter * factor) + self.latest_generation_seen = generation + def get_procedure_token(self): if self.wref_procedure_token is not None: token = self.wref_procedure_token() @@ -172,7 +190,6 @@ class WarmEnterState(object): THRESHOLD_LIMIT = sys.maxint // 2 - default_jitcell_dict = None def __init__(self, warmrunnerdesc, jitdriver_sd): "NOT_RPYTHON" @@ -213,6 +230,17 @@ def set_param_inlining(self, value): self.inlining = value + def set_param_decay_halflife(self, value): + # Use 0 or -1 to mean "no decay". Initialize the internal variable + # 'log_decay_factor'. It is choosen such that by multiplying the + # counter on loops by 'exp(log_decay_factor)' (<= 1.0) every + # generation, then the counter will be divided by two after 'value' + # generations have passed. + if value <= 0: + self.log_decay_factor = 0.0 # log(1.0) + else: + self.log_decay_factor = math.log(0.5) / value + def set_param_enable_opts(self, value): from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT, ALL_OPTS_NAMES @@ -282,6 +310,11 @@ confirm_enter_jit = self.confirm_enter_jit range_red_args = unrolling_iterable( range(num_green_args, num_green_args + jitdriver_sd.num_red_args)) + memmgr = self.warmrunnerdesc.memory_manager + if memmgr is not None: + get_current_generation = memmgr.get_current_generation_uint + else: + get_current_generation = lambda: r_uint(0) # get a new specialized copy of the method ARGS = [] for kind in jitdriver_sd.red_args_types: @@ -326,6 +359,8 @@ if cell.counter >= 0: # update the profiling counter + cell.adjust_counter(get_current_generation(), + self.log_decay_factor) n = cell.counter + threshold if n <= self.THRESHOLD_LIMIT: # bound not reached cell.counter = n @@ -418,6 +453,15 @@ # return jit_getter + def _new_jitcell(self): + warmrunnerdesc = self.warmrunnerdesc + if (warmrunnerdesc is not None and + warmrunnerdesc.memory_manager is not None): + gen = warmrunnerdesc.memory_manager.get_current_generation_uint() + else: + gen = r_uint(0) + return JitCell(gen) + def _make_jitcell_getter_default(self): "NOT_RPYTHON" jitdriver_sd = self.jitdriver_sd @@ -447,13 +491,53 @@ except AttributeError: pass # + memmgr = self.warmrunnerdesc and self.warmrunnerdesc.memory_manager + if memmgr: + def _cleanup_dict(): + minimum = sys.maxint + if self.increment_threshold > 0: + minimum = min(minimum, self.increment_threshold) + if self.increment_function_threshold > 0: + minimum = min(minimum, self.increment_function_threshold) + currentgen = memmgr.get_current_generation_uint() + killme = [] + for key, cell in jitcell_dict.iteritems(): + if cell.counter >= 0: + 
cell.adjust_counter(currentgen, self.log_decay_factor) + if cell.counter < minimum: + killme.append(key) + elif (cell.counter == -1 + and cell.get_procedure_token() is None): + killme.append(key) + for key in killme: + del jitcell_dict[key] + # + def _maybe_cleanup_dict(): + # If no tracing goes on at all because the jitcells are + # each time for new greenargs, the dictionary grows forever. + # So every one in a (rare) while, we decide to force an + # artificial next_generation() and _cleanup_dict(). + self._trigger_automatic_cleanup += 1 + if self._trigger_automatic_cleanup > 20000: + self._trigger_automatic_cleanup = 0 + memmgr.next_generation(do_cleanups_now=False) + _cleanup_dict() + # + self._trigger_automatic_cleanup = 0 + self._jitcell_dict = jitcell_dict # for tests + memmgr.record_jitcell_dict(_cleanup_dict) + else: + def _maybe_cleanup_dict(): + pass + # def get_jitcell(build, *greenargs): try: cell = jitcell_dict[greenargs] except KeyError: if not build: return None - cell = JitCell() + _maybe_cleanup_dict() + cell = self._new_jitcell() jitcell_dict[greenargs] = cell return cell return get_jitcell @@ -464,6 +548,10 @@ get_jitcell_at_ptr = self.jitdriver_sd._get_jitcell_at_ptr set_jitcell_at_ptr = self.jitdriver_sd._set_jitcell_at_ptr lltohlhack = {} + # note that there is no equivalent of record_jitcell_dict() + # in the case of custom getters. We assume that the interpreter + # stores the JitCells on some objects that can go away by GC, + # like the PyCode objects in PyPy. # def get_jitcell(build, *greenargs): fn = support.maybe_on_top_of_llinterp(rtyper, get_jitcell_at_ptr) @@ -485,7 +573,7 @@ if not build: return cell if cell is None: - cell = JitCell() + cell = self._new_jitcell() # if we_are_translated(): cellref = cast_object_to_ptr(BASEJITCELL, cell) diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -395,6 +395,7 @@ 'retrace_limit': 5, 'max_retrace_guards': 15, 'enable_opts': 'all', + 'decay_halflife': 40, } unroll_parameters = unrolling_iterable(PARAMETERS.items()) DEFAULT = object() diff --git a/pypy/rpython/rint.py b/pypy/rpython/rint.py --- a/pypy/rpython/rint.py +++ b/pypy/rpython/rint.py @@ -126,10 +126,7 @@ rtype_inplace_rshift = rtype_rshift def rtype_pow(_, hop): - raise MissingRTypeOperation("pow(int, int)" - " (use float**float instead; it is too" - " easy to overlook the overflow" - " issues of int**int)") + raise MissingRTypeOperation("'**' not supported in RPython") rtype_pow_ovf = rtype_pow rtype_inplace_pow = rtype_pow From noreply at buildbot.pypy.org Thu Dec 15 13:23:50 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 13:23:50 +0100 (CET) Subject: [pypy-commit] pypy default: add an assert. Message-ID: <20111215122350.74ED282221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50548:e790db7af776 Date: 2011-12-15 13:01 +0100 http://bitbucket.org/pypy/pypy/changeset/e790db7af776/ Log: add an assert. diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -166,6 +166,7 @@ # The latest_generation_seen is older than the current generation. # Adjust by multiplying self.counter N times by decay_factor, i.e. # by decay_factor ** N, which is equal to exp(log(decay_factor)*N). 
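
A small standalone sketch of that decay rule, with made-up numbers (counter 100, halflife 40 generations) rather than values from any real run:

    import math

    def decayed_counter(counter, generations_elapsed, halflife):
        # set_param_decay_halflife(): pick log_decay_factor so that the
        # counter is halved once 'halflife' generations have passed
        log_decay_factor = math.log(0.5) / halflife
        # adjust_counter(): multiply by decay_factor ** N, computed as
        # exp(log_decay_factor * N)
        return int(counter * math.exp(log_decay_factor * generations_elapsed))

    print(decayed_counter(100, 40, halflife=40))   # about 50: one halflife elapsed
    print(decayed_counter(100, 80, halflife=40))   # about 25: two halflives elapsed
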
+ assert self.counter >= 0 N = generation - self.latest_generation_seen factor = math.exp(log_decay_factor * N) self.counter = int(self.counter * factor) From noreply at buildbot.pypy.org Thu Dec 15 13:23:51 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 13:23:51 +0100 (CET) Subject: [pypy-commit] pypy default: fix Message-ID: <20111215122351.98E6582221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50549:15811e23d71a Date: 2011-12-15 13:08 +0100 http://bitbucket.org/pypy/pypy/changeset/15811e23d71a/ Log: fix diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -21,6 +21,7 @@ # class MemoryManager(object): + NO_NEXT_CHECK = r_int64(2 ** 63 - 1) def __init__(self): self.check_frequency = -1 @@ -36,13 +37,13 @@ # According to my estimates it's about 5e9 years given 1000 loops # per second self.current_generation = r_int64(1) - self.next_check = r_int64(-1) + self.next_check = self.NO_NEXT_CHECK self.alive_loops = {} self._cleanup_jitcell_dicts = lambda: None def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: - self.next_check = r_int64(-1) + self.next_check = self.NO_NEXT_CHECK else: self.max_age = max_age if check_frequency <= 0: From noreply at buildbot.pypy.org Thu Dec 15 13:23:52 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 13:23:52 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20111215122352.C6B3882221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50550:0a4ac8556799 Date: 2011-12-15 13:23 +0100 http://bitbucket.org/pypy/pypy/changeset/0a4ac8556799/ Log: merge heads diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -246,15 +246,16 @@ self.force_lazy_setfields_and_arrayitems_for_guard()) return opnum = op.getopnum() - if (opnum == rop.SETFIELD_GC or # handled specially - opnum == rop.SETFIELD_RAW or # no effect on GC struct/array - opnum == rop.SETARRAYITEM_GC or # handled specially - opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct - opnum == rop.STRSETITEM or # no effect on GC struct/array - opnum == rop.UNICODESETITEM or # no effect on GC struct/array - opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever - opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array - opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array + if (opnum == rop.SETFIELD_GC or # handled specially + opnum == rop.SETFIELD_RAW or # no effect on GC struct/array + opnum == rop.SETARRAYITEM_GC or # handled specially + opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct + opnum == rop.SETINTERIORFIELD_RAW or # no effect on GC struct + opnum == rop.STRSETITEM or # no effect on GC struct/array + opnum == rop.UNICODESETITEM or # no effect on GC struct/array + opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever + opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array + opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array return assert opnum != rop.CALL_PURE if (opnum == rop.CALL or diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7755,6 +7755,22 @@ """ self.optimize_loop(ops, expected) + 
def test_setinteriorfield_should_not_clear_cache(self): + ops = """ + [i0, p0] + i2 = getfield_gc(p0, descr=adescr) + i3 = call(i2, descr=nonwritedescr) + setinteriorfield_raw(i0, i2, i3) + jump(i0, p0) + """ + expected = """ + [i0, p0, i2] + i3 = call(i2, descr=nonwritedescr) + setinteriorfield_raw(i0, i2, i3) + jump(i0, p0, i2) + """ + self.optimize_loop(ops, expected) + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass From noreply at buildbot.pypy.org Thu Dec 15 13:49:28 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Dec 2011 13:49:28 +0100 (CET) Subject: [pypy-commit] pypy virtualizable-experiments: close branch that got nowhere, I don't even remember what it was about Message-ID: <20111215124928.6534482221@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: virtualizable-experiments Changeset: r50551:40d47febc01e Date: 2011-12-15 14:47 +0200 http://bitbucket.org/pypy/pypy/changeset/40d47febc01e/ Log: close branch that got nowhere, I don't even remember what it was about From noreply at buildbot.pypy.org Thu Dec 15 13:49:29 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Dec 2011 13:49:29 +0100 (CET) Subject: [pypy-commit] pypy faster-json: close merged branch Message-ID: <20111215124929.9044082221@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: faster-json Changeset: r50552:4d5064194771 Date: 2011-12-15 14:47 +0200 http://bitbucket.org/pypy/pypy/changeset/4d5064194771/ Log: close merged branch From noreply at buildbot.pypy.org Thu Dec 15 13:49:30 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Dec 2011 13:49:30 +0100 (CET) Subject: [pypy-commit] pypy numpy-share-iterators: superseeded by refactor-signature Message-ID: <20111215124930.C15F482221@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-share-iterators Changeset: r50553:83a3724c72f3 Date: 2011-12-15 14:47 +0200 http://bitbucket.org/pypy/pypy/changeset/83a3724c72f3/ Log: superseeded by refactor-signature From noreply at buildbot.pypy.org Thu Dec 15 13:49:37 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Dec 2011 13:49:37 +0100 (CET) Subject: [pypy-commit] pypy better-jit-hooks: merge in default Message-ID: <20111215124938.0710A82221@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: better-jit-hooks Changeset: r50554:e3c7e20c107b Date: 2011-12-15 14:48 +0200 http://bitbucket.org/pypy/pypy/changeset/e3c7e20c107b/ Log: merge in default diff too long, truncating to 10000 out of 23777 lines diff --git a/lib-python/modified-2.7/ctypes/__init__.py b/lib-python/modified-2.7/ctypes/__init__.py --- a/lib-python/modified-2.7/ctypes/__init__.py +++ b/lib-python/modified-2.7/ctypes/__init__.py @@ -351,7 +351,7 @@ self._FuncPtr = _FuncPtr if handle is None: - self._handle = _ffi.CDLL(name) + self._handle = _ffi.CDLL(name, mode) else: self._handle = handle diff --git a/lib-python/modified-2.7/ctypes/test/test_callbacks.py b/lib-python/modified-2.7/ctypes/test/test_callbacks.py --- a/lib-python/modified-2.7/ctypes/test/test_callbacks.py +++ b/lib-python/modified-2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test class Callbacks(unittest.TestCase): @@ -98,6 +99,7 @@ ## self.check_type(c_char_p, "abc") ## self.check_type(c_char_p, "def") + @xfail def test_pyobject(self): o = () from sys import getrefcount as grc diff --git a/lib-python/modified-2.7/ctypes/test/test_libc.py b/lib-python/modified-2.7/ctypes/test/test_libc.py --- 
a/lib-python/modified-2.7/ctypes/test/test_libc.py +++ b/lib-python/modified-2.7/ctypes/test/test_libc.py @@ -25,7 +25,10 @@ lib.my_qsort(chars, len(chars)-1, sizeof(c_char), comparefunc(sort)) self.assertEqual(chars.raw, " ,,aaaadmmmnpppsss\x00") - def test_no_more_xfail(self): + def SKIPPED_test_no_more_xfail(self): + # We decided to not explicitly support the whole ctypes-2.7 + # and instead go for a case-by-case, demand-driven approach. + # So this test is skipped instead of failing. import socket import ctypes.test self.assertTrue(not hasattr(ctypes.test, 'xfail'), diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -379,12 +379,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): @@ -404,7 +406,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# -*- coding: iso-8859-1 +# -*- coding: iso-8859-1 -*- # Note that PyPy contains also a built-in module 'sha' which will hide # this one if compiled in. 
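
The lib_pypy/_collections.py hunk above brings defaultdict construction and copying in line with CPython's signature (factory first, then the usual dict arguments). A short check of the intended behaviour, using the stdlib collections.defaultdict as the reference:

    from collections import defaultdict

    d = defaultdict(list, {'a': [1]})     # factory is the first positional argument
    d['b'].append(2)                      # missing key -> default_factory() == []
    assert d['a'] == [1] and d['b'] == [2]

    c = d.copy()                          # copy() must preserve the factory,
    assert c.default_factory is list      # i.e. type(d)(d.default_factory, d)
    assert c['missing'] == []

    try:
        defaultdict(42)                   # non-callable, non-None factory
    except TypeError:
        pass                              # rejected, as in the patched __init__
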
diff --git a/lib_pypy/distributed/socklayer.py b/lib_pypy/distributed/socklayer.py --- a/lib_pypy/distributed/socklayer.py +++ b/lib_pypy/distributed/socklayer.py @@ -2,7 +2,7 @@ import py from socket import socket -XXX needs import adaptation as 'green' is removed from py lib for years +raise ImportError("XXX needs import adaptation as 'green' is removed from py lib for years") from py.impl.green.msgstruct import decodemessage, message from socket import socket, AF_INET, SOCK_STREAM import marshal diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -25,7 +25,7 @@ __all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee'] + 'takewhile', 'tee', 'compress', 'product'] try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -164,6 +164,7 @@ # if something: # assume this causes a NameError # # _this_ lines and the one # below we don't want from entry.getsource() + end = min(end, len(source)) for i in range(self.lineno, end): if source[i].rstrip().endswith(':'): end = i + 1 diff --git a/pypy/annotation/binaryop.py b/pypy/annotation/binaryop.py --- a/pypy/annotation/binaryop.py +++ b/pypy/annotation/binaryop.py @@ -252,7 +252,26 @@ # unsignedness is considered a rare and contagious disease def union((int1, int2)): - knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + if int1.unsigned == int2.unsigned: + knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + else: + t1 = int1.knowntype + if t1 is bool: + t1 = int + t2 = int2.knowntype + if t2 is bool: + t2 = int + + if t2 is int: + if int2.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t1 + knowntype = t1 + elif t1 is int: + if int1.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t2 + knowntype = t2 + else: + raise UnionError, "Merging these types (%s, %s) is not supported" % (t1, t2) return SomeInteger(nonneg=int1.nonneg and int2.nonneg, knowntype=knowntype) diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -591,13 +591,11 @@ immutable = True def __init__(self, method): self.method = method - -NUMBER = object() + annotation_to_ll_map = [ (SomeSingleFloat(), lltype.SingleFloat), (s_None, lltype.Void), # also matches SomeImpossibleValue() (s_Bool, lltype.Bool), - (SomeInteger(knowntype=r_ulonglong), NUMBER), (SomeFloat(), lltype.Float), (SomeLongFloat(), lltype.LongFloat), (SomeChar(), lltype.Char), @@ -623,10 +621,11 @@ return lltype.Ptr(p.PARENTTYPE) if isinstance(s_val, SomePtr): return s_val.ll_ptrtype + if type(s_val) is SomeInteger: + return lltype.build_number(None, s_val.knowntype) + for witness, T in annotation_to_ll_map: if witness.contains(s_val): - if T is NUMBER: - return lltype.build_number(None, s_val.knowntype) return T if info is None: info = '' @@ -635,7 +634,7 @@ raise ValueError("%sshould return a low-level type,\ngot instead %r" % ( info, s_val)) -ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map if ll is not NUMBER]) +ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map]) def lltype_to_annotation(T): try: diff --git a/pypy/annotation/specialize.py b/pypy/annotation/specialize.py --- 
a/pypy/annotation/specialize.py +++ b/pypy/annotation/specialize.py @@ -36,9 +36,7 @@ newtup = SpaceOperation('newtuple', starargs, argscopy[-1]) newstartblock.operations.append(newtup) newstartblock.closeblock(Link(argscopy, graph.startblock)) - graph.startblock.isstartblock = False graph.startblock = newstartblock - newstartblock.isstartblock = True argnames = argnames + ['.star%d' % i for i in range(nb_extra_args)] graph.signature = Signature(argnames) # note that we can mostly ignore defaults: if nb_extra_args > 0, diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py --- a/pypy/annotation/test/test_annrpython.py +++ b/pypy/annotation/test/test_annrpython.py @@ -856,6 +856,46 @@ py.test.raises(Exception, a.build_types, f, []) # if you want to get a r_uint, you have to be explicit about it + def test_add_different_ints(self): + def f(a, b): + return a + b + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_different_ints(self): + def f(a, b): + if a: + c = a + else: + c = b + return c + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_ruint_zero(self): + def f(a): + if a: + c = a + else: + c = 0 + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_merge_ruint_nonneg_signed(self): + def f(a, b): + if a: + c = a + else: + assert b >= 0 + c = b + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint, int]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_prebuilt_long_that_is_not_too_long(self): small_constant = 12L def f(): @@ -3029,7 +3069,7 @@ if g(x, y): g(x, r_uint(y)) a = self.RPythonAnnotator() - a.build_types(f, [int, int]) + py.test.raises(Exception, a.build_types, f, [int, int]) def test_compare_with_zero(self): def g(): diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py --- a/pypy/bin/checkmodule.py +++ b/pypy/bin/checkmodule.py @@ -1,43 +1,45 @@ #! /usr/bin/env python """ -Usage: checkmodule.py [-b backend] +Usage: checkmodule.py -Compiles the PyPy extension module from pypy/module// -into a fake program which does nothing. Useful for testing whether a -modules compiles without doing a full translation. Default backend is cli. - -WARNING: this is still incomplete: there are chances that the -compilation fails with strange errors not due to the module. If a -module is known to compile during a translation but don't pass -checkmodule.py, please report the bug (or, better, correct it :-). +Check annotation and rtyping of the PyPy extension module from +pypy/module//. Useful for testing whether a +modules compiles without doing a full translation. 
""" import autopath -import sys +import sys, os from pypy.objspace.fake.checkmodule import checkmodule def main(argv): - try: - assert len(argv) in (2, 4) - if len(argv) == 2: - backend = 'cli' - modname = argv[1] - if modname in ('-h', '--help'): - print >> sys.stderr, __doc__ - sys.exit(0) - if modname.startswith('-'): - print >> sys.stderr, "Bad command line" - print >> sys.stderr, __doc__ - sys.exit(1) - else: - _, b, backend, modname = argv - assert b == '-b' - except AssertionError: + if len(argv) != 2: print >> sys.stderr, __doc__ sys.exit(2) + modname = argv[1] + if modname in ('-h', '--help'): + print >> sys.stderr, __doc__ + sys.exit(0) + if modname.startswith('-'): + print >> sys.stderr, "Bad command line" + print >> sys.stderr, __doc__ + sys.exit(1) + if os.path.sep in modname: + if os.path.basename(modname) == '': + modname = os.path.dirname(modname) + if os.path.basename(os.path.dirname(modname)) != 'module': + print >> sys.stderr, "Must give '../module/xxx', or just 'xxx'." + sys.exit(1) + modname = os.path.basename(modname) + try: + checkmodule(modname) + except Exception, e: + import traceback, pdb + traceback.print_exc() + pdb.post_mortem(sys.exc_info()[2]) + return 1 else: - checkmodule(modname, backend, interactive=True) - print 'Module compiled succesfully' + print 'Passed.' + return 0 if __name__ == '__main__': - main(sys.argv) + sys.exit(main(sys.argv)) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -252,6 +252,10 @@ "use small tuples", default=False), + BoolOption("withspecialisedtuple", + "use specialised tuples", + default=False), + BoolOption("withrope", "use ropes as the string implementation", default=False, requires=[("objspace.std.withstrslice", False), @@ -365,6 +369,7 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) + config.objspace.std.suggest(withspecialisedtuple=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -69,8 +69,8 @@ "statistics": [("translation.gctransformer", "framework")], "generation": [("translation.gctransformer", "framework")], "hybrid": [("translation.gctransformer", "framework")], - "boehm": [("translation.gctransformer", "boehm"), - ("translation.continuation", False)], # breaks + "boehm": [("translation.continuation", False), # breaks + ("translation.gctransformer", "boehm")], "markcompact": [("translation.gctransformer", "framework")], "minimark": [("translation.gctransformer", "framework")], }, diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -496,6 +496,17 @@ def setup(self): super(AppClassCollector, self).setup() cls = self.obj + # + # + for name in dir(cls): + if name.startswith('test_'): + func = getattr(cls, name, None) + code = getattr(func, 'func_code', None) + if code and code.co_flags & 32: + raise AssertionError("unsupported: %r is a generator " + "app-level test method" % (name,)) + # + # space = cls.space clsname = cls.__name__ if self.config.option.runappdirect: diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. 
-version = '1.6' +version = '1.7' # The full version, including alpha/beta/rc tags. -release = '1.6' +release = '1.7' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/config/objspace.std.withspecialisedtuple.txt b/pypy/doc/config/objspace.std.withspecialisedtuple.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withspecialisedtuple.txt @@ -0,0 +1,3 @@ +Use "specialized tuples", a custom implementation for some common kinds +of tuples. Currently limited to tuples of length 2, in three variants: +(int, int), (float, float), and a generic (object, object). diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -304,5 +304,14 @@ never a dictionary as it sometimes is in CPython. Assigning to ``__builtins__`` has no effect. +* directly calling the internal magic methods of a few built-in types + with invalid arguments may have a slightly different result. For + example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return + ``NotImplemented`` on PyPy; on CPython, only the later does, and the + former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` + both raise ``TypeError`` everywhere.) This difference is an + implementation detail that shows up because of internal C-level slots + that PyPy does not have. + .. include:: _ref.txt diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -112,10 +112,32 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. +Note that the JIT has a very high warm-up cost, meaning that the +programs are slow at the beginning. If you want to compare the timings +with CPython, even relatively simple programs need to run *at least* one +second, preferrably at least a few seconds. Large, complicated programs +need even more time to warm-up the JIT. + .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +--------------------------------------------------------------- +Couldn't the JIT dump and reload already-compiled machine code? +--------------------------------------------------------------- + +No, we found no way of doing that. The JIT generates machine code +containing a large number of constant addresses --- constant at the time +the machine code is written. The vast majority is probably not at all +constants that you find in the executable, with a nice link name. E.g. +the addresses of Python classes are used all the time, but Python +classes don't come statically from the executable; they are created anew +every time you restart your program. This makes saving and reloading +machine code completely impossible without some very advanced way of +mapping addresses in the old (now-dead) process to addresses in the new +process, including checking that all the previous assumptions about the +(now-dead) object are still true about the new object. + .. _`prolog and javascript`: diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. 
_`Release 1.6`: http://pypy.org/download.html +.. _`Release 1.7`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. Windows is supported, but platform-specific bugs tend to take longer before we notice and fix diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -1,6 +1,5 @@ """codegen helpers and AST constant folding.""" import sys -import itertools from pypy.interpreter.astcompiler import ast, consts, misc from pypy.tool import stdlib_opcode as ops @@ -146,8 +145,7 @@ } unrolling_unary_folders = unrolling_iterable(unary_folders.items()) -for folder in itertools.chain(binary_folders.itervalues(), - unary_folders.itervalues()): +for folder in binary_folders.values() + unary_folders.values(): folder._always_inline_ = True del folder diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1,4 +1,3 @@ -import itertools import pypy from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction @@ -191,8 +190,8 @@ def is_w(self, space, w_other): return self is w_other - def unique_id(self, space): - return space.wrap(compute_unique_id(self)) + def immutable_unique_id(self, space): + return None def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) @@ -488,6 +487,16 @@ 'parser', 'fcntl', '_codecs', 'binascii' ] + # These modules are treated like CPython treats built-in modules, + # i.e. they always shadow any xx.py. The other modules are treated + # like CPython treats extension modules, and are loaded in sys.path + # order by the fake entry '.../lib_pypy/__extensions__'. + MODULES_THAT_ALWAYS_SHADOW = dict.fromkeys([ + '__builtin__', '__pypy__', '_ast', '_codecs', '_sre', '_warnings', + '_weakref', 'errno', 'exceptions', 'gc', 'imp', 'marshal', + 'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport', + ], None) + def make_builtins(self): "NOT_RPYTHON: only for initializing the space." @@ -519,8 +528,8 @@ exception_types_w = self.export_builtin_exceptions() # initialize with "bootstrap types" from objspace (e.g. 
w_None) - types_w = itertools.chain(self.get_builtin_types().iteritems(), - exception_types_w.iteritems()) + types_w = (self.get_builtin_types().items() + + exception_types_w.items()) for name, w_type in types_w: self.setitem(self.builtin.w_dict, self.wrap(name), w_type) @@ -697,7 +706,10 @@ return w_two.is_w(self, w_one) def id(self, w_obj): - return w_obj.unique_id(self) + w_result = w_obj.immutable_unique_id(self) + if w_result is None: + w_result = self.wrap(compute_unique_id(w_obj)) + return w_result def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" @@ -1608,6 +1620,8 @@ 'UnicodeError', 'ValueError', 'ZeroDivisionError', + 'UnicodeEncodeError', + 'UnicodeDecodeError', ] ## Irregular part of the interface: diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated, instantiate from pypy.rlib.jit import hint from pypy.rlib.debug import make_sure_not_resized, check_nonneg -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rlib import jit from pypy.tool import stdlib_opcode from pypy.tool.stdlib_opcode import host_bytecode_spec @@ -167,7 +167,7 @@ # Execution starts just after the last_instr. Initially, # last_instr is -1. After a generator suspends it points to # the YIELD_VALUE instruction. - next_instr = self.last_instr + 1 + next_instr = r_uint(self.last_instr + 1) if next_instr != 0: self.pushvalue(w_inputvalue) # @@ -691,6 +691,7 @@ handlerposition = space.int_w(w_handlerposition) valuestackdepth = space.int_w(w_valuestackdepth) assert valuestackdepth >= 0 + assert handlerposition >= 0 blk = instantiate(get_block_class(opname)) blk.handlerposition = handlerposition blk.valuestackdepth = valuestackdepth diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -837,6 +837,7 @@ raise Yield def jump_absolute(self, jumpto, next_instr, ec): + check_nonneg(jumpto) return jumpto def JUMP_FORWARD(self, jumpby, next_instr): @@ -1278,7 +1279,7 @@ def handle(self, frame, unroller): next_instr = self.really_handle(frame, unroller) # JIT hack - return next_instr + return r_uint(next_instr) def really_handle(self, frame, unroller): """ Purely abstract method diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -54,7 +54,11 @@ # Hash support def default_identity_hash(space, w_obj): - return space.wrap(compute_identity_hash(w_obj)) + w_unique_id = w_obj.immutable_unique_id(space) + if w_unique_id is None: # common case + return space.wrap(compute_identity_hash(w_obj)) + else: + return space.hash(w_unique_id) # ____________________________________________________________ # diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -8,6 +8,7 @@ from pypy.objspace.flow.model import Variable, Constant from pypy.annotation import model as annmodel from pypy.jit.metainterp.history import REF, INT, FLOAT +from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -20,7 +21,7 @@ from pypy.jit.backend.llgraph import symbolic from pypy.jit.codewriter import 
longlong -from pypy.rlib import libffi +from pypy.rlib import libffi, clibffi from pypy.rlib.objectmodel import ComputedIntSymbolic, we_are_translated from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint @@ -48,6 +49,11 @@ value._the_opaque_pointer = op return op +def _normalize(value): + if isinstance(value, lltype._ptr): + value = lltype.top_container(value._obj) + return value + def from_opaque_string(s): if isinstance(s, str): return s @@ -322,6 +328,14 @@ _variables.append(v) return r +def compile_started_vars(clt): + if not hasattr(clt, '_debug_argtypes'): # only when compiling the loop + argtypes = [v.concretetype for v in _variables] + try: + clt._debug_argtypes = argtypes + except AttributeError: # when 'clt' is actually a translated + pass # GcStruct + def compile_add(loop, opnum): loop = _from_opaque(loop) loop.operations.append(Operation(opnum)) @@ -347,6 +361,16 @@ op = loop.operations[-1] op.descr = weakref.ref(descr) +TARGET_TOKENS = weakref.WeakKeyDictionary() + +def compile_add_target_token(loop, descr, clt): + # here, 'clt' is the compiled_loop_token of the original loop that + # we are compiling + loop = _from_opaque(loop) + op = loop.operations[-1] + descrobj = _normalize(descr) + TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args, clt + def compile_add_var(loop, intvar): loop = _from_opaque(loop) op = loop.operations[-1] @@ -381,13 +405,25 @@ _variables.append(v) return r -def compile_add_jump_target(loop, loop_target): +def compile_add_jump_target(loop, targettoken, source_clt): loop = _from_opaque(loop) - loop_target = _from_opaque(loop_target) + descrobj = _normalize(targettoken) + (loop_target, target_opindex, target_inputargs, target_clt + ) = TARGET_TOKENS[descrobj] + # + try: + assert source_clt._debug_argtypes == target_clt._debug_argtypes + except AttributeError: # when translated + pass + # op = loop.operations[-1] op.jump_target = loop_target + op.jump_target_opindex = target_opindex + op.jump_target_inputargs = target_inputargs assert op.opnum == rop.JUMP - assert len(op.args) == len(loop_target.inputargs) + assert [v.concretetype for v in op.args] == ( + [v.concretetype for v in target_inputargs]) + # if loop_target == loop: log.info("compiling new loop") else: @@ -521,10 +557,11 @@ self.opindex += 1 continue if op.opnum == rop.JUMP: - assert len(op.jump_target.inputargs) == len(args) - self.env = dict(zip(op.jump_target.inputargs, args)) + inputargs = op.jump_target_inputargs + assert len(inputargs) == len(args) + self.env = dict(zip(inputargs, args)) self.loop = op.jump_target - self.opindex = 0 + self.opindex = op.jump_target_opindex _stats.exec_jumps += 1 elif op.opnum == rop.FINISH: if self.verbose: @@ -617,6 +654,15 @@ # return _op_default_implementation + def op_label(self, _, *args): + op = self.loop.operations[self.opindex] + assert op.opnum == rop.LABEL + assert len(op.args) == len(args) + newenv = {} + for v, value in zip(op.args, args): + newenv[v] = value + self.env = newenv + def op_debug_merge_point(self, _, *args): from pypy.jit.metainterp.warmspot import get_stats try: @@ -959,6 +1005,7 @@ self._may_force = self.opindex try: inpargs = _from_opaque(ctl.compiled_version).inputargs + assert len(inpargs) == len(args) for i, inparg in enumerate(inpargs): TYPE = inparg.concretetype if TYPE is lltype.Signed: @@ -1432,6 +1479,10 @@ res = _getinteriorfield_raw(libffi.types.slong, array, index, width, ofs) return res +def do_getinteriorfield_raw_float(array, index, width, ofs): + res 
= _getinteriorfield_raw(libffi.types.double, array, index, width, ofs) + return res + def _getfield_raw(struct, fieldnum): STRUCT, fieldname = symbolic.TokenToField[fieldnum] ptr = cast_from_int(lltype.Ptr(STRUCT), struct) @@ -1510,12 +1561,17 @@ do_setinteriorfield_gc_float = new_setinteriorfield_gc(cast_from_floatstorage) do_setinteriorfield_gc_ptr = new_setinteriorfield_gc(cast_from_ptr) -def new_setinteriorfield_raw(ffitype): +def new_setinteriorfield_raw(cast_func, ffitype): def do_setinteriorfield_raw(array, index, newvalue, width, ofs): addr = rffi.cast(rffi.VOIDP, array) + for TYPE, ffitype2 in clibffi.ffitype_map: + if ffitype2 is ffitype: + newvalue = cast_func(TYPE, newvalue) + break return libffi.array_setitem(ffitype, width, addr, index, ofs, newvalue) return do_setinteriorfield_raw -do_setinteriorfield_raw_int = new_setinteriorfield_raw(libffi.types.slong) +do_setinteriorfield_raw_int = new_setinteriorfield_raw(cast_from_int, libffi.types.slong) +do_setinteriorfield_raw_float = new_setinteriorfield_raw(cast_from_floatstorage, libffi.types.double) def do_setfield_raw_int(struct, fieldnum, newvalue): STRUCT, fieldname = symbolic.TokenToField[fieldnum] @@ -1779,9 +1835,11 @@ setannotation(compile_start_int_var, annmodel.SomeInteger()) setannotation(compile_start_ref_var, annmodel.SomeInteger()) setannotation(compile_start_float_var, annmodel.SomeInteger()) +setannotation(compile_started_vars, annmodel.s_None) setannotation(compile_add, annmodel.s_None) setannotation(compile_add_descr, annmodel.s_None) setannotation(compile_add_descr_arg, annmodel.s_None) +setannotation(compile_add_target_token, annmodel.s_None) setannotation(compile_add_var, annmodel.s_None) setannotation(compile_add_int_const, annmodel.s_None) setannotation(compile_add_ref_const, annmodel.s_None) diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -138,29 +138,30 @@ clt = original_loop_token.compiled_loop_token clt.loop_and_bridges.append(c) clt.compiling_a_bridge() - self._compile_loop_or_bridge(c, inputargs, operations) + self._compile_loop_or_bridge(c, inputargs, operations, clt) old, oldindex = faildescr._compiled_fail llimpl.compile_redirect_fail(old, oldindex, c) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, inputargs, operations, jitcell_token, + log=True, name=''): """In a real assembler backend, this should assemble the given list of operations. Here we just generate a similar CompiledLoop instance. The code here is RPython, whereas the code in llimpl is not. 
""" c = llimpl.compile_start() - clt = model.CompiledLoopToken(self, looptoken.number) + clt = model.CompiledLoopToken(self, jitcell_token.number) clt.loop_and_bridges = [c] clt.compiled_version = c - looptoken.compiled_loop_token = clt - self._compile_loop_or_bridge(c, inputargs, operations) + jitcell_token.compiled_loop_token = clt + self._compile_loop_or_bridge(c, inputargs, operations, clt) def free_loop_and_bridges(self, compiled_loop_token): for c in compiled_loop_token.loop_and_bridges: llimpl.mark_as_free(c) model.AbstractCPU.free_loop_and_bridges(self, compiled_loop_token) - def _compile_loop_or_bridge(self, c, inputargs, operations): + def _compile_loop_or_bridge(self, c, inputargs, operations, clt): var2index = {} for box in inputargs: if isinstance(box, history.BoxInt): @@ -172,10 +173,11 @@ var2index[box] = llimpl.compile_start_float_var(c) else: raise Exception("box is: %r" % (box,)) - self._compile_operations(c, operations, var2index) + llimpl.compile_started_vars(clt) + self._compile_operations(c, operations, var2index, clt) return c - def _compile_operations(self, c, operations, var2index): + def _compile_operations(self, c, operations, var2index, clt): for op in operations: llimpl.compile_add(c, op.getopnum()) descr = op.getdescr() @@ -183,9 +185,11 @@ llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo, descr.arg_types, descr.extrainfo, descr.width) - if (isinstance(descr, history.LoopToken) and - op.getopnum() != rop.JUMP): + if isinstance(descr, history.JitCellToken): + assert op.getopnum() != rop.JUMP llimpl.compile_add_loop_token(c, descr) + if isinstance(descr, history.TargetToken) and op.getopnum() == rop.LABEL: + llimpl.compile_add_target_token(c, descr, clt) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython c._obj.externalobj.operations[-1].setdescr(descr) @@ -239,9 +243,7 @@ assert op.is_final() if op.getopnum() == rop.JUMP: targettoken = op.getdescr() - assert isinstance(targettoken, history.LoopToken) - compiled_version = targettoken.compiled_loop_token.compiled_version - llimpl.compile_add_jump_target(c, compiled_version) + llimpl.compile_add_jump_target(c, targettoken, clt) elif op.getopnum() == rop.FINISH: faildescr = op.getdescr() index = self.get_fail_descr_number(faildescr) @@ -260,21 +262,28 @@ self.latest_frame = frame return fail_index - def execute_token(self, loop_token): - """Calls the assembler generated for the given loop. - Returns the ResOperation that failed, of type rop.FAIL. 
- """ - fail_index = self._execute_token(loop_token) - return self.get_fail_descr_from_number(fail_index) - - def set_future_value_int(self, index, intvalue): - llimpl.set_future_value_int(index, intvalue) - - def set_future_value_ref(self, index, objvalue): - llimpl.set_future_value_ref(index, objvalue) - - def set_future_value_float(self, index, floatvalue): - llimpl.set_future_value_float(index, floatvalue) + def make_execute_token(self, *argtypes): + nb_args = len(argtypes) + unroll_argtypes = unrolling_iterable(list(enumerate(argtypes))) + # + def execute_token(loop_token, *args): + assert len(args) == nb_args + for index, TYPE in unroll_argtypes: + x = args[index] + assert TYPE == lltype.typeOf(x) + if TYPE == lltype.Signed: + llimpl.set_future_value_int(index, x) + elif TYPE == llmemory.GCREF: + llimpl.set_future_value_ref(index, x) + elif TYPE == longlong.FLOATSTORAGE: + llimpl.set_future_value_float(index, x) + else: + assert 0 + # + fail_index = self._execute_token(loop_token) + return self.get_fail_descr_from_number(fail_index) + # + return execute_token def get_latest_value_int(self, index): return llimpl.frame_int_getvalue(self.latest_frame, index) diff --git a/pypy/jit/backend/llsupport/asmmemmgr.py b/pypy/jit/backend/llsupport/asmmemmgr.py --- a/pypy/jit/backend/llsupport/asmmemmgr.py +++ b/pypy/jit/backend/llsupport/asmmemmgr.py @@ -37,25 +37,25 @@ self._add_free_block(smaller_stop, stop) stop = smaller_stop result = (start, stop) - self.total_mallocs += stop - start + self.total_mallocs += r_uint(stop - start) return result # pair (start, stop) def free(self, start, stop): """Free a block (start, stop) returned by a previous malloc().""" - self.total_mallocs -= (stop - start) + self.total_mallocs -= r_uint(stop - start) self._add_free_block(start, stop) def open_malloc(self, minsize): """Allocate at least minsize bytes. 
Returns (start, stop).""" result = self._allocate_block(minsize) (start, stop) = result - self.total_mallocs += stop - start + self.total_mallocs += r_uint(stop - start) return result def open_free(self, middle, stop): """Used for freeing the end of an open-allocated block of memory.""" if stop - middle >= self.min_fragment: - self.total_mallocs -= (stop - middle) + self.total_mallocs -= r_uint(stop - middle) self._add_free_block(middle, stop) return True else: @@ -77,7 +77,7 @@ # Hack to make sure that mcs are not within 32-bits of one # another for testing purposes rmmap.hint.pos += 0x80000000 - size - self.total_memory_allocated += size + self.total_memory_allocated += r_uint(size) data = rffi.cast(lltype.Signed, data) return self._add_free_block(data, data + size) diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -648,14 +648,10 @@ # make a malloc function, with two arguments def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - has_finalizer = bool(tid & (1<= len(self.used): + self.used.append(False) + if self.used[index + i]: + return False # already in use + # good, we can reuse the location + for i in range(size): + self.used[index + i] = True + self.bindings[box] = loc + return True + # abstract methods that need to be overwritten for specific assemblers @staticmethod def frame_pos(loc, type): @@ -49,6 +123,10 @@ @staticmethod def frame_size(type): return 1 + @staticmethod + def get_loc_index(loc): + raise NotImplementedError("Purely abstract") + class RegisterManager(object): """ Class that keeps track of register allocations @@ -68,7 +146,14 @@ self.frame_manager = frame_manager self.assembler = assembler + def is_still_alive(self, v): + # Check if 'v' is alive at the current position. + # Return False if the last usage is strictly before. + return self.longevity[v][1] >= self.position + def stays_alive(self, v): + # Check if 'v' stays alive after the current position. + # Return False if the last usage is before or at position. return self.longevity[v][1] > self.position def next_instruction(self, incr=1): @@ -84,11 +169,14 @@ point for all variables that might be in registers. """ self._check_type(v) - if isinstance(v, Const) or v not in self.reg_bindings: + if isinstance(v, Const): return if v not in self.longevity or self.longevity[v][1] <= self.position: - self.free_regs.append(self.reg_bindings[v]) - del self.reg_bindings[v] + if v in self.reg_bindings: + self.free_regs.append(self.reg_bindings[v]) + del self.reg_bindings[v] + if self.frame_manager is not None: + self.frame_manager.mark_as_free(v) def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. 
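The regalloc.py hunk above teaches the FrameManager explicit slot bookkeeping: a 'used' bitmap, hint_frame_locations, mark_as_free(), try_to_reuse_location() and get_frame_depth() (replacing the bare frame_depth attribute). Below is a minimal, standalone sketch of that idea, with simplified names and a naive allocator rather than the real pypy/jit/backend/llsupport/regalloc.py class: a frame location can be handed to a new box only if every slot it covers is currently free.

class FramePos(object):
    def __init__(self, index, size):
        self.index = index          # first slot in the frame
        self.size = size            # number of slots (e.g. 2 for a float)

class SimpleFrameManager(object):
    def __init__(self):
        self.bindings = {}          # box -> FramePos
        self.used = []              # one bool per frame slot

    def get_frame_depth(self):
        return len(self.used)

    def loc(self, box, size=1):
        # naive allocation: always grab fresh slots at the end of the frame
        index = len(self.used)
        self.used.extend([True] * size)
        loc = FramePos(index, size)
        self.bindings[box] = loc
        return loc

    def mark_as_free(self, box):
        loc = self.bindings.pop(box, None)
        if loc is not None:
            for i in range(loc.size):
                self.used[loc.index + i] = False

    def try_to_reuse_location(self, box, loc):
        # succeed only if every slot covered by 'loc' is currently free
        for i in range(loc.size):
            while loc.index + i >= len(self.used):
                self.used.append(False)
            if self.used[loc.index + i]:
                return False        # already in use
        for i in range(loc.size):
            self.used[loc.index + i] = True
        self.bindings[box] = loc
        return True

if __name__ == '__main__':
    fm = SimpleFrameManager()
    a, b = object(), object()
    loc_a = fm.loc(a)
    assert not fm.try_to_reuse_location(b, loc_a)   # slot still in use
    fm.mark_as_free(a)
    assert fm.try_to_reuse_location(b, loc_a)       # now it can be reused
    assert fm.get_frame_depth() == 1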
diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -570,6 +570,28 @@ assert operations[1].getarg(2) == v_value assert operations[1].getdescr() == array_descr + def test_rewrite_assembler_5(self): + S = lltype.GcStruct('S') + A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) + interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, + A.OF, 'x') + wbdescr = self.gc_ll_descr.write_barrier_descr + ops = parse(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + expected = parse(""" + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + operations = get_deep_immutable_oplist(ops.operations) + operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, + operations, []) + equaloplists(operations, expected.operations) + def test_rewrite_assembler_initialization_store(self): S = lltype.GcStruct('S', ('parent', OBJECT), ('x', lltype.Signed)) diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -2,6 +2,8 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan +from pypy.jit.tool.oparser import parse +from pypy.jit.backend.detect_cpu import getcpuclass def newboxes(*values): return [BoxInt(v) for v in values] @@ -40,8 +42,13 @@ def frame_size(self, box_type): if box_type == FLOAT: return 2 + elif box_type == INT: + return 1 else: - return 1 + raise ValueError(box_type) + def get_loc_index(self, loc): + assert isinstance(loc, FakeFramePos) + return loc.pos class MockAsm(object): def __init__(self): @@ -280,7 +287,7 @@ rm.force_allocate_reg(b) rm.before_call() assert len(rm.reg_bindings) == 2 - assert fm.frame_depth == 2 + assert fm.get_frame_depth() == 2 assert len(asm.moves) == 2 rm._check_invariants() rm.after_call(boxes[-1]) @@ -303,7 +310,7 @@ rm.force_allocate_reg(b) rm.before_call(save_all_regs=True) assert len(rm.reg_bindings) == 0 - assert fm.frame_depth == 4 + assert fm.get_frame_depth() == 4 assert len(asm.moves) == 4 rm._check_invariants() rm.after_call(boxes[-1]) @@ -325,7 +332,7 @@ xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm) xrm.loc(f0) rm.loc(b0) - assert fm.frame_depth == 3 + assert fm.get_frame_depth() == 3 @@ -346,3 +353,123 @@ spilled2 = rm.force_allocate_reg(b5) assert spilled2 is loc rm._check_invariants() + + + def test_hint_frame_locations_1(self): + b0, = newboxes(0) + fm = TFrameManager() + loc123 = FakeFramePos(123, INT) + fm.hint_frame_locations[b0] = loc123 + assert fm.get_frame_depth() == 0 + loc = fm.loc(b0) + assert loc == loc123 + assert fm.get_frame_depth() == 124 + + def test_hint_frame_locations_2(self): + b0, b1, b2 = newboxes(0, 1, 2) + longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_allocate_reg(b2) + rm.force_spill_var(b0) + loc = rm.loc(b0) + assert isinstance(loc, FakeFramePos) + assert fm.get_loc_index(loc) == 0 + 
rm.position = 1 + assert fm.used == [True] + rm.possibly_free_var(b0) + assert fm.used == [False] + # + fm.hint_frame_locations[b1] = loc + rm.force_spill_var(b1) + loc1 = rm.loc(b1) + assert loc1 == loc + assert fm.used == [True] + # + fm.hint_frame_locations[b2] = loc + rm.force_spill_var(b2) + loc2 = rm.loc(b2) + assert loc2 != loc1 # because it was not free + assert fm.used == [True, True] + # + rm._check_invariants() + + def test_frame_manager_basic(self): + b0, b1 = newboxes(0, 1) + fm = TFrameManager() + loc0 = fm.loc(b0) + assert fm.get_loc_index(loc0) == 0 + # + assert fm.get(b1) is None + loc1 = fm.loc(b1) + assert fm.get_loc_index(loc1) == 1 + assert fm.get(b1) == loc1 + # + loc0b = fm.loc(b0) + assert loc0b == loc0 + # + fm.loc(BoxInt()) + assert fm.get_frame_depth() == 3 + # + f0 = BoxFloat() + locf0 = fm.loc(f0) + assert fm.get_loc_index(locf0) == 4 + assert fm.get_frame_depth() == 6 + # + f1 = BoxFloat() + locf1 = fm.loc(f1) + assert fm.get_loc_index(locf1) == 6 + assert fm.get_frame_depth() == 8 + assert fm.used == [True, True, True, False, True, True, True, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(f1) + assert fm.used == [False, True, True, False, True, True, False, False] + # + fm.reserve_location_in_frame(1) + assert fm.get_frame_depth() == 9 + assert fm.used == [False, True, True, False, True, True, False, False, True] + # + assert b0 not in fm.bindings + fm.set_binding(b0, loc0) + assert b0 in fm.bindings + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + b3 = BoxInt() + assert not fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) # already free + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b3) + assert fm.used == [False, True, True, False, True, True, False, False, True] + f3 = BoxFloat() + assert not fm.try_to_reuse_location(f3, fm.frame_pos(0, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(1, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(2, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(3, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(4, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(5, FLOAT)) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(f3, fm.frame_pos(6, FLOAT)) + assert fm.used == [False, True, True, False, True, True, True, True, True] + # + fm.used = [False] + assert fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True, True] + # + fm.used = [True] + assert not fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True] diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -1,5 +1,6 @@ from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.jit.metainterp import history +from pypy.rpython.lltypesystem import lltype class AbstractCPU(object): @@ -84,24 +85,21 @@ """Print a disassembled 
version of looptoken to stdout""" raise NotImplementedError - def execute_token(self, looptoken): - """Execute the generated code referenced by the looptoken. + def execute_token(self, looptoken, *args): + """NOT_RPYTHON (for tests only) + Execute the generated code referenced by the looptoken. Returns the descr of the last executed operation: either the one attached to the failing guard, or the one attached to the FINISH. - Use set_future_value_xxx() before, and get_latest_value_xxx() after. + Use get_latest_value_xxx() afterwards to read the result(s). """ - raise NotImplementedError + argtypes = [lltype.typeOf(x) for x in args] + execute = self.make_execute_token(*argtypes) + return execute(looptoken, *args) - def set_future_value_int(self, index, intvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_float(self, index, floatvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_ref(self, index, objvalue): - """Set the value for the index'th argument for the loop to run.""" + def make_execute_token(self, *argtypes): + """Must make and return an execute_token() function that will be + called with the given argtypes. + """ raise NotImplementedError def get_latest_value_int(self, index): diff --git a/pypy/jit/backend/test/calling_convention_test.py b/pypy/jit/backend/test/calling_convention_test.py --- a/pypy/jit/backend/test/calling_convention_test.py +++ b/pypy/jit/backend/test/calling_convention_test.py @@ -2,7 +2,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, ConstInt, ConstPtr, BoxObj, Const, ConstObj, BoxFloat, ConstFloat) @@ -40,17 +40,18 @@ local_floats = list(floats) local_ints = list(ints) expected_result = 0.0 + arguments = [] for i in range(len(args)): x = args[i] if x[0] == 'f': x = local_floats.pop() t = longlong.getfloatstorage(x) - self.cpu.set_future_value_float(i, t) + arguments.append(t) else: x = local_ints.pop() - self.cpu.set_future_value_int(i, x) + arguments.append(x) expected_result += x - return expected_result + return arguments, expected_result @classmethod def get_funcbox(cls, cpu, func_ptr): @@ -107,12 +108,12 @@ ops += 'finish(f99, %s)\n' % arguments loop = parse(ops, namespace=locals()) - looptoken = LoopToken() + looptoken = JitCellToken() done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - expected_result = self._prepare_args(args, floats, ints) + argvals, expected_result = self._prepare_args(args, floats, ints) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(x - expected_result) < 0.0001 @@ -253,13 +254,13 @@ called_ops += 'finish(f%d, descr=fdescr3)\n' % total_index # compile called loop called_loop = parse(called_ops, namespace=locals()) - called_looptoken = LoopToken() + called_looptoken = JitCellToken() called_looptoken.outermost_jitdriver_sd = FakeJitDriverSD() done_number = self.cpu.get_fail_descr_number(called_loop.operations[-1].getdescr()) self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) - expected_result = self._prepare_args(args, floats, ints) - res = cpu.execute_token(called_looptoken) + argvals, expected_result = self._prepare_args(args, floats, ints) + res = cpu.execute_token(called_looptoken, *argvals) assert 
res.identifier == 3 t = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(t - expected_result) < 0.0001 @@ -284,12 +285,12 @@ # we want to take the fast path self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # prepare call to called_loop - self._prepare_args(args, floats, ints) - res = cpu.execute_token(othertoken) + argvals, _ = self._prepare_args(args, floats, ints) + res = cpu.execute_token(othertoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert res.identifier == 4 assert abs(x - expected_result) < 0.0001 diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -3,7 +3,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, TargetToken, ConstInt, ConstPtr, BoxObj, ConstObj, BoxFloat, ConstFloat) @@ -32,22 +32,19 @@ result_type, valueboxes, descr) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - j = 0 + args = [] for box in inputargs: if isinstance(box, BoxInt): - self.cpu.set_future_value_int(j, box.getint()) - j += 1 + args.append(box.getint()) elif isinstance(box, (BoxPtr, BoxObj)): - self.cpu.set_future_value_ref(j, box.getref_base()) - j += 1 + args.append(box.getref_base()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(j, box.getfloatstorage()) - j += 1 + args.append(box.getfloatstorage()) else: raise NotImplementedError(box) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *args) if res is operations[-1].getdescr(): self.guard_failed = False else: @@ -106,10 +103,9 @@ ResOperation(rop.FINISH, [i1], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) res = self.cpu.get_latest_value_int(0) assert res == 3 assert fail.identifier == 1 @@ -118,19 +114,20 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 10 @@ -139,19 +136,22 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, 
descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 44) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(2) assert res == 10 @@ -162,15 +162,17 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = weakref.ref(operations[2]) self.cpu.compile_loop(inputargs, operations, looptoken) @@ -190,15 +192,17 @@ i2 = BoxInt() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -206,14 +210,13 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -226,17 +229,21 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() + i3 = BoxInt() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -244,14 +251,13 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, 
[i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -261,19 +267,20 @@ i1 = BoxInt() i2 = BoxInt() faildescr1 = BasicFailDescr(1) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + operations[3].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail is faildescr1 count = self.cpu.get_latest_value_count() @@ -290,18 +297,17 @@ return AbstractFailDescr.__setattr__(self, name, value) py.test.fail("finish descrs should not be touched") faildescr = UntouchableFailDescr() # to check that is not touched - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [i0], None, descr=faildescr) ] self.cpu.compile_loop([i0], operations, looptoken) - self.cpu.set_future_value_int(0, 99) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 99) assert fail is faildescr res = self.cpu.get_latest_value_int(0) assert res == 99 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [ConstInt(42)], None, descr=faildescr) ] @@ -311,7 +317,7 @@ res = self.cpu.get_latest_value_int(0) assert res == 42 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [], None, descr=faildescr) ] @@ -320,20 +326,19 @@ assert fail is faildescr if self.cpu.supports_floats: - looptoken = LoopToken() + looptoken = JitCellToken() f0 = BoxFloat() operations = [ ResOperation(rop.FINISH, [f0], None, descr=faildescr) ] self.cpu.compile_loop([f0], operations, looptoken) value = longlong.getfloatstorage(-61.25) - self.cpu.set_future_value_float(0, value) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, value) assert fail is faildescr res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == -61.25 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [constfloat(42.5)], None, descr=faildescr) ] @@ -350,20 +355,20 @@ z = BoxInt(579) t = BoxInt(455) u = BoxInt(0) # False - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [y, x], None, descr=targettoken), ResOperation(rop.INT_ADD, [x, y], z), ResOperation(rop.INT_SUB, [y, ConstInt(1)], t), ResOperation(rop.INT_EQ, [t, ConstInt(0)], u), ResOperation(rop.GUARD_FALSE, [u], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [z, t], None, descr=looptoken), + ResOperation(rop.JUMP, [t, z], None, descr=targettoken), ] operations[-2].setfailargs([t, z]) cpu.compile_loop([x, y], operations, looptoken) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - res = 
self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, 0, 10) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_int(1) == 55 @@ -419,14 +424,12 @@ ] ops[1].setfailargs([v_res]) # - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([v1, v2], ops, looptoken) for x, y, z in testcases: excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, x) - self.cpu.set_future_value_int(1, y) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, x, y) if (z == boom) ^ reversed: assert fail.identifier == 1 else: @@ -1082,16 +1085,18 @@ inputargs.insert(index_counter, i0) jumpargs.insert(index_counter, i1) # - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() faildescr = BasicFailDescr(15) operations = [ + ResOperation(rop.LABEL, inputargs, None, descr=targettoken), ResOperation(rop.INT_SUB, [i0, ConstInt(1)], i1), ResOperation(rop.INT_GE, [i1, ConstInt(0)], i2), ResOperation(rop.GUARD_TRUE, [i2], None), - ResOperation(rop.JUMP, jumpargs, None, descr=looptoken), + ResOperation(rop.JUMP, jumpargs, None, descr=targettoken), ] - operations[2].setfailargs(inputargs[:]) - operations[2].setdescr(faildescr) + operations[3].setfailargs(inputargs[:]) + operations[3].setdescr(faildescr) # self.cpu.compile_loop(inputargs, operations, looptoken) # @@ -1109,17 +1114,7 @@ assert 0 values[index_counter] = 11 # - for i, (box, val) in enumerate(zip(inputargs, values)): - if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, val) - elif isinstance(box, BoxPtr): - self.cpu.set_future_value_ref(i, val) - elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, val) - else: - assert 0 - # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *values) assert fail.identifier == 15 # dstvalues = values[:] @@ -1149,30 +1144,33 @@ py.test.skip("requires floats") fboxes = [BoxFloat() for i in range(12)] i2 = BoxInt() + targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) operations = [ + ResOperation(rop.LABEL, fboxes, None, descr=targettoken), ResOperation(rop.FLOAT_LE, [fboxes[0], constfloat(9.2)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), ResOperation(rop.FINISH, fboxes, None, descr=faildescr2), ] operations[-2].setfailargs(fboxes) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(fboxes, operations, looptoken) fboxes2 = [BoxFloat() for i in range(12)] f3 = BoxFloat() bridge = [ ResOperation(rop.FLOAT_SUB, [fboxes2[0], constfloat(1.0)], f3), - ResOperation(rop.JUMP, [f3] + fboxes2[1:], None, descr=looptoken), + ResOperation(rop.JUMP, [f3]+fboxes2[1:], None, descr=targettoken), ] self.cpu.compile_bridge(faildescr1, fboxes2, bridge, looptoken) + args = [] for i in range(len(fboxes)): x = 13.5 + 6.73 * i - self.cpu.set_future_value_float(i, longlong.getfloatstorage(x)) - fail = self.cpu.execute_token(looptoken) + args.append(longlong.getfloatstorage(x)) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 2 res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == 8.5 @@ -1214,7 +1212,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1222,14 +1220,12 @@ if test1 == -42 or combinaison[0] == 'b': for test2 in 
[-65, -42, -11]: if test2 == -42 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_int(n, test1) - n += 1 + args.append(test1) if combinaison[1] == 'b': - cpu.set_future_value_int(n, test2) - n += 1 - fail = cpu.execute_token(looptoken) + args.append(test2) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1271,7 +1267,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1281,16 +1277,14 @@ if test1 == -4.5 or combinaison[0] == 'b': for test2 in [-6.5, -4.5, -2.5, nan]: if test2 == -4.5 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test1)) - n += 1 + args.append( + longlong.getfloatstorage(test1)) if combinaison[1] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test2)) - n += 1 - fail = cpu.execute_token(looptoken) + args.append( + longlong.getfloatstorage(test2)) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1330,19 +1324,20 @@ faildescr = BasicFailDescr(1) operations.append(ResOperation(rop.FINISH, [], None, descr=faildescr)) - looptoken = LoopToken() + looptoken = JitCellToken() # self.cpu.compile_loop(inputargs, operations, looptoken) # - for i, box in enumerate(inputargs): + args = [] + for box in inputargs: if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, box.getint()) + args.append(box.getint()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, box.getfloatstorage()) + args.append(box.getfloatstorage()) else: assert 0 # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 1 def test_nan_and_infinity(self): @@ -1400,15 +1395,14 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(5))] operations[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) self.cpu.compile_loop(unique_testcase_list, operations, looptoken) - for i, box in enumerate(unique_testcase_list): - self.cpu.set_future_value_float( - i, box.getfloatstorage()) - fail = self.cpu.execute_token(looptoken) + args = [box.getfloatstorage() + for box in unique_testcase_list] + fail = self.cpu.execute_token(looptoken, *args) if fail.identifier != 5 - (expected_id^expected): if fail.identifier == 4: msg = "was taken" @@ -1675,15 +1669,14 @@ exc_tp = xtp exc_ptr = xptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_ref(1) == xptr excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1700,9 +1693,9 @@ exc_tp = ytp exc_ptr = yptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, 
loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == yptr @@ -1718,14 +1711,13 @@ finish(0) ''' loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == xptr - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 0 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1895,18 +1887,14 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 20 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 10 @@ -1940,18 +1928,14 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 42 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 42 @@ -1986,19 +1970,15 @@ ResOperation(rop.FINISH, [f2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, f2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 42.5 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 x = self.cpu.get_latest_value_float(1) @@ -2031,10 +2011,9 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([i1, i2]) - looptoken = LoopToken() + looptoken = 
JitCellToken() self.cpu.compile_loop([i1], ops, looptoken) - self.cpu.set_future_value_int(0, ord('G')) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, ord('G')) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == ord('g') @@ -2091,14 +2070,14 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1, i2, i3], ops, looptoken) - self.cpu.set_future_value_int(0, rffi.cast(lltype.Signed, raw)) - self.cpu.set_future_value_int(1, 2) - self.cpu.set_future_value_int(2, 4) - self.cpu.set_future_value_int(3, rffi.cast(lltype.Signed, fn)) + args = [rffi.cast(lltype.Signed, raw), + 2, + 4, + rffi.cast(lltype.Signed, fn)] assert glob.lst == [] - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert len(glob.lst) > 0 lltype.free(raw, flavor='raw') @@ -2147,13 +2126,12 @@ ops += [ ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0)) ] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') - self.cpu.set_future_value_int(0, buflen) - self.cpu.set_future_value_int(1, rffi.cast(lltype.Signed, buffer)) - fail = self.cpu.execute_token(looptoken) + args = [buflen, rffi.cast(lltype.Signed, buffer)] + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == len(cwd) assert rffi.charp2strn(buffer, buflen) == cwd @@ -2169,12 +2147,10 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[0].setfailargs([i1]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == -42 print 'step 1 ok' @@ -2183,9 +2159,7 @@ # mark as failing self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr assert self.cpu.get_latest_value_int(0) == 9 print 'step 2 ok' @@ -2201,9 +2175,7 @@ ops[0].setfailargs([]) self.cpu.compile_bridge(faildescr, [i2], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 3 assert self.cpu.get_latest_value_int(0) == 9 print 'step 3 ok' @@ -2212,9 +2184,7 @@ # mark as failing again self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr2 print 'step 4 ok' print '-'*79 @@ -2415,7 +2385,7 @@ i18 = int_add(i17, i9) finish(i18)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 @@ -2423,9 +2393,8 @@ FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, 
EffectInfo.MOST_GENERAL) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(looptoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(looptoken, *args) assert self.cpu.get_latest_value_int(0) == 55 ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] @@ -2435,11 +2404,10 @@ finish(i11) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(othertoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_latest_value_int(0) == 13 assert called @@ -2471,12 +2439,12 @@ finish(f2)''' loop = parse(ops) done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.3)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(2.3)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 2.3 ops = ''' @@ -2486,11 +2454,11 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2499,11 +2467,11 @@ del called[:] self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 3.2 assert not called @@ -2561,12 +2529,12 @@ f2 = float_add(f0, f1) finish(f2)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.35)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(2.35)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.25 + 2.35 assert not called @@ -2578,13 +2546,13 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # normal 
call_assembler: goes to looptoken - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.25)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(3.25)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2596,7 +2564,7 @@ f2 = float_sub(f0, f1) finish(f2)''' loop = parse(ops) - looptoken2 = LoopToken() + looptoken2 = JitCellToken() looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken2) @@ -2604,10 +2572,9 @@ self.cpu.redirect_call_assembler(looptoken, looptoken2) # now, our call_assembler should go to looptoken2 - self.cpu.set_future_value_float(0, longlong.getfloatstorage(6.0)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(1.5)) - # 6.0-1.5 == 1.25+3.25 - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(6.0), + longlong.getfloatstorage(1.5)] # 6.0-1.5 == 1.25+3.25 + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2958,13 +2925,137 @@ ResOperation(rop.FINISH, [p0], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # overflowing value: - self.cpu.set_future_value_int(0, sys.maxint // 4 + 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) assert fail.identifier == excdescr.identifier + def test_compile_loop_with_target(self): + i0 = BoxInt() + i1 = BoxInt() + i2 = BoxInt() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken1 = TargetToken() + targettoken2 = TargetToken() + faildescr = BasicFailDescr(2) + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), + ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr), + ResOperation(rop.LABEL, [i1], None, descr=targettoken2), + ResOperation(rop.INT_GE, [i1, ConstInt(0)], i3), + ResOperation(rop.GUARD_TRUE, [i3], None, descr=BasicFailDescr(3)), + ResOperation(rop.JUMP, [i1], None, descr=targettoken1), + ] + inputargs = [i0] + operations[3].setfailargs([i1]) + operations[6].setfailargs([i1]) + + self.cpu.compile_loop(inputargs, operations, looptoken) + fail = self.cpu.execute_token(looptoken, 2) + assert fail.identifier == 2 + res = self.cpu.get_latest_value_int(0) + assert res == 10 + + inputargs = [i0] + operations = [ + ResOperation(rop.INT_SUB, [i0, ConstInt(20)], i2), + ResOperation(rop.JUMP, [i2], None, descr=targettoken2), + ] + self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) + + fail = self.cpu.execute_token(looptoken, 2) + assert fail.identifier == 3 + res = self.cpu.get_latest_value_int(0) + assert res == -10 + + def test_compile_bridge_with_target(self): + # This test creates a loopy piece of code in a bridge, and builds another + # unrelated loop that ends in a jump directly to this loopy bit of code. + # It catches a case in which we underestimate the needed frame_depth across + # the cross-loop JUMP, because we estimate it based on the frame_depth stored + # in the original loop. 
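In these tests a loop now starts with a LABEL operation carrying a TargetToken, and JUMP names that token instead of a whole LoopToken, so a bridge can jump straight to a label in the middle of another loop's code. Here is a toy, self-contained sketch of those semantics, using an invented list-of-operations form rather than the real ResOperation objects, in the spirit of op_label() and the JUMP handling added to the llgraph llimpl earlier in this diff: on JUMP, control moves to the recorded label position and the environment is rebuilt from the label's argument list.

class TargetToken(object):
    pass

class Op(object):
    def __init__(self, opnum, args, result=None, descr=None):
        self.opnum = opnum
        self.args = args
        self.result = result
        self.descr = descr

LABEL, INT_ADD, INT_LT, GUARD_TRUE, JUMP = range(5)

def run(inputargs, operations, args):
    env = dict(zip(inputargs, args))
    # record where each TargetToken's LABEL sits and which variables it binds
    targets = dict((op.descr, (i, op.args))
                   for i, op in enumerate(operations) if op.opnum == LABEL)
    def val(a):
        return env[a] if a in env else a    # names not in env are constants
    i = 0
    while i < len(operations):
        op = operations[i]
        if op.opnum == INT_ADD:
            env[op.result] = val(op.args[0]) + val(op.args[1])
        elif op.opnum == INT_LT:
            env[op.result] = val(op.args[0]) < val(op.args[1])
        elif op.opnum == GUARD_TRUE:
            if not val(op.args[0]):
                return env                  # toy stand-in for a guard failure
        elif op.opnum == JUMP:
            # jump back to the LABEL named by the descr and rebind its args
            opindex, labelargs = targets[op.descr]
            env = dict(zip(labelargs, [val(a) for a in op.args]))
            i = opindex
        i += 1
    return env

if __name__ == '__main__':
    t = TargetToken()
    ops = [Op(LABEL, ['i0'], descr=t),
           Op(INT_ADD, ['i0', 1], result='i1'),
           Op(INT_LT, ['i1', 10], result='i2'),
           Op(GUARD_TRUE, ['i2']),
           Op(JUMP, ['i1'], descr=t)]
    print(run(['i0'], ops, [2]))   # guard fails once i1 reaches 10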
+ i0 = BoxInt() + i1 = BoxInt() + looptoken1 = JitCellToken() + targettoken1 = TargetToken() + faildescr1 = BasicFailDescr(2) + inputargs = [i0] + operations = [ + ResOperation(rop.INT_LE, [i0, ConstInt(1)], i1), + ResOperation(rop.GUARD_TRUE, [i1], None, descr=faildescr1), + ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(1234)), + ] + operations[1].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken1) + + def func(a, b, c, d, e, f, g, h, i): + assert a + 2 == b + assert a + 4 == c + assert a + 6 == d + assert a + 8 == e + assert a + 10 == f + assert a + 12 == g + assert a + 14 == h + assert a + 16 == i + FPTR = self.Ptr(self.FuncType([lltype.Signed]*9, lltype.Void)) + func_ptr = llhelper(FPTR, func) + cpu = self.cpu + calldescr = cpu.calldescrof(deref(FPTR), (lltype.Signed,)*9, lltype.Void, + EffectInfo.MOST_GENERAL) + funcbox = self.get_funcbox(cpu, func_ptr) + + i0 = BoxInt(); i1 = BoxInt(); i2 = BoxInt(); i3 = BoxInt(); i4 = BoxInt() + i5 = BoxInt(); i6 = BoxInt(); i7 = BoxInt(); i8 = BoxInt(); i9 = BoxInt() + i10 = BoxInt(); i11 = BoxInt(); i12 = BoxInt(); i13 = BoxInt(); i14 = BoxInt() + i15 = BoxInt(); i16 = BoxInt(); i17 = BoxInt(); i18 = BoxInt(); i19 = BoxInt() + i20 = BoxInt() + inputargs = [i0] + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_ADD, [i1, ConstInt(1)], i2), + ResOperation(rop.INT_ADD, [i2, ConstInt(1)], i3), + ResOperation(rop.INT_ADD, [i3, ConstInt(1)], i4), + ResOperation(rop.INT_ADD, [i4, ConstInt(1)], i5), + ResOperation(rop.INT_ADD, [i5, ConstInt(1)], i6), + ResOperation(rop.INT_ADD, [i6, ConstInt(1)], i7), + ResOperation(rop.INT_ADD, [i7, ConstInt(1)], i8), + ResOperation(rop.INT_ADD, [i8, ConstInt(1)], i9), + ResOperation(rop.INT_ADD, [i9, ConstInt(1)], i10), + ResOperation(rop.INT_ADD, [i10, ConstInt(1)], i11), + ResOperation(rop.INT_ADD, [i11, ConstInt(1)], i12), + ResOperation(rop.INT_ADD, [i12, ConstInt(1)], i13), + ResOperation(rop.INT_ADD, [i13, ConstInt(1)], i14), + ResOperation(rop.INT_ADD, [i14, ConstInt(1)], i15), + ResOperation(rop.INT_ADD, [i15, ConstInt(1)], i16), + ResOperation(rop.INT_ADD, [i16, ConstInt(1)], i17), + ResOperation(rop.INT_ADD, [i17, ConstInt(1)], i18), + ResOperation(rop.INT_ADD, [i18, ConstInt(1)], i19), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.INT_LT, [i19, ConstInt(100)], i20), + ResOperation(rop.GUARD_TRUE, [i20], None, descr=BasicFailDescr(42)), + ResOperation(rop.JUMP, [i19], None, descr=targettoken1), + ] + operations[-2].setfailargs([]) + self.cpu.compile_bridge(faildescr1, inputargs, operations, looptoken1) + + looptoken2 = JitCellToken() + inputargs = [BoxInt()] + operations = [ + ResOperation(rop.JUMP, [ConstInt(0)], None, descr=targettoken1), + ] + self.cpu.compile_loop(inputargs, operations, looptoken2) + + fail = self.cpu.execute_token(looptoken2, -9) + assert fail.identifier == 42 + class OOtypeBackendTest(BaseBackendTest): diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -3,9 +3,10 @@ from pypy.rlib.rarithmetic import intmask, LONG_BIT from pypy.rpython.lltypesystem import llmemory from pypy.jit.metainterp.history import BasicFailDescr, TreeLoop -from 
pypy.jit.metainterp.history import BoxInt, ConstInt, LoopToken -from pypy.jit.metainterp.history import BoxPtr, ConstPtr +from pypy.jit.metainterp.history import BoxInt, ConstInt, JitCellToken +from pypy.jit.metainterp.history import BoxPtr, ConstPtr, TargetToken from pypy.jit.metainterp.history import BoxFloat, ConstFloat, Const +from pypy.jit.metainterp.history import INT, FLOAT from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.executor import execute_nonspec from pypy.jit.metainterp.resoperation import opname @@ -179,7 +180,7 @@ #print >>s, ' operations[%d].suboperations = [' % i #print >>s, ' ResOperation(rop.FAIL, [%s], None)]' % ( # ', '.join([names[v] for v in op.args])) - print >>s, ' looptoken = LoopToken()' + print >>s, ' looptoken = JitCellToken()' print >>s, ' cpu.compile_loop(inputargs, operations, looptoken)' if hasattr(self.loop, 'inputargs'): for i, v in enumerate(self.loop.inputargs): @@ -525,29 +526,53 @@ startvars.append(BoxFloat(r.random_float_storage())) else: startvars.append(BoxInt(r.random_integer())) + allow_delay = True + else: + allow_delay = False assert len(dict.fromkeys(startvars)) == len(startvars) self.startvars = startvars self.prebuilt_ptr_consts = [] self.r = r - self.build_random_loop(cpu, builder_factory, r, startvars) + self.build_random_loop(cpu, builder_factory, r, startvars, allow_delay) - def build_random_loop(self, cpu, builder_factory, r, startvars): + def build_random_loop(self, cpu, builder_factory, r, startvars, allow_delay): loop = TreeLoop('test_random_function') loop.inputargs = startvars[:] loop.operations = [] - loop.token = LoopToken() - + loop._jitcelltoken = JitCellToken() builder = builder_factory(cpu, loop, startvars[:]) - self.generate_ops(builder, r, loop, startvars) + if allow_delay: + needs_a_label = True + else: + self.insert_label(loop, 0, r) + needs_a_label = False + self.generate_ops(builder, r, loop, startvars, needs_a_label=needs_a_label) self.builder = builder self.loop = loop - cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + dump(loop) + cpu.compile_loop(loop.inputargs, loop.operations, loop._jitcelltoken) - def generate_ops(self, builder, r, loop, startvars): + def insert_label(self, loop, position, r): + assert not hasattr(loop, '_targettoken') + for i in range(position): + op = loop.operations[i] + if (not op.has_no_side_effect() + or not isinstance(op.result, (BoxInt, BoxFloat))): + position = i + break # cannot move the LABEL later + randompos = r.randrange(0, len(self.startvars)+1) + self.startvars.insert(randompos, op.result) + loop._targettoken = TargetToken() + loop.operations.insert(position, ResOperation(rop.LABEL, self.startvars, None, + loop._targettoken)) + + def generate_ops(self, builder, r, loop, startvars, needs_a_label=False): block_length = pytest.config.option.block_length + istart = 0 for i in range(block_length): + istart = len(loop.operations) try: op = r.choice(builder.OPERATIONS) op.filter(builder) @@ -556,6 +581,12 @@ pass if builder.should_fail_by is not None: break + if needs_a_label and r.random() < 0.2: + self.insert_label(loop, istart, r) + needs_a_label = False + if needs_a_label: + self.insert_label(loop, istart, r) + endvars = [] used_later = {} for op in loop.operations: @@ -581,6 +612,22 @@ if pytest.config.option.output: builder.print_loop() + def runjitcelltoken(self): + if self.startvars == self.loop.inputargs: + return self.loop._jitcelltoken + if not hasattr(self, '_initialjumploop_celltoken'): + self._initialjumploop_celltoken 
= JitCellToken() + args = [] + for box in self.startvars: + if box not in self.loop.inputargs: + box = box.constbox() + args.append(box) + self.cpu.compile_loop(self.loop.inputargs, + [ResOperation(rop.JUMP, args, None, + descr=self.loop._targettoken)], + self._initialjumploop_celltoken) + return self._initialjumploop_celltoken + def get_fail_args(self): if self.should_fail_by.is_guard(): assert self.should_fail_by.getfailargs() is not None @@ -608,14 +655,8 @@ exc = cpu.grab_exc_value() assert not exc - for i, box in enumerate(self.startvars): - if isinstance(box, BoxInt): - cpu.set_future_value_int(i, box.value) - elif isinstance(box, BoxFloat): - cpu.set_future_value_float(i, box.value) - else: - raise NotImplementedError(box) - fail = cpu.execute_token(self.loop.token) + arguments = [box.value for box in self.loop.inputargs] + fail = cpu.execute_token(self.runjitcelltoken(), *arguments) assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): if isinstance(v, (BoxFloat, ConstFloat)): @@ -676,33 +717,55 @@ # to build_bridge().) # First make up the other loop... - subset = bridge_builder.subset_of_intvars(r) - subset = [i for i in subset if i in fail_args] - if len(subset) == 0: - return False + # + # New restriction: must have the same argument count and types + # as the original loop + subset = [] + for box in self.loop.inputargs: + srcbox = r.choice(fail_args) + if srcbox.type != box.type: + if box.type == INT: + srcbox = ConstInt(r.random_integer()) + elif box.type == FLOAT: + srcbox = ConstFloat(r.random_float_storage()) + else: + raise AssertionError(box.type) + subset.append(srcbox) + # args = [x.clonebox() for x in subset] rl = RandomLoop(self.builder.cpu, self.builder.fork, r, args) + dump(rl.loop) self.cpu.compile_loop(rl.loop.inputargs, rl.loop.operations, - rl.loop.token) + rl.loop._jitcelltoken) # done self.should_fail_by = rl.should_fail_by self.expected = rl.expected assert len(rl.loop.inputargs) == len(args) # The new bridge's execution will end normally at its FINISH. # Just replace the FINISH with the JUMP to the new loop. 
- jump_op = ResOperation(rop.JUMP, subset, None, descr=rl.loop.token) + jump_op = ResOperation(rop.JUMP, subset, None, + descr=rl.loop._targettoken) subloop.operations[-1] = jump_op self.guard_op = rl.guard_op self.prebuilt_ptr_consts += rl.prebuilt_ptr_consts - self.loop.token.record_jump_to(rl.loop.token) + self.loop._jitcelltoken.record_jump_to(rl.loop._jitcelltoken) self.dont_generate_more = True if r.random() < .05: return False + dump(subloop) self.builder.cpu.compile_bridge(fail_descr, fail_args, - subloop.operations, self.loop.token) + subloop.operations, + self.loop._jitcelltoken) return True +def dump(loop): + print >> sys.stderr, loop + if hasattr(loop, 'inputargs'): + print >> sys.stderr, '\t', loop.inputargs + for op in loop.operations: + print >> sys.stderr, '\t', op + def check_random_function(cpu, BuilderClass, r, num=None, max=None): loop = RandomLoop(cpu, BuilderClass, r) while True: diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -2,8 +2,8 @@ from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from pypy.jit.metainterp.history import Const, Box, BoxInt, ConstInt -from pypy.jit.metainterp.history import (AbstractFailDescr, INT, REF, FLOAT, - LoopToken) +from pypy.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT +from pypy.jit.metainterp.history import JitCellToken from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper @@ -38,6 +38,7 @@ from pypy.jit.backend.x86.jump import remap_frame_layout from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter import longlong +from pypy.rlib.rarithmetic import intmask # darwin requires the stack to be 16 bytes aligned on calls. 
Same for gcc 4.5.0, # better safe than sorry @@ -152,14 +153,13 @@ allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, allblocks) + self.target_tokens_currently_compiling = {} def teardown(self): self.pending_guard_tokens = None if WORD == 8: self.pending_memoryerror_trampoline_from = None self.mc = None - self.looppos = -1 - self.currently_compiling_loop = None self.current_clt = None def finish_once(self): @@ -310,12 +310,11 @@ mc.MOVSD_sx(8*i, i) # xmm0 to xmm7 # if IS_X86_32: - mc.LEA_rb(eax.value, +8) stack_size += 2*WORD mc.PUSH_r(eax.value) # alignment - mc.PUSH_r(eax.value) + mc.PUSH_r(esp.value) elif IS_X86_64: - mc.LEA_rb(edi.value, +16) + mc.MOV_rr(edi.value, esp.value) # # esp is now aligned to a multiple of 16 again mc.CALL(imm(slowpathaddr)) @@ -326,7 +325,7 @@ jnz_location = mc.get_relative_pos() # if IS_X86_32: - mc.ADD_ri(esp.value, 2*WORD) + mc.ADD_ri(esp.value, 2*WORD) # cancel the two PUSHes above elif IS_X86_64: # restore the registers for i in range(7, -1, -1): @@ -422,12 +421,8 @@ def assemble_loop(self, loopname, inputargs, operations, looptoken, log): '''adds the following attributes to looptoken: - _x86_loop_code (an integer giving an address) - _x86_bootstrap_code (an integer giving an address) - _x86_direct_bootstrap_code ( " " " " ) - _x86_frame_depth - _x86_param_depth - _x86_arglocs + _x86_function_addr (address of the generated func, as an int) + _x86_loop_code (debug: addr of the start of the ResOps) _x86_debug_checksum ''' # XXX this function is too longish and contains some code @@ -443,37 +438,35 @@ assert len(set(inputargs)) == len(inputargs) self.setup(looptoken) - self.currently_compiling_loop = looptoken if log: self._register_counter(False, looptoken.number) operations = self._inject_debugging_code(looptoken, operations) regalloc = RegAlloc(self, self.cpu.translate_support_code) - arglocs, operations = regalloc.prepare_loop(inputargs, operations, - looptoken, clt.allgcrefs) - looptoken._x86_arglocs = arglocs - - bootstrappos = self.mc.get_relative_pos() - stackadjustpos = self._assemble_bootstrap_code(inputargs, arglocs) - self.looppos = self.mc.get_relative_pos() - looptoken._x86_frame_depth = -1 # temporarily - looptoken._x86_param_depth = -1 # temporarily + # + self._call_header_with_stack_check() + stackadjustpos = self._patchable_stackadjust() + clt._debug_nbargs = len(inputargs) + operations = regalloc.prepare_loop(inputargs, operations, + looptoken, clt.allgcrefs) + looppos = self.mc.get_relative_pos() + looptoken._x86_loop_code = looppos + clt.frame_depth = -1 # temporarily + clt.param_depth = -1 # temporarily frame_depth, param_depth = self._assemble(regalloc, operations) - looptoken._x86_frame_depth = frame_depth - looptoken._x86_param_depth = param_depth - - directbootstrappos = self.mc.get_relative_pos() - self._assemble_bootstrap_direct_call(arglocs, self.looppos, - frame_depth+param_depth) + clt.frame_depth = frame_depth + clt.param_depth = param_depth + # + size_excluding_failure_stuff = self.mc.get_relative_pos() self.write_pending_failure_recoveries() - fullsize = self.mc.get_relative_pos() + full_size = self.mc.get_relative_pos() # rawstart = self.materialize_loop(looptoken) debug_start("jit-backend-addr") debug_print("Loop %d (%s) has address %x to %x (bootstrap %x)" % ( looptoken.number, loopname, - rawstart + self.looppos, - rawstart + directbootstrappos, + rawstart + looppos, + rawstart + size_excluding_failure_stuff, rawstart)) debug_stop("jit-backend-addr") 
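
While a loop is being assembled, a TargetToken only knows its position relative to the start of the machine-code buffer; it is rebased to an absolute address once the buffer is materialized at 'rawstart' (see fixup_target_tokens() a bit further down in this diff). A toy illustration of that rebasing step, using an invented FakeTargetToken stand-in and a made-up address, not code from the patch:

    # Toy model: relative label positions become absolute after materializing.
    class FakeTargetToken(object):           # invented stand-in for TargetToken
        def __init__(self, relative_pos):
            self._x86_loop_code = relative_pos

    compiling = [FakeTargetToken(16), FakeTargetToken(48)]
    rawstart = 0x7f3a00c00000                # hypothetical address of the code
    for token in compiling:                  # what fixup_target_tokens() does
        token._x86_loop_code += rawstart
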
self._patch_stackadjust(rawstart + stackadjustpos, @@ -484,18 +477,17 @@ if not we_are_translated(): # used only by looptoken.dump() -- useful in tests looptoken._x86_rawstart = rawstart - looptoken._x86_fullsize = fullsize + looptoken._x86_fullsize = full_size looptoken._x86_ops_offset = ops_offset + looptoken._x86_function_addr = rawstart - looptoken._x86_bootstrap_code = rawstart + bootstrappos - looptoken._x86_loop_code = rawstart + self.looppos - looptoken._x86_direct_bootstrap_code = rawstart + directbootstrappos + self.fixup_target_tokens(rawstart) self.teardown() # oprofile support if self.cpu.profile_agent is not None: name = "Loop # %s: %s" % (looptoken.number, loopname) self.cpu.profile_agent.native_code_written(name, - rawstart, fullsize) + rawstart, full_size) return ops_offset def assemble_bridge(self, faildescr, inputargs, operations, @@ -548,6 +540,9 @@ # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset + self.fixup_target_tokens(rawstart) + self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth) + self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -668,6 +663,11 @@ mc.copy_to_raw_memory(adr_target) faildescr._x86_adr_jump_offset = 0 # means "patched" + def fixup_target_tokens(self, rawstart): + for targettoken in self.target_tokens_currently_compiling: + targettoken._x86_loop_code += rawstart + self.target_tokens_currently_compiling = None + @specialize.argtype(1) def _inject_debugging_code(self, looptoken, operations): if self._debug: @@ -685,20 +685,24 @@ ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), ResOperation(rop.SETFIELD_RAW, [c_adr, box2], None, descr=self.debug_counter_descr)] - operations = ops + operations + if operations[0].getopnum() == rop.LABEL: + operations = [operations[0]] + ops + operations[1:] + else: + operations = ops + operations return operations def _assemble(self, regalloc, operations): self._regalloc = regalloc + regalloc.compute_hint_frame_locations(operations) regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging - frame_depth = regalloc.fm.frame_depth + frame_depth = regalloc.fm.get_frame_depth() param_depth = regalloc.param_depth jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: - target_frame_depth = jump_target_descr._x86_frame_depth - target_param_depth = jump_target_descr._x86_param_depth + target_frame_depth = jump_target_descr._x86_clt.frame_depth + target_param_depth = jump_target_descr._x86_clt.param_depth frame_depth = max(frame_depth, target_frame_depth) param_depth = max(param_depth, target_param_depth) return frame_depth, param_depth @@ -793,152 +797,21 @@ self.mc.MOV_ri(ebx.value, rst) # MOV ebx, rootstacktop self.mc.SUB_mi8((ebx.value, 0), 2*WORD) # SUB [ebx], 2*WORD - def _assemble_bootstrap_direct_call(self, arglocs, jmppos, stackdepth): - if IS_X86_64: - return self._assemble_bootstrap_direct_call_64(arglocs, jmppos, stackdepth) - # XXX pushing ebx esi and edi is a bit pointless, since we store - # all regsiters anyway, for the case of guard_not_forced - # XXX this can be improved greatly. 
Right now it'll behave like - # a normal call - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - offset = 2 * WORD - tmp = eax - xmmtmp = xmm0 - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert not loc.is_xmm - self.mc.MOV_rb(loc.value, offset) - else: - self.mc.MOV_rb(tmp.value, offset) - self.mc.MOV(loc, tmp) - offset += WORD - loc = floatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert loc.is_xmm - self.mc.MOVSD_xb(loc.value, offset) - else: - self.mc.MOVSD_xb(xmmtmp.value, offset) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - offset += 2 * WORD - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - - def _assemble_bootstrap_direct_call_64(self, arglocs, jmppos, stackdepth): - # XXX: Very similar to _emit_call_64 - - src_locs = [] - dst_locs = [] - xmm_src_locs = [] - xmm_dst_locs = [] - get_from_stack = [] - - # In reverse order for use with pop() - unused_gpr = [r9, r8, ecx, edx, esi, edi] - unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] - - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - - # The lists are padded with Nones - assert len(nonfloatlocs) == len(floatlocs) - - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if len(unused_gpr) > 0: - src_locs.append(unused_gpr.pop()) - dst_locs.append(loc) - else: - get_from_stack.append((loc, False)) - - floc = floatlocs[i] - if floc is not None: - if len(unused_xmm) > 0: - xmm_src_locs.append(unused_xmm.pop()) - xmm_dst_locs.append(floc) - else: - get_from_stack.append((floc, True)) - - remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) - remap_frame_layout(self, xmm_src_locs, xmm_dst_locs, X86_64_XMM_SCRATCH_REG) - - for i in range(len(get_from_stack)): - loc, is_xmm = get_from_stack[i] - if is_xmm: - self.mc.MOVSD_xb(X86_64_XMM_SCRATCH_REG.value, (2 + i) * WORD) - self.mc.MOVSD(loc, X86_64_XMM_SCRATCH_REG) - else: - self.mc.MOV_rb(X86_64_SCRATCH_REG.value, (2 + i) * WORD) - # XXX: We're assuming that "loc" won't require regloc to - # clobber the scratch register - self.mc.MOV(loc, X86_64_SCRATCH_REG) - - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking - oldnonfloatlocs, oldfloatlocs = oldlooptoken._x86_arglocs - newnonfloatlocs, newfloatlocs = newlooptoken._x86_arglocs - assert len(oldnonfloatlocs) == len(newnonfloatlocs) - assert len(oldfloatlocs) == len(newfloatlocs) + old_nbargs = oldlooptoken.compiled_loop_token._debug_nbargs + new_nbargs = newlooptoken.compiled_loop_token._debug_nbargs + assert old_nbargs == new_nbargs # we overwrite the instructions at the old _x86_direct_bootstrap_code # to start with a JMP to the new _x86_direct_bootstrap_code. # Ideally we should rather patch all existing CALLs, but well. 
- oldadr = oldlooptoken._x86_direct_bootstrap_code - target = newlooptoken._x86_direct_bootstrap_code + oldadr = oldlooptoken._x86_function_addr + target = newlooptoken._x86_function_addr mc = codebuf.MachineCodeBlockWrapper() mc.JMP(imm(target)) + assert mc.get_relative_pos() <= 13 # keep in sync with prepare_loop() mc.copy_to_raw_memory(oldadr) - def _assemble_bootstrap_code(self, inputargs, arglocs): - nonfloatlocs, floatlocs = arglocs - self._call_header() - stackadjustpos = self._patchable_stackadjust() - tmp = eax - xmmtmp = xmm0 - self.mc.begin_reuse_scratch_register() - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is None: - continue - if isinstance(loc, RegLoc): - target = loc - else: - target = tmp - if inputargs[i].type == REF: - adr = self.fail_boxes_ptr.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - self.mc.MOV(heap(adr), imm0) - else: - adr = self.fail_boxes_int.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - if target is not loc: - assert isinstance(loc, StackLoc) - self.mc.MOV_br(loc.value, target.value) - for i in range(len(floatlocs)): - loc = floatlocs[i] - if loc is None: - continue - adr = self.fail_boxes_float.get_addr_for_num(i) - if isinstance(loc, RegLoc): - self.mc.MOVSD(loc, heap(adr)) - else: - self.mc.MOVSD(xmmtmp, heap(adr)) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - self.mc.end_reuse_scratch_register() - return stackadjustpos - def dump(self, text): if not self.verbose: return @@ -965,7 +838,7 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.SUB_ri(esp.value, 8) # = size of doubles self.mc.MOVSD_sx(0, loc.value) - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.PUSH_b(get_ebp_ofs(loc.position)) self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) @@ -976,13 +849,25 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.MOVSD_xs(loc.value, 0) self.mc.ADD_ri(esp.value, 8) # = size of doubles - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.POP_b(get_ebp_ofs(loc.position + 1)) self.mc.POP_b(get_ebp_ofs(loc.position)) else: self.mc.POP(loc) + def regalloc_immedmem2mem(self, from_loc, to_loc): + # move a ConstFloatLoc directly to a StackLoc, as two MOVs + # (even on x86-64, because the immediates are encoded as 32 bits) + assert isinstance(from_loc, ConstFloatLoc) + assert isinstance(to_loc, StackLoc) + low_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[0] + high_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[1] + low_part = intmask(low_part) + high_part = intmask(high_part) + self.mc.MOV_bi(to_loc.value, low_part) + self.mc.MOV_bi(to_loc.value + 4, high_part) + def regalloc_perform(self, op, arglocs, resloc): genop_list[op.getopnum()](self, op, arglocs, resloc) @@ -1134,18 +1019,18 @@ self.mc.MOVSD_sx(p, loc.value) else: self.mc.MOV_sr(p, loc.value) - p += round_up_to_4(loc.width) + p += loc.get_width() p = 0 for i in range(start, n): loc = arglocs[i] if not isinstance(loc, RegLoc): - if loc.width == 8: + if loc.get_width() == 8: self.mc.MOVSD(xmm0, loc) self.mc.MOVSD_sx(p, xmm0.value) else: self.mc.MOV(tmp, loc) self.mc.MOV_sr(p, tmp.value) - p += round_up_to_4(loc.width) + p += loc.get_width() self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) @@ -1882,10 +1767,10 @@ DESCR_INT = 0x01 DESCR_FLOAT = 0x02 DESCR_SPECIAL = 
0x03 - # XXX: 4*8 works on i386, should we optimize for that case? - CODE_FROMSTACK = 4*16 + CODE_FROMSTACK = 4 * (8 + 8*IS_X86_64) CODE_STOP = 0 | DESCR_SPECIAL CODE_HOLE = 4 | DESCR_SPECIAL + CODE_INPUTARG = 8 | DESCR_SPECIAL def write_failure_recovery_description(self, mc, failargs, locs): for i in range(len(failargs)): @@ -1901,7 +1786,11 @@ raise AssertionError("bogus kind") loc = locs[i] if isinstance(loc, StackLoc): - n = self.CODE_FROMSTACK//4 + loc.position + pos = loc.position + if pos < 0: + mc.writechar(chr(self.CODE_INPUTARG)) + pos = ~pos + n = self.CODE_FROMSTACK//4 + pos else: assert isinstance(loc, RegLoc) n = loc.value @@ -1921,6 +1810,7 @@ descr_to_box_type = [REF, INT, FLOAT] bytecode = rffi.cast(rffi.UCHARP, bytecode) arglocs = [] + code_inputarg = False while 1: # decode the next instruction from the bytecode code = rffi.cast(lltype.Signed, bytecode[0]) @@ -1939,11 +1829,17 @@ break kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 + if code_inputarg: + code = ~code + code_inputarg = False loc = X86FrameManager.frame_pos(code, descr_to_box_type[kind]) elif code == self.CODE_STOP: break elif code == self.CODE_HOLE: continue + elif code == self.CODE_INPUTARG: + code_inputarg = True + continue else: # 'code' identifies a register kind = code & 3 @@ -1959,6 +1855,7 @@ def grab_frame_values(self, bytecode, frame_addr, allregisters): # no malloc allowed here!! self.fail_ebp = allregisters[16 + ebp.value] + code_inputarg = False num = 0 value_hi = 0 while 1: @@ -1979,6 +1876,9 @@ # load the value from the stack kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 + if code_inputarg: + code = ~code + code_inputarg = False stackloc = frame_addr + get_ebp_ofs(code) value = rffi.cast(rffi.LONGP, stackloc)[0] if kind == self.DESCR_FLOAT and WORD == 4: @@ -1991,6 +1891,9 @@ if code == self.CODE_HOLE: num += 1 continue + if code == self.CODE_INPUTARG: + code_inputarg = True + continue assert code == self.CODE_STOP break code >>= 2 @@ -2095,9 +1998,9 @@ # returns in eax the fail_index # now we return from the complete frame, which starts from - # _assemble_bootstrap_code(). The LEA in _call_footer below throws - # away most of the frame, including all the PUSHes that we did just - # above. + # _call_header_with_stack_check(). The LEA in _call_footer below + # throws away most of the frame, including all the PUSHes that we + # did just above. 
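
The CODE_INPUTARG changes above extend the failure-recovery bytecode so that stack locations with a negative position (the input arguments living above the saved return address, at [ebp+8..]) can be encoded: such a position is prefixed with CODE_INPUTARG and stored bitwise-inverted, which makes it non-negative. A self-contained toy version of this encoding, with the constants copied from the patch and the splitting of values larger than 0x7F into 7-bit chunks left out:

    # Toy model of the CODE_INPUTARG encoding; codes are kept as plain ints.
    DESCR_INT      = 0x01
    DESCR_SPECIAL  = 0x03
    IS_X86_64      = 1                       # assume the 64-bit layout
    CODE_FROMSTACK = 4 * (8 + 8*IS_X86_64)
    CODE_INPUTARG  = 8 | DESCR_SPECIAL

    def encode_stackloc(pos, kind):
        out = []
        if pos < 0:                          # input argument at [ebp+8..]
            out.append(CODE_INPUTARG)
            pos = ~pos                       # store a non-negative number
        out.append(kind + 4 * (CODE_FROMSTACK // 4 + pos))
        return out

    def decode_stackloc(codes):
        inputarg = False
        for code in codes:
            if code == CODE_INPUTARG:
                inputarg = True
                continue
            kind = code & 3
            pos = (code - CODE_FROMSTACK) >> 2
            if inputarg:
                pos = ~pos                   # undo the inversion
            return pos, kind

    assert decode_stackloc(encode_stackloc(-3, DESCR_INT)) == (-3, DESCR_INT)
    assert decode_stackloc(encode_stackloc(5, DESCR_INT)) == (5, DESCR_INT)
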
self._call_footer() rawstart = mc.materialize(self.cpu.asmmemmgr, []) @@ -2180,7 +2083,7 @@ argtypes=op.getdescr().get_arg_types(), callconv=op.getdescr().get_call_conv()) - if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: + if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.type == FLOAT: # a float or a long long return if op.getdescr().get_return_type() == 'L': self.mc.MOV_br(resloc.value, eax.value) # long long @@ -2344,11 +2247,11 @@ fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) descr = op.getdescr() - assert isinstance(descr, LoopToken) - assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) + assert isinstance(descr, JitCellToken) + assert len(arglocs) - 2 == descr.compiled_loop_token._debug_nbargs # - # Write a call to the direct_bootstrap_code of the target assembler - self._emit_call(fail_index, imm(descr._x86_direct_bootstrap_code), + # Write a call to the target assembler + self._emit_call(fail_index, imm(descr._x86_function_addr), arglocs, 2, tmp=eax) if op.result is None: assert result_loc is None @@ -2578,15 +2481,21 @@ gcrootmap.put(self.gcrootmap_retaddr_forced, mark) self.gcrootmap_retaddr_forced = -1 - def target_arglocs(self, loop_token): - return loop_token._x86_arglocs - - def closing_jump(self, loop_token): - if loop_token is self.currently_compiling_loop: + def closing_jump(self, target_token): + # The backend's logic assumes that the target code is in a piece of + # assembler that was also called with the same number of arguments, + # so that the locations [ebp+8..] of the input arguments are valid + # stack locations both before and after the jump. + my_nbargs = self.current_clt._debug_nbargs + target_nbargs = target_token._x86_clt._debug_nbargs + assert my_nbargs == target_nbargs + # + target = target_token._x86_loop_code + if target_token in self.target_tokens_currently_compiling: curpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(self.looppos - curpos) + self.mc.JMP_l(target - curpos) else: - self.mc.JMP(imm(loop_token._x86_loop_code)) + self.mc.JMP(imm(target)) def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) @@ -2659,11 +2568,6 @@ num = getattr(rop, opname.upper()) genop_list[num] = value -def round_up_to_4(size): - if size < 4: - return 4 - return size - # XXX: ri386 migration shims: def addr_add(reg_or_imm1, reg_or_imm2, offset=0, scale=0): return AddressLoc(reg_or_imm1, reg_or_imm2, scale, offset) diff --git a/pypy/jit/backend/x86/jump.py b/pypy/jit/backend/x86/jump.py --- a/pypy/jit/backend/x86/jump.py +++ b/pypy/jit/backend/x86/jump.py @@ -1,6 +1,6 @@ import sys from pypy.tool.pairtype import extendabletype -from pypy.jit.backend.x86.regloc import ImmedLoc, StackLoc +from pypy.jit.backend.x86.regloc import ImmediateAssemblerLocation, StackLoc def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg): pending_dests = len(dst_locations) @@ -12,7 +12,7 @@ srccount[key] = 0 for i in range(len(dst_locations)): src = src_locations[i] - if isinstance(src, ImmedLoc): + if isinstance(src, ImmediateAssemblerLocation): continue key = src._getregkey() if key in srccount: @@ -31,7 +31,7 @@ srccount[key] = -1 # means "it's done" pending_dests -= 1 src = src_locations[i] - if not isinstance(src, ImmedLoc): + if not isinstance(src, ImmediateAssemblerLocation): key = src._getregkey() if key in srccount: srccount[key] -= 1 @@ -66,6 +66,13 @@ def _move(assembler, src, dst, tmpreg): if 
dst.is_memory_reference() and src.is_memory_reference(): + if isinstance(src, ImmediateAssemblerLocation): + assembler.regalloc_immedmem2mem(src, dst) + return + if tmpreg is None: + assembler.regalloc_push(src) + assembler.regalloc_pop(dst) + return assembler.regalloc_mov(src, tmpreg) src = tmpreg assembler.regalloc_mov(src, dst) @@ -87,7 +94,7 @@ dstloc = dst_locations2[i] if isinstance(loc, StackLoc): key = loc._getregkey() - if (key in dst_keys or (loc.width > WORD and + if (key in dst_keys or (loc.get_width() > WORD and (key + WORD) in dst_keys)): assembler.regalloc_push(loc) extrapushes.append(dstloc) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -5,7 +5,8 @@ import os from pypy.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr, ResOperation, BoxPtr, ConstFloat, - BoxFloat, LoopToken, INT, REF, FLOAT) + BoxFloat, INT, REF, FLOAT, + TargetToken, JitCellToken) from pypy.jit.backend.x86.regloc import * from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.rlib.objectmodel import we_are_translated @@ -27,7 +28,7 @@ class X86RegisterManager(RegisterManager): box_types = [INT, REF] - all_regs = [eax, ecx, edx, ebx, esi, edi] + all_regs = [ecx, eax, edx, ebx, esi, edi] no_lower_byte_regs = [esi, edi] save_around_call_regs = [eax, edx, ecx] frame_reg = ebp @@ -59,7 +60,7 @@ class X86_64_RegisterManager(X86RegisterManager): # r11 omitted because it's used as scratch - all_regs = [eax, ecx, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15] + all_regs = [ecx, eax, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15] no_lower_byte_regs = [] save_around_call_regs = [eax, ecx, edx, esi, edi, r8, r9, r10] @@ -129,15 +130,19 @@ @staticmethod def frame_pos(i, box_type): if IS_X86_32 and box_type == FLOAT: - return StackLoc(i, get_ebp_ofs(i+1), 2, box_type) + return StackLoc(i, get_ebp_ofs(i+1), box_type) else: - return StackLoc(i, get_ebp_ofs(i), 1, box_type) + return StackLoc(i, get_ebp_ofs(i), box_type) @staticmethod def frame_size(box_type): if IS_X86_32 and box_type == FLOAT: return 2 else: return 1 + @staticmethod + def get_loc_index(loc): + assert isinstance(loc, StackLoc) + return loc.position if WORD == 4: gpr_reg_mgr_cls = X86RegisterManager @@ -159,6 +164,8 @@ # to be read/used by the assembler too self.jump_target_descr = None self.close_stack_struct = 0 + self.final_jump_op = None + self.min_bytes_before_label = 0 def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() @@ -167,74 +174,83 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity = self._compute_vars_longevity(inputargs, operations) - self.longevity = longevity - self.rm = gpr_reg_mgr_cls(longevity, + self._compute_vars_longevity(inputargs, operations) + self.rm = gpr_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) - self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, + self.xrm = xmm_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) return operations def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): operations = self._prepare(inputargs, operations, allgcrefs) - jump = operations[-1] - loop_consts = self._compute_loop_consts(inputargs, jump, looptoken) - self.loop_consts = loop_consts - return self._process_inputargs(inputargs), operations + self._set_initial_bindings(inputargs) + # note: we need to 
make a copy of inputargs because possibly_free_vars + # is also used on op args, which is a non-resizable list + self.possibly_free_vars(list(inputargs)) + self.min_bytes_before_label = 13 + return operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, allgcrefs): operations = self._prepare(inputargs, operations, allgcrefs) - self.loop_consts = {} self._update_bindings(arglocs, inputargs) - self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] return operations def reserve_param(self, n): self.param_depth = max(self.param_depth, n) - def _process_inputargs(self, inputargs): - # XXX we can sort out here by longevity if we need something - # more optimal - floatlocs = [None] * len(inputargs) - nonfloatlocs = [None] * len(inputargs) - # Don't use all_regs[0] for passing arguments around a loop. - # Must be kept in sync with consider_jump(). - # XXX this should probably go to llsupport/regalloc.py - xmmtmp = self.xrm.free_regs.pop(0) - tmpreg = self.rm.free_regs.pop(0) - assert tmpreg == X86RegisterManager.all_regs[0] - assert xmmtmp == X86XMMRegisterManager.all_regs[0] - for i in range(len(inputargs)): - arg = inputargs[i] - assert not isinstance(arg, Const) - reg = None - if arg not in self.loop_consts and self.longevity[arg][1] > -1: - if arg.type == FLOAT: - # xxx is it really a good idea? at the first CALL they - # will all be flushed anyway - reg = self.xrm.try_allocate_reg(arg) + def _set_initial_bindings(self, inputargs): + if IS_X86_64: + inputargs = self._set_initial_bindings_regs_64(inputargs) + # ... + # stack layout: arg2 + # arg1 + # arg0 + # return address + # saved ebp <-- ebp points here + # ... + cur_frame_pos = - 1 - FRAME_FIXED_SIZE + assert get_ebp_ofs(cur_frame_pos-1) == 2*WORD + assert get_ebp_ofs(cur_frame_pos-2) == 3*WORD + # + for box in inputargs: + assert isinstance(box, Box) + # + if IS_X86_32 and box.type == FLOAT: + cur_frame_pos -= 2 + else: + cur_frame_pos -= 1 + loc = self.fm.frame_pos(cur_frame_pos, box.type) + self.fm.set_binding(box, loc) + + def _set_initial_bindings_regs_64(self, inputargs): + # In reverse order for use with pop() + unused_gpr = [r9, r8, ecx, edx, esi, edi] + unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] + # + pass_on_stack = [] + # + for box in inputargs: + assert isinstance(box, Box) + # + if box.type == FLOAT: + if len(unused_xmm) > 0: + ask = unused_xmm.pop() + got = self.xrm.try_allocate_reg(box, selected_reg=ask) + assert ask == got else: - reg = self.rm.try_allocate_reg(arg) - if reg: - loc = reg + pass_on_stack.append(box) else: - loc = self.fm.loc(arg) - if arg.type == FLOAT: - floatlocs[i] = loc - else: - nonfloatlocs[i] = loc - # otherwise we have it saved on stack, so no worry - self.rm.free_regs.insert(0, tmpreg) - self.xrm.free_regs.insert(0, xmmtmp) - assert tmpreg not in nonfloatlocs - assert xmmtmp not in floatlocs - # note: we need to make a copy of inputargs because possibly_free_vars - # is also used on op args, which is a non-resizable list - self.possibly_free_vars(list(inputargs)) - return nonfloatlocs, floatlocs + if len(unused_gpr) > 0: + ask = unused_gpr.pop() + got = self.rm.try_allocate_reg(box, selected_reg=ask) + assert ask == got + else: + pass_on_stack.append(box) + # + return pass_on_stack def possibly_free_var(self, var): if var.type == FLOAT: @@ -287,15 +303,15 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.getopnum() != rop.JUMP or jump.getdescr() is not 
looptoken: - loop_consts = {} - else: - loop_consts = {} - for i in range(len(inputargs)): - if inputargs[i] is jump.getarg(i): - loop_consts[inputargs[i]] = i - return loop_consts + #def _compute_loop_consts(self, inputargs, jump, looptoken): + # if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: + # loop_consts = {} + # else: + # loop_consts = {} + # for i in range(len(inputargs)): + # if inputargs[i] is jump.getarg(i): + # loop_consts[inputargs[i]] = i + # return loop_consts def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py @@ -311,7 +327,7 @@ self.xrm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) else: if isinstance(loc, RegLoc): if loc is ebp: @@ -320,7 +336,7 @@ self.rm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) self.rm.free_regs = [] for reg in self.rm.all_regs: if reg not in used: @@ -356,7 +372,7 @@ def get_current_depth(self): # return (self.fm.frame_depth, self.param_depth), but trying to share # the resulting tuple among several calls - arg0 = self.fm.frame_depth + arg0 = self.fm.get_frame_depth() arg1 = self.param_depth result = self.assembler._current_depths_cache if result[0] != arg0 or result[1] != arg1: @@ -445,13 +461,26 @@ i += 1 assert not self.rm.reg_bindings assert not self.xrm.reg_bindings + self.flush_loop() self.assembler.mc.mark_op(None) # end of the loop + def flush_loop(self): + # rare case: if the loop is too short, pad with NOPs + mc = self.assembler.mc + while mc.get_relative_pos() < self.min_bytes_before_label: + mc.NOP() + def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" + + # returns a pair longevity/useful. Non-useful variables are ones that + # never appear in the assembler or it does not matter if they appear on + # stack or in registers. 
Main example is loop arguments that go + # only to guard operations or to jump or to finish produced = {} last_used = {} + last_real_usage = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -459,10 +488,16 @@ continue assert op.result not in produced produced[op.result] = i + opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) - if isinstance(arg, Box) and arg not in last_used: + if not isinstance(arg, Box): + continue + if arg not in last_used: last_used[arg] = i + if opnum != rop.JUMP and opnum != rop.LABEL: + if arg not in last_real_usage: + last_real_usage[arg] = i if op.is_guard(): for arg in op.getfailargs(): if arg is None: # hole @@ -470,7 +505,8 @@ assert isinstance(arg, Box) if arg not in last_used: last_used[arg] = i - + self.last_real_usage = last_real_usage + # longevity = {} for arg in produced: if arg in last_used: @@ -486,7 +522,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity + self.longevity = longevity def loc(self, v): if v is None: # xxx kludgy @@ -883,7 +919,7 @@ def consider_call_assembler(self, op, guard_op): descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, JitCellToken) jd = descr.outermost_jitdriver_sd assert jd is not None size = jd.portal_calldescr.get_result_size(self.translate_support_code) @@ -1313,35 +1349,72 @@ self.rm.possibly_free_var(tmpbox_low) self.rm.possibly_free_var(tmpbox_high) + def compute_hint_frame_locations(self, operations): + # optimization only: fill in the 'hint_frame_locations' dictionary + # of 'fm' based on the JUMP at the end of the loop, by looking + # at where we would like the boxes to be after the jump. + op = operations[-1] + if op.getopnum() != rop.JUMP: + return + self.final_jump_op = op + descr = op.getdescr() + assert isinstance(descr, TargetToken) + if descr._x86_loop_code != 0: + # if the target LABEL was already compiled, i.e. if it belongs + # to some already-compiled piece of code + self._compute_hint_frame_locations_from_descr(descr) + #else: + # The loop ends in a JUMP going back to a LABEL in the same loop. + # We cannot fill 'hint_frame_locations' immediately, but we can + # wait until the corresponding consider_label() to know where the + # we would like the boxes to be after the jump. 
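
The 'hint_frame_locations' optimization introduced above boils down to: if the JUMP at the end of the loop wants a box in a particular stack slot of the target LABEL, try to give the box that slot from the start, so the jump needs no move at all. A simplified stand-in for the frame-manager side of this idea (not the real FrameManager code):

    # Simplified stand-in for the hint mechanism; shows the idea only.
    def pick_frame_slot(box, hint_frame_locations, used_slots):
        hint = hint_frame_locations.get(box)
        if hint is not None and hint not in used_slots:
            slot = hint                      # honour the slot the JUMP wants
        else:
            slot = 0
            while slot in used_slots:        # otherwise take the first free slot
                slot += 1
        used_slots.add(slot)
        return slot

    hints = {'i1': 2}                        # the final JUMP wants 'i1' in slot 2
    used = set()
    assert pick_frame_slot('i0', hints, used) == 0
    assert pick_frame_slot('i1', hints, used) == 2   # no move needed at the JUMP
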
+ + def _compute_hint_frame_locations_from_descr(self, descr): + arglocs = descr._x86_arglocs + jump_op = self.final_jump_op + assert len(arglocs) == jump_op.numargs() + for i in range(jump_op.numargs()): + box = jump_op.getarg(i) + if isinstance(box, Box): + loc = arglocs[i] + if isinstance(loc, StackLoc): + self.fm.hint_frame_locations[box] = loc + def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, TargetToken) + arglocs = descr._x86_arglocs self.jump_target_descr = descr - nonfloatlocs, floatlocs = assembler.target_arglocs(self.jump_target_descr) - # compute 'tmploc' to be all_regs[0] by spilling what is there - box = TempBox() - box1 = TempBox() - tmpreg = X86RegisterManager.all_regs[0] - tmploc = self.rm.force_allocate_reg(box, selected_reg=tmpreg) - xmmtmp = X86XMMRegisterManager.all_regs[0] - self.xrm.force_allocate_reg(box1, selected_reg=xmmtmp) # Part about non-floats - # XXX we don't need a copy, we only just the original list - src_locations1 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type != FLOAT] - assert tmploc not in nonfloatlocs - dst_locations1 = [loc for loc in nonfloatlocs if loc is not None] + src_locations1 = [] + dst_locations1 = [] # Part about floats - src_locations2 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type == FLOAT] - dst_locations2 = [loc for loc in floatlocs if loc is not None] + src_locations2 = [] + dst_locations2 = [] + # Build the four lists + for i in range(op.numargs()): + box = op.getarg(i) + src_loc = self.loc(box) + dst_loc = arglocs[i] + if box.type != FLOAT: + src_locations1.append(src_loc) + dst_locations1.append(dst_loc) + else: + src_locations2.append(src_loc) + dst_locations2.append(dst_loc) + # Do we have a temp var? 
+ if IS_X86_64: + tmpreg = X86_64_SCRATCH_REG + xmmtmp = X86_64_XMM_SCRATCH_REG + else: + tmpreg = None + xmmtmp = None + # Do the remapping remap_frame_layout_mixed(assembler, - src_locations1, dst_locations1, tmploc, + src_locations1, dst_locations1, tmpreg, src_locations2, dst_locations2, xmmtmp) - self.rm.possibly_free_var(box) - self.xrm.possibly_free_var(box1) self.possibly_free_vars_for_op(op) assembler.closing_jump(self.jump_target_descr) @@ -1357,7 +1430,7 @@ def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): shape = gcrootmap.get_basic_shape(IS_X86_64) - for v, val in self.fm.frame_bindings.items(): + for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) gcrootmap.add_frame_offset(shape, get_ebp_ofs(val.position)) @@ -1392,6 +1465,56 @@ # the FORCE_TOKEN operation returns directly 'ebp' self.rm.force_allocate_frame_reg(op.result) + def consider_label(self, op): + descr = op.getdescr() + assert isinstance(descr, TargetToken) + inputargs = op.getarglist() + arglocs = [None] * len(inputargs) + # + # we use force_spill() on the boxes that are not going to be really + # used any more in the loop, but that are kept alive anyway + # by being in a next LABEL's or a JUMP's argument or fail_args + # of some guard + position = self.rm.position + for arg in inputargs: + assert isinstance(arg, Box) + if self.last_real_usage.get(arg, -1) <= position: + self.force_spill_var(arg) + # + # we need to make sure that no variable is stored in ebp + for arg in inputargs: + if self.loc(arg) is ebp: + loc2 = self.fm.loc(arg) + self.assembler.mc.MOV(loc2, ebp) + self.rm.bindings_to_frame_reg.clear() + # + for i in range(len(inputargs)): + arg = inputargs[i] + assert isinstance(arg, Box) + loc = self.loc(arg) + assert loc is not ebp + arglocs[i] = loc + if isinstance(loc, RegLoc): + self.fm.mark_as_free(arg) + # + # if we are too close to the start of the loop, the label's target may + # get overridden by redirect_call_assembler(). (rare case) + self.flush_loop() + # + descr._x86_arglocs = arglocs + descr._x86_loop_code = self.assembler.mc.get_relative_pos() + descr._x86_clt = self.assembler.current_clt + self.assembler.target_tokens_currently_compiling[descr] = None + self.possibly_free_vars_for_op(op) + # + # if the LABEL's descr is precisely the target of the JUMP at the + # end of the same loop, i.e. if what we are compiling is a single + # loop that ends up jumping to this LABEL, then we can now provide + # the hints about the expected position of the spilled variables. + jump_op = self.final_jump_op + if jump_op is not None and jump_op.getdescr() is descr: + self._compute_hint_frame_locations_from_descr(descr) + def not_implemented_op(self, op): not_implemented("not implemented operation: %s" % op.getopname()) @@ -1447,3 +1570,7 @@ def not_implemented(msg): os.write(2, '[x86/regalloc] %s\n' % msg) raise NotImplementedError(msg) + +# xxx hack: set a default value for TargetToken._x86_loop_code. +# If 0, we know that it is a LABEL that was not compiled yet. +TargetToken._x86_loop_code = 0 diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -16,8 +16,7 @@ # class AssemblerLocation(object): - # XXX: Is adding "width" here correct? 
- _attrs_ = ('value', 'width', '_location_code') + _attrs_ = ('value', '_location_code') _immutable_ = True def _getregkey(self): return self.value @@ -28,6 +27,9 @@ def location_code(self): return self._location_code + def get_width(self): + raise NotImplementedError + def value_r(self): return self.value def value_b(self): return self.value def value_s(self): return self.value @@ -43,14 +45,21 @@ _immutable_ = True _location_code = 'b' - def __init__(self, position, ebp_offset, num_words, type): - assert ebp_offset < 0 # so no confusion with RegLoc.value + def __init__(self, position, ebp_offset, type): + # _getregkey() returns self.value; the value returned must not + # conflict with RegLoc._getregkey(). It doesn't a bit by chance, + # so let it fail the following assert if it no longer does. + assert not (0 <= ebp_offset < 8 + 8 * IS_X86_64) self.position = position self.value = ebp_offset - self.width = num_words * WORD # One of INT, REF, FLOAT self.type = type + def get_width(self): + if self.type == FLOAT: + return 8 + return WORD + def __repr__(self): return '%d(%%ebp)' % (self.value,) @@ -64,10 +73,8 @@ self.value = regnum self.is_xmm = is_xmm if self.is_xmm: - self.width = 8 self._location_code = 'x' else: - self.width = WORD self._location_code = 'r' def __repr__(self): if self.is_xmm: @@ -75,6 +82,11 @@ else: return rx86.R.names[self.value] + def get_width(self): + if self.is_xmm: + return 8 + return WORD + def lowest8bits(self): assert not self.is_xmm return RegLoc(rx86.low_byte(self.value), False) @@ -92,9 +104,11 @@ else: return eax -class ImmedLoc(AssemblerLocation): +class ImmediateAssemblerLocation(AssemblerLocation): _immutable_ = True - width = WORD + +class ImmedLoc(ImmediateAssemblerLocation): + _immutable_ = True _location_code = 'i' def __init__(self, value): @@ -105,6 +119,9 @@ def getint(self): return self.value + def get_width(self): + return WORD + def __repr__(self): return "ImmedLoc(%d)" % (self.value) @@ -117,7 +134,6 @@ class AddressLoc(AssemblerLocation): _immutable_ = True - width = WORD # The address is base_loc + (scaled_loc << scale) + static_offset def __init__(self, base_loc, scaled_loc, scale=0, static_offset=0): assert 0 <= scale < 4 @@ -146,6 +162,9 @@ info = getattr(self, attr, '?') return '' % (self._location_code, info) + def get_width(self): + return WORD + def value_a(self): return self.loc_a @@ -180,32 +199,34 @@ raise AssertionError(self._location_code) return result -class ConstFloatLoc(AssemblerLocation): - # XXX: We have to use this class instead of just AddressLoc because - # we want a width of 8 (... I think. Check this!) +class ConstFloatLoc(ImmediateAssemblerLocation): _immutable_ = True - width = 8 _location_code = 'j' def __init__(self, address): self.value = address + def get_width(self): + return 8 + def __repr__(self): return '' % (self.value,) if IS_X86_32: - class FloatImmedLoc(AssemblerLocation): + class FloatImmedLoc(ImmediateAssemblerLocation): # This stands for an immediate float. It cannot be directly used in # any assembler instruction. Instead, it is meant to be decomposed # in two 32-bit halves. On 64-bit, FloatImmedLoc() is a function # instead; see below. 
_immutable_ = True - width = 8 _location_code = '#' # don't use me def __init__(self, floatstorage): self.aslonglong = floatstorage + def get_width(self): + return 8 + def low_part(self): return intmask(self.aslonglong) diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.llinterp import LLInterpreter from pypy.rlib.objectmodel import we_are_translated +from pypy.jit.codewriter import longlong from pypy.jit.metainterp import history, compile from pypy.jit.backend.x86.assembler import Assembler386 from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS @@ -21,7 +22,6 @@ supports_floats = True supports_singlefloats = True - BOOTSTRAP_TP = lltype.FuncType([], lltype.Signed) dont_keepalive_stuff = False # for tests with_threads = False @@ -91,15 +91,6 @@ return self.assembler.assemble_bridge(faildescr, inputargs, operations, original_loop_token, log=log) - def set_future_value_int(self, index, intvalue): - self.assembler.fail_boxes_int.setitem(index, intvalue) - - def set_future_value_float(self, index, floatvalue): - self.assembler.fail_boxes_float.setitem(index, floatvalue) - - def set_future_value_ref(self, index, ptrvalue): - self.assembler.fail_boxes_ptr.setitem(index, ptrvalue) - def get_latest_value_int(self, index): return self.assembler.fail_boxes_int.getitem(index) @@ -122,27 +113,28 @@ # the FORCE_TOKEN operation and this helper both return 'ebp'. return self.assembler.fail_ebp - def execute_token(self, executable_token): - addr = executable_token._x86_bootstrap_code - #llop.debug_print(lltype.Void, ">>>> Entering", addr) - func = rffi.cast(lltype.Ptr(self.BOOTSTRAP_TP), addr) - fail_index = self._execute_call(func) - #llop.debug_print(lltype.Void, "<<<< Back") - return self.get_fail_descr_from_number(fail_index) - - def _execute_call(self, func): - # help flow objspace - prev_interpreter = None - if not self.translate_support_code: - prev_interpreter = LLInterpreter.current_interpreter - LLInterpreter.current_interpreter = self.debug_ll_interpreter - res = 0 - try: - res = func() - finally: + def make_execute_token(self, *ARGS): + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, lltype.Signed)) + # + def execute_token(executable_token, *args): + clt = executable_token.compiled_loop_token + assert len(args) == clt._debug_nbargs + # + addr = executable_token._x86_function_addr + func = rffi.cast(FUNCPTR, addr) + #llop.debug_print(lltype.Void, ">>>> Entering", addr) + prev_interpreter = None # help flow space if not self.translate_support_code: - LLInterpreter.current_interpreter = prev_interpreter - return res + prev_interpreter = LLInterpreter.current_interpreter + LLInterpreter.current_interpreter = self.debug_ll_interpreter + try: + fail_index = func(*args) + finally: + if not self.translate_support_code: + LLInterpreter.current_interpreter = prev_interpreter + #llop.debug_print(lltype.Void, "<<<< Back") + return self.get_fail_descr_from_number(fail_index) + return execute_token def cast_ptr_to_int(x): adr = llmemory.cast_ptr_to_adr(x) @@ -215,14 +207,3 @@ super(CPU_X86_64, self).__init__(*args, **kwargs) CPU = CPU386 - -# silence warnings -##history.LoopToken._x86_param_depth = 0 -##history.LoopToken._x86_arglocs = (None, None) -##history.LoopToken._x86_frame_depth = 0 -##history.LoopToken._x86_bootstrap_code = 0 -##history.LoopToken._x86_direct_bootstrap_code = 0 -##history.LoopToken._x86_loop_code = 0 
-##history.LoopToken._x86_debug_checksum = 0 -##compile.AbstractFailDescr._x86_current_depths = (0, 0) -##compile.AbstractFailDescr._x86_adr_jump_offset = 0 diff --git a/pypy/jit/backend/x86/test/test_assembler.py b/pypy/jit/backend/x86/test/test_assembler.py --- a/pypy/jit/backend/x86/test/test_assembler.py +++ b/pypy/jit/backend/x86/test/test_assembler.py @@ -46,12 +46,13 @@ xmm2] assert len(failargs) == len(locs) assembler.write_failure_recovery_description(mc, failargs, locs) - nums = [Assembler386.DESCR_INT + 4*(16+0), - Assembler386.DESCR_REF + 4*(16+1), - Assembler386.DESCR_FLOAT + 4*(16+10), - Assembler386.DESCR_INT + 4*(16+100), - Assembler386.DESCR_REF + 4*(16+101), - Assembler386.DESCR_FLOAT + 4*(16+110), + base = 8 + 8*IS_X86_64 + nums = [Assembler386.DESCR_INT + 4*(base+0), + Assembler386.DESCR_REF + 4*(base+1), + Assembler386.DESCR_FLOAT + 4*(base+10), + Assembler386.DESCR_INT + 4*(base+100), + Assembler386.DESCR_REF + 4*(base+101), + Assembler386.DESCR_FLOAT + 4*(base+110), Assembler386.CODE_HOLE, Assembler386.CODE_HOLE, Assembler386.DESCR_INT + 4*ebx.value, diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -4,7 +4,7 @@ import py from pypy.jit.metainterp.history import BoxInt, ConstInt,\ - BoxPtr, ConstPtr, TreeLoop + BoxPtr, ConstPtr, TreeLoop, TargetToken from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.codewriter import heaptracker from pypy.jit.codewriter.effectinfo import EffectInfo @@ -113,6 +113,8 @@ descr0 = cpu.fielddescrof(S, 'int') ptr0 = struct_ref + targettoken = TargetToken() + namespace = locals().copy() def test_basic(self): @@ -136,6 +138,7 @@ def test_bug_0(self): ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8] + label(i0, i1, i2, i3, i4, i5, i6, i7, i8, descr=targettoken) guard_value(i2, 1) [i2, i3, i4, i5, i6, i7, i0, i1, i8] guard_class(i4, 138998336) [i4, i5, i6, i7, i0, i1, i8] i11 = getfield_gc(i4, descr=descr0) @@ -163,7 +166,7 @@ guard_false(i32) [i4, i6, i7, i0, i1, i24] i33 = getfield_gc(i0, descr=descr0) guard_value(i33, ConstPtr(ptr0)) [i4, i6, i7, i0, i1, i33, i24] - jump(i0, i1, 1, 17, i4, ConstPtr(ptr0), i6, i7, i24) + jump(i0, i1, 1, 17, i4, ConstPtr(ptr0), i6, i7, i24, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0, 0, 0, 0, 0, 0], run=False) diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -71,6 +71,18 @@ ('mov', eax, s24), ('mov', s12, edi)] +def test_no_tmp_reg(): + assembler = MockAssembler() + s8 = frame_pos(0, INT) + s12 = frame_pos(13, INT) + s20 = frame_pos(20, INT) + s24 = frame_pos(221, INT) + remap_frame_layout(assembler, [s8, eax, s12], [s20, s24, edi], None) + assert assembler.ops == [('push', s8), + ('pop', s20), + ('mov', eax, s24), + ('mov', s12, edi)] + def test_reordering(): assembler = MockAssembler() s8 = frame_pos(8, INT) diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -5,10 +5,11 @@ def test_compile_bridge_not_deeper(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' loop = 
self.interpret(ops, [0]) assert self.getint(0) == 20 @@ -18,22 +19,22 @@ finish(i3, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - self.cpu.set_future_value_int(0, 0) - fail = self.run(loop) + fail = self.run(loop, 0) assert fail.identifier == 2 assert self.getint(0) == 21 def test_compile_bridge_deeper(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' loop = self.interpret(ops, [0]) - previous = loop.token._x86_frame_depth - assert loop.token._x86_param_depth == 0 + previous = loop._jitcelltoken.compiled_loop_token.frame_depth + assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 assert self.getint(0) == 20 ops = ''' [i1] @@ -42,19 +43,18 @@ i5 = int_add(i4, 1) i6 = int_add(i5, 1) i7 = int_add(i5, i4) + force_spill(i5) i8 = int_add(i7, 1) i9 = int_add(i8, 1) finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - descr = loop.operations[2].getdescr() + descr = loop.operations[3].getdescr() new = descr._x86_bridge_frame_depth - assert descr._x86_bridge_param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? - if IS_X86_32: - assert new > previous - self.cpu.set_future_value_int(0, 0) - fail = self.run(loop) + assert descr._x86_bridge_param_depth == 0 + # the force_spill() forces the stack to grow + assert new > previous + fail = self.run(loop, 0) assert fail.identifier == 2 assert self.getint(0) == 21 assert self.getint(1) == 22 @@ -64,28 +64,30 @@ def test_bridge_jump_to_other_loop(self): loop = self.interpret(''' [i0, i10, i11, i12, i13, i14, i15, i16] + label(i0, i10, i11, i12, i13, i14, i15, i16, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1, i10, i11, i12, i13, i14, i15, i16) - ''', [0]) + jump(i1, i10, i11, i12, i13, i14, i15, i16, descr=targettoken) + ''', [0, 0, 0, 0, 0, 0, 0, 0]) other_loop = self.interpret(''' - [i3] + [i3, i10, i11, i12, i13, i14, i15, i16] + label(i3, descr=targettoken2) guard_false(i3, descr=fdescr2) [i3] - jump(i3) - ''', [1]) + jump(i3, descr=targettoken2) + ''', [1, 0, 0, 0, 0, 0, 0, 0]) ops = ''' [i3] - jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=looptoken) + jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=targettoken) ''' - bridge = self.attach_bridge(ops, other_loop, 0, looptoken=loop.token) - self.cpu.set_future_value_int(0, 1) - fail = self.run(other_loop) + bridge = self.attach_bridge(ops, other_loop, 1) + fail = self.run(other_loop, 1, 0, 0, 0, 0, 0, 0, 0) assert fail.identifier == 1 def test_bridge_jumps_to_self_deeper(self): loop = self.interpret(''' [i0, i1, i2, i31, i32, i33] + label(i0, i1, i2, i31, i32, i33, descr=targettoken) i98 = same_as(0) i99 = same_as(1) i30 = int_add(i1, i2) @@ -94,8 +96,8 @@ guard_false(i4) [i98, i3] i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] - jump(i3, i30, 1, i30, i30, i30) - ''', [0]) + jump(i3, i30, 1, i30, i30, i30, descr=targettoken) + ''', [0, 0, 0, 0, 0, 0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' @@ -104,28 +106,28 @@ i8 = int_add(i3, 1) i6 = int_add(i8, i10) i7 = int_add(i3, i6) + force_spill(i6) + force_spill(i7) + force_spill(i8) i12 = int_add(i7, i8) i11 = int_add(i12, i6) - jump(i3, i12, i11, i10, i6, i7, descr=looptoken) + jump(i3, i12, i11, i10, i6, i7, descr=targettoken) ''' - bridge = self.attach_bridge(ops, loop, 5, looptoken=loop.token) - guard_op = loop.operations[5] - loop_frame_depth = loop.token._x86_frame_depth - 
assert loop.token._x86_param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? - if IS_X86_32: - assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth + loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth + bridge = self.attach_bridge(ops, loop, 6) + guard_op = loop.operations[6] + assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 + # the force_spill() forces the stack to grow + assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth assert guard_op.getdescr()._x86_bridge_param_depth == 0 - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 0) - self.cpu.set_future_value_int(2, 0) - self.run(loop) + self.run(loop, 0, 0, 0, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 def test_bridge_jumps_to_self_shallower(self): loop = self.interpret(''' [i0, i1, i2] + label(i0, i1, i2, descr=targettoken) i98 = same_as(0) i99 = same_as(1) i3 = int_add(i0, 1) @@ -133,19 +135,16 @@ guard_false(i4) [i98, i3] i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] - jump(i3, i1, i2) - ''', [0]) + jump(i3, i1, i2, descr=targettoken) + ''', [0, 0, 0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' [i97, i3] - jump(i3, 0, 1, descr=looptoken) + jump(i3, 0, 1, descr=targettoken) ''' - bridge = self.attach_bridge(ops, loop, 4, looptoken=loop.token) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 0) - self.cpu.set_future_value_int(2, 0) - self.run(loop) + bridge = self.attach_bridge(ops, loop, 5) + self.run(loop, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -4,7 +4,7 @@ import py from pypy.jit.metainterp.history import BoxInt, ConstInt,\ - BoxPtr, ConstPtr, LoopToken, BasicFailDescr + BoxPtr, ConstPtr, BasicFailDescr, JitCellToken, TargetToken from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.detect_cpu import getcpuclass @@ -96,10 +96,16 @@ raising_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, EffectInfo.MOST_GENERAL) + targettoken = TargetToken() + targettoken2 = TargetToken() fdescr1 = BasicFailDescr(1) fdescr2 = BasicFailDescr(2) fdescr3 = BasicFailDescr(3) + def setup_method(self, meth): + self.targettoken._x86_loop_code = 0 + self.targettoken2._x86_loop_code = 0 + def f1(x): return x+1 @@ -134,21 +140,31 @@ def interpret(self, ops, args, run=True): loop = self.parse(ops) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - for i, arg in enumerate(args): + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + arguments = [] + for arg in args: if isinstance(arg, int): - self.cpu.set_future_value_int(i, arg) + arguments.append(arg) elif isinstance(arg, float): arg = longlong.getfloatstorage(arg) - self.cpu.set_future_value_float(i, arg) + arguments.append(arg) else: assert isinstance(lltype.typeOf(arg), lltype.Ptr) llgcref = lltype.cast_opaque_ptr(llmemory.GCREF, arg) - self.cpu.set_future_value_ref(i, llgcref) + arguments.append(llgcref) + loop._jitcelltoken = looptoken if run: - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, *arguments) return loop + def prepare_loop(self, ops): + loop = self.parse(ops) + regalloc = RegAlloc(self.cpu.assembler, False) 
+ regalloc.prepare_loop(loop.inputargs, loop.operations, + loop.original_jitcell_token, []) + return regalloc + def getint(self, index): return self.cpu.get_latest_value_int(index) @@ -167,10 +183,7 @@ gcref = self.cpu.get_latest_value_ref(index) return lltype.cast_opaque_ptr(T, gcref) - def attach_bridge(self, ops, loop, guard_op_index, looptoken=None, **kwds): - if looptoken is not None: - self.namespace = self.namespace.copy() - self.namespace['looptoken'] = looptoken + def attach_bridge(self, ops, loop, guard_op_index, **kwds): guard_op = loop.operations[guard_op_index] assert guard_op.is_guard() bridge = self.parse(ops, **kwds) @@ -178,20 +191,21 @@ [box.type for box in guard_op.getfailargs()]) faildescr = guard_op.getdescr() self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations, - loop.token) + loop._jitcelltoken) return bridge - def run(self, loop): - return self.cpu.execute_token(loop.token) + def run(self, loop, *arguments): + return self.cpu.execute_token(loop._jitcelltoken, *arguments) class TestRegallocSimple(BaseTestRegalloc): def test_simple_loop(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' self.interpret(ops, [0]) assert self.getint(0) == 20 @@ -199,29 +213,30 @@ def test_two_loops_and_a_bridge(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_add(i0, 1) i5 = int_lt(i4, 20) guard_true(i5) [i4, i1, i2, i3] - jump(i4, i1, i2, i3) + jump(i4, i1, i2, i3, descr=targettoken) ''' loop = self.interpret(ops, [0, 0, 0, 0]) ops2 = ''' - [i5] + [i5, i6, i7, i8] + label(i5, descr=targettoken2) i1 = int_add(i5, 1) i3 = int_add(i1, 1) i4 = int_add(i3, 1) i2 = int_lt(i4, 30) guard_true(i2) [i4] - jump(i4) + jump(i4, descr=targettoken2) ''' - loop2 = self.interpret(ops2, [0]) + loop2 = self.interpret(ops2, [0, 0, 0, 0]) bridge_ops = ''' [i4] - jump(i4, i4, i4, i4, descr=looptoken) + jump(i4, i4, i4, i4, descr=targettoken) ''' - bridge = self.attach_bridge(bridge_ops, loop2, 4, looptoken=loop.token) - self.cpu.set_future_value_int(0, 0) - self.run(loop2) + bridge = self.attach_bridge(bridge_ops, loop2, 5) + self.run(loop2, 0, 0, 0, 0) assert self.getint(0) == 31 assert self.getint(1) == 30 assert self.getint(2) == 30 @@ -230,10 +245,11 @@ def test_pointer_arg(self): ops = ''' [i0, p0] + label(i0, p0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 10) guard_true(i2) [p0] - jump(i1, p0) + jump(i1, p0, descr=targettoken) ''' S = lltype.GcStruct('S') ptr = lltype.malloc(S) @@ -258,8 +274,7 @@ loop = self.interpret(ops, [0]) assert self.getint(0) == 1 bridge = self.attach_bridge(bridge_ops, loop, 2) - self.cpu.set_future_value_int(0, 0) - self.run(loop) + self.run(loop, 0) assert self.getint(0) == 1 def test_inputarg_unused(self): @@ -285,9 +300,7 @@ assert self.getint(0) == 0 assert self.getint(1) == 10 bridge = self.attach_bridge(bridge_ops, loop, 0) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - self.run(loop) + self.run(loop, 0, 10) assert self.getint(0) == 0 assert self.getint(1) == 10 @@ -304,17 +317,16 @@ finish(1, 2) ''' self.attach_bridge(bridge_ops, loop, 0) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 1) - self.run(loop) + self.run(loop, 0, 1) def test_spill_for_constant(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_add(3, i1) i5 = int_lt(i4, 30) guard_true(i5) [i0, i4, i2, i3] - jump(1, i4, 3, 4) + jump(1, i4, 3, 4, 
descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1, 30, 3, 4] @@ -322,31 +334,34 @@ def test_spill_for_constant_lshift(self): ops = ''' [i0, i2, i1, i3] + label(i0, i2, i1, i3, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, 3, i5, 4) + jump(i4, 3, i5, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, i5, 3, 4) + jump(i4, i5, 3, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] ops = ''' [i0, i3, i1, i2] + label(i0, i3, i1, i2, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, 4, i5, 3) + jump(i4, 4, i5, 3, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] @@ -354,11 +369,12 @@ def test_result_selected_reg_via_neg(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i6 = int_neg(i2) i7 = int_add(1, i1) i4 = int_lt(i7, 10) guard_true(i4) [i0, i6, i7] - jump(1, i7, i2, i6) + jump(1, i7, i2, i6, descr=targettoken) ''' self.interpret(ops, [0, 0, 3, 0]) assert self.getints(3) == [1, -3, 10] @@ -366,11 +382,12 @@ def test_compare_memory_result_survives(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_lt(i0, i1) i5 = int_add(i3, 1) i6 = int_lt(i5, 30) guard_true(i6) [i4] - jump(i0, i1, i4, i5) + jump(i0, i1, i4, i5, descr=targettoken) ''' self.interpret(ops, [0, 10, 0, 0]) assert self.getint(0) == 1 @@ -378,12 +395,13 @@ def test_jump_different_args(self): ops = ''' [i0, i15, i16, i18, i1, i2, i3] + label(i0, i15, i16, i18, i1, i2, i3, descr=targettoken) i4 = int_add(i3, 1) i5 = int_lt(i4, 20) guard_true(i5) [i2, i1] - jump(i0, i18, i15, i16, i2, i1, i4) + jump(i0, i18, i15, i16, i2, i1, i4, descr=targettoken) ''' - self.interpret(ops, [0, 1, 2, 3]) + self.interpret(ops, [0, 1, 2, 3, 0, 0, 0]) def test_op_result_unused(self): ops = ''' @@ -417,11 +435,24 @@ finish(i0, i1, i2, i3, i4, i5, i6, i7, i8) ''' self.attach_bridge(bridge_ops, loop, 1) - for i in range(9): - self.cpu.set_future_value_int(i, i) - self.run(loop) + self.run(loop, 0, 1, 2, 3, 4, 5, 6, 7, 8) assert self.getints(9) == range(9) + def test_loopargs(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + if IS_X86_64: + assert len(regalloc.rm.reg_bindings) == 4 + assert len(regalloc.fm.bindings) == 0 + else: + assert len(regalloc.rm.reg_bindings) == 0 + assert len(regalloc.fm.bindings) == 4 + + class TestRegallocCompOps(BaseTestRegalloc): def test_cmp_op_0(self): @@ -438,6 +469,7 @@ class TestRegallocMoreRegisters(BaseTestRegalloc): cpu = BaseTestRegalloc.cpu + targettoken = TargetToken() S = lltype.GcStruct('S', ('field', lltype.Char)) fielddescr = cpu.fielddescrof(S, 'field') @@ -510,6 +542,7 @@ def test_division_optimized(self): ops = ''' [i7, i6] + label(i7, i6, descr=targettoken) i18 = int_floordiv(i7, i6) i19 = int_xor(i7, i6) i21 = int_lt(i19, 0) @@ -517,7 +550,7 @@ i23 = int_is_true(i22) i24 = int_eq(i6, 4) guard_false(i24) [i18] - jump(i18, i6) + jump(i18, i6, descr=targettoken) ''' self.interpret(ops, [10, 4]) assert self.getint(0) == 2 @@ -586,9 +619,10 @@ i10 = 
call(ConstClass(f1ptr), i0, descr=f1_calldescr) finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) - assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] - assert loop.token._x86_param_depth == self.expected_param_depth(1) + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) + assert self.getints(10) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9] + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(1) def test_two_calls(self): ops = ''' @@ -597,9 +631,10 @@ i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) finish(i11, i1, i2, i3, i4, i5, i6, i7, i8, i9) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) - assert self.getints(11) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] - assert loop.token._x86_param_depth == self.expected_param_depth(2) + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) + assert self.getints(10) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9] + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(2) def test_call_many_arguments(self): # NB: The first and last arguments in the call are constants. This @@ -612,7 +647,8 @@ ''' loop = self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9]) assert self.getint(0) == 55 - assert loop.token._x86_param_depth == self.expected_param_depth(10) + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(10) def test_bridge_calls_1(self): ops = ''' @@ -632,9 +668,7 @@ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) - self.cpu.set_future_value_int(0, 4) - self.cpu.set_future_value_int(1, 7) - self.run(loop) + self.run(loop, 4, 7) assert self.getint(0) == 5*7 def test_bridge_calls_2(self): @@ -655,8 +689,6 @@ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) - self.cpu.set_future_value_int(0, 4) - self.cpu.set_future_value_int(1, 7) - self.run(loop) + self.run(loop, 4, 7) assert self.getint(0) == 29 diff --git a/pypy/jit/backend/x86/test/test_regalloc2.py b/pypy/jit/backend/x86/test/test_regalloc2.py --- a/pypy/jit/backend/x86/test/test_regalloc2.py +++ b/pypy/jit/backend/x86/test/test_regalloc2.py @@ -1,6 +1,6 @@ import py from pypy.jit.metainterp.history import ResOperation, BoxInt, ConstInt,\ - BoxPtr, ConstPtr, BasicFailDescr, LoopToken + BoxPtr, ConstPtr, BasicFailDescr, JitCellToken from pypy.jit.metainterp.resoperation import rop from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.arch import WORD @@ -20,10 +20,9 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 9) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, 9) assert cpu.get_latest_value_int(0) == (9 >> 3) assert cpu.get_latest_value_int(1) == (~18) @@ -43,10 +42,9 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, -10) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, -10) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == -1000 assert cpu.get_latest_value_int(2) == 1 @@ -140,19 +138,9 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - 
cpu.set_future_value_int(0, -13) - cpu.set_future_value_int(1, 10) - cpu.set_future_value_int(2, 10) - cpu.set_future_value_int(3, 8) - cpu.set_future_value_int(4, -8) - cpu.set_future_value_int(5, -16) - cpu.set_future_value_int(6, -18) - cpu.set_future_value_int(7, 46) - cpu.set_future_value_int(8, -12) - cpu.set_future_value_int(9, 26) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, -13, 10, 10, 8, -8, -16, -18, 46, -12, 26) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == 0 assert cpu.get_latest_value_int(2) == 0 @@ -255,19 +243,9 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 17) - cpu.set_future_value_int(1, -20) - cpu.set_future_value_int(2, -6) - cpu.set_future_value_int(3, 6) - cpu.set_future_value_int(4, 1) - cpu.set_future_value_int(5, 13) - cpu.set_future_value_int(6, 13) - cpu.set_future_value_int(7, 9) - cpu.set_future_value_int(8, 49) - cpu.set_future_value_int(9, 8) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, 17, -20, -6, 6, 1, 13, 13, 9, 49, 8) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == 8 assert cpu.get_latest_value_int(2) == 1 diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -1,9 +1,10 @@ import py from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr, rclass from pypy.rpython.annlowlevel import llhelper -from pypy.jit.metainterp.history import ResOperation, LoopToken +from pypy.jit.metainterp.history import ResOperation, TargetToken, JitCellToken from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstFloat, - ConstPtr, Box, BoxFloat, BasicFailDescr) + ConstPtr, Box, BoxFloat, + BasicFailDescr) from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.arch import WORD from pypy.jit.backend.x86.rx86 import fits_in_32bits @@ -279,13 +280,9 @@ descr=BasicFailDescr()), ] ops[-2].setfailargs([i1]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([b], ops, looptoken) - if op == rop.INT_IS_TRUE: - self.cpu.set_future_value_int(0, b.value) - else: - self.cpu.set_future_value_ref(0, b.value) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, b.value) result = self.cpu.get_latest_value_int(0) if guard == rop.GUARD_FALSE: assert result == execute(self.cpu, None, @@ -329,11 +326,10 @@ ] ops[-2].setfailargs([i1]) inputargs = [i for i in (a, b) if isinstance(i, Box)] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, ops, looptoken) - for i, box in enumerate(inputargs): - self.cpu.set_future_value_int(i, box.value) - self.cpu.execute_token(looptoken) + inputvalues = [box.value for box in inputargs] + self.cpu.execute_token(looptoken, *inputvalues) result = self.cpu.get_latest_value_int(0) expected = execute(self.cpu, None, op, None, a, b).value if guard == rop.GUARD_FALSE: @@ -353,9 +349,10 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() + targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.number = 17 class FakeString(object): def __init__(self, val): @@ -365,14 +362,15 @@ return self.val operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), 
ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0], None), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[3].setfailargs([i1]) + operations[-2].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) name, loopaddress, loopsize = agent.functions[0] assert name == "Loop # 17: hello (loop counter 0)" @@ -385,7 +383,7 @@ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0], None), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -397,8 +395,7 @@ assert address >= loopaddress + loopsize assert size >= 10 # randomish number - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -408,11 +405,13 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] debug._log = dlog = debug.DebugLog() @@ -499,12 +498,10 @@ ops[3].setfailargs([]) ops[5].setfailargs([]) ops[7].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) - self.cpu.set_future_value_int(0, 123450) - self.cpu.set_future_value_int(1, 123408) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 123450, 123408) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 42 assert self.cpu.get_latest_value_int(1) == 42 @@ -523,19 +520,20 @@ loop = """ [i0] + label(i0, descr=targettoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1) + jump(i1, descr=targettoken) """ - ops = parse(loop) + ops = parse(loop, namespace={'targettoken': TargetToken()}) debug._log = dlog = debug.DebugLog() try: self.cpu.assembler.set_debug(True) - self.cpu.compile_loop(ops.inputargs, ops.operations, ops.token) - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(ops.token) + looptoken = JitCellToken() + self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) + self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] assert struct.i == 10 @@ -547,16 +545,17 @@ def test_debugger_checksum(self): loop = """ [i0] + label(i0, descr=targettoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1) + jump(i1, descr=targettoken) """ - ops = parse(loop) + ops = parse(loop, namespace={'targettoken': TargetToken()}) self.cpu.assembler.set_debug(True) - self.cpu.compile_loop(ops.inputargs, ops.operations, ops.token) - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(ops.token) - assert ops.token._x86_debug_checksum == sum([op.getopnum() + looptoken = JitCellToken() + 
self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) + self.cpu.execute_token(looptoken, 0) + assert looptoken._x86_debug_checksum == sum([op.getopnum() for op in ops.operations]) diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -457,6 +457,46 @@ def test_compile_framework_7(self): self.run('compile_framework_7') + def define_compile_framework_7_interior(cls): + # Array of structs containing pointers (test the write barrier + # for setinteriorfield_gc) + S = lltype.GcStruct('S', ('i', lltype.Signed)) + A = lltype.GcArray(lltype.Struct('entry', ('x', lltype.Ptr(S)), + ('y', lltype.Ptr(S)), + ('z', lltype.Ptr(S)))) + class Glob: + a = lltype.nullptr(A) + glob = Glob() + # + def make_s(i): + s = lltype.malloc(S) + s.i = i + return s + # + @unroll_safe + def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): + a = glob.a + if not a: + a = glob.a = lltype.malloc(A, 10) + i = 0 + while i < 10: + a[i].x = make_s(n + i * 100 + 1) + a[i].y = make_s(n + i * 100 + 2) + a[i].z = make_s(n + i * 100 + 3) + i += 1 + i = 0 + while i < 10: + check(a[i].x.i == n + i * 100 + 1) + check(a[i].y.i == n + i * 100 + 2) + check(a[i].z.i == n + i * 100 + 3) + i += 1 + n -= x.foo + return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s + return None, f, None + + def test_compile_framework_7_interior(self): + self.run('compile_framework_7_interior') + def define_compile_framework_8(cls): # Array of pointers, of unknown length (test write_barrier_from_array) def before(n, x): diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -241,12 +241,15 @@ return op.opname == 'jit_force_quasi_immutable' class RandomEffectsAnalyzer(BoolGraphAnalyzer): - def analyze_direct_call(self, graph, seen=None): - if hasattr(graph, "func") and hasattr(graph.func, "_ptr"): - if graph.func._ptr._obj.random_effects_on_gcobjs: + def analyze_external_call(self, op, seen=None): + try: + funcobj = op.args[0].value._obj + if funcobj.random_effects_on_gcobjs: return True - return super(RandomEffectsAnalyzer, self).analyze_direct_call(graph, - seen) + except (AttributeError, lltype.DelayedPointer): + return True # better safe than sorry + return super(RandomEffectsAnalyzer, self).analyze_external_call( + op, seen) def analyze_simple_operation(self, op, graphinfo): return False diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -15,6 +15,8 @@ from pypy.translator.simplify import get_funcobj from pypy.translator.unsimplify import varoftype +class UnsupportedMallocFlags(Exception): + pass def transform_graph(graph, cpu=None, callcontrol=None, portal_jd=None): """Transform a control flow graph to make it suitable for @@ -205,7 +207,24 @@ if op.args[0] in self.vable_array_vars: self.vable_array_vars[op.result]= self.vable_array_vars[op.args[0]] - rewrite_op_cast_pointer = rewrite_op_same_as + def rewrite_op_cast_pointer(self, op): + newop = self.rewrite_op_same_as(op) + assert newop is None + return + # disabled for now + if (self._is_rclass_instance(op.args[0]) and + self._is_rclass_instance(op.result)): + FROM = op.args[0].concretetype.TO + TO = op.result.concretetype.TO + if lltype._castdepth(TO, FROM) > 0: + vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, TO) + 
const_vtable = Constant(vtable, lltype.typeOf(vtable)) + return [None, # hack, do the right renaming from op.args[0] to op.result + SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + + def rewrite_op_jit_record_known_class(self, op): + return SpaceOperation("record_known_class", [op.args[0], op.args[1]], None) + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass @@ -479,13 +498,29 @@ else: log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) + def _rewrite_raw_malloc(self, op, name, args): + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) + TYPE = op.args[0].value + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, args, + extra = (TYPE,), + extrakey = TYPE) + def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': - ARRAY = op.args[0].value - return self._do_builtin_call(op, 'raw_malloc', - [op.args[2]], - extra = (ARRAY,), - extrakey = ARRAY) + return self._rewrite_raw_malloc(op, 'raw_malloc_varsize', + [op.args[2]]) if op.args[0].value == rstr.STR: return SpaceOperation('newstr', [op.args[2]], op.result) elif op.args[0].value == rstr.UNICODE: @@ -498,11 +533,18 @@ op.result) def rewrite_op_free(self, op): - flags = op.args[1].value - assert flags['flavor'] == 'raw' - ARRAY = op.args[0].concretetype.TO - return self._do_builtin_call(op, 'raw_free', [op.args[0]], - extra = (ARRAY,), extrakey = ARRAY) + d = op.args[1].value.copy() + assert d['flavor'] == 'raw' + d.pop('flavor') + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) + STRUCT = op.args[0].concretetype.TO + name = 'raw_free' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[0]], + extra = (STRUCT,), extrakey = STRUCT) def rewrite_op_getarrayitem(self, op): ARRAY = op.args[0].concretetype.TO @@ -703,6 +745,9 @@ return [op0, op1] def rewrite_op_malloc(self, op): + if op.args[1].value['flavor'] == 'raw': + return self._rewrite_raw_malloc(op, 'raw_malloc_fixedsize', []) + # assert op.args[1].value == {'flavor': 'gc'} STRUCT = op.args[0].value vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, STRUCT) @@ -1053,35 +1098,20 @@ # jit.codewriter.support. 
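# (Illustrative sketch, not part of the patch: in this changeset the unsigned
# 'ullong_*' entries are moved out of the signed 'llong_*' group and into
# their own group further down, mapped to U-prefixed oopspecs and dedicated
# '_ll_*_ullong_*' helpers, because unsigned comparisons and the right shift
# are not interchangeable with the signed variants.  In plain rarithmetic
# terms, assuming the usual import path:
#     from pypy.rlib.rarithmetic import r_longlong, r_ulonglong
#     r_longlong(-1) < r_longlong(0)      # True:  signed LT
#     r_ulonglong(-1) < r_ulonglong(0)    # False: -1 wraps to 2**64-1, unsigned ULT
#     r_ulonglong(2**64 - 1) >> 1         # logical right shift, no sign extension
# )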
for _op, _oopspec in [('llong_invert', 'INVERT'), - ('ullong_invert', 'INVERT'), ('llong_lt', 'LT'), ('llong_le', 'LE'), ('llong_eq', 'EQ'), ('llong_ne', 'NE'), ('llong_gt', 'GT'), ('llong_ge', 'GE'), - ('ullong_lt', 'ULT'), - ('ullong_le', 'ULE'), - ('ullong_eq', 'EQ'), - ('ullong_ne', 'NE'), - ('ullong_gt', 'UGT'), - ('ullong_ge', 'UGE'), ('llong_add', 'ADD'), ('llong_sub', 'SUB'), ('llong_mul', 'MUL'), ('llong_and', 'AND'), ('llong_or', 'OR'), ('llong_xor', 'XOR'), - ('ullong_add', 'ADD'), - ('ullong_sub', 'SUB'), - ('ullong_mul', 'MUL'), - ('ullong_and', 'AND'), - ('ullong_or', 'OR'), - ('ullong_xor', 'XOR'), ('llong_lshift', 'LSHIFT'), ('llong_rshift', 'RSHIFT'), - ('ullong_lshift', 'LSHIFT'), - ('ullong_rshift', 'URSHIFT'), ('cast_int_to_longlong', 'FROM_INT'), ('truncate_longlong_to_int', 'TO_INT'), ('cast_float_to_longlong', 'FROM_FLOAT'), @@ -1104,6 +1134,21 @@ ('cast_uint_to_ulonglong', 'FROM_UINT'), ('cast_float_to_ulonglong', 'FROM_FLOAT'), ('cast_ulonglong_to_float', 'U_TO_FLOAT'), + ('ullong_invert', 'INVERT'), + ('ullong_lt', 'ULT'), + ('ullong_le', 'ULE'), + ('ullong_eq', 'EQ'), + ('ullong_ne', 'NE'), + ('ullong_gt', 'UGT'), + ('ullong_ge', 'UGE'), + ('ullong_add', 'ADD'), + ('ullong_sub', 'SUB'), + ('ullong_mul', 'MUL'), + ('ullong_and', 'AND'), + ('ullong_or', 'OR'), + ('ullong_xor', 'XOR'), + ('ullong_lshift', 'LSHIFT'), + ('ullong_rshift', 'URSHIFT'), ]: exec py.code.Source(''' def rewrite_op_%s(self, op): @@ -1134,7 +1179,7 @@ def rewrite_op_llong_is_true(self, op): v = varoftype(op.args[0].concretetype) - op0 = SpaceOperation('cast_int_to_longlong', + op0 = SpaceOperation('cast_primitive', [Constant(0, lltype.Signed)], v) args = [op.args[0], v] diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -258,6 +258,9 @@ y = ~r_ulonglong(xll) return u_to_longlong(y) +def _ll_1_ullong_invert(xull): + return ~xull + def _ll_2_llong_lt(xll, yll): return xll < yll @@ -276,16 +279,22 @@ def _ll_2_llong_ge(xll, yll): return xll >= yll -def _ll_2_llong_ult(xull, yull): +def _ll_2_ullong_eq(xull, yull): + return xull == yull + +def _ll_2_ullong_ne(xull, yull): + return xull != yull + +def _ll_2_ullong_ult(xull, yull): return xull < yull -def _ll_2_llong_ule(xull, yull): +def _ll_2_ullong_ule(xull, yull): return xull <= yull -def _ll_2_llong_ugt(xull, yull): +def _ll_2_ullong_ugt(xull, yull): return xull > yull -def _ll_2_llong_uge(xull, yull): +def _ll_2_ullong_uge(xull, yull): return xull >= yull def _ll_2_llong_add(xll, yll): @@ -312,14 +321,41 @@ z = r_ulonglong(xll) ^ r_ulonglong(yll) return u_to_longlong(z) +def _ll_2_ullong_add(xull, yull): + z = (xull) + (yull) + return (z) + +def _ll_2_ullong_sub(xull, yull): + z = (xull) - (yull) + return (z) + +def _ll_2_ullong_mul(xull, yull): + z = (xull) * (yull) + return (z) + +def _ll_2_ullong_and(xull, yull): + z = (xull) & (yull) + return (z) + +def _ll_2_ullong_or(xull, yull): + z = (xull) | (yull) + return (z) + +def _ll_2_ullong_xor(xull, yull): + z = (xull) ^ (yull) + return (z) + def _ll_2_llong_lshift(xll, y): z = r_ulonglong(xll) << y return u_to_longlong(z) +def _ll_2_ullong_lshift(xull, y): + return xull << y + def _ll_2_llong_rshift(xll, y): return xll >> y -def _ll_2_llong_urshift(xull, y): +def _ll_2_ullong_urshift(xull, y): return xull >> y def _ll_1_llong_from_int(x): @@ -563,15 +599,75 @@ return p return _ll_0_alloc_with_del - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, 
flavor='raw') - return _ll_1_raw_malloc + def build_raw_malloc_varsize_builder(zero=False, + add_memory_pressure=False, + track_allocation=True): + def build_ll_1_raw_malloc_varsize(ARRAY): + def _ll_1_raw_malloc_varsize(n): + return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) + return _ll_1_raw_malloc_varsize + return build_ll_1_raw_malloc_varsize - def build_ll_1_raw_free(ARRAY): - def _ll_1_raw_free(p): - lltype.free(p, flavor='raw') - return _ll_1_raw_free + build_ll_1_raw_malloc_varsize = ( + build_raw_malloc_varsize_builder()) + build_ll_1_raw_malloc_varsize_zero = ( + build_raw_malloc_varsize_builder(zero=True)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure = ( + build_raw_malloc_varsize_builder(zero=True, add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_add_memory_pressure = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_no_track_allocation = ( + build_raw_malloc_varsize_builder(track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, add_memory_pressure=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True, track_allocation=False)) + + def build_raw_malloc_fixedsize_builder(zero=False, + add_memory_pressure=False, + track_allocation=True): + def build_ll_0_raw_malloc_fixedsize(STRUCT): + def _ll_0_raw_malloc_fixedsize(): + return lltype.malloc(STRUCT, flavor='raw', zero=zero, + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) + return _ll_0_raw_malloc_fixedsize + return build_ll_0_raw_malloc_fixedsize + + build_ll_0_raw_malloc_fixedsize = ( + build_raw_malloc_fixedsize_builder()) + build_ll_0_raw_malloc_fixedsize_zero = ( + build_raw_malloc_fixedsize_builder(zero=True)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(zero=True, add_memory_pressure=True)) + build_ll_0_raw_malloc_fixedsize_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True)) + build_ll_0_raw_malloc_fixedsize_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, add_memory_pressure=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True, track_allocation=False)) + + def build_raw_free_builder(track_allocation=True): + def build_ll_1_raw_free(ARRAY): + def _ll_1_raw_free(p): + lltype.free(p, flavor='raw', + track_allocation=track_allocation) + return _ll_1_raw_free + return build_ll_1_raw_free + + build_ll_1_raw_free = ( + build_raw_free_builder()) + build_ll_1_raw_free_no_track_allocation = ( + build_raw_free_builder(track_allocation=False)) + class OOtypeHelpers: diff --git a/pypy/jit/codewriter/test/test_call.py b/pypy/jit/codewriter/test/test_call.py --- a/pypy/jit/codewriter/test/test_call.py +++ 
b/pypy/jit/codewriter/test/test_call.py @@ -192,3 +192,21 @@ [op] = block.operations call_descr = cc.getcalldescr(op) assert call_descr.extrainfo.has_random_effects() + +def test_random_effects_on_stacklet_switch(): + from pypy.jit.backend.llgraph.runner import LLtypeCPU + from pypy.rlib._rffi_stacklet import switch, thread_handle, handle + @jit.dont_look_inside + def f(): + switch(rffi.cast(thread_handle, 0), rffi.cast(handle, 0)) + + rtyper = support.annotate(f, []) + jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0]) + cc = CallControl(LLtypeCPU(rtyper), jitdrivers_sd=[jitdriver_sd]) + res = cc.find_all_graphs(FakePolicy()) + + [f_graph] = [x for x in res if x.func is f] + [block, _] = list(f_graph.iterblocks()) + op = block.operations[-1] + call_descr = cc.getcalldescr(op) + assert call_descr.extrainfo.has_random_effects() diff --git a/pypy/jit/codewriter/test/test_codewriter.py b/pypy/jit/codewriter/test/test_codewriter.py --- a/pypy/jit/codewriter/test/test_codewriter.py +++ b/pypy/jit/codewriter/test/test_codewriter.py @@ -217,7 +217,7 @@ cw.make_jitcodes(verbose=True) # s = jitdriver_sd.mainjitcode.dump() - assert 'residual_call_ir_i $<* fn _ll_1_raw_malloc__Signed>' in s + assert 'residual_call_ir_i $<* fn _ll_1_raw_malloc_varsize__Signed>' in s assert 'setarrayitem_raw_i' in s assert 'getarrayitem_raw_i' in s assert 'residual_call_ir_v $<* fn _ll_1_raw_free__arrayPtr>' in s diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -1,3 +1,5 @@ + +import py import random try: from itertools import product @@ -15,12 +17,12 @@ from pypy.objspace.flow.model import FunctionGraph, Block, Link from pypy.objspace.flow.model import SpaceOperation, Variable, Constant -from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.lltypesystem.module import ll_math from pypy.translator.unsimplify import varoftype from pypy.jit.codewriter import heaptracker, effectinfo from pypy.jit.codewriter.flatten import ListOfKind -from pypy.jit.codewriter.jtransform import Transformer +from pypy.jit.codewriter.jtransform import Transformer, UnsupportedMallocFlags from pypy.jit.metainterp.history import getkind def const(x): @@ -538,6 +540,73 @@ assert op1.opname == '-live-' assert op1.args == [] +def test_raw_malloc(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw'}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc_varsize' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_zero(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc_varsize_zero' # pseudo-fn as a str + assert op1.opname == '-live-' + assert 
op1.args == [] + +def test_raw_malloc_unsupported_flag(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'unsupported_flag': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) + +def test_raw_malloc_fixedsize(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc', [Constant(S, lltype.Void), flags], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_r_i' + assert op0.args[0].value == 'raw_malloc_fixedsize_zero' #pseudo-fn as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_free(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + for flag in [True, False]: + flags = Constant({'flavor': 'raw', 'track_allocation': flag}, + lltype.Void) + op = SpaceOperation('free', [varoftype(lltype.Ptr(S)), flags], + varoftype(lltype.Void)) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_v' + if flag: + pseudo_op_name = 'raw_free' + else: + pseudo_op_name = 'raw_free_no_track_allocation' + assert op0.args[0].value == pseudo_op_name # pseudo-function as a str + assert op1.opname == '-live-' + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address @@ -1140,4 +1209,4 @@ assert op1.opname == 'mark_opaque_ptr' assert op1.args == [v1] assert op1.result is None - assert op2 is None \ No newline at end of file + assert op2 is None diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py --- a/pypy/jit/codewriter/test/test_longlong.py +++ b/pypy/jit/codewriter/test/test_longlong.py @@ -78,7 +78,7 @@ oplist = tr.rewrite_operation(op) assert len(oplist) == 2 assert oplist[0].opname == 'residual_call_irf_f' - assert oplist[0].args[0].value == 'llong_from_int' + assert oplist[0].args[0].value == opname.split('_')[0]+'_from_int' assert oplist[0].args[1] == 'calldescr-84' assert list(oplist[0].args[2]) == [const(0)] assert list(oplist[0].args[3]) == [] diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -518,6 +518,9 @@ @arguments("r") def bhimpl_mark_opaque_ptr(a): pass + @arguments("r", "i") + def bhimpl_record_known_class(a, b): + pass @arguments("i", returns="i") def bhimpl_int_copy(a): @@ -1501,7 +1504,6 @@ all_virtuals=None): from pypy.jit.metainterp.resume import blackhole_from_resumedata #debug_start('jit-blackhole') - metainterp_sd.profiler.start_blackhole() blackholeinterp = blackhole_from_resumedata( metainterp_sd.blackholeinterpbuilder, jitdriver_sd, @@ -1515,10 +1517,9 @@ current_exc = blackholeinterp._prepare_resume_from_failure( resumedescr.guard_opnum, dont_change_position) - try: - _run_forever(blackholeinterp, current_exc) - finally: - metainterp_sd.profiler.end_blackhole() + #try: + _run_forever(blackholeinterp, current_exc) + #finally: #debug_stop('jit-blackhole') def convert_and_run_from_pyjitpl(metainterp, raising_exception=False): @@ -1526,7 +1527,6 @@ # 'metainterp.framestack'. 
#debug_start('jit-blackhole') metainterp_sd = metainterp.staticdata - metainterp_sd.profiler.start_blackhole() nextbh = None for frame in metainterp.framestack: curbh = metainterp_sd.blackholeinterpbuilder.acquire_interp() @@ -1543,8 +1543,7 @@ firstbh.exception_last_value = current_exc current_exc = lltype.nullptr(rclass.OBJECTPTR.TO) # - try: - _run_forever(firstbh, current_exc) - finally: - metainterp_sd.profiler.end_blackhole() + #try: + _run_forever(firstbh, current_exc) + #finally: #debug_stop('jit-blackhole') diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -9,12 +9,13 @@ from pypy.tool.sourcetools import func_with_new_name from pypy.jit.metainterp.resoperation import ResOperation, rop, get_deep_immutable_oplist -from pypy.jit.metainterp.history import TreeLoop, Box, History, LoopToken +from pypy.jit.metainterp.history import TreeLoop, Box, History, JitCellToken, TargetToken from pypy.jit.metainterp.history import AbstractFailDescr, BoxInt -from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const +from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const, ConstInt from pypy.jit.metainterp import history from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.jit.metainterp.optimize import InvalidLoop +from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resume import NUMBERING, PENDINGFIELDSP from pypy.jit.codewriter import heaptracker, longlong @@ -23,7 +24,7 @@ from pypy.jit.metainterp.jitprof import ABORT_BRIDGE raise SwitchToBlackhole(ABORT_BRIDGE) -def show_loop(metainterp_sd, loop=None, error=None): +def show_procedures(metainterp_sd, procedure=None, error=None): # debugging if option.view or option.viewloops: if error: @@ -32,11 +33,12 @@ errmsg += ': ' + str(error) else: errmsg = None - if loop is None: # or type(loop) is TerminatingLoop: - extraloops = [] + if procedure is None: + extraprocedures = [] else: - extraloops = [loop] - metainterp_sd.stats.view(errmsg=errmsg, extraloops=extraloops) + extraprocedures = [procedure] + metainterp_sd.stats.view(errmsg=errmsg, + extraprocedures=extraprocedures) def create_empty_loop(metainterp, name_prefix=''): name = metainterp.staticdata.stats.name_for_new_loop() @@ -45,131 +47,261 @@ return loop -def make_loop_token(nb_args, jitdriver_sd): - loop_token = LoopToken() - loop_token.outermost_jitdriver_sd = jitdriver_sd - return loop_token +def make_jitcell_token(jitdriver_sd): + jitcell_token = JitCellToken() + jitcell_token.outermost_jitdriver_sd = jitdriver_sd + return jitcell_token def record_loop_or_bridge(metainterp_sd, loop): """Do post-backend recordings and cleanups on 'loop'. 
""" - # get the original loop token (corresponding to 'loop', or if that is - # a bridge, to the loop that this bridge belongs to) - looptoken = loop.token - assert looptoken is not None + # get the original jitcell token corresponding to jitcell form which + # this trace starts + original_jitcell_token = loop.original_jitcell_token + assert original_jitcell_token is not None if metainterp_sd.warmrunnerdesc is not None: # for tests - assert looptoken.generation > 0 # has been registered with memmgr - wref = weakref.ref(looptoken) + assert original_jitcell_token.generation > 0 # has been registered with memmgr + wref = weakref.ref(original_jitcell_token) for op in loop.operations: descr = op.getdescr() if isinstance(descr, ResumeDescr): descr.wref_original_loop_token = wref # stick it there n = descr.index if n >= 0: # we also record the resumedescr number - looptoken.compiled_loop_token.record_faildescr_index(n) - elif isinstance(descr, LoopToken): - # for a JUMP or a CALL_ASSEMBLER: record it as a potential jump. + original_jitcell_token.compiled_loop_token.record_faildescr_index(n) + elif isinstance(descr, JitCellToken): + # for a CALL_ASSEMBLER: record it as a potential jump. + if descr is not original_jitcell_token: + original_jitcell_token.record_jump_to(descr) + descr.exported_state = None + op._descr = None # clear reference, mostly for tests + elif isinstance(descr, TargetToken): + # for a JUMP: record it as a potential jump. # (the following test is not enough to prevent more complicated # cases of cycles, but at least it helps in simple tests of # test_memgr.py) - if descr is not looptoken: - looptoken.record_jump_to(descr) - op._descr = None # clear reference, mostly for tests + if descr.original_jitcell_token is not original_jitcell_token: + assert descr.original_jitcell_token is not None + original_jitcell_token.record_jump_to(descr.original_jitcell_token) + # exported_state is clear by optimizeopt when the short preamble is + # constrcucted. if that did not happen the label should not show up + # in a trace that will be used + assert descr.exported_state is None if not we_are_translated(): - op._jumptarget_number = descr.number + op._descr_wref = weakref.ref(op._descr) + op._descr = None # clear reference to prevent the history.Stats + # from keeping the loop alive during tests # record this looptoken on the QuasiImmut used in the code if loop.quasi_immutable_deps is not None: for qmut in loop.quasi_immutable_deps: qmut.register_loop_token(wref) # XXX maybe we should clear the dictionary here # mostly for tests: make sure we don't keep a reference to the LoopToken - loop.token = None + loop.original_jitcell_token = None if not we_are_translated(): - loop._looptoken_number = looptoken.number + loop._looptoken_number = original_jitcell_token.number # ____________________________________________________________ -def compile_new_loop(metainterp, old_loop_tokens, greenkey, start, - start_resumedescr, full_preamble_needed=True): - """Try to compile a new loop by closing the current history back +def compile_loop(metainterp, greenkey, start, + inputargs, jumpargs, + start_resumedescr, full_preamble_needed=True): + """Try to compile a new procedure by closing the current history back to the first operation. 
""" - from pypy.jit.metainterp.optimize import optimize_loop + from pypy.jit.metainterp.optimizeopt import optimize_trace history = metainterp.history - loop = create_empty_loop(metainterp) - loop.inputargs = history.inputargs[:] + metainterp_sd = metainterp.staticdata + jitdriver_sd = metainterp.jitdriver_sd + + if False: + part = partial_trace + assert False + procedur_token = metainterp.get_procedure_token(greenkey) + assert procedure_token + all_target_tokens = [] + else: + jitcell_token = make_jitcell_token(jitdriver_sd) + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + h_ops = history.operations + part.start_resumedescr = start_resumedescr + part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ + [h_ops[i].clone() for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, jumpargs, None, descr=jitcell_token)] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens = [target_token] + + loop = create_empty_loop(metainterp) + loop.inputargs = part.inputargs + loop.operations = part.operations + loop.quasi_immutable_deps = {} + if part.quasi_immutable_deps: + loop.quasi_immutable_deps.update(part.quasi_immutable_deps) + while part.operations[-1].getopnum() == rop.LABEL: + inliner = Inliner(inputargs, jumpargs) + part.quasi_immutable_deps = None + part.operations = [part.operations[-1]] + \ + [inliner.inline_op(h_ops[i]) for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jumpargs], + None, descr=jitcell_token)] + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens.append(target_token) + inputargs = jumpargs + jumpargs = part.operations[-1].getarglist() + + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + + loop.operations = loop.operations[:-1] + part.operations + if part.quasi_immutable_deps: + loop.quasi_immutable_deps.update(part.quasi_immutable_deps) + + if not loop.quasi_immutable_deps: + loop.quasi_immutable_deps = None for box in loop.inputargs: assert isinstance(box, Box) - # make a copy, because optimize_loop can mutate the ops and descrs - h_ops = history.operations - loop.operations = [h_ops[i].clone() for i in range(start, len(h_ops))] + + loop.original_jitcell_token = jitcell_token + for label in all_target_tokens: + assert isinstance(label, TargetToken) + label.original_jitcell_token = jitcell_token + if label.virtual_state and label.short_preamble: + metainterp_sd.logger_ops.log_short_preamble([], label.short_preamble) + jitcell_token.target_tokens = all_target_tokens + send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, "loop") + record_loop_or_bridge(metainterp_sd, loop) + return all_target_tokens[0] + +def compile_retrace(metainterp, greenkey, start, + inputargs, jumpargs, + start_resumedescr, partial_trace, resumekey): + """Try to compile a new procedure by closing the current history back + to the first operation. 
+ """ + from pypy.jit.metainterp.optimizeopt import optimize_trace + + history = metainterp.history metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd - loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd) - loop.token = loop_token - loop.operations[-1].setdescr(loop_token) # patch the target of the JUMP - loop.preamble = create_empty_loop(metainterp, 'Preamble ') - loop.preamble.inputargs = loop.inputargs - loop.preamble.token = make_loop_token(len(loop.inputargs), jitdriver_sd) - loop.preamble.start_resumedescr = start_resumedescr + loop_jitcell_token = metainterp.get_procedure_token(greenkey) + assert loop_jitcell_token + assert partial_trace.operations[-1].getopnum() == rop.LABEL + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + part.start_resumedescr = start_resumedescr + h_ops = history.operations + + part.operations = [partial_trace.operations[-1]] + \ + [h_ops[i].clone() for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, jumpargs, None, descr=loop_jitcell_token)] + label = part.operations[0] + orignial_label = label.clone() + assert label.getopnum() == rop.LABEL try: - old_loop_token = optimize_loop(metainterp_sd, old_loop_tokens, loop, - jitdriver_sd.warmstate.enable_opts) + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: - debug_print("compile_new_loop: got an InvalidLoop") - return None - if old_loop_token is not None: - metainterp.staticdata.log("reusing old loop") - return old_loop_token + #return None # XXX: Dissable for now + # Fall back on jumping to preamble + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + assert target_token.exported_state + part.operations = [orignial_label] + \ + [ResOperation(rop.JUMP, target_token.exported_state.jump_args, + None, descr=loop_jitcell_token)] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts, + inline_short_preamble=False) + + except InvalidLoop: + return None + assert part.operations[-1].getopnum() != rop.LABEL + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + assert loop_jitcell_token.target_tokens + loop_jitcell_token.target_tokens.append(target_token) - if loop.preamble.operations is not None: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, - "loop") - record_loop_or_bridge(metainterp_sd, loop) - token = loop.preamble.token - if full_preamble_needed: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, - loop.preamble, "entry bridge") - insert_loop_token(old_loop_tokens, loop.preamble.token) - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - greenkey, loop.preamble.token) - record_loop_or_bridge(metainterp_sd, loop.preamble) - elif token.short_preamble: - short = token.short_preamble[-1] - metainterp_sd.logger_ops.log_short_preamble(short.inputargs, - short.operations) - return token - else: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, - "loop") - insert_loop_token(old_loop_tokens, loop_token) - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - greenkey, loop.token) - record_loop_or_bridge(metainterp_sd, loop) - return loop_token + loop = partial_trace + loop.operations = loop.operations[:-1] + part.operations -def insert_loop_token(old_loop_tokens, loop_token): - # Find where in old_loop_tokens we should insert this new loop_token. 
- # The following algo means "as late as possible, but before another - # loop token that would be more general and so completely mask off - # the new loop_token". - # XXX do we still need a list? - old_loop_tokens.append(loop_token) + quasi_immutable_deps = {} + if loop.quasi_immutable_deps: + quasi_immutable_deps.update(loop.quasi_immutable_deps) + if part.quasi_immutable_deps: + quasi_immutable_deps.update(part.quasi_immutable_deps) + if quasi_immutable_deps: + loop.quasi_immutable_deps = quasi_immutable_deps + + for box in loop.inputargs: + assert isinstance(box, Box) + + target_token = loop.operations[-1].getdescr() + resumekey.compile_and_attach(metainterp, loop) + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + target_token.original_jitcell_token = loop.original_jitcell_token + record_loop_or_bridge(metainterp_sd, loop) + return target_token + +def patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd): + vinfo = jitdriver_sd.virtualizable_info + extra_ops = [] + inputargs = loop.inputargs + vable_box = inputargs[jitdriver_sd.index_of_virtualizable] + i = jitdriver_sd.num_red_args + loop.inputargs = inputargs[:i] + for descr in vinfo.static_field_descrs: + assert i < len(inputargs) + box = inputargs[i] + extra_ops.append( + ResOperation(rop.GETFIELD_GC, [vable_box], box, descr)) + i += 1 + arrayindex = 0 + for descr in vinfo.array_field_descrs: + vable = vable_box.getref_base() + arraylen = vinfo.get_array_length(vable, arrayindex) + arraybox = BoxPtr() + extra_ops.append( + ResOperation(rop.GETFIELD_GC, [vable_box], arraybox, descr)) + arraydescr = vinfo.array_descrs[arrayindex] + assert i + arraylen <= len(inputargs) + for index in range(arraylen): + box = inputargs[i] + extra_ops.append( + ResOperation(rop.GETARRAYITEM_GC, + [arraybox, ConstInt(index)], + box, descr=arraydescr)) + i += 1 + arrayindex += 1 + assert i == len(inputargs) + loop.operations = extra_ops + loop.operations def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): - jitdriver_sd.on_compile(metainterp_sd.logger_ops, loop.token, + vinfo = jitdriver_sd.virtualizable_info + if vinfo is not None: + patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd) + + original_jitcell_token = loop.original_jitcell_token + jitdriver_sd.on_compile(metainterp_sd.logger_ops, original_jitcell_token, loop.operations, type, greenkey) loopname = jitdriver_sd.warmstate.get_location_str(greenkey) globaldata = metainterp_sd.globaldata - loop_token = loop.token - loop_token.number = n = globaldata.loopnumbering + original_jitcell_token.number = n = globaldata.loopnumbering globaldata.loopnumbering += 1 if not we_are_translated(): - show_loop(metainterp_sd, loop) + show_procedures(metainterp_sd, loop) loop.check_consistency() operations = get_deep_immutable_oplist(loop.operations) @@ -177,26 +309,19 @@ debug_start("jit-backend") try: ops_offset = metainterp_sd.cpu.compile_loop(loop.inputargs, operations, - loop.token, name=loopname) + original_jitcell_token, name=loopname) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() metainterp_sd.stats.add_new_loop(loop) if not we_are_translated(): - if type != "entry bridge": - metainterp_sd.stats.compiled() - else: - loop._ignore_during_counting = True + metainterp_sd.stats.compiled() metainterp_sd.log("compiled new " + type) # metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, type, ops_offset) - short = loop.token.short_preamble - if short: - 
metainterp_sd.logger_ops.log_short_preamble(short[-1].inputargs, - short[-1].operations) # if metainterp_sd.warmrunnerdesc is not None: # for tests - metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(loop.token) + metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(original_jitcell_token) def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputargs, operations, original_loop_token): @@ -204,8 +329,9 @@ jitdriver_sd.on_compile_bridge(metainterp_sd.logger_ops, original_loop_token, operations, n) if not we_are_translated(): - show_loop(metainterp_sd) - TreeLoop.check_consistency_of(inputargs, operations) + show_procedures(metainterp_sd) + seen = dict.fromkeys(inputargs) + TreeLoop.check_consistency_of_branch(operations, seen) metainterp_sd.profiler.start_backend() operations = get_deep_immutable_oplist(operations) debug_start("jit-backend") @@ -221,9 +347,9 @@ # metainterp_sd.logger_ops.log_bridge(inputargs, operations, n, ops_offset) # - if metainterp_sd.warmrunnerdesc is not None: # for tests - metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive( - original_loop_token) + #if metainterp_sd.warmrunnerdesc is not None: # for tests + # metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive( + # original_loop_token) # ____________________________________________________________ @@ -263,7 +389,7 @@ raise metainterp_sd.ExitFrameWithExceptionRef(cpu, value) -class TerminatingLoopToken(LoopToken): +class TerminatingLoopToken(JitCellToken): # FIXME: kill? terminating = True def __init__(self, nargs, finishdescr): @@ -298,7 +424,7 @@ pass class ResumeGuardDescr(ResumeDescr): - _counter = 0 # if < 0, there is one counter per value; + _counter = 0 # on a GUARD_VALUE, there is one counter per value; _counters = None # they get stored in _counters then. # this class also gets the following attributes stored by resume.py code @@ -309,10 +435,13 @@ rd_virtuals = None rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) - CNT_INT = -0x20000000 - CNT_REF = -0x40000000 - CNT_FLOAT = -0x60000000 - CNT_MASK = 0x1FFFFFFF + CNT_BASE_MASK = 0x0FFFFFFF # the base counter value + CNT_BUSY_FLAG = 0x10000000 # if set, busy tracing from the guard + CNT_TYPE_MASK = 0x60000000 # mask for the type + + CNT_INT = 0x20000000 + CNT_REF = 0x40000000 + CNT_FLOAT = 0x60000000 def store_final_boxes(self, guard_op, boxes): guard_op.setfailargs(boxes) @@ -326,6 +455,8 @@ except ValueError: return # xxx probably very rare else: + if i > self.CNT_BASE_MASK: + return # probably never, but better safe than sorry if box.type == history.INT: cnt = self.CNT_INT elif box.type == history.REF: @@ -334,18 +465,21 @@ cnt = self.CNT_FLOAT else: assert 0, box.type - # we build the following value for _counter, which is always - # a negative value + assert cnt > self.CNT_BASE_MASK self._counter = cnt | i def handle_fail(self, metainterp_sd, jitdriver_sd): if self.must_compile(metainterp_sd, jitdriver_sd): - return self._trace_and_compile_from_bridge(metainterp_sd, - jitdriver_sd) + self.start_compiling() + try: + self._trace_and_compile_from_bridge(metainterp_sd, + jitdriver_sd) + finally: + self.done_compiling() else: from pypy.jit.metainterp.blackhole import resume_in_blackhole resume_in_blackhole(metainterp_sd, jitdriver_sd, self) - assert 0, "unreachable" + assert 0, "unreachable" def _trace_and_compile_from_bridge(self, metainterp_sd, jitdriver_sd): # 'jitdriver_sd' corresponds to the outermost one, i.e. the one @@ -354,17 +488,27 @@ # jitdrivers. 
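# (Illustrative sketch, not from the patch itself: while this helper runs, the
# guard's _counter carries the new CNT_BUSY_FLAG so that a reentrant failure of
# the same guard does not start a second trace.  Using the masks defined above,
# the encoding packed into the single _counter integer for a GUARD_VALUE on an
# int box at failargs index 5 works out roughly as:
#     counter = CNT_INT | 5           # 0x20000005: type tag plus index
#     counter & CNT_BASE_MASK         # -> 5, the failargs index
#     counter & CNT_TYPE_MASK         # -> CNT_INT, the type tag
#     counter |= CNT_BUSY_FLAG        # -> 0x30000005 while tracing from the guard
#     bool(counter & CNT_BUSY_FLAG)   # True, so must_compile() returns False
# )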
from pypy.jit.metainterp.pyjitpl import MetaInterp metainterp = MetaInterp(metainterp_sd, jitdriver_sd) - return metainterp.handle_guard_failure(self) + metainterp.handle_guard_failure(self) _trace_and_compile_from_bridge._dont_inline_ = True def must_compile(self, metainterp_sd, jitdriver_sd): trace_eagerness = jitdriver_sd.warmstate.trace_eagerness - if self._counter >= 0: + # + if self._counter <= self.CNT_BASE_MASK: + # simple case: just counting from 0 to trace_eagerness self._counter += 1 return self._counter >= trace_eagerness - else: - index = self._counter & self.CNT_MASK - typetag = self._counter & ~ self.CNT_MASK + # + # do we have the BUSY flag? If so, we're tracing right now, e.g. in an + # outer invocation of the same function, so don't trace again for now. + elif self._counter & self.CNT_BUSY_FLAG: + return False + # + else: # we have a GUARD_VALUE that fails. Make a _counters instance + # (only now, when the guard is actually failing at least once), + # and use it to record some statistics about the failing values. + index = self._counter & self.CNT_BASE_MASK + typetag = self._counter & self.CNT_TYPE_MASK counters = self._counters if typetag == self.CNT_INT: intvalue = metainterp_sd.cpu.get_latest_value_int(index) @@ -391,7 +535,16 @@ assert 0, typetag return counter >= trace_eagerness - def reset_counter_from_failure(self): + def start_compiling(self): + # start tracing and compiling from this guard. + self._counter |= self.CNT_BUSY_FLAG + + def done_compiling(self): + # done tracing and compiling from this guard. Either the bridge has + # been successfully compiled, in which case whatever value we store + # in self._counter will not be seen any more, or not, in which case + # we should reset the counter to 0, in order to wait a bit until the + # next attempt. if self._counter >= 0: self._counter = 0 self._counters = None @@ -400,13 +553,13 @@ # We managed to create a bridge. Attach the new operations # to the corresponding guard_op and compile from there assert metainterp.resumekey_original_loop_token is not None - new_loop.token = metainterp.resumekey_original_loop_token + new_loop.original_jitcell_token = metainterp.resumekey_original_loop_token inputargs = metainterp.history.inputargs if not we_are_translated(): self._debug_suboperations = new_loop.operations send_bridge_to_backend(metainterp.jitdriver_sd, metainterp.staticdata, self, inputargs, new_loop.operations, - new_loop.token) + new_loop.original_jitcell_token) def copy_all_attributes_into(self, res): # XXX a bit ugly to have to list them all here @@ -589,44 +742,32 @@ metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd redargs = new_loop.inputargs - # We make a new LoopToken for this entry bridge, and stick it - # to every guard in the loop. 
- new_loop_token = make_loop_token(len(redargs), jitdriver_sd) - new_loop.token = new_loop_token + new_loop.original_jitcell_token = jitcell_token = make_jitcell_token(jitdriver_sd) send_loop_to_backend(self.original_greenkey, metainterp.jitdriver_sd, metainterp_sd, new_loop, "entry bridge") # send the new_loop to warmspot.py, to be called directly the next time - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - self.original_greenkey, - new_loop_token) - # store the new loop in compiled_merge_points_wref too - old_loop_tokens = metainterp.get_compiled_merge_points( - self.original_greenkey) - # it always goes at the end of the list, as it is the most - # general loop token - old_loop_tokens.append(new_loop_token) - metainterp.set_compiled_merge_points(self.original_greenkey, - old_loop_tokens) + jitdriver_sd.warmstate.attach_procedure_to_interp( + self.original_greenkey, jitcell_token) + metainterp_sd.stats.add_jitcell_token(jitcell_token) - def reset_counter_from_failure(self): - pass - -def compile_new_bridge(metainterp, old_loop_tokens, resumekey, retraced=False): +def compile_trace(metainterp, resumekey, start_resumedescr=None): """Try to compile a new bridge leading from the beginning of the history to some existing place. """ - from pypy.jit.metainterp.optimize import optimize_bridge + from pypy.jit.metainterp.optimizeopt import optimize_trace # The history contains new operations to attach as the code for the # failure of 'resumekey.guard_op'. - # + # # Attempt to use optimize_bridge(). This may return None in case # it does not work -- i.e. none of the existing old_loop_tokens match. - new_loop = create_empty_loop(metainterp) - new_loop.inputargs = metainterp.history.inputargs[:] + new_trace = create_empty_loop(metainterp) + new_trace.inputargs = inputargs = metainterp.history.inputargs[:] # clone ops, as optimize_bridge can mutate the ops - new_loop.operations = [op.clone() for op in metainterp.history.operations] + + new_trace.operations = [op.clone() for op in metainterp.history.operations] + new_trace.start_resumedescr = start_resumedescr metainterp_sd = metainterp.staticdata state = metainterp.jitdriver_sd.warmstate if isinstance(resumekey, ResumeAtPositionDescr): @@ -634,38 +775,25 @@ else: inline_short_preamble = True try: - target_loop_token = optimize_bridge(metainterp_sd, old_loop_tokens, - new_loop, state.enable_opts, - inline_short_preamble, retraced) + optimize_trace(metainterp_sd, new_trace, state.enable_opts, inline_short_preamble) except InvalidLoop: debug_print("compile_new_bridge: got an InvalidLoop") # XXX I am fairly convinced that optimize_bridge cannot actually raise # InvalidLoop debug_print('InvalidLoop in compile_new_bridge') return None - # Did it work? - if target_loop_token is not None: - # Yes, we managed to create a bridge. Dispatch to resumekey to + + if new_trace.operations[-1].getopnum() != rop.LABEL: + # We managed to create a bridge. 
Dispatch to resumekey to # know exactly what we must do (ResumeGuardDescr/ResumeFromInterpDescr) - prepare_last_operation(new_loop, target_loop_token) - resumekey.compile_and_attach(metainterp, new_loop) - record_loop_or_bridge(metainterp_sd, new_loop) - return target_loop_token - -def prepare_last_operation(new_loop, target_loop_token): - op = new_loop.operations[-1] - if not isinstance(target_loop_token, TerminatingLoopToken): - # normal case - #op.setdescr(target_loop_token) # patch the jump target - pass + target_token = new_trace.operations[-1].getdescr() + resumekey.compile_and_attach(metainterp, new_trace) + record_loop_or_bridge(metainterp_sd, new_trace) + return target_token else: - # The target_loop_token is a pseudo loop token, - # e.g. loop_tokens_done_with_this_frame_void[0] - # Replace the operation with the real operation we want, i.e. a FINISH - descr = target_loop_token.finishdescr - args = op.getarglist() - new_op = ResOperation(rop.FINISH, args, None, descr=descr) - new_loop.operations[-1] = new_op + metainterp.retrace_needed(new_trace) + return None + # ____________________________________________________________ @@ -676,21 +804,25 @@ assert exception, "PropagateExceptionDescr: no exception??" raise metainterp_sd.ExitFrameWithExceptionRef(cpu, exception) -def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redboxes, +def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redargtypes, memory_manager=None): """Make a LoopToken that corresponds to assembler code that just calls back the interpreter. Used temporarily: a fully compiled version of the code may end up replacing it. """ - # 'redboxes' is only used to know the types of red arguments. - inputargs = [box.clonebox() for box in redboxes] - loop_token = make_loop_token(len(inputargs), jitdriver_sd) - # 'nb_red_args' might be smaller than len(redboxes), - # because it doesn't include the virtualizable boxes. 
+ jitcell_token = make_jitcell_token(jitdriver_sd) nb_red_args = jitdriver_sd.num_red_args + assert len(redargtypes) == nb_red_args + inputargs = [] + for kind in redargtypes: + if kind == history.INT: box = BoxInt() + elif kind == history.REF: box = BoxPtr() + elif kind == history.FLOAT: box = BoxFloat() + else: raise AssertionError + inputargs.append(box) k = jitdriver_sd.portal_runner_adr funcbox = history.ConstInt(heaptracker.adr2int(k)) - callargs = [funcbox] + greenboxes + inputargs[:nb_red_args] + callargs = [funcbox] + greenboxes + inputargs # result_type = jitdriver_sd.result_type if result_type == history.INT: @@ -717,7 +849,7 @@ ] operations[1].setfailargs([]) operations = get_deep_immutable_oplist(operations) - cpu.compile_loop(inputargs, operations, loop_token, log=False) + cpu.compile_loop(inputargs, operations, jitcell_token, log=False) if memory_manager is not None: # for tests - memory_manager.keep_loop_alive(loop_token) - return loop_token + memory_manager.keep_loop_alive(jitcell_token) + return jitcell_token diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -344,6 +344,7 @@ rop.SETINTERIORFIELD_RAW, rop.CALL_RELEASE_GIL, rop.QUASIIMMUT_FIELD, + rop.LABEL, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git a/pypy/jit/metainterp/gc.py b/pypy/jit/metainterp/gc.py --- a/pypy/jit/metainterp/gc.py +++ b/pypy/jit/metainterp/gc.py @@ -7,6 +7,9 @@ self.config = config +class GC_none(GcDescription): + malloc_zero_filled = True + class GC_boehm(GcDescription): malloc_zero_filled = True diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -12,8 +12,9 @@ def get_display_text(self): return None -def display_loops(loops, errmsg=None, highlight_loops={}): - graphs = [(loop, highlight_loops.get(loop, 0)) for loop in loops] +def display_procedures(procedures, errmsg=None, highlight_procedures={}): + graphs = [(procedure, highlight_procedures.get(procedure, 0)) + for procedure in procedures] for graph, highlight in graphs: for op in graph.get_operations(): if is_interesting_guard(op): @@ -25,18 +26,19 @@ def is_interesting_guard(op): return hasattr(op.getdescr(), '_debug_suboperations') +def getdescr(op): + if op._descr is not None: + return op._descr + if hasattr(op, '_descr_wref'): + return op._descr_wref() + return None + class ResOpGraphPage(GraphPage): def compute(self, graphs, errmsg=None): resopgen = ResOpGen() for graph, highlight in graphs: - if getattr(graph, 'token', None) is not None: - resopgen.jumps_to_graphs[graph.token] = graph - if getattr(graph, '_looptoken_number', None) is not None: - resopgen.jumps_to_graphs[graph._looptoken_number] = graph - - for graph, highlight in graphs: resopgen.add_graph(graph, highlight) if errmsg: resopgen.set_errmsg(errmsg) @@ -54,7 +56,7 @@ self.block_starters = {} # {graphindex: {set-of-operation-indices}} self.all_operations = {} self.errmsg = None - self.jumps_to_graphs = {} + self.target_tokens = {} def op_name(self, graphindex, opindex): return 'g%dop%d' % (graphindex, opindex) @@ -73,16 +75,21 @@ for graphindex in range(len(self.graphs)): self.block_starters[graphindex] = {0: True} for graphindex, graph in enumerate(self.graphs): - last_was_mergepoint = False + mergepointblock = None for i, op in enumerate(graph.get_operations()): if is_interesting_guard(op): 
self.mark_starter(graphindex, i+1) if op.getopnum() == rop.DEBUG_MERGE_POINT: - if not last_was_mergepoint: - last_was_mergepoint = True - self.mark_starter(graphindex, i) + if mergepointblock is None: + mergepointblock = i + elif op.getopnum() == rop.LABEL: + self.mark_starter(graphindex, i) + self.target_tokens[getdescr(op)] = (graphindex, i) + mergepointblock = i else: - last_was_mergepoint = False + if mergepointblock is not None: + self.mark_starter(graphindex, mergepointblock) + mergepointblock = None def set_errmsg(self, errmsg): self.errmsg = errmsg @@ -172,24 +179,10 @@ (graphindex, opindex)) break if op.getopnum() == rop.JUMP: - tgt_g = -1 - tgt = None - tgt_number = getattr(op, '_jumptarget_number', None) - if tgt_number is not None: - tgt = self.jumps_to_graphs.get(tgt_number) - else: - tgt_descr = op.getdescr() - if tgt_descr is None: - tgt_g = graphindex - else: - tgt = self.jumps_to_graphs.get(tgt_descr.number) - if tgt is None: - tgt = self.jumps_to_graphs.get(tgt_descr) - if tgt is not None: - tgt_g = self.graphs.index(tgt) - if tgt_g != -1: + tgt_descr = getdescr(op) + if tgt_descr is not None and tgt_descr in self.target_tokens: self.genedge((graphindex, opstartindex), - (tgt_g, 0), + self.target_tokens[tgt_descr], weight="0") lines.append("") label = "\\l".join(lines) diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -10,6 +10,7 @@ from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.codewriter import heaptracker, longlong from pypy.rlib.objectmodel import compute_identity_hash +import weakref # ____________________________________________________________ @@ -123,9 +124,6 @@ def sort_key(self): raise NotImplementedError - def set_future_value(self, cpu, j): - raise NotImplementedError - def nonnull(self): raise NotImplementedError @@ -288,9 +286,6 @@ def _get_hash_(self): return make_hashable_int(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_int(j, self.value) - def same_constant(self, other): if isinstance(other, ConstInt): return self.value == other.value @@ -328,9 +323,6 @@ def _get_hash_(self): return longlong.gethash(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_float(j, self.value) - def same_constant(self, other): if isinstance(other, ConstFloat): return self.value == other.value @@ -377,9 +369,6 @@ def getaddr(self): return llmemory.cast_ptr_to_adr(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def same_constant(self, other): if isinstance(other, ConstPtr): return self.value == other.value @@ -431,9 +420,6 @@ else: return 0 - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - ## def getaddr(self): ## # so far this is used only when calling ## # CodeWriter.IndirectCallset.bytecode_for_address. 
We don't need a @@ -539,9 +525,6 @@ def _get_hash_(self): return make_hashable_int(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_int(j, self.value) - def nonnull(self): return self.value != 0 @@ -574,9 +557,6 @@ def _get_hash_(self): return longlong.gethash(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_float(j, self.value) - def nonnull(self): return self.value != longlong.ZEROF @@ -619,9 +599,6 @@ else: return 0 - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def nonnull(self): return bool(self.value) @@ -666,19 +643,12 @@ def nonnull(self): return bool(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def repr_rpython(self): return repr_rpython(self, 'bo') _getrepr_ = repr_object -def set_future_values(cpu, boxes): - for j in range(len(boxes)): - boxes[j].set_future_value(cpu, j) - # ____________________________________________________________ @@ -723,18 +693,17 @@ # ____________________________________________________________ -# The TreeLoop class contains a loop or a generalized loop, i.e. a tree -# of operations. Each branch ends in a jump which can go either to -# the top of the same loop, or to another TreeLoop; or it ends in a FINISH. +# The JitCellToken class is the root of a tree of traces. Each branch ends +# in a jump which goes to a LABEL operation; or it ends in a FINISH. -class LoopToken(AbstractDescr): +class JitCellToken(AbstractDescr): """Used for rop.JUMP, giving the target of the jump. This is different from TreeLoop: the TreeLoop class contains the whole loop, including 'operations', and goes away after the loop was compiled; but the LoopDescr remains alive and points to the generated assembler. """ - short_preamble = None + target_tokens = None failed_states = None retraced_count = 0 terminating = False # see TerminatingLoopToken in compile.py @@ -751,10 +720,11 @@ def __init__(self): # For memory management of assembled loops - self._keepalive_target_looktokens = {} # set of other LoopTokens + self._keepalive_jitcell_tokens = {} # set of other JitCellToken - def record_jump_to(self, target_loop_token): - self._keepalive_target_looktokens[target_loop_token] = None + def record_jump_to(self, jitcell_token): + assert isinstance(jitcell_token, JitCellToken) + self._keepalive_jitcell_tokens[jitcell_token] = None def __repr__(self): return '' % (self.number, self.generation) @@ -765,17 +735,49 @@ def dump(self): self.compiled_loop_token.cpu.dump_loop_token(self) +class TargetToken(AbstractDescr): + def __init__(self, targeting_jitcell_token=None): + # Warning, two different jitcell_tokens here! + # + # * 'targeting_jitcell_token' is only useful for the front-end, + # and it means: consider the LABEL that uses this TargetToken. + # At this position, the state is logically the one given + # by targeting_jitcell_token. So e.g. if we want to enter the + # JIT with some given green args, if the jitcell matches, then + # we can jump to this LABEL. 
+ # + # * 'original_jitcell_token' is information from the backend's + # point of view: it means that this TargetToken is used in + # a LABEL that belongs to either: + # - a loop; then 'original_jitcell_token' is this loop + # - or a bridge; then 'original_jitcell_token' is the loop + # out of which we made this bridge + # + self.targeting_jitcell_token = targeting_jitcell_token + self.original_jitcell_token = None + + self.virtual_state = None + self.exported_state = None + class TreeLoop(object): inputargs = None operations = None - token = None call_pure_results = None logops = None quasi_immutable_deps = None + start_resumedescr = None + + def _token(*args): + raise Exception("TreeLoop.token is killed") + token = property(_token, _token) + + # This is the jitcell where the trace starts. Labels within the trace might + # belong to some other jitcells in the sens that jumping to this other + # jitcell will result in a jump to the label. + original_jitcell_token = None def __init__(self, name): self.name = name - # self.inputargs = list of distinct Boxes # self.operations = list of ResOperations # ops of the kind 'guard_xxx' contain a further list of operations, # which may itself contain 'guard_xxx' and so on, making a tree. @@ -808,6 +810,10 @@ def check_consistency(self): # for testing "NOT_RPYTHON" self.check_consistency_of(self.inputargs, self.operations) + for op in self.operations: + descr = op.getdescr() + if op.getopnum() == rop.LABEL and isinstance(descr, TargetToken): + assert descr.original_jitcell_token is self.original_jitcell_token @staticmethod def check_consistency_of(inputargs, operations): @@ -842,15 +848,23 @@ assert isinstance(box, Box) assert box not in seen seen[box] = True + if op.getopnum() == rop.LABEL: + inputargs = op.getarglist() + for box in inputargs: + assert isinstance(box, Box), "LABEL contains %r" % (box,) + seen = dict.fromkeys(inputargs) + assert len(seen) == len(inputargs), ( + "duplicate Box in the LABEL arguments") + assert operations[-1].is_final() if operations[-1].getopnum() == rop.JUMP: target = operations[-1].getdescr() if target is not None: - assert isinstance(target, LoopToken) + assert isinstance(target, TargetToken) def dump(self): # RPython-friendly - print '%r: inputargs =' % self, self._dump_args(self.inputargs) + print '%r: inputargs =' % self, self._dump_args(self.inputargs) for op in self.operations: args = op.getarglist() print '\t', op.getopname(), self._dump_args(args), \ @@ -932,6 +946,9 @@ def clear(self): pass + def add_jitcell_token(self, token): + pass + class Stats(object): """For tests.""" @@ -944,17 +961,26 @@ self.loops = [] self.locations = [] self.aborted_keys = [] - self.invalidated_token_numbers = set() + self.invalidated_token_numbers = set() # <- not RPython + self.jitcell_token_wrefs = [] + self.jitcell_dicts = [] # <- not RPython def clear(self): del self.loops[:] del self.locations[:] del self.aborted_keys[:] + del self.jitcell_token_wrefs[:] self.invalidated_token_numbers.clear() self.compiled_count = 0 self.enter_count = 0 self.aborted_count = 0 + for dict in self.jitcell_dicts: + dict.clear() + def add_jitcell_token(self, token): + assert isinstance(token, JitCellToken) + self.jitcell_token_wrefs.append(weakref.ref(token)) + def set_history(self, history): self.operations = history.operations @@ -984,6 +1010,15 @@ def get_all_loops(self): return self.loops + def get_all_jitcell_tokens(self): + tokens = [t() for t in self.jitcell_token_wrefs] + if None in tokens: + assert False, "get_all_jitcell_tokens will not 
work as "+\ + "loops have been freed" + return tokens + + + def check_history(self, expected=None, **check): insns = {} for op in self.operations: @@ -1001,10 +1036,14 @@ def check_resops(self, expected=None, **check): insns = {} - for loop in self.loops: + for loop in self.get_all_loops(): insns = loop.summary(adding_insns=insns) + return self._check_insns(insns, expected, check) + + def _check_insns(self, insns, expected, check): if expected is not None: insns.pop('debug_merge_point', None) + insns.pop('label', None) assert insns == expected for insn, expected_count in check.items(): getattr(rop, insn.upper()) # fails if 'rop.INSN' does not exist @@ -1012,29 +1051,102 @@ assert found == expected_count, ( "found %d %r, expected %d" % (found, insn, expected_count)) return insns + + def check_simple_loop(self, expected=None, **check): + # Usefull in the simplest case when we have only one trace ending with + # a jump back to itself and possibly a few bridges ending with finnish. + # Only the operations within the loop formed by that single jump will + # be counted. + + # XXX hacked version, ignore and remove me when jit-targets is merged. + loops = self.get_all_loops() + loops = [loop for loop in loops if 'Preamble' not in repr(loop)] #XXX + assert len(loops) == 1 + loop, = loops + jumpop = loop.operations[-1] + assert jumpop.getopnum() == rop.JUMP + insns = {} + for op in loop.operations: + opname = op.getopname() + insns[opname] = insns.get(opname, 0) + 1 + return self._check_insns(insns, expected, check) + + def check_simple_loop(self, expected=None, **check): + # Usefull in the simplest case when we have only one trace ending with + # a jump back to itself and possibly a few bridges ending with finnish. + # Only the operations within the loop formed by that single jump will + # be counted. 
+ loops = self.get_all_loops() + assert len(loops) == 1 + loop = loops[0] + jumpop = loop.operations[-1] + assert jumpop.getopnum() == rop.JUMP + assert self.check_resops(jump=1) + labels = [op for op in loop.operations if op.getopnum() == rop.LABEL] + targets = [op._descr_wref() for op in labels] + assert None not in targets # TargetToken was freed, give up + target = jumpop._descr_wref() + assert target + assert targets.count(target) == 1 + i = loop.operations.index(labels[targets.index(target)]) + insns = {} + for op in loop.operations[i:]: + opname = op.getopname() + insns[opname] = insns.get(opname, 0) + 1 + return self._check_insns(insns, expected, check) + def check_loops(self, expected=None, everywhere=False, **check): + insns = {} + for loop in self.get_all_loops(): + #if not everywhere: + # if getattr(loop, '_ignore_during_counting', False): + # continue + insns = loop.summary(adding_insns=insns) + if expected is not None: + insns.pop('debug_merge_point', None) + print + print + print " self.check_resops(%s)" % str(insns) + print + import pdb; pdb.set_trace() + else: + chk = ['%s=%d' % (i, insns.get(i, 0)) for i in check] + print + print + print " self.check_resops(%s)" % ', '.join(chk) + print + import pdb; pdb.set_trace() + return + + for insn, expected_count in check.items(): + getattr(rop, insn.upper()) # fails if 'rop.INSN' does not exist + found = insns.get(insn, 0) + assert found == expected_count, ( + "found %d %r, expected %d" % (found, insn, expected_count)) + return insns + def check_consistency(self): "NOT_RPYTHON" - for loop in self.loops: + for loop in self.get_all_loops(): loop.check_consistency() def maybe_view(self): if option.view: self.view() - def view(self, errmsg=None, extraloops=[]): - from pypy.jit.metainterp.graphpage import display_loops - loops = self.get_all_loops()[:] - for loop in extraloops: - if loop in loops: - loops.remove(loop) - loops.append(loop) - highlight_loops = dict.fromkeys(extraloops, 1) - for loop in loops: - if hasattr(loop, '_looptoken_number') and ( - loop._looptoken_number in self.invalidated_token_numbers): - highlight_loops.setdefault(loop, 2) - display_loops(loops, errmsg, highlight_loops) + def view(self, errmsg=None, extraprocedures=[]): + from pypy.jit.metainterp.graphpage import display_procedures + procedures = self.get_all_loops()[:] + for procedure in extraprocedures: + if procedure in procedures: + procedures.remove(procedure) + procedures.append(procedure) + highlight_procedures = dict.fromkeys(extraprocedures, 1) + for procedure in procedures: + if hasattr(procedure, '_looptoken_number') and ( + procedure._looptoken_number in self.invalidated_token_numbers): + highlight_procedures.setdefault(procedure, 2) + display_procedures(procedures, errmsg, highlight_procedures) # ---------------------------------------------------------------- diff --git a/pypy/jit/metainterp/inliner.py b/pypy/jit/metainterp/inliner.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/inliner.py @@ -0,0 +1,57 @@ +from pypy.jit.metainterp.history import Const +from pypy.jit.metainterp.resume import Snapshot + +class Inliner(object): + def __init__(self, inputargs, jump_args): + assert len(inputargs) == len(jump_args) + self.argmap = {} + for i in range(len(inputargs)): + if inputargs[i] in self.argmap: + assert self.argmap[inputargs[i]] == jump_args[i] + else: + self.argmap[inputargs[i]] = jump_args[i] + self.snapshot_map = {None: None} + + def inline_op(self, newop, ignore_result=False, clone=True, + ignore_failargs=False): + if clone: 
+ newop = newop.clone() + args = newop.getarglist() + newop.initarglist([self.inline_arg(a) for a in args]) + + if newop.is_guard(): + args = newop.getfailargs() + if args and not ignore_failargs: + newop.setfailargs([self.inline_arg(a) for a in args]) + else: + newop.setfailargs([]) + + if newop.result and not ignore_result: + old_result = newop.result + newop.result = newop.result.clonebox() + self.argmap[old_result] = newop.result + + self.inline_descr_inplace(newop.getdescr()) + + return newop + + def inline_descr_inplace(self, descr): + from pypy.jit.metainterp.compile import ResumeGuardDescr + if isinstance(descr, ResumeGuardDescr): + descr.rd_snapshot = self.inline_snapshot(descr.rd_snapshot) + + def inline_arg(self, arg): + if arg is None: + return None + if isinstance(arg, Const): + return arg + return self.argmap[arg] + + def inline_snapshot(self, snapshot): + if snapshot in self.snapshot_map: + return self.snapshot_map[snapshot] + boxes = [self.inline_arg(a) for a in snapshot.boxes] + new_snapshot = Snapshot(self.inline_snapshot(snapshot.prev), boxes) + self.snapshot_map[snapshot] = new_snapshot + return new_snapshot + diff --git a/pypy/jit/metainterp/jitdriver.py b/pypy/jit/metainterp/jitdriver.py --- a/pypy/jit/metainterp/jitdriver.py +++ b/pypy/jit/metainterp/jitdriver.py @@ -11,6 +11,7 @@ # self.portal_calldescr ... pypy.jit.metainterp.warmspot # self.num_green_args ... pypy.jit.metainterp.warmspot # self.num_red_args ... pypy.jit.metainterp.warmspot + # self.red_args_types ... pypy.jit.metainterp.warmspot # self.result_type ... pypy.jit.metainterp.warmspot # self.virtualizable_info... pypy.jit.metainterp.warmspot # self.greenfield_info ... pypy.jit.metainterp.warmspot diff --git a/pypy/jit/metainterp/jitprof.py b/pypy/jit/metainterp/jitprof.py --- a/pypy/jit/metainterp/jitprof.py +++ b/pypy/jit/metainterp/jitprof.py @@ -10,8 +10,6 @@ counters=""" TRACING BACKEND -RUNNING -BLACKHOLE OPS RECORDED_OPS GUARDS @@ -66,18 +64,6 @@ def end_backend(self): pass - def start_running(self): - pass - - def end_running(self): - pass - - def start_blackhole(self): - pass - - def end_blackhole(self): - pass - def count(self, kind, inc=1): pass @@ -133,16 +119,6 @@ def start_backend(self): self._start(BACKEND) def end_backend(self): self._end (BACKEND) - # Don't record times for 'running' and 'blackhole' because there are - # too many of them: calling time.time() is a major blocker. - # If you are interested in these numbers, use 'PYPYLOG=file' and - # look at the resulting file with pypy/tool/logparser.py. 
- def start_running(self): self.count(RUNNING) - def end_running(self): pass - - def start_blackhole(self): self.count(BLACKHOLE) - def end_blackhole(self): pass - def count(self, kind, inc=1): self.counters[kind] += inc @@ -164,8 +140,6 @@ calls = self.calls self._print_line_time("Tracing", cnt[TRACING], tim[TRACING]) self._print_line_time("Backend", cnt[BACKEND], tim[BACKEND]) - self._print_intline("Running asm", cnt[RUNNING]) - self._print_intline("Blackhole", cnt[BLACKHOLE]) line = "TOTAL: \t\t%f" % (self.tk - self.starttime, ) debug_print(line) self._print_intline("ops", cnt[OPS]) diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -4,13 +4,15 @@ from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize from pypy.jit.metainterp.optimizeopt.heap import OptHeap from pypy.jit.metainterp.optimizeopt.vstring import OptString -from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll, OptInlineShortPreamble +from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll from pypy.jit.metainterp.optimizeopt.fficall import OptFfiCall from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify from pypy.jit.metainterp.optimizeopt.pure import OptPure from pypy.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce from pypy.rlib.jit import PARAMETERS from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.debug import debug_start, debug_stop, debug_print + ALL_OPTS = [('intbounds', OptIntBounds), ('rewrite', OptRewrite), @@ -28,8 +30,7 @@ ALL_OPTS_LIST = [name for name, _ in ALL_OPTS] ALL_OPTS_NAMES = ':'.join([name for name, _ in ALL_OPTS]) -def build_opt_chain(metainterp_sd, enable_opts, - inline_short_preamble=True, retraced=False): +def build_opt_chain(metainterp_sd, enable_opts): config = metainterp_sd.config optimizations = [] unroll = 'unroll' in enable_opts # 'enable_opts' is normally a dict @@ -45,12 +46,9 @@ optimizations.append(OptFfiCall()) if ('rewrite' not in enable_opts or 'virtualize' not in enable_opts - or 'heap' not in enable_opts): + or 'heap' not in enable_opts or 'unroll' not in enable_opts): optimizations.append(OptSimplify()) - if inline_short_preamble: - optimizations = [OptInlineShortPreamble(retraced)] + optimizations - return optimizations, unroll @@ -80,3 +78,21 @@ if __name__ == '__main__': print ALL_OPTS_NAMES + +def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True): + """Optimize loop.operations to remove internal overheadish operations. 
+ """ + + debug_start("jit-optimize") + try: + loop.logops = metainterp_sd.logger_noopt.log_loop(loop.inputargs, + loop.operations) + optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts) + if unroll: + optimize_unroll(metainterp_sd, loop, optimizations, inline_short_preamble) + else: + optimizer = Optimizer(metainterp_sd, loop, optimizations) + optimizer.propagate_all_forward() + finally: + debug_stop("jit-optimize") + diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -7,7 +7,7 @@ from pypy.rlib.libffi import Func from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.annlowlevel import cast_base_ptr_to_instance -from pypy.rpython.lltypesystem import llmemory +from pypy.rpython.lltypesystem import llmemory, rffi class FuncInfo(object): @@ -234,10 +234,13 @@ # longlongs are treated as floats, see # e.g. llsupport/descr.py:getDescrClass is_float = True + elif kind == 'u': + # they're all False + pass else: assert False, "unsupported ffitype or kind" # - fieldsize = ffitype.c_size + fieldsize = rffi.getintfield(ffitype, 'c_size') return self.optimizer.cpu.interiorfielddescrof_dynamic( offset, width, fieldsize, is_pointer, is_float, is_signed ) diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -500,8 +500,9 @@ else: return CVAL_ZERO - def propagate_all_forward(self): - self.clear_newoperations() + def propagate_all_forward(self, clear=True): + if clear: + self.clear_newoperations() for op in self.loop.operations: self.first_optimization.propagate_forward(op) self.loop.operations = self.get_newoperations() @@ -564,9 +565,12 @@ descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) - newboxes = modifier.finish(self.values, self.pendingfields) - if len(newboxes) > self.metainterp_sd.options.failargs_limit: # XXX be careful here - compile.giveup() + try: + newboxes = modifier.finish(self.values, self.pendingfields) + if len(newboxes) > self.metainterp_sd.options.failargs_limit: + raise resume.TagOverflow + except resume.TagOverflow: + raise compile.giveup() descr.store_final_boxes(op, newboxes) # if op.getopnum() == rop.GUARD_VALUE: diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -260,6 +260,16 @@ def optimize_GUARD_FALSE(self, op): self.optimize_guard(op, CONST_0) + def optimize_RECORD_KNOWN_CLASS(self, op): + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) + assert isinstance(expectedclassbox, Const) + realclassbox = value.get_constant_class(self.optimizer.cpu) + if realclassbox is not None: + assert realclassbox.same_constant(expectedclassbox) + return + value.make_constant_class(expectedclassbox, None) + def optimize_GUARD_CLASS(self, op): value = self.getvalue(op.getarg(0)) expectedclassbox = op.getarg(1) @@ -481,6 +491,9 @@ self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) self.emit_operation(op) + def optimize_SAME_AS(self, op): + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) + dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_', default=OptRewrite.emit_operation) 
optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD') diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -1,9 +1,12 @@ from pypy.jit.metainterp.optimizeopt.optimizer import Optimization from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method from pypy.jit.metainterp.resoperation import ResOperation, rop - +from pypy.jit.metainterp.history import TargetToken, JitCellToken class OptSimplify(Optimization): + def __init__(self): + self.last_label_descr = None + def optimize_CALL_PURE(self, op): args = op.getarglist() self.emit_operation(ResOperation(rop.CALL, args, op.result, @@ -28,6 +31,26 @@ def optimize_MARK_OPAQUE_PTR(self, op): pass + def optimize_RECORD_KNOWN_CLASS(self, op): + pass + + def optimize_LABEL(self, op): + self.last_label_descr = op.getdescr() + self.emit_operation(op) + + def optimize_JUMP(self, op): + descr = op.getdescr() + assert isinstance(descr, JitCellToken) + if not descr.target_tokens: + assert self.last_label_descr is not None + target_token = self.last_label_descr + assert isinstance(target_token, TargetToken) + assert target_token.targeting_jitcell_token is descr + op.setdescr(self.last_label_descr) + else: + assert len(descr.target_tokens) == 1 + op.setdescr(descr.target_tokens[0]) + self.emit_operation(op) dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -0,0 +1,200 @@ +from pypy.jit.metainterp.optimizeopt.test.test_util import ( + LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot) +from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken +from pypy.jit.metainterp.resoperation import rop, opname, ResOperation +from pypy.jit.metainterp.optimize import InvalidLoop +from py.test import raises + +class BaseTestMultiLabel(BaseTest): + enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" + + def optimize_loop(self, ops, expected): + loop = self.parse(ops) + if expected != "crash!": + expected = self.parse(expected) + + part = TreeLoop('part') + part.inputargs = loop.inputargs + part.start_resumedescr = FakeDescrWithSnapshot() + token = loop.original_jitcell_token + + optimized = TreeLoop('optimized') + optimized.inputargs = loop.inputargs + optimized.operations = [] + + labels = [i for i, op in enumerate(loop.operations) \ + if op.getopnum()==rop.LABEL] + prv = 0 + last_label = [] + for nxt in labels + [len(loop.operations)]: + assert prv != nxt + operations = last_label + loop.operations[prv:nxt] + if nxt < len(loop.operations): + label = loop.operations[nxt] + assert label.getopnum() == rop.LABEL + jumpop = ResOperation(rop.JUMP, label.getarglist(), + None, descr=token) + operations.append(jumpop) + part.operations = operations + self._do_optimize_loop(part, None) + if part.operations[-1].getopnum() == rop.LABEL: + last_label = [part.operations.pop()] + else: + last_label = [] + optimized.operations.extend(part.operations) + prv = nxt + 1 + + # + print + print "Optimized:" + if optimized.operations: + print '\n'.join([str(o) for o in optimized.operations]) + else: + print 'Failed!' 
+ print + + assert expected != "crash!", "should have raised an exception" + self.assert_equal(optimized, expected) + + return optimized + + def test_simple(self): + ops = """ + [i1] + i2 = int_add(i1, 1) + escape(i2) + label(i1) + i3 = int_add(i1, 1) + escape(i3) + jump(i1) + """ + expected = """ + [i1] + i2 = int_add(i1, 1) + escape(i2) + label(i1, i2) + escape(i2) + jump(i1, i2) + """ + self.optimize_loop(ops, expected) + + def test_forced_virtual(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + escape(p3) + jump(p3) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtuals_with_nonmatching_fields(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3, 1, descr=valuedescr) + label(p3) + p4 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p4, 1, descr=nextdescr) + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtual_arrays_with_nonmatching_lens(self): + ops = """ + [p1] + p2 = new_array(3, descr=arraydescr) + label(p2) + p4 = new_array(2, descr=arraydescr) + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_nonmatching_arraystruct_1(self): + ops = """ + [p1, f0] + p2 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p2, 2, f0, descr=complexrealdescr) + label(p2, f0) + p4 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p4, 2, f0, descr=compleximagdescr) + jump(p4, f0) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_nonmatching_arraystruct_2(self): + ops = """ + [p1, f0] + p2 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p2, 2, f0, descr=complexrealdescr) + label(p2, f0) + p4 = new_array(2, descr=complexarraydescr) + setinteriorfield_gc(p4, 0, f0, descr=complexrealdescr) + jump(p4, f0) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual_array(self): + ops = """ + [p1] + p3 = new_array(3, descr=arraydescr) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual_arraystruct(self): + ops = """ + [p1] + p3 = new_array(3, descr=complexarraydescr) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtual_turns_constant(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + guard_value(p3, ConstPtr(myptr)) [] + jump(p3) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtuals_turns_not_equal(self): + ops = """ + [p1, p2] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3, p3) + p4 = new_with_vtable(ConstClass(node_vtable)) + jump(p3, p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + +class TestLLtype(BaseTestMultiLabel, LLtypeMixin): + pass + diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -1,7 +1,8 @@ import py from pypy.rlib.objectmodel import instantiate from pypy.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, 
FakeMetaInterpStaticData) + LLtypeMixin, BaseTest, FakeMetaInterpStaticData, convert_old_style_to_targets) +from pypy.jit.metainterp.history import TargetToken, JitCellToken from pypy.jit.metainterp.test.test_compile import FakeLogger import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt import pypy.jit.metainterp.optimizeopt.virtualize as virtualize @@ -11,7 +12,6 @@ from pypy.jit.metainterp.resoperation import rop, opname, ResOperation from pypy.rlib.rarithmetic import LONG_BIT - def test_store_final_boxes_in_guard(): from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.resume import tag, TAGBOX @@ -116,9 +116,13 @@ enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap" def optimize_loop(self, ops, optops, call_pure_results=None): - loop = self.parse(ops) - expected = self.parse(optops) + token = JitCellToken() + loop.operations = [ResOperation(rop.LABEL, loop.inputargs, None, descr=TargetToken(token))] + \ + loop.operations + if loop.operations[-1].getopnum() == rop.JUMP: + loop.operations[-1].setdescr(token) + expected = convert_old_style_to_targets(self.parse(optops), jump=True) self._do_optimize_loop(loop, call_pure_results) print '\n'.join([str(o) for o in loop.operations]) self.assert_equal(loop, expected) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1,13 +1,13 @@ import py from pypy.rlib.objectmodel import instantiate from pypy.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, Storage, _sortboxes) + LLtypeMixin, BaseTest, Storage, _sortboxes, convert_old_style_to_targets) import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt import pypy.jit.metainterp.optimizeopt.virtualize as virtualize from pypy.jit.metainterp.optimizeopt import optimize_loop_1, ALL_OPTS_DICT, build_opt_chain from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt -from pypy.jit.metainterp.history import TreeLoop, LoopToken +from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken from pypy.jit.metainterp.jitprof import EmptyProfiler from pypy.jit.metainterp import executor, compile, resume, history from pypy.jit.metainterp.resoperation import rop, opname, ResOperation @@ -15,7 +15,7 @@ from pypy.jit.metainterp.optimizeopt.util import args_dict from pypy.jit.metainterp.optimizeopt.test.test_optimizebasic import FakeMetaInterpStaticData from pypy.config.pypyoption import get_pypy_config - +from pypy.jit.metainterp.optimizeopt.unroll import Inliner def test_build_opt_chain(): def check(chain, expected_names): @@ -23,49 +23,37 @@ assert names == expected_names # metainterp_sd = FakeMetaInterpStaticData(None) - chain, _ = build_opt_chain(metainterp_sd, "", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "") check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "heap:intbounds") - check(chain, ["OptInlineShortPreamble", "OptIntBounds", "OptHeap", "OptSimplify"]) + check(chain, ["OptIntBounds", "OptHeap", 
"OptSimplify"]) # chain, unroll = build_opt_chain(metainterp_sd, "unroll") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) assert unroll # - chain, _ = build_opt_chain(metainterp_sd, "aaa:bbb", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "aaa:bbb") check(chain, ["OptSimplify"]) # - chain, _ = build_opt_chain(metainterp_sd, "ffi", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "ffi") check(chain, ["OptFfiCall", "OptSimplify"]) # metainterp_sd.config = get_pypy_config(translating=True) assert not metainterp_sd.config.translation.jit_ffi - chain, _ = build_opt_chain(metainterp_sd, "ffi", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "ffi") check(chain, ["OptSimplify"]) # ____________________________________________________________ -class FakeDescr(compile.ResumeGuardDescr): - class rd_snapshot: - class prev: - prev = None - boxes = [] - boxes = [] - def clone_if_mutable(self): - return FakeDescr() - def __eq__(self, other): - return isinstance(other, Storage) or isinstance(other, FakeDescr) - - class BaseTestWithUnroll(BaseTest): enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" @@ -79,40 +67,41 @@ expected_preamble = self.parse(expected_preamble) if expected_short: expected_short = self.parse(expected_short) - loop.preamble = TreeLoop('preamble') - loop.preamble.inputargs = loop.inputargs - loop.preamble.token = LoopToken() - loop.preamble.start_resumedescr = FakeDescr() - # - self._do_optimize_loop(loop, call_pure_results) + + preamble = self.unroll_and_optimize(loop, call_pure_results) + # print print "Preamble:" - print loop.preamble.inputargs - if loop.preamble.operations: - print '\n'.join([str(o) for o in loop.preamble.operations]) + if preamble.operations: + print '\n'.join([str(o) for o in preamble.operations]) else: print 'Failed!' 
print print "Loop:" - print loop.inputargs print '\n'.join([str(o) for o in loop.operations]) print if expected_short: print "Short Preamble:" - short = loop.preamble.token.short_preamble[0] - print short.inputargs - print '\n'.join([str(o) for o in short.operations]) + short = loop.operations[0].getdescr().short_preamble + print '\n'.join([str(o) for o in short]) print assert expected != "crash!", "should have raised an exception" - self.assert_equal(loop, expected) + self.assert_equal(loop, convert_old_style_to_targets(expected, jump=True)) + assert loop.operations[0].getdescr() == loop.operations[-1].getdescr() if expected_preamble: - self.assert_equal(loop.preamble, expected_preamble, + self.assert_equal(preamble, convert_old_style_to_targets(expected_preamble, jump=False), text_right='expected preamble') + assert preamble.operations[-1].getdescr() == loop.operations[0].getdescr() if expected_short: - self.assert_equal(short, expected_short, + short_preamble = TreeLoop('short preamble') + assert short[0].getopnum() == rop.LABEL + short_preamble.inputargs = short[0].getarglist() + short_preamble.operations = short + self.assert_equal(short_preamble, convert_old_style_to_targets(expected_short, jump=True), text_right='expected short preamble') + assert short[-1].getdescr() == loop.operations[0].getdescr() return loop @@ -234,7 +223,7 @@ """ % expected_value self.optimize_loop(ops, expected) - def test_reverse_of_cast(self): + def test_reverse_of_cast_1(self): ops = """ [i0] p0 = cast_int_to_ptr(i0) @@ -246,6 +235,8 @@ jump(i0) """ self.optimize_loop(ops, expected) + + def test_reverse_of_cast_2(self): ops = """ [p0] i1 = cast_ptr_to_int(p0) @@ -1181,6 +1172,7 @@ i1 = getfield_gc(p0, descr=valuedescr) i2 = int_sub(i1, 1) i3 = int_add(i0, i1) + i4 = same_as(i2) # This same_as should be killed by backend jump(i3, i2, i1) """ expected = """ @@ -1252,10 +1244,10 @@ i1 = int_add(i0, 1) p1 = new_with_vtable(ConstClass(node_vtable2)) p2 = new_with_vtable(ConstClass(node_vtable2)) - setfield_gc(p0, p1, descr=nextdescr) + setfield_gc(p2, i1, descr=valuedescr) setfield_gc(p2, p1, descr=nextdescr) setfield_gc(p1, p2, descr=nextdescr) - setfield_gc(p2, i1, descr=valuedescr) + setfield_gc(p0, p1, descr=nextdescr) jump(p1) """ self.optimize_loop(ops, loop, preamble) @@ -1317,6 +1309,7 @@ p30 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p30, i28, descr=nextdescr) setfield_gc(p3, p30, descr=valuedescr) + p46 = same_as(p30) # This same_as should be killed by backend jump(i29, p30, p3) """ expected = """ @@ -1324,8 +1317,8 @@ i28 = int_add(i0, 1) i29 = int_add(i28, 1) p30 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p30, i28, descr=nextdescr) setfield_gc(p3, p30, descr=valuedescr) - setfield_gc(p30, i28, descr=nextdescr) jump(i29, p30, p3) """ self.optimize_loop(ops, expected, preamble) @@ -2118,7 +2111,9 @@ guard_true(i3) [] i4 = int_neg(i2) setfield_gc(p1, i2, descr=valuedescr) - jump(p1, i1, i2, i4, i4) + i7 = same_as(i2) # This same_as should be killed by backend + i6 = same_as(i4) + jump(p1, i1, i2, i4, i6) """ expected = """ [p1, i1, i2, i4, i5] @@ -2148,7 +2143,8 @@ i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) escape() - jump(p1, i2, i4, i4) + i5 = same_as(i4) + jump(p1, i2, i4, i5) """ expected = """ [p1, i2, i4, i5] @@ -2177,7 +2173,8 @@ i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) escape() - jump(p1, i2, i4, i4) + i5 = same_as(i4) + jump(p1, i2, i4, i5) """ expected = """ [p1, i2, i4, i5] @@ -2207,7 +2204,9 @@ guard_true(i5) [] i4 = int_neg(i2) 
setfield_gc(p1, i2, descr=valuedescr) - jump(p1, i1, i2, i4, i4) + i8 = same_as(i2) # This same_as should be killed by backend + i7 = same_as(i4) + jump(p1, i1, i2, i4, i7) """ expected = """ [p1, i1, i2, i4, i7] @@ -2433,7 +2432,8 @@ p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p4, descr=nextdescr) setfield_gc(p1, p2, descr=nextdescr) - jump(p1, i2, i4, p4, i4) + i101 = same_as(i4) + jump(p1, i2, i4, p4, i101) """ expected = """ [p1, i2, i4, p4, i5] @@ -3276,7 +3276,15 @@ setfield_gc(p1, i3, descr=valuedescr) jump(p1, i4, i3) ''' - self.optimize_loop(ops, ops, ops) + preamble = ''' + [p1, i1, i4] + setfield_gc(p1, i1, descr=valuedescr) + i3 = call_assembler(i1, descr=asmdescr) + setfield_gc(p1, i3, descr=valuedescr) + i143 = same_as(i3) # Should be killed by backend + jump(p1, i4, i3) + ''' + self.optimize_loop(ops, ops, preamble) def test_call_assembler_invalidates_heap_knowledge(self): ops = ''' @@ -3307,7 +3315,9 @@ setfield_gc(p1, i1, descr=valuedescr) i3 = call(p1, descr=plaincalldescr) setfield_gc(p1, i3, descr=valuedescr) - jump(p1, i4, i3, i3) + i148 = same_as(i3) + i147 = same_as(i3) + jump(p1, i4, i3, i148) ''' self.optimize_loop(ops, expected, preamble) @@ -3330,7 +3340,8 @@ setfield_gc(p1, i1, descr=valuedescr) i3 = call(p1, descr=plaincalldescr) setfield_gc(p1, i1, descr=valuedescr) - jump(p1, i4, i3, i3) + i151 = same_as(i3) + jump(p1, i4, i3, i151) ''' self.optimize_loop(ops, expected, preamble) @@ -3350,7 +3361,8 @@ escape(i1) escape(i2) i4 = call(123456, 4, i0, 6, descr=plaincalldescr) - jump(i0, i4, i4) + i153 = same_as(i4) + jump(i0, i4, i153) ''' expected = ''' [i0, i4, i5] @@ -3380,7 +3392,8 @@ escape(i2) i4 = call(123456, 4, i0, 6, descr=plaincalldescr) guard_no_exception() [] - jump(i0, i4, i4) + i155 = same_as(i4) + jump(i0, i4, i155) ''' expected = ''' [i0, i2, i3] @@ -4198,6 +4211,7 @@ preamble = """ [p0] i0 = strlen(p0) + i3 = same_as(i0) # Should be killed by backend jump(p0) """ expected = """ @@ -5418,6 +5432,7 @@ [p0] p1 = getfield_gc(p0, descr=valuedescr) setfield_gc(p0, p0, descr=valuedescr) + p4450 = same_as(p0) # Should be killed by backend jump(p0) """ expected = """ @@ -5653,7 +5668,8 @@ p3 = newstr(i3) copystrcontent(p1, p3, 0, 0, i1) copystrcontent(p2, p3, 0, i1, i2) - jump(p2, p3, i2) + i7 = same_as(i2) + jump(p2, p3, i7) """ expected = """ [p1, p2, i1] @@ -5728,7 +5744,9 @@ copystrcontent(p1, p5, 0, 0, i1) copystrcontent(p2, p5, 0, i1, i2) copystrcontent(p3, p5, 0, i12, i3) - jump(p2, p3, p5, i2, i3) + i129 = same_as(i2) + i130 = same_as(i3) + jump(p2, p3, p5, i129, i130) """ expected = """ [p1, p2, p3, i1, i2] @@ -5788,7 +5806,8 @@ [p1, i1, i2, i3] escape(i3) i4 = int_sub(i2, i1) - jump(p1, i1, i2, i4, i4) + i5 = same_as(i4) + jump(p1, i1, i2, i4, i5) """ expected = """ [p1, i1, i2, i3, i4] @@ -5813,7 +5832,8 @@ escape(i5) i4 = int_sub(i2, i1) setfield_gc(p2, i4, descr=valuedescr) - jump(p1, i1, i2, p2, i4, i4) + i8 = same_as(i4) + jump(p1, i1, i2, p2, i8, i4) """ expected = """ [p1, i1, i2, p2, i5, i6] @@ -5939,7 +5959,8 @@ p4 = newstr(i5) copystrcontent(p1, p4, i1, 0, i3) copystrcontent(p2, p4, 0, i3, i4) - jump(p4, i1, i2, p2, i5, i3, i4) + i9 = same_as(i4) + jump(p4, i1, i2, p2, i5, i3, i9) """ expected = """ [p1, i1, i2, p2, i5, i3, i4] @@ -6061,7 +6082,9 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, p3, p4, descr=strequaldescr) escape(i0) - jump(p1, p2, p3, i3, i1, i2) + i11 = same_as(i1) + i12 = same_as(i2) + jump(p1, p2, p3, i3, i11, i12) """ expected = """ [p1, p2, p3, i3, i1, i2] @@ -6281,6 +6304,7 @@ i1 = strlen(p1) 
i0 = int_eq(i1, 0) escape(i0) + i3 = same_as(i1) jump(p1, i0) """ self.optimize_strunicode_loop_extradescrs(ops, expected, preamble) @@ -6326,7 +6350,9 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, s"hello world", p4, descr=streq_nonnull_descr) escape(i0) - jump(p1, p2, i3, i1, i2) + i11 = same_as(i1) + i12 = same_as(i2) + jump(p1, p2, i3, i11, i12) """ expected = """ [p1, p2, i3, i1, i2] @@ -6482,6 +6508,21 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() + def test_record_known_class(self): + ops = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + record_known_class(p1, ConstClass(node_vtable)) + guard_class(p1, ConstClass(node_vtable)) [] + jump(p1) + """ + expected = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + jump(p1) + """ + self.optimize_loop(ops, expected) + def test_quasi_immut(self): ops = """ [p0, p1, i0] @@ -6614,7 +6655,8 @@ p188 = getarrayitem_gc(p187, 42, descr=) guard_value(p188, ConstPtr(myptr)) [] p25 = getfield_gc(ConstPtr(myptr), descr=otherdescr) - jump(p25, p187, i184, p25) + p26 = same_as(p25) + jump(p25, p187, i184, p26) """ short = """ [p1, p187, i184] @@ -6883,7 +6925,8 @@ [p9] i843 = strlen(p9) call(i843, descr=nonwritedescr) - jump(p9, i843) + i0 = same_as(i843) + jump(p9, i0) """ short = """ [p9] @@ -6999,6 +7042,40 @@ """ self.optimize_loop(ops, expected) + def test_duplicated_aliased_virtual(self): + ops = """ + [p1, p2] + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3, p3, descr=nextdescr) + p4 = getfield_gc(p3, descr=nextdescr) + jump(p3, p4) + """ + expected = """ + [] + jump() + """ + self.optimize_loop(ops, expected) + + def test_imported_aliased_virtual_in_failargs(self): + ops = """ + [p1, p2, i0] + i2 = int_lt(i0, 10) + guard_true(i2) [p1, p2] + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3, p3, descr=nextdescr) + p4 = getfield_gc(p3, descr=nextdescr) + i1 = int_add(i0, 1) + jump(p3, p4, i1) + """ + expected = """ + [i0] + i2 = int_lt(i0, 10) + guard_true(i2) [] + i1 = int_add(i0, 1) + jump(i1) + """ + self.optimize_loop(ops, expected) + def test_chained_virtuals(self): ops = """ [p0, p1] @@ -7575,7 +7652,8 @@ call(i2, descr=nonwritedescr) setfield_gc(p22, i1, descr=valuedescr) guard_nonnull_class(p18, ConstClass(node_vtable)) [] - jump(p22, p18, i1, i1) + i10 = same_as(i1) + jump(p22, p18, i1, i10) """ short = """ [p22, p18, i1] diff --git a/pypy/jit/metainterp/optimizeopt/test/test_util.py b/pypy/jit/metainterp/optimizeopt/test/test_util.py --- a/pypy/jit/metainterp/optimizeopt/test/test_util.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_util.py @@ -8,7 +8,8 @@ from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstPtr, Const, TreeLoop, BoxObj, - ConstObj, AbstractDescr) + ConstObj, AbstractDescr, + JitCellToken, TargetToken) from pypy.jit.metainterp.optimizeopt.util import sort_descrs, equaloplists from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.codewriter.effectinfo import EffectInfo @@ -18,6 +19,8 @@ from pypy.jit.metainterp import compile, resume, history from pypy.jit.metainterp.jitprof import EmptyProfiler from pypy.config.pypyoption import get_pypy_config +from pypy.jit.metainterp.resoperation import rop, opname, ResOperation +from pypy.jit.metainterp.optimizeopt.unroll import Inliner def test_sort_descrs(): class PseudoDescr(AbstractDescr): @@ -344,6 +347,11 @@ self.config = get_pypy_config(translating=True) self.config.translation.jit_ffi = True + class 
logger_noopt: + @classmethod + def log_loop(*args): + pass + class warmrunnerdesc: class memory_manager: retrace_limit = 5 @@ -394,7 +402,7 @@ expected.operations, False, remap, text_right) def _do_optimize_loop(self, loop, call_pure_results): - from pypy.jit.metainterp.optimizeopt import optimize_loop_1 + from pypy.jit.metainterp.optimizeopt import optimize_trace from pypy.jit.metainterp.optimizeopt.util import args_dict self.loop = loop @@ -408,7 +416,83 @@ if hasattr(self, 'callinfocollection'): metainterp_sd.callinfocollection = self.callinfocollection # - optimize_loop_1(metainterp_sd, loop, self.enable_opts) + optimize_trace(metainterp_sd, loop, self.enable_opts) + + def unroll_and_optimize(self, loop, call_pure_results=None): + operations = loop.operations + jumpop = operations[-1] + assert jumpop.getopnum() == rop.JUMP + inputargs = loop.inputargs + + jump_args = jumpop.getarglist()[:] + operations = operations[:-1] + cloned_operations = [op.clone() for op in operations] + + preamble = TreeLoop('preamble') + preamble.inputargs = inputargs + preamble.start_resumedescr = FakeDescrWithSnapshot() + + token = JitCellToken() + preamble.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(token))] + \ + operations + \ + [ResOperation(rop.JUMP, jump_args, None, descr=token)] + self._do_optimize_loop(preamble, call_pure_results) + + assert preamble.operations[-1].getopnum() == rop.LABEL + + inliner = Inliner(inputargs, jump_args) + loop.start_resumedescr = preamble.start_resumedescr + loop.operations = [preamble.operations[-1]] + \ + [inliner.inline_op(op, clone=False) for op in cloned_operations] + \ + [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jump_args], + None, descr=token)] + #[inliner.inline_op(jumpop)] + assert loop.operations[-1].getopnum() == rop.JUMP + assert loop.operations[0].getopnum() == rop.LABEL + loop.inputargs = loop.operations[0].getarglist() + + self._do_optimize_loop(loop, call_pure_results) + extra_same_as = [] + while loop.operations[0].getopnum() != rop.LABEL: + extra_same_as.append(loop.operations[0]) + del loop.operations[0] + + # Hack to prevent random order of same_as ops + extra_same_as.sort(key=lambda op: str(preamble.operations).find(str(op.getarg(0)))) + + for op in extra_same_as: + preamble.operations.insert(-1, op) + + return preamble + + +class FakeDescr(compile.ResumeGuardDescr): + def clone_if_mutable(self): + return FakeDescr() + def __eq__(self, other): + return isinstance(other, FakeDescr) + +class FakeDescrWithSnapshot(compile.ResumeGuardDescr): + class rd_snapshot: + class prev: + prev = None + boxes = [] + boxes = [] + def clone_if_mutable(self): + return FakeDescrWithSnapshot() + def __eq__(self, other): + return isinstance(other, Storage) or isinstance(other, FakeDescrWithSnapshot) + + +def convert_old_style_to_targets(loop, jump): + newloop = TreeLoop(loop.name) + newloop.inputargs = loop.inputargs + newloop.operations = [ResOperation(rop.LABEL, loop.inputargs, None, descr=FakeDescr())] + \ + loop.operations + if not jump: + assert newloop.operations[-1].getopnum() == rop.JUMP + newloop.operations[-1] = ResOperation(rop.LABEL, newloop.operations[-1].getarglist(), None, descr=FakeDescr()) + return newloop # ____________________________________________________________ diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -1,11 +1,12 @@ from pypy.jit.codewriter.effectinfo 
import EffectInfo -from pypy.jit.metainterp.optimizeopt.virtualstate import VirtualStateAdder, ShortBoxes +from pypy.jit.metainterp.optimizeopt.virtualstate import VirtualStateAdder, ShortBoxes, BadVirtualState from pypy.jit.metainterp.compile import ResumeGuardDescr -from pypy.jit.metainterp.history import TreeLoop, LoopToken +from pypy.jit.metainterp.history import TreeLoop, TargetToken, JitCellToken from pypy.jit.metainterp.jitexc import JitException from pypy.jit.metainterp.optimize import InvalidLoop, RetraceLoop from pypy.jit.metainterp.optimizeopt.optimizer import * from pypy.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds +from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.resume import Snapshot from pypy.rlib.debug import debug_print @@ -13,63 +14,11 @@ # FIXME: Introduce some VirtualOptimizer super class instead -def optimize_unroll(metainterp_sd, loop, optimizations): +def optimize_unroll(metainterp_sd, loop, optimizations, inline_short_preamble=True): opt = UnrollOptimizer(metainterp_sd, loop, optimizations) + opt.inline_short_preamble = inline_short_preamble opt.propagate_all_forward() -class Inliner(object): - def __init__(self, inputargs, jump_args): - assert len(inputargs) == len(jump_args) - self.argmap = {} - for i in range(len(inputargs)): - if inputargs[i] in self.argmap: - assert self.argmap[inputargs[i]] == jump_args[i] - else: - self.argmap[inputargs[i]] = jump_args[i] - self.snapshot_map = {None: None} - - def inline_op(self, newop, ignore_result=False, clone=True, - ignore_failargs=False): - if clone: - newop = newop.clone() - args = newop.getarglist() - newop.initarglist([self.inline_arg(a) for a in args]) - - if newop.is_guard(): - args = newop.getfailargs() - if args and not ignore_failargs: - newop.setfailargs([self.inline_arg(a) for a in args]) - else: - newop.setfailargs([]) - - if newop.result and not ignore_result: - old_result = newop.result - newop.result = newop.result.clonebox() - self.argmap[old_result] = newop.result - - self.inline_descr_inplace(newop.getdescr()) - - return newop - - def inline_descr_inplace(self, descr): - if isinstance(descr, ResumeGuardDescr): - descr.rd_snapshot = self.inline_snapshot(descr.rd_snapshot) - - def inline_arg(self, arg): - if arg is None: - return None - if isinstance(arg, Const): - return arg - return self.argmap[arg] - - def inline_snapshot(self, snapshot): - if snapshot in self.snapshot_map: - return self.snapshot_map[snapshot] - boxes = [self.inline_arg(a) for a in snapshot.boxes] - new_snapshot = Snapshot(self.inline_snapshot(snapshot.prev), boxes) - self.snapshot_map[snapshot] = new_snapshot - return new_snapshot - class UnrollableOptimizer(Optimizer): def setup(self): self.importable_values = {} @@ -101,14 +50,13 @@ become the preamble or entry bridge (don't think there is a distinction anymore)""" + inline_short_preamble = True + did_import = False + def __init__(self, metainterp_sd, loop, optimizations): self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations) - self.cloned_operations = [] - for op in self.optimizer.loop.operations: - newop = op.clone() - self.cloned_operations.append(newop) - def fix_snapshot(self, loop, jump_args, snapshot): + def fix_snapshot(self, jump_args, snapshot): if snapshot is None: return None snapshot_args = snapshot.boxes @@ -116,116 +64,348 @@ for a in snapshot_args: a = self.getvalue(a).get_key_box() new_snapshot_args.append(a) - prev = self.fix_snapshot(loop, 
jump_args, snapshot.prev) + prev = self.fix_snapshot(jump_args, snapshot.prev) return Snapshot(prev, new_snapshot_args) def propagate_all_forward(self): loop = self.optimizer.loop + self.optimizer.clear_newoperations() + + + start_label = loop.operations[0] + if start_label.getopnum() == rop.LABEL: + loop.operations = loop.operations[1:] + # We need to emit the label op before import_state() as emitting it + # will clear heap caches + self.optimizer.send_extra_operation(start_label) + else: + start_label = None + jumpop = loop.operations[-1] if jumpop.getopnum() == rop.JUMP: loop.operations = loop.operations[:-1] else: - loopop = None + jumpop = None - self.optimizer.propagate_all_forward() + self.import_state(start_label) + self.optimizer.propagate_all_forward(clear=False) + if not jumpop: + return + if self.jump_to_already_compiled_trace(jumpop): + # Found a compiled trace to jump to + if self.did_import: - if jumpop: - assert jumpop.getdescr() is loop.token - jump_args = jumpop.getarglist() - jumpop.initarglist([]) + self.close_bridge(start_label) + self.finilize_short_preamble(start_label) + return + + cell_token = jumpop.getdescr() + assert isinstance(cell_token, JitCellToken) + stop_label = ResOperation(rop.LABEL, jumpop.getarglist(), None, TargetToken(cell_token)) + + if not self.did_import: # Enforce the previous behaviour of always peeling exactly one iteration (for now) self.optimizer.flush() + KillHugeIntBounds(self.optimizer).apply() - KillHugeIntBounds(self.optimizer).apply() + loop.operations = self.optimizer.get_newoperations() + self.export_state(stop_label) + loop.operations.append(stop_label) + else: + assert stop_label + assert start_label + stop_target = stop_label.getdescr() + start_target = start_label.getdescr() + assert isinstance(stop_target, TargetToken) + assert isinstance(start_target, TargetToken) + assert stop_target.targeting_jitcell_token is start_target.targeting_jitcell_token + jumpop = ResOperation(rop.JUMP, stop_label.getarglist(), None, descr=start_label.getdescr()) + + self.close_loop(jumpop) + self.finilize_short_preamble(start_label) + + def export_state(self, targetop): + original_jump_args = targetop.getarglist() + jump_args = [self.getvalue(a).get_key_box() for a in original_jump_args] + + assert self.optimizer.loop.start_resumedescr + start_resumedescr = self.optimizer.loop.start_resumedescr.clone_if_mutable() + assert isinstance(start_resumedescr, ResumeGuardDescr) + start_resumedescr.rd_snapshot = self.fix_snapshot(jump_args, start_resumedescr.rd_snapshot) + # FIXME: I dont thnik we need fix_snapshot anymore + + modifier = VirtualStateAdder(self.optimizer) + virtual_state = modifier.get_virtual_state(jump_args) - loop.preamble.operations = self.optimizer.get_newoperations() - jump_args = [self.getvalue(a).get_key_box() for a in jump_args] + values = [self.getvalue(arg) for arg in jump_args] + inputargs = virtual_state.make_inputargs(values, self.optimizer) + short_inputargs = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True) - start_resumedescr = loop.preamble.start_resumedescr.clone_if_mutable() - self.start_resumedescr = start_resumedescr - assert isinstance(start_resumedescr, ResumeGuardDescr) - start_resumedescr.rd_snapshot = self.fix_snapshot(loop, jump_args, - start_resumedescr.rd_snapshot) + constant_inputargs = {} + for box in jump_args: + const = self.get_constant_box(box) + if const: + constant_inputargs[box] = const - modifier = VirtualStateAdder(self.optimizer) - virtual_state = 
modifier.get_virtual_state(jump_args) + short_boxes = ShortBoxes(self.optimizer, inputargs + constant_inputargs.keys()) + aliased_vrituals = {} + for i in range(len(original_jump_args)): + if original_jump_args[i] is not jump_args[i]: + if values[i].is_virtual(): + aliased_vrituals[original_jump_args[i]] = jump_args[i] + else: + short_boxes.alias(original_jump_args[i], jump_args[i]) + + self.optimizer.clear_newoperations() + for box in short_inputargs: + value = self.getvalue(box) + if value.is_virtual(): + value.force_box(self.optimizer) + inputarg_setup_ops = self.optimizer.get_newoperations() + + target_token = targetop.getdescr() + assert isinstance(target_token, TargetToken) + targetop.initarglist(inputargs) + target_token.virtual_state = virtual_state + target_token.short_preamble = [ResOperation(rop.LABEL, short_inputargs, None)] + target_token.start_resumedescr = start_resumedescr + target_token.exported_state = ExportedState(constant_inputargs, short_boxes, + inputarg_setup_ops, self.optimizer, + aliased_vrituals, jump_args) + + def import_state(self, targetop): + self.did_import = False + if not targetop: + # FIXME: Set up some sort of empty state with no virtuals? + return + target_token = targetop.getdescr() + if not target_token: + return + assert isinstance(target_token, TargetToken) + exported_state = target_token.exported_state + if not exported_state: + # FIXME: Set up some sort of empty state with no virtuals + return + self.did_import = True + + self.short = target_token.short_preamble[:] + self.short_seen = {} + self.short_boxes = exported_state.short_boxes.clone() + for box, const in exported_state.constant_inputargs.items(): + self.short_seen[box] = True + self.imported_state = exported_state + self.inputargs = targetop.getarglist() + self.initial_virtual_state = target_token.virtual_state + self.start_resumedescr = target_token.start_resumedescr + + seen = {} + for box in self.inputargs: + if box in seen: + continue + seen[box] = True + preamble_value = exported_state.optimizer.getvalue(box) + value = self.optimizer.getvalue(box) + value.import_from(preamble_value, self.optimizer) + + for newbox, oldbox in self.short_boxes.aliases.items(): + self.optimizer.make_equal_to(newbox, self.optimizer.getvalue(oldbox)) + + # Setup the state of the new optimizer by emiting the + # short operations and discarding the result + self.optimizer.emitting_dissabled = True + for op in exported_state.inputarg_setup_ops: + self.optimizer.send_extra_operation(op) + seen = {} + + for op in self.short_boxes.operations(): + self.ensure_short_op_emitted(op, self.optimizer, seen) + if op and op.result: + preamble_value = exported_state.optimizer.getvalue(op.result) + value = self.optimizer.getvalue(op.result) + if not value.is_virtual(): + imp = ValueImporter(self, preamble_value, op) + self.optimizer.importable_values[value] = imp + newvalue = self.optimizer.getvalue(op.result) + newresult = newvalue.get_key_box() + if newresult is not op.result and not newvalue.is_constant(): + self.short_boxes.alias(newresult, op.result) + op = ResOperation(rop.SAME_AS, [op.result], newresult) + self.optimizer._newoperations = [op] + self.optimizer._newoperations # XXX + #self.optimizer.getvalue(op.result).box = op.result # FIXME: HACK!!! 
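As an aside, the Inliner now imported from pypy.jit.metainterp.inliner (its previous in-file definition is visible in the lines removed from unroll.py above) is essentially a box-renaming pass: it maps the preamble's input boxes to the boxes arriving over the jump, rewrites each cloned operation with that mapping, and registers every cloned result so that later uses get renamed as well. A minimal standalone sketch of that idea, using hypothetical Box/Op/ToyInliner stand-ins rather than the real ResOperation API:

    # Toy illustration of the argument-remapping idea behind Inliner.
    # Box, Op and ToyInliner are made-up stand-ins, not PyPy's real classes.
    class Box(object):
        def __init__(self, name):
            self.name = name

    class Op(object):
        def __init__(self, opname, args, result=None):
            self.opname, self.args, self.result = opname, args, result

    class ToyInliner(object):
        def __init__(self, inputargs, jump_args):
            assert len(inputargs) == len(jump_args)
            # map each preamble input box to the box jumped in from the peeled loop
            self.argmap = dict(zip(inputargs, jump_args))

        def inline_arg(self, arg):
            # constants would pass through unchanged; unmapped boxes stay as-is
            return self.argmap.get(arg, arg)

        def inline_op(self, op):
            newargs = [self.inline_arg(a) for a in op.args]
            newresult = Box(op.result.name + "_copy") if op.result is not None else None
            if newresult is not None:
                # operations emitted later that used the old result now see the clone
                self.argmap[op.result] = newresult
            return Op(op.opname, newargs, newresult)

    i0, i1 = Box('i0'), Box('i1')
    j0, j1 = Box('j0'), Box('j1')
    preamble_op = Op('int_add', [i0, i1], Box('i2'))
    inliner = ToyInliner([i0, i1], [j0, j1])
    peeled_op = inliner.inline_op(preamble_op)   # int_add(j0, j1) -> fresh result box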
+ self.optimizer.flush() + self.optimizer.emitting_dissabled = False + + for box, key_box in exported_state.aliased_vrituals.items(): + self.optimizer.make_equal_to(box, self.getvalue(key_box)) + + def close_bridge(self, start_label): + inputargs = self.inputargs + short_jumpargs = inputargs[:] + + # We dont need to inline the short preamble we are creating as we are conneting + # the bridge to a different trace with a different short preamble + self.short_inliner = None + + newoperations = self.optimizer.get_newoperations() + self.boxes_created_this_iteration = {} + i = 0 + while newoperations[i].getopnum() != rop.LABEL: + i += 1 + while i < len(newoperations): + op = newoperations[i] + self.boxes_created_this_iteration[op.result] = True + args = op.getarglist() + if op.is_guard(): + args = args + op.getfailargs() + for a in args: + self.import_box(a, inputargs, short_jumpargs, []) + i += 1 + newoperations = self.optimizer.get_newoperations() + self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=start_label.getdescr())) + + def close_loop(self, jumpop): + virtual_state = self.initial_virtual_state + short_inputargs = self.short[0].getarglist() + constant_inputargs = self.imported_state.constant_inputargs + inputargs = self.inputargs + short_jumpargs = inputargs[:] + + # Construct jumpargs from the virtual state + original_jumpargs = jumpop.getarglist()[:] + values = [self.getvalue(arg) for arg in jumpop.getarglist()] + try: + jumpargs = virtual_state.make_inputargs(values, self.optimizer) + except BadVirtualState: + raise InvalidLoop + jumpop.initarglist(jumpargs) + + # Inline the short preamble at the end of the loop + jmp_to_short_args = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True) + assert len(short_inputargs) == len(jmp_to_short_args) + args = {} + for i in range(len(short_inputargs)): + if short_inputargs[i] in args: + if args[short_inputargs[i]] != jmp_to_short_args[i]: + raise InvalidLoop + args[short_inputargs[i]] = jmp_to_short_args[i] + self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) + for box, const in constant_inputargs.items(): + self.short_inliner.argmap[box] = const + for op in self.short[1:]: + newop = self.short_inliner.inline_op(op) + self.optimizer.send_extra_operation(newop) + + # Import boxes produced in the preamble but used in the loop + newoperations = self.optimizer.get_newoperations() + self.boxes_created_this_iteration = {} + i = j = 0 + while newoperations[i].getopnum() != rop.LABEL: + i += 1 + while i < len(newoperations) or j < len(jumpargs): + if i == len(newoperations): + while j < len(jumpargs): + a = jumpargs[j] + if self.optimizer.loop.logops: + debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) + self.import_box(a, inputargs, short_jumpargs, jumpargs) + j += 1 + else: + op = newoperations[i] + + self.boxes_created_this_iteration[op.result] = True + args = op.getarglist() + if op.is_guard(): + args = args + op.getfailargs() + + if self.optimizer.loop.logops: + debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) + for a in args: + if self.optimizer.loop.logops: + debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) + self.import_box(a, inputargs, short_jumpargs, jumpargs) + i += 1 + newoperations = self.optimizer.get_newoperations() + + jumpop.initarglist(jumpargs) + self.optimizer.send_extra_operation(jumpop) + self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=jumpop.getdescr())) + + # Verify that the virtual state at the end of the loop is 
one + # that is compatible with the virtual state at the start of the loop + modifier = VirtualStateAdder(self.optimizer) + final_virtual_state = modifier.get_virtual_state(original_jumpargs) + debug_start('jit-log-virtualstate') + virtual_state.debug_print('Closed loop with ') + bad = {} + if not virtual_state.generalization_of(final_virtual_state, bad): + # We ended up with a virtual state that is not compatible + # and we are thus unable to jump to the start of the loop + final_virtual_state.debug_print("Bad virtual state at end of loop, ", + bad) + debug_stop('jit-log-virtualstate') + raise InvalidLoop - values = [self.getvalue(arg) for arg in jump_args] - inputargs = virtual_state.make_inputargs(values, self.optimizer) - short_inputargs = virtual_state.make_inputargs(values, self.optimizer, - keyboxes=True) + debug_stop('jit-log-virtualstate') - self.constant_inputargs = {} - for box in jump_args: - const = self.get_constant_box(box) - if const: - self.constant_inputargs[box] = const + maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards + if self.optimizer.emitted_guards > maxguards: + target_token = jumpop.getdescr() + assert isinstance(target_token, TargetToken) + target_token.targeting_jitcell_token.retraced_count = sys.maxint + + def finilize_short_preamble(self, start_label): + short = self.short + assert short[-1].getopnum() == rop.JUMP + target_token = start_label.getdescr() + assert isinstance(target_token, TargetToken) - sb = ShortBoxes(self.optimizer, inputargs + self.constant_inputargs.keys()) - self.short_boxes = sb + # Turn guards into conditional jumps to the preamble + for i in range(len(short)): + op = short[i] + if op.is_guard(): + op = op.clone() + op.setfailargs(None) + descr = target_token.start_resumedescr.clone_if_mutable() + op.setdescr(descr) + short[i] = op + + # Clone ops and boxes to get private versions and + short_inputargs = short[0].getarglist() + boxmap = {} + newargs = [None] * len(short_inputargs) + for i in range(len(short_inputargs)): + a = short_inputargs[i] + if a in boxmap: + newargs[i] = boxmap[a] + else: + newargs[i] = a.clonebox() + boxmap[a] = newargs[i] + inliner = Inliner(short_inputargs, newargs) + for box, const in self.imported_state.constant_inputargs.items(): + inliner.argmap[box] = const + for i in range(len(short)): + short[i] = inliner.inline_op(short[i]) + + target_token.start_resumedescr = self.start_resumedescr.clone_if_mutable() + inliner.inline_descr_inplace(target_token.start_resumedescr) + + # Forget the values to allow them to be freed + for box in short[0].getarglist(): + box.forget_value() + for op in short: + if op.result: + op.result.forget_value() + target_token.short_preamble = self.short + target_token.exported_state = None + + + def FIXME_old_stuff(): preamble_optimizer = self.optimizer loop.preamble.quasi_immutable_deps = ( self.optimizer.quasi_immutable_deps) self.optimizer = self.optimizer.new() loop.quasi_immutable_deps = self.optimizer.quasi_immutable_deps - logops = self.optimizer.loop.logops - if logops: - args = ", ".join([logops.repr_of_arg(arg) for arg in inputargs]) - debug_print('inputargs: ' + args) - args = ", ".join([logops.repr_of_arg(arg) for arg in short_inputargs]) - debug_print('short inputargs: ' + args) - self.short_boxes.debug_print(logops) - - - # Force virtuals amoung the jump_args of the preamble to get the - # operations needed to setup the proper state of those virtuals - # in the peeled loop - inputarg_setup_ops = [] - 
preamble_optimizer.clear_newoperations() - seen = {} - for box in inputargs: - if box in seen: - continue - seen[box] = True - preamble_value = preamble_optimizer.getvalue(box) - value = self.optimizer.getvalue(box) - value.import_from(preamble_value, self.optimizer) - for box in short_inputargs: - if box in seen: - continue - seen[box] = True - value = preamble_optimizer.getvalue(box) - value.force_box(preamble_optimizer) - inputarg_setup_ops += preamble_optimizer.get_newoperations() - - # Setup the state of the new optimizer by emiting the - # short preamble operations and discarding the result - self.optimizer.emitting_dissabled = True - for op in inputarg_setup_ops: - self.optimizer.send_extra_operation(op) - seen = {} - for op in self.short_boxes.operations(): - self.ensure_short_op_emitted(op, self.optimizer, seen) - if op and op.result: - preamble_value = preamble_optimizer.getvalue(op.result) - value = self.optimizer.getvalue(op.result) - if not value.is_virtual(): - imp = ValueImporter(self, preamble_value, op) - self.optimizer.importable_values[value] = imp - newresult = self.optimizer.getvalue(op.result).get_key_box() - if newresult is not op.result: - self.short_boxes.alias(newresult, op.result) - self.optimizer.flush() - self.optimizer.emitting_dissabled = False - - initial_inputargs_len = len(inputargs) - self.inliner = Inliner(loop.inputargs, jump_args) - - - short = self.inline(inputargs, self.cloned_operations, - loop.inputargs, short_inputargs, - virtual_state) loop.inputargs = inputargs args = [preamble_optimizer.getvalue(self.short_boxes.original(a)).force_box(preamble_optimizer)\ @@ -241,149 +421,7 @@ loop.preamble.token.retraced_count = sys.maxint if short: - assert short[-1].getopnum() == rop.JUMP - short[-1].setdescr(loop.token) - - # Turn guards into conditional jumps to the preamble - for i in range(len(short)): - op = short[i] - if op.is_guard(): - op = op.clone() - op.setfailargs(None) - descr = self.start_resumedescr.clone_if_mutable() - op.setdescr(descr) - short[i] = op - - short_loop = TreeLoop('short preamble') - short_loop.inputargs = short_inputargs - short_loop.operations = short - - # Clone ops and boxes to get private versions and - boxmap = {} - newargs = [None] * len(short_loop.inputargs) - for i in range(len(short_loop.inputargs)): - a = short_loop.inputargs[i] - if a in boxmap: - newargs[i] = boxmap[a] - else: - newargs[i] = a.clonebox() - boxmap[a] = newargs[i] - inliner = Inliner(short_loop.inputargs, newargs) - for box, const in self.constant_inputargs.items(): - inliner.argmap[box] = const - short_loop.inputargs = newargs - ops = [inliner.inline_op(op) for op in short_loop.operations] - short_loop.operations = ops - descr = self.start_resumedescr.clone_if_mutable() - inliner.inline_descr_inplace(descr) - short_loop.start_resumedescr = descr - - assert isinstance(loop.preamble.token, LoopToken) - if loop.preamble.token.short_preamble: - loop.preamble.token.short_preamble.append(short_loop) - else: - loop.preamble.token.short_preamble = [short_loop] - short_loop.virtual_state = virtual_state - - # Forget the values to allow them to be freed - for box in short_loop.inputargs: - box.forget_value() - for op in short_loop.operations: - if op.result: - op.result.forget_value() - - def inline(self, inputargs, loop_operations, loop_args, short_inputargs, virtual_state): - inliner = self.inliner - - short_jumpargs = inputargs[:] - - short = self.short = [] - short_seen = self.short_seen = {} - for box, const in self.constant_inputargs.items(): - 
short_seen[box] = True - - # This loop is equivalent to the main optimization loop in - # Optimizer.propagate_all_forward - jumpop = None - for newop in loop_operations: - newop = inliner.inline_op(newop, clone=False) - if newop.getopnum() == rop.JUMP: - jumpop = newop - break - - #self.optimizer.first_optimization.propagate_forward(newop) - self.optimizer.send_extra_operation(newop) - - self.boxes_created_this_iteration = {} - - assert jumpop - original_jumpargs = jumpop.getarglist()[:] - values = [self.getvalue(arg) for arg in jumpop.getarglist()] - jumpargs = virtual_state.make_inputargs(values, self.optimizer) - jumpop.initarglist(jumpargs) - jmp_to_short_args = virtual_state.make_inputargs(values, self.optimizer, - keyboxes=True) - self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) - - for box, const in self.constant_inputargs.items(): - self.short_inliner.argmap[box] = const - - for op in short: - newop = self.short_inliner.inline_op(op) - self.optimizer.send_extra_operation(newop) - - newoperations = self.optimizer.get_newoperations() - - i = j = 0 - while i < len(newoperations) or j < len(jumpargs): - if i == len(newoperations): - while j < len(jumpargs): - a = jumpargs[j] - if self.optimizer.loop.logops: - debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) - self.import_box(a, inputargs, short, short_jumpargs, - jumpargs, short_seen) - j += 1 - else: - op = newoperations[i] - - self.boxes_created_this_iteration[op.result] = True - args = op.getarglist() - if op.is_guard(): - args = args + op.getfailargs() - - if self.optimizer.loop.logops: - debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) - for a in args: - if self.optimizer.loop.logops: - debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) - self.import_box(a, inputargs, short, short_jumpargs, - jumpargs, short_seen) - i += 1 - newoperations = self.optimizer.get_newoperations() - - jumpop.initarglist(jumpargs) - self.optimizer.send_extra_operation(jumpop) - short.append(ResOperation(rop.JUMP, short_jumpargs, None)) - - modifier = VirtualStateAdder(self.optimizer) - final_virtual_state = modifier.get_virtual_state(original_jumpargs) - debug_start('jit-log-virtualstate') - virtual_state.debug_print('Closed loop with ') - bad = {} - if not virtual_state.generalization_of(final_virtual_state, bad): - # We ended up with a virtual state that is not compatible - # and we are thus unable to jump to the start of the loop - # XXX Is it possible to end up here? If so, consider: - # - Fallback on having the preamble jump to itself? - # - Would virtual_state.generate_guards make sense here? 
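For readers unfamiliar with generalization_of(): a previously compiled target can only be reused when the virtual state it was compiled for is at least as general as the state currently at hand; otherwise generate_guards() may still bridge the gap, and failing that the target is skipped or the loop is rejected as invalid. A toy model of the check, assuming a simplified unknown / known-class / constant classification per argument (this lattice is an assumption made for the example, not PyPy's actual VirtualState):

    # Toy lattice: 'unknown' is more general than ('cls', C), which is more
    # general than ('const', v).  Not the real VirtualState implementation.
    def state_generalizes(target_state, current_state):
        # target_state can accept current_state if every fact the target
        # assumes is also guaranteed by what we currently know
        assert len(target_state) == len(current_state)
        for target, current in zip(target_state, current_state):
            if target == 'unknown':
                continue                      # target assumes nothing here
            if current == 'unknown':
                return False                  # target assumes more than we know
            if target[0] == 'cls':
                if current[0] == 'cls' and current[1] is target[1]:
                    continue
                if current[0] == 'const' and type(current[1]) is target[1]:
                    continue
                return False
            if target[0] == 'const':
                if current[0] == 'const' and current[1] == target[1]:
                    continue
                return False
        return True

    # a loop compiled for [known int, unknown] accepts [constant 7, known str]
    assert state_generalizes([('cls', int), 'unknown'], [('const', 7), ('cls', str)])
    # but a loop compiled for a specific constant does not accept "any int"
    assert not state_generalizes([('const', 7)], [('cls', int)])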
- final_virtual_state.debug_print("Bad virtual state at end of loop, ", - bad) - debug_stop('jit-log-virtualstate') - raise InvalidLoop - debug_stop('jit-log-virtualstate') - - return short + pass def ensure_short_op_emitted(self, op, optimizer, seen): if op is None: @@ -399,19 +437,18 @@ guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) optimizer.send_extra_operation(guard) - def add_op_to_short(self, op, short, short_seen, emit=True, guards_needed=False): + def add_op_to_short(self, op, emit=True, guards_needed=False): if op is None: return None - if op.result is not None and op.result in short_seen: - if emit: + if op.result is not None and op.result in self.short_seen: + if emit and self.short_inliner: return self.short_inliner.inline_arg(op.result) else: return None for a in op.getarglist(): - if not isinstance(a, Const) and a not in short_seen: - self.add_op_to_short(self.short_boxes.producer(a), short, short_seen, - emit, guards_needed) + if not isinstance(a, Const) and a not in self.short_seen: + self.add_op_to_short(self.short_boxes.producer(a), emit, guards_needed) if op.is_guard(): descr = self.start_resumedescr.clone_if_mutable() op.setdescr(descr) @@ -421,9 +458,9 @@ else: value_guards = [] - short.append(op) - short_seen[op.result] = True - if emit: + self.short.append(op) + self.short_seen[op.result] = True + if emit and self.short_inliner: newop = self.short_inliner.inline_op(op) self.optimizer.send_extra_operation(newop) else: @@ -432,23 +469,22 @@ if op.is_ovf(): # FIXME: ensure that GUARD_OVERFLOW:ed ops not end up here guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) - self.add_op_to_short(guard, short, short_seen, emit, guards_needed) + self.add_op_to_short(guard, emit, guards_needed) for guard in value_guards: - self.add_op_to_short(guard, short, short_seen, emit, guards_needed) + self.add_op_to_short(guard, emit, guards_needed) if newop: return newop.result return None - def import_box(self, box, inputargs, short, short_jumpargs, - jumpargs, short_seen): + def import_box(self, box, inputargs, short_jumpargs, jumpargs): if isinstance(box, Const) or box in inputargs: return if box in self.boxes_created_this_iteration: return short_op = self.short_boxes.producer(box) - newresult = self.add_op_to_short(short_op, short, short_seen) + newresult = self.add_op_to_short(short_op) short_jumpargs.append(short_op.result) inputargs.append(box) @@ -456,98 +492,94 @@ if box in self.optimizer.values: box = self.optimizer.values[box].force_box(self.optimizer) jumpargs.append(box) - -class OptInlineShortPreamble(Optimization): - def __init__(self, retraced): - self.retraced = retraced + def jump_to_already_compiled_trace(self, jumpop): + assert jumpop.getopnum() == rop.JUMP + cell_token = jumpop.getdescr() - def new(self): - return OptInlineShortPreamble(self.retraced) + assert isinstance(cell_token, JitCellToken) + if not cell_token.target_tokens: + return False - def propagate_forward(self, op): - if op.getopnum() == rop.JUMP: - loop_token = op.getdescr() - assert isinstance(loop_token, LoopToken) - short = loop_token.short_preamble - if short: - args = op.getarglist() - modifier = VirtualStateAdder(self.optimizer) - virtual_state = modifier.get_virtual_state(args) - debug_start('jit-log-virtualstate') - virtual_state.debug_print("Looking for ") + if not self.inline_short_preamble: + assert cell_token.target_tokens[0].virtual_state is None + jumpop.setdescr(cell_token.target_tokens[0]) + self.optimizer.send_extra_operation(jumpop) + return True - for sh in short: - ok = 
False - extra_guards = [] + args = jumpop.getarglist() + modifier = VirtualStateAdder(self.optimizer) + virtual_state = modifier.get_virtual_state(args) + debug_start('jit-log-virtualstate') + virtual_state.debug_print("Looking for ") - bad = {} - debugmsg = 'Did not match ' - if sh.virtual_state.generalization_of(virtual_state, bad): - ok = True - debugmsg = 'Matched ' - else: - try: - cpu = self.optimizer.cpu - sh.virtual_state.generate_guards(virtual_state, - args, cpu, - extra_guards) + for target in cell_token.target_tokens: + if not target.virtual_state: + continue + ok = False + extra_guards = [] - ok = True - debugmsg = 'Guarded to match ' - except InvalidLoop: - pass - sh.virtual_state.debug_print(debugmsg, bad) - - if ok: - debug_stop('jit-log-virtualstate') + bad = {} + debugmsg = 'Did not match ' + if target.virtual_state.generalization_of(virtual_state, bad): + ok = True + debugmsg = 'Matched ' + else: + try: + cpu = self.optimizer.cpu + target.virtual_state.generate_guards(virtual_state, + args, cpu, + extra_guards) - values = [self.getvalue(arg) - for arg in op.getarglist()] - args = sh.virtual_state.make_inputargs(values, self.optimizer, - keyboxes=True) - inliner = Inliner(sh.inputargs, args) - - for guard in extra_guards: - if guard.is_guard(): - descr = sh.start_resumedescr.clone_if_mutable() - inliner.inline_descr_inplace(descr) - guard.setdescr(descr) - self.emit_operation(guard) - - try: - for shop in sh.operations: - newop = inliner.inline_op(shop) - self.emit_operation(newop) - except InvalidLoop: - debug_print("Inlining failed unexpectedly", - "jumping to preamble instead") - self.emit_operation(op) - return + ok = True + debugmsg = 'Guarded to match ' + except InvalidLoop: + pass + target.virtual_state.debug_print(debugmsg, bad) + + if ok: debug_stop('jit-log-virtualstate') - retraced_count = loop_token.retraced_count - limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit - if not self.retraced and retraced_count self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -180,10 +188,15 @@ self.arraydescr is other.arraydescr) def enum_forced_boxes(self, boxes, value, optimizer): - assert isinstance(value, virtualize.VArrayValue) - assert value.is_virtual() + if not isinstance(value, virtualize.VArrayValue): + raise BadVirtualState + if not value.is_virtual(): + raise BadVirtualState for i in range(len(self.fieldstate)): - v = value._items[i] + try: + v = value._items[i] + except IndexError: + raise BadVirtualState s = self.fieldstate[i] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -248,12 +261,19 @@ s.enum(virtual_state) def enum_forced_boxes(self, boxes, value, optimizer): - assert isinstance(value, virtualize.VArrayStructValue) - assert value.is_virtual() + if not isinstance(value, virtualize.VArrayStructValue): + raise BadVirtualState + if not value.is_virtual(): + raise BadVirtualState p = 0 for i in range(len(self.fielddescrs)): for j in range(len(self.fielddescrs[i])): - v = value._items[i][self.fielddescrs[i][j]] + try: + v = value._items[i][self.fielddescrs[i][j]] + except IndexError: + raise BadVirtualState + except KeyError: + raise BadVirtualState s = self.fieldstate[p] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -546,18 +566,27 @@ self.aliases = {} self.rename = {} self.optimizer = optimizer - for box in surviving_boxes: - self.potential_ops[box] = None - optimizer.produce_potential_short_preamble_ops(self) - self.short_boxes = {} - self.short_boxes_in_production = 
{} + if surviving_boxes is not None: + for box in surviving_boxes: + self.potential_ops[box] = None + optimizer.produce_potential_short_preamble_ops(self) - for box in self.potential_ops.keys(): - try: - self.produce_short_preamble_box(box) - except BoxNotProducable: - pass + self.short_boxes = {} + self.short_boxes_in_production = {} + + for box in self.potential_ops.keys(): + try: + self.produce_short_preamble_box(box) + except BoxNotProducable: + pass + + def clone(self): + sb = ShortBoxes(self.optimizer, None) + sb.aliases.update(self.aliases) + sb.short_boxes = {} + sb.short_boxes.update(self.short_boxes) + return sb def prioritized_alternatives(self, box): if box not in self.alternatives: @@ -598,6 +627,7 @@ newbox = newop.result = op.result.clonebox() self.short_boxes[newop.result] = newop value = self.optimizer.getvalue(box) + self.optimizer.emit_operation(ResOperation(rop.SAME_AS, [box], newbox)) self.optimizer.make_equal_to(newbox, value) else: self.short_boxes[box] = op diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -8,7 +8,7 @@ from pypy.jit.metainterp import history, compile, resume from pypy.jit.metainterp.history import Const, ConstInt, ConstPtr, ConstFloat -from pypy.jit.metainterp.history import Box +from pypy.jit.metainterp.history import Box, TargetToken from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp import executor from pypy.jit.metainterp.logger import Logger @@ -22,7 +22,6 @@ from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr from pypy.jit.codewriter import heaptracker from pypy.jit.metainterp.optimizeopt.util import args_dict_box -from pypy.jit.metainterp.optimize import RetraceLoop # ____________________________________________________________ @@ -243,6 +242,18 @@ def opimpl_mark_opaque_ptr(self, box): return self.execute(rop.MARK_OPAQUE_PTR, box) + @arguments("box", "box") + def opimpl_record_known_class(self, box, clsbox): + from pypy.rpython.lltypesystem import llmemory + if self.metainterp.heapcache.is_class_known(box): + return + adr = clsbox.getaddr() + bounding_class = llmemory.cast_adr_to_ptr(adr, rclass.CLASSTYPE) + if bounding_class.subclassrange_max - bounding_class.subclassrange_min == 1: + # precise class knowledge, this can be used + self.execute(rop.RECORD_KNOWN_CLASS, box, clsbox) + self.metainterp.heapcache.class_now_known(box) + @arguments("box") def _opimpl_any_return(self, box): self.metainterp.finishframe(box) @@ -1555,10 +1566,17 @@ self.portal_trace_positions = [] self.free_frames_list = [] self.last_exc_value_box = None - self.retracing_loop_from = None + self.partial_trace = None + self.retracing_from = -1 self.call_pure_results = args_dict_box() self.heapcache = HeapCache() + def retrace_needed(self, trace): + self.partial_trace = trace + self.retracing_from = len(self.history.operations) - 1 + self.heapcache.reset() + + def perform_call(self, jitcode, boxes, greenkey=None): # causes the metainterp to enter the given subfunction f = self.newframe(jitcode, greenkey) @@ -1778,7 +1796,6 @@ self.staticdata.profiler.count(reason) debug_print('~~~ ABORTING TRACING') self.staticdata.stats.aborted() - self.resumekey.reset_counter_from_failure() def blackhole_if_trace_too_long(self): warmrunnerstate = self.jitdriver_sd.warmstate @@ -1793,7 +1810,7 @@ def _interpret(self): # Execute the frames forward until we raise a DoneWithThisFrame, - # a ExitFrameWithException, or a GenerateMergePoint exception. 
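The subclassrange_max - subclassrange_min == 1 test in opimpl_record_known_class above relies on the standard trick of numbering a class hierarchy with a depth-first walk: every class gets a half-open [min, max) range covering itself and all of its subclasses, so a width of exactly 1 means the class has no subclasses and "instance of this class" is precise knowledge. A small self-contained sketch of that numbering (a hypothetical helper, not RPython's rclass code):

    # Assign [min, max) ranges by a depth-first walk; width 1 <=> leaf class.
    def number_hierarchy(root, children):
        ranges = {}
        def visit(cls, counter):
            start = counter
            counter += 1
            for sub in children.get(cls, []):
                counter = visit(sub, counter)
            ranges[cls] = (start, counter)
            return counter
        visit(root, 0)
        return ranges

    # toy hierarchy: Base -> (A, B), A -> (A1)
    children = {'Base': ['A', 'B'], 'A': ['A1']}
    ranges = number_hierarchy('Base', children)

    def is_precise(cls):
        lo, hi = ranges[cls]
        return hi - lo == 1     # no subclasses: knowing the class is exact

    assert is_precise('B') and is_precise('A1')
    assert not is_precise('A') and not is_precise('Base')

An "is instance of C (or a subclass)" check becomes a range containment test, ranges[C][0] <= id < ranges[C][1], which is why the range bounds are stored on the class in the first place.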
+ # a ExitFrameWithException, or a ContinueRunningNormally exception. self.staticdata.stats.entered() while True: self.framestack[-1].run_one_step() @@ -1841,8 +1858,6 @@ self.seen_loop_header_for_jdindex = -1 try: self.interpret() - except GenerateMergePoint, gmp: - return self.designate_target_loop(gmp) except SwitchToBlackhole, stb: self.run_blackhole_interp_to_cancel_tracing(stb) assert False, "should always raise" @@ -1877,8 +1892,6 @@ if self.resumekey_original_loop_token is None: # very rare case raise SwitchToBlackhole(ABORT_BRIDGE) self.interpret() - except GenerateMergePoint, gmp: - return self.designate_target_loop(gmp) except SwitchToBlackhole, stb: self.run_blackhole_interp_to_cancel_tracing(stb) assert False, "should always raise" @@ -1926,14 +1939,9 @@ # that failed; # - if self.resumekey is a ResumeFromInterpDescr, it starts directly # from the interpreter. - if not self.retracing_loop_from: - try: - self.compile_bridge(live_arg_boxes) - except RetraceLoop: - start = len(self.history.operations) - self.current_merge_points.append((live_arg_boxes, start)) - self.retracing_loop_from = RetraceState(self, live_arg_boxes) - return + if not self.partial_trace: + # FIXME: Support a retrace to be a bridge as well as a loop + self.compile_trace(live_arg_boxes, resumedescr) # raises in case it works -- which is the common case, hopefully, # at least for bridges starting from a guard. @@ -1955,14 +1963,10 @@ else: # Found! Compile it as a loop. # raises in case it works -- which is the common case - if self.retracing_loop_from and \ - self.retracing_loop_from.merge_point == j: - bridge_arg_boxes = self.retracing_loop_from.live_arg_boxes - self.compile_bridge_and_loop(original_boxes, \ - live_arg_boxes, start, - bridge_arg_boxes, resumedescr) - else: - self.compile(original_boxes, live_arg_boxes, start, resumedescr) + if self.partial_trace: + if start != self.retracing_from: + raise SwitchToBlackhole(ABORT_BAD_LOOP) # For now + self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr) # creation of the loop was cancelled! self.staticdata.log('cancelled, tracing more...') @@ -1970,12 +1974,48 @@ start = len(self.history.operations) self.current_merge_points.append((live_arg_boxes, start)) - def designate_target_loop(self, gmp): - loop_token = gmp.target_loop_token + def _unpack_boxes(self, boxes, start, stop): + ints = []; refs = []; floats = [] + for i in range(start, stop): + box = boxes[i] + if box.type == history.INT: ints.append(box.getint()) + elif box.type == history.REF: refs.append(box.getref_base()) + elif box.type == history.FLOAT:floats.append(box.getfloatstorage()) + else: assert 0 + return ints[:], refs[:], floats[:] + + def raise_continue_running_normally(self, live_arg_boxes, loop_token): + self.history.inputargs = None + self.history.operations = None + # For simplicity, we just raise ContinueRunningNormally here and + # ignore the loop_token passed in. It means that we go back to + # interpreted mode, but it should come back very quickly to the + # JIT, find probably the same 'loop_token', and execute it. 
+ if we_are_translated(): + num_green_args = self.jitdriver_sd.num_green_args + gi, gr, gf = self._unpack_boxes(live_arg_boxes, 0, num_green_args) + ri, rr, rf = self._unpack_boxes(live_arg_boxes, num_green_args, + len(live_arg_boxes)) + CRN = self.staticdata.ContinueRunningNormally + raise CRN(gi, gr, gf, ri, rr, rf) + else: + # However, in order to keep the existing tests working + # (which are based on the assumption that 'loop_token' is + # directly used here), a bit of custom non-translatable code... + self._nontranslated_run_directly(live_arg_boxes, loop_token) + assert 0, "unreachable" + + def _nontranslated_run_directly(self, live_arg_boxes, loop_token): + "NOT_RPYTHON" + args = [] num_green_args = self.jitdriver_sd.num_green_args - residual_args = gmp.argboxes[num_green_args:] - history.set_future_values(self.cpu, residual_args) - return loop_token + num_red_args = self.jitdriver_sd.num_red_args + for box in live_arg_boxes[num_green_args:num_green_args+num_red_args]: + if box.type == history.INT: args.append(box.getint()) + elif box.type == history.REF: args.append(box.getref_base()) + elif box.type == history.FLOAT: args.append(box.getfloatstorage()) + else: assert 0 + self.jitdriver_sd.warmstate.execute_assembler(loop_token, *args) def prepare_resume_from_failure(self, opnum, dont_change_position=False): frame = self.framestack[-1] @@ -2016,54 +2056,57 @@ from pypy.jit.metainterp.resoperation import opname raise NotImplementedError(opname[opnum]) - def get_compiled_merge_points(self, greenkey): - """Get the list of looptokens corresponding to the greenkey. - Turns the (internal) list of weakrefs into regular refs. - """ + def get_procedure_token(self, greenkey): cell = self.jitdriver_sd.warmstate.jit_cell_at_key(greenkey) - return cell.get_compiled_merge_points() + return cell.get_procedure_token() + + def compile_loop(self, original_boxes, live_arg_boxes, start, start_resumedescr): + num_green_args = self.jitdriver_sd.num_green_args + greenkey = original_boxes[:num_green_args] + if not self.partial_trace: + assert self.get_procedure_token(greenkey) is None or \ + self.get_procedure_token(greenkey).target_tokens is None + if self.partial_trace: + target_token = compile.compile_retrace(self, greenkey, start, + original_boxes[num_green_args:], + live_arg_boxes[num_green_args:], + start_resumedescr, self.partial_trace, + self.resumekey) + else: + target_token = compile.compile_loop(self, greenkey, start, + original_boxes[num_green_args:], + live_arg_boxes[num_green_args:], + start_resumedescr) + if target_token is not None: + assert isinstance(target_token, TargetToken) + self.jitdriver_sd.warmstate.attach_procedure_to_interp(greenkey, target_token.targeting_jitcell_token) + self.staticdata.stats.add_jitcell_token(target_token.targeting_jitcell_token) - def set_compiled_merge_points(self, greenkey, looptokens): - cell = self.jitdriver_sd.warmstate.jit_cell_at_key(greenkey) - cell.set_compiled_merge_points(looptokens) - def compile(self, original_boxes, live_arg_boxes, start, start_resumedescr): - num_green_args = self.jitdriver_sd.num_green_args - original_inputargs = self.history.inputargs - self.history.inputargs = original_boxes[num_green_args:] - greenkey = original_boxes[:num_green_args] - old_loop_tokens = self.get_compiled_merge_points(greenkey) - self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None) - loop_token = compile.compile_new_loop(self, old_loop_tokens, - greenkey, start, start_resumedescr) - if loop_token is not None: # raise if it *worked* correctly 
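The new compile_loop/compile_trace paths above attach and look up compiled procedures through the warm state's per-greenkey JitCell, the same shape as the FakeJitCell/FakeWarmRunnerState test doubles further down in this patch. A minimal sketch of that mapping, with illustrative ToyJitCell/ToyWarmState names rather than the real warmstate classes:

    # Toy version of the greenkey -> JitCell -> procedure-token lookup used by
    # attach_procedure_to_interp() / get_procedure_token(); names are made up.
    class ToyJitCell(object):
        procedure_token = None

    class ToyWarmState(object):
        def __init__(self):
            self._cells = {}

        def jit_cell_at_key(self, greenkey):
            # one cell per distinct tuple of green (constant) arguments
            return self._cells.setdefault(tuple(greenkey), ToyJitCell())

        def attach_procedure_to_interp(self, greenkey, procedure_token):
            self.jit_cell_at_key(greenkey).procedure_token = procedure_token

        def get_procedure_token(self, greenkey):
            return self.jit_cell_at_key(greenkey).procedure_token

    state = ToyWarmState()
    state.attach_procedure_to_interp(['f', 3], 'token-for-f-3')
    assert state.get_procedure_token(['f', 3]) == 'token-for-f-3'
    assert state.get_procedure_token(['g', 0]) is None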
- self.set_compiled_merge_points(greenkey, old_loop_tokens) - self.history.inputargs = None - self.history.operations = None - raise GenerateMergePoint(live_arg_boxes, loop_token) + if target_token is not None: # raise if it *worked* correctly + assert isinstance(target_token, TargetToken) + jitcell_token = target_token.targeting_jitcell_token + self.raise_continue_running_normally(live_arg_boxes, jitcell_token) - self.history.inputargs = original_inputargs - self.history.operations.pop() # remove the JUMP - - def compile_bridge(self, live_arg_boxes): + def compile_trace(self, live_arg_boxes, start_resumedescr): num_green_args = self.jitdriver_sd.num_green_args greenkey = live_arg_boxes[:num_green_args] - old_loop_tokens = self.get_compiled_merge_points(greenkey) - if len(old_loop_tokens) == 0: + target_jitcell_token = self.get_procedure_token(greenkey) + if not target_jitcell_token: return - #if self.resumekey.guard_opnum == rop.GUARD_CLASS: - # return # Kepp tracing for another iteration - self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None) + if not target_jitcell_token.target_tokens: + return + + self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None, + descr=target_jitcell_token) try: - target_loop_token = compile.compile_new_bridge(self, - old_loop_tokens, - self.resumekey) + target_token = compile.compile_trace(self, self.resumekey, start_resumedescr) finally: self.history.operations.pop() # remove the JUMP - if target_loop_token is not None: # raise if it *worked* correctly - self.history.inputargs = None - self.history.operations = None - raise GenerateMergePoint(live_arg_boxes, target_loop_token) + if target_token is not None: # raise if it *worked* correctly + assert isinstance(target_token, TargetToken) + jitcell_token = target_token.targeting_jitcell_token + self.raise_continue_running_normally(live_arg_boxes, jitcell_token) def compile_bridge_and_loop(self, original_boxes, live_arg_boxes, start, bridge_arg_boxes, start_resumedescr): @@ -2099,10 +2142,8 @@ except RetraceLoop: assert False assert target_loop_token is not None - - self.history.inputargs = None - self.history.operations = None - raise GenerateMergePoint(live_arg_boxes, old_loop_tokens[0]) + self.raise_continue_running_normally(live_arg_boxes, + old_loop_tokens[0]) def compile_done_with_this_frame(self, exitbox): self.gen_store_back_in_virtualizable() @@ -2124,21 +2165,21 @@ loop_tokens = sd.loop_tokens_done_with_this_frame_float else: assert False - self.history.record(rop.JUMP, exits, None) - target_loop_token = compile.compile_new_bridge(self, loop_tokens, - self.resumekey) - if target_loop_token is not loop_tokens[0]: + # FIXME: kill TerminatingLoopToken? + # FIXME: can we call compile_trace? 
+ token = loop_tokens[0].finishdescr + self.history.record(rop.FINISH, exits, None, descr=token) + target_token = compile.compile_trace(self, self.resumekey) + if target_token is not token: compile.giveup() def compile_exit_frame_with_exception(self, valuebox): self.gen_store_back_in_virtualizable() - # temporarily put a JUMP to a pseudo-loop - self.history.record(rop.JUMP, [valuebox], None) sd = self.staticdata - loop_tokens = sd.loop_tokens_exit_frame_with_exception_ref - target_loop_token = compile.compile_new_bridge(self, loop_tokens, - self.resumekey) - if target_loop_token is not loop_tokens[0]: + token = sd.loop_tokens_exit_frame_with_exception_ref[0].finishdescr + self.history.record(rop.FINISH, [valuebox], None, descr=token) + target_token = compile.compile_trace(self, self.resumekey) + if target_token is not token: compile.giveup() @specialize.arg(1) @@ -2380,22 +2421,6 @@ abox, ConstInt(j), itembox) assert i + 1 == len(self.virtualizable_boxes) - def gen_load_from_other_virtualizable(self, vinfo, vbox): - boxes = [] - assert vinfo is not None - for i in range(vinfo.num_static_extra_boxes): - descr = vinfo.static_field_descrs[i] - boxes.append(self.execute_and_record(rop.GETFIELD_GC, descr, vbox)) - virtualizable = vinfo.unwrap_virtualizable_box(vbox) - for k in range(vinfo.num_arrays): - descr = vinfo.array_field_descrs[k] - abox = self.execute_and_record(rop.GETFIELD_GC, descr, vbox) - descr = vinfo.array_descrs[k] - for j in range(vinfo.get_array_length(virtualizable, k)): - boxes.append(self.execute_and_record(rop.GETARRAYITEM_GC, descr, - abox, ConstInt(j))) - return boxes - def replace_box(self, oldbox, newbox): assert isinstance(oldbox, Box) for frame in self.framestack: @@ -2467,25 +2492,13 @@ greenargs = arglist[1:num_green_args+1] args = arglist[num_green_args+1:] assert len(args) == targetjitdriver_sd.num_red_args - vinfo = targetjitdriver_sd.virtualizable_info - if vinfo is not None: - index = targetjitdriver_sd.index_of_virtualizable - vbox = args[index] - args = args + self.gen_load_from_other_virtualizable(vinfo, vbox) - # ^^^ and not "+=", which makes 'args' a resizable list warmrunnerstate = targetjitdriver_sd.warmstate - token = warmrunnerstate.get_assembler_token(greenargs, args) + token = warmrunnerstate.get_assembler_token(greenargs) op = op.copy_and_change(rop.CALL_ASSEMBLER, args=args, descr=token) self.history.operations.append(op) # ____________________________________________________________ -class GenerateMergePoint(JitException): - def __init__(self, args, target_loop_token): - assert target_loop_token is not None - self.argboxes = args - self.target_loop_token = target_loop_token - class ChangeFrame(JitException): """Raised after we mutated metainterp.framestack, in order to force it to reload the current top-of-stack frame that gets interpreted.""" diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -369,6 +369,8 @@ 'FINISH/*d', '_FINAL_LAST', + 'LABEL/*d', + '_GUARD_FIRST', '_GUARD_FOLDABLE_FIRST', 'GUARD_TRUE/1d', @@ -379,11 +381,11 @@ 'GUARD_ISNULL/1d', 'GUARD_NONNULL_CLASS/2d', '_GUARD_FOLDABLE_LAST', - 'GUARD_NO_EXCEPTION/0d', - 'GUARD_EXCEPTION/1d', + 'GUARD_NO_EXCEPTION/0d', # may be called with an exception currently set + 'GUARD_EXCEPTION/1d', # may be called with an exception currently set 'GUARD_NO_OVERFLOW/0d', 'GUARD_OVERFLOW/0d', - 'GUARD_NOT_FORCED/0d', + 'GUARD_NOT_FORCED/0d', # may be called with an exception 
currently set 'GUARD_NOT_INVALIDATED/0d', '_GUARD_LAST', # ----- end of guard operations ----- @@ -494,6 +496,7 @@ 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr + 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/jit/metainterp/resume.py b/pypy/jit/metainterp/resume.py --- a/pypy/jit/metainterp/resume.py +++ b/pypy/jit/metainterp/resume.py @@ -93,12 +93,14 @@ TAGMASK = 3 +class TagOverflow(Exception): + pass + def tag(value, tagbits): - if tagbits >> 2: - raise ValueError + assert 0 <= tagbits <= 3 sx = value >> 13 if sx != 0 and sx != -1: - raise ValueError + raise TagOverflow return rffi.r_short(value<<2|tagbits) def untag(value): @@ -153,7 +155,7 @@ return self._newconst(const) try: return tag(val, TAGINT) - except ValueError: + except TagOverflow: pass tagged = self.large_ints.get(val, UNASSIGNED) if not tagged_eq(tagged, UNASSIGNED): @@ -429,8 +431,7 @@ fieldnum = self._gettagged(fieldbox) # the index is limited to 2147483647 (64-bit machines only) if itemindex > 2147483647: - from pypy.jit.metainterp import compile - compile.giveup() + raise TagOverflow itemindex = rffi.cast(rffi.INT, itemindex) # rd_pendingfields[i].lldescr = lldescr diff --git a/pypy/jit/metainterp/test/support.py b/pypy/jit/metainterp/test/support.py --- a/pypy/jit/metainterp/test/support.py +++ b/pypy/jit/metainterp/test/support.py @@ -4,9 +4,9 @@ from pypy.rpython.ootypesystem import ootype from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats +from pypy.jit.metainterp.warmstate import unspecialize_value from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT from pypy.jit.metainterp import pyjitpl, history -from pypy.jit.metainterp.warmstate import set_future_value from pypy.jit.codewriter.policy import JitPolicy from pypy.jit.codewriter import codewriter, longlong from pypy.rlib.rfloat import isnan @@ -16,15 +16,16 @@ from pypy.jit.codewriter import support class FakeJitCell(object): - __compiled_merge_points = [] - def get_compiled_merge_points(self): - return self.__compiled_merge_points[:] - def set_compiled_merge_points(self, lst): - self.__compiled_merge_points = lst + __product_token = None + def get_procedure_token(self): + return self.__product_token + def set_procedure_token(self, token): + self.__product_token = token class FakeWarmRunnerState(object): - def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): - pass + def attach_procedure_to_interp(self, greenkey, procedure_token): + cell = self.jit_cell_at_key(greenkey) + cell.set_procedure_token(procedure_token) def helper_func(self, FUNCPTR, func): from pypy.rpython.annlowlevel import llhelper @@ -132,16 +133,14 @@ def _run_with_machine_code(testself, args): metainterp = testself.metainterp num_green_args = metainterp.jitdriver_sd.num_green_args - loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) - if len(loop_tokens) != 1: - return NotImplemented + procedure_token = metainterp.get_procedure_token(args[:num_green_args]) # a loop was successfully created by _run_with_pyjitpl(); call it cpu = metainterp.cpu + args1 = [] for i in range(len(args) - num_green_args): x = args[num_green_args + i] - typecode = history.getkind(lltype.typeOf(x)) - set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(loop_tokens[0]) + args1.append(unspecialize_value(x)) + faildescr = 
cpu.execute_token(procedure_token, *args1) assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') if metainterp.jitdriver_sd.result_type == history.INT: return cpu.get_latest_value_int(0) @@ -157,24 +156,34 @@ basic = True def check_resops(self, expected=None, **check): get_stats().check_resops(expected=expected, **check) + def check_simple_loop(self, expected=None, **check): + get_stats().check_simple_loop(expected=expected, **check) - def check_loop_count(self, count): - """NB. This is a hack; use check_tree_loop_count() or - check_enter_count() for the real thing. - This counts as 1 every bridge in addition to every loop; and it does - not count at all the entry bridges from interpreter, although they - are TreeLoops as well.""" + + + def check_trace_count(self, count): # was check_loop_count + # The number of traces compiled assert get_stats().compiled_count == count - def check_tree_loop_count(self, count): - assert len(get_stats().loops) == count - def check_loop_count_at_most(self, count): + def check_trace_count_at_most(self, count): assert get_stats().compiled_count <= count + + def check_jitcell_token_count(self, count): # was check_tree_loop_count + assert len(get_stats().jitcell_token_wrefs) == count + + def check_target_token_count(self, count): + tokens = get_stats().get_all_jitcell_tokens() + n = sum ([len(t.target_tokens) for t in tokens]) + assert n == count + def check_enter_count(self, count): assert get_stats().enter_count == count def check_enter_count_at_most(self, count): assert get_stats().enter_count <= count + def check_jumps(self, maxcount): + return # FIXME assert get_stats().exec_jumps <= maxcount + def check_aborted_count(self, count): assert get_stats().aborted_count == count def check_aborted_count_at_least(self, count): @@ -217,7 +226,7 @@ # this can be used after interp_operations if expected is not None: expected = dict(expected) - expected['jump'] = 1 + expected['finish'] = 1 self.metainterp.staticdata.stats.check_history(expected, **isns) diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -9,12 +9,11 @@ from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin, noConst from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.jit.metainterp.warmspot import get_stats -from pypy.jit.metainterp.warmstate import set_future_value from pypy.rlib import rerased from pypy.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside, loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, - isconstant, isvirtual, promote_string, set_param) + isconstant, isvirtual, promote_string, set_param, record_known_class) from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -66,7 +65,7 @@ res = self.interp_operations(f, [8, 98]) assert res == 110 - def test_loop(self): + def test_loop_1(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) def f(x, y): res = 0 @@ -78,19 +77,20 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 42 - self.check_loop_count(1) - self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, 'guard_true': 2, 'int_sub': 2}) + self.check_trace_count(1) + self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, + 'guard_true': 2, 'int_sub': 2}) if self.basic: found = 0 - for 
op in get_stats().loops[0]._all_operations(): + for op in get_stats().get_all_loops()[0]._all_operations(): if op.getopname() == 'guard_true': liveboxes = op.getfailargs() assert len(liveboxes) == 3 for box in liveboxes: assert isinstance(box, history.BoxInt) found += 1 - assert found == 1 + assert found == 2 def test_loop_variant_mul1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -106,8 +106,8 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 1323 - self.check_loop_count(1) - self.check_resops(int_mul=3) + self.check_trace_count(1) + self.check_simple_loop(int_mul=1) def test_loop_variant_mul_ovf(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -123,8 +123,8 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 1323 - self.check_loop_count(1) - self.check_resops(int_mul_ovf=3) + self.check_trace_count(1) + self.check_simple_loop(int_mul_ovf=1) def test_loop_invariant_mul1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -138,8 +138,9 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 252 - self.check_loop_count(1) - self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, + self.check_trace_count(1) + self.check_simple_loop(int_mul=0) + self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) @@ -156,66 +157,63 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 308 - self.check_loop_count(1) - self.check_resops({'jump': 2, 'int_lshift': 2, 'int_gt': 2, + self.check_trace_count(1) + self.check_simple_loop(int_mul_ovf=0) + self.check_resops({'jump': 1, 'int_lshift': 2, 'int_gt': 2, 'int_mul_ovf': 1, 'int_add': 4, 'guard_true': 2, 'guard_no_overflow': 1, 'int_sub': 2}) def test_loop_invariant_mul_bridge1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x', 'n']) + def f(x, y, n): res = 0 while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) + myjitdriver.can_enter_jit(x=x, y=y, n=n, res=res) + myjitdriver.jit_merge_point(x=x, y=y, n=n, res=res) res += x * x - if y<16: + if y 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) + myjitdriver.can_enter_jit(x=x, y=y, res=res, n=n) + myjitdriver.jit_merge_point(x=x, y=y, res=res, n=n) res += x * x - if y<16: + if y 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) + myjitdriver.can_enter_jit(x=x, y=y, res=res, n=n) + myjitdriver.jit_merge_point(x=x, y=y, res=res, n=n) z = x * x res += z - if y<16: + if y Author: Armin Rigo Branch: closed-branches Changeset: r50555:ecda4725a574 Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/ecda4725a574/ Log: Merge closed head 6d7644a6fd38 on branch no-force-guard-lazy-set From noreply at buildbot.pypy.org Thu Dec 15 14:04:05 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:05 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 105fe17b27f7 on branch gmp Message-ID: <20111215130405.206FA82221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50556:fd60224546b1 Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/fd60224546b1/ Log: Merge closed head 105fe17b27f7 on branch gmp From noreply at buildbot.pypy.org Thu Dec 15 14:04:06 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 
15 Dec 2011 14:04:06 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 48639d927f25 on branch releasegil-effectinfo Message-ID: <20111215130406.2EC1482221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50557:8c402b1fe7b9 Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/8c402b1fe7b9/ Log: Merge closed head 48639d927f25 on branch releasegil-effectinfo From noreply at buildbot.pypy.org Thu Dec 15 14:04:07 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:07 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 7a58d06641f3 on branch numpy-minilang Message-ID: <20111215130407.3B4F782221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50558:b873a48dd65a Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/b873a48dd65a/ Log: Merge closed head 7a58d06641f3 on branch numpy-minilang From noreply at buildbot.pypy.org Thu Dec 15 14:04:08 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:08 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 0ede8b92968e on branch rgc-mem-pressure Message-ID: <20111215130408.5186482221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50559:dce334355118 Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/dce334355118/ Log: Merge closed head 0ede8b92968e on branch rgc-mem-pressure From noreply at buildbot.pypy.org Thu Dec 15 14:04:09 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:09 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 9d40404468cf on branch win64 test Message-ID: <20111215130409.5E66682221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50560:48804171531e Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/48804171531e/ Log: Merge closed head 9d40404468cf on branch win64 test From noreply at buildbot.pypy.org Thu Dec 15 14:04:10 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:10 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head ca0f81ea74b5 on branch win64_gborg Message-ID: <20111215130410.6A6F582221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50561:3a0dec36ccd3 Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/3a0dec36ccd3/ Log: Merge closed head ca0f81ea74b5 on branch win64_gborg From noreply at buildbot.pypy.org Thu Dec 15 14:04:11 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:11 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head ee74794f5464 on branch numpy NDimArray Message-ID: <20111215130411.77AE682221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50562:caa80f4bb7b8 Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/caa80f4bb7b8/ Log: Merge closed head ee74794f5464 on branch numpy NDimArray From noreply at buildbot.pypy.org Thu Dec 15 14:04:12 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:12 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 9d59a8501c00 on branch win64_gborg Message-ID: <20111215130412.8458D82221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50563:9f9d57f80be2 Date: 2011-12-15 14:01 +0100 
http://bitbucket.org/pypy/pypy/changeset/9f9d57f80be2/ Log: Merge closed head 9d59a8501c00 on branch win64_gborg From noreply at buildbot.pypy.org Thu Dec 15 14:04:13 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:13 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 03816b1a901c on branch win64 test Message-ID: <20111215130413.90B3582221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50564:acc499164a5c Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/acc499164a5c/ Log: Merge closed head 03816b1a901c on branch win64 test From noreply at buildbot.pypy.org Thu Dec 15 14:04:14 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:14 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head d0d86e088b8b on branch numpy-multidim-shards Message-ID: <20111215130414.9EF5782221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50565:7847dc9bc66c Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/7847dc9bc66c/ Log: Merge closed head d0d86e088b8b on branch numpy-multidim-shards From noreply at buildbot.pypy.org Thu Dec 15 14:04:15 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:15 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 2f25f1d0005c on branch numpy-monkeyaround Message-ID: <20111215130415.AB54D82221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50566:16c7651dc366 Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/16c7651dc366/ Log: Merge closed head 2f25f1d0005c on branch numpy-monkeyaround From noreply at buildbot.pypy.org Thu Dec 15 14:04:16 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:16 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 1ea6c70d9971 on branch micronumpy-resync Message-ID: <20111215130416.BCCE782221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50567:8b343089086b Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/8b343089086b/ Log: Merge closed head 1ea6c70d9971 on branch micronumpy-resync From noreply at buildbot.pypy.org Thu Dec 15 14:04:17 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:17 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head df45bf0c2ee7 on branch numpy-multidim-exp Message-ID: <20111215130417.C8DF382221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50568:7be0184f6d38 Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/7be0184f6d38/ Log: Merge closed head df45bf0c2ee7 on branch numpy-multidim-exp From noreply at buildbot.pypy.org Thu Dec 15 14:04:18 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:18 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 9ab059335d1f on branch jit-refactor-tests Message-ID: <20111215130418.D59E182221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50569:ae7bb10725a5 Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/ae7bb10725a5/ Log: Merge closed head 9ab059335d1f on branch jit-refactor-tests From noreply at buildbot.pypy.org Thu Dec 15 14:04:19 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:19 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: 
Merge closed head 78795591bf65 on branch matrixmath Message-ID: <20111215130419.E7F1A82221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50570:103191efcd4a Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/103191efcd4a/ Log: Merge closed head 78795591bf65 on branch matrixmath From noreply at buildbot.pypy.org Thu Dec 15 14:04:21 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:21 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head c8b9ca8d5c25 on branch matrixmath-reshape Message-ID: <20111215130421.0237582221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50571:9ee3e533c081 Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/9ee3e533c081/ Log: Merge closed head c8b9ca8d5c25 on branch matrixmath-reshape From noreply at buildbot.pypy.org Thu Dec 15 14:04:22 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:22 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 83b5695339e8 on branch matrixmath-reshape-merge Message-ID: <20111215130422.0E18F82221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50572:e0b744896524 Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/e0b744896524/ Log: Merge closed head 83b5695339e8 on branch matrixmath-reshape-merge From noreply at buildbot.pypy.org Thu Dec 15 14:04:23 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:23 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 2db53dd9c46e on branch temp2 Message-ID: <20111215130423.1A02382221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50573:4fa228cccdda Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/4fa228cccdda/ Log: Merge closed head 2db53dd9c46e on branch temp2 From noreply at buildbot.pypy.org Thu Dec 15 14:04:24 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:24 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 2eb520555de5 on branch nedbat-sandbox Message-ID: <20111215130424.2635182221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50574:33b58ee5d2da Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/33b58ee5d2da/ Log: Merge closed head 2eb520555de5 on branch nedbat-sandbox From noreply at buildbot.pypy.org Thu Dec 15 14:04:25 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:25 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head a48a35a5c618 on branch SpecialisedTuples Message-ID: <20111215130425.31C2D82221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50575:619b713a5bb6 Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/619b713a5bb6/ Log: Merge closed head a48a35a5c618 on branch SpecialisedTuples From noreply at buildbot.pypy.org Thu Dec 15 14:04:26 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:26 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 53d2c2028ef3 on branch numpy-pi-sum-min-max Message-ID: <20111215130426.3E86782221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50576:1c5de2c38245 Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/1c5de2c38245/ Log: Merge closed head 53d2c2028ef3 on branch 
numpy-pi-sum-min-max From noreply at buildbot.pypy.org Thu Dec 15 14:04:27 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:27 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 539833042097 on branch jit-simplify-backendintf Message-ID: <20111215130427.4AF2382221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50577:0b366011ad13 Date: 2011-12-15 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/0b366011ad13/ Log: Merge closed head 539833042097 on branch jit-simplify-backendintf From noreply at buildbot.pypy.org Thu Dec 15 14:04:28 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:28 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head b391b4c1ef21 on branch numpy-identity Message-ID: <20111215130428.5890882221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50578:dc1585148802 Date: 2011-12-15 14:02 +0100 http://bitbucket.org/pypy/pypy/changeset/dc1585148802/ Log: Merge closed head b391b4c1ef21 on branch numpy-identity From noreply at buildbot.pypy.org Thu Dec 15 14:04:29 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:29 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 40d47febc01e on branch virtualizable-experiments Message-ID: <20111215130429.63FF982221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50579:91e205942073 Date: 2011-12-15 14:02 +0100 http://bitbucket.org/pypy/pypy/changeset/91e205942073/ Log: Merge closed head 40d47febc01e on branch virtualizable-experiments From noreply at buildbot.pypy.org Thu Dec 15 14:04:30 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:30 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 4d5064194771 on branch faster-json Message-ID: <20111215130430.7DBC382221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50580:3ef224190801 Date: 2011-12-15 14:02 +0100 http://bitbucket.org/pypy/pypy/changeset/3ef224190801/ Log: Merge closed head 4d5064194771 on branch faster-json From noreply at buildbot.pypy.org Thu Dec 15 14:04:31 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:31 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 83a3724c72f3 on branch numpy-share-iterators Message-ID: <20111215130431.86A0F82221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50581:7fe3a58c1918 Date: 2011-12-15 14:02 +0100 http://bitbucket.org/pypy/pypy/changeset/7fe3a58c1918/ Log: Merge closed head 83a3724c72f3 on branch numpy-share-iterators From noreply at buildbot.pypy.org Thu Dec 15 14:04:32 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:32 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: re-close this branch Message-ID: <20111215130432.9429382221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r50582:4f16091a6497 Date: 2011-12-15 14:02 +0100 http://bitbucket.org/pypy/pypy/changeset/4f16091a6497/ Log: re-close this branch From noreply at buildbot.pypy.org Thu Dec 15 14:04:33 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 14:04:33 +0100 (CET) Subject: [pypy-commit] pypy default: fix to handle branch names with special characters, like spaces Message-ID: <20111215130433.B8AC582221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: 
Changeset: r50583:8c37cae3aea2 Date: 2011-12-15 14:02 +0100 http://bitbucket.org/pypy/pypy/changeset/8c37cae3aea2/ Log: fix to handle branch names with special characters, like spaces diff --git a/pypy/tool/clean_old_branches.py b/pypy/tool/clean_old_branches.py --- a/pypy/tool/clean_old_branches.py +++ b/pypy/tool/clean_old_branches.py @@ -11,14 +11,17 @@ sys.exit(1) def heads(args): - g = os.popen(r"hg heads --topo %s --template '{branches} {node|short}\n'" + g = os.popen(r"hg heads --topo %s --template '{node|short}:{branches}\n'" % args, 'r') result = g.read() g.close() result = result.splitlines(False) - result = [s for s in result - if not s.startswith(' ') - and not s.startswith('closed-branches ')] + for line in result: + if len(line.split(':', 1)) != 2: + raise ValueError("'result' contains: %r" % line) + result = [s.split(':', 1) for s in result] + result = [(head, branch) for (head, branch) in result + if branch not in ['', 'closed-branches']] return result all_heads = heads("--closed") @@ -34,8 +37,7 @@ closed_heads.reverse() -for branch_head in closed_heads: - branch, head = branch_head.split() +for head, branch in closed_heads: print '\t', branch print print 'The branches listed above will be merged to "closed-branches".' @@ -54,8 +56,7 @@ print '*** error %r' % (err,) sys.exit(1) -for branch_head in closed_heads: - branch, head = branch_head.split() +for head, branch in closed_heads: print print '***** %s ***** %s *****' % (branch, head) do("hg up --clean closed-branches") From noreply at buildbot.pypy.org Thu Dec 15 16:46:11 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 15 Dec 2011 16:46:11 +0100 (CET) Subject: [pypy-commit] buildbot default: add new builder/slave to run benchmarks on speed.python.org Message-ID: <20111215154611.AE52382221@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r600:d4fc18217372 Date: 2011-12-15 16:44 +0100 http://bitbucket.org/pypy/buildbot/changeset/d4fc18217372/ Log: add new builder/slave to run benchmarks on speed.python.org diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -145,6 +145,8 @@ JITONLYLINUX32 = "jitonly-own-linux-x86-32" JITBENCH = "jit-benchmark-linux-x86-32" JITBENCH64 = "jit-benchmark-linux-x86-64" +JITBENCH64_2 = "jit-benchmark-linux-x86-64-2" + BuildmasterConfig = { 'slavePortnum': slavePortnum, @@ -297,6 +299,13 @@ "category": "benchmark-run", # the locks are acquired with fine grain inside the build }, + {"name": JITBENCH64_2, + "slavenames": ["speed-python-64"], + "builddir": JITBENCH64, + "factory": pypyJITBenchmarkFactory64, + "category": "benchmark-run", + # the locks are acquired with fine grain inside the build + }, {"name": MACOSX32, "slavenames": ["minime"], "builddir": MACOSX32, From noreply at buildbot.pypy.org Thu Dec 15 16:46:13 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 15 Dec 2011 16:46:13 +0100 (CET) Subject: [pypy-commit] buildbot default: hg merge default Message-ID: <20111215154613.2673D82221@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r601:4d1b872dbf60 Date: 2011-12-15 16:45 +0100 http://bitbucket.org/pypy/buildbot/changeset/4d1b872dbf60/ Log: hg merge default diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -152,6 +152,8 @@ JITONLYLINUX32 = "jitonly-own-linux-x86-32" JITBENCH = "jit-benchmark-linux-x86-32" JITBENCH64 = 
"jit-benchmark-linux-x86-64" +JITBENCH64_2 = "jit-benchmark-linux-x86-64-2" + BuildmasterConfig = { 'slavePortnum': slavePortnum, @@ -296,6 +298,13 @@ "category": "benchmark-run", # the locks are acquired with fine grain inside the build }, + {"name": JITBENCH64_2, + "slavenames": ["speed-python-64"], + "builddir": JITBENCH64, + "factory": pypyJITBenchmarkFactory64, + "category": "benchmark-run", + # the locks are acquired with fine grain inside the build + }, {"name": MACOSX32, "slavenames": ["minime"], "builddir": MACOSX32, From noreply at buildbot.pypy.org Thu Dec 15 16:54:40 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 15 Dec 2011 16:54:40 +0100 (CET) Subject: [pypy-commit] buildbot default: fix the config for JITBENCH64_2, and add snakepit32 to the slavenames (this change was actually lying in the WC of buildmaster) Message-ID: <20111215155440.35FAE82221@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r602:922a46875231 Date: 2011-12-15 16:53 +0100 http://bitbucket.org/pypy/buildbot/changeset/922a46875231/ Log: fix the config for JITBENCH64_2, and add snakepit32 to the slavenames (this change was actually lying in the WC of buildmaster) diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -199,6 +199,7 @@ Nightly("nightly-0-00", [ JITBENCH, # on tannit32, uses 1 core (in part exclusively) JITBENCH64, # on tannit64, uses 1 core (in part exclusively) + JITBENCH64_2, # on speed.python.org, uses 1 core (in part exclusively) MACOSX32, # on minime ], branch=None, hour=0, minute=0), # @@ -300,7 +301,7 @@ }, {"name": JITBENCH64_2, "slavenames": ["speed-python-64"], - "builddir": JITBENCH64, + "builddir": JITBENCH64_2, "factory": pypyJITBenchmarkFactory64, "category": "benchmark-run", # the locks are acquired with fine grain inside the build @@ -324,7 +325,7 @@ 'category' : 'mac64', }, {"name": WIN32, - "slavenames": ["bigboard"], + "slavenames": ["snakepit32", "bigboard"], "builddir": WIN32, "factory": pypyOwnTestFactoryWin, "category": 'win32' @@ -336,13 +337,13 @@ "category": 'win32' }, {"name": APPLVLWIN32, - "slavenames": ["bigboard"], + "slavenames": ["snakepit32", "bigboard"], "builddir": APPLVLWIN32, "factory": pypyTranslatedAppLevelTestFactoryWin, "category": "win32" }, {"name" : JITWIN32, - "slavenames": ["bigboard"], + "slavenames": ["snakepit32", "bigboard"], 'builddir' : JITWIN32, 'factory' : pypyJITTranslatedTestFactoryWin, 'category' : 'win32', From noreply at buildbot.pypy.org Thu Dec 15 17:23:00 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 17:23:00 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: hg merge default Message-ID: <20111215162300.78BBD82221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50584:8497ccd603ae Date: 2011-12-15 17:22 +0100 http://bitbucket.org/pypy/pypy/changeset/8497ccd603ae/ Log: hg merge default diff too long, truncating to 10000 out of 23738 lines diff --git a/lib-python/modified-2.7/ctypes/__init__.py b/lib-python/modified-2.7/ctypes/__init__.py --- a/lib-python/modified-2.7/ctypes/__init__.py +++ b/lib-python/modified-2.7/ctypes/__init__.py @@ -351,7 +351,7 @@ self._FuncPtr = _FuncPtr if handle is None: - self._handle = _ffi.CDLL(name) + self._handle = _ffi.CDLL(name, mode) else: self._handle = handle diff --git a/lib-python/modified-2.7/ctypes/test/test_callbacks.py b/lib-python/modified-2.7/ctypes/test/test_callbacks.py --- 
a/lib-python/modified-2.7/ctypes/test/test_callbacks.py +++ b/lib-python/modified-2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test class Callbacks(unittest.TestCase): @@ -98,6 +99,7 @@ ## self.check_type(c_char_p, "abc") ## self.check_type(c_char_p, "def") + @xfail def test_pyobject(self): o = () from sys import getrefcount as grc diff --git a/lib-python/modified-2.7/ctypes/test/test_libc.py b/lib-python/modified-2.7/ctypes/test/test_libc.py --- a/lib-python/modified-2.7/ctypes/test/test_libc.py +++ b/lib-python/modified-2.7/ctypes/test/test_libc.py @@ -25,7 +25,10 @@ lib.my_qsort(chars, len(chars)-1, sizeof(c_char), comparefunc(sort)) self.assertEqual(chars.raw, " ,,aaaadmmmnpppsss\x00") - def test_no_more_xfail(self): + def SKIPPED_test_no_more_xfail(self): + # We decided to not explicitly support the whole ctypes-2.7 + # and instead go for a case-by-case, demand-driven approach. + # So this test is skipped instead of failing. import socket import ctypes.test self.assertTrue(not hasattr(ctypes.test, 'xfail'), diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -379,12 +379,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): @@ -404,7 +406,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# -*- coding: iso-8859-1 +# -*- coding: iso-8859-1 -*- # Note that PyPy contains also a built-in module 'sha' which will hide # this one if compiled in. 
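As a side note on the _collections.defaultdict hunk above (not part of the patch): the change makes the
constructor take the factory as the first positional argument, which must be callable or None, and makes
copy() hand the factory back positionally instead of as a keyword. A minimal usage sketch with the
standard-library defaultdict, which follows the same contract:

    from collections import defaultdict

    d = defaultdict(list)            # first positional argument is default_factory
    d['missing'].append(1)           # __missing__ inserts a fresh list
    c = d.copy()                     # the copy keeps the same factory
    assert c.default_factory is list
    assert c['missing'] == [1]

    try:
        defaultdict(42)              # a non-callable, non-None factory is rejected
    except TypeError:
        pass
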
diff --git a/lib_pypy/distributed/socklayer.py b/lib_pypy/distributed/socklayer.py --- a/lib_pypy/distributed/socklayer.py +++ b/lib_pypy/distributed/socklayer.py @@ -2,7 +2,7 @@ import py from socket import socket -XXX needs import adaptation as 'green' is removed from py lib for years +raise ImportError("XXX needs import adaptation as 'green' is removed from py lib for years") from py.impl.green.msgstruct import decodemessage, message from socket import socket, AF_INET, SOCK_STREAM import marshal diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -25,7 +25,7 @@ __all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee'] + 'takewhile', 'tee', 'compress', 'product'] try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -164,6 +164,7 @@ # if something: # assume this causes a NameError # # _this_ lines and the one # below we don't want from entry.getsource() + end = min(end, len(source)) for i in range(self.lineno, end): if source[i].rstrip().endswith(':'): end = i + 1 diff --git a/pypy/annotation/binaryop.py b/pypy/annotation/binaryop.py --- a/pypy/annotation/binaryop.py +++ b/pypy/annotation/binaryop.py @@ -252,7 +252,26 @@ # unsignedness is considered a rare and contagious disease def union((int1, int2)): - knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + if int1.unsigned == int2.unsigned: + knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + else: + t1 = int1.knowntype + if t1 is bool: + t1 = int + t2 = int2.knowntype + if t2 is bool: + t2 = int + + if t2 is int: + if int2.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t1 + knowntype = t1 + elif t1 is int: + if int1.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t2 + knowntype = t2 + else: + raise UnionError, "Merging these types (%s, %s) is not supported" % (t1, t2) return SomeInteger(nonneg=int1.nonneg and int2.nonneg, knowntype=knowntype) diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -591,13 +591,11 @@ immutable = True def __init__(self, method): self.method = method - -NUMBER = object() + annotation_to_ll_map = [ (SomeSingleFloat(), lltype.SingleFloat), (s_None, lltype.Void), # also matches SomeImpossibleValue() (s_Bool, lltype.Bool), - (SomeInteger(knowntype=r_ulonglong), NUMBER), (SomeFloat(), lltype.Float), (SomeLongFloat(), lltype.LongFloat), (SomeChar(), lltype.Char), @@ -623,10 +621,11 @@ return lltype.Ptr(p.PARENTTYPE) if isinstance(s_val, SomePtr): return s_val.ll_ptrtype + if type(s_val) is SomeInteger: + return lltype.build_number(None, s_val.knowntype) + for witness, T in annotation_to_ll_map: if witness.contains(s_val): - if T is NUMBER: - return lltype.build_number(None, s_val.knowntype) return T if info is None: info = '' @@ -635,7 +634,7 @@ raise ValueError("%sshould return a low-level type,\ngot instead %r" % ( info, s_val)) -ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map if ll is not NUMBER]) +ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map]) def lltype_to_annotation(T): try: diff --git a/pypy/annotation/specialize.py b/pypy/annotation/specialize.py --- 
a/pypy/annotation/specialize.py +++ b/pypy/annotation/specialize.py @@ -36,9 +36,7 @@ newtup = SpaceOperation('newtuple', starargs, argscopy[-1]) newstartblock.operations.append(newtup) newstartblock.closeblock(Link(argscopy, graph.startblock)) - graph.startblock.isstartblock = False graph.startblock = newstartblock - newstartblock.isstartblock = True argnames = argnames + ['.star%d' % i for i in range(nb_extra_args)] graph.signature = Signature(argnames) # note that we can mostly ignore defaults: if nb_extra_args > 0, diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py --- a/pypy/annotation/test/test_annrpython.py +++ b/pypy/annotation/test/test_annrpython.py @@ -856,6 +856,46 @@ py.test.raises(Exception, a.build_types, f, []) # if you want to get a r_uint, you have to be explicit about it + def test_add_different_ints(self): + def f(a, b): + return a + b + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_different_ints(self): + def f(a, b): + if a: + c = a + else: + c = b + return c + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_ruint_zero(self): + def f(a): + if a: + c = a + else: + c = 0 + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_merge_ruint_nonneg_signed(self): + def f(a, b): + if a: + c = a + else: + assert b >= 0 + c = b + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint, int]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_prebuilt_long_that_is_not_too_long(self): small_constant = 12L def f(): @@ -3029,7 +3069,7 @@ if g(x, y): g(x, r_uint(y)) a = self.RPythonAnnotator() - a.build_types(f, [int, int]) + py.test.raises(Exception, a.build_types, f, [int, int]) def test_compare_with_zero(self): def g(): diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py --- a/pypy/bin/checkmodule.py +++ b/pypy/bin/checkmodule.py @@ -1,43 +1,45 @@ #! /usr/bin/env python """ -Usage: checkmodule.py [-b backend] +Usage: checkmodule.py -Compiles the PyPy extension module from pypy/module// -into a fake program which does nothing. Useful for testing whether a -modules compiles without doing a full translation. Default backend is cli. - -WARNING: this is still incomplete: there are chances that the -compilation fails with strange errors not due to the module. If a -module is known to compile during a translation but don't pass -checkmodule.py, please report the bug (or, better, correct it :-). +Check annotation and rtyping of the PyPy extension module from +pypy/module//. Useful for testing whether a +modules compiles without doing a full translation. 
""" import autopath -import sys +import sys, os from pypy.objspace.fake.checkmodule import checkmodule def main(argv): - try: - assert len(argv) in (2, 4) - if len(argv) == 2: - backend = 'cli' - modname = argv[1] - if modname in ('-h', '--help'): - print >> sys.stderr, __doc__ - sys.exit(0) - if modname.startswith('-'): - print >> sys.stderr, "Bad command line" - print >> sys.stderr, __doc__ - sys.exit(1) - else: - _, b, backend, modname = argv - assert b == '-b' - except AssertionError: + if len(argv) != 2: print >> sys.stderr, __doc__ sys.exit(2) + modname = argv[1] + if modname in ('-h', '--help'): + print >> sys.stderr, __doc__ + sys.exit(0) + if modname.startswith('-'): + print >> sys.stderr, "Bad command line" + print >> sys.stderr, __doc__ + sys.exit(1) + if os.path.sep in modname: + if os.path.basename(modname) == '': + modname = os.path.dirname(modname) + if os.path.basename(os.path.dirname(modname)) != 'module': + print >> sys.stderr, "Must give '../module/xxx', or just 'xxx'." + sys.exit(1) + modname = os.path.basename(modname) + try: + checkmodule(modname) + except Exception, e: + import traceback, pdb + traceback.print_exc() + pdb.post_mortem(sys.exc_info()[2]) + return 1 else: - checkmodule(modname, backend, interactive=True) - print 'Module compiled succesfully' + print 'Passed.' + return 0 if __name__ == '__main__': - main(sys.argv) + sys.exit(main(sys.argv)) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -252,6 +252,10 @@ "use small tuples", default=False), + BoolOption("withspecialisedtuple", + "use specialised tuples", + default=False), + BoolOption("withrope", "use ropes as the string implementation", default=False, requires=[("objspace.std.withstrslice", False), @@ -365,6 +369,7 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) + config.objspace.std.suggest(withspecialisedtuple=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -496,6 +496,17 @@ def setup(self): super(AppClassCollector, self).setup() cls = self.obj + # + # + for name in dir(cls): + if name.startswith('test_'): + func = getattr(cls, name, None) + code = getattr(func, 'func_code', None) + if code and code.co_flags & 32: + raise AssertionError("unsupported: %r is a generator " + "app-level test method" % (name,)) + # + # space = cls.space clsname = cls.__name__ if self.config.option.runappdirect: diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.6' +version = '1.7' # The full version, including alpha/beta/rc tags. -release = '1.6' +release = '1.7' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/config/objspace.std.withspecialisedtuple.txt b/pypy/doc/config/objspace.std.withspecialisedtuple.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withspecialisedtuple.txt @@ -0,0 +1,3 @@ +Use "specialized tuples", a custom implementation for some common kinds +of tuples. Currently limited to tuples of length 2, in three variants: +(int, int), (float, float), and a generic (object, object). 
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -304,5 +304,14 @@ never a dictionary as it sometimes is in CPython. Assigning to ``__builtins__`` has no effect. +* directly calling the internal magic methods of a few built-in types + with invalid arguments may have a slightly different result. For + example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return + ``NotImplemented`` on PyPy; on CPython, only the later does, and the + former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` + both raise ``TypeError`` everywhere.) This difference is an + implementation detail that shows up because of internal C-level slots + that PyPy does not have. + .. include:: _ref.txt diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -112,10 +112,32 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. +Note that the JIT has a very high warm-up cost, meaning that the +programs are slow at the beginning. If you want to compare the timings +with CPython, even relatively simple programs need to run *at least* one +second, preferrably at least a few seconds. Large, complicated programs +need even more time to warm-up the JIT. + .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +--------------------------------------------------------------- +Couldn't the JIT dump and reload already-compiled machine code? +--------------------------------------------------------------- + +No, we found no way of doing that. The JIT generates machine code +containing a large number of constant addresses --- constant at the time +the machine code is written. The vast majority is probably not at all +constants that you find in the executable, with a nice link name. E.g. +the addresses of Python classes are used all the time, but Python +classes don't come statically from the executable; they are created anew +every time you restart your program. This makes saving and reloading +machine code completely impossible without some very advanced way of +mapping addresses in the old (now-dead) process to addresses in the new +process, including checking that all the previous assumptions about the +(now-dead) object are still true about the new object. + .. _`prolog and javascript`: diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.6`: http://pypy.org/download.html +.. _`Release 1.7`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. 
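A rough illustration of the withspecialisedtuple idea documented a few hunks above (the option text for
objspace.std.withspecialisedtuple): a length-2 tuple whose fields are known to be ints can store them
directly instead of as generic wrapped objects. The class below is an invented sketch for explanation
only and does not reproduce PyPy's actual objspace classes:

    class SpecialisedTuple2_ii(object):
        """Hypothetical specialised 2-tuple holding two plain ints."""
        def __init__(self, a, b):
            self.a = a                  # stored directly, no wrapper objects
            self.b = b

        def length(self):
            return 2

        def getitem(self, index):
            if index == 0:
                return self.a
            if index == 1:
                return self.b
            raise IndexError("tuple index out of range")

The (float, float) and generic (object, object) variants mentioned in the option text would be analogous
classes with differently typed fields.
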
Windows is supported, but platform-specific bugs tend to take longer before we notice and fix diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -1,6 +1,5 @@ """codegen helpers and AST constant folding.""" import sys -import itertools from pypy.interpreter.astcompiler import ast, consts, misc from pypy.tool import stdlib_opcode as ops @@ -146,8 +145,7 @@ } unrolling_unary_folders = unrolling_iterable(unary_folders.items()) -for folder in itertools.chain(binary_folders.itervalues(), - unary_folders.itervalues()): +for folder in binary_folders.values() + unary_folders.values(): folder._always_inline_ = True del folder diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1,4 +1,3 @@ -import itertools import pypy from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction @@ -191,8 +190,8 @@ def is_w(self, space, w_other): return self is w_other - def unique_id(self, space): - return space.wrap(compute_unique_id(self)) + def immutable_unique_id(self, space): + return None def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) @@ -488,6 +487,16 @@ 'parser', 'fcntl', '_codecs', 'binascii' ] + # These modules are treated like CPython treats built-in modules, + # i.e. they always shadow any xx.py. The other modules are treated + # like CPython treats extension modules, and are loaded in sys.path + # order by the fake entry '.../lib_pypy/__extensions__'. + MODULES_THAT_ALWAYS_SHADOW = dict.fromkeys([ + '__builtin__', '__pypy__', '_ast', '_codecs', '_sre', '_warnings', + '_weakref', 'errno', 'exceptions', 'gc', 'imp', 'marshal', + 'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport', + ], None) + def make_builtins(self): "NOT_RPYTHON: only for initializing the space." @@ -519,8 +528,8 @@ exception_types_w = self.export_builtin_exceptions() # initialize with "bootstrap types" from objspace (e.g. w_None) - types_w = itertools.chain(self.get_builtin_types().iteritems(), - exception_types_w.iteritems()) + types_w = (self.get_builtin_types().items() + + exception_types_w.items()) for name, w_type in types_w: self.setitem(self.builtin.w_dict, self.wrap(name), w_type) @@ -697,7 +706,10 @@ return w_two.is_w(self, w_one) def id(self, w_obj): - return w_obj.unique_id(self) + w_result = w_obj.immutable_unique_id(self) + if w_result is None: + w_result = self.wrap(compute_unique_id(w_obj)) + return w_result def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" @@ -1608,6 +1620,8 @@ 'UnicodeError', 'ValueError', 'ZeroDivisionError', + 'UnicodeEncodeError', + 'UnicodeDecodeError', ] ## Irregular part of the interface: diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated, instantiate from pypy.rlib.jit import hint from pypy.rlib.debug import make_sure_not_resized, check_nonneg -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rlib import jit from pypy.tool import stdlib_opcode from pypy.tool.stdlib_opcode import host_bytecode_spec @@ -167,7 +167,7 @@ # Execution starts just after the last_instr. 
Initially, # last_instr is -1. After a generator suspends it points to # the YIELD_VALUE instruction. - next_instr = self.last_instr + 1 + next_instr = r_uint(self.last_instr + 1) if next_instr != 0: self.pushvalue(w_inputvalue) # @@ -691,6 +691,7 @@ handlerposition = space.int_w(w_handlerposition) valuestackdepth = space.int_w(w_valuestackdepth) assert valuestackdepth >= 0 + assert handlerposition >= 0 blk = instantiate(get_block_class(opname)) blk.handlerposition = handlerposition blk.valuestackdepth = valuestackdepth diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -837,6 +837,7 @@ raise Yield def jump_absolute(self, jumpto, next_instr, ec): + check_nonneg(jumpto) return jumpto def JUMP_FORWARD(self, jumpby, next_instr): @@ -1278,7 +1279,7 @@ def handle(self, frame, unroller): next_instr = self.really_handle(frame, unroller) # JIT hack - return next_instr + return r_uint(next_instr) def really_handle(self, frame, unroller): """ Purely abstract method diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -54,7 +54,11 @@ # Hash support def default_identity_hash(space, w_obj): - return space.wrap(compute_identity_hash(w_obj)) + w_unique_id = w_obj.immutable_unique_id(space) + if w_unique_id is None: # common case + return space.wrap(compute_identity_hash(w_obj)) + else: + return space.hash(w_unique_id) # ____________________________________________________________ # diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -8,6 +8,7 @@ from pypy.objspace.flow.model import Variable, Constant from pypy.annotation import model as annmodel from pypy.jit.metainterp.history import REF, INT, FLOAT +from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -20,7 +21,7 @@ from pypy.jit.backend.llgraph import symbolic from pypy.jit.codewriter import longlong -from pypy.rlib import libffi +from pypy.rlib import libffi, clibffi from pypy.rlib.objectmodel import ComputedIntSymbolic, we_are_translated from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint @@ -48,6 +49,11 @@ value._the_opaque_pointer = op return op +def _normalize(value): + if isinstance(value, lltype._ptr): + value = lltype.top_container(value._obj) + return value + def from_opaque_string(s): if isinstance(s, str): return s @@ -322,6 +328,14 @@ _variables.append(v) return r +def compile_started_vars(clt): + if not hasattr(clt, '_debug_argtypes'): # only when compiling the loop + argtypes = [v.concretetype for v in _variables] + try: + clt._debug_argtypes = argtypes + except AttributeError: # when 'clt' is actually a translated + pass # GcStruct + def compile_add(loop, opnum): loop = _from_opaque(loop) loop.operations.append(Operation(opnum)) @@ -347,6 +361,16 @@ op = loop.operations[-1] op.descr = weakref.ref(descr) +TARGET_TOKENS = weakref.WeakKeyDictionary() + +def compile_add_target_token(loop, descr, clt): + # here, 'clt' is the compiled_loop_token of the original loop that + # we are compiling + loop = _from_opaque(loop) + op = loop.operations[-1] + descrobj = _normalize(descr) + TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args, 
clt + def compile_add_var(loop, intvar): loop = _from_opaque(loop) op = loop.operations[-1] @@ -381,13 +405,25 @@ _variables.append(v) return r -def compile_add_jump_target(loop, loop_target): +def compile_add_jump_target(loop, targettoken, source_clt): loop = _from_opaque(loop) - loop_target = _from_opaque(loop_target) + descrobj = _normalize(targettoken) + (loop_target, target_opindex, target_inputargs, target_clt + ) = TARGET_TOKENS[descrobj] + # + try: + assert source_clt._debug_argtypes == target_clt._debug_argtypes + except AttributeError: # when translated + pass + # op = loop.operations[-1] op.jump_target = loop_target + op.jump_target_opindex = target_opindex + op.jump_target_inputargs = target_inputargs assert op.opnum == rop.JUMP - assert len(op.args) == len(loop_target.inputargs) + assert [v.concretetype for v in op.args] == ( + [v.concretetype for v in target_inputargs]) + # if loop_target == loop: log.info("compiling new loop") else: @@ -521,10 +557,11 @@ self.opindex += 1 continue if op.opnum == rop.JUMP: - assert len(op.jump_target.inputargs) == len(args) - self.env = dict(zip(op.jump_target.inputargs, args)) + inputargs = op.jump_target_inputargs + assert len(inputargs) == len(args) + self.env = dict(zip(inputargs, args)) self.loop = op.jump_target - self.opindex = 0 + self.opindex = op.jump_target_opindex _stats.exec_jumps += 1 elif op.opnum == rop.FINISH: if self.verbose: @@ -617,6 +654,15 @@ # return _op_default_implementation + def op_label(self, _, *args): + op = self.loop.operations[self.opindex] + assert op.opnum == rop.LABEL + assert len(op.args) == len(args) + newenv = {} + for v, value in zip(op.args, args): + newenv[v] = value + self.env = newenv + def op_debug_merge_point(self, _, *args): from pypy.jit.metainterp.warmspot import get_stats try: @@ -959,6 +1005,7 @@ self._may_force = self.opindex try: inpargs = _from_opaque(ctl.compiled_version).inputargs + assert len(inpargs) == len(args) for i, inparg in enumerate(inpargs): TYPE = inparg.concretetype if TYPE is lltype.Signed: @@ -1432,6 +1479,10 @@ res = _getinteriorfield_raw(libffi.types.slong, array, index, width, ofs) return res +def do_getinteriorfield_raw_float(array, index, width, ofs): + res = _getinteriorfield_raw(libffi.types.double, array, index, width, ofs) + return res + def _getfield_raw(struct, fieldnum): STRUCT, fieldname = symbolic.TokenToField[fieldnum] ptr = cast_from_int(lltype.Ptr(STRUCT), struct) @@ -1510,12 +1561,17 @@ do_setinteriorfield_gc_float = new_setinteriorfield_gc(cast_from_floatstorage) do_setinteriorfield_gc_ptr = new_setinteriorfield_gc(cast_from_ptr) -def new_setinteriorfield_raw(ffitype): +def new_setinteriorfield_raw(cast_func, ffitype): def do_setinteriorfield_raw(array, index, newvalue, width, ofs): addr = rffi.cast(rffi.VOIDP, array) + for TYPE, ffitype2 in clibffi.ffitype_map: + if ffitype2 is ffitype: + newvalue = cast_func(TYPE, newvalue) + break return libffi.array_setitem(ffitype, width, addr, index, ofs, newvalue) return do_setinteriorfield_raw -do_setinteriorfield_raw_int = new_setinteriorfield_raw(libffi.types.slong) +do_setinteriorfield_raw_int = new_setinteriorfield_raw(cast_from_int, libffi.types.slong) +do_setinteriorfield_raw_float = new_setinteriorfield_raw(cast_from_floatstorage, libffi.types.double) def do_setfield_raw_int(struct, fieldnum, newvalue): STRUCT, fieldname = symbolic.TokenToField[fieldnum] @@ -1779,9 +1835,11 @@ setannotation(compile_start_int_var, annmodel.SomeInteger()) setannotation(compile_start_ref_var, annmodel.SomeInteger()) 
setannotation(compile_start_float_var, annmodel.SomeInteger()) +setannotation(compile_started_vars, annmodel.s_None) setannotation(compile_add, annmodel.s_None) setannotation(compile_add_descr, annmodel.s_None) setannotation(compile_add_descr_arg, annmodel.s_None) +setannotation(compile_add_target_token, annmodel.s_None) setannotation(compile_add_var, annmodel.s_None) setannotation(compile_add_int_const, annmodel.s_None) setannotation(compile_add_ref_const, annmodel.s_None) diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -138,29 +138,30 @@ clt = original_loop_token.compiled_loop_token clt.loop_and_bridges.append(c) clt.compiling_a_bridge() - self._compile_loop_or_bridge(c, inputargs, operations) + self._compile_loop_or_bridge(c, inputargs, operations, clt) old, oldindex = faildescr._compiled_fail llimpl.compile_redirect_fail(old, oldindex, c) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, inputargs, operations, jitcell_token, + log=True, name=''): """In a real assembler backend, this should assemble the given list of operations. Here we just generate a similar CompiledLoop instance. The code here is RPython, whereas the code in llimpl is not. """ c = llimpl.compile_start() - clt = model.CompiledLoopToken(self, looptoken.number) + clt = model.CompiledLoopToken(self, jitcell_token.number) clt.loop_and_bridges = [c] clt.compiled_version = c - looptoken.compiled_loop_token = clt - self._compile_loop_or_bridge(c, inputargs, operations) + jitcell_token.compiled_loop_token = clt + self._compile_loop_or_bridge(c, inputargs, operations, clt) def free_loop_and_bridges(self, compiled_loop_token): for c in compiled_loop_token.loop_and_bridges: llimpl.mark_as_free(c) model.AbstractCPU.free_loop_and_bridges(self, compiled_loop_token) - def _compile_loop_or_bridge(self, c, inputargs, operations): + def _compile_loop_or_bridge(self, c, inputargs, operations, clt): var2index = {} for box in inputargs: if isinstance(box, history.BoxInt): @@ -172,10 +173,11 @@ var2index[box] = llimpl.compile_start_float_var(c) else: raise Exception("box is: %r" % (box,)) - self._compile_operations(c, operations, var2index) + llimpl.compile_started_vars(clt) + self._compile_operations(c, operations, var2index, clt) return c - def _compile_operations(self, c, operations, var2index): + def _compile_operations(self, c, operations, var2index, clt): for op in operations: llimpl.compile_add(c, op.getopnum()) descr = op.getdescr() @@ -183,9 +185,11 @@ llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo, descr.arg_types, descr.extrainfo, descr.width) - if (isinstance(descr, history.LoopToken) and - op.getopnum() != rop.JUMP): + if isinstance(descr, history.JitCellToken): + assert op.getopnum() != rop.JUMP llimpl.compile_add_loop_token(c, descr) + if isinstance(descr, history.TargetToken) and op.getopnum() == rop.LABEL: + llimpl.compile_add_target_token(c, descr, clt) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython c._obj.externalobj.operations[-1].setdescr(descr) @@ -239,9 +243,7 @@ assert op.is_final() if op.getopnum() == rop.JUMP: targettoken = op.getdescr() - assert isinstance(targettoken, history.LoopToken) - compiled_version = targettoken.compiled_loop_token.compiled_version - llimpl.compile_add_jump_target(c, compiled_version) + llimpl.compile_add_jump_target(c, targettoken, clt) elif op.getopnum() == rop.FINISH: 
faildescr = op.getdescr() index = self.get_fail_descr_number(faildescr) @@ -260,21 +262,28 @@ self.latest_frame = frame return fail_index - def execute_token(self, loop_token): - """Calls the assembler generated for the given loop. - Returns the ResOperation that failed, of type rop.FAIL. - """ - fail_index = self._execute_token(loop_token) - return self.get_fail_descr_from_number(fail_index) - - def set_future_value_int(self, index, intvalue): - llimpl.set_future_value_int(index, intvalue) - - def set_future_value_ref(self, index, objvalue): - llimpl.set_future_value_ref(index, objvalue) - - def set_future_value_float(self, index, floatvalue): - llimpl.set_future_value_float(index, floatvalue) + def make_execute_token(self, *argtypes): + nb_args = len(argtypes) + unroll_argtypes = unrolling_iterable(list(enumerate(argtypes))) + # + def execute_token(loop_token, *args): + assert len(args) == nb_args + for index, TYPE in unroll_argtypes: + x = args[index] + assert TYPE == lltype.typeOf(x) + if TYPE == lltype.Signed: + llimpl.set_future_value_int(index, x) + elif TYPE == llmemory.GCREF: + llimpl.set_future_value_ref(index, x) + elif TYPE == longlong.FLOATSTORAGE: + llimpl.set_future_value_float(index, x) + else: + assert 0 + # + fail_index = self._execute_token(loop_token) + return self.get_fail_descr_from_number(fail_index) + # + return execute_token def get_latest_value_int(self, index): return llimpl.frame_int_getvalue(self.latest_frame, index) diff --git a/pypy/jit/backend/llsupport/asmmemmgr.py b/pypy/jit/backend/llsupport/asmmemmgr.py --- a/pypy/jit/backend/llsupport/asmmemmgr.py +++ b/pypy/jit/backend/llsupport/asmmemmgr.py @@ -37,25 +37,25 @@ self._add_free_block(smaller_stop, stop) stop = smaller_stop result = (start, stop) - self.total_mallocs += stop - start + self.total_mallocs += r_uint(stop - start) return result # pair (start, stop) def free(self, start, stop): """Free a block (start, stop) returned by a previous malloc().""" - self.total_mallocs -= (stop - start) + self.total_mallocs -= r_uint(stop - start) self._add_free_block(start, stop) def open_malloc(self, minsize): """Allocate at least minsize bytes. 
Returns (start, stop).""" result = self._allocate_block(minsize) (start, stop) = result - self.total_mallocs += stop - start + self.total_mallocs += r_uint(stop - start) return result def open_free(self, middle, stop): """Used for freeing the end of an open-allocated block of memory.""" if stop - middle >= self.min_fragment: - self.total_mallocs -= (stop - middle) + self.total_mallocs -= r_uint(stop - middle) self._add_free_block(middle, stop) return True else: @@ -77,7 +77,7 @@ # Hack to make sure that mcs are not within 32-bits of one # another for testing purposes rmmap.hint.pos += 0x80000000 - size - self.total_memory_allocated += size + self.total_memory_allocated += r_uint(size) data = rffi.cast(lltype.Signed, data) return self._add_free_block(data, data + size) diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -773,6 +773,7 @@ if self.layoutbuilder is not None: type_id = self.layoutbuilder.get_type_id(S) assert not self.layoutbuilder.is_weakref_type(S) + assert not self.layoutbuilder.has_finalizer(S) descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) def init_array_descr(self, A, descr): diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -16,32 +16,106 @@ """ Manage frame positions """ def __init__(self): - self.frame_bindings = {} - self.frame_depth = 0 + self.bindings = {} + self.used = [] # list of bools + self.hint_frame_locations = {} + + frame_depth = property(lambda:xxx, lambda:xxx) # XXX kill me + + def get_frame_depth(self): + return len(self.used) def get(self, box): - return self.frame_bindings.get(box, None) + return self.bindings.get(box, None) def loc(self, box): - res = self.get(box) - if res is not None: - return res + """Return or create the frame location associated with 'box'.""" + # first check if it's already in the frame_manager + try: + return self.bindings[box] + except KeyError: + pass + # check if we have a hint for this box + if box in self.hint_frame_locations: + # if we do, try to reuse the location for this box + loc = self.hint_frame_locations[box] + if self.try_to_reuse_location(box, loc): + return loc + # no valid hint. make up a new free location + return self.get_new_loc(box) + + def get_new_loc(self, box): size = self.frame_size(box.type) - self.frame_depth += ((-self.frame_depth) & (size-1)) - # ^^^ frame_depth is rounded up to a multiple of 'size', assuming + # frame_depth is rounded up to a multiple of 'size', assuming # that 'size' is a power of two. The reason for doing so is to # avoid obscure issues in jump.py with stack locations that try # to move from position (6,7) to position (7,8). 
- newloc = self.frame_pos(self.frame_depth, box.type) - self.frame_bindings[box] = newloc - self.frame_depth += size + while self.get_frame_depth() & (size - 1): + self.used.append(False) + # + index = self.get_frame_depth() + newloc = self.frame_pos(index, box.type) + for i in range(size): + self.used.append(True) + # + if not we_are_translated(): # extra testing + testindex = self.get_loc_index(newloc) + assert testindex == index + # + self.bindings[box] = newloc return newloc + def set_binding(self, box, loc): + self.bindings[box] = loc + # + index = self.get_loc_index(loc) + if index < 0: + return + endindex = index + self.frame_size(box.type) + while len(self.used) < endindex: + self.used.append(False) + while index < endindex: + self.used[index] = True + index += 1 + def reserve_location_in_frame(self, size): - frame_depth = self.frame_depth - self.frame_depth += size + frame_depth = self.get_frame_depth() + for i in range(size): + self.used.append(True) return frame_depth + def mark_as_free(self, box): + try: + loc = self.bindings[box] + except KeyError: + return # already gone + del self.bindings[box] + # + size = self.frame_size(box.type) + baseindex = self.get_loc_index(loc) + if baseindex < 0: + return + for i in range(size): + index = baseindex + i + assert 0 <= index < len(self.used) + self.used[index] = False + + def try_to_reuse_location(self, box, loc): + index = self.get_loc_index(loc) + if index < 0: + return False + size = self.frame_size(box.type) + for i in range(size): + while (index + i) >= len(self.used): + self.used.append(False) + if self.used[index + i]: + return False # already in use + # good, we can reuse the location + for i in range(size): + self.used[index + i] = True + self.bindings[box] = loc + return True + # abstract methods that need to be overwritten for specific assemblers @staticmethod def frame_pos(loc, type): @@ -49,6 +123,10 @@ @staticmethod def frame_size(type): return 1 + @staticmethod + def get_loc_index(loc): + raise NotImplementedError("Purely abstract") + class RegisterManager(object): """ Class that keeps track of register allocations @@ -68,7 +146,14 @@ self.frame_manager = frame_manager self.assembler = assembler + def is_still_alive(self, v): + # Check if 'v' is alive at the current position. + # Return False if the last usage is strictly before. + return self.longevity[v][1] >= self.position + def stays_alive(self, v): + # Check if 'v' stays alive after the current position. + # Return False if the last usage is before or at position. return self.longevity[v][1] > self.position def next_instruction(self, incr=1): @@ -84,11 +169,14 @@ point for all variables that might be in registers. """ self._check_type(v) - if isinstance(v, Const) or v not in self.reg_bindings: + if isinstance(v, Const): return if v not in self.longevity or self.longevity[v][1] <= self.position: - self.free_regs.append(self.reg_bindings[v]) - del self.reg_bindings[v] + if v in self.reg_bindings: + self.free_regs.append(self.reg_bindings[v]) + del self.reg_bindings[v] + if self.frame_manager is not None: + self.frame_manager.mark_as_free(v) def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. 
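A standalone note on the alignment step in FrameManager.get_new_loc above (padding self.used so that,
for example, a two-word float slot starts at a multiple of its size): the same rounding can be written
arithmetically, assuming the size is a power of two, as in this sketch (not part of the patch):

    def align_up(index, size):
        # round 'index' up to the next multiple of 'size';
        # only valid when 'size' is a power of two
        assert size & (size - 1) == 0
        return (index + size - 1) & ~(size - 1)

    assert align_up(7, 2) == 8
    assert align_up(8, 2) == 8
    assert align_up(5, 1) == 5
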
diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -52,6 +52,9 @@ if op.getopnum() == rop.SETFIELD_GC: self.handle_write_barrier_setfield(op) continue + if op.getopnum() == rop.SETINTERIORFIELD_GC: + self.handle_write_barrier_setinteriorfield(op) + continue if op.getopnum() == rop.SETARRAYITEM_GC: self.handle_write_barrier_setarrayitem(op) continue @@ -205,6 +208,17 @@ op = op.copy_and_change(rop.SETFIELD_RAW) self.newops.append(op) + def handle_write_barrier_setinteriorfield(self, op): + val = op.getarg(0) + # no need for a write barrier in the case of previous malloc + if val not in self.recent_mallocs: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self.gen_write_barrier(op.getarg(0), v) + op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) + self.newops.append(op) + def handle_write_barrier_setarrayitem(self, op): val = op.getarg(0) # no need for a write barrier in the case of previous malloc diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -547,6 +547,28 @@ assert operations[1].getarg(2) == v_value assert operations[1].getdescr() == array_descr + def test_rewrite_assembler_5(self): + S = lltype.GcStruct('S') + A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) + interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, + A.OF, 'x') + wbdescr = self.gc_ll_descr.write_barrier_descr + ops = parse(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + expected = parse(""" + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + operations = get_deep_immutable_oplist(ops.operations) + operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, + operations, []) + equaloplists(operations, expected.operations) + def test_rewrite_assembler_initialization_store(self): S = lltype.GcStruct('S', ('parent', OBJECT), ('x', lltype.Signed)) diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -2,6 +2,8 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan +from pypy.jit.tool.oparser import parse +from pypy.jit.backend.detect_cpu import getcpuclass def newboxes(*values): return [BoxInt(v) for v in values] @@ -40,8 +42,13 @@ def frame_size(self, box_type): if box_type == FLOAT: return 2 + elif box_type == INT: + return 1 else: - return 1 + raise ValueError(box_type) + def get_loc_index(self, loc): + assert isinstance(loc, FakeFramePos) + return loc.pos class MockAsm(object): def __init__(self): @@ -280,7 +287,7 @@ rm.force_allocate_reg(b) rm.before_call() assert len(rm.reg_bindings) == 2 - assert fm.frame_depth == 2 + assert fm.get_frame_depth() == 2 assert len(asm.moves) == 2 rm._check_invariants() rm.after_call(boxes[-1]) @@ -303,7 +310,7 @@ rm.force_allocate_reg(b) rm.before_call(save_all_regs=True) assert len(rm.reg_bindings) == 0 - assert fm.frame_depth 
== 4 + assert fm.get_frame_depth() == 4 assert len(asm.moves) == 4 rm._check_invariants() rm.after_call(boxes[-1]) @@ -325,7 +332,7 @@ xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm) xrm.loc(f0) rm.loc(b0) - assert fm.frame_depth == 3 + assert fm.get_frame_depth() == 3 @@ -346,3 +353,123 @@ spilled2 = rm.force_allocate_reg(b5) assert spilled2 is loc rm._check_invariants() + + + def test_hint_frame_locations_1(self): + b0, = newboxes(0) + fm = TFrameManager() + loc123 = FakeFramePos(123, INT) + fm.hint_frame_locations[b0] = loc123 + assert fm.get_frame_depth() == 0 + loc = fm.loc(b0) + assert loc == loc123 + assert fm.get_frame_depth() == 124 + + def test_hint_frame_locations_2(self): + b0, b1, b2 = newboxes(0, 1, 2) + longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_allocate_reg(b2) + rm.force_spill_var(b0) + loc = rm.loc(b0) + assert isinstance(loc, FakeFramePos) + assert fm.get_loc_index(loc) == 0 + rm.position = 1 + assert fm.used == [True] + rm.possibly_free_var(b0) + assert fm.used == [False] + # + fm.hint_frame_locations[b1] = loc + rm.force_spill_var(b1) + loc1 = rm.loc(b1) + assert loc1 == loc + assert fm.used == [True] + # + fm.hint_frame_locations[b2] = loc + rm.force_spill_var(b2) + loc2 = rm.loc(b2) + assert loc2 != loc1 # because it was not free + assert fm.used == [True, True] + # + rm._check_invariants() + + def test_frame_manager_basic(self): + b0, b1 = newboxes(0, 1) + fm = TFrameManager() + loc0 = fm.loc(b0) + assert fm.get_loc_index(loc0) == 0 + # + assert fm.get(b1) is None + loc1 = fm.loc(b1) + assert fm.get_loc_index(loc1) == 1 + assert fm.get(b1) == loc1 + # + loc0b = fm.loc(b0) + assert loc0b == loc0 + # + fm.loc(BoxInt()) + assert fm.get_frame_depth() == 3 + # + f0 = BoxFloat() + locf0 = fm.loc(f0) + assert fm.get_loc_index(locf0) == 4 + assert fm.get_frame_depth() == 6 + # + f1 = BoxFloat() + locf1 = fm.loc(f1) + assert fm.get_loc_index(locf1) == 6 + assert fm.get_frame_depth() == 8 + assert fm.used == [True, True, True, False, True, True, True, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(f1) + assert fm.used == [False, True, True, False, True, True, False, False] + # + fm.reserve_location_in_frame(1) + assert fm.get_frame_depth() == 9 + assert fm.used == [False, True, True, False, True, True, False, False, True] + # + assert b0 not in fm.bindings + fm.set_binding(b0, loc0) + assert b0 in fm.bindings + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + b3 = BoxInt() + assert not fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) # already free + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b3) + assert fm.used == [False, True, True, False, True, True, False, False, True] + f3 = BoxFloat() + assert not fm.try_to_reuse_location(f3, fm.frame_pos(0, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(1, 
FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(2, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(3, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(4, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(5, FLOAT)) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(f3, fm.frame_pos(6, FLOAT)) + assert fm.used == [False, True, True, False, True, True, True, True, True] + # + fm.used = [False] + assert fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True, True] + # + fm.used = [True] + assert not fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True] diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -1,5 +1,6 @@ from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.jit.metainterp import history +from pypy.rpython.lltypesystem import lltype class AbstractCPU(object): @@ -84,24 +85,21 @@ """Print a disassembled version of looptoken to stdout""" raise NotImplementedError - def execute_token(self, looptoken): - """Execute the generated code referenced by the looptoken. + def execute_token(self, looptoken, *args): + """NOT_RPYTHON (for tests only) + Execute the generated code referenced by the looptoken. Returns the descr of the last executed operation: either the one attached to the failing guard, or the one attached to the FINISH. - Use set_future_value_xxx() before, and get_latest_value_xxx() after. + Use get_latest_value_xxx() afterwards to read the result(s). """ - raise NotImplementedError + argtypes = [lltype.typeOf(x) for x in args] + execute = self.make_execute_token(*argtypes) + return execute(looptoken, *args) - def set_future_value_int(self, index, intvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_float(self, index, floatvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_ref(self, index, objvalue): - """Set the value for the index'th argument for the loop to run.""" + def make_execute_token(self, *argtypes): + """Must make and return an execute_token() function that will be + called with the given argtypes. 
+ """ raise NotImplementedError def get_latest_value_int(self, index): diff --git a/pypy/jit/backend/test/calling_convention_test.py b/pypy/jit/backend/test/calling_convention_test.py --- a/pypy/jit/backend/test/calling_convention_test.py +++ b/pypy/jit/backend/test/calling_convention_test.py @@ -2,7 +2,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, ConstInt, ConstPtr, BoxObj, Const, ConstObj, BoxFloat, ConstFloat) @@ -40,17 +40,18 @@ local_floats = list(floats) local_ints = list(ints) expected_result = 0.0 + arguments = [] for i in range(len(args)): x = args[i] if x[0] == 'f': x = local_floats.pop() t = longlong.getfloatstorage(x) - self.cpu.set_future_value_float(i, t) + arguments.append(t) else: x = local_ints.pop() - self.cpu.set_future_value_int(i, x) + arguments.append(x) expected_result += x - return expected_result + return arguments, expected_result @classmethod def get_funcbox(cls, cpu, func_ptr): @@ -107,12 +108,12 @@ ops += 'finish(f99, %s)\n' % arguments loop = parse(ops, namespace=locals()) - looptoken = LoopToken() + looptoken = JitCellToken() done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - expected_result = self._prepare_args(args, floats, ints) + argvals, expected_result = self._prepare_args(args, floats, ints) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(x - expected_result) < 0.0001 @@ -253,13 +254,13 @@ called_ops += 'finish(f%d, descr=fdescr3)\n' % total_index # compile called loop called_loop = parse(called_ops, namespace=locals()) - called_looptoken = LoopToken() + called_looptoken = JitCellToken() called_looptoken.outermost_jitdriver_sd = FakeJitDriverSD() done_number = self.cpu.get_fail_descr_number(called_loop.operations[-1].getdescr()) self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) - expected_result = self._prepare_args(args, floats, ints) - res = cpu.execute_token(called_looptoken) + argvals, expected_result = self._prepare_args(args, floats, ints) + res = cpu.execute_token(called_looptoken, *argvals) assert res.identifier == 3 t = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(t - expected_result) < 0.0001 @@ -284,12 +285,12 @@ # we want to take the fast path self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # prepare call to called_loop - self._prepare_args(args, floats, ints) - res = cpu.execute_token(othertoken) + argvals, _ = self._prepare_args(args, floats, ints) + res = cpu.execute_token(othertoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert res.identifier == 4 assert abs(x - expected_result) < 0.0001 diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -3,7 +3,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, TargetToken, ConstInt, ConstPtr, BoxObj, ConstObj, BoxFloat, ConstFloat) @@ -32,22 +32,19 @@ result_type, valueboxes, descr) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - j = 0 + args = [] for box in inputargs: if isinstance(box, BoxInt): - 
self.cpu.set_future_value_int(j, box.getint()) - j += 1 + args.append(box.getint()) elif isinstance(box, (BoxPtr, BoxObj)): - self.cpu.set_future_value_ref(j, box.getref_base()) - j += 1 + args.append(box.getref_base()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(j, box.getfloatstorage()) - j += 1 + args.append(box.getfloatstorage()) else: raise NotImplementedError(box) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *args) if res is operations[-1].getdescr(): self.guard_failed = False else: @@ -106,10 +103,9 @@ ResOperation(rop.FINISH, [i1], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) res = self.cpu.get_latest_value_int(0) assert res == 3 assert fail.identifier == 1 @@ -118,19 +114,20 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 10 @@ -139,19 +136,22 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 44) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(2) assert res == 10 @@ -162,15 +162,17 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = weakref.ref(operations[2]) self.cpu.compile_loop(inputargs, operations, looptoken) @@ -190,15 +192,17 @@ i2 = BoxInt() 
faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -206,14 +210,13 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -226,17 +229,21 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() + i3 = BoxInt() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -244,14 +251,13 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -261,19 +267,20 @@ i1 = BoxInt() i2 = BoxInt() faildescr1 = BasicFailDescr(1) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + operations[3].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail is faildescr1 count = self.cpu.get_latest_value_count() @@ -290,18 +297,17 @@ return AbstractFailDescr.__setattr__(self, name, value) 
py.test.fail("finish descrs should not be touched") faildescr = UntouchableFailDescr() # to check that is not touched - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [i0], None, descr=faildescr) ] self.cpu.compile_loop([i0], operations, looptoken) - self.cpu.set_future_value_int(0, 99) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 99) assert fail is faildescr res = self.cpu.get_latest_value_int(0) assert res == 99 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [ConstInt(42)], None, descr=faildescr) ] @@ -311,7 +317,7 @@ res = self.cpu.get_latest_value_int(0) assert res == 42 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [], None, descr=faildescr) ] @@ -320,20 +326,19 @@ assert fail is faildescr if self.cpu.supports_floats: - looptoken = LoopToken() + looptoken = JitCellToken() f0 = BoxFloat() operations = [ ResOperation(rop.FINISH, [f0], None, descr=faildescr) ] self.cpu.compile_loop([f0], operations, looptoken) value = longlong.getfloatstorage(-61.25) - self.cpu.set_future_value_float(0, value) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, value) assert fail is faildescr res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == -61.25 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [constfloat(42.5)], None, descr=faildescr) ] @@ -350,20 +355,20 @@ z = BoxInt(579) t = BoxInt(455) u = BoxInt(0) # False - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [y, x], None, descr=targettoken), ResOperation(rop.INT_ADD, [x, y], z), ResOperation(rop.INT_SUB, [y, ConstInt(1)], t), ResOperation(rop.INT_EQ, [t, ConstInt(0)], u), ResOperation(rop.GUARD_FALSE, [u], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [z, t], None, descr=looptoken), + ResOperation(rop.JUMP, [t, z], None, descr=targettoken), ] operations[-2].setfailargs([t, z]) cpu.compile_loop([x, y], operations, looptoken) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, 0, 10) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_int(1) == 55 @@ -419,14 +424,12 @@ ] ops[1].setfailargs([v_res]) # - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([v1, v2], ops, looptoken) for x, y, z in testcases: excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, x) - self.cpu.set_future_value_int(1, y) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, x, y) if (z == boom) ^ reversed: assert fail.identifier == 1 else: @@ -1082,16 +1085,18 @@ inputargs.insert(index_counter, i0) jumpargs.insert(index_counter, i1) # - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() faildescr = BasicFailDescr(15) operations = [ + ResOperation(rop.LABEL, inputargs, None, descr=targettoken), ResOperation(rop.INT_SUB, [i0, ConstInt(1)], i1), ResOperation(rop.INT_GE, [i1, ConstInt(0)], i2), ResOperation(rop.GUARD_TRUE, [i2], None), - ResOperation(rop.JUMP, jumpargs, None, descr=looptoken), + ResOperation(rop.JUMP, jumpargs, None, descr=targettoken), ] - operations[2].setfailargs(inputargs[:]) - operations[2].setdescr(faildescr) + 
operations[3].setfailargs(inputargs[:]) + operations[3].setdescr(faildescr) # self.cpu.compile_loop(inputargs, operations, looptoken) # @@ -1109,17 +1114,7 @@ assert 0 values[index_counter] = 11 # - for i, (box, val) in enumerate(zip(inputargs, values)): - if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, val) - elif isinstance(box, BoxPtr): - self.cpu.set_future_value_ref(i, val) - elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, val) - else: - assert 0 - # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *values) assert fail.identifier == 15 # dstvalues = values[:] @@ -1149,30 +1144,33 @@ py.test.skip("requires floats") fboxes = [BoxFloat() for i in range(12)] i2 = BoxInt() + targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) operations = [ + ResOperation(rop.LABEL, fboxes, None, descr=targettoken), ResOperation(rop.FLOAT_LE, [fboxes[0], constfloat(9.2)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), ResOperation(rop.FINISH, fboxes, None, descr=faildescr2), ] operations[-2].setfailargs(fboxes) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(fboxes, operations, looptoken) fboxes2 = [BoxFloat() for i in range(12)] f3 = BoxFloat() bridge = [ ResOperation(rop.FLOAT_SUB, [fboxes2[0], constfloat(1.0)], f3), - ResOperation(rop.JUMP, [f3] + fboxes2[1:], None, descr=looptoken), + ResOperation(rop.JUMP, [f3]+fboxes2[1:], None, descr=targettoken), ] self.cpu.compile_bridge(faildescr1, fboxes2, bridge, looptoken) + args = [] for i in range(len(fboxes)): x = 13.5 + 6.73 * i - self.cpu.set_future_value_float(i, longlong.getfloatstorage(x)) - fail = self.cpu.execute_token(looptoken) + args.append(longlong.getfloatstorage(x)) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 2 res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == 8.5 @@ -1214,7 +1212,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1222,14 +1220,12 @@ if test1 == -42 or combinaison[0] == 'b': for test2 in [-65, -42, -11]: if test2 == -42 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_int(n, test1) - n += 1 + args.append(test1) if combinaison[1] == 'b': - cpu.set_future_value_int(n, test2) - n += 1 - fail = cpu.execute_token(looptoken) + args.append(test2) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1271,7 +1267,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1281,16 +1277,14 @@ if test1 == -4.5 or combinaison[0] == 'b': for test2 in [-6.5, -4.5, -2.5, nan]: if test2 == -4.5 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test1)) - n += 1 + args.append( + longlong.getfloatstorage(test1)) if combinaison[1] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test2)) - n += 1 - fail = cpu.execute_token(looptoken) + args.append( + longlong.getfloatstorage(test2)) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1330,19 +1324,20 @@ 
faildescr = BasicFailDescr(1) operations.append(ResOperation(rop.FINISH, [], None, descr=faildescr)) - looptoken = LoopToken() + looptoken = JitCellToken() # self.cpu.compile_loop(inputargs, operations, looptoken) # - for i, box in enumerate(inputargs): + args = [] + for box in inputargs: if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, box.getint()) + args.append(box.getint()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, box.getfloatstorage()) + args.append(box.getfloatstorage()) else: assert 0 # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 1 def test_nan_and_infinity(self): @@ -1400,15 +1395,14 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(5))] operations[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) self.cpu.compile_loop(unique_testcase_list, operations, looptoken) - for i, box in enumerate(unique_testcase_list): - self.cpu.set_future_value_float( - i, box.getfloatstorage()) - fail = self.cpu.execute_token(looptoken) + args = [box.getfloatstorage() + for box in unique_testcase_list] + fail = self.cpu.execute_token(looptoken, *args) if fail.identifier != 5 - (expected_id^expected): if fail.identifier == 4: msg = "was taken" @@ -1675,15 +1669,14 @@ exc_tp = xtp exc_ptr = xptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_ref(1) == xptr excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1700,9 +1693,9 @@ exc_tp = ytp exc_ptr = yptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == yptr @@ -1718,14 +1711,13 @@ finish(0) ''' loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == xptr - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 0 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1895,18 +1887,14 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - 
self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 20 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 10 @@ -1940,18 +1928,14 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 42 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 42 @@ -1986,19 +1970,15 @@ ResOperation(rop.FINISH, [f2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, f2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 42.5 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 x = self.cpu.get_latest_value_float(1) @@ -2031,10 +2011,9 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([i1, i2]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1], ops, looptoken) - self.cpu.set_future_value_int(0, ord('G')) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, ord('G')) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == ord('g') @@ -2091,14 +2070,14 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1, i2, i3], ops, looptoken) - self.cpu.set_future_value_int(0, rffi.cast(lltype.Signed, raw)) - self.cpu.set_future_value_int(1, 2) - self.cpu.set_future_value_int(2, 4) - self.cpu.set_future_value_int(3, rffi.cast(lltype.Signed, fn)) + args = [rffi.cast(lltype.Signed, raw), + 2, + 4, + rffi.cast(lltype.Signed, fn)] assert glob.lst == [] - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert len(glob.lst) > 0 lltype.free(raw, flavor='raw') @@ -2147,13 +2126,12 @@ ops += [ ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0)) ] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') - self.cpu.set_future_value_int(0, buflen) - 
self.cpu.set_future_value_int(1, rffi.cast(lltype.Signed, buffer)) - fail = self.cpu.execute_token(looptoken) + args = [buflen, rffi.cast(lltype.Signed, buffer)] + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == len(cwd) assert rffi.charp2strn(buffer, buflen) == cwd @@ -2169,12 +2147,10 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[0].setfailargs([i1]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == -42 print 'step 1 ok' @@ -2183,9 +2159,7 @@ # mark as failing self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr assert self.cpu.get_latest_value_int(0) == 9 print 'step 2 ok' @@ -2201,9 +2175,7 @@ ops[0].setfailargs([]) self.cpu.compile_bridge(faildescr, [i2], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 3 assert self.cpu.get_latest_value_int(0) == 9 print 'step 3 ok' @@ -2212,9 +2184,7 @@ # mark as failing again self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr2 print 'step 4 ok' print '-'*79 @@ -2415,7 +2385,7 @@ i18 = int_add(i17, i9) finish(i18)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 @@ -2423,9 +2393,8 @@ FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, EffectInfo.MOST_GENERAL) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(looptoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(looptoken, *args) assert self.cpu.get_latest_value_int(0) == 55 ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] @@ -2435,11 +2404,10 @@ finish(i11) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(othertoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_latest_value_int(0) == 13 assert called @@ -2471,12 +2439,12 @@ finish(f2)''' loop = parse(ops) done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.3)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.2), + 
longlong.getfloatstorage(2.3)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 2.3 ops = ''' @@ -2486,11 +2454,11 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2499,11 +2467,11 @@ del called[:] self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 3.2 assert not called @@ -2561,12 +2529,12 @@ f2 = float_add(f0, f1) finish(f2)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.35)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(2.35)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.25 + 2.35 assert not called @@ -2578,13 +2546,13 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # normal call_assembler: goes to looptoken - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.25)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(3.25)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2596,7 +2564,7 @@ f2 = float_sub(f0, f1) finish(f2)''' loop = parse(ops) - looptoken2 = LoopToken() + looptoken2 = JitCellToken() looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken2) @@ -2604,10 +2572,9 @@ self.cpu.redirect_call_assembler(looptoken, looptoken2) # now, our call_assembler should go to looptoken2 - self.cpu.set_future_value_float(0, longlong.getfloatstorage(6.0)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(1.5)) - # 6.0-1.5 == 1.25+3.25 - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(6.0), + longlong.getfloatstorage(1.5)] # 6.0-1.5 == 1.25+3.25 + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2958,13 +2925,137 @@ 
ResOperation(rop.FINISH, [p0], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # overflowing value: - self.cpu.set_future_value_int(0, sys.maxint // 4 + 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) assert fail.identifier == excdescr.identifier + def test_compile_loop_with_target(self): + i0 = BoxInt() + i1 = BoxInt() + i2 = BoxInt() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken1 = TargetToken() + targettoken2 = TargetToken() + faildescr = BasicFailDescr(2) + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), + ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr), + ResOperation(rop.LABEL, [i1], None, descr=targettoken2), + ResOperation(rop.INT_GE, [i1, ConstInt(0)], i3), + ResOperation(rop.GUARD_TRUE, [i3], None, descr=BasicFailDescr(3)), + ResOperation(rop.JUMP, [i1], None, descr=targettoken1), + ] + inputargs = [i0] + operations[3].setfailargs([i1]) + operations[6].setfailargs([i1]) + + self.cpu.compile_loop(inputargs, operations, looptoken) + fail = self.cpu.execute_token(looptoken, 2) + assert fail.identifier == 2 + res = self.cpu.get_latest_value_int(0) + assert res == 10 + + inputargs = [i0] + operations = [ + ResOperation(rop.INT_SUB, [i0, ConstInt(20)], i2), + ResOperation(rop.JUMP, [i2], None, descr=targettoken2), + ] + self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) + + fail = self.cpu.execute_token(looptoken, 2) + assert fail.identifier == 3 + res = self.cpu.get_latest_value_int(0) + assert res == -10 + + def test_compile_bridge_with_target(self): + # This test creates a loopy piece of code in a bridge, and builds another + # unrelated loop that ends in a jump directly to this loopy bit of code. + # It catches a case in which we underestimate the needed frame_depth across + # the cross-loop JUMP, because we estimate it based on the frame_depth stored + # in the original loop. 
+ i0 = BoxInt() + i1 = BoxInt() + looptoken1 = JitCellToken() + targettoken1 = TargetToken() + faildescr1 = BasicFailDescr(2) + inputargs = [i0] + operations = [ + ResOperation(rop.INT_LE, [i0, ConstInt(1)], i1), + ResOperation(rop.GUARD_TRUE, [i1], None, descr=faildescr1), + ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(1234)), + ] + operations[1].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken1) + + def func(a, b, c, d, e, f, g, h, i): + assert a + 2 == b + assert a + 4 == c + assert a + 6 == d + assert a + 8 == e + assert a + 10 == f + assert a + 12 == g + assert a + 14 == h + assert a + 16 == i + FPTR = self.Ptr(self.FuncType([lltype.Signed]*9, lltype.Void)) + func_ptr = llhelper(FPTR, func) + cpu = self.cpu + calldescr = cpu.calldescrof(deref(FPTR), (lltype.Signed,)*9, lltype.Void, + EffectInfo.MOST_GENERAL) + funcbox = self.get_funcbox(cpu, func_ptr) + + i0 = BoxInt(); i1 = BoxInt(); i2 = BoxInt(); i3 = BoxInt(); i4 = BoxInt() + i5 = BoxInt(); i6 = BoxInt(); i7 = BoxInt(); i8 = BoxInt(); i9 = BoxInt() + i10 = BoxInt(); i11 = BoxInt(); i12 = BoxInt(); i13 = BoxInt(); i14 = BoxInt() + i15 = BoxInt(); i16 = BoxInt(); i17 = BoxInt(); i18 = BoxInt(); i19 = BoxInt() + i20 = BoxInt() + inputargs = [i0] + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_ADD, [i1, ConstInt(1)], i2), + ResOperation(rop.INT_ADD, [i2, ConstInt(1)], i3), + ResOperation(rop.INT_ADD, [i3, ConstInt(1)], i4), + ResOperation(rop.INT_ADD, [i4, ConstInt(1)], i5), + ResOperation(rop.INT_ADD, [i5, ConstInt(1)], i6), + ResOperation(rop.INT_ADD, [i6, ConstInt(1)], i7), + ResOperation(rop.INT_ADD, [i7, ConstInt(1)], i8), + ResOperation(rop.INT_ADD, [i8, ConstInt(1)], i9), + ResOperation(rop.INT_ADD, [i9, ConstInt(1)], i10), + ResOperation(rop.INT_ADD, [i10, ConstInt(1)], i11), + ResOperation(rop.INT_ADD, [i11, ConstInt(1)], i12), + ResOperation(rop.INT_ADD, [i12, ConstInt(1)], i13), + ResOperation(rop.INT_ADD, [i13, ConstInt(1)], i14), + ResOperation(rop.INT_ADD, [i14, ConstInt(1)], i15), + ResOperation(rop.INT_ADD, [i15, ConstInt(1)], i16), + ResOperation(rop.INT_ADD, [i16, ConstInt(1)], i17), + ResOperation(rop.INT_ADD, [i17, ConstInt(1)], i18), + ResOperation(rop.INT_ADD, [i18, ConstInt(1)], i19), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.INT_LT, [i19, ConstInt(100)], i20), + ResOperation(rop.GUARD_TRUE, [i20], None, descr=BasicFailDescr(42)), + ResOperation(rop.JUMP, [i19], None, descr=targettoken1), + ] + operations[-2].setfailargs([]) + self.cpu.compile_bridge(faildescr1, inputargs, operations, looptoken1) + + looptoken2 = JitCellToken() + inputargs = [BoxInt()] + operations = [ + ResOperation(rop.JUMP, [ConstInt(0)], None, descr=targettoken1), + ] + self.cpu.compile_loop(inputargs, operations, looptoken2) + + fail = self.cpu.execute_token(looptoken2, -9) + assert fail.identifier == 42 + class OOtypeBackendTest(BaseBackendTest): diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -3,9 +3,10 @@ from pypy.rlib.rarithmetic import intmask, LONG_BIT from pypy.rpython.lltypesystem import llmemory from pypy.jit.metainterp.history import BasicFailDescr, TreeLoop -from 
pypy.jit.metainterp.history import BoxInt, ConstInt, LoopToken -from pypy.jit.metainterp.history import BoxPtr, ConstPtr +from pypy.jit.metainterp.history import BoxInt, ConstInt, JitCellToken +from pypy.jit.metainterp.history import BoxPtr, ConstPtr, TargetToken from pypy.jit.metainterp.history import BoxFloat, ConstFloat, Const +from pypy.jit.metainterp.history import INT, FLOAT from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.executor import execute_nonspec from pypy.jit.metainterp.resoperation import opname @@ -179,7 +180,7 @@ #print >>s, ' operations[%d].suboperations = [' % i #print >>s, ' ResOperation(rop.FAIL, [%s], None)]' % ( # ', '.join([names[v] for v in op.args])) - print >>s, ' looptoken = LoopToken()' + print >>s, ' looptoken = JitCellToken()' print >>s, ' cpu.compile_loop(inputargs, operations, looptoken)' if hasattr(self.loop, 'inputargs'): for i, v in enumerate(self.loop.inputargs): @@ -525,29 +526,53 @@ startvars.append(BoxFloat(r.random_float_storage())) else: startvars.append(BoxInt(r.random_integer())) + allow_delay = True + else: + allow_delay = False assert len(dict.fromkeys(startvars)) == len(startvars) self.startvars = startvars self.prebuilt_ptr_consts = [] self.r = r - self.build_random_loop(cpu, builder_factory, r, startvars) + self.build_random_loop(cpu, builder_factory, r, startvars, allow_delay) - def build_random_loop(self, cpu, builder_factory, r, startvars): + def build_random_loop(self, cpu, builder_factory, r, startvars, allow_delay): loop = TreeLoop('test_random_function') loop.inputargs = startvars[:] loop.operations = [] - loop.token = LoopToken() - + loop._jitcelltoken = JitCellToken() builder = builder_factory(cpu, loop, startvars[:]) - self.generate_ops(builder, r, loop, startvars) + if allow_delay: + needs_a_label = True + else: + self.insert_label(loop, 0, r) + needs_a_label = False + self.generate_ops(builder, r, loop, startvars, needs_a_label=needs_a_label) self.builder = builder self.loop = loop - cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + dump(loop) + cpu.compile_loop(loop.inputargs, loop.operations, loop._jitcelltoken) - def generate_ops(self, builder, r, loop, startvars): + def insert_label(self, loop, position, r): + assert not hasattr(loop, '_targettoken') + for i in range(position): + op = loop.operations[i] + if (not op.has_no_side_effect() + or not isinstance(op.result, (BoxInt, BoxFloat))): + position = i + break # cannot move the LABEL later + randompos = r.randrange(0, len(self.startvars)+1) + self.startvars.insert(randompos, op.result) + loop._targettoken = TargetToken() + loop.operations.insert(position, ResOperation(rop.LABEL, self.startvars, None, + loop._targettoken)) + + def generate_ops(self, builder, r, loop, startvars, needs_a_label=False): block_length = pytest.config.option.block_length + istart = 0 for i in range(block_length): + istart = len(loop.operations) try: op = r.choice(builder.OPERATIONS) op.filter(builder) @@ -556,6 +581,12 @@ pass if builder.should_fail_by is not None: break + if needs_a_label and r.random() < 0.2: + self.insert_label(loop, istart, r) + needs_a_label = False + if needs_a_label: + self.insert_label(loop, istart, r) + endvars = [] used_later = {} for op in loop.operations: @@ -581,6 +612,22 @@ if pytest.config.option.output: builder.print_loop() + def runjitcelltoken(self): + if self.startvars == self.loop.inputargs: + return self.loop._jitcelltoken + if not hasattr(self, '_initialjumploop_celltoken'): + self._initialjumploop_celltoken 
= JitCellToken() + args = [] + for box in self.startvars: + if box not in self.loop.inputargs: + box = box.constbox() + args.append(box) + self.cpu.compile_loop(self.loop.inputargs, + [ResOperation(rop.JUMP, args, None, + descr=self.loop._targettoken)], + self._initialjumploop_celltoken) + return self._initialjumploop_celltoken + def get_fail_args(self): if self.should_fail_by.is_guard(): assert self.should_fail_by.getfailargs() is not None @@ -608,14 +655,8 @@ exc = cpu.grab_exc_value() assert not exc - for i, box in enumerate(self.startvars): - if isinstance(box, BoxInt): - cpu.set_future_value_int(i, box.value) - elif isinstance(box, BoxFloat): - cpu.set_future_value_float(i, box.value) - else: - raise NotImplementedError(box) - fail = cpu.execute_token(self.loop.token) + arguments = [box.value for box in self.loop.inputargs] + fail = cpu.execute_token(self.runjitcelltoken(), *arguments) assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): if isinstance(v, (BoxFloat, ConstFloat)): @@ -676,33 +717,55 @@ # to build_bridge().) # First make up the other loop... - subset = bridge_builder.subset_of_intvars(r) - subset = [i for i in subset if i in fail_args] - if len(subset) == 0: - return False + # + # New restriction: must have the same argument count and types + # as the original loop + subset = [] + for box in self.loop.inputargs: + srcbox = r.choice(fail_args) + if srcbox.type != box.type: + if box.type == INT: + srcbox = ConstInt(r.random_integer()) + elif box.type == FLOAT: + srcbox = ConstFloat(r.random_float_storage()) + else: + raise AssertionError(box.type) + subset.append(srcbox) + # args = [x.clonebox() for x in subset] rl = RandomLoop(self.builder.cpu, self.builder.fork, r, args) + dump(rl.loop) self.cpu.compile_loop(rl.loop.inputargs, rl.loop.operations, - rl.loop.token) + rl.loop._jitcelltoken) # done self.should_fail_by = rl.should_fail_by self.expected = rl.expected assert len(rl.loop.inputargs) == len(args) # The new bridge's execution will end normally at its FINISH. # Just replace the FINISH with the JUMP to the new loop. 
- jump_op = ResOperation(rop.JUMP, subset, None, descr=rl.loop.token) + jump_op = ResOperation(rop.JUMP, subset, None, + descr=rl.loop._targettoken) subloop.operations[-1] = jump_op self.guard_op = rl.guard_op self.prebuilt_ptr_consts += rl.prebuilt_ptr_consts - self.loop.token.record_jump_to(rl.loop.token) + self.loop._jitcelltoken.record_jump_to(rl.loop._jitcelltoken) self.dont_generate_more = True if r.random() < .05: return False + dump(subloop) self.builder.cpu.compile_bridge(fail_descr, fail_args, - subloop.operations, self.loop.token) + subloop.operations, + self.loop._jitcelltoken) return True +def dump(loop): + print >> sys.stderr, loop + if hasattr(loop, 'inputargs'): + print >> sys.stderr, '\t', loop.inputargs + for op in loop.operations: + print >> sys.stderr, '\t', op + def check_random_function(cpu, BuilderClass, r, num=None, max=None): loop = RandomLoop(cpu, BuilderClass, r) while True: diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -2,8 +2,8 @@ from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from pypy.jit.metainterp.history import Const, Box, BoxInt, ConstInt -from pypy.jit.metainterp.history import (AbstractFailDescr, INT, REF, FLOAT, - LoopToken) +from pypy.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT +from pypy.jit.metainterp.history import JitCellToken from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper @@ -38,6 +38,7 @@ from pypy.jit.backend.x86.jump import remap_frame_layout from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter import longlong +from pypy.rlib.rarithmetic import intmask # darwin requires the stack to be 16 bytes aligned on calls. 
Same for gcc 4.5.0, # better safe than sorry @@ -152,14 +153,13 @@ allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, allblocks) + self.target_tokens_currently_compiling = {} def teardown(self): self.pending_guard_tokens = None if WORD == 8: self.pending_memoryerror_trampoline_from = None self.mc = None - self.looppos = -1 - self.currently_compiling_loop = None self.current_clt = None def finish_once(self): @@ -310,12 +310,11 @@ mc.MOVSD_sx(8*i, i) # xmm0 to xmm7 # if IS_X86_32: - mc.LEA_rb(eax.value, +8) stack_size += 2*WORD mc.PUSH_r(eax.value) # alignment - mc.PUSH_r(eax.value) + mc.PUSH_r(esp.value) elif IS_X86_64: - mc.LEA_rb(edi.value, +16) + mc.MOV_rr(edi.value, esp.value) # # esp is now aligned to a multiple of 16 again mc.CALL(imm(slowpathaddr)) @@ -326,7 +325,7 @@ jnz_location = mc.get_relative_pos() # if IS_X86_32: - mc.ADD_ri(esp.value, 2*WORD) + mc.ADD_ri(esp.value, 2*WORD) # cancel the two PUSHes above elif IS_X86_64: # restore the registers for i in range(7, -1, -1): @@ -422,12 +421,8 @@ def assemble_loop(self, loopname, inputargs, operations, looptoken, log): '''adds the following attributes to looptoken: - _x86_loop_code (an integer giving an address) - _x86_bootstrap_code (an integer giving an address) - _x86_direct_bootstrap_code ( " " " " ) - _x86_frame_depth - _x86_param_depth - _x86_arglocs + _x86_function_addr (address of the generated func, as an int) + _x86_loop_code (debug: addr of the start of the ResOps) _x86_debug_checksum ''' # XXX this function is too longish and contains some code @@ -443,37 +438,35 @@ assert len(set(inputargs)) == len(inputargs) self.setup(looptoken) - self.currently_compiling_loop = looptoken if log: self._register_counter(False, looptoken.number) operations = self._inject_debugging_code(looptoken, operations) regalloc = RegAlloc(self, self.cpu.translate_support_code) - arglocs, operations = regalloc.prepare_loop(inputargs, operations, - looptoken, clt.allgcrefs) - looptoken._x86_arglocs = arglocs - - bootstrappos = self.mc.get_relative_pos() - stackadjustpos = self._assemble_bootstrap_code(inputargs, arglocs) - self.looppos = self.mc.get_relative_pos() - looptoken._x86_frame_depth = -1 # temporarily - looptoken._x86_param_depth = -1 # temporarily + # + self._call_header_with_stack_check() + stackadjustpos = self._patchable_stackadjust() + clt._debug_nbargs = len(inputargs) + operations = regalloc.prepare_loop(inputargs, operations, + looptoken, clt.allgcrefs) + looppos = self.mc.get_relative_pos() + looptoken._x86_loop_code = looppos + clt.frame_depth = -1 # temporarily + clt.param_depth = -1 # temporarily frame_depth, param_depth = self._assemble(regalloc, operations) - looptoken._x86_frame_depth = frame_depth - looptoken._x86_param_depth = param_depth - - directbootstrappos = self.mc.get_relative_pos() - self._assemble_bootstrap_direct_call(arglocs, self.looppos, - frame_depth+param_depth) + clt.frame_depth = frame_depth + clt.param_depth = param_depth + # + size_excluding_failure_stuff = self.mc.get_relative_pos() self.write_pending_failure_recoveries() - fullsize = self.mc.get_relative_pos() + full_size = self.mc.get_relative_pos() # rawstart = self.materialize_loop(looptoken) debug_start("jit-backend-addr") debug_print("Loop %d (%s) has address %x to %x (bootstrap %x)" % ( looptoken.number, loopname, - rawstart + self.looppos, - rawstart + directbootstrappos, + rawstart + looppos, + rawstart + size_excluding_failure_stuff, rawstart)) debug_stop("jit-backend-addr") 
self._patch_stackadjust(rawstart + stackadjustpos, @@ -484,18 +477,17 @@ if not we_are_translated(): # used only by looptoken.dump() -- useful in tests looptoken._x86_rawstart = rawstart - looptoken._x86_fullsize = fullsize + looptoken._x86_fullsize = full_size looptoken._x86_ops_offset = ops_offset + looptoken._x86_function_addr = rawstart - looptoken._x86_bootstrap_code = rawstart + bootstrappos - looptoken._x86_loop_code = rawstart + self.looppos - looptoken._x86_direct_bootstrap_code = rawstart + directbootstrappos + self.fixup_target_tokens(rawstart) self.teardown() # oprofile support if self.cpu.profile_agent is not None: name = "Loop # %s: %s" % (looptoken.number, loopname) self.cpu.profile_agent.native_code_written(name, - rawstart, fullsize) + rawstart, full_size) return ops_offset def assemble_bridge(self, faildescr, inputargs, operations, @@ -548,6 +540,9 @@ # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset + self.fixup_target_tokens(rawstart) + self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth) + self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -668,6 +663,11 @@ mc.copy_to_raw_memory(adr_target) faildescr._x86_adr_jump_offset = 0 # means "patched" + def fixup_target_tokens(self, rawstart): + for targettoken in self.target_tokens_currently_compiling: + targettoken._x86_loop_code += rawstart + self.target_tokens_currently_compiling = None + @specialize.argtype(1) def _inject_debugging_code(self, looptoken, operations): if self._debug: @@ -685,20 +685,24 @@ ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), ResOperation(rop.SETFIELD_RAW, [c_adr, box2], None, descr=self.debug_counter_descr)] - operations = ops + operations + if operations[0].getopnum() == rop.LABEL: + operations = [operations[0]] + ops + operations[1:] + else: + operations = ops + operations return operations def _assemble(self, regalloc, operations): self._regalloc = regalloc + regalloc.compute_hint_frame_locations(operations) regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging - frame_depth = regalloc.fm.frame_depth + frame_depth = regalloc.fm.get_frame_depth() param_depth = regalloc.param_depth jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: - target_frame_depth = jump_target_descr._x86_frame_depth - target_param_depth = jump_target_descr._x86_param_depth + target_frame_depth = jump_target_descr._x86_clt.frame_depth + target_param_depth = jump_target_descr._x86_clt.param_depth frame_depth = max(frame_depth, target_frame_depth) param_depth = max(param_depth, target_param_depth) return frame_depth, param_depth @@ -793,152 +797,21 @@ self.mc.MOV_ri(ebx.value, rst) # MOV ebx, rootstacktop self.mc.SUB_mi8((ebx.value, 0), 2*WORD) # SUB [ebx], 2*WORD - def _assemble_bootstrap_direct_call(self, arglocs, jmppos, stackdepth): - if IS_X86_64: - return self._assemble_bootstrap_direct_call_64(arglocs, jmppos, stackdepth) - # XXX pushing ebx esi and edi is a bit pointless, since we store - # all regsiters anyway, for the case of guard_not_forced - # XXX this can be improved greatly. 
Right now it'll behave like - # a normal call - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - offset = 2 * WORD - tmp = eax - xmmtmp = xmm0 - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert not loc.is_xmm - self.mc.MOV_rb(loc.value, offset) - else: - self.mc.MOV_rb(tmp.value, offset) - self.mc.MOV(loc, tmp) - offset += WORD - loc = floatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert loc.is_xmm - self.mc.MOVSD_xb(loc.value, offset) - else: - self.mc.MOVSD_xb(xmmtmp.value, offset) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - offset += 2 * WORD - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - - def _assemble_bootstrap_direct_call_64(self, arglocs, jmppos, stackdepth): - # XXX: Very similar to _emit_call_64 - - src_locs = [] - dst_locs = [] - xmm_src_locs = [] - xmm_dst_locs = [] - get_from_stack = [] - - # In reverse order for use with pop() - unused_gpr = [r9, r8, ecx, edx, esi, edi] - unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] - - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - - # The lists are padded with Nones - assert len(nonfloatlocs) == len(floatlocs) - - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if len(unused_gpr) > 0: - src_locs.append(unused_gpr.pop()) - dst_locs.append(loc) - else: - get_from_stack.append((loc, False)) - - floc = floatlocs[i] - if floc is not None: - if len(unused_xmm) > 0: - xmm_src_locs.append(unused_xmm.pop()) - xmm_dst_locs.append(floc) - else: - get_from_stack.append((floc, True)) - - remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) - remap_frame_layout(self, xmm_src_locs, xmm_dst_locs, X86_64_XMM_SCRATCH_REG) - - for i in range(len(get_from_stack)): - loc, is_xmm = get_from_stack[i] - if is_xmm: - self.mc.MOVSD_xb(X86_64_XMM_SCRATCH_REG.value, (2 + i) * WORD) - self.mc.MOVSD(loc, X86_64_XMM_SCRATCH_REG) - else: - self.mc.MOV_rb(X86_64_SCRATCH_REG.value, (2 + i) * WORD) - # XXX: We're assuming that "loc" won't require regloc to - # clobber the scratch register - self.mc.MOV(loc, X86_64_SCRATCH_REG) - - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking - oldnonfloatlocs, oldfloatlocs = oldlooptoken._x86_arglocs - newnonfloatlocs, newfloatlocs = newlooptoken._x86_arglocs - assert len(oldnonfloatlocs) == len(newnonfloatlocs) - assert len(oldfloatlocs) == len(newfloatlocs) + old_nbargs = oldlooptoken.compiled_loop_token._debug_nbargs + new_nbargs = newlooptoken.compiled_loop_token._debug_nbargs + assert old_nbargs == new_nbargs # we overwrite the instructions at the old _x86_direct_bootstrap_code # to start with a JMP to the new _x86_direct_bootstrap_code. # Ideally we should rather patch all existing CALLs, but well. 
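The "<= 13" assert added to redirect_call_assembler() here works together with the min_bytes_before_label / flush_loop() machinery introduced in regalloc.py further down in this diff: the start of every loop has to be long enough to be safely overwritten by an absolute JMP, presumably because that patch needs up to 13 bytes on x86-64. A small sketch of the invariant only (the helper name is made up, not part of the commit):

MAX_JMP_PATCH_SIZE = 13    # mirrors the assert above and min_bytes_before_label

def nops_needed_before_label(bytes_emitted_so_far):
    # flush_loop() keeps emitting NOPs until at least this many bytes exist,
    # so a later redirect_call_assembler() can overwrite the function start
    # with a JMP without clobbering code that a LABEL points into
    return max(0, MAX_JMP_PATCH_SIZE - bytes_emitted_so_far)

assert nops_needed_before_label(4) == 9     # short prologue: pad with 9 NOPs
assert nops_needed_before_label(20) == 0    # long enough already: no padding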
- oldadr = oldlooptoken._x86_direct_bootstrap_code - target = newlooptoken._x86_direct_bootstrap_code + oldadr = oldlooptoken._x86_function_addr + target = newlooptoken._x86_function_addr mc = codebuf.MachineCodeBlockWrapper() mc.JMP(imm(target)) + assert mc.get_relative_pos() <= 13 # keep in sync with prepare_loop() mc.copy_to_raw_memory(oldadr) - def _assemble_bootstrap_code(self, inputargs, arglocs): - nonfloatlocs, floatlocs = arglocs - self._call_header() - stackadjustpos = self._patchable_stackadjust() - tmp = eax - xmmtmp = xmm0 - self.mc.begin_reuse_scratch_register() - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is None: - continue - if isinstance(loc, RegLoc): - target = loc - else: - target = tmp - if inputargs[i].type == REF: - adr = self.fail_boxes_ptr.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - self.mc.MOV(heap(adr), imm0) - else: - adr = self.fail_boxes_int.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - if target is not loc: - assert isinstance(loc, StackLoc) - self.mc.MOV_br(loc.value, target.value) - for i in range(len(floatlocs)): - loc = floatlocs[i] - if loc is None: - continue - adr = self.fail_boxes_float.get_addr_for_num(i) - if isinstance(loc, RegLoc): - self.mc.MOVSD(loc, heap(adr)) - else: - self.mc.MOVSD(xmmtmp, heap(adr)) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - self.mc.end_reuse_scratch_register() - return stackadjustpos - def dump(self, text): if not self.verbose: return @@ -965,7 +838,7 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.SUB_ri(esp.value, 8) # = size of doubles self.mc.MOVSD_sx(0, loc.value) - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.PUSH_b(get_ebp_ofs(loc.position)) self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) @@ -976,13 +849,25 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.MOVSD_xs(loc.value, 0) self.mc.ADD_ri(esp.value, 8) # = size of doubles - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.POP_b(get_ebp_ofs(loc.position + 1)) self.mc.POP_b(get_ebp_ofs(loc.position)) else: self.mc.POP(loc) + def regalloc_immedmem2mem(self, from_loc, to_loc): + # move a ConstFloatLoc directly to a StackLoc, as two MOVs + # (even on x86-64, because the immediates are encoded as 32 bits) + assert isinstance(from_loc, ConstFloatLoc) + assert isinstance(to_loc, StackLoc) + low_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[0] + high_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[1] + low_part = intmask(low_part) + high_part = intmask(high_part) + self.mc.MOV_bi(to_loc.value, low_part) + self.mc.MOV_bi(to_loc.value + 4, high_part) + def regalloc_perform(self, op, arglocs, resloc): genop_list[op.getopnum()](self, op, arglocs, resloc) @@ -1134,18 +1019,18 @@ self.mc.MOVSD_sx(p, loc.value) else: self.mc.MOV_sr(p, loc.value) - p += round_up_to_4(loc.width) + p += loc.get_width() p = 0 for i in range(start, n): loc = arglocs[i] if not isinstance(loc, RegLoc): - if loc.width == 8: + if loc.get_width() == 8: self.mc.MOVSD(xmm0, loc) self.mc.MOVSD_sx(p, xmm0.value) else: self.mc.MOV(tmp, loc) self.mc.MOV_sr(p, tmp.value) - p += round_up_to_4(loc.width) + p += loc.get_width() self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) @@ -1882,10 +1767,10 @@ DESCR_INT = 0x01 DESCR_FLOAT = 0x02 DESCR_SPECIAL = 
0x03 - # XXX: 4*8 works on i386, should we optimize for that case? - CODE_FROMSTACK = 4*16 + CODE_FROMSTACK = 4 * (8 + 8*IS_X86_64) CODE_STOP = 0 | DESCR_SPECIAL CODE_HOLE = 4 | DESCR_SPECIAL + CODE_INPUTARG = 8 | DESCR_SPECIAL def write_failure_recovery_description(self, mc, failargs, locs): for i in range(len(failargs)): @@ -1901,7 +1786,11 @@ raise AssertionError("bogus kind") loc = locs[i] if isinstance(loc, StackLoc): - n = self.CODE_FROMSTACK//4 + loc.position + pos = loc.position + if pos < 0: + mc.writechar(chr(self.CODE_INPUTARG)) + pos = ~pos + n = self.CODE_FROMSTACK//4 + pos else: assert isinstance(loc, RegLoc) n = loc.value @@ -1921,6 +1810,7 @@ descr_to_box_type = [REF, INT, FLOAT] bytecode = rffi.cast(rffi.UCHARP, bytecode) arglocs = [] + code_inputarg = False while 1: # decode the next instruction from the bytecode code = rffi.cast(lltype.Signed, bytecode[0]) @@ -1939,11 +1829,17 @@ break kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 + if code_inputarg: + code = ~code + code_inputarg = False loc = X86FrameManager.frame_pos(code, descr_to_box_type[kind]) elif code == self.CODE_STOP: break elif code == self.CODE_HOLE: continue + elif code == self.CODE_INPUTARG: + code_inputarg = True + continue else: # 'code' identifies a register kind = code & 3 @@ -1959,6 +1855,7 @@ def grab_frame_values(self, bytecode, frame_addr, allregisters): # no malloc allowed here!! self.fail_ebp = allregisters[16 + ebp.value] + code_inputarg = False num = 0 value_hi = 0 while 1: @@ -1979,6 +1876,9 @@ # load the value from the stack kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 + if code_inputarg: + code = ~code + code_inputarg = False stackloc = frame_addr + get_ebp_ofs(code) value = rffi.cast(rffi.LONGP, stackloc)[0] if kind == self.DESCR_FLOAT and WORD == 4: @@ -1991,6 +1891,9 @@ if code == self.CODE_HOLE: num += 1 continue + if code == self.CODE_INPUTARG: + code_inputarg = True + continue assert code == self.CODE_STOP break code >>= 2 @@ -2095,9 +1998,9 @@ # returns in eax the fail_index # now we return from the complete frame, which starts from - # _assemble_bootstrap_code(). The LEA in _call_footer below throws - # away most of the frame, including all the PUSHes that we did just - # above. + # _call_header_with_stack_check(). The LEA in _call_footer below + # throws away most of the frame, including all the PUSHes that we + # did just above. 
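The new CODE_INPUTARG byte lets the failure-recovery description refer to the input-argument slots above ebp, which now have negative frame positions. A simplified, illustrative model of the encoding and of the decode loop above (single-byte codes and stack locations only; the real encoder also handles registers and emits a variable-length sequence for large values):

# constants mirror assembler.py for the 64-bit case; the rest is a stand-in
DESCR_REF, DESCR_INT, DESCR_FLOAT, DESCR_SPECIAL = 0x00, 0x01, 0x02, 0x03
CODE_FROMSTACK = 4 * (8 + 8*1)        # 4 * (8 + 8*IS_X86_64), here IS_X86_64=1
CODE_STOP      = 0 | DESCR_SPECIAL
CODE_HOLE      = 4 | DESCR_SPECIAL
CODE_INPUTARG  = 8 | DESCR_SPECIAL

def encode_stack_loc(position, kind):
    # negative positions are the input arguments living at [ebp+8..];
    # they are announced with CODE_INPUTARG and stored as ~position
    out = []
    if position < 0:
        out.append(CODE_INPUTARG)
        position = ~position
    out.append(CODE_FROMSTACK + 4*position + kind)
    return out

def decode(codes):
    code_inputarg = False
    for code in codes:
        if code == CODE_INPUTARG:
            code_inputarg = True
            continue
        if code == CODE_STOP:
            break
        if code == CODE_HOLE:
            continue
        kind = code & 3
        pos = (code - CODE_FROMSTACK) >> 2
        if code_inputarg:
            pos = ~pos
            code_inputarg = False
        yield pos, kind

stream = (encode_stack_loc(-3, DESCR_INT) +      # an input-argument slot
          encode_stack_loc(5, DESCR_FLOAT) +     # an ordinary spill slot
          [CODE_STOP])
assert list(decode(stream)) == [(-3, DESCR_INT), (5, DESCR_FLOAT)]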
self._call_footer() rawstart = mc.materialize(self.cpu.asmmemmgr, []) @@ -2180,7 +2083,7 @@ argtypes=op.getdescr().get_arg_types(), callconv=op.getdescr().get_call_conv()) - if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: + if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.type == FLOAT: # a float or a long long return if op.getdescr().get_return_type() == 'L': self.mc.MOV_br(resloc.value, eax.value) # long long @@ -2344,11 +2247,11 @@ fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) descr = op.getdescr() - assert isinstance(descr, LoopToken) - assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) + assert isinstance(descr, JitCellToken) + assert len(arglocs) - 2 == descr.compiled_loop_token._debug_nbargs # - # Write a call to the direct_bootstrap_code of the target assembler - self._emit_call(fail_index, imm(descr._x86_direct_bootstrap_code), + # Write a call to the target assembler + self._emit_call(fail_index, imm(descr._x86_function_addr), arglocs, 2, tmp=eax) if op.result is None: assert result_loc is None @@ -2578,15 +2481,21 @@ gcrootmap.put(self.gcrootmap_retaddr_forced, mark) self.gcrootmap_retaddr_forced = -1 - def target_arglocs(self, loop_token): - return loop_token._x86_arglocs - - def closing_jump(self, loop_token): - if loop_token is self.currently_compiling_loop: + def closing_jump(self, target_token): + # The backend's logic assumes that the target code is in a piece of + # assembler that was also called with the same number of arguments, + # so that the locations [ebp+8..] of the input arguments are valid + # stack locations both before and after the jump. + my_nbargs = self.current_clt._debug_nbargs + target_nbargs = target_token._x86_clt._debug_nbargs + assert my_nbargs == target_nbargs + # + target = target_token._x86_loop_code + if target_token in self.target_tokens_currently_compiling: curpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(self.looppos - curpos) + self.mc.JMP_l(target - curpos) else: - self.mc.JMP(imm(loop_token._x86_loop_code)) + self.mc.JMP(imm(target)) def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) @@ -2659,11 +2568,6 @@ num = getattr(rop, opname.upper()) genop_list[num] = value -def round_up_to_4(size): - if size < 4: - return 4 - return size - # XXX: ri386 migration shims: def addr_add(reg_or_imm1, reg_or_imm2, offset=0, scale=0): return AddressLoc(reg_or_imm1, reg_or_imm2, scale, offset) diff --git a/pypy/jit/backend/x86/jump.py b/pypy/jit/backend/x86/jump.py --- a/pypy/jit/backend/x86/jump.py +++ b/pypy/jit/backend/x86/jump.py @@ -1,6 +1,6 @@ import sys from pypy.tool.pairtype import extendabletype -from pypy.jit.backend.x86.regloc import ImmedLoc, StackLoc +from pypy.jit.backend.x86.regloc import ImmediateAssemblerLocation, StackLoc def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg): pending_dests = len(dst_locations) @@ -12,7 +12,7 @@ srccount[key] = 0 for i in range(len(dst_locations)): src = src_locations[i] - if isinstance(src, ImmedLoc): + if isinstance(src, ImmediateAssemblerLocation): continue key = src._getregkey() if key in srccount: @@ -31,7 +31,7 @@ srccount[key] = -1 # means "it's done" pending_dests -= 1 src = src_locations[i] - if not isinstance(src, ImmedLoc): + if not isinstance(src, ImmediateAssemblerLocation): key = src._getregkey() if key in srccount: srccount[key] -= 1 @@ -66,6 +66,13 @@ def _move(assembler, src, dst, tmpreg): if 
dst.is_memory_reference() and src.is_memory_reference(): + if isinstance(src, ImmediateAssemblerLocation): + assembler.regalloc_immedmem2mem(src, dst) + return + if tmpreg is None: + assembler.regalloc_push(src) + assembler.regalloc_pop(dst) + return assembler.regalloc_mov(src, tmpreg) src = tmpreg assembler.regalloc_mov(src, dst) @@ -87,7 +94,7 @@ dstloc = dst_locations2[i] if isinstance(loc, StackLoc): key = loc._getregkey() - if (key in dst_keys or (loc.width > WORD and + if (key in dst_keys or (loc.get_width() > WORD and (key + WORD) in dst_keys)): assembler.regalloc_push(loc) extrapushes.append(dstloc) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -5,7 +5,8 @@ import os from pypy.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr, ResOperation, BoxPtr, ConstFloat, - BoxFloat, LoopToken, INT, REF, FLOAT) + BoxFloat, INT, REF, FLOAT, + TargetToken, JitCellToken) from pypy.jit.backend.x86.regloc import * from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.rlib.objectmodel import we_are_translated @@ -27,7 +28,7 @@ class X86RegisterManager(RegisterManager): box_types = [INT, REF] - all_regs = [eax, ecx, edx, ebx, esi, edi] + all_regs = [ecx, eax, edx, ebx, esi, edi] no_lower_byte_regs = [esi, edi] save_around_call_regs = [eax, edx, ecx] frame_reg = ebp @@ -59,7 +60,7 @@ class X86_64_RegisterManager(X86RegisterManager): # r11 omitted because it's used as scratch - all_regs = [eax, ecx, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15] + all_regs = [ecx, eax, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15] no_lower_byte_regs = [] save_around_call_regs = [eax, ecx, edx, esi, edi, r8, r9, r10] @@ -129,15 +130,19 @@ @staticmethod def frame_pos(i, box_type): if IS_X86_32 and box_type == FLOAT: - return StackLoc(i, get_ebp_ofs(i+1), 2, box_type) + return StackLoc(i, get_ebp_ofs(i+1), box_type) else: - return StackLoc(i, get_ebp_ofs(i), 1, box_type) + return StackLoc(i, get_ebp_ofs(i), box_type) @staticmethod def frame_size(box_type): if IS_X86_32 and box_type == FLOAT: return 2 else: return 1 + @staticmethod + def get_loc_index(loc): + assert isinstance(loc, StackLoc) + return loc.position if WORD == 4: gpr_reg_mgr_cls = X86RegisterManager @@ -159,6 +164,8 @@ # to be read/used by the assembler too self.jump_target_descr = None self.close_stack_struct = 0 + self.final_jump_op = None + self.min_bytes_before_label = 0 def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() @@ -167,74 +174,83 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity = self._compute_vars_longevity(inputargs, operations) - self.longevity = longevity - self.rm = gpr_reg_mgr_cls(longevity, + self._compute_vars_longevity(inputargs, operations) + self.rm = gpr_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) - self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, + self.xrm = xmm_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) return operations def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): operations = self._prepare(inputargs, operations, allgcrefs) - jump = operations[-1] - loop_consts = self._compute_loop_consts(inputargs, jump, looptoken) - self.loop_consts = loop_consts - return self._process_inputargs(inputargs), operations + self._set_initial_bindings(inputargs) + # note: we need to 
make a copy of inputargs because possibly_free_vars + # is also used on op args, which is a non-resizable list + self.possibly_free_vars(list(inputargs)) + self.min_bytes_before_label = 13 + return operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, allgcrefs): operations = self._prepare(inputargs, operations, allgcrefs) - self.loop_consts = {} self._update_bindings(arglocs, inputargs) - self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] return operations def reserve_param(self, n): self.param_depth = max(self.param_depth, n) - def _process_inputargs(self, inputargs): - # XXX we can sort out here by longevity if we need something - # more optimal - floatlocs = [None] * len(inputargs) - nonfloatlocs = [None] * len(inputargs) - # Don't use all_regs[0] for passing arguments around a loop. - # Must be kept in sync with consider_jump(). - # XXX this should probably go to llsupport/regalloc.py - xmmtmp = self.xrm.free_regs.pop(0) - tmpreg = self.rm.free_regs.pop(0) - assert tmpreg == X86RegisterManager.all_regs[0] - assert xmmtmp == X86XMMRegisterManager.all_regs[0] - for i in range(len(inputargs)): - arg = inputargs[i] - assert not isinstance(arg, Const) - reg = None - if arg not in self.loop_consts and self.longevity[arg][1] > -1: - if arg.type == FLOAT: - # xxx is it really a good idea? at the first CALL they - # will all be flushed anyway - reg = self.xrm.try_allocate_reg(arg) + def _set_initial_bindings(self, inputargs): + if IS_X86_64: + inputargs = self._set_initial_bindings_regs_64(inputargs) + # ... + # stack layout: arg2 + # arg1 + # arg0 + # return address + # saved ebp <-- ebp points here + # ... + cur_frame_pos = - 1 - FRAME_FIXED_SIZE + assert get_ebp_ofs(cur_frame_pos-1) == 2*WORD + assert get_ebp_ofs(cur_frame_pos-2) == 3*WORD + # + for box in inputargs: + assert isinstance(box, Box) + # + if IS_X86_32 and box.type == FLOAT: + cur_frame_pos -= 2 + else: + cur_frame_pos -= 1 + loc = self.fm.frame_pos(cur_frame_pos, box.type) + self.fm.set_binding(box, loc) + + def _set_initial_bindings_regs_64(self, inputargs): + # In reverse order for use with pop() + unused_gpr = [r9, r8, ecx, edx, esi, edi] + unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] + # + pass_on_stack = [] + # + for box in inputargs: + assert isinstance(box, Box) + # + if box.type == FLOAT: + if len(unused_xmm) > 0: + ask = unused_xmm.pop() + got = self.xrm.try_allocate_reg(box, selected_reg=ask) + assert ask == got else: - reg = self.rm.try_allocate_reg(arg) - if reg: - loc = reg + pass_on_stack.append(box) else: - loc = self.fm.loc(arg) - if arg.type == FLOAT: - floatlocs[i] = loc - else: - nonfloatlocs[i] = loc - # otherwise we have it saved on stack, so no worry - self.rm.free_regs.insert(0, tmpreg) - self.xrm.free_regs.insert(0, xmmtmp) - assert tmpreg not in nonfloatlocs - assert xmmtmp not in floatlocs - # note: we need to make a copy of inputargs because possibly_free_vars - # is also used on op args, which is a non-resizable list - self.possibly_free_vars(list(inputargs)) - return nonfloatlocs, floatlocs + if len(unused_gpr) > 0: + ask = unused_gpr.pop() + got = self.rm.try_allocate_reg(box, selected_reg=ask) + assert ask == got + else: + pass_on_stack.append(box) + # + return pass_on_stack def possibly_free_var(self, var): if var.type == FLOAT: @@ -287,15 +303,15 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.getopnum() != rop.JUMP or jump.getdescr() is not 
looptoken: - loop_consts = {} - else: - loop_consts = {} - for i in range(len(inputargs)): - if inputargs[i] is jump.getarg(i): - loop_consts[inputargs[i]] = i - return loop_consts + #def _compute_loop_consts(self, inputargs, jump, looptoken): + # if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: + # loop_consts = {} + # else: + # loop_consts = {} + # for i in range(len(inputargs)): + # if inputargs[i] is jump.getarg(i): + # loop_consts[inputargs[i]] = i + # return loop_consts def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py @@ -311,7 +327,7 @@ self.xrm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) else: if isinstance(loc, RegLoc): if loc is ebp: @@ -320,7 +336,7 @@ self.rm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) self.rm.free_regs = [] for reg in self.rm.all_regs: if reg not in used: @@ -356,7 +372,7 @@ def get_current_depth(self): # return (self.fm.frame_depth, self.param_depth), but trying to share # the resulting tuple among several calls - arg0 = self.fm.frame_depth + arg0 = self.fm.get_frame_depth() arg1 = self.param_depth result = self.assembler._current_depths_cache if result[0] != arg0 or result[1] != arg1: @@ -445,13 +461,26 @@ i += 1 assert not self.rm.reg_bindings assert not self.xrm.reg_bindings + self.flush_loop() self.assembler.mc.mark_op(None) # end of the loop + def flush_loop(self): + # rare case: if the loop is too short, pad with NOPs + mc = self.assembler.mc + while mc.get_relative_pos() < self.min_bytes_before_label: + mc.NOP() + def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" + + # returns a pair longevity/useful. Non-useful variables are ones that + # never appear in the assembler or it does not matter if they appear on + # stack or in registers. 
Main example is loop arguments that go + # only to guard operations or to jump or to finish produced = {} last_used = {} + last_real_usage = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -459,10 +488,16 @@ continue assert op.result not in produced produced[op.result] = i + opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) - if isinstance(arg, Box) and arg not in last_used: + if not isinstance(arg, Box): + continue + if arg not in last_used: last_used[arg] = i + if opnum != rop.JUMP and opnum != rop.LABEL: + if arg not in last_real_usage: + last_real_usage[arg] = i if op.is_guard(): for arg in op.getfailargs(): if arg is None: # hole @@ -470,7 +505,8 @@ assert isinstance(arg, Box) if arg not in last_used: last_used[arg] = i - + self.last_real_usage = last_real_usage + # longevity = {} for arg in produced: if arg in last_used: @@ -486,7 +522,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity + self.longevity = longevity def loc(self, v): if v is None: # xxx kludgy @@ -883,7 +919,7 @@ def consider_call_assembler(self, op, guard_op): descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, JitCellToken) jd = descr.outermost_jitdriver_sd assert jd is not None size = jd.portal_calldescr.get_result_size(self.translate_support_code) @@ -1313,35 +1349,72 @@ self.rm.possibly_free_var(tmpbox_low) self.rm.possibly_free_var(tmpbox_high) + def compute_hint_frame_locations(self, operations): + # optimization only: fill in the 'hint_frame_locations' dictionary + # of 'fm' based on the JUMP at the end of the loop, by looking + # at where we would like the boxes to be after the jump. + op = operations[-1] + if op.getopnum() != rop.JUMP: + return + self.final_jump_op = op + descr = op.getdescr() + assert isinstance(descr, TargetToken) + if descr._x86_loop_code != 0: + # if the target LABEL was already compiled, i.e. if it belongs + # to some already-compiled piece of code + self._compute_hint_frame_locations_from_descr(descr) + #else: + # The loop ends in a JUMP going back to a LABEL in the same loop. + # We cannot fill 'hint_frame_locations' immediately, but we can + # wait until the corresponding consider_label() to know where the + # we would like the boxes to be after the jump. 
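A compact illustration of two of the ingredients added above, with toy stand-ins for the real classes: the last_real_usage table that consider_label() later uses to decide which boxes to force_spill(), and the hint mapping built from the final JUMP once its target LABEL is already compiled (mirroring _compute_hint_frame_locations_from_descr() below). This is a sketch of the idea only, not the PyPy code:

class Box(object):                    # toy stand-in for the metainterp Box
    pass

class StackLoc(object):               # toy stand-in for the x86 StackLoc
    def __init__(self, position):
        self.position = position

def compute_last_real_usage(operations):
    # operations are modelled as (opname, list_of_args) pairs; walking the
    # trace backwards, remember the last use of each box that is not merely
    # a JUMP or LABEL argument -- boxes with no "real" use left are the ones
    # that may simply be spilled at the label
    last_real_usage = {}
    for i in range(len(operations) - 1, -1, -1):
        opname, args = operations[i]
        if opname in ('jump', 'label'):
            continue
        for arg in args:
            if isinstance(arg, Box) and arg not in last_real_usage:
                last_real_usage[arg] = i
    return last_real_usage

def hint_frame_locations(jump_args, target_arglocs):
    # for every box that the final JUMP passes into a stack slot of the
    # already-compiled LABEL, hint the frame manager to allocate it straight
    # into that slot, so the jump needs no move at all
    assert len(jump_args) == len(target_arglocs)
    hints = {}
    for box, loc in zip(jump_args, target_arglocs):
        if isinstance(box, Box) and isinstance(loc, StackLoc):
            hints[box] = loc
    return hints

i0, i1 = Box(), Box()
trace = [('label',   [i0, i1]),       # 0
         ('int_add', [i0, i1]),       # 1  <- last real use of both boxes
         ('jump',    [i0, i1])]       # 2  (does not count as a real use)
assert compute_last_real_usage(trace) == {i0: 1, i1: 1}

hints = hint_frame_locations([i0, i1], [StackLoc(0), 'ebx'])
assert hints[i0].position == 0 and i1 not in hints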
+ + def _compute_hint_frame_locations_from_descr(self, descr): + arglocs = descr._x86_arglocs + jump_op = self.final_jump_op + assert len(arglocs) == jump_op.numargs() + for i in range(jump_op.numargs()): + box = jump_op.getarg(i) + if isinstance(box, Box): + loc = arglocs[i] + if isinstance(loc, StackLoc): + self.fm.hint_frame_locations[box] = loc + def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, TargetToken) + arglocs = descr._x86_arglocs self.jump_target_descr = descr - nonfloatlocs, floatlocs = assembler.target_arglocs(self.jump_target_descr) - # compute 'tmploc' to be all_regs[0] by spilling what is there - box = TempBox() - box1 = TempBox() - tmpreg = X86RegisterManager.all_regs[0] - tmploc = self.rm.force_allocate_reg(box, selected_reg=tmpreg) - xmmtmp = X86XMMRegisterManager.all_regs[0] - self.xrm.force_allocate_reg(box1, selected_reg=xmmtmp) # Part about non-floats - # XXX we don't need a copy, we only just the original list - src_locations1 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type != FLOAT] - assert tmploc not in nonfloatlocs - dst_locations1 = [loc for loc in nonfloatlocs if loc is not None] + src_locations1 = [] + dst_locations1 = [] # Part about floats - src_locations2 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type == FLOAT] - dst_locations2 = [loc for loc in floatlocs if loc is not None] + src_locations2 = [] + dst_locations2 = [] + # Build the four lists + for i in range(op.numargs()): + box = op.getarg(i) + src_loc = self.loc(box) + dst_loc = arglocs[i] + if box.type != FLOAT: + src_locations1.append(src_loc) + dst_locations1.append(dst_loc) + else: + src_locations2.append(src_loc) + dst_locations2.append(dst_loc) + # Do we have a temp var? 
+ if IS_X86_64: + tmpreg = X86_64_SCRATCH_REG + xmmtmp = X86_64_XMM_SCRATCH_REG + else: + tmpreg = None + xmmtmp = None + # Do the remapping remap_frame_layout_mixed(assembler, - src_locations1, dst_locations1, tmploc, + src_locations1, dst_locations1, tmpreg, src_locations2, dst_locations2, xmmtmp) - self.rm.possibly_free_var(box) - self.xrm.possibly_free_var(box1) self.possibly_free_vars_for_op(op) assembler.closing_jump(self.jump_target_descr) @@ -1357,7 +1430,7 @@ def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): shape = gcrootmap.get_basic_shape(IS_X86_64) - for v, val in self.fm.frame_bindings.items(): + for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) gcrootmap.add_frame_offset(shape, get_ebp_ofs(val.position)) @@ -1392,6 +1465,56 @@ # the FORCE_TOKEN operation returns directly 'ebp' self.rm.force_allocate_frame_reg(op.result) + def consider_label(self, op): + descr = op.getdescr() + assert isinstance(descr, TargetToken) + inputargs = op.getarglist() + arglocs = [None] * len(inputargs) + # + # we use force_spill() on the boxes that are not going to be really + # used any more in the loop, but that are kept alive anyway + # by being in a next LABEL's or a JUMP's argument or fail_args + # of some guard + position = self.rm.position + for arg in inputargs: + assert isinstance(arg, Box) + if self.last_real_usage.get(arg, -1) <= position: + self.force_spill_var(arg) + # + # we need to make sure that no variable is stored in ebp + for arg in inputargs: + if self.loc(arg) is ebp: + loc2 = self.fm.loc(arg) + self.assembler.mc.MOV(loc2, ebp) + self.rm.bindings_to_frame_reg.clear() + # + for i in range(len(inputargs)): + arg = inputargs[i] + assert isinstance(arg, Box) + loc = self.loc(arg) + assert loc is not ebp + arglocs[i] = loc + if isinstance(loc, RegLoc): + self.fm.mark_as_free(arg) + # + # if we are too close to the start of the loop, the label's target may + # get overridden by redirect_call_assembler(). (rare case) + self.flush_loop() + # + descr._x86_arglocs = arglocs + descr._x86_loop_code = self.assembler.mc.get_relative_pos() + descr._x86_clt = self.assembler.current_clt + self.assembler.target_tokens_currently_compiling[descr] = None + self.possibly_free_vars_for_op(op) + # + # if the LABEL's descr is precisely the target of the JUMP at the + # end of the same loop, i.e. if what we are compiling is a single + # loop that ends up jumping to this LABEL, then we can now provide + # the hints about the expected position of the spilled variables. + jump_op = self.final_jump_op + if jump_op is not None and jump_op.getdescr() is descr: + self._compute_hint_frame_locations_from_descr(descr) + def not_implemented_op(self, op): not_implemented("not implemented operation: %s" % op.getopname()) @@ -1447,3 +1570,7 @@ def not_implemented(msg): os.write(2, '[x86/regalloc] %s\n' % msg) raise NotImplementedError(msg) + +# xxx hack: set a default value for TargetToken._x86_loop_code. +# If 0, we know that it is a LABEL that was not compiled yet. +TargetToken._x86_loop_code = 0 diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -16,8 +16,7 @@ # class AssemblerLocation(object): - # XXX: Is adding "width" here correct? 
- _attrs_ = ('value', 'width', '_location_code') + _attrs_ = ('value', '_location_code') _immutable_ = True def _getregkey(self): return self.value @@ -28,6 +27,9 @@ def location_code(self): return self._location_code + def get_width(self): + raise NotImplementedError + def value_r(self): return self.value def value_b(self): return self.value def value_s(self): return self.value @@ -43,14 +45,21 @@ _immutable_ = True _location_code = 'b' - def __init__(self, position, ebp_offset, num_words, type): - assert ebp_offset < 0 # so no confusion with RegLoc.value + def __init__(self, position, ebp_offset, type): + # _getregkey() returns self.value; the value returned must not + # conflict with RegLoc._getregkey(). It doesn't a bit by chance, + # so let it fail the following assert if it no longer does. + assert not (0 <= ebp_offset < 8 + 8 * IS_X86_64) self.position = position self.value = ebp_offset - self.width = num_words * WORD # One of INT, REF, FLOAT self.type = type + def get_width(self): + if self.type == FLOAT: + return 8 + return WORD + def __repr__(self): return '%d(%%ebp)' % (self.value,) @@ -64,10 +73,8 @@ self.value = regnum self.is_xmm = is_xmm if self.is_xmm: - self.width = 8 self._location_code = 'x' else: - self.width = WORD self._location_code = 'r' def __repr__(self): if self.is_xmm: @@ -75,6 +82,11 @@ else: return rx86.R.names[self.value] + def get_width(self): + if self.is_xmm: + return 8 + return WORD + def lowest8bits(self): assert not self.is_xmm return RegLoc(rx86.low_byte(self.value), False) @@ -92,9 +104,11 @@ else: return eax -class ImmedLoc(AssemblerLocation): +class ImmediateAssemblerLocation(AssemblerLocation): _immutable_ = True - width = WORD + +class ImmedLoc(ImmediateAssemblerLocation): + _immutable_ = True _location_code = 'i' def __init__(self, value): @@ -105,6 +119,9 @@ def getint(self): return self.value + def get_width(self): + return WORD + def __repr__(self): return "ImmedLoc(%d)" % (self.value) @@ -117,7 +134,6 @@ class AddressLoc(AssemblerLocation): _immutable_ = True - width = WORD # The address is base_loc + (scaled_loc << scale) + static_offset def __init__(self, base_loc, scaled_loc, scale=0, static_offset=0): assert 0 <= scale < 4 @@ -146,6 +162,9 @@ info = getattr(self, attr, '?') return '' % (self._location_code, info) + def get_width(self): + return WORD + def value_a(self): return self.loc_a @@ -180,32 +199,34 @@ raise AssertionError(self._location_code) return result -class ConstFloatLoc(AssemblerLocation): - # XXX: We have to use this class instead of just AddressLoc because - # we want a width of 8 (... I think. Check this!) +class ConstFloatLoc(ImmediateAssemblerLocation): _immutable_ = True - width = 8 _location_code = 'j' def __init__(self, address): self.value = address + def get_width(self): + return 8 + def __repr__(self): return '' % (self.value,) if IS_X86_32: - class FloatImmedLoc(AssemblerLocation): + class FloatImmedLoc(ImmediateAssemblerLocation): # This stands for an immediate float. It cannot be directly used in # any assembler instruction. Instead, it is meant to be decomposed # in two 32-bit halves. On 64-bit, FloatImmedLoc() is a function # instead; see below. 
_immutable_ = True - width = 8 _location_code = '#' # don't use me def __init__(self, floatstorage): self.aslonglong = floatstorage + def get_width(self): + return 8 + def low_part(self): return intmask(self.aslonglong) diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.llinterp import LLInterpreter from pypy.rlib.objectmodel import we_are_translated +from pypy.jit.codewriter import longlong from pypy.jit.metainterp import history, compile from pypy.jit.backend.x86.assembler import Assembler386 from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS @@ -21,7 +22,6 @@ supports_floats = True supports_singlefloats = True - BOOTSTRAP_TP = lltype.FuncType([], lltype.Signed) dont_keepalive_stuff = False # for tests with_threads = False @@ -91,15 +91,6 @@ return self.assembler.assemble_bridge(faildescr, inputargs, operations, original_loop_token, log=log) - def set_future_value_int(self, index, intvalue): - self.assembler.fail_boxes_int.setitem(index, intvalue) - - def set_future_value_float(self, index, floatvalue): - self.assembler.fail_boxes_float.setitem(index, floatvalue) - - def set_future_value_ref(self, index, ptrvalue): - self.assembler.fail_boxes_ptr.setitem(index, ptrvalue) - def get_latest_value_int(self, index): return self.assembler.fail_boxes_int.getitem(index) @@ -122,27 +113,28 @@ # the FORCE_TOKEN operation and this helper both return 'ebp'. return self.assembler.fail_ebp - def execute_token(self, executable_token): - addr = executable_token._x86_bootstrap_code - #llop.debug_print(lltype.Void, ">>>> Entering", addr) - func = rffi.cast(lltype.Ptr(self.BOOTSTRAP_TP), addr) - fail_index = self._execute_call(func) - #llop.debug_print(lltype.Void, "<<<< Back") - return self.get_fail_descr_from_number(fail_index) - - def _execute_call(self, func): - # help flow objspace - prev_interpreter = None - if not self.translate_support_code: - prev_interpreter = LLInterpreter.current_interpreter - LLInterpreter.current_interpreter = self.debug_ll_interpreter - res = 0 - try: - res = func() - finally: + def make_execute_token(self, *ARGS): + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, lltype.Signed)) + # + def execute_token(executable_token, *args): + clt = executable_token.compiled_loop_token + assert len(args) == clt._debug_nbargs + # + addr = executable_token._x86_function_addr + func = rffi.cast(FUNCPTR, addr) + #llop.debug_print(lltype.Void, ">>>> Entering", addr) + prev_interpreter = None # help flow space if not self.translate_support_code: - LLInterpreter.current_interpreter = prev_interpreter - return res + prev_interpreter = LLInterpreter.current_interpreter + LLInterpreter.current_interpreter = self.debug_ll_interpreter + try: + fail_index = func(*args) + finally: + if not self.translate_support_code: + LLInterpreter.current_interpreter = prev_interpreter + #llop.debug_print(lltype.Void, "<<<< Back") + return self.get_fail_descr_from_number(fail_index) + return execute_token def cast_ptr_to_int(x): adr = llmemory.cast_ptr_to_adr(x) @@ -215,14 +207,3 @@ super(CPU_X86_64, self).__init__(*args, **kwargs) CPU = CPU386 - -# silence warnings -##history.LoopToken._x86_param_depth = 0 -##history.LoopToken._x86_arglocs = (None, None) -##history.LoopToken._x86_frame_depth = 0 -##history.LoopToken._x86_bootstrap_code = 0 -##history.LoopToken._x86_direct_bootstrap_code = 0 -##history.LoopToken._x86_loop_code = 0 
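The new make_execute_token() in runner.py replaces the old set_future_value_*() protocol: per argument signature, the CPU builds a closure that calls the generated machine code directly with the arguments and returns a fail index. A rough sketch of the same shape using ctypes instead of rffi; the "machine code" in the demo is just a Python callback, so only the calling convention is illustrated, not the real backend plumbing:

import ctypes

def make_execute_token(*ARG_TYPES):
    # one function-pointer type per signature, as in the diff above
    FUNCPTR = ctypes.CFUNCTYPE(ctypes.c_long, *ARG_TYPES)   # returns fail index

    def execute_token(function_addr, *args):
        func = ctypes.cast(function_addr, FUNCPTR)
        return func(*args)            # call the generated code directly
    return execute_token

# Demo: a Python callback stands in for the assembled loop.
CALLBACK = ctypes.CFUNCTYPE(ctypes.c_long, ctypes.c_long, ctypes.c_long)
fake_loop = CALLBACK(lambda a, b: 42)             # always "fails" with index 42
addr = ctypes.cast(fake_loop, ctypes.c_void_p).value

execute_token = make_execute_token(ctypes.c_long, ctypes.c_long)
assert execute_token(addr, 4, 7) == 42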
-##history.LoopToken._x86_debug_checksum = 0 -##compile.AbstractFailDescr._x86_current_depths = (0, 0) -##compile.AbstractFailDescr._x86_adr_jump_offset = 0 diff --git a/pypy/jit/backend/x86/test/test_assembler.py b/pypy/jit/backend/x86/test/test_assembler.py --- a/pypy/jit/backend/x86/test/test_assembler.py +++ b/pypy/jit/backend/x86/test/test_assembler.py @@ -46,12 +46,13 @@ xmm2] assert len(failargs) == len(locs) assembler.write_failure_recovery_description(mc, failargs, locs) - nums = [Assembler386.DESCR_INT + 4*(16+0), - Assembler386.DESCR_REF + 4*(16+1), - Assembler386.DESCR_FLOAT + 4*(16+10), - Assembler386.DESCR_INT + 4*(16+100), - Assembler386.DESCR_REF + 4*(16+101), - Assembler386.DESCR_FLOAT + 4*(16+110), + base = 8 + 8*IS_X86_64 + nums = [Assembler386.DESCR_INT + 4*(base+0), + Assembler386.DESCR_REF + 4*(base+1), + Assembler386.DESCR_FLOAT + 4*(base+10), + Assembler386.DESCR_INT + 4*(base+100), + Assembler386.DESCR_REF + 4*(base+101), + Assembler386.DESCR_FLOAT + 4*(base+110), Assembler386.CODE_HOLE, Assembler386.CODE_HOLE, Assembler386.DESCR_INT + 4*ebx.value, diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -4,7 +4,7 @@ import py from pypy.jit.metainterp.history import BoxInt, ConstInt,\ - BoxPtr, ConstPtr, TreeLoop + BoxPtr, ConstPtr, TreeLoop, TargetToken from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.codewriter import heaptracker from pypy.jit.codewriter.effectinfo import EffectInfo @@ -113,6 +113,8 @@ descr0 = cpu.fielddescrof(S, 'int') ptr0 = struct_ref + targettoken = TargetToken() + namespace = locals().copy() def test_basic(self): @@ -136,6 +138,7 @@ def test_bug_0(self): ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8] + label(i0, i1, i2, i3, i4, i5, i6, i7, i8, descr=targettoken) guard_value(i2, 1) [i2, i3, i4, i5, i6, i7, i0, i1, i8] guard_class(i4, 138998336) [i4, i5, i6, i7, i0, i1, i8] i11 = getfield_gc(i4, descr=descr0) @@ -163,7 +166,7 @@ guard_false(i32) [i4, i6, i7, i0, i1, i24] i33 = getfield_gc(i0, descr=descr0) guard_value(i33, ConstPtr(ptr0)) [i4, i6, i7, i0, i1, i33, i24] - jump(i0, i1, 1, 17, i4, ConstPtr(ptr0), i6, i7, i24) + jump(i0, i1, 1, 17, i4, ConstPtr(ptr0), i6, i7, i24, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0, 0, 0, 0, 0, 0], run=False) diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -71,6 +71,18 @@ ('mov', eax, s24), ('mov', s12, edi)] +def test_no_tmp_reg(): + assembler = MockAssembler() + s8 = frame_pos(0, INT) + s12 = frame_pos(13, INT) + s20 = frame_pos(20, INT) + s24 = frame_pos(221, INT) + remap_frame_layout(assembler, [s8, eax, s12], [s20, s24, edi], None) + assert assembler.ops == [('push', s8), + ('pop', s20), + ('mov', eax, s24), + ('mov', s12, edi)] + def test_reordering(): assembler = MockAssembler() s8 = frame_pos(8, INT) diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -5,10 +5,11 @@ def test_compile_bridge_not_deeper(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' loop = 
self.interpret(ops, [0]) assert self.getint(0) == 20 @@ -18,22 +19,22 @@ finish(i3, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - self.cpu.set_future_value_int(0, 0) - fail = self.run(loop) + fail = self.run(loop, 0) assert fail.identifier == 2 assert self.getint(0) == 21 def test_compile_bridge_deeper(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' loop = self.interpret(ops, [0]) - previous = loop.token._x86_frame_depth - assert loop.token._x86_param_depth == 0 + previous = loop._jitcelltoken.compiled_loop_token.frame_depth + assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 assert self.getint(0) == 20 ops = ''' [i1] @@ -42,19 +43,18 @@ i5 = int_add(i4, 1) i6 = int_add(i5, 1) i7 = int_add(i5, i4) + force_spill(i5) i8 = int_add(i7, 1) i9 = int_add(i8, 1) finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - descr = loop.operations[2].getdescr() + descr = loop.operations[3].getdescr() new = descr._x86_bridge_frame_depth - assert descr._x86_bridge_param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? - if IS_X86_32: - assert new > previous - self.cpu.set_future_value_int(0, 0) - fail = self.run(loop) + assert descr._x86_bridge_param_depth == 0 + # the force_spill() forces the stack to grow + assert new > previous + fail = self.run(loop, 0) assert fail.identifier == 2 assert self.getint(0) == 21 assert self.getint(1) == 22 @@ -64,28 +64,30 @@ def test_bridge_jump_to_other_loop(self): loop = self.interpret(''' [i0, i10, i11, i12, i13, i14, i15, i16] + label(i0, i10, i11, i12, i13, i14, i15, i16, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1, i10, i11, i12, i13, i14, i15, i16) - ''', [0]) + jump(i1, i10, i11, i12, i13, i14, i15, i16, descr=targettoken) + ''', [0, 0, 0, 0, 0, 0, 0, 0]) other_loop = self.interpret(''' - [i3] + [i3, i10, i11, i12, i13, i14, i15, i16] + label(i3, descr=targettoken2) guard_false(i3, descr=fdescr2) [i3] - jump(i3) - ''', [1]) + jump(i3, descr=targettoken2) + ''', [1, 0, 0, 0, 0, 0, 0, 0]) ops = ''' [i3] - jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=looptoken) + jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=targettoken) ''' - bridge = self.attach_bridge(ops, other_loop, 0, looptoken=loop.token) - self.cpu.set_future_value_int(0, 1) - fail = self.run(other_loop) + bridge = self.attach_bridge(ops, other_loop, 1) + fail = self.run(other_loop, 1, 0, 0, 0, 0, 0, 0, 0) assert fail.identifier == 1 def test_bridge_jumps_to_self_deeper(self): loop = self.interpret(''' [i0, i1, i2, i31, i32, i33] + label(i0, i1, i2, i31, i32, i33, descr=targettoken) i98 = same_as(0) i99 = same_as(1) i30 = int_add(i1, i2) @@ -94,8 +96,8 @@ guard_false(i4) [i98, i3] i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] - jump(i3, i30, 1, i30, i30, i30) - ''', [0]) + jump(i3, i30, 1, i30, i30, i30, descr=targettoken) + ''', [0, 0, 0, 0, 0, 0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' @@ -104,28 +106,28 @@ i8 = int_add(i3, 1) i6 = int_add(i8, i10) i7 = int_add(i3, i6) + force_spill(i6) + force_spill(i7) + force_spill(i8) i12 = int_add(i7, i8) i11 = int_add(i12, i6) - jump(i3, i12, i11, i10, i6, i7, descr=looptoken) + jump(i3, i12, i11, i10, i6, i7, descr=targettoken) ''' - bridge = self.attach_bridge(ops, loop, 5, looptoken=loop.token) - guard_op = loop.operations[5] - loop_frame_depth = loop.token._x86_frame_depth - 
assert loop.token._x86_param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? - if IS_X86_32: - assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth + loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth + bridge = self.attach_bridge(ops, loop, 6) + guard_op = loop.operations[6] + assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 + # the force_spill() forces the stack to grow + assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth assert guard_op.getdescr()._x86_bridge_param_depth == 0 - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 0) - self.cpu.set_future_value_int(2, 0) - self.run(loop) + self.run(loop, 0, 0, 0, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 def test_bridge_jumps_to_self_shallower(self): loop = self.interpret(''' [i0, i1, i2] + label(i0, i1, i2, descr=targettoken) i98 = same_as(0) i99 = same_as(1) i3 = int_add(i0, 1) @@ -133,19 +135,16 @@ guard_false(i4) [i98, i3] i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] - jump(i3, i1, i2) - ''', [0]) + jump(i3, i1, i2, descr=targettoken) + ''', [0, 0, 0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' [i97, i3] - jump(i3, 0, 1, descr=looptoken) + jump(i3, 0, 1, descr=targettoken) ''' - bridge = self.attach_bridge(ops, loop, 4, looptoken=loop.token) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 0) - self.cpu.set_future_value_int(2, 0) - self.run(loop) + bridge = self.attach_bridge(ops, loop, 5) + self.run(loop, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -4,7 +4,7 @@ import py from pypy.jit.metainterp.history import BoxInt, ConstInt,\ - BoxPtr, ConstPtr, LoopToken, BasicFailDescr + BoxPtr, ConstPtr, BasicFailDescr, JitCellToken, TargetToken from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.detect_cpu import getcpuclass @@ -96,10 +96,16 @@ raising_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, EffectInfo.MOST_GENERAL) + targettoken = TargetToken() + targettoken2 = TargetToken() fdescr1 = BasicFailDescr(1) fdescr2 = BasicFailDescr(2) fdescr3 = BasicFailDescr(3) + def setup_method(self, meth): + self.targettoken._x86_loop_code = 0 + self.targettoken2._x86_loop_code = 0 + def f1(x): return x+1 @@ -134,21 +140,31 @@ def interpret(self, ops, args, run=True): loop = self.parse(ops) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - for i, arg in enumerate(args): + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + arguments = [] + for arg in args: if isinstance(arg, int): - self.cpu.set_future_value_int(i, arg) + arguments.append(arg) elif isinstance(arg, float): arg = longlong.getfloatstorage(arg) - self.cpu.set_future_value_float(i, arg) + arguments.append(arg) else: assert isinstance(lltype.typeOf(arg), lltype.Ptr) llgcref = lltype.cast_opaque_ptr(llmemory.GCREF, arg) - self.cpu.set_future_value_ref(i, llgcref) + arguments.append(llgcref) + loop._jitcelltoken = looptoken if run: - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, *arguments) return loop + def prepare_loop(self, ops): + loop = self.parse(ops) + regalloc = RegAlloc(self.cpu.assembler, False) 
+ regalloc.prepare_loop(loop.inputargs, loop.operations, + loop.original_jitcell_token, []) + return regalloc + def getint(self, index): return self.cpu.get_latest_value_int(index) @@ -167,10 +183,7 @@ gcref = self.cpu.get_latest_value_ref(index) return lltype.cast_opaque_ptr(T, gcref) - def attach_bridge(self, ops, loop, guard_op_index, looptoken=None, **kwds): - if looptoken is not None: - self.namespace = self.namespace.copy() - self.namespace['looptoken'] = looptoken + def attach_bridge(self, ops, loop, guard_op_index, **kwds): guard_op = loop.operations[guard_op_index] assert guard_op.is_guard() bridge = self.parse(ops, **kwds) @@ -178,20 +191,21 @@ [box.type for box in guard_op.getfailargs()]) faildescr = guard_op.getdescr() self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations, - loop.token) + loop._jitcelltoken) return bridge - def run(self, loop): - return self.cpu.execute_token(loop.token) + def run(self, loop, *arguments): + return self.cpu.execute_token(loop._jitcelltoken, *arguments) class TestRegallocSimple(BaseTestRegalloc): def test_simple_loop(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' self.interpret(ops, [0]) assert self.getint(0) == 20 @@ -199,29 +213,30 @@ def test_two_loops_and_a_bridge(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_add(i0, 1) i5 = int_lt(i4, 20) guard_true(i5) [i4, i1, i2, i3] - jump(i4, i1, i2, i3) + jump(i4, i1, i2, i3, descr=targettoken) ''' loop = self.interpret(ops, [0, 0, 0, 0]) ops2 = ''' - [i5] + [i5, i6, i7, i8] + label(i5, descr=targettoken2) i1 = int_add(i5, 1) i3 = int_add(i1, 1) i4 = int_add(i3, 1) i2 = int_lt(i4, 30) guard_true(i2) [i4] - jump(i4) + jump(i4, descr=targettoken2) ''' - loop2 = self.interpret(ops2, [0]) + loop2 = self.interpret(ops2, [0, 0, 0, 0]) bridge_ops = ''' [i4] - jump(i4, i4, i4, i4, descr=looptoken) + jump(i4, i4, i4, i4, descr=targettoken) ''' - bridge = self.attach_bridge(bridge_ops, loop2, 4, looptoken=loop.token) - self.cpu.set_future_value_int(0, 0) - self.run(loop2) + bridge = self.attach_bridge(bridge_ops, loop2, 5) + self.run(loop2, 0, 0, 0, 0) assert self.getint(0) == 31 assert self.getint(1) == 30 assert self.getint(2) == 30 @@ -230,10 +245,11 @@ def test_pointer_arg(self): ops = ''' [i0, p0] + label(i0, p0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 10) guard_true(i2) [p0] - jump(i1, p0) + jump(i1, p0, descr=targettoken) ''' S = lltype.GcStruct('S') ptr = lltype.malloc(S) @@ -258,8 +274,7 @@ loop = self.interpret(ops, [0]) assert self.getint(0) == 1 bridge = self.attach_bridge(bridge_ops, loop, 2) - self.cpu.set_future_value_int(0, 0) - self.run(loop) + self.run(loop, 0) assert self.getint(0) == 1 def test_inputarg_unused(self): @@ -285,9 +300,7 @@ assert self.getint(0) == 0 assert self.getint(1) == 10 bridge = self.attach_bridge(bridge_ops, loop, 0) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - self.run(loop) + self.run(loop, 0, 10) assert self.getint(0) == 0 assert self.getint(1) == 10 @@ -304,17 +317,16 @@ finish(1, 2) ''' self.attach_bridge(bridge_ops, loop, 0) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 1) - self.run(loop) + self.run(loop, 0, 1) def test_spill_for_constant(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_add(3, i1) i5 = int_lt(i4, 30) guard_true(i5) [i0, i4, i2, i3] - jump(1, i4, 3, 4) + jump(1, i4, 3, 4, 
descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1, 30, 3, 4] @@ -322,31 +334,34 @@ def test_spill_for_constant_lshift(self): ops = ''' [i0, i2, i1, i3] + label(i0, i2, i1, i3, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, 3, i5, 4) + jump(i4, 3, i5, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, i5, 3, 4) + jump(i4, i5, 3, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] ops = ''' [i0, i3, i1, i2] + label(i0, i3, i1, i2, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, 4, i5, 3) + jump(i4, 4, i5, 3, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] @@ -354,11 +369,12 @@ def test_result_selected_reg_via_neg(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i6 = int_neg(i2) i7 = int_add(1, i1) i4 = int_lt(i7, 10) guard_true(i4) [i0, i6, i7] - jump(1, i7, i2, i6) + jump(1, i7, i2, i6, descr=targettoken) ''' self.interpret(ops, [0, 0, 3, 0]) assert self.getints(3) == [1, -3, 10] @@ -366,11 +382,12 @@ def test_compare_memory_result_survives(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_lt(i0, i1) i5 = int_add(i3, 1) i6 = int_lt(i5, 30) guard_true(i6) [i4] - jump(i0, i1, i4, i5) + jump(i0, i1, i4, i5, descr=targettoken) ''' self.interpret(ops, [0, 10, 0, 0]) assert self.getint(0) == 1 @@ -378,12 +395,13 @@ def test_jump_different_args(self): ops = ''' [i0, i15, i16, i18, i1, i2, i3] + label(i0, i15, i16, i18, i1, i2, i3, descr=targettoken) i4 = int_add(i3, 1) i5 = int_lt(i4, 20) guard_true(i5) [i2, i1] - jump(i0, i18, i15, i16, i2, i1, i4) + jump(i0, i18, i15, i16, i2, i1, i4, descr=targettoken) ''' - self.interpret(ops, [0, 1, 2, 3]) + self.interpret(ops, [0, 1, 2, 3, 0, 0, 0]) def test_op_result_unused(self): ops = ''' @@ -417,11 +435,24 @@ finish(i0, i1, i2, i3, i4, i5, i6, i7, i8) ''' self.attach_bridge(bridge_ops, loop, 1) - for i in range(9): - self.cpu.set_future_value_int(i, i) - self.run(loop) + self.run(loop, 0, 1, 2, 3, 4, 5, 6, 7, 8) assert self.getints(9) == range(9) + def test_loopargs(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + if IS_X86_64: + assert len(regalloc.rm.reg_bindings) == 4 + assert len(regalloc.fm.bindings) == 0 + else: + assert len(regalloc.rm.reg_bindings) == 0 + assert len(regalloc.fm.bindings) == 4 + + class TestRegallocCompOps(BaseTestRegalloc): def test_cmp_op_0(self): @@ -438,6 +469,7 @@ class TestRegallocMoreRegisters(BaseTestRegalloc): cpu = BaseTestRegalloc.cpu + targettoken = TargetToken() S = lltype.GcStruct('S', ('field', lltype.Char)) fielddescr = cpu.fielddescrof(S, 'field') @@ -510,6 +542,7 @@ def test_division_optimized(self): ops = ''' [i7, i6] + label(i7, i6, descr=targettoken) i18 = int_floordiv(i7, i6) i19 = int_xor(i7, i6) i21 = int_lt(i19, 0) @@ -517,7 +550,7 @@ i23 = int_is_true(i22) i24 = int_eq(i6, 4) guard_false(i24) [i18] - jump(i18, i6) + jump(i18, i6, descr=targettoken) ''' self.interpret(ops, [10, 4]) assert self.getint(0) == 2 @@ -586,9 +619,10 @@ i10 = 
call(ConstClass(f1ptr), i0, descr=f1_calldescr) finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) - assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] - assert loop.token._x86_param_depth == self.expected_param_depth(1) + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) + assert self.getints(10) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9] + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(1) def test_two_calls(self): ops = ''' @@ -597,9 +631,10 @@ i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) finish(i11, i1, i2, i3, i4, i5, i6, i7, i8, i9) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) - assert self.getints(11) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] - assert loop.token._x86_param_depth == self.expected_param_depth(2) + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) + assert self.getints(10) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9] + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(2) def test_call_many_arguments(self): # NB: The first and last arguments in the call are constants. This @@ -612,7 +647,8 @@ ''' loop = self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9]) assert self.getint(0) == 55 - assert loop.token._x86_param_depth == self.expected_param_depth(10) + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(10) def test_bridge_calls_1(self): ops = ''' @@ -632,9 +668,7 @@ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) - self.cpu.set_future_value_int(0, 4) - self.cpu.set_future_value_int(1, 7) - self.run(loop) + self.run(loop, 4, 7) assert self.getint(0) == 5*7 def test_bridge_calls_2(self): @@ -655,8 +689,6 @@ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) - self.cpu.set_future_value_int(0, 4) - self.cpu.set_future_value_int(1, 7) - self.run(loop) + self.run(loop, 4, 7) assert self.getint(0) == 29 diff --git a/pypy/jit/backend/x86/test/test_regalloc2.py b/pypy/jit/backend/x86/test/test_regalloc2.py --- a/pypy/jit/backend/x86/test/test_regalloc2.py +++ b/pypy/jit/backend/x86/test/test_regalloc2.py @@ -1,6 +1,6 @@ import py from pypy.jit.metainterp.history import ResOperation, BoxInt, ConstInt,\ - BoxPtr, ConstPtr, BasicFailDescr, LoopToken + BoxPtr, ConstPtr, BasicFailDescr, JitCellToken from pypy.jit.metainterp.resoperation import rop from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.arch import WORD @@ -20,10 +20,9 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 9) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, 9) assert cpu.get_latest_value_int(0) == (9 >> 3) assert cpu.get_latest_value_int(1) == (~18) @@ -43,10 +42,9 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, -10) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, -10) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == -1000 assert cpu.get_latest_value_int(2) == 1 @@ -140,19 +138,9 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - 
cpu.set_future_value_int(0, -13) - cpu.set_future_value_int(1, 10) - cpu.set_future_value_int(2, 10) - cpu.set_future_value_int(3, 8) - cpu.set_future_value_int(4, -8) - cpu.set_future_value_int(5, -16) - cpu.set_future_value_int(6, -18) - cpu.set_future_value_int(7, 46) - cpu.set_future_value_int(8, -12) - cpu.set_future_value_int(9, 26) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, -13, 10, 10, 8, -8, -16, -18, 46, -12, 26) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == 0 assert cpu.get_latest_value_int(2) == 0 @@ -255,19 +243,9 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 17) - cpu.set_future_value_int(1, -20) - cpu.set_future_value_int(2, -6) - cpu.set_future_value_int(3, 6) - cpu.set_future_value_int(4, 1) - cpu.set_future_value_int(5, 13) - cpu.set_future_value_int(6, 13) - cpu.set_future_value_int(7, 9) - cpu.set_future_value_int(8, 49) - cpu.set_future_value_int(9, 8) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, 17, -20, -6, 6, 1, 13, 13, 9, 49, 8) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == 8 assert cpu.get_latest_value_int(2) == 1 diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -1,9 +1,10 @@ import py from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr, rclass from pypy.rpython.annlowlevel import llhelper -from pypy.jit.metainterp.history import ResOperation, LoopToken +from pypy.jit.metainterp.history import ResOperation, TargetToken, JitCellToken from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstFloat, - ConstPtr, Box, BoxFloat, BasicFailDescr) + ConstPtr, Box, BoxFloat, + BasicFailDescr) from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.arch import WORD from pypy.jit.backend.x86.rx86 import fits_in_32bits @@ -279,13 +280,9 @@ descr=BasicFailDescr()), ] ops[-2].setfailargs([i1]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([b], ops, looptoken) - if op == rop.INT_IS_TRUE: - self.cpu.set_future_value_int(0, b.value) - else: - self.cpu.set_future_value_ref(0, b.value) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, b.value) result = self.cpu.get_latest_value_int(0) if guard == rop.GUARD_FALSE: assert result == execute(self.cpu, None, @@ -329,11 +326,10 @@ ] ops[-2].setfailargs([i1]) inputargs = [i for i in (a, b) if isinstance(i, Box)] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, ops, looptoken) - for i, box in enumerate(inputargs): - self.cpu.set_future_value_int(i, box.value) - self.cpu.execute_token(looptoken) + inputvalues = [box.value for box in inputargs] + self.cpu.execute_token(looptoken, *inputvalues) result = self.cpu.get_latest_value_int(0) expected = execute(self.cpu, None, op, None, a, b).value if guard == rop.GUARD_FALSE: @@ -353,9 +349,10 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() + targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.number = 17 class FakeString(object): def __init__(self, val): @@ -365,14 +362,15 @@ return self.val operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), 
ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0], None), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[3].setfailargs([i1]) + operations[-2].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) name, loopaddress, loopsize = agent.functions[0] assert name == "Loop # 17: hello (loop counter 0)" @@ -385,7 +383,7 @@ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0], None), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -397,8 +395,7 @@ assert address >= loopaddress + loopsize assert size >= 10 # randomish number - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -408,11 +405,13 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] debug._log = dlog = debug.DebugLog() @@ -499,12 +498,10 @@ ops[3].setfailargs([]) ops[5].setfailargs([]) ops[7].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) - self.cpu.set_future_value_int(0, 123450) - self.cpu.set_future_value_int(1, 123408) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 123450, 123408) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 42 assert self.cpu.get_latest_value_int(1) == 42 @@ -523,19 +520,20 @@ loop = """ [i0] + label(i0, descr=targettoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1) + jump(i1, descr=targettoken) """ - ops = parse(loop) + ops = parse(loop, namespace={'targettoken': TargetToken()}) debug._log = dlog = debug.DebugLog() try: self.cpu.assembler.set_debug(True) - self.cpu.compile_loop(ops.inputargs, ops.operations, ops.token) - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(ops.token) + looptoken = JitCellToken() + self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) + self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] assert struct.i == 10 @@ -547,16 +545,17 @@ def test_debugger_checksum(self): loop = """ [i0] + label(i0, descr=targettoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1) + jump(i1, descr=targettoken) """ - ops = parse(loop) + ops = parse(loop, namespace={'targettoken': TargetToken()}) self.cpu.assembler.set_debug(True) - self.cpu.compile_loop(ops.inputargs, ops.operations, ops.token) - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(ops.token) - assert ops.token._x86_debug_checksum == sum([op.getopnum() + looptoken = JitCellToken() + 
self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) + self.cpu.execute_token(looptoken, 0) + assert looptoken._x86_debug_checksum == sum([op.getopnum() for op in ops.operations]) diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -457,6 +457,46 @@ def test_compile_framework_7(self): self.run('compile_framework_7') + def define_compile_framework_7_interior(cls): + # Array of structs containing pointers (test the write barrier + # for setinteriorfield_gc) + S = lltype.GcStruct('S', ('i', lltype.Signed)) + A = lltype.GcArray(lltype.Struct('entry', ('x', lltype.Ptr(S)), + ('y', lltype.Ptr(S)), + ('z', lltype.Ptr(S)))) + class Glob: + a = lltype.nullptr(A) + glob = Glob() + # + def make_s(i): + s = lltype.malloc(S) + s.i = i + return s + # + @unroll_safe + def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): + a = glob.a + if not a: + a = glob.a = lltype.malloc(A, 10) + i = 0 + while i < 10: + a[i].x = make_s(n + i * 100 + 1) + a[i].y = make_s(n + i * 100 + 2) + a[i].z = make_s(n + i * 100 + 3) + i += 1 + i = 0 + while i < 10: + check(a[i].x.i == n + i * 100 + 1) + check(a[i].y.i == n + i * 100 + 2) + check(a[i].z.i == n + i * 100 + 3) + i += 1 + n -= x.foo + return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s + return None, f, None + + def test_compile_framework_7_interior(self): + self.run('compile_framework_7_interior') + def define_compile_framework_8(cls): # Array of pointers, of unknown length (test write_barrier_from_array) def before(n, x): diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -241,12 +241,15 @@ return op.opname == 'jit_force_quasi_immutable' class RandomEffectsAnalyzer(BoolGraphAnalyzer): - def analyze_direct_call(self, graph, seen=None): - if hasattr(graph, "func") and hasattr(graph.func, "_ptr"): - if graph.func._ptr._obj.random_effects_on_gcobjs: + def analyze_external_call(self, op, seen=None): + try: + funcobj = op.args[0].value._obj + if funcobj.random_effects_on_gcobjs: return True - return super(RandomEffectsAnalyzer, self).analyze_direct_call(graph, - seen) + except (AttributeError, lltype.DelayedPointer): + return True # better safe than sorry + return super(RandomEffectsAnalyzer, self).analyze_external_call( + op, seen) def analyze_simple_operation(self, op, graphinfo): return False diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -15,6 +15,8 @@ from pypy.translator.simplify import get_funcobj from pypy.translator.unsimplify import varoftype +class UnsupportedMallocFlags(Exception): + pass def transform_graph(graph, cpu=None, callcontrol=None, portal_jd=None): """Transform a control flow graph to make it suitable for @@ -205,7 +207,24 @@ if op.args[0] in self.vable_array_vars: self.vable_array_vars[op.result]= self.vable_array_vars[op.args[0]] - rewrite_op_cast_pointer = rewrite_op_same_as + def rewrite_op_cast_pointer(self, op): + newop = self.rewrite_op_same_as(op) + assert newop is None + return + # disabled for now + if (self._is_rclass_instance(op.args[0]) and + self._is_rclass_instance(op.result)): + FROM = op.args[0].concretetype.TO + TO = op.result.concretetype.TO + if lltype._castdepth(TO, FROM) > 0: + vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, TO) + 
const_vtable = Constant(vtable, lltype.typeOf(vtable)) + return [None, # hack, do the right renaming from op.args[0] to op.result + SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + + def rewrite_op_jit_record_known_class(self, op): + return SpaceOperation("record_known_class", [op.args[0], op.args[1]], None) + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass @@ -479,13 +498,29 @@ else: log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) + def _rewrite_raw_malloc(self, op, name, args): + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) + TYPE = op.args[0].value + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, args, + extra = (TYPE,), + extrakey = TYPE) + def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': - ARRAY = op.args[0].value - return self._do_builtin_call(op, 'raw_malloc', - [op.args[2]], - extra = (ARRAY,), - extrakey = ARRAY) + return self._rewrite_raw_malloc(op, 'raw_malloc_varsize', + [op.args[2]]) if op.args[0].value == rstr.STR: return SpaceOperation('newstr', [op.args[2]], op.result) elif op.args[0].value == rstr.UNICODE: @@ -498,11 +533,18 @@ op.result) def rewrite_op_free(self, op): - flags = op.args[1].value - assert flags['flavor'] == 'raw' - ARRAY = op.args[0].concretetype.TO - return self._do_builtin_call(op, 'raw_free', [op.args[0]], - extra = (ARRAY,), extrakey = ARRAY) + d = op.args[1].value.copy() + assert d['flavor'] == 'raw' + d.pop('flavor') + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) + STRUCT = op.args[0].concretetype.TO + name = 'raw_free' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[0]], + extra = (STRUCT,), extrakey = STRUCT) def rewrite_op_getarrayitem(self, op): ARRAY = op.args[0].concretetype.TO @@ -703,6 +745,9 @@ return [op0, op1] def rewrite_op_malloc(self, op): + if op.args[1].value['flavor'] == 'raw': + return self._rewrite_raw_malloc(op, 'raw_malloc_fixedsize', []) + # assert op.args[1].value == {'flavor': 'gc'} STRUCT = op.args[0].value vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, STRUCT) @@ -1053,35 +1098,20 @@ # jit.codewriter.support. 
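Aside on the raw-malloc rewriting above: the flags dict attached to malloc/malloc_varsize is turned into a pseudo-function name by appending one suffix per non-default flag, and a matching _ll_ helper with that name has to exist in pypy.jit.codewriter.support (the builders appear later in this changeset). The following is only an illustrative sketch of that name construction, not part of the patch; ValueError stands in for the patch's UnsupportedMallocFlags.

    def malloc_pseudo_name(flags, varsize=True):
        # Mirrors _rewrite_raw_malloc: consume the known flags, refuse
        # anything left over, then build the suffixed helper name.
        d = dict(flags)
        assert d.pop('flavor') == 'raw'
        add_memory_pressure = d.pop('add_memory_pressure', False)
        zero = d.pop('zero', False)
        track_allocation = d.pop('track_allocation', True)
        if d:
            raise ValueError("unsupported malloc flags: %r" % (d,))
        name = 'raw_malloc_varsize' if varsize else 'raw_malloc_fixedsize'
        if zero:
            name += '_zero'
        if add_memory_pressure:
            name += '_add_memory_pressure'
        if not track_allocation:
            name += '_no_track_allocation'
        return name

    assert malloc_pseudo_name({'flavor': 'raw', 'zero': True}) == 'raw_malloc_varsize_zero'
    assert malloc_pseudo_name({'flavor': 'raw'}, varsize=False) == 'raw_malloc_fixedsize'

The same suffix order (zero, add_memory_pressure, no_track_allocation) is what the new tests in test_jtransform.py check for.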
for _op, _oopspec in [('llong_invert', 'INVERT'), - ('ullong_invert', 'INVERT'), ('llong_lt', 'LT'), ('llong_le', 'LE'), ('llong_eq', 'EQ'), ('llong_ne', 'NE'), ('llong_gt', 'GT'), ('llong_ge', 'GE'), - ('ullong_lt', 'ULT'), - ('ullong_le', 'ULE'), - ('ullong_eq', 'EQ'), - ('ullong_ne', 'NE'), - ('ullong_gt', 'UGT'), - ('ullong_ge', 'UGE'), ('llong_add', 'ADD'), ('llong_sub', 'SUB'), ('llong_mul', 'MUL'), ('llong_and', 'AND'), ('llong_or', 'OR'), ('llong_xor', 'XOR'), - ('ullong_add', 'ADD'), - ('ullong_sub', 'SUB'), - ('ullong_mul', 'MUL'), - ('ullong_and', 'AND'), - ('ullong_or', 'OR'), - ('ullong_xor', 'XOR'), ('llong_lshift', 'LSHIFT'), ('llong_rshift', 'RSHIFT'), - ('ullong_lshift', 'LSHIFT'), - ('ullong_rshift', 'URSHIFT'), ('cast_int_to_longlong', 'FROM_INT'), ('truncate_longlong_to_int', 'TO_INT'), ('cast_float_to_longlong', 'FROM_FLOAT'), @@ -1104,6 +1134,21 @@ ('cast_uint_to_ulonglong', 'FROM_UINT'), ('cast_float_to_ulonglong', 'FROM_FLOAT'), ('cast_ulonglong_to_float', 'U_TO_FLOAT'), + ('ullong_invert', 'INVERT'), + ('ullong_lt', 'ULT'), + ('ullong_le', 'ULE'), + ('ullong_eq', 'EQ'), + ('ullong_ne', 'NE'), + ('ullong_gt', 'UGT'), + ('ullong_ge', 'UGE'), + ('ullong_add', 'ADD'), + ('ullong_sub', 'SUB'), + ('ullong_mul', 'MUL'), + ('ullong_and', 'AND'), + ('ullong_or', 'OR'), + ('ullong_xor', 'XOR'), + ('ullong_lshift', 'LSHIFT'), + ('ullong_rshift', 'URSHIFT'), ]: exec py.code.Source(''' def rewrite_op_%s(self, op): @@ -1134,7 +1179,7 @@ def rewrite_op_llong_is_true(self, op): v = varoftype(op.args[0].concretetype) - op0 = SpaceOperation('cast_int_to_longlong', + op0 = SpaceOperation('cast_primitive', [Constant(0, lltype.Signed)], v) args = [op.args[0], v] diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -258,6 +258,9 @@ y = ~r_ulonglong(xll) return u_to_longlong(y) +def _ll_1_ullong_invert(xull): + return ~xull + def _ll_2_llong_lt(xll, yll): return xll < yll @@ -276,16 +279,22 @@ def _ll_2_llong_ge(xll, yll): return xll >= yll -def _ll_2_llong_ult(xull, yull): +def _ll_2_ullong_eq(xull, yull): + return xull == yull + +def _ll_2_ullong_ne(xull, yull): + return xull != yull + +def _ll_2_ullong_ult(xull, yull): return xull < yull -def _ll_2_llong_ule(xull, yull): +def _ll_2_ullong_ule(xull, yull): return xull <= yull -def _ll_2_llong_ugt(xull, yull): +def _ll_2_ullong_ugt(xull, yull): return xull > yull -def _ll_2_llong_uge(xull, yull): +def _ll_2_ullong_uge(xull, yull): return xull >= yull def _ll_2_llong_add(xll, yll): @@ -312,14 +321,41 @@ z = r_ulonglong(xll) ^ r_ulonglong(yll) return u_to_longlong(z) +def _ll_2_ullong_add(xull, yull): + z = (xull) + (yull) + return (z) + +def _ll_2_ullong_sub(xull, yull): + z = (xull) - (yull) + return (z) + +def _ll_2_ullong_mul(xull, yull): + z = (xull) * (yull) + return (z) + +def _ll_2_ullong_and(xull, yull): + z = (xull) & (yull) + return (z) + +def _ll_2_ullong_or(xull, yull): + z = (xull) | (yull) + return (z) + +def _ll_2_ullong_xor(xull, yull): + z = (xull) ^ (yull) + return (z) + def _ll_2_llong_lshift(xll, y): z = r_ulonglong(xll) << y return u_to_longlong(z) +def _ll_2_ullong_lshift(xull, y): + return xull << y + def _ll_2_llong_rshift(xll, y): return xll >> y -def _ll_2_llong_urshift(xull, y): +def _ll_2_ullong_urshift(xull, y): return xull >> y def _ll_1_llong_from_int(x): @@ -563,15 +599,75 @@ return p return _ll_0_alloc_with_del - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, 
flavor='raw') - return _ll_1_raw_malloc + def build_raw_malloc_varsize_builder(zero=False, + add_memory_pressure=False, + track_allocation=True): + def build_ll_1_raw_malloc_varsize(ARRAY): + def _ll_1_raw_malloc_varsize(n): + return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) + return _ll_1_raw_malloc_varsize + return build_ll_1_raw_malloc_varsize - def build_ll_1_raw_free(ARRAY): - def _ll_1_raw_free(p): - lltype.free(p, flavor='raw') - return _ll_1_raw_free + build_ll_1_raw_malloc_varsize = ( + build_raw_malloc_varsize_builder()) + build_ll_1_raw_malloc_varsize_zero = ( + build_raw_malloc_varsize_builder(zero=True)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure = ( + build_raw_malloc_varsize_builder(zero=True, add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_add_memory_pressure = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_no_track_allocation = ( + build_raw_malloc_varsize_builder(track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, add_memory_pressure=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True, track_allocation=False)) + + def build_raw_malloc_fixedsize_builder(zero=False, + add_memory_pressure=False, + track_allocation=True): + def build_ll_0_raw_malloc_fixedsize(STRUCT): + def _ll_0_raw_malloc_fixedsize(): + return lltype.malloc(STRUCT, flavor='raw', zero=zero, + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) + return _ll_0_raw_malloc_fixedsize + return build_ll_0_raw_malloc_fixedsize + + build_ll_0_raw_malloc_fixedsize = ( + build_raw_malloc_fixedsize_builder()) + build_ll_0_raw_malloc_fixedsize_zero = ( + build_raw_malloc_fixedsize_builder(zero=True)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(zero=True, add_memory_pressure=True)) + build_ll_0_raw_malloc_fixedsize_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True)) + build_ll_0_raw_malloc_fixedsize_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, add_memory_pressure=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True, track_allocation=False)) + + def build_raw_free_builder(track_allocation=True): + def build_ll_1_raw_free(ARRAY): + def _ll_1_raw_free(p): + lltype.free(p, flavor='raw', + track_allocation=track_allocation) + return _ll_1_raw_free + return build_ll_1_raw_free + + build_ll_1_raw_free = ( + build_raw_free_builder()) + build_ll_1_raw_free_no_track_allocation = ( + build_raw_free_builder(track_allocation=False)) + class OOtypeHelpers: diff --git a/pypy/jit/codewriter/test/test_call.py b/pypy/jit/codewriter/test/test_call.py --- a/pypy/jit/codewriter/test/test_call.py +++ 
b/pypy/jit/codewriter/test/test_call.py @@ -192,3 +192,21 @@ [op] = block.operations call_descr = cc.getcalldescr(op) assert call_descr.extrainfo.has_random_effects() + +def test_random_effects_on_stacklet_switch(): + from pypy.jit.backend.llgraph.runner import LLtypeCPU + from pypy.rlib._rffi_stacklet import switch, thread_handle, handle + @jit.dont_look_inside + def f(): + switch(rffi.cast(thread_handle, 0), rffi.cast(handle, 0)) + + rtyper = support.annotate(f, []) + jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0]) + cc = CallControl(LLtypeCPU(rtyper), jitdrivers_sd=[jitdriver_sd]) + res = cc.find_all_graphs(FakePolicy()) + + [f_graph] = [x for x in res if x.func is f] + [block, _] = list(f_graph.iterblocks()) + op = block.operations[-1] + call_descr = cc.getcalldescr(op) + assert call_descr.extrainfo.has_random_effects() diff --git a/pypy/jit/codewriter/test/test_codewriter.py b/pypy/jit/codewriter/test/test_codewriter.py --- a/pypy/jit/codewriter/test/test_codewriter.py +++ b/pypy/jit/codewriter/test/test_codewriter.py @@ -217,7 +217,7 @@ cw.make_jitcodes(verbose=True) # s = jitdriver_sd.mainjitcode.dump() - assert 'residual_call_ir_i $<* fn _ll_1_raw_malloc__Signed>' in s + assert 'residual_call_ir_i $<* fn _ll_1_raw_malloc_varsize__Signed>' in s assert 'setarrayitem_raw_i' in s assert 'getarrayitem_raw_i' in s assert 'residual_call_ir_v $<* fn _ll_1_raw_free__arrayPtr>' in s diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -1,3 +1,5 @@ + +import py import random try: from itertools import product @@ -15,12 +17,12 @@ from pypy.objspace.flow.model import FunctionGraph, Block, Link from pypy.objspace.flow.model import SpaceOperation, Variable, Constant -from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.lltypesystem.module import ll_math from pypy.translator.unsimplify import varoftype from pypy.jit.codewriter import heaptracker, effectinfo from pypy.jit.codewriter.flatten import ListOfKind -from pypy.jit.codewriter.jtransform import Transformer +from pypy.jit.codewriter.jtransform import Transformer, UnsupportedMallocFlags from pypy.jit.metainterp.history import getkind def const(x): @@ -538,6 +540,73 @@ assert op1.opname == '-live-' assert op1.args == [] +def test_raw_malloc(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw'}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc_varsize' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_zero(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc_varsize_zero' # pseudo-fn as a str + assert op1.opname == '-live-' + assert 
op1.args == [] + +def test_raw_malloc_unsupported_flag(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'unsupported_flag': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) + +def test_raw_malloc_fixedsize(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc', [Constant(S, lltype.Void), flags], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_r_i' + assert op0.args[0].value == 'raw_malloc_fixedsize_zero' #pseudo-fn as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_free(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + for flag in [True, False]: + flags = Constant({'flavor': 'raw', 'track_allocation': flag}, + lltype.Void) + op = SpaceOperation('free', [varoftype(lltype.Ptr(S)), flags], + varoftype(lltype.Void)) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_v' + if flag: + pseudo_op_name = 'raw_free' + else: + pseudo_op_name = 'raw_free_no_track_allocation' + assert op0.args[0].value == pseudo_op_name # pseudo-function as a str + assert op1.opname == '-live-' + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address @@ -1140,4 +1209,4 @@ assert op1.opname == 'mark_opaque_ptr' assert op1.args == [v1] assert op1.result is None - assert op2 is None \ No newline at end of file + assert op2 is None diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py --- a/pypy/jit/codewriter/test/test_longlong.py +++ b/pypy/jit/codewriter/test/test_longlong.py @@ -78,7 +78,7 @@ oplist = tr.rewrite_operation(op) assert len(oplist) == 2 assert oplist[0].opname == 'residual_call_irf_f' - assert oplist[0].args[0].value == 'llong_from_int' + assert oplist[0].args[0].value == opname.split('_')[0]+'_from_int' assert oplist[0].args[1] == 'calldescr-84' assert list(oplist[0].args[2]) == [const(0)] assert list(oplist[0].args[3]) == [] diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -518,6 +518,9 @@ @arguments("r") def bhimpl_mark_opaque_ptr(a): pass + @arguments("r", "i") + def bhimpl_record_known_class(a, b): + pass @arguments("i", returns="i") def bhimpl_int_copy(a): @@ -1501,7 +1504,6 @@ all_virtuals=None): from pypy.jit.metainterp.resume import blackhole_from_resumedata #debug_start('jit-blackhole') - metainterp_sd.profiler.start_blackhole() blackholeinterp = blackhole_from_resumedata( metainterp_sd.blackholeinterpbuilder, jitdriver_sd, @@ -1515,10 +1517,9 @@ current_exc = blackholeinterp._prepare_resume_from_failure( resumedescr.guard_opnum, dont_change_position) - try: - _run_forever(blackholeinterp, current_exc) - finally: - metainterp_sd.profiler.end_blackhole() + #try: + _run_forever(blackholeinterp, current_exc) + #finally: #debug_stop('jit-blackhole') def convert_and_run_from_pyjitpl(metainterp, raising_exception=False): @@ -1526,7 +1527,6 @@ # 'metainterp.framestack'. 
#debug_start('jit-blackhole') metainterp_sd = metainterp.staticdata - metainterp_sd.profiler.start_blackhole() nextbh = None for frame in metainterp.framestack: curbh = metainterp_sd.blackholeinterpbuilder.acquire_interp() @@ -1543,8 +1543,7 @@ firstbh.exception_last_value = current_exc current_exc = lltype.nullptr(rclass.OBJECTPTR.TO) # - try: - _run_forever(firstbh, current_exc) - finally: - metainterp_sd.profiler.end_blackhole() + #try: + _run_forever(firstbh, current_exc) + #finally: #debug_stop('jit-blackhole') diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -9,12 +9,13 @@ from pypy.tool.sourcetools import func_with_new_name from pypy.jit.metainterp.resoperation import ResOperation, rop, get_deep_immutable_oplist -from pypy.jit.metainterp.history import TreeLoop, Box, History, LoopToken +from pypy.jit.metainterp.history import TreeLoop, Box, History, JitCellToken, TargetToken from pypy.jit.metainterp.history import AbstractFailDescr, BoxInt -from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const +from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const, ConstInt from pypy.jit.metainterp import history from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.jit.metainterp.optimize import InvalidLoop +from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resume import NUMBERING, PENDINGFIELDSP from pypy.jit.codewriter import heaptracker, longlong @@ -23,7 +24,7 @@ from pypy.jit.metainterp.jitprof import ABORT_BRIDGE raise SwitchToBlackhole(ABORT_BRIDGE) -def show_loop(metainterp_sd, loop=None, error=None): +def show_procedures(metainterp_sd, procedure=None, error=None): # debugging if option.view or option.viewloops: if error: @@ -32,11 +33,12 @@ errmsg += ': ' + str(error) else: errmsg = None - if loop is None: # or type(loop) is TerminatingLoop: - extraloops = [] + if procedure is None: + extraprocedures = [] else: - extraloops = [loop] - metainterp_sd.stats.view(errmsg=errmsg, extraloops=extraloops) + extraprocedures = [procedure] + metainterp_sd.stats.view(errmsg=errmsg, + extraprocedures=extraprocedures) def create_empty_loop(metainterp, name_prefix=''): name = metainterp.staticdata.stats.name_for_new_loop() @@ -45,131 +47,261 @@ return loop -def make_loop_token(nb_args, jitdriver_sd): - loop_token = LoopToken() - loop_token.outermost_jitdriver_sd = jitdriver_sd - return loop_token +def make_jitcell_token(jitdriver_sd): + jitcell_token = JitCellToken() + jitcell_token.outermost_jitdriver_sd = jitdriver_sd + return jitcell_token def record_loop_or_bridge(metainterp_sd, loop): """Do post-backend recordings and cleanups on 'loop'. 
""" - # get the original loop token (corresponding to 'loop', or if that is - # a bridge, to the loop that this bridge belongs to) - looptoken = loop.token - assert looptoken is not None + # get the original jitcell token corresponding to jitcell form which + # this trace starts + original_jitcell_token = loop.original_jitcell_token + assert original_jitcell_token is not None if metainterp_sd.warmrunnerdesc is not None: # for tests - assert looptoken.generation > 0 # has been registered with memmgr - wref = weakref.ref(looptoken) + assert original_jitcell_token.generation > 0 # has been registered with memmgr + wref = weakref.ref(original_jitcell_token) for op in loop.operations: descr = op.getdescr() if isinstance(descr, ResumeDescr): descr.wref_original_loop_token = wref # stick it there n = descr.index if n >= 0: # we also record the resumedescr number - looptoken.compiled_loop_token.record_faildescr_index(n) - elif isinstance(descr, LoopToken): - # for a JUMP or a CALL_ASSEMBLER: record it as a potential jump. + original_jitcell_token.compiled_loop_token.record_faildescr_index(n) + elif isinstance(descr, JitCellToken): + # for a CALL_ASSEMBLER: record it as a potential jump. + if descr is not original_jitcell_token: + original_jitcell_token.record_jump_to(descr) + descr.exported_state = None + op._descr = None # clear reference, mostly for tests + elif isinstance(descr, TargetToken): + # for a JUMP: record it as a potential jump. # (the following test is not enough to prevent more complicated # cases of cycles, but at least it helps in simple tests of # test_memgr.py) - if descr is not looptoken: - looptoken.record_jump_to(descr) - op._descr = None # clear reference, mostly for tests + if descr.original_jitcell_token is not original_jitcell_token: + assert descr.original_jitcell_token is not None + original_jitcell_token.record_jump_to(descr.original_jitcell_token) + # exported_state is clear by optimizeopt when the short preamble is + # constrcucted. if that did not happen the label should not show up + # in a trace that will be used + assert descr.exported_state is None if not we_are_translated(): - op._jumptarget_number = descr.number + op._descr_wref = weakref.ref(op._descr) + op._descr = None # clear reference to prevent the history.Stats + # from keeping the loop alive during tests # record this looptoken on the QuasiImmut used in the code if loop.quasi_immutable_deps is not None: for qmut in loop.quasi_immutable_deps: qmut.register_loop_token(wref) # XXX maybe we should clear the dictionary here # mostly for tests: make sure we don't keep a reference to the LoopToken - loop.token = None + loop.original_jitcell_token = None if not we_are_translated(): - loop._looptoken_number = looptoken.number + loop._looptoken_number = original_jitcell_token.number # ____________________________________________________________ -def compile_new_loop(metainterp, old_loop_tokens, greenkey, start, - start_resumedescr, full_preamble_needed=True): - """Try to compile a new loop by closing the current history back +def compile_loop(metainterp, greenkey, start, + inputargs, jumpargs, + start_resumedescr, full_preamble_needed=True): + """Try to compile a new procedure by closing the current history back to the first operation. 
""" - from pypy.jit.metainterp.optimize import optimize_loop + from pypy.jit.metainterp.optimizeopt import optimize_trace history = metainterp.history - loop = create_empty_loop(metainterp) - loop.inputargs = history.inputargs[:] + metainterp_sd = metainterp.staticdata + jitdriver_sd = metainterp.jitdriver_sd + + if False: + part = partial_trace + assert False + procedur_token = metainterp.get_procedure_token(greenkey) + assert procedure_token + all_target_tokens = [] + else: + jitcell_token = make_jitcell_token(jitdriver_sd) + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + h_ops = history.operations + part.start_resumedescr = start_resumedescr + part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ + [h_ops[i].clone() for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, jumpargs, None, descr=jitcell_token)] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens = [target_token] + + loop = create_empty_loop(metainterp) + loop.inputargs = part.inputargs + loop.operations = part.operations + loop.quasi_immutable_deps = {} + if part.quasi_immutable_deps: + loop.quasi_immutable_deps.update(part.quasi_immutable_deps) + while part.operations[-1].getopnum() == rop.LABEL: + inliner = Inliner(inputargs, jumpargs) + part.quasi_immutable_deps = None + part.operations = [part.operations[-1]] + \ + [inliner.inline_op(h_ops[i]) for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jumpargs], + None, descr=jitcell_token)] + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens.append(target_token) + inputargs = jumpargs + jumpargs = part.operations[-1].getarglist() + + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + + loop.operations = loop.operations[:-1] + part.operations + if part.quasi_immutable_deps: + loop.quasi_immutable_deps.update(part.quasi_immutable_deps) + + if not loop.quasi_immutable_deps: + loop.quasi_immutable_deps = None for box in loop.inputargs: assert isinstance(box, Box) - # make a copy, because optimize_loop can mutate the ops and descrs - h_ops = history.operations - loop.operations = [h_ops[i].clone() for i in range(start, len(h_ops))] + + loop.original_jitcell_token = jitcell_token + for label in all_target_tokens: + assert isinstance(label, TargetToken) + label.original_jitcell_token = jitcell_token + if label.virtual_state and label.short_preamble: + metainterp_sd.logger_ops.log_short_preamble([], label.short_preamble) + jitcell_token.target_tokens = all_target_tokens + send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, "loop") + record_loop_or_bridge(metainterp_sd, loop) + return all_target_tokens[0] + +def compile_retrace(metainterp, greenkey, start, + inputargs, jumpargs, + start_resumedescr, partial_trace, resumekey): + """Try to compile a new procedure by closing the current history back + to the first operation. 
+ """ + from pypy.jit.metainterp.optimizeopt import optimize_trace + + history = metainterp.history metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd - loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd) - loop.token = loop_token - loop.operations[-1].setdescr(loop_token) # patch the target of the JUMP - loop.preamble = create_empty_loop(metainterp, 'Preamble ') - loop.preamble.inputargs = loop.inputargs - loop.preamble.token = make_loop_token(len(loop.inputargs), jitdriver_sd) - loop.preamble.start_resumedescr = start_resumedescr + loop_jitcell_token = metainterp.get_procedure_token(greenkey) + assert loop_jitcell_token + assert partial_trace.operations[-1].getopnum() == rop.LABEL + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + part.start_resumedescr = start_resumedescr + h_ops = history.operations + + part.operations = [partial_trace.operations[-1]] + \ + [h_ops[i].clone() for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, jumpargs, None, descr=loop_jitcell_token)] + label = part.operations[0] + orignial_label = label.clone() + assert label.getopnum() == rop.LABEL try: - old_loop_token = optimize_loop(metainterp_sd, old_loop_tokens, loop, - jitdriver_sd.warmstate.enable_opts) + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: - debug_print("compile_new_loop: got an InvalidLoop") - return None - if old_loop_token is not None: - metainterp.staticdata.log("reusing old loop") - return old_loop_token + #return None # XXX: Dissable for now + # Fall back on jumping to preamble + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + assert target_token.exported_state + part.operations = [orignial_label] + \ + [ResOperation(rop.JUMP, target_token.exported_state.jump_args, + None, descr=loop_jitcell_token)] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts, + inline_short_preamble=False) + + except InvalidLoop: + return None + assert part.operations[-1].getopnum() != rop.LABEL + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + assert loop_jitcell_token.target_tokens + loop_jitcell_token.target_tokens.append(target_token) - if loop.preamble.operations is not None: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, - "loop") - record_loop_or_bridge(metainterp_sd, loop) - token = loop.preamble.token - if full_preamble_needed: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, - loop.preamble, "entry bridge") - insert_loop_token(old_loop_tokens, loop.preamble.token) - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - greenkey, loop.preamble.token) - record_loop_or_bridge(metainterp_sd, loop.preamble) - elif token.short_preamble: - short = token.short_preamble[-1] - metainterp_sd.logger_ops.log_short_preamble(short.inputargs, - short.operations) - return token - else: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, - "loop") - insert_loop_token(old_loop_tokens, loop_token) - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - greenkey, loop.token) - record_loop_or_bridge(metainterp_sd, loop) - return loop_token + loop = partial_trace + loop.operations = loop.operations[:-1] + part.operations -def insert_loop_token(old_loop_tokens, loop_token): - # Find where in old_loop_tokens we should insert this new loop_token. 
- # The following algo means "as late as possible, but before another - # loop token that would be more general and so completely mask off - # the new loop_token". - # XXX do we still need a list? - old_loop_tokens.append(loop_token) + quasi_immutable_deps = {} + if loop.quasi_immutable_deps: + quasi_immutable_deps.update(loop.quasi_immutable_deps) + if part.quasi_immutable_deps: + quasi_immutable_deps.update(part.quasi_immutable_deps) + if quasi_immutable_deps: + loop.quasi_immutable_deps = quasi_immutable_deps + + for box in loop.inputargs: + assert isinstance(box, Box) + + target_token = loop.operations[-1].getdescr() + resumekey.compile_and_attach(metainterp, loop) + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + target_token.original_jitcell_token = loop.original_jitcell_token + record_loop_or_bridge(metainterp_sd, loop) + return target_token + +def patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd): + vinfo = jitdriver_sd.virtualizable_info + extra_ops = [] + inputargs = loop.inputargs + vable_box = inputargs[jitdriver_sd.index_of_virtualizable] + i = jitdriver_sd.num_red_args + loop.inputargs = inputargs[:i] + for descr in vinfo.static_field_descrs: + assert i < len(inputargs) + box = inputargs[i] + extra_ops.append( + ResOperation(rop.GETFIELD_GC, [vable_box], box, descr)) + i += 1 + arrayindex = 0 + for descr in vinfo.array_field_descrs: + vable = vable_box.getref_base() + arraylen = vinfo.get_array_length(vable, arrayindex) + arraybox = BoxPtr() + extra_ops.append( + ResOperation(rop.GETFIELD_GC, [vable_box], arraybox, descr)) + arraydescr = vinfo.array_descrs[arrayindex] + assert i + arraylen <= len(inputargs) + for index in range(arraylen): + box = inputargs[i] + extra_ops.append( + ResOperation(rop.GETARRAYITEM_GC, + [arraybox, ConstInt(index)], + box, descr=arraydescr)) + i += 1 + arrayindex += 1 + assert i == len(inputargs) + loop.operations = extra_ops + loop.operations def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): - jitdriver_sd.on_compile(metainterp_sd.logger_ops, loop.token, + vinfo = jitdriver_sd.virtualizable_info + if vinfo is not None: + patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd) + + original_jitcell_token = loop.original_jitcell_token + jitdriver_sd.on_compile(metainterp_sd.logger_ops, original_jitcell_token, loop.operations, type, greenkey) loopname = jitdriver_sd.warmstate.get_location_str(greenkey) globaldata = metainterp_sd.globaldata - loop_token = loop.token - loop_token.number = n = globaldata.loopnumbering + original_jitcell_token.number = n = globaldata.loopnumbering globaldata.loopnumbering += 1 if not we_are_translated(): - show_loop(metainterp_sd, loop) + show_procedures(metainterp_sd, loop) loop.check_consistency() operations = get_deep_immutable_oplist(loop.operations) @@ -177,26 +309,19 @@ debug_start("jit-backend") try: ops_offset = metainterp_sd.cpu.compile_loop(loop.inputargs, operations, - loop.token, name=loopname) + original_jitcell_token, name=loopname) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() metainterp_sd.stats.add_new_loop(loop) if not we_are_translated(): - if type != "entry bridge": - metainterp_sd.stats.compiled() - else: - loop._ignore_during_counting = True + metainterp_sd.stats.compiled() metainterp_sd.log("compiled new " + type) # metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, type, ops_offset) - short = loop.token.short_preamble - if short: - 
metainterp_sd.logger_ops.log_short_preamble(short[-1].inputargs, - short[-1].operations) # if metainterp_sd.warmrunnerdesc is not None: # for tests - metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(loop.token) + metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(original_jitcell_token) def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputargs, operations, original_loop_token): @@ -204,8 +329,9 @@ jitdriver_sd.on_compile_bridge(metainterp_sd.logger_ops, original_loop_token, operations, n) if not we_are_translated(): - show_loop(metainterp_sd) - TreeLoop.check_consistency_of(inputargs, operations) + show_procedures(metainterp_sd) + seen = dict.fromkeys(inputargs) + TreeLoop.check_consistency_of_branch(operations, seen) metainterp_sd.profiler.start_backend() operations = get_deep_immutable_oplist(operations) debug_start("jit-backend") @@ -221,9 +347,9 @@ # metainterp_sd.logger_ops.log_bridge(inputargs, operations, n, ops_offset) # - if metainterp_sd.warmrunnerdesc is not None: # for tests - metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive( - original_loop_token) + #if metainterp_sd.warmrunnerdesc is not None: # for tests + # metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive( + # original_loop_token) # ____________________________________________________________ @@ -263,7 +389,7 @@ raise metainterp_sd.ExitFrameWithExceptionRef(cpu, value) -class TerminatingLoopToken(LoopToken): +class TerminatingLoopToken(JitCellToken): # FIXME: kill? terminating = True def __init__(self, nargs, finishdescr): @@ -298,7 +424,7 @@ pass class ResumeGuardDescr(ResumeDescr): - _counter = 0 # if < 0, there is one counter per value; + _counter = 0 # on a GUARD_VALUE, there is one counter per value; _counters = None # they get stored in _counters then. # this class also gets the following attributes stored by resume.py code @@ -309,10 +435,13 @@ rd_virtuals = None rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) - CNT_INT = -0x20000000 - CNT_REF = -0x40000000 - CNT_FLOAT = -0x60000000 - CNT_MASK = 0x1FFFFFFF + CNT_BASE_MASK = 0x0FFFFFFF # the base counter value + CNT_BUSY_FLAG = 0x10000000 # if set, busy tracing from the guard + CNT_TYPE_MASK = 0x60000000 # mask for the type + + CNT_INT = 0x20000000 + CNT_REF = 0x40000000 + CNT_FLOAT = 0x60000000 def store_final_boxes(self, guard_op, boxes): guard_op.setfailargs(boxes) @@ -326,6 +455,8 @@ except ValueError: return # xxx probably very rare else: + if i > self.CNT_BASE_MASK: + return # probably never, but better safe than sorry if box.type == history.INT: cnt = self.CNT_INT elif box.type == history.REF: @@ -334,18 +465,21 @@ cnt = self.CNT_FLOAT else: assert 0, box.type - # we build the following value for _counter, which is always - # a negative value + assert cnt > self.CNT_BASE_MASK self._counter = cnt | i def handle_fail(self, metainterp_sd, jitdriver_sd): if self.must_compile(metainterp_sd, jitdriver_sd): - return self._trace_and_compile_from_bridge(metainterp_sd, - jitdriver_sd) + self.start_compiling() + try: + self._trace_and_compile_from_bridge(metainterp_sd, + jitdriver_sd) + finally: + self.done_compiling() else: from pypy.jit.metainterp.blackhole import resume_in_blackhole resume_in_blackhole(metainterp_sd, jitdriver_sd, self) - assert 0, "unreachable" + assert 0, "unreachable" def _trace_and_compile_from_bridge(self, metainterp_sd, jitdriver_sd): # 'jitdriver_sd' corresponds to the outermost one, i.e. the one @@ -354,17 +488,27 @@ # jitdrivers. 
from pypy.jit.metainterp.pyjitpl import MetaInterp metainterp = MetaInterp(metainterp_sd, jitdriver_sd) - return metainterp.handle_guard_failure(self) + metainterp.handle_guard_failure(self) _trace_and_compile_from_bridge._dont_inline_ = True def must_compile(self, metainterp_sd, jitdriver_sd): trace_eagerness = jitdriver_sd.warmstate.trace_eagerness - if self._counter >= 0: + # + if self._counter <= self.CNT_BASE_MASK: + # simple case: just counting from 0 to trace_eagerness self._counter += 1 return self._counter >= trace_eagerness - else: - index = self._counter & self.CNT_MASK - typetag = self._counter & ~ self.CNT_MASK + # + # do we have the BUSY flag? If so, we're tracing right now, e.g. in an + # outer invocation of the same function, so don't trace again for now. + elif self._counter & self.CNT_BUSY_FLAG: + return False + # + else: # we have a GUARD_VALUE that fails. Make a _counters instance + # (only now, when the guard is actually failing at least once), + # and use it to record some statistics about the failing values. + index = self._counter & self.CNT_BASE_MASK + typetag = self._counter & self.CNT_TYPE_MASK counters = self._counters if typetag == self.CNT_INT: intvalue = metainterp_sd.cpu.get_latest_value_int(index) @@ -391,7 +535,16 @@ assert 0, typetag return counter >= trace_eagerness - def reset_counter_from_failure(self): + def start_compiling(self): + # start tracing and compiling from this guard. + self._counter |= self.CNT_BUSY_FLAG + + def done_compiling(self): + # done tracing and compiling from this guard. Either the bridge has + # been successfully compiled, in which case whatever value we store + # in self._counter will not be seen any more, or not, in which case + # we should reset the counter to 0, in order to wait a bit until the + # next attempt. if self._counter >= 0: self._counter = 0 self._counters = None @@ -400,13 +553,13 @@ # We managed to create a bridge. Attach the new operations # to the corresponding guard_op and compile from there assert metainterp.resumekey_original_loop_token is not None - new_loop.token = metainterp.resumekey_original_loop_token + new_loop.original_jitcell_token = metainterp.resumekey_original_loop_token inputargs = metainterp.history.inputargs if not we_are_translated(): self._debug_suboperations = new_loop.operations send_bridge_to_backend(metainterp.jitdriver_sd, metainterp.staticdata, self, inputargs, new_loop.operations, - new_loop.token) + new_loop.original_jitcell_token) def copy_all_attributes_into(self, res): # XXX a bit ugly to have to list them all here @@ -589,44 +742,32 @@ metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd redargs = new_loop.inputargs - # We make a new LoopToken for this entry bridge, and stick it - # to every guard in the loop. 
- new_loop_token = make_loop_token(len(redargs), jitdriver_sd) - new_loop.token = new_loop_token + new_loop.original_jitcell_token = jitcell_token = make_jitcell_token(jitdriver_sd) send_loop_to_backend(self.original_greenkey, metainterp.jitdriver_sd, metainterp_sd, new_loop, "entry bridge") # send the new_loop to warmspot.py, to be called directly the next time - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - self.original_greenkey, - new_loop_token) - # store the new loop in compiled_merge_points_wref too - old_loop_tokens = metainterp.get_compiled_merge_points( - self.original_greenkey) - # it always goes at the end of the list, as it is the most - # general loop token - old_loop_tokens.append(new_loop_token) - metainterp.set_compiled_merge_points(self.original_greenkey, - old_loop_tokens) + jitdriver_sd.warmstate.attach_procedure_to_interp( + self.original_greenkey, jitcell_token) + metainterp_sd.stats.add_jitcell_token(jitcell_token) - def reset_counter_from_failure(self): - pass - -def compile_new_bridge(metainterp, old_loop_tokens, resumekey, retraced=False): +def compile_trace(metainterp, resumekey, start_resumedescr=None): """Try to compile a new bridge leading from the beginning of the history to some existing place. """ - from pypy.jit.metainterp.optimize import optimize_bridge + from pypy.jit.metainterp.optimizeopt import optimize_trace # The history contains new operations to attach as the code for the # failure of 'resumekey.guard_op'. - # + # # Attempt to use optimize_bridge(). This may return None in case # it does not work -- i.e. none of the existing old_loop_tokens match. - new_loop = create_empty_loop(metainterp) - new_loop.inputargs = metainterp.history.inputargs[:] + new_trace = create_empty_loop(metainterp) + new_trace.inputargs = inputargs = metainterp.history.inputargs[:] # clone ops, as optimize_bridge can mutate the ops - new_loop.operations = [op.clone() for op in metainterp.history.operations] + + new_trace.operations = [op.clone() for op in metainterp.history.operations] + new_trace.start_resumedescr = start_resumedescr metainterp_sd = metainterp.staticdata state = metainterp.jitdriver_sd.warmstate if isinstance(resumekey, ResumeAtPositionDescr): @@ -634,38 +775,25 @@ else: inline_short_preamble = True try: - target_loop_token = optimize_bridge(metainterp_sd, old_loop_tokens, - new_loop, state.enable_opts, - inline_short_preamble, retraced) + optimize_trace(metainterp_sd, new_trace, state.enable_opts, inline_short_preamble) except InvalidLoop: debug_print("compile_new_bridge: got an InvalidLoop") # XXX I am fairly convinced that optimize_bridge cannot actually raise # InvalidLoop debug_print('InvalidLoop in compile_new_bridge') return None - # Did it work? - if target_loop_token is not None: - # Yes, we managed to create a bridge. Dispatch to resumekey to + + if new_trace.operations[-1].getopnum() != rop.LABEL: + # We managed to create a bridge. 
Dispatch to resumekey to # know exactly what we must do (ResumeGuardDescr/ResumeFromInterpDescr) - prepare_last_operation(new_loop, target_loop_token) - resumekey.compile_and_attach(metainterp, new_loop) - record_loop_or_bridge(metainterp_sd, new_loop) - return target_loop_token - -def prepare_last_operation(new_loop, target_loop_token): - op = new_loop.operations[-1] - if not isinstance(target_loop_token, TerminatingLoopToken): - # normal case - #op.setdescr(target_loop_token) # patch the jump target - pass + target_token = new_trace.operations[-1].getdescr() + resumekey.compile_and_attach(metainterp, new_trace) + record_loop_or_bridge(metainterp_sd, new_trace) + return target_token else: - # The target_loop_token is a pseudo loop token, - # e.g. loop_tokens_done_with_this_frame_void[0] - # Replace the operation with the real operation we want, i.e. a FINISH - descr = target_loop_token.finishdescr - args = op.getarglist() - new_op = ResOperation(rop.FINISH, args, None, descr=descr) - new_loop.operations[-1] = new_op + metainterp.retrace_needed(new_trace) + return None + # ____________________________________________________________ @@ -676,21 +804,25 @@ assert exception, "PropagateExceptionDescr: no exception??" raise metainterp_sd.ExitFrameWithExceptionRef(cpu, exception) -def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redboxes, +def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redargtypes, memory_manager=None): """Make a LoopToken that corresponds to assembler code that just calls back the interpreter. Used temporarily: a fully compiled version of the code may end up replacing it. """ - # 'redboxes' is only used to know the types of red arguments. - inputargs = [box.clonebox() for box in redboxes] - loop_token = make_loop_token(len(inputargs), jitdriver_sd) - # 'nb_red_args' might be smaller than len(redboxes), - # because it doesn't include the virtualizable boxes. 
+ jitcell_token = make_jitcell_token(jitdriver_sd) nb_red_args = jitdriver_sd.num_red_args + assert len(redargtypes) == nb_red_args + inputargs = [] + for kind in redargtypes: + if kind == history.INT: box = BoxInt() + elif kind == history.REF: box = BoxPtr() + elif kind == history.FLOAT: box = BoxFloat() + else: raise AssertionError + inputargs.append(box) k = jitdriver_sd.portal_runner_adr funcbox = history.ConstInt(heaptracker.adr2int(k)) - callargs = [funcbox] + greenboxes + inputargs[:nb_red_args] + callargs = [funcbox] + greenboxes + inputargs # result_type = jitdriver_sd.result_type if result_type == history.INT: @@ -717,7 +849,7 @@ ] operations[1].setfailargs([]) operations = get_deep_immutable_oplist(operations) - cpu.compile_loop(inputargs, operations, loop_token, log=False) + cpu.compile_loop(inputargs, operations, jitcell_token, log=False) if memory_manager is not None: # for tests - memory_manager.keep_loop_alive(loop_token) - return loop_token + memory_manager.keep_loop_alive(jitcell_token) + return jitcell_token diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -346,6 +346,7 @@ rop.QUASIIMMUT_FIELD, rop.MALLOC_GC, rop.MALLOC_NURSERY, + rop.LABEL, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git a/pypy/jit/metainterp/gc.py b/pypy/jit/metainterp/gc.py --- a/pypy/jit/metainterp/gc.py +++ b/pypy/jit/metainterp/gc.py @@ -7,6 +7,9 @@ self.config = config +class GC_none(GcDescription): + malloc_zero_filled = True + class GC_boehm(GcDescription): malloc_zero_filled = True diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -12,8 +12,9 @@ def get_display_text(self): return None -def display_loops(loops, errmsg=None, highlight_loops={}): - graphs = [(loop, highlight_loops.get(loop, 0)) for loop in loops] +def display_procedures(procedures, errmsg=None, highlight_procedures={}): + graphs = [(procedure, highlight_procedures.get(procedure, 0)) + for procedure in procedures] for graph, highlight in graphs: for op in graph.get_operations(): if is_interesting_guard(op): @@ -25,18 +26,19 @@ def is_interesting_guard(op): return hasattr(op.getdescr(), '_debug_suboperations') +def getdescr(op): + if op._descr is not None: + return op._descr + if hasattr(op, '_descr_wref'): + return op._descr_wref() + return None + class ResOpGraphPage(GraphPage): def compute(self, graphs, errmsg=None): resopgen = ResOpGen() for graph, highlight in graphs: - if getattr(graph, 'token', None) is not None: - resopgen.jumps_to_graphs[graph.token] = graph - if getattr(graph, '_looptoken_number', None) is not None: - resopgen.jumps_to_graphs[graph._looptoken_number] = graph - - for graph, highlight in graphs: resopgen.add_graph(graph, highlight) if errmsg: resopgen.set_errmsg(errmsg) @@ -54,7 +56,7 @@ self.block_starters = {} # {graphindex: {set-of-operation-indices}} self.all_operations = {} self.errmsg = None - self.jumps_to_graphs = {} + self.target_tokens = {} def op_name(self, graphindex, opindex): return 'g%dop%d' % (graphindex, opindex) @@ -73,16 +75,21 @@ for graphindex in range(len(self.graphs)): self.block_starters[graphindex] = {0: True} for graphindex, graph in enumerate(self.graphs): - last_was_mergepoint = False + mergepointblock = None for i, op in enumerate(graph.get_operations()): if is_interesting_guard(op): 
self.mark_starter(graphindex, i+1) if op.getopnum() == rop.DEBUG_MERGE_POINT: - if not last_was_mergepoint: - last_was_mergepoint = True - self.mark_starter(graphindex, i) + if mergepointblock is None: + mergepointblock = i + elif op.getopnum() == rop.LABEL: + self.mark_starter(graphindex, i) + self.target_tokens[getdescr(op)] = (graphindex, i) + mergepointblock = i else: - last_was_mergepoint = False + if mergepointblock is not None: + self.mark_starter(graphindex, mergepointblock) + mergepointblock = None def set_errmsg(self, errmsg): self.errmsg = errmsg @@ -172,24 +179,10 @@ (graphindex, opindex)) break if op.getopnum() == rop.JUMP: - tgt_g = -1 - tgt = None - tgt_number = getattr(op, '_jumptarget_number', None) - if tgt_number is not None: - tgt = self.jumps_to_graphs.get(tgt_number) - else: - tgt_descr = op.getdescr() - if tgt_descr is None: - tgt_g = graphindex - else: - tgt = self.jumps_to_graphs.get(tgt_descr.number) - if tgt is None: - tgt = self.jumps_to_graphs.get(tgt_descr) - if tgt is not None: - tgt_g = self.graphs.index(tgt) - if tgt_g != -1: + tgt_descr = getdescr(op) + if tgt_descr is not None and tgt_descr in self.target_tokens: self.genedge((graphindex, opstartindex), - (tgt_g, 0), + self.target_tokens[tgt_descr], weight="0") lines.append("") label = "\\l".join(lines) diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -10,6 +10,7 @@ from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.codewriter import heaptracker, longlong from pypy.rlib.objectmodel import compute_identity_hash +import weakref # ____________________________________________________________ @@ -123,9 +124,6 @@ def sort_key(self): raise NotImplementedError - def set_future_value(self, cpu, j): - raise NotImplementedError - def nonnull(self): raise NotImplementedError @@ -288,9 +286,6 @@ def _get_hash_(self): return make_hashable_int(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_int(j, self.value) - def same_constant(self, other): if isinstance(other, ConstInt): return self.value == other.value @@ -328,9 +323,6 @@ def _get_hash_(self): return longlong.gethash(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_float(j, self.value) - def same_constant(self, other): if isinstance(other, ConstFloat): return self.value == other.value @@ -377,9 +369,6 @@ def getaddr(self): return llmemory.cast_ptr_to_adr(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def same_constant(self, other): if isinstance(other, ConstPtr): return self.value == other.value @@ -431,9 +420,6 @@ else: return 0 - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - ## def getaddr(self): ## # so far this is used only when calling ## # CodeWriter.IndirectCallset.bytecode_for_address. 
We don't need a @@ -539,9 +525,6 @@ def _get_hash_(self): return make_hashable_int(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_int(j, self.value) - def nonnull(self): return self.value != 0 @@ -574,9 +557,6 @@ def _get_hash_(self): return longlong.gethash(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_float(j, self.value) - def nonnull(self): return self.value != longlong.ZEROF @@ -619,9 +599,6 @@ else: return 0 - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def nonnull(self): return bool(self.value) @@ -666,19 +643,12 @@ def nonnull(self): return bool(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def repr_rpython(self): return repr_rpython(self, 'bo') _getrepr_ = repr_object -def set_future_values(cpu, boxes): - for j in range(len(boxes)): - boxes[j].set_future_value(cpu, j) - # ____________________________________________________________ @@ -723,18 +693,17 @@ # ____________________________________________________________ -# The TreeLoop class contains a loop or a generalized loop, i.e. a tree -# of operations. Each branch ends in a jump which can go either to -# the top of the same loop, or to another TreeLoop; or it ends in a FINISH. +# The JitCellToken class is the root of a tree of traces. Each branch ends +# in a jump which goes to a LABEL operation; or it ends in a FINISH. -class LoopToken(AbstractDescr): +class JitCellToken(AbstractDescr): """Used for rop.JUMP, giving the target of the jump. This is different from TreeLoop: the TreeLoop class contains the whole loop, including 'operations', and goes away after the loop was compiled; but the LoopDescr remains alive and points to the generated assembler. """ - short_preamble = None + target_tokens = None failed_states = None retraced_count = 0 terminating = False # see TerminatingLoopToken in compile.py @@ -751,10 +720,11 @@ def __init__(self): # For memory management of assembled loops - self._keepalive_target_looktokens = {} # set of other LoopTokens + self._keepalive_jitcell_tokens = {} # set of other JitCellToken - def record_jump_to(self, target_loop_token): - self._keepalive_target_looktokens[target_loop_token] = None + def record_jump_to(self, jitcell_token): + assert isinstance(jitcell_token, JitCellToken) + self._keepalive_jitcell_tokens[jitcell_token] = None def __repr__(self): return '' % (self.number, self.generation) @@ -765,17 +735,49 @@ def dump(self): self.compiled_loop_token.cpu.dump_loop_token(self) +class TargetToken(AbstractDescr): + def __init__(self, targeting_jitcell_token=None): + # Warning, two different jitcell_tokens here! + # + # * 'targeting_jitcell_token' is only useful for the front-end, + # and it means: consider the LABEL that uses this TargetToken. + # At this position, the state is logically the one given + # by targeting_jitcell_token. So e.g. if we want to enter the + # JIT with some given green args, if the jitcell matches, then + # we can jump to this LABEL. 
+ # + # * 'original_jitcell_token' is information from the backend's + # point of view: it means that this TargetToken is used in + # a LABEL that belongs to either: + # - a loop; then 'original_jitcell_token' is this loop + # - or a bridge; then 'original_jitcell_token' is the loop + # out of which we made this bridge + # + self.targeting_jitcell_token = targeting_jitcell_token + self.original_jitcell_token = None + + self.virtual_state = None + self.exported_state = None + class TreeLoop(object): inputargs = None operations = None - token = None call_pure_results = None logops = None quasi_immutable_deps = None + start_resumedescr = None + + def _token(*args): + raise Exception("TreeLoop.token is killed") + token = property(_token, _token) + + # This is the jitcell where the trace starts. Labels within the trace might + # belong to some other jitcells in the sens that jumping to this other + # jitcell will result in a jump to the label. + original_jitcell_token = None def __init__(self, name): self.name = name - # self.inputargs = list of distinct Boxes # self.operations = list of ResOperations # ops of the kind 'guard_xxx' contain a further list of operations, # which may itself contain 'guard_xxx' and so on, making a tree. @@ -808,6 +810,10 @@ def check_consistency(self): # for testing "NOT_RPYTHON" self.check_consistency_of(self.inputargs, self.operations) + for op in self.operations: + descr = op.getdescr() + if op.getopnum() == rop.LABEL and isinstance(descr, TargetToken): + assert descr.original_jitcell_token is self.original_jitcell_token @staticmethod def check_consistency_of(inputargs, operations): @@ -842,15 +848,23 @@ assert isinstance(box, Box) assert box not in seen seen[box] = True + if op.getopnum() == rop.LABEL: + inputargs = op.getarglist() + for box in inputargs: + assert isinstance(box, Box), "LABEL contains %r" % (box,) + seen = dict.fromkeys(inputargs) + assert len(seen) == len(inputargs), ( + "duplicate Box in the LABEL arguments") + assert operations[-1].is_final() if operations[-1].getopnum() == rop.JUMP: target = operations[-1].getdescr() if target is not None: - assert isinstance(target, LoopToken) + assert isinstance(target, TargetToken) def dump(self): # RPython-friendly - print '%r: inputargs =' % self, self._dump_args(self.inputargs) + print '%r: inputargs =' % self, self._dump_args(self.inputargs) for op in self.operations: args = op.getarglist() print '\t', op.getopname(), self._dump_args(args), \ @@ -932,6 +946,9 @@ def clear(self): pass + def add_jitcell_token(self, token): + pass + class Stats(object): """For tests.""" @@ -944,17 +961,26 @@ self.loops = [] self.locations = [] self.aborted_keys = [] - self.invalidated_token_numbers = set() + self.invalidated_token_numbers = set() # <- not RPython + self.jitcell_token_wrefs = [] + self.jitcell_dicts = [] # <- not RPython def clear(self): del self.loops[:] del self.locations[:] del self.aborted_keys[:] + del self.jitcell_token_wrefs[:] self.invalidated_token_numbers.clear() self.compiled_count = 0 self.enter_count = 0 self.aborted_count = 0 + for dict in self.jitcell_dicts: + dict.clear() + def add_jitcell_token(self, token): + assert isinstance(token, JitCellToken) + self.jitcell_token_wrefs.append(weakref.ref(token)) + def set_history(self, history): self.operations = history.operations @@ -984,6 +1010,15 @@ def get_all_loops(self): return self.loops + def get_all_jitcell_tokens(self): + tokens = [t() for t in self.jitcell_token_wrefs] + if None in tokens: + assert False, "get_all_jitcell_tokens will not 
work as "+\ + "loops have been freed" + return tokens + + + def check_history(self, expected=None, **check): insns = {} for op in self.operations: @@ -1001,10 +1036,14 @@ def check_resops(self, expected=None, **check): insns = {} - for loop in self.loops: + for loop in self.get_all_loops(): insns = loop.summary(adding_insns=insns) + return self._check_insns(insns, expected, check) + + def _check_insns(self, insns, expected, check): if expected is not None: insns.pop('debug_merge_point', None) + insns.pop('label', None) assert insns == expected for insn, expected_count in check.items(): getattr(rop, insn.upper()) # fails if 'rop.INSN' does not exist @@ -1012,29 +1051,102 @@ assert found == expected_count, ( "found %d %r, expected %d" % (found, insn, expected_count)) return insns + + def check_simple_loop(self, expected=None, **check): + # Usefull in the simplest case when we have only one trace ending with + # a jump back to itself and possibly a few bridges ending with finnish. + # Only the operations within the loop formed by that single jump will + # be counted. + + # XXX hacked version, ignore and remove me when jit-targets is merged. + loops = self.get_all_loops() + loops = [loop for loop in loops if 'Preamble' not in repr(loop)] #XXX + assert len(loops) == 1 + loop, = loops + jumpop = loop.operations[-1] + assert jumpop.getopnum() == rop.JUMP + insns = {} + for op in loop.operations: + opname = op.getopname() + insns[opname] = insns.get(opname, 0) + 1 + return self._check_insns(insns, expected, check) + + def check_simple_loop(self, expected=None, **check): + # Usefull in the simplest case when we have only one trace ending with + # a jump back to itself and possibly a few bridges ending with finnish. + # Only the operations within the loop formed by that single jump will + # be counted. 
+ loops = self.get_all_loops() + assert len(loops) == 1 + loop = loops[0] + jumpop = loop.operations[-1] + assert jumpop.getopnum() == rop.JUMP + assert self.check_resops(jump=1) + labels = [op for op in loop.operations if op.getopnum() == rop.LABEL] + targets = [op._descr_wref() for op in labels] + assert None not in targets # TargetToken was freed, give up + target = jumpop._descr_wref() + assert target + assert targets.count(target) == 1 + i = loop.operations.index(labels[targets.index(target)]) + insns = {} + for op in loop.operations[i:]: + opname = op.getopname() + insns[opname] = insns.get(opname, 0) + 1 + return self._check_insns(insns, expected, check) + def check_loops(self, expected=None, everywhere=False, **check): + insns = {} + for loop in self.get_all_loops(): + #if not everywhere: + # if getattr(loop, '_ignore_during_counting', False): + # continue + insns = loop.summary(adding_insns=insns) + if expected is not None: + insns.pop('debug_merge_point', None) + print + print + print " self.check_resops(%s)" % str(insns) + print + import pdb; pdb.set_trace() + else: + chk = ['%s=%d' % (i, insns.get(i, 0)) for i in check] + print + print + print " self.check_resops(%s)" % ', '.join(chk) + print + import pdb; pdb.set_trace() + return + + for insn, expected_count in check.items(): + getattr(rop, insn.upper()) # fails if 'rop.INSN' does not exist + found = insns.get(insn, 0) + assert found == expected_count, ( + "found %d %r, expected %d" % (found, insn, expected_count)) + return insns + def check_consistency(self): "NOT_RPYTHON" - for loop in self.loops: + for loop in self.get_all_loops(): loop.check_consistency() def maybe_view(self): if option.view: self.view() - def view(self, errmsg=None, extraloops=[]): - from pypy.jit.metainterp.graphpage import display_loops - loops = self.get_all_loops()[:] - for loop in extraloops: - if loop in loops: - loops.remove(loop) - loops.append(loop) - highlight_loops = dict.fromkeys(extraloops, 1) - for loop in loops: - if hasattr(loop, '_looptoken_number') and ( - loop._looptoken_number in self.invalidated_token_numbers): - highlight_loops.setdefault(loop, 2) - display_loops(loops, errmsg, highlight_loops) + def view(self, errmsg=None, extraprocedures=[]): + from pypy.jit.metainterp.graphpage import display_procedures + procedures = self.get_all_loops()[:] + for procedure in extraprocedures: + if procedure in procedures: + procedures.remove(procedure) + procedures.append(procedure) + highlight_procedures = dict.fromkeys(extraprocedures, 1) + for procedure in procedures: + if hasattr(procedure, '_looptoken_number') and ( + procedure._looptoken_number in self.invalidated_token_numbers): + highlight_procedures.setdefault(procedure, 2) + display_procedures(procedures, errmsg, highlight_procedures) # ---------------------------------------------------------------- diff --git a/pypy/jit/metainterp/inliner.py b/pypy/jit/metainterp/inliner.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/inliner.py @@ -0,0 +1,57 @@ +from pypy.jit.metainterp.history import Const +from pypy.jit.metainterp.resume import Snapshot + +class Inliner(object): + def __init__(self, inputargs, jump_args): + assert len(inputargs) == len(jump_args) + self.argmap = {} + for i in range(len(inputargs)): + if inputargs[i] in self.argmap: + assert self.argmap[inputargs[i]] == jump_args[i] + else: + self.argmap[inputargs[i]] = jump_args[i] + self.snapshot_map = {None: None} + + def inline_op(self, newop, ignore_result=False, clone=True, + ignore_failargs=False): + if clone: 
+ newop = newop.clone() + args = newop.getarglist() + newop.initarglist([self.inline_arg(a) for a in args]) + + if newop.is_guard(): + args = newop.getfailargs() + if args and not ignore_failargs: + newop.setfailargs([self.inline_arg(a) for a in args]) + else: + newop.setfailargs([]) + + if newop.result and not ignore_result: + old_result = newop.result + newop.result = newop.result.clonebox() + self.argmap[old_result] = newop.result + + self.inline_descr_inplace(newop.getdescr()) + + return newop + + def inline_descr_inplace(self, descr): + from pypy.jit.metainterp.compile import ResumeGuardDescr + if isinstance(descr, ResumeGuardDescr): + descr.rd_snapshot = self.inline_snapshot(descr.rd_snapshot) + + def inline_arg(self, arg): + if arg is None: + return None + if isinstance(arg, Const): + return arg + return self.argmap[arg] + + def inline_snapshot(self, snapshot): + if snapshot in self.snapshot_map: + return self.snapshot_map[snapshot] + boxes = [self.inline_arg(a) for a in snapshot.boxes] + new_snapshot = Snapshot(self.inline_snapshot(snapshot.prev), boxes) + self.snapshot_map[snapshot] = new_snapshot + return new_snapshot + diff --git a/pypy/jit/metainterp/jitdriver.py b/pypy/jit/metainterp/jitdriver.py --- a/pypy/jit/metainterp/jitdriver.py +++ b/pypy/jit/metainterp/jitdriver.py @@ -11,6 +11,7 @@ # self.portal_calldescr ... pypy.jit.metainterp.warmspot # self.num_green_args ... pypy.jit.metainterp.warmspot # self.num_red_args ... pypy.jit.metainterp.warmspot + # self.red_args_types ... pypy.jit.metainterp.warmspot # self.result_type ... pypy.jit.metainterp.warmspot # self.virtualizable_info... pypy.jit.metainterp.warmspot # self.greenfield_info ... pypy.jit.metainterp.warmspot diff --git a/pypy/jit/metainterp/jitprof.py b/pypy/jit/metainterp/jitprof.py --- a/pypy/jit/metainterp/jitprof.py +++ b/pypy/jit/metainterp/jitprof.py @@ -10,8 +10,6 @@ counters=""" TRACING BACKEND -RUNNING -BLACKHOLE OPS RECORDED_OPS GUARDS @@ -67,18 +65,6 @@ def end_backend(self): pass - def start_running(self): - pass - - def end_running(self): - pass - - def start_blackhole(self): - pass - - def end_blackhole(self): - pass - def count(self, kind, inc=1): pass @@ -134,16 +120,6 @@ def start_backend(self): self._start(BACKEND) def end_backend(self): self._end (BACKEND) - # Don't record times for 'running' and 'blackhole' because there are - # too many of them: calling time.time() is a major blocker. - # If you are interested in these numbers, use 'PYPYLOG=file' and - # look at the resulting file with pypy/tool/logparser.py. 
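A minimal sketch of the suggested alternative, assuming the usual PYPYLOG=categories:file syntax and the parse_log_file/extract_category helpers of pypy/tool/logparser.py; file names and the 'jit-tracing' category are only examples:

    # run the program with logging enabled (shell):
    #   PYPYLOG=jit:jit.log pypy-c myprogram.py
    from pypy.tool import logparser
    log = logparser.parse_log_file('jit.log')        # parse the debug log
    tracing = logparser.extract_category(log, 'jit-tracing')
    print len(tracing), 'tracing sections recorded'
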
- def start_running(self): self.count(RUNNING) - def end_running(self): pass - - def start_blackhole(self): self.count(BLACKHOLE) - def end_blackhole(self): pass - def count(self, kind, inc=1): self.counters[kind] += inc @@ -165,8 +141,6 @@ calls = self.calls self._print_line_time("Tracing", cnt[TRACING], tim[TRACING]) self._print_line_time("Backend", cnt[BACKEND], tim[BACKEND]) - self._print_intline("Running asm", cnt[RUNNING]) - self._print_intline("Blackhole", cnt[BLACKHOLE]) line = "TOTAL: \t\t%f" % (self.tk - self.starttime, ) debug_print(line) self._print_intline("ops", cnt[OPS]) diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -1,5 +1,5 @@ import math -from pypy.rlib.rarithmetic import r_int64 +from pypy.rlib.rarithmetic import r_int64, r_uint from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.rlib.objectmodel import we_are_translated @@ -21,6 +21,7 @@ # class MemoryManager(object): + NO_NEXT_CHECK = r_int64(2 ** 63 - 1) def __init__(self): self.check_frequency = -1 @@ -36,12 +37,13 @@ # According to my estimates it's about 5e9 years given 1000 loops # per second self.current_generation = r_int64(1) - self.next_check = r_int64(-1) + self.next_check = self.NO_NEXT_CHECK self.alive_loops = {} + self._cleanup_jitcell_dicts = lambda: None def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: - self.next_check = r_int64(-1) + self.next_check = self.NO_NEXT_CHECK else: self.max_age = max_age if check_frequency <= 0: @@ -49,10 +51,11 @@ self.check_frequency = check_frequency self.next_check = self.current_generation + 1 - def next_generation(self): + def next_generation(self, do_cleanups_now=True): self.current_generation += 1 - if self.current_generation == self.next_check: + if do_cleanups_now and self.current_generation >= self.next_check: self._kill_old_loops_now() + self._cleanup_jitcell_dicts() self.next_check = self.current_generation + self.check_frequency def keep_loop_alive(self, looptoken): @@ -81,3 +84,22 @@ # a single one is not enough for all tests :-( rgc.collect(); rgc.collect(); rgc.collect() debug_stop("jit-mem-collect") + + def get_current_generation_uint(self): + """Return the current generation, possibly truncated to a uint. + To use only as an approximation for decaying counters.""" + return r_uint(self.current_generation) + + def record_jitcell_dict(self, callback): + """NOT_RPYTHON. The given jitcell_dict is a dict that needs + occasional clean-ups of old cells. A cell is old if it never + reached the threshold, and its counter decayed to a tiny value.""" + # note that the various jitcell_dicts have different RPython types, + # so we have to make a different function for each one. These + # functions are chained to each other: each calls the previous one. 
+ def cleanup_dict(): + callback() + cleanup_previous() + # + cleanup_previous = self._cleanup_jitcell_dicts + self._cleanup_jitcell_dicts = cleanup_dict diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -4,13 +4,15 @@ from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize from pypy.jit.metainterp.optimizeopt.heap import OptHeap from pypy.jit.metainterp.optimizeopt.vstring import OptString -from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll, OptInlineShortPreamble +from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll from pypy.jit.metainterp.optimizeopt.fficall import OptFfiCall from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify from pypy.jit.metainterp.optimizeopt.pure import OptPure from pypy.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce from pypy.rlib.jit import PARAMETERS from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.debug import debug_start, debug_stop, debug_print + ALL_OPTS = [('intbounds', OptIntBounds), ('rewrite', OptRewrite), @@ -28,8 +30,7 @@ ALL_OPTS_LIST = [name for name, _ in ALL_OPTS] ALL_OPTS_NAMES = ':'.join([name for name, _ in ALL_OPTS]) -def build_opt_chain(metainterp_sd, enable_opts, - inline_short_preamble=True, retraced=False): +def build_opt_chain(metainterp_sd, enable_opts): config = metainterp_sd.config optimizations = [] unroll = 'unroll' in enable_opts # 'enable_opts' is normally a dict @@ -45,12 +46,9 @@ optimizations.append(OptFfiCall()) if ('rewrite' not in enable_opts or 'virtualize' not in enable_opts - or 'heap' not in enable_opts): + or 'heap' not in enable_opts or 'unroll' not in enable_opts): optimizations.append(OptSimplify()) - if inline_short_preamble: - optimizations = [OptInlineShortPreamble(retraced)] + optimizations - return optimizations, unroll @@ -80,3 +78,21 @@ if __name__ == '__main__': print ALL_OPTS_NAMES + +def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True): + """Optimize loop.operations to remove internal overheadish operations. + """ + + debug_start("jit-optimize") + try: + loop.logops = metainterp_sd.logger_noopt.log_loop(loop.inputargs, + loop.operations) + optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts) + if unroll: + optimize_unroll(metainterp_sd, loop, optimizations, inline_short_preamble) + else: + optimizer = Optimizer(metainterp_sd, loop, optimizations) + optimizer.propagate_all_forward() + finally: + debug_stop("jit-optimize") + diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -7,7 +7,7 @@ from pypy.rlib.libffi import Func from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.annlowlevel import cast_base_ptr_to_instance -from pypy.rpython.lltypesystem import llmemory +from pypy.rpython.lltypesystem import llmemory, rffi class FuncInfo(object): @@ -234,10 +234,13 @@ # longlongs are treated as floats, see # e.g. 
llsupport/descr.py:getDescrClass is_float = True + elif kind == 'u': + # they're all False + pass else: assert False, "unsupported ffitype or kind" # - fieldsize = ffitype.c_size + fieldsize = rffi.getintfield(ffitype, 'c_size') return self.optimizer.cpu.interiorfielddescrof_dynamic( offset, width, fieldsize, is_pointer, is_float, is_signed ) diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -246,15 +246,16 @@ self.force_lazy_setfields_and_arrayitems_for_guard()) return opnum = op.getopnum() - if (opnum == rop.SETFIELD_GC or # handled specially - opnum == rop.SETFIELD_RAW or # no effect on GC struct/array - opnum == rop.SETARRAYITEM_GC or # handled specially - opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct - opnum == rop.STRSETITEM or # no effect on GC struct/array - opnum == rop.UNICODESETITEM or # no effect on GC struct/array - opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever - opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array - opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array + if (opnum == rop.SETFIELD_GC or # handled specially + opnum == rop.SETFIELD_RAW or # no effect on GC struct/array + opnum == rop.SETARRAYITEM_GC or # handled specially + opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct + opnum == rop.SETINTERIORFIELD_RAW or # no effect on GC struct + opnum == rop.STRSETITEM or # no effect on GC struct/array + opnum == rop.UNICODESETITEM or # no effect on GC struct/array + opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever + opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array + opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array return assert opnum != rop.CALL_PURE if (opnum == rop.CALL or diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -500,8 +500,9 @@ else: return CVAL_ZERO - def propagate_all_forward(self): - self.clear_newoperations() + def propagate_all_forward(self, clear=True): + if clear: + self.clear_newoperations() for op in self.loop.operations: self.first_optimization.propagate_forward(op) self.loop.operations = self.get_newoperations() diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -260,6 +260,16 @@ def optimize_GUARD_FALSE(self, op): self.optimize_guard(op, CONST_0) + def optimize_RECORD_KNOWN_CLASS(self, op): + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) + assert isinstance(expectedclassbox, Const) + realclassbox = value.get_constant_class(self.optimizer.cpu) + if realclassbox is not None: + assert realclassbox.same_constant(expectedclassbox) + return + value.make_constant_class(expectedclassbox, None) + def optimize_GUARD_CLASS(self, op): value = self.getvalue(op.getarg(0)) expectedclassbox = op.getarg(1) @@ -481,6 +491,9 @@ self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) self.emit_operation(op) + def optimize_SAME_AS(self, op): + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) + dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_', default=OptRewrite.emit_operation) optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD') diff --git 
a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -1,9 +1,12 @@ from pypy.jit.metainterp.optimizeopt.optimizer import Optimization from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method from pypy.jit.metainterp.resoperation import ResOperation, rop - +from pypy.jit.metainterp.history import TargetToken, JitCellToken class OptSimplify(Optimization): + def __init__(self): + self.last_label_descr = None + def optimize_CALL_PURE(self, op): args = op.getarglist() self.emit_operation(ResOperation(rop.CALL, args, op.result, @@ -28,6 +31,26 @@ def optimize_MARK_OPAQUE_PTR(self, op): pass + def optimize_RECORD_KNOWN_CLASS(self, op): + pass + + def optimize_LABEL(self, op): + self.last_label_descr = op.getdescr() + self.emit_operation(op) + + def optimize_JUMP(self, op): + descr = op.getdescr() + assert isinstance(descr, JitCellToken) + if not descr.target_tokens: + assert self.last_label_descr is not None + target_token = self.last_label_descr + assert isinstance(target_token, TargetToken) + assert target_token.targeting_jitcell_token is descr + op.setdescr(self.last_label_descr) + else: + assert len(descr.target_tokens) == 1 + op.setdescr(descr.target_tokens[0]) + self.emit_operation(op) dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -0,0 +1,200 @@ +from pypy.jit.metainterp.optimizeopt.test.test_util import ( + LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot) +from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken +from pypy.jit.metainterp.resoperation import rop, opname, ResOperation +from pypy.jit.metainterp.optimize import InvalidLoop +from py.test import raises + +class BaseTestMultiLabel(BaseTest): + enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" + + def optimize_loop(self, ops, expected): + loop = self.parse(ops) + if expected != "crash!": + expected = self.parse(expected) + + part = TreeLoop('part') + part.inputargs = loop.inputargs + part.start_resumedescr = FakeDescrWithSnapshot() + token = loop.original_jitcell_token + + optimized = TreeLoop('optimized') + optimized.inputargs = loop.inputargs + optimized.operations = [] + + labels = [i for i, op in enumerate(loop.operations) \ + if op.getopnum()==rop.LABEL] + prv = 0 + last_label = [] + for nxt in labels + [len(loop.operations)]: + assert prv != nxt + operations = last_label + loop.operations[prv:nxt] + if nxt < len(loop.operations): + label = loop.operations[nxt] + assert label.getopnum() == rop.LABEL + jumpop = ResOperation(rop.JUMP, label.getarglist(), + None, descr=token) + operations.append(jumpop) + part.operations = operations + self._do_optimize_loop(part, None) + if part.operations[-1].getopnum() == rop.LABEL: + last_label = [part.operations.pop()] + else: + last_label = [] + optimized.operations.extend(part.operations) + prv = nxt + 1 + + # + print + print "Optimized:" + if optimized.operations: + print '\n'.join([str(o) for o in optimized.operations]) + else: + print 'Failed!' 
+ print + + assert expected != "crash!", "should have raised an exception" + self.assert_equal(optimized, expected) + + return optimized + + def test_simple(self): + ops = """ + [i1] + i2 = int_add(i1, 1) + escape(i2) + label(i1) + i3 = int_add(i1, 1) + escape(i3) + jump(i1) + """ + expected = """ + [i1] + i2 = int_add(i1, 1) + escape(i2) + label(i1, i2) + escape(i2) + jump(i1, i2) + """ + self.optimize_loop(ops, expected) + + def test_forced_virtual(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + escape(p3) + jump(p3) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtuals_with_nonmatching_fields(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3, 1, descr=valuedescr) + label(p3) + p4 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p4, 1, descr=nextdescr) + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtual_arrays_with_nonmatching_lens(self): + ops = """ + [p1] + p2 = new_array(3, descr=arraydescr) + label(p2) + p4 = new_array(2, descr=arraydescr) + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_nonmatching_arraystruct_1(self): + ops = """ + [p1, f0] + p2 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p2, 2, f0, descr=complexrealdescr) + label(p2, f0) + p4 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p4, 2, f0, descr=compleximagdescr) + jump(p4, f0) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_nonmatching_arraystruct_2(self): + ops = """ + [p1, f0] + p2 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p2, 2, f0, descr=complexrealdescr) + label(p2, f0) + p4 = new_array(2, descr=complexarraydescr) + setinteriorfield_gc(p4, 0, f0, descr=complexrealdescr) + jump(p4, f0) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual_array(self): + ops = """ + [p1] + p3 = new_array(3, descr=arraydescr) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual_arraystruct(self): + ops = """ + [p1] + p3 = new_array(3, descr=complexarraydescr) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtual_turns_constant(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + guard_value(p3, ConstPtr(myptr)) [] + jump(p3) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtuals_turns_not_equal(self): + ops = """ + [p1, p2] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3, p3) + p4 = new_with_vtable(ConstClass(node_vtable)) + jump(p3, p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + +class TestLLtype(BaseTestMultiLabel, LLtypeMixin): + pass + diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -1,7 +1,8 @@ import py from pypy.rlib.objectmodel import instantiate from pypy.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, 
FakeMetaInterpStaticData) + LLtypeMixin, BaseTest, FakeMetaInterpStaticData, convert_old_style_to_targets) +from pypy.jit.metainterp.history import TargetToken, JitCellToken from pypy.jit.metainterp.test.test_compile import FakeLogger import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt import pypy.jit.metainterp.optimizeopt.virtualize as virtualize @@ -11,7 +12,6 @@ from pypy.jit.metainterp.resoperation import rop, opname, ResOperation from pypy.rlib.rarithmetic import LONG_BIT - def test_store_final_boxes_in_guard(): from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.resume import tag, TAGBOX @@ -116,9 +116,13 @@ enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap" def optimize_loop(self, ops, optops, call_pure_results=None): - loop = self.parse(ops) - expected = self.parse(optops) + token = JitCellToken() + loop.operations = [ResOperation(rop.LABEL, loop.inputargs, None, descr=TargetToken(token))] + \ + loop.operations + if loop.operations[-1].getopnum() == rop.JUMP: + loop.operations[-1].setdescr(token) + expected = convert_old_style_to_targets(self.parse(optops), jump=True) self._do_optimize_loop(loop, call_pure_results) print '\n'.join([str(o) for o in loop.operations]) self.assert_equal(loop, expected) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1,13 +1,13 @@ import py from pypy.rlib.objectmodel import instantiate from pypy.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, Storage, _sortboxes) + LLtypeMixin, BaseTest, Storage, _sortboxes, convert_old_style_to_targets) import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt import pypy.jit.metainterp.optimizeopt.virtualize as virtualize from pypy.jit.metainterp.optimizeopt import optimize_loop_1, ALL_OPTS_DICT, build_opt_chain from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt -from pypy.jit.metainterp.history import TreeLoop, LoopToken +from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken from pypy.jit.metainterp.jitprof import EmptyProfiler from pypy.jit.metainterp import executor, compile, resume, history from pypy.jit.metainterp.resoperation import rop, opname, ResOperation @@ -15,7 +15,7 @@ from pypy.jit.metainterp.optimizeopt.util import args_dict from pypy.jit.metainterp.optimizeopt.test.test_optimizebasic import FakeMetaInterpStaticData from pypy.config.pypyoption import get_pypy_config - +from pypy.jit.metainterp.optimizeopt.unroll import Inliner def test_build_opt_chain(): def check(chain, expected_names): @@ -23,49 +23,37 @@ assert names == expected_names # metainterp_sd = FakeMetaInterpStaticData(None) - chain, _ = build_opt_chain(metainterp_sd, "", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "") check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "heap:intbounds") - check(chain, ["OptInlineShortPreamble", "OptIntBounds", "OptHeap", "OptSimplify"]) + check(chain, ["OptIntBounds", "OptHeap", 
"OptSimplify"]) # chain, unroll = build_opt_chain(metainterp_sd, "unroll") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) assert unroll # - chain, _ = build_opt_chain(metainterp_sd, "aaa:bbb", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "aaa:bbb") check(chain, ["OptSimplify"]) # - chain, _ = build_opt_chain(metainterp_sd, "ffi", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "ffi") check(chain, ["OptFfiCall", "OptSimplify"]) # metainterp_sd.config = get_pypy_config(translating=True) assert not metainterp_sd.config.translation.jit_ffi - chain, _ = build_opt_chain(metainterp_sd, "ffi", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "ffi") check(chain, ["OptSimplify"]) # ____________________________________________________________ -class FakeDescr(compile.ResumeGuardDescr): - class rd_snapshot: - class prev: - prev = None - boxes = [] - boxes = [] - def clone_if_mutable(self): - return FakeDescr() - def __eq__(self, other): - return isinstance(other, Storage) or isinstance(other, FakeDescr) - - class BaseTestWithUnroll(BaseTest): enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" @@ -79,40 +67,41 @@ expected_preamble = self.parse(expected_preamble) if expected_short: expected_short = self.parse(expected_short) - loop.preamble = TreeLoop('preamble') - loop.preamble.inputargs = loop.inputargs - loop.preamble.token = LoopToken() - loop.preamble.start_resumedescr = FakeDescr() - # - self._do_optimize_loop(loop, call_pure_results) + + preamble = self.unroll_and_optimize(loop, call_pure_results) + # print print "Preamble:" - print loop.preamble.inputargs - if loop.preamble.operations: - print '\n'.join([str(o) for o in loop.preamble.operations]) + if preamble.operations: + print '\n'.join([str(o) for o in preamble.operations]) else: print 'Failed!' 
print print "Loop:" - print loop.inputargs print '\n'.join([str(o) for o in loop.operations]) print if expected_short: print "Short Preamble:" - short = loop.preamble.token.short_preamble[0] - print short.inputargs - print '\n'.join([str(o) for o in short.operations]) + short = loop.operations[0].getdescr().short_preamble + print '\n'.join([str(o) for o in short]) print assert expected != "crash!", "should have raised an exception" - self.assert_equal(loop, expected) + self.assert_equal(loop, convert_old_style_to_targets(expected, jump=True)) + assert loop.operations[0].getdescr() == loop.operations[-1].getdescr() if expected_preamble: - self.assert_equal(loop.preamble, expected_preamble, + self.assert_equal(preamble, convert_old_style_to_targets(expected_preamble, jump=False), text_right='expected preamble') + assert preamble.operations[-1].getdescr() == loop.operations[0].getdescr() if expected_short: - self.assert_equal(short, expected_short, + short_preamble = TreeLoop('short preamble') + assert short[0].getopnum() == rop.LABEL + short_preamble.inputargs = short[0].getarglist() + short_preamble.operations = short + self.assert_equal(short_preamble, convert_old_style_to_targets(expected_short, jump=True), text_right='expected short preamble') + assert short[-1].getdescr() == loop.operations[0].getdescr() return loop @@ -234,7 +223,7 @@ """ % expected_value self.optimize_loop(ops, expected) - def test_reverse_of_cast(self): + def test_reverse_of_cast_1(self): ops = """ [i0] p0 = cast_int_to_ptr(i0) @@ -246,6 +235,8 @@ jump(i0) """ self.optimize_loop(ops, expected) + + def test_reverse_of_cast_2(self): ops = """ [p0] i1 = cast_ptr_to_int(p0) @@ -1181,6 +1172,7 @@ i1 = getfield_gc(p0, descr=valuedescr) i2 = int_sub(i1, 1) i3 = int_add(i0, i1) + i4 = same_as(i2) # This same_as should be killed by backend jump(i3, i2, i1) """ expected = """ @@ -1252,10 +1244,10 @@ i1 = int_add(i0, 1) p1 = new_with_vtable(ConstClass(node_vtable2)) p2 = new_with_vtable(ConstClass(node_vtable2)) - setfield_gc(p0, p1, descr=nextdescr) + setfield_gc(p2, i1, descr=valuedescr) setfield_gc(p2, p1, descr=nextdescr) setfield_gc(p1, p2, descr=nextdescr) - setfield_gc(p2, i1, descr=valuedescr) + setfield_gc(p0, p1, descr=nextdescr) jump(p1) """ self.optimize_loop(ops, loop, preamble) @@ -1317,6 +1309,7 @@ p30 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p30, i28, descr=nextdescr) setfield_gc(p3, p30, descr=valuedescr) + p46 = same_as(p30) # This same_as should be killed by backend jump(i29, p30, p3) """ expected = """ @@ -1324,8 +1317,8 @@ i28 = int_add(i0, 1) i29 = int_add(i28, 1) p30 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p30, i28, descr=nextdescr) setfield_gc(p3, p30, descr=valuedescr) - setfield_gc(p30, i28, descr=nextdescr) jump(i29, p30, p3) """ self.optimize_loop(ops, expected, preamble) @@ -2118,7 +2111,9 @@ guard_true(i3) [] i4 = int_neg(i2) setfield_gc(p1, i2, descr=valuedescr) - jump(p1, i1, i2, i4, i4) + i7 = same_as(i2) # This same_as should be killed by backend + i6 = same_as(i4) + jump(p1, i1, i2, i4, i6) """ expected = """ [p1, i1, i2, i4, i5] @@ -2148,7 +2143,8 @@ i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) escape() - jump(p1, i2, i4, i4) + i5 = same_as(i4) + jump(p1, i2, i4, i5) """ expected = """ [p1, i2, i4, i5] @@ -2177,7 +2173,8 @@ i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) escape() - jump(p1, i2, i4, i4) + i5 = same_as(i4) + jump(p1, i2, i4, i5) """ expected = """ [p1, i2, i4, i5] @@ -2207,7 +2204,9 @@ guard_true(i5) [] i4 = int_neg(i2) 
setfield_gc(p1, i2, descr=valuedescr) - jump(p1, i1, i2, i4, i4) + i8 = same_as(i2) # This same_as should be killed by backend + i7 = same_as(i4) + jump(p1, i1, i2, i4, i7) """ expected = """ [p1, i1, i2, i4, i7] @@ -2433,7 +2432,8 @@ p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p4, descr=nextdescr) setfield_gc(p1, p2, descr=nextdescr) - jump(p1, i2, i4, p4, i4) + i101 = same_as(i4) + jump(p1, i2, i4, p4, i101) """ expected = """ [p1, i2, i4, p4, i5] @@ -3276,7 +3276,15 @@ setfield_gc(p1, i3, descr=valuedescr) jump(p1, i4, i3) ''' - self.optimize_loop(ops, ops, ops) + preamble = ''' + [p1, i1, i4] + setfield_gc(p1, i1, descr=valuedescr) + i3 = call_assembler(i1, descr=asmdescr) + setfield_gc(p1, i3, descr=valuedescr) + i143 = same_as(i3) # Should be killed by backend + jump(p1, i4, i3) + ''' + self.optimize_loop(ops, ops, preamble) def test_call_assembler_invalidates_heap_knowledge(self): ops = ''' @@ -3307,7 +3315,9 @@ setfield_gc(p1, i1, descr=valuedescr) i3 = call(p1, descr=plaincalldescr) setfield_gc(p1, i3, descr=valuedescr) - jump(p1, i4, i3, i3) + i148 = same_as(i3) + i147 = same_as(i3) + jump(p1, i4, i3, i148) ''' self.optimize_loop(ops, expected, preamble) @@ -3330,7 +3340,8 @@ setfield_gc(p1, i1, descr=valuedescr) i3 = call(p1, descr=plaincalldescr) setfield_gc(p1, i1, descr=valuedescr) - jump(p1, i4, i3, i3) + i151 = same_as(i3) + jump(p1, i4, i3, i151) ''' self.optimize_loop(ops, expected, preamble) @@ -3350,7 +3361,8 @@ escape(i1) escape(i2) i4 = call(123456, 4, i0, 6, descr=plaincalldescr) - jump(i0, i4, i4) + i153 = same_as(i4) + jump(i0, i4, i153) ''' expected = ''' [i0, i4, i5] @@ -3380,7 +3392,8 @@ escape(i2) i4 = call(123456, 4, i0, 6, descr=plaincalldescr) guard_no_exception() [] - jump(i0, i4, i4) + i155 = same_as(i4) + jump(i0, i4, i155) ''' expected = ''' [i0, i2, i3] @@ -4198,6 +4211,7 @@ preamble = """ [p0] i0 = strlen(p0) + i3 = same_as(i0) # Should be killed by backend jump(p0) """ expected = """ @@ -5418,6 +5432,7 @@ [p0] p1 = getfield_gc(p0, descr=valuedescr) setfield_gc(p0, p0, descr=valuedescr) + p4450 = same_as(p0) # Should be killed by backend jump(p0) """ expected = """ @@ -5653,7 +5668,8 @@ p3 = newstr(i3) copystrcontent(p1, p3, 0, 0, i1) copystrcontent(p2, p3, 0, i1, i2) - jump(p2, p3, i2) + i7 = same_as(i2) + jump(p2, p3, i7) """ expected = """ [p1, p2, i1] @@ -5728,7 +5744,9 @@ copystrcontent(p1, p5, 0, 0, i1) copystrcontent(p2, p5, 0, i1, i2) copystrcontent(p3, p5, 0, i12, i3) - jump(p2, p3, p5, i2, i3) + i129 = same_as(i2) + i130 = same_as(i3) + jump(p2, p3, p5, i129, i130) """ expected = """ [p1, p2, p3, i1, i2] @@ -5788,7 +5806,8 @@ [p1, i1, i2, i3] escape(i3) i4 = int_sub(i2, i1) - jump(p1, i1, i2, i4, i4) + i5 = same_as(i4) + jump(p1, i1, i2, i4, i5) """ expected = """ [p1, i1, i2, i3, i4] @@ -5813,7 +5832,8 @@ escape(i5) i4 = int_sub(i2, i1) setfield_gc(p2, i4, descr=valuedescr) - jump(p1, i1, i2, p2, i4, i4) + i8 = same_as(i4) + jump(p1, i1, i2, p2, i8, i4) """ expected = """ [p1, i1, i2, p2, i5, i6] @@ -5939,7 +5959,8 @@ p4 = newstr(i5) copystrcontent(p1, p4, i1, 0, i3) copystrcontent(p2, p4, 0, i3, i4) - jump(p4, i1, i2, p2, i5, i3, i4) + i9 = same_as(i4) + jump(p4, i1, i2, p2, i5, i3, i9) """ expected = """ [p1, i1, i2, p2, i5, i3, i4] @@ -6061,7 +6082,9 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, p3, p4, descr=strequaldescr) escape(i0) - jump(p1, p2, p3, i3, i1, i2) + i11 = same_as(i1) + i12 = same_as(i2) + jump(p1, p2, p3, i3, i11, i12) """ expected = """ [p1, p2, p3, i3, i1, i2] @@ -6281,6 +6304,7 @@ i1 = strlen(p1) 
i0 = int_eq(i1, 0) escape(i0) + i3 = same_as(i1) jump(p1, i0) """ self.optimize_strunicode_loop_extradescrs(ops, expected, preamble) @@ -6326,7 +6350,9 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, s"hello world", p4, descr=streq_nonnull_descr) escape(i0) - jump(p1, p2, i3, i1, i2) + i11 = same_as(i1) + i12 = same_as(i2) + jump(p1, p2, i3, i11, i12) """ expected = """ [p1, p2, i3, i1, i2] @@ -6482,6 +6508,21 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() + def test_record_known_class(self): + ops = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + record_known_class(p1, ConstClass(node_vtable)) + guard_class(p1, ConstClass(node_vtable)) [] + jump(p1) + """ + expected = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + jump(p1) + """ + self.optimize_loop(ops, expected) + def test_quasi_immut(self): ops = """ [p0, p1, i0] @@ -6614,7 +6655,8 @@ p188 = getarrayitem_gc(p187, 42, descr=) guard_value(p188, ConstPtr(myptr)) [] p25 = getfield_gc(ConstPtr(myptr), descr=otherdescr) - jump(p25, p187, i184, p25) + p26 = same_as(p25) + jump(p25, p187, i184, p26) """ short = """ [p1, p187, i184] @@ -6883,7 +6925,8 @@ [p9] i843 = strlen(p9) call(i843, descr=nonwritedescr) - jump(p9, i843) + i0 = same_as(i843) + jump(p9, i0) """ short = """ [p9] @@ -6999,6 +7042,40 @@ """ self.optimize_loop(ops, expected) + def test_duplicated_aliased_virtual(self): + ops = """ + [p1, p2] + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3, p3, descr=nextdescr) + p4 = getfield_gc(p3, descr=nextdescr) + jump(p3, p4) + """ + expected = """ + [] + jump() + """ + self.optimize_loop(ops, expected) + + def test_imported_aliased_virtual_in_failargs(self): + ops = """ + [p1, p2, i0] + i2 = int_lt(i0, 10) + guard_true(i2) [p1, p2] + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3, p3, descr=nextdescr) + p4 = getfield_gc(p3, descr=nextdescr) + i1 = int_add(i0, 1) + jump(p3, p4, i1) + """ + expected = """ + [i0] + i2 = int_lt(i0, 10) + guard_true(i2) [] + i1 = int_add(i0, 1) + jump(i1) + """ + self.optimize_loop(ops, expected) + def test_chained_virtuals(self): ops = """ [p0, p1] @@ -7575,7 +7652,8 @@ call(i2, descr=nonwritedescr) setfield_gc(p22, i1, descr=valuedescr) guard_nonnull_class(p18, ConstClass(node_vtable)) [] - jump(p22, p18, i1, i1) + i10 = same_as(i1) + jump(p22, p18, i1, i10) """ short = """ [p22, p18, i1] @@ -7677,6 +7755,22 @@ """ self.optimize_loop(ops, expected) + def test_setinteriorfield_should_not_clear_cache(self): + ops = """ + [i0, p0] + i2 = getfield_gc(p0, descr=adescr) + i3 = call(i2, descr=nonwritedescr) + setinteriorfield_raw(i0, i2, i3) + jump(i0, p0) + """ + expected = """ + [i0, p0, i2] + i3 = call(i2, descr=nonwritedescr) + setinteriorfield_raw(i0, i2, i3) + jump(i0, p0, i2) + """ + self.optimize_loop(ops, expected) + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/test/test_util.py b/pypy/jit/metainterp/optimizeopt/test/test_util.py --- a/pypy/jit/metainterp/optimizeopt/test/test_util.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_util.py @@ -8,7 +8,8 @@ from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstPtr, Const, TreeLoop, BoxObj, - ConstObj, AbstractDescr) + ConstObj, AbstractDescr, + JitCellToken, TargetToken) from pypy.jit.metainterp.optimizeopt.util import sort_descrs, equaloplists from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.codewriter.effectinfo import 
EffectInfo @@ -18,6 +19,8 @@ from pypy.jit.metainterp import compile, resume, history from pypy.jit.metainterp.jitprof import EmptyProfiler from pypy.config.pypyoption import get_pypy_config +from pypy.jit.metainterp.resoperation import rop, opname, ResOperation +from pypy.jit.metainterp.optimizeopt.unroll import Inliner def test_sort_descrs(): class PseudoDescr(AbstractDescr): @@ -344,6 +347,11 @@ self.config = get_pypy_config(translating=True) self.config.translation.jit_ffi = True + class logger_noopt: + @classmethod + def log_loop(*args): + pass + class warmrunnerdesc: class memory_manager: retrace_limit = 5 @@ -394,7 +402,7 @@ expected.operations, False, remap, text_right) def _do_optimize_loop(self, loop, call_pure_results): - from pypy.jit.metainterp.optimizeopt import optimize_loop_1 + from pypy.jit.metainterp.optimizeopt import optimize_trace from pypy.jit.metainterp.optimizeopt.util import args_dict self.loop = loop @@ -408,7 +416,83 @@ if hasattr(self, 'callinfocollection'): metainterp_sd.callinfocollection = self.callinfocollection # - optimize_loop_1(metainterp_sd, loop, self.enable_opts) + optimize_trace(metainterp_sd, loop, self.enable_opts) + + def unroll_and_optimize(self, loop, call_pure_results=None): + operations = loop.operations + jumpop = operations[-1] + assert jumpop.getopnum() == rop.JUMP + inputargs = loop.inputargs + + jump_args = jumpop.getarglist()[:] + operations = operations[:-1] + cloned_operations = [op.clone() for op in operations] + + preamble = TreeLoop('preamble') + preamble.inputargs = inputargs + preamble.start_resumedescr = FakeDescrWithSnapshot() + + token = JitCellToken() + preamble.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(token))] + \ + operations + \ + [ResOperation(rop.JUMP, jump_args, None, descr=token)] + self._do_optimize_loop(preamble, call_pure_results) + + assert preamble.operations[-1].getopnum() == rop.LABEL + + inliner = Inliner(inputargs, jump_args) + loop.start_resumedescr = preamble.start_resumedescr + loop.operations = [preamble.operations[-1]] + \ + [inliner.inline_op(op, clone=False) for op in cloned_operations] + \ + [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jump_args], + None, descr=token)] + #[inliner.inline_op(jumpop)] + assert loop.operations[-1].getopnum() == rop.JUMP + assert loop.operations[0].getopnum() == rop.LABEL + loop.inputargs = loop.operations[0].getarglist() + + self._do_optimize_loop(loop, call_pure_results) + extra_same_as = [] + while loop.operations[0].getopnum() != rop.LABEL: + extra_same_as.append(loop.operations[0]) + del loop.operations[0] + + # Hack to prevent random order of same_as ops + extra_same_as.sort(key=lambda op: str(preamble.operations).find(str(op.getarg(0)))) + + for op in extra_same_as: + preamble.operations.insert(-1, op) + + return preamble + + +class FakeDescr(compile.ResumeGuardDescr): + def clone_if_mutable(self): + return FakeDescr() + def __eq__(self, other): + return isinstance(other, FakeDescr) + +class FakeDescrWithSnapshot(compile.ResumeGuardDescr): + class rd_snapshot: + class prev: + prev = None + boxes = [] + boxes = [] + def clone_if_mutable(self): + return FakeDescrWithSnapshot() + def __eq__(self, other): + return isinstance(other, Storage) or isinstance(other, FakeDescrWithSnapshot) + + +def convert_old_style_to_targets(loop, jump): + newloop = TreeLoop(loop.name) + newloop.inputargs = loop.inputargs + newloop.operations = [ResOperation(rop.LABEL, loop.inputargs, None, descr=FakeDescr())] + \ + loop.operations + if not jump: + 
assert newloop.operations[-1].getopnum() == rop.JUMP + newloop.operations[-1] = ResOperation(rop.LABEL, newloop.operations[-1].getarglist(), None, descr=FakeDescr()) + return newloop # ____________________________________________________________ diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -1,11 +1,12 @@ from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.metainterp.optimizeopt.virtualstate import VirtualStateAdder, ShortBoxes +from pypy.jit.metainterp.optimizeopt.virtualstate import VirtualStateAdder, ShortBoxes, BadVirtualState from pypy.jit.metainterp.compile import ResumeGuardDescr -from pypy.jit.metainterp.history import TreeLoop, LoopToken +from pypy.jit.metainterp.history import TreeLoop, TargetToken, JitCellToken from pypy.jit.metainterp.jitexc import JitException from pypy.jit.metainterp.optimize import InvalidLoop, RetraceLoop from pypy.jit.metainterp.optimizeopt.optimizer import * from pypy.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds +from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.resume import Snapshot from pypy.rlib.debug import debug_print @@ -13,63 +14,11 @@ # FIXME: Introduce some VirtualOptimizer super class instead -def optimize_unroll(metainterp_sd, loop, optimizations): +def optimize_unroll(metainterp_sd, loop, optimizations, inline_short_preamble=True): opt = UnrollOptimizer(metainterp_sd, loop, optimizations) + opt.inline_short_preamble = inline_short_preamble opt.propagate_all_forward() -class Inliner(object): - def __init__(self, inputargs, jump_args): - assert len(inputargs) == len(jump_args) - self.argmap = {} - for i in range(len(inputargs)): - if inputargs[i] in self.argmap: - assert self.argmap[inputargs[i]] == jump_args[i] - else: - self.argmap[inputargs[i]] = jump_args[i] - self.snapshot_map = {None: None} - - def inline_op(self, newop, ignore_result=False, clone=True, - ignore_failargs=False): - if clone: - newop = newop.clone() - args = newop.getarglist() - newop.initarglist([self.inline_arg(a) for a in args]) - - if newop.is_guard(): - args = newop.getfailargs() - if args and not ignore_failargs: - newop.setfailargs([self.inline_arg(a) for a in args]) - else: - newop.setfailargs([]) - - if newop.result and not ignore_result: - old_result = newop.result - newop.result = newop.result.clonebox() - self.argmap[old_result] = newop.result - - self.inline_descr_inplace(newop.getdescr()) - - return newop - - def inline_descr_inplace(self, descr): - if isinstance(descr, ResumeGuardDescr): - descr.rd_snapshot = self.inline_snapshot(descr.rd_snapshot) - - def inline_arg(self, arg): - if arg is None: - return None - if isinstance(arg, Const): - return arg - return self.argmap[arg] - - def inline_snapshot(self, snapshot): - if snapshot in self.snapshot_map: - return self.snapshot_map[snapshot] - boxes = [self.inline_arg(a) for a in snapshot.boxes] - new_snapshot = Snapshot(self.inline_snapshot(snapshot.prev), boxes) - self.snapshot_map[snapshot] = new_snapshot - return new_snapshot - class UnrollableOptimizer(Optimizer): def setup(self): self.importable_values = {} @@ -101,14 +50,13 @@ become the preamble or entry bridge (don't think there is a distinction anymore)""" + inline_short_preamble = True + did_import = False + def __init__(self, metainterp_sd, loop, optimizations): self.optimizer = 
UnrollableOptimizer(metainterp_sd, loop, optimizations) - self.cloned_operations = [] - for op in self.optimizer.loop.operations: - newop = op.clone() - self.cloned_operations.append(newop) - def fix_snapshot(self, loop, jump_args, snapshot): + def fix_snapshot(self, jump_args, snapshot): if snapshot is None: return None snapshot_args = snapshot.boxes @@ -116,116 +64,348 @@ for a in snapshot_args: a = self.getvalue(a).get_key_box() new_snapshot_args.append(a) - prev = self.fix_snapshot(loop, jump_args, snapshot.prev) + prev = self.fix_snapshot(jump_args, snapshot.prev) return Snapshot(prev, new_snapshot_args) def propagate_all_forward(self): loop = self.optimizer.loop + self.optimizer.clear_newoperations() + + + start_label = loop.operations[0] + if start_label.getopnum() == rop.LABEL: + loop.operations = loop.operations[1:] + # We need to emit the label op before import_state() as emitting it + # will clear heap caches + self.optimizer.send_extra_operation(start_label) + else: + start_label = None + jumpop = loop.operations[-1] if jumpop.getopnum() == rop.JUMP: loop.operations = loop.operations[:-1] else: - loopop = None + jumpop = None - self.optimizer.propagate_all_forward() + self.import_state(start_label) + self.optimizer.propagate_all_forward(clear=False) + if not jumpop: + return + if self.jump_to_already_compiled_trace(jumpop): + # Found a compiled trace to jump to + if self.did_import: - if jumpop: - assert jumpop.getdescr() is loop.token - jump_args = jumpop.getarglist() - jumpop.initarglist([]) + self.close_bridge(start_label) + self.finilize_short_preamble(start_label) + return + + cell_token = jumpop.getdescr() + assert isinstance(cell_token, JitCellToken) + stop_label = ResOperation(rop.LABEL, jumpop.getarglist(), None, TargetToken(cell_token)) + + if not self.did_import: # Enforce the previous behaviour of always peeling exactly one iteration (for now) self.optimizer.flush() + KillHugeIntBounds(self.optimizer).apply() - KillHugeIntBounds(self.optimizer).apply() + loop.operations = self.optimizer.get_newoperations() + self.export_state(stop_label) + loop.operations.append(stop_label) + else: + assert stop_label + assert start_label + stop_target = stop_label.getdescr() + start_target = start_label.getdescr() + assert isinstance(stop_target, TargetToken) + assert isinstance(start_target, TargetToken) + assert stop_target.targeting_jitcell_token is start_target.targeting_jitcell_token + jumpop = ResOperation(rop.JUMP, stop_label.getarglist(), None, descr=start_label.getdescr()) + + self.close_loop(jumpop) + self.finilize_short_preamble(start_label) + + def export_state(self, targetop): + original_jump_args = targetop.getarglist() + jump_args = [self.getvalue(a).get_key_box() for a in original_jump_args] + + assert self.optimizer.loop.start_resumedescr + start_resumedescr = self.optimizer.loop.start_resumedescr.clone_if_mutable() + assert isinstance(start_resumedescr, ResumeGuardDescr) + start_resumedescr.rd_snapshot = self.fix_snapshot(jump_args, start_resumedescr.rd_snapshot) + # FIXME: I dont thnik we need fix_snapshot anymore + + modifier = VirtualStateAdder(self.optimizer) + virtual_state = modifier.get_virtual_state(jump_args) - loop.preamble.operations = self.optimizer.get_newoperations() - jump_args = [self.getvalue(a).get_key_box() for a in jump_args] + values = [self.getvalue(arg) for arg in jump_args] + inputargs = virtual_state.make_inputargs(values, self.optimizer) + short_inputargs = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True) - 
start_resumedescr = loop.preamble.start_resumedescr.clone_if_mutable() - self.start_resumedescr = start_resumedescr - assert isinstance(start_resumedescr, ResumeGuardDescr) - start_resumedescr.rd_snapshot = self.fix_snapshot(loop, jump_args, - start_resumedescr.rd_snapshot) + constant_inputargs = {} + for box in jump_args: + const = self.get_constant_box(box) + if const: + constant_inputargs[box] = const - modifier = VirtualStateAdder(self.optimizer) - virtual_state = modifier.get_virtual_state(jump_args) + short_boxes = ShortBoxes(self.optimizer, inputargs + constant_inputargs.keys()) + aliased_vrituals = {} + for i in range(len(original_jump_args)): + if original_jump_args[i] is not jump_args[i]: + if values[i].is_virtual(): + aliased_vrituals[original_jump_args[i]] = jump_args[i] + else: + short_boxes.alias(original_jump_args[i], jump_args[i]) + + self.optimizer.clear_newoperations() + for box in short_inputargs: + value = self.getvalue(box) + if value.is_virtual(): + value.force_box(self.optimizer) + inputarg_setup_ops = self.optimizer.get_newoperations() + + target_token = targetop.getdescr() + assert isinstance(target_token, TargetToken) + targetop.initarglist(inputargs) + target_token.virtual_state = virtual_state + target_token.short_preamble = [ResOperation(rop.LABEL, short_inputargs, None)] + target_token.start_resumedescr = start_resumedescr + target_token.exported_state = ExportedState(constant_inputargs, short_boxes, + inputarg_setup_ops, self.optimizer, + aliased_vrituals, jump_args) + + def import_state(self, targetop): + self.did_import = False + if not targetop: + # FIXME: Set up some sort of empty state with no virtuals? + return + target_token = targetop.getdescr() + if not target_token: + return + assert isinstance(target_token, TargetToken) + exported_state = target_token.exported_state + if not exported_state: + # FIXME: Set up some sort of empty state with no virtuals + return + self.did_import = True + + self.short = target_token.short_preamble[:] + self.short_seen = {} + self.short_boxes = exported_state.short_boxes.clone() + for box, const in exported_state.constant_inputargs.items(): + self.short_seen[box] = True + self.imported_state = exported_state + self.inputargs = targetop.getarglist() + self.initial_virtual_state = target_token.virtual_state + self.start_resumedescr = target_token.start_resumedescr + + seen = {} + for box in self.inputargs: + if box in seen: + continue + seen[box] = True + preamble_value = exported_state.optimizer.getvalue(box) + value = self.optimizer.getvalue(box) + value.import_from(preamble_value, self.optimizer) + + for newbox, oldbox in self.short_boxes.aliases.items(): + self.optimizer.make_equal_to(newbox, self.optimizer.getvalue(oldbox)) + + # Setup the state of the new optimizer by emiting the + # short operations and discarding the result + self.optimizer.emitting_dissabled = True + for op in exported_state.inputarg_setup_ops: + self.optimizer.send_extra_operation(op) + seen = {} + + for op in self.short_boxes.operations(): + self.ensure_short_op_emitted(op, self.optimizer, seen) + if op and op.result: + preamble_value = exported_state.optimizer.getvalue(op.result) + value = self.optimizer.getvalue(op.result) + if not value.is_virtual(): + imp = ValueImporter(self, preamble_value, op) + self.optimizer.importable_values[value] = imp + newvalue = self.optimizer.getvalue(op.result) + newresult = newvalue.get_key_box() + if newresult is not op.result and not newvalue.is_constant(): + self.short_boxes.alias(newresult, op.result) + op 
= ResOperation(rop.SAME_AS, [op.result], newresult) + self.optimizer._newoperations = [op] + self.optimizer._newoperations # XXX + #self.optimizer.getvalue(op.result).box = op.result # FIXME: HACK!!! + self.optimizer.flush() + self.optimizer.emitting_dissabled = False + + for box, key_box in exported_state.aliased_vrituals.items(): + self.optimizer.make_equal_to(box, self.getvalue(key_box)) + + def close_bridge(self, start_label): + inputargs = self.inputargs + short_jumpargs = inputargs[:] + + # We dont need to inline the short preamble we are creating as we are conneting + # the bridge to a different trace with a different short preamble + self.short_inliner = None + + newoperations = self.optimizer.get_newoperations() + self.boxes_created_this_iteration = {} + i = 0 + while newoperations[i].getopnum() != rop.LABEL: + i += 1 + while i < len(newoperations): + op = newoperations[i] + self.boxes_created_this_iteration[op.result] = True + args = op.getarglist() + if op.is_guard(): + args = args + op.getfailargs() + for a in args: + self.import_box(a, inputargs, short_jumpargs, []) + i += 1 + newoperations = self.optimizer.get_newoperations() + self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=start_label.getdescr())) + + def close_loop(self, jumpop): + virtual_state = self.initial_virtual_state + short_inputargs = self.short[0].getarglist() + constant_inputargs = self.imported_state.constant_inputargs + inputargs = self.inputargs + short_jumpargs = inputargs[:] + + # Construct jumpargs from the virtual state + original_jumpargs = jumpop.getarglist()[:] + values = [self.getvalue(arg) for arg in jumpop.getarglist()] + try: + jumpargs = virtual_state.make_inputargs(values, self.optimizer) + except BadVirtualState: + raise InvalidLoop + jumpop.initarglist(jumpargs) + + # Inline the short preamble at the end of the loop + jmp_to_short_args = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True) + assert len(short_inputargs) == len(jmp_to_short_args) + args = {} + for i in range(len(short_inputargs)): + if short_inputargs[i] in args: + if args[short_inputargs[i]] != jmp_to_short_args[i]: + raise InvalidLoop + args[short_inputargs[i]] = jmp_to_short_args[i] + self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) + for box, const in constant_inputargs.items(): + self.short_inliner.argmap[box] = const + for op in self.short[1:]: + newop = self.short_inliner.inline_op(op) + self.optimizer.send_extra_operation(newop) + + # Import boxes produced in the preamble but used in the loop + newoperations = self.optimizer.get_newoperations() + self.boxes_created_this_iteration = {} + i = j = 0 + while newoperations[i].getopnum() != rop.LABEL: + i += 1 + while i < len(newoperations) or j < len(jumpargs): + if i == len(newoperations): + while j < len(jumpargs): + a = jumpargs[j] + if self.optimizer.loop.logops: + debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) + self.import_box(a, inputargs, short_jumpargs, jumpargs) + j += 1 + else: + op = newoperations[i] + + self.boxes_created_this_iteration[op.result] = True + args = op.getarglist() + if op.is_guard(): + args = args + op.getfailargs() + + if self.optimizer.loop.logops: + debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) + for a in args: + if self.optimizer.loop.logops: + debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) + self.import_box(a, inputargs, short_jumpargs, jumpargs) + i += 1 + newoperations = self.optimizer.get_newoperations() + + jumpop.initarglist(jumpargs) + 
self.optimizer.send_extra_operation(jumpop) + self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=jumpop.getdescr())) + + # Verify that the virtual state at the end of the loop is one + # that is compatible with the virtual state at the start of the loop + modifier = VirtualStateAdder(self.optimizer) + final_virtual_state = modifier.get_virtual_state(original_jumpargs) + debug_start('jit-log-virtualstate') + virtual_state.debug_print('Closed loop with ') + bad = {} + if not virtual_state.generalization_of(final_virtual_state, bad): + # We ended up with a virtual state that is not compatible + # and we are thus unable to jump to the start of the loop + final_virtual_state.debug_print("Bad virtual state at end of loop, ", + bad) + debug_stop('jit-log-virtualstate') + raise InvalidLoop - values = [self.getvalue(arg) for arg in jump_args] - inputargs = virtual_state.make_inputargs(values, self.optimizer) - short_inputargs = virtual_state.make_inputargs(values, self.optimizer, - keyboxes=True) + debug_stop('jit-log-virtualstate') - self.constant_inputargs = {} - for box in jump_args: - const = self.get_constant_box(box) - if const: - self.constant_inputargs[box] = const + maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards + if self.optimizer.emitted_guards > maxguards: + target_token = jumpop.getdescr() + assert isinstance(target_token, TargetToken) + target_token.targeting_jitcell_token.retraced_count = sys.maxint + + def finilize_short_preamble(self, start_label): + short = self.short + assert short[-1].getopnum() == rop.JUMP + target_token = start_label.getdescr() + assert isinstance(target_token, TargetToken) - sb = ShortBoxes(self.optimizer, inputargs + self.constant_inputargs.keys()) - self.short_boxes = sb + # Turn guards into conditional jumps to the preamble + for i in range(len(short)): + op = short[i] + if op.is_guard(): + op = op.clone() + op.setfailargs(None) + descr = target_token.start_resumedescr.clone_if_mutable() + op.setdescr(descr) + short[i] = op + + # Clone ops and boxes to get private versions and + short_inputargs = short[0].getarglist() + boxmap = {} + newargs = [None] * len(short_inputargs) + for i in range(len(short_inputargs)): + a = short_inputargs[i] + if a in boxmap: + newargs[i] = boxmap[a] + else: + newargs[i] = a.clonebox() + boxmap[a] = newargs[i] + inliner = Inliner(short_inputargs, newargs) + for box, const in self.imported_state.constant_inputargs.items(): + inliner.argmap[box] = const + for i in range(len(short)): + short[i] = inliner.inline_op(short[i]) + + target_token.start_resumedescr = self.start_resumedescr.clone_if_mutable() + inliner.inline_descr_inplace(target_token.start_resumedescr) + + # Forget the values to allow them to be freed + for box in short[0].getarglist(): + box.forget_value() + for op in short: + if op.result: + op.result.forget_value() + target_token.short_preamble = self.short + target_token.exported_state = None + + + def FIXME_old_stuff(): preamble_optimizer = self.optimizer loop.preamble.quasi_immutable_deps = ( self.optimizer.quasi_immutable_deps) self.optimizer = self.optimizer.new() loop.quasi_immutable_deps = self.optimizer.quasi_immutable_deps - logops = self.optimizer.loop.logops - if logops: - args = ", ".join([logops.repr_of_arg(arg) for arg in inputargs]) - debug_print('inputargs: ' + args) - args = ", ".join([logops.repr_of_arg(arg) for arg in short_inputargs]) - debug_print('short inputargs: ' + args) - self.short_boxes.debug_print(logops) - - - # Force virtuals 
amoung the jump_args of the preamble to get the - # operations needed to setup the proper state of those virtuals - # in the peeled loop - inputarg_setup_ops = [] - preamble_optimizer.clear_newoperations() - seen = {} - for box in inputargs: - if box in seen: - continue - seen[box] = True - preamble_value = preamble_optimizer.getvalue(box) - value = self.optimizer.getvalue(box) - value.import_from(preamble_value, self.optimizer) - for box in short_inputargs: - if box in seen: - continue - seen[box] = True - value = preamble_optimizer.getvalue(box) - value.force_box(preamble_optimizer) - inputarg_setup_ops += preamble_optimizer.get_newoperations() - - # Setup the state of the new optimizer by emiting the - # short preamble operations and discarding the result - self.optimizer.emitting_dissabled = True - for op in inputarg_setup_ops: - self.optimizer.send_extra_operation(op) - seen = {} - for op in self.short_boxes.operations(): - self.ensure_short_op_emitted(op, self.optimizer, seen) - if op and op.result: - preamble_value = preamble_optimizer.getvalue(op.result) - value = self.optimizer.getvalue(op.result) - if not value.is_virtual(): - imp = ValueImporter(self, preamble_value, op) - self.optimizer.importable_values[value] = imp - newresult = self.optimizer.getvalue(op.result).get_key_box() - if newresult is not op.result: - self.short_boxes.alias(newresult, op.result) - self.optimizer.flush() - self.optimizer.emitting_dissabled = False - - initial_inputargs_len = len(inputargs) - self.inliner = Inliner(loop.inputargs, jump_args) - - - short = self.inline(inputargs, self.cloned_operations, - loop.inputargs, short_inputargs, - virtual_state) loop.inputargs = inputargs args = [preamble_optimizer.getvalue(self.short_boxes.original(a)).force_box(preamble_optimizer)\ @@ -241,149 +421,7 @@ loop.preamble.token.retraced_count = sys.maxint if short: - assert short[-1].getopnum() == rop.JUMP - short[-1].setdescr(loop.token) - - # Turn guards into conditional jumps to the preamble - for i in range(len(short)): - op = short[i] - if op.is_guard(): - op = op.clone() - op.setfailargs(None) - descr = self.start_resumedescr.clone_if_mutable() - op.setdescr(descr) - short[i] = op - - short_loop = TreeLoop('short preamble') - short_loop.inputargs = short_inputargs - short_loop.operations = short - - # Clone ops and boxes to get private versions and - boxmap = {} - newargs = [None] * len(short_loop.inputargs) - for i in range(len(short_loop.inputargs)): - a = short_loop.inputargs[i] - if a in boxmap: - newargs[i] = boxmap[a] - else: - newargs[i] = a.clonebox() - boxmap[a] = newargs[i] - inliner = Inliner(short_loop.inputargs, newargs) - for box, const in self.constant_inputargs.items(): - inliner.argmap[box] = const - short_loop.inputargs = newargs - ops = [inliner.inline_op(op) for op in short_loop.operations] - short_loop.operations = ops - descr = self.start_resumedescr.clone_if_mutable() - inliner.inline_descr_inplace(descr) - short_loop.start_resumedescr = descr - - assert isinstance(loop.preamble.token, LoopToken) - if loop.preamble.token.short_preamble: - loop.preamble.token.short_preamble.append(short_loop) - else: - loop.preamble.token.short_preamble = [short_loop] - short_loop.virtual_state = virtual_state - - # Forget the values to allow them to be freed - for box in short_loop.inputargs: - box.forget_value() - for op in short_loop.operations: - if op.result: - op.result.forget_value() - - def inline(self, inputargs, loop_operations, loop_args, short_inputargs, virtual_state): - inliner = 
self.inliner - - short_jumpargs = inputargs[:] - - short = self.short = [] - short_seen = self.short_seen = {} - for box, const in self.constant_inputargs.items(): - short_seen[box] = True - - # This loop is equivalent to the main optimization loop in - # Optimizer.propagate_all_forward - jumpop = None - for newop in loop_operations: - newop = inliner.inline_op(newop, clone=False) - if newop.getopnum() == rop.JUMP: - jumpop = newop - break - - #self.optimizer.first_optimization.propagate_forward(newop) - self.optimizer.send_extra_operation(newop) - - self.boxes_created_this_iteration = {} - - assert jumpop - original_jumpargs = jumpop.getarglist()[:] - values = [self.getvalue(arg) for arg in jumpop.getarglist()] - jumpargs = virtual_state.make_inputargs(values, self.optimizer) - jumpop.initarglist(jumpargs) - jmp_to_short_args = virtual_state.make_inputargs(values, self.optimizer, - keyboxes=True) - self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) - - for box, const in self.constant_inputargs.items(): - self.short_inliner.argmap[box] = const - - for op in short: - newop = self.short_inliner.inline_op(op) - self.optimizer.send_extra_operation(newop) - - newoperations = self.optimizer.get_newoperations() - - i = j = 0 - while i < len(newoperations) or j < len(jumpargs): - if i == len(newoperations): - while j < len(jumpargs): - a = jumpargs[j] - if self.optimizer.loop.logops: - debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) - self.import_box(a, inputargs, short, short_jumpargs, - jumpargs, short_seen) - j += 1 - else: - op = newoperations[i] - - self.boxes_created_this_iteration[op.result] = True - args = op.getarglist() - if op.is_guard(): - args = args + op.getfailargs() - - if self.optimizer.loop.logops: - debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) - for a in args: - if self.optimizer.loop.logops: - debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) - self.import_box(a, inputargs, short, short_jumpargs, - jumpargs, short_seen) - i += 1 - newoperations = self.optimizer.get_newoperations() - - jumpop.initarglist(jumpargs) - self.optimizer.send_extra_operation(jumpop) - short.append(ResOperation(rop.JUMP, short_jumpargs, None)) - - modifier = VirtualStateAdder(self.optimizer) - final_virtual_state = modifier.get_virtual_state(original_jumpargs) - debug_start('jit-log-virtualstate') - virtual_state.debug_print('Closed loop with ') - bad = {} - if not virtual_state.generalization_of(final_virtual_state, bad): - # We ended up with a virtual state that is not compatible - # and we are thus unable to jump to the start of the loop - # XXX Is it possible to end up here? If so, consider: - # - Fallback on having the preamble jump to itself? - # - Would virtual_state.generate_guards make sense here? 
- final_virtual_state.debug_print("Bad virtual state at end of loop, ", - bad) - debug_stop('jit-log-virtualstate') - raise InvalidLoop - debug_stop('jit-log-virtualstate') - - return short + pass def ensure_short_op_emitted(self, op, optimizer, seen): if op is None: @@ -399,19 +437,18 @@ guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) optimizer.send_extra_operation(guard) - def add_op_to_short(self, op, short, short_seen, emit=True, guards_needed=False): + def add_op_to_short(self, op, emit=True, guards_needed=False): if op is None: return None - if op.result is not None and op.result in short_seen: - if emit: + if op.result is not None and op.result in self.short_seen: + if emit and self.short_inliner: return self.short_inliner.inline_arg(op.result) else: return None for a in op.getarglist(): - if not isinstance(a, Const) and a not in short_seen: - self.add_op_to_short(self.short_boxes.producer(a), short, short_seen, - emit, guards_needed) + if not isinstance(a, Const) and a not in self.short_seen: + self.add_op_to_short(self.short_boxes.producer(a), emit, guards_needed) if op.is_guard(): descr = self.start_resumedescr.clone_if_mutable() op.setdescr(descr) @@ -421,9 +458,9 @@ else: value_guards = [] - short.append(op) - short_seen[op.result] = True - if emit: + self.short.append(op) + self.short_seen[op.result] = True + if emit and self.short_inliner: newop = self.short_inliner.inline_op(op) self.optimizer.send_extra_operation(newop) else: @@ -432,23 +469,22 @@ if op.is_ovf(): # FIXME: ensure that GUARD_OVERFLOW:ed ops not end up here guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) - self.add_op_to_short(guard, short, short_seen, emit, guards_needed) + self.add_op_to_short(guard, emit, guards_needed) for guard in value_guards: - self.add_op_to_short(guard, short, short_seen, emit, guards_needed) + self.add_op_to_short(guard, emit, guards_needed) if newop: return newop.result return None - def import_box(self, box, inputargs, short, short_jumpargs, - jumpargs, short_seen): + def import_box(self, box, inputargs, short_jumpargs, jumpargs): if isinstance(box, Const) or box in inputargs: return if box in self.boxes_created_this_iteration: return short_op = self.short_boxes.producer(box) - newresult = self.add_op_to_short(short_op, short, short_seen) + newresult = self.add_op_to_short(short_op) short_jumpargs.append(short_op.result) inputargs.append(box) @@ -456,98 +492,94 @@ if box in self.optimizer.values: box = self.optimizer.values[box].force_box(self.optimizer) jumpargs.append(box) - -class OptInlineShortPreamble(Optimization): - def __init__(self, retraced): - self.retraced = retraced + def jump_to_already_compiled_trace(self, jumpop): + assert jumpop.getopnum() == rop.JUMP + cell_token = jumpop.getdescr() - def new(self): - return OptInlineShortPreamble(self.retraced) + assert isinstance(cell_token, JitCellToken) + if not cell_token.target_tokens: + return False - def propagate_forward(self, op): - if op.getopnum() == rop.JUMP: - loop_token = op.getdescr() - assert isinstance(loop_token, LoopToken) - short = loop_token.short_preamble - if short: - args = op.getarglist() - modifier = VirtualStateAdder(self.optimizer) - virtual_state = modifier.get_virtual_state(args) - debug_start('jit-log-virtualstate') - virtual_state.debug_print("Looking for ") + if not self.inline_short_preamble: + assert cell_token.target_tokens[0].virtual_state is None + jumpop.setdescr(cell_token.target_tokens[0]) + self.optimizer.send_extra_operation(jumpop) + return True - for sh in short: - ok = 
False - extra_guards = [] + args = jumpop.getarglist() + modifier = VirtualStateAdder(self.optimizer) + virtual_state = modifier.get_virtual_state(args) + debug_start('jit-log-virtualstate') + virtual_state.debug_print("Looking for ") - bad = {} - debugmsg = 'Did not match ' - if sh.virtual_state.generalization_of(virtual_state, bad): - ok = True - debugmsg = 'Matched ' - else: - try: - cpu = self.optimizer.cpu - sh.virtual_state.generate_guards(virtual_state, - args, cpu, - extra_guards) + for target in cell_token.target_tokens: + if not target.virtual_state: + continue + ok = False + extra_guards = [] - ok = True - debugmsg = 'Guarded to match ' - except InvalidLoop: - pass - sh.virtual_state.debug_print(debugmsg, bad) - - if ok: - debug_stop('jit-log-virtualstate') + bad = {} + debugmsg = 'Did not match ' + if target.virtual_state.generalization_of(virtual_state, bad): + ok = True + debugmsg = 'Matched ' + else: + try: + cpu = self.optimizer.cpu + target.virtual_state.generate_guards(virtual_state, + args, cpu, + extra_guards) - values = [self.getvalue(arg) - for arg in op.getarglist()] - args = sh.virtual_state.make_inputargs(values, self.optimizer, - keyboxes=True) - inliner = Inliner(sh.inputargs, args) - - for guard in extra_guards: - if guard.is_guard(): - descr = sh.start_resumedescr.clone_if_mutable() - inliner.inline_descr_inplace(descr) - guard.setdescr(descr) - self.emit_operation(guard) - - try: - for shop in sh.operations: - newop = inliner.inline_op(shop) - self.emit_operation(newop) - except InvalidLoop: - debug_print("Inlining failed unexpectedly", - "jumping to preamble instead") - self.emit_operation(op) - return + ok = True + debugmsg = 'Guarded to match ' + except InvalidLoop: + pass + target.virtual_state.debug_print(debugmsg, bad) + + if ok: debug_stop('jit-log-virtualstate') - retraced_count = loop_token.retraced_count - limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit - if not self.retraced and retraced_count self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -180,10 +188,15 @@ self.arraydescr is other.arraydescr) def enum_forced_boxes(self, boxes, value, optimizer): - assert isinstance(value, virtualize.VArrayValue) - assert value.is_virtual() + if not isinstance(value, virtualize.VArrayValue): + raise BadVirtualState + if not value.is_virtual(): + raise BadVirtualState for i in range(len(self.fieldstate)): - v = value._items[i] + try: + v = value._items[i] + except IndexError: + raise BadVirtualState s = self.fieldstate[i] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -248,12 +261,19 @@ s.enum(virtual_state) def enum_forced_boxes(self, boxes, value, optimizer): - assert isinstance(value, virtualize.VArrayStructValue) - assert value.is_virtual() + if not isinstance(value, virtualize.VArrayStructValue): + raise BadVirtualState + if not value.is_virtual(): + raise BadVirtualState p = 0 for i in range(len(self.fielddescrs)): for j in range(len(self.fielddescrs[i])): - v = value._items[i][self.fielddescrs[i][j]] + try: + v = value._items[i][self.fielddescrs[i][j]] + except IndexError: + raise BadVirtualState + except KeyError: + raise BadVirtualState s = self.fieldstate[p] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -546,18 +566,27 @@ self.aliases = {} self.rename = {} self.optimizer = optimizer - for box in surviving_boxes: - self.potential_ops[box] = None - optimizer.produce_potential_short_preamble_ops(self) - self.short_boxes = {} - self.short_boxes_in_production = 
{} + if surviving_boxes is not None: + for box in surviving_boxes: + self.potential_ops[box] = None + optimizer.produce_potential_short_preamble_ops(self) - for box in self.potential_ops.keys(): - try: - self.produce_short_preamble_box(box) - except BoxNotProducable: - pass + self.short_boxes = {} + self.short_boxes_in_production = {} + + for box in self.potential_ops.keys(): + try: + self.produce_short_preamble_box(box) + except BoxNotProducable: + pass + + def clone(self): + sb = ShortBoxes(self.optimizer, None) + sb.aliases.update(self.aliases) + sb.short_boxes = {} + sb.short_boxes.update(self.short_boxes) + return sb def prioritized_alternatives(self, box): if box not in self.alternatives: @@ -598,6 +627,7 @@ newbox = newop.result = op.result.clonebox() self.short_boxes[newop.result] = newop value = self.optimizer.getvalue(box) + self.optimizer.emit_operation(ResOperation(rop.SAME_AS, [box], newbox)) self.optimizer.make_equal_to(newbox, value) else: self.short_boxes[box] = op diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -8,7 +8,7 @@ from pypy.jit.metainterp import history, compile, resume from pypy.jit.metainterp.history import Const, ConstInt, ConstPtr, ConstFloat -from pypy.jit.metainterp.history import Box +from pypy.jit.metainterp.history import Box, TargetToken from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp import executor from pypy.jit.metainterp.logger import Logger @@ -22,7 +22,6 @@ from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr from pypy.jit.codewriter import heaptracker from pypy.jit.metainterp.optimizeopt.util import args_dict_box -from pypy.jit.metainterp.optimize import RetraceLoop # ____________________________________________________________ @@ -243,6 +242,18 @@ def opimpl_mark_opaque_ptr(self, box): return self.execute(rop.MARK_OPAQUE_PTR, box) + @arguments("box", "box") + def opimpl_record_known_class(self, box, clsbox): + from pypy.rpython.lltypesystem import llmemory + if self.metainterp.heapcache.is_class_known(box): + return + adr = clsbox.getaddr() + bounding_class = llmemory.cast_adr_to_ptr(adr, rclass.CLASSTYPE) + if bounding_class.subclassrange_max - bounding_class.subclassrange_min == 1: + # precise class knowledge, this can be used + self.execute(rop.RECORD_KNOWN_CLASS, box, clsbox) + self.metainterp.heapcache.class_now_known(box) + @arguments("box") def _opimpl_any_return(self, box): self.metainterp.finishframe(box) @@ -1555,10 +1566,17 @@ self.portal_trace_positions = [] self.free_frames_list = [] self.last_exc_value_box = None - self.retracing_loop_from = None + self.partial_trace = None + self.retracing_from = -1 self.call_pure_results = args_dict_box() self.heapcache = HeapCache() + def retrace_needed(self, trace): + self.partial_trace = trace + self.retracing_from = len(self.history.operations) - 1 + self.heapcache.reset() + + def perform_call(self, jitcode, boxes, greenkey=None): # causes the metainterp to enter the given subfunction f = self.newframe(jitcode, greenkey) @@ -1778,7 +1796,6 @@ self.staticdata.profiler.count(reason) debug_print('~~~ ABORTING TRACING') self.staticdata.stats.aborted() - self.resumekey.reset_counter_from_failure() def blackhole_if_trace_too_long(self): warmrunnerstate = self.jitdriver_sd.warmstate @@ -1793,7 +1810,7 @@ def _interpret(self): # Execute the frames forward until we raise a DoneWithThisFrame, - # a ExitFrameWithException, or a GenerateMergePoint exception. 
+ # a ExitFrameWithException, or a ContinueRunningNormally exception. self.staticdata.stats.entered() while True: self.framestack[-1].run_one_step() @@ -1841,8 +1858,6 @@ self.seen_loop_header_for_jdindex = -1 try: self.interpret() - except GenerateMergePoint, gmp: - return self.designate_target_loop(gmp) except SwitchToBlackhole, stb: self.run_blackhole_interp_to_cancel_tracing(stb) assert False, "should always raise" @@ -1877,8 +1892,6 @@ if self.resumekey_original_loop_token is None: # very rare case raise SwitchToBlackhole(ABORT_BRIDGE) self.interpret() - except GenerateMergePoint, gmp: - return self.designate_target_loop(gmp) except SwitchToBlackhole, stb: self.run_blackhole_interp_to_cancel_tracing(stb) assert False, "should always raise" @@ -1926,14 +1939,9 @@ # that failed; # - if self.resumekey is a ResumeFromInterpDescr, it starts directly # from the interpreter. - if not self.retracing_loop_from: - try: - self.compile_bridge(live_arg_boxes) - except RetraceLoop: - start = len(self.history.operations) - self.current_merge_points.append((live_arg_boxes, start)) - self.retracing_loop_from = RetraceState(self, live_arg_boxes) - return + if not self.partial_trace: + # FIXME: Support a retrace to be a bridge as well as a loop + self.compile_trace(live_arg_boxes, resumedescr) # raises in case it works -- which is the common case, hopefully, # at least for bridges starting from a guard. @@ -1955,14 +1963,10 @@ else: # Found! Compile it as a loop. # raises in case it works -- which is the common case - if self.retracing_loop_from and \ - self.retracing_loop_from.merge_point == j: - bridge_arg_boxes = self.retracing_loop_from.live_arg_boxes - self.compile_bridge_and_loop(original_boxes, \ - live_arg_boxes, start, - bridge_arg_boxes, resumedescr) - else: - self.compile(original_boxes, live_arg_boxes, start, resumedescr) + if self.partial_trace: + if start != self.retracing_from: + raise SwitchToBlackhole(ABORT_BAD_LOOP) # For now + self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr) # creation of the loop was cancelled! self.staticdata.log('cancelled, tracing more...') #self.staticdata.log('cancelled, stopping tracing') @@ -1972,12 +1976,48 @@ start = len(self.history.operations) self.current_merge_points.append((live_arg_boxes, start)) - def designate_target_loop(self, gmp): - loop_token = gmp.target_loop_token + def _unpack_boxes(self, boxes, start, stop): + ints = []; refs = []; floats = [] + for i in range(start, stop): + box = boxes[i] + if box.type == history.INT: ints.append(box.getint()) + elif box.type == history.REF: refs.append(box.getref_base()) + elif box.type == history.FLOAT:floats.append(box.getfloatstorage()) + else: assert 0 + return ints[:], refs[:], floats[:] + + def raise_continue_running_normally(self, live_arg_boxes, loop_token): + self.history.inputargs = None + self.history.operations = None + # For simplicity, we just raise ContinueRunningNormally here and + # ignore the loop_token passed in. It means that we go back to + # interpreted mode, but it should come back very quickly to the + # JIT, find probably the same 'loop_token', and execute it. 
+ if we_are_translated(): + num_green_args = self.jitdriver_sd.num_green_args + gi, gr, gf = self._unpack_boxes(live_arg_boxes, 0, num_green_args) + ri, rr, rf = self._unpack_boxes(live_arg_boxes, num_green_args, + len(live_arg_boxes)) + CRN = self.staticdata.ContinueRunningNormally + raise CRN(gi, gr, gf, ri, rr, rf) + else: + # However, in order to keep the existing tests working + # (which are based on the assumption that 'loop_token' is + # directly used here), a bit of custom non-translatable code... + self._nontranslated_run_directly(live_arg_boxes, loop_token) + assert 0, "unreachable" + + def _nontranslated_run_directly(self, live_arg_boxes, loop_token): + "NOT_RPYTHON" + args = [] num_green_args = self.jitdriver_sd.num_green_args - residual_args = gmp.argboxes[num_green_args:] - history.set_future_values(self.cpu, residual_args) - return loop_token + num_red_args = self.jitdriver_sd.num_red_args + for box in live_arg_boxes[num_green_args:num_green_args+num_red_args]: + if box.type == history.INT: args.append(box.getint()) + elif box.type == history.REF: args.append(box.getref_base()) + elif box.type == history.FLOAT: args.append(box.getfloatstorage()) + else: assert 0 + self.jitdriver_sd.warmstate.execute_assembler(loop_token, *args) def prepare_resume_from_failure(self, opnum, dont_change_position=False): frame = self.framestack[-1] @@ -2018,54 +2058,57 @@ from pypy.jit.metainterp.resoperation import opname raise NotImplementedError(opname[opnum]) - def get_compiled_merge_points(self, greenkey): - """Get the list of looptokens corresponding to the greenkey. - Turns the (internal) list of weakrefs into regular refs. - """ + def get_procedure_token(self, greenkey): cell = self.jitdriver_sd.warmstate.jit_cell_at_key(greenkey) - return cell.get_compiled_merge_points() + return cell.get_procedure_token() + + def compile_loop(self, original_boxes, live_arg_boxes, start, start_resumedescr): + num_green_args = self.jitdriver_sd.num_green_args + greenkey = original_boxes[:num_green_args] + if not self.partial_trace: + assert self.get_procedure_token(greenkey) is None or \ + self.get_procedure_token(greenkey).target_tokens is None + if self.partial_trace: + target_token = compile.compile_retrace(self, greenkey, start, + original_boxes[num_green_args:], + live_arg_boxes[num_green_args:], + start_resumedescr, self.partial_trace, + self.resumekey) + else: + target_token = compile.compile_loop(self, greenkey, start, + original_boxes[num_green_args:], + live_arg_boxes[num_green_args:], + start_resumedescr) + if target_token is not None: + assert isinstance(target_token, TargetToken) + self.jitdriver_sd.warmstate.attach_procedure_to_interp(greenkey, target_token.targeting_jitcell_token) + self.staticdata.stats.add_jitcell_token(target_token.targeting_jitcell_token) - def set_compiled_merge_points(self, greenkey, looptokens): - cell = self.jitdriver_sd.warmstate.jit_cell_at_key(greenkey) - cell.set_compiled_merge_points(looptokens) - def compile(self, original_boxes, live_arg_boxes, start, start_resumedescr): - num_green_args = self.jitdriver_sd.num_green_args - original_inputargs = self.history.inputargs - self.history.inputargs = original_boxes[num_green_args:] - greenkey = original_boxes[:num_green_args] - old_loop_tokens = self.get_compiled_merge_points(greenkey) - self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None) - loop_token = compile.compile_new_loop(self, old_loop_tokens, - greenkey, start, start_resumedescr) - if loop_token is not None: # raise if it *worked* correctly 
- self.set_compiled_merge_points(greenkey, old_loop_tokens) - self.history.inputargs = None - self.history.operations = None - raise GenerateMergePoint(live_arg_boxes, loop_token) + if target_token is not None: # raise if it *worked* correctly + assert isinstance(target_token, TargetToken) + jitcell_token = target_token.targeting_jitcell_token + self.raise_continue_running_normally(live_arg_boxes, jitcell_token) - self.history.inputargs = original_inputargs - self.history.operations.pop() # remove the JUMP - - def compile_bridge(self, live_arg_boxes): + def compile_trace(self, live_arg_boxes, start_resumedescr): num_green_args = self.jitdriver_sd.num_green_args greenkey = live_arg_boxes[:num_green_args] - old_loop_tokens = self.get_compiled_merge_points(greenkey) - if len(old_loop_tokens) == 0: + target_jitcell_token = self.get_procedure_token(greenkey) + if not target_jitcell_token: return - #if self.resumekey.guard_opnum == rop.GUARD_CLASS: - # return # Kepp tracing for another iteration - self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None) + if not target_jitcell_token.target_tokens: + return + + self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None, + descr=target_jitcell_token) try: - target_loop_token = compile.compile_new_bridge(self, - old_loop_tokens, - self.resumekey) + target_token = compile.compile_trace(self, self.resumekey, start_resumedescr) finally: self.history.operations.pop() # remove the JUMP - if target_loop_token is not None: # raise if it *worked* correctly - self.history.inputargs = None - self.history.operations = None - raise GenerateMergePoint(live_arg_boxes, target_loop_token) + if target_token is not None: # raise if it *worked* correctly + assert isinstance(target_token, TargetToken) + jitcell_token = target_token.targeting_jitcell_token + self.raise_continue_running_normally(live_arg_boxes, jitcell_token) def compile_bridge_and_loop(self, original_boxes, live_arg_boxes, start, bridge_arg_boxes, start_resumedescr): @@ -2101,10 +2144,8 @@ except RetraceLoop: assert False assert target_loop_token is not None - - self.history.inputargs = None - self.history.operations = None - raise GenerateMergePoint(live_arg_boxes, old_loop_tokens[0]) + self.raise_continue_running_normally(live_arg_boxes, + old_loop_tokens[0]) def compile_done_with_this_frame(self, exitbox): self.gen_store_back_in_virtualizable() @@ -2126,21 +2167,21 @@ loop_tokens = sd.loop_tokens_done_with_this_frame_float else: assert False - self.history.record(rop.JUMP, exits, None) - target_loop_token = compile.compile_new_bridge(self, loop_tokens, - self.resumekey) - if target_loop_token is not loop_tokens[0]: + # FIXME: kill TerminatingLoopToken? + # FIXME: can we call compile_trace? 
+ token = loop_tokens[0].finishdescr + self.history.record(rop.FINISH, exits, None, descr=token) + target_token = compile.compile_trace(self, self.resumekey) + if target_token is not token: compile.giveup() def compile_exit_frame_with_exception(self, valuebox): self.gen_store_back_in_virtualizable() - # temporarily put a JUMP to a pseudo-loop - self.history.record(rop.JUMP, [valuebox], None) sd = self.staticdata - loop_tokens = sd.loop_tokens_exit_frame_with_exception_ref - target_loop_token = compile.compile_new_bridge(self, loop_tokens, - self.resumekey) - if target_loop_token is not loop_tokens[0]: + token = sd.loop_tokens_exit_frame_with_exception_ref[0].finishdescr + self.history.record(rop.FINISH, [valuebox], None, descr=token) + target_token = compile.compile_trace(self, self.resumekey) + if target_token is not token: compile.giveup() @specialize.arg(1) @@ -2382,22 +2423,6 @@ abox, ConstInt(j), itembox) assert i + 1 == len(self.virtualizable_boxes) - def gen_load_from_other_virtualizable(self, vinfo, vbox): - boxes = [] - assert vinfo is not None - for i in range(vinfo.num_static_extra_boxes): - descr = vinfo.static_field_descrs[i] - boxes.append(self.execute_and_record(rop.GETFIELD_GC, descr, vbox)) - virtualizable = vinfo.unwrap_virtualizable_box(vbox) - for k in range(vinfo.num_arrays): - descr = vinfo.array_field_descrs[k] - abox = self.execute_and_record(rop.GETFIELD_GC, descr, vbox) - descr = vinfo.array_descrs[k] - for j in range(vinfo.get_array_length(virtualizable, k)): - boxes.append(self.execute_and_record(rop.GETARRAYITEM_GC, descr, - abox, ConstInt(j))) - return boxes - def replace_box(self, oldbox, newbox): assert isinstance(oldbox, Box) for frame in self.framestack: @@ -2469,25 +2494,13 @@ greenargs = arglist[1:num_green_args+1] args = arglist[num_green_args+1:] assert len(args) == targetjitdriver_sd.num_red_args - vinfo = targetjitdriver_sd.virtualizable_info - if vinfo is not None: - index = targetjitdriver_sd.index_of_virtualizable - vbox = args[index] - args = args + self.gen_load_from_other_virtualizable(vinfo, vbox) - # ^^^ and not "+=", which makes 'args' a resizable list warmrunnerstate = targetjitdriver_sd.warmstate - token = warmrunnerstate.get_assembler_token(greenargs, args) + token = warmrunnerstate.get_assembler_token(greenargs) op = op.copy_and_change(rop.CALL_ASSEMBLER, args=args, descr=token) self.history.operations.append(op) # ____________________________________________________________ -class GenerateMergePoint(JitException): - def __init__(self, args, target_loop_token): - assert target_loop_token is not None - self.argboxes = args - self.target_loop_token = target_loop_token - class ChangeFrame(JitException): """Raised after we mutated metainterp.framestack, in order to force it to reload the current top-of-stack frame that gets interpreted.""" diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -369,6 +369,8 @@ 'FINISH/*d', '_FINAL_LAST', + 'LABEL/*d', + '_GUARD_FIRST', '_GUARD_FOLDABLE_FIRST', 'GUARD_TRUE/1d', @@ -379,11 +381,11 @@ 'GUARD_ISNULL/1d', 'GUARD_NONNULL_CLASS/2d', '_GUARD_FOLDABLE_LAST', - 'GUARD_NO_EXCEPTION/0d', - 'GUARD_EXCEPTION/1d', + 'GUARD_NO_EXCEPTION/0d', # may be called with an exception currently set + 'GUARD_EXCEPTION/1d', # may be called with an exception currently set 'GUARD_NO_OVERFLOW/0d', 'GUARD_OVERFLOW/0d', - 'GUARD_NOT_FORCED/0d', + 'GUARD_NOT_FORCED/0d', # may be called with an exception 
currently set 'GUARD_NOT_INVALIDATED/0d', '_GUARD_LAST', # ----- end of guard operations ----- @@ -496,6 +498,7 @@ 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr + 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/jit/metainterp/test/support.py b/pypy/jit/metainterp/test/support.py --- a/pypy/jit/metainterp/test/support.py +++ b/pypy/jit/metainterp/test/support.py @@ -4,9 +4,9 @@ from pypy.rpython.ootypesystem import ootype from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats +from pypy.jit.metainterp.warmstate import unspecialize_value from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT from pypy.jit.metainterp import pyjitpl, history -from pypy.jit.metainterp.warmstate import set_future_value from pypy.jit.codewriter.policy import JitPolicy from pypy.jit.codewriter import codewriter, longlong from pypy.rlib.rfloat import isnan @@ -16,15 +16,16 @@ from pypy.jit.codewriter import support class FakeJitCell(object): - __compiled_merge_points = [] - def get_compiled_merge_points(self): - return self.__compiled_merge_points[:] - def set_compiled_merge_points(self, lst): - self.__compiled_merge_points = lst + __product_token = None + def get_procedure_token(self): + return self.__product_token + def set_procedure_token(self, token): + self.__product_token = token class FakeWarmRunnerState(object): - def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): - pass + def attach_procedure_to_interp(self, greenkey, procedure_token): + cell = self.jit_cell_at_key(greenkey) + cell.set_procedure_token(procedure_token) def helper_func(self, FUNCPTR, func): from pypy.rpython.annlowlevel import llhelper @@ -132,16 +133,14 @@ def _run_with_machine_code(testself, args): metainterp = testself.metainterp num_green_args = metainterp.jitdriver_sd.num_green_args - loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) - if len(loop_tokens) != 1: - return NotImplemented + procedure_token = metainterp.get_procedure_token(args[:num_green_args]) # a loop was successfully created by _run_with_pyjitpl(); call it cpu = metainterp.cpu + args1 = [] for i in range(len(args) - num_green_args): x = args[num_green_args + i] - typecode = history.getkind(lltype.typeOf(x)) - set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(loop_tokens[0]) + args1.append(unspecialize_value(x)) + faildescr = cpu.execute_token(procedure_token, *args1) assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') if metainterp.jitdriver_sd.result_type == history.INT: return cpu.get_latest_value_int(0) @@ -157,24 +156,34 @@ basic = True def check_resops(self, expected=None, **check): get_stats().check_resops(expected=expected, **check) + def check_simple_loop(self, expected=None, **check): + get_stats().check_simple_loop(expected=expected, **check) - def check_loop_count(self, count): - """NB. This is a hack; use check_tree_loop_count() or - check_enter_count() for the real thing. 
- This counts as 1 every bridge in addition to every loop; and it does - not count at all the entry bridges from interpreter, although they - are TreeLoops as well.""" + + + def check_trace_count(self, count): # was check_loop_count + # The number of traces compiled assert get_stats().compiled_count == count - def check_tree_loop_count(self, count): - assert len(get_stats().loops) == count - def check_loop_count_at_most(self, count): + def check_trace_count_at_most(self, count): assert get_stats().compiled_count <= count + + def check_jitcell_token_count(self, count): # was check_tree_loop_count + assert len(get_stats().jitcell_token_wrefs) == count + + def check_target_token_count(self, count): + tokens = get_stats().get_all_jitcell_tokens() + n = sum ([len(t.target_tokens) for t in tokens]) + assert n == count + def check_enter_count(self, count): assert get_stats().enter_count == count def check_enter_count_at_most(self, count): assert get_stats().enter_count <= count + def check_jumps(self, maxcount): + return # FIXME assert get_stats().exec_jumps <= maxcount + def check_aborted_count(self, count): assert get_stats().aborted_count == count def check_aborted_count_at_least(self, count): @@ -217,7 +226,7 @@ # this can be used after interp_operations if expected is not None: expected = dict(expected) - expected['jump'] = 1 + expected['finish'] = 1 self.metainterp.staticdata.stats.check_history(expected, **isns) diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -9,12 +9,11 @@ from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin, noConst from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.jit.metainterp.warmspot import get_stats -from pypy.jit.metainterp.warmstate import set_future_value from pypy.rlib import rerased from pypy.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside, loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, - isconstant, isvirtual, promote_string, set_param) + isconstant, isvirtual, promote_string, set_param, record_known_class) from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -66,7 +65,7 @@ res = self.interp_operations(f, [8, 98]) assert res == 110 - def test_loop(self): + def test_loop_1(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) def f(x, y): res = 0 @@ -78,19 +77,20 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 42 - self.check_loop_count(1) - self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, 'guard_true': 2, 'int_sub': 2}) + self.check_trace_count(1) + self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, + 'guard_true': 2, 'int_sub': 2}) if self.basic: found = 0 - for op in get_stats().loops[0]._all_operations(): + for op in get_stats().get_all_loops()[0]._all_operations(): if op.getopname() == 'guard_true': liveboxes = op.getfailargs() assert len(liveboxes) == 3 for box in liveboxes: assert isinstance(box, history.BoxInt) found += 1 - assert found == 1 + assert found == 2 def test_loop_variant_mul1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -106,8 +106,8 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 1323 - self.check_loop_count(1) - self.check_resops(int_mul=3) + self.check_trace_count(1) + 
self.check_simple_loop(int_mul=1) def test_loop_variant_mul_ovf(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -123,8 +123,8 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 1323 - self.check_loop_count(1) - self.check_resops(int_mul_ovf=3) + self.check_trace_count(1) + self.check_simple_loop(int_mul_ovf=1) def test_loop_invariant_mul1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -138,8 +138,9 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 252 - self.check_loop_count(1) - self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, + self.check_trace_count(1) + self.check_simple_loop(int_mul=0) + self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) @@ -156,66 +157,63 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 308 - self.check_loop_count(1) - self.check_resops({'jump': 2, 'int_lshift': 2, 'int_gt': 2, + self.check_trace_count(1) + self.check_simple_loop(int_mul_ovf=0) + self.check_resops({'jump': 1, 'int_lshift': 2, 'int_gt': 2, 'int_mul_ovf': 1, 'int_add': 4, 'guard_true': 2, 'guard_no_overflow': 1, 'int_sub': 2}) def test_loop_invariant_mul_bridge1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x', 'n']) + def f(x, y, n): res = 0 while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) + myjitdriver.can_enter_jit(x=x, y=y, n=n, res=res) + myjitdriver.jit_merge_point(x=x, y=y, n=n, res=res) res += x * x - if y<16: + if y Author: Antonio Cuni Branch: Changeset: r603:f550f9557270 Date: 2011-12-15 17:26 +0100 http://bitbucket.org/pypy/buildbot/changeset/f550f9557270/ Log: bah, make sure that benchmarks on speed.python.org don't acquire the lock for tannit diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -19,6 +19,7 @@ # there are 8 logical CPUs, but only 4 physical ones TannitCPU = locks.MasterLock('tannit_cpu', maxCount=6) +SpeedPythonCPU = locks.MasterLock('speed_python_cpu', maxCount=24) class ShellCmd(shell.ShellCommand): @@ -321,13 +322,20 @@ repourl = 'https://bitbucket.org/pypy/benchmarks' update_hg(platform, self, repourl, 'benchmarks', use_branch=False) # + if host == 'tannit': + lock = TannitCPU + elif host == 'speed_python': + lock = SpeedPythonCPU + else: + assert False, 'unknown host %s' % host + # self.addStep( Translate( translationArgs=['-Ojit'], targetArgs=[], haltOnFailure=True, # this step can be executed in parallel with other builds - locks=[TannitCPU.access('counting')], + locks=[lock.access('counting')], ) ) pypy_c_rel = "../build/pypy/translator/goal/pypy-c" diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -126,9 +126,14 @@ app_tests=True ) -pypyJITBenchmarkFactory = pypybuilds.JITBenchmark() -pypyJITBenchmarkFactory64 = pypybuilds.JITBenchmark(platform='linux64', - postfix='-64') +pypyJITBenchmarkFactory_tannit = pypybuilds.JITBenchmark() +pypyJITBenchmarkFactory64_tannit = pypybuilds.JITBenchmark(platform='linux64', + postfix='-64') + +pypyJITBenchmarkFactory64_speed = pypybuilds.JITBenchmark(platform='linux64', + postfix='-64', + host='speed_python') + LINUX32 = "own-linux-x86-32" LINUX64 = "own-linux-x86-64" @@ -287,21 +292,21 @@ {"name": JITBENCH, "slavenames": ["tannit32"], "builddir": 
JITBENCH, - "factory": pypyJITBenchmarkFactory, + "factory": pypyJITBenchmarkFactory_tannit, "category": 'benchmark-run', # the locks are acquired with fine grain inside the build }, {"name": JITBENCH64, "slavenames": ["tannit64"], "builddir": JITBENCH64, - "factory": pypyJITBenchmarkFactory64, + "factory": pypyJITBenchmarkFactory64_tannit, "category": "benchmark-run", # the locks are acquired with fine grain inside the build }, {"name": JITBENCH64_2, "slavenames": ["speed-python-64"], - "builddir": JITBENCH64, - "factory": pypyJITBenchmarkFactory64, + "builddir": JITBENCH64_2, + "factory": pypyJITBenchmarkFactory64_speed, "category": "benchmark-run", # the locks are acquired with fine grain inside the build }, From noreply at buildbot.pypy.org Thu Dec 15 17:28:46 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 15 Dec 2011 17:28:46 +0100 (CET) Subject: [pypy-commit] buildbot default: hg merge default Message-ID: <20111215162846.B151C82221@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r604:c34d2583efd4 Date: 2011-12-15 17:28 +0100 http://bitbucket.org/pypy/buildbot/changeset/c34d2583efd4/ Log: hg merge default diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -204,6 +204,7 @@ Nightly("nightly-0-00", [ JITBENCH, # on tannit32, uses 1 core (in part exclusively) JITBENCH64, # on tannit64, uses 1 core (in part exclusively) + JITBENCH64_2, # on speed.python.org, uses 1 core (in part exclusively) MACOSX32, # on minime ], branch=None, hour=0, minute=0), # @@ -329,7 +330,7 @@ 'category' : 'mac64', }, {"name": WIN32, - "slavenames": ["bigboard"], + "slavenames": ["snakepit32", "bigboard"], "builddir": WIN32, "factory": pypyOwnTestFactoryWin, "category": 'win32' @@ -341,13 +342,13 @@ "category": 'win32' }, {"name": APPLVLWIN32, - "slavenames": ["bigboard"], + "slavenames": ["snakepit32", "bigboard"], "builddir": APPLVLWIN32, "factory": pypyTranslatedAppLevelTestFactoryWin, "category": "win32" }, {"name" : JITWIN32, - "slavenames": ["bigboard"], + "slavenames": ["snakepit32", "bigboard"], 'builddir' : JITWIN32, 'factory' : pypyJITTranslatedTestFactoryWin, 'category' : 'win32', From noreply at buildbot.pypy.org Thu Dec 15 17:28:52 2011 From: noreply at buildbot.pypy.org (hager) Date: Thu, 15 Dec 2011 17:28:52 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: (bivab, arigo, hager): implemented COND_CALL_GC_WB Message-ID: <20111215162852.D371C82221@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50585:3167c1547eef Date: 2011-12-15 17:28 +0100 http://bitbucket.org/pypy/pypy/changeset/3167c1547eef/ Log: (bivab, arigo, hager): implemented COND_CALL_GC_WB diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -10,12 +10,13 @@ INT) from pypy.rlib.objectmodel import we_are_translated from pypy.jit.backend.ppc.ppcgen.helper.assembler import (count_reg_args, - saved_registers) + Saved_Volatiles) from pypy.jit.backend.ppc.ppcgen.jump import remap_frame_layout from pypy.jit.backend.ppc.ppcgen.codebuilder import OverwritingBuilder from pypy.jit.backend.ppc.ppcgen.regalloc import TempPtr, TempInt from pypy.jit.backend.llsupport import symbolic from pypy.rpython.lltypesystem import rstr, rffi, lltype +from pypy.jit.metainterp.resoperation import rop NO_FORCE_INDEX = -1 @@ -868,6 +869,78 @@ emit_jit_debug = 
emit_debug_merge_point + def emit_cond_call_gc_wb(self, op, arglocs, regalloc): + # Write code equivalent to write_barrier() in the GC: it checks + # a flag in the object at arglocs[0], and if set, it calls the + # function remember_young_pointer() from the GC. The two arguments + # to the call are in arglocs[:2]. The rest, arglocs[2:], contains + # registers that need to be saved and restored across the call. + descr = op.getdescr() + if we_are_translated(): + cls = self.cpu.gc_ll_descr.has_write_barrier_class() + assert cls is not None and isinstance(descr, cls) + + opnum = op.getopnum() + if opnum == rop.COND_CALL_GC_WB: + N = 2 + addr = descr.get_write_barrier_fn(self.cpu) + elif opnum == rop.COND_CALL_GC_WB_ARRAY: + N = 3 + addr = descr.get_write_barrier_from_array_fn(self.cpu) + assert addr != 0 + else: + raise AssertionError(opnum) + loc_base = arglocs[0] + + self.mc.alloc_scratch_reg() + if IS_PPC_32: + self.mc.lwz(r.SCRATCH.value, loc_base.value, 0) + else: + self.mc.ld(r.SCRATCH.value, loc_base.value, 0) + + # offset to the byte we are interested in + byte_offset = descr.jit_wb_if_flag_byteofs + single_byte = descr.jit_wb_if_flag_singlebyte + + # examine which bit in the byte is set + for i in range(8): + if 1 << i == single_byte: + n = i + break + + if IS_PPC_32: + # compute the position of the bit we want to test + bitpos = (3 - byte_offset) * 8 + n + # put this bit to the rightmost bitposition of r0 + self.mc.rlwinm(r.SCRATCH.value, r.SCRATCH.value, 32 - bitpos, 31, 31) + # test whether this bit is set + self.mc.cmpwi(0, r.SCRATCH.value, 1) + else: + assert 0, "not implemented yet" + self.mc.free_scratch_reg() + + jz_location = self.mc.currpos() + self.mc.nop() + + # the following is supposed to be the slow path, so whenever possible + # we choose the most compact encoding over the most efficient one. + with Saved_Volatiles(self.mc): + if N == 2: + callargs = [r.r3, r.r4] + else: + callargs = [r.r3, r.r4, r.r5] + remap_frame_layout(self, arglocs, callargs, r.SCRATCH) + func = rffi.cast(lltype.Signed, addr) + # + # misaligned stack in the call, but it's ok because the write barrier + # is not going to call anything more. + self.mc.bl_abs(func) + + # patch the JZ above + offset = self.mc.currpos() - jz_location + pmc = OverwritingBuilder(self.mc, jz_location, 1) + pmc.bc(4, 2, offset) # jump if the two values are equal + pmc.overwrite() class ForceOpAssembler(object): diff --git a/pypy/jit/backend/ppc/ppcgen/regalloc.py b/pypy/jit/backend/ppc/ppcgen/regalloc.py --- a/pypy/jit/backend/ppc/ppcgen/regalloc.py +++ b/pypy/jit/backend/ppc/ppcgen/regalloc.py @@ -755,6 +755,21 @@ prepare_debug_merge_point = void prepare_jit_debug = void + def prepare_cond_call_gc_wb(self, op): + assert op.result is None + N = op.numargs() + # we force all arguments in a reg (unless they are Consts), + # because it will be needed anyway by the following setfield_gc + # or setarrayitem_gc. It avoids loading it twice from the memory. 
+ arglocs = [] + argboxes = [] + for i in range(N): + loc, box = self._ensure_value_is_boxed(op.getarg(i), argboxes) + arglocs.append(loc) + argboxes.append(box) + self.rm.possibly_free_vars(argboxes) + return arglocs + def prepare_force_token(self, op): res_loc = self.force_allocate_reg(op.result) self.possibly_free_var(op.result) From noreply at buildbot.pypy.org Thu Dec 15 17:44:09 2011 From: noreply at buildbot.pypy.org (hager) Date: Thu, 15 Dec 2011 17:44:09 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: implemented COND_CALL_GC_WB_ARRAY Message-ID: <20111215164409.2074E82221@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50586:3ea35dae52a6 Date: 2011-12-15 17:43 +0100 http://bitbucket.org/pypy/pypy/changeset/3ea35dae52a6/ Log: implemented COND_CALL_GC_WB_ARRAY diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -911,6 +911,7 @@ if IS_PPC_32: # compute the position of the bit we want to test bitpos = (3 - byte_offset) * 8 + n + # ^^^^^^^^^^^^^^^ due to endianess # put this bit to the rightmost bitposition of r0 self.mc.rlwinm(r.SCRATCH.value, r.SCRATCH.value, 32 - bitpos, 31, 31) # test whether this bit is set @@ -942,6 +943,8 @@ pmc.bc(4, 2, offset) # jump if the two values are equal pmc.overwrite() + emit_cond_call_gc_wb_array = emit_cond_call_gc_wb + class ForceOpAssembler(object): _mixin_ = True diff --git a/pypy/jit/backend/ppc/ppcgen/regalloc.py b/pypy/jit/backend/ppc/ppcgen/regalloc.py --- a/pypy/jit/backend/ppc/ppcgen/regalloc.py +++ b/pypy/jit/backend/ppc/ppcgen/regalloc.py @@ -770,6 +770,8 @@ self.rm.possibly_free_vars(argboxes) return arglocs + prepare_cond_call_gc_wb_array = prepare_cond_call_gc_wb + def prepare_force_token(self, op): res_loc = self.force_allocate_reg(op.result) self.possibly_free_var(op.result) From noreply at buildbot.pypy.org Thu Dec 15 18:16:28 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 18:16:28 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: In-progress. Message-ID: <20111215171628.E7A0A82221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50587:30a943d165f9 Date: 2011-12-15 17:32 +0100 http://bitbucket.org/pypy/pypy/changeset/30a943d165f9/ Log: In-progress. diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -72,7 +72,8 @@ """Blackhole: do a 'bh_new'. 
Also used for 'bh_new_with_vtable', with the vtable pointer set manually afterwards.""" assert isinstance(sizedescr, BaseSizeDescr) - res = self.get_funcptr_for_malloc_gc_fixed()(sizedescr.size) + mallocptr = self.get_funcptr_for_malloc_gc_fixed() + res = mallocptr(sizedescr.size) if res: pass # XXX tid return res @@ -622,6 +623,7 @@ self._make_gcrootmap() self._make_layoutbuilder() self._setup_gcclass() + self._make_functions() def _initialize_for_tests(self): self.layoutbuilder = None @@ -671,24 +673,28 @@ assert self.GCClass.inline_simple_malloc assert self.GCClass.inline_simple_malloc_varsize - # make a malloc function, with two arguments - def malloc_basic(size, tid): - type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - check_typeid(type_id) + def _make_functions(self): + # make the fixed malloc function, with one argument + def malloc_gc_fixed(size): + type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here res = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, type_id, size, False, False, False) + #llop.debug_print(lltype.Void, "\tmalloc_basic", size, "-->", res) # In case the operation above failed, we are returning NULL # from this function to assembler. There is also an RPython # exception set, typically MemoryError; but it's easier and # faster to check for the NULL return value, as done by # translator/exceptiontransform.py. - #llop.debug_print(lltype.Void, "\tmalloc_basic", size, type_id, - # "-->", res) return res - self.malloc_basic = malloc_basic - self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType( - [lltype.Signed, lltype.Signed], llmemory.GCREF)) + self.malloc_gc_fixed = malloc_gc_fixed + self.MALLOC_GC_FIXED = lltype.Ptr( + lltype.FuncType([lltype.Signed], llmemory.GCREF)) + # + # make the varsize malloc function, with three arguments + def malloc_gc_variable(basesize, num_elem, itemsize): + xx + # self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, llmemory.Address], lltype.Void)) self.WB_ARRAY_FUNCPTR = lltype.Ptr(lltype.FuncType( @@ -754,6 +760,19 @@ self.malloc_slowpath = malloc_slowpath self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) + def get_funcptr_for_malloc_gc_fixed(self): + """Returns a function pointer to a function that implements + the simple case of MALLOC_GC: the case where the variable size + is zero. The function pointer has signature (size) -> GCREF.""" + xx + + def get_funcptr_for_malloc_gc_variable(self): + """Returns a function pointer to a function that implements + the complex case of MALLOC_GC: the case where the variable size + is not known to be zero. 
The signature is: + (base_size, num_elem, item_size) -> GCREF""" + xx + def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) return rffi.cast(lltype.Signed, nurs_addr) diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -15,12 +15,12 @@ gc_ll_descr = GcLLDescr_boehm(None, None, None) # record = [] - prev_funcptr_for_new = gc_ll_descr.funcptr_for_new - def my_funcptr_for_new(size): - p = prev_funcptr_for_new(size) + prev_malloc_fn_ptr = gc_ll_descr.malloc_fn_ptr + def my_malloc_fn_ptr(size): + p = prev_malloc_fn_ptr(size) record.append((size, p)) return p - gc_ll_descr.funcptr_for_new = my_funcptr_for_new + gc_ll_descr.malloc_fn_ptr = my_malloc_fn_ptr # # ---------- gc_malloc ---------- S = lltype.GcStruct('S', ('x', lltype.Signed)) From noreply at buildbot.pypy.org Thu Dec 15 18:16:30 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 18:16:30 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: Progress... Message-ID: <20111215171630.1B02A82286@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50588:0304705f6571 Date: 2011-12-15 18:03 +0100 http://bitbucket.org/pypy/pypy/changeset/0304705f6571/ Log: Progress... diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -163,7 +163,7 @@ except KeyError: tsc = gccache.translate_support_code (_, _, ofs) = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) - if key == 'array' and tsc: + if key == 'array' and not tsc: (_, _, baseofs) = symbolic.get_array_token(_A, tsc) assert baseofs == ofs, ("arrays %r and %r don't have the length " "field at the same offset!" 
% diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -75,7 +75,7 @@ mallocptr = self.get_funcptr_for_malloc_gc_fixed() res = mallocptr(sizedescr.size) if res: - pass # XXX tid + self._set_tid(res, sizedescr.tid) return res def gc_malloc_array(self, arraydescr, num_elem): @@ -83,26 +83,32 @@ ofs_length = arraydescr.get_ofs_length(self.translate_support_code) basesize = arraydescr.get_base_size(self.translate_support_code) itemsize = arraydescr.get_item_size(self.translate_support_code) - return self._gc_malloc_array(basesize, num_elem, itemsize, ofs_length) + return self._gc_malloc_array(basesize, num_elem, itemsize, ofs_length, + arraydescr.tid) - def _gc_malloc_array(self, basesize, num_elem, itemsize, ofs_length): + def gc_malloc_str(self, num_elem): + return self._gc_malloc_array(self.str_basesize, num_elem, + self.str_itemsize, + self.str_ofs_length, + self.str_type_id) + + def gc_malloc_unicode(self, num_elem): + return self._gc_malloc_array(self.unicode_basesize, num_elem, + self.unicode_itemsize, + self.unicode_ofs_length, + self.unicode_type_id) + + def _gc_malloc_array(self, basesize, num_elem, itemsize, ofs_length, tid): mallocptr = self.get_funcptr_for_malloc_gc_variable() res = mallocptr(basesize, num_elem, itemsize) if res: - # XXX tid + self._set_tid(res, tid) arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) arrayptr[ofs_length/WORD] = num_elem return res - def gc_malloc_str(self, num_elem): - return self._gc_malloc_array(self.str_basesize, num_elem, - self.str_itemsize, - self.str_ofs_length) - - def gc_malloc_unicode(self, num_elem): - return self._gc_malloc_array(self.unicode_basesize, num_elem, - self.unicode_itemsize, - self.unicode_ofs_length) + def _set_tid(self, gcptr, tid): + pass # unless overridden def _record_constptrs(self, op, gcrefs_output_list): for i in range(op.numargs()): @@ -609,8 +615,6 @@ def __init__(self, gcdescr, translator, rtyper, llop1=llop, really_not_translated=False): from pypy.rpython.memory.gctypelayout import check_typeid - from pypy.rpython.memory.gcheader import GCHeaderBuilder - from pypy.rpython.memory.gctransform import framework GcLLDescription.__init__(self, gcdescr, translator, rtyper) self.translator = translator self.llop1 = llop1 @@ -642,24 +646,27 @@ def _make_gcrootmap(self): # to find roots in the assembler, make a GcRootMap - name = gcdescr.config.translation.gcrootfinder + name = self.gcdescr.config.translation.gcrootfinder try: cls = globals()['GcRootMap_' + name] except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % (name,)) - gcrootmap = cls(gcdescr) + gcrootmap = cls(self.gcdescr) self.gcrootmap = gcrootmap def _make_layoutbuilder(self): # make a TransformerLayoutBuilder and save it on the translator # where it can be fished and reused by the FrameworkGCTransformer + from pypy.rpython.memory.gctransform import framework + translator = self.translator self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} - gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + self.gcrootmap.add_jit2gc_hooks(translator._jit2gc) def _setup_gcclass(self): + from pypy.rpython.memory.gcheader import GCHeaderBuilder self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = 
lltype.Ptr(self.GCClass.HDR) @@ -674,6 +681,7 @@ assert self.GCClass.inline_simple_malloc_varsize def _make_functions(self): + llop1 = self.llop1 # make the fixed malloc function, with one argument def malloc_gc_fixed(size): type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here @@ -702,76 +710,71 @@ self.write_barrier_descr = WriteBarrierDescr(self) self.fielddescr_tid = self.write_barrier_descr.fielddescr_tid # - def malloc_array(itemsize, tid, num_elem): - type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - check_typeid(type_id) - return llop1.do_malloc_varsize_clear( - llmemory.GCREF, - type_id, num_elem, self.array_basesize, itemsize, - self.array_length_ofs) - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 3, llmemory.GCREF)) - # - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, True) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, True) - self.str_type_id = self.layoutbuilder.get_type_id(rstr.STR) - self.unicode_type_id = self.layoutbuilder.get_type_id(rstr.UNICODE) - # - def malloc_str(length): - return llop1.do_malloc_varsize_clear( - llmemory.GCREF, - str_type_id, length, str_basesize, str_itemsize, - str_ofs_length) - def malloc_unicode(length): - return llop1.do_malloc_varsize_clear( - llmemory.GCREF, - unicode_type_id, length, unicode_basesize,unicode_itemsize, - unicode_ofs_length) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - # - class ForTestOnly: - pass - for_test_only = ForTestOnly() - for_test_only.x = 1.23 - def random_usage_of_xmm_registers(): - x0 = for_test_only.x - x1 = x0 * 0.1 - x2 = x0 * 0.2 - x3 = x0 * 0.3 - for_test_only.x = x0 + x1 + x2 + x3 - # - def malloc_slowpath(size): - if self.DEBUG: - random_usage_of_xmm_registers() - assert size >= self.minimal_size_in_nursery - # NB. although we call do_malloc_fixedsize_clear() here, - # it's a bit of a hack because we set tid to 0 and may - # also use it to allocate varsized objects. The tid - # and possibly the length are both set afterward. 
- gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - 0, size, False, False, False) - return rffi.cast(lltype.Signed, gcref) - self.malloc_slowpath = malloc_slowpath - self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) +## def malloc_array(itemsize, tid, num_elem): +## type_id = llop.extract_ushort(llgroup.HALFWORD, tid) +## check_typeid(type_id) +## return llop1.do_malloc_varsize_clear( +## llmemory.GCREF, +## type_id, num_elem, self.array_basesize, itemsize, +## self.array_length_ofs) +## ###self.malloc_array = malloc_array +## self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( +## [lltype.Signed] * 3, llmemory.GCREF)) +## # +## (str_basesize, str_itemsize, str_ofs_length +## ) = symbolic.get_array_token(rstr.STR, True) +## (unicode_basesize, unicode_itemsize, unicode_ofs_length +## ) = symbolic.get_array_token(rstr.UNICODE, True) +## self.str_type_id = self.layoutbuilder.get_type_id(rstr.STR) +## self.unicode_type_id = self.layoutbuilder.get_type_id(rstr.UNICODE) +## # +## def malloc_str(length): +## return llop1.do_malloc_varsize_clear( +## llmemory.GCREF, +## str_type_id, length, str_basesize, str_itemsize, +## str_ofs_length) +## def malloc_unicode(length): +## return llop1.do_malloc_varsize_clear( +## llmemory.GCREF, +## unicode_type_id, length, unicode_basesize,unicode_itemsize, +## unicode_ofs_length) +## ###self.malloc_str = malloc_str +## ###self.malloc_unicode = malloc_unicode +## ###self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( +## ### [lltype.Signed], llmemory.GCREF)) +## # +## class ForTestOnly: +## pass +## for_test_only = ForTestOnly() +## for_test_only.x = 1.23 +## def random_usage_of_xmm_registers(): +## x0 = for_test_only.x +## x1 = x0 * 0.1 +## x2 = x0 * 0.2 +## x3 = x0 * 0.3 +## for_test_only.x = x0 + x1 + x2 + x3 +## # +## def malloc_slowpath(size): +## if self.DEBUG: +## random_usage_of_xmm_registers() +## assert size >= self.minimal_size_in_nursery +## # NB. although we call do_malloc_fixedsize_clear() here, +## # it's a bit of a hack because we set tid to 0 and may +## # also use it to allocate varsized objects. The tid +## # and possibly the length are both set afterward. +## gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, +## 0, size, False, False, False) +## return rffi.cast(lltype.Signed, gcref) +## self.malloc_slowpath = malloc_slowpath +## self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) def get_funcptr_for_malloc_gc_fixed(self): - """Returns a function pointer to a function that implements - the simple case of MALLOC_GC: the case where the variable size - is zero. The function pointer has signature (size) -> GCREF.""" - xx + """(size) -> GCREF""" + return llhelper(self.MALLOC_GC_FIXED, self.malloc_gc_fixed) def get_funcptr_for_malloc_gc_variable(self): - """Returns a function pointer to a function that implements - the complex case of MALLOC_GC: the case where the variable size - is not known to be zero. 
The signature is: - (base_size, num_elem, item_size) -> GCREF""" - xx + """(base_size, num_elem, item_size) -> GCREF""" + return llhelper(self.MALLOC_GC_VARIABLE, self.malloc_gc_variable) def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -781,10 +784,6 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) - return rffi.cast(lltype.Signed, fptr) - def initialize(self): self.gcrootmap.initialize() @@ -800,32 +799,11 @@ type_id = self.layoutbuilder.get_type_id(A) descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.malloc_basic(sizedescr.size, sizedescr.tid) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(itemsize, arraydescr.tid, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size, sizedescr.tid] - - def args_for_new_array(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [itemsize, arraydescr.tid] - - def get_funcptr_for_new(self): - return llhelper(self.GC_MALLOC_BASIC, self.malloc_basic) + def _set_tid(self, gcptr, tid): + hdr_addr = llmemory.cast_ptr_to_adr(gcptr) + hdr_addr -= self.gcheaderbuilder.size_gc_header + hdr = llmemory.cast_adr_to_ptr(hdr_addr, self.HDRPTR) + hdr.tid = tid def do_write_barrier(self, gcref_struct, gcref_newptr): hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct) diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -250,42 +250,43 @@ has_finalizer, has_light_finalizer, contains_weakptr): assert not contains_weakptr - assert not has_finalizer # in these tests - assert not has_light_finalizer # in these tests - p = llmemory.raw_malloc(size) - p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) - self.record.append(("fixedsize", repr(size), tid, p)) + assert not has_finalizer + assert not has_light_finalizer + assert rffi.cast(lltype.Signed, type_id) == 0 + x = llmemory.raw_malloc(self.gcheaderbuilder.size_gc_header + size) + x += self.gcheaderbuilder.size_gc_header + p = llmemory.cast_adr_to_ptr(x, RESTYPE) + self.record.append(("fixedsize", repr(size), p)) return p - def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, - itemsize, offset_to_length): - p = llmemory.raw_malloc(size + itemsize * length) - (p + offset_to_length).signed[0] = length - p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) - self.record.append(("varsize", tid, length, - repr(size), repr(itemsize), - repr(offset_to_length), p)) - return p +## def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, +## itemsize, offset_to_length): +## p = llmemory.raw_malloc(size + itemsize * length) +## (p + offset_to_length).signed[0] = length +## p = llmemory.cast_adr_to_ptr(p, RESTYPE) 
+## tid = llop.combine_ushort(lltype.Signed, type_id, 0) +## self.record.append(("varsize", tid, length, +## repr(size), repr(itemsize), +## repr(offset_to_length), p)) +## return p - def _write_barrier_failing_case(self, adr_struct, adr_newptr): - self.record.append(('barrier', adr_struct, adr_newptr)) +## def _write_barrier_failing_case(self, adr_struct, adr_newptr): +## self.record.append(('barrier', adr_struct, adr_newptr)) - def get_write_barrier_failing_case(self, FPTRTYPE): - return llhelper(FPTRTYPE, self._write_barrier_failing_case) +## def get_write_barrier_failing_case(self, FPTRTYPE): +## return llhelper(FPTRTYPE, self._write_barrier_failing_case) - _have_wb_from_array = False +## _have_wb_from_array = False - def _write_barrier_from_array_failing_case(self, adr_struct, v_index): - self.record.append(('barrier_from_array', adr_struct, v_index)) +## def _write_barrier_from_array_failing_case(self, adr_struct, v_index): +## self.record.append(('barrier_from_array', adr_struct, v_index)) - def get_write_barrier_from_array_failing_case(self, FPTRTYPE): - if self._have_wb_from_array: - return llhelper(FPTRTYPE, - self._write_barrier_from_array_failing_case) - else: - return lltype.nullptr(FPTRTYPE.TO) +## def get_write_barrier_from_array_failing_case(self, FPTRTYPE): +## if self._have_wb_from_array: +## return llhelper(FPTRTYPE, +## self._write_barrier_from_array_failing_case) +## else: +## return lltype.nullptr(FPTRTYPE.TO) class TestFramework(object): @@ -322,31 +323,32 @@ gc_ll_descr = GcLLDescr_framework(gcdescr, FakeTranslator(), None, llop1) gc_ll_descr.initialize() + llop1.gcheaderbuilder = gc_ll_descr.gcheaderbuilder self.llop1 = llop1 self.gc_ll_descr = gc_ll_descr self.fake_cpu = FakeCPU() - def test_args_for_new(self): - S = lltype.GcStruct('S', ('x', lltype.Signed)) - sizedescr = get_size_descr(self.gc_ll_descr, S) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed - A = lltype.GcArray(lltype.Signed) - arraydescr = get_array_descr(self.gc_ll_descr, A) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed +## def test_args_for_new(self): +## S = lltype.GcStruct('S', ('x', lltype.Signed)) +## sizedescr = get_size_descr(self.gc_ll_descr, S) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed +## A = lltype.GcArray(lltype.Signed) +## arraydescr = get_array_descr(self.gc_ll_descr, A) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed def test_gc_malloc(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) sizedescr = get_size_descr(self.gc_ll_descr, S) p = self.gc_ll_descr.gc_malloc(sizedescr) - assert self.llop1.record == [("fixedsize", - repr(sizedescr.size), - sizedescr.tid, p)] - assert repr(self.gc_ll_descr.args_for_new(sizedescr)) == repr( - [sizedescr.size, sizedescr.tid]) + assert lltype.typeOf(p) == llmemory.GCREF + assert self.llop1.record == [("fixedsize", repr(sizedescr.size), p)] + p1 = lltype.cast_opaque_ptr(lltype.Ptr(S), p) + hdr = self.gc_ll_descr.gcheaderbuilder.header_of_object(p1) + assert hdr.tid == sizedescr.tid def test_gc_malloc_array(self): A = lltype.GcArray(lltype.Signed) From noreply at buildbot.pypy.org Thu Dec 15 18:16:31 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Dec 2011 18:16:31 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: fixes Message-ID: 
<20111215171631.3F39682221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50589:6caef25ddb53 Date: 2011-12-15 18:16 +0100 http://bitbucket.org/pypy/pypy/changeset/6caef25ddb53/ Log: fixes diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -702,6 +702,9 @@ # make the varsize malloc function, with three arguments def malloc_gc_variable(basesize, num_elem, itemsize): xx + self.malloc_gc_variable = malloc_gc_variable + self.MALLOC_GC_VARIABLE = lltype.Ptr( + lltype.FuncType([lltype.Signed] * 3, llmemory.GCREF)) # self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, llmemory.Address], lltype.Void)) diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -36,7 +36,6 @@ # one, both for performance and to reduce the number of write # barriers. We do this on each "basic block" of operations, which in # this case means between CALLs or unknown-size mallocs. - # (XXX later: or LABELs) # for op in operations: if op.getopnum() == rop.DEBUG_MERGE_POINT: @@ -45,7 +44,7 @@ if op.is_malloc(): self.handle_malloc_operation(op) continue - elif op.can_malloc(): + elif op.can_malloc() or op.getopnum() == rop.LABEL: self.emitting_an_operation_that_can_collect() # ---------- write barriers ---------- if self.gc_ll_descr.write_barrier_descr is not None: From noreply at buildbot.pypy.org Thu Dec 15 21:10:45 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Thu, 15 Dec 2011 21:10:45 +0100 (CET) Subject: [pypy-commit] pypy numpy-full-fromstring: Updated tests for bool, added tests for int, all tests pass on 32-bit and 64-bit Message-ID: <20111215201045.A763382221@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: numpy-full-fromstring Changeset: r50590:bca13f8d517c Date: 2011-12-15 15:10 -0500 http://bitbucket.org/pypy/pypy/changeset/bca13f8d517c/ Log: Updated tests for bool, added tests for int, all tests pass on 32-bit and 64-bit diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1200,6 +1200,7 @@ def test_fromstring(self): from numpypy import fromstring, array, uint8, float32, int32 + import sys a = fromstring(self.data) for i in range(4): assert a[i] == i + 1 @@ -1251,8 +1252,15 @@ assert (q == [1.0]).all() r = fromstring("\x01\x00\x02", dtype='bool') assert (r == [True, False, True]).all() - s = fromstring("1,2,3,,5", dtype="bool", sep=",") + s = fromstring("1,2,3,,5", dtype=bool, sep=",") assert (s == [True, True, True, False, True]).all() + t = fromstring("", bool) + assert (t == []).all() + u = fromstring("\x01\x00\x00\x00\x00\x00\x00\x00", dtype=int) + if sys.maxint > 2 ** 31 - 1: + assert (u == [1]).all() + else: + assert (u == [1, 0]).all() def test_fromstring_types(self): from numpypy import fromstring @@ -1270,7 +1278,7 @@ e = fromstring('\xFF\xFF\xFF\xFF', dtype=int32) assert e[0] == -1 f = fromstring('\xFF\xFF\xFF\xFF', dtype=uint32) - assert f[0] == 4294967295 + assert repr(f[0]) == '4294967295' g = fromstring('\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF', dtype=int64) assert g[0] == -1 h = fromstring(self.float32val, dtype=float32) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ 
b/pypy/module/micronumpy/types.py @@ -208,6 +208,7 @@ class Integer(Primitive): _mixin_ = True + format_code = 'l' def _coerce(self, space, w_item): return self.box(space.int_w(space.call_function(space.w_int, w_item))) From noreply at buildbot.pypy.org Thu Dec 15 21:19:15 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Thu, 15 Dec 2011 21:19:15 +0100 (CET) Subject: [pypy-commit] pypy numpy-full-fromstring: Moved generic int code from Integer to Long Message-ID: <20111215201915.EF69A82221@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: numpy-full-fromstring Changeset: r50591:cf46c4c19b2f Date: 2011-12-15 15:19 -0500 http://bitbucket.org/pypy/pypy/changeset/cf46c4c19b2f/ Log: Moved generic int code from Integer to Long diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -208,7 +208,6 @@ class Integer(Primitive): _mixin_ = True - format_code = 'l' def _coerce(self, space, w_item): return self.box(space.int_w(space.call_function(space.w_int, w_item))) @@ -288,6 +287,7 @@ class Long(BaseType, Integer): T = rffi.LONG BoxType = interp_boxes.W_LongBox + format_code = 'l' class ULong(BaseType, Integer): T = rffi.ULONG From noreply at buildbot.pypy.org Thu Dec 15 21:22:07 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Thu, 15 Dec 2011 21:22:07 +0100 (CET) Subject: [pypy-commit] pypy default: Merged numpy-full-fromstring Message-ID: <20111215202207.B019882221@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: Changeset: r50592:65311ed125b7 Date: 2011-12-15 15:21 -0500 http://bitbucket.org/pypy/pypy/changeset/65311ed125b7/ Log: Merged numpy-full-fromstring diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,34 +1,90 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.interp_dtype import get_dtype_cache -from pypy.rlib.rstruct.runpack import runpack from pypy.rpython.lltypesystem import lltype, rffi +from pypy.module.micronumpy import interp_dtype +from pypy.objspace.std.strutil import strip_spaces FLOAT_SIZE = rffi.sizeof(lltype.Float) - at unwrap_spec(s=str) -def fromstring(space, s): +def _fromstring_text(space, s, count, sep, length, dtype): from pypy.module.micronumpy.interp_numarray import W_NDimArray + + sep_stripped = strip_spaces(sep) + skip_bad_vals = len(sep_stripped) == 0 + + items = [] + num_items = 0 + idx = 0 + + while (num_items < count or count == -1) and idx < len(s): + nextidx = s.find(sep, idx) + if nextidx < 0: + nextidx = length + piece = strip_spaces(s[idx:nextidx]) + if len(piece) > 0 or not skip_bad_vals: + if len(piece) == 0 and not skip_bad_vals: + val = dtype.itemtype.default_fromstring(space) + else: + try: + val = dtype.coerce(space, space.wrap(piece)) + except OperationError, e: + if not e.match(space, space.w_ValueError): + raise + gotit = False + while not gotit and len(piece) > 0: + piece = piece[:-1] + try: + val = dtype.coerce(space, space.wrap(piece)) + gotit = True + except OperationError, e: + if not e.match(space, space.w_ValueError): + raise + if not gotit: + val = dtype.itemtype.default_fromstring(space) + nextidx = length + items.append(val) + num_items += 1 + idx = nextidx + 1 + + if count > num_items: + raise 
OperationError(space.w_ValueError, space.wrap( + "string is smaller than requested size")) + + a = W_NDimArray(num_items, [num_items], dtype=dtype) + for i, val in enumerate(items): + a.dtype.setitem(a.storage, i, val) + + return space.wrap(a) + +def _fromstring_bin(space, s, count, length, dtype): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + + itemsize = dtype.itemtype.get_element_size() + if count == -1: + count = length / itemsize + if length % itemsize != 0: + raise operationerrfmt(space.w_ValueError, + "string length %d not divisable by item size %d", + length, itemsize) + if count * itemsize > length: + raise OperationError(space.w_ValueError, space.wrap( + "string is smaller than requested size")) + + a = W_NDimArray(count, [count], dtype=dtype) + for i in range(count): + val = dtype.itemtype.runpack_str(s[i*itemsize:i*itemsize + itemsize]) + a.dtype.setitem(a.storage, i, val) + + return space.wrap(a) + + at unwrap_spec(s=str, count=int, sep=str) +def fromstring(space, s, w_dtype=None, count=-1, sep=''): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) length = len(s) - - if length % FLOAT_SIZE == 0: - number = length/FLOAT_SIZE + if sep == '': + return _fromstring_bin(space, s, count, length, dtype) else: - raise OperationError(space.w_ValueError, space.wrap( - "string length %d not divisable by %d" % (length, FLOAT_SIZE))) - - dtype = get_dtype_cache(space).w_float64dtype - a = W_NDimArray(number, [number], dtype=dtype) - - start = 0 - end = FLOAT_SIZE - i = 0 - while i < number: - part = s[start:end] - a.dtype.setitem(a.storage, i, dtype.box(runpack('d', part))) - i += 1 - start += FLOAT_SIZE - end += FLOAT_SIZE - - return space.wrap(a) + return _fromstring_text(space, s, count, sep, length, dtype) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1194,13 +1194,107 @@ import struct BaseNumpyAppTest.setup_class.im_func(cls) cls.w_data = cls.space.wrap(struct.pack('dddd', 1, 2, 3, 4)) + cls.w_fdata = cls.space.wrap(struct.pack('f', 2.3)) + cls.w_float32val = cls.space.wrap(struct.pack('f', 5.2)) + cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4)) def test_fromstring(self): - from numpypy import fromstring + from numpypy import fromstring, array, uint8, float32, int32 + import sys a = fromstring(self.data) for i in range(4): assert a[i] == i + 1 - raises(ValueError, fromstring, "abc") + b = fromstring('\x01\x02', dtype=uint8) + assert a[0] == 1 + assert a[1] == 2 + c = fromstring(self.fdata, dtype=float32) + assert c[0] == float32(2.3) + d = fromstring("1 2", sep=' ', count=2, dtype=uint8) + assert len(d) == 2 + assert d[0] == 1 + assert d[1] == 2 + e = fromstring('3, 4,5', dtype=uint8, sep=',') + assert len(e) == 3 + assert e[0] == 3 + assert e[1] == 4 + assert e[2] == 5 + f = fromstring('\x01\x02\x03\x04\x05', dtype=uint8, count=3) + assert len(f) == 3 + assert f[0] == 1 + assert f[1] == 2 + assert f[2] == 3 + g = fromstring("1 2 3 ", dtype=uint8, sep=" ") + assert len(g) == 3 + assert g[0] == 1 + assert g[1] == 2 + assert g[2] == 3 + h = fromstring("1, , 2, 3", dtype=uint8, sep=",") + assert (h == [1,0,2,3]).all() + i = fromstring("1 2 3", dtype=uint8, sep=" ") + assert (i == [1,2,3]).all() + j = fromstring("1\t\t\t\t2\t3", dtype=uint8, sep="\t") + assert (j == [1,2,3]).all() + k = fromstring("1,x,2,3", dtype=uint8, 
sep=",") + assert (k == [1,0]).all() + l = fromstring("1,x,2,3", dtype='float32', sep=",") + assert (l == [1.0,-1.0]).all() + m = fromstring("1,,2,3", sep=",") + assert (m == [1.0,-1.0,2.0,3.0]).all() + n = fromstring("3.4 2.0 3.8 2.2", dtype=int32, sep=" ") + assert (n == [3]).all() + o = fromstring("1.0 2f.0f 3.8 2.2", dtype=float32, sep=" ") + assert len(o) == 2 + assert o[0] == 1.0 + assert o[1] == 2.0 + p = fromstring("1.0,,2.0,3.0", sep=",") + assert (p == [1.0, -1.0, 2.0, 3.0]).all() + q = fromstring("1.0,,2.0,3.0", sep=" ") + assert (q == [1.0]).all() + r = fromstring("\x01\x00\x02", dtype='bool') + assert (r == [True, False, True]).all() + s = fromstring("1,2,3,,5", dtype=bool, sep=",") + assert (s == [True, True, True, False, True]).all() + t = fromstring("", bool) + assert (t == []).all() + u = fromstring("\x01\x00\x00\x00\x00\x00\x00\x00", dtype=int) + if sys.maxint > 2 ** 31 - 1: + assert (u == [1]).all() + else: + assert (u == [1, 0]).all() + + def test_fromstring_types(self): + from numpypy import fromstring + from numpypy import int8, int16, int32, int64 + from numpypy import uint8, uint16, uint32 + from numpypy import float32, float64 + a = fromstring('\xFF', dtype=int8) + assert a[0] == -1 + b = fromstring('\xFF', dtype=uint8) + assert b[0] == 255 + c = fromstring('\xFF\xFF', dtype=int16) + assert c[0] == -1 + d = fromstring('\xFF\xFF', dtype=uint16) + assert d[0] == 65535 + e = fromstring('\xFF\xFF\xFF\xFF', dtype=int32) + assert e[0] == -1 + f = fromstring('\xFF\xFF\xFF\xFF', dtype=uint32) + assert repr(f[0]) == '4294967295' + g = fromstring('\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF', dtype=int64) + assert g[0] == -1 + h = fromstring(self.float32val, dtype=float32) + assert h[0] == float32(5.2) + i = fromstring(self.float64val, dtype=float64) + assert i[0] == float64(300.4) + + + def test_fromstring_invalid(self): + from numpypy import fromstring, uint16, uint8, int32 + #default dtype is 64-bit float, so 3 bytes should fail + raises(ValueError, fromstring, "\x01\x02\x03") + #3 bytes is not modulo 2 bytes (int16) + raises(ValueError, fromstring, "\x01\x03\x03", dtype=uint16) + #5 bytes is larger than 3 bytes + raises(ValueError, fromstring, "\x01\x02\x03", count=5, dtype=uint8) class AppTestRepr(BaseNumpyAppTest): diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -8,6 +8,7 @@ from pypy.rlib.objectmodel import specialize from pypy.rlib.rarithmetic import LONG_BIT, widen from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rlib.rstruct.runpack import runpack def simple_unary_op(func): @@ -55,6 +56,8 @@ class Primitive(object): _mixin_ = True + format_code = '?' + def get_element_size(self): return rffi.sizeof(self.T) @@ -84,6 +87,9 @@ def _coerce(self, space, w_item): raise NotImplementedError + def default_fromstring(self, space): + raise NotImplementedError + def read(self, storage, width, i, offset): return self.box(libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), width, storage, i, offset @@ -102,6 +108,9 @@ width, storage, i, offset, value ) + def runpack_str(self, s): + return self.box(runpack(self.format_code, s)) + @simple_binary_op def add(self, v1, v2): return v1 + v2 @@ -164,6 +173,7 @@ class Bool(BaseType, Primitive): T = lltype.Bool BoxType = interp_boxes.W_BoolBox + format_code = '?' 
True = BoxType(True) False = BoxType(False) @@ -192,6 +202,9 @@ def for_computation(self, v): return int(v) + + def default_fromstring(self, space): + return self.box(False) class Integer(Primitive): _mixin_ = True @@ -205,6 +218,9 @@ def for_computation(self, v): return widen(v) + + def default_fromstring(self, space): + return self.box(0) @simple_binary_op def div(self, v1, v2): @@ -241,30 +257,37 @@ class Int8(BaseType, Integer): T = rffi.SIGNEDCHAR BoxType = interp_boxes.W_Int8Box + format_code = "b" class UInt8(BaseType, Integer): T = rffi.UCHAR BoxType = interp_boxes.W_UInt8Box + format_code = "B" class Int16(BaseType, Integer): T = rffi.SHORT BoxType = interp_boxes.W_Int16Box + format_code = "h" class UInt16(BaseType, Integer): T = rffi.USHORT BoxType = interp_boxes.W_UInt16Box + format_code = "H" class Int32(BaseType, Integer): T = rffi.INT BoxType = interp_boxes.W_Int32Box + format_code = "i" class UInt32(BaseType, Integer): T = rffi.UINT BoxType = interp_boxes.W_UInt32Box + format_code = "I" class Long(BaseType, Integer): T = rffi.LONG BoxType = interp_boxes.W_LongBox + format_code = 'l' class ULong(BaseType, Integer): T = rffi.ULONG @@ -273,10 +296,12 @@ class Int64(BaseType, Integer): T = rffi.LONGLONG BoxType = interp_boxes.W_Int64Box + format_code = "q" class UInt64(BaseType, Integer): T = rffi.ULONGLONG BoxType = interp_boxes.W_UInt64Box + format_code = "Q" def _coerce(self, space, w_item): try: @@ -304,6 +329,9 @@ def for_computation(self, v): return float(v) + def default_fromstring(self, space): + return self.box(-1.0) + @simple_binary_op def div(self, v1, v2): try: @@ -403,7 +431,9 @@ class Float32(BaseType, Float): T = rffi.FLOAT BoxType = interp_boxes.W_Float32Box + format_code = "f" class Float64(BaseType, Float): T = rffi.DOUBLE - BoxType = interp_boxes.W_Float64Box \ No newline at end of file + BoxType = interp_boxes.W_Float64Box + format_code = "d" \ No newline at end of file From noreply at buildbot.pypy.org Thu Dec 15 21:23:06 2011 From: noreply at buildbot.pypy.org (jterrace) Date: Thu, 15 Dec 2011 21:23:06 +0100 (CET) Subject: [pypy-commit] pypy numpy-full-fromstring: Closing merged branch Message-ID: <20111215202306.9BBF982221@wyvern.cs.uni-duesseldorf.de> Author: Jeff Terrace Branch: numpy-full-fromstring Changeset: r50593:0fe83ac4f0da Date: 2011-12-15 15:22 -0500 http://bitbucket.org/pypy/pypy/changeset/0fe83ac4f0da/ Log: Closing merged branch From noreply at buildbot.pypy.org Thu Dec 15 21:40:00 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 15 Dec 2011 21:40:00 +0100 (CET) Subject: [pypy-commit] pypy default: fixes for ulong dtype and some small other cleanups Message-ID: <20111215204000.ACE5D82221@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50594:2b3d72c181dd Date: 2011-12-15 14:39 -0600 http://bitbucket.org/pypy/pypy/changeset/2b3d72c181dd/ Log: fixes for ulong dtype and some small other cleanups diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -133,7 +133,7 @@ descr__new__, get_dtype = new_dtype_getter("long") class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - pass + descr__new__, get_dtype = new_dtype_getter("ulong") class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int64") diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- 
a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1197,10 +1197,12 @@ cls.w_fdata = cls.space.wrap(struct.pack('f', 2.3)) cls.w_float32val = cls.space.wrap(struct.pack('f', 5.2)) cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4)) + cls.w_ulongval = cls.space.wrap(struct.pack('L', 12)) def test_fromstring(self): + import sys from numpypy import fromstring, array, uint8, float32, int32 - import sys + a = fromstring(self.data) for i in range(4): assert a[i] == i + 1 @@ -1261,12 +1263,11 @@ assert (u == [1]).all() else: assert (u == [1, 0]).all() - + def test_fromstring_types(self): - from numpypy import fromstring - from numpypy import int8, int16, int32, int64 - from numpypy import uint8, uint16, uint32 - from numpypy import float32, float64 + from numpypy import (fromstring, int8, int16, int32, int64, uint8, + uint16, uint32, float32, float64) + a = fromstring('\xFF', dtype=int8) assert a[0] == -1 b = fromstring('\xFF', dtype=uint8) @@ -1285,8 +1286,10 @@ assert h[0] == float32(5.2) i = fromstring(self.float64val, dtype=float64) assert i[0] == float64(300.4) - - + j = fromstring(self.ulongval, dtype='L') + assert j[0] == 12 + + def test_fromstring_invalid(self): from numpypy import fromstring, uint16, uint8, int32 #default dtype is 64-bit float, so 3 bytes should fail diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -56,8 +56,7 @@ class Primitive(object): _mixin_ = True - format_code = '?' - + def get_element_size(self): return rffi.sizeof(self.T) @@ -173,7 +172,7 @@ class Bool(BaseType, Primitive): T = lltype.Bool BoxType = interp_boxes.W_BoolBox - format_code = '?' + format_code = "?" 
True = BoxType(True) False = BoxType(False) @@ -202,7 +201,7 @@ def for_computation(self, v): return int(v) - + def default_fromstring(self, space): return self.box(False) @@ -218,7 +217,7 @@ def for_computation(self, v): return widen(v) - + def default_fromstring(self, space): return self.box(0) @@ -287,11 +286,12 @@ class Long(BaseType, Integer): T = rffi.LONG BoxType = interp_boxes.W_LongBox - format_code = 'l' + format_code = "l" class ULong(BaseType, Integer): T = rffi.ULONG BoxType = interp_boxes.W_ULongBox + format_code = "L" class Int64(BaseType, Integer): T = rffi.LONGLONG From noreply at buildbot.pypy.org Thu Dec 15 23:04:30 2011 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 15 Dec 2011 23:04:30 +0100 (CET) Subject: [pypy-commit] pypy numpy-concatenate: add failing test Message-ID: <20111215220430.360CE82221@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpy-concatenate Changeset: r50595:ce05e49e8eda Date: 2011-12-16 00:02 +0200 http://bitbucket.org/pypy/pypy/changeset/ce05e49e8eda/ Log: add failing test diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -924,12 +924,13 @@ assert a[0].tolist() == [17.1, 27.2] def test_concatenate(self): - from numpypy import array, concatenate + from numpypy import array, concatenate, dtype a1 = array([0,1,2]) a2 = array([3,4,5]) a = concatenate((a1, a2)) assert len(a) == 6 assert (a == [0,1,2,3,4,5]).all() + assert a.dtype is dtype(int) b1 = array([[1, 2], [3, 4]]) b2 = array([[5, 6]]) b = concatenate((b1, b2), axis=0) From noreply at buildbot.pypy.org Fri Dec 16 05:57:53 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Dec 2011 05:57:53 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: Kill duplicate function Message-ID: <20111216045753.1EDE482221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: arm-backend-2 Changeset: r50596:c8a475c4557b Date: 2011-12-16 05:55 +0100 http://bitbucket.org/pypy/pypy/changeset/c8a475c4557b/ Log: Kill duplicate function diff --git a/pypy/jit/backend/arm/runner.py b/pypy/jit/backend/arm/runner.py --- a/pypy/jit/backend/arm/runner.py +++ b/pypy/jit/backend/arm/runner.py @@ -66,9 +66,6 @@ def get_latest_value_count(self): return self.assembler.fail_boxes_count - def get_latest_value_count(self): - return self.assembler.fail_boxes_count - def get_latest_force_token(self): return self.assembler.fail_force_index From noreply at buildbot.pypy.org Fri Dec 16 05:57:54 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Dec 2011 05:57:54 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: Fix fail_boxes_count, which is one more than the highest index written. Message-ID: <20111216045754.410B882221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: arm-backend-2 Changeset: r50597:2f5bb75ff7c8 Date: 2011-12-16 05:56 +0100 http://bitbucket.org/pypy/pypy/changeset/2f5bb75ff7c8/ Log: Fix fail_boxes_count, which is one more than the highest index written. Add an assert checking that the pointer values written are really pointer-like. 
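(Illustration, not taken from the changeset below: the new assert is only a sanity check on alignment. On a 32-bit target, heap objects are at least word-aligned, so the two low bits of any genuine GC reference are zero; a value with either bit set cannot be a valid pointer. A minimal standalone sketch, with the helper name invented here:)

    WORD_ALIGN_MASK = 3  # 4-byte alignment assumed, as on a 32-bit target

    def looks_like_pointer(value):
        # a real GC reference must have its two low bits clear
        return (value & WORD_ALIGN_MASK) == 0

    assert looks_like_pointer(0x2000)       # plausible word-aligned address
    assert not looks_like_pointer(0x2002)   # misaligned, cannot be a reference
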
diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -262,6 +262,7 @@ if group == self.INT_TYPE: self.fail_boxes_int.setitem(fail_index, value) elif group == self.REF_TYPE: + assert (value & 3) == 0, "misaligned pointer" tgt = self.fail_boxes_ptr.get_addr_for_num(fail_index) rffi.cast(rffi.LONGP, tgt)[0] = value else: @@ -270,7 +271,7 @@ assert enc[i] == self.END_OF_LOCS descr = decode32(enc, i+1) - self.fail_boxes_count = fail_index + self.fail_boxes_count = fail_index + 1 self.fail_force_index = frame_loc return descr From noreply at buildbot.pypy.org Fri Dec 16 06:07:08 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Dec 2011 06:07:08 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: Sorry, nonsense. Message-ID: <20111216050708.3EC6982221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: arm-backend-2 Changeset: r50598:a770817aed83 Date: 2011-12-16 06:06 +0100 http://bitbucket.org/pypy/pypy/changeset/a770817aed83/ Log: Sorry, nonsense. diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -271,7 +271,7 @@ assert enc[i] == self.END_OF_LOCS descr = decode32(enc, i+1) - self.fail_boxes_count = fail_index + 1 + self.fail_boxes_count = fail_index self.fail_force_index = frame_loc return descr From noreply at buildbot.pypy.org Fri Dec 16 06:18:09 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Dec 2011 06:18:09 +0100 (CET) Subject: [pypy-commit] pypy default: Fix for sandbox. Message-ID: <20111216051809.5BF4582221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50599:d9b372cf25b0 Date: 2011-12-16 06:17 +0100 http://bitbucket.org/pypy/pypy/changeset/d9b372cf25b0/ Log: Fix for sandbox. 
diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py --- a/pypy/rlib/longlong2float.py +++ b/pypy/rlib/longlong2float.py @@ -79,19 +79,19 @@ longlong2float = rffi.llexternal( "pypy__longlong2float", [rffi.LONGLONG], rffi.DOUBLE, _callable=longlong2float_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) float2longlong = rffi.llexternal( "pypy__float2longlong", [rffi.DOUBLE], rffi.LONGLONG, _callable=float2longlong_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, _callable=uint2singlefloat_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) singlefloat2uint = rffi.llexternal( "pypy__singlefloat2uint", [rffi.FLOAT], rffi.UINT, _callable=singlefloat2uint_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) From noreply at buildbot.pypy.org Fri Dec 16 06:20:59 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Dec 2011 06:20:59 +0100 (CET) Subject: [pypy-commit] pypy default: Fix test: "width" => "get_width()" Message-ID: <20111216052059.1DEA882221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50600:9307d48e5da0 Date: 2011-12-16 06:20 +0100 http://bitbucket.org/pypy/pypy/changeset/9307d48e5da0/ Log: Fix test: "width" => "get_width()" diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -249,7 +249,7 @@ while len(result) < count: x = fn() keys = [x._getregkey()] - if isinstance(x, StackLoc) and x.width > WORD: + if isinstance(x, StackLoc) and x.get_width() > WORD: keys.append(keys[0] + WORD) for key in keys: if key in seen: @@ -267,7 +267,7 @@ for i, loc in enumerate(locations): if isinstance(loc, RegLoc): if loc.is_xmm: - if loc.width > WORD: + if loc.get_width() > WORD: newvalue = ('value-xmm-%d' % i, 'value-xmm-hiword-%d' % i) else: @@ -276,8 +276,8 @@ else: regs1[loc.value] = 'value-int-%d' % i elif isinstance(loc, StackLoc): - stack[loc.value] = 'value-width%d-%d' % (loc.width, i) - if loc.width > WORD: + stack[loc.value] = 'value-width%d-%d' % (loc.get_width(), i) + if loc.get_width() > WORD: stack[loc.value+WORD] = 'value-hiword-%d' % i else: assert isinstance(loc, ImmedLoc) @@ -299,7 +299,7 @@ # def read(loc, expected_width=None): if expected_width is not None: - assert loc.width == expected_width + assert loc.get_width() == expected_width if isinstance(loc, RegLoc): if loc.is_xmm: return regs2[loc.value] @@ -307,7 +307,7 @@ return regs1[loc.value] if isinstance(loc, StackLoc): got = stack[loc.value] - if loc.width > WORD: + if loc.get_width() > WORD: got = (got, stack[loc.value+WORD]) return got if isinstance(loc, ImmedLoc): @@ -321,7 +321,7 @@ else: regs1[loc.value] = newvalue elif isinstance(loc, StackLoc): - if loc.width > WORD: + if loc.get_width() > WORD: newval1, newval2 = newvalue stack[loc.value] = newval1 stack[loc.value+WORD] = newval2 From noreply at buildbot.pypy.org Fri Dec 16 06:29:24 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Dec 2011 06:29:24 +0100 (CET) Subject: [pypy-commit] buildbot default: Don't run this server nightly as long as it hogs the 
Message-ID: <20111216052924.575B982221@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r605:b5e0f9f4bc83 Date: 2011-12-16 06:29 +0100 http://bitbucket.org/pypy/buildbot/changeset/b5e0f9f4bc83/ Log: Don't run this server nightly as long as it hogs the same global lock at tannit and takes more than 5 hours to run (twice as much as on tannit??) diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -204,7 +204,7 @@ Nightly("nightly-0-00", [ JITBENCH, # on tannit32, uses 1 core (in part exclusively) JITBENCH64, # on tannit64, uses 1 core (in part exclusively) - JITBENCH64_2, # on speed.python.org, uses 1 core (in part exclusively) + #JITBENCH64_2, # on speed.python.org, uses 1 core (in part exclusively) MACOSX32, # on minime ], branch=None, hour=0, minute=0), # From noreply at buildbot.pypy.org Fri Dec 16 11:12:06 2011 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 16 Dec 2011 11:12:06 +0100 (CET) Subject: [pypy-commit] pypy better-jit-hooks: I'm sure this is not very intentional Message-ID: <20111216101206.3E23C82287@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: better-jit-hooks Changeset: r50601:a6fd97f41c91 Date: 2011-12-16 12:10 +0200 http://bitbucket.org/pypy/pypy/changeset/a6fd97f41c91/ Log: I'm sure this is not very intentional diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -771,4 +771,3 @@ v_cls = hop.inputarg(classrepr, arg=1) return hop.genop('jit_record_known_class', [v_inst, v_cls], resulttype=lltype.Void) ->>>>>>> other From noreply at buildbot.pypy.org Fri Dec 16 11:43:26 2011 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 16 Dec 2011 11:43:26 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: mark this as immutable as well Message-ID: <20111216104326.3EA1F82287@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50602:06599e469d83 Date: 2011-12-16 12:41 +0200 http://bitbucket.org/pypy/pypy/changeset/06599e469d83/ Log: mark this as immutable as well diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -16,7 +16,7 @@ class W_Ufunc(Wrappable): _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"] - _immutable_fields_ = ["promote_to_float", "promote_bools"] + _immutable_fields_ = ["promote_to_float", "promote_bools", "name"] def __init__(self, name, promote_to_float, promote_bools, identity): self.name = name From noreply at buildbot.pypy.org Fri Dec 16 12:35:47 2011 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 16 Dec 2011 12:35:47 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: Refactor a bit - now ConcreteArray is something with strides, everything Message-ID: <20111216113547.AFA9882287@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50603:8ac7503b0bf3 Date: 2011-12-16 13:34 +0200 http://bitbucket.org/pypy/pypy/changeset/8ac7503b0bf3/ Log: Refactor a bit - now ConcreteArray is something with strides, everything else has no strides diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -203,37 +203,16 @@ return new_strides class BaseArray(Wrappable): - _attrs_ = ["invalidates", "shape", 
"strides", "backstrides", - "start", 'order'] + _attrs_ = ["invalidates", "shape"] - _immutable_fields_ = ['start', "order"] + _immutable_fields_ = [] strides = None start = 0 - def __init__(self, shape, order): + def __init__(self, shape): self.invalidates = [] self.shape = shape - self.order = order - if self.strides is None: - self.calc_strides(shape) - - def calc_strides(self, shape): - strides = [] - backstrides = [] - s = 1 - shape_rev = shape[:] - if self.order == 'C': - shape_rev.reverse() - for sh in shape_rev: - strides.append(s) - backstrides.append(s * (sh - 1)) - s *= sh - if self.order == 'C': - strides.reverse() - backstrides.reverse() - self.strides = strides[:] - self.backstrides = backstrides[:] def invalidated(self): if self.invalidates: @@ -403,7 +382,10 @@ concrete = self.get_concrete() new_shape = get_shape_from_iterable(space, concrete.find_size(), w_iterable) - concrete.setshape(space, new_shape) + if isinstance(self, ConcreteArray): + # scalars don't have to do anything, just check if the shape + # is still empty + concrete.setshape(space, new_shape) def descr_get_size(self, space): return space.wrap(self.find_size()) @@ -556,10 +538,8 @@ """ shape_len = len(self.shape) if shape_len == 0: - if not space.isinstance_w(w_idx, space.w_int): - raise OperationError(space.w_IndexError, space.wrap( - "wrong index")) - return True + raise OperationError(space.w_IndexError, space.wrap( + "0-d arrays can't be indexed")) if shape_len == 1: if space.isinstance_w(w_idx, space.w_int): return True @@ -590,6 +570,7 @@ def descr_getitem(self, space, w_idx): if self._single_item_result(space, w_idx): concrete = self.get_concrete() + assert isinstance(concrete, ConcreteArray) if len(concrete.shape) < 1: raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) @@ -602,6 +583,7 @@ self.invalidated() if self._single_item_result(space, w_idx): concrete = self.get_concrete() + assert isinstance(concrete, ConcreteArray) if len(concrete.shape) < 1: raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) @@ -616,37 +598,39 @@ @jit.unroll_safe def create_slice(self, space, chunks): + concr = self.get_concrete() + assert isinstance(concr, ConcreteArray) if len(chunks) == 1: start, stop, step, lgt = chunks[0] if step == 0: shape = self.shape[1:] - strides = self.strides[1:] - backstrides = self.backstrides[1:] + strides = concr.strides[1:] + backstrides = concr.backstrides[1:] else: shape = [lgt] + self.shape[1:] - strides = [self.strides[0] * step] + self.strides[1:] - backstrides = [(lgt - 1) * self.strides[0] * step] + self.backstrides[1:] - start *= self.strides[0] - start += self.start + strides = [concr.strides[0] * step] + concr.strides[1:] + backstrides = [(lgt - 1) * concr.strides[0] * step] + concr.backstrides[1:] + start *= concr.strides[0] + start += concr.start else: shape = [] strides = [] backstrides = [] - start = self.start + start = concr.start i = -1 for i, (start_, stop, step, lgt) in enumerate(chunks): if step != 0: shape.append(lgt) - strides.append(self.strides[i] * step) - backstrides.append(self.strides[i] * (lgt - 1) * step) - start += self.strides[i] * start_ + strides.append(concr.strides[i] * step) + backstrides.append(concr.strides[i] * (lgt - 1) * step) + start += concr.strides[i] * start_ # add a reminder s = i + 1 assert s >= 0 - shape += self.shape[s:] - strides += self.strides[s:] - backstrides += self.backstrides[s:] - return W_NDimSlice(self, start, strides[:], backstrides[:], + shape += concr.shape[s:] 
+ strides += concr.strides[s:] + backstrides += concr.backstrides[s:] + return W_NDimSlice(concr, start, strides[:], backstrides[:], shape[:]) def descr_reshape(self, space, args_w): @@ -747,8 +731,8 @@ _attrs_ = ["dtype", "value", "shape"] def __init__(self, dtype, value): - self.shape = self.strides = [] - BaseArray.__init__(self, [], 'C') + self.shape = [] + BaseArray.__init__(self, []) self.dtype = dtype self.value = value @@ -782,8 +766,8 @@ """ Class for representing virtual arrays, such as binary ops or ufuncs """ - def __init__(self, name, shape, res_dtype, order): - BaseArray.__init__(self, shape, order) + def __init__(self, name, shape, res_dtype): + BaseArray.__init__(self, shape) self.forced_result = None self.res_dtype = res_dtype self.name = name @@ -838,9 +822,8 @@ class Call1(VirtualArray): - def __init__(self, ufunc, name, shape, res_dtype, values, order): - VirtualArray.__init__(self, name, shape, res_dtype, - values.order) + def __init__(self, ufunc, name, shape, res_dtype, values): + VirtualArray.__init__(self, name, shape, res_dtype) self.values = values self.ufunc = ufunc @@ -863,7 +846,7 @@ Intermediate class for performing binary operations. """ def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right): - VirtualArray.__init__(self, name, shape, res_dtype, left.order) + VirtualArray.__init__(self, name, shape, res_dtype) self.ufunc = ufunc self.left = left self.right = right @@ -886,7 +869,34 @@ self.left.create_sig(), self.right.create_sig()) -class ViewArray(BaseArray): +class ConcreteArray(BaseArray): + """ An array that have actual storage, whether owned or not + """ + def __init__(self, shape, order): + self.order = order + if self.strides is None: + self.calc_strides(shape) + BaseArray.__init__(self, shape) + + def calc_strides(self, shape): + strides = [] + backstrides = [] + s = 1 + shape_rev = shape[:] + if self.order == 'C': + shape_rev.reverse() + for sh in shape_rev: + strides.append(s) + backstrides.append(s * (sh - 1)) + s *= sh + if self.order == 'C': + strides.reverse() + backstrides.reverse() + self.strides = strides[:] + self.backstrides = backstrides[:] + + +class ConcreteViewArray(ConcreteArray): """ Class for representing views of arrays, they will reflect changes of parent arrays. Example: slices @@ -894,13 +904,14 @@ def __init__(self, parent, strides, backstrides, shape): self.strides = strides self.backstrides = backstrides - BaseArray.__init__(self, shape, parent.order) + ConcreteArray.__init__(self, shape, parent.order) assert isinstance(parent, W_NDimArray) self.parent = parent self.invalidates = parent.invalidates def get_concrete(self): - # in fact, ViewArray never gets "concrete" as it never stores data. + # in fact, ConcreteViewArray never gets "concrete" as it never + # stores data. # This implementation is needed for BaseArray getitem/setitem to work, # can be refactored. 
self.parent.get_concrete() @@ -959,7 +970,7 @@ self.backstrides = new_backstrides[:] self.shape = new_shape[:] -class W_NDimSlice(ViewArray): +class W_NDimSlice(ConcreteViewArray): def __init__(self, parent, start, strides, backstrides, shape): if isinstance(parent, W_NDimSlice): parent = parent.parent @@ -967,7 +978,7 @@ # XXX this should not force the array, but it did before the # refactoring anyway, just in a more obscure way parent = parent.get_concrete() - ViewArray.__init__(self, parent, strides, backstrides, shape) + ConcreteViewArray.__init__(self, parent, strides, backstrides, shape) self.start = start self.size = 1 for sh in shape: @@ -1015,14 +1026,14 @@ def create_sig(self): return signature.ViewSignature(self.parent.create_sig()) -class W_NDimArray(BaseArray): +class W_NDimArray(ConcreteArray): """ A class representing contiguous array. We know that each iteration by say ufunc will increase the data index by one """ _immutable_fields_ = ['storage'] def __init__(self, size, shape, dtype, order='C'): - BaseArray.__init__(self, shape, order) + ConcreteArray.__init__(self, shape, order) self.size = size self.dtype = dtype self.storage = dtype.malloc(size) @@ -1213,15 +1224,15 @@ ) -class W_FlatIterator(ViewArray): +class W_FlatIterator(ConcreteViewArray): @jit.unroll_safe def __init__(self, arr): size = 1 for sh in arr.shape: size *= sh - ViewArray.__init__(self, arr.get_concrete(), [arr.strides[-1]], - [arr.backstrides[-1]], [size]) + ConcreteViewArray.__init__(self, arr.get_concrete(), [arr.strides[-1]], + [arr.backstrides[-1]], [size]) self.shapelen = len(arr.shape) self.arr = arr self.iter = OneDimIterator(self.arr.start, self.strides[0], diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -121,8 +121,7 @@ if isinstance(w_obj, Scalar): return self.func(res_dtype, w_obj.value.convert_to(res_dtype)) - w_res = Call1(self.func, self.name, w_obj.shape, res_dtype, w_obj, - w_obj.order) + w_res = Call1(self.func, self.name, w_obj.shape, res_dtype, w_obj) w_obj.add_invalidates(w_res) return w_res diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -1,4 +1,5 @@ from pypy.rlib.objectmodel import r_dict, compute_identity_hash, compute_hash +from pypy.rlib.rarithmetic import intmask from pypy.module.micronumpy.interp_iter import ViewIterator, ArrayIterator, \ BroadcastIterator, OneDimIterator, ConstantIterator from pypy.rlib.jit import hint, unroll_safe, promote @@ -63,7 +64,7 @@ class Signature(object): _attrs_ = ['iter_no'] _immutable_fields_ = ['iter_no'] - + def invent_numbering(self): cache = r_dict(sigeq, sighash) allnumbers = [] @@ -171,6 +172,9 @@ self.iter_no = no def _create_iter(self, iterlist, arr): + from pypy.module.micronumpy.interp_numarray import ConcreteViewArray + + assert isinstance(arr, ConcreteViewArray) if self.iter_no >= len(iterlist): iterlist.append(ViewIterator(arr)) @@ -197,7 +201,7 @@ self.name = name def hash(self): - return compute_hash(self.name) ^ self.child.hash() << 1 + return compute_hash(self.name) ^ intmask(self.child.hash() << 1) def eq(self, other): if type(self) is not type(other): @@ -233,8 +237,8 @@ self.calc_dtype = calc_dtype def hash(self): - return (compute_hash(self.name) ^ (self.left.hash() << 1) ^ - (self.right.hash() << 2)) + return (compute_hash(self.name) ^ 
intmask(self.left.hash() << 1) ^ + intmask(self.right.hash() << 2)) def eq(self, other): if type(self) is not type(other): From noreply at buildbot.pypy.org Fri Dec 16 12:40:43 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Dec 2011 12:40:43 +0100 (CET) Subject: [pypy-commit] pypy default: Fix: again only look at the "loop" part, ignoring the "preamble" part. Message-ID: <20111216114043.7A99382287@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50604:179263e7dd58 Date: 2011-12-16 11:40 +0000 http://bitbucket.org/pypy/pypy/changeset/179263e7dd58/ Log: Fix: again only look at the "loop" part, ignoring the "preamble" part. diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -24,10 +24,8 @@ # to the interpreter hoping to immediately run the JITted # code; but instead, we Trace again, just because another # counter was also about to reach its limit... - loop, = log.loops_by_filename(self.filepath, is_entry_bridge='*') + loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ - ... - label(..., descr=...) i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) setfield_gc(p45, i29, descr=) From noreply at buildbot.pypy.org Fri Dec 16 21:12:34 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 16 Dec 2011 21:12:34 +0100 (CET) Subject: [pypy-commit] pypy default: make str.replace() not do tons of copying Message-ID: <20111216201234.793E782287@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50605:10601f705a55 Date: 2011-12-16 14:12 -0600 http://bitbucket.org/pypy/pypy/changeset/10601f705a55/ Log: make str.replace() not do tons of copying diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -514,44 +514,41 @@ if maxsplit == 0: return space.wrap(input) - #print "from replace, input: %s, sub: %s, by: %s" % (input, sub, by) + # An ok guess at the default size + builder = StringBuilder(len(input)) + first = True if not sub: upper = len(input) if maxsplit > 0 and maxsplit < upper + 2: upper = maxsplit - 1 assert upper >= 0 - substrings_w = [""] + first = False for i in range(upper): - c = input[i] - substrings_w.append(c) - substrings_w.append(input[upper:]) + builder.append(by) + builder.append(input[i]) + builder.append(by) + builder.append_slice(input, upper, len(input)) else: start = 0 sublen = len(sub) - substrings_w = [] while maxsplit != 0: next = input.find(sub, start) if next < 0: break - substrings_w.append(input[start:next]) + if not first: + builder.append(by) + first = False + builder.append_slice(input, start, next) start = next + sublen maxsplit -= 1 # NB. if it's already < 0, it stays < 0 - substrings_w.append(input[start:]) + if not first: + builder.append(by) + builder.append_slice(input, start, len(input)) - try: - # XXX conservative estimate. If your strings are that close - # to overflowing, bad luck. 
- one = ovfcheck(len(substrings_w) * len(by)) - ovfcheck(one + len(input)) - except OverflowError: - raise OperationError( - space.w_OverflowError, - space.wrap("replace string is too long")) - - return space.wrap(by.join(substrings_w)) + return space.wrap(builder.build()) def str_replace__String_ANY_ANY_ANY(space, w_self, w_sub, w_by, w_maxsplit): From noreply at buildbot.pypy.org Fri Dec 16 21:40:35 2011 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 16 Dec 2011 21:40:35 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: sharing arrays Message-ID: <20111216204035.1545A82287@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50606:82d0ce07b964 Date: 2011-12-16 22:40 +0200 http://bitbucket.org/pypy/pypy/changeset/82d0ce07b964/ Log: sharing arrays diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -708,7 +708,7 @@ def find_sig(self): """ find a correct signature for the array """ - return signature.find_sig(self.create_sig()) + return signature.find_sig(self.create_sig(), self) def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -70,7 +70,7 @@ shapelen = len(obj.shape) sig = find_sig(ReduceSignature(self.func, self.name, dtype, ScalarSignature(dtype), - obj.create_sig())) + obj.create_sig()), obj) frame = sig.create_frame(obj) if shapelen > 1 and not multidim: raise OperationError(space.w_NotImplementedError, diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -2,6 +2,7 @@ from pypy.rlib.rarithmetic import intmask from pypy.module.micronumpy.interp_iter import ViewIterator, ArrayIterator, \ BroadcastIterator, OneDimIterator, ConstantIterator +from pypy.rpython.lltypesystem.llmemory import cast_ptr_to_adr from pypy.rlib.jit import hint, unroll_safe, promote # def components_eq(lhs, rhs): @@ -22,12 +23,16 @@ def sigeq(one, two): return one.eq(two) +def sigeq2(one, two): + return one.eq(two, compare_array_no=False) + def sighash(sig): return sig.hash() known_sigs = r_dict(sigeq, sighash) -def find_sig(sig): +def find_sig(sig, arr): + sig.invent_array_numbering(arr) try: return known_sigs[sig] except KeyError: @@ -36,12 +41,13 @@ return sig class NumpyEvalFrame(object): - _virtualizable2_ = ['iterators[*]', 'final_iter'] + _virtualizable2_ = ['iterators[*]', 'final_iter', 'arraylist[*]'] @unroll_safe - def __init__(self, iterators): + def __init__(self, iterators, arrays): self = hint(self, access_directly=True, fresh_virtualizable=True) self.iterators = iterators[:] + self.arrays = arrays[:] for i in range(len(self.iterators)): iter = self.iterators[i] if not isinstance(iter, ConstantIterator):# or not isinstance(iter, BroadcastIterator): @@ -61,15 +67,33 @@ for i in range(len(self.iterators)): self.iterators[i] = self.iterators[i].next(shapelen) +def _add_ptr_to_cache(ptr, cache): + i = 0 + for p in cache: + if ptr == p: + return i + i += 1 + else: + res = len(cache) + cache.append(ptr) + return res + class Signature(object): - _attrs_ = ['iter_no'] - _immutable_fields_ = ['iter_no'] + _attrs_ = ['iter_no', 'array_no'] + _immutable_fields_ = ['iter_no', 'array_no'] 
+ + array_no = 0 + iter_no = 0 def invent_numbering(self): - cache = r_dict(sigeq, sighash) + cache = r_dict(sigeq2, sighash) allnumbers = [] self._invent_numbering(cache, allnumbers) + def invent_array_numbering(self, arr): + cache = [] + self._invent_array_numbering(arr, cache) + def _invent_numbering(self, cache, allnumbers): try: no = cache[self] @@ -81,8 +105,9 @@ def create_frame(self, arr): iterlist = [] - self._create_iter(iterlist, arr) - return NumpyEvalFrame(iterlist) + arraylist = [] + self._create_iter(iterlist, arraylist, arr) + return NumpyEvalFrame(iterlist, arraylist) class ConcreteSignature(Signature): _immutable_fields_ = ['dtype'] @@ -90,10 +115,13 @@ def __init__(self, dtype): self.dtype = dtype - def eq(self, other): + def eq(self, other, compare_array_no=True): if type(self) is not type(other): return False assert isinstance(other, ConcreteSignature) + if compare_array_no: + if self.array_no != other.array_no: + return False return self.dtype is other.dtype def hash(self): @@ -103,41 +131,59 @@ def debug_repr(self): return 'Array' - def _create_iter(self, iterlist, arr): + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + assert isinstance(arr, W_NDimArray) + self.array_no = _add_ptr_to_cache(arr.storage, cache) + + def _create_iter(self, iterlist, arraylist, arr): from pypy.module.micronumpy.interp_numarray import W_NDimArray assert isinstance(arr, W_NDimArray) if self.iter_no >= len(iterlist): iterlist.append(ArrayIterator(arr.size)) + if self.array_no >= len(arraylist): + arraylist.append(arr.storage) def eval(self, frame, arr): from pypy.module.micronumpy.interp_numarray import W_NDimArray assert isinstance(arr, W_NDimArray) iter = frame.iterators[self.iter_no] - return self.dtype.getitem(arr.storage, iter.offset) + return self.dtype.getitem(frame.arrays[self.array_no], iter.offset) class ForcedSignature(ArraySignature): def debug_repr(self): return 'ForcedArray' - def _create_iter(self, iterlist, arr): + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import VirtualArray + assert isinstance(arr, VirtualArray) + arr = arr.forced_result + self.array_no = _add_ptr_to_cache(arr.storage, cache) + + def _create_iter(self, iterlist, arraylist, arr): from pypy.module.micronumpy.interp_numarray import VirtualArray assert isinstance(arr, VirtualArray) arr = arr.forced_result if self.iter_no >= len(iterlist): iterlist.append(ArrayIterator(arr.size)) + if self.array_no >= len(arraylist): + arraylist.append(arr.storage) def eval(self, frame, arr): from pypy.module.micronumpy.interp_numarray import VirtualArray assert isinstance(arr, VirtualArray) arr = arr.forced_result iter = frame.iterators[self.iter_no] - return self.dtype.getitem(arr.storage, iter.offset) + return self.dtype.getitem(frame.arrays[self.array_no], iter.offset) class ScalarSignature(ConcreteSignature): def debug_repr(self): return 'Scalar' - def _create_iter(self, iterlist, arr): + def _invent_array_numbering(self, arr, cache): + pass + + def _create_iter(self, iterlist, arraylist, arr): if self.iter_no >= len(iterlist): iter = ConstantIterator() iterlist.append(iter) @@ -153,11 +199,11 @@ def __init__(self, child): self.child = child - def eq(self, other): + def eq(self, other, compare_array_no=True): if type(self) is not type(other): return False assert isinstance(other, ViewSignature) - return self.child.eq(other.child) + return self.child.eq(other.child, compare_array_no) def hash(self): return 
self.child.hash() ^ 0x12345 @@ -171,25 +217,33 @@ allnumbers.append(no) self.iter_no = no - def _create_iter(self, iterlist, arr): + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import ConcreteViewArray + assert isinstance(arr, ConcreteViewArray) + self.array_no = _add_ptr_to_cache(arr.parent.storage, cache) + + def _create_iter(self, iterlist, arraylist, arr): from pypy.module.micronumpy.interp_numarray import ConcreteViewArray assert isinstance(arr, ConcreteViewArray) if self.iter_no >= len(iterlist): iterlist.append(ViewIterator(arr)) + if self.array_no >= len(arraylist): + arraylist.append(arr.parent.storage) def eval(self, frame, arr): from pypy.module.micronumpy.interp_numarray import W_NDimSlice assert isinstance(arr, W_NDimSlice) arr = arr.get_concrete() iter = frame.iterators[self.iter_no] - return arr.find_dtype().getitem(arr.parent.storage, iter.offset) + return arr.find_dtype().getitem(frame.arrays[self.array_no], + iter.offset) class FlatiterSignature(ViewSignature): def debug_repr(self): return 'FlatIter(%s)' % self.child.debug_repr() - def _create_iter(self, iterlist, arr): + def _create_iter(self, iterlist, arraylist, arr): raise NotImplementedError class Call1(Signature): @@ -203,11 +257,12 @@ def hash(self): return compute_hash(self.name) ^ intmask(self.child.hash() << 1) - def eq(self, other): + def eq(self, other, compare_array_no=True): if type(self) is not type(other): return False assert isinstance(other, Call1) - return self.unfunc is other.unfunc and self.child.eq(other.child) + return (self.unfunc is other.unfunc and + self.child.eq(other.child, compare_array_no)) def debug_repr(self): return 'Call1(%s, %s)' % (self.name, self.child.debug_repr()) @@ -215,10 +270,15 @@ def _invent_numbering(self, cache, allnumbers): self.child._invent_numbering(cache, allnumbers) - def _create_iter(self, iterlist, arr): + def _invent_array_numbering(self, arr, cache): from pypy.module.micronumpy.interp_numarray import Call1 assert isinstance(arr, Call1) - self.child._create_iter(iterlist, arr.values) + self.child._invent_array_numbering(arr.values, cache) + + def _create_iter(self, iterlist, arraylist, arr): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + self.child._create_iter(iterlist, arraylist, arr.values) def eval(self, frame, arr): from pypy.module.micronumpy.interp_numarray import Call1 @@ -240,24 +300,31 @@ return (compute_hash(self.name) ^ intmask(self.left.hash() << 1) ^ intmask(self.right.hash() << 2)) - def eq(self, other): + def eq(self, other, compare_array_no=True): if type(self) is not type(other): return False assert isinstance(other, Call2) return (self.binfunc is other.binfunc and self.calc_dtype is other.calc_dtype and - self.left.eq(other.left) and self.right.eq(other.right)) + self.left.eq(other.left, compare_array_no) and + self.right.eq(other.right, compare_array_no)) + + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import Call2 + assert isinstance(arr, Call2) + self.left._invent_array_numbering(arr.left, cache) + self.right._invent_array_numbering(arr.right, cache) def _invent_numbering(self, cache, allnumbers): self.left._invent_numbering(cache, allnumbers) self.right._invent_numbering(cache, allnumbers) - def _create_iter(self, iterlist, arr): + def _create_iter(self, iterlist, arraylist, arr): from pypy.module.micronumpy.interp_numarray import Call2 assert isinstance(arr, Call2) - self.left._create_iter(iterlist, 
arr.left) - self.right._create_iter(iterlist, arr.right) + self.left._create_iter(iterlist, arraylist, arr.left) + self.right._create_iter(iterlist, arraylist, arr.right) def eval(self, frame, arr): from pypy.module.micronumpy.interp_numarray import Call2 @@ -271,11 +338,14 @@ self.right.debug_repr()) class ReduceSignature(Call2): - def _create_iter(self, iterlist, arr): - self.right._create_iter(iterlist, arr) + def _create_iter(self, iterlist, arraylist, arr): + self.right._create_iter(iterlist, arraylist, arr) def _invent_numbering(self, cache, allnumbers): self.right._invent_numbering(cache, allnumbers) + def _invent_array_numbering(self, arr, cache): + self.right._invent_array_numbering(arr, cache) + def eval(self, frame, arr): return self.right.eval(frame, arr) diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -14,6 +14,7 @@ bool_dtype = get_dtype_cache(space).w_booldtype ar = W_NDimArray(10, [10], dtype=float64_dtype) + ar2 = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) sig1 = v1.find_sig() @@ -21,6 +22,10 @@ assert v1 is not v2 assert sig1.left.iter_no == sig1.right.iter_no assert sig2.left.iter_no != sig2.right.iter_no + assert sig1.left.array_no == sig1.right.array_no + sig1b = ar2.descr_add(space, ar).find_sig() + assert sig1b.left.array_no != sig1b.right.array_no + assert sig1b is not sig1 v3 = ar.descr_add(space, Scalar(float64_dtype, 1.0)) sig3 = v3.find_sig() assert sig2 is sig3 From noreply at buildbot.pypy.org Sat Dec 17 13:13:11 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 17 Dec 2011 13:13:11 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: hg merge default Message-ID: <20111217121311.C647E8205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50607:2d900cd7e4e3 Date: 2011-12-17 12:49 +0100 http://bitbucket.org/pypy/pypy/changeset/2d900cd7e4e3/ Log: hg merge default diff --git a/lib-python/modified-2.7/ctypes/test/test_callbacks.py b/lib-python/modified-2.7/ctypes/test/test_callbacks.py --- a/lib-python/modified-2.7/ctypes/test/test_callbacks.py +++ b/lib-python/modified-2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test class Callbacks(unittest.TestCase): @@ -98,6 +99,7 @@ ## self.check_type(c_char_p, "abc") ## self.check_type(c_char_p, "def") + @xfail def test_pyobject(self): o = () from sys import getrefcount as grc diff --git a/lib-python/modified-2.7/ctypes/test/test_libc.py b/lib-python/modified-2.7/ctypes/test/test_libc.py --- a/lib-python/modified-2.7/ctypes/test/test_libc.py +++ b/lib-python/modified-2.7/ctypes/test/test_libc.py @@ -25,7 +25,10 @@ lib.my_qsort(chars, len(chars)-1, sizeof(c_char), comparefunc(sort)) self.assertEqual(chars.raw, " ,,aaaadmmmnpppsss\x00") - def test_no_more_xfail(self): + def SKIPPED_test_no_more_xfail(self): + # We decided to not explicitly support the whole ctypes-2.7 + # and instead go for a case-by-case, demand-driven approach. + # So this test is skipped instead of failing. 
import socket import ctypes.test self.assertTrue(not hasattr(ctypes.test, 'xfail'), diff --git a/pypy/doc/config/objspace.std.withspecialisedtuple.txt b/pypy/doc/config/objspace.std.withspecialisedtuple.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withspecialisedtuple.txt @@ -0,0 +1,3 @@ +Use "specialized tuples", a custom implementation for some common kinds +of tuples. Currently limited to tuples of length 2, in three variants: +(int, int), (float, float), and a generic (object, object). diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -190,8 +190,8 @@ def is_w(self, space, w_other): return self is w_other - def unique_id(self, space): - return space.wrap(compute_unique_id(self)) + def immutable_unique_id(self, space): + return None def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) @@ -706,7 +706,10 @@ return w_two.is_w(self, w_one) def id(self, w_obj): - return w_obj.unique_id(self) + w_result = w_obj.immutable_unique_id(self) + if w_result is None: + w_result = self.wrap(compute_unique_id(w_obj)) + return w_result def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -54,7 +54,11 @@ # Hash support def default_identity_hash(space, w_obj): - return space.wrap(compute_identity_hash(w_obj)) + w_unique_id = w_obj.immutable_unique_id(space) + if w_unique_id is None: # common case + return space.wrap(compute_identity_hash(w_obj)) + else: + return space.hash(w_unique_id) # ____________________________________________________________ # diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -328,6 +328,14 @@ _variables.append(v) return r +def compile_started_vars(clt): + if not hasattr(clt, '_debug_argtypes'): # only when compiling the loop + argtypes = [v.concretetype for v in _variables] + try: + clt._debug_argtypes = argtypes + except AttributeError: # when 'clt' is actually a translated + pass # GcStruct + def compile_add(loop, opnum): loop = _from_opaque(loop) loop.operations.append(Operation(opnum)) @@ -355,11 +363,13 @@ TARGET_TOKENS = weakref.WeakKeyDictionary() -def compile_add_target_token(loop, descr): +def compile_add_target_token(loop, descr, clt): + # here, 'clt' is the compiled_loop_token of the original loop that + # we are compiling loop = _from_opaque(loop) op = loop.operations[-1] descrobj = _normalize(descr) - TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args + TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args, clt def compile_add_var(loop, intvar): loop = _from_opaque(loop) @@ -395,17 +405,25 @@ _variables.append(v) return r -def compile_add_jump_target(loop, targettoken): +def compile_add_jump_target(loop, targettoken, source_clt): loop = _from_opaque(loop) descrobj = _normalize(targettoken) - loop_target, target_opindex, target_inputargs = TARGET_TOKENS[descrobj] + (loop_target, target_opindex, target_inputargs, target_clt + ) = TARGET_TOKENS[descrobj] + # + try: + assert source_clt._debug_argtypes == target_clt._debug_argtypes + except AttributeError: # when translated + pass # op = loop.operations[-1] op.jump_target = loop_target op.jump_target_opindex = target_opindex op.jump_target_inputargs = 
target_inputargs assert op.opnum == rop.JUMP - assert len(op.args) == len(target_inputargs) + assert [v.concretetype for v in op.args] == ( + [v.concretetype for v in target_inputargs]) + # if loop_target == loop: log.info("compiling new loop") else: @@ -987,6 +1005,7 @@ self._may_force = self.opindex try: inpargs = _from_opaque(ctl.compiled_version).inputargs + assert len(inpargs) == len(args) for i, inparg in enumerate(inpargs): TYPE = inparg.concretetype if TYPE is lltype.Signed: @@ -1816,6 +1835,7 @@ setannotation(compile_start_int_var, annmodel.SomeInteger()) setannotation(compile_start_ref_var, annmodel.SomeInteger()) setannotation(compile_start_float_var, annmodel.SomeInteger()) +setannotation(compile_started_vars, annmodel.s_None) setannotation(compile_add, annmodel.s_None) setannotation(compile_add_descr, annmodel.s_None) setannotation(compile_add_descr_arg, annmodel.s_None) diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -138,11 +138,12 @@ clt = original_loop_token.compiled_loop_token clt.loop_and_bridges.append(c) clt.compiling_a_bridge() - self._compile_loop_or_bridge(c, inputargs, operations) + self._compile_loop_or_bridge(c, inputargs, operations, clt) old, oldindex = faildescr._compiled_fail llimpl.compile_redirect_fail(old, oldindex, c) - def compile_loop(self, inputargs, operations, jitcell_token, log=True, name=''): + def compile_loop(self, inputargs, operations, jitcell_token, + log=True, name=''): """In a real assembler backend, this should assemble the given list of operations. Here we just generate a similar CompiledLoop instance. The code here is RPython, whereas the code in llimpl @@ -153,14 +154,14 @@ clt.loop_and_bridges = [c] clt.compiled_version = c jitcell_token.compiled_loop_token = clt - self._compile_loop_or_bridge(c, inputargs, operations) + self._compile_loop_or_bridge(c, inputargs, operations, clt) def free_loop_and_bridges(self, compiled_loop_token): for c in compiled_loop_token.loop_and_bridges: llimpl.mark_as_free(c) model.AbstractCPU.free_loop_and_bridges(self, compiled_loop_token) - def _compile_loop_or_bridge(self, c, inputargs, operations): + def _compile_loop_or_bridge(self, c, inputargs, operations, clt): var2index = {} for box in inputargs: if isinstance(box, history.BoxInt): @@ -172,10 +173,11 @@ var2index[box] = llimpl.compile_start_float_var(c) else: raise Exception("box is: %r" % (box,)) - self._compile_operations(c, operations, var2index) + llimpl.compile_started_vars(clt) + self._compile_operations(c, operations, var2index, clt) return c - def _compile_operations(self, c, operations, var2index): + def _compile_operations(self, c, operations, var2index, clt): for op in operations: llimpl.compile_add(c, op.getopnum()) descr = op.getdescr() @@ -187,7 +189,7 @@ assert op.getopnum() != rop.JUMP llimpl.compile_add_loop_token(c, descr) if isinstance(descr, history.TargetToken) and op.getopnum() == rop.LABEL: - llimpl.compile_add_target_token(c, descr) + llimpl.compile_add_target_token(c, descr, clt) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython c._obj.externalobj.operations[-1].setdescr(descr) @@ -241,7 +243,7 @@ assert op.is_final() if op.getopnum() == rop.JUMP: targettoken = op.getdescr() - llimpl.compile_add_jump_target(c, targettoken) + llimpl.compile_add_jump_target(c, targettoken, clt) elif op.getopnum() == rop.FINISH: faildescr = op.getdescr() index = 
self.get_fail_descr_number(faildescr) @@ -260,23 +262,28 @@ self.latest_frame = frame return fail_index - def execute_token(self, loop_token): - """Calls the fake 'assembler' generated for the given loop. - Returns the descr of the last executed operation: either the one - attached to the failing guard, or the one attached to the FINISH. - Use set_future_value_xxx() before, and get_latest_value_xxx() after. - """ - fail_index = self._execute_token(loop_token) - return self.get_fail_descr_from_number(fail_index) - - def set_future_value_int(self, index, intvalue): - llimpl.set_future_value_int(index, intvalue) - - def set_future_value_ref(self, index, objvalue): - llimpl.set_future_value_ref(index, objvalue) - - def set_future_value_float(self, index, floatvalue): - llimpl.set_future_value_float(index, floatvalue) + def make_execute_token(self, *argtypes): + nb_args = len(argtypes) + unroll_argtypes = unrolling_iterable(list(enumerate(argtypes))) + # + def execute_token(loop_token, *args): + assert len(args) == nb_args + for index, TYPE in unroll_argtypes: + x = args[index] + assert TYPE == lltype.typeOf(x) + if TYPE == lltype.Signed: + llimpl.set_future_value_int(index, x) + elif TYPE == llmemory.GCREF: + llimpl.set_future_value_ref(index, x) + elif TYPE == longlong.FLOATSTORAGE: + llimpl.set_future_value_float(index, x) + else: + assert 0 + # + fail_index = self._execute_token(loop_token) + return self.get_fail_descr_from_number(fail_index) + # + return execute_token def get_latest_value_int(self, index): return llimpl.frame_int_getvalue(self.latest_frame, index) diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -69,6 +69,8 @@ self.bindings[box] = loc # index = self.get_loc_index(loc) + if index < 0: + return endindex = index + self.frame_size(box.type) while len(self.used) < endindex: self.used.append(False) @@ -91,6 +93,8 @@ # size = self.frame_size(box.type) baseindex = self.get_loc_index(loc) + if baseindex < 0: + return for i in range(size): index = baseindex + i assert 0 <= index < len(self.used) @@ -98,7 +102,8 @@ def try_to_reuse_location(self, box, loc): index = self.get_loc_index(loc) - assert index >= 0 + if index < 0: + return False size = self.frame_size(box.type) for i in range(size): while (index + i) >= len(self.used): diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -1,5 +1,6 @@ from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.jit.metainterp import history +from pypy.rpython.lltypesystem import lltype class AbstractCPU(object): @@ -84,24 +85,21 @@ """Print a disassembled version of looptoken to stdout""" raise NotImplementedError - def execute_token(self, looptoken): - """Execute the generated code referenced by the looptoken. + def execute_token(self, looptoken, *args): + """NOT_RPYTHON (for tests only) + Execute the generated code referenced by the looptoken. Returns the descr of the last executed operation: either the one attached to the failing guard, or the one attached to the FINISH. - Use set_future_value_xxx() before, and get_latest_value_xxx() after. + Use get_latest_value_xxx() afterwards to read the result(s). 
""" - raise NotImplementedError + argtypes = [lltype.typeOf(x) for x in args] + execute = self.make_execute_token(*argtypes) + return execute(looptoken, *args) - def set_future_value_int(self, index, intvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_float(self, index, floatvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_ref(self, index, objvalue): - """Set the value for the index'th argument for the loop to run.""" + def make_execute_token(self, *argtypes): + """Must make and return an execute_token() function that will be + called with the given argtypes. + """ raise NotImplementedError def get_latest_value_int(self, index): diff --git a/pypy/jit/backend/test/calling_convention_test.py b/pypy/jit/backend/test/calling_convention_test.py --- a/pypy/jit/backend/test/calling_convention_test.py +++ b/pypy/jit/backend/test/calling_convention_test.py @@ -40,17 +40,18 @@ local_floats = list(floats) local_ints = list(ints) expected_result = 0.0 + arguments = [] for i in range(len(args)): x = args[i] if x[0] == 'f': x = local_floats.pop() t = longlong.getfloatstorage(x) - self.cpu.set_future_value_float(i, t) + arguments.append(t) else: x = local_ints.pop() - self.cpu.set_future_value_int(i, x) + arguments.append(x) expected_result += x - return expected_result + return arguments, expected_result @classmethod def get_funcbox(cls, cpu, func_ptr): @@ -110,9 +111,9 @@ looptoken = JitCellToken() done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - expected_result = self._prepare_args(args, floats, ints) + argvals, expected_result = self._prepare_args(args, floats, ints) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(x - expected_result) < 0.0001 @@ -258,8 +259,8 @@ done_number = self.cpu.get_fail_descr_number(called_loop.operations[-1].getdescr()) self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) - expected_result = self._prepare_args(args, floats, ints) - res = cpu.execute_token(called_looptoken) + argvals, expected_result = self._prepare_args(args, floats, ints) + res = cpu.execute_token(called_looptoken, *argvals) assert res.identifier == 3 t = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(t - expected_result) < 0.0001 @@ -288,8 +289,8 @@ self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # prepare call to called_loop - self._prepare_args(args, floats, ints) - res = cpu.execute_token(othertoken) + argvals, _ = self._prepare_args(args, floats, ints) + res = cpu.execute_token(othertoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert res.identifier == 4 assert abs(x - expected_result) < 0.0001 diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -34,20 +34,17 @@ descr) looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - j = 0 + args = [] for box in inputargs: if isinstance(box, BoxInt): - self.cpu.set_future_value_int(j, box.getint()) - j += 1 + args.append(box.getint()) elif isinstance(box, (BoxPtr, BoxObj)): - self.cpu.set_future_value_ref(j, box.getref_base()) - j += 1 + 
args.append(box.getref_base()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(j, box.getfloatstorage()) - j += 1 + args.append(box.getfloatstorage()) else: raise NotImplementedError(box) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *args) if res is operations[-1].getdescr(): self.guard_failed = False else: @@ -108,8 +105,7 @@ inputargs = [i0] looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) res = self.cpu.get_latest_value_int(0) assert res == 3 assert fail.identifier == 1 @@ -131,8 +127,7 @@ operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 10 @@ -156,8 +151,7 @@ operations[4].setfailargs([None, None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 44) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 44) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(2) assert res == 10 @@ -222,8 +216,7 @@ self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -264,8 +257,7 @@ self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -288,8 +280,7 @@ operations[3].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail is faildescr1 count = self.cpu.get_latest_value_count() @@ -311,8 +302,7 @@ ResOperation(rop.FINISH, [i0], None, descr=faildescr) ] self.cpu.compile_loop([i0], operations, looptoken) - self.cpu.set_future_value_int(0, 99) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 99) assert fail is faildescr res = self.cpu.get_latest_value_int(0) assert res == 99 @@ -343,8 +333,7 @@ ] self.cpu.compile_loop([f0], operations, looptoken) value = longlong.getfloatstorage(-61.25) - self.cpu.set_future_value_float(0, value) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, value) assert fail is faildescr res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == -61.25 @@ -379,9 +368,7 @@ ] operations[-2].setfailargs([t, z]) cpu.compile_loop([x, y], operations, looptoken) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, 0, 10) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_int(1) == 55 @@ -442,9 +429,7 @@ for x, y, z in testcases: excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, x) - self.cpu.set_future_value_int(1, y) - fail = self.cpu.execute_token(looptoken) + fail = 
self.cpu.execute_token(looptoken, x, y) if (z == boom) ^ reversed: assert fail.identifier == 1 else: @@ -1129,17 +1114,7 @@ assert 0 values[index_counter] = 11 # - for i, (box, val) in enumerate(zip(inputargs, values)): - if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, val) - elif isinstance(box, BoxPtr): - self.cpu.set_future_value_ref(i, val) - elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, val) - else: - assert 0 - # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *values) assert fail.identifier == 15 # dstvalues = values[:] @@ -1191,10 +1166,11 @@ self.cpu.compile_bridge(faildescr1, fboxes2, bridge, looptoken) + args = [] for i in range(len(fboxes)): x = 13.5 + 6.73 * i - self.cpu.set_future_value_float(i, longlong.getfloatstorage(x)) - fail = self.cpu.execute_token(looptoken) + args.append(longlong.getfloatstorage(x)) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 2 res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == 8.5 @@ -1244,14 +1220,12 @@ if test1 == -42 or combinaison[0] == 'b': for test2 in [-65, -42, -11]: if test2 == -42 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_int(n, test1) - n += 1 + args.append(test1) if combinaison[1] == 'b': - cpu.set_future_value_int(n, test2) - n += 1 - fail = cpu.execute_token(looptoken) + args.append(test2) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1303,16 +1277,14 @@ if test1 == -4.5 or combinaison[0] == 'b': for test2 in [-6.5, -4.5, -2.5, nan]: if test2 == -4.5 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test1)) - n += 1 + args.append( + longlong.getfloatstorage(test1)) if combinaison[1] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test2)) - n += 1 - fail = cpu.execute_token(looptoken) + args.append( + longlong.getfloatstorage(test2)) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1356,15 +1328,16 @@ # self.cpu.compile_loop(inputargs, operations, looptoken) # - for i, box in enumerate(inputargs): + args = [] + for box in inputargs: if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, box.getint()) + args.append(box.getint()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, box.getfloatstorage()) + args.append(box.getfloatstorage()) else: assert 0 # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 1 def test_nan_and_infinity(self): @@ -1427,10 +1400,9 @@ unique_testcase_list = list(set(testcase)) self.cpu.compile_loop(unique_testcase_list, operations, looptoken) - for i, box in enumerate(unique_testcase_list): - self.cpu.set_future_value_float( - i, box.getfloatstorage()) - fail = self.cpu.execute_token(looptoken) + args = [box.getfloatstorage() + for box in unique_testcase_list] + fail = self.cpu.execute_token(looptoken, *args) if fail.identifier != 5 - (expected_id^expected): if fail.identifier == 4: msg = "was taken" @@ -1699,14 +1671,12 @@ loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 0 
assert self.cpu.get_latest_value_ref(1) == xptr excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1725,8 +1695,7 @@ loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == yptr @@ -1744,13 +1713,11 @@ loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == xptr - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 0 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1922,16 +1889,12 @@ ops[2].setfailargs([i1, i0]) looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 20 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 10 @@ -1967,16 +1930,12 @@ ops[2].setfailargs([i1, i2, i0]) looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 42 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 42 @@ -2013,17 +1972,13 @@ ops[2].setfailargs([i1, f2, i0]) looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 42.5 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 x = self.cpu.get_latest_value_float(1) @@ -2058,8 +2013,7 @@ ops[1].setfailargs([i1, i2]) looptoken = JitCellToken() self.cpu.compile_loop([i1], ops, looptoken) - self.cpu.set_future_value_int(0, ord('G')) - fail = 
self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, ord('G')) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == ord('g') @@ -2118,12 +2072,12 @@ ops[1].setfailargs([]) looptoken = JitCellToken() self.cpu.compile_loop([i0, i1, i2, i3], ops, looptoken) - self.cpu.set_future_value_int(0, rffi.cast(lltype.Signed, raw)) - self.cpu.set_future_value_int(1, 2) - self.cpu.set_future_value_int(2, 4) - self.cpu.set_future_value_int(3, rffi.cast(lltype.Signed, fn)) + args = [rffi.cast(lltype.Signed, raw), + 2, + 4, + rffi.cast(lltype.Signed, fn)] assert glob.lst == [] - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert len(glob.lst) > 0 lltype.free(raw, flavor='raw') @@ -2176,9 +2130,8 @@ self.cpu.compile_loop([i1, i2], ops, looptoken) buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') - self.cpu.set_future_value_int(0, buflen) - self.cpu.set_future_value_int(1, rffi.cast(lltype.Signed, buffer)) - fail = self.cpu.execute_token(looptoken) + args = [buflen, rffi.cast(lltype.Signed, buffer)] + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == len(cwd) assert rffi.charp2strn(buffer, buflen) == cwd @@ -2197,9 +2150,7 @@ looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == -42 print 'step 1 ok' @@ -2208,9 +2159,7 @@ # mark as failing self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr assert self.cpu.get_latest_value_int(0) == 9 print 'step 2 ok' @@ -2226,9 +2175,7 @@ ops[0].setfailargs([]) self.cpu.compile_bridge(faildescr, [i2], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 3 assert self.cpu.get_latest_value_int(0) == 9 print 'step 3 ok' @@ -2237,9 +2184,7 @@ # mark as failing again self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr2 print 'step 4 ok' print '-'*79 @@ -2448,9 +2393,8 @@ FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, EffectInfo.MOST_GENERAL) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(looptoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(looptoken, *args) assert self.cpu.get_latest_value_int(0) == 55 ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] @@ -2462,9 +2406,8 @@ loop = parse(ops, namespace=locals()) othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(othertoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_latest_value_int(0) == 13 assert called @@ -2499,9 +2442,9 @@ looptoken = 
JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.3)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(2.3)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 2.3 ops = ''' @@ -2513,9 +2456,9 @@ loop = parse(ops, namespace=locals()) othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2526,9 +2469,9 @@ try: othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 3.2 assert not called @@ -2589,9 +2532,9 @@ looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.35)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(2.35)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.25 + 2.35 assert not called @@ -2607,9 +2550,9 @@ self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # normal call_assembler: goes to looptoken - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.25)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(3.25)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2629,10 +2572,9 @@ self.cpu.redirect_call_assembler(looptoken, looptoken2) # now, our call_assembler should go to looptoken2 - self.cpu.set_future_value_float(0, longlong.getfloatstorage(6.0)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(1.5)) - # 6.0-1.5 == 1.25+3.25 - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(6.0), + longlong.getfloatstorage(1.5)] # 6.0-1.5 == 1.25+3.25 + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2986,8 +2928,7 @@ looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # overflowing value: - self.cpu.set_future_value_int(0, sys.maxint // 4 + 1) - fail = self.cpu.execute_token(looptoken) + fail = 
self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) assert fail.identifier == excdescr.identifier def test_compile_loop_with_target(self): @@ -3014,8 +2955,7 @@ operations[6].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 10 @@ -3027,8 +2967,7 @@ ] self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 3 res = self.cpu.get_latest_value_int(0) assert res == -10 @@ -3108,13 +3047,13 @@ self.cpu.compile_bridge(faildescr1, inputargs, operations, looptoken1) looptoken2 = JitCellToken() - inputargs = [] + inputargs = [BoxInt()] operations = [ ResOperation(rop.JUMP, [ConstInt(0)], None, descr=targettoken1), ] self.cpu.compile_loop(inputargs, operations, looptoken2) - fail = self.cpu.execute_token(looptoken2) + fail = self.cpu.execute_token(looptoken2, -9) assert fail.identifier == 42 diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -6,6 +6,7 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, JitCellToken from pypy.jit.metainterp.history import BoxPtr, ConstPtr, TargetToken from pypy.jit.metainterp.history import BoxFloat, ConstFloat, Const +from pypy.jit.metainterp.history import INT, FLOAT from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.executor import execute_nonspec from pypy.jit.metainterp.resoperation import opname @@ -616,8 +617,13 @@ return self.loop._jitcelltoken if not hasattr(self, '_initialjumploop_celltoken'): self._initialjumploop_celltoken = JitCellToken() - self.cpu.compile_loop(self.startvars[:], - [ResOperation(rop.JUMP, self.startvars[:], None, + args = [] + for box in self.startvars: + if box not in self.loop.inputargs: + box = box.constbox() + args.append(box) + self.cpu.compile_loop(self.loop.inputargs, + [ResOperation(rop.JUMP, args, None, descr=self.loop._targettoken)], self._initialjumploop_celltoken) return self._initialjumploop_celltoken @@ -649,14 +655,8 @@ exc = cpu.grab_exc_value() assert not exc - for i, box in enumerate(self.startvars): - if isinstance(box, BoxInt): - cpu.set_future_value_int(i, box.value) - elif isinstance(box, BoxFloat): - cpu.set_future_value_float(i, box.value) - else: - raise NotImplementedError(box) - fail = cpu.execute_token(self.runjitcelltoken()) + arguments = [box.value for box in self.loop.inputargs] + fail = cpu.execute_token(self.runjitcelltoken(), *arguments) assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): if isinstance(v, (BoxFloat, ConstFloat)): @@ -717,10 +717,21 @@ # to build_bridge().) # First make up the other loop... 
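The pattern repeated throughout these test diffs is the same: instead of staging each input with cpu.set_future_value_int()/set_future_value_float()/set_future_value_ref() and then calling execute_token(looptoken) with no arguments, the caller now passes all inputs positionally to execute_token(looptoken, *args) and still reads the results back with get_latest_value_*(). The snippet below is only a toy sketch of the two calling styles, with invented MiniCPU classes; it is not PyPy's real cpu object, just an illustration of why the direct-argument form is simpler.

    # Hypothetical toy models, not the real pypy.jit.backend CPU classes.

    class StagedMiniCPU(object):
        """Old style: inputs are staged one by one before the call."""
        def __init__(self):
            self.future = {}
        def set_future_value_int(self, index, value):
            self.future[index] = value
        def execute_token(self, func):
            args = [self.future[i] for i in range(len(self.future))]
            self.future.clear()
            return func(*args)

    class DirectMiniCPU(object):
        """New style: inputs travel with the call itself."""
        def execute_token(self, func, *args):
            return func(*args)

    def loop(a, b):            # stands in for a compiled loop
        return a + b

    old = StagedMiniCPU()
    old.set_future_value_int(0, 20)
    old.set_future_value_int(1, 22)
    assert old.execute_token(loop) == 42

    new = DirectMiniCPU()
    assert new.execute_token(loop, 20, 22) == 42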
-            subset = bridge_builder.subset_of_intvars(r)
-            subset = [i for i in subset if i in fail_args]
-            if len(subset) == 0:
-                return False
+            #
+            # New restriction: must have the same argument count and types
+            # as the original loop
+            subset = []
+            for box in self.loop.inputargs:
+                srcbox = r.choice(fail_args)
+                if srcbox.type != box.type:
+                    if box.type == INT:
+                        srcbox = ConstInt(r.random_integer())
+                    elif box.type == FLOAT:
+                        srcbox = ConstFloat(r.random_float_storage())
+                    else:
+                        raise AssertionError(box.type)
+                subset.append(srcbox)
+            #
             args = [x.clonebox() for x in subset]
             rl = RandomLoop(self.builder.cpu, self.builder.fork, r, args)
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py
--- a/pypy/jit/backend/x86/assembler.py
+++ b/pypy/jit/backend/x86/assembler.py
@@ -38,6 +38,7 @@
 from pypy.jit.backend.x86.jump import remap_frame_layout
 from pypy.jit.codewriter.effectinfo import EffectInfo
 from pypy.jit.codewriter import longlong
+from pypy.rlib.rarithmetic import intmask
 
 # darwin requires the stack to be 16 bytes aligned on calls. Same for gcc 4.5.0,
 # better safe than sorry
@@ -309,12 +310,11 @@
             mc.MOVSD_sx(8*i, i)     # xmm0 to xmm7
         #
         if IS_X86_32:
-            mc.LEA_rb(eax.value, +8)
             stack_size += 2*WORD
             mc.PUSH_r(eax.value)        # alignment
-            mc.PUSH_r(eax.value)
+            mc.PUSH_r(esp.value)
         elif IS_X86_64:
-            mc.LEA_rb(edi.value, +16)
+            mc.MOV_rr(edi.value, esp.value)
         #
         # esp is now aligned to a multiple of 16 again
         mc.CALL(imm(slowpathaddr))
@@ -325,7 +325,7 @@
         jnz_location = mc.get_relative_pos()
         #
         if IS_X86_32:
-            mc.ADD_ri(esp.value, 2*WORD)
+            mc.ADD_ri(esp.value, 2*WORD)   # cancel the two PUSHes above
         elif IS_X86_64:
             # restore the registers
             for i in range(7, -1, -1):
@@ -421,10 +421,8 @@
     def assemble_loop(self, loopname, inputargs, operations, looptoken, log):
         '''adds the following attributes to looptoken:
-               _x86_loop_code       (an integer giving an address)
-               _x86_bootstrap_code  (an integer giving an address)
-               _x86_direct_bootstrap_code  ( "  "  "  " )
-               _x86_arglocs
+               _x86_function_addr   (address of the generated func, as an int)
+               _x86_loop_code       (debug: addr of the start of the ResOps)
                _x86_debug_checksum
         '''
         # XXX this function is too longish and contains some code
@@ -445,12 +443,12 @@
         operations = self._inject_debugging_code(looptoken, operations)
 
         regalloc = RegAlloc(self, self.cpu.translate_support_code)
-        arglocs, operations = regalloc.prepare_loop(inputargs, operations,
-                                                    looptoken, clt.allgcrefs)
-        looptoken._x86_arglocs = arglocs
-
-        bootstrappos = self.mc.get_relative_pos()
-        stackadjustpos = self._assemble_bootstrap_code(inputargs, arglocs)
+        #
+        self._call_header_with_stack_check()
+        stackadjustpos = self._patchable_stackadjust()
+        clt._debug_nbargs = len(inputargs)
+        operations = regalloc.prepare_loop(inputargs, operations,
+                                           looptoken, clt.allgcrefs)
         looppos = self.mc.get_relative_pos()
         looptoken._x86_loop_code = looppos
         clt.frame_depth = -1     # temporarily
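The test_random.py hunk above enforces the new restriction that a loop must be entered with exactly the argument count and types it was compiled with; when a randomly chosen fail argument has the wrong type, it is replaced by a fresh constant of the right type. A rough standalone sketch of that padding logic, with a hypothetical make_constant helper standing in for ConstInt/ConstFloat and plain tuples standing in for boxes:

    import random

    INT, FLOAT = 'i', 'f'

    def make_constant(tp, rnd):
        # stand-in for ConstInt(...) / ConstFloat(...)
        return (tp, rnd.randrange(-100, 100) if tp == INT else rnd.random())

    def match_signature(target_types, candidates, rnd):
        """Pick one source per target slot; pad type mismatches with constants."""
        args = []
        for tp in target_types:
            src = rnd.choice(candidates)
            if src[0] != tp:                 # wrong type: use a constant instead
                src = make_constant(tp, rnd)
            args.append(src)
        return args

    rnd = random.Random(42)
    fail_args = [(INT, 7), (FLOAT, 2.5)]
    args = match_signature([INT, INT, FLOAT], fail_args, rnd)
    assert [a[0] for a in args] == [INT, INT, FLOAT]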
looptoken.number, loopname, rawstart + looppos, - rawstart + directbootstrappos, + rawstart + size_excluding_failure_stuff, rawstart)) debug_stop("jit-backend-addr") self._patch_stackadjust(rawstart + stackadjustpos, @@ -481,18 +477,17 @@ if not we_are_translated(): # used only by looptoken.dump() -- useful in tests looptoken._x86_rawstart = rawstart - looptoken._x86_fullsize = fullsize + looptoken._x86_fullsize = full_size looptoken._x86_ops_offset = ops_offset + looptoken._x86_function_addr = rawstart - looptoken._x86_bootstrap_code = rawstart + bootstrappos - looptoken._x86_direct_bootstrap_code = rawstart + directbootstrappos self.fixup_target_tokens(rawstart) self.teardown() # oprofile support if self.cpu.profile_agent is not None: name = "Loop # %s: %s" % (looptoken.number, loopname) self.cpu.profile_agent.native_code_written(name, - rawstart, fullsize) + rawstart, full_size) return ops_offset def assemble_bridge(self, faildescr, inputargs, operations, @@ -802,152 +797,21 @@ self.mc.MOV_ri(ebx.value, rst) # MOV ebx, rootstacktop self.mc.SUB_mi8((ebx.value, 0), 2*WORD) # SUB [ebx], 2*WORD - def _assemble_bootstrap_direct_call(self, arglocs, jmppos, stackdepth): - if IS_X86_64: - return self._assemble_bootstrap_direct_call_64(arglocs, jmppos, stackdepth) - # XXX pushing ebx esi and edi is a bit pointless, since we store - # all regsiters anyway, for the case of guard_not_forced - # XXX this can be improved greatly. Right now it'll behave like - # a normal call - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - offset = 2 * WORD - tmp = eax - xmmtmp = xmm0 - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert not loc.is_xmm - self.mc.MOV_rb(loc.value, offset) - else: - self.mc.MOV_rb(tmp.value, offset) - self.mc.MOV(loc, tmp) - offset += WORD - loc = floatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert loc.is_xmm - self.mc.MOVSD_xb(loc.value, offset) - else: - self.mc.MOVSD_xb(xmmtmp.value, offset) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - offset += 2 * WORD - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - - def _assemble_bootstrap_direct_call_64(self, arglocs, jmppos, stackdepth): - # XXX: Very similar to _emit_call_64 - - src_locs = [] - dst_locs = [] - xmm_src_locs = [] - xmm_dst_locs = [] - get_from_stack = [] - - # In reverse order for use with pop() - unused_gpr = [r9, r8, ecx, edx, esi, edi] - unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] - - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - - # The lists are padded with Nones - assert len(nonfloatlocs) == len(floatlocs) - - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if len(unused_gpr) > 0: - src_locs.append(unused_gpr.pop()) - dst_locs.append(loc) - else: - get_from_stack.append((loc, False)) - - floc = floatlocs[i] - if floc is not None: - if len(unused_xmm) > 0: - xmm_src_locs.append(unused_xmm.pop()) - xmm_dst_locs.append(floc) - else: - get_from_stack.append((floc, True)) - - remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) - remap_frame_layout(self, xmm_src_locs, xmm_dst_locs, X86_64_XMM_SCRATCH_REG) - - for i in range(len(get_from_stack)): - loc, is_xmm = 
get_from_stack[i] - if is_xmm: - self.mc.MOVSD_xb(X86_64_XMM_SCRATCH_REG.value, (2 + i) * WORD) - self.mc.MOVSD(loc, X86_64_XMM_SCRATCH_REG) - else: - self.mc.MOV_rb(X86_64_SCRATCH_REG.value, (2 + i) * WORD) - # XXX: We're assuming that "loc" won't require regloc to - # clobber the scratch register - self.mc.MOV(loc, X86_64_SCRATCH_REG) - - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking - oldnonfloatlocs, oldfloatlocs = oldlooptoken._x86_arglocs - newnonfloatlocs, newfloatlocs = newlooptoken._x86_arglocs - assert len(oldnonfloatlocs) == len(newnonfloatlocs) - assert len(oldfloatlocs) == len(newfloatlocs) + old_nbargs = oldlooptoken.compiled_loop_token._debug_nbargs + new_nbargs = newlooptoken.compiled_loop_token._debug_nbargs + assert old_nbargs == new_nbargs # we overwrite the instructions at the old _x86_direct_bootstrap_code # to start with a JMP to the new _x86_direct_bootstrap_code. # Ideally we should rather patch all existing CALLs, but well. - oldadr = oldlooptoken._x86_direct_bootstrap_code - target = newlooptoken._x86_direct_bootstrap_code + oldadr = oldlooptoken._x86_function_addr + target = newlooptoken._x86_function_addr mc = codebuf.MachineCodeBlockWrapper() mc.JMP(imm(target)) + assert mc.get_relative_pos() <= 13 # keep in sync with prepare_loop() mc.copy_to_raw_memory(oldadr) - def _assemble_bootstrap_code(self, inputargs, arglocs): - nonfloatlocs, floatlocs = arglocs - self._call_header() - stackadjustpos = self._patchable_stackadjust() - tmp = eax - xmmtmp = xmm0 - self.mc.begin_reuse_scratch_register() - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is None: - continue - if isinstance(loc, RegLoc): - target = loc - else: - target = tmp - if inputargs[i].type == REF: - adr = self.fail_boxes_ptr.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - self.mc.MOV(heap(adr), imm0) - else: - adr = self.fail_boxes_int.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - if target is not loc: - assert isinstance(loc, StackLoc) - self.mc.MOV_br(loc.value, target.value) - for i in range(len(floatlocs)): - loc = floatlocs[i] - if loc is None: - continue - adr = self.fail_boxes_float.get_addr_for_num(i) - if isinstance(loc, RegLoc): - self.mc.MOVSD(loc, heap(adr)) - else: - self.mc.MOVSD(xmmtmp, heap(adr)) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - self.mc.end_reuse_scratch_register() - return stackadjustpos - def dump(self, text): if not self.verbose: return @@ -974,7 +838,7 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.SUB_ri(esp.value, 8) # = size of doubles self.mc.MOVSD_sx(0, loc.value) - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.PUSH_b(get_ebp_ofs(loc.position)) self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) @@ -985,13 +849,25 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.MOVSD_xs(loc.value, 0) self.mc.ADD_ri(esp.value, 8) # = size of doubles - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.POP_b(get_ebp_ofs(loc.position + 1)) self.mc.POP_b(get_ebp_ofs(loc.position)) else: self.mc.POP(loc) + def regalloc_immedmem2mem(self, from_loc, to_loc): + # move a ConstFloatLoc directly to a StackLoc, as two MOVs + 
# (even on x86-64, because the immediates are encoded as 32 bits) + assert isinstance(from_loc, ConstFloatLoc) + assert isinstance(to_loc, StackLoc) + low_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[0] + high_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[1] + low_part = intmask(low_part) + high_part = intmask(high_part) + self.mc.MOV_bi(to_loc.value, low_part) + self.mc.MOV_bi(to_loc.value + 4, high_part) + def regalloc_perform(self, op, arglocs, resloc): genop_list[op.getopnum()](self, op, arglocs, resloc) @@ -1143,18 +1019,18 @@ self.mc.MOVSD_sx(p, loc.value) else: self.mc.MOV_sr(p, loc.value) - p += round_up_to_4(loc.width) + p += loc.get_width() p = 0 for i in range(start, n): loc = arglocs[i] if not isinstance(loc, RegLoc): - if loc.width == 8: + if loc.get_width() == 8: self.mc.MOVSD(xmm0, loc) self.mc.MOVSD_sx(p, xmm0.value) else: self.mc.MOV(tmp, loc) self.mc.MOV_sr(p, tmp.value) - p += round_up_to_4(loc.width) + p += loc.get_width() self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) @@ -1891,10 +1767,10 @@ DESCR_INT = 0x01 DESCR_FLOAT = 0x02 DESCR_SPECIAL = 0x03 - # XXX: 4*8 works on i386, should we optimize for that case? - CODE_FROMSTACK = 4*16 + CODE_FROMSTACK = 4 * (8 + 8*IS_X86_64) CODE_STOP = 0 | DESCR_SPECIAL CODE_HOLE = 4 | DESCR_SPECIAL + CODE_INPUTARG = 8 | DESCR_SPECIAL def write_failure_recovery_description(self, mc, failargs, locs): for i in range(len(failargs)): @@ -1910,7 +1786,11 @@ raise AssertionError("bogus kind") loc = locs[i] if isinstance(loc, StackLoc): - n = self.CODE_FROMSTACK//4 + loc.position + pos = loc.position + if pos < 0: + mc.writechar(chr(self.CODE_INPUTARG)) + pos = ~pos + n = self.CODE_FROMSTACK//4 + pos else: assert isinstance(loc, RegLoc) n = loc.value @@ -1930,6 +1810,7 @@ descr_to_box_type = [REF, INT, FLOAT] bytecode = rffi.cast(rffi.UCHARP, bytecode) arglocs = [] + code_inputarg = False while 1: # decode the next instruction from the bytecode code = rffi.cast(lltype.Signed, bytecode[0]) @@ -1948,11 +1829,17 @@ break kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 + if code_inputarg: + code = ~code + code_inputarg = False loc = X86FrameManager.frame_pos(code, descr_to_box_type[kind]) elif code == self.CODE_STOP: break elif code == self.CODE_HOLE: continue + elif code == self.CODE_INPUTARG: + code_inputarg = True + continue else: # 'code' identifies a register kind = code & 3 @@ -1968,6 +1855,7 @@ def grab_frame_values(self, bytecode, frame_addr, allregisters): # no malloc allowed here!! self.fail_ebp = allregisters[16 + ebp.value] + code_inputarg = False num = 0 value_hi = 0 while 1: @@ -1988,6 +1876,9 @@ # load the value from the stack kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 + if code_inputarg: + code = ~code + code_inputarg = False stackloc = frame_addr + get_ebp_ofs(code) value = rffi.cast(rffi.LONGP, stackloc)[0] if kind == self.DESCR_FLOAT and WORD == 4: @@ -2000,6 +1891,9 @@ if code == self.CODE_HOLE: num += 1 continue + if code == self.CODE_INPUTARG: + code_inputarg = True + continue assert code == self.CODE_STOP break code >>= 2 @@ -2104,9 +1998,9 @@ # returns in eax the fail_index # now we return from the complete frame, which starts from - # _assemble_bootstrap_code(). The LEA in _call_footer below throws - # away most of the frame, including all the PUSHes that we did just - # above. + # _call_header_with_stack_check(). The LEA in _call_footer below + # throws away most of the frame, including all the PUSHes that we + # did just above. 
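The new CODE_INPUTARG byte lets the failure-recovery bytecode describe input arguments that live above the frame, i.e. at negative positions: the encoder emits the marker and then stores the one's complement of the position, and the decoder flips it back when it sees the marker. A self-contained sketch of that round trip, using plain Python lists instead of the real machine-code buffer; CODE_FROMSTACK is given an illustrative 32-bit value here:

    CODE_INPUTARG = 8 | 3        # marker byte, as in the diff above
    CODE_FROMSTACK = 32          # illustrative base value only

    def encode_position(out, pos, kind):
        if pos < 0:                       # input argument above the frame
            out.append(CODE_INPUTARG)
            pos = ~pos                    # store the one's complement
        out.append(((CODE_FROMSTACK // 4 + pos) << 2) | kind)

    def decode_positions(codes):
        result = []
        inputarg = False
        for code in codes:
            if code == CODE_INPUTARG:
                inputarg = True
                continue
            kind = code & 3
            pos = (code >> 2) - CODE_FROMSTACK // 4
            if inputarg:
                pos = ~pos
                inputarg = False
            result.append((pos, kind))
        return result

    out = []
    for pos in [3, -1, -2, 5]:
        encode_position(out, pos, kind=1)
    assert [p for p, k in decode_positions(out)] == [3, -1, -2, 5]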
self._call_footer() rawstart = mc.materialize(self.cpu.asmmemmgr, []) @@ -2189,7 +2083,7 @@ argtypes=op.getdescr().get_arg_types(), callconv=op.getdescr().get_call_conv()) - if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: + if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.type == FLOAT: # a float or a long long return if op.getdescr().get_return_type() == 'L': self.mc.MOV_br(resloc.value, eax.value) # long long @@ -2354,10 +2248,10 @@ self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) descr = op.getdescr() assert isinstance(descr, JitCellToken) - assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) + assert len(arglocs) - 2 == descr.compiled_loop_token._debug_nbargs # - # Write a call to the direct_bootstrap_code of the target assembler - self._emit_call(fail_index, imm(descr._x86_direct_bootstrap_code), + # Write a call to the target assembler + self._emit_call(fail_index, imm(descr._x86_function_addr), arglocs, 2, tmp=eax) if op.result is None: assert result_loc is None @@ -2588,6 +2482,14 @@ self.gcrootmap_retaddr_forced = -1 def closing_jump(self, target_token): + # The backend's logic assumes that the target code is in a piece of + # assembler that was also called with the same number of arguments, + # so that the locations [ebp+8..] of the input arguments are valid + # stack locations both before and after the jump. + my_nbargs = self.current_clt._debug_nbargs + target_nbargs = target_token._x86_clt._debug_nbargs + assert my_nbargs == target_nbargs + # target = target_token._x86_loop_code if target_token in self.target_tokens_currently_compiling: curpos = self.mc.get_relative_pos() + 5 @@ -2666,11 +2568,6 @@ num = getattr(rop, opname.upper()) genop_list[num] = value -def round_up_to_4(size): - if size < 4: - return 4 - return size - # XXX: ri386 migration shims: def addr_add(reg_or_imm1, reg_or_imm2, offset=0, scale=0): return AddressLoc(reg_or_imm1, reg_or_imm2, scale, offset) diff --git a/pypy/jit/backend/x86/jump.py b/pypy/jit/backend/x86/jump.py --- a/pypy/jit/backend/x86/jump.py +++ b/pypy/jit/backend/x86/jump.py @@ -1,6 +1,6 @@ import sys from pypy.tool.pairtype import extendabletype -from pypy.jit.backend.x86.regloc import ImmedLoc, StackLoc +from pypy.jit.backend.x86.regloc import ImmediateAssemblerLocation, StackLoc def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg): pending_dests = len(dst_locations) @@ -12,7 +12,7 @@ srccount[key] = 0 for i in range(len(dst_locations)): src = src_locations[i] - if isinstance(src, ImmedLoc): + if isinstance(src, ImmediateAssemblerLocation): continue key = src._getregkey() if key in srccount: @@ -31,7 +31,7 @@ srccount[key] = -1 # means "it's done" pending_dests -= 1 src = src_locations[i] - if not isinstance(src, ImmedLoc): + if not isinstance(src, ImmediateAssemblerLocation): key = src._getregkey() if key in srccount: srccount[key] -= 1 @@ -66,6 +66,13 @@ def _move(assembler, src, dst, tmpreg): if dst.is_memory_reference() and src.is_memory_reference(): + if isinstance(src, ImmediateAssemblerLocation): + assembler.regalloc_immedmem2mem(src, dst) + return + if tmpreg is None: + assembler.regalloc_push(src) + assembler.regalloc_pop(dst) + return assembler.regalloc_mov(src, tmpreg) src = tmpreg assembler.regalloc_mov(src, dst) @@ -87,7 +94,7 @@ dstloc = dst_locations2[i] if isinstance(loc, StackLoc): key = loc._getregkey() - if (key in dst_keys or (loc.width > WORD and + if (key in dst_keys or (loc.get_width() > WORD and (key + WORD) in dst_keys)): assembler.regalloc_push(loc) 
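Two small ideas sit in the regalloc_immedmem2mem and jump.py hunks above: a memory-to-memory move can be done without a scratch register by pushing the source and popping into the destination, and an 8-byte float immediate can be written to a stack slot as two 32-bit halves, even on x86-64, because MOV to memory only takes 32-bit immediates. A rough Python illustration of the 32-bit split; it is pure bit fiddling and has no relation to the real regloc classes:

    import struct

    def split_double(value):
        """Return the low and high 32-bit halves of a double's bit pattern."""
        bits = struct.unpack('<Q', struct.pack('<d', value))[0]
        low = bits & 0xffffffff
        high = (bits >> 32) & 0xffffffff
        return low, high

    def join_double(low, high):
        bits = (high << 32) | low
        return struct.unpack('<d', struct.pack('<Q', bits))[0]

    low, high = split_double(2.5)
    assert join_double(low, high) == 2.5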
                extrapushes.append(dstloc)
diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py
--- a/pypy/jit/backend/x86/regalloc.py
+++ b/pypy/jit/backend/x86/regalloc.py
@@ -28,7 +28,7 @@
 class X86RegisterManager(RegisterManager):
     box_types = [INT, REF]
-    all_regs = [eax, ecx, edx, ebx, esi, edi]
+    all_regs = [ecx, eax, edx, ebx, esi, edi]
     no_lower_byte_regs = [esi, edi]
     save_around_call_regs = [eax, edx, ecx]
     frame_reg = ebp
@@ -60,7 +60,7 @@
 class X86_64_RegisterManager(X86RegisterManager):
     # r11 omitted because it's used as scratch
-    all_regs = [eax, ecx, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15]
+    all_regs = [ecx, eax, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15]
     no_lower_byte_regs = []
     save_around_call_regs = [eax, ecx, edx, esi, edi, r8, r9, r10]
@@ -130,9 +130,9 @@
     @staticmethod
     def frame_pos(i, box_type):
         if IS_X86_32 and box_type == FLOAT:
-            return StackLoc(i, get_ebp_ofs(i+1), 2, box_type)
+            return StackLoc(i, get_ebp_ofs(i+1), box_type)
         else:
-            return StackLoc(i, get_ebp_ofs(i), 1, box_type)
+            return StackLoc(i, get_ebp_ofs(i), box_type)
     @staticmethod
     def frame_size(box_type):
         if IS_X86_32 and box_type == FLOAT:
@@ -165,6 +165,7 @@
         self.jump_target_descr = None
         self.close_stack_struct = 0
         self.final_jump_op = None
+        self.min_bytes_before_label = 0
 
     def _prepare(self, inputargs, operations, allgcrefs):
         self.fm = X86FrameManager()
@@ -173,22 +174,26 @@
         operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations,
                                                        allgcrefs)
         # compute longevity of variables
-        longevity, useful = self._compute_vars_longevity(inputargs, operations)
-        self.longevity = longevity
-        self.rm = gpr_reg_mgr_cls(longevity,
+        self._compute_vars_longevity(inputargs, operations)
+        self.rm = gpr_reg_mgr_cls(self.longevity,
                                   frame_manager = self.fm,
                                   assembler = self.assembler)
-        self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm,
+        self.xrm = xmm_reg_mgr_cls(self.longevity, frame_manager = self.fm,
                                    assembler = self.assembler)
-        return operations, useful
+        return operations
 
     def prepare_loop(self, inputargs, operations, looptoken, allgcrefs):
-        operations, useful = self._prepare(inputargs, operations, allgcrefs)
-        return self._process_inputargs(inputargs, useful), operations
+        operations = self._prepare(inputargs, operations, allgcrefs)
+        self._set_initial_bindings(inputargs)
+        # note: we need to make a copy of inputargs because possibly_free_vars
+        # is also used on op args, which is a non-resizable list
+        self.possibly_free_vars(list(inputargs))
+        self.min_bytes_before_label = 13
+        return operations
 
     def prepare_bridge(self, prev_depths, inputargs, arglocs, operations,
                        allgcrefs):
-        operations, _ = self._prepare(inputargs, operations, allgcrefs)
+        operations = self._prepare(inputargs, operations, allgcrefs)
        self._update_bindings(arglocs, inputargs)
         self.param_depth = prev_depths[1]
         return operations
@@ -196,46 +201,56 @@
     def reserve_param(self, n):
         self.param_depth = max(self.param_depth, n)
 
-    def _process_inputargs(self, inputargs, useful):
-        # XXX we can sort out here by longevity if we need something
-        # more optimal
-        floatlocs = [None] * len(inputargs)
-        nonfloatlocs = [None] * len(inputargs)
-        # Don't use all_regs[0] for passing arguments around a loop.
-        # Must be kept in sync with consider_jump().
- # XXX this should probably go to llsupport/regalloc.py - xmmtmp = self.xrm.free_regs.pop(0) - tmpreg = self.rm.free_regs.pop(0) - assert tmpreg == X86RegisterManager.all_regs[0] - assert xmmtmp == X86XMMRegisterManager.all_regs[0] - for i in range(len(inputargs)): - arg = inputargs[i] - assert not isinstance(arg, Const) - reg = None - if self.longevity[arg][1] > -1 and arg in useful: - if arg.type == FLOAT: - # xxx is it really a good idea? at the first CALL they - # will all be flushed anyway - reg = self.xrm.try_allocate_reg(arg) + def _set_initial_bindings(self, inputargs): + if IS_X86_64: + inputargs = self._set_initial_bindings_regs_64(inputargs) + # ... + # stack layout: arg2 + # arg1 + # arg0 + # return address + # saved ebp <-- ebp points here + # ... + cur_frame_pos = - 1 - FRAME_FIXED_SIZE + assert get_ebp_ofs(cur_frame_pos-1) == 2*WORD + assert get_ebp_ofs(cur_frame_pos-2) == 3*WORD + # + for box in inputargs: + assert isinstance(box, Box) + # + if IS_X86_32 and box.type == FLOAT: + cur_frame_pos -= 2 + else: + cur_frame_pos -= 1 + loc = self.fm.frame_pos(cur_frame_pos, box.type) + self.fm.set_binding(box, loc) + + def _set_initial_bindings_regs_64(self, inputargs): + # In reverse order for use with pop() + unused_gpr = [r9, r8, ecx, edx, esi, edi] + unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] + # + pass_on_stack = [] + # + for box in inputargs: + assert isinstance(box, Box) + # + if box.type == FLOAT: + if len(unused_xmm) > 0: + ask = unused_xmm.pop() + got = self.xrm.try_allocate_reg(box, selected_reg=ask) + assert ask == got else: - reg = self.rm.try_allocate_reg(arg) - if reg: - loc = reg + pass_on_stack.append(box) else: - loc = self.fm.loc(arg) - if arg.type == FLOAT: - floatlocs[i] = loc - else: - nonfloatlocs[i] = loc - # otherwise we have it saved on stack, so no worry - self.rm.free_regs.insert(0, tmpreg) - self.xrm.free_regs.insert(0, xmmtmp) - assert tmpreg not in nonfloatlocs - assert xmmtmp not in floatlocs - # note: we need to make a copy of inputargs because possibly_free_vars - # is also used on op args, which is a non-resizable list - self.possibly_free_vars(list(inputargs)) - return nonfloatlocs, floatlocs + if len(unused_gpr) > 0: + ask = unused_gpr.pop() + got = self.rm.try_allocate_reg(box, selected_reg=ask) + assert ask == got + else: + pass_on_stack.append(box) + # + return pass_on_stack def possibly_free_var(self, var): if var.type == FLOAT: @@ -446,8 +461,15 @@ i += 1 assert not self.rm.reg_bindings assert not self.xrm.reg_bindings + self.flush_loop() self.assembler.mc.mark_op(None) # end of the loop + def flush_loop(self): + # rare case: if the loop is too short, pad with NOPs + mc = self.assembler.mc + while mc.get_relative_pos() < self.min_bytes_before_label: + mc.NOP() + def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" @@ -458,7 +480,7 @@ # only to guard operations or to jump or to finish produced = {} last_used = {} - useful = {} + last_real_usage = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -469,10 +491,13 @@ opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) - if opnum != rop.JUMP and opnum != rop.FINISH: - useful[arg] = None - if isinstance(arg, Box) and arg not in last_used: + if not isinstance(arg, Box): + continue + if arg not in last_used: last_used[arg] = i + if opnum != rop.JUMP and opnum != rop.LABEL: + if arg not in last_real_usage: + last_real_usage[arg] = 
i if op.is_guard(): for arg in op.getfailargs(): if arg is None: # hole @@ -480,7 +505,8 @@ assert isinstance(arg, Box) if arg not in last_used: last_used[arg] = i - + self.last_real_usage = last_real_usage + # longevity = {} for arg in produced: if arg in last_used: @@ -496,7 +522,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity, useful + self.longevity = longevity def loc(self, v): if v is None: # xxx kludgy @@ -1344,51 +1370,51 @@ # we would like the boxes to be after the jump. def _compute_hint_frame_locations_from_descr(self, descr): - nonfloatlocs, floatlocs = descr._x86_arglocs + arglocs = descr._x86_arglocs jump_op = self.final_jump_op - assert len(nonfloatlocs) == jump_op.numargs() + assert len(arglocs) == jump_op.numargs() for i in range(jump_op.numargs()): box = jump_op.getarg(i) if isinstance(box, Box): - loc = nonfloatlocs[i] + loc = arglocs[i] if isinstance(loc, StackLoc): - assert box.type != FLOAT self.fm.hint_frame_locations[box] = loc - else: - loc = floatlocs[i] - if isinstance(loc, StackLoc): - assert box.type == FLOAT - self.fm.hint_frame_locations[box] = loc def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None descr = op.getdescr() assert isinstance(descr, TargetToken) - nonfloatlocs, floatlocs = descr._x86_arglocs + arglocs = descr._x86_arglocs self.jump_target_descr = descr - # compute 'tmploc' to be all_regs[0] by spilling what is there - box = TempBox() - box1 = TempBox() - tmpreg = X86RegisterManager.all_regs[0] - tmploc = self.rm.force_allocate_reg(box, selected_reg=tmpreg) - xmmtmp = X86XMMRegisterManager.all_regs[0] - self.xrm.force_allocate_reg(box1, selected_reg=xmmtmp) # Part about non-floats - # XXX we don't need a copy, we only just the original list - src_locations1 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type != FLOAT] - assert tmploc not in nonfloatlocs - dst_locations1 = [loc for loc in nonfloatlocs if loc is not None] + src_locations1 = [] + dst_locations1 = [] # Part about floats - src_locations2 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type == FLOAT] - dst_locations2 = [loc for loc in floatlocs if loc is not None] + src_locations2 = [] + dst_locations2 = [] + # Build the four lists + for i in range(op.numargs()): + box = op.getarg(i) + src_loc = self.loc(box) + dst_loc = arglocs[i] + if box.type != FLOAT: + src_locations1.append(src_loc) + dst_locations1.append(dst_loc) + else: + src_locations2.append(src_loc) + dst_locations2.append(dst_loc) + # Do we have a temp var? + if IS_X86_64: + tmpreg = X86_64_SCRATCH_REG + xmmtmp = X86_64_XMM_SCRATCH_REG + else: + tmpreg = None + xmmtmp = None + # Do the remapping remap_frame_layout_mixed(assembler, - src_locations1, dst_locations1, tmploc, + src_locations1, dst_locations1, tmpreg, src_locations2, dst_locations2, xmmtmp) - self.rm.possibly_free_var(box) - self.xrm.possibly_free_var(box1) self.possibly_free_vars_for_op(op) assembler.closing_jump(self.jump_target_descr) @@ -1440,23 +1466,20 @@ self.rm.force_allocate_frame_reg(op.result) def consider_label(self, op): - # XXX big refactoring needed? 
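The _set_initial_bindings code earlier in this regalloc.py diff binds the loop's input arguments to the locations dictated by the native calling convention: on x86-64 the first few integer and float arguments sit in registers (edi, esi, edx, ecx, r8, r9 and xmm0 through xmm7), and everything else is found on the stack above the return address. The toy allocator below uses made-up register name strings and a hypothetical stack layout; it sketches only the assignment order, not the real ebp-relative slot computation:

    INT, FLOAT = 'i', 'f'

    def assign_initial_locations(arg_types):
        """Map each argument to a register or a stack slot, SysV-style."""
        gprs = ['edi', 'esi', 'edx', 'ecx', 'r8', 'r9']
        xmms = ['xmm%d' % i for i in range(8)]
        locations = []
        stack_offset = 16     # hypothetical: above return address + saved ebp
        for tp in arg_types:
            regs = xmms if tp == FLOAT else gprs
            if regs:
                locations.append(regs.pop(0))
            else:
                locations.append('[ebp+%d]' % stack_offset)
                stack_offset += 8
        return locations

    locs = assign_initial_locations([INT] * 8 + [FLOAT])
    assert locs[:6] == ['edi', 'esi', 'edx', 'ecx', 'r8', 'r9']
    assert locs[6].startswith('[ebp') and locs[8] == 'xmm0'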
descr = op.getdescr() assert isinstance(descr, TargetToken) inputargs = op.getarglist() - floatlocs = [None] * len(inputargs) - nonfloatlocs = [None] * len(inputargs) + arglocs = [None] * len(inputargs) # - # we need to make sure that the tmpreg and xmmtmp are free - tmpreg = X86RegisterManager.all_regs[0] - tmpvar = TempBox() - self.rm.force_allocate_reg(tmpvar, selected_reg=tmpreg) - self.rm.possibly_free_var(tmpvar) - # - xmmtmp = X86XMMRegisterManager.all_regs[0] - tmpvar = TempBox() - self.xrm.force_allocate_reg(tmpvar, selected_reg=xmmtmp) - self.xrm.possibly_free_var(tmpvar) + # we use force_spill() on the boxes that are not going to be really + # used any more in the loop, but that are kept alive anyway + # by being in a next LABEL's or a JUMP's argument or fail_args + # of some guard + position = self.rm.position + for arg in inputargs: + assert isinstance(arg, Box) + if self.last_real_usage.get(arg, -1) <= position: + self.force_spill_var(arg) # # we need to make sure that no variable is stored in ebp for arg in inputargs: @@ -1467,16 +1490,18 @@ # for i in range(len(inputargs)): arg = inputargs[i] - assert not isinstance(arg, Const) + assert isinstance(arg, Box) loc = self.loc(arg) - assert not (loc is tmpreg or loc is xmmtmp or loc is ebp) - if arg.type == FLOAT: - floatlocs[i] = loc - else: - nonfloatlocs[i] = loc + assert loc is not ebp + arglocs[i] = loc if isinstance(loc, RegLoc): self.fm.mark_as_free(arg) - descr._x86_arglocs = nonfloatlocs, floatlocs + # + # if we are too close to the start of the loop, the label's target may + # get overridden by redirect_call_assembler(). (rare case) + self.flush_loop() + # + descr._x86_arglocs = arglocs descr._x86_loop_code = self.assembler.mc.get_relative_pos() descr._x86_clt = self.assembler.current_clt self.assembler.target_tokens_currently_compiling[descr] = None @@ -1490,23 +1515,6 @@ if jump_op is not None and jump_op.getdescr() is descr: self._compute_hint_frame_locations_from_descr(descr) -## from pypy.rpython.annlowlevel import llhelper -## def fn(addr): -## print '...label:', hex(addr), nonfloatlocs -## FUNC = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) -## ll_disp = llhelper(FUNC, fn) -## faddr = rffi.cast(lltype.Signed, ll_disp) -## for i in range(16): -## self.assembler.mc.PUSH_r(i) -## self.assembler.mc.CALL_l(0) -## self.assembler.mc.POP(edi) -## self.assembler.mc.MOV(r11, imm(faddr)) -## self.assembler.mc.CALL(r11) -## for i in range(15, -1, -1): -## if i == esp.value: -## i -= 1 -## self.assembler.mc.POP_r(i) - def not_implemented_op(self, op): not_implemented("not implemented operation: %s" % op.getopname()) diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -16,8 +16,7 @@ # class AssemblerLocation(object): - # XXX: Is adding "width" here correct? 
- _attrs_ = ('value', 'width', '_location_code') + _attrs_ = ('value', '_location_code') _immutable_ = True def _getregkey(self): return self.value @@ -28,6 +27,9 @@ def location_code(self): return self._location_code + def get_width(self): + raise NotImplementedError + def value_r(self): return self.value def value_b(self): return self.value def value_s(self): return self.value @@ -43,14 +45,21 @@ _immutable_ = True _location_code = 'b' - def __init__(self, position, ebp_offset, num_words, type): - assert ebp_offset < 0 # so no confusion with RegLoc.value + def __init__(self, position, ebp_offset, type): + # _getregkey() returns self.value; the value returned must not + # conflict with RegLoc._getregkey(). It doesn't a bit by chance, + # so let it fail the following assert if it no longer does. + assert not (0 <= ebp_offset < 8 + 8 * IS_X86_64) self.position = position self.value = ebp_offset - self.width = num_words * WORD # One of INT, REF, FLOAT self.type = type + def get_width(self): + if self.type == FLOAT: + return 8 + return WORD + def __repr__(self): return '%d(%%ebp)' % (self.value,) @@ -64,10 +73,8 @@ self.value = regnum self.is_xmm = is_xmm if self.is_xmm: - self.width = 8 self._location_code = 'x' else: - self.width = WORD self._location_code = 'r' def __repr__(self): if self.is_xmm: @@ -75,6 +82,11 @@ else: return rx86.R.names[self.value] + def get_width(self): + if self.is_xmm: + return 8 + return WORD + def lowest8bits(self): assert not self.is_xmm return RegLoc(rx86.low_byte(self.value), False) @@ -92,9 +104,11 @@ else: return eax -class ImmedLoc(AssemblerLocation): +class ImmediateAssemblerLocation(AssemblerLocation): _immutable_ = True - width = WORD + +class ImmedLoc(ImmediateAssemblerLocation): + _immutable_ = True _location_code = 'i' def __init__(self, value): @@ -105,6 +119,9 @@ def getint(self): return self.value + def get_width(self): + return WORD + def __repr__(self): return "ImmedLoc(%d)" % (self.value) @@ -117,7 +134,6 @@ class AddressLoc(AssemblerLocation): _immutable_ = True - width = WORD # The address is base_loc + (scaled_loc << scale) + static_offset def __init__(self, base_loc, scaled_loc, scale=0, static_offset=0): assert 0 <= scale < 4 @@ -146,6 +162,9 @@ info = getattr(self, attr, '?') return '' % (self._location_code, info) + def get_width(self): + return WORD + def value_a(self): return self.loc_a @@ -180,32 +199,34 @@ raise AssertionError(self._location_code) return result -class ConstFloatLoc(AssemblerLocation): - # XXX: We have to use this class instead of just AddressLoc because - # we want a width of 8 (... I think. Check this!) +class ConstFloatLoc(ImmediateAssemblerLocation): _immutable_ = True - width = 8 _location_code = 'j' def __init__(self, address): self.value = address + def get_width(self): + return 8 + def __repr__(self): return '' % (self.value,) if IS_X86_32: - class FloatImmedLoc(AssemblerLocation): + class FloatImmedLoc(ImmediateAssemblerLocation): # This stands for an immediate float. It cannot be directly used in # any assembler instruction. Instead, it is meant to be decomposed # in two 32-bit halves. On 64-bit, FloatImmedLoc() is a function # instead; see below. 
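In regloc.py the per-instance width attribute is replaced by a get_width() method, so the width can be derived from the location's kind (a FLOAT stack slot or an xmm register is 8 bytes, everything else is one word), and a common ImmediateAssemblerLocation base class now marks the locations that the jump remapper may treat as immediates. A stripped-down sketch of that shape, with invented classes that only mirror the interface, not the real regloc hierarchy:

    WORD = 8                      # assuming a 64-bit build for the example
    INT, REF, FLOAT = 'i', 'r', 'f'

    class Location(object):
        def get_width(self):
            raise NotImplementedError

    class StackSlot(Location):
        def __init__(self, position, tp):
            self.position = position
            self.type = tp
        def get_width(self):
            return 8 if self.type == FLOAT else WORD

    class Immediate(Location):    # plays the ImmediateAssemblerLocation role
        def __init__(self, value):
            self.value = value
        def get_width(self):
            return WORD

    assert StackSlot(3, FLOAT).get_width() == 8
    assert Immediate(42).get_width() == WORD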
_immutable_ = True - width = 8 _location_code = '#' # don't use me def __init__(self, floatstorage): self.aslonglong = floatstorage + def get_width(self): + return 8 + def low_part(self): return intmask(self.aslonglong) diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.llinterp import LLInterpreter from pypy.rlib.objectmodel import we_are_translated +from pypy.jit.codewriter import longlong from pypy.jit.metainterp import history, compile from pypy.jit.backend.x86.assembler import Assembler386 from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS @@ -21,7 +22,6 @@ supports_floats = True supports_singlefloats = True - BOOTSTRAP_TP = lltype.FuncType([], lltype.Signed) dont_keepalive_stuff = False # for tests with_threads = False @@ -91,15 +91,6 @@ return self.assembler.assemble_bridge(faildescr, inputargs, operations, original_loop_token, log=log) - def set_future_value_int(self, index, intvalue): - self.assembler.fail_boxes_int.setitem(index, intvalue) - - def set_future_value_float(self, index, floatvalue): - self.assembler.fail_boxes_float.setitem(index, floatvalue) - - def set_future_value_ref(self, index, ptrvalue): - self.assembler.fail_boxes_ptr.setitem(index, ptrvalue) - def get_latest_value_int(self, index): return self.assembler.fail_boxes_int.getitem(index) @@ -122,27 +113,28 @@ # the FORCE_TOKEN operation and this helper both return 'ebp'. return self.assembler.fail_ebp - def execute_token(self, executable_token): - addr = executable_token._x86_bootstrap_code - #llop.debug_print(lltype.Void, ">>>> Entering", addr) - func = rffi.cast(lltype.Ptr(self.BOOTSTRAP_TP), addr) - fail_index = self._execute_call(func) - #llop.debug_print(lltype.Void, "<<<< Back") - return self.get_fail_descr_from_number(fail_index) - - def _execute_call(self, func): - # help flow objspace - prev_interpreter = None - if not self.translate_support_code: - prev_interpreter = LLInterpreter.current_interpreter - LLInterpreter.current_interpreter = self.debug_ll_interpreter - res = 0 - try: - res = func() - finally: + def make_execute_token(self, *ARGS): + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, lltype.Signed)) + # + def execute_token(executable_token, *args): + clt = executable_token.compiled_loop_token + assert len(args) == clt._debug_nbargs + # + addr = executable_token._x86_function_addr + func = rffi.cast(FUNCPTR, addr) + #llop.debug_print(lltype.Void, ">>>> Entering", addr) + prev_interpreter = None # help flow space if not self.translate_support_code: - LLInterpreter.current_interpreter = prev_interpreter - return res + prev_interpreter = LLInterpreter.current_interpreter + LLInterpreter.current_interpreter = self.debug_ll_interpreter + try: + fail_index = func(*args) + finally: + if not self.translate_support_code: + LLInterpreter.current_interpreter = prev_interpreter + #llop.debug_print(lltype.Void, "<<<< Back") + return self.get_fail_descr_from_number(fail_index) + return execute_token def cast_ptr_to_int(x): adr = llmemory.cast_ptr_to_adr(x) diff --git a/pypy/jit/backend/x86/test/test_assembler.py b/pypy/jit/backend/x86/test/test_assembler.py --- a/pypy/jit/backend/x86/test/test_assembler.py +++ b/pypy/jit/backend/x86/test/test_assembler.py @@ -46,12 +46,13 @@ xmm2] assert len(failargs) == len(locs) assembler.write_failure_recovery_description(mc, failargs, locs) - nums = [Assembler386.DESCR_INT + 4*(16+0), - 
Assembler386.DESCR_REF + 4*(16+1), - Assembler386.DESCR_FLOAT + 4*(16+10), - Assembler386.DESCR_INT + 4*(16+100), - Assembler386.DESCR_REF + 4*(16+101), - Assembler386.DESCR_FLOAT + 4*(16+110), + base = 8 + 8*IS_X86_64 + nums = [Assembler386.DESCR_INT + 4*(base+0), + Assembler386.DESCR_REF + 4*(base+1), + Assembler386.DESCR_FLOAT + 4*(base+10), + Assembler386.DESCR_INT + 4*(base+100), + Assembler386.DESCR_REF + 4*(base+101), + Assembler386.DESCR_FLOAT + 4*(base+110), Assembler386.CODE_HOLE, Assembler386.CODE_HOLE, Assembler386.DESCR_INT + 4*ebx.value, diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -71,6 +71,18 @@ ('mov', eax, s24), ('mov', s12, edi)] +def test_no_tmp_reg(): + assembler = MockAssembler() + s8 = frame_pos(0, INT) + s12 = frame_pos(13, INT) + s20 = frame_pos(20, INT) + s24 = frame_pos(221, INT) + remap_frame_layout(assembler, [s8, eax, s12], [s20, s24, edi], None) + assert assembler.ops == [('push', s8), + ('pop', s20), + ('mov', eax, s24), + ('mov', s12, edi)] + def test_reordering(): assembler = MockAssembler() s8 = frame_pos(8, INT) @@ -237,7 +249,7 @@ while len(result) < count: x = fn() keys = [x._getregkey()] - if isinstance(x, StackLoc) and x.width > WORD: + if isinstance(x, StackLoc) and x.get_width() > WORD: keys.append(keys[0] + WORD) for key in keys: if key in seen: @@ -255,7 +267,7 @@ for i, loc in enumerate(locations): if isinstance(loc, RegLoc): if loc.is_xmm: - if loc.width > WORD: + if loc.get_width() > WORD: newvalue = ('value-xmm-%d' % i, 'value-xmm-hiword-%d' % i) else: @@ -264,8 +276,8 @@ else: regs1[loc.value] = 'value-int-%d' % i elif isinstance(loc, StackLoc): - stack[loc.value] = 'value-width%d-%d' % (loc.width, i) - if loc.width > WORD: + stack[loc.value] = 'value-width%d-%d' % (loc.get_width(), i) + if loc.get_width() > WORD: stack[loc.value+WORD] = 'value-hiword-%d' % i else: assert isinstance(loc, ImmedLoc) @@ -287,7 +299,7 @@ # def read(loc, expected_width=None): if expected_width is not None: - assert loc.width == expected_width + assert loc.get_width() == expected_width if isinstance(loc, RegLoc): if loc.is_xmm: return regs2[loc.value] @@ -295,7 +307,7 @@ return regs1[loc.value] if isinstance(loc, StackLoc): got = stack[loc.value] - if loc.width > WORD: + if loc.get_width() > WORD: got = (got, stack[loc.value+WORD]) return got if isinstance(loc, ImmedLoc): @@ -309,7 +321,7 @@ else: regs1[loc.value] = newvalue elif isinstance(loc, StackLoc): - if loc.width > WORD: + if loc.get_width() > WORD: newval1, newval2 = newvalue stack[loc.value] = newval1 stack[loc.value+WORD] = newval2 diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -19,8 +19,7 @@ finish(i3, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - self.cpu.set_future_value_int(0, 0) - fail = self.run(loop) + fail = self.run(loop, 0) assert fail.identifier == 2 assert self.getint(0) == 21 @@ -55,8 +54,7 @@ assert descr._x86_bridge_param_depth == 0 # the force_spill() forces the stack to grow assert new > previous - self.cpu.set_future_value_int(0, 0) - fail = self.run(loop) + fail = self.run(loop, 0) assert fail.identifier == 2 assert self.getint(0) == 21 assert self.getint(1) == 22 @@ -71,20 +69,19 @@ i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] jump(i1, i10, i11, 
i12, i13, i14, i15, i16, descr=targettoken) - ''', [0]) + ''', [0, 0, 0, 0, 0, 0, 0, 0]) other_loop = self.interpret(''' - [i3] + [i3, i10, i11, i12, i13, i14, i15, i16] label(i3, descr=targettoken2) guard_false(i3, descr=fdescr2) [i3] jump(i3, descr=targettoken2) - ''', [1]) + ''', [1, 0, 0, 0, 0, 0, 0, 0]) ops = ''' [i3] jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=targettoken) ''' bridge = self.attach_bridge(ops, other_loop, 1) - self.cpu.set_future_value_int(0, 1) - fail = self.run(other_loop) + fail = self.run(other_loop, 1, 0, 0, 0, 0, 0, 0, 0) assert fail.identifier == 1 def test_bridge_jumps_to_self_deeper(self): @@ -100,7 +97,7 @@ i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] jump(i3, i30, 1, i30, i30, i30, descr=targettoken) - ''', [0]) + ''', [0, 0, 0, 0, 0, 0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' @@ -123,10 +120,7 @@ # the force_spill() forces the stack to grow assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth assert guard_op.getdescr()._x86_bridge_param_depth == 0 - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 0) - self.cpu.set_future_value_int(2, 0) - self.run(loop) + self.run(loop, 0, 0, 0, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 @@ -142,7 +136,7 @@ i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] jump(i3, i1, i2, descr=targettoken) - ''', [0]) + ''', [0, 0, 0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' @@ -150,10 +144,7 @@ jump(i3, 0, 1, descr=targettoken) ''' bridge = self.attach_bridge(ops, loop, 5) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 0) - self.cpu.set_future_value_int(2, 0) - self.run(loop) + self.run(loop, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -142,19 +142,20 @@ loop = self.parse(ops) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - for i, arg in enumerate(args): + arguments = [] + for arg in args: if isinstance(arg, int): - self.cpu.set_future_value_int(i, arg) + arguments.append(arg) elif isinstance(arg, float): arg = longlong.getfloatstorage(arg) - self.cpu.set_future_value_float(i, arg) + arguments.append(arg) else: assert isinstance(lltype.typeOf(arg), lltype.Ptr) llgcref = lltype.cast_opaque_ptr(llmemory.GCREF, arg) - self.cpu.set_future_value_ref(i, llgcref) + arguments.append(llgcref) loop._jitcelltoken = looptoken if run: - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, *arguments) return loop def prepare_loop(self, ops): @@ -193,8 +194,8 @@ loop._jitcelltoken) return bridge - def run(self, loop): - return self.cpu.execute_token(loop._jitcelltoken) + def run(self, loop, *arguments): + return self.cpu.execute_token(loop._jitcelltoken, *arguments) class TestRegallocSimple(BaseTestRegalloc): def test_simple_loop(self): @@ -220,7 +221,7 @@ ''' loop = self.interpret(ops, [0, 0, 0, 0]) ops2 = ''' - [i5] + [i5, i6, i7, i8] label(i5, descr=targettoken2) i1 = int_add(i5, 1) i3 = int_add(i1, 1) @@ -229,14 +230,13 @@ guard_true(i2) [i4] jump(i4, descr=targettoken2) ''' - loop2 = self.interpret(ops2, [0]) + loop2 = self.interpret(ops2, [0, 0, 0, 0]) bridge_ops = ''' [i4] jump(i4, i4, i4, i4, descr=targettoken) ''' bridge = self.attach_bridge(bridge_ops, loop2, 5) - self.cpu.set_future_value_int(0, 0) - self.run(loop2) + self.run(loop2, 0, 0, 
0, 0) assert self.getint(0) == 31 assert self.getint(1) == 30 assert self.getint(2) == 30 @@ -274,8 +274,7 @@ loop = self.interpret(ops, [0]) assert self.getint(0) == 1 bridge = self.attach_bridge(bridge_ops, loop, 2) - self.cpu.set_future_value_int(0, 0) - self.run(loop) + self.run(loop, 0) assert self.getint(0) == 1 def test_inputarg_unused(self): @@ -301,9 +300,7 @@ assert self.getint(0) == 0 assert self.getint(1) == 10 bridge = self.attach_bridge(bridge_ops, loop, 0) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - self.run(loop) + self.run(loop, 0, 10) assert self.getint(0) == 0 assert self.getint(1) == 10 @@ -320,9 +317,7 @@ finish(1, 2) ''' self.attach_bridge(bridge_ops, loop, 0) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 1) - self.run(loop) + self.run(loop, 0, 1) def test_spill_for_constant(self): ops = ''' @@ -406,7 +401,7 @@ guard_true(i5) [i2, i1] jump(i0, i18, i15, i16, i2, i1, i4, descr=targettoken) ''' - self.interpret(ops, [0, 1, 2, 3]) + self.interpret(ops, [0, 1, 2, 3, 0, 0, 0]) def test_op_result_unused(self): ops = ''' @@ -440,9 +435,7 @@ finish(i0, i1, i2, i3, i4, i5, i6, i7, i8) ''' self.attach_bridge(bridge_ops, loop, 1) - for i in range(9): - self.cpu.set_future_value_int(i, i) - self.run(loop) + self.run(loop, 0, 1, 2, 3, 4, 5, 6, 7, 8) assert self.getints(9) == range(9) def test_loopargs(self): @@ -452,27 +445,13 @@ jump(i4, i1, i2, i3) """ regalloc = self.prepare_loop(ops) - assert len(regalloc.rm.reg_bindings) == 2 + if IS_X86_64: + assert len(regalloc.rm.reg_bindings) == 4 + assert len(regalloc.fm.bindings) == 0 + else: + assert len(regalloc.rm.reg_bindings) == 0 + assert len(regalloc.fm.bindings) == 4 - def test_loopargs_2(self): - ops = """ - [i0, i1, i2, i3] - i4 = int_add(i0, i1) - finish(i4, i1, i2, i3) - """ - regalloc = self.prepare_loop(ops) - assert len(regalloc.rm.reg_bindings) == 2 - - def test_loopargs_3(self): - ops = """ - [i0, i1, i2, i3] - i4 = int_add(i0, i1) - guard_true(i4) [i0, i1, i2, i3, i4] - jump(i4, i1, i2, i3) - """ - regalloc = self.prepare_loop(ops) - assert len(regalloc.rm.reg_bindings) == 2 - class TestRegallocCompOps(BaseTestRegalloc): @@ -640,8 +619,8 @@ i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) - assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) + assert self.getints(10) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9] clt = loop._jitcelltoken.compiled_loop_token assert clt.param_depth == self.expected_param_depth(1) @@ -652,8 +631,8 @@ i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) finish(i11, i1, i2, i3, i4, i5, i6, i7, i8, i9) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) - assert self.getints(11) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) + assert self.getints(10) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9] clt = loop._jitcelltoken.compiled_loop_token assert clt.param_depth == self.expected_param_depth(2) @@ -689,9 +668,7 @@ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) - self.cpu.set_future_value_int(0, 4) - self.cpu.set_future_value_int(1, 7) - self.run(loop) + self.run(loop, 4, 7) assert self.getint(0) == 5*7 def test_bridge_calls_2(self): @@ -712,8 +689,6 @@ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) - 
self.cpu.set_future_value_int(0, 4) - self.cpu.set_future_value_int(1, 7) - self.run(loop) + self.run(loop, 4, 7) assert self.getint(0) == 29 diff --git a/pypy/jit/backend/x86/test/test_regalloc2.py b/pypy/jit/backend/x86/test/test_regalloc2.py --- a/pypy/jit/backend/x86/test/test_regalloc2.py +++ b/pypy/jit/backend/x86/test/test_regalloc2.py @@ -22,8 +22,7 @@ cpu.setup_once() looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 9) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, 9) assert cpu.get_latest_value_int(0) == (9 >> 3) assert cpu.get_latest_value_int(1) == (~18) @@ -45,8 +44,7 @@ cpu.setup_once() looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, -10) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, -10) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == -1000 assert cpu.get_latest_value_int(2) == 1 @@ -142,17 +140,7 @@ cpu.setup_once() looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, -13) - cpu.set_future_value_int(1, 10) - cpu.set_future_value_int(2, 10) - cpu.set_future_value_int(3, 8) - cpu.set_future_value_int(4, -8) - cpu.set_future_value_int(5, -16) - cpu.set_future_value_int(6, -18) - cpu.set_future_value_int(7, 46) - cpu.set_future_value_int(8, -12) - cpu.set_future_value_int(9, 26) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, -13, 10, 10, 8, -8, -16, -18, 46, -12, 26) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == 0 assert cpu.get_latest_value_int(2) == 0 @@ -257,17 +245,7 @@ cpu.setup_once() looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 17) - cpu.set_future_value_int(1, -20) - cpu.set_future_value_int(2, -6) - cpu.set_future_value_int(3, 6) - cpu.set_future_value_int(4, 1) - cpu.set_future_value_int(5, 13) - cpu.set_future_value_int(6, 13) - cpu.set_future_value_int(7, 9) - cpu.set_future_value_int(8, 49) - cpu.set_future_value_int(9, 8) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, 17, -20, -6, 6, 1, 13, 13, 9, 49, 8) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == 8 assert cpu.get_latest_value_int(2) == 1 diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -282,11 +282,7 @@ ops[-2].setfailargs([i1]) looptoken = JitCellToken() self.cpu.compile_loop([b], ops, looptoken) - if op == rop.INT_IS_TRUE: - self.cpu.set_future_value_int(0, b.value) - else: - self.cpu.set_future_value_ref(0, b.value) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, b.value) result = self.cpu.get_latest_value_int(0) if guard == rop.GUARD_FALSE: assert result == execute(self.cpu, None, @@ -332,9 +328,8 @@ inputargs = [i for i in (a, b) if isinstance(i, Box)] looptoken = JitCellToken() self.cpu.compile_loop(inputargs, ops, looptoken) - for i, box in enumerate(inputargs): - self.cpu.set_future_value_int(i, box.value) - self.cpu.execute_token(looptoken) + inputvalues = [box.value for box in inputargs] + self.cpu.execute_token(looptoken, *inputvalues) result = self.cpu.get_latest_value_int(0) expected = execute(self.cpu, None, op, None, a, b).value if guard == rop.GUARD_FALSE: @@ -400,8 +395,7 @@ assert address >= loopaddress + loopsize assert size >= 10 # randomish 
number - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -507,9 +501,7 @@ looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) - self.cpu.set_future_value_int(0, 123450) - self.cpu.set_future_value_int(1, 123408) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 123450, 123408) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 42 assert self.cpu.get_latest_value_int(1) == 42 @@ -541,8 +533,7 @@ self.cpu.assembler.set_debug(True) looptoken = JitCellToken() self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] assert struct.i == 10 @@ -565,7 +556,6 @@ self.cpu.assembler.set_debug(True) looptoken = JitCellToken() self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 0) assert looptoken._x86_debug_checksum == sum([op.getopnum() for op in ops.operations]) diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -498,27 +498,29 @@ else: log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) + def _rewrite_raw_malloc(self, op, name, args): + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) + TYPE = op.args[0].value + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, args, + extra = (TYPE,), + extrakey = TYPE) + def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': - d = op.args[1].value.copy() - d.pop('flavor') - add_memory_pressure = d.pop('add_memory_pressure', False) - zero = d.pop('zero', False) - track_allocation = d.pop('track_allocation', True) - if d: - raise UnsupportedMallocFlags(d) - ARRAY = op.args[0].value - name = 'raw_malloc' - if zero: - name += '_zero' - if add_memory_pressure: - name += '_add_memory_pressure' - if not track_allocation: - name += '_no_track_allocation' - return self._do_builtin_call(op, name, - [op.args[2]], - extra = (ARRAY,), - extrakey = ARRAY) + return self._rewrite_raw_malloc(op, 'raw_malloc_varsize', + [op.args[2]]) if op.args[0].value == rstr.STR: return SpaceOperation('newstr', [op.args[2]], op.result) elif op.args[0].value == rstr.UNICODE: @@ -531,11 +533,18 @@ op.result) def rewrite_op_free(self, op): - flags = op.args[1].value - assert flags['flavor'] == 'raw' - ARRAY = op.args[0].concretetype.TO - return self._do_builtin_call(op, 'raw_free', [op.args[0]], - extra = (ARRAY,), extrakey = ARRAY) + d = op.args[1].value.copy() + assert d['flavor'] == 'raw' + d.pop('flavor') + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) + STRUCT = op.args[0].concretetype.TO + name = 'raw_free' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[0]], + extra = (STRUCT,), 
extrakey = STRUCT) def rewrite_op_getarrayitem(self, op): ARRAY = op.args[0].concretetype.TO @@ -736,6 +745,9 @@ return [op0, op1] def rewrite_op_malloc(self, op): + if op.args[1].value['flavor'] == 'raw': + return self._rewrite_raw_malloc(op, 'raw_malloc_fixedsize', []) + # assert op.args[1].value == {'flavor': 'gc'} STRUCT = op.args[0].value vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, STRUCT) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -599,26 +599,75 @@ return p return _ll_0_alloc_with_del - def build_raw_malloc_builder(zero=False, add_memory_pressure=False, track_allocation=True): - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, add_memory_pressure=add_memory_pressure) - return _ll_1_raw_malloc - return build_ll_1_raw_malloc + def build_raw_malloc_varsize_builder(zero=False, + add_memory_pressure=False, + track_allocation=True): + def build_ll_1_raw_malloc_varsize(ARRAY): + def _ll_1_raw_malloc_varsize(n): + return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) + return _ll_1_raw_malloc_varsize + return build_ll_1_raw_malloc_varsize - build_ll_1_raw_malloc = build_raw_malloc_builder() - build_ll_1_raw_malloc_zero = build_raw_malloc_builder(zero=True) - build_ll_1_raw_malloc_zero_add_memory_pressure = build_raw_malloc_builder(zero=True, add_memory_pressure=True) - build_ll_1_raw_malloc_add_memory_pressure = build_raw_malloc_builder(add_memory_pressure=True) - build_ll_1_raw_malloc_no_track_allocation = build_raw_malloc_builder(track_allocation=False) - build_ll_1_raw_malloc_zero_no_track_allocation = build_raw_malloc_builder(zero=True, track_allocation=False) - build_ll_1_raw_malloc_zero_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(zero=True, add_memory_pressure=True, track_allocation=False) - build_ll_1_raw_malloc_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(add_memory_pressure=True, track_allocation=False) + build_ll_1_raw_malloc_varsize = ( + build_raw_malloc_varsize_builder()) + build_ll_1_raw_malloc_varsize_zero = ( + build_raw_malloc_varsize_builder(zero=True)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure = ( + build_raw_malloc_varsize_builder(zero=True, add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_add_memory_pressure = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_no_track_allocation = ( + build_raw_malloc_varsize_builder(track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, add_memory_pressure=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True, track_allocation=False)) - def build_ll_1_raw_free(ARRAY): - def _ll_1_raw_free(p): - lltype.free(p, flavor='raw') - return _ll_1_raw_free + def build_raw_malloc_fixedsize_builder(zero=False, + add_memory_pressure=False, + track_allocation=True): + def build_ll_0_raw_malloc_fixedsize(STRUCT): + def _ll_0_raw_malloc_fixedsize(): + return lltype.malloc(STRUCT, flavor='raw', zero=zero, + 
add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) + return _ll_0_raw_malloc_fixedsize + return build_ll_0_raw_malloc_fixedsize + + build_ll_0_raw_malloc_fixedsize = ( + build_raw_malloc_fixedsize_builder()) + build_ll_0_raw_malloc_fixedsize_zero = ( + build_raw_malloc_fixedsize_builder(zero=True)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(zero=True, add_memory_pressure=True)) + build_ll_0_raw_malloc_fixedsize_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True)) + build_ll_0_raw_malloc_fixedsize_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, add_memory_pressure=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True, track_allocation=False)) + + def build_raw_free_builder(track_allocation=True): + def build_ll_1_raw_free(ARRAY): + def _ll_1_raw_free(p): + lltype.free(p, flavor='raw', + track_allocation=track_allocation) + return _ll_1_raw_free + return build_ll_1_raw_free + + build_ll_1_raw_free = ( + build_raw_free_builder()) + build_ll_1_raw_free_no_track_allocation = ( + build_raw_free_builder(track_allocation=False)) + class OOtypeHelpers: diff --git a/pypy/jit/codewriter/test/test_codewriter.py b/pypy/jit/codewriter/test/test_codewriter.py --- a/pypy/jit/codewriter/test/test_codewriter.py +++ b/pypy/jit/codewriter/test/test_codewriter.py @@ -217,7 +217,7 @@ cw.make_jitcodes(verbose=True) # s = jitdriver_sd.mainjitcode.dump() - assert 'residual_call_ir_i $<* fn _ll_1_raw_malloc__Signed>' in s + assert 'residual_call_ir_i $<* fn _ll_1_raw_malloc_varsize__Signed>' in s assert 'setarrayitem_raw_i' in s assert 'getarrayitem_raw_i' in s assert 'residual_call_ir_v $<* fn _ll_1_raw_free__arrayPtr>' in s diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -550,7 +550,7 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) op0, op1 = tr.rewrite_operation(op) assert op0.opname == 'residual_call_ir_i' - assert op0.args[0].value == 'raw_malloc' # pseudo-function as a str + assert op0.args[0].value == 'raw_malloc_varsize' # pseudo-function as a str assert op1.opname == '-live-' assert op1.args == [] @@ -564,7 +564,7 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) op0, op1 = tr.rewrite_operation(op) assert op0.opname == 'residual_call_ir_i' - assert op0.args[0].value == 'raw_malloc_zero' # pseudo-function as a str + assert op0.args[0].value == 'raw_malloc_varsize_zero' # pseudo-fn as a str assert op1.opname == '-live-' assert op1.args == [] @@ -578,6 +578,35 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) +def test_raw_malloc_fixedsize(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc', [Constant(S, lltype.Void), flags], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = 
tr.rewrite_operation(op) + assert op0.opname == 'residual_call_r_i' + assert op0.args[0].value == 'raw_malloc_fixedsize_zero' #pseudo-fn as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_free(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + for flag in [True, False]: + flags = Constant({'flavor': 'raw', 'track_allocation': flag}, + lltype.Void) + op = SpaceOperation('free', [varoftype(lltype.Ptr(S)), flags], + varoftype(lltype.Void)) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_v' + if flag: + pseudo_op_name = 'raw_free' + else: + pseudo_op_name = 'raw_free_no_track_allocation' + assert op0.args[0].value == pseudo_op_name # pseudo-function as a str + assert op1.opname == '-live-' + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1504,7 +1504,6 @@ all_virtuals=None): from pypy.jit.metainterp.resume import blackhole_from_resumedata #debug_start('jit-blackhole') - metainterp_sd.profiler.start_blackhole() blackholeinterp = blackhole_from_resumedata( metainterp_sd.blackholeinterpbuilder, jitdriver_sd, @@ -1518,10 +1517,9 @@ current_exc = blackholeinterp._prepare_resume_from_failure( resumedescr.guard_opnum, dont_change_position) - try: - _run_forever(blackholeinterp, current_exc) - finally: - metainterp_sd.profiler.end_blackhole() + #try: + _run_forever(blackholeinterp, current_exc) + #finally: #debug_stop('jit-blackhole') def convert_and_run_from_pyjitpl(metainterp, raising_exception=False): @@ -1529,7 +1527,6 @@ # 'metainterp.framestack'. 
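
The _rewrite_raw_malloc helper above folds the malloc flags into a suffixed pseudo-function name (raw_malloc_varsize_zero, raw_malloc_fixedsize_no_track_allocation, and so on), and support.py generates one builder per combination. A minimal standalone sketch of the flag-to-name mapping, simplified and not the actual transformer code:

    def malloc_helper_name(base, flags):
        # Fold a malloc flags dict into a suffixed helper name, in the same
        # order as _rewrite_raw_malloc: zero, then add_memory_pressure, then
        # no_track_allocation.  Leftover flags are rejected, mirroring the
        # UnsupportedMallocFlags check in the transformer.
        flags = dict(flags)
        flags.pop('flavor', None)
        name = base
        if flags.pop('zero', False):
            name += '_zero'
        if flags.pop('add_memory_pressure', False):
            name += '_add_memory_pressure'
        if not flags.pop('track_allocation', True):
            name += '_no_track_allocation'
        if flags:
            raise ValueError("unsupported malloc flags: %r" % (flags,))
        return name

    assert malloc_helper_name('raw_malloc_varsize',
                              {'flavor': 'raw', 'zero': True}) == 'raw_malloc_varsize_zero'
    assert malloc_helper_name('raw_malloc_fixedsize',
                              {'flavor': 'raw', 'track_allocation': False}) == \
           'raw_malloc_fixedsize_no_track_allocation'
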
#debug_start('jit-blackhole') metainterp_sd = metainterp.staticdata - metainterp_sd.profiler.start_blackhole() nextbh = None for frame in metainterp.framestack: curbh = metainterp_sd.blackholeinterpbuilder.acquire_interp() @@ -1546,8 +1543,7 @@ firstbh.exception_last_value = current_exc current_exc = lltype.nullptr(rclass.OBJECTPTR.TO) # - try: - _run_forever(firstbh, current_exc) - finally: - metainterp_sd.profiler.end_blackhole() + #try: + _run_forever(firstbh, current_exc) + #finally: #debug_stop('jit-blackhole') diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -11,7 +11,7 @@ from pypy.jit.metainterp.resoperation import ResOperation, rop, get_deep_immutable_oplist from pypy.jit.metainterp.history import TreeLoop, Box, History, JitCellToken, TargetToken from pypy.jit.metainterp.history import AbstractFailDescr, BoxInt -from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const +from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const, ConstInt from pypy.jit.metainterp import history from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.jit.metainterp.optimize import InvalidLoop @@ -254,7 +254,44 @@ record_loop_or_bridge(metainterp_sd, loop) return target_token +def patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd): + vinfo = jitdriver_sd.virtualizable_info + extra_ops = [] + inputargs = loop.inputargs + vable_box = inputargs[jitdriver_sd.index_of_virtualizable] + i = jitdriver_sd.num_red_args + loop.inputargs = inputargs[:i] + for descr in vinfo.static_field_descrs: + assert i < len(inputargs) + box = inputargs[i] + extra_ops.append( + ResOperation(rop.GETFIELD_GC, [vable_box], box, descr)) + i += 1 + arrayindex = 0 + for descr in vinfo.array_field_descrs: + vable = vable_box.getref_base() + arraylen = vinfo.get_array_length(vable, arrayindex) + arraybox = BoxPtr() + extra_ops.append( + ResOperation(rop.GETFIELD_GC, [vable_box], arraybox, descr)) + arraydescr = vinfo.array_descrs[arrayindex] + assert i + arraylen <= len(inputargs) + for index in range(arraylen): + box = inputargs[i] + extra_ops.append( + ResOperation(rop.GETARRAYITEM_GC, + [arraybox, ConstInt(index)], + box, descr=arraydescr)) + i += 1 + arrayindex += 1 + assert i == len(inputargs) + loop.operations = extra_ops + loop.operations + def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): + vinfo = jitdriver_sd.virtualizable_info + if vinfo is not None: + patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd) + original_jitcell_token = loop.original_jitcell_token jitdriver_sd.on_compile(metainterp_sd.logger_ops, original_jitcell_token, loop.operations, type, greenkey) @@ -435,14 +472,14 @@ if self.must_compile(metainterp_sd, jitdriver_sd): self.start_compiling() try: - return self._trace_and_compile_from_bridge(metainterp_sd, - jitdriver_sd) + self._trace_and_compile_from_bridge(metainterp_sd, + jitdriver_sd) finally: self.done_compiling() else: from pypy.jit.metainterp.blackhole import resume_in_blackhole resume_in_blackhole(metainterp_sd, jitdriver_sd, self) - assert 0, "unreachable" + assert 0, "unreachable" def _trace_and_compile_from_bridge(self, metainterp_sd, jitdriver_sd): # 'jitdriver_sd' corresponds to the outermost one, i.e. the one @@ -451,7 +488,7 @@ # jitdrivers. 
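
The new patch_new_loop_to_load_virtualizable_fields above keeps only the first num_red_args inputargs of a freshly compiled loop and prepends GETFIELD_GC / GETARRAYITEM_GC operations that reload the remaining input boxes from the virtualizable. A toy illustration of that reshuffling, with plain tuples standing in for ResOperations (hypothetical helper, not the real API):

    def patch_inputargs(inputargs, num_red_args, index_of_virtualizable,
                        field_names):
        # Keep only the red arguments; every remaining input box is instead
        # reloaded from the virtualizable via a 'getfield' pseudo-operation
        # prepended to the loop.
        vable = inputargs[index_of_virtualizable]
        kept = inputargs[:num_red_args]
        extra_ops = [('getfield', vable, field, box)
                     for field, box in zip(field_names,
                                           inputargs[num_red_args:])]
        return kept, extra_ops

    kept, ops = patch_inputargs(['i0', 'p_vable', 'i_x', 'i_y'],
                                num_red_args=2, index_of_virtualizable=1,
                                field_names=['inst_x', 'inst_y'])
    assert kept == ['i0', 'p_vable']
    assert ops == [('getfield', 'p_vable', 'inst_x', 'i_x'),
                   ('getfield', 'p_vable', 'inst_y', 'i_y')]
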
from pypy.jit.metainterp.pyjitpl import MetaInterp metainterp = MetaInterp(metainterp_sd, jitdriver_sd) - return metainterp.handle_guard_failure(self) + metainterp.handle_guard_failure(self) _trace_and_compile_from_bridge._dont_inline_ = True def must_compile(self, metainterp_sd, jitdriver_sd): @@ -767,21 +804,25 @@ assert exception, "PropagateExceptionDescr: no exception??" raise metainterp_sd.ExitFrameWithExceptionRef(cpu, exception) -def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redboxes, +def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redargtypes, memory_manager=None): """Make a LoopToken that corresponds to assembler code that just calls back the interpreter. Used temporarily: a fully compiled version of the code may end up replacing it. """ - # 'redboxes' is only used to know the types of red arguments. - inputargs = [box.clonebox() for box in redboxes] jitcell_token = make_jitcell_token(jitdriver_sd) - # 'nb_red_args' might be smaller than len(redboxes), - # because it doesn't include the virtualizable boxes. nb_red_args = jitdriver_sd.num_red_args + assert len(redargtypes) == nb_red_args + inputargs = [] + for kind in redargtypes: + if kind == history.INT: box = BoxInt() + elif kind == history.REF: box = BoxPtr() + elif kind == history.FLOAT: box = BoxFloat() + else: raise AssertionError + inputargs.append(box) k = jitdriver_sd.portal_runner_adr funcbox = history.ConstInt(heaptracker.adr2int(k)) - callargs = [funcbox] + greenboxes + inputargs[:nb_red_args] + callargs = [funcbox] + greenboxes + inputargs # result_type = jitdriver_sd.result_type if result_type == history.INT: diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -124,9 +124,6 @@ def sort_key(self): raise NotImplementedError - def set_future_value(self, cpu, j): - raise NotImplementedError - def nonnull(self): raise NotImplementedError @@ -289,9 +286,6 @@ def _get_hash_(self): return make_hashable_int(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_int(j, self.value) - def same_constant(self, other): if isinstance(other, ConstInt): return self.value == other.value @@ -329,9 +323,6 @@ def _get_hash_(self): return longlong.gethash(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_float(j, self.value) - def same_constant(self, other): if isinstance(other, ConstFloat): return self.value == other.value @@ -378,9 +369,6 @@ def getaddr(self): return llmemory.cast_ptr_to_adr(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def same_constant(self, other): if isinstance(other, ConstPtr): return self.value == other.value @@ -432,9 +420,6 @@ else: return 0 - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - ## def getaddr(self): ## # so far this is used only when calling ## # CodeWriter.IndirectCallset.bytecode_for_address. 
We don't need a @@ -540,9 +525,6 @@ def _get_hash_(self): return make_hashable_int(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_int(j, self.value) - def nonnull(self): return self.value != 0 @@ -575,9 +557,6 @@ def _get_hash_(self): return longlong.gethash(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_float(j, self.value) - def nonnull(self): return self.value != longlong.ZEROF @@ -620,9 +599,6 @@ else: return 0 - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def nonnull(self): return bool(self.value) @@ -667,19 +643,12 @@ def nonnull(self): return bool(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def repr_rpython(self): return repr_rpython(self, 'bo') _getrepr_ = repr_object -def set_future_values(cpu, boxes): - for j in range(len(boxes)): - boxes[j].set_future_value(cpu, j) - # ____________________________________________________________ @@ -768,10 +737,23 @@ class TargetToken(AbstractDescr): def __init__(self, targeting_jitcell_token=None): - # The jitcell to which jumps might result in a jump to this label + # Warning, two different jitcell_tokens here! + # + # * 'targeting_jitcell_token' is only useful for the front-end, + # and it means: consider the LABEL that uses this TargetToken. + # At this position, the state is logically the one given + # by targeting_jitcell_token. So e.g. if we want to enter the + # JIT with some given green args, if the jitcell matches, then + # we can jump to this LABEL. + # + # * 'original_jitcell_token' is information from the backend's + # point of view: it means that this TargetToken is used in + # a LABEL that belongs to either: + # - a loop; then 'original_jitcell_token' is this loop + # - or a bridge; then 'original_jitcell_token' is the loop + # out of which we made this bridge + # self.targeting_jitcell_token = targeting_jitcell_token - - # The jitcell where the trace containing the label with this TargetToken begins self.original_jitcell_token = None self.virtual_state = None @@ -981,15 +963,19 @@ self.aborted_keys = [] self.invalidated_token_numbers = set() # <- not RPython self.jitcell_token_wrefs = [] + self.jitcell_dicts = [] # <- not RPython def clear(self): del self.loops[:] del self.locations[:] del self.aborted_keys[:] + del self.jitcell_token_wrefs[:] self.invalidated_token_numbers.clear() self.compiled_count = 0 self.enter_count = 0 self.aborted_count = 0 + for dict in self.jitcell_dicts: + dict.clear() def add_jitcell_token(self, token): assert isinstance(token, JitCellToken) diff --git a/pypy/jit/metainterp/jitdriver.py b/pypy/jit/metainterp/jitdriver.py --- a/pypy/jit/metainterp/jitdriver.py +++ b/pypy/jit/metainterp/jitdriver.py @@ -11,6 +11,7 @@ # self.portal_calldescr ... pypy.jit.metainterp.warmspot # self.num_green_args ... pypy.jit.metainterp.warmspot # self.num_red_args ... pypy.jit.metainterp.warmspot + # self.red_args_types ... pypy.jit.metainterp.warmspot # self.result_type ... pypy.jit.metainterp.warmspot # self.virtualizable_info... pypy.jit.metainterp.warmspot # self.greenfield_info ... 
pypy.jit.metainterp.warmspot diff --git a/pypy/jit/metainterp/jitprof.py b/pypy/jit/metainterp/jitprof.py --- a/pypy/jit/metainterp/jitprof.py +++ b/pypy/jit/metainterp/jitprof.py @@ -10,8 +10,6 @@ counters=""" TRACING BACKEND -RUNNING -BLACKHOLE OPS RECORDED_OPS GUARDS @@ -67,18 +65,6 @@ def end_backend(self): pass - def start_running(self): - pass - - def end_running(self): - pass - - def start_blackhole(self): - pass - - def end_blackhole(self): - pass - def count(self, kind, inc=1): pass @@ -134,16 +120,6 @@ def start_backend(self): self._start(BACKEND) def end_backend(self): self._end (BACKEND) - # Don't record times for 'running' and 'blackhole' because there are - # too many of them: calling time.time() is a major blocker. - # If you are interested in these numbers, use 'PYPYLOG=file' and - # look at the resulting file with pypy/tool/logparser.py. - def start_running(self): self.count(RUNNING) - def end_running(self): pass - - def start_blackhole(self): self.count(BLACKHOLE) - def end_blackhole(self): pass - def count(self, kind, inc=1): self.counters[kind] += inc @@ -165,8 +141,6 @@ calls = self.calls self._print_line_time("Tracing", cnt[TRACING], tim[TRACING]) self._print_line_time("Backend", cnt[BACKEND], tim[BACKEND]) - self._print_intline("Running asm", cnt[RUNNING]) - self._print_intline("Blackhole", cnt[BLACKHOLE]) line = "TOTAL: \t\t%f" % (self.tk - self.starttime, ) debug_print(line) self._print_intline("ops", cnt[OPS]) diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -1,5 +1,5 @@ import math -from pypy.rlib.rarithmetic import r_int64 +from pypy.rlib.rarithmetic import r_int64, r_uint from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.rlib.objectmodel import we_are_translated @@ -21,6 +21,7 @@ # class MemoryManager(object): + NO_NEXT_CHECK = r_int64(2 ** 63 - 1) def __init__(self): self.check_frequency = -1 @@ -36,12 +37,13 @@ # According to my estimates it's about 5e9 years given 1000 loops # per second self.current_generation = r_int64(1) - self.next_check = r_int64(-1) + self.next_check = self.NO_NEXT_CHECK self.alive_loops = {} + self._cleanup_jitcell_dicts = lambda: None def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: - self.next_check = r_int64(-1) + self.next_check = self.NO_NEXT_CHECK else: self.max_age = max_age if check_frequency <= 0: @@ -49,10 +51,11 @@ self.check_frequency = check_frequency self.next_check = self.current_generation + 1 - def next_generation(self): + def next_generation(self, do_cleanups_now=True): self.current_generation += 1 - if self.current_generation == self.next_check: + if do_cleanups_now and self.current_generation >= self.next_check: self._kill_old_loops_now() + self._cleanup_jitcell_dicts() self.next_check = self.current_generation + self.check_frequency def keep_loop_alive(self, looptoken): @@ -81,3 +84,22 @@ # a single one is not enough for all tests :-( rgc.collect(); rgc.collect(); rgc.collect() debug_stop("jit-mem-collect") + + def get_current_generation_uint(self): + """Return the current generation, possibly truncated to a uint. + To use only as an approximation for decaying counters.""" + return r_uint(self.current_generation) + + def record_jitcell_dict(self, callback): + """NOT_RPYTHON. The given jitcell_dict is a dict that needs + occasional clean-ups of old cells. 
A cell is old if it never + reached the threshold, and its counter decayed to a tiny value.""" + # note that the various jitcell_dicts have different RPython types, + # so we have to make a different function for each one. These + # functions are chained to each other: each calls the previous one. + def cleanup_dict(): + callback() + cleanup_previous() + # + cleanup_previous = self._cleanup_jitcell_dicts + self._cleanup_jitcell_dicts = cleanup_dict diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -246,15 +246,16 @@ self.force_lazy_setfields_and_arrayitems_for_guard()) return opnum = op.getopnum() - if (opnum == rop.SETFIELD_GC or # handled specially - opnum == rop.SETFIELD_RAW or # no effect on GC struct/array - opnum == rop.SETARRAYITEM_GC or # handled specially - opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct - opnum == rop.STRSETITEM or # no effect on GC struct/array - opnum == rop.UNICODESETITEM or # no effect on GC struct/array - opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever - opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array - opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array + if (opnum == rop.SETFIELD_GC or # handled specially + opnum == rop.SETFIELD_RAW or # no effect on GC struct/array + opnum == rop.SETARRAYITEM_GC or # handled specially + opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct + opnum == rop.SETINTERIORFIELD_RAW or # no effect on GC struct + opnum == rop.STRSETITEM or # no effect on GC struct/array + opnum == rop.UNICODESETITEM or # no effect on GC struct/array + opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever + opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array + opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array return assert opnum != rop.CALL_PURE if (opnum == rop.CALL or diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7744,6 +7744,22 @@ """ self.optimize_loop(ops, expected) + def test_setinteriorfield_should_not_clear_cache(self): + ops = """ + [i0, p0] + i2 = getfield_gc(p0, descr=adescr) + i3 = call(i2, descr=nonwritedescr) + setinteriorfield_raw(i0, i2, i3) + jump(i0, p0) + """ + expected = """ + [i0, p0, i2] + i3 = call(i2, descr=nonwritedescr) + setinteriorfield_raw(i0, i2, i3) + jump(i0, p0, i2) + """ + self.optimize_loop(ops, expected) + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1810,7 +1810,7 @@ def _interpret(self): # Execute the frames forward until we raise a DoneWithThisFrame, - # a ExitFrameWithException, or a GenerateMergePoint exception. + # a ExitFrameWithException, or a ContinueRunningNormally exception. 
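
record_jitcell_dict above chains every registered clean-up callback into the single _cleanup_jitcell_dicts closure, since the jitcell dicts have different RPython types and cannot share one function. The chaining idiom in isolation, as a standalone sketch with invented names:

    class CleanupChain(object):
        # Each registration wraps the previous chain, so one call cleans
        # every registered dict in reverse registration order.
        def __init__(self):
            self._cleanup = lambda: None        # start with a no-op

        def register(self, callback):
            previous = self._cleanup
            def cleanup():
                callback()
                previous()                      # fall through to older callbacks
            self._cleanup = cleanup

        def run(self):
            self._cleanup()

    chain = CleanupChain()
    d1, d2 = {1: 'a'}, {2: 'b'}
    chain.register(d1.clear)
    chain.register(d2.clear)
    chain.run()
    assert d1 == {} and d2 == {}
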
self.staticdata.stats.entered() while True: self.framestack[-1].run_one_step() @@ -1858,8 +1858,6 @@ self.seen_loop_header_for_jdindex = -1 try: self.interpret() - except GenerateMergePoint, gmp: - return self.designate_target_loop(gmp) except SwitchToBlackhole, stb: self.run_blackhole_interp_to_cancel_tracing(stb) assert False, "should always raise" @@ -1894,8 +1892,6 @@ if self.resumekey_original_loop_token is None: # very rare case raise SwitchToBlackhole(ABORT_BRIDGE) self.interpret() - except GenerateMergePoint, gmp: - return self.designate_target_loop(gmp) except SwitchToBlackhole, stb: self.run_blackhole_interp_to_cancel_tracing(stb) assert False, "should always raise" @@ -1980,12 +1976,48 @@ start = len(self.history.operations) self.current_merge_points.append((live_arg_boxes, start)) - def designate_target_loop(self, gmp): - loop_token = gmp.target_loop_token + def _unpack_boxes(self, boxes, start, stop): + ints = []; refs = []; floats = [] + for i in range(start, stop): + box = boxes[i] + if box.type == history.INT: ints.append(box.getint()) + elif box.type == history.REF: refs.append(box.getref_base()) + elif box.type == history.FLOAT:floats.append(box.getfloatstorage()) + else: assert 0 + return ints[:], refs[:], floats[:] + + def raise_continue_running_normally(self, live_arg_boxes, loop_token): + self.history.inputargs = None + self.history.operations = None + # For simplicity, we just raise ContinueRunningNormally here and + # ignore the loop_token passed in. It means that we go back to + # interpreted mode, but it should come back very quickly to the + # JIT, find probably the same 'loop_token', and execute it. + if we_are_translated(): + num_green_args = self.jitdriver_sd.num_green_args + gi, gr, gf = self._unpack_boxes(live_arg_boxes, 0, num_green_args) + ri, rr, rf = self._unpack_boxes(live_arg_boxes, num_green_args, + len(live_arg_boxes)) + CRN = self.staticdata.ContinueRunningNormally + raise CRN(gi, gr, gf, ri, rr, rf) + else: + # However, in order to keep the existing tests working + # (which are based on the assumption that 'loop_token' is + # directly used here), a bit of custom non-translatable code... 
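
raise_continue_running_normally above first splits the live argument boxes into per-kind lists (ints, refs, floats) so that ContinueRunningNormally can carry them in RPython-friendly form. A rough stand-alone illustration of that split, with tagged tuples in place of Box objects:

    INT, REF, FLOAT = 'int', 'ref', 'float'

    def unpack(args, start, stop):
        # Split tagged (kind, value) pairs into per-kind lists, in the spirit
        # of _unpack_boxes; real Boxes are replaced by plain tuples here.
        ints, refs, floats = [], [], []
        for kind, value in args[start:stop]:
            if kind == INT:
                ints.append(value)
            elif kind == REF:
                refs.append(value)
            elif kind == FLOAT:
                floats.append(value)
            else:
                raise AssertionError(kind)
        return ints, refs, floats

    args = [(INT, 3), (FLOAT, 1.5), (REF, object()), (INT, -7)]
    gi, gr, gf = unpack(args, 0, 1)             # say, one green argument
    ri, rr, rf = unpack(args, 1, len(args))     # then the red arguments
    assert gi == [3] and gf == [] and ri == [-7] and rf == [1.5]
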
+ self._nontranslated_run_directly(live_arg_boxes, loop_token) + assert 0, "unreachable" + + def _nontranslated_run_directly(self, live_arg_boxes, loop_token): + "NOT_RPYTHON" + args = [] num_green_args = self.jitdriver_sd.num_green_args - residual_args = gmp.argboxes[num_green_args:] - history.set_future_values(self.cpu, residual_args) - return loop_token + num_red_args = self.jitdriver_sd.num_red_args + for box in live_arg_boxes[num_green_args:num_green_args+num_red_args]: + if box.type == history.INT: args.append(box.getint()) + elif box.type == history.REF: args.append(box.getref_base()) + elif box.type == history.FLOAT: args.append(box.getfloatstorage()) + else: assert 0 + self.jitdriver_sd.warmstate.execute_assembler(loop_token, *args) def prepare_resume_from_failure(self, opnum, dont_change_position=False): frame = self.framestack[-1] @@ -2054,10 +2086,9 @@ if target_token is not None: # raise if it *worked* correctly - self.history.inputargs = None - self.history.operations = None assert isinstance(target_token, TargetToken) - raise GenerateMergePoint(live_arg_boxes, target_token.targeting_jitcell_token) + jitcell_token = target_token.targeting_jitcell_token + self.raise_continue_running_normally(live_arg_boxes, jitcell_token) def compile_trace(self, live_arg_boxes, start_resumedescr): num_green_args = self.jitdriver_sd.num_green_args @@ -2075,10 +2106,9 @@ finally: self.history.operations.pop() # remove the JUMP if target_token is not None: # raise if it *worked* correctly - self.history.inputargs = None - self.history.operations = None assert isinstance(target_token, TargetToken) - raise GenerateMergePoint(live_arg_boxes, target_token.targeting_jitcell_token) + jitcell_token = target_token.targeting_jitcell_token + self.raise_continue_running_normally(live_arg_boxes, jitcell_token) def compile_bridge_and_loop(self, original_boxes, live_arg_boxes, start, bridge_arg_boxes, start_resumedescr): @@ -2114,10 +2144,8 @@ except RetraceLoop: assert False assert target_loop_token is not None - - self.history.inputargs = None - self.history.operations = None - raise GenerateMergePoint(live_arg_boxes, old_loop_tokens[0]) + self.raise_continue_running_normally(live_arg_boxes, + old_loop_tokens[0]) def compile_done_with_this_frame(self, exitbox): self.gen_store_back_in_virtualizable() @@ -2395,22 +2423,6 @@ abox, ConstInt(j), itembox) assert i + 1 == len(self.virtualizable_boxes) - def gen_load_from_other_virtualizable(self, vinfo, vbox): - boxes = [] - assert vinfo is not None - for i in range(vinfo.num_static_extra_boxes): - descr = vinfo.static_field_descrs[i] - boxes.append(self.execute_and_record(rop.GETFIELD_GC, descr, vbox)) - virtualizable = vinfo.unwrap_virtualizable_box(vbox) - for k in range(vinfo.num_arrays): - descr = vinfo.array_field_descrs[k] - abox = self.execute_and_record(rop.GETFIELD_GC, descr, vbox) - descr = vinfo.array_descrs[k] - for j in range(vinfo.get_array_length(virtualizable, k)): - boxes.append(self.execute_and_record(rop.GETARRAYITEM_GC, descr, - abox, ConstInt(j))) - return boxes - def replace_box(self, oldbox, newbox): assert isinstance(oldbox, Box) for frame in self.framestack: @@ -2482,25 +2494,13 @@ greenargs = arglist[1:num_green_args+1] args = arglist[num_green_args+1:] assert len(args) == targetjitdriver_sd.num_red_args - vinfo = targetjitdriver_sd.virtualizable_info - if vinfo is not None: - index = targetjitdriver_sd.index_of_virtualizable - vbox = args[index] - args = args + self.gen_load_from_other_virtualizable(vinfo, vbox) - # ^^^ and not "+=", 
which makes 'args' a resizable list warmrunnerstate = targetjitdriver_sd.warmstate - token = warmrunnerstate.get_assembler_token(greenargs, args) + token = warmrunnerstate.get_assembler_token(greenargs) op = op.copy_and_change(rop.CALL_ASSEMBLER, args=args, descr=token) self.history.operations.append(op) # ____________________________________________________________ -class GenerateMergePoint(JitException): - def __init__(self, args, target_loop_token): - assert target_loop_token is not None - self.argboxes = args - self.target_loop_token = target_loop_token - class ChangeFrame(JitException): """Raised after we mutated metainterp.framestack, in order to force it to reload the current top-of-stack frame that gets interpreted.""" diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -381,11 +381,11 @@ 'GUARD_ISNULL/1d', 'GUARD_NONNULL_CLASS/2d', '_GUARD_FOLDABLE_LAST', - 'GUARD_NO_EXCEPTION/0d', - 'GUARD_EXCEPTION/1d', + 'GUARD_NO_EXCEPTION/0d', # may be called with an exception currently set + 'GUARD_EXCEPTION/1d', # may be called with an exception currently set 'GUARD_NO_OVERFLOW/0d', 'GUARD_OVERFLOW/0d', - 'GUARD_NOT_FORCED/0d', + 'GUARD_NOT_FORCED/0d', # may be called with an exception currently set 'GUARD_NOT_INVALIDATED/0d', '_GUARD_LAST', # ----- end of guard operations ----- diff --git a/pypy/jit/metainterp/test/support.py b/pypy/jit/metainterp/test/support.py --- a/pypy/jit/metainterp/test/support.py +++ b/pypy/jit/metainterp/test/support.py @@ -4,9 +4,9 @@ from pypy.rpython.ootypesystem import ootype from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats +from pypy.jit.metainterp.warmstate import unspecialize_value from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT from pypy.jit.metainterp import pyjitpl, history -from pypy.jit.metainterp.warmstate import set_future_value from pypy.jit.codewriter.policy import JitPolicy from pypy.jit.codewriter import codewriter, longlong from pypy.rlib.rfloat import isnan @@ -136,11 +136,11 @@ procedure_token = metainterp.get_procedure_token(args[:num_green_args]) # a loop was successfully created by _run_with_pyjitpl(); call it cpu = metainterp.cpu + args1 = [] for i in range(len(args) - num_green_args): x = args[num_green_args + i] - typecode = history.getkind(lltype.typeOf(x)) - set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(procedure_token) + args1.append(unspecialize_value(x)) + faildescr = cpu.execute_token(procedure_token, *args1) assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') if metainterp.jitdriver_sd.result_type == history.INT: return cpu.get_latest_value_int(0) diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -9,7 +9,6 @@ from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin, noConst from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.jit.metainterp.warmspot import get_stats -from pypy.jit.metainterp.warmstate import set_future_value from pypy.rlib import rerased from pypy.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside, loop_invariant, elidable, promote, jit_debug, assert_green, @@ -2911,6 +2910,27 @@ res = self.meta_interp(f, [32]) assert res == f(32) + def test_decay_counters(self): + myjitdriver = JitDriver(greens = 
['m'], reds = ['n']) + def f(m, n): + while n > 0: + myjitdriver.jit_merge_point(m=m, n=n) + n += m + n -= m + n -= 1 + def main(): + f(5, 7) # run 7x with m=5 counter[m=5] = 7 + f(15, 10) # compiles one loop counter[m=5] = 3 (automatic decay) + f(5, 5) # run 5x times with m=5 counter[m=5] = 8 + # + self.meta_interp(main, [], decay_halflife=1, + function_threshold=0, threshold=9, trace_eagerness=99) + self.check_trace_count(1) + # + self.meta_interp(main, [], decay_halflife=1, + function_threshold=0, threshold=8, trace_eagerness=99) + self.check_trace_count(2) + class TestOOtype(BasicTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -55,6 +55,7 @@ warmstate = FakeState() on_compile = staticmethod(lambda *args: None) on_compile_bridge = staticmethod(lambda *args: None) + virtualizable_info = None def test_compile_loop(): cpu = FakeCPU() @@ -171,23 +172,17 @@ result_type = INT # loop_token = compile_tmp_callback(cpu, FakeJitDriverSD(), - [ConstInt(12), ConstInt(34)], - [BoxInt(56), ConstInt(78), BoxInt(90)]) + [ConstInt(12), ConstInt(34)], "ii") # raiseme = None - cpu.set_future_value_int(0, -156) - cpu.set_future_value_int(1, -178) - cpu.set_future_value_int(2, -190) # passed in, but dropped - fail_descr = cpu.execute_token(loop_token) + # only two arguments must be passed in + fail_descr = cpu.execute_token(loop_token, -156, -178) assert fail_descr is FakeJitDriverSD().portal_finishtoken # EXC = lltype.GcStruct('EXC') llexc = lltype.malloc(EXC) raiseme = LLException("exception class", llexc) - cpu.set_future_value_int(0, -156) - cpu.set_future_value_int(1, -178) - cpu.set_future_value_int(2, -190) - fail_descr = cpu.execute_token(loop_token) + fail_descr = cpu.execute_token(loop_token, -156, -178) assert isinstance(fail_descr, compile.PropagateExceptionDescr) got = cpu.grab_exc_value() assert lltype.cast_opaque_ptr(lltype.Ptr(EXC), got) == llexc @@ -196,10 +191,7 @@ class ExitFrameWithExceptionRef(Exception): pass FakeMetaInterpSD.cpu = cpu - cpu.set_future_value_int(0, -156) - cpu.set_future_value_int(1, -178) - cpu.set_future_value_int(2, -190) - fail_descr = cpu.execute_token(loop_token) + fail_descr = cpu.execute_token(loop_token, -156, -178) try: fail_descr.handle_fail(FakeMetaInterpSD(), None) except FakeMetaInterpSD.ExitFrameWithExceptionRef, e: diff --git a/pypy/jit/metainterp/test/test_jitprof.py b/pypy/jit/metainterp/test/test_jitprof.py --- a/pypy/jit/metainterp/test/test_jitprof.py +++ b/pypy/jit/metainterp/test/test_jitprof.py @@ -10,7 +10,7 @@ self.counter = 123456 Profiler.start(self) self.events = [] - self.times = [0, 0, 0, 0] + self.times = [0, 0] def timer(self): self.counter += 1 @@ -24,12 +24,6 @@ Profiler._end(self, event) self.events.append(~event) - def start_running(self): self._start(RUNNING) - def end_running(self): self._end(RUNNING) - - def start_blackhole(self): self._start(BLACKHOLE) - def end_blackhole(self): self._end(BLACKHOLE) - class ProfilerMixin(LLJitMixin): def meta_interp(self, *args, **kwds): kwds = kwds.copy() @@ -56,14 +50,10 @@ BACKEND, ~ BACKEND, ~ TRACING, - RUNNING, - ~ RUNNING, - BLACKHOLE, - ~ BLACKHOLE ] assert profiler.events == expected - assert profiler.times == [2, 1, 1, 1] - assert profiler.counters == [1, 1, 1, 1, 3, 3, 1, 15, 2, 0, 0, 0, 0, + assert profiler.times == [2, 1] + assert profiler.counters == [1, 1, 3, 3, 1, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0] def 
test_simple_loop_with_call(self): diff --git a/pypy/jit/metainterp/test/test_rawmem.py b/pypy/jit/metainterp/test/test_rawmem.py --- a/pypy/jit/metainterp/test/test_rawmem.py +++ b/pypy/jit/metainterp/test/test_rawmem.py @@ -8,7 +8,7 @@ VOID_TP = lltype.Array(lltype.Void, hints={"nolength": True, "uncast_on_llgraph": True}) class A(object): def __init__(self, x): - self.storage = rffi.cast(lltype.Ptr(VOID_TP), x)\ + self.storage = rffi.cast(lltype.Ptr(VOID_TP), x) def f(n): x = lltype.malloc(TP, n, flavor="raw", zero=True) @@ -19,4 +19,14 @@ lltype.free(x, flavor="raw") return s res = self.interp_operations(f, [10]) - assert res == 1.0 \ No newline at end of file + + def test_fixed_size_malloc(self): + TIMEVAL = lltype.Struct('dummy', ('tv_sec', rffi.LONG), ('tv_usec', rffi.LONG)) + def f(): + p = lltype.malloc(TIMEVAL, flavor='raw') + lltype.free(p, flavor='raw') + return 42 + res = self.interp_operations(f, []) + assert res == 42 + self.check_operations_history({'call': 2, 'guard_no_exception': 1, + 'finish': 1}) diff --git a/pypy/jit/metainterp/test/test_tl.py b/pypy/jit/metainterp/test/test_tl.py --- a/pypy/jit/metainterp/test/test_tl.py +++ b/pypy/jit/metainterp/test/test_tl.py @@ -72,16 +72,16 @@ res = self.meta_interp(main, [0, 6], listops=True, backendopt=True) assert res == 5040 - self.check_resops({'jump': 1, 'int_le': 2, 'guard_value': 1, - 'int_mul': 2, 'guard_false': 2, 'int_sub': 2}) + self.check_simple_loop({'jump': 1, 'int_le': 1, + 'int_mul': 1, 'guard_false': 1, 'int_sub': 1}) def test_tl_2(self): main = self._get_main() res = self.meta_interp(main, [1, 10], listops=True, backendopt=True) assert res == main(1, 10) - self.check_resops({'int_le': 2, 'int_sub': 2, 'jump': 1, - 'guard_false': 2, 'guard_value': 1}) + self.check_simple_loop({'int_le': 1, 'int_sub': 1, 'jump': 1, + 'guard_false': 1}) def test_tl_call(self, listops=True, policy=None): from pypy.jit.tl.tl import interp diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -77,7 +77,7 @@ return xy.inst_x res = self.meta_interp(f, [20]) assert res == 30 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) def test_preexisting_access_2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -102,7 +102,8 @@ assert f(5) == 185 res = self.meta_interp(f, [5]) assert res == 185 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, + getfield_gc=2) # <= at the header of the loop def test_two_paths_access(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -124,7 +125,7 @@ return xy.inst_x res = self.meta_interp(f, [18]) assert res == 10118 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=2) def test_synchronize_in_return(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy'], @@ -146,7 +147,7 @@ return xy.inst_x res = self.meta_interp(f, [18]) assert res == 10180 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=2) def test_virtualizable_and_greens(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n', 'xy'], @@ -174,7 +175,7 @@ return res res = self.meta_interp(f, [40]) assert res == 50 * 4 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=4) def test_double_frame(self): myjitdriver = JitDriver(greens 
= [], reds = ['n', 'xy', 'other'], @@ -197,7 +198,8 @@ return xy.inst_x res = self.meta_interp(f, [20]) assert res == 134 - self.check_resops(setfield_gc=2, getfield_gc=1) + self.check_simple_loop(setfield_gc=1, getfield_gc=0) + self.check_resops(setfield_gc=2, getfield_gc=3) # ------------------------------ @@ -247,8 +249,8 @@ return xy2.inst_l1[2] res = self.meta_interp(f, [16]) assert res == 3001 + 16 * 80 - self.check_resops(setarrayitem_gc=0, setfield_gc=0, - getarrayitem_gc=0, getfield_gc=0) + self.check_simple_loop(setarrayitem_gc=0, setfield_gc=0, + getarrayitem_gc=0, getfield_gc=0) def test_synchronize_arrays_in_return(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -278,7 +280,8 @@ assert f(18) == 10360 res = self.meta_interp(f, [18]) assert res == 10360 - self.check_resops(setfield_gc=0, getarrayitem_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getarrayitem_gc=0, + getfield_gc=0, setarrayitem_gc=0) def test_array_length(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -304,8 +307,8 @@ return xy2.inst_l1[1] res = self.meta_interp(f, [18]) assert res == 2941309 + 18 - self.check_resops(setfield_gc=0, getarrayitem_gc=0, - arraylen_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getarrayitem_gc=0, + arraylen_gc=0, getfield_gc=0) def test_residual_function(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2'], @@ -338,8 +341,8 @@ return xy2.inst_l1[1] res = self.meta_interp(f, [18]) assert res == 2941309 + 18 - self.check_resops(call=2, setfield_gc=0, getarrayitem_gc=0, - arraylen_gc=2, getfield_gc=0) + self.check_simple_loop(call=1, setfield_gc=0, getarrayitem_gc=0, + arraylen_gc=1, getfield_gc=0) def test_double_frame_array(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'xy2', 'other'], @@ -375,8 +378,8 @@ expected = f(20) res = self.meta_interp(f, [20], enable_opts='') assert res == expected - self.check_resops(setarrayitem_gc=1, setfield_gc=0, - getarrayitem_gc=1, arraylen_gc=1, getfield_gc=1) + self.check_simple_loop(setarrayitem_gc=1, setfield_gc=0, + getarrayitem_gc=1, arraylen_gc=1, getfield_gc=1) # ------------------------------ @@ -423,7 +426,8 @@ assert f(18) == 10360 res = self.meta_interp(f, [18]) assert res == 10360 - self.check_resops(setfield_gc=0, getarrayitem_gc=0, getfield_gc=0) + self.check_simple_loop(getfield_gc=0, getarrayitem_gc=0, + setfield_gc=0, setarrayitem_gc=0) # ------------------------------ @@ -457,7 +461,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) def test_virtualizable_with_array(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'x', 'frame'], @@ -491,7 +495,8 @@ res = self.meta_interp(f, [10, 1], listops=True) assert res == f(10, 1) - self.check_resops(getarrayitem_gc=0) + self.check_simple_loop(getfield_gc=0, getarrayitem_gc=0) + self.check_resops(getfield_gc=2, getarrayitem_gc=4) def test_subclass_of_virtualizable(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -519,7 +524,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) def test_external_pass(self): jitdriver = JitDriver(greens = [], reds = ['n', 'z', 'frame'], @@ -1037,7 +1042,7 @@ res = self.meta_interp(f, [10]) assert res == 55 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) from 
pypy.jit.backend.test.support import BaseCompiledMixin if isinstance(self, BaseCompiledMixin): @@ -1197,7 +1202,8 @@ res = self.meta_interp(f, [10]) assert res == 155 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=2) def test_blackhole_should_synchronize(self): myjitdriver = JitDriver(greens = [], reds = ['frame'], @@ -1233,7 +1239,8 @@ res = self.meta_interp(f, [10]) assert res == 155 - self.check_resops(setfield_gc=0, getfield_gc=0) + self.check_simple_loop(setfield_gc=0, getfield_gc=0) + self.check_resops(setfield_gc=0, getfield_gc=2) def test_blackhole_should_not_reenter(self): if not self.basic: diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -303,18 +303,11 @@ exc_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) cls.exc_vtable = exc_vtable - class FakeLoopToken: + class FakeFailDescr(object): def __init__(self, no): self.no = no - self.generation = 0 - - class FakeFailDescr(object): - def __init__(self, looptoken): - assert isinstance(looptoken, FakeLoopToken) - self.looptoken = looptoken - def handle_fail(self, metainterp_sd, jitdrivers_sd): - no = self.looptoken.no + no = self.no if no == 0: raise metainterp_sd.warmrunnerdesc.DoneWithThisFrameInt(3) if no == 1: @@ -326,7 +319,7 @@ raise metainterp_sd.warmrunnerdesc.ExitFrameWithExceptionRef( metainterp_sd.cpu, lltype.cast_opaque_ptr(llmemory.GCREF, exc)) - return self.looptoken + assert 0 class FakeDescr: def as_vtable_size_descr(self): @@ -353,11 +346,10 @@ sizeof = nodescr def get_fail_descr_from_number(self, no): - return FakeFailDescr(FakeLoopToken(no)) + return FakeFailDescr(no) - def execute_token(self, token): - assert token.no == 2 - return FakeFailDescr(FakeLoopToken(1)) + def make_execute_token(self, *ARGS): + return "not callable" driver = JitDriver(reds = ['red'], greens = ['green']) @@ -381,7 +373,6 @@ [jd] = self.desc.jitdrivers_sd assert jd._assembler_call_helper(0, 0) == 3 assert jd._assembler_call_helper(1, 0) == 10 - assert jd._assembler_call_helper(2, 0) == 10 try: jd._assembler_call_helper(3, 0) except LLException, lle: diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -1,3 +1,4 @@ +import math from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -8,7 +9,7 @@ from pypy.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr from pypy.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr from pypy.jit.codewriter import longlong -from pypy.rlib.rarithmetic import r_singlefloat +from pypy.rlib.rarithmetic import r_singlefloat, r_uint def boxfloat(x): return BoxFloat(longlong.getfloatstorage(x)) @@ -151,29 +152,6 @@ assert get_jitcell(False, 42, 0.25) is cell4 assert cell1 is not cell3 is not cell4 is not cell1 -def test_make_set_future_values(): - future_values = {} - class FakeCPU: - def set_future_value_int(self, j, value): - future_values[j] = "int", value - def set_future_value_float(self, j, value): - future_values[j] = "float", value - class FakeWarmRunnerDesc: - cpu = FakeCPU() - memory_manager = None - class FakeJitDriverSD: - _red_args_types = ["int", "float"] - virtualizable_info = 
None - # - state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) - set_future_values = state.make_set_future_values() - set_future_values(5, 42.5) - assert future_values == { - 0: ("int", 5), - 1: ("float", longlong.getfloatstorage(42.5)), - } - assert set_future_values is state.make_set_future_values() - def test_make_unwrap_greenkey(): class FakeJitDriverSD: _green_args_spec = [lltype.Signed, lltype.Float] @@ -210,6 +188,7 @@ _confirm_enter_jit_ptr = None _can_never_inline_ptr = None _should_unroll_one_iteration_ptr = None + red_args_types = [] class FakeCell: dont_trace_here = False state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) @@ -239,6 +218,7 @@ _can_never_inline_ptr = None _get_jitcell_at_ptr = None _should_unroll_one_iteration_ptr = None + red_args_types = [] state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) state.make_jitdriver_callbacks() res = state.get_location_str([ConstInt(5), constfloat(42.5)]) @@ -264,6 +244,7 @@ _can_never_inline_ptr = None _get_jitcell_at_ptr = None _should_unroll_one_iteration_ptr = None + red_args_types = [] state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) state.make_jitdriver_callbacks() @@ -289,8 +270,83 @@ _can_never_inline_ptr = llhelper(CAN_NEVER_INLINE, can_never_inline) _get_jitcell_at_ptr = None _should_unroll_one_iteration_ptr = None + red_args_types = [] state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) state.make_jitdriver_callbacks() res = state.can_never_inline(5, 42.5) assert res is True + +def test_decay_counters(): + cell = JitCell(r_uint(5)) + cell.counter = 100 + cell.adjust_counter(r_uint(5), math.log(0.9)) + assert cell.counter == 100 + cell.adjust_counter(r_uint(6), math.log(0.9)) + assert cell.counter == 90 + cell.adjust_counter(r_uint(9), math.log(0.9)) + assert cell.counter == int(90 * (0.9**3)) + +def test_cleanup_jitcell_dict(): + from pypy.jit.metainterp.memmgr import MemoryManager + class FakeWarmRunnerDesc: + memory_manager = MemoryManager() + class cpu: + pass + class FakeJitDriverSD: + _green_args_spec = [lltype.Signed] + # + # Test creating tons of jitcells that remain at 0 + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + cell1 = get_jitcell(True, -1) + assert len(warmstate._jitcell_dict) == 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 1 + # + for i in range(1, 20005): + get_jitcell(True, i) # should trigger a clean-up at 20001 + assert len(warmstate._jitcell_dict) == (i % 20000) + 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 2 + # + # Same test, with one jitcell that has a counter of BASE instead of 0 + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + warmstate.set_param_decay_halflife(2) + warmstate.set_param_threshold(5) + warmstate.set_param_function_threshold(0) + get_jitcell = warmstate._make_jitcell_getter_default() + cell2 = get_jitcell(True, -2) + cell2.counter = BASE = warmstate.increment_threshold * 3 + # + for i in range(0, 20005): + get_jitcell(True, i) + assert len(warmstate._jitcell_dict) == (i % 19999) + 2 + # + assert cell2 in warmstate._jitcell_dict.values() + assert cell2.counter == int(BASE * math.sqrt(0.5)) # decayed once + assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 + # + # Same test, with jitcells that are compiled and free by the memmgr + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + 
get_jitcell(True, -1) + assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 + # + for i in range(1, 20005): + cell = get_jitcell(True, i) + cell.counter = -1 + cell.wref_procedure_token = None # or a dead weakref, equivalently + assert len(warmstate._jitcell_dict) == (i % 20000) + 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 + # + # Same test, with counter == -2 (rare case, kept alive) + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + cell = get_jitcell(True, -1) + cell.counter = -2 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 + # + for i in range(1, 20005): + cell = get_jitcell(True, i) + cell.counter = -2 + assert len(warmstate._jitcell_dict) == i + 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 5 diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -64,9 +64,11 @@ def jittify_and_run(interp, graph, args, repeat=1, graph_and_interp_only=False, backendopt=False, trace_limit=sys.maxint, + threshold=3, trace_eagerness=2, inline=False, loop_longevity=0, retrace_limit=5, - function_threshold=4, - enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, **kwds): + function_threshold=4, decay_halflife=0, + enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, + **kwds): from pypy.config.config import ConfigError translator = interp.typer.annotator.translator try: @@ -83,15 +85,16 @@ pass warmrunnerdesc = WarmRunnerDesc(translator, backendopt=backendopt, **kwds) for jd in warmrunnerdesc.jitdrivers_sd: - jd.warmstate.set_param_threshold(3) # for tests + jd.warmstate.set_param_threshold(threshold) jd.warmstate.set_param_function_threshold(function_threshold) - jd.warmstate.set_param_trace_eagerness(2) # for tests + jd.warmstate.set_param_trace_eagerness(trace_eagerness) jd.warmstate.set_param_trace_limit(trace_limit) jd.warmstate.set_param_inlining(inline) jd.warmstate.set_param_loop_longevity(loop_longevity) jd.warmstate.set_param_retrace_limit(retrace_limit) jd.warmstate.set_param_max_retrace_guards(max_retrace_guards) jd.warmstate.set_param_enable_opts(enable_opts) + jd.warmstate.set_param_decay_halflife(decay_halflife) warmrunnerdesc.finish() if graph_and_interp_only: return interp, graph @@ -522,9 +525,9 @@ greens_v, reds_v = support.decode_hp_hint_args(op) ALLARGS = [v.concretetype for v in (greens_v + reds_v)] jd._green_args_spec = [v.concretetype for v in greens_v] - jd._red_args_types = [history.getkind(v.concretetype) for v in reds_v] + jd.red_args_types = [history.getkind(v.concretetype) for v in reds_v] jd.num_green_args = len(jd._green_args_spec) - jd.num_red_args = len(jd._red_args_types) + jd.num_red_args = len(jd.red_args_types) RESTYPE = graph.getreturnvar().concretetype (jd._JIT_ENTER_FUNCTYPE, jd._PTR_JIT_ENTER_FUNCTYPE) = self.cpu.ts.get_FuncType(ALLARGS, lltype.Void) @@ -771,16 +774,16 @@ def assembler_call_helper(failindex, virtualizableref): fail_descr = self.cpu.get_fail_descr_from_number(failindex) - while True: - if vinfo is not None: - virtualizable = lltype.cast_opaque_ptr( - vinfo.VTYPEPTR, virtualizableref) - vinfo.reset_vable_token(virtualizable) - try: - loop_token = fail_descr.handle_fail(self.metainterp_sd, jd) - except JitException, e: - return handle_jitexception(e) - fail_descr = self.execute_token(loop_token) + if vinfo is not None: + virtualizable = lltype.cast_opaque_ptr( + vinfo.VTYPEPTR, virtualizableref) + 
vinfo.reset_vable_token(virtualizable) + try: + fail_descr.handle_fail(self.metainterp_sd, jd) + except JitException, e: + return handle_jitexception(e) + else: + assert 0, "should have raised" jd._assembler_call_helper = assembler_call_helper # for debugging jd._assembler_helper_ptr = self.helper_func( @@ -910,10 +913,3 @@ graphs = self.translator.graphs for graph, block, i in find_force_quasi_immutable(graphs): self.replace_force_quasiimmut_with_direct_call(block.operations[i]) - - # ____________________________________________________________ - - def execute_token(self, loop_token): - fail_descr = self.cpu.execute_token(loop_token) - self.memory_manager.keep_loop_alive(loop_token) - return fail_descr diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -1,10 +1,10 @@ -import sys, weakref +import sys, weakref, math from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, cast_base_ptr_to_instance from pypy.rpython.annlowlevel import cast_object_to_ptr from pypy.rlib.objectmodel import specialize, we_are_translated, r_dict -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rlib.nonconst import NonConstant from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.jit import PARAMETERS @@ -12,6 +12,7 @@ from pypy.rlib.debug import debug_start, debug_stop, debug_print from pypy.jit.metainterp import history from pypy.jit.codewriter import support, heaptracker, longlong +from pypy.tool.sourcetools import func_with_new_name # ____________________________________________________________ @@ -142,26 +143,6 @@ else: return rffi.cast(lltype.Signed, x) - at specialize.ll_and_arg(3) -def set_future_value(cpu, j, value, typecode): - if typecode == 'ref': - refvalue = cpu.ts.cast_to_ref(value) - cpu.set_future_value_ref(j, refvalue) - elif typecode == 'int': - if isinstance(lltype.typeOf(value), lltype.Ptr): - intvalue = llmemory.AddressAsInt(llmemory.cast_ptr_to_adr(value)) - else: - intvalue = lltype.cast_primitive(lltype.Signed, value) - cpu.set_future_value_int(j, intvalue) - elif typecode == 'float': - if lltype.typeOf(value) is lltype.Float: - value = longlong.getfloatstorage(value) - else: - assert longlong.is_longlong(lltype.typeOf(value)) - value = rffi.cast(lltype.SignedLongLong, value) - cpu.set_future_value_float(j, value) - else: - assert False class JitCell(BaseJitCell): # the counter can mean the following things: @@ -172,6 +153,25 @@ dont_trace_here = False wref_procedure_token = None + def __init__(self, generation): + # The stored 'counter' value follows an exponential decay model. + # Conceptually after every generation, it decays by getting + # multiplied by a constant <= 1.0. In practice, decaying occurs + # lazily: the following field records the latest seen generation + # number, and adjustment is done by adjust_counter() when needed. + self.latest_generation_seen = generation + + def adjust_counter(self, generation, log_decay_factor): + if generation != self.latest_generation_seen: + # The latest_generation_seen is older than the current generation. + # Adjust by multiplying self.counter N times by decay_factor, i.e. + # by decay_factor ** N, which is equal to exp(log(decay_factor)*N). 
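(Aside, not part of the patch: the arithmetic spelled out in the comment above can be tried in isolation with the sketch below. The names Cell and halflife_to_log_factor are made up for illustration. Because the counter is only adjusted lazily, N elapsed generations collapse into a single multiplication by decay_factor ** N, computed as exp(log(decay_factor) * N), and the factor is chosen so that a counter halves every 'halflife' generations.)

import math

def halflife_to_log_factor(halflife):
    # halflife <= 0 means "no decay": factor 1.0, i.e. log-factor 0.0
    if halflife <= 0:
        return 0.0
    return math.log(0.5) / halflife

class Cell(object):
    # illustrative stand-in for JitCell: decay is applied lazily
    def __init__(self, generation):
        self.counter = 0
        self.latest_generation_seen = generation

    def adjust_counter(self, generation, log_decay_factor):
        if generation != self.latest_generation_seen:
            n = generation - self.latest_generation_seen
            # multiply by decay_factor ** n, done as exp(log_decay_factor * n)
            self.counter = int(self.counter * math.exp(log_decay_factor * n))
            self.latest_generation_seen = generation

log_factor = halflife_to_log_factor(40)   # 40 is the new default in PARAMETERS
cell = Cell(generation=0)
cell.counter = 1000
cell.adjust_counter(40, log_factor)
print(cell.counter)   # about 500: one halflife has passed, so roughly halved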
+ assert self.counter >= 0 + N = generation - self.latest_generation_seen + factor = math.exp(log_decay_factor * N) + self.counter = int(self.counter * factor) + self.latest_generation_seen = generation + def get_procedure_token(self): if self.wref_procedure_token is not None: token = self.wref_procedure_token() @@ -191,7 +191,6 @@ class WarmEnterState(object): THRESHOLD_LIMIT = sys.maxint // 2 - default_jitcell_dict = None def __init__(self, warmrunnerdesc, jitdriver_sd): "NOT_RPYTHON" @@ -232,6 +231,17 @@ def set_param_inlining(self, value): self.inlining = value + def set_param_decay_halflife(self, value): + # Use 0 or -1 to mean "no decay". Initialize the internal variable + # 'log_decay_factor'. It is choosen such that by multiplying the + # counter on loops by 'exp(log_decay_factor)' (<= 1.0) every + # generation, then the counter will be divided by two after 'value' + # generations have passed. + if value <= 0: + self.log_decay_factor = 0.0 # log(1.0) + else: + self.log_decay_factor = math.log(0.5) / value + def set_param_enable_opts(self, value): from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT, ALL_OPTS_NAMES @@ -297,26 +307,61 @@ index_of_virtualizable = jitdriver_sd.index_of_virtualizable num_green_args = jitdriver_sd.num_green_args get_jitcell = self.make_jitcell_getter() - set_future_values = self.make_set_future_values() self.make_jitdriver_callbacks() confirm_enter_jit = self.confirm_enter_jit + range_red_args = unrolling_iterable( + range(num_green_args, num_green_args + jitdriver_sd.num_red_args)) + memmgr = self.warmrunnerdesc.memory_manager + if memmgr is not None: + get_current_generation = memmgr.get_current_generation_uint + else: + get_current_generation = lambda: r_uint(0) + # get a new specialized copy of the method + ARGS = [] + for kind in jitdriver_sd.red_args_types: + if kind == 'int': + ARGS.append(lltype.Signed) + elif kind == 'ref': + ARGS.append(llmemory.GCREF) + elif kind == 'float': + ARGS.append(longlong.FLOATSTORAGE) + else: + assert 0, kind + func_execute_token = self.cpu.make_execute_token(*ARGS) + + def execute_assembler(loop_token, *args): + # Call the backend to run the 'looptoken' with the given + # input args. + fail_descr = func_execute_token(loop_token, *args) + # + # If we have a virtualizable, we have to reset its + # 'vable_token' field afterwards + if vinfo is not None: + virtualizable = args[index_of_virtualizable] + virtualizable = vinfo.cast_gcref_to_vtype(virtualizable) + vinfo.reset_vable_token(virtualizable) + # + # Record in the memmgr that we just ran this loop, + # so that it will keep it alive for a longer time + warmrunnerdesc.memory_manager.keep_loop_alive(loop_token) + # + # Handle the failure + fail_descr.handle_fail(metainterp_sd, jitdriver_sd) + # + assert 0, "should have raised" def maybe_compile_and_run(threshold, *args): """Entry point to the JIT. Called at the point with the can_enter_jit() hint. 
""" - if vinfo is not None: - virtualizable = args[num_green_args + index_of_virtualizable] - virtualizable = vinfo.cast_to_vtype(virtualizable) - else: - virtualizable = None - # look for the cell corresponding to the current greenargs greenargs = args[:num_green_args] cell = get_jitcell(True, *greenargs) if cell.counter >= 0: # update the profiling counter + cell.adjust_counter(get_current_generation(), + self.log_decay_factor) n = cell.counter + threshold if n <= self.THRESHOLD_LIMIT: # bound not reached cell.counter = n @@ -330,42 +375,36 @@ # set counter to -2, to mean "tracing in effect" cell.counter = -2 try: - procedure_token = metainterp.compile_and_run_once(jitdriver_sd, - *args) + metainterp.compile_and_run_once(jitdriver_sd, *args) finally: if cell.counter == -2: cell.counter = 0 else: - if cell.counter == -2: + if cell.counter != -1: + assert cell.counter == -2 # tracing already happening in some outer invocation of # this function. don't trace a second time. return - assert cell.counter == -1 if not confirm_enter_jit(*args): return + # machine code was already compiled for these greenargs procedure_token = cell.get_procedure_token() if procedure_token is None: # it was a weakref that has been freed cell.counter = 0 return - # machine code was already compiled for these greenargs - # get the assembler and fill in the boxes - set_future_values(*args[num_green_args:]) - - # ---------- execute assembler ---------- - while True: # until interrupted by an exception - metainterp_sd.profiler.start_running() - #debug_start("jit-running") - fail_descr = warmrunnerdesc.execute_token(procedure_token) - #debug_stop("jit-running") - metainterp_sd.profiler.end_running() - procedure_token = None # for test_memmgr - if vinfo is not None: - vinfo.reset_vable_token(virtualizable) - procedure_token = fail_descr.handle_fail(metainterp_sd, - jitdriver_sd) + # extract and unspecialize the red arguments to pass to + # the assembler + execute_args = () + for i in range_red_args: + execute_args += (unspecialize_value(args[i]), ) + # run it! 
this executes until interrupted by an exception + execute_assembler(procedure_token, *execute_args) + # + assert 0, "should not reach this point" maybe_compile_and_run._dont_inline_ = True self.maybe_compile_and_run = maybe_compile_and_run + self.execute_assembler = execute_assembler return maybe_compile_and_run # ---------- @@ -415,6 +454,15 @@ # return jit_getter + def _new_jitcell(self): + warmrunnerdesc = self.warmrunnerdesc + if (warmrunnerdesc is not None and + warmrunnerdesc.memory_manager is not None): + gen = warmrunnerdesc.memory_manager.get_current_generation_uint() + else: + gen = r_uint(0) + return JitCell(gen) + def _make_jitcell_getter_default(self): "NOT_RPYTHON" jitdriver_sd = self.jitdriver_sd @@ -439,6 +487,49 @@ return x # jitcell_dict = r_dict(comparekey, hashkey) + try: + self.warmrunnerdesc.stats.jitcell_dicts.append(jitcell_dict) + except AttributeError: + pass + # + memmgr = self.warmrunnerdesc and self.warmrunnerdesc.memory_manager + if memmgr: + def _cleanup_dict(): + minimum = sys.maxint + if self.increment_threshold > 0: + minimum = min(minimum, self.increment_threshold) + if self.increment_function_threshold > 0: + minimum = min(minimum, self.increment_function_threshold) + currentgen = memmgr.get_current_generation_uint() + killme = [] + for key, cell in jitcell_dict.iteritems(): + if cell.counter >= 0: + cell.adjust_counter(currentgen, self.log_decay_factor) + if cell.counter < minimum: + killme.append(key) + elif (cell.counter == -1 + and cell.get_procedure_token() is None): + killme.append(key) + for key in killme: + del jitcell_dict[key] + # + def _maybe_cleanup_dict(): + # If no tracing goes on at all because the jitcells are + # each time for new greenargs, the dictionary grows forever. + # So every one in a (rare) while, we decide to force an + # artificial next_generation() and _cleanup_dict(). + self._trigger_automatic_cleanup += 1 + if self._trigger_automatic_cleanup > 20000: + self._trigger_automatic_cleanup = 0 + memmgr.next_generation(do_cleanups_now=False) + _cleanup_dict() + # + self._trigger_automatic_cleanup = 0 + self._jitcell_dict = jitcell_dict # for tests + memmgr.record_jitcell_dict(_cleanup_dict) + else: + def _maybe_cleanup_dict(): + pass # def get_jitcell(build, *greenargs): try: @@ -446,7 +537,8 @@ except KeyError: if not build: return None - cell = JitCell() + _maybe_cleanup_dict() + cell = self._new_jitcell() jitcell_dict[greenargs] = cell return cell return get_jitcell @@ -457,6 +549,10 @@ get_jitcell_at_ptr = self.jitdriver_sd._get_jitcell_at_ptr set_jitcell_at_ptr = self.jitdriver_sd._set_jitcell_at_ptr lltohlhack = {} + # note that there is no equivalent of record_jitcell_dict() + # in the case of custom getters. We assume that the interpreter + # stores the JitCells on some objects that can go away by GC, + # like the PyCode objects in PyPy. 
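(Aside, not part of the patch: the automatic cleanup added above, which the test at the top of this mail exercises by watching the dictionary length wrap around after 20000 fresh keys, amounts to "purge cold cells every 20000 insertions so the cache cannot grow forever". A rough standalone sketch with a plain dict, leaving out the generation bookkeeping and the real threshold computation:)

class Cell(object):
    # minimal stand-in for JitCell
    def __init__(self):
        self.counter = 0
        self.procedure_token = None   # a weakref to machine code in the real thing

class CellDict(object):
    CLEANUP_TRIGGER = 20000

    def __init__(self, minimum_counter=1):
        self.cells = {}
        self.inserts_since_cleanup = 0
        self.minimum_counter = minimum_counter

    def _cleanup(self):
        # drop cells too cold to ever reach the compile threshold again,
        # and cells whose compiled code has already been freed
        killme = [key for key, cell in self.cells.items()
                  if (0 <= cell.counter < self.minimum_counter)
                  or (cell.counter == -1 and cell.procedure_token is None)]
        for key in killme:
            del self.cells[key]

    def get(self, key):
        try:
            return self.cells[key]
        except KeyError:
            self.inserts_since_cleanup += 1
            if self.inserts_since_cleanup > self.CLEANUP_TRIGGER:
                self.inserts_since_cleanup = 0
                self._cleanup()
            cell = Cell()
            self.cells[key] = cell
            return cell

d = CellDict()
for i in range(20005):
    d.get(i)
print(len(d.cells))   # back to a handful of entries, not 20005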
# def get_jitcell(build, *greenargs): fn = support.maybe_on_top_of_llinterp(rtyper, get_jitcell_at_ptr) @@ -478,7 +574,7 @@ if not build: return cell if cell is None: - cell = JitCell() + cell = self._new_jitcell() # if we_are_translated(): cellref = cast_object_to_ptr(BASEJITCELL, cell) @@ -502,56 +598,6 @@ # ---------- - def make_set_future_values(self): - "NOT_RPYTHON" - if hasattr(self, 'set_future_values'): - return self.set_future_values - - jitdriver_sd = self.jitdriver_sd - cpu = self.cpu - vinfo = jitdriver_sd.virtualizable_info - red_args_types = unrolling_iterable(jitdriver_sd._red_args_types) - # - def set_future_values(*redargs): - i = 0 - for typecode in red_args_types: - set_future_value(cpu, i, redargs[i], typecode) - i = i + 1 - if vinfo is not None: - set_future_values_from_vinfo(*redargs) - # - if vinfo is not None: - i0 = len(jitdriver_sd._red_args_types) - index_of_virtualizable = jitdriver_sd.index_of_virtualizable - vable_static_fields = unrolling_iterable( - zip(vinfo.static_extra_types, vinfo.static_fields)) - vable_array_fields = unrolling_iterable( - zip(vinfo.arrayitem_extra_types, vinfo.array_fields)) - getlength = cpu.ts.getlength - getarrayitem = cpu.ts.getarrayitem - # - def set_future_values_from_vinfo(*redargs): - i = i0 - virtualizable = redargs[index_of_virtualizable] - virtualizable = vinfo.cast_to_vtype(virtualizable) - for typecode, fieldname in vable_static_fields: - x = getattr(virtualizable, fieldname) - set_future_value(cpu, i, x, typecode) - i = i + 1 - for typecode, fieldname in vable_array_fields: - lst = getattr(virtualizable, fieldname) - for j in range(getlength(lst)): - x = getarrayitem(lst, j) - set_future_value(cpu, i, x, typecode) - i = i + 1 - else: - set_future_values_from_vinfo = None - # - self.set_future_values = set_future_values - return set_future_values - - # ---------- - def make_jitdriver_callbacks(self): if hasattr(self, 'get_location_str'): return @@ -601,8 +647,9 @@ jd.on_compile = lambda *args: None jd.on_compile_bridge = lambda *args: None - def get_assembler_token(greenkey, redboxes): - # 'redboxes' is only used to know the types of red arguments + redargtypes = ''.join([kind[0] for kind in jd.red_args_types]) + + def get_assembler_token(greenkey): cell = self.jit_cell_at_key(greenkey) procedure_token = cell.get_procedure_token() if procedure_token is None: @@ -611,7 +658,7 @@ cell.counter = 0 # but was freed in the meantime. 
memmgr = warmrunnerdesc.memory_manager procedure_token = compile_tmp_callback(cpu, jd, greenkey, - redboxes, memmgr) + redargtypes, memmgr) cell.set_procedure_token(procedure_token) return procedure_token self.get_assembler_token = get_assembler_token diff --git a/pypy/jit/tool/jitoutput.py b/pypy/jit/tool/jitoutput.py --- a/pypy/jit/tool/jitoutput.py +++ b/pypy/jit/tool/jitoutput.py @@ -10,9 +10,6 @@ REGEXES = [ (('tracing_no', 'tracing_time'), '^Tracing:\s+([\d.]+)\s+([\d.]+)$'), (('backend_no', 'backend_time'), '^Backend:\s+([\d.]+)\s+([\d.]+)$'), - (('asm_no',), '^Running asm:\s+([\d.]+)$'), - (('blackhole_no',), - '^Blackhole:\s+([\d.]+)$'), (None, '^TOTAL.*$'), (('ops.total',), '^ops:\s+(\d+)$'), (('recorded_ops.total',), '^recorded ops:\s+(\d+)$'), diff --git a/pypy/jit/tool/test/test_jitoutput.py b/pypy/jit/tool/test/test_jitoutput.py --- a/pypy/jit/tool/test/test_jitoutput.py +++ b/pypy/jit/tool/test/test_jitoutput.py @@ -34,8 +34,6 @@ # assert did not crash # asserts below are a bit delicate, possibly they might be deleted assert info.tracing_no == 1 - assert info.asm_no == 1 - assert info.blackhole_no == 1 assert info.backend_no == 1 assert info.ops.total == 2 assert info.recorded_ops.total == 2 @@ -47,8 +45,6 @@ DATA = '''Tracing: 1 0.006992 Backend: 1 0.000525 -Running asm: 1 -Blackhole: 1 TOTAL: 0.025532 ops: 2 recorded ops: 6 @@ -75,8 +71,6 @@ info = parse_prof(DATA) assert info.tracing_no == 1 assert info.tracing_time == 0.006992 - assert info.asm_no == 1 - assert info.blackhole_no == 1 assert info.backend_no == 1 assert info.backend_time == 0.000525 assert info.ops.total == 2 diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -161,11 +161,16 @@ def test_shutdown(self): import socket, ssl, sys, gc - if sys.platform == 'darwin': - skip("get also on CPython: error: [Errno 0]") ss = socket.ssl(self.s) ss.write("hello\n") - assert ss.shutdown() is self.s._sock + try: + result = ss.shutdown() + except socket.error, e: + # xxx obscure case; throwing errno 0 is pretty odd... + if e.errno == 0: + skip("Shutdown raised errno 0. CPython does this too") + raise + assert result is self.s._sock raises(ssl.SSLError, ss.write, "hello\n") del ss; gc.collect() diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -1,9 +1,19 @@ from pypy.interpreter.mixedmodule import MixedModule +class PyPyModule(MixedModule): + interpleveldefs = { + 'debug_repr': 'interp_extras.debug_repr', + } + appleveldefs = {} + class Module(MixedModule): applevel_name = 'numpypy' + submodules = { + 'pypy': PyPyModule + } + interpleveldefs = { 'ndarray': 'interp_numarray.W_NDimArray', 'dtype': 'interp_dtype.W_Dtype', @@ -81,6 +91,7 @@ 'mean': 'app_numpy.mean', 'sum': 'app_numpy.sum', 'min': 'app_numpy.min', + 'identity': 'app_numpy.identity', 'max': 'app_numpy.max', 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -13,6 +13,11 @@ # weighting, just the average part! 
return mean(a) +def identity(n, dtype=None): + a = numpypy.zeros((n,n), dtype=dtype) + for i in range(n): + a[i][i] = 1 + return a def mean(a): if not hasattr(a, "mean"): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -86,6 +86,7 @@ descr_ge = _binop_impl("greater_equal") descr_radd = _binop_right_impl("add") + descr_rsub = _binop_right_impl("subtract") descr_rmul = _binop_right_impl("multiply") descr_neg = _unaryop_impl("negative") @@ -132,7 +133,7 @@ descr__new__, get_dtype = new_dtype_getter("long") class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - pass + descr__new__, get_dtype = new_dtype_getter("ulong") class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int64") @@ -170,7 +171,8 @@ __mul__ = interp2app(W_GenericBox.descr_mul), __div__ = interp2app(W_GenericBox.descr_div), - __radd__ = interp2app(W_GenericBox.descr_add), + __radd__ = interp2app(W_GenericBox.descr_radd), + __rsub__ = interp2app(W_GenericBox.descr_rsub), __rmul__ = interp2app(W_GenericBox.descr_rmul), __eq__ = interp2app(W_GenericBox.descr_eq), diff --git a/pypy/module/micronumpy/interp_extras.py b/pypy/module/micronumpy/interp_extras.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_extras.py @@ -0,0 +1,7 @@ +from pypy.interpreter.gateway import unwrap_spec +from pypy.module.micronumpy.interp_numarray import BaseArray + + + at unwrap_spec(array=BaseArray) +def debug_repr(space, array): + return space.wrap(array.debug_repr()) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -581,6 +581,7 @@ def descr_get_dtype(self, space): return space.wrap(self.find_dtype()) + @jit.unroll_safe def descr_get_shape(self, space): return space.newtuple([space.wrap(i) for i in self.shape]) @@ -791,7 +792,8 @@ raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) item = concrete._index_of_single_item(space, w_idx) - concrete.setitem_w(space, item, w_value) + dtype = concrete.find_dtype() + concrete.setitem(item, dtype.coerce(space, w_value)) return if not isinstance(w_value, BaseArray): w_value = convert_to_array(space, w_value) @@ -924,9 +926,6 @@ def start_iter(self, res_shape=None): raise NotImplementedError - def descr_debug_repr(self, space): - return space.wrap(self.debug_repr()) - def descr_array_iface(self, space): concrete = self.get_concrete() storage = concrete.get_storage(space) @@ -1178,10 +1177,6 @@ def eval(self, iter): return self.parent.getitem(iter.get_offset()) - @unwrap_spec(item=int) - def setitem_w(self, space, item, w_value): - return self.parent.setitem_w(space, item, w_value) - def setitem(self, item, value): # This is currently not possible to be called from anywhere. 
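(Aside, not from the patch: a few hunks up, interp_boxes.py gains descr_rsub and fixes __radd__ to use the reflected implementation instead of plain descr_add. For addition the old aliasing happened to give the right answer because the operation is commutative, but subtraction needs a real right-hand variant that swaps the operands. The toy class below, plain Python rather than PyPy interpreter code, shows the behaviour that the new test_scalar_subtract checks for:)

class Box(object):
    # minimal numeric box with a proper reflected subtraction
    def __init__(self, value):
        self.value = value
    def __sub__(self, other):
        return self.value - other      # Box - number
    def __rsub__(self, other):
        return other - self.value      # number - Box: operands swapped

print(Box(2) - 1)   # 1
print(1 - Box(2))   # -1, as the new test_scalar_subtract expects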
raise NotImplementedError @@ -1330,9 +1325,6 @@ raise OperationError(space.w_TypeError, space.wrap( "len() of unsized object")) - def setitem_w(self, space, item, w_value): - return self.setitem(item, self.dtype.coerce(space, w_value)) - def setitem(self, item, value): self.invalidated() self.dtype.setitem(self.storage, item, value) @@ -1472,7 +1464,6 @@ __repr__ = interp2app(BaseArray.descr_repr), __str__ = interp2app(BaseArray.descr_str), - __debug_repr__ = interp2app(BaseArray.descr_debug_repr), __array_interface__ = GetSetProperty(BaseArray.descr_array_iface), dtype = GetSetProperty(BaseArray.descr_get_dtype), diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,34 +1,90 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.interp_dtype import get_dtype_cache -from pypy.rlib.rstruct.runpack import runpack from pypy.rpython.lltypesystem import lltype, rffi +from pypy.module.micronumpy import interp_dtype +from pypy.objspace.std.strutil import strip_spaces FLOAT_SIZE = rffi.sizeof(lltype.Float) - at unwrap_spec(s=str) -def fromstring(space, s): +def _fromstring_text(space, s, count, sep, length, dtype): from pypy.module.micronumpy.interp_numarray import W_NDimArray + + sep_stripped = strip_spaces(sep) + skip_bad_vals = len(sep_stripped) == 0 + + items = [] + num_items = 0 + idx = 0 + + while (num_items < count or count == -1) and idx < len(s): + nextidx = s.find(sep, idx) + if nextidx < 0: + nextidx = length + piece = strip_spaces(s[idx:nextidx]) + if len(piece) > 0 or not skip_bad_vals: + if len(piece) == 0 and not skip_bad_vals: + val = dtype.itemtype.default_fromstring(space) + else: + try: + val = dtype.coerce(space, space.wrap(piece)) + except OperationError, e: + if not e.match(space, space.w_ValueError): + raise + gotit = False + while not gotit and len(piece) > 0: + piece = piece[:-1] + try: + val = dtype.coerce(space, space.wrap(piece)) + gotit = True + except OperationError, e: + if not e.match(space, space.w_ValueError): + raise + if not gotit: + val = dtype.itemtype.default_fromstring(space) + nextidx = length + items.append(val) + num_items += 1 + idx = nextidx + 1 + + if count > num_items: + raise OperationError(space.w_ValueError, space.wrap( + "string is smaller than requested size")) + + a = W_NDimArray(num_items, [num_items], dtype=dtype) + for i, val in enumerate(items): + a.dtype.setitem(a.storage, i, val) + + return space.wrap(a) + +def _fromstring_bin(space, s, count, length, dtype): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + + itemsize = dtype.itemtype.get_element_size() + if count == -1: + count = length / itemsize + if length % itemsize != 0: + raise operationerrfmt(space.w_ValueError, + "string length %d not divisable by item size %d", + length, itemsize) + if count * itemsize > length: + raise OperationError(space.w_ValueError, space.wrap( + "string is smaller than requested size")) + + a = W_NDimArray(count, [count], dtype=dtype) + for i in range(count): + val = dtype.itemtype.runpack_str(s[i*itemsize:i*itemsize + itemsize]) + a.dtype.setitem(a.storage, i, val) + + return space.wrap(a) + + at unwrap_spec(s=str, count=int, sep=str) +def fromstring(space, s, w_dtype=None, count=-1, sep=''): + dtype = space.interp_w(interp_dtype.W_Dtype, + 
space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) length = len(s) - - if length % FLOAT_SIZE == 0: - number = length/FLOAT_SIZE + if sep == '': + return _fromstring_bin(space, s, count, length, dtype) else: - raise OperationError(space.w_ValueError, space.wrap( - "string length %d not divisable by %d" % (length, FLOAT_SIZE))) - - dtype = get_dtype_cache(space).w_float64dtype - a = W_NDimArray(number, [number], dtype=dtype) - - start = 0 - end = FLOAT_SIZE - i = 0 - while i < number: - part = s[start:end] - a.dtype.setitem(a.storage, i, dtype.box(runpack('d', part))) - i += 1 - start += FLOAT_SIZE - end += FLOAT_SIZE - - return space.wrap(a) + return _fromstring_text(space, s, count, sep, length, dtype) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -193,6 +193,7 @@ assert type(X(True)) is numpy.bool_ assert X(True) is numpy.True_ + assert numpy.bool_("False") is numpy.True_ def test_int8(self): import numpypy as numpy @@ -211,6 +212,10 @@ assert type(int(x)) is int assert int(x) == -128 + assert numpy.int8('50') == numpy.int8(50) + raises(ValueError, numpy.int8, '50.2') + assert numpy.int8('127') == 127 + assert numpy.int8('128') == -128 def test_uint8(self): import numpypy as numpy @@ -232,6 +237,8 @@ assert numpy.uint8(255) == 255 assert numpy.uint8(256) == 0 + assert numpy.uint8('255') == 255 + assert numpy.uint8('256') == 0 def test_int16(self): import numpypy as numpy @@ -240,26 +247,43 @@ assert x == 3 assert numpy.int16(32767) == 32767 assert numpy.int16(32768) == -32768 + assert numpy.int16('32767') == 32767 + assert numpy.int16('32768') == -32768 def test_uint16(self): import numpypy as numpy assert numpy.uint16(65535) == 65535 assert numpy.uint16(65536) == 0 + assert numpy.uint16('65535') == 65535 + assert numpy.uint16('65536') == 0 def test_int32(self): + import sys import numpypy as numpy x = numpy.int32(23) assert x == 23 assert numpy.int32(2147483647) == 2147483647 - assert numpy.int32(2147483648) == -2147483648 + assert numpy.int32('2147483647') == 2147483647 + if sys.maxint > 2 ** 31 - 1: + assert numpy.int32(2147483648) == -2147483648 + assert numpy.int32('2147483648') == -2147483648 + else: + raises(OverflowError, numpy.int32, 2147483648) + raises(OverflowError, numpy.int32, '2147483648') def test_uint32(self): + import sys import numpypy as numpy - assert numpy.uint32(4294967295) == 4294967295 - assert numpy.uint32(4294967296) == 0 + assert numpy.uint32(10) == 10 + + if sys.maxint > 2 ** 31 - 1: + assert numpy.uint32(4294967295) == 4294967295 + assert numpy.uint32(4294967296) == 0 + assert numpy.uint32('4294967295') == 4294967295 + assert numpy.uint32('4294967296') == 0 def test_int_(self): import numpypy as numpy @@ -279,8 +303,15 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 - assert numpy.int64(9223372036854775807) == 9223372036854775807 + if sys.maxint >= 2 ** 63 - 1: + assert numpy.int64(9223372036854775807) == 9223372036854775807 + assert numpy.int64('9223372036854775807') == 9223372036854775807 + else: + raises(OverflowError, numpy.int64, 9223372036854775807) + raises(OverflowError, numpy.int64, '9223372036854775807') + raises(OverflowError, numpy.int64, 9223372036854775808) + raises(OverflowError, numpy.int64, '9223372036854775808') def test_uint64(self): import sys @@ -304,6 +335,8 @@ assert numpy.float32.mro() == [numpy.float32, numpy.floating, 
numpy.inexact, numpy.number, numpy.generic, object] assert numpy.float32(12) == numpy.float64(12) + assert numpy.float32('23.4') == numpy.float32(23.4) + raises(ValueError, numpy.float32, '23.2df') def test_float64(self): import numpypy as numpy @@ -315,6 +348,8 @@ assert numpy.dtype(float).type is numpy.float64 assert numpy.float64(2.0) == 2.0 + assert numpy.float64('23.4') == numpy.float64(23.4) + raises(ValueError, numpy.float64, '23.2df') def test_subclass_type(self): import numpypy as numpy diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -491,6 +491,11 @@ for i in range(5): assert b[i] == i - 5 + def test_scalar_subtract(self): + from numpypy import int32 + assert int32(2) - 1 == 1 + assert 1 - int32(2) == -1 + def test_mul(self): import numpypy @@ -722,6 +727,26 @@ a = array([True] * 5, bool) assert a.sum() == 5 + def test_identity(self): + from numpypy import identity, array + from numpypy import int32, float64, dtype + a = identity(0) + assert len(a) == 0 + assert a.dtype == dtype('float64') + assert a.shape == (0,0) + b = identity(1, dtype=int32) + assert len(b) == 1 + assert b[0][0] == 1 + assert b.shape == (1,1) + assert b.dtype == dtype('int32') + c = identity(2) + assert c.shape == (2,2) + assert (c == [[1,0],[0,1]]).all() + d = identity(3, dtype='int32') + assert d.shape == (3,3) + assert d.dtype == dtype('int32') + assert (d == [[1,0,0],[0,1,0],[0,0,1]]).all() + def test_prod(self): from numpypy import array a = array(range(1, 6)) @@ -868,16 +893,17 @@ def test_debug_repr(self): from numpypy import zeros, sin + from numpypy.pypy import debug_repr a = zeros(1) - assert a.__debug_repr__() == 'Array' - assert (a + a).__debug_repr__() == 'Call2(add, Array, Array)' - assert (a[::2]).__debug_repr__() == 'Slice(Array)' - assert (a + 2).__debug_repr__() == 'Call2(add, Array, Scalar)' - assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, FlatIter(Array))' - assert sin(a).__debug_repr__() == 'Call1(sin, Array)' + assert debug_repr(a) == 'Array' + assert debug_repr(a + a) == 'Call2(add, Array, Array)' + assert debug_repr(a[::2]) == 'Slice(Array)' + assert debug_repr(a + 2) == 'Call2(add, Array, Scalar)' + assert debug_repr(a + a.flat) == 'Call2(add, Array, FlatIter(Array))' + assert debug_repr(sin(a)) == 'Call1(sin, Array)' b = a + a b[0] = 3 - assert b.__debug_repr__() == 'Call2(add, forced=Array)' + assert debug_repr(b) == 'Call2(add, forced=Array)' def test_tolist_scalar(self): from numpypy import int32, bool_ @@ -1168,13 +1194,110 @@ import struct BaseNumpyAppTest.setup_class.im_func(cls) cls.w_data = cls.space.wrap(struct.pack('dddd', 1, 2, 3, 4)) + cls.w_fdata = cls.space.wrap(struct.pack('f', 2.3)) + cls.w_float32val = cls.space.wrap(struct.pack('f', 5.2)) + cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4)) + cls.w_ulongval = cls.space.wrap(struct.pack('L', 12)) def test_fromstring(self): - from numpypy import fromstring + import sys + from numpypy import fromstring, array, uint8, float32, int32 + a = fromstring(self.data) for i in range(4): assert a[i] == i + 1 - raises(ValueError, fromstring, "abc") + b = fromstring('\x01\x02', dtype=uint8) + assert a[0] == 1 + assert a[1] == 2 + c = fromstring(self.fdata, dtype=float32) + assert c[0] == float32(2.3) + d = fromstring("1 2", sep=' ', count=2, dtype=uint8) + assert len(d) == 2 + assert d[0] == 1 + assert d[1] == 2 + e = fromstring('3, 4,5', dtype=uint8, 
sep=',') + assert len(e) == 3 + assert e[0] == 3 + assert e[1] == 4 + assert e[2] == 5 + f = fromstring('\x01\x02\x03\x04\x05', dtype=uint8, count=3) + assert len(f) == 3 + assert f[0] == 1 + assert f[1] == 2 + assert f[2] == 3 + g = fromstring("1 2 3 ", dtype=uint8, sep=" ") + assert len(g) == 3 + assert g[0] == 1 + assert g[1] == 2 + assert g[2] == 3 + h = fromstring("1, , 2, 3", dtype=uint8, sep=",") + assert (h == [1,0,2,3]).all() + i = fromstring("1 2 3", dtype=uint8, sep=" ") + assert (i == [1,2,3]).all() + j = fromstring("1\t\t\t\t2\t3", dtype=uint8, sep="\t") + assert (j == [1,2,3]).all() + k = fromstring("1,x,2,3", dtype=uint8, sep=",") + assert (k == [1,0]).all() + l = fromstring("1,x,2,3", dtype='float32', sep=",") + assert (l == [1.0,-1.0]).all() + m = fromstring("1,,2,3", sep=",") + assert (m == [1.0,-1.0,2.0,3.0]).all() + n = fromstring("3.4 2.0 3.8 2.2", dtype=int32, sep=" ") + assert (n == [3]).all() + o = fromstring("1.0 2f.0f 3.8 2.2", dtype=float32, sep=" ") + assert len(o) == 2 + assert o[0] == 1.0 + assert o[1] == 2.0 + p = fromstring("1.0,,2.0,3.0", sep=",") + assert (p == [1.0, -1.0, 2.0, 3.0]).all() + q = fromstring("1.0,,2.0,3.0", sep=" ") + assert (q == [1.0]).all() + r = fromstring("\x01\x00\x02", dtype='bool') + assert (r == [True, False, True]).all() + s = fromstring("1,2,3,,5", dtype=bool, sep=",") + assert (s == [True, True, True, False, True]).all() + t = fromstring("", bool) + assert (t == []).all() + u = fromstring("\x01\x00\x00\x00\x00\x00\x00\x00", dtype=int) + if sys.maxint > 2 ** 31 - 1: + assert (u == [1]).all() + else: + assert (u == [1, 0]).all() + + def test_fromstring_types(self): + from numpypy import (fromstring, int8, int16, int32, int64, uint8, + uint16, uint32, float32, float64) + + a = fromstring('\xFF', dtype=int8) + assert a[0] == -1 + b = fromstring('\xFF', dtype=uint8) + assert b[0] == 255 + c = fromstring('\xFF\xFF', dtype=int16) + assert c[0] == -1 + d = fromstring('\xFF\xFF', dtype=uint16) + assert d[0] == 65535 + e = fromstring('\xFF\xFF\xFF\xFF', dtype=int32) + assert e[0] == -1 + f = fromstring('\xFF\xFF\xFF\xFF', dtype=uint32) + assert repr(f[0]) == '4294967295' + g = fromstring('\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF', dtype=int64) + assert g[0] == -1 + h = fromstring(self.float32val, dtype=float32) + assert h[0] == float32(5.2) + i = fromstring(self.float64val, dtype=float64) + assert i[0] == float64(300.4) + j = fromstring(self.ulongval, dtype='L') + assert j[0] == 12 + + + def test_fromstring_invalid(self): + from numpypy import fromstring, uint16, uint8, int32 + #default dtype is 64-bit float, so 3 bytes should fail + raises(ValueError, fromstring, "\x01\x02\x03") + #3 bytes is not modulo 2 bytes (int16) + raises(ValueError, fromstring, "\x01\x03\x03", dtype=uint16) + #5 bytes is larger than 3 bytes + raises(ValueError, fromstring, "\x01\x02\x03", count=5, dtype=uint8) class AppTestRepr(BaseNumpyAppTest): diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -250,22 +250,6 @@ 'int_ge': 1, 'guard_false': 1, 'jump': 1}) - def define_slice2(): - return """ - a = |30| - s1 = a -> :20:2 - s2 = a -> :30:3 - b = s1 + s2 - b -> 3 - """ - - def test_slice2(self): - result = self.run("slice2") - assert result == 15 - self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, - 'setinteriorfield_raw': 1, 'int_add': 3, - 'int_ge': 1, 'guard_false': 1, 'jump': 1}) - def define_multidim(): return """ 
a = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -8,6 +8,7 @@ from pypy.rlib.objectmodel import specialize from pypy.rlib.rarithmetic import LONG_BIT, widen from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rlib.rstruct.runpack import runpack def simple_unary_op(func): @@ -55,6 +56,7 @@ class Primitive(object): _mixin_ = True + def get_element_size(self): return rffi.sizeof(self.T) @@ -84,6 +86,9 @@ def _coerce(self, space, w_item): raise NotImplementedError + def default_fromstring(self, space): + raise NotImplementedError + def read(self, storage, width, i, offset): return self.box(libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), width, storage, i, offset @@ -102,6 +107,9 @@ width, storage, i, offset, value ) + def runpack_str(self, s): + return self.box(runpack(self.format_code, s)) + @simple_binary_op def add(self, v1, v2): return v1 + v2 @@ -164,6 +172,7 @@ class Bool(BaseType, Primitive): T = lltype.Bool BoxType = interp_boxes.W_BoolBox + format_code = "?" True = BoxType(True) False = BoxType(False) @@ -193,11 +202,14 @@ def for_computation(self, v): return int(v) + def default_fromstring(self, space): + return self.box(False) + class Integer(Primitive): _mixin_ = True def _coerce(self, space, w_item): - return self.box(space.int_w(space.int(w_item))) + return self.box(space.int_w(space.call_function(space.w_int, w_item))) def str_format(self, box): value = self.unbox(box) @@ -206,6 +218,9 @@ def for_computation(self, v): return widen(v) + def default_fromstring(self, space): + return self.box(0) + @simple_binary_op def div(self, v1, v2): if v2 == 0: @@ -241,42 +256,52 @@ class Int8(BaseType, Integer): T = rffi.SIGNEDCHAR BoxType = interp_boxes.W_Int8Box + format_code = "b" class UInt8(BaseType, Integer): T = rffi.UCHAR BoxType = interp_boxes.W_UInt8Box + format_code = "B" class Int16(BaseType, Integer): T = rffi.SHORT BoxType = interp_boxes.W_Int16Box + format_code = "h" class UInt16(BaseType, Integer): T = rffi.USHORT BoxType = interp_boxes.W_UInt16Box + format_code = "H" class Int32(BaseType, Integer): T = rffi.INT BoxType = interp_boxes.W_Int32Box + format_code = "i" class UInt32(BaseType, Integer): T = rffi.UINT BoxType = interp_boxes.W_UInt32Box + format_code = "I" class Long(BaseType, Integer): T = rffi.LONG BoxType = interp_boxes.W_LongBox + format_code = "l" class ULong(BaseType, Integer): T = rffi.ULONG BoxType = interp_boxes.W_ULongBox + format_code = "L" class Int64(BaseType, Integer): T = rffi.LONGLONG BoxType = interp_boxes.W_Int64Box + format_code = "q" class UInt64(BaseType, Integer): T = rffi.ULONGLONG BoxType = interp_boxes.W_UInt64Box + format_code = "Q" def _coerce(self, space, w_item): try: @@ -295,7 +320,7 @@ _mixin_ = True def _coerce(self, space, w_item): - return self.box(space.float_w(space.float(w_item))) + return self.box(space.float_w(space.call_function(space.w_float, w_item))) def str_format(self, box): value = self.unbox(box) @@ -304,6 +329,9 @@ def for_computation(self, v): return float(v) + def default_fromstring(self, space): + return self.box(-1.0) + @simple_binary_op def div(self, v1, v2): try: @@ -403,7 +431,9 @@ class Float32(BaseType, Float): T = rffi.FLOAT BoxType = interp_boxes.W_Float32Box + format_code = "f" class Float64(BaseType, Float): T = rffi.DOUBLE - BoxType = interp_boxes.W_Float64Box \ No newline at end of file + BoxType = interp_boxes.W_Float64Box + 
format_code = "d" \ No newline at end of file diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -656,7 +656,11 @@ os.fsync(f) # <- should also work with a file, or anything finally: # with a fileno() method f.close() - raises(OSError, os.fsync, fd) + try: + # May not raise anything with a buggy libc (or eatmydata) + os.fsync(fd) + except OSError: + pass raises(ValueError, os.fsync, -1) if hasattr(os, 'fdatasync'): @@ -668,7 +672,11 @@ os.fdatasync(fd) finally: f.close() - raises(OSError, os.fdatasync, fd) + try: + # May not raise anything with a buggy libc (or eatmydata) + os.fdatasync(fd) + except OSError: + pass raises(ValueError, os.fdatasync, -1) if hasattr(os, 'fchdir'): diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -210,9 +210,9 @@ def entry_bridge_ops(self, *args, **kwds): ops = list(self._allops(*args, **kwds)) labels = [op for op in ops if op.name == 'label'] - assert ops.index(labels[0]) == 0 - i = ops.index(labels[1]) - return ops[1:i] + i0 = ops.index(labels[0]) + i1 = ops.index(labels[1]) + return ops[i0+1:i1] @property def chunks(self): @@ -409,7 +409,7 @@ """ iter_exp_ops = iter(expected_ops) iter_ops = RevertableIterator(self.ops) - for opindex, exp_op in enumerate(iter_exp_ops): + for exp_op in iter_exp_ops: try: if exp_op == '...': # loop until we find an operation which matches @@ -430,7 +430,7 @@ if exp_op[4] is False: # optional operation iter_ops.revert_one() continue # try to match with the next exp_op - e.opindex = opindex + e.opindex = iter_ops.index - 1 raise # # make sure we exhausted iter_ops diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -45,8 +45,10 @@ cmdline = [sys.executable] if not import_site: cmdline.append('-S') - for key, value in jitopts.iteritems(): - cmdline += ['--jit', '%s=%s' % (key, value)] + if jitopts: + jitcmdline = ['%s=%s' % (key, value) + for key, value in jitopts.items()] + cmdline += ['--jit', ','.join(jitcmdline)] cmdline.append(str(self.filepath)) # print cmdline, logfile diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -6,6 +6,8 @@ def main(n): def f(): for i in range(10000): + i -= 1 + i -= 42 # ID: subtract yield i def g(): @@ -15,10 +17,15 @@ g() log = self.run(main, [500]) - loop, = log.loops_by_filename(self.filepath, is_entry_bridge='*') + # XXX XXX this test fails so far because of a detail that + # changed with jit-simplify-backendintf. We should try to + # think of a way to be more resistent against such details. + # The issue is that we now get one Tracing, then go back + # to the interpreter hoping to immediately run the JITted + # code; but instead, we Trace again, just because another + # counter was also about to reach its limit... + loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ - ... - label(..., descr=...) 
i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) setfield_gc(p45, i29, descr=) @@ -26,3 +33,8 @@ i47 = arraylen_gc(p8, descr=) # Should be removed by backend jump(..., descr=...) """) + assert loop.match_by_id("subtract", """ + setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me + i2 = int_sub_ovf(i1, 42) + guard_no_overflow(descr=...) + """) diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -31,9 +31,9 @@ imag2 = float2longlong(imag2) return real1 == real2 and imag1 == imag2 - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.rlib.longlong2float import float2longlong from pypy.objspace.std.model import IDTAG_COMPLEX as tag real = space.float_w(space.getattr(self, space.wrap("real"))) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -34,9 +34,9 @@ two = float2longlong(space.float_w(w_other)) return one == two - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.rlib.longlong2float import float2longlong from pypy.objspace.std.model import IDTAG_FLOAT as tag val = float2longlong(space.float_w(self)) diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -26,9 +26,9 @@ return self is w_other return space.int_w(self) == space.int_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.objspace.std.model import IDTAG_INT as tag b = space.bigint_w(self) b = b.lshift(3).or_(rbigint.fromint(tag)) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -18,9 +18,9 @@ return self is w_other return space.bigint_w(self).eq(space.bigint_w(w_other)) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.objspace.std.model import IDTAG_LONG as tag b = space.bigint_w(self) b = b.lshift(3).or_(rbigint.fromint(tag)) diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -177,52 +177,55 @@ _specialisations = [] Cls_ii = make_specialised_class((int, int)) -Cls_is = make_specialised_class((int, str)) -Cls_io = make_specialised_class((int, object)) -Cls_si = make_specialised_class((str, int)) -Cls_ss = make_specialised_class((str, str)) -Cls_so = make_specialised_class((str, object)) -Cls_oi = make_specialised_class((object, int)) -Cls_os = make_specialised_class((object, str)) +#Cls_is = make_specialised_class((int, str)) +#Cls_io = make_specialised_class((int, object)) +#Cls_si = make_specialised_class((str, int)) +#Cls_ss = make_specialised_class((str, str)) +#Cls_so = make_specialised_class((str, object)) +#Cls_oi = make_specialised_class((object, int)) +#Cls_os = make_specialised_class((object, str)) Cls_oo = make_specialised_class((object, object)) 
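(Aside, not from the patch: after this change only the (int, int), (float, float) and generic (object, object) two-element specialisations are left, and the dispatcher below only asks for the second element's type once the first element's type already allows a specialisation. A plain-Python caricature of that kind of dispatch, with made-up class names:)

class Tuple2(object):
    # generic fallback: stores the two items as ordinary objects
    def __init__(self, a, b):
        self.a, self.b = a, b

class Tuple2ii(Tuple2):
    pass   # the real class would store two unboxed machine integers

class Tuple2ff(Tuple2):
    pass   # ... and this one two unboxed floats

def make_specialised_tuple(a, b):
    # check type(b) only when type(a) can still lead to a specialisation
    if type(a) is int:
        if type(b) is int:
            return Tuple2ii(a, b)
    elif type(a) is float:
        if type(b) is float:
            return Tuple2ff(a, b)
    return Tuple2(a, b)

print(type(make_specialised_tuple(1, 2)).__name__)      # Tuple2ii
print(type(make_specialised_tuple(1.5, 2.5)).__name__)  # Tuple2ff
print(type(make_specialised_tuple(1, 'x')).__name__)    # Tuple2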
Cls_ff = make_specialised_class((float, float)) -Cls_ooo = make_specialised_class((object, object, object)) +#Cls_ooo = make_specialised_class((object, object, object)) def makespecialisedtuple(space, list_w): if len(list_w) == 2: w_arg1, w_arg2 = list_w w_type1 = space.type(w_arg1) - w_type2 = space.type(w_arg2) + #w_type2 = space.type(w_arg2) # if w_type1 is space.w_int: + w_type2 = space.type(w_arg2) if w_type2 is space.w_int: return Cls_ii(space, w_arg1, w_arg2) - elif w_type2 is space.w_str: - return Cls_is(space, w_arg1, w_arg2) - else: - return Cls_io(space, w_arg1, w_arg2) + #elif w_type2 is space.w_str: + # return Cls_is(space, w_arg1, w_arg2) + #else: + # return Cls_io(space, w_arg1, w_arg2) # - elif w_type1 is space.w_str: - if w_type2 is space.w_int: - return Cls_si(space, w_arg1, w_arg2) - elif w_type2 is space.w_str: - return Cls_ss(space, w_arg1, w_arg2) - else: - return Cls_so(space, w_arg1, w_arg2) + #elif w_type1 is space.w_str: + # if w_type2 is space.w_int: + # return Cls_si(space, w_arg1, w_arg2) + # elif w_type2 is space.w_str: + # return Cls_ss(space, w_arg1, w_arg2) + # else: + # return Cls_so(space, w_arg1, w_arg2) # - elif w_type1 is space.w_float and w_type2 is space.w_float: - return Cls_ff(space, w_arg1, w_arg2) + elif w_type1 is space.w_float: + w_type2 = space.type(w_arg2) + if w_type2 is space.w_float: + return Cls_ff(space, w_arg1, w_arg2) # - else: - if w_type2 is space.w_int: - return Cls_oi(space, w_arg1, w_arg2) - elif w_type2 is space.w_str: - return Cls_os(space, w_arg1, w_arg2) - else: - return Cls_oo(space, w_arg1, w_arg2) + #else: + # if w_type2 is space.w_int: + # return Cls_oi(space, w_arg1, w_arg2) + # elif w_type2 is space.w_str: + # return Cls_os(space, w_arg1, w_arg2) + # else: + return Cls_oo(space, w_arg1, w_arg2) # - elif len(list_w) == 3: - return Cls_ooo(space, list_w[0], list_w[1], list_w[2]) + #elif len(list_w) == 3: + # return Cls_ooo(space, list_w[0], list_w[1], list_w[2]) else: raise NotSpecialised diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -32,9 +32,9 @@ return False return space.str_w(self) is space.str_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None return space.wrap(compute_unique_id(space.str_w(self))) @@ -514,44 +514,41 @@ if maxsplit == 0: return space.wrap(input) - #print "from replace, input: %s, sub: %s, by: %s" % (input, sub, by) + # An ok guess at the default size + builder = StringBuilder(len(input)) + first = True if not sub: upper = len(input) if maxsplit > 0 and maxsplit < upper + 2: upper = maxsplit - 1 assert upper >= 0 - substrings_w = [""] + first = False for i in range(upper): - c = input[i] - substrings_w.append(c) - substrings_w.append(input[upper:]) + builder.append(by) + builder.append(input[i]) + builder.append(by) + builder.append_slice(input, upper, len(input)) else: start = 0 sublen = len(sub) - substrings_w = [] while maxsplit != 0: next = input.find(sub, start) if next < 0: break - substrings_w.append(input[start:next]) + if not first: + builder.append(by) + first = False + builder.append_slice(input, start, next) start = next + sublen maxsplit -= 1 # NB. if it's already < 0, it stays < 0 - substrings_w.append(input[start:]) + if not first: + builder.append(by) + builder.append_slice(input, start, len(input)) - try: - # XXX conservative estimate. 
If your strings are that close - # to overflowing, bad luck. - one = ovfcheck(len(substrings_w) * len(by)) - ovfcheck(one + len(input)) - except OverflowError: - raise OperationError( - space.w_OverflowError, - space.wrap("replace string is too long")) - - return space.wrap(by.join(substrings_w)) + return space.wrap(builder.build()) def str_replace__String_ANY_ANY_ANY(space, w_self, w_sub, w_by, w_maxsplit): diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -253,6 +253,12 @@ y = 2j assert id(x) != id(y) + def test_object_hash_immutable(self): + x = 42 + y = 40 + y += 2 + assert object.__hash__(x) == object.__hash__(y) + def test_isinstance_shortcut(): from pypy.objspace.std import objspace diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py --- a/pypy/objspace/std/test/test_specialisedtupleobject.py +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -33,15 +33,15 @@ N_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": False}) S_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) - def hash_test(values): + def hash_test(values, must_be_specialized=True): N_values_w = [N_space.wrap(value) for value in values] S_values_w = [S_space.wrap(value) for value in values] N_w_tuple = N_space.newtuple(N_values_w) S_w_tuple = S_space.newtuple(S_values_w) - - assert isinstance(S_w_tuple, W_SpecialisedTupleObject) + + if must_be_specialized: + assert isinstance(S_w_tuple, W_SpecialisedTupleObject) assert isinstance(N_w_tuple, W_TupleObject) - assert not N_space.is_true(N_space.eq(N_w_tuple, S_w_tuple)) assert S_space.is_true(S_space.eq(N_w_tuple, S_w_tuple)) assert S_space.is_true(S_space.eq(N_space.hash(N_w_tuple), S_space.hash(S_w_tuple))) @@ -53,7 +53,7 @@ hash_test([1,(1,2)]) hash_test([1,('a',2)]) hash_test([1,()]) - hash_test([1,2,3]) + hash_test([1,2,3], must_be_specialized=False) class AppTestW_SpecialisedTupleObject: @@ -83,6 +83,8 @@ return ("SpecialisedTupleObject" + expected) in r def test_createspecialisedtuple(self): + have = ['ii', 'ff', 'oo'] + # spec = {int: 'i', float: 'f', str: 's', @@ -92,14 +94,14 @@ for y in [43, 4.3, "bar", []]: expected1 = spec[type(x)] expected2 = spec[type(y)] - if (expected1 == 'f') ^ (expected2 == 'f'): - if expected1 == 'f': expected1 = 'o' - if expected2 == 'f': expected2 = 'o' + if expected1 + expected2 not in have: + expected1 = expected2 = 'o' obj = (x, y) assert self.isspecialised(obj, '_' + expected1 + expected2) # - obj = (1, 2, 3) - assert self.isspecialised(obj, '_ooo') + if 'ooo' in have: + obj = (1, 2, 3) + assert self.isspecialised(obj, '_ooo') def test_delegation(self): t = self.forbid_delegation((42, 43)) @@ -214,6 +216,8 @@ raises(IndexError, "t[-3]") def test_three_tuples(self): + if not self.isspecialised((1, 2, 3)): + skip("don't have specialization for 3-tuples") b = self.forbid_delegation((1, 2, 3)) c = (1,) d = c + (2, 3) @@ -221,6 +225,16 @@ assert b == d def test_mongrel(self): + a = self.forbid_delegation((2.2, '333')) + assert self.isspecialised(a) + assert len(a) == 2 + assert a[0] == 2.2 and a[1] == '333' + b = ('333',) + assert a == (2.2,) + b + assert not a != (2.2,) + b + # + if not self.isspecialised((1, 2, 3)): + skip("don't have specialization for 3-tuples") a = self.forbid_delegation((1, 2.2, '333')) assert self.isspecialised(a) assert len(a) == 3 diff --git a/pypy/objspace/std/typetype.py 
b/pypy/objspace/std/typetype.py --- a/pypy/objspace/std/typetype.py +++ b/pypy/objspace/std/typetype.py @@ -10,7 +10,6 @@ w_dict=gateway.NoneNotWrapped): "This is used to create user-defined classes only." - from pypy.objspace.std.typeobject import W_TypeObject # XXX check types w_typetype = _precheck_for_new(space, w_typetype) @@ -19,10 +18,18 @@ if (space.is_w(space.type(w_typetype), space.w_type) and w_bases is None and w_dict is None): return space.type(w_name) - elif w_bases is None or w_dict is None: + else: + return _create_new_type(space, w_typetype, w_name, w_bases, w_dict) + + +def _create_new_type(space, w_typetype, w_name, w_bases, w_dict): + # this is in its own function because we want the special case 'type(x)' + # above to be seen by the jit. + from pypy.objspace.std.typeobject import W_TypeObject + + if w_bases is None or w_dict is None: raise OperationError(space.w_TypeError, space.wrap("type() takes 1 or 3 arguments")) - bases_w = space.fixedview(w_bases) w_winner = w_typetype diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -32,9 +32,9 @@ return False return space.unicode_w(self) is space.unicode_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None return space.wrap(compute_unique_id(space.unicode_w(self))) diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -395,6 +395,7 @@ 'retrace_limit': 5, 'max_retrace_guards': 15, 'enable_opts': 'all', + 'decay_halflife': 40, } unroll_parameters = unrolling_iterable(PARAMETERS.items()) DEFAULT = object() diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py --- a/pypy/rlib/longlong2float.py +++ b/pypy/rlib/longlong2float.py @@ -79,19 +79,19 @@ longlong2float = rffi.llexternal( "pypy__longlong2float", [rffi.LONGLONG], rffi.DOUBLE, _callable=longlong2float_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) float2longlong = rffi.llexternal( "pypy__float2longlong", [rffi.DOUBLE], rffi.LONGLONG, _callable=float2longlong_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, _callable=uint2singlefloat_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) singlefloat2uint = rffi.llexternal( "pypy__singlefloat2uint", [rffi.FLOAT], rffi.UINT, _callable=singlefloat2uint_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -16,6 +16,7 @@ from pypy.rpython.annlowlevel import llhelper from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.rstring import StringBuilder, UnicodeBuilder +from pypy.rlib import jit from pypy.rpython.lltypesystem import llmemory import os, sys @@ -249,8 +250,7 @@ wrapper = func_with_new_name(wrapper, name) if calling_conv != "c": - from pypy.rlib.jit import dont_look_inside - wrapper = dont_look_inside(wrapper) + wrapper = 
jit.dont_look_inside(wrapper) return wrapper @@ -697,6 +697,8 @@ return b.build() # str -> char* + # Can't inline this because of the raw address manipulation. + @jit.dont_look_inside def get_nonmovingbuffer(data): """ Either returns a non-moving copy or performs neccessary pointer @@ -717,6 +719,8 @@ get_nonmovingbuffer._annenforceargs_ = [strtype] # (str, char*) -> None + # Can't inline this because of the raw address manipulation. + @jit.dont_look_inside def free_nonmovingbuffer(data, buf): """ Either free a non-moving buffer or keep the original storage alive. diff --git a/pypy/rpython/rint.py b/pypy/rpython/rint.py --- a/pypy/rpython/rint.py +++ b/pypy/rpython/rint.py @@ -126,10 +126,7 @@ rtype_inplace_rshift = rtype_rshift def rtype_pow(_, hop): - raise MissingRTypeOperation("pow(int, int)" - " (use float**float instead; it is too" - " easy to overlook the overflow" - " issues of int**int)") + raise MissingRTypeOperation("'**' not supported in RPython") rtype_pow_ovf = rtype_pow rtype_inplace_pow = rtype_pow diff --git a/pypy/tool/clean_old_branches.py b/pypy/tool/clean_old_branches.py --- a/pypy/tool/clean_old_branches.py +++ b/pypy/tool/clean_old_branches.py @@ -11,14 +11,17 @@ sys.exit(1) def heads(args): - g = os.popen(r"hg heads --topo %s --template '{branches} {node|short}\n'" + g = os.popen(r"hg heads --topo %s --template '{node|short}:{branches}\n'" % args, 'r') result = g.read() g.close() result = result.splitlines(False) - result = [s for s in result - if not s.startswith(' ') - and not s.startswith('closed-branches ')] + for line in result: + if len(line.split(':', 1)) != 2: + raise ValueError("'result' contains: %r" % line) + result = [s.split(':', 1) for s in result] + result = [(head, branch) for (head, branch) in result + if branch not in ['', 'closed-branches']] return result all_heads = heads("--closed") @@ -34,8 +37,7 @@ closed_heads.reverse() -for branch_head in closed_heads: - branch, head = branch_head.split() +for head, branch in closed_heads: print '\t', branch print print 'The branches listed above will be merged to "closed-branches".' 
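(Aside, not from the patch: the heads() rewrite above switches the template to '{node|short}:{branches}', so every line has the shape 'hash:branchname'. A hex node hash cannot contain ':', which makes a single split unambiguous, and the default branch, which prints an empty branch name, no longer has to be recognised by a leading space. A standalone sketch of the same parsing, run on canned text instead of a live 'hg heads' call, with made-up hashes and branch names:)

def parse_heads(output):
    # each line: '<short node hash>:<branch name>'; skip the default branch
    # (empty name) and the special 'closed-branches' branch
    result = []
    for line in output.splitlines():
        parts = line.split(':', 1)
        if len(parts) != 2:
            raise ValueError("unexpected line: %r" % (line,))
        head, branch = parts
        if branch not in ('', 'closed-branches'):
            result.append((head, branch))
    return result

sample = ("0123456789ab:some-branch\n"
          "abcdef012345:\n"                 # a head on default: ignored
          "fedcba987654:closed-branches\n")
print(parse_heads(sample))   # [('0123456789ab', 'some-branch')]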
@@ -54,8 +56,7 @@ print '*** error %r' % (err,) sys.exit(1) -for branch_head in closed_heads: - branch, head = branch_head.split() +for head, branch in closed_heads: print print '***** %s ***** %s *****' % (branch, head) do("hg up --clean closed-branches") diff --git a/pypy/tool/gcc_cache.py b/pypy/tool/gcc_cache.py --- a/pypy/tool/gcc_cache.py +++ b/pypy/tool/gcc_cache.py @@ -11,6 +11,9 @@ # Import 'platform' every time, the compiler may have been changed from pypy.translator.platform import platform cache_dir = cache_dir_root.join(cachename).ensure(dir=1) + c_files.extend([py.path.local(f) for f in eci.separate_module_files]) + eci = ExternalCompilationInfo(**eci._copy_attributes()) + eci.separate_module_files = () filecontents = [c_file.read() for c_file in c_files] key = repr((filecontents, eci, platform.key())) hash = md5(key).hexdigest() From noreply at buildbot.pypy.org Sat Dec 17 13:13:12 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 17 Dec 2011 13:13:12 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: kill ExportedState.jump_args Message-ID: <20111217121312.EF05582221@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50608:de71b40cee0f Date: 2011-12-17 13:12 +0100 http://bitbucket.org/pypy/pypy/changeset/de71b40cee0f/ Log: kill ExportedState.jump_args diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -212,13 +212,12 @@ try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: - #return None # XXX: Dissable for now # Fall back on jumping to preamble target_token = label.getdescr() assert isinstance(target_token, TargetToken) assert target_token.exported_state part.operations = [orignial_label] + \ - [ResOperation(rop.JUMP, target_token.exported_state.jump_args, + [ResOperation(rop.JUMP, inputargs[:], None, descr=loop_jitcell_token)] try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts, diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -158,9 +158,7 @@ target_token.virtual_state = virtual_state target_token.short_preamble = [ResOperation(rop.LABEL, short_inputargs, None)] target_token.start_resumedescr = start_resumedescr - target_token.exported_state = ExportedState(short_boxes, - inputarg_setup_ops, self.optimizer, - jump_args) + target_token.exported_state = ExportedState(short_boxes, inputarg_setup_ops, self.optimizer) def import_state(self, targetop): self.did_import = False @@ -539,9 +537,7 @@ self.unroll.add_op_to_short(self.op, False, True) class ExportedState(object): - def __init__(self, short_boxes, inputarg_setup_ops, optimizer, - jump_args): + def __init__(self, short_boxes, inputarg_setup_ops, optimizer): self.short_boxes = short_boxes self.inputarg_setup_ops = inputarg_setup_ops self.optimizer = optimizer - self.jump_args = jump_args From noreply at buildbot.pypy.org Sat Dec 17 13:27:15 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 17 Dec 2011 13:27:15 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: replace ExportedState.optimizer with ExportedState.exported_values, which conatians the info needed Message-ID: <20111217122715.0E8988205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50609:9756d0eb5d62 Date: 2011-12-17 13:26 +0100 
http://bitbucket.org/pypy/pypy/changeset/9756d0eb5d62/ Log: replace ExportedState.optimizer with ExportedState.exported_values, which conatians the info needed diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -158,7 +158,17 @@ target_token.virtual_state = virtual_state target_token.short_preamble = [ResOperation(rop.LABEL, short_inputargs, None)] target_token.start_resumedescr = start_resumedescr - target_token.exported_state = ExportedState(short_boxes, inputarg_setup_ops, self.optimizer) + + exported_values = {} + for box in inputargs: + exported_values[box] = self.optimizer.getvalue(box) + for op in short_boxes.operations(): + if op and op.result: + box = op.result + exported_values[box] = self.optimizer.getvalue(box) + + target_token.exported_state = ExportedState(short_boxes, inputarg_setup_ops, + exported_values) def import_state(self, targetop): self.did_import = False @@ -188,7 +198,7 @@ if box in seen: continue seen[box] = True - preamble_value = exported_state.optimizer.getvalue(box) + preamble_value = exported_state.exported_values[box] value = self.optimizer.getvalue(box) value.import_from(preamble_value, self.optimizer) @@ -202,7 +212,7 @@ for op in self.short_boxes.operations(): self.ensure_short_op_emitted(op, self.optimizer, seen) if op and op.result: - preamble_value = exported_state.optimizer.getvalue(op.result) + preamble_value = exported_state.exported_values[op.result] value = self.optimizer.getvalue(op.result) if not value.is_virtual(): imp = ValueImporter(self, preamble_value, op) @@ -537,7 +547,7 @@ self.unroll.add_op_to_short(self.op, False, True) class ExportedState(object): - def __init__(self, short_boxes, inputarg_setup_ops, optimizer): + def __init__(self, short_boxes, inputarg_setup_ops, exported_values): self.short_boxes = short_boxes self.inputarg_setup_ops = inputarg_setup_ops - self.optimizer = optimizer + self.exported_values = exported_values From noreply at buildbot.pypy.org Sat Dec 17 13:30:09 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 Dec 2011 13:30:09 +0100 (CET) Subject: [pypy-commit] pypy default: Run ./interpreter/astcompiler/tools/asdl_py.py Message-ID: <20111217123009.C81178205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r50610:1ed3eaa72e9d Date: 2011-12-16 22:42 +0100 http://bitbucket.org/pypy/pypy/changeset/1ed3eaa72e9d/ Log: Run ./interpreter/astcompiler/tools/asdl_py.py diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -101,9 +101,7 @@ missing = required[i] if missing is not None: err = "required field \"%s\" missing from %s" - err = err % (missing, host) - w_err = space.wrap(err) - raise OperationError(space.w_TypeError, w_err) + raise operationerrfmt(space.w_TypeError, err, missing, host) raise AssertionError("should not reach here") From noreply at buildbot.pypy.org Sat Dec 17 13:30:11 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 Dec 2011 13:30:11 +0100 (CET) Subject: [pypy-commit] pypy default: Small improvement in generated ast: lineno and col_offset Message-ID: <20111217123011.0E0538205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r50611:a930e5061f83 Date: 2011-12-16 22:51 +0100 http://bitbucket.org/pypy/pypy/changeset/a930e5061f83/ Log: Small improvement in 
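
The missing_field() hunk above swaps eager string interpolation plus wrapping (err % (missing, host), space.wrap(err), raise OperationError(...)) for a single operationerrfmt(space.w_TypeError, err, missing, host) call, which passes the format string and its arguments separately so the message can be built only when it is actually needed. The following is a minimal sketch of that pattern in plain Python; LazyFormattedError and missing_field_sketch are hypothetical names for illustration, not PyPy's actual error classes or interpreter API:

    # Sketch only: defer string formatting until the error message is rendered,
    # mirroring the style of the missing_field() change above.
    class LazyFormattedError(Exception):
        def __init__(self, valuefmt, *fmtargs):
            Exception.__init__(self)
            self.valuefmt = valuefmt
            self.fmtargs = fmtargs

        def __str__(self):
            # the message is only built here, when someone displays the error
            return self.valuefmt % self.fmtargs

    def missing_field_sketch(missing, host):
        # old style: format eagerly, then raise the pre-built message
        #   err = 'required field "%s" missing from %s' % (missing, host)
        #   raise SomeError(err)
        # new style: hand over the format string and arguments unformatted
        raise LazyFormattedError('required field "%s" missing from %s',
                                 missing, host)

For example, missing_field_sketch("body", "Module") raises an error whose text is produced lazily as 'required field "body" missing from Module'; the benefit is that no string work happens on code paths where the exception is caught and discarded without ever printing its message.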
generated ast: lineno and col_offset are always at positions 1 and 2, code is simpler and avoid an indirection. diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -110,7 +110,6 @@ class Module(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -143,7 +142,6 @@ class Interactive(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -176,7 +174,6 @@ class Expression(mod): - def __init__(self, body): self.body = body self.initialization_state = 1 @@ -198,7 +195,6 @@ class Suite(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -230,15 +226,13 @@ class stmt(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class FunctionDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, args, body, decorator_list, lineno, col_offset): self.name = name self.args = args @@ -262,7 +256,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'args', 'body', 'decorator_list', 'lineno', 'col_offset'], 'FunctionDef') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef') else: pass self.args.sync_app_attrs(space) @@ -290,9 +284,6 @@ class ClassDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, bases, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases @@ -318,7 +309,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'bases', 'body', 'decorator_list', 'lineno', 'col_offset'], 'ClassDef') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef') else: pass w_list = self.w_bases @@ -355,9 +346,6 @@ class Return(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -372,10 +360,10 @@ return visitor.visit_Return(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Return') + if (self.initialization_state & ~4) ^ 3: + missing_field(space, self.initialization_state, ['lineno', 'col_offset', None], 'Return') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -383,9 +371,6 @@ class Delete(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, targets, lineno, col_offset): self.targets = targets self.w_targets = None @@ -402,7 +387,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['targets', 'lineno', 'col_offset'], 'Delete') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'targets'], 'Delete') else: pass w_list = self.w_targets @@ -419,9 +404,6 @@ class Assign(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, targets, value, lineno, col_offset): self.targets = targets self.w_targets = None @@ -440,7 +422,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, 
['targets', 'value', 'lineno', 'col_offset'], 'Assign') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') else: pass w_list = self.w_targets @@ -458,9 +440,6 @@ class AugAssign(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, target, op, value, lineno, col_offset): self.target = target self.op = op @@ -478,7 +457,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['target', 'op', 'value', 'lineno', 'col_offset'], 'AugAssign') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') else: pass self.target.sync_app_attrs(space) @@ -487,9 +466,6 @@ class Print(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, dest, values, nl, lineno, col_offset): self.dest = dest self.values = values @@ -509,10 +485,10 @@ return visitor.visit_Print(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 30: - missing_field(space, self.initialization_state, [None, 'values', 'nl', 'lineno', 'col_offset'], 'Print') + if (self.initialization_state & ~4) ^ 27: + missing_field(space, self.initialization_state, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.dest = None if self.dest: self.dest.sync_app_attrs(space) @@ -530,9 +506,6 @@ class For(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, target, iter, body, orelse, lineno, col_offset): self.target = target self.iter = iter @@ -557,7 +530,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['target', 'iter', 'body', 'orelse', 'lineno', 'col_offset'], 'For') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'target', 'iter', 'body', 'orelse'], 'For') else: pass self.target.sync_app_attrs(space) @@ -586,9 +559,6 @@ class While(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -611,7 +581,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'While') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') else: pass self.test.sync_app_attrs(space) @@ -639,9 +609,6 @@ class If(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -664,7 +631,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'If') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') else: pass self.test.sync_app_attrs(space) @@ -692,9 +659,6 @@ class With(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, context_expr, optional_vars, body, lineno, col_offset): self.context_expr = context_expr self.optional_vars = optional_vars @@ -715,10 +679,10 @@ return visitor.visit_With(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 29: - missing_field(space, self.initialization_state, ['context_expr', None, 'body', 'lineno', 
'col_offset'], 'With') + if (self.initialization_state & ~8) ^ 23: + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.optional_vars = None self.context_expr.sync_app_attrs(space) if self.optional_vars: @@ -737,9 +701,6 @@ class Raise(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, inst, tback, lineno, col_offset): self.type = type self.inst = inst @@ -760,14 +721,14 @@ return visitor.visit_Raise(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~7) ^ 24: - missing_field(space, self.initialization_state, [None, None, None, 'lineno', 'col_offset'], 'Raise') + if (self.initialization_state & ~28) ^ 3: + missing_field(space, self.initialization_state, ['lineno', 'col_offset', None, None, None], 'Raise') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.inst = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.tback = None if self.type: self.type.sync_app_attrs(space) @@ -779,9 +740,6 @@ class TryExcept(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, handlers, orelse, lineno, col_offset): self.body = body self.w_body = None @@ -806,7 +764,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['body', 'handlers', 'orelse', 'lineno', 'col_offset'], 'TryExcept') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') else: pass w_list = self.w_body @@ -843,9 +801,6 @@ class TryFinally(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, body, finalbody, lineno, col_offset): self.body = body self.w_body = None @@ -866,7 +821,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['body', 'finalbody', 'lineno', 'col_offset'], 'TryFinally') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') else: pass w_list = self.w_body @@ -893,9 +848,6 @@ class Assert(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, test, msg, lineno, col_offset): self.test = test self.msg = msg @@ -912,10 +864,10 @@ return visitor.visit_Assert(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 13: - missing_field(space, self.initialization_state, ['test', None, 'lineno', 'col_offset'], 'Assert') + if (self.initialization_state & ~8) ^ 7: + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'test', None], 'Assert') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.msg = None self.test.sync_app_attrs(space) if self.msg: @@ -924,9 +876,6 @@ class Import(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -943,7 +892,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Import') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'names'], 'Import') else: pass w_list = self.w_names @@ -960,9 +909,6 @@ class 
ImportFrom(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, module, names, level, lineno, col_offset): self.module = module self.names = names @@ -980,12 +926,12 @@ return visitor.visit_ImportFrom(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~5) ^ 26: - missing_field(space, self.initialization_state, [None, 'names', None, 'lineno', 'col_offset'], 'ImportFrom') + if (self.initialization_state & ~20) ^ 11: + missing_field(space, self.initialization_state, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.module = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.level = 0 w_list = self.w_names if w_list is not None: @@ -1001,9 +947,6 @@ class Exec(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, globals, locals, lineno, col_offset): self.body = body self.globals = globals @@ -1023,12 +966,12 @@ return visitor.visit_Exec(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~6) ^ 25: - missing_field(space, self.initialization_state, ['body', None, None, 'lineno', 'col_offset'], 'Exec') + if (self.initialization_state & ~24) ^ 7: + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'body', None, None], 'Exec') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.globals = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.locals = None self.body.sync_app_attrs(space) if self.globals: @@ -1039,9 +982,6 @@ class Global(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -1056,7 +996,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Global') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'names'], 'Global') else: pass w_list = self.w_names @@ -1070,9 +1010,6 @@ class Expr(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -1087,7 +1024,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Expr') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'value'], 'Expr') else: pass self.value.sync_app_attrs(space) @@ -1095,9 +1032,6 @@ class Pass(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1117,9 +1051,6 @@ class Break(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1139,9 +1070,6 @@ class Continue(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1160,15 +1088,13 @@ class expr(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class BoolOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, values, lineno, col_offset): self.op = op self.values = values @@ -1186,7 +1112,7 @@ def 
sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'values', 'lineno', 'col_offset'], 'BoolOp') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp') else: pass w_list = self.w_values @@ -1203,9 +1129,6 @@ class BinOp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, op, right, lineno, col_offset): self.left = left self.op = op @@ -1223,7 +1146,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'op', 'right', 'lineno', 'col_offset'], 'BinOp') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'left', 'op', 'right'], 'BinOp') else: pass self.left.sync_app_attrs(space) @@ -1232,9 +1155,6 @@ class UnaryOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, operand, lineno, col_offset): self.op = op self.operand = operand @@ -1250,7 +1170,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'operand', 'lineno', 'col_offset'], 'UnaryOp') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'op', 'operand'], 'UnaryOp') else: pass self.operand.sync_app_attrs(space) @@ -1258,9 +1178,6 @@ class Lambda(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, args, body, lineno, col_offset): self.args = args self.body = body @@ -1277,7 +1194,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['args', 'body', 'lineno', 'col_offset'], 'Lambda') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'args', 'body'], 'Lambda') else: pass self.args.sync_app_attrs(space) @@ -1286,9 +1203,6 @@ class IfExp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -1307,7 +1221,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'IfExp') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'IfExp') else: pass self.test.sync_app_attrs(space) @@ -1317,9 +1231,6 @@ class Dict(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, keys, values, lineno, col_offset): self.keys = keys self.w_keys = None @@ -1340,7 +1251,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['keys', 'values', 'lineno', 'col_offset'], 'Dict') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'keys', 'values'], 'Dict') else: pass w_list = self.w_keys @@ -1367,9 +1278,6 @@ class Set(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, elts, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1386,7 +1294,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['elts', 'lineno', 'col_offset'], 'Set') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'elts'], 'Set') else: pass w_list = self.w_elts @@ -1403,9 +1311,6 @@ class ListComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): 
self.elt = elt self.generators = generators @@ -1424,7 +1329,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'ListComp') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'elt', 'generators'], 'ListComp') else: pass self.elt.sync_app_attrs(space) @@ -1442,9 +1347,6 @@ class SetComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1463,7 +1365,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'SetComp') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'elt', 'generators'], 'SetComp') else: pass self.elt.sync_app_attrs(space) @@ -1481,9 +1383,6 @@ class DictComp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, key, value, generators, lineno, col_offset): self.key = key self.value = value @@ -1504,7 +1403,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['key', 'value', 'generators', 'lineno', 'col_offset'], 'DictComp') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'key', 'value', 'generators'], 'DictComp') else: pass self.key.sync_app_attrs(space) @@ -1523,9 +1422,6 @@ class GeneratorExp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1544,7 +1440,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'GeneratorExp') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'elt', 'generators'], 'GeneratorExp') else: pass self.elt.sync_app_attrs(space) @@ -1562,9 +1458,6 @@ class Yield(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1579,10 +1472,10 @@ return visitor.visit_Yield(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Yield') + if (self.initialization_state & ~4) ^ 3: + missing_field(space, self.initialization_state, ['lineno', 'col_offset', None], 'Yield') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -1590,9 +1483,6 @@ class Compare(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, ops, comparators, lineno, col_offset): self.left = left self.ops = ops @@ -1613,7 +1503,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'ops', 'comparators', 'lineno', 'col_offset'], 'Compare') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'left', 'ops', 'comparators'], 'Compare') else: pass self.left.sync_app_attrs(space) @@ -1638,9 +1528,6 @@ class Call(expr): - _lineno_mask = 32 - _col_offset_mask = 64 - def __init__(self, func, args, keywords, starargs, kwargs, lineno, col_offset): self.func = func self.args = args @@ -1668,12 +1555,12 @@ return 
visitor.visit_Call(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~24) ^ 103: - missing_field(space, self.initialization_state, ['func', 'args', 'keywords', None, None, 'lineno', 'col_offset'], 'Call') + if (self.initialization_state & ~96) ^ 31: + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'func', 'args', 'keywords', None, None], 'Call') else: - if not self.initialization_state & 8: + if not self.initialization_state & 32: self.starargs = None - if not self.initialization_state & 16: + if not self.initialization_state & 64: self.kwargs = None self.func.sync_app_attrs(space) w_list = self.w_args @@ -1704,9 +1591,6 @@ class Repr(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1721,7 +1605,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Repr') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'value'], 'Repr') else: pass self.value.sync_app_attrs(space) @@ -1729,9 +1613,6 @@ class Num(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, n, lineno, col_offset): self.n = n expr.__init__(self, lineno, col_offset) @@ -1745,16 +1626,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['n', 'lineno', 'col_offset'], 'Num') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'n'], 'Num') else: pass class Str(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, s, lineno, col_offset): self.s = s expr.__init__(self, lineno, col_offset) @@ -1768,16 +1646,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['s', 'lineno', 'col_offset'], 'Str') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 's'], 'Str') else: pass class Attribute(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, attr, ctx, lineno, col_offset): self.value = value self.attr = attr @@ -1794,7 +1669,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'attr', 'ctx', 'lineno', 'col_offset'], 'Attribute') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'value', 'attr', 'ctx'], 'Attribute') else: pass self.value.sync_app_attrs(space) @@ -1802,9 +1677,6 @@ class Subscript(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, slice, ctx, lineno, col_offset): self.value = value self.slice = slice @@ -1822,7 +1694,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'slice', 'ctx', 'lineno', 'col_offset'], 'Subscript') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'value', 'slice', 'ctx'], 'Subscript') else: pass self.value.sync_app_attrs(space) @@ -1831,9 +1703,6 @@ class Name(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, id, ctx, lineno, col_offset): self.id = id self.ctx = ctx @@ -1848,16 +1717,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['id', 'ctx', 'lineno', 'col_offset'], 'Name') + missing_field(space, 
self.initialization_state, ['lineno', 'col_offset', 'id', 'ctx'], 'Name') else: pass class List(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1875,7 +1741,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'List') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'elts', 'ctx'], 'List') else: pass w_list = self.w_elts @@ -1892,9 +1758,6 @@ class Tuple(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1912,7 +1775,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'Tuple') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'elts', 'ctx'], 'Tuple') else: pass w_list = self.w_elts @@ -1929,9 +1792,6 @@ class Const(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1945,7 +1805,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Const') + missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'value'], 'Const') else: pass @@ -2007,7 +1867,6 @@ class Ellipsis(slice): - def __init__(self): self.initialization_state = 0 @@ -2026,7 +1885,6 @@ class Slice(slice): - def __init__(self, lower, upper, step): self.lower = lower self.upper = upper @@ -2065,7 +1923,6 @@ class ExtSlice(slice): - def __init__(self, dims): self.dims = dims self.w_dims = None @@ -2098,7 +1955,6 @@ class Index(slice): - def __init__(self, value): self.value = value self.initialization_state = 1 @@ -2392,15 +2248,13 @@ node.sync_app_attrs(space) class excepthandler(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class ExceptHandler(excepthandler): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, name, body, lineno, col_offset): self.type = type self.name = name @@ -2422,12 +2276,12 @@ return visitor.visit_ExceptHandler(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~3) ^ 28: - missing_field(space, self.initialization_state, [None, None, 'body', 'lineno', 'col_offset'], 'ExceptHandler') + if (self.initialization_state & ~12) ^ 19: + missing_field(space, self.initialization_state, ['lineno', 'col_offset', None, None, 'body'], 'ExceptHandler') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.name = None if self.type: self.type.sync_app_attrs(space) @@ -3096,7 +2950,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -3110,14 +2964,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + 
w_self.initialization_state |= 1 def stmt_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -3131,7 +2985,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 stmt.typedef = typedef.TypeDef("stmt", AST.typedef, @@ -3147,7 +3001,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3161,14 +3015,14 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def FunctionDef_get_args(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -3182,10 +3036,10 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def FunctionDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3199,10 +3053,10 @@ def FunctionDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def FunctionDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3216,7 +3070,7 @@ def FunctionDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _FunctionDef_field_unroller = unrolling_iterable(['name', 'args', 'body', 'decorator_list']) def FunctionDef_init(space, w_self, __args__): @@ -3252,7 +3106,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3266,10 +3120,10 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + 
w_self.initialization_state |= 4 def ClassDef_get_bases(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases') if w_self.w_bases is None: @@ -3283,10 +3137,10 @@ def ClassDef_set_bases(space, w_self, w_new_value): w_self.w_bases = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ClassDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3300,10 +3154,10 @@ def ClassDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def ClassDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3317,7 +3171,7 @@ def ClassDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _ClassDef_field_unroller = unrolling_iterable(['name', 'bases', 'body', 'decorator_list']) def ClassDef_init(space, w_self, __args__): @@ -3354,7 +3208,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3368,7 +3222,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Return_field_unroller = unrolling_iterable(['value']) def Return_init(space, w_self, __args__): @@ -3395,7 +3249,7 @@ ) def Delete_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3409,7 +3263,7 @@ def Delete_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Delete_field_unroller = unrolling_iterable(['targets']) def Delete_init(space, w_self, __args__): @@ -3437,7 +3291,7 @@ ) def Assign_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3451,14 +3305,14 @@ def Assign_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state 
& 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3472,7 +3326,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assign_field_unroller = unrolling_iterable(['targets', 'value']) def Assign_init(space, w_self, __args__): @@ -3505,7 +3359,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3519,14 +3373,14 @@ w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def AugAssign_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -3542,14 +3396,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def AugAssign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3563,7 +3417,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _AugAssign_field_unroller = unrolling_iterable(['target', 'op', 'value']) def AugAssign_init(space, w_self, __args__): @@ -3596,7 +3450,7 @@ w_obj = w_self.getdictvalue(space, 'dest') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dest') return space.wrap(w_self.dest) @@ -3610,10 +3464,10 @@ w_self.setdictvalue(space, 'dest', w_new_value) return w_self.deldictvalue(space, 'dest') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Print_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -3627,14 +3481,14 @@ def Print_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Print_get_nl(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'nl') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl') return space.wrap(w_self.nl) @@ -3648,7 +3502,7 @@ w_self.setdictvalue(space, 'nl', w_new_value) return w_self.deldictvalue(space, 'nl') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Print_field_unroller = unrolling_iterable(['dest', 'values', 'nl']) def Print_init(space, w_self, __args__): @@ -3682,7 +3536,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3696,14 +3550,14 @@ w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def For_get_iter(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'iter') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'iter') return space.wrap(w_self.iter) @@ -3717,10 +3571,10 @@ w_self.setdictvalue(space, 'iter', w_new_value) return w_self.deldictvalue(space, 'iter') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def For_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3734,10 +3588,10 @@ def For_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def For_get_orelse(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3751,7 +3605,7 @@ def For_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _For_field_unroller = unrolling_iterable(['target', 'iter', 'body', 'orelse']) def For_init(space, w_self, __args__): @@ -3787,7 +3641,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3801,10 +3655,10 @@ w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def While_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is 
None: @@ -3818,10 +3672,10 @@ def While_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def While_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3835,7 +3689,7 @@ def While_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _While_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def While_init(space, w_self, __args__): @@ -3870,7 +3724,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3884,10 +3738,10 @@ w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def If_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3901,10 +3755,10 @@ def If_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def If_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3918,7 +3772,7 @@ def If_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _If_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def If_init(space, w_self, __args__): @@ -3953,7 +3807,7 @@ w_obj = w_self.getdictvalue(space, 'context_expr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr') return space.wrap(w_self.context_expr) @@ -3967,14 +3821,14 @@ w_self.setdictvalue(space, 'context_expr', w_new_value) return w_self.deldictvalue(space, 'context_expr') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def With_get_optional_vars(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'optional_vars') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars') return space.wrap(w_self.optional_vars) @@ -3988,10 +3842,10 @@ w_self.setdictvalue(space, 'optional_vars', w_new_value) return w_self.deldictvalue(space, 'optional_vars') - w_self.initialization_state |= 2 + w_self.initialization_state |= 
8 def With_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4005,7 +3859,7 @@ def With_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _With_field_unroller = unrolling_iterable(['context_expr', 'optional_vars', 'body']) def With_init(space, w_self, __args__): @@ -4039,7 +3893,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -4053,14 +3907,14 @@ w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Raise_get_inst(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'inst') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst') return space.wrap(w_self.inst) @@ -4074,14 +3928,14 @@ w_self.setdictvalue(space, 'inst', w_new_value) return w_self.deldictvalue(space, 'inst') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Raise_get_tback(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'tback') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'tback') return space.wrap(w_self.tback) @@ -4095,7 +3949,7 @@ w_self.setdictvalue(space, 'tback', w_new_value) return w_self.deldictvalue(space, 'tback') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Raise_field_unroller = unrolling_iterable(['type', 'inst', 'tback']) def Raise_init(space, w_self, __args__): @@ -4124,7 +3978,7 @@ ) def TryExcept_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4138,10 +3992,10 @@ def TryExcept_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryExcept_get_handlers(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers') if w_self.w_handlers is None: @@ -4155,10 +4009,10 @@ def TryExcept_set_handlers(space, w_self, w_new_value): w_self.w_handlers = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def TryExcept_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = 
space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -4172,7 +4026,7 @@ def TryExcept_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _TryExcept_field_unroller = unrolling_iterable(['body', 'handlers', 'orelse']) def TryExcept_init(space, w_self, __args__): @@ -4204,7 +4058,7 @@ ) def TryFinally_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4218,10 +4072,10 @@ def TryFinally_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryFinally_get_finalbody(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody') if w_self.w_finalbody is None: @@ -4235,7 +4089,7 @@ def TryFinally_set_finalbody(space, w_self, w_new_value): w_self.w_finalbody = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _TryFinally_field_unroller = unrolling_iterable(['body', 'finalbody']) def TryFinally_init(space, w_self, __args__): @@ -4269,7 +4123,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -4283,14 +4137,14 @@ w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assert_get_msg(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'msg') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg') return space.wrap(w_self.msg) @@ -4304,7 +4158,7 @@ w_self.setdictvalue(space, 'msg', w_new_value) return w_self.deldictvalue(space, 'msg') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assert_field_unroller = unrolling_iterable(['test', 'msg']) def Assert_init(space, w_self, __args__): @@ -4332,7 +4186,7 @@ ) def Import_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4346,7 +4200,7 @@ def Import_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Import_field_unroller = unrolling_iterable(['names']) def Import_init(space, w_self, __args__): @@ -4378,7 +4232,7 @@ w_obj = w_self.getdictvalue(space, 'module') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not 
w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module') return space.wrap(w_self.module) @@ -4395,10 +4249,10 @@ w_self.setdictvalue(space, 'module', w_new_value) return w_self.deldictvalue(space, 'module') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ImportFrom_get_names(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4412,14 +4266,14 @@ def ImportFrom_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ImportFrom_get_level(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'level') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level') return space.wrap(w_self.level) @@ -4433,7 +4287,7 @@ w_self.setdictvalue(space, 'level', w_new_value) return w_self.deldictvalue(space, 'level') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ImportFrom_field_unroller = unrolling_iterable(['module', 'names', 'level']) def ImportFrom_init(space, w_self, __args__): @@ -4467,7 +4321,7 @@ w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -4481,14 +4335,14 @@ w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Exec_get_globals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'globals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals') return space.wrap(w_self.globals) @@ -4502,14 +4356,14 @@ w_self.setdictvalue(space, 'globals', w_new_value) return w_self.deldictvalue(space, 'globals') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Exec_get_locals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'locals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals') return space.wrap(w_self.locals) @@ -4523,7 +4377,7 @@ w_self.setdictvalue(space, 'locals', w_new_value) return w_self.deldictvalue(space, 'locals') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Exec_field_unroller = unrolling_iterable(['body', 'globals', 'locals']) def Exec_init(space, w_self, __args__): @@ -4552,7 +4406,7 @@ ) def Global_get_names(space, w_self): - if not 
w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4566,7 +4420,7 @@ def Global_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Global_field_unroller = unrolling_iterable(['names']) def Global_init(space, w_self, __args__): @@ -4598,7 +4452,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -4612,7 +4466,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Expr_field_unroller = unrolling_iterable(['value']) def Expr_init(space, w_self, __args__): @@ -4694,7 +4548,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -4708,14 +4562,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def expr_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -4729,7 +4583,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 expr.typedef = typedef.TypeDef("expr", AST.typedef, @@ -4745,7 +4599,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return boolop_to_class[w_self.op - 1]() @@ -4761,10 +4615,10 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BoolOp_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -4778,7 +4632,7 @@ def BoolOp_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _BoolOp_field_unroller = unrolling_iterable(['op', 'values']) def 
BoolOp_init(space, w_self, __args__): @@ -4811,7 +4665,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -4825,14 +4679,14 @@ w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BinOp_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -4848,14 +4702,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def BinOp_get_right(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'right') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right') return space.wrap(w_self.right) @@ -4869,7 +4723,7 @@ w_self.setdictvalue(space, 'right', w_new_value) return w_self.deldictvalue(space, 'right') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _BinOp_field_unroller = unrolling_iterable(['left', 'op', 'right']) def BinOp_init(space, w_self, __args__): @@ -4902,7 +4756,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return unaryop_to_class[w_self.op - 1]() @@ -4918,14 +4772,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def UnaryOp_get_operand(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'operand') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand') return space.wrap(w_self.operand) @@ -4939,7 +4793,7 @@ w_self.setdictvalue(space, 'operand', w_new_value) return w_self.deldictvalue(space, 'operand') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _UnaryOp_field_unroller = unrolling_iterable(['op', 'operand']) def UnaryOp_init(space, w_self, __args__): @@ -4971,7 +4825,7 @@ w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -4985,14 +4839,14 @@ 
w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Lambda_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5006,7 +4860,7 @@ w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Lambda_field_unroller = unrolling_iterable(['args', 'body']) def Lambda_init(space, w_self, __args__): @@ -5038,7 +4892,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -5052,14 +4906,14 @@ w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def IfExp_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5073,14 +4927,14 @@ w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def IfExp_get_orelse(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'orelse') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') return space.wrap(w_self.orelse) @@ -5094,7 +4948,7 @@ w_self.setdictvalue(space, 'orelse', w_new_value) return w_self.deldictvalue(space, 'orelse') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _IfExp_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def IfExp_init(space, w_self, __args__): @@ -5123,7 +4977,7 @@ ) def Dict_get_keys(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys') if w_self.w_keys is None: @@ -5137,10 +4991,10 @@ def Dict_set_keys(space, w_self, w_new_value): w_self.w_keys = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Dict_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -5154,7 +5008,7 @@ def Dict_set_values(space, w_self, w_new_value): 
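
The accessor changes above all follow one pattern: every field mask in the generated ast.py is shifted up by two bits (1 becomes 4, 2 becomes 8, 4 becomes 16, and so on) because the lineno and col_offset attributes now claim the two low bits of initialization_state. A minimal sketch of that layout, using plain Python and illustrative names rather than the actual asdl_py.py generator:

    # A minimal sketch (not the real asdl_py.py code) of the mask layout
    # implied by this diff: attributes come first, then the node's own
    # fields, so every field bit is pushed past the attribute bits.

    def assign_masks(attributes, fields):
        """Return {name: bit} with the attributes occupying the low bits."""
        masks = {}
        for i, name in enumerate(attributes + fields):
            masks[name] = 1 << i
        return masks

    # For an expr subclass such as BinOp ('left', 'op', 'right'):
    masks = assign_masks(['lineno', 'col_offset'], ['left', 'op', 'right'])
    assert masks == {'lineno': 1, 'col_offset': 2,
                     'left': 4, 'op': 8, 'right': 16}

    # Each generated getter then tests its bit before exposing the field,
    # in the spirit of the accessors in this changeset:
    def get_field(initialization_state, masks, name):
        if not initialization_state & masks[name]:
            raise AttributeError(name)  # the real code raises a wrapped
                                        # space.w_AttributeError instead
        return "value of %s" % name

With this numbering the attribute masks are the same for every node class, which is why the later hunks of this diff can drop the per-class _lineno_mask/_col_offset_mask constants and test the literal bits 1 and 2 instead.
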
w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Dict_field_unroller = unrolling_iterable(['keys', 'values']) def Dict_init(space, w_self, __args__): @@ -5184,7 +5038,7 @@ ) def Set_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -5198,7 +5052,7 @@ def Set_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Set_field_unroller = unrolling_iterable(['elts']) def Set_init(space, w_self, __args__): @@ -5230,7 +5084,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5244,10 +5098,10 @@ w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ListComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5261,7 +5115,7 @@ def ListComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _ListComp_field_unroller = unrolling_iterable(['elt', 'generators']) def ListComp_init(space, w_self, __args__): @@ -5294,7 +5148,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5308,10 +5162,10 @@ w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def SetComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5325,7 +5179,7 @@ def SetComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _SetComp_field_unroller = unrolling_iterable(['elt', 'generators']) def SetComp_init(space, w_self, __args__): @@ -5358,7 +5212,7 @@ w_obj = w_self.getdictvalue(space, 'key') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key') return space.wrap(w_self.key) @@ -5372,14 +5226,14 @@ w_self.setdictvalue(space, 'key', w_new_value) return w_self.deldictvalue(space, 'key') - 
w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def DictComp_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5393,10 +5247,10 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def DictComp_get_generators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5410,7 +5264,7 @@ def DictComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _DictComp_field_unroller = unrolling_iterable(['key', 'value', 'generators']) def DictComp_init(space, w_self, __args__): @@ -5444,7 +5298,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5458,10 +5312,10 @@ w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def GeneratorExp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5475,7 +5329,7 @@ def GeneratorExp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _GeneratorExp_field_unroller = unrolling_iterable(['elt', 'generators']) def GeneratorExp_init(space, w_self, __args__): @@ -5508,7 +5362,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5522,7 +5376,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Yield_field_unroller = unrolling_iterable(['value']) def Yield_init(space, w_self, __args__): @@ -5553,7 +5407,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -5567,10 +5421,10 @@ w_self.setdictvalue(space, 'left', w_new_value) return 
w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Compare_get_ops(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops') if w_self.w_ops is None: @@ -5584,10 +5438,10 @@ def Compare_set_ops(space, w_self, w_new_value): w_self.w_ops = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Compare_get_comparators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators') if w_self.w_comparators is None: @@ -5601,7 +5455,7 @@ def Compare_set_comparators(space, w_self, w_new_value): w_self.w_comparators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Compare_field_unroller = unrolling_iterable(['left', 'ops', 'comparators']) def Compare_init(space, w_self, __args__): @@ -5636,7 +5490,7 @@ w_obj = w_self.getdictvalue(space, 'func') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func') return space.wrap(w_self.func) @@ -5650,10 +5504,10 @@ w_self.setdictvalue(space, 'func', w_new_value) return w_self.deldictvalue(space, 'func') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Call_get_args(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') if w_self.w_args is None: @@ -5667,10 +5521,10 @@ def Call_set_args(space, w_self, w_new_value): w_self.w_args = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Call_get_keywords(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') if w_self.w_keywords is None: @@ -5684,14 +5538,14 @@ def Call_set_keywords(space, w_self, w_new_value): w_self.w_keywords = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def Call_get_starargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'starargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') return space.wrap(w_self.starargs) @@ -5705,14 +5559,14 @@ w_self.setdictvalue(space, 'starargs', w_new_value) return w_self.deldictvalue(space, 'starargs') - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 def Call_get_kwargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'kwargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 16: + if not w_self.initialization_state & 64: typename = 
space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') return space.wrap(w_self.kwargs) @@ -5726,7 +5580,7 @@ w_self.setdictvalue(space, 'kwargs', w_new_value) return w_self.deldictvalue(space, 'kwargs') - w_self.initialization_state |= 16 + w_self.initialization_state |= 64 _Call_field_unroller = unrolling_iterable(['func', 'args', 'keywords', 'starargs', 'kwargs']) def Call_init(space, w_self, __args__): @@ -5763,7 +5617,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5777,7 +5631,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Repr_field_unroller = unrolling_iterable(['value']) def Repr_init(space, w_self, __args__): @@ -5808,7 +5662,7 @@ w_obj = w_self.getdictvalue(space, 'n') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n') return w_self.n @@ -5822,7 +5676,7 @@ w_self.setdictvalue(space, 'n', w_new_value) return w_self.deldictvalue(space, 'n') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Num_field_unroller = unrolling_iterable(['n']) def Num_init(space, w_self, __args__): @@ -5853,7 +5707,7 @@ w_obj = w_self.getdictvalue(space, 's') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's') return w_self.s @@ -5867,7 +5721,7 @@ w_self.setdictvalue(space, 's', w_new_value) return w_self.deldictvalue(space, 's') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Str_field_unroller = unrolling_iterable(['s']) def Str_init(space, w_self, __args__): @@ -5898,7 +5752,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5912,14 +5766,14 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Attribute_get_attr(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'attr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr') return space.wrap(w_self.attr) @@ -5933,14 +5787,14 @@ w_self.setdictvalue(space, 'attr', w_new_value) return w_self.deldictvalue(space, 'attr') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Attribute_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj 
= w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -5956,7 +5810,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Attribute_field_unroller = unrolling_iterable(['value', 'attr', 'ctx']) def Attribute_init(space, w_self, __args__): @@ -5989,7 +5843,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -6003,14 +5857,14 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Subscript_get_slice(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'slice') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice') return space.wrap(w_self.slice) @@ -6024,14 +5878,14 @@ w_self.setdictvalue(space, 'slice', w_new_value) return w_self.deldictvalue(space, 'slice') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Subscript_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6047,7 +5901,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Subscript_field_unroller = unrolling_iterable(['value', 'slice', 'ctx']) def Subscript_init(space, w_self, __args__): @@ -6080,7 +5934,7 @@ w_obj = w_self.getdictvalue(space, 'id') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id') return space.wrap(w_self.id) @@ -6094,14 +5948,14 @@ w_self.setdictvalue(space, 'id', w_new_value) return w_self.deldictvalue(space, 'id') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Name_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6117,7 +5971,7 @@ return # need to save the original object too 
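
The op and ctx getters in this stretch (BoolOp_get_op, Attribute_get_ctx, Name_get_ctx, ...) keep the enum-like value as a small integer and only build a wrapper instance on access, via tables such as boolop_to_class and expr_context_to_class. A rough, self-contained illustration of that indexing, with made-up context classes standing in for the real PyPy AST classes:

    # Rough illustration of the int-to-class indexing used by the op/ctx
    # getters; the context classes here are stand-ins, not the real ones.

    class expr_context(object):
        pass

    class Load(expr_context):
        pass

    class Store(expr_context):
        pass

    class Del(expr_context):
        pass

    # The generated lookup tables are 0-based while the stored integer is
    # 1-based, hence the ``w_self.ctx - 1`` seen throughout the diff.
    expr_context_to_class = [Load, Store, Del]

    def get_ctx(ctx_int):
        return expr_context_to_class[ctx_int - 1]()

    assert isinstance(get_ctx(1), Load)
    assert isinstance(get_ctx(3), Del)
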
w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Name_field_unroller = unrolling_iterable(['id', 'ctx']) def Name_init(space, w_self, __args__): @@ -6145,7 +5999,7 @@ ) def List_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6159,14 +6013,14 @@ def List_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def List_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6182,7 +6036,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _List_field_unroller = unrolling_iterable(['elts', 'ctx']) def List_init(space, w_self, __args__): @@ -6211,7 +6065,7 @@ ) def Tuple_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6225,14 +6079,14 @@ def Tuple_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Tuple_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6248,7 +6102,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Tuple_field_unroller = unrolling_iterable(['elts', 'ctx']) def Tuple_init(space, w_self, __args__): @@ -6281,7 +6135,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return w_self.value @@ -6295,7 +6149,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Const_field_unroller = unrolling_iterable(['value']) def Const_init(space, w_self, __args__): @@ -6885,7 +6739,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", 
typename, 'lineno') return space.wrap(w_self.lineno) @@ -6899,14 +6753,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def excepthandler_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -6920,7 +6774,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 excepthandler.typedef = typedef.TypeDef("excepthandler", AST.typedef, @@ -6936,7 +6790,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -6950,14 +6804,14 @@ w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ExceptHandler_get_name(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -6971,10 +6825,10 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ExceptHandler_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -6988,7 +6842,7 @@ def ExceptHandler_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ExceptHandler_field_unroller = unrolling_iterable(['type', 'name', 'body']) def ExceptHandler_init(space, w_self, __args__): diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -79,6 +79,7 @@ else: self.emit("class %s(AST):" % (base,)) if sum.attributes: + self.emit("") args = ", ".join(attr.name.value for attr in sum.attributes) self.emit("def __init__(self, %s):" % (args,), 1) for attr in sum.attributes: @@ -195,17 +196,13 @@ def visitConstructor(self, cons, base, extra_attributes): self.emit("class %s(%s):" % (cons.name, base)) self.emit("") - for field in self.data.cons_attributes[cons]: - subst = (field.name, self.data.field_masks[field]) - self.emit("_%s_mask = %i" % subst, 1) - self.emit("") self.make_constructor(cons.fields, cons, extra_attributes, 
base) self.emit("") self.emit("def walkabout(self, visitor):", 1) self.emit("visitor.visit_%s(self)" % (cons.name,), 2) self.emit("") self.make_mutate_over(cons, cons.name) - self.make_var_syncer(cons.fields + self.data.cons_attributes[cons], + self.make_var_syncer(self.data.cons_attributes[cons] + cons.fields, cons, cons.name) def visitField(self, field): @@ -324,7 +321,7 @@ def visitSum(self, sum, name): for field in sum.attributes: - self.make_property(field, name, True) + self.make_property(field, name) self.make_typedef(name, "AST", sum.attributes, fields_name="_attributes") if not is_simple_sum(sum): @@ -400,13 +397,10 @@ def visitField(self, field, name): self.make_property(field, name) - def make_property(self, field, name, different_masks=False): + def make_property(self, field, name): func = "def %s_get_%s(space, w_self):" % (name, field.name) self.emit(func) - if different_masks: - flag = "w_self._%s_mask" % (field.name,) - else: - flag = self.data.field_masks[field] + flag = self.data.field_masks[field] if not field.seq: self.emit("if w_self.w_dict is not None:", 1) self.emit(" w_obj = w_self.getdictvalue(space, '%s')" % (field.name,), 1) @@ -505,7 +499,10 @@ optional_mask = 0 for i, field in enumerate(fields): flag = 1 << i - field_masks[field] = flag + if field not in field_masks: + field_masks[field] = flag + else: + assert field_masks[field] == flag if field.opt: optional_mask |= flag else: @@ -518,9 +515,9 @@ if is_simple_sum(sum): simple_types.add(tp.name.value) else: + attrs = [field for field in sum.attributes] for cons in sum.types: - attrs = [copy_field(field) for field in sum.attributes] - add_masks(cons.fields + attrs, cons) + add_masks(attrs + cons.fields, cons) cons_attributes[cons] = attrs else: prod = tp.value From noreply at buildbot.pypy.org Sat Dec 17 13:30:12 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 Dec 2011 13:30:12 +0100 (CET) Subject: [pypy-commit] pypy default: Check type of some ast nodes: Message-ID: <20111217123012.40B728205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r50612:a699f3064d8f Date: 2011-12-17 13:28 +0100 http://bitbucket.org/pypy/pypy/changeset/a699f3064d8f/ Log: Check type of some ast nodes: return.value must be an expr *subclass*, but not an expr! diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -51,6 +51,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." + state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \"%s\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \"%s\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -94,15 +112,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." 
- for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \"%s\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) - raise AssertionError("should not reach here") class mod(AST): @@ -125,7 +134,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Module') + self.missing_field(space, ['body'], 'Module') else: pass w_list = self.w_body @@ -157,7 +166,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Interactive') + self.missing_field(space, ['body'], 'Interactive') else: pass w_list = self.w_body @@ -187,7 +196,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Expression') + self.missing_field(space, ['body'], 'Expression') else: pass self.body.sync_app_attrs(space) @@ -210,7 +219,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Suite') + self.missing_field(space, ['body'], 'Suite') else: pass w_list = self.w_body @@ -256,7 +265,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef') else: pass self.args.sync_app_attrs(space) @@ -309,7 +318,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef') else: pass w_list = self.w_bases @@ -361,7 +370,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~4) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', None], 'Return') + self.missing_field(space, ['lineno', 'col_offset', None], 'Return') else: if not self.initialization_state & 4: self.value = None @@ -387,7 +396,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'targets'], 'Delete') + self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete') else: pass w_list = self.w_targets @@ -422,7 +431,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') + self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') else: pass w_list = self.w_targets @@ -457,7 +466,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') else: pass self.target.sync_app_attrs(space) @@ -486,7 +495,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~4) ^ 27: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print') + self.missing_field(space, 
['lineno', 'col_offset', None, 'values', 'nl'], 'Print') else: if not self.initialization_state & 4: self.dest = None @@ -530,7 +539,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'target', 'iter', 'body', 'orelse'], 'For') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 'orelse'], 'For') else: pass self.target.sync_app_attrs(space) @@ -581,7 +590,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') else: pass self.test.sync_app_attrs(space) @@ -631,7 +640,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') else: pass self.test.sync_app_attrs(space) @@ -680,7 +689,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~8) ^ 23: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') + self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') else: if not self.initialization_state & 8: self.optional_vars = None @@ -722,7 +731,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~28) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', None, None, None], 'Raise') + self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise') else: if not self.initialization_state & 4: self.type = None @@ -764,7 +773,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') else: pass w_list = self.w_body @@ -821,7 +830,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') else: pass w_list = self.w_body @@ -865,7 +874,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~8) ^ 7: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'test', None], 'Assert') + self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert') else: if not self.initialization_state & 8: self.msg = None @@ -892,7 +901,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'names'], 'Import') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import') else: pass w_list = self.w_names @@ -927,7 +936,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~20) ^ 11: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') + self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') else: if not self.initialization_state & 4: self.module = None @@ -967,7 +976,7 @@ def 
sync_app_attrs(self, space): if (self.initialization_state & ~24) ^ 7: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'body', None, None], 'Exec') + self.missing_field(space, ['lineno', 'col_offset', 'body', None, None], 'Exec') else: if not self.initialization_state & 8: self.globals = None @@ -996,7 +1005,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'names'], 'Global') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') else: pass w_list = self.w_names @@ -1024,7 +1033,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'value'], 'Expr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr') else: pass self.value.sync_app_attrs(space) @@ -1044,7 +1053,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Pass') + self.missing_field(space, ['lineno', 'col_offset'], 'Pass') else: pass @@ -1063,7 +1072,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Break') + self.missing_field(space, ['lineno', 'col_offset'], 'Break') else: pass @@ -1082,7 +1091,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Continue') + self.missing_field(space, ['lineno', 'col_offset'], 'Continue') else: pass @@ -1112,7 +1121,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp') else: pass w_list = self.w_values @@ -1146,7 +1155,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'left', 'op', 'right'], 'BinOp') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'op', 'right'], 'BinOp') else: pass self.left.sync_app_attrs(space) @@ -1170,7 +1179,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'op', 'operand'], 'UnaryOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'operand'], 'UnaryOp') else: pass self.operand.sync_app_attrs(space) @@ -1194,7 +1203,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'args', 'body'], 'Lambda') + self.missing_field(space, ['lineno', 'col_offset', 'args', 'body'], 'Lambda') else: pass self.args.sync_app_attrs(space) @@ -1221,7 +1230,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'IfExp') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'IfExp') else: pass self.test.sync_app_attrs(space) @@ -1251,7 +1260,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'keys', 'values'], 'Dict') + self.missing_field(space, 
['lineno', 'col_offset', 'keys', 'values'], 'Dict') else: pass w_list = self.w_keys @@ -1294,7 +1303,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'elts'], 'Set') + self.missing_field(space, ['lineno', 'col_offset', 'elts'], 'Set') else: pass w_list = self.w_elts @@ -1329,7 +1338,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'elt', 'generators'], 'ListComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'ListComp') else: pass self.elt.sync_app_attrs(space) @@ -1365,7 +1374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'elt', 'generators'], 'SetComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'SetComp') else: pass self.elt.sync_app_attrs(space) @@ -1403,7 +1412,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'key', 'value', 'generators'], 'DictComp') + self.missing_field(space, ['lineno', 'col_offset', 'key', 'value', 'generators'], 'DictComp') else: pass self.key.sync_app_attrs(space) @@ -1440,7 +1449,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'elt', 'generators'], 'GeneratorExp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'GeneratorExp') else: pass self.elt.sync_app_attrs(space) @@ -1473,7 +1482,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~4) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', None], 'Yield') + self.missing_field(space, ['lineno', 'col_offset', None], 'Yield') else: if not self.initialization_state & 4: self.value = None @@ -1503,7 +1512,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'left', 'ops', 'comparators'], 'Compare') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'ops', 'comparators'], 'Compare') else: pass self.left.sync_app_attrs(space) @@ -1556,7 +1565,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~96) ^ 31: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'func', 'args', 'keywords', None, None], 'Call') + self.missing_field(space, ['lineno', 'col_offset', 'func', 'args', 'keywords', None, None], 'Call') else: if not self.initialization_state & 32: self.starargs = None @@ -1605,7 +1614,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'value'], 'Repr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Repr') else: pass self.value.sync_app_attrs(space) @@ -1626,7 +1635,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'n'], 'Num') + self.missing_field(space, ['lineno', 'col_offset', 'n'], 'Num') else: pass @@ -1646,7 +1655,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 's'], 'Str') + 
self.missing_field(space, ['lineno', 'col_offset', 's'], 'Str') else: pass @@ -1669,7 +1678,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'value', 'attr', 'ctx'], 'Attribute') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'attr', 'ctx'], 'Attribute') else: pass self.value.sync_app_attrs(space) @@ -1694,7 +1703,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'value', 'slice', 'ctx'], 'Subscript') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'slice', 'ctx'], 'Subscript') else: pass self.value.sync_app_attrs(space) @@ -1717,7 +1726,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'id', 'ctx'], 'Name') + self.missing_field(space, ['lineno', 'col_offset', 'id', 'ctx'], 'Name') else: pass @@ -1741,7 +1750,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'elts', 'ctx'], 'List') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'List') else: pass w_list = self.w_elts @@ -1775,7 +1784,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'elts', 'ctx'], 'Tuple') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'Tuple') else: pass w_list = self.w_elts @@ -1805,7 +1814,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', 'value'], 'Const') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Const') else: pass @@ -1878,7 +1887,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 0: - missing_field(space, self.initialization_state, [], 'Ellipsis') + self.missing_field(space, [], 'Ellipsis') else: pass @@ -1905,7 +1914,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~7) ^ 0: - missing_field(space, self.initialization_state, [None, None, None], 'Slice') + self.missing_field(space, [None, None, None], 'Slice') else: if not self.initialization_state & 1: self.lower = None @@ -1938,7 +1947,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['dims'], 'ExtSlice') + self.missing_field(space, ['dims'], 'ExtSlice') else: pass w_list = self.w_dims @@ -1968,7 +1977,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['value'], 'Index') + self.missing_field(space, ['value'], 'Index') else: pass self.value.sync_app_attrs(space) @@ -2231,7 +2240,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['target', 'iter', 'ifs'], 'comprehension') + self.missing_field(space, ['target', 'iter', 'ifs'], 'comprehension') else: pass self.target.sync_app_attrs(space) @@ -2277,7 +2286,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~12) ^ 19: - missing_field(space, self.initialization_state, ['lineno', 'col_offset', None, None, 'body'], 'ExceptHandler') + self.missing_field(space, ['lineno', 'col_offset', None, None, 'body'], 
'ExceptHandler') else: if not self.initialization_state & 4: self.type = None @@ -2322,7 +2331,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~6) ^ 9: - missing_field(space, self.initialization_state, ['args', None, None, 'defaults'], 'arguments') + self.missing_field(space, ['args', None, None, 'defaults'], 'arguments') else: if not self.initialization_state & 2: self.vararg = None @@ -2365,7 +2374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['arg', 'value'], 'keyword') + self.missing_field(space, ['arg', 'value'], 'keyword') else: pass self.value.sync_app_attrs(space) @@ -2385,7 +2394,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~2) ^ 1: - missing_field(space, self.initialization_state, ['name', None], 'alias') + self.missing_field(space, ['name', None], 'alias') else: if not self.initialization_state & 2: self.asname = None @@ -2871,6 +2880,8 @@ def Expression_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3216,6 +3227,8 @@ def Return_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3320,6 +3333,8 @@ def Assign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3367,6 +3382,8 @@ def AugAssign_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3411,6 +3428,8 @@ def AugAssign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3458,6 +3477,8 @@ def Print_set_dest(space, w_self, w_new_value): try: w_self.dest = space.interp_w(expr, w_new_value, True) + if type(w_self.dest) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3544,6 +3565,8 @@ def For_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3565,6 +3588,8 @@ def For_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3649,6 +3674,8 @@ def While_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, 
space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3732,6 +3759,8 @@ def If_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3815,6 +3844,8 @@ def With_set_context_expr(space, w_self, w_new_value): try: w_self.context_expr = space.interp_w(expr, w_new_value, False) + if type(w_self.context_expr) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3836,6 +3867,8 @@ def With_set_optional_vars(space, w_self, w_new_value): try: w_self.optional_vars = space.interp_w(expr, w_new_value, True) + if type(w_self.optional_vars) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3901,6 +3934,8 @@ def Raise_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3922,6 +3957,8 @@ def Raise_set_inst(space, w_self, w_new_value): try: w_self.inst = space.interp_w(expr, w_new_value, True) + if type(w_self.inst) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3943,6 +3980,8 @@ def Raise_set_tback(space, w_self, w_new_value): try: w_self.tback = space.interp_w(expr, w_new_value, True) + if type(w_self.tback) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -4131,6 +4170,8 @@ def Assert_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -4152,6 +4193,8 @@ def Assert_set_msg(space, w_self, w_new_value): try: w_self.msg = space.interp_w(expr, w_new_value, True) + if type(w_self.msg) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -4329,6 +4372,8 @@ def Exec_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -4350,6 +4395,8 @@ def Exec_set_globals(space, w_self, w_new_value): try: w_self.globals = space.interp_w(expr, w_new_value, True) + if type(w_self.globals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -4371,6 +4418,8 @@ def Exec_set_locals(space, w_self, w_new_value): try: w_self.locals = space.interp_w(expr, w_new_value, True) + if type(w_self.locals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -4460,6 +4509,8 @@ def Expr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise 
OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -4673,6 +4724,8 @@ def BinOp_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -4717,6 +4770,8 @@ def BinOp_set_right(space, w_self, w_new_value): try: w_self.right = space.interp_w(expr, w_new_value, False) + if type(w_self.right) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -4787,6 +4842,8 @@ def UnaryOp_set_operand(space, w_self, w_new_value): try: w_self.operand = space.interp_w(expr, w_new_value, False) + if type(w_self.operand) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -4854,6 +4911,8 @@ def Lambda_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -4900,6 +4959,8 @@ def IfExp_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -4921,6 +4982,8 @@ def IfExp_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -4942,6 +5005,8 @@ def IfExp_set_orelse(space, w_self, w_new_value): try: w_self.orelse = space.interp_w(expr, w_new_value, False) + if type(w_self.orelse) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -5092,6 +5157,8 @@ def ListComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -5156,6 +5223,8 @@ def SetComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -5220,6 +5289,8 @@ def DictComp_set_key(space, w_self, w_new_value): try: w_self.key = space.interp_w(expr, w_new_value, False) + if type(w_self.key) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -5241,6 +5312,8 @@ def DictComp_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -5306,6 +5379,8 @@ def GeneratorExp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise 
OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -5370,6 +5445,8 @@ def Yield_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -5415,6 +5492,8 @@ def Compare_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -5498,6 +5577,8 @@ def Call_set_func(space, w_self, w_new_value): try: w_self.func = space.interp_w(expr, w_new_value, False) + if type(w_self.func) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -5553,6 +5634,8 @@ def Call_set_starargs(space, w_self, w_new_value): try: w_self.starargs = space.interp_w(expr, w_new_value, True) + if type(w_self.starargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -5574,6 +5657,8 @@ def Call_set_kwargs(space, w_self, w_new_value): try: w_self.kwargs = space.interp_w(expr, w_new_value, True) + if type(w_self.kwargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -5625,6 +5710,8 @@ def Repr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -5760,6 +5847,8 @@ def Attribute_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -5851,6 +5940,8 @@ def Subscript_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -5872,6 +5963,8 @@ def Subscript_set_slice(space, w_self, w_new_value): try: w_self.slice = space.interp_w(slice, w_new_value, False) + if type(w_self.slice) is slice: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6261,6 +6354,8 @@ def Slice_set_lower(space, w_self, w_new_value): try: w_self.lower = space.interp_w(expr, w_new_value, True) + if type(w_self.lower) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6282,6 +6377,8 @@ def Slice_set_upper(space, w_self, w_new_value): try: w_self.upper = space.interp_w(expr, w_new_value, True) + if type(w_self.upper) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6303,6 +6400,8 @@ def Slice_set_step(space, w_self, w_new_value): try: w_self.step = space.interp_w(expr, w_new_value, True) + if type(w_self.step) is expr: + raise 
OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6392,6 +6491,8 @@ def Index_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6661,6 +6762,8 @@ def comprehension_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6682,6 +6785,8 @@ def comprehension_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6798,6 +6903,8 @@ def ExceptHandler_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6819,6 +6926,8 @@ def ExceptHandler_set_name(space, w_self, w_new_value): try: w_self.name = space.interp_w(expr, w_new_value, True) + if type(w_self.name) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -7016,6 +7125,8 @@ def keyword_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -115,7 +115,7 @@ else: names.append(repr(field.name.value)) sub = (", ".join(names), name.value) - self.emit("missing_field(space, self.initialization_state, [%s], %r)" + self.emit("self.missing_field(space, [%s], %r)" % sub, 3) self.emit("else:", 2) # Fill in all the default fields. @@ -452,6 +452,11 @@ config = (field.name, field.type, repr(field.opt)) self.emit("w_self.%s = space.interp_w(%s, w_new_value, %s)" % config, 2) + if field.type.value not in self.data.prod_simple: + self.emit("if type(w_self.%s) is %s:" % ( + field.name, field.type), 2) + self.emit("raise OperationError(space.w_TypeError, " + "space.w_None)", 3) else: level = 2 if field.opt and field.type.value != "int": @@ -585,6 +590,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." 
+        state = self.initialization_state
+        for i in range(len(required)):
+            if (state >> i) & 1:
+                continue # field is present
+            missing = required[i]
+            if missing is None:
+                continue # field is optional
+            w_obj = self.getdictvalue(space, missing)
+            if w_obj is None:
+                err = "required field \\"%s\\" missing from %s"
+                raise operationerrfmt(space.w_TypeError, err, missing, host)
+            else:
+                err = "incorrect type for field \\"%s\\" in %s"
+                raise operationerrfmt(space.w_TypeError, err, missing, host)
+        raise AssertionError("should not reach here")
+
 class NodeVisitorNotImplemented(Exception):
     pass
@@ -628,15 +651,6 @@
 )
-def missing_field(space, state, required, host):
-    "Find which required field is missing."
-    for i in range(len(required)):
-        if not (state >> i) & 1:
-            missing = required[i]
-            if missing is not None:
-                err = "required field \\"%s\\" missing from %s"
-                raise operationerrfmt(space.w_TypeError, err, missing, host)
-    raise AssertionError("should not reach here")
 """
diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py
--- a/pypy/module/_ast/test/test_ast.py
+++ b/pypy/module/_ast/test/test_ast.py
@@ -285,3 +285,10 @@
                          [], lineno=1, col_offset=0)
            ])
        exec compile(body, '', 'exec')
+
+    def test_invalid_sum(self):
+        import _ast as ast
+        pos = dict(lineno=2, col_offset=3)
+        m = ast.Module([ast.Expr(ast.expr(**pos), **pos)])
+        exc = raises(TypeError, compile, m, "", "exec")
+

From noreply at buildbot.pypy.org Sat Dec 17 14:27:31 2011
From: noreply at buildbot.pypy.org (fijal)
Date: Sat, 17 Dec 2011 14:27:31 +0100 (CET)
Subject: [pypy-commit] pypy refactor-signature: clean up descr_len
Message-ID: <20111217132731.C49808205C@wyvern.cs.uni-duesseldorf.de>

Author: Maciej Fijalkowski
Branch: refactor-signature
Changeset: r50613:e3cef40afa0d
Date: 2011-12-17 15:27 +0200
http://bitbucket.org/pypy/pypy/changeset/e3cef40afa0d/

Log: clean up descr_len

diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py
--- a/pypy/module/micronumpy/interp_numarray.py
+++ b/pypy/module/micronumpy/interp_numarray.py
@@ -394,7 +394,10 @@
         return self.get_concrete().copy()

     def descr_len(self, space):
-        return self.get_concrete().descr_len(space)
+        if len(self.shape):
+            return space.wrap(self.shape[0])
+        raise OperationError(space.w_TypeError, space.wrap(
+            "len() of unsized object"))

     def descr_repr(self, space):
         res = StringBuilder()
@@ -931,11 +934,6 @@
         # This is currently not possible to be called from anywhere.
         raise NotImplementedError

-    def descr_len(self, space):
-        if self.shape:
-            return space.wrap(self.shape[0])
-        return space.wrap(1)
-
     def setshape(self, space, new_shape):
         if len(self.shape) < 1:
             return
@@ -1059,12 +1057,6 @@
         )
         return array

-    def descr_len(self, space):
-        if len(self.shape):
-            return space.wrap(self.shape[0])
-        raise OperationError(space.w_TypeError, space.wrap(
-            "len() of unsized object"))
-
     def setitem_w(self, space, item, w_value):
         return self.setitem(item, self.dtype.coerce(space, w_value))

From noreply at buildbot.pypy.org Sat Dec 17 14:47:59 2011
From: noreply at buildbot.pypy.org (hakanardo)
Date: Sat, 17 Dec 2011 14:47:59 +0100 (CET)
Subject: [pypy-commit] pypy jit-multilabel: cleanup
Message-ID: <20111217134759.70F318205C@wyvern.cs.uni-duesseldorf.de>

Author: Hakan Ardo
Branch: jit-multilabel
Changeset: r50614:4054e3b40851
Date: 2011-12-17 13:34 +0100
http://bitbucket.org/pypy/pypy/changeset/4054e3b40851/

Log: cleanup

diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py
--- a/pypy/jit/metainterp/optimizeopt/unroll.py
+++ b/pypy/jit/metainterp/optimizeopt/unroll.py
@@ -132,7 +132,6 @@
         start_resumedescr = self.optimizer.loop.start_resumedescr.clone_if_mutable()
         assert isinstance(start_resumedescr, ResumeGuardDescr)
         start_resumedescr.rd_snapshot = self.fix_snapshot(jump_args, start_resumedescr.rd_snapshot)
-        # FIXME: I dont thnik we need fix_snapshot anymore

         modifier = VirtualStateAdder(self.optimizer)
         virtual_state = modifier.get_virtual_state(jump_args)
@@ -188,7 +187,6 @@
         self.short = target_token.short_preamble[:]
         self.short_seen = {}
         self.short_boxes = exported_state.short_boxes
-        self.imported_state = exported_state
         self.inputargs = targetop.getarglist()
         self.initial_virtual_state = target_token.virtual_state
         self.start_resumedescr = target_token.start_resumedescr

From noreply at buildbot.pypy.org Sat Dec 17 14:48:00 2011
From: noreply at buildbot.pypy.org (hakanardo)
Date: Sat, 17 Dec 2011 14:48:00 +0100 (CET)
Subject: [pypy-commit] pypy jit-multilabel: rename start_resumedescr to resume_at_jump_descr
Message-ID: <20111217134800.A19B78205C@wyvern.cs.uni-duesseldorf.de>

Author: Hakan Ardo
Branch: jit-multilabel
Changeset: r50615:2653c963abd8
Date: 2011-12-17 14:47 +0100
http://bitbucket.org/pypy/pypy/changeset/2653c963abd8/

Log: rename start_resumedescr to resume_at_jump_descr

diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py
--- a/pypy/jit/metainterp/compile.py
+++ b/pypy/jit/metainterp/compile.py
@@ -105,7 +105,7 @@

 def compile_loop(metainterp, greenkey, start, inputargs, jumpargs,
-                 start_resumedescr, full_preamble_needed=True):
+                 resume_at_jump_descr, full_preamble_needed=True):
     """Try to compile a new procedure by closing the current history back
     to the first operation.
""" @@ -200,7 +200,7 @@ part = create_empty_loop(metainterp) part.inputargs = inputargs[:] - part.start_resumedescr = start_resumedescr + part.resume_at_jump_descr = resume_at_jump_descr h_ops = history.operations part.operations = [partial_trace.operations[-1]] + \ @@ -750,7 +750,7 @@ metainterp_sd.stats.add_jitcell_token(jitcell_token) -def compile_trace(metainterp, resumekey, start_resumedescr=None): +def compile_trace(metainterp, resumekey, resume_at_jump_descr=None): """Try to compile a new bridge leading from the beginning of the history to some existing place. """ @@ -766,7 +766,7 @@ # clone ops, as optimize_bridge can mutate the ops new_trace.operations = [op.clone() for op in metainterp.history.operations] - new_trace.start_resumedescr = start_resumedescr + new_trace.resume_at_jump_descr = resume_at_jump_descr metainterp_sd = metainterp.staticdata state = metainterp.jitdriver_sd.warmstate if isinstance(resumekey, ResumeAtPositionDescr): diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -765,7 +765,7 @@ call_pure_results = None logops = None quasi_immutable_deps = None - start_resumedescr = None + resume_at_jump_descr = None def _token(*args): raise Exception("TreeLoop.token is killed") diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -15,7 +15,7 @@ part = TreeLoop('part') part.inputargs = loop.inputargs - part.start_resumedescr = FakeDescrWithSnapshot() + part.resume_at_jump_descr = FakeDescrWithSnapshot() token = loop.original_jitcell_token optimized = TreeLoop('optimized') diff --git a/pypy/jit/metainterp/optimizeopt/test/test_util.py b/pypy/jit/metainterp/optimizeopt/test/test_util.py --- a/pypy/jit/metainterp/optimizeopt/test/test_util.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_util.py @@ -430,7 +430,7 @@ preamble = TreeLoop('preamble') preamble.inputargs = inputargs - preamble.start_resumedescr = FakeDescrWithSnapshot() + preamble.resume_at_jump_descr = FakeDescrWithSnapshot() token = JitCellToken() preamble.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(token))] + \ @@ -441,7 +441,7 @@ assert preamble.operations[-1].getopnum() == rop.LABEL inliner = Inliner(inputargs, jump_args) - loop.start_resumedescr = preamble.start_resumedescr + loop.resume_at_jump_descr = preamble.resume_at_jump_descr loop.operations = [preamble.operations[-1]] + \ [inliner.inline_op(op, clone=False) for op in cloned_operations] + \ [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jump_args], diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -128,10 +128,10 @@ original_jump_args = targetop.getarglist() jump_args = [self.getvalue(a).get_key_box() for a in original_jump_args] - assert self.optimizer.loop.start_resumedescr - start_resumedescr = self.optimizer.loop.start_resumedescr.clone_if_mutable() - assert isinstance(start_resumedescr, ResumeGuardDescr) - start_resumedescr.rd_snapshot = self.fix_snapshot(jump_args, start_resumedescr.rd_snapshot) + assert self.optimizer.loop.resume_at_jump_descr + resume_at_jump_descr = self.optimizer.loop.resume_at_jump_descr.clone_if_mutable() + assert 
isinstance(resume_at_jump_descr, ResumeGuardDescr) + resume_at_jump_descr.rd_snapshot = self.fix_snapshot(jump_args, resume_at_jump_descr.rd_snapshot) modifier = VirtualStateAdder(self.optimizer) virtual_state = modifier.get_virtual_state(jump_args) @@ -156,7 +156,7 @@ targetop.initarglist(inputargs) target_token.virtual_state = virtual_state target_token.short_preamble = [ResOperation(rop.LABEL, short_inputargs, None)] - target_token.start_resumedescr = start_resumedescr + target_token.resume_at_jump_descr = resume_at_jump_descr exported_values = {} for box in inputargs: @@ -189,7 +189,7 @@ self.short_boxes = exported_state.short_boxes self.inputargs = targetop.getarglist() self.initial_virtual_state = target_token.virtual_state - self.start_resumedescr = target_token.start_resumedescr + self.resume_at_jump_descr = target_token.resume_at_jump_descr seen = {} for box in self.inputargs: @@ -345,7 +345,7 @@ if op.is_guard(): op = op.clone() op.setfailargs(None) - descr = target_token.start_resumedescr.clone_if_mutable() + descr = target_token.resume_at_jump_descr.clone_if_mutable() op.setdescr(descr) short[i] = op @@ -364,8 +364,8 @@ for i in range(len(short)): short[i] = inliner.inline_op(short[i]) - target_token.start_resumedescr = self.start_resumedescr.clone_if_mutable() - inliner.inline_descr_inplace(target_token.start_resumedescr) + target_token.resume_at_jump_descr = self.resume_at_jump_descr.clone_if_mutable() + inliner.inline_descr_inplace(target_token.resume_at_jump_descr) # Forget the values to allow them to be freed for box in short[0].getarglist(): @@ -403,7 +403,7 @@ if not isinstance(a, Const) and a not in self.short_seen: self.add_op_to_short(self.short_boxes.producer(a), emit, guards_needed) if op.is_guard(): - descr = self.start_resumedescr.clone_if_mutable() + descr = self.resume_at_jump_descr.clone_if_mutable() op.setdescr(descr) if guards_needed and self.short_boxes.has_producer(op.result): @@ -502,7 +502,7 @@ for guard in extra_guards: if guard.is_guard(): - descr = target.start_resumedescr.clone_if_mutable() + descr = target.resume_at_jump_descr.clone_if_mutable() inliner.inline_descr_inplace(descr) guard.setdescr(descr) self.optimizer.send_extra_operation(guard) diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -2062,7 +2062,7 @@ cell = self.jitdriver_sd.warmstate.jit_cell_at_key(greenkey) return cell.get_procedure_token() - def compile_loop(self, original_boxes, live_arg_boxes, start, start_resumedescr): + def compile_loop(self, original_boxes, live_arg_boxes, start, resume_at_jump_descr): num_green_args = self.jitdriver_sd.num_green_args greenkey = original_boxes[:num_green_args] if not self.partial_trace: @@ -2072,13 +2072,13 @@ target_token = compile.compile_retrace(self, greenkey, start, original_boxes[num_green_args:], live_arg_boxes[num_green_args:], - start_resumedescr, self.partial_trace, + resume_at_jump_descr, self.partial_trace, self.resumekey) else: target_token = compile.compile_loop(self, greenkey, start, original_boxes[num_green_args:], live_arg_boxes[num_green_args:], - start_resumedescr) + resume_at_jump_descr) if target_token is not None: assert isinstance(target_token, TargetToken) self.jitdriver_sd.warmstate.attach_procedure_to_interp(greenkey, target_token.targeting_jitcell_token) @@ -2090,7 +2090,7 @@ jitcell_token = target_token.targeting_jitcell_token self.raise_continue_running_normally(live_arg_boxes, jitcell_token) - def 
compile_trace(self, live_arg_boxes, start_resumedescr): + def compile_trace(self, live_arg_boxes, resume_at_jump_descr): num_green_args = self.jitdriver_sd.num_green_args greenkey = live_arg_boxes[:num_green_args] target_jitcell_token = self.get_procedure_token(greenkey) @@ -2102,7 +2102,7 @@ self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None, descr=target_jitcell_token) try: - target_token = compile.compile_trace(self, self.resumekey, start_resumedescr) + target_token = compile.compile_trace(self, self.resumekey, resume_at_jump_descr) finally: self.history.operations.pop() # remove the JUMP if target_token is not None: # raise if it *worked* correctly @@ -2111,7 +2111,7 @@ self.raise_continue_running_normally(live_arg_boxes, jitcell_token) def compile_bridge_and_loop(self, original_boxes, live_arg_boxes, start, - bridge_arg_boxes, start_resumedescr): + bridge_arg_boxes, resume_at_jump_descr): num_green_args = self.jitdriver_sd.num_green_args original_inputargs = self.history.inputargs greenkey = original_boxes[:num_green_args] @@ -2121,7 +2121,7 @@ greenkey = original_boxes[:num_green_args] self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None) loop_token = compile.compile_new_loop(self, [], greenkey, start, - start_resumedescr, False) + resume_at_jump_descr, False) self.history.operations.pop() # remove the JUMP if loop_token is None: self.history.inputargs = original_inputargs diff --git a/pypy/jit/metainterp/test/test_virtualstate.py b/pypy/jit/metainterp/test/test_virtualstate.py --- a/pypy/jit/metainterp/test/test_virtualstate.py +++ b/pypy/jit/metainterp/test/test_virtualstate.py @@ -449,7 +449,7 @@ if hasattr(self, 'callinfocollection'): metainterp_sd.callinfocollection = self.callinfocollection # - bridge.start_resumedescr = FakeDescrWithSnapshot() + bridge.resume_at_jump_descr = FakeDescrWithSnapshot() optimize_trace(metainterp_sd, bridge, self.enable_opts) From noreply at buildbot.pypy.org Sat Dec 17 15:27:32 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 17 Dec 2011 15:27:32 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: Refactoring in progress - code removal. Message-ID: <20111217142732.EB0898205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50616:2e85e96a3a5b Date: 2011-12-17 16:26 +0200 http://bitbucket.org/pypy/pypy/changeset/2e85e96a3a5b/ Log: Refactoring in progress - code removal. diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -633,8 +633,8 @@ shape += concr.shape[s:] strides += concr.strides[s:] backstrides += concr.backstrides[s:] - return W_NDimSlice(concr, start, strides[:], backstrides[:], - shape[:]) + return W_NDimSlice(start, strides[:], backstrides[:], + shape[:], concr) def descr_reshape(self, space, args_w): """reshape(...) 
@@ -664,8 +664,8 @@ new_backstrides = [0] * ndims for nd in range(ndims): new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] - arr = W_NDimSlice(self, self.start, new_strides, - new_backstrides, new_shape) + arr = W_NDimSlice(self.start, new_strides, new_backstrides, + new_shape, self) else: # Create copy with contiguous data arr = concrete.copy() @@ -696,8 +696,8 @@ strides.append(concrete.strides[i]) backstrides.append(concrete.backstrides[i]) shape.append(concrete.shape[i]) - return space.wrap(W_NDimSlice(concrete, self.start, strides[:], - backstrides[:], shape[:])) + return space.wrap(W_NDimSlice(self.start, strides[:], + backstrides[:], shape[:], concrete)) def descr_get_flatiter(self, space): return space.wrap(W_FlatIterator(self)) @@ -875,11 +875,39 @@ class ConcreteArray(BaseArray): """ An array that have actual storage, whether owned or not """ - def __init__(self, shape, order): + def __init__(self, size, shape, dtype, order='C', parent=None): + self.size = size + self.parent = parent + if parent is not None: + self.storage = parent.storage + else: + self.storage = dtype.malloc(size) self.order = order + self.dtype = dtype if self.strides is None: self.calc_strides(shape) BaseArray.__init__(self, shape) + if parent is not None: + self.invalidates = parent.invalidates + + def get_concrete(self): + return self + + def find_size(self): + return self.size + + def find_dtype(self): + return self.dtype + + def getitem(self, item): + return self.dtype.getitem(self.storage, item) + + def setitem_w(self, space, item, w_value): + return self.setitem(item, self.dtype.coerce(space, w_value)) + + def setitem(self, item, value): + self.invalidated() + self.dtype.setitem(self.storage, item, value) def calc_strides(self, shape): strides = [] @@ -898,41 +926,51 @@ self.strides = strides[:] self.backstrides = backstrides[:] - -class ConcreteViewArray(ConcreteArray): - """ - Class for representing views of arrays, they will reflect changes of parent - arrays. Example: slices - """ - def __init__(self, parent, strides, backstrides, shape): +class W_NDimSlice(ConcreteArray): + def __init__(self, start, strides, backstrides, shape, parent): + if isinstance(parent, W_NDimSlice): + parent = parent.parent + size = 1 + for sh in shape: + size *= sh self.strides = strides self.backstrides = backstrides - ConcreteArray.__init__(self, shape, parent.order) - assert isinstance(parent, W_NDimArray) - self.parent = parent - self.invalidates = parent.invalidates + ConcreteArray.__init__(self, size, shape, parent.dtype, parent.order, + parent) + self.start = start - def get_concrete(self): - # in fact, ConcreteViewArray never gets "concrete" as it never - # stores data. - # This implementation is needed for BaseArray getitem/setitem to work, - # can be refactored. 
- self.parent.get_concrete() - return self + def setslice(self, space, w_value): + res_shape = shape_agreement(space, self.shape, w_value.shape) + self._sliceloop(w_value, res_shape) - def getitem(self, item): - return self.parent.getitem(item) + def _sliceloop(self, source, res_shape): + sig = source.find_sig() + frame = sig.create_frame(source) + res_iter = ViewIterator(self) + shapelen = len(res_shape) + while not res_iter.done(): + slice_driver.jit_merge_point(sig=sig, + frame=frame, + shapelen=shapelen, + self=self, source=source, + res_iter=res_iter) + self.setitem(res_iter.offset, sig.eval(frame, source).convert_to( + self.find_dtype())) + frame.next(shapelen) + res_iter = res_iter.next(shapelen) - def eval(self, iter): - return self.parent.getitem(iter.get_offset()) + def copy(self): + array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) + iter = ViewIterator(self) + a_iter = ArrayIterator(array.size) + while not iter.done(): + array.setitem(a_iter.offset, self.getitem(iter.offset)) + iter = iter.next(len(self.shape)) + a_iter = a_iter.next(len(array.shape)) + return array - @unwrap_spec(item=int) - def setitem_w(self, space, item, w_value): - return self.parent.setitem_w(space, item, w_value) - - def setitem(self, item, value): - # This is currently not possible to be called from anywhere. - raise NotImplementedError + def create_sig(self): + return signature.ViewSignature(self.parent.create_sig()) def setshape(self, space, new_shape): if len(self.shape) < 1: @@ -968,85 +1006,11 @@ self.backstrides = new_backstrides[:] self.shape = new_shape[:] -class W_NDimSlice(ConcreteViewArray): - def __init__(self, parent, start, strides, backstrides, shape): - if isinstance(parent, W_NDimSlice): - parent = parent.parent - else: - # XXX this should not force the array, but it did before the - # refactoring anyway, just in a more obscure way - parent = parent.get_concrete() - ConcreteViewArray.__init__(self, parent, strides, backstrides, shape) - self.start = start - self.size = 1 - for sh in shape: - self.size *= sh - - def find_size(self): - return self.size - - def find_dtype(self): - return self.parent.find_dtype() - - def setslice(self, space, w_value): - res_shape = shape_agreement(space, self.shape, w_value.shape) - self._sliceloop(w_value, res_shape) - - def _sliceloop(self, source, res_shape): - sig = source.find_sig() - frame = sig.create_frame(source) - res_iter = ViewIterator(self) - shapelen = len(res_shape) - while not res_iter.done(): - slice_driver.jit_merge_point(sig=sig, - frame=frame, - shapelen=shapelen, - self=self, source=source, - res_iter=res_iter) - self.setitem(res_iter.offset, sig.eval(frame, source).convert_to( - self.find_dtype())) - frame.next(shapelen) - res_iter = res_iter.next(shapelen) - - def setitem(self, item, value): - self.parent.setitem(item, value) - - def copy(self): - array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) - iter = ViewIterator(self) - a_iter = ArrayIterator(array.size) - while not iter.done(): - array.setitem(a_iter.offset, self.getitem(iter.offset)) - iter = iter.next(len(self.shape)) - a_iter = a_iter.next(len(array.shape)) - return array - - def create_sig(self): - return signature.ViewSignature(self.parent.create_sig()) - class W_NDimArray(ConcreteArray): """ A class representing contiguous array. 
We know that each iteration by say ufunc will increase the data index by one """ _immutable_fields_ = ['storage'] - - def __init__(self, size, shape, dtype, order='C'): - ConcreteArray.__init__(self, shape, order) - self.size = size - self.dtype = dtype - self.storage = dtype.malloc(size) - - def get_concrete(self): - return self - - def find_size(self): - return self.size - - def find_dtype(self): - return self.dtype - - def getitem(self, item): - return self.dtype.getitem(self.storage, item) def copy(self): array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) @@ -1057,13 +1021,6 @@ ) return array - def setitem_w(self, space, item, w_value): - return self.setitem(item, self.dtype.coerce(space, w_value)) - - def setitem(self, item, value): - self.invalidated() - self.dtype.setitem(self.storage, item, value) - def setshape(self, space, new_shape): self.shape = new_shape self.calc_strides(new_shape) @@ -1216,15 +1173,15 @@ ) -class W_FlatIterator(ConcreteViewArray): +class W_FlatIterator(ConcreteArray): @jit.unroll_safe def __init__(self, arr): size = 1 for sh in arr.shape: size *= sh - ConcreteViewArray.__init__(self, arr.get_concrete(), [arr.strides[-1]], - [arr.backstrides[-1]], [size]) + ConcreteArray.__init__(self, arr.get_concrete(), [arr.strides[-1]], + [arr.backstrides[-1]], [size]) self.shapelen = len(arr.shape) self.arr = arr self.iter = OneDimIterator(self.arr.start, self.strides[0], diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -132,21 +132,21 @@ return 'Array' def _invent_array_numbering(self, arr, cache): - from pypy.module.micronumpy.interp_numarray import W_NDimArray - assert isinstance(arr, W_NDimArray) + from pypy.module.micronumpy.interp_numarray import ConcreteArray + assert isinstance(arr, ConcreteArray) self.array_no = _add_ptr_to_cache(arr.storage, cache) def _create_iter(self, iterlist, arraylist, arr): - from pypy.module.micronumpy.interp_numarray import W_NDimArray - assert isinstance(arr, W_NDimArray) + from pypy.module.micronumpy.interp_numarray import ConcreteArray + assert isinstance(arr, ConcreteArray) if self.iter_no >= len(iterlist): iterlist.append(ArrayIterator(arr.size)) if self.array_no >= len(arraylist): arraylist.append(arr.storage) def eval(self, frame, arr): - from pypy.module.micronumpy.interp_numarray import W_NDimArray - assert isinstance(arr, W_NDimArray) + from pypy.module.micronumpy.interp_numarray import ConcreteArray + assert isinstance(arr, ConcreteArray) iter = frame.iterators[self.iter_no] return self.dtype.getitem(frame.arrays[self.array_no], iter.offset) @@ -193,21 +193,7 @@ assert isinstance(arr, Scalar) return arr.value -class ViewSignature(Signature): - _immutable_fields_ = ['child'] - - def __init__(self, child): - self.child = child - - def eq(self, other, compare_array_no=True): - if type(self) is not type(other): - return False - assert isinstance(other, ViewSignature) - return self.child.eq(other.child, compare_array_no) - - def hash(self): - return self.child.hash() ^ 0x12345 - +class ViewSignature(ArraySignature): def debug_repr(self): return 'Slice(%s)' % self.child.debug_repr() @@ -217,28 +203,6 @@ allnumbers.append(no) self.iter_no = no - def _invent_array_numbering(self, arr, cache): - from pypy.module.micronumpy.interp_numarray import ConcreteViewArray - assert isinstance(arr, ConcreteViewArray) - self.array_no = _add_ptr_to_cache(arr.parent.storage, cache) - - def 
_create_iter(self, iterlist, arraylist, arr): - from pypy.module.micronumpy.interp_numarray import ConcreteViewArray - - assert isinstance(arr, ConcreteViewArray) - if self.iter_no >= len(iterlist): - iterlist.append(ViewIterator(arr)) - if self.array_no >= len(arraylist): - arraylist.append(arr.parent.storage) - - def eval(self, frame, arr): - from pypy.module.micronumpy.interp_numarray import W_NDimSlice - assert isinstance(arr, W_NDimSlice) - arr = arr.get_concrete() - iter = frame.iterators[self.iter_no] - return arr.find_dtype().getitem(frame.arrays[self.array_no], - iter.offset) - class FlatiterSignature(ViewSignature): def debug_repr(self): return 'FlatIter(%s)' % self.child.debug_repr() From noreply at buildbot.pypy.org Sat Dec 17 15:27:34 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 17 Dec 2011 15:27:34 +0100 (CET) Subject: [pypy-commit] pypy default: Temporarily revert 976baeae86ed, seems to break some stuff for obscure reasons Message-ID: <20111217142734.161BC8205C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50617:e42742946b48 Date: 2011-12-17 16:27 +0200 http://bitbucket.org/pypy/pypy/changeset/e42742946b48/ Log: Temporarily revert 976baeae86ed, seems to break some stuff for obscure reasons diff --git a/pypy/tool/gcc_cache.py b/pypy/tool/gcc_cache.py --- a/pypy/tool/gcc_cache.py +++ b/pypy/tool/gcc_cache.py @@ -11,9 +11,6 @@ # Import 'platform' every time, the compiler may have been changed from pypy.translator.platform import platform cache_dir = cache_dir_root.join(cachename).ensure(dir=1) - c_files.extend([py.path.local(f) for f in eci.separate_module_files]) - eci = ExternalCompilationInfo(**eci._copy_attributes()) - eci.separate_module_files = () filecontents = [c_file.read() for c_file in c_files] key = repr((filecontents, eci, platform.key())) hash = md5(key).hexdigest() From noreply at buildbot.pypy.org Sat Dec 17 15:43:50 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Dec 2011 15:43:50 +0100 (CET) Subject: [pypy-commit] pypy default: kill the comments, they no longer apply. Message-ID: <20111217144350.253398205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50618:a148c5d7c283 Date: 2011-12-16 12:54 +0100 http://bitbucket.org/pypy/pypy/changeset/a148c5d7c283/ Log: kill the comments, they no longer apply. diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -17,13 +17,6 @@ g() log = self.run(main, [500]) - # XXX XXX this test fails so far because of a detail that - # changed with jit-simplify-backendintf. We should try to - # think of a way to be more resistent against such details. - # The issue is that we now get one Tracing, then go back - # to the interpreter hoping to immediately run the JITted - # code; but instead, we Trace again, just because another - # counter was also about to reach its limit... loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ i16 = force_token() @@ -34,7 +27,7 @@ jump(..., descr=...) """) assert loop.match_by_id("subtract", """ - setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me + setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me i2 = int_sub_ovf(i1, 42) guard_no_overflow(descr=...) 
""") From noreply at buildbot.pypy.org Sat Dec 17 15:43:51 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Dec 2011 15:43:51 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: In-progress: found a hopefully reasonable solution, namely a Message-ID: <20111217144351.53FC18205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50619:dc4fca252174 Date: 2011-12-17 15:28 +0100 http://bitbucket.org/pypy/pypy/changeset/dc4fca252174/ Log: In-progress: found a hopefully reasonable solution, namely a CALL_MALLOC_GC operation that is basically just a CALL, invoking whatever helper rewrite.py wants to introduce. diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -42,6 +42,15 @@ self.field_unicodelen_descr = get_field_arraylen_descr(self, rstr.UNICODE) + def _ready(self): + MALLOC_FIXEDSIZE = lltype.Ptr( + lltype.FuncType([lltype.Signed], llmemory.GCREF)) + self.malloc_fixedsize_fn = llhelper(MALLOC_FIXEDSIZE, + self.malloc_fixedsize) + self.c_malloc_fixedsize_fn = ConstInt( + heaptracker.adr2int(llmemory.cast_ptr_to_adr( + self.malloc_fixedsize_fn))) + def _freeze_(self): return True def initialize(self): @@ -55,28 +64,11 @@ def freeing_block(self, start, stop): pass - def get_funcptr_for_malloc_gc_fixed(self): - """Returns a function pointer to a function that implements - the simple case of MALLOC_GC: the case where the variable size - is zero. The function pointer has signature (size) -> GCREF.""" - raise NotImplementedError - - def get_funcptr_for_malloc_gc_variable(self): - """Returns a function pointer to a function that implements - the complex case of MALLOC_GC: the case where the variable size - is not known to be zero. The signature is: - (base_size, num_elem, item_size) -> GCREF""" - raise NotImplementedError - def gc_malloc(self, sizedescr): """Blackhole: do a 'bh_new'. 
Also used for 'bh_new_with_vtable', with the vtable pointer set manually afterwards.""" assert isinstance(sizedescr, BaseSizeDescr) - mallocptr = self.get_funcptr_for_malloc_gc_fixed() - res = mallocptr(sizedescr.size) - if res: - self._set_tid(res, sizedescr.tid) - return res + return self._gc_malloc(sizedescr.size, sizedescr.tid) def gc_malloc_array(self, arraydescr, num_elem): assert isinstance(arraydescr, BaseArrayDescr) @@ -98,18 +90,6 @@ self.unicode_ofs_length, self.unicode_type_id) - def _gc_malloc_array(self, basesize, num_elem, itemsize, ofs_length, tid): - mallocptr = self.get_funcptr_for_malloc_gc_variable() - res = mallocptr(basesize, num_elem, itemsize) - if res: - self._set_tid(res, tid) - arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) - arrayptr[ofs_length/WORD] = num_elem - return res - - def _set_tid(self, gcptr, tid): - pass # unless overridden - def _record_constptrs(self, op, gcrefs_output_list): for i in range(op.numargs()): v = op.getarg(i) @@ -180,24 +160,35 @@ def __init__(self, gcdescr, translator, rtyper): GcLLDescription.__init__(self, gcdescr, translator, rtyper) # grab a pointer to the Boehm 'malloc' function - self.malloc_fn_ptr = self.configure_boehm_once() + malloc_fn_ptr = self.configure_boehm_once() + self.malloc_fn_ptr = malloc_fn_ptr # - def malloc_gc_variable(basesize, num_elem, itemsize): - try: - size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) - except OverflowError: - return lltype.nullptr(llmemory.GCREF.TO) - return self.malloc_fn_ptr(size) + def malloc_fixedsize(size): + res = malloc_fn_ptr(size) + if not res: + raise MemoryError + return res + self.malloc_fixedsize = malloc_fixedsize # - self.malloc_gc_variable = malloc_gc_variable - self.MALLOC_GC_VARIABLE = lltype.Ptr( - lltype.FuncType([lltype.Signed] * 3, llmemory.GCREF)) + self._ready() - def get_funcptr_for_malloc_gc_fixed(self): - return self.malloc_fn_ptr + def _gc_malloc(self, size, tid): + # Boehm: 'tid' is ignored + return self.malloc_fixedsize(size) - def get_funcptr_for_malloc_gc_variable(self): - return llhelper(self.MALLOC_GC_VARIABLE, self.malloc_gc_variable) + def _gc_malloc_array(self, basesize, num_elem, itemsize, ofs_length, tid): + # Boehm: 'tid' is ignored + try: + totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + except OverflowError: + raise MemoryError + res = self.malloc_fn_ptr(totalsize) + if not res: + raise MemoryError + arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) + arrayptr[ofs_length/WORD] = num_elem + return res + # ____________________________________________________________ # All code below is for the hybrid or minimark GC diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -11,8 +11,11 @@ # # - Remove the DEBUG_MERGE_POINTs. # - # - Turn all NEW_xxx to MALLOC_GC operations, possibly followed by - # SETFIELDs in order to initialize their GC fields. + # - Turn all NEW_xxx to either a CALL_MALLOC_GC, or a CALL_MALLOC_NURSERY + # followed by SETFIELDs in order to initialize their GC fields. The + # two advantages of CALL_MALLOC_NURSERY is that it inlines the common + # path, and we need only one such operation to allocate several blocks + # of memory at once. # # - Add COND_CALLs to the write barrier before SETFIELD_GC and # SETARRAYITEM_GC operations. 
@@ -40,7 +43,7 @@ for op in operations: if op.getopnum() == rop.DEBUG_MERGE_POINT: continue - # ---------- fold the NEWxxx operations into MALLOC_GC ---------- + # ---------- turn NEWxxx into CALL_MALLOC_xxx ---------- if op.is_malloc(): self.handle_malloc_operation(op) continue @@ -116,11 +119,12 @@ var_size = ovfcheck(item_size * num_elem) total_size = ovfcheck(base_size + var_size) except OverflowError: - pass + pass # total_size is still -1 if total_size >= 0: self.gen_malloc_nursery(total_size, op.result) else: - self.gen_malloc_gc(base_size, op.result, + xxx + self.gen_new_array(base_size, op.result, v_length, ConstInt(item_size)) self.gen_initialize_tid(op.result, tid) self.gen_initialize_len(op.result, v_length, arraylen_descr) @@ -135,12 +139,14 @@ self._op_malloc_nursery = None self.recent_mallocs.clear() - def gen_malloc_gc(self, size, v_result, - v_num_elem=c_zero, c_item_size=c_zero): - """Generate a MALLOC_GC.""" + def gen_malloc_fixedsize(self, size, v_result): + """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, Const(size)). + Note that with the framework GC, this should be called very rarely. + """ + self.emitting_an_operation_that_can_collect() c_size = ConstInt(size) - self.emitting_an_operation_that_can_collect() - op = ResOperation(rop.MALLOC_GC, [c_size, v_num_elem, c_item_size], + op = ResOperation(rop.CALL_MALLOC_GC, + [self.gc_ll_descr.c_malloc_fixedsize_fn, c_size], v_result) self.newops.append(op) # mark 'v_result' as freshly malloced @@ -151,8 +157,7 @@ If that fails, generate a plain MALLOC_GC instead. """ if not self.gc_ll_descr.can_use_nursery_malloc(size): - self.gen_malloc_gc(size, v_result) - return + return self.gen_malloc_fixedsize(size, v_result) # size = self.round_up_for_allocation(size) op = None @@ -178,6 +183,7 @@ self._previous_size = size self._v_last_malloced_nursery = v_result self.recent_mallocs[v_result] = None + return True def gen_initialize_tid(self, v_newgcobj, tid): if self.gc_ll_descr.fielddescr_tid is not None: diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -15,6 +15,8 @@ class RewriteTests(object): def check_rewrite(self, frm_operations, to_operations): + malloc_fixedsize = self.gc_ll_descr.malloc_fixedsize_fn + # S = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) sdescr = get_size_descr(self.gc_ll_descr, S) @@ -84,7 +86,7 @@ jump() """, """ [p1] - p0 = malloc_gc(%(sdescr.size)d, 0, 0) + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d) jump() """) @@ -96,8 +98,8 @@ jump() """, """ [] - p0 = malloc_gc(%(sdescr.size)d, 0, 0) - p1 = malloc_gc(%(sdescr.size)d, 0, 0) + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d) + p1 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d) jump() """) @@ -108,8 +110,9 @@ jump() """, """ [] - p0 = malloc_gc(%(adescr.get_base_size(False) + \ - 10 * adescr.get_item_size(False))d, 0, 0) + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(adescr.get_base_size(False) + \ + 10 * adescr.get_item_size(False))d) setfield_gc(p0, 10, descr=alendescr) jump() """) @@ -121,8 +124,10 @@ jump() """, """ [i1] - p0 = malloc_gc(%(adescr.get_base_size(False))d, \ - i1, %(adescr.get_item_size(False))d) + p0 = call_malloc_gc(ConstClass(malloc_varsize), \ + %(adescr.get_base_size(False))d, \ + i1, \ + %(adescr.get_item_size(False))d) setfield_gc(p0, i1, descr=alendescr) jump() """) diff 
--git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -344,8 +344,8 @@ rop.SETINTERIORFIELD_RAW, rop.CALL_RELEASE_GIL, rop.QUASIIMMUT_FIELD, - rop.MALLOC_GC, - rop.MALLOC_NURSERY, + rop.CALL_MALLOC_GC, + rop.CALL_MALLOC_NURSERY, rop.LABEL, ): # list of opcodes never executed by pyjitpl continue diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -472,8 +472,6 @@ 'NEW_ARRAY/1d', 'NEWSTR/1', 'NEWUNICODE/1', - 'MALLOC_GC/3', # added by llsupport/gc: malloc of C1+N*C2 bytes - 'MALLOC_NURSERY/1', # added by llsupport/gc: nursery malloc, const bytes '_MALLOC_LAST', 'FORCE_TOKEN/0', 'VIRTUAL_REF/2', # removed before it's passed to the backend @@ -510,6 +508,8 @@ #'OOSEND', # ootype operation #'OOSEND_PURE', # ootype operation 'CALL_PURE/*d', # removed before it's passed to the backend + 'CALL_MALLOC_GC/*d', # like CALL, but NULL => propagate MemoryError + 'CALL_MALLOC_NURSERY/2d', # nursery malloc, const number of bytes, zeroed '_CALL_LAST', '_CANRAISE_LAST', # ----- end of can_raise operations ----- From noreply at buildbot.pypy.org Sat Dec 17 15:43:52 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Dec 2011 15:43:52 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: Untested: move this reverse-engineering-like code out of the backend. Message-ID: <20111217144352.881068205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: ppc-jit-backend Changeset: r50620:7ce9ef1c360f Date: 2011-12-17 15:42 +0100 http://bitbucket.org/pypy/pypy/changeset/7ce9ef1c360f/ Log: Untested: move this reverse-engineering-like code out of the backend. diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -561,26 +561,34 @@ self.fielddescr_tid = get_field_descr(gc_ll_descr, GCClass.HDR, 'tid') # self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG - self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = ( + (self.jit_wb_if_flag_byteofs, + self.jit_wb_if_flag_singlebyte, + self.jit_wb_if_flag_bitpos) = ( self.extract_flag_byte(self.jit_wb_if_flag)) # if hasattr(GCClass, 'JIT_WB_CARDS_SET'): self.jit_wb_cards_set = GCClass.JIT_WB_CARDS_SET self.jit_wb_card_page_shift = GCClass.JIT_WB_CARD_PAGE_SHIFT - self.jit_wb_cards_set_byteofs, self.jit_wb_cards_set_singlebyte = ( + (self.jit_wb_cards_set_byteofs, + self.jit_wb_cards_set_singlebyte, + self.jit_wb_cards_set_bitpos) = ( self.extract_flag_byte(self.jit_wb_cards_set)) else: self.jit_wb_cards_set = 0 def extract_flag_byte(self, flag_word): # if convenient for the backend, we compute the info about - # the flag as (byte-offset, single-byte-flag). + # the flag as (byte-offset, single-byte-flag, bit-position-in-word). + # Note that flag_word == 1 << bit_position_in_word. 
import struct value = struct.pack("l", flag_word) assert value.count('\x00') == len(value) - 1 # only one byte is != 0 i = 0 while value[i] == '\x00': i += 1 - return (i, struct.unpack('b', value[i])[0]) + bitpos = 0 + while flag_word > (1 << bitpos): bitpos += 1 + assert flag_word == (1 << bitpos) + return (i, struct.unpack('b', value[i])[0], bitpos) def get_write_barrier_fn(self, cpu): llop1 = self.llop1 diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -898,22 +898,14 @@ else: self.mc.ld(r.SCRATCH.value, loc_base.value, 0) - # offset to the byte we are interested in - byte_offset = descr.jit_wb_if_flag_byteofs - single_byte = descr.jit_wb_if_flag_singlebyte - - # examine which bit in the byte is set - for i in range(8): - if 1 << i == single_byte: - n = i - break + # get the position of the bit we want to test + bitpos = descr.jit_wb_if_flag_bitpos if IS_PPC_32: - # compute the position of the bit we want to test - bitpos = (3 - byte_offset) * 8 + n - # ^^^^^^^^^^^^^^^ due to endianess # put this bit to the rightmost bitposition of r0 - self.mc.rlwinm(r.SCRATCH.value, r.SCRATCH.value, 32 - bitpos, 31, 31) + if bitpos > 0: + self.mc.rlwinm(r.SCRATCH.value, r.SCRATCH.value, + 32 - bitpos, 31, 31) # test whether this bit is set self.mc.cmpwi(0, r.SCRATCH.value, 1) else: diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -1945,6 +1945,7 @@ jit_wb_if_flag = 4096 jit_wb_if_flag_byteofs = struct.pack("i", 4096).index('\x10') jit_wb_if_flag_singlebyte = 0x10 + jit_wb_if_flag_bitpos = 12 def get_write_barrier_fn(self, cpu): return funcbox.getint() # @@ -1982,6 +1983,7 @@ jit_wb_if_flag = 4096 jit_wb_if_flag_byteofs = struct.pack("i", 4096).index('\x10') jit_wb_if_flag_singlebyte = 0x10 + jit_wb_if_flag_bitpos = 12 jit_wb_cards_set = 0 def get_write_barrier_from_array_fn(self, cpu): return funcbox.getint() @@ -2028,9 +2030,11 @@ jit_wb_if_flag = 4096 jit_wb_if_flag_byteofs = struct.pack("i", 4096).index('\x10') jit_wb_if_flag_singlebyte = 0x10 + jit_wb_if_flag_bitpos = 12 jit_wb_cards_set = 8192 jit_wb_cards_set_byteofs = struct.pack("i", 8192).index('\x20') jit_wb_cards_set_singlebyte = 0x20 + jit_wb_cards_set_bitpos = 13 jit_wb_card_page_shift = 7 def get_write_barrier_from_array_fn(self, cpu): return funcbox.getint() From noreply at buildbot.pypy.org Sat Dec 17 15:43:53 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Dec 2011 15:43:53 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20111217144353.DABFB8205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50621:b3f614a9de14 Date: 2011-12-17 15:43 +0100 http://bitbucket.org/pypy/pypy/changeset/b3f614a9de14/ Log: merge heads diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -51,6 +51,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." 
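The llsupport/gc.py hunk above extends extract_flag_byte so that it also returns the bit position of the single-bit flag word (flag_word == 1 << bitpos), which the rewritten PPC code then feeds to rlwinm to rotate that bit into the low position, instead of re-deriving it from the byte offset and single-byte mask. Below is a small self-contained version of the same computation; it runs on Python 2 and 3, and the byte-offset part assumes a little-endian word, whereas the original scans struct.pack("l", ...) and therefore follows the native byte order.

import struct

def extract_flag_byte(flag_word):
    # For a flag occupying a single bit of the GC header word, compute
    # (byte_offset, flag_as_signed_byte, bit_position).  Assumes flag_word
    # is a power of two and, for byte_offset, a little-endian word.
    bitpos = 0
    while flag_word > (1 << bitpos):
        bitpos += 1
    assert flag_word == (1 << bitpos)
    byteofs = bitpos // 8
    singlebyte = struct.unpack('b', struct.pack('B', 1 << (bitpos % 8)))[0]
    return byteofs, singlebyte, bitpos

print(extract_flag_byte(4096))   # (1, 16, 12), i.e. 0x10 in byte 1
print(extract_flag_byte(8192))   # (1, 32, 13), i.e. 0x20 in byte 1

The two example values match the constants carried by the fake write-barrier descriptors in runner_test.py above (jit_wb_if_flag_bitpos = 12, jit_wb_cards_set_bitpos = 13).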
+ state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \"%s\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \"%s\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -94,17 +112,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." - for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \"%s\" missing from %s" - err = err % (missing, host) - w_err = space.wrap(err) - raise OperationError(space.w_TypeError, w_err) - raise AssertionError("should not reach here") class mod(AST): @@ -112,7 +119,6 @@ class Module(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -128,7 +134,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Module') + self.missing_field(space, ['body'], 'Module') else: pass w_list = self.w_body @@ -145,7 +151,6 @@ class Interactive(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -161,7 +166,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Interactive') + self.missing_field(space, ['body'], 'Interactive') else: pass w_list = self.w_body @@ -178,7 +183,6 @@ class Expression(mod): - def __init__(self, body): self.body = body self.initialization_state = 1 @@ -192,7 +196,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Expression') + self.missing_field(space, ['body'], 'Expression') else: pass self.body.sync_app_attrs(space) @@ -200,7 +204,6 @@ class Suite(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -216,7 +219,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Suite') + self.missing_field(space, ['body'], 'Suite') else: pass w_list = self.w_body @@ -232,15 +235,13 @@ class stmt(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class FunctionDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, args, body, decorator_list, lineno, col_offset): self.name = name self.args = args @@ -264,7 +265,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'args', 'body', 'decorator_list', 'lineno', 'col_offset'], 'FunctionDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef') else: pass self.args.sync_app_attrs(space) @@ -292,9 +293,6 @@ class ClassDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, bases, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases @@ -320,7 +318,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'bases', 'body', 'decorator_list', 
'lineno', 'col_offset'], 'ClassDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef') else: pass w_list = self.w_bases @@ -357,9 +355,6 @@ class Return(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -374,10 +369,10 @@ return visitor.visit_Return(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Return') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Return') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -385,9 +380,6 @@ class Delete(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, targets, lineno, col_offset): self.targets = targets self.w_targets = None @@ -404,7 +396,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['targets', 'lineno', 'col_offset'], 'Delete') + self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete') else: pass w_list = self.w_targets @@ -421,9 +413,6 @@ class Assign(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, targets, value, lineno, col_offset): self.targets = targets self.w_targets = None @@ -442,7 +431,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['targets', 'value', 'lineno', 'col_offset'], 'Assign') + self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') else: pass w_list = self.w_targets @@ -460,9 +449,6 @@ class AugAssign(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, target, op, value, lineno, col_offset): self.target = target self.op = op @@ -480,7 +466,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['target', 'op', 'value', 'lineno', 'col_offset'], 'AugAssign') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') else: pass self.target.sync_app_attrs(space) @@ -489,9 +475,6 @@ class Print(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, dest, values, nl, lineno, col_offset): self.dest = dest self.values = values @@ -511,10 +494,10 @@ return visitor.visit_Print(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 30: - missing_field(space, self.initialization_state, [None, 'values', 'nl', 'lineno', 'col_offset'], 'Print') + if (self.initialization_state & ~4) ^ 27: + self.missing_field(space, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.dest = None if self.dest: self.dest.sync_app_attrs(space) @@ -532,9 +515,6 @@ class For(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, target, iter, body, orelse, lineno, col_offset): self.target = target self.iter = iter @@ -559,7 +539,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['target', 'iter', 'body', 'orelse', 'lineno', 'col_offset'], 'For') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 
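The regenerated ast.py above reserves the two low bits of initialization_state (values 1 and 2) for lineno and col_offset and shifts every node-specific field up by two, replacing the old per-class _lineno_mask/_col_offset_mask attributes; the new missing_field method then walks the required-field list bit by bit, treating None entries as optional. Here is a plain-Python sketch of that bookkeeping, using Return as the worked example and simplified error reporting (the real method additionally reports "incorrect type" when the attribute exists in the instance dict but failed the interp_w check).

# Sketch of the initialization_state bookkeeping generated above.  A None in
# `required` marks an optional field; the two low bits are lineno/col_offset.

def find_missing_field(state, required, host):
    for i, name in enumerate(required):
        if (state >> i) & 1:
            continue                  # field was supplied
        if name is None:
            continue                  # optional field, absence is fine
        raise TypeError('required field "%s" missing from %s' % (name, host))
    raise AssertionError("should not reach here")

def check_return_node(state):
    # Return's layout is [lineno, col_offset, value] with `value` optional,
    # so the generated test is "(state & ~4) ^ 3": clear the optional bit 4,
    # then compare against lineno|col_offset == 3.
    if (state & ~4) ^ 3:
        find_missing_field(state, ['lineno', 'col_offset', None], 'Return')
    return 'ok'

print(check_return_node(0b011))   # 'ok': value omitted, but it is optional
print(check_return_node(0b111))   # 'ok': all three fields supplied
try:
    check_return_node(0b110)      # col_offset and value, but no lineno
except TypeError as e:
    print(e)                      # required field "lineno" missing from Return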
'orelse'], 'For') else: pass self.target.sync_app_attrs(space) @@ -588,9 +568,6 @@ class While(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -613,7 +590,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'While') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') else: pass self.test.sync_app_attrs(space) @@ -641,9 +618,6 @@ class If(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -666,7 +640,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'If') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') else: pass self.test.sync_app_attrs(space) @@ -694,9 +668,6 @@ class With(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, context_expr, optional_vars, body, lineno, col_offset): self.context_expr = context_expr self.optional_vars = optional_vars @@ -717,10 +688,10 @@ return visitor.visit_With(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 29: - missing_field(space, self.initialization_state, ['context_expr', None, 'body', 'lineno', 'col_offset'], 'With') + if (self.initialization_state & ~8) ^ 23: + self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.optional_vars = None self.context_expr.sync_app_attrs(space) if self.optional_vars: @@ -739,9 +710,6 @@ class Raise(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, inst, tback, lineno, col_offset): self.type = type self.inst = inst @@ -762,14 +730,14 @@ return visitor.visit_Raise(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~7) ^ 24: - missing_field(space, self.initialization_state, [None, None, None, 'lineno', 'col_offset'], 'Raise') + if (self.initialization_state & ~28) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.inst = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.tback = None if self.type: self.type.sync_app_attrs(space) @@ -781,9 +749,6 @@ class TryExcept(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, handlers, orelse, lineno, col_offset): self.body = body self.w_body = None @@ -808,7 +773,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['body', 'handlers', 'orelse', 'lineno', 'col_offset'], 'TryExcept') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') else: pass w_list = self.w_body @@ -845,9 +810,6 @@ class TryFinally(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, body, finalbody, lineno, col_offset): self.body = body self.w_body = None @@ -868,7 +830,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 
15: - missing_field(space, self.initialization_state, ['body', 'finalbody', 'lineno', 'col_offset'], 'TryFinally') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') else: pass w_list = self.w_body @@ -895,9 +857,6 @@ class Assert(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, test, msg, lineno, col_offset): self.test = test self.msg = msg @@ -914,10 +873,10 @@ return visitor.visit_Assert(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 13: - missing_field(space, self.initialization_state, ['test', None, 'lineno', 'col_offset'], 'Assert') + if (self.initialization_state & ~8) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.msg = None self.test.sync_app_attrs(space) if self.msg: @@ -926,9 +885,6 @@ class Import(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -945,7 +901,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Import') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import') else: pass w_list = self.w_names @@ -962,9 +918,6 @@ class ImportFrom(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, module, names, level, lineno, col_offset): self.module = module self.names = names @@ -982,12 +935,12 @@ return visitor.visit_ImportFrom(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~5) ^ 26: - missing_field(space, self.initialization_state, [None, 'names', None, 'lineno', 'col_offset'], 'ImportFrom') + if (self.initialization_state & ~20) ^ 11: + self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.module = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.level = 0 w_list = self.w_names if w_list is not None: @@ -1003,9 +956,6 @@ class Exec(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, globals, locals, lineno, col_offset): self.body = body self.globals = globals @@ -1025,12 +975,12 @@ return visitor.visit_Exec(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~6) ^ 25: - missing_field(space, self.initialization_state, ['body', None, None, 'lineno', 'col_offset'], 'Exec') + if (self.initialization_state & ~24) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'body', None, None], 'Exec') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.globals = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.locals = None self.body.sync_app_attrs(space) if self.globals: @@ -1041,9 +991,6 @@ class Global(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -1058,7 +1005,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Global') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') else: pass w_list = self.w_names @@ -1072,9 +1019,6 @@ class Expr(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def 
__init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -1089,7 +1033,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Expr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr') else: pass self.value.sync_app_attrs(space) @@ -1097,9 +1041,6 @@ class Pass(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1112,16 +1053,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Pass') + self.missing_field(space, ['lineno', 'col_offset'], 'Pass') else: pass class Break(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1134,16 +1072,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Break') + self.missing_field(space, ['lineno', 'col_offset'], 'Break') else: pass class Continue(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1156,21 +1091,19 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Continue') + self.missing_field(space, ['lineno', 'col_offset'], 'Continue') else: pass class expr(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class BoolOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, values, lineno, col_offset): self.op = op self.values = values @@ -1188,7 +1121,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'values', 'lineno', 'col_offset'], 'BoolOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp') else: pass w_list = self.w_values @@ -1205,9 +1138,6 @@ class BinOp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, op, right, lineno, col_offset): self.left = left self.op = op @@ -1225,7 +1155,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'op', 'right', 'lineno', 'col_offset'], 'BinOp') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'op', 'right'], 'BinOp') else: pass self.left.sync_app_attrs(space) @@ -1234,9 +1164,6 @@ class UnaryOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, operand, lineno, col_offset): self.op = op self.operand = operand @@ -1252,7 +1179,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'operand', 'lineno', 'col_offset'], 'UnaryOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'operand'], 'UnaryOp') else: pass self.operand.sync_app_attrs(space) @@ -1260,9 +1187,6 @@ class Lambda(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, args, body, lineno, col_offset): self.args = args self.body = body @@ -1279,7 +1203,7 @@ def sync_app_attrs(self, space): if 
(self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['args', 'body', 'lineno', 'col_offset'], 'Lambda') + self.missing_field(space, ['lineno', 'col_offset', 'args', 'body'], 'Lambda') else: pass self.args.sync_app_attrs(space) @@ -1288,9 +1212,6 @@ class IfExp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -1309,7 +1230,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'IfExp') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'IfExp') else: pass self.test.sync_app_attrs(space) @@ -1319,9 +1240,6 @@ class Dict(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, keys, values, lineno, col_offset): self.keys = keys self.w_keys = None @@ -1342,7 +1260,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['keys', 'values', 'lineno', 'col_offset'], 'Dict') + self.missing_field(space, ['lineno', 'col_offset', 'keys', 'values'], 'Dict') else: pass w_list = self.w_keys @@ -1369,9 +1287,6 @@ class Set(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, elts, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1388,7 +1303,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['elts', 'lineno', 'col_offset'], 'Set') + self.missing_field(space, ['lineno', 'col_offset', 'elts'], 'Set') else: pass w_list = self.w_elts @@ -1405,9 +1320,6 @@ class ListComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1426,7 +1338,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'ListComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'ListComp') else: pass self.elt.sync_app_attrs(space) @@ -1444,9 +1356,6 @@ class SetComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1465,7 +1374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'SetComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'SetComp') else: pass self.elt.sync_app_attrs(space) @@ -1483,9 +1392,6 @@ class DictComp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, key, value, generators, lineno, col_offset): self.key = key self.value = value @@ -1506,7 +1412,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['key', 'value', 'generators', 'lineno', 'col_offset'], 'DictComp') + self.missing_field(space, ['lineno', 'col_offset', 'key', 'value', 'generators'], 'DictComp') else: pass self.key.sync_app_attrs(space) @@ -1525,9 +1431,6 @@ class GeneratorExp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1546,7 +1449,7 @@ def sync_app_attrs(self, space): 
if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'GeneratorExp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'GeneratorExp') else: pass self.elt.sync_app_attrs(space) @@ -1564,9 +1467,6 @@ class Yield(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1581,10 +1481,10 @@ return visitor.visit_Yield(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Yield') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Yield') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -1592,9 +1492,6 @@ class Compare(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, ops, comparators, lineno, col_offset): self.left = left self.ops = ops @@ -1615,7 +1512,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'ops', 'comparators', 'lineno', 'col_offset'], 'Compare') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'ops', 'comparators'], 'Compare') else: pass self.left.sync_app_attrs(space) @@ -1640,9 +1537,6 @@ class Call(expr): - _lineno_mask = 32 - _col_offset_mask = 64 - def __init__(self, func, args, keywords, starargs, kwargs, lineno, col_offset): self.func = func self.args = args @@ -1670,12 +1564,12 @@ return visitor.visit_Call(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~24) ^ 103: - missing_field(space, self.initialization_state, ['func', 'args', 'keywords', None, None, 'lineno', 'col_offset'], 'Call') + if (self.initialization_state & ~96) ^ 31: + self.missing_field(space, ['lineno', 'col_offset', 'func', 'args', 'keywords', None, None], 'Call') else: - if not self.initialization_state & 8: + if not self.initialization_state & 32: self.starargs = None - if not self.initialization_state & 16: + if not self.initialization_state & 64: self.kwargs = None self.func.sync_app_attrs(space) w_list = self.w_args @@ -1706,9 +1600,6 @@ class Repr(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1723,7 +1614,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Repr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Repr') else: pass self.value.sync_app_attrs(space) @@ -1731,9 +1622,6 @@ class Num(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, n, lineno, col_offset): self.n = n expr.__init__(self, lineno, col_offset) @@ -1747,16 +1635,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['n', 'lineno', 'col_offset'], 'Num') + self.missing_field(space, ['lineno', 'col_offset', 'n'], 'Num') else: pass class Str(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, s, lineno, col_offset): self.s = s expr.__init__(self, lineno, col_offset) @@ -1770,16 +1655,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 
7: - missing_field(space, self.initialization_state, ['s', 'lineno', 'col_offset'], 'Str') + self.missing_field(space, ['lineno', 'col_offset', 's'], 'Str') else: pass class Attribute(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, attr, ctx, lineno, col_offset): self.value = value self.attr = attr @@ -1796,7 +1678,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'attr', 'ctx', 'lineno', 'col_offset'], 'Attribute') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'attr', 'ctx'], 'Attribute') else: pass self.value.sync_app_attrs(space) @@ -1804,9 +1686,6 @@ class Subscript(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, slice, ctx, lineno, col_offset): self.value = value self.slice = slice @@ -1824,7 +1703,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'slice', 'ctx', 'lineno', 'col_offset'], 'Subscript') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'slice', 'ctx'], 'Subscript') else: pass self.value.sync_app_attrs(space) @@ -1833,9 +1712,6 @@ class Name(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, id, ctx, lineno, col_offset): self.id = id self.ctx = ctx @@ -1850,16 +1726,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['id', 'ctx', 'lineno', 'col_offset'], 'Name') + self.missing_field(space, ['lineno', 'col_offset', 'id', 'ctx'], 'Name') else: pass class List(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1877,7 +1750,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'List') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'List') else: pass w_list = self.w_elts @@ -1894,9 +1767,6 @@ class Tuple(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1914,7 +1784,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'Tuple') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'Tuple') else: pass w_list = self.w_elts @@ -1931,9 +1801,6 @@ class Const(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1947,7 +1814,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Const') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Const') else: pass @@ -2009,7 +1876,6 @@ class Ellipsis(slice): - def __init__(self): self.initialization_state = 0 @@ -2021,14 +1887,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 0: - missing_field(space, self.initialization_state, [], 'Ellipsis') + self.missing_field(space, [], 'Ellipsis') else: pass class Slice(slice): - def __init__(self, lower, upper, step): self.lower = lower self.upper = upper @@ -2049,7 +1914,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & 
~7) ^ 0: - missing_field(space, self.initialization_state, [None, None, None], 'Slice') + self.missing_field(space, [None, None, None], 'Slice') else: if not self.initialization_state & 1: self.lower = None @@ -2067,7 +1932,6 @@ class ExtSlice(slice): - def __init__(self, dims): self.dims = dims self.w_dims = None @@ -2083,7 +1947,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['dims'], 'ExtSlice') + self.missing_field(space, ['dims'], 'ExtSlice') else: pass w_list = self.w_dims @@ -2100,7 +1964,6 @@ class Index(slice): - def __init__(self, value): self.value = value self.initialization_state = 1 @@ -2114,7 +1977,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['value'], 'Index') + self.missing_field(space, ['value'], 'Index') else: pass self.value.sync_app_attrs(space) @@ -2377,7 +2240,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['target', 'iter', 'ifs'], 'comprehension') + self.missing_field(space, ['target', 'iter', 'ifs'], 'comprehension') else: pass self.target.sync_app_attrs(space) @@ -2394,15 +2257,13 @@ node.sync_app_attrs(space) class excepthandler(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class ExceptHandler(excepthandler): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, name, body, lineno, col_offset): self.type = type self.name = name @@ -2424,12 +2285,12 @@ return visitor.visit_ExceptHandler(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~3) ^ 28: - missing_field(space, self.initialization_state, [None, None, 'body', 'lineno', 'col_offset'], 'ExceptHandler') + if (self.initialization_state & ~12) ^ 19: + self.missing_field(space, ['lineno', 'col_offset', None, None, 'body'], 'ExceptHandler') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.name = None if self.type: self.type.sync_app_attrs(space) @@ -2470,7 +2331,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~6) ^ 9: - missing_field(space, self.initialization_state, ['args', None, None, 'defaults'], 'arguments') + self.missing_field(space, ['args', None, None, 'defaults'], 'arguments') else: if not self.initialization_state & 2: self.vararg = None @@ -2513,7 +2374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['arg', 'value'], 'keyword') + self.missing_field(space, ['arg', 'value'], 'keyword') else: pass self.value.sync_app_attrs(space) @@ -2533,7 +2394,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~2) ^ 1: - missing_field(space, self.initialization_state, ['name', None], 'alias') + self.missing_field(space, ['name', None], 'alias') else: if not self.initialization_state & 2: self.asname = None @@ -3019,6 +2880,8 @@ def Expression_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3098,7 +2961,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -3112,14 +2975,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def stmt_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -3133,7 +2996,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 stmt.typedef = typedef.TypeDef("stmt", AST.typedef, @@ -3149,7 +3012,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3163,14 +3026,14 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def FunctionDef_get_args(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -3184,10 +3047,10 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def FunctionDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3201,10 +3064,10 @@ def FunctionDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def FunctionDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3218,7 +3081,7 @@ def FunctionDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _FunctionDef_field_unroller = unrolling_iterable(['name', 'args', 'body', 'decorator_list']) def FunctionDef_init(space, w_self, __args__): @@ -3254,7 +3117,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not 
None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3268,10 +3131,10 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ClassDef_get_bases(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases') if w_self.w_bases is None: @@ -3285,10 +3148,10 @@ def ClassDef_set_bases(space, w_self, w_new_value): w_self.w_bases = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ClassDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3302,10 +3165,10 @@ def ClassDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def ClassDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3319,7 +3182,7 @@ def ClassDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _ClassDef_field_unroller = unrolling_iterable(['name', 'bases', 'body', 'decorator_list']) def ClassDef_init(space, w_self, __args__): @@ -3356,7 +3219,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3364,13 +3227,15 @@ def Return_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Return_field_unroller = unrolling_iterable(['value']) def Return_init(space, w_self, __args__): @@ -3397,7 +3262,7 @@ ) def Delete_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3411,7 +3276,7 @@ def Delete_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Delete_field_unroller = unrolling_iterable(['targets']) def 
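Besides the renumbered bits, the expr-typed setters in this diff (Expression_set_body above, Return_set_value here, and the many that follow) gain the same two-line guard: interp_w accepts any instance of the abstract expr class, so the added "type(...) is expr" check is what rejects a bare expr(), while the TypeError path still stashes arbitrary objects in the instance dict. Below is a minimal stand-alone imitation with plain classes; interp_w, setdictvalue and the space objects are only mimicked, not reproduced.

# Imitation of the setter guard added in this merge: accept strict subclasses
# of the abstract node class, reject the abstract class itself, and keep
# anything else in a dict fallback instead of the typed attribute.

class expr(object):      # stands in for the abstract AST base class
    pass

class Name(expr):        # a concrete node type
    pass

class Return(object):
    pass

def return_set_value(node, new_value):
    try:
        if not isinstance(new_value, expr):   # crude stand-in for interp_w
            raise TypeError
        if type(new_value) is expr:           # the check added in this merge
            raise TypeError
    except TypeError:
        node.w_dict = getattr(node, 'w_dict', {})
        node.w_dict['value'] = new_value      # like setdictvalue()
        return
    node.value = new_value
    node.initialization_state = getattr(node, 'initialization_state', 0) | 4

ret = Return()
return_set_value(ret, Name())
print(hasattr(ret, 'value'))     # True: concrete subclass accepted, bit 4 set
return_set_value(ret, expr())
print('value' in ret.w_dict)     # True: bare expr() went to the dict fallback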
Delete_init(space, w_self, __args__): @@ -3439,7 +3304,7 @@ ) def Assign_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3453,14 +3318,14 @@ def Assign_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3468,13 +3333,15 @@ def Assign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assign_field_unroller = unrolling_iterable(['targets', 'value']) def Assign_init(space, w_self, __args__): @@ -3507,7 +3374,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3515,20 +3382,22 @@ def AugAssign_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def AugAssign_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -3544,14 +3413,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def AugAssign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3559,13 +3428,15 @@ def AugAssign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise 
OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _AugAssign_field_unroller = unrolling_iterable(['target', 'op', 'value']) def AugAssign_init(space, w_self, __args__): @@ -3598,7 +3469,7 @@ w_obj = w_self.getdictvalue(space, 'dest') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dest') return space.wrap(w_self.dest) @@ -3606,16 +3477,18 @@ def Print_set_dest(space, w_self, w_new_value): try: w_self.dest = space.interp_w(expr, w_new_value, True) + if type(w_self.dest) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'dest', w_new_value) return w_self.deldictvalue(space, 'dest') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Print_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -3629,14 +3502,14 @@ def Print_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Print_get_nl(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'nl') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl') return space.wrap(w_self.nl) @@ -3650,7 +3523,7 @@ w_self.setdictvalue(space, 'nl', w_new_value) return w_self.deldictvalue(space, 'nl') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Print_field_unroller = unrolling_iterable(['dest', 'values', 'nl']) def Print_init(space, w_self, __args__): @@ -3684,7 +3557,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3692,20 +3565,22 @@ def For_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def For_get_iter(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'iter') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute 
'%s'", typename, 'iter') return space.wrap(w_self.iter) @@ -3713,16 +3588,18 @@ def For_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'iter', w_new_value) return w_self.deldictvalue(space, 'iter') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def For_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3736,10 +3613,10 @@ def For_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def For_get_orelse(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3753,7 +3630,7 @@ def For_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _For_field_unroller = unrolling_iterable(['target', 'iter', 'body', 'orelse']) def For_init(space, w_self, __args__): @@ -3789,7 +3666,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3797,16 +3674,18 @@ def While_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def While_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3820,10 +3699,10 @@ def While_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def While_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3837,7 +3716,7 @@ def While_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _While_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def While_init(space, w_self, __args__): @@ -3872,7 +3751,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + 
if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3880,16 +3759,18 @@ def If_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def If_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3903,10 +3784,10 @@ def If_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def If_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3920,7 +3801,7 @@ def If_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _If_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def If_init(space, w_self, __args__): @@ -3955,7 +3836,7 @@ w_obj = w_self.getdictvalue(space, 'context_expr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr') return space.wrap(w_self.context_expr) @@ -3963,20 +3844,22 @@ def With_set_context_expr(space, w_self, w_new_value): try: w_self.context_expr = space.interp_w(expr, w_new_value, False) + if type(w_self.context_expr) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'context_expr', w_new_value) return w_self.deldictvalue(space, 'context_expr') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def With_get_optional_vars(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'optional_vars') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars') return space.wrap(w_self.optional_vars) @@ -3984,16 +3867,18 @@ def With_set_optional_vars(space, w_self, w_new_value): try: w_self.optional_vars = space.interp_w(expr, w_new_value, True) + if type(w_self.optional_vars) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'optional_vars', w_new_value) return w_self.deldictvalue(space, 'optional_vars') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def With_get_body(space, w_self): - if not 
w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4007,7 +3892,7 @@ def With_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _With_field_unroller = unrolling_iterable(['context_expr', 'optional_vars', 'body']) def With_init(space, w_self, __args__): @@ -4041,7 +3926,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -4049,20 +3934,22 @@ def Raise_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Raise_get_inst(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'inst') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst') return space.wrap(w_self.inst) @@ -4070,20 +3957,22 @@ def Raise_set_inst(space, w_self, w_new_value): try: w_self.inst = space.interp_w(expr, w_new_value, True) + if type(w_self.inst) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'inst', w_new_value) return w_self.deldictvalue(space, 'inst') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Raise_get_tback(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'tback') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'tback') return space.wrap(w_self.tback) @@ -4091,13 +3980,15 @@ def Raise_set_tback(space, w_self, w_new_value): try: w_self.tback = space.interp_w(expr, w_new_value, True) + if type(w_self.tback) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'tback', w_new_value) return w_self.deldictvalue(space, 'tback') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Raise_field_unroller = unrolling_iterable(['type', 'inst', 'tback']) def Raise_init(space, w_self, __args__): @@ -4126,7 +4017,7 @@ ) def TryExcept_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4140,10 +4031,10 @@ def 
TryExcept_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryExcept_get_handlers(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers') if w_self.w_handlers is None: @@ -4157,10 +4048,10 @@ def TryExcept_set_handlers(space, w_self, w_new_value): w_self.w_handlers = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def TryExcept_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -4174,7 +4065,7 @@ def TryExcept_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _TryExcept_field_unroller = unrolling_iterable(['body', 'handlers', 'orelse']) def TryExcept_init(space, w_self, __args__): @@ -4206,7 +4097,7 @@ ) def TryFinally_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4220,10 +4111,10 @@ def TryFinally_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryFinally_get_finalbody(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody') if w_self.w_finalbody is None: @@ -4237,7 +4128,7 @@ def TryFinally_set_finalbody(space, w_self, w_new_value): w_self.w_finalbody = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _TryFinally_field_unroller = unrolling_iterable(['body', 'finalbody']) def TryFinally_init(space, w_self, __args__): @@ -4271,7 +4162,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -4279,20 +4170,22 @@ def Assert_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assert_get_msg(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'msg') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg') return 
space.wrap(w_self.msg) @@ -4300,13 +4193,15 @@ def Assert_set_msg(space, w_self, w_new_value): try: w_self.msg = space.interp_w(expr, w_new_value, True) + if type(w_self.msg) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'msg', w_new_value) return w_self.deldictvalue(space, 'msg') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assert_field_unroller = unrolling_iterable(['test', 'msg']) def Assert_init(space, w_self, __args__): @@ -4334,7 +4229,7 @@ ) def Import_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4348,7 +4243,7 @@ def Import_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Import_field_unroller = unrolling_iterable(['names']) def Import_init(space, w_self, __args__): @@ -4380,7 +4275,7 @@ w_obj = w_self.getdictvalue(space, 'module') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module') return space.wrap(w_self.module) @@ -4397,10 +4292,10 @@ w_self.setdictvalue(space, 'module', w_new_value) return w_self.deldictvalue(space, 'module') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ImportFrom_get_names(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4414,14 +4309,14 @@ def ImportFrom_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ImportFrom_get_level(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'level') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level') return space.wrap(w_self.level) @@ -4435,7 +4330,7 @@ w_self.setdictvalue(space, 'level', w_new_value) return w_self.deldictvalue(space, 'level') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ImportFrom_field_unroller = unrolling_iterable(['module', 'names', 'level']) def ImportFrom_init(space, w_self, __args__): @@ -4469,7 +4364,7 @@ w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -4477,20 +4372,22 @@ def Exec_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, 
space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Exec_get_globals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'globals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals') return space.wrap(w_self.globals) @@ -4498,20 +4395,22 @@ def Exec_set_globals(space, w_self, w_new_value): try: w_self.globals = space.interp_w(expr, w_new_value, True) + if type(w_self.globals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'globals', w_new_value) return w_self.deldictvalue(space, 'globals') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Exec_get_locals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'locals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals') return space.wrap(w_self.locals) @@ -4519,13 +4418,15 @@ def Exec_set_locals(space, w_self, w_new_value): try: w_self.locals = space.interp_w(expr, w_new_value, True) + if type(w_self.locals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'locals', w_new_value) return w_self.deldictvalue(space, 'locals') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Exec_field_unroller = unrolling_iterable(['body', 'globals', 'locals']) def Exec_init(space, w_self, __args__): @@ -4554,7 +4455,7 @@ ) def Global_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4568,7 +4469,7 @@ def Global_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Global_field_unroller = unrolling_iterable(['names']) def Global_init(space, w_self, __args__): @@ -4600,7 +4501,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -4608,13 +4509,15 @@ def Expr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Expr_field_unroller = unrolling_iterable(['value']) def Expr_init(space, w_self, 
__args__): @@ -4696,7 +4599,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -4710,14 +4613,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def expr_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -4731,7 +4634,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 expr.typedef = typedef.TypeDef("expr", AST.typedef, @@ -4747,7 +4650,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return boolop_to_class[w_self.op - 1]() @@ -4763,10 +4666,10 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BoolOp_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -4780,7 +4683,7 @@ def BoolOp_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _BoolOp_field_unroller = unrolling_iterable(['op', 'values']) def BoolOp_init(space, w_self, __args__): @@ -4813,7 +4716,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -4821,20 +4724,22 @@ def BinOp_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BinOp_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -4850,14 +4755,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def BinOp_get_right(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'right') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right') return space.wrap(w_self.right) @@ -4865,13 +4770,15 @@ def BinOp_set_right(space, w_self, w_new_value): try: w_self.right = space.interp_w(expr, w_new_value, False) + if type(w_self.right) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'right', w_new_value) return w_self.deldictvalue(space, 'right') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _BinOp_field_unroller = unrolling_iterable(['left', 'op', 'right']) def BinOp_init(space, w_self, __args__): @@ -4904,7 +4811,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return unaryop_to_class[w_self.op - 1]() @@ -4920,14 +4827,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def UnaryOp_get_operand(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'operand') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand') return space.wrap(w_self.operand) @@ -4935,13 +4842,15 @@ def UnaryOp_set_operand(space, w_self, w_new_value): try: w_self.operand = space.interp_w(expr, w_new_value, False) + if type(w_self.operand) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'operand', w_new_value) return w_self.deldictvalue(space, 'operand') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _UnaryOp_field_unroller = unrolling_iterable(['op', 'operand']) def UnaryOp_init(space, w_self, __args__): @@ -4973,7 +4882,7 @@ w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -4987,14 +4896,14 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Lambda_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5002,13 +4911,15 @@ def Lambda_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Lambda_field_unroller = unrolling_iterable(['args', 'body']) def Lambda_init(space, w_self, __args__): @@ -5040,7 +4951,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -5048,20 +4959,22 @@ def IfExp_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def IfExp_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5069,20 +4982,22 @@ def IfExp_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def IfExp_get_orelse(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'orelse') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') return space.wrap(w_self.orelse) @@ -5090,13 +5005,15 @@ def IfExp_set_orelse(space, w_self, w_new_value): try: w_self.orelse = space.interp_w(expr, w_new_value, False) + if type(w_self.orelse) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'orelse', w_new_value) return w_self.deldictvalue(space, 'orelse') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _IfExp_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def IfExp_init(space, w_self, __args__): @@ -5125,7 +5042,7 @@ ) def Dict_get_keys(space, w_self): - if not 
w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys') if w_self.w_keys is None: @@ -5139,10 +5056,10 @@ def Dict_set_keys(space, w_self, w_new_value): w_self.w_keys = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Dict_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -5156,7 +5073,7 @@ def Dict_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Dict_field_unroller = unrolling_iterable(['keys', 'values']) def Dict_init(space, w_self, __args__): @@ -5186,7 +5103,7 @@ ) def Set_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -5200,7 +5117,7 @@ def Set_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Set_field_unroller = unrolling_iterable(['elts']) def Set_init(space, w_self, __args__): @@ -5232,7 +5149,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5240,16 +5157,18 @@ def ListComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ListComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5263,7 +5182,7 @@ def ListComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _ListComp_field_unroller = unrolling_iterable(['elt', 'generators']) def ListComp_init(space, w_self, __args__): @@ -5296,7 +5215,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5304,16 +5223,18 @@ def SetComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) 
except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def SetComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5327,7 +5248,7 @@ def SetComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _SetComp_field_unroller = unrolling_iterable(['elt', 'generators']) def SetComp_init(space, w_self, __args__): @@ -5360,7 +5281,7 @@ w_obj = w_self.getdictvalue(space, 'key') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key') return space.wrap(w_self.key) @@ -5368,20 +5289,22 @@ def DictComp_set_key(space, w_self, w_new_value): try: w_self.key = space.interp_w(expr, w_new_value, False) + if type(w_self.key) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'key', w_new_value) return w_self.deldictvalue(space, 'key') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def DictComp_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5389,16 +5312,18 @@ def DictComp_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def DictComp_get_generators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5412,7 +5337,7 @@ def DictComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _DictComp_field_unroller = unrolling_iterable(['key', 'value', 'generators']) def DictComp_init(space, w_self, __args__): @@ -5446,7 +5371,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5454,16 +5379,18 @@ def GeneratorExp_set_elt(space, 
w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def GeneratorExp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5477,7 +5404,7 @@ def GeneratorExp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _GeneratorExp_field_unroller = unrolling_iterable(['elt', 'generators']) def GeneratorExp_init(space, w_self, __args__): @@ -5510,7 +5437,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5518,13 +5445,15 @@ def Yield_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Yield_field_unroller = unrolling_iterable(['value']) def Yield_init(space, w_self, __args__): @@ -5555,7 +5484,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -5563,16 +5492,18 @@ def Compare_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Compare_get_ops(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops') if w_self.w_ops is None: @@ -5586,10 +5517,10 @@ def Compare_set_ops(space, w_self, w_new_value): w_self.w_ops = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Compare_get_comparators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators') if w_self.w_comparators is None: @@ -5603,7 +5534,7 @@ 
def Compare_set_comparators(space, w_self, w_new_value): w_self.w_comparators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Compare_field_unroller = unrolling_iterable(['left', 'ops', 'comparators']) def Compare_init(space, w_self, __args__): @@ -5638,7 +5569,7 @@ w_obj = w_self.getdictvalue(space, 'func') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func') return space.wrap(w_self.func) @@ -5646,16 +5577,18 @@ def Call_set_func(space, w_self, w_new_value): try: w_self.func = space.interp_w(expr, w_new_value, False) + if type(w_self.func) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'func', w_new_value) return w_self.deldictvalue(space, 'func') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Call_get_args(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') if w_self.w_args is None: @@ -5669,10 +5602,10 @@ def Call_set_args(space, w_self, w_new_value): w_self.w_args = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Call_get_keywords(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') if w_self.w_keywords is None: @@ -5686,14 +5619,14 @@ def Call_set_keywords(space, w_self, w_new_value): w_self.w_keywords = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def Call_get_starargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'starargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') return space.wrap(w_self.starargs) @@ -5701,20 +5634,22 @@ def Call_set_starargs(space, w_self, w_new_value): try: w_self.starargs = space.interp_w(expr, w_new_value, True) + if type(w_self.starargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'starargs', w_new_value) return w_self.deldictvalue(space, 'starargs') - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 def Call_get_kwargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'kwargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 16: + if not w_self.initialization_state & 64: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') return space.wrap(w_self.kwargs) @@ -5722,13 +5657,15 @@ def Call_set_kwargs(space, w_self, w_new_value): try: w_self.kwargs = space.interp_w(expr, w_new_value, True) + if type(w_self.kwargs) is expr: + raise OperationError(space.w_TypeError, 
space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'kwargs', w_new_value) return w_self.deldictvalue(space, 'kwargs') - w_self.initialization_state |= 16 + w_self.initialization_state |= 64 _Call_field_unroller = unrolling_iterable(['func', 'args', 'keywords', 'starargs', 'kwargs']) def Call_init(space, w_self, __args__): @@ -5765,7 +5702,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5773,13 +5710,15 @@ def Repr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Repr_field_unroller = unrolling_iterable(['value']) def Repr_init(space, w_self, __args__): @@ -5810,7 +5749,7 @@ w_obj = w_self.getdictvalue(space, 'n') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n') return w_self.n @@ -5824,7 +5763,7 @@ w_self.setdictvalue(space, 'n', w_new_value) return w_self.deldictvalue(space, 'n') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Num_field_unroller = unrolling_iterable(['n']) def Num_init(space, w_self, __args__): @@ -5855,7 +5794,7 @@ w_obj = w_self.getdictvalue(space, 's') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's') return w_self.s @@ -5869,7 +5808,7 @@ w_self.setdictvalue(space, 's', w_new_value) return w_self.deldictvalue(space, 's') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Str_field_unroller = unrolling_iterable(['s']) def Str_init(space, w_self, __args__): @@ -5900,7 +5839,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5908,20 +5847,22 @@ def Attribute_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Attribute_get_attr(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'attr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not 
w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr') return space.wrap(w_self.attr) @@ -5935,14 +5876,14 @@ w_self.setdictvalue(space, 'attr', w_new_value) return w_self.deldictvalue(space, 'attr') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Attribute_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -5958,7 +5899,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Attribute_field_unroller = unrolling_iterable(['value', 'attr', 'ctx']) def Attribute_init(space, w_self, __args__): @@ -5991,7 +5932,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5999,20 +5940,22 @@ def Subscript_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Subscript_get_slice(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'slice') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice') return space.wrap(w_self.slice) @@ -6020,20 +5963,22 @@ def Subscript_set_slice(space, w_self, w_new_value): try: w_self.slice = space.interp_w(slice, w_new_value, False) + if type(w_self.slice) is slice: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'slice', w_new_value) return w_self.deldictvalue(space, 'slice') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Subscript_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6049,7 +5994,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Subscript_field_unroller = unrolling_iterable(['value', 'slice', 'ctx']) def Subscript_init(space, w_self, __args__): @@ 
-6082,7 +6027,7 @@ w_obj = w_self.getdictvalue(space, 'id') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id') return space.wrap(w_self.id) @@ -6096,14 +6041,14 @@ w_self.setdictvalue(space, 'id', w_new_value) return w_self.deldictvalue(space, 'id') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Name_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6119,7 +6064,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Name_field_unroller = unrolling_iterable(['id', 'ctx']) def Name_init(space, w_self, __args__): @@ -6147,7 +6092,7 @@ ) def List_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6161,14 +6106,14 @@ def List_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def List_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6184,7 +6129,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _List_field_unroller = unrolling_iterable(['elts', 'ctx']) def List_init(space, w_self, __args__): @@ -6213,7 +6158,7 @@ ) def Tuple_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6227,14 +6172,14 @@ def Tuple_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Tuple_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6250,7 +6195,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Tuple_field_unroller = unrolling_iterable(['elts', 
'ctx']) def Tuple_init(space, w_self, __args__): @@ -6283,7 +6228,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return w_self.value @@ -6297,7 +6242,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Const_field_unroller = unrolling_iterable(['value']) def Const_init(space, w_self, __args__): @@ -6409,6 +6354,8 @@ def Slice_set_lower(space, w_self, w_new_value): try: w_self.lower = space.interp_w(expr, w_new_value, True) + if type(w_self.lower) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6430,6 +6377,8 @@ def Slice_set_upper(space, w_self, w_new_value): try: w_self.upper = space.interp_w(expr, w_new_value, True) + if type(w_self.upper) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6451,6 +6400,8 @@ def Slice_set_step(space, w_self, w_new_value): try: w_self.step = space.interp_w(expr, w_new_value, True) + if type(w_self.step) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6540,6 +6491,8 @@ def Index_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6809,6 +6762,8 @@ def comprehension_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6830,6 +6785,8 @@ def comprehension_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6887,7 +6844,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -6901,14 +6858,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def excepthandler_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -6922,7 +6879,7 @@ 
w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 excepthandler.typedef = typedef.TypeDef("excepthandler", AST.typedef, @@ -6938,7 +6895,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -6946,20 +6903,22 @@ def ExceptHandler_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ExceptHandler_get_name(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -6967,16 +6926,18 @@ def ExceptHandler_set_name(space, w_self, w_new_value): try: w_self.name = space.interp_w(expr, w_new_value, True) + if type(w_self.name) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ExceptHandler_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -6990,7 +6951,7 @@ def ExceptHandler_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ExceptHandler_field_unroller = unrolling_iterable(['type', 'name', 'body']) def ExceptHandler_init(space, w_self, __args__): @@ -7164,6 +7125,8 @@ def keyword_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -79,6 +79,7 @@ else: self.emit("class %s(AST):" % (base,)) if sum.attributes: + self.emit("") args = ", ".join(attr.name.value for attr in sum.attributes) self.emit("def __init__(self, %s):" % (args,), 1) for attr in sum.attributes: @@ -114,7 +115,7 @@ else: names.append(repr(field.name.value)) sub = (", ".join(names), name.value) - self.emit("missing_field(space, self.initialization_state, [%s], %r)" + self.emit("self.missing_field(space, [%s], %r)" % sub, 3) 
self.emit("else:", 2) # Fill in all the default fields. @@ -195,17 +196,13 @@ def visitConstructor(self, cons, base, extra_attributes): self.emit("class %s(%s):" % (cons.name, base)) self.emit("") - for field in self.data.cons_attributes[cons]: - subst = (field.name, self.data.field_masks[field]) - self.emit("_%s_mask = %i" % subst, 1) - self.emit("") self.make_constructor(cons.fields, cons, extra_attributes, base) self.emit("") self.emit("def walkabout(self, visitor):", 1) self.emit("visitor.visit_%s(self)" % (cons.name,), 2) self.emit("") self.make_mutate_over(cons, cons.name) - self.make_var_syncer(cons.fields + self.data.cons_attributes[cons], + self.make_var_syncer(self.data.cons_attributes[cons] + cons.fields, cons, cons.name) def visitField(self, field): @@ -324,7 +321,7 @@ def visitSum(self, sum, name): for field in sum.attributes: - self.make_property(field, name, True) + self.make_property(field, name) self.make_typedef(name, "AST", sum.attributes, fields_name="_attributes") if not is_simple_sum(sum): @@ -400,13 +397,10 @@ def visitField(self, field, name): self.make_property(field, name) - def make_property(self, field, name, different_masks=False): + def make_property(self, field, name): func = "def %s_get_%s(space, w_self):" % (name, field.name) self.emit(func) - if different_masks: - flag = "w_self._%s_mask" % (field.name,) - else: - flag = self.data.field_masks[field] + flag = self.data.field_masks[field] if not field.seq: self.emit("if w_self.w_dict is not None:", 1) self.emit(" w_obj = w_self.getdictvalue(space, '%s')" % (field.name,), 1) @@ -458,6 +452,11 @@ config = (field.name, field.type, repr(field.opt)) self.emit("w_self.%s = space.interp_w(%s, w_new_value, %s)" % config, 2) + if field.type.value not in self.data.prod_simple: + self.emit("if type(w_self.%s) is %s:" % ( + field.name, field.type), 2) + self.emit("raise OperationError(space.w_TypeError, " + "space.w_None)", 3) else: level = 2 if field.opt and field.type.value != "int": @@ -505,7 +504,10 @@ optional_mask = 0 for i, field in enumerate(fields): flag = 1 << i - field_masks[field] = flag + if field not in field_masks: + field_masks[field] = flag + else: + assert field_masks[field] == flag if field.opt: optional_mask |= flag else: @@ -518,9 +520,9 @@ if is_simple_sum(sum): simple_types.add(tp.name.value) else: + attrs = [field for field in sum.attributes] for cons in sum.types: - attrs = [copy_field(field) for field in sum.attributes] - add_masks(cons.fields + attrs, cons) + add_masks(attrs + cons.fields, cons) cons_attributes[cons] = attrs else: prod = tp.value @@ -588,6 +590,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." + state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \\"%s\\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \\"%s\\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -631,15 +651,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." 
- for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \\"%s\\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) - raise AssertionError("should not reach here") """ diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -285,3 +285,10 @@ [], lineno=1, col_offset=0) ]) exec compile(body, '', 'exec') + + def test_invalid_sum(self): + import _ast as ast + pos = dict(lineno=2, col_offset=3) + m = ast.Module([ast.Expr(ast.expr(**pos), **pos)]) + exc = raises(TypeError, compile, m, "", "exec") + diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -514,44 +514,41 @@ if maxsplit == 0: return space.wrap(input) - #print "from replace, input: %s, sub: %s, by: %s" % (input, sub, by) + # An ok guess at the default size + builder = StringBuilder(len(input)) + first = True if not sub: upper = len(input) if maxsplit > 0 and maxsplit < upper + 2: upper = maxsplit - 1 assert upper >= 0 - substrings_w = [""] + first = False for i in range(upper): - c = input[i] - substrings_w.append(c) - substrings_w.append(input[upper:]) + builder.append(by) + builder.append(input[i]) + builder.append(by) + builder.append_slice(input, upper, len(input)) else: start = 0 sublen = len(sub) - substrings_w = [] while maxsplit != 0: next = input.find(sub, start) if next < 0: break - substrings_w.append(input[start:next]) + if not first: + builder.append(by) + first = False + builder.append_slice(input, start, next) start = next + sublen maxsplit -= 1 # NB. if it's already < 0, it stays < 0 - substrings_w.append(input[start:]) + if not first: + builder.append(by) + builder.append_slice(input, start, len(input)) - try: - # XXX conservative estimate. If your strings are that close - # to overflowing, bad luck. 
- one = ovfcheck(len(substrings_w) * len(by)) - ovfcheck(one + len(input)) - except OverflowError: - raise OperationError( - space.w_OverflowError, - space.wrap("replace string is too long")) - - return space.wrap(by.join(substrings_w)) + return space.wrap(builder.build()) def str_replace__String_ANY_ANY_ANY(space, w_self, w_sub, w_by, w_maxsplit): diff --git a/pypy/tool/gcc_cache.py b/pypy/tool/gcc_cache.py --- a/pypy/tool/gcc_cache.py +++ b/pypy/tool/gcc_cache.py @@ -11,9 +11,6 @@ # Import 'platform' every time, the compiler may have been changed from pypy.translator.platform import platform cache_dir = cache_dir_root.join(cachename).ensure(dir=1) - c_files.extend([py.path.local(f) for f in eci.separate_module_files]) - eci = ExternalCompilationInfo(**eci._copy_attributes()) - eci.separate_module_files = () filecontents = [c_file.read() for c_file in c_files] key = repr((filecontents, eci, platform.key())) hash = md5(key).hexdigest() From noreply at buildbot.pypy.org Sat Dec 17 16:21:39 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 17 Dec 2011 16:21:39 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: rename to indicate that it belongs to the short_preamble and not the current part Message-ID: <20111217152139.26EE78205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50622:c69acd44808e Date: 2011-12-17 15:14 +0100 http://bitbucket.org/pypy/pypy/changeset/c69acd44808e/ Log: rename to indicate that it belongs to the short_preamble and not the current part diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -187,9 +187,9 @@ self.short = target_token.short_preamble[:] self.short_seen = {} self.short_boxes = exported_state.short_boxes + self.short_resume_at_jump_descr = target_token.resume_at_jump_descr self.inputargs = targetop.getarglist() self.initial_virtual_state = target_token.virtual_state - self.resume_at_jump_descr = target_token.resume_at_jump_descr seen = {} for box in self.inputargs: @@ -364,7 +364,7 @@ for i in range(len(short)): short[i] = inliner.inline_op(short[i]) - target_token.resume_at_jump_descr = self.resume_at_jump_descr.clone_if_mutable() + target_token.resume_at_jump_descr = target_token.resume_at_jump_descr.clone_if_mutable() inliner.inline_descr_inplace(target_token.resume_at_jump_descr) # Forget the values to allow them to be freed @@ -403,7 +403,7 @@ if not isinstance(a, Const) and a not in self.short_seen: self.add_op_to_short(self.short_boxes.producer(a), emit, guards_needed) if op.is_guard(): - descr = self.resume_at_jump_descr.clone_if_mutable() + descr = self.short_resume_at_jump_descr.clone_if_mutable() op.setdescr(descr) if guards_needed and self.short_boxes.has_producer(op.result): From noreply at buildbot.pypy.org Sat Dec 17 16:21:40 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 17 Dec 2011 16:21:40 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: not needed since the hack adding ops at the begining of newoperations was removed Message-ID: <20111217152140.49A4D8205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50623:1dfa4f95ba02 Date: 2011-12-17 15:34 +0100 http://bitbucket.org/pypy/pypy/changeset/1dfa4f95ba02/ Log: not needed since the hack adding ops at the begining of newoperations was removed diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py 
b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -222,7 +222,7 @@ self.optimizer.emitting_dissabled = False def close_bridge(self, start_label): - inputargs = self.inputargs + inputargs = self.inputargs short_jumpargs = inputargs[:] # We dont need to inline the short preamble we are creating as we are conneting @@ -232,8 +232,6 @@ newoperations = self.optimizer.get_newoperations() self.boxes_created_this_iteration = {} i = 0 - while newoperations[i].getopnum() != rop.LABEL: - i += 1 while i < len(newoperations): op = newoperations[i] self.boxes_created_this_iteration[op.result] = True @@ -279,8 +277,6 @@ newoperations = self.optimizer.get_newoperations() self.boxes_created_this_iteration = {} i = j = 0 - while newoperations[i].getopnum() != rop.LABEL: - i += 1 while i < len(newoperations) or j < len(jumpargs): if i == len(newoperations): while j < len(jumpargs): From noreply at buildbot.pypy.org Sat Dec 17 16:21:41 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 17 Dec 2011 16:21:41 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: always assign self.inputargs Message-ID: <20111217152141.6B0118205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50624:69c7bd4e7bac Date: 2011-12-17 15:45 +0100 http://bitbucket.org/pypy/pypy/changeset/69c7bd4e7bac/ Log: always assign self.inputargs diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -172,8 +172,11 @@ def import_state(self, targetop): self.did_import = False if not targetop: + self.inputargs = self.optimizer.loop.inputargs # FIXME: Set up some sort of empty state with no virtuals? return + self.inputargs = targetop.getarglist() + target_token = targetop.getdescr() if not target_token: return @@ -188,7 +191,6 @@ self.short_seen = {} self.short_boxes = exported_state.short_boxes self.short_resume_at_jump_descr = target_token.resume_at_jump_descr - self.inputargs = targetop.getarglist() self.initial_virtual_state = target_token.virtual_state seen = {} From noreply at buildbot.pypy.org Sat Dec 17 16:21:42 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 17 Dec 2011 16:21:42 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: labels always have descr Message-ID: <20111217152142.8B85D8205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50625:9e63288cc721 Date: 2011-12-17 15:47 +0100 http://bitbucket.org/pypy/pypy/changeset/9e63288cc721/ Log: labels always have descr diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -178,8 +178,6 @@ self.inputargs = targetop.getarglist() target_token = targetop.getdescr() - if not target_token: - return assert isinstance(target_token, TargetToken) exported_state = target_token.exported_state if not exported_state: From noreply at buildbot.pypy.org Sat Dec 17 18:52:28 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Dec 2011 18:52:28 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: In-progress. 
Message-ID: <20111217175228.567428205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50626:fc4c51ca1ec4 Date: 2011-12-17 15:57 +0100 http://bitbucket.org/pypy/pypy/changeset/fc4c51ca1ec4/ Log: In-progress. diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -41,15 +41,21 @@ self.field_strlen_descr = get_field_arraylen_descr(self, rstr.STR) self.field_unicodelen_descr = get_field_arraylen_descr(self, rstr.UNICODE) + self._generated_functions = [] - def _ready(self): - MALLOC_FIXEDSIZE = lltype.Ptr( - lltype.FuncType([lltype.Signed], llmemory.GCREF)) - self.malloc_fixedsize_fn = llhelper(MALLOC_FIXEDSIZE, - self.malloc_fixedsize) - self.c_malloc_fixedsize_fn = ConstInt( - heaptracker.adr2int(llmemory.cast_ptr_to_adr( - self.malloc_fixedsize_fn))) + def generate_function(self, funcname, func, ARGS, RESULT=llmemory.GCREF): + """Generates a variant of malloc with the given name and the given + arguments. It should raise MemoryError and return NULL if out of + memory. + """ + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, RESULT)) + ll_func = llhelper(FUNCPTR, func) + c_ll_func = ConstInt( + heaptracker.adr2int(llmemory.cast_ptr_to_adr(ll_func))) + setattr(self, '%s' % funcname, func) + setattr(self, '%s_fn' % funcname, ll_func) + setattr(self, 'c_%s_fn' % funcname, c_ll_func) + self._generated_functions.append(funcname) def _freeze_(self): return True @@ -168,9 +174,8 @@ if not res: raise MemoryError return res - self.malloc_fixedsize = malloc_fixedsize - # - self._ready() + self.generate_function('malloc_fixedsize', malloc_fixedsize, + [lltype.Signed]) def _gc_malloc(self, size, tid): # Boehm: 'tid' is ignored @@ -618,7 +623,7 @@ self._make_gcrootmap() self._make_layoutbuilder() self._setup_gcclass() - self._make_functions() + self._make_functions() def _initialize_for_tests(self): self.layoutbuilder = None @@ -673,37 +678,49 @@ def _make_functions(self): llop1 = self.llop1 - # make the fixed malloc function, with one argument - def malloc_gc_fixed(size): + + def malloc_nursery(size): + """Allocate 'size' null bytes out of the nursery. + Note that the fast path is typically inlined by the backend. + """ type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here - res = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - type_id, size, - False, False, False) - #llop.debug_print(lltype.Void, "\tmalloc_basic", size, "-->", res) - # In case the operation above failed, we are returning NULL - # from this function to assembler. There is also an RPython - # exception set, typically MemoryError; but it's easier and - # faster to check for the NULL return value, as done by - # translator/exceptiontransform.py. 
- return res - self.malloc_gc_fixed = malloc_gc_fixed - self.MALLOC_GC_FIXED = lltype.Ptr( - lltype.FuncType([lltype.Signed], llmemory.GCREF)) - # - # make the varsize malloc function, with three arguments - def malloc_gc_variable(basesize, num_elem, itemsize): - xx - self.malloc_gc_variable = malloc_gc_variable - self.MALLOC_GC_VARIABLE = lltype.Ptr( - lltype.FuncType([lltype.Signed] * 3, llmemory.GCREF)) - # - self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( - [llmemory.Address, llmemory.Address], lltype.Void)) - self.WB_ARRAY_FUNCPTR = lltype.Ptr(lltype.FuncType( - [llmemory.Address, lltype.Signed, llmemory.Address], lltype.Void)) - self.write_barrier_descr = WriteBarrierDescr(self) - self.fielddescr_tid = self.write_barrier_descr.fielddescr_tid - # + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, size, + False, False, False) + self.generate_function('malloc_nursery', malloc_nursery, + [lltype.Signed]) + +## # make the fixed malloc function, with one argument +## def malloc_gc_fixed(size): +## type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here +## res = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, +## type_id, size, +## False, False, False) +## #llop.debug_print(lltype.Void, "\tmalloc_basic", size, "-->", res) +## # In case the operation above failed, we are returning NULL +## # from this function to assembler. There is also an RPython +## # exception set, typically MemoryError; but it's easier and +## # faster to check for the NULL return value, as done by +## # translator/exceptiontransform.py. +## return res +## self.malloc_gc_fixed = malloc_gc_fixed +## self.MALLOC_GC_FIXED = lltype.Ptr( +## lltype.FuncType([lltype.Signed], llmemory.GCREF)) +## # +## # make the varsize malloc function, with three arguments +## def malloc_gc_variable(basesize, num_elem, itemsize): +## xx +## self.malloc_gc_variable = malloc_gc_variable +## self.MALLOC_GC_VARIABLE = lltype.Ptr( +## lltype.FuncType([lltype.Signed] * 3, llmemory.GCREF)) +## # +## self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( +## [llmemory.Address, llmemory.Address], lltype.Void)) +## self.WB_ARRAY_FUNCPTR = lltype.Ptr(lltype.FuncType( +## [llmemory.Address, lltype.Signed, llmemory.Address], lltype.Void)) +## self.write_barrier_descr = WriteBarrierDescr(self) +## self.fielddescr_tid = self.write_barrier_descr.fielddescr_tid +## # ## def malloc_array(itemsize, tid, num_elem): ## type_id = llop.extract_ushort(llgroup.HALFWORD, tid) ## check_typeid(type_id) diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -153,8 +153,8 @@ self.recent_mallocs[v_result] = None def gen_malloc_nursery(self, size, v_result): - """Try to generate or update a MALLOC_NURSERY. - If that fails, generate a plain MALLOC_GC instead. + """Try to generate or update a CALL_MALLOC_NURSERY. + If that fails, generate a plain CALL_MALLOC_GC instead. 
""" if not self.gc_ll_descr.can_use_nursery_malloc(size): return self.gen_malloc_fixedsize(size, v_result) @@ -164,11 +164,11 @@ # if self._op_malloc_nursery is not None: # already a MALLOC_NURSERY: increment its total size - total_size = self._op_malloc_nursery.getarg(0).getint() + total_size = self._op_malloc_nursery.getarg(1).getint() total_size += size if self.gc_ll_descr.can_use_nursery_malloc(total_size): # if the total size is still reasonable, merge it - self._op_malloc_nursery.setarg(0, ConstInt(total_size)) + self._op_malloc_nursery.setarg(1, ConstInt(total_size)) op = ResOperation(rop.INT_ADD, [self._v_last_malloced_nursery, ConstInt(self._previous_size)], @@ -176,7 +176,10 @@ if op is None: # if we failed to merge with a previous MALLOC_NURSERY, emit one self.emitting_an_operation_that_can_collect() - op = ResOperation(rop.MALLOC_NURSERY, [ConstInt(size)], v_result) + op = ResOperation(rop.CALL_MALLOC_NURSERY, + [self.gc_ll_descr.c_malloc_nursery_fn, + ConstInt(size)], + v_result) self._op_malloc_nursery = op # self.newops.append(op) diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -15,8 +15,6 @@ class RewriteTests(object): def check_rewrite(self, frm_operations, to_operations): - malloc_fixedsize = self.gc_ll_descr.malloc_fixedsize_fn - # S = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) sdescr = get_size_descr(self.gc_ll_descr, S) @@ -62,9 +60,14 @@ unicodelendescr = get_field_arraylen_descr(self.gc_ll_descr, rstr.UNICODE) # - ops = parse(frm_operations, namespace=locals()) - expected = parse(to_operations % Evaluator(locals()), - namespace=locals()) + namespace = locals().copy() + # + for funcname in self.gc_ll_descr._generated_functions: + namespace[funcname] = getattr(self.gc_ll_descr, '%s_fn' % funcname) + # + ops = parse(frm_operations, namespace=namespace) + expected = parse(to_operations % Evaluator(namespace), + namespace=namespace) operations = self.gc_ll_descr.rewrite_assembler(self.cpu, ops.operations, []) @@ -195,7 +198,8 @@ jump() """, """ [p1] - p0 = malloc_nursery(%(sdescr.size)d) + p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + %(sdescr.size)d) setfield_gc(p0, 1234, descr=tiddescr) jump() """) @@ -209,7 +213,8 @@ jump() """, """ [] - p0 = malloc_nursery(%(sdescr.size + tdescr.size + sdescr.size)d) + p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + %(sdescr.size + tdescr.size + sdescr.size)d) setfield_gc(p0, 1234, descr=tiddescr) p1 = int_add(p0, %(sdescr.size)d) setfield_gc(p1, 5678, descr=tiddescr) From noreply at buildbot.pypy.org Sat Dec 17 18:52:29 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Dec 2011 18:52:29 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: Fix some more tests. Message-ID: <20111217175229.771308205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50627:2011f2788757 Date: 2011-12-17 16:00 +0100 http://bitbucket.org/pypy/pypy/changeset/2011f2788757/ Log: Fix some more tests. 
diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -230,7 +230,8 @@ jump() """, """ [] - p0 = malloc_nursery(%(adescr.get_base_size(False) + \ + p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + %(adescr.get_base_size(False) + \ 10 * adescr.get_item_size(False))d) setfield_gc(p0, 4321, descr=tiddescr) setfield_gc(p0, 10, descr=alendescr) @@ -245,7 +246,8 @@ jump() """, """ [] - p0 = malloc_nursery(%(sdescr.size + \ + p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + %(sdescr.size + \ adescr.get_base_size(False) + \ 10 * adescr.get_item_size(False))d) setfield_gc(p0, 1234, descr=tiddescr) @@ -262,7 +264,8 @@ jump() """, """ [] - p0 = malloc_nursery(%(bdescr.get_base_size(False) + 8)d) + p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + %(bdescr.get_base_size(False) + 8)d) setfield_gc(p0, 8765, descr=tiddescr) setfield_gc(p0, 6, descr=blendescr) jump() @@ -278,7 +281,8 @@ jump() """, """ [] - p0 = malloc_nursery(%(4 * (bdescr.get_base_size(False) + 8))d) + p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + %(4 * (bdescr.get_base_size(False) + 8))d) setfield_gc(p0, 8765, descr=tiddescr) setfield_gc(p0, 5, descr=blendescr) p1 = int_add(p0, %(bdescr.get_base_size(False) + 8)d) @@ -301,7 +305,7 @@ jump() """, """ [] - p0 = malloc_nursery(%(4*WORD)d) + p0 = call_malloc_nursery(ConstClass(malloc_nursery), %(4*WORD)d) setfield_gc(p0, 9000, descr=tiddescr) p1 = int_add(p0, %(2*WORD)d) setfield_gc(p1, 9000, descr=tiddescr) From noreply at buildbot.pypy.org Sat Dec 17 18:52:30 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Dec 2011 18:52:30 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: In-progress. Message-ID: <20111217175230.9B0898205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50628:ac65509a7f46 Date: 2011-12-17 16:13 +0100 http://bitbucket.org/pypy/pypy/changeset/ac65509a7f46/ Log: In-progress. 
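The malloc_array helper added below is registered through the generate_function() pattern introduced earlier on this branch: one call stores the plain callable, an llhelper function pointer, and a ConstInt holding its address under the names <name>, <name>_fn and c_<name>_fn. A rough stand-alone sketch with dummy stand-ins for llhelper/ConstInt, which need the translated toolchain (all "Fake"/placeholder names are invented here, not the real backend API):

class FakeGCDescr(object):
    def __init__(self):
        self._generated_functions = []

    def generate_function(self, funcname, func):
        # the real code wraps 'func' with llhelper() and turns its address
        # into a ConstInt; placeholder tuples stand in for both here
        setattr(self, funcname, func)                          # plain callable
        setattr(self, funcname + '_fn', ('ll_function_ptr', func))
        setattr(self, 'c_' + funcname + '_fn', ('const_int_addr', funcname))
        self._generated_functions.append(funcname)

descr = FakeGCDescr()

def malloc_array(itemsize, tid, num_elem):
    # the real variant calls do_malloc_varsize_clear(); this one just
    # reports how many bytes of items it would allocate
    return ('gcref', itemsize * num_elem)

descr.generate_function('malloc_array', malloc_array)
print(descr.c_malloc_array_fn)        # usable as the first CALL_MALLOC_GC arg
print(descr.malloc_array(8, 1234, 10))

The rewriter can then hand descr.c_malloc_array_fn to CALL_MALLOC_GC as its callee, exactly as gen_malloc_array does in the patch below.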
diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -690,6 +690,16 @@ self.generate_function('malloc_nursery', malloc_nursery, [lltype.Signed]) + def malloc_array(itemsize, tid, num_elem): + type_id = llop.extract_ushort(llgroup.HALFWORD, tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + type_id, num_elem, self.array_basesize, itemsize, + self.array_length_ofs) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 3) + ## # make the fixed malloc function, with one argument ## def malloc_gc_fixed(size): ## type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here @@ -721,17 +731,6 @@ ## self.write_barrier_descr = WriteBarrierDescr(self) ## self.fielddescr_tid = self.write_barrier_descr.fielddescr_tid ## # -## def malloc_array(itemsize, tid, num_elem): -## type_id = llop.extract_ushort(llgroup.HALFWORD, tid) -## check_typeid(type_id) -## return llop1.do_malloc_varsize_clear( -## llmemory.GCREF, -## type_id, num_elem, self.array_basesize, itemsize, -## self.array_length_ofs) -## ###self.malloc_array = malloc_array -## self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( -## [lltype.Signed] * 3, llmemory.GCREF)) -## # ## (str_basesize, str_itemsize, str_ofs_length ## ) = symbolic.get_array_token(rstr.STR, True) ## (unicode_basesize, unicode_itemsize, unicode_ofs_length diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -122,12 +122,10 @@ pass # total_size is still -1 if total_size >= 0: self.gen_malloc_nursery(total_size, op.result) + self.gen_initialize_tid(op.result, tid) + self.gen_initialize_len(op.result, v_length, arraylen_descr) else: - xxx - self.gen_new_array(base_size, op.result, - v_length, ConstInt(item_size)) - self.gen_initialize_tid(op.result, tid) - self.gen_initialize_len(op.result, v_length, arraylen_descr) + self.gen_malloc_array(item_size, tid, v_length, op.result) # ---------- @@ -139,18 +137,27 @@ self._op_malloc_nursery = None self.recent_mallocs.clear() + def _gen_call_malloc_gc(self, args, v_result): + """Generate a CALL_MALLOC_GC with the given args.""" + self.emitting_an_operation_that_can_collect() + op = ResOperation(rop.CALL_MALLOC_GC, args, v_result) + self.newops.append(op) + # mark 'v_result' as freshly malloced + self.recent_mallocs[v_result] = None + def gen_malloc_fixedsize(self, size, v_result): """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, Const(size)). Note that with the framework GC, this should be called very rarely. """ - self.emitting_an_operation_that_can_collect() - c_size = ConstInt(size) - op = ResOperation(rop.CALL_MALLOC_GC, - [self.gc_ll_descr.c_malloc_fixedsize_fn, c_size], - v_result) - self.newops.append(op) - # mark 'v_result' as freshly malloced - self.recent_mallocs[v_result] = None + self._gen_call_malloc_gc([self.gc_ll_descr.c_malloc_fixedsize_fn, + ConstInt(size)], v_result) + + def gen_malloc_array(self, itemsize, tid, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_array_fn, ...).""" + self._gen_call_malloc_gc([self.gc_ll_descr.c_malloc_array_fn, + ConstInt(itemsize), + ConstInt(tid), + v_num_elem], v_result) def gen_malloc_nursery(self, size, v_result): """Try to generate or update a CALL_MALLOC_NURSERY. 
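The diff below completes the variable-length case: a NEW_ARRAY/NEWSTR/NEWUNICODE with a constant length still goes through the nursery path with explicit tid and length initialization, while a variable length is routed to the matching malloc_array/malloc_str/malloc_unicode helper via CALL_MALLOC_GC. A minimal sketch of that dispatch in plain Python, with invented names and simplified op tuples (illustrative only, not the real rewriter):

def rewrite_new_array(opname, itemsize, basesize, length):
    """Return a description of the malloc the rewriter would emit."""
    if isinstance(length, int):
        # constant length: fold everything into one nursery allocation,
        # then fill in the tid and length fields explicitly
        total = basesize + itemsize * length
        return [('call_malloc_nursery', total),
                ('setfield_gc', 'tid'),
                ('setfield_gc', 'length', length)]
    # variable length: defer to the helper that knows the array layout
    helper = {'new_array': 'malloc_array',
              'newstr': 'malloc_str',
              'newunicode': 'malloc_unicode'}[opname]
    return [('call_malloc_gc', helper, length)]

print(rewrite_new_array('newstr', 1, 21, 14))      # constant -> nursery path
print(rewrite_new_array('newstr', 1, 21, 'i2'))    # variable -> malloc_str

This mirrors the expected trace in test_rewrite: newstr(14) lands in call_malloc_nursery, while newstr(i2) becomes call_malloc_gc(ConstClass(malloc_str), i2).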
diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -319,9 +319,8 @@ jump(i0) """, """ [i0] - p0 = malloc_gc(%(bdescr.get_base_size(False))d, i0, 1) - setfield_gc(p0, 8765, descr=tiddescr) - setfield_gc(p0, i0, descr=blendescr) + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, i0) jump(i0) """) From noreply at buildbot.pypy.org Sat Dec 17 18:52:31 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Dec 2011 18:52:31 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: Finish test_rewrite.TestFramework. Message-ID: <20111217175231.BEBF78205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50629:9d09995aae35 Date: 2011-12-17 16:30 +0100 http://bitbucket.org/pypy/pypy/changeset/9d09995aae35/ Log: Finish test_rewrite.TestFramework. diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -679,18 +679,19 @@ def _make_functions(self): llop1 = self.llop1 - def malloc_nursery(size): + def malloc_nursery_slowpath(size): """Allocate 'size' null bytes out of the nursery. - Note that the fast path is typically inlined by the backend. - """ + Note that the fast path is typically inlined by the backend.""" type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, type_id, size, False, False, False) - self.generate_function('malloc_nursery', malloc_nursery, + self.generate_function('malloc_nursery', malloc_nursery_slowpath, [lltype.Signed]) def malloc_array(itemsize, tid, num_elem): + """Allocate an array with a variable-size num_elem. + Only works for standard arrays.""" type_id = llop.extract_ushort(llgroup.HALFWORD, tid) check_typeid(type_id) return llop1.do_malloc_varsize_clear( @@ -700,6 +701,28 @@ self.generate_function('malloc_array', malloc_array, [lltype.Signed] * 3) + def malloc_str(length): + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + str_type_id, length, str_basesize, str_itemsize, + str_ofs_length) + self.generate_function('malloc_str', malloc_str, + [lltype.Signed]) + + def malloc_unicode(length): + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + unicode_type_id, length, unicode_basesize, unicode_itemsize, + unicode_ofs_length) + self.generate_function('malloc_unicode', malloc_unicode, + [lltype.Signed]) + + # Rarely called: allocate a fixed-size amount of bytes, but + # not in the nursery, because it is too big. Implemented like + # malloc_nursery_slowpath() above. 
+ self.generate_function('malloc_fixedsize', malloc_nursery_slowpath, + [lltype.Signed]) + ## # make the fixed malloc function, with one argument ## def malloc_gc_fixed(size): ## type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -125,7 +125,15 @@ self.gen_initialize_tid(op.result, tid) self.gen_initialize_len(op.result, v_length, arraylen_descr) else: - self.gen_malloc_array(item_size, tid, v_length, op.result) + opnum = op.getopnum() + if opnum == rop.NEW_ARRAY: + self.gen_malloc_array(item_size, tid, v_length, op.result) + elif opnum == rop.NEWSTR: + self.gen_malloc_str(v_length, op.result) + elif opnum == rop.NEWUNICODE: + self.gen_malloc_unicode(v_length, op.result) + else: + raise NotImplementedError(op.getopname()) # ---------- @@ -159,6 +167,16 @@ ConstInt(tid), v_num_elem], v_result) + def gen_malloc_str(self, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_str_fn, ...).""" + self._gen_call_malloc_gc([self.gc_ll_descr.c_malloc_str_fn, + v_num_elem], v_result) + + def gen_malloc_unicode(self, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_unicode_fn, ...).""" + self._gen_call_malloc_gc([self.gc_ll_descr.c_malloc_unicode_fn, + v_num_elem], v_result) + def gen_malloc_nursery(self, size, v_result): """Try to generate or update a CALL_MALLOC_NURSERY. If that fails, generate a plain CALL_MALLOC_GC instead. diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -332,7 +332,8 @@ jump() """, """ [] - p0 = malloc_gc(%(bdescr.get_base_size(False) + 100)d, 0, 0) + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(bdescr.get_base_size(False) + 100)d) setfield_gc(p0, 8765, descr=tiddescr) setfield_gc(p0, 100, descr=blendescr) jump() @@ -348,13 +349,15 @@ jump() """, """ [] - p0 = malloc_nursery(%(2 * (bdescr.get_base_size(False) + 104))d) + p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + %(2 * (bdescr.get_base_size(False) + 104))d) setfield_gc(p0, 8765, descr=tiddescr) setfield_gc(p0, 101, descr=blendescr) p1 = int_add(p0, %(bdescr.get_base_size(False) + 104)d) setfield_gc(p1, 8765, descr=tiddescr) setfield_gc(p1, 102, descr=blendescr) - p2 = malloc_nursery(%(bdescr.get_base_size(False) + 104)d) + p2 = call_malloc_nursery(ConstClass(malloc_nursery), \ + %(bdescr.get_base_size(False) + 104)d) setfield_gc(p2, 8765, descr=tiddescr) setfield_gc(p2, 103, descr=blendescr) jump() @@ -367,7 +370,8 @@ jump() """, """ [p1] - p0 = malloc_nursery(104) # rounded up + p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + 104) # rounded up setfield_gc(p0, 9315, descr=tiddescr) setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) jump() @@ -381,7 +385,7 @@ jump() """, """ [p1] - p0 = malloc_gc(102, 0, 0) + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102) setfield_gc(p0, 9315, descr=tiddescr) setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) jump() @@ -393,18 +397,19 @@ p0 = newstr(14) p1 = newunicode(10) p2 = newunicode(i2) + p3 = newstr(i2) jump() """, """ [i2] - p0 = malloc_nursery(%(str_basesize + 16 * str_itemsize + \ + p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + %(str_basesize + 16 * str_itemsize + \ unicode_basesize + 10 * unicode_itemsize)d) setfield_gc(p0, %(str_type_id)d, 
descr=tiddescr) setfield_gc(p0, 14, descr=strlendescr) p1 = int_add(p0, %(str_basesize + 16 * str_itemsize)d) setfield_gc(p1, %(unicode_type_id)d, descr=tiddescr) setfield_gc(p1, 10, descr=unicodelendescr) - p2 = malloc_gc(%(unicode_basesize)d, i2, %(unicode_itemsize)d) - setfield_gc(p2, %(unicode_type_id)d, descr=tiddescr) - setfield_gc(p2, i2, descr=unicodelendescr) + p2 = call_malloc_gc(ConstClass(malloc_unicode), i2) + p3 = call_malloc_gc(ConstClass(malloc_str), i2) jump() """) From noreply at buildbot.pypy.org Sat Dec 17 18:52:32 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Dec 2011 18:52:32 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: Start to reorganize dsecr.py to kill the obscure (and now half pointless) Message-ID: <20111217175232.F3E578205C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50630:bab5597339cc Date: 2011-12-17 18:46 +0100 http://bitbucket.org/pypy/pypy/changeset/bab5597339cc/ Log: Start to reorganize dsecr.py to kill the obscure (and now half pointless) inheritance. diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -6,11 +6,6 @@ from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker, longlong -# The point of the class organization in this file is to make instances -# as compact as possible. This is done by not storing the field size or -# the 'is_pointer_field' flag in the instance itself but in the class -# (in methods actually) using a few classes instead of just one. - class GcCache(object): def __init__(self, translate_support_code, rtyper=None): @@ -79,229 +74,108 @@ # ____________________________________________________________ # FieldDescrs -class BaseFieldDescr(AbstractDescr): +FLAG_POINTER = 'P' +FLAG_FLOAT = 'F' +FLAG_UNSIGNED = 'U' +FLAG_SIGNED = 'S' +FLAG_STRUCT = 'X' + +class FieldDescr(AbstractDescr): + name = '' offset = 0 # help translation - name = '' - _clsname = '' + field_size = 0 + flag = '\x00' - def __init__(self, name, offset): + def __init__(self, name, offset, field_size, flag): self.name = name self.offset = offset + self.field_size = field_size + self.flag = flag def sort_key(self): return self.offset - def get_field_size(self, translate_support_code): - raise NotImplementedError + def repr_of_descr(self): + return '' % (self.flag, self.name, self.offset) - _is_pointer_field = False # unless overridden by GcPtrFieldDescr - _is_float_field = False # unless overridden by FloatFieldDescr - _is_field_signed = False # unless overridden by XxxFieldDescr - - def is_pointer_field(self): - return self._is_pointer_field - - def is_float_field(self): - return self._is_float_field - - def is_field_signed(self): - return self._is_field_signed - - def repr_of_descr(self): - return '<%s %s %s>' % (self._clsname, self.name, self.offset) - -class DynamicFieldDescr(BaseFieldDescr): - def __init__(self, offset, fieldsize, is_pointer, is_float, is_signed): - self.offset = offset - self._fieldsize = fieldsize - self._is_pointer_field = is_pointer - self._is_float_field = is_float - self._is_field_signed = is_signed - - def get_field_size(self, translate_support_code): - return self._fieldsize - -class NonGcPtrFieldDescr(BaseFieldDescr): - _clsname = 'NonGcPtrFieldDescr' - def get_field_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrFieldDescr(NonGcPtrFieldDescr): - _clsname = 
'GcPtrFieldDescr' - _is_pointer_field = True - -def getFieldDescrClass(TYPE): - return getDescrClass(TYPE, BaseFieldDescr, GcPtrFieldDescr, - NonGcPtrFieldDescr, 'Field', 'get_field_size', - '_is_float_field', '_is_field_signed') def get_field_descr(gccache, STRUCT, fieldname): cache = gccache._cache_field try: return cache[STRUCT][fieldname] except KeyError: - offset, _ = symbolic.get_field_token(STRUCT, fieldname, - gccache.translate_support_code) + offset, size = symbolic.get_field_token(STRUCT, fieldname, + gccache.translate_support_code) FIELDTYPE = getattr(STRUCT, fieldname) + flag = get_type_flag(FIELDTYPE) name = '%s.%s' % (STRUCT._name, fieldname) - fielddescr = getFieldDescrClass(FIELDTYPE)(name, offset) + fielddescr = FieldDescr(name, offset, size, flag) cachedict = cache.setdefault(STRUCT, {}) cachedict[fieldname] = fielddescr return fielddescr +def get_type_flag(TYPE): + if isinstance(TYPE, lltype.Ptr): + if TYPE.TO._gckind == 'gc': + return FLAG_POINTER + else: + return FLAG_UNSIGNED + if isinstance(TYPE, lltype.Struct): + return FLAG_STRUCT + if TYPE is lltype.Float or is_longlong(TYPE): + return FLAG_FLOAT + if (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and + rffi.cast(TYPE, -1) == -1): + return FLAG_SIGNED + return FLAG_UNSIGNED + def get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT): cache = gccache._cache_arraylen - if isinstance(ARRAY_OR_STRUCT, lltype.GcArray): - # assume that they all have the length at the same offset - key = 'array' - else: - # assume that it is STR or UNICODE - assert isinstance(ARRAY_OR_STRUCT.chars, lltype.Array) - key = ARRAY_OR_STRUCT try: - return cache[key] + return cache[ARRAY_OR_STRUCT] except KeyError: tsc = gccache.translate_support_code (_, _, ofs) = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) - if key == 'array' and not tsc: - (_, _, baseofs) = symbolic.get_array_token(_A, tsc) - assert baseofs == ofs, ("arrays %r and %r don't have the length " - "field at the same offset!" 
% - (ARRAY_OR_STRUCT, _A)) - SignedFieldDescr = getFieldDescrClass(lltype.Signed) - result = SignedFieldDescr("len", ofs) - cache[key] = result + size = symbolic.get_size(lltype.Signed, tsc) + result = FieldDescr("len", ofs, size, get_type_flag(lltype.Signed)) + cache[ARRAY_OR_STRUCT] = result return result # ____________________________________________________________ # ArrayDescrs -_A = lltype.GcArray(lltype.Signed) # a random gcarray -_AF = lltype.GcArray(lltype.Float) # an array of C doubles +class ArrayDescr(AbstractDescr): + tid = 0 + basesize = 0 # workaround for the annotator + itemsize = 0 + lendescr = None + flag = '\x00' -class BaseArrayDescr(AbstractDescr): - _clsname = '' - tid = llop.combine_ushort(lltype.Signed, 0, 0) - - def get_base_size(self, translate_support_code): - basesize, _, _ = symbolic.get_array_token(_A, translate_support_code) - return basesize - - def get_ofs_length(self, translate_support_code): - _, _, ofslength = symbolic.get_array_token(_A, translate_support_code) - return ofslength - - def get_item_size(self, translate_support_code): - raise NotImplementedError - - _is_array_of_pointers = False # unless overridden by GcPtrArrayDescr - _is_array_of_floats = False # unless overridden by FloatArrayDescr - _is_array_of_structs = False # unless overridden by StructArrayDescr - _is_item_signed = False # unless overridden by XxxArrayDescr - - def is_array_of_pointers(self): - return self._is_array_of_pointers - - def is_array_of_floats(self): - return self._is_array_of_floats - - def is_array_of_structs(self): - return self._is_array_of_structs - - def is_item_signed(self): - return self._is_item_signed + def __init__(self, basesize, itemsize, lendescr, flag): + self.basesize = basesize + self.itemsize = itemsize + self.lendescr = lendescr # or None, if no length + self.flag = flag def repr_of_descr(self): - return '<%s>' % self._clsname + return '' % (self.flag, self.itemsize) -class NonGcPtrArrayDescr(BaseArrayDescr): - _clsname = 'NonGcPtrArrayDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayDescr(NonGcPtrArrayDescr): - _clsname = 'GcPtrArrayDescr' - _is_array_of_pointers = True - -class FloatArrayDescr(BaseArrayDescr): - _clsname = 'FloatArrayDescr' - _is_array_of_floats = True - def get_base_size(self, translate_support_code): - basesize, _, _ = symbolic.get_array_token(_AF, translate_support_code) - return basesize - def get_item_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class StructArrayDescr(BaseArrayDescr): - _clsname = 'StructArrayDescr' - _is_array_of_structs = True - -class BaseArrayNoLengthDescr(BaseArrayDescr): - def get_base_size(self, translate_support_code): - return 0 - - def get_ofs_length(self, translate_support_code): - return -1 - -class DynamicArrayNoLengthDescr(BaseArrayNoLengthDescr): - def __init__(self, itemsize): - self.itemsize = itemsize - - def get_item_size(self, translate_support_code): - return self.itemsize - -class NonGcPtrArrayNoLengthDescr(BaseArrayNoLengthDescr): - _clsname = 'NonGcPtrArrayNoLengthDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayNoLengthDescr(NonGcPtrArrayNoLengthDescr): - _clsname = 'GcPtrArrayNoLengthDescr' - _is_array_of_pointers = True - -def getArrayDescrClass(ARRAY): - if ARRAY.OF is lltype.Float: - return FloatArrayDescr - elif isinstance(ARRAY.OF, lltype.Struct): - 
class Descr(StructArrayDescr): - _clsname = '%sArrayDescr' % ARRAY.OF._name - def get_item_size(self, translate_support_code): - return symbolic.get_size(ARRAY.OF, translate_support_code) - Descr.__name__ = Descr._clsname - return Descr - return getDescrClass(ARRAY.OF, BaseArrayDescr, GcPtrArrayDescr, - NonGcPtrArrayDescr, 'Array', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def getArrayNoLengthDescrClass(ARRAY): - return getDescrClass(ARRAY.OF, BaseArrayNoLengthDescr, GcPtrArrayNoLengthDescr, - NonGcPtrArrayNoLengthDescr, 'ArrayNoLength', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - def get_array_descr(gccache, ARRAY): cache = gccache._cache_array try: return cache[ARRAY] except KeyError: - # we only support Arrays that are either GcArrays, or raw no-length - # non-gc Arrays. + assert isinstance(ARRAY, lltype.Array) if ARRAY._hints.get('nolength', False): - assert not isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayNoLengthDescrClass(ARRAY)() + lendescr = None else: - assert isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayDescrClass(ARRAY)() - arraydescr.field_arraylen_descr = get_field_arraylen_descr( - gccache, ARRAY) - # verify basic assumption that all arrays' basesize and ofslength - # are equal - basesize, itemsize, ofslength = symbolic.get_array_token(ARRAY, False) - assert basesize == arraydescr.get_base_size(False) - assert itemsize == arraydescr.get_item_size(False) - if not ARRAY._hints.get('nolength', False): - assert ofslength == arraydescr.get_ofs_length(False) + lendescr = get_field_arraylen_descr(gccache, ARRAY) + tsc = gccache.translate_support_code + basesize, itemsize, _ = symbolic.get_array_token(ARRAY, tsc) + flag = get_type_flag(ARRAY.OF) + arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag) if isinstance(ARRAY, lltype.GcArray): gccache.init_array_descr(ARRAY, arraydescr) cache[ARRAY] = arraydescr @@ -311,34 +185,29 @@ # InteriorFieldDescr class InteriorFieldDescr(AbstractDescr): - arraydescr = BaseArrayDescr() # workaround for the annotator - fielddescr = BaseFieldDescr('', 0) + arraydescr = ArrayDescr(0, 0, None, '\x00') # workaround for the annotator + fielddescr = FieldDescr('', 0, 0, '\x00') def __init__(self, arraydescr, fielddescr): + assert arraydescr.flag == FLAG_STRUCT self.arraydescr = arraydescr self.fielddescr = fielddescr - def is_pointer_field(self): - return self.fielddescr.is_pointer_field() - - def is_float_field(self): - return self.fielddescr.is_float_field() - def sort_key(self): return self.fielddescr.sort_key() def repr_of_descr(self): return '' % self.fielddescr.repr_of_descr() -def get_interiorfield_descr(gc_ll_descr, ARRAY, FIELDTP, name): +def get_interiorfield_descr(gc_ll_descr, ARRAY, name): cache = gc_ll_descr._cache_interiorfield try: - return cache[(ARRAY, FIELDTP, name)] + return cache[(ARRAY, name)] except KeyError: arraydescr = get_array_descr(gc_ll_descr, ARRAY) - fielddescr = get_field_descr(gc_ll_descr, FIELDTP, name) + fielddescr = get_field_descr(gc_ll_descr, ARRAY.OF, name) descr = InteriorFieldDescr(arraydescr, fielddescr) - cache[(ARRAY, FIELDTP, name)] = descr + cache[(ARRAY, name)] = descr return descr # ____________________________________________________________ @@ -473,21 +342,11 @@ class BaseIntCallDescr(BaseCallDescr): - # Base class of the various subclasses of descrs corresponding to - # calls having a return kind of 'int' (including non-gc pointers). 
- # The inheritance hierarchy is a bit different than with other Descr - # classes because of the 'call_stub' attribute, which is of type - # - # lambda func, args_i, args_r, args_f --> int/ref/float/void - # - # The purpose of BaseIntCallDescr is to be the parent of all classes - # in which 'call_stub' has a return kind of 'int'. + # Calls having a return kind of 'int' (including non-gc pointers). _return_type = history.INT call_stub = staticmethod(lambda func, args_i, args_r, args_f: 0) - _is_result_signed = False # can be overridden in XxxCallDescr - def is_result_signed(self): - return self._is_result_signed +#... class DynamicIntCallDescr(BaseIntCallDescr): """ diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -120,6 +120,7 @@ gcrootmap = None write_barrier_descr = None fielddescr_tid = None + has_tid = False str_type_id = 0 unicode_type_id = 0 @@ -166,9 +167,12 @@ def __init__(self, gcdescr, translator, rtyper): GcLLDescription.__init__(self, gcdescr, translator, rtyper) # grab a pointer to the Boehm 'malloc' function - malloc_fn_ptr = self.configure_boehm_once() - self.malloc_fn_ptr = malloc_fn_ptr - # + self.malloc_fn_ptr = self.configure_boehm_once() + self._make_functions() + + def _make_functions(self): + malloc_fn_ptr = self.malloc_fn_ptr + def malloc_fixedsize(size): res = malloc_fn_ptr(size) if not res: @@ -177,6 +181,37 @@ self.generate_function('malloc_fixedsize', malloc_fixedsize, [lltype.Signed]) + def malloc_array(basesize, itemsize, num_elem): + xxx + try: + totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + except OverflowError: + raise MemoryError + res = malloc_fn_ptr(totalsize) + if not res: + raise MemoryError + arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) + arrayptr[ofs_length/WORD] = num_elem + return res + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 3) + + def malloc_str(length): + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + str_type_id, length, str_basesize, str_itemsize, + str_ofs_length) + self.generate_function('malloc_str', malloc_str, + [lltype.Signed]) + + def malloc_unicode(length): + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + unicode_type_id, length, unicode_basesize, unicode_itemsize, + unicode_ofs_length) + self.generate_function('malloc_unicode', malloc_unicode, + [lltype.Signed]) + def _gc_malloc(self, size, tid): # Boehm: 'tid' is ignored return self.malloc_fixedsize(size) @@ -607,6 +642,7 @@ class GcLLDescr_framework(GcLLDescription): DEBUG = False # forced to True by x86/test/test_zrpy_gc.py + has_tid = True def __init__(self, gcdescr, translator, rtyper, llop1=llop, really_not_translated=False): diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -85,7 +85,7 @@ self.handle_new_array(descr.tid, descr.get_base_size(self.tsc), descr.get_item_size(self.tsc), - descr.field_arraylen_descr, + descr.get_field_arraylen_descr(), op) elif opnum == rop.NEWSTR: self.handle_new_array(self.gc_ll_descr.str_type_id, @@ -162,10 +162,17 @@ def gen_malloc_array(self, itemsize, tid, v_num_elem, v_result): """Generate a CALL_MALLOC_GC(malloc_array_fn, ...).""" - self._gen_call_malloc_gc([self.gc_ll_descr.c_malloc_array_fn, - ConstInt(itemsize), - ConstInt(tid), - v_num_elem], v_result) + if self.gc_ll_descr.has_tid: + args = 
[self.gc_ll_descr.c_malloc_array_fn, + ConstInt(itemsize), + ConstInt(tid), + v_num_elem] + else: + args = [self.gc_ll_descr.c_malloc_array_fn, + ConstInt(basesize), + ConstInt(itemsize), + v_num_elem] + self._gen_call_malloc_gc(args, v_result) def gen_malloc_str(self, v_num_elem, v_result): """Generate a CALL_MALLOC_GC(malloc_str_fn, ...).""" diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -53,18 +53,6 @@ ('z', lltype.Ptr(U)), ('f', lltype.Float), ('s', lltype.SingleFloat)) - assert getFieldDescrClass(lltype.Ptr(T)) is GcPtrFieldDescr - assert getFieldDescrClass(lltype.Ptr(U)) is NonGcPtrFieldDescr - cls = getFieldDescrClass(lltype.Char) - assert cls != getFieldDescrClass(lltype.Signed) - assert cls == getFieldDescrClass(lltype.Char) - clsf = getFieldDescrClass(lltype.Float) - assert clsf != cls - assert clsf == getFieldDescrClass(lltype.Float) - clss = getFieldDescrClass(lltype.SingleFloat) - assert clss not in (cls, clsf) - assert clss == getFieldDescrClass(lltype.SingleFloat) - assert clss == getFieldDescrClass(rffi.UINT) # for now # c0 = GcCache(False) c1 = GcCache(True) @@ -77,11 +65,7 @@ descr_z = get_field_descr(c2, S, 'z') descr_f = get_field_descr(c2, S, 'f') descr_s = get_field_descr(c2, S, 's') - assert descr_x.__class__ is cls - assert descr_y.__class__ is GcPtrFieldDescr - assert descr_z.__class__ is NonGcPtrFieldDescr - assert descr_f.__class__ is clsf - assert descr_s.__class__ is clss + assert isinstance(descr_x, FieldDescr) assert descr_x.name == 'S.x' assert descr_y.name == 'S.y' assert descr_z.name == 'S.z' @@ -90,33 +74,27 @@ if not tsc: assert descr_x.offset < descr_y.offset < descr_z.offset assert descr_x.sort_key() < descr_y.sort_key() < descr_z.sort_key() - assert descr_x.get_field_size(False) == rffi.sizeof(lltype.Char) - assert descr_y.get_field_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr_z.get_field_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr_f.get_field_size(False) == rffi.sizeof(lltype.Float) - assert descr_s.get_field_size(False) == rffi.sizeof( - lltype.SingleFloat) + assert descr_x.field_size == rffi.sizeof(lltype.Char) + assert descr_y.field_size == rffi.sizeof(lltype.Ptr(T)) + assert descr_z.field_size == rffi.sizeof(lltype.Ptr(U)) + assert descr_f.field_size == rffi.sizeof(lltype.Float) + assert descr_s.field_size == rffi.sizeof(lltype.SingleFloat) else: assert isinstance(descr_x.offset, Symbolic) assert isinstance(descr_y.offset, Symbolic) assert isinstance(descr_z.offset, Symbolic) assert isinstance(descr_f.offset, Symbolic) assert isinstance(descr_s.offset, Symbolic) - assert isinstance(descr_x.get_field_size(True), Symbolic) - assert isinstance(descr_y.get_field_size(True), Symbolic) - assert isinstance(descr_z.get_field_size(True), Symbolic) - assert isinstance(descr_f.get_field_size(True), Symbolic) - assert isinstance(descr_s.get_field_size(True), Symbolic) - assert not descr_x.is_pointer_field() - assert descr_y.is_pointer_field() - assert not descr_z.is_pointer_field() - assert not descr_f.is_pointer_field() - assert not descr_s.is_pointer_field() - assert not descr_x.is_float_field() - assert not descr_y.is_float_field() - assert not descr_z.is_float_field() - assert descr_f.is_float_field() - assert not descr_s.is_float_field() + assert isinstance(descr_x.field_size, Symbolic) + assert isinstance(descr_y.field_size, Symbolic) + assert 
isinstance(descr_z.field_size, Symbolic) + assert isinstance(descr_f.field_size, Symbolic) + assert isinstance(descr_s.field_size, Symbolic) + assert descr_x.flag == FLAG_UNSIGNED + assert descr_y.flag == FLAG_POINTER + assert descr_z.flag == FLAG_UNSIGNED + assert descr_f.flag == FLAG_FLOAT + assert descr_s.flag == FLAG_UNSIGNED def test_get_field_descr_sign(): @@ -128,7 +106,8 @@ for tsc in [False, True]: c2 = GcCache(tsc) descr_x = get_field_descr(c2, S, 'x') - assert descr_x.is_field_signed() == signed + assert descr_x.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] def test_get_field_descr_longlong(): if sys.maxint > 2147483647: @@ -136,9 +115,8 @@ c0 = GcCache(False) S = lltype.GcStruct('S', ('y', lltype.UnsignedLongLong)) descr = get_field_descr(c0, S, 'y') - assert not descr.is_pointer_field() - assert descr.is_float_field() - assert descr.get_field_size(False) == 8 + assert descr.flag == FLAG_FLOAT + assert descr.field_size == 8 def test_get_array_descr(): @@ -149,19 +127,8 @@ A3 = lltype.GcArray(lltype.Ptr(U)) A4 = lltype.GcArray(lltype.Float) A5 = lltype.GcArray(lltype.Struct('x', ('v', lltype.Signed), - ('k', lltype.Signed))) + ('k', lltype.Signed))) A6 = lltype.GcArray(lltype.SingleFloat) - assert getArrayDescrClass(A2) is GcPtrArrayDescr - assert getArrayDescrClass(A3) is NonGcPtrArrayDescr - cls = getArrayDescrClass(A1) - assert cls != getArrayDescrClass(lltype.GcArray(lltype.Signed)) - assert cls == getArrayDescrClass(lltype.GcArray(lltype.Char)) - clsf = getArrayDescrClass(A4) - assert clsf != cls - assert clsf == getArrayDescrClass(lltype.GcArray(lltype.Float)) - clss = getArrayDescrClass(A6) - assert clss not in (clsf, cls) - assert clss == getArrayDescrClass(lltype.GcArray(rffi.UINT)) # c0 = GcCache(False) descr1 = get_array_descr(c0, A1) @@ -170,82 +137,61 @@ descr4 = get_array_descr(c0, A4) descr5 = get_array_descr(c0, A5) descr6 = get_array_descr(c0, A6) - assert descr1.__class__ is cls - assert descr2.__class__ is GcPtrArrayDescr - assert descr3.__class__ is NonGcPtrArrayDescr - assert descr4.__class__ is clsf - assert descr6.__class__ is clss + assert isinstance(descr1, ArrayDescr) assert descr1 == get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert not descr1.is_array_of_pointers() - assert descr2.is_array_of_pointers() - assert not descr3.is_array_of_pointers() - assert not descr4.is_array_of_pointers() - assert not descr5.is_array_of_pointers() - assert not descr1.is_array_of_floats() - assert not descr2.is_array_of_floats() - assert not descr3.is_array_of_floats() - assert descr4.is_array_of_floats() - assert not descr5.is_array_of_floats() + assert descr1.flag == FLAG_UNSIGNED + assert descr2.flag == FLAG_POINTER + assert descr3.flag == FLAG_UNSIGNED + assert descr4.flag == FLAG_FLOAT + assert descr5.flag == FLAG_STRUCT + assert descr6.flag == FLAG_UNSIGNED # def get_alignment(code): # Retrieve default alignment for the compiler/platform return struct.calcsize('l' + code) - struct.calcsize(code) - assert descr1.get_base_size(False) == get_alignment('c') - assert descr2.get_base_size(False) == get_alignment('p') - assert descr3.get_base_size(False) == get_alignment('p') - assert descr4.get_base_size(False) == get_alignment('d') - assert descr5.get_base_size(False) == get_alignment('f') - assert descr1.get_ofs_length(False) == 0 - assert descr2.get_ofs_length(False) == 0 - assert descr3.get_ofs_length(False) == 0 - assert descr4.get_ofs_length(False) == 0 - assert descr5.get_ofs_length(False) == 0 - assert descr1.get_item_size(False) == 
rffi.sizeof(lltype.Char) - assert descr2.get_item_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr3.get_item_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr4.get_item_size(False) == rffi.sizeof(lltype.Float) - assert descr5.get_item_size(False) == rffi.sizeof(lltype.Signed) * 2 - assert descr6.get_item_size(False) == rffi.sizeof(lltype.SingleFloat) + assert descr1.basesize == get_alignment('c') + assert descr2.basesize == get_alignment('p') + assert descr3.basesize == get_alignment('p') + assert descr4.basesize == get_alignment('d') + assert descr5.basesize == get_alignment('f') + assert descr1.lendescr.offset == 0 + assert descr2.lendescr.offset == 0 + assert descr3.lendescr.offset == 0 + assert descr4.lendescr.offset == 0 + assert descr5.lendescr.offset == 0 + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr2.itemsize == rffi.sizeof(lltype.Ptr(T)) + assert descr3.itemsize == rffi.sizeof(lltype.Ptr(U)) + assert descr4.itemsize == rffi.sizeof(lltype.Float) + assert descr5.itemsize == rffi.sizeof(lltype.Signed) * 2 + assert descr6.itemsize == rffi.sizeof(lltype.SingleFloat) # - assert isinstance(descr1.get_base_size(True), Symbolic) - assert isinstance(descr2.get_base_size(True), Symbolic) - assert isinstance(descr3.get_base_size(True), Symbolic) - assert isinstance(descr4.get_base_size(True), Symbolic) - assert isinstance(descr5.get_base_size(True), Symbolic) - assert isinstance(descr1.get_ofs_length(True), Symbolic) - assert isinstance(descr2.get_ofs_length(True), Symbolic) - assert isinstance(descr3.get_ofs_length(True), Symbolic) - assert isinstance(descr4.get_ofs_length(True), Symbolic) - assert isinstance(descr5.get_ofs_length(True), Symbolic) - assert isinstance(descr1.get_item_size(True), Symbolic) - assert isinstance(descr2.get_item_size(True), Symbolic) - assert isinstance(descr3.get_item_size(True), Symbolic) - assert isinstance(descr4.get_item_size(True), Symbolic) - assert isinstance(descr5.get_item_size(True), Symbolic) CA = rffi.CArray(lltype.Signed) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_SIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Ptr(lltype.GcStruct('S'))) descr = get_array_descr(c0, CA) - assert descr.is_array_of_pointers() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_POINTER + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Ptr(lltype.Struct('S'))) descr = get_array_descr(c0, CA) - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Float) descr = get_array_descr(c0, CA) - assert descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_FLOAT + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(rffi.FLOAT) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.itemsize == rffi.sizeof(lltype.SingleFloat) + assert descr.lendescr is None def test_get_array_descr_sign(): @@ -257,13 +203,15 @@ for tsc in [False, True]: c2 
= GcCache(tsc) arraydescr = get_array_descr(c2, A) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] # RA = rffi.CArray(RESTYPE) for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, RA) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] def test_get_call_descr_not_translated(): @@ -369,17 +317,18 @@ # descr2 = get_field_descr(c0, S, 'y') o, _ = symbolic.get_field_token(S, 'y', False) - assert descr2.repr_of_descr() == '' % o + assert descr2.repr_of_descr() == '' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) - assert descr2i.repr_of_descr() == '' % o + assert descr2i.repr_of_descr() == '' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) - assert descr3.repr_of_descr() == '' + o = symbolic.get_size(lltype.Ptr(S), False) + assert descr3.repr_of_descr() == '' % o # descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert descr3i.repr_of_descr() == '' + assert descr3i.repr_of_descr() == '' # descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) assert 'GcPtrCallDescr' in descr4.repr_of_descr() @@ -453,7 +402,7 @@ c0 = GcCache(True) A1 = lltype.GcArray(lltype.Signed) fielddescr = get_field_arraylen_descr(c0, A1) - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) ofs = fielddescr.offset assert repr(ofs) == '< ArrayLengthOffset >' # diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -127,7 +127,7 @@ jump() """, """ [i1] - p0 = call_malloc_gc(ConstClass(malloc_varsize), \ + p0 = call_malloc_gc(ConstClass(malloc_array), \ %(adescr.get_base_size(False))d, \ i1, \ %(adescr.get_item_size(False))d) diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -142,59 +142,6 @@ def repr_of_descr(self): return '%r' % (self,) - def get_arg_types(self): - """ Implement in call descr. - Must return a string of INT, REF and FLOAT ('i', 'r', 'f'). - """ - raise NotImplementedError - - def get_return_type(self): - """ Implement in call descr. - Must return INT, REF, FLOAT, or 'v' for void. - On 32-bit (hack) it can also be 'L' for longlongs. - Additionally it can be 'S' for singlefloats. - """ - raise NotImplementedError - - def get_extra_info(self): - """ Implement in call descr - """ - raise NotImplementedError - - def is_array_of_pointers(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_array_of_floats(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_array_of_structs(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_pointer_field(self): - """ Implement for field descr - """ - raise NotImplementedError - - def is_float_field(self): - """ Implement for field descr - """ - raise NotImplementedError - - def as_vtable_size_descr(self): - """ Implement for size descr representing objects with vtables. - Returns self. 
(it's an annotation hack) - """ - raise NotImplementedError - - def count_fields_if_immutable(self): - return -1 - def _clone_if_mutable(self): return self def clone_if_mutable(self): From noreply at buildbot.pypy.org Sat Dec 17 20:32:23 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 17 Dec 2011 20:32:23 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: refactor a bit and make sure import_state import_state sets things up properly even if there is nothing to import Message-ID: <20111217193223.CB2898205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50631:4014ad4062c5 Date: 2011-12-17 17:47 +0100 http://bitbucket.org/pypy/pypy/changeset/4014ad4062c5/ Log: refactor a bit and make sure import_state import_state sets things up properly even if there is nothing to import diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -91,38 +91,57 @@ self.optimizer.propagate_all_forward(clear=False) if not jumpop: - return + return + if self.jump_to_already_compiled_trace(jumpop): # Found a compiled trace to jump to - if self.did_import: - + if self.short: + # Construct our short preamble self.close_bridge(start_label) - self.finilize_short_preamble(start_label) return cell_token = jumpop.getdescr() assert isinstance(cell_token, JitCellToken) stop_label = ResOperation(rop.LABEL, jumpop.getarglist(), None, TargetToken(cell_token)) - if not self.did_import: # Enforce the previous behaviour of always peeling exactly one iteration (for now) - self.optimizer.flush() - KillHugeIntBounds(self.optimizer).apply() + if self.did_import and self.jump_to_start_label(start_label, stop_label): + # Initial label matches, jump to it + jumpop = ResOperation(rop.JUMP, stop_label.getarglist(), None, + descr=start_label.getdescr()) + if self.short: + # Construct our short preamble + self.close_loop(start_label, jumpop) + else: + self.optimizer.send_extra_operation(jumpop) + return - loop.operations = self.optimizer.get_newoperations() - self.export_state(stop_label) - loop.operations.append(stop_label) - else: - assert stop_label - assert start_label - stop_target = stop_label.getdescr() - start_target = start_label.getdescr() - assert isinstance(stop_target, TargetToken) - assert isinstance(start_target, TargetToken) - assert stop_target.targeting_jitcell_token is start_target.targeting_jitcell_token - jumpop = ResOperation(rop.JUMP, stop_label.getarglist(), None, descr=start_label.getdescr()) + # Found nothing to jump to, emit a label instead + self.optimizer.flush() + KillHugeIntBounds(self.optimizer).apply() - self.close_loop(jumpop) - self.finilize_short_preamble(start_label) + loop.operations = self.optimizer.get_newoperations() + self.export_state(stop_label) + loop.operations.append(stop_label) + + def jump_to_start_label(self, start_label, stop_label): + if not start_label or not stop_label: + return False + + stop_target = stop_label.getdescr() + start_target = start_label.getdescr() + assert isinstance(stop_target, TargetToken) + assert isinstance(start_target, TargetToken) + if stop_target.targeting_jitcell_token is not start_target.targeting_jitcell_token: + return False + + return True + + #args = stop_label.getarglist() + #modifier = VirtualStateAdder(self.optimizer) + #virtual_state = modifier.get_virtual_state(args) + #if self.initial_virtual_state.generalization_of(virtual_state): + # return True + def 
export_state(self, targetop): original_jump_args = targetop.getarglist() @@ -171,18 +190,24 @@ def import_state(self, targetop): self.did_import = False - if not targetop: + if not targetop: # Trace did not start with a label self.inputargs = self.optimizer.loop.inputargs - # FIXME: Set up some sort of empty state with no virtuals? + self.short = None + self.initial_virtual_state = None return + self.inputargs = targetop.getarglist() - target_token = targetop.getdescr() assert isinstance(target_token, TargetToken) exported_state = target_token.exported_state if not exported_state: - # FIXME: Set up some sort of empty state with no virtuals + # No state exported, construct one without virtuals + self.short = None + modifier = VirtualStateAdder(self.optimizer) + virtual_state = modifier.get_virtual_state(self.inputargs) + self.initial_virtual_state = virtual_state return + self.did_import = True self.short = target_token.short_preamble[:] @@ -243,8 +268,9 @@ i += 1 newoperations = self.optimizer.get_newoperations() self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=start_label.getdescr())) - - def close_loop(self, jumpop): + self.finilize_short_preamble(start_label) + + def close_loop(self, start_label, jumpop): virtual_state = self.initial_virtual_state short_inputargs = self.short[0].getarglist() inputargs = self.inputargs @@ -329,6 +355,8 @@ assert isinstance(target_token, TargetToken) target_token.targeting_jitcell_token.retraced_count = sys.maxint + self.finilize_short_preamble(start_label) + def finilize_short_preamble(self, start_label): short = self.short assert short[-1].getopnum() == rop.JUMP diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -580,6 +580,10 @@ except BoxNotProducable: pass + self.short_boxes_in_production = None # Not needed anymore + else: + self.short_boxes = {} + def prioritized_alternatives(self, box): if box not in self.alternatives: return [self.potential_ops[box]] From noreply at buildbot.pypy.org Sat Dec 17 20:32:24 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 17 Dec 2011 20:32:24 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: allow the metainterpreter to request an label by placing it in the unoptimized trace Message-ID: <20111217193224.F18108205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50632:7f2f31e8e4bd Date: 2011-12-17 18:38 +0100 http://bitbucket.org/pypy/pypy/changeset/7f2f31e8e4bd/ Log: allow the metainterpreter to request an label by placing it in the unoptimized trace diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -129,7 +129,8 @@ part.resume_at_jump_descr = resume_at_jump_descr part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ [h_ops[i].clone() for i in range(start, len(h_ops))] + \ - [ResOperation(rop.JUMP, jumpargs, None, descr=jitcell_token)] + [ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token)] + try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -35,6 +35,9 @@ pass def 
optimize_LABEL(self, op): + descr = op.getdescr() + if isinstance(descr, JitCellToken): + return self.optimize_JUMP(op.copy_and_change(rop.JUMP)) self.last_label_descr = op.getdescr() self.emit_operation(op) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_util.py b/pypy/jit/metainterp/optimizeopt/test/test_util.py --- a/pypy/jit/metainterp/optimizeopt/test/test_util.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_util.py @@ -435,7 +435,7 @@ token = JitCellToken() preamble.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(token))] + \ operations + \ - [ResOperation(rop.JUMP, jump_args, None, descr=token)] + [ResOperation(rop.LABEL, jump_args, None, descr=token)] self._do_optimize_loop(preamble, call_pure_results) assert preamble.operations[-1].getopnum() == rop.LABEL diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -71,7 +71,6 @@ loop = self.optimizer.loop self.optimizer.clear_newoperations() - start_label = loop.operations[0] if start_label.getopnum() == rop.LABEL: loop.operations = loop.operations[1:] @@ -82,7 +81,7 @@ start_label = None jumpop = loop.operations[-1] - if jumpop.getopnum() == rop.JUMP: + if jumpop.getopnum() == rop.JUMP or jumpop.getopnum() == rop.LABEL: loop.operations = loop.operations[:-1] else: jumpop = None @@ -92,28 +91,30 @@ if not jumpop: return - - if self.jump_to_already_compiled_trace(jumpop): - # Found a compiled trace to jump to - if self.short: - # Construct our short preamble - self.close_bridge(start_label) - return cell_token = jumpop.getdescr() assert isinstance(cell_token, JitCellToken) stop_label = ResOperation(rop.LABEL, jumpop.getarglist(), None, TargetToken(cell_token)) - if self.did_import and self.jump_to_start_label(start_label, stop_label): - # Initial label matches, jump to it - jumpop = ResOperation(rop.JUMP, stop_label.getarglist(), None, - descr=start_label.getdescr()) - if self.short: - # Construct our short preamble - self.close_loop(start_label, jumpop) - else: - self.optimizer.send_extra_operation(jumpop) - return + + if jumpop.getopnum() == rop.JUMP: + if self.jump_to_already_compiled_trace(jumpop): + # Found a compiled trace to jump to + if self.short: + # Construct our short preamble + self.close_bridge(start_label) + return + + if self.jump_to_start_label(start_label, stop_label): + # Initial label matches, jump to it + jumpop = ResOperation(rop.JUMP, stop_label.getarglist(), None, + descr=start_label.getdescr()) + if self.short: + # Construct our short preamble + self.close_loop(start_label, jumpop) + else: + self.optimizer.send_extra_operation(jumpop) + return # Found nothing to jump to, emit a label instead self.optimizer.flush() From noreply at buildbot.pypy.org Sat Dec 17 20:32:26 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 17 Dec 2011 20:32:26 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: kill old retrace logic no longer used Message-ID: <20111217193226.271838205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50633:bd3c0ac15e47 Date: 2011-12-17 18:42 +0100 http://bitbucket.org/pypy/pypy/changeset/bd3c0ac15e47/ Log: kill old retrace logic no longer used diff --git a/pypy/jit/metainterp/optimize.py b/pypy/jit/metainterp/optimize.py --- a/pypy/jit/metainterp/optimize.py +++ b/pypy/jit/metainterp/optimize.py @@ -6,12 +6,6 @@ we are trying to build cannot possibly make sense as a 
long-running loop (e.g. it cannot run 2 complete iterations).""" -class RetraceLoop(JitException): - """ Raised when inlining a short preamble resulted in an - InvalidLoop. This means the optimized loop is too specialized - to be useful here, so we trace it again and produced a second - copy specialized in some different way. - """ # ____________________________________________________________ diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -3,7 +3,7 @@ from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.history import TreeLoop, TargetToken, JitCellToken from pypy.jit.metainterp.jitexc import JitException -from pypy.jit.metainterp.optimize import InvalidLoop, RetraceLoop +from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.metainterp.optimizeopt.optimizer import * from pypy.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds from pypy.jit.metainterp.inliner import Inliner diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1548,11 +1548,6 @@ # ____________________________________________________________ -class RetraceState(object): - def __init__(self, metainterp, live_arg_boxes): - self.merge_point = len(metainterp.current_merge_points) - 1 - self.live_arg_boxes = live_arg_boxes - class MetaInterp(object): in_recursion = 0 @@ -2110,43 +2105,6 @@ jitcell_token = target_token.targeting_jitcell_token self.raise_continue_running_normally(live_arg_boxes, jitcell_token) - def compile_bridge_and_loop(self, original_boxes, live_arg_boxes, start, - bridge_arg_boxes, resume_at_jump_descr): - num_green_args = self.jitdriver_sd.num_green_args - original_inputargs = self.history.inputargs - greenkey = original_boxes[:num_green_args] - old_loop_tokens = self.get_compiled_merge_points(greenkey) - original_operations = self.history.operations - self.history.inputargs = original_boxes[num_green_args:] - greenkey = original_boxes[:num_green_args] - self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None) - loop_token = compile.compile_new_loop(self, [], greenkey, start, - resume_at_jump_descr, False) - self.history.operations.pop() # remove the JUMP - if loop_token is None: - self.history.inputargs = original_inputargs - self.history.operations = original_operations - return - - if loop_token.short_preamble: - old_loop_tokens[0].short_preamble.extend(loop_token.short_preamble) - - self.history.inputargs = original_inputargs - self.history.operations = self.history.operations[:start] - - self.history.record(rop.JUMP, bridge_arg_boxes[num_green_args:], None) - try: - target_loop_token = compile.compile_new_bridge(self, - #[loop_token], - old_loop_tokens, - self.resumekey, - True) - except RetraceLoop: - assert False - assert target_loop_token is not None - self.raise_continue_running_normally(live_arg_boxes, - old_loop_tokens[0]) - def compile_done_with_this_frame(self, exitbox): self.gen_store_back_in_virtualizable() # temporarily put a JUMP to a pseudo-loop diff --git a/pypy/jit/metainterp/test/test_virtualstate.py b/pypy/jit/metainterp/test/test_virtualstate.py --- a/pypy/jit/metainterp/test/test_virtualstate.py +++ b/pypy/jit/metainterp/test/test_virtualstate.py @@ -11,7 +11,6 @@ from pypy.jit.metainterp.optimizeopt.intutils import IntBound from pypy.jit.metainterp.history import TreeLoop, 
JitCellToken from pypy.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeMetaInterpStaticData -from pypy.jit.metainterp.optimize import RetraceLoop from pypy.jit.metainterp.resoperation import ResOperation, rop class TestBasic: From noreply at buildbot.pypy.org Sat Dec 17 20:32:27 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 17 Dec 2011 20:32:27 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: kill old interface for calling optimizeopt, its no longer used Message-ID: <20111217193227.4EFFC8205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50634:e65693cb1de5 Date: 2011-12-17 18:48 +0100 http://bitbucket.org/pypy/pypy/changeset/e65693cb1de5/ Log: kill old interface for calling optimizeopt, its no longer used diff --git a/pypy/jit/metainterp/optimize.py b/pypy/jit/metainterp/optimize.py --- a/pypy/jit/metainterp/optimize.py +++ b/pypy/jit/metainterp/optimize.py @@ -5,52 +5,3 @@ """Raised when the optimize*.py detect that the loop that we are trying to build cannot possibly make sense as a long-running loop (e.g. it cannot run 2 complete iterations).""" - - -# ____________________________________________________________ - -def optimize_loop(metainterp_sd, old_loop_tokens, loop, enable_opts): - debug_start("jit-optimize") - try: - return _optimize_loop(metainterp_sd, old_loop_tokens, loop, - enable_opts) - finally: - debug_stop("jit-optimize") - -def _optimize_loop(metainterp_sd, old_loop_tokens, loop, enable_opts): - from pypy.jit.metainterp.optimizeopt import optimize_loop_1 - loop.logops = metainterp_sd.logger_noopt.log_loop(loop.inputargs, - loop.operations) - # XXX do we really still need a list? - if old_loop_tokens: - return old_loop_tokens[0] - optimize_loop_1(metainterp_sd, loop, enable_opts) - return None - -# ____________________________________________________________ - -def optimize_bridge(metainterp_sd, old_loop_tokens, bridge, enable_opts, - inline_short_preamble=True, retraced=False): - debug_start("jit-optimize") - try: - return _optimize_bridge(metainterp_sd, old_loop_tokens, bridge, - enable_opts, - inline_short_preamble, retraced) - finally: - debug_stop("jit-optimize") - -def _optimize_bridge(metainterp_sd, old_loop_tokens, bridge, enable_opts, - inline_short_preamble, retraced=False): - from pypy.jit.metainterp.optimizeopt import optimize_bridge_1 - bridge.logops = metainterp_sd.logger_noopt.log_loop(bridge.inputargs, - bridge.operations) - if old_loop_tokens: - old_loop_token = old_loop_tokens[0] - bridge.operations[-1].setdescr(old_loop_token) # patch jump target - optimize_bridge_1(metainterp_sd, bridge, enable_opts, - inline_short_preamble, retraced) - return old_loop_tokens[0] - #return bridge.operations[-1].getdescr() - return None - -# ____________________________________________________________ diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -51,34 +51,6 @@ return optimizations, unroll - -def optimize_loop_1(metainterp_sd, loop, enable_opts, - inline_short_preamble=True, retraced=False): - """Optimize loop.operations to remove internal overheadish operations. 
- """ - - optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts, - inline_short_preamble, retraced) - if unroll: - optimize_unroll(metainterp_sd, loop, optimizations) - else: - optimizer = Optimizer(metainterp_sd, loop, optimizations) - optimizer.propagate_all_forward() - -def optimize_bridge_1(metainterp_sd, bridge, enable_opts, - inline_short_preamble=True, retraced=False): - """The same, but for a bridge. """ - enable_opts = enable_opts.copy() - try: - del enable_opts['unroll'] - except KeyError: - pass - optimize_loop_1(metainterp_sd, bridge, enable_opts, - inline_short_preamble, retraced) - -if __name__ == '__main__': - print ALL_OPTS_NAMES - def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True): """Optimize loop.operations to remove internal overheadish operations. """ @@ -96,3 +68,6 @@ finally: debug_stop("jit-optimize") +if __name__ == '__main__': + print ALL_OPTS_NAMES + diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -4,7 +4,7 @@ LLtypeMixin, BaseTest, Storage, _sortboxes, convert_old_style_to_targets) import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt import pypy.jit.metainterp.optimizeopt.virtualize as virtualize -from pypy.jit.metainterp.optimizeopt import optimize_loop_1, ALL_OPTS_DICT, build_opt_chain +from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT, build_opt_chain from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken From noreply at buildbot.pypy.org Sat Dec 17 20:32:28 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 17 Dec 2011 20:32:28 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: moved retrace count check up one level Message-ID: <20111217193228.702168205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50635:9bc137b88407 Date: 2011-12-17 19:54 +0100 http://bitbucket.org/pypy/pypy/changeset/9bc137b88407/ Log: moved retrace count check up one level diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -116,6 +116,18 @@ self.optimizer.send_extra_operation(jumpop) return + if cell_token.target_tokens: + limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit + if cell_token.retraced_count < limit: + cell_token.retraced_count += 1 + debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) + else: + debug_print("Retrace count reached, jumping to preamble") + assert cell_token.target_tokens[0].virtual_state is None + jumpop.setdescr(cell_token.target_tokens[0]) + self.optimizer.send_extra_operation(jumpop) + return True + # Found nothing to jump to, emit a label instead self.optimizer.flush() KillHugeIntBounds(self.optimizer).apply() @@ -544,20 +556,7 @@ self.optimizer.send_extra_operation(jumpop) return True debug_stop('jit-log-virtualstate') - - if self.did_import: - return False - limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit - if cell_token.retraced_count Author: Hakan Ardo Branch: jit-multilabel Changeset: r50636:f98a2c88a795 Date: 2011-12-17 19:55 +0100 
http://bitbucket.org/pypy/pypy/changeset/f98a2c88a795/ Log: kill did_import diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -51,7 +51,6 @@ distinction anymore)""" inline_short_preamble = True - did_import = False def __init__(self, metainterp_sd, loop, optimizations): self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations) @@ -202,7 +201,6 @@ exported_values) def import_state(self, targetop): - self.did_import = False if not targetop: # Trace did not start with a label self.inputargs = self.optimizer.loop.inputargs self.short = None @@ -221,8 +219,6 @@ self.initial_virtual_state = virtual_state return - self.did_import = True - self.short = target_token.short_preamble[:] self.short_seen = {} self.short_boxes = exported_state.short_boxes From noreply at buildbot.pypy.org Sat Dec 17 20:32:30 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 17 Dec 2011 20:32:30 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: translation fixes Message-ID: <20111217193230.B33FE8205C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50637:4f6581643f66 Date: 2011-12-17 20:24 +0100 http://bitbucket.org/pypy/pypy/changeset/4f6581643f66/ Log: translation fixes diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -101,10 +101,11 @@ # Found a compiled trace to jump to if self.short: # Construct our short preamble + assert start_label self.close_bridge(start_label) return - if self.jump_to_start_label(start_label, stop_label): + if start_label and self.jump_to_start_label(start_label, stop_label): # Initial label matches, jump to it jumpop = ResOperation(rop.JUMP, stop_label.getarglist(), None, descr=start_label.getdescr()) @@ -125,7 +126,7 @@ assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) self.optimizer.send_extra_operation(jumpop) - return True + return # Found nothing to jump to, emit a label instead self.optimizer.flush() From noreply at buildbot.pypy.org Sat Dec 17 23:01:05 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 Dec 2011 23:01:05 +0100 (CET) Subject: [pypy-commit] pypy py3k: hg merge defaults Message-ID: <20111217220105.4A05C8205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50638:2258d6a11e06 Date: 2011-12-17 13:42 +0100 http://bitbucket.org/pypy/pypy/changeset/2258d6a11e06/ Log: hg merge defaults diff too long, truncating to 10000 out of 21767 lines diff --git a/lib-python/modified-2.7/ctypes/__init__.py b/lib-python/modified-2.7/ctypes/__init__.py --- a/lib-python/modified-2.7/ctypes/__init__.py +++ b/lib-python/modified-2.7/ctypes/__init__.py @@ -351,7 +351,7 @@ self._FuncPtr = _FuncPtr if handle is None: - self._handle = _ffi.CDLL(name) + self._handle = _ffi.CDLL(name, mode) else: self._handle = handle diff --git a/lib-python/modified-2.7/ctypes/test/test_callbacks.py b/lib-python/modified-2.7/ctypes/test/test_callbacks.py --- a/lib-python/modified-2.7/ctypes/test/test_callbacks.py +++ b/lib-python/modified-2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test class Callbacks(unittest.TestCase): @@ -98,6 +99,7 @@ ## self.check_type(c_char_p, "abc") 
## self.check_type(c_char_p, "def") + @xfail def test_pyobject(self): o = () from sys import getrefcount as grc diff --git a/lib-python/modified-2.7/ctypes/test/test_libc.py b/lib-python/modified-2.7/ctypes/test/test_libc.py --- a/lib-python/modified-2.7/ctypes/test/test_libc.py +++ b/lib-python/modified-2.7/ctypes/test/test_libc.py @@ -25,7 +25,10 @@ lib.my_qsort(chars, len(chars)-1, sizeof(c_char), comparefunc(sort)) self.assertEqual(chars.raw, " ,,aaaadmmmnpppsss\x00") - def test_no_more_xfail(self): + def SKIPPED_test_no_more_xfail(self): + # We decided to not explicitly support the whole ctypes-2.7 + # and instead go for a case-by-case, demand-driven approach. + # So this test is skipped instead of failing. import socket import ctypes.test self.assertTrue(not hasattr(ctypes.test, 'xfail'), diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -379,12 +379,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): @@ -404,7 +406,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/lib_pypy/distributed/socklayer.py b/lib_pypy/distributed/socklayer.py --- a/lib_pypy/distributed/socklayer.py +++ b/lib_pypy/distributed/socklayer.py @@ -2,7 +2,7 @@ import py from socket import socket -XXX needs import adaptation as 'green' is removed from py lib for years +raise ImportError("XXX needs import adaptation as 'green' is removed from py lib for years") from py.impl.green.msgstruct import decodemessage, message from socket import socket, AF_INET, SOCK_STREAM import marshal diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -164,6 +164,7 @@ # if something: # assume this causes a NameError # # _this_ lines and the one # below we don't want from entry.getsource() + end = min(end, len(source)) for i in range(self.lineno, end): if source[i].rstrip().endswith(':'): end = i + 1 diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py --- a/pypy/bin/checkmodule.py +++ b/pypy/bin/checkmodule.py @@ -1,43 +1,45 @@ #! /usr/bin/env python """ -Usage: checkmodule.py [-b backend] +Usage: checkmodule.py -Compiles the PyPy extension module from pypy/module// -into a fake program which does nothing. Useful for testing whether a -modules compiles without doing a full translation. Default backend is cli. - -WARNING: this is still incomplete: there are chances that the -compilation fails with strange errors not due to the module. If a -module is known to compile during a translation but don't pass -checkmodule.py, please report the bug (or, better, correct it :-). +Check annotation and rtyping of the PyPy extension module from +pypy/module//. Useful for testing whether a +modules compiles without doing a full translation. 
""" import autopath -import sys +import sys, os from pypy.objspace.fake.checkmodule import checkmodule def main(argv): - try: - assert len(argv) in (2, 4) - if len(argv) == 2: - backend = 'cli' - modname = argv[1] - if modname in ('-h', '--help'): - print >> sys.stderr, __doc__ - sys.exit(0) - if modname.startswith('-'): - print >> sys.stderr, "Bad command line" - print >> sys.stderr, __doc__ - sys.exit(1) - else: - _, b, backend, modname = argv - assert b == '-b' - except AssertionError: + if len(argv) != 2: print >> sys.stderr, __doc__ sys.exit(2) + modname = argv[1] + if modname in ('-h', '--help'): + print >> sys.stderr, __doc__ + sys.exit(0) + if modname.startswith('-'): + print >> sys.stderr, "Bad command line" + print >> sys.stderr, __doc__ + sys.exit(1) + if os.path.sep in modname: + if os.path.basename(modname) == '': + modname = os.path.dirname(modname) + if os.path.basename(os.path.dirname(modname)) != 'module': + print >> sys.stderr, "Must give '../module/xxx', or just 'xxx'." + sys.exit(1) + modname = os.path.basename(modname) + try: + checkmodule(modname) + except Exception, e: + import traceback, pdb + traceback.print_exc() + pdb.post_mortem(sys.exc_info()[2]) + return 1 else: - checkmodule(modname, backend, interactive=True) - print 'Module compiled succesfully' + print 'Passed.' + return 0 if __name__ == '__main__': - main(sys.argv) + sys.exit(main(sys.argv)) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -258,6 +258,10 @@ "use small tuples", default=False), + BoolOption("withspecialisedtuple", + "use specialised tuples", + default=False), + BoolOption("withrope", "use ropes as the string implementation", default=False, requires=[("objspace.std.withstrslice", False), @@ -365,6 +369,7 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) + config.objspace.std.suggest(withspecialisedtuple=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -525,6 +525,17 @@ def setup(self): super(AppClassCollector, self).setup() cls = self.obj + # + # + for name in dir(cls): + if name.startswith('test_'): + func = getattr(cls, name, None) + code = getattr(func, 'func_code', None) + if code and code.co_flags & 32: + raise AssertionError("unsupported: %r is a generator " + "app-level test method" % (name,)) + # + # space = cls.space clsname = cls.__name__ if self.config.option.runappdirect: diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.6' +version = '1.7' # The full version, including alpha/beta/rc tags. -release = '1.6' +release = '1.7' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/config/objspace.std.withspecialisedtuple.txt b/pypy/doc/config/objspace.std.withspecialisedtuple.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withspecialisedtuple.txt @@ -0,0 +1,3 @@ +Use "specialized tuples", a custom implementation for some common kinds +of tuples. Currently limited to tuples of length 2, in three variants: +(int, int), (float, float), and a generic (object, object). 
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -112,10 +112,32 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. +Note that the JIT has a very high warm-up cost, meaning that the +programs are slow at the beginning. If you want to compare the timings +with CPython, even relatively simple programs need to run *at least* one +second, preferrably at least a few seconds. Large, complicated programs +need even more time to warm-up the JIT. + .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +--------------------------------------------------------------- +Couldn't the JIT dump and reload already-compiled machine code? +--------------------------------------------------------------- + +No, we found no way of doing that. The JIT generates machine code +containing a large number of constant addresses --- constant at the time +the machine code is written. The vast majority is probably not at all +constants that you find in the executable, with a nice link name. E.g. +the addresses of Python classes are used all the time, but Python +classes don't come statically from the executable; they are created anew +every time you restart your program. This makes saving and reloading +machine code completely impossible without some very advanced way of +mapping addresses in the old (now-dead) process to addresses in the new +process, including checking that all the previous assumptions about the +(now-dead) object are still true about the new object. + .. _`prolog and javascript`: diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -51,6 +51,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." + state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \"%s\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \"%s\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -94,15 +112,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." 
- for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \"%s\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) - raise AssertionError("should not reach here") class mod(AST): @@ -110,7 +119,6 @@ class Module(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -126,7 +134,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Module') + self.missing_field(space, ['body'], 'Module') else: pass w_list = self.w_body @@ -143,7 +151,6 @@ class Interactive(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -159,7 +166,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Interactive') + self.missing_field(space, ['body'], 'Interactive') else: pass w_list = self.w_body @@ -176,7 +183,6 @@ class Expression(mod): - def __init__(self, body): self.body = body self.initialization_state = 1 @@ -190,7 +196,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Expression') + self.missing_field(space, ['body'], 'Expression') else: pass self.body.sync_app_attrs(space) @@ -198,7 +204,6 @@ class Suite(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -214,7 +219,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Suite') + self.missing_field(space, ['body'], 'Suite') else: pass w_list = self.w_body @@ -230,15 +235,13 @@ class stmt(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class FunctionDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, args, body, decorator_list, lineno, col_offset): self.name = name self.args = args @@ -262,7 +265,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'args', 'body', 'decorator_list', 'lineno', 'col_offset'], 'FunctionDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef') else: pass self.args.sync_app_attrs(space) @@ -290,9 +293,6 @@ class ClassDef(stmt): - _lineno_mask = 128 - _col_offset_mask = 256 - def __init__(self, name, bases, keywords, starargs, kwargs, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases @@ -327,12 +327,12 @@ return visitor.visit_ClassDef(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~24) ^ 487: - missing_field(space, self.initialization_state, ['name', 'bases', 'keywords', None, None, 'body', 'decorator_list', 'lineno', 'col_offset'], 'ClassDef') + if (self.initialization_state & ~96) ^ 415: + self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'keywords', None, None, 'body', 'decorator_list'], 'ClassDef') else: - if not self.initialization_state & 8: + if not self.initialization_state & 32: self.starargs = None - if not self.initialization_state & 16: + if not self.initialization_state & 64: self.kwargs = None w_list = self.w_bases if w_list is not None: @@ -382,9 +382,6 @@ class Return(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) 
@@ -399,10 +396,10 @@ return visitor.visit_Return(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Return') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Return') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -410,9 +407,6 @@ class Delete(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, targets, lineno, col_offset): self.targets = targets self.w_targets = None @@ -429,7 +423,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['targets', 'lineno', 'col_offset'], 'Delete') + self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete') else: pass w_list = self.w_targets @@ -446,9 +440,6 @@ class Assign(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, targets, value, lineno, col_offset): self.targets = targets self.w_targets = None @@ -467,7 +458,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['targets', 'value', 'lineno', 'col_offset'], 'Assign') + self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') else: pass w_list = self.w_targets @@ -485,9 +476,6 @@ class AugAssign(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, target, op, value, lineno, col_offset): self.target = target self.op = op @@ -505,7 +493,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['target', 'op', 'value', 'lineno', 'col_offset'], 'AugAssign') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') else: pass self.target.sync_app_attrs(space) @@ -514,9 +502,6 @@ class For(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, target, iter, body, orelse, lineno, col_offset): self.target = target self.iter = iter @@ -541,7 +526,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['target', 'iter', 'body', 'orelse', 'lineno', 'col_offset'], 'For') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 'orelse'], 'For') else: pass self.target.sync_app_attrs(space) @@ -570,9 +555,6 @@ class While(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -595,7 +577,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'While') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') else: pass self.test.sync_app_attrs(space) @@ -623,9 +605,6 @@ class If(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -648,7 +627,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'If') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') else: pass 
self.test.sync_app_attrs(space) @@ -676,9 +655,6 @@ class With(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, context_expr, optional_vars, body, lineno, col_offset): self.context_expr = context_expr self.optional_vars = optional_vars @@ -699,10 +675,10 @@ return visitor.visit_With(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 29: - missing_field(space, self.initialization_state, ['context_expr', None, 'body', 'lineno', 'col_offset'], 'With') + if (self.initialization_state & ~8) ^ 23: + self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.optional_vars = None self.context_expr.sync_app_attrs(space) if self.optional_vars: @@ -721,9 +697,6 @@ class Raise(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, exc, cause, lineno, col_offset): self.exc = exc self.cause = cause @@ -741,12 +714,12 @@ return visitor.visit_Raise(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~3) ^ 12: - missing_field(space, self.initialization_state, [None, None, 'lineno', 'col_offset'], 'Raise') + if (self.initialization_state & ~12) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None, None], 'Raise') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.exc = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.cause = None if self.exc: self.exc.sync_app_attrs(space) @@ -756,9 +729,6 @@ class TryExcept(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, handlers, orelse, lineno, col_offset): self.body = body self.w_body = None @@ -783,7 +753,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['body', 'handlers', 'orelse', 'lineno', 'col_offset'], 'TryExcept') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') else: pass w_list = self.w_body @@ -820,9 +790,6 @@ class TryFinally(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, body, finalbody, lineno, col_offset): self.body = body self.w_body = None @@ -843,7 +810,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['body', 'finalbody', 'lineno', 'col_offset'], 'TryFinally') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') else: pass w_list = self.w_body @@ -870,9 +837,6 @@ class Assert(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, test, msg, lineno, col_offset): self.test = test self.msg = msg @@ -889,10 +853,10 @@ return visitor.visit_Assert(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 13: - missing_field(space, self.initialization_state, ['test', None, 'lineno', 'col_offset'], 'Assert') + if (self.initialization_state & ~8) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.msg = None self.test.sync_app_attrs(space) if self.msg: @@ -901,9 +865,6 @@ class Import(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -920,7 +881,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - 
missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Import') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import') else: pass w_list = self.w_names @@ -937,9 +898,6 @@ class ImportFrom(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, module, names, level, lineno, col_offset): self.module = module self.names = names @@ -957,12 +915,12 @@ return visitor.visit_ImportFrom(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~5) ^ 26: - missing_field(space, self.initialization_state, [None, 'names', None, 'lineno', 'col_offset'], 'ImportFrom') + if (self.initialization_state & ~20) ^ 11: + self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.module = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.level = 0 w_list = self.w_names if w_list is not None: @@ -978,9 +936,6 @@ class Global(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -995,7 +950,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Global') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') else: pass w_list = self.w_names @@ -1009,9 +964,6 @@ class Nonlocal(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -1026,7 +978,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Nonlocal') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Nonlocal') else: pass w_list = self.w_names @@ -1040,9 +992,6 @@ class Expr(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -1057,7 +1006,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Expr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr') else: pass self.value.sync_app_attrs(space) @@ -1065,9 +1014,6 @@ class Pass(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1080,16 +1026,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Pass') + self.missing_field(space, ['lineno', 'col_offset'], 'Pass') else: pass class Break(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1102,16 +1045,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Break') + self.missing_field(space, ['lineno', 'col_offset'], 'Break') else: pass class Continue(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1124,21 +1064,19 @@ def sync_app_attrs(self, 
space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Continue') + self.missing_field(space, ['lineno', 'col_offset'], 'Continue') else: pass class expr(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class BoolOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, values, lineno, col_offset): self.op = op self.values = values @@ -1156,7 +1094,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'values', 'lineno', 'col_offset'], 'BoolOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp') else: pass w_list = self.w_values @@ -1173,9 +1111,6 @@ class BinOp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, op, right, lineno, col_offset): self.left = left self.op = op @@ -1193,7 +1128,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'op', 'right', 'lineno', 'col_offset'], 'BinOp') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'op', 'right'], 'BinOp') else: pass self.left.sync_app_attrs(space) @@ -1202,9 +1137,6 @@ class UnaryOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, operand, lineno, col_offset): self.op = op self.operand = operand @@ -1220,7 +1152,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'operand', 'lineno', 'col_offset'], 'UnaryOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'operand'], 'UnaryOp') else: pass self.operand.sync_app_attrs(space) @@ -1228,9 +1160,6 @@ class Lambda(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, args, body, lineno, col_offset): self.args = args self.body = body @@ -1247,7 +1176,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['args', 'body', 'lineno', 'col_offset'], 'Lambda') + self.missing_field(space, ['lineno', 'col_offset', 'args', 'body'], 'Lambda') else: pass self.args.sync_app_attrs(space) @@ -1256,9 +1185,6 @@ class IfExp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -1277,7 +1203,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'IfExp') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'IfExp') else: pass self.test.sync_app_attrs(space) @@ -1287,9 +1213,6 @@ class Dict(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, keys, values, lineno, col_offset): self.keys = keys self.w_keys = None @@ -1310,7 +1233,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['keys', 'values', 'lineno', 'col_offset'], 'Dict') + self.missing_field(space, ['lineno', 'col_offset', 'keys', 'values'], 'Dict') else: pass w_list = self.w_keys @@ -1337,9 +1260,6 @@ class Set(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, elts, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1356,7 +1276,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 
7: - missing_field(space, self.initialization_state, ['elts', 'lineno', 'col_offset'], 'Set') + self.missing_field(space, ['lineno', 'col_offset', 'elts'], 'Set') else: pass w_list = self.w_elts @@ -1373,9 +1293,6 @@ class ListComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1394,7 +1311,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'ListComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'ListComp') else: pass self.elt.sync_app_attrs(space) @@ -1412,9 +1329,6 @@ class SetComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1433,7 +1347,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'SetComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'SetComp') else: pass self.elt.sync_app_attrs(space) @@ -1451,9 +1365,6 @@ class DictComp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, key, value, generators, lineno, col_offset): self.key = key self.value = value @@ -1474,7 +1385,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['key', 'value', 'generators', 'lineno', 'col_offset'], 'DictComp') + self.missing_field(space, ['lineno', 'col_offset', 'key', 'value', 'generators'], 'DictComp') else: pass self.key.sync_app_attrs(space) @@ -1493,9 +1404,6 @@ class GeneratorExp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1514,7 +1422,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'GeneratorExp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'GeneratorExp') else: pass self.elt.sync_app_attrs(space) @@ -1532,9 +1440,6 @@ class Yield(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1549,10 +1454,10 @@ return visitor.visit_Yield(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Yield') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Yield') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -1560,9 +1465,6 @@ class Compare(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, ops, comparators, lineno, col_offset): self.left = left self.ops = ops @@ -1583,7 +1485,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'ops', 'comparators', 'lineno', 'col_offset'], 'Compare') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'ops', 'comparators'], 'Compare') else: pass self.left.sync_app_attrs(space) @@ -1608,9 +1510,6 @@ 
class Call(expr): - _lineno_mask = 32 - _col_offset_mask = 64 - def __init__(self, func, args, keywords, starargs, kwargs, lineno, col_offset): self.func = func self.args = args @@ -1638,12 +1537,12 @@ return visitor.visit_Call(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~24) ^ 103: - missing_field(space, self.initialization_state, ['func', 'args', 'keywords', None, None, 'lineno', 'col_offset'], 'Call') + if (self.initialization_state & ~96) ^ 31: + self.missing_field(space, ['lineno', 'col_offset', 'func', 'args', 'keywords', None, None], 'Call') else: - if not self.initialization_state & 8: + if not self.initialization_state & 32: self.starargs = None - if not self.initialization_state & 16: + if not self.initialization_state & 64: self.kwargs = None self.func.sync_app_attrs(space) w_list = self.w_args @@ -1674,9 +1573,6 @@ class Num(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, n, lineno, col_offset): self.n = n expr.__init__(self, lineno, col_offset) @@ -1690,16 +1586,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['n', 'lineno', 'col_offset'], 'Num') + self.missing_field(space, ['lineno', 'col_offset', 'n'], 'Num') else: pass class Str(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, s, lineno, col_offset): self.s = s expr.__init__(self, lineno, col_offset) @@ -1713,16 +1606,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['s', 'lineno', 'col_offset'], 'Str') + self.missing_field(space, ['lineno', 'col_offset', 's'], 'Str') else: pass class Attribute(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, attr, ctx, lineno, col_offset): self.value = value self.attr = attr @@ -1739,7 +1629,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'attr', 'ctx', 'lineno', 'col_offset'], 'Attribute') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'attr', 'ctx'], 'Attribute') else: pass self.value.sync_app_attrs(space) @@ -1747,9 +1637,6 @@ class Subscript(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, slice, ctx, lineno, col_offset): self.value = value self.slice = slice @@ -1767,7 +1654,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'slice', 'ctx', 'lineno', 'col_offset'], 'Subscript') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'slice', 'ctx'], 'Subscript') else: pass self.value.sync_app_attrs(space) @@ -1776,9 +1663,6 @@ class Name(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, id, ctx, lineno, col_offset): self.id = id self.ctx = ctx @@ -1793,16 +1677,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['id', 'ctx', 'lineno', 'col_offset'], 'Name') + self.missing_field(space, ['lineno', 'col_offset', 'id', 'ctx'], 'Name') else: pass class List(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1820,7 +1701,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'List') + 
self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'List') else: pass w_list = self.w_elts @@ -1837,9 +1718,6 @@ class Tuple(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1857,7 +1735,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'Tuple') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'Tuple') else: pass w_list = self.w_elts @@ -1874,9 +1752,6 @@ class Const(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1890,7 +1765,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Const') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Const') else: pass @@ -1952,7 +1827,6 @@ class Ellipsis(slice): - def __init__(self): self.initialization_state = 0 @@ -1964,14 +1838,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 0: - missing_field(space, self.initialization_state, [], 'Ellipsis') + self.missing_field(space, [], 'Ellipsis') else: pass class Slice(slice): - def __init__(self, lower, upper, step): self.lower = lower self.upper = upper @@ -1992,7 +1865,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~7) ^ 0: - missing_field(space, self.initialization_state, [None, None, None], 'Slice') + self.missing_field(space, [None, None, None], 'Slice') else: if not self.initialization_state & 1: self.lower = None @@ -2010,7 +1883,6 @@ class ExtSlice(slice): - def __init__(self, dims): self.dims = dims self.w_dims = None @@ -2026,7 +1898,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['dims'], 'ExtSlice') + self.missing_field(space, ['dims'], 'ExtSlice') else: pass w_list = self.w_dims @@ -2043,7 +1915,6 @@ class Index(slice): - def __init__(self, value): self.value = value self.initialization_state = 1 @@ -2057,7 +1928,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['value'], 'Index') + self.missing_field(space, ['value'], 'Index') else: pass self.value.sync_app_attrs(space) @@ -2320,7 +2191,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['target', 'iter', 'ifs'], 'comprehension') + self.missing_field(space, ['target', 'iter', 'ifs'], 'comprehension') else: pass self.target.sync_app_attrs(space) @@ -2337,15 +2208,13 @@ node.sync_app_attrs(space) class excepthandler(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class ExceptHandler(excepthandler): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, name, body, lineno, col_offset): self.type = type self.name = name @@ -2367,12 +2236,12 @@ return visitor.visit_ExceptHandler(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~3) ^ 28: - missing_field(space, self.initialization_state, [None, None, 'body', 'lineno', 'col_offset'], 'ExceptHandler') + if (self.initialization_state & ~12) ^ 19: + self.missing_field(space, ['lineno', 'col_offset', None, None, 'body'], 'ExceptHandler') else: - if not 
self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.name = None if self.type: self.type.sync_app_attrs(space) @@ -2413,7 +2282,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~6) ^ 9: - missing_field(space, self.initialization_state, ['args', None, None, 'defaults'], 'arguments') + self.missing_field(space, ['args', None, None, 'defaults'], 'arguments') else: if not self.initialization_state & 2: self.vararg = None @@ -2456,7 +2325,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['arg', 'value'], 'keyword') + self.missing_field(space, ['arg', 'value'], 'keyword') else: pass self.value.sync_app_attrs(space) @@ -2476,7 +2345,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~2) ^ 1: - missing_field(space, self.initialization_state, ['name', None], 'alias') + self.missing_field(space, ['name', None], 'alias') else: if not self.initialization_state & 2: self.asname = None @@ -2949,6 +2818,8 @@ def Expression_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3028,7 +2899,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -3042,14 +2913,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def stmt_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -3063,7 +2934,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 stmt.typedef = typedef.TypeDef("stmt", AST.typedef, @@ -3079,7 +2950,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3093,14 +2964,14 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def FunctionDef_get_args(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) 
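The error path changes in the same way everywhere: the module-level missing_field(space, self.initialization_state, ...) helper becomes a missing_field() method on the node, and each field list is reordered so 'lineno' and 'col_offset' come first, matching the new bit order. The body of the new method lies outside this hunk; the following is only a rough sketch of the behaviour its call sites imply, with a plain TypeError standing in for the interp-level OperationError and a guessed message:

    class Node(object):
        initialization_state = 0

        def missing_field(self, space, required, host):
            # required[i] names the field guarded by bit 2**i; None marks an
            # optional field that sync_app_attrs() clears separately
            for i, name in enumerate(required):
                if name is not None and not (self.initialization_state & (1 << i)):
                    raise TypeError('required field "%s" missing from %s'
                                    % (name, host))

    node = Node()
    node.initialization_state = 1 | 2    # lineno and col_offset set, 'elt' not
    try:
        node.missing_field(None, ['lineno', 'col_offset', 'elt'], 'ListComp')
    except TypeError, e:
        print e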
raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -3114,10 +2985,10 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def FunctionDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3131,10 +3002,10 @@ def FunctionDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def FunctionDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3148,7 +3019,7 @@ def FunctionDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _FunctionDef_field_unroller = unrolling_iterable(['name', 'args', 'body', 'decorator_list']) def FunctionDef_init(space, w_self, __args__): @@ -3184,7 +3055,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3198,10 +3069,10 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ClassDef_get_bases(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases') if w_self.w_bases is None: @@ -3215,10 +3086,10 @@ def ClassDef_set_bases(space, w_self, w_new_value): w_self.w_bases = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ClassDef_get_keywords(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') if w_self.w_keywords is None: @@ -3232,14 +3103,14 @@ def ClassDef_set_keywords(space, w_self, w_new_value): w_self.w_keywords = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def ClassDef_get_starargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'starargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') return space.wrap(w_self.starargs) @@ -3247,20 +3118,22 @@ def ClassDef_set_starargs(space, w_self, w_new_value): try: w_self.starargs = space.interp_w(expr, w_new_value, True) + if type(w_self.starargs) 
is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'starargs', w_new_value) return w_self.deldictvalue(space, 'starargs') - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 def ClassDef_get_kwargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'kwargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 16: + if not w_self.initialization_state & 64: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') return space.wrap(w_self.kwargs) @@ -3268,16 +3141,18 @@ def ClassDef_set_kwargs(space, w_self, w_new_value): try: w_self.kwargs = space.interp_w(expr, w_new_value, True) + if type(w_self.kwargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'kwargs', w_new_value) return w_self.deldictvalue(space, 'kwargs') - w_self.initialization_state |= 16 + w_self.initialization_state |= 64 def ClassDef_get_body(space, w_self): - if not w_self.initialization_state & 32: + if not w_self.initialization_state & 128: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3291,10 +3166,10 @@ def ClassDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 32 + w_self.initialization_state |= 128 def ClassDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 64: + if not w_self.initialization_state & 256: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3308,7 +3183,7 @@ def ClassDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 64 + w_self.initialization_state |= 256 _ClassDef_field_unroller = unrolling_iterable(['name', 'bases', 'keywords', 'starargs', 'kwargs', 'body', 'decorator_list']) def ClassDef_init(space, w_self, __args__): @@ -3349,7 +3224,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3357,13 +3232,15 @@ def Return_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Return_field_unroller = unrolling_iterable(['value']) def Return_init(space, w_self, __args__): @@ -3390,7 +3267,7 @@ ) def Delete_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", 
typename, 'targets') if w_self.w_targets is None: @@ -3404,7 +3281,7 @@ def Delete_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Delete_field_unroller = unrolling_iterable(['targets']) def Delete_init(space, w_self, __args__): @@ -3432,7 +3309,7 @@ ) def Assign_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3446,14 +3323,14 @@ def Assign_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3461,13 +3338,15 @@ def Assign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assign_field_unroller = unrolling_iterable(['targets', 'value']) def Assign_init(space, w_self, __args__): @@ -3500,7 +3379,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3508,20 +3387,22 @@ def AugAssign_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def AugAssign_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -3537,14 +3418,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def AugAssign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3552,13 +3433,15 @@ def AugAssign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _AugAssign_field_unroller = unrolling_iterable(['target', 'op', 'value']) def AugAssign_init(space, w_self, __args__): @@ -3591,7 +3474,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3599,20 +3482,22 @@ def For_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def For_get_iter(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'iter') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'iter') return space.wrap(w_self.iter) @@ -3620,16 +3505,18 @@ def For_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'iter', w_new_value) return w_self.deldictvalue(space, 'iter') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def For_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3643,10 +3530,10 @@ def For_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def For_get_orelse(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3660,7 +3547,7 @@ def For_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _For_field_unroller = unrolling_iterable(['target', 'iter', 'body', 'orelse']) def For_init(space, w_self, __args__): @@ -3696,7 +3583,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3704,16 +3591,18 @@ def While_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def While_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3727,10 +3616,10 @@ def While_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def While_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3744,7 +3633,7 @@ def While_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _While_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def While_init(space, w_self, __args__): @@ -3779,7 +3668,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3787,16 +3676,18 @@ def If_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def If_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3810,10 +3701,10 @@ def If_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def If_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3827,7 +3718,7 @@ def If_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _If_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def 
If_init(space, w_self, __args__): @@ -3862,7 +3753,7 @@ w_obj = w_self.getdictvalue(space, 'context_expr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr') return space.wrap(w_self.context_expr) @@ -3870,20 +3761,22 @@ def With_set_context_expr(space, w_self, w_new_value): try: w_self.context_expr = space.interp_w(expr, w_new_value, False) + if type(w_self.context_expr) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'context_expr', w_new_value) return w_self.deldictvalue(space, 'context_expr') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def With_get_optional_vars(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'optional_vars') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars') return space.wrap(w_self.optional_vars) @@ -3891,16 +3784,18 @@ def With_set_optional_vars(space, w_self, w_new_value): try: w_self.optional_vars = space.interp_w(expr, w_new_value, True) + if type(w_self.optional_vars) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'optional_vars', w_new_value) return w_self.deldictvalue(space, 'optional_vars') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def With_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3914,7 +3809,7 @@ def With_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _With_field_unroller = unrolling_iterable(['context_expr', 'optional_vars', 'body']) def With_init(space, w_self, __args__): @@ -3948,7 +3843,7 @@ w_obj = w_self.getdictvalue(space, 'exc') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'exc') return space.wrap(w_self.exc) @@ -3956,20 +3851,22 @@ def Raise_set_exc(space, w_self, w_new_value): try: w_self.exc = space.interp_w(expr, w_new_value, True) + if type(w_self.exc) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'exc', w_new_value) return w_self.deldictvalue(space, 'exc') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Raise_get_cause(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'cause') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'cause') return space.wrap(w_self.cause) @@ -3977,13 +3874,15 @@ def Raise_set_cause(space, w_self, w_new_value): try: w_self.cause = space.interp_w(expr, w_new_value, True) + if type(w_self.cause) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'cause', w_new_value) return w_self.deldictvalue(space, 'cause') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Raise_field_unroller = unrolling_iterable(['exc', 'cause']) def Raise_init(space, w_self, __args__): @@ -4011,7 +3910,7 @@ ) def TryExcept_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4025,10 +3924,10 @@ def TryExcept_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryExcept_get_handlers(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers') if w_self.w_handlers is None: @@ -4042,10 +3941,10 @@ def TryExcept_set_handlers(space, w_self, w_new_value): w_self.w_handlers = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def TryExcept_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -4059,7 +3958,7 @@ def TryExcept_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _TryExcept_field_unroller = unrolling_iterable(['body', 'handlers', 'orelse']) def TryExcept_init(space, w_self, __args__): @@ -4091,7 +3990,7 @@ ) def TryFinally_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4105,10 +4004,10 @@ def TryFinally_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryFinally_get_finalbody(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody') if w_self.w_finalbody is None: @@ -4122,7 +4021,7 @@ def TryFinally_set_finalbody(space, w_self, w_new_value): w_self.w_finalbody = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _TryFinally_field_unroller = unrolling_iterable(['body', 'finalbody']) def TryFinally_init(space, w_self, __args__): @@ -4156,7 +4055,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not 
w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -4164,20 +4063,22 @@ def Assert_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assert_get_msg(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'msg') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg') return space.wrap(w_self.msg) @@ -4185,13 +4086,15 @@ def Assert_set_msg(space, w_self, w_new_value): try: w_self.msg = space.interp_w(expr, w_new_value, True) + if type(w_self.msg) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'msg', w_new_value) return w_self.deldictvalue(space, 'msg') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assert_field_unroller = unrolling_iterable(['test', 'msg']) def Assert_init(space, w_self, __args__): @@ -4219,7 +4122,7 @@ ) def Import_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4233,7 +4136,7 @@ def Import_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Import_field_unroller = unrolling_iterable(['names']) def Import_init(space, w_self, __args__): @@ -4265,7 +4168,7 @@ w_obj = w_self.getdictvalue(space, 'module') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module') return space.wrap(w_self.module) @@ -4282,10 +4185,10 @@ w_self.setdictvalue(space, 'module', w_new_value) return w_self.deldictvalue(space, 'module') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ImportFrom_get_names(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4299,14 +4202,14 @@ def ImportFrom_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ImportFrom_get_level(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'level') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level') return space.wrap(w_self.level) @@ -4320,7 +4223,7 @@ w_self.setdictvalue(space, 'level', w_new_value) return w_self.deldictvalue(space, 'level') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ImportFrom_field_unroller = unrolling_iterable(['module', 'names', 'level']) def ImportFrom_init(space, w_self, __args__): @@ -4350,7 +4253,7 @@ ) def Global_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4364,7 +4267,7 @@ def Global_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Global_field_unroller = unrolling_iterable(['names']) def Global_init(space, w_self, __args__): @@ -4392,7 +4295,7 @@ ) def Nonlocal_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4406,7 +4309,7 @@ def Nonlocal_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Nonlocal_field_unroller = unrolling_iterable(['names']) def Nonlocal_init(space, w_self, __args__): @@ -4438,7 +4341,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -4446,13 +4349,15 @@ def Expr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Expr_field_unroller = unrolling_iterable(['value']) def Expr_init(space, w_self, __args__): @@ -4534,7 +4439,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -4548,14 +4453,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def expr_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return 
space.wrap(w_self.col_offset) @@ -4569,7 +4474,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 expr.typedef = typedef.TypeDef("expr", AST.typedef, @@ -4585,7 +4490,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return boolop_to_class[w_self.op - 1]() @@ -4601,10 +4506,10 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BoolOp_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -4618,7 +4523,7 @@ def BoolOp_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _BoolOp_field_unroller = unrolling_iterable(['op', 'values']) def BoolOp_init(space, w_self, __args__): @@ -4651,7 +4556,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -4659,20 +4564,22 @@ def BinOp_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BinOp_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -4688,14 +4595,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def BinOp_get_right(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'right') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right') return space.wrap(w_self.right) @@ -4703,13 +4610,15 @@ def BinOp_set_right(space, w_self, w_new_value): try: w_self.right = space.interp_w(expr, w_new_value, False) + if type(w_self.right) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise 
w_self.setdictvalue(space, 'right', w_new_value) return w_self.deldictvalue(space, 'right') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _BinOp_field_unroller = unrolling_iterable(['left', 'op', 'right']) def BinOp_init(space, w_self, __args__): @@ -4742,7 +4651,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return unaryop_to_class[w_self.op - 1]() @@ -4758,14 +4667,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def UnaryOp_get_operand(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'operand') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand') return space.wrap(w_self.operand) @@ -4773,13 +4682,15 @@ def UnaryOp_set_operand(space, w_self, w_new_value): try: w_self.operand = space.interp_w(expr, w_new_value, False) + if type(w_self.operand) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'operand', w_new_value) return w_self.deldictvalue(space, 'operand') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _UnaryOp_field_unroller = unrolling_iterable(['op', 'operand']) def UnaryOp_init(space, w_self, __args__): @@ -4811,7 +4722,7 @@ w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -4825,14 +4736,14 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Lambda_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -4840,13 +4751,15 @@ def Lambda_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Lambda_field_unroller = unrolling_iterable(['args', 'body']) def Lambda_init(space, w_self, __args__): @@ -4878,7 +4791,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = 
space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -4886,20 +4799,22 @@ def IfExp_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def IfExp_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -4907,20 +4822,22 @@ def IfExp_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def IfExp_get_orelse(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'orelse') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') return space.wrap(w_self.orelse) @@ -4928,13 +4845,15 @@ def IfExp_set_orelse(space, w_self, w_new_value): try: w_self.orelse = space.interp_w(expr, w_new_value, False) + if type(w_self.orelse) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'orelse', w_new_value) return w_self.deldictvalue(space, 'orelse') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _IfExp_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def IfExp_init(space, w_self, __args__): @@ -4963,7 +4882,7 @@ ) def Dict_get_keys(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys') if w_self.w_keys is None: @@ -4977,10 +4896,10 @@ def Dict_set_keys(space, w_self, w_new_value): w_self.w_keys = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Dict_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -4994,7 +4913,7 @@ def Dict_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Dict_field_unroller = unrolling_iterable(['keys', 'values']) def Dict_init(space, w_self, __args__): @@ -5024,7 +4943,7 @@ ) def 
Set_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -5038,7 +4957,7 @@ def Set_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Set_field_unroller = unrolling_iterable(['elts']) def Set_init(space, w_self, __args__): @@ -5070,7 +4989,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5078,16 +4997,18 @@ def ListComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ListComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5101,7 +5022,7 @@ def ListComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _ListComp_field_unroller = unrolling_iterable(['elt', 'generators']) def ListComp_init(space, w_self, __args__): @@ -5134,7 +5055,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5142,16 +5063,18 @@ def SetComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def SetComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5165,7 +5088,7 @@ def SetComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _SetComp_field_unroller = unrolling_iterable(['elt', 'generators']) def SetComp_init(space, w_self, __args__): @@ -5198,7 +5121,7 @@ w_obj = w_self.getdictvalue(space, 'key') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: 
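The other recurring addition in these hunks is a guard after each space.interp_w(expr, ...) call in the setters: if the wrapped value is an instance of the abstract expr class itself rather than a concrete subclass such as Name or Num, the setter raises TypeError, which the surrounding except clause turns into the usual fallback of storing the value in the instance dict. A minimal sketch of the idea, using plain classes and names invented for illustration:

    class expr(object):
        pass

    class Num(expr):
        pass

    def set_value(value):
        # interp_w(expr, ...) already guarantees isinstance(value, expr);
        # the new check additionally rejects the bare abstract class
        if type(value) is expr:
            raise TypeError("expected a concrete expr subclass")
        return value

    set_value(Num())          # accepted
    try:
        set_value(expr())     # now rejected, like the generated setters
    except TypeError, e:
        print e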
typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key') return space.wrap(w_self.key) @@ -5206,20 +5129,22 @@ def DictComp_set_key(space, w_self, w_new_value): try: w_self.key = space.interp_w(expr, w_new_value, False) + if type(w_self.key) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'key', w_new_value) return w_self.deldictvalue(space, 'key') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def DictComp_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5227,16 +5152,18 @@ def DictComp_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def DictComp_get_generators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5250,7 +5177,7 @@ def DictComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _DictComp_field_unroller = unrolling_iterable(['key', 'value', 'generators']) def DictComp_init(space, w_self, __args__): @@ -5284,7 +5211,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5292,16 +5219,18 @@ def GeneratorExp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def GeneratorExp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5315,7 +5244,7 @@ def GeneratorExp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _GeneratorExp_field_unroller = unrolling_iterable(['elt', 'generators']) def GeneratorExp_init(space, w_self, 
__args__): @@ -5348,7 +5277,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5356,13 +5285,15 @@ def Yield_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Yield_field_unroller = unrolling_iterable(['value']) def Yield_init(space, w_self, __args__): @@ -5393,7 +5324,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -5401,16 +5332,18 @@ def Compare_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Compare_get_ops(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops') if w_self.w_ops is None: @@ -5424,10 +5357,10 @@ def Compare_set_ops(space, w_self, w_new_value): w_self.w_ops = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Compare_get_comparators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators') if w_self.w_comparators is None: @@ -5441,7 +5374,7 @@ def Compare_set_comparators(space, w_self, w_new_value): w_self.w_comparators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Compare_field_unroller = unrolling_iterable(['left', 'ops', 'comparators']) def Compare_init(space, w_self, __args__): @@ -5476,7 +5409,7 @@ w_obj = w_self.getdictvalue(space, 'func') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func') return space.wrap(w_self.func) @@ -5484,16 +5417,18 @@ def Call_set_func(space, w_self, w_new_value): try: w_self.func = space.interp_w(expr, w_new_value, False) + if type(w_self.func) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'func', w_new_value) return 
w_self.deldictvalue(space, 'func') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Call_get_args(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') if w_self.w_args is None: @@ -5507,10 +5442,10 @@ def Call_set_args(space, w_self, w_new_value): w_self.w_args = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Call_get_keywords(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') if w_self.w_keywords is None: @@ -5524,14 +5459,14 @@ def Call_set_keywords(space, w_self, w_new_value): w_self.w_keywords = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def Call_get_starargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'starargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') return space.wrap(w_self.starargs) @@ -5539,20 +5474,22 @@ def Call_set_starargs(space, w_self, w_new_value): try: w_self.starargs = space.interp_w(expr, w_new_value, True) + if type(w_self.starargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'starargs', w_new_value) return w_self.deldictvalue(space, 'starargs') - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 def Call_get_kwargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'kwargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 16: + if not w_self.initialization_state & 64: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') return space.wrap(w_self.kwargs) @@ -5560,13 +5497,15 @@ def Call_set_kwargs(space, w_self, w_new_value): try: w_self.kwargs = space.interp_w(expr, w_new_value, True) + if type(w_self.kwargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'kwargs', w_new_value) return w_self.deldictvalue(space, 'kwargs') - w_self.initialization_state |= 16 + w_self.initialization_state |= 64 _Call_field_unroller = unrolling_iterable(['func', 'args', 'keywords', 'starargs', 'kwargs']) def Call_init(space, w_self, __args__): @@ -5603,7 +5542,7 @@ w_obj = w_self.getdictvalue(space, 'n') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n') return w_self.n @@ -5617,7 +5556,7 @@ w_self.setdictvalue(space, 'n', w_new_value) return w_self.deldictvalue(space, 'n') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Num_field_unroller = unrolling_iterable(['n']) def Num_init(space, w_self, __args__): @@ 
-5648,7 +5587,7 @@ w_obj = w_self.getdictvalue(space, 's') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's') return w_self.s @@ -5662,7 +5601,7 @@ w_self.setdictvalue(space, 's', w_new_value) return w_self.deldictvalue(space, 's') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Str_field_unroller = unrolling_iterable(['s']) def Str_init(space, w_self, __args__): @@ -5693,7 +5632,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5701,20 +5640,22 @@ def Attribute_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Attribute_get_attr(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'attr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr') return space.wrap(w_self.attr) @@ -5728,14 +5669,14 @@ w_self.setdictvalue(space, 'attr', w_new_value) return w_self.deldictvalue(space, 'attr') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Attribute_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -5751,7 +5692,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Attribute_field_unroller = unrolling_iterable(['value', 'attr', 'ctx']) def Attribute_init(space, w_self, __args__): @@ -5784,7 +5725,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5792,20 +5733,22 @@ def Subscript_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + 
w_self.initialization_state |= 4 def Subscript_get_slice(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'slice') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice') return space.wrap(w_self.slice) @@ -5813,20 +5756,22 @@ def Subscript_set_slice(space, w_self, w_new_value): try: w_self.slice = space.interp_w(slice, w_new_value, False) + if type(w_self.slice) is slice: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'slice', w_new_value) return w_self.deldictvalue(space, 'slice') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Subscript_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -5842,7 +5787,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Subscript_field_unroller = unrolling_iterable(['value', 'slice', 'ctx']) def Subscript_init(space, w_self, __args__): @@ -5875,7 +5820,7 @@ w_obj = w_self.getdictvalue(space, 'id') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id') return space.wrap(w_self.id) @@ -5889,14 +5834,14 @@ w_self.setdictvalue(space, 'id', w_new_value) return w_self.deldictvalue(space, 'id') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Name_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -5912,7 +5857,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Name_field_unroller = unrolling_iterable(['id', 'ctx']) def Name_init(space, w_self, __args__): @@ -5940,7 +5885,7 @@ ) def List_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -5954,14 +5899,14 @@ def List_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def List_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not 
w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -5977,7 +5922,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _List_field_unroller = unrolling_iterable(['elts', 'ctx']) def List_init(space, w_self, __args__): @@ -6006,7 +5951,7 @@ ) def Tuple_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6020,14 +5965,14 @@ def Tuple_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Tuple_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6043,7 +5988,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Tuple_field_unroller = unrolling_iterable(['elts', 'ctx']) def Tuple_init(space, w_self, __args__): @@ -6076,7 +6021,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return w_self.value @@ -6090,7 +6035,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Const_field_unroller = unrolling_iterable(['value']) def Const_init(space, w_self, __args__): @@ -6202,6 +6147,8 @@ def Slice_set_lower(space, w_self, w_new_value): try: w_self.lower = space.interp_w(expr, w_new_value, True) + if type(w_self.lower) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6223,6 +6170,8 @@ def Slice_set_upper(space, w_self, w_new_value): try: w_self.upper = space.interp_w(expr, w_new_value, True) + if type(w_self.upper) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6244,6 +6193,8 @@ def Slice_set_step(space, w_self, w_new_value): try: w_self.step = space.interp_w(expr, w_new_value, True) + if type(w_self.step) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6333,6 +6284,8 @@ def Index_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6602,6 +6555,8 @@ def 
comprehension_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6623,6 +6578,8 @@ def comprehension_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6680,7 +6637,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -6694,14 +6651,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def excepthandler_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -6715,7 +6672,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 excepthandler.typedef = typedef.TypeDef("excepthandler", AST.typedef, @@ -6731,7 +6688,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -6739,20 +6696,22 @@ def ExceptHandler_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ExceptHandler_get_name(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -6760,16 +6719,18 @@ def ExceptHandler_set_name(space, w_self, w_new_value): try: w_self.name = space.interp_w(expr, w_new_value, True) + if type(w_self.name) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - 
w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ExceptHandler_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -6783,7 +6744,7 @@ def ExceptHandler_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ExceptHandler_field_unroller = unrolling_iterable(['type', 'name', 'body']) def ExceptHandler_init(space, w_self, __args__): @@ -6957,6 +6918,8 @@ def keyword_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -1,6 +1,5 @@ """codegen helpers and AST constant folding.""" import sys -import itertools from pypy.interpreter.astcompiler import ast, consts, misc from pypy.tool import stdlib_opcode as ops @@ -146,8 +145,7 @@ } unrolling_unary_folders = unrolling_iterable(unary_folders.items()) -for folder in itertools.chain(binary_folders.itervalues(), - unary_folders.itervalues()): +for folder in binary_folders.values() + unary_folders.values(): folder._always_inline_ = True del folder diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -79,6 +79,7 @@ else: self.emit("class %s(AST):" % (base,)) if sum.attributes: + self.emit("") args = ", ".join(attr.name.value for attr in sum.attributes) self.emit("def __init__(self, %s):" % (args,), 1) for attr in sum.attributes: @@ -114,7 +115,7 @@ else: names.append(repr(field.name.value)) sub = (", ".join(names), name.value) - self.emit("missing_field(space, self.initialization_state, [%s], %r)" + self.emit("self.missing_field(space, [%s], %r)" % sub, 3) self.emit("else:", 2) # Fill in all the default fields. 
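(A note on the generated accessors above, for readers following the mask changes: every AST node keeps an integer initialization_state; bits 1 and 2 are now reserved for the lineno and col_offset attributes shared by all nodes, so the first real field of each concrete node starts at bit value 4, which is why the masks change from 1/2/4 to 4/8/16 throughout. The added "if type(w_self.key) is expr: raise OperationError(...)" lines make the setters reject a bare instance of the abstract base class and accept only concrete subclasses. A minimal standalone sketch of the bitmask protocol, with made-up names rather than the real PyPy classes:

    class FakeNode(object):
        # sketch only: two shared attributes, then per-field bits
        _lineno_bit = 1        # shared attribute
        _col_offset_bit = 2    # shared attribute
        _key_bit = 4           # first concrete field starts here
        _value_bit = 8

        def __init__(self):
            self.initialization_state = 0

        def set_key(self, key):
            self.key = key
            self.initialization_state |= self._key_bit

        def get_key(self):
            # a getter refuses to return a field whose bit is not set
            if not self.initialization_state & self._key_bit:
                raise AttributeError("object has no attribute 'key'")
            return self.key

    node = FakeNode()
    try:
        node.get_key()          # not initialized yet
    except AttributeError:
        pass
    node.set_key("k")
    assert node.get_key() == "k"
)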
@@ -195,17 +196,13 @@ def visitConstructor(self, cons, base, extra_attributes): self.emit("class %s(%s):" % (cons.name, base)) self.emit("") - for field in self.data.cons_attributes[cons]: - subst = (field.name, self.data.field_masks[field]) - self.emit("_%s_mask = %i" % subst, 1) - self.emit("") self.make_constructor(cons.fields, cons, extra_attributes, base) self.emit("") self.emit("def walkabout(self, visitor):", 1) self.emit("visitor.visit_%s(self)" % (cons.name,), 2) self.emit("") self.make_mutate_over(cons, cons.name) - self.make_var_syncer(cons.fields + self.data.cons_attributes[cons], + self.make_var_syncer(self.data.cons_attributes[cons] + cons.fields, cons, cons.name) def visitField(self, field): @@ -324,7 +321,7 @@ def visitSum(self, sum, name): for field in sum.attributes: - self.make_property(field, name, True) + self.make_property(field, name) self.make_typedef(name, "AST", sum.attributes, fields_name="_attributes") if not is_simple_sum(sum): @@ -400,13 +397,10 @@ def visitField(self, field, name): self.make_property(field, name) - def make_property(self, field, name, different_masks=False): + def make_property(self, field, name): func = "def %s_get_%s(space, w_self):" % (name, field.name) self.emit(func) - if different_masks: - flag = "w_self._%s_mask" % (field.name,) - else: - flag = self.data.field_masks[field] + flag = self.data.field_masks[field] if not field.seq: self.emit("if w_self.w_dict is not None:", 1) self.emit(" w_obj = w_self.getdictvalue(space, '%s')" % (field.name,), 1) @@ -458,6 +452,11 @@ config = (field.name, field.type, repr(field.opt)) self.emit("w_self.%s = space.interp_w(%s, w_new_value, %s)" % config, 2) + if field.type.value not in self.data.prod_simple: + self.emit("if type(w_self.%s) is %s:" % ( + field.name, field.type), 2) + self.emit("raise OperationError(space.w_TypeError, " + "space.w_None)", 3) else: level = 2 if field.opt and field.type.value != "int": @@ -505,7 +504,10 @@ optional_mask = 0 for i, field in enumerate(fields): flag = 1 << i - field_masks[field] = flag + if field not in field_masks: + field_masks[field] = flag + else: + assert field_masks[field] == flag if field.opt: optional_mask |= flag else: @@ -518,9 +520,9 @@ if is_simple_sum(sum): simple_types.add(tp.name.value) else: + attrs = [field for field in sum.attributes] for cons in sum.types: - attrs = [copy_field(field) for field in sum.attributes] - add_masks(cons.fields + attrs, cons) + add_masks(attrs + cons.fields, cons) cons_attributes[cons] = attrs else: prod = tp.value @@ -588,6 +590,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." + state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \\"%s\\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \\"%s\\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -631,15 +651,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." 
- for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \\"%s\\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) - raise AssertionError("should not reach here") """ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1,4 +1,3 @@ -import itertools import pypy from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction @@ -191,8 +190,8 @@ def is_w(self, space, w_other): return self is w_other - def unique_id(self, space): - return space.wrap(compute_unique_id(self)) + def immutable_unique_id(self, space): + return None def bytes_w(self, space): w_msg = typed_unwrap_error_msg(space, "bytes", self) @@ -487,6 +486,16 @@ 'parser', 'fcntl', '_codecs', 'binascii' ] + # These modules are treated like CPython treats built-in modules, + # i.e. they always shadow any xx.py. The other modules are treated + # like CPython treats extension modules, and are loaded in sys.path + # order by the fake entry '.../lib_pypy/__extensions__'. + MODULES_THAT_ALWAYS_SHADOW = dict.fromkeys([ + '__builtin__', '__pypy__', '_ast', '_codecs', '_sre', '_warnings', + '_weakref', 'errno', 'exceptions', 'gc', 'imp', 'marshal', + 'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport', + ], None) + def make_builtins(self): "NOT_RPYTHON: only for initializing the space." @@ -518,8 +527,8 @@ exception_types_w = self.export_builtin_exceptions() # initialize with "bootstrap types" from objspace (e.g. w_None) - types_w = itertools.chain(self.get_builtin_types().iteritems(), - exception_types_w.iteritems()) + types_w = (self.get_builtin_types().items() + + exception_types_w.items()) for name, w_type in types_w: self.setitem(self.builtin.w_dict, self.wrap(name), w_type) @@ -702,7 +711,10 @@ return w_two.is_w(self, w_one) def id(self, w_obj): - return w_obj.unique_id(self) + w_result = w_obj.immutable_unique_id(self) + if w_result is None: + w_result = self.wrap(compute_unique_id(w_obj)) + return w_result def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" @@ -1606,6 +1618,8 @@ 'UnicodeError', 'ValueError', 'ZeroDivisionError', + 'UnicodeEncodeError', + 'UnicodeDecodeError', ] ## Irregular part of the interface: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -54,7 +54,11 @@ # Hash support def default_identity_hash(space, w_obj): - return space.wrap(compute_identity_hash(w_obj)) + w_unique_id = w_obj.immutable_unique_id(space) + if w_unique_id is None: # common case + return space.wrap(compute_identity_hash(w_obj)) + else: + return space.hash(w_unique_id) # ____________________________________________________________ # diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -8,6 +8,7 @@ from pypy.objspace.flow.model import Variable, Constant from pypy.annotation import model as annmodel from pypy.jit.metainterp.history import REF, INT, FLOAT +from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -48,6 +49,11 @@ value._the_opaque_pointer = op 
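(On the baseobjspace.py and typedef.py hunks above: W_Root.unique_id() is replaced by immutable_unique_id(), which returns None by default; space.id() and default_identity_hash() then fall back to compute_unique_id() / compute_identity_hash(). The apparent intent is to let immutable wrappers supply a value-based identity so that equal immutable objects can share id() and hash(). The following is a hypothetical, self-contained simulation of that fallback logic, not code from the repository:

    def compute_unique_id(obj):
        return id(obj)                      # stand-in for the rlib helper

    class Base(object):
        def immutable_unique_id(self):
            return None                     # default: no value-based identity

    class ImmutablePoint(Base):
        def __init__(self, x, y):
            self.x, self.y = x, y
        def immutable_unique_id(self):
            # hypothetical: derive identity from the immutable contents;
            # a real implementation would tag the value so it cannot clash
            # with address-based ids
            return hash((self.x, self.y))

    def space_id(obj):
        result = obj.immutable_unique_id()
        if result is None:                  # common case
            result = compute_unique_id(obj)
        return result

    assert space_id(ImmutablePoint(1, 2)) == space_id(ImmutablePoint(1, 2))
    a, b = Base(), Base()
    assert space_id(a) != space_id(b)
)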
return op +def _normalize(value): + if isinstance(value, lltype._ptr): + value = lltype.top_container(value._obj) + return value + def from_opaque_string(s): if isinstance(s, str): return s @@ -322,6 +328,14 @@ _variables.append(v) return r +def compile_started_vars(clt): + if not hasattr(clt, '_debug_argtypes'): # only when compiling the loop + argtypes = [v.concretetype for v in _variables] + try: + clt._debug_argtypes = argtypes + except AttributeError: # when 'clt' is actually a translated + pass # GcStruct + def compile_add(loop, opnum): loop = _from_opaque(loop) loop.operations.append(Operation(opnum)) @@ -347,6 +361,16 @@ op = loop.operations[-1] op.descr = weakref.ref(descr) +TARGET_TOKENS = weakref.WeakKeyDictionary() + +def compile_add_target_token(loop, descr, clt): + # here, 'clt' is the compiled_loop_token of the original loop that + # we are compiling + loop = _from_opaque(loop) + op = loop.operations[-1] + descrobj = _normalize(descr) + TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args, clt + def compile_add_var(loop, intvar): loop = _from_opaque(loop) op = loop.operations[-1] @@ -381,13 +405,25 @@ _variables.append(v) return r -def compile_add_jump_target(loop, loop_target): +def compile_add_jump_target(loop, targettoken, source_clt): loop = _from_opaque(loop) - loop_target = _from_opaque(loop_target) + descrobj = _normalize(targettoken) + (loop_target, target_opindex, target_inputargs, target_clt + ) = TARGET_TOKENS[descrobj] + # + try: + assert source_clt._debug_argtypes == target_clt._debug_argtypes + except AttributeError: # when translated + pass + # op = loop.operations[-1] op.jump_target = loop_target + op.jump_target_opindex = target_opindex + op.jump_target_inputargs = target_inputargs assert op.opnum == rop.JUMP - assert len(op.args) == len(loop_target.inputargs) + assert [v.concretetype for v in op.args] == ( + [v.concretetype for v in target_inputargs]) + # if loop_target == loop: log.info("compiling new loop") else: @@ -521,10 +557,11 @@ self.opindex += 1 continue if op.opnum == rop.JUMP: - assert len(op.jump_target.inputargs) == len(args) - self.env = dict(zip(op.jump_target.inputargs, args)) + inputargs = op.jump_target_inputargs + assert len(inputargs) == len(args) + self.env = dict(zip(inputargs, args)) self.loop = op.jump_target - self.opindex = 0 + self.opindex = op.jump_target_opindex _stats.exec_jumps += 1 elif op.opnum == rop.FINISH: if self.verbose: @@ -617,6 +654,15 @@ # return _op_default_implementation + def op_label(self, _, *args): + op = self.loop.operations[self.opindex] + assert op.opnum == rop.LABEL + assert len(op.args) == len(args) + newenv = {} + for v, value in zip(op.args, args): + newenv[v] = value + self.env = newenv + def op_debug_merge_point(self, _, *args): from pypy.jit.metainterp.warmspot import get_stats try: @@ -959,6 +1005,7 @@ self._may_force = self.opindex try: inpargs = _from_opaque(ctl.compiled_version).inputargs + assert len(inpargs) == len(args) for i, inparg in enumerate(inpargs): TYPE = inparg.concretetype if TYPE is lltype.Signed: @@ -1788,9 +1835,11 @@ setannotation(compile_start_int_var, annmodel.SomeInteger()) setannotation(compile_start_ref_var, annmodel.SomeInteger()) setannotation(compile_start_float_var, annmodel.SomeInteger()) +setannotation(compile_started_vars, annmodel.s_None) setannotation(compile_add, annmodel.s_None) setannotation(compile_add_descr, annmodel.s_None) setannotation(compile_add_descr_arg, annmodel.s_None) +setannotation(compile_add_target_token, annmodel.s_None) 
setannotation(compile_add_var, annmodel.s_None) setannotation(compile_add_int_const, annmodel.s_None) setannotation(compile_add_ref_const, annmodel.s_None) diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -138,29 +138,30 @@ clt = original_loop_token.compiled_loop_token clt.loop_and_bridges.append(c) clt.compiling_a_bridge() - self._compile_loop_or_bridge(c, inputargs, operations) + self._compile_loop_or_bridge(c, inputargs, operations, clt) old, oldindex = faildescr._compiled_fail llimpl.compile_redirect_fail(old, oldindex, c) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, inputargs, operations, jitcell_token, + log=True, name=''): """In a real assembler backend, this should assemble the given list of operations. Here we just generate a similar CompiledLoop instance. The code here is RPython, whereas the code in llimpl is not. """ c = llimpl.compile_start() - clt = model.CompiledLoopToken(self, looptoken.number) + clt = model.CompiledLoopToken(self, jitcell_token.number) clt.loop_and_bridges = [c] clt.compiled_version = c - looptoken.compiled_loop_token = clt - self._compile_loop_or_bridge(c, inputargs, operations) + jitcell_token.compiled_loop_token = clt + self._compile_loop_or_bridge(c, inputargs, operations, clt) def free_loop_and_bridges(self, compiled_loop_token): for c in compiled_loop_token.loop_and_bridges: llimpl.mark_as_free(c) model.AbstractCPU.free_loop_and_bridges(self, compiled_loop_token) - def _compile_loop_or_bridge(self, c, inputargs, operations): + def _compile_loop_or_bridge(self, c, inputargs, operations, clt): var2index = {} for box in inputargs: if isinstance(box, history.BoxInt): @@ -172,10 +173,11 @@ var2index[box] = llimpl.compile_start_float_var(c) else: raise Exception("box is: %r" % (box,)) - self._compile_operations(c, operations, var2index) + llimpl.compile_started_vars(clt) + self._compile_operations(c, operations, var2index, clt) return c - def _compile_operations(self, c, operations, var2index): + def _compile_operations(self, c, operations, var2index, clt): for op in operations: llimpl.compile_add(c, op.getopnum()) descr = op.getdescr() @@ -183,9 +185,11 @@ llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo, descr.arg_types, descr.extrainfo, descr.width) - if (isinstance(descr, history.LoopToken) and - op.getopnum() != rop.JUMP): + if isinstance(descr, history.JitCellToken): + assert op.getopnum() != rop.JUMP llimpl.compile_add_loop_token(c, descr) + if isinstance(descr, history.TargetToken) and op.getopnum() == rop.LABEL: + llimpl.compile_add_target_token(c, descr, clt) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython c._obj.externalobj.operations[-1].setdescr(descr) @@ -239,9 +243,7 @@ assert op.is_final() if op.getopnum() == rop.JUMP: targettoken = op.getdescr() - assert isinstance(targettoken, history.LoopToken) - compiled_version = targettoken.compiled_loop_token.compiled_version - llimpl.compile_add_jump_target(c, compiled_version) + llimpl.compile_add_jump_target(c, targettoken, clt) elif op.getopnum() == rop.FINISH: faildescr = op.getdescr() index = self.get_fail_descr_number(faildescr) @@ -260,21 +262,28 @@ self.latest_frame = frame return fail_index - def execute_token(self, loop_token): - """Calls the assembler generated for the given loop. - Returns the ResOperation that failed, of type rop.FAIL. 
- """ - fail_index = self._execute_token(loop_token) - return self.get_fail_descr_from_number(fail_index) - - def set_future_value_int(self, index, intvalue): - llimpl.set_future_value_int(index, intvalue) - - def set_future_value_ref(self, index, objvalue): - llimpl.set_future_value_ref(index, objvalue) - - def set_future_value_float(self, index, floatvalue): - llimpl.set_future_value_float(index, floatvalue) + def make_execute_token(self, *argtypes): + nb_args = len(argtypes) + unroll_argtypes = unrolling_iterable(list(enumerate(argtypes))) + # + def execute_token(loop_token, *args): + assert len(args) == nb_args + for index, TYPE in unroll_argtypes: + x = args[index] + assert TYPE == lltype.typeOf(x) + if TYPE == lltype.Signed: + llimpl.set_future_value_int(index, x) + elif TYPE == llmemory.GCREF: + llimpl.set_future_value_ref(index, x) + elif TYPE == longlong.FLOATSTORAGE: + llimpl.set_future_value_float(index, x) + else: + assert 0 + # + fail_index = self._execute_token(loop_token) + return self.get_fail_descr_from_number(fail_index) + # + return execute_token def get_latest_value_int(self, index): return llimpl.frame_int_getvalue(self.latest_frame, index) diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -823,6 +823,15 @@ bool(v.value)): # store a non-NULL self._gen_write_barrier(newops, op.getarg(0), v) op = op.copy_and_change(rop.SETFIELD_RAW) + # ---------- write barrier for SETINTERIORFIELD_GC ------ + if op.getopnum() == rop.SETINTERIORFIELD_GC: + val = op.getarg(0) + if val is not last_malloc: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self._gen_write_barrier(newops, op.getarg(0), v) + op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) # ---------- write barrier for SETARRAYITEM_GC ---------- if op.getopnum() == rop.SETARRAYITEM_GC: val = op.getarg(0) diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -16,32 +16,106 @@ """ Manage frame positions """ def __init__(self): - self.frame_bindings = {} - self.frame_depth = 0 + self.bindings = {} + self.used = [] # list of bools + self.hint_frame_locations = {} + + frame_depth = property(lambda:xxx, lambda:xxx) # XXX kill me + + def get_frame_depth(self): + return len(self.used) def get(self, box): - return self.frame_bindings.get(box, None) + return self.bindings.get(box, None) def loc(self, box): - res = self.get(box) - if res is not None: - return res + """Return or create the frame location associated with 'box'.""" + # first check if it's already in the frame_manager + try: + return self.bindings[box] + except KeyError: + pass + # check if we have a hint for this box + if box in self.hint_frame_locations: + # if we do, try to reuse the location for this box + loc = self.hint_frame_locations[box] + if self.try_to_reuse_location(box, loc): + return loc + # no valid hint. make up a new free location + return self.get_new_loc(box) + + def get_new_loc(self, box): size = self.frame_size(box.type) - self.frame_depth += ((-self.frame_depth) & (size-1)) - # ^^^ frame_depth is rounded up to a multiple of 'size', assuming + # frame_depth is rounded up to a multiple of 'size', assuming # that 'size' is a power of two. 
The reason for doing so is to # avoid obscure issues in jump.py with stack locations that try # to move from position (6,7) to position (7,8). - newloc = self.frame_pos(self.frame_depth, box.type) - self.frame_bindings[box] = newloc - self.frame_depth += size + while self.get_frame_depth() & (size - 1): + self.used.append(False) + # + index = self.get_frame_depth() + newloc = self.frame_pos(index, box.type) + for i in range(size): + self.used.append(True) + # + if not we_are_translated(): # extra testing + testindex = self.get_loc_index(newloc) + assert testindex == index + # + self.bindings[box] = newloc return newloc + def set_binding(self, box, loc): + self.bindings[box] = loc + # + index = self.get_loc_index(loc) + if index < 0: + return + endindex = index + self.frame_size(box.type) + while len(self.used) < endindex: + self.used.append(False) + while index < endindex: + self.used[index] = True + index += 1 + def reserve_location_in_frame(self, size): - frame_depth = self.frame_depth - self.frame_depth += size + frame_depth = self.get_frame_depth() + for i in range(size): + self.used.append(True) return frame_depth + def mark_as_free(self, box): + try: + loc = self.bindings[box] + except KeyError: + return # already gone + del self.bindings[box] + # + size = self.frame_size(box.type) + baseindex = self.get_loc_index(loc) + if baseindex < 0: + return + for i in range(size): + index = baseindex + i + assert 0 <= index < len(self.used) + self.used[index] = False + + def try_to_reuse_location(self, box, loc): + index = self.get_loc_index(loc) + if index < 0: + return False + size = self.frame_size(box.type) + for i in range(size): + while (index + i) >= len(self.used): + self.used.append(False) + if self.used[index + i]: + return False # already in use + # good, we can reuse the location + for i in range(size): + self.used[index + i] = True + self.bindings[box] = loc + return True + # abstract methods that need to be overwritten for specific assemblers @staticmethod def frame_pos(loc, type): @@ -49,6 +123,10 @@ @staticmethod def frame_size(type): return 1 + @staticmethod + def get_loc_index(loc): + raise NotImplementedError("Purely abstract") + class RegisterManager(object): """ Class that keeps track of register allocations @@ -68,7 +146,14 @@ self.frame_manager = frame_manager self.assembler = assembler + def is_still_alive(self, v): + # Check if 'v' is alive at the current position. + # Return False if the last usage is strictly before. + return self.longevity[v][1] >= self.position + def stays_alive(self, v): + # Check if 'v' stays alive after the current position. + # Return False if the last usage is before or at position. return self.longevity[v][1] > self.position def next_instruction(self, incr=1): @@ -84,11 +169,14 @@ point for all variables that might be in registers. """ self._check_type(v) - if isinstance(v, Const) or v not in self.reg_bindings: + if isinstance(v, Const): return if v not in self.longevity or self.longevity[v][1] <= self.position: - self.free_regs.append(self.reg_bindings[v]) - del self.reg_bindings[v] + if v in self.reg_bindings: + self.free_regs.append(self.reg_bindings[v]) + del self.reg_bindings[v] + if self.frame_manager is not None: + self.frame_manager.mark_as_free(v) def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. 
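(On the llsupport/regalloc.py changes above: the FrameManager now records which stack slots are occupied in a list of booleans, self.used, instead of only bumping a frame_depth counter; freed slots can be reused, and hint_frame_locations can request a specific slot through try_to_reuse_location(). Float slots take two entries and their start index is aligned to their size. A simplified, self-contained sketch of that bookkeeping, working on raw indices instead of boxes and location objects:

    class TinyFrameManager(object):
        # sketch of the new free-slot bookkeeping, not the real class
        def __init__(self):
            self.used = []                  # one bool per stack slot

        def get_frame_depth(self):
            return len(self.used)

        def get_new_loc(self, size):
            # pad so the start index is aligned to 'size' (a power of two)
            while self.get_frame_depth() & (size - 1):
                self.used.append(False)
            index = self.get_frame_depth()
            self.used.extend([True] * size)
            return index

        def mark_as_free(self, index, size):
            for i in range(size):
                self.used[index + i] = False

        def try_to_reuse_location(self, index, size):
            while len(self.used) < index + size:
                self.used.append(False)
            if any(self.used[index:index + size]):
                return False                # already in use
            for i in range(size):
                self.used[index + i] = True
            return True

    fm = TinyFrameManager()
    assert fm.get_new_loc(1) == 0           # an int-sized slot
    assert fm.get_new_loc(2) == 2           # a float slot, aligned to 2
    fm.mark_as_free(0, 1)
    assert fm.try_to_reuse_location(0, 1)   # the freed slot is reusable
)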
diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -570,6 +570,28 @@ assert operations[1].getarg(2) == v_value assert operations[1].getdescr() == array_descr + def test_rewrite_assembler_5(self): + S = lltype.GcStruct('S') + A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) + interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, + A.OF, 'x') + wbdescr = self.gc_ll_descr.write_barrier_descr + ops = parse(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + expected = parse(""" + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + operations = get_deep_immutable_oplist(ops.operations) + operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, + operations, []) + equaloplists(operations, expected.operations) + def test_rewrite_assembler_initialization_store(self): S = lltype.GcStruct('S', ('parent', OBJECT), ('x', lltype.Signed)) diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -42,8 +42,13 @@ def frame_size(self, box_type): if box_type == FLOAT: return 2 + elif box_type == INT: + return 1 else: - return 1 + raise ValueError(box_type) + def get_loc_index(self, loc): + assert isinstance(loc, FakeFramePos) + return loc.pos class MockAsm(object): def __init__(self): @@ -282,7 +287,7 @@ rm.force_allocate_reg(b) rm.before_call() assert len(rm.reg_bindings) == 2 - assert fm.frame_depth == 2 + assert fm.get_frame_depth() == 2 assert len(asm.moves) == 2 rm._check_invariants() rm.after_call(boxes[-1]) @@ -305,7 +310,7 @@ rm.force_allocate_reg(b) rm.before_call(save_all_regs=True) assert len(rm.reg_bindings) == 0 - assert fm.frame_depth == 4 + assert fm.get_frame_depth() == 4 assert len(asm.moves) == 4 rm._check_invariants() rm.after_call(boxes[-1]) @@ -327,7 +332,7 @@ xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm) xrm.loc(f0) rm.loc(b0) - assert fm.frame_depth == 3 + assert fm.get_frame_depth() == 3 @@ -348,3 +353,123 @@ spilled2 = rm.force_allocate_reg(b5) assert spilled2 is loc rm._check_invariants() + + + def test_hint_frame_locations_1(self): + b0, = newboxes(0) + fm = TFrameManager() + loc123 = FakeFramePos(123, INT) + fm.hint_frame_locations[b0] = loc123 + assert fm.get_frame_depth() == 0 + loc = fm.loc(b0) + assert loc == loc123 + assert fm.get_frame_depth() == 124 + + def test_hint_frame_locations_2(self): + b0, b1, b2 = newboxes(0, 1, 2) + longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_allocate_reg(b2) + rm.force_spill_var(b0) + loc = rm.loc(b0) + assert isinstance(loc, FakeFramePos) + assert fm.get_loc_index(loc) == 0 + rm.position = 1 + assert fm.used == [True] + rm.possibly_free_var(b0) + assert fm.used == [False] + # + fm.hint_frame_locations[b1] = loc + rm.force_spill_var(b1) + loc1 = rm.loc(b1) + assert loc1 == loc + assert fm.used == [True] + # + fm.hint_frame_locations[b2] = loc + rm.force_spill_var(b2) + loc2 = rm.loc(b2) + assert loc2 != loc1 # because it was not free + assert fm.used == 
[True, True] + # + rm._check_invariants() + + def test_frame_manager_basic(self): + b0, b1 = newboxes(0, 1) + fm = TFrameManager() + loc0 = fm.loc(b0) + assert fm.get_loc_index(loc0) == 0 + # + assert fm.get(b1) is None + loc1 = fm.loc(b1) + assert fm.get_loc_index(loc1) == 1 + assert fm.get(b1) == loc1 + # + loc0b = fm.loc(b0) + assert loc0b == loc0 + # + fm.loc(BoxInt()) + assert fm.get_frame_depth() == 3 + # + f0 = BoxFloat() + locf0 = fm.loc(f0) + assert fm.get_loc_index(locf0) == 4 + assert fm.get_frame_depth() == 6 + # + f1 = BoxFloat() + locf1 = fm.loc(f1) + assert fm.get_loc_index(locf1) == 6 + assert fm.get_frame_depth() == 8 + assert fm.used == [True, True, True, False, True, True, True, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(f1) + assert fm.used == [False, True, True, False, True, True, False, False] + # + fm.reserve_location_in_frame(1) + assert fm.get_frame_depth() == 9 + assert fm.used == [False, True, True, False, True, True, False, False, True] + # + assert b0 not in fm.bindings + fm.set_binding(b0, loc0) + assert b0 in fm.bindings + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + b3 = BoxInt() + assert not fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) # already free + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b3) + assert fm.used == [False, True, True, False, True, True, False, False, True] + f3 = BoxFloat() + assert not fm.try_to_reuse_location(f3, fm.frame_pos(0, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(1, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(2, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(3, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(4, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(5, FLOAT)) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(f3, fm.frame_pos(6, FLOAT)) + assert fm.used == [False, True, True, False, True, True, True, True, True] + # + fm.used = [False] + assert fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True, True] + # + fm.used = [True] + assert not fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True] diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -1,5 +1,6 @@ from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.jit.metainterp import history +from pypy.rpython.lltypesystem import lltype class AbstractCPU(object): @@ -84,24 +85,21 @@ """Print a disassembled version of looptoken to stdout""" raise NotImplementedError - def execute_token(self, looptoken): - """Execute the generated code referenced by the looptoken. + def execute_token(self, looptoken, *args): + """NOT_RPYTHON (for tests only) + Execute the generated code referenced by the looptoken. 
Returns the descr of the last executed operation: either the one attached to the failing guard, or the one attached to the FINISH. - Use set_future_value_xxx() before, and get_latest_value_xxx() after. + Use get_latest_value_xxx() afterwards to read the result(s). """ - raise NotImplementedError + argtypes = [lltype.typeOf(x) for x in args] + execute = self.make_execute_token(*argtypes) + return execute(looptoken, *args) - def set_future_value_int(self, index, intvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_float(self, index, floatvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_ref(self, index, objvalue): - """Set the value for the index'th argument for the loop to run.""" + def make_execute_token(self, *argtypes): + """Must make and return an execute_token() function that will be + called with the given argtypes. + """ raise NotImplementedError def get_latest_value_int(self, index): diff --git a/pypy/jit/backend/test/calling_convention_test.py b/pypy/jit/backend/test/calling_convention_test.py --- a/pypy/jit/backend/test/calling_convention_test.py +++ b/pypy/jit/backend/test/calling_convention_test.py @@ -2,7 +2,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, ConstInt, ConstPtr, BoxObj, Const, ConstObj, BoxFloat, ConstFloat) @@ -40,17 +40,18 @@ local_floats = list(floats) local_ints = list(ints) expected_result = 0.0 + arguments = [] for i in range(len(args)): x = args[i] if x[0] == 'f': x = local_floats.pop() t = longlong.getfloatstorage(x) - self.cpu.set_future_value_float(i, t) + arguments.append(t) else: x = local_ints.pop() - self.cpu.set_future_value_int(i, x) + arguments.append(x) expected_result += x - return expected_result + return arguments, expected_result @classmethod def get_funcbox(cls, cpu, func_ptr): @@ -107,12 +108,12 @@ ops += 'finish(f99, %s)\n' % arguments loop = parse(ops, namespace=locals()) - looptoken = LoopToken() + looptoken = JitCellToken() done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - expected_result = self._prepare_args(args, floats, ints) + argvals, expected_result = self._prepare_args(args, floats, ints) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(x - expected_result) < 0.0001 @@ -253,13 +254,13 @@ called_ops += 'finish(f%d, descr=fdescr3)\n' % total_index # compile called loop called_loop = parse(called_ops, namespace=locals()) - called_looptoken = LoopToken() + called_looptoken = JitCellToken() called_looptoken.outermost_jitdriver_sd = FakeJitDriverSD() done_number = self.cpu.get_fail_descr_number(called_loop.operations[-1].getdescr()) self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) - expected_result = self._prepare_args(args, floats, ints) - res = cpu.execute_token(called_looptoken) + argvals, expected_result = self._prepare_args(args, floats, ints) + res = cpu.execute_token(called_looptoken, *argvals) assert res.identifier == 3 t = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(t - expected_result) < 0.0001 @@ -284,12 +285,12 @@ # we want to take the fast path self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() 
self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # prepare call to called_loop - self._prepare_args(args, floats, ints) - res = cpu.execute_token(othertoken) + argvals, _ = self._prepare_args(args, floats, ints) + res = cpu.execute_token(othertoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert res.identifier == 4 assert abs(x - expected_result) < 0.0001 diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -3,7 +3,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, TargetToken, ConstInt, ConstPtr, BoxObj, ConstObj, BoxFloat, ConstFloat) @@ -32,22 +32,19 @@ result_type, valueboxes, descr) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - j = 0 + args = [] for box in inputargs: if isinstance(box, BoxInt): - self.cpu.set_future_value_int(j, box.getint()) - j += 1 + args.append(box.getint()) elif isinstance(box, (BoxPtr, BoxObj)): - self.cpu.set_future_value_ref(j, box.getref_base()) - j += 1 + args.append(box.getref_base()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(j, box.getfloatstorage()) - j += 1 + args.append(box.getfloatstorage()) else: raise NotImplementedError(box) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *args) if res is operations[-1].getdescr(): self.guard_failed = False else: @@ -106,10 +103,9 @@ ResOperation(rop.FINISH, [i1], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) res = self.cpu.get_latest_value_int(0) assert res == 3 assert fail.identifier == 1 @@ -118,19 +114,20 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 10 @@ -139,19 +136,22 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, None, i1, None]) 
self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 44) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(2) assert res == 10 @@ -162,15 +162,17 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = weakref.ref(operations[2]) self.cpu.compile_loop(inputargs, operations, looptoken) @@ -190,15 +192,17 @@ i2 = BoxInt() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -206,14 +210,13 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -226,17 +229,21 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() + i3 = BoxInt() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -244,14 +251,13 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 
res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -261,19 +267,20 @@ i1 = BoxInt() i2 = BoxInt() faildescr1 = BasicFailDescr(1) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + operations[3].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail is faildescr1 count = self.cpu.get_latest_value_count() @@ -290,18 +297,17 @@ return AbstractFailDescr.__setattr__(self, name, value) py.test.fail("finish descrs should not be touched") faildescr = UntouchableFailDescr() # to check that is not touched - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [i0], None, descr=faildescr) ] self.cpu.compile_loop([i0], operations, looptoken) - self.cpu.set_future_value_int(0, 99) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 99) assert fail is faildescr res = self.cpu.get_latest_value_int(0) assert res == 99 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [ConstInt(42)], None, descr=faildescr) ] @@ -311,7 +317,7 @@ res = self.cpu.get_latest_value_int(0) assert res == 42 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [], None, descr=faildescr) ] @@ -320,20 +326,19 @@ assert fail is faildescr if self.cpu.supports_floats: - looptoken = LoopToken() + looptoken = JitCellToken() f0 = BoxFloat() operations = [ ResOperation(rop.FINISH, [f0], None, descr=faildescr) ] self.cpu.compile_loop([f0], operations, looptoken) value = longlong.getfloatstorage(-61.25) - self.cpu.set_future_value_float(0, value) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, value) assert fail is faildescr res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == -61.25 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [constfloat(42.5)], None, descr=faildescr) ] @@ -350,20 +355,20 @@ z = BoxInt(579) t = BoxInt(455) u = BoxInt(0) # False - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [y, x], None, descr=targettoken), ResOperation(rop.INT_ADD, [x, y], z), ResOperation(rop.INT_SUB, [y, ConstInt(1)], t), ResOperation(rop.INT_EQ, [t, ConstInt(0)], u), ResOperation(rop.GUARD_FALSE, [u], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [z, t], None, descr=looptoken), + ResOperation(rop.JUMP, [t, z], None, descr=targettoken), ] operations[-2].setfailargs([t, z]) cpu.compile_loop([x, y], operations, looptoken) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, 0, 10) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_int(1) == 55 @@ -419,14 +424,12 @@ ] ops[1].setfailargs([v_res]) # - looptoken = LoopToken() + looptoken = JitCellToken() 
self.cpu.compile_loop([v1, v2], ops, looptoken) for x, y, z in testcases: excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, x) - self.cpu.set_future_value_int(1, y) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, x, y) if (z == boom) ^ reversed: assert fail.identifier == 1 else: @@ -1082,16 +1085,18 @@ inputargs.insert(index_counter, i0) jumpargs.insert(index_counter, i1) # - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() faildescr = BasicFailDescr(15) operations = [ + ResOperation(rop.LABEL, inputargs, None, descr=targettoken), ResOperation(rop.INT_SUB, [i0, ConstInt(1)], i1), ResOperation(rop.INT_GE, [i1, ConstInt(0)], i2), ResOperation(rop.GUARD_TRUE, [i2], None), - ResOperation(rop.JUMP, jumpargs, None, descr=looptoken), + ResOperation(rop.JUMP, jumpargs, None, descr=targettoken), ] - operations[2].setfailargs(inputargs[:]) - operations[2].setdescr(faildescr) + operations[3].setfailargs(inputargs[:]) + operations[3].setdescr(faildescr) # self.cpu.compile_loop(inputargs, operations, looptoken) # @@ -1109,17 +1114,7 @@ assert 0 values[index_counter] = 11 # - for i, (box, val) in enumerate(zip(inputargs, values)): - if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, val) - elif isinstance(box, BoxPtr): - self.cpu.set_future_value_ref(i, val) - elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, val) - else: - assert 0 - # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *values) assert fail.identifier == 15 # dstvalues = values[:] @@ -1149,30 +1144,33 @@ py.test.skip("requires floats") fboxes = [BoxFloat() for i in range(12)] i2 = BoxInt() + targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) operations = [ + ResOperation(rop.LABEL, fboxes, None, descr=targettoken), ResOperation(rop.FLOAT_LE, [fboxes[0], constfloat(9.2)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), ResOperation(rop.FINISH, fboxes, None, descr=faildescr2), ] operations[-2].setfailargs(fboxes) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(fboxes, operations, looptoken) fboxes2 = [BoxFloat() for i in range(12)] f3 = BoxFloat() bridge = [ ResOperation(rop.FLOAT_SUB, [fboxes2[0], constfloat(1.0)], f3), - ResOperation(rop.JUMP, [f3] + fboxes2[1:], None, descr=looptoken), + ResOperation(rop.JUMP, [f3]+fboxes2[1:], None, descr=targettoken), ] self.cpu.compile_bridge(faildescr1, fboxes2, bridge, looptoken) + args = [] for i in range(len(fboxes)): x = 13.5 + 6.73 * i - self.cpu.set_future_value_float(i, longlong.getfloatstorage(x)) - fail = self.cpu.execute_token(looptoken) + args.append(longlong.getfloatstorage(x)) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 2 res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == 8.5 @@ -1214,7 +1212,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1222,14 +1220,12 @@ if test1 == -42 or combinaison[0] == 'b': for test2 in [-65, -42, -11]: if test2 == -42 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_int(n, test1) - n += 1 + args.append(test1) if combinaison[1] == 'b': - cpu.set_future_value_int(n, test2) - n += 1 - fail = cpu.execute_token(looptoken) + 
args.append(test2) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1271,7 +1267,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1281,16 +1277,14 @@ if test1 == -4.5 or combinaison[0] == 'b': for test2 in [-6.5, -4.5, -2.5, nan]: if test2 == -4.5 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test1)) - n += 1 + args.append( + longlong.getfloatstorage(test1)) if combinaison[1] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test2)) - n += 1 - fail = cpu.execute_token(looptoken) + args.append( + longlong.getfloatstorage(test2)) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1330,19 +1324,20 @@ faildescr = BasicFailDescr(1) operations.append(ResOperation(rop.FINISH, [], None, descr=faildescr)) - looptoken = LoopToken() + looptoken = JitCellToken() # self.cpu.compile_loop(inputargs, operations, looptoken) # - for i, box in enumerate(inputargs): + args = [] + for box in inputargs: if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, box.getint()) + args.append(box.getint()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, box.getfloatstorage()) + args.append(box.getfloatstorage()) else: assert 0 # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 1 def test_nan_and_infinity(self): @@ -1400,15 +1395,14 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(5))] operations[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) self.cpu.compile_loop(unique_testcase_list, operations, looptoken) - for i, box in enumerate(unique_testcase_list): - self.cpu.set_future_value_float( - i, box.getfloatstorage()) - fail = self.cpu.execute_token(looptoken) + args = [box.getfloatstorage() + for box in unique_testcase_list] + fail = self.cpu.execute_token(looptoken, *args) if fail.identifier != 5 - (expected_id^expected): if fail.identifier == 4: msg = "was taken" @@ -1675,15 +1669,14 @@ exc_tp = xtp exc_ptr = xptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_ref(1) == xptr excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1700,9 +1693,9 @@ exc_tp = ytp exc_ptr = yptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = 
self.cpu.grab_exc_value() assert excvalue == yptr @@ -1718,14 +1711,13 @@ finish(0) ''' loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == xptr - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 0 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1895,18 +1887,14 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 20 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 10 @@ -1940,18 +1928,14 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 42 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 42 @@ -1986,19 +1970,15 @@ ResOperation(rop.FINISH, [f2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, f2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 42.5 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 x = self.cpu.get_latest_value_float(1) @@ -2031,10 +2011,9 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([i1, i2]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1], ops, looptoken) - self.cpu.set_future_value_int(0, ord('G')) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, ord('G')) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == ord('g') @@ -2091,14 +2070,14 @@ 
ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1, i2, i3], ops, looptoken) - self.cpu.set_future_value_int(0, rffi.cast(lltype.Signed, raw)) - self.cpu.set_future_value_int(1, 2) - self.cpu.set_future_value_int(2, 4) - self.cpu.set_future_value_int(3, rffi.cast(lltype.Signed, fn)) + args = [rffi.cast(lltype.Signed, raw), + 2, + 4, + rffi.cast(lltype.Signed, fn)] assert glob.lst == [] - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert len(glob.lst) > 0 lltype.free(raw, flavor='raw') @@ -2147,13 +2126,12 @@ ops += [ ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0)) ] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') - self.cpu.set_future_value_int(0, buflen) - self.cpu.set_future_value_int(1, rffi.cast(lltype.Signed, buffer)) - fail = self.cpu.execute_token(looptoken) + args = [buflen, rffi.cast(lltype.Signed, buffer)] + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == len(cwd) assert rffi.charp2strn(buffer, buflen) == cwd @@ -2169,12 +2147,10 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[0].setfailargs([i1]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == -42 print 'step 1 ok' @@ -2183,9 +2159,7 @@ # mark as failing self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr assert self.cpu.get_latest_value_int(0) == 9 print 'step 2 ok' @@ -2201,9 +2175,7 @@ ops[0].setfailargs([]) self.cpu.compile_bridge(faildescr, [i2], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 3 assert self.cpu.get_latest_value_int(0) == 9 print 'step 3 ok' @@ -2212,9 +2184,7 @@ # mark as failing again self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr2 print 'step 4 ok' print '-'*79 @@ -2415,7 +2385,7 @@ i18 = int_add(i17, i9) finish(i18)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 @@ -2423,9 +2393,8 @@ FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, EffectInfo.MOST_GENERAL) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(looptoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(looptoken, *args) assert self.cpu.get_latest_value_int(0) == 55 ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] @@ -2435,11 
+2404,10 @@ finish(i11) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(othertoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_latest_value_int(0) == 13 assert called @@ -2471,12 +2439,12 @@ finish(f2)''' loop = parse(ops) done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.3)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(2.3)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 2.3 ops = ''' @@ -2486,11 +2454,11 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2499,11 +2467,11 @@ del called[:] self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 3.2 assert not called @@ -2561,12 +2529,12 @@ f2 = float_add(f0, f1) finish(f2)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.35)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(2.35)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.25 + 2.35 assert not called @@ -2578,13 +2546,13 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # normal call_assembler: goes to looptoken - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.25)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(3.25)] + res = 
self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2596,7 +2564,7 @@ f2 = float_sub(f0, f1) finish(f2)''' loop = parse(ops) - looptoken2 = LoopToken() + looptoken2 = JitCellToken() looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken2) @@ -2604,10 +2572,9 @@ self.cpu.redirect_call_assembler(looptoken, looptoken2) # now, our call_assembler should go to looptoken2 - self.cpu.set_future_value_float(0, longlong.getfloatstorage(6.0)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(1.5)) - # 6.0-1.5 == 1.25+3.25 - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(6.0), + longlong.getfloatstorage(1.5)] # 6.0-1.5 == 1.25+3.25 + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2958,13 +2925,137 @@ ResOperation(rop.FINISH, [p0], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # overflowing value: - self.cpu.set_future_value_int(0, sys.maxint // 4 + 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) assert fail.identifier == excdescr.identifier + def test_compile_loop_with_target(self): + i0 = BoxInt() + i1 = BoxInt() + i2 = BoxInt() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken1 = TargetToken() + targettoken2 = TargetToken() + faildescr = BasicFailDescr(2) + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), + ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr), + ResOperation(rop.LABEL, [i1], None, descr=targettoken2), + ResOperation(rop.INT_GE, [i1, ConstInt(0)], i3), + ResOperation(rop.GUARD_TRUE, [i3], None, descr=BasicFailDescr(3)), + ResOperation(rop.JUMP, [i1], None, descr=targettoken1), + ] + inputargs = [i0] + operations[3].setfailargs([i1]) + operations[6].setfailargs([i1]) + + self.cpu.compile_loop(inputargs, operations, looptoken) + fail = self.cpu.execute_token(looptoken, 2) + assert fail.identifier == 2 + res = self.cpu.get_latest_value_int(0) + assert res == 10 + + inputargs = [i0] + operations = [ + ResOperation(rop.INT_SUB, [i0, ConstInt(20)], i2), + ResOperation(rop.JUMP, [i2], None, descr=targettoken2), + ] + self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) + + fail = self.cpu.execute_token(looptoken, 2) + assert fail.identifier == 3 + res = self.cpu.get_latest_value_int(0) + assert res == -10 + + def test_compile_bridge_with_target(self): + # This test creates a loopy piece of code in a bridge, and builds another + # unrelated loop that ends in a jump directly to this loopy bit of code. + # It catches a case in which we underestimate the needed frame_depth across + # the cross-loop JUMP, because we estimate it based on the frame_depth stored + # in the original loop. 
+ i0 = BoxInt() + i1 = BoxInt() + looptoken1 = JitCellToken() + targettoken1 = TargetToken() + faildescr1 = BasicFailDescr(2) + inputargs = [i0] + operations = [ + ResOperation(rop.INT_LE, [i0, ConstInt(1)], i1), + ResOperation(rop.GUARD_TRUE, [i1], None, descr=faildescr1), + ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(1234)), + ] + operations[1].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken1) + + def func(a, b, c, d, e, f, g, h, i): + assert a + 2 == b + assert a + 4 == c + assert a + 6 == d + assert a + 8 == e + assert a + 10 == f + assert a + 12 == g + assert a + 14 == h + assert a + 16 == i + FPTR = self.Ptr(self.FuncType([lltype.Signed]*9, lltype.Void)) + func_ptr = llhelper(FPTR, func) + cpu = self.cpu + calldescr = cpu.calldescrof(deref(FPTR), (lltype.Signed,)*9, lltype.Void, + EffectInfo.MOST_GENERAL) + funcbox = self.get_funcbox(cpu, func_ptr) + + i0 = BoxInt(); i1 = BoxInt(); i2 = BoxInt(); i3 = BoxInt(); i4 = BoxInt() + i5 = BoxInt(); i6 = BoxInt(); i7 = BoxInt(); i8 = BoxInt(); i9 = BoxInt() + i10 = BoxInt(); i11 = BoxInt(); i12 = BoxInt(); i13 = BoxInt(); i14 = BoxInt() + i15 = BoxInt(); i16 = BoxInt(); i17 = BoxInt(); i18 = BoxInt(); i19 = BoxInt() + i20 = BoxInt() + inputargs = [i0] + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_ADD, [i1, ConstInt(1)], i2), + ResOperation(rop.INT_ADD, [i2, ConstInt(1)], i3), + ResOperation(rop.INT_ADD, [i3, ConstInt(1)], i4), + ResOperation(rop.INT_ADD, [i4, ConstInt(1)], i5), + ResOperation(rop.INT_ADD, [i5, ConstInt(1)], i6), + ResOperation(rop.INT_ADD, [i6, ConstInt(1)], i7), + ResOperation(rop.INT_ADD, [i7, ConstInt(1)], i8), + ResOperation(rop.INT_ADD, [i8, ConstInt(1)], i9), + ResOperation(rop.INT_ADD, [i9, ConstInt(1)], i10), + ResOperation(rop.INT_ADD, [i10, ConstInt(1)], i11), + ResOperation(rop.INT_ADD, [i11, ConstInt(1)], i12), + ResOperation(rop.INT_ADD, [i12, ConstInt(1)], i13), + ResOperation(rop.INT_ADD, [i13, ConstInt(1)], i14), + ResOperation(rop.INT_ADD, [i14, ConstInt(1)], i15), + ResOperation(rop.INT_ADD, [i15, ConstInt(1)], i16), + ResOperation(rop.INT_ADD, [i16, ConstInt(1)], i17), + ResOperation(rop.INT_ADD, [i17, ConstInt(1)], i18), + ResOperation(rop.INT_ADD, [i18, ConstInt(1)], i19), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.INT_LT, [i19, ConstInt(100)], i20), + ResOperation(rop.GUARD_TRUE, [i20], None, descr=BasicFailDescr(42)), + ResOperation(rop.JUMP, [i19], None, descr=targettoken1), + ] + operations[-2].setfailargs([]) + self.cpu.compile_bridge(faildescr1, inputargs, operations, looptoken1) + + looptoken2 = JitCellToken() + inputargs = [BoxInt()] + operations = [ + ResOperation(rop.JUMP, [ConstInt(0)], None, descr=targettoken1), + ] + self.cpu.compile_loop(inputargs, operations, looptoken2) + + fail = self.cpu.execute_token(looptoken2, -9) + assert fail.identifier == 42 + class OOtypeBackendTest(BaseBackendTest): diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -3,9 +3,10 @@ from pypy.rlib.rarithmetic import intmask, LONG_BIT from pypy.rpython.lltypesystem import llmemory from pypy.jit.metainterp.history import BasicFailDescr, TreeLoop -from 
pypy.jit.metainterp.history import BoxInt, ConstInt, LoopToken -from pypy.jit.metainterp.history import BoxPtr, ConstPtr +from pypy.jit.metainterp.history import BoxInt, ConstInt, JitCellToken +from pypy.jit.metainterp.history import BoxPtr, ConstPtr, TargetToken from pypy.jit.metainterp.history import BoxFloat, ConstFloat, Const +from pypy.jit.metainterp.history import INT, FLOAT from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.executor import execute_nonspec from pypy.jit.metainterp.resoperation import opname @@ -179,7 +180,7 @@ #print >>s, ' operations[%d].suboperations = [' % i #print >>s, ' ResOperation(rop.FAIL, [%s], None)]' % ( # ', '.join([names[v] for v in op.args])) - print >>s, ' looptoken = LoopToken()' + print >>s, ' looptoken = JitCellToken()' print >>s, ' cpu.compile_loop(inputargs, operations, looptoken)' if hasattr(self.loop, 'inputargs'): for i, v in enumerate(self.loop.inputargs): @@ -525,29 +526,53 @@ startvars.append(BoxFloat(r.random_float_storage())) else: startvars.append(BoxInt(r.random_integer())) + allow_delay = True + else: + allow_delay = False assert len(dict.fromkeys(startvars)) == len(startvars) self.startvars = startvars self.prebuilt_ptr_consts = [] self.r = r - self.build_random_loop(cpu, builder_factory, r, startvars) + self.build_random_loop(cpu, builder_factory, r, startvars, allow_delay) - def build_random_loop(self, cpu, builder_factory, r, startvars): + def build_random_loop(self, cpu, builder_factory, r, startvars, allow_delay): loop = TreeLoop('test_random_function') loop.inputargs = startvars[:] loop.operations = [] - loop.token = LoopToken() - + loop._jitcelltoken = JitCellToken() builder = builder_factory(cpu, loop, startvars[:]) - self.generate_ops(builder, r, loop, startvars) + if allow_delay: + needs_a_label = True + else: + self.insert_label(loop, 0, r) + needs_a_label = False + self.generate_ops(builder, r, loop, startvars, needs_a_label=needs_a_label) self.builder = builder self.loop = loop - cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + dump(loop) + cpu.compile_loop(loop.inputargs, loop.operations, loop._jitcelltoken) - def generate_ops(self, builder, r, loop, startvars): + def insert_label(self, loop, position, r): + assert not hasattr(loop, '_targettoken') + for i in range(position): + op = loop.operations[i] + if (not op.has_no_side_effect() + or not isinstance(op.result, (BoxInt, BoxFloat))): + position = i + break # cannot move the LABEL later + randompos = r.randrange(0, len(self.startvars)+1) + self.startvars.insert(randompos, op.result) + loop._targettoken = TargetToken() + loop.operations.insert(position, ResOperation(rop.LABEL, self.startvars, None, + loop._targettoken)) + + def generate_ops(self, builder, r, loop, startvars, needs_a_label=False): block_length = pytest.config.option.block_length + istart = 0 for i in range(block_length): + istart = len(loop.operations) try: op = r.choice(builder.OPERATIONS) op.filter(builder) @@ -556,6 +581,12 @@ pass if builder.should_fail_by is not None: break + if needs_a_label and r.random() < 0.2: + self.insert_label(loop, istart, r) + needs_a_label = False + if needs_a_label: + self.insert_label(loop, istart, r) + endvars = [] used_later = {} for op in loop.operations: @@ -581,6 +612,22 @@ if pytest.config.option.output: builder.print_loop() + def runjitcelltoken(self): + if self.startvars == self.loop.inputargs: + return self.loop._jitcelltoken + if not hasattr(self, '_initialjumploop_celltoken'): + self._initialjumploop_celltoken 
= JitCellToken() + args = [] + for box in self.startvars: + if box not in self.loop.inputargs: + box = box.constbox() + args.append(box) + self.cpu.compile_loop(self.loop.inputargs, + [ResOperation(rop.JUMP, args, None, + descr=self.loop._targettoken)], + self._initialjumploop_celltoken) + return self._initialjumploop_celltoken + def get_fail_args(self): if self.should_fail_by.is_guard(): assert self.should_fail_by.getfailargs() is not None @@ -608,14 +655,8 @@ exc = cpu.grab_exc_value() assert not exc - for i, box in enumerate(self.startvars): - if isinstance(box, BoxInt): - cpu.set_future_value_int(i, box.value) - elif isinstance(box, BoxFloat): - cpu.set_future_value_float(i, box.value) - else: - raise NotImplementedError(box) - fail = cpu.execute_token(self.loop.token) + arguments = [box.value for box in self.loop.inputargs] + fail = cpu.execute_token(self.runjitcelltoken(), *arguments) assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): if isinstance(v, (BoxFloat, ConstFloat)): @@ -676,33 +717,55 @@ # to build_bridge().) # First make up the other loop... - subset = bridge_builder.subset_of_intvars(r) - subset = [i for i in subset if i in fail_args] - if len(subset) == 0: - return False + # + # New restriction: must have the same argument count and types + # as the original loop + subset = [] + for box in self.loop.inputargs: + srcbox = r.choice(fail_args) + if srcbox.type != box.type: + if box.type == INT: + srcbox = ConstInt(r.random_integer()) + elif box.type == FLOAT: + srcbox = ConstFloat(r.random_float_storage()) + else: + raise AssertionError(box.type) + subset.append(srcbox) + # args = [x.clonebox() for x in subset] rl = RandomLoop(self.builder.cpu, self.builder.fork, r, args) + dump(rl.loop) self.cpu.compile_loop(rl.loop.inputargs, rl.loop.operations, - rl.loop.token) + rl.loop._jitcelltoken) # done self.should_fail_by = rl.should_fail_by self.expected = rl.expected assert len(rl.loop.inputargs) == len(args) # The new bridge's execution will end normally at its FINISH. # Just replace the FINISH with the JUMP to the new loop. 
- jump_op = ResOperation(rop.JUMP, subset, None, descr=rl.loop.token) + jump_op = ResOperation(rop.JUMP, subset, None, + descr=rl.loop._targettoken) subloop.operations[-1] = jump_op self.guard_op = rl.guard_op self.prebuilt_ptr_consts += rl.prebuilt_ptr_consts - self.loop.token.record_jump_to(rl.loop.token) + self.loop._jitcelltoken.record_jump_to(rl.loop._jitcelltoken) self.dont_generate_more = True if r.random() < .05: return False + dump(subloop) self.builder.cpu.compile_bridge(fail_descr, fail_args, - subloop.operations, self.loop.token) + subloop.operations, + self.loop._jitcelltoken) return True +def dump(loop): + print >> sys.stderr, loop + if hasattr(loop, 'inputargs'): + print >> sys.stderr, '\t', loop.inputargs + for op in loop.operations: + print >> sys.stderr, '\t', op + def check_random_function(cpu, BuilderClass, r, num=None, max=None): loop = RandomLoop(cpu, BuilderClass, r) while True: diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -2,8 +2,8 @@ from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from pypy.jit.metainterp.history import Const, Box, BoxInt, ConstInt -from pypy.jit.metainterp.history import (AbstractFailDescr, INT, REF, FLOAT, - LoopToken) +from pypy.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT +from pypy.jit.metainterp.history import JitCellToken from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper @@ -38,6 +38,7 @@ from pypy.jit.backend.x86.jump import remap_frame_layout from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter import longlong +from pypy.rlib.rarithmetic import intmask # darwin requires the stack to be 16 bytes aligned on calls. 
Same for gcc 4.5.0, # better safe than sorry @@ -152,14 +153,13 @@ allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, allblocks) + self.target_tokens_currently_compiling = {} def teardown(self): self.pending_guard_tokens = None if WORD == 8: self.pending_memoryerror_trampoline_from = None self.mc = None - self.looppos = -1 - self.currently_compiling_loop = None self.current_clt = None def finish_once(self): @@ -310,12 +310,11 @@ mc.MOVSD_sx(8*i, i) # xmm0 to xmm7 # if IS_X86_32: - mc.LEA_rb(eax.value, +8) stack_size += 2*WORD mc.PUSH_r(eax.value) # alignment - mc.PUSH_r(eax.value) + mc.PUSH_r(esp.value) elif IS_X86_64: - mc.LEA_rb(edi.value, +16) + mc.MOV_rr(edi.value, esp.value) # # esp is now aligned to a multiple of 16 again mc.CALL(imm(slowpathaddr)) @@ -326,7 +325,7 @@ jnz_location = mc.get_relative_pos() # if IS_X86_32: - mc.ADD_ri(esp.value, 2*WORD) + mc.ADD_ri(esp.value, 2*WORD) # cancel the two PUSHes above elif IS_X86_64: # restore the registers for i in range(7, -1, -1): @@ -422,12 +421,8 @@ def assemble_loop(self, loopname, inputargs, operations, looptoken, log): '''adds the following attributes to looptoken: - _x86_loop_code (an integer giving an address) - _x86_bootstrap_code (an integer giving an address) - _x86_direct_bootstrap_code ( " " " " ) - _x86_frame_depth - _x86_param_depth - _x86_arglocs + _x86_function_addr (address of the generated func, as an int) + _x86_loop_code (debug: addr of the start of the ResOps) _x86_debug_checksum ''' # XXX this function is too longish and contains some code @@ -443,37 +438,35 @@ assert len(set(inputargs)) == len(inputargs) self.setup(looptoken) - self.currently_compiling_loop = looptoken if log: self._register_counter(False, looptoken.number) operations = self._inject_debugging_code(looptoken, operations) regalloc = RegAlloc(self, self.cpu.translate_support_code) - arglocs, operations = regalloc.prepare_loop(inputargs, operations, - looptoken, clt.allgcrefs) - looptoken._x86_arglocs = arglocs - - bootstrappos = self.mc.get_relative_pos() - stackadjustpos = self._assemble_bootstrap_code(inputargs, arglocs) - self.looppos = self.mc.get_relative_pos() - looptoken._x86_frame_depth = -1 # temporarily - looptoken._x86_param_depth = -1 # temporarily + # + self._call_header_with_stack_check() + stackadjustpos = self._patchable_stackadjust() + clt._debug_nbargs = len(inputargs) + operations = regalloc.prepare_loop(inputargs, operations, + looptoken, clt.allgcrefs) + looppos = self.mc.get_relative_pos() + looptoken._x86_loop_code = looppos + clt.frame_depth = -1 # temporarily + clt.param_depth = -1 # temporarily frame_depth, param_depth = self._assemble(regalloc, operations) - looptoken._x86_frame_depth = frame_depth - looptoken._x86_param_depth = param_depth - - directbootstrappos = self.mc.get_relative_pos() - self._assemble_bootstrap_direct_call(arglocs, self.looppos, - frame_depth+param_depth) + clt.frame_depth = frame_depth + clt.param_depth = param_depth + # + size_excluding_failure_stuff = self.mc.get_relative_pos() self.write_pending_failure_recoveries() - fullsize = self.mc.get_relative_pos() + full_size = self.mc.get_relative_pos() # rawstart = self.materialize_loop(looptoken) debug_start("jit-backend-addr") debug_print("Loop %d (%s) has address %x to %x (bootstrap %x)" % ( looptoken.number, loopname, - rawstart + self.looppos, - rawstart + directbootstrappos, + rawstart + looppos, + rawstart + size_excluding_failure_stuff, rawstart)) debug_stop("jit-backend-addr") 
self._patch_stackadjust(rawstart + stackadjustpos, @@ -484,18 +477,17 @@ if not we_are_translated(): # used only by looptoken.dump() -- useful in tests looptoken._x86_rawstart = rawstart - looptoken._x86_fullsize = fullsize + looptoken._x86_fullsize = full_size looptoken._x86_ops_offset = ops_offset + looptoken._x86_function_addr = rawstart - looptoken._x86_bootstrap_code = rawstart + bootstrappos - looptoken._x86_loop_code = rawstart + self.looppos - looptoken._x86_direct_bootstrap_code = rawstart + directbootstrappos + self.fixup_target_tokens(rawstart) self.teardown() # oprofile support if self.cpu.profile_agent is not None: name = "Loop # %s: %s" % (looptoken.number, loopname) self.cpu.profile_agent.native_code_written(name, - rawstart, fullsize) + rawstart, full_size) return ops_offset def assemble_bridge(self, faildescr, inputargs, operations, @@ -548,6 +540,9 @@ # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset + self.fixup_target_tokens(rawstart) + self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth) + self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -668,6 +663,11 @@ mc.copy_to_raw_memory(adr_target) faildescr._x86_adr_jump_offset = 0 # means "patched" + def fixup_target_tokens(self, rawstart): + for targettoken in self.target_tokens_currently_compiling: + targettoken._x86_loop_code += rawstart + self.target_tokens_currently_compiling = None + @specialize.argtype(1) def _inject_debugging_code(self, looptoken, operations): if self._debug: @@ -685,20 +685,24 @@ ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), ResOperation(rop.SETFIELD_RAW, [c_adr, box2], None, descr=self.debug_counter_descr)] - operations = ops + operations + if operations[0].getopnum() == rop.LABEL: + operations = [operations[0]] + ops + operations[1:] + else: + operations = ops + operations return operations def _assemble(self, regalloc, operations): self._regalloc = regalloc + regalloc.compute_hint_frame_locations(operations) regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging - frame_depth = regalloc.fm.frame_depth + frame_depth = regalloc.fm.get_frame_depth() param_depth = regalloc.param_depth jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: - target_frame_depth = jump_target_descr._x86_frame_depth - target_param_depth = jump_target_descr._x86_param_depth + target_frame_depth = jump_target_descr._x86_clt.frame_depth + target_param_depth = jump_target_descr._x86_clt.param_depth frame_depth = max(frame_depth, target_frame_depth) param_depth = max(param_depth, target_param_depth) return frame_depth, param_depth @@ -793,152 +797,21 @@ self.mc.MOV_ri(ebx.value, rst) # MOV ebx, rootstacktop self.mc.SUB_mi8((ebx.value, 0), 2*WORD) # SUB [ebx], 2*WORD - def _assemble_bootstrap_direct_call(self, arglocs, jmppos, stackdepth): - if IS_X86_64: - return self._assemble_bootstrap_direct_call_64(arglocs, jmppos, stackdepth) - # XXX pushing ebx esi and edi is a bit pointless, since we store - # all regsiters anyway, for the case of guard_not_forced - # XXX this can be improved greatly. 
Right now it'll behave like - # a normal call - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - offset = 2 * WORD - tmp = eax - xmmtmp = xmm0 - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert not loc.is_xmm - self.mc.MOV_rb(loc.value, offset) - else: - self.mc.MOV_rb(tmp.value, offset) - self.mc.MOV(loc, tmp) - offset += WORD - loc = floatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert loc.is_xmm - self.mc.MOVSD_xb(loc.value, offset) - else: - self.mc.MOVSD_xb(xmmtmp.value, offset) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - offset += 2 * WORD - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - - def _assemble_bootstrap_direct_call_64(self, arglocs, jmppos, stackdepth): - # XXX: Very similar to _emit_call_64 - - src_locs = [] - dst_locs = [] - xmm_src_locs = [] - xmm_dst_locs = [] - get_from_stack = [] - - # In reverse order for use with pop() - unused_gpr = [r9, r8, ecx, edx, esi, edi] - unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] - - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - - # The lists are padded with Nones - assert len(nonfloatlocs) == len(floatlocs) - - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if len(unused_gpr) > 0: - src_locs.append(unused_gpr.pop()) - dst_locs.append(loc) - else: - get_from_stack.append((loc, False)) - - floc = floatlocs[i] - if floc is not None: - if len(unused_xmm) > 0: - xmm_src_locs.append(unused_xmm.pop()) - xmm_dst_locs.append(floc) - else: - get_from_stack.append((floc, True)) - - remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) - remap_frame_layout(self, xmm_src_locs, xmm_dst_locs, X86_64_XMM_SCRATCH_REG) - - for i in range(len(get_from_stack)): - loc, is_xmm = get_from_stack[i] - if is_xmm: - self.mc.MOVSD_xb(X86_64_XMM_SCRATCH_REG.value, (2 + i) * WORD) - self.mc.MOVSD(loc, X86_64_XMM_SCRATCH_REG) - else: - self.mc.MOV_rb(X86_64_SCRATCH_REG.value, (2 + i) * WORD) - # XXX: We're assuming that "loc" won't require regloc to - # clobber the scratch register - self.mc.MOV(loc, X86_64_SCRATCH_REG) - - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking - oldnonfloatlocs, oldfloatlocs = oldlooptoken._x86_arglocs - newnonfloatlocs, newfloatlocs = newlooptoken._x86_arglocs - assert len(oldnonfloatlocs) == len(newnonfloatlocs) - assert len(oldfloatlocs) == len(newfloatlocs) + old_nbargs = oldlooptoken.compiled_loop_token._debug_nbargs + new_nbargs = newlooptoken.compiled_loop_token._debug_nbargs + assert old_nbargs == new_nbargs # we overwrite the instructions at the old _x86_direct_bootstrap_code # to start with a JMP to the new _x86_direct_bootstrap_code. # Ideally we should rather patch all existing CALLs, but well. 
- oldadr = oldlooptoken._x86_direct_bootstrap_code - target = newlooptoken._x86_direct_bootstrap_code + oldadr = oldlooptoken._x86_function_addr + target = newlooptoken._x86_function_addr mc = codebuf.MachineCodeBlockWrapper() mc.JMP(imm(target)) + assert mc.get_relative_pos() <= 13 # keep in sync with prepare_loop() mc.copy_to_raw_memory(oldadr) - def _assemble_bootstrap_code(self, inputargs, arglocs): - nonfloatlocs, floatlocs = arglocs - self._call_header() - stackadjustpos = self._patchable_stackadjust() - tmp = eax - xmmtmp = xmm0 - self.mc.begin_reuse_scratch_register() - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is None: - continue - if isinstance(loc, RegLoc): - target = loc - else: - target = tmp - if inputargs[i].type == REF: - adr = self.fail_boxes_ptr.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - self.mc.MOV(heap(adr), imm0) - else: - adr = self.fail_boxes_int.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - if target is not loc: - assert isinstance(loc, StackLoc) - self.mc.MOV_br(loc.value, target.value) - for i in range(len(floatlocs)): - loc = floatlocs[i] - if loc is None: - continue - adr = self.fail_boxes_float.get_addr_for_num(i) - if isinstance(loc, RegLoc): - self.mc.MOVSD(loc, heap(adr)) - else: - self.mc.MOVSD(xmmtmp, heap(adr)) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - self.mc.end_reuse_scratch_register() - return stackadjustpos - def dump(self, text): if not self.verbose: return @@ -965,7 +838,7 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.SUB_ri(esp.value, 8) # = size of doubles self.mc.MOVSD_sx(0, loc.value) - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.PUSH_b(get_ebp_ofs(loc.position)) self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) @@ -976,13 +849,25 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.MOVSD_xs(loc.value, 0) self.mc.ADD_ri(esp.value, 8) # = size of doubles - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.POP_b(get_ebp_ofs(loc.position + 1)) self.mc.POP_b(get_ebp_ofs(loc.position)) else: self.mc.POP(loc) + def regalloc_immedmem2mem(self, from_loc, to_loc): + # move a ConstFloatLoc directly to a StackLoc, as two MOVs + # (even on x86-64, because the immediates are encoded as 32 bits) + assert isinstance(from_loc, ConstFloatLoc) + assert isinstance(to_loc, StackLoc) + low_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[0] + high_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[1] + low_part = intmask(low_part) + high_part = intmask(high_part) + self.mc.MOV_bi(to_loc.value, low_part) + self.mc.MOV_bi(to_loc.value + 4, high_part) + def regalloc_perform(self, op, arglocs, resloc): genop_list[op.getopnum()](self, op, arglocs, resloc) @@ -1134,18 +1019,18 @@ self.mc.MOVSD_sx(p, loc.value) else: self.mc.MOV_sr(p, loc.value) - p += round_up_to_4(loc.width) + p += loc.get_width() p = 0 for i in range(start, n): loc = arglocs[i] if not isinstance(loc, RegLoc): - if loc.width == 8: + if loc.get_width() == 8: self.mc.MOVSD(xmm0, loc) self.mc.MOVSD_sx(p, xmm0.value) else: self.mc.MOV(tmp, loc) self.mc.MOV_sr(p, tmp.value) - p += round_up_to_4(loc.width) + p += loc.get_width() self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) @@ -1882,10 +1767,10 @@ DESCR_INT = 0x01 DESCR_FLOAT = 0x02 DESCR_SPECIAL = 
0x03 - # XXX: 4*8 works on i386, should we optimize for that case? - CODE_FROMSTACK = 4*16 + CODE_FROMSTACK = 4 * (8 + 8*IS_X86_64) CODE_STOP = 0 | DESCR_SPECIAL CODE_HOLE = 4 | DESCR_SPECIAL + CODE_INPUTARG = 8 | DESCR_SPECIAL def write_failure_recovery_description(self, mc, failargs, locs): for i in range(len(failargs)): @@ -1901,7 +1786,11 @@ raise AssertionError("bogus kind") loc = locs[i] if isinstance(loc, StackLoc): - n = self.CODE_FROMSTACK//4 + loc.position + pos = loc.position + if pos < 0: + mc.writechar(chr(self.CODE_INPUTARG)) + pos = ~pos + n = self.CODE_FROMSTACK//4 + pos else: assert isinstance(loc, RegLoc) n = loc.value @@ -1921,6 +1810,7 @@ descr_to_box_type = [REF, INT, FLOAT] bytecode = rffi.cast(rffi.UCHARP, bytecode) arglocs = [] + code_inputarg = False while 1: # decode the next instruction from the bytecode code = rffi.cast(lltype.Signed, bytecode[0]) @@ -1939,11 +1829,17 @@ break kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 + if code_inputarg: + code = ~code + code_inputarg = False loc = X86FrameManager.frame_pos(code, descr_to_box_type[kind]) elif code == self.CODE_STOP: break elif code == self.CODE_HOLE: continue + elif code == self.CODE_INPUTARG: + code_inputarg = True + continue else: # 'code' identifies a register kind = code & 3 @@ -1959,6 +1855,7 @@ def grab_frame_values(self, bytecode, frame_addr, allregisters): # no malloc allowed here!! self.fail_ebp = allregisters[16 + ebp.value] + code_inputarg = False num = 0 value_hi = 0 while 1: @@ -1979,6 +1876,9 @@ # load the value from the stack kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 + if code_inputarg: + code = ~code + code_inputarg = False stackloc = frame_addr + get_ebp_ofs(code) value = rffi.cast(rffi.LONGP, stackloc)[0] if kind == self.DESCR_FLOAT and WORD == 4: @@ -1991,6 +1891,9 @@ if code == self.CODE_HOLE: num += 1 continue + if code == self.CODE_INPUTARG: + code_inputarg = True + continue assert code == self.CODE_STOP break code >>= 2 @@ -2095,9 +1998,9 @@ # returns in eax the fail_index # now we return from the complete frame, which starts from - # _assemble_bootstrap_code(). The LEA in _call_footer below throws - # away most of the frame, including all the PUSHes that we did just - # above. + # _call_header_with_stack_check(). The LEA in _call_footer below + # throws away most of the frame, including all the PUSHes that we + # did just above. 
self._call_footer() rawstart = mc.materialize(self.cpu.asmmemmgr, []) @@ -2180,7 +2083,7 @@ argtypes=op.getdescr().get_arg_types(), callconv=op.getdescr().get_call_conv()) - if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: + if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.type == FLOAT: # a float or a long long return if op.getdescr().get_return_type() == 'L': self.mc.MOV_br(resloc.value, eax.value) # long long @@ -2344,11 +2247,11 @@ fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) descr = op.getdescr() - assert isinstance(descr, LoopToken) - assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) + assert isinstance(descr, JitCellToken) + assert len(arglocs) - 2 == descr.compiled_loop_token._debug_nbargs # - # Write a call to the direct_bootstrap_code of the target assembler - self._emit_call(fail_index, imm(descr._x86_direct_bootstrap_code), + # Write a call to the target assembler + self._emit_call(fail_index, imm(descr._x86_function_addr), arglocs, 2, tmp=eax) if op.result is None: assert result_loc is None @@ -2578,15 +2481,21 @@ gcrootmap.put(self.gcrootmap_retaddr_forced, mark) self.gcrootmap_retaddr_forced = -1 - def target_arglocs(self, loop_token): - return loop_token._x86_arglocs - - def closing_jump(self, loop_token): - if loop_token is self.currently_compiling_loop: + def closing_jump(self, target_token): + # The backend's logic assumes that the target code is in a piece of + # assembler that was also called with the same number of arguments, + # so that the locations [ebp+8..] of the input arguments are valid + # stack locations both before and after the jump. + my_nbargs = self.current_clt._debug_nbargs + target_nbargs = target_token._x86_clt._debug_nbargs + assert my_nbargs == target_nbargs + # + target = target_token._x86_loop_code + if target_token in self.target_tokens_currently_compiling: curpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(self.looppos - curpos) + self.mc.JMP_l(target - curpos) else: - self.mc.JMP(imm(loop_token._x86_loop_code)) + self.mc.JMP(imm(target)) def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) @@ -2659,11 +2568,6 @@ num = getattr(rop, opname.upper()) genop_list[num] = value -def round_up_to_4(size): - if size < 4: - return 4 - return size - # XXX: ri386 migration shims: def addr_add(reg_or_imm1, reg_or_imm2, offset=0, scale=0): return AddressLoc(reg_or_imm1, reg_or_imm2, scale, offset) diff --git a/pypy/jit/backend/x86/jump.py b/pypy/jit/backend/x86/jump.py --- a/pypy/jit/backend/x86/jump.py +++ b/pypy/jit/backend/x86/jump.py @@ -1,6 +1,6 @@ import sys from pypy.tool.pairtype import extendabletype -from pypy.jit.backend.x86.regloc import ImmedLoc, StackLoc +from pypy.jit.backend.x86.regloc import ImmediateAssemblerLocation, StackLoc def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg): pending_dests = len(dst_locations) @@ -12,7 +12,7 @@ srccount[key] = 0 for i in range(len(dst_locations)): src = src_locations[i] - if isinstance(src, ImmedLoc): + if isinstance(src, ImmediateAssemblerLocation): continue key = src._getregkey() if key in srccount: @@ -31,7 +31,7 @@ srccount[key] = -1 # means "it's done" pending_dests -= 1 src = src_locations[i] - if not isinstance(src, ImmedLoc): + if not isinstance(src, ImmediateAssemblerLocation): key = src._getregkey() if key in srccount: srccount[key] -= 1 @@ -66,6 +66,13 @@ def _move(assembler, src, dst, tmpreg): if 
dst.is_memory_reference() and src.is_memory_reference(): + if isinstance(src, ImmediateAssemblerLocation): + assembler.regalloc_immedmem2mem(src, dst) + return + if tmpreg is None: + assembler.regalloc_push(src) + assembler.regalloc_pop(dst) + return assembler.regalloc_mov(src, tmpreg) src = tmpreg assembler.regalloc_mov(src, dst) @@ -87,7 +94,7 @@ dstloc = dst_locations2[i] if isinstance(loc, StackLoc): key = loc._getregkey() - if (key in dst_keys or (loc.width > WORD and + if (key in dst_keys or (loc.get_width() > WORD and (key + WORD) in dst_keys)): assembler.regalloc_push(loc) extrapushes.append(dstloc) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -5,7 +5,8 @@ import os from pypy.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr, ResOperation, BoxPtr, ConstFloat, - BoxFloat, LoopToken, INT, REF, FLOAT) + BoxFloat, INT, REF, FLOAT, + TargetToken, JitCellToken) from pypy.jit.backend.x86.regloc import * from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.rlib.objectmodel import we_are_translated @@ -27,7 +28,7 @@ class X86RegisterManager(RegisterManager): box_types = [INT, REF] - all_regs = [eax, ecx, edx, ebx, esi, edi] + all_regs = [ecx, eax, edx, ebx, esi, edi] no_lower_byte_regs = [esi, edi] save_around_call_regs = [eax, edx, ecx] frame_reg = ebp @@ -59,7 +60,7 @@ class X86_64_RegisterManager(X86RegisterManager): # r11 omitted because it's used as scratch - all_regs = [eax, ecx, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15] + all_regs = [ecx, eax, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15] no_lower_byte_regs = [] save_around_call_regs = [eax, ecx, edx, esi, edi, r8, r9, r10] @@ -129,15 +130,19 @@ @staticmethod def frame_pos(i, box_type): if IS_X86_32 and box_type == FLOAT: - return StackLoc(i, get_ebp_ofs(i+1), 2, box_type) + return StackLoc(i, get_ebp_ofs(i+1), box_type) else: - return StackLoc(i, get_ebp_ofs(i), 1, box_type) + return StackLoc(i, get_ebp_ofs(i), box_type) @staticmethod def frame_size(box_type): if IS_X86_32 and box_type == FLOAT: return 2 else: return 1 + @staticmethod + def get_loc_index(loc): + assert isinstance(loc, StackLoc) + return loc.position if WORD == 4: gpr_reg_mgr_cls = X86RegisterManager @@ -159,6 +164,8 @@ # to be read/used by the assembler too self.jump_target_descr = None self.close_stack_struct = 0 + self.final_jump_op = None + self.min_bytes_before_label = 0 def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() @@ -167,70 +174,83 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity, useful = self._compute_vars_longevity(inputargs, operations) - self.longevity = longevity - self.rm = gpr_reg_mgr_cls(longevity, + self._compute_vars_longevity(inputargs, operations) + self.rm = gpr_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) - self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, + self.xrm = xmm_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) - return operations, useful + return operations def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): - operations, useful = self._prepare(inputargs, operations, allgcrefs) - return self._process_inputargs(inputargs, useful), operations + operations = self._prepare(inputargs, operations, allgcrefs) + self._set_initial_bindings(inputargs) + # note: we need to make a 
copy of inputargs because possibly_free_vars + # is also used on op args, which is a non-resizable list + self.possibly_free_vars(list(inputargs)) + self.min_bytes_before_label = 13 + return operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, allgcrefs): - operations, _ = self._prepare(inputargs, operations, allgcrefs) + operations = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) - self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] return operations def reserve_param(self, n): self.param_depth = max(self.param_depth, n) - def _process_inputargs(self, inputargs, useful): - # XXX we can sort out here by longevity if we need something - # more optimal - floatlocs = [None] * len(inputargs) - nonfloatlocs = [None] * len(inputargs) - # Don't use all_regs[0] for passing arguments around a loop. - # Must be kept in sync with consider_jump(). - # XXX this should probably go to llsupport/regalloc.py - xmmtmp = self.xrm.free_regs.pop(0) - tmpreg = self.rm.free_regs.pop(0) - assert tmpreg == X86RegisterManager.all_regs[0] - assert xmmtmp == X86XMMRegisterManager.all_regs[0] - for i in range(len(inputargs)): - arg = inputargs[i] - assert not isinstance(arg, Const) - reg = None - if self.longevity[arg][1] > -1 and arg in useful: - if arg.type == FLOAT: - # xxx is it really a good idea? at the first CALL they - # will all be flushed anyway - reg = self.xrm.try_allocate_reg(arg) + def _set_initial_bindings(self, inputargs): + if IS_X86_64: + inputargs = self._set_initial_bindings_regs_64(inputargs) + # ... + # stack layout: arg2 + # arg1 + # arg0 + # return address + # saved ebp <-- ebp points here + # ... + cur_frame_pos = - 1 - FRAME_FIXED_SIZE + assert get_ebp_ofs(cur_frame_pos-1) == 2*WORD + assert get_ebp_ofs(cur_frame_pos-2) == 3*WORD + # + for box in inputargs: + assert isinstance(box, Box) + # + if IS_X86_32 and box.type == FLOAT: + cur_frame_pos -= 2 + else: + cur_frame_pos -= 1 + loc = self.fm.frame_pos(cur_frame_pos, box.type) + self.fm.set_binding(box, loc) + + def _set_initial_bindings_regs_64(self, inputargs): + # In reverse order for use with pop() + unused_gpr = [r9, r8, ecx, edx, esi, edi] + unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] + # + pass_on_stack = [] + # + for box in inputargs: + assert isinstance(box, Box) + # + if box.type == FLOAT: + if len(unused_xmm) > 0: + ask = unused_xmm.pop() + got = self.xrm.try_allocate_reg(box, selected_reg=ask) + assert ask == got else: - reg = self.rm.try_allocate_reg(arg) - if reg: - loc = reg + pass_on_stack.append(box) else: - loc = self.fm.loc(arg) - if arg.type == FLOAT: - floatlocs[i] = loc - else: - nonfloatlocs[i] = loc - # otherwise we have it saved on stack, so no worry - self.rm.free_regs.insert(0, tmpreg) - self.xrm.free_regs.insert(0, xmmtmp) - assert tmpreg not in nonfloatlocs - assert xmmtmp not in floatlocs - # note: we need to make a copy of inputargs because possibly_free_vars - # is also used on op args, which is a non-resizable list - self.possibly_free_vars(list(inputargs)) - return nonfloatlocs, floatlocs + if len(unused_gpr) > 0: + ask = unused_gpr.pop() + got = self.rm.try_allocate_reg(box, selected_reg=ask) + assert ask == got + else: + pass_on_stack.append(box) + # + return pass_on_stack def possibly_free_var(self, var): if var.type == FLOAT: @@ -307,7 +327,7 @@ self.xrm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) else: if isinstance(loc, 
RegLoc): if loc is ebp: @@ -316,7 +336,7 @@ self.rm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) self.rm.free_regs = [] for reg in self.rm.all_regs: if reg not in used: @@ -352,7 +372,7 @@ def get_current_depth(self): # return (self.fm.frame_depth, self.param_depth), but trying to share # the resulting tuple among several calls - arg0 = self.fm.frame_depth + arg0 = self.fm.get_frame_depth() arg1 = self.param_depth result = self.assembler._current_depths_cache if result[0] != arg0 or result[1] != arg1: @@ -441,8 +461,15 @@ i += 1 assert not self.rm.reg_bindings assert not self.xrm.reg_bindings + self.flush_loop() self.assembler.mc.mark_op(None) # end of the loop + def flush_loop(self): + # rare case: if the loop is too short, pad with NOPs + mc = self.assembler.mc + while mc.get_relative_pos() < self.min_bytes_before_label: + mc.NOP() + def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" @@ -453,7 +480,7 @@ # only to guard operations or to jump or to finish produced = {} last_used = {} - useful = {} + last_real_usage = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -464,10 +491,13 @@ opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) - if opnum != rop.JUMP and opnum != rop.FINISH: - useful[arg] = None - if isinstance(arg, Box) and arg not in last_used: + if not isinstance(arg, Box): + continue + if arg not in last_used: last_used[arg] = i + if opnum != rop.JUMP and opnum != rop.LABEL: + if arg not in last_real_usage: + last_real_usage[arg] = i if op.is_guard(): for arg in op.getfailargs(): if arg is None: # hole @@ -475,7 +505,8 @@ assert isinstance(arg, Box) if arg not in last_used: last_used[arg] = i - + self.last_real_usage = last_real_usage + # longevity = {} for arg in produced: if arg in last_used: @@ -491,7 +522,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity, useful + self.longevity = longevity def loc(self, v): if v is None: # xxx kludgy @@ -888,7 +919,7 @@ def consider_call_assembler(self, op, guard_op): descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, JitCellToken) jd = descr.outermost_jitdriver_sd assert jd is not None size = jd.portal_calldescr.get_result_size(self.translate_support_code) @@ -1318,35 +1349,72 @@ self.rm.possibly_free_var(tmpbox_low) self.rm.possibly_free_var(tmpbox_high) + def compute_hint_frame_locations(self, operations): + # optimization only: fill in the 'hint_frame_locations' dictionary + # of 'fm' based on the JUMP at the end of the loop, by looking + # at where we would like the boxes to be after the jump. + op = operations[-1] + if op.getopnum() != rop.JUMP: + return + self.final_jump_op = op + descr = op.getdescr() + assert isinstance(descr, TargetToken) + if descr._x86_loop_code != 0: + # if the target LABEL was already compiled, i.e. if it belongs + # to some already-compiled piece of code + self._compute_hint_frame_locations_from_descr(descr) + #else: + # The loop ends in a JUMP going back to a LABEL in the same loop. + # We cannot fill 'hint_frame_locations' immediately, but we can + # wait until the corresponding consider_label() to know where the + # we would like the boxes to be after the jump. 
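The 'hint_frame_locations' optimization that the comments above describe can be pictured outside the backend. The toy frame manager below is only an illustrative sketch under assumed names (the real FrameManager in llsupport/regalloc.py has a different interface); it shows why recording the stack slots that the closing JUMP wants makes later spills cheaper:

    # Toy sketch, not the real FrameManager: slots are plain integers and
    # the hint dictionary maps a box to the slot the closing JUMP expects.
    class ToyFrameManager(object):
        def __init__(self):
            self.bindings = {}              # box -> assigned stack slot
            self.hint_frame_locations = {}  # box -> slot wanted by the JUMP
            self.next_slot = 0

        def spill(self, box):
            hint = self.hint_frame_locations.get(box)
            if hint is not None and hint not in self.bindings.values():
                # spilling straight into the hinted slot means the JUMP
                # emitted later needs no extra move for this box
                self.bindings[box] = hint
            else:
                self.bindings[box] = self.next_slot
                self.next_slot += 1
            return self.bindings[box]

With the hint honoured, the jump finds the box already in the slot it needs, which is what _compute_hint_frame_locations_from_descr below prepares for.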
+ + def _compute_hint_frame_locations_from_descr(self, descr): + arglocs = descr._x86_arglocs + jump_op = self.final_jump_op + assert len(arglocs) == jump_op.numargs() + for i in range(jump_op.numargs()): + box = jump_op.getarg(i) + if isinstance(box, Box): + loc = arglocs[i] + if isinstance(loc, StackLoc): + self.fm.hint_frame_locations[box] = loc + def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, TargetToken) + arglocs = descr._x86_arglocs self.jump_target_descr = descr - nonfloatlocs, floatlocs = assembler.target_arglocs(self.jump_target_descr) - # compute 'tmploc' to be all_regs[0] by spilling what is there - box = TempBox() - box1 = TempBox() - tmpreg = X86RegisterManager.all_regs[0] - tmploc = self.rm.force_allocate_reg(box, selected_reg=tmpreg) - xmmtmp = X86XMMRegisterManager.all_regs[0] - self.xrm.force_allocate_reg(box1, selected_reg=xmmtmp) # Part about non-floats - # XXX we don't need a copy, we only just the original list - src_locations1 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type != FLOAT] - assert tmploc not in nonfloatlocs - dst_locations1 = [loc for loc in nonfloatlocs if loc is not None] + src_locations1 = [] + dst_locations1 = [] # Part about floats - src_locations2 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type == FLOAT] - dst_locations2 = [loc for loc in floatlocs if loc is not None] + src_locations2 = [] + dst_locations2 = [] + # Build the four lists + for i in range(op.numargs()): + box = op.getarg(i) + src_loc = self.loc(box) + dst_loc = arglocs[i] + if box.type != FLOAT: + src_locations1.append(src_loc) + dst_locations1.append(dst_loc) + else: + src_locations2.append(src_loc) + dst_locations2.append(dst_loc) + # Do we have a temp var? 
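The question above is answered right below: on x86-64 the dedicated scratch registers serve as temporaries, while on x86-32 None is passed and a stack-to-stack move falls back to the push/pop branch this changeset adds to _move() in jump.py. A small, purely illustrative restatement of that fallback (the recording assembler imitates the MockAssembler used by test_jump.py; it is not the real one):

    class RecordingAssembler(object):
        # records operations instead of emitting machine code
        def __init__(self):
            self.ops = []
        def regalloc_mov(self, src, dst):
            self.ops.append(('mov', src, dst))
        def regalloc_push(self, src):
            self.ops.append(('push', src))
        def regalloc_pop(self, dst):
            self.ops.append(('pop', dst))

    def move_stack_to_stack(asm, src, dst, tmpreg=None):
        # both src and dst live in the frame: x86 has no direct
        # memory-to-memory MOV, so either go through a register...
        if tmpreg is not None:
            asm.regalloc_mov(src, tmpreg)
            asm.regalloc_mov(tmpreg, dst)
        else:
            # ...or, when no register may be clobbered, through the
            # machine stack, like the new branch added to _move()
            asm.regalloc_push(src)
            asm.regalloc_pop(dst)

    asm = RecordingAssembler()
    move_stack_to_stack(asm, 'ebp+8', 'ebp+16')
    assert asm.ops == [('push', 'ebp+8'), ('pop', 'ebp+16')]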
+ if IS_X86_64: + tmpreg = X86_64_SCRATCH_REG + xmmtmp = X86_64_XMM_SCRATCH_REG + else: + tmpreg = None + xmmtmp = None + # Do the remapping remap_frame_layout_mixed(assembler, - src_locations1, dst_locations1, tmploc, + src_locations1, dst_locations1, tmpreg, src_locations2, dst_locations2, xmmtmp) - self.rm.possibly_free_var(box) - self.xrm.possibly_free_var(box1) self.possibly_free_vars_for_op(op) assembler.closing_jump(self.jump_target_descr) @@ -1362,7 +1430,7 @@ def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): shape = gcrootmap.get_basic_shape(IS_X86_64) - for v, val in self.fm.frame_bindings.items(): + for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) gcrootmap.add_frame_offset(shape, get_ebp_ofs(val.position)) @@ -1397,6 +1465,56 @@ # the FORCE_TOKEN operation returns directly 'ebp' self.rm.force_allocate_frame_reg(op.result) + def consider_label(self, op): + descr = op.getdescr() + assert isinstance(descr, TargetToken) + inputargs = op.getarglist() + arglocs = [None] * len(inputargs) + # + # we use force_spill() on the boxes that are not going to be really + # used any more in the loop, but that are kept alive anyway + # by being in a next LABEL's or a JUMP's argument or fail_args + # of some guard + position = self.rm.position + for arg in inputargs: + assert isinstance(arg, Box) + if self.last_real_usage.get(arg, -1) <= position: + self.force_spill_var(arg) + # + # we need to make sure that no variable is stored in ebp + for arg in inputargs: + if self.loc(arg) is ebp: + loc2 = self.fm.loc(arg) + self.assembler.mc.MOV(loc2, ebp) + self.rm.bindings_to_frame_reg.clear() + # + for i in range(len(inputargs)): + arg = inputargs[i] + assert isinstance(arg, Box) + loc = self.loc(arg) + assert loc is not ebp + arglocs[i] = loc + if isinstance(loc, RegLoc): + self.fm.mark_as_free(arg) + # + # if we are too close to the start of the loop, the label's target may + # get overridden by redirect_call_assembler(). (rare case) + self.flush_loop() + # + descr._x86_arglocs = arglocs + descr._x86_loop_code = self.assembler.mc.get_relative_pos() + descr._x86_clt = self.assembler.current_clt + self.assembler.target_tokens_currently_compiling[descr] = None + self.possibly_free_vars_for_op(op) + # + # if the LABEL's descr is precisely the target of the JUMP at the + # end of the same loop, i.e. if what we are compiling is a single + # loop that ends up jumping to this LABEL, then we can now provide + # the hints about the expected position of the spilled variables. + jump_op = self.final_jump_op + if jump_op is not None and jump_op.getdescr() is descr: + self._compute_hint_frame_locations_from_descr(descr) + def not_implemented_op(self, op): not_implemented("not implemented operation: %s" % op.getopname()) @@ -1452,3 +1570,7 @@ def not_implemented(msg): os.write(2, '[x86/regalloc] %s\n' % msg) raise NotImplementedError(msg) + +# xxx hack: set a default value for TargetToken._x86_loop_code. +# If 0, we know that it is a LABEL that was not compiled yet. +TargetToken._x86_loop_code = 0 diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -16,8 +16,7 @@ # class AssemblerLocation(object): - # XXX: Is adding "width" here correct? 
- _attrs_ = ('value', 'width', '_location_code') + _attrs_ = ('value', '_location_code') _immutable_ = True def _getregkey(self): return self.value @@ -28,6 +27,9 @@ def location_code(self): return self._location_code + def get_width(self): + raise NotImplementedError + def value_r(self): return self.value def value_b(self): return self.value def value_s(self): return self.value @@ -43,14 +45,21 @@ _immutable_ = True _location_code = 'b' - def __init__(self, position, ebp_offset, num_words, type): - assert ebp_offset < 0 # so no confusion with RegLoc.value + def __init__(self, position, ebp_offset, type): + # _getregkey() returns self.value; the value returned must not + # conflict with RegLoc._getregkey(). It doesn't a bit by chance, + # so let it fail the following assert if it no longer does. + assert not (0 <= ebp_offset < 8 + 8 * IS_X86_64) self.position = position self.value = ebp_offset - self.width = num_words * WORD # One of INT, REF, FLOAT self.type = type + def get_width(self): + if self.type == FLOAT: + return 8 + return WORD + def __repr__(self): return '%d(%%ebp)' % (self.value,) @@ -64,10 +73,8 @@ self.value = regnum self.is_xmm = is_xmm if self.is_xmm: - self.width = 8 self._location_code = 'x' else: - self.width = WORD self._location_code = 'r' def __repr__(self): if self.is_xmm: @@ -75,6 +82,11 @@ else: return rx86.R.names[self.value] + def get_width(self): + if self.is_xmm: + return 8 + return WORD + def lowest8bits(self): assert not self.is_xmm return RegLoc(rx86.low_byte(self.value), False) @@ -92,9 +104,11 @@ else: return eax -class ImmedLoc(AssemblerLocation): +class ImmediateAssemblerLocation(AssemblerLocation): _immutable_ = True - width = WORD + +class ImmedLoc(ImmediateAssemblerLocation): + _immutable_ = True _location_code = 'i' def __init__(self, value): @@ -105,6 +119,9 @@ def getint(self): return self.value + def get_width(self): + return WORD + def __repr__(self): return "ImmedLoc(%d)" % (self.value) @@ -117,7 +134,6 @@ class AddressLoc(AssemblerLocation): _immutable_ = True - width = WORD # The address is base_loc + (scaled_loc << scale) + static_offset def __init__(self, base_loc, scaled_loc, scale=0, static_offset=0): assert 0 <= scale < 4 @@ -146,6 +162,9 @@ info = getattr(self, attr, '?') return '' % (self._location_code, info) + def get_width(self): + return WORD + def value_a(self): return self.loc_a @@ -180,32 +199,34 @@ raise AssertionError(self._location_code) return result -class ConstFloatLoc(AssemblerLocation): - # XXX: We have to use this class instead of just AddressLoc because - # we want a width of 8 (... I think. Check this!) +class ConstFloatLoc(ImmediateAssemblerLocation): _immutable_ = True - width = 8 _location_code = 'j' def __init__(self, address): self.value = address + def get_width(self): + return 8 + def __repr__(self): return '' % (self.value,) if IS_X86_32: - class FloatImmedLoc(AssemblerLocation): + class FloatImmedLoc(ImmediateAssemblerLocation): # This stands for an immediate float. It cannot be directly used in # any assembler instruction. Instead, it is meant to be decomposed # in two 32-bit halves. On 64-bit, FloatImmedLoc() is a function # instead; see below. 
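The comment above says that on 32-bit a FloatImmedLoc is only ever consumed as two 32-bit halves. As a stand-alone illustration of what those halves contain, here is a plain-Python sketch using struct instead of the rlib longlong helpers that the real low_part() (and its presumed high-word counterpart) are built on; signedness details are glossed over:

    import struct

    def float_as_two_words(x):
        # reinterpret the 64-bit double as an integer, then split it
        bits = struct.unpack('<Q', struct.pack('<d', x))[0]
        low = bits & 0xFFFFFFFF           # first 32-bit word
        high = (bits >> 32) & 0xFFFFFFFF  # second 32-bit word
        return low, high

    assert float_as_two_words(1.0) == (0, 0x3FF00000)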
_immutable_ = True - width = 8 _location_code = '#' # don't use me def __init__(self, floatstorage): self.aslonglong = floatstorage + def get_width(self): + return 8 + def low_part(self): return intmask(self.aslonglong) diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.llinterp import LLInterpreter from pypy.rlib.objectmodel import we_are_translated +from pypy.jit.codewriter import longlong from pypy.jit.metainterp import history, compile from pypy.jit.backend.x86.assembler import Assembler386 from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS @@ -21,7 +22,6 @@ supports_floats = True supports_singlefloats = True - BOOTSTRAP_TP = lltype.FuncType([], lltype.Signed) dont_keepalive_stuff = False # for tests with_threads = False @@ -91,15 +91,6 @@ return self.assembler.assemble_bridge(faildescr, inputargs, operations, original_loop_token, log=log) - def set_future_value_int(self, index, intvalue): - self.assembler.fail_boxes_int.setitem(index, intvalue) - - def set_future_value_float(self, index, floatvalue): - self.assembler.fail_boxes_float.setitem(index, floatvalue) - - def set_future_value_ref(self, index, ptrvalue): - self.assembler.fail_boxes_ptr.setitem(index, ptrvalue) - def get_latest_value_int(self, index): return self.assembler.fail_boxes_int.getitem(index) @@ -122,27 +113,28 @@ # the FORCE_TOKEN operation and this helper both return 'ebp'. return self.assembler.fail_ebp - def execute_token(self, executable_token): - addr = executable_token._x86_bootstrap_code - #llop.debug_print(lltype.Void, ">>>> Entering", addr) - func = rffi.cast(lltype.Ptr(self.BOOTSTRAP_TP), addr) - fail_index = self._execute_call(func) - #llop.debug_print(lltype.Void, "<<<< Back") - return self.get_fail_descr_from_number(fail_index) - - def _execute_call(self, func): - # help flow objspace - prev_interpreter = None - if not self.translate_support_code: - prev_interpreter = LLInterpreter.current_interpreter - LLInterpreter.current_interpreter = self.debug_ll_interpreter - res = 0 - try: - res = func() - finally: + def make_execute_token(self, *ARGS): + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, lltype.Signed)) + # + def execute_token(executable_token, *args): + clt = executable_token.compiled_loop_token + assert len(args) == clt._debug_nbargs + # + addr = executable_token._x86_function_addr + func = rffi.cast(FUNCPTR, addr) + #llop.debug_print(lltype.Void, ">>>> Entering", addr) + prev_interpreter = None # help flow space if not self.translate_support_code: - LLInterpreter.current_interpreter = prev_interpreter - return res + prev_interpreter = LLInterpreter.current_interpreter + LLInterpreter.current_interpreter = self.debug_ll_interpreter + try: + fail_index = func(*args) + finally: + if not self.translate_support_code: + LLInterpreter.current_interpreter = prev_interpreter + #llop.debug_print(lltype.Void, "<<<< Back") + return self.get_fail_descr_from_number(fail_index) + return execute_token def cast_ptr_to_int(x): adr = llmemory.cast_ptr_to_adr(x) @@ -215,14 +207,3 @@ super(CPU_X86_64, self).__init__(*args, **kwargs) CPU = CPU386 - -# silence warnings -##history.LoopToken._x86_param_depth = 0 -##history.LoopToken._x86_arglocs = (None, None) -##history.LoopToken._x86_frame_depth = 0 -##history.LoopToken._x86_bootstrap_code = 0 -##history.LoopToken._x86_direct_bootstrap_code = 0 -##history.LoopToken._x86_loop_code = 0 
-##history.LoopToken._x86_debug_checksum = 0 -##compile.AbstractFailDescr._x86_current_depths = (0, 0) -##compile.AbstractFailDescr._x86_adr_jump_offset = 0 diff --git a/pypy/jit/backend/x86/test/test_assembler.py b/pypy/jit/backend/x86/test/test_assembler.py --- a/pypy/jit/backend/x86/test/test_assembler.py +++ b/pypy/jit/backend/x86/test/test_assembler.py @@ -46,12 +46,13 @@ xmm2] assert len(failargs) == len(locs) assembler.write_failure_recovery_description(mc, failargs, locs) - nums = [Assembler386.DESCR_INT + 4*(16+0), - Assembler386.DESCR_REF + 4*(16+1), - Assembler386.DESCR_FLOAT + 4*(16+10), - Assembler386.DESCR_INT + 4*(16+100), - Assembler386.DESCR_REF + 4*(16+101), - Assembler386.DESCR_FLOAT + 4*(16+110), + base = 8 + 8*IS_X86_64 + nums = [Assembler386.DESCR_INT + 4*(base+0), + Assembler386.DESCR_REF + 4*(base+1), + Assembler386.DESCR_FLOAT + 4*(base+10), + Assembler386.DESCR_INT + 4*(base+100), + Assembler386.DESCR_REF + 4*(base+101), + Assembler386.DESCR_FLOAT + 4*(base+110), Assembler386.CODE_HOLE, Assembler386.CODE_HOLE, Assembler386.DESCR_INT + 4*ebx.value, diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -4,7 +4,7 @@ import py from pypy.jit.metainterp.history import BoxInt, ConstInt,\ - BoxPtr, ConstPtr, TreeLoop + BoxPtr, ConstPtr, TreeLoop, TargetToken from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.codewriter import heaptracker from pypy.jit.codewriter.effectinfo import EffectInfo @@ -113,6 +113,8 @@ descr0 = cpu.fielddescrof(S, 'int') ptr0 = struct_ref + targettoken = TargetToken() + namespace = locals().copy() def test_basic(self): @@ -136,6 +138,7 @@ def test_bug_0(self): ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8] + label(i0, i1, i2, i3, i4, i5, i6, i7, i8, descr=targettoken) guard_value(i2, 1) [i2, i3, i4, i5, i6, i7, i0, i1, i8] guard_class(i4, 138998336) [i4, i5, i6, i7, i0, i1, i8] i11 = getfield_gc(i4, descr=descr0) @@ -163,7 +166,7 @@ guard_false(i32) [i4, i6, i7, i0, i1, i24] i33 = getfield_gc(i0, descr=descr0) guard_value(i33, ConstPtr(ptr0)) [i4, i6, i7, i0, i1, i33, i24] - jump(i0, i1, 1, 17, i4, ConstPtr(ptr0), i6, i7, i24) + jump(i0, i1, 1, 17, i4, ConstPtr(ptr0), i6, i7, i24, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0, 0, 0, 0, 0, 0], run=False) diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -71,6 +71,18 @@ ('mov', eax, s24), ('mov', s12, edi)] +def test_no_tmp_reg(): + assembler = MockAssembler() + s8 = frame_pos(0, INT) + s12 = frame_pos(13, INT) + s20 = frame_pos(20, INT) + s24 = frame_pos(221, INT) + remap_frame_layout(assembler, [s8, eax, s12], [s20, s24, edi], None) + assert assembler.ops == [('push', s8), + ('pop', s20), + ('mov', eax, s24), + ('mov', s12, edi)] + def test_reordering(): assembler = MockAssembler() s8 = frame_pos(8, INT) @@ -237,7 +249,7 @@ while len(result) < count: x = fn() keys = [x._getregkey()] - if isinstance(x, StackLoc) and x.width > WORD: + if isinstance(x, StackLoc) and x.get_width() > WORD: keys.append(keys[0] + WORD) for key in keys: if key in seen: @@ -255,7 +267,7 @@ for i, loc in enumerate(locations): if isinstance(loc, RegLoc): if loc.is_xmm: - if loc.width > WORD: + if loc.get_width() > WORD: newvalue = ('value-xmm-%d' % i, 'value-xmm-hiword-%d' % i) else: @@ 
-264,8 +276,8 @@ else: regs1[loc.value] = 'value-int-%d' % i elif isinstance(loc, StackLoc): - stack[loc.value] = 'value-width%d-%d' % (loc.width, i) - if loc.width > WORD: + stack[loc.value] = 'value-width%d-%d' % (loc.get_width(), i) + if loc.get_width() > WORD: stack[loc.value+WORD] = 'value-hiword-%d' % i else: assert isinstance(loc, ImmedLoc) @@ -287,7 +299,7 @@ # def read(loc, expected_width=None): if expected_width is not None: - assert loc.width == expected_width + assert loc.get_width() == expected_width if isinstance(loc, RegLoc): if loc.is_xmm: return regs2[loc.value] @@ -295,7 +307,7 @@ return regs1[loc.value] if isinstance(loc, StackLoc): got = stack[loc.value] - if loc.width > WORD: + if loc.get_width() > WORD: got = (got, stack[loc.value+WORD]) return got if isinstance(loc, ImmedLoc): @@ -309,7 +321,7 @@ else: regs1[loc.value] = newvalue elif isinstance(loc, StackLoc): - if loc.width > WORD: + if loc.get_width() > WORD: newval1, newval2 = newvalue stack[loc.value] = newval1 stack[loc.value+WORD] = newval2 diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -5,10 +5,11 @@ def test_compile_bridge_not_deeper(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' loop = self.interpret(ops, [0]) assert self.getint(0) == 20 @@ -18,22 +19,22 @@ finish(i3, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - self.cpu.set_future_value_int(0, 0) - fail = self.run(loop) + fail = self.run(loop, 0) assert fail.identifier == 2 assert self.getint(0) == 21 def test_compile_bridge_deeper(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' loop = self.interpret(ops, [0]) - previous = loop.token._x86_frame_depth - assert loop.token._x86_param_depth == 0 + previous = loop._jitcelltoken.compiled_loop_token.frame_depth + assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 assert self.getint(0) == 20 ops = ''' [i1] @@ -42,19 +43,18 @@ i5 = int_add(i4, 1) i6 = int_add(i5, 1) i7 = int_add(i5, i4) + force_spill(i5) i8 = int_add(i7, 1) i9 = int_add(i8, 1) finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - descr = loop.operations[2].getdescr() + descr = loop.operations[3].getdescr() new = descr._x86_bridge_frame_depth - assert descr._x86_bridge_param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? 
- if IS_X86_32: - assert new > previous - self.cpu.set_future_value_int(0, 0) - fail = self.run(loop) + assert descr._x86_bridge_param_depth == 0 + # the force_spill() forces the stack to grow + assert new > previous + fail = self.run(loop, 0) assert fail.identifier == 2 assert self.getint(0) == 21 assert self.getint(1) == 22 @@ -64,28 +64,30 @@ def test_bridge_jump_to_other_loop(self): loop = self.interpret(''' [i0, i10, i11, i12, i13, i14, i15, i16] + label(i0, i10, i11, i12, i13, i14, i15, i16, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1, i10, i11, i12, i13, i14, i15, i16) - ''', [0]) + jump(i1, i10, i11, i12, i13, i14, i15, i16, descr=targettoken) + ''', [0, 0, 0, 0, 0, 0, 0, 0]) other_loop = self.interpret(''' - [i3] + [i3, i10, i11, i12, i13, i14, i15, i16] + label(i3, descr=targettoken2) guard_false(i3, descr=fdescr2) [i3] - jump(i3) - ''', [1]) + jump(i3, descr=targettoken2) + ''', [1, 0, 0, 0, 0, 0, 0, 0]) ops = ''' [i3] - jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=looptoken) + jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=targettoken) ''' - bridge = self.attach_bridge(ops, other_loop, 0, looptoken=loop.token) - self.cpu.set_future_value_int(0, 1) - fail = self.run(other_loop) + bridge = self.attach_bridge(ops, other_loop, 1) + fail = self.run(other_loop, 1, 0, 0, 0, 0, 0, 0, 0) assert fail.identifier == 1 def test_bridge_jumps_to_self_deeper(self): loop = self.interpret(''' [i0, i1, i2, i31, i32, i33] + label(i0, i1, i2, i31, i32, i33, descr=targettoken) i98 = same_as(0) i99 = same_as(1) i30 = int_add(i1, i2) @@ -94,8 +96,8 @@ guard_false(i4) [i98, i3] i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] - jump(i3, i30, 1, i30, i30, i30) - ''', [0]) + jump(i3, i30, 1, i30, i30, i30, descr=targettoken) + ''', [0, 0, 0, 0, 0, 0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' @@ -104,28 +106,28 @@ i8 = int_add(i3, 1) i6 = int_add(i8, i10) i7 = int_add(i3, i6) + force_spill(i6) + force_spill(i7) + force_spill(i8) i12 = int_add(i7, i8) i11 = int_add(i12, i6) - jump(i3, i12, i11, i10, i6, i7, descr=looptoken) + jump(i3, i12, i11, i10, i6, i7, descr=targettoken) ''' - bridge = self.attach_bridge(ops, loop, 5, looptoken=loop.token) - guard_op = loop.operations[5] - loop_frame_depth = loop.token._x86_frame_depth - assert loop.token._x86_param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? 
- if IS_X86_32: - assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth + loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth + bridge = self.attach_bridge(ops, loop, 6) + guard_op = loop.operations[6] + assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 + # the force_spill() forces the stack to grow + assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth assert guard_op.getdescr()._x86_bridge_param_depth == 0 - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 0) - self.cpu.set_future_value_int(2, 0) - self.run(loop) + self.run(loop, 0, 0, 0, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 def test_bridge_jumps_to_self_shallower(self): loop = self.interpret(''' [i0, i1, i2] + label(i0, i1, i2, descr=targettoken) i98 = same_as(0) i99 = same_as(1) i3 = int_add(i0, 1) @@ -133,19 +135,16 @@ guard_false(i4) [i98, i3] i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] - jump(i3, i1, i2) - ''', [0]) + jump(i3, i1, i2, descr=targettoken) + ''', [0, 0, 0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' [i97, i3] - jump(i3, 0, 1, descr=looptoken) + jump(i3, 0, 1, descr=targettoken) ''' - bridge = self.attach_bridge(ops, loop, 4, looptoken=loop.token) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 0) - self.cpu.set_future_value_int(2, 0) - self.run(loop) + bridge = self.attach_bridge(ops, loop, 5) + self.run(loop, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -4,7 +4,7 @@ import py from pypy.jit.metainterp.history import BoxInt, ConstInt,\ - BoxPtr, ConstPtr, LoopToken, BasicFailDescr + BoxPtr, ConstPtr, BasicFailDescr, JitCellToken, TargetToken from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.detect_cpu import getcpuclass @@ -96,10 +96,16 @@ raising_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, EffectInfo.MOST_GENERAL) + targettoken = TargetToken() + targettoken2 = TargetToken() fdescr1 = BasicFailDescr(1) fdescr2 = BasicFailDescr(2) fdescr3 = BasicFailDescr(3) + def setup_method(self, meth): + self.targettoken._x86_loop_code = 0 + self.targettoken2._x86_loop_code = 0 + def f1(x): return x+1 @@ -134,26 +140,29 @@ def interpret(self, ops, args, run=True): loop = self.parse(ops) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - for i, arg in enumerate(args): + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + arguments = [] + for arg in args: if isinstance(arg, int): - self.cpu.set_future_value_int(i, arg) + arguments.append(arg) elif isinstance(arg, float): arg = longlong.getfloatstorage(arg) - self.cpu.set_future_value_float(i, arg) + arguments.append(arg) else: assert isinstance(lltype.typeOf(arg), lltype.Ptr) llgcref = lltype.cast_opaque_ptr(llmemory.GCREF, arg) - self.cpu.set_future_value_ref(i, llgcref) + arguments.append(llgcref) + loop._jitcelltoken = looptoken if run: - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, *arguments) return loop def prepare_loop(self, ops): loop = self.parse(ops) regalloc = RegAlloc(self.cpu.assembler, False) regalloc.prepare_loop(loop.inputargs, loop.operations, - loop.token, []) + loop.original_jitcell_token, []) 
return regalloc def getint(self, index): @@ -174,10 +183,7 @@ gcref = self.cpu.get_latest_value_ref(index) return lltype.cast_opaque_ptr(T, gcref) - def attach_bridge(self, ops, loop, guard_op_index, looptoken=None, **kwds): - if looptoken is not None: - self.namespace = self.namespace.copy() - self.namespace['looptoken'] = looptoken + def attach_bridge(self, ops, loop, guard_op_index, **kwds): guard_op = loop.operations[guard_op_index] assert guard_op.is_guard() bridge = self.parse(ops, **kwds) @@ -185,20 +191,21 @@ [box.type for box in guard_op.getfailargs()]) faildescr = guard_op.getdescr() self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations, - loop.token) + loop._jitcelltoken) return bridge - def run(self, loop): - return self.cpu.execute_token(loop.token) + def run(self, loop, *arguments): + return self.cpu.execute_token(loop._jitcelltoken, *arguments) class TestRegallocSimple(BaseTestRegalloc): def test_simple_loop(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' self.interpret(ops, [0]) assert self.getint(0) == 20 @@ -206,29 +213,30 @@ def test_two_loops_and_a_bridge(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_add(i0, 1) i5 = int_lt(i4, 20) guard_true(i5) [i4, i1, i2, i3] - jump(i4, i1, i2, i3) + jump(i4, i1, i2, i3, descr=targettoken) ''' loop = self.interpret(ops, [0, 0, 0, 0]) ops2 = ''' - [i5] + [i5, i6, i7, i8] + label(i5, descr=targettoken2) i1 = int_add(i5, 1) i3 = int_add(i1, 1) i4 = int_add(i3, 1) i2 = int_lt(i4, 30) guard_true(i2) [i4] - jump(i4) + jump(i4, descr=targettoken2) ''' - loop2 = self.interpret(ops2, [0]) + loop2 = self.interpret(ops2, [0, 0, 0, 0]) bridge_ops = ''' [i4] - jump(i4, i4, i4, i4, descr=looptoken) + jump(i4, i4, i4, i4, descr=targettoken) ''' - bridge = self.attach_bridge(bridge_ops, loop2, 4, looptoken=loop.token) - self.cpu.set_future_value_int(0, 0) - self.run(loop2) + bridge = self.attach_bridge(bridge_ops, loop2, 5) + self.run(loop2, 0, 0, 0, 0) assert self.getint(0) == 31 assert self.getint(1) == 30 assert self.getint(2) == 30 @@ -237,10 +245,11 @@ def test_pointer_arg(self): ops = ''' [i0, p0] + label(i0, p0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 10) guard_true(i2) [p0] - jump(i1, p0) + jump(i1, p0, descr=targettoken) ''' S = lltype.GcStruct('S') ptr = lltype.malloc(S) @@ -265,8 +274,7 @@ loop = self.interpret(ops, [0]) assert self.getint(0) == 1 bridge = self.attach_bridge(bridge_ops, loop, 2) - self.cpu.set_future_value_int(0, 0) - self.run(loop) + self.run(loop, 0) assert self.getint(0) == 1 def test_inputarg_unused(self): @@ -292,9 +300,7 @@ assert self.getint(0) == 0 assert self.getint(1) == 10 bridge = self.attach_bridge(bridge_ops, loop, 0) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - self.run(loop) + self.run(loop, 0, 10) assert self.getint(0) == 0 assert self.getint(1) == 10 @@ -311,17 +317,16 @@ finish(1, 2) ''' self.attach_bridge(bridge_ops, loop, 0) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 1) - self.run(loop) + self.run(loop, 0, 1) def test_spill_for_constant(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_add(3, i1) i5 = int_lt(i4, 30) guard_true(i5) [i0, i4, i2, i3] - jump(1, i4, 3, 4) + jump(1, i4, 3, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1, 30, 3, 4] @@ -329,31 +334,34 @@ def 
test_spill_for_constant_lshift(self): ops = ''' [i0, i2, i1, i3] + label(i0, i2, i1, i3, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, 3, i5, 4) + jump(i4, 3, i5, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, i5, 3, 4) + jump(i4, i5, 3, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] ops = ''' [i0, i3, i1, i2] + label(i0, i3, i1, i2, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, 4, i5, 3) + jump(i4, 4, i5, 3, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] @@ -361,11 +369,12 @@ def test_result_selected_reg_via_neg(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i6 = int_neg(i2) i7 = int_add(1, i1) i4 = int_lt(i7, 10) guard_true(i4) [i0, i6, i7] - jump(1, i7, i2, i6) + jump(1, i7, i2, i6, descr=targettoken) ''' self.interpret(ops, [0, 0, 3, 0]) assert self.getints(3) == [1, -3, 10] @@ -373,11 +382,12 @@ def test_compare_memory_result_survives(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_lt(i0, i1) i5 = int_add(i3, 1) i6 = int_lt(i5, 30) guard_true(i6) [i4] - jump(i0, i1, i4, i5) + jump(i0, i1, i4, i5, descr=targettoken) ''' self.interpret(ops, [0, 10, 0, 0]) assert self.getint(0) == 1 @@ -385,12 +395,13 @@ def test_jump_different_args(self): ops = ''' [i0, i15, i16, i18, i1, i2, i3] + label(i0, i15, i16, i18, i1, i2, i3, descr=targettoken) i4 = int_add(i3, 1) i5 = int_lt(i4, 20) guard_true(i5) [i2, i1] - jump(i0, i18, i15, i16, i2, i1, i4) + jump(i0, i18, i15, i16, i2, i1, i4, descr=targettoken) ''' - self.interpret(ops, [0, 1, 2, 3]) + self.interpret(ops, [0, 1, 2, 3, 0, 0, 0]) def test_op_result_unused(self): ops = ''' @@ -424,9 +435,7 @@ finish(i0, i1, i2, i3, i4, i5, i6, i7, i8) ''' self.attach_bridge(bridge_ops, loop, 1) - for i in range(9): - self.cpu.set_future_value_int(i, i) - self.run(loop) + self.run(loop, 0, 1, 2, 3, 4, 5, 6, 7, 8) assert self.getints(9) == range(9) def test_loopargs(self): @@ -436,27 +445,13 @@ jump(i4, i1, i2, i3) """ regalloc = self.prepare_loop(ops) - assert len(regalloc.rm.reg_bindings) == 2 + if IS_X86_64: + assert len(regalloc.rm.reg_bindings) == 4 + assert len(regalloc.fm.bindings) == 0 + else: + assert len(regalloc.rm.reg_bindings) == 0 + assert len(regalloc.fm.bindings) == 4 - def test_loopargs_2(self): - ops = """ - [i0, i1, i2, i3] - i4 = int_add(i0, i1) - finish(i4, i1, i2, i3) - """ - regalloc = self.prepare_loop(ops) - assert len(regalloc.rm.reg_bindings) == 2 - - def test_loopargs_3(self): - ops = """ - [i0, i1, i2, i3] - i4 = int_add(i0, i1) - guard_true(i4) [i0, i1, i2, i3, i4] - jump(i4, i1, i2, i3) - """ - regalloc = self.prepare_loop(ops) - assert len(regalloc.rm.reg_bindings) == 2 - class TestRegallocCompOps(BaseTestRegalloc): @@ -474,6 +469,7 @@ class TestRegallocMoreRegisters(BaseTestRegalloc): cpu = BaseTestRegalloc.cpu + targettoken = TargetToken() S = lltype.GcStruct('S', ('field', lltype.Char)) fielddescr = cpu.fielddescrof(S, 'field') @@ -546,6 +542,7 @@ def test_division_optimized(self): ops = ''' [i7, i6] + label(i7, i6, descr=targettoken) i18 = int_floordiv(i7, i6) i19 = 
int_xor(i7, i6) i21 = int_lt(i19, 0) @@ -553,7 +550,7 @@ i23 = int_is_true(i22) i24 = int_eq(i6, 4) guard_false(i24) [i18] - jump(i18, i6) + jump(i18, i6, descr=targettoken) ''' self.interpret(ops, [10, 4]) assert self.getint(0) == 2 @@ -622,9 +619,10 @@ i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) - assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] - assert loop.token._x86_param_depth == self.expected_param_depth(1) + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) + assert self.getints(10) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9] + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(1) def test_two_calls(self): ops = ''' @@ -633,9 +631,10 @@ i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) finish(i11, i1, i2, i3, i4, i5, i6, i7, i8, i9) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) - assert self.getints(11) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] - assert loop.token._x86_param_depth == self.expected_param_depth(2) + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) + assert self.getints(10) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9] + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(2) def test_call_many_arguments(self): # NB: The first and last arguments in the call are constants. This @@ -648,7 +647,8 @@ ''' loop = self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9]) assert self.getint(0) == 55 - assert loop.token._x86_param_depth == self.expected_param_depth(10) + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(10) def test_bridge_calls_1(self): ops = ''' @@ -668,9 +668,7 @@ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) - self.cpu.set_future_value_int(0, 4) - self.cpu.set_future_value_int(1, 7) - self.run(loop) + self.run(loop, 4, 7) assert self.getint(0) == 5*7 def test_bridge_calls_2(self): @@ -691,8 +689,6 @@ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) - self.cpu.set_future_value_int(0, 4) - self.cpu.set_future_value_int(1, 7) - self.run(loop) + self.run(loop, 4, 7) assert self.getint(0) == 29 diff --git a/pypy/jit/backend/x86/test/test_regalloc2.py b/pypy/jit/backend/x86/test/test_regalloc2.py --- a/pypy/jit/backend/x86/test/test_regalloc2.py +++ b/pypy/jit/backend/x86/test/test_regalloc2.py @@ -1,6 +1,6 @@ import py from pypy.jit.metainterp.history import ResOperation, BoxInt, ConstInt,\ - BoxPtr, ConstPtr, BasicFailDescr, LoopToken + BoxPtr, ConstPtr, BasicFailDescr, JitCellToken from pypy.jit.metainterp.resoperation import rop from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.arch import WORD @@ -20,10 +20,9 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 9) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, 9) assert cpu.get_latest_value_int(0) == (9 >> 3) assert cpu.get_latest_value_int(1) == (~18) @@ -43,10 +42,9 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, -10) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, -10) assert cpu.get_latest_value_int(0) == 0 
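All the test updates in this area follow one pattern: the removed set_future_value_int()/set_future_value_ref() calls are folded into execute_token(), which now receives the loop arguments directly. Schematically, assuming cpu and looptoken come from a compile_loop() call as in the tests here:

    def run_compiled(cpu, looptoken, *args):
        # old protocol (removed by this changeset):
        #   for i, v in enumerate(args):
        #       cpu.set_future_value_int(i, v)
        #   fail = cpu.execute_token(looptoken)
        # new protocol: arguments go straight to execute_token()
        fail = cpu.execute_token(looptoken, *args)
        return fail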
assert cpu.get_latest_value_int(1) == -1000 assert cpu.get_latest_value_int(2) == 1 @@ -140,19 +138,9 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, -13) - cpu.set_future_value_int(1, 10) - cpu.set_future_value_int(2, 10) - cpu.set_future_value_int(3, 8) - cpu.set_future_value_int(4, -8) - cpu.set_future_value_int(5, -16) - cpu.set_future_value_int(6, -18) - cpu.set_future_value_int(7, 46) - cpu.set_future_value_int(8, -12) - cpu.set_future_value_int(9, 26) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, -13, 10, 10, 8, -8, -16, -18, 46, -12, 26) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == 0 assert cpu.get_latest_value_int(2) == 0 @@ -255,19 +243,9 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 17) - cpu.set_future_value_int(1, -20) - cpu.set_future_value_int(2, -6) - cpu.set_future_value_int(3, 6) - cpu.set_future_value_int(4, 1) - cpu.set_future_value_int(5, 13) - cpu.set_future_value_int(6, 13) - cpu.set_future_value_int(7, 9) - cpu.set_future_value_int(8, 49) - cpu.set_future_value_int(9, 8) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, 17, -20, -6, 6, 1, 13, 13, 9, 49, 8) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == 8 assert cpu.get_latest_value_int(2) == 1 diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -1,9 +1,10 @@ import py from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr, rclass from pypy.rpython.annlowlevel import llhelper -from pypy.jit.metainterp.history import ResOperation, LoopToken +from pypy.jit.metainterp.history import ResOperation, TargetToken, JitCellToken from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstFloat, - ConstPtr, Box, BoxFloat, BasicFailDescr) + ConstPtr, Box, BoxFloat, + BasicFailDescr) from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.arch import WORD from pypy.jit.backend.x86.rx86 import fits_in_32bits @@ -279,13 +280,9 @@ descr=BasicFailDescr()), ] ops[-2].setfailargs([i1]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([b], ops, looptoken) - if op == rop.INT_IS_TRUE: - self.cpu.set_future_value_int(0, b.value) - else: - self.cpu.set_future_value_ref(0, b.value) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, b.value) result = self.cpu.get_latest_value_int(0) if guard == rop.GUARD_FALSE: assert result == execute(self.cpu, None, @@ -329,11 +326,10 @@ ] ops[-2].setfailargs([i1]) inputargs = [i for i in (a, b) if isinstance(i, Box)] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, ops, looptoken) - for i, box in enumerate(inputargs): - self.cpu.set_future_value_int(i, box.value) - self.cpu.execute_token(looptoken) + inputvalues = [box.value for box in inputargs] + self.cpu.execute_token(looptoken, *inputvalues) result = self.cpu.get_latest_value_int(0) expected = execute(self.cpu, None, op, None, a, b).value if guard == rop.GUARD_FALSE: @@ -353,9 +349,10 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() + targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken 
= LoopToken() + looptoken = JitCellToken() looptoken.number = 17 class FakeString(object): def __init__(self, val): @@ -365,14 +362,15 @@ return self.val operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0], None), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[3].setfailargs([i1]) + operations[-2].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) name, loopaddress, loopsize = agent.functions[0] assert name == "Loop # 17: hello (loop counter 0)" @@ -385,7 +383,7 @@ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0], None), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -397,8 +395,7 @@ assert address >= loopaddress + loopsize assert size >= 10 # randomish number - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -408,11 +405,13 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] debug._log = dlog = debug.DebugLog() @@ -499,12 +498,10 @@ ops[3].setfailargs([]) ops[5].setfailargs([]) ops[7].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) - self.cpu.set_future_value_int(0, 123450) - self.cpu.set_future_value_int(1, 123408) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 123450, 123408) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 42 assert self.cpu.get_latest_value_int(1) == 42 @@ -523,19 +520,20 @@ loop = """ [i0] + label(i0, descr=targettoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1) + jump(i1, descr=targettoken) """ - ops = parse(loop) + ops = parse(loop, namespace={'targettoken': TargetToken()}) debug._log = dlog = debug.DebugLog() try: self.cpu.assembler.set_debug(True) - self.cpu.compile_loop(ops.inputargs, ops.operations, ops.token) - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(ops.token) + looptoken = JitCellToken() + self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) + self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] assert struct.i == 10 @@ -547,16 +545,17 @@ def test_debugger_checksum(self): loop = """ [i0] + label(i0, descr=targettoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1) + jump(i1, descr=targettoken) """ - ops = parse(loop) + ops = parse(loop, namespace={'targettoken': TargetToken()}) self.cpu.assembler.set_debug(True) - 
self.cpu.compile_loop(ops.inputargs, ops.operations, ops.token) - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(ops.token) - assert ops.token._x86_debug_checksum == sum([op.getopnum() + looptoken = JitCellToken() + self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) + self.cpu.execute_token(looptoken, 0) + assert looptoken._x86_debug_checksum == sum([op.getopnum() for op in ops.operations]) diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -490,8 +490,8 @@ check(a[i].y.i == n + i * 100 + 2) check(a[i].z.i == n + i * 100 + 3) i += 1 + n -= x.foo return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s - f(123, *[None]*11) # check that the check() are ok return None, f, None def test_compile_framework_7_interior(self): diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -498,27 +498,29 @@ else: log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) + def _rewrite_raw_malloc(self, op, name, args): + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) + TYPE = op.args[0].value + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, args, + extra = (TYPE,), + extrakey = TYPE) + def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': - d = op.args[1].value.copy() - d.pop('flavor') - add_memory_pressure = d.pop('add_memory_pressure', False) - zero = d.pop('zero', False) - track_allocation = d.pop('track_allocation', True) - if d: - raise UnsupportedMallocFlags(d) - ARRAY = op.args[0].value - name = 'raw_malloc' - if zero: - name += '_zero' - if add_memory_pressure: - name += '_add_memory_pressure' - if not track_allocation: - name += '_no_track_allocation' - return self._do_builtin_call(op, name, - [op.args[2]], - extra = (ARRAY,), - extrakey = ARRAY) + return self._rewrite_raw_malloc(op, 'raw_malloc_varsize', + [op.args[2]]) if op.args[0].value == rstr.STR: return SpaceOperation('newstr', [op.args[2]], op.result) elif op.args[0].value == rstr.UNICODE: @@ -531,11 +533,18 @@ op.result) def rewrite_op_free(self, op): - flags = op.args[1].value - assert flags['flavor'] == 'raw' - ARRAY = op.args[0].concretetype.TO - return self._do_builtin_call(op, 'raw_free', [op.args[0]], - extra = (ARRAY,), extrakey = ARRAY) + d = op.args[1].value.copy() + assert d['flavor'] == 'raw' + d.pop('flavor') + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) + STRUCT = op.args[0].concretetype.TO + name = 'raw_free' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[0]], + extra = (STRUCT,), extrakey = STRUCT) def rewrite_op_getarrayitem(self, op): ARRAY = op.args[0].concretetype.TO @@ -736,6 +745,9 @@ return [op0, op1] def rewrite_op_malloc(self, op): + if op.args[1].value['flavor'] == 'raw': + return self._rewrite_raw_malloc(op, 'raw_malloc_fixedsize', []) + # assert op.args[1].value == {'flavor': 'gc'} STRUCT = op.args[0].value vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, STRUCT) diff --git 
a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -599,26 +599,75 @@ return p return _ll_0_alloc_with_del - def build_raw_malloc_builder(zero=False, add_memory_pressure=False, track_allocation=True): - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, add_memory_pressure=add_memory_pressure) - return _ll_1_raw_malloc - return build_ll_1_raw_malloc + def build_raw_malloc_varsize_builder(zero=False, + add_memory_pressure=False, + track_allocation=True): + def build_ll_1_raw_malloc_varsize(ARRAY): + def _ll_1_raw_malloc_varsize(n): + return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) + return _ll_1_raw_malloc_varsize + return build_ll_1_raw_malloc_varsize - build_ll_1_raw_malloc = build_raw_malloc_builder() - build_ll_1_raw_malloc_zero = build_raw_malloc_builder(zero=True) - build_ll_1_raw_malloc_zero_add_memory_pressure = build_raw_malloc_builder(zero=True, add_memory_pressure=True) - build_ll_1_raw_malloc_add_memory_pressure = build_raw_malloc_builder(add_memory_pressure=True) - build_ll_1_raw_malloc_no_track_allocation = build_raw_malloc_builder(track_allocation=False) - build_ll_1_raw_malloc_zero_no_track_allocation = build_raw_malloc_builder(zero=True, track_allocation=False) - build_ll_1_raw_malloc_zero_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(zero=True, add_memory_pressure=True, track_allocation=False) - build_ll_1_raw_malloc_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(add_memory_pressure=True, track_allocation=False) + build_ll_1_raw_malloc_varsize = ( + build_raw_malloc_varsize_builder()) + build_ll_1_raw_malloc_varsize_zero = ( + build_raw_malloc_varsize_builder(zero=True)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure = ( + build_raw_malloc_varsize_builder(zero=True, add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_add_memory_pressure = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_no_track_allocation = ( + build_raw_malloc_varsize_builder(track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, add_memory_pressure=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True, track_allocation=False)) - def build_ll_1_raw_free(ARRAY): - def _ll_1_raw_free(p): - lltype.free(p, flavor='raw') - return _ll_1_raw_free + def build_raw_malloc_fixedsize_builder(zero=False, + add_memory_pressure=False, + track_allocation=True): + def build_ll_0_raw_malloc_fixedsize(STRUCT): + def _ll_0_raw_malloc_fixedsize(): + return lltype.malloc(STRUCT, flavor='raw', zero=zero, + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) + return _ll_0_raw_malloc_fixedsize + return build_ll_0_raw_malloc_fixedsize + + build_ll_0_raw_malloc_fixedsize = ( + build_raw_malloc_fixedsize_builder()) + build_ll_0_raw_malloc_fixedsize_zero = ( + build_raw_malloc_fixedsize_builder(zero=True)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(zero=True, 
add_memory_pressure=True)) + build_ll_0_raw_malloc_fixedsize_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True)) + build_ll_0_raw_malloc_fixedsize_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, add_memory_pressure=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True, track_allocation=False)) + + def build_raw_free_builder(track_allocation=True): + def build_ll_1_raw_free(ARRAY): + def _ll_1_raw_free(p): + lltype.free(p, flavor='raw', + track_allocation=track_allocation) + return _ll_1_raw_free + return build_ll_1_raw_free + + build_ll_1_raw_free = ( + build_raw_free_builder()) + build_ll_1_raw_free_no_track_allocation = ( + build_raw_free_builder(track_allocation=False)) + class OOtypeHelpers: diff --git a/pypy/jit/codewriter/test/test_codewriter.py b/pypy/jit/codewriter/test/test_codewriter.py --- a/pypy/jit/codewriter/test/test_codewriter.py +++ b/pypy/jit/codewriter/test/test_codewriter.py @@ -217,7 +217,7 @@ cw.make_jitcodes(verbose=True) # s = jitdriver_sd.mainjitcode.dump() - assert 'residual_call_ir_i $<* fn _ll_1_raw_malloc__Signed>' in s + assert 'residual_call_ir_i $<* fn _ll_1_raw_malloc_varsize__Signed>' in s assert 'setarrayitem_raw_i' in s assert 'getarrayitem_raw_i' in s assert 'residual_call_ir_v $<* fn _ll_1_raw_free__arrayPtr>' in s diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -550,7 +550,7 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) op0, op1 = tr.rewrite_operation(op) assert op0.opname == 'residual_call_ir_i' - assert op0.args[0].value == 'raw_malloc' # pseudo-function as a str + assert op0.args[0].value == 'raw_malloc_varsize' # pseudo-function as a str assert op1.opname == '-live-' assert op1.args == [] @@ -564,7 +564,7 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) op0, op1 = tr.rewrite_operation(op) assert op0.opname == 'residual_call_ir_i' - assert op0.args[0].value == 'raw_malloc_zero' # pseudo-function as a str + assert op0.args[0].value == 'raw_malloc_varsize_zero' # pseudo-fn as a str assert op1.opname == '-live-' assert op1.args == [] @@ -578,6 +578,35 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) +def test_raw_malloc_fixedsize(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc', [Constant(S, lltype.Void), flags], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_r_i' + assert op0.args[0].value == 'raw_malloc_fixedsize_zero' #pseudo-fn as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_free(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + for flag in [True, False]: + flags = Constant({'flavor': 'raw', 'track_allocation': flag}, + lltype.Void) + op = SpaceOperation('free', [varoftype(lltype.Ptr(S)), 
flags], + varoftype(lltype.Void)) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_v' + if flag: + pseudo_op_name = 'raw_free' + else: + pseudo_op_name = 'raw_free_no_track_allocation' + assert op0.args[0].value == pseudo_op_name # pseudo-function as a str + assert op1.opname == '-live-' + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1504,7 +1504,6 @@ all_virtuals=None): from pypy.jit.metainterp.resume import blackhole_from_resumedata #debug_start('jit-blackhole') - metainterp_sd.profiler.start_blackhole() blackholeinterp = blackhole_from_resumedata( metainterp_sd.blackholeinterpbuilder, jitdriver_sd, @@ -1518,10 +1517,9 @@ current_exc = blackholeinterp._prepare_resume_from_failure( resumedescr.guard_opnum, dont_change_position) - try: - _run_forever(blackholeinterp, current_exc) - finally: - metainterp_sd.profiler.end_blackhole() + #try: + _run_forever(blackholeinterp, current_exc) + #finally: #debug_stop('jit-blackhole') def convert_and_run_from_pyjitpl(metainterp, raising_exception=False): @@ -1529,7 +1527,6 @@ # 'metainterp.framestack'. #debug_start('jit-blackhole') metainterp_sd = metainterp.staticdata - metainterp_sd.profiler.start_blackhole() nextbh = None for frame in metainterp.framestack: curbh = metainterp_sd.blackholeinterpbuilder.acquire_interp() @@ -1546,8 +1543,7 @@ firstbh.exception_last_value = current_exc current_exc = lltype.nullptr(rclass.OBJECTPTR.TO) # - try: - _run_forever(firstbh, current_exc) - finally: - metainterp_sd.profiler.end_blackhole() + #try: + _run_forever(firstbh, current_exc) + #finally: #debug_stop('jit-blackhole') diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -9,12 +9,13 @@ from pypy.tool.sourcetools import func_with_new_name from pypy.jit.metainterp.resoperation import ResOperation, rop, get_deep_immutable_oplist -from pypy.jit.metainterp.history import TreeLoop, Box, History, LoopToken +from pypy.jit.metainterp.history import TreeLoop, Box, History, JitCellToken, TargetToken from pypy.jit.metainterp.history import AbstractFailDescr, BoxInt -from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const +from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const, ConstInt from pypy.jit.metainterp import history from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.jit.metainterp.optimize import InvalidLoop +from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resume import NUMBERING, PENDINGFIELDSP from pypy.jit.codewriter import heaptracker, longlong @@ -23,7 +24,7 @@ from pypy.jit.metainterp.jitprof import ABORT_BRIDGE raise SwitchToBlackhole(ABORT_BRIDGE) -def show_loop(metainterp_sd, loop=None, error=None): +def show_procedures(metainterp_sd, procedure=None, error=None): # debugging if option.view or option.viewloops: if error: @@ -32,11 +33,12 @@ errmsg += ': ' + str(error) else: errmsg = None - if loop is None: # or type(loop) is TerminatingLoop: - extraloops = [] + if procedure is None: + extraprocedures = [] else: - extraloops = [loop] - metainterp_sd.stats.view(errmsg=errmsg, extraloops=extraloops) + extraprocedures = [procedure] + metainterp_sd.stats.view(errmsg=errmsg, + 
extraprocedures=extraprocedures) def create_empty_loop(metainterp, name_prefix=''): name = metainterp.staticdata.stats.name_for_new_loop() @@ -45,131 +47,261 @@ return loop -def make_loop_token(nb_args, jitdriver_sd): - loop_token = LoopToken() - loop_token.outermost_jitdriver_sd = jitdriver_sd - return loop_token +def make_jitcell_token(jitdriver_sd): + jitcell_token = JitCellToken() + jitcell_token.outermost_jitdriver_sd = jitdriver_sd + return jitcell_token def record_loop_or_bridge(metainterp_sd, loop): """Do post-backend recordings and cleanups on 'loop'. """ - # get the original loop token (corresponding to 'loop', or if that is - # a bridge, to the loop that this bridge belongs to) - looptoken = loop.token - assert looptoken is not None + # get the original jitcell token corresponding to jitcell form which + # this trace starts + original_jitcell_token = loop.original_jitcell_token + assert original_jitcell_token is not None if metainterp_sd.warmrunnerdesc is not None: # for tests - assert looptoken.generation > 0 # has been registered with memmgr - wref = weakref.ref(looptoken) + assert original_jitcell_token.generation > 0 # has been registered with memmgr + wref = weakref.ref(original_jitcell_token) for op in loop.operations: descr = op.getdescr() if isinstance(descr, ResumeDescr): descr.wref_original_loop_token = wref # stick it there n = descr.index if n >= 0: # we also record the resumedescr number - looptoken.compiled_loop_token.record_faildescr_index(n) - elif isinstance(descr, LoopToken): - # for a JUMP or a CALL_ASSEMBLER: record it as a potential jump. + original_jitcell_token.compiled_loop_token.record_faildescr_index(n) + elif isinstance(descr, JitCellToken): + # for a CALL_ASSEMBLER: record it as a potential jump. + if descr is not original_jitcell_token: + original_jitcell_token.record_jump_to(descr) + descr.exported_state = None + op._descr = None # clear reference, mostly for tests + elif isinstance(descr, TargetToken): + # for a JUMP: record it as a potential jump. # (the following test is not enough to prevent more complicated # cases of cycles, but at least it helps in simple tests of # test_memgr.py) - if descr is not looptoken: - looptoken.record_jump_to(descr) - op._descr = None # clear reference, mostly for tests + if descr.original_jitcell_token is not original_jitcell_token: + assert descr.original_jitcell_token is not None + original_jitcell_token.record_jump_to(descr.original_jitcell_token) + # exported_state is clear by optimizeopt when the short preamble is + # constrcucted. 
if that did not happen the label should not show up + # in a trace that will be used + assert descr.exported_state is None if not we_are_translated(): - op._jumptarget_number = descr.number + op._descr_wref = weakref.ref(op._descr) + op._descr = None # clear reference to prevent the history.Stats + # from keeping the loop alive during tests # record this looptoken on the QuasiImmut used in the code if loop.quasi_immutable_deps is not None: for qmut in loop.quasi_immutable_deps: qmut.register_loop_token(wref) # XXX maybe we should clear the dictionary here # mostly for tests: make sure we don't keep a reference to the LoopToken - loop.token = None + loop.original_jitcell_token = None if not we_are_translated(): - loop._looptoken_number = looptoken.number + loop._looptoken_number = original_jitcell_token.number # ____________________________________________________________ -def compile_new_loop(metainterp, old_loop_tokens, greenkey, start, - start_resumedescr, full_preamble_needed=True): - """Try to compile a new loop by closing the current history back +def compile_loop(metainterp, greenkey, start, + inputargs, jumpargs, + start_resumedescr, full_preamble_needed=True): + """Try to compile a new procedure by closing the current history back to the first operation. """ - from pypy.jit.metainterp.optimize import optimize_loop + from pypy.jit.metainterp.optimizeopt import optimize_trace history = metainterp.history - loop = create_empty_loop(metainterp) - loop.inputargs = history.inputargs[:] + metainterp_sd = metainterp.staticdata + jitdriver_sd = metainterp.jitdriver_sd + + if False: + part = partial_trace + assert False + procedur_token = metainterp.get_procedure_token(greenkey) + assert procedure_token + all_target_tokens = [] + else: + jitcell_token = make_jitcell_token(jitdriver_sd) + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + h_ops = history.operations + part.start_resumedescr = start_resumedescr + part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ + [h_ops[i].clone() for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, jumpargs, None, descr=jitcell_token)] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens = [target_token] + + loop = create_empty_loop(metainterp) + loop.inputargs = part.inputargs + loop.operations = part.operations + loop.quasi_immutable_deps = {} + if part.quasi_immutable_deps: + loop.quasi_immutable_deps.update(part.quasi_immutable_deps) + while part.operations[-1].getopnum() == rop.LABEL: + inliner = Inliner(inputargs, jumpargs) + part.quasi_immutable_deps = None + part.operations = [part.operations[-1]] + \ + [inliner.inline_op(h_ops[i]) for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jumpargs], + None, descr=jitcell_token)] + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens.append(target_token) + inputargs = jumpargs + jumpargs = part.operations[-1].getarglist() + + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + + loop.operations = loop.operations[:-1] + part.operations + if part.quasi_immutable_deps: + loop.quasi_immutable_deps.update(part.quasi_immutable_deps) + + if not loop.quasi_immutable_deps: + 
loop.quasi_immutable_deps = None for box in loop.inputargs: assert isinstance(box, Box) - # make a copy, because optimize_loop can mutate the ops and descrs - h_ops = history.operations - loop.operations = [h_ops[i].clone() for i in range(start, len(h_ops))] + + loop.original_jitcell_token = jitcell_token + for label in all_target_tokens: + assert isinstance(label, TargetToken) + label.original_jitcell_token = jitcell_token + if label.virtual_state and label.short_preamble: + metainterp_sd.logger_ops.log_short_preamble([], label.short_preamble) + jitcell_token.target_tokens = all_target_tokens + send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, "loop") + record_loop_or_bridge(metainterp_sd, loop) + return all_target_tokens[0] + +def compile_retrace(metainterp, greenkey, start, + inputargs, jumpargs, + start_resumedescr, partial_trace, resumekey): + """Try to compile a new procedure by closing the current history back + to the first operation. + """ + from pypy.jit.metainterp.optimizeopt import optimize_trace + + history = metainterp.history metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd - loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd) - loop.token = loop_token - loop.operations[-1].setdescr(loop_token) # patch the target of the JUMP - loop.preamble = create_empty_loop(metainterp, 'Preamble ') - loop.preamble.inputargs = loop.inputargs - loop.preamble.token = make_loop_token(len(loop.inputargs), jitdriver_sd) - loop.preamble.start_resumedescr = start_resumedescr + loop_jitcell_token = metainterp.get_procedure_token(greenkey) + assert loop_jitcell_token + assert partial_trace.operations[-1].getopnum() == rop.LABEL + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + part.start_resumedescr = start_resumedescr + h_ops = history.operations + + part.operations = [partial_trace.operations[-1]] + \ + [h_ops[i].clone() for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, jumpargs, None, descr=loop_jitcell_token)] + label = part.operations[0] + orignial_label = label.clone() + assert label.getopnum() == rop.LABEL try: - old_loop_token = optimize_loop(metainterp_sd, old_loop_tokens, loop, - jitdriver_sd.warmstate.enable_opts) + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: - debug_print("compile_new_loop: got an InvalidLoop") - return None - if old_loop_token is not None: - metainterp.staticdata.log("reusing old loop") - return old_loop_token + #return None # XXX: Dissable for now + # Fall back on jumping to preamble + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + assert target_token.exported_state + part.operations = [orignial_label] + \ + [ResOperation(rop.JUMP, target_token.exported_state.jump_args, + None, descr=loop_jitcell_token)] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts, + inline_short_preamble=False) + + except InvalidLoop: + return None + assert part.operations[-1].getopnum() != rop.LABEL + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + assert loop_jitcell_token.target_tokens + loop_jitcell_token.target_tokens.append(target_token) - if loop.preamble.operations is not None: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, - "loop") - record_loop_or_bridge(metainterp_sd, loop) - token = loop.preamble.token - if full_preamble_needed: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, - loop.preamble, "entry bridge") - 
insert_loop_token(old_loop_tokens, loop.preamble.token) - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - greenkey, loop.preamble.token) - record_loop_or_bridge(metainterp_sd, loop.preamble) - elif token.short_preamble: - short = token.short_preamble[-1] - metainterp_sd.logger_ops.log_short_preamble(short.inputargs, - short.operations) - return token - else: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, - "loop") - insert_loop_token(old_loop_tokens, loop_token) - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - greenkey, loop.token) - record_loop_or_bridge(metainterp_sd, loop) - return loop_token + loop = partial_trace + loop.operations = loop.operations[:-1] + part.operations -def insert_loop_token(old_loop_tokens, loop_token): - # Find where in old_loop_tokens we should insert this new loop_token. - # The following algo means "as late as possible, but before another - # loop token that would be more general and so completely mask off - # the new loop_token". - # XXX do we still need a list? - old_loop_tokens.append(loop_token) + quasi_immutable_deps = {} + if loop.quasi_immutable_deps: + quasi_immutable_deps.update(loop.quasi_immutable_deps) + if part.quasi_immutable_deps: + quasi_immutable_deps.update(part.quasi_immutable_deps) + if quasi_immutable_deps: + loop.quasi_immutable_deps = quasi_immutable_deps + + for box in loop.inputargs: + assert isinstance(box, Box) + + target_token = loop.operations[-1].getdescr() + resumekey.compile_and_attach(metainterp, loop) + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + target_token.original_jitcell_token = loop.original_jitcell_token + record_loop_or_bridge(metainterp_sd, loop) + return target_token + +def patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd): + vinfo = jitdriver_sd.virtualizable_info + extra_ops = [] + inputargs = loop.inputargs + vable_box = inputargs[jitdriver_sd.index_of_virtualizable] + i = jitdriver_sd.num_red_args + loop.inputargs = inputargs[:i] + for descr in vinfo.static_field_descrs: + assert i < len(inputargs) + box = inputargs[i] + extra_ops.append( + ResOperation(rop.GETFIELD_GC, [vable_box], box, descr)) + i += 1 + arrayindex = 0 + for descr in vinfo.array_field_descrs: + vable = vable_box.getref_base() + arraylen = vinfo.get_array_length(vable, arrayindex) + arraybox = BoxPtr() + extra_ops.append( + ResOperation(rop.GETFIELD_GC, [vable_box], arraybox, descr)) + arraydescr = vinfo.array_descrs[arrayindex] + assert i + arraylen <= len(inputargs) + for index in range(arraylen): + box = inputargs[i] + extra_ops.append( + ResOperation(rop.GETARRAYITEM_GC, + [arraybox, ConstInt(index)], + box, descr=arraydescr)) + i += 1 + arrayindex += 1 + assert i == len(inputargs) + loop.operations = extra_ops + loop.operations def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): - jitdriver_sd.on_compile(metainterp_sd.logger_ops, loop.token, + vinfo = jitdriver_sd.virtualizable_info + if vinfo is not None: + patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd) + + original_jitcell_token = loop.original_jitcell_token + jitdriver_sd.on_compile(metainterp_sd.logger_ops, original_jitcell_token, loop.operations, type, greenkey) loopname = jitdriver_sd.warmstate.get_location_str(greenkey) globaldata = metainterp_sd.globaldata - loop_token = loop.token - loop_token.number = n = globaldata.loopnumbering + original_jitcell_token.number = n = globaldata.loopnumbering globaldata.loopnumbering += 1 if not 
we_are_translated(): - show_loop(metainterp_sd, loop) + show_procedures(metainterp_sd, loop) loop.check_consistency() operations = get_deep_immutable_oplist(loop.operations) @@ -177,26 +309,19 @@ debug_start("jit-backend") try: ops_offset = metainterp_sd.cpu.compile_loop(loop.inputargs, operations, - loop.token, name=loopname) + original_jitcell_token, name=loopname) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() metainterp_sd.stats.add_new_loop(loop) if not we_are_translated(): - if type != "entry bridge": - metainterp_sd.stats.compiled() - else: - loop._ignore_during_counting = True + metainterp_sd.stats.compiled() metainterp_sd.log("compiled new " + type) # metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, type, ops_offset) - short = loop.token.short_preamble - if short: - metainterp_sd.logger_ops.log_short_preamble(short[-1].inputargs, - short[-1].operations) # if metainterp_sd.warmrunnerdesc is not None: # for tests - metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(loop.token) + metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(original_jitcell_token) def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputargs, operations, original_loop_token): @@ -204,8 +329,9 @@ jitdriver_sd.on_compile_bridge(metainterp_sd.logger_ops, original_loop_token, operations, n) if not we_are_translated(): - show_loop(metainterp_sd) - TreeLoop.check_consistency_of(inputargs, operations) + show_procedures(metainterp_sd) + seen = dict.fromkeys(inputargs) + TreeLoop.check_consistency_of_branch(operations, seen) metainterp_sd.profiler.start_backend() operations = get_deep_immutable_oplist(operations) debug_start("jit-backend") @@ -221,9 +347,9 @@ # metainterp_sd.logger_ops.log_bridge(inputargs, operations, n, ops_offset) # - if metainterp_sd.warmrunnerdesc is not None: # for tests - metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive( - original_loop_token) + #if metainterp_sd.warmrunnerdesc is not None: # for tests + # metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive( + # original_loop_token) # ____________________________________________________________ @@ -263,7 +389,7 @@ raise metainterp_sd.ExitFrameWithExceptionRef(cpu, value) -class TerminatingLoopToken(LoopToken): +class TerminatingLoopToken(JitCellToken): # FIXME: kill? terminating = True def __init__(self, nargs, finishdescr): @@ -298,7 +424,7 @@ pass class ResumeGuardDescr(ResumeDescr): - _counter = 0 # if < 0, there is one counter per value; + _counter = 0 # on a GUARD_VALUE, there is one counter per value; _counters = None # they get stored in _counters then. 
# this class also gets the following attributes stored by resume.py code @@ -309,10 +435,13 @@ rd_virtuals = None rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) - CNT_INT = -0x20000000 - CNT_REF = -0x40000000 - CNT_FLOAT = -0x60000000 - CNT_MASK = 0x1FFFFFFF + CNT_BASE_MASK = 0x0FFFFFFF # the base counter value + CNT_BUSY_FLAG = 0x10000000 # if set, busy tracing from the guard + CNT_TYPE_MASK = 0x60000000 # mask for the type + + CNT_INT = 0x20000000 + CNT_REF = 0x40000000 + CNT_FLOAT = 0x60000000 def store_final_boxes(self, guard_op, boxes): guard_op.setfailargs(boxes) @@ -326,6 +455,8 @@ except ValueError: return # xxx probably very rare else: + if i > self.CNT_BASE_MASK: + return # probably never, but better safe than sorry if box.type == history.INT: cnt = self.CNT_INT elif box.type == history.REF: @@ -334,18 +465,21 @@ cnt = self.CNT_FLOAT else: assert 0, box.type - # we build the following value for _counter, which is always - # a negative value + assert cnt > self.CNT_BASE_MASK self._counter = cnt | i def handle_fail(self, metainterp_sd, jitdriver_sd): if self.must_compile(metainterp_sd, jitdriver_sd): - return self._trace_and_compile_from_bridge(metainterp_sd, - jitdriver_sd) + self.start_compiling() + try: + self._trace_and_compile_from_bridge(metainterp_sd, + jitdriver_sd) + finally: + self.done_compiling() else: from pypy.jit.metainterp.blackhole import resume_in_blackhole resume_in_blackhole(metainterp_sd, jitdriver_sd, self) - assert 0, "unreachable" + assert 0, "unreachable" def _trace_and_compile_from_bridge(self, metainterp_sd, jitdriver_sd): # 'jitdriver_sd' corresponds to the outermost one, i.e. the one @@ -354,17 +488,27 @@ # jitdrivers. from pypy.jit.metainterp.pyjitpl import MetaInterp metainterp = MetaInterp(metainterp_sd, jitdriver_sd) - return metainterp.handle_guard_failure(self) + metainterp.handle_guard_failure(self) _trace_and_compile_from_bridge._dont_inline_ = True def must_compile(self, metainterp_sd, jitdriver_sd): trace_eagerness = jitdriver_sd.warmstate.trace_eagerness - if self._counter >= 0: + # + if self._counter <= self.CNT_BASE_MASK: + # simple case: just counting from 0 to trace_eagerness self._counter += 1 return self._counter >= trace_eagerness - else: - index = self._counter & self.CNT_MASK - typetag = self._counter & ~ self.CNT_MASK + # + # do we have the BUSY flag? If so, we're tracing right now, e.g. in an + # outer invocation of the same function, so don't trace again for now. + elif self._counter & self.CNT_BUSY_FLAG: + return False + # + else: # we have a GUARD_VALUE that fails. Make a _counters instance + # (only now, when the guard is actually failing at least once), + # and use it to record some statistics about the failing values. + index = self._counter & self.CNT_BASE_MASK + typetag = self._counter & self.CNT_TYPE_MASK counters = self._counters if typetag == self.CNT_INT: intvalue = metainterp_sd.cpu.get_latest_value_int(index) @@ -391,7 +535,16 @@ assert 0, typetag return counter >= trace_eagerness - def reset_counter_from_failure(self): + def start_compiling(self): + # start tracing and compiling from this guard. + self._counter |= self.CNT_BUSY_FLAG + + def done_compiling(self): + # done tracing and compiling from this guard. Either the bridge has + # been successfully compiled, in which case whatever value we store + # in self._counter will not be seen any more, or not, in which case + # we should reset the counter to 0, in order to wait a bit until the + # next attempt. 
if self._counter >= 0: self._counter = 0 self._counters = None @@ -400,13 +553,13 @@ # We managed to create a bridge. Attach the new operations # to the corresponding guard_op and compile from there assert metainterp.resumekey_original_loop_token is not None - new_loop.token = metainterp.resumekey_original_loop_token + new_loop.original_jitcell_token = metainterp.resumekey_original_loop_token inputargs = metainterp.history.inputargs if not we_are_translated(): self._debug_suboperations = new_loop.operations send_bridge_to_backend(metainterp.jitdriver_sd, metainterp.staticdata, self, inputargs, new_loop.operations, - new_loop.token) + new_loop.original_jitcell_token) def copy_all_attributes_into(self, res): # XXX a bit ugly to have to list them all here @@ -589,44 +742,32 @@ metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd redargs = new_loop.inputargs - # We make a new LoopToken for this entry bridge, and stick it - # to every guard in the loop. - new_loop_token = make_loop_token(len(redargs), jitdriver_sd) - new_loop.token = new_loop_token + new_loop.original_jitcell_token = jitcell_token = make_jitcell_token(jitdriver_sd) send_loop_to_backend(self.original_greenkey, metainterp.jitdriver_sd, metainterp_sd, new_loop, "entry bridge") # send the new_loop to warmspot.py, to be called directly the next time - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - self.original_greenkey, - new_loop_token) - # store the new loop in compiled_merge_points_wref too - old_loop_tokens = metainterp.get_compiled_merge_points( - self.original_greenkey) - # it always goes at the end of the list, as it is the most - # general loop token - old_loop_tokens.append(new_loop_token) - metainterp.set_compiled_merge_points(self.original_greenkey, - old_loop_tokens) + jitdriver_sd.warmstate.attach_procedure_to_interp( + self.original_greenkey, jitcell_token) + metainterp_sd.stats.add_jitcell_token(jitcell_token) - def reset_counter_from_failure(self): - pass - -def compile_new_bridge(metainterp, old_loop_tokens, resumekey, retraced=False): +def compile_trace(metainterp, resumekey, start_resumedescr=None): """Try to compile a new bridge leading from the beginning of the history to some existing place. """ - from pypy.jit.metainterp.optimize import optimize_bridge + from pypy.jit.metainterp.optimizeopt import optimize_trace # The history contains new operations to attach as the code for the # failure of 'resumekey.guard_op'. - # + # # Attempt to use optimize_bridge(). This may return None in case # it does not work -- i.e. none of the existing old_loop_tokens match. 
- new_loop = create_empty_loop(metainterp) - new_loop.inputargs = metainterp.history.inputargs[:] + new_trace = create_empty_loop(metainterp) + new_trace.inputargs = inputargs = metainterp.history.inputargs[:] # clone ops, as optimize_bridge can mutate the ops - new_loop.operations = [op.clone() for op in metainterp.history.operations] + + new_trace.operations = [op.clone() for op in metainterp.history.operations] + new_trace.start_resumedescr = start_resumedescr metainterp_sd = metainterp.staticdata state = metainterp.jitdriver_sd.warmstate if isinstance(resumekey, ResumeAtPositionDescr): @@ -634,38 +775,25 @@ else: inline_short_preamble = True try: - target_loop_token = optimize_bridge(metainterp_sd, old_loop_tokens, - new_loop, state.enable_opts, - inline_short_preamble, retraced) + optimize_trace(metainterp_sd, new_trace, state.enable_opts, inline_short_preamble) except InvalidLoop: debug_print("compile_new_bridge: got an InvalidLoop") # XXX I am fairly convinced that optimize_bridge cannot actually raise # InvalidLoop debug_print('InvalidLoop in compile_new_bridge') return None - # Did it work? - if target_loop_token is not None: - # Yes, we managed to create a bridge. Dispatch to resumekey to + + if new_trace.operations[-1].getopnum() != rop.LABEL: + # We managed to create a bridge. Dispatch to resumekey to # know exactly what we must do (ResumeGuardDescr/ResumeFromInterpDescr) - prepare_last_operation(new_loop, target_loop_token) - resumekey.compile_and_attach(metainterp, new_loop) - record_loop_or_bridge(metainterp_sd, new_loop) - return target_loop_token - -def prepare_last_operation(new_loop, target_loop_token): - op = new_loop.operations[-1] - if not isinstance(target_loop_token, TerminatingLoopToken): - # normal case - #op.setdescr(target_loop_token) # patch the jump target - pass + target_token = new_trace.operations[-1].getdescr() + resumekey.compile_and_attach(metainterp, new_trace) + record_loop_or_bridge(metainterp_sd, new_trace) + return target_token else: - # The target_loop_token is a pseudo loop token, - # e.g. loop_tokens_done_with_this_frame_void[0] - # Replace the operation with the real operation we want, i.e. a FINISH - descr = target_loop_token.finishdescr - args = op.getarglist() - new_op = ResOperation(rop.FINISH, args, None, descr=descr) - new_loop.operations[-1] = new_op + metainterp.retrace_needed(new_trace) + return None + # ____________________________________________________________ @@ -676,21 +804,25 @@ assert exception, "PropagateExceptionDescr: no exception??" raise metainterp_sd.ExitFrameWithExceptionRef(cpu, exception) -def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redboxes, +def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redargtypes, memory_manager=None): """Make a LoopToken that corresponds to assembler code that just calls back the interpreter. Used temporarily: a fully compiled version of the code may end up replacing it. """ - # 'redboxes' is only used to know the types of red arguments. - inputargs = [box.clonebox() for box in redboxes] - loop_token = make_loop_token(len(inputargs), jitdriver_sd) - # 'nb_red_args' might be smaller than len(redboxes), - # because it doesn't include the virtualizable boxes. 
+ jitcell_token = make_jitcell_token(jitdriver_sd) nb_red_args = jitdriver_sd.num_red_args + assert len(redargtypes) == nb_red_args + inputargs = [] + for kind in redargtypes: + if kind == history.INT: box = BoxInt() + elif kind == history.REF: box = BoxPtr() + elif kind == history.FLOAT: box = BoxFloat() + else: raise AssertionError + inputargs.append(box) k = jitdriver_sd.portal_runner_adr funcbox = history.ConstInt(heaptracker.adr2int(k)) - callargs = [funcbox] + greenboxes + inputargs[:nb_red_args] + callargs = [funcbox] + greenboxes + inputargs # result_type = jitdriver_sd.result_type if result_type == history.INT: @@ -717,7 +849,7 @@ ] operations[1].setfailargs([]) operations = get_deep_immutable_oplist(operations) - cpu.compile_loop(inputargs, operations, loop_token, log=False) + cpu.compile_loop(inputargs, operations, jitcell_token, log=False) if memory_manager is not None: # for tests - memory_manager.keep_loop_alive(loop_token) - return loop_token + memory_manager.keep_loop_alive(jitcell_token) + return jitcell_token diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -344,6 +344,7 @@ rop.SETINTERIORFIELD_RAW, rop.CALL_RELEASE_GIL, rop.QUASIIMMUT_FIELD, + rop.LABEL, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -12,8 +12,9 @@ def get_display_text(self): return None -def display_loops(loops, errmsg=None, highlight_loops={}): - graphs = [(loop, highlight_loops.get(loop, 0)) for loop in loops] +def display_procedures(procedures, errmsg=None, highlight_procedures={}): + graphs = [(procedure, highlight_procedures.get(procedure, 0)) + for procedure in procedures] for graph, highlight in graphs: for op in graph.get_operations(): if is_interesting_guard(op): @@ -25,18 +26,19 @@ def is_interesting_guard(op): return hasattr(op.getdescr(), '_debug_suboperations') +def getdescr(op): + if op._descr is not None: + return op._descr + if hasattr(op, '_descr_wref'): + return op._descr_wref() + return None + class ResOpGraphPage(GraphPage): def compute(self, graphs, errmsg=None): resopgen = ResOpGen() for graph, highlight in graphs: - if getattr(graph, 'token', None) is not None: - resopgen.jumps_to_graphs[graph.token] = graph - if getattr(graph, '_looptoken_number', None) is not None: - resopgen.jumps_to_graphs[graph._looptoken_number] = graph - - for graph, highlight in graphs: resopgen.add_graph(graph, highlight) if errmsg: resopgen.set_errmsg(errmsg) @@ -54,7 +56,7 @@ self.block_starters = {} # {graphindex: {set-of-operation-indices}} self.all_operations = {} self.errmsg = None - self.jumps_to_graphs = {} + self.target_tokens = {} def op_name(self, graphindex, opindex): return 'g%dop%d' % (graphindex, opindex) @@ -73,16 +75,21 @@ for graphindex in range(len(self.graphs)): self.block_starters[graphindex] = {0: True} for graphindex, graph in enumerate(self.graphs): - last_was_mergepoint = False + mergepointblock = None for i, op in enumerate(graph.get_operations()): if is_interesting_guard(op): self.mark_starter(graphindex, i+1) if op.getopnum() == rop.DEBUG_MERGE_POINT: - if not last_was_mergepoint: - last_was_mergepoint = True - self.mark_starter(graphindex, i) + if mergepointblock is None: + mergepointblock = i + elif op.getopnum() == rop.LABEL: + self.mark_starter(graphindex, 
i) + self.target_tokens[getdescr(op)] = (graphindex, i) + mergepointblock = i else: - last_was_mergepoint = False + if mergepointblock is not None: + self.mark_starter(graphindex, mergepointblock) + mergepointblock = None def set_errmsg(self, errmsg): self.errmsg = errmsg @@ -172,24 +179,10 @@ (graphindex, opindex)) break if op.getopnum() == rop.JUMP: - tgt_g = -1 - tgt = None - tgt_number = getattr(op, '_jumptarget_number', None) - if tgt_number is not None: - tgt = self.jumps_to_graphs.get(tgt_number) - else: - tgt_descr = op.getdescr() - if tgt_descr is None: - tgt_g = graphindex - else: - tgt = self.jumps_to_graphs.get(tgt_descr.number) - if tgt is None: - tgt = self.jumps_to_graphs.get(tgt_descr) - if tgt is not None: - tgt_g = self.graphs.index(tgt) - if tgt_g != -1: + tgt_descr = getdescr(op) + if tgt_descr is not None and tgt_descr in self.target_tokens: self.genedge((graphindex, opstartindex), - (tgt_g, 0), + self.target_tokens[tgt_descr], weight="0") lines.append("") label = "\\l".join(lines) diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -10,6 +10,7 @@ from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.codewriter import heaptracker, longlong from pypy.rlib.objectmodel import compute_identity_hash +import weakref # ____________________________________________________________ @@ -123,9 +124,6 @@ def sort_key(self): raise NotImplementedError - def set_future_value(self, cpu, j): - raise NotImplementedError - def nonnull(self): raise NotImplementedError @@ -288,9 +286,6 @@ def _get_hash_(self): return make_hashable_int(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_int(j, self.value) - def same_constant(self, other): if isinstance(other, ConstInt): return self.value == other.value @@ -328,9 +323,6 @@ def _get_hash_(self): return longlong.gethash(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_float(j, self.value) - def same_constant(self, other): if isinstance(other, ConstFloat): return self.value == other.value @@ -377,9 +369,6 @@ def getaddr(self): return llmemory.cast_ptr_to_adr(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def same_constant(self, other): if isinstance(other, ConstPtr): return self.value == other.value @@ -431,9 +420,6 @@ else: return 0 - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - ## def getaddr(self): ## # so far this is used only when calling ## # CodeWriter.IndirectCallset.bytecode_for_address. 
We don't need a @@ -539,9 +525,6 @@ def _get_hash_(self): return make_hashable_int(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_int(j, self.value) - def nonnull(self): return self.value != 0 @@ -574,9 +557,6 @@ def _get_hash_(self): return longlong.gethash(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_float(j, self.value) - def nonnull(self): return self.value != longlong.ZEROF @@ -619,9 +599,6 @@ else: return 0 - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def nonnull(self): return bool(self.value) @@ -666,19 +643,12 @@ def nonnull(self): return bool(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def repr_rpython(self): return repr_rpython(self, 'bo') _getrepr_ = repr_object -def set_future_values(cpu, boxes): - for j in range(len(boxes)): - boxes[j].set_future_value(cpu, j) - # ____________________________________________________________ @@ -723,18 +693,17 @@ # ____________________________________________________________ -# The TreeLoop class contains a loop or a generalized loop, i.e. a tree -# of operations. Each branch ends in a jump which can go either to -# the top of the same loop, or to another TreeLoop; or it ends in a FINISH. +# The JitCellToken class is the root of a tree of traces. Each branch ends +# in a jump which goes to a LABEL operation; or it ends in a FINISH. -class LoopToken(AbstractDescr): +class JitCellToken(AbstractDescr): """Used for rop.JUMP, giving the target of the jump. This is different from TreeLoop: the TreeLoop class contains the whole loop, including 'operations', and goes away after the loop was compiled; but the LoopDescr remains alive and points to the generated assembler. """ - short_preamble = None + target_tokens = None failed_states = None retraced_count = 0 terminating = False # see TerminatingLoopToken in compile.py @@ -751,10 +720,11 @@ def __init__(self): # For memory management of assembled loops - self._keepalive_target_looktokens = {} # set of other LoopTokens + self._keepalive_jitcell_tokens = {} # set of other JitCellToken - def record_jump_to(self, target_loop_token): - self._keepalive_target_looktokens[target_loop_token] = None + def record_jump_to(self, jitcell_token): + assert isinstance(jitcell_token, JitCellToken) + self._keepalive_jitcell_tokens[jitcell_token] = None def __repr__(self): return '' % (self.number, self.generation) @@ -765,17 +735,49 @@ def dump(self): self.compiled_loop_token.cpu.dump_loop_token(self) +class TargetToken(AbstractDescr): + def __init__(self, targeting_jitcell_token=None): + # Warning, two different jitcell_tokens here! + # + # * 'targeting_jitcell_token' is only useful for the front-end, + # and it means: consider the LABEL that uses this TargetToken. + # At this position, the state is logically the one given + # by targeting_jitcell_token. So e.g. if we want to enter the + # JIT with some given green args, if the jitcell matches, then + # we can jump to this LABEL. 
+ # + # * 'original_jitcell_token' is information from the backend's + # point of view: it means that this TargetToken is used in + # a LABEL that belongs to either: + # - a loop; then 'original_jitcell_token' is this loop + # - or a bridge; then 'original_jitcell_token' is the loop + # out of which we made this bridge + # + self.targeting_jitcell_token = targeting_jitcell_token + self.original_jitcell_token = None + + self.virtual_state = None + self.exported_state = None + class TreeLoop(object): inputargs = None operations = None - token = None call_pure_results = None logops = None quasi_immutable_deps = None + start_resumedescr = None + + def _token(*args): + raise Exception("TreeLoop.token is killed") + token = property(_token, _token) + + # This is the jitcell where the trace starts. Labels within the trace might + # belong to some other jitcells in the sens that jumping to this other + # jitcell will result in a jump to the label. + original_jitcell_token = None def __init__(self, name): self.name = name - # self.inputargs = list of distinct Boxes # self.operations = list of ResOperations # ops of the kind 'guard_xxx' contain a further list of operations, # which may itself contain 'guard_xxx' and so on, making a tree. @@ -808,6 +810,10 @@ def check_consistency(self): # for testing "NOT_RPYTHON" self.check_consistency_of(self.inputargs, self.operations) + for op in self.operations: + descr = op.getdescr() + if op.getopnum() == rop.LABEL and isinstance(descr, TargetToken): + assert descr.original_jitcell_token is self.original_jitcell_token @staticmethod def check_consistency_of(inputargs, operations): @@ -842,15 +848,23 @@ assert isinstance(box, Box) assert box not in seen seen[box] = True + if op.getopnum() == rop.LABEL: + inputargs = op.getarglist() + for box in inputargs: + assert isinstance(box, Box), "LABEL contains %r" % (box,) + seen = dict.fromkeys(inputargs) + assert len(seen) == len(inputargs), ( + "duplicate Box in the LABEL arguments") + assert operations[-1].is_final() if operations[-1].getopnum() == rop.JUMP: target = operations[-1].getdescr() if target is not None: - assert isinstance(target, LoopToken) + assert isinstance(target, TargetToken) def dump(self): # RPython-friendly - print '%r: inputargs =' % self, self._dump_args(self.inputargs) + print '%r: inputargs =' % self, self._dump_args(self.inputargs) for op in self.operations: args = op.getarglist() print '\t', op.getopname(), self._dump_args(args), \ @@ -932,6 +946,9 @@ def clear(self): pass + def add_jitcell_token(self, token): + pass + class Stats(object): """For tests.""" @@ -944,17 +961,26 @@ self.loops = [] self.locations = [] self.aborted_keys = [] - self.invalidated_token_numbers = set() + self.invalidated_token_numbers = set() # <- not RPython + self.jitcell_token_wrefs = [] + self.jitcell_dicts = [] # <- not RPython def clear(self): del self.loops[:] del self.locations[:] del self.aborted_keys[:] + del self.jitcell_token_wrefs[:] self.invalidated_token_numbers.clear() self.compiled_count = 0 self.enter_count = 0 self.aborted_count = 0 + for dict in self.jitcell_dicts: + dict.clear() + def add_jitcell_token(self, token): + assert isinstance(token, JitCellToken) + self.jitcell_token_wrefs.append(weakref.ref(token)) + def set_history(self, history): self.operations = history.operations @@ -984,6 +1010,15 @@ def get_all_loops(self): return self.loops + def get_all_jitcell_tokens(self): + tokens = [t() for t in self.jitcell_token_wrefs] + if None in tokens: + assert False, "get_all_jitcell_tokens will not 
work as "+\ + "loops have been freed" + return tokens + + + def check_history(self, expected=None, **check): insns = {} for op in self.operations: @@ -1001,13 +1036,14 @@ def check_resops(self, expected=None, **check): insns = {} - for loop in self.loops: + for loop in self.get_all_loops(): insns = loop.summary(adding_insns=insns) return self._check_insns(insns, expected, check) def _check_insns(self, insns, expected, check): if expected is not None: insns.pop('debug_merge_point', None) + insns.pop('label', None) assert insns == expected for insn, expected_count in check.items(): getattr(rop, insn.upper()) # fails if 'rop.INSN' does not exist @@ -1034,29 +1070,83 @@ opname = op.getopname() insns[opname] = insns.get(opname, 0) + 1 return self._check_insns(insns, expected, check) + + def check_simple_loop(self, expected=None, **check): + # Usefull in the simplest case when we have only one trace ending with + # a jump back to itself and possibly a few bridges ending with finnish. + # Only the operations within the loop formed by that single jump will + # be counted. + loops = self.get_all_loops() + assert len(loops) == 1 + loop = loops[0] + jumpop = loop.operations[-1] + assert jumpop.getopnum() == rop.JUMP + assert self.check_resops(jump=1) + labels = [op for op in loop.operations if op.getopnum() == rop.LABEL] + targets = [op._descr_wref() for op in labels] + assert None not in targets # TargetToken was freed, give up + target = jumpop._descr_wref() + assert target + assert targets.count(target) == 1 + i = loop.operations.index(labels[targets.index(target)]) + insns = {} + for op in loop.operations[i:]: + opname = op.getopname() + insns[opname] = insns.get(opname, 0) + 1 + return self._check_insns(insns, expected, check) + def check_loops(self, expected=None, everywhere=False, **check): + insns = {} + for loop in self.get_all_loops(): + #if not everywhere: + # if getattr(loop, '_ignore_during_counting', False): + # continue + insns = loop.summary(adding_insns=insns) + if expected is not None: + insns.pop('debug_merge_point', None) + print + print + print " self.check_resops(%s)" % str(insns) + print + import pdb; pdb.set_trace() + else: + chk = ['%s=%d' % (i, insns.get(i, 0)) for i in check] + print + print + print " self.check_resops(%s)" % ', '.join(chk) + print + import pdb; pdb.set_trace() + return + + for insn, expected_count in check.items(): + getattr(rop, insn.upper()) # fails if 'rop.INSN' does not exist + found = insns.get(insn, 0) + assert found == expected_count, ( + "found %d %r, expected %d" % (found, insn, expected_count)) + return insns + def check_consistency(self): "NOT_RPYTHON" - for loop in self.loops: + for loop in self.get_all_loops(): loop.check_consistency() def maybe_view(self): if option.view: self.view() - def view(self, errmsg=None, extraloops=[]): - from pypy.jit.metainterp.graphpage import display_loops - loops = self.get_all_loops()[:] - for loop in extraloops: - if loop in loops: - loops.remove(loop) - loops.append(loop) - highlight_loops = dict.fromkeys(extraloops, 1) - for loop in loops: - if hasattr(loop, '_looptoken_number') and ( - loop._looptoken_number in self.invalidated_token_numbers): - highlight_loops.setdefault(loop, 2) - display_loops(loops, errmsg, highlight_loops) + def view(self, errmsg=None, extraprocedures=[]): + from pypy.jit.metainterp.graphpage import display_procedures + procedures = self.get_all_loops()[:] + for procedure in extraprocedures: + if procedure in procedures: + procedures.remove(procedure) + procedures.append(procedure) + 
highlight_procedures = dict.fromkeys(extraprocedures, 1) + for procedure in procedures: + if hasattr(procedure, '_looptoken_number') and ( + procedure._looptoken_number in self.invalidated_token_numbers): + highlight_procedures.setdefault(procedure, 2) + display_procedures(procedures, errmsg, highlight_procedures) # ---------------------------------------------------------------- diff --git a/pypy/jit/metainterp/inliner.py b/pypy/jit/metainterp/inliner.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/inliner.py @@ -0,0 +1,57 @@ +from pypy.jit.metainterp.history import Const +from pypy.jit.metainterp.resume import Snapshot + +class Inliner(object): + def __init__(self, inputargs, jump_args): + assert len(inputargs) == len(jump_args) + self.argmap = {} + for i in range(len(inputargs)): + if inputargs[i] in self.argmap: + assert self.argmap[inputargs[i]] == jump_args[i] + else: + self.argmap[inputargs[i]] = jump_args[i] + self.snapshot_map = {None: None} + + def inline_op(self, newop, ignore_result=False, clone=True, + ignore_failargs=False): + if clone: + newop = newop.clone() + args = newop.getarglist() + newop.initarglist([self.inline_arg(a) for a in args]) + + if newop.is_guard(): + args = newop.getfailargs() + if args and not ignore_failargs: + newop.setfailargs([self.inline_arg(a) for a in args]) + else: + newop.setfailargs([]) + + if newop.result and not ignore_result: + old_result = newop.result + newop.result = newop.result.clonebox() + self.argmap[old_result] = newop.result + + self.inline_descr_inplace(newop.getdescr()) + + return newop + + def inline_descr_inplace(self, descr): + from pypy.jit.metainterp.compile import ResumeGuardDescr + if isinstance(descr, ResumeGuardDescr): + descr.rd_snapshot = self.inline_snapshot(descr.rd_snapshot) + + def inline_arg(self, arg): + if arg is None: + return None + if isinstance(arg, Const): + return arg + return self.argmap[arg] + + def inline_snapshot(self, snapshot): + if snapshot in self.snapshot_map: + return self.snapshot_map[snapshot] + boxes = [self.inline_arg(a) for a in snapshot.boxes] + new_snapshot = Snapshot(self.inline_snapshot(snapshot.prev), boxes) + self.snapshot_map[snapshot] = new_snapshot + return new_snapshot + diff --git a/pypy/jit/metainterp/jitdriver.py b/pypy/jit/metainterp/jitdriver.py --- a/pypy/jit/metainterp/jitdriver.py +++ b/pypy/jit/metainterp/jitdriver.py @@ -11,6 +11,7 @@ # self.portal_calldescr ... pypy.jit.metainterp.warmspot # self.num_green_args ... pypy.jit.metainterp.warmspot # self.num_red_args ... pypy.jit.metainterp.warmspot + # self.red_args_types ... pypy.jit.metainterp.warmspot # self.result_type ... pypy.jit.metainterp.warmspot # self.virtualizable_info... pypy.jit.metainterp.warmspot # self.greenfield_info ... pypy.jit.metainterp.warmspot diff --git a/pypy/jit/metainterp/jitprof.py b/pypy/jit/metainterp/jitprof.py --- a/pypy/jit/metainterp/jitprof.py +++ b/pypy/jit/metainterp/jitprof.py @@ -10,8 +10,6 @@ counters=""" TRACING BACKEND -RUNNING -BLACKHOLE OPS RECORDED_OPS GUARDS @@ -67,18 +65,6 @@ def end_backend(self): pass - def start_running(self): - pass - - def end_running(self): - pass - - def start_blackhole(self): - pass - - def end_blackhole(self): - pass - def count(self, kind, inc=1): pass @@ -134,16 +120,6 @@ def start_backend(self): self._start(BACKEND) def end_backend(self): self._end (BACKEND) - # Don't record times for 'running' and 'blackhole' because there are - # too many of them: calling time.time() is a major blocker. 
- # If you are interested in these numbers, use 'PYPYLOG=file' and - # look at the resulting file with pypy/tool/logparser.py. - def start_running(self): self.count(RUNNING) - def end_running(self): pass - - def start_blackhole(self): self.count(BLACKHOLE) - def end_blackhole(self): pass - def count(self, kind, inc=1): self.counters[kind] += inc @@ -165,8 +141,6 @@ calls = self.calls self._print_line_time("Tracing", cnt[TRACING], tim[TRACING]) self._print_line_time("Backend", cnt[BACKEND], tim[BACKEND]) - self._print_intline("Running asm", cnt[RUNNING]) - self._print_intline("Blackhole", cnt[BLACKHOLE]) line = "TOTAL: \t\t%f" % (self.tk - self.starttime, ) debug_print(line) self._print_intline("ops", cnt[OPS]) diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -1,5 +1,5 @@ import math -from pypy.rlib.rarithmetic import r_int64 +from pypy.rlib.rarithmetic import r_int64, r_uint from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.rlib.objectmodel import we_are_translated @@ -21,6 +21,7 @@ # class MemoryManager(object): + NO_NEXT_CHECK = r_int64(2 ** 63 - 1) def __init__(self): self.check_frequency = -1 @@ -36,12 +37,13 @@ # According to my estimates it's about 5e9 years given 1000 loops # per second self.current_generation = r_int64(1) - self.next_check = r_int64(-1) + self.next_check = self.NO_NEXT_CHECK self.alive_loops = {} + self._cleanup_jitcell_dicts = lambda: None def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: - self.next_check = r_int64(-1) + self.next_check = self.NO_NEXT_CHECK else: self.max_age = max_age if check_frequency <= 0: @@ -49,10 +51,11 @@ self.check_frequency = check_frequency self.next_check = self.current_generation + 1 - def next_generation(self): + def next_generation(self, do_cleanups_now=True): self.current_generation += 1 - if self.current_generation == self.next_check: + if do_cleanups_now and self.current_generation >= self.next_check: self._kill_old_loops_now() + self._cleanup_jitcell_dicts() self.next_check = self.current_generation + self.check_frequency def keep_loop_alive(self, looptoken): @@ -81,3 +84,22 @@ # a single one is not enough for all tests :-( rgc.collect(); rgc.collect(); rgc.collect() debug_stop("jit-mem-collect") + + def get_current_generation_uint(self): + """Return the current generation, possibly truncated to a uint. + To use only as an approximation for decaying counters.""" + return r_uint(self.current_generation) + + def record_jitcell_dict(self, callback): + """NOT_RPYTHON. The given jitcell_dict is a dict that needs + occasional clean-ups of old cells. A cell is old if it never + reached the threshold, and its counter decayed to a tiny value.""" + # note that the various jitcell_dicts have different RPython types, + # so we have to make a different function for each one. These + # functions are chained to each other: each calls the previous one. 
+ def cleanup_dict(): + callback() + cleanup_previous() + # + cleanup_previous = self._cleanup_jitcell_dicts + self._cleanup_jitcell_dicts = cleanup_dict diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -4,13 +4,15 @@ from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize from pypy.jit.metainterp.optimizeopt.heap import OptHeap from pypy.jit.metainterp.optimizeopt.vstring import OptString -from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll, OptInlineShortPreamble +from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll from pypy.jit.metainterp.optimizeopt.fficall import OptFfiCall from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify from pypy.jit.metainterp.optimizeopt.pure import OptPure from pypy.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce from pypy.rlib.jit import PARAMETERS from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.debug import debug_start, debug_stop, debug_print + ALL_OPTS = [('intbounds', OptIntBounds), ('rewrite', OptRewrite), @@ -28,8 +30,7 @@ ALL_OPTS_LIST = [name for name, _ in ALL_OPTS] ALL_OPTS_NAMES = ':'.join([name for name, _ in ALL_OPTS]) -def build_opt_chain(metainterp_sd, enable_opts, - inline_short_preamble=True, retraced=False): +def build_opt_chain(metainterp_sd, enable_opts): config = metainterp_sd.config optimizations = [] unroll = 'unroll' in enable_opts # 'enable_opts' is normally a dict @@ -45,12 +46,9 @@ optimizations.append(OptFfiCall()) if ('rewrite' not in enable_opts or 'virtualize' not in enable_opts - or 'heap' not in enable_opts): + or 'heap' not in enable_opts or 'unroll' not in enable_opts): optimizations.append(OptSimplify()) - if inline_short_preamble: - optimizations = [OptInlineShortPreamble(retraced)] + optimizations - return optimizations, unroll @@ -80,3 +78,21 @@ if __name__ == '__main__': print ALL_OPTS_NAMES + +def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True): + """Optimize loop.operations to remove internal overheadish operations. + """ + + debug_start("jit-optimize") + try: + loop.logops = metainterp_sd.logger_noopt.log_loop(loop.inputargs, + loop.operations) + optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts) + if unroll: + optimize_unroll(metainterp_sd, loop, optimizations, inline_short_preamble) + else: + optimizer = Optimizer(metainterp_sd, loop, optimizations) + optimizer.propagate_all_forward() + finally: + debug_stop("jit-optimize") + diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -234,6 +234,9 @@ # longlongs are treated as floats, see # e.g. 
llsupport/descr.py:getDescrClass is_float = True + elif kind == 'u': + # they're all False + pass else: assert False, "unsupported ffitype or kind" # diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -246,15 +246,16 @@ self.force_lazy_setfields_and_arrayitems_for_guard()) return opnum = op.getopnum() - if (opnum == rop.SETFIELD_GC or # handled specially - opnum == rop.SETFIELD_RAW or # no effect on GC struct/array - opnum == rop.SETARRAYITEM_GC or # handled specially - opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct - opnum == rop.STRSETITEM or # no effect on GC struct/array - opnum == rop.UNICODESETITEM or # no effect on GC struct/array - opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever - opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array - opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array + if (opnum == rop.SETFIELD_GC or # handled specially + opnum == rop.SETFIELD_RAW or # no effect on GC struct/array + opnum == rop.SETARRAYITEM_GC or # handled specially + opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct + opnum == rop.SETINTERIORFIELD_RAW or # no effect on GC struct + opnum == rop.STRSETITEM or # no effect on GC struct/array + opnum == rop.UNICODESETITEM or # no effect on GC struct/array + opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever + opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array + opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array return assert opnum != rop.CALL_PURE if (opnum == rop.CALL or diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -500,8 +500,9 @@ else: return CVAL_ZERO - def propagate_all_forward(self): - self.clear_newoperations() + def propagate_all_forward(self, clear=True): + if clear: + self.clear_newoperations() for op in self.loop.operations: self.first_optimization.propagate_forward(op) self.loop.operations = self.get_newoperations() diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -1,9 +1,12 @@ from pypy.jit.metainterp.optimizeopt.optimizer import Optimization from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method from pypy.jit.metainterp.resoperation import ResOperation, rop - +from pypy.jit.metainterp.history import TargetToken, JitCellToken class OptSimplify(Optimization): + def __init__(self): + self.last_label_descr = None + def optimize_CALL_PURE(self, op): args = op.getarglist() self.emit_operation(ResOperation(rop.CALL, args, op.result, @@ -31,6 +34,23 @@ def optimize_RECORD_KNOWN_CLASS(self, op): pass + def optimize_LABEL(self, op): + self.last_label_descr = op.getdescr() From noreply at buildbot.pypy.org Sat Dec 17 23:01:06 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 Dec 2011 23:01:06 +0100 (CET) Subject: [pypy-commit] pypy py3k: Remove "issuer" and "server" attributes from ssl socket. Message-ID: <20111217220106.7B46F8205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50639:6a82b1beaf24 Date: 2011-12-17 14:24 +0100 http://bitbucket.org/pypy/pypy/changeset/6a82b1beaf24/ Log: Remove "issuer" and "server" attributes from ssl socket. 
+ Fix shutdown test diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -328,25 +328,13 @@ self.w_socket = None self.ssl = lltype.nullptr(SSL.TO) self.peer_cert = lltype.nullptr(X509.TO) - self._server = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, flavor='raw') - self._server[0] = '\0' - self._issuer = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, flavor='raw') - self._issuer[0] = '\0' self.shutdown_seen_zero = False - def server(self, space): - return space.wrap(rffi.charp2str(self._server)) - - def issuer(self, space): - return space.wrap(rffi.charp2str(self._issuer)) - def __del__(self): if self.peer_cert: libssl_X509_free(self.peer_cert) if self.ssl: libssl_SSL_free(self.ssl) - lltype.free(self._server, flavor='raw') - lltype.free(self._issuer, flavor='raw') @unwrap_spec(data='bufferstr') def write(self, space, data): @@ -513,13 +501,6 @@ if self.peer_cert: libssl_X509_free(self.peer_cert) self.peer_cert = libssl_SSL_get_peer_certificate(self.ssl) - if self.peer_cert: - libssl_X509_NAME_oneline( - libssl_X509_get_subject_name(self.peer_cert), - self._server, X509_NAME_MAXLEN) - libssl_X509_NAME_oneline( - libssl_X509_get_issuer_name(self.peer_cert), - self._issuer, X509_NAME_MAXLEN) def shutdown(self, space): w_socket = self._get_socket(space) @@ -827,8 +808,6 @@ return space.newtuple([w_name, w_value]) SSLSocket.typedef = TypeDef("_SSLSocket", - server = interp2app(SSLSocket.server), - issuer = interp2app(SSLSocket.issuer), write = interp2app(SSLSocket.write), pending = interp2app(SSLSocket.pending), read = interp2app(SSLSocket.read), diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -95,20 +95,6 @@ self.s.close() del ss; gc.collect() - def test_server(self): - import ssl, gc - ss = ssl.wrap_socket(self.s) - assert isinstance(ss.server(), str) - self.s.close() - del ss; gc.collect() - - def test_issuer(self): - import ssl, gc - ss = ssl.wrap_socket(self.s) - assert isinstance(ss.issuer(), str) - self.s.close() - del ss; gc.collect() - def test_write(self): import ssl, gc ss = ssl.wrap_socket(self.s) @@ -142,18 +128,16 @@ del ss; gc.collect() def test_shutdown(self): - import ssl, sys, gc - ss = socket.ssl(self.s) + import socket, ssl, sys, gc + ss = ssl.wrap_socket(self.s) ss.write(b"hello\n") try: - result = ss.shutdown() + ss.shutdown(socket.SHUT_RDWR) except socket.error, e: - # xxx obscure case; throwing errno 0 is pretty odd... if e.errno == 0: - skip("Shutdown raised errno 0. CPython does this too") + pass # xxx obscure case; throwing errno 0 is pretty odd... raise - assert result is self.s._sock - raises(ssl.SSLError, ss.write, b"hello\n") + raises(AttributeError, ss.write, b"hello\n") del ss; gc.collect() class AppTestConnectedSSL_Timeout(AppTestConnectedSSL): From noreply at buildbot.pypy.org Sat Dec 17 23:01:07 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 Dec 2011 23:01:07 +0100 (CET) Subject: [pypy-commit] pypy py3k: Fix a crash when the 'str' unwrap_spec is given a non-ascii unicode string. Message-ID: <20111217220107.A8E988205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50640:8d91b7ba9f7c Date: 2011-12-17 18:37 +0100 http://bitbucket.org/pypy/pypy/changeset/8d91b7ba9f7c/ Log: Fix a crash when the 'str' unwrap_spec is given a non-ascii unicode string. 
Now it automatically converts to utf_8 bytes, like CPython. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1304,7 +1304,11 @@ return self.str_w(w_obj) def str_w(self, w_obj): - return self.unicode_w(w_obj).encode('ascii') + try: + return self.unicode_w(w_obj).encode('ascii') + except UnicodeEncodeError: + w_bytes = self.call_method(w_obj, 'encode', self.wrap('utf-8')) + return self.bytes_w(w_bytes) def bytes_w(self, w_obj): return w_obj.bytes_w(self) diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -773,6 +773,8 @@ def test_invalid(self): raises(ValueError, float.fromhex, "0P") + # A fullwidth Unicode digit + raises(ValueError, float.fromhex, "0x1p\uff10") def test_division_edgecases(self): import math From noreply at buildbot.pypy.org Sat Dec 17 23:01:08 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 Dec 2011 23:01:08 +0100 (CET) Subject: [pypy-commit] pypy py3k: Remove "except Exception, e" syntax. Message-ID: <20111217220108.CE0E58205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50641:4c7922f5a847 Date: 2011-12-17 18:41 +0100 http://bitbucket.org/pypy/pypy/changeset/4c7922f5a847/ Log: Remove "except Exception, e" syntax. Expected to break a lot of code... diff --git a/pypy/interpreter/pyparser/data/Grammar3.2 b/pypy/interpreter/pyparser/data/Grammar3.2 --- a/pypy/interpreter/pyparser/data/Grammar3.2 +++ b/pypy/interpreter/pyparser/data/Grammar3.2 @@ -75,7 +75,7 @@ with_stmt: 'with' with_item (',' with_item)* ':' suite with_item: test ['as' expr] # NB compile.c makes sure that the default except clause is last -except_clause: 'except' [test [('as' | ',') test]] +except_clause: 'except' [test ['as' NAME]] suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT # Backward compatibility cruft to support: From noreply at buildbot.pypy.org Sat Dec 17 23:01:10 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 Dec 2011 23:01:10 +0100 (CET) Subject: [pypy-commit] pypy py3k: Fix compiler: the exception handler target is now a NAME Message-ID: <20111217220110.0D73B8205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50642:3f837e62c8c0 Date: 2011-12-17 22:04 +0100 http://bitbucket.org/pypy/pypy/changeset/3f837e62c8c0/ Log: Fix compiler: the exception handler target is now a NAME (and not any assignment target as before). Still missing: the deletion of the target after the "except:" block. 
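As a minimal sketch of the Python 3 semantics being targeted here (illustrative only, not part of this changeset; the name "err" is just an example):

    try:
        int("not a number")
    except ValueError as err:   # the target after "as" must now be a plain NAME;
        print("caught:", err)   # the old "except ValueError, err:" form is a SyntaxError
    # CPython 3 also unbinds the name when the handler exits, roughly as if the
    # block ended with "del err" -- the part the log above notes is still missing:
    try:
        err
    except NameError:
        print("'err' is no longer bound after the except block")
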
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -2229,8 +2229,6 @@ def mutate_over(self, visitor): if self.type: self.type = self.type.mutate_over(visitor) - if self.name: - self.name = self.name.mutate_over(visitor) if self.body: visitor._mutate_sequence(self.body) return visitor.visit_ExceptHandler(self) @@ -2245,8 +2243,6 @@ self.name = None if self.type: self.type.sync_app_attrs(space) - if self.name: - self.name.sync_app_attrs(space) w_list = self.w_body if w_list is not None: list_w = space.listview(w_list) @@ -2699,8 +2695,6 @@ def visit_ExceptHandler(self, node): if node.type: node.type.walkabout(self) - if node.name: - node.name.walkabout(self) self.visit_sequence(node.body) def visit_arguments(self, node): @@ -6718,9 +6712,10 @@ def ExceptHandler_set_name(space, w_self, w_new_value): try: - w_self.name = space.interp_w(expr, w_new_value, True) - if type(w_self.name) is expr: - raise OperationError(space.w_TypeError, space.w_None) + if space.is_w(w_new_value, space.w_None): + w_self.name = None + else: + w_self.name = space.str_w(w_new_value) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py --- a/pypy/interpreter/astcompiler/astbuilder.py +++ b/pypy/interpreter/astcompiler/astbuilder.py @@ -364,16 +364,16 @@ def handle_except_clause(self, exc, body): test = None - target = None + name = None suite = self.handle_suite(body) child_count = len(exc.children) if child_count >= 2: test = self.handle_expr(exc.children[1]) if child_count == 4: - target_child = exc.children[3] - target = self.handle_expr(target_child) - self.set_context(target, ast.Store) - return ast.ExceptHandler(test, target, suite, exc.lineno, exc.column) + name_node = exc.children[3] + name = name_node.value + self.check_forbidden_name(name, name_node) + return ast.ExceptHandler(test, name, suite, exc.lineno, exc.column) def handle_try_stmt(self, try_node): body = self.handle_suite(try_node.children[2]) diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -524,7 +524,7 @@ self.emit_jump(ops.POP_JUMP_IF_FALSE, next_except, True) self.emit_op(ops.POP_TOP) if handler.name: - handler.name.walkabout(self) + self.name_op(handler.name, ast.Store); else: self.emit_op(ops.POP_TOP) self.emit_op(ops.POP_TOP) diff --git a/pypy/interpreter/astcompiler/tools/Python.asdl b/pypy/interpreter/astcompiler/tools/Python.asdl --- a/pypy/interpreter/astcompiler/tools/Python.asdl +++ b/pypy/interpreter/astcompiler/tools/Python.asdl @@ -101,7 +101,7 @@ comprehension = (expr target, expr iter, expr* ifs) -- not sure what to call the first argument for raise and except - excepthandler = ExceptHandler(expr? type, expr? name, stmt* body) + excepthandler = ExceptHandler(expr? type, identifier? name, stmt* body) attributes(int lineno, int col_offset) arguments = (expr* args, identifier? vararg, diff --git a/pypy/interpreter/pyparser/pytoken.py b/pypy/interpreter/pyparser/pytoken.py --- a/pypy/interpreter/pyparser/pytoken.py +++ b/pypy/interpreter/pyparser/pytoken.py @@ -35,6 +35,7 @@ _add_tok('EQUAL', "=" ) _add_tok('DOT', "." 
) _add_tok('PERCENT', "%" ) +_add_tok('BACKQUOTE', "`" ) _add_tok('LBRACE', "{" ) _add_tok('RBRACE', "}" ) _add_tok('EQEQUAL', "==" ) diff --git a/pypy/module/atexit/app_atexit.py b/pypy/module/atexit/app_atexit.py --- a/pypy/module/atexit/app_atexit.py +++ b/pypy/module/atexit/app_atexit.py @@ -25,7 +25,7 @@ continue try: func(*args, **kwargs) - except BaseException, e: + except BaseException as e: if not isinstance(e, SystemExit): import traceback last_type, last_exc, last_tb = sys.exc_info() diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -71,7 +71,7 @@ return try: fcntl.fcntl(fd, fcntl.F_GETFD) - except IOError, e: + except IOError as e: raise OSError(e.errno, e.strerror, e.filename) else: def _validate_fd(fd): From noreply at buildbot.pypy.org Sat Dec 17 23:01:11 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 Dec 2011 23:01:11 +0100 (CET) Subject: [pypy-commit] pypy py3k: Unicode fixes in _multibytecodec module Message-ID: <20111217220111.3981A8205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50643:c13feede4946 Date: 2011-12-17 20:47 +0100 http://bitbucket.org/pypy/pypy/changeset/c13feede4946/ Log: Unicode fixes in _multibytecodec module diff --git a/pypy/module/_multibytecodec/interp_incremental.py b/pypy/module/_multibytecodec/interp_incremental.py --- a/pypy/module/_multibytecodec/interp_incremental.py +++ b/pypy/module/_multibytecodec/interp_incremental.py @@ -48,7 +48,7 @@ c_codecs.pypy_cjk_dec_free(self.decodebuf) self.decodebuf = lltype.nullptr(c_codecs.DECODEBUF_P.TO) - @unwrap_spec(object=str, final=bool) + @unwrap_spec(object='bufferstr', final=bool) def decode_w(self, object, final=False): space = self.space state = space.fromcache(CodecState) @@ -114,7 +114,7 @@ pos = c_codecs.pypy_cjk_enc_inbuf_consumed(self.encodebuf) assert 0 <= pos <= len(object) self.pending = object[pos:] - return space.wrap(output) + return space.wrapbytes(output) @unwrap_spec(errors="str_or_None") diff --git a/pypy/module/_multibytecodec/interp_multibytecodec.py b/pypy/module/_multibytecodec/interp_multibytecodec.py --- a/pypy/module/_multibytecodec/interp_multibytecodec.py +++ b/pypy/module/_multibytecodec/interp_multibytecodec.py @@ -12,7 +12,7 @@ self.name = name self.codec = codec - @unwrap_spec(input=str, errors="str_or_None") + @unwrap_spec(input='bufferstr', errors="str_or_None") def decode(self, space, input, errors=None): if errors is None: errors = 'strict' @@ -41,7 +41,7 @@ raise wrap_unicodeencodeerror(space, e, input, self.name) except RuntimeError: raise wrap_runtimeerror(space) - return space.newtuple([space.wrap(output), + return space.newtuple([space.wrapbytes(output), space.wrap(len(input))]) @@ -69,7 +69,7 @@ space.w_UnicodeDecodeError, space.newtuple([ space.wrap(name), - space.wrap(input), + space.wrapbytes(input), space.wrap(e.start), space.wrap(e.end), space.wrap(e.reason)])) diff --git a/pypy/module/_multibytecodec/test/test_app_codecs.py b/pypy/module/_multibytecodec/test/test_app_codecs.py --- a/pypy/module/_multibytecodec/test/test_app_codecs.py +++ b/pypy/module/_multibytecodec/test/test_app_codecs.py @@ -12,76 +12,76 @@ def test_decode_hz(self): import _codecs_cn codec = _codecs_cn.getcodec("hz") - r = codec.decode("~{abc}") - assert r == (u'\u5f95\u6cef', 6) + r = codec.decode(b"~{abc}") + assert r == ('\u5f95\u6cef', 6) def test_strict_error(self): import _codecs_cn codec = _codecs_cn.getcodec("hz") - r = 
codec.decode("~{abc}", "strict") - assert r == (u'\u5f95\u6cef', 6) - assert type(r[0]) is unicode + r = codec.decode(b"~{abc}", "strict") + assert r == ('\u5f95\u6cef', 6) + assert type(r[0]) is str def test_decode_hz_error(self): import _codecs_cn codec = _codecs_cn.getcodec("hz") - e = raises(UnicodeDecodeError, codec.decode, "~{}").value - assert e.args == ('hz', '~{}', 2, 3, 'incomplete multibyte sequence') + e = raises(UnicodeDecodeError, codec.decode, b"~{}").value + assert e.args == ('hz', b'~{}', 2, 3, 'incomplete multibyte sequence') assert e.encoding == 'hz' - assert e.object == '~{}' and type(e.object) is str + assert e.object == b'~{}' and type(e.object) is bytes assert e.start == 2 assert e.end == 3 assert e.reason == "incomplete multibyte sequence" # - e = raises(UnicodeDecodeError, codec.decode, "~{xyz}").value - assert e.args == ('hz', '~{xyz}', 2, 4, 'illegal multibyte sequence') + e = raises(UnicodeDecodeError, codec.decode, b"~{xyz}").value + assert e.args == ('hz', b'~{xyz}', 2, 4, 'illegal multibyte sequence') def test_decode_hz_ignore(self): import _codecs_cn codec = _codecs_cn.getcodec("hz") - r = codec.decode("def~{}abc", errors='ignore') - assert r == (u'def\u5fcf', 9) - r = codec.decode("def~{}abc", 'ignore') - assert r == (u'def\u5fcf', 9) + r = codec.decode(b"def~{}abc", errors='ignore') + assert r == ('def\u5fcf', 9) + r = codec.decode(b"def~{}abc", 'ignore') + assert r == ('def\u5fcf', 9) def test_decode_hz_replace(self): import _codecs_cn codec = _codecs_cn.getcodec("hz") - r = codec.decode("def~{}abc", errors='replace') - assert r == (u'def\ufffd\u5fcf', 9) - r = codec.decode("def~{}abc", 'replace') - assert r == (u'def\ufffd\u5fcf', 9) + r = codec.decode(b"def~{}abc", errors='replace') + assert r == ('def\ufffd\u5fcf', 9) + r = codec.decode(b"def~{}abc", 'replace') + assert r == ('def\ufffd\u5fcf', 9) def test_decode_custom_error_handler(self): import codecs codecs.register_error("test.decode_custom_error_handler", - lambda e: (u'\u1234\u5678', e.end)) - u = "abc\xDD".decode("hz", "test.decode_custom_error_handler") - assert u == u'abc\u1234\u5678' + lambda e: ('\u1234\u5678', e.end)) + u = b"abc\xDD".decode("hz", "test.decode_custom_error_handler") + assert u == 'abc\u1234\u5678' def test_decode_custom_error_handler_overflow(self): import codecs import sys codecs.register_error("test.test_decode_custom_error_handler_overflow", - lambda e: (u'', sys.maxint + 1)) - raises((IndexError, OverflowError), "abc\xDD".decode, "hz", + lambda e: ('', sys.maxint + 1)) + raises((IndexError, OverflowError), b"abc\xDD".decode, "hz", "test.test_decode_custom_error_handler_overflow") def test_encode_hz(self): import _codecs_cn codec = _codecs_cn.getcodec("hz") - r = codec.encode(u'\u5f95\u6cef') - assert r == ('~{abc}~}', 2) - assert type(r[0]) is str + r = codec.encode('\u5f95\u6cef') + assert r == (b'~{abc}~}', 2) + assert type(r[0]) is bytes def test_encode_hz_error(self): import _codecs_cn codec = _codecs_cn.getcodec("hz") - u = u'abc\u1234def' + u = 'abc\u1234def' e = raises(UnicodeEncodeError, codec.encode, u).value assert e.args == ('hz', u, 3, 4, 'illegal multibyte sequence') assert e.encoding == 'hz' - assert e.object == u and type(e.object) is unicode + assert e.object == u and type(e.object) is str assert e.start == 3 assert e.end == 4 assert e.reason == 'illegal multibyte sequence' @@ -89,20 +89,20 @@ def test_encode_hz_ignore(self): import _codecs_cn codec = _codecs_cn.getcodec("hz") - r = codec.encode(u'abc\u1234def', 'ignore') - assert r == ('abcdef', 7) - 
assert type(r[0]) is str + r = codec.encode('abc\u1234def', 'ignore') + assert r == (b'abcdef', 7) + assert type(r[0]) is bytes def test_encode_hz_replace(self): import _codecs_cn codec = _codecs_cn.getcodec("hz") - r = codec.encode(u'abc\u1234def', 'replace') - assert r == ('abc?def', 7) - assert type(r[0]) is str + r = codec.encode('abc\u1234def', 'replace') + assert r == (b'abc?def', 7) + assert type(r[0]) is bytes def test_encode_custom_error_handler(self): import codecs codecs.register_error("test.multi_bad_handler", lambda e: (repl, 1)) - repl = u"\u2014" - s = u"\uDDA1".encode("gbk", "test.multi_bad_handler") - assert s == '\xA1\xAA' + repl = "\u2014" + s = "\uDDA1".encode("gbk", "test.multi_bad_handler") + assert s == b'\xA1\xAA' diff --git a/pypy/module/_multibytecodec/test/test_app_incremental.py b/pypy/module/_multibytecodec/test/test_app_incremental.py --- a/pypy/module/_multibytecodec/test/test_app_incremental.py +++ b/pypy/module/_multibytecodec/test/test_app_incremental.py @@ -34,86 +34,86 @@ def test_decode_hz(self): d = self.IncrementalHzDecoder() - r = d.decode("~{abcd~}") - assert r == u'\u5f95\u6c85' - r = d.decode("~{efgh~}") - assert r == u'\u5f50\u73b7' - for c, output in zip("!~{abcd~}xyz~{efgh", - [u'!', # ! - u'', # ~ - u'', # { - u'', # a - u'\u5f95', # b - u'', # c - u'\u6c85', # d - u'', # ~ - u'', # } - u'x', # x - u'y', # y - u'z', # z - u'', # ~ - u'', # { - u'', # e - u'\u5f50', # f - u'', # g - u'\u73b7', # h + r = d.decode(b"~{abcd~}") + assert r == '\u5f95\u6c85' + r = d.decode(b"~{efgh~}") + assert r == '\u5f50\u73b7' + for c, output in zip(b"!~{abcd~}xyz~{efgh", + ['!', # ! + '', # ~ + '', # { + '', # a + '\u5f95', # b + '', # c + '\u6c85', # d + '', # ~ + '', # } + 'x', # x + 'y', # y + 'z', # z + '', # ~ + '', # { + '', # e + '\u5f50', # f + '', # g + '\u73b7', # h ]): - r = d.decode(c) + r = d.decode(bytes([c])) assert r == output def test_decode_hz_final(self): d = self.IncrementalHzDecoder() - r = d.decode("~{", True) - assert r == u'' - raises(UnicodeDecodeError, d.decode, "~", True) - raises(UnicodeDecodeError, d.decode, "~{a", True) + r = d.decode(b"~{", True) + assert r == '' + raises(UnicodeDecodeError, d.decode, b"~", True) + raises(UnicodeDecodeError, d.decode, b"~{a", True) def test_decode_hz_reset(self): d = self.IncrementalHzDecoder() - r = d.decode("ab") - assert r == u'ab' - r = d.decode("~{") - assert r == u'' - r = d.decode("ab") - assert r == u'\u5f95' - r = d.decode("ab") - assert r == u'\u5f95' + r = d.decode(b"ab") + assert r == 'ab' + r = d.decode(b"~{") + assert r == '' + r = d.decode(b"ab") + assert r == '\u5f95' + r = d.decode(b"ab") + assert r == '\u5f95' d.reset() - r = d.decode("ab") - assert r == u'ab' + r = d.decode(b"ab") + assert r == 'ab' def test_decode_hz_error(self): d = self.IncrementalHzDecoder() - raises(UnicodeDecodeError, d.decode, "~{abc", True) + raises(UnicodeDecodeError, d.decode, b"~{abc", True) d = self.IncrementalHzDecoder("ignore") - r = d.decode("~{abc", True) + r = d.decode(b"~{abc", True) assert r == u'\u5f95' d = self.IncrementalHzDecoder() d.errors = "replace" - r = d.decode("~{abc", True) - assert r == u'\u5f95\ufffd' + r = d.decode(b"~{abc", True) + assert r == '\u5f95\ufffd' def test_decode_hz_buffer_grow(self): d = self.IncrementalHzDecoder() for i in range(13): - r = d.decode("a" * (2**i)) - assert r == u"a" * (2**i) + r = d.decode(b"a" * (2**i)) + assert r == "a" * (2**i) def test_encode_hz(self): e = self.IncrementalHzEncoder() r = e.encode("abcd") - assert r == 'abcd' - r = 
e.encode(u"\u5f95\u6c85") - assert r == '~{abcd~}' - r = e.encode(u"\u5f50") - assert r == '~{ef~}' - r = e.encode(u"\u73b7") - assert r == '~{gh~}' + assert r == b'abcd' + r = e.encode("\u5f95\u6c85") + assert r == b'~{abcd~}' + r = e.encode("\u5f50") + assert r == b'~{ef~}' + r = e.encode("\u73b7") + assert r == b'~{gh~}' def test_encode_hz_final(self): e = self.IncrementalHzEncoder() - r = e.encode(u"xyz\u5f95\u6c85", True) - assert r == 'xyz~{abcd~}' + r = e.encode("xyz\u5f95\u6c85", True) + assert r == b'xyz~{abcd~}' # This is a bit hard to test, because the only way I can see that # encoders can return MBERR_TOOFEW is with surrogates, which only # occur with 2-byte unicode characters... We will just have to @@ -123,41 +123,41 @@ def test_encode_hz_reset(self): # Same issue as with test_encode_hz_final e = self.IncrementalHzEncoder() - r = e.encode(u"xyz\u5f95\u6c85", True) - assert r == 'xyz~{abcd~}' + r = e.encode("xyz\u5f95\u6c85", True) + assert r == b'xyz~{abcd~}' e.reset() - r = e.encode(u"xyz\u5f95\u6c85") - assert r == 'xyz~{abcd~}' + r = e.encode("xyz\u5f95\u6c85") + assert r == b'xyz~{abcd~}' def test_encode_hz_error(self): e = self.IncrementalHzEncoder() - raises(UnicodeEncodeError, e.encode, u"\u4321", True) + raises(UnicodeEncodeError, e.encode, "\u4321", True) e = self.IncrementalHzEncoder("ignore") - r = e.encode(u"xy\u4321z", True) - assert r == 'xyz' + r = e.encode("xy\u4321z", True) + assert r == b'xyz' e = self.IncrementalHzEncoder() e.errors = "replace" - r = e.encode(u"xy\u4321z", True) - assert r == 'xy?z' + r = e.encode("xy\u4321z", True) + assert r == b'xy?z' def test_encode_hz_buffer_grow(self): e = self.IncrementalHzEncoder() for i in range(13): - r = e.encode(u"a" * (2**i)) - assert r == "a" * (2**i) + r = e.encode("a" * (2**i)) + assert r == b"a" * (2**i) def test_encode_big5hkscs(self): #e = self.IncrementalBig5hkscsEncoder() - #r = e.encode(u'\xca', True) - #assert r == '\x88f' - #r = e.encode(u'\xca', True) - #assert r == '\x88f' - #raises(UnicodeEncodeError, e.encode, u'\u0304', True) + #r = e.encode('\xca', True) + #assert r == b'\x88f' + #r = e.encode('\xca', True) + #assert r == b'\x88f' + #raises(UnicodeEncodeError, e.encode, '\u0304', True) # e = self.IncrementalBig5hkscsEncoder() - r = e.encode(u'\xca') - assert r == '' - r = e.encode(u'\xca') - assert r == '\x88f' - r = e.encode(u'\u0304') - assert r == '\x88b' + r = e.encode('\xca') + assert r == b'' + r = e.encode('\xca') + assert r == b'\x88f' + r = e.encode('\u0304') + assert r == b'\x88b' diff --git a/pypy/module/_multibytecodec/test/test_app_stream.py b/pypy/module/_multibytecodec/test/test_app_stream.py --- a/pypy/module/_multibytecodec/test/test_app_stream.py +++ b/pypy/module/_multibytecodec/test/test_app_stream.py @@ -42,8 +42,8 @@ self.pos += size return res # - r = self.HzStreamReader(FakeFile("!~{abcd~}xyz~{efgh")) - for expected in u'!\u5f95\u6c85xyz\u5f50\u73b7': + r = self.HzStreamReader(FakeFile(b"!~{abcd~}xyz~{efgh")) + for expected in '!\u5f95\u6c85xyz\u5f50\u73b7': c = r.read(1) assert c == expected c = r.read(1) @@ -56,15 +56,15 @@ def read(self): return self.data # - r = self.HzStreamReader(FakeFile("!~{a"), "replace") + r = self.HzStreamReader(FakeFile(b"!~{a"), "replace") c = r.read() - assert c == u'!\ufffd' + assert c == '!\ufffd' # - r = self.HzStreamReader(FakeFile("!~{a")) + r = self.HzStreamReader(FakeFile(b"!~{a")) r.errors = "replace" assert r.errors == "replace" c = r.read() - assert c == u'!\ufffd' + assert c == '!\ufffd' def test_writer(self): class FakeFile: @@ 
-74,10 +74,10 @@ self.output.append(data) # w = self.HzStreamWriter(FakeFile()) - for input in u'!\u5f95\u6c85xyz\u5f50\u73b7': + for input in '!\u5f95\u6c85xyz\u5f50\u73b7': w.write(input) - assert w.stream.output == ['!', '~{ab~}', '~{cd~}', 'x', 'y', 'z', - '~{ef~}', '~{gh~}'] + assert w.stream.output == [b'!', b'~{ab~}', b'~{cd~}', b'x', b'y', b'z', + b'~{ef~}', b'~{gh~}'] def test_no_flush(self): class FakeFile: @@ -87,7 +87,7 @@ self.output.append(data) # w = self.ShiftJisx0213StreamWriter(FakeFile()) - w.write(u'\u30ce') - w.write(u'\u304b') - w.write(u'\u309a') - assert w.stream.output == ['\x83m', '', '\x82\xf5'] + w.write('\u30ce') + w.write('\u304b') + w.write('\u309a') + assert w.stream.output == [b'\x83m', b'', b'\x82\xf5'] From noreply at buildbot.pypy.org Sat Dec 17 23:01:12 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 Dec 2011 23:01:12 +0100 (CET) Subject: [pypy-commit] pypy py3k: Unicode fixes for the bz2 module Message-ID: <20111217220112.676068205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50644:e20a4198d35c Date: 2011-12-17 21:52 +0100 http://bitbucket.org/pypy/pypy/changeset/e20a4198d35c/ Log: Unicode fixes for the bz2 module diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -4,7 +4,7 @@ from pypy.rpython.lltypesystem import lltype from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.typedef import TypeDef, interp_attrproperty +from pypy.interpreter.typedef import TypeDef, interp_attrproperty_bytes from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.rlib.streamio import Stream from pypy.translator.tool.cbuild import ExternalCompilationInfo @@ -410,7 +410,7 @@ if self.decompressor.running: raise OperationError(self.space.w_EOFError, self.space.wrap("compressed file ended before the logical end-of-the-stream was detected")) - result = self.space.str_w(w_result) + result = self.space.bytes_w(w_result) self.readlength += len(result) if len(self.buffer) != self.pos: pos = self.pos @@ -438,7 +438,7 @@ self.finished = True return "" raise - self.buffer = self.space.str_w(w_read) + self.buffer = self.space.bytes_w(w_read) self.pos = 0 if len(self.buffer) - self.pos >= n: pos = self.pos @@ -476,11 +476,11 @@ self.writtenlength = 0 def close(self): - self.stream.write(self.space.str_w(self.compressor.flush())) + self.stream.write(self.space.bytes_w(self.compressor.flush())) self.stream.close() def write(self, data): - self.stream.write(self.space.str_w(self.compressor.compress(data))) + self.stream.write(self.space.bytes_w(self.compressor.compress(data))) self.writtenlength += len(data) def tell(self): @@ -548,7 +548,7 @@ datasize = len(data) if datasize == 0: - return self.space.wrap("") + return self.space.wrapbytes("") if not self.running: raise OperationError(self.space.w_ValueError, @@ -576,7 +576,7 @@ out.prepare_next_chunk() res = out.make_result_string() - return self.space.wrap(res) + return self.space.wrapbytes(res) def flush(self): if not self.running: @@ -596,7 +596,7 @@ out.prepare_next_chunk() res = out.make_result_string() - return self.space.wrap(res) + return self.space.wrapbytes(res) W_BZ2Compressor.typedef = TypeDef("BZ2Compressor", __doc__ = W_BZ2Compressor.__doc__, @@ -650,7 +650,7 @@ unused_data attribute.""" if data == '': - return self.space.wrap('') + return self.space.wrapbytes('') if not 
self.running: raise OperationError(self.space.w_EOFError, self.space.wrap("end of stream was already found")) @@ -684,13 +684,13 @@ out.prepare_next_chunk() res = out.make_result_string() - return self.space.wrap(res) + return self.space.wrapbytes(res) W_BZ2Decompressor.typedef = TypeDef("BZ2Decompressor", __doc__ = W_BZ2Decompressor.__doc__, __new__ = interp2app(descr_decompressor__new__), - unused_data = interp_attrproperty("unused_data", W_BZ2Decompressor), + unused_data = interp_attrproperty_bytes("unused_data", W_BZ2Decompressor), decompress = interp2app(W_BZ2Decompressor.decompress), ) @@ -738,7 +738,7 @@ res = out.make_result_string() BZ2_bzCompressEnd(bzs) - return space.wrap(res) + return space.wrapbytes(res) @unwrap_spec(data='bufferstr') def decompress(space, data): @@ -749,7 +749,7 @@ in_bufsize = len(data) if in_bufsize == 0: - return space.wrap("") + return space.wrapbytes("") with lltype.scoped_alloc(bz_stream.TO, zero=True) as bzs: with lltype.scoped_alloc(rffi.CCHARP.TO, in_bufsize) as in_buf: @@ -780,4 +780,4 @@ res = out.make_result_string() BZ2_bzDecompressEnd(bzs) - return space.wrap(res) + return space.wrapbytes(res) diff --git a/pypy/module/bz2/test/test_bz2_compdecomp.py b/pypy/module/bz2/test/test_bz2_compdecomp.py --- a/pypy/module/bz2/test/test_bz2_compdecomp.py +++ b/pypy/module/bz2/test/test_bz2_compdecomp.py @@ -1,6 +1,7 @@ from pypy.conftest import gettestobjspace from pypy.module.bz2.test.support import CheckAllocation from pypy.module.bz2 import interp_bz2 +from pypy.interpreter.gateway import interp2app import os, py HUGE_OK = False @@ -12,7 +13,7 @@ def setup_module(mod): DATA = 'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`' - def decompress(self, data): + def decompress(data): import popen2 import bz2 pop = popen2.Popen3("bunzip2", capturestderr=1) @@ -40,9 +41,11 @@ def setup_class(cls): space = gettestobjspace(usemodules=('bz2',)) cls.space = space - cls.w_TEXT = space.wrap(TEXT) - cls.w_decompress = space.wrap(decompress) + cls.w_TEXT = space.wrapbytes(TEXT) cls.w_HUGE_OK = space.wrap(HUGE_OK) + def decompress_w(space, w_data): + return space.wrapbytes(decompress(space.bytes_w(w_data))) + cls.w_decompress = space.wrap(interp2app(decompress_w)) def test_creation(self): from bz2 import BZ2Compressor @@ -59,7 +62,7 @@ bz2c = BZ2Compressor() raises(TypeError, bz2c.compress) data = bz2c.compress(self.TEXT) - data = "%s%s" % (data, bz2c.flush()) + data += bz2c.flush() assert self.decompress(data) == self.TEXT def test_compress_huge_data(self): @@ -71,7 +74,7 @@ bz2c = BZ2Compressor() raises(TypeError, bz2c.compress) data = bz2c.compress(HUGE_DATA) - data = "%s%s" % (data, bz2c.flush()) + data += bz2c.flush() assert self.decompress(data) == HUGE_DATA def test_compress_chunks_10(self): @@ -79,30 
+82,30 @@ bz2c = BZ2Compressor() n = 0 - data = "" + data = b"" while True: temp = self.TEXT[n * 10:(n + 1) * 10] if not temp: break - data = "%s%s" % (data, bz2c.compress(temp)) + data += bz2c.compress(temp) n += 1 - data = "%s%s" % (data, bz2c.flush()) + data += bz2c.flush() assert self.decompress(data) == self.TEXT def test_buffer(self): from bz2 import BZ2Compressor bz2c = BZ2Compressor() data = bz2c.compress(buffer(self.TEXT)) - data = "%s%s" % (data, bz2c.flush()) + data += bz2c.flush() assert self.decompress(data) == self.TEXT class AppTestBZ2Decompressor(CheckAllocation): def setup_class(cls): space = gettestobjspace(usemodules=('bz2',)) cls.space = space - cls.w_TEXT = space.wrap(TEXT) - cls.w_DATA = space.wrap(DATA) - cls.w_BUGGY_DATA = space.wrap(BUGGY_DATA) + cls.w_TEXT = space.wrapbytes(TEXT) + cls.w_DATA = space.wrapbytes(DATA) + cls.w_BUGGY_DATA = space.wrapbytes(BUGGY_DATA) def test_creation(self): from bz2 import BZ2Decompressor @@ -115,7 +118,7 @@ from bz2 import BZ2Decompressor bz2d = BZ2Decompressor() - assert bz2d.unused_data == "" + assert bz2d.unused_data == b"" def test_decompress(self): from bz2 import BZ2Decompressor @@ -129,13 +132,13 @@ from bz2 import BZ2Decompressor bz2d = BZ2Decompressor() - decompressed_data = "" + decompressed_data = b"" n = 0 while True: temp = self.DATA[n * 10:(n + 1) * 10] if not temp: break - decompressed_data = "%s%s" % (decompressed_data, bz2d.decompress(temp)) + decompressed_data += bz2d.decompress(temp) n += 1 assert decompressed_data == self.TEXT @@ -145,7 +148,7 @@ from bz2 import BZ2Decompressor bz2d = BZ2Decompressor() - unused_data = "this is unused data" + unused_data = b"this is unused data" decompressed_data = bz2d.decompress(self.DATA + unused_data) assert decompressed_data == self.TEXT assert bz2d.unused_data == unused_data @@ -155,7 +158,7 @@ bz2d = BZ2Decompressor() bz2d.decompress(self.DATA) - raises(EOFError, bz2d.decompress, "foo") + raises(EOFError, bz2d.decompress, b"foo") def test_buffer(self): from bz2 import BZ2Decompressor @@ -167,24 +170,26 @@ from bz2 import BZ2Decompressor bz2d = BZ2Decompressor() decompressed_data = bz2d.decompress(self.BUGGY_DATA) - assert decompressed_data == '' + assert decompressed_data == b'' raises(IOError, bz2d.decompress, self.BUGGY_DATA) class AppTestBZ2ModuleFunctions(CheckAllocation): def setup_class(cls): space = gettestobjspace(usemodules=('bz2',)) cls.space = space - cls.w_TEXT = space.wrap(TEXT) - cls.w_DATA = space.wrap(DATA) - cls.w_decompress = space.wrap(decompress) + cls.w_TEXT = space.wrapbytes(TEXT) + cls.w_DATA = space.wrapbytes(DATA) cls.w_HUGE_OK = space.wrap(HUGE_OK) + def decompress_w(space, w_data): + return space.wrapbytes(decompress(space.bytes_w(w_data))) + cls.w_decompress = space.wrap(interp2app(decompress_w)) def test_compress_function(self): from bz2 import compress raises(TypeError, compress, 123) - raises(ValueError, compress, "foo", 10) - raises(TypeError, compress, "foo", "foo") + raises(ValueError, compress, b"foo", 10) + raises(TypeError, compress, b"foo", b"foo") data = compress(self.TEXT) assert self.decompress(data) == self.TEXT @@ -203,7 +208,7 @@ import bz2 raises(TypeError, bz2.decompress) - assert bz2.decompress("") == "" + assert bz2.decompress(b"") == b"" decompressed_data = bz2.decompress(self.DATA) assert decompressed_data == self.TEXT diff --git a/pypy/module/bz2/test/test_bz2_file.py b/pypy/module/bz2/test/test_bz2_file.py --- a/pypy/module/bz2/test/test_bz2_file.py +++ b/pypy/module/bz2/test/test_bz2_file.py @@ -3,6 +3,7 @@ import py 
from pypy.conftest import gettestobjspace from pypy.module.bz2.test.support import CheckAllocation +from pypy.interpreter.gateway import interp2app import os import random @@ -24,7 +25,7 @@ data = DATA[:100] f.write(data, 'wb') - def decompress(self, data): + def decompress(data): import popen2 import bz2 pop = popen2.Popen3("bunzip2", capturestderr=1) @@ -51,14 +52,16 @@ def setup_class(cls): space = gettestobjspace(usemodules=('bz2',)) cls.space = space - cls.w_TEXT = space.wrap(TEXT) - cls.w_DATA = space.wrap(DATA) - cls.w_DATA_CRLF = space.wrap(DATA_CRLF) + cls.w_TEXT = space.wrapbytes(TEXT) + cls.w_DATA = space.wrapbytes(DATA) + cls.w_DATA_CRLF = space.wrapbytes(DATA_CRLF) cls.w_temppath = space.wrap(str(py.test.ensuretemp("bz2").join("foo"))) cls.w_create_temp_file = space.wrap(create_temp_file) - cls.w_decompress = space.wrap(decompress) + def decompress_w(space, w_data): + return space.wrapbytes(decompress(space.bytes_w(w_data))) + cls.w_decompress = space.wrap(interp2app(decompress_w)) cls.w_create_broken_temp_file = space.wrap(create_broken_temp_file) - cls.w_random_data = space.wrap(RANDOM_DATA) + cls.w_random_data = space.wrapbytes(RANDOM_DATA) def test_attributes(self): from bz2 import BZ2File @@ -224,13 +227,13 @@ def test_readline(self): from bz2 import BZ2File - from cStringIO import StringIO + from io import BytesIO self.create_temp_file() bz2f = BZ2File(self.temppath) # XXX #raises(TypeError, bz2f.readline, None) - sio = StringIO(self.TEXT) + sio = BytesIO(self.TEXT) for line in sio.readlines(): line_read = bz2f.readline() assert line_read == line @@ -317,33 +320,33 @@ def test_readlines(self): from bz2 import BZ2File - from cStringIO import StringIO + from io import BytesIO self.create_temp_file() bz2f = BZ2File(self.temppath) # XXX #raises(TypeError, bz2f.readlines, None) - sio = StringIO(self.TEXT) + sio = BytesIO(self.TEXT) assert bz2f.readlines() == sio.readlines() bz2f.close() def test_iterator(self): from bz2 import BZ2File - from cStringIO import StringIO + from io import BytesIO self.create_temp_file() bz2f = BZ2File(self.temppath) - sio = StringIO(self.TEXT) + sio = BytesIO(self.TEXT) assert list(iter(bz2f)) == sio.readlines() bz2f.close() def test_xreadlines(self): from bz2 import BZ2File - from cStringIO import StringIO + from io import BytesIO self.create_temp_file() bz2f = BZ2File(self.temppath) - sio = StringIO(self.TEXT) + sio = BytesIO(self.TEXT) assert list(bz2f.xreadlines()) == sio.readlines() bz2f.close() @@ -351,7 +354,7 @@ # readlines()/xreadlines() for files containing no newline from bz2 import BZ2File - DATA = 'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t' + DATA = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t' f = open(self.temppath, "wb") f.write(DATA) f.close() @@ -398,11 +401,11 @@ def test_writelines(self): from bz2 import BZ2File - from cStringIO import StringIO + from io import BytesIO bz2f = BZ2File(self.temppath, 'w') raises(TypeError, bz2f.writelines) - sio = StringIO(self.TEXT) + sio = BytesIO(self.TEXT) bz2f.writelines(sio.readlines()) bz2f.close() f = open(self.temppath, "rb") @@ -413,8 +416,8 @@ from bz2 import BZ2File bz2f = BZ2File(self.temppath, 'r') - raises(IOError, bz2f.write, "abc") - raises(IOError, bz2f.writelines, ["abc"]) + raises(IOError, bz2f.write, b"abc") + raises(IOError, bz2f.writelines, [b"abc"]) bz2f.close() def test_write_bigger_file(self): @@ -432,11 +435,11 @@ with BZ2File(self.temppath, 'w') 
as f: assert not f.closed - f.write("abc") + f.write(b"abc") assert f.closed with BZ2File(self.temppath, 'r') as f: data = f.read() - assert data == "abc" + assert data == b"abc" assert f.closed diff --git a/pypy/module/bz2/test/test_large.py b/pypy/module/bz2/test/test_large.py --- a/pypy/module/bz2/test/test_large.py +++ b/pypy/module/bz2/test/test_large.py @@ -8,7 +8,7 @@ py.test.skip("skipping this very slow test; try 'pypy-c -A'") cls.space = gettestobjspace(usemodules=('bz2',)) largetest_bz2 = py.path.local(__file__).dirpath().join("largetest.bz2") - cls.w_compressed_data = cls.space.wrap(largetest_bz2.read('rb')) + cls.w_compressed_data = cls.space.wrapbytes(largetest_bz2.read('rb')) def test_decompress(self): from bz2 import decompress From noreply at buildbot.pypy.org Sat Dec 17 23:01:13 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 Dec 2011 23:01:13 +0100 (CET) Subject: [pypy-commit] pypy py3k: Fix BZ2File by importing the applevel version from CPython 3.3. Message-ID: <20111217220113.981998205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50645:8d890adadd94 Date: 2011-12-17 22:59 +0100 http://bitbucket.org/pypy/pypy/changeset/8d890adadd94/ Log: Fix BZ2File by importing the applevel version from CPython 3.3. diff --git a/pypy/module/bz2/__init__.py b/pypy/module/bz2/__init__.py --- a/pypy/module/bz2/__init__.py +++ b/pypy/module/bz2/__init__.py @@ -12,8 +12,8 @@ 'BZ2Decompressor': 'interp_bz2.W_BZ2Decompressor', 'compress': 'interp_bz2.compress', 'decompress': 'interp_bz2.decompress', - 'BZ2File': 'interp_bz2.W_BZ2File', } appleveldefs = { + 'BZ2File': 'app_bz2file.BZ2File', } diff --git a/pypy/module/bz2/app_bz2file.py b/pypy/module/bz2/app_bz2file.py new file mode 100644 --- /dev/null +++ b/pypy/module/bz2/app_bz2file.py @@ -0,0 +1,370 @@ +"""Interface to the libbzip2 compression library. + +This file is an almost exact copy of CPython3.3 Lib/bz2.py. +""" + +import io +import threading + +from bz2 import BZ2Compressor, BZ2Decompressor + + +_MODE_CLOSED = 0 +_MODE_READ = 1 +_MODE_READ_EOF = 2 +_MODE_WRITE = 3 + +_BUFFER_SIZE = 8192 + + +class BZ2File(io.BufferedIOBase): + + """A file object providing transparent bzip2 (de)compression. + + A BZ2File can act as a wrapper for an existing file object, or refer + directly to a named file on disk. + + Note that BZ2File provides a *binary* file interface - data read is + returned as bytes, and data to be written should be given as bytes. + """ + + def __init__(self, filename=None, mode="r", buffering=None, + compresslevel=9, fileobj=None): + """Open a bzip2-compressed file. + + If filename is given, open the named file. Otherwise, operate on + the file object given by fileobj. Exactly one of these two + parameters should be provided. + + mode can be 'r' for reading (default), or 'w' for writing. + + buffering is ignored. Its use is deprecated. + + If mode is 'w', compresslevel can be a number between 1 and 9 + specifying the level of compression: 1 produces the least + compression, and 9 (default) produces the most compression. + """ + # This lock must be recursive, so that BufferedIOBase's + # readline(), readlines() and writelines() don't deadlock. 
+ self._lock = threading.RLock() + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + self._pos = 0 + self._size = -1 + + if not (1 <= compresslevel <= 9): + raise ValueError("compresslevel must be between 1 and 9") + + if mode in ("", "r", "rb"): + mode = "rb" + mode_code = _MODE_READ + self._decompressor = BZ2Decompressor() + self._buffer = None + elif mode in ("w", "wb"): + mode = "wb" + mode_code = _MODE_WRITE + self._compressor = BZ2Compressor(compresslevel) + elif mode in ("a", "ab"): + mode = "ab" + mode_code = _MODE_WRITE + self._compressor = BZ2Compressor(compresslevel) + else: + raise ValueError("Invalid mode: {!r}".format(mode)) + + if filename is not None and fileobj is None: + self._fp = open(filename, mode) + self._closefp = True + self._mode = mode_code + elif fileobj is not None and filename is None: + self._fp = fileobj + self._mode = mode_code + else: + raise ValueError("Must give exactly one of filename and fileobj") + + def close(self): + """Flush and close the file. + + May be called more than once without error. Once the file is + closed, any other operation on it will raise a ValueError. + """ + with self._lock: + if self._mode == _MODE_CLOSED: + return + try: + if self._mode in (_MODE_READ, _MODE_READ_EOF): + self._decompressor = None + elif self._mode == _MODE_WRITE: + self._fp.write(self._compressor.flush()) + self._compressor = None + finally: + try: + if self._closefp: + self._fp.close() + finally: + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + self._buffer = None + + @property + def closed(self): + """True if this file is closed.""" + return self._mode == _MODE_CLOSED + + def fileno(self): + """Return the file descriptor for the underlying file.""" + self._check_not_closed() + return self._fp.fileno() + + def seekable(self): + """Return whether the file supports seeking.""" + return self.readable() + + def readable(self): + """Return whether the file was opened for reading.""" + self._check_not_closed() + return self._mode in (_MODE_READ, _MODE_READ_EOF) + + def writable(self): + """Return whether the file was opened for writing.""" + self._check_not_closed() + return self._mode == _MODE_WRITE + + # Mode-checking helper functions. + + def _check_not_closed(self): + if self.closed: + raise ValueError("I/O operation on closed file") + + def _check_can_read(self): + if not self.readable(): + raise io.UnsupportedOperation("File not open for reading") + + def _check_can_write(self): + if not self.writable(): + raise io.UnsupportedOperation("File not open for writing") + + def _check_can_seek(self): + if not self.seekable(): + raise io.UnsupportedOperation("Seeking is only supported " + "on files open for reading") + + # Fill the readahead buffer if it is empty. Returns False on EOF. + def _fill_buffer(self): + if self._buffer: + return True + + if self._decompressor.unused_data: + rawblock = self._decompressor.unused_data + else: + rawblock = self._fp.read(_BUFFER_SIZE) + + if not rawblock: + if self._decompressor.eof: + self._mode = _MODE_READ_EOF + self._size = self._pos + return False + else: + raise EOFError("Compressed file ended before the " + "end-of-stream marker was reached") + + # Continue to next stream. + if self._decompressor.eof: + self._decompressor = BZ2Decompressor() + + self._buffer = self._decompressor.decompress(rawblock) + return True + + # Read data until EOF. + # If return_data is false, consume the data without returning it. 
+ def _read_all(self, return_data=True): + blocks = [] + while self._fill_buffer(): + if return_data: + blocks.append(self._buffer) + self._pos += len(self._buffer) + self._buffer = None + if return_data: + return b"".join(blocks) + + # Read a block of up to n bytes. + # If return_data is false, consume the data without returning it. + def _read_block(self, n, return_data=True): + blocks = [] + while n > 0 and self._fill_buffer(): + if n < len(self._buffer): + data = self._buffer[:n] + self._buffer = self._buffer[n:] + else: + data = self._buffer + self._buffer = None + if return_data: + blocks.append(data) + self._pos += len(data) + n -= len(data) + if return_data: + return b"".join(blocks) + + def peek(self, n=0): + """Return buffered data without advancing the file position. + + Always returns at least one byte of data, unless at EOF. + The exact number of bytes returned is unspecified. + """ + with self._lock: + self._check_can_read() + if self._mode == _MODE_READ_EOF or not self._fill_buffer(): + return b"" + return self._buffer + + def read(self, size=-1): + """Read up to size uncompressed bytes from the file. + + If size is negative or omitted, read until EOF is reached. + Returns b'' if the file is already at EOF. + """ + with self._lock: + self._check_can_read() + if self._mode == _MODE_READ_EOF or size == 0: + return b"" + elif size < 0: + return self._read_all() + else: + return self._read_block(size) + + def read1(self, size=-1): + """Read up to size uncompressed bytes with at most one read + from the underlying stream. + + Returns b'' if the file is at EOF. + """ + with self._lock: + self._check_can_read() + if (size == 0 or self._mode == _MODE_READ_EOF or + not self._fill_buffer()): + return b"" + if 0 < size < len(self._buffer): + data = self._buffer[:size] + self._buffer = self._buffer[size:] + else: + data = self._buffer + self._buffer = None + self._pos += len(data) + return data + + def readinto(self, b): + """Read up to len(b) bytes into b. + + Returns the number of bytes read (0 for EOF). + """ + with self._lock: + return io.BufferedIOBase.readinto(self, b) + + def readline(self, size=-1): + """Read a line of uncompressed bytes from the file. + + The terminating newline (if present) is retained. If size is + non-negative, no more than size bytes will be read (in which + case the line may be incomplete). Returns b'' if already at EOF. + """ + if not hasattr(size, "__index__"): + raise TypeError("Integer argument expected") + size = size.__index__() + with self._lock: + return io.BufferedIOBase.readline(self, size) + + def readlines(self, size=-1): + """Read a list of lines of uncompressed bytes from the file. + + size can be specified to control the number of lines read: no + further lines will be read once the total size of the lines read + so far equals or exceeds size. + """ + if not hasattr(size, "__index__"): + raise TypeError("Integer argument expected") + size = size.__index__() + with self._lock: + return io.BufferedIOBase.readlines(self, size) + + def write(self, data): + """Write a byte string to the file. + + Returns the number of uncompressed bytes written, which is + always len(data). Note that due to buffering, the file on disk + may not reflect the data written until close() is called. + """ + with self._lock: + self._check_can_write() + compressed = self._compressor.compress(data) + self._fp.write(compressed) + self._pos += len(data) + return len(data) + + def writelines(self, seq): + """Write a sequence of byte strings to the file. 
+ + Returns the number of uncompressed bytes written. + seq can be any iterable yielding byte strings. + + Line separators are not added between the written byte strings. + """ + with self._lock: + return io.BufferedIOBase.writelines(self, seq) + + # Rewind the file to the beginning of the data stream. + def _rewind(self): + self._fp.seek(0, 0) + self._mode = _MODE_READ + self._pos = 0 + self._decompressor = BZ2Decompressor() + self._buffer = None + + def seek(self, offset, whence=0): + """Change the file position. + + The new position is specified by offset, relative to the + position indicated by whence. Values for whence are: + + 0: start of stream (default); offset must not be negative + 1: current stream position + 2: end of stream; offset must not be positive + + Returns the new file position. + + Note that seeking is emulated, so depending on the parameters, + this operation may be extremely slow. + """ + with self._lock: + self._check_can_seek() + + # Recalculate offset as an absolute file position. + if whence == 0: + pass + elif whence == 1: + offset = self._pos + offset + elif whence == 2: + # Seeking relative to EOF - we need to know the file's size. + if self._size < 0: + self._read_all(return_data=False) + offset = self._size + offset + else: + raise ValueError("Invalid value for whence: {}".format(whence)) + + # Make it so that offset is the number of bytes to skip forward. + if offset < self._pos: + self._rewind() + else: + offset -= self._pos + + # Read and discard data until we reach the desired position. + if self._mode != _MODE_READ_EOF: + self._read_block(offset, return_data=False) + + return self._pos + + def tell(self): + """Return the current file position.""" + with self._lock: + self._check_not_closed() + return self._pos diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -2,11 +2,11 @@ from pypy.rpython.tool import rffi_platform as platform from pypy.rpython.lltypesystem import rffi from pypy.rpython.lltypesystem import lltype -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.typedef import TypeDef, interp_attrproperty_bytes +from pypy.interpreter.typedef import GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.rlib.streamio import Stream from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.translator.platform import platform as compiler from pypy.rlib.rarithmetic import intmask, r_longlong @@ -232,275 +232,6 @@ def __exit__(self, *args): self.free() -# ____________________________________________________________ -# -# Make the BZ2File type by internally inheriting from W_File. -# XXX this depends on internal details of W_File to work properly. 
- -from pypy.module._file.interp_file import W_File - -class W_BZ2File(W_File): - - def check_mode_ok(self, mode): - if (not mode or mode[0] not in ['r', 'w', 'a', 'U']): - space = self.space - raise operationerrfmt(space.w_ValueError, - "invalid mode: '%s'", mode) - - @unwrap_spec(mode=str, buffering=int, compresslevel=int) - def direct_bz2__init__(self, w_name, mode='r', buffering=-1, - compresslevel=9): - self.direct_close() - # the stream should always be opened in binary mode - if "b" not in mode: - mode = mode + "b" - self.check_mode_ok(mode) - stream = open_bz2file_as_stream(self.space, w_name, mode, - buffering, compresslevel) - fd = stream.try_to_find_file_descriptor() - self.fdopenstream(stream, fd, mode, w_name) - - _exposed_method_names = [] - W_File._decl.im_func(locals(), "bz2__init__", - """Opens a BZ2-compressed file.""") - # XXX ^^^ hacking hacking... can't just use the name "__init__" again - # because the RTyper is confused about the two direct__init__() with - # a different signature, confusion caused by the fact that - # W_File.file__init__() would appear to contain an indirect call to - # one of the two versions of direct__init__(). - - def file_bz2__repr__(self): - if self.stream is None: - head = "closed" - else: - head = "open" - w_name = self.w_name - if w_name is None: - w_name = self.space.wrap('?') - info = "%s bz2.BZ2File %s, mode '%s'" % (head, self.getdisplayname(), - self.mode) - return self.getrepr(self.space, info) - -def descr_bz2file__new__(space, w_subtype, __args__): - bz2file = space.allocate_instance(W_BZ2File, w_subtype) - W_BZ2File.__init__(bz2file, space) - return space.wrap(bz2file) - -same_attributes_as_in_file = list(W_File._exposed_method_names) -same_attributes_as_in_file.remove('__init__') -same_attributes_as_in_file.extend([ - 'name', 'mode', 'encoding', 'closed', 'newlines', 'softspace', - 'writelines', '__exit__', '__weakref__']) - -W_BZ2File.typedef = TypeDef( - "BZ2File", - __doc__ = """\ -BZ2File(name [, mode='r', buffering=-1, compresslevel=9]) -> file object - -Open a bz2 file. The mode can be 'r' or 'w', for reading (default) or -writing. When opened for writing, the file will be created if it doesn't -exist, and truncated otherwise. If the buffering argument is given, 0 means -unbuffered, and larger numbers specify the buffer size. If compresslevel -is given, must be a number between 1 and 9. - -Add a 'U' to mode to open the file for input with universal newline -support. Any line ending in the input file will be seen as a '\\n' in -Python. Also, a file so opened gains the attribute 'newlines'; the value -for this attribute is one of None (no newline read yet), '\\r', '\\n', -'\\r\\n' or a tuple containing all the newline types seen. 
Universal -newlines are available only when reading.""", - __new__ = interp2app(descr_bz2file__new__), - __init__ = interp2app(W_BZ2File.file_bz2__init__), - __repr__ = interp2app(W_BZ2File.file_bz2__repr__), - **dict([(name, W_File.typedef.rawdict[name]) - for name in same_attributes_as_in_file])) - -# ____________________________________________________________ - -def open_bz2file_as_stream(space, w_path, mode="r", buffering=-1, - compresslevel=9): - from pypy.rlib.streamio import decode_mode, open_path_helper - from pypy.rlib.streamio import construct_stream_tower - os_flags, universal, reading, writing, basemode, binary = decode_mode(mode) - if reading and writing: - raise OperationError(space.w_ValueError, - space.wrap("cannot open in read-write mode")) - if basemode == "a": - raise OperationError(space.w_ValueError, - space.wrap("cannot append to bz2 file")) - stream = open_path_helper(space.str_w(w_path), os_flags, False) - if reading: - bz2stream = ReadBZ2Filter(space, stream, buffering) - buffering = 0 # by construction, the ReadBZ2Filter acts like - # a read buffer too - no need for another one - else: - assert writing - bz2stream = WriteBZ2Filter(space, stream, compresslevel) - stream = construct_stream_tower(bz2stream, buffering, universal, reading, - writing, binary) - return stream - - -class ReadBZ2Filter(Stream): - - """Standard I/O stream filter that decompresses the stream with bz2.""" - - def __init__(self, space, stream, buffering): - self.space = space - self.stream = stream - self.decompressor = W_BZ2Decompressor(space) - self.readlength = r_longlong(0) - self.buffer = "" - self.pos = 0 - self.finished = False - if buffering < 1024: - buffering = 1024 # minimum amount of compressed data read at once - self.buffering = buffering - - def close(self): - self.stream.close() - - def tell(self): - return self.readlength - - def seek(self, offset, whence): - READMAX = 2**18 # 256KB - - # Make offset relative to the start of the file - if whence == 2: - # Read everything to arrive at the end - while len(self.read(READMAX)) > 0: - pass - offset += self.readlength - elif whence == 1: - offset += self.readlength - elif whence == 0: - pass - else: - raise operationerrfmt(self.space.w_ValueError, - "Invalid value for whence: %d", whence) - - # Make offset relative to the current pos - # Rewind iff necessary - if offset < self.readlength: - self.stream.seek(0, 0) - self.decompressor = W_BZ2Decompressor(self.space) - self.readlength = r_longlong(0) - self.pos = 0 - self.buffer = "" - self.finished = False - else: - offset -= self.readlength - - # Seek - read = r_longlong(0) - while read < offset: - count = offset - read - if count < READMAX: - count = intmask(count) - else: - count = READMAX - length = len(self.read(count)) - if not length: - break - read += length - - def readall(self): - w_result = self.decompressor.decompress(self.stream.readall()) - if self.decompressor.running: - raise OperationError(self.space.w_EOFError, - self.space.wrap("compressed file ended before the logical end-of-the-stream was detected")) - result = self.space.bytes_w(w_result) - self.readlength += len(result) - if len(self.buffer) != self.pos: - pos = self.pos - assert pos >= 0 - result = self.buffer[pos:] + result - self.buffer = '' - self.pos = 0 - return result - - def read(self, n): - # XXX not nice - if n <= 0: - return '' - while self.pos == len(self.buffer): - if self.finished: - return "" - moredata = self.stream.read(max(self.buffering, n)) - if not moredata: - self.finished = True - return 
"" - try: - w_read = self.decompressor.decompress(moredata) - except OperationError, e: - if e.match(self.space, self.space.w_EOFError): - self.finished = True - return "" - raise - self.buffer = self.space.bytes_w(w_read) - self.pos = 0 - if len(self.buffer) - self.pos >= n: - pos = self.pos - assert pos >= 0 - result = self.buffer[pos:pos + n] - self.pos += n - else: - pos = self.pos - assert pos >= 0 - result = self.buffer[pos:] - self.pos = 0 - self.buffer = "" - self.readlength += len(result) - return result - - def peek(self): - pos = self.pos - assert pos >= 0 - return self.buffer[pos:] - - def try_to_find_file_descriptor(self): - return self.stream.try_to_find_file_descriptor() - - def write(self, s): - raise OperationError(self.space.w_IOError, - self.space.wrap("file is not ready for writing")) - -class WriteBZ2Filter(Stream): - """Standard I/O stream filter that compresses the stream with bz2.""" - - def __init__(self, space, stream, compresslevel): - self.stream = stream - self.space = space - self.compressor = W_BZ2Compressor(space, compresslevel) - self.writtenlength = 0 - - def close(self): - self.stream.write(self.space.bytes_w(self.compressor.flush())) - self.stream.close() - - def write(self, data): - self.stream.write(self.space.bytes_w(self.compressor.compress(data))) - self.writtenlength += len(data) - - def tell(self): - return self.writtenlength - - def seek(self, offset, whence): - raise OperationError(self.space.w_IOError, - self.space.wrap("seek works only while reading")) - - def read(self, n): - raise OperationError(self.space.w_IOError, - self.space.wrap("file is not ready for reading")) - - def readall(self): - raise OperationError(self.space.w_IOError, - self.space.wrap("file is not ready for reading")) - - def try_to_find_file_descriptor(self): - return self.stream.try_to_find_file_descriptor() - @unwrap_spec(compresslevel=int) def descr_compressor__new__(space, w_subtype, compresslevel=9): x = space.allocate_instance(W_BZ2Compressor, w_subtype) @@ -639,6 +370,12 @@ BZ2_bzDecompressEnd(self.bzs) lltype.free(self.bzs, flavor='raw') + def eof_w(self, space): + if self.running: + return space.w_False + else: + return space.w_True + @unwrap_spec(data='bufferstr') def decompress(self, data): """decompress(data) -> string @@ -691,6 +428,7 @@ __doc__ = W_BZ2Decompressor.__doc__, __new__ = interp2app(descr_decompressor__new__), unused_data = interp_attrproperty_bytes("unused_data", W_BZ2Decompressor), + eof = GetSetProperty(W_BZ2Decompressor.eof_w), decompress = interp2app(W_BZ2Decompressor.decompress), ) diff --git a/pypy/module/bz2/test/test_bz2_file.py b/pypy/module/bz2/test/test_bz2_file.py --- a/pypy/module/bz2/test/test_bz2_file.py +++ b/pypy/module/bz2/test/test_bz2_file.py @@ -50,7 +50,7 @@ # XXX for unknown reasons, we cannot do allocation checks, as sth is # keeping those objects alive (BZ2File objects) def setup_class(cls): - space = gettestobjspace(usemodules=('bz2',)) + space = gettestobjspace(usemodules=('bz2', 'thread')) cls.space = space cls.w_TEXT = space.wrapbytes(TEXT) cls.w_DATA = space.wrapbytes(DATA) @@ -67,10 +67,6 @@ from bz2 import BZ2File bz2f = BZ2File(self.temppath, mode="w") - assert bz2f.name == self.temppath - assert bz2f.newlines == None - assert bz2f.mode == "wb" - assert bz2f.softspace == False assert bz2f.closed == False bz2f.close() assert bz2f.closed == True @@ -83,7 +79,7 @@ # XXX the following is fine, currently: #raises(ValueError, BZ2File, self.temppath, mode='ww') - BZ2File(self.temppath, mode='wU', buffering=0, 
compresslevel=8) + BZ2File(self.temppath, mode='w', buffering=0, compresslevel=8) BZ2File(self.temppath, mode='wb') # a large buf size BZ2File(self.temppath, mode='w', buffering=4096) @@ -118,7 +114,7 @@ # hack to create a foo file open(self.temppath, "w").close() - + # cannot seek if close bz2f = BZ2File(self.temppath, mode='r') bz2f.close() @@ -132,10 +128,11 @@ bz2f = BZ2File(self.temppath, mode='r') raises(TypeError, bz2f.seek) raises(TypeError, bz2f.seek, "foo") - raises(TypeError, bz2f.seek, 0, "foo") - + raises((TypeError, ValueError), bz2f.seek, 0, "foo") + bz2f.seek(0) assert bz2f.tell() == 0 + bz2f.close() del bz2f # delete from this frame, which is captured in the traceback def test_open_close_del(self): @@ -151,19 +148,6 @@ from bz2 import BZ2File raises(IOError, BZ2File, "/non/existent/path") - def test_open_mode_U(self): - # bug #1194181: bz2.BZ2File opened for write with mode "U" - from bz2 import BZ2File - self.create_temp_file() - - bz2f = BZ2File(self.temppath, "U") - bz2f.close() - f = open(self.temppath) - f.seek(0, 2) - f.read() - assert f.tell() == len(self.DATA) - f.close() - def test_seek_forward(self): from bz2 import BZ2File self.create_temp_file() @@ -201,7 +185,7 @@ bz2f = BZ2File(self.temppath) bz2f.seek(150000) assert bz2f.tell() == len(self.TEXT) - assert bz2f.read() == "" + assert bz2f.read() == b"" bz2f.close() def test_seek_post_end_twice(self): @@ -212,7 +196,7 @@ bz2f.seek(150000) bz2f.seek(150000) assert bz2f.tell() == len(self.TEXT) - assert bz2f.read() == "" + assert bz2f.read() == b"" bz2f.close() def test_seek_pre_start(self): @@ -261,7 +245,7 @@ self.create_broken_temp_file() bz2f = BZ2File(self.temppath) raises(EOFError, bz2f.read) - del bz2f # delete from this frame, which is captured in the traceback + bz2f.close() def test_subsequent_read_broken_file(self): from bz2 import BZ2File @@ -275,19 +259,19 @@ raise Exception("should generate EOFError earlier") except EOFError: pass - del bz2f # delete from this frame, which is captured in the traceback + bz2f.close() def test_read_chunk9(self): from bz2 import BZ2File self.create_temp_file() bz2f = BZ2File(self.temppath) - text_read = "" + text_read = b"" while True: data = bz2f.read(9) # 9 doesn't divide evenly into data length if not data: break - text_read = "%s%s" % (text_read, data) + text_read += data assert text_read == self.TEXT bz2f.close() @@ -299,25 +283,6 @@ assert bz2f.read(100) == self.TEXT[:100] bz2f.close() - def test_universal_newlines_lf(self): - from bz2 import BZ2File - self.create_temp_file() - - bz2f = BZ2File(self.temppath, "rU") - assert bz2f.read() == self.TEXT - assert bz2f.newlines == "\n" - bz2f.close() - - def test_universal_newlines_crlf(self): - from bz2 import BZ2File - self.create_temp_file(crlf=True) - - bz2f = BZ2File(self.temppath, "rU") - data = bz2f.read() - assert data == self.TEXT - assert bz2f.newlines == "\r\n" - bz2f.close() - def test_readlines(self): from bz2 import BZ2File from io import BytesIO @@ -340,35 +305,6 @@ assert list(iter(bz2f)) == sio.readlines() bz2f.close() - def test_xreadlines(self): - from bz2 import BZ2File - from io import BytesIO - self.create_temp_file() - - bz2f = BZ2File(self.temppath) - sio = BytesIO(self.TEXT) - assert list(bz2f.xreadlines()) == sio.readlines() - bz2f.close() - - def test_readlines_bug_1191043(self): - # readlines()/xreadlines() for files containing no newline - from bz2 import BZ2File - - DATA = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t' - f = 
open(self.temppath, "wb") - f.write(DATA) - f.close() - - bz2f = BZ2File(self.temppath) - lines = bz2f.readlines() - bz2f.close() - assert lines == ['Test'] - - bz2f = BZ2File(self.temppath) - xlines = list(bz2f.xreadlines()) - bz2f.close() - assert xlines == ['Test'] - def test_write(self): from bz2 import BZ2File From noreply at buildbot.pypy.org Sat Dec 17 23:15:07 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 Dec 2011 23:15:07 +0100 (CET) Subject: [pypy-commit] pypy py3k: Try to fix translation Message-ID: <20111217221507.D12E58205C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50646:c0d4b0fdc8f1 Date: 2011-12-17 23:14 +0100 http://bitbucket.org/pypy/pypy/changeset/c0d4b0fdc8f1/ Log: Try to fix translation diff --git a/pypy/module/bz2/app_bz2file.py b/pypy/module/bz2/app_bz2file.py --- a/pypy/module/bz2/app_bz2file.py +++ b/pypy/module/bz2/app_bz2file.py @@ -4,7 +4,6 @@ """ import io -import threading from bz2 import BZ2Compressor, BZ2Decompressor @@ -46,6 +45,7 @@ """ # This lock must be recursive, so that BufferedIOBase's # readline(), readlines() and writelines() don't deadlock. + import threading self._lock = threading.RLock() self._fp = None self._closefp = False From noreply at buildbot.pypy.org Sun Dec 18 11:14:40 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 18 Dec 2011 11:14:40 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: add support for optimizing over multiple intermediate labels Message-ID: <20111218101440.E91D682009@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50647:94ea6fc640c5 Date: 2011-12-18 11:01 +0100 http://bitbucket.org/pypy/pypy/changeset/94ea6fc640c5/ Log: add support for optimizing over multiple intermediate labels diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -8,7 +8,7 @@ class BaseTestMultiLabel(BaseTest): enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" - def optimize_loop(self, ops, expected): + def optimize_loop(self, ops, expected, expected_shorts=None): loop = self.parse(ops) if expected != "crash!": expected = self.parse(expected) @@ -32,15 +32,17 @@ if nxt < len(loop.operations): label = loop.operations[nxt] assert label.getopnum() == rop.LABEL - jumpop = ResOperation(rop.JUMP, label.getarglist(), - None, descr=token) - operations.append(jumpop) + if label.getdescr() is None: + label.setdescr(token) + operations.append(label) part.operations = operations + self._do_optimize_loop(part, None) if part.operations[-1].getopnum() == rop.LABEL: last_label = [part.operations.pop()] else: last_label = [] + optimized.operations.extend(part.operations) prv = nxt + 1 @@ -53,9 +55,32 @@ print 'Failed!' 
print + shorts = [op.getdescr().short_preamble + for op in optimized.operations + if op.getopnum() == rop.LABEL] + + if expected_shorts: + for short in shorts: + print + print "Short preamble:" + print '\n'.join([str(o) for o in short]) + + assert expected != "crash!", "should have raised an exception" self.assert_equal(optimized, expected) + if expected_shorts: + assert len(shorts) == len(expected_shorts) + for short, expected_short in zip(shorts, expected_shorts): + expected_short = self.parse(expected_short) + short_preamble = TreeLoop('short preamble') + assert short[0].getopnum() == rop.LABEL + short_preamble.inputargs = short[0].getarglist() + short_preamble.operations = short + self.assert_equal(short_preamble, expected_short, + text_right='expected short preamble') + + return optimized def test_simple(self): @@ -193,8 +218,168 @@ """ with raises(InvalidLoop): self.optimize_loop(ops, ops) - + + def test_two_intermediate_labels_basic_1(self): + ops = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = int_add(i1, i3) + label(p1, i4) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + expected = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1, i2) + i4 = int_add(i1, i2) + label(p1, i4) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + short1 = """ + [p1, i1] + label(p1, i1) + i2 = getfield_gc(p1, descr=valuedescr) + jump(p1, i1, i2) + """ + short2 = """ + [p1, i1] + label(p1, i1) + jump(p1, i1) + """ + self.optimize_loop(ops, expected, expected_shorts=[short1, short2]) + + def test_two_intermediate_labels_basic_2(self): + ops = """ + [p1, i1] + i2 = int_add(i1, 1) + label(p1, i1) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = int_add(i1, i3) + label(p1, i4) + i5 = getfield_gc(p1, descr=valuedescr) + i6 = int_add(i4, i5) + jump(p1, i6) + """ + expected = """ + [p1, i1] + i2 = int_add(i1, 1) + label(p1, i1) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = int_add(i1, i3) + label(p1, i4, i3) + i6 = int_add(i4, i3) + jump(p1, i6, i3) + """ + short1 = """ + [p1, i1] + label(p1, i1) + jump(p1, i1) + """ + short2 = """ + [p1, i1] + label(p1, i1) + i2 = getfield_gc(p1, descr=valuedescr) + jump(p1, i1, i2) + """ + self.optimize_loop(ops, expected, expected_shorts=[short1, short2]) + + def test_two_intermediate_labels_both(self): + ops = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = int_add(i1, i3) + label(p1, i4) + i5 = getfield_gc(p1, descr=valuedescr) + i6 = int_mul(i4, i5) + jump(p1, i6) + """ + expected = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1, i2) + i4 = int_add(i1, i2) + label(p1, i4, i2) + i6 = int_mul(i4, i2) + jump(p1, i6, i2) + """ + short = """ + [p1, i1] + label(p1, i1) + i2 = getfield_gc(p1, descr=valuedescr) + jump(p1, i1, i2) + """ + self.optimize_loop(ops, expected, expected_shorts=[short, short]) + + def test_import_across_multiple_labels_basic(self): + # Not supported, juts make sure we get a functional trace + ops = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1) + i3 = int_add(i1, 1) + label(p1, i1) + i4 = getfield_gc(p1, descr=valuedescr) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + self.optimize_loop(ops, ops) + + def test_import_across_multiple_labels_with_duplication(self): + # Not supported, juts make sure we get a functional trace + ops = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i2) + i3 = int_add(i2, 1) + label(p1, i2) + i4 = 
getfield_gc(p1, descr=valuedescr) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + exported = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + i6 = same_as(i2) + label(p1, i2) + i3 = int_add(i2, 1) + label(p1, i2) + i4 = getfield_gc(p1, descr=valuedescr) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + self.optimize_loop(ops, exported) + def test_import_virtual_across_multiple_labels(self): + ops = """ + [p0, i1] + i1a = int_add(i1, 1) + pv = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(pv, i1a, descr=valuedescr) + label(pv, i1) + i2 = int_mul(i1, 3) + label(pv, i2) + i3 = getfield_gc(pv, descr=valuedescr) + i4 = int_add(i3, i2) + jump(pv, i4) + """ + expected = """ + [p0, i1] + i1a = int_add(i1, 1) + i5 = same_as(i1a) + label(i1a, i1) + i2 = int_mul(i1, 3) + label(i1a, i2) + i4 = int_add(i1a, i2) + jump(i1a, i4) + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestMultiLabel, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -54,6 +54,7 @@ def __init__(self, metainterp_sd, loop, optimizations): self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations) + self.boxes_created_this_iteration = None def fix_snapshot(self, jump_args, snapshot): if snapshot is None: @@ -129,6 +130,12 @@ return # Found nothing to jump to, emit a label instead + + if self.short: + # Construct our short preamble + assert start_label + self.close_bridge(start_label) + self.optimizer.flush() KillHugeIntBounds(self.optimizer).apply() @@ -172,7 +179,13 @@ inputargs = virtual_state.make_inputargs(values, self.optimizer) short_inputargs = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True) - short_boxes = ShortBoxes(self.optimizer, inputargs) + + if self.boxes_created_this_iteration is not None: + for box in self.inputargs: + self.boxes_created_this_iteration[box] = True + + short_boxes = ShortBoxes(self.optimizer, inputargs, + self.boxes_created_this_iteration) self.optimizer.clear_newoperations() for i in range(len(original_jump_args)): diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -559,12 +559,13 @@ pass class ShortBoxes(object): - def __init__(self, optimizer, surviving_boxes): + def __init__(self, optimizer, surviving_boxes, availible_boxes=None): self.potential_ops = {} self.alternatives = {} self.synthetic = {} self.rename = {} self.optimizer = optimizer + self.availible_boxes = availible_boxes if surviving_boxes is not None: for box in surviving_boxes: @@ -635,6 +636,8 @@ return if box in self.short_boxes_in_production: raise BoxNotProducable + if self.availible_boxes is not None and box not in self.availible_boxes: + raise BoxNotProducable self.short_boxes_in_production[box] = True if box in self.potential_ops: From noreply at buildbot.pypy.org Sun Dec 18 11:14:42 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 18 Dec 2011 11:14:42 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: log the inputarg_setup_ops of the exported state to make intermediate jit-log-noopt-loop print proper traces Message-ID: <20111218101442.18FD082009@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50648:2d7b3df99c37 Date: 2011-12-18 11:12 +0100 
http://bitbucket.org/pypy/pypy/changeset/2d7b3df99c37/ Log: log the inputarg_setup_ops of the exported state to make intermediate jit-log-noopt-loop print proper traces diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py --- a/pypy/jit/metainterp/logger.py +++ b/pypy/jit/metainterp/logger.py @@ -5,7 +5,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp.history import Const, ConstInt, Box, \ - BoxInt, ConstFloat, BoxFloat, AbstractFailDescr + BoxInt, ConstFloat, BoxFloat, AbstractFailDescr, TargetToken class Logger(object): @@ -135,6 +135,13 @@ fail_args = '' return s_offset + res + op.getopname() + '(' + args + ')' + fail_args + def _log_inputarg_setup_ops(self, op): + target_token = op.getdescr() + if isinstance(target_token, TargetToken): + if target_token.exported_state: + for op in target_token.exported_state.inputarg_setup_ops: + debug_print(' ' + self.repr_of_resop(op)) + def _log_operations(self, inputargs, operations, ops_offset): if not have_debug_prints(): return @@ -146,6 +153,8 @@ for i in range(len(operations)): op = operations[i] debug_print(self.repr_of_resop(operations[i], ops_offset)) + if op.getopnum() == rop.LABEL: + self._log_inputarg_setup_ops(op) if ops_offset and None in ops_offset: offset = ops_offset[None] debug_print("+%d: --end of the loop--" % offset) From noreply at buildbot.pypy.org Sun Dec 18 11:35:20 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 11:35:20 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: Finish the refactoring of descr.py. Message-ID: <20111218103521.0033A82009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50649:c5baa5ab7372 Date: 2011-12-18 11:29 +0100 http://bitbucket.org/pypy/pypy/changeset/c5baa5ab7372/ Log: Finish the refactoring of descr.py. 
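	A minimal sketch of what the refactored descr.py ends up providing,
	mirroring the updated tests in the diff below. The helper f, the
	inline FUNC type and the exact import paths are assumptions taken
	from the patch, not part of the commit itself; the point is only
	that a single CallDescr now replaces the old per-result-type descr
	classes:

	    from pypy.rpython.lltypesystem import lltype, rffi
	    from pypy.rpython.annlowlevel import llhelper
	    from pypy.jit.backend.llsupport.descr import GcCache, get_call_descr
	    from pypy.jit.metainterp import history

	    cache = GcCache(False)   # untranslated mode, as in the tests
	    # one CallDescr describes a call taking two int-like args -> Char
	    descr = get_call_descr(cache, [lltype.Char, lltype.Signed], lltype.Char)
	    assert descr.arg_classes == "ii"
	    assert descr.get_result_type() == history.INT
	    assert descr.get_result_size() == rffi.sizeof(lltype.Char)

	    # integer-returning calls go through the call_stub_i() stub
	    def f(a, b):
	        return 'c'
	    FUNC = lltype.FuncType([lltype.Char, lltype.Signed], lltype.Char)
	    fnptr = llhelper(lltype.Ptr(FUNC), f)
	    res = descr.call_stub_i(rffi.cast(lltype.Signed, fnptr),
	                            [1, 2], None, None)
	    assert res == ord('c')

	The same descr object also carries call_stub_r() and call_stub_f()
	for pointer and float results, as defined in the patch below.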
diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -5,6 +5,7 @@ from pypy.jit.metainterp.history import AbstractDescr, getkind from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker, longlong +from pypy.jit.codewriter.longlong import is_longlong class GcCache(object): @@ -25,14 +26,6 @@ assert isinstance(ARRAY, lltype.GcArray) -if lltype.SignedLongLong is lltype.Signed: - def is_longlong(TYPE): - return False -else: - assert rffi.sizeof(lltype.SignedLongLong) == rffi.sizeof(lltype.Float) - def is_longlong(TYPE): - return TYPE in (lltype.SignedLongLong, lltype.UnsignedLongLong) - # ____________________________________________________________ # SizeDescrs @@ -71,6 +64,7 @@ cache[STRUCT] = sizedescr return sizedescr + # ____________________________________________________________ # FieldDescrs @@ -141,10 +135,10 @@ cache[ARRAY_OR_STRUCT] = result return result + # ____________________________________________________________ # ArrayDescrs - class ArrayDescr(AbstractDescr): tid = 0 basesize = 0 # workaround for the annotator @@ -181,6 +175,7 @@ cache[ARRAY] = arraydescr return arraydescr + # ____________________________________________________________ # InteriorFieldDescr @@ -210,17 +205,36 @@ cache[(ARRAY, name)] = descr return descr + # ____________________________________________________________ # CallDescrs -class BaseCallDescr(AbstractDescr): - _clsname = '' - loop_token = None +class CallDescr(AbstractDescr): arg_classes = '' # <-- annotation hack + result_type = '\x00' + result_flag = '\x00' ffi_flags = 1 + call_stub_i = staticmethod(lambda func, args_i, args_r, args_f: + 0) + call_stub_r = staticmethod(lambda func, args_i, args_r, args_f: + lltype.nullptr(llmemory.GCREF.TO)) + call_stub_f = staticmethod(lambda func,args_i,args_r,args_f: + longlong.ZEROF) - def __init__(self, arg_classes, extrainfo=None, ffi_flags=1): - self.arg_classes = arg_classes # string of "r" and "i" (ref/int) + def __init__(self, arg_classes, result_type, result_flag, result_size, + extrainfo=None, ffi_flags=1): + """ + 'arg_classes' is a string of characters, one per argument: + 'i', 'r', 'f', 'L', 'S' + + 'result_type' is one character from the same list or 'v' + + 'result_flag' is a FLAG_xxx value about the result + """ + self.arg_classes = arg_classes + self.result_type = result_type + self.result_flag = result_flag + self.result_size = result_size self.extrainfo = extrainfo self.ffi_flags = ffi_flags # NB. the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which @@ -229,7 +243,7 @@ # it is just ignored anyway. 
def __repr__(self): - res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) + res = 'CallDescr(%s)' % (self.arg_classes,) extraeffect = getattr(self.extrainfo, 'extraeffect', None) if extraeffect is not None: res += ' EF=%r' % extraeffect @@ -257,14 +271,14 @@ def get_arg_types(self): return self.arg_classes - def get_return_type(self): - return self._return_type + def get_result_type(self): + return self.result_type - def get_result_size(self, translate_support_code): - raise NotImplementedError + def get_result_size(self): + return self.result_size def is_result_signed(self): - return False # unless overridden + return self.result_flag == FLAG_SIGNED def create_call_stub(self, rtyper, RESULT): from pypy.rlib.clibffi import FFI_DEFAULT_ABI @@ -302,18 +316,25 @@ seen = {'i': 0, 'r': 0, 'f': 0} args = ", ".join([process(c) for c in self.arg_classes]) - if self.get_return_type() == history.INT: + result_type = self.get_result_type() + if result_type == history.INT: result = 'rffi.cast(lltype.Signed, res)' - elif self.get_return_type() == history.REF: + category = 'i' + elif result_type == history.REF: result = 'lltype.cast_opaque_ptr(llmemory.GCREF, res)' - elif self.get_return_type() == history.FLOAT: + category = 'r' + elif result_type == history.FLOAT: result = 'longlong.getfloatstorage(res)' - elif self.get_return_type() == 'L': + category = 'f' + elif result_type == 'L': result = 'rffi.cast(lltype.SignedLongLong, res)' - elif self.get_return_type() == history.VOID: - result = 'None' - elif self.get_return_type() == 'S': + category = 'f' + elif result_type == history.VOID: + result = '0' + category = 'i' + elif result_type == 'S': result = 'longlong.singlefloat2int(res)' + category = 'i' else: assert 0 source = py.code.Source(""" @@ -327,10 +348,13 @@ d = globals().copy() d.update(locals()) exec source.compile() in d - self.call_stub = d['call_stub'] + call_stub = d['call_stub'] + # store the function into one of three attributes, to preserve + # type-correctness of the return value + setattr(self, 'call_stub_%s' % category, call_stub) def verify_types(self, args_i, args_r, args_f, return_type): - assert self._return_type in return_type + assert self.result_type in return_type assert (self.arg_classes.count('i') + self.arg_classes.count('S')) == len(args_i or ()) assert self.arg_classes.count('r') == len(args_r or ()) @@ -338,151 +362,42 @@ self.arg_classes.count('L')) == len(args_f or ()) def repr_of_descr(self): - return '<%s>' % self._clsname + return '' % (self.arg_classes, self.result_type) -class BaseIntCallDescr(BaseCallDescr): - # Calls having a return kind of 'int' (including non-gc pointers). - _return_type = history.INT - call_stub = staticmethod(lambda func, args_i, args_r, args_f: 0) - -#... - -class DynamicIntCallDescr(BaseIntCallDescr): - """ - calldescr that works for every integer type, by explicitly passing it the - size of the result. 
Used only by get_call_descr_dynamic - """ - _clsname = 'DynamicIntCallDescr' - - def __init__(self, arg_classes, result_size, result_sign, extrainfo, ffi_flags): - BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) - assert isinstance(result_sign, bool) - self._result_size = chr(result_size) - self._result_sign = result_sign - - def get_result_size(self, translate_support_code): - return ord(self._result_size) - - def is_result_signed(self): - return self._result_sign - - -class NonGcPtrCallDescr(BaseIntCallDescr): - _clsname = 'NonGcPtrCallDescr' - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrCallDescr(BaseCallDescr): - _clsname = 'GcPtrCallDescr' - _return_type = history.REF - call_stub = staticmethod(lambda func, args_i, args_r, args_f: - lltype.nullptr(llmemory.GCREF.TO)) - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class FloatCallDescr(BaseCallDescr): - _clsname = 'FloatCallDescr' - _return_type = history.FLOAT - call_stub = staticmethod(lambda func,args_i,args_r,args_f: longlong.ZEROF) - def get_result_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class LongLongCallDescr(FloatCallDescr): - _clsname = 'LongLongCallDescr' - _return_type = 'L' - -class VoidCallDescr(BaseCallDescr): - _clsname = 'VoidCallDescr' - _return_type = history.VOID - call_stub = staticmethod(lambda func, args_i, args_r, args_f: None) - def get_result_size(self, translate_support_code): - return 0 - -_SingleFloatCallDescr = None # built lazily - -def getCallDescrClass(RESULT): - if RESULT is lltype.Void: - return VoidCallDescr - if RESULT is lltype.Float: - return FloatCallDescr - if RESULT is lltype.SingleFloat: - global _SingleFloatCallDescr - if _SingleFloatCallDescr is None: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(RESULT) - class SingleFloatCallDescr(getCallDescrClass(rffi.UINT)): - _clsname = 'SingleFloatCallDescr' - _return_type = 'S' - _SingleFloatCallDescr = SingleFloatCallDescr - return _SingleFloatCallDescr - if is_longlong(RESULT): - return LongLongCallDescr - return getDescrClass(RESULT, BaseIntCallDescr, GcPtrCallDescr, - NonGcPtrCallDescr, 'Call', 'get_result_size', - Ellipsis, # <= floatattrname should not be used here - '_is_result_signed') -getCallDescrClass._annspecialcase_ = 'specialize:memo' +def map_type_to_argclass(ARG, accept_void=False): + kind = getkind(ARG) + if kind == 'int': + if ARG is lltype.SingleFloat: return 'S' + else: return 'i' + elif kind == 'ref': return 'r' + elif kind == 'float': + if is_longlong(ARG): return 'L' + else: return 'f' + elif kind == 'void': + if accept_void: return 'v' + raise NotImplementedError('ARG = %r' % (ARG,)) def get_call_descr(gccache, ARGS, RESULT, extrainfo=None): - arg_classes = [] - for ARG in ARGS: - kind = getkind(ARG) - if kind == 'int': - if ARG is lltype.SingleFloat: - arg_classes.append('S') - else: - arg_classes.append('i') - elif kind == 'ref': arg_classes.append('r') - elif kind == 'float': - if is_longlong(ARG): - arg_classes.append('L') - else: - arg_classes.append('f') - else: - raise NotImplementedError('ARG = %r' % (ARG,)) + arg_classes = map(map_type_to_argclass, ARGS) arg_classes = ''.join(arg_classes) - cls = getCallDescrClass(RESULT) - key = (cls, arg_classes, extrainfo) + result_type = map_type_to_argclass(RESULT, accept_void=True) + result_flag = get_type_flag(RESULT) + RESULT_ERASED = RESULT + if RESULT 
is lltype.Void: + result_size = 0 + else: + result_size = symbolic.get_size(RESULT, gccache.translate_support_code) + if isinstance(RESULT, lltype.Ptr): + RESULT_ERASED = llmemory.Address # avoid too many CallDescrs + key = (arg_classes, result_type, result_flag, RESULT_ERASED, extrainfo) cache = gccache._cache_call try: - return cache[key] + calldescr = cache[key] except KeyError: - calldescr = cls(arg_classes, extrainfo) - calldescr.create_call_stub(gccache.rtyper, RESULT) + calldescr = CallDescr(arg_classes, result_type, result_flag, + result_size, extrainfo) + calldescr.create_call_stub(gccache.rtyper, RESULT_ERASED) cache[key] = calldescr - return calldescr - - -# ____________________________________________________________ - -def getDescrClass(TYPE, BaseDescr, GcPtrDescr, NonGcPtrDescr, - nameprefix, methodname, floatattrname, signedattrname, - _cache={}): - if isinstance(TYPE, lltype.Ptr): - if TYPE.TO._gckind == 'gc': - return GcPtrDescr - else: - return NonGcPtrDescr - if TYPE is lltype.SingleFloat: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(TYPE) - TYPE = rffi.UINT - try: - return _cache[nameprefix, TYPE] - except KeyError: - # - class Descr(BaseDescr): - _clsname = '%s%sDescr' % (TYPE._name, nameprefix) - Descr.__name__ = Descr._clsname - # - def method(self, translate_support_code): - return symbolic.get_size(TYPE, translate_support_code) - setattr(Descr, methodname, method) - # - if TYPE is lltype.Float or is_longlong(TYPE): - setattr(Descr, floatattrname, True) - elif (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and - rffi.cast(TYPE, -1) == -1): - setattr(Descr, signedattrname, True) - # - _cache[nameprefix, TYPE] = Descr - return Descr + assert repr(calldescr.result_size) == repr(result_size) + return calldescr diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -604,25 +604,26 @@ rstr.copy_unicode_contents(src, dst, srcstart, dststart, length) def bh_call_i(self, func, calldescr, args_i, args_r, args_f): - assert isinstance(calldescr, BaseIntCallDescr) + assert isinstance(calldescr, CallDescr) if not we_are_translated(): calldescr.verify_types(args_i, args_r, args_f, history.INT + 'S') - return calldescr.call_stub(func, args_i, args_r, args_f) + return calldescr.call_stub_i(func, args_i, args_r, args_f) def bh_call_r(self, func, calldescr, args_i, args_r, args_f): - assert isinstance(calldescr, GcPtrCallDescr) + assert isinstance(calldescr, CallDescr) if not we_are_translated(): calldescr.verify_types(args_i, args_r, args_f, history.REF) - return calldescr.call_stub(func, args_i, args_r, args_f) + return calldescr.call_stub_r(func, args_i, args_r, args_f) def bh_call_f(self, func, calldescr, args_i, args_r, args_f): - assert isinstance(calldescr, FloatCallDescr) # or LongLongCallDescr + assert isinstance(calldescr, CallDescr) if not we_are_translated(): calldescr.verify_types(args_i, args_r, args_f, history.FLOAT + 'L') - return calldescr.call_stub(func, args_i, args_r, args_f) + return calldescr.call_stub_f(func, args_i, args_r, args_f) def bh_call_v(self, func, calldescr, args_i, args_r, args_f): - assert isinstance(calldescr, VoidCallDescr) + assert isinstance(calldescr, CallDescr) if not we_are_translated(): calldescr.verify_types(args_i, args_r, args_f, history.VOID) - return calldescr.call_stub(func, args_i, args_r, args_f) + # the 'i' return value is ignored (and nonsense anyway) + calldescr.call_stub_i(func, args_i, 
args_r, args_f) diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -217,34 +217,34 @@ def test_get_call_descr_not_translated(): c0 = GcCache(False) descr1 = get_call_descr(c0, [lltype.Char, lltype.Signed], lltype.Char) - assert descr1.get_result_size(False) == rffi.sizeof(lltype.Char) - assert descr1.get_return_type() == history.INT + assert descr1.get_result_size() == rffi.sizeof(lltype.Char) + assert descr1.get_result_type() == history.INT assert descr1.arg_classes == "ii" # T = lltype.GcStruct('T') descr2 = get_call_descr(c0, [lltype.Ptr(T)], lltype.Ptr(T)) - assert descr2.get_result_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr2.get_return_type() == history.REF + assert descr2.get_result_size() == rffi.sizeof(lltype.Ptr(T)) + assert descr2.get_result_type() == history.REF assert descr2.arg_classes == "r" # U = lltype.GcStruct('U', ('x', lltype.Signed)) assert descr2 == get_call_descr(c0, [lltype.Ptr(U)], lltype.Ptr(U)) # V = lltype.Struct('V', ('x', lltype.Signed)) - assert (get_call_descr(c0, [], lltype.Ptr(V)).get_return_type() == + assert (get_call_descr(c0, [], lltype.Ptr(V)).get_result_type() == history.INT) # - assert (get_call_descr(c0, [], lltype.Void).get_return_type() == + assert (get_call_descr(c0, [], lltype.Void).get_result_type() == history.VOID) # descr4 = get_call_descr(c0, [lltype.Float, lltype.Float], lltype.Float) - assert descr4.get_result_size(False) == rffi.sizeof(lltype.Float) - assert descr4.get_return_type() == history.FLOAT + assert descr4.get_result_size() == rffi.sizeof(lltype.Float) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c0, [lltype.SingleFloat], lltype.SingleFloat) - assert descr5.get_result_size(False) == rffi.sizeof(lltype.SingleFloat) - assert descr5.get_return_type() == "S" + assert descr5.get_result_size() == rffi.sizeof(lltype.SingleFloat) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_get_call_descr_not_translated_longlong(): @@ -253,13 +253,13 @@ c0 = GcCache(False) # descr5 = get_call_descr(c0, [lltype.SignedLongLong], lltype.Signed) - assert descr5.get_result_size(False) == 4 - assert descr5.get_return_type() == history.INT + assert descr5.get_result_size() == 4 + assert descr5.get_result_type() == history.INT assert descr5.arg_classes == "L" # descr6 = get_call_descr(c0, [lltype.Signed], lltype.SignedLongLong) - assert descr6.get_result_size(False) == 8 - assert descr6.get_return_type() == "L" + assert descr6.get_result_size() == 8 + assert descr6.get_result_type() == "L" assert descr6.arg_classes == "i" def test_get_call_descr_translated(): @@ -267,18 +267,18 @@ T = lltype.GcStruct('T') U = lltype.GcStruct('U', ('x', lltype.Signed)) descr3 = get_call_descr(c1, [lltype.Ptr(T)], lltype.Ptr(U)) - assert isinstance(descr3.get_result_size(True), Symbolic) - assert descr3.get_return_type() == history.REF + assert isinstance(descr3.get_result_size(), Symbolic) + assert descr3.get_result_type() == history.REF assert descr3.arg_classes == "r" # descr4 = get_call_descr(c1, [lltype.Float, lltype.Float], lltype.Float) - assert isinstance(descr4.get_result_size(True), Symbolic) - assert descr4.get_return_type() == history.FLOAT + assert isinstance(descr4.get_result_size(), Symbolic) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = 
get_call_descr(c1, [lltype.SingleFloat], lltype.SingleFloat) - assert isinstance(descr5.get_result_size(True), Symbolic) - assert descr5.get_return_type() == "S" + assert isinstance(descr5.get_result_size(), Symbolic) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_call_descr_extra_info(): @@ -331,16 +331,16 @@ assert descr3i.repr_of_descr() == '' # descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) - assert 'GcPtrCallDescr' in descr4.repr_of_descr() + assert descr4.repr_of_descr() == '' # descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) - assert 'CharCallDescr' in descr4i.repr_of_descr() + assert descr4i.repr_of_descr() == '' # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) - assert 'FloatCallDescr' in descr4f.repr_of_descr() + assert descr4f.repr_of_descr() == '' # descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat) - assert 'SingleFloatCallDescr' in descr5f.repr_of_descr() + assert descr5f.repr_of_descr() == '' def test_call_stubs_1(): c0 = GcCache(False) @@ -350,10 +350,10 @@ def f(a, b): return 'c' - call_stub = descr1.call_stub fnptr = llhelper(lltype.Ptr(lltype.FuncType(ARGS, RES)), f) - res = call_stub(rffi.cast(lltype.Signed, fnptr), [1, 2], None, None) + res = descr1.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [1, 2], None, None) assert res == ord('c') def test_call_stubs_2(): @@ -370,8 +370,8 @@ a = lltype.malloc(ARRAY, 3) opaquea = lltype.cast_opaque_ptr(llmemory.GCREF, a) a[0] = 1 - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [], [opaquea], [longlong.getfloatstorage(3.5)]) + res = descr2.call_stub_f(rffi.cast(lltype.Signed, fnptr), + [], [opaquea], [longlong.getfloatstorage(3.5)]) assert longlong.getrealfloat(res) == 4.5 def test_call_stubs_single_float(): @@ -394,8 +394,8 @@ a = intmask(singlefloat2uint(r_singlefloat(-10.0))) b = intmask(singlefloat2uint(r_singlefloat(3.0))) c = intmask(singlefloat2uint(r_singlefloat(2.0))) - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [a, b, c], [], []) + res = descr2.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [a, b, c], [], []) assert float(uint2singlefloat(rffi.r_uint(res))) == -11.5 def test_field_arraylen_descr(): From noreply at buildbot.pypy.org Sun Dec 18 12:46:11 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 12:46:11 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: Progress. Message-ID: <20111218114611.935A182009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50650:5267d7c61e57 Date: 2011-12-18 12:45 +0100 http://bitbucket.org/pypy/pypy/changeset/5267d7c61e57/ Log: Progress. 
diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -23,7 +23,8 @@ assert isinstance(STRUCT, lltype.GcStruct) def init_array_descr(self, ARRAY, arraydescr): - assert isinstance(ARRAY, lltype.GcArray) + assert (isinstance(ARRAY, lltype.GcArray) or + isinstance(ARRAY, lltype.GcStruct) and ARRAY._arrayfld) # ____________________________________________________________ @@ -73,6 +74,7 @@ FLAG_UNSIGNED = 'U' FLAG_SIGNED = 'S' FLAG_STRUCT = 'X' +FLAG_VOID = 'V' class FieldDescr(AbstractDescr): name = '' @@ -86,6 +88,9 @@ self.field_size = field_size self.flag = flag + def is_field_signed(self): + return self.flag == FLAG_SIGNED + def sort_key(self): return self.offset @@ -156,23 +161,26 @@ return '' % (self.flag, self.itemsize) -def get_array_descr(gccache, ARRAY): +def get_array_descr(gccache, ARRAY_OR_STRUCT): cache = gccache._cache_array try: - return cache[ARRAY] + return cache[ARRAY_OR_STRUCT] except KeyError: - assert isinstance(ARRAY, lltype.Array) - if ARRAY._hints.get('nolength', False): + tsc = gccache.translate_support_code + basesize, itemsize, _ = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + if isinstance(ARRAY_OR_STRUCT, lltype.Array): + ARRAY_INSIDE = ARRAY_OR_STRUCT + else: + ARRAY_INSIDE = ARRAY_OR_STRUCT._flds[ARRAY_OR_STRUCT._arrayfld] + if ARRAY_INSIDE._hints.get('nolength', False): lendescr = None else: - lendescr = get_field_arraylen_descr(gccache, ARRAY) - tsc = gccache.translate_support_code - basesize, itemsize, _ = symbolic.get_array_token(ARRAY, tsc) - flag = get_type_flag(ARRAY.OF) + lendescr = get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT) + flag = get_type_flag(ARRAY_INSIDE.OF) arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag) - if isinstance(ARRAY, lltype.GcArray): - gccache.init_array_descr(ARRAY, arraydescr) - cache[ARRAY] = arraydescr + if ARRAY_OR_STRUCT._gckind == 'gc': + gccache.init_array_descr(ARRAY_OR_STRUCT, arraydescr) + cache[ARRAY_OR_STRUCT] = arraydescr return arraydescr @@ -221,7 +229,7 @@ call_stub_f = staticmethod(lambda func,args_i,args_r,args_f: longlong.ZEROF) - def __init__(self, arg_classes, result_type, result_flag, result_size, + def __init__(self, arg_classes, result_type, result_signed, result_size, extrainfo=None, ffi_flags=1): """ 'arg_classes' is a string of characters, one per argument: @@ -229,11 +237,10 @@ 'result_type' is one character from the same list or 'v' - 'result_flag' is a FLAG_xxx value about the result + 'result_signed' is a boolean True/False """ self.arg_classes = arg_classes self.result_type = result_type - self.result_flag = result_flag self.result_size = result_size self.extrainfo = extrainfo self.ffi_flags = ffi_flags @@ -241,6 +248,22 @@ # makes sense on Windows as it's the one for all the C functions # we are compiling together with the JIT. On non-Windows platforms # it is just ignored anyway. 
+ if result_type == 'v': + result_flag = FLAG_VOID + elif result_type == 'i': + if result_signed: + result_flag = FLAG_SIGNED + else: + result_flag = FLAG_UNSIGNED + elif result_type == history.REF: + result_flag = FLAG_POINTER + elif result_type == history.FLOAT or result_type == 'L': + result_flag = FLAG_FLOAT + elif result_type == 'S': + result_flag = FLAG_UNSIGNED + else: + raise NotImplementedError("result_type = %r" % (result_type,)) + self.result_flag = result_flag def __repr__(self): res = 'CallDescr(%s)' % (self.arg_classes,) @@ -382,20 +405,21 @@ arg_classes = map(map_type_to_argclass, ARGS) arg_classes = ''.join(arg_classes) result_type = map_type_to_argclass(RESULT, accept_void=True) - result_flag = get_type_flag(RESULT) RESULT_ERASED = RESULT if RESULT is lltype.Void: result_size = 0 + result_signed = False else: result_size = symbolic.get_size(RESULT, gccache.translate_support_code) + result_signed = get_type_flag(RESULT) == FLAG_SIGNED if isinstance(RESULT, lltype.Ptr): RESULT_ERASED = llmemory.Address # avoid too many CallDescrs - key = (arg_classes, result_type, result_flag, RESULT_ERASED, extrainfo) + key = (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo) cache = gccache._cache_call try: calldescr = cache[key] except KeyError: - calldescr = CallDescr(arg_classes, result_type, result_flag, + calldescr = CallDescr(arg_classes, result_type, result_signed, result_size, extrainfo) calldescr.create_call_stub(gccache.rtyper, RESULT_ERASED) cache[key] = calldescr diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -1,9 +1,7 @@ from pypy.rlib.rarithmetic import intmask from pypy.jit.metainterp import history from pypy.rpython.lltypesystem import rffi -from pypy.jit.backend.llsupport.descr import ( - DynamicIntCallDescr, NonGcPtrCallDescr, FloatCallDescr, VoidCallDescr, - LongLongCallDescr, getCallDescrClass) +from pypy.jit.backend.llsupport.descr import CallDescr class UnsupportedKind(Exception): pass @@ -16,29 +14,13 @@ argkinds = [get_ffi_type_kind(cpu, arg) for arg in ffi_args] except UnsupportedKind: return None - arg_classes = ''.join(argkinds) - if reskind == history.INT: - size = intmask(ffi_result.c_size) - signed = is_ffi_type_signed(ffi_result) - return DynamicIntCallDescr(arg_classes, size, signed, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.REF: - return NonGcPtrCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.FLOAT: - return FloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.VOID: - return VoidCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'L': - return LongLongCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'S': - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - return SingleFloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - assert False + if reskind == history.VOID: + result_size = 0 + else: + result_size = intmask(ffi_result.c_size) + argkinds = ''.join(argkinds) + return CallDescr(argkinds, reskind, is_ffi_type_signed(ffi_result), + result_size, extrainfo, ffi_flags=ffi_flags) def get_ffi_type_kind(cpu, ffi_type): from pypy.rlib.libffi import types diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -14,11 +14,11 @@ from 
pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.symbolic import WORD -from pypy.jit.backend.llsupport.descr import BaseSizeDescr, BaseArrayDescr +##from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr -from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr -from pypy.jit.backend.llsupport.descr import get_call_descr -from pypy.jit.backend.llsupport.descr import get_field_arraylen_descr +from pypy.jit.backend.llsupport.descr import get_array_descr +##from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr +##from pypy.jit.backend.llsupport.descr import get_call_descr from pypy.jit.backend.llsupport.rewrite import GcRewriterAssembler from pypy.rpython.memory.gctransform import asmgcroot @@ -34,15 +34,12 @@ else: self.fielddescr_vtable = get_field_descr(self, rclass.OBJECT, 'typeptr') - (self.str_basesize, self.str_itemsize, self.str_ofs_length - ) = symbolic.get_array_token(rstr.STR, self.translate_support_code) - (self.unicode_basesize, self.unicode_itemsize, self.unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE,self.translate_support_code) - self.field_strlen_descr = get_field_arraylen_descr(self, rstr.STR) - self.field_unicodelen_descr = get_field_arraylen_descr(self, - rstr.UNICODE) self._generated_functions = [] + def _setup_str(self): + self.str_descr = get_array_descr(self, rstr.STR) + self.unicode_descr = get_array_descr(self, rstr.UNICODE) + def generate_function(self, funcname, func, ARGS, RESULT=llmemory.GCREF): """Generates a variant of malloc with the given name and the given arguments. It should raise MemoryError and return NULL if out of @@ -85,12 +82,14 @@ arraydescr.tid) def gc_malloc_str(self, num_elem): + xxx return self._gc_malloc_array(self.str_basesize, num_elem, self.str_itemsize, self.str_ofs_length, self.str_type_id) def gc_malloc_unicode(self, num_elem): + xxx return self._gc_malloc_array(self.unicode_basesize, num_elem, self.unicode_itemsize, self.unicode_ofs_length, @@ -116,11 +115,11 @@ # ____________________________________________________________ class GcLLDescr_boehm(GcLLDescription): + kind = 'boehm' moving_gc = False gcrootmap = None write_barrier_descr = None fielddescr_tid = None - has_tid = False str_type_id = 0 unicode_type_id = 0 @@ -168,6 +167,7 @@ GcLLDescription.__init__(self, gcdescr, translator, rtyper) # grab a pointer to the Boehm 'malloc' function self.malloc_fn_ptr = self.configure_boehm_once() + self._setup_str() self._make_functions() def _make_functions(self): @@ -181,7 +181,7 @@ self.generate_function('malloc_fixedsize', malloc_fixedsize, [lltype.Signed]) - def malloc_array(basesize, itemsize, num_elem): + def malloc_array(basesize, num_elem, itemsize, ofs_length): xxx try: totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) @@ -194,23 +194,7 @@ arrayptr[ofs_length/WORD] = num_elem return res self.generate_function('malloc_array', malloc_array, - [lltype.Signed] * 3) - - def malloc_str(length): - return llop1.do_malloc_varsize_clear( - llmemory.GCREF, - str_type_id, length, str_basesize, str_itemsize, - str_ofs_length) - self.generate_function('malloc_str', malloc_str, - [lltype.Signed]) - - def malloc_unicode(length): - return llop1.do_malloc_varsize_clear( - llmemory.GCREF, - unicode_type_id, length, unicode_basesize, unicode_itemsize, - unicode_ofs_length) - self.generate_function('malloc_unicode', malloc_unicode, - 
[lltype.Signed]) + [lltype.Signed] * 4) def _gc_malloc(self, size, tid): # Boehm: 'tid' is ignored @@ -642,7 +626,7 @@ class GcLLDescr_framework(GcLLDescription): DEBUG = False # forced to True by x86/test/test_zrpy_gc.py - has_tid = True + kind = 'framework' def __init__(self, gcdescr, translator, rtyper, llop1=llop, really_not_translated=False): @@ -659,16 +643,21 @@ self._make_gcrootmap() self._make_layoutbuilder() self._setup_gcclass() + self._setup_str() + self._setup_stuff(really_not_translated) self._make_functions() def _initialize_for_tests(self): self.layoutbuilder = None - self.str_type_id = 10083 # random, for tests only - self.unicode_type_id = 10085 self.fielddescr_tid = AbstractDescr() self.max_size_of_young_obj = 1000 self.write_barrier_descr = None + def _setup_stuff(self, really_not_translated): + (self.standard_array_basesize, _, self.standard_array_length_ofs) = \ + symbolic.get_array_token(lltype.GcArray(lltype.Signed), + not really_not_translated) + def _check_valid_gc(self): # we need the hybrid or minimark GC for rgc._make_sure_does_not_move() # to work @@ -703,8 +692,6 @@ self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO) - (self.array_basesize, _, self.array_length_ofs) = \ - symbolic.get_array_token(lltype.GcArray(lltype.Signed), True) self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() @@ -718,6 +705,8 @@ def malloc_nursery_slowpath(size): """Allocate 'size' null bytes out of the nursery. Note that the fast path is typically inlined by the backend.""" + if self.DEBUG: + self._random_usage_of_xmm_registers() type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, type_id, size, @@ -732,11 +721,25 @@ check_typeid(type_id) return llop1.do_malloc_varsize_clear( llmemory.GCREF, - type_id, num_elem, self.array_basesize, itemsize, - self.array_length_ofs) + type_id, num_elem, self.standard_array_basesize, itemsize, + self.standard_array_length_ofs) self.generate_function('malloc_array', malloc_array, [lltype.Signed] * 3) + def malloc_array_nonstandard(arraydescr_gcref, num_elem): + """For the rare case of non-standard arrays, i.e. arrays where + self.standard_array_{basesize,length_ofs} is wrong. It can + occur e.g. 
with arrays of floats on Win32.""" + arraydescr = xxxxxxx + type_id = llop.extract_ushort(llgroup.HALFWORD, tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + type_id, num_elem, xxx, itemsize, xxx) + self.generate_function('malloc_array_nonstandard', + malloc_array_nonstandard, + [llmemory.GCREF, lltype.Signed]) + def malloc_str(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, @@ -812,16 +815,6 @@ ## ###self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( ## ### [lltype.Signed], llmemory.GCREF)) ## # -## class ForTestOnly: -## pass -## for_test_only = ForTestOnly() -## for_test_only.x = 1.23 -## def random_usage_of_xmm_registers(): -## x0 = for_test_only.x -## x1 = x0 * 0.1 -## x2 = x0 * 0.2 -## x3 = x0 * 0.3 -## for_test_only.x = x0 + x1 + x2 + x3 ## # ## def malloc_slowpath(size): ## if self.DEBUG: @@ -837,6 +830,18 @@ ## self.malloc_slowpath = malloc_slowpath ## self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) + class ForTestOnly: + pass + for_test_only = ForTestOnly() + for_test_only.x = 1.23 + + def _random_usage_of_xmm_registers(self): + x0 = self.for_test_only.x + x1 = x0 * 0.1 + x2 = x0 * 0.2 + x3 = x0 * 0.3 + self.for_test_only.x = x0 + x1 + x2 + x3 + def get_funcptr_for_malloc_gc_fixed(self): """(size) -> GCREF""" return llhelper(self.MALLOC_GC_FIXED, self.malloc_gc_fixed) diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -9,10 +9,8 @@ from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.symbolic import WORD, unroll_basic_sizes from pypy.jit.backend.llsupport.descr import (get_size_descr, - get_field_descr, BaseFieldDescr, DynamicFieldDescr, get_array_descr, - BaseArrayDescr, DynamicArrayNoLengthDescr, get_call_descr, - BaseIntCallDescr, GcPtrCallDescr, FloatCallDescr, VoidCallDescr, - InteriorFieldDescr, get_interiorfield_descr) + get_field_descr, FieldDescr, get_array_descr, + ArrayDescr, get_call_descr, get_interiorfield_descr) from pypy.jit.backend.llsupport.asmmemmgr import AsmMemoryManager @@ -221,14 +219,14 @@ return get_field_descr(self.gc_ll_descr, STRUCT, fieldname) def unpack_fielddescr(self, fielddescr): - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) return fielddescr.offset unpack_fielddescr._always_inline_ = True def unpack_fielddescr_size(self, fielddescr): - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) ofs = fielddescr.offset - size = fielddescr.get_field_size(self.translate_support_code) + size = fielddescr.field_size sign = fielddescr.is_field_signed() return ofs, size, sign unpack_fielddescr_size._always_inline_ = True diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -3,7 +3,7 @@ from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.codewriter import heaptracker from pypy.jit.backend.llsupport.symbolic import WORD -from pypy.jit.backend.llsupport.descr import BaseSizeDescr, BaseArrayDescr +from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr class GcRewriterAssembler(object): @@ -28,7 +28,6 @@ def __init__(self, gc_ll_descr, cpu): self.gc_ll_descr = gc_ll_descr self.cpu = cpu - self.tsc = self.gc_ll_descr.translate_support_code self.newops = [] self.known_lengths = {} 
self.recent_mallocs = {} # set of variables @@ -81,53 +80,43 @@ self.newops.append(op) elif opnum == rop.NEW_ARRAY: descr = op.getdescr() - assert isinstance(descr, BaseArrayDescr) - self.handle_new_array(descr.tid, - descr.get_base_size(self.tsc), - descr.get_item_size(self.tsc), - descr.get_field_arraylen_descr(), - op) + assert isinstance(descr, ArrayDescr) + self.handle_new_array(descr, op) elif opnum == rop.NEWSTR: - self.handle_new_array(self.gc_ll_descr.str_type_id, - self.gc_ll_descr.str_basesize, - self.gc_ll_descr.str_itemsize, - self.gc_ll_descr.field_strlen_descr, - op) + self.handle_new_array(self.gc_ll_descr.str_descr, op) elif opnum == rop.NEWUNICODE: - self.handle_new_array(self.gc_ll_descr.unicode_type_id, - self.gc_ll_descr.unicode_basesize, - self.gc_ll_descr.unicode_itemsize, - self.gc_ll_descr.field_unicodelen_descr, - op) + self.handle_new_array(self.gc_ll_descr.unicode_descr, op) else: raise NotImplementedError(op.getopname()) def handle_new_fixedsize(self, descr, op): - assert isinstance(descr, BaseSizeDescr) + assert isinstance(descr, SizeDescr) size = descr.size self.gen_malloc_nursery(size, op.result) self.gen_initialize_tid(op.result, descr.tid) - def handle_new_array(self, tid, base_size, item_size, arraylen_descr, op): + def handle_new_array(self, arraydescr, op): v_length = op.getarg(0) total_size = -1 - if item_size == 0: - total_size = base_size + if arraydescr.itemsize == 0: + total_size = arraydescr.basesize elif isinstance(v_length, ConstInt): num_elem = v_length.getint() try: - var_size = ovfcheck(item_size * num_elem) - total_size = ovfcheck(base_size + var_size) + var_size = ovfcheck(arraydescr.itemsize * num_elem) + total_size = ovfcheck(arraydescr.basesize + var_size) except OverflowError: pass # total_size is still -1 if total_size >= 0: self.gen_malloc_nursery(total_size, op.result) - self.gen_initialize_tid(op.result, tid) - self.gen_initialize_len(op.result, v_length, arraylen_descr) + self.gen_initialize_tid(op.result, arraydescr.tid) + self.gen_initialize_len(op.result, v_length, arraydescr.lendescr) + elif self.gc_ll_descr.kind == 'boehm': + self.gen_boehm_malloc_array(arraydescr, v_length, op.result) else: opnum = op.getopnum() if opnum == rop.NEW_ARRAY: - self.gen_malloc_array(item_size, tid, v_length, op.result) + self.gen_malloc_array(arraydescr, v_length, op.result) elif opnum == rop.NEWSTR: self.gen_malloc_str(v_length, op.result) elif opnum == rop.NEWUNICODE: @@ -160,17 +149,31 @@ self._gen_call_malloc_gc([self.gc_ll_descr.c_malloc_fixedsize_fn, ConstInt(size)], v_result) - def gen_malloc_array(self, itemsize, tid, v_num_elem, v_result): - """Generate a CALL_MALLOC_GC(malloc_array_fn, ...).""" - if self.gc_ll_descr.has_tid: + def gen_boehm_malloc_array(self, arraydescr, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) for Boehm.""" + self._gen_call_malloc_gc([self.gc_ll_descr.c_malloc_array_fn, + ConstInt(arraydescr.basesize), + v_num_elem, + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.lendescr.offset)], + v_result) + + def gen_malloc_array(self, arraydescr, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) 
going either + to the standard or the nonstandard version of the function.""" + # + if (arraydescr.basesize == self.gc_ll_descr.standard_array_basesize + and arraydescr.lendescr.offset == + self.gc_ll_descr.standard_array_length_ofs): + # this is a standard-looking array, common case args = [self.gc_ll_descr.c_malloc_array_fn, - ConstInt(itemsize), - ConstInt(tid), + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.tid), v_num_elem] else: - args = [self.gc_ll_descr.c_malloc_array_fn, - ConstInt(basesize), - ConstInt(itemsize), + arraydescr_gcref = xxx + args = [self.gc_ll_descr.c_malloc_array_nonstandard_fn, + ConstPtr(arraydescr_gcref), v_num_elem] self._gen_call_malloc_gc(args, v_result) @@ -296,7 +299,7 @@ self.gen_write_barrier(v_base, v_value) def round_up_for_allocation(self, size): - if self.tsc: + if self.gc_ll_descr.translate_support_code: return llarena.round_up_for_allocation( size, self.gc_ll_descr.minimal_size_in_nursery) else: diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -214,6 +214,13 @@ True: FLAG_SIGNED }[signed] +def test_get_array_descr_str(): + c0 = GcCache(False) + descr1 = get_array_descr(c0, rstr.STR) + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr1.flag == FLAG_UNSIGNED + + def test_get_call_descr_not_translated(): c0 = GcCache(False) descr1 = get_call_descr(c0, [lltype.Char, lltype.Signed], lltype.Char) diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -1,5 +1,6 @@ from pypy.rlib.libffi import types from pypy.jit.codewriter.longlong import is_64_bit +from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport.ffisupport import * @@ -15,7 +16,9 @@ args = [types.sint, types.pointer] descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, None, ffi_flags=42) - assert isinstance(descr, DynamicIntCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_type == 'i' + assert descr.result_flag == FLAG_SIGNED assert descr.arg_classes == 'ii' assert descr.get_ffi_flags() == 42 @@ -24,18 +27,20 @@ assert descr is None # missing floats descr = get_call_descr_dynamic(FakeCPU(supports_floats=True), args, types.void, None, ffi_flags=43) - assert isinstance(descr, VoidCallDescr) + assert descr.result_type == 'v' + assert descr.result_flag == FLAG_VOID assert descr.arg_classes == 'ifi' assert descr.get_ffi_flags() == 43 descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_SIGNED assert descr.is_result_signed() == True descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert isinstance(descr, CallDescr) + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_UNSIGNED assert descr.is_result_signed() == False if not is_64_bit: @@ -44,7 +49,9 @@ assert descr is None # missing longlongs descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True), [], types.slonglong, None, ffi_flags=43) - assert isinstance(descr, LongLongCallDescr) + assert isinstance(descr, CallDescr) + 
assert descr.result_flag == FLAG_FLOAT + assert descr.result_type == 'L' assert descr.get_ffi_flags() == 43 else: assert types.slonglong is types.slong @@ -53,6 +60,6 @@ assert descr is None # missing singlefloats descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True), [], types.float, None, ffi_flags=44) - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - assert isinstance(descr, SingleFloatCallDescr) + assert descr.result_flag == FLAG_UNSIGNED + assert descr.result_type == 'S' assert descr.get_ffi_flags() == 44 diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -49,16 +49,10 @@ tiddescr = self.gc_ll_descr.fielddescr_tid WORD = globals()['WORD'] # - str_type_id = self.gc_ll_descr.str_type_id - str_basesize = self.gc_ll_descr.str_basesize - str_itemsize = self.gc_ll_descr.str_itemsize - strlendescr = get_field_arraylen_descr(self.gc_ll_descr, rstr.STR) - # - unicode_type_id = self.gc_ll_descr.unicode_type_id - unicode_basesize = self.gc_ll_descr.unicode_basesize - unicode_itemsize = self.gc_ll_descr.unicode_itemsize - unicodelendescr = get_field_arraylen_descr(self.gc_ll_descr, - rstr.UNICODE) + strdescr = self.gc_ll_descr.str_descr + unicodedescr = self.gc_ll_descr.unicode_descr + strlendescr = strdescr.lendescr + unicodelendescr = unicodedescr.lendescr # namespace = locals().copy() # @@ -114,8 +108,7 @@ """, """ [] p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(adescr.get_base_size(False) + \ - 10 * adescr.get_item_size(False))d) + %(adescr.basesize + 10 * adescr.itemsize)d) setfield_gc(p0, 10, descr=alendescr) jump() """) @@ -128,10 +121,10 @@ """, """ [i1] p0 = call_malloc_gc(ConstClass(malloc_array), \ - %(adescr.get_base_size(False))d, \ - i1, \ - %(adescr.get_item_size(False))d) - setfield_gc(p0, i1, descr=alendescr) + %(adescr.basesize)d, \ + i1, \ + %(adescr.itemsize)d, \ + %(adescr.lendescr.offset)d) jump() """) @@ -142,7 +135,7 @@ jump() """, """ [p1] - p0 = malloc_gc(102, 0, 0) + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102) setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) jump() """) @@ -154,8 +147,11 @@ jump() """, """ [i1] - p0 = malloc_gc(%(str_basesize)d, i1, %(str_itemsize)d) - setfield_gc(p0, i1, descr=strlendescr) + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(strdescr.basesize)d, \ + i1, \ + %(strdescr.itemsize)d, \ + %(strlendescr.offset)d) jump() """) @@ -166,7 +162,9 @@ jump() """, """ [i1] - p0 = malloc_gc(%(unicode_basesize + 10 * unicode_itemsize)d, 0, 0) + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(unicodedescr.basesize + \ + 10 * unicodedescr.itemsize)d) setfield_gc(p0, 10, descr=unicodelendescr) jump() """) @@ -231,8 +229,7 @@ """, """ [] p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ - %(adescr.get_base_size(False) + \ - 10 * adescr.get_item_size(False))d) + %(adescr.basesize + 10 * adescr.itemsize)d) setfield_gc(p0, 4321, descr=tiddescr) setfield_gc(p0, 10, descr=alendescr) jump() @@ -248,8 +245,7 @@ [] p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ %(sdescr.size + \ - adescr.get_base_size(False) + \ - 10 * adescr.get_item_size(False))d) + adescr.basesize + 10 * adescr.itemsize)d) setfield_gc(p0, 1234, descr=tiddescr) p1 = int_add(p0, %(sdescr.size)d) setfield_gc(p1, 4321, descr=tiddescr) @@ -265,7 +261,7 @@ """, """ [] p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ - %(bdescr.get_base_size(False) + 8)d) 
+ %(bdescr.basesize + 8)d) setfield_gc(p0, 8765, descr=tiddescr) setfield_gc(p0, 6, descr=blendescr) jump() @@ -282,16 +278,16 @@ """, """ [] p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ - %(4 * (bdescr.get_base_size(False) + 8))d) + %(4 * (bdescr.basesize + 8))d) setfield_gc(p0, 8765, descr=tiddescr) setfield_gc(p0, 5, descr=blendescr) - p1 = int_add(p0, %(bdescr.get_base_size(False) + 8)d) + p1 = int_add(p0, %(bdescr.basesize + 8)d) setfield_gc(p1, 8765, descr=tiddescr) setfield_gc(p1, 5, descr=blendescr) - p2 = int_add(p1, %(bdescr.get_base_size(False) + 8)d) + p2 = int_add(p1, %(bdescr.basesize + 8)d) setfield_gc(p2, 8765, descr=tiddescr) setfield_gc(p2, 5, descr=blendescr) - p3 = int_add(p2, %(bdescr.get_base_size(False) + 8)d) + p3 = int_add(p2, %(bdescr.basesize + 8)d) setfield_gc(p3, 8765, descr=tiddescr) setfield_gc(p3, 5, descr=blendescr) jump() @@ -333,7 +329,7 @@ """, """ [] p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(bdescr.get_base_size(False) + 100)d) + %(bdescr.basesize + 100)d) setfield_gc(p0, 8765, descr=tiddescr) setfield_gc(p0, 100, descr=blendescr) jump() @@ -350,14 +346,14 @@ """, """ [] p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ - %(2 * (bdescr.get_base_size(False) + 104))d) + %(2 * (bdescr.basesize + 104))d) setfield_gc(p0, 8765, descr=tiddescr) setfield_gc(p0, 101, descr=blendescr) - p1 = int_add(p0, %(bdescr.get_base_size(False) + 104)d) + p1 = int_add(p0, %(bdescr.basesize + 104)d) setfield_gc(p1, 8765, descr=tiddescr) setfield_gc(p1, 102, descr=blendescr) p2 = call_malloc_nursery(ConstClass(malloc_nursery), \ - %(bdescr.get_base_size(False) + 104)d) + %(bdescr.basesize + 104)d) setfield_gc(p2, 8765, descr=tiddescr) setfield_gc(p2, 103, descr=blendescr) jump() @@ -402,12 +398,12 @@ """, """ [i2] p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ - %(str_basesize + 16 * str_itemsize + \ - unicode_basesize + 10 * unicode_itemsize)d) - setfield_gc(p0, %(str_type_id)d, descr=tiddescr) + %(strdescr.basesize + 16 * strdescr.itemsize + \ + unicodedescr.basesize + 10 * unicodedescr.itemsize)d) + setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr) setfield_gc(p0, 14, descr=strlendescr) - p1 = int_add(p0, %(str_basesize + 16 * str_itemsize)d) - setfield_gc(p1, %(unicode_type_id)d, descr=tiddescr) + p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) + setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) setfield_gc(p1, 10, descr=unicodelendescr) p2 = call_malloc_gc(ConstClass(malloc_unicode), i2) p3 = call_malloc_gc(ConstClass(malloc_str), i2) From noreply at buildbot.pypy.org Sun Dec 18 14:42:28 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 18 Dec 2011 14:42:28 +0100 (CET) Subject: [pypy-commit] pypy py3k: Syntax fixes for "import readline" Message-ID: <20111218134228.69B8C82009@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50651:95d16ee05f34 Date: 2011-12-18 13:08 +0100 http://bitbucket.org/pypy/pypy/changeset/95d16ee05f34/ Log: Syntax fixes for "import readline" diff --git a/lib_pypy/pyrepl/unix_console.py b/lib_pypy/pyrepl/unix_console.py --- a/lib_pypy/pyrepl/unix_console.py +++ b/lib_pypy/pyrepl/unix_console.py @@ -407,7 +407,7 @@ self.partial_char += char try: c = unicode(self.partial_char, self.encoding) - except UnicodeError, e: + except UnicodeError as e: if len(e.args) > 4 and \ e.args[4] == 'unexpected end of data': pass @@ -427,7 +427,7 @@ while 1: # All hail Unix! 
            try:
                self.push_char(os.read(self.input_fd, 1))
-            except (IOError, OSError), err:
+            except (IOError, OSError) as err:
                if err.errno == errno.EINTR:
                    if not self.event_queue.empty():
                        return self.event_queue.get()

From noreply at buildbot.pypy.org  Sun Dec 18 14:42:29 2011
From: noreply at buildbot.pypy.org (amauryfa)
Date: Sun, 18 Dec 2011 14:42:29 +0100 (CET)
Subject: [pypy-commit] pypy py3k: Argh, str() returned a bytes string!
Message-ID: <20111218134229.96FB9823F8@wyvern.cs.uni-duesseldorf.de>

Author: Amaury Forgeot d'Arc
Branch: py3k
Changeset: r50652:0445f697e0a1
Date: 2011-12-18 14:41 +0100
http://bitbucket.org/pypy/pypy/changeset/0445f697e0a1/

Log:	Argh, str() returned a bytes string! Run tests and fix.

diff --git a/pypy/objspace/std/unicodetype.py b/pypy/objspace/std/unicodetype.py
--- a/pypy/objspace/std/unicodetype.py
+++ b/pypy/objspace/std/unicodetype.py
@@ -292,7 +292,7 @@
        raise OperationError(space.w_TypeError, space.wrap(msg))
    return w_res

-def descr_new_(space, w_unicodetype, w_string='', w_encoding=None, w_errors=None):
+def descr_new_(space, w_unicodetype, w_string=u'', w_encoding=None, w_errors=None):
    # NB. the default value of w_obj is really a *wrapped* empty string:
    # there is gateway magic at work
    from pypy.objspace.std.unicodeobject import W_UnicodeObject

From noreply at buildbot.pypy.org  Sun Dec 18 15:31:18 2011
From: noreply at buildbot.pypy.org (alex_gaynor)
Date: Sun, 18 Dec 2011 15:31:18 +0100 (CET)
Subject: [pypy-commit] pypy default: #966 -- removed a line of dead code, thanks to mikefc for the patch
Message-ID: <20111218143118.58DC982009@wyvern.cs.uni-duesseldorf.de>

Author: Alex Gaynor
Branch: 
Changeset: r50653:29b9e8df6ade
Date: 2011-12-18 08:30 -0600
http://bitbucket.org/pypy/pypy/changeset/29b9e8df6ade/

Log:	#966 -- removed a line of dead code, thanks to mikefc for the patch

diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py
--- a/pypy/module/micronumpy/app_numpy.py
+++ b/pypy/module/micronumpy/app_numpy.py
@@ -53,7 +53,6 @@
    i = start
    for j in range(arr.size):
        arr[j] = i
-        j += 1
        i += step
    return arr

From noreply at buildbot.pypy.org  Sun Dec 18 15:51:48 2011
From: noreply at buildbot.pypy.org (arigo)
Date: Sun, 18 Dec 2011 15:51:48 +0100 (CET)
Subject: [pypy-commit] pypy op_malloc_gc: In-progress.
Message-ID: <20111218145148.92F2E82009@wyvern.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch: op_malloc_gc
Changeset: r50654:2fd85080ac75
Date: 2011-12-18 15:06 +0100
http://bitbucket.org/pypy/pypy/changeset/2fd85080ac75/

Log:	In-progress.

diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py
--- a/pypy/jit/backend/llsupport/gc.py
+++ b/pypy/jit/backend/llsupport/gc.py
@@ -14,7 +14,7 @@
 from pypy.jit.metainterp.resoperation import ResOperation, rop
 from pypy.jit.backend.llsupport import symbolic
 from pypy.jit.backend.llsupport.symbolic import WORD
-##from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr
+from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr
 from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr
 from pypy.jit.backend.llsupport.descr import get_array_descr
 ##from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr
@@ -70,30 +70,18 @@
    def gc_malloc(self, sizedescr):
        """Blackhole: do a 'bh_new'.
Also used for 'bh_new_with_vtable', with the vtable pointer set manually afterwards.""" - assert isinstance(sizedescr, BaseSizeDescr) - return self._gc_malloc(sizedescr.size, sizedescr.tid) + assert isinstance(sizedescr, SizeDescr) + return self._bh_malloc(sizedescr) def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self._gc_malloc_array(basesize, num_elem, itemsize, ofs_length, - arraydescr.tid) + assert isinstance(arraydescr, ArrayDescr) + return self._bh_malloc_array(arraydescr, num_elem) def gc_malloc_str(self, num_elem): - xxx - return self._gc_malloc_array(self.str_basesize, num_elem, - self.str_itemsize, - self.str_ofs_length, - self.str_type_id) + return self._bh_malloc_array(self.str_descr, num_elem) def gc_malloc_unicode(self, num_elem): - xxx - return self._gc_malloc_array(self.unicode_basesize, num_elem, - self.unicode_itemsize, - self.unicode_ofs_length, - self.unicode_type_id) + return self._bh_malloc_array(self.unicode_descr, num_elem) def _record_constptrs(self, op, gcrefs_output_list): for i in range(op.numargs()): @@ -171,10 +159,9 @@ self._make_functions() def _make_functions(self): - malloc_fn_ptr = self.malloc_fn_ptr def malloc_fixedsize(size): - res = malloc_fn_ptr(size) + res = self.malloc_fn_ptr(size) if not res: raise MemoryError return res @@ -182,12 +169,11 @@ [lltype.Signed]) def malloc_array(basesize, num_elem, itemsize, ofs_length): - xxx try: totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) except OverflowError: raise MemoryError - res = malloc_fn_ptr(totalsize) + res = self.malloc_fn_ptr(totalsize) if not res: raise MemoryError arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) @@ -196,22 +182,13 @@ self.generate_function('malloc_array', malloc_array, [lltype.Signed] * 4) - def _gc_malloc(self, size, tid): - # Boehm: 'tid' is ignored - return self.malloc_fixedsize(size) + def _bh_malloc(self, sizedescr): + return self.malloc_fixedsize(sizedescr.size) - def _gc_malloc_array(self, basesize, num_elem, itemsize, ofs_length, tid): - # Boehm: 'tid' is ignored - try: - totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) - except OverflowError: - raise MemoryError - res = self.malloc_fn_ptr(totalsize) - if not res: - raise MemoryError - arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) - arrayptr[ofs_length/WORD] = num_elem - return res + def _bh_malloc_array(self, arraydescr, num_elem): + return self.malloc_array(arraydescr.basesize, num_elem, + arraydescr.itemsize, + arraydescr.lendescr.offset) # ____________________________________________________________ @@ -581,12 +558,14 @@ class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): - GCClass = gc_ll_descr.GCClass self.llop1 = gc_ll_descr.llop1 self.WB_FUNCPTR = gc_ll_descr.WB_FUNCPTR self.WB_ARRAY_FUNCPTR = gc_ll_descr.WB_ARRAY_FUNCPTR - self.fielddescr_tid = get_field_descr(gc_ll_descr, GCClass.HDR, 'tid') + self.fielddescr_tid = gc_ll_descr.fielddescr_tid # + GCClass = gc_ll_descr.GCClass + if GCClass is None: # for tests + return self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = ( self.extract_flag_byte(self.jit_wb_if_flag)) @@ -630,7 +609,6 @@ def __init__(self, gcdescr, translator, rtyper, llop1=llop, really_not_translated=False): - from 
pypy.rpython.memory.gctypelayout import check_typeid GcLLDescription.__init__(self, gcdescr, translator, rtyper) self.translator = translator self.llop1 = llop1 @@ -643,20 +621,16 @@ self._make_gcrootmap() self._make_layoutbuilder() self._setup_gcclass() + self._setup_tid() + self._setup_write_barrier() self._setup_str() - self._setup_stuff(really_not_translated) - self._make_functions() + self._make_functions(really_not_translated) def _initialize_for_tests(self): self.layoutbuilder = None self.fielddescr_tid = AbstractDescr() self.max_size_of_young_obj = 1000 - self.write_barrier_descr = None - - def _setup_stuff(self, really_not_translated): - (self.standard_array_basesize, _, self.standard_array_length_ofs) = \ - symbolic.get_array_token(lltype.GcArray(lltype.Signed), - not really_not_translated) + self.GCClass = None def _check_valid_gc(self): # we need the hybrid or minimark GC for rgc._make_sure_does_not_move() @@ -699,8 +673,22 @@ assert self.GCClass.inline_simple_malloc assert self.GCClass.inline_simple_malloc_varsize - def _make_functions(self): + def _setup_tid(self): + self.fielddescr_tid = get_field_descr(self, self.GCClass.HDR, 'tid') + + def _setup_write_barrier(self): + self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( + [llmemory.Address, llmemory.Address], lltype.Void)) + self.WB_ARRAY_FUNCPTR = lltype.Ptr(lltype.FuncType( + [llmemory.Address, lltype.Signed, llmemory.Address], lltype.Void)) + self.write_barrier_descr = WriteBarrierDescr(self) + + def _make_functions(self, really_not_translated): + from pypy.rpython.memory.gctypelayout import check_typeid llop1 = self.llop1 + (self.standard_array_basesize, _, self.standard_array_length_ofs) = \ + symbolic.get_array_token(lltype.GcArray(lltype.Signed), + not really_not_translated) def malloc_nursery_slowpath(size): """Allocate 'size' null bytes out of the nursery. @@ -762,73 +750,26 @@ self.generate_function('malloc_fixedsize', malloc_nursery_slowpath, [lltype.Signed]) -## # make the fixed malloc function, with one argument -## def malloc_gc_fixed(size): -## type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here -## res = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, -## type_id, size, -## False, False, False) -## #llop.debug_print(lltype.Void, "\tmalloc_basic", size, "-->", res) -## # In case the operation above failed, we are returning NULL -## # from this function to assembler. There is also an RPython -## # exception set, typically MemoryError; but it's easier and -## # faster to check for the NULL return value, as done by -## # translator/exceptiontransform.py. 
-## return res -## self.malloc_gc_fixed = malloc_gc_fixed -## self.MALLOC_GC_FIXED = lltype.Ptr( -## lltype.FuncType([lltype.Signed], llmemory.GCREF)) -## # -## # make the varsize malloc function, with three arguments -## def malloc_gc_variable(basesize, num_elem, itemsize): -## xx -## self.malloc_gc_variable = malloc_gc_variable -## self.MALLOC_GC_VARIABLE = lltype.Ptr( -## lltype.FuncType([lltype.Signed] * 3, llmemory.GCREF)) -## # -## self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( -## [llmemory.Address, llmemory.Address], lltype.Void)) -## self.WB_ARRAY_FUNCPTR = lltype.Ptr(lltype.FuncType( -## [llmemory.Address, lltype.Signed, llmemory.Address], lltype.Void)) -## self.write_barrier_descr = WriteBarrierDescr(self) -## self.fielddescr_tid = self.write_barrier_descr.fielddescr_tid -## # -## (str_basesize, str_itemsize, str_ofs_length -## ) = symbolic.get_array_token(rstr.STR, True) -## (unicode_basesize, unicode_itemsize, unicode_ofs_length -## ) = symbolic.get_array_token(rstr.UNICODE, True) -## self.str_type_id = self.layoutbuilder.get_type_id(rstr.STR) -## self.unicode_type_id = self.layoutbuilder.get_type_id(rstr.UNICODE) -## # -## def malloc_str(length): -## return llop1.do_malloc_varsize_clear( -## llmemory.GCREF, -## str_type_id, length, str_basesize, str_itemsize, -## str_ofs_length) -## def malloc_unicode(length): -## return llop1.do_malloc_varsize_clear( -## llmemory.GCREF, -## unicode_type_id, length, unicode_basesize,unicode_itemsize, -## unicode_ofs_length) -## ###self.malloc_str = malloc_str -## ###self.malloc_unicode = malloc_unicode -## ###self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( -## ### [lltype.Signed], llmemory.GCREF)) -## # -## # -## def malloc_slowpath(size): -## if self.DEBUG: -## random_usage_of_xmm_registers() -## assert size >= self.minimal_size_in_nursery -## # NB. although we call do_malloc_fixedsize_clear() here, -## # it's a bit of a hack because we set tid to 0 and may -## # also use it to allocate varsized objects. The tid -## # and possibly the length are both set afterward. 
-## gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, -## 0, size, False, False, False) -## return rffi.cast(lltype.Signed, gcref) -## self.malloc_slowpath = malloc_slowpath -## self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) + def _bh_malloc(self, sizedescr): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, sizedescr.tid) + check_typeid(type_id) + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, sizedescr.size, + False, False, False) + + def _bh_malloc_array(self, arraydescr, num_elem): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, arraydescr.tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear(llmemory.GCREF, + type_id, num_elem, + arraydescr.basesize, + arraydescr.itemsize, + arraydescr.lendescr.offset) + class ForTestOnly: pass @@ -842,14 +783,6 @@ x3 = x0 * 0.3 self.for_test_only.x = x0 + x1 + x2 + x3 - def get_funcptr_for_malloc_gc_fixed(self): - """(size) -> GCREF""" - return llhelper(self.MALLOC_GC_FIXED, self.malloc_gc_fixed) - - def get_funcptr_for_malloc_gc_variable(self): - """(base_size, num_elem, item_size) -> GCREF""" - return llhelper(self.MALLOC_GC_VARIABLE, self.malloc_gc_variable) - def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) return rffi.cast(lltype.Signed, nurs_addr) diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -1,5 +1,5 @@ from pypy.rlib.rarithmetic import ovfcheck -from pypy.jit.metainterp.history import ConstInt +from pypy.jit.metainterp.history import ConstInt, BoxPtr, ConstPtr from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.codewriter import heaptracker from pypy.jit.backend.llsupport.symbolic import WORD diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -32,8 +32,8 @@ A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(gc_ll_descr, A) p = gc_ll_descr.gc_malloc_array(arraydescr, 10) - assert record == [(arraydescr.get_base_size(False) + - 10 * arraydescr.get_item_size(False), p)] + assert record == [(arraydescr.basesize + + 10 * arraydescr.itemsize, p)] del record[:] # ---------- gc_malloc_str ---------- p = gc_ll_descr.gc_malloc_str(10) @@ -246,47 +246,50 @@ def __init__(self): self.record = [] + def _malloc(self, type_id, size): + tid = llop.combine_ushort(lltype.Signed, type_id, 0) + x = llmemory.raw_malloc(self.gcheaderbuilder.size_gc_header + size) + x += self.gcheaderbuilder.size_gc_header + return x, tid + def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size, has_finalizer, has_light_finalizer, contains_weakptr): assert not contains_weakptr assert not has_finalizer assert not has_light_finalizer - assert rffi.cast(lltype.Signed, type_id) == 0 - x = llmemory.raw_malloc(self.gcheaderbuilder.size_gc_header + size) - x += self.gcheaderbuilder.size_gc_header - p = llmemory.cast_adr_to_ptr(x, RESTYPE) - self.record.append(("fixedsize", repr(size), p)) + p, tid = self._malloc(type_id, size) + p = llmemory.cast_adr_to_ptr(p, RESTYPE) + self.record.append(("fixedsize", repr(size), tid, p)) return p -## def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, 
-## itemsize, offset_to_length): -## p = llmemory.raw_malloc(size + itemsize * length) -## (p + offset_to_length).signed[0] = length -## p = llmemory.cast_adr_to_ptr(p, RESTYPE) -## tid = llop.combine_ushort(lltype.Signed, type_id, 0) -## self.record.append(("varsize", tid, length, -## repr(size), repr(itemsize), -## repr(offset_to_length), p)) -## return p + def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, + itemsize, offset_to_length): + p, tid = self._malloc(type_id, size + itemsize * length) + (p + offset_to_length).signed[0] = length + p = llmemory.cast_adr_to_ptr(p, RESTYPE) + self.record.append(("varsize", tid, length, + repr(size), repr(itemsize), + repr(offset_to_length), p)) + return p -## def _write_barrier_failing_case(self, adr_struct, adr_newptr): -## self.record.append(('barrier', adr_struct, adr_newptr)) + def _write_barrier_failing_case(self, adr_struct, adr_newptr): + self.record.append(('barrier', adr_struct, adr_newptr)) -## def get_write_barrier_failing_case(self, FPTRTYPE): -## return llhelper(FPTRTYPE, self._write_barrier_failing_case) + def get_write_barrier_failing_case(self, FPTRTYPE): + return llhelper(FPTRTYPE, self._write_barrier_failing_case) -## _have_wb_from_array = False + _have_wb_from_array = False -## def _write_barrier_from_array_failing_case(self, adr_struct, v_index): -## self.record.append(('barrier_from_array', adr_struct, v_index)) + def _write_barrier_from_array_failing_case(self, adr_struct, v_index): + self.record.append(('barrier_from_array', adr_struct, v_index)) -## def get_write_barrier_from_array_failing_case(self, FPTRTYPE): -## if self._have_wb_from_array: -## return llhelper(FPTRTYPE, -## self._write_barrier_from_array_failing_case) -## else: -## return lltype.nullptr(FPTRTYPE.TO) + def get_write_barrier_from_array_failing_case(self, FPTRTYPE): + if self._have_wb_from_array: + return llhelper(FPTRTYPE, + self._write_barrier_from_array_failing_case) + else: + return lltype.nullptr(FPTRTYPE.TO) class TestFramework(object): @@ -345,22 +348,18 @@ sizedescr = get_size_descr(self.gc_ll_descr, S) p = self.gc_ll_descr.gc_malloc(sizedescr) assert lltype.typeOf(p) == llmemory.GCREF - assert self.llop1.record == [("fixedsize", repr(sizedescr.size), p)] - p1 = lltype.cast_opaque_ptr(lltype.Ptr(S), p) - hdr = self.gc_ll_descr.gcheaderbuilder.header_of_object(p1) - assert hdr.tid == sizedescr.tid + assert self.llop1.record == [("fixedsize", repr(sizedescr.size), + sizedescr.tid, p)] def test_gc_malloc_array(self): A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(self.gc_ll_descr, A) p = self.gc_ll_descr.gc_malloc_array(arraydescr, 10) assert self.llop1.record == [("varsize", arraydescr.tid, 10, - repr(arraydescr.get_base_size(True)), - repr(arraydescr.get_item_size(True)), - repr(arraydescr.get_ofs_length(True)), + repr(arraydescr.basesize), + repr(arraydescr.itemsize), + repr(arraydescr.lendescr.offset), p)] - assert repr(self.gc_ll_descr.args_for_new_array(arraydescr)) == repr( - [arraydescr.get_item_size(True), arraydescr.tid]) def test_gc_malloc_str(self): p = self.gc_ll_descr.gc_malloc_str(10) @@ -430,8 +429,7 @@ operations = gc_ll_descr.rewrite_assembler(None, operations, []) assert len(operations) == 0 - def test_rewrite_assembler_1(self): - # check recording of ConstPtrs + def test_record_constptrs(self): class MyFakeCPU(object): def cast_adr_to_int(self, adr): assert adr == "some fake address" @@ -552,8 +550,7 @@ def test_rewrite_assembler_5(self): S = lltype.GcStruct('S') A = lltype.GcArray(lltype.Struct('A', 
('x', lltype.Ptr(S)))) - interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, - A.OF, 'x') + interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, 'x') wbdescr = self.gc_ll_descr.write_barrier_descr ops = parse(""" [p1, p2] @@ -572,19 +569,18 @@ equaloplists(operations, expected.operations) def test_rewrite_assembler_initialization_store(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + S = lltype.GcStruct('S', ('x', lltype.Signed)) + sdescr = get_size_descr(self.gc_ll_descr, S) xdescr = get_field_descr(self.gc_ll_descr, S, 'x') ops = parse(""" [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) + p0 = new(descr=sdescr) setfield_gc(p0, p1, descr=xdescr) jump() """, namespace=locals()) expected = parse(""" [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) + p0 = new(descr=sdescr) # no write barrier setfield_gc(p0, p1, descr=xdescr) jump() diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -21,10 +21,11 @@ sdescr.tid = 1234 # T = lltype.GcStruct('T', ('y', lltype.Signed), - ('z', lltype.Signed), + ('z', lltype.Ptr(S)), ('t', lltype.Signed)) tdescr = get_size_descr(self.gc_ll_descr, T) tdescr.tid = 5678 + tzdescr = get_field_descr(self.gc_ll_descr, T, 'z') # A = lltype.GcArray(lltype.Signed) adescr = get_array_descr(self.gc_ll_descr, A) @@ -47,6 +48,7 @@ register_known_gctype(self.cpu, o_vtable, O) # tiddescr = self.gc_ll_descr.fielddescr_tid + wbdescr = self.gc_ll_descr.write_barrier_descr WORD = globals()['WORD'] # strdescr = self.gc_ll_descr.str_descr @@ -409,3 +411,15 @@ p3 = call_malloc_gc(ConstClass(malloc_str), i2) jump() """) + + def test_write_barrier_before_setfield_gc(self): + self.check_rewrite(""" + [p1, p2] + setfield_gc(p1, p2, descr=tzdescr) + jump() + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setfield_raw(p1, p2, descr=tzdescr) + jump() + """) From noreply at buildbot.pypy.org Sun Dec 18 15:51:49 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 15:51:49 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: Port the remaining rewrite tests out of test_gc, and a few fixes. Message-ID: <20111218145149.BDCB582009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50655:cc7e577077b7 Date: 2011-12-18 15:51 +0100 http://bitbucket.org/pypy/pypy/changeset/cc7e577077b7/ Log: Port the remaining rewrite tests out of test_gc, and a few fixes. 
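A minimal sketch, not taken from the changeset below, of the rule that its new rewrite tests exercise: a setfield_gc into an object allocated earlier in the same trace, with no potentially-collecting operation (a call, a label, ...) in between, is emitted without a write barrier, because the object is guaranteed to still sit in the nursery. The helper name and its arguments are invented here for illustration; the rewriter itself tracks this through its recent_mallocs set.

    def needs_write_barrier(recent_mallocs, v_target):
        # Freshly nursery-allocated objects cannot yet be "old", so an
        # initializing store into them cannot create an old-to-young
        # pointer and needs no barrier.
        return v_target not in recent_mallocs
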
diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -602,6 +602,9 @@ funcaddr = llmemory.cast_ptr_to_adr(funcptr) return cpu.cast_adr_to_int(funcaddr) # this may return 0 + def has_write_barrier_from_array(self, cpu): + return self.get_write_barrier_from_array_fn(cpu) != 0 + class GcLLDescr_framework(GcLLDescription): DEBUG = False # forced to True by x86/test/test_zrpy_gc.py diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -46,8 +46,11 @@ if op.is_malloc(): self.handle_malloc_operation(op) continue - elif op.can_malloc() or op.getopnum() == rop.LABEL: + elif op.can_malloc(): self.emitting_an_operation_that_can_collect() + elif op.getopnum() == rop.LABEL: + self.emitting_an_operation_that_can_collect() + self.known_lengths.clear() # ---------- write barriers ---------- if self.gc_ll_descr.write_barrier_descr is not None: if op.getopnum() == rop.SETFIELD_GC: @@ -98,15 +101,16 @@ def handle_new_array(self, arraydescr, op): v_length = op.getarg(0) total_size = -1 - if arraydescr.itemsize == 0: - total_size = arraydescr.basesize - elif isinstance(v_length, ConstInt): + if isinstance(v_length, ConstInt): num_elem = v_length.getint() + self.known_lengths[op.result] = num_elem try: var_size = ovfcheck(arraydescr.itemsize * num_elem) total_size = ovfcheck(arraydescr.basesize + var_size) except OverflowError: pass # total_size is still -1 + elif arraydescr.itemsize == 0: + total_size = arraydescr.basesize if total_size >= 0: self.gen_malloc_nursery(total_size, op.result) self.gen_initialize_tid(op.result, arraydescr.tid) @@ -282,7 +286,7 @@ def gen_write_barrier_array(self, v_base, v_index, v_value): write_barrier_descr = self.gc_ll_descr.write_barrier_descr - if write_barrier_descr.get_write_barrier_from_array_fn(self.cpu) != 0: + if write_barrier_descr.has_write_barrier_from_array(self.cpu): # If we know statically the length of 'v', and it is not too # big, then produce a regular write_barrier. If it's unknown or # too big, produce instead a write_barrier_from_array. 
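The hunk above states the policy only in a comment; the tests further down in this mail pin the cutoff at 130 items. A rough sketch of that decision, with the helper and the free-standing constant invented here for illustration (the real check lives in gen_write_barrier_array and uses the known_lengths table):

    LARGE_ARRAY_CUTOFF = 130   # value taken from test_write_barrier_before_long_array

    def choose_write_barrier(known_length):
        # known_length is -1 when the trace gives no static array length
        if 0 <= known_length < LARGE_ARRAY_CUTOFF:
            return 'cond_call_gc_wb'        # plain write barrier
        return 'cond_call_gc_wb_array'      # array variant, also receives the index
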
diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -456,186 +456,6 @@ assert operations2 == operations assert gcrefs == [s_gcref] - def test_rewrite_assembler_2(self): - # check write barriers before SETFIELD_GC - v_base = BoxPtr() - v_value = BoxPtr() - field_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETFIELD_GC, [v_base, v_value], None, - descr=field_descr), - ] - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, operations, - []) - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETFIELD_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_value - assert operations[1].getdescr() == field_descr - - def test_rewrite_assembler_3(self): - # check write barriers before SETARRAYITEM_GC - for new_length in (-1, 5, 5000): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - rewriter = GcRewriterAssembler(self.gc_ll_descr, self.fake_cpu) - if new_length >= 0: - rewriter.known_lengths[v_base] = new_length - operations = get_deep_immutable_oplist(operations) - operations = rewriter.rewrite(operations) - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_4(self): - # check write barriers before SETARRAYITEM_GC, - # if we have actually a write_barrier_from_array. 
- self.llop1._have_wb_from_array = True - for new_length in (-1, 5, 5000): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - rewriter = GcRewriterAssembler(self.gc_ll_descr, self.fake_cpu) - if new_length >= 0: - rewriter.known_lengths[v_base] = new_length - operations = get_deep_immutable_oplist(operations) - operations = rewriter.rewrite(operations) - assert len(operations) == 2 - # - if 0 <= new_length < 130: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - else: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB_ARRAY - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_index - assert operations[0].getarg(2) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_5(self): - S = lltype.GcStruct('S') - A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) - interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, 'x') - wbdescr = self.gc_ll_descr.write_barrier_descr - ops = parse(""" - [p1, p2] - setinteriorfield_gc(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - expected = parse(""" - [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setinteriorfield_raw(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store(self): - S = lltype.GcStruct('S', ('x', lltype.Signed)) - sdescr = get_size_descr(self.gc_ll_descr, S) - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new(descr=sdescr) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new(descr=sdescr) - # no write barrier - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_2(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - wbdescr = self.gc_ll_descr.write_barrier_descr - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def 
test_rewrite_assembler_initialization_store_3(self): - A = lltype.GcArray(lltype.Ptr(lltype.GcStruct('S'))) - arraydescr = get_array_descr(self.gc_ll_descr, A) - ops = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - class TestFrameworkMiniMark(TestFramework): gc = 'minimark' diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -30,12 +30,24 @@ A = lltype.GcArray(lltype.Signed) adescr = get_array_descr(self.gc_ll_descr, A) adescr.tid = 4321 - alendescr = get_field_arraylen_descr(self.gc_ll_descr, A) + alendescr = adescr.lendescr # B = lltype.GcArray(lltype.Char) bdescr = get_array_descr(self.gc_ll_descr, B) bdescr.tid = 8765 - blendescr = get_field_arraylen_descr(self.gc_ll_descr, B) + blendescr = bdescr.lendescr + # + C = lltype.GcArray(lltype.Ptr(S)) + cdescr = get_array_descr(self.gc_ll_descr, C) + cdescr.tid = 8111 + clendescr = cdescr.lendescr + # + INTERIOR = lltype.GcArray(('z', lltype.Ptr(S))) + interiordescr = get_array_descr(self.gc_ll_descr, INTERIOR) + interiordescr.tid = 1291 + interiorlendescr = interiordescr.lendescr + interiorzdescr = get_interiorfield_descr(self.gc_ll_descr, + INTERIOR, 'z') # E = lltype.GcStruct('Empty') edescr = get_size_descr(self.gc_ll_descr, E) @@ -183,6 +195,8 @@ gcdescr = get_description(config_) self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, really_not_translated=True) + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: True) # class FakeCPU(object): def sizeof(self, STRUCT): @@ -423,3 +437,185 @@ setfield_raw(p1, p2, descr=tzdescr) jump() """) + + def test_write_barrier_before_array_without_from_array(self): + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: False) + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_short_array(self): + self.check_rewrite(""" + [i2, p3] + p1 = new_array(129, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery(ConstClass(malloc_nursery), \ + %(cdescr.basesize + 129 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 129, descr=clendescr) + call(123456) + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_long_array(self): + # the limit of "being too long" is fixed, arbitrarily, at 130 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(130, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery(ConstClass(malloc_nursery), \ + %(cdescr.basesize + 130 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 130, descr=clendescr) + call(123456) + 
cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_unknown_array(self): + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_label_makes_size_unknown(self): + self.check_rewrite(""" + [i2, p3] + p1 = new_array(5, descr=cdescr) + label(p1, i2, p3) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery(ConstClass(malloc_nursery), \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 5, descr=clendescr) + label(p1, i2, p3) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_setinteriorfield_gc(self): + self.check_rewrite(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """) + + def test_initialization_store(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + %(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_2(self): + self.check_rewrite(""" + [] + p0 = new(descr=tdescr) + p1 = new(descr=sdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + %(tdescr.size + sdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = int_add(p0, %(tdescr.size)d) + setfield_gc(p1, 1234, descr=tiddescr) + # <<>> + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_array(self): + self.check_rewrite(""" + [p1, i2] + p0 = new_array(5, descr=cdescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """, """ + [p1, i2] + p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p0, 8111, descr=tiddescr) + setfield_gc(p0, 5, descr=clendescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """) + + def test_non_initialization_store(self): + self.check_rewrite(""" + [i0] + p0 = new(descr=tdescr) + p1 = newstr(i0) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [i0] + p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + %(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = call_malloc_gc(ConstClass(malloc_str), i0) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) + + def test_non_initialization_store_label(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + label(p0, p1) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + %(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + label(p0, p1) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) From noreply at buildbot.pypy.org Sun Dec 18 16:00:59 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 16:00:59 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: Fixes. 
Message-ID: <20111218150059.56FAA82009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50656:9ed69ff1e09e Date: 2011-12-18 16:00 +0100 http://bitbucket.org/pypy/pypy/changeset/9ed69ff1e09e/ Log: Fixes. diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -88,6 +88,9 @@ self.field_size = field_size self.flag = flag + def is_pointer_field(self): + return self.flag == FLAG_POINTER + def is_field_signed(self): return self.flag == FLAG_SIGNED @@ -157,6 +160,12 @@ self.lendescr = lendescr # or None, if no length self.flag = flag + def is_array_of_pointers(self): + return self.flag == FLAG_POINTER + + def is_item_signed(self): + return self.flag == FLAG_SIGNED + def repr_of_descr(self): return '' % (self.flag, self.itemsize) diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -8,9 +8,10 @@ from pypy.jit.backend.model import AbstractCPU from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.symbolic import WORD, unroll_basic_sizes -from pypy.jit.backend.llsupport.descr import (get_size_descr, - get_field_descr, FieldDescr, get_array_descr, - ArrayDescr, get_call_descr, get_interiorfield_descr) +from pypy.jit.backend.llsupport.descr import ( + get_size_descr, get_field_descr, get_array_descr, + get_call_descr, get_interiorfield_descr, + FieldDescr, ArrayDescr, CallDescr) from pypy.jit.backend.llsupport.asmmemmgr import AsmMemoryManager @@ -235,7 +236,7 @@ return get_array_descr(self.gc_ll_descr, A) def interiorfielddescrof(self, A, fieldname): - return get_interiorfield_descr(self.gc_ll_descr, A, A.OF, fieldname) + return get_interiorfield_descr(self.gc_ll_descr, A, fieldname) def interiorfielddescrof_dynamic(self, offset, width, fieldsize, is_pointer, is_float, is_signed): @@ -244,14 +245,14 @@ return InteriorFieldDescr(arraydescr, fielddescr) def unpack_arraydescr(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - return arraydescr.get_base_size(self.translate_support_code) + assert isinstance(arraydescr, ArrayDescr) + return arraydescr.basesize unpack_arraydescr._always_inline_ = True def unpack_arraydescr_size(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - ofs = arraydescr.get_base_size(self.translate_support_code) - size = arraydescr.get_item_size(self.translate_support_code) + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.basesize + size = arraydescr.itemsize sign = arraydescr.is_item_signed() return ofs, size, sign unpack_arraydescr_size._always_inline_ = True @@ -279,8 +280,8 @@ # ____________________________________________________________ def bh_arraylen_gc(self, arraydescr, array): - assert isinstance(arraydescr, BaseArrayDescr) - ofs = arraydescr.get_ofs_length(self.translate_support_code) + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.lendescr.offset return rffi.cast(rffi.CArrayPtr(lltype.Signed), array)[ofs/WORD] @specialize.argtype(2) From noreply at buildbot.pypy.org Sun Dec 18 16:11:09 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 18 Dec 2011 16:11:09 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: hg merge default Message-ID: <20111218151109.0C7BD82009@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50657:a7226dfd9add Date: 2011-12-18 15:04 +0100 
http://bitbucket.org/pypy/pypy/changeset/a7226dfd9add/ Log: hg merge default diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -51,6 +51,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." + state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \"%s\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \"%s\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -94,17 +112,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." - for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \"%s\" missing from %s" - err = err % (missing, host) - w_err = space.wrap(err) - raise OperationError(space.w_TypeError, w_err) - raise AssertionError("should not reach here") class mod(AST): @@ -112,7 +119,6 @@ class Module(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -128,7 +134,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Module') + self.missing_field(space, ['body'], 'Module') else: pass w_list = self.w_body @@ -145,7 +151,6 @@ class Interactive(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -161,7 +166,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Interactive') + self.missing_field(space, ['body'], 'Interactive') else: pass w_list = self.w_body @@ -178,7 +183,6 @@ class Expression(mod): - def __init__(self, body): self.body = body self.initialization_state = 1 @@ -192,7 +196,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Expression') + self.missing_field(space, ['body'], 'Expression') else: pass self.body.sync_app_attrs(space) @@ -200,7 +204,6 @@ class Suite(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -216,7 +219,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Suite') + self.missing_field(space, ['body'], 'Suite') else: pass w_list = self.w_body @@ -232,15 +235,13 @@ class stmt(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class FunctionDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, args, body, decorator_list, lineno, col_offset): self.name = name self.args = args @@ -264,7 +265,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'args', 'body', 'decorator_list', 'lineno', 'col_offset'], 'FunctionDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef') else: pass 
self.args.sync_app_attrs(space) @@ -292,9 +293,6 @@ class ClassDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, bases, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases @@ -320,7 +318,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'bases', 'body', 'decorator_list', 'lineno', 'col_offset'], 'ClassDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef') else: pass w_list = self.w_bases @@ -357,9 +355,6 @@ class Return(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -374,10 +369,10 @@ return visitor.visit_Return(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Return') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Return') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -385,9 +380,6 @@ class Delete(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, targets, lineno, col_offset): self.targets = targets self.w_targets = None @@ -404,7 +396,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['targets', 'lineno', 'col_offset'], 'Delete') + self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete') else: pass w_list = self.w_targets @@ -421,9 +413,6 @@ class Assign(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, targets, value, lineno, col_offset): self.targets = targets self.w_targets = None @@ -442,7 +431,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['targets', 'value', 'lineno', 'col_offset'], 'Assign') + self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') else: pass w_list = self.w_targets @@ -460,9 +449,6 @@ class AugAssign(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, target, op, value, lineno, col_offset): self.target = target self.op = op @@ -480,7 +466,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['target', 'op', 'value', 'lineno', 'col_offset'], 'AugAssign') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') else: pass self.target.sync_app_attrs(space) @@ -489,9 +475,6 @@ class Print(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, dest, values, nl, lineno, col_offset): self.dest = dest self.values = values @@ -511,10 +494,10 @@ return visitor.visit_Print(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 30: - missing_field(space, self.initialization_state, [None, 'values', 'nl', 'lineno', 'col_offset'], 'Print') + if (self.initialization_state & ~4) ^ 27: + self.missing_field(space, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.dest = None if self.dest: self.dest.sync_app_attrs(space) @@ -532,9 +515,6 @@ class For(stmt): - _lineno_mask = 16 - 
_col_offset_mask = 32 - def __init__(self, target, iter, body, orelse, lineno, col_offset): self.target = target self.iter = iter @@ -559,7 +539,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['target', 'iter', 'body', 'orelse', 'lineno', 'col_offset'], 'For') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 'orelse'], 'For') else: pass self.target.sync_app_attrs(space) @@ -588,9 +568,6 @@ class While(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -613,7 +590,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'While') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') else: pass self.test.sync_app_attrs(space) @@ -641,9 +618,6 @@ class If(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -666,7 +640,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'If') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') else: pass self.test.sync_app_attrs(space) @@ -694,9 +668,6 @@ class With(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, context_expr, optional_vars, body, lineno, col_offset): self.context_expr = context_expr self.optional_vars = optional_vars @@ -717,10 +688,10 @@ return visitor.visit_With(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 29: - missing_field(space, self.initialization_state, ['context_expr', None, 'body', 'lineno', 'col_offset'], 'With') + if (self.initialization_state & ~8) ^ 23: + self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.optional_vars = None self.context_expr.sync_app_attrs(space) if self.optional_vars: @@ -739,9 +710,6 @@ class Raise(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, inst, tback, lineno, col_offset): self.type = type self.inst = inst @@ -762,14 +730,14 @@ return visitor.visit_Raise(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~7) ^ 24: - missing_field(space, self.initialization_state, [None, None, None, 'lineno', 'col_offset'], 'Raise') + if (self.initialization_state & ~28) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.inst = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.tback = None if self.type: self.type.sync_app_attrs(space) @@ -781,9 +749,6 @@ class TryExcept(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, handlers, orelse, lineno, col_offset): self.body = body self.w_body = None @@ -808,7 +773,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['body', 'handlers', 'orelse', 'lineno', 
'col_offset'], 'TryExcept') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') else: pass w_list = self.w_body @@ -845,9 +810,6 @@ class TryFinally(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, body, finalbody, lineno, col_offset): self.body = body self.w_body = None @@ -868,7 +830,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['body', 'finalbody', 'lineno', 'col_offset'], 'TryFinally') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') else: pass w_list = self.w_body @@ -895,9 +857,6 @@ class Assert(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, test, msg, lineno, col_offset): self.test = test self.msg = msg @@ -914,10 +873,10 @@ return visitor.visit_Assert(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 13: - missing_field(space, self.initialization_state, ['test', None, 'lineno', 'col_offset'], 'Assert') + if (self.initialization_state & ~8) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.msg = None self.test.sync_app_attrs(space) if self.msg: @@ -926,9 +885,6 @@ class Import(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -945,7 +901,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Import') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import') else: pass w_list = self.w_names @@ -962,9 +918,6 @@ class ImportFrom(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, module, names, level, lineno, col_offset): self.module = module self.names = names @@ -982,12 +935,12 @@ return visitor.visit_ImportFrom(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~5) ^ 26: - missing_field(space, self.initialization_state, [None, 'names', None, 'lineno', 'col_offset'], 'ImportFrom') + if (self.initialization_state & ~20) ^ 11: + self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.module = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.level = 0 w_list = self.w_names if w_list is not None: @@ -1003,9 +956,6 @@ class Exec(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, globals, locals, lineno, col_offset): self.body = body self.globals = globals @@ -1025,12 +975,12 @@ return visitor.visit_Exec(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~6) ^ 25: - missing_field(space, self.initialization_state, ['body', None, None, 'lineno', 'col_offset'], 'Exec') + if (self.initialization_state & ~24) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'body', None, None], 'Exec') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.globals = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.locals = None self.body.sync_app_attrs(space) if self.globals: @@ -1041,9 +991,6 @@ class Global(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): 
self.names = names self.w_names = None @@ -1058,7 +1005,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Global') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') else: pass w_list = self.w_names @@ -1072,9 +1019,6 @@ class Expr(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -1089,7 +1033,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Expr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr') else: pass self.value.sync_app_attrs(space) @@ -1097,9 +1041,6 @@ class Pass(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1112,16 +1053,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Pass') + self.missing_field(space, ['lineno', 'col_offset'], 'Pass') else: pass class Break(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1134,16 +1072,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Break') + self.missing_field(space, ['lineno', 'col_offset'], 'Break') else: pass class Continue(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1156,21 +1091,19 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Continue') + self.missing_field(space, ['lineno', 'col_offset'], 'Continue') else: pass class expr(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class BoolOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, values, lineno, col_offset): self.op = op self.values = values @@ -1188,7 +1121,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'values', 'lineno', 'col_offset'], 'BoolOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp') else: pass w_list = self.w_values @@ -1205,9 +1138,6 @@ class BinOp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, op, right, lineno, col_offset): self.left = left self.op = op @@ -1225,7 +1155,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'op', 'right', 'lineno', 'col_offset'], 'BinOp') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'op', 'right'], 'BinOp') else: pass self.left.sync_app_attrs(space) @@ -1234,9 +1164,6 @@ class UnaryOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, operand, lineno, col_offset): self.op = op self.operand = operand @@ -1252,7 +1179,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, 
['op', 'operand', 'lineno', 'col_offset'], 'UnaryOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'operand'], 'UnaryOp') else: pass self.operand.sync_app_attrs(space) @@ -1260,9 +1187,6 @@ class Lambda(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, args, body, lineno, col_offset): self.args = args self.body = body @@ -1279,7 +1203,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['args', 'body', 'lineno', 'col_offset'], 'Lambda') + self.missing_field(space, ['lineno', 'col_offset', 'args', 'body'], 'Lambda') else: pass self.args.sync_app_attrs(space) @@ -1288,9 +1212,6 @@ class IfExp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -1309,7 +1230,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'IfExp') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'IfExp') else: pass self.test.sync_app_attrs(space) @@ -1319,9 +1240,6 @@ class Dict(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, keys, values, lineno, col_offset): self.keys = keys self.w_keys = None @@ -1342,7 +1260,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['keys', 'values', 'lineno', 'col_offset'], 'Dict') + self.missing_field(space, ['lineno', 'col_offset', 'keys', 'values'], 'Dict') else: pass w_list = self.w_keys @@ -1369,9 +1287,6 @@ class Set(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, elts, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1388,7 +1303,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['elts', 'lineno', 'col_offset'], 'Set') + self.missing_field(space, ['lineno', 'col_offset', 'elts'], 'Set') else: pass w_list = self.w_elts @@ -1405,9 +1320,6 @@ class ListComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1426,7 +1338,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'ListComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'ListComp') else: pass self.elt.sync_app_attrs(space) @@ -1444,9 +1356,6 @@ class SetComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1465,7 +1374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'SetComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'SetComp') else: pass self.elt.sync_app_attrs(space) @@ -1483,9 +1392,6 @@ class DictComp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, key, value, generators, lineno, col_offset): self.key = key self.value = value @@ -1506,7 +1412,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['key', 'value', 'generators', 'lineno', 
'col_offset'], 'DictComp') + self.missing_field(space, ['lineno', 'col_offset', 'key', 'value', 'generators'], 'DictComp') else: pass self.key.sync_app_attrs(space) @@ -1525,9 +1431,6 @@ class GeneratorExp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1546,7 +1449,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'GeneratorExp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'GeneratorExp') else: pass self.elt.sync_app_attrs(space) @@ -1564,9 +1467,6 @@ class Yield(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1581,10 +1481,10 @@ return visitor.visit_Yield(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Yield') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Yield') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -1592,9 +1492,6 @@ class Compare(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, ops, comparators, lineno, col_offset): self.left = left self.ops = ops @@ -1615,7 +1512,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'ops', 'comparators', 'lineno', 'col_offset'], 'Compare') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'ops', 'comparators'], 'Compare') else: pass self.left.sync_app_attrs(space) @@ -1640,9 +1537,6 @@ class Call(expr): - _lineno_mask = 32 - _col_offset_mask = 64 - def __init__(self, func, args, keywords, starargs, kwargs, lineno, col_offset): self.func = func self.args = args @@ -1670,12 +1564,12 @@ return visitor.visit_Call(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~24) ^ 103: - missing_field(space, self.initialization_state, ['func', 'args', 'keywords', None, None, 'lineno', 'col_offset'], 'Call') + if (self.initialization_state & ~96) ^ 31: + self.missing_field(space, ['lineno', 'col_offset', 'func', 'args', 'keywords', None, None], 'Call') else: - if not self.initialization_state & 8: + if not self.initialization_state & 32: self.starargs = None - if not self.initialization_state & 16: + if not self.initialization_state & 64: self.kwargs = None self.func.sync_app_attrs(space) w_list = self.w_args @@ -1706,9 +1600,6 @@ class Repr(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1723,7 +1614,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Repr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Repr') else: pass self.value.sync_app_attrs(space) @@ -1731,9 +1622,6 @@ class Num(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, n, lineno, col_offset): self.n = n expr.__init__(self, lineno, col_offset) @@ -1747,16 +1635,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - 
missing_field(space, self.initialization_state, ['n', 'lineno', 'col_offset'], 'Num') + self.missing_field(space, ['lineno', 'col_offset', 'n'], 'Num') else: pass class Str(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, s, lineno, col_offset): self.s = s expr.__init__(self, lineno, col_offset) @@ -1770,16 +1655,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['s', 'lineno', 'col_offset'], 'Str') + self.missing_field(space, ['lineno', 'col_offset', 's'], 'Str') else: pass class Attribute(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, attr, ctx, lineno, col_offset): self.value = value self.attr = attr @@ -1796,7 +1678,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'attr', 'ctx', 'lineno', 'col_offset'], 'Attribute') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'attr', 'ctx'], 'Attribute') else: pass self.value.sync_app_attrs(space) @@ -1804,9 +1686,6 @@ class Subscript(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, slice, ctx, lineno, col_offset): self.value = value self.slice = slice @@ -1824,7 +1703,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'slice', 'ctx', 'lineno', 'col_offset'], 'Subscript') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'slice', 'ctx'], 'Subscript') else: pass self.value.sync_app_attrs(space) @@ -1833,9 +1712,6 @@ class Name(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, id, ctx, lineno, col_offset): self.id = id self.ctx = ctx @@ -1850,16 +1726,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['id', 'ctx', 'lineno', 'col_offset'], 'Name') + self.missing_field(space, ['lineno', 'col_offset', 'id', 'ctx'], 'Name') else: pass class List(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1877,7 +1750,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'List') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'List') else: pass w_list = self.w_elts @@ -1894,9 +1767,6 @@ class Tuple(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1914,7 +1784,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'Tuple') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'Tuple') else: pass w_list = self.w_elts @@ -1931,9 +1801,6 @@ class Const(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1947,7 +1814,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Const') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Const') else: pass @@ -2009,7 +1876,6 @@ class Ellipsis(slice): - def __init__(self): self.initialization_state 
= 0 @@ -2021,14 +1887,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 0: - missing_field(space, self.initialization_state, [], 'Ellipsis') + self.missing_field(space, [], 'Ellipsis') else: pass class Slice(slice): - def __init__(self, lower, upper, step): self.lower = lower self.upper = upper @@ -2049,7 +1914,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~7) ^ 0: - missing_field(space, self.initialization_state, [None, None, None], 'Slice') + self.missing_field(space, [None, None, None], 'Slice') else: if not self.initialization_state & 1: self.lower = None @@ -2067,7 +1932,6 @@ class ExtSlice(slice): - def __init__(self, dims): self.dims = dims self.w_dims = None @@ -2083,7 +1947,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['dims'], 'ExtSlice') + self.missing_field(space, ['dims'], 'ExtSlice') else: pass w_list = self.w_dims @@ -2100,7 +1964,6 @@ class Index(slice): - def __init__(self, value): self.value = value self.initialization_state = 1 @@ -2114,7 +1977,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['value'], 'Index') + self.missing_field(space, ['value'], 'Index') else: pass self.value.sync_app_attrs(space) @@ -2377,7 +2240,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['target', 'iter', 'ifs'], 'comprehension') + self.missing_field(space, ['target', 'iter', 'ifs'], 'comprehension') else: pass self.target.sync_app_attrs(space) @@ -2394,15 +2257,13 @@ node.sync_app_attrs(space) class excepthandler(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class ExceptHandler(excepthandler): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, name, body, lineno, col_offset): self.type = type self.name = name @@ -2424,12 +2285,12 @@ return visitor.visit_ExceptHandler(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~3) ^ 28: - missing_field(space, self.initialization_state, [None, None, 'body', 'lineno', 'col_offset'], 'ExceptHandler') + if (self.initialization_state & ~12) ^ 19: + self.missing_field(space, ['lineno', 'col_offset', None, None, 'body'], 'ExceptHandler') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.name = None if self.type: self.type.sync_app_attrs(space) @@ -2470,7 +2331,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~6) ^ 9: - missing_field(space, self.initialization_state, ['args', None, None, 'defaults'], 'arguments') + self.missing_field(space, ['args', None, None, 'defaults'], 'arguments') else: if not self.initialization_state & 2: self.vararg = None @@ -2513,7 +2374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['arg', 'value'], 'keyword') + self.missing_field(space, ['arg', 'value'], 'keyword') else: pass self.value.sync_app_attrs(space) @@ -2533,7 +2394,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~2) ^ 1: - missing_field(space, self.initialization_state, ['name', None], 'alias') + self.missing_field(space, ['name', None], 'alias') else: if not self.initialization_state & 2: self.asname = None @@ -3019,6 +2880,8 
@@ def Expression_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3098,7 +2961,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -3112,14 +2975,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def stmt_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -3133,7 +2996,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 stmt.typedef = typedef.TypeDef("stmt", AST.typedef, @@ -3149,7 +3012,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3163,14 +3026,14 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def FunctionDef_get_args(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -3184,10 +3047,10 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def FunctionDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3201,10 +3064,10 @@ def FunctionDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def FunctionDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3218,7 +3081,7 @@ def 
FunctionDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _FunctionDef_field_unroller = unrolling_iterable(['name', 'args', 'body', 'decorator_list']) def FunctionDef_init(space, w_self, __args__): @@ -3254,7 +3117,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3268,10 +3131,10 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ClassDef_get_bases(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases') if w_self.w_bases is None: @@ -3285,10 +3148,10 @@ def ClassDef_set_bases(space, w_self, w_new_value): w_self.w_bases = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ClassDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3302,10 +3165,10 @@ def ClassDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def ClassDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3319,7 +3182,7 @@ def ClassDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _ClassDef_field_unroller = unrolling_iterable(['name', 'bases', 'body', 'decorator_list']) def ClassDef_init(space, w_self, __args__): @@ -3356,7 +3219,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3364,13 +3227,15 @@ def Return_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Return_field_unroller = unrolling_iterable(['value']) def Return_init(space, w_self, __args__): @@ -3397,7 +3262,7 @@ ) def Delete_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = 
space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3411,7 +3276,7 @@ def Delete_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Delete_field_unroller = unrolling_iterable(['targets']) def Delete_init(space, w_self, __args__): @@ -3439,7 +3304,7 @@ ) def Assign_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3453,14 +3318,14 @@ def Assign_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3468,13 +3333,15 @@ def Assign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assign_field_unroller = unrolling_iterable(['targets', 'value']) def Assign_init(space, w_self, __args__): @@ -3507,7 +3374,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3515,20 +3382,22 @@ def AugAssign_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def AugAssign_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -3544,14 +3413,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def AugAssign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not 
w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3559,13 +3428,15 @@ def AugAssign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _AugAssign_field_unroller = unrolling_iterable(['target', 'op', 'value']) def AugAssign_init(space, w_self, __args__): @@ -3598,7 +3469,7 @@ w_obj = w_self.getdictvalue(space, 'dest') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dest') return space.wrap(w_self.dest) @@ -3606,16 +3477,18 @@ def Print_set_dest(space, w_self, w_new_value): try: w_self.dest = space.interp_w(expr, w_new_value, True) + if type(w_self.dest) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'dest', w_new_value) return w_self.deldictvalue(space, 'dest') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Print_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -3629,14 +3502,14 @@ def Print_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Print_get_nl(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'nl') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl') return space.wrap(w_self.nl) @@ -3650,7 +3523,7 @@ w_self.setdictvalue(space, 'nl', w_new_value) return w_self.deldictvalue(space, 'nl') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Print_field_unroller = unrolling_iterable(['dest', 'values', 'nl']) def Print_init(space, w_self, __args__): @@ -3684,7 +3557,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3692,20 +3565,22 @@ def For_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - 
w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def For_get_iter(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'iter') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'iter') return space.wrap(w_self.iter) @@ -3713,16 +3588,18 @@ def For_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'iter', w_new_value) return w_self.deldictvalue(space, 'iter') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def For_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3736,10 +3613,10 @@ def For_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def For_get_orelse(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3753,7 +3630,7 @@ def For_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _For_field_unroller = unrolling_iterable(['target', 'iter', 'body', 'orelse']) def For_init(space, w_self, __args__): @@ -3789,7 +3666,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3797,16 +3674,18 @@ def While_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def While_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3820,10 +3699,10 @@ def While_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def While_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3837,7 
+3716,7 @@ def While_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _While_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def While_init(space, w_self, __args__): @@ -3872,7 +3751,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3880,16 +3759,18 @@ def If_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def If_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3903,10 +3784,10 @@ def If_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def If_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3920,7 +3801,7 @@ def If_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _If_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def If_init(space, w_self, __args__): @@ -3955,7 +3836,7 @@ w_obj = w_self.getdictvalue(space, 'context_expr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr') return space.wrap(w_self.context_expr) @@ -3963,20 +3844,22 @@ def With_set_context_expr(space, w_self, w_new_value): try: w_self.context_expr = space.interp_w(expr, w_new_value, False) + if type(w_self.context_expr) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'context_expr', w_new_value) return w_self.deldictvalue(space, 'context_expr') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def With_get_optional_vars(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'optional_vars') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars') return space.wrap(w_self.optional_vars) @@ -3984,16 +3867,18 @@ def With_set_optional_vars(space, w_self, w_new_value): try: w_self.optional_vars = 
space.interp_w(expr, w_new_value, True) + if type(w_self.optional_vars) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'optional_vars', w_new_value) return w_self.deldictvalue(space, 'optional_vars') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def With_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4007,7 +3892,7 @@ def With_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _With_field_unroller = unrolling_iterable(['context_expr', 'optional_vars', 'body']) def With_init(space, w_self, __args__): @@ -4041,7 +3926,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -4049,20 +3934,22 @@ def Raise_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Raise_get_inst(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'inst') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst') return space.wrap(w_self.inst) @@ -4070,20 +3957,22 @@ def Raise_set_inst(space, w_self, w_new_value): try: w_self.inst = space.interp_w(expr, w_new_value, True) + if type(w_self.inst) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'inst', w_new_value) return w_self.deldictvalue(space, 'inst') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Raise_get_tback(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'tback') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'tback') return space.wrap(w_self.tback) @@ -4091,13 +3980,15 @@ def Raise_set_tback(space, w_self, w_new_value): try: w_self.tback = space.interp_w(expr, w_new_value, True) + if type(w_self.tback) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'tback', w_new_value) return w_self.deldictvalue(space, 'tback') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Raise_field_unroller = 
unrolling_iterable(['type', 'inst', 'tback']) def Raise_init(space, w_self, __args__): @@ -4126,7 +4017,7 @@ ) def TryExcept_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4140,10 +4031,10 @@ def TryExcept_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryExcept_get_handlers(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers') if w_self.w_handlers is None: @@ -4157,10 +4048,10 @@ def TryExcept_set_handlers(space, w_self, w_new_value): w_self.w_handlers = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def TryExcept_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -4174,7 +4065,7 @@ def TryExcept_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _TryExcept_field_unroller = unrolling_iterable(['body', 'handlers', 'orelse']) def TryExcept_init(space, w_self, __args__): @@ -4206,7 +4097,7 @@ ) def TryFinally_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4220,10 +4111,10 @@ def TryFinally_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryFinally_get_finalbody(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody') if w_self.w_finalbody is None: @@ -4237,7 +4128,7 @@ def TryFinally_set_finalbody(space, w_self, w_new_value): w_self.w_finalbody = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _TryFinally_field_unroller = unrolling_iterable(['body', 'finalbody']) def TryFinally_init(space, w_self, __args__): @@ -4271,7 +4162,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -4279,20 +4170,22 @@ def Assert_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state 
|= 1 + w_self.initialization_state |= 4 def Assert_get_msg(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'msg') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg') return space.wrap(w_self.msg) @@ -4300,13 +4193,15 @@ def Assert_set_msg(space, w_self, w_new_value): try: w_self.msg = space.interp_w(expr, w_new_value, True) + if type(w_self.msg) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'msg', w_new_value) return w_self.deldictvalue(space, 'msg') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assert_field_unroller = unrolling_iterable(['test', 'msg']) def Assert_init(space, w_self, __args__): @@ -4334,7 +4229,7 @@ ) def Import_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4348,7 +4243,7 @@ def Import_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Import_field_unroller = unrolling_iterable(['names']) def Import_init(space, w_self, __args__): @@ -4380,7 +4275,7 @@ w_obj = w_self.getdictvalue(space, 'module') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module') return space.wrap(w_self.module) @@ -4397,10 +4292,10 @@ w_self.setdictvalue(space, 'module', w_new_value) return w_self.deldictvalue(space, 'module') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ImportFrom_get_names(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4414,14 +4309,14 @@ def ImportFrom_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ImportFrom_get_level(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'level') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level') return space.wrap(w_self.level) @@ -4435,7 +4330,7 @@ w_self.setdictvalue(space, 'level', w_new_value) return w_self.deldictvalue(space, 'level') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ImportFrom_field_unroller = unrolling_iterable(['module', 'names', 'level']) def ImportFrom_init(space, w_self, __args__): @@ -4469,7 +4364,7 @@ w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = 
space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -4477,20 +4372,22 @@ def Exec_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Exec_get_globals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'globals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals') return space.wrap(w_self.globals) @@ -4498,20 +4395,22 @@ def Exec_set_globals(space, w_self, w_new_value): try: w_self.globals = space.interp_w(expr, w_new_value, True) + if type(w_self.globals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'globals', w_new_value) return w_self.deldictvalue(space, 'globals') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Exec_get_locals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'locals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals') return space.wrap(w_self.locals) @@ -4519,13 +4418,15 @@ def Exec_set_locals(space, w_self, w_new_value): try: w_self.locals = space.interp_w(expr, w_new_value, True) + if type(w_self.locals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'locals', w_new_value) return w_self.deldictvalue(space, 'locals') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Exec_field_unroller = unrolling_iterable(['body', 'globals', 'locals']) def Exec_init(space, w_self, __args__): @@ -4554,7 +4455,7 @@ ) def Global_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4568,7 +4469,7 @@ def Global_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Global_field_unroller = unrolling_iterable(['names']) def Global_init(space, w_self, __args__): @@ -4600,7 +4501,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -4608,13 +4509,15 @@ def Expr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, 
w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Expr_field_unroller = unrolling_iterable(['value']) def Expr_init(space, w_self, __args__): @@ -4696,7 +4599,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -4710,14 +4613,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def expr_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -4731,7 +4634,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 expr.typedef = typedef.TypeDef("expr", AST.typedef, @@ -4747,7 +4650,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return boolop_to_class[w_self.op - 1]() @@ -4763,10 +4666,10 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BoolOp_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -4780,7 +4683,7 @@ def BoolOp_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _BoolOp_field_unroller = unrolling_iterable(['op', 'values']) def BoolOp_init(space, w_self, __args__): @@ -4813,7 +4716,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -4821,20 +4724,22 @@ def BinOp_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 
'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BinOp_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -4850,14 +4755,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def BinOp_get_right(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'right') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right') return space.wrap(w_self.right) @@ -4865,13 +4770,15 @@ def BinOp_set_right(space, w_self, w_new_value): try: w_self.right = space.interp_w(expr, w_new_value, False) + if type(w_self.right) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'right', w_new_value) return w_self.deldictvalue(space, 'right') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _BinOp_field_unroller = unrolling_iterable(['left', 'op', 'right']) def BinOp_init(space, w_self, __args__): @@ -4904,7 +4811,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return unaryop_to_class[w_self.op - 1]() @@ -4920,14 +4827,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def UnaryOp_get_operand(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'operand') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand') return space.wrap(w_self.operand) @@ -4935,13 +4842,15 @@ def UnaryOp_set_operand(space, w_self, w_new_value): try: w_self.operand = space.interp_w(expr, w_new_value, False) + if type(w_self.operand) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'operand', w_new_value) return w_self.deldictvalue(space, 'operand') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _UnaryOp_field_unroller = unrolling_iterable(['op', 'operand']) def UnaryOp_init(space, w_self, __args__): @@ -4973,7 +4882,7 @@ w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no 
attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -4987,14 +4896,14 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Lambda_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5002,13 +4911,15 @@ def Lambda_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Lambda_field_unroller = unrolling_iterable(['args', 'body']) def Lambda_init(space, w_self, __args__): @@ -5040,7 +4951,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -5048,20 +4959,22 @@ def IfExp_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def IfExp_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5069,20 +4982,22 @@ def IfExp_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def IfExp_get_orelse(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'orelse') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') return space.wrap(w_self.orelse) @@ -5090,13 +5005,15 @@ def IfExp_set_orelse(space, w_self, w_new_value): try: w_self.orelse = space.interp_w(expr, w_new_value, False) + if type(w_self.orelse) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if 
not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'orelse', w_new_value) return w_self.deldictvalue(space, 'orelse') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _IfExp_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def IfExp_init(space, w_self, __args__): @@ -5125,7 +5042,7 @@ ) def Dict_get_keys(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys') if w_self.w_keys is None: @@ -5139,10 +5056,10 @@ def Dict_set_keys(space, w_self, w_new_value): w_self.w_keys = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Dict_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -5156,7 +5073,7 @@ def Dict_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Dict_field_unroller = unrolling_iterable(['keys', 'values']) def Dict_init(space, w_self, __args__): @@ -5186,7 +5103,7 @@ ) def Set_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -5200,7 +5117,7 @@ def Set_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Set_field_unroller = unrolling_iterable(['elts']) def Set_init(space, w_self, __args__): @@ -5232,7 +5149,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5240,16 +5157,18 @@ def ListComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ListComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5263,7 +5182,7 @@ def ListComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _ListComp_field_unroller = unrolling_iterable(['elt', 'generators']) def ListComp_init(space, w_self, __args__): @@ -5296,7 +5215,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = 
space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5304,16 +5223,18 @@ def SetComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def SetComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5327,7 +5248,7 @@ def SetComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _SetComp_field_unroller = unrolling_iterable(['elt', 'generators']) def SetComp_init(space, w_self, __args__): @@ -5360,7 +5281,7 @@ w_obj = w_self.getdictvalue(space, 'key') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key') return space.wrap(w_self.key) @@ -5368,20 +5289,22 @@ def DictComp_set_key(space, w_self, w_new_value): try: w_self.key = space.interp_w(expr, w_new_value, False) + if type(w_self.key) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'key', w_new_value) return w_self.deldictvalue(space, 'key') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def DictComp_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5389,16 +5312,18 @@ def DictComp_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def DictComp_get_generators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5412,7 +5337,7 @@ def DictComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _DictComp_field_unroller = unrolling_iterable(['key', 'value', 'generators']) def DictComp_init(space, w_self, __args__): @@ -5446,7 +5371,7 @@ w_obj = 
w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5454,16 +5379,18 @@ def GeneratorExp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def GeneratorExp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5477,7 +5404,7 @@ def GeneratorExp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _GeneratorExp_field_unroller = unrolling_iterable(['elt', 'generators']) def GeneratorExp_init(space, w_self, __args__): @@ -5510,7 +5437,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5518,13 +5445,15 @@ def Yield_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Yield_field_unroller = unrolling_iterable(['value']) def Yield_init(space, w_self, __args__): @@ -5555,7 +5484,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -5563,16 +5492,18 @@ def Compare_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Compare_get_ops(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops') if w_self.w_ops is None: @@ -5586,10 +5517,10 @@ def Compare_set_ops(space, w_self, w_new_value): w_self.w_ops = w_new_value - 
w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Compare_get_comparators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators') if w_self.w_comparators is None: @@ -5603,7 +5534,7 @@ def Compare_set_comparators(space, w_self, w_new_value): w_self.w_comparators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Compare_field_unroller = unrolling_iterable(['left', 'ops', 'comparators']) def Compare_init(space, w_self, __args__): @@ -5638,7 +5569,7 @@ w_obj = w_self.getdictvalue(space, 'func') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func') return space.wrap(w_self.func) @@ -5646,16 +5577,18 @@ def Call_set_func(space, w_self, w_new_value): try: w_self.func = space.interp_w(expr, w_new_value, False) + if type(w_self.func) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'func', w_new_value) return w_self.deldictvalue(space, 'func') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Call_get_args(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') if w_self.w_args is None: @@ -5669,10 +5602,10 @@ def Call_set_args(space, w_self, w_new_value): w_self.w_args = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Call_get_keywords(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') if w_self.w_keywords is None: @@ -5686,14 +5619,14 @@ def Call_set_keywords(space, w_self, w_new_value): w_self.w_keywords = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def Call_get_starargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'starargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') return space.wrap(w_self.starargs) @@ -5701,20 +5634,22 @@ def Call_set_starargs(space, w_self, w_new_value): try: w_self.starargs = space.interp_w(expr, w_new_value, True) + if type(w_self.starargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'starargs', w_new_value) return w_self.deldictvalue(space, 'starargs') - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 def Call_get_kwargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'kwargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 16: + if not 
w_self.initialization_state & 64: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') return space.wrap(w_self.kwargs) @@ -5722,13 +5657,15 @@ def Call_set_kwargs(space, w_self, w_new_value): try: w_self.kwargs = space.interp_w(expr, w_new_value, True) + if type(w_self.kwargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'kwargs', w_new_value) return w_self.deldictvalue(space, 'kwargs') - w_self.initialization_state |= 16 + w_self.initialization_state |= 64 _Call_field_unroller = unrolling_iterable(['func', 'args', 'keywords', 'starargs', 'kwargs']) def Call_init(space, w_self, __args__): @@ -5765,7 +5702,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5773,13 +5710,15 @@ def Repr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Repr_field_unroller = unrolling_iterable(['value']) def Repr_init(space, w_self, __args__): @@ -5810,7 +5749,7 @@ w_obj = w_self.getdictvalue(space, 'n') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n') return w_self.n @@ -5824,7 +5763,7 @@ w_self.setdictvalue(space, 'n', w_new_value) return w_self.deldictvalue(space, 'n') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Num_field_unroller = unrolling_iterable(['n']) def Num_init(space, w_self, __args__): @@ -5855,7 +5794,7 @@ w_obj = w_self.getdictvalue(space, 's') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's') return w_self.s @@ -5869,7 +5808,7 @@ w_self.setdictvalue(space, 's', w_new_value) return w_self.deldictvalue(space, 's') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Str_field_unroller = unrolling_iterable(['s']) def Str_init(space, w_self, __args__): @@ -5900,7 +5839,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5908,20 +5847,22 @@ def Attribute_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if 
not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Attribute_get_attr(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'attr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr') return space.wrap(w_self.attr) @@ -5935,14 +5876,14 @@ w_self.setdictvalue(space, 'attr', w_new_value) return w_self.deldictvalue(space, 'attr') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Attribute_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -5958,7 +5899,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Attribute_field_unroller = unrolling_iterable(['value', 'attr', 'ctx']) def Attribute_init(space, w_self, __args__): @@ -5991,7 +5932,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5999,20 +5940,22 @@ def Subscript_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Subscript_get_slice(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'slice') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice') return space.wrap(w_self.slice) @@ -6020,20 +5963,22 @@ def Subscript_set_slice(space, w_self, w_new_value): try: w_self.slice = space.interp_w(slice, w_new_value, False) + if type(w_self.slice) is slice: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'slice', w_new_value) return w_self.deldictvalue(space, 'slice') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Subscript_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, 
"'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6049,7 +5994,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Subscript_field_unroller = unrolling_iterable(['value', 'slice', 'ctx']) def Subscript_init(space, w_self, __args__): @@ -6082,7 +6027,7 @@ w_obj = w_self.getdictvalue(space, 'id') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id') return space.wrap(w_self.id) @@ -6096,14 +6041,14 @@ w_self.setdictvalue(space, 'id', w_new_value) return w_self.deldictvalue(space, 'id') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Name_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6119,7 +6064,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Name_field_unroller = unrolling_iterable(['id', 'ctx']) def Name_init(space, w_self, __args__): @@ -6147,7 +6092,7 @@ ) def List_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6161,14 +6106,14 @@ def List_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def List_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6184,7 +6129,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _List_field_unroller = unrolling_iterable(['elts', 'ctx']) def List_init(space, w_self, __args__): @@ -6213,7 +6158,7 @@ ) def Tuple_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6227,14 +6172,14 @@ def Tuple_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Tuple_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = 
space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6250,7 +6195,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Tuple_field_unroller = unrolling_iterable(['elts', 'ctx']) def Tuple_init(space, w_self, __args__): @@ -6283,7 +6228,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return w_self.value @@ -6297,7 +6242,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Const_field_unroller = unrolling_iterable(['value']) def Const_init(space, w_self, __args__): @@ -6409,6 +6354,8 @@ def Slice_set_lower(space, w_self, w_new_value): try: w_self.lower = space.interp_w(expr, w_new_value, True) + if type(w_self.lower) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6430,6 +6377,8 @@ def Slice_set_upper(space, w_self, w_new_value): try: w_self.upper = space.interp_w(expr, w_new_value, True) + if type(w_self.upper) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6451,6 +6400,8 @@ def Slice_set_step(space, w_self, w_new_value): try: w_self.step = space.interp_w(expr, w_new_value, True) + if type(w_self.step) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6540,6 +6491,8 @@ def Index_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6809,6 +6762,8 @@ def comprehension_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6830,6 +6785,8 @@ def comprehension_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6887,7 +6844,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -6901,14 +6858,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def excepthandler_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj 
= w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -6922,7 +6879,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 excepthandler.typedef = typedef.TypeDef("excepthandler", AST.typedef, @@ -6938,7 +6895,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -6946,20 +6903,22 @@ def ExceptHandler_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ExceptHandler_get_name(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -6967,16 +6926,18 @@ def ExceptHandler_set_name(space, w_self, w_new_value): try: w_self.name = space.interp_w(expr, w_new_value, True) + if type(w_self.name) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ExceptHandler_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -6990,7 +6951,7 @@ def ExceptHandler_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ExceptHandler_field_unroller = unrolling_iterable(['type', 'name', 'body']) def ExceptHandler_init(space, w_self, __args__): @@ -7164,6 +7125,8 @@ def keyword_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -79,6 +79,7 @@ else: self.emit("class %s(AST):" % (base,)) if sum.attributes: + 
self.emit("") args = ", ".join(attr.name.value for attr in sum.attributes) self.emit("def __init__(self, %s):" % (args,), 1) for attr in sum.attributes: @@ -114,7 +115,7 @@ else: names.append(repr(field.name.value)) sub = (", ".join(names), name.value) - self.emit("missing_field(space, self.initialization_state, [%s], %r)" + self.emit("self.missing_field(space, [%s], %r)" % sub, 3) self.emit("else:", 2) # Fill in all the default fields. @@ -195,17 +196,13 @@ def visitConstructor(self, cons, base, extra_attributes): self.emit("class %s(%s):" % (cons.name, base)) self.emit("") - for field in self.data.cons_attributes[cons]: - subst = (field.name, self.data.field_masks[field]) - self.emit("_%s_mask = %i" % subst, 1) - self.emit("") self.make_constructor(cons.fields, cons, extra_attributes, base) self.emit("") self.emit("def walkabout(self, visitor):", 1) self.emit("visitor.visit_%s(self)" % (cons.name,), 2) self.emit("") self.make_mutate_over(cons, cons.name) - self.make_var_syncer(cons.fields + self.data.cons_attributes[cons], + self.make_var_syncer(self.data.cons_attributes[cons] + cons.fields, cons, cons.name) def visitField(self, field): @@ -324,7 +321,7 @@ def visitSum(self, sum, name): for field in sum.attributes: - self.make_property(field, name, True) + self.make_property(field, name) self.make_typedef(name, "AST", sum.attributes, fields_name="_attributes") if not is_simple_sum(sum): @@ -400,13 +397,10 @@ def visitField(self, field, name): self.make_property(field, name) - def make_property(self, field, name, different_masks=False): + def make_property(self, field, name): func = "def %s_get_%s(space, w_self):" % (name, field.name) self.emit(func) - if different_masks: - flag = "w_self._%s_mask" % (field.name,) - else: - flag = self.data.field_masks[field] + flag = self.data.field_masks[field] if not field.seq: self.emit("if w_self.w_dict is not None:", 1) self.emit(" w_obj = w_self.getdictvalue(space, '%s')" % (field.name,), 1) @@ -458,6 +452,11 @@ config = (field.name, field.type, repr(field.opt)) self.emit("w_self.%s = space.interp_w(%s, w_new_value, %s)" % config, 2) + if field.type.value not in self.data.prod_simple: + self.emit("if type(w_self.%s) is %s:" % ( + field.name, field.type), 2) + self.emit("raise OperationError(space.w_TypeError, " + "space.w_None)", 3) else: level = 2 if field.opt and field.type.value != "int": @@ -505,7 +504,10 @@ optional_mask = 0 for i, field in enumerate(fields): flag = 1 << i - field_masks[field] = flag + if field not in field_masks: + field_masks[field] = flag + else: + assert field_masks[field] == flag if field.opt: optional_mask |= flag else: @@ -518,9 +520,9 @@ if is_simple_sum(sum): simple_types.add(tp.name.value) else: + attrs = [field for field in sum.attributes] for cons in sum.types: - attrs = [copy_field(field) for field in sum.attributes] - add_masks(cons.fields + attrs, cons) + add_masks(attrs + cons.fields, cons) cons_attributes[cons] = attrs else: prod = tp.value @@ -588,6 +590,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." 
+ state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \\"%s\\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \\"%s\\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -631,15 +651,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." - for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \\"%s\\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) - raise AssertionError("should not reach here") """ diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -285,3 +285,10 @@ [], lineno=1, col_offset=0) ]) exec compile(body, '', 'exec') + + def test_invalid_sum(self): + import _ast as ast + pos = dict(lineno=2, col_offset=3) + m = ast.Module([ast.Expr(ast.expr(**pos), **pos)]) + exc = raises(TypeError, compile, m, "", "exec") + diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -17,13 +17,6 @@ g() log = self.run(main, [500]) - # XXX XXX this test fails so far because of a detail that - # changed with jit-simplify-backendintf. We should try to - # think of a way to be more resistent against such details. - # The issue is that we now get one Tracing, then go back - # to the interpreter hoping to immediately run the JITted - # code; but instead, we Trace again, just because another - # counter was also about to reach its limit... loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ i16 = force_token() @@ -34,7 +27,7 @@ jump(..., descr=...) """) assert loop.match_by_id("subtract", """ - setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me + setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me i2 = int_sub_ovf(i1, 42) guard_no_overflow(descr=...) 
""") diff --git a/pypy/tool/gcc_cache.py b/pypy/tool/gcc_cache.py --- a/pypy/tool/gcc_cache.py +++ b/pypy/tool/gcc_cache.py @@ -11,9 +11,6 @@ # Import 'platform' every time, the compiler may have been changed from pypy.translator.platform import platform cache_dir = cache_dir_root.join(cachename).ensure(dir=1) - c_files.extend([py.path.local(f) for f in eci.separate_module_files]) - eci = ExternalCompilationInfo(**eci._copy_attributes()) - eci.separate_module_files = () filecontents = [c_file.read() for c_file in c_files] key = repr((filecontents, eci, platform.key())) hash = md5(key).hexdigest() From noreply at buildbot.pypy.org Sun Dec 18 16:11:10 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 18 Dec 2011 16:11:10 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: repr of TargetToken's are compared by test_pypy_c so they are not allowed to change and should identify the instance Message-ID: <20111218151110.3436B823F8@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50658:9339faeef95b Date: 2011-12-18 15:31 +0100 http://bitbucket.org/pypy/pypy/changeset/9339faeef95b/ Log: repr of TargetToken's are compared by test_pypy_c so they are not allowed to change and should identify the instance diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -758,6 +758,9 @@ self.virtual_state = None self.exported_state = None + + def repr_of_descr(self): + return 'TargetToken(%d)' % compute_identity_hash(self) class TreeLoop(object): inputargs = None From noreply at buildbot.pypy.org Sun Dec 18 16:11:11 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 18 Dec 2011 16:11:11 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: hg merge default Message-ID: <20111218151111.57A4582009@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50659:db1c9e2286c2 Date: 2011-12-18 16:10 +0100 http://bitbucket.org/pypy/pypy/changeset/db1c9e2286c2/ Log: hg merge default diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -53,7 +53,6 @@ i = start for j in range(arr.size): arr[j] = i - j += 1 i += step return arr From noreply at buildbot.pypy.org Sun Dec 18 17:02:47 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 18 Dec 2011 17:02:47 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: Fixes. Message-ID: <20111218160247.6A13382009@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50660:0e374fa5eab3 Date: 2011-12-18 18:02 +0200 http://bitbucket.org/pypy/pypy/changeset/0e374fa5eab3/ Log: Fixes. 
diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -970,7 +970,7 @@ return array def create_sig(self): - return signature.ViewSignature(self.parent.create_sig()) + return signature.ViewSignature(self.dtype) def setshape(self, space, new_shape): if len(self.shape) < 1: diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -203,6 +203,14 @@ allnumbers.append(no) self.iter_no = no + def _create_iter(self, iterlist, arraylist, arr): + from pypy.module.micronumpy.interp_numarray import ConcreteArray + assert isinstance(arr, ConcreteArray) + if self.iter_no >= len(iterlist): + iterlist.append(ViewIterator(arr)) + if self.array_no >= len(arraylist): + arraylist.append(arr.storage) + class FlatiterSignature(ViewSignature): def debug_repr(self): return 'FlatIter(%s)' % self.child.debug_repr() From noreply at buildbot.pypy.org Sun Dec 18 17:18:38 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 18 Dec 2011 17:18:38 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: minor cleanup Message-ID: <20111218161838.0EE2B82009@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50661:a44392fc3f5d Date: 2011-12-18 18:18 +0200 http://bitbucket.org/pypy/pypy/changeset/a44392fc3f5d/ Log: minor cleanup diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -140,10 +140,13 @@ from pypy.module.micronumpy.interp_numarray import ConcreteArray assert isinstance(arr, ConcreteArray) if self.iter_no >= len(iterlist): - iterlist.append(ArrayIterator(arr.size)) + iterlist.append(self.allocate_iter(arr)) if self.array_no >= len(arraylist): arraylist.append(arr.storage) + def allocate_iter(self, arr): + return ArrayIterator(arr.size) + def eval(self, frame, arr): from pypy.module.micronumpy.interp_numarray import ConcreteArray assert isinstance(arr, ConcreteArray) @@ -203,13 +206,8 @@ allnumbers.append(no) self.iter_no = no - def _create_iter(self, iterlist, arraylist, arr): - from pypy.module.micronumpy.interp_numarray import ConcreteArray - assert isinstance(arr, ConcreteArray) - if self.iter_no >= len(iterlist): - iterlist.append(ViewIterator(arr)) - if self.array_no >= len(arraylist): - arraylist.append(arr.storage) + def allocate_iter(self, arr): + return ViewIterator(arr) class FlatiterSignature(ViewSignature): def debug_repr(self): From noreply at buildbot.pypy.org Sun Dec 18 17:54:02 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 18 Dec 2011 17:54:02 +0100 (CET) Subject: [pypy-commit] buildbot default: fix the locks and schedule speed.python.org nightly Message-ID: <20111218165402.5680C82009@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r606:4b5f6c1f66ac Date: 2011-12-18 18:53 +0200 http://bitbucket.org/pypy/buildbot/changeset/4b5f6c1f66ac/ Log: fix the locks and schedule speed.python.org nightly diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -345,7 +345,7 @@ addopts = [] self.addStep(ShellCmd( # this step needs exclusive access to the CPU - locks=[TannitCPU.access('exclusive')], + locks=[lock.access('exclusive')], description="run benchmarks 
on top of pypy-c", command=["python", "runner.py", '--output-filename', 'result.json', '--pypy-c', pypy_c_rel, diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -204,7 +204,7 @@ Nightly("nightly-0-00", [ JITBENCH, # on tannit32, uses 1 core (in part exclusively) JITBENCH64, # on tannit64, uses 1 core (in part exclusively) - #JITBENCH64_2, # on speed.python.org, uses 1 core (in part exclusively) + JITBENCH64_2, # on speed.python.org, uses 1 core (in part exclusively) MACOSX32, # on minime ], branch=None, hour=0, minute=0), # From noreply at buildbot.pypy.org Sun Dec 18 18:50:38 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 18:50:38 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: Attach a descr to call_malloc_gc(). Remove the function pointer Message-ID: <20111218175038.401A882009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50662:c7acd213b15b Date: 2011-12-18 16:18 +0100 http://bitbucket.org/pypy/pypy/changeset/c7acd213b15b/ Log: Attach a descr to call_malloc_gc(). Remove the function pointer in call_malloc_nursery(), because there can be only one. diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -17,8 +17,7 @@ from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr from pypy.jit.backend.llsupport.descr import get_array_descr -##from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr -##from pypy.jit.backend.llsupport.descr import get_call_descr +from pypy.jit.backend.llsupport.descr import get_call_descr from pypy.jit.backend.llsupport.rewrite import GcRewriterAssembler from pypy.rpython.memory.gctransform import asmgcroot @@ -49,9 +48,11 @@ ll_func = llhelper(FUNCPTR, func) c_ll_func = ConstInt( heaptracker.adr2int(llmemory.cast_ptr_to_adr(ll_func))) + descr = get_call_descr(self, ARGS, RESULT) setattr(self, '%s' % funcname, func) setattr(self, '%s_fn' % funcname, ll_func) setattr(self, 'c_%s_fn' % funcname, c_ll_func) + setattr(self, '%s_descr' % funcname, descr) self._generated_functions.append(funcname) def _freeze_(self): diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -138,10 +138,10 @@ self._op_malloc_nursery = None self.recent_mallocs.clear() - def _gen_call_malloc_gc(self, args, v_result): + def _gen_call_malloc_gc(self, args, v_result, descr): """Generate a CALL_MALLOC_GC with the given args.""" self.emitting_an_operation_that_can_collect() - op = ResOperation(rop.CALL_MALLOC_GC, args, v_result) + op = ResOperation(rop.CALL_MALLOC_GC, args, v_result, descr) self.newops.append(op) # mark 'v_result' as freshly malloced self.recent_mallocs[v_result] = None @@ -151,7 +151,8 @@ Note that with the framework GC, this should be called very rarely. """ self._gen_call_malloc_gc([self.gc_ll_descr.c_malloc_fixedsize_fn, - ConstInt(size)], v_result) + ConstInt(size)], v_result, + self.gc_ll_descr.malloc_fixedsize_descr) def gen_boehm_malloc_array(self, arraydescr, v_num_elem, v_result): """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) 
for Boehm.""" @@ -160,7 +161,8 @@ v_num_elem, ConstInt(arraydescr.itemsize), ConstInt(arraydescr.lendescr.offset)], - v_result) + v_result, + self.gc_ll_descr.malloc_array_descr) def gen_malloc_array(self, arraydescr, v_num_elem, v_result): """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) going either @@ -174,22 +176,26 @@ ConstInt(arraydescr.itemsize), ConstInt(arraydescr.tid), v_num_elem] + calldescr = self.gc_ll_descr.malloc_array_descr else: arraydescr_gcref = xxx args = [self.gc_ll_descr.c_malloc_array_nonstandard_fn, ConstPtr(arraydescr_gcref), v_num_elem] - self._gen_call_malloc_gc(args, v_result) + calldescr = self.gc_ll_descr.malloc_array_nonstandard_descr + self._gen_call_malloc_gc(args, v_result, calldescr) def gen_malloc_str(self, v_num_elem, v_result): """Generate a CALL_MALLOC_GC(malloc_str_fn, ...).""" self._gen_call_malloc_gc([self.gc_ll_descr.c_malloc_str_fn, - v_num_elem], v_result) + v_num_elem], v_result, + self.gc_ll_descr.malloc_str_descr) def gen_malloc_unicode(self, v_num_elem, v_result): """Generate a CALL_MALLOC_GC(malloc_unicode_fn, ...).""" self._gen_call_malloc_gc([self.gc_ll_descr.c_malloc_unicode_fn, - v_num_elem], v_result) + v_num_elem], v_result, + self.gc_ll_descr.malloc_unicode_descr) def gen_malloc_nursery(self, size, v_result): """Try to generate or update a CALL_MALLOC_NURSERY. @@ -203,11 +209,11 @@ # if self._op_malloc_nursery is not None: # already a MALLOC_NURSERY: increment its total size - total_size = self._op_malloc_nursery.getarg(1).getint() + total_size = self._op_malloc_nursery.getarg(0).getint() total_size += size if self.gc_ll_descr.can_use_nursery_malloc(total_size): # if the total size is still reasonable, merge it - self._op_malloc_nursery.setarg(1, ConstInt(total_size)) + self._op_malloc_nursery.setarg(0, ConstInt(total_size)) op = ResOperation(rop.INT_ADD, [self._v_last_malloced_nursery, ConstInt(self._previous_size)], @@ -216,8 +222,7 @@ # if we failed to merge with a previous MALLOC_NURSERY, emit one self.emitting_an_operation_that_can_collect() op = ResOperation(rop.CALL_MALLOC_NURSERY, - [self.gc_ll_descr.c_malloc_nursery_fn, - ConstInt(size)], + [ConstInt(size)], v_result) self._op_malloc_nursery = op # diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -72,6 +72,8 @@ # for funcname in self.gc_ll_descr._generated_functions: namespace[funcname] = getattr(self.gc_ll_descr, '%s_fn' % funcname) + namespace[funcname + '_descr'] = getattr(self.gc_ll_descr, + '%s_descr' % funcname) # ops = parse(frm_operations, namespace=namespace) expected = parse(to_operations % Evaluator(namespace), @@ -97,7 +99,8 @@ jump() """, """ [p1] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d) + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) jump() """) @@ -109,8 +112,10 @@ jump() """, """ [] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d) - p1 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d) + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + p1 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) jump() """) @@ -122,7 +127,8 @@ """, """ [] p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(adescr.basesize + 10 * adescr.itemsize)d) + %(adescr.basesize + 10 * adescr.itemsize)d, \ + 
descr=malloc_fixedsize_descr) setfield_gc(p0, 10, descr=alendescr) jump() """) @@ -134,11 +140,12 @@ jump() """, """ [i1] - p0 = call_malloc_gc(ConstClass(malloc_array), \ - %(adescr.basesize)d, \ - i1, \ - %(adescr.itemsize)d, \ - %(adescr.lendescr.offset)d) + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(adescr.basesize)d, \ + i1, \ + %(adescr.itemsize)d, \ + %(adescr.lendescr.offset)d, \ + descr=malloc_array_descr) jump() """) @@ -149,7 +156,8 @@ jump() """, """ [p1] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102) + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102, \ + descr=malloc_fixedsize_descr) setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) jump() """) @@ -165,7 +173,8 @@ %(strdescr.basesize)d, \ i1, \ %(strdescr.itemsize)d, \ - %(strlendescr.offset)d) + %(strlendescr.offset)d, \ + descr=malloc_array_descr) jump() """) @@ -176,9 +185,10 @@ jump() """, """ [i1] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(unicodedescr.basesize + \ - 10 * unicodedescr.itemsize)d) + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(unicodedescr.basesize + \ + 10 * unicodedescr.itemsize)d, \ + descr=malloc_fixedsize_descr) setfield_gc(p0, 10, descr=unicodelendescr) jump() """) @@ -212,8 +222,7 @@ jump() """, """ [p1] - p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ - %(sdescr.size)d) + p0 = call_malloc_nursery(%(sdescr.size)d) setfield_gc(p0, 1234, descr=tiddescr) jump() """) @@ -227,7 +236,7 @@ jump() """, """ [] - p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + p0 = call_malloc_nursery( \ %(sdescr.size + tdescr.size + sdescr.size)d) setfield_gc(p0, 1234, descr=tiddescr) p1 = int_add(p0, %(sdescr.size)d) @@ -244,7 +253,7 @@ jump() """, """ [] - p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + p0 = call_malloc_nursery( \ %(adescr.basesize + 10 * adescr.itemsize)d) setfield_gc(p0, 4321, descr=tiddescr) setfield_gc(p0, 10, descr=alendescr) @@ -259,7 +268,7 @@ jump() """, """ [] - p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + p0 = call_malloc_nursery( \ %(sdescr.size + \ adescr.basesize + 10 * adescr.itemsize)d) setfield_gc(p0, 1234, descr=tiddescr) @@ -276,8 +285,7 @@ jump() """, """ [] - p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ - %(bdescr.basesize + 8)d) + p0 = call_malloc_nursery(%(bdescr.basesize + 8)d) setfield_gc(p0, 8765, descr=tiddescr) setfield_gc(p0, 6, descr=blendescr) jump() @@ -293,8 +301,7 @@ jump() """, """ [] - p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ - %(4 * (bdescr.basesize + 8))d) + p0 = call_malloc_nursery(%(4 * (bdescr.basesize + 8))d) setfield_gc(p0, 8765, descr=tiddescr) setfield_gc(p0, 5, descr=blendescr) p1 = int_add(p0, %(bdescr.basesize + 8)d) @@ -317,7 +324,7 @@ jump() """, """ [] - p0 = call_malloc_nursery(ConstClass(malloc_nursery), %(4*WORD)d) + p0 = call_malloc_nursery(%(4*WORD)d) setfield_gc(p0, 9000, descr=tiddescr) p1 = int_add(p0, %(2*WORD)d) setfield_gc(p1, 9000, descr=tiddescr) @@ -332,7 +339,8 @@ """, """ [i0] p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ - %(bdescr.tid)d, i0) + %(bdescr.tid)d, i0, \ + descr=malloc_array_descr) jump(i0) """) @@ -345,7 +353,8 @@ """, """ [] p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(bdescr.basesize + 100)d) + %(bdescr.basesize + 100)d, \ + descr=malloc_fixedsize_descr) setfield_gc(p0, 8765, descr=tiddescr) setfield_gc(p0, 100, descr=blendescr) jump() @@ -361,14 +370,14 @@ jump() """, """ [] - p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + p0 = call_malloc_nursery( \ %(2 * (bdescr.basesize + 
104))d) setfield_gc(p0, 8765, descr=tiddescr) setfield_gc(p0, 101, descr=blendescr) p1 = int_add(p0, %(bdescr.basesize + 104)d) setfield_gc(p1, 8765, descr=tiddescr) setfield_gc(p1, 102, descr=blendescr) - p2 = call_malloc_nursery(ConstClass(malloc_nursery), \ + p2 = call_malloc_nursery( \ %(bdescr.basesize + 104)d) setfield_gc(p2, 8765, descr=tiddescr) setfield_gc(p2, 103, descr=blendescr) @@ -382,8 +391,7 @@ jump() """, """ [p1] - p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ - 104) # rounded up + p0 = call_malloc_nursery(104) # rounded up setfield_gc(p0, 9315, descr=tiddescr) setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) jump() @@ -397,7 +405,8 @@ jump() """, """ [p1] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102) + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102, \ + descr=malloc_fixedsize_descr) setfield_gc(p0, 9315, descr=tiddescr) setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) jump() @@ -413,7 +422,7 @@ jump() """, """ [i2] - p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + p0 = call_malloc_nursery( \ %(strdescr.basesize + 16 * strdescr.itemsize + \ unicodedescr.basesize + 10 * unicodedescr.itemsize)d) setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr) @@ -421,8 +430,10 @@ p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) setfield_gc(p1, 10, descr=unicodelendescr) - p2 = call_malloc_gc(ConstClass(malloc_unicode), i2) - p3 = call_malloc_gc(ConstClass(malloc_str), i2) + p2 = call_malloc_gc(ConstClass(malloc_unicode), i2, \ + descr=malloc_unicode_descr) + p3 = call_malloc_gc(ConstClass(malloc_str), i2, \ + descr=malloc_str_descr) jump() """) @@ -461,7 +472,7 @@ jump() """, """ [i2, p3] - p1 = call_malloc_nursery(ConstClass(malloc_nursery), \ + p1 = call_malloc_nursery( \ %(cdescr.basesize + 129 * cdescr.itemsize)d) setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 129, descr=clendescr) @@ -481,7 +492,7 @@ jump() """, """ [i2, p3] - p1 = call_malloc_nursery(ConstClass(malloc_nursery), \ + p1 = call_malloc_nursery( \ %(cdescr.basesize + 130 * cdescr.itemsize)d) setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 130, descr=clendescr) @@ -512,7 +523,7 @@ jump() """, """ [i2, p3] - p1 = call_malloc_nursery(ConstClass(malloc_nursery), \ + p1 = call_malloc_nursery( \ %(cdescr.basesize + 5 * cdescr.itemsize)d) setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 5, descr=clendescr) @@ -542,8 +553,7 @@ jump() """, """ [p1] - p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ - %(tdescr.size)d) + p0 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p0, 5678, descr=tiddescr) setfield_gc(p0, p1, descr=tzdescr) jump() @@ -558,8 +568,7 @@ jump() """, """ [] - p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ - %(tdescr.size + sdescr.size)d) + p0 = call_malloc_nursery(%(tdescr.size + sdescr.size)d) setfield_gc(p0, 5678, descr=tiddescr) p1 = int_add(p0, %(tdescr.size)d) setfield_gc(p1, 1234, descr=tiddescr) @@ -576,7 +585,7 @@ jump() """, """ [p1, i2] - p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ + p0 = call_malloc_nursery( \ %(cdescr.basesize + 5 * cdescr.itemsize)d) setfield_gc(p0, 8111, descr=tiddescr) setfield_gc(p0, 5, descr=clendescr) @@ -593,10 +602,10 @@ jump() """, """ [i0] - p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ - %(tdescr.size)d) + p0 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p0, 5678, descr=tiddescr) - p1 = call_malloc_gc(ConstClass(malloc_str), i0) + p1 = call_malloc_gc(ConstClass(malloc_str), i0, \ + 
descr=malloc_str_descr) cond_call_gc_wb(p0, p1, descr=wbdescr) setfield_raw(p0, p1, descr=tzdescr) jump() @@ -611,8 +620,7 @@ jump() """, """ [p1] - p0 = call_malloc_nursery(ConstClass(malloc_nursery), \ - %(tdescr.size)d) + p0 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p0, 5678, descr=tiddescr) label(p0, p1) cond_call_gc_wb(p0, p1, descr=wbdescr) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -509,7 +509,7 @@ #'OOSEND_PURE', # ootype operation 'CALL_PURE/*d', # removed before it's passed to the backend 'CALL_MALLOC_GC/*d', # like CALL, but NULL => propagate MemoryError - 'CALL_MALLOC_NURSERY/2d', # nursery malloc, const number of bytes, zeroed + 'CALL_MALLOC_NURSERY/1', # nursery malloc, const number of bytes, zeroed '_CALL_LAST', '_CANRAISE_LAST', # ----- end of can_raise operations ----- From noreply at buildbot.pypy.org Sun Dec 18 18:50:39 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 18:50:39 +0100 (CET) Subject: [pypy-commit] pypy default: (reported by amaury) Message-ID: <20111218175039.6569882009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50663:16d3f098e8ec Date: 2011-12-18 18:50 +0100 http://bitbucket.org/pypy/pypy/changeset/16d3f098e8ec/ Log: (reported by amaury) Test and fix: proxies used to force too many arguments. They should really force the "minimum" number of them, which means "usually 1 and sometimes 2" based on some half-random rule. Tested against CPython. diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -329,11 +329,16 @@ special_ops = {'repr': True, 'userdel': True, 'hash': True} for opname, _, arity, special_methods in ObjSpace.MethodTable: - if opname in special_ops: + if opname in special_ops or not special_methods: continue nonspaceargs = ", ".join(["w_obj%s" % i for i in range(arity)]) code = "def func(space, %s):\n '''%s'''\n" % (nonspaceargs, opname) - for i in range(arity): + assert arity >= len(special_methods) + forcing_count = len(special_methods) + if opname.startswith('inplace_'): + assert arity == 2 + forcing_count = arity + for i in range(forcing_count): code += " w_obj%s = force(space, w_obj%s)\n" % (i, i) code += " return space.%s(%s)" % (opname, nonspaceargs) exec py.code.Source(code).compile() diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ b/pypy/module/_weakref/test/test_weakref.py @@ -466,3 +466,44 @@ # No exception should be raised here gc.collect() + def test_add(self): + import _weakref + class A(object): + def __add__(self, other): + return other + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + a3 = p1 + p2 + assert a3 is a2 + + def test_inplace_add(self): + import _weakref + class A(object): + def __add__(self, other): + return other + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + p1 += p2 + assert p1 is a2 + + def test_setattr(self): + import _weakref + class A(object): + def __setitem__(self, key, value): + self.setkey = key + self.setvalue = value + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + p1[p2] = 42 + assert a1.setkey is p2 + assert a1.setvalue == 42 + # + p1[42] = p2 + assert a1.setkey == 42 + assert a1.setvalue is p2 From 
noreply at buildbot.pypy.org Sun Dec 18 19:34:21 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 18 Dec 2011 19:34:21 +0100 (CET) Subject: [pypy-commit] pypy py3k: Add str.maketrans() Message-ID: <20111218183421.43C9782009@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50664:540ce4e08539 Date: 2011-12-18 15:52 +0100 http://bitbucket.org/pypy/pypy/changeset/540ce4e08539/ Log: Add str.maketrans() diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -404,6 +404,24 @@ raises(TypeError, 'hello'.translate) + def test_maketrans(self): + assert 'abababc' == 'abababc'.translate({'b': ''}) + tbl = str.maketrans({'a': None, 'b': ''}) + assert 'c' == 'abababc'.translate(tbl) + tbl = str.maketrans('abc', 'xyz', 'd') + assert 'xyzzy' == 'abdcdcbdddd'.translate(tbl) + + raises(TypeError, str.maketrans) + raises(ValueError, str.maketrans, 'abc', 'defg') + raises(TypeError, str.maketrans, 2, 'def') + raises(TypeError, str.maketrans, 'abc', 2) + raises(TypeError, str.maketrans, 'abc', 'def', 2) + raises(ValueError, str.maketrans, {'xy': 2}) + raises(TypeError, str.maketrans, {(1,): 2}) + + raises(TypeError, 'hello'.translate) + raises(TypeError, 'abababc'.translate, 'abc', 'xyz') + def test_unicode_form_encoded_object(self): assert str(b'x', 'utf-8') == 'x' assert str(b'x', 'utf-8', 'strict') == 'x' diff --git a/pypy/objspace/std/unicodetype.py b/pypy/objspace/std/unicodetype.py --- a/pypy/objspace/std/unicodetype.py +++ b/pypy/objspace/std/unicodetype.py @@ -329,6 +329,90 @@ W_UnicodeObject.__init__(w_newobj, w_value._value) return w_newobj +def descr_maketrans(space, w_type, w_x, w_y=None, w_z=None): + """str.maketrans(x[, y[, z]]) -> dict (static method) + + Return a translation table usable for str.translate(). + If there is only one argument, it must be a dictionary mapping Unicode + ordinals (integers) or characters to Unicode ordinals, strings or None. + Character keys will be then converted to ordinals. + If there are two arguments, they must be strings of equal length, and + in the resulting dictionary, each character in x will be mapped to the + character at the same position in y. 
If there is a third argument, it + must be a string, whose characters will be mapped to None in the result.""" + + if space.is_w(w_y, space.w_None): + y = None + else: + y = space.unicode_w(w_y) + if space.is_w(w_z, space.w_None): + z = None + else: + z = space.unicode_w(w_z) + + w_new = space.newdict() + if y is not None: + # x must be a string too, of equal length + ylen = len(y) + try: + x = space.unicode_w(w_x) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + raise OperationError(space.w_TypeError, space.wrap( + "first maketrans argument must " + "be a string if there is a second argument")) + if len(x) != ylen: + raise OperationError(space.w_ValueError, space.wrap( + "the first two maketrans " + "arguments must have equal length")) + # create entries for translating chars in x to those in y + for i in range(len(x)): + w_key = space.newint(ord(x[i])) + w_value = space.newint(ord(y[i])) + space.setitem(w_new, w_key, w_value) + # create entries for deleting chars in z + if z is not None: + for i in range(len(z)): + w_key = space.newint(ord(z[i])) + space.setitem(w_new, w_key, space.w_None) + else: + # x must be a dict + if not space.is_w(space.type(w_x), space.w_dict): + raise OperationError(space.w_TypeError, space.wrap( + "if you give only one argument " + "to maketrans it must be a dict")) + # copy entries into the new dict, converting string keys to int keys + w_iter = space.call_method(w_x, "iteritems") + while True: + try: + w_item = space.next(w_iter) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break + w_key, w_value = space.unpackiterable(w_item, 2) + if space.isinstance_w(w_key, space.w_unicode): + # convert string keys to integer keys + key = space.unicode_w(w_key) + if len(key) != 1: + raise OperationError(space.w_ValueError, space.wrap( + "string keys in translate " + "table must be of length 1")) + w_key = space.newint(ord(key[0])) + else: + # just keep integer keys + try: + space.int_w(w_key) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + raise OperationError(space.w_TypeError, space.wrap( + "keys in translate table must " + "be strings or integers")) + space.setitem(w_new, w_key, w_value) + return w_new + # ____________________________________________________________ unicode_typedef = StdTypeDef("str", @@ -337,7 +421,8 @@ Create a new Unicode object from the given encoded string. encoding defaults to the current default string encoding. 
-errors can be 'strict', 'replace' or 'ignore' and defaults to 'strict'.''' +errors can be 'strict', 'replace' or 'ignore' and defaults to 'strict'.''', + maketrans = gateway.interp2app(descr_maketrans, as_classmethod=True), ) unicode_typedef.registermethods(globals()) From noreply at buildbot.pypy.org Sun Dec 18 19:34:22 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 18 Dec 2011 19:34:22 +0100 (CET) Subject: [pypy-commit] pypy py3k: Fixes in pyexpat module Message-ID: <20111218183422.71CC282009@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50665:924692e049ba Date: 2011-12-18 16:14 +0100 http://bitbucket.org/pypy/pypy/changeset/924692e049ba/ Log: Fixes in pyexpat module diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -606,11 +606,15 @@ # Parse methods - @unwrap_spec(data=str, isfinal=bool) - def Parse(self, space, data, isfinal=False): + @unwrap_spec(isfinal=bool) + def Parse(self, space, w_data, isfinal=False): """Parse(data[, isfinal]) Parse XML data. `isfinal' should be true at end of input.""" + if space.isinstance_w(w_data, space.w_bytes): + data = space.bytes_w(w_data) + else: + data = space.str_w(w_data) res = XML_Parse(self.itself, data, len(data), isfinal) if self._exc_info: e = self._exc_info @@ -779,7 +783,7 @@ Return a new XML parser object.""" if space.is_w(w_encoding, space.w_None): encoding = None - elif space.is_true(space.isinstance(w_encoding, space.w_str)): + elif space.is_true(space.isinstance(w_encoding, space.w_unicode)): encoding = space.str_w(w_encoding) else: type_name = space.type(w_encoding).getname(space) @@ -790,7 +794,7 @@ if space.is_w(w_namespace_separator, space.w_None): namespace_separator = 0 - elif space.is_true(space.isinstance(w_namespace_separator, space.w_str)): + elif space.is_true(space.isinstance(w_namespace_separator, space.w_unicode)): separator = space.str_w(w_namespace_separator) if len(separator) == 0: namespace_separator = 0 @@ -839,5 +843,5 @@ def ErrorString(space, code): """ErrorString(errno) -> string Returns string error for given number.""" - return space.wrapbytes(rffi.charp2str(XML_ErrorString(code))) + return space.wrap(rffi.charp2str(XML_ErrorString(code))) diff --git a/pypy/module/pyexpat/test/test_parser.py b/pypy/module/pyexpat/test/test_parser.py --- a/pypy/module/pyexpat/test/test_parser.py +++ b/pypy/module/pyexpat/test/test_parser.py @@ -42,7 +42,7 @@ import pyexpat for encoding_arg in (None, 'utf-8', 'iso-8859-1'): for namespace_arg in (None, '{'): - print encoding_arg, namespace_arg + print(encoding_arg, namespace_arg) p = pyexpat.ParserCreate(encoding_arg, namespace_arg) data = [] p.CharacterDataHandler = lambda s: data.append(s) @@ -83,7 +83,7 @@ def test_encoding_xml(self): # use one of the few encodings built-in in expat - xml = "caf\xe9" + xml = b"caf\xe9" import pyexpat p = pyexpat.ParserCreate() def gotText(text): @@ -93,7 +93,7 @@ p.Parse(xml) def test_explicit_encoding(self): - xml = "caf\xe9" + xml = b"caf\xe9" import pyexpat p = pyexpat.ParserCreate(encoding='iso-8859-1') def gotText(text): @@ -103,7 +103,7 @@ def test_python_encoding(self): # This name is not knonwn by expat - xml = "caf\xe9" + xml = b"caf\xe9" import pyexpat p = pyexpat.ParserCreate() def gotText(text): @@ -112,7 +112,7 @@ p.Parse(xml) def test_decode_error(self): - xml = 'Comment \xe7a va ? Tr\xe8s bien ?' + xml = b'Comment \xe7a va ? Tr\xe8s bien ?' 
import pyexpat p = pyexpat.ParserCreate() def f(*args): pass From noreply at buildbot.pypy.org Sun Dec 18 19:34:23 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 18 Dec 2011 19:34:23 +0100 (CET) Subject: [pypy-commit] pypy py3k: copy_reg is now named copyreg Message-ID: <20111218183423.9E05482009@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50666:0ef2cb5eb4a6 Date: 2011-12-18 17:01 +0100 http://bitbucket.org/pypy/pypy/changeset/0ef2cb5eb4a6/ Log: copy_reg is now named copyreg diff --git a/pypy/objspace/std/dicttype.py b/pypy/objspace/std/dicttype.py --- a/pypy/objspace/std/dicttype.py +++ b/pypy/objspace/std/dicttype.py @@ -137,7 +137,7 @@ This is of course not the standard way. XXX to do: remove this __reduce__ method and do - a registration with copy_reg, instead. + a registration with copyreg, instead. """ w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) diff --git a/pypy/objspace/std/itertype.py b/pypy/objspace/std/itertype.py --- a/pypy/objspace/std/itertype.py +++ b/pypy/objspace/std/itertype.py @@ -7,7 +7,7 @@ def descr_seqiter__reduce__(w_self, space): """ XXX to do: remove this __reduce__ method and do - a registration with copy_reg, instead. + a registration with copyreg, instead. """ # cpython does not support pickling iterators but stackless python do @@ -28,7 +28,7 @@ def descr_reverseseqiter__reduce__(w_self, space): """ XXX to do: remove this __reduce__ method and do - a registration with copy_reg, instead. + a registration with copyreg, instead. """ from pypy.objspace.std.iterobject import W_ReverseSeqIterObject assert isinstance(w_self, W_ReverseSeqIterObject) diff --git a/pypy/objspace/std/objecttype.py b/pypy/objspace/std/objecttype.py --- a/pypy/objspace/std/objecttype.py +++ b/pypy/objspace/std/objecttype.py @@ -136,8 +136,8 @@ app = gateway.applevel(r''' def reduce_1(obj, proto): - import copy_reg - return copy_reg._reduce_ex(obj, proto) + import copyreg + return copyreg._reduce_ex(obj, proto) def reduce_2(obj): cls = obj.__class__ @@ -180,8 +180,8 @@ else: dictitems = None - import copy_reg - newobj = copy_reg.__newobj__ + import copyreg + newobj = copyreg.__newobj__ args2 = (cls,) + args return newobj, args2, state, listitems, dictitems @@ -195,10 +195,10 @@ except KeyError: pass - import copy_reg - slotnames = copy_reg._slotnames(cls) + import copyreg + slotnames = copyreg._slotnames(cls) if not isinstance(slotnames, list) and slotnames is not None: - raise TypeError("copy_reg._slotnames didn't return a list or None") + raise TypeError("copyreg._slotnames didn't return a list or None") return slotnames ''', filename=__file__) diff --git a/pypy/translator/test/test_geninterp.py b/pypy/translator/test/test_geninterp.py --- a/pypy/translator/test/test_geninterp.py +++ b/pypy/translator/test/test_geninterp.py @@ -37,8 +37,8 @@ snippet_ad = """if 1: def import_func(): - import copy_reg - return copy_reg._reconstructor.func_code.co_name + import copyreg + return copyreg._reconstructor.func_code.co_name def import_sys_func(): import sys From noreply at buildbot.pypy.org Sun Dec 18 19:34:24 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 18 Dec 2011 19:34:24 +0100 (CET) Subject: [pypy-commit] pypy py3k: Fix some syntax errors in lib_pypy Message-ID: <20111218183424.CAF7682009@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50667:14e9568e4282 Date: 2011-12-18 17:05 +0100 http://bitbucket.org/pypy/pypy/changeset/14e9568e4282/ Log: 
Fix some syntax errors in lib_pypy diff --git a/lib_pypy/_rpyc_support.py b/lib_pypy/_rpyc_support.py --- a/lib_pypy/_rpyc_support.py +++ b/lib_pypy/_rpyc_support.py @@ -7,7 +7,7 @@ try: conn = connect("localhost", DEFAULT_SERVER_PORT, SlaveService, config=dict(call_by_value_for_builtin_mutable_types=True)) -except socket.error, e: +except socket.error as e: raise ImportError("Error while connecting: " + str(e)) diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -604,7 +604,7 @@ def authorizer(userdata, action, arg1, arg2, dbname, source): try: return int(callback(action, arg1, arg2, dbname, source)) - except Exception, e: + except Exception as e: return SQLITE_DENY c_authorizer = AUTHORIZER(authorizer) @@ -651,7 +651,7 @@ if not aggregate_ptr[0]: try: aggregate = cls() - except Exception, e: + except Exception as e: msg = ("user-defined aggregate's '__init__' " "method raised error") sqlite.sqlite3_result_error(context, msg, len(msg)) @@ -665,7 +665,7 @@ params = _convert_params(context, argc, c_params) try: aggregate.step(*params) - except Exception, e: + except Exception as e: msg = ("user-defined aggregate's 'step' " "method raised error") sqlite.sqlite3_result_error(context, msg, len(msg)) @@ -681,7 +681,7 @@ aggregate = self.aggregate_instances[aggregate_ptr[0]] try: val = aggregate.finalize() - except Exception, e: + except Exception as e: msg = ("user-defined aggregate's 'finalize' " "method raised error") sqlite.sqlite3_result_error(context, msg, len(msg)) @@ -788,7 +788,7 @@ if self.statement.kind == DML: self.connection._begin() else: - raise ProgrammingError, "executemany is only for DML statements" + raise ProgrammingError("executemany is only for DML statements") self.rowcount = 0 for params in many_params: @@ -912,7 +912,7 @@ def __init__(self, connection, sql): self.statement = None if not isinstance(sql, str): - raise ValueError, "sql must be a string" + raise ValueError("sql must be a string") self.con = connection self.sql = sql # DEBUG ONLY first_word = self._statement_kind = sql.lstrip().split(" ")[0].upper() @@ -941,8 +941,8 @@ raise self.con._get_exception(ret) self.con._remember_statement(self) if _check_remaining_sql(next_char.value): - raise Warning, "One and only one statement required: %r" % ( - next_char.value,) + raise Warning("One and only one statement required: %r" % + (next_char.value,)) # sql_char should remain alive until here self._build_row_cast_map() @@ -1013,7 +1013,8 @@ elif type(param) is buffer: sqlite.sqlite3_bind_blob(self.statement, idx, str(param), len(param), SQLITE_TRANSIENT) else: - raise InterfaceError, "parameter type %s is not supported" % str(type(param)) + raise InterfaceError("parameter type %s is not supported" % + type(param)) def set_params(self, params): ret = sqlite.sqlite3_reset(self.statement) @@ -1042,11 +1043,11 @@ for idx in range(1, sqlite.sqlite3_bind_parameter_count(self.statement) + 1): param_name = sqlite.sqlite3_bind_parameter_name(self.statement, idx) if param_name is None: - raise ProgrammingError, "need named parameters" + raise ProgrammingError("need named parameters") param_name = param_name[1:] try: param = params[param_name] - except KeyError, e: + except KeyError as e: raise ProgrammingError("missing parameter '%s'" %param) self.set_param(idx, param) @@ -1257,7 +1258,7 @@ params = _convert_params(context, nargs, c_params) try: val = real_cb(*params) - except Exception, e: + except Exception as e: msg = "user-defined function raised exception" 
sqlite.sqlite3_result_error(context, msg, len(msg)) else: diff --git a/lib_pypy/dbm.py b/lib_pypy/dbm.py --- a/lib_pypy/dbm.py +++ b/lib_pypy/dbm.py @@ -165,7 +165,7 @@ 'c': os.O_RDWR | os.O_CREAT, 'n': os.O_RDWR | os.O_CREAT | os.O_TRUNC, }[flag] - except KeyError, e: + except KeyError as e: raise error("arg 2 to open should be 'r', 'w', 'c', or 'n'") a_db = getattr(lib, funcs['open'])(filename, openflag, mode) diff --git a/lib_pypy/disassembler.py b/lib_pypy/disassembler.py --- a/lib_pypy/disassembler.py +++ b/lib_pypy/disassembler.py @@ -82,7 +82,7 @@ print "Disassembly of %s:" % name try: dis(x1) - except TypeError, msg: + except TypeError as msg: print "Sorry:", msg print elif hasattr(x, 'co_code'): diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -77,7 +77,7 @@ try: unbound_method = getattr(_continulet, methodname) args = unbound_method(current, *args, to=target) - except GreenletExit, e: + except GreenletExit as e: args = (e,) finally: _tls.current = current @@ -139,6 +139,6 @@ def _greenlet_throw(greenlet, exc, value, tb): _tls.current = greenlet try: - raise exc, value, tb + raise value.with_traceback(tb) finally: _continuation.permute(greenlet, greenlet.parent) From noreply at buildbot.pypy.org Sun Dec 18 19:34:26 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 18 Dec 2011 19:34:26 +0100 (CET) Subject: [pypy-commit] pypy py3k: All types are now printed as " Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50668:81bc80bf4f62 Date: 2011-12-18 17:13 +0100 http://bitbucket.org/pypy/pypy/changeset/81bc80bf4f62/ Log: All types are now printed as "" - assert repr(type(type)) == "" - assert repr(complex) == "" - assert repr(property) == "" - assert repr(TypeError) == "" + assert repr(type(type)) == "" + assert repr(complex) == "" + assert repr(property) == "" + assert repr(TypeError) == "" def test_invalid_mro(self): class A(object): diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -872,19 +872,14 @@ def repr__Type(space, w_obj): w_mod = w_obj.get_module() - if not space.isinstance_w(w_mod, space.w_str): + if not space.isinstance_w(w_mod, space.w_unicode): mod = None else: mod = space.str_w(w_mod) - if (not w_obj.is_heaptype() or - (mod == '__builtin__' or mod == 'exceptions')): - kind = 'type' + if mod is not None and mod !='builtins': + return space.wrap("" % (mod, w_obj.name)) else: - kind = 'class' - if mod is not None and mod !='builtins': - return space.wrap("<%s '%s.%s'>" % (kind, mod, w_obj.name)) - else: - return space.wrap("<%s '%s'>" % (kind, w_obj.name)) + return space.wrap("" % (w_obj.name)) def getattr__Type_ANY(space, w_type, w_name): name = space.str_w(w_name) From noreply at buildbot.pypy.org Sun Dec 18 19:34:27 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 18 Dec 2011 19:34:27 +0100 (CET) Subject: [pypy-commit] pypy py3k: fix keyword argument for str(object=) Message-ID: <20111218183427.2E67282009@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50669:1aa5643d7e26 Date: 2011-12-18 17:16 +0100 http://bitbucket.org/pypy/pypy/changeset/1aa5643d7e26/ Log: fix keyword argument for str(object=) diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -283,6 +283,7 @@ assert str() == '' 
assert str(None) == 'None' assert str(123) == '123' + assert str(object=123) == '123' assert str([2, 3]) == '[2, 3]' class U(str): pass diff --git a/pypy/objspace/std/unicodetype.py b/pypy/objspace/std/unicodetype.py --- a/pypy/objspace/std/unicodetype.py +++ b/pypy/objspace/std/unicodetype.py @@ -292,12 +292,12 @@ raise OperationError(space.w_TypeError, space.wrap(msg)) return w_res -def descr_new_(space, w_unicodetype, w_string=u'', w_encoding=None, w_errors=None): +def descr_new_(space, w_unicodetype, w_object=u'', w_encoding=None, w_errors=None): # NB. the default value of w_obj is really a *wrapped* empty string: # there is gateway magic at work from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.ropeunicodeobject import W_RopeUnicodeObject - w_obj = w_string + w_obj = w_object encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) # convoluted logic for the case when unicode subclass has a __unicode__ From noreply at buildbot.pypy.org Sun Dec 18 19:34:28 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 18 Dec 2011 19:34:28 +0100 (CET) Subject: [pypy-commit] pypy py3k: Use the same check (and error message) for bool() and len(), Message-ID: <20111218183428.59C3D82009@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50670:4508c3d16324 Date: 2011-12-18 17:43 +0100 http://bitbucket.org/pypy/pypy/changeset/4508c3d16324/ Log: Use the same check (and error message) for bool() and len(), when the same __len__ method is used for both. diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -236,14 +236,11 @@ if space.is_w(w_res, space.w_True): return True w_restype = space.type(w_res) - # Note there is no check for bool here because the only possible - # instances of bool are w_False and w_True, which are checked above. 
- if (space.is_w(w_restype, space.w_int) or - space.is_w(w_restype, space.w_long)): - return space.int_w(w_res) != 0 + if method == '__len__': + return space._check_len_result(w_res) != 0 else: - msg = "%s should return bool or integer" % (method,) - raise OperationError(space.w_TypeError, space.wrap(msg)) + raise OperationError(space.w_TypeError, space.wrap( + "__bool__ should return bool")) def nonzero(space, w_obj): if space.is_true(w_obj): diff --git a/pypy/objspace/test/test_descroperation.py b/pypy/objspace/test/test_descroperation.py --- a/pypy/objspace/test/test_descroperation.py +++ b/pypy/objspace/test/test_descroperation.py @@ -677,5 +677,20 @@ l = len(X(X(2))) assert l == 2 and type(l) is int + def test_sane_len(self): + # this test just tests our assumptions about __len__ + # this will start failing if __len__ changes assertions + for badval in ['illegal', -1, 1 << 32]: + class A: + def __len__(self): + return badval + try: + bool(A()) + except (Exception) as e_bool: + try: + len(A()) + except (Exception) as e_len: + assert str(e_bool) == str(e_len) + class AppTestWithBuiltinShortcut(AppTest_Descroperation): OPTIONS = {'objspace.std.builtinshortcut': True} From noreply at buildbot.pypy.org Sun Dec 18 19:34:29 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 18 Dec 2011 19:34:29 +0100 (CET) Subject: [pypy-commit] pypy py3k: Fix tests in test_descroperation Message-ID: <20111218183429.8478782009@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50671:4f0c02a4d39e Date: 2011-12-18 18:18 +0100 http://bitbucket.org/pypy/pypy/changeset/4f0c02a4d39e/ Log: Fix tests in test_descroperation diff --git a/pypy/objspace/test/test_descroperation.py b/pypy/objspace/test/test_descroperation.py --- a/pypy/objspace/test/test_descroperation.py +++ b/pypy/objspace/test/test_descroperation.py @@ -98,16 +98,10 @@ a1 *= b assert a1 == "imul" - if base is object: - assert a - b == "B's rsub" - else: - assert a - b == "sub" + assert a - b == "B's rsub" assert b - a == "B's sub" assert b - b == "B's sub" - if base is object: - assert a ** b == "B's rpow" - else: - assert a ** b == "pow" + assert a ** b == "B's rpow" assert b ** a == "B's pow" assert b ** b == "B's pow" assert -b == "B's neg" @@ -128,32 +122,28 @@ def test_getslice(self): class Sq(object): def __getitem__(self, key): - start, stop = key - return (start, stop) + return key.start, key.stop def __len__(self): return 100 sq = Sq() assert sq[1:3] == (1,3) - slice_min, slice_max = sq[:] - assert slice_min == 0 - assert slice_max >= 2**31-1 - assert sq[1:] == (1, slice_max) - assert sq[:3] == (0, 3) - assert sq[:] == (0, slice_max) + assert sq[:] == (None, None) + assert sq[1:] == (1, None) + assert sq[:3] == (None, 3) + assert sq[:] == (None, None) # negative indices - assert sq[-1:3] == (99, 3) - assert sq[1:-3] == (1, 97) - assert sq[-1:-3] == (99, 97) - # extended slice syntax always uses __getitem__() - assert sq[::] == "booh" + assert sq[-1:3] == (-1, 3) + assert sq[1:-3] == (1, -3) + assert sq[-1:-3] == (-1, -3) + # extended slice syntax also uses __getitem__() + assert sq[::] == (None, None) def test_setslice(self): class Sq(object): def __setitem__(self, key, value): - start, stop = key - ops.append((start, stop, value)) + ops.append((key.start, key.stop, value)) def __len__(self): return 100 @@ -163,21 +153,18 @@ sq[12:] = 'world' sq[:-1] = 'spam' sq[:] = 'egg' - slice_max = ops[-1][1] - assert slice_max >= 2**31-1 assert ops == [ - (95, 3, 'hello'), - (12, slice_max, 'world'), - (0, 99, 
'spam'), - (0, slice_max, 'egg'), + (-5, 3, 'hello'), + (12, None, 'world'), + (None, -1, 'spam'), + (None, None, 'egg'), ] def test_delslice(self): class Sq(object): def __delitem__(self, key): - start, stop = key - ops.append((start, stop)) + ops.append((key.start, key.stop)) def __len__(self): return 100 @@ -187,37 +174,32 @@ del sq[-12:] del sq[:1] del sq[:] - slice_max = ops[-1][1] - assert slice_max >= 2**31-1 assert ops == [ - (5, 97), - (88, slice_max), - (0, 1), - (0, slice_max), + (5, -3), + (-12, None), + (None,1), + (None,None), ] def test_getslice_nolength(self): class Sq(object): def __getitem__(self, key): - start, stop = key - return (start, stop) + return key.start, key.stop sq = Sq() assert sq[1:3] == (1,3) - slice_min, slice_max = sq[:] - assert slice_min == 0 - assert slice_max >= 2**31-1 - assert sq[1:] == (1, slice_max) - assert sq[:3] == (0, 3) - assert sq[:] == (0, slice_max) + assert sq[:] == (None, None) + assert sq[1:] == (1, None) + assert sq[:3] == (None, 3) + assert sq[:] == (None, None) # negative indices, but no __len__ assert sq[-1:3] == (-1, 3) assert sq[1:-3] == (1, -3) assert sq[-1:-3] == (-1, -3) - # extended slice syntax always uses __getitem__() - assert sq[::] == "booh" + # extended slice syntax also uses __getitem__() + assert sq[::] == (None, None) def test_ipow(self): x = 2 @@ -581,7 +563,7 @@ def test_mod_failure(self): try: [] % 3 - except TypeError, e: + except TypeError as e: assert '%' in str(e) else: assert False, "did not raise" @@ -620,8 +602,8 @@ if cls is B: return True return False - class A: - __metaclass__ = Meta + A = Meta('A', (), {}) # like "class A(metaclass=Meta)", but + # Python2 cannot parse this class B(A): pass a = A() @@ -637,11 +619,11 @@ assert issubclass(B, B) assert issubclass(23, B) - def test_truth_of_long(self): + def test_truth_of_int(self): class X(object): - def __len__(self): return 1L + def __len__(self): return 1 __bool__ = __len__ - assert X() + raises(TypeError, bool, X()) del X.__bool__ assert X() From noreply at buildbot.pypy.org Sun Dec 18 19:34:30 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 18 Dec 2011 19:34:30 +0100 (CET) Subject: [pypy-commit] pypy py3k: No need to test oldstyle classes... Message-ID: <20111218183430.AEFE982009@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50672:70a99765cd99 Date: 2011-12-18 18:20 +0100 http://bitbucket.org/pypy/pypy/changeset/70a99765cd99/ Log: No need to test oldstyle classes... 
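(A minimal illustration, not part of changeset r50673, of why these duplicated test branches can be dropped: under Python 3 every class is a new-style class, so the separate old-style/new-style cases the tests used to exercise collapse into one. The class names below are invented for the example.)

    class Classic:               # would have been a classic class on Python 2
        pass

    class Modern(object):        # the explicit base changes nothing on Python 3
        pass

    assert type(Classic) is type                   # both are instances of 'type'
    assert type(Modern) is type
    assert Classic.__mro__ == (Classic, object)    # both inherit from object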
diff --git a/pypy/objspace/test/test_descroperation.py b/pypy/objspace/test/test_descroperation.py --- a/pypy/objspace/test/test_descroperation.py +++ b/pypy/objspace/test/test_descroperation.py @@ -38,86 +38,83 @@ cls.space = conftest.gettestobjspace(**cls.OPTIONS) def test_special_methods(self): - class OldStyle: + class A: + def __lt__(self, other): + return "lt" + def __imul__(self, other): + return "imul" + def __sub__(self, other): + return "sub" + def __rsub__(self, other): + return "rsub" + def __pow__(self, other): + return "pow" + def __rpow__(self, other): + return "rpow" + def __neg__(self): + return "neg" + a = A() + assert (a < 5) == "lt" + assert (object() > a) == "lt" + a1 = a + a1 *= 4 + assert a1 == "imul" + assert a - 2 == "sub" + assert a - object() == "sub" + assert 2 - a == "rsub" + assert object() - a == "rsub" + assert a ** 2 == "pow" + assert a ** object() == "pow" + assert 2 ** a == "rpow" + assert object() ** a == "rpow" + assert -a == "neg" + + class B(A): + def __lt__(self, other): + return "B's lt" + def __imul__(self, other): + return "B's imul" + def __sub__(self, other): + return "B's sub" + def __rsub__(self, other): + return "B's rsub" + def __pow__(self, other): + return "B's pow" + def __rpow__(self, other): + return "B's rpow" + def __neg__(self): + return "B's neg" + + b = B() + assert (a < b) == "lt" + assert (b > a) == "lt" + b1 = b + b1 *= a + assert b1 == "B's imul" + a1 = a + a1 *= b + assert a1 == "imul" + + assert a - b == "B's rsub" + assert b - a == "B's sub" + assert b - b == "B's sub" + assert a ** b == "B's rpow" + assert b ** a == "B's pow" + assert b ** b == "B's pow" + assert -b == "B's neg" + + class C(B): pass - for base in (object, OldStyle,): - class A(base): - def __lt__(self, other): - return "lt" - def __imul__(self, other): - return "imul" - def __sub__(self, other): - return "sub" - def __rsub__(self, other): - return "rsub" - def __pow__(self, other): - return "pow" - def __rpow__(self, other): - return "rpow" - def __neg__(self): - return "neg" - a = A() - assert (a < 5) == "lt" - assert (object() > a) == "lt" - a1 = a - a1 *= 4 - assert a1 == "imul" - assert a - 2 == "sub" - assert a - object() == "sub" - assert 2 - a == "rsub" - assert object() - a == "rsub" - assert a ** 2 == "pow" - assert a ** object() == "pow" - assert 2 ** a == "rpow" - assert object() ** a == "rpow" - assert -a == "neg" + c = C() + assert c - 1 == "B's sub" + assert 1 - c == "B's rsub" + assert c - b == "B's sub" + assert b - c == "B's sub" - class B(A): - def __lt__(self, other): - return "B's lt" - def __imul__(self, other): - return "B's imul" - def __sub__(self, other): - return "B's sub" - def __rsub__(self, other): - return "B's rsub" - def __pow__(self, other): - return "B's pow" - def __rpow__(self, other): - return "B's rpow" - def __neg__(self): - return "B's neg" - - b = B() - assert (a < b) == "lt" - assert (b > a) == "lt" - b1 = b - b1 *= a - assert b1 == "B's imul" - a1 = a - a1 *= b - assert a1 == "imul" - - assert a - b == "B's rsub" - assert b - a == "B's sub" - assert b - b == "B's sub" - assert a ** b == "B's rpow" - assert b ** a == "B's pow" - assert b ** b == "B's pow" - assert -b == "B's neg" - - class C(B): - pass - c = C() - assert c - 1 == "B's sub" - assert 1 - c == "B's rsub" - assert c - b == "B's sub" - assert b - c == "B's sub" - - assert c ** 1 == "B's pow" - assert 1 ** c == "B's rpow" - assert c ** b == "B's pow" - assert b ** c == "B's pow" + assert c ** 1 == "B's pow" + assert 1 ** c == "B's rpow" + assert c ** b == 
"B's pow" + assert b ** c == "B's pow" def test_getslice(self): class Sq(object): From noreply at buildbot.pypy.org Sun Dec 18 19:34:31 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 18 Dec 2011 19:34:31 +0100 (CET) Subject: [pypy-commit] pypy py3k: Kill more oldstyle tests Message-ID: <20111218183431.D8CFC82009@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50673:506acf2ebd22 Date: 2011-12-18 18:24 +0100 http://bitbucket.org/pypy/pypy/changeset/506acf2ebd22/ Log: Kill more oldstyle tests diff --git a/pypy/objspace/test/test_descroperation.py b/pypy/objspace/test/test_descroperation.py --- a/pypy/objspace/test/test_descroperation.py +++ b/pypy/objspace/test/test_descroperation.py @@ -412,7 +412,6 @@ assert l == [B, A, B, A] def test_rich_comparison(self): - # Old-style class A: def __init__(self, a): self.a = a @@ -421,32 +420,11 @@ class B: def __init__(self, a): self.a = a - # New-style - class C(object): - def __init__(self, a): - self.a = a - def __eq__(self, other): - return self.a == other.a - class D(object): - def __init__(self, a): - self.a = a assert A(1) == B(1) assert B(1) == A(1) - assert A(1) == C(1) - assert C(1) == A(1) - assert A(1) == D(1) - assert D(1) == A(1) - assert C(1) == D(1) - assert D(1) == C(1) assert not(A(1) == B(2)) assert not(B(1) == A(2)) - assert not(A(1) == C(2)) - assert not(C(1) == A(2)) - assert not(A(1) == D(2)) - assert not(D(1) == A(2)) - assert not(C(1) == D(2)) - assert not(D(1) == C(2)) def test_partial_ordering(self): class A(object): @@ -527,7 +505,6 @@ assert (D() >= A()) == 'D:A.ge' def test_addition(self): - # Old-style class A: def __init__(self, a): self.a = a @@ -537,25 +514,9 @@ class B: def __init__(self, a): self.a = a - # New-style - class C(object): - def __init__(self, a): - self.a = a - def __add__(self, other): - return self.a + other.a - __radd__ = __add__ - class D(object): - def __init__(self, a): - self.a = a assert A(1) + B(2) == 3 assert B(1) + A(2) == 3 - assert A(1) + C(2) == 3 - assert C(1) + A(2) == 3 - assert A(1) + D(2) == 3 - assert D(1) + A(2) == 3 - assert C(1) + D(2) == 3 - assert D(1) + C(2) == 3 def test_mod_failure(self): try: From noreply at buildbot.pypy.org Sun Dec 18 19:34:33 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 18 Dec 2011 19:34:33 +0100 (CET) Subject: [pypy-commit] pypy py3k: hg merge default Message-ID: <20111218183433.1245482009@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50674:e8bdadce915a Date: 2011-12-18 19:33 +0100 http://bitbucket.org/pypy/pypy/changeset/e8bdadce915a/ Log: hg merge default diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -329,11 +329,16 @@ special_ops = {'repr': True, 'userdel': True, 'hash': True} for opname, _, arity, special_methods in ObjSpace.MethodTable: - if opname in special_ops: + if opname in special_ops or not special_methods: continue nonspaceargs = ", ".join(["w_obj%s" % i for i in range(arity)]) code = "def func(space, %s):\n '''%s'''\n" % (nonspaceargs, opname) - for i in range(arity): + assert arity >= len(special_methods) + forcing_count = len(special_methods) + if opname.startswith('inplace_'): + assert arity == 2 + forcing_count = arity + for i in range(forcing_count): code += " w_obj%s = force(space, w_obj%s)\n" % (i, i) code += " return space.%s(%s)" % (opname, nonspaceargs) exec 
py.code.Source(code).compile() diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ b/pypy/module/_weakref/test/test_weakref.py @@ -466,3 +466,44 @@ # No exception should be raised here gc.collect() + def test_add(self): + import _weakref + class A(object): + def __add__(self, other): + return other + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + a3 = p1 + p2 + assert a3 is a2 + + def test_inplace_add(self): + import _weakref + class A(object): + def __add__(self, other): + return other + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + p1 += p2 + assert p1 is a2 + + def test_setattr(self): + import _weakref + class A(object): + def __setitem__(self, key, value): + self.setkey = key + self.setvalue = value + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + p1[p2] = 42 + assert a1.setkey is p2 + assert a1.setvalue == 42 + # + p1[42] = p2 + assert a1.setkey == 42 + assert a1.setvalue is p2 diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -53,7 +53,6 @@ i = start for j in range(arr.size): arr[j] = i - j += 1 i += step return arr diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -17,13 +17,6 @@ g() log = self.run(main, [500]) - # XXX XXX this test fails so far because of a detail that - # changed with jit-simplify-backendintf. We should try to - # think of a way to be more resistent against such details. - # The issue is that we now get one Tracing, then go back - # to the interpreter hoping to immediately run the JITted - # code; but instead, we Trace again, just because another - # counter was also about to reach its limit... loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ i16 = force_token() @@ -34,7 +27,7 @@ jump(..., descr=...) """) assert loop.match_by_id("subtract", """ - setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me + setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me i2 = int_sub_ovf(i1, 42) guard_no_overflow(descr=...) 
""") diff --git a/pypy/tool/gcc_cache.py b/pypy/tool/gcc_cache.py --- a/pypy/tool/gcc_cache.py +++ b/pypy/tool/gcc_cache.py @@ -11,9 +11,6 @@ # Import 'platform' every time, the compiler may have been changed from pypy.translator.platform import platform cache_dir = cache_dir_root.join(cachename).ensure(dir=1) - c_files.extend([py.path.local(f) for f in eci.separate_module_files]) - eci = ExternalCompilationInfo(**eci._copy_attributes()) - eci.separate_module_files = () filecontents = [c_file.read() for c_file in c_files] key = repr((filecontents, eci, platform.key())) hash = md5(key).hexdigest() From noreply at buildbot.pypy.org Sun Dec 18 19:43:22 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 18 Dec 2011 19:43:22 +0100 (CET) Subject: [pypy-commit] pypy py3k: Fix translation Message-ID: <20111218184322.A63F382009@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50675:a2fe74325124 Date: 2011-12-18 19:43 +0100 http://bitbucket.org/pypy/pypy/changeset/a2fe74325124/ Log: Fix translation diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -631,8 +631,7 @@ Parse XML data from file-like object.""" # XXX not the more efficient method w_data = space.call_method(w_file, 'read') - data = space.str_w(w_data) - return self.Parse(space, data, isfinal=True) + return self.Parse(space, w_data, isfinal=True) @unwrap_spec(base=str) def SetBase(self, space, base): From noreply at buildbot.pypy.org Sun Dec 18 19:54:33 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 19:54:33 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: Start to kill stuff in the x86 backend :-) Message-ID: <20111218185433.AC68982009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50676:b21c37068b7c Date: 2011-12-18 19:03 +0100 http://bitbucket.org/pypy/pypy/changeset/b21c37068b7c/ Log: Start to kill stuff in the x86 backend :-) diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -91,6 +91,9 @@ def is_pointer_field(self): return self.flag == FLAG_POINTER + def is_float_field(self): + return self.flag == FLAG_FLOAT + def is_field_signed(self): return self.flag == FLAG_SIGNED @@ -163,9 +166,15 @@ def is_array_of_pointers(self): return self.flag == FLAG_POINTER + def is_array_of_floats(self): + return self.flag == FLAG_FLOAT + def is_item_signed(self): return self.flag == FLAG_SIGNED + def is_array_of_structs(self): + return self.flag == FLAG_STRUCT + def repr_of_descr(self): return '' % (self.flag, self.itemsize) @@ -208,6 +217,12 @@ def sort_key(self): return self.fielddescr.sort_key() + def is_pointer_field(self): + return self.fielddescr.is_pointer_field() + + def is_float_field(self): + return self.fielddescr.is_float_field() + def repr_of_descr(self): return '' % self.fielddescr.repr_of_descr() diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -111,6 +111,7 @@ fielddescr_tid = None str_type_id = 0 unicode_type_id = 0 + get_malloc_slowpath_addr = None @classmethod def configure_boehm_once(cls): @@ -837,6 +838,9 @@ def freeing_block(self, start, stop): self.gcrootmap.freeing_block(start, stop) + def get_malloc_slowpath_addr(self): + return self.c_malloc_nursery_fn.value + 
# ____________________________________________________________ def get_ll_description(gcdescr, translator=None, rtyper=None): diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -70,10 +70,6 @@ self.cpu = cpu self.verbose = False self.rtyper = cpu.rtyper - self.malloc_func_addr = 0 - self.malloc_array_func_addr = 0 - self.malloc_str_func_addr = 0 - self.malloc_unicode_func_addr = 0 self.fail_boxes_int = values_array(lltype.Signed, failargs_limit) self.fail_boxes_ptr = values_array(llmemory.GCREF, failargs_limit) self.fail_boxes_float = values_array(longlong.FLOATSTORAGE, @@ -108,20 +104,6 @@ # the address of the function called by 'new' gc_ll_descr = self.cpu.gc_ll_descr gc_ll_descr.initialize() - ll_new = gc_ll_descr.get_funcptr_for_new() - self.malloc_func_addr = rffi.cast(lltype.Signed, ll_new) - if gc_ll_descr.get_funcptr_for_newarray is not None: - ll_new_array = gc_ll_descr.get_funcptr_for_newarray() - self.malloc_array_func_addr = rffi.cast(lltype.Signed, - ll_new_array) - if gc_ll_descr.get_funcptr_for_newstr is not None: - ll_new_str = gc_ll_descr.get_funcptr_for_newstr() - self.malloc_str_func_addr = rffi.cast(lltype.Signed, - ll_new_str) - if gc_ll_descr.get_funcptr_for_newunicode is not None: - ll_new_unicode = gc_ll_descr.get_funcptr_for_newunicode() - self.malloc_unicode_func_addr = rffi.cast(lltype.Signed, - ll_new_unicode) self.memcpy_addr = self.cpu.cast_ptr_to_int(support.memcpy_fn) self._build_failure_recovery(False) self._build_failure_recovery(True) @@ -1357,46 +1339,10 @@ self.mc.SHR_ri(resloc.value, 7) self.mc.AND_ri(resloc.value, 1) - def genop_new_with_vtable(self, op, arglocs, result_loc): - assert result_loc is eax - loc_vtable = arglocs[-1] - assert isinstance(loc_vtable, ImmedLoc) - arglocs = arglocs[:-1] - self.call(self.malloc_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - self.set_vtable(eax, loc_vtable) + # ---------- - def set_vtable(self, loc, loc_vtable): - if self.cpu.vtable_offset is not None: - assert isinstance(loc, RegLoc) - assert isinstance(loc_vtable, ImmedLoc) - self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) - - def set_new_array_length(self, loc, ofs_length, loc_num_elem): - assert isinstance(loc, RegLoc) - assert isinstance(loc_num_elem, ImmedLoc) - self.mc.MOV(mem(loc, ofs_length), loc_num_elem) - - # XXX genop_new is abused for all varsized mallocs with Boehm, for now - # (instead of genop_new_array, genop_newstr, genop_newunicode) - def genop_new(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_new_array(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_array_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_newstr(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_str_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_newunicode(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_unicode_func_addr, arglocs, eax) + def genop_call_malloc_gc(self, op, arglocs, result_loc): + self.genop_call(op, arglocs, result_loc) self.propagate_memoryerror_if_eax_is_null() def propagate_memoryerror_if_eax_is_null(self): @@ -2065,6 +2011,8 @@ self._genop_call(op, arglocs, resloc, force_index) def _genop_call(self, op, arglocs, 
resloc, force_index): + from pypy.jit.backend.llsupport.descr import CallDescr + sizeloc = arglocs[0] assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value @@ -2079,13 +2027,16 @@ else: tmp = eax + descr = op.getdescr() + assert isinstance(descr, CallDescr) + self._emit_call(force_index, x, arglocs, 3, tmp=tmp, - argtypes=op.getdescr().get_arg_types(), - callconv=op.getdescr().get_call_conv()) + argtypes=descr.get_arg_types(), + callconv=descr.get_call_conv()) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.type == FLOAT: # a float or a long long return - if op.getdescr().get_return_type() == 'L': + if descr.get_result_type() == 'L': self.mc.MOV_br(resloc.value, eax.value) # long long self.mc.MOV_br(resloc.value + 4, edx.value) # XXX should ideally not move the result on the stack, @@ -2094,7 +2045,7 @@ # can just be always a stack location else: self.mc.FSTPL_b(resloc.value) # float return - elif op.getdescr().get_return_type() == 'S': + elif descr.get_result_type() == 'S': # singlefloat return assert resloc is eax if IS_X86_32: diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -16,8 +16,8 @@ from pypy.jit.codewriter import heaptracker, longlong from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.metainterp.resoperation import rop -from pypy.jit.backend.llsupport.descr import BaseFieldDescr, BaseArrayDescr -from pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr +from pypy.jit.backend.llsupport.descr import FieldDescr, ArrayDescr +from pypy.jit.backend.llsupport.descr import CallDescr, SizeDescr from pypy.jit.backend.llsupport.descr import InteriorFieldDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox @@ -870,9 +870,9 @@ def _consider_call(self, op, guard_not_forced_op=None): calldescr = op.getdescr() - assert isinstance(calldescr, BaseCallDescr) + assert isinstance(calldescr, CallDescr) assert len(calldescr.arg_classes) == op.numargs() - 1 - size = calldescr.get_result_size(self.translate_support_code) + size = calldescr.get_result_size() sign = calldescr.is_result_signed() if sign: sign_loc = imm1 @@ -917,12 +917,15 @@ consider_call_release_gil = consider_call_may_force + def consider_call_malloc_gc(self, op): + self._consider_call(op) + def consider_call_assembler(self, op, guard_op): descr = op.getdescr() assert isinstance(descr, JitCellToken) jd = descr.outermost_jitdriver_sd assert jd is not None - size = jd.portal_calldescr.get_result_size(self.translate_support_code) + size = jd.portal_calldescr.get_result_size() vable_index = jd.index_of_virtualizable if vable_index >= 0: self.rm._sync_var(op.getarg(vable_index)) @@ -958,10 +961,12 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb def fastpath_malloc_fixedsize(self, op, descr): - assert isinstance(descr, BaseSizeDescr) + KILLME + assert isinstance(descr, SizeDescr) self._do_fastpath_malloc(op, descr.size, descr.tid) def fastpath_malloc_varsize(self, op, arraydescr, num_elem): + KILLME assert isinstance(arraydescr, BaseArrayDescr) ofs_length = arraydescr.get_ofs_length(self.translate_support_code) basesize = arraydescr.get_base_size(self.translate_support_code) @@ -971,6 +976,7 @@ self.assembler.set_new_array_length(eax, ofs_length, imm(num_elem)) def _do_fastpath_malloc(self, op, size, tid): + KILLME gc_ll_descr = self.assembler.cpu.gc_ll_descr self.rm.force_allocate_reg(op.result, selected_reg=eax) # @@ -987,6 +993,7 
@@ ) def consider_new(self, op): + KILLME gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.can_inline_malloc(op.getdescr()): self.fastpath_malloc_fixedsize(op, op.getdescr()) @@ -996,6 +1003,7 @@ return self._call(op, arglocs) def consider_new_with_vtable(self, op): + KILLME classint = op.getarg(0).getint() descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): @@ -1009,14 +1017,17 @@ return self._call(op, arglocs) def consider_newstr(self, op): + KILLME loc = self.loc(op.getarg(0)) return self._call(op, [loc]) def consider_newunicode(self, op): + KILLME loc = self.loc(op.getarg(0)) return self._call(op, [loc]) def consider_new_array(self, op): + KILLME gc_ll_descr = self.assembler.cpu.gc_ll_descr box_num_elem = op.getarg(0) if isinstance(box_num_elem, ConstInt): @@ -1032,34 +1043,32 @@ self._call(op, arglocs) def _unpack_arraydescr(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - ofs = arraydescr.get_base_size(self.translate_support_code) - size = arraydescr.get_item_size(self.translate_support_code) - ptr = arraydescr.is_array_of_pointers() + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.basesize + size = arraydescr.itemsize sign = arraydescr.is_item_signed() - return size, ofs, ofs_length, ptr, sign + return size, ofs, sign def _unpack_fielddescr(self, fielddescr): - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) ofs = fielddescr.offset - size = fielddescr.get_field_size(self.translate_support_code) - ptr = fielddescr.is_pointer_field() + size = fielddescr.field_size sign = fielddescr.is_field_signed() - return imm(ofs), imm(size), ptr, sign + return imm(ofs), imm(size), sign + _unpack_fielddescr._always_inline_ = True def _unpack_interiorfielddescr(self, descr): assert isinstance(descr, InteriorFieldDescr) arraydescr = descr.arraydescr - ofs = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - fieldsize = descr.fielddescr.get_field_size(self.translate_support_code) + ofs = arraydescr.basesize + itemsize = arraydescr.itemsize + fieldsize = descr.fielddescr.field_size sign = descr.fielddescr.is_field_signed() ofs += descr.fielddescr.offset return imm(ofs), imm(itemsize), imm(fieldsize), sign def consider_setfield_gc(self, op): - ofs_loc, size_loc, _, _ = self._unpack_fielddescr(op.getdescr()) + ofs_loc, size_loc, _ = self._unpack_fielddescr(op.getdescr()) assert isinstance(size_loc, ImmedLoc) if size_loc.value == 1: need_lower_byte = True @@ -1117,7 +1126,7 @@ consider_unicodesetitem = consider_strsetitem def consider_setarrayitem_gc(self, op): - itemsize, ofs, _, _, _ = self._unpack_arraydescr(op.getdescr()) + itemsize, ofs, _ = self._unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) if itemsize == 1: @@ -1134,7 +1143,7 @@ consider_setarrayitem_raw = consider_setarrayitem_gc def consider_getfield_gc(self, op): - ofs_loc, size_loc, _, sign = self._unpack_fielddescr(op.getdescr()) + ofs_loc, size_loc, sign = self._unpack_fielddescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars(args) @@ -1150,7 +1159,7 @@ consider_getfield_gc_pure = consider_getfield_gc def consider_getarrayitem_gc(self, op): - itemsize, ofs, _, _, sign = 
self._unpack_arraydescr(op.getdescr()) + itemsize, ofs, sign = self._unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) @@ -1229,8 +1238,8 @@ def consider_arraylen_gc(self, op): arraydescr = op.getdescr() - assert isinstance(arraydescr, BaseArrayDescr) - ofs = arraydescr.get_ofs_length(self.translate_support_code) + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.lendescr.offset args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars_for_op(op) diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -46,7 +46,7 @@ # get the function address as an integer func = argboxes[0].getint() # do the call using the correct function from the cpu - rettype = descr.get_return_type() + rettype = descr.get_result_type() if rettype == INT or rettype == 'S': # *S*ingle float try: result = cpu.bh_call_i(func, descr, args_i, args_r, args_f) From noreply at buildbot.pypy.org Sun Dec 18 19:54:34 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 19:54:34 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: Finish the integration of the x86 backend, which means Message-ID: <20111218185434.DF57A82009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50677:091dcde9fb47 Date: 2011-12-18 19:54 +0100 http://bitbucket.org/pypy/pypy/changeset/091dcde9fb47/ Log: Finish the integration of the x86 backend, which means mostly killing stuff. diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -11,7 +11,7 @@ from pypy.jit.backend.llsupport.descr import ( get_size_descr, get_field_descr, get_array_descr, get_call_descr, get_interiorfield_descr, - FieldDescr, ArrayDescr, CallDescr) + FieldDescr, ArrayDescr, CallDescr, InteriorFieldDescr) from pypy.jit.backend.llsupport.asmmemmgr import AsmMemoryManager diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -2448,9 +2448,8 @@ else: self.mc.JMP(imm(target)) - def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): - size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) - size = (size + WORD-1) & ~(WORD-1) # round up + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size): + assert size & (WORD-1) == 0 # must be correctly aligned self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edx.value, (eax.value, size)) self.mc.CMP(edx, heap(nursery_top_adr)) @@ -2486,9 +2485,6 @@ offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) - # on 64-bits, 'tid' is a value that fits in 31 bits - assert rx86.fits_in_32bits(tid) - self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) genop_discard_list = [Assembler386.not_implemented_op_discard] * rop._LAST diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -960,24 +960,10 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb - def fastpath_malloc_fixedsize(self, op, descr): - KILLME - assert isinstance(descr, SizeDescr) - 
self._do_fastpath_malloc(op, descr.size, descr.tid) - - def fastpath_malloc_varsize(self, op, arraydescr, num_elem): - KILLME - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - size = basesize + itemsize * num_elem - self._do_fastpath_malloc(op, size, arraydescr.tid) - self.assembler.set_new_array_length(eax, ofs_length, imm(num_elem)) - - def _do_fastpath_malloc(self, op, size, tid): - KILLME - gc_ll_descr = self.assembler.cpu.gc_ll_descr + def consider_call_malloc_nursery(self, op): + size_box = op.getarg(0) + assert isinstance(size_box, ConstInt) + size = size_box.getint() self.rm.force_allocate_reg(op.result, selected_reg=eax) # # We need edx as a temporary, but otherwise don't save any more @@ -986,61 +972,11 @@ self.rm.force_allocate_reg(tmp_box, selected_reg=edx) self.rm.possibly_free_var(tmp_box) # + gc_ll_descr = self.assembler.cpu.gc_ll_descr self.assembler.malloc_cond( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), - size, tid, - ) - - def consider_new(self, op): - KILLME - gc_ll_descr = self.assembler.cpu.gc_ll_descr - if gc_ll_descr.can_inline_malloc(op.getdescr()): - self.fastpath_malloc_fixedsize(op, op.getdescr()) - else: - args = gc_ll_descr.args_for_new(op.getdescr()) - arglocs = [imm(x) for x in args] - return self._call(op, arglocs) - - def consider_new_with_vtable(self, op): - KILLME - classint = op.getarg(0).getint() - descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) - if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): - self.fastpath_malloc_fixedsize(op, descrsize) - self.assembler.set_vtable(eax, imm(classint)) - # result of fastpath malloc is in eax - else: - args = self.assembler.cpu.gc_ll_descr.args_for_new(descrsize) - arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.getarg(0))) - return self._call(op, arglocs) - - def consider_newstr(self, op): - KILLME - loc = self.loc(op.getarg(0)) - return self._call(op, [loc]) - - def consider_newunicode(self, op): - KILLME - loc = self.loc(op.getarg(0)) - return self._call(op, [loc]) - - def consider_new_array(self, op): - KILLME - gc_ll_descr = self.assembler.cpu.gc_ll_descr - box_num_elem = op.getarg(0) - if isinstance(box_num_elem, ConstInt): - num_elem = box_num_elem.value - if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), - num_elem): - self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) - return - args = self.assembler.cpu.gc_ll_descr.args_for_new_array( - op.getdescr()) - arglocs = [imm(x) for x in args] - arglocs.append(self.loc(box_num_elem)) - self._call(op, arglocs) + size) def _unpack_arraydescr(self, arraydescr): assert isinstance(arraydescr, ArrayDescr) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -8,7 +8,7 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.codewriter import heaptracker from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.backend.llsupport.descr import GcCache +from pypy.jit.backend.llsupport.descr import GcCache, FieldDescr, FLAG_SIGNED from pypy.jit.backend.llsupport.gc import GcLLDescription from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.regalloc import 
RegAlloc @@ -17,7 +17,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.annlowlevel import llhelper from pypy.rpython.lltypesystem import rclass, rstr -from pypy.jit.backend.llsupport.gc import GcLLDescr_framework, GcPtrFieldDescr +from pypy.jit.backend.llsupport.gc import GcLLDescr_framework from pypy.jit.backend.x86.test.test_regalloc import MockAssembler from pypy.jit.backend.x86.test.test_regalloc import BaseTestRegalloc @@ -41,20 +41,15 @@ return ['compressed'] + shape[1:] class MockGcDescr(GcCache): - def get_funcptr_for_new(self): - return 123 - get_funcptr_for_newarray = get_funcptr_for_new - get_funcptr_for_newstr = get_funcptr_for_new - get_funcptr_for_newunicode = get_funcptr_for_new get_malloc_slowpath_addr = None - + write_barrier_descr = None moving_gc = True gcrootmap = MockGcRootMap() def initialize(self): pass - record_constptrs = GcLLDescr_framework.record_constptrs.im_func + _record_constptrs = GcLLDescr_framework._record_constptrs.im_func rewrite_assembler = GcLLDescr_framework.rewrite_assembler.im_func class TestRegallocDirectGcIntegration(object): @@ -170,42 +165,32 @@ ''' self.interpret(ops, [0, 0, 0, 0, 0, 0, 0, 0, 0], run=False) +NOT_INITIALIZED = chr(0xdd) + class GCDescrFastpathMalloc(GcLLDescription): gcrootmap = None - expected_malloc_slowpath_size = WORD*2 + write_barrier_descr = None def __init__(self): - GcCache.__init__(self, False) + GcLLDescription.__init__(self, None) # create a nursery - NTP = rffi.CArray(lltype.Signed) - self.nursery = lltype.malloc(NTP, 16, flavor='raw') - self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 3, + NTP = rffi.CArray(lltype.Char) + self.nursery = lltype.malloc(NTP, 64, flavor='raw') + for i in range(64): + self.nursery[i] = NOT_INITIALIZED + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 2, flavor='raw') self.addrs[0] = rffi.cast(lltype.Signed, self.nursery) - self.addrs[1] = self.addrs[0] + 16*WORD - self.addrs[2] = 0 - # 16 WORDs + self.addrs[1] = self.addrs[0] + 64 + self.calls = [] def malloc_slowpath(size): - assert size == self.expected_malloc_slowpath_size + self.calls.append(size) + # reset the nursery nadr = rffi.cast(lltype.Signed, self.nursery) self.addrs[0] = nadr + size - self.addrs[2] += 1 return nadr - self.malloc_slowpath = malloc_slowpath - self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], - lltype.Signed) - self._counter = 123000 - - def can_inline_malloc(self, descr): - return True - - def get_funcptr_for_new(self): - return 42 -# return llhelper(lltype.Ptr(self.NEW_TP), self.new) - - def init_size_descr(self, S, descr): - descr.tid = self._counter - self._counter += 1 + self.generate_function('malloc_nursery', malloc_slowpath, + [lltype.Signed], lltype.Signed) def get_nursery_free_addr(self): return rffi.cast(lltype.Signed, self.addrs) @@ -214,204 +199,61 @@ return rffi.cast(lltype.Signed, self.addrs) + WORD def get_malloc_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) - return rffi.cast(lltype.Signed, fptr) + return self.c_malloc_nursery_fn.value - get_funcptr_for_newarray = None - get_funcptr_for_newstr = None - get_funcptr_for_newunicode = None + def check_nothing_in_nursery(self): + # CALL_MALLOC_NURSERY should not write anything in the nursery + for i in range(64): + assert self.nursery[i] == NOT_INITIALIZED class TestMallocFastpath(BaseTestRegalloc): def setup_method(self, method): cpu = CPU(None, None) - cpu.vtable_offset = WORD cpu.gc_ll_descr = GCDescrFastpathMalloc() cpu.setup_once() + self.cpu 
= cpu - # hack: specify 'tid' explicitly, because this test is not running - # with the gc transformer - NODE = lltype.GcStruct('node', ('tid', lltype.Signed), - ('value', lltype.Signed)) - nodedescr = cpu.sizeof(NODE) - valuedescr = cpu.fielddescrof(NODE, 'value') - - self.cpu = cpu - self.nodedescr = nodedescr - vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) - vtable_int = cpu.cast_adr_to_int(llmemory.cast_ptr_to_adr(vtable)) - NODE2 = lltype.GcStruct('node2', - ('parent', rclass.OBJECT), - ('tid', lltype.Signed), - ('vtable', lltype.Ptr(rclass.OBJECT_VTABLE))) - descrsize = cpu.sizeof(NODE2) - heaptracker.register_known_gctype(cpu, vtable, NODE2) - self.descrsize = descrsize - self.vtable_int = vtable_int - - self.namespace = locals().copy() - def test_malloc_fastpath(self): ops = ''' - [i0] - p0 = new(descr=nodedescr) - setfield_gc(p0, i0, descr=valuedescr) - finish(p0) + [] + p0 = call_malloc_nursery(16) + p1 = call_malloc_nursery(32) + p2 = call_malloc_nursery(16) + finish(p0, p1, p2) ''' - self.interpret(ops, [42]) - # check the nursery + self.interpret(ops, []) + # check the returned pointers gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == self.nodedescr.tid - assert gc_ll_descr.nursery[1] == 42 nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*2) - assert gc_ll_descr.addrs[2] == 0 # slowpath never called + ref = self.cpu.get_latest_value_ref + assert rffi.cast(lltype.Signed, ref(0)) == nurs_adr + 0 + assert rffi.cast(lltype.Signed, ref(1)) == nurs_adr + 16 + assert rffi.cast(lltype.Signed, ref(2)) == nurs_adr + 48 + # check the nursery content and state + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.addrs[0] == nurs_adr + 64 + # slowpath never called + assert gc_ll_descr.calls == [] def test_malloc_slowpath(self): ops = ''' [] - p0 = new(descr=nodedescr) - p1 = new(descr=nodedescr) - p2 = new(descr=nodedescr) - p3 = new(descr=nodedescr) - p4 = new(descr=nodedescr) - p5 = new(descr=nodedescr) - p6 = new(descr=nodedescr) - p7 = new(descr=nodedescr) - p8 = new(descr=nodedescr) - finish(p0, p1, p2, p3, p4, p5, p6, p7, p8) + p0 = call_malloc_nursery(16) + p1 = call_malloc_nursery(32) + p2 = call_malloc_nursery(24) # overflow + finish(p0, p1, p2) ''' self.interpret(ops, []) + # check the returned pointers + gc_ll_descr = self.cpu.gc_ll_descr + nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) + ref = self.cpu.get_latest_value_ref + assert rffi.cast(lltype.Signed, ref(0)) == nurs_adr + 0 + assert rffi.cast(lltype.Signed, ref(1)) == nurs_adr + 16 + assert rffi.cast(lltype.Signed, ref(2)) == nurs_adr + 0 + # check the nursery content and state + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.addrs[0] == nurs_adr + 24 # this should call slow path once - gc_ll_descr = self.cpu.gc_ll_descr - nadr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nadr + (WORD*2) - assert gc_ll_descr.addrs[2] == 1 # slowpath called once - - def test_new_with_vtable(self): - ops = ''' - [i0, i1] - p0 = new_with_vtable(ConstClass(vtable)) - guard_class(p0, ConstClass(vtable)) [i0] - finish(i1) - ''' - self.interpret(ops, [0, 1]) - assert self.getint(0) == 1 - gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == self.descrsize.tid - assert gc_ll_descr.nursery[1] == self.vtable_int - nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*3) - assert gc_ll_descr.addrs[2] == 0 # slowpath never 
called - - -class Seen(Exception): - pass - -class GCDescrFastpathMallocVarsize(GCDescrFastpathMalloc): - def can_inline_malloc_varsize(self, arraydescr, num_elem): - return num_elem < 5 - def get_funcptr_for_newarray(self): - return 52 - def init_array_descr(self, A, descr): - descr.tid = self._counter - self._counter += 1 - def args_for_new_array(self, descr): - raise Seen("args_for_new_array") - -class TestMallocVarsizeFastpath(BaseTestRegalloc): - def setup_method(self, method): - cpu = CPU(None, None) - cpu.vtable_offset = WORD - cpu.gc_ll_descr = GCDescrFastpathMallocVarsize() - cpu.setup_once() - self.cpu = cpu - - ARRAY = lltype.GcArray(lltype.Signed) - arraydescr = cpu.arraydescrof(ARRAY) - self.arraydescr = arraydescr - ARRAYCHAR = lltype.GcArray(lltype.Char) - arraychardescr = cpu.arraydescrof(ARRAYCHAR) - - self.namespace = locals().copy() - - def test_malloc_varsize_fastpath(self): - # Hack. Running the GcLLDescr_framework without really having - # a complete GC means that we end up with both the tid and the - # length being at offset 0. In this case, so the length overwrites - # the tid. This is of course only the case in this test class. - ops = ''' - [] - p0 = new_array(4, descr=arraydescr) - setarrayitem_gc(p0, 0, 142, descr=arraydescr) - setarrayitem_gc(p0, 3, 143, descr=arraydescr) - finish(p0) - ''' - self.interpret(ops, []) - # check the nursery - gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == 4 - assert gc_ll_descr.nursery[1] == 142 - assert gc_ll_descr.nursery[4] == 143 - nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*5) - assert gc_ll_descr.addrs[2] == 0 # slowpath never called - - def test_malloc_varsize_slowpath(self): - ops = ''' - [] - p0 = new_array(4, descr=arraydescr) - setarrayitem_gc(p0, 0, 420, descr=arraydescr) - setarrayitem_gc(p0, 3, 430, descr=arraydescr) - p1 = new_array(4, descr=arraydescr) - setarrayitem_gc(p1, 0, 421, descr=arraydescr) - setarrayitem_gc(p1, 3, 431, descr=arraydescr) - p2 = new_array(4, descr=arraydescr) - setarrayitem_gc(p2, 0, 422, descr=arraydescr) - setarrayitem_gc(p2, 3, 432, descr=arraydescr) - p3 = new_array(4, descr=arraydescr) - setarrayitem_gc(p3, 0, 423, descr=arraydescr) - setarrayitem_gc(p3, 3, 433, descr=arraydescr) - finish(p0, p1, p2, p3) - ''' - gc_ll_descr = self.cpu.gc_ll_descr - gc_ll_descr.expected_malloc_slowpath_size = 5*WORD - self.interpret(ops, []) - assert gc_ll_descr.addrs[2] == 1 # slowpath called once - - def test_malloc_varsize_too_big(self): - ops = ''' - [] - p0 = new_array(5, descr=arraydescr) - finish(p0) - ''' - py.test.raises(Seen, self.interpret, ops, []) - - def test_malloc_varsize_variable(self): - ops = ''' - [i0] - p0 = new_array(i0, descr=arraydescr) - finish(p0) - ''' - py.test.raises(Seen, self.interpret, ops, []) - - def test_malloc_array_of_char(self): - # check that fastpath_malloc_varsize() respects the alignment - # of the pointer in the nursery - ops = ''' - [] - p1 = new_array(1, descr=arraychardescr) - p2 = new_array(2, descr=arraychardescr) - p3 = new_array(3, descr=arraychardescr) - p4 = new_array(4, descr=arraychardescr) - finish(p1, p2, p3, p4) - ''' - self.interpret(ops, []) - p1 = self.getptr(0, llmemory.GCREF) - p2 = self.getptr(1, llmemory.GCREF) - p3 = self.getptr(2, llmemory.GCREF) - p4 = self.getptr(3, llmemory.GCREF) - assert p1._obj.intval & (WORD-1) == 0 # aligned - assert p2._obj.intval & (WORD-1) == 0 # aligned - assert p3._obj.intval & (WORD-1) == 0 # aligned - assert p4._obj.intval 
& (WORD-1) == 0 # aligned + assert gc_ll_descr.calls == [24] From noreply at buildbot.pypy.org Sun Dec 18 20:00:38 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 20:00:38 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: fix. Message-ID: <20111218190038.3349182009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50678:65da3ad409e7 Date: 2011-12-18 19:57 +0100 http://bitbucket.org/pypy/pypy/changeset/65da3ad409e7/ Log: fix. diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -366,7 +366,7 @@ arraydescr = descr.arraydescr ofs, size, _ = self.unpack_arraydescr_size(arraydescr) ofs += descr.fielddescr.offset - fieldsize = descr.fielddescr.get_field_size(self.translate_support_code) + fieldsize = descr.fielddescr.field_size sign = descr.fielddescr.is_field_signed() fullofs = itemindex * size + ofs # --- start of GC unsafe code (no GC operation!) --- @@ -417,7 +417,7 @@ arraydescr = descr.arraydescr ofs, size, _ = self.unpack_arraydescr_size(arraydescr) ofs += descr.fielddescr.offset - fieldsize = descr.fielddescr.get_field_size(self.translate_support_code) + fieldsize = descr.fielddescr.field_size ofs = itemindex * size + ofs # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) From noreply at buildbot.pypy.org Sun Dec 18 20:43:18 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 20:43:18 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: Fix test. Message-ID: <20111218194318.3BB6F82009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50679:f75f79e52d06 Date: 2011-12-18 20:22 +0100 http://bitbucket.org/pypy/pypy/changeset/f75f79e52d06/ Log: Fix test. 
diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -69,16 +69,17 @@ def get_functions_to_patch(): from pypy.jit.backend.llsupport import gc # - can_inline_malloc1 = gc.GcLLDescr_framework.can_inline_malloc - def can_inline_malloc2(*args): + can_use_nursery_malloc1 = gc.GcLLDescr_framework.can_use_nursery_malloc + def can_use_nursery_malloc2(*args): try: if os.environ['PYPY_NO_INLINE_MALLOC']: return False except KeyError: pass - return can_inline_malloc1(*args) + return can_use_nursery_malloc1(*args) # - return {(gc.GcLLDescr_framework, 'can_inline_malloc'): can_inline_malloc2} + return {(gc.GcLLDescr_framework, 'can_use_nursery_malloc'): + can_use_nursery_malloc2} def compile(f, gc, enable_opts='', **kwds): from pypy.annotation.listdef import s_list_of_strings From noreply at buildbot.pypy.org Sun Dec 18 20:43:19 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 20:43:19 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: - fix call_stub_r Message-ID: <20111218194319.63CAE82009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50680:8f9b2eb1ff42 Date: 2011-12-18 20:22 +0100 http://bitbucket.org/pypy/pypy/changeset/8f9b2eb1ff42/ Log: - fix call_stub_r - test and fix for malloc_array_nonstandard diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -368,7 +368,8 @@ result = 'rffi.cast(lltype.Signed, res)' category = 'i' elif result_type == history.REF: - result = 'lltype.cast_opaque_ptr(llmemory.GCREF, res)' + assert RESULT == llmemory.GCREF # should be ensured by the caller + result = 'res' category = 'r' elif result_type == history.FLOAT: result = 'longlong.getfloatstorage(res)' @@ -437,7 +438,11 @@ result_size = symbolic.get_size(RESULT, gccache.translate_support_code) result_signed = get_type_flag(RESULT) == FLAG_SIGNED if isinstance(RESULT, lltype.Ptr): - RESULT_ERASED = llmemory.Address # avoid too many CallDescrs + # avoid too many CallDescrs + if result_type == 'r': + RESULT_ERASED = llmemory.GCREF + else: + RESULT_ERASED = llmemory.Address key = (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo) cache = gccache._cache_call try: diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -719,19 +719,19 @@ self.generate_function('malloc_array', malloc_array, [lltype.Signed] * 3) - def malloc_array_nonstandard(arraydescr_gcref, num_elem): + def malloc_array_nonstandard(basesize, itemsize, lengthofs, tid, + num_elem): """For the rare case of non-standard arrays, i.e. arrays where self.standard_array_{basesize,length_ofs} is wrong. It can occur e.g. 
with arrays of floats on Win32.""" - arraydescr = xxxxxxx type_id = llop.extract_ushort(llgroup.HALFWORD, tid) check_typeid(type_id) return llop1.do_malloc_varsize_clear( llmemory.GCREF, - type_id, num_elem, xxx, itemsize, xxx) + type_id, num_elem, basesize, itemsize, lengthofs) self.generate_function('malloc_array_nonstandard', malloc_array_nonstandard, - [llmemory.GCREF, lltype.Signed]) + [lltype.Signed] * 5) def malloc_str(length): return llop1.do_malloc_varsize_clear( diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -178,9 +178,12 @@ v_num_elem] calldescr = self.gc_ll_descr.malloc_array_descr else: - arraydescr_gcref = xxx + # rare case, so don't care too much about the number of arguments args = [self.gc_ll_descr.c_malloc_array_nonstandard_fn, - ConstPtr(arraydescr_gcref), + ConstInt(arraydescr.basesize), + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.lendescr.offset), + ConstInt(arraydescr.tid), v_num_elem] calldescr = self.gc_ll_descr.malloc_array_nonstandard_descr self._gen_call_malloc_gc(args, v_result, calldescr) diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -14,7 +14,7 @@ class RewriteTests(object): - def check_rewrite(self, frm_operations, to_operations): + def check_rewrite(self, frm_operations, to_operations, **namespace): S = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) sdescr = get_size_descr(self.gc_ll_descr, S) @@ -42,13 +42,6 @@ cdescr.tid = 8111 clendescr = cdescr.lendescr # - INTERIOR = lltype.GcArray(('z', lltype.Ptr(S))) - interiordescr = get_array_descr(self.gc_ll_descr, INTERIOR) - interiordescr.tid = 1291 - interiorlendescr = interiordescr.lendescr - interiorzdescr = get_interiorfield_descr(self.gc_ll_descr, - INTERIOR, 'z') - # E = lltype.GcStruct('Empty') edescr = get_size_descr(self.gc_ll_descr, E) edescr.tid = 9000 @@ -68,7 +61,7 @@ strlendescr = strdescr.lendescr unicodelendescr = unicodedescr.lendescr # - namespace = locals().copy() + namespace.update(locals()) # for funcname in self.gc_ll_descr._generated_functions: namespace[funcname] = getattr(self.gc_ll_descr, '%s_fn' % funcname) @@ -344,6 +337,29 @@ jump(i0) """) + def test_rewrite_assembler_nonstandard_array(self): + # a non-standard array is a bit hard to get; e.g. GcArray(Float) + # is like that on Win32, but not on Linux. Build one manually... 
+ NONSTD = lltype.GcArray(lltype.Float) + nonstd_descr = get_array_descr(self.gc_ll_descr, NONSTD) + nonstd_descr.tid = 6464 + nonstd_descr.basesize = 64 # <= hacked + nonstd_descr.itemsize = 8 + nonstd_descr_gcref = 123 + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=nonstd_descr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array_nonstandard), \ + 64, 8, \ + %(nonstd_descr.lendescr.offset)d, \ + 6464, i0, \ + descr=malloc_array_nonstandard_descr) + jump(i0) + """, nonstd_descr=nonstd_descr) + def test_rewrite_assembler_maximal_size_1(self): self.gc_ll_descr.max_size_of_young_obj = 100 self.check_rewrite(""" @@ -534,6 +550,13 @@ """) def test_write_barrier_before_setinteriorfield_gc(self): + S1 = lltype.GcStruct('S1') + INTERIOR = lltype.GcArray(('z', lltype.Ptr(S1))) + interiordescr = get_array_descr(self.gc_ll_descr, INTERIOR) + interiordescr.tid = 1291 + interiorlendescr = interiordescr.lendescr + interiorzdescr = get_interiorfield_descr(self.gc_ll_descr, + INTERIOR, 'z') self.check_rewrite(""" [p1, p2] setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) @@ -543,7 +566,7 @@ cond_call_gc_wb(p1, p2, descr=wbdescr) setinteriorfield_raw(p1, 0, p2, descr=interiorzdescr) jump(p1, p2) - """) + """, interiorzdescr=interiorzdescr) def test_initialization_store(self): self.check_rewrite(""" From noreply at buildbot.pypy.org Sun Dec 18 20:43:20 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 20:43:20 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: fixes Message-ID: <20111218194320.8E98982009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50681:cddd54cacea7 Date: 2011-12-18 20:31 +0100 http://bitbucket.org/pypy/pypy/changeset/cddd54cacea7/ Log: fixes diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -67,6 +67,10 @@ return None def freeing_block(self, start, stop): pass + def get_nursery_free_addr(self): + raise NotImplementedError + def get_nursery_top_addr(self): + raise NotImplementedError def gc_malloc(self, sizedescr): """Blackhole: do a 'bh_new'. 
Also used for 'bh_new_with_vtable', diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -2243,9 +2243,9 @@ # # Reset the vable token --- XXX really too much special logic here:-( if jd.index_of_virtualizable >= 0: - from pypy.jit.backend.llsupport.descr import BaseFieldDescr + from pypy.jit.backend.llsupport.descr import FieldDescr fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) ofs = fielddescr.offset self.mc.MOV(eax, arglocs[1]) self.mc.MOV_mi((eax.value, ofs), 0) From noreply at buildbot.pypy.org Sun Dec 18 20:43:21 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 20:43:21 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: translation fixes Message-ID: <20111218194321.B4A4182009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50682:2ba13ee957b9 Date: 2011-12-18 20:42 +0100 http://bitbucket.org/pypy/pypy/changeset/2ba13ee957b9/ Log: translation fixes diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,6 +1,6 @@ import os from pypy.rlib import rgc -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize from pypy.rlib.debug import fatalerror from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr @@ -9,8 +9,7 @@ from pypy.rpython.annlowlevel import llhelper from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.jit.codewriter import heaptracker -from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt, ConstPtr -from pypy.jit.metainterp.history import AbstractDescr +from pypy.jit.metainterp.history import ConstPtr, AbstractDescr from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.symbolic import WORD @@ -45,16 +44,23 @@ memory. """ FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, RESULT)) - ll_func = llhelper(FUNCPTR, func) - c_ll_func = ConstInt( - heaptracker.adr2int(llmemory.cast_ptr_to_adr(ll_func))) descr = get_call_descr(self, ARGS, RESULT) - setattr(self, '%s' % funcname, func) - setattr(self, '%s_fn' % funcname, ll_func) - setattr(self, 'c_%s_fn' % funcname, c_ll_func) - setattr(self, '%s_descr' % funcname, descr) + setattr(self, funcname, func) + setattr(self, funcname + '_FUNCPTR', FUNCPTR) + setattr(self, funcname + '_descr', descr) self._generated_functions.append(funcname) + @specialize.arg(1) + def get_malloc_fn(self, funcname): + func = getattr(self, funcname) + FUNC = getattr(self, funcname + '_FUNCPTR') + return llhelper(FUNC, func) + + @specialize.arg(1) + def get_malloc_fn_addr(self, funcname): + ll_func = self.get_malloc_fn(funcname) + return heaptracker.adr2int(llmemory.cast_ptr_to_adr(ll_func)) + def _freeze_(self): return True def initialize(self): diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -150,13 +150,14 @@ """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, Const(size)). Note that with the framework GC, this should be called very rarely. 
""" - self._gen_call_malloc_gc([self.gc_ll_descr.c_malloc_fixedsize_fn, - ConstInt(size)], v_result, + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize') + self._gen_call_malloc_gc([ConstInt(addr), ConstInt(size)], v_result, self.gc_ll_descr.malloc_fixedsize_descr) def gen_boehm_malloc_array(self, arraydescr, v_num_elem, v_result): """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) for Boehm.""" - self._gen_call_malloc_gc([self.gc_ll_descr.c_malloc_array_fn, + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_array') + self._gen_call_malloc_gc([ConstInt(addr), ConstInt(arraydescr.basesize), v_num_elem, ConstInt(arraydescr.itemsize), @@ -172,14 +173,17 @@ and arraydescr.lendescr.offset == self.gc_ll_descr.standard_array_length_ofs): # this is a standard-looking array, common case - args = [self.gc_ll_descr.c_malloc_array_fn, + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_array') + args = [ConstInt(addr), ConstInt(arraydescr.itemsize), ConstInt(arraydescr.tid), v_num_elem] calldescr = self.gc_ll_descr.malloc_array_descr else: # rare case, so don't care too much about the number of arguments - args = [self.gc_ll_descr.c_malloc_array_nonstandard_fn, + addr = self.gc_ll_descr.get_malloc_fn_addr( + 'malloc_array_nonstandard') + args = [ConstInt(addr), ConstInt(arraydescr.basesize), ConstInt(arraydescr.itemsize), ConstInt(arraydescr.lendescr.offset), @@ -190,14 +194,14 @@ def gen_malloc_str(self, v_num_elem, v_result): """Generate a CALL_MALLOC_GC(malloc_str_fn, ...).""" - self._gen_call_malloc_gc([self.gc_ll_descr.c_malloc_str_fn, - v_num_elem], v_result, + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_str') + self._gen_call_malloc_gc([ConstInt(addr), v_num_elem], v_result, self.gc_ll_descr.malloc_str_descr) def gen_malloc_unicode(self, v_num_elem, v_result): """Generate a CALL_MALLOC_GC(malloc_unicode_fn, ...).""" - self._gen_call_malloc_gc([self.gc_ll_descr.c_malloc_unicode_fn, - v_num_elem], v_result, + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_unicode') + self._gen_call_malloc_gc([ConstInt(addr), v_num_elem], v_result, self.gc_ll_descr.malloc_unicode_descr) def gen_malloc_nursery(self, size, v_result): diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -6,6 +6,7 @@ from pypy.jit.backend.llsupport.gc import * from pypy.jit.backend.llsupport import symbolic from pypy.jit.metainterp.gc import get_description +from pypy.jit.metainterp.history import BoxPtr, BoxInt, ConstPtr from pypy.jit.metainterp.resoperation import get_deep_immutable_oplist from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -64,7 +64,7 @@ namespace.update(locals()) # for funcname in self.gc_ll_descr._generated_functions: - namespace[funcname] = getattr(self.gc_ll_descr, '%s_fn' % funcname) + namespace[funcname] = self.gc_ll_descr.get_malloc_fn(funcname) namespace[funcname + '_descr'] = getattr(self.gc_ll_descr, '%s_descr' % funcname) # From noreply at buildbot.pypy.org Sun Dec 18 21:06:37 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 21:06:37 +0100 (CET) Subject: [pypy-commit] buildbot default: Add mattip's buildslave. 
Message-ID: <20111218200637.5B79082009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r607:a32a017c3434 Date: 2011-12-18 21:06 +0100 http://bitbucket.org/pypy/buildbot/changeset/a32a017c3434/ Log: Add mattip's buildslave. diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -330,7 +330,7 @@ 'category' : 'mac64', }, {"name": WIN32, - "slavenames": ["snakepit32", "bigboard"], + "slavenames": ["snakepit32", "bigboard", "SalsaSalsa"], "builddir": WIN32, "factory": pypyOwnTestFactoryWin, "category": 'win32' @@ -342,13 +342,13 @@ "category": 'win32' }, {"name": APPLVLWIN32, - "slavenames": ["snakepit32", "bigboard"], + "slavenames": ["snakepit32", "bigboard", "SalsaSalsa"], "builddir": APPLVLWIN32, "factory": pypyTranslatedAppLevelTestFactoryWin, "category": "win32" }, {"name" : JITWIN32, - "slavenames": ["snakepit32", "bigboard"], + "slavenames": ["snakepit32", "bigboard", "SalsaSalsa"], 'builddir' : JITWIN32, 'factory' : pypyJITTranslatedTestFactoryWin, 'category' : 'win32', From noreply at buildbot.pypy.org Sun Dec 18 21:26:31 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 21:26:31 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: more fixes Message-ID: <20111218202631.1830682009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50683:d40bf7f7a0e3 Date: 2011-12-18 20:59 +0100 http://bitbucket.org/pypy/pypy/changeset/d40bf7f7a0e3/ Log: more fixes diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -4,7 +4,7 @@ from pypy.rlib.debug import fatalerror from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr -from pypy.rpython.lltypesystem import llgroup, llarena +from pypy.rpython.lltypesystem import llgroup from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.translator.tool.cbuild import ExternalCompilationInfo @@ -743,6 +743,15 @@ malloc_array_nonstandard, [lltype.Signed] * 5) + str_type_id = self.str_descr.tid + str_basesize = self.str_descr.basesize + str_itemsize = self.str_descr.itemsize + str_ofs_length = self.str_descr.lendescr.offset + unicode_type_id = self.unicode_descr.tid + unicode_basesize = self.unicode_descr.basesize + unicode_itemsize = self.unicode_descr.itemsize + unicode_ofs_length = self.unicode_descr.lendescr.offset + def malloc_str(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, @@ -849,7 +858,7 @@ self.gcrootmap.freeing_block(start, stop) def get_malloc_slowpath_addr(self): - return self.c_malloc_nursery_fn.value + return self.get_malloc_fn_addr('malloc_nursery') # ____________________________________________________________ diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -209,7 +209,8 @@ If that fails, generate a plain CALL_MALLOC_GC instead. 
""" if not self.gc_ll_descr.can_use_nursery_malloc(size): - return self.gen_malloc_fixedsize(size, v_result) + self.gen_malloc_fixedsize(size, v_result) + return # size = self.round_up_for_allocation(size) op = None @@ -237,7 +238,6 @@ self._previous_size = size self._v_last_malloced_nursery = v_result self.recent_mallocs[v_result] = None - return True def gen_initialize_tid(self, v_newgcobj, tid): if self.gc_ll_descr.fielddescr_tid is not None: @@ -316,6 +316,7 @@ def round_up_for_allocation(self, size): if self.gc_ll_descr.translate_support_code: + from pypy.rpython.lltypesystem import llarena return llarena.round_up_for_allocation( size, self.gc_ll_descr.minimal_size_in_nursery) else: From noreply at buildbot.pypy.org Sun Dec 18 21:26:32 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 21:26:32 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: Yay, test_zrpy_gc passes! Message-ID: <20111218202632.3F3D782009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50684:c0ac6c2b2d1e Date: 2011-12-18 21:26 +0100 http://bitbucket.org/pypy/pypy/changeset/c0ac6c2b2d1e/ Log: Yay, test_zrpy_gc passes! diff --git a/pypy/rpython/lltypesystem/llarena.py b/pypy/rpython/lltypesystem/llarena.py --- a/pypy/rpython/lltypesystem/llarena.py +++ b/pypy/rpython/lltypesystem/llarena.py @@ -374,6 +374,7 @@ following an object. For arenas containing heterogenous objects. If minsize is specified, it gives a minimum on the resulting size.""" return _round_up_for_allocation(size, minsize) +round_up_for_allocation._annenforceargs_ = [int, int] def _round_up_for_allocation(size, minsize): # internal return RoundedUpForAllocation(size, minsize) From noreply at buildbot.pypy.org Sun Dec 18 22:57:13 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 22:57:13 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: fix Message-ID: <20111218215713.7BC3782009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50685:c3eb411bcb54 Date: 2011-12-18 22:32 +0100 http://bitbucket.org/pypy/pypy/changeset/c3eb411bcb54/ Log: fix diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -237,6 +237,21 @@ cache[(ARRAY, name)] = descr return descr +def get_dynamic_interiorfield_descr(gc_ll_descr, offset, width, fieldsize, + is_pointer, is_float, is_signed): + arraydescr = ArrayDescr(0, width, None, FLAG_STRUCT) + if is_pointer: + assert not is_float + flag = FLAG_POINTER + elif is_float: + flag = FLAG_FLOAT + elif is_signed: + flag = FLAG_SIGNED + else: + flag = FLAG_UNSIGNED + fielddescr = FieldDescr('dynamic', offset, fieldsize, flag) + return InteriorFieldDescr(arraydescr, fielddescr) + # ____________________________________________________________ # CallDescrs diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -10,7 +10,7 @@ from pypy.jit.backend.llsupport.symbolic import WORD, unroll_basic_sizes from pypy.jit.backend.llsupport.descr import ( get_size_descr, get_field_descr, get_array_descr, - get_call_descr, get_interiorfield_descr, + get_call_descr, get_interiorfield_descr, get_dynamic_interiorfield_descr, FieldDescr, ArrayDescr, CallDescr, InteriorFieldDescr) from pypy.jit.backend.llsupport.asmmemmgr import AsmMemoryManager @@ -239,10 +239,10 @@ return 
get_interiorfield_descr(self.gc_ll_descr, A, fieldname) def interiorfielddescrof_dynamic(self, offset, width, fieldsize, - is_pointer, is_float, is_signed): - arraydescr = DynamicArrayNoLengthDescr(width) - fielddescr = DynamicFieldDescr(offset, fieldsize, is_pointer, is_float, is_signed) - return InteriorFieldDescr(arraydescr, fielddescr) + is_pointer, is_float, is_signed): + return get_dynamic_interiorfield_descr(self.gc_ll_descr, + offset, width, fieldsize, + is_pointer, is_float, is_signed) def unpack_arraydescr(self, arraydescr): assert isinstance(arraydescr, ArrayDescr) From noreply at buildbot.pypy.org Sun Dec 18 22:57:14 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 22:57:14 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: fix Message-ID: <20111218215714.A1F1782009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50686:bfdcb012f2f5 Date: 2011-12-18 21:40 +0000 http://bitbucket.org/pypy/pypy/changeset/bfdcb012f2f5/ Log: fix diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -450,14 +450,15 @@ result_size = 0 result_signed = False else: - result_size = symbolic.get_size(RESULT, gccache.translate_support_code) - result_signed = get_type_flag(RESULT) == FLAG_SIGNED if isinstance(RESULT, lltype.Ptr): # avoid too many CallDescrs if result_type == 'r': RESULT_ERASED = llmemory.GCREF else: RESULT_ERASED = llmemory.Address + result_size = symbolic.get_size(RESULT_ERASED, + gccache.translate_support_code) + result_signed = get_type_flag(RESULT) == FLAG_SIGNED key = (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo) cache = gccache._cache_call try: From noreply at buildbot.pypy.org Sun Dec 18 22:57:15 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Dec 2011 22:57:15 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: bah Message-ID: <20111218215715.C5AAF82009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50687:83e4fca37356 Date: 2011-12-18 21:48 +0000 http://bitbucket.org/pypy/pypy/changeset/83e4fca37356/ Log: bah diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -301,7 +301,7 @@ elif result_type == 'S': result_flag = FLAG_UNSIGNED else: - raise NotImplementedError("result_type = %r" % (result_type,)) + raise NotImplementedError("result_type = '%s'" % (result_type,)) self.result_flag = result_flag def __repr__(self): From noreply at buildbot.pypy.org Mon Dec 19 06:36:47 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 06:36:47 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: hg merge default Message-ID: <20111219053648.1115982009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50688:5416fa240dbe Date: 2011-12-19 06:36 +0100 http://bitbucket.org/pypy/pypy/changeset/5416fa240dbe/ Log: hg merge default diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -51,6 +51,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." 
+ state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \"%s\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \"%s\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -94,17 +112,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." - for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \"%s\" missing from %s" - err = err % (missing, host) - w_err = space.wrap(err) - raise OperationError(space.w_TypeError, w_err) - raise AssertionError("should not reach here") class mod(AST): @@ -112,7 +119,6 @@ class Module(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -128,7 +134,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Module') + self.missing_field(space, ['body'], 'Module') else: pass w_list = self.w_body @@ -145,7 +151,6 @@ class Interactive(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -161,7 +166,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Interactive') + self.missing_field(space, ['body'], 'Interactive') else: pass w_list = self.w_body @@ -178,7 +183,6 @@ class Expression(mod): - def __init__(self, body): self.body = body self.initialization_state = 1 @@ -192,7 +196,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Expression') + self.missing_field(space, ['body'], 'Expression') else: pass self.body.sync_app_attrs(space) @@ -200,7 +204,6 @@ class Suite(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -216,7 +219,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Suite') + self.missing_field(space, ['body'], 'Suite') else: pass w_list = self.w_body @@ -232,15 +235,13 @@ class stmt(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class FunctionDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, args, body, decorator_list, lineno, col_offset): self.name = name self.args = args @@ -264,7 +265,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'args', 'body', 'decorator_list', 'lineno', 'col_offset'], 'FunctionDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef') else: pass self.args.sync_app_attrs(space) @@ -292,9 +293,6 @@ class ClassDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, bases, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases @@ -320,7 +318,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'bases', 'body', 'decorator_list', 
'lineno', 'col_offset'], 'ClassDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef') else: pass w_list = self.w_bases @@ -357,9 +355,6 @@ class Return(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -374,10 +369,10 @@ return visitor.visit_Return(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Return') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Return') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -385,9 +380,6 @@ class Delete(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, targets, lineno, col_offset): self.targets = targets self.w_targets = None @@ -404,7 +396,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['targets', 'lineno', 'col_offset'], 'Delete') + self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete') else: pass w_list = self.w_targets @@ -421,9 +413,6 @@ class Assign(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, targets, value, lineno, col_offset): self.targets = targets self.w_targets = None @@ -442,7 +431,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['targets', 'value', 'lineno', 'col_offset'], 'Assign') + self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') else: pass w_list = self.w_targets @@ -460,9 +449,6 @@ class AugAssign(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, target, op, value, lineno, col_offset): self.target = target self.op = op @@ -480,7 +466,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['target', 'op', 'value', 'lineno', 'col_offset'], 'AugAssign') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') else: pass self.target.sync_app_attrs(space) @@ -489,9 +475,6 @@ class Print(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, dest, values, nl, lineno, col_offset): self.dest = dest self.values = values @@ -511,10 +494,10 @@ return visitor.visit_Print(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 30: - missing_field(space, self.initialization_state, [None, 'values', 'nl', 'lineno', 'col_offset'], 'Print') + if (self.initialization_state & ~4) ^ 27: + self.missing_field(space, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.dest = None if self.dest: self.dest.sync_app_attrs(space) @@ -532,9 +515,6 @@ class For(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, target, iter, body, orelse, lineno, col_offset): self.target = target self.iter = iter @@ -559,7 +539,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['target', 'iter', 'body', 'orelse', 'lineno', 'col_offset'], 'For') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 
'orelse'], 'For') else: pass self.target.sync_app_attrs(space) @@ -588,9 +568,6 @@ class While(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -613,7 +590,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'While') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') else: pass self.test.sync_app_attrs(space) @@ -641,9 +618,6 @@ class If(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -666,7 +640,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'If') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') else: pass self.test.sync_app_attrs(space) @@ -694,9 +668,6 @@ class With(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, context_expr, optional_vars, body, lineno, col_offset): self.context_expr = context_expr self.optional_vars = optional_vars @@ -717,10 +688,10 @@ return visitor.visit_With(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 29: - missing_field(space, self.initialization_state, ['context_expr', None, 'body', 'lineno', 'col_offset'], 'With') + if (self.initialization_state & ~8) ^ 23: + self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.optional_vars = None self.context_expr.sync_app_attrs(space) if self.optional_vars: @@ -739,9 +710,6 @@ class Raise(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, inst, tback, lineno, col_offset): self.type = type self.inst = inst @@ -762,14 +730,14 @@ return visitor.visit_Raise(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~7) ^ 24: - missing_field(space, self.initialization_state, [None, None, None, 'lineno', 'col_offset'], 'Raise') + if (self.initialization_state & ~28) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.inst = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.tback = None if self.type: self.type.sync_app_attrs(space) @@ -781,9 +749,6 @@ class TryExcept(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, handlers, orelse, lineno, col_offset): self.body = body self.w_body = None @@ -808,7 +773,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['body', 'handlers', 'orelse', 'lineno', 'col_offset'], 'TryExcept') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') else: pass w_list = self.w_body @@ -845,9 +810,6 @@ class TryFinally(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, body, finalbody, lineno, col_offset): self.body = body self.w_body = None @@ -868,7 +830,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 
15: - missing_field(space, self.initialization_state, ['body', 'finalbody', 'lineno', 'col_offset'], 'TryFinally') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') else: pass w_list = self.w_body @@ -895,9 +857,6 @@ class Assert(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, test, msg, lineno, col_offset): self.test = test self.msg = msg @@ -914,10 +873,10 @@ return visitor.visit_Assert(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 13: - missing_field(space, self.initialization_state, ['test', None, 'lineno', 'col_offset'], 'Assert') + if (self.initialization_state & ~8) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.msg = None self.test.sync_app_attrs(space) if self.msg: @@ -926,9 +885,6 @@ class Import(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -945,7 +901,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Import') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import') else: pass w_list = self.w_names @@ -962,9 +918,6 @@ class ImportFrom(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, module, names, level, lineno, col_offset): self.module = module self.names = names @@ -982,12 +935,12 @@ return visitor.visit_ImportFrom(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~5) ^ 26: - missing_field(space, self.initialization_state, [None, 'names', None, 'lineno', 'col_offset'], 'ImportFrom') + if (self.initialization_state & ~20) ^ 11: + self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.module = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.level = 0 w_list = self.w_names if w_list is not None: @@ -1003,9 +956,6 @@ class Exec(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, globals, locals, lineno, col_offset): self.body = body self.globals = globals @@ -1025,12 +975,12 @@ return visitor.visit_Exec(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~6) ^ 25: - missing_field(space, self.initialization_state, ['body', None, None, 'lineno', 'col_offset'], 'Exec') + if (self.initialization_state & ~24) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'body', None, None], 'Exec') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.globals = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.locals = None self.body.sync_app_attrs(space) if self.globals: @@ -1041,9 +991,6 @@ class Global(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -1058,7 +1005,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Global') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') else: pass w_list = self.w_names @@ -1072,9 +1019,6 @@ class Expr(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def 
__init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -1089,7 +1033,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Expr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr') else: pass self.value.sync_app_attrs(space) @@ -1097,9 +1041,6 @@ class Pass(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1112,16 +1053,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Pass') + self.missing_field(space, ['lineno', 'col_offset'], 'Pass') else: pass class Break(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1134,16 +1072,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Break') + self.missing_field(space, ['lineno', 'col_offset'], 'Break') else: pass class Continue(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1156,21 +1091,19 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Continue') + self.missing_field(space, ['lineno', 'col_offset'], 'Continue') else: pass class expr(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class BoolOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, values, lineno, col_offset): self.op = op self.values = values @@ -1188,7 +1121,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'values', 'lineno', 'col_offset'], 'BoolOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp') else: pass w_list = self.w_values @@ -1205,9 +1138,6 @@ class BinOp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, op, right, lineno, col_offset): self.left = left self.op = op @@ -1225,7 +1155,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'op', 'right', 'lineno', 'col_offset'], 'BinOp') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'op', 'right'], 'BinOp') else: pass self.left.sync_app_attrs(space) @@ -1234,9 +1164,6 @@ class UnaryOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, operand, lineno, col_offset): self.op = op self.operand = operand @@ -1252,7 +1179,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'operand', 'lineno', 'col_offset'], 'UnaryOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'operand'], 'UnaryOp') else: pass self.operand.sync_app_attrs(space) @@ -1260,9 +1187,6 @@ class Lambda(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, args, body, lineno, col_offset): self.args = args self.body = body @@ -1279,7 +1203,7 @@ def sync_app_attrs(self, space): if 
(self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['args', 'body', 'lineno', 'col_offset'], 'Lambda') + self.missing_field(space, ['lineno', 'col_offset', 'args', 'body'], 'Lambda') else: pass self.args.sync_app_attrs(space) @@ -1288,9 +1212,6 @@ class IfExp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -1309,7 +1230,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'IfExp') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'IfExp') else: pass self.test.sync_app_attrs(space) @@ -1319,9 +1240,6 @@ class Dict(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, keys, values, lineno, col_offset): self.keys = keys self.w_keys = None @@ -1342,7 +1260,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['keys', 'values', 'lineno', 'col_offset'], 'Dict') + self.missing_field(space, ['lineno', 'col_offset', 'keys', 'values'], 'Dict') else: pass w_list = self.w_keys @@ -1369,9 +1287,6 @@ class Set(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, elts, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1388,7 +1303,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['elts', 'lineno', 'col_offset'], 'Set') + self.missing_field(space, ['lineno', 'col_offset', 'elts'], 'Set') else: pass w_list = self.w_elts @@ -1405,9 +1320,6 @@ class ListComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1426,7 +1338,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'ListComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'ListComp') else: pass self.elt.sync_app_attrs(space) @@ -1444,9 +1356,6 @@ class SetComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1465,7 +1374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'SetComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'SetComp') else: pass self.elt.sync_app_attrs(space) @@ -1483,9 +1392,6 @@ class DictComp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, key, value, generators, lineno, col_offset): self.key = key self.value = value @@ -1506,7 +1412,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['key', 'value', 'generators', 'lineno', 'col_offset'], 'DictComp') + self.missing_field(space, ['lineno', 'col_offset', 'key', 'value', 'generators'], 'DictComp') else: pass self.key.sync_app_attrs(space) @@ -1525,9 +1431,6 @@ class GeneratorExp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1546,7 +1449,7 @@ def sync_app_attrs(self, space): 
if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'GeneratorExp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'GeneratorExp') else: pass self.elt.sync_app_attrs(space) @@ -1564,9 +1467,6 @@ class Yield(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1581,10 +1481,10 @@ return visitor.visit_Yield(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Yield') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Yield') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -1592,9 +1492,6 @@ class Compare(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, ops, comparators, lineno, col_offset): self.left = left self.ops = ops @@ -1615,7 +1512,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'ops', 'comparators', 'lineno', 'col_offset'], 'Compare') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'ops', 'comparators'], 'Compare') else: pass self.left.sync_app_attrs(space) @@ -1640,9 +1537,6 @@ class Call(expr): - _lineno_mask = 32 - _col_offset_mask = 64 - def __init__(self, func, args, keywords, starargs, kwargs, lineno, col_offset): self.func = func self.args = args @@ -1670,12 +1564,12 @@ return visitor.visit_Call(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~24) ^ 103: - missing_field(space, self.initialization_state, ['func', 'args', 'keywords', None, None, 'lineno', 'col_offset'], 'Call') + if (self.initialization_state & ~96) ^ 31: + self.missing_field(space, ['lineno', 'col_offset', 'func', 'args', 'keywords', None, None], 'Call') else: - if not self.initialization_state & 8: + if not self.initialization_state & 32: self.starargs = None - if not self.initialization_state & 16: + if not self.initialization_state & 64: self.kwargs = None self.func.sync_app_attrs(space) w_list = self.w_args @@ -1706,9 +1600,6 @@ class Repr(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1723,7 +1614,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Repr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Repr') else: pass self.value.sync_app_attrs(space) @@ -1731,9 +1622,6 @@ class Num(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, n, lineno, col_offset): self.n = n expr.__init__(self, lineno, col_offset) @@ -1747,16 +1635,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['n', 'lineno', 'col_offset'], 'Num') + self.missing_field(space, ['lineno', 'col_offset', 'n'], 'Num') else: pass class Str(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, s, lineno, col_offset): self.s = s expr.__init__(self, lineno, col_offset) @@ -1770,16 +1655,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 
7: - missing_field(space, self.initialization_state, ['s', 'lineno', 'col_offset'], 'Str') + self.missing_field(space, ['lineno', 'col_offset', 's'], 'Str') else: pass class Attribute(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, attr, ctx, lineno, col_offset): self.value = value self.attr = attr @@ -1796,7 +1678,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'attr', 'ctx', 'lineno', 'col_offset'], 'Attribute') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'attr', 'ctx'], 'Attribute') else: pass self.value.sync_app_attrs(space) @@ -1804,9 +1686,6 @@ class Subscript(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, slice, ctx, lineno, col_offset): self.value = value self.slice = slice @@ -1824,7 +1703,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'slice', 'ctx', 'lineno', 'col_offset'], 'Subscript') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'slice', 'ctx'], 'Subscript') else: pass self.value.sync_app_attrs(space) @@ -1833,9 +1712,6 @@ class Name(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, id, ctx, lineno, col_offset): self.id = id self.ctx = ctx @@ -1850,16 +1726,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['id', 'ctx', 'lineno', 'col_offset'], 'Name') + self.missing_field(space, ['lineno', 'col_offset', 'id', 'ctx'], 'Name') else: pass class List(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1877,7 +1750,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'List') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'List') else: pass w_list = self.w_elts @@ -1894,9 +1767,6 @@ class Tuple(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1914,7 +1784,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'Tuple') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'Tuple') else: pass w_list = self.w_elts @@ -1931,9 +1801,6 @@ class Const(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1947,7 +1814,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Const') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Const') else: pass @@ -2009,7 +1876,6 @@ class Ellipsis(slice): - def __init__(self): self.initialization_state = 0 @@ -2021,14 +1887,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 0: - missing_field(space, self.initialization_state, [], 'Ellipsis') + self.missing_field(space, [], 'Ellipsis') else: pass class Slice(slice): - def __init__(self, lower, upper, step): self.lower = lower self.upper = upper @@ -2049,7 +1914,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & 
~7) ^ 0: - missing_field(space, self.initialization_state, [None, None, None], 'Slice') + self.missing_field(space, [None, None, None], 'Slice') else: if not self.initialization_state & 1: self.lower = None @@ -2067,7 +1932,6 @@ class ExtSlice(slice): - def __init__(self, dims): self.dims = dims self.w_dims = None @@ -2083,7 +1947,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['dims'], 'ExtSlice') + self.missing_field(space, ['dims'], 'ExtSlice') else: pass w_list = self.w_dims @@ -2100,7 +1964,6 @@ class Index(slice): - def __init__(self, value): self.value = value self.initialization_state = 1 @@ -2114,7 +1977,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['value'], 'Index') + self.missing_field(space, ['value'], 'Index') else: pass self.value.sync_app_attrs(space) @@ -2377,7 +2240,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['target', 'iter', 'ifs'], 'comprehension') + self.missing_field(space, ['target', 'iter', 'ifs'], 'comprehension') else: pass self.target.sync_app_attrs(space) @@ -2394,15 +2257,13 @@ node.sync_app_attrs(space) class excepthandler(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class ExceptHandler(excepthandler): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, name, body, lineno, col_offset): self.type = type self.name = name @@ -2424,12 +2285,12 @@ return visitor.visit_ExceptHandler(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~3) ^ 28: - missing_field(space, self.initialization_state, [None, None, 'body', 'lineno', 'col_offset'], 'ExceptHandler') + if (self.initialization_state & ~12) ^ 19: + self.missing_field(space, ['lineno', 'col_offset', None, None, 'body'], 'ExceptHandler') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.name = None if self.type: self.type.sync_app_attrs(space) @@ -2470,7 +2331,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~6) ^ 9: - missing_field(space, self.initialization_state, ['args', None, None, 'defaults'], 'arguments') + self.missing_field(space, ['args', None, None, 'defaults'], 'arguments') else: if not self.initialization_state & 2: self.vararg = None @@ -2513,7 +2374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['arg', 'value'], 'keyword') + self.missing_field(space, ['arg', 'value'], 'keyword') else: pass self.value.sync_app_attrs(space) @@ -2533,7 +2394,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~2) ^ 1: - missing_field(space, self.initialization_state, ['name', None], 'alias') + self.missing_field(space, ['name', None], 'alias') else: if not self.initialization_state & 2: self.asname = None @@ -3019,6 +2880,8 @@ def Expression_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3098,7 +2961,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -3112,14 +2975,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def stmt_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -3133,7 +2996,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 stmt.typedef = typedef.TypeDef("stmt", AST.typedef, @@ -3149,7 +3012,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3163,14 +3026,14 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def FunctionDef_get_args(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -3184,10 +3047,10 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def FunctionDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3201,10 +3064,10 @@ def FunctionDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def FunctionDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3218,7 +3081,7 @@ def FunctionDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _FunctionDef_field_unroller = unrolling_iterable(['name', 'args', 'body', 'decorator_list']) def FunctionDef_init(space, w_self, __args__): @@ -3254,7 +3117,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not 
None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3268,10 +3131,10 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ClassDef_get_bases(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases') if w_self.w_bases is None: @@ -3285,10 +3148,10 @@ def ClassDef_set_bases(space, w_self, w_new_value): w_self.w_bases = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ClassDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3302,10 +3165,10 @@ def ClassDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def ClassDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3319,7 +3182,7 @@ def ClassDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _ClassDef_field_unroller = unrolling_iterable(['name', 'bases', 'body', 'decorator_list']) def ClassDef_init(space, w_self, __args__): @@ -3356,7 +3219,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3364,13 +3227,15 @@ def Return_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Return_field_unroller = unrolling_iterable(['value']) def Return_init(space, w_self, __args__): @@ -3397,7 +3262,7 @@ ) def Delete_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3411,7 +3276,7 @@ def Delete_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Delete_field_unroller = unrolling_iterable(['targets']) def 
Delete_init(space, w_self, __args__): @@ -3439,7 +3304,7 @@ ) def Assign_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3453,14 +3318,14 @@ def Assign_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3468,13 +3333,15 @@ def Assign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assign_field_unroller = unrolling_iterable(['targets', 'value']) def Assign_init(space, w_self, __args__): @@ -3507,7 +3374,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3515,20 +3382,22 @@ def AugAssign_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def AugAssign_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -3544,14 +3413,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def AugAssign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3559,13 +3428,15 @@ def AugAssign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise 
OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _AugAssign_field_unroller = unrolling_iterable(['target', 'op', 'value']) def AugAssign_init(space, w_self, __args__): @@ -3598,7 +3469,7 @@ w_obj = w_self.getdictvalue(space, 'dest') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dest') return space.wrap(w_self.dest) @@ -3606,16 +3477,18 @@ def Print_set_dest(space, w_self, w_new_value): try: w_self.dest = space.interp_w(expr, w_new_value, True) + if type(w_self.dest) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'dest', w_new_value) return w_self.deldictvalue(space, 'dest') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Print_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -3629,14 +3502,14 @@ def Print_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Print_get_nl(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'nl') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl') return space.wrap(w_self.nl) @@ -3650,7 +3523,7 @@ w_self.setdictvalue(space, 'nl', w_new_value) return w_self.deldictvalue(space, 'nl') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Print_field_unroller = unrolling_iterable(['dest', 'values', 'nl']) def Print_init(space, w_self, __args__): @@ -3684,7 +3557,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3692,20 +3565,22 @@ def For_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def For_get_iter(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'iter') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute 
'%s'", typename, 'iter') return space.wrap(w_self.iter) @@ -3713,16 +3588,18 @@ def For_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'iter', w_new_value) return w_self.deldictvalue(space, 'iter') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def For_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3736,10 +3613,10 @@ def For_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def For_get_orelse(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3753,7 +3630,7 @@ def For_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _For_field_unroller = unrolling_iterable(['target', 'iter', 'body', 'orelse']) def For_init(space, w_self, __args__): @@ -3789,7 +3666,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3797,16 +3674,18 @@ def While_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def While_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3820,10 +3699,10 @@ def While_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def While_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3837,7 +3716,7 @@ def While_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _While_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def While_init(space, w_self, __args__): @@ -3872,7 +3751,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + 
if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3880,16 +3759,18 @@ def If_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def If_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3903,10 +3784,10 @@ def If_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def If_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3920,7 +3801,7 @@ def If_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _If_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def If_init(space, w_self, __args__): @@ -3955,7 +3836,7 @@ w_obj = w_self.getdictvalue(space, 'context_expr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr') return space.wrap(w_self.context_expr) @@ -3963,20 +3844,22 @@ def With_set_context_expr(space, w_self, w_new_value): try: w_self.context_expr = space.interp_w(expr, w_new_value, False) + if type(w_self.context_expr) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'context_expr', w_new_value) return w_self.deldictvalue(space, 'context_expr') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def With_get_optional_vars(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'optional_vars') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars') return space.wrap(w_self.optional_vars) @@ -3984,16 +3867,18 @@ def With_set_optional_vars(space, w_self, w_new_value): try: w_self.optional_vars = space.interp_w(expr, w_new_value, True) + if type(w_self.optional_vars) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'optional_vars', w_new_value) return w_self.deldictvalue(space, 'optional_vars') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def With_get_body(space, w_self): - if not 
w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4007,7 +3892,7 @@ def With_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _With_field_unroller = unrolling_iterable(['context_expr', 'optional_vars', 'body']) def With_init(space, w_self, __args__): @@ -4041,7 +3926,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -4049,20 +3934,22 @@ def Raise_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Raise_get_inst(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'inst') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst') return space.wrap(w_self.inst) @@ -4070,20 +3957,22 @@ def Raise_set_inst(space, w_self, w_new_value): try: w_self.inst = space.interp_w(expr, w_new_value, True) + if type(w_self.inst) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'inst', w_new_value) return w_self.deldictvalue(space, 'inst') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Raise_get_tback(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'tback') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'tback') return space.wrap(w_self.tback) @@ -4091,13 +3980,15 @@ def Raise_set_tback(space, w_self, w_new_value): try: w_self.tback = space.interp_w(expr, w_new_value, True) + if type(w_self.tback) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'tback', w_new_value) return w_self.deldictvalue(space, 'tback') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Raise_field_unroller = unrolling_iterable(['type', 'inst', 'tback']) def Raise_init(space, w_self, __args__): @@ -4126,7 +4017,7 @@ ) def TryExcept_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4140,10 +4031,10 @@ def 
TryExcept_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryExcept_get_handlers(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers') if w_self.w_handlers is None: @@ -4157,10 +4048,10 @@ def TryExcept_set_handlers(space, w_self, w_new_value): w_self.w_handlers = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def TryExcept_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -4174,7 +4065,7 @@ def TryExcept_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _TryExcept_field_unroller = unrolling_iterable(['body', 'handlers', 'orelse']) def TryExcept_init(space, w_self, __args__): @@ -4206,7 +4097,7 @@ ) def TryFinally_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4220,10 +4111,10 @@ def TryFinally_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryFinally_get_finalbody(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody') if w_self.w_finalbody is None: @@ -4237,7 +4128,7 @@ def TryFinally_set_finalbody(space, w_self, w_new_value): w_self.w_finalbody = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _TryFinally_field_unroller = unrolling_iterable(['body', 'finalbody']) def TryFinally_init(space, w_self, __args__): @@ -4271,7 +4162,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -4279,20 +4170,22 @@ def Assert_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assert_get_msg(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'msg') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg') return 
space.wrap(w_self.msg) @@ -4300,13 +4193,15 @@ def Assert_set_msg(space, w_self, w_new_value): try: w_self.msg = space.interp_w(expr, w_new_value, True) + if type(w_self.msg) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'msg', w_new_value) return w_self.deldictvalue(space, 'msg') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assert_field_unroller = unrolling_iterable(['test', 'msg']) def Assert_init(space, w_self, __args__): @@ -4334,7 +4229,7 @@ ) def Import_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4348,7 +4243,7 @@ def Import_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Import_field_unroller = unrolling_iterable(['names']) def Import_init(space, w_self, __args__): @@ -4380,7 +4275,7 @@ w_obj = w_self.getdictvalue(space, 'module') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module') return space.wrap(w_self.module) @@ -4397,10 +4292,10 @@ w_self.setdictvalue(space, 'module', w_new_value) return w_self.deldictvalue(space, 'module') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ImportFrom_get_names(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4414,14 +4309,14 @@ def ImportFrom_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ImportFrom_get_level(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'level') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level') return space.wrap(w_self.level) @@ -4435,7 +4330,7 @@ w_self.setdictvalue(space, 'level', w_new_value) return w_self.deldictvalue(space, 'level') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ImportFrom_field_unroller = unrolling_iterable(['module', 'names', 'level']) def ImportFrom_init(space, w_self, __args__): @@ -4469,7 +4364,7 @@ w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -4477,20 +4372,22 @@ def Exec_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, 
space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Exec_get_globals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'globals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals') return space.wrap(w_self.globals) @@ -4498,20 +4395,22 @@ def Exec_set_globals(space, w_self, w_new_value): try: w_self.globals = space.interp_w(expr, w_new_value, True) + if type(w_self.globals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'globals', w_new_value) return w_self.deldictvalue(space, 'globals') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Exec_get_locals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'locals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals') return space.wrap(w_self.locals) @@ -4519,13 +4418,15 @@ def Exec_set_locals(space, w_self, w_new_value): try: w_self.locals = space.interp_w(expr, w_new_value, True) + if type(w_self.locals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'locals', w_new_value) return w_self.deldictvalue(space, 'locals') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Exec_field_unroller = unrolling_iterable(['body', 'globals', 'locals']) def Exec_init(space, w_self, __args__): @@ -4554,7 +4455,7 @@ ) def Global_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4568,7 +4469,7 @@ def Global_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Global_field_unroller = unrolling_iterable(['names']) def Global_init(space, w_self, __args__): @@ -4600,7 +4501,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -4608,13 +4509,15 @@ def Expr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Expr_field_unroller = unrolling_iterable(['value']) def Expr_init(space, w_self, 
__args__): @@ -4696,7 +4599,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -4710,14 +4613,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def expr_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -4731,7 +4634,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 expr.typedef = typedef.TypeDef("expr", AST.typedef, @@ -4747,7 +4650,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return boolop_to_class[w_self.op - 1]() @@ -4763,10 +4666,10 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BoolOp_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -4780,7 +4683,7 @@ def BoolOp_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _BoolOp_field_unroller = unrolling_iterable(['op', 'values']) def BoolOp_init(space, w_self, __args__): @@ -4813,7 +4716,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -4821,20 +4724,22 @@ def BinOp_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BinOp_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -4850,14 +4755,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def BinOp_get_right(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'right') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right') return space.wrap(w_self.right) @@ -4865,13 +4770,15 @@ def BinOp_set_right(space, w_self, w_new_value): try: w_self.right = space.interp_w(expr, w_new_value, False) + if type(w_self.right) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'right', w_new_value) return w_self.deldictvalue(space, 'right') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _BinOp_field_unroller = unrolling_iterable(['left', 'op', 'right']) def BinOp_init(space, w_self, __args__): @@ -4904,7 +4811,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return unaryop_to_class[w_self.op - 1]() @@ -4920,14 +4827,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def UnaryOp_get_operand(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'operand') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand') return space.wrap(w_self.operand) @@ -4935,13 +4842,15 @@ def UnaryOp_set_operand(space, w_self, w_new_value): try: w_self.operand = space.interp_w(expr, w_new_value, False) + if type(w_self.operand) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'operand', w_new_value) return w_self.deldictvalue(space, 'operand') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _UnaryOp_field_unroller = unrolling_iterable(['op', 'operand']) def UnaryOp_init(space, w_self, __args__): @@ -4973,7 +4882,7 @@ w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -4987,14 +4896,14 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Lambda_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5002,13 +4911,15 @@ def Lambda_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Lambda_field_unroller = unrolling_iterable(['args', 'body']) def Lambda_init(space, w_self, __args__): @@ -5040,7 +4951,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -5048,20 +4959,22 @@ def IfExp_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def IfExp_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5069,20 +4982,22 @@ def IfExp_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def IfExp_get_orelse(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'orelse') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') return space.wrap(w_self.orelse) @@ -5090,13 +5005,15 @@ def IfExp_set_orelse(space, w_self, w_new_value): try: w_self.orelse = space.interp_w(expr, w_new_value, False) + if type(w_self.orelse) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'orelse', w_new_value) return w_self.deldictvalue(space, 'orelse') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _IfExp_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def IfExp_init(space, w_self, __args__): @@ -5125,7 +5042,7 @@ ) def Dict_get_keys(space, w_self): - if not 
w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys') if w_self.w_keys is None: @@ -5139,10 +5056,10 @@ def Dict_set_keys(space, w_self, w_new_value): w_self.w_keys = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Dict_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -5156,7 +5073,7 @@ def Dict_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Dict_field_unroller = unrolling_iterable(['keys', 'values']) def Dict_init(space, w_self, __args__): @@ -5186,7 +5103,7 @@ ) def Set_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -5200,7 +5117,7 @@ def Set_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Set_field_unroller = unrolling_iterable(['elts']) def Set_init(space, w_self, __args__): @@ -5232,7 +5149,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5240,16 +5157,18 @@ def ListComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ListComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5263,7 +5182,7 @@ def ListComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _ListComp_field_unroller = unrolling_iterable(['elt', 'generators']) def ListComp_init(space, w_self, __args__): @@ -5296,7 +5215,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5304,16 +5223,18 @@ def SetComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) 
except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def SetComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5327,7 +5248,7 @@ def SetComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _SetComp_field_unroller = unrolling_iterable(['elt', 'generators']) def SetComp_init(space, w_self, __args__): @@ -5360,7 +5281,7 @@ w_obj = w_self.getdictvalue(space, 'key') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key') return space.wrap(w_self.key) @@ -5368,20 +5289,22 @@ def DictComp_set_key(space, w_self, w_new_value): try: w_self.key = space.interp_w(expr, w_new_value, False) + if type(w_self.key) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'key', w_new_value) return w_self.deldictvalue(space, 'key') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def DictComp_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5389,16 +5312,18 @@ def DictComp_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def DictComp_get_generators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5412,7 +5337,7 @@ def DictComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _DictComp_field_unroller = unrolling_iterable(['key', 'value', 'generators']) def DictComp_init(space, w_self, __args__): @@ -5446,7 +5371,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5454,16 +5379,18 @@ def GeneratorExp_set_elt(space, 
w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def GeneratorExp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5477,7 +5404,7 @@ def GeneratorExp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _GeneratorExp_field_unroller = unrolling_iterable(['elt', 'generators']) def GeneratorExp_init(space, w_self, __args__): @@ -5510,7 +5437,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5518,13 +5445,15 @@ def Yield_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Yield_field_unroller = unrolling_iterable(['value']) def Yield_init(space, w_self, __args__): @@ -5555,7 +5484,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -5563,16 +5492,18 @@ def Compare_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Compare_get_ops(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops') if w_self.w_ops is None: @@ -5586,10 +5517,10 @@ def Compare_set_ops(space, w_self, w_new_value): w_self.w_ops = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Compare_get_comparators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators') if w_self.w_comparators is None: @@ -5603,7 +5534,7 @@ 
def Compare_set_comparators(space, w_self, w_new_value): w_self.w_comparators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Compare_field_unroller = unrolling_iterable(['left', 'ops', 'comparators']) def Compare_init(space, w_self, __args__): @@ -5638,7 +5569,7 @@ w_obj = w_self.getdictvalue(space, 'func') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func') return space.wrap(w_self.func) @@ -5646,16 +5577,18 @@ def Call_set_func(space, w_self, w_new_value): try: w_self.func = space.interp_w(expr, w_new_value, False) + if type(w_self.func) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'func', w_new_value) return w_self.deldictvalue(space, 'func') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Call_get_args(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') if w_self.w_args is None: @@ -5669,10 +5602,10 @@ def Call_set_args(space, w_self, w_new_value): w_self.w_args = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Call_get_keywords(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') if w_self.w_keywords is None: @@ -5686,14 +5619,14 @@ def Call_set_keywords(space, w_self, w_new_value): w_self.w_keywords = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def Call_get_starargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'starargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') return space.wrap(w_self.starargs) @@ -5701,20 +5634,22 @@ def Call_set_starargs(space, w_self, w_new_value): try: w_self.starargs = space.interp_w(expr, w_new_value, True) + if type(w_self.starargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'starargs', w_new_value) return w_self.deldictvalue(space, 'starargs') - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 def Call_get_kwargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'kwargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 16: + if not w_self.initialization_state & 64: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') return space.wrap(w_self.kwargs) @@ -5722,13 +5657,15 @@ def Call_set_kwargs(space, w_self, w_new_value): try: w_self.kwargs = space.interp_w(expr, w_new_value, True) + if type(w_self.kwargs) is expr: + raise OperationError(space.w_TypeError, 
space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'kwargs', w_new_value) return w_self.deldictvalue(space, 'kwargs') - w_self.initialization_state |= 16 + w_self.initialization_state |= 64 _Call_field_unroller = unrolling_iterable(['func', 'args', 'keywords', 'starargs', 'kwargs']) def Call_init(space, w_self, __args__): @@ -5765,7 +5702,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5773,13 +5710,15 @@ def Repr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Repr_field_unroller = unrolling_iterable(['value']) def Repr_init(space, w_self, __args__): @@ -5810,7 +5749,7 @@ w_obj = w_self.getdictvalue(space, 'n') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n') return w_self.n @@ -5824,7 +5763,7 @@ w_self.setdictvalue(space, 'n', w_new_value) return w_self.deldictvalue(space, 'n') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Num_field_unroller = unrolling_iterable(['n']) def Num_init(space, w_self, __args__): @@ -5855,7 +5794,7 @@ w_obj = w_self.getdictvalue(space, 's') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's') return w_self.s @@ -5869,7 +5808,7 @@ w_self.setdictvalue(space, 's', w_new_value) return w_self.deldictvalue(space, 's') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Str_field_unroller = unrolling_iterable(['s']) def Str_init(space, w_self, __args__): @@ -5900,7 +5839,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5908,20 +5847,22 @@ def Attribute_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Attribute_get_attr(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'attr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not 
w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr') return space.wrap(w_self.attr) @@ -5935,14 +5876,14 @@ w_self.setdictvalue(space, 'attr', w_new_value) return w_self.deldictvalue(space, 'attr') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Attribute_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -5958,7 +5899,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Attribute_field_unroller = unrolling_iterable(['value', 'attr', 'ctx']) def Attribute_init(space, w_self, __args__): @@ -5991,7 +5932,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5999,20 +5940,22 @@ def Subscript_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Subscript_get_slice(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'slice') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice') return space.wrap(w_self.slice) @@ -6020,20 +5963,22 @@ def Subscript_set_slice(space, w_self, w_new_value): try: w_self.slice = space.interp_w(slice, w_new_value, False) + if type(w_self.slice) is slice: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'slice', w_new_value) return w_self.deldictvalue(space, 'slice') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Subscript_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6049,7 +5994,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Subscript_field_unroller = unrolling_iterable(['value', 'slice', 'ctx']) def Subscript_init(space, w_self, __args__): @@ 
-6082,7 +6027,7 @@ w_obj = w_self.getdictvalue(space, 'id') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id') return space.wrap(w_self.id) @@ -6096,14 +6041,14 @@ w_self.setdictvalue(space, 'id', w_new_value) return w_self.deldictvalue(space, 'id') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Name_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6119,7 +6064,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Name_field_unroller = unrolling_iterable(['id', 'ctx']) def Name_init(space, w_self, __args__): @@ -6147,7 +6092,7 @@ ) def List_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6161,14 +6106,14 @@ def List_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def List_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6184,7 +6129,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _List_field_unroller = unrolling_iterable(['elts', 'ctx']) def List_init(space, w_self, __args__): @@ -6213,7 +6158,7 @@ ) def Tuple_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6227,14 +6172,14 @@ def Tuple_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Tuple_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6250,7 +6195,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Tuple_field_unroller = unrolling_iterable(['elts', 
'ctx']) def Tuple_init(space, w_self, __args__): @@ -6283,7 +6228,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return w_self.value @@ -6297,7 +6242,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Const_field_unroller = unrolling_iterable(['value']) def Const_init(space, w_self, __args__): @@ -6409,6 +6354,8 @@ def Slice_set_lower(space, w_self, w_new_value): try: w_self.lower = space.interp_w(expr, w_new_value, True) + if type(w_self.lower) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6430,6 +6377,8 @@ def Slice_set_upper(space, w_self, w_new_value): try: w_self.upper = space.interp_w(expr, w_new_value, True) + if type(w_self.upper) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6451,6 +6400,8 @@ def Slice_set_step(space, w_self, w_new_value): try: w_self.step = space.interp_w(expr, w_new_value, True) + if type(w_self.step) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6540,6 +6491,8 @@ def Index_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6809,6 +6762,8 @@ def comprehension_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6830,6 +6785,8 @@ def comprehension_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6887,7 +6844,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -6901,14 +6858,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def excepthandler_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -6922,7 +6879,7 @@ 
w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 excepthandler.typedef = typedef.TypeDef("excepthandler", AST.typedef, @@ -6938,7 +6895,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -6946,20 +6903,22 @@ def ExceptHandler_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ExceptHandler_get_name(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -6967,16 +6926,18 @@ def ExceptHandler_set_name(space, w_self, w_new_value): try: w_self.name = space.interp_w(expr, w_new_value, True) + if type(w_self.name) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ExceptHandler_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -6990,7 +6951,7 @@ def ExceptHandler_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ExceptHandler_field_unroller = unrolling_iterable(['type', 'name', 'body']) def ExceptHandler_init(space, w_self, __args__): @@ -7164,6 +7125,8 @@ def keyword_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -79,6 +79,7 @@ else: self.emit("class %s(AST):" % (base,)) if sum.attributes: + self.emit("") args = ", ".join(attr.name.value for attr in sum.attributes) self.emit("def __init__(self, %s):" % (args,), 1) for attr in sum.attributes: @@ -114,7 +115,7 @@ else: names.append(repr(field.name.value)) sub = (", ".join(names), name.value) - self.emit("missing_field(space, self.initialization_state, [%s], %r)" + self.emit("self.missing_field(space, [%s], %r)" % sub, 3) 
self.emit("else:", 2) # Fill in all the default fields. @@ -195,17 +196,13 @@ def visitConstructor(self, cons, base, extra_attributes): self.emit("class %s(%s):" % (cons.name, base)) self.emit("") - for field in self.data.cons_attributes[cons]: - subst = (field.name, self.data.field_masks[field]) - self.emit("_%s_mask = %i" % subst, 1) - self.emit("") self.make_constructor(cons.fields, cons, extra_attributes, base) self.emit("") self.emit("def walkabout(self, visitor):", 1) self.emit("visitor.visit_%s(self)" % (cons.name,), 2) self.emit("") self.make_mutate_over(cons, cons.name) - self.make_var_syncer(cons.fields + self.data.cons_attributes[cons], + self.make_var_syncer(self.data.cons_attributes[cons] + cons.fields, cons, cons.name) def visitField(self, field): @@ -324,7 +321,7 @@ def visitSum(self, sum, name): for field in sum.attributes: - self.make_property(field, name, True) + self.make_property(field, name) self.make_typedef(name, "AST", sum.attributes, fields_name="_attributes") if not is_simple_sum(sum): @@ -400,13 +397,10 @@ def visitField(self, field, name): self.make_property(field, name) - def make_property(self, field, name, different_masks=False): + def make_property(self, field, name): func = "def %s_get_%s(space, w_self):" % (name, field.name) self.emit(func) - if different_masks: - flag = "w_self._%s_mask" % (field.name,) - else: - flag = self.data.field_masks[field] + flag = self.data.field_masks[field] if not field.seq: self.emit("if w_self.w_dict is not None:", 1) self.emit(" w_obj = w_self.getdictvalue(space, '%s')" % (field.name,), 1) @@ -458,6 +452,11 @@ config = (field.name, field.type, repr(field.opt)) self.emit("w_self.%s = space.interp_w(%s, w_new_value, %s)" % config, 2) + if field.type.value not in self.data.prod_simple: + self.emit("if type(w_self.%s) is %s:" % ( + field.name, field.type), 2) + self.emit("raise OperationError(space.w_TypeError, " + "space.w_None)", 3) else: level = 2 if field.opt and field.type.value != "int": @@ -505,7 +504,10 @@ optional_mask = 0 for i, field in enumerate(fields): flag = 1 << i - field_masks[field] = flag + if field not in field_masks: + field_masks[field] = flag + else: + assert field_masks[field] == flag if field.opt: optional_mask |= flag else: @@ -518,9 +520,9 @@ if is_simple_sum(sum): simple_types.add(tp.name.value) else: + attrs = [field for field in sum.attributes] for cons in sum.types: - attrs = [copy_field(field) for field in sum.attributes] - add_masks(cons.fields + attrs, cons) + add_masks(attrs + cons.fields, cons) cons_attributes[cons] = attrs else: prod = tp.value @@ -588,6 +590,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." + state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \\"%s\\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \\"%s\\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -631,15 +651,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." 
- for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \\"%s\\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) - raise AssertionError("should not reach here") """ diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -249,7 +249,7 @@ while len(result) < count: x = fn() keys = [x._getregkey()] - if isinstance(x, StackLoc) and x.width > WORD: + if isinstance(x, StackLoc) and x.get_width() > WORD: keys.append(keys[0] + WORD) for key in keys: if key in seen: @@ -267,7 +267,7 @@ for i, loc in enumerate(locations): if isinstance(loc, RegLoc): if loc.is_xmm: - if loc.width > WORD: + if loc.get_width() > WORD: newvalue = ('value-xmm-%d' % i, 'value-xmm-hiword-%d' % i) else: @@ -276,8 +276,8 @@ else: regs1[loc.value] = 'value-int-%d' % i elif isinstance(loc, StackLoc): - stack[loc.value] = 'value-width%d-%d' % (loc.width, i) - if loc.width > WORD: + stack[loc.value] = 'value-width%d-%d' % (loc.get_width(), i) + if loc.get_width() > WORD: stack[loc.value+WORD] = 'value-hiword-%d' % i else: assert isinstance(loc, ImmedLoc) @@ -299,7 +299,7 @@ # def read(loc, expected_width=None): if expected_width is not None: - assert loc.width == expected_width + assert loc.get_width() == expected_width if isinstance(loc, RegLoc): if loc.is_xmm: return regs2[loc.value] @@ -307,7 +307,7 @@ return regs1[loc.value] if isinstance(loc, StackLoc): got = stack[loc.value] - if loc.width > WORD: + if loc.get_width() > WORD: got = (got, stack[loc.value+WORD]) return got if isinstance(loc, ImmedLoc): @@ -321,7 +321,7 @@ else: regs1[loc.value] = newvalue elif isinstance(loc, StackLoc): - if loc.width > WORD: + if loc.get_width() > WORD: newval1, newval2 = newvalue stack[loc.value] = newval1 stack[loc.value+WORD] = newval2 diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -285,3 +285,10 @@ [], lineno=1, col_offset=0) ]) exec compile(body, '', 'exec') + + def test_invalid_sum(self): + import _ast as ast + pos = dict(lineno=2, col_offset=3) + m = ast.Module([ast.Expr(ast.expr(**pos), **pos)]) + exc = raises(TypeError, compile, m, "", "exec") + diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -329,11 +329,16 @@ special_ops = {'repr': True, 'userdel': True, 'hash': True} for opname, _, arity, special_methods in ObjSpace.MethodTable: - if opname in special_ops: + if opname in special_ops or not special_methods: continue nonspaceargs = ", ".join(["w_obj%s" % i for i in range(arity)]) code = "def func(space, %s):\n '''%s'''\n" % (nonspaceargs, opname) - for i in range(arity): + assert arity >= len(special_methods) + forcing_count = len(special_methods) + if opname.startswith('inplace_'): + assert arity == 2 + forcing_count = arity + for i in range(forcing_count): code += " w_obj%s = force(space, w_obj%s)\n" % (i, i) code += " return space.%s(%s)" % (opname, nonspaceargs) exec py.code.Source(code).compile() diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ b/pypy/module/_weakref/test/test_weakref.py @@ -466,3 +466,44 @@ # No exception should be 
raised here gc.collect() + def test_add(self): + import _weakref + class A(object): + def __add__(self, other): + return other + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + a3 = p1 + p2 + assert a3 is a2 + + def test_inplace_add(self): + import _weakref + class A(object): + def __add__(self, other): + return other + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + p1 += p2 + assert p1 is a2 + + def test_setattr(self): + import _weakref + class A(object): + def __setitem__(self, key, value): + self.setkey = key + self.setvalue = value + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + p1[p2] = 42 + assert a1.setkey is p2 + assert a1.setvalue == 42 + # + p1[42] = p2 + assert a1.setkey == 42 + assert a1.setvalue is p2 diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -53,7 +53,6 @@ i = start for j in range(arr.size): arr[j] = i - j += 1 i += step return arr diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -133,7 +133,7 @@ descr__new__, get_dtype = new_dtype_getter("long") class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - pass + descr__new__, get_dtype = new_dtype_getter("ulong") class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int64") diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,34 +1,90 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.interp_dtype import get_dtype_cache -from pypy.rlib.rstruct.runpack import runpack from pypy.rpython.lltypesystem import lltype, rffi +from pypy.module.micronumpy import interp_dtype +from pypy.objspace.std.strutil import strip_spaces FLOAT_SIZE = rffi.sizeof(lltype.Float) - at unwrap_spec(s=str) -def fromstring(space, s): +def _fromstring_text(space, s, count, sep, length, dtype): from pypy.module.micronumpy.interp_numarray import W_NDimArray + + sep_stripped = strip_spaces(sep) + skip_bad_vals = len(sep_stripped) == 0 + + items = [] + num_items = 0 + idx = 0 + + while (num_items < count or count == -1) and idx < len(s): + nextidx = s.find(sep, idx) + if nextidx < 0: + nextidx = length + piece = strip_spaces(s[idx:nextidx]) + if len(piece) > 0 or not skip_bad_vals: + if len(piece) == 0 and not skip_bad_vals: + val = dtype.itemtype.default_fromstring(space) + else: + try: + val = dtype.coerce(space, space.wrap(piece)) + except OperationError, e: + if not e.match(space, space.w_ValueError): + raise + gotit = False + while not gotit and len(piece) > 0: + piece = piece[:-1] + try: + val = dtype.coerce(space, space.wrap(piece)) + gotit = True + except OperationError, e: + if not e.match(space, space.w_ValueError): + raise + if not gotit: + val = dtype.itemtype.default_fromstring(space) + nextidx = length + items.append(val) + num_items += 1 + idx = nextidx + 1 + + if count > num_items: + raise OperationError(space.w_ValueError, space.wrap( + "string is smaller than requested size")) + + a = W_NDimArray(num_items, [num_items], dtype=dtype) + for i, val in enumerate(items): + 
a.dtype.setitem(a.storage, i, val) + + return space.wrap(a) + +def _fromstring_bin(space, s, count, length, dtype): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + + itemsize = dtype.itemtype.get_element_size() + if count == -1: + count = length / itemsize + if length % itemsize != 0: + raise operationerrfmt(space.w_ValueError, + "string length %d not divisable by item size %d", + length, itemsize) + if count * itemsize > length: + raise OperationError(space.w_ValueError, space.wrap( + "string is smaller than requested size")) + + a = W_NDimArray(count, [count], dtype=dtype) + for i in range(count): + val = dtype.itemtype.runpack_str(s[i*itemsize:i*itemsize + itemsize]) + a.dtype.setitem(a.storage, i, val) + + return space.wrap(a) + + at unwrap_spec(s=str, count=int, sep=str) +def fromstring(space, s, w_dtype=None, count=-1, sep=''): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) length = len(s) - - if length % FLOAT_SIZE == 0: - number = length/FLOAT_SIZE + if sep == '': + return _fromstring_bin(space, s, count, length, dtype) else: - raise OperationError(space.w_ValueError, space.wrap( - "string length %d not divisable by %d" % (length, FLOAT_SIZE))) - - dtype = get_dtype_cache(space).w_float64dtype - a = W_NDimArray(number, [number], dtype=dtype) - - start = 0 - end = FLOAT_SIZE - i = 0 - while i < number: - part = s[start:end] - a.dtype.setitem(a.storage, i, dtype.box(runpack('d', part))) - i += 1 - start += FLOAT_SIZE - end += FLOAT_SIZE - - return space.wrap(a) + return _fromstring_text(space, s, count, sep, length, dtype) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1194,13 +1194,110 @@ import struct BaseNumpyAppTest.setup_class.im_func(cls) cls.w_data = cls.space.wrap(struct.pack('dddd', 1, 2, 3, 4)) + cls.w_fdata = cls.space.wrap(struct.pack('f', 2.3)) + cls.w_float32val = cls.space.wrap(struct.pack('f', 5.2)) + cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4)) + cls.w_ulongval = cls.space.wrap(struct.pack('L', 12)) def test_fromstring(self): - from numpypy import fromstring + import sys + from numpypy import fromstring, array, uint8, float32, int32 + a = fromstring(self.data) for i in range(4): assert a[i] == i + 1 - raises(ValueError, fromstring, "abc") + b = fromstring('\x01\x02', dtype=uint8) + assert a[0] == 1 + assert a[1] == 2 + c = fromstring(self.fdata, dtype=float32) + assert c[0] == float32(2.3) + d = fromstring("1 2", sep=' ', count=2, dtype=uint8) + assert len(d) == 2 + assert d[0] == 1 + assert d[1] == 2 + e = fromstring('3, 4,5', dtype=uint8, sep=',') + assert len(e) == 3 + assert e[0] == 3 + assert e[1] == 4 + assert e[2] == 5 + f = fromstring('\x01\x02\x03\x04\x05', dtype=uint8, count=3) + assert len(f) == 3 + assert f[0] == 1 + assert f[1] == 2 + assert f[2] == 3 + g = fromstring("1 2 3 ", dtype=uint8, sep=" ") + assert len(g) == 3 + assert g[0] == 1 + assert g[1] == 2 + assert g[2] == 3 + h = fromstring("1, , 2, 3", dtype=uint8, sep=",") + assert (h == [1,0,2,3]).all() + i = fromstring("1 2 3", dtype=uint8, sep=" ") + assert (i == [1,2,3]).all() + j = fromstring("1\t\t\t\t2\t3", dtype=uint8, sep="\t") + assert (j == [1,2,3]).all() + k = fromstring("1,x,2,3", dtype=uint8, sep=",") + assert (k == [1,0]).all() + l = fromstring("1,x,2,3", dtype='float32', sep=",") + assert (l == [1.0,-1.0]).all() + m = 
fromstring("1,,2,3", sep=",") + assert (m == [1.0,-1.0,2.0,3.0]).all() + n = fromstring("3.4 2.0 3.8 2.2", dtype=int32, sep=" ") + assert (n == [3]).all() + o = fromstring("1.0 2f.0f 3.8 2.2", dtype=float32, sep=" ") + assert len(o) == 2 + assert o[0] == 1.0 + assert o[1] == 2.0 + p = fromstring("1.0,,2.0,3.0", sep=",") + assert (p == [1.0, -1.0, 2.0, 3.0]).all() + q = fromstring("1.0,,2.0,3.0", sep=" ") + assert (q == [1.0]).all() + r = fromstring("\x01\x00\x02", dtype='bool') + assert (r == [True, False, True]).all() + s = fromstring("1,2,3,,5", dtype=bool, sep=",") + assert (s == [True, True, True, False, True]).all() + t = fromstring("", bool) + assert (t == []).all() + u = fromstring("\x01\x00\x00\x00\x00\x00\x00\x00", dtype=int) + if sys.maxint > 2 ** 31 - 1: + assert (u == [1]).all() + else: + assert (u == [1, 0]).all() + + def test_fromstring_types(self): + from numpypy import (fromstring, int8, int16, int32, int64, uint8, + uint16, uint32, float32, float64) + + a = fromstring('\xFF', dtype=int8) + assert a[0] == -1 + b = fromstring('\xFF', dtype=uint8) + assert b[0] == 255 + c = fromstring('\xFF\xFF', dtype=int16) + assert c[0] == -1 + d = fromstring('\xFF\xFF', dtype=uint16) + assert d[0] == 65535 + e = fromstring('\xFF\xFF\xFF\xFF', dtype=int32) + assert e[0] == -1 + f = fromstring('\xFF\xFF\xFF\xFF', dtype=uint32) + assert repr(f[0]) == '4294967295' + g = fromstring('\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF', dtype=int64) + assert g[0] == -1 + h = fromstring(self.float32val, dtype=float32) + assert h[0] == float32(5.2) + i = fromstring(self.float64val, dtype=float64) + assert i[0] == float64(300.4) + j = fromstring(self.ulongval, dtype='L') + assert j[0] == 12 + + + def test_fromstring_invalid(self): + from numpypy import fromstring, uint16, uint8, int32 + #default dtype is 64-bit float, so 3 bytes should fail + raises(ValueError, fromstring, "\x01\x02\x03") + #3 bytes is not modulo 2 bytes (int16) + raises(ValueError, fromstring, "\x01\x03\x03", dtype=uint16) + #5 bytes is larger than 3 bytes + raises(ValueError, fromstring, "\x01\x02\x03", count=5, dtype=uint8) class AppTestRepr(BaseNumpyAppTest): diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -8,6 +8,7 @@ from pypy.rlib.objectmodel import specialize from pypy.rlib.rarithmetic import LONG_BIT, widen from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rlib.rstruct.runpack import runpack def simple_unary_op(func): @@ -55,6 +56,7 @@ class Primitive(object): _mixin_ = True + def get_element_size(self): return rffi.sizeof(self.T) @@ -84,6 +86,9 @@ def _coerce(self, space, w_item): raise NotImplementedError + def default_fromstring(self, space): + raise NotImplementedError + def read(self, storage, width, i, offset): return self.box(libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), width, storage, i, offset @@ -102,6 +107,9 @@ width, storage, i, offset, value ) + def runpack_str(self, s): + return self.box(runpack(self.format_code, s)) + @simple_binary_op def add(self, v1, v2): return v1 + v2 @@ -164,6 +172,7 @@ class Bool(BaseType, Primitive): T = lltype.Bool BoxType = interp_boxes.W_BoolBox + format_code = "?" 
True = BoxType(True) False = BoxType(False) @@ -193,6 +202,9 @@ def for_computation(self, v): return int(v) + def default_fromstring(self, space): + return self.box(False) + class Integer(Primitive): _mixin_ = True @@ -206,6 +218,9 @@ def for_computation(self, v): return widen(v) + def default_fromstring(self, space): + return self.box(0) + @simple_binary_op def div(self, v1, v2): if v2 == 0: @@ -241,42 +256,52 @@ class Int8(BaseType, Integer): T = rffi.SIGNEDCHAR BoxType = interp_boxes.W_Int8Box + format_code = "b" class UInt8(BaseType, Integer): T = rffi.UCHAR BoxType = interp_boxes.W_UInt8Box + format_code = "B" class Int16(BaseType, Integer): T = rffi.SHORT BoxType = interp_boxes.W_Int16Box + format_code = "h" class UInt16(BaseType, Integer): T = rffi.USHORT BoxType = interp_boxes.W_UInt16Box + format_code = "H" class Int32(BaseType, Integer): T = rffi.INT BoxType = interp_boxes.W_Int32Box + format_code = "i" class UInt32(BaseType, Integer): T = rffi.UINT BoxType = interp_boxes.W_UInt32Box + format_code = "I" class Long(BaseType, Integer): T = rffi.LONG BoxType = interp_boxes.W_LongBox + format_code = "l" class ULong(BaseType, Integer): T = rffi.ULONG BoxType = interp_boxes.W_ULongBox + format_code = "L" class Int64(BaseType, Integer): T = rffi.LONGLONG BoxType = interp_boxes.W_Int64Box + format_code = "q" class UInt64(BaseType, Integer): T = rffi.ULONGLONG BoxType = interp_boxes.W_UInt64Box + format_code = "Q" def _coerce(self, space, w_item): try: @@ -304,6 +329,9 @@ def for_computation(self, v): return float(v) + def default_fromstring(self, space): + return self.box(-1.0) + @simple_binary_op def div(self, v1, v2): try: @@ -403,7 +431,9 @@ class Float32(BaseType, Float): T = rffi.FLOAT BoxType = interp_boxes.W_Float32Box + format_code = "f" class Float64(BaseType, Float): T = rffi.DOUBLE - BoxType = interp_boxes.W_Float64Box \ No newline at end of file + BoxType = interp_boxes.W_Float64Box + format_code = "d" \ No newline at end of file diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -17,17 +17,8 @@ g() log = self.run(main, [500]) - # XXX XXX this test fails so far because of a detail that - # changed with jit-simplify-backendintf. We should try to - # think of a way to be more resistent against such details. - # The issue is that we now get one Tracing, then go back - # to the interpreter hoping to immediately run the JITted - # code; but instead, we Trace again, just because another - # counter was also about to reach its limit... - loop, = log.loops_by_filename(self.filepath, is_entry_bridge='*') + loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ - ... - label(..., descr=...) i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) setfield_gc(p45, i29, descr=) @@ -36,7 +27,7 @@ jump(..., descr=...) """) assert loop.match_by_id("subtract", """ - setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me + setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me i2 = int_sub_ovf(i1, 42) guard_no_overflow(descr=...) 
""") diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -514,44 +514,41 @@ if maxsplit == 0: return space.wrap(input) - #print "from replace, input: %s, sub: %s, by: %s" % (input, sub, by) + # An ok guess at the default size + builder = StringBuilder(len(input)) + first = True if not sub: upper = len(input) if maxsplit > 0 and maxsplit < upper + 2: upper = maxsplit - 1 assert upper >= 0 - substrings_w = [""] + first = False for i in range(upper): - c = input[i] - substrings_w.append(c) - substrings_w.append(input[upper:]) + builder.append(by) + builder.append(input[i]) + builder.append(by) + builder.append_slice(input, upper, len(input)) else: start = 0 sublen = len(sub) - substrings_w = [] while maxsplit != 0: next = input.find(sub, start) if next < 0: break - substrings_w.append(input[start:next]) + if not first: + builder.append(by) + first = False + builder.append_slice(input, start, next) start = next + sublen maxsplit -= 1 # NB. if it's already < 0, it stays < 0 - substrings_w.append(input[start:]) + if not first: + builder.append(by) + builder.append_slice(input, start, len(input)) - try: - # XXX conservative estimate. If your strings are that close - # to overflowing, bad luck. - one = ovfcheck(len(substrings_w) * len(by)) - ovfcheck(one + len(input)) - except OverflowError: - raise OperationError( - space.w_OverflowError, - space.wrap("replace string is too long")) - - return space.wrap(by.join(substrings_w)) + return space.wrap(builder.build()) def str_replace__String_ANY_ANY_ANY(space, w_self, w_sub, w_by, w_maxsplit): diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py --- a/pypy/rlib/longlong2float.py +++ b/pypy/rlib/longlong2float.py @@ -79,19 +79,19 @@ longlong2float = rffi.llexternal( "pypy__longlong2float", [rffi.LONGLONG], rffi.DOUBLE, _callable=longlong2float_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) float2longlong = rffi.llexternal( "pypy__float2longlong", [rffi.DOUBLE], rffi.LONGLONG, _callable=float2longlong_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, _callable=uint2singlefloat_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) singlefloat2uint = rffi.llexternal( "pypy__singlefloat2uint", [rffi.FLOAT], rffi.UINT, _callable=singlefloat2uint_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) diff --git a/pypy/tool/gcc_cache.py b/pypy/tool/gcc_cache.py --- a/pypy/tool/gcc_cache.py +++ b/pypy/tool/gcc_cache.py @@ -11,9 +11,6 @@ # Import 'platform' every time, the compiler may have been changed from pypy.translator.platform import platform cache_dir = cache_dir_root.join(cachename).ensure(dir=1) - c_files.extend([py.path.local(f) for f in eci.separate_module_files]) - eci = ExternalCompilationInfo(**eci._copy_attributes()) - eci.separate_module_files = () filecontents = [c_file.read() for c_file in c_files] key = repr((filecontents, eci, platform.key())) hash = md5(key).hexdigest() From noreply at buildbot.pypy.org Mon Dec 19 06:49:26 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 
06:49:26 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: fix Message-ID: <20111219054926.D450182009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50689:836dcbe6b49e Date: 2011-12-19 06:47 +0100 http://bitbucket.org/pypy/pypy/changeset/836dcbe6b49e/ Log: fix diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -384,7 +384,7 @@ category = 'i' elif result_type == history.REF: assert RESULT == llmemory.GCREF # should be ensured by the caller - result = 'res' + result = 'lltype.cast_opaque_ptr(llmemory.GCREF, res)' category = 'r' elif result_type == history.FLOAT: result = 'longlong.getfloatstorage(res)' From noreply at buildbot.pypy.org Mon Dec 19 06:49:28 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 06:49:28 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: fix Message-ID: <20111219054928.0617B82009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50690:0375ea14f16d Date: 2011-12-19 06:48 +0100 http://bitbucket.org/pypy/pypy/changeset/0375ea14f16d/ Log: fix diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -199,7 +199,7 @@ return rffi.cast(lltype.Signed, self.addrs) + WORD def get_malloc_slowpath_addr(self): - return self.c_malloc_nursery_fn.value + return self.get_malloc_fn_addr('malloc_nursery') def check_nothing_in_nursery(self): # CALL_MALLOC_NURSERY should not write anything in the nursery From noreply at buildbot.pypy.org Mon Dec 19 07:26:15 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 07:26:15 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: skip this test, does not contain anything really interesting any more Message-ID: <20111219062615.4DF5682009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50691:cda4e2cc10e1 Date: 2011-12-19 07:19 +0100 http://bitbucket.org/pypy/pypy/changeset/cda4e2cc10e1/ Log: skip this test, does not contain anything really interesting any more diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -69,6 +69,7 @@ return ctypes.cast(res.value._obj.intval, ctypes.POINTER(item_tp)) def test_allocations(self): + py.test.skip("rewrite or kill") from pypy.rpython.lltypesystem import rstr allocs = [None] From noreply at buildbot.pypy.org Mon Dec 19 07:26:16 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 07:26:16 +0100 (CET) Subject: [pypy-commit] pypy default: Fix if the malloc() functions provided by gc.py don't raise MemoryError Message-ID: <20111219062616.83C3A82009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50692:30994ab30fe5 Date: 2011-12-19 07:23 +0100 http://bitbucket.org/pypy/pypy/changeset/30994ab30fe5/ Log: Fix if the malloc() functions provided by gc.py don't raise MemoryError themselves, as e.g. the Boehm ones. 
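[Illustrative aside, not part of the original commit: the change below teaches the JIT backend to record a MemoryError as the pending exception when it leaves compiled code because a malloc function returned NULL without raising, which is what the Boehm-based allocation functions do. A minimal pure-Python sketch of the idea follows; it reuses a few names from the diff but is not the real llsupport API, and the "do not overwrite an already saved exception" detail only appears in a follow-up commit later in this thread:

    class ToyCPU(object):
        """Sketch: remember a pending exception while leaving JITted code."""
        def __init__(self):
            self.saved_exc_value = None

        def on_leave_jitted_save_exc(self, exc=None):
            # normal path: store whatever exception the failing guard set
            self.saved_exc_value = exc

        def on_leave_jitted_memoryerr(self, exc=None):
            # allocation-failure path: if nothing else was recorded,
            # default to a MemoryError
            self.on_leave_jitted_save_exc(exc)
            if self.saved_exc_value is None:
                self.saved_exc_value = MemoryError()

    cpu = ToyCPU()
    cpu.on_leave_jitted_memoryerr()   # simulates malloc returning NULL
    assert isinstance(cpu.saved_exc_value, MemoryError)

The real patch has to build the MemoryError differently depending on whether the code runs translated (a real instance cast to a GCREF) or untranslated in tests (a plain string marker), as the diff below shows.]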
diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -107,9 +107,14 @@ _exception_emulator[1] = 0 self.saved_exc_value = rffi.cast(llmemory.GCREF, v_i) + def save_exception_memoryerr(): + save_exception() + self.saved_exc_value = "memoryerror!" # for tests + self.pos_exception = pos_exception self.pos_exc_value = pos_exc_value self.save_exception = save_exception + self.save_exception_memoryerr = save_exception_memoryerr self.insert_stack_check = lambda: (0, 0, 0) @@ -134,6 +139,13 @@ # in the assignment to self.saved_exc_value, as needed. self.saved_exc_value = exc_value + def save_exception_memoryerr(): + from pypy.rpython.annlowlevel import cast_instance_to_base_ptr + save_exception() + exc = MemoryError() + exc = cast_instance_to_base_ptr(exc) + self.saved_exc_value = lltype.cast_opaque_ptr(llmemory.GCREF, exc) + from pypy.rlib import rstack STACK_CHECK_SLOWPATH = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) @@ -147,16 +159,19 @@ self.pos_exception = pos_exception self.pos_exc_value = pos_exc_value self.save_exception = save_exception + self.save_exception_memoryerr = save_exception_memoryerr self.insert_stack_check = insert_stack_check def _setup_on_leave_jitted_untranslated(self): # assume we don't need a backend leave in this case self.on_leave_jitted_save_exc = self.save_exception + self.on_leave_jitted_memoryerr = self.save_exception_memoryerr self.on_leave_jitted_noexc = lambda : None def _setup_on_leave_jitted_translated(self): on_leave_jitted_hook = self.get_on_leave_jitted_hook() save_exception = self.save_exception + save_exception_memoryerr = self.save_exception_memoryerr def on_leave_jitted_noexc(): on_leave_jitted_hook() @@ -165,16 +180,23 @@ save_exception() on_leave_jitted_hook() + def on_leave_jitted_memoryerr(): + save_exception_memoryerr() + on_leave_jitted_hook() + self.on_leave_jitted_noexc = on_leave_jitted_noexc self.on_leave_jitted_save_exc = on_leave_jitted_save_exc + self.on_leave_jitted_memoryerr = on_leave_jitted_memoryerr def get_on_leave_jitted_hook(self): return lambda : None _ON_JIT_LEAVE_FUNC = lltype.Ptr(lltype.FuncType([], lltype.Void)) - def get_on_leave_jitted_int(self, save_exception): - if save_exception: + def get_on_leave_jitted_int(self, save_exception, memoryerror=False): + if memoryerror: + f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_memoryerr) + elif save_exception: f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_save_exc) else: f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_noexc) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -2930,6 +2930,8 @@ # overflowing value: fail = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) assert fail.identifier == excdescr.identifier + exc = self.cpu.grab_exc_value() + assert exc == "memoryerror!" 
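    # (Illustrative aside, not part of the original diff: the "memoryerror!"
    #  string checked just above is only a marker used by the untranslated
    #  tests; it comes from the save_exception_memoryerr() variant defined
    #  "for tests" earlier in this patch.  When translated, the same hook
    #  builds a real MemoryError instance and casts it to a GCREF with
    #  cast_instance_to_base_ptr() and lltype.cast_opaque_ptr(), so that
    #  grab_exc_value() hands back an actual exception object.)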
def test_compile_loop_with_target(self): i0 = BoxInt() diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -275,7 +275,8 @@ # self.mc = codebuf.MachineCodeBlockWrapper() # call on_leave_jitted_save_exc() - addr = self.cpu.get_on_leave_jitted_int(save_exception=True) + addr = self.cpu.get_on_leave_jitted_int(save_exception=True, + memoryerror=True) self.mc.CALL(imm(addr)) self.mc.MOV_ri(eax.value, self.cpu.propagate_exception_v) self._call_footer() From noreply at buildbot.pypy.org Mon Dec 19 07:26:17 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 07:26:17 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: hg merge default Message-ID: <20111219062617.B721682009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50693:5fab2978624a Date: 2011-12-19 07:23 +0100 http://bitbucket.org/pypy/pypy/changeset/5fab2978624a/ Log: hg merge default diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -106,9 +106,14 @@ _exception_emulator[1] = 0 self.saved_exc_value = rffi.cast(llmemory.GCREF, v_i) + def save_exception_memoryerr(): + save_exception() + self.saved_exc_value = "memoryerror!" # for tests + self.pos_exception = pos_exception self.pos_exc_value = pos_exc_value self.save_exception = save_exception + self.save_exception_memoryerr = save_exception_memoryerr self.insert_stack_check = lambda: (0, 0, 0) @@ -133,6 +138,13 @@ # in the assignment to self.saved_exc_value, as needed. self.saved_exc_value = exc_value + def save_exception_memoryerr(): + from pypy.rpython.annlowlevel import cast_instance_to_base_ptr + save_exception() + exc = MemoryError() + exc = cast_instance_to_base_ptr(exc) + self.saved_exc_value = lltype.cast_opaque_ptr(llmemory.GCREF, exc) + from pypy.rlib import rstack STACK_CHECK_SLOWPATH = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) @@ -146,16 +158,19 @@ self.pos_exception = pos_exception self.pos_exc_value = pos_exc_value self.save_exception = save_exception + self.save_exception_memoryerr = save_exception_memoryerr self.insert_stack_check = insert_stack_check def _setup_on_leave_jitted_untranslated(self): # assume we don't need a backend leave in this case self.on_leave_jitted_save_exc = self.save_exception + self.on_leave_jitted_memoryerr = self.save_exception_memoryerr self.on_leave_jitted_noexc = lambda : None def _setup_on_leave_jitted_translated(self): on_leave_jitted_hook = self.get_on_leave_jitted_hook() save_exception = self.save_exception + save_exception_memoryerr = self.save_exception_memoryerr def on_leave_jitted_noexc(): on_leave_jitted_hook() @@ -164,16 +179,23 @@ save_exception() on_leave_jitted_hook() + def on_leave_jitted_memoryerr(): + save_exception_memoryerr() + on_leave_jitted_hook() + self.on_leave_jitted_noexc = on_leave_jitted_noexc self.on_leave_jitted_save_exc = on_leave_jitted_save_exc + self.on_leave_jitted_memoryerr = on_leave_jitted_memoryerr def get_on_leave_jitted_hook(self): return lambda : None _ON_JIT_LEAVE_FUNC = lltype.Ptr(lltype.FuncType([], lltype.Void)) - def get_on_leave_jitted_int(self, save_exception): - if save_exception: + def get_on_leave_jitted_int(self, save_exception, memoryerror=False): + if memoryerror: + f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_memoryerr) + elif save_exception: f = 
llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_save_exc) else: f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_noexc) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -2930,6 +2930,8 @@ # overflowing value: fail = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) assert fail.identifier == excdescr.identifier + exc = self.cpu.grab_exc_value() + assert exc == "memoryerror!" def test_compile_loop_with_target(self): i0 = BoxInt() diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -257,7 +257,8 @@ # self.mc = codebuf.MachineCodeBlockWrapper() # call on_leave_jitted_save_exc() - addr = self.cpu.get_on_leave_jitted_int(save_exception=True) + addr = self.cpu.get_on_leave_jitted_int(save_exception=True, + memoryerror=True) self.mc.CALL(imm(addr)) self.mc.MOV_ri(eax.value, self.cpu.propagate_exception_v) self._call_footer() From noreply at buildbot.pypy.org Mon Dec 19 07:26:18 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 07:26:18 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: Don't raise MemoryError explicitly; it is not needed, and confuses Message-ID: <20111219062618.E6C3982009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50694:5d8f68105e9c Date: 2011-12-19 07:25 +0100 http://bitbucket.org/pypy/pypy/changeset/5d8f68105e9c/ Log: Don't raise MemoryError explicitly; it is not needed, and confuses test_runner:test_memoryerror. diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -40,8 +40,8 @@ def generate_function(self, funcname, func, ARGS, RESULT=llmemory.GCREF): """Generates a variant of malloc with the given name and the given - arguments. It should raise MemoryError and return NULL if out of - memory. + arguments. It should return NULL if out of memory. If it raises + anything, it must be an optional MemoryError. 
""" FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, RESULT)) descr = get_call_descr(self, ARGS, RESULT) @@ -173,10 +173,7 @@ def _make_functions(self): def malloc_fixedsize(size): - res = self.malloc_fn_ptr(size) - if not res: - raise MemoryError - return res + return self.malloc_fn_ptr(size) self.generate_function('malloc_fixedsize', malloc_fixedsize, [lltype.Signed]) @@ -184,12 +181,11 @@ try: totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) except OverflowError: - raise MemoryError + return lltype.nullptr(llmemory.GCREF.TO) res = self.malloc_fn_ptr(totalsize) - if not res: - raise MemoryError - arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) - arrayptr[ofs_length/WORD] = num_elem + if res: + arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) + arrayptr[ofs_length/WORD] = num_elem return res self.generate_function('malloc_array', malloc_array, [lltype.Signed] * 4) From noreply at buildbot.pypy.org Mon Dec 19 07:33:31 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 07:33:31 +0100 (CET) Subject: [pypy-commit] pypy default: don't overwrite a previous exception with MemoryError, if any Message-ID: <20111219063331.0B8F382009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50695:fb6b04734b48 Date: 2011-12-19 07:32 +0100 http://bitbucket.org/pypy/pypy/changeset/fb6b04734b48/ Log: don't overwrite a previous exception with MemoryError, if any diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -109,7 +109,8 @@ def save_exception_memoryerr(): save_exception() - self.saved_exc_value = "memoryerror!" # for tests + if not self.saved_exc_value: + self.saved_exc_value = "memoryerror!" 
# for tests self.pos_exception = pos_exception self.pos_exc_value = pos_exc_value @@ -142,9 +143,11 @@ def save_exception_memoryerr(): from pypy.rpython.annlowlevel import cast_instance_to_base_ptr save_exception() - exc = MemoryError() - exc = cast_instance_to_base_ptr(exc) - self.saved_exc_value = lltype.cast_opaque_ptr(llmemory.GCREF, exc) + if not self.saved_exc_value: + exc = MemoryError() + exc = cast_instance_to_base_ptr(exc) + exc = lltype.cast_opaque_ptr(llmemory.GCREF, exc) + self.saved_exc_value = exc from pypy.rlib import rstack STACK_CHECK_SLOWPATH = lltype.Ptr(lltype.FuncType([lltype.Signed], @@ -193,8 +196,9 @@ _ON_JIT_LEAVE_FUNC = lltype.Ptr(lltype.FuncType([], lltype.Void)) - def get_on_leave_jitted_int(self, save_exception, memoryerror=False): - if memoryerror: + def get_on_leave_jitted_int(self, save_exception, + default_to_memoryerror=False): + if default_to_memoryerror: f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_memoryerr) elif save_exception: f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_save_exc) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -276,7 +276,7 @@ self.mc = codebuf.MachineCodeBlockWrapper() # call on_leave_jitted_save_exc() addr = self.cpu.get_on_leave_jitted_int(save_exception=True, - memoryerror=True) + default_to_memoryerror=True) self.mc.CALL(imm(addr)) self.mc.MOV_ri(eax.value, self.cpu.propagate_exception_v) self._call_footer() From noreply at buildbot.pypy.org Mon Dec 19 07:44:49 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 07:44:49 +0100 (CET) Subject: [pypy-commit] pypy default: fix on 64-bit, found by test_zll_stress again Message-ID: <20111219064449.BE01D82009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50696:805e3e5dd0f6 Date: 2011-12-19 07:44 +0100 http://bitbucket.org/pypy/pypy/changeset/805e3e5dd0f6/ Log: fix on 64-bit, found by test_zll_stress again diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -866,8 +866,8 @@ high_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[1] low_part = intmask(low_part) high_part = intmask(high_part) - self.mc.MOV_bi(to_loc.value, low_part) - self.mc.MOV_bi(to_loc.value + 4, high_part) + self.mc.MOV32_bi(to_loc.value, low_part) + self.mc.MOV32_bi(to_loc.value + 4, high_part) def regalloc_perform(self, op, arglocs, resloc): genop_list[op.getopnum()](self, op, arglocs, resloc) From noreply at buildbot.pypy.org Mon Dec 19 07:45:45 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 07:45:45 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: hg merge default Message-ID: <20111219064545.215FA82009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50697:f654eab08551 Date: 2011-12-19 07:45 +0100 http://bitbucket.org/pypy/pypy/changeset/f654eab08551/ Log: hg merge default diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -108,7 +108,8 @@ def save_exception_memoryerr(): save_exception() - self.saved_exc_value = "memoryerror!" # for tests + if not self.saved_exc_value: + self.saved_exc_value = "memoryerror!" 
# for tests self.pos_exception = pos_exception self.pos_exc_value = pos_exc_value @@ -141,9 +142,11 @@ def save_exception_memoryerr(): from pypy.rpython.annlowlevel import cast_instance_to_base_ptr save_exception() - exc = MemoryError() - exc = cast_instance_to_base_ptr(exc) - self.saved_exc_value = lltype.cast_opaque_ptr(llmemory.GCREF, exc) + if not self.saved_exc_value: + exc = MemoryError() + exc = cast_instance_to_base_ptr(exc) + exc = lltype.cast_opaque_ptr(llmemory.GCREF, exc) + self.saved_exc_value = exc from pypy.rlib import rstack STACK_CHECK_SLOWPATH = lltype.Ptr(lltype.FuncType([lltype.Signed], @@ -192,8 +195,9 @@ _ON_JIT_LEAVE_FUNC = lltype.Ptr(lltype.FuncType([], lltype.Void)) - def get_on_leave_jitted_int(self, save_exception, memoryerror=False): - if memoryerror: + def get_on_leave_jitted_int(self, save_exception, + default_to_memoryerror=False): + if default_to_memoryerror: f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_memoryerr) elif save_exception: f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_save_exc) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -258,7 +258,7 @@ self.mc = codebuf.MachineCodeBlockWrapper() # call on_leave_jitted_save_exc() addr = self.cpu.get_on_leave_jitted_int(save_exception=True, - memoryerror=True) + default_to_memoryerror=True) self.mc.CALL(imm(addr)) self.mc.MOV_ri(eax.value, self.cpu.propagate_exception_v) self._call_footer() @@ -848,8 +848,8 @@ high_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[1] low_part = intmask(low_part) high_part = intmask(high_part) - self.mc.MOV_bi(to_loc.value, low_part) - self.mc.MOV_bi(to_loc.value + 4, high_part) + self.mc.MOV32_bi(to_loc.value, low_part) + self.mc.MOV32_bi(to_loc.value + 4, high_part) def regalloc_perform(self, op, arglocs, resloc): genop_list[op.getopnum()](self, op, arglocs, resloc) From noreply at buildbot.pypy.org Mon Dec 19 08:23:29 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 08:23:29 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: fix Message-ID: <20111219072329.A747F82009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50698:7b32ea5f4208 Date: 2011-12-19 07:57 +0100 http://bitbucket.org/pypy/pypy/changeset/7b32ea5f4208/ Log: fix diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -37,7 +37,7 @@ def get_arg_types(self): return self.arg_types - def get_return_type(self): + def get_result_type(self): return self.typeinfo def get_extra_info(self): From noreply at buildbot.pypy.org Mon Dec 19 09:13:40 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 09:13:40 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: fix Message-ID: <20111219081340.D1DEF82009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50699:83ec8d8b7000 Date: 2011-12-19 09:11 +0100 http://bitbucket.org/pypy/pypy/changeset/83ec8d8b7000/ Log: fix diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -1,3 +1,4 @@ +import sys from pypy.rlib.rarithmetic import ovfcheck from pypy.jit.metainterp.history import ConstInt, BoxPtr, ConstPtr from pypy.jit.metainterp.resoperation import ResOperation, rop @@ -208,13 +209,16 @@ 
"""Try to generate or update a CALL_MALLOC_NURSERY. If that fails, generate a plain CALL_MALLOC_GC instead. """ + if size <= (sys.maxint & ~(WORD-1)): + size = self.round_up_for_allocation(size) + else: + size = sys.maxint #corner case: generate a malloc that always fail + # if not self.gc_ll_descr.can_use_nursery_malloc(size): self.gen_malloc_fixedsize(size, v_result) return # - size = self.round_up_for_allocation(size) op = None - # if self._op_malloc_nursery is not None: # already a MALLOC_NURSERY: increment its total size total_size = self._op_malloc_nursery.getarg(0).getint() diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -149,7 +149,7 @@ jump() """, """ [p1] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102, \ + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \ descr=malloc_fixedsize_descr) setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) jump() @@ -364,15 +364,15 @@ self.gc_ll_descr.max_size_of_young_obj = 100 self.check_rewrite(""" [] - p0 = new_array(100, descr=bdescr) + p0 = new_array(103, descr=bdescr) jump() """, """ [] p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ - %(bdescr.basesize + 100)d, \ + %(bdescr.basesize + 104)d, \ descr=malloc_fixedsize_descr) setfield_gc(p0, 8765, descr=tiddescr) - setfield_gc(p0, 100, descr=blendescr) + setfield_gc(p0, 103, descr=blendescr) jump() """) @@ -421,7 +421,7 @@ jump() """, """ [p1] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102, \ + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \ descr=malloc_fixedsize_descr) setfield_gc(p0, 9315, descr=tiddescr) setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) From noreply at buildbot.pypy.org Mon Dec 19 09:13:42 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 09:13:42 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: Fix on 64-bit Message-ID: <20111219081342.0167482009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50700:3ab2729fe2b1 Date: 2011-12-19 09:13 +0100 http://bitbucket.org/pypy/pypy/changeset/3ab2729fe2b1/ Log: Fix on 64-bit diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -480,6 +480,7 @@ """) def test_write_barrier_before_short_array(self): + self.gc_ll_descr.max_size_of_young_obj = 2000 self.check_rewrite(""" [i2, p3] p1 = new_array(129, descr=cdescr) @@ -500,6 +501,7 @@ def test_write_barrier_before_long_array(self): # the limit of "being too long" is fixed, arbitrarily, at 130 + self.gc_ll_descr.max_size_of_young_obj = 2000 self.check_rewrite(""" [i2, p3] p1 = new_array(130, descr=cdescr) From noreply at buildbot.pypy.org Mon Dec 19 09:44:43 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 09:44:43 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: - choose an arbitrary upper limit, and beyond it, generate a regular Message-ID: <20111219084443.9121D82009@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50701:5467c010ecde Date: 2011-12-19 09:40 +0100 http://bitbucket.org/pypy/pypy/changeset/5467c010ecde/ Log: - choose an arbitrary upper limit, and beyond it, generate a regular malloc_array even if the length is constant - fix boehm translation diff --git 
a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -116,6 +116,7 @@ class GcLLDescr_boehm(GcLLDescription): kind = 'boehm' moving_gc = False + round_up = False gcrootmap = None write_barrier_descr = None fielddescr_tid = None @@ -617,6 +618,7 @@ class GcLLDescr_framework(GcLLDescription): DEBUG = False # forced to True by x86/test/test_zrpy_gc.py kind = 'framework' + round_up = True def __init__(self, gcdescr, translator, rtyper, llop1=llop, really_not_translated=False): diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py --- a/pypy/jit/backend/llsupport/rewrite.py +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -112,7 +112,7 @@ pass # total_size is still -1 elif arraydescr.itemsize == 0: total_size = arraydescr.basesize - if total_size >= 0: + if 0 <= total_size <= 0xffffff: # up to 16MB, arbitrarily self.gen_malloc_nursery(total_size, op.result) self.gen_initialize_tid(op.result, arraydescr.tid) self.gen_initialize_len(op.result, v_length, arraydescr.lendescr) @@ -209,11 +209,7 @@ """Try to generate or update a CALL_MALLOC_NURSERY. If that fails, generate a plain CALL_MALLOC_GC instead. """ - if size <= (sys.maxint & ~(WORD-1)): - size = self.round_up_for_allocation(size) - else: - size = sys.maxint #corner case: generate a malloc that always fail - # + size = self.round_up_for_allocation(size) if not self.gc_ll_descr.can_use_nursery_malloc(size): self.gen_malloc_fixedsize(size, v_result) return @@ -319,6 +315,8 @@ self.gen_write_barrier(v_base, v_value) def round_up_for_allocation(self, size): + if not self.gc_ll_descr.round_up: + return size if self.gc_ll_descr.translate_support_code: from pypy.rpython.lltypesystem import llarena return llarena.round_up_for_allocation( diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py --- a/pypy/jit/backend/llsupport/test/test_rewrite.py +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -149,7 +149,7 @@ jump() """, """ [p1] - p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \ + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102, \ descr=malloc_fixedsize_descr) setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) jump() @@ -400,6 +400,20 @@ jump() """) + def test_rewrite_assembler_huge_size(self): + # "huge" is defined as "larger than 0xffffff bytes, or 16MB" + self.check_rewrite(""" + [] + p0 = new_array(20000000, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, 20000000, \ + descr=malloc_array_descr) + jump() + """) + def test_new_with_vtable(self): self.check_rewrite(""" [] From noreply at buildbot.pypy.org Mon Dec 19 13:19:26 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Dec 2011 13:19:26 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: unify Broadcast and View Message-ID: <20111219121926.3A324823F8@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50702:2d23987ef2d2 Date: 2011-12-19 14:18 +0200 http://bitbucket.org/pypy/pypy/changeset/2d23987ef2d2/ Log: unify Broadcast and View diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -61,11 +61,28 @@ return self.offset class ViewIterator(BaseIterator): - def __init__(self, arr): + def __init__(self, arr, res_shape=None): 
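    # (Illustrative aside, not part of the original diff: the new res_shape
    #  argument is what lets this single ViewIterator replace the separate
    #  BroadcastIterator.  In the constructor body below, every dimension
    #  where arr.shape[i] == 1 gets stride 0, so stepping along that axis
    #  keeps rereading the same element, and missing leading dimensions are
    #  padded with stride-0 entries as well; that is exactly numpy-style
    #  broadcasting over res_shape.)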
self.indices = [0] * len(arr.shape) self.offset = arr.start - self.arr = arr self._done = False + if res_shape is not None and res_shape != arr.shape: + self.strides = [] + self.backstrides = [] + for i in range(len(arr.shape)): + if arr.shape[i] == 1: + self.strides.append(0) + self.backstrides.append(0) + else: + self.strides.append(arr.strides[i]) + self.backstrides.append(arr.backstrides[i]) + self.strides = [0] * (len(res_shape) - len(arr.shape)) + self.strides + self.backstrides = [0] * (len(res_shape) - len(arr.shape)) + self.backstrides + self.res_shape = res_shape + else: + self.strides = arr.strides + self.backstrides = arr.backstrides + self.res_shape = arr.shape + @jit.unroll_safe def next(self, shapelen): @@ -75,59 +92,6 @@ indices[i] = self.indices[i] done = False for i in range(shapelen - 1, -1, -1): - if indices[i] < self.arr.shape[i] - 1: - indices[i] += 1 - offset += self.arr.strides[i] - break - else: - indices[i] = 0 - offset -= self.arr.backstrides[i] - else: - done = True - res = instantiate(ViewIterator) - res.offset = offset - res.indices = indices - res.arr = self.arr - res._done = done - return res - - def done(self): - return self._done - - def get_offset(self): - return self.offset - -class BroadcastIterator(BaseIterator): - '''Like a view iterator, but will repeatedly access values - for all iterations across a res_shape, folding the offset - using mod() arithmetic - ''' - def __init__(self, arr, res_shape): - self.indices = [0] * len(res_shape) - self.offset = arr.start - #strides are 0 where original shape==1 - self.strides = [] - self.backstrides = [] - for i in range(len(arr.shape)): - if arr.shape[i] == 1: - self.strides.append(0) - self.backstrides.append(0) - else: - self.strides.append(arr.strides[i]) - self.backstrides.append(arr.backstrides[i]) - self.res_shape = res_shape - self.strides = [0] * (len(res_shape) - len(arr.shape)) + self.strides - self.backstrides = [0] * (len(res_shape) - len(arr.shape)) + self.backstrides - self._done = False - - @jit.unroll_safe - def next(self, shapelen): - offset = self.offset - indices = [0] * shapelen - _done = False - for i in range(shapelen): - indices[i] = self.indices[i] - for i in range(shapelen - 1, -1, -1): if indices[i] < self.res_shape[i] - 1: indices[i] += 1 offset += self.strides[i] @@ -136,14 +100,14 @@ indices[i] = 0 offset -= self.backstrides[i] else: - _done = True - res = instantiate(BroadcastIterator) + done = True + res = instantiate(ViewIterator) + res.offset = offset res.indices = indices - res.offset = offset - res._done = _done res.strides = self.strides res.backstrides = self.backstrides res.res_shape = self.res_shape + res._done = done return res def done(self): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -875,6 +875,8 @@ class ConcreteArray(BaseArray): """ An array that have actual storage, whether owned or not """ + _immutable_fields_ = ['storage'] + def __init__(self, size, shape, dtype, order='C', parent=None): self.size = size self.parent = parent @@ -1010,8 +1012,6 @@ """ A class representing contiguous array. 
We know that each iteration by say ufunc will increase the data index by one """ - _immutable_fields_ = ['storage'] - def copy(self): array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) rffi.c_memcpy( diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -1,25 +1,10 @@ from pypy.rlib.objectmodel import r_dict, compute_identity_hash, compute_hash from pypy.rlib.rarithmetic import intmask from pypy.module.micronumpy.interp_iter import ViewIterator, ArrayIterator, \ - BroadcastIterator, OneDimIterator, ConstantIterator + OneDimIterator, ConstantIterator from pypy.rpython.lltypesystem.llmemory import cast_ptr_to_adr from pypy.rlib.jit import hint, unroll_safe, promote -# def components_eq(lhs, rhs): -# if len(lhs) != len(rhs): -# return False -# for i in range(len(lhs)): -# v1, v2 = lhs[i], rhs[i] -# if type(v1) is not type(v2) or not v1.eq(v2): -# return False -# return True - -# def components_hash(components): -# res = 0x345678 -# for component in components: -# res = intmask((1000003 * res) ^ component.hash()) -# return res - def sigeq(one, two): return one.eq(two) From noreply at buildbot.pypy.org Mon Dec 19 13:32:20 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 13:32:20 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: fix Message-ID: <20111219123220.88139823F8@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50703:4b7166b905f8 Date: 2011-12-19 13:32 +0100 http://bitbucket.org/pypy/pypy/changeset/4b7166b905f8/ Log: fix diff --git a/pypy/jit/metainterp/test/test_executor.py b/pypy/jit/metainterp/test/test_executor.py --- a/pypy/jit/metainterp/test/test_executor.py +++ b/pypy/jit/metainterp/test/test_executor.py @@ -18,7 +18,7 @@ pass class FakeCallDescr(FakeDescr): - def get_return_type(self): + def get_result_type(self): return history.FLOAT class FakeFieldDescr(FakeDescr): From noreply at buildbot.pypy.org Mon Dec 19 13:37:52 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Dec 2011 13:37:52 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: simplify and remove ForcedSignature alltogether Message-ID: <20111219123753.03F70823F8@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50704:3dbc54249ed3 Date: 2011-12-19 14:37 +0200 http://bitbucket.org/pypy/pypy/changeset/3dbc54249ed3/ Log: simplify and remove ForcedSignature alltogether diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -708,10 +708,11 @@ def descr_debug_repr(self, space): return space.wrap(self.find_sig().debug_repr()) - def find_sig(self): + def find_sig(self, res_shape=None): """ find a correct signature for the array """ - return signature.find_sig(self.create_sig(), self) + res_shape = res_shape or self.shape + return signature.find_sig(self.create_sig(res_shape), self) def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): @@ -762,7 +763,7 @@ # so in order to have a consistent API, let it go through. 
pass - def create_sig(self): + def create_sig(self, res_shape): return signature.ScalarSignature(self.dtype) class VirtualArray(BaseArray): @@ -839,10 +840,11 @@ def _find_dtype(self): return self.res_dtype - def create_sig(self): + def create_sig(self, res_shape): if self.forced_result is not None: - return signature.ForcedSignature(self.forced_result.dtype) - return signature.Call1(self.ufunc, self.name, self.values.create_sig()) + return signature.ArraySignature(self.forced_result.dtype) + return signature.Call1(self.ufunc, self.name, + self.values.create_sig(res_shape)) class Call2(VirtualArray): """ @@ -865,12 +867,12 @@ def _find_size(self): return self.size - def create_sig(self): + def create_sig(self, res_shape): if self.forced_result is not None: - return signature.ForcedSignature(self.forced_result.dtype) + return signature.ArraySignature(self.forced_result.dtype) return signature.Call2(self.ufunc, self.name, self.calc_dtype, - self.left.create_sig(), - self.right.create_sig()) + self.left.create_sig(res_shape), + self.right.create_sig(res_shape)) class ConcreteArray(BaseArray): """ An array that have actual storage, whether owned or not @@ -946,7 +948,7 @@ self._sliceloop(w_value, res_shape) def _sliceloop(self, source, res_shape): - sig = source.find_sig() + sig = source.find_sig(res_shape) frame = sig.create_frame(source) res_iter = ViewIterator(self) shapelen = len(res_shape) @@ -971,7 +973,7 @@ a_iter = a_iter.next(len(array.shape)) return array - def create_sig(self): + def create_sig(self, res_shape): return signature.ViewSignature(self.dtype) def setshape(self, space, new_shape): @@ -1025,7 +1027,7 @@ self.shape = new_shape self.calc_strides(new_shape) - def create_sig(self): + def create_sig(self, res_shape): return signature.ArraySignature(self.dtype) def __del__(self): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -70,7 +70,7 @@ shapelen = len(obj.shape) sig = find_sig(ReduceSignature(self.func, self.name, dtype, ScalarSignature(dtype), - obj.create_sig()), obj) + obj.create_sig(obj.shape)), obj) frame = sig.create_frame(obj) if shapelen > 1 and not multidim: raise OperationError(space.w_NotImplementedError, diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -117,53 +117,23 @@ return 'Array' def _invent_array_numbering(self, arr, cache): - from pypy.module.micronumpy.interp_numarray import ConcreteArray - assert isinstance(arr, ConcreteArray) - self.array_no = _add_ptr_to_cache(arr.storage, cache) + storage = arr.get_concrete().storage + self.array_no = _add_ptr_to_cache(storage, cache) def _create_iter(self, iterlist, arraylist, arr): - from pypy.module.micronumpy.interp_numarray import ConcreteArray - assert isinstance(arr, ConcreteArray) + storage = arr.get_concrete().storage if self.iter_no >= len(iterlist): iterlist.append(self.allocate_iter(arr)) if self.array_no >= len(arraylist): - arraylist.append(arr.storage) + arraylist.append(storage) def allocate_iter(self, arr): return ArrayIterator(arr.size) def eval(self, frame, arr): - from pypy.module.micronumpy.interp_numarray import ConcreteArray - assert isinstance(arr, ConcreteArray) iter = frame.iterators[self.iter_no] return self.dtype.getitem(frame.arrays[self.array_no], iter.offset) -class ForcedSignature(ArraySignature): - def debug_repr(self): - 
return 'ForcedArray' - - def _invent_array_numbering(self, arr, cache): - from pypy.module.micronumpy.interp_numarray import VirtualArray - assert isinstance(arr, VirtualArray) - arr = arr.forced_result - self.array_no = _add_ptr_to_cache(arr.storage, cache) - - def _create_iter(self, iterlist, arraylist, arr): - from pypy.module.micronumpy.interp_numarray import VirtualArray - assert isinstance(arr, VirtualArray) - arr = arr.forced_result - if self.iter_no >= len(iterlist): - iterlist.append(ArrayIterator(arr.size)) - if self.array_no >= len(arraylist): - arraylist.append(arr.storage) - - def eval(self, frame, arr): - from pypy.module.micronumpy.interp_numarray import VirtualArray - assert isinstance(arr, VirtualArray) - arr = arr.forced_result - iter = frame.iterators[self.iter_no] - return self.dtype.getitem(frame.arrays[self.array_no], iter.offset) - class ScalarSignature(ConcreteSignature): def debug_repr(self): return 'Scalar' From noreply at buildbot.pypy.org Mon Dec 19 13:52:03 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Dec 2011 13:52:03 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: reinstitute broadcast - no code addition Message-ID: <20111219125203.BB2EE823F8@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50705:0ce8fad59c36 Date: 2011-12-19 14:51 +0200 http://bitbucket.org/pypy/pypy/changeset/0ce8fad59c36/ Log: reinstitute broadcast - no code addition diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -62,7 +62,6 @@ class ViewIterator(BaseIterator): def __init__(self, arr, res_shape=None): - self.indices = [0] * len(arr.shape) self.offset = arr.start self._done = False if res_shape is not None and res_shape != arr.shape: @@ -82,6 +81,7 @@ self.strides = arr.strides self.backstrides = arr.backstrides self.res_shape = arr.shape + self.indices = [0] * len(self.res_shape) @jit.unroll_safe diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -842,7 +842,7 @@ def create_sig(self, res_shape): if self.forced_result is not None: - return signature.ArraySignature(self.forced_result.dtype) + return self.forced_result.array_sig(res_shape) return signature.Call1(self.ufunc, self.name, self.values.create_sig(res_shape)) @@ -869,7 +869,7 @@ def create_sig(self, res_shape): if self.forced_result is not None: - return signature.ArraySignature(self.forced_result.dtype) + return self.forced_result.array_sig(res_shape) return signature.Call2(self.ufunc, self.name, self.calc_dtype, self.left.create_sig(res_shape), self.right.create_sig(res_shape)) @@ -930,6 +930,11 @@ self.strides = strides[:] self.backstrides = backstrides[:] + def array_sig(self, res_shape): + if res_shape is not None and self.shape != res_shape: + return signature.ViewSignature(self.dtype) + return signature.ArraySignature(self.dtype) + class W_NDimSlice(ConcreteArray): def __init__(self, start, strides, backstrides, shape, parent): if isinstance(parent, W_NDimSlice): @@ -949,7 +954,7 @@ def _sliceloop(self, source, res_shape): sig = source.find_sig(res_shape) - frame = sig.create_frame(source) + frame = sig.create_frame(source, res_shape) res_iter = ViewIterator(self) shapelen = len(res_shape) while not res_iter.done(): @@ -1028,7 +1033,7 @@ self.calc_strides(new_shape) def 
create_sig(self, res_shape): - return signature.ArraySignature(self.dtype) + return self.array_sig(res_shape) def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -88,10 +88,11 @@ allnumbers.append(no) self.iter_no = no - def create_frame(self, arr): + def create_frame(self, arr, res_shape=None): + res_shape = res_shape or arr.shape iterlist = [] arraylist = [] - self._create_iter(iterlist, arraylist, arr) + self._create_iter(iterlist, arraylist, arr, res_shape) return NumpyEvalFrame(iterlist, arraylist) class ConcreteSignature(Signature): @@ -120,14 +121,14 @@ storage = arr.get_concrete().storage self.array_no = _add_ptr_to_cache(storage, cache) - def _create_iter(self, iterlist, arraylist, arr): + def _create_iter(self, iterlist, arraylist, arr, res_shape): storage = arr.get_concrete().storage if self.iter_no >= len(iterlist): - iterlist.append(self.allocate_iter(arr)) + iterlist.append(self.allocate_iter(arr, res_shape)) if self.array_no >= len(arraylist): arraylist.append(storage) - def allocate_iter(self, arr): + def allocate_iter(self, arr, res_shape): return ArrayIterator(arr.size) def eval(self, frame, arr): @@ -141,7 +142,7 @@ def _invent_array_numbering(self, arr, cache): pass - def _create_iter(self, iterlist, arraylist, arr): + def _create_iter(self, iterlist, arraylist, arr, res_shape): if self.iter_no >= len(iterlist): iter = ConstantIterator() iterlist.append(iter) @@ -161,14 +162,14 @@ allnumbers.append(no) self.iter_no = no - def allocate_iter(self, arr): - return ViewIterator(arr) + def allocate_iter(self, arr, res_shape): + return ViewIterator(arr, res_shape) class FlatiterSignature(ViewSignature): def debug_repr(self): return 'FlatIter(%s)' % self.child.debug_repr() - def _create_iter(self, iterlist, arraylist, arr): + def _create_iter(self, iterlist, arraylist, arr, res_shape): raise NotImplementedError class Call1(Signature): @@ -200,10 +201,10 @@ assert isinstance(arr, Call1) self.child._invent_array_numbering(arr.values, cache) - def _create_iter(self, iterlist, arraylist, arr): + def _create_iter(self, iterlist, arraylist, arr, res_shape): from pypy.module.micronumpy.interp_numarray import Call1 assert isinstance(arr, Call1) - self.child._create_iter(iterlist, arraylist, arr.values) + self.child._create_iter(iterlist, arraylist, arr.values, res_shape) def eval(self, frame, arr): from pypy.module.micronumpy.interp_numarray import Call1 @@ -244,12 +245,12 @@ self.left._invent_numbering(cache, allnumbers) self.right._invent_numbering(cache, allnumbers) - def _create_iter(self, iterlist, arraylist, arr): + def _create_iter(self, iterlist, arraylist, arr, res_shape): from pypy.module.micronumpy.interp_numarray import Call2 assert isinstance(arr, Call2) - self.left._create_iter(iterlist, arraylist, arr.left) - self.right._create_iter(iterlist, arraylist, arr.right) + self.left._create_iter(iterlist, arraylist, arr.left, res_shape) + self.right._create_iter(iterlist, arraylist, arr.right, res_shape) def eval(self, frame, arr): from pypy.module.micronumpy.interp_numarray import Call2 @@ -263,8 +264,8 @@ self.right.debug_repr()) class ReduceSignature(Call2): - def _create_iter(self, iterlist, arraylist, arr): - self.right._create_iter(iterlist, arraylist, arr) + def _create_iter(self, iterlist, arraylist, arr, res_shape): + self.right._create_iter(iterlist, arraylist, arr, 
res_shape) def _invent_numbering(self, cache, allnumbers): self.right._invent_numbering(cache, allnumbers) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -865,7 +865,6 @@ assert (a == [8, 6, 4, 2, 0]).all() def test_debug_repr(self): - skip("for now") from numpypy import zeros, sin a = zeros(1) assert a.__debug_repr__() == 'Array' @@ -1001,7 +1000,6 @@ assert a[0, 1, 2] == 1.0 def test_broadcast_ufunc(self): - skip("broadcast unsupported") from numpypy import array a = array([[1, 2], [3, 4], [5, 6]]) b = array([5, 6]) @@ -1009,15 +1007,13 @@ assert c.all() def test_broadcast_setslice(self): - skip("broadcast unsupported") from numpypy import zeros, ones - a = zeros((100, 100)) - b = ones(100) + a = zeros((10, 10)) + b = ones(10) a[:, :] = b - assert a[13, 15] == 1 + assert a[3, 5] == 1 def test_broadcast_shape_agreement(self): - skip("broadcast unsupported") from numpypy import zeros, array a = zeros((3, 1, 3)) b = array(((10, 11, 12), (20, 21, 22), (30, 31, 32))) @@ -1032,7 +1028,6 @@ assert c.all() def test_broadcast_scalar(self): - skip("broadcast unsupported") from numpypy import zeros a = zeros((4, 5), 'd') a[:, 1] = 3 @@ -1044,7 +1039,6 @@ assert a[3, 2] == 0 def test_broadcast_call2(self): - skip("broadcast unsupported") from numpypy import zeros, ones a = zeros((4, 1, 5)) b = ones((4, 3, 5)) From noreply at buildbot.pypy.org Mon Dec 19 13:53:58 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 13:53:58 +0100 (CET) Subject: [pypy-commit] pypy op_malloc_gc: Close branch before merge Message-ID: <20111219125358.BCC2B823F8@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: op_malloc_gc Changeset: r50706:e5cd68baa3d1 Date: 2011-12-19 13:49 +0100 http://bitbucket.org/pypy/pypy/changeset/e5cd68baa3d1/ Log: Close branch before merge From noreply at buildbot.pypy.org Mon Dec 19 13:54:00 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 13:54:00 +0100 (CET) Subject: [pypy-commit] pypy default: Merge op_malloc_gc: move out of the jit backend the NEWxxx operations. Message-ID: <20111219125400.B0541823F8@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50707:8aacc90b4732 Date: 2011-12-19 13:53 +0100 http://bitbucket.org/pypy/pypy/changeset/8aacc90b4732/ Log: Merge op_malloc_gc: move out of the jit backend the NEWxxx operations. Now they are replaced during a rewrite phase with explicit CALL_MALLOC_GC's, which are almost equivalent to plain CALLs, as well as CALL_MALLOC_NURSERY, which are a simplified version of "bump the nursery pointer". The point, besides simplifying the backends, is that the rewriting is done a bit more cleverly now: it will merge several consecutive NEWxxx into a single CALL_MALLOC_NURSERY, which also avoids some write barriers. CALL_MALLOC_GC is only used for mallocs that are either too large or of a non-constant size. 
diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -37,7 +37,7 @@ def get_arg_types(self): return self.arg_types - def get_return_type(self): + def get_result_type(self): return self.typeinfo def get_extra_info(self): diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -5,11 +5,7 @@ from pypy.jit.metainterp.history import AbstractDescr, getkind from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker, longlong - -# The point of the class organization in this file is to make instances -# as compact as possible. This is done by not storing the field size or -# the 'is_pointer_field' flag in the instance itself but in the class -# (in methods actually) using a few classes instead of just one. +from pypy.jit.codewriter.longlong import is_longlong class GcCache(object): @@ -19,6 +15,7 @@ self._cache_size = {} self._cache_field = {} self._cache_array = {} + self._cache_arraylen = {} self._cache_call = {} self._cache_interiorfield = {} @@ -26,24 +23,15 @@ assert isinstance(STRUCT, lltype.GcStruct) def init_array_descr(self, ARRAY, arraydescr): - assert isinstance(ARRAY, lltype.GcArray) + assert (isinstance(ARRAY, lltype.GcArray) or + isinstance(ARRAY, lltype.GcStruct) and ARRAY._arrayfld) -if lltype.SignedLongLong is lltype.Signed: - def is_longlong(TYPE): - return False -else: - assert rffi.sizeof(lltype.SignedLongLong) == rffi.sizeof(lltype.Float) - def is_longlong(TYPE): - return TYPE in (lltype.SignedLongLong, lltype.UnsignedLongLong) - # ____________________________________________________________ # SizeDescrs class SizeDescr(AbstractDescr): size = 0 # help translation - is_immutable = False - tid = llop.combine_ushort(lltype.Signed, 0, 0) def __init__(self, size, count_fields_if_immut=-1): @@ -77,265 +65,247 @@ cache[STRUCT] = sizedescr return sizedescr + # ____________________________________________________________ # FieldDescrs -class BaseFieldDescr(AbstractDescr): +FLAG_POINTER = 'P' +FLAG_FLOAT = 'F' +FLAG_UNSIGNED = 'U' +FLAG_SIGNED = 'S' +FLAG_STRUCT = 'X' +FLAG_VOID = 'V' + +class FieldDescr(AbstractDescr): + name = '' offset = 0 # help translation - name = '' - _clsname = '' + field_size = 0 + flag = '\x00' - def __init__(self, name, offset): + def __init__(self, name, offset, field_size, flag): self.name = name self.offset = offset + self.field_size = field_size + self.flag = flag + + def is_pointer_field(self): + return self.flag == FLAG_POINTER + + def is_float_field(self): + return self.flag == FLAG_FLOAT + + def is_field_signed(self): + return self.flag == FLAG_SIGNED def sort_key(self): return self.offset - def get_field_size(self, translate_support_code): - raise NotImplementedError + def repr_of_descr(self): + return '' % (self.flag, self.name, self.offset) - _is_pointer_field = False # unless overridden by GcPtrFieldDescr - _is_float_field = False # unless overridden by FloatFieldDescr - _is_field_signed = False # unless overridden by XxxFieldDescr - - def is_pointer_field(self): - return self._is_pointer_field - - def is_float_field(self): - return self._is_float_field - - def is_field_signed(self): - return self._is_field_signed - - def repr_of_descr(self): - return '<%s %s %s>' % (self._clsname, self.name, self.offset) - -class DynamicFieldDescr(BaseFieldDescr): - def __init__(self, offset, fieldsize, 
is_pointer, is_float, is_signed): - self.offset = offset - self._fieldsize = fieldsize - self._is_pointer_field = is_pointer - self._is_float_field = is_float - self._is_field_signed = is_signed - - def get_field_size(self, translate_support_code): - return self._fieldsize - -class NonGcPtrFieldDescr(BaseFieldDescr): - _clsname = 'NonGcPtrFieldDescr' - def get_field_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrFieldDescr(NonGcPtrFieldDescr): - _clsname = 'GcPtrFieldDescr' - _is_pointer_field = True - -def getFieldDescrClass(TYPE): - return getDescrClass(TYPE, BaseFieldDescr, GcPtrFieldDescr, - NonGcPtrFieldDescr, 'Field', 'get_field_size', - '_is_float_field', '_is_field_signed') def get_field_descr(gccache, STRUCT, fieldname): cache = gccache._cache_field try: return cache[STRUCT][fieldname] except KeyError: - offset, _ = symbolic.get_field_token(STRUCT, fieldname, - gccache.translate_support_code) + offset, size = symbolic.get_field_token(STRUCT, fieldname, + gccache.translate_support_code) FIELDTYPE = getattr(STRUCT, fieldname) + flag = get_type_flag(FIELDTYPE) name = '%s.%s' % (STRUCT._name, fieldname) - fielddescr = getFieldDescrClass(FIELDTYPE)(name, offset) + fielddescr = FieldDescr(name, offset, size, flag) cachedict = cache.setdefault(STRUCT, {}) cachedict[fieldname] = fielddescr return fielddescr +def get_type_flag(TYPE): + if isinstance(TYPE, lltype.Ptr): + if TYPE.TO._gckind == 'gc': + return FLAG_POINTER + else: + return FLAG_UNSIGNED + if isinstance(TYPE, lltype.Struct): + return FLAG_STRUCT + if TYPE is lltype.Float or is_longlong(TYPE): + return FLAG_FLOAT + if (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and + rffi.cast(TYPE, -1) == -1): + return FLAG_SIGNED + return FLAG_UNSIGNED + +def get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT): + cache = gccache._cache_arraylen + try: + return cache[ARRAY_OR_STRUCT] + except KeyError: + tsc = gccache.translate_support_code + (_, _, ofs) = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + size = symbolic.get_size(lltype.Signed, tsc) + result = FieldDescr("len", ofs, size, get_type_flag(lltype.Signed)) + cache[ARRAY_OR_STRUCT] = result + return result + + # ____________________________________________________________ # ArrayDescrs -_A = lltype.GcArray(lltype.Signed) # a random gcarray -_AF = lltype.GcArray(lltype.Float) # an array of C doubles +class ArrayDescr(AbstractDescr): + tid = 0 + basesize = 0 # workaround for the annotator + itemsize = 0 + lendescr = None + flag = '\x00' - -class BaseArrayDescr(AbstractDescr): - _clsname = '' - tid = llop.combine_ushort(lltype.Signed, 0, 0) - - def get_base_size(self, translate_support_code): - basesize, _, _ = symbolic.get_array_token(_A, translate_support_code) - return basesize - - def get_ofs_length(self, translate_support_code): - _, _, ofslength = symbolic.get_array_token(_A, translate_support_code) - return ofslength - - def get_item_size(self, translate_support_code): - raise NotImplementedError - - _is_array_of_pointers = False # unless overridden by GcPtrArrayDescr - _is_array_of_floats = False # unless overridden by FloatArrayDescr - _is_array_of_structs = False # unless overridden by StructArrayDescr - _is_item_signed = False # unless overridden by XxxArrayDescr + def __init__(self, basesize, itemsize, lendescr, flag): + self.basesize = basesize + self.itemsize = itemsize + self.lendescr = lendescr # or None, if no length + self.flag = flag def is_array_of_pointers(self): - return 
self._is_array_of_pointers + return self.flag == FLAG_POINTER def is_array_of_floats(self): - return self._is_array_of_floats + return self.flag == FLAG_FLOAT + + def is_item_signed(self): + return self.flag == FLAG_SIGNED def is_array_of_structs(self): - return self._is_array_of_structs - - def is_item_signed(self): - return self._is_item_signed + return self.flag == FLAG_STRUCT def repr_of_descr(self): - return '<%s>' % self._clsname + return '' % (self.flag, self.itemsize) -class NonGcPtrArrayDescr(BaseArrayDescr): - _clsname = 'NonGcPtrArrayDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayDescr(NonGcPtrArrayDescr): - _clsname = 'GcPtrArrayDescr' - _is_array_of_pointers = True - -class FloatArrayDescr(BaseArrayDescr): - _clsname = 'FloatArrayDescr' - _is_array_of_floats = True - def get_base_size(self, translate_support_code): - basesize, _, _ = symbolic.get_array_token(_AF, translate_support_code) - return basesize - def get_item_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class StructArrayDescr(BaseArrayDescr): - _clsname = 'StructArrayDescr' - _is_array_of_structs = True - -class BaseArrayNoLengthDescr(BaseArrayDescr): - def get_base_size(self, translate_support_code): - return 0 - - def get_ofs_length(self, translate_support_code): - return -1 - -class DynamicArrayNoLengthDescr(BaseArrayNoLengthDescr): - def __init__(self, itemsize): - self.itemsize = itemsize - - def get_item_size(self, translate_support_code): - return self.itemsize - -class NonGcPtrArrayNoLengthDescr(BaseArrayNoLengthDescr): - _clsname = 'NonGcPtrArrayNoLengthDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayNoLengthDescr(NonGcPtrArrayNoLengthDescr): - _clsname = 'GcPtrArrayNoLengthDescr' - _is_array_of_pointers = True - -def getArrayDescrClass(ARRAY): - if ARRAY.OF is lltype.Float: - return FloatArrayDescr - elif isinstance(ARRAY.OF, lltype.Struct): - class Descr(StructArrayDescr): - _clsname = '%sArrayDescr' % ARRAY.OF._name - def get_item_size(self, translate_support_code): - return symbolic.get_size(ARRAY.OF, translate_support_code) - Descr.__name__ = Descr._clsname - return Descr - return getDescrClass(ARRAY.OF, BaseArrayDescr, GcPtrArrayDescr, - NonGcPtrArrayDescr, 'Array', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def getArrayNoLengthDescrClass(ARRAY): - return getDescrClass(ARRAY.OF, BaseArrayNoLengthDescr, GcPtrArrayNoLengthDescr, - NonGcPtrArrayNoLengthDescr, 'ArrayNoLength', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def get_array_descr(gccache, ARRAY): +def get_array_descr(gccache, ARRAY_OR_STRUCT): cache = gccache._cache_array try: - return cache[ARRAY] + return cache[ARRAY_OR_STRUCT] except KeyError: - # we only support Arrays that are either GcArrays, or raw no-length - # non-gc Arrays. 
- if ARRAY._hints.get('nolength', False): - assert not isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayNoLengthDescrClass(ARRAY)() + tsc = gccache.translate_support_code + basesize, itemsize, _ = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + if isinstance(ARRAY_OR_STRUCT, lltype.Array): + ARRAY_INSIDE = ARRAY_OR_STRUCT else: - assert isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayDescrClass(ARRAY)() - # verify basic assumption that all arrays' basesize and ofslength - # are equal - basesize, itemsize, ofslength = symbolic.get_array_token(ARRAY, False) - assert basesize == arraydescr.get_base_size(False) - assert itemsize == arraydescr.get_item_size(False) - if not ARRAY._hints.get('nolength', False): - assert ofslength == arraydescr.get_ofs_length(False) - if isinstance(ARRAY, lltype.GcArray): - gccache.init_array_descr(ARRAY, arraydescr) - cache[ARRAY] = arraydescr + ARRAY_INSIDE = ARRAY_OR_STRUCT._flds[ARRAY_OR_STRUCT._arrayfld] + if ARRAY_INSIDE._hints.get('nolength', False): + lendescr = None + else: + lendescr = get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT) + flag = get_type_flag(ARRAY_INSIDE.OF) + arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag) + if ARRAY_OR_STRUCT._gckind == 'gc': + gccache.init_array_descr(ARRAY_OR_STRUCT, arraydescr) + cache[ARRAY_OR_STRUCT] = arraydescr return arraydescr + # ____________________________________________________________ # InteriorFieldDescr class InteriorFieldDescr(AbstractDescr): - arraydescr = BaseArrayDescr() # workaround for the annotator - fielddescr = BaseFieldDescr('', 0) + arraydescr = ArrayDescr(0, 0, None, '\x00') # workaround for the annotator + fielddescr = FieldDescr('', 0, 0, '\x00') def __init__(self, arraydescr, fielddescr): + assert arraydescr.flag == FLAG_STRUCT self.arraydescr = arraydescr self.fielddescr = fielddescr + def sort_key(self): + return self.fielddescr.sort_key() + def is_pointer_field(self): return self.fielddescr.is_pointer_field() def is_float_field(self): return self.fielddescr.is_float_field() - def sort_key(self): - return self.fielddescr.sort_key() - def repr_of_descr(self): return '' % self.fielddescr.repr_of_descr() -def get_interiorfield_descr(gc_ll_descr, ARRAY, FIELDTP, name): +def get_interiorfield_descr(gc_ll_descr, ARRAY, name): cache = gc_ll_descr._cache_interiorfield try: - return cache[(ARRAY, FIELDTP, name)] + return cache[(ARRAY, name)] except KeyError: arraydescr = get_array_descr(gc_ll_descr, ARRAY) - fielddescr = get_field_descr(gc_ll_descr, FIELDTP, name) + fielddescr = get_field_descr(gc_ll_descr, ARRAY.OF, name) descr = InteriorFieldDescr(arraydescr, fielddescr) - cache[(ARRAY, FIELDTP, name)] = descr + cache[(ARRAY, name)] = descr return descr +def get_dynamic_interiorfield_descr(gc_ll_descr, offset, width, fieldsize, + is_pointer, is_float, is_signed): + arraydescr = ArrayDescr(0, width, None, FLAG_STRUCT) + if is_pointer: + assert not is_float + flag = FLAG_POINTER + elif is_float: + flag = FLAG_FLOAT + elif is_signed: + flag = FLAG_SIGNED + else: + flag = FLAG_UNSIGNED + fielddescr = FieldDescr('dynamic', offset, fieldsize, flag) + return InteriorFieldDescr(arraydescr, fielddescr) + + # ____________________________________________________________ # CallDescrs -class BaseCallDescr(AbstractDescr): - _clsname = '' - loop_token = None +class CallDescr(AbstractDescr): arg_classes = '' # <-- annotation hack + result_type = '\x00' + result_flag = '\x00' ffi_flags = 1 + call_stub_i = staticmethod(lambda func, args_i, args_r, args_f: + 0) + call_stub_r = 
staticmethod(lambda func, args_i, args_r, args_f: + lltype.nullptr(llmemory.GCREF.TO)) + call_stub_f = staticmethod(lambda func,args_i,args_r,args_f: + longlong.ZEROF) - def __init__(self, arg_classes, extrainfo=None, ffi_flags=1): - self.arg_classes = arg_classes # string of "r" and "i" (ref/int) + def __init__(self, arg_classes, result_type, result_signed, result_size, + extrainfo=None, ffi_flags=1): + """ + 'arg_classes' is a string of characters, one per argument: + 'i', 'r', 'f', 'L', 'S' + + 'result_type' is one character from the same list or 'v' + + 'result_signed' is a boolean True/False + """ + self.arg_classes = arg_classes + self.result_type = result_type + self.result_size = result_size self.extrainfo = extrainfo self.ffi_flags = ffi_flags # NB. the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which # makes sense on Windows as it's the one for all the C functions # we are compiling together with the JIT. On non-Windows platforms # it is just ignored anyway. + if result_type == 'v': + result_flag = FLAG_VOID + elif result_type == 'i': + if result_signed: + result_flag = FLAG_SIGNED + else: + result_flag = FLAG_UNSIGNED + elif result_type == history.REF: + result_flag = FLAG_POINTER + elif result_type == history.FLOAT or result_type == 'L': + result_flag = FLAG_FLOAT + elif result_type == 'S': + result_flag = FLAG_UNSIGNED + else: + raise NotImplementedError("result_type = '%s'" % (result_type,)) + self.result_flag = result_flag def __repr__(self): - res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) + res = 'CallDescr(%s)' % (self.arg_classes,) extraeffect = getattr(self.extrainfo, 'extraeffect', None) if extraeffect is not None: res += ' EF=%r' % extraeffect @@ -363,14 +333,14 @@ def get_arg_types(self): return self.arg_classes - def get_return_type(self): - return self._return_type + def get_result_type(self): + return self.result_type - def get_result_size(self, translate_support_code): - raise NotImplementedError + def get_result_size(self): + return self.result_size def is_result_signed(self): - return False # unless overridden + return self.result_flag == FLAG_SIGNED def create_call_stub(self, rtyper, RESULT): from pypy.rlib.clibffi import FFI_DEFAULT_ABI @@ -408,18 +378,26 @@ seen = {'i': 0, 'r': 0, 'f': 0} args = ", ".join([process(c) for c in self.arg_classes]) - if self.get_return_type() == history.INT: + result_type = self.get_result_type() + if result_type == history.INT: result = 'rffi.cast(lltype.Signed, res)' - elif self.get_return_type() == history.REF: + category = 'i' + elif result_type == history.REF: + assert RESULT == llmemory.GCREF # should be ensured by the caller result = 'lltype.cast_opaque_ptr(llmemory.GCREF, res)' - elif self.get_return_type() == history.FLOAT: + category = 'r' + elif result_type == history.FLOAT: result = 'longlong.getfloatstorage(res)' - elif self.get_return_type() == 'L': + category = 'f' + elif result_type == 'L': result = 'rffi.cast(lltype.SignedLongLong, res)' - elif self.get_return_type() == history.VOID: - result = 'None' - elif self.get_return_type() == 'S': + category = 'f' + elif result_type == history.VOID: + result = '0' + category = 'i' + elif result_type == 'S': result = 'longlong.singlefloat2int(res)' + category = 'i' else: assert 0 source = py.code.Source(""" @@ -433,10 +411,13 @@ d = globals().copy() d.update(locals()) exec source.compile() in d - self.call_stub = d['call_stub'] + call_stub = d['call_stub'] + # store the function into one of three attributes, to preserve + # type-correctness of the 
return value + setattr(self, 'call_stub_%s' % category, call_stub) def verify_types(self, args_i, args_r, args_f, return_type): - assert self._return_type in return_type + assert self.result_type in return_type assert (self.arg_classes.count('i') + self.arg_classes.count('S')) == len(args_i or ()) assert self.arg_classes.count('r') == len(args_r or ()) @@ -444,161 +425,48 @@ self.arg_classes.count('L')) == len(args_f or ()) def repr_of_descr(self): - return '<%s>' % self._clsname + return '' % (self.arg_classes, self.result_type) -class BaseIntCallDescr(BaseCallDescr): - # Base class of the various subclasses of descrs corresponding to - # calls having a return kind of 'int' (including non-gc pointers). - # The inheritance hierarchy is a bit different than with other Descr - # classes because of the 'call_stub' attribute, which is of type - # - # lambda func, args_i, args_r, args_f --> int/ref/float/void - # - # The purpose of BaseIntCallDescr is to be the parent of all classes - # in which 'call_stub' has a return kind of 'int'. - _return_type = history.INT - call_stub = staticmethod(lambda func, args_i, args_r, args_f: 0) - - _is_result_signed = False # can be overridden in XxxCallDescr - def is_result_signed(self): - return self._is_result_signed - -class DynamicIntCallDescr(BaseIntCallDescr): - """ - calldescr that works for every integer type, by explicitly passing it the - size of the result. Used only by get_call_descr_dynamic - """ - _clsname = 'DynamicIntCallDescr' - - def __init__(self, arg_classes, result_size, result_sign, extrainfo, ffi_flags): - BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) - assert isinstance(result_sign, bool) - self._result_size = chr(result_size) - self._result_sign = result_sign - - def get_result_size(self, translate_support_code): - return ord(self._result_size) - - def is_result_signed(self): - return self._result_sign - - -class NonGcPtrCallDescr(BaseIntCallDescr): - _clsname = 'NonGcPtrCallDescr' - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrCallDescr(BaseCallDescr): - _clsname = 'GcPtrCallDescr' - _return_type = history.REF - call_stub = staticmethod(lambda func, args_i, args_r, args_f: - lltype.nullptr(llmemory.GCREF.TO)) - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class FloatCallDescr(BaseCallDescr): - _clsname = 'FloatCallDescr' - _return_type = history.FLOAT - call_stub = staticmethod(lambda func,args_i,args_r,args_f: longlong.ZEROF) - def get_result_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class LongLongCallDescr(FloatCallDescr): - _clsname = 'LongLongCallDescr' - _return_type = 'L' - -class VoidCallDescr(BaseCallDescr): - _clsname = 'VoidCallDescr' - _return_type = history.VOID - call_stub = staticmethod(lambda func, args_i, args_r, args_f: None) - def get_result_size(self, translate_support_code): - return 0 - -_SingleFloatCallDescr = None # built lazily - -def getCallDescrClass(RESULT): - if RESULT is lltype.Void: - return VoidCallDescr - if RESULT is lltype.Float: - return FloatCallDescr - if RESULT is lltype.SingleFloat: - global _SingleFloatCallDescr - if _SingleFloatCallDescr is None: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(RESULT) - class SingleFloatCallDescr(getCallDescrClass(rffi.UINT)): - _clsname = 'SingleFloatCallDescr' - _return_type = 'S' - _SingleFloatCallDescr = SingleFloatCallDescr - return 
_SingleFloatCallDescr - if is_longlong(RESULT): - return LongLongCallDescr - return getDescrClass(RESULT, BaseIntCallDescr, GcPtrCallDescr, - NonGcPtrCallDescr, 'Call', 'get_result_size', - Ellipsis, # <= floatattrname should not be used here - '_is_result_signed') -getCallDescrClass._annspecialcase_ = 'specialize:memo' +def map_type_to_argclass(ARG, accept_void=False): + kind = getkind(ARG) + if kind == 'int': + if ARG is lltype.SingleFloat: return 'S' + else: return 'i' + elif kind == 'ref': return 'r' + elif kind == 'float': + if is_longlong(ARG): return 'L' + else: return 'f' + elif kind == 'void': + if accept_void: return 'v' + raise NotImplementedError('ARG = %r' % (ARG,)) def get_call_descr(gccache, ARGS, RESULT, extrainfo=None): - arg_classes = [] - for ARG in ARGS: - kind = getkind(ARG) - if kind == 'int': - if ARG is lltype.SingleFloat: - arg_classes.append('S') + arg_classes = map(map_type_to_argclass, ARGS) + arg_classes = ''.join(arg_classes) + result_type = map_type_to_argclass(RESULT, accept_void=True) + RESULT_ERASED = RESULT + if RESULT is lltype.Void: + result_size = 0 + result_signed = False + else: + if isinstance(RESULT, lltype.Ptr): + # avoid too many CallDescrs + if result_type == 'r': + RESULT_ERASED = llmemory.GCREF else: - arg_classes.append('i') - elif kind == 'ref': arg_classes.append('r') - elif kind == 'float': - if is_longlong(ARG): - arg_classes.append('L') - else: - arg_classes.append('f') - else: - raise NotImplementedError('ARG = %r' % (ARG,)) - arg_classes = ''.join(arg_classes) - cls = getCallDescrClass(RESULT) - key = (cls, arg_classes, extrainfo) + RESULT_ERASED = llmemory.Address + result_size = symbolic.get_size(RESULT_ERASED, + gccache.translate_support_code) + result_signed = get_type_flag(RESULT) == FLAG_SIGNED + key = (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo) cache = gccache._cache_call try: - return cache[key] + calldescr = cache[key] except KeyError: - calldescr = cls(arg_classes, extrainfo) - calldescr.create_call_stub(gccache.rtyper, RESULT) + calldescr = CallDescr(arg_classes, result_type, result_signed, + result_size, extrainfo) + calldescr.create_call_stub(gccache.rtyper, RESULT_ERASED) cache[key] = calldescr - return calldescr - - -# ____________________________________________________________ - -def getDescrClass(TYPE, BaseDescr, GcPtrDescr, NonGcPtrDescr, - nameprefix, methodname, floatattrname, signedattrname, - _cache={}): - if isinstance(TYPE, lltype.Ptr): - if TYPE.TO._gckind == 'gc': - return GcPtrDescr - else: - return NonGcPtrDescr - if TYPE is lltype.SingleFloat: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(TYPE) - TYPE = rffi.UINT - try: - return _cache[nameprefix, TYPE] - except KeyError: - # - class Descr(BaseDescr): - _clsname = '%s%sDescr' % (TYPE._name, nameprefix) - Descr.__name__ = Descr._clsname - # - def method(self, translate_support_code): - return symbolic.get_size(TYPE, translate_support_code) - setattr(Descr, methodname, method) - # - if TYPE is lltype.Float or is_longlong(TYPE): - setattr(Descr, floatattrname, True) - elif (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and - rffi.cast(TYPE, -1) == -1): - setattr(Descr, signedattrname, True) - # - _cache[nameprefix, TYPE] = Descr - return Descr + assert repr(calldescr.result_size) == repr(result_size) + return calldescr diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -1,9 +1,7 
@@ from pypy.rlib.rarithmetic import intmask from pypy.jit.metainterp import history from pypy.rpython.lltypesystem import rffi -from pypy.jit.backend.llsupport.descr import ( - DynamicIntCallDescr, NonGcPtrCallDescr, FloatCallDescr, VoidCallDescr, - LongLongCallDescr, getCallDescrClass) +from pypy.jit.backend.llsupport.descr import CallDescr class UnsupportedKind(Exception): pass @@ -16,29 +14,13 @@ argkinds = [get_ffi_type_kind(cpu, arg) for arg in ffi_args] except UnsupportedKind: return None - arg_classes = ''.join(argkinds) - if reskind == history.INT: - size = intmask(ffi_result.c_size) - signed = is_ffi_type_signed(ffi_result) - return DynamicIntCallDescr(arg_classes, size, signed, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.REF: - return NonGcPtrCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.FLOAT: - return FloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.VOID: - return VoidCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'L': - return LongLongCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'S': - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - return SingleFloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - assert False + if reskind == history.VOID: + result_size = 0 + else: + result_size = intmask(ffi_result.c_size) + argkinds = ''.join(argkinds) + return CallDescr(argkinds, reskind, is_ffi_type_signed(ffi_result), + result_size, extrainfo, ffi_flags=ffi_flags) def get_ffi_type_kind(cpu, ffi_type): from pypy.rlib.libffi import types diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,6 +1,6 @@ import os from pypy.rlib import rgc -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize from pypy.rlib.debug import fatalerror from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr @@ -8,52 +8,93 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt, ConstPtr -from pypy.jit.metainterp.history import AbstractDescr +from pypy.jit.codewriter import heaptracker +from pypy.jit.metainterp.history import ConstPtr, AbstractDescr from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.symbolic import WORD -from pypy.jit.backend.llsupport.descr import BaseSizeDescr, BaseArrayDescr +from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr -from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr +from pypy.jit.backend.llsupport.descr import get_array_descr from pypy.jit.backend.llsupport.descr import get_call_descr +from pypy.jit.backend.llsupport.rewrite import GcRewriterAssembler from pypy.rpython.memory.gctransform import asmgcroot # ____________________________________________________________ class GcLLDescription(GcCache): - minimal_size_in_nursery = 0 - get_malloc_slowpath_addr = None def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr + if translator and 
translator.config.translation.gcremovetypeptr: + self.fielddescr_vtable = None + else: + self.fielddescr_vtable = get_field_descr(self, rclass.OBJECT, + 'typeptr') + self._generated_functions = [] + + def _setup_str(self): + self.str_descr = get_array_descr(self, rstr.STR) + self.unicode_descr = get_array_descr(self, rstr.UNICODE) + + def generate_function(self, funcname, func, ARGS, RESULT=llmemory.GCREF): + """Generates a variant of malloc with the given name and the given + arguments. It should return NULL if out of memory. If it raises + anything, it must be an optional MemoryError. + """ + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, RESULT)) + descr = get_call_descr(self, ARGS, RESULT) + setattr(self, funcname, func) + setattr(self, funcname + '_FUNCPTR', FUNCPTR) + setattr(self, funcname + '_descr', descr) + self._generated_functions.append(funcname) + + @specialize.arg(1) + def get_malloc_fn(self, funcname): + func = getattr(self, funcname) + FUNC = getattr(self, funcname + '_FUNCPTR') + return llhelper(FUNC, func) + + @specialize.arg(1) + def get_malloc_fn_addr(self, funcname): + ll_func = self.get_malloc_fn(funcname) + return heaptracker.adr2int(llmemory.cast_ptr_to_adr(ll_func)) + def _freeze_(self): return True def initialize(self): pass def do_write_barrier(self, gcref_struct, gcref_newptr): pass - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - return operations - def can_inline_malloc(self, descr): - return False - def can_inline_malloc_varsize(self, descr, num_elem): + def can_use_nursery_malloc(self, size): return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): pass + def get_nursery_free_addr(self): + raise NotImplementedError + def get_nursery_top_addr(self): + raise NotImplementedError - def get_funcptr_for_newarray(self): - return llhelper(self.GC_MALLOC_ARRAY, self.malloc_array) - def get_funcptr_for_newstr(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_str) - def get_funcptr_for_newunicode(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_unicode) + def gc_malloc(self, sizedescr): + """Blackhole: do a 'bh_new'. 
Also used for 'bh_new_with_vtable', + with the vtable pointer set manually afterwards.""" + assert isinstance(sizedescr, SizeDescr) + return self._bh_malloc(sizedescr) + def gc_malloc_array(self, arraydescr, num_elem): + assert isinstance(arraydescr, ArrayDescr) + return self._bh_malloc_array(arraydescr, num_elem) - def record_constptrs(self, op, gcrefs_output_list): + def gc_malloc_str(self, num_elem): + return self._bh_malloc_array(self.str_descr, num_elem) + + def gc_malloc_unicode(self, num_elem): + return self._bh_malloc_array(self.unicode_descr, num_elem) + + def _record_constptrs(self, op, gcrefs_output_list): for i in range(op.numargs()): v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): @@ -61,11 +102,27 @@ rgc._make_sure_does_not_move(p) gcrefs_output_list.append(p) + def rewrite_assembler(self, cpu, operations, gcrefs_output_list): + rewriter = GcRewriterAssembler(self, cpu) + newops = rewriter.rewrite(operations) + # record all GCREFs, because the GC (or Boehm) cannot see them and + # keep them alive if they end up as constants in the assembler + for op in newops: + self._record_constptrs(op, gcrefs_output_list) + return newops + # ____________________________________________________________ class GcLLDescr_boehm(GcLLDescription): - moving_gc = False - gcrootmap = None + kind = 'boehm' + moving_gc = False + round_up = False + gcrootmap = None + write_barrier_descr = None + fielddescr_tid = None + str_type_id = 0 + unicode_type_id = 0 + get_malloc_slowpath_addr = None @classmethod def configure_boehm_once(cls): @@ -76,6 +133,16 @@ from pypy.rpython.tool import rffi_platform compilation_info = rffi_platform.configure_boehm() + # on some platform GC_init is required before any other + # GC_* functions, call it here for the benefit of tests + # XXX move this to tests + init_fn_ptr = rffi.llexternal("GC_init", + [], lltype.Void, + compilation_info=compilation_info, + sandboxsafe=True, + _nowrapper=True) + init_fn_ptr() + # Versions 6.x of libgc needs to use GC_local_malloc(). 
# Versions 7.x of libgc removed this function; GC_malloc() has # the same behavior if libgc was compiled with @@ -95,96 +162,42 @@ sandboxsafe=True, _nowrapper=True) cls.malloc_fn_ptr = malloc_fn_ptr - cls.compilation_info = compilation_info return malloc_fn_ptr def __init__(self, gcdescr, translator, rtyper): GcLLDescription.__init__(self, gcdescr, translator, rtyper) # grab a pointer to the Boehm 'malloc' function - malloc_fn_ptr = self.configure_boehm_once() - self.funcptr_for_new = malloc_fn_ptr + self.malloc_fn_ptr = self.configure_boehm_once() + self._setup_str() + self._make_functions() - def malloc_array(basesize, itemsize, ofs_length, num_elem): + def _make_functions(self): + + def malloc_fixedsize(size): + return self.malloc_fn_ptr(size) + self.generate_function('malloc_fixedsize', malloc_fixedsize, + [lltype.Signed]) + + def malloc_array(basesize, num_elem, itemsize, ofs_length): try: - size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) except OverflowError: return lltype.nullptr(llmemory.GCREF.TO) - res = self.funcptr_for_new(size) - if not res: - return res - rffi.cast(rffi.CArrayPtr(lltype.Signed), res)[ofs_length/WORD] = num_elem + res = self.malloc_fn_ptr(totalsize) + if res: + arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) + arrayptr[ofs_length/WORD] = num_elem return res - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 4, llmemory.GCREF)) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 4) + def _bh_malloc(self, sizedescr): + return self.malloc_fixedsize(sizedescr.size) - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, self.translate_support_code) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code) - def malloc_str(length): - return self.malloc_array( - str_basesize, str_itemsize, str_ofs_length, length - ) - def malloc_unicode(length): - return self.malloc_array( - unicode_basesize, unicode_itemsize, unicode_ofs_length, length - ) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - - - # on some platform GC_init is required before any other - # GC_* functions, call it here for the benefit of tests - # XXX move this to tests - init_fn_ptr = rffi.llexternal("GC_init", - [], lltype.Void, - compilation_info=self.compilation_info, - sandboxsafe=True, - _nowrapper=True) - - init_fn_ptr() - - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.funcptr_for_new(sizedescr.size) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(basesize, itemsize, ofs_length, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size] - - def args_for_new_array(self, arraydescr): - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = 
arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [basesize, itemsize, ofs_length] - - def get_funcptr_for_new(self): - return self.funcptr_for_new - - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # record all GCREFs too, because Boehm cannot see them and keep them - # alive if they end up as constants in the assembler - for op in operations: - self.record_constptrs(op, gcrefs_output_list) - return GcLLDescription.rewrite_assembler(self, cpu, operations, - gcrefs_output_list) + def _bh_malloc_array(self, arraydescr, num_elem): + return self.malloc_array(arraydescr.basesize, num_elem, + arraydescr.itemsize, + arraydescr.lendescr.offset) # ____________________________________________________________ @@ -554,12 +567,14 @@ class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): - GCClass = gc_ll_descr.GCClass self.llop1 = gc_ll_descr.llop1 self.WB_FUNCPTR = gc_ll_descr.WB_FUNCPTR self.WB_ARRAY_FUNCPTR = gc_ll_descr.WB_ARRAY_FUNCPTR - self.fielddescr_tid = get_field_descr(gc_ll_descr, GCClass.HDR, 'tid') + self.fielddescr_tid = gc_ll_descr.fielddescr_tid # + GCClass = gc_ll_descr.GCClass + if GCClass is None: # for tests + return self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = ( self.extract_flag_byte(self.jit_wb_if_flag)) @@ -596,48 +611,74 @@ funcaddr = llmemory.cast_ptr_to_adr(funcptr) return cpu.cast_adr_to_int(funcaddr) # this may return 0 + def has_write_barrier_from_array(self, cpu): + return self.get_write_barrier_from_array_fn(cpu) != 0 + class GcLLDescr_framework(GcLLDescription): DEBUG = False # forced to True by x86/test/test_zrpy_gc.py + kind = 'framework' + round_up = True - def __init__(self, gcdescr, translator, rtyper, llop1=llop): - from pypy.rpython.memory.gctypelayout import check_typeid - from pypy.rpython.memory.gcheader import GCHeaderBuilder - from pypy.rpython.memory.gctransform import framework + def __init__(self, gcdescr, translator, rtyper, llop1=llop, + really_not_translated=False): GcLLDescription.__init__(self, gcdescr, translator, rtyper) - assert self.translate_support_code, "required with the framework GC" self.translator = translator self.llop1 = llop1 + if really_not_translated: + assert not self.translate_support_code # but half does not work + self._initialize_for_tests() + else: + assert self.translate_support_code,"required with the framework GC" + self._check_valid_gc() + self._make_gcrootmap() + self._make_layoutbuilder() + self._setup_gcclass() + self._setup_tid() + self._setup_write_barrier() + self._setup_str() + self._make_functions(really_not_translated) + def _initialize_for_tests(self): + self.layoutbuilder = None + self.fielddescr_tid = AbstractDescr() + self.max_size_of_young_obj = 1000 + self.GCClass = None + + def _check_valid_gc(self): # we need the hybrid or minimark GC for rgc._make_sure_does_not_move() # to work - if gcdescr.config.translation.gc not in ('hybrid', 'minimark'): + if self.gcdescr.config.translation.gc not in ('hybrid', 'minimark'): raise NotImplementedError("--gc=%s not implemented with the JIT" % (gcdescr.config.translation.gc,)) + def _make_gcrootmap(self): # to find roots in the assembler, make a GcRootMap - name = gcdescr.config.translation.gcrootfinder + name = self.gcdescr.config.translation.gcrootfinder try: cls = globals()['GcRootMap_' + name] except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % 
(name,)) - gcrootmap = cls(gcdescr) + gcrootmap = cls(self.gcdescr) self.gcrootmap = gcrootmap + def _make_layoutbuilder(self): # make a TransformerLayoutBuilder and save it on the translator # where it can be fished and reused by the FrameworkGCTransformer + from pypy.rpython.memory.gctransform import framework + translator = self.translator self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} - gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + self.gcrootmap.add_jit2gc_hooks(translator._jit2gc) + def _setup_gcclass(self): + from pypy.rpython.memory.gcheader import GCHeaderBuilder self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO) - (self.array_basesize, _, self.array_length_ofs) = \ - symbolic.get_array_token(lltype.GcArray(lltype.Signed), True) self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() @@ -645,87 +686,124 @@ assert self.GCClass.inline_simple_malloc assert self.GCClass.inline_simple_malloc_varsize - # make a malloc function, with two arguments - def malloc_basic(size, tid): - type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - check_typeid(type_id) - res = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - type_id, size, - False, False, False) - # In case the operation above failed, we are returning NULL - # from this function to assembler. There is also an RPython - # exception set, typically MemoryError; but it's easier and - # faster to check for the NULL return value, as done by - # translator/exceptiontransform.py. - #llop.debug_print(lltype.Void, "\tmalloc_basic", size, type_id, - # "-->", res) - return res - self.malloc_basic = malloc_basic - self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType( - [lltype.Signed, lltype.Signed], llmemory.GCREF)) + def _setup_tid(self): + self.fielddescr_tid = get_field_descr(self, self.GCClass.HDR, 'tid') + + def _setup_write_barrier(self): self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, llmemory.Address], lltype.Void)) self.WB_ARRAY_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, lltype.Signed, llmemory.Address], lltype.Void)) self.write_barrier_descr = WriteBarrierDescr(self) - # + + def _make_functions(self, really_not_translated): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + (self.standard_array_basesize, _, self.standard_array_length_ofs) = \ + symbolic.get_array_token(lltype.GcArray(lltype.Signed), + not really_not_translated) + + def malloc_nursery_slowpath(size): + """Allocate 'size' null bytes out of the nursery. + Note that the fast path is typically inlined by the backend.""" + if self.DEBUG: + self._random_usage_of_xmm_registers() + type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, size, + False, False, False) + self.generate_function('malloc_nursery', malloc_nursery_slowpath, + [lltype.Signed]) + def malloc_array(itemsize, tid, num_elem): + """Allocate an array with a variable-size num_elem. 
+ Only works for standard arrays.""" type_id = llop.extract_ushort(llgroup.HALFWORD, tid) check_typeid(type_id) return llop1.do_malloc_varsize_clear( llmemory.GCREF, - type_id, num_elem, self.array_basesize, itemsize, - self.array_length_ofs) - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 3, llmemory.GCREF)) - # - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, True) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, True) - str_type_id = self.layoutbuilder.get_type_id(rstr.STR) - unicode_type_id = self.layoutbuilder.get_type_id(rstr.UNICODE) - # + type_id, num_elem, self.standard_array_basesize, itemsize, + self.standard_array_length_ofs) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 3) + + def malloc_array_nonstandard(basesize, itemsize, lengthofs, tid, + num_elem): + """For the rare case of non-standard arrays, i.e. arrays where + self.standard_array_{basesize,length_ofs} is wrong. It can + occur e.g. with arrays of floats on Win32.""" + type_id = llop.extract_ushort(llgroup.HALFWORD, tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + type_id, num_elem, basesize, itemsize, lengthofs) + self.generate_function('malloc_array_nonstandard', + malloc_array_nonstandard, + [lltype.Signed] * 5) + + str_type_id = self.str_descr.tid + str_basesize = self.str_descr.basesize + str_itemsize = self.str_descr.itemsize + str_ofs_length = self.str_descr.lendescr.offset + unicode_type_id = self.unicode_descr.tid + unicode_basesize = self.unicode_descr.basesize + unicode_itemsize = self.unicode_descr.itemsize + unicode_ofs_length = self.unicode_descr.lendescr.offset + def malloc_str(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, str_type_id, length, str_basesize, str_itemsize, str_ofs_length) + self.generate_function('malloc_str', malloc_str, + [lltype.Signed]) + def malloc_unicode(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, - unicode_type_id, length, unicode_basesize,unicode_itemsize, + unicode_type_id, length, unicode_basesize, unicode_itemsize, unicode_ofs_length) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - # - class ForTestOnly: - pass - for_test_only = ForTestOnly() - for_test_only.x = 1.23 - def random_usage_of_xmm_registers(): - x0 = for_test_only.x - x1 = x0 * 0.1 - x2 = x0 * 0.2 - x3 = x0 * 0.3 - for_test_only.x = x0 + x1 + x2 + x3 - # - def malloc_slowpath(size): - if self.DEBUG: - random_usage_of_xmm_registers() - assert size >= self.minimal_size_in_nursery - # NB. although we call do_malloc_fixedsize_clear() here, - # it's a bit of a hack because we set tid to 0 and may - # also use it to allocate varsized objects. The tid - # and possibly the length are both set afterward. - gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - 0, size, False, False, False) - return rffi.cast(lltype.Signed, gcref) - self.malloc_slowpath = malloc_slowpath - self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) + self.generate_function('malloc_unicode', malloc_unicode, + [lltype.Signed]) + + # Rarely called: allocate a fixed-size amount of bytes, but + # not in the nursery, because it is too big. Implemented like + # malloc_nursery_slowpath() above. 
+ self.generate_function('malloc_fixedsize', malloc_nursery_slowpath, + [lltype.Signed]) + + def _bh_malloc(self, sizedescr): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, sizedescr.tid) + check_typeid(type_id) + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, sizedescr.size, + False, False, False) + + def _bh_malloc_array(self, arraydescr, num_elem): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, arraydescr.tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear(llmemory.GCREF, + type_id, num_elem, + arraydescr.basesize, + arraydescr.itemsize, + arraydescr.lendescr.offset) + + + class ForTestOnly: + pass + for_test_only = ForTestOnly() + for_test_only.x = 1.23 + + def _random_usage_of_xmm_registers(self): + x0 = self.for_test_only.x + x1 = x0 * 0.1 + x2 = x0 * 0.2 + x3 = x0 * 0.3 + self.for_test_only.x = x0 + x1 + x2 + x3 def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -735,49 +813,26 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) - return rffi.cast(lltype.Signed, fptr) - def initialize(self): self.gcrootmap.initialize() def init_size_descr(self, S, descr): - type_id = self.layoutbuilder.get_type_id(S) - assert not self.layoutbuilder.is_weakref_type(S) - assert not self.layoutbuilder.has_finalizer(S) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(S) + assert not self.layoutbuilder.is_weakref_type(S) + assert not self.layoutbuilder.has_finalizer(S) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) def init_array_descr(self, A, descr): - type_id = self.layoutbuilder.get_type_id(A) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(A) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.malloc_basic(sizedescr.size, sizedescr.tid) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(itemsize, arraydescr.tid, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size, sizedescr.tid] - - def args_for_new_array(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [itemsize, arraydescr.tid] - - def get_funcptr_for_new(self): - return llhelper(self.GC_MALLOC_BASIC, self.malloc_basic) + def _set_tid(self, gcptr, tid): + hdr_addr = llmemory.cast_ptr_to_adr(gcptr) + hdr_addr -= self.gcheaderbuilder.size_gc_header + hdr = llmemory.cast_adr_to_ptr(hdr_addr, self.HDRPTR) + hdr.tid = tid def do_write_barrier(self, gcref_struct, gcref_newptr): hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct) @@ -791,108 +846,8 @@ funcptr(llmemory.cast_ptr_to_adr(gcref_struct), 
llmemory.cast_ptr_to_adr(gcref_newptr)) - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # Perform two kinds of rewrites in parallel: - # - # - Add COND_CALLs to the write barrier before SETFIELD_GC and - # SETARRAYITEM_GC operations. - # - # - Record the ConstPtrs from the assembler. - # - newops = [] - known_lengths = {} - # we can only remember one malloc since the next malloc can possibly - # collect - last_malloc = None - for op in operations: - if op.getopnum() == rop.DEBUG_MERGE_POINT: - continue - # ---------- record the ConstPtrs ---------- - self.record_constptrs(op, gcrefs_output_list) - if op.is_malloc(): - last_malloc = op.result - elif op.can_malloc(): - last_malloc = None - # ---------- write barrier for SETFIELD_GC ---------- - if op.getopnum() == rop.SETFIELD_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(1) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETFIELD_RAW) - # ---------- write barrier for SETINTERIORFIELD_GC ------ - if op.getopnum() == rop.SETINTERIORFIELD_GC: - val = op.getarg(0) - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) - # ---------- write barrier for SETARRAYITEM_GC ---------- - if op.getopnum() == rop.SETARRAYITEM_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier_array(newops, op.getarg(0), - op.getarg(1), v, - cpu, known_lengths) - op = op.copy_and_change(rop.SETARRAYITEM_RAW) - elif op.getopnum() == rop.NEW_ARRAY: - v_length = op.getarg(0) - if isinstance(v_length, ConstInt): - known_lengths[op.result] = v_length.getint() - # ---------- - newops.append(op) - return newops - - def _gen_write_barrier(self, newops, v_base, v_value): - args = [v_base, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, - descr=self.write_barrier_descr)) - - def _gen_write_barrier_array(self, newops, v_base, v_index, v_value, - cpu, known_lengths): - if self.write_barrier_descr.get_write_barrier_from_array_fn(cpu) != 0: - # If we know statically the length of 'v', and it is not too - # big, then produce a regular write_barrier. If it's unknown or - # too big, produce instead a write_barrier_from_array. 
- LARGE = 130 - length = known_lengths.get(v_base, LARGE) - if length >= LARGE: - # unknown or too big: produce a write_barrier_from_array - args = [v_base, v_index, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, - None, - descr=self.write_barrier_descr)) - return - # fall-back case: produce a write_barrier - self._gen_write_barrier(newops, v_base, v_value) - - def can_inline_malloc(self, descr): - assert isinstance(descr, BaseSizeDescr) - if descr.size < self.max_size_of_young_obj: - has_finalizer = bool(descr.tid & (1<= LARGE: + # unknown or too big: produce a write_barrier_from_array + args = [v_base, v_index, v_value] + self.newops.append( + ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, + descr=write_barrier_descr)) + return + # fall-back case: produce a write_barrier + self.gen_write_barrier(v_base, v_value) + + def round_up_for_allocation(self, size): + if not self.gc_ll_descr.round_up: + return size + if self.gc_ll_descr.translate_support_code: + from pypy.rpython.lltypesystem import llarena + return llarena.round_up_for_allocation( + size, self.gc_ll_descr.minimal_size_in_nursery) + else: + # non-translated: do it manually + # assume that "self.gc_ll_descr.minimal_size_in_nursery" is 2 WORDs + size = max(size, 2 * WORD) + return (size + WORD-1) & ~(WORD-1) # round up diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -1,4 +1,4 @@ -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport import symbolic from pypy.rlib.objectmodel import Symbolic @@ -53,18 +53,6 @@ ('z', lltype.Ptr(U)), ('f', lltype.Float), ('s', lltype.SingleFloat)) - assert getFieldDescrClass(lltype.Ptr(T)) is GcPtrFieldDescr - assert getFieldDescrClass(lltype.Ptr(U)) is NonGcPtrFieldDescr - cls = getFieldDescrClass(lltype.Char) - assert cls != getFieldDescrClass(lltype.Signed) - assert cls == getFieldDescrClass(lltype.Char) - clsf = getFieldDescrClass(lltype.Float) - assert clsf != cls - assert clsf == getFieldDescrClass(lltype.Float) - clss = getFieldDescrClass(lltype.SingleFloat) - assert clss not in (cls, clsf) - assert clss == getFieldDescrClass(lltype.SingleFloat) - assert clss == getFieldDescrClass(rffi.UINT) # for now # c0 = GcCache(False) c1 = GcCache(True) @@ -77,11 +65,7 @@ descr_z = get_field_descr(c2, S, 'z') descr_f = get_field_descr(c2, S, 'f') descr_s = get_field_descr(c2, S, 's') - assert descr_x.__class__ is cls - assert descr_y.__class__ is GcPtrFieldDescr - assert descr_z.__class__ is NonGcPtrFieldDescr - assert descr_f.__class__ is clsf - assert descr_s.__class__ is clss + assert isinstance(descr_x, FieldDescr) assert descr_x.name == 'S.x' assert descr_y.name == 'S.y' assert descr_z.name == 'S.z' @@ -90,33 +74,27 @@ if not tsc: assert descr_x.offset < descr_y.offset < descr_z.offset assert descr_x.sort_key() < descr_y.sort_key() < descr_z.sort_key() - assert descr_x.get_field_size(False) == rffi.sizeof(lltype.Char) - assert descr_y.get_field_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr_z.get_field_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr_f.get_field_size(False) == rffi.sizeof(lltype.Float) - assert descr_s.get_field_size(False) == rffi.sizeof( - lltype.SingleFloat) + assert descr_x.field_size == rffi.sizeof(lltype.Char) + assert descr_y.field_size 
== rffi.sizeof(lltype.Ptr(T)) + assert descr_z.field_size == rffi.sizeof(lltype.Ptr(U)) + assert descr_f.field_size == rffi.sizeof(lltype.Float) + assert descr_s.field_size == rffi.sizeof(lltype.SingleFloat) else: assert isinstance(descr_x.offset, Symbolic) assert isinstance(descr_y.offset, Symbolic) assert isinstance(descr_z.offset, Symbolic) assert isinstance(descr_f.offset, Symbolic) assert isinstance(descr_s.offset, Symbolic) - assert isinstance(descr_x.get_field_size(True), Symbolic) - assert isinstance(descr_y.get_field_size(True), Symbolic) - assert isinstance(descr_z.get_field_size(True), Symbolic) - assert isinstance(descr_f.get_field_size(True), Symbolic) - assert isinstance(descr_s.get_field_size(True), Symbolic) - assert not descr_x.is_pointer_field() - assert descr_y.is_pointer_field() - assert not descr_z.is_pointer_field() - assert not descr_f.is_pointer_field() - assert not descr_s.is_pointer_field() - assert not descr_x.is_float_field() - assert not descr_y.is_float_field() - assert not descr_z.is_float_field() - assert descr_f.is_float_field() - assert not descr_s.is_float_field() + assert isinstance(descr_x.field_size, Symbolic) + assert isinstance(descr_y.field_size, Symbolic) + assert isinstance(descr_z.field_size, Symbolic) + assert isinstance(descr_f.field_size, Symbolic) + assert isinstance(descr_s.field_size, Symbolic) + assert descr_x.flag == FLAG_UNSIGNED + assert descr_y.flag == FLAG_POINTER + assert descr_z.flag == FLAG_UNSIGNED + assert descr_f.flag == FLAG_FLOAT + assert descr_s.flag == FLAG_UNSIGNED def test_get_field_descr_sign(): @@ -128,7 +106,8 @@ for tsc in [False, True]: c2 = GcCache(tsc) descr_x = get_field_descr(c2, S, 'x') - assert descr_x.is_field_signed() == signed + assert descr_x.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] def test_get_field_descr_longlong(): if sys.maxint > 2147483647: @@ -136,9 +115,8 @@ c0 = GcCache(False) S = lltype.GcStruct('S', ('y', lltype.UnsignedLongLong)) descr = get_field_descr(c0, S, 'y') - assert not descr.is_pointer_field() - assert descr.is_float_field() - assert descr.get_field_size(False) == 8 + assert descr.flag == FLAG_FLOAT + assert descr.field_size == 8 def test_get_array_descr(): @@ -149,19 +127,8 @@ A3 = lltype.GcArray(lltype.Ptr(U)) A4 = lltype.GcArray(lltype.Float) A5 = lltype.GcArray(lltype.Struct('x', ('v', lltype.Signed), - ('k', lltype.Signed))) + ('k', lltype.Signed))) A6 = lltype.GcArray(lltype.SingleFloat) - assert getArrayDescrClass(A2) is GcPtrArrayDescr - assert getArrayDescrClass(A3) is NonGcPtrArrayDescr - cls = getArrayDescrClass(A1) - assert cls != getArrayDescrClass(lltype.GcArray(lltype.Signed)) - assert cls == getArrayDescrClass(lltype.GcArray(lltype.Char)) - clsf = getArrayDescrClass(A4) - assert clsf != cls - assert clsf == getArrayDescrClass(lltype.GcArray(lltype.Float)) - clss = getArrayDescrClass(A6) - assert clss not in (clsf, cls) - assert clss == getArrayDescrClass(lltype.GcArray(rffi.UINT)) # c0 = GcCache(False) descr1 = get_array_descr(c0, A1) @@ -170,82 +137,61 @@ descr4 = get_array_descr(c0, A4) descr5 = get_array_descr(c0, A5) descr6 = get_array_descr(c0, A6) - assert descr1.__class__ is cls - assert descr2.__class__ is GcPtrArrayDescr - assert descr3.__class__ is NonGcPtrArrayDescr - assert descr4.__class__ is clsf - assert descr6.__class__ is clss + assert isinstance(descr1, ArrayDescr) assert descr1 == get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert not descr1.is_array_of_pointers() - assert descr2.is_array_of_pointers() - assert not 
descr3.is_array_of_pointers() - assert not descr4.is_array_of_pointers() - assert not descr5.is_array_of_pointers() - assert not descr1.is_array_of_floats() - assert not descr2.is_array_of_floats() - assert not descr3.is_array_of_floats() - assert descr4.is_array_of_floats() - assert not descr5.is_array_of_floats() + assert descr1.flag == FLAG_UNSIGNED + assert descr2.flag == FLAG_POINTER + assert descr3.flag == FLAG_UNSIGNED + assert descr4.flag == FLAG_FLOAT + assert descr5.flag == FLAG_STRUCT + assert descr6.flag == FLAG_UNSIGNED # def get_alignment(code): # Retrieve default alignment for the compiler/platform return struct.calcsize('l' + code) - struct.calcsize(code) - assert descr1.get_base_size(False) == get_alignment('c') - assert descr2.get_base_size(False) == get_alignment('p') - assert descr3.get_base_size(False) == get_alignment('p') - assert descr4.get_base_size(False) == get_alignment('d') - assert descr5.get_base_size(False) == get_alignment('f') - assert descr1.get_ofs_length(False) == 0 - assert descr2.get_ofs_length(False) == 0 - assert descr3.get_ofs_length(False) == 0 - assert descr4.get_ofs_length(False) == 0 - assert descr5.get_ofs_length(False) == 0 - assert descr1.get_item_size(False) == rffi.sizeof(lltype.Char) - assert descr2.get_item_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr3.get_item_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr4.get_item_size(False) == rffi.sizeof(lltype.Float) - assert descr5.get_item_size(False) == rffi.sizeof(lltype.Signed) * 2 - assert descr6.get_item_size(False) == rffi.sizeof(lltype.SingleFloat) + assert descr1.basesize == get_alignment('c') + assert descr2.basesize == get_alignment('p') + assert descr3.basesize == get_alignment('p') + assert descr4.basesize == get_alignment('d') + assert descr5.basesize == get_alignment('f') + assert descr1.lendescr.offset == 0 + assert descr2.lendescr.offset == 0 + assert descr3.lendescr.offset == 0 + assert descr4.lendescr.offset == 0 + assert descr5.lendescr.offset == 0 + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr2.itemsize == rffi.sizeof(lltype.Ptr(T)) + assert descr3.itemsize == rffi.sizeof(lltype.Ptr(U)) + assert descr4.itemsize == rffi.sizeof(lltype.Float) + assert descr5.itemsize == rffi.sizeof(lltype.Signed) * 2 + assert descr6.itemsize == rffi.sizeof(lltype.SingleFloat) # - assert isinstance(descr1.get_base_size(True), Symbolic) - assert isinstance(descr2.get_base_size(True), Symbolic) - assert isinstance(descr3.get_base_size(True), Symbolic) - assert isinstance(descr4.get_base_size(True), Symbolic) - assert isinstance(descr5.get_base_size(True), Symbolic) - assert isinstance(descr1.get_ofs_length(True), Symbolic) - assert isinstance(descr2.get_ofs_length(True), Symbolic) - assert isinstance(descr3.get_ofs_length(True), Symbolic) - assert isinstance(descr4.get_ofs_length(True), Symbolic) - assert isinstance(descr5.get_ofs_length(True), Symbolic) - assert isinstance(descr1.get_item_size(True), Symbolic) - assert isinstance(descr2.get_item_size(True), Symbolic) - assert isinstance(descr3.get_item_size(True), Symbolic) - assert isinstance(descr4.get_item_size(True), Symbolic) - assert isinstance(descr5.get_item_size(True), Symbolic) CA = rffi.CArray(lltype.Signed) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_SIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = 
rffi.CArray(lltype.Ptr(lltype.GcStruct('S'))) descr = get_array_descr(c0, CA) - assert descr.is_array_of_pointers() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_POINTER + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Ptr(lltype.Struct('S'))) descr = get_array_descr(c0, CA) - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Float) descr = get_array_descr(c0, CA) - assert descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_FLOAT + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(rffi.FLOAT) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.itemsize == rffi.sizeof(lltype.SingleFloat) + assert descr.lendescr is None def test_get_array_descr_sign(): @@ -257,46 +203,55 @@ for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, A) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] # RA = rffi.CArray(RESTYPE) for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, RA) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] + + +def test_get_array_descr_str(): + c0 = GcCache(False) + descr1 = get_array_descr(c0, rstr.STR) + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr1.flag == FLAG_UNSIGNED def test_get_call_descr_not_translated(): c0 = GcCache(False) descr1 = get_call_descr(c0, [lltype.Char, lltype.Signed], lltype.Char) - assert descr1.get_result_size(False) == rffi.sizeof(lltype.Char) - assert descr1.get_return_type() == history.INT + assert descr1.get_result_size() == rffi.sizeof(lltype.Char) + assert descr1.get_result_type() == history.INT assert descr1.arg_classes == "ii" # T = lltype.GcStruct('T') descr2 = get_call_descr(c0, [lltype.Ptr(T)], lltype.Ptr(T)) - assert descr2.get_result_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr2.get_return_type() == history.REF + assert descr2.get_result_size() == rffi.sizeof(lltype.Ptr(T)) + assert descr2.get_result_type() == history.REF assert descr2.arg_classes == "r" # U = lltype.GcStruct('U', ('x', lltype.Signed)) assert descr2 == get_call_descr(c0, [lltype.Ptr(U)], lltype.Ptr(U)) # V = lltype.Struct('V', ('x', lltype.Signed)) - assert (get_call_descr(c0, [], lltype.Ptr(V)).get_return_type() == + assert (get_call_descr(c0, [], lltype.Ptr(V)).get_result_type() == history.INT) # - assert (get_call_descr(c0, [], lltype.Void).get_return_type() == + assert (get_call_descr(c0, [], lltype.Void).get_result_type() == history.VOID) # descr4 = get_call_descr(c0, [lltype.Float, lltype.Float], lltype.Float) - assert descr4.get_result_size(False) == rffi.sizeof(lltype.Float) - assert descr4.get_return_type() == history.FLOAT + assert descr4.get_result_size() == rffi.sizeof(lltype.Float) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c0, [lltype.SingleFloat], lltype.SingleFloat) - assert descr5.get_result_size(False) == 
rffi.sizeof(lltype.SingleFloat) - assert descr5.get_return_type() == "S" + assert descr5.get_result_size() == rffi.sizeof(lltype.SingleFloat) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_get_call_descr_not_translated_longlong(): @@ -305,13 +260,13 @@ c0 = GcCache(False) # descr5 = get_call_descr(c0, [lltype.SignedLongLong], lltype.Signed) - assert descr5.get_result_size(False) == 4 - assert descr5.get_return_type() == history.INT + assert descr5.get_result_size() == 4 + assert descr5.get_result_type() == history.INT assert descr5.arg_classes == "L" # descr6 = get_call_descr(c0, [lltype.Signed], lltype.SignedLongLong) - assert descr6.get_result_size(False) == 8 - assert descr6.get_return_type() == "L" + assert descr6.get_result_size() == 8 + assert descr6.get_result_type() == "L" assert descr6.arg_classes == "i" def test_get_call_descr_translated(): @@ -319,18 +274,18 @@ T = lltype.GcStruct('T') U = lltype.GcStruct('U', ('x', lltype.Signed)) descr3 = get_call_descr(c1, [lltype.Ptr(T)], lltype.Ptr(U)) - assert isinstance(descr3.get_result_size(True), Symbolic) - assert descr3.get_return_type() == history.REF + assert isinstance(descr3.get_result_size(), Symbolic) + assert descr3.get_result_type() == history.REF assert descr3.arg_classes == "r" # descr4 = get_call_descr(c1, [lltype.Float, lltype.Float], lltype.Float) - assert isinstance(descr4.get_result_size(True), Symbolic) - assert descr4.get_return_type() == history.FLOAT + assert isinstance(descr4.get_result_size(), Symbolic) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c1, [lltype.SingleFloat], lltype.SingleFloat) - assert isinstance(descr5.get_result_size(True), Symbolic) - assert descr5.get_return_type() == "S" + assert isinstance(descr5.get_result_size(), Symbolic) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_call_descr_extra_info(): @@ -369,29 +324,30 @@ # descr2 = get_field_descr(c0, S, 'y') o, _ = symbolic.get_field_token(S, 'y', False) - assert descr2.repr_of_descr() == '' % o + assert descr2.repr_of_descr() == '' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) - assert descr2i.repr_of_descr() == '' % o + assert descr2i.repr_of_descr() == '' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) - assert descr3.repr_of_descr() == '' + o = symbolic.get_size(lltype.Ptr(S), False) + assert descr3.repr_of_descr() == '' % o # descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert descr3i.repr_of_descr() == '' + assert descr3i.repr_of_descr() == '' # descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) - assert 'GcPtrCallDescr' in descr4.repr_of_descr() + assert descr4.repr_of_descr() == '' # descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) - assert 'CharCallDescr' in descr4i.repr_of_descr() + assert descr4i.repr_of_descr() == '' # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) - assert 'FloatCallDescr' in descr4f.repr_of_descr() + assert descr4f.repr_of_descr() == '' # descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat) - assert 'SingleFloatCallDescr' in descr5f.repr_of_descr() + assert descr5f.repr_of_descr() == '' def test_call_stubs_1(): c0 = GcCache(False) @@ -401,10 +357,10 @@ def f(a, b): return 'c' - call_stub = descr1.call_stub fnptr = llhelper(lltype.Ptr(lltype.FuncType(ARGS, RES)), f) - res = call_stub(rffi.cast(lltype.Signed, fnptr), [1, 
2], None, None) + res = descr1.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [1, 2], None, None) assert res == ord('c') def test_call_stubs_2(): @@ -421,8 +377,8 @@ a = lltype.malloc(ARRAY, 3) opaquea = lltype.cast_opaque_ptr(llmemory.GCREF, a) a[0] = 1 - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [], [opaquea], [longlong.getfloatstorage(3.5)]) + res = descr2.call_stub_f(rffi.cast(lltype.Signed, fnptr), + [], [opaquea], [longlong.getfloatstorage(3.5)]) assert longlong.getrealfloat(res) == 4.5 def test_call_stubs_single_float(): @@ -445,6 +401,22 @@ a = intmask(singlefloat2uint(r_singlefloat(-10.0))) b = intmask(singlefloat2uint(r_singlefloat(3.0))) c = intmask(singlefloat2uint(r_singlefloat(2.0))) - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [a, b, c], [], []) + res = descr2.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [a, b, c], [], []) assert float(uint2singlefloat(rffi.r_uint(res))) == -11.5 + +def test_field_arraylen_descr(): + c0 = GcCache(True) + A1 = lltype.GcArray(lltype.Signed) + fielddescr = get_field_arraylen_descr(c0, A1) + assert isinstance(fielddescr, FieldDescr) + ofs = fielddescr.offset + assert repr(ofs) == '< ArrayLengthOffset >' + # + fielddescr = get_field_arraylen_descr(c0, rstr.STR) + ofs = fielddescr.offset + assert repr(ofs) == ("< " + " 'chars'> + < ArrayLengthOffset" + " > >") + # caching: + assert fielddescr is get_field_arraylen_descr(c0, rstr.STR) diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -1,5 +1,6 @@ from pypy.rlib.libffi import types from pypy.jit.codewriter.longlong import is_64_bit +from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport.ffisupport import * @@ -15,7 +16,9 @@ args = [types.sint, types.pointer] descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, None, ffi_flags=42) - assert isinstance(descr, DynamicIntCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_type == 'i' + assert descr.result_flag == FLAG_SIGNED assert descr.arg_classes == 'ii' assert descr.get_ffi_flags() == 42 @@ -24,18 +27,20 @@ assert descr is None # missing floats descr = get_call_descr_dynamic(FakeCPU(supports_floats=True), args, types.void, None, ffi_flags=43) - assert isinstance(descr, VoidCallDescr) + assert descr.result_type == 'v' + assert descr.result_flag == FLAG_VOID assert descr.arg_classes == 'ifi' assert descr.get_ffi_flags() == 43 descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_SIGNED assert descr.is_result_signed() == True descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert isinstance(descr, CallDescr) + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_UNSIGNED assert descr.is_result_signed() == False if not is_64_bit: @@ -44,7 +49,9 @@ assert descr is None # missing longlongs descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True), [], types.slonglong, None, ffi_flags=43) - assert isinstance(descr, LongLongCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_flag == FLAG_FLOAT + assert descr.result_type == 'L' assert descr.get_ffi_flags() == 43 else: 
assert types.slonglong is types.slong @@ -53,6 +60,6 @@ assert descr is None # missing singlefloats descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True), [], types.float, None, ffi_flags=44) - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - assert isinstance(descr, SingleFloatCallDescr) + assert descr.result_flag == FLAG_UNSIGNED + assert descr.result_type == 'S' assert descr.get_ffi_flags() == 44 diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -6,6 +6,7 @@ from pypy.jit.backend.llsupport.gc import * from pypy.jit.backend.llsupport import symbolic from pypy.jit.metainterp.gc import get_description +from pypy.jit.metainterp.history import BoxPtr, BoxInt, ConstPtr from pypy.jit.metainterp.resoperation import get_deep_immutable_oplist from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE @@ -15,12 +16,12 @@ gc_ll_descr = GcLLDescr_boehm(None, None, None) # record = [] - prev_funcptr_for_new = gc_ll_descr.funcptr_for_new - def my_funcptr_for_new(size): - p = prev_funcptr_for_new(size) + prev_malloc_fn_ptr = gc_ll_descr.malloc_fn_ptr + def my_malloc_fn_ptr(size): + p = prev_malloc_fn_ptr(size) record.append((size, p)) return p - gc_ll_descr.funcptr_for_new = my_funcptr_for_new + gc_ll_descr.malloc_fn_ptr = my_malloc_fn_ptr # # ---------- gc_malloc ---------- S = lltype.GcStruct('S', ('x', lltype.Signed)) @@ -32,8 +33,8 @@ A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(gc_ll_descr, A) p = gc_ll_descr.gc_malloc_array(arraydescr, 10) - assert record == [(arraydescr.get_base_size(False) + - 10 * arraydescr.get_item_size(False), p)] + assert record == [(arraydescr.basesize + + 10 * arraydescr.itemsize, p)] del record[:] # ---------- gc_malloc_str ---------- p = gc_ll_descr.gc_malloc_str(10) @@ -246,24 +247,28 @@ def __init__(self): self.record = [] + def _malloc(self, type_id, size): + tid = llop.combine_ushort(lltype.Signed, type_id, 0) + x = llmemory.raw_malloc(self.gcheaderbuilder.size_gc_header + size) + x += self.gcheaderbuilder.size_gc_header + return x, tid + def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size, has_finalizer, has_light_finalizer, contains_weakptr): assert not contains_weakptr - assert not has_finalizer # in these tests - assert not has_light_finalizer # in these tests - p = llmemory.raw_malloc(size) + assert not has_finalizer + assert not has_light_finalizer + p, tid = self._malloc(type_id, size) p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("fixedsize", repr(size), tid, p)) return p def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, itemsize, offset_to_length): - p = llmemory.raw_malloc(size + itemsize * length) + p, tid = self._malloc(type_id, size + itemsize * length) (p + offset_to_length).signed[0] = length p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("varsize", tid, length, repr(size), repr(itemsize), repr(offset_to_length), p)) @@ -322,43 +327,40 @@ gc_ll_descr = GcLLDescr_framework(gcdescr, FakeTranslator(), None, llop1) gc_ll_descr.initialize() + llop1.gcheaderbuilder = gc_ll_descr.gcheaderbuilder self.llop1 = llop1 self.gc_ll_descr = gc_ll_descr self.fake_cpu = FakeCPU() - def test_args_for_new(self): - S = lltype.GcStruct('S', ('x', lltype.Signed)) - sizedescr = 
get_size_descr(self.gc_ll_descr, S) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed - A = lltype.GcArray(lltype.Signed) - arraydescr = get_array_descr(self.gc_ll_descr, A) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed +## def test_args_for_new(self): +## S = lltype.GcStruct('S', ('x', lltype.Signed)) +## sizedescr = get_size_descr(self.gc_ll_descr, S) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed +## A = lltype.GcArray(lltype.Signed) +## arraydescr = get_array_descr(self.gc_ll_descr, A) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed def test_gc_malloc(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) sizedescr = get_size_descr(self.gc_ll_descr, S) p = self.gc_ll_descr.gc_malloc(sizedescr) - assert self.llop1.record == [("fixedsize", - repr(sizedescr.size), + assert lltype.typeOf(p) == llmemory.GCREF + assert self.llop1.record == [("fixedsize", repr(sizedescr.size), sizedescr.tid, p)] - assert repr(self.gc_ll_descr.args_for_new(sizedescr)) == repr( - [sizedescr.size, sizedescr.tid]) def test_gc_malloc_array(self): A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(self.gc_ll_descr, A) p = self.gc_ll_descr.gc_malloc_array(arraydescr, 10) assert self.llop1.record == [("varsize", arraydescr.tid, 10, - repr(arraydescr.get_base_size(True)), - repr(arraydescr.get_item_size(True)), - repr(arraydescr.get_ofs_length(True)), + repr(arraydescr.basesize), + repr(arraydescr.itemsize), + repr(arraydescr.lendescr.offset), p)] - assert repr(self.gc_ll_descr.args_for_new_array(arraydescr)) == repr( - [arraydescr.get_item_size(True), arraydescr.tid]) def test_gc_malloc_str(self): p = self.gc_ll_descr.gc_malloc_str(10) @@ -404,10 +406,11 @@ gc_ll_descr = self.gc_ll_descr llop1 = self.llop1 # - newops = [] + rewriter = GcRewriterAssembler(gc_ll_descr, None) + newops = rewriter.newops v_base = BoxPtr() v_value = BoxPtr() - gc_ll_descr._gen_write_barrier(newops, v_base, v_value) + rewriter.gen_write_barrier(v_base, v_value) assert llop1.record == [] assert len(newops) == 1 assert newops[0].getopnum() == rop.COND_CALL_GC_WB @@ -427,8 +430,7 @@ operations = gc_ll_descr.rewrite_assembler(None, operations, []) assert len(operations) == 0 - def test_rewrite_assembler_1(self): - # check recording of ConstPtrs + def test_record_constptrs(self): class MyFakeCPU(object): def cast_adr_to_int(self, adr): assert adr == "some fake address" @@ -455,211 +457,6 @@ assert operations2 == operations assert gcrefs == [s_gcref] - def test_rewrite_assembler_2(self): - # check write barriers before SETFIELD_GC - v_base = BoxPtr() - v_value = BoxPtr() - field_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETFIELD_GC, [v_base, v_value], None, - descr=field_descr), - ] - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, operations, - []) - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETFIELD_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_value - assert operations[1].getdescr() == field_descr - - def 
test_rewrite_assembler_3(self): - # check write barriers before SETARRAYITEM_GC - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_4(self): - # check write barriers before SETARRAYITEM_GC, - # if we have actually a write_barrier_from_array. - self.llop1._have_wb_from_array = True - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - if isinstance(v_new_length, ConstInt) and v_new_length.value < 130: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - else: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB_ARRAY - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_index - assert operations[0].getarg(2) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_5(self): - S = lltype.GcStruct('S') - A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) - interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, - A.OF, 'x') - wbdescr = 
self.gc_ll_descr.write_barrier_descr - ops = parse(""" - [p1, p2] - setinteriorfield_gc(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - expected = parse(""" - [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setinteriorfield_raw(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - # no write barrier - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_2(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - wbdescr = self.gc_ll_descr.write_barrier_descr - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_3(self): - A = lltype.GcArray(lltype.Ptr(lltype.GcStruct('S'))) - arraydescr = get_array_descr(self.gc_ll_descr, A) - ops = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) class TestFrameworkMiniMark(TestFramework): gc = 'minimark' diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -0,0 +1,668 @@ +from pypy.jit.backend.llsupport.descr import * +from pypy.jit.backend.llsupport.gc import * +from pypy.jit.metainterp.gc import get_description +from pypy.jit.tool.oparser import parse +from pypy.jit.metainterp.optimizeopt.util import equaloplists +from pypy.jit.codewriter.heaptracker import register_known_gctype + + +class Evaluator(object): + def __init__(self, scope): + self.scope = scope + def __getitem__(self, key): + return eval(key, self.scope) + + 
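# --- Editor's sketch, not part of the commit diff above or below: a minimal
# illustration of how the Evaluator helper just defined is used.  In
# check_rewrite() further down, the expected-operations template is rendered
# with "to_operations % Evaluator(namespace)", so placeholders such as
# "%(sdescr.size)d" or "%(adescr.basesize + 10 * adescr.itemsize)d" are
# evaluated as full Python expressions against the test namespace.  The names
# 'basesize' and 'itemsize' below are invented for this example only.

def _evaluator_demo():
    ns = {'basesize': 8, 'itemsize': 4}
    template = "p0 = call_malloc_nursery(%(basesize + 10 * itemsize)d)"
    # '%(<expr>)d' passes the whole expression string as the mapping key;
    # Evaluator.__getitem__ eval()s it in ns, giving 8 + 10 * 4 = 48.
    return template % Evaluator(ns)   # -> 'p0 = call_malloc_nursery(48)'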
+class RewriteTests(object): + def check_rewrite(self, frm_operations, to_operations, **namespace): + S = lltype.GcStruct('S', ('x', lltype.Signed), + ('y', lltype.Signed)) + sdescr = get_size_descr(self.gc_ll_descr, S) + sdescr.tid = 1234 + # + T = lltype.GcStruct('T', ('y', lltype.Signed), + ('z', lltype.Ptr(S)), + ('t', lltype.Signed)) + tdescr = get_size_descr(self.gc_ll_descr, T) + tdescr.tid = 5678 + tzdescr = get_field_descr(self.gc_ll_descr, T, 'z') + # + A = lltype.GcArray(lltype.Signed) + adescr = get_array_descr(self.gc_ll_descr, A) + adescr.tid = 4321 + alendescr = adescr.lendescr + # + B = lltype.GcArray(lltype.Char) + bdescr = get_array_descr(self.gc_ll_descr, B) + bdescr.tid = 8765 + blendescr = bdescr.lendescr + # + C = lltype.GcArray(lltype.Ptr(S)) + cdescr = get_array_descr(self.gc_ll_descr, C) + cdescr.tid = 8111 + clendescr = cdescr.lendescr + # + E = lltype.GcStruct('Empty') + edescr = get_size_descr(self.gc_ll_descr, E) + edescr.tid = 9000 + # + vtable_descr = self.gc_ll_descr.fielddescr_vtable + O = lltype.GcStruct('O', ('parent', rclass.OBJECT), + ('x', lltype.Signed)) + o_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) + register_known_gctype(self.cpu, o_vtable, O) + # + tiddescr = self.gc_ll_descr.fielddescr_tid + wbdescr = self.gc_ll_descr.write_barrier_descr + WORD = globals()['WORD'] + # + strdescr = self.gc_ll_descr.str_descr + unicodedescr = self.gc_ll_descr.unicode_descr + strlendescr = strdescr.lendescr + unicodelendescr = unicodedescr.lendescr + # + namespace.update(locals()) + # + for funcname in self.gc_ll_descr._generated_functions: + namespace[funcname] = self.gc_ll_descr.get_malloc_fn(funcname) + namespace[funcname + '_descr'] = getattr(self.gc_ll_descr, + '%s_descr' % funcname) + # + ops = parse(frm_operations, namespace=namespace) + expected = parse(to_operations % Evaluator(namespace), + namespace=namespace) + operations = self.gc_ll_descr.rewrite_assembler(self.cpu, + ops.operations, + []) + equaloplists(operations, expected.operations) + + +class TestBoehm(RewriteTests): + def setup_method(self, meth): + class FakeCPU(object): + def sizeof(self, STRUCT): + return SizeDescrWithVTable(102) + self.cpu = FakeCPU() + self.gc_ll_descr = GcLLDescr_boehm(None, None, None) + + def test_new(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_no_collapsing(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + p1 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_new_array_fixed(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(adescr.basesize + 10 * adescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_new_array_variable(self): + self.check_rewrite(""" + [i1] + p0 = new_array(i1, descr=adescr) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(adescr.basesize)d, \ + i1, \ + %(adescr.itemsize)d, \ + %(adescr.lendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = 
new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_newstr(self): + self.check_rewrite(""" + [i1] + p0 = newstr(i1) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(strdescr.basesize)d, \ + i1, \ + %(strdescr.itemsize)d, \ + %(strlendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_newunicode(self): + self.check_rewrite(""" + [i1] + p0 = newunicode(10) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(unicodedescr.basesize + \ + 10 * unicodedescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=unicodelendescr) + jump() + """) + + +class TestFramework(RewriteTests): + def setup_method(self, meth): + class config_(object): + class translation(object): + gc = 'hybrid' + gcrootfinder = 'asmgcc' + gctransformer = 'framework' + gcremovetypeptr = False + gcdescr = get_description(config_) + self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, + really_not_translated=True) + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: True) + # + class FakeCPU(object): + def sizeof(self, STRUCT): + descr = SizeDescrWithVTable(102) + descr.tid = 9315 + return descr + self.cpu = FakeCPU() + + def test_rewrite_assembler_new_to_malloc(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(sdescr.size)d) + setfield_gc(p0, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new3_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=tdescr) + p2 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + tdescr.size + sdescr.size)d) + setfield_gc(p0, 1234, descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 5678, descr=tiddescr) + p2 = int_add(p1, %(tdescr.size)d) + setfield_gc(p2, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 4321, descr=tiddescr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_new_and_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + \ + adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 1234, descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 4321, descr=tiddescr) + setfield_gc(p1, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_round_up(self): + self.check_rewrite(""" + [] + p0 = new_array(6, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(bdescr.basesize + 8)d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 6, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_round_up_always(self): + self.check_rewrite(""" + [] + p0 = new_array(5, descr=bdescr) + p1 = new_array(5, descr=bdescr) + p2 = new_array(5, descr=bdescr) + p3 = new_array(5, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4 * (bdescr.basesize + 8))d) + setfield_gc(p0, 8765, descr=tiddescr) + 
setfield_gc(p0, 5, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 8)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 5, descr=blendescr) + p2 = int_add(p1, %(bdescr.basesize + 8)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 5, descr=blendescr) + p3 = int_add(p2, %(bdescr.basesize + 8)d) + setfield_gc(p3, 8765, descr=tiddescr) + setfield_gc(p3, 5, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_minimal_size(self): + self.check_rewrite(""" + [] + p0 = new(descr=edescr) + p1 = new(descr=edescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4*WORD)d) + setfield_gc(p0, 9000, descr=tiddescr) + p1 = int_add(p0, %(2*WORD)d) + setfield_gc(p1, 9000, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_variable_size(self): + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=bdescr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, i0, \ + descr=malloc_array_descr) + jump(i0) + """) + + def test_rewrite_assembler_nonstandard_array(self): + # a non-standard array is a bit hard to get; e.g. GcArray(Float) + # is like that on Win32, but not on Linux. Build one manually... + NONSTD = lltype.GcArray(lltype.Float) + nonstd_descr = get_array_descr(self.gc_ll_descr, NONSTD) + nonstd_descr.tid = 6464 + nonstd_descr.basesize = 64 # <= hacked + nonstd_descr.itemsize = 8 + nonstd_descr_gcref = 123 + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=nonstd_descr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array_nonstandard), \ + 64, 8, \ + %(nonstd_descr.lendescr.offset)d, \ + 6464, i0, \ + descr=malloc_array_nonstandard_descr) + jump(i0) + """, nonstd_descr=nonstd_descr) + + def test_rewrite_assembler_maximal_size_1(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_array(103, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(bdescr.basesize + 104)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_maximal_size_2(self): + self.gc_ll_descr.max_size_of_young_obj = 300 + self.check_rewrite(""" + [] + p0 = new_array(101, descr=bdescr) + p1 = new_array(102, descr=bdescr) # two new_arrays can be combined + p2 = new_array(103, descr=bdescr) # but not all three + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(2 * (bdescr.basesize + 104))d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 101, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 104)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 102, descr=blendescr) + p2 = call_malloc_nursery( \ + %(bdescr.basesize + 104)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_huge_size(self): + # "huge" is defined as "larger than 0xffffff bytes, or 16MB" + self.check_rewrite(""" + [] + p0 = new_array(20000000, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, 20000000, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(104) # rounded up + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def 
test_new_with_vtable_too_big(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_rewrite_assembler_newstr_newunicode(self): + self.check_rewrite(""" + [i2] + p0 = newstr(14) + p1 = newunicode(10) + p2 = newunicode(i2) + p3 = newstr(i2) + jump() + """, """ + [i2] + p0 = call_malloc_nursery( \ + %(strdescr.basesize + 16 * strdescr.itemsize + \ + unicodedescr.basesize + 10 * unicodedescr.itemsize)d) + setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr) + setfield_gc(p0, 14, descr=strlendescr) + p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) + setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) + setfield_gc(p1, 10, descr=unicodelendescr) + p2 = call_malloc_gc(ConstClass(malloc_unicode), i2, \ + descr=malloc_unicode_descr) + p3 = call_malloc_gc(ConstClass(malloc_str), i2, \ + descr=malloc_str_descr) + jump() + """) + + def test_write_barrier_before_setfield_gc(self): + self.check_rewrite(""" + [p1, p2] + setfield_gc(p1, p2, descr=tzdescr) + jump() + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setfield_raw(p1, p2, descr=tzdescr) + jump() + """) + + def test_write_barrier_before_array_without_from_array(self): + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: False) + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_short_array(self): + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(129, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 129 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 129, descr=clendescr) + call(123456) + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_long_array(self): + # the limit of "being too long" is fixed, arbitrarily, at 130 + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(130, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 130 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 130, descr=clendescr) + call(123456) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_unknown_array(self): + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_label_makes_size_unknown(self): + self.check_rewrite(""" + [i2, p3] + p1 = new_array(5, descr=cdescr) + label(p1, i2, p3) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) 
+ setfield_gc(p1, 5, descr=clendescr) + label(p1, i2, p3) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_setinteriorfield_gc(self): + S1 = lltype.GcStruct('S1') + INTERIOR = lltype.GcArray(('z', lltype.Ptr(S1))) + interiordescr = get_array_descr(self.gc_ll_descr, INTERIOR) + interiordescr.tid = 1291 + interiorlendescr = interiordescr.lendescr + interiorzdescr = get_interiorfield_descr(self.gc_ll_descr, + INTERIOR, 'z') + self.check_rewrite(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, interiorzdescr=interiorzdescr) + + def test_initialization_store(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_2(self): + self.check_rewrite(""" + [] + p0 = new(descr=tdescr) + p1 = new(descr=sdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(tdescr.size + sdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = int_add(p0, %(tdescr.size)d) + setfield_gc(p1, 1234, descr=tiddescr) + # <<>> + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_array(self): + self.check_rewrite(""" + [p1, i2] + p0 = new_array(5, descr=cdescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """, """ + [p1, i2] + p0 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p0, 8111, descr=tiddescr) + setfield_gc(p0, 5, descr=clendescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """) + + def test_non_initialization_store(self): + self.check_rewrite(""" + [i0] + p0 = new(descr=tdescr) + p1 = newstr(i0) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [i0] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = call_malloc_gc(ConstClass(malloc_str), i0, \ + descr=malloc_str_descr) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) + + def test_non_initialization_store_label(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + label(p0, p1) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + label(p0, p1) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -70,10 +70,6 @@ self.cpu = cpu self.verbose = False self.rtyper = cpu.rtyper - self.malloc_func_addr = 0 - self.malloc_array_func_addr = 0 - self.malloc_str_func_addr = 0 - self.malloc_unicode_func_addr = 0 self.fail_boxes_int = values_array(lltype.Signed, failargs_limit) self.fail_boxes_ptr = values_array(llmemory.GCREF, failargs_limit) self.fail_boxes_float = values_array(longlong.FLOATSTORAGE, @@ -108,20 +104,6 @@ # the address of the function called by 'new' gc_ll_descr = self.cpu.gc_ll_descr gc_ll_descr.initialize() - ll_new = gc_ll_descr.get_funcptr_for_new() - self.malloc_func_addr = rffi.cast(lltype.Signed, ll_new) - if 
gc_ll_descr.get_funcptr_for_newarray is not None: - ll_new_array = gc_ll_descr.get_funcptr_for_newarray() - self.malloc_array_func_addr = rffi.cast(lltype.Signed, - ll_new_array) - if gc_ll_descr.get_funcptr_for_newstr is not None: - ll_new_str = gc_ll_descr.get_funcptr_for_newstr() - self.malloc_str_func_addr = rffi.cast(lltype.Signed, - ll_new_str) - if gc_ll_descr.get_funcptr_for_newunicode is not None: - ll_new_unicode = gc_ll_descr.get_funcptr_for_newunicode() - self.malloc_unicode_func_addr = rffi.cast(lltype.Signed, - ll_new_unicode) self.memcpy_addr = self.cpu.cast_ptr_to_int(support.memcpy_fn) self._build_failure_recovery(False) self._build_failure_recovery(True) @@ -1358,46 +1340,10 @@ self.mc.SHR_ri(resloc.value, 7) self.mc.AND_ri(resloc.value, 1) - def genop_new_with_vtable(self, op, arglocs, result_loc): - assert result_loc is eax - loc_vtable = arglocs[-1] - assert isinstance(loc_vtable, ImmedLoc) - arglocs = arglocs[:-1] - self.call(self.malloc_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - self.set_vtable(eax, loc_vtable) + # ---------- - def set_vtable(self, loc, loc_vtable): - if self.cpu.vtable_offset is not None: - assert isinstance(loc, RegLoc) - assert isinstance(loc_vtable, ImmedLoc) - self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) - - def set_new_array_length(self, loc, ofs_length, loc_num_elem): - assert isinstance(loc, RegLoc) - assert isinstance(loc_num_elem, ImmedLoc) - self.mc.MOV(mem(loc, ofs_length), loc_num_elem) - - # XXX genop_new is abused for all varsized mallocs with Boehm, for now - # (instead of genop_new_array, genop_newstr, genop_newunicode) - def genop_new(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_new_array(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_array_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_newstr(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_str_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_newunicode(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_unicode_func_addr, arglocs, eax) + def genop_call_malloc_gc(self, op, arglocs, result_loc): + self.genop_call(op, arglocs, result_loc) self.propagate_memoryerror_if_eax_is_null() def propagate_memoryerror_if_eax_is_null(self): @@ -2066,6 +2012,8 @@ self._genop_call(op, arglocs, resloc, force_index) def _genop_call(self, op, arglocs, resloc, force_index): + from pypy.jit.backend.llsupport.descr import CallDescr + sizeloc = arglocs[0] assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value @@ -2080,13 +2028,16 @@ else: tmp = eax + descr = op.getdescr() + assert isinstance(descr, CallDescr) + self._emit_call(force_index, x, arglocs, 3, tmp=tmp, - argtypes=op.getdescr().get_arg_types(), - callconv=op.getdescr().get_call_conv()) + argtypes=descr.get_arg_types(), + callconv=descr.get_call_conv()) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.type == FLOAT: # a float or a long long return - if op.getdescr().get_return_type() == 'L': + if descr.get_result_type() == 'L': self.mc.MOV_br(resloc.value, eax.value) # long long self.mc.MOV_br(resloc.value + 4, edx.value) # XXX should ideally not move the result on the stack, @@ -2095,7 +2046,7 @@ # can just be always a stack location else: self.mc.FSTPL_b(resloc.value) # float return - elif 
op.getdescr().get_return_type() == 'S': + elif descr.get_result_type() == 'S': # singlefloat return assert resloc is eax if IS_X86_32: @@ -2293,9 +2244,9 @@ # # Reset the vable token --- XXX really too much special logic here:-( if jd.index_of_virtualizable >= 0: - from pypy.jit.backend.llsupport.descr import BaseFieldDescr + from pypy.jit.backend.llsupport.descr import FieldDescr fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) ofs = fielddescr.offset self.mc.MOV(eax, arglocs[1]) self.mc.MOV_mi((eax.value, ofs), 0) @@ -2498,9 +2449,8 @@ else: self.mc.JMP(imm(target)) - def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): - size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) - size = (size + WORD-1) & ~(WORD-1) # round up + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size): + assert size & (WORD-1) == 0 # must be correctly aligned self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edx.value, (eax.value, size)) self.mc.CMP(edx, heap(nursery_top_adr)) @@ -2536,9 +2486,6 @@ offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) - # on 64-bits, 'tid' is a value that fits in 31 bits - assert rx86.fits_in_32bits(tid) - self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) genop_discard_list = [Assembler386.not_implemented_op_discard] * rop._LAST diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -16,8 +16,8 @@ from pypy.jit.codewriter import heaptracker, longlong from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.metainterp.resoperation import rop -from pypy.jit.backend.llsupport.descr import BaseFieldDescr, BaseArrayDescr -from pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr +from pypy.jit.backend.llsupport.descr import FieldDescr, ArrayDescr +from pypy.jit.backend.llsupport.descr import CallDescr, SizeDescr from pypy.jit.backend.llsupport.descr import InteriorFieldDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox @@ -870,9 +870,9 @@ def _consider_call(self, op, guard_not_forced_op=None): calldescr = op.getdescr() - assert isinstance(calldescr, BaseCallDescr) + assert isinstance(calldescr, CallDescr) assert len(calldescr.arg_classes) == op.numargs() - 1 - size = calldescr.get_result_size(self.translate_support_code) + size = calldescr.get_result_size() sign = calldescr.is_result_signed() if sign: sign_loc = imm1 @@ -917,12 +917,15 @@ consider_call_release_gil = consider_call_may_force + def consider_call_malloc_gc(self, op): + self._consider_call(op) + def consider_call_assembler(self, op, guard_op): descr = op.getdescr() assert isinstance(descr, JitCellToken) jd = descr.outermost_jitdriver_sd assert jd is not None - size = jd.portal_calldescr.get_result_size(self.translate_support_code) + size = jd.portal_calldescr.get_result_size() vable_index = jd.index_of_virtualizable if vable_index >= 0: self.rm._sync_var(op.getarg(vable_index)) @@ -957,21 +960,10 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb - def fastpath_malloc_fixedsize(self, op, descr): - assert isinstance(descr, BaseSizeDescr) - self._do_fastpath_malloc(op, descr.size, descr.tid) - - def fastpath_malloc_varsize(self, op, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = 
arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - size = basesize + itemsize * num_elem - self._do_fastpath_malloc(op, size, arraydescr.tid) - self.assembler.set_new_array_length(eax, ofs_length, imm(num_elem)) - - def _do_fastpath_malloc(self, op, size, tid): - gc_ll_descr = self.assembler.cpu.gc_ll_descr + def consider_call_malloc_nursery(self, op): + size_box = op.getarg(0) + assert isinstance(size_box, ConstInt) + size = size_box.getint() self.rm.force_allocate_reg(op.result, selected_reg=eax) # # We need edx as a temporary, but otherwise don't save any more @@ -980,86 +972,39 @@ self.rm.force_allocate_reg(tmp_box, selected_reg=edx) self.rm.possibly_free_var(tmp_box) # + gc_ll_descr = self.assembler.cpu.gc_ll_descr self.assembler.malloc_cond( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), - size, tid, - ) - - def consider_new(self, op): - gc_ll_descr = self.assembler.cpu.gc_ll_descr - if gc_ll_descr.can_inline_malloc(op.getdescr()): - self.fastpath_malloc_fixedsize(op, op.getdescr()) - else: - args = gc_ll_descr.args_for_new(op.getdescr()) - arglocs = [imm(x) for x in args] - return self._call(op, arglocs) - - def consider_new_with_vtable(self, op): - classint = op.getarg(0).getint() - descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) - if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): - self.fastpath_malloc_fixedsize(op, descrsize) - self.assembler.set_vtable(eax, imm(classint)) - # result of fastpath malloc is in eax - else: - args = self.assembler.cpu.gc_ll_descr.args_for_new(descrsize) - arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.getarg(0))) - return self._call(op, arglocs) - - def consider_newstr(self, op): - loc = self.loc(op.getarg(0)) - return self._call(op, [loc]) - - def consider_newunicode(self, op): - loc = self.loc(op.getarg(0)) - return self._call(op, [loc]) - - def consider_new_array(self, op): - gc_ll_descr = self.assembler.cpu.gc_ll_descr - box_num_elem = op.getarg(0) - if isinstance(box_num_elem, ConstInt): - num_elem = box_num_elem.value - if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), - num_elem): - self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) - return - args = self.assembler.cpu.gc_ll_descr.args_for_new_array( - op.getdescr()) - arglocs = [imm(x) for x in args] - arglocs.append(self.loc(box_num_elem)) - self._call(op, arglocs) + size) def _unpack_arraydescr(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - ofs = arraydescr.get_base_size(self.translate_support_code) - size = arraydescr.get_item_size(self.translate_support_code) - ptr = arraydescr.is_array_of_pointers() + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.basesize + size = arraydescr.itemsize sign = arraydescr.is_item_signed() - return size, ofs, ofs_length, ptr, sign + return size, ofs, sign def _unpack_fielddescr(self, fielddescr): - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) ofs = fielddescr.offset - size = fielddescr.get_field_size(self.translate_support_code) - ptr = fielddescr.is_pointer_field() + size = fielddescr.field_size sign = fielddescr.is_field_signed() - return imm(ofs), imm(size), ptr, sign + return imm(ofs), imm(size), sign + _unpack_fielddescr._always_inline_ = True def _unpack_interiorfielddescr(self, 
descr): assert isinstance(descr, InteriorFieldDescr) arraydescr = descr.arraydescr - ofs = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - fieldsize = descr.fielddescr.get_field_size(self.translate_support_code) + ofs = arraydescr.basesize + itemsize = arraydescr.itemsize + fieldsize = descr.fielddescr.field_size sign = descr.fielddescr.is_field_signed() ofs += descr.fielddescr.offset return imm(ofs), imm(itemsize), imm(fieldsize), sign def consider_setfield_gc(self, op): - ofs_loc, size_loc, _, _ = self._unpack_fielddescr(op.getdescr()) + ofs_loc, size_loc, _ = self._unpack_fielddescr(op.getdescr()) assert isinstance(size_loc, ImmedLoc) if size_loc.value == 1: need_lower_byte = True @@ -1117,7 +1062,7 @@ consider_unicodesetitem = consider_strsetitem def consider_setarrayitem_gc(self, op): - itemsize, ofs, _, _, _ = self._unpack_arraydescr(op.getdescr()) + itemsize, ofs, _ = self._unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) if itemsize == 1: @@ -1134,7 +1079,7 @@ consider_setarrayitem_raw = consider_setarrayitem_gc def consider_getfield_gc(self, op): - ofs_loc, size_loc, _, sign = self._unpack_fielddescr(op.getdescr()) + ofs_loc, size_loc, sign = self._unpack_fielddescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars(args) @@ -1150,7 +1095,7 @@ consider_getfield_gc_pure = consider_getfield_gc def consider_getarrayitem_gc(self, op): - itemsize, ofs, _, _, sign = self._unpack_arraydescr(op.getdescr()) + itemsize, ofs, sign = self._unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) @@ -1229,8 +1174,8 @@ def consider_arraylen_gc(self, op): arraydescr = op.getdescr() - assert isinstance(arraydescr, BaseArrayDescr) - ofs = arraydescr.get_ofs_length(self.translate_support_code) + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.lendescr.offset args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars_for_op(op) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -8,7 +8,7 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.codewriter import heaptracker from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.backend.llsupport.descr import GcCache +from pypy.jit.backend.llsupport.descr import GcCache, FieldDescr, FLAG_SIGNED from pypy.jit.backend.llsupport.gc import GcLLDescription from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.regalloc import RegAlloc @@ -17,7 +17,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.annlowlevel import llhelper from pypy.rpython.lltypesystem import rclass, rstr -from pypy.jit.backend.llsupport.gc import GcLLDescr_framework, GcPtrFieldDescr +from pypy.jit.backend.llsupport.gc import GcLLDescr_framework from pypy.jit.backend.x86.test.test_regalloc import MockAssembler from pypy.jit.backend.x86.test.test_regalloc import BaseTestRegalloc @@ -41,20 +41,15 @@ return ['compressed'] + shape[1:] class MockGcDescr(GcCache): - def get_funcptr_for_new(self): - return 123 - get_funcptr_for_newarray = 
get_funcptr_for_new - get_funcptr_for_newstr = get_funcptr_for_new - get_funcptr_for_newunicode = get_funcptr_for_new get_malloc_slowpath_addr = None - + write_barrier_descr = None moving_gc = True gcrootmap = MockGcRootMap() def initialize(self): pass - record_constptrs = GcLLDescr_framework.record_constptrs.im_func + _record_constptrs = GcLLDescr_framework._record_constptrs.im_func rewrite_assembler = GcLLDescr_framework.rewrite_assembler.im_func class TestRegallocDirectGcIntegration(object): @@ -170,42 +165,32 @@ ''' self.interpret(ops, [0, 0, 0, 0, 0, 0, 0, 0, 0], run=False) +NOT_INITIALIZED = chr(0xdd) + class GCDescrFastpathMalloc(GcLLDescription): gcrootmap = None - expected_malloc_slowpath_size = WORD*2 + write_barrier_descr = None def __init__(self): - GcCache.__init__(self, False) + GcLLDescription.__init__(self, None) # create a nursery - NTP = rffi.CArray(lltype.Signed) - self.nursery = lltype.malloc(NTP, 16, flavor='raw') - self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 3, + NTP = rffi.CArray(lltype.Char) + self.nursery = lltype.malloc(NTP, 64, flavor='raw') + for i in range(64): + self.nursery[i] = NOT_INITIALIZED + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 2, flavor='raw') self.addrs[0] = rffi.cast(lltype.Signed, self.nursery) - self.addrs[1] = self.addrs[0] + 16*WORD - self.addrs[2] = 0 - # 16 WORDs + self.addrs[1] = self.addrs[0] + 64 + self.calls = [] def malloc_slowpath(size): - assert size == self.expected_malloc_slowpath_size + self.calls.append(size) + # reset the nursery nadr = rffi.cast(lltype.Signed, self.nursery) self.addrs[0] = nadr + size - self.addrs[2] += 1 return nadr - self.malloc_slowpath = malloc_slowpath - self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], - lltype.Signed) - self._counter = 123000 - - def can_inline_malloc(self, descr): - return True - - def get_funcptr_for_new(self): - return 42 -# return llhelper(lltype.Ptr(self.NEW_TP), self.new) - - def init_size_descr(self, S, descr): - descr.tid = self._counter - self._counter += 1 + self.generate_function('malloc_nursery', malloc_slowpath, + [lltype.Signed], lltype.Signed) def get_nursery_free_addr(self): return rffi.cast(lltype.Signed, self.addrs) @@ -214,204 +199,61 @@ return rffi.cast(lltype.Signed, self.addrs) + WORD def get_malloc_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) - return rffi.cast(lltype.Signed, fptr) + return self.get_malloc_fn_addr('malloc_nursery') - get_funcptr_for_newarray = None - get_funcptr_for_newstr = None - get_funcptr_for_newunicode = None + def check_nothing_in_nursery(self): + # CALL_MALLOC_NURSERY should not write anything in the nursery + for i in range(64): + assert self.nursery[i] == NOT_INITIALIZED class TestMallocFastpath(BaseTestRegalloc): def setup_method(self, method): cpu = CPU(None, None) - cpu.vtable_offset = WORD cpu.gc_ll_descr = GCDescrFastpathMalloc() cpu.setup_once() + self.cpu = cpu - # hack: specify 'tid' explicitly, because this test is not running - # with the gc transformer - NODE = lltype.GcStruct('node', ('tid', lltype.Signed), - ('value', lltype.Signed)) - nodedescr = cpu.sizeof(NODE) - valuedescr = cpu.fielddescrof(NODE, 'value') - - self.cpu = cpu - self.nodedescr = nodedescr - vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) - vtable_int = cpu.cast_adr_to_int(llmemory.cast_ptr_to_adr(vtable)) - NODE2 = lltype.GcStruct('node2', - ('parent', rclass.OBJECT), - ('tid', lltype.Signed), - ('vtable', lltype.Ptr(rclass.OBJECT_VTABLE))) - descrsize = 
cpu.sizeof(NODE2) - heaptracker.register_known_gctype(cpu, vtable, NODE2) - self.descrsize = descrsize - self.vtable_int = vtable_int - - self.namespace = locals().copy() - def test_malloc_fastpath(self): ops = ''' - [i0] - p0 = new(descr=nodedescr) - setfield_gc(p0, i0, descr=valuedescr) - finish(p0) + [] + p0 = call_malloc_nursery(16) + p1 = call_malloc_nursery(32) + p2 = call_malloc_nursery(16) + finish(p0, p1, p2) ''' - self.interpret(ops, [42]) - # check the nursery + self.interpret(ops, []) + # check the returned pointers gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == self.nodedescr.tid - assert gc_ll_descr.nursery[1] == 42 nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*2) - assert gc_ll_descr.addrs[2] == 0 # slowpath never called + ref = self.cpu.get_latest_value_ref + assert rffi.cast(lltype.Signed, ref(0)) == nurs_adr + 0 + assert rffi.cast(lltype.Signed, ref(1)) == nurs_adr + 16 + assert rffi.cast(lltype.Signed, ref(2)) == nurs_adr + 48 + # check the nursery content and state + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.addrs[0] == nurs_adr + 64 + # slowpath never called + assert gc_ll_descr.calls == [] def test_malloc_slowpath(self): ops = ''' [] - p0 = new(descr=nodedescr) - p1 = new(descr=nodedescr) - p2 = new(descr=nodedescr) - p3 = new(descr=nodedescr) - p4 = new(descr=nodedescr) - p5 = new(descr=nodedescr) - p6 = new(descr=nodedescr) - p7 = new(descr=nodedescr) - p8 = new(descr=nodedescr) - finish(p0, p1, p2, p3, p4, p5, p6, p7, p8) + p0 = call_malloc_nursery(16) + p1 = call_malloc_nursery(32) + p2 = call_malloc_nursery(24) # overflow + finish(p0, p1, p2) ''' self.interpret(ops, []) + # check the returned pointers + gc_ll_descr = self.cpu.gc_ll_descr + nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) + ref = self.cpu.get_latest_value_ref + assert rffi.cast(lltype.Signed, ref(0)) == nurs_adr + 0 + assert rffi.cast(lltype.Signed, ref(1)) == nurs_adr + 16 + assert rffi.cast(lltype.Signed, ref(2)) == nurs_adr + 0 + # check the nursery content and state + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.addrs[0] == nurs_adr + 24 # this should call slow path once - gc_ll_descr = self.cpu.gc_ll_descr - nadr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nadr + (WORD*2) - assert gc_ll_descr.addrs[2] == 1 # slowpath called once - - def test_new_with_vtable(self): - ops = ''' - [i0, i1] - p0 = new_with_vtable(ConstClass(vtable)) - guard_class(p0, ConstClass(vtable)) [i0] - finish(i1) - ''' - self.interpret(ops, [0, 1]) - assert self.getint(0) == 1 - gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == self.descrsize.tid - assert gc_ll_descr.nursery[1] == self.vtable_int - nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*3) - assert gc_ll_descr.addrs[2] == 0 # slowpath never called - - -class Seen(Exception): - pass - -class GCDescrFastpathMallocVarsize(GCDescrFastpathMalloc): - def can_inline_malloc_varsize(self, arraydescr, num_elem): - return num_elem < 5 - def get_funcptr_for_newarray(self): - return 52 - def init_array_descr(self, A, descr): - descr.tid = self._counter - self._counter += 1 - def args_for_new_array(self, descr): - raise Seen("args_for_new_array") - -class TestMallocVarsizeFastpath(BaseTestRegalloc): - def setup_method(self, method): - cpu = CPU(None, None) - cpu.vtable_offset = WORD - cpu.gc_ll_descr = GCDescrFastpathMallocVarsize() - 
cpu.setup_once() - self.cpu = cpu - - ARRAY = lltype.GcArray(lltype.Signed) - arraydescr = cpu.arraydescrof(ARRAY) - self.arraydescr = arraydescr - ARRAYCHAR = lltype.GcArray(lltype.Char) - arraychardescr = cpu.arraydescrof(ARRAYCHAR) - - self.namespace = locals().copy() - - def test_malloc_varsize_fastpath(self): - # Hack. Running the GcLLDescr_framework without really having - # a complete GC means that we end up with both the tid and the - # length being at offset 0. In this case, so the length overwrites - # the tid. This is of course only the case in this test class. - ops = ''' - [] - p0 = new_array(4, descr=arraydescr) - setarrayitem_gc(p0, 0, 142, descr=arraydescr) - setarrayitem_gc(p0, 3, 143, descr=arraydescr) - finish(p0) - ''' - self.interpret(ops, []) - # check the nursery - gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == 4 - assert gc_ll_descr.nursery[1] == 142 - assert gc_ll_descr.nursery[4] == 143 - nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*5) - assert gc_ll_descr.addrs[2] == 0 # slowpath never called - - def test_malloc_varsize_slowpath(self): - ops = ''' - [] - p0 = new_array(4, descr=arraydescr) - setarrayitem_gc(p0, 0, 420, descr=arraydescr) - setarrayitem_gc(p0, 3, 430, descr=arraydescr) - p1 = new_array(4, descr=arraydescr) - setarrayitem_gc(p1, 0, 421, descr=arraydescr) - setarrayitem_gc(p1, 3, 431, descr=arraydescr) - p2 = new_array(4, descr=arraydescr) - setarrayitem_gc(p2, 0, 422, descr=arraydescr) - setarrayitem_gc(p2, 3, 432, descr=arraydescr) - p3 = new_array(4, descr=arraydescr) - setarrayitem_gc(p3, 0, 423, descr=arraydescr) - setarrayitem_gc(p3, 3, 433, descr=arraydescr) - finish(p0, p1, p2, p3) - ''' - gc_ll_descr = self.cpu.gc_ll_descr - gc_ll_descr.expected_malloc_slowpath_size = 5*WORD - self.interpret(ops, []) - assert gc_ll_descr.addrs[2] == 1 # slowpath called once - - def test_malloc_varsize_too_big(self): - ops = ''' - [] - p0 = new_array(5, descr=arraydescr) - finish(p0) - ''' - py.test.raises(Seen, self.interpret, ops, []) - - def test_malloc_varsize_variable(self): - ops = ''' - [i0] - p0 = new_array(i0, descr=arraydescr) - finish(p0) - ''' - py.test.raises(Seen, self.interpret, ops, []) - - def test_malloc_array_of_char(self): - # check that fastpath_malloc_varsize() respects the alignment - # of the pointer in the nursery - ops = ''' - [] - p1 = new_array(1, descr=arraychardescr) - p2 = new_array(2, descr=arraychardescr) - p3 = new_array(3, descr=arraychardescr) - p4 = new_array(4, descr=arraychardescr) - finish(p1, p2, p3, p4) - ''' - self.interpret(ops, []) - p1 = self.getptr(0, llmemory.GCREF) - p2 = self.getptr(1, llmemory.GCREF) - p3 = self.getptr(2, llmemory.GCREF) - p4 = self.getptr(3, llmemory.GCREF) - assert p1._obj.intval & (WORD-1) == 0 # aligned - assert p2._obj.intval & (WORD-1) == 0 # aligned - assert p3._obj.intval & (WORD-1) == 0 # aligned - assert p4._obj.intval & (WORD-1) == 0 # aligned + assert gc_ll_descr.calls == [24] diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -69,6 +69,7 @@ return ctypes.cast(res.value._obj.intval, ctypes.POINTER(item_tp)) def test_allocations(self): + py.test.skip("rewrite or kill") from pypy.rpython.lltypesystem import rstr allocs = [None] diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- 
a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -69,16 +69,17 @@ def get_functions_to_patch(): from pypy.jit.backend.llsupport import gc # - can_inline_malloc1 = gc.GcLLDescr_framework.can_inline_malloc - def can_inline_malloc2(*args): + can_use_nursery_malloc1 = gc.GcLLDescr_framework.can_use_nursery_malloc + def can_use_nursery_malloc2(*args): try: if os.environ['PYPY_NO_INLINE_MALLOC']: return False except KeyError: pass - return can_inline_malloc1(*args) + return can_use_nursery_malloc1(*args) # - return {(gc.GcLLDescr_framework, 'can_inline_malloc'): can_inline_malloc2} + return {(gc.GcLLDescr_framework, 'can_use_nursery_malloc'): + can_use_nursery_malloc2} def compile(f, gc, enable_opts='', **kwds): from pypy.annotation.listdef import s_list_of_strings diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -46,7 +46,7 @@ # get the function address as an integer func = argboxes[0].getint() # do the call using the correct function from the cpu - rettype = descr.get_return_type() + rettype = descr.get_result_type() if rettype == INT or rettype == 'S': # *S*ingle float try: result = cpu.bh_call_i(func, descr, args_i, args_r, args_f) @@ -344,6 +344,8 @@ rop.SETINTERIORFIELD_RAW, rop.CALL_RELEASE_GIL, rop.QUASIIMMUT_FIELD, + rop.CALL_MALLOC_GC, + rop.CALL_MALLOC_NURSERY, rop.LABEL, ): # list of opcodes never executed by pyjitpl continue diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -142,59 +142,6 @@ def repr_of_descr(self): return '%r' % (self,) - def get_arg_types(self): - """ Implement in call descr. - Must return a string of INT, REF and FLOAT ('i', 'r', 'f'). - """ - raise NotImplementedError - - def get_return_type(self): - """ Implement in call descr. - Must return INT, REF, FLOAT, or 'v' for void. - On 32-bit (hack) it can also be 'L' for longlongs. - Additionally it can be 'S' for singlefloats. - """ - raise NotImplementedError - - def get_extra_info(self): - """ Implement in call descr - """ - raise NotImplementedError - - def is_array_of_pointers(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_array_of_floats(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_array_of_structs(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_pointer_field(self): - """ Implement for field descr - """ - raise NotImplementedError - - def is_float_field(self): - """ Implement for field descr - """ - raise NotImplementedError - - def as_vtable_size_descr(self): - """ Implement for size descr representing objects with vtables. - Returns self. 
(it's an annotation hack) - """ - raise NotImplementedError - - def count_fields_if_immutable(self): - return -1 - def _clone_if_mutable(self): return self def clone_if_mutable(self): diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -508,6 +508,8 @@ #'OOSEND', # ootype operation #'OOSEND_PURE', # ootype operation 'CALL_PURE/*d', # removed before it's passed to the backend + 'CALL_MALLOC_GC/*d', # like CALL, but NULL => propagate MemoryError + 'CALL_MALLOC_NURSERY/1', # nursery malloc, const number of bytes, zeroed '_CALL_LAST', '_CANRAISE_LAST', # ----- end of can_raise operations ----- diff --git a/pypy/jit/metainterp/test/test_executor.py b/pypy/jit/metainterp/test/test_executor.py --- a/pypy/jit/metainterp/test/test_executor.py +++ b/pypy/jit/metainterp/test/test_executor.py @@ -18,7 +18,7 @@ pass class FakeCallDescr(FakeDescr): - def get_return_type(self): + def get_result_type(self): return history.FLOAT class FakeFieldDescr(FakeDescr): diff --git a/pypy/rpython/lltypesystem/llarena.py b/pypy/rpython/lltypesystem/llarena.py --- a/pypy/rpython/lltypesystem/llarena.py +++ b/pypy/rpython/lltypesystem/llarena.py @@ -374,6 +374,7 @@ following an object. For arenas containing heterogenous objects. If minsize is specified, it gives a minimum on the resulting size.""" return _round_up_for_allocation(size, minsize) +round_up_for_allocation._annenforceargs_ = [int, int] def _round_up_for_allocation(size, minsize): # internal return RoundedUpForAllocation(size, minsize) From noreply at buildbot.pypy.org Mon Dec 19 13:59:32 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Dec 2011 13:59:32 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: fix debug repr Message-ID: <20111219125932.C3BF4823F8@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50708:65faee125fa5 Date: 2011-12-19 14:58 +0200 http://bitbucket.org/pypy/pypy/changeset/65faee125fa5/ Log: fix debug repr diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -154,7 +154,7 @@ class ViewSignature(ArraySignature): def debug_repr(self): - return 'Slice(%s)' % self.child.debug_repr() + return 'Slice' def _invent_numbering(self, cache, allnumbers): # always invent a new number for view @@ -260,8 +260,8 @@ return self.binfunc(self.calc_dtype, lhs, rhs) def debug_repr(self): - return 'Call2(%s, %s)' % (self.left.debug_repr(), - self.right.debug_repr()) + return 'Call2(%s, %s, %s)' % (self.name, self.left.debug_repr(), + self.right.debug_repr()) class ReduceSignature(Call2): def _create_iter(self, iterlist, arraylist, arr, res_shape): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -869,13 +869,13 @@ a = zeros(1) assert a.__debug_repr__() == 'Array' assert (a + a).__debug_repr__() == 'Call2(add, Array, Array)' - assert (a[::2]).__debug_repr__() == 'Slice(Array)' + assert (a[::2]).__debug_repr__() == 'Slice' assert (a + 2).__debug_repr__() == 'Call2(add, Array, Scalar)' - assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, FlatIter(Array))' + #assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, FlatIter(Array))' assert sin(a).__debug_repr__() == 'Call1(sin, Array)' b = 
a + a b[0] = 3 - assert b.__debug_repr__() == 'Forced' + assert b.__debug_repr__() == 'Array' class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): From noreply at buildbot.pypy.org Mon Dec 19 15:23:58 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 15:23:58 +0100 (CET) Subject: [pypy-commit] pypy default: This was meant to say (2, 6) and not (2.6), I am pretty sure. Message-ID: <20111219142358.A80FA823F8@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50709:b506f9eca729 Date: 2011-12-19 15:23 +0100 http://bitbucket.org/pypy/pypy/changeset/b506f9eca729/ Log: This was meant to say (2, 6) and not (2.6), I am pretty sure. Fixed, but then the test fails on top of Python 2.6... diff --git a/pypy/tool/jitlogparser/test/test_modulefinder.py b/pypy/tool/jitlogparser/test/test_modulefinder.py --- a/pypy/tool/jitlogparser/test/test_modulefinder.py +++ b/pypy/tool/jitlogparser/test/test_modulefinder.py @@ -3,7 +3,7 @@ import re, sys def setup_module(mod): - if sys.version_info[:2] != (2.6): + if sys.version_info[:2] != (2, 6): py.test.skip("Specific python 2.6 tests") def test_gather_code_py(): From noreply at buildbot.pypy.org Mon Dec 19 18:21:55 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Mon, 19 Dec 2011 18:21:55 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: hg merge default Message-ID: <20111219172155.1EA92823F8@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50710:ac207c29864b Date: 2011-12-19 08:07 +0100 http://bitbucket.org/pypy/pypy/changeset/ac207c29864b/ Log: hg merge default diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -107,9 +107,15 @@ _exception_emulator[1] = 0 self.saved_exc_value = rffi.cast(llmemory.GCREF, v_i) + def save_exception_memoryerr(): + save_exception() + if not self.saved_exc_value: + self.saved_exc_value = "memoryerror!" # for tests + self.pos_exception = pos_exception self.pos_exc_value = pos_exc_value self.save_exception = save_exception + self.save_exception_memoryerr = save_exception_memoryerr self.insert_stack_check = lambda: (0, 0, 0) @@ -134,6 +140,15 @@ # in the assignment to self.saved_exc_value, as needed. 
self.saved_exc_value = exc_value + def save_exception_memoryerr(): + from pypy.rpython.annlowlevel import cast_instance_to_base_ptr + save_exception() + if not self.saved_exc_value: + exc = MemoryError() + exc = cast_instance_to_base_ptr(exc) + exc = lltype.cast_opaque_ptr(llmemory.GCREF, exc) + self.saved_exc_value = exc + from pypy.rlib import rstack STACK_CHECK_SLOWPATH = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) @@ -147,16 +162,19 @@ self.pos_exception = pos_exception self.pos_exc_value = pos_exc_value self.save_exception = save_exception + self.save_exception_memoryerr = save_exception_memoryerr self.insert_stack_check = insert_stack_check def _setup_on_leave_jitted_untranslated(self): # assume we don't need a backend leave in this case self.on_leave_jitted_save_exc = self.save_exception + self.on_leave_jitted_memoryerr = self.save_exception_memoryerr self.on_leave_jitted_noexc = lambda : None def _setup_on_leave_jitted_translated(self): on_leave_jitted_hook = self.get_on_leave_jitted_hook() save_exception = self.save_exception + save_exception_memoryerr = self.save_exception_memoryerr def on_leave_jitted_noexc(): on_leave_jitted_hook() @@ -165,16 +183,24 @@ save_exception() on_leave_jitted_hook() + def on_leave_jitted_memoryerr(): + save_exception_memoryerr() + on_leave_jitted_hook() + self.on_leave_jitted_noexc = on_leave_jitted_noexc self.on_leave_jitted_save_exc = on_leave_jitted_save_exc + self.on_leave_jitted_memoryerr = on_leave_jitted_memoryerr def get_on_leave_jitted_hook(self): return lambda : None _ON_JIT_LEAVE_FUNC = lltype.Ptr(lltype.FuncType([], lltype.Void)) - def get_on_leave_jitted_int(self, save_exception): - if save_exception: + def get_on_leave_jitted_int(self, save_exception, + default_to_memoryerror=False): + if default_to_memoryerror: + f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_memoryerr) + elif save_exception: f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_save_exc) else: f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_noexc) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -2930,6 +2930,8 @@ # overflowing value: fail = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) assert fail.identifier == excdescr.identifier + exc = self.cpu.grab_exc_value() + assert exc == "memoryerror!" 
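As an illustration of the pattern added in the llmodel.py hunk above (the class and method names below are invented for the sketch, not the real PyPy CPU classes): save_exception_memoryerr() behaves like save_exception(), except that if no exception object was actually recorded it falls back to a MemoryError (or to the string marker "memoryerror!" in the untranslated test setup), so the propagate-exception exit always reports a failure cause.

    # Sketch only; FakeCPU and its methods are hypothetical illustrations.
    class FakeCPU(object):
        def __init__(self):
            self.saved_exc_value = None

        def save_exception(self):
            # pretend the unwind found no exception to record
            pass

        def save_exception_memoryerr(self):
            # same as save_exception(), but never leave the slot empty:
            # default to MemoryError so callers always see a failure cause
            self.save_exception()
            if not self.saved_exc_value:
                self.saved_exc_value = MemoryError()

    cpu = FakeCPU()
    cpu.save_exception_memoryerr()
    assert isinstance(cpu.saved_exc_value, MemoryError)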
def test_compile_loop_with_target(self): i0 = BoxInt() diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -275,7 +275,8 @@ # self.mc = codebuf.MachineCodeBlockWrapper() # call on_leave_jitted_save_exc() - addr = self.cpu.get_on_leave_jitted_int(save_exception=True) + addr = self.cpu.get_on_leave_jitted_int(save_exception=True, + default_to_memoryerror=True) self.mc.CALL(imm(addr)) self.mc.MOV_ri(eax.value, self.cpu.propagate_exception_v) self._call_footer() @@ -865,8 +866,8 @@ high_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[1] low_part = intmask(low_part) high_part = intmask(high_part) - self.mc.MOV_bi(to_loc.value, low_part) - self.mc.MOV_bi(to_loc.value + 4, high_part) + self.mc.MOV32_bi(to_loc.value, low_part) + self.mc.MOV32_bi(to_loc.value + 4, high_part) def regalloc_perform(self, op, arglocs, resloc): genop_list[op.getopnum()](self, op, arglocs, resloc) diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -329,11 +329,16 @@ special_ops = {'repr': True, 'userdel': True, 'hash': True} for opname, _, arity, special_methods in ObjSpace.MethodTable: - if opname in special_ops: + if opname in special_ops or not special_methods: continue nonspaceargs = ", ".join(["w_obj%s" % i for i in range(arity)]) code = "def func(space, %s):\n '''%s'''\n" % (nonspaceargs, opname) - for i in range(arity): + assert arity >= len(special_methods) + forcing_count = len(special_methods) + if opname.startswith('inplace_'): + assert arity == 2 + forcing_count = arity + for i in range(forcing_count): code += " w_obj%s = force(space, w_obj%s)\n" % (i, i) code += " return space.%s(%s)" % (opname, nonspaceargs) exec py.code.Source(code).compile() diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ b/pypy/module/_weakref/test/test_weakref.py @@ -466,3 +466,44 @@ # No exception should be raised here gc.collect() + def test_add(self): + import _weakref + class A(object): + def __add__(self, other): + return other + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + a3 = p1 + p2 + assert a3 is a2 + + def test_inplace_add(self): + import _weakref + class A(object): + def __add__(self, other): + return other + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + p1 += p2 + assert p1 is a2 + + def test_setattr(self): + import _weakref + class A(object): + def __setitem__(self, key, value): + self.setkey = key + self.setvalue = value + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + p1[p2] = 42 + assert a1.setkey is p2 + assert a1.setvalue == 42 + # + p1[42] = p2 + assert a1.setkey == 42 + assert a1.setvalue is p2 From noreply at buildbot.pypy.org Mon Dec 19 18:22:50 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 18:22:50 +0100 (CET) Subject: [pypy-commit] pypy generator-in-rpython: in-progress Message-ID: <20111219172250.51E3E823F8@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: generator-in-rpython Changeset: r50711:c5bd904d1a8f Date: 2011-12-19 15:58 +0100 http://bitbucket.org/pypy/pypy/changeset/c5bd904d1a8f/ Log: in-progress diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ 
b/pypy/objspace/flow/flowcontext.py @@ -249,7 +249,7 @@ def build_flow(self): if self.is_generator: - self.produce_generator_entry() + self.produce_generator_mark() while self.pendingblocks: block = self.pendingblocks.popleft() frame = self.create_frame() @@ -316,11 +316,10 @@ del self.recorder self.fixeggblocks() - def produce_generator_entry(self): + def produce_generator_mark(self): [initialblock] = self.pendingblocks initialblock.operations.append( - SpaceOperation('generator_entry', list(initialblock.inputargs), - Variable())) + SpaceOperation('generator_mark', [], Variable())) def generate_yield(self, frame, w_result): assert self.is_generator diff --git a/pypy/objspace/flow/test/test_generator.py b/pypy/objspace/flow/test/test_generator.py --- a/pypy/objspace/flow/test/test_generator.py +++ b/pypy/objspace/flow/test/test_generator.py @@ -12,7 +12,7 @@ i += 1 graph = self.codetest(f) ops = self.all_operations(graph) - assert ops == {'generator_entry': 1, + assert ops == {'generator_mark': 1, 'lt': 1, 'is_true': 1, 'yield': 2, 'inplace_add': 1} diff --git a/pypy/translator/test/test_generator.py b/pypy/translator/test/test_generator.py --- a/pypy/translator/test/test_generator.py +++ b/pypy/translator/test/test_generator.py @@ -1,6 +1,9 @@ +from pypy.conftest import option from pypy.objspace.flow.objspace import FlowObjSpace +from pypy.objspace.flow.model import Variable from pypy.translator.translator import TranslationContext from pypy.translator.generator import replace_graph_with_bootstrap +from pypy.translator.generator import get_variable_names # ____________________________________________________________ @@ -50,6 +53,10 @@ def test_explicit(): assert list(f_gen(10)) == list(f_explicit(10)) +def test_get_variable_names(): + lst = get_variable_names([Variable('a'), Variable('b_'), Variable('a')]) + assert lst == ['g_a', 'g_b', 'g_a_'] + # ____________________________________________________________ @@ -62,20 +69,22 @@ # space = FlowObjSpace() graph = space.build_flow(func) - assert graph.startblock.operations[0].opname == 'generator_entry' + assert graph.startblock.operations[0].opname == 'generator_mark' replace_graph_with_bootstrap(graph, 'newgraph') + if option.view: + graph.show() block = graph.startblock ops = block.operations assert ops[0].opname == 'call' # e = Entry1() - assert ops[1].opname == 'setattr' # e.n_0 = n - assert ops[1].args[1].value.startswith('n_') - assert ops[2].opname == 'setattr' # e.x_0 = x - assert ops[2].args[1].value.startswith('x_') - assert ops[3].opname == 'setattr' # e.y_0 = y - assert ops[3].args[1].value.startswith('y_') - assert ops[4].opname == 'setattr' # e.z_0 = z - assert ops[4].args[1].value.startswith('z_') - assert ops[5].opname == 'call' # g = Generator(e) + assert ops[1].opname == 'setattr' # e.g_n = n + assert ops[1].args[1].value == 'g_n' + assert ops[2].opname == 'setattr' # e.g_x = x + assert ops[2].args[1].value == 'g_x' + assert ops[3].opname == 'setattr' # e.g_y = y + assert ops[3].args[1].value == 'g_y' + assert ops[4].opname == 'setattr' # e.g_z = z + assert ops[4].args[1].value == 'g_z' + assert ops[5].opname == 'call' # g = GeneratorIterator(e) assert ops[5].args[1] == ops[0].result assert len(ops) == 6 assert len(block.exits) == 1 From noreply at buildbot.pypy.org Mon Dec 19 18:23:00 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 18:23:00 +0100 (CET) Subject: [pypy-commit] pypy generator-in-rpython: hg merge default Message-ID: <20111219172300.B7D5E823F8@wyvern.cs.uni-duesseldorf.de> Author: Armin 
Rigo Branch: generator-in-rpython Changeset: r50712:d3860e3e2259 Date: 2011-12-19 15:58 +0100 http://bitbucket.org/pypy/pypy/changeset/d3860e3e2259/ Log: hg merge default diff too long, truncating to 10000 out of 43208 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -1,3 +1,4 @@ b590cf6de4190623aad9aa698694c22e614d67b9 release-1.5 b48df0bf4e75b81d98f19ce89d4a7dc3e1dab5e5 benchmarked d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6 +ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7 diff --git a/lib-python/modified-2.7/ctypes/__init__.py b/lib-python/modified-2.7/ctypes/__init__.py --- a/lib-python/modified-2.7/ctypes/__init__.py +++ b/lib-python/modified-2.7/ctypes/__init__.py @@ -351,7 +351,7 @@ self._FuncPtr = _FuncPtr if handle is None: - self._handle = _ffi.CDLL(name) + self._handle = _ffi.CDLL(name, mode) else: self._handle = handle diff --git a/lib-python/modified-2.7/ctypes/test/test_callbacks.py b/lib-python/modified-2.7/ctypes/test/test_callbacks.py --- a/lib-python/modified-2.7/ctypes/test/test_callbacks.py +++ b/lib-python/modified-2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test class Callbacks(unittest.TestCase): @@ -98,6 +99,7 @@ ## self.check_type(c_char_p, "abc") ## self.check_type(c_char_p, "def") + @xfail def test_pyobject(self): o = () from sys import getrefcount as grc diff --git a/lib-python/modified-2.7/ctypes/test/test_libc.py b/lib-python/modified-2.7/ctypes/test/test_libc.py --- a/lib-python/modified-2.7/ctypes/test/test_libc.py +++ b/lib-python/modified-2.7/ctypes/test/test_libc.py @@ -25,7 +25,10 @@ lib.my_qsort(chars, len(chars)-1, sizeof(c_char), comparefunc(sort)) self.assertEqual(chars.raw, " ,,aaaadmmmnpppsss\x00") - def test_no_more_xfail(self): + def SKIPPED_test_no_more_xfail(self): + # We decided to not explicitly support the whole ctypes-2.7 + # and instead go for a case-by-case, demand-driven approach. + # So this test is skipped instead of failing. import socket import ctypes.test self.assertTrue(not hasattr(ctypes.test, 'xfail'), diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -379,12 +379,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): @@ -404,7 +406,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# -*- coding: iso-8859-1 +# -*- coding: iso-8859-1 -*- # Note that PyPy contains also a built-in module 'sha' which will hide # this one if compiled in. 
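The lib_pypy/_collections.py hunk above makes defaultdict read its default factory from the first positional argument, reject non-callable (and non-None) factories at construction time, and changes copy() to pass the factory positionally, since the old keyword form no longer matches the new constructor. A short sketch of the intended contract, shown here with the stdlib defaultdict purely for illustration:

    # Sketch of the expected behaviour; uses the stdlib type, not the lib_pypy copy.
    from collections import defaultdict

    d = defaultdict(list, {'a': [1]})   # factory is the first positional argument
    d['b'].append(2)                    # missing key -> factory() result inserted
    c = d.copy()                        # copy() must preserve the factory...
    assert c.default_factory is list
    c['new'].append(3)                  # ...so lookups of missing keys still work
    try:
        defaultdict(42)                 # a non-callable, non-None factory is rejected
    except TypeError:
        pass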
diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -231,6 +231,9 @@ sqlite.sqlite3_result_text.argtypes = [c_void_p, c_char_p, c_int, c_void_p] sqlite.sqlite3_result_text.restype = None +sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] +sqlite.sqlite3_enable_load_extension.restype = c_int + ########################################## # END Wrapped SQLite C API and constants ########################################## @@ -705,6 +708,14 @@ from sqlite3.dump import _iterdump return _iterdump(self) + def enable_load_extension(self, enabled): + self._check_thread() + self._check_closed() + + rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) + if rc != SQLITE_OK: + raise OperationalError("Error enabling load extension") + DML, DQL, DDL = range(3) class Cursor(object): diff --git a/lib_pypy/distributed/socklayer.py b/lib_pypy/distributed/socklayer.py --- a/lib_pypy/distributed/socklayer.py +++ b/lib_pypy/distributed/socklayer.py @@ -2,7 +2,7 @@ import py from socket import socket -XXX needs import adaptation as 'green' is removed from py lib for years +raise ImportError("XXX needs import adaptation as 'green' is removed from py lib for years") from py.impl.green.msgstruct import decodemessage, message from socket import socket, AF_INET, SOCK_STREAM import marshal diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -25,7 +25,7 @@ __all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee'] + 'takewhile', 'tee', 'compress', 'product'] try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f diff --git a/lib_pypy/pyrepl/unix_console.py b/lib_pypy/pyrepl/unix_console.py --- a/lib_pypy/pyrepl/unix_console.py +++ b/lib_pypy/pyrepl/unix_console.py @@ -412,7 +412,12 @@ e.args[4] == 'unexpected end of data': pass else: - raise + # was: "raise". But it crashes pyrepl, and by extension the + # pypy currently running, in which we are e.g. in the middle + # of some debugging session. Argh. Instead just print an + # error message to stderr and continue running, for now. 
+ self.partial_char = '' + sys.stderr.write('\n%s: %s\n' % (e.__class__.__name__, e)) else: self.partial_char = '' self.event_queue.push(c) diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py --- a/lib_pypy/syslog.py +++ b/lib_pypy/syslog.py @@ -38,9 +38,27 @@ _setlogmask.argtypes = (c_int,) _setlogmask.restype = c_int +_S_log_open = False +_S_ident_o = None + +def _get_argv(): + try: + import sys + script = sys.argv[0] + if isinstance(script, str): + return script[script.rfind('/')+1:] or None + except Exception: + pass + return None + @builtinify -def openlog(ident, option, facility): - _openlog(ident, option, facility) +def openlog(ident=None, logoption=0, facility=LOG_USER): + global _S_ident_o, _S_log_open + if ident is None: + ident = _get_argv() + _S_ident_o = c_char_p(ident) # keepalive + _openlog(_S_ident_o, logoption, facility) + _S_log_open = True @builtinify def syslog(arg1, arg2=None): @@ -48,11 +66,18 @@ priority, message = arg1, arg2 else: priority, message = LOG_INFO, arg1 + # if log is not opened, open it now + if not _S_log_open: + openlog() _syslog(priority, "%s", message) @builtinify def closelog(): - _closelog() + global _S_log_open, S_ident_o + if _S_log_open: + _closelog() + _S_log_open = False + _S_ident_o = None @builtinify def setlogmask(mask): diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -164,6 +164,7 @@ # if something: # assume this causes a NameError # # _this_ lines and the one # below we don't want from entry.getsource() + end = min(end, len(source)) for i in range(self.lineno, end): if source[i].rstrip().endswith(':'): end = i + 1 @@ -307,7 +308,7 @@ self._striptext = 'AssertionError: ' self._excinfo = tup self.type, self.value, tb = self._excinfo - self.typename = self.type.__name__ + self.typename = getattr(self.type, "__name__", "???") self.traceback = py.code.Traceback(tb) def __repr__(self): diff --git a/pypy/annotation/binaryop.py b/pypy/annotation/binaryop.py --- a/pypy/annotation/binaryop.py +++ b/pypy/annotation/binaryop.py @@ -252,7 +252,26 @@ # unsignedness is considered a rare and contagious disease def union((int1, int2)): - knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + if int1.unsigned == int2.unsigned: + knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + else: + t1 = int1.knowntype + if t1 is bool: + t1 = int + t2 = int2.knowntype + if t2 is bool: + t2 = int + + if t2 is int: + if int2.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t1 + knowntype = t1 + elif t1 is int: + if int1.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t2 + knowntype = t2 + else: + raise UnionError, "Merging these types (%s, %s) is not supported" % (t1, t2) return SomeInteger(nonneg=int1.nonneg and int2.nonneg, knowntype=knowntype) diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -591,13 +591,11 @@ immutable = True def __init__(self, method): self.method = method - -NUMBER = object() + annotation_to_ll_map = [ (SomeSingleFloat(), lltype.SingleFloat), (s_None, lltype.Void), # also matches SomeImpossibleValue() (s_Bool, lltype.Bool), - (SomeInteger(knowntype=r_ulonglong), NUMBER), (SomeFloat(), lltype.Float), (SomeLongFloat(), lltype.LongFloat), (SomeChar(), lltype.Char), @@ -623,10 +621,11 @@ return lltype.Ptr(p.PARENTTYPE) if isinstance(s_val, SomePtr): return s_val.ll_ptrtype + if type(s_val) 
is SomeInteger: + return lltype.build_number(None, s_val.knowntype) + for witness, T in annotation_to_ll_map: if witness.contains(s_val): - if T is NUMBER: - return lltype.build_number(None, s_val.knowntype) return T if info is None: info = '' @@ -635,7 +634,7 @@ raise ValueError("%sshould return a low-level type,\ngot instead %r" % ( info, s_val)) -ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map if ll is not NUMBER]) +ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map]) def lltype_to_annotation(T): try: diff --git a/pypy/annotation/specialize.py b/pypy/annotation/specialize.py --- a/pypy/annotation/specialize.py +++ b/pypy/annotation/specialize.py @@ -36,9 +36,7 @@ newtup = SpaceOperation('newtuple', starargs, argscopy[-1]) newstartblock.operations.append(newtup) newstartblock.closeblock(Link(argscopy, graph.startblock)) - graph.startblock.isstartblock = False graph.startblock = newstartblock - newstartblock.isstartblock = True argnames = argnames + ['.star%d' % i for i in range(nb_extra_args)] graph.signature = Signature(argnames) # note that we can mostly ignore defaults: if nb_extra_args > 0, diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py --- a/pypy/annotation/test/test_annrpython.py +++ b/pypy/annotation/test/test_annrpython.py @@ -856,6 +856,46 @@ py.test.raises(Exception, a.build_types, f, []) # if you want to get a r_uint, you have to be explicit about it + def test_add_different_ints(self): + def f(a, b): + return a + b + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_different_ints(self): + def f(a, b): + if a: + c = a + else: + c = b + return c + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_ruint_zero(self): + def f(a): + if a: + c = a + else: + c = 0 + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_merge_ruint_nonneg_signed(self): + def f(a, b): + if a: + c = a + else: + assert b >= 0 + c = b + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint, int]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_prebuilt_long_that_is_not_too_long(self): small_constant = 12L def f(): @@ -3029,7 +3069,7 @@ if g(x, y): g(x, r_uint(y)) a = self.RPythonAnnotator() - a.build_types(f, [int, int]) + py.test.raises(Exception, a.build_types, f, [int, int]) def test_compare_with_zero(self): def g(): diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py --- a/pypy/bin/checkmodule.py +++ b/pypy/bin/checkmodule.py @@ -1,43 +1,45 @@ #! /usr/bin/env python """ -Usage: checkmodule.py [-b backend] +Usage: checkmodule.py -Compiles the PyPy extension module from pypy/module// -into a fake program which does nothing. Useful for testing whether a -modules compiles without doing a full translation. Default backend is cli. - -WARNING: this is still incomplete: there are chances that the -compilation fails with strange errors not due to the module. If a -module is known to compile during a translation but don't pass -checkmodule.py, please report the bug (or, better, correct it :-). +Check annotation and rtyping of the PyPy extension module from +pypy/module//. Useful for testing whether a +modules compiles without doing a full translation. 
""" import autopath -import sys +import sys, os from pypy.objspace.fake.checkmodule import checkmodule def main(argv): - try: - assert len(argv) in (2, 4) - if len(argv) == 2: - backend = 'cli' - modname = argv[1] - if modname in ('-h', '--help'): - print >> sys.stderr, __doc__ - sys.exit(0) - if modname.startswith('-'): - print >> sys.stderr, "Bad command line" - print >> sys.stderr, __doc__ - sys.exit(1) - else: - _, b, backend, modname = argv - assert b == '-b' - except AssertionError: + if len(argv) != 2: print >> sys.stderr, __doc__ sys.exit(2) + modname = argv[1] + if modname in ('-h', '--help'): + print >> sys.stderr, __doc__ + sys.exit(0) + if modname.startswith('-'): + print >> sys.stderr, "Bad command line" + print >> sys.stderr, __doc__ + sys.exit(1) + if os.path.sep in modname: + if os.path.basename(modname) == '': + modname = os.path.dirname(modname) + if os.path.basename(os.path.dirname(modname)) != 'module': + print >> sys.stderr, "Must give '../module/xxx', or just 'xxx'." + sys.exit(1) + modname = os.path.basename(modname) + try: + checkmodule(modname) + except Exception, e: + import traceback, pdb + traceback.print_exc() + pdb.post_mortem(sys.exc_info()[2]) + return 1 else: - checkmodule(modname, backend, interactive=True) - print 'Module compiled succesfully' + print 'Passed.' + return 0 if __name__ == '__main__': - main(sys.argv) + sys.exit(main(sys.argv)) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -252,6 +252,10 @@ "use small tuples", default=False), + BoolOption("withspecialisedtuple", + "use specialised tuples", + default=False), + BoolOption("withrope", "use ropes as the string implementation", default=False, requires=[("objspace.std.withstrslice", False), @@ -281,6 +285,9 @@ "actually create the full list until the resulting " "list is mutated", default=False), + BoolOption("withliststrategies", + "enable optimized ways to store lists of primitives ", + default=True), BoolOption("withtypeversion", "version type objects when changing them", @@ -362,6 +369,7 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) + config.objspace.std.suggest(withspecialisedtuple=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) diff --git a/pypy/config/test/test_translationoption.py b/pypy/config/test/test_translationoption.py new file mode 100644 --- /dev/null +++ b/pypy/config/test/test_translationoption.py @@ -0,0 +1,10 @@ +import py +from pypy.config.translationoption import get_combined_translation_config +from pypy.config.translationoption import set_opt_level +from pypy.config.config import ConflictConfigError + + +def test_no_gcrootfinder_with_boehm(): + config = get_combined_translation_config() + config.translation.gcrootfinder = "shadowstack" + py.test.raises(ConflictConfigError, set_opt_level, config, '0') diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -69,8 +69,8 @@ "statistics": [("translation.gctransformer", "framework")], "generation": [("translation.gctransformer", "framework")], "hybrid": [("translation.gctransformer", "framework")], - "boehm": [("translation.gctransformer", "boehm"), - ("translation.continuation", False)], # breaks + "boehm": [("translation.continuation", False), # breaks + ("translation.gctransformer", "boehm")], 
"markcompact": [("translation.gctransformer", "framework")], "minimark": [("translation.gctransformer", "framework")], }, @@ -398,6 +398,10 @@ # make_sure_not_resized often relies on it, so we always enable them config.translation.suggest(list_comprehension_operations=True) + # finally, make the choice of the gc definitive. This will fail + # if we have specified strange inconsistent settings. + config.translation.gc = config.translation.gc + # ---------------------------------------------------------------- def set_platform(config): diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -496,6 +496,17 @@ def setup(self): super(AppClassCollector, self).setup() cls = self.obj + # + # + for name in dir(cls): + if name.startswith('test_'): + func = getattr(cls, name, None) + code = getattr(func, 'func_code', None) + if code and code.co_flags & 32: + raise AssertionError("unsupported: %r is a generator " + "app-level test method" % (name,)) + # + # space = cls.space clsname = cls.__name__ if self.config.option.runappdirect: diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -270,7 +270,12 @@ - *slicing*: the slice start must be within bounds. The stop doesn't need to, but it must not be smaller than the start. All negative indexes are disallowed, except for - the [:-1] special case. No step. + the [:-1] special case. No step. Slice deletion follows the same rules. + + - *slice assignment*: + only supports ``lst[x:y] = sublist``, if ``len(sublist) == y - x``. + In other words, slice assignment cannot change the total length of the list, + but just replace items. - *other operators*: ``+``, ``+=``, ``in``, ``*``, ``*=``, ``==``, ``!=`` work as expected. diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.6' +version = '1.7' # The full version, including alpha/beta/rc tags. -release = '1.6' +release = '1.7' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/config/objspace.std.withliststrategies.txt b/pypy/doc/config/objspace.std.withliststrategies.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withliststrategies.txt @@ -0,0 +1,2 @@ +Enable list strategies: Use specialized representations for lists of primitive +objects, such as ints. diff --git a/pypy/doc/config/objspace.std.withspecialisedtuple.txt b/pypy/doc/config/objspace.std.withspecialisedtuple.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withspecialisedtuple.txt @@ -0,0 +1,3 @@ +Use "specialized tuples", a custom implementation for some common kinds +of tuples. Currently limited to tuples of length 2, in three variants: +(int, int), (float, float), and a generic (object, object). diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -262,6 +262,26 @@ documented as such (as e.g. for hasattr()), in most cases PyPy lets the exception propagate instead. +Object Identity of Primitive Values, ``is`` and ``id`` +------------------------------------------------------- + +Object identity of primitive values works by value equality, not by identity of +the wrapper. This means that ``x + 1 is x + 1`` is always true, for arbitrary +integers ``x``. 
The rule applies for the following types: + + - ``int`` + + - ``float`` + + - ``long`` + + - ``complex`` + +This change requires some changes to ``id`` as well. ``id`` fulfills the +following condition: ``x is y <=> id(x) == id(y)``. Therefore ``id`` of the +above types will return a value that is computed from the argument, and can +thus be larger than ``sys.maxint`` (i.e. it can be an arbitrary long). + Miscellaneous ------------- @@ -284,14 +304,14 @@ never a dictionary as it sometimes is in CPython. Assigning to ``__builtins__`` has no effect. -* Do not compare immutable objects with ``is``. For example on CPython - it is true that ``x is 0`` works, i.e. does the same as ``type(x) is - int and x == 0``, but it is so by accident. If you do instead - ``x is 1000``, then it stops working, because 1000 is too large and - doesn't come from the internal cache. In PyPy it fails to work in - both cases, because we have no need for a cache at all. +* directly calling the internal magic methods of a few built-in types + with invalid arguments may have a slightly different result. For + example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return + ``NotImplemented`` on PyPy; on CPython, only the later does, and the + former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` + both raise ``TypeError`` everywhere.) This difference is an + implementation detail that shows up because of internal C-level slots + that PyPy does not have. -* Also, object identity of immutable keys in dictionaries is not necessarily - preserved. .. include:: _ref.txt diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -112,10 +112,32 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. +Note that the JIT has a very high warm-up cost, meaning that the +programs are slow at the beginning. If you want to compare the timings +with CPython, even relatively simple programs need to run *at least* one +second, preferrably at least a few seconds. Large, complicated programs +need even more time to warm-up the JIT. + .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +--------------------------------------------------------------- +Couldn't the JIT dump and reload already-compiled machine code? +--------------------------------------------------------------- + +No, we found no way of doing that. The JIT generates machine code +containing a large number of constant addresses --- constant at the time +the machine code is written. The vast majority is probably not at all +constants that you find in the executable, with a nice link name. E.g. +the addresses of Python classes are used all the time, but Python +classes don't come statically from the executable; they are created anew +every time you restart your program. This makes saving and reloading +machine code completely impossible without some very advanced way of +mapping addresses in the old (now-dead) process to addresses in the new +process, including checking that all the previous assumptions about the +(now-dead) object are still true about the new object. + .. _`prolog and javascript`: diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -1,6 +1,3 @@ -.. include:: needswork.txt - -.. needs work, it talks about svn. also, it is not really user documentation Making a PyPy Release ======================= @@ -12,11 +9,8 @@ forgetting things. 
A set of todo files may also work. Check and prioritize all issues for the release, postpone some if necessary, -create new issues also as necessary. A meeting (or meetings) should be -organized to decide what things are priorities, should go in and work for -the release. - -An important thing is to get the documentation into an up-to-date state! +create new issues also as necessary. An important thing is to get +the documentation into an up-to-date state! Release Steps ---------------- diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.6`: http://pypy.org/download.html +.. _`Release 1.7`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. Windows is supported, but platform-specific bugs tend to take longer before we notice and fix diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -23,17 +23,20 @@ PyPy's implementation of the Python ``long`` type is slower than CPython's. Find out why and optimize them. +Make bytearray type fast +------------------------ + +PyPy's bytearray type is very inefficient. It would be an interesting +task to look into possible optimizations on this. + Numpy improvements ------------------ -This is more of a project-container than a single project. Possible ideas: +The numpy is rapidly progressing in pypy, so feel free to come to IRC and +ask for proposed topic. A not necesarilly up-to-date `list of topics`_ +is also available. -* experiment with auto-vectorization using SSE or implement vectorization - without automatically detecting it for array operations. - -* improve numpy, for example implement memory views. - -* interface with fortran/C libraries. +.. _`list of topics`: https://bitbucket.org/pypy/extradoc/src/extradoc/planning/micronumpy.txt Improving the jitviewer ------------------------ diff --git a/pypy/doc/release-1.7.0.rst b/pypy/doc/release-1.7.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-1.7.0.rst @@ -0,0 +1,94 @@ +================================== +PyPy 1.7 - widening the sweet spot +================================== + +We're pleased to announce the 1.7 release of PyPy. As became a habit, this +release brings a lot of bugfixes and performance improvements over the 1.6 +release. However, unlike the previous releases, the focus has been on widening +the "sweet spot" of PyPy. That is, classes of Python code that PyPy can greatly +speed up should be vastly improved with this release. You can download the 1.7 +release here: + + http://pypy.org/download.html + +What is PyPy? 
+============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 1.7 and cpython 2.7.1`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 32/64 or +Windows 32. Windows 64 work is ongoing, but not yet natively supported. + +The main topic of this release is widening the range of code which PyPy +can greatly speed up. On average on +our benchmark suite, PyPy 1.7 is around **30%** faster than PyPy 1.6 and up +to **20 times** faster on some benchmarks. + +.. _`pypy 1.7 and cpython 2.7.1`: http://speed.pypy.org + + +Highlights +========== + +* Numerous performance improvements. There are too many examples which python + constructs now should behave faster to list them. + +* Bugfixes and compatibility fixes with CPython. + +* Windows fixes. + +* PyPy now comes with stackless features enabled by default. However, + any loop using stackless features will interrupt the JIT for now, so no real + performance improvement for stackless-based programs. Contact pypy-dev for + info how to help on removing this restriction. + +* NumPy effort in PyPy was renamed numpypy. In order to try using it, simply + write:: + + import numpypy as numpy + + at the beginning of your program. There is a huge progress on numpy in PyPy + since 1.6, the main feature being implementation of dtypes. + +* JSON encoder (but not decoder) has been replaced with a new one. This one + is written in pure Python, but is known to outperform CPython's C extension + up to **2 times** in some cases. It's about **20 times** faster than + the one that we had in 1.6. + +* The memory footprint of some of our RPython modules has been drastically + improved. This should impact any applications using for example cryptography, + like tornado. + +* There was some progress in exposing even more CPython C API via cpyext. + +Things that didn't make it, expect in 1.8 soon +============================================== + +There is an ongoing work, which while didn't make it to the release, is +probably worth mentioning here. This is what you should probably expect in +1.8 some time soon: + +* Specialized list implementation. There is a branch that implements lists of + integers/floats/strings as compactly as array.array. This should drastically + improve performance/memory impact of some applications + +* NumPy effort is progressing forward, with multi-dimensional arrays coming + soon. + +* There are two brand new JIT assembler backends, notably for the PowerPC and + ARM processors. + +Fundraising +=========== + +It's maybe worth mentioning that we're running fundraising campaigns for +NumPy effort in PyPy and for Python 3 in PyPy. In case you want to see any +of those happen faster, we urge you to donate to `numpy proposal`_ or +`py3k proposal`_. In case you want PyPy to progress, but you trust us with +the general direction, you can always donate to the `general pot`_. + +.. _`numpy proposal`: http://pypy.org/numpydonate.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`general pot`: http://pypy.org diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -51,6 +51,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." 
+ state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \"%s\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \"%s\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -94,17 +112,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." - for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \"%s\" missing from %s" - err = err % (missing, host) - w_err = space.wrap(err) - raise OperationError(space.w_TypeError, w_err) - raise AssertionError("should not reach here") class mod(AST): @@ -112,7 +119,6 @@ class Module(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -128,7 +134,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Module') + self.missing_field(space, ['body'], 'Module') else: pass w_list = self.w_body @@ -145,7 +151,6 @@ class Interactive(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -161,7 +166,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Interactive') + self.missing_field(space, ['body'], 'Interactive') else: pass w_list = self.w_body @@ -178,7 +183,6 @@ class Expression(mod): - def __init__(self, body): self.body = body self.initialization_state = 1 @@ -192,7 +196,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Expression') + self.missing_field(space, ['body'], 'Expression') else: pass self.body.sync_app_attrs(space) @@ -200,7 +204,6 @@ class Suite(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -216,7 +219,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Suite') + self.missing_field(space, ['body'], 'Suite') else: pass w_list = self.w_body @@ -232,15 +235,13 @@ class stmt(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class FunctionDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, args, body, decorator_list, lineno, col_offset): self.name = name self.args = args @@ -264,7 +265,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'args', 'body', 'decorator_list', 'lineno', 'col_offset'], 'FunctionDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef') else: pass self.args.sync_app_attrs(space) @@ -292,9 +293,6 @@ class ClassDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, bases, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases @@ -320,7 +318,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'bases', 'body', 'decorator_list', 
'lineno', 'col_offset'], 'ClassDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef') else: pass w_list = self.w_bases @@ -357,9 +355,6 @@ class Return(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -374,10 +369,10 @@ return visitor.visit_Return(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Return') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Return') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -385,9 +380,6 @@ class Delete(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, targets, lineno, col_offset): self.targets = targets self.w_targets = None @@ -404,7 +396,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['targets', 'lineno', 'col_offset'], 'Delete') + self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete') else: pass w_list = self.w_targets @@ -421,9 +413,6 @@ class Assign(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, targets, value, lineno, col_offset): self.targets = targets self.w_targets = None @@ -442,7 +431,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['targets', 'value', 'lineno', 'col_offset'], 'Assign') + self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') else: pass w_list = self.w_targets @@ -460,9 +449,6 @@ class AugAssign(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, target, op, value, lineno, col_offset): self.target = target self.op = op @@ -480,7 +466,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['target', 'op', 'value', 'lineno', 'col_offset'], 'AugAssign') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') else: pass self.target.sync_app_attrs(space) @@ -489,9 +475,6 @@ class Print(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, dest, values, nl, lineno, col_offset): self.dest = dest self.values = values @@ -511,10 +494,10 @@ return visitor.visit_Print(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 30: - missing_field(space, self.initialization_state, [None, 'values', 'nl', 'lineno', 'col_offset'], 'Print') + if (self.initialization_state & ~4) ^ 27: + self.missing_field(space, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.dest = None if self.dest: self.dest.sync_app_attrs(space) @@ -532,9 +515,6 @@ class For(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, target, iter, body, orelse, lineno, col_offset): self.target = target self.iter = iter @@ -559,7 +539,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['target', 'iter', 'body', 'orelse', 'lineno', 'col_offset'], 'For') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 
'orelse'], 'For') else: pass self.target.sync_app_attrs(space) @@ -588,9 +568,6 @@ class While(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -613,7 +590,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'While') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') else: pass self.test.sync_app_attrs(space) @@ -641,9 +618,6 @@ class If(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -666,7 +640,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'If') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') else: pass self.test.sync_app_attrs(space) @@ -694,9 +668,6 @@ class With(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, context_expr, optional_vars, body, lineno, col_offset): self.context_expr = context_expr self.optional_vars = optional_vars @@ -717,10 +688,10 @@ return visitor.visit_With(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 29: - missing_field(space, self.initialization_state, ['context_expr', None, 'body', 'lineno', 'col_offset'], 'With') + if (self.initialization_state & ~8) ^ 23: + self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.optional_vars = None self.context_expr.sync_app_attrs(space) if self.optional_vars: @@ -739,9 +710,6 @@ class Raise(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, inst, tback, lineno, col_offset): self.type = type self.inst = inst @@ -762,14 +730,14 @@ return visitor.visit_Raise(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~7) ^ 24: - missing_field(space, self.initialization_state, [None, None, None, 'lineno', 'col_offset'], 'Raise') + if (self.initialization_state & ~28) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.inst = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.tback = None if self.type: self.type.sync_app_attrs(space) @@ -781,9 +749,6 @@ class TryExcept(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, handlers, orelse, lineno, col_offset): self.body = body self.w_body = None @@ -808,7 +773,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['body', 'handlers', 'orelse', 'lineno', 'col_offset'], 'TryExcept') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') else: pass w_list = self.w_body @@ -845,9 +810,6 @@ class TryFinally(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, body, finalbody, lineno, col_offset): self.body = body self.w_body = None @@ -868,7 +830,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 
15: - missing_field(space, self.initialization_state, ['body', 'finalbody', 'lineno', 'col_offset'], 'TryFinally') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') else: pass w_list = self.w_body @@ -895,9 +857,6 @@ class Assert(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, test, msg, lineno, col_offset): self.test = test self.msg = msg @@ -914,10 +873,10 @@ return visitor.visit_Assert(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 13: - missing_field(space, self.initialization_state, ['test', None, 'lineno', 'col_offset'], 'Assert') + if (self.initialization_state & ~8) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.msg = None self.test.sync_app_attrs(space) if self.msg: @@ -926,9 +885,6 @@ class Import(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -945,7 +901,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Import') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import') else: pass w_list = self.w_names @@ -962,9 +918,6 @@ class ImportFrom(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, module, names, level, lineno, col_offset): self.module = module self.names = names @@ -982,12 +935,12 @@ return visitor.visit_ImportFrom(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~5) ^ 26: - missing_field(space, self.initialization_state, [None, 'names', None, 'lineno', 'col_offset'], 'ImportFrom') + if (self.initialization_state & ~20) ^ 11: + self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.module = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.level = 0 w_list = self.w_names if w_list is not None: @@ -1003,9 +956,6 @@ class Exec(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, globals, locals, lineno, col_offset): self.body = body self.globals = globals @@ -1025,12 +975,12 @@ return visitor.visit_Exec(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~6) ^ 25: - missing_field(space, self.initialization_state, ['body', None, None, 'lineno', 'col_offset'], 'Exec') + if (self.initialization_state & ~24) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'body', None, None], 'Exec') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.globals = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.locals = None self.body.sync_app_attrs(space) if self.globals: @@ -1041,9 +991,6 @@ class Global(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -1058,7 +1005,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Global') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') else: pass w_list = self.w_names @@ -1072,9 +1019,6 @@ class Expr(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def 
__init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -1089,7 +1033,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Expr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr') else: pass self.value.sync_app_attrs(space) @@ -1097,9 +1041,6 @@ class Pass(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1112,16 +1053,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Pass') + self.missing_field(space, ['lineno', 'col_offset'], 'Pass') else: pass class Break(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1134,16 +1072,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Break') + self.missing_field(space, ['lineno', 'col_offset'], 'Break') else: pass class Continue(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1156,21 +1091,19 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Continue') + self.missing_field(space, ['lineno', 'col_offset'], 'Continue') else: pass class expr(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class BoolOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, values, lineno, col_offset): self.op = op self.values = values @@ -1188,7 +1121,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'values', 'lineno', 'col_offset'], 'BoolOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp') else: pass w_list = self.w_values @@ -1205,9 +1138,6 @@ class BinOp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, op, right, lineno, col_offset): self.left = left self.op = op @@ -1225,7 +1155,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'op', 'right', 'lineno', 'col_offset'], 'BinOp') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'op', 'right'], 'BinOp') else: pass self.left.sync_app_attrs(space) @@ -1234,9 +1164,6 @@ class UnaryOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, operand, lineno, col_offset): self.op = op self.operand = operand @@ -1252,7 +1179,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'operand', 'lineno', 'col_offset'], 'UnaryOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'operand'], 'UnaryOp') else: pass self.operand.sync_app_attrs(space) @@ -1260,9 +1187,6 @@ class Lambda(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, args, body, lineno, col_offset): self.args = args self.body = body @@ -1279,7 +1203,7 @@ def sync_app_attrs(self, space): if 
(self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['args', 'body', 'lineno', 'col_offset'], 'Lambda') + self.missing_field(space, ['lineno', 'col_offset', 'args', 'body'], 'Lambda') else: pass self.args.sync_app_attrs(space) @@ -1288,9 +1212,6 @@ class IfExp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -1309,7 +1230,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'IfExp') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'IfExp') else: pass self.test.sync_app_attrs(space) @@ -1319,9 +1240,6 @@ class Dict(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, keys, values, lineno, col_offset): self.keys = keys self.w_keys = None @@ -1342,7 +1260,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['keys', 'values', 'lineno', 'col_offset'], 'Dict') + self.missing_field(space, ['lineno', 'col_offset', 'keys', 'values'], 'Dict') else: pass w_list = self.w_keys @@ -1369,9 +1287,6 @@ class Set(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, elts, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1388,7 +1303,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['elts', 'lineno', 'col_offset'], 'Set') + self.missing_field(space, ['lineno', 'col_offset', 'elts'], 'Set') else: pass w_list = self.w_elts @@ -1405,9 +1320,6 @@ class ListComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1426,7 +1338,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'ListComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'ListComp') else: pass self.elt.sync_app_attrs(space) @@ -1444,9 +1356,6 @@ class SetComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1465,7 +1374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'SetComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'SetComp') else: pass self.elt.sync_app_attrs(space) @@ -1483,9 +1392,6 @@ class DictComp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, key, value, generators, lineno, col_offset): self.key = key self.value = value @@ -1506,7 +1412,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['key', 'value', 'generators', 'lineno', 'col_offset'], 'DictComp') + self.missing_field(space, ['lineno', 'col_offset', 'key', 'value', 'generators'], 'DictComp') else: pass self.key.sync_app_attrs(space) @@ -1525,9 +1431,6 @@ class GeneratorExp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1546,7 +1449,7 @@ def sync_app_attrs(self, space): 
if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'GeneratorExp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'GeneratorExp') else: pass self.elt.sync_app_attrs(space) @@ -1564,9 +1467,6 @@ class Yield(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1581,10 +1481,10 @@ return visitor.visit_Yield(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Yield') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Yield') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -1592,9 +1492,6 @@ class Compare(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, ops, comparators, lineno, col_offset): self.left = left self.ops = ops @@ -1615,7 +1512,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'ops', 'comparators', 'lineno', 'col_offset'], 'Compare') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'ops', 'comparators'], 'Compare') else: pass self.left.sync_app_attrs(space) @@ -1640,9 +1537,6 @@ class Call(expr): - _lineno_mask = 32 - _col_offset_mask = 64 - def __init__(self, func, args, keywords, starargs, kwargs, lineno, col_offset): self.func = func self.args = args @@ -1670,12 +1564,12 @@ return visitor.visit_Call(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~24) ^ 103: - missing_field(space, self.initialization_state, ['func', 'args', 'keywords', None, None, 'lineno', 'col_offset'], 'Call') + if (self.initialization_state & ~96) ^ 31: + self.missing_field(space, ['lineno', 'col_offset', 'func', 'args', 'keywords', None, None], 'Call') else: - if not self.initialization_state & 8: + if not self.initialization_state & 32: self.starargs = None - if not self.initialization_state & 16: + if not self.initialization_state & 64: self.kwargs = None self.func.sync_app_attrs(space) w_list = self.w_args @@ -1706,9 +1600,6 @@ class Repr(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1723,7 +1614,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Repr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Repr') else: pass self.value.sync_app_attrs(space) @@ -1731,9 +1622,6 @@ class Num(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, n, lineno, col_offset): self.n = n expr.__init__(self, lineno, col_offset) @@ -1747,16 +1635,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['n', 'lineno', 'col_offset'], 'Num') + self.missing_field(space, ['lineno', 'col_offset', 'n'], 'Num') else: pass class Str(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, s, lineno, col_offset): self.s = s expr.__init__(self, lineno, col_offset) @@ -1770,16 +1655,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 
7: - missing_field(space, self.initialization_state, ['s', 'lineno', 'col_offset'], 'Str') + self.missing_field(space, ['lineno', 'col_offset', 's'], 'Str') else: pass class Attribute(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, attr, ctx, lineno, col_offset): self.value = value self.attr = attr @@ -1796,7 +1678,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'attr', 'ctx', 'lineno', 'col_offset'], 'Attribute') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'attr', 'ctx'], 'Attribute') else: pass self.value.sync_app_attrs(space) @@ -1804,9 +1686,6 @@ class Subscript(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, slice, ctx, lineno, col_offset): self.value = value self.slice = slice @@ -1824,7 +1703,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'slice', 'ctx', 'lineno', 'col_offset'], 'Subscript') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'slice', 'ctx'], 'Subscript') else: pass self.value.sync_app_attrs(space) @@ -1833,9 +1712,6 @@ class Name(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, id, ctx, lineno, col_offset): self.id = id self.ctx = ctx @@ -1850,16 +1726,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['id', 'ctx', 'lineno', 'col_offset'], 'Name') + self.missing_field(space, ['lineno', 'col_offset', 'id', 'ctx'], 'Name') else: pass class List(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1877,7 +1750,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'List') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'List') else: pass w_list = self.w_elts @@ -1894,9 +1767,6 @@ class Tuple(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1914,7 +1784,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'Tuple') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'Tuple') else: pass w_list = self.w_elts @@ -1931,9 +1801,6 @@ class Const(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1947,7 +1814,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Const') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Const') else: pass @@ -2009,7 +1876,6 @@ class Ellipsis(slice): - def __init__(self): self.initialization_state = 0 @@ -2021,14 +1887,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 0: - missing_field(space, self.initialization_state, [], 'Ellipsis') + self.missing_field(space, [], 'Ellipsis') else: pass class Slice(slice): - def __init__(self, lower, upper, step): self.lower = lower self.upper = upper @@ -2049,7 +1914,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & 
~7) ^ 0: - missing_field(space, self.initialization_state, [None, None, None], 'Slice') + self.missing_field(space, [None, None, None], 'Slice') else: if not self.initialization_state & 1: self.lower = None @@ -2067,7 +1932,6 @@ class ExtSlice(slice): - def __init__(self, dims): self.dims = dims self.w_dims = None @@ -2083,7 +1947,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['dims'], 'ExtSlice') + self.missing_field(space, ['dims'], 'ExtSlice') else: pass w_list = self.w_dims @@ -2100,7 +1964,6 @@ class Index(slice): - def __init__(self, value): self.value = value self.initialization_state = 1 @@ -2114,7 +1977,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['value'], 'Index') + self.missing_field(space, ['value'], 'Index') else: pass self.value.sync_app_attrs(space) @@ -2377,7 +2240,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['target', 'iter', 'ifs'], 'comprehension') + self.missing_field(space, ['target', 'iter', 'ifs'], 'comprehension') else: pass self.target.sync_app_attrs(space) @@ -2394,15 +2257,13 @@ node.sync_app_attrs(space) class excepthandler(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class ExceptHandler(excepthandler): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, name, body, lineno, col_offset): self.type = type self.name = name @@ -2424,12 +2285,12 @@ return visitor.visit_ExceptHandler(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~3) ^ 28: - missing_field(space, self.initialization_state, [None, None, 'body', 'lineno', 'col_offset'], 'ExceptHandler') + if (self.initialization_state & ~12) ^ 19: + self.missing_field(space, ['lineno', 'col_offset', None, None, 'body'], 'ExceptHandler') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.name = None if self.type: self.type.sync_app_attrs(space) @@ -2470,7 +2331,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~6) ^ 9: - missing_field(space, self.initialization_state, ['args', None, None, 'defaults'], 'arguments') + self.missing_field(space, ['args', None, None, 'defaults'], 'arguments') else: if not self.initialization_state & 2: self.vararg = None @@ -2513,7 +2374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['arg', 'value'], 'keyword') + self.missing_field(space, ['arg', 'value'], 'keyword') else: pass self.value.sync_app_attrs(space) @@ -2533,7 +2394,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~2) ^ 1: - missing_field(space, self.initialization_state, ['name', None], 'alias') + self.missing_field(space, ['name', None], 'alias') else: if not self.initialization_state & 2: self.asname = None @@ -3019,6 +2880,8 @@ def Expression_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3098,7 +2961,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -3112,14 +2975,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def stmt_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -3133,7 +2996,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 stmt.typedef = typedef.TypeDef("stmt", AST.typedef, @@ -3149,7 +3012,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3163,14 +3026,14 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def FunctionDef_get_args(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -3184,10 +3047,10 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def FunctionDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3201,10 +3064,10 @@ def FunctionDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def FunctionDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3218,7 +3081,7 @@ def FunctionDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _FunctionDef_field_unroller = unrolling_iterable(['name', 'args', 'body', 'decorator_list']) def FunctionDef_init(space, w_self, __args__): @@ -3254,7 +3117,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not 
None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3268,10 +3131,10 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ClassDef_get_bases(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases') if w_self.w_bases is None: @@ -3285,10 +3148,10 @@ def ClassDef_set_bases(space, w_self, w_new_value): w_self.w_bases = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ClassDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3302,10 +3165,10 @@ def ClassDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def ClassDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3319,7 +3182,7 @@ def ClassDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _ClassDef_field_unroller = unrolling_iterable(['name', 'bases', 'body', 'decorator_list']) def ClassDef_init(space, w_self, __args__): @@ -3356,7 +3219,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3364,13 +3227,15 @@ def Return_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Return_field_unroller = unrolling_iterable(['value']) def Return_init(space, w_self, __args__): @@ -3397,7 +3262,7 @@ ) def Delete_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3411,7 +3276,7 @@ def Delete_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Delete_field_unroller = unrolling_iterable(['targets']) def 
Delete_init(space, w_self, __args__): @@ -3439,7 +3304,7 @@ ) def Assign_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3453,14 +3318,14 @@ def Assign_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3468,13 +3333,15 @@ def Assign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assign_field_unroller = unrolling_iterable(['targets', 'value']) def Assign_init(space, w_self, __args__): @@ -3507,7 +3374,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3515,20 +3382,22 @@ def AugAssign_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def AugAssign_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -3544,14 +3413,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def AugAssign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3559,13 +3428,15 @@ def AugAssign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise 
OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _AugAssign_field_unroller = unrolling_iterable(['target', 'op', 'value']) def AugAssign_init(space, w_self, __args__): @@ -3598,7 +3469,7 @@ w_obj = w_self.getdictvalue(space, 'dest') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dest') return space.wrap(w_self.dest) @@ -3606,16 +3477,18 @@ def Print_set_dest(space, w_self, w_new_value): try: w_self.dest = space.interp_w(expr, w_new_value, True) + if type(w_self.dest) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'dest', w_new_value) return w_self.deldictvalue(space, 'dest') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Print_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -3629,14 +3502,14 @@ def Print_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Print_get_nl(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'nl') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl') return space.wrap(w_self.nl) @@ -3650,7 +3523,7 @@ w_self.setdictvalue(space, 'nl', w_new_value) return w_self.deldictvalue(space, 'nl') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Print_field_unroller = unrolling_iterable(['dest', 'values', 'nl']) def Print_init(space, w_self, __args__): @@ -3684,7 +3557,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3692,20 +3565,22 @@ def For_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def For_get_iter(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'iter') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute 
'%s'", typename, 'iter') return space.wrap(w_self.iter) @@ -3713,16 +3588,18 @@ def For_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'iter', w_new_value) return w_self.deldictvalue(space, 'iter') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def For_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3736,10 +3613,10 @@ def For_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def For_get_orelse(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3753,7 +3630,7 @@ def For_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _For_field_unroller = unrolling_iterable(['target', 'iter', 'body', 'orelse']) def For_init(space, w_self, __args__): @@ -3789,7 +3666,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3797,16 +3674,18 @@ def While_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def While_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3820,10 +3699,10 @@ def While_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def While_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3837,7 +3716,7 @@ def While_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _While_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def While_init(space, w_self, __args__): @@ -3872,7 +3751,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + 
if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3880,16 +3759,18 @@ def If_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def If_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3903,10 +3784,10 @@ def If_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def If_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3920,7 +3801,7 @@ def If_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _If_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def If_init(space, w_self, __args__): @@ -3955,7 +3836,7 @@ w_obj = w_self.getdictvalue(space, 'context_expr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr') return space.wrap(w_self.context_expr) @@ -3963,20 +3844,22 @@ def With_set_context_expr(space, w_self, w_new_value): try: w_self.context_expr = space.interp_w(expr, w_new_value, False) + if type(w_self.context_expr) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'context_expr', w_new_value) return w_self.deldictvalue(space, 'context_expr') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def With_get_optional_vars(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'optional_vars') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars') return space.wrap(w_self.optional_vars) @@ -3984,16 +3867,18 @@ def With_set_optional_vars(space, w_self, w_new_value): try: w_self.optional_vars = space.interp_w(expr, w_new_value, True) + if type(w_self.optional_vars) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'optional_vars', w_new_value) return w_self.deldictvalue(space, 'optional_vars') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def With_get_body(space, w_self): - if not 
w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4007,7 +3892,7 @@ def With_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _With_field_unroller = unrolling_iterable(['context_expr', 'optional_vars', 'body']) def With_init(space, w_self, __args__): @@ -4041,7 +3926,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -4049,20 +3934,22 @@ def Raise_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Raise_get_inst(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'inst') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst') return space.wrap(w_self.inst) @@ -4070,20 +3957,22 @@ def Raise_set_inst(space, w_self, w_new_value): try: w_self.inst = space.interp_w(expr, w_new_value, True) + if type(w_self.inst) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'inst', w_new_value) return w_self.deldictvalue(space, 'inst') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Raise_get_tback(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'tback') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'tback') return space.wrap(w_self.tback) @@ -4091,13 +3980,15 @@ def Raise_set_tback(space, w_self, w_new_value): try: w_self.tback = space.interp_w(expr, w_new_value, True) + if type(w_self.tback) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'tback', w_new_value) return w_self.deldictvalue(space, 'tback') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Raise_field_unroller = unrolling_iterable(['type', 'inst', 'tback']) def Raise_init(space, w_self, __args__): @@ -4126,7 +4017,7 @@ ) def TryExcept_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4140,10 +4031,10 @@ def 
TryExcept_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryExcept_get_handlers(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers') if w_self.w_handlers is None: @@ -4157,10 +4048,10 @@ def TryExcept_set_handlers(space, w_self, w_new_value): w_self.w_handlers = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def TryExcept_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -4174,7 +4065,7 @@ def TryExcept_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _TryExcept_field_unroller = unrolling_iterable(['body', 'handlers', 'orelse']) def TryExcept_init(space, w_self, __args__): @@ -4206,7 +4097,7 @@ ) def TryFinally_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4220,10 +4111,10 @@ def TryFinally_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryFinally_get_finalbody(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody') if w_self.w_finalbody is None: @@ -4237,7 +4128,7 @@ def TryFinally_set_finalbody(space, w_self, w_new_value): w_self.w_finalbody = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _TryFinally_field_unroller = unrolling_iterable(['body', 'finalbody']) def TryFinally_init(space, w_self, __args__): @@ -4271,7 +4162,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -4279,20 +4170,22 @@ def Assert_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assert_get_msg(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'msg') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg') return 
space.wrap(w_self.msg) @@ -4300,13 +4193,15 @@ def Assert_set_msg(space, w_self, w_new_value): try: w_self.msg = space.interp_w(expr, w_new_value, True) + if type(w_self.msg) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'msg', w_new_value) return w_self.deldictvalue(space, 'msg') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assert_field_unroller = unrolling_iterable(['test', 'msg']) def Assert_init(space, w_self, __args__): @@ -4334,7 +4229,7 @@ ) def Import_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4348,7 +4243,7 @@ def Import_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Import_field_unroller = unrolling_iterable(['names']) def Import_init(space, w_self, __args__): @@ -4380,7 +4275,7 @@ w_obj = w_self.getdictvalue(space, 'module') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module') return space.wrap(w_self.module) @@ -4397,10 +4292,10 @@ w_self.setdictvalue(space, 'module', w_new_value) return w_self.deldictvalue(space, 'module') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ImportFrom_get_names(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4414,14 +4309,14 @@ def ImportFrom_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ImportFrom_get_level(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'level') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level') return space.wrap(w_self.level) @@ -4435,7 +4330,7 @@ w_self.setdictvalue(space, 'level', w_new_value) return w_self.deldictvalue(space, 'level') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ImportFrom_field_unroller = unrolling_iterable(['module', 'names', 'level']) def ImportFrom_init(space, w_self, __args__): @@ -4469,7 +4364,7 @@ w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -4477,20 +4372,22 @@ def Exec_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, 
space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Exec_get_globals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'globals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals') return space.wrap(w_self.globals) @@ -4498,20 +4395,22 @@ def Exec_set_globals(space, w_self, w_new_value): try: w_self.globals = space.interp_w(expr, w_new_value, True) + if type(w_self.globals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'globals', w_new_value) return w_self.deldictvalue(space, 'globals') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Exec_get_locals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'locals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals') return space.wrap(w_self.locals) @@ -4519,13 +4418,15 @@ def Exec_set_locals(space, w_self, w_new_value): try: w_self.locals = space.interp_w(expr, w_new_value, True) + if type(w_self.locals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'locals', w_new_value) return w_self.deldictvalue(space, 'locals') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Exec_field_unroller = unrolling_iterable(['body', 'globals', 'locals']) def Exec_init(space, w_self, __args__): @@ -4554,7 +4455,7 @@ ) def Global_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4568,7 +4469,7 @@ def Global_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Global_field_unroller = unrolling_iterable(['names']) def Global_init(space, w_self, __args__): @@ -4600,7 +4501,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -4608,13 +4509,15 @@ def Expr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Expr_field_unroller = unrolling_iterable(['value']) def Expr_init(space, w_self, 
__args__): @@ -4696,7 +4599,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -4710,14 +4613,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def expr_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -4731,7 +4634,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 expr.typedef = typedef.TypeDef("expr", AST.typedef, @@ -4747,7 +4650,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return boolop_to_class[w_self.op - 1]() @@ -4763,10 +4666,10 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BoolOp_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -4780,7 +4683,7 @@ def BoolOp_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _BoolOp_field_unroller = unrolling_iterable(['op', 'values']) def BoolOp_init(space, w_self, __args__): @@ -4813,7 +4716,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -4821,20 +4724,22 @@ def BinOp_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BinOp_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -4850,14 +4755,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def BinOp_get_right(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'right') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right') return space.wrap(w_self.right) @@ -4865,13 +4770,15 @@ def BinOp_set_right(space, w_self, w_new_value): try: w_self.right = space.interp_w(expr, w_new_value, False) + if type(w_self.right) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'right', w_new_value) return w_self.deldictvalue(space, 'right') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _BinOp_field_unroller = unrolling_iterable(['left', 'op', 'right']) def BinOp_init(space, w_self, __args__): @@ -4904,7 +4811,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return unaryop_to_class[w_self.op - 1]() @@ -4920,14 +4827,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def UnaryOp_get_operand(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'operand') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand') return space.wrap(w_self.operand) @@ -4935,13 +4842,15 @@ def UnaryOp_set_operand(space, w_self, w_new_value): try: w_self.operand = space.interp_w(expr, w_new_value, False) + if type(w_self.operand) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'operand', w_new_value) return w_self.deldictvalue(space, 'operand') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _UnaryOp_field_unroller = unrolling_iterable(['op', 'operand']) def UnaryOp_init(space, w_self, __args__): @@ -4973,7 +4882,7 @@ w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -4987,14 +4896,14 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Lambda_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5002,13 +4911,15 @@ def Lambda_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Lambda_field_unroller = unrolling_iterable(['args', 'body']) def Lambda_init(space, w_self, __args__): @@ -5040,7 +4951,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -5048,20 +4959,22 @@ def IfExp_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def IfExp_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5069,20 +4982,22 @@ def IfExp_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def IfExp_get_orelse(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'orelse') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') return space.wrap(w_self.orelse) @@ -5090,13 +5005,15 @@ def IfExp_set_orelse(space, w_self, w_new_value): try: w_self.orelse = space.interp_w(expr, w_new_value, False) + if type(w_self.orelse) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'orelse', w_new_value) return w_self.deldictvalue(space, 'orelse') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _IfExp_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def IfExp_init(space, w_self, __args__): @@ -5125,7 +5042,7 @@ ) def Dict_get_keys(space, w_self): - if not 
w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys') if w_self.w_keys is None: @@ -5139,10 +5056,10 @@ def Dict_set_keys(space, w_self, w_new_value): w_self.w_keys = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Dict_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -5156,7 +5073,7 @@ def Dict_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Dict_field_unroller = unrolling_iterable(['keys', 'values']) def Dict_init(space, w_self, __args__): @@ -5186,7 +5103,7 @@ ) def Set_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -5200,7 +5117,7 @@ def Set_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Set_field_unroller = unrolling_iterable(['elts']) def Set_init(space, w_self, __args__): @@ -5232,7 +5149,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5240,16 +5157,18 @@ def ListComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ListComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5263,7 +5182,7 @@ def ListComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _ListComp_field_unroller = unrolling_iterable(['elt', 'generators']) def ListComp_init(space, w_self, __args__): @@ -5296,7 +5215,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5304,16 +5223,18 @@ def SetComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) 
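All of the generated setters in these hunks follow one pattern, so a condensed hand-written sketch may be easier to read than the repeated hunks. Bits 1 and 2 of initialization_state are now reserved for the lineno and col_offset attributes shared by every node, which is why the per-field flags shift from 1/2/4 to 4/8/16, and the added "if type(...) is expr" check rejects the abstract expr base class itself while still accepting its concrete subclasses. FooNode and its 'bar' field below are invented for illustration and do not appear in the diff:

    def FooNode_set_bar(space, w_self, w_new_value):
        try:
            w_self.bar = space.interp_w(expr, w_new_value, False)
            if type(w_self.bar) is expr:
                # the bare abstract base class is not an acceptable value
                raise OperationError(space.w_TypeError, space.w_None)
        except OperationError, e:
            if not e.match(space, space.w_TypeError):
                raise
            # not an AST node at all: stash the object in the instance dict
            w_self.setdictvalue(space, 'bar', w_new_value)
            return
        w_self.deldictvalue(space, 'bar')
        w_self.initialization_state |= 4   # bits 1 and 2 belong to lineno/col_offset
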
except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def SetComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5327,7 +5248,7 @@ def SetComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _SetComp_field_unroller = unrolling_iterable(['elt', 'generators']) def SetComp_init(space, w_self, __args__): @@ -5360,7 +5281,7 @@ w_obj = w_self.getdictvalue(space, 'key') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key') return space.wrap(w_self.key) @@ -5368,20 +5289,22 @@ def DictComp_set_key(space, w_self, w_new_value): try: w_self.key = space.interp_w(expr, w_new_value, False) + if type(w_self.key) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'key', w_new_value) return w_self.deldictvalue(space, 'key') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def DictComp_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5389,16 +5312,18 @@ def DictComp_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def DictComp_get_generators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5412,7 +5337,7 @@ def DictComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _DictComp_field_unroller = unrolling_iterable(['key', 'value', 'generators']) def DictComp_init(space, w_self, __args__): @@ -5446,7 +5371,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5454,16 +5379,18 @@ def GeneratorExp_set_elt(space, 
w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def GeneratorExp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5477,7 +5404,7 @@ def GeneratorExp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _GeneratorExp_field_unroller = unrolling_iterable(['elt', 'generators']) def GeneratorExp_init(space, w_self, __args__): @@ -5510,7 +5437,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5518,13 +5445,15 @@ def Yield_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Yield_field_unroller = unrolling_iterable(['value']) def Yield_init(space, w_self, __args__): @@ -5555,7 +5484,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -5563,16 +5492,18 @@ def Compare_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Compare_get_ops(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops') if w_self.w_ops is None: @@ -5586,10 +5517,10 @@ def Compare_set_ops(space, w_self, w_new_value): w_self.w_ops = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Compare_get_comparators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators') if w_self.w_comparators is None: @@ -5603,7 +5534,7 @@ 
def Compare_set_comparators(space, w_self, w_new_value): w_self.w_comparators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Compare_field_unroller = unrolling_iterable(['left', 'ops', 'comparators']) def Compare_init(space, w_self, __args__): @@ -5638,7 +5569,7 @@ w_obj = w_self.getdictvalue(space, 'func') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func') return space.wrap(w_self.func) @@ -5646,16 +5577,18 @@ def Call_set_func(space, w_self, w_new_value): try: w_self.func = space.interp_w(expr, w_new_value, False) + if type(w_self.func) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'func', w_new_value) return w_self.deldictvalue(space, 'func') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Call_get_args(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') if w_self.w_args is None: @@ -5669,10 +5602,10 @@ def Call_set_args(space, w_self, w_new_value): w_self.w_args = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Call_get_keywords(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') if w_self.w_keywords is None: @@ -5686,14 +5619,14 @@ def Call_set_keywords(space, w_self, w_new_value): w_self.w_keywords = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def Call_get_starargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'starargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') return space.wrap(w_self.starargs) @@ -5701,20 +5634,22 @@ def Call_set_starargs(space, w_self, w_new_value): try: w_self.starargs = space.interp_w(expr, w_new_value, True) + if type(w_self.starargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'starargs', w_new_value) return w_self.deldictvalue(space, 'starargs') - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 def Call_get_kwargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'kwargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 16: + if not w_self.initialization_state & 64: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') return space.wrap(w_self.kwargs) @@ -5722,13 +5657,15 @@ def Call_set_kwargs(space, w_self, w_new_value): try: w_self.kwargs = space.interp_w(expr, w_new_value, True) + if type(w_self.kwargs) is expr: + raise OperationError(space.w_TypeError, 
space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'kwargs', w_new_value) return w_self.deldictvalue(space, 'kwargs') - w_self.initialization_state |= 16 + w_self.initialization_state |= 64 _Call_field_unroller = unrolling_iterable(['func', 'args', 'keywords', 'starargs', 'kwargs']) def Call_init(space, w_self, __args__): @@ -5765,7 +5702,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5773,13 +5710,15 @@ def Repr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Repr_field_unroller = unrolling_iterable(['value']) def Repr_init(space, w_self, __args__): @@ -5810,7 +5749,7 @@ w_obj = w_self.getdictvalue(space, 'n') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n') return w_self.n @@ -5824,7 +5763,7 @@ w_self.setdictvalue(space, 'n', w_new_value) return w_self.deldictvalue(space, 'n') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Num_field_unroller = unrolling_iterable(['n']) def Num_init(space, w_self, __args__): @@ -5855,7 +5794,7 @@ w_obj = w_self.getdictvalue(space, 's') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's') return w_self.s @@ -5869,7 +5808,7 @@ w_self.setdictvalue(space, 's', w_new_value) return w_self.deldictvalue(space, 's') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Str_field_unroller = unrolling_iterable(['s']) def Str_init(space, w_self, __args__): @@ -5900,7 +5839,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5908,20 +5847,22 @@ def Attribute_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Attribute_get_attr(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'attr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not 
w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr') return space.wrap(w_self.attr) @@ -5935,14 +5876,14 @@ w_self.setdictvalue(space, 'attr', w_new_value) return w_self.deldictvalue(space, 'attr') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Attribute_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -5958,7 +5899,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Attribute_field_unroller = unrolling_iterable(['value', 'attr', 'ctx']) def Attribute_init(space, w_self, __args__): @@ -5991,7 +5932,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5999,20 +5940,22 @@ def Subscript_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Subscript_get_slice(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'slice') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice') return space.wrap(w_self.slice) @@ -6020,20 +5963,22 @@ def Subscript_set_slice(space, w_self, w_new_value): try: w_self.slice = space.interp_w(slice, w_new_value, False) + if type(w_self.slice) is slice: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'slice', w_new_value) return w_self.deldictvalue(space, 'slice') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Subscript_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6049,7 +5994,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Subscript_field_unroller = unrolling_iterable(['value', 'slice', 'ctx']) def Subscript_init(space, w_self, __args__): @@ 
-6082,7 +6027,7 @@ w_obj = w_self.getdictvalue(space, 'id') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id') return space.wrap(w_self.id) @@ -6096,14 +6041,14 @@ w_self.setdictvalue(space, 'id', w_new_value) return w_self.deldictvalue(space, 'id') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Name_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6119,7 +6064,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Name_field_unroller = unrolling_iterable(['id', 'ctx']) def Name_init(space, w_self, __args__): @@ -6147,7 +6092,7 @@ ) def List_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6161,14 +6106,14 @@ def List_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def List_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6184,7 +6129,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _List_field_unroller = unrolling_iterable(['elts', 'ctx']) def List_init(space, w_self, __args__): @@ -6213,7 +6158,7 @@ ) def Tuple_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6227,14 +6172,14 @@ def Tuple_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Tuple_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6250,7 +6195,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Tuple_field_unroller = unrolling_iterable(['elts', 
'ctx']) def Tuple_init(space, w_self, __args__): @@ -6283,7 +6228,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return w_self.value @@ -6297,7 +6242,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Const_field_unroller = unrolling_iterable(['value']) def Const_init(space, w_self, __args__): @@ -6409,6 +6354,8 @@ def Slice_set_lower(space, w_self, w_new_value): try: w_self.lower = space.interp_w(expr, w_new_value, True) + if type(w_self.lower) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6430,6 +6377,8 @@ def Slice_set_upper(space, w_self, w_new_value): try: w_self.upper = space.interp_w(expr, w_new_value, True) + if type(w_self.upper) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6451,6 +6400,8 @@ def Slice_set_step(space, w_self, w_new_value): try: w_self.step = space.interp_w(expr, w_new_value, True) + if type(w_self.step) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6540,6 +6491,8 @@ def Index_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6809,6 +6762,8 @@ def comprehension_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6830,6 +6785,8 @@ def comprehension_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6887,7 +6844,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -6901,14 +6858,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def excepthandler_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -6922,7 +6879,7 @@ 
w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 excepthandler.typedef = typedef.TypeDef("excepthandler", AST.typedef, @@ -6938,7 +6895,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -6946,20 +6903,22 @@ def ExceptHandler_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ExceptHandler_get_name(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -6967,16 +6926,18 @@ def ExceptHandler_set_name(space, w_self, w_new_value): try: w_self.name = space.interp_w(expr, w_new_value, True) + if type(w_self.name) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ExceptHandler_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -6990,7 +6951,7 @@ def ExceptHandler_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ExceptHandler_field_unroller = unrolling_iterable(['type', 'name', 'body']) def ExceptHandler_init(space, w_self, __args__): @@ -7164,6 +7125,8 @@ def keyword_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -1,6 +1,5 @@ """codegen helpers and AST constant folding.""" import sys -import itertools from pypy.interpreter.astcompiler import ast, consts, misc from pypy.tool import stdlib_opcode as ops @@ -146,8 +145,7 @@ } unrolling_unary_folders = unrolling_iterable(unary_folders.items()) -for folder in itertools.chain(binary_folders.itervalues(), - unary_folders.itervalues()): +for folder in binary_folders.values() + unary_folders.values(): folder._always_inline_ = True del folder diff --git 
a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -79,6 +79,7 @@ else: self.emit("class %s(AST):" % (base,)) if sum.attributes: + self.emit("") args = ", ".join(attr.name.value for attr in sum.attributes) self.emit("def __init__(self, %s):" % (args,), 1) for attr in sum.attributes: @@ -114,7 +115,7 @@ else: names.append(repr(field.name.value)) sub = (", ".join(names), name.value) - self.emit("missing_field(space, self.initialization_state, [%s], %r)" + self.emit("self.missing_field(space, [%s], %r)" % sub, 3) self.emit("else:", 2) # Fill in all the default fields. @@ -195,17 +196,13 @@ def visitConstructor(self, cons, base, extra_attributes): self.emit("class %s(%s):" % (cons.name, base)) self.emit("") - for field in self.data.cons_attributes[cons]: - subst = (field.name, self.data.field_masks[field]) - self.emit("_%s_mask = %i" % subst, 1) - self.emit("") self.make_constructor(cons.fields, cons, extra_attributes, base) self.emit("") self.emit("def walkabout(self, visitor):", 1) self.emit("visitor.visit_%s(self)" % (cons.name,), 2) self.emit("") self.make_mutate_over(cons, cons.name) - self.make_var_syncer(cons.fields + self.data.cons_attributes[cons], + self.make_var_syncer(self.data.cons_attributes[cons] + cons.fields, cons, cons.name) def visitField(self, field): @@ -324,7 +321,7 @@ def visitSum(self, sum, name): for field in sum.attributes: - self.make_property(field, name, True) + self.make_property(field, name) self.make_typedef(name, "AST", sum.attributes, fields_name="_attributes") if not is_simple_sum(sum): @@ -400,13 +397,10 @@ def visitField(self, field, name): self.make_property(field, name) - def make_property(self, field, name, different_masks=False): + def make_property(self, field, name): func = "def %s_get_%s(space, w_self):" % (name, field.name) self.emit(func) - if different_masks: - flag = "w_self._%s_mask" % (field.name,) - else: - flag = self.data.field_masks[field] + flag = self.data.field_masks[field] if not field.seq: self.emit("if w_self.w_dict is not None:", 1) self.emit(" w_obj = w_self.getdictvalue(space, '%s')" % (field.name,), 1) @@ -458,6 +452,11 @@ config = (field.name, field.type, repr(field.opt)) self.emit("w_self.%s = space.interp_w(%s, w_new_value, %s)" % config, 2) + if field.type.value not in self.data.prod_simple: + self.emit("if type(w_self.%s) is %s:" % ( + field.name, field.type), 2) + self.emit("raise OperationError(space.w_TypeError, " + "space.w_None)", 3) else: level = 2 if field.opt and field.type.value != "int": @@ -505,7 +504,10 @@ optional_mask = 0 for i, field in enumerate(fields): flag = 1 << i - field_masks[field] = flag + if field not in field_masks: + field_masks[field] = flag + else: + assert field_masks[field] == flag if field.opt: optional_mask |= flag else: @@ -518,9 +520,9 @@ if is_simple_sum(sum): simple_types.add(tp.name.value) else: + attrs = [field for field in sum.attributes] for cons in sum.types: - attrs = [copy_field(field) for field in sum.attributes] - add_masks(cons.fields + attrs, cons) + add_masks(attrs + cons.fields, cons) cons_attributes[cons] = attrs else: prod = tp.value @@ -588,6 +590,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." 
+ state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \\"%s\\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \\"%s\\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -631,15 +651,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." - for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \\"%s\\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) - raise AssertionError("should not reach here") """ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1,4 +1,3 @@ -import itertools import pypy from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction @@ -188,6 +187,12 @@ # ------------------------------------------------------------------- + def is_w(self, space, w_other): + return self is w_other + + def immutable_unique_id(self, space): + return None + def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) raise OperationError(space.w_TypeError, w_msg) @@ -482,6 +487,16 @@ 'parser', 'fcntl', '_codecs', 'binascii' ] + # These modules are treated like CPython treats built-in modules, + # i.e. they always shadow any xx.py. The other modules are treated + # like CPython treats extension modules, and are loaded in sys.path + # order by the fake entry '.../lib_pypy/__extensions__'. + MODULES_THAT_ALWAYS_SHADOW = dict.fromkeys([ + '__builtin__', '__pypy__', '_ast', '_codecs', '_sre', '_warnings', + '_weakref', 'errno', 'exceptions', 'gc', 'imp', 'marshal', + 'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport', + ], None) + def make_builtins(self): "NOT_RPYTHON: only for initializing the space." @@ -513,8 +528,8 @@ exception_types_w = self.export_builtin_exceptions() # initialize with "bootstrap types" from objspace (e.g. w_None) - types_w = itertools.chain(self.get_builtin_types().iteritems(), - exception_types_w.iteritems()) + types_w = (self.get_builtin_types().items() + + exception_types_w.items()) for name, w_type in types_w: self.setitem(self.builtin.w_dict, self.wrap(name), w_type) @@ -681,9 +696,20 @@ """shortcut for space.is_true(space.eq(w_obj1, w_obj2))""" return self.is_w(w_obj1, w_obj2) or self.is_true(self.eq(w_obj1, w_obj2)) - def is_w(self, w_obj1, w_obj2): - """shortcut for space.is_true(space.is_(w_obj1, w_obj2))""" - return self.is_true(self.is_(w_obj1, w_obj2)) + def is_(self, w_one, w_two): + return self.newbool(self.is_w(w_one, w_two)) + + def is_w(self, w_one, w_two): + # done by a method call on w_two (and not on w_one, because of the + # expected programming style where we say "if x is None" or + # "if x is object"). 
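The hunks above add a default is_w()/immutable_unique_id() pair to the base wrapped-object class and route the object space's identity operations through them (the delegation itself, return w_two.is_w(self, w_one), continues just below). A minimal sketch of how an immutable wrapper could use these hooks; W_SmallIntBox and its behaviour are assumptions made for illustration only, not code from this changeset:

    class W_SmallIntBox(W_Root):
        # hypothetical immutable wrapper: two boxes holding the same int
        # should behave as "the same object" under 'is' and id()
        def __init__(self, intval):
            self.intval = intval

        def is_w(self, space, w_other):
            return (isinstance(w_other, W_SmallIntBox) and
                    self.intval == w_other.intval)

        def immutable_unique_id(self, space):
            # any stable, value-derived wrapped object will do for the sketch
            return space.wrap(self.intval)
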
+ return w_two.is_w(self, w_one) + + def id(self, w_obj): + w_result = w_obj.immutable_unique_id(self) + if w_result is None: + w_result = self.wrap(compute_unique_id(w_obj)) + return w_result def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" @@ -879,6 +905,16 @@ """ return self.unpackiterable(w_iterable, expected_length) + def listview_str(self, w_list): + """ Return a list of unwrapped strings out of a list of strings. If the + argument is not a list or does not contain only strings, return None. + May return None anyway. + """ + return None + + def newlist_str(self, list_s): + return self.newlist([self.wrap(s) for s in list_s]) + @jit.unroll_safe def exception_match(self, w_exc_type, w_check_class): """Checks if the given exception type matches 'w_check_class'.""" @@ -1013,9 +1049,6 @@ def isinstance_w(self, w_obj, w_type): return self.is_true(self.isinstance(w_obj, w_type)) - def id(self, w_obj): - return self.wrap(compute_unique_id(w_obj)) - # The code below only works # for the simple case (new-style instance). # These methods are patched with the full logic by the __builtin__ @@ -1587,6 +1620,8 @@ 'UnicodeError', 'ValueError', 'ZeroDivisionError', + 'UnicodeEncodeError', + 'UnicodeDecodeError', ] ## Irregular part of the interface: diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -1,8 +1,9 @@ +from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.gateway import NoneNotWrapped +from pypy.interpreter.pyopcode import LoopBlock from pypy.rlib import jit -from pypy.interpreter.pyopcode import LoopBlock +from pypy.rlib.objectmodel import specialize class GeneratorIterator(Wrappable): @@ -156,38 +157,43 @@ break block = block.previous - def unpack_into(self, results_w): - """This is a hack for performance: runs the generator and collects - all produced items in a list.""" - # XXX copied and simplified version of send_ex() - space = self.space - if self.running: - raise OperationError(space.w_ValueError, - space.wrap('generator already executing')) - frame = self.frame - if frame is None: # already finished - return - self.running = True - try: - pycode = self.pycode - while True: - jitdriver.jit_merge_point(self=self, frame=frame, - results_w=results_w, - pycode=pycode) - try: - w_result = frame.execute_frame(space.w_None) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break - # if the frame is now marked as finished, it was RETURNed from - if frame.frame_finished_execution: - break - results_w.append(w_result) # YIELDed - finally: - frame.f_backref = jit.vref_None - self.running = False - self.frame = None - -jitdriver = jit.JitDriver(greens=['pycode'], - reds=['self', 'frame', 'results_w']) + # Results can be either an RPython list of W_Root, or it can be an + # app-level W_ListObject, which also has an append() method, that's why we + # generate 2 versions of the function and 2 jit drivers. 
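The comment above explains why unpack_into is built twice: RPython needs one copy of the loop per concrete type of 'results' (an RPython list of W_Root versus an app-level list object), each with its own JitDriver, and the factory that produces the two copies follows next in the diff. A toy plain-Python sketch of that closure-factory idiom; make_worker and its names are invented for illustration:

    def make_worker():
        # each call creates an independent function object with its own
        # closed-over state, standing in for a per-copy JitDriver
        driver_state = {'merge_points': 0}
        def worker(results):
            driver_state['merge_points'] += 1
            results.append(driver_state['merge_points'])
        return worker

    worker_for_plain_lists = make_worker()
    worker_for_wrapped_lists = make_worker()
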
+ def _create_unpack_into(): + jitdriver = jit.JitDriver(greens=['pycode'], + reds=['self', 'frame', 'results']) + def unpack_into(self, results): + """This is a hack for performance: runs the generator and collects + all produced items in a list.""" + # XXX copied and simplified version of send_ex() + space = self.space + if self.running: + raise OperationError(space.w_ValueError, + space.wrap('generator already executing')) + frame = self.frame + if frame is None: # already finished + return + self.running = True + try: + pycode = self.pycode + while True: + jitdriver.jit_merge_point(self=self, frame=frame, + results=results, pycode=pycode) + try: + w_result = frame.execute_frame(space.w_None) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break + # if the frame is now marked as finished, it was RETURNed from + if frame.frame_finished_execution: + break + results.append(w_result) # YIELDed + finally: + frame.f_backref = jit.vref_None + self.running = False + self.frame = None + return unpack_into + unpack_into = _create_unpack_into() + unpack_into_w = _create_unpack_into() \ No newline at end of file diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated, instantiate from pypy.rlib.jit import hint from pypy.rlib.debug import make_sure_not_resized, check_nonneg -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rlib import jit from pypy.tool import stdlib_opcode from pypy.tool.stdlib_opcode import host_bytecode_spec @@ -167,7 +167,7 @@ # Execution starts just after the last_instr. Initially, # last_instr is -1. After a generator suspends it points to # the YIELD_VALUE instruction. 
- next_instr = self.last_instr + 1 + next_instr = r_uint(self.last_instr + 1) if next_instr != 0: self.pushvalue(w_inputvalue) # @@ -691,6 +691,7 @@ handlerposition = space.int_w(w_handlerposition) valuestackdepth = space.int_w(w_valuestackdepth) assert valuestackdepth >= 0 + assert handlerposition >= 0 blk = instantiate(get_block_class(opname)) blk.handlerposition = handlerposition blk.valuestackdepth = valuestackdepth diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -837,6 +837,7 @@ raise Yield def jump_absolute(self, jumpto, next_instr, ec): + check_nonneg(jumpto) return jumpto def JUMP_FORWARD(self, jumpby, next_instr): @@ -1278,7 +1279,7 @@ def handle(self, frame, unroller): next_instr = self.really_handle(frame, unroller) # JIT hack - return next_instr + return r_uint(next_instr) def really_handle(self, frame, unroller): """ Purely abstract method diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -587,7 +587,7 @@ assert isinstance(meth2, Method) assert meth2.call_args(args) == obj1 # Check method returned from unbound_method.__get__() - w_meth3 = descr_function_get(space, func, None, space.type(obj2)) + w_meth3 = descr_function_get(space, func, space.w_None, space.type(obj2)) meth3 = space.unwrap(w_meth3) w_meth4 = meth3.descr_method_get(obj2, space.w_None) meth4 = space.unwrap(w_meth4) diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -63,10 +63,13 @@ def test_unpackiterable(self): space = self.space w = space.wrap - l = [w(1), w(2), w(3), w(4)] + l = [space.newlist([]) for l in range(4)] w_l = space.newlist(l) - assert space.unpackiterable(w_l) == l - assert space.unpackiterable(w_l, 4) == l + l1 = space.unpackiterable(w_l) + l2 = space.unpackiterable(w_l, 4) + for i in range(4): + assert space.is_w(l1[i], l[i]) + assert space.is_w(l2[i], l[i]) err = raises(OperationError, space.unpackiterable, w_l, 3) assert err.value.match(space, space.w_ValueError) err = raises(OperationError, space.unpackiterable, w_l, 5) diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -54,7 +54,11 @@ # Hash support def default_identity_hash(space, w_obj): - return space.wrap(compute_identity_hash(w_obj)) + w_unique_id = w_obj.immutable_unique_id(space) + if w_unique_id is None: # common case + return space.wrap(compute_identity_hash(w_obj)) + else: + return space.hash(w_unique_id) # ____________________________________________________________ # diff --git a/pypy/jit/backend/conftest.py b/pypy/jit/backend/conftest.py --- a/pypy/jit/backend/conftest.py +++ b/pypy/jit/backend/conftest.py @@ -12,7 +12,7 @@ help="choose a fixed random seed") group.addoption('--backend', action="store", default='llgraph', - choices=['llgraph', 'x86'], + choices=['llgraph', 'cpu'], dest="backend", help="select the backend to run the functions with") group.addoption('--block-length', action="store", type="int", diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -8,6 +8,7 @@ from pypy.objspace.flow.model import Variable, Constant from pypy.annotation import model 
as annmodel from pypy.jit.metainterp.history import REF, INT, FLOAT +from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -20,6 +21,7 @@ from pypy.jit.backend.llgraph import symbolic from pypy.jit.codewriter import longlong +from pypy.rlib import libffi, clibffi from pypy.rlib.objectmodel import ComputedIntSymbolic, we_are_translated from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint @@ -47,6 +49,11 @@ value._the_opaque_pointer = op return op +def _normalize(value): + if isinstance(value, lltype._ptr): + value = lltype.top_container(value._obj) + return value + def from_opaque_string(s): if isinstance(s, str): return s @@ -321,16 +328,24 @@ _variables.append(v) return r +def compile_started_vars(clt): + if not hasattr(clt, '_debug_argtypes'): # only when compiling the loop + argtypes = [v.concretetype for v in _variables] + try: + clt._debug_argtypes = argtypes + except AttributeError: # when 'clt' is actually a translated + pass # GcStruct + def compile_add(loop, opnum): loop = _from_opaque(loop) loop.operations.append(Operation(opnum)) -def compile_add_descr(loop, ofs, type, arg_types): +def compile_add_descr(loop, ofs, type, arg_types, extrainfo, width): from pypy.jit.backend.llgraph.runner import Descr loop = _from_opaque(loop) op = loop.operations[-1] assert isinstance(type, str) and len(type) == 1 - op.descr = Descr(ofs, type, arg_types=arg_types) + op.descr = Descr(ofs, type, arg_types=arg_types, extrainfo=extrainfo, width=width) def compile_add_descr_arg(loop, ofs, type, arg_types): from pypy.jit.backend.llgraph.runner import Descr @@ -346,6 +361,16 @@ op = loop.operations[-1] op.descr = weakref.ref(descr) +TARGET_TOKENS = weakref.WeakKeyDictionary() + +def compile_add_target_token(loop, descr, clt): + # here, 'clt' is the compiled_loop_token of the original loop that + # we are compiling + loop = _from_opaque(loop) + op = loop.operations[-1] + descrobj = _normalize(descr) + TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args, clt + def compile_add_var(loop, intvar): loop = _from_opaque(loop) op = loop.operations[-1] @@ -380,13 +405,25 @@ _variables.append(v) return r -def compile_add_jump_target(loop, loop_target): +def compile_add_jump_target(loop, targettoken, source_clt): loop = _from_opaque(loop) - loop_target = _from_opaque(loop_target) + descrobj = _normalize(targettoken) + (loop_target, target_opindex, target_inputargs, target_clt + ) = TARGET_TOKENS[descrobj] + # + try: + assert source_clt._debug_argtypes == target_clt._debug_argtypes + except AttributeError: # when translated + pass + # op = loop.operations[-1] op.jump_target = loop_target + op.jump_target_opindex = target_opindex + op.jump_target_inputargs = target_inputargs assert op.opnum == rop.JUMP - assert len(op.args) == len(loop_target.inputargs) + assert [v.concretetype for v in op.args] == ( + [v.concretetype for v in target_inputargs]) + # if loop_target == loop: log.info("compiling new loop") else: @@ -520,10 +557,11 @@ self.opindex += 1 continue if op.opnum == rop.JUMP: - assert len(op.jump_target.inputargs) == len(args) - self.env = dict(zip(op.jump_target.inputargs, args)) + inputargs = op.jump_target_inputargs + assert len(inputargs) == len(args) + self.env = dict(zip(inputargs, args)) self.loop = op.jump_target - self.opindex = 0 + self.opindex = op.jump_target_opindex _stats.exec_jumps 
+= 1 elif op.opnum == rop.FINISH: if self.verbose: @@ -616,6 +654,15 @@ # return _op_default_implementation + def op_label(self, _, *args): + op = self.loop.operations[self.opindex] + assert op.opnum == rop.LABEL + assert len(op.args) == len(args) + newenv = {} + for v, value in zip(op.args, args): + newenv[v] = value + self.env = newenv + def op_debug_merge_point(self, _, *args): from pypy.jit.metainterp.warmspot import get_stats try: @@ -825,6 +872,16 @@ else: raise NotImplementedError + def op_getinteriorfield_raw(self, descr, array, index): + if descr.typeinfo == REF: + return do_getinteriorfield_raw_ptr(array, index, descr.width, descr.ofs) + elif descr.typeinfo == INT: + return do_getinteriorfield_raw_int(array, index, descr.width, descr.ofs) + elif descr.typeinfo == FLOAT: + return do_getinteriorfield_raw_float(array, index, descr.width, descr.ofs) + else: + raise NotImplementedError + def op_setinteriorfield_gc(self, descr, array, index, newvalue): if descr.typeinfo == REF: return do_setinteriorfield_gc_ptr(array, index, descr.ofs, @@ -838,6 +895,16 @@ else: raise NotImplementedError + def op_setinteriorfield_raw(self, descr, array, index, newvalue): + if descr.typeinfo == REF: + return do_setinteriorfield_raw_ptr(array, index, newvalue, descr.width, descr.ofs) + elif descr.typeinfo == INT: + return do_setinteriorfield_raw_int(array, index, newvalue, descr.width, descr.ofs) + elif descr.typeinfo == FLOAT: + return do_setinteriorfield_raw_float(array, index, newvalue, descr.width, descr.ofs) + else: + raise NotImplementedError + def op_setfield_gc(self, fielddescr, struct, newvalue): if fielddescr.typeinfo == REF: do_setfield_gc_ptr(struct, fielddescr.ofs, newvalue) @@ -938,6 +1005,7 @@ self._may_force = self.opindex try: inpargs = _from_opaque(ctl.compiled_version).inputargs + assert len(inpargs) == len(args) for i, inparg in enumerate(inpargs): TYPE = inparg.concretetype if TYPE is lltype.Signed: @@ -1403,6 +1471,18 @@ struct = array._obj.container.getitem(index) return cast_to_ptr(_getinteriorfield_gc(struct, fieldnum)) +def _getinteriorfield_raw(ffitype, array, index, width, ofs): + addr = rffi.cast(rffi.VOIDP, array) + return libffi.array_getitem(ffitype, width, addr, index, ofs) + +def do_getinteriorfield_raw_int(array, index, width, ofs): + res = _getinteriorfield_raw(libffi.types.slong, array, index, width, ofs) + return res + +def do_getinteriorfield_raw_float(array, index, width, ofs): + res = _getinteriorfield_raw(libffi.types.double, array, index, width, ofs) + return res + def _getfield_raw(struct, fieldnum): STRUCT, fieldname = symbolic.TokenToField[fieldnum] ptr = cast_from_int(lltype.Ptr(STRUCT), struct) @@ -1479,7 +1559,19 @@ return do_setinteriorfield_gc do_setinteriorfield_gc_int = new_setinteriorfield_gc(cast_from_int) do_setinteriorfield_gc_float = new_setinteriorfield_gc(cast_from_floatstorage) -do_setinteriorfield_gc_ptr = new_setinteriorfield_gc(cast_from_ptr) +do_setinteriorfield_gc_ptr = new_setinteriorfield_gc(cast_from_ptr) + +def new_setinteriorfield_raw(cast_func, ffitype): + def do_setinteriorfield_raw(array, index, newvalue, width, ofs): + addr = rffi.cast(rffi.VOIDP, array) + for TYPE, ffitype2 in clibffi.ffitype_map: + if ffitype2 is ffitype: + newvalue = cast_func(TYPE, newvalue) + break + return libffi.array_setitem(ffitype, width, addr, index, ofs, newvalue) + return do_setinteriorfield_raw +do_setinteriorfield_raw_int = new_setinteriorfield_raw(cast_from_int, libffi.types.slong) +do_setinteriorfield_raw_float = 
new_setinteriorfield_raw(cast_from_floatstorage, libffi.types.double) def do_setfield_raw_int(struct, fieldnum, newvalue): STRUCT, fieldname = symbolic.TokenToField[fieldnum] @@ -1743,9 +1835,11 @@ setannotation(compile_start_int_var, annmodel.SomeInteger()) setannotation(compile_start_ref_var, annmodel.SomeInteger()) setannotation(compile_start_float_var, annmodel.SomeInteger()) +setannotation(compile_started_vars, annmodel.s_None) setannotation(compile_add, annmodel.s_None) setannotation(compile_add_descr, annmodel.s_None) setannotation(compile_add_descr_arg, annmodel.s_None) +setannotation(compile_add_target_token, annmodel.s_None) setannotation(compile_add_var, annmodel.s_None) setannotation(compile_add_int_const, annmodel.s_None) setannotation(compile_add_ref_const, annmodel.s_None) diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -23,8 +23,10 @@ class Descr(history.AbstractDescr): def __init__(self, ofs, typeinfo, extrainfo=None, name=None, - arg_types=None, count_fields_if_immut=-1, ffi_flags=0): + arg_types=None, count_fields_if_immut=-1, ffi_flags=0, width=-1): + self.ofs = ofs + self.width = width self.typeinfo = typeinfo self.extrainfo = extrainfo self.name = name @@ -35,7 +37,7 @@ def get_arg_types(self): return self.arg_types - def get_return_type(self): + def get_result_type(self): return self.typeinfo def get_extra_info(self): @@ -119,14 +121,14 @@ return False def getdescr(self, ofs, typeinfo='?', extrainfo=None, name=None, - arg_types=None, count_fields_if_immut=-1, ffi_flags=0): + arg_types=None, count_fields_if_immut=-1, ffi_flags=0, width=-1): key = (ofs, typeinfo, extrainfo, name, arg_types, - count_fields_if_immut, ffi_flags) + count_fields_if_immut, ffi_flags, width) try: return self._descrs[key] except KeyError: descr = Descr(ofs, typeinfo, extrainfo, name, arg_types, - count_fields_if_immut, ffi_flags) + count_fields_if_immut, ffi_flags, width) self._descrs[key] = descr return descr @@ -136,29 +138,30 @@ clt = original_loop_token.compiled_loop_token clt.loop_and_bridges.append(c) clt.compiling_a_bridge() - self._compile_loop_or_bridge(c, inputargs, operations) + self._compile_loop_or_bridge(c, inputargs, operations, clt) old, oldindex = faildescr._compiled_fail llimpl.compile_redirect_fail(old, oldindex, c) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, inputargs, operations, jitcell_token, + log=True, name=''): """In a real assembler backend, this should assemble the given list of operations. Here we just generate a similar CompiledLoop instance. The code here is RPython, whereas the code in llimpl is not. 
""" c = llimpl.compile_start() - clt = model.CompiledLoopToken(self, looptoken.number) + clt = model.CompiledLoopToken(self, jitcell_token.number) clt.loop_and_bridges = [c] clt.compiled_version = c - looptoken.compiled_loop_token = clt - self._compile_loop_or_bridge(c, inputargs, operations) + jitcell_token.compiled_loop_token = clt + self._compile_loop_or_bridge(c, inputargs, operations, clt) def free_loop_and_bridges(self, compiled_loop_token): for c in compiled_loop_token.loop_and_bridges: llimpl.mark_as_free(c) model.AbstractCPU.free_loop_and_bridges(self, compiled_loop_token) - def _compile_loop_or_bridge(self, c, inputargs, operations): + def _compile_loop_or_bridge(self, c, inputargs, operations, clt): var2index = {} for box in inputargs: if isinstance(box, history.BoxInt): @@ -170,19 +173,23 @@ var2index[box] = llimpl.compile_start_float_var(c) else: raise Exception("box is: %r" % (box,)) - self._compile_operations(c, operations, var2index) + llimpl.compile_started_vars(clt) + self._compile_operations(c, operations, var2index, clt) return c - def _compile_operations(self, c, operations, var2index): + def _compile_operations(self, c, operations, var2index, clt): for op in operations: llimpl.compile_add(c, op.getopnum()) descr = op.getdescr() if isinstance(descr, Descr): llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo, - descr.arg_types) - if (isinstance(descr, history.LoopToken) and - op.getopnum() != rop.JUMP): + descr.arg_types, descr.extrainfo, + descr.width) + if isinstance(descr, history.JitCellToken): + assert op.getopnum() != rop.JUMP llimpl.compile_add_loop_token(c, descr) + if isinstance(descr, history.TargetToken) and op.getopnum() == rop.LABEL: + llimpl.compile_add_target_token(c, descr, clt) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython c._obj.externalobj.operations[-1].setdescr(descr) @@ -236,9 +243,7 @@ assert op.is_final() if op.getopnum() == rop.JUMP: targettoken = op.getdescr() - assert isinstance(targettoken, history.LoopToken) - compiled_version = targettoken.compiled_loop_token.compiled_version - llimpl.compile_add_jump_target(c, compiled_version) + llimpl.compile_add_jump_target(c, targettoken, clt) elif op.getopnum() == rop.FINISH: faildescr = op.getdescr() index = self.get_fail_descr_number(faildescr) @@ -257,21 +262,28 @@ self.latest_frame = frame return fail_index - def execute_token(self, loop_token): - """Calls the assembler generated for the given loop. - Returns the ResOperation that failed, of type rop.FAIL. 
- """ - fail_index = self._execute_token(loop_token) - return self.get_fail_descr_from_number(fail_index) - - def set_future_value_int(self, index, intvalue): - llimpl.set_future_value_int(index, intvalue) - - def set_future_value_ref(self, index, objvalue): - llimpl.set_future_value_ref(index, objvalue) - - def set_future_value_float(self, index, floatvalue): - llimpl.set_future_value_float(index, floatvalue) + def make_execute_token(self, *argtypes): + nb_args = len(argtypes) + unroll_argtypes = unrolling_iterable(list(enumerate(argtypes))) + # + def execute_token(loop_token, *args): + assert len(args) == nb_args + for index, TYPE in unroll_argtypes: + x = args[index] + assert TYPE == lltype.typeOf(x) + if TYPE == lltype.Signed: + llimpl.set_future_value_int(index, x) + elif TYPE == llmemory.GCREF: + llimpl.set_future_value_ref(index, x) + elif TYPE == longlong.FLOATSTORAGE: + llimpl.set_future_value_float(index, x) + else: + assert 0 + # + fail_index = self._execute_token(loop_token) + return self.get_fail_descr_from_number(fail_index) + # + return execute_token def get_latest_value_int(self, index): return llimpl.frame_int_getvalue(self.latest_frame, index) @@ -324,10 +336,22 @@ def interiorfielddescrof(self, A, fieldname): S = A.OF - ofs2 = symbolic.get_size(A) + width = symbolic.get_size(A) ofs, size = symbolic.get_field_token(S, fieldname) token = history.getkind(getattr(S, fieldname)) - return self.getdescr(ofs, token[0], name=fieldname, extrainfo=ofs2) + return self.getdescr(ofs, token[0], name=fieldname, width=width) + + def interiorfielddescrof_dynamic(self, offset, width, fieldsize, + is_pointer, is_float, is_signed): + + if is_pointer: + typeinfo = REF + elif is_float: + typeinfo = FLOAT + else: + typeinfo = INT + # we abuse the arg_types field to distinguish dynamic and static descrs + return Descr(offset, typeinfo, arg_types='dynamic', name='', width=width) def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): arg_types = [] diff --git a/pypy/jit/backend/llsupport/asmmemmgr.py b/pypy/jit/backend/llsupport/asmmemmgr.py --- a/pypy/jit/backend/llsupport/asmmemmgr.py +++ b/pypy/jit/backend/llsupport/asmmemmgr.py @@ -37,25 +37,25 @@ self._add_free_block(smaller_stop, stop) stop = smaller_stop result = (start, stop) - self.total_mallocs += stop - start + self.total_mallocs += r_uint(stop - start) return result # pair (start, stop) def free(self, start, stop): """Free a block (start, stop) returned by a previous malloc().""" - self.total_mallocs -= (stop - start) + self.total_mallocs -= r_uint(stop - start) self._add_free_block(start, stop) def open_malloc(self, minsize): """Allocate at least minsize bytes. 
Returns (start, stop).""" result = self._allocate_block(minsize) (start, stop) = result - self.total_mallocs += stop - start + self.total_mallocs += r_uint(stop - start) return result def open_free(self, middle, stop): """Used for freeing the end of an open-allocated block of memory.""" if stop - middle >= self.min_fragment: - self.total_mallocs -= (stop - middle) + self.total_mallocs -= r_uint(stop - middle) self._add_free_block(middle, stop) return True else: @@ -77,7 +77,7 @@ # Hack to make sure that mcs are not within 32-bits of one # another for testing purposes rmmap.hint.pos += 0x80000000 - size - self.total_memory_allocated += size + self.total_memory_allocated += r_uint(size) data = rffi.cast(lltype.Signed, data) return self._add_free_block(data, data + size) diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -5,11 +5,7 @@ from pypy.jit.metainterp.history import AbstractDescr, getkind from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker, longlong - -# The point of the class organization in this file is to make instances -# as compact as possible. This is done by not storing the field size or -# the 'is_pointer_field' flag in the instance itself but in the class -# (in methods actually) using a few classes instead of just one. +from pypy.jit.codewriter.longlong import is_longlong class GcCache(object): @@ -19,6 +15,7 @@ self._cache_size = {} self._cache_field = {} self._cache_array = {} + self._cache_arraylen = {} self._cache_call = {} self._cache_interiorfield = {} @@ -26,24 +23,15 @@ assert isinstance(STRUCT, lltype.GcStruct) def init_array_descr(self, ARRAY, arraydescr): - assert isinstance(ARRAY, lltype.GcArray) + assert (isinstance(ARRAY, lltype.GcArray) or + isinstance(ARRAY, lltype.GcStruct) and ARRAY._arrayfld) -if lltype.SignedLongLong is lltype.Signed: - def is_longlong(TYPE): - return False -else: - assert rffi.sizeof(lltype.SignedLongLong) == rffi.sizeof(lltype.Float) - def is_longlong(TYPE): - return TYPE in (lltype.SignedLongLong, lltype.UnsignedLongLong) - # ____________________________________________________________ # SizeDescrs class SizeDescr(AbstractDescr): size = 0 # help translation - is_immutable = False - tid = llop.combine_ushort(lltype.Signed, 0, 0) def __init__(self, size, count_fields_if_immut=-1): @@ -77,247 +65,247 @@ cache[STRUCT] = sizedescr return sizedescr + # ____________________________________________________________ # FieldDescrs -class BaseFieldDescr(AbstractDescr): +FLAG_POINTER = 'P' +FLAG_FLOAT = 'F' +FLAG_UNSIGNED = 'U' +FLAG_SIGNED = 'S' +FLAG_STRUCT = 'X' +FLAG_VOID = 'V' + +class FieldDescr(AbstractDescr): + name = '' offset = 0 # help translation - name = '' - _clsname = '' + field_size = 0 + flag = '\x00' - def __init__(self, name, offset): + def __init__(self, name, offset, field_size, flag): self.name = name self.offset = offset + self.field_size = field_size + self.flag = flag + + def is_pointer_field(self): + return self.flag == FLAG_POINTER + + def is_float_field(self): + return self.flag == FLAG_FLOAT + + def is_field_signed(self): + return self.flag == FLAG_SIGNED def sort_key(self): return self.offset - def get_field_size(self, translate_support_code): - raise NotImplementedError + def repr_of_descr(self): + return '' % (self.flag, self.name, self.offset) - _is_pointer_field = False # unless overridden by GcPtrFieldDescr - _is_float_field = False # unless overridden by 
FloatFieldDescr - _is_field_signed = False # unless overridden by XxxFieldDescr - - def is_pointer_field(self): - return self._is_pointer_field - - def is_float_field(self): - return self._is_float_field - - def is_field_signed(self): - return self._is_field_signed - - def repr_of_descr(self): - return '<%s %s %s>' % (self._clsname, self.name, self.offset) - - -class NonGcPtrFieldDescr(BaseFieldDescr): - _clsname = 'NonGcPtrFieldDescr' - def get_field_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrFieldDescr(NonGcPtrFieldDescr): - _clsname = 'GcPtrFieldDescr' - _is_pointer_field = True - -def getFieldDescrClass(TYPE): - return getDescrClass(TYPE, BaseFieldDescr, GcPtrFieldDescr, - NonGcPtrFieldDescr, 'Field', 'get_field_size', - '_is_float_field', '_is_field_signed') def get_field_descr(gccache, STRUCT, fieldname): cache = gccache._cache_field try: return cache[STRUCT][fieldname] except KeyError: - offset, _ = symbolic.get_field_token(STRUCT, fieldname, - gccache.translate_support_code) + offset, size = symbolic.get_field_token(STRUCT, fieldname, + gccache.translate_support_code) FIELDTYPE = getattr(STRUCT, fieldname) + flag = get_type_flag(FIELDTYPE) name = '%s.%s' % (STRUCT._name, fieldname) - fielddescr = getFieldDescrClass(FIELDTYPE)(name, offset) + fielddescr = FieldDescr(name, offset, size, flag) cachedict = cache.setdefault(STRUCT, {}) cachedict[fieldname] = fielddescr return fielddescr +def get_type_flag(TYPE): + if isinstance(TYPE, lltype.Ptr): + if TYPE.TO._gckind == 'gc': + return FLAG_POINTER + else: + return FLAG_UNSIGNED + if isinstance(TYPE, lltype.Struct): + return FLAG_STRUCT + if TYPE is lltype.Float or is_longlong(TYPE): + return FLAG_FLOAT + if (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and + rffi.cast(TYPE, -1) == -1): + return FLAG_SIGNED + return FLAG_UNSIGNED + +def get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT): + cache = gccache._cache_arraylen + try: + return cache[ARRAY_OR_STRUCT] + except KeyError: + tsc = gccache.translate_support_code + (_, _, ofs) = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + size = symbolic.get_size(lltype.Signed, tsc) + result = FieldDescr("len", ofs, size, get_type_flag(lltype.Signed)) + cache[ARRAY_OR_STRUCT] = result + return result + + # ____________________________________________________________ # ArrayDescrs -_A = lltype.GcArray(lltype.Signed) # a random gcarray -_AF = lltype.GcArray(lltype.Float) # an array of C doubles +class ArrayDescr(AbstractDescr): + tid = 0 + basesize = 0 # workaround for the annotator + itemsize = 0 + lendescr = None + flag = '\x00' - -class BaseArrayDescr(AbstractDescr): - _clsname = '' - tid = llop.combine_ushort(lltype.Signed, 0, 0) - - def get_base_size(self, translate_support_code): - basesize, _, _ = symbolic.get_array_token(_A, translate_support_code) - return basesize - - def get_ofs_length(self, translate_support_code): - _, _, ofslength = symbolic.get_array_token(_A, translate_support_code) - return ofslength - - def get_item_size(self, translate_support_code): - raise NotImplementedError - - _is_array_of_pointers = False # unless overridden by GcPtrArrayDescr - _is_array_of_floats = False # unless overridden by FloatArrayDescr - _is_array_of_structs = False # unless overridden by StructArrayDescr - _is_item_signed = False # unless overridden by XxxArrayDescr + def __init__(self, basesize, itemsize, lendescr, flag): + self.basesize = basesize + self.itemsize = itemsize + self.lendescr = lendescr # or None, if no 
length + self.flag = flag def is_array_of_pointers(self): - return self._is_array_of_pointers + return self.flag == FLAG_POINTER def is_array_of_floats(self): - return self._is_array_of_floats + return self.flag == FLAG_FLOAT + + def is_item_signed(self): + return self.flag == FLAG_SIGNED def is_array_of_structs(self): - return self._is_array_of_structs - - def is_item_signed(self): - return self._is_item_signed + return self.flag == FLAG_STRUCT def repr_of_descr(self): - return '<%s>' % self._clsname + return '' % (self.flag, self.itemsize) -class NonGcPtrArrayDescr(BaseArrayDescr): - _clsname = 'NonGcPtrArrayDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) -class GcPtrArrayDescr(NonGcPtrArrayDescr): - _clsname = 'GcPtrArrayDescr' - _is_array_of_pointers = True - -class FloatArrayDescr(BaseArrayDescr): - _clsname = 'FloatArrayDescr' - _is_array_of_floats = True - def get_base_size(self, translate_support_code): - basesize, _, _ = symbolic.get_array_token(_AF, translate_support_code) - return basesize - def get_item_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class StructArrayDescr(BaseArrayDescr): - _clsname = 'StructArrayDescr' - _is_array_of_structs = True - -class BaseArrayNoLengthDescr(BaseArrayDescr): - def get_base_size(self, translate_support_code): - return 0 - - def get_ofs_length(self, translate_support_code): - return -1 - -class NonGcPtrArrayNoLengthDescr(BaseArrayNoLengthDescr): - _clsname = 'NonGcPtrArrayNoLengthDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayNoLengthDescr(NonGcPtrArrayNoLengthDescr): - _clsname = 'GcPtrArrayNoLengthDescr' - _is_array_of_pointers = True - -def getArrayDescrClass(ARRAY): - if ARRAY.OF is lltype.Float: - return FloatArrayDescr - elif isinstance(ARRAY.OF, lltype.Struct): - class Descr(StructArrayDescr): - _clsname = '%sArrayDescr' % ARRAY.OF._name - def get_item_size(self, translate_support_code): - return symbolic.get_size(ARRAY.OF, translate_support_code) - Descr.__name__ = Descr._clsname - return Descr - return getDescrClass(ARRAY.OF, BaseArrayDescr, GcPtrArrayDescr, - NonGcPtrArrayDescr, 'Array', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def getArrayNoLengthDescrClass(ARRAY): - return getDescrClass(ARRAY.OF, BaseArrayNoLengthDescr, GcPtrArrayNoLengthDescr, - NonGcPtrArrayNoLengthDescr, 'ArrayNoLength', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def get_array_descr(gccache, ARRAY): +def get_array_descr(gccache, ARRAY_OR_STRUCT): cache = gccache._cache_array try: - return cache[ARRAY] + return cache[ARRAY_OR_STRUCT] except KeyError: - # we only support Arrays that are either GcArrays, or raw no-length - # non-gc Arrays. 
- if ARRAY._hints.get('nolength', False): - assert not isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayNoLengthDescrClass(ARRAY)() + tsc = gccache.translate_support_code + basesize, itemsize, _ = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + if isinstance(ARRAY_OR_STRUCT, lltype.Array): + ARRAY_INSIDE = ARRAY_OR_STRUCT else: - assert isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayDescrClass(ARRAY)() - # verify basic assumption that all arrays' basesize and ofslength - # are equal - basesize, itemsize, ofslength = symbolic.get_array_token(ARRAY, False) - assert basesize == arraydescr.get_base_size(False) - assert itemsize == arraydescr.get_item_size(False) - if not ARRAY._hints.get('nolength', False): - assert ofslength == arraydescr.get_ofs_length(False) - if isinstance(ARRAY, lltype.GcArray): - gccache.init_array_descr(ARRAY, arraydescr) - cache[ARRAY] = arraydescr + ARRAY_INSIDE = ARRAY_OR_STRUCT._flds[ARRAY_OR_STRUCT._arrayfld] + if ARRAY_INSIDE._hints.get('nolength', False): + lendescr = None + else: + lendescr = get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT) + flag = get_type_flag(ARRAY_INSIDE.OF) + arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag) + if ARRAY_OR_STRUCT._gckind == 'gc': + gccache.init_array_descr(ARRAY_OR_STRUCT, arraydescr) + cache[ARRAY_OR_STRUCT] = arraydescr return arraydescr + # ____________________________________________________________ # InteriorFieldDescr class InteriorFieldDescr(AbstractDescr): - arraydescr = BaseArrayDescr() # workaround for the annotator - fielddescr = BaseFieldDescr('', 0) + arraydescr = ArrayDescr(0, 0, None, '\x00') # workaround for the annotator + fielddescr = FieldDescr('', 0, 0, '\x00') def __init__(self, arraydescr, fielddescr): + assert arraydescr.flag == FLAG_STRUCT self.arraydescr = arraydescr self.fielddescr = fielddescr + def sort_key(self): + return self.fielddescr.sort_key() + def is_pointer_field(self): return self.fielddescr.is_pointer_field() def is_float_field(self): return self.fielddescr.is_float_field() - def sort_key(self): - return self.fielddescr.sort_key() - def repr_of_descr(self): return '' % self.fielddescr.repr_of_descr() -def get_interiorfield_descr(gc_ll_descr, ARRAY, FIELDTP, name): +def get_interiorfield_descr(gc_ll_descr, ARRAY, name): cache = gc_ll_descr._cache_interiorfield try: - return cache[(ARRAY, FIELDTP, name)] + return cache[(ARRAY, name)] except KeyError: arraydescr = get_array_descr(gc_ll_descr, ARRAY) - fielddescr = get_field_descr(gc_ll_descr, FIELDTP, name) + fielddescr = get_field_descr(gc_ll_descr, ARRAY.OF, name) descr = InteriorFieldDescr(arraydescr, fielddescr) - cache[(ARRAY, FIELDTP, name)] = descr + cache[(ARRAY, name)] = descr return descr +def get_dynamic_interiorfield_descr(gc_ll_descr, offset, width, fieldsize, + is_pointer, is_float, is_signed): + arraydescr = ArrayDescr(0, width, None, FLAG_STRUCT) + if is_pointer: + assert not is_float + flag = FLAG_POINTER + elif is_float: + flag = FLAG_FLOAT + elif is_signed: + flag = FLAG_SIGNED + else: + flag = FLAG_UNSIGNED + fielddescr = FieldDescr('dynamic', offset, fieldsize, flag) + return InteriorFieldDescr(arraydescr, fielddescr) + + # ____________________________________________________________ # CallDescrs -class BaseCallDescr(AbstractDescr): - _clsname = '' - loop_token = None +class CallDescr(AbstractDescr): arg_classes = '' # <-- annotation hack + result_type = '\x00' + result_flag = '\x00' ffi_flags = 1 + call_stub_i = staticmethod(lambda func, args_i, args_r, args_f: + 0) + call_stub_r = 
staticmethod(lambda func, args_i, args_r, args_f: + lltype.nullptr(llmemory.GCREF.TO)) + call_stub_f = staticmethod(lambda func,args_i,args_r,args_f: + longlong.ZEROF) - def __init__(self, arg_classes, extrainfo=None, ffi_flags=1): - self.arg_classes = arg_classes # string of "r" and "i" (ref/int) + def __init__(self, arg_classes, result_type, result_signed, result_size, + extrainfo=None, ffi_flags=1): + """ + 'arg_classes' is a string of characters, one per argument: + 'i', 'r', 'f', 'L', 'S' + + 'result_type' is one character from the same list or 'v' + + 'result_signed' is a boolean True/False + """ + self.arg_classes = arg_classes + self.result_type = result_type + self.result_size = result_size self.extrainfo = extrainfo self.ffi_flags = ffi_flags # NB. the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which # makes sense on Windows as it's the one for all the C functions # we are compiling together with the JIT. On non-Windows platforms # it is just ignored anyway. + if result_type == 'v': + result_flag = FLAG_VOID + elif result_type == 'i': + if result_signed: + result_flag = FLAG_SIGNED + else: + result_flag = FLAG_UNSIGNED + elif result_type == history.REF: + result_flag = FLAG_POINTER + elif result_type == history.FLOAT or result_type == 'L': + result_flag = FLAG_FLOAT + elif result_type == 'S': + result_flag = FLAG_UNSIGNED + else: + raise NotImplementedError("result_type = '%s'" % (result_type,)) + self.result_flag = result_flag def __repr__(self): - res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) + res = 'CallDescr(%s)' % (self.arg_classes,) extraeffect = getattr(self.extrainfo, 'extraeffect', None) if extraeffect is not None: res += ' EF=%r' % extraeffect @@ -345,14 +333,14 @@ def get_arg_types(self): return self.arg_classes - def get_return_type(self): - return self._return_type + def get_result_type(self): + return self.result_type - def get_result_size(self, translate_support_code): - raise NotImplementedError + def get_result_size(self): + return self.result_size def is_result_signed(self): - return False # unless overridden + return self.result_flag == FLAG_SIGNED def create_call_stub(self, rtyper, RESULT): from pypy.rlib.clibffi import FFI_DEFAULT_ABI @@ -390,18 +378,26 @@ seen = {'i': 0, 'r': 0, 'f': 0} args = ", ".join([process(c) for c in self.arg_classes]) - if self.get_return_type() == history.INT: + result_type = self.get_result_type() + if result_type == history.INT: result = 'rffi.cast(lltype.Signed, res)' - elif self.get_return_type() == history.REF: + category = 'i' + elif result_type == history.REF: + assert RESULT == llmemory.GCREF # should be ensured by the caller result = 'lltype.cast_opaque_ptr(llmemory.GCREF, res)' - elif self.get_return_type() == history.FLOAT: + category = 'r' + elif result_type == history.FLOAT: result = 'longlong.getfloatstorage(res)' - elif self.get_return_type() == 'L': + category = 'f' + elif result_type == 'L': result = 'rffi.cast(lltype.SignedLongLong, res)' - elif self.get_return_type() == history.VOID: - result = 'None' - elif self.get_return_type() == 'S': + category = 'f' + elif result_type == history.VOID: + result = '0' + category = 'i' + elif result_type == 'S': result = 'longlong.singlefloat2int(res)' + category = 'i' else: assert 0 source = py.code.Source(""" @@ -415,10 +411,13 @@ d = globals().copy() d.update(locals()) exec source.compile() in d - self.call_stub = d['call_stub'] + call_stub = d['call_stub'] + # store the function into one of three attributes, to preserve + # type-correctness of the 
return value + setattr(self, 'call_stub_%s' % category, call_stub) def verify_types(self, args_i, args_r, args_f, return_type): - assert self._return_type in return_type + assert self.result_type in return_type assert (self.arg_classes.count('i') + self.arg_classes.count('S')) == len(args_i or ()) assert self.arg_classes.count('r') == len(args_r or ()) @@ -426,161 +425,48 @@ self.arg_classes.count('L')) == len(args_f or ()) def repr_of_descr(self): - return '<%s>' % self._clsname + return '' % (self.arg_classes, self.result_type) -class BaseIntCallDescr(BaseCallDescr): - # Base class of the various subclasses of descrs corresponding to - # calls having a return kind of 'int' (including non-gc pointers). - # The inheritance hierarchy is a bit different than with other Descr - # classes because of the 'call_stub' attribute, which is of type - # - # lambda func, args_i, args_r, args_f --> int/ref/float/void - # - # The purpose of BaseIntCallDescr is to be the parent of all classes - # in which 'call_stub' has a return kind of 'int'. - _return_type = history.INT - call_stub = staticmethod(lambda func, args_i, args_r, args_f: 0) - - _is_result_signed = False # can be overridden in XxxCallDescr - def is_result_signed(self): - return self._is_result_signed - -class DynamicIntCallDescr(BaseIntCallDescr): - """ - calldescr that works for every integer type, by explicitly passing it the - size of the result. Used only by get_call_descr_dynamic - """ - _clsname = 'DynamicIntCallDescr' - - def __init__(self, arg_classes, result_size, result_sign, extrainfo, ffi_flags): - BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) - assert isinstance(result_sign, bool) - self._result_size = chr(result_size) - self._result_sign = result_sign - - def get_result_size(self, translate_support_code): - return ord(self._result_size) - - def is_result_signed(self): - return self._result_sign - - -class NonGcPtrCallDescr(BaseIntCallDescr): - _clsname = 'NonGcPtrCallDescr' - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrCallDescr(BaseCallDescr): - _clsname = 'GcPtrCallDescr' - _return_type = history.REF - call_stub = staticmethod(lambda func, args_i, args_r, args_f: - lltype.nullptr(llmemory.GCREF.TO)) - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class FloatCallDescr(BaseCallDescr): - _clsname = 'FloatCallDescr' - _return_type = history.FLOAT - call_stub = staticmethod(lambda func,args_i,args_r,args_f: longlong.ZEROF) - def get_result_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class LongLongCallDescr(FloatCallDescr): - _clsname = 'LongLongCallDescr' - _return_type = 'L' - -class VoidCallDescr(BaseCallDescr): - _clsname = 'VoidCallDescr' - _return_type = history.VOID - call_stub = staticmethod(lambda func, args_i, args_r, args_f: None) - def get_result_size(self, translate_support_code): - return 0 - -_SingleFloatCallDescr = None # built lazily - -def getCallDescrClass(RESULT): - if RESULT is lltype.Void: - return VoidCallDescr - if RESULT is lltype.Float: - return FloatCallDescr - if RESULT is lltype.SingleFloat: - global _SingleFloatCallDescr - if _SingleFloatCallDescr is None: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(RESULT) - class SingleFloatCallDescr(getCallDescrClass(rffi.UINT)): - _clsname = 'SingleFloatCallDescr' - _return_type = 'S' - _SingleFloatCallDescr = SingleFloatCallDescr - return 
_SingleFloatCallDescr - if is_longlong(RESULT): - return LongLongCallDescr - return getDescrClass(RESULT, BaseIntCallDescr, GcPtrCallDescr, - NonGcPtrCallDescr, 'Call', 'get_result_size', - Ellipsis, # <= floatattrname should not be used here - '_is_result_signed') -getCallDescrClass._annspecialcase_ = 'specialize:memo' +def map_type_to_argclass(ARG, accept_void=False): + kind = getkind(ARG) + if kind == 'int': + if ARG is lltype.SingleFloat: return 'S' + else: return 'i' + elif kind == 'ref': return 'r' + elif kind == 'float': + if is_longlong(ARG): return 'L' + else: return 'f' + elif kind == 'void': + if accept_void: return 'v' + raise NotImplementedError('ARG = %r' % (ARG,)) def get_call_descr(gccache, ARGS, RESULT, extrainfo=None): - arg_classes = [] - for ARG in ARGS: - kind = getkind(ARG) - if kind == 'int': - if ARG is lltype.SingleFloat: - arg_classes.append('S') + arg_classes = map(map_type_to_argclass, ARGS) + arg_classes = ''.join(arg_classes) + result_type = map_type_to_argclass(RESULT, accept_void=True) + RESULT_ERASED = RESULT + if RESULT is lltype.Void: + result_size = 0 + result_signed = False + else: + if isinstance(RESULT, lltype.Ptr): + # avoid too many CallDescrs + if result_type == 'r': + RESULT_ERASED = llmemory.GCREF else: - arg_classes.append('i') - elif kind == 'ref': arg_classes.append('r') - elif kind == 'float': - if is_longlong(ARG): - arg_classes.append('L') - else: - arg_classes.append('f') - else: - raise NotImplementedError('ARG = %r' % (ARG,)) - arg_classes = ''.join(arg_classes) - cls = getCallDescrClass(RESULT) - key = (cls, arg_classes, extrainfo) + RESULT_ERASED = llmemory.Address + result_size = symbolic.get_size(RESULT_ERASED, + gccache.translate_support_code) + result_signed = get_type_flag(RESULT) == FLAG_SIGNED + key = (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo) cache = gccache._cache_call try: - return cache[key] + calldescr = cache[key] except KeyError: - calldescr = cls(arg_classes, extrainfo) - calldescr.create_call_stub(gccache.rtyper, RESULT) + calldescr = CallDescr(arg_classes, result_type, result_signed, + result_size, extrainfo) + calldescr.create_call_stub(gccache.rtyper, RESULT_ERASED) cache[key] = calldescr - return calldescr - - -# ____________________________________________________________ - -def getDescrClass(TYPE, BaseDescr, GcPtrDescr, NonGcPtrDescr, - nameprefix, methodname, floatattrname, signedattrname, - _cache={}): - if isinstance(TYPE, lltype.Ptr): - if TYPE.TO._gckind == 'gc': - return GcPtrDescr - else: - return NonGcPtrDescr - if TYPE is lltype.SingleFloat: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(TYPE) - TYPE = rffi.UINT - try: - return _cache[nameprefix, TYPE] - except KeyError: - # - class Descr(BaseDescr): - _clsname = '%s%sDescr' % (TYPE._name, nameprefix) - Descr.__name__ = Descr._clsname - # - def method(self, translate_support_code): - return symbolic.get_size(TYPE, translate_support_code) - setattr(Descr, methodname, method) - # - if TYPE is lltype.Float or is_longlong(TYPE): - setattr(Descr, floatattrname, True) - elif (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and - rffi.cast(TYPE, -1) == -1): - setattr(Descr, signedattrname, True) - # - _cache[nameprefix, TYPE] = Descr - return Descr + assert repr(calldescr.result_size) == repr(result_size) + return calldescr diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -1,9 +1,7 
@@ from pypy.rlib.rarithmetic import intmask from pypy.jit.metainterp import history from pypy.rpython.lltypesystem import rffi -from pypy.jit.backend.llsupport.descr import ( - DynamicIntCallDescr, NonGcPtrCallDescr, FloatCallDescr, VoidCallDescr, - LongLongCallDescr, getCallDescrClass) +from pypy.jit.backend.llsupport.descr import CallDescr class UnsupportedKind(Exception): pass @@ -16,29 +14,13 @@ argkinds = [get_ffi_type_kind(cpu, arg) for arg in ffi_args] except UnsupportedKind: return None - arg_classes = ''.join(argkinds) - if reskind == history.INT: - size = intmask(ffi_result.c_size) - signed = is_ffi_type_signed(ffi_result) - return DynamicIntCallDescr(arg_classes, size, signed, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.REF: - return NonGcPtrCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.FLOAT: - return FloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.VOID: - return VoidCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'L': - return LongLongCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'S': - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - return SingleFloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - assert False + if reskind == history.VOID: + result_size = 0 + else: + result_size = intmask(ffi_result.c_size) + argkinds = ''.join(argkinds) + return CallDescr(argkinds, reskind, is_ffi_type_signed(ffi_result), + result_size, extrainfo, ffi_flags=ffi_flags) def get_ffi_type_kind(cpu, ffi_type): from pypy.rlib.libffi import types diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,6 +1,6 @@ import os from pypy.rlib import rgc -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize from pypy.rlib.debug import fatalerror from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr @@ -8,52 +8,93 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt, ConstPtr -from pypy.jit.metainterp.history import AbstractDescr +from pypy.jit.codewriter import heaptracker +from pypy.jit.metainterp.history import ConstPtr, AbstractDescr from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.symbolic import WORD -from pypy.jit.backend.llsupport.descr import BaseSizeDescr, BaseArrayDescr +from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr -from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr +from pypy.jit.backend.llsupport.descr import get_array_descr from pypy.jit.backend.llsupport.descr import get_call_descr +from pypy.jit.backend.llsupport.rewrite import GcRewriterAssembler from pypy.rpython.memory.gctransform import asmgcroot # ____________________________________________________________ class GcLLDescription(GcCache): - minimal_size_in_nursery = 0 - get_malloc_slowpath_addr = None def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr + if translator and 
translator.config.translation.gcremovetypeptr: + self.fielddescr_vtable = None + else: + self.fielddescr_vtable = get_field_descr(self, rclass.OBJECT, + 'typeptr') + self._generated_functions = [] + + def _setup_str(self): + self.str_descr = get_array_descr(self, rstr.STR) + self.unicode_descr = get_array_descr(self, rstr.UNICODE) + + def generate_function(self, funcname, func, ARGS, RESULT=llmemory.GCREF): + """Generates a variant of malloc with the given name and the given + arguments. It should return NULL if out of memory. If it raises + anything, it must be an optional MemoryError. + """ + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, RESULT)) + descr = get_call_descr(self, ARGS, RESULT) + setattr(self, funcname, func) + setattr(self, funcname + '_FUNCPTR', FUNCPTR) + setattr(self, funcname + '_descr', descr) + self._generated_functions.append(funcname) + + @specialize.arg(1) + def get_malloc_fn(self, funcname): + func = getattr(self, funcname) + FUNC = getattr(self, funcname + '_FUNCPTR') + return llhelper(FUNC, func) + + @specialize.arg(1) + def get_malloc_fn_addr(self, funcname): + ll_func = self.get_malloc_fn(funcname) + return heaptracker.adr2int(llmemory.cast_ptr_to_adr(ll_func)) + def _freeze_(self): return True def initialize(self): pass def do_write_barrier(self, gcref_struct, gcref_newptr): pass - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - return operations - def can_inline_malloc(self, descr): - return False - def can_inline_malloc_varsize(self, descr, num_elem): + def can_use_nursery_malloc(self, size): return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): pass + def get_nursery_free_addr(self): + raise NotImplementedError + def get_nursery_top_addr(self): + raise NotImplementedError - def get_funcptr_for_newarray(self): - return llhelper(self.GC_MALLOC_ARRAY, self.malloc_array) - def get_funcptr_for_newstr(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_str) - def get_funcptr_for_newunicode(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_unicode) + def gc_malloc(self, sizedescr): + """Blackhole: do a 'bh_new'. 
Also used for 'bh_new_with_vtable', + with the vtable pointer set manually afterwards.""" + assert isinstance(sizedescr, SizeDescr) + return self._bh_malloc(sizedescr) + def gc_malloc_array(self, arraydescr, num_elem): + assert isinstance(arraydescr, ArrayDescr) + return self._bh_malloc_array(arraydescr, num_elem) - def record_constptrs(self, op, gcrefs_output_list): + def gc_malloc_str(self, num_elem): + return self._bh_malloc_array(self.str_descr, num_elem) + + def gc_malloc_unicode(self, num_elem): + return self._bh_malloc_array(self.unicode_descr, num_elem) + + def _record_constptrs(self, op, gcrefs_output_list): for i in range(op.numargs()): v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): @@ -61,11 +102,27 @@ rgc._make_sure_does_not_move(p) gcrefs_output_list.append(p) + def rewrite_assembler(self, cpu, operations, gcrefs_output_list): + rewriter = GcRewriterAssembler(self, cpu) + newops = rewriter.rewrite(operations) + # record all GCREFs, because the GC (or Boehm) cannot see them and + # keep them alive if they end up as constants in the assembler + for op in newops: + self._record_constptrs(op, gcrefs_output_list) + return newops + # ____________________________________________________________ class GcLLDescr_boehm(GcLLDescription): - moving_gc = False - gcrootmap = None + kind = 'boehm' + moving_gc = False + round_up = False + gcrootmap = None + write_barrier_descr = None + fielddescr_tid = None + str_type_id = 0 + unicode_type_id = 0 + get_malloc_slowpath_addr = None @classmethod def configure_boehm_once(cls): @@ -76,6 +133,16 @@ from pypy.rpython.tool import rffi_platform compilation_info = rffi_platform.configure_boehm() + # on some platform GC_init is required before any other + # GC_* functions, call it here for the benefit of tests + # XXX move this to tests + init_fn_ptr = rffi.llexternal("GC_init", + [], lltype.Void, + compilation_info=compilation_info, + sandboxsafe=True, + _nowrapper=True) + init_fn_ptr() + # Versions 6.x of libgc needs to use GC_local_malloc(). 
# Versions 7.x of libgc removed this function; GC_malloc() has # the same behavior if libgc was compiled with @@ -95,96 +162,42 @@ sandboxsafe=True, _nowrapper=True) cls.malloc_fn_ptr = malloc_fn_ptr - cls.compilation_info = compilation_info return malloc_fn_ptr def __init__(self, gcdescr, translator, rtyper): GcLLDescription.__init__(self, gcdescr, translator, rtyper) # grab a pointer to the Boehm 'malloc' function - malloc_fn_ptr = self.configure_boehm_once() - self.funcptr_for_new = malloc_fn_ptr + self.malloc_fn_ptr = self.configure_boehm_once() + self._setup_str() + self._make_functions() - def malloc_array(basesize, itemsize, ofs_length, num_elem): + def _make_functions(self): + + def malloc_fixedsize(size): + return self.malloc_fn_ptr(size) + self.generate_function('malloc_fixedsize', malloc_fixedsize, + [lltype.Signed]) + + def malloc_array(basesize, num_elem, itemsize, ofs_length): try: - size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) except OverflowError: return lltype.nullptr(llmemory.GCREF.TO) - res = self.funcptr_for_new(size) - if not res: - return res - rffi.cast(rffi.CArrayPtr(lltype.Signed), res)[ofs_length/WORD] = num_elem + res = self.malloc_fn_ptr(totalsize) + if res: + arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) + arrayptr[ofs_length/WORD] = num_elem return res - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 4, llmemory.GCREF)) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 4) + def _bh_malloc(self, sizedescr): + return self.malloc_fixedsize(sizedescr.size) - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, self.translate_support_code) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code) - def malloc_str(length): - return self.malloc_array( - str_basesize, str_itemsize, str_ofs_length, length - ) - def malloc_unicode(length): - return self.malloc_array( - unicode_basesize, unicode_itemsize, unicode_ofs_length, length - ) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - - - # on some platform GC_init is required before any other - # GC_* functions, call it here for the benefit of tests - # XXX move this to tests - init_fn_ptr = rffi.llexternal("GC_init", - [], lltype.Void, - compilation_info=self.compilation_info, - sandboxsafe=True, - _nowrapper=True) - - init_fn_ptr() - - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.funcptr_for_new(sizedescr.size) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(basesize, itemsize, ofs_length, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size] - - def args_for_new_array(self, arraydescr): - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = 
arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [basesize, itemsize, ofs_length] - - def get_funcptr_for_new(self): - return self.funcptr_for_new - - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # record all GCREFs too, because Boehm cannot see them and keep them - # alive if they end up as constants in the assembler - for op in operations: - self.record_constptrs(op, gcrefs_output_list) - return GcLLDescription.rewrite_assembler(self, cpu, operations, - gcrefs_output_list) + def _bh_malloc_array(self, arraydescr, num_elem): + return self.malloc_array(arraydescr.basesize, num_elem, + arraydescr.itemsize, + arraydescr.lendescr.offset) # ____________________________________________________________ @@ -554,12 +567,14 @@ class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): - GCClass = gc_ll_descr.GCClass self.llop1 = gc_ll_descr.llop1 self.WB_FUNCPTR = gc_ll_descr.WB_FUNCPTR self.WB_ARRAY_FUNCPTR = gc_ll_descr.WB_ARRAY_FUNCPTR - self.fielddescr_tid = get_field_descr(gc_ll_descr, GCClass.HDR, 'tid') + self.fielddescr_tid = gc_ll_descr.fielddescr_tid # + GCClass = gc_ll_descr.GCClass + if GCClass is None: # for tests + return self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = ( self.extract_flag_byte(self.jit_wb_if_flag)) @@ -596,48 +611,74 @@ funcaddr = llmemory.cast_ptr_to_adr(funcptr) return cpu.cast_adr_to_int(funcaddr) # this may return 0 + def has_write_barrier_from_array(self, cpu): + return self.get_write_barrier_from_array_fn(cpu) != 0 + class GcLLDescr_framework(GcLLDescription): DEBUG = False # forced to True by x86/test/test_zrpy_gc.py + kind = 'framework' + round_up = True - def __init__(self, gcdescr, translator, rtyper, llop1=llop): - from pypy.rpython.memory.gctypelayout import check_typeid - from pypy.rpython.memory.gcheader import GCHeaderBuilder - from pypy.rpython.memory.gctransform import framework + def __init__(self, gcdescr, translator, rtyper, llop1=llop, + really_not_translated=False): GcLLDescription.__init__(self, gcdescr, translator, rtyper) - assert self.translate_support_code, "required with the framework GC" self.translator = translator self.llop1 = llop1 + if really_not_translated: + assert not self.translate_support_code # but half does not work + self._initialize_for_tests() + else: + assert self.translate_support_code,"required with the framework GC" + self._check_valid_gc() + self._make_gcrootmap() + self._make_layoutbuilder() + self._setup_gcclass() + self._setup_tid() + self._setup_write_barrier() + self._setup_str() + self._make_functions(really_not_translated) + def _initialize_for_tests(self): + self.layoutbuilder = None + self.fielddescr_tid = AbstractDescr() + self.max_size_of_young_obj = 1000 + self.GCClass = None + + def _check_valid_gc(self): # we need the hybrid or minimark GC for rgc._make_sure_does_not_move() # to work - if gcdescr.config.translation.gc not in ('hybrid', 'minimark'): + if self.gcdescr.config.translation.gc not in ('hybrid', 'minimark'): raise NotImplementedError("--gc=%s not implemented with the JIT" % (gcdescr.config.translation.gc,)) + def _make_gcrootmap(self): # to find roots in the assembler, make a GcRootMap - name = gcdescr.config.translation.gcrootfinder + name = self.gcdescr.config.translation.gcrootfinder try: cls = globals()['GcRootMap_' + name] except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % 
(name,)) - gcrootmap = cls(gcdescr) + gcrootmap = cls(self.gcdescr) self.gcrootmap = gcrootmap + def _make_layoutbuilder(self): # make a TransformerLayoutBuilder and save it on the translator # where it can be fished and reused by the FrameworkGCTransformer + from pypy.rpython.memory.gctransform import framework + translator = self.translator self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} - gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + self.gcrootmap.add_jit2gc_hooks(translator._jit2gc) + def _setup_gcclass(self): + from pypy.rpython.memory.gcheader import GCHeaderBuilder self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO) - (self.array_basesize, _, self.array_length_ofs) = \ - symbolic.get_array_token(lltype.GcArray(lltype.Signed), True) self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() @@ -645,91 +686,124 @@ assert self.GCClass.inline_simple_malloc assert self.GCClass.inline_simple_malloc_varsize - # make a malloc function, with two arguments - def malloc_basic(size, tid): - type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - has_finalizer = bool(tid & (1<", res) - return res - self.malloc_basic = malloc_basic - self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType( - [lltype.Signed, lltype.Signed], llmemory.GCREF)) + def _setup_tid(self): + self.fielddescr_tid = get_field_descr(self, self.GCClass.HDR, 'tid') + + def _setup_write_barrier(self): self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, llmemory.Address], lltype.Void)) self.WB_ARRAY_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, lltype.Signed, llmemory.Address], lltype.Void)) self.write_barrier_descr = WriteBarrierDescr(self) - # + + def _make_functions(self, really_not_translated): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + (self.standard_array_basesize, _, self.standard_array_length_ofs) = \ + symbolic.get_array_token(lltype.GcArray(lltype.Signed), + not really_not_translated) + + def malloc_nursery_slowpath(size): + """Allocate 'size' null bytes out of the nursery. + Note that the fast path is typically inlined by the backend.""" + if self.DEBUG: + self._random_usage_of_xmm_registers() + type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, size, + False, False, False) + self.generate_function('malloc_nursery', malloc_nursery_slowpath, + [lltype.Signed]) + def malloc_array(itemsize, tid, num_elem): + """Allocate an array with a variable-size num_elem. 
+ Only works for standard arrays.""" type_id = llop.extract_ushort(llgroup.HALFWORD, tid) check_typeid(type_id) return llop1.do_malloc_varsize_clear( llmemory.GCREF, - type_id, num_elem, self.array_basesize, itemsize, - self.array_length_ofs) - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 3, llmemory.GCREF)) - # - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, True) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, True) - str_type_id = self.layoutbuilder.get_type_id(rstr.STR) - unicode_type_id = self.layoutbuilder.get_type_id(rstr.UNICODE) - # + type_id, num_elem, self.standard_array_basesize, itemsize, + self.standard_array_length_ofs) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 3) + + def malloc_array_nonstandard(basesize, itemsize, lengthofs, tid, + num_elem): + """For the rare case of non-standard arrays, i.e. arrays where + self.standard_array_{basesize,length_ofs} is wrong. It can + occur e.g. with arrays of floats on Win32.""" + type_id = llop.extract_ushort(llgroup.HALFWORD, tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + type_id, num_elem, basesize, itemsize, lengthofs) + self.generate_function('malloc_array_nonstandard', + malloc_array_nonstandard, + [lltype.Signed] * 5) + + str_type_id = self.str_descr.tid + str_basesize = self.str_descr.basesize + str_itemsize = self.str_descr.itemsize + str_ofs_length = self.str_descr.lendescr.offset + unicode_type_id = self.unicode_descr.tid + unicode_basesize = self.unicode_descr.basesize + unicode_itemsize = self.unicode_descr.itemsize + unicode_ofs_length = self.unicode_descr.lendescr.offset + def malloc_str(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, str_type_id, length, str_basesize, str_itemsize, str_ofs_length) + self.generate_function('malloc_str', malloc_str, + [lltype.Signed]) + def malloc_unicode(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, - unicode_type_id, length, unicode_basesize,unicode_itemsize, + unicode_type_id, length, unicode_basesize, unicode_itemsize, unicode_ofs_length) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - # - class ForTestOnly: - pass - for_test_only = ForTestOnly() - for_test_only.x = 1.23 - def random_usage_of_xmm_registers(): - x0 = for_test_only.x - x1 = x0 * 0.1 - x2 = x0 * 0.2 - x3 = x0 * 0.3 - for_test_only.x = x0 + x1 + x2 + x3 - # - def malloc_slowpath(size): - if self.DEBUG: - random_usage_of_xmm_registers() - assert size >= self.minimal_size_in_nursery - # NB. although we call do_malloc_fixedsize_clear() here, - # it's a bit of a hack because we set tid to 0 and may - # also use it to allocate varsized objects. The tid - # and possibly the length are both set afterward. - gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - 0, size, False, False, False) - return rffi.cast(lltype.Signed, gcref) - self.malloc_slowpath = malloc_slowpath - self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) + self.generate_function('malloc_unicode', malloc_unicode, + [lltype.Signed]) + + # Rarely called: allocate a fixed-size amount of bytes, but + # not in the nursery, because it is too big. Implemented like + # malloc_nursery_slowpath() above. 
+ self.generate_function('malloc_fixedsize', malloc_nursery_slowpath, + [lltype.Signed]) + + def _bh_malloc(self, sizedescr): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, sizedescr.tid) + check_typeid(type_id) + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, sizedescr.size, + False, False, False) + + def _bh_malloc_array(self, arraydescr, num_elem): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, arraydescr.tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear(llmemory.GCREF, + type_id, num_elem, + arraydescr.basesize, + arraydescr.itemsize, + arraydescr.lendescr.offset) + + + class ForTestOnly: + pass + for_test_only = ForTestOnly() + for_test_only.x = 1.23 + + def _random_usage_of_xmm_registers(self): + x0 = self.for_test_only.x + x1 = x0 * 0.1 + x2 = x0 * 0.2 + x3 = x0 * 0.3 + self.for_test_only.x = x0 + x1 + x2 + x3 def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -739,52 +813,26 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) - return rffi.cast(lltype.Signed, fptr) - def initialize(self): self.gcrootmap.initialize() def init_size_descr(self, S, descr): - type_id = self.layoutbuilder.get_type_id(S) - assert not self.layoutbuilder.is_weakref_type(S) - has_finalizer = bool(self.layoutbuilder.has_finalizer(S)) - has_light_finalizer = bool(self.layoutbuilder.has_light_finalizer(S)) - flags = (int(has_finalizer) << llgroup.HALFSHIFT | - int(has_light_finalizer) << (llgroup.HALFSHIFT + 1)) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, flags) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(S) + assert not self.layoutbuilder.is_weakref_type(S) + assert not self.layoutbuilder.has_finalizer(S) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) def init_array_descr(self, A, descr): - type_id = self.layoutbuilder.get_type_id(A) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(A) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.malloc_basic(sizedescr.size, sizedescr.tid) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(itemsize, arraydescr.tid, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size, sizedescr.tid] - - def args_for_new_array(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [itemsize, arraydescr.tid] - - def get_funcptr_for_new(self): - return llhelper(self.GC_MALLOC_BASIC, self.malloc_basic) + def _set_tid(self, gcptr, tid): + hdr_addr = llmemory.cast_ptr_to_adr(gcptr) + hdr_addr -= self.gcheaderbuilder.size_gc_header + hdr = llmemory.cast_adr_to_ptr(hdr_addr, 
self.HDRPTR) + hdr.tid = tid def do_write_barrier(self, gcref_struct, gcref_newptr): hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct) @@ -798,99 +846,8 @@ funcptr(llmemory.cast_ptr_to_adr(gcref_struct), llmemory.cast_ptr_to_adr(gcref_newptr)) - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # Perform two kinds of rewrites in parallel: - # - # - Add COND_CALLs to the write barrier before SETFIELD_GC and - # SETARRAYITEM_GC operations. - # - # - Record the ConstPtrs from the assembler. - # - newops = [] - known_lengths = {} - # we can only remember one malloc since the next malloc can possibly - # collect - last_malloc = None - for op in operations: - if op.getopnum() == rop.DEBUG_MERGE_POINT: - continue - # ---------- record the ConstPtrs ---------- - self.record_constptrs(op, gcrefs_output_list) - if op.is_malloc(): - last_malloc = op.result - elif op.can_malloc(): - last_malloc = None - # ---------- write barrier for SETFIELD_GC ---------- - if op.getopnum() == rop.SETFIELD_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(1) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETFIELD_RAW) - # ---------- write barrier for SETARRAYITEM_GC ---------- - if op.getopnum() == rop.SETARRAYITEM_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier_array(newops, op.getarg(0), - op.getarg(1), v, - cpu, known_lengths) - op = op.copy_and_change(rop.SETARRAYITEM_RAW) - elif op.getopnum() == rop.NEW_ARRAY: - v_length = op.getarg(0) - if isinstance(v_length, ConstInt): - known_lengths[op.result] = v_length.getint() - # ---------- - newops.append(op) - return newops - - def _gen_write_barrier(self, newops, v_base, v_value): - args = [v_base, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, - descr=self.write_barrier_descr)) - - def _gen_write_barrier_array(self, newops, v_base, v_index, v_value, - cpu, known_lengths): - if self.write_barrier_descr.get_write_barrier_from_array_fn(cpu) != 0: - # If we know statically the length of 'v', and it is not too - # big, then produce a regular write_barrier. If it's unknown or - # too big, produce instead a write_barrier_from_array. 
- LARGE = 130 - length = known_lengths.get(v_base, LARGE) - if length >= LARGE: - # unknown or too big: produce a write_barrier_from_array - args = [v_base, v_index, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, - None, - descr=self.write_barrier_descr)) - return - # fall-back case: produce a write_barrier - self._gen_write_barrier(newops, v_base, v_value) - - def can_inline_malloc(self, descr): - assert isinstance(descr, BaseSizeDescr) - if descr.size < self.max_size_of_young_obj: - has_finalizer = bool(descr.tid & (1<= len(self.used): + self.used.append(False) + if self.used[index + i]: + return False # already in use + # good, we can reuse the location + for i in range(size): + self.used[index + i] = True + self.bindings[box] = loc + return True + # abstract methods that need to be overwritten for specific assemblers @staticmethod def frame_pos(loc, type): @@ -49,6 +123,10 @@ @staticmethod def frame_size(type): return 1 + @staticmethod + def get_loc_index(loc): + raise NotImplementedError("Purely abstract") + class RegisterManager(object): """ Class that keeps track of register allocations @@ -68,7 +146,14 @@ self.frame_manager = frame_manager self.assembler = assembler + def is_still_alive(self, v): + # Check if 'v' is alive at the current position. + # Return False if the last usage is strictly before. + return self.longevity[v][1] >= self.position + def stays_alive(self, v): + # Check if 'v' stays alive after the current position. + # Return False if the last usage is before or at position. return self.longevity[v][1] > self.position def next_instruction(self, incr=1): @@ -84,11 +169,14 @@ point for all variables that might be in registers. """ self._check_type(v) - if isinstance(v, Const) or v not in self.reg_bindings: + if isinstance(v, Const): return if v not in self.longevity or self.longevity[v][1] <= self.position: - self.free_regs.append(self.reg_bindings[v]) - del self.reg_bindings[v] + if v in self.reg_bindings: + self.free_regs.append(self.reg_bindings[v]) + del self.reg_bindings[v] + if self.frame_manager is not None: + self.frame_manager.mark_as_free(v) def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -0,0 +1,328 @@ +import sys +from pypy.rlib.rarithmetic import ovfcheck +from pypy.jit.metainterp.history import ConstInt, BoxPtr, ConstPtr +from pypy.jit.metainterp.resoperation import ResOperation, rop +from pypy.jit.codewriter import heaptracker +from pypy.jit.backend.llsupport.symbolic import WORD +from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr + + +class GcRewriterAssembler(object): + # This class performs the following rewrites on the list of operations: + # + # - Remove the DEBUG_MERGE_POINTs. + # + # - Turn all NEW_xxx to either a CALL_MALLOC_GC, or a CALL_MALLOC_NURSERY + # followed by SETFIELDs in order to initialize their GC fields. The + # two advantages of CALL_MALLOC_NURSERY is that it inlines the common + # path, and we need only one such operation to allocate several blocks + # of memory at once. + # + # - Add COND_CALLs to the write barrier before SETFIELD_GC and + # SETARRAYITEM_GC operations. 
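As an aside on the class comment above: the collapsing of several NEW_xxx operations into a single CALL_MALLOC_NURSERY can be illustrated outside of the JIT machinery. The sketch below is a minimal, self-contained Python illustration using hypothetical names (merge_allocations, NURSERY_LIMIT) -- it is not the GcRewriterAssembler API -- and only shows the bookkeeping idea: consecutive small allocations are merged into one nursery bump, and each later object becomes an offset into that block.

    # Illustrative sketch only -- hypothetical helper, not part of the
    # changeset above.  Consecutive fixed-size allocations are merged
    # into one "nursery bump" as long as the running total stays under
    # an assumed limit; each allocation is reported as (block, offset).

    NURSERY_LIMIT = 0x7FFFFF   # assumed per-bump limit for this sketch

    def merge_allocations(sizes, limit=NURSERY_LIMIT):
        """Return (placements, blocks): one (block, offset) pair per
        requested size, plus the total size of each merged bump."""
        blocks = []            # total size of each emitted nursery bump
        placements = []        # (block index, offset within that block)
        for size in sizes:
            if blocks and blocks[-1] + size <= limit:
                # grow the current bump; the object sits at its old end
                placements.append((len(blocks) - 1, blocks[-1]))
                blocks[-1] += size
            else:
                # start a new bump (e.g. after an op that can collect)
                placements.append((len(blocks), 0))
                blocks.append(size)
        return placements, blocks

    if __name__ == '__main__':
        placements, blocks = merge_allocations([16, 24, 32])
        assert blocks == [72]                        # one bump of 72 bytes
        assert placements == [(0, 0), (0, 16), (0, 40)]

In the rewriter itself the "offset" is materialized as an INT_ADD from the previously malloced result, and the running bump is abandoned as soon as an operation that can collect (or a LABEL) is emitted, as the methods further down show.
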
+ + _previous_size = -1 + _op_malloc_nursery = None + _v_last_malloced_nursery = None + c_zero = ConstInt(0) + + def __init__(self, gc_ll_descr, cpu): + self.gc_ll_descr = gc_ll_descr + self.cpu = cpu + self.newops = [] + self.known_lengths = {} + self.recent_mallocs = {} # set of variables + + def rewrite(self, operations): + # we can only remember one malloc since the next malloc can possibly + # collect; but we can try to collapse several known-size mallocs into + # one, both for performance and to reduce the number of write + # barriers. We do this on each "basic block" of operations, which in + # this case means between CALLs or unknown-size mallocs. + # + for op in operations: + if op.getopnum() == rop.DEBUG_MERGE_POINT: + continue + # ---------- turn NEWxxx into CALL_MALLOC_xxx ---------- + if op.is_malloc(): + self.handle_malloc_operation(op) + continue + elif op.can_malloc(): + self.emitting_an_operation_that_can_collect() + elif op.getopnum() == rop.LABEL: + self.emitting_an_operation_that_can_collect() + self.known_lengths.clear() + # ---------- write barriers ---------- + if self.gc_ll_descr.write_barrier_descr is not None: + if op.getopnum() == rop.SETFIELD_GC: + self.handle_write_barrier_setfield(op) + continue + if op.getopnum() == rop.SETINTERIORFIELD_GC: + self.handle_write_barrier_setinteriorfield(op) + continue + if op.getopnum() == rop.SETARRAYITEM_GC: + self.handle_write_barrier_setarrayitem(op) + continue + # ---------- + self.newops.append(op) + return self.newops + + # ---------- + + def handle_malloc_operation(self, op): + opnum = op.getopnum() + if opnum == rop.NEW: + self.handle_new_fixedsize(op.getdescr(), op) + elif opnum == rop.NEW_WITH_VTABLE: + classint = op.getarg(0).getint() + descr = heaptracker.vtable2descr(self.cpu, classint) + self.handle_new_fixedsize(descr, op) + if self.gc_ll_descr.fielddescr_vtable is not None: + op = ResOperation(rop.SETFIELD_GC, + [op.result, ConstInt(classint)], None, + descr=self.gc_ll_descr.fielddescr_vtable) + self.newops.append(op) + elif opnum == rop.NEW_ARRAY: + descr = op.getdescr() + assert isinstance(descr, ArrayDescr) + self.handle_new_array(descr, op) + elif opnum == rop.NEWSTR: + self.handle_new_array(self.gc_ll_descr.str_descr, op) + elif opnum == rop.NEWUNICODE: + self.handle_new_array(self.gc_ll_descr.unicode_descr, op) + else: + raise NotImplementedError(op.getopname()) + + def handle_new_fixedsize(self, descr, op): + assert isinstance(descr, SizeDescr) + size = descr.size + self.gen_malloc_nursery(size, op.result) + self.gen_initialize_tid(op.result, descr.tid) + + def handle_new_array(self, arraydescr, op): + v_length = op.getarg(0) + total_size = -1 + if isinstance(v_length, ConstInt): + num_elem = v_length.getint() + self.known_lengths[op.result] = num_elem + try: + var_size = ovfcheck(arraydescr.itemsize * num_elem) + total_size = ovfcheck(arraydescr.basesize + var_size) + except OverflowError: + pass # total_size is still -1 + elif arraydescr.itemsize == 0: + total_size = arraydescr.basesize + if 0 <= total_size <= 0xffffff: # up to 16MB, arbitrarily + self.gen_malloc_nursery(total_size, op.result) + self.gen_initialize_tid(op.result, arraydescr.tid) + self.gen_initialize_len(op.result, v_length, arraydescr.lendescr) + elif self.gc_ll_descr.kind == 'boehm': + self.gen_boehm_malloc_array(arraydescr, v_length, op.result) + else: + opnum = op.getopnum() + if opnum == rop.NEW_ARRAY: + self.gen_malloc_array(arraydescr, v_length, op.result) + elif opnum == rop.NEWSTR: + self.gen_malloc_str(v_length, op.result) + 
elif opnum == rop.NEWUNICODE: + self.gen_malloc_unicode(v_length, op.result) + else: + raise NotImplementedError(op.getopname()) + + # ---------- + + def emitting_an_operation_that_can_collect(self): + # must be called whenever we emit an operation that can collect: + # forgets the previous MALLOC_NURSERY, if any; and empty the + # set 'recent_mallocs', so that future SETFIELDs will generate + # a write barrier as usual. + self._op_malloc_nursery = None + self.recent_mallocs.clear() + + def _gen_call_malloc_gc(self, args, v_result, descr): + """Generate a CALL_MALLOC_GC with the given args.""" + self.emitting_an_operation_that_can_collect() + op = ResOperation(rop.CALL_MALLOC_GC, args, v_result, descr) + self.newops.append(op) + # mark 'v_result' as freshly malloced + self.recent_mallocs[v_result] = None + + def gen_malloc_fixedsize(self, size, v_result): + """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, Const(size)). + Note that with the framework GC, this should be called very rarely. + """ + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize') + self._gen_call_malloc_gc([ConstInt(addr), ConstInt(size)], v_result, + self.gc_ll_descr.malloc_fixedsize_descr) + + def gen_boehm_malloc_array(self, arraydescr, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) for Boehm.""" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_array') + self._gen_call_malloc_gc([ConstInt(addr), + ConstInt(arraydescr.basesize), + v_num_elem, + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.lendescr.offset)], + v_result, + self.gc_ll_descr.malloc_array_descr) + + def gen_malloc_array(self, arraydescr, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) going either + to the standard or the nonstandard version of the function.""" + # + if (arraydescr.basesize == self.gc_ll_descr.standard_array_basesize + and arraydescr.lendescr.offset == + self.gc_ll_descr.standard_array_length_ofs): + # this is a standard-looking array, common case + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_array') + args = [ConstInt(addr), + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.tid), + v_num_elem] + calldescr = self.gc_ll_descr.malloc_array_descr + else: + # rare case, so don't care too much about the number of arguments + addr = self.gc_ll_descr.get_malloc_fn_addr( + 'malloc_array_nonstandard') + args = [ConstInt(addr), + ConstInt(arraydescr.basesize), + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.lendescr.offset), + ConstInt(arraydescr.tid), + v_num_elem] + calldescr = self.gc_ll_descr.malloc_array_nonstandard_descr + self._gen_call_malloc_gc(args, v_result, calldescr) + + def gen_malloc_str(self, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_str_fn, ...).""" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_str') + self._gen_call_malloc_gc([ConstInt(addr), v_num_elem], v_result, + self.gc_ll_descr.malloc_str_descr) + + def gen_malloc_unicode(self, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_unicode_fn, ...).""" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_unicode') + self._gen_call_malloc_gc([ConstInt(addr), v_num_elem], v_result, + self.gc_ll_descr.malloc_unicode_descr) + + def gen_malloc_nursery(self, size, v_result): + """Try to generate or update a CALL_MALLOC_NURSERY. + If that fails, generate a plain CALL_MALLOC_GC instead. 
+ """ + size = self.round_up_for_allocation(size) + if not self.gc_ll_descr.can_use_nursery_malloc(size): + self.gen_malloc_fixedsize(size, v_result) + return + # + op = None + if self._op_malloc_nursery is not None: + # already a MALLOC_NURSERY: increment its total size + total_size = self._op_malloc_nursery.getarg(0).getint() + total_size += size + if self.gc_ll_descr.can_use_nursery_malloc(total_size): + # if the total size is still reasonable, merge it + self._op_malloc_nursery.setarg(0, ConstInt(total_size)) + op = ResOperation(rop.INT_ADD, + [self._v_last_malloced_nursery, + ConstInt(self._previous_size)], + v_result) + if op is None: + # if we failed to merge with a previous MALLOC_NURSERY, emit one + self.emitting_an_operation_that_can_collect() + op = ResOperation(rop.CALL_MALLOC_NURSERY, + [ConstInt(size)], + v_result) + self._op_malloc_nursery = op + # + self.newops.append(op) + self._previous_size = size + self._v_last_malloced_nursery = v_result + self.recent_mallocs[v_result] = None + + def gen_initialize_tid(self, v_newgcobj, tid): + if self.gc_ll_descr.fielddescr_tid is not None: + # produce a SETFIELD to initialize the GC header + op = ResOperation(rop.SETFIELD_GC, + [v_newgcobj, ConstInt(tid)], None, + descr=self.gc_ll_descr.fielddescr_tid) + self.newops.append(op) + + def gen_initialize_len(self, v_newgcobj, v_length, arraylen_descr): + # produce a SETFIELD to initialize the array length + op = ResOperation(rop.SETFIELD_GC, + [v_newgcobj, v_length], None, + descr=arraylen_descr) + self.newops.append(op) + + # ---------- + + def handle_write_barrier_setfield(self, op): + val = op.getarg(0) + # no need for a write barrier in the case of previous malloc + if val not in self.recent_mallocs: + v = op.getarg(1) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self.gen_write_barrier(op.getarg(0), v) + op = op.copy_and_change(rop.SETFIELD_RAW) + self.newops.append(op) + + def handle_write_barrier_setinteriorfield(self, op): + val = op.getarg(0) + # no need for a write barrier in the case of previous malloc + if val not in self.recent_mallocs: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self.gen_write_barrier(op.getarg(0), v) + op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) + self.newops.append(op) + + def handle_write_barrier_setarrayitem(self, op): + val = op.getarg(0) + # no need for a write barrier in the case of previous malloc + if val not in self.recent_mallocs: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self.gen_write_barrier_array(op.getarg(0), + op.getarg(1), v) + op = op.copy_and_change(rop.SETARRAYITEM_RAW) + self.newops.append(op) + + def gen_write_barrier(self, v_base, v_value): + write_barrier_descr = self.gc_ll_descr.write_barrier_descr + args = [v_base, v_value] + self.newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, + descr=write_barrier_descr)) + + def gen_write_barrier_array(self, v_base, v_index, v_value): + write_barrier_descr = self.gc_ll_descr.write_barrier_descr + if write_barrier_descr.has_write_barrier_from_array(self.cpu): + # If we know statically the length of 'v', and it is not too + # big, then produce a regular write_barrier. If it's unknown or + # too big, produce instead a write_barrier_from_array. 
+ LARGE = 130 + length = self.known_lengths.get(v_base, LARGE) + if length >= LARGE: + # unknown or too big: produce a write_barrier_from_array + args = [v_base, v_index, v_value] + self.newops.append( + ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, + descr=write_barrier_descr)) + return + # fall-back case: produce a write_barrier + self.gen_write_barrier(v_base, v_value) + + def round_up_for_allocation(self, size): + if not self.gc_ll_descr.round_up: + return size + if self.gc_ll_descr.translate_support_code: + from pypy.rpython.lltypesystem import llarena + return llarena.round_up_for_allocation( + size, self.gc_ll_descr.minimal_size_in_nursery) + else: + # non-translated: do it manually + # assume that "self.gc_ll_descr.minimal_size_in_nursery" is 2 WORDs + size = max(size, 2 * WORD) + return (size + WORD-1) & ~(WORD-1) # round up diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -1,4 +1,4 @@ -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport import symbolic from pypy.rlib.objectmodel import Symbolic @@ -53,18 +53,6 @@ ('z', lltype.Ptr(U)), ('f', lltype.Float), ('s', lltype.SingleFloat)) - assert getFieldDescrClass(lltype.Ptr(T)) is GcPtrFieldDescr - assert getFieldDescrClass(lltype.Ptr(U)) is NonGcPtrFieldDescr - cls = getFieldDescrClass(lltype.Char) - assert cls != getFieldDescrClass(lltype.Signed) - assert cls == getFieldDescrClass(lltype.Char) - clsf = getFieldDescrClass(lltype.Float) - assert clsf != cls - assert clsf == getFieldDescrClass(lltype.Float) - clss = getFieldDescrClass(lltype.SingleFloat) - assert clss not in (cls, clsf) - assert clss == getFieldDescrClass(lltype.SingleFloat) - assert clss == getFieldDescrClass(rffi.UINT) # for now # c0 = GcCache(False) c1 = GcCache(True) @@ -77,11 +65,7 @@ descr_z = get_field_descr(c2, S, 'z') descr_f = get_field_descr(c2, S, 'f') descr_s = get_field_descr(c2, S, 's') - assert descr_x.__class__ is cls - assert descr_y.__class__ is GcPtrFieldDescr - assert descr_z.__class__ is NonGcPtrFieldDescr - assert descr_f.__class__ is clsf - assert descr_s.__class__ is clss + assert isinstance(descr_x, FieldDescr) assert descr_x.name == 'S.x' assert descr_y.name == 'S.y' assert descr_z.name == 'S.z' @@ -90,33 +74,27 @@ if not tsc: assert descr_x.offset < descr_y.offset < descr_z.offset assert descr_x.sort_key() < descr_y.sort_key() < descr_z.sort_key() - assert descr_x.get_field_size(False) == rffi.sizeof(lltype.Char) - assert descr_y.get_field_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr_z.get_field_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr_f.get_field_size(False) == rffi.sizeof(lltype.Float) - assert descr_s.get_field_size(False) == rffi.sizeof( - lltype.SingleFloat) + assert descr_x.field_size == rffi.sizeof(lltype.Char) + assert descr_y.field_size == rffi.sizeof(lltype.Ptr(T)) + assert descr_z.field_size == rffi.sizeof(lltype.Ptr(U)) + assert descr_f.field_size == rffi.sizeof(lltype.Float) + assert descr_s.field_size == rffi.sizeof(lltype.SingleFloat) else: assert isinstance(descr_x.offset, Symbolic) assert isinstance(descr_y.offset, Symbolic) assert isinstance(descr_z.offset, Symbolic) assert isinstance(descr_f.offset, Symbolic) assert isinstance(descr_s.offset, Symbolic) - assert 
isinstance(descr_x.get_field_size(True), Symbolic) - assert isinstance(descr_y.get_field_size(True), Symbolic) - assert isinstance(descr_z.get_field_size(True), Symbolic) - assert isinstance(descr_f.get_field_size(True), Symbolic) - assert isinstance(descr_s.get_field_size(True), Symbolic) - assert not descr_x.is_pointer_field() - assert descr_y.is_pointer_field() - assert not descr_z.is_pointer_field() - assert not descr_f.is_pointer_field() - assert not descr_s.is_pointer_field() - assert not descr_x.is_float_field() - assert not descr_y.is_float_field() - assert not descr_z.is_float_field() - assert descr_f.is_float_field() - assert not descr_s.is_float_field() + assert isinstance(descr_x.field_size, Symbolic) + assert isinstance(descr_y.field_size, Symbolic) + assert isinstance(descr_z.field_size, Symbolic) + assert isinstance(descr_f.field_size, Symbolic) + assert isinstance(descr_s.field_size, Symbolic) + assert descr_x.flag == FLAG_UNSIGNED + assert descr_y.flag == FLAG_POINTER + assert descr_z.flag == FLAG_UNSIGNED + assert descr_f.flag == FLAG_FLOAT + assert descr_s.flag == FLAG_UNSIGNED def test_get_field_descr_sign(): @@ -128,7 +106,8 @@ for tsc in [False, True]: c2 = GcCache(tsc) descr_x = get_field_descr(c2, S, 'x') - assert descr_x.is_field_signed() == signed + assert descr_x.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] def test_get_field_descr_longlong(): if sys.maxint > 2147483647: @@ -136,9 +115,8 @@ c0 = GcCache(False) S = lltype.GcStruct('S', ('y', lltype.UnsignedLongLong)) descr = get_field_descr(c0, S, 'y') - assert not descr.is_pointer_field() - assert descr.is_float_field() - assert descr.get_field_size(False) == 8 + assert descr.flag == FLAG_FLOAT + assert descr.field_size == 8 def test_get_array_descr(): @@ -149,19 +127,8 @@ A3 = lltype.GcArray(lltype.Ptr(U)) A4 = lltype.GcArray(lltype.Float) A5 = lltype.GcArray(lltype.Struct('x', ('v', lltype.Signed), - ('k', lltype.Signed))) + ('k', lltype.Signed))) A6 = lltype.GcArray(lltype.SingleFloat) - assert getArrayDescrClass(A2) is GcPtrArrayDescr - assert getArrayDescrClass(A3) is NonGcPtrArrayDescr - cls = getArrayDescrClass(A1) - assert cls != getArrayDescrClass(lltype.GcArray(lltype.Signed)) - assert cls == getArrayDescrClass(lltype.GcArray(lltype.Char)) - clsf = getArrayDescrClass(A4) - assert clsf != cls - assert clsf == getArrayDescrClass(lltype.GcArray(lltype.Float)) - clss = getArrayDescrClass(A6) - assert clss not in (clsf, cls) - assert clss == getArrayDescrClass(lltype.GcArray(rffi.UINT)) # c0 = GcCache(False) descr1 = get_array_descr(c0, A1) @@ -170,82 +137,61 @@ descr4 = get_array_descr(c0, A4) descr5 = get_array_descr(c0, A5) descr6 = get_array_descr(c0, A6) - assert descr1.__class__ is cls - assert descr2.__class__ is GcPtrArrayDescr - assert descr3.__class__ is NonGcPtrArrayDescr - assert descr4.__class__ is clsf - assert descr6.__class__ is clss + assert isinstance(descr1, ArrayDescr) assert descr1 == get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert not descr1.is_array_of_pointers() - assert descr2.is_array_of_pointers() - assert not descr3.is_array_of_pointers() - assert not descr4.is_array_of_pointers() - assert not descr5.is_array_of_pointers() - assert not descr1.is_array_of_floats() - assert not descr2.is_array_of_floats() - assert not descr3.is_array_of_floats() - assert descr4.is_array_of_floats() - assert not descr5.is_array_of_floats() + assert descr1.flag == FLAG_UNSIGNED + assert descr2.flag == FLAG_POINTER + assert descr3.flag == FLAG_UNSIGNED + assert descr4.flag == 
FLAG_FLOAT + assert descr5.flag == FLAG_STRUCT + assert descr6.flag == FLAG_UNSIGNED # def get_alignment(code): # Retrieve default alignment for the compiler/platform return struct.calcsize('l' + code) - struct.calcsize(code) - assert descr1.get_base_size(False) == get_alignment('c') - assert descr2.get_base_size(False) == get_alignment('p') - assert descr3.get_base_size(False) == get_alignment('p') - assert descr4.get_base_size(False) == get_alignment('d') - assert descr5.get_base_size(False) == get_alignment('f') - assert descr1.get_ofs_length(False) == 0 - assert descr2.get_ofs_length(False) == 0 - assert descr3.get_ofs_length(False) == 0 - assert descr4.get_ofs_length(False) == 0 - assert descr5.get_ofs_length(False) == 0 - assert descr1.get_item_size(False) == rffi.sizeof(lltype.Char) - assert descr2.get_item_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr3.get_item_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr4.get_item_size(False) == rffi.sizeof(lltype.Float) - assert descr5.get_item_size(False) == rffi.sizeof(lltype.Signed) * 2 - assert descr6.get_item_size(False) == rffi.sizeof(lltype.SingleFloat) + assert descr1.basesize == get_alignment('c') + assert descr2.basesize == get_alignment('p') + assert descr3.basesize == get_alignment('p') + assert descr4.basesize == get_alignment('d') + assert descr5.basesize == get_alignment('f') + assert descr1.lendescr.offset == 0 + assert descr2.lendescr.offset == 0 + assert descr3.lendescr.offset == 0 + assert descr4.lendescr.offset == 0 + assert descr5.lendescr.offset == 0 + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr2.itemsize == rffi.sizeof(lltype.Ptr(T)) + assert descr3.itemsize == rffi.sizeof(lltype.Ptr(U)) + assert descr4.itemsize == rffi.sizeof(lltype.Float) + assert descr5.itemsize == rffi.sizeof(lltype.Signed) * 2 + assert descr6.itemsize == rffi.sizeof(lltype.SingleFloat) # - assert isinstance(descr1.get_base_size(True), Symbolic) - assert isinstance(descr2.get_base_size(True), Symbolic) - assert isinstance(descr3.get_base_size(True), Symbolic) - assert isinstance(descr4.get_base_size(True), Symbolic) - assert isinstance(descr5.get_base_size(True), Symbolic) - assert isinstance(descr1.get_ofs_length(True), Symbolic) - assert isinstance(descr2.get_ofs_length(True), Symbolic) - assert isinstance(descr3.get_ofs_length(True), Symbolic) - assert isinstance(descr4.get_ofs_length(True), Symbolic) - assert isinstance(descr5.get_ofs_length(True), Symbolic) - assert isinstance(descr1.get_item_size(True), Symbolic) - assert isinstance(descr2.get_item_size(True), Symbolic) - assert isinstance(descr3.get_item_size(True), Symbolic) - assert isinstance(descr4.get_item_size(True), Symbolic) - assert isinstance(descr5.get_item_size(True), Symbolic) CA = rffi.CArray(lltype.Signed) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_SIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Ptr(lltype.GcStruct('S'))) descr = get_array_descr(c0, CA) - assert descr.is_array_of_pointers() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_POINTER + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Ptr(lltype.Struct('S'))) descr = get_array_descr(c0, CA) - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == 
FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Float) descr = get_array_descr(c0, CA) - assert descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_FLOAT + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(rffi.FLOAT) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.itemsize == rffi.sizeof(lltype.SingleFloat) + assert descr.lendescr is None def test_get_array_descr_sign(): @@ -257,46 +203,55 @@ for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, A) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] # RA = rffi.CArray(RESTYPE) for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, RA) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] + + +def test_get_array_descr_str(): + c0 = GcCache(False) + descr1 = get_array_descr(c0, rstr.STR) + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr1.flag == FLAG_UNSIGNED def test_get_call_descr_not_translated(): c0 = GcCache(False) descr1 = get_call_descr(c0, [lltype.Char, lltype.Signed], lltype.Char) - assert descr1.get_result_size(False) == rffi.sizeof(lltype.Char) - assert descr1.get_return_type() == history.INT + assert descr1.get_result_size() == rffi.sizeof(lltype.Char) + assert descr1.get_result_type() == history.INT assert descr1.arg_classes == "ii" # T = lltype.GcStruct('T') descr2 = get_call_descr(c0, [lltype.Ptr(T)], lltype.Ptr(T)) - assert descr2.get_result_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr2.get_return_type() == history.REF + assert descr2.get_result_size() == rffi.sizeof(lltype.Ptr(T)) + assert descr2.get_result_type() == history.REF assert descr2.arg_classes == "r" # U = lltype.GcStruct('U', ('x', lltype.Signed)) assert descr2 == get_call_descr(c0, [lltype.Ptr(U)], lltype.Ptr(U)) # V = lltype.Struct('V', ('x', lltype.Signed)) - assert (get_call_descr(c0, [], lltype.Ptr(V)).get_return_type() == + assert (get_call_descr(c0, [], lltype.Ptr(V)).get_result_type() == history.INT) # - assert (get_call_descr(c0, [], lltype.Void).get_return_type() == + assert (get_call_descr(c0, [], lltype.Void).get_result_type() == history.VOID) # descr4 = get_call_descr(c0, [lltype.Float, lltype.Float], lltype.Float) - assert descr4.get_result_size(False) == rffi.sizeof(lltype.Float) - assert descr4.get_return_type() == history.FLOAT + assert descr4.get_result_size() == rffi.sizeof(lltype.Float) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c0, [lltype.SingleFloat], lltype.SingleFloat) - assert descr5.get_result_size(False) == rffi.sizeof(lltype.SingleFloat) - assert descr5.get_return_type() == "S" + assert descr5.get_result_size() == rffi.sizeof(lltype.SingleFloat) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_get_call_descr_not_translated_longlong(): @@ -305,13 +260,13 @@ c0 = GcCache(False) # descr5 = get_call_descr(c0, [lltype.SignedLongLong], lltype.Signed) - assert descr5.get_result_size(False) == 4 - assert descr5.get_return_type() == history.INT + assert 
descr5.get_result_size() == 4 + assert descr5.get_result_type() == history.INT assert descr5.arg_classes == "L" # descr6 = get_call_descr(c0, [lltype.Signed], lltype.SignedLongLong) - assert descr6.get_result_size(False) == 8 - assert descr6.get_return_type() == "L" + assert descr6.get_result_size() == 8 + assert descr6.get_result_type() == "L" assert descr6.arg_classes == "i" def test_get_call_descr_translated(): @@ -319,18 +274,18 @@ T = lltype.GcStruct('T') U = lltype.GcStruct('U', ('x', lltype.Signed)) descr3 = get_call_descr(c1, [lltype.Ptr(T)], lltype.Ptr(U)) - assert isinstance(descr3.get_result_size(True), Symbolic) - assert descr3.get_return_type() == history.REF + assert isinstance(descr3.get_result_size(), Symbolic) + assert descr3.get_result_type() == history.REF assert descr3.arg_classes == "r" # descr4 = get_call_descr(c1, [lltype.Float, lltype.Float], lltype.Float) - assert isinstance(descr4.get_result_size(True), Symbolic) - assert descr4.get_return_type() == history.FLOAT + assert isinstance(descr4.get_result_size(), Symbolic) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c1, [lltype.SingleFloat], lltype.SingleFloat) - assert isinstance(descr5.get_result_size(True), Symbolic) - assert descr5.get_return_type() == "S" + assert isinstance(descr5.get_result_size(), Symbolic) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_call_descr_extra_info(): @@ -369,29 +324,30 @@ # descr2 = get_field_descr(c0, S, 'y') o, _ = symbolic.get_field_token(S, 'y', False) - assert descr2.repr_of_descr() == '' % o + assert descr2.repr_of_descr() == '' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) - assert descr2i.repr_of_descr() == '' % o + assert descr2i.repr_of_descr() == '' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) - assert descr3.repr_of_descr() == '' + o = symbolic.get_size(lltype.Ptr(S), False) + assert descr3.repr_of_descr() == '' % o # descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert descr3i.repr_of_descr() == '' + assert descr3i.repr_of_descr() == '' # descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) - assert 'GcPtrCallDescr' in descr4.repr_of_descr() + assert descr4.repr_of_descr() == '' # descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) - assert 'CharCallDescr' in descr4i.repr_of_descr() + assert descr4i.repr_of_descr() == '' # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) - assert 'FloatCallDescr' in descr4f.repr_of_descr() + assert descr4f.repr_of_descr() == '' # descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat) - assert 'SingleFloatCallDescr' in descr5f.repr_of_descr() + assert descr5f.repr_of_descr() == '' def test_call_stubs_1(): c0 = GcCache(False) @@ -401,10 +357,10 @@ def f(a, b): return 'c' - call_stub = descr1.call_stub fnptr = llhelper(lltype.Ptr(lltype.FuncType(ARGS, RES)), f) - res = call_stub(rffi.cast(lltype.Signed, fnptr), [1, 2], None, None) + res = descr1.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [1, 2], None, None) assert res == ord('c') def test_call_stubs_2(): @@ -421,8 +377,8 @@ a = lltype.malloc(ARRAY, 3) opaquea = lltype.cast_opaque_ptr(llmemory.GCREF, a) a[0] = 1 - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [], [opaquea], [longlong.getfloatstorage(3.5)]) + res = descr2.call_stub_f(rffi.cast(lltype.Signed, fnptr), + [], [opaquea], [longlong.getfloatstorage(3.5)]) assert 
longlong.getrealfloat(res) == 4.5 def test_call_stubs_single_float(): @@ -445,6 +401,22 @@ a = intmask(singlefloat2uint(r_singlefloat(-10.0))) b = intmask(singlefloat2uint(r_singlefloat(3.0))) c = intmask(singlefloat2uint(r_singlefloat(2.0))) - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [a, b, c], [], []) + res = descr2.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [a, b, c], [], []) assert float(uint2singlefloat(rffi.r_uint(res))) == -11.5 + +def test_field_arraylen_descr(): + c0 = GcCache(True) + A1 = lltype.GcArray(lltype.Signed) + fielddescr = get_field_arraylen_descr(c0, A1) + assert isinstance(fielddescr, FieldDescr) + ofs = fielddescr.offset + assert repr(ofs) == '< ArrayLengthOffset >' + # + fielddescr = get_field_arraylen_descr(c0, rstr.STR) + ofs = fielddescr.offset + assert repr(ofs) == ("< " + " 'chars'> + < ArrayLengthOffset" + " > >") + # caching: + assert fielddescr is get_field_arraylen_descr(c0, rstr.STR) diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -1,5 +1,6 @@ from pypy.rlib.libffi import types from pypy.jit.codewriter.longlong import is_64_bit +from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport.ffisupport import * @@ -15,7 +16,9 @@ args = [types.sint, types.pointer] descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, None, ffi_flags=42) - assert isinstance(descr, DynamicIntCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_type == 'i' + assert descr.result_flag == FLAG_SIGNED assert descr.arg_classes == 'ii' assert descr.get_ffi_flags() == 42 @@ -24,18 +27,20 @@ assert descr is None # missing floats descr = get_call_descr_dynamic(FakeCPU(supports_floats=True), args, types.void, None, ffi_flags=43) - assert isinstance(descr, VoidCallDescr) + assert descr.result_type == 'v' + assert descr.result_flag == FLAG_VOID assert descr.arg_classes == 'ifi' assert descr.get_ffi_flags() == 43 descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_SIGNED assert descr.is_result_signed() == True descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert isinstance(descr, CallDescr) + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_UNSIGNED assert descr.is_result_signed() == False if not is_64_bit: @@ -44,7 +49,9 @@ assert descr is None # missing longlongs descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True), [], types.slonglong, None, ffi_flags=43) - assert isinstance(descr, LongLongCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_flag == FLAG_FLOAT + assert descr.result_type == 'L' assert descr.get_ffi_flags() == 43 else: assert types.slonglong is types.slong @@ -53,6 +60,6 @@ assert descr is None # missing singlefloats descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True), [], types.float, None, ffi_flags=44) - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - assert isinstance(descr, SingleFloatCallDescr) + assert descr.result_flag == FLAG_UNSIGNED + assert descr.result_type == 'S' assert descr.get_ffi_flags() == 44 diff --git a/pypy/jit/backend/llsupport/test/test_gc.py 
b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -6,6 +6,7 @@ from pypy.jit.backend.llsupport.gc import * from pypy.jit.backend.llsupport import symbolic from pypy.jit.metainterp.gc import get_description +from pypy.jit.metainterp.history import BoxPtr, BoxInt, ConstPtr from pypy.jit.metainterp.resoperation import get_deep_immutable_oplist from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE @@ -15,12 +16,12 @@ gc_ll_descr = GcLLDescr_boehm(None, None, None) # record = [] - prev_funcptr_for_new = gc_ll_descr.funcptr_for_new - def my_funcptr_for_new(size): - p = prev_funcptr_for_new(size) + prev_malloc_fn_ptr = gc_ll_descr.malloc_fn_ptr + def my_malloc_fn_ptr(size): + p = prev_malloc_fn_ptr(size) record.append((size, p)) return p - gc_ll_descr.funcptr_for_new = my_funcptr_for_new + gc_ll_descr.malloc_fn_ptr = my_malloc_fn_ptr # # ---------- gc_malloc ---------- S = lltype.GcStruct('S', ('x', lltype.Signed)) @@ -32,8 +33,8 @@ A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(gc_ll_descr, A) p = gc_ll_descr.gc_malloc_array(arraydescr, 10) - assert record == [(arraydescr.get_base_size(False) + - 10 * arraydescr.get_item_size(False), p)] + assert record == [(arraydescr.basesize + + 10 * arraydescr.itemsize, p)] del record[:] # ---------- gc_malloc_str ---------- p = gc_ll_descr.gc_malloc_str(10) @@ -246,24 +247,28 @@ def __init__(self): self.record = [] + def _malloc(self, type_id, size): + tid = llop.combine_ushort(lltype.Signed, type_id, 0) + x = llmemory.raw_malloc(self.gcheaderbuilder.size_gc_header + size) + x += self.gcheaderbuilder.size_gc_header + return x, tid + def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size, has_finalizer, has_light_finalizer, contains_weakptr): assert not contains_weakptr - assert not has_finalizer # in these tests - assert not has_light_finalizer # in these tests - p = llmemory.raw_malloc(size) + assert not has_finalizer + assert not has_light_finalizer + p, tid = self._malloc(type_id, size) p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("fixedsize", repr(size), tid, p)) return p def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, itemsize, offset_to_length): - p = llmemory.raw_malloc(size + itemsize * length) + p, tid = self._malloc(type_id, size + itemsize * length) (p + offset_to_length).signed[0] = length p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("varsize", tid, length, repr(size), repr(itemsize), repr(offset_to_length), p)) @@ -322,43 +327,40 @@ gc_ll_descr = GcLLDescr_framework(gcdescr, FakeTranslator(), None, llop1) gc_ll_descr.initialize() + llop1.gcheaderbuilder = gc_ll_descr.gcheaderbuilder self.llop1 = llop1 self.gc_ll_descr = gc_ll_descr self.fake_cpu = FakeCPU() - def test_args_for_new(self): - S = lltype.GcStruct('S', ('x', lltype.Signed)) - sizedescr = get_size_descr(self.gc_ll_descr, S) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed - A = lltype.GcArray(lltype.Signed) - arraydescr = get_array_descr(self.gc_ll_descr, A) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed +## def test_args_for_new(self): +## S = lltype.GcStruct('S', ('x', lltype.Signed)) +## sizedescr = get_size_descr(self.gc_ll_descr, S) 
+## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed +## A = lltype.GcArray(lltype.Signed) +## arraydescr = get_array_descr(self.gc_ll_descr, A) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed def test_gc_malloc(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) sizedescr = get_size_descr(self.gc_ll_descr, S) p = self.gc_ll_descr.gc_malloc(sizedescr) - assert self.llop1.record == [("fixedsize", - repr(sizedescr.size), + assert lltype.typeOf(p) == llmemory.GCREF + assert self.llop1.record == [("fixedsize", repr(sizedescr.size), sizedescr.tid, p)] - assert repr(self.gc_ll_descr.args_for_new(sizedescr)) == repr( - [sizedescr.size, sizedescr.tid]) def test_gc_malloc_array(self): A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(self.gc_ll_descr, A) p = self.gc_ll_descr.gc_malloc_array(arraydescr, 10) assert self.llop1.record == [("varsize", arraydescr.tid, 10, - repr(arraydescr.get_base_size(True)), - repr(arraydescr.get_item_size(True)), - repr(arraydescr.get_ofs_length(True)), + repr(arraydescr.basesize), + repr(arraydescr.itemsize), + repr(arraydescr.lendescr.offset), p)] - assert repr(self.gc_ll_descr.args_for_new_array(arraydescr)) == repr( - [arraydescr.get_item_size(True), arraydescr.tid]) def test_gc_malloc_str(self): p = self.gc_ll_descr.gc_malloc_str(10) @@ -404,10 +406,11 @@ gc_ll_descr = self.gc_ll_descr llop1 = self.llop1 # - newops = [] + rewriter = GcRewriterAssembler(gc_ll_descr, None) + newops = rewriter.newops v_base = BoxPtr() v_value = BoxPtr() - gc_ll_descr._gen_write_barrier(newops, v_base, v_value) + rewriter.gen_write_barrier(v_base, v_value) assert llop1.record == [] assert len(newops) == 1 assert newops[0].getopnum() == rop.COND_CALL_GC_WB @@ -427,8 +430,7 @@ operations = gc_ll_descr.rewrite_assembler(None, operations, []) assert len(operations) == 0 - def test_rewrite_assembler_1(self): - # check recording of ConstPtrs + def test_record_constptrs(self): class MyFakeCPU(object): def cast_adr_to_int(self, adr): assert adr == "some fake address" @@ -455,189 +457,6 @@ assert operations2 == operations assert gcrefs == [s_gcref] - def test_rewrite_assembler_2(self): - # check write barriers before SETFIELD_GC - v_base = BoxPtr() - v_value = BoxPtr() - field_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETFIELD_GC, [v_base, v_value], None, - descr=field_descr), - ] - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, operations, - []) - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETFIELD_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_value - assert operations[1].getdescr() == field_descr - - def test_rewrite_assembler_3(self): - # check write barriers before SETARRAYITEM_GC - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - 
# we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_4(self): - # check write barriers before SETARRAYITEM_GC, - # if we have actually a write_barrier_from_array. - self.llop1._have_wb_from_array = True - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - if isinstance(v_new_length, ConstInt) and v_new_length.value < 130: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - else: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB_ARRAY - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_index - assert operations[0].getarg(2) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_initialization_store(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - # no write barrier - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, 
expected.operations) - - def test_rewrite_assembler_initialization_store_2(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - wbdescr = self.gc_ll_descr.write_barrier_descr - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_3(self): - A = lltype.GcArray(lltype.Ptr(lltype.GcStruct('S'))) - arraydescr = get_array_descr(self.gc_ll_descr, A) - ops = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) class TestFrameworkMiniMark(TestFramework): gc = 'minimark' diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -2,6 +2,8 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan +from pypy.jit.tool.oparser import parse +from pypy.jit.backend.detect_cpu import getcpuclass def newboxes(*values): return [BoxInt(v) for v in values] @@ -40,8 +42,13 @@ def frame_size(self, box_type): if box_type == FLOAT: return 2 + elif box_type == INT: + return 1 else: - return 1 + raise ValueError(box_type) + def get_loc_index(self, loc): + assert isinstance(loc, FakeFramePos) + return loc.pos class MockAsm(object): def __init__(self): @@ -280,7 +287,7 @@ rm.force_allocate_reg(b) rm.before_call() assert len(rm.reg_bindings) == 2 - assert fm.frame_depth == 2 + assert fm.get_frame_depth() == 2 assert len(asm.moves) == 2 rm._check_invariants() rm.after_call(boxes[-1]) @@ -303,7 +310,7 @@ rm.force_allocate_reg(b) rm.before_call(save_all_regs=True) assert len(rm.reg_bindings) == 0 - assert fm.frame_depth == 4 + assert fm.get_frame_depth() == 4 assert len(asm.moves) == 4 rm._check_invariants() rm.after_call(boxes[-1]) @@ -325,7 +332,7 @@ xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm) xrm.loc(f0) rm.loc(b0) - assert fm.frame_depth == 3 + assert fm.get_frame_depth() == 3 @@ -346,3 +353,123 @@ spilled2 = rm.force_allocate_reg(b5) assert spilled2 is loc rm._check_invariants() + + + def test_hint_frame_locations_1(self): + b0, = newboxes(0) + fm = TFrameManager() + loc123 = FakeFramePos(123, INT) + fm.hint_frame_locations[b0] = loc123 + assert fm.get_frame_depth() == 0 + loc = fm.loc(b0) + assert loc == loc123 + assert 
fm.get_frame_depth() == 124 + + def test_hint_frame_locations_2(self): + b0, b1, b2 = newboxes(0, 1, 2) + longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_allocate_reg(b2) + rm.force_spill_var(b0) + loc = rm.loc(b0) + assert isinstance(loc, FakeFramePos) + assert fm.get_loc_index(loc) == 0 + rm.position = 1 + assert fm.used == [True] + rm.possibly_free_var(b0) + assert fm.used == [False] + # + fm.hint_frame_locations[b1] = loc + rm.force_spill_var(b1) + loc1 = rm.loc(b1) + assert loc1 == loc + assert fm.used == [True] + # + fm.hint_frame_locations[b2] = loc + rm.force_spill_var(b2) + loc2 = rm.loc(b2) + assert loc2 != loc1 # because it was not free + assert fm.used == [True, True] + # + rm._check_invariants() + + def test_frame_manager_basic(self): + b0, b1 = newboxes(0, 1) + fm = TFrameManager() + loc0 = fm.loc(b0) + assert fm.get_loc_index(loc0) == 0 + # + assert fm.get(b1) is None + loc1 = fm.loc(b1) + assert fm.get_loc_index(loc1) == 1 + assert fm.get(b1) == loc1 + # + loc0b = fm.loc(b0) + assert loc0b == loc0 + # + fm.loc(BoxInt()) + assert fm.get_frame_depth() == 3 + # + f0 = BoxFloat() + locf0 = fm.loc(f0) + assert fm.get_loc_index(locf0) == 4 + assert fm.get_frame_depth() == 6 + # + f1 = BoxFloat() + locf1 = fm.loc(f1) + assert fm.get_loc_index(locf1) == 6 + assert fm.get_frame_depth() == 8 + assert fm.used == [True, True, True, False, True, True, True, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(f1) + assert fm.used == [False, True, True, False, True, True, False, False] + # + fm.reserve_location_in_frame(1) + assert fm.get_frame_depth() == 9 + assert fm.used == [False, True, True, False, True, True, False, False, True] + # + assert b0 not in fm.bindings + fm.set_binding(b0, loc0) + assert b0 in fm.bindings + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + b3 = BoxInt() + assert not fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) # already free + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b3) + assert fm.used == [False, True, True, False, True, True, False, False, True] + f3 = BoxFloat() + assert not fm.try_to_reuse_location(f3, fm.frame_pos(0, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(1, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(2, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(3, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(4, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(5, FLOAT)) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(f3, fm.frame_pos(6, FLOAT)) + assert fm.used == [False, True, True, False, True, True, True, True, True] + # + fm.used = [False] + assert fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True, True] + # + fm.used = [True] + 
assert not fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True] diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -0,0 +1,668 @@ +from pypy.jit.backend.llsupport.descr import * +from pypy.jit.backend.llsupport.gc import * +from pypy.jit.metainterp.gc import get_description +from pypy.jit.tool.oparser import parse +from pypy.jit.metainterp.optimizeopt.util import equaloplists +from pypy.jit.codewriter.heaptracker import register_known_gctype + + +class Evaluator(object): + def __init__(self, scope): + self.scope = scope + def __getitem__(self, key): + return eval(key, self.scope) + + +class RewriteTests(object): + def check_rewrite(self, frm_operations, to_operations, **namespace): + S = lltype.GcStruct('S', ('x', lltype.Signed), + ('y', lltype.Signed)) + sdescr = get_size_descr(self.gc_ll_descr, S) + sdescr.tid = 1234 + # + T = lltype.GcStruct('T', ('y', lltype.Signed), + ('z', lltype.Ptr(S)), + ('t', lltype.Signed)) + tdescr = get_size_descr(self.gc_ll_descr, T) + tdescr.tid = 5678 + tzdescr = get_field_descr(self.gc_ll_descr, T, 'z') + # + A = lltype.GcArray(lltype.Signed) + adescr = get_array_descr(self.gc_ll_descr, A) + adescr.tid = 4321 + alendescr = adescr.lendescr + # + B = lltype.GcArray(lltype.Char) + bdescr = get_array_descr(self.gc_ll_descr, B) + bdescr.tid = 8765 + blendescr = bdescr.lendescr + # + C = lltype.GcArray(lltype.Ptr(S)) + cdescr = get_array_descr(self.gc_ll_descr, C) + cdescr.tid = 8111 + clendescr = cdescr.lendescr + # + E = lltype.GcStruct('Empty') + edescr = get_size_descr(self.gc_ll_descr, E) + edescr.tid = 9000 + # + vtable_descr = self.gc_ll_descr.fielddescr_vtable + O = lltype.GcStruct('O', ('parent', rclass.OBJECT), + ('x', lltype.Signed)) + o_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) + register_known_gctype(self.cpu, o_vtable, O) + # + tiddescr = self.gc_ll_descr.fielddescr_tid + wbdescr = self.gc_ll_descr.write_barrier_descr + WORD = globals()['WORD'] + # + strdescr = self.gc_ll_descr.str_descr + unicodedescr = self.gc_ll_descr.unicode_descr + strlendescr = strdescr.lendescr + unicodelendescr = unicodedescr.lendescr + # + namespace.update(locals()) + # + for funcname in self.gc_ll_descr._generated_functions: + namespace[funcname] = self.gc_ll_descr.get_malloc_fn(funcname) + namespace[funcname + '_descr'] = getattr(self.gc_ll_descr, + '%s_descr' % funcname) + # + ops = parse(frm_operations, namespace=namespace) + expected = parse(to_operations % Evaluator(namespace), + namespace=namespace) + operations = self.gc_ll_descr.rewrite_assembler(self.cpu, + ops.operations, + []) + equaloplists(operations, expected.operations) + + +class TestBoehm(RewriteTests): + def setup_method(self, meth): + class FakeCPU(object): + def sizeof(self, STRUCT): + return SizeDescrWithVTable(102) + self.cpu = FakeCPU() + self.gc_ll_descr = GcLLDescr_boehm(None, None, None) + + def test_new(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_no_collapsing(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + p1 = call_malloc_gc(ConstClass(malloc_fixedsize), 
%(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_new_array_fixed(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(adescr.basesize + 10 * adescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_new_array_variable(self): + self.check_rewrite(""" + [i1] + p0 = new_array(i1, descr=adescr) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(adescr.basesize)d, \ + i1, \ + %(adescr.itemsize)d, \ + %(adescr.lendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_newstr(self): + self.check_rewrite(""" + [i1] + p0 = newstr(i1) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(strdescr.basesize)d, \ + i1, \ + %(strdescr.itemsize)d, \ + %(strlendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_newunicode(self): + self.check_rewrite(""" + [i1] + p0 = newunicode(10) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(unicodedescr.basesize + \ + 10 * unicodedescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=unicodelendescr) + jump() + """) + + +class TestFramework(RewriteTests): + def setup_method(self, meth): + class config_(object): + class translation(object): + gc = 'hybrid' + gcrootfinder = 'asmgcc' + gctransformer = 'framework' + gcremovetypeptr = False + gcdescr = get_description(config_) + self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, + really_not_translated=True) + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: True) + # + class FakeCPU(object): + def sizeof(self, STRUCT): + descr = SizeDescrWithVTable(102) + descr.tid = 9315 + return descr + self.cpu = FakeCPU() + + def test_rewrite_assembler_new_to_malloc(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(sdescr.size)d) + setfield_gc(p0, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new3_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=tdescr) + p2 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + tdescr.size + sdescr.size)d) + setfield_gc(p0, 1234, descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 5678, descr=tiddescr) + p2 = int_add(p1, %(tdescr.size)d) + setfield_gc(p2, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 4321, descr=tiddescr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_new_and_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + \ + adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 1234, 
descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 4321, descr=tiddescr) + setfield_gc(p1, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_round_up(self): + self.check_rewrite(""" + [] + p0 = new_array(6, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(bdescr.basesize + 8)d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 6, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_round_up_always(self): + self.check_rewrite(""" + [] + p0 = new_array(5, descr=bdescr) + p1 = new_array(5, descr=bdescr) + p2 = new_array(5, descr=bdescr) + p3 = new_array(5, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4 * (bdescr.basesize + 8))d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 5, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 8)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 5, descr=blendescr) + p2 = int_add(p1, %(bdescr.basesize + 8)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 5, descr=blendescr) + p3 = int_add(p2, %(bdescr.basesize + 8)d) + setfield_gc(p3, 8765, descr=tiddescr) + setfield_gc(p3, 5, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_minimal_size(self): + self.check_rewrite(""" + [] + p0 = new(descr=edescr) + p1 = new(descr=edescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4*WORD)d) + setfield_gc(p0, 9000, descr=tiddescr) + p1 = int_add(p0, %(2*WORD)d) + setfield_gc(p1, 9000, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_variable_size(self): + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=bdescr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, i0, \ + descr=malloc_array_descr) + jump(i0) + """) + + def test_rewrite_assembler_nonstandard_array(self): + # a non-standard array is a bit hard to get; e.g. GcArray(Float) + # is like that on Win32, but not on Linux. Build one manually... 
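Note that the %(...)d placeholders in the expected traces of these tests are filled in by the Evaluator helper defined at the top of this file: old-style string formatting with a mapping object calls its __getitem__, so each placeholder key is eval()'d in the test's namespace. A minimal standalone sketch of the same trick, with invented sizes instead of real descr fields:

    class Evaluator(object):
        # a mapping whose keys are Python expressions, evaluated on lookup
        def __init__(self, scope):
            self.scope = scope
        def __getitem__(self, key):
            return eval(key, self.scope)

    template = "p0 = call_malloc_nursery(%(basesize + 10 * itemsize)d)"
    print template % Evaluator({'basesize': 16, 'itemsize': 8})
    # prints: p0 = call_malloc_nursery(96)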
+ NONSTD = lltype.GcArray(lltype.Float) + nonstd_descr = get_array_descr(self.gc_ll_descr, NONSTD) + nonstd_descr.tid = 6464 + nonstd_descr.basesize = 64 # <= hacked + nonstd_descr.itemsize = 8 + nonstd_descr_gcref = 123 + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=nonstd_descr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array_nonstandard), \ + 64, 8, \ + %(nonstd_descr.lendescr.offset)d, \ + 6464, i0, \ + descr=malloc_array_nonstandard_descr) + jump(i0) + """, nonstd_descr=nonstd_descr) + + def test_rewrite_assembler_maximal_size_1(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_array(103, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(bdescr.basesize + 104)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_maximal_size_2(self): + self.gc_ll_descr.max_size_of_young_obj = 300 + self.check_rewrite(""" + [] + p0 = new_array(101, descr=bdescr) + p1 = new_array(102, descr=bdescr) # two new_arrays can be combined + p2 = new_array(103, descr=bdescr) # but not all three + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(2 * (bdescr.basesize + 104))d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 101, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 104)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 102, descr=blendescr) + p2 = call_malloc_nursery( \ + %(bdescr.basesize + 104)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_huge_size(self): + # "huge" is defined as "larger than 0xffffff bytes, or 16MB" + self.check_rewrite(""" + [] + p0 = new_array(20000000, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, 20000000, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(104) # rounded up + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_new_with_vtable_too_big(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_rewrite_assembler_newstr_newunicode(self): + self.check_rewrite(""" + [i2] + p0 = newstr(14) + p1 = newunicode(10) + p2 = newunicode(i2) + p3 = newstr(i2) + jump() + """, """ + [i2] + p0 = call_malloc_nursery( \ + %(strdescr.basesize + 16 * strdescr.itemsize + \ + unicodedescr.basesize + 10 * unicodedescr.itemsize)d) + setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr) + setfield_gc(p0, 14, descr=strlendescr) + p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) + setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) + setfield_gc(p1, 10, descr=unicodelendescr) + p2 = call_malloc_gc(ConstClass(malloc_unicode), i2, \ + descr=malloc_unicode_descr) + p3 = call_malloc_gc(ConstClass(malloc_str), i2, \ + descr=malloc_str_descr) + jump() + """) + + def test_write_barrier_before_setfield_gc(self): + 
self.check_rewrite(""" + [p1, p2] + setfield_gc(p1, p2, descr=tzdescr) + jump() + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setfield_raw(p1, p2, descr=tzdescr) + jump() + """) + + def test_write_barrier_before_array_without_from_array(self): + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: False) + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_short_array(self): + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(129, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 129 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 129, descr=clendescr) + call(123456) + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_long_array(self): + # the limit of "being too long" is fixed, arbitrarily, at 130 + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(130, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 130 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 130, descr=clendescr) + call(123456) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_unknown_array(self): + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_label_makes_size_unknown(self): + self.check_rewrite(""" + [i2, p3] + p1 = new_array(5, descr=cdescr) + label(p1, i2, p3) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 5, descr=clendescr) + label(p1, i2, p3) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_setinteriorfield_gc(self): + S1 = lltype.GcStruct('S1') + INTERIOR = lltype.GcArray(('z', lltype.Ptr(S1))) + interiordescr = get_array_descr(self.gc_ll_descr, INTERIOR) + interiordescr.tid = 1291 + interiorlendescr = interiordescr.lendescr + interiorzdescr = get_interiorfield_descr(self.gc_ll_descr, + INTERIOR, 'z') + self.check_rewrite(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, interiorzdescr=interiorzdescr) + + def test_initialization_store(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_2(self): + self.check_rewrite(""" + [] + p0 = new(descr=tdescr) + p1 = 
new(descr=sdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(tdescr.size + sdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = int_add(p0, %(tdescr.size)d) + setfield_gc(p1, 1234, descr=tiddescr) + # <<>> + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_array(self): + self.check_rewrite(""" + [p1, i2] + p0 = new_array(5, descr=cdescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """, """ + [p1, i2] + p0 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p0, 8111, descr=tiddescr) + setfield_gc(p0, 5, descr=clendescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """) + + def test_non_initialization_store(self): + self.check_rewrite(""" + [i0] + p0 = new(descr=tdescr) + p1 = newstr(i0) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [i0] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = call_malloc_gc(ConstClass(malloc_str), i0, \ + descr=malloc_str_descr) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) + + def test_non_initialization_store_label(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + label(p0, p1) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + label(p0, p1) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -1,5 +1,6 @@ from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.jit.metainterp import history +from pypy.rpython.lltypesystem import lltype class AbstractCPU(object): @@ -84,24 +85,21 @@ """Print a disassembled version of looptoken to stdout""" raise NotImplementedError - def execute_token(self, looptoken): - """Execute the generated code referenced by the looptoken. + def execute_token(self, looptoken, *args): + """NOT_RPYTHON (for tests only) + Execute the generated code referenced by the looptoken. Returns the descr of the last executed operation: either the one attached to the failing guard, or the one attached to the FINISH. - Use set_future_value_xxx() before, and get_latest_value_xxx() after. + Use get_latest_value_xxx() afterwards to read the result(s). """ - raise NotImplementedError + argtypes = [lltype.typeOf(x) for x in args] + execute = self.make_execute_token(*argtypes) + return execute(looptoken, *args) - def set_future_value_int(self, index, intvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_float(self, index, floatvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_ref(self, index, objvalue): - """Set the value for the index'th argument for the loop to run.""" + def make_execute_token(self, *argtypes): + """Must make and return an execute_token() function that will be + called with the given argtypes. 
+ """ raise NotImplementedError def get_latest_value_int(self, index): @@ -183,38 +181,35 @@ lst[n] = None self.fail_descr_free_list.extend(faildescr_indices) - @staticmethod - def sizeof(S): + def sizeof(self, S): raise NotImplementedError - @staticmethod - def fielddescrof(S, fieldname): + def fielddescrof(self, S, fieldname): """Return the Descr corresponding to field 'fieldname' on the structure 'S'. It is important that this function (at least) caches the results.""" raise NotImplementedError - @staticmethod - def arraydescrof(A): + def interiorfielddescrof(self, A, fieldname): raise NotImplementedError - @staticmethod - def calldescrof(FUNC, ARGS, RESULT): + def interiorfielddescrof_dynamic(self, offset, width, fieldsize, is_pointer, + is_float, is_signed): + raise NotImplementedError + + def arraydescrof(self, A): + raise NotImplementedError + + def calldescrof(self, FUNC, ARGS, RESULT): # FUNC is the original function type, but ARGS is a list of types # with Voids removed raise NotImplementedError - @staticmethod - def methdescrof(SELFTYPE, methname): + def methdescrof(self, SELFTYPE, methname): # must return a subclass of history.AbstractMethDescr raise NotImplementedError - @staticmethod - def typedescrof(TYPE): - raise NotImplementedError - - @staticmethod - def interiorfielddescrof(A, fieldname): + def typedescrof(self, TYPE): raise NotImplementedError # ---------- the backend-dependent operations ---------- diff --git a/pypy/jit/backend/test/calling_convention_test.py b/pypy/jit/backend/test/calling_convention_test.py --- a/pypy/jit/backend/test/calling_convention_test.py +++ b/pypy/jit/backend/test/calling_convention_test.py @@ -2,7 +2,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, ConstInt, ConstPtr, BoxObj, Const, ConstObj, BoxFloat, ConstFloat) @@ -40,17 +40,18 @@ local_floats = list(floats) local_ints = list(ints) expected_result = 0.0 + arguments = [] for i in range(len(args)): x = args[i] if x[0] == 'f': x = local_floats.pop() t = longlong.getfloatstorage(x) - self.cpu.set_future_value_float(i, t) + arguments.append(t) else: x = local_ints.pop() - self.cpu.set_future_value_int(i, x) + arguments.append(x) expected_result += x - return expected_result + return arguments, expected_result @classmethod def get_funcbox(cls, cpu, func_ptr): @@ -107,12 +108,12 @@ ops += 'finish(f99, %s)\n' % arguments loop = parse(ops, namespace=locals()) - looptoken = LoopToken() + looptoken = JitCellToken() done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - expected_result = self._prepare_args(args, floats, ints) + argvals, expected_result = self._prepare_args(args, floats, ints) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(x - expected_result) < 0.0001 @@ -253,13 +254,13 @@ called_ops += 'finish(f%d, descr=fdescr3)\n' % total_index # compile called loop called_loop = parse(called_ops, namespace=locals()) - called_looptoken = LoopToken() + called_looptoken = JitCellToken() called_looptoken.outermost_jitdriver_sd = FakeJitDriverSD() done_number = self.cpu.get_fail_descr_number(called_loop.operations[-1].getdescr()) self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) - expected_result = self._prepare_args(args, floats, ints) - res = cpu.execute_token(called_looptoken) + argvals, expected_result = 
self._prepare_args(args, floats, ints) + res = cpu.execute_token(called_looptoken, *argvals) assert res.identifier == 3 t = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(t - expected_result) < 0.0001 @@ -284,12 +285,12 @@ # we want to take the fast path self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # prepare call to called_loop - self._prepare_args(args, floats, ints) - res = cpu.execute_token(othertoken) + argvals, _ = self._prepare_args(args, floats, ints) + res = cpu.execute_token(othertoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert res.identifier == 4 assert abs(x - expected_result) < 0.0001 diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -3,7 +3,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, TargetToken, ConstInt, ConstPtr, BoxObj, ConstObj, BoxFloat, ConstFloat) @@ -32,22 +32,19 @@ result_type, valueboxes, descr) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - j = 0 + args = [] for box in inputargs: if isinstance(box, BoxInt): - self.cpu.set_future_value_int(j, box.getint()) - j += 1 + args.append(box.getint()) elif isinstance(box, (BoxPtr, BoxObj)): - self.cpu.set_future_value_ref(j, box.getref_base()) - j += 1 + args.append(box.getref_base()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(j, box.getfloatstorage()) - j += 1 + args.append(box.getfloatstorage()) else: raise NotImplementedError(box) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *args) if res is operations[-1].getdescr(): self.guard_failed = False else: @@ -106,10 +103,9 @@ ResOperation(rop.FINISH, [i1], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) res = self.cpu.get_latest_value_int(0) assert res == 3 assert fail.identifier == 1 @@ -118,19 +114,20 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 10 @@ -139,19 +136,22 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), 
ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 44) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(2) assert res == 10 @@ -162,15 +162,17 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = weakref.ref(operations[2]) self.cpu.compile_loop(inputargs, operations, looptoken) @@ -190,15 +192,17 @@ i2 = BoxInt() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -206,14 +210,13 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -226,17 +229,21 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() + i3 = BoxInt() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -244,14 +251,13 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, 
descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -261,19 +267,20 @@ i1 = BoxInt() i2 = BoxInt() faildescr1 = BasicFailDescr(1) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + operations[3].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail is faildescr1 count = self.cpu.get_latest_value_count() @@ -290,18 +297,17 @@ return AbstractFailDescr.__setattr__(self, name, value) py.test.fail("finish descrs should not be touched") faildescr = UntouchableFailDescr() # to check that is not touched - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [i0], None, descr=faildescr) ] self.cpu.compile_loop([i0], operations, looptoken) - self.cpu.set_future_value_int(0, 99) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 99) assert fail is faildescr res = self.cpu.get_latest_value_int(0) assert res == 99 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [ConstInt(42)], None, descr=faildescr) ] @@ -311,7 +317,7 @@ res = self.cpu.get_latest_value_int(0) assert res == 42 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [], None, descr=faildescr) ] @@ -320,20 +326,19 @@ assert fail is faildescr if self.cpu.supports_floats: - looptoken = LoopToken() + looptoken = JitCellToken() f0 = BoxFloat() operations = [ ResOperation(rop.FINISH, [f0], None, descr=faildescr) ] self.cpu.compile_loop([f0], operations, looptoken) value = longlong.getfloatstorage(-61.25) - self.cpu.set_future_value_float(0, value) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, value) assert fail is faildescr res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == -61.25 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [constfloat(42.5)], None, descr=faildescr) ] @@ -350,20 +355,20 @@ z = BoxInt(579) t = BoxInt(455) u = BoxInt(0) # False - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [y, x], None, descr=targettoken), ResOperation(rop.INT_ADD, [x, y], z), ResOperation(rop.INT_SUB, [y, ConstInt(1)], t), ResOperation(rop.INT_EQ, [t, ConstInt(0)], u), ResOperation(rop.GUARD_FALSE, [u], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [z, t], None, descr=looptoken), + ResOperation(rop.JUMP, [t, z], None, descr=targettoken), ] operations[-2].setfailargs([t, z]) cpu.compile_loop([x, y], operations, 
looptoken) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, 0, 10) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_int(1) == 55 @@ -419,14 +424,12 @@ ] ops[1].setfailargs([v_res]) # - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([v1, v2], ops, looptoken) for x, y, z in testcases: excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, x) - self.cpu.set_future_value_int(1, y) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, x, y) if (z == boom) ^ reversed: assert fail.identifier == 1 else: @@ -1082,16 +1085,18 @@ inputargs.insert(index_counter, i0) jumpargs.insert(index_counter, i1) # - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() faildescr = BasicFailDescr(15) operations = [ + ResOperation(rop.LABEL, inputargs, None, descr=targettoken), ResOperation(rop.INT_SUB, [i0, ConstInt(1)], i1), ResOperation(rop.INT_GE, [i1, ConstInt(0)], i2), ResOperation(rop.GUARD_TRUE, [i2], None), - ResOperation(rop.JUMP, jumpargs, None, descr=looptoken), + ResOperation(rop.JUMP, jumpargs, None, descr=targettoken), ] - operations[2].setfailargs(inputargs[:]) - operations[2].setdescr(faildescr) + operations[3].setfailargs(inputargs[:]) + operations[3].setdescr(faildescr) # self.cpu.compile_loop(inputargs, operations, looptoken) # @@ -1109,17 +1114,7 @@ assert 0 values[index_counter] = 11 # - for i, (box, val) in enumerate(zip(inputargs, values)): - if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, val) - elif isinstance(box, BoxPtr): - self.cpu.set_future_value_ref(i, val) - elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, val) - else: - assert 0 - # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *values) assert fail.identifier == 15 # dstvalues = values[:] @@ -1149,30 +1144,33 @@ py.test.skip("requires floats") fboxes = [BoxFloat() for i in range(12)] i2 = BoxInt() + targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) operations = [ + ResOperation(rop.LABEL, fboxes, None, descr=targettoken), ResOperation(rop.FLOAT_LE, [fboxes[0], constfloat(9.2)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), ResOperation(rop.FINISH, fboxes, None, descr=faildescr2), ] operations[-2].setfailargs(fboxes) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(fboxes, operations, looptoken) fboxes2 = [BoxFloat() for i in range(12)] f3 = BoxFloat() bridge = [ ResOperation(rop.FLOAT_SUB, [fboxes2[0], constfloat(1.0)], f3), - ResOperation(rop.JUMP, [f3] + fboxes2[1:], None, descr=looptoken), + ResOperation(rop.JUMP, [f3]+fboxes2[1:], None, descr=targettoken), ] self.cpu.compile_bridge(faildescr1, fboxes2, bridge, looptoken) + args = [] for i in range(len(fboxes)): x = 13.5 + 6.73 * i - self.cpu.set_future_value_float(i, longlong.getfloatstorage(x)) - fail = self.cpu.execute_token(looptoken) + args.append(longlong.getfloatstorage(x)) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 2 res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == 8.5 @@ -1214,7 +1212,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # 
cpu = self.cpu @@ -1222,14 +1220,12 @@ if test1 == -42 or combinaison[0] == 'b': for test2 in [-65, -42, -11]: if test2 == -42 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_int(n, test1) - n += 1 + args.append(test1) if combinaison[1] == 'b': - cpu.set_future_value_int(n, test2) - n += 1 - fail = cpu.execute_token(looptoken) + args.append(test2) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1271,7 +1267,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1281,16 +1277,14 @@ if test1 == -4.5 or combinaison[0] == 'b': for test2 in [-6.5, -4.5, -2.5, nan]: if test2 == -4.5 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test1)) - n += 1 + args.append( + longlong.getfloatstorage(test1)) if combinaison[1] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test2)) - n += 1 - fail = cpu.execute_token(looptoken) + args.append( + longlong.getfloatstorage(test2)) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1330,19 +1324,20 @@ faildescr = BasicFailDescr(1) operations.append(ResOperation(rop.FINISH, [], None, descr=faildescr)) - looptoken = LoopToken() + looptoken = JitCellToken() # self.cpu.compile_loop(inputargs, operations, looptoken) # - for i, box in enumerate(inputargs): + args = [] + for box in inputargs: if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, box.getint()) + args.append(box.getint()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, box.getfloatstorage()) + args.append(box.getfloatstorage()) else: assert 0 # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 1 def test_nan_and_infinity(self): @@ -1400,15 +1395,14 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(5))] operations[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) self.cpu.compile_loop(unique_testcase_list, operations, looptoken) - for i, box in enumerate(unique_testcase_list): - self.cpu.set_future_value_float( - i, box.getfloatstorage()) - fail = self.cpu.execute_token(looptoken) + args = [box.getfloatstorage() + for box in unique_testcase_list] + fail = self.cpu.execute_token(looptoken, *args) if fail.identifier != 5 - (expected_id^expected): if fail.identifier == 4: msg = "was taken" @@ -1675,15 +1669,14 @@ exc_tp = xtp exc_ptr = xptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_ref(1) == xptr excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1700,9 +1693,9 @@ exc_tp = ytp exc_ptr = yptr loop = 
parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == yptr @@ -1718,14 +1711,13 @@ finish(0) ''' loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == xptr - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 0 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1895,18 +1887,14 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 20 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 10 @@ -1940,18 +1928,14 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 42 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 42 @@ -1986,19 +1970,15 @@ ResOperation(rop.FINISH, [f2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, f2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 42.5 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 x = self.cpu.get_latest_value_float(1) @@ -2031,10 +2011,9 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] 
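These tests exercise the new calling convention from pypy/jit/backend/model.py above: the per-index set_future_value_int/ref/float() setters are gone, and execute_token() now receives the loop arguments directly (the default execute_token(), meant for tests, derives the lltypes of the arguments and delegates to make_execute_token()). For a loop taking two machine integers the pattern is roughly:

    looptoken = JitCellToken()
    cpu.compile_loop(inputargs, operations, looptoken)
    # previously:
    #   cpu.set_future_value_int(0, 20)
    #   cpu.set_future_value_int(1, 0)
    #   fail = cpu.execute_token(looptoken)
    # now:
    fail = cpu.execute_token(looptoken, 20, 0)
    assert fail.identifier == 0
    res = cpu.get_latest_value_int(0)   # results are still read this way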
ops[1].setfailargs([i1, i2]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1], ops, looptoken) - self.cpu.set_future_value_int(0, ord('G')) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, ord('G')) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == ord('g') @@ -2091,14 +2070,14 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1, i2, i3], ops, looptoken) - self.cpu.set_future_value_int(0, rffi.cast(lltype.Signed, raw)) - self.cpu.set_future_value_int(1, 2) From noreply at buildbot.pypy.org Mon Dec 19 18:23:01 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 18:23:01 +0100 (CET) Subject: [pypy-commit] pypy generator-in-rpython: Write and test pieces of the final solution Message-ID: <20111219172301.EDEB1823F8@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: generator-in-rpython Changeset: r50713:6fa43287fca0 Date: 2011-12-19 17:13 +0100 http://bitbucket.org/pypy/pypy/changeset/6fa43287fca0/ Log: Write and test pieces of the final solution diff --git a/pypy/translator/generator.py b/pypy/translator/generator.py new file mode 100644 --- /dev/null +++ b/pypy/translator/generator.py @@ -0,0 +1,136 @@ +from pypy.objspace.flow.model import Block, Link, SpaceOperation, checkgraph +from pypy.objspace.flow.model import Variable, Constant, FunctionGraph +from pypy.translator.unsimplify import insert_empty_startblock +from pypy.translator.unsimplify import split_block +from pypy.translator.simplify import eliminate_empty_blocks + + +class AbstractPosition(object): + _immutable_ = True + _attrs_ = () + + +def replace_graph_with_bootstrap(graph, graph_of_body, Entry): + # + class GeneratorIterator(object): + graph = graph_of_body + def __init__(self, entry): + self.current = entry + GeneratorIterator.Entry = Entry + # + newblock = Block(graph.startblock.inputargs) + v_generator = Variable('generator') + v_entry = Variable('entry') + newblock.operations.append( + SpaceOperation('simple_call', [Constant(Entry)], v_entry)) + assert len(graph.startblock.inputargs) == len(Entry.varnames) + for v, name in zip(graph.startblock.inputargs, Entry.varnames): + newblock.operations.append( + SpaceOperation('setattr', [v_entry, Constant(name), v], + Variable())) + newblock.operations.append( + SpaceOperation('simple_call', [Constant(GeneratorIterator), v_entry], + v_generator)) + newblock.closeblock(Link([v_generator], graph.returnblock)) + graph.startblock = newblock + return GeneratorIterator + +def get_variable_names(variables): + seen = set() + result = [] + for v in variables: + name = v._name.strip('_') + while name in seen: + name += '_' + result.append('g_' + name) + seen.add(name) + return result + +def _insert_reads(block, varnames): + assert len(varnames) == len(block.inputargs) + v_entry1 = Variable('entry') + for i, name in enumerate(varnames): + block.operations.insert(i, + SpaceOperation('getattr', [v_entry1, Constant(name)], + block.inputargs[i])) + block.inputargs = [v_entry1] + +def tweak_generator_body_graph(graph): + assert graph.startblock.operations[0].opname == 'generator_mark' + graph.startblock.operations.pop(0) + # + entryvarnames = get_variable_names(graph.startblock.inputargs) + insert_empty_startblock(None, graph) + _insert_reads(graph.startblock, entryvarnames) + # + class Entry(AbstractPosition): + block = graph.startblock + varnames = entryvarnames 
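For reference, the bootstrap graph built by replace_graph_with_bootstrap() above corresponds to hand-written code of roughly this shape, for a generator taking arguments n, x, y, z and with the attribute names produced by get_variable_names():

    def f(n, x, y, z):
        e = Entry()          # the generated Entry subclass of AbstractPosition
        e.g_n = n
        e.g_x = x
        e.g_y = y
        e.g_z = z
        return GeneratorIterator(e)

This is only a sketch; the real transformation edits the flow graph directly and never produces source code.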
+ mappings = [Entry] + # + for block in list(graph.iterblocks()): + for exit in block.exits: + if exit.target is graph.returnblock: + exit.args = [Constant(StopIteration), + Constant(StopIteration())] + exit.target = graph.exceptblock + for index in range(len(block.operations)-1, -1, -1): + op = block.operations[index] + if op.opname == 'yield': + [v_yielded_value] = op.args + del block.operations[index] + newlink = split_block(None, block, index) + newblock = newlink.target + # + class Resume(AbstractPosition): + block = newblock + Resume.__name__ = 'Resume%d' % len(mappings) + mappings.append(Resume) + varnames = get_variable_names(newlink.args) + # + _insert_reads(newblock, varnames) + # + v_resume = Variable('resume') + block.operations.append( + SpaceOperation('simple_call', [Constant(Resume)], + v_resume)) + for i, name in enumerate(varnames): + block.operations.append( + SpaceOperation('setattr', [v_resume, Constant(name), + newlink.args[i]], + Variable())) + v_pair = Variable('pair') + block.operations.append( + SpaceOperation('newtuple', [v_resume, v_yielded_value], + v_pair)) + newlink.args = [v_pair] + newlink.target = graph.returnblock + # + regular_entry_block = Block([Variable('entry')]) + block = regular_entry_block + for Resume in mappings: + v_check = Variable() + block.operations.append( + SpaceOperation('simple_call', [Constant(isinstance), + block.inputargs[0], + Constant(Resume)], + v_check)) + block.exitswitch = v_check + link1 = Link([block.inputargs[0]], Resume.block) + link1.exitcase = True + nextblock = Block([Variable('entry')]) + link2 = Link([block.inputargs[0]], nextblock) + link2.exitcase = False + block.closeblock(link1, link2) + block = nextblock + block.closeblock(Link([Constant(AssertionError), + Constant(AssertionError("bad generator class"))], + graph.exceptblock)) + graph.startblock = regular_entry_block + checkgraph(graph) + eliminate_empty_blocks(graph) + try: + graph.func._always_inline_ = True + except AttributeError: + pass + return Entry diff --git a/pypy/translator/test/test_generator.py b/pypy/translator/test/test_generator.py --- a/pypy/translator/test/test_generator.py +++ b/pypy/translator/test/test_generator.py @@ -4,6 +4,7 @@ from pypy.translator.translator import TranslationContext from pypy.translator.generator import replace_graph_with_bootstrap from pypy.translator.generator import get_variable_names +from pypy.translator.generator import tweak_generator_body_graph # ____________________________________________________________ @@ -70,66 +71,37 @@ space = FlowObjSpace() graph = space.build_flow(func) assert graph.startblock.operations[0].opname == 'generator_mark' - replace_graph_with_bootstrap(graph, 'newgraph') + class Entry: + varnames = ['g_n', 'g_x', 'g_y', 'g_z'] + replace_graph_with_bootstrap(graph, 'newgraph', Entry) if option.view: graph.show() block = graph.startblock ops = block.operations - assert ops[0].opname == 'call' # e = Entry1() - assert ops[1].opname == 'setattr' # e.g_n = n + assert ops[0].opname == 'simple_call' # e = Entry1() + assert ops[1].opname == 'setattr' # e.g_n = n assert ops[1].args[1].value == 'g_n' - assert ops[2].opname == 'setattr' # e.g_x = x + assert ops[2].opname == 'setattr' # e.g_x = x assert ops[2].args[1].value == 'g_x' - assert ops[3].opname == 'setattr' # e.g_y = y + assert ops[3].opname == 'setattr' # e.g_y = y assert ops[3].args[1].value == 'g_y' - assert ops[4].opname == 'setattr' # e.g_z = z + assert ops[4].opname == 'setattr' # e.g_z = z assert ops[4].args[1].value == 'g_z' - assert 
ops[5].opname == 'call' # g = GeneratorIterator(e) + assert ops[5].opname == 'simple_call' # g = GeneratorIterator(e) assert ops[5].args[1] == ops[0].result assert len(ops) == 6 assert len(block.exits) == 1 assert block.exits[0].target is graph.returnblock - def test_make_generator_body_graph(self): + def test_tweak_generator_body_graph(self): def f(n, x, y, z): z *= 10 - yield n + yield n + 1 z -= 10 # - def f__next(generator): - n = generator.n_0 - x = generator.x_0 - y = generator.y_0 - z = generator.z_0 - e = generator.current - generator.current = None - if isinstance(e, "some class"): - xxx - # space = FlowObjSpace() - graph = space.build_flow(func) - newgraph = make_generator_body_graph(graph) - assert len(newgraph.startblock.inputargs) == 1 - [v_generator] = newgraph.startblock.inputargs - ops = newgraph.startblock.operations - assert ops[0].opname == 'getattr' # n = g.n_0 - assert ops[0].args[0] == v_generator - assert ops[0].args[1].value.startswith('n_') - assert ops[1].opname == 'getattr' # x = g.x_0 - assert ops[1].args[0] == v_generator - assert ops[1].args[1].value.startswith('x_') - assert ops[2].opname == 'getattr' # y = g.y_0 - assert ops[2].args[0] == v_generator - assert ops[2].args[1].value.startswith('y_') - assert ops[3].opname == 'getattr' # z = g.z_0 - assert ops[3].args[0] == v_generator - assert ops[3].args[1].value.startswith('z_') - assert ops[4].opname == 'getattr' # e = g.current - assert ops[4].args[0] == v_generator - assert ops[4].args[1].value == 'current' - assert ops[5].opname == 'setattr' # g.current = None - assert ops[5].args[0] == v_generator - assert ops[5].args[1].value == 'current' - assert ops[6].opname == 'call' # isinstance(e, Yield1) - assert ops[6].args[0].value == isinstance - assert len(ops) == 7 + graph = space.build_flow(f) + tweak_generator_body_graph(graph) + if option.view: + graph.show() + # XXX how to test directly that the graph is correct? :-( From noreply at buildbot.pypy.org Mon Dec 19 18:23:03 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 18:23:03 +0100 (CET) Subject: [pypy-commit] pypy generator-in-rpython: Finish the replacement of the flow graph of generators. The behavior of Message-ID: <20111219172303.2645E823F8@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: generator-in-rpython Changeset: r50714:a4906cedba52 Date: 2011-12-19 18:00 +0100 http://bitbucket.org/pypy/pypy/changeset/a4906cedba52/ Log: Finish the replacement of the flow graph of generators. The behavior of the tweaked flow graphs is not tested so far; needs some RPython tests. 
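To make the transformation in these two changesets concrete, here is a rough, hand-written Python-level sketch of what the flow-graph rewrite amounts to for a small generator. It is only an illustration: the real work happens on flow-graph blocks, the example function and the helper name f__next are invented for this sketch, and only the naming scheme (Entry, Resume*, g_-prefixed fields) mirrors what generator.py actually produces.

    # the generator as written by the RPython programmer
    def f(n):
        z = n * 10
        yield n + 1
        z -= 10

    # roughly what the rewritten graphs compute
    class Entry(object):              # position "at the start", carries the arguments
        def __init__(self, n):
            self.g_n = n

    class Resume0(object):            # position "just after the first yield"
        def __init__(self, z):
            self.g_z = z

    class GeneratorIterator(object):
        def __init__(self, entry):
            self.current = entry
        def next(self):
            entry = self.current
            self.current = None
            next_entry, value = f__next(entry)   # second copy of the graph
            self.current = next_entry
            return value

    def f__next(entry):                  # the tweaked body graph
        if isinstance(entry, Entry):     # entry block: unpack arguments, run to the yield
            n = entry.g_n
            z = n * 10
            return Resume0(z), n + 1     # pair (new position, yielded value)
        elif isinstance(entry, Resume0): # resume after the yield, fall off the end
            z = entry.g_z
            z -= 10
            raise StopIteration
        raise AssertionError("bad generator class")

    def f_bootstrap(n):                  # what the original graph is replaced with
        return GeneratorIterator(Entry(n))

Calling f_bootstrap(3).next() returns 4 and a second call raises StopIteration, which is exactly the dispatch that tweak_generator_body_graph builds out of a chain of isinstance checks.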
diff --git a/pypy/objspace/flow/objspace.py b/pypy/objspace/flow/objspace.py --- a/pypy/objspace/flow/objspace.py +++ b/pypy/objspace/flow/objspace.py @@ -248,7 +248,7 @@ return ecls return None - def build_flow(self, func, constargs={}): + def build_flow(self, func, constargs={}, tweak_for_generator=True): """ """ if func.func_doc and func.func_doc.lstrip().startswith('NOT_RPYTHON'): @@ -291,6 +291,11 @@ e = error.FlowingError(formated) raise error.FlowingError, e, tb checkgraph(graph) + # + if is_generator and tweak_for_generator: + from pypy.translator.generator import tweak_generator_graph + tweak_generator_graph(graph) + # return graph def fixedview(self, w_tuple, expected_length=None): diff --git a/pypy/objspace/flow/test/test_generator.py b/pypy/objspace/flow/test/test_generator.py --- a/pypy/objspace/flow/test/test_generator.py +++ b/pypy/objspace/flow/test/test_generator.py @@ -10,7 +10,7 @@ yield i yield i i += 1 - graph = self.codetest(f) + graph = self.codetest(f, tweak_for_generator=False) ops = self.all_operations(graph) assert ops == {'generator_mark': 1, 'lt': 1, 'is_true': 1, diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -16,14 +16,14 @@ is_operator = getattr(operator, 'is_', operator.eq) # it's not there 2.2 class Base: - def codetest(self, func): + def codetest(self, func, **kwds): import inspect try: func = func.im_func except AttributeError: pass #name = func.func_name - graph = self.space.build_flow(func) + graph = self.space.build_flow(func, **kwds) graph.source = inspect.getsource(func) self.show(graph) return graph diff --git a/pypy/translator/generator.py b/pypy/translator/generator.py --- a/pypy/translator/generator.py +++ b/pypy/translator/generator.py @@ -3,6 +3,7 @@ from pypy.translator.unsimplify import insert_empty_startblock from pypy.translator.unsimplify import split_block from pypy.translator.simplify import eliminate_empty_blocks +from pypy.tool.sourcetools import func_with_new_name class AbstractPosition(object): @@ -10,14 +11,32 @@ _attrs_ = () -def replace_graph_with_bootstrap(graph, graph_of_body, Entry): - # +def tweak_generator_graph(graph): + if not hasattr(graph.func, '_generator_next_method_of_'): + # This is the first copy of the graph. We replace it with + # a small bootstrap graph. + GeneratorIterator = make_generatoriterator_class(graph) + replace_graph_with_bootstrap(GeneratorIterator, graph) + # We attach a 'next' method to the GeneratorIterator class + # that will invoke the real function, based on a second + # copy of the graph. + attach_next_method(GeneratorIterator, graph) + else: + # This is the second copy of the graph. Tweak it. 
+ GeneratorIterator = graph.func._generator_next_method_of_ + tweak_generator_body_graph(GeneratorIterator.Entry, graph) + + +def make_generatoriterator_class(graph): class GeneratorIterator(object): - graph = graph_of_body + class Entry(AbstractPosition): + varnames = get_variable_names(graph.startblock.inputargs) def __init__(self, entry): self.current = entry - GeneratorIterator.Entry = Entry - # + return GeneratorIterator + +def replace_graph_with_bootstrap(GeneratorIterator, graph): + Entry = GeneratorIterator.Entry newblock = Block(graph.startblock.inputargs) v_generator = Variable('generator') v_entry = Variable('entry') @@ -33,7 +52,21 @@ v_generator)) newblock.closeblock(Link([v_generator], graph.returnblock)) graph.startblock = newblock - return GeneratorIterator + +def attach_next_method(GeneratorIterator, graph): + func = graph.func + func = func_with_new_name(func, '%s__next' % (func.func_name,)) + func._generator_next_method_of_ = GeneratorIterator + func._always_inline_ = True + # + def next(self): + entry = self.current + self.current = None + (next_entry, return_value) = func(entry) + self.current = next_entry + return return_value + GeneratorIterator.next = next + return func # for debugging def get_variable_names(variables): seen = set() @@ -55,17 +88,14 @@ block.inputargs[i])) block.inputargs = [v_entry1] -def tweak_generator_body_graph(graph): +def tweak_generator_body_graph(Entry, graph): assert graph.startblock.operations[0].opname == 'generator_mark' graph.startblock.operations.pop(0) # - entryvarnames = get_variable_names(graph.startblock.inputargs) insert_empty_startblock(None, graph) - _insert_reads(graph.startblock, entryvarnames) + _insert_reads(graph.startblock, Entry.varnames) + Entry.block = graph.startblock # - class Entry(AbstractPosition): - block = graph.startblock - varnames = entryvarnames mappings = [Entry] # for block in list(graph.iterblocks()): @@ -129,8 +159,3 @@ graph.startblock = regular_entry_block checkgraph(graph) eliminate_empty_blocks(graph) - try: - graph.func._always_inline_ = True - except AttributeError: - pass - return Entry diff --git a/pypy/translator/test/test_generator.py b/pypy/translator/test/test_generator.py --- a/pypy/translator/test/test_generator.py +++ b/pypy/translator/test/test_generator.py @@ -2,9 +2,12 @@ from pypy.objspace.flow.objspace import FlowObjSpace from pypy.objspace.flow.model import Variable from pypy.translator.translator import TranslationContext +from pypy.translator.generator import make_generatoriterator_class from pypy.translator.generator import replace_graph_with_bootstrap from pypy.translator.generator import get_variable_names from pypy.translator.generator import tweak_generator_body_graph +from pypy.translator.generator import attach_next_method +from pypy.translator.simplify import join_blocks # ____________________________________________________________ @@ -69,11 +72,10 @@ yield n # space = FlowObjSpace() - graph = space.build_flow(func) + graph = space.build_flow(func, tweak_for_generator=False) assert graph.startblock.operations[0].opname == 'generator_mark' - class Entry: - varnames = ['g_n', 'g_x', 'g_y', 'g_z'] - replace_graph_with_bootstrap(graph, 'newgraph', Entry) + GeneratorIterator = make_generatoriterator_class(graph) + replace_graph_with_bootstrap(GeneratorIterator, graph) if option.view: graph.show() block = graph.startblock @@ -100,8 +102,51 @@ z -= 10 # space = FlowObjSpace() - graph = space.build_flow(f) - tweak_generator_body_graph(graph) + graph = space.build_flow(f, 
tweak_for_generator=False) + class Entry: + varnames = ['g_n', 'g_x', 'g_y', 'g_z'] + tweak_generator_body_graph(Entry, graph) if option.view: graph.show() # XXX how to test directly that the graph is correct? :-( + + def test_tweak_generator_graph(self): + def f(n, x, y, z): + z *= 10 + yield n + 1 + z -= 10 + # + space = FlowObjSpace() + graph = space.build_flow(f, tweak_for_generator=False) + GeneratorIterator = make_generatoriterator_class(graph) + replace_graph_with_bootstrap(GeneratorIterator, graph) + func1 = attach_next_method(GeneratorIterator, graph) + if option.view: + graph.show() + # + assert func1._generator_next_method_of_ is GeneratorIterator + assert hasattr(GeneratorIterator, 'next') + # + graph_next = space.build_flow(GeneratorIterator.next.im_func) + join_blocks(graph_next) + if option.view: + graph_next.show() + # + graph1 = space.build_flow(func1, tweak_for_generator=False) + tweak_generator_body_graph(GeneratorIterator.Entry, graph1) + if option.view: + graph1.show() + + def test_automatic(self): + def f(n, x, y, z): + z *= 10 + yield n + 1 + z -= 10 + # + space = FlowObjSpace() + graph = space.build_flow(f) # tweak_for_generator=True + if option.view: + graph.show() + block = graph.startblock + assert len(block.exits) == 1 + assert block.exits[0].target is graph.returnblock From noreply at buildbot.pypy.org Mon Dec 19 18:23:04 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Dec 2011 18:23:04 +0100 (CET) Subject: [pypy-commit] pypy generator-in-rpython: Generators work, at least in this simple test and by calling .next() Message-ID: <20111219172304.54635823F8@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: generator-in-rpython Changeset: r50715:1b3b2e3f0e18 Date: 2011-12-19 18:22 +0100 http://bitbucket.org/pypy/pypy/changeset/1b3b2e3f0e18/ Log: Generators work, at least in this simple test and by calling .next() explicitly. 
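For comparison, here is what this buys at the RPython source level: until now a helper like the g() in the test_generator.py test added below had to be spelled out as an explicit iterator class. A hand-rolled equivalent (not what the translator generates, just the boilerplate this branch makes unnecessary) looks like:

    class GIter(object):
        def __init__(self, a, b, c):
            self.a = a
            self.b = b
            self.c = c
            self.state = 0
        def next(self):
            state = self.state
            self.state = state + 1
            if state == 0:
                return self.a
            elif state == 1:
                return self.b
            elif state == 2:
                return self.c
            raise StopIteration

    def f():
        gen = GIter(3, 5, 8)
        x = gen.next() * 100
        x += gen.next() * 10
        x += gen.next()
        return x        # 358, same result as the generator-based test

With the tweaked graphs, plus the Signature(['entry']) hack for the annotator in the description.py change below, the plain generator version of g() now passes the same test when .next() is called explicitly.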
diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -180,7 +180,12 @@ if name is None: name = pyobj.func_name if signature is None: - signature = cpython_code_signature(pyobj.func_code) + if hasattr(pyobj, '_generator_next_method_of_'): + from pypy.interpreter.argument import Signature + signature = Signature(['entry']) # haaaaaack + defaults = () + else: + signature = cpython_code_signature(pyobj.func_code) if defaults is None: defaults = pyobj.func_defaults self.name = name diff --git a/pypy/rpython/test/test_generator.py b/pypy/rpython/test/test_generator.py new file mode 100644 --- /dev/null +++ b/pypy/rpython/test/test_generator.py @@ -0,0 +1,25 @@ +from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin + + +class BaseTestGenerator(BaseRtypingTest): + + def test_simple_explicit(self): + def g(a, b, c): + yield a + yield b + yield c + def f(): + gen = g(3, 5, 8) + x = gen.next() * 100 + x += gen.next() * 10 + x += gen.next() + return x + res = self.interpret(f, []) + assert res == 358 + + +class TestLLtype(BaseTestGenerator, LLRtypeMixin): + pass + +class TestOOtype(BaseTestGenerator, OORtypeMixin): + pass diff --git a/pypy/translator/generator.py b/pypy/translator/generator.py --- a/pypy/translator/generator.py +++ b/pypy/translator/generator.py @@ -4,6 +4,7 @@ from pypy.translator.unsimplify import split_block from pypy.translator.simplify import eliminate_empty_blocks from pypy.tool.sourcetools import func_with_new_name +from pypy.interpreter.argument import Signature class AbstractPosition(object): @@ -30,6 +31,7 @@ def make_generatoriterator_class(graph): class GeneratorIterator(object): class Entry(AbstractPosition): + _immutable_ = True varnames = get_variable_names(graph.startblock.inputargs) def __init__(self, entry): self.current = entry @@ -113,6 +115,7 @@ newblock = newlink.target # class Resume(AbstractPosition): + _immutable_ = True block = newblock Resume.__name__ = 'Resume%d' % len(mappings) mappings.append(Resume) @@ -157,5 +160,7 @@ Constant(AssertionError("bad generator class"))], graph.exceptblock)) graph.startblock = regular_entry_block + graph.signature = Signature(['entry']) + graph.defaults = () checkgraph(graph) eliminate_empty_blocks(graph) diff --git a/pypy/translator/test/test_generator.py b/pypy/translator/test/test_generator.py --- a/pypy/translator/test/test_generator.py +++ b/pypy/translator/test/test_generator.py @@ -1,6 +1,7 @@ from pypy.conftest import option from pypy.objspace.flow.objspace import FlowObjSpace from pypy.objspace.flow.model import Variable +from pypy.interpreter.argument import Signature from pypy.translator.translator import TranslationContext from pypy.translator.generator import make_generatoriterator_class from pypy.translator.generator import replace_graph_with_bootstrap @@ -96,7 +97,7 @@ assert block.exits[0].target is graph.returnblock def test_tweak_generator_body_graph(self): - def f(n, x, y, z): + def f(n, x, y, z=3): z *= 10 yield n + 1 z -= 10 @@ -109,6 +110,9 @@ if option.view: graph.show() # XXX how to test directly that the graph is correct? 
:-( + assert len(graph.startblock.inputargs) == 1 + assert graph.signature == Signature(['entry']) + assert graph.defaults == () def test_tweak_generator_graph(self): def f(n, x, y, z): From noreply at buildbot.pypy.org Mon Dec 19 18:37:11 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Dec 2011 18:37:11 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: rpythonization and test_zjit fixes Message-ID: <20111219173711.92241823F8@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50716:44fdf6f32ded Date: 2011-12-19 19:36 +0200 http://bitbucket.org/pypy/pypy/changeset/44fdf6f32ded/ Log: rpythonization and test_zjit fixes diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -379,18 +379,19 @@ return space.newtuple([space.wrap(i) for i in self.shape]) def descr_set_shape(self, space, w_iterable): - concrete = self.get_concrete() new_shape = get_shape_from_iterable(space, - concrete.find_size(), w_iterable) - if isinstance(self, ConcreteArray): - # scalars don't have to do anything, just check if the shape - # is still empty - concrete.setshape(space, new_shape) + self.find_size(), w_iterable) + if isinstance(self, Scalar): + return + self.get_concrete().setshape(space, new_shape) def descr_get_size(self, space): return space.wrap(self.find_size()) def descr_copy(self, space): + return self.copy() + + def copy(self): return self.get_concrete().copy() def descr_len(self, space): @@ -506,7 +507,7 @@ def descr_str(self, space): ret = StringBuilder() - concrete = self.get_concrete() + concrete = self.get_concrete_or_scalar() concrete.to_str(space, 0, ret, ' ') return space.wrap(ret.build()) @@ -679,12 +680,15 @@ if self.find_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) - concr = self.get_concrete() + concr = self.get_concrete_or_scalar() sig = concr.find_sig() frame = sig.create_frame(self) return space.wrap(space.is_true( sig.eval(frame, concr))) + def get_concrete_or_scalar(self): + return self.get_concrete() + def descr_get_transpose(self, space): concrete = self.get_concrete() if len(concrete.shape) < 2: @@ -743,9 +747,6 @@ def find_size(self): return 1 - def get_concrete(self): - return self - def find_dtype(self): return self.dtype @@ -758,14 +759,12 @@ def copy(self): return Scalar(self.dtype, self.value) - def setshape(self, space, new_shape): - # In order to get here, we already checked that prod(new_shape) == 1, - # so in order to have a consistent API, let it go through. 
- pass - def create_sig(self, res_shape): return signature.ScalarSignature(self.dtype) + def get_concrete_or_scalar(self): + return self + class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs @@ -935,7 +934,22 @@ return signature.ViewSignature(self.dtype) return signature.ArraySignature(self.dtype) -class W_NDimSlice(ConcreteArray): +class ViewArray(ConcreteArray): + def copy(self): + array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) + iter = ViewIterator(self) + a_iter = ArrayIterator(array.size) + while not iter.done(): + array.setitem(a_iter.offset, self.getitem(iter.offset)) + iter = iter.next(len(self.shape)) + a_iter = a_iter.next(len(array.shape)) + return array + + def create_sig(self, res_shape): + return signature.ViewSignature(self.dtype) + + +class W_NDimSlice(ViewArray): def __init__(self, start, strides, backstrides, shape, parent): if isinstance(parent, W_NDimSlice): parent = parent.parent @@ -968,19 +982,6 @@ frame.next(shapelen) res_iter = res_iter.next(shapelen) - def copy(self): - array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) - iter = ViewIterator(self) - a_iter = ArrayIterator(array.size) - while not iter.done(): - array.setitem(a_iter.offset, self.getitem(iter.offset)) - iter = iter.next(len(self.shape)) - a_iter = a_iter.next(len(array.shape)) - return array - - def create_sig(self, res_shape): - return signature.ViewSignature(self.dtype) - def setshape(self, space, new_shape): if len(self.shape) < 1: return @@ -1180,37 +1181,31 @@ ) -class W_FlatIterator(ConcreteArray): +class W_FlatIterator(ViewArray): @jit.unroll_safe def __init__(self, arr): size = 1 for sh in arr.shape: size *= sh - ConcreteArray.__init__(self, arr.get_concrete(), [arr.strides[-1]], - [arr.backstrides[-1]], [size]) + self.strides = [arr.strides[-1]] + self.backstrides = [arr.backstrides[-1]] + ConcreteArray.__init__(self, size, [size], arr.dtype, arr.order, + arr) self.shapelen = len(arr.shape) - self.arr = arr - self.iter = OneDimIterator(self.arr.start, self.strides[0], - arr.shape[0]) - - def find_dtype(self): - return self.arr.find_dtype() - - def find_size(self): - return self.shape[0] + self.iter = OneDimIterator(arr.start, self.strides[0], + self.shape[0]) def descr_next(self, space): if self.iter.done(): raise OperationError(space.w_StopIteration, space.w_None) - result = self.eval(self.iter) + result = self.getitem(self.iter.offset) self.iter = self.iter.next(self.shapelen) return result def descr_iter(self): return self - W_FlatIterator.typedef = TypeDef( 'flatiter', next = interp2app(W_FlatIterator.descr_next), diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -122,9 +122,10 @@ self.array_no = _add_ptr_to_cache(storage, cache) def _create_iter(self, iterlist, arraylist, arr, res_shape): - storage = arr.get_concrete().storage + concr = arr.get_concrete() + storage = concr.storage if self.iter_no >= len(iterlist): - iterlist.append(self.allocate_iter(arr, res_shape)) + iterlist.append(self.allocate_iter(concr, res_shape)) if self.array_no >= len(arraylist): arraylist.append(storage) @@ -163,6 +164,8 @@ self.iter_no = no def allocate_iter(self, arr, res_shape): + if len(res_shape) == 1: + return OneDimIterator(arr.start, arr.strides[0], res_shape[0]) return ViewIterator(arr, res_shape) class FlatiterSignature(ViewSignature): diff --git a/pypy/module/micronumpy/test/test_numarray.py 
b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -871,7 +871,7 @@ assert (a + a).__debug_repr__() == 'Call2(add, Array, Array)' assert (a[::2]).__debug_repr__() == 'Slice' assert (a + 2).__debug_repr__() == 'Call2(add, Array, Scalar)' - #assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, FlatIter(Array))' + assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, Slice)' assert sin(a).__debug_repr__() == 'Call1(sin, Array)' b = a + a b[0] = 3 @@ -1087,7 +1087,6 @@ assert(b[:, 0] == a[0, :]).all() def test_flatiter(self): - skip("unsupported") from numpypy import array, flatiter a = array([[10, 30], [40, 60]]) f_iter = a.flat @@ -1103,7 +1102,6 @@ assert s == 140 def test_flatiter_array_conv(self): - skip("unsupported") from numpypy import array, dot a = array([1, 2, 3]) assert dot(a.flat, a.flat) == 14 diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -49,7 +49,7 @@ interp.run(space) w_res = interp.results[-1] if isinstance(w_res, BaseArray): - concr = w_res.get_concrete() + concr = w_res.get_concrete_or_scalar() sig = concr.find_sig() frame = sig.create_frame(concr) w_res = sig.eval(frame, concr) @@ -83,7 +83,8 @@ result = self.run("add") self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, 'setinteriorfield_raw': 1, 'int_add': 2, - 'int_ge': 1, 'guard_false': 1, 'jump': 1}) + 'int_ge': 1, 'guard_false': 1, 'jump': 1, + 'arraylen_gc': 1}) assert result == 3 + 3 def define_float_add(): @@ -97,7 +98,8 @@ assert result == 3 + 3 self.check_simple_loop({"getinteriorfield_raw": 1, "float_add": 1, "setinteriorfield_raw": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + "int_ge": 1, "guard_false": 1, "jump": 1, + 'arraylen_gc': 1}) def define_sum(): return """ @@ -111,7 +113,7 @@ assert result == 2 * sum(range(30)) self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 2, "int_add": 1, "int_ge": 1, "guard_false": 1, - "jump": 1}) + "jump": 1, 'arraylen_gc': 1}) def define_prod(): return """ @@ -128,7 +130,8 @@ assert result == expected self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_mul": 1, "int_add": 1, - "int_ge": 1, "guard_false": 1, "jump": 1}) + "int_ge": 1, "guard_false": 1, "jump": 1, + 'arraylen_gc': 1}) def define_max(): return """ @@ -173,7 +176,7 @@ self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_ne": 1, "int_add": 1, "int_ge": 1, "jump": 1, - "guard_false": 2}) + "guard_false": 2, 'arraylen_gc': 1}) def define_already_forced(): return """ @@ -190,12 +193,13 @@ # This is the sum of the ops for both loops, however if you remove the # optimization then you end up with 2 float_adds, so we can still be # sure it was optimized correctly. 
- self.check_resops({'setinteriorfield_raw': 4, 'getfield_gc': 19, - 'getfield_gc_pure': 6, + self.check_resops({'setinteriorfield_raw': 4, 'getfield_gc': 20, + 'getarrayitem_gc_pure': 2, + 'getfield_gc_pure': 4, 'guard_class': 8, 'int_add': 8, 'float_mul': 2, 'jump': 4, 'int_ge': 4, - 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, - }) + 'getinteriorfield_raw': 4, 'float_add': 2, + 'guard_false': 4, 'arraylen_gc': 2}) def define_ufunc(): return """ @@ -210,7 +214,8 @@ assert result == -6 self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_neg": 1, "setinteriorfield_raw": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + "int_ge": 1, "guard_false": 1, "jump": 1, + 'arraylen_gc': 1}) def define_specialization(): return """ @@ -253,7 +258,8 @@ 'setinteriorfield_raw': 1, 'int_add': 3, 'int_ge': 1, 'guard_false': 1, - 'jump': 1}) + 'jump': 1, + 'arraylen_gc': 1}) def define_slice2(): return """ @@ -269,7 +275,8 @@ assert result == 15 self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, 'setinteriorfield_raw': 1, 'int_add': 3, - 'int_ge': 1, 'guard_false': 1, 'jump': 1}) + 'int_ge': 1, 'guard_false': 1, 'jump': 1, + 'arraylen_gc': 1}) def define_multidim(): return """ @@ -284,8 +291,9 @@ # int_add might be 1 here if we try slightly harder with # reusing indexes or some optimization self.check_simple_loop({'float_add': 1, 'getinteriorfield_raw': 2, - 'guard_false': 1, 'int_add': 3, 'int_ge': 1, - 'jump': 1, 'setinteriorfield_raw': 1}) + 'guard_false': 1, 'int_add': 2, 'int_ge': 1, + 'jump': 1, 'setinteriorfield_raw': 1, + 'arraylen_gc': 1}) def define_multidim_slice(): return """ @@ -333,7 +341,8 @@ self.check_loop_count(1) self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, 'setinteriorfield_raw': 1, 'int_add': 3, - 'int_eq': 1, 'guard_false': 1, 'jump': 1}) + 'int_lt': 1, 'guard_true': 1, 'jump': 1, + 'arraylen_gc': 2}) class TestNumpyOld(LLJitMixin): def setup_class(cls): From noreply at buildbot.pypy.org Mon Dec 19 19:44:31 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Dec 2011 19:44:31 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: kill some unused code Message-ID: <20111219184431.9F9DD823F8@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50717:0ed929dcacd0 Date: 2011-12-19 20:43 +0200 http://bitbucket.org/pypy/pypy/changeset/0ed929dcacd0/ Log: kill some unused code diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -602,40 +602,28 @@ @jit.unroll_safe def create_slice(self, space, chunks): - concr = self.get_concrete() - assert isinstance(concr, ConcreteArray) - if len(chunks) == 1: - start, stop, step, lgt = chunks[0] - if step == 0: - shape = self.shape[1:] - strides = concr.strides[1:] - backstrides = concr.backstrides[1:] - else: - shape = [lgt] + self.shape[1:] - strides = [concr.strides[0] * step] + concr.strides[1:] - backstrides = [(lgt - 1) * concr.strides[0] * step] + concr.backstrides[1:] - start *= concr.strides[0] - start += concr.start - else: - shape = [] - strides = [] - backstrides = [] - start = concr.start - i = -1 - for i, (start_, stop, step, lgt) in enumerate(chunks): - if step != 0: - shape.append(lgt) - strides.append(concr.strides[i] * step) - backstrides.append(concr.strides[i] * (lgt - 1) * step) - start += concr.strides[i] * start_ - # add a reminder - s = i + 1 - 
assert s >= 0 - shape += concr.shape[s:] - strides += concr.strides[s:] - backstrides += concr.backstrides[s:] + #if not isinstance(self, ConcreteArray): + # return VirtualSlice(self, chunks) + self = self.get_concrete() + shape = [] + strides = [] + backstrides = [] + start = self.start + i = -1 + for i, (start_, stop, step, lgt) in enumerate(chunks): + if step != 0: + shape.append(lgt) + strides.append(self.strides[i] * step) + backstrides.append(self.strides[i] * (lgt - 1) * step) + start += self.strides[i] * start_ + # add a reminder + s = i + 1 + assert s >= 0 + shape += self.shape[s:] + strides += self.strides[s:] + backstrides += self.backstrides[s:] return W_NDimSlice(start, strides[:], backstrides[:], - shape[:], concr) + shape[:], self) def descr_reshape(self, space, args_w): """reshape(...) @@ -718,6 +706,7 @@ res_shape = res_shape or self.shape return signature.find_sig(self.create_sig(res_shape), self) + def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): return w_obj @@ -765,6 +754,7 @@ def get_concrete_or_scalar(self): return self + class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs @@ -823,6 +813,11 @@ def find_dtype(self): return self.res_dtype +class VirtualSlice(VirtualArray): + def __init__(self, parent, chunks): + self.parent = parent + self.chunks = chunks + VirtualArray.__init__(self, 'slice', parent.shape, parent.find_dtype()) class Call1(VirtualArray): def __init__(self, ufunc, name, shape, res_dtype, values): @@ -934,6 +929,7 @@ return signature.ViewSignature(self.dtype) return signature.ArraySignature(self.dtype) + class ViewArray(ConcreteArray): def copy(self): array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) From noreply at buildbot.pypy.org Mon Dec 19 21:50:47 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 19 Dec 2011 21:50:47 +0100 (CET) Subject: [pypy-commit] pypy default: Make the Python StringBuilder raise exceptions in the same place as the translated one, and get str.replace in Python raising the correct exception for overlfows on 32-bits again. Remove a duplicate test. Message-ID: <20111219205047.D2A2A823F8@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50718:196c4e9bbd48 Date: 2011-12-19 20:50 +0000 http://bitbucket.org/pypy/pypy/changeset/196c4e9bbd48/ Log: Make the Python StringBuilder raise exceptions in the same place as the translated one, and get str.replace in Python raising the correct exception for overlfows on 32-bits again. Remove a duplicate test. 
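The case being pinned down here only matters on a 32-bit host, where sys.maxint is 2**31-1. A sketch of the behaviour, essentially the duplicate app-level test being removed below, kept here as a worked example (on a 32-bit PyPy the exception is OverflowError("replace string too long"); a 32-bit CPython may raise MemoryError instead, hence both are caught):

    import sys

    x = "A" * (2 ** 16)
    if sys.maxint == 2 ** 31 - 1:          # only meaningful on 32-bit builds
        try:
            x.replace('', x)               # result would be (2**16 + 1) * 2**16 + 2**16
                                           # bytes, i.e. a bit over 2**32, far past maxint
        except (OverflowError, MemoryError):
            pass

With the rstring.py change below, the untranslated StringBuilder keeps a running size checked with ovfcheck(), so the overflow is detected inside append()/append_slice() as a MemoryError, at the same point where the translated builder would fail, and str.replace turns that into the OverflowError above.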
diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -524,11 +524,16 @@ upper = maxsplit - 1 assert upper >= 0 first = False - for i in range(upper): + try: + for i in range(upper): + builder.append(by) + builder.append(input[i]) builder.append(by) - builder.append(input[i]) - builder.append(by) - builder.append_slice(input, upper, len(input)) + builder.append_slice(input, upper, len(input)) + except MemoryError: + raise OperationError(space.w_OverflowError, + space.wrap("replace string too long") + ) else: start = 0 sublen = len(sub) diff --git a/pypy/objspace/std/test/test_stringobject.py b/pypy/objspace/std/test/test_stringobject.py --- a/pypy/objspace/std/test/test_stringobject.py +++ b/pypy/objspace/std/test/test_stringobject.py @@ -737,13 +737,6 @@ iterable = "hello" raises(TypeError, len, iter(iterable)) - def test_overflow_replace(self): - import sys - if sys.maxint > 2**31-1: - skip("Wrong platform") - x = "A" * (2**16) - raises(OverflowError, x.replace, '', x) - class AppTestPrebuilt(AppTestStringObject): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withprebuiltchar": True}) diff --git a/pypy/rlib/rstring.py b/pypy/rlib/rstring.py --- a/pypy/rlib/rstring.py +++ b/pypy/rlib/rstring.py @@ -3,6 +3,7 @@ from pypy.annotation.model import (SomeObject, SomeString, s_None, SomeChar, SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePtr, SomePBC) +from pypy.rlib.rarithmetic import ovfcheck from pypy.tool.pairtype import pair, pairtype from pypy.rpython.extregistry import ExtRegistryEntry @@ -52,25 +53,37 @@ class AbstractStringBuilder(object): def __init__(self, init_size=INIT_SIZE): self.l = [] + self.size = 0 + + def _grow(self, size): + try: + self.size = ovfcheck(self.size + size) + except OverflowError: + raise MemoryError def append(self, s): assert isinstance(s, self.tp) self.l.append(s) + self._grow(len(s)) def append_slice(self, s, start, end): assert isinstance(s, self.tp) assert 0 <= start <= end <= len(s) - self.l.append(s[start:end]) + s = s[start:end] + self.l.append(s) + self._grow(len(s)) def append_multiple_char(self, c, times): assert isinstance(c, self.tp) self.l.append(c * times) + self._grow(times) def append_charpsize(self, s, size): l = [] for i in xrange(size): l.append(s[i]) self.l.append(self.tp("").join(l)) + self._grow(size) def build(self): return self.tp("").join(self.l) From noreply at buildbot.pypy.org Mon Dec 19 21:52:03 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Dec 2011 21:52:03 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: implement virtualviews Message-ID: <20111219205203.DC580823F8@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50719:2ab09b67b929 Date: 2011-12-19 22:51 +0200 http://bitbucket.org/pypy/pypy/changeset/2ab09b67b929/ Log: implement virtualviews diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -1,6 +1,7 @@ from pypy.rlib import jit from pypy.rlib.objectmodel import instantiate +from pypy.module.micronumpy.strides import calculate_broadcast_strides # Iterators for arrays # -------------------- @@ -60,30 +61,24 @@ def get_offset(self): return self.offset +def view_iter_from_arr(arr): + return ViewIterator(arr.start, arr.strides, arr.backstrides, arr.shape) + class ViewIterator(BaseIterator): 
- def __init__(self, arr, res_shape=None): - self.offset = arr.start + def __init__(self, start, strides, backstrides, shape, res_shape=None): + self.offset = start self._done = False - if res_shape is not None and res_shape != arr.shape: - self.strides = [] - self.backstrides = [] - for i in range(len(arr.shape)): - if arr.shape[i] == 1: - self.strides.append(0) - self.backstrides.append(0) - else: - self.strides.append(arr.strides[i]) - self.backstrides.append(arr.backstrides[i]) - self.strides = [0] * (len(res_shape) - len(arr.shape)) + self.strides - self.backstrides = [0] * (len(res_shape) - len(arr.shape)) + self.backstrides + if res_shape is not None and res_shape != shape: + r = calculate_broadcast_strides(strides, backstrides, + shape, res_shape) + self.strides, self.backstrides = r self.res_shape = res_shape else: - self.strides = arr.strides - self.backstrides = arr.backstrides - self.res_shape = arr.shape + self.strides = strides + self.backstrides = backstrides + self.res_shape = shape self.indices = [0] * len(self.res_shape) - @jit.unroll_safe def next(self, shapelen): offset = self.offset diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -3,12 +3,13 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import interp_ufuncs, interp_dtype, signature +from pypy.module.micronumpy.strides import calculate_slice_strides from pypy.rlib import jit from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name from pypy.rlib.rstring import StringBuilder -from pypy.module.micronumpy.interp_iter import ArrayIterator, ViewIterator,\ - OneDimIterator +from pypy.module.micronumpy.interp_iter import ArrayIterator,\ + view_iter_from_arr, OneDimIterator numpy_driver = jit.JitDriver( greens=['shapelen', 'sig'], @@ -203,7 +204,7 @@ return new_strides class BaseArray(Wrappable): - _attrs_ = ["invalidates", "shape"] + _attrs_ = ["invalidates", "shape", 'size'] _immutable_fields_ = [] @@ -316,8 +317,7 @@ idx += 1 return result def impl(self, space): - size = self.find_size() - if size == 0: + if self.size == 0: raise OperationError(space.w_ValueError, space.wrap("Can't call %s on zero-size arrays" % op_name)) return space.wrap(loop(self)) @@ -380,13 +380,13 @@ def descr_set_shape(self, space, w_iterable): new_shape = get_shape_from_iterable(space, - self.find_size(), w_iterable) + self.size, w_iterable) if isinstance(self, Scalar): return self.get_concrete().setshape(space, new_shape) def descr_get_size(self, space): - return space.wrap(self.find_size()) + return space.wrap(self.size) def descr_copy(self, space): return self.copy() @@ -405,7 +405,7 @@ res.append("array(") concrete = self.get_concrete() dtype = concrete.find_dtype() - if not concrete.find_size(): + if not concrete.size: res.append('[]') if len(self.shape) > 1: # An empty slice reports its shape @@ -417,7 +417,7 @@ concrete.to_str(space, 1, res, indent=' ') if (dtype is not interp_dtype.get_dtype_cache(space).w_float64dtype and dtype is not interp_dtype.get_dtype_cache(space).w_int64dtype) or \ - not self.find_size(): + not self.size: res.append(", dtype=" + dtype.name) res.append(")") return space.wrap(res.build()) @@ -428,7 +428,7 @@ Multidimensional arrays/slices will span a number of lines, each line will begin with indent. 
''' - size = self.find_size() + size = self.size if size < 1: builder.append('[]') return @@ -454,7 +454,7 @@ builder.append(indent) # create_slice requires len(chunks) > 1 in order to reduce # shape - view = self.create_slice(space, [(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) + view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) builder.append('\n' + indent + '..., ') i = self.shape[0] - 3 @@ -469,7 +469,7 @@ builder.append(indent) # create_slice requires len(chunks) > 1 in order to reduce # shape - view = self.create_slice(space, [(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) + view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) i += 1 elif ndims == 1: @@ -581,7 +581,7 @@ item = concrete._index_of_single_item(space, w_idx) return concrete.getitem(item) chunks = self._prepare_slice_args(space, w_idx) - return space.wrap(self.create_slice(space, chunks)) + return space.wrap(self.create_slice(chunks)) def descr_setitem(self, space, w_idx, w_value): self.invalidated() @@ -597,31 +597,24 @@ if not isinstance(w_value, BaseArray): w_value = convert_to_array(space, w_value) chunks = self._prepare_slice_args(space, w_idx) - view = self.create_slice(space, chunks) + view = self.create_slice(chunks).get_concrete() view.setslice(space, w_value) @jit.unroll_safe - def create_slice(self, space, chunks): - #if not isinstance(self, ConcreteArray): - # return VirtualSlice(self, chunks) - self = self.get_concrete() + def create_slice(self, chunks): shape = [] - strides = [] - backstrides = [] - start = self.start i = -1 for i, (start_, stop, step, lgt) in enumerate(chunks): if step != 0: shape.append(lgt) - strides.append(self.strides[i] * step) - backstrides.append(self.strides[i] * (lgt - 1) * step) - start += self.strides[i] * start_ - # add a reminder s = i + 1 assert s >= 0 shape += self.shape[s:] - strides += self.strides[s:] - backstrides += self.backstrides[s:] + if not isinstance(self, ConcreteArray): + return VirtualSlice(self, chunks, shape) + r = calculate_slice_strides(self.start, self.strides, self.backstrides, + chunks) + start, strides, backstrides = r return W_NDimSlice(start, strides[:], backstrides[:], shape[:], self) @@ -642,8 +635,7 @@ else: w_shape = space.newtuple(args_w) concrete = self.get_concrete() - new_shape = get_shape_from_iterable(space, - concrete.find_size(), w_shape) + new_shape = get_shape_from_iterable(space, concrete.size, w_shape) # Since we got to here, prod(new_shape) == self.size new_strides = calc_new_strides(new_shape, concrete.shape, concrete.strides) @@ -662,10 +654,10 @@ return arr def descr_mean(self, space): - return space.div(self.descr_sum(space), space.wrap(self.find_size())) + return space.div(self.descr_sum(space), space.wrap(self.size)) def descr_nonzero(self, space): - if self.find_size() > 1: + if self.size > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) concr = self.get_concrete_or_scalar() @@ -725,6 +717,7 @@ """ Intermediate class representing a literal. 
""" + size = 1 _attrs_ = ["dtype", "value", "shape"] def __init__(self, dtype, value): @@ -733,9 +726,6 @@ self.dtype = dtype self.value = value - def find_size(self): - return 1 - def find_dtype(self): return self.dtype @@ -770,16 +760,15 @@ raise NotImplementedError def compute(self): - result_size = self.find_size() - result = W_NDimArray(result_size, self.shape, self.find_dtype()) + result = W_NDimArray(self.size, self.shape, self.find_dtype()) shapelen = len(self.shape) sig = self.find_sig() frame = sig.create_frame(self) - ri = ArrayIterator(result_size) + ri = ArrayIterator(self.size) while not ri.done(): numpy_driver.jit_merge_point(sig=sig, shapelen=shapelen, - result_size=result_size, + result_size=self.size, frame=frame, ri=ri, self=self, result=result) @@ -804,33 +793,43 @@ def setitem(self, item, value): return self.get_concrete().setitem(item, value) - def find_size(self): - if self.forced_result is not None: - # The result has been computed and sources may be unavailable - return self.forced_result.find_size() - return self._find_size() - def find_dtype(self): return self.res_dtype class VirtualSlice(VirtualArray): - def __init__(self, parent, chunks): - self.parent = parent + def __init__(self, child, chunks, shape): + size = 1 + for sh in shape: + size *= sh + self.child = child self.chunks = chunks - VirtualArray.__init__(self, 'slice', parent.shape, parent.find_dtype()) + self.size = size + VirtualArray.__init__(self, 'slice', shape, child.find_dtype()) + + def create_sig(self, res_shape): + if self.forced_result is not None: + return self.forced_result.create_sig(res_shape) + return signature.VirtualSliceSignature( + self.child.create_sig(res_shape)) + + def force_if_needed(self): + if self.forced_result is None: + concr = self.child.get_concrete() + self.forced_result = concr.create_slice(self.chunks) + + def _del_sources(self): + self.child = None class Call1(VirtualArray): def __init__(self, ufunc, name, shape, res_dtype, values): VirtualArray.__init__(self, name, shape, res_dtype) self.values = values + self.size = values.size self.ufunc = ufunc def _del_sources(self): self.values = None - def _find_size(self): - return self.values.find_size() - def _find_dtype(self): return self.res_dtype @@ -858,9 +857,6 @@ self.left = None self.right = None - def _find_size(self): - return self.size - def create_sig(self, res_shape): if self.forced_result is not None: return self.forced_result.array_sig(res_shape) @@ -891,9 +887,6 @@ def get_concrete(self): return self - def find_size(self): - return self.size - def find_dtype(self): return self.dtype @@ -933,7 +926,7 @@ class ViewArray(ConcreteArray): def copy(self): array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) - iter = ViewIterator(self) + iter = view_iter_from_arr(self) a_iter = ArrayIterator(array.size) while not iter.done(): array.setitem(a_iter.offset, self.getitem(iter.offset)) @@ -965,7 +958,7 @@ def _sliceloop(self, source, res_shape): sig = source.find_sig(res_shape) frame = sig.create_frame(source, res_shape) - res_iter = ViewIterator(self) + res_iter = view_iter_from_arr(self) shapelen = len(res_shape) while not res_iter.done(): slice_driver.jit_merge_point(sig=sig, diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -62,7 +62,7 @@ raise OperationError(space.w_TypeError, space.wrap("cannot reduce " "on a scalar")) - size = obj.find_size() + size = obj.size dtype 
= find_unaryop_result_dtype( space, obj.find_dtype(), promote_to_largest=True diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -2,7 +2,7 @@ from pypy.rlib.rarithmetic import intmask from pypy.module.micronumpy.interp_iter import ViewIterator, ArrayIterator, \ OneDimIterator, ConstantIterator -from pypy.rpython.lltypesystem.llmemory import cast_ptr_to_adr +from pypy.module.micronumpy.strides import calculate_slice_strides from pypy.rlib.jit import hint, unroll_safe, promote def sigeq(one, two): @@ -92,7 +92,7 @@ res_shape = res_shape or arr.shape iterlist = [] arraylist = [] - self._create_iter(iterlist, arraylist, arr, res_shape) + self._create_iter(iterlist, arraylist, arr, res_shape, []) return NumpyEvalFrame(iterlist, arraylist) class ConcreteSignature(Signature): @@ -113,23 +113,39 @@ def hash(self): return compute_identity_hash(self.dtype) + def allocate_view_iter(self, arr, res_shape, chunklist): + r = arr.start, arr.strides, arr.backstrides + if chunklist: + for chunkelem in chunklist: + r = calculate_slice_strides(r[0], r[1], r[2], chunkelem) + start, strides, backstrides = r + if len(res_shape) == 1: + return OneDimIterator(start, strides[0], res_shape[0]) + return ViewIterator(start, strides, backstrides, arr.shape, res_shape) + class ArraySignature(ConcreteSignature): def debug_repr(self): return 'Array' def _invent_array_numbering(self, arr, cache): - storage = arr.get_concrete().storage - self.array_no = _add_ptr_to_cache(storage, cache) + from pypy.module.micronumpy.interp_numarray import ConcreteArray + concr = arr.get_concrete() + assert isinstance(concr, ConcreteArray) + self.array_no = _add_ptr_to_cache(concr.storage, cache) - def _create_iter(self, iterlist, arraylist, arr, res_shape): + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import ConcreteArray concr = arr.get_concrete() + assert isinstance(concr, ConcreteArray) storage = concr.storage if self.iter_no >= len(iterlist): - iterlist.append(self.allocate_iter(concr, res_shape)) + iterlist.append(self.allocate_iter(concr, res_shape, chunklist)) if self.array_no >= len(arraylist): arraylist.append(storage) - def allocate_iter(self, arr, res_shape): + def allocate_iter(self, arr, res_shape, chunklist): + if chunklist: + return self.allocate_view_iter(arr, res_shape, chunklist) return ArrayIterator(arr.size) def eval(self, frame, arr): @@ -143,7 +159,7 @@ def _invent_array_numbering(self, arr, cache): pass - def _create_iter(self, iterlist, arraylist, arr, res_shape): + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): if self.iter_no >= len(iterlist): iter = ConstantIterator() iterlist.append(iter) @@ -163,17 +179,38 @@ allnumbers.append(no) self.iter_no = no - def allocate_iter(self, arr, res_shape): - if len(res_shape) == 1: - return OneDimIterator(arr.start, arr.strides[0], res_shape[0]) - return ViewIterator(arr, res_shape) + def allocate_iter(self, arr, res_shape, chunklist): + return self.allocate_view_iter(arr, res_shape, chunklist) -class FlatiterSignature(ViewSignature): - def debug_repr(self): - return 'FlatIter(%s)' % self.child.debug_repr() +class VirtualSliceSignature(Signature): + def __init__(self, child): + self.child = child - def _create_iter(self, iterlist, arraylist, arr, res_shape): - raise NotImplementedError + def _invent_array_numbering(self, arr, cache): + from 
pypy.module.micronumpy.interp_numarray import VirtualSlice + assert isinstance(arr, VirtualSlice) + self.child._invent_array_numbering(arr.child, cache) + + def hash(self): + return intmask(self.child.hash() ^ 1234) + + def eq(self, other, compare_array_no=True): + if type(self) is not type(other): + return False + assert isinstance(other, VirtualSliceSignature) + return self.child.eq(other.child, compare_array_no) + + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import VirtualSlice + assert isinstance(arr, VirtualSlice) + chunklist.append(arr.chunks) + self.child._create_iter(iterlist, arraylist, arr.child, res_shape, + chunklist) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import VirtualSlice + assert isinstance(arr, VirtualSlice) + return self.child.eval(frame, arr.child) class Call1(Signature): _immutable_fields_ = ['unfunc', 'name', 'child'] @@ -204,10 +241,11 @@ assert isinstance(arr, Call1) self.child._invent_array_numbering(arr.values, cache) - def _create_iter(self, iterlist, arraylist, arr, res_shape): + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): from pypy.module.micronumpy.interp_numarray import Call1 assert isinstance(arr, Call1) - self.child._create_iter(iterlist, arraylist, arr.values, res_shape) + self.child._create_iter(iterlist, arraylist, arr.values, res_shape, + chunklist) def eval(self, frame, arr): from pypy.module.micronumpy.interp_numarray import Call1 @@ -248,12 +286,14 @@ self.left._invent_numbering(cache, allnumbers) self.right._invent_numbering(cache, allnumbers) - def _create_iter(self, iterlist, arraylist, arr, res_shape): + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): from pypy.module.micronumpy.interp_numarray import Call2 assert isinstance(arr, Call2) - self.left._create_iter(iterlist, arraylist, arr.left, res_shape) - self.right._create_iter(iterlist, arraylist, arr.right, res_shape) + self.left._create_iter(iterlist, arraylist, arr.left, res_shape, + chunklist) + self.right._create_iter(iterlist, arraylist, arr.right, res_shape, + chunklist) def eval(self, frame, arr): from pypy.module.micronumpy.interp_numarray import Call2 @@ -267,8 +307,8 @@ self.right.debug_repr()) class ReduceSignature(Call2): - def _create_iter(self, iterlist, arraylist, arr, res_shape): - self.right._create_iter(iterlist, arraylist, arr, res_shape) + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + self.right._create_iter(iterlist, arraylist, arr, res_shape, chunklist) def _invent_numbering(self, cache, allnumbers): self.right._invent_numbering(cache, allnumbers) diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -57,6 +57,9 @@ v3 = v2.descr_add(space, v1) v4 = v1.descr_add(space, v2) assert v3.find_sig() is v4.find_sig() + v5 = ar.descr_add(space, ar).descr_getitem(space, space.wrap(slice(1, 3, 1))) + v6 = ar.descr_add(space, ar).descr_getitem(space, space.wrap(slice(1, 4, 1))) + assert v5.find_sig() is v6.find_sig() class TestUfuncCoerscion(object): def test_binops(self, space): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -36,92 +36,86 @@ assert a.backstrides == [135, 12, 2] def 
test_create_slice_f(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice(space, [(3, 0, 0, 1)]) + s = a.create_slice([(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] assert s.backstrides == [40, 100] - s = a.create_slice(space, [(1, 9, 2, 4)]) + s = a.create_slice([(1, 9, 2, 4)]) assert s.start == 1 assert s.strides == [2, 10, 50] assert s.backstrides == [6, 40, 100] - s = a.create_slice(space, [(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) assert s.shape == [2, 1] assert s.strides == [3, 10] assert s.backstrides == [3, 0] - s = a.create_slice(space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) assert s.start == 20 assert s.shape == [10, 3] def test_create_slice_c(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') - s = a.create_slice(space, [(3, 0, 0, 1)]) + s = a.create_slice([(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] assert s.backstrides == [12, 2] - s = a.create_slice(space, [(1, 9, 2, 4)]) + s = a.create_slice([(1, 9, 2, 4)]) assert s.start == 15 assert s.strides == [30, 3, 1] assert s.backstrides == [90, 12, 2] - s = a.create_slice(space, [(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) assert s.start == 19 assert s.shape == [2, 1] assert s.strides == [45, 3] assert s.backstrides == [45, 0] - s = a.create_slice(space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) assert s.start == 6 assert s.shape == [10, 3] def test_slice_of_slice_f(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice(space, [(5, 0, 0, 1)]) + s = a.create_slice([(5, 0, 0, 1)]) assert s.start == 5 - s2 = s.create_slice(space, [(3, 0, 0, 1)]) + s2 = s.create_slice([(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [50] assert s2.parent is a assert s2.backstrides == [100] assert s2.start == 35 - s = a.create_slice(space, [(1, 5, 3, 2)]) - s2 = s.create_slice(space, [(0, 2, 1, 2), (2, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2)]) + s2 = s.create_slice([(0, 2, 1, 2), (2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [3, 50] assert s2.backstrides == [3, 100] assert s2.start == 1 * 15 + 2 * 3 def test_slice_of_slice_c(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') - s = a.create_slice(space, [(5, 0, 0, 1)]) + s = a.create_slice([(5, 0, 0, 1)]) assert s.start == 15 * 5 - s2 = s.create_slice(space, [(3, 0, 0, 1)]) + s2 = s.create_slice([(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [1] assert s2.parent is a assert s2.backstrides == [2] assert s2.start == 5 * 15 + 3 * 3 - s = a.create_slice(space, [(1, 5, 3, 2)]) - s2 = s.create_slice(space, [(0, 2, 1, 2), (2, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2)]) + s2 = s.create_slice([(0, 2, 1, 2), (2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [45, 1] assert s2.backstrides == [45, 2] assert s2.start == 1 * 15 + 2 * 3 def test_negative_step_f(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice(space, [(9, -1, -2, 5)]) + s = a.create_slice([(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] assert s.backstrides == [-8, 40, 100] def test_negative_step_c(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 
order='C') - s = a.create_slice(space, [(9, -1, -2, 5)]) + s = a.create_slice([(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] @@ -130,7 +124,7 @@ a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 + 2 * 10 + 2 * 50 - s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) r = s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) @@ -140,7 +134,7 @@ a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 * 3 * 5 + 2 * 3 + 2 - s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) r = s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -193,7 +193,7 @@ # This is the sum of the ops for both loops, however if you remove the # optimization then you end up with 2 float_adds, so we can still be # sure it was optimized correctly. - self.check_resops({'setinteriorfield_raw': 4, 'getfield_gc': 20, + self.check_resops({'setinteriorfield_raw': 4, 'getfield_gc': 22, 'getarrayitem_gc_pure': 2, 'getfield_gc_pure': 4, 'guard_class': 8, 'int_add': 8, 'float_mul': 2, @@ -342,7 +342,24 @@ self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, 'setinteriorfield_raw': 1, 'int_add': 3, 'int_lt': 1, 'guard_true': 1, 'jump': 1, - 'arraylen_gc': 2}) + 'arraylen_gc': 3}) + + def define_virtual_slice(): + return """ + a = |30| + c = a + a + d = c -> 1:20 + d -> 1 + """ + + def test_virtual_slice(self): + result = self.run("virtual_slice") + assert result == 4 + self.check_loop_count(1) + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, + 'setinteriorfield_raw': 1, 'int_add': 2, + 'int_ge': 1, 'guard_false': 1, 'jump': 1, + 'arraylen_gc': 1}) class TestNumpyOld(LLJitMixin): def setup_class(cls): From noreply at buildbot.pypy.org Mon Dec 19 21:52:05 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Dec 2011 21:52:05 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: a necessary file that contains some helpers Message-ID: <20111219205205.11B7D823F8@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50720:7ff242fbe65f Date: 2011-12-19 22:51 +0200 http://bitbucket.org/pypy/pypy/changeset/7ff242fbe65f/ Log: a necessary file that contains some helpers diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/strides.py @@ -0,0 +1,31 @@ + +def calculate_slice_strides(start, strides, backstrides, chunks): + rstrides = [] + rbackstrides = [] + rstart = start + i = -1 + for i, (start_, stop, step, lgt) in enumerate(chunks): + if step != 0: + rstrides.append(strides[i] * step) + rbackstrides.append(strides[i] * (lgt - 1) * step) + rstart += strides[i] * start_ + # add a reminder + s = i + 1 + assert s >= 0 + rstrides += strides[s:] + 
rbackstrides += backstrides[s:] + return rstart, rstrides, rbackstrides + +def calculate_broadcast_strides(strides, backstrides, orig_shape, res_shape): + rstrides = [] + rbackstrides = [] + for i in range(len(orig_shape)): + if orig_shape[i] == 1: + rstrides.append(0) + rbackstrides.append(0) + else: + rstrides.append(strides[i]) + rbackstrides.append(backstrides[i]) + rstrides = [0] * (len(res_shape) - len(orig_shape)) + rstrides + rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides + return rstrides, rbackstrides From noreply at buildbot.pypy.org Mon Dec 19 21:57:49 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Dec 2011 21:57:49 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: kill some dead code Message-ID: <20111219205749.6CC66823F8@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50721:0426f8332cf6 Date: 2011-12-19 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/0426f8332cf6/ Log: kill some dead code diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -22,9 +22,6 @@ def done(self): raise NotImplementedError - def get_offset(self): - raise NotImplementedError - class ArrayIterator(BaseIterator): def __init__(self, size): self.offset = 0 @@ -39,9 +36,6 @@ def done(self): return self.offset >= self.size - def get_offset(self): - return self.offset - class OneDimIterator(BaseIterator): def __init__(self, start, step, stop): self.offset = start @@ -58,9 +52,6 @@ def done(self): return self.offset == self.size - def get_offset(self): - return self.offset - def view_iter_from_arr(arr): return ViewIterator(arr.start, arr.strides, arr.backstrides, arr.shape) @@ -108,16 +99,6 @@ def done(self): return self._done - def get_offset(self): - return self.offset - class ConstantIterator(BaseIterator): def next(self, shapelen): return self - - def done(self): - return False - - def get_offset(self): - return 0 - From noreply at buildbot.pypy.org Mon Dec 19 22:19:40 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Dec 2011 22:19:40 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: write some tests. They expose a bug Message-ID: <20111219211940.5354B823F8@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50722:a063abcf2a3c Date: 2011-12-19 23:19 +0200 http://bitbucket.org/pypy/pypy/changeset/a063abcf2a3c/ Log: write some tests. 
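(An editorial aside on the other helper added in strides.py above:
calculate_broadcast_strides gives every broadcast dimension a stride of 0, so
the iterator simply re-reads the same elements.  A minimal sketch, under the
same sys.path assumption as before and with arbitrarily chosen numbers -- a
C-contiguous [1, 3] array, element-counted strides [3, 1], backstrides [0, 2],
broadcast against a result shape of [4, 2, 3]:)

    from pypy.module.micronumpy.strides import calculate_broadcast_strides

    # the length-1 axis and the missing leading axis both become stride 0
    rstrides, rbackstrides = calculate_broadcast_strides(
        [3, 1], [0, 2], [1, 3], [4, 2, 3])
    assert rstrides == [0, 0, 1]
    assert rbackstrides == [0, 0, 2]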
They expose a bug diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -871,6 +871,19 @@ b[0] = 3 assert b.__debug_repr__() == 'Array' + def test_virtual_views(self): + from numpypy import arange + a = arange(15) + c = (a + a) + d = c[::2] + assert d[3] == 12 + c[6] = 5 + assert d[3] == 5 + a = arange(15) + c = (a + a) + d = c[::2][::2] + assert d[1] == 8 + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy @@ -1039,6 +1052,14 @@ b[:] = (a + a) assert (b == zeros((4, 3, 5))).all() + def test_broadcast_virtualview(self): + from numpypy import arange, zeros + a = arange(8).reshape([2, 2, 2]) + b = (a + a)[1, 1] + c = zeros((2, 2, 2)) + c[:] = b + assert (c == [[[12, 14], [12, 14]], [[12, 14], [12, 14]]]).all() + def test_argmax(self): from numpypy import array a = array([[1, 2], [3, 4], [5, 6]]) From noreply at buildbot.pypy.org Mon Dec 19 22:32:04 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Dec 2011 22:32:04 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: fix the test Message-ID: <20111219213204.A0583823F8@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50723:82c25e00eebe Date: 2011-12-19 23:31 +0200 http://bitbucket.org/pypy/pypy/changeset/82c25e00eebe/ Log: fix the test diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -612,9 +612,9 @@ shape += self.shape[s:] if not isinstance(self, ConcreteArray): return VirtualSlice(self, chunks, shape) - r = calculate_slice_strides(self.start, self.strides, self.backstrides, - chunks) - start, strides, backstrides = r + r = calculate_slice_strides(self.shape, self.start, self.strides, + self.backstrides, chunks) + _, start, strides, backstrides = r return W_NDimSlice(start, strides[:], backstrides[:], shape[:], self) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -114,14 +114,14 @@ return compute_identity_hash(self.dtype) def allocate_view_iter(self, arr, res_shape, chunklist): - r = arr.start, arr.strides, arr.backstrides + r = arr.shape, arr.start, arr.strides, arr.backstrides if chunklist: for chunkelem in chunklist: - r = calculate_slice_strides(r[0], r[1], r[2], chunkelem) - start, strides, backstrides = r + r = calculate_slice_strides(r[0], r[1], r[2], r[3], chunkelem) + shape, start, strides, backstrides = r if len(res_shape) == 1: return OneDimIterator(start, strides[0], res_shape[0]) - return ViewIterator(start, strides, backstrides, arr.shape, res_shape) + return ViewIterator(start, strides, backstrides, shape, res_shape) class ArraySignature(ConcreteSignature): def debug_repr(self): diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -1,20 +1,23 @@ -def calculate_slice_strides(start, strides, backstrides, chunks): +def calculate_slice_strides(shape, start, strides, backstrides, chunks): rstrides = [] rbackstrides = [] rstart = start + rshape = [] i = -1 for i, (start_, stop, step, lgt) in enumerate(chunks): if step != 0: rstrides.append(strides[i] * step) rbackstrides.append(strides[i] * (lgt - 1) * 
step) + rshape.append(lgt) rstart += strides[i] * start_ # add a reminder s = i + 1 assert s >= 0 rstrides += strides[s:] rbackstrides += backstrides[s:] - return rstart, rstrides, rbackstrides + rshape += shape[s:] + return rshape, rstart, rstrides, rbackstrides def calculate_broadcast_strides(strides, backstrides, orig_shape, res_shape): rstrides = [] From noreply at buildbot.pypy.org Mon Dec 19 23:01:38 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Dec 2011 23:01:38 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: We require flatiter to force the array for now Message-ID: <20111219220138.E9DDE823F8@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50724:993a31c296bb Date: 2011-12-20 00:00 +0200 http://bitbucket.org/pypy/pypy/changeset/993a31c296bb/ Log: We require flatiter to force the array for now diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1174,6 +1174,7 @@ @jit.unroll_safe def __init__(self, arr): + arr = arr.get_concrete() size = 1 for sh in arr.shape: size *= sh From noreply at buildbot.pypy.org Mon Dec 19 23:04:24 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Dec 2011 23:04:24 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: a missing test Message-ID: <20111219220424.A58B9823F8@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50725:ace20e87f32a Date: 2011-12-20 00:03 +0200 http://bitbucket.org/pypy/pypy/changeset/ace20e87f32a/ Log: a missing test diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1121,6 +1121,11 @@ a = array([1, 2, 3]) assert dot(a.flat, a.flat) == 14 + def test_flatiter_varray(self): + from numpypy import ones + a = ones((2, 2)) + assert list(((a + a).flat)) == [2, 2, 2, 2] + def test_slice_copy(self): from numpypy import zeros a = zeros((10, 10)) From noreply at buildbot.pypy.org Mon Dec 19 23:05:21 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 19 Dec 2011 23:05:21 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: Merged default in (doesn't translate anymore though) Message-ID: <20111219220521.4B3A4823F8@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: refactor-signature Changeset: r50726:c6370fd540dc Date: 2011-12-19 16:04 -0600 http://bitbucket.org/pypy/pypy/changeset/c6370fd540dc/ Log: Merged default in (doesn't translate anymore though) diff too long, truncating to 10000 out of 25590 lines diff --git a/lib-python/modified-2.7/ctypes/__init__.py b/lib-python/modified-2.7/ctypes/__init__.py --- a/lib-python/modified-2.7/ctypes/__init__.py +++ b/lib-python/modified-2.7/ctypes/__init__.py @@ -351,7 +351,7 @@ self._FuncPtr = _FuncPtr if handle is None: - self._handle = _ffi.CDLL(name) + self._handle = _ffi.CDLL(name, mode) else: self._handle = handle diff --git a/lib-python/modified-2.7/ctypes/test/test_callbacks.py b/lib-python/modified-2.7/ctypes/test/test_callbacks.py --- a/lib-python/modified-2.7/ctypes/test/test_callbacks.py +++ b/lib-python/modified-2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test class Callbacks(unittest.TestCase): @@ -98,6 +99,7 @@ ## self.check_type(c_char_p, 
"abc") ## self.check_type(c_char_p, "def") + @xfail def test_pyobject(self): o = () from sys import getrefcount as grc diff --git a/lib-python/modified-2.7/ctypes/test/test_libc.py b/lib-python/modified-2.7/ctypes/test/test_libc.py --- a/lib-python/modified-2.7/ctypes/test/test_libc.py +++ b/lib-python/modified-2.7/ctypes/test/test_libc.py @@ -25,7 +25,10 @@ lib.my_qsort(chars, len(chars)-1, sizeof(c_char), comparefunc(sort)) self.assertEqual(chars.raw, " ,,aaaadmmmnpppsss\x00") - def test_no_more_xfail(self): + def SKIPPED_test_no_more_xfail(self): + # We decided to not explicitly support the whole ctypes-2.7 + # and instead go for a case-by-case, demand-driven approach. + # So this test is skipped instead of failing. import socket import ctypes.test self.assertTrue(not hasattr(ctypes.test, 'xfail'), diff --git a/lib_pypy/distributed/socklayer.py b/lib_pypy/distributed/socklayer.py --- a/lib_pypy/distributed/socklayer.py +++ b/lib_pypy/distributed/socklayer.py @@ -2,7 +2,7 @@ import py from socket import socket -XXX needs import adaptation as 'green' is removed from py lib for years +raise ImportError("XXX needs import adaptation as 'green' is removed from py lib for years") from py.impl.green.msgstruct import decodemessage, message from socket import socket, AF_INET, SOCK_STREAM import marshal diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -164,6 +164,7 @@ # if something: # assume this causes a NameError # # _this_ lines and the one # below we don't want from entry.getsource() + end = min(end, len(source)) for i in range(self.lineno, end): if source[i].rstrip().endswith(':'): end = i + 1 diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py --- a/pypy/bin/checkmodule.py +++ b/pypy/bin/checkmodule.py @@ -1,43 +1,45 @@ #! /usr/bin/env python """ -Usage: checkmodule.py [-b backend] +Usage: checkmodule.py -Compiles the PyPy extension module from pypy/module// -into a fake program which does nothing. Useful for testing whether a -modules compiles without doing a full translation. Default backend is cli. - -WARNING: this is still incomplete: there are chances that the -compilation fails with strange errors not due to the module. If a -module is known to compile during a translation but don't pass -checkmodule.py, please report the bug (or, better, correct it :-). +Check annotation and rtyping of the PyPy extension module from +pypy/module//. Useful for testing whether a +modules compiles without doing a full translation. """ import autopath -import sys +import sys, os from pypy.objspace.fake.checkmodule import checkmodule def main(argv): - try: - assert len(argv) in (2, 4) - if len(argv) == 2: - backend = 'cli' - modname = argv[1] - if modname in ('-h', '--help'): - print >> sys.stderr, __doc__ - sys.exit(0) - if modname.startswith('-'): - print >> sys.stderr, "Bad command line" - print >> sys.stderr, __doc__ - sys.exit(1) - else: - _, b, backend, modname = argv - assert b == '-b' - except AssertionError: + if len(argv) != 2: print >> sys.stderr, __doc__ sys.exit(2) + modname = argv[1] + if modname in ('-h', '--help'): + print >> sys.stderr, __doc__ + sys.exit(0) + if modname.startswith('-'): + print >> sys.stderr, "Bad command line" + print >> sys.stderr, __doc__ + sys.exit(1) + if os.path.sep in modname: + if os.path.basename(modname) == '': + modname = os.path.dirname(modname) + if os.path.basename(os.path.dirname(modname)) != 'module': + print >> sys.stderr, "Must give '../module/xxx', or just 'xxx'." 
+ sys.exit(1) + modname = os.path.basename(modname) + try: + checkmodule(modname) + except Exception, e: + import traceback, pdb + traceback.print_exc() + pdb.post_mortem(sys.exc_info()[2]) + return 1 else: - checkmodule(modname, backend, interactive=True) - print 'Module compiled succesfully' + print 'Passed.' + return 0 if __name__ == '__main__': - main(sys.argv) + sys.exit(main(sys.argv)) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -252,6 +252,10 @@ "use small tuples", default=False), + BoolOption("withspecialisedtuple", + "use specialised tuples", + default=False), + BoolOption("withrope", "use ropes as the string implementation", default=False, requires=[("objspace.std.withstrslice", False), @@ -365,6 +369,7 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) + config.objspace.std.suggest(withspecialisedtuple=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -496,6 +496,17 @@ def setup(self): super(AppClassCollector, self).setup() cls = self.obj + # + # + for name in dir(cls): + if name.startswith('test_'): + func = getattr(cls, name, None) + code = getattr(func, 'func_code', None) + if code and code.co_flags & 32: + raise AssertionError("unsupported: %r is a generator " + "app-level test method" % (name,)) + # + # space = cls.space clsname = cls.__name__ if self.config.option.runappdirect: diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.6' +version = '1.7' # The full version, including alpha/beta/rc tags. -release = '1.6' +release = '1.7' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/config/objspace.std.withspecialisedtuple.txt b/pypy/doc/config/objspace.std.withspecialisedtuple.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withspecialisedtuple.txt @@ -0,0 +1,3 @@ +Use "specialized tuples", a custom implementation for some common kinds +of tuples. Currently limited to tuples of length 2, in three variants: +(int, int), (float, float), and a generic (object, object). diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -112,10 +112,32 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. +Note that the JIT has a very high warm-up cost, meaning that the +programs are slow at the beginning. If you want to compare the timings +with CPython, even relatively simple programs need to run *at least* one +second, preferrably at least a few seconds. Large, complicated programs +need even more time to warm-up the JIT. + .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +--------------------------------------------------------------- +Couldn't the JIT dump and reload already-compiled machine code? +--------------------------------------------------------------- + +No, we found no way of doing that. The JIT generates machine code +containing a large number of constant addresses --- constant at the time +the machine code is written. 
The vast majority is probably not at all +constants that you find in the executable, with a nice link name. E.g. +the addresses of Python classes are used all the time, but Python +classes don't come statically from the executable; they are created anew +every time you restart your program. This makes saving and reloading +machine code completely impossible without some very advanced way of +mapping addresses in the old (now-dead) process to addresses in the new +process, including checking that all the previous assumptions about the +(now-dead) object are still true about the new object. + .. _`prolog and javascript`: diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -51,6 +51,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." + state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \"%s\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \"%s\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -94,17 +112,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." - for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \"%s\" missing from %s" - err = err % (missing, host) - w_err = space.wrap(err) - raise OperationError(space.w_TypeError, w_err) - raise AssertionError("should not reach here") class mod(AST): @@ -112,7 +119,6 @@ class Module(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -128,7 +134,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Module') + self.missing_field(space, ['body'], 'Module') else: pass w_list = self.w_body @@ -145,7 +151,6 @@ class Interactive(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -161,7 +166,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Interactive') + self.missing_field(space, ['body'], 'Interactive') else: pass w_list = self.w_body @@ -178,7 +183,6 @@ class Expression(mod): - def __init__(self, body): self.body = body self.initialization_state = 1 @@ -192,7 +196,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Expression') + self.missing_field(space, ['body'], 'Expression') else: pass self.body.sync_app_attrs(space) @@ -200,7 +204,6 @@ class Suite(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -216,7 +219,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Suite') + self.missing_field(space, ['body'], 'Suite') else: pass w_list = self.w_body @@ -232,15 +235,13 @@ class stmt(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset 
= col_offset class FunctionDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, args, body, decorator_list, lineno, col_offset): self.name = name self.args = args @@ -264,7 +265,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'args', 'body', 'decorator_list', 'lineno', 'col_offset'], 'FunctionDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef') else: pass self.args.sync_app_attrs(space) @@ -292,9 +293,6 @@ class ClassDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, bases, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases @@ -320,7 +318,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'bases', 'body', 'decorator_list', 'lineno', 'col_offset'], 'ClassDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef') else: pass w_list = self.w_bases @@ -357,9 +355,6 @@ class Return(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -374,10 +369,10 @@ return visitor.visit_Return(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Return') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Return') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -385,9 +380,6 @@ class Delete(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, targets, lineno, col_offset): self.targets = targets self.w_targets = None @@ -404,7 +396,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['targets', 'lineno', 'col_offset'], 'Delete') + self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete') else: pass w_list = self.w_targets @@ -421,9 +413,6 @@ class Assign(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, targets, value, lineno, col_offset): self.targets = targets self.w_targets = None @@ -442,7 +431,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['targets', 'value', 'lineno', 'col_offset'], 'Assign') + self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') else: pass w_list = self.w_targets @@ -460,9 +449,6 @@ class AugAssign(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, target, op, value, lineno, col_offset): self.target = target self.op = op @@ -480,7 +466,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['target', 'op', 'value', 'lineno', 'col_offset'], 'AugAssign') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') else: pass self.target.sync_app_attrs(space) @@ -489,9 +475,6 @@ class Print(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, dest, values, nl, lineno, col_offset): self.dest = dest self.values = values @@ -511,10 +494,10 @@ return 
visitor.visit_Print(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 30: - missing_field(space, self.initialization_state, [None, 'values', 'nl', 'lineno', 'col_offset'], 'Print') + if (self.initialization_state & ~4) ^ 27: + self.missing_field(space, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.dest = None if self.dest: self.dest.sync_app_attrs(space) @@ -532,9 +515,6 @@ class For(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, target, iter, body, orelse, lineno, col_offset): self.target = target self.iter = iter @@ -559,7 +539,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['target', 'iter', 'body', 'orelse', 'lineno', 'col_offset'], 'For') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 'orelse'], 'For') else: pass self.target.sync_app_attrs(space) @@ -588,9 +568,6 @@ class While(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -613,7 +590,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'While') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') else: pass self.test.sync_app_attrs(space) @@ -641,9 +618,6 @@ class If(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -666,7 +640,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'If') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') else: pass self.test.sync_app_attrs(space) @@ -694,9 +668,6 @@ class With(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, context_expr, optional_vars, body, lineno, col_offset): self.context_expr = context_expr self.optional_vars = optional_vars @@ -717,10 +688,10 @@ return visitor.visit_With(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 29: - missing_field(space, self.initialization_state, ['context_expr', None, 'body', 'lineno', 'col_offset'], 'With') + if (self.initialization_state & ~8) ^ 23: + self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.optional_vars = None self.context_expr.sync_app_attrs(space) if self.optional_vars: @@ -739,9 +710,6 @@ class Raise(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, inst, tback, lineno, col_offset): self.type = type self.inst = inst @@ -762,14 +730,14 @@ return visitor.visit_Raise(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~7) ^ 24: - missing_field(space, self.initialization_state, [None, None, None, 'lineno', 'col_offset'], 'Raise') + if (self.initialization_state & ~28) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 
8: self.inst = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.tback = None if self.type: self.type.sync_app_attrs(space) @@ -781,9 +749,6 @@ class TryExcept(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, handlers, orelse, lineno, col_offset): self.body = body self.w_body = None @@ -808,7 +773,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['body', 'handlers', 'orelse', 'lineno', 'col_offset'], 'TryExcept') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') else: pass w_list = self.w_body @@ -845,9 +810,6 @@ class TryFinally(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, body, finalbody, lineno, col_offset): self.body = body self.w_body = None @@ -868,7 +830,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['body', 'finalbody', 'lineno', 'col_offset'], 'TryFinally') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') else: pass w_list = self.w_body @@ -895,9 +857,6 @@ class Assert(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, test, msg, lineno, col_offset): self.test = test self.msg = msg @@ -914,10 +873,10 @@ return visitor.visit_Assert(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 13: - missing_field(space, self.initialization_state, ['test', None, 'lineno', 'col_offset'], 'Assert') + if (self.initialization_state & ~8) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.msg = None self.test.sync_app_attrs(space) if self.msg: @@ -926,9 +885,6 @@ class Import(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -945,7 +901,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Import') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import') else: pass w_list = self.w_names @@ -962,9 +918,6 @@ class ImportFrom(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, module, names, level, lineno, col_offset): self.module = module self.names = names @@ -982,12 +935,12 @@ return visitor.visit_ImportFrom(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~5) ^ 26: - missing_field(space, self.initialization_state, [None, 'names', None, 'lineno', 'col_offset'], 'ImportFrom') + if (self.initialization_state & ~20) ^ 11: + self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.module = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.level = 0 w_list = self.w_names if w_list is not None: @@ -1003,9 +956,6 @@ class Exec(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, globals, locals, lineno, col_offset): self.body = body self.globals = globals @@ -1025,12 +975,12 @@ return visitor.visit_Exec(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~6) ^ 25: - missing_field(space, self.initialization_state, ['body', None, None, 
'lineno', 'col_offset'], 'Exec') + if (self.initialization_state & ~24) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'body', None, None], 'Exec') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.globals = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.locals = None self.body.sync_app_attrs(space) if self.globals: @@ -1041,9 +991,6 @@ class Global(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -1058,7 +1005,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Global') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') else: pass w_list = self.w_names @@ -1072,9 +1019,6 @@ class Expr(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -1089,7 +1033,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Expr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr') else: pass self.value.sync_app_attrs(space) @@ -1097,9 +1041,6 @@ class Pass(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1112,16 +1053,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Pass') + self.missing_field(space, ['lineno', 'col_offset'], 'Pass') else: pass class Break(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1134,16 +1072,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Break') + self.missing_field(space, ['lineno', 'col_offset'], 'Break') else: pass class Continue(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1156,21 +1091,19 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Continue') + self.missing_field(space, ['lineno', 'col_offset'], 'Continue') else: pass class expr(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class BoolOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, values, lineno, col_offset): self.op = op self.values = values @@ -1188,7 +1121,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'values', 'lineno', 'col_offset'], 'BoolOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp') else: pass w_list = self.w_values @@ -1205,9 +1138,6 @@ class BinOp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, op, right, lineno, col_offset): self.left = left self.op = op @@ -1225,7 +1155,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - 
missing_field(space, self.initialization_state, ['left', 'op', 'right', 'lineno', 'col_offset'], 'BinOp') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'op', 'right'], 'BinOp') else: pass self.left.sync_app_attrs(space) @@ -1234,9 +1164,6 @@ class UnaryOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, operand, lineno, col_offset): self.op = op self.operand = operand @@ -1252,7 +1179,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'operand', 'lineno', 'col_offset'], 'UnaryOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'operand'], 'UnaryOp') else: pass self.operand.sync_app_attrs(space) @@ -1260,9 +1187,6 @@ class Lambda(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, args, body, lineno, col_offset): self.args = args self.body = body @@ -1279,7 +1203,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['args', 'body', 'lineno', 'col_offset'], 'Lambda') + self.missing_field(space, ['lineno', 'col_offset', 'args', 'body'], 'Lambda') else: pass self.args.sync_app_attrs(space) @@ -1288,9 +1212,6 @@ class IfExp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -1309,7 +1230,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'IfExp') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'IfExp') else: pass self.test.sync_app_attrs(space) @@ -1319,9 +1240,6 @@ class Dict(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, keys, values, lineno, col_offset): self.keys = keys self.w_keys = None @@ -1342,7 +1260,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['keys', 'values', 'lineno', 'col_offset'], 'Dict') + self.missing_field(space, ['lineno', 'col_offset', 'keys', 'values'], 'Dict') else: pass w_list = self.w_keys @@ -1369,9 +1287,6 @@ class Set(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, elts, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1388,7 +1303,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['elts', 'lineno', 'col_offset'], 'Set') + self.missing_field(space, ['lineno', 'col_offset', 'elts'], 'Set') else: pass w_list = self.w_elts @@ -1405,9 +1320,6 @@ class ListComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1426,7 +1338,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'ListComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'ListComp') else: pass self.elt.sync_app_attrs(space) @@ -1444,9 +1356,6 @@ class SetComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1465,7 +1374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, 
['elt', 'generators', 'lineno', 'col_offset'], 'SetComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'SetComp') else: pass self.elt.sync_app_attrs(space) @@ -1483,9 +1392,6 @@ class DictComp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, key, value, generators, lineno, col_offset): self.key = key self.value = value @@ -1506,7 +1412,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['key', 'value', 'generators', 'lineno', 'col_offset'], 'DictComp') + self.missing_field(space, ['lineno', 'col_offset', 'key', 'value', 'generators'], 'DictComp') else: pass self.key.sync_app_attrs(space) @@ -1525,9 +1431,6 @@ class GeneratorExp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1546,7 +1449,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'GeneratorExp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'GeneratorExp') else: pass self.elt.sync_app_attrs(space) @@ -1564,9 +1467,6 @@ class Yield(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1581,10 +1481,10 @@ return visitor.visit_Yield(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Yield') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Yield') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -1592,9 +1492,6 @@ class Compare(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, ops, comparators, lineno, col_offset): self.left = left self.ops = ops @@ -1615,7 +1512,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'ops', 'comparators', 'lineno', 'col_offset'], 'Compare') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'ops', 'comparators'], 'Compare') else: pass self.left.sync_app_attrs(space) @@ -1640,9 +1537,6 @@ class Call(expr): - _lineno_mask = 32 - _col_offset_mask = 64 - def __init__(self, func, args, keywords, starargs, kwargs, lineno, col_offset): self.func = func self.args = args @@ -1670,12 +1564,12 @@ return visitor.visit_Call(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~24) ^ 103: - missing_field(space, self.initialization_state, ['func', 'args', 'keywords', None, None, 'lineno', 'col_offset'], 'Call') + if (self.initialization_state & ~96) ^ 31: + self.missing_field(space, ['lineno', 'col_offset', 'func', 'args', 'keywords', None, None], 'Call') else: - if not self.initialization_state & 8: + if not self.initialization_state & 32: self.starargs = None - if not self.initialization_state & 16: + if not self.initialization_state & 64: self.kwargs = None self.func.sync_app_attrs(space) w_list = self.w_args @@ -1706,9 +1600,6 @@ class Repr(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1723,7 +1614,7 
@@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Repr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Repr') else: pass self.value.sync_app_attrs(space) @@ -1731,9 +1622,6 @@ class Num(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, n, lineno, col_offset): self.n = n expr.__init__(self, lineno, col_offset) @@ -1747,16 +1635,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['n', 'lineno', 'col_offset'], 'Num') + self.missing_field(space, ['lineno', 'col_offset', 'n'], 'Num') else: pass class Str(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, s, lineno, col_offset): self.s = s expr.__init__(self, lineno, col_offset) @@ -1770,16 +1655,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['s', 'lineno', 'col_offset'], 'Str') + self.missing_field(space, ['lineno', 'col_offset', 's'], 'Str') else: pass class Attribute(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, attr, ctx, lineno, col_offset): self.value = value self.attr = attr @@ -1796,7 +1678,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'attr', 'ctx', 'lineno', 'col_offset'], 'Attribute') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'attr', 'ctx'], 'Attribute') else: pass self.value.sync_app_attrs(space) @@ -1804,9 +1686,6 @@ class Subscript(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, slice, ctx, lineno, col_offset): self.value = value self.slice = slice @@ -1824,7 +1703,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'slice', 'ctx', 'lineno', 'col_offset'], 'Subscript') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'slice', 'ctx'], 'Subscript') else: pass self.value.sync_app_attrs(space) @@ -1833,9 +1712,6 @@ class Name(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, id, ctx, lineno, col_offset): self.id = id self.ctx = ctx @@ -1850,16 +1726,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['id', 'ctx', 'lineno', 'col_offset'], 'Name') + self.missing_field(space, ['lineno', 'col_offset', 'id', 'ctx'], 'Name') else: pass class List(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1877,7 +1750,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'List') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'List') else: pass w_list = self.w_elts @@ -1894,9 +1767,6 @@ class Tuple(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1914,7 +1784,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'Tuple') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'Tuple') else: pass w_list = 
self.w_elts @@ -1931,9 +1801,6 @@ class Const(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1947,7 +1814,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Const') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Const') else: pass @@ -2009,7 +1876,6 @@ class Ellipsis(slice): - def __init__(self): self.initialization_state = 0 @@ -2021,14 +1887,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 0: - missing_field(space, self.initialization_state, [], 'Ellipsis') + self.missing_field(space, [], 'Ellipsis') else: pass class Slice(slice): - def __init__(self, lower, upper, step): self.lower = lower self.upper = upper @@ -2049,7 +1914,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~7) ^ 0: - missing_field(space, self.initialization_state, [None, None, None], 'Slice') + self.missing_field(space, [None, None, None], 'Slice') else: if not self.initialization_state & 1: self.lower = None @@ -2067,7 +1932,6 @@ class ExtSlice(slice): - def __init__(self, dims): self.dims = dims self.w_dims = None @@ -2083,7 +1947,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['dims'], 'ExtSlice') + self.missing_field(space, ['dims'], 'ExtSlice') else: pass w_list = self.w_dims @@ -2100,7 +1964,6 @@ class Index(slice): - def __init__(self, value): self.value = value self.initialization_state = 1 @@ -2114,7 +1977,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['value'], 'Index') + self.missing_field(space, ['value'], 'Index') else: pass self.value.sync_app_attrs(space) @@ -2377,7 +2240,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['target', 'iter', 'ifs'], 'comprehension') + self.missing_field(space, ['target', 'iter', 'ifs'], 'comprehension') else: pass self.target.sync_app_attrs(space) @@ -2394,15 +2257,13 @@ node.sync_app_attrs(space) class excepthandler(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class ExceptHandler(excepthandler): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, name, body, lineno, col_offset): self.type = type self.name = name @@ -2424,12 +2285,12 @@ return visitor.visit_ExceptHandler(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~3) ^ 28: - missing_field(space, self.initialization_state, [None, None, 'body', 'lineno', 'col_offset'], 'ExceptHandler') + if (self.initialization_state & ~12) ^ 19: + self.missing_field(space, ['lineno', 'col_offset', None, None, 'body'], 'ExceptHandler') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.name = None if self.type: self.type.sync_app_attrs(space) @@ -2470,7 +2331,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~6) ^ 9: - missing_field(space, self.initialization_state, ['args', None, None, 'defaults'], 'arguments') + self.missing_field(space, ['args', None, None, 'defaults'], 'arguments') else: if not self.initialization_state & 2: self.vararg = None @@ -2513,7 
+2374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['arg', 'value'], 'keyword') + self.missing_field(space, ['arg', 'value'], 'keyword') else: pass self.value.sync_app_attrs(space) @@ -2533,7 +2394,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~2) ^ 1: - missing_field(space, self.initialization_state, ['name', None], 'alias') + self.missing_field(space, ['name', None], 'alias') else: if not self.initialization_state & 2: self.asname = None @@ -3019,6 +2880,8 @@ def Expression_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3098,7 +2961,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -3112,14 +2975,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def stmt_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -3133,7 +2996,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 stmt.typedef = typedef.TypeDef("stmt", AST.typedef, @@ -3149,7 +3012,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3163,14 +3026,14 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def FunctionDef_get_args(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -3184,10 +3047,10 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def FunctionDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", 
typename, 'body') if w_self.w_body is None: @@ -3201,10 +3064,10 @@ def FunctionDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def FunctionDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3218,7 +3081,7 @@ def FunctionDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _FunctionDef_field_unroller = unrolling_iterable(['name', 'args', 'body', 'decorator_list']) def FunctionDef_init(space, w_self, __args__): @@ -3254,7 +3117,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3268,10 +3131,10 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ClassDef_get_bases(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases') if w_self.w_bases is None: @@ -3285,10 +3148,10 @@ def ClassDef_set_bases(space, w_self, w_new_value): w_self.w_bases = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ClassDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3302,10 +3165,10 @@ def ClassDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def ClassDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3319,7 +3182,7 @@ def ClassDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _ClassDef_field_unroller = unrolling_iterable(['name', 'bases', 'body', 'decorator_list']) def ClassDef_init(space, w_self, __args__): @@ -3356,7 +3219,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3364,13 +3227,15 @@ def Return_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + 
raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Return_field_unroller = unrolling_iterable(['value']) def Return_init(space, w_self, __args__): @@ -3397,7 +3262,7 @@ ) def Delete_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3411,7 +3276,7 @@ def Delete_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Delete_field_unroller = unrolling_iterable(['targets']) def Delete_init(space, w_self, __args__): @@ -3439,7 +3304,7 @@ ) def Assign_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3453,14 +3318,14 @@ def Assign_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3468,13 +3333,15 @@ def Assign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assign_field_unroller = unrolling_iterable(['targets', 'value']) def Assign_init(space, w_self, __args__): @@ -3507,7 +3374,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3515,20 +3382,22 @@ def AugAssign_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def AugAssign_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = 
space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -3544,14 +3413,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def AugAssign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3559,13 +3428,15 @@ def AugAssign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _AugAssign_field_unroller = unrolling_iterable(['target', 'op', 'value']) def AugAssign_init(space, w_self, __args__): @@ -3598,7 +3469,7 @@ w_obj = w_self.getdictvalue(space, 'dest') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dest') return space.wrap(w_self.dest) @@ -3606,16 +3477,18 @@ def Print_set_dest(space, w_self, w_new_value): try: w_self.dest = space.interp_w(expr, w_new_value, True) + if type(w_self.dest) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'dest', w_new_value) return w_self.deldictvalue(space, 'dest') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Print_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -3629,14 +3502,14 @@ def Print_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Print_get_nl(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'nl') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl') return space.wrap(w_self.nl) @@ -3650,7 +3523,7 @@ w_self.setdictvalue(space, 'nl', w_new_value) return w_self.deldictvalue(space, 'nl') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Print_field_unroller = unrolling_iterable(['dest', 'values', 'nl']) def Print_init(space, w_self, __args__): @@ -3684,7 +3557,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = 
space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3692,20 +3565,22 @@ def For_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def For_get_iter(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'iter') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'iter') return space.wrap(w_self.iter) @@ -3713,16 +3588,18 @@ def For_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'iter', w_new_value) return w_self.deldictvalue(space, 'iter') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def For_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3736,10 +3613,10 @@ def For_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def For_get_orelse(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3753,7 +3630,7 @@ def For_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _For_field_unroller = unrolling_iterable(['target', 'iter', 'body', 'orelse']) def For_init(space, w_self, __args__): @@ -3789,7 +3666,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3797,16 +3674,18 @@ def While_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def While_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, 
"'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3820,10 +3699,10 @@ def While_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def While_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3837,7 +3716,7 @@ def While_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _While_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def While_init(space, w_self, __args__): @@ -3872,7 +3751,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3880,16 +3759,18 @@ def If_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def If_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3903,10 +3784,10 @@ def If_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def If_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3920,7 +3801,7 @@ def If_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _If_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def If_init(space, w_self, __args__): @@ -3955,7 +3836,7 @@ w_obj = w_self.getdictvalue(space, 'context_expr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr') return space.wrap(w_self.context_expr) @@ -3963,20 +3844,22 @@ def With_set_context_expr(space, w_self, w_new_value): try: w_self.context_expr = space.interp_w(expr, w_new_value, False) + if type(w_self.context_expr) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'context_expr', w_new_value) return w_self.deldictvalue(space, 'context_expr') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def 
With_get_optional_vars(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'optional_vars') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars') return space.wrap(w_self.optional_vars) @@ -3984,16 +3867,18 @@ def With_set_optional_vars(space, w_self, w_new_value): try: w_self.optional_vars = space.interp_w(expr, w_new_value, True) + if type(w_self.optional_vars) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'optional_vars', w_new_value) return w_self.deldictvalue(space, 'optional_vars') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def With_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4007,7 +3892,7 @@ def With_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _With_field_unroller = unrolling_iterable(['context_expr', 'optional_vars', 'body']) def With_init(space, w_self, __args__): @@ -4041,7 +3926,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -4049,20 +3934,22 @@ def Raise_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Raise_get_inst(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'inst') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst') return space.wrap(w_self.inst) @@ -4070,20 +3957,22 @@ def Raise_set_inst(space, w_self, w_new_value): try: w_self.inst = space.interp_w(expr, w_new_value, True) + if type(w_self.inst) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'inst', w_new_value) return w_self.deldictvalue(space, 'inst') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Raise_get_tback(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'tback') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 
'tback') return space.wrap(w_self.tback) @@ -4091,13 +3980,15 @@ def Raise_set_tback(space, w_self, w_new_value): try: w_self.tback = space.interp_w(expr, w_new_value, True) + if type(w_self.tback) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'tback', w_new_value) return w_self.deldictvalue(space, 'tback') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Raise_field_unroller = unrolling_iterable(['type', 'inst', 'tback']) def Raise_init(space, w_self, __args__): @@ -4126,7 +4017,7 @@ ) def TryExcept_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4140,10 +4031,10 @@ def TryExcept_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryExcept_get_handlers(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers') if w_self.w_handlers is None: @@ -4157,10 +4048,10 @@ def TryExcept_set_handlers(space, w_self, w_new_value): w_self.w_handlers = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def TryExcept_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -4174,7 +4065,7 @@ def TryExcept_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _TryExcept_field_unroller = unrolling_iterable(['body', 'handlers', 'orelse']) def TryExcept_init(space, w_self, __args__): @@ -4206,7 +4097,7 @@ ) def TryFinally_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4220,10 +4111,10 @@ def TryFinally_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryFinally_get_finalbody(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody') if w_self.w_finalbody is None: @@ -4237,7 +4128,7 @@ def TryFinally_set_finalbody(space, w_self, w_new_value): w_self.w_finalbody = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _TryFinally_field_unroller = unrolling_iterable(['body', 'finalbody']) def TryFinally_init(space, w_self, __args__): @@ -4271,7 +4162,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -4279,20 +4170,22 @@ def Assert_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assert_get_msg(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'msg') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg') return space.wrap(w_self.msg) @@ -4300,13 +4193,15 @@ def Assert_set_msg(space, w_self, w_new_value): try: w_self.msg = space.interp_w(expr, w_new_value, True) + if type(w_self.msg) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'msg', w_new_value) return w_self.deldictvalue(space, 'msg') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assert_field_unroller = unrolling_iterable(['test', 'msg']) def Assert_init(space, w_self, __args__): @@ -4334,7 +4229,7 @@ ) def Import_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4348,7 +4243,7 @@ def Import_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Import_field_unroller = unrolling_iterable(['names']) def Import_init(space, w_self, __args__): @@ -4380,7 +4275,7 @@ w_obj = w_self.getdictvalue(space, 'module') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module') return space.wrap(w_self.module) @@ -4397,10 +4292,10 @@ w_self.setdictvalue(space, 'module', w_new_value) return w_self.deldictvalue(space, 'module') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ImportFrom_get_names(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4414,14 +4309,14 @@ def ImportFrom_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ImportFrom_get_level(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'level') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level') return 
space.wrap(w_self.level) @@ -4435,7 +4330,7 @@ w_self.setdictvalue(space, 'level', w_new_value) return w_self.deldictvalue(space, 'level') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ImportFrom_field_unroller = unrolling_iterable(['module', 'names', 'level']) def ImportFrom_init(space, w_self, __args__): @@ -4469,7 +4364,7 @@ w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -4477,20 +4372,22 @@ def Exec_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Exec_get_globals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'globals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals') return space.wrap(w_self.globals) @@ -4498,20 +4395,22 @@ def Exec_set_globals(space, w_self, w_new_value): try: w_self.globals = space.interp_w(expr, w_new_value, True) + if type(w_self.globals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'globals', w_new_value) return w_self.deldictvalue(space, 'globals') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Exec_get_locals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'locals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals') return space.wrap(w_self.locals) @@ -4519,13 +4418,15 @@ def Exec_set_locals(space, w_self, w_new_value): try: w_self.locals = space.interp_w(expr, w_new_value, True) + if type(w_self.locals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'locals', w_new_value) return w_self.deldictvalue(space, 'locals') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Exec_field_unroller = unrolling_iterable(['body', 'globals', 'locals']) def Exec_init(space, w_self, __args__): @@ -4554,7 +4455,7 @@ ) def Global_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4568,7 +4469,7 @@ def Global_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Global_field_unroller = 
unrolling_iterable(['names']) def Global_init(space, w_self, __args__): @@ -4600,7 +4501,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -4608,13 +4509,15 @@ def Expr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Expr_field_unroller = unrolling_iterable(['value']) def Expr_init(space, w_self, __args__): @@ -4696,7 +4599,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -4710,14 +4613,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def expr_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -4731,7 +4634,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 expr.typedef = typedef.TypeDef("expr", AST.typedef, @@ -4747,7 +4650,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return boolop_to_class[w_self.op - 1]() @@ -4763,10 +4666,10 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BoolOp_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -4780,7 +4683,7 @@ def BoolOp_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _BoolOp_field_unroller = unrolling_iterable(['op', 'values']) def BoolOp_init(space, w_self, __args__): @@ -4813,7 +4716,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -4821,20 +4724,22 @@ def BinOp_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BinOp_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -4850,14 +4755,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def BinOp_get_right(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'right') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right') return space.wrap(w_self.right) @@ -4865,13 +4770,15 @@ def BinOp_set_right(space, w_self, w_new_value): try: w_self.right = space.interp_w(expr, w_new_value, False) + if type(w_self.right) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'right', w_new_value) return w_self.deldictvalue(space, 'right') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _BinOp_field_unroller = unrolling_iterable(['left', 'op', 'right']) def BinOp_init(space, w_self, __args__): @@ -4904,7 +4811,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return unaryop_to_class[w_self.op - 1]() @@ -4920,14 +4827,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def UnaryOp_get_operand(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'operand') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand') return space.wrap(w_self.operand) @@ -4935,13 +4842,15 @@ def UnaryOp_set_operand(space, w_self, w_new_value): try: w_self.operand = space.interp_w(expr, w_new_value, False) + if type(w_self.operand) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise 
w_self.setdictvalue(space, 'operand', w_new_value) return w_self.deldictvalue(space, 'operand') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _UnaryOp_field_unroller = unrolling_iterable(['op', 'operand']) def UnaryOp_init(space, w_self, __args__): @@ -4973,7 +4882,7 @@ w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -4987,14 +4896,14 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Lambda_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5002,13 +4911,15 @@ def Lambda_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Lambda_field_unroller = unrolling_iterable(['args', 'body']) def Lambda_init(space, w_self, __args__): @@ -5040,7 +4951,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -5048,20 +4959,22 @@ def IfExp_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def IfExp_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5069,20 +4982,22 @@ def IfExp_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def IfExp_get_orelse(space, w_self): if w_self.w_dict is not None: w_obj = 
w_self.getdictvalue(space, 'orelse') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') return space.wrap(w_self.orelse) @@ -5090,13 +5005,15 @@ def IfExp_set_orelse(space, w_self, w_new_value): try: w_self.orelse = space.interp_w(expr, w_new_value, False) + if type(w_self.orelse) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'orelse', w_new_value) return w_self.deldictvalue(space, 'orelse') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _IfExp_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def IfExp_init(space, w_self, __args__): @@ -5125,7 +5042,7 @@ ) def Dict_get_keys(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys') if w_self.w_keys is None: @@ -5139,10 +5056,10 @@ def Dict_set_keys(space, w_self, w_new_value): w_self.w_keys = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Dict_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -5156,7 +5073,7 @@ def Dict_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Dict_field_unroller = unrolling_iterable(['keys', 'values']) def Dict_init(space, w_self, __args__): @@ -5186,7 +5103,7 @@ ) def Set_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -5200,7 +5117,7 @@ def Set_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Set_field_unroller = unrolling_iterable(['elts']) def Set_init(space, w_self, __args__): @@ -5232,7 +5149,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5240,16 +5157,18 @@ def ListComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ListComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5263,7 +5182,7 @@ def ListComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _ListComp_field_unroller = unrolling_iterable(['elt', 'generators']) def ListComp_init(space, w_self, __args__): @@ -5296,7 +5215,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5304,16 +5223,18 @@ def SetComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def SetComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5327,7 +5248,7 @@ def SetComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _SetComp_field_unroller = unrolling_iterable(['elt', 'generators']) def SetComp_init(space, w_self, __args__): @@ -5360,7 +5281,7 @@ w_obj = w_self.getdictvalue(space, 'key') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key') return space.wrap(w_self.key) @@ -5368,20 +5289,22 @@ def DictComp_set_key(space, w_self, w_new_value): try: w_self.key = space.interp_w(expr, w_new_value, False) + if type(w_self.key) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'key', w_new_value) return w_self.deldictvalue(space, 'key') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def DictComp_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5389,16 +5312,18 @@ def DictComp_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def 
DictComp_get_generators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5412,7 +5337,7 @@ def DictComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _DictComp_field_unroller = unrolling_iterable(['key', 'value', 'generators']) def DictComp_init(space, w_self, __args__): @@ -5446,7 +5371,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5454,16 +5379,18 @@ def GeneratorExp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def GeneratorExp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5477,7 +5404,7 @@ def GeneratorExp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _GeneratorExp_field_unroller = unrolling_iterable(['elt', 'generators']) def GeneratorExp_init(space, w_self, __args__): @@ -5510,7 +5437,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5518,13 +5445,15 @@ def Yield_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Yield_field_unroller = unrolling_iterable(['value']) def Yield_init(space, w_self, __args__): @@ -5555,7 +5484,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -5563,16 +5492,18 @@ def Compare_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) 
except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Compare_get_ops(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops') if w_self.w_ops is None: @@ -5586,10 +5517,10 @@ def Compare_set_ops(space, w_self, w_new_value): w_self.w_ops = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Compare_get_comparators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators') if w_self.w_comparators is None: @@ -5603,7 +5534,7 @@ def Compare_set_comparators(space, w_self, w_new_value): w_self.w_comparators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Compare_field_unroller = unrolling_iterable(['left', 'ops', 'comparators']) def Compare_init(space, w_self, __args__): @@ -5638,7 +5569,7 @@ w_obj = w_self.getdictvalue(space, 'func') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func') return space.wrap(w_self.func) @@ -5646,16 +5577,18 @@ def Call_set_func(space, w_self, w_new_value): try: w_self.func = space.interp_w(expr, w_new_value, False) + if type(w_self.func) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'func', w_new_value) return w_self.deldictvalue(space, 'func') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Call_get_args(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') if w_self.w_args is None: @@ -5669,10 +5602,10 @@ def Call_set_args(space, w_self, w_new_value): w_self.w_args = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Call_get_keywords(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') if w_self.w_keywords is None: @@ -5686,14 +5619,14 @@ def Call_set_keywords(space, w_self, w_new_value): w_self.w_keywords = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def Call_get_starargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'starargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') return space.wrap(w_self.starargs) @@ -5701,20 +5634,22 @@ def Call_set_starargs(space, w_self, 
w_new_value): try: w_self.starargs = space.interp_w(expr, w_new_value, True) + if type(w_self.starargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'starargs', w_new_value) return w_self.deldictvalue(space, 'starargs') - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 def Call_get_kwargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'kwargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 16: + if not w_self.initialization_state & 64: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') return space.wrap(w_self.kwargs) @@ -5722,13 +5657,15 @@ def Call_set_kwargs(space, w_self, w_new_value): try: w_self.kwargs = space.interp_w(expr, w_new_value, True) + if type(w_self.kwargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'kwargs', w_new_value) return w_self.deldictvalue(space, 'kwargs') - w_self.initialization_state |= 16 + w_self.initialization_state |= 64 _Call_field_unroller = unrolling_iterable(['func', 'args', 'keywords', 'starargs', 'kwargs']) def Call_init(space, w_self, __args__): @@ -5765,7 +5702,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5773,13 +5710,15 @@ def Repr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Repr_field_unroller = unrolling_iterable(['value']) def Repr_init(space, w_self, __args__): @@ -5810,7 +5749,7 @@ w_obj = w_self.getdictvalue(space, 'n') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n') return w_self.n @@ -5824,7 +5763,7 @@ w_self.setdictvalue(space, 'n', w_new_value) return w_self.deldictvalue(space, 'n') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Num_field_unroller = unrolling_iterable(['n']) def Num_init(space, w_self, __args__): @@ -5855,7 +5794,7 @@ w_obj = w_self.getdictvalue(space, 's') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's') return w_self.s @@ -5869,7 +5808,7 @@ w_self.setdictvalue(space, 's', w_new_value) return w_self.deldictvalue(space, 's') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Str_field_unroller = unrolling_iterable(['s']) def Str_init(space, w_self, __args__): @@ -5900,7 
+5839,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5908,20 +5847,22 @@ def Attribute_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Attribute_get_attr(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'attr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr') return space.wrap(w_self.attr) @@ -5935,14 +5876,14 @@ w_self.setdictvalue(space, 'attr', w_new_value) return w_self.deldictvalue(space, 'attr') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Attribute_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -5958,7 +5899,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Attribute_field_unroller = unrolling_iterable(['value', 'attr', 'ctx']) def Attribute_init(space, w_self, __args__): @@ -5991,7 +5932,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5999,20 +5940,22 @@ def Subscript_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Subscript_get_slice(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'slice') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice') return space.wrap(w_self.slice) @@ -6020,20 +5963,22 @@ def Subscript_set_slice(space, w_self, w_new_value): try: w_self.slice = space.interp_w(slice, w_new_value, False) + if type(w_self.slice) is slice: + raise 
OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'slice', w_new_value) return w_self.deldictvalue(space, 'slice') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Subscript_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6049,7 +5994,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Subscript_field_unroller = unrolling_iterable(['value', 'slice', 'ctx']) def Subscript_init(space, w_self, __args__): @@ -6082,7 +6027,7 @@ w_obj = w_self.getdictvalue(space, 'id') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id') return space.wrap(w_self.id) @@ -6096,14 +6041,14 @@ w_self.setdictvalue(space, 'id', w_new_value) return w_self.deldictvalue(space, 'id') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Name_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6119,7 +6064,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Name_field_unroller = unrolling_iterable(['id', 'ctx']) def Name_init(space, w_self, __args__): @@ -6147,7 +6092,7 @@ ) def List_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6161,14 +6106,14 @@ def List_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def List_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6184,7 +6129,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _List_field_unroller = unrolling_iterable(['elts', 'ctx']) def List_init(space, w_self, __args__): @@ -6213,7 +6158,7 @@ ) def Tuple_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not 
w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6227,14 +6172,14 @@ def Tuple_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Tuple_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6250,7 +6195,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Tuple_field_unroller = unrolling_iterable(['elts', 'ctx']) def Tuple_init(space, w_self, __args__): @@ -6283,7 +6228,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return w_self.value @@ -6297,7 +6242,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Const_field_unroller = unrolling_iterable(['value']) def Const_init(space, w_self, __args__): @@ -6409,6 +6354,8 @@ def Slice_set_lower(space, w_self, w_new_value): try: w_self.lower = space.interp_w(expr, w_new_value, True) + if type(w_self.lower) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6430,6 +6377,8 @@ def Slice_set_upper(space, w_self, w_new_value): try: w_self.upper = space.interp_w(expr, w_new_value, True) + if type(w_self.upper) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6451,6 +6400,8 @@ def Slice_set_step(space, w_self, w_new_value): try: w_self.step = space.interp_w(expr, w_new_value, True) + if type(w_self.step) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6540,6 +6491,8 @@ def Index_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6809,6 +6762,8 @@ def comprehension_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6830,6 +6785,8 @@ def comprehension_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6887,7 +6844,7 @@ w_obj = w_self.getdictvalue(space, 
'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -6901,14 +6858,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def excepthandler_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -6922,7 +6879,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 excepthandler.typedef = typedef.TypeDef("excepthandler", AST.typedef, @@ -6938,7 +6895,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -6946,20 +6903,22 @@ def ExceptHandler_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ExceptHandler_get_name(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -6967,16 +6926,18 @@ def ExceptHandler_set_name(space, w_self, w_new_value): try: w_self.name = space.interp_w(expr, w_new_value, True) + if type(w_self.name) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ExceptHandler_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -6990,7 +6951,7 @@ def ExceptHandler_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ExceptHandler_field_unroller = unrolling_iterable(['type', 'name', 'body']) def 
ExceptHandler_init(space, w_self, __args__): @@ -7164,6 +7125,8 @@ def keyword_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -79,6 +79,7 @@ else: self.emit("class %s(AST):" % (base,)) if sum.attributes: + self.emit("") args = ", ".join(attr.name.value for attr in sum.attributes) self.emit("def __init__(self, %s):" % (args,), 1) for attr in sum.attributes: @@ -114,7 +115,7 @@ else: names.append(repr(field.name.value)) sub = (", ".join(names), name.value) - self.emit("missing_field(space, self.initialization_state, [%s], %r)" + self.emit("self.missing_field(space, [%s], %r)" % sub, 3) self.emit("else:", 2) # Fill in all the default fields. @@ -195,17 +196,13 @@ def visitConstructor(self, cons, base, extra_attributes): self.emit("class %s(%s):" % (cons.name, base)) self.emit("") - for field in self.data.cons_attributes[cons]: - subst = (field.name, self.data.field_masks[field]) - self.emit("_%s_mask = %i" % subst, 1) - self.emit("") self.make_constructor(cons.fields, cons, extra_attributes, base) self.emit("") self.emit("def walkabout(self, visitor):", 1) self.emit("visitor.visit_%s(self)" % (cons.name,), 2) self.emit("") self.make_mutate_over(cons, cons.name) - self.make_var_syncer(cons.fields + self.data.cons_attributes[cons], + self.make_var_syncer(self.data.cons_attributes[cons] + cons.fields, cons, cons.name) def visitField(self, field): @@ -324,7 +321,7 @@ def visitSum(self, sum, name): for field in sum.attributes: - self.make_property(field, name, True) + self.make_property(field, name) self.make_typedef(name, "AST", sum.attributes, fields_name="_attributes") if not is_simple_sum(sum): @@ -400,13 +397,10 @@ def visitField(self, field, name): self.make_property(field, name) - def make_property(self, field, name, different_masks=False): + def make_property(self, field, name): func = "def %s_get_%s(space, w_self):" % (name, field.name) self.emit(func) - if different_masks: - flag = "w_self._%s_mask" % (field.name,) - else: - flag = self.data.field_masks[field] + flag = self.data.field_masks[field] if not field.seq: self.emit("if w_self.w_dict is not None:", 1) self.emit(" w_obj = w_self.getdictvalue(space, '%s')" % (field.name,), 1) @@ -458,6 +452,11 @@ config = (field.name, field.type, repr(field.opt)) self.emit("w_self.%s = space.interp_w(%s, w_new_value, %s)" % config, 2) + if field.type.value not in self.data.prod_simple: + self.emit("if type(w_self.%s) is %s:" % ( + field.name, field.type), 2) + self.emit("raise OperationError(space.w_TypeError, " + "space.w_None)", 3) else: level = 2 if field.opt and field.type.value != "int": @@ -505,7 +504,10 @@ optional_mask = 0 for i, field in enumerate(fields): flag = 1 << i - field_masks[field] = flag + if field not in field_masks: + field_masks[field] = flag + else: + assert field_masks[field] == flag if field.opt: optional_mask |= flag else: @@ -518,9 +520,9 @@ if is_simple_sum(sum): simple_types.add(tp.name.value) else: + attrs = [field for field in sum.attributes] for cons in sum.types: - attrs = [copy_field(field) for field in sum.attributes] - add_masks(cons.fields + attrs, cons) + add_masks(attrs + cons.fields, 
cons) cons_attributes[cons] = attrs else: prod = tp.value @@ -588,6 +590,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." + state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \\"%s\\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \\"%s\\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -631,15 +651,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." - for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \\"%s\\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) - raise AssertionError("should not reach here") """ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -190,8 +190,8 @@ def is_w(self, space, w_other): return self is w_other - def unique_id(self, space): - return space.wrap(compute_unique_id(self)) + def immutable_unique_id(self, space): + return None def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) @@ -487,6 +487,16 @@ 'parser', 'fcntl', '_codecs', 'binascii' ] + # These modules are treated like CPython treats built-in modules, + # i.e. they always shadow any xx.py. The other modules are treated + # like CPython treats extension modules, and are loaded in sys.path + # order by the fake entry '.../lib_pypy/__extensions__'. + MODULES_THAT_ALWAYS_SHADOW = dict.fromkeys([ + '__builtin__', '__pypy__', '_ast', '_codecs', '_sre', '_warnings', + '_weakref', 'errno', 'exceptions', 'gc', 'imp', 'marshal', + 'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport', + ], None) + def make_builtins(self): "NOT_RPYTHON: only for initializing the space." 
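The W_Root hunk just above replaces unique_id() with immutable_unique_id(), which returns None for ordinary objects, and the space.id() and default_identity_hash hunks just below fall back to compute_unique_id()/compute_identity_hash() in that case. The net effect is that objects behaving as immutable values can report a stable, value-based identity while everything else keeps the address-based one. A minimal, self-contained sketch of the same pattern in plain Python (W_Root and immutable_unique_id mirror the diff; W_SmallInt, app_level_id and the tuple key are made up for the example):

class W_Root(object):
    def immutable_unique_id(self):
        return None                     # ordinary objects: no value identity

class W_SmallInt(W_Root):
    def __init__(self, value):
        self.value = value
    def immutable_unique_id(self):
        return ('int', self.value)      # equal values share the same key

def app_level_id(w_obj):
    key = w_obj.immutable_unique_id()
    if key is None:                     # common case: identity of the wrapper
        return id(w_obj)
    return hash(key)                    # value-based identity for immutables

assert app_level_id(W_SmallInt(7)) == app_level_id(W_SmallInt(7))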
@@ -696,7 +706,10 @@ return w_two.is_w(self, w_one) def id(self, w_obj): - return w_obj.unique_id(self) + w_result = w_obj.immutable_unique_id(self) + if w_result is None: + w_result = self.wrap(compute_unique_id(w_obj)) + return w_result def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" @@ -1607,6 +1620,8 @@ 'UnicodeError', 'ValueError', 'ZeroDivisionError', + 'UnicodeEncodeError', + 'UnicodeDecodeError', ] ## Irregular part of the interface: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -54,7 +54,11 @@ # Hash support def default_identity_hash(space, w_obj): - return space.wrap(compute_identity_hash(w_obj)) + w_unique_id = w_obj.immutable_unique_id(space) + if w_unique_id is None: # common case + return space.wrap(compute_identity_hash(w_obj)) + else: + return space.hash(w_unique_id) # ____________________________________________________________ # diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -8,6 +8,7 @@ from pypy.objspace.flow.model import Variable, Constant from pypy.annotation import model as annmodel from pypy.jit.metainterp.history import REF, INT, FLOAT +from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -48,6 +49,11 @@ value._the_opaque_pointer = op return op +def _normalize(value): + if isinstance(value, lltype._ptr): + value = lltype.top_container(value._obj) + return value + def from_opaque_string(s): if isinstance(s, str): return s @@ -322,6 +328,14 @@ _variables.append(v) return r +def compile_started_vars(clt): + if not hasattr(clt, '_debug_argtypes'): # only when compiling the loop + argtypes = [v.concretetype for v in _variables] + try: + clt._debug_argtypes = argtypes + except AttributeError: # when 'clt' is actually a translated + pass # GcStruct + def compile_add(loop, opnum): loop = _from_opaque(loop) loop.operations.append(Operation(opnum)) @@ -347,6 +361,16 @@ op = loop.operations[-1] op.descr = weakref.ref(descr) +TARGET_TOKENS = weakref.WeakKeyDictionary() + +def compile_add_target_token(loop, descr, clt): + # here, 'clt' is the compiled_loop_token of the original loop that + # we are compiling + loop = _from_opaque(loop) + op = loop.operations[-1] + descrobj = _normalize(descr) + TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args, clt + def compile_add_var(loop, intvar): loop = _from_opaque(loop) op = loop.operations[-1] @@ -381,13 +405,25 @@ _variables.append(v) return r -def compile_add_jump_target(loop, loop_target): +def compile_add_jump_target(loop, targettoken, source_clt): loop = _from_opaque(loop) - loop_target = _from_opaque(loop_target) + descrobj = _normalize(targettoken) + (loop_target, target_opindex, target_inputargs, target_clt + ) = TARGET_TOKENS[descrobj] + # + try: + assert source_clt._debug_argtypes == target_clt._debug_argtypes + except AttributeError: # when translated + pass + # op = loop.operations[-1] op.jump_target = loop_target + op.jump_target_opindex = target_opindex + op.jump_target_inputargs = target_inputargs assert op.opnum == rop.JUMP - assert len(op.args) == len(loop_target.inputargs) + assert [v.concretetype for v in op.args] == ( + [v.concretetype for v in target_inputargs]) + # if loop_target == loop: log.info("compiling 
new loop") else: @@ -521,10 +557,11 @@ self.opindex += 1 continue if op.opnum == rop.JUMP: - assert len(op.jump_target.inputargs) == len(args) - self.env = dict(zip(op.jump_target.inputargs, args)) + inputargs = op.jump_target_inputargs + assert len(inputargs) == len(args) + self.env = dict(zip(inputargs, args)) self.loop = op.jump_target - self.opindex = 0 + self.opindex = op.jump_target_opindex _stats.exec_jumps += 1 elif op.opnum == rop.FINISH: if self.verbose: @@ -617,6 +654,15 @@ # return _op_default_implementation + def op_label(self, _, *args): + op = self.loop.operations[self.opindex] + assert op.opnum == rop.LABEL + assert len(op.args) == len(args) + newenv = {} + for v, value in zip(op.args, args): + newenv[v] = value + self.env = newenv + def op_debug_merge_point(self, _, *args): from pypy.jit.metainterp.warmspot import get_stats try: @@ -959,6 +1005,7 @@ self._may_force = self.opindex try: inpargs = _from_opaque(ctl.compiled_version).inputargs + assert len(inpargs) == len(args) for i, inparg in enumerate(inpargs): TYPE = inparg.concretetype if TYPE is lltype.Signed: @@ -1788,9 +1835,11 @@ setannotation(compile_start_int_var, annmodel.SomeInteger()) setannotation(compile_start_ref_var, annmodel.SomeInteger()) setannotation(compile_start_float_var, annmodel.SomeInteger()) +setannotation(compile_started_vars, annmodel.s_None) setannotation(compile_add, annmodel.s_None) setannotation(compile_add_descr, annmodel.s_None) setannotation(compile_add_descr_arg, annmodel.s_None) +setannotation(compile_add_target_token, annmodel.s_None) setannotation(compile_add_var, annmodel.s_None) setannotation(compile_add_int_const, annmodel.s_None) setannotation(compile_add_ref_const, annmodel.s_None) diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -37,7 +37,7 @@ def get_arg_types(self): return self.arg_types - def get_return_type(self): + def get_result_type(self): return self.typeinfo def get_extra_info(self): @@ -138,29 +138,30 @@ clt = original_loop_token.compiled_loop_token clt.loop_and_bridges.append(c) clt.compiling_a_bridge() - self._compile_loop_or_bridge(c, inputargs, operations) + self._compile_loop_or_bridge(c, inputargs, operations, clt) old, oldindex = faildescr._compiled_fail llimpl.compile_redirect_fail(old, oldindex, c) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, inputargs, operations, jitcell_token, + log=True, name=''): """In a real assembler backend, this should assemble the given list of operations. Here we just generate a similar CompiledLoop instance. The code here is RPython, whereas the code in llimpl is not. 
""" c = llimpl.compile_start() - clt = model.CompiledLoopToken(self, looptoken.number) + clt = model.CompiledLoopToken(self, jitcell_token.number) clt.loop_and_bridges = [c] clt.compiled_version = c - looptoken.compiled_loop_token = clt - self._compile_loop_or_bridge(c, inputargs, operations) + jitcell_token.compiled_loop_token = clt + self._compile_loop_or_bridge(c, inputargs, operations, clt) def free_loop_and_bridges(self, compiled_loop_token): for c in compiled_loop_token.loop_and_bridges: llimpl.mark_as_free(c) model.AbstractCPU.free_loop_and_bridges(self, compiled_loop_token) - def _compile_loop_or_bridge(self, c, inputargs, operations): + def _compile_loop_or_bridge(self, c, inputargs, operations, clt): var2index = {} for box in inputargs: if isinstance(box, history.BoxInt): @@ -172,10 +173,11 @@ var2index[box] = llimpl.compile_start_float_var(c) else: raise Exception("box is: %r" % (box,)) - self._compile_operations(c, operations, var2index) + llimpl.compile_started_vars(clt) + self._compile_operations(c, operations, var2index, clt) return c - def _compile_operations(self, c, operations, var2index): + def _compile_operations(self, c, operations, var2index, clt): for op in operations: llimpl.compile_add(c, op.getopnum()) descr = op.getdescr() @@ -183,9 +185,11 @@ llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo, descr.arg_types, descr.extrainfo, descr.width) - if (isinstance(descr, history.LoopToken) and - op.getopnum() != rop.JUMP): + if isinstance(descr, history.JitCellToken): + assert op.getopnum() != rop.JUMP llimpl.compile_add_loop_token(c, descr) + if isinstance(descr, history.TargetToken) and op.getopnum() == rop.LABEL: + llimpl.compile_add_target_token(c, descr, clt) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython c._obj.externalobj.operations[-1].setdescr(descr) @@ -239,9 +243,7 @@ assert op.is_final() if op.getopnum() == rop.JUMP: targettoken = op.getdescr() - assert isinstance(targettoken, history.LoopToken) - compiled_version = targettoken.compiled_loop_token.compiled_version - llimpl.compile_add_jump_target(c, compiled_version) + llimpl.compile_add_jump_target(c, targettoken, clt) elif op.getopnum() == rop.FINISH: faildescr = op.getdescr() index = self.get_fail_descr_number(faildescr) @@ -260,21 +262,28 @@ self.latest_frame = frame return fail_index - def execute_token(self, loop_token): - """Calls the assembler generated for the given loop. - Returns the ResOperation that failed, of type rop.FAIL. 
- """ - fail_index = self._execute_token(loop_token) - return self.get_fail_descr_from_number(fail_index) - - def set_future_value_int(self, index, intvalue): - llimpl.set_future_value_int(index, intvalue) - - def set_future_value_ref(self, index, objvalue): - llimpl.set_future_value_ref(index, objvalue) - - def set_future_value_float(self, index, floatvalue): - llimpl.set_future_value_float(index, floatvalue) + def make_execute_token(self, *argtypes): + nb_args = len(argtypes) + unroll_argtypes = unrolling_iterable(list(enumerate(argtypes))) + # + def execute_token(loop_token, *args): + assert len(args) == nb_args + for index, TYPE in unroll_argtypes: + x = args[index] + assert TYPE == lltype.typeOf(x) + if TYPE == lltype.Signed: + llimpl.set_future_value_int(index, x) + elif TYPE == llmemory.GCREF: + llimpl.set_future_value_ref(index, x) + elif TYPE == longlong.FLOATSTORAGE: + llimpl.set_future_value_float(index, x) + else: + assert 0 + # + fail_index = self._execute_token(loop_token) + return self.get_fail_descr_from_number(fail_index) + # + return execute_token def get_latest_value_int(self, index): return llimpl.frame_int_getvalue(self.latest_frame, index) diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -5,11 +5,7 @@ from pypy.jit.metainterp.history import AbstractDescr, getkind from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker, longlong - -# The point of the class organization in this file is to make instances -# as compact as possible. This is done by not storing the field size or -# the 'is_pointer_field' flag in the instance itself but in the class -# (in methods actually) using a few classes instead of just one. 
+from pypy.jit.codewriter.longlong import is_longlong class GcCache(object): @@ -19,6 +15,7 @@ self._cache_size = {} self._cache_field = {} self._cache_array = {} + self._cache_arraylen = {} self._cache_call = {} self._cache_interiorfield = {} @@ -26,24 +23,15 @@ assert isinstance(STRUCT, lltype.GcStruct) def init_array_descr(self, ARRAY, arraydescr): - assert isinstance(ARRAY, lltype.GcArray) + assert (isinstance(ARRAY, lltype.GcArray) or + isinstance(ARRAY, lltype.GcStruct) and ARRAY._arrayfld) -if lltype.SignedLongLong is lltype.Signed: - def is_longlong(TYPE): - return False -else: - assert rffi.sizeof(lltype.SignedLongLong) == rffi.sizeof(lltype.Float) - def is_longlong(TYPE): - return TYPE in (lltype.SignedLongLong, lltype.UnsignedLongLong) - # ____________________________________________________________ # SizeDescrs class SizeDescr(AbstractDescr): size = 0 # help translation - is_immutable = False - tid = llop.combine_ushort(lltype.Signed, 0, 0) def __init__(self, size, count_fields_if_immut=-1): @@ -77,265 +65,247 @@ cache[STRUCT] = sizedescr return sizedescr + # ____________________________________________________________ # FieldDescrs -class BaseFieldDescr(AbstractDescr): +FLAG_POINTER = 'P' +FLAG_FLOAT = 'F' +FLAG_UNSIGNED = 'U' +FLAG_SIGNED = 'S' +FLAG_STRUCT = 'X' +FLAG_VOID = 'V' + +class FieldDescr(AbstractDescr): + name = '' offset = 0 # help translation - name = '' - _clsname = '' + field_size = 0 + flag = '\x00' - def __init__(self, name, offset): + def __init__(self, name, offset, field_size, flag): self.name = name self.offset = offset + self.field_size = field_size + self.flag = flag + + def is_pointer_field(self): + return self.flag == FLAG_POINTER + + def is_float_field(self): + return self.flag == FLAG_FLOAT + + def is_field_signed(self): + return self.flag == FLAG_SIGNED def sort_key(self): return self.offset - def get_field_size(self, translate_support_code): - raise NotImplementedError + def repr_of_descr(self): + return '' % (self.flag, self.name, self.offset) - _is_pointer_field = False # unless overridden by GcPtrFieldDescr - _is_float_field = False # unless overridden by FloatFieldDescr - _is_field_signed = False # unless overridden by XxxFieldDescr - - def is_pointer_field(self): - return self._is_pointer_field - - def is_float_field(self): - return self._is_float_field - - def is_field_signed(self): - return self._is_field_signed - - def repr_of_descr(self): - return '<%s %s %s>' % (self._clsname, self.name, self.offset) - -class DynamicFieldDescr(BaseFieldDescr): - def __init__(self, offset, fieldsize, is_pointer, is_float, is_signed): - self.offset = offset - self._fieldsize = fieldsize - self._is_pointer_field = is_pointer - self._is_float_field = is_float - self._is_field_signed = is_signed - - def get_field_size(self, translate_support_code): - return self._fieldsize - -class NonGcPtrFieldDescr(BaseFieldDescr): - _clsname = 'NonGcPtrFieldDescr' - def get_field_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrFieldDescr(NonGcPtrFieldDescr): - _clsname = 'GcPtrFieldDescr' - _is_pointer_field = True - -def getFieldDescrClass(TYPE): - return getDescrClass(TYPE, BaseFieldDescr, GcPtrFieldDescr, - NonGcPtrFieldDescr, 'Field', 'get_field_size', - '_is_float_field', '_is_field_signed') def get_field_descr(gccache, STRUCT, fieldname): cache = gccache._cache_field try: return cache[STRUCT][fieldname] except KeyError: - offset, _ = symbolic.get_field_token(STRUCT, fieldname, - 
gccache.translate_support_code) + offset, size = symbolic.get_field_token(STRUCT, fieldname, + gccache.translate_support_code) FIELDTYPE = getattr(STRUCT, fieldname) + flag = get_type_flag(FIELDTYPE) name = '%s.%s' % (STRUCT._name, fieldname) - fielddescr = getFieldDescrClass(FIELDTYPE)(name, offset) + fielddescr = FieldDescr(name, offset, size, flag) cachedict = cache.setdefault(STRUCT, {}) cachedict[fieldname] = fielddescr return fielddescr +def get_type_flag(TYPE): + if isinstance(TYPE, lltype.Ptr): + if TYPE.TO._gckind == 'gc': + return FLAG_POINTER + else: + return FLAG_UNSIGNED + if isinstance(TYPE, lltype.Struct): + return FLAG_STRUCT + if TYPE is lltype.Float or is_longlong(TYPE): + return FLAG_FLOAT + if (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and + rffi.cast(TYPE, -1) == -1): + return FLAG_SIGNED + return FLAG_UNSIGNED + +def get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT): + cache = gccache._cache_arraylen + try: + return cache[ARRAY_OR_STRUCT] + except KeyError: + tsc = gccache.translate_support_code + (_, _, ofs) = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + size = symbolic.get_size(lltype.Signed, tsc) + result = FieldDescr("len", ofs, size, get_type_flag(lltype.Signed)) + cache[ARRAY_OR_STRUCT] = result + return result + + # ____________________________________________________________ # ArrayDescrs -_A = lltype.GcArray(lltype.Signed) # a random gcarray -_AF = lltype.GcArray(lltype.Float) # an array of C doubles +class ArrayDescr(AbstractDescr): + tid = 0 + basesize = 0 # workaround for the annotator + itemsize = 0 + lendescr = None + flag = '\x00' - -class BaseArrayDescr(AbstractDescr): - _clsname = '' - tid = llop.combine_ushort(lltype.Signed, 0, 0) - - def get_base_size(self, translate_support_code): - basesize, _, _ = symbolic.get_array_token(_A, translate_support_code) - return basesize - - def get_ofs_length(self, translate_support_code): - _, _, ofslength = symbolic.get_array_token(_A, translate_support_code) - return ofslength - - def get_item_size(self, translate_support_code): - raise NotImplementedError - - _is_array_of_pointers = False # unless overridden by GcPtrArrayDescr - _is_array_of_floats = False # unless overridden by FloatArrayDescr - _is_array_of_structs = False # unless overridden by StructArrayDescr - _is_item_signed = False # unless overridden by XxxArrayDescr + def __init__(self, basesize, itemsize, lendescr, flag): + self.basesize = basesize + self.itemsize = itemsize + self.lendescr = lendescr # or None, if no length + self.flag = flag def is_array_of_pointers(self): - return self._is_array_of_pointers + return self.flag == FLAG_POINTER def is_array_of_floats(self): - return self._is_array_of_floats + return self.flag == FLAG_FLOAT + + def is_item_signed(self): + return self.flag == FLAG_SIGNED def is_array_of_structs(self): - return self._is_array_of_structs - - def is_item_signed(self): - return self._is_item_signed + return self.flag == FLAG_STRUCT def repr_of_descr(self): - return '<%s>' % self._clsname + return '' % (self.flag, self.itemsize) -class NonGcPtrArrayDescr(BaseArrayDescr): - _clsname = 'NonGcPtrArrayDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayDescr(NonGcPtrArrayDescr): - _clsname = 'GcPtrArrayDescr' - _is_array_of_pointers = True - -class FloatArrayDescr(BaseArrayDescr): - _clsname = 'FloatArrayDescr' - _is_array_of_floats = True - def get_base_size(self, translate_support_code): - basesize, _, _ = 
symbolic.get_array_token(_AF, translate_support_code) - return basesize - def get_item_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class StructArrayDescr(BaseArrayDescr): - _clsname = 'StructArrayDescr' - _is_array_of_structs = True - -class BaseArrayNoLengthDescr(BaseArrayDescr): - def get_base_size(self, translate_support_code): - return 0 - - def get_ofs_length(self, translate_support_code): - return -1 - -class DynamicArrayNoLengthDescr(BaseArrayNoLengthDescr): - def __init__(self, itemsize): - self.itemsize = itemsize - - def get_item_size(self, translate_support_code): - return self.itemsize - -class NonGcPtrArrayNoLengthDescr(BaseArrayNoLengthDescr): - _clsname = 'NonGcPtrArrayNoLengthDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayNoLengthDescr(NonGcPtrArrayNoLengthDescr): - _clsname = 'GcPtrArrayNoLengthDescr' - _is_array_of_pointers = True - -def getArrayDescrClass(ARRAY): - if ARRAY.OF is lltype.Float: - return FloatArrayDescr - elif isinstance(ARRAY.OF, lltype.Struct): - class Descr(StructArrayDescr): - _clsname = '%sArrayDescr' % ARRAY.OF._name - def get_item_size(self, translate_support_code): - return symbolic.get_size(ARRAY.OF, translate_support_code) - Descr.__name__ = Descr._clsname - return Descr - return getDescrClass(ARRAY.OF, BaseArrayDescr, GcPtrArrayDescr, - NonGcPtrArrayDescr, 'Array', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def getArrayNoLengthDescrClass(ARRAY): - return getDescrClass(ARRAY.OF, BaseArrayNoLengthDescr, GcPtrArrayNoLengthDescr, - NonGcPtrArrayNoLengthDescr, 'ArrayNoLength', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def get_array_descr(gccache, ARRAY): +def get_array_descr(gccache, ARRAY_OR_STRUCT): cache = gccache._cache_array try: - return cache[ARRAY] + return cache[ARRAY_OR_STRUCT] except KeyError: - # we only support Arrays that are either GcArrays, or raw no-length - # non-gc Arrays. 
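# (Illustration in plain Python, not part of the diff.)  The descr getters
# being rewritten here -- get_field_descr, get_array_descr, get_call_descr --
# all keep the same memoization shape: one dict per GcCache, keyed on the
# lltype, so asking twice for the same type returns the identical descr
# object.  The helper names below are invented for the example.

_cache_array = {}

def get_array_descr_example(array_key, build_descr):
    try:
        return _cache_array[array_key]       # already built for this type
    except KeyError:
        descr = build_descr(array_key)       # compute sizes/flags once
        _cache_array[array_key] = descr
        return descr

d1 = get_array_descr_example('GcArray(Signed)', lambda key: {'key': key})
d2 = get_array_descr_example('GcArray(Signed)', lambda key: {'key': key})
assert d1 is d2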
- if ARRAY._hints.get('nolength', False): - assert not isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayNoLengthDescrClass(ARRAY)() + tsc = gccache.translate_support_code + basesize, itemsize, _ = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + if isinstance(ARRAY_OR_STRUCT, lltype.Array): + ARRAY_INSIDE = ARRAY_OR_STRUCT else: - assert isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayDescrClass(ARRAY)() - # verify basic assumption that all arrays' basesize and ofslength - # are equal - basesize, itemsize, ofslength = symbolic.get_array_token(ARRAY, False) - assert basesize == arraydescr.get_base_size(False) - assert itemsize == arraydescr.get_item_size(False) - if not ARRAY._hints.get('nolength', False): - assert ofslength == arraydescr.get_ofs_length(False) - if isinstance(ARRAY, lltype.GcArray): - gccache.init_array_descr(ARRAY, arraydescr) - cache[ARRAY] = arraydescr + ARRAY_INSIDE = ARRAY_OR_STRUCT._flds[ARRAY_OR_STRUCT._arrayfld] + if ARRAY_INSIDE._hints.get('nolength', False): + lendescr = None + else: + lendescr = get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT) + flag = get_type_flag(ARRAY_INSIDE.OF) + arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag) + if ARRAY_OR_STRUCT._gckind == 'gc': + gccache.init_array_descr(ARRAY_OR_STRUCT, arraydescr) + cache[ARRAY_OR_STRUCT] = arraydescr return arraydescr + # ____________________________________________________________ # InteriorFieldDescr class InteriorFieldDescr(AbstractDescr): - arraydescr = BaseArrayDescr() # workaround for the annotator - fielddescr = BaseFieldDescr('', 0) + arraydescr = ArrayDescr(0, 0, None, '\x00') # workaround for the annotator + fielddescr = FieldDescr('', 0, 0, '\x00') def __init__(self, arraydescr, fielddescr): + assert arraydescr.flag == FLAG_STRUCT self.arraydescr = arraydescr self.fielddescr = fielddescr + def sort_key(self): + return self.fielddescr.sort_key() + def is_pointer_field(self): return self.fielddescr.is_pointer_field() def is_float_field(self): return self.fielddescr.is_float_field() - def sort_key(self): - return self.fielddescr.sort_key() - def repr_of_descr(self): return '' % self.fielddescr.repr_of_descr() -def get_interiorfield_descr(gc_ll_descr, ARRAY, FIELDTP, name): +def get_interiorfield_descr(gc_ll_descr, ARRAY, name): cache = gc_ll_descr._cache_interiorfield try: - return cache[(ARRAY, FIELDTP, name)] + return cache[(ARRAY, name)] except KeyError: arraydescr = get_array_descr(gc_ll_descr, ARRAY) - fielddescr = get_field_descr(gc_ll_descr, FIELDTP, name) + fielddescr = get_field_descr(gc_ll_descr, ARRAY.OF, name) descr = InteriorFieldDescr(arraydescr, fielddescr) - cache[(ARRAY, FIELDTP, name)] = descr + cache[(ARRAY, name)] = descr return descr +def get_dynamic_interiorfield_descr(gc_ll_descr, offset, width, fieldsize, + is_pointer, is_float, is_signed): + arraydescr = ArrayDescr(0, width, None, FLAG_STRUCT) + if is_pointer: + assert not is_float + flag = FLAG_POINTER + elif is_float: + flag = FLAG_FLOAT + elif is_signed: + flag = FLAG_SIGNED + else: + flag = FLAG_UNSIGNED + fielddescr = FieldDescr('dynamic', offset, fieldsize, flag) + return InteriorFieldDescr(arraydescr, fielddescr) + + # ____________________________________________________________ # CallDescrs -class BaseCallDescr(AbstractDescr): - _clsname = '' - loop_token = None +class CallDescr(AbstractDescr): arg_classes = '' # <-- annotation hack + result_type = '\x00' + result_flag = '\x00' ffi_flags = 1 + call_stub_i = staticmethod(lambda func, args_i, args_r, args_f: + 0) + call_stub_r = 
staticmethod(lambda func, args_i, args_r, args_f: + lltype.nullptr(llmemory.GCREF.TO)) + call_stub_f = staticmethod(lambda func,args_i,args_r,args_f: + longlong.ZEROF) - def __init__(self, arg_classes, extrainfo=None, ffi_flags=1): - self.arg_classes = arg_classes # string of "r" and "i" (ref/int) + def __init__(self, arg_classes, result_type, result_signed, result_size, + extrainfo=None, ffi_flags=1): + """ + 'arg_classes' is a string of characters, one per argument: + 'i', 'r', 'f', 'L', 'S' + + 'result_type' is one character from the same list or 'v' + + 'result_signed' is a boolean True/False + """ + self.arg_classes = arg_classes + self.result_type = result_type + self.result_size = result_size self.extrainfo = extrainfo self.ffi_flags = ffi_flags # NB. the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which # makes sense on Windows as it's the one for all the C functions # we are compiling together with the JIT. On non-Windows platforms # it is just ignored anyway. + if result_type == 'v': + result_flag = FLAG_VOID + elif result_type == 'i': + if result_signed: + result_flag = FLAG_SIGNED + else: + result_flag = FLAG_UNSIGNED + elif result_type == history.REF: + result_flag = FLAG_POINTER + elif result_type == history.FLOAT or result_type == 'L': + result_flag = FLAG_FLOAT + elif result_type == 'S': + result_flag = FLAG_UNSIGNED + else: + raise NotImplementedError("result_type = '%s'" % (result_type,)) + self.result_flag = result_flag def __repr__(self): - res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) + res = 'CallDescr(%s)' % (self.arg_classes,) extraeffect = getattr(self.extrainfo, 'extraeffect', None) if extraeffect is not None: res += ' EF=%r' % extraeffect @@ -363,14 +333,14 @@ def get_arg_types(self): return self.arg_classes - def get_return_type(self): - return self._return_type + def get_result_type(self): + return self.result_type - def get_result_size(self, translate_support_code): - raise NotImplementedError + def get_result_size(self): + return self.result_size def is_result_signed(self): - return False # unless overridden + return self.result_flag == FLAG_SIGNED def create_call_stub(self, rtyper, RESULT): from pypy.rlib.clibffi import FFI_DEFAULT_ABI @@ -408,18 +378,26 @@ seen = {'i': 0, 'r': 0, 'f': 0} args = ", ".join([process(c) for c in self.arg_classes]) - if self.get_return_type() == history.INT: + result_type = self.get_result_type() + if result_type == history.INT: result = 'rffi.cast(lltype.Signed, res)' - elif self.get_return_type() == history.REF: + category = 'i' + elif result_type == history.REF: + assert RESULT == llmemory.GCREF # should be ensured by the caller result = 'lltype.cast_opaque_ptr(llmemory.GCREF, res)' - elif self.get_return_type() == history.FLOAT: + category = 'r' + elif result_type == history.FLOAT: result = 'longlong.getfloatstorage(res)' - elif self.get_return_type() == 'L': + category = 'f' + elif result_type == 'L': result = 'rffi.cast(lltype.SignedLongLong, res)' - elif self.get_return_type() == history.VOID: - result = 'None' - elif self.get_return_type() == 'S': + category = 'f' + elif result_type == history.VOID: + result = '0' + category = 'i' + elif result_type == 'S': result = 'longlong.singlefloat2int(res)' + category = 'i' else: assert 0 source = py.code.Source(""" @@ -433,10 +411,13 @@ d = globals().copy() d.update(locals()) exec source.compile() in d - self.call_stub = d['call_stub'] + call_stub = d['call_stub'] + # store the function into one of three attributes, to preserve + # type-correctness of the 
return value + setattr(self, 'call_stub_%s' % category, call_stub) def verify_types(self, args_i, args_r, args_f, return_type): - assert self._return_type in return_type + assert self.result_type in return_type assert (self.arg_classes.count('i') + self.arg_classes.count('S')) == len(args_i or ()) assert self.arg_classes.count('r') == len(args_r or ()) @@ -444,161 +425,48 @@ self.arg_classes.count('L')) == len(args_f or ()) def repr_of_descr(self): - return '<%s>' % self._clsname + return '' % (self.arg_classes, self.result_type) -class BaseIntCallDescr(BaseCallDescr): - # Base class of the various subclasses of descrs corresponding to - # calls having a return kind of 'int' (including non-gc pointers). - # The inheritance hierarchy is a bit different than with other Descr - # classes because of the 'call_stub' attribute, which is of type - # - # lambda func, args_i, args_r, args_f --> int/ref/float/void - # - # The purpose of BaseIntCallDescr is to be the parent of all classes - # in which 'call_stub' has a return kind of 'int'. - _return_type = history.INT - call_stub = staticmethod(lambda func, args_i, args_r, args_f: 0) - - _is_result_signed = False # can be overridden in XxxCallDescr - def is_result_signed(self): - return self._is_result_signed - -class DynamicIntCallDescr(BaseIntCallDescr): - """ - calldescr that works for every integer type, by explicitly passing it the - size of the result. Used only by get_call_descr_dynamic - """ - _clsname = 'DynamicIntCallDescr' - - def __init__(self, arg_classes, result_size, result_sign, extrainfo, ffi_flags): - BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) - assert isinstance(result_sign, bool) - self._result_size = chr(result_size) - self._result_sign = result_sign - - def get_result_size(self, translate_support_code): - return ord(self._result_size) - - def is_result_signed(self): - return self._result_sign - - -class NonGcPtrCallDescr(BaseIntCallDescr): - _clsname = 'NonGcPtrCallDescr' - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrCallDescr(BaseCallDescr): - _clsname = 'GcPtrCallDescr' - _return_type = history.REF - call_stub = staticmethod(lambda func, args_i, args_r, args_f: - lltype.nullptr(llmemory.GCREF.TO)) - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class FloatCallDescr(BaseCallDescr): - _clsname = 'FloatCallDescr' - _return_type = history.FLOAT - call_stub = staticmethod(lambda func,args_i,args_r,args_f: longlong.ZEROF) - def get_result_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class LongLongCallDescr(FloatCallDescr): - _clsname = 'LongLongCallDescr' - _return_type = 'L' - -class VoidCallDescr(BaseCallDescr): - _clsname = 'VoidCallDescr' - _return_type = history.VOID - call_stub = staticmethod(lambda func, args_i, args_r, args_f: None) - def get_result_size(self, translate_support_code): - return 0 - -_SingleFloatCallDescr = None # built lazily - -def getCallDescrClass(RESULT): - if RESULT is lltype.Void: - return VoidCallDescr - if RESULT is lltype.Float: - return FloatCallDescr - if RESULT is lltype.SingleFloat: - global _SingleFloatCallDescr - if _SingleFloatCallDescr is None: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(RESULT) - class SingleFloatCallDescr(getCallDescrClass(rffi.UINT)): - _clsname = 'SingleFloatCallDescr' - _return_type = 'S' - _SingleFloatCallDescr = SingleFloatCallDescr - return 
_SingleFloatCallDescr - if is_longlong(RESULT): - return LongLongCallDescr - return getDescrClass(RESULT, BaseIntCallDescr, GcPtrCallDescr, - NonGcPtrCallDescr, 'Call', 'get_result_size', - Ellipsis, # <= floatattrname should not be used here - '_is_result_signed') -getCallDescrClass._annspecialcase_ = 'specialize:memo' +def map_type_to_argclass(ARG, accept_void=False): + kind = getkind(ARG) + if kind == 'int': + if ARG is lltype.SingleFloat: return 'S' + else: return 'i' + elif kind == 'ref': return 'r' + elif kind == 'float': + if is_longlong(ARG): return 'L' + else: return 'f' + elif kind == 'void': + if accept_void: return 'v' + raise NotImplementedError('ARG = %r' % (ARG,)) def get_call_descr(gccache, ARGS, RESULT, extrainfo=None): - arg_classes = [] - for ARG in ARGS: - kind = getkind(ARG) - if kind == 'int': - if ARG is lltype.SingleFloat: - arg_classes.append('S') + arg_classes = map(map_type_to_argclass, ARGS) + arg_classes = ''.join(arg_classes) + result_type = map_type_to_argclass(RESULT, accept_void=True) + RESULT_ERASED = RESULT + if RESULT is lltype.Void: + result_size = 0 + result_signed = False + else: + if isinstance(RESULT, lltype.Ptr): + # avoid too many CallDescrs + if result_type == 'r': + RESULT_ERASED = llmemory.GCREF else: - arg_classes.append('i') - elif kind == 'ref': arg_classes.append('r') - elif kind == 'float': - if is_longlong(ARG): - arg_classes.append('L') - else: - arg_classes.append('f') - else: - raise NotImplementedError('ARG = %r' % (ARG,)) - arg_classes = ''.join(arg_classes) - cls = getCallDescrClass(RESULT) - key = (cls, arg_classes, extrainfo) + RESULT_ERASED = llmemory.Address + result_size = symbolic.get_size(RESULT_ERASED, + gccache.translate_support_code) + result_signed = get_type_flag(RESULT) == FLAG_SIGNED + key = (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo) cache = gccache._cache_call try: - return cache[key] + calldescr = cache[key] except KeyError: - calldescr = cls(arg_classes, extrainfo) - calldescr.create_call_stub(gccache.rtyper, RESULT) + calldescr = CallDescr(arg_classes, result_type, result_signed, + result_size, extrainfo) + calldescr.create_call_stub(gccache.rtyper, RESULT_ERASED) cache[key] = calldescr - return calldescr - - -# ____________________________________________________________ - -def getDescrClass(TYPE, BaseDescr, GcPtrDescr, NonGcPtrDescr, - nameprefix, methodname, floatattrname, signedattrname, - _cache={}): - if isinstance(TYPE, lltype.Ptr): - if TYPE.TO._gckind == 'gc': - return GcPtrDescr - else: - return NonGcPtrDescr - if TYPE is lltype.SingleFloat: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(TYPE) - TYPE = rffi.UINT - try: - return _cache[nameprefix, TYPE] - except KeyError: - # - class Descr(BaseDescr): - _clsname = '%s%sDescr' % (TYPE._name, nameprefix) - Descr.__name__ = Descr._clsname - # - def method(self, translate_support_code): - return symbolic.get_size(TYPE, translate_support_code) - setattr(Descr, methodname, method) - # - if TYPE is lltype.Float or is_longlong(TYPE): - setattr(Descr, floatattrname, True) - elif (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and - rffi.cast(TYPE, -1) == -1): - setattr(Descr, signedattrname, True) - # - _cache[nameprefix, TYPE] = Descr - return Descr + assert repr(calldescr.result_size) == repr(result_size) + return calldescr diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -1,9 +1,7 
@@ from pypy.rlib.rarithmetic import intmask from pypy.jit.metainterp import history from pypy.rpython.lltypesystem import rffi -from pypy.jit.backend.llsupport.descr import ( - DynamicIntCallDescr, NonGcPtrCallDescr, FloatCallDescr, VoidCallDescr, - LongLongCallDescr, getCallDescrClass) +from pypy.jit.backend.llsupport.descr import CallDescr class UnsupportedKind(Exception): pass @@ -16,29 +14,13 @@ argkinds = [get_ffi_type_kind(cpu, arg) for arg in ffi_args] except UnsupportedKind: return None - arg_classes = ''.join(argkinds) - if reskind == history.INT: - size = intmask(ffi_result.c_size) - signed = is_ffi_type_signed(ffi_result) - return DynamicIntCallDescr(arg_classes, size, signed, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.REF: - return NonGcPtrCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.FLOAT: - return FloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.VOID: - return VoidCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'L': - return LongLongCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'S': - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - return SingleFloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - assert False + if reskind == history.VOID: + result_size = 0 + else: + result_size = intmask(ffi_result.c_size) + argkinds = ''.join(argkinds) + return CallDescr(argkinds, reskind, is_ffi_type_signed(ffi_result), + result_size, extrainfo, ffi_flags=ffi_flags) def get_ffi_type_kind(cpu, ffi_type): from pypy.rlib.libffi import types diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,6 +1,6 @@ import os from pypy.rlib import rgc -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize from pypy.rlib.debug import fatalerror from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr @@ -8,52 +8,93 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt, ConstPtr -from pypy.jit.metainterp.history import AbstractDescr +from pypy.jit.codewriter import heaptracker +from pypy.jit.metainterp.history import ConstPtr, AbstractDescr from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.symbolic import WORD -from pypy.jit.backend.llsupport.descr import BaseSizeDescr, BaseArrayDescr +from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr -from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr +from pypy.jit.backend.llsupport.descr import get_array_descr from pypy.jit.backend.llsupport.descr import get_call_descr +from pypy.jit.backend.llsupport.rewrite import GcRewriterAssembler from pypy.rpython.memory.gctransform import asmgcroot # ____________________________________________________________ class GcLLDescription(GcCache): - minimal_size_in_nursery = 0 - get_malloc_slowpath_addr = None def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr + if translator and 
translator.config.translation.gcremovetypeptr: + self.fielddescr_vtable = None + else: + self.fielddescr_vtable = get_field_descr(self, rclass.OBJECT, + 'typeptr') + self._generated_functions = [] + + def _setup_str(self): + self.str_descr = get_array_descr(self, rstr.STR) + self.unicode_descr = get_array_descr(self, rstr.UNICODE) + + def generate_function(self, funcname, func, ARGS, RESULT=llmemory.GCREF): + """Generates a variant of malloc with the given name and the given + arguments. It should return NULL if out of memory. If it raises + anything, it must be an optional MemoryError. + """ + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, RESULT)) + descr = get_call_descr(self, ARGS, RESULT) + setattr(self, funcname, func) + setattr(self, funcname + '_FUNCPTR', FUNCPTR) + setattr(self, funcname + '_descr', descr) + self._generated_functions.append(funcname) + + @specialize.arg(1) + def get_malloc_fn(self, funcname): + func = getattr(self, funcname) + FUNC = getattr(self, funcname + '_FUNCPTR') + return llhelper(FUNC, func) + + @specialize.arg(1) + def get_malloc_fn_addr(self, funcname): + ll_func = self.get_malloc_fn(funcname) + return heaptracker.adr2int(llmemory.cast_ptr_to_adr(ll_func)) + def _freeze_(self): return True def initialize(self): pass def do_write_barrier(self, gcref_struct, gcref_newptr): pass - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - return operations - def can_inline_malloc(self, descr): - return False - def can_inline_malloc_varsize(self, descr, num_elem): + def can_use_nursery_malloc(self, size): return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): pass + def get_nursery_free_addr(self): + raise NotImplementedError + def get_nursery_top_addr(self): + raise NotImplementedError - def get_funcptr_for_newarray(self): - return llhelper(self.GC_MALLOC_ARRAY, self.malloc_array) - def get_funcptr_for_newstr(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_str) - def get_funcptr_for_newunicode(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_unicode) + def gc_malloc(self, sizedescr): + """Blackhole: do a 'bh_new'. 
Also used for 'bh_new_with_vtable', + with the vtable pointer set manually afterwards.""" + assert isinstance(sizedescr, SizeDescr) + return self._bh_malloc(sizedescr) + def gc_malloc_array(self, arraydescr, num_elem): + assert isinstance(arraydescr, ArrayDescr) + return self._bh_malloc_array(arraydescr, num_elem) - def record_constptrs(self, op, gcrefs_output_list): + def gc_malloc_str(self, num_elem): + return self._bh_malloc_array(self.str_descr, num_elem) + + def gc_malloc_unicode(self, num_elem): + return self._bh_malloc_array(self.unicode_descr, num_elem) + + def _record_constptrs(self, op, gcrefs_output_list): for i in range(op.numargs()): v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): @@ -61,11 +102,27 @@ rgc._make_sure_does_not_move(p) gcrefs_output_list.append(p) + def rewrite_assembler(self, cpu, operations, gcrefs_output_list): + rewriter = GcRewriterAssembler(self, cpu) + newops = rewriter.rewrite(operations) + # record all GCREFs, because the GC (or Boehm) cannot see them and + # keep them alive if they end up as constants in the assembler + for op in newops: + self._record_constptrs(op, gcrefs_output_list) + return newops + # ____________________________________________________________ class GcLLDescr_boehm(GcLLDescription): - moving_gc = False - gcrootmap = None + kind = 'boehm' + moving_gc = False + round_up = False + gcrootmap = None + write_barrier_descr = None + fielddescr_tid = None + str_type_id = 0 + unicode_type_id = 0 + get_malloc_slowpath_addr = None @classmethod def configure_boehm_once(cls): @@ -76,6 +133,16 @@ from pypy.rpython.tool import rffi_platform compilation_info = rffi_platform.configure_boehm() + # on some platform GC_init is required before any other + # GC_* functions, call it here for the benefit of tests + # XXX move this to tests + init_fn_ptr = rffi.llexternal("GC_init", + [], lltype.Void, + compilation_info=compilation_info, + sandboxsafe=True, + _nowrapper=True) + init_fn_ptr() + # Versions 6.x of libgc needs to use GC_local_malloc(). 
# Versions 7.x of libgc removed this function; GC_malloc() has # the same behavior if libgc was compiled with @@ -95,96 +162,42 @@ sandboxsafe=True, _nowrapper=True) cls.malloc_fn_ptr = malloc_fn_ptr - cls.compilation_info = compilation_info return malloc_fn_ptr def __init__(self, gcdescr, translator, rtyper): GcLLDescription.__init__(self, gcdescr, translator, rtyper) # grab a pointer to the Boehm 'malloc' function - malloc_fn_ptr = self.configure_boehm_once() - self.funcptr_for_new = malloc_fn_ptr + self.malloc_fn_ptr = self.configure_boehm_once() + self._setup_str() + self._make_functions() - def malloc_array(basesize, itemsize, ofs_length, num_elem): + def _make_functions(self): + + def malloc_fixedsize(size): + return self.malloc_fn_ptr(size) + self.generate_function('malloc_fixedsize', malloc_fixedsize, + [lltype.Signed]) + + def malloc_array(basesize, num_elem, itemsize, ofs_length): try: - size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) except OverflowError: return lltype.nullptr(llmemory.GCREF.TO) - res = self.funcptr_for_new(size) - if not res: - return res - rffi.cast(rffi.CArrayPtr(lltype.Signed), res)[ofs_length/WORD] = num_elem + res = self.malloc_fn_ptr(totalsize) + if res: + arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) + arrayptr[ofs_length/WORD] = num_elem return res - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 4, llmemory.GCREF)) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 4) + def _bh_malloc(self, sizedescr): + return self.malloc_fixedsize(sizedescr.size) - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, self.translate_support_code) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code) - def malloc_str(length): - return self.malloc_array( - str_basesize, str_itemsize, str_ofs_length, length - ) - def malloc_unicode(length): - return self.malloc_array( - unicode_basesize, unicode_itemsize, unicode_ofs_length, length - ) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - - - # on some platform GC_init is required before any other - # GC_* functions, call it here for the benefit of tests - # XXX move this to tests - init_fn_ptr = rffi.llexternal("GC_init", - [], lltype.Void, - compilation_info=self.compilation_info, - sandboxsafe=True, - _nowrapper=True) - - init_fn_ptr() - - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.funcptr_for_new(sizedescr.size) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(basesize, itemsize, ofs_length, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size] - - def args_for_new_array(self, arraydescr): - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = 
arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [basesize, itemsize, ofs_length] - - def get_funcptr_for_new(self): - return self.funcptr_for_new - - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # record all GCREFs too, because Boehm cannot see them and keep them - # alive if they end up as constants in the assembler - for op in operations: - self.record_constptrs(op, gcrefs_output_list) - return GcLLDescription.rewrite_assembler(self, cpu, operations, - gcrefs_output_list) + def _bh_malloc_array(self, arraydescr, num_elem): + return self.malloc_array(arraydescr.basesize, num_elem, + arraydescr.itemsize, + arraydescr.lendescr.offset) # ____________________________________________________________ @@ -554,12 +567,14 @@ class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): - GCClass = gc_ll_descr.GCClass self.llop1 = gc_ll_descr.llop1 self.WB_FUNCPTR = gc_ll_descr.WB_FUNCPTR self.WB_ARRAY_FUNCPTR = gc_ll_descr.WB_ARRAY_FUNCPTR - self.fielddescr_tid = get_field_descr(gc_ll_descr, GCClass.HDR, 'tid') + self.fielddescr_tid = gc_ll_descr.fielddescr_tid # + GCClass = gc_ll_descr.GCClass + if GCClass is None: # for tests + return self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = ( self.extract_flag_byte(self.jit_wb_if_flag)) @@ -596,48 +611,74 @@ funcaddr = llmemory.cast_ptr_to_adr(funcptr) return cpu.cast_adr_to_int(funcaddr) # this may return 0 + def has_write_barrier_from_array(self, cpu): + return self.get_write_barrier_from_array_fn(cpu) != 0 + class GcLLDescr_framework(GcLLDescription): DEBUG = False # forced to True by x86/test/test_zrpy_gc.py + kind = 'framework' + round_up = True - def __init__(self, gcdescr, translator, rtyper, llop1=llop): - from pypy.rpython.memory.gctypelayout import check_typeid - from pypy.rpython.memory.gcheader import GCHeaderBuilder - from pypy.rpython.memory.gctransform import framework + def __init__(self, gcdescr, translator, rtyper, llop1=llop, + really_not_translated=False): GcLLDescription.__init__(self, gcdescr, translator, rtyper) - assert self.translate_support_code, "required with the framework GC" self.translator = translator self.llop1 = llop1 + if really_not_translated: + assert not self.translate_support_code # but half does not work + self._initialize_for_tests() + else: + assert self.translate_support_code,"required with the framework GC" + self._check_valid_gc() + self._make_gcrootmap() + self._make_layoutbuilder() + self._setup_gcclass() + self._setup_tid() + self._setup_write_barrier() + self._setup_str() + self._make_functions(really_not_translated) + def _initialize_for_tests(self): + self.layoutbuilder = None + self.fielddescr_tid = AbstractDescr() + self.max_size_of_young_obj = 1000 + self.GCClass = None + + def _check_valid_gc(self): # we need the hybrid or minimark GC for rgc._make_sure_does_not_move() # to work - if gcdescr.config.translation.gc not in ('hybrid', 'minimark'): + if self.gcdescr.config.translation.gc not in ('hybrid', 'minimark'): raise NotImplementedError("--gc=%s not implemented with the JIT" % (gcdescr.config.translation.gc,)) + def _make_gcrootmap(self): # to find roots in the assembler, make a GcRootMap - name = gcdescr.config.translation.gcrootfinder + name = self.gcdescr.config.translation.gcrootfinder try: cls = globals()['GcRootMap_' + name] except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % 
(name,)) - gcrootmap = cls(gcdescr) + gcrootmap = cls(self.gcdescr) self.gcrootmap = gcrootmap + def _make_layoutbuilder(self): # make a TransformerLayoutBuilder and save it on the translator # where it can be fished and reused by the FrameworkGCTransformer + from pypy.rpython.memory.gctransform import framework + translator = self.translator self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} - gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + self.gcrootmap.add_jit2gc_hooks(translator._jit2gc) + def _setup_gcclass(self): + from pypy.rpython.memory.gcheader import GCHeaderBuilder self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO) - (self.array_basesize, _, self.array_length_ofs) = \ - symbolic.get_array_token(lltype.GcArray(lltype.Signed), True) self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() @@ -645,87 +686,124 @@ assert self.GCClass.inline_simple_malloc assert self.GCClass.inline_simple_malloc_varsize - # make a malloc function, with two arguments - def malloc_basic(size, tid): - type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - check_typeid(type_id) - res = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - type_id, size, - False, False, False) - # In case the operation above failed, we are returning NULL - # from this function to assembler. There is also an RPython - # exception set, typically MemoryError; but it's easier and - # faster to check for the NULL return value, as done by - # translator/exceptiontransform.py. - #llop.debug_print(lltype.Void, "\tmalloc_basic", size, type_id, - # "-->", res) - return res - self.malloc_basic = malloc_basic - self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType( - [lltype.Signed, lltype.Signed], llmemory.GCREF)) + def _setup_tid(self): + self.fielddescr_tid = get_field_descr(self, self.GCClass.HDR, 'tid') + + def _setup_write_barrier(self): self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, llmemory.Address], lltype.Void)) self.WB_ARRAY_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, lltype.Signed, llmemory.Address], lltype.Void)) self.write_barrier_descr = WriteBarrierDescr(self) - # + + def _make_functions(self, really_not_translated): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + (self.standard_array_basesize, _, self.standard_array_length_ofs) = \ + symbolic.get_array_token(lltype.GcArray(lltype.Signed), + not really_not_translated) + + def malloc_nursery_slowpath(size): + """Allocate 'size' null bytes out of the nursery. + Note that the fast path is typically inlined by the backend.""" + if self.DEBUG: + self._random_usage_of_xmm_registers() + type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, size, + False, False, False) + self.generate_function('malloc_nursery', malloc_nursery_slowpath, + [lltype.Signed]) + def malloc_array(itemsize, tid, num_elem): + """Allocate an array with a variable-size num_elem. 
+ Only works for standard arrays.""" type_id = llop.extract_ushort(llgroup.HALFWORD, tid) check_typeid(type_id) return llop1.do_malloc_varsize_clear( llmemory.GCREF, - type_id, num_elem, self.array_basesize, itemsize, - self.array_length_ofs) - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 3, llmemory.GCREF)) - # - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, True) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, True) - str_type_id = self.layoutbuilder.get_type_id(rstr.STR) - unicode_type_id = self.layoutbuilder.get_type_id(rstr.UNICODE) - # + type_id, num_elem, self.standard_array_basesize, itemsize, + self.standard_array_length_ofs) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 3) + + def malloc_array_nonstandard(basesize, itemsize, lengthofs, tid, + num_elem): + """For the rare case of non-standard arrays, i.e. arrays where + self.standard_array_{basesize,length_ofs} is wrong. It can + occur e.g. with arrays of floats on Win32.""" + type_id = llop.extract_ushort(llgroup.HALFWORD, tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + type_id, num_elem, basesize, itemsize, lengthofs) + self.generate_function('malloc_array_nonstandard', + malloc_array_nonstandard, + [lltype.Signed] * 5) + + str_type_id = self.str_descr.tid + str_basesize = self.str_descr.basesize + str_itemsize = self.str_descr.itemsize + str_ofs_length = self.str_descr.lendescr.offset + unicode_type_id = self.unicode_descr.tid + unicode_basesize = self.unicode_descr.basesize + unicode_itemsize = self.unicode_descr.itemsize + unicode_ofs_length = self.unicode_descr.lendescr.offset + def malloc_str(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, str_type_id, length, str_basesize, str_itemsize, str_ofs_length) + self.generate_function('malloc_str', malloc_str, + [lltype.Signed]) + def malloc_unicode(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, - unicode_type_id, length, unicode_basesize,unicode_itemsize, + unicode_type_id, length, unicode_basesize, unicode_itemsize, unicode_ofs_length) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - # - class ForTestOnly: - pass - for_test_only = ForTestOnly() - for_test_only.x = 1.23 - def random_usage_of_xmm_registers(): - x0 = for_test_only.x - x1 = x0 * 0.1 - x2 = x0 * 0.2 - x3 = x0 * 0.3 - for_test_only.x = x0 + x1 + x2 + x3 - # - def malloc_slowpath(size): - if self.DEBUG: - random_usage_of_xmm_registers() - assert size >= self.minimal_size_in_nursery - # NB. although we call do_malloc_fixedsize_clear() here, - # it's a bit of a hack because we set tid to 0 and may - # also use it to allocate varsized objects. The tid - # and possibly the length are both set afterward. - gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - 0, size, False, False, False) - return rffi.cast(lltype.Signed, gcref) - self.malloc_slowpath = malloc_slowpath - self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) + self.generate_function('malloc_unicode', malloc_unicode, + [lltype.Signed]) + + # Rarely called: allocate a fixed-size amount of bytes, but + # not in the nursery, because it is too big. Implemented like + # malloc_nursery_slowpath() above. 
+ self.generate_function('malloc_fixedsize', malloc_nursery_slowpath, + [lltype.Signed]) + + def _bh_malloc(self, sizedescr): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, sizedescr.tid) + check_typeid(type_id) + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, sizedescr.size, + False, False, False) + + def _bh_malloc_array(self, arraydescr, num_elem): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, arraydescr.tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear(llmemory.GCREF, + type_id, num_elem, + arraydescr.basesize, + arraydescr.itemsize, + arraydescr.lendescr.offset) + + + class ForTestOnly: + pass + for_test_only = ForTestOnly() + for_test_only.x = 1.23 + + def _random_usage_of_xmm_registers(self): + x0 = self.for_test_only.x + x1 = x0 * 0.1 + x2 = x0 * 0.2 + x3 = x0 * 0.3 + self.for_test_only.x = x0 + x1 + x2 + x3 def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -735,49 +813,26 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) - return rffi.cast(lltype.Signed, fptr) - def initialize(self): self.gcrootmap.initialize() def init_size_descr(self, S, descr): - type_id = self.layoutbuilder.get_type_id(S) - assert not self.layoutbuilder.is_weakref_type(S) - assert not self.layoutbuilder.has_finalizer(S) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(S) + assert not self.layoutbuilder.is_weakref_type(S) + assert not self.layoutbuilder.has_finalizer(S) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) def init_array_descr(self, A, descr): - type_id = self.layoutbuilder.get_type_id(A) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(A) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.malloc_basic(sizedescr.size, sizedescr.tid) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(itemsize, arraydescr.tid, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size, sizedescr.tid] - - def args_for_new_array(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [itemsize, arraydescr.tid] - - def get_funcptr_for_new(self): - return llhelper(self.GC_MALLOC_BASIC, self.malloc_basic) + def _set_tid(self, gcptr, tid): + hdr_addr = llmemory.cast_ptr_to_adr(gcptr) + hdr_addr -= self.gcheaderbuilder.size_gc_header + hdr = llmemory.cast_adr_to_ptr(hdr_addr, self.HDRPTR) + hdr.tid = tid def do_write_barrier(self, gcref_struct, gcref_newptr): hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct) @@ -791,108 +846,8 @@ funcptr(llmemory.cast_ptr_to_adr(gcref_struct), 
llmemory.cast_ptr_to_adr(gcref_newptr)) - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # Perform two kinds of rewrites in parallel: - # - # - Add COND_CALLs to the write barrier before SETFIELD_GC and - # SETARRAYITEM_GC operations. - # - # - Record the ConstPtrs from the assembler. - # - newops = [] - known_lengths = {} - # we can only remember one malloc since the next malloc can possibly - # collect - last_malloc = None - for op in operations: - if op.getopnum() == rop.DEBUG_MERGE_POINT: - continue - # ---------- record the ConstPtrs ---------- - self.record_constptrs(op, gcrefs_output_list) - if op.is_malloc(): - last_malloc = op.result - elif op.can_malloc(): - last_malloc = None - # ---------- write barrier for SETFIELD_GC ---------- - if op.getopnum() == rop.SETFIELD_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(1) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETFIELD_RAW) - # ---------- write barrier for SETINTERIORFIELD_GC ------ - if op.getopnum() == rop.SETINTERIORFIELD_GC: - val = op.getarg(0) - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) - # ---------- write barrier for SETARRAYITEM_GC ---------- - if op.getopnum() == rop.SETARRAYITEM_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier_array(newops, op.getarg(0), - op.getarg(1), v, - cpu, known_lengths) - op = op.copy_and_change(rop.SETARRAYITEM_RAW) - elif op.getopnum() == rop.NEW_ARRAY: - v_length = op.getarg(0) - if isinstance(v_length, ConstInt): - known_lengths[op.result] = v_length.getint() - # ---------- - newops.append(op) - return newops - - def _gen_write_barrier(self, newops, v_base, v_value): - args = [v_base, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, - descr=self.write_barrier_descr)) - - def _gen_write_barrier_array(self, newops, v_base, v_index, v_value, - cpu, known_lengths): - if self.write_barrier_descr.get_write_barrier_from_array_fn(cpu) != 0: - # If we know statically the length of 'v', and it is not too - # big, then produce a regular write_barrier. If it's unknown or - # too big, produce instead a write_barrier_from_array. 
- LARGE = 130 - length = known_lengths.get(v_base, LARGE) - if length >= LARGE: - # unknown or too big: produce a write_barrier_from_array - args = [v_base, v_index, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, - None, - descr=self.write_barrier_descr)) - return - # fall-back case: produce a write_barrier - self._gen_write_barrier(newops, v_base, v_value) - - def can_inline_malloc(self, descr): - assert isinstance(descr, BaseSizeDescr) - if descr.size < self.max_size_of_young_obj: - has_finalizer = bool(descr.tid & (1<= len(self.used): + self.used.append(False) + if self.used[index + i]: + return False # already in use + # good, we can reuse the location + for i in range(size): + self.used[index + i] = True + self.bindings[box] = loc + return True + # abstract methods that need to be overwritten for specific assemblers @staticmethod def frame_pos(loc, type): @@ -49,6 +123,10 @@ @staticmethod def frame_size(type): return 1 + @staticmethod + def get_loc_index(loc): + raise NotImplementedError("Purely abstract") + class RegisterManager(object): """ Class that keeps track of register allocations @@ -68,7 +146,14 @@ self.frame_manager = frame_manager self.assembler = assembler + def is_still_alive(self, v): + # Check if 'v' is alive at the current position. + # Return False if the last usage is strictly before. + return self.longevity[v][1] >= self.position + def stays_alive(self, v): + # Check if 'v' stays alive after the current position. + # Return False if the last usage is before or at position. return self.longevity[v][1] > self.position def next_instruction(self, incr=1): @@ -84,11 +169,14 @@ point for all variables that might be in registers. """ self._check_type(v) - if isinstance(v, Const) or v not in self.reg_bindings: + if isinstance(v, Const): return if v not in self.longevity or self.longevity[v][1] <= self.position: - self.free_regs.append(self.reg_bindings[v]) - del self.reg_bindings[v] + if v in self.reg_bindings: + self.free_regs.append(self.reg_bindings[v]) + del self.reg_bindings[v] + if self.frame_manager is not None: + self.frame_manager.mark_as_free(v) def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -0,0 +1,328 @@ +import sys +from pypy.rlib.rarithmetic import ovfcheck +from pypy.jit.metainterp.history import ConstInt, BoxPtr, ConstPtr +from pypy.jit.metainterp.resoperation import ResOperation, rop +from pypy.jit.codewriter import heaptracker +from pypy.jit.backend.llsupport.symbolic import WORD +from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr + + +class GcRewriterAssembler(object): + # This class performs the following rewrites on the list of operations: + # + # - Remove the DEBUG_MERGE_POINTs. + # + # - Turn all NEW_xxx to either a CALL_MALLOC_GC, or a CALL_MALLOC_NURSERY + # followed by SETFIELDs in order to initialize their GC fields. The + # two advantages of CALL_MALLOC_NURSERY is that it inlines the common + # path, and we need only one such operation to allocate several blocks + # of memory at once. + # + # - Add COND_CALLs to the write barrier before SETFIELD_GC and + # SETARRAYITEM_GC operations. 
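+    # A hedged illustration (the descr names, sizes and tids below are made
+    # up for this sketch; in practice the rewriter is driven by the real
+    # SizeDescr/ArrayDescr objects and by round_up_for_allocation()):
+    #
+    #     p0 = new(descr=sdescr)        # sdescr.size == 16, sdescr.tid == 1234
+    #     p1 = new(descr=tdescr)        # tdescr.size == 16, tdescr.tid == 5678
+    #     setfield_gc(p0, p2, descr=xdescr)
+    #
+    # becomes, roughly:
+    #
+    #     p0 = call_malloc_nursery(32)                # both sizes merged
+    #     setfield_gc(p0, ConstInt(1234), descr=fielddescr_tid)
+    #     p1 = int_add(p0, 16)
+    #     setfield_gc(p1, ConstInt(5678), descr=fielddescr_tid)
+    #     setfield_gc(p0, p2, descr=xdescr)           # no write barrier:
+    #                                                 # p0 was just malloced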
+ + _previous_size = -1 + _op_malloc_nursery = None + _v_last_malloced_nursery = None + c_zero = ConstInt(0) + + def __init__(self, gc_ll_descr, cpu): + self.gc_ll_descr = gc_ll_descr + self.cpu = cpu + self.newops = [] + self.known_lengths = {} + self.recent_mallocs = {} # set of variables + + def rewrite(self, operations): + # we can only remember one malloc since the next malloc can possibly + # collect; but we can try to collapse several known-size mallocs into + # one, both for performance and to reduce the number of write + # barriers. We do this on each "basic block" of operations, which in + # this case means between CALLs or unknown-size mallocs. + # + for op in operations: + if op.getopnum() == rop.DEBUG_MERGE_POINT: + continue + # ---------- turn NEWxxx into CALL_MALLOC_xxx ---------- + if op.is_malloc(): + self.handle_malloc_operation(op) + continue + elif op.can_malloc(): + self.emitting_an_operation_that_can_collect() + elif op.getopnum() == rop.LABEL: + self.emitting_an_operation_that_can_collect() + self.known_lengths.clear() + # ---------- write barriers ---------- + if self.gc_ll_descr.write_barrier_descr is not None: + if op.getopnum() == rop.SETFIELD_GC: + self.handle_write_barrier_setfield(op) + continue + if op.getopnum() == rop.SETINTERIORFIELD_GC: + self.handle_write_barrier_setinteriorfield(op) + continue + if op.getopnum() == rop.SETARRAYITEM_GC: + self.handle_write_barrier_setarrayitem(op) + continue + # ---------- + self.newops.append(op) + return self.newops + + # ---------- + + def handle_malloc_operation(self, op): + opnum = op.getopnum() + if opnum == rop.NEW: + self.handle_new_fixedsize(op.getdescr(), op) + elif opnum == rop.NEW_WITH_VTABLE: + classint = op.getarg(0).getint() + descr = heaptracker.vtable2descr(self.cpu, classint) + self.handle_new_fixedsize(descr, op) + if self.gc_ll_descr.fielddescr_vtable is not None: + op = ResOperation(rop.SETFIELD_GC, + [op.result, ConstInt(classint)], None, + descr=self.gc_ll_descr.fielddescr_vtable) + self.newops.append(op) + elif opnum == rop.NEW_ARRAY: + descr = op.getdescr() + assert isinstance(descr, ArrayDescr) + self.handle_new_array(descr, op) + elif opnum == rop.NEWSTR: + self.handle_new_array(self.gc_ll_descr.str_descr, op) + elif opnum == rop.NEWUNICODE: + self.handle_new_array(self.gc_ll_descr.unicode_descr, op) + else: + raise NotImplementedError(op.getopname()) + + def handle_new_fixedsize(self, descr, op): + assert isinstance(descr, SizeDescr) + size = descr.size + self.gen_malloc_nursery(size, op.result) + self.gen_initialize_tid(op.result, descr.tid) + + def handle_new_array(self, arraydescr, op): + v_length = op.getarg(0) + total_size = -1 + if isinstance(v_length, ConstInt): + num_elem = v_length.getint() + self.known_lengths[op.result] = num_elem + try: + var_size = ovfcheck(arraydescr.itemsize * num_elem) + total_size = ovfcheck(arraydescr.basesize + var_size) + except OverflowError: + pass # total_size is still -1 + elif arraydescr.itemsize == 0: + total_size = arraydescr.basesize + if 0 <= total_size <= 0xffffff: # up to 16MB, arbitrarily + self.gen_malloc_nursery(total_size, op.result) + self.gen_initialize_tid(op.result, arraydescr.tid) + self.gen_initialize_len(op.result, v_length, arraydescr.lendescr) + elif self.gc_ll_descr.kind == 'boehm': + self.gen_boehm_malloc_array(arraydescr, v_length, op.result) + else: + opnum = op.getopnum() + if opnum == rop.NEW_ARRAY: + self.gen_malloc_array(arraydescr, v_length, op.result) + elif opnum == rop.NEWSTR: + self.gen_malloc_str(v_length, op.result) + 
elif opnum == rop.NEWUNICODE: + self.gen_malloc_unicode(v_length, op.result) + else: + raise NotImplementedError(op.getopname()) + + # ---------- + + def emitting_an_operation_that_can_collect(self): + # must be called whenever we emit an operation that can collect: + # forgets the previous MALLOC_NURSERY, if any; and empty the + # set 'recent_mallocs', so that future SETFIELDs will generate + # a write barrier as usual. + self._op_malloc_nursery = None + self.recent_mallocs.clear() + + def _gen_call_malloc_gc(self, args, v_result, descr): + """Generate a CALL_MALLOC_GC with the given args.""" + self.emitting_an_operation_that_can_collect() + op = ResOperation(rop.CALL_MALLOC_GC, args, v_result, descr) + self.newops.append(op) + # mark 'v_result' as freshly malloced + self.recent_mallocs[v_result] = None + + def gen_malloc_fixedsize(self, size, v_result): + """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, Const(size)). + Note that with the framework GC, this should be called very rarely. + """ + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize') + self._gen_call_malloc_gc([ConstInt(addr), ConstInt(size)], v_result, + self.gc_ll_descr.malloc_fixedsize_descr) + + def gen_boehm_malloc_array(self, arraydescr, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) for Boehm.""" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_array') + self._gen_call_malloc_gc([ConstInt(addr), + ConstInt(arraydescr.basesize), + v_num_elem, + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.lendescr.offset)], + v_result, + self.gc_ll_descr.malloc_array_descr) + + def gen_malloc_array(self, arraydescr, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) going either + to the standard or the nonstandard version of the function.""" + # + if (arraydescr.basesize == self.gc_ll_descr.standard_array_basesize + and arraydescr.lendescr.offset == + self.gc_ll_descr.standard_array_length_ofs): + # this is a standard-looking array, common case + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_array') + args = [ConstInt(addr), + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.tid), + v_num_elem] + calldescr = self.gc_ll_descr.malloc_array_descr + else: + # rare case, so don't care too much about the number of arguments + addr = self.gc_ll_descr.get_malloc_fn_addr( + 'malloc_array_nonstandard') + args = [ConstInt(addr), + ConstInt(arraydescr.basesize), + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.lendescr.offset), + ConstInt(arraydescr.tid), + v_num_elem] + calldescr = self.gc_ll_descr.malloc_array_nonstandard_descr + self._gen_call_malloc_gc(args, v_result, calldescr) + + def gen_malloc_str(self, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_str_fn, ...).""" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_str') + self._gen_call_malloc_gc([ConstInt(addr), v_num_elem], v_result, + self.gc_ll_descr.malloc_str_descr) + + def gen_malloc_unicode(self, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_unicode_fn, ...).""" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_unicode') + self._gen_call_malloc_gc([ConstInt(addr), v_num_elem], v_result, + self.gc_ll_descr.malloc_unicode_descr) + + def gen_malloc_nursery(self, size, v_result): + """Try to generate or update a CALL_MALLOC_NURSERY. + If that fails, generate a plain CALL_MALLOC_GC instead. 
+ """ + size = self.round_up_for_allocation(size) + if not self.gc_ll_descr.can_use_nursery_malloc(size): + self.gen_malloc_fixedsize(size, v_result) + return + # + op = None + if self._op_malloc_nursery is not None: + # already a MALLOC_NURSERY: increment its total size + total_size = self._op_malloc_nursery.getarg(0).getint() + total_size += size + if self.gc_ll_descr.can_use_nursery_malloc(total_size): + # if the total size is still reasonable, merge it + self._op_malloc_nursery.setarg(0, ConstInt(total_size)) + op = ResOperation(rop.INT_ADD, + [self._v_last_malloced_nursery, + ConstInt(self._previous_size)], + v_result) + if op is None: + # if we failed to merge with a previous MALLOC_NURSERY, emit one + self.emitting_an_operation_that_can_collect() + op = ResOperation(rop.CALL_MALLOC_NURSERY, + [ConstInt(size)], + v_result) + self._op_malloc_nursery = op + # + self.newops.append(op) + self._previous_size = size + self._v_last_malloced_nursery = v_result + self.recent_mallocs[v_result] = None + + def gen_initialize_tid(self, v_newgcobj, tid): + if self.gc_ll_descr.fielddescr_tid is not None: + # produce a SETFIELD to initialize the GC header + op = ResOperation(rop.SETFIELD_GC, + [v_newgcobj, ConstInt(tid)], None, + descr=self.gc_ll_descr.fielddescr_tid) + self.newops.append(op) + + def gen_initialize_len(self, v_newgcobj, v_length, arraylen_descr): + # produce a SETFIELD to initialize the array length + op = ResOperation(rop.SETFIELD_GC, + [v_newgcobj, v_length], None, + descr=arraylen_descr) + self.newops.append(op) + + # ---------- + + def handle_write_barrier_setfield(self, op): + val = op.getarg(0) + # no need for a write barrier in the case of previous malloc + if val not in self.recent_mallocs: + v = op.getarg(1) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self.gen_write_barrier(op.getarg(0), v) + op = op.copy_and_change(rop.SETFIELD_RAW) + self.newops.append(op) + + def handle_write_barrier_setinteriorfield(self, op): + val = op.getarg(0) + # no need for a write barrier in the case of previous malloc + if val not in self.recent_mallocs: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self.gen_write_barrier(op.getarg(0), v) + op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) + self.newops.append(op) + + def handle_write_barrier_setarrayitem(self, op): + val = op.getarg(0) + # no need for a write barrier in the case of previous malloc + if val not in self.recent_mallocs: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self.gen_write_barrier_array(op.getarg(0), + op.getarg(1), v) + op = op.copy_and_change(rop.SETARRAYITEM_RAW) + self.newops.append(op) + + def gen_write_barrier(self, v_base, v_value): + write_barrier_descr = self.gc_ll_descr.write_barrier_descr + args = [v_base, v_value] + self.newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, + descr=write_barrier_descr)) + + def gen_write_barrier_array(self, v_base, v_index, v_value): + write_barrier_descr = self.gc_ll_descr.write_barrier_descr + if write_barrier_descr.has_write_barrier_from_array(self.cpu): + # If we know statically the length of 'v', and it is not too + # big, then produce a regular write_barrier. If it's unknown or + # too big, produce instead a write_barrier_from_array. 
+ LARGE = 130 + length = self.known_lengths.get(v_base, LARGE) + if length >= LARGE: + # unknown or too big: produce a write_barrier_from_array + args = [v_base, v_index, v_value] + self.newops.append( + ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, + descr=write_barrier_descr)) + return + # fall-back case: produce a write_barrier + self.gen_write_barrier(v_base, v_value) + + def round_up_for_allocation(self, size): + if not self.gc_ll_descr.round_up: + return size + if self.gc_ll_descr.translate_support_code: + from pypy.rpython.lltypesystem import llarena + return llarena.round_up_for_allocation( + size, self.gc_ll_descr.minimal_size_in_nursery) + else: + # non-translated: do it manually + # assume that "self.gc_ll_descr.minimal_size_in_nursery" is 2 WORDs + size = max(size, 2 * WORD) + return (size + WORD-1) & ~(WORD-1) # round up diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -1,4 +1,4 @@ -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport import symbolic from pypy.rlib.objectmodel import Symbolic @@ -53,18 +53,6 @@ ('z', lltype.Ptr(U)), ('f', lltype.Float), ('s', lltype.SingleFloat)) - assert getFieldDescrClass(lltype.Ptr(T)) is GcPtrFieldDescr - assert getFieldDescrClass(lltype.Ptr(U)) is NonGcPtrFieldDescr - cls = getFieldDescrClass(lltype.Char) - assert cls != getFieldDescrClass(lltype.Signed) - assert cls == getFieldDescrClass(lltype.Char) - clsf = getFieldDescrClass(lltype.Float) - assert clsf != cls - assert clsf == getFieldDescrClass(lltype.Float) - clss = getFieldDescrClass(lltype.SingleFloat) - assert clss not in (cls, clsf) - assert clss == getFieldDescrClass(lltype.SingleFloat) - assert clss == getFieldDescrClass(rffi.UINT) # for now # c0 = GcCache(False) c1 = GcCache(True) @@ -77,11 +65,7 @@ descr_z = get_field_descr(c2, S, 'z') descr_f = get_field_descr(c2, S, 'f') descr_s = get_field_descr(c2, S, 's') - assert descr_x.__class__ is cls - assert descr_y.__class__ is GcPtrFieldDescr - assert descr_z.__class__ is NonGcPtrFieldDescr - assert descr_f.__class__ is clsf - assert descr_s.__class__ is clss + assert isinstance(descr_x, FieldDescr) assert descr_x.name == 'S.x' assert descr_y.name == 'S.y' assert descr_z.name == 'S.z' @@ -90,33 +74,27 @@ if not tsc: assert descr_x.offset < descr_y.offset < descr_z.offset assert descr_x.sort_key() < descr_y.sort_key() < descr_z.sort_key() - assert descr_x.get_field_size(False) == rffi.sizeof(lltype.Char) - assert descr_y.get_field_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr_z.get_field_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr_f.get_field_size(False) == rffi.sizeof(lltype.Float) - assert descr_s.get_field_size(False) == rffi.sizeof( - lltype.SingleFloat) + assert descr_x.field_size == rffi.sizeof(lltype.Char) + assert descr_y.field_size == rffi.sizeof(lltype.Ptr(T)) + assert descr_z.field_size == rffi.sizeof(lltype.Ptr(U)) + assert descr_f.field_size == rffi.sizeof(lltype.Float) + assert descr_s.field_size == rffi.sizeof(lltype.SingleFloat) else: assert isinstance(descr_x.offset, Symbolic) assert isinstance(descr_y.offset, Symbolic) assert isinstance(descr_z.offset, Symbolic) assert isinstance(descr_f.offset, Symbolic) assert isinstance(descr_s.offset, Symbolic) - assert 
isinstance(descr_x.get_field_size(True), Symbolic) - assert isinstance(descr_y.get_field_size(True), Symbolic) - assert isinstance(descr_z.get_field_size(True), Symbolic) - assert isinstance(descr_f.get_field_size(True), Symbolic) - assert isinstance(descr_s.get_field_size(True), Symbolic) - assert not descr_x.is_pointer_field() - assert descr_y.is_pointer_field() - assert not descr_z.is_pointer_field() - assert not descr_f.is_pointer_field() - assert not descr_s.is_pointer_field() - assert not descr_x.is_float_field() - assert not descr_y.is_float_field() - assert not descr_z.is_float_field() - assert descr_f.is_float_field() - assert not descr_s.is_float_field() + assert isinstance(descr_x.field_size, Symbolic) + assert isinstance(descr_y.field_size, Symbolic) + assert isinstance(descr_z.field_size, Symbolic) + assert isinstance(descr_f.field_size, Symbolic) + assert isinstance(descr_s.field_size, Symbolic) + assert descr_x.flag == FLAG_UNSIGNED + assert descr_y.flag == FLAG_POINTER + assert descr_z.flag == FLAG_UNSIGNED + assert descr_f.flag == FLAG_FLOAT + assert descr_s.flag == FLAG_UNSIGNED def test_get_field_descr_sign(): @@ -128,7 +106,8 @@ for tsc in [False, True]: c2 = GcCache(tsc) descr_x = get_field_descr(c2, S, 'x') - assert descr_x.is_field_signed() == signed + assert descr_x.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] def test_get_field_descr_longlong(): if sys.maxint > 2147483647: @@ -136,9 +115,8 @@ c0 = GcCache(False) S = lltype.GcStruct('S', ('y', lltype.UnsignedLongLong)) descr = get_field_descr(c0, S, 'y') - assert not descr.is_pointer_field() - assert descr.is_float_field() - assert descr.get_field_size(False) == 8 + assert descr.flag == FLAG_FLOAT + assert descr.field_size == 8 def test_get_array_descr(): @@ -149,19 +127,8 @@ A3 = lltype.GcArray(lltype.Ptr(U)) A4 = lltype.GcArray(lltype.Float) A5 = lltype.GcArray(lltype.Struct('x', ('v', lltype.Signed), - ('k', lltype.Signed))) + ('k', lltype.Signed))) A6 = lltype.GcArray(lltype.SingleFloat) - assert getArrayDescrClass(A2) is GcPtrArrayDescr - assert getArrayDescrClass(A3) is NonGcPtrArrayDescr - cls = getArrayDescrClass(A1) - assert cls != getArrayDescrClass(lltype.GcArray(lltype.Signed)) - assert cls == getArrayDescrClass(lltype.GcArray(lltype.Char)) - clsf = getArrayDescrClass(A4) - assert clsf != cls - assert clsf == getArrayDescrClass(lltype.GcArray(lltype.Float)) - clss = getArrayDescrClass(A6) - assert clss not in (clsf, cls) - assert clss == getArrayDescrClass(lltype.GcArray(rffi.UINT)) # c0 = GcCache(False) descr1 = get_array_descr(c0, A1) @@ -170,82 +137,61 @@ descr4 = get_array_descr(c0, A4) descr5 = get_array_descr(c0, A5) descr6 = get_array_descr(c0, A6) - assert descr1.__class__ is cls - assert descr2.__class__ is GcPtrArrayDescr - assert descr3.__class__ is NonGcPtrArrayDescr - assert descr4.__class__ is clsf - assert descr6.__class__ is clss + assert isinstance(descr1, ArrayDescr) assert descr1 == get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert not descr1.is_array_of_pointers() - assert descr2.is_array_of_pointers() - assert not descr3.is_array_of_pointers() - assert not descr4.is_array_of_pointers() - assert not descr5.is_array_of_pointers() - assert not descr1.is_array_of_floats() - assert not descr2.is_array_of_floats() - assert not descr3.is_array_of_floats() - assert descr4.is_array_of_floats() - assert not descr5.is_array_of_floats() + assert descr1.flag == FLAG_UNSIGNED + assert descr2.flag == FLAG_POINTER + assert descr3.flag == FLAG_UNSIGNED + assert descr4.flag == 
FLAG_FLOAT + assert descr5.flag == FLAG_STRUCT + assert descr6.flag == FLAG_UNSIGNED # def get_alignment(code): # Retrieve default alignment for the compiler/platform return struct.calcsize('l' + code) - struct.calcsize(code) - assert descr1.get_base_size(False) == get_alignment('c') - assert descr2.get_base_size(False) == get_alignment('p') - assert descr3.get_base_size(False) == get_alignment('p') - assert descr4.get_base_size(False) == get_alignment('d') - assert descr5.get_base_size(False) == get_alignment('f') - assert descr1.get_ofs_length(False) == 0 - assert descr2.get_ofs_length(False) == 0 - assert descr3.get_ofs_length(False) == 0 - assert descr4.get_ofs_length(False) == 0 - assert descr5.get_ofs_length(False) == 0 - assert descr1.get_item_size(False) == rffi.sizeof(lltype.Char) - assert descr2.get_item_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr3.get_item_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr4.get_item_size(False) == rffi.sizeof(lltype.Float) - assert descr5.get_item_size(False) == rffi.sizeof(lltype.Signed) * 2 - assert descr6.get_item_size(False) == rffi.sizeof(lltype.SingleFloat) + assert descr1.basesize == get_alignment('c') + assert descr2.basesize == get_alignment('p') + assert descr3.basesize == get_alignment('p') + assert descr4.basesize == get_alignment('d') + assert descr5.basesize == get_alignment('f') + assert descr1.lendescr.offset == 0 + assert descr2.lendescr.offset == 0 + assert descr3.lendescr.offset == 0 + assert descr4.lendescr.offset == 0 + assert descr5.lendescr.offset == 0 + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr2.itemsize == rffi.sizeof(lltype.Ptr(T)) + assert descr3.itemsize == rffi.sizeof(lltype.Ptr(U)) + assert descr4.itemsize == rffi.sizeof(lltype.Float) + assert descr5.itemsize == rffi.sizeof(lltype.Signed) * 2 + assert descr6.itemsize == rffi.sizeof(lltype.SingleFloat) # - assert isinstance(descr1.get_base_size(True), Symbolic) - assert isinstance(descr2.get_base_size(True), Symbolic) - assert isinstance(descr3.get_base_size(True), Symbolic) - assert isinstance(descr4.get_base_size(True), Symbolic) - assert isinstance(descr5.get_base_size(True), Symbolic) - assert isinstance(descr1.get_ofs_length(True), Symbolic) - assert isinstance(descr2.get_ofs_length(True), Symbolic) - assert isinstance(descr3.get_ofs_length(True), Symbolic) - assert isinstance(descr4.get_ofs_length(True), Symbolic) - assert isinstance(descr5.get_ofs_length(True), Symbolic) - assert isinstance(descr1.get_item_size(True), Symbolic) - assert isinstance(descr2.get_item_size(True), Symbolic) - assert isinstance(descr3.get_item_size(True), Symbolic) - assert isinstance(descr4.get_item_size(True), Symbolic) - assert isinstance(descr5.get_item_size(True), Symbolic) CA = rffi.CArray(lltype.Signed) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_SIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Ptr(lltype.GcStruct('S'))) descr = get_array_descr(c0, CA) - assert descr.is_array_of_pointers() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_POINTER + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Ptr(lltype.Struct('S'))) descr = get_array_descr(c0, CA) - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == 
FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Float) descr = get_array_descr(c0, CA) - assert descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_FLOAT + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(rffi.FLOAT) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.itemsize == rffi.sizeof(lltype.SingleFloat) + assert descr.lendescr is None def test_get_array_descr_sign(): @@ -257,46 +203,55 @@ for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, A) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] # RA = rffi.CArray(RESTYPE) for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, RA) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] + + +def test_get_array_descr_str(): + c0 = GcCache(False) + descr1 = get_array_descr(c0, rstr.STR) + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr1.flag == FLAG_UNSIGNED def test_get_call_descr_not_translated(): c0 = GcCache(False) descr1 = get_call_descr(c0, [lltype.Char, lltype.Signed], lltype.Char) - assert descr1.get_result_size(False) == rffi.sizeof(lltype.Char) - assert descr1.get_return_type() == history.INT + assert descr1.get_result_size() == rffi.sizeof(lltype.Char) + assert descr1.get_result_type() == history.INT assert descr1.arg_classes == "ii" # T = lltype.GcStruct('T') descr2 = get_call_descr(c0, [lltype.Ptr(T)], lltype.Ptr(T)) - assert descr2.get_result_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr2.get_return_type() == history.REF + assert descr2.get_result_size() == rffi.sizeof(lltype.Ptr(T)) + assert descr2.get_result_type() == history.REF assert descr2.arg_classes == "r" # U = lltype.GcStruct('U', ('x', lltype.Signed)) assert descr2 == get_call_descr(c0, [lltype.Ptr(U)], lltype.Ptr(U)) # V = lltype.Struct('V', ('x', lltype.Signed)) - assert (get_call_descr(c0, [], lltype.Ptr(V)).get_return_type() == + assert (get_call_descr(c0, [], lltype.Ptr(V)).get_result_type() == history.INT) # - assert (get_call_descr(c0, [], lltype.Void).get_return_type() == + assert (get_call_descr(c0, [], lltype.Void).get_result_type() == history.VOID) # descr4 = get_call_descr(c0, [lltype.Float, lltype.Float], lltype.Float) - assert descr4.get_result_size(False) == rffi.sizeof(lltype.Float) - assert descr4.get_return_type() == history.FLOAT + assert descr4.get_result_size() == rffi.sizeof(lltype.Float) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c0, [lltype.SingleFloat], lltype.SingleFloat) - assert descr5.get_result_size(False) == rffi.sizeof(lltype.SingleFloat) - assert descr5.get_return_type() == "S" + assert descr5.get_result_size() == rffi.sizeof(lltype.SingleFloat) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_get_call_descr_not_translated_longlong(): @@ -305,13 +260,13 @@ c0 = GcCache(False) # descr5 = get_call_descr(c0, [lltype.SignedLongLong], lltype.Signed) - assert descr5.get_result_size(False) == 4 - assert descr5.get_return_type() == history.INT + assert 
descr5.get_result_size() == 4 + assert descr5.get_result_type() == history.INT assert descr5.arg_classes == "L" # descr6 = get_call_descr(c0, [lltype.Signed], lltype.SignedLongLong) - assert descr6.get_result_size(False) == 8 - assert descr6.get_return_type() == "L" + assert descr6.get_result_size() == 8 + assert descr6.get_result_type() == "L" assert descr6.arg_classes == "i" def test_get_call_descr_translated(): @@ -319,18 +274,18 @@ T = lltype.GcStruct('T') U = lltype.GcStruct('U', ('x', lltype.Signed)) descr3 = get_call_descr(c1, [lltype.Ptr(T)], lltype.Ptr(U)) - assert isinstance(descr3.get_result_size(True), Symbolic) - assert descr3.get_return_type() == history.REF + assert isinstance(descr3.get_result_size(), Symbolic) + assert descr3.get_result_type() == history.REF assert descr3.arg_classes == "r" # descr4 = get_call_descr(c1, [lltype.Float, lltype.Float], lltype.Float) - assert isinstance(descr4.get_result_size(True), Symbolic) - assert descr4.get_return_type() == history.FLOAT + assert isinstance(descr4.get_result_size(), Symbolic) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c1, [lltype.SingleFloat], lltype.SingleFloat) - assert isinstance(descr5.get_result_size(True), Symbolic) - assert descr5.get_return_type() == "S" + assert isinstance(descr5.get_result_size(), Symbolic) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_call_descr_extra_info(): @@ -369,29 +324,30 @@ # descr2 = get_field_descr(c0, S, 'y') o, _ = symbolic.get_field_token(S, 'y', False) - assert descr2.repr_of_descr() == '' % o + assert descr2.repr_of_descr() == '' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) - assert descr2i.repr_of_descr() == '' % o + assert descr2i.repr_of_descr() == '' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) - assert descr3.repr_of_descr() == '' + o = symbolic.get_size(lltype.Ptr(S), False) + assert descr3.repr_of_descr() == '' % o # descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert descr3i.repr_of_descr() == '' + assert descr3i.repr_of_descr() == '' # descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) - assert 'GcPtrCallDescr' in descr4.repr_of_descr() + assert descr4.repr_of_descr() == '' # descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) - assert 'CharCallDescr' in descr4i.repr_of_descr() + assert descr4i.repr_of_descr() == '' # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) - assert 'FloatCallDescr' in descr4f.repr_of_descr() + assert descr4f.repr_of_descr() == '' # descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat) - assert 'SingleFloatCallDescr' in descr5f.repr_of_descr() + assert descr5f.repr_of_descr() == '' def test_call_stubs_1(): c0 = GcCache(False) @@ -401,10 +357,10 @@ def f(a, b): return 'c' - call_stub = descr1.call_stub fnptr = llhelper(lltype.Ptr(lltype.FuncType(ARGS, RES)), f) - res = call_stub(rffi.cast(lltype.Signed, fnptr), [1, 2], None, None) + res = descr1.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [1, 2], None, None) assert res == ord('c') def test_call_stubs_2(): @@ -421,8 +377,8 @@ a = lltype.malloc(ARRAY, 3) opaquea = lltype.cast_opaque_ptr(llmemory.GCREF, a) a[0] = 1 - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [], [opaquea], [longlong.getfloatstorage(3.5)]) + res = descr2.call_stub_f(rffi.cast(lltype.Signed, fnptr), + [], [opaquea], [longlong.getfloatstorage(3.5)]) assert 
longlong.getrealfloat(res) == 4.5 def test_call_stubs_single_float(): @@ -445,6 +401,22 @@ a = intmask(singlefloat2uint(r_singlefloat(-10.0))) b = intmask(singlefloat2uint(r_singlefloat(3.0))) c = intmask(singlefloat2uint(r_singlefloat(2.0))) - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [a, b, c], [], []) + res = descr2.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [a, b, c], [], []) assert float(uint2singlefloat(rffi.r_uint(res))) == -11.5 + +def test_field_arraylen_descr(): + c0 = GcCache(True) + A1 = lltype.GcArray(lltype.Signed) + fielddescr = get_field_arraylen_descr(c0, A1) + assert isinstance(fielddescr, FieldDescr) + ofs = fielddescr.offset + assert repr(ofs) == '< ArrayLengthOffset >' + # + fielddescr = get_field_arraylen_descr(c0, rstr.STR) + ofs = fielddescr.offset + assert repr(ofs) == ("< " + " 'chars'> + < ArrayLengthOffset" + " > >") + # caching: + assert fielddescr is get_field_arraylen_descr(c0, rstr.STR) diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -1,5 +1,6 @@ from pypy.rlib.libffi import types from pypy.jit.codewriter.longlong import is_64_bit +from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport.ffisupport import * @@ -15,7 +16,9 @@ args = [types.sint, types.pointer] descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, None, ffi_flags=42) - assert isinstance(descr, DynamicIntCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_type == 'i' + assert descr.result_flag == FLAG_SIGNED assert descr.arg_classes == 'ii' assert descr.get_ffi_flags() == 42 @@ -24,18 +27,20 @@ assert descr is None # missing floats descr = get_call_descr_dynamic(FakeCPU(supports_floats=True), args, types.void, None, ffi_flags=43) - assert isinstance(descr, VoidCallDescr) + assert descr.result_type == 'v' + assert descr.result_flag == FLAG_VOID assert descr.arg_classes == 'ifi' assert descr.get_ffi_flags() == 43 descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_SIGNED assert descr.is_result_signed() == True descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert isinstance(descr, CallDescr) + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_UNSIGNED assert descr.is_result_signed() == False if not is_64_bit: @@ -44,7 +49,9 @@ assert descr is None # missing longlongs descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True), [], types.slonglong, None, ffi_flags=43) - assert isinstance(descr, LongLongCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_flag == FLAG_FLOAT + assert descr.result_type == 'L' assert descr.get_ffi_flags() == 43 else: assert types.slonglong is types.slong @@ -53,6 +60,6 @@ assert descr is None # missing singlefloats descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True), [], types.float, None, ffi_flags=44) - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - assert isinstance(descr, SingleFloatCallDescr) + assert descr.result_flag == FLAG_UNSIGNED + assert descr.result_type == 'S' assert descr.get_ffi_flags() == 44 diff --git a/pypy/jit/backend/llsupport/test/test_gc.py 
b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -6,6 +6,7 @@ from pypy.jit.backend.llsupport.gc import * from pypy.jit.backend.llsupport import symbolic from pypy.jit.metainterp.gc import get_description +from pypy.jit.metainterp.history import BoxPtr, BoxInt, ConstPtr from pypy.jit.metainterp.resoperation import get_deep_immutable_oplist from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE @@ -15,12 +16,12 @@ gc_ll_descr = GcLLDescr_boehm(None, None, None) # record = [] - prev_funcptr_for_new = gc_ll_descr.funcptr_for_new - def my_funcptr_for_new(size): - p = prev_funcptr_for_new(size) + prev_malloc_fn_ptr = gc_ll_descr.malloc_fn_ptr + def my_malloc_fn_ptr(size): + p = prev_malloc_fn_ptr(size) record.append((size, p)) return p - gc_ll_descr.funcptr_for_new = my_funcptr_for_new + gc_ll_descr.malloc_fn_ptr = my_malloc_fn_ptr # # ---------- gc_malloc ---------- S = lltype.GcStruct('S', ('x', lltype.Signed)) @@ -32,8 +33,8 @@ A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(gc_ll_descr, A) p = gc_ll_descr.gc_malloc_array(arraydescr, 10) - assert record == [(arraydescr.get_base_size(False) + - 10 * arraydescr.get_item_size(False), p)] + assert record == [(arraydescr.basesize + + 10 * arraydescr.itemsize, p)] del record[:] # ---------- gc_malloc_str ---------- p = gc_ll_descr.gc_malloc_str(10) @@ -246,24 +247,28 @@ def __init__(self): self.record = [] + def _malloc(self, type_id, size): + tid = llop.combine_ushort(lltype.Signed, type_id, 0) + x = llmemory.raw_malloc(self.gcheaderbuilder.size_gc_header + size) + x += self.gcheaderbuilder.size_gc_header + return x, tid + def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size, has_finalizer, has_light_finalizer, contains_weakptr): assert not contains_weakptr - assert not has_finalizer # in these tests - assert not has_light_finalizer # in these tests - p = llmemory.raw_malloc(size) + assert not has_finalizer + assert not has_light_finalizer + p, tid = self._malloc(type_id, size) p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("fixedsize", repr(size), tid, p)) return p def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, itemsize, offset_to_length): - p = llmemory.raw_malloc(size + itemsize * length) + p, tid = self._malloc(type_id, size + itemsize * length) (p + offset_to_length).signed[0] = length p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("varsize", tid, length, repr(size), repr(itemsize), repr(offset_to_length), p)) @@ -322,43 +327,40 @@ gc_ll_descr = GcLLDescr_framework(gcdescr, FakeTranslator(), None, llop1) gc_ll_descr.initialize() + llop1.gcheaderbuilder = gc_ll_descr.gcheaderbuilder self.llop1 = llop1 self.gc_ll_descr = gc_ll_descr self.fake_cpu = FakeCPU() - def test_args_for_new(self): - S = lltype.GcStruct('S', ('x', lltype.Signed)) - sizedescr = get_size_descr(self.gc_ll_descr, S) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed - A = lltype.GcArray(lltype.Signed) - arraydescr = get_array_descr(self.gc_ll_descr, A) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed +## def test_args_for_new(self): +## S = lltype.GcStruct('S', ('x', lltype.Signed)) +## sizedescr = get_size_descr(self.gc_ll_descr, S) 
+## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed +## A = lltype.GcArray(lltype.Signed) +## arraydescr = get_array_descr(self.gc_ll_descr, A) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed def test_gc_malloc(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) sizedescr = get_size_descr(self.gc_ll_descr, S) p = self.gc_ll_descr.gc_malloc(sizedescr) - assert self.llop1.record == [("fixedsize", - repr(sizedescr.size), + assert lltype.typeOf(p) == llmemory.GCREF + assert self.llop1.record == [("fixedsize", repr(sizedescr.size), sizedescr.tid, p)] - assert repr(self.gc_ll_descr.args_for_new(sizedescr)) == repr( - [sizedescr.size, sizedescr.tid]) def test_gc_malloc_array(self): A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(self.gc_ll_descr, A) p = self.gc_ll_descr.gc_malloc_array(arraydescr, 10) assert self.llop1.record == [("varsize", arraydescr.tid, 10, - repr(arraydescr.get_base_size(True)), - repr(arraydescr.get_item_size(True)), - repr(arraydescr.get_ofs_length(True)), + repr(arraydescr.basesize), + repr(arraydescr.itemsize), + repr(arraydescr.lendescr.offset), p)] - assert repr(self.gc_ll_descr.args_for_new_array(arraydescr)) == repr( - [arraydescr.get_item_size(True), arraydescr.tid]) def test_gc_malloc_str(self): p = self.gc_ll_descr.gc_malloc_str(10) @@ -404,10 +406,11 @@ gc_ll_descr = self.gc_ll_descr llop1 = self.llop1 # - newops = [] + rewriter = GcRewriterAssembler(gc_ll_descr, None) + newops = rewriter.newops v_base = BoxPtr() v_value = BoxPtr() - gc_ll_descr._gen_write_barrier(newops, v_base, v_value) + rewriter.gen_write_barrier(v_base, v_value) assert llop1.record == [] assert len(newops) == 1 assert newops[0].getopnum() == rop.COND_CALL_GC_WB @@ -427,8 +430,7 @@ operations = gc_ll_descr.rewrite_assembler(None, operations, []) assert len(operations) == 0 - def test_rewrite_assembler_1(self): - # check recording of ConstPtrs + def test_record_constptrs(self): class MyFakeCPU(object): def cast_adr_to_int(self, adr): assert adr == "some fake address" @@ -455,211 +457,6 @@ assert operations2 == operations assert gcrefs == [s_gcref] - def test_rewrite_assembler_2(self): - # check write barriers before SETFIELD_GC - v_base = BoxPtr() - v_value = BoxPtr() - field_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETFIELD_GC, [v_base, v_value], None, - descr=field_descr), - ] - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, operations, - []) - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETFIELD_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_value - assert operations[1].getdescr() == field_descr - - def test_rewrite_assembler_3(self): - # check write barriers before SETARRAYITEM_GC - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - 
# we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_4(self): - # check write barriers before SETARRAYITEM_GC, - # if we have actually a write_barrier_from_array. - self.llop1._have_wb_from_array = True - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - if isinstance(v_new_length, ConstInt) and v_new_length.value < 130: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - else: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB_ARRAY - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_index - assert operations[0].getarg(2) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_5(self): - S = lltype.GcStruct('S') - A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) - interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, - A.OF, 'x') - wbdescr = self.gc_ll_descr.write_barrier_descr - ops = parse(""" - [p1, p2] - setinteriorfield_gc(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - expected = parse(""" - [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setinteriorfield_raw(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, 
expected.operations) - - def test_rewrite_assembler_initialization_store(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - # no write barrier - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_2(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - wbdescr = self.gc_ll_descr.write_barrier_descr - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_3(self): - A = lltype.GcArray(lltype.Ptr(lltype.GcStruct('S'))) - arraydescr = get_array_descr(self.gc_ll_descr, A) - ops = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) class TestFrameworkMiniMark(TestFramework): gc = 'minimark' diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -42,8 +42,13 @@ def frame_size(self, box_type): if box_type == FLOAT: return 2 + elif box_type == INT: + return 1 else: - return 1 + raise ValueError(box_type) + def get_loc_index(self, loc): + assert isinstance(loc, FakeFramePos) + return loc.pos class MockAsm(object): def __init__(self): @@ -282,7 +287,7 @@ rm.force_allocate_reg(b) rm.before_call() assert len(rm.reg_bindings) == 2 - assert fm.frame_depth == 2 + assert fm.get_frame_depth() == 2 assert len(asm.moves) == 2 rm._check_invariants() rm.after_call(boxes[-1]) @@ -305,7 +310,7 @@ rm.force_allocate_reg(b) rm.before_call(save_all_regs=True) assert len(rm.reg_bindings) == 0 - assert fm.frame_depth == 4 + assert fm.get_frame_depth() == 4 assert len(asm.moves) == 4 rm._check_invariants() rm.after_call(boxes[-1]) @@ -327,7 +332,7 @@ xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm) xrm.loc(f0) rm.loc(b0) - assert fm.frame_depth == 3 + assert fm.get_frame_depth() == 3 @@ 
-348,3 +353,123 @@ spilled2 = rm.force_allocate_reg(b5) assert spilled2 is loc rm._check_invariants() + + + def test_hint_frame_locations_1(self): + b0, = newboxes(0) + fm = TFrameManager() + loc123 = FakeFramePos(123, INT) + fm.hint_frame_locations[b0] = loc123 + assert fm.get_frame_depth() == 0 + loc = fm.loc(b0) + assert loc == loc123 + assert fm.get_frame_depth() == 124 + + def test_hint_frame_locations_2(self): + b0, b1, b2 = newboxes(0, 1, 2) + longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_allocate_reg(b2) + rm.force_spill_var(b0) + loc = rm.loc(b0) + assert isinstance(loc, FakeFramePos) + assert fm.get_loc_index(loc) == 0 + rm.position = 1 + assert fm.used == [True] + rm.possibly_free_var(b0) + assert fm.used == [False] + # + fm.hint_frame_locations[b1] = loc + rm.force_spill_var(b1) + loc1 = rm.loc(b1) + assert loc1 == loc + assert fm.used == [True] + # + fm.hint_frame_locations[b2] = loc + rm.force_spill_var(b2) + loc2 = rm.loc(b2) + assert loc2 != loc1 # because it was not free + assert fm.used == [True, True] + # + rm._check_invariants() + + def test_frame_manager_basic(self): + b0, b1 = newboxes(0, 1) + fm = TFrameManager() + loc0 = fm.loc(b0) + assert fm.get_loc_index(loc0) == 0 + # + assert fm.get(b1) is None + loc1 = fm.loc(b1) + assert fm.get_loc_index(loc1) == 1 + assert fm.get(b1) == loc1 + # + loc0b = fm.loc(b0) + assert loc0b == loc0 + # + fm.loc(BoxInt()) + assert fm.get_frame_depth() == 3 + # + f0 = BoxFloat() + locf0 = fm.loc(f0) + assert fm.get_loc_index(locf0) == 4 + assert fm.get_frame_depth() == 6 + # + f1 = BoxFloat() + locf1 = fm.loc(f1) + assert fm.get_loc_index(locf1) == 6 + assert fm.get_frame_depth() == 8 + assert fm.used == [True, True, True, False, True, True, True, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(f1) + assert fm.used == [False, True, True, False, True, True, False, False] + # + fm.reserve_location_in_frame(1) + assert fm.get_frame_depth() == 9 + assert fm.used == [False, True, True, False, True, True, False, False, True] + # + assert b0 not in fm.bindings + fm.set_binding(b0, loc0) + assert b0 in fm.bindings + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + b3 = BoxInt() + assert not fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) # already free + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b3) + assert fm.used == [False, True, True, False, True, True, False, False, True] + f3 = BoxFloat() + assert not fm.try_to_reuse_location(f3, fm.frame_pos(0, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(1, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(2, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(3, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(4, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(5, FLOAT)) + assert fm.used == 
[False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(f3, fm.frame_pos(6, FLOAT)) + assert fm.used == [False, True, True, False, True, True, True, True, True] + # + fm.used = [False] + assert fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True, True] + # + fm.used = [True] + assert not fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True] diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -0,0 +1,668 @@ +from pypy.jit.backend.llsupport.descr import * +from pypy.jit.backend.llsupport.gc import * +from pypy.jit.metainterp.gc import get_description +from pypy.jit.tool.oparser import parse +from pypy.jit.metainterp.optimizeopt.util import equaloplists +from pypy.jit.codewriter.heaptracker import register_known_gctype + + +class Evaluator(object): + def __init__(self, scope): + self.scope = scope + def __getitem__(self, key): + return eval(key, self.scope) + + +class RewriteTests(object): + def check_rewrite(self, frm_operations, to_operations, **namespace): + S = lltype.GcStruct('S', ('x', lltype.Signed), + ('y', lltype.Signed)) + sdescr = get_size_descr(self.gc_ll_descr, S) + sdescr.tid = 1234 + # + T = lltype.GcStruct('T', ('y', lltype.Signed), + ('z', lltype.Ptr(S)), + ('t', lltype.Signed)) + tdescr = get_size_descr(self.gc_ll_descr, T) + tdescr.tid = 5678 + tzdescr = get_field_descr(self.gc_ll_descr, T, 'z') + # + A = lltype.GcArray(lltype.Signed) + adescr = get_array_descr(self.gc_ll_descr, A) + adescr.tid = 4321 + alendescr = adescr.lendescr + # + B = lltype.GcArray(lltype.Char) + bdescr = get_array_descr(self.gc_ll_descr, B) + bdescr.tid = 8765 + blendescr = bdescr.lendescr + # + C = lltype.GcArray(lltype.Ptr(S)) + cdescr = get_array_descr(self.gc_ll_descr, C) + cdescr.tid = 8111 + clendescr = cdescr.lendescr + # + E = lltype.GcStruct('Empty') + edescr = get_size_descr(self.gc_ll_descr, E) + edescr.tid = 9000 + # + vtable_descr = self.gc_ll_descr.fielddescr_vtable + O = lltype.GcStruct('O', ('parent', rclass.OBJECT), + ('x', lltype.Signed)) + o_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) + register_known_gctype(self.cpu, o_vtable, O) + # + tiddescr = self.gc_ll_descr.fielddescr_tid + wbdescr = self.gc_ll_descr.write_barrier_descr + WORD = globals()['WORD'] + # + strdescr = self.gc_ll_descr.str_descr + unicodedescr = self.gc_ll_descr.unicode_descr + strlendescr = strdescr.lendescr + unicodelendescr = unicodedescr.lendescr + # + namespace.update(locals()) + # + for funcname in self.gc_ll_descr._generated_functions: + namespace[funcname] = self.gc_ll_descr.get_malloc_fn(funcname) + namespace[funcname + '_descr'] = getattr(self.gc_ll_descr, + '%s_descr' % funcname) + # + ops = parse(frm_operations, namespace=namespace) + expected = parse(to_operations % Evaluator(namespace), + namespace=namespace) + operations = self.gc_ll_descr.rewrite_assembler(self.cpu, + ops.operations, + []) + equaloplists(operations, expected.operations) + + +class TestBoehm(RewriteTests): + def setup_method(self, meth): + class FakeCPU(object): + def sizeof(self, STRUCT): + return SizeDescrWithVTable(102) + self.cpu = FakeCPU() + self.gc_ll_descr = GcLLDescr_boehm(None, None, None) + + def test_new(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 
%(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_no_collapsing(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + p1 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_new_array_fixed(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(adescr.basesize + 10 * adescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_new_array_variable(self): + self.check_rewrite(""" + [i1] + p0 = new_array(i1, descr=adescr) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(adescr.basesize)d, \ + i1, \ + %(adescr.itemsize)d, \ + %(adescr.lendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_newstr(self): + self.check_rewrite(""" + [i1] + p0 = newstr(i1) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(strdescr.basesize)d, \ + i1, \ + %(strdescr.itemsize)d, \ + %(strlendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_newunicode(self): + self.check_rewrite(""" + [i1] + p0 = newunicode(10) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(unicodedescr.basesize + \ + 10 * unicodedescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=unicodelendescr) + jump() + """) + + +class TestFramework(RewriteTests): + def setup_method(self, meth): + class config_(object): + class translation(object): + gc = 'hybrid' + gcrootfinder = 'asmgcc' + gctransformer = 'framework' + gcremovetypeptr = False + gcdescr = get_description(config_) + self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, + really_not_translated=True) + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: True) + # + class FakeCPU(object): + def sizeof(self, STRUCT): + descr = SizeDescrWithVTable(102) + descr.tid = 9315 + return descr + self.cpu = FakeCPU() + + def test_rewrite_assembler_new_to_malloc(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(sdescr.size)d) + setfield_gc(p0, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new3_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=tdescr) + p2 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + tdescr.size + sdescr.size)d) + setfield_gc(p0, 1234, descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 5678, descr=tiddescr) + p2 = int_add(p1, %(tdescr.size)d) + setfield_gc(p2, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 4321, descr=tiddescr) + 
setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_new_and_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + \ + adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 1234, descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 4321, descr=tiddescr) + setfield_gc(p1, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_round_up(self): + self.check_rewrite(""" + [] + p0 = new_array(6, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(bdescr.basesize + 8)d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 6, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_round_up_always(self): + self.check_rewrite(""" + [] + p0 = new_array(5, descr=bdescr) + p1 = new_array(5, descr=bdescr) + p2 = new_array(5, descr=bdescr) + p3 = new_array(5, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4 * (bdescr.basesize + 8))d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 5, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 8)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 5, descr=blendescr) + p2 = int_add(p1, %(bdescr.basesize + 8)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 5, descr=blendescr) + p3 = int_add(p2, %(bdescr.basesize + 8)d) + setfield_gc(p3, 8765, descr=tiddescr) + setfield_gc(p3, 5, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_minimal_size(self): + self.check_rewrite(""" + [] + p0 = new(descr=edescr) + p1 = new(descr=edescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4*WORD)d) + setfield_gc(p0, 9000, descr=tiddescr) + p1 = int_add(p0, %(2*WORD)d) + setfield_gc(p1, 9000, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_variable_size(self): + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=bdescr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, i0, \ + descr=malloc_array_descr) + jump(i0) + """) + + def test_rewrite_assembler_nonstandard_array(self): + # a non-standard array is a bit hard to get; e.g. GcArray(Float) + # is like that on Win32, but not on Linux. Build one manually... 
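
The nursery-folding tests above (test_rewrite_assembler_new3_to_malloc, test_rewrite_assembler_round_up_always, test_rewrite_assembler_maximal_size_2) pin down how consecutive constant-size allocations are folded into a single call_malloc_nursery followed by int_add offsets. A schematic, self-contained sketch of that folding rule -- plain Python producing pseudo-op strings, not the real GcRewriterAssembler; WORD=8 and the max_young default are assumptions for illustration only:

    WORD = 8  # assumed 64-bit build, matching the rounding seen in the tests

    def round_up(size):
        # nursery sizes are rounded up to a multiple of WORD
        return (size + WORD - 1) & ~(WORD - 1)

    def fold_mallocs(sizes, max_young=16 * 1024):
        """Schematic version of the nursery-folding rule exercised above.

        'sizes' is a list of raw object sizes for consecutive new()/new_array()
        with constant length.  Returns pseudo-op strings: one combined
        call_malloc_nursery per foldable run, then int_add offsets for the
        later objects; anything bigger than max_young gets its own
        call_malloc_gc and also ends the current run.
        """
        ops = []
        run = []                      # indices of allocations folded together
        rounded = [round_up(s) for s in sizes]

        def flush():
            if not run:
                return
            total = sum(rounded[i] for i in run)
            ops.append("p%d = call_malloc_nursery(%d)" % (run[0], total))
            for prev, cur in zip(run, run[1:]):
                ops.append("p%d = int_add(p%d, %d)" % (cur, prev, rounded[prev]))
            del run[:]

        for i, size in enumerate(rounded):
            if size > max_young:
                flush()
                ops.append("p%d = call_malloc_gc(%d)" % (i, size))
            else:
                if sum(rounded[j] for j in run) + size > max_young:
                    flush()
                run.append(i)
        flush()
        return ops

    # e.g. fold_mallocs([24, 40, 24]) gives one call_malloc_nursery(88)
    # followed by two int_add offsets, mirroring the expected operations of
    # test_rewrite_assembler_new3_to_malloc.
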
+ NONSTD = lltype.GcArray(lltype.Float) + nonstd_descr = get_array_descr(self.gc_ll_descr, NONSTD) + nonstd_descr.tid = 6464 + nonstd_descr.basesize = 64 # <= hacked + nonstd_descr.itemsize = 8 + nonstd_descr_gcref = 123 + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=nonstd_descr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array_nonstandard), \ + 64, 8, \ + %(nonstd_descr.lendescr.offset)d, \ + 6464, i0, \ + descr=malloc_array_nonstandard_descr) + jump(i0) + """, nonstd_descr=nonstd_descr) + + def test_rewrite_assembler_maximal_size_1(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_array(103, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(bdescr.basesize + 104)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_maximal_size_2(self): + self.gc_ll_descr.max_size_of_young_obj = 300 + self.check_rewrite(""" + [] + p0 = new_array(101, descr=bdescr) + p1 = new_array(102, descr=bdescr) # two new_arrays can be combined + p2 = new_array(103, descr=bdescr) # but not all three + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(2 * (bdescr.basesize + 104))d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 101, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 104)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 102, descr=blendescr) + p2 = call_malloc_nursery( \ + %(bdescr.basesize + 104)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_huge_size(self): + # "huge" is defined as "larger than 0xffffff bytes, or 16MB" + self.check_rewrite(""" + [] + p0 = new_array(20000000, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, 20000000, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(104) # rounded up + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_new_with_vtable_too_big(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_rewrite_assembler_newstr_newunicode(self): + self.check_rewrite(""" + [i2] + p0 = newstr(14) + p1 = newunicode(10) + p2 = newunicode(i2) + p3 = newstr(i2) + jump() + """, """ + [i2] + p0 = call_malloc_nursery( \ + %(strdescr.basesize + 16 * strdescr.itemsize + \ + unicodedescr.basesize + 10 * unicodedescr.itemsize)d) + setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr) + setfield_gc(p0, 14, descr=strlendescr) + p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) + setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) + setfield_gc(p1, 10, descr=unicodelendescr) + p2 = call_malloc_gc(ConstClass(malloc_unicode), i2, \ + descr=malloc_unicode_descr) + p3 = call_malloc_gc(ConstClass(malloc_str), i2, \ + descr=malloc_str_descr) + jump() + """) + + def test_write_barrier_before_setfield_gc(self): + 
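
The write-barrier tests that follow encode a simple decision rule: a GC store into an object allocated earlier in the same block needs no barrier (initialization store), while any other GC store gets a cond_call_gc_wb in front and is downgraded to a raw store; a label or call in between makes the allocation "forgotten". A minimal sketch of just that rule, using plain string pseudo-ops rather than the real GcRewriterAssembler, and ignoring the cond_call_gc_wb_array variant for long or unknown-length arrays:

    def insert_write_barriers(ops):
        """Schematic model of the write-barrier rule the tests below encode.

        'ops' is a list of (opname, target) pairs; this is not PyPy code,
        only the decision rule: stores into a freshly allocated object stay
        as setfield_gc, everything else becomes cond_call_gc_wb followed by
        setfield_raw.
        """
        freshly_allocated = set()
        out = []
        for opname, target in ops:
            if opname in ('new', 'new_array', 'new_with_vtable'):
                freshly_allocated.add(target)
                out.append((opname, target))
            elif opname in ('label', 'call'):
                freshly_allocated.clear()   # unknown code may run in between
                out.append((opname, target))
            elif opname == 'setfield_gc':
                if target in freshly_allocated:
                    out.append((opname, target))        # initialization store
                else:
                    out.append(('cond_call_gc_wb', target))
                    out.append(('setfield_raw', target))
            else:
                out.append((opname, target))
        return out

    # insert_write_barriers([('new', 'p0'), ('setfield_gc', 'p0')]) keeps the
    # store as-is; without the preceding 'new' it becomes cond_call_gc_wb +
    # setfield_raw, matching test_write_barrier_before_setfield_gc and
    # test_initialization_store.
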
self.check_rewrite(""" + [p1, p2] + setfield_gc(p1, p2, descr=tzdescr) + jump() + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setfield_raw(p1, p2, descr=tzdescr) + jump() + """) + + def test_write_barrier_before_array_without_from_array(self): + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: False) + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_short_array(self): + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(129, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 129 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 129, descr=clendescr) + call(123456) + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_long_array(self): + # the limit of "being too long" is fixed, arbitrarily, at 130 + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(130, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 130 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 130, descr=clendescr) + call(123456) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_unknown_array(self): + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_label_makes_size_unknown(self): + self.check_rewrite(""" + [i2, p3] + p1 = new_array(5, descr=cdescr) + label(p1, i2, p3) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 5, descr=clendescr) + label(p1, i2, p3) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_setinteriorfield_gc(self): + S1 = lltype.GcStruct('S1') + INTERIOR = lltype.GcArray(('z', lltype.Ptr(S1))) + interiordescr = get_array_descr(self.gc_ll_descr, INTERIOR) + interiordescr.tid = 1291 + interiorlendescr = interiordescr.lendescr + interiorzdescr = get_interiorfield_descr(self.gc_ll_descr, + INTERIOR, 'z') + self.check_rewrite(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, interiorzdescr=interiorzdescr) + + def test_initialization_store(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_2(self): + self.check_rewrite(""" + [] + p0 = new(descr=tdescr) + p1 = 
new(descr=sdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(tdescr.size + sdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = int_add(p0, %(tdescr.size)d) + setfield_gc(p1, 1234, descr=tiddescr) + # <<>> + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_array(self): + self.check_rewrite(""" + [p1, i2] + p0 = new_array(5, descr=cdescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """, """ + [p1, i2] + p0 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p0, 8111, descr=tiddescr) + setfield_gc(p0, 5, descr=clendescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """) + + def test_non_initialization_store(self): + self.check_rewrite(""" + [i0] + p0 = new(descr=tdescr) + p1 = newstr(i0) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [i0] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = call_malloc_gc(ConstClass(malloc_str), i0, \ + descr=malloc_str_descr) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) + + def test_non_initialization_store_label(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + label(p0, p1) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + label(p0, p1) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -1,5 +1,6 @@ from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.jit.metainterp import history +from pypy.rpython.lltypesystem import lltype class AbstractCPU(object): @@ -84,24 +85,21 @@ """Print a disassembled version of looptoken to stdout""" raise NotImplementedError - def execute_token(self, looptoken): - """Execute the generated code referenced by the looptoken. + def execute_token(self, looptoken, *args): + """NOT_RPYTHON (for tests only) + Execute the generated code referenced by the looptoken. Returns the descr of the last executed operation: either the one attached to the failing guard, or the one attached to the FINISH. - Use set_future_value_xxx() before, and get_latest_value_xxx() after. + Use get_latest_value_xxx() afterwards to read the result(s). """ - raise NotImplementedError + argtypes = [lltype.typeOf(x) for x in args] + execute = self.make_execute_token(*argtypes) + return execute(looptoken, *args) - def set_future_value_int(self, index, intvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_float(self, index, floatvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_ref(self, index, objvalue): - """Set the value for the index'th argument for the loop to run.""" + def make_execute_token(self, *argtypes): + """Must make and return an execute_token() function that will be + called with the given argtypes. 
+ """ raise NotImplementedError def get_latest_value_int(self, index): diff --git a/pypy/jit/backend/test/calling_convention_test.py b/pypy/jit/backend/test/calling_convention_test.py --- a/pypy/jit/backend/test/calling_convention_test.py +++ b/pypy/jit/backend/test/calling_convention_test.py @@ -2,7 +2,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, ConstInt, ConstPtr, BoxObj, Const, ConstObj, BoxFloat, ConstFloat) @@ -40,17 +40,18 @@ local_floats = list(floats) local_ints = list(ints) expected_result = 0.0 + arguments = [] for i in range(len(args)): x = args[i] if x[0] == 'f': x = local_floats.pop() t = longlong.getfloatstorage(x) - self.cpu.set_future_value_float(i, t) + arguments.append(t) else: x = local_ints.pop() - self.cpu.set_future_value_int(i, x) + arguments.append(x) expected_result += x - return expected_result + return arguments, expected_result @classmethod def get_funcbox(cls, cpu, func_ptr): @@ -107,12 +108,12 @@ ops += 'finish(f99, %s)\n' % arguments loop = parse(ops, namespace=locals()) - looptoken = LoopToken() + looptoken = JitCellToken() done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - expected_result = self._prepare_args(args, floats, ints) + argvals, expected_result = self._prepare_args(args, floats, ints) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(x - expected_result) < 0.0001 @@ -253,13 +254,13 @@ called_ops += 'finish(f%d, descr=fdescr3)\n' % total_index # compile called loop called_loop = parse(called_ops, namespace=locals()) - called_looptoken = LoopToken() + called_looptoken = JitCellToken() called_looptoken.outermost_jitdriver_sd = FakeJitDriverSD() done_number = self.cpu.get_fail_descr_number(called_loop.operations[-1].getdescr()) self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) - expected_result = self._prepare_args(args, floats, ints) - res = cpu.execute_token(called_looptoken) + argvals, expected_result = self._prepare_args(args, floats, ints) + res = cpu.execute_token(called_looptoken, *argvals) assert res.identifier == 3 t = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(t - expected_result) < 0.0001 @@ -284,12 +285,12 @@ # we want to take the fast path self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # prepare call to called_loop - self._prepare_args(args, floats, ints) - res = cpu.execute_token(othertoken) + argvals, _ = self._prepare_args(args, floats, ints) + res = cpu.execute_token(othertoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert res.identifier == 4 assert abs(x - expected_result) < 0.0001 diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -3,7 +3,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, TargetToken, ConstInt, ConstPtr, BoxObj, ConstObj, BoxFloat, ConstFloat) @@ -32,22 +32,19 @@ result_type, valueboxes, descr) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - j = 0 + args = [] for box in inputargs: if isinstance(box, BoxInt): - 
self.cpu.set_future_value_int(j, box.getint()) - j += 1 + args.append(box.getint()) elif isinstance(box, (BoxPtr, BoxObj)): - self.cpu.set_future_value_ref(j, box.getref_base()) - j += 1 + args.append(box.getref_base()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(j, box.getfloatstorage()) - j += 1 + args.append(box.getfloatstorage()) else: raise NotImplementedError(box) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *args) if res is operations[-1].getdescr(): self.guard_failed = False else: @@ -106,10 +103,9 @@ ResOperation(rop.FINISH, [i1], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) res = self.cpu.get_latest_value_int(0) assert res == 3 assert fail.identifier == 1 @@ -118,19 +114,20 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 10 @@ -139,19 +136,22 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 44) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(2) assert res == 10 @@ -162,15 +162,17 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = weakref.ref(operations[2]) self.cpu.compile_loop(inputargs, operations, looptoken) @@ -190,15 +192,17 @@ i2 = BoxInt() 
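
Condensed from the updated test_compile_loop above, a sketch of the new driving convention: loops carry a JitCellToken, the backward JUMP targets an explicit LABEL via a TargetToken, and arguments are passed directly to execute_token() instead of through set_future_value_xxx(). 'cpu' is assumed to be an already set-up backend CPU instance (self.cpu in these test classes); the imports follow the headers of these test files:

    from pypy.jit.metainterp.history import (JitCellToken, TargetToken,
                                             BoxInt, ConstInt, BasicFailDescr)
    from pypy.jit.metainterp.resoperation import ResOperation, rop

    def run_counting_loop(cpu):
        i0, i1, i2 = BoxInt(), BoxInt(), BoxInt()
        looptoken = JitCellToken()          # replaces the old LoopToken
        targettoken = TargetToken()         # JUMP now targets an explicit LABEL
        operations = [
            ResOperation(rop.LABEL, [i0], None, descr=targettoken),
            ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1),
            ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2),
            ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)),
            ResOperation(rop.JUMP, [i1], None, descr=targettoken),
        ]
        operations[3].setfailargs([i1])
        cpu.compile_loop([i0], operations, looptoken)
        # arguments are passed positionally, no set_future_value_int() calls
        fail = cpu.execute_token(looptoken, 2)
        assert fail.identifier == 2
        return cpu.get_latest_value_int(0)   # 10, as asserted in the test
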
faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -206,14 +210,13 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -226,17 +229,21 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() + i3 = BoxInt() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -244,14 +251,13 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -261,19 +267,20 @@ i1 = BoxInt() i2 = BoxInt() faildescr1 = BasicFailDescr(1) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + operations[3].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail is faildescr1 count = self.cpu.get_latest_value_count() @@ -290,18 +297,17 @@ return AbstractFailDescr.__setattr__(self, name, value) 
py.test.fail("finish descrs should not be touched") faildescr = UntouchableFailDescr() # to check that is not touched - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [i0], None, descr=faildescr) ] self.cpu.compile_loop([i0], operations, looptoken) - self.cpu.set_future_value_int(0, 99) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 99) assert fail is faildescr res = self.cpu.get_latest_value_int(0) assert res == 99 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [ConstInt(42)], None, descr=faildescr) ] @@ -311,7 +317,7 @@ res = self.cpu.get_latest_value_int(0) assert res == 42 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [], None, descr=faildescr) ] @@ -320,20 +326,19 @@ assert fail is faildescr if self.cpu.supports_floats: - looptoken = LoopToken() + looptoken = JitCellToken() f0 = BoxFloat() operations = [ ResOperation(rop.FINISH, [f0], None, descr=faildescr) ] self.cpu.compile_loop([f0], operations, looptoken) value = longlong.getfloatstorage(-61.25) - self.cpu.set_future_value_float(0, value) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, value) assert fail is faildescr res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == -61.25 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [constfloat(42.5)], None, descr=faildescr) ] @@ -350,20 +355,20 @@ z = BoxInt(579) t = BoxInt(455) u = BoxInt(0) # False - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [y, x], None, descr=targettoken), ResOperation(rop.INT_ADD, [x, y], z), ResOperation(rop.INT_SUB, [y, ConstInt(1)], t), ResOperation(rop.INT_EQ, [t, ConstInt(0)], u), ResOperation(rop.GUARD_FALSE, [u], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [z, t], None, descr=looptoken), + ResOperation(rop.JUMP, [t, z], None, descr=targettoken), ] operations[-2].setfailargs([t, z]) cpu.compile_loop([x, y], operations, looptoken) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, 0, 10) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_int(1) == 55 @@ -419,14 +424,12 @@ ] ops[1].setfailargs([v_res]) # - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([v1, v2], ops, looptoken) for x, y, z in testcases: excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, x) - self.cpu.set_future_value_int(1, y) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, x, y) if (z == boom) ^ reversed: assert fail.identifier == 1 else: @@ -1082,16 +1085,18 @@ inputargs.insert(index_counter, i0) jumpargs.insert(index_counter, i1) # - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() faildescr = BasicFailDescr(15) operations = [ + ResOperation(rop.LABEL, inputargs, None, descr=targettoken), ResOperation(rop.INT_SUB, [i0, ConstInt(1)], i1), ResOperation(rop.INT_GE, [i1, ConstInt(0)], i2), ResOperation(rop.GUARD_TRUE, [i2], None), - ResOperation(rop.JUMP, jumpargs, None, descr=looptoken), + ResOperation(rop.JUMP, jumpargs, None, descr=targettoken), ] - operations[2].setfailargs(inputargs[:]) - operations[2].setdescr(faildescr) + 
operations[3].setfailargs(inputargs[:]) + operations[3].setdescr(faildescr) # self.cpu.compile_loop(inputargs, operations, looptoken) # @@ -1109,17 +1114,7 @@ assert 0 values[index_counter] = 11 # - for i, (box, val) in enumerate(zip(inputargs, values)): - if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, val) - elif isinstance(box, BoxPtr): - self.cpu.set_future_value_ref(i, val) - elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, val) - else: - assert 0 - # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *values) assert fail.identifier == 15 # dstvalues = values[:] @@ -1149,30 +1144,33 @@ py.test.skip("requires floats") fboxes = [BoxFloat() for i in range(12)] i2 = BoxInt() + targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) operations = [ + ResOperation(rop.LABEL, fboxes, None, descr=targettoken), ResOperation(rop.FLOAT_LE, [fboxes[0], constfloat(9.2)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), ResOperation(rop.FINISH, fboxes, None, descr=faildescr2), ] operations[-2].setfailargs(fboxes) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(fboxes, operations, looptoken) fboxes2 = [BoxFloat() for i in range(12)] f3 = BoxFloat() bridge = [ ResOperation(rop.FLOAT_SUB, [fboxes2[0], constfloat(1.0)], f3), - ResOperation(rop.JUMP, [f3] + fboxes2[1:], None, descr=looptoken), + ResOperation(rop.JUMP, [f3]+fboxes2[1:], None, descr=targettoken), ] self.cpu.compile_bridge(faildescr1, fboxes2, bridge, looptoken) + args = [] for i in range(len(fboxes)): x = 13.5 + 6.73 * i - self.cpu.set_future_value_float(i, longlong.getfloatstorage(x)) - fail = self.cpu.execute_token(looptoken) + args.append(longlong.getfloatstorage(x)) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 2 res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == 8.5 @@ -1214,7 +1212,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1222,14 +1220,12 @@ if test1 == -42 or combinaison[0] == 'b': for test2 in [-65, -42, -11]: if test2 == -42 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_int(n, test1) - n += 1 + args.append(test1) if combinaison[1] == 'b': - cpu.set_future_value_int(n, test2) - n += 1 - fail = cpu.execute_token(looptoken) + args.append(test2) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1271,7 +1267,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1281,16 +1277,14 @@ if test1 == -4.5 or combinaison[0] == 'b': for test2 in [-6.5, -4.5, -2.5, nan]: if test2 == -4.5 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test1)) - n += 1 + args.append( + longlong.getfloatstorage(test1)) if combinaison[1] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test2)) - n += 1 - fail = cpu.execute_token(looptoken) + args.append( + longlong.getfloatstorage(test2)) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1330,19 +1324,20 @@ 
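
The float tests above now build a plain args list instead of calling set_future_value_float; floats still travel as "float storage" words. A minimal sketch of that round trip, assuming longlong comes from pypy.jit.codewriter as in these tests and that 'cpu' and 'looptoken' belong to an already compiled float loop like the ones above:

    from pypy.jit.codewriter import longlong

    def run_float_loop(cpu, looptoken, values):
        # pack Python floats into the storage representation that
        # execute_token() expects, exactly as the updated tests do
        args = [longlong.getfloatstorage(x) for x in values]
        fail = cpu.execute_token(looptoken, *args)
        # results come back the same way and are unpacked with getrealfloat()
        result = longlong.getrealfloat(cpu.get_latest_value_float(0))
        return fail, result
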
faildescr = BasicFailDescr(1) operations.append(ResOperation(rop.FINISH, [], None, descr=faildescr)) - looptoken = LoopToken() + looptoken = JitCellToken() # self.cpu.compile_loop(inputargs, operations, looptoken) # - for i, box in enumerate(inputargs): + args = [] + for box in inputargs: if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, box.getint()) + args.append(box.getint()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, box.getfloatstorage()) + args.append(box.getfloatstorage()) else: assert 0 # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 1 def test_nan_and_infinity(self): @@ -1400,15 +1395,14 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(5))] operations[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) self.cpu.compile_loop(unique_testcase_list, operations, looptoken) - for i, box in enumerate(unique_testcase_list): - self.cpu.set_future_value_float( - i, box.getfloatstorage()) - fail = self.cpu.execute_token(looptoken) + args = [box.getfloatstorage() + for box in unique_testcase_list] + fail = self.cpu.execute_token(looptoken, *args) if fail.identifier != 5 - (expected_id^expected): if fail.identifier == 4: msg = "was taken" @@ -1675,15 +1669,14 @@ exc_tp = xtp exc_ptr = xptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_ref(1) == xptr excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1700,9 +1693,9 @@ exc_tp = ytp exc_ptr = yptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == yptr @@ -1718,14 +1711,13 @@ finish(0) ''' loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == xptr - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 0 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1895,18 +1887,14 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - 
self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 20 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 10 @@ -1940,18 +1928,14 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 42 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 42 @@ -1986,19 +1970,15 @@ ResOperation(rop.FINISH, [f2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, f2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 42.5 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 x = self.cpu.get_latest_value_float(1) @@ -2031,10 +2011,9 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([i1, i2]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1], ops, looptoken) - self.cpu.set_future_value_int(0, ord('G')) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, ord('G')) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == ord('g') @@ -2091,14 +2070,14 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1, i2, i3], ops, looptoken) - self.cpu.set_future_value_int(0, rffi.cast(lltype.Signed, raw)) - self.cpu.set_future_value_int(1, 2) - self.cpu.set_future_value_int(2, 4) - self.cpu.set_future_value_int(3, rffi.cast(lltype.Signed, fn)) + args = [rffi.cast(lltype.Signed, raw), + 2, + 4, + rffi.cast(lltype.Signed, fn)] assert glob.lst == [] - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert len(glob.lst) > 0 lltype.free(raw, flavor='raw') @@ -2147,13 +2126,12 @@ ops += [ ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0)) ] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') - self.cpu.set_future_value_int(0, buflen) - 
self.cpu.set_future_value_int(1, rffi.cast(lltype.Signed, buffer)) - fail = self.cpu.execute_token(looptoken) + args = [buflen, rffi.cast(lltype.Signed, buffer)] + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == len(cwd) assert rffi.charp2strn(buffer, buflen) == cwd @@ -2169,12 +2147,10 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[0].setfailargs([i1]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == -42 print 'step 1 ok' @@ -2183,9 +2159,7 @@ # mark as failing self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr assert self.cpu.get_latest_value_int(0) == 9 print 'step 2 ok' @@ -2201,9 +2175,7 @@ ops[0].setfailargs([]) self.cpu.compile_bridge(faildescr, [i2], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 3 assert self.cpu.get_latest_value_int(0) == 9 print 'step 3 ok' @@ -2212,9 +2184,7 @@ # mark as failing again self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr2 print 'step 4 ok' print '-'*79 @@ -2415,7 +2385,7 @@ i18 = int_add(i17, i9) finish(i18)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 @@ -2423,9 +2393,8 @@ FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, EffectInfo.MOST_GENERAL) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(looptoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(looptoken, *args) assert self.cpu.get_latest_value_int(0) == 55 ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] @@ -2435,11 +2404,10 @@ finish(i11) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(othertoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_latest_value_int(0) == 13 assert called @@ -2471,12 +2439,12 @@ finish(f2)''' loop = parse(ops) done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.3)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.2), + 
longlong.getfloatstorage(2.3)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 2.3 ops = ''' @@ -2486,11 +2454,11 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2499,11 +2467,11 @@ del called[:] self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 3.2 assert not called @@ -2561,12 +2529,12 @@ f2 = float_add(f0, f1) finish(f2)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.35)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(2.35)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.25 + 2.35 assert not called @@ -2578,13 +2546,13 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # normal call_assembler: goes to looptoken - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.25)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(3.25)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2596,7 +2564,7 @@ f2 = float_sub(f0, f1) finish(f2)''' loop = parse(ops) - looptoken2 = LoopToken() + looptoken2 = JitCellToken() looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken2) @@ -2604,10 +2572,9 @@ self.cpu.redirect_call_assembler(looptoken, looptoken2) # now, our call_assembler should go to looptoken2 - self.cpu.set_future_value_float(0, longlong.getfloatstorage(6.0)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(1.5)) - # 6.0-1.5 == 1.25+3.25 - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(6.0), + longlong.getfloatstorage(1.5)] # 6.0-1.5 == 1.25+3.25 + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2958,12 +2925,138 @@ 
ResOperation(rop.FINISH, [p0], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # overflowing value: - self.cpu.set_future_value_int(0, sys.maxint // 4 + 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) assert fail.identifier == excdescr.identifier + exc = self.cpu.grab_exc_value() + assert exc == "memoryerror!" + + def test_compile_loop_with_target(self): + i0 = BoxInt() + i1 = BoxInt() + i2 = BoxInt() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken1 = TargetToken() + targettoken2 = TargetToken() + faildescr = BasicFailDescr(2) + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), + ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr), + ResOperation(rop.LABEL, [i1], None, descr=targettoken2), + ResOperation(rop.INT_GE, [i1, ConstInt(0)], i3), + ResOperation(rop.GUARD_TRUE, [i3], None, descr=BasicFailDescr(3)), + ResOperation(rop.JUMP, [i1], None, descr=targettoken1), + ] + inputargs = [i0] + operations[3].setfailargs([i1]) + operations[6].setfailargs([i1]) + + self.cpu.compile_loop(inputargs, operations, looptoken) + fail = self.cpu.execute_token(looptoken, 2) + assert fail.identifier == 2 + res = self.cpu.get_latest_value_int(0) + assert res == 10 + + inputargs = [i0] + operations = [ + ResOperation(rop.INT_SUB, [i0, ConstInt(20)], i2), + ResOperation(rop.JUMP, [i2], None, descr=targettoken2), + ] + self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) + + fail = self.cpu.execute_token(looptoken, 2) + assert fail.identifier == 3 + res = self.cpu.get_latest_value_int(0) + assert res == -10 + + def test_compile_bridge_with_target(self): + # This test creates a loopy piece of code in a bridge, and builds another + # unrelated loop that ends in a jump directly to this loopy bit of code. + # It catches a case in which we underestimate the needed frame_depth across + # the cross-loop JUMP, because we estimate it based on the frame_depth stored + # in the original loop. 
+ i0 = BoxInt() + i1 = BoxInt() + looptoken1 = JitCellToken() + targettoken1 = TargetToken() + faildescr1 = BasicFailDescr(2) + inputargs = [i0] + operations = [ + ResOperation(rop.INT_LE, [i0, ConstInt(1)], i1), + ResOperation(rop.GUARD_TRUE, [i1], None, descr=faildescr1), + ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(1234)), + ] + operations[1].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken1) + + def func(a, b, c, d, e, f, g, h, i): + assert a + 2 == b + assert a + 4 == c + assert a + 6 == d + assert a + 8 == e + assert a + 10 == f + assert a + 12 == g + assert a + 14 == h + assert a + 16 == i + FPTR = self.Ptr(self.FuncType([lltype.Signed]*9, lltype.Void)) + func_ptr = llhelper(FPTR, func) + cpu = self.cpu + calldescr = cpu.calldescrof(deref(FPTR), (lltype.Signed,)*9, lltype.Void, + EffectInfo.MOST_GENERAL) + funcbox = self.get_funcbox(cpu, func_ptr) + + i0 = BoxInt(); i1 = BoxInt(); i2 = BoxInt(); i3 = BoxInt(); i4 = BoxInt() + i5 = BoxInt(); i6 = BoxInt(); i7 = BoxInt(); i8 = BoxInt(); i9 = BoxInt() + i10 = BoxInt(); i11 = BoxInt(); i12 = BoxInt(); i13 = BoxInt(); i14 = BoxInt() + i15 = BoxInt(); i16 = BoxInt(); i17 = BoxInt(); i18 = BoxInt(); i19 = BoxInt() + i20 = BoxInt() + inputargs = [i0] + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_ADD, [i1, ConstInt(1)], i2), + ResOperation(rop.INT_ADD, [i2, ConstInt(1)], i3), + ResOperation(rop.INT_ADD, [i3, ConstInt(1)], i4), + ResOperation(rop.INT_ADD, [i4, ConstInt(1)], i5), + ResOperation(rop.INT_ADD, [i5, ConstInt(1)], i6), + ResOperation(rop.INT_ADD, [i6, ConstInt(1)], i7), + ResOperation(rop.INT_ADD, [i7, ConstInt(1)], i8), + ResOperation(rop.INT_ADD, [i8, ConstInt(1)], i9), + ResOperation(rop.INT_ADD, [i9, ConstInt(1)], i10), + ResOperation(rop.INT_ADD, [i10, ConstInt(1)], i11), + ResOperation(rop.INT_ADD, [i11, ConstInt(1)], i12), + ResOperation(rop.INT_ADD, [i12, ConstInt(1)], i13), + ResOperation(rop.INT_ADD, [i13, ConstInt(1)], i14), + ResOperation(rop.INT_ADD, [i14, ConstInt(1)], i15), + ResOperation(rop.INT_ADD, [i15, ConstInt(1)], i16), + ResOperation(rop.INT_ADD, [i16, ConstInt(1)], i17), + ResOperation(rop.INT_ADD, [i17, ConstInt(1)], i18), + ResOperation(rop.INT_ADD, [i18, ConstInt(1)], i19), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.INT_LT, [i19, ConstInt(100)], i20), + ResOperation(rop.GUARD_TRUE, [i20], None, descr=BasicFailDescr(42)), + ResOperation(rop.JUMP, [i19], None, descr=targettoken1), + ] + operations[-2].setfailargs([]) + self.cpu.compile_bridge(faildescr1, inputargs, operations, looptoken1) + + looptoken2 = JitCellToken() + inputargs = [BoxInt()] + operations = [ + ResOperation(rop.JUMP, [ConstInt(0)], None, descr=targettoken1), + ] + self.cpu.compile_loop(inputargs, operations, looptoken2) + + fail = self.cpu.execute_token(looptoken2, -9) + assert fail.identifier == 42 class OOtypeBackendTest(BaseBackendTest): diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -3,9 +3,10 @@ from pypy.rlib.rarithmetic import intmask, LONG_BIT from pypy.rpython.lltypesystem import llmemory from pypy.jit.metainterp.history import BasicFailDescr, TreeLoop -from 
pypy.jit.metainterp.history import BoxInt, ConstInt, LoopToken -from pypy.jit.metainterp.history import BoxPtr, ConstPtr +from pypy.jit.metainterp.history import BoxInt, ConstInt, JitCellToken +from pypy.jit.metainterp.history import BoxPtr, ConstPtr, TargetToken from pypy.jit.metainterp.history import BoxFloat, ConstFloat, Const +from pypy.jit.metainterp.history import INT, FLOAT from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.executor import execute_nonspec from pypy.jit.metainterp.resoperation import opname @@ -179,7 +180,7 @@ #print >>s, ' operations[%d].suboperations = [' % i #print >>s, ' ResOperation(rop.FAIL, [%s], None)]' % ( # ', '.join([names[v] for v in op.args])) - print >>s, ' looptoken = LoopToken()' + print >>s, ' looptoken = JitCellToken()' print >>s, ' cpu.compile_loop(inputargs, operations, looptoken)' if hasattr(self.loop, 'inputargs'): for i, v in enumerate(self.loop.inputargs): @@ -525,29 +526,53 @@ startvars.append(BoxFloat(r.random_float_storage())) else: startvars.append(BoxInt(r.random_integer())) + allow_delay = True + else: + allow_delay = False assert len(dict.fromkeys(startvars)) == len(startvars) self.startvars = startvars self.prebuilt_ptr_consts = [] self.r = r - self.build_random_loop(cpu, builder_factory, r, startvars) + self.build_random_loop(cpu, builder_factory, r, startvars, allow_delay) - def build_random_loop(self, cpu, builder_factory, r, startvars): + def build_random_loop(self, cpu, builder_factory, r, startvars, allow_delay): loop = TreeLoop('test_random_function') loop.inputargs = startvars[:] loop.operations = [] - loop.token = LoopToken() - + loop._jitcelltoken = JitCellToken() builder = builder_factory(cpu, loop, startvars[:]) - self.generate_ops(builder, r, loop, startvars) + if allow_delay: + needs_a_label = True + else: + self.insert_label(loop, 0, r) + needs_a_label = False + self.generate_ops(builder, r, loop, startvars, needs_a_label=needs_a_label) self.builder = builder self.loop = loop - cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + dump(loop) + cpu.compile_loop(loop.inputargs, loop.operations, loop._jitcelltoken) - def generate_ops(self, builder, r, loop, startvars): + def insert_label(self, loop, position, r): + assert not hasattr(loop, '_targettoken') + for i in range(position): + op = loop.operations[i] + if (not op.has_no_side_effect() + or not isinstance(op.result, (BoxInt, BoxFloat))): + position = i + break # cannot move the LABEL later + randompos = r.randrange(0, len(self.startvars)+1) + self.startvars.insert(randompos, op.result) + loop._targettoken = TargetToken() + loop.operations.insert(position, ResOperation(rop.LABEL, self.startvars, None, + loop._targettoken)) + + def generate_ops(self, builder, r, loop, startvars, needs_a_label=False): block_length = pytest.config.option.block_length + istart = 0 for i in range(block_length): + istart = len(loop.operations) try: op = r.choice(builder.OPERATIONS) op.filter(builder) @@ -556,6 +581,12 @@ pass if builder.should_fail_by is not None: break + if needs_a_label and r.random() < 0.2: + self.insert_label(loop, istart, r) + needs_a_label = False + if needs_a_label: + self.insert_label(loop, istart, r) + endvars = [] used_later = {} for op in loop.operations: @@ -581,6 +612,22 @@ if pytest.config.option.output: builder.print_loop() + def runjitcelltoken(self): + if self.startvars == self.loop.inputargs: + return self.loop._jitcelltoken + if not hasattr(self, '_initialjumploop_celltoken'): + self._initialjumploop_celltoken 
= JitCellToken() + args = [] + for box in self.startvars: + if box not in self.loop.inputargs: + box = box.constbox() + args.append(box) + self.cpu.compile_loop(self.loop.inputargs, + [ResOperation(rop.JUMP, args, None, + descr=self.loop._targettoken)], + self._initialjumploop_celltoken) + return self._initialjumploop_celltoken + def get_fail_args(self): if self.should_fail_by.is_guard(): assert self.should_fail_by.getfailargs() is not None @@ -608,14 +655,8 @@ exc = cpu.grab_exc_value() assert not exc - for i, box in enumerate(self.startvars): - if isinstance(box, BoxInt): - cpu.set_future_value_int(i, box.value) - elif isinstance(box, BoxFloat): - cpu.set_future_value_float(i, box.value) - else: - raise NotImplementedError(box) - fail = cpu.execute_token(self.loop.token) + arguments = [box.value for box in self.loop.inputargs] + fail = cpu.execute_token(self.runjitcelltoken(), *arguments) assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): if isinstance(v, (BoxFloat, ConstFloat)): @@ -676,33 +717,55 @@ # to build_bridge().) # First make up the other loop... - subset = bridge_builder.subset_of_intvars(r) - subset = [i for i in subset if i in fail_args] - if len(subset) == 0: - return False + # + # New restriction: must have the same argument count and types + # as the original loop + subset = [] + for box in self.loop.inputargs: + srcbox = r.choice(fail_args) + if srcbox.type != box.type: + if box.type == INT: + srcbox = ConstInt(r.random_integer()) + elif box.type == FLOAT: + srcbox = ConstFloat(r.random_float_storage()) + else: + raise AssertionError(box.type) + subset.append(srcbox) + # args = [x.clonebox() for x in subset] rl = RandomLoop(self.builder.cpu, self.builder.fork, r, args) + dump(rl.loop) self.cpu.compile_loop(rl.loop.inputargs, rl.loop.operations, - rl.loop.token) + rl.loop._jitcelltoken) # done self.should_fail_by = rl.should_fail_by self.expected = rl.expected assert len(rl.loop.inputargs) == len(args) # The new bridge's execution will end normally at its FINISH. # Just replace the FINISH with the JUMP to the new loop. 
- jump_op = ResOperation(rop.JUMP, subset, None, descr=rl.loop.token) + jump_op = ResOperation(rop.JUMP, subset, None, + descr=rl.loop._targettoken) subloop.operations[-1] = jump_op self.guard_op = rl.guard_op self.prebuilt_ptr_consts += rl.prebuilt_ptr_consts - self.loop.token.record_jump_to(rl.loop.token) + self.loop._jitcelltoken.record_jump_to(rl.loop._jitcelltoken) self.dont_generate_more = True if r.random() < .05: return False + dump(subloop) self.builder.cpu.compile_bridge(fail_descr, fail_args, - subloop.operations, self.loop.token) + subloop.operations, + self.loop._jitcelltoken) return True +def dump(loop): + print >> sys.stderr, loop + if hasattr(loop, 'inputargs'): + print >> sys.stderr, '\t', loop.inputargs + for op in loop.operations: + print >> sys.stderr, '\t', op + def check_random_function(cpu, BuilderClass, r, num=None, max=None): loop = RandomLoop(cpu, BuilderClass, r) while True: diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -2,8 +2,8 @@ from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from pypy.jit.metainterp.history import Const, Box, BoxInt, ConstInt -from pypy.jit.metainterp.history import (AbstractFailDescr, INT, REF, FLOAT, - LoopToken) +from pypy.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT +from pypy.jit.metainterp.history import JitCellToken from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper @@ -38,6 +38,7 @@ from pypy.jit.backend.x86.jump import remap_frame_layout from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter import longlong +from pypy.rlib.rarithmetic import intmask # darwin requires the stack to be 16 bytes aligned on calls. 
Same for gcc 4.5.0, # better safe than sorry @@ -69,10 +70,6 @@ self.cpu = cpu self.verbose = False self.rtyper = cpu.rtyper - self.malloc_func_addr = 0 - self.malloc_array_func_addr = 0 - self.malloc_str_func_addr = 0 - self.malloc_unicode_func_addr = 0 self.fail_boxes_int = values_array(lltype.Signed, failargs_limit) self.fail_boxes_ptr = values_array(llmemory.GCREF, failargs_limit) self.fail_boxes_float = values_array(longlong.FLOATSTORAGE, @@ -107,20 +104,6 @@ # the address of the function called by 'new' gc_ll_descr = self.cpu.gc_ll_descr gc_ll_descr.initialize() - ll_new = gc_ll_descr.get_funcptr_for_new() - self.malloc_func_addr = rffi.cast(lltype.Signed, ll_new) - if gc_ll_descr.get_funcptr_for_newarray is not None: - ll_new_array = gc_ll_descr.get_funcptr_for_newarray() - self.malloc_array_func_addr = rffi.cast(lltype.Signed, - ll_new_array) - if gc_ll_descr.get_funcptr_for_newstr is not None: - ll_new_str = gc_ll_descr.get_funcptr_for_newstr() - self.malloc_str_func_addr = rffi.cast(lltype.Signed, - ll_new_str) - if gc_ll_descr.get_funcptr_for_newunicode is not None: - ll_new_unicode = gc_ll_descr.get_funcptr_for_newunicode() - self.malloc_unicode_func_addr = rffi.cast(lltype.Signed, - ll_new_unicode) self.memcpy_addr = self.cpu.cast_ptr_to_int(support.memcpy_fn) self._build_failure_recovery(False) self._build_failure_recovery(True) @@ -152,14 +135,13 @@ allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, allblocks) + self.target_tokens_currently_compiling = {} def teardown(self): self.pending_guard_tokens = None if WORD == 8: self.pending_memoryerror_trampoline_from = None self.mc = None - self.looppos = -1 - self.currently_compiling_loop = None self.current_clt = None def finish_once(self): @@ -275,7 +257,8 @@ # self.mc = codebuf.MachineCodeBlockWrapper() # call on_leave_jitted_save_exc() - addr = self.cpu.get_on_leave_jitted_int(save_exception=True) + addr = self.cpu.get_on_leave_jitted_int(save_exception=True, + default_to_memoryerror=True) self.mc.CALL(imm(addr)) self.mc.MOV_ri(eax.value, self.cpu.propagate_exception_v) self._call_footer() @@ -310,12 +293,11 @@ mc.MOVSD_sx(8*i, i) # xmm0 to xmm7 # if IS_X86_32: - mc.LEA_rb(eax.value, +8) stack_size += 2*WORD mc.PUSH_r(eax.value) # alignment - mc.PUSH_r(eax.value) + mc.PUSH_r(esp.value) elif IS_X86_64: - mc.LEA_rb(edi.value, +16) + mc.MOV_rr(edi.value, esp.value) # # esp is now aligned to a multiple of 16 again mc.CALL(imm(slowpathaddr)) @@ -326,7 +308,7 @@ jnz_location = mc.get_relative_pos() # if IS_X86_32: - mc.ADD_ri(esp.value, 2*WORD) + mc.ADD_ri(esp.value, 2*WORD) # cancel the two PUSHes above elif IS_X86_64: # restore the registers for i in range(7, -1, -1): @@ -422,12 +404,8 @@ def assemble_loop(self, loopname, inputargs, operations, looptoken, log): '''adds the following attributes to looptoken: - _x86_loop_code (an integer giving an address) - _x86_bootstrap_code (an integer giving an address) - _x86_direct_bootstrap_code ( " " " " ) - _x86_frame_depth - _x86_param_depth - _x86_arglocs + _x86_function_addr (address of the generated func, as an int) + _x86_loop_code (debug: addr of the start of the ResOps) _x86_debug_checksum ''' # XXX this function is too longish and contains some code @@ -443,37 +421,35 @@ assert len(set(inputargs)) == len(inputargs) self.setup(looptoken) - self.currently_compiling_loop = looptoken if log: self._register_counter(False, looptoken.number) operations = self._inject_debugging_code(looptoken, operations) regalloc = 
RegAlloc(self, self.cpu.translate_support_code) - arglocs, operations = regalloc.prepare_loop(inputargs, operations, - looptoken, clt.allgcrefs) - looptoken._x86_arglocs = arglocs - - bootstrappos = self.mc.get_relative_pos() - stackadjustpos = self._assemble_bootstrap_code(inputargs, arglocs) - self.looppos = self.mc.get_relative_pos() - looptoken._x86_frame_depth = -1 # temporarily - looptoken._x86_param_depth = -1 # temporarily + # + self._call_header_with_stack_check() + stackadjustpos = self._patchable_stackadjust() + clt._debug_nbargs = len(inputargs) + operations = regalloc.prepare_loop(inputargs, operations, + looptoken, clt.allgcrefs) + looppos = self.mc.get_relative_pos() + looptoken._x86_loop_code = looppos + clt.frame_depth = -1 # temporarily + clt.param_depth = -1 # temporarily frame_depth, param_depth = self._assemble(regalloc, operations) - looptoken._x86_frame_depth = frame_depth - looptoken._x86_param_depth = param_depth - - directbootstrappos = self.mc.get_relative_pos() - self._assemble_bootstrap_direct_call(arglocs, self.looppos, - frame_depth+param_depth) + clt.frame_depth = frame_depth + clt.param_depth = param_depth + # + size_excluding_failure_stuff = self.mc.get_relative_pos() self.write_pending_failure_recoveries() - fullsize = self.mc.get_relative_pos() + full_size = self.mc.get_relative_pos() # rawstart = self.materialize_loop(looptoken) debug_start("jit-backend-addr") debug_print("Loop %d (%s) has address %x to %x (bootstrap %x)" % ( looptoken.number, loopname, - rawstart + self.looppos, - rawstart + directbootstrappos, + rawstart + looppos, + rawstart + size_excluding_failure_stuff, rawstart)) debug_stop("jit-backend-addr") self._patch_stackadjust(rawstart + stackadjustpos, @@ -484,18 +460,17 @@ if not we_are_translated(): # used only by looptoken.dump() -- useful in tests looptoken._x86_rawstart = rawstart - looptoken._x86_fullsize = fullsize + looptoken._x86_fullsize = full_size looptoken._x86_ops_offset = ops_offset + looptoken._x86_function_addr = rawstart - looptoken._x86_bootstrap_code = rawstart + bootstrappos - looptoken._x86_loop_code = rawstart + self.looppos - looptoken._x86_direct_bootstrap_code = rawstart + directbootstrappos + self.fixup_target_tokens(rawstart) self.teardown() # oprofile support if self.cpu.profile_agent is not None: name = "Loop # %s: %s" % (looptoken.number, loopname) self.cpu.profile_agent.native_code_written(name, - rawstart, fullsize) + rawstart, full_size) return ops_offset def assemble_bridge(self, faildescr, inputargs, operations, @@ -548,6 +523,9 @@ # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset + self.fixup_target_tokens(rawstart) + self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth) + self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -668,6 +646,11 @@ mc.copy_to_raw_memory(adr_target) faildescr._x86_adr_jump_offset = 0 # means "patched" + def fixup_target_tokens(self, rawstart): + for targettoken in self.target_tokens_currently_compiling: + targettoken._x86_loop_code += rawstart + self.target_tokens_currently_compiling = None + @specialize.argtype(1) def _inject_debugging_code(self, looptoken, operations): if self._debug: @@ -685,20 +668,24 @@ ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), ResOperation(rop.SETFIELD_RAW, [c_adr, box2], None, descr=self.debug_counter_descr)] - operations = ops + operations + if 
operations[0].getopnum() == rop.LABEL: + operations = [operations[0]] + ops + operations[1:] + else: + operations = ops + operations return operations def _assemble(self, regalloc, operations): self._regalloc = regalloc + regalloc.compute_hint_frame_locations(operations) regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging - frame_depth = regalloc.fm.frame_depth + frame_depth = regalloc.fm.get_frame_depth() param_depth = regalloc.param_depth jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: - target_frame_depth = jump_target_descr._x86_frame_depth - target_param_depth = jump_target_descr._x86_param_depth + target_frame_depth = jump_target_descr._x86_clt.frame_depth + target_param_depth = jump_target_descr._x86_clt.param_depth frame_depth = max(frame_depth, target_frame_depth) param_depth = max(param_depth, target_param_depth) return frame_depth, param_depth @@ -793,152 +780,21 @@ self.mc.MOV_ri(ebx.value, rst) # MOV ebx, rootstacktop self.mc.SUB_mi8((ebx.value, 0), 2*WORD) # SUB [ebx], 2*WORD - def _assemble_bootstrap_direct_call(self, arglocs, jmppos, stackdepth): - if IS_X86_64: - return self._assemble_bootstrap_direct_call_64(arglocs, jmppos, stackdepth) - # XXX pushing ebx esi and edi is a bit pointless, since we store - # all regsiters anyway, for the case of guard_not_forced - # XXX this can be improved greatly. Right now it'll behave like - # a normal call - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - offset = 2 * WORD - tmp = eax - xmmtmp = xmm0 - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert not loc.is_xmm - self.mc.MOV_rb(loc.value, offset) - else: - self.mc.MOV_rb(tmp.value, offset) - self.mc.MOV(loc, tmp) - offset += WORD - loc = floatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert loc.is_xmm - self.mc.MOVSD_xb(loc.value, offset) - else: - self.mc.MOVSD_xb(xmmtmp.value, offset) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - offset += 2 * WORD - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - - def _assemble_bootstrap_direct_call_64(self, arglocs, jmppos, stackdepth): - # XXX: Very similar to _emit_call_64 - - src_locs = [] - dst_locs = [] - xmm_src_locs = [] - xmm_dst_locs = [] - get_from_stack = [] - - # In reverse order for use with pop() - unused_gpr = [r9, r8, ecx, edx, esi, edi] - unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] - - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - - # The lists are padded with Nones - assert len(nonfloatlocs) == len(floatlocs) - - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if len(unused_gpr) > 0: - src_locs.append(unused_gpr.pop()) - dst_locs.append(loc) - else: - get_from_stack.append((loc, False)) - - floc = floatlocs[i] - if floc is not None: - if len(unused_xmm) > 0: - xmm_src_locs.append(unused_xmm.pop()) - xmm_dst_locs.append(floc) - else: - get_from_stack.append((floc, True)) - - remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) - remap_frame_layout(self, xmm_src_locs, xmm_dst_locs, X86_64_XMM_SCRATCH_REG) - - for i in 
range(len(get_from_stack)): - loc, is_xmm = get_from_stack[i] - if is_xmm: - self.mc.MOVSD_xb(X86_64_XMM_SCRATCH_REG.value, (2 + i) * WORD) - self.mc.MOVSD(loc, X86_64_XMM_SCRATCH_REG) - else: - self.mc.MOV_rb(X86_64_SCRATCH_REG.value, (2 + i) * WORD) - # XXX: We're assuming that "loc" won't require regloc to - # clobber the scratch register - self.mc.MOV(loc, X86_64_SCRATCH_REG) - - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking - oldnonfloatlocs, oldfloatlocs = oldlooptoken._x86_arglocs - newnonfloatlocs, newfloatlocs = newlooptoken._x86_arglocs - assert len(oldnonfloatlocs) == len(newnonfloatlocs) - assert len(oldfloatlocs) == len(newfloatlocs) + old_nbargs = oldlooptoken.compiled_loop_token._debug_nbargs + new_nbargs = newlooptoken.compiled_loop_token._debug_nbargs + assert old_nbargs == new_nbargs # we overwrite the instructions at the old _x86_direct_bootstrap_code # to start with a JMP to the new _x86_direct_bootstrap_code. # Ideally we should rather patch all existing CALLs, but well. - oldadr = oldlooptoken._x86_direct_bootstrap_code - target = newlooptoken._x86_direct_bootstrap_code + oldadr = oldlooptoken._x86_function_addr + target = newlooptoken._x86_function_addr mc = codebuf.MachineCodeBlockWrapper() mc.JMP(imm(target)) + assert mc.get_relative_pos() <= 13 # keep in sync with prepare_loop() mc.copy_to_raw_memory(oldadr) - def _assemble_bootstrap_code(self, inputargs, arglocs): - nonfloatlocs, floatlocs = arglocs - self._call_header() - stackadjustpos = self._patchable_stackadjust() - tmp = eax - xmmtmp = xmm0 - self.mc.begin_reuse_scratch_register() - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is None: - continue - if isinstance(loc, RegLoc): - target = loc - else: - target = tmp - if inputargs[i].type == REF: - adr = self.fail_boxes_ptr.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - self.mc.MOV(heap(adr), imm0) - else: - adr = self.fail_boxes_int.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - if target is not loc: - assert isinstance(loc, StackLoc) - self.mc.MOV_br(loc.value, target.value) - for i in range(len(floatlocs)): - loc = floatlocs[i] - if loc is None: - continue - adr = self.fail_boxes_float.get_addr_for_num(i) - if isinstance(loc, RegLoc): - self.mc.MOVSD(loc, heap(adr)) - else: - self.mc.MOVSD(xmmtmp, heap(adr)) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - self.mc.end_reuse_scratch_register() - return stackadjustpos - def dump(self, text): if not self.verbose: return @@ -965,7 +821,7 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.SUB_ri(esp.value, 8) # = size of doubles self.mc.MOVSD_sx(0, loc.value) - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.PUSH_b(get_ebp_ofs(loc.position)) self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) @@ -976,13 +832,25 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.MOVSD_xs(loc.value, 0) self.mc.ADD_ri(esp.value, 8) # = size of doubles - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.POP_b(get_ebp_ofs(loc.position + 1)) self.mc.POP_b(get_ebp_ofs(loc.position)) else: self.mc.POP(loc) + def regalloc_immedmem2mem(self, from_loc, to_loc): + # move a 
ConstFloatLoc directly to a StackLoc, as two MOVs + # (even on x86-64, because the immediates are encoded as 32 bits) + assert isinstance(from_loc, ConstFloatLoc) + assert isinstance(to_loc, StackLoc) + low_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[0] + high_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[1] + low_part = intmask(low_part) + high_part = intmask(high_part) + self.mc.MOV32_bi(to_loc.value, low_part) + self.mc.MOV32_bi(to_loc.value + 4, high_part) + def regalloc_perform(self, op, arglocs, resloc): genop_list[op.getopnum()](self, op, arglocs, resloc) @@ -1134,18 +1002,18 @@ self.mc.MOVSD_sx(p, loc.value) else: self.mc.MOV_sr(p, loc.value) - p += round_up_to_4(loc.width) + p += loc.get_width() p = 0 for i in range(start, n): loc = arglocs[i] if not isinstance(loc, RegLoc): - if loc.width == 8: + if loc.get_width() == 8: self.mc.MOVSD(xmm0, loc) self.mc.MOVSD_sx(p, xmm0.value) else: self.mc.MOV(tmp, loc) self.mc.MOV_sr(p, tmp.value) - p += round_up_to_4(loc.width) + p += loc.get_width() self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) @@ -1472,46 +1340,10 @@ self.mc.SHR_ri(resloc.value, 7) self.mc.AND_ri(resloc.value, 1) - def genop_new_with_vtable(self, op, arglocs, result_loc): - assert result_loc is eax - loc_vtable = arglocs[-1] - assert isinstance(loc_vtable, ImmedLoc) - arglocs = arglocs[:-1] - self.call(self.malloc_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - self.set_vtable(eax, loc_vtable) + # ---------- - def set_vtable(self, loc, loc_vtable): - if self.cpu.vtable_offset is not None: - assert isinstance(loc, RegLoc) - assert isinstance(loc_vtable, ImmedLoc) - self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) - - def set_new_array_length(self, loc, ofs_length, loc_num_elem): - assert isinstance(loc, RegLoc) - assert isinstance(loc_num_elem, ImmedLoc) - self.mc.MOV(mem(loc, ofs_length), loc_num_elem) - - # XXX genop_new is abused for all varsized mallocs with Boehm, for now - # (instead of genop_new_array, genop_newstr, genop_newunicode) - def genop_new(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_new_array(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_array_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_newstr(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_str_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_newunicode(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_unicode_func_addr, arglocs, eax) + def genop_call_malloc_gc(self, op, arglocs, result_loc): + self.genop_call(op, arglocs, result_loc) self.propagate_memoryerror_if_eax_is_null() def propagate_memoryerror_if_eax_is_null(self): @@ -1882,10 +1714,10 @@ DESCR_INT = 0x01 DESCR_FLOAT = 0x02 DESCR_SPECIAL = 0x03 - # XXX: 4*8 works on i386, should we optimize for that case? 
- CODE_FROMSTACK = 4*16 + CODE_FROMSTACK = 4 * (8 + 8*IS_X86_64) CODE_STOP = 0 | DESCR_SPECIAL CODE_HOLE = 4 | DESCR_SPECIAL + CODE_INPUTARG = 8 | DESCR_SPECIAL def write_failure_recovery_description(self, mc, failargs, locs): for i in range(len(failargs)): @@ -1901,7 +1733,11 @@ raise AssertionError("bogus kind") loc = locs[i] if isinstance(loc, StackLoc): - n = self.CODE_FROMSTACK//4 + loc.position From noreply at buildbot.pypy.org Mon Dec 19 23:05:22 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 19 Dec 2011 23:05:22 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: merged upstream Message-ID: <20111219220522.7AC8B823F8@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: refactor-signature Changeset: r50727:f6d18f175eda Date: 2011-12-19 16:04 -0600 http://bitbucket.org/pypy/pypy/changeset/f6d18f175eda/ Log: merged upstream diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1220,6 +1220,7 @@ @jit.unroll_safe def __init__(self, arr): + arr = arr.get_concrete() size = 1 for sh in arr.shape: size *= sh From noreply at buildbot.pypy.org Mon Dec 19 23:05:23 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 19 Dec 2011 23:05:23 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: mergedupstream Message-ID: <20111219220523.A3281823F8@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: refactor-signature Changeset: r50728:a3d170151dfd Date: 2011-12-19 16:05 -0600 http://bitbucket.org/pypy/pypy/changeset/a3d170151dfd/ Log: mergedupstream diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1187,6 +1187,11 @@ a = array([1, 2, 3]) assert dot(a.flat, a.flat) == 14 + def test_flatiter_varray(self): + from numpypy import ones + a = ones((2, 2)) + assert list(((a + a).flat)) == [2, 2, 2, 2] + def test_slice_copy(self): from numpypy import zeros a = zeros((10, 10)) From noreply at buildbot.pypy.org Mon Dec 19 23:05:46 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Dec 2011 23:05:46 +0100 (CET) Subject: [pypy-commit] pypy better-jit-hooks: merge default Message-ID: <20111219220546.8F68D823F8@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: better-jit-hooks Changeset: r50729:194b4bb50224 Date: 2011-12-20 00:05 +0200 http://bitbucket.org/pypy/pypy/changeset/194b4bb50224/ Log: merge default diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -51,6 +51,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." 
+ state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \"%s\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \"%s\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -94,17 +112,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." - for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \"%s\" missing from %s" - err = err % (missing, host) - w_err = space.wrap(err) - raise OperationError(space.w_TypeError, w_err) - raise AssertionError("should not reach here") class mod(AST): @@ -112,7 +119,6 @@ class Module(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -128,7 +134,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Module') + self.missing_field(space, ['body'], 'Module') else: pass w_list = self.w_body @@ -145,7 +151,6 @@ class Interactive(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -161,7 +166,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Interactive') + self.missing_field(space, ['body'], 'Interactive') else: pass w_list = self.w_body @@ -178,7 +183,6 @@ class Expression(mod): - def __init__(self, body): self.body = body self.initialization_state = 1 @@ -192,7 +196,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Expression') + self.missing_field(space, ['body'], 'Expression') else: pass self.body.sync_app_attrs(space) @@ -200,7 +204,6 @@ class Suite(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -216,7 +219,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Suite') + self.missing_field(space, ['body'], 'Suite') else: pass w_list = self.w_body @@ -232,15 +235,13 @@ class stmt(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class FunctionDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, args, body, decorator_list, lineno, col_offset): self.name = name self.args = args @@ -264,7 +265,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'args', 'body', 'decorator_list', 'lineno', 'col_offset'], 'FunctionDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef') else: pass self.args.sync_app_attrs(space) @@ -292,9 +293,6 @@ class ClassDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, bases, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases @@ -320,7 +318,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'bases', 'body', 'decorator_list', 
'lineno', 'col_offset'], 'ClassDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef') else: pass w_list = self.w_bases @@ -357,9 +355,6 @@ class Return(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -374,10 +369,10 @@ return visitor.visit_Return(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Return') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Return') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -385,9 +380,6 @@ class Delete(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, targets, lineno, col_offset): self.targets = targets self.w_targets = None @@ -404,7 +396,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['targets', 'lineno', 'col_offset'], 'Delete') + self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete') else: pass w_list = self.w_targets @@ -421,9 +413,6 @@ class Assign(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, targets, value, lineno, col_offset): self.targets = targets self.w_targets = None @@ -442,7 +431,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['targets', 'value', 'lineno', 'col_offset'], 'Assign') + self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') else: pass w_list = self.w_targets @@ -460,9 +449,6 @@ class AugAssign(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, target, op, value, lineno, col_offset): self.target = target self.op = op @@ -480,7 +466,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['target', 'op', 'value', 'lineno', 'col_offset'], 'AugAssign') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') else: pass self.target.sync_app_attrs(space) @@ -489,9 +475,6 @@ class Print(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, dest, values, nl, lineno, col_offset): self.dest = dest self.values = values @@ -511,10 +494,10 @@ return visitor.visit_Print(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 30: - missing_field(space, self.initialization_state, [None, 'values', 'nl', 'lineno', 'col_offset'], 'Print') + if (self.initialization_state & ~4) ^ 27: + self.missing_field(space, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.dest = None if self.dest: self.dest.sync_app_attrs(space) @@ -532,9 +515,6 @@ class For(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, target, iter, body, orelse, lineno, col_offset): self.target = target self.iter = iter @@ -559,7 +539,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['target', 'iter', 'body', 'orelse', 'lineno', 'col_offset'], 'For') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 
'orelse'], 'For') else: pass self.target.sync_app_attrs(space) @@ -588,9 +568,6 @@ class While(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -613,7 +590,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'While') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') else: pass self.test.sync_app_attrs(space) @@ -641,9 +618,6 @@ class If(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -666,7 +640,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'If') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') else: pass self.test.sync_app_attrs(space) @@ -694,9 +668,6 @@ class With(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, context_expr, optional_vars, body, lineno, col_offset): self.context_expr = context_expr self.optional_vars = optional_vars @@ -717,10 +688,10 @@ return visitor.visit_With(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 29: - missing_field(space, self.initialization_state, ['context_expr', None, 'body', 'lineno', 'col_offset'], 'With') + if (self.initialization_state & ~8) ^ 23: + self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.optional_vars = None self.context_expr.sync_app_attrs(space) if self.optional_vars: @@ -739,9 +710,6 @@ class Raise(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, inst, tback, lineno, col_offset): self.type = type self.inst = inst @@ -762,14 +730,14 @@ return visitor.visit_Raise(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~7) ^ 24: - missing_field(space, self.initialization_state, [None, None, None, 'lineno', 'col_offset'], 'Raise') + if (self.initialization_state & ~28) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.inst = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.tback = None if self.type: self.type.sync_app_attrs(space) @@ -781,9 +749,6 @@ class TryExcept(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, handlers, orelse, lineno, col_offset): self.body = body self.w_body = None @@ -808,7 +773,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['body', 'handlers', 'orelse', 'lineno', 'col_offset'], 'TryExcept') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') else: pass w_list = self.w_body @@ -845,9 +810,6 @@ class TryFinally(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, body, finalbody, lineno, col_offset): self.body = body self.w_body = None @@ -868,7 +830,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 
15: - missing_field(space, self.initialization_state, ['body', 'finalbody', 'lineno', 'col_offset'], 'TryFinally') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') else: pass w_list = self.w_body @@ -895,9 +857,6 @@ class Assert(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, test, msg, lineno, col_offset): self.test = test self.msg = msg @@ -914,10 +873,10 @@ return visitor.visit_Assert(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 13: - missing_field(space, self.initialization_state, ['test', None, 'lineno', 'col_offset'], 'Assert') + if (self.initialization_state & ~8) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.msg = None self.test.sync_app_attrs(space) if self.msg: @@ -926,9 +885,6 @@ class Import(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -945,7 +901,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Import') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import') else: pass w_list = self.w_names @@ -962,9 +918,6 @@ class ImportFrom(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, module, names, level, lineno, col_offset): self.module = module self.names = names @@ -982,12 +935,12 @@ return visitor.visit_ImportFrom(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~5) ^ 26: - missing_field(space, self.initialization_state, [None, 'names', None, 'lineno', 'col_offset'], 'ImportFrom') + if (self.initialization_state & ~20) ^ 11: + self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.module = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.level = 0 w_list = self.w_names if w_list is not None: @@ -1003,9 +956,6 @@ class Exec(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, globals, locals, lineno, col_offset): self.body = body self.globals = globals @@ -1025,12 +975,12 @@ return visitor.visit_Exec(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~6) ^ 25: - missing_field(space, self.initialization_state, ['body', None, None, 'lineno', 'col_offset'], 'Exec') + if (self.initialization_state & ~24) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'body', None, None], 'Exec') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.globals = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.locals = None self.body.sync_app_attrs(space) if self.globals: @@ -1041,9 +991,6 @@ class Global(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -1058,7 +1005,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Global') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') else: pass w_list = self.w_names @@ -1072,9 +1019,6 @@ class Expr(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def 
__init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -1089,7 +1033,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Expr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr') else: pass self.value.sync_app_attrs(space) @@ -1097,9 +1041,6 @@ class Pass(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1112,16 +1053,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Pass') + self.missing_field(space, ['lineno', 'col_offset'], 'Pass') else: pass class Break(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1134,16 +1072,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Break') + self.missing_field(space, ['lineno', 'col_offset'], 'Break') else: pass class Continue(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1156,21 +1091,19 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Continue') + self.missing_field(space, ['lineno', 'col_offset'], 'Continue') else: pass class expr(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class BoolOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, values, lineno, col_offset): self.op = op self.values = values @@ -1188,7 +1121,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'values', 'lineno', 'col_offset'], 'BoolOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp') else: pass w_list = self.w_values @@ -1205,9 +1138,6 @@ class BinOp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, op, right, lineno, col_offset): self.left = left self.op = op @@ -1225,7 +1155,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'op', 'right', 'lineno', 'col_offset'], 'BinOp') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'op', 'right'], 'BinOp') else: pass self.left.sync_app_attrs(space) @@ -1234,9 +1164,6 @@ class UnaryOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, operand, lineno, col_offset): self.op = op self.operand = operand @@ -1252,7 +1179,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'operand', 'lineno', 'col_offset'], 'UnaryOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'operand'], 'UnaryOp') else: pass self.operand.sync_app_attrs(space) @@ -1260,9 +1187,6 @@ class Lambda(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, args, body, lineno, col_offset): self.args = args self.body = body @@ -1279,7 +1203,7 @@ def sync_app_attrs(self, space): if 
(self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['args', 'body', 'lineno', 'col_offset'], 'Lambda') + self.missing_field(space, ['lineno', 'col_offset', 'args', 'body'], 'Lambda') else: pass self.args.sync_app_attrs(space) @@ -1288,9 +1212,6 @@ class IfExp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -1309,7 +1230,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'IfExp') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'IfExp') else: pass self.test.sync_app_attrs(space) @@ -1319,9 +1240,6 @@ class Dict(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, keys, values, lineno, col_offset): self.keys = keys self.w_keys = None @@ -1342,7 +1260,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['keys', 'values', 'lineno', 'col_offset'], 'Dict') + self.missing_field(space, ['lineno', 'col_offset', 'keys', 'values'], 'Dict') else: pass w_list = self.w_keys @@ -1369,9 +1287,6 @@ class Set(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, elts, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1388,7 +1303,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['elts', 'lineno', 'col_offset'], 'Set') + self.missing_field(space, ['lineno', 'col_offset', 'elts'], 'Set') else: pass w_list = self.w_elts @@ -1405,9 +1320,6 @@ class ListComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1426,7 +1338,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'ListComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'ListComp') else: pass self.elt.sync_app_attrs(space) @@ -1444,9 +1356,6 @@ class SetComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1465,7 +1374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'SetComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'SetComp') else: pass self.elt.sync_app_attrs(space) @@ -1483,9 +1392,6 @@ class DictComp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, key, value, generators, lineno, col_offset): self.key = key self.value = value @@ -1506,7 +1412,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['key', 'value', 'generators', 'lineno', 'col_offset'], 'DictComp') + self.missing_field(space, ['lineno', 'col_offset', 'key', 'value', 'generators'], 'DictComp') else: pass self.key.sync_app_attrs(space) @@ -1525,9 +1431,6 @@ class GeneratorExp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1546,7 +1449,7 @@ def sync_app_attrs(self, space): 
if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'GeneratorExp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'GeneratorExp') else: pass self.elt.sync_app_attrs(space) @@ -1564,9 +1467,6 @@ class Yield(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1581,10 +1481,10 @@ return visitor.visit_Yield(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Yield') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Yield') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -1592,9 +1492,6 @@ class Compare(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, ops, comparators, lineno, col_offset): self.left = left self.ops = ops @@ -1615,7 +1512,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'ops', 'comparators', 'lineno', 'col_offset'], 'Compare') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'ops', 'comparators'], 'Compare') else: pass self.left.sync_app_attrs(space) @@ -1640,9 +1537,6 @@ class Call(expr): - _lineno_mask = 32 - _col_offset_mask = 64 - def __init__(self, func, args, keywords, starargs, kwargs, lineno, col_offset): self.func = func self.args = args @@ -1670,12 +1564,12 @@ return visitor.visit_Call(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~24) ^ 103: - missing_field(space, self.initialization_state, ['func', 'args', 'keywords', None, None, 'lineno', 'col_offset'], 'Call') + if (self.initialization_state & ~96) ^ 31: + self.missing_field(space, ['lineno', 'col_offset', 'func', 'args', 'keywords', None, None], 'Call') else: - if not self.initialization_state & 8: + if not self.initialization_state & 32: self.starargs = None - if not self.initialization_state & 16: + if not self.initialization_state & 64: self.kwargs = None self.func.sync_app_attrs(space) w_list = self.w_args @@ -1706,9 +1600,6 @@ class Repr(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1723,7 +1614,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Repr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Repr') else: pass self.value.sync_app_attrs(space) @@ -1731,9 +1622,6 @@ class Num(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, n, lineno, col_offset): self.n = n expr.__init__(self, lineno, col_offset) @@ -1747,16 +1635,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['n', 'lineno', 'col_offset'], 'Num') + self.missing_field(space, ['lineno', 'col_offset', 'n'], 'Num') else: pass class Str(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, s, lineno, col_offset): self.s = s expr.__init__(self, lineno, col_offset) @@ -1770,16 +1655,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 
7: - missing_field(space, self.initialization_state, ['s', 'lineno', 'col_offset'], 'Str') + self.missing_field(space, ['lineno', 'col_offset', 's'], 'Str') else: pass class Attribute(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, attr, ctx, lineno, col_offset): self.value = value self.attr = attr @@ -1796,7 +1678,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'attr', 'ctx', 'lineno', 'col_offset'], 'Attribute') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'attr', 'ctx'], 'Attribute') else: pass self.value.sync_app_attrs(space) @@ -1804,9 +1686,6 @@ class Subscript(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, slice, ctx, lineno, col_offset): self.value = value self.slice = slice @@ -1824,7 +1703,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'slice', 'ctx', 'lineno', 'col_offset'], 'Subscript') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'slice', 'ctx'], 'Subscript') else: pass self.value.sync_app_attrs(space) @@ -1833,9 +1712,6 @@ class Name(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, id, ctx, lineno, col_offset): self.id = id self.ctx = ctx @@ -1850,16 +1726,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['id', 'ctx', 'lineno', 'col_offset'], 'Name') + self.missing_field(space, ['lineno', 'col_offset', 'id', 'ctx'], 'Name') else: pass class List(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1877,7 +1750,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'List') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'List') else: pass w_list = self.w_elts @@ -1894,9 +1767,6 @@ class Tuple(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1914,7 +1784,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'Tuple') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'Tuple') else: pass w_list = self.w_elts @@ -1931,9 +1801,6 @@ class Const(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1947,7 +1814,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Const') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Const') else: pass @@ -2009,7 +1876,6 @@ class Ellipsis(slice): - def __init__(self): self.initialization_state = 0 @@ -2021,14 +1887,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 0: - missing_field(space, self.initialization_state, [], 'Ellipsis') + self.missing_field(space, [], 'Ellipsis') else: pass class Slice(slice): - def __init__(self, lower, upper, step): self.lower = lower self.upper = upper @@ -2049,7 +1914,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & 
~7) ^ 0: - missing_field(space, self.initialization_state, [None, None, None], 'Slice') + self.missing_field(space, [None, None, None], 'Slice') else: if not self.initialization_state & 1: self.lower = None @@ -2067,7 +1932,6 @@ class ExtSlice(slice): - def __init__(self, dims): self.dims = dims self.w_dims = None @@ -2083,7 +1947,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['dims'], 'ExtSlice') + self.missing_field(space, ['dims'], 'ExtSlice') else: pass w_list = self.w_dims @@ -2100,7 +1964,6 @@ class Index(slice): - def __init__(self, value): self.value = value self.initialization_state = 1 @@ -2114,7 +1977,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['value'], 'Index') + self.missing_field(space, ['value'], 'Index') else: pass self.value.sync_app_attrs(space) @@ -2377,7 +2240,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['target', 'iter', 'ifs'], 'comprehension') + self.missing_field(space, ['target', 'iter', 'ifs'], 'comprehension') else: pass self.target.sync_app_attrs(space) @@ -2394,15 +2257,13 @@ node.sync_app_attrs(space) class excepthandler(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class ExceptHandler(excepthandler): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, name, body, lineno, col_offset): self.type = type self.name = name @@ -2424,12 +2285,12 @@ return visitor.visit_ExceptHandler(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~3) ^ 28: - missing_field(space, self.initialization_state, [None, None, 'body', 'lineno', 'col_offset'], 'ExceptHandler') + if (self.initialization_state & ~12) ^ 19: + self.missing_field(space, ['lineno', 'col_offset', None, None, 'body'], 'ExceptHandler') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.name = None if self.type: self.type.sync_app_attrs(space) @@ -2470,7 +2331,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~6) ^ 9: - missing_field(space, self.initialization_state, ['args', None, None, 'defaults'], 'arguments') + self.missing_field(space, ['args', None, None, 'defaults'], 'arguments') else: if not self.initialization_state & 2: self.vararg = None @@ -2513,7 +2374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['arg', 'value'], 'keyword') + self.missing_field(space, ['arg', 'value'], 'keyword') else: pass self.value.sync_app_attrs(space) @@ -2533,7 +2394,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~2) ^ 1: - missing_field(space, self.initialization_state, ['name', None], 'alias') + self.missing_field(space, ['name', None], 'alias') else: if not self.initialization_state & 2: self.asname = None @@ -3019,6 +2880,8 @@ def Expression_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3098,7 +2961,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -3112,14 +2975,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def stmt_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -3133,7 +2996,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 stmt.typedef = typedef.TypeDef("stmt", AST.typedef, @@ -3149,7 +3012,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3163,14 +3026,14 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def FunctionDef_get_args(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -3184,10 +3047,10 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def FunctionDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3201,10 +3064,10 @@ def FunctionDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def FunctionDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3218,7 +3081,7 @@ def FunctionDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _FunctionDef_field_unroller = unrolling_iterable(['name', 'args', 'body', 'decorator_list']) def FunctionDef_init(space, w_self, __args__): @@ -3254,7 +3117,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not 
None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3268,10 +3131,10 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ClassDef_get_bases(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases') if w_self.w_bases is None: @@ -3285,10 +3148,10 @@ def ClassDef_set_bases(space, w_self, w_new_value): w_self.w_bases = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ClassDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3302,10 +3165,10 @@ def ClassDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def ClassDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3319,7 +3182,7 @@ def ClassDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _ClassDef_field_unroller = unrolling_iterable(['name', 'bases', 'body', 'decorator_list']) def ClassDef_init(space, w_self, __args__): @@ -3356,7 +3219,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3364,13 +3227,15 @@ def Return_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Return_field_unroller = unrolling_iterable(['value']) def Return_init(space, w_self, __args__): @@ -3397,7 +3262,7 @@ ) def Delete_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3411,7 +3276,7 @@ def Delete_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Delete_field_unroller = unrolling_iterable(['targets']) def 
Delete_init(space, w_self, __args__): @@ -3439,7 +3304,7 @@ ) def Assign_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3453,14 +3318,14 @@ def Assign_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3468,13 +3333,15 @@ def Assign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assign_field_unroller = unrolling_iterable(['targets', 'value']) def Assign_init(space, w_self, __args__): @@ -3507,7 +3374,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3515,20 +3382,22 @@ def AugAssign_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def AugAssign_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -3544,14 +3413,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def AugAssign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3559,13 +3428,15 @@ def AugAssign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise 
OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _AugAssign_field_unroller = unrolling_iterable(['target', 'op', 'value']) def AugAssign_init(space, w_self, __args__): @@ -3598,7 +3469,7 @@ w_obj = w_self.getdictvalue(space, 'dest') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dest') return space.wrap(w_self.dest) @@ -3606,16 +3477,18 @@ def Print_set_dest(space, w_self, w_new_value): try: w_self.dest = space.interp_w(expr, w_new_value, True) + if type(w_self.dest) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'dest', w_new_value) return w_self.deldictvalue(space, 'dest') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Print_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -3629,14 +3502,14 @@ def Print_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Print_get_nl(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'nl') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl') return space.wrap(w_self.nl) @@ -3650,7 +3523,7 @@ w_self.setdictvalue(space, 'nl', w_new_value) return w_self.deldictvalue(space, 'nl') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Print_field_unroller = unrolling_iterable(['dest', 'values', 'nl']) def Print_init(space, w_self, __args__): @@ -3684,7 +3557,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3692,20 +3565,22 @@ def For_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def For_get_iter(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'iter') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute 
'%s'", typename, 'iter') return space.wrap(w_self.iter) @@ -3713,16 +3588,18 @@ def For_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'iter', w_new_value) return w_self.deldictvalue(space, 'iter') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def For_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3736,10 +3613,10 @@ def For_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def For_get_orelse(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3753,7 +3630,7 @@ def For_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _For_field_unroller = unrolling_iterable(['target', 'iter', 'body', 'orelse']) def For_init(space, w_self, __args__): @@ -3789,7 +3666,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3797,16 +3674,18 @@ def While_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def While_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3820,10 +3699,10 @@ def While_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def While_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3837,7 +3716,7 @@ def While_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _While_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def While_init(space, w_self, __args__): @@ -3872,7 +3751,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + 
if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3880,16 +3759,18 @@ def If_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def If_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3903,10 +3784,10 @@ def If_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def If_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3920,7 +3801,7 @@ def If_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _If_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def If_init(space, w_self, __args__): @@ -3955,7 +3836,7 @@ w_obj = w_self.getdictvalue(space, 'context_expr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr') return space.wrap(w_self.context_expr) @@ -3963,20 +3844,22 @@ def With_set_context_expr(space, w_self, w_new_value): try: w_self.context_expr = space.interp_w(expr, w_new_value, False) + if type(w_self.context_expr) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'context_expr', w_new_value) return w_self.deldictvalue(space, 'context_expr') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def With_get_optional_vars(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'optional_vars') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars') return space.wrap(w_self.optional_vars) @@ -3984,16 +3867,18 @@ def With_set_optional_vars(space, w_self, w_new_value): try: w_self.optional_vars = space.interp_w(expr, w_new_value, True) + if type(w_self.optional_vars) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'optional_vars', w_new_value) return w_self.deldictvalue(space, 'optional_vars') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def With_get_body(space, w_self): - if not 
w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4007,7 +3892,7 @@ def With_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _With_field_unroller = unrolling_iterable(['context_expr', 'optional_vars', 'body']) def With_init(space, w_self, __args__): @@ -4041,7 +3926,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -4049,20 +3934,22 @@ def Raise_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Raise_get_inst(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'inst') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst') return space.wrap(w_self.inst) @@ -4070,20 +3957,22 @@ def Raise_set_inst(space, w_self, w_new_value): try: w_self.inst = space.interp_w(expr, w_new_value, True) + if type(w_self.inst) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'inst', w_new_value) return w_self.deldictvalue(space, 'inst') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Raise_get_tback(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'tback') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'tback') return space.wrap(w_self.tback) @@ -4091,13 +3980,15 @@ def Raise_set_tback(space, w_self, w_new_value): try: w_self.tback = space.interp_w(expr, w_new_value, True) + if type(w_self.tback) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'tback', w_new_value) return w_self.deldictvalue(space, 'tback') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Raise_field_unroller = unrolling_iterable(['type', 'inst', 'tback']) def Raise_init(space, w_self, __args__): @@ -4126,7 +4017,7 @@ ) def TryExcept_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4140,10 +4031,10 @@ def 
TryExcept_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryExcept_get_handlers(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers') if w_self.w_handlers is None: @@ -4157,10 +4048,10 @@ def TryExcept_set_handlers(space, w_self, w_new_value): w_self.w_handlers = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def TryExcept_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -4174,7 +4065,7 @@ def TryExcept_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _TryExcept_field_unroller = unrolling_iterable(['body', 'handlers', 'orelse']) def TryExcept_init(space, w_self, __args__): @@ -4206,7 +4097,7 @@ ) def TryFinally_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4220,10 +4111,10 @@ def TryFinally_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryFinally_get_finalbody(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody') if w_self.w_finalbody is None: @@ -4237,7 +4128,7 @@ def TryFinally_set_finalbody(space, w_self, w_new_value): w_self.w_finalbody = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _TryFinally_field_unroller = unrolling_iterable(['body', 'finalbody']) def TryFinally_init(space, w_self, __args__): @@ -4271,7 +4162,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -4279,20 +4170,22 @@ def Assert_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assert_get_msg(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'msg') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg') return 
space.wrap(w_self.msg) @@ -4300,13 +4193,15 @@ def Assert_set_msg(space, w_self, w_new_value): try: w_self.msg = space.interp_w(expr, w_new_value, True) + if type(w_self.msg) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'msg', w_new_value) return w_self.deldictvalue(space, 'msg') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assert_field_unroller = unrolling_iterable(['test', 'msg']) def Assert_init(space, w_self, __args__): @@ -4334,7 +4229,7 @@ ) def Import_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4348,7 +4243,7 @@ def Import_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Import_field_unroller = unrolling_iterable(['names']) def Import_init(space, w_self, __args__): @@ -4380,7 +4275,7 @@ w_obj = w_self.getdictvalue(space, 'module') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module') return space.wrap(w_self.module) @@ -4397,10 +4292,10 @@ w_self.setdictvalue(space, 'module', w_new_value) return w_self.deldictvalue(space, 'module') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ImportFrom_get_names(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4414,14 +4309,14 @@ def ImportFrom_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ImportFrom_get_level(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'level') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level') return space.wrap(w_self.level) @@ -4435,7 +4330,7 @@ w_self.setdictvalue(space, 'level', w_new_value) return w_self.deldictvalue(space, 'level') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ImportFrom_field_unroller = unrolling_iterable(['module', 'names', 'level']) def ImportFrom_init(space, w_self, __args__): @@ -4469,7 +4364,7 @@ w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -4477,20 +4372,22 @@ def Exec_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, 
space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Exec_get_globals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'globals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals') return space.wrap(w_self.globals) @@ -4498,20 +4395,22 @@ def Exec_set_globals(space, w_self, w_new_value): try: w_self.globals = space.interp_w(expr, w_new_value, True) + if type(w_self.globals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'globals', w_new_value) return w_self.deldictvalue(space, 'globals') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Exec_get_locals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'locals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals') return space.wrap(w_self.locals) @@ -4519,13 +4418,15 @@ def Exec_set_locals(space, w_self, w_new_value): try: w_self.locals = space.interp_w(expr, w_new_value, True) + if type(w_self.locals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'locals', w_new_value) return w_self.deldictvalue(space, 'locals') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Exec_field_unroller = unrolling_iterable(['body', 'globals', 'locals']) def Exec_init(space, w_self, __args__): @@ -4554,7 +4455,7 @@ ) def Global_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4568,7 +4469,7 @@ def Global_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Global_field_unroller = unrolling_iterable(['names']) def Global_init(space, w_self, __args__): @@ -4600,7 +4501,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -4608,13 +4509,15 @@ def Expr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Expr_field_unroller = unrolling_iterable(['value']) def Expr_init(space, w_self, 
__args__): @@ -4696,7 +4599,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -4710,14 +4613,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def expr_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -4731,7 +4634,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 expr.typedef = typedef.TypeDef("expr", AST.typedef, @@ -4747,7 +4650,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return boolop_to_class[w_self.op - 1]() @@ -4763,10 +4666,10 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BoolOp_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -4780,7 +4683,7 @@ def BoolOp_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _BoolOp_field_unroller = unrolling_iterable(['op', 'values']) def BoolOp_init(space, w_self, __args__): @@ -4813,7 +4716,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -4821,20 +4724,22 @@ def BinOp_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BinOp_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -4850,14 +4755,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def BinOp_get_right(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'right') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right') return space.wrap(w_self.right) @@ -4865,13 +4770,15 @@ def BinOp_set_right(space, w_self, w_new_value): try: w_self.right = space.interp_w(expr, w_new_value, False) + if type(w_self.right) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'right', w_new_value) return w_self.deldictvalue(space, 'right') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _BinOp_field_unroller = unrolling_iterable(['left', 'op', 'right']) def BinOp_init(space, w_self, __args__): @@ -4904,7 +4811,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return unaryop_to_class[w_self.op - 1]() @@ -4920,14 +4827,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def UnaryOp_get_operand(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'operand') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand') return space.wrap(w_self.operand) @@ -4935,13 +4842,15 @@ def UnaryOp_set_operand(space, w_self, w_new_value): try: w_self.operand = space.interp_w(expr, w_new_value, False) + if type(w_self.operand) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'operand', w_new_value) return w_self.deldictvalue(space, 'operand') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _UnaryOp_field_unroller = unrolling_iterable(['op', 'operand']) def UnaryOp_init(space, w_self, __args__): @@ -4973,7 +4882,7 @@ w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -4987,14 +4896,14 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Lambda_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5002,13 +4911,15 @@ def Lambda_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Lambda_field_unroller = unrolling_iterable(['args', 'body']) def Lambda_init(space, w_self, __args__): @@ -5040,7 +4951,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -5048,20 +4959,22 @@ def IfExp_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def IfExp_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5069,20 +4982,22 @@ def IfExp_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def IfExp_get_orelse(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'orelse') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') return space.wrap(w_self.orelse) @@ -5090,13 +5005,15 @@ def IfExp_set_orelse(space, w_self, w_new_value): try: w_self.orelse = space.interp_w(expr, w_new_value, False) + if type(w_self.orelse) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'orelse', w_new_value) return w_self.deldictvalue(space, 'orelse') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _IfExp_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def IfExp_init(space, w_self, __args__): @@ -5125,7 +5042,7 @@ ) def Dict_get_keys(space, w_self): - if not 
w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys') if w_self.w_keys is None: @@ -5139,10 +5056,10 @@ def Dict_set_keys(space, w_self, w_new_value): w_self.w_keys = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Dict_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -5156,7 +5073,7 @@ def Dict_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Dict_field_unroller = unrolling_iterable(['keys', 'values']) def Dict_init(space, w_self, __args__): @@ -5186,7 +5103,7 @@ ) def Set_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -5200,7 +5117,7 @@ def Set_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Set_field_unroller = unrolling_iterable(['elts']) def Set_init(space, w_self, __args__): @@ -5232,7 +5149,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5240,16 +5157,18 @@ def ListComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ListComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5263,7 +5182,7 @@ def ListComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _ListComp_field_unroller = unrolling_iterable(['elt', 'generators']) def ListComp_init(space, w_self, __args__): @@ -5296,7 +5215,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5304,16 +5223,18 @@ def SetComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) 
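The other recurring addition is the guard "if type(w_self.X) is expr: raise OperationError(space.w_TypeError, space.w_None)" in every setter. space.interp_w() only performs an isinstance() check, so it would accept an instance of the abstract base class expr itself; the extra test rejects exactly that case and lets only concrete subclasses (Num, Name, BinOp, and so on) through. A plain-Python analogue of the guard, assuming nothing about the object-space machinery:

    # Plain-Python analogue of the new setter guard; interp_w/OperationError
    # from the generated code are replaced by isinstance/TypeError here.
    class expr(object):
        pass

    class Num(expr):
        def __init__(self, n):
            self.n = n

    def check_expr(value):
        if not isinstance(value, expr) or type(value) is expr:
            raise TypeError("expected a concrete expr subclass")
        return value

    check_expr(Num(42))        # accepted: concrete subclass
    try:
        check_expr(expr())     # rejected: bare abstract base instance
    except TypeError:
        pass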
except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def SetComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5327,7 +5248,7 @@ def SetComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _SetComp_field_unroller = unrolling_iterable(['elt', 'generators']) def SetComp_init(space, w_self, __args__): @@ -5360,7 +5281,7 @@ w_obj = w_self.getdictvalue(space, 'key') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key') return space.wrap(w_self.key) @@ -5368,20 +5289,22 @@ def DictComp_set_key(space, w_self, w_new_value): try: w_self.key = space.interp_w(expr, w_new_value, False) + if type(w_self.key) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'key', w_new_value) return w_self.deldictvalue(space, 'key') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def DictComp_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5389,16 +5312,18 @@ def DictComp_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def DictComp_get_generators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5412,7 +5337,7 @@ def DictComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _DictComp_field_unroller = unrolling_iterable(['key', 'value', 'generators']) def DictComp_init(space, w_self, __args__): @@ -5446,7 +5371,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5454,16 +5379,18 @@ def GeneratorExp_set_elt(space, 
w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def GeneratorExp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5477,7 +5404,7 @@ def GeneratorExp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _GeneratorExp_field_unroller = unrolling_iterable(['elt', 'generators']) def GeneratorExp_init(space, w_self, __args__): @@ -5510,7 +5437,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5518,13 +5445,15 @@ def Yield_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Yield_field_unroller = unrolling_iterable(['value']) def Yield_init(space, w_self, __args__): @@ -5555,7 +5484,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -5563,16 +5492,18 @@ def Compare_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Compare_get_ops(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops') if w_self.w_ops is None: @@ -5586,10 +5517,10 @@ def Compare_set_ops(space, w_self, w_new_value): w_self.w_ops = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Compare_get_comparators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators') if w_self.w_comparators is None: @@ -5603,7 +5534,7 @@ 
def Compare_set_comparators(space, w_self, w_new_value): w_self.w_comparators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Compare_field_unroller = unrolling_iterable(['left', 'ops', 'comparators']) def Compare_init(space, w_self, __args__): @@ -5638,7 +5569,7 @@ w_obj = w_self.getdictvalue(space, 'func') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func') return space.wrap(w_self.func) @@ -5646,16 +5577,18 @@ def Call_set_func(space, w_self, w_new_value): try: w_self.func = space.interp_w(expr, w_new_value, False) + if type(w_self.func) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'func', w_new_value) return w_self.deldictvalue(space, 'func') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Call_get_args(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') if w_self.w_args is None: @@ -5669,10 +5602,10 @@ def Call_set_args(space, w_self, w_new_value): w_self.w_args = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Call_get_keywords(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') if w_self.w_keywords is None: @@ -5686,14 +5619,14 @@ def Call_set_keywords(space, w_self, w_new_value): w_self.w_keywords = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def Call_get_starargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'starargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') return space.wrap(w_self.starargs) @@ -5701,20 +5634,22 @@ def Call_set_starargs(space, w_self, w_new_value): try: w_self.starargs = space.interp_w(expr, w_new_value, True) + if type(w_self.starargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'starargs', w_new_value) return w_self.deldictvalue(space, 'starargs') - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 def Call_get_kwargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'kwargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 16: + if not w_self.initialization_state & 64: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') return space.wrap(w_self.kwargs) @@ -5722,13 +5657,15 @@ def Call_set_kwargs(space, w_self, w_new_value): try: w_self.kwargs = space.interp_w(expr, w_new_value, True) + if type(w_self.kwargs) is expr: + raise OperationError(space.w_TypeError, 
space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'kwargs', w_new_value) return w_self.deldictvalue(space, 'kwargs') - w_self.initialization_state |= 16 + w_self.initialization_state |= 64 _Call_field_unroller = unrolling_iterable(['func', 'args', 'keywords', 'starargs', 'kwargs']) def Call_init(space, w_self, __args__): @@ -5765,7 +5702,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5773,13 +5710,15 @@ def Repr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Repr_field_unroller = unrolling_iterable(['value']) def Repr_init(space, w_self, __args__): @@ -5810,7 +5749,7 @@ w_obj = w_self.getdictvalue(space, 'n') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n') return w_self.n @@ -5824,7 +5763,7 @@ w_self.setdictvalue(space, 'n', w_new_value) return w_self.deldictvalue(space, 'n') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Num_field_unroller = unrolling_iterable(['n']) def Num_init(space, w_self, __args__): @@ -5855,7 +5794,7 @@ w_obj = w_self.getdictvalue(space, 's') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's') return w_self.s @@ -5869,7 +5808,7 @@ w_self.setdictvalue(space, 's', w_new_value) return w_self.deldictvalue(space, 's') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Str_field_unroller = unrolling_iterable(['s']) def Str_init(space, w_self, __args__): @@ -5900,7 +5839,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5908,20 +5847,22 @@ def Attribute_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Attribute_get_attr(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'attr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not 
w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr') return space.wrap(w_self.attr) @@ -5935,14 +5876,14 @@ w_self.setdictvalue(space, 'attr', w_new_value) return w_self.deldictvalue(space, 'attr') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Attribute_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -5958,7 +5899,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Attribute_field_unroller = unrolling_iterable(['value', 'attr', 'ctx']) def Attribute_init(space, w_self, __args__): @@ -5991,7 +5932,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5999,20 +5940,22 @@ def Subscript_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Subscript_get_slice(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'slice') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice') return space.wrap(w_self.slice) @@ -6020,20 +5963,22 @@ def Subscript_set_slice(space, w_self, w_new_value): try: w_self.slice = space.interp_w(slice, w_new_value, False) + if type(w_self.slice) is slice: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'slice', w_new_value) return w_self.deldictvalue(space, 'slice') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Subscript_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6049,7 +5994,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Subscript_field_unroller = unrolling_iterable(['value', 'slice', 'ctx']) def Subscript_init(space, w_self, __args__): @@ 
-6082,7 +6027,7 @@ w_obj = w_self.getdictvalue(space, 'id') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id') return space.wrap(w_self.id) @@ -6096,14 +6041,14 @@ w_self.setdictvalue(space, 'id', w_new_value) return w_self.deldictvalue(space, 'id') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Name_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6119,7 +6064,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Name_field_unroller = unrolling_iterable(['id', 'ctx']) def Name_init(space, w_self, __args__): @@ -6147,7 +6092,7 @@ ) def List_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6161,14 +6106,14 @@ def List_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def List_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6184,7 +6129,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _List_field_unroller = unrolling_iterable(['elts', 'ctx']) def List_init(space, w_self, __args__): @@ -6213,7 +6158,7 @@ ) def Tuple_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6227,14 +6172,14 @@ def Tuple_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Tuple_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6250,7 +6195,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Tuple_field_unroller = unrolling_iterable(['elts', 
'ctx']) def Tuple_init(space, w_self, __args__): @@ -6283,7 +6228,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return w_self.value @@ -6297,7 +6242,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Const_field_unroller = unrolling_iterable(['value']) def Const_init(space, w_self, __args__): @@ -6409,6 +6354,8 @@ def Slice_set_lower(space, w_self, w_new_value): try: w_self.lower = space.interp_w(expr, w_new_value, True) + if type(w_self.lower) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6430,6 +6377,8 @@ def Slice_set_upper(space, w_self, w_new_value): try: w_self.upper = space.interp_w(expr, w_new_value, True) + if type(w_self.upper) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6451,6 +6400,8 @@ def Slice_set_step(space, w_self, w_new_value): try: w_self.step = space.interp_w(expr, w_new_value, True) + if type(w_self.step) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6540,6 +6491,8 @@ def Index_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6809,6 +6762,8 @@ def comprehension_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6830,6 +6785,8 @@ def comprehension_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6887,7 +6844,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -6901,14 +6858,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def excepthandler_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -6922,7 +6879,7 @@ 
w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 excepthandler.typedef = typedef.TypeDef("excepthandler", AST.typedef, @@ -6938,7 +6895,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -6946,20 +6903,22 @@ def ExceptHandler_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ExceptHandler_get_name(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -6967,16 +6926,18 @@ def ExceptHandler_set_name(space, w_self, w_new_value): try: w_self.name = space.interp_w(expr, w_new_value, True) + if type(w_self.name) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ExceptHandler_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -6990,7 +6951,7 @@ def ExceptHandler_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ExceptHandler_field_unroller = unrolling_iterable(['type', 'name', 'body']) def ExceptHandler_init(space, w_self, __args__): @@ -7164,6 +7125,8 @@ def keyword_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -79,6 +79,7 @@ else: self.emit("class %s(AST):" % (base,)) if sum.attributes: + self.emit("") args = ", ".join(attr.name.value for attr in sum.attributes) self.emit("def __init__(self, %s):" % (args,), 1) for attr in sum.attributes: @@ -114,7 +115,7 @@ else: names.append(repr(field.name.value)) sub = (", ".join(names), name.value) - self.emit("missing_field(space, self.initialization_state, [%s], %r)" + self.emit("self.missing_field(space, [%s], %r)" % sub, 3) 
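
The template's module-level missing_field() helper becomes a method on the AST base class (emitted as self.missing_field(...) just above and defined further down): it reads self.initialization_state itself, and when the offending value is present in the instance __dict__ but failed the setter's type check it reports "incorrect type for field ..." rather than "required field ... missing". A rough app-level sketch of the resulting behaviour -- not a transcript, and the exact wording comes from the template strings below:

    import _ast as ast

    pos = dict(lineno=1, col_offset=0)
    bad = ast.Module([ast.Expr(ast.expr(**pos), **pos)])    # bare 'expr' as a field value
    try:
        compile(bad, "<ast>", "exec")
    except TypeError, e:
        print e      # expected along the lines of: incorrect type for field "value" in Expr

    try:
        compile(ast.Module(), "<ast>", "exec")              # 'body' never supplied
    except TypeError, e:
        print e      # expected along the lines of: required field "body" missing from Module
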
self.emit("else:", 2) # Fill in all the default fields. @@ -195,17 +196,13 @@ def visitConstructor(self, cons, base, extra_attributes): self.emit("class %s(%s):" % (cons.name, base)) self.emit("") - for field in self.data.cons_attributes[cons]: - subst = (field.name, self.data.field_masks[field]) - self.emit("_%s_mask = %i" % subst, 1) - self.emit("") self.make_constructor(cons.fields, cons, extra_attributes, base) self.emit("") self.emit("def walkabout(self, visitor):", 1) self.emit("visitor.visit_%s(self)" % (cons.name,), 2) self.emit("") self.make_mutate_over(cons, cons.name) - self.make_var_syncer(cons.fields + self.data.cons_attributes[cons], + self.make_var_syncer(self.data.cons_attributes[cons] + cons.fields, cons, cons.name) def visitField(self, field): @@ -324,7 +321,7 @@ def visitSum(self, sum, name): for field in sum.attributes: - self.make_property(field, name, True) + self.make_property(field, name) self.make_typedef(name, "AST", sum.attributes, fields_name="_attributes") if not is_simple_sum(sum): @@ -400,13 +397,10 @@ def visitField(self, field, name): self.make_property(field, name) - def make_property(self, field, name, different_masks=False): + def make_property(self, field, name): func = "def %s_get_%s(space, w_self):" % (name, field.name) self.emit(func) - if different_masks: - flag = "w_self._%s_mask" % (field.name,) - else: - flag = self.data.field_masks[field] + flag = self.data.field_masks[field] if not field.seq: self.emit("if w_self.w_dict is not None:", 1) self.emit(" w_obj = w_self.getdictvalue(space, '%s')" % (field.name,), 1) @@ -458,6 +452,11 @@ config = (field.name, field.type, repr(field.opt)) self.emit("w_self.%s = space.interp_w(%s, w_new_value, %s)" % config, 2) + if field.type.value not in self.data.prod_simple: + self.emit("if type(w_self.%s) is %s:" % ( + field.name, field.type), 2) + self.emit("raise OperationError(space.w_TypeError, " + "space.w_None)", 3) else: level = 2 if field.opt and field.type.value != "int": @@ -505,7 +504,10 @@ optional_mask = 0 for i, field in enumerate(fields): flag = 1 << i - field_masks[field] = flag + if field not in field_masks: + field_masks[field] = flag + else: + assert field_masks[field] == flag if field.opt: optional_mask |= flag else: @@ -518,9 +520,9 @@ if is_simple_sum(sum): simple_types.add(tp.name.value) else: + attrs = [field for field in sum.attributes] for cons in sum.types: - attrs = [copy_field(field) for field in sum.attributes] - add_masks(cons.fields + attrs, cons) + add_masks(attrs + cons.fields, cons) cons_attributes[cons] = attrs else: prod = tp.value @@ -588,6 +590,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." + state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \\"%s\\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \\"%s\\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -631,15 +651,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." 
- for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \\"%s\\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) - raise AssertionError("should not reach here") """ diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -249,7 +249,7 @@ while len(result) < count: x = fn() keys = [x._getregkey()] - if isinstance(x, StackLoc) and x.width > WORD: + if isinstance(x, StackLoc) and x.get_width() > WORD: keys.append(keys[0] + WORD) for key in keys: if key in seen: @@ -267,7 +267,7 @@ for i, loc in enumerate(locations): if isinstance(loc, RegLoc): if loc.is_xmm: - if loc.width > WORD: + if loc.get_width() > WORD: newvalue = ('value-xmm-%d' % i, 'value-xmm-hiword-%d' % i) else: @@ -276,8 +276,8 @@ else: regs1[loc.value] = 'value-int-%d' % i elif isinstance(loc, StackLoc): - stack[loc.value] = 'value-width%d-%d' % (loc.width, i) - if loc.width > WORD: + stack[loc.value] = 'value-width%d-%d' % (loc.get_width(), i) + if loc.get_width() > WORD: stack[loc.value+WORD] = 'value-hiword-%d' % i else: assert isinstance(loc, ImmedLoc) @@ -299,7 +299,7 @@ # def read(loc, expected_width=None): if expected_width is not None: - assert loc.width == expected_width + assert loc.get_width() == expected_width if isinstance(loc, RegLoc): if loc.is_xmm: return regs2[loc.value] @@ -307,7 +307,7 @@ return regs1[loc.value] if isinstance(loc, StackLoc): got = stack[loc.value] - if loc.width > WORD: + if loc.get_width() > WORD: got = (got, stack[loc.value+WORD]) return got if isinstance(loc, ImmedLoc): @@ -321,7 +321,7 @@ else: regs1[loc.value] = newvalue elif isinstance(loc, StackLoc): - if loc.width > WORD: + if loc.get_width() > WORD: newval1, newval2 = newvalue stack[loc.value] = newval1 stack[loc.value+WORD] = newval2 diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -1,5 +1,5 @@ import math -from pypy.rlib.rarithmetic import r_int64 +from pypy.rlib.rarithmetic import r_int64, r_uint from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.rlib.objectmodel import we_are_translated @@ -21,6 +21,7 @@ # class MemoryManager(object): + NO_NEXT_CHECK = r_int64(2 ** 63 - 1) def __init__(self): self.check_frequency = -1 @@ -36,12 +37,13 @@ # According to my estimates it's about 5e9 years given 1000 loops # per second self.current_generation = r_int64(1) - self.next_check = r_int64(-1) + self.next_check = self.NO_NEXT_CHECK self.alive_loops = {} + self._cleanup_jitcell_dicts = lambda: None def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: - self.next_check = r_int64(-1) + self.next_check = self.NO_NEXT_CHECK else: self.max_age = max_age if check_frequency <= 0: @@ -49,10 +51,11 @@ self.check_frequency = check_frequency self.next_check = self.current_generation + 1 - def next_generation(self): + def next_generation(self, do_cleanups_now=True): self.current_generation += 1 - if self.current_generation == self.next_check: + if do_cleanups_now and self.current_generation >= self.next_check: self._kill_old_loops_now() + self._cleanup_jitcell_dicts() self.next_check = self.current_generation + self.check_frequency def keep_loop_alive(self, looptoken): @@ -81,3 +84,22 @@ # a single one is not enough for all tests :-( rgc.collect(); rgc.collect(); rgc.collect() 
debug_stop("jit-mem-collect") + + def get_current_generation_uint(self): + """Return the current generation, possibly truncated to a uint. + To use only as an approximation for decaying counters.""" + return r_uint(self.current_generation) + + def record_jitcell_dict(self, callback): + """NOT_RPYTHON. The given jitcell_dict is a dict that needs + occasional clean-ups of old cells. A cell is old if it never + reached the threshold, and its counter decayed to a tiny value.""" + # note that the various jitcell_dicts have different RPython types, + # so we have to make a different function for each one. These + # functions are chained to each other: each calls the previous one. + def cleanup_dict(): + callback() + cleanup_previous() + # + cleanup_previous = self._cleanup_jitcell_dicts + self._cleanup_jitcell_dicts = cleanup_dict diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -246,15 +246,16 @@ self.force_lazy_setfields_and_arrayitems_for_guard()) return opnum = op.getopnum() - if (opnum == rop.SETFIELD_GC or # handled specially - opnum == rop.SETFIELD_RAW or # no effect on GC struct/array - opnum == rop.SETARRAYITEM_GC or # handled specially - opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct - opnum == rop.STRSETITEM or # no effect on GC struct/array - opnum == rop.UNICODESETITEM or # no effect on GC struct/array - opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever - opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array - opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array + if (opnum == rop.SETFIELD_GC or # handled specially + opnum == rop.SETFIELD_RAW or # no effect on GC struct/array + opnum == rop.SETARRAYITEM_GC or # handled specially + opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct + opnum == rop.SETINTERIORFIELD_RAW or # no effect on GC struct + opnum == rop.STRSETITEM or # no effect on GC struct/array + opnum == rop.UNICODESETITEM or # no effect on GC struct/array + opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever + opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array + opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array return assert opnum != rop.CALL_PURE if (opnum == rop.CALL or diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7755,6 +7755,22 @@ """ self.optimize_loop(ops, expected) + def test_setinteriorfield_should_not_clear_cache(self): + ops = """ + [i0, p0] + i2 = getfield_gc(p0, descr=adescr) + i3 = call(i2, descr=nonwritedescr) + setinteriorfield_raw(i0, i2, i3) + jump(i0, p0) + """ + expected = """ + [i0, p0, i2] + i3 = call(i2, descr=nonwritedescr) + setinteriorfield_raw(i0, i2, i3) + jump(i0, p0, i2) + """ + self.optimize_loop(ops, expected) + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2910,6 +2910,27 @@ res = self.meta_interp(f, [32]) assert res == f(32) + def test_decay_counters(self): + myjitdriver = JitDriver(greens = ['m'], reds = ['n']) + def f(m, n): + while n > 0: + myjitdriver.jit_merge_point(m=m, n=n) + n += m + n -= m + n -= 1 + def main(): + f(5, 7) 
# run 7x with m=5 counter[m=5] = 7 + f(15, 10) # compiles one loop counter[m=5] = 3 (automatic decay) + f(5, 5) # run 5x times with m=5 counter[m=5] = 8 + # + self.meta_interp(main, [], decay_halflife=1, + function_threshold=0, threshold=9, trace_eagerness=99) + self.check_trace_count(1) + # + self.meta_interp(main, [], decay_halflife=1, + function_threshold=0, threshold=8, trace_eagerness=99) + self.check_trace_count(2) + class TestOOtype(BasicTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -1,3 +1,4 @@ +import math from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -8,7 +9,7 @@ from pypy.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr from pypy.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr from pypy.jit.codewriter import longlong -from pypy.rlib.rarithmetic import r_singlefloat +from pypy.rlib.rarithmetic import r_singlefloat, r_uint def boxfloat(x): return BoxFloat(longlong.getfloatstorage(x)) @@ -275,3 +276,77 @@ state.make_jitdriver_callbacks() res = state.can_never_inline(5, 42.5) assert res is True + +def test_decay_counters(): + cell = JitCell(r_uint(5)) + cell.counter = 100 + cell.adjust_counter(r_uint(5), math.log(0.9)) + assert cell.counter == 100 + cell.adjust_counter(r_uint(6), math.log(0.9)) + assert cell.counter == 90 + cell.adjust_counter(r_uint(9), math.log(0.9)) + assert cell.counter == int(90 * (0.9**3)) + +def test_cleanup_jitcell_dict(): + from pypy.jit.metainterp.memmgr import MemoryManager + class FakeWarmRunnerDesc: + memory_manager = MemoryManager() + class cpu: + pass + class FakeJitDriverSD: + _green_args_spec = [lltype.Signed] + # + # Test creating tons of jitcells that remain at 0 + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + cell1 = get_jitcell(True, -1) + assert len(warmstate._jitcell_dict) == 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 1 + # + for i in range(1, 20005): + get_jitcell(True, i) # should trigger a clean-up at 20001 + assert len(warmstate._jitcell_dict) == (i % 20000) + 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 2 + # + # Same test, with one jitcell that has a counter of BASE instead of 0 + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + warmstate.set_param_decay_halflife(2) + warmstate.set_param_threshold(5) + warmstate.set_param_function_threshold(0) + get_jitcell = warmstate._make_jitcell_getter_default() + cell2 = get_jitcell(True, -2) + cell2.counter = BASE = warmstate.increment_threshold * 3 + # + for i in range(0, 20005): + get_jitcell(True, i) + assert len(warmstate._jitcell_dict) == (i % 19999) + 2 + # + assert cell2 in warmstate._jitcell_dict.values() + assert cell2.counter == int(BASE * math.sqrt(0.5)) # decayed once + assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 + # + # Same test, with jitcells that are compiled and free by the memmgr + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + get_jitcell(True, -1) + assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 + # + for i in range(1, 20005): + cell = get_jitcell(True, i) + cell.counter = -1 + cell.wref_procedure_token = 
None # or a dead weakref, equivalently + assert len(warmstate._jitcell_dict) == (i % 20000) + 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 + # + # Same test, with counter == -2 (rare case, kept alive) + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + cell = get_jitcell(True, -1) + cell.counter = -2 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 + # + for i in range(1, 20005): + cell = get_jitcell(True, i) + cell.counter = -2 + assert len(warmstate._jitcell_dict) == i + 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 5 diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -64,9 +64,11 @@ def jittify_and_run(interp, graph, args, repeat=1, graph_and_interp_only=False, backendopt=False, trace_limit=sys.maxint, + threshold=3, trace_eagerness=2, inline=False, loop_longevity=0, retrace_limit=5, - function_threshold=4, - enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, **kwds): + function_threshold=4, decay_halflife=0, + enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, + **kwds): from pypy.config.config import ConfigError translator = interp.typer.annotator.translator try: @@ -83,15 +85,16 @@ pass warmrunnerdesc = WarmRunnerDesc(translator, backendopt=backendopt, **kwds) for jd in warmrunnerdesc.jitdrivers_sd: - jd.warmstate.set_param_threshold(3) # for tests + jd.warmstate.set_param_threshold(threshold) jd.warmstate.set_param_function_threshold(function_threshold) - jd.warmstate.set_param_trace_eagerness(2) # for tests + jd.warmstate.set_param_trace_eagerness(trace_eagerness) jd.warmstate.set_param_trace_limit(trace_limit) jd.warmstate.set_param_inlining(inline) jd.warmstate.set_param_loop_longevity(loop_longevity) jd.warmstate.set_param_retrace_limit(retrace_limit) jd.warmstate.set_param_max_retrace_guards(max_retrace_guards) jd.warmstate.set_param_enable_opts(enable_opts) + jd.warmstate.set_param_decay_halflife(decay_halflife) warmrunnerdesc.finish() if graph_and_interp_only: return interp, graph diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -1,10 +1,10 @@ -import sys, weakref +import sys, weakref, math from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, cast_base_ptr_to_instance from pypy.rpython.annlowlevel import cast_object_to_ptr from pypy.rlib.objectmodel import specialize, we_are_translated, r_dict -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rlib.nonconst import NonConstant from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.jit import PARAMETERS @@ -153,6 +153,25 @@ dont_trace_here = False wref_procedure_token = None + def __init__(self, generation): + # The stored 'counter' value follows an exponential decay model. + # Conceptually after every generation, it decays by getting + # multiplied by a constant <= 1.0. In practice, decaying occurs + # lazily: the following field records the latest seen generation + # number, and adjustment is done by adjust_counter() when needed. 
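
To make the decay arithmetic concrete: with a decay_halflife of h generations, set_param_decay_halflife() (further down) picks log_decay_factor = log(0.5) / h, and adjust_counter() multiplies a counter that was last touched N generations ago by exp(log_decay_factor * N) = 0.5 ** (N / h). A small stand-alone sketch of the same computation -- the variable names are mine, and 40 is the default 'decay_halflife' added to PARAMETERS below:

    import math

    halflife = 40                              # default 'decay_halflife' in PARAMETERS
    log_decay_factor = math.log(0.5) / halflife

    def adjust(counter, latest_seen, generation):
        # lazy decay: apply the per-generation factor once for all the
        # generations that passed since the counter was last updated
        n = generation - latest_seen
        return int(counter * math.exp(log_decay_factor * n)), generation

    counter, seen = 1000, 0
    counter, seen = adjust(counter, seen, 10)  # ~ 1000 * 0.5**(10/40) -> 840
    counter, seen = adjust(counter, seen, 40)  # 30 more generations   -> 499
    # after one full half-life (40 generations) the counter is roughly halved;
    # it ends at 499 rather than 500 only because int() truncates at each step
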
+ self.latest_generation_seen = generation + + def adjust_counter(self, generation, log_decay_factor): + if generation != self.latest_generation_seen: + # The latest_generation_seen is older than the current generation. + # Adjust by multiplying self.counter N times by decay_factor, i.e. + # by decay_factor ** N, which is equal to exp(log(decay_factor)*N). + assert self.counter >= 0 + N = generation - self.latest_generation_seen + factor = math.exp(log_decay_factor * N) + self.counter = int(self.counter * factor) + self.latest_generation_seen = generation + def get_procedure_token(self): if self.wref_procedure_token is not None: token = self.wref_procedure_token() @@ -172,7 +191,6 @@ class WarmEnterState(object): THRESHOLD_LIMIT = sys.maxint // 2 - default_jitcell_dict = None def __init__(self, warmrunnerdesc, jitdriver_sd): "NOT_RPYTHON" @@ -213,6 +231,17 @@ def set_param_inlining(self, value): self.inlining = value + def set_param_decay_halflife(self, value): + # Use 0 or -1 to mean "no decay". Initialize the internal variable + # 'log_decay_factor'. It is choosen such that by multiplying the + # counter on loops by 'exp(log_decay_factor)' (<= 1.0) every + # generation, then the counter will be divided by two after 'value' + # generations have passed. + if value <= 0: + self.log_decay_factor = 0.0 # log(1.0) + else: + self.log_decay_factor = math.log(0.5) / value + def set_param_enable_opts(self, value): from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT, ALL_OPTS_NAMES @@ -282,6 +311,11 @@ confirm_enter_jit = self.confirm_enter_jit range_red_args = unrolling_iterable( range(num_green_args, num_green_args + jitdriver_sd.num_red_args)) + memmgr = self.warmrunnerdesc.memory_manager + if memmgr is not None: + get_current_generation = memmgr.get_current_generation_uint + else: + get_current_generation = lambda: r_uint(0) # get a new specialized copy of the method ARGS = [] for kind in jitdriver_sd.red_args_types: @@ -326,6 +360,8 @@ if cell.counter >= 0: # update the profiling counter + cell.adjust_counter(get_current_generation(), + self.log_decay_factor) n = cell.counter + threshold if n <= self.THRESHOLD_LIMIT: # bound not reached cell.counter = n @@ -418,6 +454,15 @@ # return jit_getter + def _new_jitcell(self): + warmrunnerdesc = self.warmrunnerdesc + if (warmrunnerdesc is not None and + warmrunnerdesc.memory_manager is not None): + gen = warmrunnerdesc.memory_manager.get_current_generation_uint() + else: + gen = r_uint(0) + return JitCell(gen) + def _make_jitcell_getter_default(self): "NOT_RPYTHON" jitdriver_sd = self.jitdriver_sd @@ -447,13 +492,53 @@ except AttributeError: pass # + memmgr = self.warmrunnerdesc and self.warmrunnerdesc.memory_manager + if memmgr: + def _cleanup_dict(): + minimum = sys.maxint + if self.increment_threshold > 0: + minimum = min(minimum, self.increment_threshold) + if self.increment_function_threshold > 0: + minimum = min(minimum, self.increment_function_threshold) + currentgen = memmgr.get_current_generation_uint() + killme = [] + for key, cell in jitcell_dict.iteritems(): + if cell.counter >= 0: + cell.adjust_counter(currentgen, self.log_decay_factor) + if cell.counter < minimum: + killme.append(key) + elif (cell.counter == -1 + and cell.get_procedure_token() is None): + killme.append(key) + for key in killme: + del jitcell_dict[key] + # + def _maybe_cleanup_dict(): + # If no tracing goes on at all because the jitcells are + # each time for new greenargs, the dictionary grows forever. 
+ # So every one in a (rare) while, we decide to force an + # artificial next_generation() and _cleanup_dict(). + self._trigger_automatic_cleanup += 1 + if self._trigger_automatic_cleanup > 20000: + self._trigger_automatic_cleanup = 0 + memmgr.next_generation(do_cleanups_now=False) + _cleanup_dict() + # + self._trigger_automatic_cleanup = 0 + self._jitcell_dict = jitcell_dict # for tests + memmgr.record_jitcell_dict(_cleanup_dict) + else: + def _maybe_cleanup_dict(): + pass + # def get_jitcell(build, *greenargs): try: cell = jitcell_dict[greenargs] except KeyError: if not build: return None - cell = JitCell() + _maybe_cleanup_dict() + cell = self._new_jitcell() jitcell_dict[greenargs] = cell return cell return get_jitcell @@ -464,6 +549,10 @@ get_jitcell_at_ptr = self.jitdriver_sd._get_jitcell_at_ptr set_jitcell_at_ptr = self.jitdriver_sd._set_jitcell_at_ptr lltohlhack = {} + # note that there is no equivalent of record_jitcell_dict() + # in the case of custom getters. We assume that the interpreter + # stores the JitCells on some objects that can go away by GC, + # like the PyCode objects in PyPy. # def get_jitcell(build, *greenargs): fn = support.maybe_on_top_of_llinterp(rtyper, get_jitcell_at_ptr) @@ -485,7 +574,7 @@ if not build: return cell if cell is None: - cell = JitCell() + cell = self._new_jitcell() # if we_are_translated(): cellref = cast_object_to_ptr(BASEJITCELL, cell) diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -285,3 +285,10 @@ [], lineno=1, col_offset=0) ]) exec compile(body, '', 'exec') + + def test_invalid_sum(self): + import _ast as ast + pos = dict(lineno=2, col_offset=3) + m = ast.Module([ast.Expr(ast.expr(**pos), **pos)]) + exc = raises(TypeError, compile, m, "", "exec") + diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -133,7 +133,7 @@ descr__new__, get_dtype = new_dtype_getter("long") class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - pass + descr__new__, get_dtype = new_dtype_getter("ulong") class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int64") diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,34 +1,90 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.interp_dtype import get_dtype_cache -from pypy.rlib.rstruct.runpack import runpack from pypy.rpython.lltypesystem import lltype, rffi +from pypy.module.micronumpy import interp_dtype +from pypy.objspace.std.strutil import strip_spaces FLOAT_SIZE = rffi.sizeof(lltype.Float) - at unwrap_spec(s=str) -def fromstring(space, s): +def _fromstring_text(space, s, count, sep, length, dtype): from pypy.module.micronumpy.interp_numarray import W_NDimArray + + sep_stripped = strip_spaces(sep) + skip_bad_vals = len(sep_stripped) == 0 + + items = [] + num_items = 0 + idx = 0 + + while (num_items < count or count == -1) and idx < len(s): + nextidx = s.find(sep, idx) + if nextidx < 0: + nextidx = length + piece = strip_spaces(s[idx:nextidx]) + if len(piece) > 0 or not skip_bad_vals: + if len(piece) == 0 and not skip_bad_vals: + val = 
dtype.itemtype.default_fromstring(space) + else: + try: + val = dtype.coerce(space, space.wrap(piece)) + except OperationError, e: + if not e.match(space, space.w_ValueError): + raise + gotit = False + while not gotit and len(piece) > 0: + piece = piece[:-1] + try: + val = dtype.coerce(space, space.wrap(piece)) + gotit = True + except OperationError, e: + if not e.match(space, space.w_ValueError): + raise + if not gotit: + val = dtype.itemtype.default_fromstring(space) + nextidx = length + items.append(val) + num_items += 1 + idx = nextidx + 1 + + if count > num_items: + raise OperationError(space.w_ValueError, space.wrap( + "string is smaller than requested size")) + + a = W_NDimArray(num_items, [num_items], dtype=dtype) + for i, val in enumerate(items): + a.dtype.setitem(a.storage, i, val) + + return space.wrap(a) + +def _fromstring_bin(space, s, count, length, dtype): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + + itemsize = dtype.itemtype.get_element_size() + if count == -1: + count = length / itemsize + if length % itemsize != 0: + raise operationerrfmt(space.w_ValueError, + "string length %d not divisable by item size %d", + length, itemsize) + if count * itemsize > length: + raise OperationError(space.w_ValueError, space.wrap( + "string is smaller than requested size")) + + a = W_NDimArray(count, [count], dtype=dtype) + for i in range(count): + val = dtype.itemtype.runpack_str(s[i*itemsize:i*itemsize + itemsize]) + a.dtype.setitem(a.storage, i, val) + + return space.wrap(a) + + at unwrap_spec(s=str, count=int, sep=str) +def fromstring(space, s, w_dtype=None, count=-1, sep=''): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) length = len(s) - - if length % FLOAT_SIZE == 0: - number = length/FLOAT_SIZE + if sep == '': + return _fromstring_bin(space, s, count, length, dtype) else: - raise OperationError(space.w_ValueError, space.wrap( - "string length %d not divisable by %d" % (length, FLOAT_SIZE))) - - dtype = get_dtype_cache(space).w_float64dtype - a = W_NDimArray(number, [number], dtype=dtype) - - start = 0 - end = FLOAT_SIZE - i = 0 - while i < number: - part = s[start:end] - a.dtype.setitem(a.storage, i, dtype.box(runpack('d', part))) - i += 1 - start += FLOAT_SIZE - end += FLOAT_SIZE - - return space.wrap(a) + return _fromstring_text(space, s, count, sep, length, dtype) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1194,13 +1194,110 @@ import struct BaseNumpyAppTest.setup_class.im_func(cls) cls.w_data = cls.space.wrap(struct.pack('dddd', 1, 2, 3, 4)) + cls.w_fdata = cls.space.wrap(struct.pack('f', 2.3)) + cls.w_float32val = cls.space.wrap(struct.pack('f', 5.2)) + cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4)) + cls.w_ulongval = cls.space.wrap(struct.pack('L', 12)) def test_fromstring(self): - from numpypy import fromstring + import sys + from numpypy import fromstring, array, uint8, float32, int32 + a = fromstring(self.data) for i in range(4): assert a[i] == i + 1 - raises(ValueError, fromstring, "abc") + b = fromstring('\x01\x02', dtype=uint8) + assert a[0] == 1 + assert a[1] == 2 + c = fromstring(self.fdata, dtype=float32) + assert c[0] == float32(2.3) + d = fromstring("1 2", sep=' ', count=2, dtype=uint8) + assert len(d) == 2 + assert d[0] == 1 + assert d[1] == 2 + e = fromstring('3, 4,5', dtype=uint8, 
sep=',') + assert len(e) == 3 + assert e[0] == 3 + assert e[1] == 4 + assert e[2] == 5 + f = fromstring('\x01\x02\x03\x04\x05', dtype=uint8, count=3) + assert len(f) == 3 + assert f[0] == 1 + assert f[1] == 2 + assert f[2] == 3 + g = fromstring("1 2 3 ", dtype=uint8, sep=" ") + assert len(g) == 3 + assert g[0] == 1 + assert g[1] == 2 + assert g[2] == 3 + h = fromstring("1, , 2, 3", dtype=uint8, sep=",") + assert (h == [1,0,2,3]).all() + i = fromstring("1 2 3", dtype=uint8, sep=" ") + assert (i == [1,2,3]).all() + j = fromstring("1\t\t\t\t2\t3", dtype=uint8, sep="\t") + assert (j == [1,2,3]).all() + k = fromstring("1,x,2,3", dtype=uint8, sep=",") + assert (k == [1,0]).all() + l = fromstring("1,x,2,3", dtype='float32', sep=",") + assert (l == [1.0,-1.0]).all() + m = fromstring("1,,2,3", sep=",") + assert (m == [1.0,-1.0,2.0,3.0]).all() + n = fromstring("3.4 2.0 3.8 2.2", dtype=int32, sep=" ") + assert (n == [3]).all() + o = fromstring("1.0 2f.0f 3.8 2.2", dtype=float32, sep=" ") + assert len(o) == 2 + assert o[0] == 1.0 + assert o[1] == 2.0 + p = fromstring("1.0,,2.0,3.0", sep=",") + assert (p == [1.0, -1.0, 2.0, 3.0]).all() + q = fromstring("1.0,,2.0,3.0", sep=" ") + assert (q == [1.0]).all() + r = fromstring("\x01\x00\x02", dtype='bool') + assert (r == [True, False, True]).all() + s = fromstring("1,2,3,,5", dtype=bool, sep=",") + assert (s == [True, True, True, False, True]).all() + t = fromstring("", bool) + assert (t == []).all() + u = fromstring("\x01\x00\x00\x00\x00\x00\x00\x00", dtype=int) + if sys.maxint > 2 ** 31 - 1: + assert (u == [1]).all() + else: + assert (u == [1, 0]).all() + + def test_fromstring_types(self): + from numpypy import (fromstring, int8, int16, int32, int64, uint8, + uint16, uint32, float32, float64) + + a = fromstring('\xFF', dtype=int8) + assert a[0] == -1 + b = fromstring('\xFF', dtype=uint8) + assert b[0] == 255 + c = fromstring('\xFF\xFF', dtype=int16) + assert c[0] == -1 + d = fromstring('\xFF\xFF', dtype=uint16) + assert d[0] == 65535 + e = fromstring('\xFF\xFF\xFF\xFF', dtype=int32) + assert e[0] == -1 + f = fromstring('\xFF\xFF\xFF\xFF', dtype=uint32) + assert repr(f[0]) == '4294967295' + g = fromstring('\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF', dtype=int64) + assert g[0] == -1 + h = fromstring(self.float32val, dtype=float32) + assert h[0] == float32(5.2) + i = fromstring(self.float64val, dtype=float64) + assert i[0] == float64(300.4) + j = fromstring(self.ulongval, dtype='L') + assert j[0] == 12 + + + def test_fromstring_invalid(self): + from numpypy import fromstring, uint16, uint8, int32 + #default dtype is 64-bit float, so 3 bytes should fail + raises(ValueError, fromstring, "\x01\x02\x03") + #3 bytes is not modulo 2 bytes (int16) + raises(ValueError, fromstring, "\x01\x03\x03", dtype=uint16) + #5 bytes is larger than 3 bytes + raises(ValueError, fromstring, "\x01\x02\x03", count=5, dtype=uint8) class AppTestRepr(BaseNumpyAppTest): diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -8,6 +8,7 @@ from pypy.rlib.objectmodel import specialize from pypy.rlib.rarithmetic import LONG_BIT, widen from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rlib.rstruct.runpack import runpack def simple_unary_op(func): @@ -55,6 +56,7 @@ class Primitive(object): _mixin_ = True + def get_element_size(self): return rffi.sizeof(self.T) @@ -84,6 +86,9 @@ def _coerce(self, space, w_item): raise NotImplementedError + def default_fromstring(self, space): + raise 
NotImplementedError + def read(self, storage, width, i, offset): return self.box(libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), width, storage, i, offset @@ -102,6 +107,9 @@ width, storage, i, offset, value ) + def runpack_str(self, s): + return self.box(runpack(self.format_code, s)) + @simple_binary_op def add(self, v1, v2): return v1 + v2 @@ -164,6 +172,7 @@ class Bool(BaseType, Primitive): T = lltype.Bool BoxType = interp_boxes.W_BoolBox + format_code = "?" True = BoxType(True) False = BoxType(False) @@ -193,6 +202,9 @@ def for_computation(self, v): return int(v) + def default_fromstring(self, space): + return self.box(False) + class Integer(Primitive): _mixin_ = True @@ -206,6 +218,9 @@ def for_computation(self, v): return widen(v) + def default_fromstring(self, space): + return self.box(0) + @simple_binary_op def div(self, v1, v2): if v2 == 0: @@ -241,42 +256,52 @@ class Int8(BaseType, Integer): T = rffi.SIGNEDCHAR BoxType = interp_boxes.W_Int8Box + format_code = "b" class UInt8(BaseType, Integer): T = rffi.UCHAR BoxType = interp_boxes.W_UInt8Box + format_code = "B" class Int16(BaseType, Integer): T = rffi.SHORT BoxType = interp_boxes.W_Int16Box + format_code = "h" class UInt16(BaseType, Integer): T = rffi.USHORT BoxType = interp_boxes.W_UInt16Box + format_code = "H" class Int32(BaseType, Integer): T = rffi.INT BoxType = interp_boxes.W_Int32Box + format_code = "i" class UInt32(BaseType, Integer): T = rffi.UINT BoxType = interp_boxes.W_UInt32Box + format_code = "I" class Long(BaseType, Integer): T = rffi.LONG BoxType = interp_boxes.W_LongBox + format_code = "l" class ULong(BaseType, Integer): T = rffi.ULONG BoxType = interp_boxes.W_ULongBox + format_code = "L" class Int64(BaseType, Integer): T = rffi.LONGLONG BoxType = interp_boxes.W_Int64Box + format_code = "q" class UInt64(BaseType, Integer): T = rffi.ULONGLONG BoxType = interp_boxes.W_UInt64Box + format_code = "Q" def _coerce(self, space, w_item): try: @@ -304,6 +329,9 @@ def for_computation(self, v): return float(v) + def default_fromstring(self, space): + return self.box(-1.0) + @simple_binary_op def div(self, v1, v2): try: @@ -403,7 +431,9 @@ class Float32(BaseType, Float): T = rffi.FLOAT BoxType = interp_boxes.W_Float32Box + format_code = "f" class Float64(BaseType, Float): T = rffi.DOUBLE - BoxType = interp_boxes.W_Float64Box \ No newline at end of file + BoxType = interp_boxes.W_Float64Box + format_code = "d" \ No newline at end of file diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -24,10 +24,8 @@ # to the interpreter hoping to immediately run the JITted # code; but instead, we Trace again, just because another # counter was also about to reach its limit... - loop, = log.loops_by_filename(self.filepath, is_entry_bridge='*') + loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ - ... - label(..., descr=...) 
i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) setfield_gc(p45, i29, descr=) diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -514,44 +514,41 @@ if maxsplit == 0: return space.wrap(input) - #print "from replace, input: %s, sub: %s, by: %s" % (input, sub, by) + # An ok guess at the default size + builder = StringBuilder(len(input)) + first = True if not sub: upper = len(input) if maxsplit > 0 and maxsplit < upper + 2: upper = maxsplit - 1 assert upper >= 0 - substrings_w = [""] + first = False for i in range(upper): - c = input[i] - substrings_w.append(c) - substrings_w.append(input[upper:]) + builder.append(by) + builder.append(input[i]) + builder.append(by) + builder.append_slice(input, upper, len(input)) else: start = 0 sublen = len(sub) - substrings_w = [] while maxsplit != 0: next = input.find(sub, start) if next < 0: break - substrings_w.append(input[start:next]) + if not first: + builder.append(by) + first = False + builder.append_slice(input, start, next) start = next + sublen maxsplit -= 1 # NB. if it's already < 0, it stays < 0 - substrings_w.append(input[start:]) + if not first: + builder.append(by) + builder.append_slice(input, start, len(input)) - try: - # XXX conservative estimate. If your strings are that close - # to overflowing, bad luck. - one = ovfcheck(len(substrings_w) * len(by)) - ovfcheck(one + len(input)) - except OverflowError: - raise OperationError( - space.w_OverflowError, - space.wrap("replace string is too long")) - - return space.wrap(by.join(substrings_w)) + return space.wrap(builder.build()) def str_replace__String_ANY_ANY_ANY(space, w_self, w_sub, w_by, w_maxsplit): diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -395,6 +395,7 @@ 'retrace_limit': 5, 'max_retrace_guards': 15, 'enable_opts': 'all', + 'decay_halflife': 40, } unroll_parameters = unrolling_iterable(PARAMETERS.items()) DEFAULT = object() diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py --- a/pypy/rlib/longlong2float.py +++ b/pypy/rlib/longlong2float.py @@ -79,19 +79,19 @@ longlong2float = rffi.llexternal( "pypy__longlong2float", [rffi.LONGLONG], rffi.DOUBLE, _callable=longlong2float_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) float2longlong = rffi.llexternal( "pypy__float2longlong", [rffi.DOUBLE], rffi.LONGLONG, _callable=float2longlong_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, _callable=uint2singlefloat_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) singlefloat2uint = rffi.llexternal( "pypy__singlefloat2uint", [rffi.FLOAT], rffi.UINT, _callable=singlefloat2uint_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) diff --git a/pypy/rpython/rint.py b/pypy/rpython/rint.py --- a/pypy/rpython/rint.py +++ b/pypy/rpython/rint.py @@ -126,10 +126,7 @@ rtype_inplace_rshift = rtype_rshift def rtype_pow(_, hop): - raise MissingRTypeOperation("pow(int, int)" - " (use float**float instead; it is too" - " easy to overlook the overflow" - " issues of int**int)") + raise 
MissingRTypeOperation("'**' not supported in RPython") rtype_pow_ovf = rtype_pow rtype_inplace_pow = rtype_pow diff --git a/pypy/tool/clean_old_branches.py b/pypy/tool/clean_old_branches.py --- a/pypy/tool/clean_old_branches.py +++ b/pypy/tool/clean_old_branches.py @@ -11,14 +11,17 @@ sys.exit(1) def heads(args): - g = os.popen(r"hg heads --topo %s --template '{branches} {node|short}\n'" + g = os.popen(r"hg heads --topo %s --template '{node|short}:{branches}\n'" % args, 'r') result = g.read() g.close() result = result.splitlines(False) - result = [s for s in result - if not s.startswith(' ') - and not s.startswith('closed-branches ')] + for line in result: + if len(line.split(':', 1)) != 2: + raise ValueError("'result' contains: %r" % line) + result = [s.split(':', 1) for s in result] + result = [(head, branch) for (head, branch) in result + if branch not in ['', 'closed-branches']] return result all_heads = heads("--closed") @@ -34,8 +37,7 @@ closed_heads.reverse() -for branch_head in closed_heads: - branch, head = branch_head.split() +for head, branch in closed_heads: print '\t', branch print print 'The branches listed above will be merged to "closed-branches".' @@ -54,8 +56,7 @@ print '*** error %r' % (err,) sys.exit(1) -for branch_head in closed_heads: - branch, head = branch_head.split() +for head, branch in closed_heads: print print '***** %s ***** %s *****' % (branch, head) do("hg up --clean closed-branches") diff --git a/pypy/tool/gcc_cache.py b/pypy/tool/gcc_cache.py --- a/pypy/tool/gcc_cache.py +++ b/pypy/tool/gcc_cache.py @@ -11,9 +11,6 @@ # Import 'platform' every time, the compiler may have been changed from pypy.translator.platform import platform cache_dir = cache_dir_root.join(cachename).ensure(dir=1) - c_files.extend([py.path.local(f) for f in eci.separate_module_files]) - eci = ExternalCompilationInfo(**eci._copy_attributes()) - eci.separate_module_files = () filecontents = [c_file.read() for c_file in c_files] key = repr((filecontents, eci, platform.key())) hash = md5(key).hexdigest() From noreply at buildbot.pypy.org Mon Dec 19 23:23:15 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Dec 2011 23:23:15 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: hopefully fix translation Message-ID: <20111219222315.D40BC823F8@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50730:04190107c193 Date: 2011-12-20 00:22 +0200 http://bitbucket.org/pypy/pypy/changeset/04190107c193/ Log: hopefully fix translation diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -423,89 +423,6 @@ res.append(")") return space.wrap(res.build()) - def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): - '''Modifies builder with a representation of the array/slice - The items will be seperated by a comma if comma is 1 - Multidimensional arrays/slices will span a number of lines, - each line will begin with indent. 
- ''' - size = self.size - if size < 1: - builder.append('[]') - return - if size > 1000: - # Once this goes True it does not go back to False for recursive - # calls - use_ellipsis = True - dtype = self.find_dtype() - ndims = len(self.shape) - i = 0 - start = True - builder.append('[') - if ndims > 1: - if use_ellipsis: - for i in range(3): - if start: - start = False - else: - builder.append(',' * comma + '\n') - if ndims == 3: - builder.append('\n' + indent) - else: - builder.append(indent) - # create_slice requires len(chunks) > 1 in order to reduce - # shape - view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) - view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) - builder.append('\n' + indent + '..., ') - i = self.shape[0] - 3 - while i < self.shape[0]: - if start: - start = False - else: - builder.append(',' * comma + '\n') - if ndims == 3: - builder.append('\n' + indent) - else: - builder.append(indent) - # create_slice requires len(chunks) > 1 in order to reduce - # shape - view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) - view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) - i += 1 - elif ndims == 1: - spacer = ',' * comma + ' ' - item = self.start - # An iterator would be a nicer way to walk along the 1d array, but - # how do I reset it if printing ellipsis? iterators have no - # "set_offset()" - i = 0 - if use_ellipsis: - for i in range(3): - if start: - start = False - else: - builder.append(spacer) - builder.append(dtype.itemtype.str_format(self.getitem(item))) - item += self.strides[0] - # Add a comma only if comma is False - this prevents adding two - # commas - builder.append(spacer + '...' + ',' * (1 - comma)) - # Ugly, but can this be done with an iterator? 
- item = self.start + self.backstrides[0] - 2 * self.strides[0] - i = self.shape[0] - 3 - while i < self.shape[0]: - if start: - start = False - else: - builder.append(spacer) - builder.append(dtype.itemtype.str_format(self.getitem(item))) - item += self.strides[0] - i += 1 - else: - builder.append('[') - builder.append(']') - def descr_str(self, space): ret = StringBuilder() concrete = self.get_concrete_or_scalar() @@ -513,30 +430,6 @@ return space.wrap(ret.build()) @jit.unroll_safe - def _index_of_single_item(self, space, w_idx): - if space.isinstance_w(w_idx, space.w_int): - idx = space.int_w(w_idx) - if idx < 0: - idx = self.shape[0] + idx - if idx < 0 or idx >= self.shape[0]: - raise OperationError(space.w_IndexError, - space.wrap("index out of range")) - return self.start + idx * self.strides[0] - index = [space.int_w(w_item) - for w_item in space.fixedview(w_idx)] - item = self.start - for i in range(len(index)): - v = index[i] - if v < 0: - v += self.shape[i] - if v < 0 or v >= self.shape[i]: - raise operationerrfmt(space.w_IndexError, - "index (%d) out of range (0<=index<%d", i, self.shape[i], - ) - item += v * self.strides[i] - return item - - @jit.unroll_safe def _single_item_result(self, space, w_idx): """ The result of getitem/setitem is a single item if w_idx is a list of scalars that match the size of shape @@ -575,7 +468,6 @@ def descr_getitem(self, space, w_idx): if self._single_item_result(space, w_idx): concrete = self.get_concrete() - assert isinstance(concrete, ConcreteArray) if len(concrete.shape) < 1: raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) @@ -588,7 +480,6 @@ self.invalidated() if self._single_item_result(space, w_idx): concrete = self.get_concrete() - assert isinstance(concrete, ConcreteArray) if len(concrete.shape) < 1: raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) @@ -647,7 +538,7 @@ new_backstrides = [0] * ndims for nd in range(ndims): new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] - arr = W_NDimSlice(self.start, new_strides, new_backstrides, + arr = W_NDimSlice(concrete.start, new_strides, new_backstrides, new_shape, self) else: # Create copy with contiguous data @@ -693,7 +584,7 @@ strides.append(concrete.strides[i]) backstrides.append(concrete.backstrides[i]) shape.append(concrete.shape[i]) - return space.wrap(W_NDimSlice(self.start, strides[:], + return space.wrap(W_NDimSlice(concrete.start, strides[:], backstrides[:], shape[:], concrete)) def descr_get_flatiter(self, space): @@ -810,7 +701,9 @@ def get_concrete(self): self.force_if_needed() - return self.forced_result + res = self.forced_result + assert isinstance(res, ConcreteArray) + return res def getitem(self, item): return self.get_concrete().getitem(item) @@ -947,6 +840,113 @@ return signature.ViewSignature(self.dtype) return signature.ArraySignature(self.dtype) + def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): + '''Modifies builder with a representation of the array/slice + The items will be seperated by a comma if comma is 1 + Multidimensional arrays/slices will span a number of lines, + each line will begin with indent. 
+ ''' + size = self.size + if size < 1: + builder.append('[]') + return + if size > 1000: + # Once this goes True it does not go back to False for recursive + # calls + use_ellipsis = True + dtype = self.find_dtype() + ndims = len(self.shape) + i = 0 + start = True + builder.append('[') + if ndims > 1: + if use_ellipsis: + for i in range(3): + if start: + start = False + else: + builder.append(',' * comma + '\n') + if ndims == 3: + builder.append('\n' + indent) + else: + builder.append(indent) + # create_slice requires len(chunks) > 1 in order to reduce + # shape + view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]).get_concrete() + view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) + builder.append('\n' + indent + '..., ') + i = self.shape[0] - 3 + while i < self.shape[0]: + if start: + start = False + else: + builder.append(',' * comma + '\n') + if ndims == 3: + builder.append('\n' + indent) + else: + builder.append(indent) + # create_slice requires len(chunks) > 1 in order to reduce + # shape + view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]).get_concrete() + view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) + i += 1 + elif ndims == 1: + spacer = ',' * comma + ' ' + item = self.start + # An iterator would be a nicer way to walk along the 1d array, but + # how do I reset it if printing ellipsis? iterators have no + # "set_offset()" + i = 0 + if use_ellipsis: + for i in range(3): + if start: + start = False + else: + builder.append(spacer) + builder.append(dtype.itemtype.str_format(self.getitem(item))) + item += self.strides[0] + # Add a comma only if comma is False - this prevents adding two + # commas + builder.append(spacer + '...' + ',' * (1 - comma)) + # Ugly, but can this be done with an iterator? 
+ item = self.start + self.backstrides[0] - 2 * self.strides[0] + i = self.shape[0] - 3 + while i < self.shape[0]: + if start: + start = False + else: + builder.append(spacer) + builder.append(dtype.itemtype.str_format(self.getitem(item))) + item += self.strides[0] + i += 1 + else: + builder.append('[') + builder.append(']') + + @jit.unroll_safe + def _index_of_single_item(self, space, w_idx): + if space.isinstance_w(w_idx, space.w_int): + idx = space.int_w(w_idx) + if idx < 0: + idx = self.shape[0] + idx + if idx < 0 or idx >= self.shape[0]: + raise OperationError(space.w_IndexError, + space.wrap("index out of range")) + return self.start + idx * self.strides[0] + index = [space.int_w(w_item) + for w_item in space.fixedview(w_idx)] + item = self.start + for i in range(len(index)): + v = index[i] + if v < 0: + v += self.shape[i] + if v < 0 or v >= self.shape[i]: + raise operationerrfmt(space.w_IndexError, + "index (%d) out of range (0<=index<%d", i, self.shape[i], + ) + item += v * self.strides[i] + return item + class ViewArray(ConcreteArray): def copy(self): @@ -965,6 +965,7 @@ class W_NDimSlice(ViewArray): def __init__(self, start, strides, backstrides, shape, parent): + assert isinstance(parent, ConcreteArray) if isinstance(parent, W_NDimSlice): parent = parent.parent size = 1 From noreply at buildbot.pypy.org Mon Dec 19 23:32:35 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Dec 2011 23:32:35 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: make it not print everything Message-ID: <20111219223235.25E47823F8@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50731:1531f3617e45 Date: 2011-12-20 00:32 +0200 http://bitbucket.org/pypy/pypy/changeset/1531f3617e45/ Log: make it not print everything diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -252,7 +252,7 @@ # grow the list done = 0 while done < len(self._seen_extras): - print self._seen_extras + #print self._seen_extras ann.build_types(self._seen_extras[done], [], complete_now=False) done += 1 From noreply at buildbot.pypy.org Tue Dec 20 00:27:48 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 20 Dec 2011 00:27:48 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: fixed tests (ugly!) Message-ID: <20111219232748.7C793823F8@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: refactor-signature Changeset: r50732:6b7fcfaea904 Date: 2011-12-19 17:27 -0600 http://bitbucket.org/pypy/pypy/changeset/6b7fcfaea904/ Log: fixed tests (ugly!) diff --git a/pypy/module/micronumpy/test/test_ztranslation.py b/pypy/module/micronumpy/test/test_ztranslation.py --- a/pypy/module/micronumpy/test/test_ztranslation.py +++ b/pypy/module/micronumpy/test/test_ztranslation.py @@ -1,5 +1,8 @@ - +from pypy.module.micronumpy import signature from pypy.objspace.fake.checkmodule import checkmodule def test_numpy_translates(): + # XXX: If there are signatures floating around this might explode. This fix + # is ugly. 
+ signature.known_sigs.clear() checkmodule('micronumpy') From noreply at buildbot.pypy.org Tue Dec 20 00:27:49 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 20 Dec 2011 00:27:49 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: merged upstream Message-ID: <20111219232749.A2603823F8@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: refactor-signature Changeset: r50733:a42e683256d3 Date: 2011-12-19 17:27 -0600 http://bitbucket.org/pypy/pypy/changeset/a42e683256d3/ Log: merged upstream diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -252,7 +252,7 @@ # grow the list done = 0 while done < len(self._seen_extras): - print self._seen_extras + #print self._seen_extras ann.build_types(self._seen_extras[done], [], complete_now=False) done += 1 From noreply at buildbot.pypy.org Tue Dec 20 01:11:34 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 20 Dec 2011 01:11:34 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: kill 2 pieces of dead code. Message-ID: <20111220001134.B99CD823F8@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: refactor-signature Changeset: r50734:d628f620b13e Date: 2011-12-19 18:10 -0600 http://bitbucket.org/pypy/pypy/changeset/d628f620b13e/ Log: kill 2 pieces of dead code. diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -811,9 +811,6 @@ def getitem(self, item): return self.dtype.getitem(self.storage, item) - def setitem_w(self, space, item, w_value): - return self.setitem(item, self.dtype.coerce(space, w_value)) - def setitem(self, item, value): self.invalidated() self.dtype.setitem(self.storage, item, value) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -35,7 +35,7 @@ self.arrays = arrays[:] for i in range(len(self.iterators)): iter = self.iterators[i] - if not isinstance(iter, ConstantIterator):# or not isinstance(iter, BroadcastIterator): + if not isinstance(iter, ConstantIterator): self.final_iter = i break else: @@ -255,7 +255,7 @@ class Call2(Signature): _immutable_fields_ = ['binfunc', 'name', 'calc_dtype', 'left', 'right'] - + def __init__(self, func, name, calc_dtype, left, right): self.binfunc = func self.left = left @@ -288,7 +288,7 @@ def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): from pypy.module.micronumpy.interp_numarray import Call2 - + assert isinstance(arr, Call2) self.left._create_iter(iterlist, arraylist, arr.left, res_shape, chunklist) From noreply at buildbot.pypy.org Tue Dec 20 01:11:35 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 20 Dec 2011 01:11:35 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: my review notes Message-ID: <20111220001135.E185B823F8@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: refactor-signature Changeset: r50735:feddaef940ac Date: 2011-12-19 18:11 -0600 http://bitbucket.org/pypy/pypy/changeset/feddaef940ac/ Log: my review notes diff --git a/pypy/module/micronumpy/REVIEW.txt b/pypy/module/micronumpy/REVIEW.txt new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/REVIEW.txt @@ -0,0 +1,16 @@ +REVIEW NOTES +============ + +* Scalar.reshape should turn the value into an array correct for an input of + ``1`` or 
``(1,)``. +* VirtualSlice vs. W_NDimSlice? +* Call{1, 2}.create_sig, should it call forced_result.create_sig(), instead of + array_sig()? If not, why not? +* W_NDimSlice.__init__ calls ConcreteArray.__init__ instead of + ViewArray.__init__, W_FlatIterator as well. +* Better names for sigeq and sigeq2, sighash doesn't say if numberings are + included in the hash. +* Cleanup of the iterator and array caching/numbering. It's a mess right now: + * _creater_iter updates the arraylist + * Why do Scalars need an iterator at all? + * Do views share storage with concrete arrays or other views? From noreply at buildbot.pypy.org Tue Dec 20 01:32:41 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 20 Dec 2011 01:32:41 +0100 (CET) Subject: [pypy-commit] pypy py3k: - range() object now allows large numbers above sys.maxint Message-ID: <20111220003241.2D25E823F8@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50736:3a17a31ac77a Date: 2011-12-19 17:15 +0100 http://bitbucket.org/pypy/pypy/changeset/3a17a31ac77a/ Log: - range() object now allows large numbers above sys.maxint - add range.count and range.__contains__ diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -13,13 +13,7 @@ from pypy.rlib.rbigint import rbigint -def get_len_of_range(space, lo, hi, step): - """ - Return number of items in range (lo, hi, step). - Raise ValueError if step == 0 and OverflowError if the true value is too - large to fit in a signed long. - """ - +def get_len_of_range(lo, hi, step): # If lo >= hi, the range is empty. # Else if n values are in the range, the last one is # lo + (n-1)*step, which must be <= hi-1. Rearranging, @@ -30,23 +24,31 @@ # for the RHS numerator is hi=M, lo=-M-1, and then # hi-lo-1 = M-(-M-1)-1 = 2*M. Therefore unsigned long has enough # precision to compute the RHS exactly. - if step == 0: - raise OperationError(space.w_ValueError, - space.wrap("step argument must not be zero")) - elif step < 0: + assert step != 0 + if step < 0: lo, hi, step = hi, lo, -step if lo < hi: uhi = r_uint(hi) ulo = r_uint(lo) diff = uhi - ulo - 1 n = intmask(diff // r_uint(step) + 1) - if n < 0: - raise OperationError(space.w_OverflowError, - space.wrap("result has too many items")) else: n = 0 return n +def compute_range_length(space, w_start, w_stop, w_step): + # Algorithm is equal to that of get_len_of_range(), but operates + # on wrapped objects. 
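(Illustration, not part of the changeset: the wrapped arithmetic that follows mirrors get_len_of_range(), ignoring its unsigned-overflow handling. A rough plain-Python equivalent, with hypothetical names:

    def range_length(lo, hi, step):
        # step is assumed non-zero; a negative step is normalized first
        if step < 0:
            lo, hi, step = hi, lo, -step
        if lo < hi:
            return (hi - lo - 1) // step + 1
        return 0

    range_length(1, 10, 3)     # 3, matches len(range(1, 10, 3))
    range_length(10, 0, -2)    # 5, matches len(range(10, 0, -2))

Doing the same computation on wrapped objects via space.sub/space.floordiv is what lets range() lengths exceed sys.maxint.)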
+ if space.is_true(space.lt(w_step, space.newint(0))): + w_start, w_stop = w_stop, w_start + w_step = space.neg(w_step) + if space.is_true(space.lt(w_start, w_stop)): + w_diff = space.sub(space.sub(w_stop, w_start), space.newint(1)) + w_len = space.add(space.floordiv(w_diff, w_step), space.newint(1)) + else: + w_len = space.newint(0) + return w_len + @specialize.arg(2) @jit.look_inside_iff(lambda space, args, implementation_of: @@ -227,69 +229,109 @@ class W_Range(Wrappable): - def __init__(self, space, start, len, step): - self.space = space - self.start = start - self.len = len - self.step = step + def __init__(self, w_start, w_stop, w_step, w_length): + self.w_start = w_start + self.w_stop = w_stop + self.w_step = w_step + self.w_length = w_length def descr_new(space, w_subtype, w_start, w_stop=None, w_step=1): - start = _toint(space, w_start) - step = _toint(space, w_step) + w_start = space.index(w_start) if space.is_w(w_stop, space.w_None): # only 1 argument provided - start, stop = 0, start + w_start, w_stop = space.newint(0), w_start else: - stop = _toint(space, w_stop) - howmany = get_len_of_range(space, start, stop, step) + w_stop = space.index(w_stop) + w_step = space.index(w_step) + try: + step = space.int_w(w_step) + except OperationError: + pass # We know it's not zero + else: + if step == 0: + raise OperationError(space.w_ValueError, space.wrap( + "step argument must not be zero")) + w_length = compute_range_length(space, w_start, w_stop, w_step) obj = space.allocate_instance(W_Range, w_subtype) - W_Range.__init__(obj, space, start, howmany, step) + W_Range.__init__(obj, w_start, w_stop, w_step, w_length) return space.wrap(obj) - def descr_repr(self): - stop = self.start + self.len * self.step - if self.start == 0 and self.step == 1: - s = "range(%d)" % (stop,) - elif self.step == 1: - s = "range(%d, %d)" % (self.start, stop) + def descr_repr(self, space): + if not space.is_true(space.eq(self.w_step, space.newint(1))): + return space.mod(space.wrap("range(%d, %d, %d)"), + space.newtuple([self.w_start, self.w_stop, + self.w_step])) + elif space.is_true(space.eq(self.w_start, space.newint(0))): + return space.mod(space.wrap("range(%d)"), + space.newtuple([self.w_stop])) else: - s = "range(%d, %d, %d)" %(self.start, stop, self.step) - return self.space.wrap(s) + return space.mod(space.wrap("range(%d, %d)"), + space.newtuple([self.w_start, self.w_stop])) def descr_len(self): - return self.space.wrap(self.len) + return self.w_length - @unwrap_spec(i='index') - def descr_getitem(self, i): + def descr_getitem(self, space, w_index): # range does NOT support slicing - space = self.space - len = self.len - if i < 0: - i += len - if 0 <= i < len: - return space.wrap(self.start + i * self.step) - raise OperationError(space.w_IndexError, - space.wrap("range object index out of range")) + # return self.start + (i * self.step) + return space.add(self.w_start, space.mul(w_index, self.w_step)) - def descr_iter(self): - return self.space.wrap(W_RangeIterator(self.space, self.start, - self.len, self.step)) + def descr_iter(self, space): + return space.wrap(W_RangeIterator( + space, self.w_start, self.w_step, self.w_length)) - def descr_reversed(self): - lastitem = self.start + (self.len-1) * self.step - return self.space.wrap(W_RangeIterator(self.space, lastitem, - self.len, -self.step)) + def descr_reversed(self, space): + # lastitem = self.start + (self.length-1) * self.step + w_lastitem = space.add( + self.w_start, + space.mul(space.sub(self.w_length, space.newint(1)), + self.w_step)) + 
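(Illustration, not part of the changeset: the value computed just above is the last item, start + (length-1)*step, so for range(1, 10, 3), whose length is 3, the reversed iterator starts at 1 + (3-1)*3 == 7 and walks with step -3:

    list(reversed(range(1, 10, 3)))    # [7, 4, 1]
)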
return space.wrap(W_RangeIterator( + space, w_lastitem, space.neg(self.w_step), self.w_length)) - def descr_reduce(self): - space = self.space + def descr_reduce(self, space): return space.newtuple( [space.type(self), - space.newtuple([space.wrap(self.start), - space.wrap(self.start + self.len * self.step), - space.wrap(self.step)]) + space.newtuple([self.w_start, self.w_stop, self.w_step]), ]) -def _toint(space, w_obj): - return space.int_w(space.index(w_obj)) + def _contains_long(self, space, w_item): + # Check if the value can possibly be in the range. + if space.is_true(space.gt(self.w_step, space.newint(0))): + # positive steps: start <= ob < stop + if not (space.is_true(space.le(self.w_start, w_item)) and + space.is_true(space.lt(w_item, self.w_stop))): + return False + else: + # negative steps: stop < ob <= start + if not (space.is_true(space.lt(self.w_stop, w_item)) and + space.is_true(space.le(w_item, self.w_start))): + return False + # Check that the stride does not invalidate ob's membership. + if space.is_true(space.mod(space.sub(w_item, self.w_start), + self.w_step)): + return False + return True + + def descr_contains(self, space, w_item): + try: + int_value = space.int_w(w_item) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + return space.sequence_contains(self, w_item) + else: + return space.newbool(self._contains_long(space, w_item)) + + def descr_count(self, space, w_item): + try: + int_value = space.int_w(w_item) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + return space.sequence_count(self, w_item) + else: + return space.newint(self._contains_long(space, w_item)) + W_Range.typedef = TypeDef("range", __new__ = interp2app(W_Range.descr_new.im_func), @@ -299,40 +341,45 @@ __len__ = interp2app(W_Range.descr_len), __reversed__ = interp2app(W_Range.descr_reversed), __reduce__ = interp2app(W_Range.descr_reduce), + __contains__ = interp2app(W_Range.descr_contains), + count = interp2app(W_Range.descr_count), ) class W_RangeIterator(Wrappable): - def __init__(self, space, current, remaining, step): - self.space = space - self.current = current - self.remaining = remaining - self.step = step + def __init__(self, space, w_start, w_step, w_len, w_index=None): + self.w_start = w_start + self.w_step = w_step + self.w_len = w_len + if w_index is None: + w_index = space.newint(0) + self.w_index = w_index - def descr_iter(self): - return self.space.wrap(self) + def descr_iter(self, space): + return space.wrap(self) - def descr_next(self): - if self.remaining > 0: - item = self.current - self.current = item + self.step - self.remaining -= 1 - return self.space.wrap(item) - raise OperationError(self.space.w_StopIteration, self.space.w_None) + def descr_next(self, space): + if space.is_true(space.lt(self.w_index, self.w_len)): + w_index = space.add(self.w_index, space.newint(1)) + w_product = space.mul(self.w_index, self.w_step) + w_result = space.add(w_product, self.w_start) + self.w_index = w_index + return w_result + raise OperationError(space.w_StopIteration, space.w_None) - def descr_len(self): - return self.space.wrap(self.remaining) + def descr_len(self, space): + return space.sub(self.w_length, self.w_index) - def descr_reduce(self): + def descr_reduce(self, space): from pypy.interpreter.mixedmodule import MixedModule - space = self.space w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) - new_inst = mod.get('rangeiter_new') - w = space.wrap - nt = space.newtuple - tup = 
[w(self.current), w(self.remaining), w(self.step)] - return nt([new_inst, nt(tup)]) + return space.newtuple( + [mod.get('rangeiter_new'), + space.newtuple([self.w_start, self.w_step, + self.w_len, self.w_index]), + ]) + W_RangeIterator.typedef = TypeDef("rangeiterator", __iter__ = interp2app(W_RangeIterator.descr_iter), diff --git a/pypy/module/__builtin__/test/test_range.py b/pypy/module/__builtin__/test/test_range.py --- a/pypy/module/__builtin__/test/test_range.py +++ b/pypy/module/__builtin__/test/test_range.py @@ -112,3 +112,18 @@ expected.append(a) a += step assert lst == expected + + def test_range_contains(self): + assert 3 in range(5) + assert 3 not in range(3) + assert 3 not in range(4, 5) + assert 3 in range(1, 5, 2) + assert 3 not in range(0, 5, 2) + assert '3' not in range(5) + + def test_range_count(self): + assert range(5).count(3) == 1 + assert type(range(5).count(3)) is int + assert range(0, 5, 2).count(3) == 0 + assert range(5).count(3.0) == 1 + assert range(5).count('3') == 0 diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ b/pypy/module/_pickle_support/maker.py @@ -66,10 +66,9 @@ new_generator.running = running return space.wrap(new_generator) - at unwrap_spec(current=int, remaining=int, step=int) -def rangeiter_new(space, current, remaining, step): +def rangeiter_new(space, w_start, w_step, w_len, w_index): from pypy.module.__builtin__.functional import W_RangeIterator - new_iter = W_RangeIterator(space, current, remaining, step) + new_iter = W_RangeIterator(space, w_start, w_step, w_len, w_index) return space.wrap(new_iter) @unwrap_spec(identifier=str) diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -393,9 +393,9 @@ w_descr = space.lookup(w_container, '__contains__') if w_descr is not None: return space.get_and_call_function(w_descr, w_container, w_item) - return space._contains(w_container, w_item) + return space.sequence_contains(w_container, w_item) - def _contains(space, w_container, w_item): + def sequence_contains(space, w_container, w_item): w_iter = space.iter(w_container) while 1: try: @@ -407,6 +407,19 @@ if space.eq_w(w_next, w_item): return space.w_True + def sequence_count(space, w_container, w_item): + w_iter = space.iter(w_container) + count = 0 + while 1: + try: + w_next = space.next(w_iter) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + return space.wrap(count) + if space.eq_w(w_next, w_item): + count += 1 + def hash(space, w_obj): w_hash = space.lookup(w_obj, '__hash__') if w_hash is None: From noreply at buildbot.pypy.org Tue Dec 20 01:32:42 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 20 Dec 2011 01:32:42 +0100 (CET) Subject: [pypy-commit] pypy py3k: Update _sha1 because of True Division Message-ID: <20111220003242.59C54823F8@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50737:ed43cc10a928 Date: 2011-12-19 17:15 +0100 http://bitbucket.org/pypy/pypy/changeset/ed43cc10a928/ Log: Update _sha1 because of True Division diff --git a/lib_pypy/_sha1.py b/lib_pypy/_sha1.py --- a/lib_pypy/_sha1.py +++ b/lib_pypy/_sha1.py @@ -63,7 +63,7 @@ def _bytelist2longBigEndian(list): "Transform a list of characters into a list of longs." 
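(Illustration, not part of the changeset: the hunk below switches to // because under Python 3's true division an int/int result is a float, which then cannot be used as a sequence repeat count:

    8 / 4            # 2.0
    8 // 4           # 2
    [0] * (8 / 4)    # TypeError: can't multiply sequence by non-int of type 'float'
    [0] * (8 // 4)   # [0, 0]
)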
- imax = len(list)/4 + imax = len(list) // 4 hl = [0L] * imax j = 0 From noreply at buildbot.pypy.org Tue Dec 20 01:32:43 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 20 Dec 2011 01:32:43 +0100 (CET) Subject: [pypy-commit] pypy py3k: Implement range slices, and test getitem with large numbers Message-ID: <20111220003243.8A307823F8@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50738:9e845f84d41f Date: 2011-12-19 22:49 +0100 http://bitbucket.org/pypy/pypy/changeset/9e845f84d41f/ Log: Implement range slices, and test getitem with large numbers diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -49,6 +49,60 @@ w_len = space.newint(0) return w_len +def compute_slice_indices3(space, w_slice, w_length): + "An W_Object version of W_SliceObject.indices3" + from pypy.objspace.std.sliceobject import W_SliceObject + assert isinstance(w_slice, W_SliceObject) + w_0 = space.newint(0) + w_1 = space.newint(1) + if space.is_w(w_slice.w_step, space.w_None): + w_step = w_1 + else: + w_step = space.index(w_slice.w_step) + if space.is_true(space.eq(w_step, w_0)): + raise OperationError(space.w_ValueError, + space.wrap("slice step cannot be zero")) + negative_step = space.is_true(space.lt(w_step, w_0)) + if space.is_w(w_slice.w_start, space.w_None): + if negative_step: + w_start = space.sub(w_length, w_1) + else: + w_start = w_0 + else: + w_start = space.index(w_slice.w_start) + if space.is_true(space.lt(w_start, w_0)): + w_start = space.add(w_start, w_length) + if space.is_true(space.lt(w_start, w_0)): + if negative_step: + w_start = space.newint(-1) + else: + w_start = w_0 + elif space.is_true(space.ge(w_start, w_length)): + if negative_step: + w_start = space.sub(w_length, w_1) + else: + w_start = w_length + if space.is_w(w_slice.w_stop, space.w_None): + if negative_step: + w_stop = space.newint(-1) + else: + w_stop = w_length + else: + w_stop = space.index(w_slice.w_stop) + if space.is_true(space.lt(w_stop, w_0)): + w_stop = space.add(w_stop, w_length) + if space.is_true(space.lt(w_stop, w_0)): + if negative_step: + w_stop = space.newint(-1) + else: + w_stop = w_0 + elif space.is_true(space.ge(w_stop, w_length)): + if negative_step: + w_stop = space.sub(w_length, w_1) + else: + w_stop = w_length + return w_start, w_stop, w_step + @specialize.arg(2) @jit.look_inside_iff(lambda space, args, implementation_of: @@ -270,10 +324,41 @@ def descr_len(self): return self.w_length - def descr_getitem(self, space, w_index): - # range does NOT support slicing + def _compute_item0(self, space, w_index): + "Get a range item, when known to be inside bounds" # return self.start + (i * self.step) return space.add(self.w_start, space.mul(w_index, self.w_step)) + + def _compute_item(self, space, w_index): + if space.is_true(space.lt(w_index, space.newint(0))): + w_index = space.add(w_index, self.w_length) + if space.is_true(space.ge(w_index, self.w_length)): + raise OperationError(space.w_IndexError, space.wrap( + "range object index out of range")) + return self._compute_item0(space, w_index) + + def _compute_slice(self, space, w_slice): + w_start, w_stop, w_step = compute_slice_indices3( + space, w_slice, self.w_length) + + w_substep = space.mul(self.w_step, w_step) + w_substart = self._compute_item0(space, w_start) + if w_stop: + w_substop = self._compute_item0(space, w_stop) + else: + w_substop = w_substart + + w_length = 
compute_range_length(space, w_substart, w_substop, w_substep) + obj = W_Range(w_substart, w_substop, w_substep, w_length) + return space.wrap(obj) + + def descr_getitem(self, space, w_index): + # Cannot use the usual space.decode_index methods, because + # numbers might not fit in longs. + if space.isinstance_w(w_index, space.w_slice): + return self._compute_slice(space, w_index) + else: + return self._compute_item(space, w_index) def descr_iter(self, space): return space.wrap(W_RangeIterator( diff --git a/pypy/module/__builtin__/test/test_range.py b/pypy/module/__builtin__/test/test_range.py --- a/pypy/module/__builtin__/test/test_range.py +++ b/pypy/module/__builtin__/test/test_range.py @@ -127,3 +127,31 @@ assert range(0, 5, 2).count(3) == 0 assert range(5).count(3.0) == 1 assert range(5).count('3') == 0 + + def test_range_getitem(self): + assert range(6)[3] == 3 + assert range(6)[-1] == 5 + raises(IndexError, range(6).__getitem__, 6) + + def test_range_slice(self): + # range objects don't implement equality in 3.2, use the repr + assert repr(range(6)[2:5]) == 'range(2, 5)' + assert repr(range(6)[-1:-3:-2]) == 'range(5, 3, -2)' + + def test_large_range(self): + import sys + def _range_len(x): + try: + length = len(x) + except OverflowError: + step = x[1] - x[0] + length = 1 + ((x[-1] - x[0]) // step) + return length + a = -sys.maxsize + b = sys.maxsize + expected_len = b - a + x = range(a, b) + assert a in x + assert b not in x + raises(OverflowError, len, x) + assert _range_len(x) == expected_len From noreply at buildbot.pypy.org Tue Dec 20 01:32:44 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 20 Dec 2011 01:32:44 +0100 (CET) Subject: [pypy-commit] pypy py3k: Kill more code related to old-style classes Message-ID: <20111220003244.BFA79823F8@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50739:957bcdcb424d Date: 2011-12-19 23:26 +0100 http://bitbucket.org/pypy/pypy/changeset/957bcdcb424d/ Log: Kill more code related to old-style classes diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -924,19 +924,14 @@ def exception_match(self, w_exc_type, w_check_class): """Checks if the given exception type matches 'w_check_class'.""" if self.is_w(w_exc_type, w_check_class): - return True # fast path (also here to handle string exceptions) - try: - if self.is_true(self.isinstance(w_check_class, self.w_tuple)): - for w_t in self.fixedview(w_check_class): - if self.exception_match(w_exc_type, w_t): - return True - else: - return False - return self.exception_issubclass_w(w_exc_type, w_check_class) - except OperationError, e: - if e.match(self, self.w_TypeError): # string exceptions maybe + return True # fast path + if self.is_true(self.isinstance(w_check_class, self.w_tuple)): + for w_t in self.fixedview(w_check_class): + if self.exception_match(w_exc_type, w_t): + return True + else: return False - raise + return self.exception_issubclass_w(w_exc_type, w_check_class) def call_obj_args(self, w_callable, w_obj, args): if not self.config.objspace.disable_call_speedhacks: @@ -1016,26 +1011,11 @@ return w_value return None - def is_oldstyle_instance(self, w_obj): - # xxx hack hack hack - from pypy.module.__builtin__.interp_classobj import W_InstanceObject - obj = self.interpclass_w(w_obj) - return obj is not None and isinstance(obj, W_InstanceObject) - def callable(self, w_obj): if self.lookup(w_obj, "__call__") is not None: - if 
self.is_oldstyle_instance(w_obj): - # ugly old style class special treatment, but well ... - try: - self.getattr(w_obj, self.wrap("__call__")) - return self.w_True - except OperationError, e: - if not e.match(self, self.w_AttributeError): - raise - return self.w_False - else: - return self.w_True - return self.w_False + return self.w_True + else: + return self.w_None def issequence_w(self, w_obj): return (self.findattr(w_obj, self.wrap("__getitem__")) is not None) @@ -1064,8 +1044,7 @@ # Equivalent to 'obj.__class__'. return self.type(w_obj) - # CPython rules allows old style classes or subclasses - # of BaseExceptions to be exceptions. + # CPython rules allows subclasses of BaseExceptions to be exceptions. # This is slightly less general than the case above, so we prefix # it with exception_ diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -218,14 +218,9 @@ def reversed(space, w_sequence): """Return a iterator that yields items of sequence in reverse.""" - w_reversed = None - if space.is_oldstyle_instance(w_sequence): - w_reversed = space.findattr(w_sequence, space.wrap("__reversed__")) - else: - w_reversed_descr = space.lookup(w_sequence, "__reversed__") - if w_reversed_descr is not None: - w_reversed = space.get(w_reversed_descr, w_sequence) - if w_reversed is not None: + w_reversed_descr = space.lookup(w_sequence, "__reversed__") + if w_reversed_descr is not None: + w_reversed = space.get(w_reversed_descr, w_sequence) return space.call_function(w_reversed) return space.wrap(W_ReversedIterator(space, w_sequence)) diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -62,9 +62,6 @@ @unwrap_spec(ObjSpace, W_Root, str) def lookup_special(space, w_obj, meth): """Lookup up a special method on an object.""" - if space.is_oldstyle_instance(w_obj): - w_msg = space.wrap("this doesn't do what you want on old-style classes") - raise OperationError(space.w_TypeError, w_msg) w_descr = space.lookup(w_obj, meth) if w_descr is None: return space.w_None diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -181,9 +181,8 @@ exc is a class object, this also returns true when given is an instance of a subclass. 
If exc is a tuple, all exceptions in the tuple (and recursively in subtuples) are searched for a match.""" - if (space.is_true(space.isinstance(w_given, space.w_BaseException)) or - space.is_oldstyle_instance(w_given)): - w_given_type = space.exception_getclass(w_given) + if space.is_true(space.isinstance(w_given, space.w_BaseException)): + w_given_type = space.type(w_given) else: w_given_type = w_given return space.exception_match(w_given_type, w_exc) diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -55,14 +55,7 @@ "'%s' object attribute '%s' is read-only", typename, name) -# Helpers for old-style and mix-style mixup - def _same_class_w(space, w_obj1, w_obj2, w_typ1, w_typ2): - if (space.is_oldstyle_instance(w_obj1) and - space.is_oldstyle_instance(w_obj2)): - assert isinstance(w_obj1, W_InstanceObject) - assert isinstance(w_obj2, W_InstanceObject) - return space.is_w(w_obj1.w_class, w_obj2.w_class) return space.is_w(w_typ1, w_typ2) @@ -350,7 +343,7 @@ w_typ1 = space.type(w_obj1) w_typ2 = space.type(w_obj2) w_left_src, w_left_impl = space.lookup_in_type_where(w_typ1, '__pow__') - if _same_class_w(space, w_obj1, w_obj2, w_typ1, w_typ2): + if space.is_w(w_typ1, w_typ2): w_right_impl = None else: w_right_src, w_right_impl = space.lookup_in_type_where(w_typ2, '__rpow__') @@ -616,53 +609,6 @@ space.lookup(w_obj, '__float__') is not None) - -# what is the maximum value slices can get on CPython? -# we need to stick to that value, because fake.py etc. -class Temp(object): - def __getslice__(self, i, j): - return j -slice_max = Temp()[:] -del Temp - -def old_slice_range_getlength(space, w_obj): - # NB. the language ref is inconsistent with the new-style class - # behavior when w_obj doesn't implement __len__(), so we just - # follow cpython. Also note that CPython slots make it easier - # to check for object implementing it or not. 
We just catch errors - # so this behavior is slightly different - try: - return space.len(w_obj) - except OperationError, e: - if not ((e.match(space, space.w_AttributeError) or - e.match(space, space.w_TypeError))): - raise - return None - -def old_slice_range(space, w_obj, w_start, w_stop): - """Only for backward compatibility for __getslice__()&co methods.""" - w_length = None - if space.is_w(w_start, space.w_None): - w_start = space.wrap(0) - else: - start = space.getindex_w(w_start, None) - w_start = space.wrap(start) - if start < 0: - w_length = old_slice_range_getlength(space, w_obj) - if w_length is not None: - w_start = space.add(w_start, w_length) - if space.is_w(w_stop, space.w_None): - w_stop = space.wrap(slice_max) - else: - stop = space.getindex_w(w_stop, None) - w_stop = space.wrap(stop) - if stop < 0: - if w_length is None: - w_length = old_slice_range_getlength(space, w_obj) - if w_length is not None: - w_stop = space.add(w_stop, w_length) - return w_start, w_stop - # regular methods def helpers def _make_binop_impl(symbol, specialnames): @@ -674,7 +620,7 @@ w_typ1 = space.type(w_obj1) w_typ2 = space.type(w_obj2) w_left_src, w_left_impl = space.lookup_in_type_where(w_typ1, left) - if _same_class_w(space, w_obj1, w_obj2, w_typ1, w_typ2): + if space.is_w(w_typ1, w_typ2): w_right_impl = None else: w_right_src, w_right_impl = space.lookup_in_type_where(w_typ2, right) @@ -716,21 +662,20 @@ w_first = w_obj1 w_second = w_obj2 # - if left == right and _same_class_w(space, w_obj1, w_obj2, - w_typ1, w_typ2): + if left == right and space.is_w(w_typ1, w_typ2): # for __eq__ and __ne__, if the objects have the same - # (old-style or new-style) class, then don't try the - # opposite method, which is the same one. + # class, then don't try the opposite method, which is the + # same one. w_right_impl = None else: # in all other cases, try the opposite method. w_right_src, w_right_impl = space.lookup_in_type_where(w_typ2,right) if space.is_w(w_typ1, w_typ2): - # if the type is the same, *or* if both are old-style classes, - # then don't reverse: try left first, right next. + # if the type is the same, then don't reverse: try + # left first, right next. pass elif space.is_true(space.issubtype(w_typ2, w_typ1)): - # for new-style classes, if typ2 is a subclass of typ1. + # if typ2 is a subclass of typ1. w_obj1, w_obj2 = w_obj2, w_obj1 w_left_impl, w_right_impl = w_right_impl, w_left_impl diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -173,20 +173,10 @@ return (w_complex.realval, w_complex.imagval) # # test for a '__complex__' method, and call it if found. - # special case old-style instances, like CPython does. 
w_z = None - if space.is_oldstyle_instance(w_complex): - try: - w_method = space.getattr(w_complex, space.wrap('__complex__')) - except OperationError, e: - if not e.match(space, space.w_AttributeError): - raise - else: - w_z = space.call_function(w_method) - else: - w_method = space.lookup(w_complex, '__complex__') - if w_method is not None: - w_z = space.get_and_call_function(w_method, w_complex) + w_method = space.lookup(w_complex, '__complex__') + if w_method is not None: + w_z = space.get_and_call_function(w_method, w_complex) # if w_z is not None: # __complex__() must return a complex object From noreply at buildbot.pypy.org Tue Dec 20 01:32:46 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 20 Dec 2011 01:32:46 +0100 (CET) Subject: [pypy-commit] pypy py3k: Remove old-style classobj. Message-ID: <20111220003246.37362823F8@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50740:46d9c473055c Date: 2011-12-19 23:28 +0100 http://bitbucket.org/pypy/pypy/changeset/46d9c473055c/ Log: Remove old-style classobj. diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -45,9 +45,6 @@ 'open' : 'state.get(space).w_open', - # default __metaclass__: old-style class - '__metaclass__' : 'interp_classobj.W_ClassObject', - # interp-level function definitions 'abs' : 'operation.abs', 'ascii' : 'operation.ascii', diff --git a/pypy/module/__builtin__/abstractinst.py b/pypy/module/__builtin__/abstractinst.py --- a/pypy/module/__builtin__/abstractinst.py +++ b/pypy/module/__builtin__/abstractinst.py @@ -9,8 +9,6 @@ from pypy.rlib import jit from pypy.interpreter.error import OperationError -from pypy.module.__builtin__.interp_classobj import W_ClassObject -from pypy.module.__builtin__.interp_classobj import W_InstanceObject from pypy.interpreter.baseobjspace import ObjSpace as BaseObjSpace def _get_bases(space, w_cls): @@ -87,12 +85,6 @@ else: return space.is_true(w_result) - # -- case (old-style instance, old-style class) - oldstyleclass = space.interpclass_w(w_klass_or_tuple) - if isinstance(oldstyleclass, W_ClassObject): - oldstyleinst = space.interpclass_w(w_obj) - if isinstance(oldstyleinst, W_InstanceObject): - return oldstyleinst.w_class.is_subclass_of(oldstyleclass) return _abstract_isinstance_w_helper(space, w_obj, w_klass_or_tuple) @jit.dont_look_inside @@ -151,14 +143,7 @@ else: return space.is_true(w_result) - # -- case (old-style class, old-style class) - oldstylederived = space.interpclass_w(w_derived) - if isinstance(oldstylederived, W_ClassObject): - oldstyleklass = space.interpclass_w(w_klass_or_tuple) - if isinstance(oldstyleklass, W_ClassObject): - return oldstylederived.is_subclass_of(oldstyleklass) - else: - check_class(space, w_derived, "issubclass() arg 1 must be a class") + check_class(space, w_derived, "issubclass() arg 1 must be a class") # from here on, we are sure that w_derived is a class-like object # -- case (class-like-object, abstract-class) @@ -171,32 +156,15 @@ # Exception helpers def exception_is_valid_obj_as_class_w(space, w_obj): - obj = space.interpclass_w(w_obj) - if isinstance(obj, W_ClassObject): - return True return BaseObjSpace.exception_is_valid_obj_as_class_w(space, w_obj) def exception_is_valid_class_w(space, w_cls): - cls = space.interpclass_w(w_cls) - if isinstance(cls, W_ClassObject): - return True return BaseObjSpace.exception_is_valid_class_w(space, w_cls) def exception_getclass(space, w_obj): 
- obj = space.interpclass_w(w_obj) - if isinstance(obj, W_InstanceObject): - return obj.w_class return BaseObjSpace.exception_getclass(space, w_obj) def exception_issubclass_w(space, w_cls1, w_cls2): - cls1 = space.interpclass_w(w_cls1) - cls2 = space.interpclass_w(w_cls2) - if isinstance(cls1, W_ClassObject): - if isinstance(cls2, W_ClassObject): - return cls1.is_subclass_of(cls2) - return False - if isinstance(cls2, W_ClassObject): - return False return BaseObjSpace.exception_issubclass_w(space, w_cls1, w_cls2) # ____________________________________________________________ diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py deleted file mode 100644 --- a/pypy/module/__builtin__/interp_classobj.py +++ /dev/null @@ -1,768 +0,0 @@ -import new -from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.typedef import TypeDef, make_weakref_descr -from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.typedef import GetSetProperty, descr_get_dict, descr_set_dict -from pypy.rlib.objectmodel import compute_identity_hash -from pypy.rlib.debug import make_sure_not_resized -from pypy.rlib import jit - - -def raise_type_err(space, argument, expected, w_obj): - type_name = space.type(w_obj).getname(space) - raise operationerrfmt(space.w_TypeError, - "argument %s must be %s, not %s", - argument, expected, type_name) - -def unwrap_attr(space, w_attr): - try: - return space.str_w(w_attr) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - return "?" # any string different from "__dict__" & co. is fine - # XXX it's not clear that we have to catch the TypeError... - -def descr_classobj_new(space, w_subtype, w_name, w_bases, w_dict): - if not space.is_true(space.isinstance(w_bases, space.w_tuple)): - raise_type_err(space, 'bases', 'tuple', w_bases) - - if not space.is_true(space.isinstance(w_dict, space.w_dict)): - raise_type_err(space, 'bases', 'tuple', w_bases) - - if not space.is_true(space.contains(w_dict, space.wrap("__doc__"))): - space.setitem(w_dict, space.wrap("__doc__"), space.w_None) - - # XXX missing: lengthy and obscure logic about "__module__" - - bases_w = space.fixedview(w_bases) - for w_base in bases_w: - if not isinstance(w_base, W_ClassObject): - w_metaclass = space.type(w_base) - if space.is_true(space.callable(w_metaclass)): - return space.call_function(w_metaclass, w_name, - w_bases, w_dict) - raise OperationError(space.w_TypeError, - space.wrap("base must be class")) - - return W_ClassObject(space, w_name, bases_w, w_dict) - -class W_ClassObject(Wrappable): - def __init__(self, space, w_name, bases, w_dict): - self.name = space.str_w(w_name) - make_sure_not_resized(bases) - self.bases_w = bases - self.w_dict = w_dict - - def instantiate(self, space): - cache = space.fromcache(Cache) - if self.lookup(space, '__del__') is not None: - w_inst = cache.cls_with_del(space, self) - else: - w_inst = cache.cls_without_del(space, self) - return w_inst - - def getdict(self, space): - return self.w_dict - - def setdict(self, space, w_dict): - if not space.is_true(space.isinstance(w_dict, space.w_dict)): - raise OperationError( - space.w_TypeError, - space.wrap("__dict__ must be a dictionary object")) - self.w_dict = w_dict - - def setname(self, space, w_newname): - if not space.is_true(space.isinstance(w_newname, space.w_str)): - raise OperationError( - space.w_TypeError, - space.wrap("__name__ must be a string object")) 
- self.name = space.str_w(w_newname) - - def setbases(self, space, w_bases): - # XXX in theory, this misses a check against inheritance cycles - # although on pypy we don't get a segfault for infinite - # recursion anyway - if not space.is_true(space.isinstance(w_bases, space.w_tuple)): - raise OperationError( - space.w_TypeError, - space.wrap("__bases__ must be a tuple object")) - bases_w = space.fixedview(w_bases) - for w_base in bases_w: - if not isinstance(w_base, W_ClassObject): - raise OperationError(space.w_TypeError, - space.wrap("__bases__ items must be classes")) - self.bases_w = bases_w - - def is_subclass_of(self, other): - assert isinstance(other, W_ClassObject) - if self is other: - return True - for base in self.bases_w: - assert isinstance(base, W_ClassObject) - if base.is_subclass_of(other): - return True - return False - - @jit.unroll_safe - def lookup(self, space, attr): - # returns w_value or interplevel None - w_result = space.finditem_str(self.w_dict, attr) - if w_result is not None: - return w_result - for base in self.bases_w: - # XXX fix annotation of bases_w to be a list of W_ClassObjects - assert isinstance(base, W_ClassObject) - w_result = base.lookup(space, attr) - if w_result is not None: - return w_result - return None - - def descr_getattribute(self, space, w_attr): - name = unwrap_attr(space, w_attr) - if name and name[0] == "_": - if name == "__dict__": - return self.w_dict - elif name == "__name__": - return space.wrap(self.name) - elif name == "__bases__": - return space.newtuple(self.bases_w) - w_value = self.lookup(space, name) - if w_value is None: - raise operationerrfmt( - space.w_AttributeError, - "class %s has no attribute '%s'", - self.name, name) - - w_descr_get = space.lookup(w_value, '__get__') - if w_descr_get is None: - return w_value - return space.call_function(w_descr_get, w_value, space.w_None, self) - - def descr_setattr(self, space, w_attr, w_value): - name = unwrap_attr(space, w_attr) - if name and name[0] == "_": - if name == "__dict__": - self.setdict(space, w_value) - return - elif name == "__name__": - self.setname(space, w_value) - return - elif name == "__bases__": - self.setbases(space, w_value) - return - elif name == "__del__": - if self.lookup(space, name) is None: - msg = ("a __del__ method added to an existing class " - "will not be called") - space.warn(msg, space.w_RuntimeWarning) - space.setitem(self.w_dict, w_attr, w_value) - - def descr_delattr(self, space, w_attr): - name = unwrap_attr(space, w_attr) - if name in ("__dict__", "__name__", "__bases__"): - raise operationerrfmt( - space.w_TypeError, - "cannot delete attribute '%s'", name) - try: - space.delitem(self.w_dict, w_attr) - except OperationError, e: - if not e.match(space, space.w_KeyError): - raise - raise operationerrfmt( - space.w_AttributeError, - "class %s has no attribute '%s'", - self.name, name) - - def descr_repr(self, space): - mod = self.get_module_string(space) - return self.getrepr(space, "class %s.%s" % (mod, self.name)) - - def descr_str(self, space): - mod = self.get_module_string(space) - if mod == "?": - return space.wrap(self.name) - else: - return space.wrap("%s.%s" % (mod, self.name)) - - def get_module_string(self, space): - try: - w_mod = self.descr_getattribute(space, space.wrap("__module__")) - except OperationError, e: - if not e.match(space, space.w_AttributeError): - raise - return "?" - if space.is_true(space.isinstance(w_mod, space.w_str)): - return space.str_w(w_mod) - return "?" 
- - def __repr__(self): - # NOT_RPYTHON - return '' % self.name - -class Cache: - def __init__(self, space): - from pypy.interpreter.typedef import _usersubclswithfeature - # evil - self.cls_without_del = _usersubclswithfeature( - space.config, W_InstanceObject, "dict", "weakref") - self.cls_with_del = _usersubclswithfeature( - space.config, self.cls_without_del, "del") - - -def class_descr_call(space, w_self, __args__): - self = space.interp_w(W_ClassObject, w_self) - w_inst = self.instantiate(space) - w_init = w_inst.getattr_from_class(space, '__init__') - if w_init is not None: - w_result = space.call_args(w_init, __args__) - if not space.is_w(w_result, space.w_None): - raise OperationError( - space.w_TypeError, - space.wrap("__init__() should return None")) - elif __args__.arguments_w or __args__.keywords: - raise OperationError( - space.w_TypeError, - space.wrap("this constructor takes no arguments")) - return w_inst - -W_ClassObject.typedef = TypeDef("classobj", - __new__ = interp2app(descr_classobj_new), - __repr__ = interp2app(W_ClassObject.descr_repr), - __str__ = interp2app(W_ClassObject.descr_str), - __call__ = interp2app(class_descr_call), - __getattribute__ = interp2app(W_ClassObject.descr_getattribute), - __setattr__ = interp2app(W_ClassObject.descr_setattr), - __delattr__ = interp2app(W_ClassObject.descr_delattr), - __weakref__ = make_weakref_descr(W_ClassObject), -) -W_ClassObject.typedef.acceptable_as_base_class = False - - -def make_unary_instance_method(name): - def unaryop(self, space): - w_meth = self.getattr(space, name, True) - return space.call_function(w_meth) - unaryop.func_name = name - return unaryop - -def make_binary_returning_notimplemented_instance_method(name): - def binaryop(self, space, w_other): - try: - w_meth = self.getattr(space, name, False) - except OperationError, e: - if e.match(space, space.w_AttributeError): - return space.w_NotImplemented - raise - else: - if w_meth is None: - return space.w_NotImplemented - return space.call_function(w_meth, w_other) - binaryop.func_name = name - return binaryop - -def make_binary_instance_method(name): - specialname = "__%s__" % (name, ) - rspecialname = "__r%s__" % (name, ) - objspacename = name - if name in ['and', 'or']: - objspacename = name + '_' - - def binaryop(self, space, w_other): - w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_meth = self.getattr(space, specialname, False) - if w_meth is None: - return space.w_NotImplemented - return space.call_function(w_meth, w_b) - else: - return getattr(space, objspacename)(w_a, w_b) - binaryop.func_name = name - - def rbinaryop(self, space, w_other): - w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None or w_a is self: - w_meth = self.getattr(space, rspecialname, False) - if w_meth is None: - return space.w_NotImplemented - return space.call_function(w_meth, w_other) - else: - return getattr(space, objspacename)(w_b, w_a) - rbinaryop.func_name = "r" + name - return binaryop, rbinaryop - -def _coerce_helper(space, w_self, w_other): - try: - w_tup = space.coerce(w_self, w_other) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - return [None, None] - return space.fixedview(w_tup, 2) - -def descr_instance_new(space, w_type, w_class, w_dict=None): - # w_type is not used at all - if not isinstance(w_class, W_ClassObject): - raise OperationError( - space.w_TypeError, - space.wrap("instance() first arg must be class")) - w_result = 
w_class.instantiate(space) - if not space.is_w(w_dict, space.w_None): - w_result.setdict(space, w_dict) - return w_result - -class W_InstanceObject(Wrappable): - def __init__(self, space, w_class): - # note that user_setup is overridden by the typedef.py machinery - self.user_setup(space, space.gettypeobject(self.typedef)) - assert isinstance(w_class, W_ClassObject) - self.w_class = w_class - - def user_setup(self, space, w_subtype): - self.space = space - - def set_oldstyle_class(self, space, w_class): - if w_class is None or not isinstance(w_class, W_ClassObject): - raise OperationError( - space.w_TypeError, - space.wrap("__class__ must be set to a class")) - self.w_class = w_class - - def getattr_from_class(self, space, name): - # Look up w_name in the class dict, and call its __get__. - # This method ignores the instance dict and the __getattr__. - # Returns None if not found. - assert isinstance(name, str) - w_value = self.w_class.lookup(space, name) - if w_value is None: - return None - w_descr_get = space.lookup(w_value, '__get__') - if w_descr_get is None: - return w_value - return space.call_function(w_descr_get, w_value, self, self.w_class) - - def getattr(self, space, name, exc=True): - # Normal getattr rules: look up w_name in the instance dict, - # in the class dict, and then via a call to __getatttr__. - assert isinstance(name, str) - w_result = self.getdictvalue(space, name) - if w_result is not None: - return w_result - w_result = self.getattr_from_class(space, name) - if w_result is not None: - return w_result - w_meth = self.getattr_from_class(space, '__getattr__') - if w_meth is not None: - try: - return space.call_function(w_meth, space.wrap(name)) - except OperationError, e: - if not exc and e.match(space, space.w_AttributeError): - return None # eat the AttributeError - raise - # not found at all - if exc: - raise operationerrfmt( - space.w_AttributeError, - "%s instance has no attribute '%s'", - self.w_class.name, name) - else: - return None - - def descr_getattribute(self, space, w_attr): - name = space.str_w(w_attr) - if len(name) >= 8 and name[0] == '_': - if name == "__dict__": - return self.getdict(space) - elif name == "__class__": - return self.w_class - return self.getattr(space, name) - - def descr_setattr(self, space, w_name, w_value): - name = unwrap_attr(space, w_name) - w_meth = self.getattr_from_class(space, '__setattr__') - if name and name[0] == "_": - if name == '__dict__': - self.setdict(space, w_value) - return - if name == '__class__': - self.set_oldstyle_class(space, w_value) - return - if name == '__del__' and w_meth is None: - cache = space.fromcache(Cache) - if (not isinstance(self, cache.cls_with_del) - and self.getdictvalue(space, '__del__') is None): - msg = ("a __del__ method added to an instance " - "with no __del__ in the class will not be called") - space.warn(msg, space.w_RuntimeWarning) - if w_meth is not None: - space.call_function(w_meth, w_name, w_value) - else: - self.setdictvalue(space, name, w_value) - - def descr_delattr(self, space, w_name): - name = unwrap_attr(space, w_name) - if name and name[0] == "_": - if name == '__dict__': - # use setdict to raise the error - self.setdict(space, space.w_None) - return - elif name == '__class__': - # use set_oldstyle_class to raise the error - self.set_oldstyle_class(space, None) - return - w_meth = self.getattr_from_class(space, '__delattr__') - if w_meth is not None: - space.call_function(w_meth, w_name) - else: - if not self.deldictvalue(space, name): - raise operationerrfmt( - 
space.w_AttributeError, - "%s instance has no attribute '%s'", - self.w_class.name, name) - - def descr_repr(self, space): - w_meth = self.getattr(space, '__repr__', False) - if w_meth is None: - w_class = self.w_class - mod = w_class.get_module_string(space) - return self.getrepr(space, "%s.%s instance" % (mod, w_class.name)) - return space.call_function(w_meth) - - def descr_str(self, space): - w_meth = self.getattr(space, '__str__', False) - if w_meth is None: - return self.descr_repr(space) - return space.call_function(w_meth) - - def descr_unicode(self, space): - w_meth = self.getattr(space, '__unicode__', False) - if w_meth is None: - return self.descr_str(space) - return space.call_function(w_meth) - - def descr_format(self, space, w_format_spec): - w_meth = self.getattr(space, "__format__", False) - if w_meth is not None: - return space.call_function(w_meth, w_format_spec) - else: - if space.isinstance_w(w_format_spec, space.w_unicode): - w_as_str = self.descr_unicode(space) - else: - w_as_str = self.descr_str(space) - if space.len_w(w_format_spec) > 0: - space.warn( - ("object.__format__ with a non-empty format string is " - "deprecated"), - space.w_PendingDeprecationWarning - ) - return space.format(w_as_str, w_format_spec) - - def descr_len(self, space): - w_meth = self.getattr(space, '__len__') - w_result = space.call_function(w_meth) - if space.is_true(space.isinstance(w_result, space.w_int)): - if space.is_true(space.lt(w_result, space.wrap(0))): - raise OperationError( - space.w_ValueError, - space.wrap("__len__() should return >= 0")) - return w_result - raise OperationError( - space.w_TypeError, - space.wrap("__len__() should return an int")) - - def descr_getitem(self, space, w_key): - w_meth = self.getattr(space, '__getitem__') - return space.call_function(w_meth, w_key) - - def descr_setitem(self, space, w_key, w_value): - w_meth = self.getattr(space, '__setitem__') - space.call_function(w_meth, w_key, w_value) - - def descr_delitem(self, space, w_key): - w_meth = self.getattr(space, '__delitem__') - space.call_function(w_meth, w_key) - - def descr_iter(self, space): - w_meth = self.getattr(space, '__iter__', False) - if w_meth is not None: - return space.call_function(w_meth) - w_meth = self.getattr(space, '__getitem__', False) - if w_meth is None: - raise OperationError( - space.w_TypeError, - space.wrap("iteration over non-sequence")) - return space.newseqiter(self) - # XXX do I really need a __next__ method? 
the old implementation - # had one, but I don't see the point - - def descr_call(self, space, __args__): - w_meth = self.getattr(space, '__call__') - return space.call_args(w_meth, __args__) - - def descr_nonzero(self, space): - w_func = self.getattr(space, '__nonzero__', False) - if w_func is None: - w_func = self.getattr(space, '__len__', False) - if w_func is None: - return space.w_True - w_result = space.call_function(w_func) - if space.is_true(space.isinstance(w_result, space.w_int)): - if space.is_true(space.lt(w_result, space.wrap(0))): - raise OperationError( - space.w_ValueError, - space.wrap("__nonzero__() should return >= 0")) - return w_result - raise OperationError( - space.w_TypeError, - space.wrap("__nonzero__() should return an int")) - - def descr_cmp(self, space, w_other): # do all the work here like CPython - w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - else: - if (not isinstance(w_a, W_InstanceObject) and - not isinstance(w_b, W_InstanceObject)): - return space.cmp(w_a, w_b) - if isinstance(w_a, W_InstanceObject): - w_func = w_a.getattr(space, '__cmp__', False) - if w_func is not None: - w_res = space.call_function(w_func, w_b) - if space.is_w(w_res, space.w_NotImplemented): - return w_res - try: - res = space.int_w(w_res) - except OperationError, e: - if e.match(space, space.w_TypeError): - raise OperationError( - space.w_TypeError, - space.wrap("__cmp__ must return int")) - raise - if res > 0: - return space.wrap(1) - if res < 0: - return space.wrap(-1) - return space.wrap(0) - if isinstance(w_b, W_InstanceObject): - w_func = w_b.getattr(space, '__cmp__', False) - if w_func is not None: - w_res = space.call_function(w_func, w_a) - if space.is_w(w_res, space.w_NotImplemented): - return w_res - try: - res = space.int_w(w_res) - except OperationError, e: - if e.match(space, space.w_TypeError): - raise OperationError( - space.w_TypeError, - space.wrap("__cmp__ must return int")) - raise - if res < 0: - return space.wrap(1) - if res > 0: - return space.wrap(-1) - return space.wrap(0) - return space.w_NotImplemented - - def descr_hash(self, space): - w_func = self.getattr(space, '__hash__', False) - if w_func is None: - w_eq = self.getattr(space, '__eq__', False) - w_cmp = self.getattr(space, '__cmp__', False) - if w_eq is not None or w_cmp is not None: - raise OperationError(space.w_TypeError, - space.wrap("unhashable instance")) - else: - return space.wrap(compute_identity_hash(self)) - w_ret = space.call_function(w_func) - if (not space.is_true(space.isinstance(w_ret, space.w_int)) and - not space.is_true(space.isinstance(w_ret, space.w_long))): - raise OperationError( - space.w_TypeError, - space.wrap("__hash__ must return int or long")) - return w_ret - - def descr_int(self, space): - w_func = self.getattr(space, '__int__', False) - if w_func is not None: - return space.call_function(w_func) - - w_truncated = space.trunc(self) - # int() needs to return an int - try: - return space.int(w_truncated) - except OperationError: - # Raise a different error - raise OperationError( - space.w_TypeError, - space.wrap("__trunc__ returned non-Integral")) - - def descr_long(self, space): - w_func = self.getattr(space, '__long__', False) - if w_func is not None: - return space.call_function(w_func) - return self.descr_int(space) - - def descr_index(self, space): - w_func = self.getattr(space, '__index__', False) - if w_func is not None: - return space.call_function(w_func) - raise OperationError( - space.w_TypeError, - 
space.wrap("object cannot be interpreted as an index")) - - def descr_contains(self, space, w_obj): - w_func = self.getattr(space, '__contains__', False) - if w_func is not None: - return space.wrap(space.is_true(space.call_function(w_func, w_obj))) - # now do it ourselves - w_iter = space.iter(self) - while 1: - try: - w_x = space.next(w_iter) - except OperationError, e: - if e.match(space, space.w_StopIteration): - return space.w_False - raise - if space.eq_w(w_x, w_obj): - return space.w_True - - - def descr_pow(self, space, w_other, w_modulo=None): - if space.is_w(w_modulo, space.w_None): - w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented - else: - return space.pow(w_a, w_b, space.w_None) - else: - # CPython also doesn't try coercion in this case - w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented - - def descr_rpow(self, space, w_other, w_modulo=None): - if space.is_w(w_modulo, space.w_None): - w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented - else: - return space.pow(w_b, w_a, space.w_None) - else: - # CPython also doesn't try coercion in this case - w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented - - def descr_next(self, space): - w_func = self.getattr(space, '__next__', False) - if w_func is None: - raise OperationError(space.w_TypeError, space.wrap( - "instance has no __next__() method")) - return space.call_function(w_func) - - def descr_del(self, space): - # Note that this is called from executioncontext.UserDelAction - # via the space.userdel() method. - w_func = self.getdictvalue(space, '__del__') - if w_func is None: - w_func = self.getattr_from_class(space, '__del__') - if w_func is not None: - space.call_function(w_func) - - def descr_exit(self, space, w_type, w_value, w_tb): - w_func = self.getattr(space, '__exit__', False) - if w_func is not None: - return space.call_function(w_func, w_type, w_value, w_tb) - -rawdict = {} - -# unary operations -for op in "neg pos abs invert trunc float oct hex enter reversed".split(): - specialname = "__%s__" % (op, ) - # fool the gateway logic by giving it a real unbound method - meth = new.instancemethod( - make_unary_instance_method(specialname), - None, - W_InstanceObject) - rawdict[specialname] = interp2app(meth) - -# binary operations that return NotImplemented if they fail -# e.g. 
rich comparisons, coerce and inplace ops -for op in 'eq ne gt lt ge le coerce imod iand ipow itruediv ilshift ixor irshift ifloordiv idiv isub imul iadd ior'.split(): - specialname = "__%s__" % (op, ) - # fool the gateway logic by giving it a real unbound method - meth = new.instancemethod( - make_binary_returning_notimplemented_instance_method(specialname), - None, - W_InstanceObject) - rawdict[specialname] = interp2app(meth) - -for op in "or and xor lshift rshift add sub mul div mod divmod floordiv truediv".split(): - specialname = "__%s__" % (op, ) - rspecialname = "__r%s__" % (op, ) - func, rfunc = make_binary_instance_method(op) - # fool the gateway logic by giving it a real unbound method - meth = new.instancemethod(func, None, W_InstanceObject) - rawdict[specialname] = interp2app(meth) - rmeth = new.instancemethod(rfunc, None, W_InstanceObject) - rawdict[rspecialname] = interp2app(rmeth) - - -def descr_del_dict(space, w_inst): - # use setdict to raise the error - w_inst.setdict(space, space.w_None) - -dict_descr = GetSetProperty(descr_get_dict, descr_set_dict, descr_del_dict) -dict_descr.name = '__dict__' - -W_InstanceObject.typedef = TypeDef("instance", - __new__ = interp2app(descr_instance_new), - __getattribute__ = interp2app(W_InstanceObject.descr_getattribute), - __setattr__ = interp2app(W_InstanceObject.descr_setattr), - __delattr__ = interp2app(W_InstanceObject.descr_delattr), - __repr__ = interp2app(W_InstanceObject.descr_repr), - __str__ = interp2app(W_InstanceObject.descr_str), - __unicode__ = interp2app(W_InstanceObject.descr_unicode), - __format__ = interp2app(W_InstanceObject.descr_format), - __len__ = interp2app(W_InstanceObject.descr_len), - __getitem__ = interp2app(W_InstanceObject.descr_getitem), - __setitem__ = interp2app(W_InstanceObject.descr_setitem), - __delitem__ = interp2app(W_InstanceObject.descr_delitem), - __iter__ = interp2app(W_InstanceObject.descr_iter), - __call__ = interp2app(W_InstanceObject.descr_call), - __nonzero__ = interp2app(W_InstanceObject.descr_nonzero), - __cmp__ = interp2app(W_InstanceObject.descr_cmp), - __hash__ = interp2app(W_InstanceObject.descr_hash), - __int__ = interp2app(W_InstanceObject.descr_int), - __long__ = interp2app(W_InstanceObject.descr_long), - __index__ = interp2app(W_InstanceObject.descr_index), - __contains__ = interp2app(W_InstanceObject.descr_contains), - __pow__ = interp2app(W_InstanceObject.descr_pow), - __rpow__ = interp2app(W_InstanceObject.descr_rpow), - __next__ = interp2app(W_InstanceObject.descr_next), - __del__ = interp2app(W_InstanceObject.descr_del), - __exit__ = interp2app(W_InstanceObject.descr_exit), - __dict__ = dict_descr, - **rawdict -) -W_InstanceObject.typedef.acceptable_as_base_class = False diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py deleted file mode 100644 --- a/pypy/module/__builtin__/test/test_classobj.py +++ /dev/null @@ -1,1083 +0,0 @@ -from __future__ import with_statement -import py -from pypy.conftest import gettestobjspace, option -from pypy.interpreter import gateway - - -class AppTestOldstyle(object): - - def test_simple(self): - class A: - a = 1 - assert A.__name__ == 'A' - assert A.__bases__ == () - assert A.a == 1 - assert A.__dict__['a'] == 1 - a = A() - a.b = 2 - assert a.b == 2 - assert a.a == 1 - assert a.__class__ is A - assert a.__dict__ == {'b': 2} - - def test_isinstance(self): - class A: - pass - class B(A): - pass - class C(A): - pass - assert isinstance(B(), A) - assert isinstance(B(), B) - assert not 
isinstance(B(), C) - assert not isinstance(A(), B) - assert isinstance(B(), (A, C)) - assert isinstance(B(), (C, (), (C, B))) - assert not isinstance(B(), ()) - - def test_issubclass(self): - class A: - pass - class B(A): - pass - class C(A): - pass - assert issubclass(A, A) - assert not issubclass(A, B) - assert not issubclass(A, C) - assert issubclass(B, A) - assert issubclass(B, B) - assert not issubclass(B, C) - - def test_mutate_class_special(self): - class A: - a = 1 - A.__name__ = 'B' - assert A.__name__ == 'B' - assert A.a == 1 - A.__dict__ = {'a': 5} - assert A.a == 5 - class B: - a = 17 - b = 18 - class C(A): - c = 19 - assert C.a == 5 - assert C.c == 19 - C.__bases__ = (B, ) - assert C.a == 17 - assert C.b == 18 - assert C.c == 19 - C.__bases__ = (B, A) - assert C.a == 17 - assert C.b == 18 - assert C.c == 19 - C.__bases__ = (A, B) - assert C.a == 5 - assert C.b == 18 - assert C.c == 19 - - def test_class_repr(self): - d = {} - exec "class A: pass" in d # to have no __module__ - A = d['A'] - assert repr(A).startswith(" a2) is a2 - - def test_eq_order(self): - # this gives the ordering of equality-related functions on top of - # CPython **for old-style classes**. - class A: - def __eq__(self, other): return self.__class__.__name__+':A.eq' - def __ne__(self, other): return self.__class__.__name__+':A.ne' - def __lt__(self, other): return self.__class__.__name__+':A.lt' - def __le__(self, other): return self.__class__.__name__+':A.le' - def __gt__(self, other): return self.__class__.__name__+':A.gt' - def __ge__(self, other): return self.__class__.__name__+':A.ge' - class B: - def __eq__(self, other): return self.__class__.__name__+':B.eq' - def __ne__(self, other): return self.__class__.__name__+':B.ne' - def __lt__(self, other): return self.__class__.__name__+':B.lt' - def __le__(self, other): return self.__class__.__name__+':B.le' - def __gt__(self, other): return self.__class__.__name__+':B.gt' - def __ge__(self, other): return self.__class__.__name__+':B.ge' - # - assert (A() == B()) == 'A:A.eq' - assert (A() != B()) == 'A:A.ne' - assert (A() < B()) == 'A:A.lt' - assert (A() <= B()) == 'A:A.le' - assert (A() > B()) == 'A:A.gt' - assert (A() >= B()) == 'A:A.ge' - # - assert (B() == A()) == 'B:B.eq' - assert (B() != A()) == 'B:B.ne' - assert (B() < A()) == 'B:B.lt' - assert (B() <= A()) == 'B:B.le' - assert (B() > A()) == 'B:B.gt' - assert (B() >= A()) == 'B:B.ge' - # - class C(A): - def __eq__(self, other): return self.__class__.__name__+':C.eq' - def __ne__(self, other): return self.__class__.__name__+':C.ne' - def __lt__(self, other): return self.__class__.__name__+':C.lt' - def __le__(self, other): return self.__class__.__name__+':C.le' - def __gt__(self, other): return self.__class__.__name__+':C.gt' - def __ge__(self, other): return self.__class__.__name__+':C.ge' - # - assert (A() == C()) == 'A:A.eq' - assert (A() != C()) == 'A:A.ne' - assert (A() < C()) == 'A:A.lt' - assert (A() <= C()) == 'A:A.le' - assert (A() > C()) == 'A:A.gt' - assert (A() >= C()) == 'A:A.ge' - # - assert (C() == A()) == 'C:C.eq' - assert (C() != A()) == 'C:C.ne' - assert (C() < A()) == 'C:C.lt' - assert (C() <= A()) == 'C:C.le' - assert (C() > A()) == 'C:C.gt' - assert (C() >= A()) == 'C:C.ge' - # - class D(A): - pass - # - assert (A() == D()) == 'A:A.eq' - assert (A() != D()) == 'A:A.ne' - assert (A() < D()) == 'A:A.lt' - assert (A() <= D()) == 'A:A.le' - assert (A() > D()) == 'A:A.gt' - assert (A() >= D()) == 'A:A.ge' - # - assert (D() == A()) == 'D:A.eq' - assert (D() != A()) == 'D:A.ne' - 
assert (D() < A()) == 'D:A.lt' - assert (D() <= A()) == 'D:A.le' - assert (D() > A()) == 'D:A.gt' - assert (D() >= A()) == 'D:A.ge' - - -class AppTestOldStyleClassStrDict(object): - def setup_class(cls): - if option.runappdirect: - py.test.skip("can only be run on py.py") - def is_strdict(space, w_class): - from pypy.objspace.std.dictmultiobject import StringDictStrategy - w_d = w_class.getdict(space) - return space.wrap(isinstance(w_d.strategy, StringDictStrategy)) - - cls.w_is_strdict = cls.space.wrap(gateway.interp2app(is_strdict)) - - def test_strdict(self): - class A: - a = 1 - b = 2 - assert self.is_strdict(A) - -class AppTestOldStyleMapDict(AppTestOldstyle): - def setup_class(cls): - cls.space = gettestobjspace(**{"objspace.std.withmapdict": True}) - if option.runappdirect: - py.test.skip("can only be run on py.py") - def has_mapdict(space, w_inst): - return space.wrap(w_inst._get_mapdict_map() is not None) - cls.w_has_mapdict = cls.space.wrap(gateway.interp2app(has_mapdict)) - - - def test_has_mapdict(self): - class A: - def __init__(self): - self.x = 42 - a = A() - assert a.x == 42 - assert self.has_mapdict(a) - diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -60,7 +60,6 @@ import pypy.module.cpyext.weakrefobject import pypy.module.cpyext.funcobject import pypy.module.cpyext.frameobject -import pypy.module.cpyext.classobject import pypy.module.cpyext.pypyintf import pypy.module.cpyext.memoryobject import pypy.module.cpyext.codecs diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -395,7 +395,7 @@ }.items(): GLOBALS['Py%s_Type#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) - for cpyname in 'Method List Int Long Dict Tuple Class'.split(): + for cpyname in 'Method List Int Long Dict Tuple'.split(): FORWARD_DECLS.append('typedef struct { PyObject_HEAD } ' 'Py%sObject' % (cpyname, )) build_exported_objects() diff --git a/pypy/module/cpyext/classobject.py b/pypy/module/cpyext/classobject.py deleted file mode 100644 --- a/pypy/module/cpyext/classobject.py +++ /dev/null @@ -1,39 +0,0 @@ -from pypy.rpython.lltypesystem import rffi, lltype -from pypy.module.cpyext.api import ( - PyObjectFields, CANNOT_FAIL, - cpython_api, bootstrap_function, cpython_struct, build_type_checkers) -from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref, Py_DecRef, make_typedescr -from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall -from pypy.module.__builtin__.interp_classobj import W_ClassObject, W_InstanceObject - -PyClass_Check, PyClass_CheckExact = build_type_checkers("Class", W_ClassObject) -PyInstance_Check, PyInstance_CheckExact = build_type_checkers("Instance", W_InstanceObject) - - at cpython_api([PyObject, PyObject], PyObject) -def PyInstance_NewRaw(space, w_class, w_dict): - """Create a new instance of a specific class without calling its constructor. - class is the class of new object. 
The dict parameter will be used as the - object's __dict__; if NULL, a new dictionary will be created for the - instance.""" - if not isinstance(w_class, W_ClassObject): - return PyErr_BadInternalCall(space) - w_result = w_class.instantiate(space) - if w_dict is not None: - w_result.setdict(space, w_dict) - return w_result - - at cpython_api([PyObject, PyObject], PyObject, error=CANNOT_FAIL) -def _PyInstance_Lookup(space, w_instance, w_name): - name = space.str_w(w_name) - assert isinstance(w_instance, W_InstanceObject) - w_result = w_instance.getdictvalue(space, name) - if w_result is not None: - return w_result - return w_instance.w_class.lookup(space, name) - - at cpython_api([PyObject, PyObject, PyObject], PyObject) -def PyClass_New(space, w_bases, w_dict, w_name): - w_classobj = space.gettypefor(W_ClassObject) - return space.call_function(w_classobj, - w_name, w_bases, w_dict) - diff --git a/pypy/module/cpyext/include/pyerrors.h b/pypy/module/cpyext/include/pyerrors.h --- a/pypy/module/cpyext/include/pyerrors.h +++ b/pypy/module/cpyext/include/pyerrors.h @@ -8,7 +8,7 @@ #endif #define PyExceptionClass_Check(x) \ - (PyClass_Check((x)) || (PyType_Check((x)) && \ + ((PyType_Check((x)) && \ PyObject_IsSubclass((x), PyExc_BaseException))) PyObject *PyErr_NewException(const char *name, PyObject *base, PyObject *dict); diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -32,7 +32,6 @@ from pypy.rlib.rstring import rsplit from pypy.rlib.objectmodel import specialize from pypy.module.__builtin__.abstractinst import abstract_issubclass_w -from pypy.module.__builtin__.interp_classobj import W_ClassObject from pypy.rlib import jit WARN_ABOUT_MISSING_SLOT_FUNCTIONS = False @@ -551,9 +550,6 @@ w_winner = None w_base = None for w_base_i in bases_w: - if isinstance(w_base_i, W_ClassObject): - # old-style base - continue assert isinstance(w_base_i, W_TypeObject) w_candidate = solid_base(space, w_base_i) if not w_winner: diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -5,7 +5,6 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.typedef import default_identity_hash from pypy.tool.sourcetools import compile2, func_with_new_name -from pypy.module.__builtin__.interp_classobj import W_InstanceObject from pypy.rlib.objectmodel import specialize def object_getattribute(space): diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -410,10 +410,8 @@ self._become(new_obj) def user_setup(self, space, w_subtype): - from pypy.module.__builtin__.interp_classobj import W_InstanceObject self.space = space - assert (not self.typedef.hasdict or - self.typedef is W_InstanceObject.typedef) + assert not self.typedef.hasdict self._init_empty(w_subtype.terminator) def getslotvalue(self, index): diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -79,9 +79,6 @@ # exceptions & builtins self.make_builtins() - # the type of old-style classes - self.w_classobj = self.builtin.get('__metaclass__') - # final setup self.setup_builtin_modules() # Adding transparent proxy call diff --git a/pypy/objspace/std/typetype.py b/pypy/objspace/std/typetype.py --- a/pypy/objspace/std/typetype.py +++ 
b/pypy/objspace/std/typetype.py
@@ -35,8 +35,6 @@
     w_winner = w_typetype
     for base in bases_w:
         w_typ = space.type(base)
-        if space.is_w(w_typ, space.w_classobj):
-            continue # special-case old-style classes
         if space.is_true(space.issubtype(w_winner, w_typ)):
             continue
         if space.is_true(space.issubtype(w_typ, w_winner)):
diff --git a/pypy/translator/geninterplevel.py b/pypy/translator/geninterplevel.py
--- a/pypy/translator/geninterplevel.py
+++ b/pypy/translator/geninterplevel.py
@@ -892,7 +892,7 @@
         # XXX there seems to be no working support for member descriptors ???
         type(types.GeneratorType.gi_frame): (eval_helper, "member_descriptor",
             'type(property.fdel)'),
-        types.ClassType: 'space.w_classobj',
+        types.ClassType: 'space.w_type',
         types.MethodType: (eval_helper, "instancemethod",
             "type((lambda:42).__get__(42))"),
         type(Ellipsis): (eval_helper, 'EllipsisType', 'types.EllipsisType'),

From noreply at buildbot.pypy.org Tue Dec 20 01:32:47 2011
From: noreply at buildbot.pypy.org (amauryfa)
Date: Tue, 20 Dec 2011 01:32:47 +0100 (CET)
Subject: [pypy-commit] pypy py3k: Exception checks are not abstract anymore
Message-ID: <20111220003247.6DE20823F8 at wyvern.cs.uni-duesseldorf.de>

Author: Amaury Forgeot d'Arc
Branch: py3k
Changeset: r50741:ed5224a1b668
Date: 2011-12-19 23:33 +0100
http://bitbucket.org/pypy/pypy/changeset/ed5224a1b668/

Log: Exception checks are not abstract anymore

diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py
--- a/pypy/module/__builtin__/__init__.py
+++ b/pypy/module/__builtin__/__init__.py
@@ -129,7 +129,3 @@
         space.abstract_issubclass_w = ab.abstract_issubclass_w.__get__(space)
         space.abstract_isclass_w = ab.abstract_isclass_w.__get__(space)
         space.abstract_getclass = ab.abstract_getclass.__get__(space)
-        space.exception_is_valid_class_w = ab.exception_is_valid_class_w.__get__(space)
-        space.exception_is_valid_obj_as_class_w = ab.exception_is_valid_obj_as_class_w.__get__(space)
-        space.exception_getclass = ab.exception_getclass.__get__(space)
-        space.exception_issubclass_w = ab.exception_issubclass_w.__get__(space)
diff --git a/pypy/module/__builtin__/abstractinst.py b/pypy/module/__builtin__/abstractinst.py
--- a/pypy/module/__builtin__/abstractinst.py
+++ b/pypy/module/__builtin__/abstractinst.py
@@ -152,21 +152,6 @@
                        " or tuple of classes and types")
     return _issubclass_recurse(space, w_derived, w_klass_or_tuple)
 
-# ------------------------------------------------------------
-# Exception helpers
-
-def exception_is_valid_obj_as_class_w(space, w_obj):
-    return BaseObjSpace.exception_is_valid_obj_as_class_w(space, w_obj)
-
-def exception_is_valid_class_w(space, w_cls):
-    return BaseObjSpace.exception_is_valid_class_w(space, w_cls)
-
-def exception_getclass(space, w_obj):
-    return BaseObjSpace.exception_getclass(space, w_obj)
-
-def exception_issubclass_w(space, w_cls1, w_cls2):
-    return BaseObjSpace.exception_issubclass_w(space, w_cls1, w_cls2)
-
 # ____________________________________________________________
 # App-level interface

From noreply at buildbot.pypy.org Tue Dec 20 01:32:48 2011
From: noreply at buildbot.pypy.org (amauryfa)
Date: Thu, 01 Dec 2011 02:27:02 -0000
Subject: [pypy-commit] pypy py3k: remove __builtins__._issubtype
Message-ID: <20111220003248.E25EB823F8 at wyvern.cs.uni-duesseldorf.de>

Author: Amaury Forgeot d'Arc
Branch: py3k
Changeset: r50742:6ae88cc085dc
Date: 2011-12-19 23:47 +0100
http://bitbucket.org/pypy/pypy/changeset/6ae88cc085dc/

Log: remove __builtins__._issubtype

diff --git a/pypy/module/__builtin__/__init__.py
b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -61,7 +61,6 @@ 'coerce' : 'operation.coerce', 'divmod' : 'operation.divmod', 'format' : 'operation.format', - '_issubtype' : 'operation._issubtype', 'issubclass' : 'abstractinst.app_issubclass', 'isinstance' : 'abstractinst.app_isinstance', 'getattr' : 'operation.getattr', diff --git a/pypy/module/__builtin__/operation.py b/pypy/module/__builtin__/operation.py --- a/pypy/module/__builtin__/operation.py +++ b/pypy/module/__builtin__/operation.py @@ -119,10 +119,6 @@ """Return the tuple ((x-x%y)/y, x%y). Invariant: div*y + mod == x.""" return space.divmod(w_x, w_y) -# semi-private: works only for new-style classes. -def _issubtype(space, w_cls1, w_cls2): - return space.issubtype(w_cls1, w_cls2) - # ____________________________________________________________ # Here 0.30103 is an upper bound for log10(2) diff --git a/pypy/translator/geninterplevel.py b/pypy/translator/geninterplevel.py --- a/pypy/translator/geninterplevel.py +++ b/pypy/translator/geninterplevel.py @@ -182,12 +182,6 @@ # catching all builtins in advance, to avoid problems # with modified builtins - # add a dummy _issubtype() to builtins - if not hasattr(__builtin__, '_issubtype'): - def _issubtype(cls1, cls2): - raise TypeError, "this dummy should *not* be reached" - __builtin__._issubtype = _issubtype - class bltinstub: def __init__(self, name): self.__name__ = name From noreply at buildbot.pypy.org Tue Dec 20 09:42:30 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Dec 2011 09:42:30 +0100 (CET) Subject: [pypy-commit] pypy counter-decay: hg merge default Message-ID: <20111220084230.15E26820B7@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: counter-decay Changeset: r50743:4c82192bdff7 Date: 2011-12-20 08:55 +0100 http://bitbucket.org/pypy/pypy/changeset/4c82192bdff7/ Log: hg merge default Reactivate this branch, and cancel its merge on trunk, which was definitely not really good. diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -51,6 +51,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." + state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \"%s\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \"%s\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -94,17 +112,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." 
- for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \"%s\" missing from %s" - err = err % (missing, host) - w_err = space.wrap(err) - raise OperationError(space.w_TypeError, w_err) - raise AssertionError("should not reach here") class mod(AST): @@ -112,7 +119,6 @@ class Module(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -128,7 +134,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Module') + self.missing_field(space, ['body'], 'Module') else: pass w_list = self.w_body @@ -145,7 +151,6 @@ class Interactive(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -161,7 +166,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Interactive') + self.missing_field(space, ['body'], 'Interactive') else: pass w_list = self.w_body @@ -178,7 +183,6 @@ class Expression(mod): - def __init__(self, body): self.body = body self.initialization_state = 1 @@ -192,7 +196,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Expression') + self.missing_field(space, ['body'], 'Expression') else: pass self.body.sync_app_attrs(space) @@ -200,7 +204,6 @@ class Suite(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -216,7 +219,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Suite') + self.missing_field(space, ['body'], 'Suite') else: pass w_list = self.w_body @@ -232,15 +235,13 @@ class stmt(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class FunctionDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, args, body, decorator_list, lineno, col_offset): self.name = name self.args = args @@ -264,7 +265,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'args', 'body', 'decorator_list', 'lineno', 'col_offset'], 'FunctionDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef') else: pass self.args.sync_app_attrs(space) @@ -292,9 +293,6 @@ class ClassDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, bases, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases @@ -320,7 +318,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'bases', 'body', 'decorator_list', 'lineno', 'col_offset'], 'ClassDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef') else: pass w_list = self.w_bases @@ -357,9 +355,6 @@ class Return(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -374,10 +369,10 @@ return visitor.visit_Return(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Return') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Return') else: - 
if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -385,9 +380,6 @@ class Delete(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, targets, lineno, col_offset): self.targets = targets self.w_targets = None @@ -404,7 +396,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['targets', 'lineno', 'col_offset'], 'Delete') + self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete') else: pass w_list = self.w_targets @@ -421,9 +413,6 @@ class Assign(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, targets, value, lineno, col_offset): self.targets = targets self.w_targets = None @@ -442,7 +431,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['targets', 'value', 'lineno', 'col_offset'], 'Assign') + self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') else: pass w_list = self.w_targets @@ -460,9 +449,6 @@ class AugAssign(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, target, op, value, lineno, col_offset): self.target = target self.op = op @@ -480,7 +466,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['target', 'op', 'value', 'lineno', 'col_offset'], 'AugAssign') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') else: pass self.target.sync_app_attrs(space) @@ -489,9 +475,6 @@ class Print(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, dest, values, nl, lineno, col_offset): self.dest = dest self.values = values @@ -511,10 +494,10 @@ return visitor.visit_Print(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 30: - missing_field(space, self.initialization_state, [None, 'values', 'nl', 'lineno', 'col_offset'], 'Print') + if (self.initialization_state & ~4) ^ 27: + self.missing_field(space, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.dest = None if self.dest: self.dest.sync_app_attrs(space) @@ -532,9 +515,6 @@ class For(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, target, iter, body, orelse, lineno, col_offset): self.target = target self.iter = iter @@ -559,7 +539,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['target', 'iter', 'body', 'orelse', 'lineno', 'col_offset'], 'For') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 'orelse'], 'For') else: pass self.target.sync_app_attrs(space) @@ -588,9 +568,6 @@ class While(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -613,7 +590,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'While') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') else: pass self.test.sync_app_attrs(space) @@ -641,9 +618,6 @@ class If(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): 
self.test = test self.body = body @@ -666,7 +640,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'If') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') else: pass self.test.sync_app_attrs(space) @@ -694,9 +668,6 @@ class With(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, context_expr, optional_vars, body, lineno, col_offset): self.context_expr = context_expr self.optional_vars = optional_vars @@ -717,10 +688,10 @@ return visitor.visit_With(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 29: - missing_field(space, self.initialization_state, ['context_expr', None, 'body', 'lineno', 'col_offset'], 'With') + if (self.initialization_state & ~8) ^ 23: + self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.optional_vars = None self.context_expr.sync_app_attrs(space) if self.optional_vars: @@ -739,9 +710,6 @@ class Raise(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, inst, tback, lineno, col_offset): self.type = type self.inst = inst @@ -762,14 +730,14 @@ return visitor.visit_Raise(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~7) ^ 24: - missing_field(space, self.initialization_state, [None, None, None, 'lineno', 'col_offset'], 'Raise') + if (self.initialization_state & ~28) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.inst = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.tback = None if self.type: self.type.sync_app_attrs(space) @@ -781,9 +749,6 @@ class TryExcept(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, handlers, orelse, lineno, col_offset): self.body = body self.w_body = None @@ -808,7 +773,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['body', 'handlers', 'orelse', 'lineno', 'col_offset'], 'TryExcept') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') else: pass w_list = self.w_body @@ -845,9 +810,6 @@ class TryFinally(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, body, finalbody, lineno, col_offset): self.body = body self.w_body = None @@ -868,7 +830,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['body', 'finalbody', 'lineno', 'col_offset'], 'TryFinally') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') else: pass w_list = self.w_body @@ -895,9 +857,6 @@ class Assert(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, test, msg, lineno, col_offset): self.test = test self.msg = msg @@ -914,10 +873,10 @@ return visitor.visit_Assert(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 13: - missing_field(space, self.initialization_state, ['test', None, 'lineno', 'col_offset'], 'Assert') + if (self.initialization_state & ~8) ^ 7: + self.missing_field(space, ['lineno', 
'col_offset', 'test', None], 'Assert') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.msg = None self.test.sync_app_attrs(space) if self.msg: @@ -926,9 +885,6 @@ class Import(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -945,7 +901,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Import') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import') else: pass w_list = self.w_names @@ -962,9 +918,6 @@ class ImportFrom(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, module, names, level, lineno, col_offset): self.module = module self.names = names @@ -982,12 +935,12 @@ return visitor.visit_ImportFrom(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~5) ^ 26: - missing_field(space, self.initialization_state, [None, 'names', None, 'lineno', 'col_offset'], 'ImportFrom') + if (self.initialization_state & ~20) ^ 11: + self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.module = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.level = 0 w_list = self.w_names if w_list is not None: @@ -1003,9 +956,6 @@ class Exec(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, globals, locals, lineno, col_offset): self.body = body self.globals = globals @@ -1025,12 +975,12 @@ return visitor.visit_Exec(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~6) ^ 25: - missing_field(space, self.initialization_state, ['body', None, None, 'lineno', 'col_offset'], 'Exec') + if (self.initialization_state & ~24) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'body', None, None], 'Exec') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.globals = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.locals = None self.body.sync_app_attrs(space) if self.globals: @@ -1041,9 +991,6 @@ class Global(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -1058,7 +1005,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Global') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') else: pass w_list = self.w_names @@ -1072,9 +1019,6 @@ class Expr(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -1089,7 +1033,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Expr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr') else: pass self.value.sync_app_attrs(space) @@ -1097,9 +1041,6 @@ class Pass(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1112,16 +1053,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, 
self.initialization_state, ['lineno', 'col_offset'], 'Pass') + self.missing_field(space, ['lineno', 'col_offset'], 'Pass') else: pass class Break(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1134,16 +1072,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Break') + self.missing_field(space, ['lineno', 'col_offset'], 'Break') else: pass class Continue(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1156,21 +1091,19 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Continue') + self.missing_field(space, ['lineno', 'col_offset'], 'Continue') else: pass class expr(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class BoolOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, values, lineno, col_offset): self.op = op self.values = values @@ -1188,7 +1121,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'values', 'lineno', 'col_offset'], 'BoolOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp') else: pass w_list = self.w_values @@ -1205,9 +1138,6 @@ class BinOp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, op, right, lineno, col_offset): self.left = left self.op = op @@ -1225,7 +1155,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'op', 'right', 'lineno', 'col_offset'], 'BinOp') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'op', 'right'], 'BinOp') else: pass self.left.sync_app_attrs(space) @@ -1234,9 +1164,6 @@ class UnaryOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, operand, lineno, col_offset): self.op = op self.operand = operand @@ -1252,7 +1179,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'operand', 'lineno', 'col_offset'], 'UnaryOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'operand'], 'UnaryOp') else: pass self.operand.sync_app_attrs(space) @@ -1260,9 +1187,6 @@ class Lambda(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, args, body, lineno, col_offset): self.args = args self.body = body @@ -1279,7 +1203,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['args', 'body', 'lineno', 'col_offset'], 'Lambda') + self.missing_field(space, ['lineno', 'col_offset', 'args', 'body'], 'Lambda') else: pass self.args.sync_app_attrs(space) @@ -1288,9 +1212,6 @@ class IfExp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -1309,7 +1230,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'IfExp') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 
'IfExp') else: pass self.test.sync_app_attrs(space) @@ -1319,9 +1240,6 @@ class Dict(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, keys, values, lineno, col_offset): self.keys = keys self.w_keys = None @@ -1342,7 +1260,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['keys', 'values', 'lineno', 'col_offset'], 'Dict') + self.missing_field(space, ['lineno', 'col_offset', 'keys', 'values'], 'Dict') else: pass w_list = self.w_keys @@ -1369,9 +1287,6 @@ class Set(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, elts, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1388,7 +1303,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['elts', 'lineno', 'col_offset'], 'Set') + self.missing_field(space, ['lineno', 'col_offset', 'elts'], 'Set') else: pass w_list = self.w_elts @@ -1405,9 +1320,6 @@ class ListComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1426,7 +1338,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'ListComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'ListComp') else: pass self.elt.sync_app_attrs(space) @@ -1444,9 +1356,6 @@ class SetComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1465,7 +1374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'SetComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'SetComp') else: pass self.elt.sync_app_attrs(space) @@ -1483,9 +1392,6 @@ class DictComp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, key, value, generators, lineno, col_offset): self.key = key self.value = value @@ -1506,7 +1412,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['key', 'value', 'generators', 'lineno', 'col_offset'], 'DictComp') + self.missing_field(space, ['lineno', 'col_offset', 'key', 'value', 'generators'], 'DictComp') else: pass self.key.sync_app_attrs(space) @@ -1525,9 +1431,6 @@ class GeneratorExp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1546,7 +1449,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'GeneratorExp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'GeneratorExp') else: pass self.elt.sync_app_attrs(space) @@ -1564,9 +1467,6 @@ class Yield(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1581,10 +1481,10 @@ return visitor.visit_Yield(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Yield') + if 
(self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Yield') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -1592,9 +1492,6 @@ class Compare(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, ops, comparators, lineno, col_offset): self.left = left self.ops = ops @@ -1615,7 +1512,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'ops', 'comparators', 'lineno', 'col_offset'], 'Compare') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'ops', 'comparators'], 'Compare') else: pass self.left.sync_app_attrs(space) @@ -1640,9 +1537,6 @@ class Call(expr): - _lineno_mask = 32 - _col_offset_mask = 64 - def __init__(self, func, args, keywords, starargs, kwargs, lineno, col_offset): self.func = func self.args = args @@ -1670,12 +1564,12 @@ return visitor.visit_Call(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~24) ^ 103: - missing_field(space, self.initialization_state, ['func', 'args', 'keywords', None, None, 'lineno', 'col_offset'], 'Call') + if (self.initialization_state & ~96) ^ 31: + self.missing_field(space, ['lineno', 'col_offset', 'func', 'args', 'keywords', None, None], 'Call') else: - if not self.initialization_state & 8: + if not self.initialization_state & 32: self.starargs = None - if not self.initialization_state & 16: + if not self.initialization_state & 64: self.kwargs = None self.func.sync_app_attrs(space) w_list = self.w_args @@ -1706,9 +1600,6 @@ class Repr(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1723,7 +1614,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Repr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Repr') else: pass self.value.sync_app_attrs(space) @@ -1731,9 +1622,6 @@ class Num(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, n, lineno, col_offset): self.n = n expr.__init__(self, lineno, col_offset) @@ -1747,16 +1635,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['n', 'lineno', 'col_offset'], 'Num') + self.missing_field(space, ['lineno', 'col_offset', 'n'], 'Num') else: pass class Str(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, s, lineno, col_offset): self.s = s expr.__init__(self, lineno, col_offset) @@ -1770,16 +1655,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['s', 'lineno', 'col_offset'], 'Str') + self.missing_field(space, ['lineno', 'col_offset', 's'], 'Str') else: pass class Attribute(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, attr, ctx, lineno, col_offset): self.value = value self.attr = attr @@ -1796,7 +1678,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'attr', 'ctx', 'lineno', 'col_offset'], 'Attribute') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'attr', 'ctx'], 'Attribute') else: pass self.value.sync_app_attrs(space) @@ -1804,9 +1686,6 @@ class 
Subscript(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, slice, ctx, lineno, col_offset): self.value = value self.slice = slice @@ -1824,7 +1703,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'slice', 'ctx', 'lineno', 'col_offset'], 'Subscript') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'slice', 'ctx'], 'Subscript') else: pass self.value.sync_app_attrs(space) @@ -1833,9 +1712,6 @@ class Name(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, id, ctx, lineno, col_offset): self.id = id self.ctx = ctx @@ -1850,16 +1726,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['id', 'ctx', 'lineno', 'col_offset'], 'Name') + self.missing_field(space, ['lineno', 'col_offset', 'id', 'ctx'], 'Name') else: pass class List(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1877,7 +1750,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'List') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'List') else: pass w_list = self.w_elts @@ -1894,9 +1767,6 @@ class Tuple(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1914,7 +1784,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'Tuple') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'Tuple') else: pass w_list = self.w_elts @@ -1931,9 +1801,6 @@ class Const(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1947,7 +1814,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Const') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Const') else: pass @@ -2009,7 +1876,6 @@ class Ellipsis(slice): - def __init__(self): self.initialization_state = 0 @@ -2021,14 +1887,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 0: - missing_field(space, self.initialization_state, [], 'Ellipsis') + self.missing_field(space, [], 'Ellipsis') else: pass class Slice(slice): - def __init__(self, lower, upper, step): self.lower = lower self.upper = upper @@ -2049,7 +1914,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~7) ^ 0: - missing_field(space, self.initialization_state, [None, None, None], 'Slice') + self.missing_field(space, [None, None, None], 'Slice') else: if not self.initialization_state & 1: self.lower = None @@ -2067,7 +1932,6 @@ class ExtSlice(slice): - def __init__(self, dims): self.dims = dims self.w_dims = None @@ -2083,7 +1947,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['dims'], 'ExtSlice') + self.missing_field(space, ['dims'], 'ExtSlice') else: pass w_list = self.w_dims @@ -2100,7 +1964,6 @@ class Index(slice): - def __init__(self, value): self.value = value self.initialization_state = 1 @@ -2114,7 +1977,7 
@@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['value'], 'Index') + self.missing_field(space, ['value'], 'Index') else: pass self.value.sync_app_attrs(space) @@ -2377,7 +2240,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['target', 'iter', 'ifs'], 'comprehension') + self.missing_field(space, ['target', 'iter', 'ifs'], 'comprehension') else: pass self.target.sync_app_attrs(space) @@ -2394,15 +2257,13 @@ node.sync_app_attrs(space) class excepthandler(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class ExceptHandler(excepthandler): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, name, body, lineno, col_offset): self.type = type self.name = name @@ -2424,12 +2285,12 @@ return visitor.visit_ExceptHandler(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~3) ^ 28: - missing_field(space, self.initialization_state, [None, None, 'body', 'lineno', 'col_offset'], 'ExceptHandler') + if (self.initialization_state & ~12) ^ 19: + self.missing_field(space, ['lineno', 'col_offset', None, None, 'body'], 'ExceptHandler') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.name = None if self.type: self.type.sync_app_attrs(space) @@ -2470,7 +2331,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~6) ^ 9: - missing_field(space, self.initialization_state, ['args', None, None, 'defaults'], 'arguments') + self.missing_field(space, ['args', None, None, 'defaults'], 'arguments') else: if not self.initialization_state & 2: self.vararg = None @@ -2513,7 +2374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['arg', 'value'], 'keyword') + self.missing_field(space, ['arg', 'value'], 'keyword') else: pass self.value.sync_app_attrs(space) @@ -2533,7 +2394,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~2) ^ 1: - missing_field(space, self.initialization_state, ['name', None], 'alias') + self.missing_field(space, ['name', None], 'alias') else: if not self.initialization_state & 2: self.asname = None @@ -3019,6 +2880,8 @@ def Expression_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3098,7 +2961,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -3112,14 +2975,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def stmt_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not 
w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -3133,7 +2996,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 stmt.typedef = typedef.TypeDef("stmt", AST.typedef, @@ -3149,7 +3012,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3163,14 +3026,14 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def FunctionDef_get_args(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -3184,10 +3047,10 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def FunctionDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3201,10 +3064,10 @@ def FunctionDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def FunctionDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3218,7 +3081,7 @@ def FunctionDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _FunctionDef_field_unroller = unrolling_iterable(['name', 'args', 'body', 'decorator_list']) def FunctionDef_init(space, w_self, __args__): @@ -3254,7 +3117,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3268,10 +3131,10 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ClassDef_get_bases(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no 
attribute '%s'", typename, 'bases') if w_self.w_bases is None: @@ -3285,10 +3148,10 @@ def ClassDef_set_bases(space, w_self, w_new_value): w_self.w_bases = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ClassDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3302,10 +3165,10 @@ def ClassDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def ClassDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3319,7 +3182,7 @@ def ClassDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _ClassDef_field_unroller = unrolling_iterable(['name', 'bases', 'body', 'decorator_list']) def ClassDef_init(space, w_self, __args__): @@ -3356,7 +3219,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3364,13 +3227,15 @@ def Return_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Return_field_unroller = unrolling_iterable(['value']) def Return_init(space, w_self, __args__): @@ -3397,7 +3262,7 @@ ) def Delete_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3411,7 +3276,7 @@ def Delete_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Delete_field_unroller = unrolling_iterable(['targets']) def Delete_init(space, w_self, __args__): @@ -3439,7 +3304,7 @@ ) def Assign_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3453,14 +3318,14 @@ def Assign_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3468,13 +3333,15 @@ def Assign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assign_field_unroller = unrolling_iterable(['targets', 'value']) def Assign_init(space, w_self, __args__): @@ -3507,7 +3374,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3515,20 +3382,22 @@ def AugAssign_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def AugAssign_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -3544,14 +3413,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def AugAssign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3559,13 +3428,15 @@ def AugAssign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _AugAssign_field_unroller = unrolling_iterable(['target', 'op', 'value']) def AugAssign_init(space, w_self, __args__): @@ -3598,7 +3469,7 @@ w_obj = w_self.getdictvalue(space, 'dest') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no 
attribute '%s'", typename, 'dest') return space.wrap(w_self.dest) @@ -3606,16 +3477,18 @@ def Print_set_dest(space, w_self, w_new_value): try: w_self.dest = space.interp_w(expr, w_new_value, True) + if type(w_self.dest) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'dest', w_new_value) return w_self.deldictvalue(space, 'dest') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Print_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -3629,14 +3502,14 @@ def Print_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Print_get_nl(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'nl') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl') return space.wrap(w_self.nl) @@ -3650,7 +3523,7 @@ w_self.setdictvalue(space, 'nl', w_new_value) return w_self.deldictvalue(space, 'nl') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Print_field_unroller = unrolling_iterable(['dest', 'values', 'nl']) def Print_init(space, w_self, __args__): @@ -3684,7 +3557,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3692,20 +3565,22 @@ def For_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def For_get_iter(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'iter') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'iter') return space.wrap(w_self.iter) @@ -3713,16 +3588,18 @@ def For_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'iter', w_new_value) return w_self.deldictvalue(space, 'iter') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def For_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3736,10 +3613,10 @@ def For_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def For_get_orelse(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3753,7 +3630,7 @@ def For_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _For_field_unroller = unrolling_iterable(['target', 'iter', 'body', 'orelse']) def For_init(space, w_self, __args__): @@ -3789,7 +3666,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3797,16 +3674,18 @@ def While_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def While_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3820,10 +3699,10 @@ def While_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def While_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3837,7 +3716,7 @@ def While_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _While_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def While_init(space, w_self, __args__): @@ -3872,7 +3751,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3880,16 +3759,18 @@ def If_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def 
If_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3903,10 +3784,10 @@ def If_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def If_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3920,7 +3801,7 @@ def If_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _If_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def If_init(space, w_self, __args__): @@ -3955,7 +3836,7 @@ w_obj = w_self.getdictvalue(space, 'context_expr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr') return space.wrap(w_self.context_expr) @@ -3963,20 +3844,22 @@ def With_set_context_expr(space, w_self, w_new_value): try: w_self.context_expr = space.interp_w(expr, w_new_value, False) + if type(w_self.context_expr) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'context_expr', w_new_value) return w_self.deldictvalue(space, 'context_expr') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def With_get_optional_vars(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'optional_vars') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars') return space.wrap(w_self.optional_vars) @@ -3984,16 +3867,18 @@ def With_set_optional_vars(space, w_self, w_new_value): try: w_self.optional_vars = space.interp_w(expr, w_new_value, True) + if type(w_self.optional_vars) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'optional_vars', w_new_value) return w_self.deldictvalue(space, 'optional_vars') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def With_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4007,7 +3892,7 @@ def With_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _With_field_unroller = unrolling_iterable(['context_expr', 'optional_vars', 'body']) def With_init(space, w_self, __args__): @@ -4041,7 +3926,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -4049,20 +3934,22 @@ def Raise_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Raise_get_inst(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'inst') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst') return space.wrap(w_self.inst) @@ -4070,20 +3957,22 @@ def Raise_set_inst(space, w_self, w_new_value): try: w_self.inst = space.interp_w(expr, w_new_value, True) + if type(w_self.inst) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'inst', w_new_value) return w_self.deldictvalue(space, 'inst') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Raise_get_tback(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'tback') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'tback') return space.wrap(w_self.tback) @@ -4091,13 +3980,15 @@ def Raise_set_tback(space, w_self, w_new_value): try: w_self.tback = space.interp_w(expr, w_new_value, True) + if type(w_self.tback) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'tback', w_new_value) return w_self.deldictvalue(space, 'tback') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Raise_field_unroller = unrolling_iterable(['type', 'inst', 'tback']) def Raise_init(space, w_self, __args__): @@ -4126,7 +4017,7 @@ ) def TryExcept_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4140,10 +4031,10 @@ def TryExcept_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryExcept_get_handlers(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers') if w_self.w_handlers is None: @@ -4157,10 +4048,10 @@ def TryExcept_set_handlers(space, w_self, w_new_value): w_self.w_handlers = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def TryExcept_get_orelse(space, 
w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -4174,7 +4065,7 @@ def TryExcept_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _TryExcept_field_unroller = unrolling_iterable(['body', 'handlers', 'orelse']) def TryExcept_init(space, w_self, __args__): @@ -4206,7 +4097,7 @@ ) def TryFinally_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4220,10 +4111,10 @@ def TryFinally_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryFinally_get_finalbody(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody') if w_self.w_finalbody is None: @@ -4237,7 +4128,7 @@ def TryFinally_set_finalbody(space, w_self, w_new_value): w_self.w_finalbody = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _TryFinally_field_unroller = unrolling_iterable(['body', 'finalbody']) def TryFinally_init(space, w_self, __args__): @@ -4271,7 +4162,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -4279,20 +4170,22 @@ def Assert_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assert_get_msg(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'msg') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg') return space.wrap(w_self.msg) @@ -4300,13 +4193,15 @@ def Assert_set_msg(space, w_self, w_new_value): try: w_self.msg = space.interp_w(expr, w_new_value, True) + if type(w_self.msg) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'msg', w_new_value) return w_self.deldictvalue(space, 'msg') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assert_field_unroller = unrolling_iterable(['test', 'msg']) def Assert_init(space, w_self, __args__): @@ -4334,7 +4229,7 @@ ) def Import_get_names(space, w_self): - if not w_self.initialization_state & 1: + if 
not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4348,7 +4243,7 @@ def Import_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Import_field_unroller = unrolling_iterable(['names']) def Import_init(space, w_self, __args__): @@ -4380,7 +4275,7 @@ w_obj = w_self.getdictvalue(space, 'module') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module') return space.wrap(w_self.module) @@ -4397,10 +4292,10 @@ w_self.setdictvalue(space, 'module', w_new_value) return w_self.deldictvalue(space, 'module') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ImportFrom_get_names(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4414,14 +4309,14 @@ def ImportFrom_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ImportFrom_get_level(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'level') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level') return space.wrap(w_self.level) @@ -4435,7 +4330,7 @@ w_self.setdictvalue(space, 'level', w_new_value) return w_self.deldictvalue(space, 'level') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ImportFrom_field_unroller = unrolling_iterable(['module', 'names', 'level']) def ImportFrom_init(space, w_self, __args__): @@ -4469,7 +4364,7 @@ w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -4477,20 +4372,22 @@ def Exec_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Exec_get_globals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'globals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals') return space.wrap(w_self.globals) @@ -4498,20 +4395,22 @@ def Exec_set_globals(space, w_self, w_new_value): try: 
w_self.globals = space.interp_w(expr, w_new_value, True) + if type(w_self.globals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'globals', w_new_value) return w_self.deldictvalue(space, 'globals') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Exec_get_locals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'locals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals') return space.wrap(w_self.locals) @@ -4519,13 +4418,15 @@ def Exec_set_locals(space, w_self, w_new_value): try: w_self.locals = space.interp_w(expr, w_new_value, True) + if type(w_self.locals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'locals', w_new_value) return w_self.deldictvalue(space, 'locals') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Exec_field_unroller = unrolling_iterable(['body', 'globals', 'locals']) def Exec_init(space, w_self, __args__): @@ -4554,7 +4455,7 @@ ) def Global_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4568,7 +4469,7 @@ def Global_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Global_field_unroller = unrolling_iterable(['names']) def Global_init(space, w_self, __args__): @@ -4600,7 +4501,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -4608,13 +4509,15 @@ def Expr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Expr_field_unroller = unrolling_iterable(['value']) def Expr_init(space, w_self, __args__): @@ -4696,7 +4599,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -4710,14 +4613,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def expr_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = 
w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -4731,7 +4634,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 expr.typedef = typedef.TypeDef("expr", AST.typedef, @@ -4747,7 +4650,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return boolop_to_class[w_self.op - 1]() @@ -4763,10 +4666,10 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BoolOp_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -4780,7 +4683,7 @@ def BoolOp_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _BoolOp_field_unroller = unrolling_iterable(['op', 'values']) def BoolOp_init(space, w_self, __args__): @@ -4813,7 +4716,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -4821,20 +4724,22 @@ def BinOp_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BinOp_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -4850,14 +4755,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def BinOp_get_right(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'right') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right') 
return space.wrap(w_self.right) @@ -4865,13 +4770,15 @@ def BinOp_set_right(space, w_self, w_new_value): try: w_self.right = space.interp_w(expr, w_new_value, False) + if type(w_self.right) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'right', w_new_value) return w_self.deldictvalue(space, 'right') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _BinOp_field_unroller = unrolling_iterable(['left', 'op', 'right']) def BinOp_init(space, w_self, __args__): @@ -4904,7 +4811,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return unaryop_to_class[w_self.op - 1]() @@ -4920,14 +4827,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def UnaryOp_get_operand(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'operand') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand') return space.wrap(w_self.operand) @@ -4935,13 +4842,15 @@ def UnaryOp_set_operand(space, w_self, w_new_value): try: w_self.operand = space.interp_w(expr, w_new_value, False) + if type(w_self.operand) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'operand', w_new_value) return w_self.deldictvalue(space, 'operand') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _UnaryOp_field_unroller = unrolling_iterable(['op', 'operand']) def UnaryOp_init(space, w_self, __args__): @@ -4973,7 +4882,7 @@ w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -4987,14 +4896,14 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Lambda_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5002,13 +4911,15 @@ def Lambda_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 
_Lambda_field_unroller = unrolling_iterable(['args', 'body']) def Lambda_init(space, w_self, __args__): @@ -5040,7 +4951,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -5048,20 +4959,22 @@ def IfExp_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def IfExp_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5069,20 +4982,22 @@ def IfExp_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def IfExp_get_orelse(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'orelse') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') return space.wrap(w_self.orelse) @@ -5090,13 +5005,15 @@ def IfExp_set_orelse(space, w_self, w_new_value): try: w_self.orelse = space.interp_w(expr, w_new_value, False) + if type(w_self.orelse) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'orelse', w_new_value) return w_self.deldictvalue(space, 'orelse') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _IfExp_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def IfExp_init(space, w_self, __args__): @@ -5125,7 +5042,7 @@ ) def Dict_get_keys(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys') if w_self.w_keys is None: @@ -5139,10 +5056,10 @@ def Dict_set_keys(space, w_self, w_new_value): w_self.w_keys = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Dict_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ 
-5156,7 +5073,7 @@ def Dict_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Dict_field_unroller = unrolling_iterable(['keys', 'values']) def Dict_init(space, w_self, __args__): @@ -5186,7 +5103,7 @@ ) def Set_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -5200,7 +5117,7 @@ def Set_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Set_field_unroller = unrolling_iterable(['elts']) def Set_init(space, w_self, __args__): @@ -5232,7 +5149,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5240,16 +5157,18 @@ def ListComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ListComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5263,7 +5182,7 @@ def ListComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _ListComp_field_unroller = unrolling_iterable(['elt', 'generators']) def ListComp_init(space, w_self, __args__): @@ -5296,7 +5215,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5304,16 +5223,18 @@ def SetComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def SetComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5327,7 +5248,7 @@ def SetComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state 
|= 8 _SetComp_field_unroller = unrolling_iterable(['elt', 'generators']) def SetComp_init(space, w_self, __args__): @@ -5360,7 +5281,7 @@ w_obj = w_self.getdictvalue(space, 'key') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key') return space.wrap(w_self.key) @@ -5368,20 +5289,22 @@ def DictComp_set_key(space, w_self, w_new_value): try: w_self.key = space.interp_w(expr, w_new_value, False) + if type(w_self.key) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'key', w_new_value) return w_self.deldictvalue(space, 'key') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def DictComp_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5389,16 +5312,18 @@ def DictComp_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def DictComp_get_generators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5412,7 +5337,7 @@ def DictComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _DictComp_field_unroller = unrolling_iterable(['key', 'value', 'generators']) def DictComp_init(space, w_self, __args__): @@ -5446,7 +5371,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5454,16 +5379,18 @@ def GeneratorExp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def GeneratorExp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ 
-5477,7 +5404,7 @@ def GeneratorExp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _GeneratorExp_field_unroller = unrolling_iterable(['elt', 'generators']) def GeneratorExp_init(space, w_self, __args__): @@ -5510,7 +5437,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5518,13 +5445,15 @@ def Yield_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Yield_field_unroller = unrolling_iterable(['value']) def Yield_init(space, w_self, __args__): @@ -5555,7 +5484,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -5563,16 +5492,18 @@ def Compare_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Compare_get_ops(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops') if w_self.w_ops is None: @@ -5586,10 +5517,10 @@ def Compare_set_ops(space, w_self, w_new_value): w_self.w_ops = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Compare_get_comparators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators') if w_self.w_comparators is None: @@ -5603,7 +5534,7 @@ def Compare_set_comparators(space, w_self, w_new_value): w_self.w_comparators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Compare_field_unroller = unrolling_iterable(['left', 'ops', 'comparators']) def Compare_init(space, w_self, __args__): @@ -5638,7 +5569,7 @@ w_obj = w_self.getdictvalue(space, 'func') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func') return space.wrap(w_self.func) @@ -5646,16 +5577,18 @@ def Call_set_func(space, w_self, w_new_value): 
try: w_self.func = space.interp_w(expr, w_new_value, False) + if type(w_self.func) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'func', w_new_value) return w_self.deldictvalue(space, 'func') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Call_get_args(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') if w_self.w_args is None: @@ -5669,10 +5602,10 @@ def Call_set_args(space, w_self, w_new_value): w_self.w_args = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Call_get_keywords(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') if w_self.w_keywords is None: @@ -5686,14 +5619,14 @@ def Call_set_keywords(space, w_self, w_new_value): w_self.w_keywords = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def Call_get_starargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'starargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') return space.wrap(w_self.starargs) @@ -5701,20 +5634,22 @@ def Call_set_starargs(space, w_self, w_new_value): try: w_self.starargs = space.interp_w(expr, w_new_value, True) + if type(w_self.starargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'starargs', w_new_value) return w_self.deldictvalue(space, 'starargs') - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 def Call_get_kwargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'kwargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 16: + if not w_self.initialization_state & 64: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') return space.wrap(w_self.kwargs) @@ -5722,13 +5657,15 @@ def Call_set_kwargs(space, w_self, w_new_value): try: w_self.kwargs = space.interp_w(expr, w_new_value, True) + if type(w_self.kwargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'kwargs', w_new_value) return w_self.deldictvalue(space, 'kwargs') - w_self.initialization_state |= 16 + w_self.initialization_state |= 64 _Call_field_unroller = unrolling_iterable(['func', 'args', 'keywords', 'starargs', 'kwargs']) def Call_init(space, w_self, __args__): @@ -5765,7 +5702,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 
'value') return space.wrap(w_self.value) @@ -5773,13 +5710,15 @@ def Repr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Repr_field_unroller = unrolling_iterable(['value']) def Repr_init(space, w_self, __args__): @@ -5810,7 +5749,7 @@ w_obj = w_self.getdictvalue(space, 'n') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n') return w_self.n @@ -5824,7 +5763,7 @@ w_self.setdictvalue(space, 'n', w_new_value) return w_self.deldictvalue(space, 'n') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Num_field_unroller = unrolling_iterable(['n']) def Num_init(space, w_self, __args__): @@ -5855,7 +5794,7 @@ w_obj = w_self.getdictvalue(space, 's') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's') return w_self.s @@ -5869,7 +5808,7 @@ w_self.setdictvalue(space, 's', w_new_value) return w_self.deldictvalue(space, 's') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Str_field_unroller = unrolling_iterable(['s']) def Str_init(space, w_self, __args__): @@ -5900,7 +5839,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5908,20 +5847,22 @@ def Attribute_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Attribute_get_attr(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'attr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr') return space.wrap(w_self.attr) @@ -5935,14 +5876,14 @@ w_self.setdictvalue(space, 'attr', w_new_value) return w_self.deldictvalue(space, 'attr') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Attribute_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has 
no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -5958,7 +5899,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Attribute_field_unroller = unrolling_iterable(['value', 'attr', 'ctx']) def Attribute_init(space, w_self, __args__): @@ -5991,7 +5932,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5999,20 +5940,22 @@ def Subscript_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Subscript_get_slice(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'slice') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice') return space.wrap(w_self.slice) @@ -6020,20 +5963,22 @@ def Subscript_set_slice(space, w_self, w_new_value): try: w_self.slice = space.interp_w(slice, w_new_value, False) + if type(w_self.slice) is slice: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'slice', w_new_value) return w_self.deldictvalue(space, 'slice') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Subscript_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6049,7 +5994,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Subscript_field_unroller = unrolling_iterable(['value', 'slice', 'ctx']) def Subscript_init(space, w_self, __args__): @@ -6082,7 +6027,7 @@ w_obj = w_self.getdictvalue(space, 'id') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id') return space.wrap(w_self.id) @@ -6096,14 +6041,14 @@ w_self.setdictvalue(space, 'id', w_new_value) return w_self.deldictvalue(space, 'id') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Name_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not 
w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6119,7 +6064,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Name_field_unroller = unrolling_iterable(['id', 'ctx']) def Name_init(space, w_self, __args__): @@ -6147,7 +6092,7 @@ ) def List_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6161,14 +6106,14 @@ def List_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def List_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6184,7 +6129,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _List_field_unroller = unrolling_iterable(['elts', 'ctx']) def List_init(space, w_self, __args__): @@ -6213,7 +6158,7 @@ ) def Tuple_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6227,14 +6172,14 @@ def Tuple_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Tuple_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6250,7 +6195,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Tuple_field_unroller = unrolling_iterable(['elts', 'ctx']) def Tuple_init(space, w_self, __args__): @@ -6283,7 +6228,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return w_self.value @@ -6297,7 +6242,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Const_field_unroller = unrolling_iterable(['value']) def Const_init(space, w_self, __args__): @@ -6409,6 +6354,8 @@ def 
Slice_set_lower(space, w_self, w_new_value): try: w_self.lower = space.interp_w(expr, w_new_value, True) + if type(w_self.lower) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6430,6 +6377,8 @@ def Slice_set_upper(space, w_self, w_new_value): try: w_self.upper = space.interp_w(expr, w_new_value, True) + if type(w_self.upper) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6451,6 +6400,8 @@ def Slice_set_step(space, w_self, w_new_value): try: w_self.step = space.interp_w(expr, w_new_value, True) + if type(w_self.step) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6540,6 +6491,8 @@ def Index_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6809,6 +6762,8 @@ def comprehension_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6830,6 +6785,8 @@ def comprehension_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6887,7 +6844,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -6901,14 +6858,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def excepthandler_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -6922,7 +6879,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 excepthandler.typedef = typedef.TypeDef("excepthandler", AST.typedef, @@ -6938,7 +6895,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -6946,20 +6903,22 @@ def ExceptHandler_set_type(space, w_self, w_new_value): try: w_self.type = 
space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ExceptHandler_get_name(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -6967,16 +6926,18 @@ def ExceptHandler_set_name(space, w_self, w_new_value): try: w_self.name = space.interp_w(expr, w_new_value, True) + if type(w_self.name) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ExceptHandler_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -6990,7 +6951,7 @@ def ExceptHandler_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ExceptHandler_field_unroller = unrolling_iterable(['type', 'name', 'body']) def ExceptHandler_init(space, w_self, __args__): @@ -7164,6 +7125,8 @@ def keyword_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -79,6 +79,7 @@ else: self.emit("class %s(AST):" % (base,)) if sum.attributes: + self.emit("") args = ", ".join(attr.name.value for attr in sum.attributes) self.emit("def __init__(self, %s):" % (args,), 1) for attr in sum.attributes: @@ -114,7 +115,7 @@ else: names.append(repr(field.name.value)) sub = (", ".join(names), name.value) - self.emit("missing_field(space, self.initialization_state, [%s], %r)" + self.emit("self.missing_field(space, [%s], %r)" % sub, 3) self.emit("else:", 2) # Fill in all the default fields. 
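The asdl_py.py hunks below move the module-level missing_field() helper onto the AST base class, where it can also consult the instance dict and complain about a field of the wrong type, not just an absent one. A simplified standalone sketch of the bit-by-bit reporting logic, in plain Python and omitting the wrong-type branch and the object-space wrapping:

    def missing_field(state, required, host):
        # 'required' lists field names in bit order; None marks an
        # optional slot whose absence is acceptable.
        for i in range(len(required)):
            if (state >> i) & 1:
                continue          # bit set: the field was initialized
            name = required[i]
            if name is None:
                continue          # optional field
            raise TypeError('required field "%s" missing from %s'
                            % (name, host))
        raise AssertionError("should not reach here")

    try:
        # bits 0 and 1 (lineno, col_offset) are set, bit 2 ('value') is not
        missing_field(0b011, ['lineno', 'col_offset', 'value'], 'Return')
    except TypeError as e:
        assert 'required field "value" missing' in str(e)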
@@ -195,17 +196,13 @@ def visitConstructor(self, cons, base, extra_attributes): self.emit("class %s(%s):" % (cons.name, base)) self.emit("") - for field in self.data.cons_attributes[cons]: - subst = (field.name, self.data.field_masks[field]) - self.emit("_%s_mask = %i" % subst, 1) - self.emit("") self.make_constructor(cons.fields, cons, extra_attributes, base) self.emit("") self.emit("def walkabout(self, visitor):", 1) self.emit("visitor.visit_%s(self)" % (cons.name,), 2) self.emit("") self.make_mutate_over(cons, cons.name) - self.make_var_syncer(cons.fields + self.data.cons_attributes[cons], + self.make_var_syncer(self.data.cons_attributes[cons] + cons.fields, cons, cons.name) def visitField(self, field): @@ -324,7 +321,7 @@ def visitSum(self, sum, name): for field in sum.attributes: - self.make_property(field, name, True) + self.make_property(field, name) self.make_typedef(name, "AST", sum.attributes, fields_name="_attributes") if not is_simple_sum(sum): @@ -400,13 +397,10 @@ def visitField(self, field, name): self.make_property(field, name) - def make_property(self, field, name, different_masks=False): + def make_property(self, field, name): func = "def %s_get_%s(space, w_self):" % (name, field.name) self.emit(func) - if different_masks: - flag = "w_self._%s_mask" % (field.name,) - else: - flag = self.data.field_masks[field] + flag = self.data.field_masks[field] if not field.seq: self.emit("if w_self.w_dict is not None:", 1) self.emit(" w_obj = w_self.getdictvalue(space, '%s')" % (field.name,), 1) @@ -458,6 +452,11 @@ config = (field.name, field.type, repr(field.opt)) self.emit("w_self.%s = space.interp_w(%s, w_new_value, %s)" % config, 2) + if field.type.value not in self.data.prod_simple: + self.emit("if type(w_self.%s) is %s:" % ( + field.name, field.type), 2) + self.emit("raise OperationError(space.w_TypeError, " + "space.w_None)", 3) else: level = 2 if field.opt and field.type.value != "int": @@ -505,7 +504,10 @@ optional_mask = 0 for i, field in enumerate(fields): flag = 1 << i - field_masks[field] = flag + if field not in field_masks: + field_masks[field] = flag + else: + assert field_masks[field] == flag if field.opt: optional_mask |= flag else: @@ -518,9 +520,9 @@ if is_simple_sum(sum): simple_types.add(tp.name.value) else: + attrs = [field for field in sum.attributes] for cons in sum.types: - attrs = [copy_field(field) for field in sum.attributes] - add_masks(cons.fields + attrs, cons) + add_masks(attrs + cons.fields, cons) cons_attributes[cons] = attrs else: prod = tp.value @@ -588,6 +590,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." + state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \\"%s\\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \\"%s\\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -631,15 +651,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." 
- for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \\"%s\\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) - raise AssertionError("should not reach here") """ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -190,8 +190,8 @@ def is_w(self, space, w_other): return self is w_other - def unique_id(self, space): - return space.wrap(compute_unique_id(self)) + def immutable_unique_id(self, space): + return None def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) @@ -706,7 +706,10 @@ return w_two.is_w(self, w_one) def id(self, w_obj): - return w_obj.unique_id(self) + w_result = w_obj.immutable_unique_id(self) + if w_result is None: + w_result = self.wrap(compute_unique_id(w_obj)) + return w_result def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -54,7 +54,11 @@ # Hash support def default_identity_hash(space, w_obj): - return space.wrap(compute_identity_hash(w_obj)) + w_unique_id = w_obj.immutable_unique_id(space) + if w_unique_id is None: # common case + return space.wrap(compute_identity_hash(w_obj)) + else: + return space.hash(w_unique_id) # ____________________________________________________________ # diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -37,7 +37,7 @@ def get_arg_types(self): return self.arg_types - def get_return_type(self): + def get_result_type(self): return self.typeinfo def get_extra_info(self): diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -5,11 +5,7 @@ from pypy.jit.metainterp.history import AbstractDescr, getkind from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker, longlong - -# The point of the class organization in this file is to make instances -# as compact as possible. This is done by not storing the field size or -# the 'is_pointer_field' flag in the instance itself but in the class -# (in methods actually) using a few classes instead of just one. 
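The descr.py rewrite that follows drops the old scheme of one descr subclass per field/array/call kind in favour of single FieldDescr, ArrayDescr and CallDescr classes that carry an explicit size plus a one-character flag. A standalone sketch of the flag-based queries, reusing the flag constants visible in the diff; the field names, offsets and sizes here are made up for the example and the lltype plumbing is omitted:

    FLAG_POINTER  = 'P'
    FLAG_FLOAT    = 'F'
    FLAG_UNSIGNED = 'U'
    FLAG_SIGNED   = 'S'
    FLAG_STRUCT   = 'X'

    class FieldDescr(object):
        def __init__(self, name, offset, field_size, flag):
            self.name = name
            self.offset = offset
            self.field_size = field_size
            self.flag = flag

        def is_pointer_field(self):
            return self.flag == FLAG_POINTER

        def is_float_field(self):
            return self.flag == FLAG_FLOAT

        def is_field_signed(self):
            return self.flag == FLAG_SIGNED

    # one class now covers what previously needed a subclass per field kind
    signed_field = FieldDescr('S.x', 8, 8, FLAG_SIGNED)
    gcptr_field = FieldDescr('S.next', 16, 8, FLAG_POINTER)
    assert signed_field.is_field_signed()
    assert gcptr_field.is_pointer_field() and not gcptr_field.is_float_field()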
+from pypy.jit.codewriter.longlong import is_longlong class GcCache(object): @@ -19,6 +15,7 @@ self._cache_size = {} self._cache_field = {} self._cache_array = {} + self._cache_arraylen = {} self._cache_call = {} self._cache_interiorfield = {} @@ -26,24 +23,15 @@ assert isinstance(STRUCT, lltype.GcStruct) def init_array_descr(self, ARRAY, arraydescr): - assert isinstance(ARRAY, lltype.GcArray) + assert (isinstance(ARRAY, lltype.GcArray) or + isinstance(ARRAY, lltype.GcStruct) and ARRAY._arrayfld) -if lltype.SignedLongLong is lltype.Signed: - def is_longlong(TYPE): - return False -else: - assert rffi.sizeof(lltype.SignedLongLong) == rffi.sizeof(lltype.Float) - def is_longlong(TYPE): - return TYPE in (lltype.SignedLongLong, lltype.UnsignedLongLong) - # ____________________________________________________________ # SizeDescrs class SizeDescr(AbstractDescr): size = 0 # help translation - is_immutable = False - tid = llop.combine_ushort(lltype.Signed, 0, 0) def __init__(self, size, count_fields_if_immut=-1): @@ -77,265 +65,247 @@ cache[STRUCT] = sizedescr return sizedescr + # ____________________________________________________________ # FieldDescrs -class BaseFieldDescr(AbstractDescr): +FLAG_POINTER = 'P' +FLAG_FLOAT = 'F' +FLAG_UNSIGNED = 'U' +FLAG_SIGNED = 'S' +FLAG_STRUCT = 'X' +FLAG_VOID = 'V' + +class FieldDescr(AbstractDescr): + name = '' offset = 0 # help translation - name = '' - _clsname = '' + field_size = 0 + flag = '\x00' - def __init__(self, name, offset): + def __init__(self, name, offset, field_size, flag): self.name = name self.offset = offset + self.field_size = field_size + self.flag = flag + + def is_pointer_field(self): + return self.flag == FLAG_POINTER + + def is_float_field(self): + return self.flag == FLAG_FLOAT + + def is_field_signed(self): + return self.flag == FLAG_SIGNED def sort_key(self): return self.offset - def get_field_size(self, translate_support_code): - raise NotImplementedError + def repr_of_descr(self): + return '' % (self.flag, self.name, self.offset) - _is_pointer_field = False # unless overridden by GcPtrFieldDescr - _is_float_field = False # unless overridden by FloatFieldDescr - _is_field_signed = False # unless overridden by XxxFieldDescr - - def is_pointer_field(self): - return self._is_pointer_field - - def is_float_field(self): - return self._is_float_field - - def is_field_signed(self): - return self._is_field_signed - - def repr_of_descr(self): - return '<%s %s %s>' % (self._clsname, self.name, self.offset) - -class DynamicFieldDescr(BaseFieldDescr): - def __init__(self, offset, fieldsize, is_pointer, is_float, is_signed): - self.offset = offset - self._fieldsize = fieldsize - self._is_pointer_field = is_pointer - self._is_float_field = is_float - self._is_field_signed = is_signed - - def get_field_size(self, translate_support_code): - return self._fieldsize - -class NonGcPtrFieldDescr(BaseFieldDescr): - _clsname = 'NonGcPtrFieldDescr' - def get_field_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrFieldDescr(NonGcPtrFieldDescr): - _clsname = 'GcPtrFieldDescr' - _is_pointer_field = True - -def getFieldDescrClass(TYPE): - return getDescrClass(TYPE, BaseFieldDescr, GcPtrFieldDescr, - NonGcPtrFieldDescr, 'Field', 'get_field_size', - '_is_float_field', '_is_field_signed') def get_field_descr(gccache, STRUCT, fieldname): cache = gccache._cache_field try: return cache[STRUCT][fieldname] except KeyError: - offset, _ = symbolic.get_field_token(STRUCT, fieldname, - 
gccache.translate_support_code) + offset, size = symbolic.get_field_token(STRUCT, fieldname, + gccache.translate_support_code) FIELDTYPE = getattr(STRUCT, fieldname) + flag = get_type_flag(FIELDTYPE) name = '%s.%s' % (STRUCT._name, fieldname) - fielddescr = getFieldDescrClass(FIELDTYPE)(name, offset) + fielddescr = FieldDescr(name, offset, size, flag) cachedict = cache.setdefault(STRUCT, {}) cachedict[fieldname] = fielddescr return fielddescr +def get_type_flag(TYPE): + if isinstance(TYPE, lltype.Ptr): + if TYPE.TO._gckind == 'gc': + return FLAG_POINTER + else: + return FLAG_UNSIGNED + if isinstance(TYPE, lltype.Struct): + return FLAG_STRUCT + if TYPE is lltype.Float or is_longlong(TYPE): + return FLAG_FLOAT + if (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and + rffi.cast(TYPE, -1) == -1): + return FLAG_SIGNED + return FLAG_UNSIGNED + +def get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT): + cache = gccache._cache_arraylen + try: + return cache[ARRAY_OR_STRUCT] + except KeyError: + tsc = gccache.translate_support_code + (_, _, ofs) = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + size = symbolic.get_size(lltype.Signed, tsc) + result = FieldDescr("len", ofs, size, get_type_flag(lltype.Signed)) + cache[ARRAY_OR_STRUCT] = result + return result + + # ____________________________________________________________ # ArrayDescrs -_A = lltype.GcArray(lltype.Signed) # a random gcarray -_AF = lltype.GcArray(lltype.Float) # an array of C doubles +class ArrayDescr(AbstractDescr): + tid = 0 + basesize = 0 # workaround for the annotator + itemsize = 0 + lendescr = None + flag = '\x00' - -class BaseArrayDescr(AbstractDescr): - _clsname = '' - tid = llop.combine_ushort(lltype.Signed, 0, 0) - - def get_base_size(self, translate_support_code): - basesize, _, _ = symbolic.get_array_token(_A, translate_support_code) - return basesize - - def get_ofs_length(self, translate_support_code): - _, _, ofslength = symbolic.get_array_token(_A, translate_support_code) - return ofslength - - def get_item_size(self, translate_support_code): - raise NotImplementedError - - _is_array_of_pointers = False # unless overridden by GcPtrArrayDescr - _is_array_of_floats = False # unless overridden by FloatArrayDescr - _is_array_of_structs = False # unless overridden by StructArrayDescr - _is_item_signed = False # unless overridden by XxxArrayDescr + def __init__(self, basesize, itemsize, lendescr, flag): + self.basesize = basesize + self.itemsize = itemsize + self.lendescr = lendescr # or None, if no length + self.flag = flag def is_array_of_pointers(self): - return self._is_array_of_pointers + return self.flag == FLAG_POINTER def is_array_of_floats(self): - return self._is_array_of_floats + return self.flag == FLAG_FLOAT + + def is_item_signed(self): + return self.flag == FLAG_SIGNED def is_array_of_structs(self): - return self._is_array_of_structs - - def is_item_signed(self): - return self._is_item_signed + return self.flag == FLAG_STRUCT def repr_of_descr(self): - return '<%s>' % self._clsname + return '' % (self.flag, self.itemsize) -class NonGcPtrArrayDescr(BaseArrayDescr): - _clsname = 'NonGcPtrArrayDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayDescr(NonGcPtrArrayDescr): - _clsname = 'GcPtrArrayDescr' - _is_array_of_pointers = True - -class FloatArrayDescr(BaseArrayDescr): - _clsname = 'FloatArrayDescr' - _is_array_of_floats = True - def get_base_size(self, translate_support_code): - basesize, _, _ = 
symbolic.get_array_token(_AF, translate_support_code) - return basesize - def get_item_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class StructArrayDescr(BaseArrayDescr): - _clsname = 'StructArrayDescr' - _is_array_of_structs = True - -class BaseArrayNoLengthDescr(BaseArrayDescr): - def get_base_size(self, translate_support_code): - return 0 - - def get_ofs_length(self, translate_support_code): - return -1 - -class DynamicArrayNoLengthDescr(BaseArrayNoLengthDescr): - def __init__(self, itemsize): - self.itemsize = itemsize - - def get_item_size(self, translate_support_code): - return self.itemsize - -class NonGcPtrArrayNoLengthDescr(BaseArrayNoLengthDescr): - _clsname = 'NonGcPtrArrayNoLengthDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayNoLengthDescr(NonGcPtrArrayNoLengthDescr): - _clsname = 'GcPtrArrayNoLengthDescr' - _is_array_of_pointers = True - -def getArrayDescrClass(ARRAY): - if ARRAY.OF is lltype.Float: - return FloatArrayDescr - elif isinstance(ARRAY.OF, lltype.Struct): - class Descr(StructArrayDescr): - _clsname = '%sArrayDescr' % ARRAY.OF._name - def get_item_size(self, translate_support_code): - return symbolic.get_size(ARRAY.OF, translate_support_code) - Descr.__name__ = Descr._clsname - return Descr - return getDescrClass(ARRAY.OF, BaseArrayDescr, GcPtrArrayDescr, - NonGcPtrArrayDescr, 'Array', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def getArrayNoLengthDescrClass(ARRAY): - return getDescrClass(ARRAY.OF, BaseArrayNoLengthDescr, GcPtrArrayNoLengthDescr, - NonGcPtrArrayNoLengthDescr, 'ArrayNoLength', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def get_array_descr(gccache, ARRAY): +def get_array_descr(gccache, ARRAY_OR_STRUCT): cache = gccache._cache_array try: - return cache[ARRAY] + return cache[ARRAY_OR_STRUCT] except KeyError: - # we only support Arrays that are either GcArrays, or raw no-length - # non-gc Arrays. 
- if ARRAY._hints.get('nolength', False): - assert not isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayNoLengthDescrClass(ARRAY)() + tsc = gccache.translate_support_code + basesize, itemsize, _ = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + if isinstance(ARRAY_OR_STRUCT, lltype.Array): + ARRAY_INSIDE = ARRAY_OR_STRUCT else: - assert isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayDescrClass(ARRAY)() - # verify basic assumption that all arrays' basesize and ofslength - # are equal - basesize, itemsize, ofslength = symbolic.get_array_token(ARRAY, False) - assert basesize == arraydescr.get_base_size(False) - assert itemsize == arraydescr.get_item_size(False) - if not ARRAY._hints.get('nolength', False): - assert ofslength == arraydescr.get_ofs_length(False) - if isinstance(ARRAY, lltype.GcArray): - gccache.init_array_descr(ARRAY, arraydescr) - cache[ARRAY] = arraydescr + ARRAY_INSIDE = ARRAY_OR_STRUCT._flds[ARRAY_OR_STRUCT._arrayfld] + if ARRAY_INSIDE._hints.get('nolength', False): + lendescr = None + else: + lendescr = get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT) + flag = get_type_flag(ARRAY_INSIDE.OF) + arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag) + if ARRAY_OR_STRUCT._gckind == 'gc': + gccache.init_array_descr(ARRAY_OR_STRUCT, arraydescr) + cache[ARRAY_OR_STRUCT] = arraydescr return arraydescr + # ____________________________________________________________ # InteriorFieldDescr class InteriorFieldDescr(AbstractDescr): - arraydescr = BaseArrayDescr() # workaround for the annotator - fielddescr = BaseFieldDescr('', 0) + arraydescr = ArrayDescr(0, 0, None, '\x00') # workaround for the annotator + fielddescr = FieldDescr('', 0, 0, '\x00') def __init__(self, arraydescr, fielddescr): + assert arraydescr.flag == FLAG_STRUCT self.arraydescr = arraydescr self.fielddescr = fielddescr + def sort_key(self): + return self.fielddescr.sort_key() + def is_pointer_field(self): return self.fielddescr.is_pointer_field() def is_float_field(self): return self.fielddescr.is_float_field() - def sort_key(self): - return self.fielddescr.sort_key() - def repr_of_descr(self): return '' % self.fielddescr.repr_of_descr() -def get_interiorfield_descr(gc_ll_descr, ARRAY, FIELDTP, name): +def get_interiorfield_descr(gc_ll_descr, ARRAY, name): cache = gc_ll_descr._cache_interiorfield try: - return cache[(ARRAY, FIELDTP, name)] + return cache[(ARRAY, name)] except KeyError: arraydescr = get_array_descr(gc_ll_descr, ARRAY) - fielddescr = get_field_descr(gc_ll_descr, FIELDTP, name) + fielddescr = get_field_descr(gc_ll_descr, ARRAY.OF, name) descr = InteriorFieldDescr(arraydescr, fielddescr) - cache[(ARRAY, FIELDTP, name)] = descr + cache[(ARRAY, name)] = descr return descr +def get_dynamic_interiorfield_descr(gc_ll_descr, offset, width, fieldsize, + is_pointer, is_float, is_signed): + arraydescr = ArrayDescr(0, width, None, FLAG_STRUCT) + if is_pointer: + assert not is_float + flag = FLAG_POINTER + elif is_float: + flag = FLAG_FLOAT + elif is_signed: + flag = FLAG_SIGNED + else: + flag = FLAG_UNSIGNED + fielddescr = FieldDescr('dynamic', offset, fieldsize, flag) + return InteriorFieldDescr(arraydescr, fielddescr) + + # ____________________________________________________________ # CallDescrs -class BaseCallDescr(AbstractDescr): - _clsname = '' - loop_token = None +class CallDescr(AbstractDescr): arg_classes = '' # <-- annotation hack + result_type = '\x00' + result_flag = '\x00' ffi_flags = 1 + call_stub_i = staticmethod(lambda func, args_i, args_r, args_f: + 0) + call_stub_r = 
staticmethod(lambda func, args_i, args_r, args_f: + lltype.nullptr(llmemory.GCREF.TO)) + call_stub_f = staticmethod(lambda func,args_i,args_r,args_f: + longlong.ZEROF) - def __init__(self, arg_classes, extrainfo=None, ffi_flags=1): - self.arg_classes = arg_classes # string of "r" and "i" (ref/int) + def __init__(self, arg_classes, result_type, result_signed, result_size, + extrainfo=None, ffi_flags=1): + """ + 'arg_classes' is a string of characters, one per argument: + 'i', 'r', 'f', 'L', 'S' + + 'result_type' is one character from the same list or 'v' + + 'result_signed' is a boolean True/False + """ + self.arg_classes = arg_classes + self.result_type = result_type + self.result_size = result_size self.extrainfo = extrainfo self.ffi_flags = ffi_flags # NB. the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which # makes sense on Windows as it's the one for all the C functions # we are compiling together with the JIT. On non-Windows platforms # it is just ignored anyway. + if result_type == 'v': + result_flag = FLAG_VOID + elif result_type == 'i': + if result_signed: + result_flag = FLAG_SIGNED + else: + result_flag = FLAG_UNSIGNED + elif result_type == history.REF: + result_flag = FLAG_POINTER + elif result_type == history.FLOAT or result_type == 'L': + result_flag = FLAG_FLOAT + elif result_type == 'S': + result_flag = FLAG_UNSIGNED + else: + raise NotImplementedError("result_type = '%s'" % (result_type,)) + self.result_flag = result_flag def __repr__(self): - res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) + res = 'CallDescr(%s)' % (self.arg_classes,) extraeffect = getattr(self.extrainfo, 'extraeffect', None) if extraeffect is not None: res += ' EF=%r' % extraeffect @@ -363,14 +333,14 @@ def get_arg_types(self): return self.arg_classes - def get_return_type(self): - return self._return_type + def get_result_type(self): + return self.result_type - def get_result_size(self, translate_support_code): - raise NotImplementedError + def get_result_size(self): + return self.result_size def is_result_signed(self): - return False # unless overridden + return self.result_flag == FLAG_SIGNED def create_call_stub(self, rtyper, RESULT): from pypy.rlib.clibffi import FFI_DEFAULT_ABI @@ -408,18 +378,26 @@ seen = {'i': 0, 'r': 0, 'f': 0} args = ", ".join([process(c) for c in self.arg_classes]) - if self.get_return_type() == history.INT: + result_type = self.get_result_type() + if result_type == history.INT: result = 'rffi.cast(lltype.Signed, res)' - elif self.get_return_type() == history.REF: + category = 'i' + elif result_type == history.REF: + assert RESULT == llmemory.GCREF # should be ensured by the caller result = 'lltype.cast_opaque_ptr(llmemory.GCREF, res)' - elif self.get_return_type() == history.FLOAT: + category = 'r' + elif result_type == history.FLOAT: result = 'longlong.getfloatstorage(res)' - elif self.get_return_type() == 'L': + category = 'f' + elif result_type == 'L': result = 'rffi.cast(lltype.SignedLongLong, res)' - elif self.get_return_type() == history.VOID: - result = 'None' - elif self.get_return_type() == 'S': + category = 'f' + elif result_type == history.VOID: + result = '0' + category = 'i' + elif result_type == 'S': result = 'longlong.singlefloat2int(res)' + category = 'i' else: assert 0 source = py.code.Source(""" @@ -433,10 +411,13 @@ d = globals().copy() d.update(locals()) exec source.compile() in d - self.call_stub = d['call_stub'] + call_stub = d['call_stub'] + # store the function into one of three attributes, to preserve + # type-correctness of the 
return value + setattr(self, 'call_stub_%s' % category, call_stub) def verify_types(self, args_i, args_r, args_f, return_type): - assert self._return_type in return_type + assert self.result_type in return_type assert (self.arg_classes.count('i') + self.arg_classes.count('S')) == len(args_i or ()) assert self.arg_classes.count('r') == len(args_r or ()) @@ -444,161 +425,48 @@ self.arg_classes.count('L')) == len(args_f or ()) def repr_of_descr(self): - return '<%s>' % self._clsname + return '' % (self.arg_classes, self.result_type) -class BaseIntCallDescr(BaseCallDescr): - # Base class of the various subclasses of descrs corresponding to - # calls having a return kind of 'int' (including non-gc pointers). - # The inheritance hierarchy is a bit different than with other Descr - # classes because of the 'call_stub' attribute, which is of type - # - # lambda func, args_i, args_r, args_f --> int/ref/float/void - # - # The purpose of BaseIntCallDescr is to be the parent of all classes - # in which 'call_stub' has a return kind of 'int'. - _return_type = history.INT - call_stub = staticmethod(lambda func, args_i, args_r, args_f: 0) - - _is_result_signed = False # can be overridden in XxxCallDescr - def is_result_signed(self): - return self._is_result_signed - -class DynamicIntCallDescr(BaseIntCallDescr): - """ - calldescr that works for every integer type, by explicitly passing it the - size of the result. Used only by get_call_descr_dynamic - """ - _clsname = 'DynamicIntCallDescr' - - def __init__(self, arg_classes, result_size, result_sign, extrainfo, ffi_flags): - BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) - assert isinstance(result_sign, bool) - self._result_size = chr(result_size) - self._result_sign = result_sign - - def get_result_size(self, translate_support_code): - return ord(self._result_size) - - def is_result_signed(self): - return self._result_sign - - -class NonGcPtrCallDescr(BaseIntCallDescr): - _clsname = 'NonGcPtrCallDescr' - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrCallDescr(BaseCallDescr): - _clsname = 'GcPtrCallDescr' - _return_type = history.REF - call_stub = staticmethod(lambda func, args_i, args_r, args_f: - lltype.nullptr(llmemory.GCREF.TO)) - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class FloatCallDescr(BaseCallDescr): - _clsname = 'FloatCallDescr' - _return_type = history.FLOAT - call_stub = staticmethod(lambda func,args_i,args_r,args_f: longlong.ZEROF) - def get_result_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class LongLongCallDescr(FloatCallDescr): - _clsname = 'LongLongCallDescr' - _return_type = 'L' - -class VoidCallDescr(BaseCallDescr): - _clsname = 'VoidCallDescr' - _return_type = history.VOID - call_stub = staticmethod(lambda func, args_i, args_r, args_f: None) - def get_result_size(self, translate_support_code): - return 0 - -_SingleFloatCallDescr = None # built lazily - -def getCallDescrClass(RESULT): - if RESULT is lltype.Void: - return VoidCallDescr - if RESULT is lltype.Float: - return FloatCallDescr - if RESULT is lltype.SingleFloat: - global _SingleFloatCallDescr - if _SingleFloatCallDescr is None: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(RESULT) - class SingleFloatCallDescr(getCallDescrClass(rffi.UINT)): - _clsname = 'SingleFloatCallDescr' - _return_type = 'S' - _SingleFloatCallDescr = SingleFloatCallDescr - return 
_SingleFloatCallDescr - if is_longlong(RESULT): - return LongLongCallDescr - return getDescrClass(RESULT, BaseIntCallDescr, GcPtrCallDescr, - NonGcPtrCallDescr, 'Call', 'get_result_size', - Ellipsis, # <= floatattrname should not be used here - '_is_result_signed') -getCallDescrClass._annspecialcase_ = 'specialize:memo' +def map_type_to_argclass(ARG, accept_void=False): + kind = getkind(ARG) + if kind == 'int': + if ARG is lltype.SingleFloat: return 'S' + else: return 'i' + elif kind == 'ref': return 'r' + elif kind == 'float': + if is_longlong(ARG): return 'L' + else: return 'f' + elif kind == 'void': + if accept_void: return 'v' + raise NotImplementedError('ARG = %r' % (ARG,)) def get_call_descr(gccache, ARGS, RESULT, extrainfo=None): - arg_classes = [] - for ARG in ARGS: - kind = getkind(ARG) - if kind == 'int': - if ARG is lltype.SingleFloat: - arg_classes.append('S') + arg_classes = map(map_type_to_argclass, ARGS) + arg_classes = ''.join(arg_classes) + result_type = map_type_to_argclass(RESULT, accept_void=True) + RESULT_ERASED = RESULT + if RESULT is lltype.Void: + result_size = 0 + result_signed = False + else: + if isinstance(RESULT, lltype.Ptr): + # avoid too many CallDescrs + if result_type == 'r': + RESULT_ERASED = llmemory.GCREF else: - arg_classes.append('i') - elif kind == 'ref': arg_classes.append('r') - elif kind == 'float': - if is_longlong(ARG): - arg_classes.append('L') - else: - arg_classes.append('f') - else: - raise NotImplementedError('ARG = %r' % (ARG,)) - arg_classes = ''.join(arg_classes) - cls = getCallDescrClass(RESULT) - key = (cls, arg_classes, extrainfo) + RESULT_ERASED = llmemory.Address + result_size = symbolic.get_size(RESULT_ERASED, + gccache.translate_support_code) + result_signed = get_type_flag(RESULT) == FLAG_SIGNED + key = (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo) cache = gccache._cache_call try: - return cache[key] + calldescr = cache[key] except KeyError: - calldescr = cls(arg_classes, extrainfo) - calldescr.create_call_stub(gccache.rtyper, RESULT) + calldescr = CallDescr(arg_classes, result_type, result_signed, + result_size, extrainfo) + calldescr.create_call_stub(gccache.rtyper, RESULT_ERASED) cache[key] = calldescr - return calldescr - - -# ____________________________________________________________ - -def getDescrClass(TYPE, BaseDescr, GcPtrDescr, NonGcPtrDescr, - nameprefix, methodname, floatattrname, signedattrname, - _cache={}): - if isinstance(TYPE, lltype.Ptr): - if TYPE.TO._gckind == 'gc': - return GcPtrDescr - else: - return NonGcPtrDescr - if TYPE is lltype.SingleFloat: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(TYPE) - TYPE = rffi.UINT - try: - return _cache[nameprefix, TYPE] - except KeyError: - # - class Descr(BaseDescr): - _clsname = '%s%sDescr' % (TYPE._name, nameprefix) - Descr.__name__ = Descr._clsname - # - def method(self, translate_support_code): - return symbolic.get_size(TYPE, translate_support_code) - setattr(Descr, methodname, method) - # - if TYPE is lltype.Float or is_longlong(TYPE): - setattr(Descr, floatattrname, True) - elif (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and - rffi.cast(TYPE, -1) == -1): - setattr(Descr, signedattrname, True) - # - _cache[nameprefix, TYPE] = Descr - return Descr + assert repr(calldescr.result_size) == repr(result_size) + return calldescr diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -1,9 +1,7 
@@ from pypy.rlib.rarithmetic import intmask from pypy.jit.metainterp import history from pypy.rpython.lltypesystem import rffi -from pypy.jit.backend.llsupport.descr import ( - DynamicIntCallDescr, NonGcPtrCallDescr, FloatCallDescr, VoidCallDescr, - LongLongCallDescr, getCallDescrClass) +from pypy.jit.backend.llsupport.descr import CallDescr class UnsupportedKind(Exception): pass @@ -16,29 +14,13 @@ argkinds = [get_ffi_type_kind(cpu, arg) for arg in ffi_args] except UnsupportedKind: return None - arg_classes = ''.join(argkinds) - if reskind == history.INT: - size = intmask(ffi_result.c_size) - signed = is_ffi_type_signed(ffi_result) - return DynamicIntCallDescr(arg_classes, size, signed, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.REF: - return NonGcPtrCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.FLOAT: - return FloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.VOID: - return VoidCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'L': - return LongLongCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'S': - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - return SingleFloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - assert False + if reskind == history.VOID: + result_size = 0 + else: + result_size = intmask(ffi_result.c_size) + argkinds = ''.join(argkinds) + return CallDescr(argkinds, reskind, is_ffi_type_signed(ffi_result), + result_size, extrainfo, ffi_flags=ffi_flags) def get_ffi_type_kind(cpu, ffi_type): from pypy.rlib.libffi import types diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,6 +1,6 @@ import os from pypy.rlib import rgc -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize from pypy.rlib.debug import fatalerror from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr @@ -8,52 +8,93 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt, ConstPtr -from pypy.jit.metainterp.history import AbstractDescr +from pypy.jit.codewriter import heaptracker +from pypy.jit.metainterp.history import ConstPtr, AbstractDescr from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.symbolic import WORD -from pypy.jit.backend.llsupport.descr import BaseSizeDescr, BaseArrayDescr +from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr -from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr +from pypy.jit.backend.llsupport.descr import get_array_descr from pypy.jit.backend.llsupport.descr import get_call_descr +from pypy.jit.backend.llsupport.rewrite import GcRewriterAssembler from pypy.rpython.memory.gctransform import asmgcroot # ____________________________________________________________ class GcLLDescription(GcCache): - minimal_size_in_nursery = 0 - get_malloc_slowpath_addr = None def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr + if translator and 
translator.config.translation.gcremovetypeptr: + self.fielddescr_vtable = None + else: + self.fielddescr_vtable = get_field_descr(self, rclass.OBJECT, + 'typeptr') + self._generated_functions = [] + + def _setup_str(self): + self.str_descr = get_array_descr(self, rstr.STR) + self.unicode_descr = get_array_descr(self, rstr.UNICODE) + + def generate_function(self, funcname, func, ARGS, RESULT=llmemory.GCREF): + """Generates a variant of malloc with the given name and the given + arguments. It should return NULL if out of memory. If it raises + anything, it must be an optional MemoryError. + """ + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, RESULT)) + descr = get_call_descr(self, ARGS, RESULT) + setattr(self, funcname, func) + setattr(self, funcname + '_FUNCPTR', FUNCPTR) + setattr(self, funcname + '_descr', descr) + self._generated_functions.append(funcname) + + @specialize.arg(1) + def get_malloc_fn(self, funcname): + func = getattr(self, funcname) + FUNC = getattr(self, funcname + '_FUNCPTR') + return llhelper(FUNC, func) + + @specialize.arg(1) + def get_malloc_fn_addr(self, funcname): + ll_func = self.get_malloc_fn(funcname) + return heaptracker.adr2int(llmemory.cast_ptr_to_adr(ll_func)) + def _freeze_(self): return True def initialize(self): pass def do_write_barrier(self, gcref_struct, gcref_newptr): pass - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - return operations - def can_inline_malloc(self, descr): - return False - def can_inline_malloc_varsize(self, descr, num_elem): + def can_use_nursery_malloc(self, size): return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): pass + def get_nursery_free_addr(self): + raise NotImplementedError + def get_nursery_top_addr(self): + raise NotImplementedError - def get_funcptr_for_newarray(self): - return llhelper(self.GC_MALLOC_ARRAY, self.malloc_array) - def get_funcptr_for_newstr(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_str) - def get_funcptr_for_newunicode(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_unicode) + def gc_malloc(self, sizedescr): + """Blackhole: do a 'bh_new'. 
Also used for 'bh_new_with_vtable', + with the vtable pointer set manually afterwards.""" + assert isinstance(sizedescr, SizeDescr) + return self._bh_malloc(sizedescr) + def gc_malloc_array(self, arraydescr, num_elem): + assert isinstance(arraydescr, ArrayDescr) + return self._bh_malloc_array(arraydescr, num_elem) - def record_constptrs(self, op, gcrefs_output_list): + def gc_malloc_str(self, num_elem): + return self._bh_malloc_array(self.str_descr, num_elem) + + def gc_malloc_unicode(self, num_elem): + return self._bh_malloc_array(self.unicode_descr, num_elem) + + def _record_constptrs(self, op, gcrefs_output_list): for i in range(op.numargs()): v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): @@ -61,11 +102,27 @@ rgc._make_sure_does_not_move(p) gcrefs_output_list.append(p) + def rewrite_assembler(self, cpu, operations, gcrefs_output_list): + rewriter = GcRewriterAssembler(self, cpu) + newops = rewriter.rewrite(operations) + # record all GCREFs, because the GC (or Boehm) cannot see them and + # keep them alive if they end up as constants in the assembler + for op in newops: + self._record_constptrs(op, gcrefs_output_list) + return newops + # ____________________________________________________________ class GcLLDescr_boehm(GcLLDescription): - moving_gc = False - gcrootmap = None + kind = 'boehm' + moving_gc = False + round_up = False + gcrootmap = None + write_barrier_descr = None + fielddescr_tid = None + str_type_id = 0 + unicode_type_id = 0 + get_malloc_slowpath_addr = None @classmethod def configure_boehm_once(cls): @@ -76,6 +133,16 @@ from pypy.rpython.tool import rffi_platform compilation_info = rffi_platform.configure_boehm() + # on some platform GC_init is required before any other + # GC_* functions, call it here for the benefit of tests + # XXX move this to tests + init_fn_ptr = rffi.llexternal("GC_init", + [], lltype.Void, + compilation_info=compilation_info, + sandboxsafe=True, + _nowrapper=True) + init_fn_ptr() + # Versions 6.x of libgc needs to use GC_local_malloc(). 
# Versions 7.x of libgc removed this function; GC_malloc() has # the same behavior if libgc was compiled with @@ -95,96 +162,42 @@ sandboxsafe=True, _nowrapper=True) cls.malloc_fn_ptr = malloc_fn_ptr - cls.compilation_info = compilation_info return malloc_fn_ptr def __init__(self, gcdescr, translator, rtyper): GcLLDescription.__init__(self, gcdescr, translator, rtyper) # grab a pointer to the Boehm 'malloc' function - malloc_fn_ptr = self.configure_boehm_once() - self.funcptr_for_new = malloc_fn_ptr + self.malloc_fn_ptr = self.configure_boehm_once() + self._setup_str() + self._make_functions() - def malloc_array(basesize, itemsize, ofs_length, num_elem): + def _make_functions(self): + + def malloc_fixedsize(size): + return self.malloc_fn_ptr(size) + self.generate_function('malloc_fixedsize', malloc_fixedsize, + [lltype.Signed]) + + def malloc_array(basesize, num_elem, itemsize, ofs_length): try: - size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) except OverflowError: return lltype.nullptr(llmemory.GCREF.TO) - res = self.funcptr_for_new(size) - if not res: - return res - rffi.cast(rffi.CArrayPtr(lltype.Signed), res)[ofs_length/WORD] = num_elem + res = self.malloc_fn_ptr(totalsize) + if res: + arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) + arrayptr[ofs_length/WORD] = num_elem return res - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 4, llmemory.GCREF)) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 4) + def _bh_malloc(self, sizedescr): + return self.malloc_fixedsize(sizedescr.size) - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, self.translate_support_code) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code) - def malloc_str(length): - return self.malloc_array( - str_basesize, str_itemsize, str_ofs_length, length - ) - def malloc_unicode(length): - return self.malloc_array( - unicode_basesize, unicode_itemsize, unicode_ofs_length, length - ) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - - - # on some platform GC_init is required before any other - # GC_* functions, call it here for the benefit of tests - # XXX move this to tests - init_fn_ptr = rffi.llexternal("GC_init", - [], lltype.Void, - compilation_info=self.compilation_info, - sandboxsafe=True, - _nowrapper=True) - - init_fn_ptr() - - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.funcptr_for_new(sizedescr.size) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(basesize, itemsize, ofs_length, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size] - - def args_for_new_array(self, arraydescr): - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = 
arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [basesize, itemsize, ofs_length] - - def get_funcptr_for_new(self): - return self.funcptr_for_new - - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # record all GCREFs too, because Boehm cannot see them and keep them - # alive if they end up as constants in the assembler - for op in operations: - self.record_constptrs(op, gcrefs_output_list) - return GcLLDescription.rewrite_assembler(self, cpu, operations, - gcrefs_output_list) + def _bh_malloc_array(self, arraydescr, num_elem): + return self.malloc_array(arraydescr.basesize, num_elem, + arraydescr.itemsize, + arraydescr.lendescr.offset) # ____________________________________________________________ @@ -554,12 +567,14 @@ class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): - GCClass = gc_ll_descr.GCClass self.llop1 = gc_ll_descr.llop1 self.WB_FUNCPTR = gc_ll_descr.WB_FUNCPTR self.WB_ARRAY_FUNCPTR = gc_ll_descr.WB_ARRAY_FUNCPTR - self.fielddescr_tid = get_field_descr(gc_ll_descr, GCClass.HDR, 'tid') + self.fielddescr_tid = gc_ll_descr.fielddescr_tid # + GCClass = gc_ll_descr.GCClass + if GCClass is None: # for tests + return self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = ( self.extract_flag_byte(self.jit_wb_if_flag)) @@ -596,48 +611,74 @@ funcaddr = llmemory.cast_ptr_to_adr(funcptr) return cpu.cast_adr_to_int(funcaddr) # this may return 0 + def has_write_barrier_from_array(self, cpu): + return self.get_write_barrier_from_array_fn(cpu) != 0 + class GcLLDescr_framework(GcLLDescription): DEBUG = False # forced to True by x86/test/test_zrpy_gc.py + kind = 'framework' + round_up = True - def __init__(self, gcdescr, translator, rtyper, llop1=llop): - from pypy.rpython.memory.gctypelayout import check_typeid - from pypy.rpython.memory.gcheader import GCHeaderBuilder - from pypy.rpython.memory.gctransform import framework + def __init__(self, gcdescr, translator, rtyper, llop1=llop, + really_not_translated=False): GcLLDescription.__init__(self, gcdescr, translator, rtyper) - assert self.translate_support_code, "required with the framework GC" self.translator = translator self.llop1 = llop1 + if really_not_translated: + assert not self.translate_support_code # but half does not work + self._initialize_for_tests() + else: + assert self.translate_support_code,"required with the framework GC" + self._check_valid_gc() + self._make_gcrootmap() + self._make_layoutbuilder() + self._setup_gcclass() + self._setup_tid() + self._setup_write_barrier() + self._setup_str() + self._make_functions(really_not_translated) + def _initialize_for_tests(self): + self.layoutbuilder = None + self.fielddescr_tid = AbstractDescr() + self.max_size_of_young_obj = 1000 + self.GCClass = None + + def _check_valid_gc(self): # we need the hybrid or minimark GC for rgc._make_sure_does_not_move() # to work - if gcdescr.config.translation.gc not in ('hybrid', 'minimark'): + if self.gcdescr.config.translation.gc not in ('hybrid', 'minimark'): raise NotImplementedError("--gc=%s not implemented with the JIT" % (gcdescr.config.translation.gc,)) + def _make_gcrootmap(self): # to find roots in the assembler, make a GcRootMap - name = gcdescr.config.translation.gcrootfinder + name = self.gcdescr.config.translation.gcrootfinder try: cls = globals()['GcRootMap_' + name] except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % 
(name,)) - gcrootmap = cls(gcdescr) + gcrootmap = cls(self.gcdescr) self.gcrootmap = gcrootmap + def _make_layoutbuilder(self): # make a TransformerLayoutBuilder and save it on the translator # where it can be fished and reused by the FrameworkGCTransformer + from pypy.rpython.memory.gctransform import framework + translator = self.translator self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} - gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + self.gcrootmap.add_jit2gc_hooks(translator._jit2gc) + def _setup_gcclass(self): + from pypy.rpython.memory.gcheader import GCHeaderBuilder self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO) - (self.array_basesize, _, self.array_length_ofs) = \ - symbolic.get_array_token(lltype.GcArray(lltype.Signed), True) self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() @@ -645,87 +686,124 @@ assert self.GCClass.inline_simple_malloc assert self.GCClass.inline_simple_malloc_varsize - # make a malloc function, with two arguments - def malloc_basic(size, tid): - type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - check_typeid(type_id) - res = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - type_id, size, - False, False, False) - # In case the operation above failed, we are returning NULL - # from this function to assembler. There is also an RPython - # exception set, typically MemoryError; but it's easier and - # faster to check for the NULL return value, as done by - # translator/exceptiontransform.py. - #llop.debug_print(lltype.Void, "\tmalloc_basic", size, type_id, - # "-->", res) - return res - self.malloc_basic = malloc_basic - self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType( - [lltype.Signed, lltype.Signed], llmemory.GCREF)) + def _setup_tid(self): + self.fielddescr_tid = get_field_descr(self, self.GCClass.HDR, 'tid') + + def _setup_write_barrier(self): self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, llmemory.Address], lltype.Void)) self.WB_ARRAY_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, lltype.Signed, llmemory.Address], lltype.Void)) self.write_barrier_descr = WriteBarrierDescr(self) - # + + def _make_functions(self, really_not_translated): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + (self.standard_array_basesize, _, self.standard_array_length_ofs) = \ + symbolic.get_array_token(lltype.GcArray(lltype.Signed), + not really_not_translated) + + def malloc_nursery_slowpath(size): + """Allocate 'size' null bytes out of the nursery. + Note that the fast path is typically inlined by the backend.""" + if self.DEBUG: + self._random_usage_of_xmm_registers() + type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, size, + False, False, False) + self.generate_function('malloc_nursery', malloc_nursery_slowpath, + [lltype.Signed]) + def malloc_array(itemsize, tid, num_elem): + """Allocate an array with a variable-size num_elem. 
+ Only works for standard arrays.""" type_id = llop.extract_ushort(llgroup.HALFWORD, tid) check_typeid(type_id) return llop1.do_malloc_varsize_clear( llmemory.GCREF, - type_id, num_elem, self.array_basesize, itemsize, - self.array_length_ofs) - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 3, llmemory.GCREF)) - # - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, True) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, True) - str_type_id = self.layoutbuilder.get_type_id(rstr.STR) - unicode_type_id = self.layoutbuilder.get_type_id(rstr.UNICODE) - # + type_id, num_elem, self.standard_array_basesize, itemsize, + self.standard_array_length_ofs) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 3) + + def malloc_array_nonstandard(basesize, itemsize, lengthofs, tid, + num_elem): + """For the rare case of non-standard arrays, i.e. arrays where + self.standard_array_{basesize,length_ofs} is wrong. It can + occur e.g. with arrays of floats on Win32.""" + type_id = llop.extract_ushort(llgroup.HALFWORD, tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + type_id, num_elem, basesize, itemsize, lengthofs) + self.generate_function('malloc_array_nonstandard', + malloc_array_nonstandard, + [lltype.Signed] * 5) + + str_type_id = self.str_descr.tid + str_basesize = self.str_descr.basesize + str_itemsize = self.str_descr.itemsize + str_ofs_length = self.str_descr.lendescr.offset + unicode_type_id = self.unicode_descr.tid + unicode_basesize = self.unicode_descr.basesize + unicode_itemsize = self.unicode_descr.itemsize + unicode_ofs_length = self.unicode_descr.lendescr.offset + def malloc_str(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, str_type_id, length, str_basesize, str_itemsize, str_ofs_length) + self.generate_function('malloc_str', malloc_str, + [lltype.Signed]) + def malloc_unicode(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, - unicode_type_id, length, unicode_basesize,unicode_itemsize, + unicode_type_id, length, unicode_basesize, unicode_itemsize, unicode_ofs_length) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - # - class ForTestOnly: - pass - for_test_only = ForTestOnly() - for_test_only.x = 1.23 - def random_usage_of_xmm_registers(): - x0 = for_test_only.x - x1 = x0 * 0.1 - x2 = x0 * 0.2 - x3 = x0 * 0.3 - for_test_only.x = x0 + x1 + x2 + x3 - # - def malloc_slowpath(size): - if self.DEBUG: - random_usage_of_xmm_registers() - assert size >= self.minimal_size_in_nursery - # NB. although we call do_malloc_fixedsize_clear() here, - # it's a bit of a hack because we set tid to 0 and may - # also use it to allocate varsized objects. The tid - # and possibly the length are both set afterward. - gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - 0, size, False, False, False) - return rffi.cast(lltype.Signed, gcref) - self.malloc_slowpath = malloc_slowpath - self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) + self.generate_function('malloc_unicode', malloc_unicode, + [lltype.Signed]) + + # Rarely called: allocate a fixed-size amount of bytes, but + # not in the nursery, because it is too big. Implemented like + # malloc_nursery_slowpath() above. 
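# Illustrative sketch, not part of the patch: the generate_function() helper
# used throughout _make_functions() follows a simple naming convention -- a
# helper registered under 'NAME' is stored as self.NAME (the callable), with
# self.NAME_FUNCPTR (its low-level function type) and self.NAME_descr (the
# call descr) next to it.  A minimal standalone restatement of that
# convention in plain Python (ToyGcDescr and the bytearray allocator are
# made up for the example; the real code builds lltype.FuncType and
# CallDescr objects instead):

class ToyGcDescr(object):
    def __init__(self):
        self._generated_functions = []

    def generate_function(self, funcname, func, num_args):
        setattr(self, funcname, func)                  # the callable itself
        setattr(self, funcname + '_num_args', num_args)
        self._generated_functions.append(funcname)

toy = ToyGcDescr()
toy.generate_function('malloc_fixedsize', lambda size: bytearray(size), 1)
assert toy.malloc_fixedsize(16) == bytearray(16)
assert toy._generated_functions == ['malloc_fixedsize']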
+ self.generate_function('malloc_fixedsize', malloc_nursery_slowpath, + [lltype.Signed]) + + def _bh_malloc(self, sizedescr): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, sizedescr.tid) + check_typeid(type_id) + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, sizedescr.size, + False, False, False) + + def _bh_malloc_array(self, arraydescr, num_elem): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, arraydescr.tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear(llmemory.GCREF, + type_id, num_elem, + arraydescr.basesize, + arraydescr.itemsize, + arraydescr.lendescr.offset) + + + class ForTestOnly: + pass + for_test_only = ForTestOnly() + for_test_only.x = 1.23 + + def _random_usage_of_xmm_registers(self): + x0 = self.for_test_only.x + x1 = x0 * 0.1 + x2 = x0 * 0.2 + x3 = x0 * 0.3 + self.for_test_only.x = x0 + x1 + x2 + x3 def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -735,49 +813,26 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) - return rffi.cast(lltype.Signed, fptr) - def initialize(self): self.gcrootmap.initialize() def init_size_descr(self, S, descr): - type_id = self.layoutbuilder.get_type_id(S) - assert not self.layoutbuilder.is_weakref_type(S) - assert not self.layoutbuilder.has_finalizer(S) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(S) + assert not self.layoutbuilder.is_weakref_type(S) + assert not self.layoutbuilder.has_finalizer(S) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) def init_array_descr(self, A, descr): - type_id = self.layoutbuilder.get_type_id(A) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(A) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.malloc_basic(sizedescr.size, sizedescr.tid) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(itemsize, arraydescr.tid, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size, sizedescr.tid] - - def args_for_new_array(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [itemsize, arraydescr.tid] - - def get_funcptr_for_new(self): - return llhelper(self.GC_MALLOC_BASIC, self.malloc_basic) + def _set_tid(self, gcptr, tid): + hdr_addr = llmemory.cast_ptr_to_adr(gcptr) + hdr_addr -= self.gcheaderbuilder.size_gc_header + hdr = llmemory.cast_adr_to_ptr(hdr_addr, self.HDRPTR) + hdr.tid = tid def do_write_barrier(self, gcref_struct, gcref_newptr): hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct) @@ -791,108 +846,8 @@ funcptr(llmemory.cast_ptr_to_adr(gcref_struct), 
llmemory.cast_ptr_to_adr(gcref_newptr)) - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # Perform two kinds of rewrites in parallel: - # - # - Add COND_CALLs to the write barrier before SETFIELD_GC and - # SETARRAYITEM_GC operations. - # - # - Record the ConstPtrs from the assembler. - # - newops = [] - known_lengths = {} - # we can only remember one malloc since the next malloc can possibly - # collect - last_malloc = None - for op in operations: - if op.getopnum() == rop.DEBUG_MERGE_POINT: - continue - # ---------- record the ConstPtrs ---------- - self.record_constptrs(op, gcrefs_output_list) - if op.is_malloc(): - last_malloc = op.result - elif op.can_malloc(): - last_malloc = None - # ---------- write barrier for SETFIELD_GC ---------- - if op.getopnum() == rop.SETFIELD_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(1) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETFIELD_RAW) - # ---------- write barrier for SETINTERIORFIELD_GC ------ - if op.getopnum() == rop.SETINTERIORFIELD_GC: - val = op.getarg(0) - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) - # ---------- write barrier for SETARRAYITEM_GC ---------- - if op.getopnum() == rop.SETARRAYITEM_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier_array(newops, op.getarg(0), - op.getarg(1), v, - cpu, known_lengths) - op = op.copy_and_change(rop.SETARRAYITEM_RAW) - elif op.getopnum() == rop.NEW_ARRAY: - v_length = op.getarg(0) - if isinstance(v_length, ConstInt): - known_lengths[op.result] = v_length.getint() - # ---------- - newops.append(op) - return newops - - def _gen_write_barrier(self, newops, v_base, v_value): - args = [v_base, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, - descr=self.write_barrier_descr)) - - def _gen_write_barrier_array(self, newops, v_base, v_index, v_value, - cpu, known_lengths): - if self.write_barrier_descr.get_write_barrier_from_array_fn(cpu) != 0: - # If we know statically the length of 'v', and it is not too - # big, then produce a regular write_barrier. If it's unknown or - # too big, produce instead a write_barrier_from_array. 
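# Illustrative sketch, not part of the patch: the length heuristic that
# follows here, and that is reused essentially unchanged by the new
# rewrite.py code later in this diff, can be restated in plain Python.
# 130 is the LARGE threshold taken from this diff; an array whose length is
# not statically known is treated as large:

LARGE = 130

def pick_write_barrier(known_lengths, v_base):
    length = known_lengths.get(v_base, LARGE)      # unknown length => LARGE
    if length >= LARGE:
        return 'COND_CALL_GC_WB_ARRAY'   # variant that also receives the index
    return 'COND_CALL_GC_WB'             # plain write barrier on the object

assert pick_write_barrier({}, 'p0') == 'COND_CALL_GC_WB_ARRAY'
assert pick_write_barrier({'p0': 5}, 'p0') == 'COND_CALL_GC_WB'
assert pick_write_barrier({'p0': 5000}, 'p0') == 'COND_CALL_GC_WB_ARRAY'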
- LARGE = 130 - length = known_lengths.get(v_base, LARGE) - if length >= LARGE: - # unknown or too big: produce a write_barrier_from_array - args = [v_base, v_index, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, - None, - descr=self.write_barrier_descr)) - return - # fall-back case: produce a write_barrier - self._gen_write_barrier(newops, v_base, v_value) - - def can_inline_malloc(self, descr): - assert isinstance(descr, BaseSizeDescr) - if descr.size < self.max_size_of_young_obj: - has_finalizer = bool(descr.tid & (1<= LARGE: + # unknown or too big: produce a write_barrier_from_array + args = [v_base, v_index, v_value] + self.newops.append( + ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, + descr=write_barrier_descr)) + return + # fall-back case: produce a write_barrier + self.gen_write_barrier(v_base, v_value) + + def round_up_for_allocation(self, size): + if not self.gc_ll_descr.round_up: + return size + if self.gc_ll_descr.translate_support_code: + from pypy.rpython.lltypesystem import llarena + return llarena.round_up_for_allocation( + size, self.gc_ll_descr.minimal_size_in_nursery) + else: + # non-translated: do it manually + # assume that "self.gc_ll_descr.minimal_size_in_nursery" is 2 WORDs + size = max(size, 2 * WORD) + return (size + WORD-1) & ~(WORD-1) # round up diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -1,4 +1,4 @@ -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport import symbolic from pypy.rlib.objectmodel import Symbolic @@ -53,18 +53,6 @@ ('z', lltype.Ptr(U)), ('f', lltype.Float), ('s', lltype.SingleFloat)) - assert getFieldDescrClass(lltype.Ptr(T)) is GcPtrFieldDescr - assert getFieldDescrClass(lltype.Ptr(U)) is NonGcPtrFieldDescr - cls = getFieldDescrClass(lltype.Char) - assert cls != getFieldDescrClass(lltype.Signed) - assert cls == getFieldDescrClass(lltype.Char) - clsf = getFieldDescrClass(lltype.Float) - assert clsf != cls - assert clsf == getFieldDescrClass(lltype.Float) - clss = getFieldDescrClass(lltype.SingleFloat) - assert clss not in (cls, clsf) - assert clss == getFieldDescrClass(lltype.SingleFloat) - assert clss == getFieldDescrClass(rffi.UINT) # for now # c0 = GcCache(False) c1 = GcCache(True) @@ -77,11 +65,7 @@ descr_z = get_field_descr(c2, S, 'z') descr_f = get_field_descr(c2, S, 'f') descr_s = get_field_descr(c2, S, 's') - assert descr_x.__class__ is cls - assert descr_y.__class__ is GcPtrFieldDescr - assert descr_z.__class__ is NonGcPtrFieldDescr - assert descr_f.__class__ is clsf - assert descr_s.__class__ is clss + assert isinstance(descr_x, FieldDescr) assert descr_x.name == 'S.x' assert descr_y.name == 'S.y' assert descr_z.name == 'S.z' @@ -90,33 +74,27 @@ if not tsc: assert descr_x.offset < descr_y.offset < descr_z.offset assert descr_x.sort_key() < descr_y.sort_key() < descr_z.sort_key() - assert descr_x.get_field_size(False) == rffi.sizeof(lltype.Char) - assert descr_y.get_field_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr_z.get_field_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr_f.get_field_size(False) == rffi.sizeof(lltype.Float) - assert descr_s.get_field_size(False) == rffi.sizeof( - lltype.SingleFloat) + assert descr_x.field_size == rffi.sizeof(lltype.Char) + assert descr_y.field_size 
== rffi.sizeof(lltype.Ptr(T)) + assert descr_z.field_size == rffi.sizeof(lltype.Ptr(U)) + assert descr_f.field_size == rffi.sizeof(lltype.Float) + assert descr_s.field_size == rffi.sizeof(lltype.SingleFloat) else: assert isinstance(descr_x.offset, Symbolic) assert isinstance(descr_y.offset, Symbolic) assert isinstance(descr_z.offset, Symbolic) assert isinstance(descr_f.offset, Symbolic) assert isinstance(descr_s.offset, Symbolic) - assert isinstance(descr_x.get_field_size(True), Symbolic) - assert isinstance(descr_y.get_field_size(True), Symbolic) - assert isinstance(descr_z.get_field_size(True), Symbolic) - assert isinstance(descr_f.get_field_size(True), Symbolic) - assert isinstance(descr_s.get_field_size(True), Symbolic) - assert not descr_x.is_pointer_field() - assert descr_y.is_pointer_field() - assert not descr_z.is_pointer_field() - assert not descr_f.is_pointer_field() - assert not descr_s.is_pointer_field() - assert not descr_x.is_float_field() - assert not descr_y.is_float_field() - assert not descr_z.is_float_field() - assert descr_f.is_float_field() - assert not descr_s.is_float_field() + assert isinstance(descr_x.field_size, Symbolic) + assert isinstance(descr_y.field_size, Symbolic) + assert isinstance(descr_z.field_size, Symbolic) + assert isinstance(descr_f.field_size, Symbolic) + assert isinstance(descr_s.field_size, Symbolic) + assert descr_x.flag == FLAG_UNSIGNED + assert descr_y.flag == FLAG_POINTER + assert descr_z.flag == FLAG_UNSIGNED + assert descr_f.flag == FLAG_FLOAT + assert descr_s.flag == FLAG_UNSIGNED def test_get_field_descr_sign(): @@ -128,7 +106,8 @@ for tsc in [False, True]: c2 = GcCache(tsc) descr_x = get_field_descr(c2, S, 'x') - assert descr_x.is_field_signed() == signed + assert descr_x.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] def test_get_field_descr_longlong(): if sys.maxint > 2147483647: @@ -136,9 +115,8 @@ c0 = GcCache(False) S = lltype.GcStruct('S', ('y', lltype.UnsignedLongLong)) descr = get_field_descr(c0, S, 'y') - assert not descr.is_pointer_field() - assert descr.is_float_field() - assert descr.get_field_size(False) == 8 + assert descr.flag == FLAG_FLOAT + assert descr.field_size == 8 def test_get_array_descr(): @@ -149,19 +127,8 @@ A3 = lltype.GcArray(lltype.Ptr(U)) A4 = lltype.GcArray(lltype.Float) A5 = lltype.GcArray(lltype.Struct('x', ('v', lltype.Signed), - ('k', lltype.Signed))) + ('k', lltype.Signed))) A6 = lltype.GcArray(lltype.SingleFloat) - assert getArrayDescrClass(A2) is GcPtrArrayDescr - assert getArrayDescrClass(A3) is NonGcPtrArrayDescr - cls = getArrayDescrClass(A1) - assert cls != getArrayDescrClass(lltype.GcArray(lltype.Signed)) - assert cls == getArrayDescrClass(lltype.GcArray(lltype.Char)) - clsf = getArrayDescrClass(A4) - assert clsf != cls - assert clsf == getArrayDescrClass(lltype.GcArray(lltype.Float)) - clss = getArrayDescrClass(A6) - assert clss not in (clsf, cls) - assert clss == getArrayDescrClass(lltype.GcArray(rffi.UINT)) # c0 = GcCache(False) descr1 = get_array_descr(c0, A1) @@ -170,82 +137,61 @@ descr4 = get_array_descr(c0, A4) descr5 = get_array_descr(c0, A5) descr6 = get_array_descr(c0, A6) - assert descr1.__class__ is cls - assert descr2.__class__ is GcPtrArrayDescr - assert descr3.__class__ is NonGcPtrArrayDescr - assert descr4.__class__ is clsf - assert descr6.__class__ is clss + assert isinstance(descr1, ArrayDescr) assert descr1 == get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert not descr1.is_array_of_pointers() - assert descr2.is_array_of_pointers() - assert not 
descr3.is_array_of_pointers() - assert not descr4.is_array_of_pointers() - assert not descr5.is_array_of_pointers() - assert not descr1.is_array_of_floats() - assert not descr2.is_array_of_floats() - assert not descr3.is_array_of_floats() - assert descr4.is_array_of_floats() - assert not descr5.is_array_of_floats() + assert descr1.flag == FLAG_UNSIGNED + assert descr2.flag == FLAG_POINTER + assert descr3.flag == FLAG_UNSIGNED + assert descr4.flag == FLAG_FLOAT + assert descr5.flag == FLAG_STRUCT + assert descr6.flag == FLAG_UNSIGNED # def get_alignment(code): # Retrieve default alignment for the compiler/platform return struct.calcsize('l' + code) - struct.calcsize(code) - assert descr1.get_base_size(False) == get_alignment('c') - assert descr2.get_base_size(False) == get_alignment('p') - assert descr3.get_base_size(False) == get_alignment('p') - assert descr4.get_base_size(False) == get_alignment('d') - assert descr5.get_base_size(False) == get_alignment('f') - assert descr1.get_ofs_length(False) == 0 - assert descr2.get_ofs_length(False) == 0 - assert descr3.get_ofs_length(False) == 0 - assert descr4.get_ofs_length(False) == 0 - assert descr5.get_ofs_length(False) == 0 - assert descr1.get_item_size(False) == rffi.sizeof(lltype.Char) - assert descr2.get_item_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr3.get_item_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr4.get_item_size(False) == rffi.sizeof(lltype.Float) - assert descr5.get_item_size(False) == rffi.sizeof(lltype.Signed) * 2 - assert descr6.get_item_size(False) == rffi.sizeof(lltype.SingleFloat) + assert descr1.basesize == get_alignment('c') + assert descr2.basesize == get_alignment('p') + assert descr3.basesize == get_alignment('p') + assert descr4.basesize == get_alignment('d') + assert descr5.basesize == get_alignment('f') + assert descr1.lendescr.offset == 0 + assert descr2.lendescr.offset == 0 + assert descr3.lendescr.offset == 0 + assert descr4.lendescr.offset == 0 + assert descr5.lendescr.offset == 0 + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr2.itemsize == rffi.sizeof(lltype.Ptr(T)) + assert descr3.itemsize == rffi.sizeof(lltype.Ptr(U)) + assert descr4.itemsize == rffi.sizeof(lltype.Float) + assert descr5.itemsize == rffi.sizeof(lltype.Signed) * 2 + assert descr6.itemsize == rffi.sizeof(lltype.SingleFloat) # - assert isinstance(descr1.get_base_size(True), Symbolic) - assert isinstance(descr2.get_base_size(True), Symbolic) - assert isinstance(descr3.get_base_size(True), Symbolic) - assert isinstance(descr4.get_base_size(True), Symbolic) - assert isinstance(descr5.get_base_size(True), Symbolic) - assert isinstance(descr1.get_ofs_length(True), Symbolic) - assert isinstance(descr2.get_ofs_length(True), Symbolic) - assert isinstance(descr3.get_ofs_length(True), Symbolic) - assert isinstance(descr4.get_ofs_length(True), Symbolic) - assert isinstance(descr5.get_ofs_length(True), Symbolic) - assert isinstance(descr1.get_item_size(True), Symbolic) - assert isinstance(descr2.get_item_size(True), Symbolic) - assert isinstance(descr3.get_item_size(True), Symbolic) - assert isinstance(descr4.get_item_size(True), Symbolic) - assert isinstance(descr5.get_item_size(True), Symbolic) CA = rffi.CArray(lltype.Signed) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_SIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = 
rffi.CArray(lltype.Ptr(lltype.GcStruct('S'))) descr = get_array_descr(c0, CA) - assert descr.is_array_of_pointers() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_POINTER + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Ptr(lltype.Struct('S'))) descr = get_array_descr(c0, CA) - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Float) descr = get_array_descr(c0, CA) - assert descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_FLOAT + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(rffi.FLOAT) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.itemsize == rffi.sizeof(lltype.SingleFloat) + assert descr.lendescr is None def test_get_array_descr_sign(): @@ -257,46 +203,55 @@ for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, A) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] # RA = rffi.CArray(RESTYPE) for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, RA) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] + + +def test_get_array_descr_str(): + c0 = GcCache(False) + descr1 = get_array_descr(c0, rstr.STR) + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr1.flag == FLAG_UNSIGNED def test_get_call_descr_not_translated(): c0 = GcCache(False) descr1 = get_call_descr(c0, [lltype.Char, lltype.Signed], lltype.Char) - assert descr1.get_result_size(False) == rffi.sizeof(lltype.Char) - assert descr1.get_return_type() == history.INT + assert descr1.get_result_size() == rffi.sizeof(lltype.Char) + assert descr1.get_result_type() == history.INT assert descr1.arg_classes == "ii" # T = lltype.GcStruct('T') descr2 = get_call_descr(c0, [lltype.Ptr(T)], lltype.Ptr(T)) - assert descr2.get_result_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr2.get_return_type() == history.REF + assert descr2.get_result_size() == rffi.sizeof(lltype.Ptr(T)) + assert descr2.get_result_type() == history.REF assert descr2.arg_classes == "r" # U = lltype.GcStruct('U', ('x', lltype.Signed)) assert descr2 == get_call_descr(c0, [lltype.Ptr(U)], lltype.Ptr(U)) # V = lltype.Struct('V', ('x', lltype.Signed)) - assert (get_call_descr(c0, [], lltype.Ptr(V)).get_return_type() == + assert (get_call_descr(c0, [], lltype.Ptr(V)).get_result_type() == history.INT) # - assert (get_call_descr(c0, [], lltype.Void).get_return_type() == + assert (get_call_descr(c0, [], lltype.Void).get_result_type() == history.VOID) # descr4 = get_call_descr(c0, [lltype.Float, lltype.Float], lltype.Float) - assert descr4.get_result_size(False) == rffi.sizeof(lltype.Float) - assert descr4.get_return_type() == history.FLOAT + assert descr4.get_result_size() == rffi.sizeof(lltype.Float) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c0, [lltype.SingleFloat], lltype.SingleFloat) - assert descr5.get_result_size(False) == 
rffi.sizeof(lltype.SingleFloat) - assert descr5.get_return_type() == "S" + assert descr5.get_result_size() == rffi.sizeof(lltype.SingleFloat) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_get_call_descr_not_translated_longlong(): @@ -305,13 +260,13 @@ c0 = GcCache(False) # descr5 = get_call_descr(c0, [lltype.SignedLongLong], lltype.Signed) - assert descr5.get_result_size(False) == 4 - assert descr5.get_return_type() == history.INT + assert descr5.get_result_size() == 4 + assert descr5.get_result_type() == history.INT assert descr5.arg_classes == "L" # descr6 = get_call_descr(c0, [lltype.Signed], lltype.SignedLongLong) - assert descr6.get_result_size(False) == 8 - assert descr6.get_return_type() == "L" + assert descr6.get_result_size() == 8 + assert descr6.get_result_type() == "L" assert descr6.arg_classes == "i" def test_get_call_descr_translated(): @@ -319,18 +274,18 @@ T = lltype.GcStruct('T') U = lltype.GcStruct('U', ('x', lltype.Signed)) descr3 = get_call_descr(c1, [lltype.Ptr(T)], lltype.Ptr(U)) - assert isinstance(descr3.get_result_size(True), Symbolic) - assert descr3.get_return_type() == history.REF + assert isinstance(descr3.get_result_size(), Symbolic) + assert descr3.get_result_type() == history.REF assert descr3.arg_classes == "r" # descr4 = get_call_descr(c1, [lltype.Float, lltype.Float], lltype.Float) - assert isinstance(descr4.get_result_size(True), Symbolic) - assert descr4.get_return_type() == history.FLOAT + assert isinstance(descr4.get_result_size(), Symbolic) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c1, [lltype.SingleFloat], lltype.SingleFloat) - assert isinstance(descr5.get_result_size(True), Symbolic) - assert descr5.get_return_type() == "S" + assert isinstance(descr5.get_result_size(), Symbolic) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_call_descr_extra_info(): @@ -369,29 +324,30 @@ # descr2 = get_field_descr(c0, S, 'y') o, _ = symbolic.get_field_token(S, 'y', False) - assert descr2.repr_of_descr() == '' % o + assert descr2.repr_of_descr() == '' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) - assert descr2i.repr_of_descr() == '' % o + assert descr2i.repr_of_descr() == '' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) - assert descr3.repr_of_descr() == '' + o = symbolic.get_size(lltype.Ptr(S), False) + assert descr3.repr_of_descr() == '' % o # descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert descr3i.repr_of_descr() == '' + assert descr3i.repr_of_descr() == '' # descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) - assert 'GcPtrCallDescr' in descr4.repr_of_descr() + assert descr4.repr_of_descr() == '' # descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) - assert 'CharCallDescr' in descr4i.repr_of_descr() + assert descr4i.repr_of_descr() == '' # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) - assert 'FloatCallDescr' in descr4f.repr_of_descr() + assert descr4f.repr_of_descr() == '' # descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat) - assert 'SingleFloatCallDescr' in descr5f.repr_of_descr() + assert descr5f.repr_of_descr() == '' def test_call_stubs_1(): c0 = GcCache(False) @@ -401,10 +357,10 @@ def f(a, b): return 'c' - call_stub = descr1.call_stub fnptr = llhelper(lltype.Ptr(lltype.FuncType(ARGS, RES)), f) - res = call_stub(rffi.cast(lltype.Signed, fnptr), [1, 
2], None, None) + res = descr1.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [1, 2], None, None) assert res == ord('c') def test_call_stubs_2(): @@ -421,8 +377,8 @@ a = lltype.malloc(ARRAY, 3) opaquea = lltype.cast_opaque_ptr(llmemory.GCREF, a) a[0] = 1 - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [], [opaquea], [longlong.getfloatstorage(3.5)]) + res = descr2.call_stub_f(rffi.cast(lltype.Signed, fnptr), + [], [opaquea], [longlong.getfloatstorage(3.5)]) assert longlong.getrealfloat(res) == 4.5 def test_call_stubs_single_float(): @@ -445,6 +401,22 @@ a = intmask(singlefloat2uint(r_singlefloat(-10.0))) b = intmask(singlefloat2uint(r_singlefloat(3.0))) c = intmask(singlefloat2uint(r_singlefloat(2.0))) - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [a, b, c], [], []) + res = descr2.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [a, b, c], [], []) assert float(uint2singlefloat(rffi.r_uint(res))) == -11.5 + +def test_field_arraylen_descr(): + c0 = GcCache(True) + A1 = lltype.GcArray(lltype.Signed) + fielddescr = get_field_arraylen_descr(c0, A1) + assert isinstance(fielddescr, FieldDescr) + ofs = fielddescr.offset + assert repr(ofs) == '< ArrayLengthOffset >' + # + fielddescr = get_field_arraylen_descr(c0, rstr.STR) + ofs = fielddescr.offset + assert repr(ofs) == ("< " + " 'chars'> + < ArrayLengthOffset" + " > >") + # caching: + assert fielddescr is get_field_arraylen_descr(c0, rstr.STR) diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -1,5 +1,6 @@ from pypy.rlib.libffi import types from pypy.jit.codewriter.longlong import is_64_bit +from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport.ffisupport import * @@ -15,7 +16,9 @@ args = [types.sint, types.pointer] descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, None, ffi_flags=42) - assert isinstance(descr, DynamicIntCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_type == 'i' + assert descr.result_flag == FLAG_SIGNED assert descr.arg_classes == 'ii' assert descr.get_ffi_flags() == 42 @@ -24,18 +27,20 @@ assert descr is None # missing floats descr = get_call_descr_dynamic(FakeCPU(supports_floats=True), args, types.void, None, ffi_flags=43) - assert isinstance(descr, VoidCallDescr) + assert descr.result_type == 'v' + assert descr.result_flag == FLAG_VOID assert descr.arg_classes == 'ifi' assert descr.get_ffi_flags() == 43 descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_SIGNED assert descr.is_result_signed() == True descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert isinstance(descr, CallDescr) + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_UNSIGNED assert descr.is_result_signed() == False if not is_64_bit: @@ -44,7 +49,9 @@ assert descr is None # missing longlongs descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True), [], types.slonglong, None, ffi_flags=43) - assert isinstance(descr, LongLongCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_flag == FLAG_FLOAT + assert descr.result_type == 'L' assert descr.get_ffi_flags() == 43 else: 
assert types.slonglong is types.slong @@ -53,6 +60,6 @@ assert descr is None # missing singlefloats descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True), [], types.float, None, ffi_flags=44) - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - assert isinstance(descr, SingleFloatCallDescr) + assert descr.result_flag == FLAG_UNSIGNED + assert descr.result_type == 'S' assert descr.get_ffi_flags() == 44 diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -6,6 +6,7 @@ from pypy.jit.backend.llsupport.gc import * from pypy.jit.backend.llsupport import symbolic from pypy.jit.metainterp.gc import get_description +from pypy.jit.metainterp.history import BoxPtr, BoxInt, ConstPtr from pypy.jit.metainterp.resoperation import get_deep_immutable_oplist from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE @@ -15,12 +16,12 @@ gc_ll_descr = GcLLDescr_boehm(None, None, None) # record = [] - prev_funcptr_for_new = gc_ll_descr.funcptr_for_new - def my_funcptr_for_new(size): - p = prev_funcptr_for_new(size) + prev_malloc_fn_ptr = gc_ll_descr.malloc_fn_ptr + def my_malloc_fn_ptr(size): + p = prev_malloc_fn_ptr(size) record.append((size, p)) return p - gc_ll_descr.funcptr_for_new = my_funcptr_for_new + gc_ll_descr.malloc_fn_ptr = my_malloc_fn_ptr # # ---------- gc_malloc ---------- S = lltype.GcStruct('S', ('x', lltype.Signed)) @@ -32,8 +33,8 @@ A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(gc_ll_descr, A) p = gc_ll_descr.gc_malloc_array(arraydescr, 10) - assert record == [(arraydescr.get_base_size(False) + - 10 * arraydescr.get_item_size(False), p)] + assert record == [(arraydescr.basesize + + 10 * arraydescr.itemsize, p)] del record[:] # ---------- gc_malloc_str ---------- p = gc_ll_descr.gc_malloc_str(10) @@ -246,24 +247,28 @@ def __init__(self): self.record = [] + def _malloc(self, type_id, size): + tid = llop.combine_ushort(lltype.Signed, type_id, 0) + x = llmemory.raw_malloc(self.gcheaderbuilder.size_gc_header + size) + x += self.gcheaderbuilder.size_gc_header + return x, tid + def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size, has_finalizer, has_light_finalizer, contains_weakptr): assert not contains_weakptr - assert not has_finalizer # in these tests - assert not has_light_finalizer # in these tests - p = llmemory.raw_malloc(size) + assert not has_finalizer + assert not has_light_finalizer + p, tid = self._malloc(type_id, size) p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("fixedsize", repr(size), tid, p)) return p def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, itemsize, offset_to_length): - p = llmemory.raw_malloc(size + itemsize * length) + p, tid = self._malloc(type_id, size + itemsize * length) (p + offset_to_length).signed[0] = length p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("varsize", tid, length, repr(size), repr(itemsize), repr(offset_to_length), p)) @@ -322,43 +327,40 @@ gc_ll_descr = GcLLDescr_framework(gcdescr, FakeTranslator(), None, llop1) gc_ll_descr.initialize() + llop1.gcheaderbuilder = gc_ll_descr.gcheaderbuilder self.llop1 = llop1 self.gc_ll_descr = gc_ll_descr self.fake_cpu = FakeCPU() - def test_args_for_new(self): - S = lltype.GcStruct('S', ('x', lltype.Signed)) - sizedescr = 
get_size_descr(self.gc_ll_descr, S) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed - A = lltype.GcArray(lltype.Signed) - arraydescr = get_array_descr(self.gc_ll_descr, A) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed +## def test_args_for_new(self): +## S = lltype.GcStruct('S', ('x', lltype.Signed)) +## sizedescr = get_size_descr(self.gc_ll_descr, S) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed +## A = lltype.GcArray(lltype.Signed) +## arraydescr = get_array_descr(self.gc_ll_descr, A) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed def test_gc_malloc(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) sizedescr = get_size_descr(self.gc_ll_descr, S) p = self.gc_ll_descr.gc_malloc(sizedescr) - assert self.llop1.record == [("fixedsize", - repr(sizedescr.size), + assert lltype.typeOf(p) == llmemory.GCREF + assert self.llop1.record == [("fixedsize", repr(sizedescr.size), sizedescr.tid, p)] - assert repr(self.gc_ll_descr.args_for_new(sizedescr)) == repr( - [sizedescr.size, sizedescr.tid]) def test_gc_malloc_array(self): A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(self.gc_ll_descr, A) p = self.gc_ll_descr.gc_malloc_array(arraydescr, 10) assert self.llop1.record == [("varsize", arraydescr.tid, 10, - repr(arraydescr.get_base_size(True)), - repr(arraydescr.get_item_size(True)), - repr(arraydescr.get_ofs_length(True)), + repr(arraydescr.basesize), + repr(arraydescr.itemsize), + repr(arraydescr.lendescr.offset), p)] - assert repr(self.gc_ll_descr.args_for_new_array(arraydescr)) == repr( - [arraydescr.get_item_size(True), arraydescr.tid]) def test_gc_malloc_str(self): p = self.gc_ll_descr.gc_malloc_str(10) @@ -404,10 +406,11 @@ gc_ll_descr = self.gc_ll_descr llop1 = self.llop1 # - newops = [] + rewriter = GcRewriterAssembler(gc_ll_descr, None) + newops = rewriter.newops v_base = BoxPtr() v_value = BoxPtr() - gc_ll_descr._gen_write_barrier(newops, v_base, v_value) + rewriter.gen_write_barrier(v_base, v_value) assert llop1.record == [] assert len(newops) == 1 assert newops[0].getopnum() == rop.COND_CALL_GC_WB @@ -427,8 +430,7 @@ operations = gc_ll_descr.rewrite_assembler(None, operations, []) assert len(operations) == 0 - def test_rewrite_assembler_1(self): - # check recording of ConstPtrs + def test_record_constptrs(self): class MyFakeCPU(object): def cast_adr_to_int(self, adr): assert adr == "some fake address" @@ -455,211 +457,6 @@ assert operations2 == operations assert gcrefs == [s_gcref] - def test_rewrite_assembler_2(self): - # check write barriers before SETFIELD_GC - v_base = BoxPtr() - v_value = BoxPtr() - field_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETFIELD_GC, [v_base, v_value], None, - descr=field_descr), - ] - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, operations, - []) - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETFIELD_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_value - assert operations[1].getdescr() == field_descr - - def 
test_rewrite_assembler_3(self): - # check write barriers before SETARRAYITEM_GC - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_4(self): - # check write barriers before SETARRAYITEM_GC, - # if we have actually a write_barrier_from_array. - self.llop1._have_wb_from_array = True - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - if isinstance(v_new_length, ConstInt) and v_new_length.value < 130: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - else: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB_ARRAY - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_index - assert operations[0].getarg(2) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_5(self): - S = lltype.GcStruct('S') - A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) - interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, - A.OF, 'x') - wbdescr = 
self.gc_ll_descr.write_barrier_descr - ops = parse(""" - [p1, p2] - setinteriorfield_gc(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - expected = parse(""" - [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setinteriorfield_raw(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - # no write barrier - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_2(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - wbdescr = self.gc_ll_descr.write_barrier_descr - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_3(self): - A = lltype.GcArray(lltype.Ptr(lltype.GcStruct('S'))) - arraydescr = get_array_descr(self.gc_ll_descr, A) - ops = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) class TestFrameworkMiniMark(TestFramework): gc = 'minimark' diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -0,0 +1,668 @@ +from pypy.jit.backend.llsupport.descr import * +from pypy.jit.backend.llsupport.gc import * +from pypy.jit.metainterp.gc import get_description +from pypy.jit.tool.oparser import parse +from pypy.jit.metainterp.optimizeopt.util import equaloplists +from pypy.jit.codewriter.heaptracker import register_known_gctype + + +class Evaluator(object): + def __init__(self, scope): + self.scope = scope + def __getitem__(self, key): + return eval(key, self.scope) + + 
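# Illustration only (not part of the patch): Evaluator is what lets the
# expected-operation templates below embed arbitrary expressions, because
# "%(key)d" % mapping calls mapping[key] for every conversion and Evaluator
# answers that lookup with eval().  The names _FakeDescr, ev and the sizes
# here are hypothetical stand-ins for the descrs that check_rewrite() builds.
class _FakeDescr(object):
    size = 16
ev = Evaluator({'sdescr': _FakeDescr(), 'WORD': 8})
assert "call_malloc_nursery(%(sdescr.size + 2*WORD)d)" % ev == \
       "call_malloc_nursery(32)"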
+class RewriteTests(object): + def check_rewrite(self, frm_operations, to_operations, **namespace): + S = lltype.GcStruct('S', ('x', lltype.Signed), + ('y', lltype.Signed)) + sdescr = get_size_descr(self.gc_ll_descr, S) + sdescr.tid = 1234 + # + T = lltype.GcStruct('T', ('y', lltype.Signed), + ('z', lltype.Ptr(S)), + ('t', lltype.Signed)) + tdescr = get_size_descr(self.gc_ll_descr, T) + tdescr.tid = 5678 + tzdescr = get_field_descr(self.gc_ll_descr, T, 'z') + # + A = lltype.GcArray(lltype.Signed) + adescr = get_array_descr(self.gc_ll_descr, A) + adescr.tid = 4321 + alendescr = adescr.lendescr + # + B = lltype.GcArray(lltype.Char) + bdescr = get_array_descr(self.gc_ll_descr, B) + bdescr.tid = 8765 + blendescr = bdescr.lendescr + # + C = lltype.GcArray(lltype.Ptr(S)) + cdescr = get_array_descr(self.gc_ll_descr, C) + cdescr.tid = 8111 + clendescr = cdescr.lendescr + # + E = lltype.GcStruct('Empty') + edescr = get_size_descr(self.gc_ll_descr, E) + edescr.tid = 9000 + # + vtable_descr = self.gc_ll_descr.fielddescr_vtable + O = lltype.GcStruct('O', ('parent', rclass.OBJECT), + ('x', lltype.Signed)) + o_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) + register_known_gctype(self.cpu, o_vtable, O) + # + tiddescr = self.gc_ll_descr.fielddescr_tid + wbdescr = self.gc_ll_descr.write_barrier_descr + WORD = globals()['WORD'] + # + strdescr = self.gc_ll_descr.str_descr + unicodedescr = self.gc_ll_descr.unicode_descr + strlendescr = strdescr.lendescr + unicodelendescr = unicodedescr.lendescr + # + namespace.update(locals()) + # + for funcname in self.gc_ll_descr._generated_functions: + namespace[funcname] = self.gc_ll_descr.get_malloc_fn(funcname) + namespace[funcname + '_descr'] = getattr(self.gc_ll_descr, + '%s_descr' % funcname) + # + ops = parse(frm_operations, namespace=namespace) + expected = parse(to_operations % Evaluator(namespace), + namespace=namespace) + operations = self.gc_ll_descr.rewrite_assembler(self.cpu, + ops.operations, + []) + equaloplists(operations, expected.operations) + + +class TestBoehm(RewriteTests): + def setup_method(self, meth): + class FakeCPU(object): + def sizeof(self, STRUCT): + return SizeDescrWithVTable(102) + self.cpu = FakeCPU() + self.gc_ll_descr = GcLLDescr_boehm(None, None, None) + + def test_new(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_no_collapsing(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + p1 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_new_array_fixed(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(adescr.basesize + 10 * adescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_new_array_variable(self): + self.check_rewrite(""" + [i1] + p0 = new_array(i1, descr=adescr) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(adescr.basesize)d, \ + i1, \ + %(adescr.itemsize)d, \ + %(adescr.lendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = 
new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_newstr(self): + self.check_rewrite(""" + [i1] + p0 = newstr(i1) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(strdescr.basesize)d, \ + i1, \ + %(strdescr.itemsize)d, \ + %(strlendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_newunicode(self): + self.check_rewrite(""" + [i1] + p0 = newunicode(10) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(unicodedescr.basesize + \ + 10 * unicodedescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=unicodelendescr) + jump() + """) + + +class TestFramework(RewriteTests): + def setup_method(self, meth): + class config_(object): + class translation(object): + gc = 'hybrid' + gcrootfinder = 'asmgcc' + gctransformer = 'framework' + gcremovetypeptr = False + gcdescr = get_description(config_) + self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, + really_not_translated=True) + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: True) + # + class FakeCPU(object): + def sizeof(self, STRUCT): + descr = SizeDescrWithVTable(102) + descr.tid = 9315 + return descr + self.cpu = FakeCPU() + + def test_rewrite_assembler_new_to_malloc(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(sdescr.size)d) + setfield_gc(p0, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new3_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=tdescr) + p2 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + tdescr.size + sdescr.size)d) + setfield_gc(p0, 1234, descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 5678, descr=tiddescr) + p2 = int_add(p1, %(tdescr.size)d) + setfield_gc(p2, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 4321, descr=tiddescr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_new_and_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + \ + adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 1234, descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 4321, descr=tiddescr) + setfield_gc(p1, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_round_up(self): + self.check_rewrite(""" + [] + p0 = new_array(6, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(bdescr.basesize + 8)d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 6, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_round_up_always(self): + self.check_rewrite(""" + [] + p0 = new_array(5, descr=bdescr) + p1 = new_array(5, descr=bdescr) + p2 = new_array(5, descr=bdescr) + p3 = new_array(5, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4 * (bdescr.basesize + 8))d) + setfield_gc(p0, 8765, descr=tiddescr) + 
setfield_gc(p0, 5, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 8)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 5, descr=blendescr) + p2 = int_add(p1, %(bdescr.basesize + 8)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 5, descr=blendescr) + p3 = int_add(p2, %(bdescr.basesize + 8)d) + setfield_gc(p3, 8765, descr=tiddescr) + setfield_gc(p3, 5, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_minimal_size(self): + self.check_rewrite(""" + [] + p0 = new(descr=edescr) + p1 = new(descr=edescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4*WORD)d) + setfield_gc(p0, 9000, descr=tiddescr) + p1 = int_add(p0, %(2*WORD)d) + setfield_gc(p1, 9000, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_variable_size(self): + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=bdescr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, i0, \ + descr=malloc_array_descr) + jump(i0) + """) + + def test_rewrite_assembler_nonstandard_array(self): + # a non-standard array is a bit hard to get; e.g. GcArray(Float) + # is like that on Win32, but not on Linux. Build one manually... + NONSTD = lltype.GcArray(lltype.Float) + nonstd_descr = get_array_descr(self.gc_ll_descr, NONSTD) + nonstd_descr.tid = 6464 + nonstd_descr.basesize = 64 # <= hacked + nonstd_descr.itemsize = 8 + nonstd_descr_gcref = 123 + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=nonstd_descr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array_nonstandard), \ + 64, 8, \ + %(nonstd_descr.lendescr.offset)d, \ + 6464, i0, \ + descr=malloc_array_nonstandard_descr) + jump(i0) + """, nonstd_descr=nonstd_descr) + + def test_rewrite_assembler_maximal_size_1(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_array(103, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(bdescr.basesize + 104)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_maximal_size_2(self): + self.gc_ll_descr.max_size_of_young_obj = 300 + self.check_rewrite(""" + [] + p0 = new_array(101, descr=bdescr) + p1 = new_array(102, descr=bdescr) # two new_arrays can be combined + p2 = new_array(103, descr=bdescr) # but not all three + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(2 * (bdescr.basesize + 104))d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 101, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 104)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 102, descr=blendescr) + p2 = call_malloc_nursery( \ + %(bdescr.basesize + 104)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_huge_size(self): + # "huge" is defined as "larger than 0xffffff bytes, or 16MB" + self.check_rewrite(""" + [] + p0 = new_array(20000000, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, 20000000, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(104) # rounded up + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def 
test_new_with_vtable_too_big(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_rewrite_assembler_newstr_newunicode(self): + self.check_rewrite(""" + [i2] + p0 = newstr(14) + p1 = newunicode(10) + p2 = newunicode(i2) + p3 = newstr(i2) + jump() + """, """ + [i2] + p0 = call_malloc_nursery( \ + %(strdescr.basesize + 16 * strdescr.itemsize + \ + unicodedescr.basesize + 10 * unicodedescr.itemsize)d) + setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr) + setfield_gc(p0, 14, descr=strlendescr) + p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) + setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) + setfield_gc(p1, 10, descr=unicodelendescr) + p2 = call_malloc_gc(ConstClass(malloc_unicode), i2, \ + descr=malloc_unicode_descr) + p3 = call_malloc_gc(ConstClass(malloc_str), i2, \ + descr=malloc_str_descr) + jump() + """) + + def test_write_barrier_before_setfield_gc(self): + self.check_rewrite(""" + [p1, p2] + setfield_gc(p1, p2, descr=tzdescr) + jump() + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setfield_raw(p1, p2, descr=tzdescr) + jump() + """) + + def test_write_barrier_before_array_without_from_array(self): + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: False) + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_short_array(self): + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(129, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 129 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 129, descr=clendescr) + call(123456) + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_long_array(self): + # the limit of "being too long" is fixed, arbitrarily, at 130 + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(130, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 130 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 130, descr=clendescr) + call(123456) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_unknown_array(self): + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_label_makes_size_unknown(self): + self.check_rewrite(""" + [i2, p3] + p1 = new_array(5, descr=cdescr) + label(p1, i2, p3) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) 
+ setfield_gc(p1, 5, descr=clendescr) + label(p1, i2, p3) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_setinteriorfield_gc(self): + S1 = lltype.GcStruct('S1') + INTERIOR = lltype.GcArray(('z', lltype.Ptr(S1))) + interiordescr = get_array_descr(self.gc_ll_descr, INTERIOR) + interiordescr.tid = 1291 + interiorlendescr = interiordescr.lendescr + interiorzdescr = get_interiorfield_descr(self.gc_ll_descr, + INTERIOR, 'z') + self.check_rewrite(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, interiorzdescr=interiorzdescr) + + def test_initialization_store(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_2(self): + self.check_rewrite(""" + [] + p0 = new(descr=tdescr) + p1 = new(descr=sdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(tdescr.size + sdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = int_add(p0, %(tdescr.size)d) + setfield_gc(p1, 1234, descr=tiddescr) + # <<>> + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_array(self): + self.check_rewrite(""" + [p1, i2] + p0 = new_array(5, descr=cdescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """, """ + [p1, i2] + p0 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p0, 8111, descr=tiddescr) + setfield_gc(p0, 5, descr=clendescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """) + + def test_non_initialization_store(self): + self.check_rewrite(""" + [i0] + p0 = new(descr=tdescr) + p1 = newstr(i0) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [i0] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = call_malloc_gc(ConstClass(malloc_str), i0, \ + descr=malloc_str_descr) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) + + def test_non_initialization_store_label(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + label(p0, p1) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + label(p0, p1) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -2930,6 +2930,8 @@ # overflowing value: fail = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) assert fail.identifier == excdescr.identifier + exc = self.cpu.grab_exc_value() + assert exc == "memoryerror!" 
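# Illustration only (not from the patch): taken together, the write-barrier
# tests above pin down roughly the following decision for a setarrayitem_gc;
# the function and its parameter names are hypothetical simplifications.
def rewrite_array_store(freshly_allocated, has_wb_from_array, known_length):
    # freshly_allocated: the target came from call_malloc_nursery in the
    #   same block and no label() or residual call was crossed since then.
    # known_length: the constant length of the array, or None if it is no
    #   longer known at the store (e.g. a label() was crossed).
    if freshly_allocated:
        return 'setarrayitem_gc'        # initializing store: no barrier
    if not has_wb_from_array:
        return 'cond_call_gc_wb'        # only the plain barrier is available
    if known_length is not None and known_length < 130:
        return 'cond_call_gc_wb'        # short array: plain barrier
    return 'cond_call_gc_wb_array'      # long or unknown: barrier gets index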
def test_compile_loop_with_target(self): i0 = BoxInt() diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -70,10 +70,6 @@ self.cpu = cpu self.verbose = False self.rtyper = cpu.rtyper - self.malloc_func_addr = 0 - self.malloc_array_func_addr = 0 - self.malloc_str_func_addr = 0 - self.malloc_unicode_func_addr = 0 self.fail_boxes_int = values_array(lltype.Signed, failargs_limit) self.fail_boxes_ptr = values_array(llmemory.GCREF, failargs_limit) self.fail_boxes_float = values_array(longlong.FLOATSTORAGE, @@ -108,20 +104,6 @@ # the address of the function called by 'new' gc_ll_descr = self.cpu.gc_ll_descr gc_ll_descr.initialize() - ll_new = gc_ll_descr.get_funcptr_for_new() - self.malloc_func_addr = rffi.cast(lltype.Signed, ll_new) - if gc_ll_descr.get_funcptr_for_newarray is not None: - ll_new_array = gc_ll_descr.get_funcptr_for_newarray() - self.malloc_array_func_addr = rffi.cast(lltype.Signed, - ll_new_array) - if gc_ll_descr.get_funcptr_for_newstr is not None: - ll_new_str = gc_ll_descr.get_funcptr_for_newstr() - self.malloc_str_func_addr = rffi.cast(lltype.Signed, - ll_new_str) - if gc_ll_descr.get_funcptr_for_newunicode is not None: - ll_new_unicode = gc_ll_descr.get_funcptr_for_newunicode() - self.malloc_unicode_func_addr = rffi.cast(lltype.Signed, - ll_new_unicode) self.memcpy_addr = self.cpu.cast_ptr_to_int(support.memcpy_fn) self._build_failure_recovery(False) self._build_failure_recovery(True) @@ -275,7 +257,8 @@ # self.mc = codebuf.MachineCodeBlockWrapper() # call on_leave_jitted_save_exc() - addr = self.cpu.get_on_leave_jitted_int(save_exception=True) + addr = self.cpu.get_on_leave_jitted_int(save_exception=True, + default_to_memoryerror=True) self.mc.CALL(imm(addr)) self.mc.MOV_ri(eax.value, self.cpu.propagate_exception_v) self._call_footer() @@ -865,8 +848,8 @@ high_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[1] low_part = intmask(low_part) high_part = intmask(high_part) - self.mc.MOV_bi(to_loc.value, low_part) - self.mc.MOV_bi(to_loc.value + 4, high_part) + self.mc.MOV32_bi(to_loc.value, low_part) + self.mc.MOV32_bi(to_loc.value + 4, high_part) def regalloc_perform(self, op, arglocs, resloc): genop_list[op.getopnum()](self, op, arglocs, resloc) @@ -1357,46 +1340,10 @@ self.mc.SHR_ri(resloc.value, 7) self.mc.AND_ri(resloc.value, 1) - def genop_new_with_vtable(self, op, arglocs, result_loc): - assert result_loc is eax - loc_vtable = arglocs[-1] - assert isinstance(loc_vtable, ImmedLoc) - arglocs = arglocs[:-1] - self.call(self.malloc_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - self.set_vtable(eax, loc_vtable) + # ---------- - def set_vtable(self, loc, loc_vtable): - if self.cpu.vtable_offset is not None: - assert isinstance(loc, RegLoc) - assert isinstance(loc_vtable, ImmedLoc) - self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) - - def set_new_array_length(self, loc, ofs_length, loc_num_elem): - assert isinstance(loc, RegLoc) - assert isinstance(loc_num_elem, ImmedLoc) - self.mc.MOV(mem(loc, ofs_length), loc_num_elem) - - # XXX genop_new is abused for all varsized mallocs with Boehm, for now - # (instead of genop_new_array, genop_newstr, genop_newunicode) - def genop_new(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_new_array(self, op, arglocs, result_loc): - assert result_loc is eax - 
self.call(self.malloc_array_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_newstr(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_str_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_newunicode(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_unicode_func_addr, arglocs, eax) + def genop_call_malloc_gc(self, op, arglocs, result_loc): + self.genop_call(op, arglocs, result_loc) self.propagate_memoryerror_if_eax_is_null() def propagate_memoryerror_if_eax_is_null(self): @@ -2065,6 +2012,8 @@ self._genop_call(op, arglocs, resloc, force_index) def _genop_call(self, op, arglocs, resloc, force_index): + from pypy.jit.backend.llsupport.descr import CallDescr + sizeloc = arglocs[0] assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value @@ -2079,13 +2028,16 @@ else: tmp = eax + descr = op.getdescr() + assert isinstance(descr, CallDescr) + self._emit_call(force_index, x, arglocs, 3, tmp=tmp, - argtypes=op.getdescr().get_arg_types(), - callconv=op.getdescr().get_call_conv()) + argtypes=descr.get_arg_types(), + callconv=descr.get_call_conv()) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.type == FLOAT: # a float or a long long return - if op.getdescr().get_return_type() == 'L': + if descr.get_result_type() == 'L': self.mc.MOV_br(resloc.value, eax.value) # long long self.mc.MOV_br(resloc.value + 4, edx.value) # XXX should ideally not move the result on the stack, @@ -2094,7 +2046,7 @@ # can just be always a stack location else: self.mc.FSTPL_b(resloc.value) # float return - elif op.getdescr().get_return_type() == 'S': + elif descr.get_result_type() == 'S': # singlefloat return assert resloc is eax if IS_X86_32: @@ -2292,9 +2244,9 @@ # # Reset the vable token --- XXX really too much special logic here:-( if jd.index_of_virtualizable >= 0: - from pypy.jit.backend.llsupport.descr import BaseFieldDescr + from pypy.jit.backend.llsupport.descr import FieldDescr fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) ofs = fielddescr.offset self.mc.MOV(eax, arglocs[1]) self.mc.MOV_mi((eax.value, ofs), 0) @@ -2497,9 +2449,8 @@ else: self.mc.JMP(imm(target)) - def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): - size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) - size = (size + WORD-1) & ~(WORD-1) # round up + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size): + assert size & (WORD-1) == 0 # must be correctly aligned self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edx.value, (eax.value, size)) self.mc.CMP(edx, heap(nursery_top_adr)) @@ -2535,9 +2486,6 @@ offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) - # on 64-bits, 'tid' is a value that fits in 31 bits - assert rx86.fits_in_32bits(tid) - self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) genop_discard_list = [Assembler386.not_implemented_op_discard] * rop._LAST diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -16,8 +16,8 @@ from pypy.jit.codewriter import heaptracker, longlong from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.metainterp.resoperation import rop -from pypy.jit.backend.llsupport.descr import BaseFieldDescr, BaseArrayDescr -from 
pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr +from pypy.jit.backend.llsupport.descr import FieldDescr, ArrayDescr +from pypy.jit.backend.llsupport.descr import CallDescr, SizeDescr from pypy.jit.backend.llsupport.descr import InteriorFieldDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox @@ -870,9 +870,9 @@ def _consider_call(self, op, guard_not_forced_op=None): calldescr = op.getdescr() - assert isinstance(calldescr, BaseCallDescr) + assert isinstance(calldescr, CallDescr) assert len(calldescr.arg_classes) == op.numargs() - 1 - size = calldescr.get_result_size(self.translate_support_code) + size = calldescr.get_result_size() sign = calldescr.is_result_signed() if sign: sign_loc = imm1 @@ -917,12 +917,15 @@ consider_call_release_gil = consider_call_may_force + def consider_call_malloc_gc(self, op): + self._consider_call(op) + def consider_call_assembler(self, op, guard_op): descr = op.getdescr() assert isinstance(descr, JitCellToken) jd = descr.outermost_jitdriver_sd assert jd is not None - size = jd.portal_calldescr.get_result_size(self.translate_support_code) + size = jd.portal_calldescr.get_result_size() vable_index = jd.index_of_virtualizable if vable_index >= 0: self.rm._sync_var(op.getarg(vable_index)) @@ -957,21 +960,10 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb - def fastpath_malloc_fixedsize(self, op, descr): - assert isinstance(descr, BaseSizeDescr) - self._do_fastpath_malloc(op, descr.size, descr.tid) - - def fastpath_malloc_varsize(self, op, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - size = basesize + itemsize * num_elem - self._do_fastpath_malloc(op, size, arraydescr.tid) - self.assembler.set_new_array_length(eax, ofs_length, imm(num_elem)) - - def _do_fastpath_malloc(self, op, size, tid): - gc_ll_descr = self.assembler.cpu.gc_ll_descr + def consider_call_malloc_nursery(self, op): + size_box = op.getarg(0) + assert isinstance(size_box, ConstInt) + size = size_box.getint() self.rm.force_allocate_reg(op.result, selected_reg=eax) # # We need edx as a temporary, but otherwise don't save any more @@ -980,86 +972,39 @@ self.rm.force_allocate_reg(tmp_box, selected_reg=edx) self.rm.possibly_free_var(tmp_box) # + gc_ll_descr = self.assembler.cpu.gc_ll_descr self.assembler.malloc_cond( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), - size, tid, - ) - - def consider_new(self, op): - gc_ll_descr = self.assembler.cpu.gc_ll_descr - if gc_ll_descr.can_inline_malloc(op.getdescr()): - self.fastpath_malloc_fixedsize(op, op.getdescr()) - else: - args = gc_ll_descr.args_for_new(op.getdescr()) - arglocs = [imm(x) for x in args] - return self._call(op, arglocs) - - def consider_new_with_vtable(self, op): - classint = op.getarg(0).getint() - descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) - if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): - self.fastpath_malloc_fixedsize(op, descrsize) - self.assembler.set_vtable(eax, imm(classint)) - # result of fastpath malloc is in eax - else: - args = self.assembler.cpu.gc_ll_descr.args_for_new(descrsize) - arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.getarg(0))) - return self._call(op, arglocs) - - def consider_newstr(self, op): - loc = self.loc(op.getarg(0)) - return 
self._call(op, [loc]) - - def consider_newunicode(self, op): - loc = self.loc(op.getarg(0)) - return self._call(op, [loc]) - - def consider_new_array(self, op): - gc_ll_descr = self.assembler.cpu.gc_ll_descr - box_num_elem = op.getarg(0) - if isinstance(box_num_elem, ConstInt): - num_elem = box_num_elem.value - if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), - num_elem): - self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) - return - args = self.assembler.cpu.gc_ll_descr.args_for_new_array( - op.getdescr()) - arglocs = [imm(x) for x in args] - arglocs.append(self.loc(box_num_elem)) - self._call(op, arglocs) + size) def _unpack_arraydescr(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - ofs = arraydescr.get_base_size(self.translate_support_code) - size = arraydescr.get_item_size(self.translate_support_code) - ptr = arraydescr.is_array_of_pointers() + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.basesize + size = arraydescr.itemsize sign = arraydescr.is_item_signed() - return size, ofs, ofs_length, ptr, sign + return size, ofs, sign def _unpack_fielddescr(self, fielddescr): - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) ofs = fielddescr.offset - size = fielddescr.get_field_size(self.translate_support_code) - ptr = fielddescr.is_pointer_field() + size = fielddescr.field_size sign = fielddescr.is_field_signed() - return imm(ofs), imm(size), ptr, sign + return imm(ofs), imm(size), sign + _unpack_fielddescr._always_inline_ = True def _unpack_interiorfielddescr(self, descr): assert isinstance(descr, InteriorFieldDescr) arraydescr = descr.arraydescr - ofs = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - fieldsize = descr.fielddescr.get_field_size(self.translate_support_code) + ofs = arraydescr.basesize + itemsize = arraydescr.itemsize + fieldsize = descr.fielddescr.field_size sign = descr.fielddescr.is_field_signed() ofs += descr.fielddescr.offset return imm(ofs), imm(itemsize), imm(fieldsize), sign def consider_setfield_gc(self, op): - ofs_loc, size_loc, _, _ = self._unpack_fielddescr(op.getdescr()) + ofs_loc, size_loc, _ = self._unpack_fielddescr(op.getdescr()) assert isinstance(size_loc, ImmedLoc) if size_loc.value == 1: need_lower_byte = True @@ -1117,7 +1062,7 @@ consider_unicodesetitem = consider_strsetitem def consider_setarrayitem_gc(self, op): - itemsize, ofs, _, _, _ = self._unpack_arraydescr(op.getdescr()) + itemsize, ofs, _ = self._unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) if itemsize == 1: @@ -1134,7 +1079,7 @@ consider_setarrayitem_raw = consider_setarrayitem_gc def consider_getfield_gc(self, op): - ofs_loc, size_loc, _, sign = self._unpack_fielddescr(op.getdescr()) + ofs_loc, size_loc, sign = self._unpack_fielddescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars(args) @@ -1150,7 +1095,7 @@ consider_getfield_gc_pure = consider_getfield_gc def consider_getarrayitem_gc(self, op): - itemsize, ofs, _, _, sign = self._unpack_arraydescr(op.getdescr()) + itemsize, ofs, sign = self._unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) @@ -1229,8 +1174,8 @@ def 
consider_arraylen_gc(self, op): arraydescr = op.getdescr() - assert isinstance(arraydescr, BaseArrayDescr) - ofs = arraydescr.get_ofs_length(self.translate_support_code) + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.lendescr.offset args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars_for_op(op) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -8,7 +8,7 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.codewriter import heaptracker from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.backend.llsupport.descr import GcCache +from pypy.jit.backend.llsupport.descr import GcCache, FieldDescr, FLAG_SIGNED from pypy.jit.backend.llsupport.gc import GcLLDescription from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.regalloc import RegAlloc @@ -17,7 +17,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.annlowlevel import llhelper from pypy.rpython.lltypesystem import rclass, rstr -from pypy.jit.backend.llsupport.gc import GcLLDescr_framework, GcPtrFieldDescr +from pypy.jit.backend.llsupport.gc import GcLLDescr_framework from pypy.jit.backend.x86.test.test_regalloc import MockAssembler from pypy.jit.backend.x86.test.test_regalloc import BaseTestRegalloc @@ -41,20 +41,15 @@ return ['compressed'] + shape[1:] class MockGcDescr(GcCache): - def get_funcptr_for_new(self): - return 123 - get_funcptr_for_newarray = get_funcptr_for_new - get_funcptr_for_newstr = get_funcptr_for_new - get_funcptr_for_newunicode = get_funcptr_for_new get_malloc_slowpath_addr = None - + write_barrier_descr = None moving_gc = True gcrootmap = MockGcRootMap() def initialize(self): pass - record_constptrs = GcLLDescr_framework.record_constptrs.im_func + _record_constptrs = GcLLDescr_framework._record_constptrs.im_func rewrite_assembler = GcLLDescr_framework.rewrite_assembler.im_func class TestRegallocDirectGcIntegration(object): @@ -170,42 +165,32 @@ ''' self.interpret(ops, [0, 0, 0, 0, 0, 0, 0, 0, 0], run=False) +NOT_INITIALIZED = chr(0xdd) + class GCDescrFastpathMalloc(GcLLDescription): gcrootmap = None - expected_malloc_slowpath_size = WORD*2 + write_barrier_descr = None def __init__(self): - GcCache.__init__(self, False) + GcLLDescription.__init__(self, None) # create a nursery - NTP = rffi.CArray(lltype.Signed) - self.nursery = lltype.malloc(NTP, 16, flavor='raw') - self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 3, + NTP = rffi.CArray(lltype.Char) + self.nursery = lltype.malloc(NTP, 64, flavor='raw') + for i in range(64): + self.nursery[i] = NOT_INITIALIZED + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 2, flavor='raw') self.addrs[0] = rffi.cast(lltype.Signed, self.nursery) - self.addrs[1] = self.addrs[0] + 16*WORD - self.addrs[2] = 0 - # 16 WORDs + self.addrs[1] = self.addrs[0] + 64 + self.calls = [] def malloc_slowpath(size): - assert size == self.expected_malloc_slowpath_size + self.calls.append(size) + # reset the nursery nadr = rffi.cast(lltype.Signed, self.nursery) self.addrs[0] = nadr + size - self.addrs[2] += 1 return nadr - self.malloc_slowpath = malloc_slowpath - self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], - lltype.Signed) - self._counter = 123000 - - def can_inline_malloc(self, descr): - return True - - def get_funcptr_for_new(self): - 
return 42 -# return llhelper(lltype.Ptr(self.NEW_TP), self.new) - - def init_size_descr(self, S, descr): - descr.tid = self._counter - self._counter += 1 + self.generate_function('malloc_nursery', malloc_slowpath, + [lltype.Signed], lltype.Signed) def get_nursery_free_addr(self): return rffi.cast(lltype.Signed, self.addrs) @@ -214,204 +199,61 @@ return rffi.cast(lltype.Signed, self.addrs) + WORD def get_malloc_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) - return rffi.cast(lltype.Signed, fptr) + return self.get_malloc_fn_addr('malloc_nursery') - get_funcptr_for_newarray = None - get_funcptr_for_newstr = None - get_funcptr_for_newunicode = None + def check_nothing_in_nursery(self): + # CALL_MALLOC_NURSERY should not write anything in the nursery + for i in range(64): + assert self.nursery[i] == NOT_INITIALIZED class TestMallocFastpath(BaseTestRegalloc): def setup_method(self, method): cpu = CPU(None, None) - cpu.vtable_offset = WORD cpu.gc_ll_descr = GCDescrFastpathMalloc() cpu.setup_once() + self.cpu = cpu - # hack: specify 'tid' explicitly, because this test is not running - # with the gc transformer - NODE = lltype.GcStruct('node', ('tid', lltype.Signed), - ('value', lltype.Signed)) - nodedescr = cpu.sizeof(NODE) - valuedescr = cpu.fielddescrof(NODE, 'value') - - self.cpu = cpu - self.nodedescr = nodedescr - vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) - vtable_int = cpu.cast_adr_to_int(llmemory.cast_ptr_to_adr(vtable)) - NODE2 = lltype.GcStruct('node2', - ('parent', rclass.OBJECT), - ('tid', lltype.Signed), - ('vtable', lltype.Ptr(rclass.OBJECT_VTABLE))) - descrsize = cpu.sizeof(NODE2) - heaptracker.register_known_gctype(cpu, vtable, NODE2) - self.descrsize = descrsize - self.vtable_int = vtable_int - - self.namespace = locals().copy() - def test_malloc_fastpath(self): ops = ''' - [i0] - p0 = new(descr=nodedescr) - setfield_gc(p0, i0, descr=valuedescr) - finish(p0) + [] + p0 = call_malloc_nursery(16) + p1 = call_malloc_nursery(32) + p2 = call_malloc_nursery(16) + finish(p0, p1, p2) ''' - self.interpret(ops, [42]) - # check the nursery + self.interpret(ops, []) + # check the returned pointers gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == self.nodedescr.tid - assert gc_ll_descr.nursery[1] == 42 nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*2) - assert gc_ll_descr.addrs[2] == 0 # slowpath never called + ref = self.cpu.get_latest_value_ref + assert rffi.cast(lltype.Signed, ref(0)) == nurs_adr + 0 + assert rffi.cast(lltype.Signed, ref(1)) == nurs_adr + 16 + assert rffi.cast(lltype.Signed, ref(2)) == nurs_adr + 48 + # check the nursery content and state + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.addrs[0] == nurs_adr + 64 + # slowpath never called + assert gc_ll_descr.calls == [] def test_malloc_slowpath(self): ops = ''' [] - p0 = new(descr=nodedescr) - p1 = new(descr=nodedescr) - p2 = new(descr=nodedescr) - p3 = new(descr=nodedescr) - p4 = new(descr=nodedescr) - p5 = new(descr=nodedescr) - p6 = new(descr=nodedescr) - p7 = new(descr=nodedescr) - p8 = new(descr=nodedescr) - finish(p0, p1, p2, p3, p4, p5, p6, p7, p8) + p0 = call_malloc_nursery(16) + p1 = call_malloc_nursery(32) + p2 = call_malloc_nursery(24) # overflow + finish(p0, p1, p2) ''' self.interpret(ops, []) + # check the returned pointers + gc_ll_descr = self.cpu.gc_ll_descr + nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) + ref = self.cpu.get_latest_value_ref 
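# Illustration only (not from the patch): CALL_MALLOC_NURSERY, as emitted by
# malloc_cond() in assembler.py, is a bump-pointer allocation over the two
# words exposed by get_nursery_free_addr()/get_nursery_top_addr().  A rough
# Python sketch of the fast path, with 'addrs' standing for those two words:
WORD = 8                                 # assumed machine word size (x86-64)
def nursery_malloc(addrs, size, malloc_slowpath):
    assert size & (WORD - 1) == 0        # the rewriter passes aligned sizes
    result = addrs[0]                    # current nursery_free
    if result + size <= addrs[1]:        # new free pointer still <= nursery_top?
        addrs[0] = result + size         # bump the free pointer
        return result
    # nursery full: the slow path (like malloc_slowpath above) resets the
    # nursery and updates addrs[0] itself before returning the new pointer.
    return malloc_slowpath(size)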
+ assert rffi.cast(lltype.Signed, ref(0)) == nurs_adr + 0 + assert rffi.cast(lltype.Signed, ref(1)) == nurs_adr + 16 + assert rffi.cast(lltype.Signed, ref(2)) == nurs_adr + 0 + # check the nursery content and state + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.addrs[0] == nurs_adr + 24 # this should call slow path once - gc_ll_descr = self.cpu.gc_ll_descr - nadr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nadr + (WORD*2) - assert gc_ll_descr.addrs[2] == 1 # slowpath called once - - def test_new_with_vtable(self): - ops = ''' - [i0, i1] - p0 = new_with_vtable(ConstClass(vtable)) - guard_class(p0, ConstClass(vtable)) [i0] - finish(i1) - ''' - self.interpret(ops, [0, 1]) - assert self.getint(0) == 1 - gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == self.descrsize.tid - assert gc_ll_descr.nursery[1] == self.vtable_int - nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*3) - assert gc_ll_descr.addrs[2] == 0 # slowpath never called - - -class Seen(Exception): - pass - -class GCDescrFastpathMallocVarsize(GCDescrFastpathMalloc): - def can_inline_malloc_varsize(self, arraydescr, num_elem): - return num_elem < 5 - def get_funcptr_for_newarray(self): - return 52 - def init_array_descr(self, A, descr): - descr.tid = self._counter - self._counter += 1 - def args_for_new_array(self, descr): - raise Seen("args_for_new_array") - -class TestMallocVarsizeFastpath(BaseTestRegalloc): - def setup_method(self, method): - cpu = CPU(None, None) - cpu.vtable_offset = WORD - cpu.gc_ll_descr = GCDescrFastpathMallocVarsize() - cpu.setup_once() - self.cpu = cpu - - ARRAY = lltype.GcArray(lltype.Signed) - arraydescr = cpu.arraydescrof(ARRAY) - self.arraydescr = arraydescr - ARRAYCHAR = lltype.GcArray(lltype.Char) - arraychardescr = cpu.arraydescrof(ARRAYCHAR) - - self.namespace = locals().copy() - - def test_malloc_varsize_fastpath(self): - # Hack. Running the GcLLDescr_framework without really having - # a complete GC means that we end up with both the tid and the - # length being at offset 0. In this case, so the length overwrites - # the tid. This is of course only the case in this test class. 
- ops = ''' - [] - p0 = new_array(4, descr=arraydescr) - setarrayitem_gc(p0, 0, 142, descr=arraydescr) - setarrayitem_gc(p0, 3, 143, descr=arraydescr) - finish(p0) - ''' - self.interpret(ops, []) - # check the nursery - gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == 4 - assert gc_ll_descr.nursery[1] == 142 - assert gc_ll_descr.nursery[4] == 143 - nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*5) - assert gc_ll_descr.addrs[2] == 0 # slowpath never called - - def test_malloc_varsize_slowpath(self): - ops = ''' - [] - p0 = new_array(4, descr=arraydescr) - setarrayitem_gc(p0, 0, 420, descr=arraydescr) - setarrayitem_gc(p0, 3, 430, descr=arraydescr) - p1 = new_array(4, descr=arraydescr) - setarrayitem_gc(p1, 0, 421, descr=arraydescr) - setarrayitem_gc(p1, 3, 431, descr=arraydescr) - p2 = new_array(4, descr=arraydescr) - setarrayitem_gc(p2, 0, 422, descr=arraydescr) - setarrayitem_gc(p2, 3, 432, descr=arraydescr) - p3 = new_array(4, descr=arraydescr) - setarrayitem_gc(p3, 0, 423, descr=arraydescr) - setarrayitem_gc(p3, 3, 433, descr=arraydescr) - finish(p0, p1, p2, p3) - ''' - gc_ll_descr = self.cpu.gc_ll_descr - gc_ll_descr.expected_malloc_slowpath_size = 5*WORD - self.interpret(ops, []) - assert gc_ll_descr.addrs[2] == 1 # slowpath called once - - def test_malloc_varsize_too_big(self): - ops = ''' - [] - p0 = new_array(5, descr=arraydescr) - finish(p0) - ''' - py.test.raises(Seen, self.interpret, ops, []) - - def test_malloc_varsize_variable(self): - ops = ''' - [i0] - p0 = new_array(i0, descr=arraydescr) - finish(p0) - ''' - py.test.raises(Seen, self.interpret, ops, []) - - def test_malloc_array_of_char(self): - # check that fastpath_malloc_varsize() respects the alignment - # of the pointer in the nursery - ops = ''' - [] - p1 = new_array(1, descr=arraychardescr) - p2 = new_array(2, descr=arraychardescr) - p3 = new_array(3, descr=arraychardescr) - p4 = new_array(4, descr=arraychardescr) - finish(p1, p2, p3, p4) - ''' - self.interpret(ops, []) - p1 = self.getptr(0, llmemory.GCREF) - p2 = self.getptr(1, llmemory.GCREF) - p3 = self.getptr(2, llmemory.GCREF) - p4 = self.getptr(3, llmemory.GCREF) - assert p1._obj.intval & (WORD-1) == 0 # aligned - assert p2._obj.intval & (WORD-1) == 0 # aligned - assert p3._obj.intval & (WORD-1) == 0 # aligned - assert p4._obj.intval & (WORD-1) == 0 # aligned + assert gc_ll_descr.calls == [24] diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -249,7 +249,7 @@ while len(result) < count: x = fn() keys = [x._getregkey()] - if isinstance(x, StackLoc) and x.width > WORD: + if isinstance(x, StackLoc) and x.get_width() > WORD: keys.append(keys[0] + WORD) for key in keys: if key in seen: @@ -267,7 +267,7 @@ for i, loc in enumerate(locations): if isinstance(loc, RegLoc): if loc.is_xmm: - if loc.width > WORD: + if loc.get_width() > WORD: newvalue = ('value-xmm-%d' % i, 'value-xmm-hiword-%d' % i) else: @@ -276,8 +276,8 @@ else: regs1[loc.value] = 'value-int-%d' % i elif isinstance(loc, StackLoc): - stack[loc.value] = 'value-width%d-%d' % (loc.width, i) - if loc.width > WORD: + stack[loc.value] = 'value-width%d-%d' % (loc.get_width(), i) + if loc.get_width() > WORD: stack[loc.value+WORD] = 'value-hiword-%d' % i else: assert isinstance(loc, ImmedLoc) @@ -299,7 +299,7 @@ # def read(loc, expected_width=None): if expected_width is not None: - assert loc.width == 
expected_width + assert loc.get_width() == expected_width if isinstance(loc, RegLoc): if loc.is_xmm: return regs2[loc.value] @@ -307,7 +307,7 @@ return regs1[loc.value] if isinstance(loc, StackLoc): got = stack[loc.value] - if loc.width > WORD: + if loc.get_width() > WORD: got = (got, stack[loc.value+WORD]) return got if isinstance(loc, ImmedLoc): @@ -321,7 +321,7 @@ else: regs1[loc.value] = newvalue elif isinstance(loc, StackLoc): - if loc.width > WORD: + if loc.get_width() > WORD: newval1, newval2 = newvalue stack[loc.value] = newval1 stack[loc.value+WORD] = newval2 diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -69,6 +69,7 @@ return ctypes.cast(res.value._obj.intval, ctypes.POINTER(item_tp)) def test_allocations(self): + py.test.skip("rewrite or kill") from pypy.rpython.lltypesystem import rstr allocs = [None] diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -69,16 +69,17 @@ def get_functions_to_patch(): from pypy.jit.backend.llsupport import gc # - can_inline_malloc1 = gc.GcLLDescr_framework.can_inline_malloc - def can_inline_malloc2(*args): + can_use_nursery_malloc1 = gc.GcLLDescr_framework.can_use_nursery_malloc + def can_use_nursery_malloc2(*args): try: if os.environ['PYPY_NO_INLINE_MALLOC']: return False except KeyError: pass - return can_inline_malloc1(*args) + return can_use_nursery_malloc1(*args) # - return {(gc.GcLLDescr_framework, 'can_inline_malloc'): can_inline_malloc2} + return {(gc.GcLLDescr_framework, 'can_use_nursery_malloc'): + can_use_nursery_malloc2} def compile(f, gc, enable_opts='', **kwds): from pypy.annotation.listdef import s_list_of_strings diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -498,27 +498,29 @@ else: log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) + def _rewrite_raw_malloc(self, op, name, args): + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) + TYPE = op.args[0].value + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, args, + extra = (TYPE,), + extrakey = TYPE) + def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': - d = op.args[1].value.copy() - d.pop('flavor') - add_memory_pressure = d.pop('add_memory_pressure', False) - zero = d.pop('zero', False) - track_allocation = d.pop('track_allocation', True) - if d: - raise UnsupportedMallocFlags(d) - ARRAY = op.args[0].value - name = 'raw_malloc' - if zero: - name += '_zero' - if add_memory_pressure: - name += '_add_memory_pressure' - if not track_allocation: - name += '_no_track_allocation' - return self._do_builtin_call(op, name, - [op.args[2]], - extra = (ARRAY,), - extrakey = ARRAY) + return self._rewrite_raw_malloc(op, 'raw_malloc_varsize', + [op.args[2]]) if op.args[0].value == rstr.STR: return SpaceOperation('newstr', [op.args[2]], op.result) elif op.args[0].value == rstr.UNICODE: @@ -531,11 +533,18 @@ op.result) def 
rewrite_op_free(self, op): - flags = op.args[1].value - assert flags['flavor'] == 'raw' - ARRAY = op.args[0].concretetype.TO - return self._do_builtin_call(op, 'raw_free', [op.args[0]], - extra = (ARRAY,), extrakey = ARRAY) + d = op.args[1].value.copy() + assert d['flavor'] == 'raw' + d.pop('flavor') + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) + STRUCT = op.args[0].concretetype.TO + name = 'raw_free' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[0]], + extra = (STRUCT,), extrakey = STRUCT) def rewrite_op_getarrayitem(self, op): ARRAY = op.args[0].concretetype.TO @@ -736,6 +745,9 @@ return [op0, op1] def rewrite_op_malloc(self, op): + if op.args[1].value['flavor'] == 'raw': + return self._rewrite_raw_malloc(op, 'raw_malloc_fixedsize', []) + # assert op.args[1].value == {'flavor': 'gc'} STRUCT = op.args[0].value vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, STRUCT) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -599,26 +599,75 @@ return p return _ll_0_alloc_with_del - def build_raw_malloc_builder(zero=False, add_memory_pressure=False, track_allocation=True): - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, add_memory_pressure=add_memory_pressure) - return _ll_1_raw_malloc - return build_ll_1_raw_malloc + def build_raw_malloc_varsize_builder(zero=False, + add_memory_pressure=False, + track_allocation=True): + def build_ll_1_raw_malloc_varsize(ARRAY): + def _ll_1_raw_malloc_varsize(n): + return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) + return _ll_1_raw_malloc_varsize + return build_ll_1_raw_malloc_varsize - build_ll_1_raw_malloc = build_raw_malloc_builder() - build_ll_1_raw_malloc_zero = build_raw_malloc_builder(zero=True) - build_ll_1_raw_malloc_zero_add_memory_pressure = build_raw_malloc_builder(zero=True, add_memory_pressure=True) - build_ll_1_raw_malloc_add_memory_pressure = build_raw_malloc_builder(add_memory_pressure=True) - build_ll_1_raw_malloc_no_track_allocation = build_raw_malloc_builder(track_allocation=False) - build_ll_1_raw_malloc_zero_no_track_allocation = build_raw_malloc_builder(zero=True, track_allocation=False) - build_ll_1_raw_malloc_zero_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(zero=True, add_memory_pressure=True, track_allocation=False) - build_ll_1_raw_malloc_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(add_memory_pressure=True, track_allocation=False) + build_ll_1_raw_malloc_varsize = ( + build_raw_malloc_varsize_builder()) + build_ll_1_raw_malloc_varsize_zero = ( + build_raw_malloc_varsize_builder(zero=True)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure = ( + build_raw_malloc_varsize_builder(zero=True, add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_add_memory_pressure = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_no_track_allocation = ( + build_raw_malloc_varsize_builder(track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, 
add_memory_pressure=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True, track_allocation=False)) - def build_ll_1_raw_free(ARRAY): - def _ll_1_raw_free(p): - lltype.free(p, flavor='raw') - return _ll_1_raw_free + def build_raw_malloc_fixedsize_builder(zero=False, + add_memory_pressure=False, + track_allocation=True): + def build_ll_0_raw_malloc_fixedsize(STRUCT): + def _ll_0_raw_malloc_fixedsize(): + return lltype.malloc(STRUCT, flavor='raw', zero=zero, + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) + return _ll_0_raw_malloc_fixedsize + return build_ll_0_raw_malloc_fixedsize + + build_ll_0_raw_malloc_fixedsize = ( + build_raw_malloc_fixedsize_builder()) + build_ll_0_raw_malloc_fixedsize_zero = ( + build_raw_malloc_fixedsize_builder(zero=True)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(zero=True, add_memory_pressure=True)) + build_ll_0_raw_malloc_fixedsize_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True)) + build_ll_0_raw_malloc_fixedsize_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, add_memory_pressure=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True, track_allocation=False)) + + def build_raw_free_builder(track_allocation=True): + def build_ll_1_raw_free(ARRAY): + def _ll_1_raw_free(p): + lltype.free(p, flavor='raw', + track_allocation=track_allocation) + return _ll_1_raw_free + return build_ll_1_raw_free + + build_ll_1_raw_free = ( + build_raw_free_builder()) + build_ll_1_raw_free_no_track_allocation = ( + build_raw_free_builder(track_allocation=False)) + class OOtypeHelpers: diff --git a/pypy/jit/codewriter/test/test_codewriter.py b/pypy/jit/codewriter/test/test_codewriter.py --- a/pypy/jit/codewriter/test/test_codewriter.py +++ b/pypy/jit/codewriter/test/test_codewriter.py @@ -217,7 +217,7 @@ cw.make_jitcodes(verbose=True) # s = jitdriver_sd.mainjitcode.dump() - assert 'residual_call_ir_i $<* fn _ll_1_raw_malloc__Signed>' in s + assert 'residual_call_ir_i $<* fn _ll_1_raw_malloc_varsize__Signed>' in s assert 'setarrayitem_raw_i' in s assert 'getarrayitem_raw_i' in s assert 'residual_call_ir_v $<* fn _ll_1_raw_free__arrayPtr>' in s diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -550,7 +550,7 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) op0, op1 = tr.rewrite_operation(op) assert op0.opname == 'residual_call_ir_i' - assert op0.args[0].value == 'raw_malloc' # pseudo-function as a str + assert op0.args[0].value == 'raw_malloc_varsize' # pseudo-function as a str assert op1.opname == '-live-' assert op1.args == [] @@ -564,7 +564,7 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) op0, op1 = tr.rewrite_operation(op) assert op0.opname == 'residual_call_ir_i' - assert op0.args[0].value == 'raw_malloc_zero' # pseudo-function as a str + assert 
op0.args[0].value == 'raw_malloc_varsize_zero' # pseudo-fn as a str assert op1.opname == '-live-' assert op1.args == [] @@ -578,6 +578,35 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) +def test_raw_malloc_fixedsize(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc', [Constant(S, lltype.Void), flags], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_r_i' + assert op0.args[0].value == 'raw_malloc_fixedsize_zero' #pseudo-fn as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_free(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + for flag in [True, False]: + flags = Constant({'flavor': 'raw', 'track_allocation': flag}, + lltype.Void) + op = SpaceOperation('free', [varoftype(lltype.Ptr(S)), flags], + varoftype(lltype.Void)) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_v' + if flag: + pseudo_op_name = 'raw_free' + else: + pseudo_op_name = 'raw_free_no_track_allocation' + assert op0.args[0].value == pseudo_op_name # pseudo-function as a str + assert op1.opname == '-live-' + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -46,7 +46,7 @@ # get the function address as an integer func = argboxes[0].getint() # do the call using the correct function from the cpu - rettype = descr.get_return_type() + rettype = descr.get_result_type() if rettype == INT or rettype == 'S': # *S*ingle float try: result = cpu.bh_call_i(func, descr, args_i, args_r, args_f) @@ -344,6 +344,8 @@ rop.SETINTERIORFIELD_RAW, rop.CALL_RELEASE_GIL, rop.QUASIIMMUT_FIELD, + rop.CALL_MALLOC_GC, + rop.CALL_MALLOC_NURSERY, rop.LABEL, ): # list of opcodes never executed by pyjitpl continue diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -142,59 +142,6 @@ def repr_of_descr(self): return '%r' % (self,) - def get_arg_types(self): - """ Implement in call descr. - Must return a string of INT, REF and FLOAT ('i', 'r', 'f'). - """ - raise NotImplementedError - - def get_return_type(self): - """ Implement in call descr. - Must return INT, REF, FLOAT, or 'v' for void. - On 32-bit (hack) it can also be 'L' for longlongs. - Additionally it can be 'S' for singlefloats. - """ - raise NotImplementedError - - def get_extra_info(self): - """ Implement in call descr - """ - raise NotImplementedError - - def is_array_of_pointers(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_array_of_floats(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_array_of_structs(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_pointer_field(self): - """ Implement for field descr - """ - raise NotImplementedError - - def is_float_field(self): - """ Implement for field descr - """ - raise NotImplementedError - - def as_vtable_size_descr(self): - """ Implement for size descr representing objects with vtables. - Returns self. 
(it's an annotation hack) - """ - raise NotImplementedError - - def count_fields_if_immutable(self): - return -1 - def _clone_if_mutable(self): return self def clone_if_mutable(self): diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -21,6 +21,7 @@ # class MemoryManager(object): + NO_NEXT_CHECK = r_int64(2 ** 63 - 1) def __init__(self): self.check_frequency = -1 @@ -36,13 +37,13 @@ # According to my estimates it's about 5e9 years given 1000 loops # per second self.current_generation = r_int64(1) - self.next_check = r_int64(-1) + self.next_check = self.NO_NEXT_CHECK self.alive_loops = {} self._cleanup_jitcell_dicts = lambda: None def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: - self.next_check = r_int64(-1) + self.next_check = self.NO_NEXT_CHECK else: self.max_age = max_age if check_frequency <= 0: diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -246,15 +246,16 @@ self.force_lazy_setfields_and_arrayitems_for_guard()) return opnum = op.getopnum() - if (opnum == rop.SETFIELD_GC or # handled specially - opnum == rop.SETFIELD_RAW or # no effect on GC struct/array - opnum == rop.SETARRAYITEM_GC or # handled specially - opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct - opnum == rop.STRSETITEM or # no effect on GC struct/array - opnum == rop.UNICODESETITEM or # no effect on GC struct/array - opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever - opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array - opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array + if (opnum == rop.SETFIELD_GC or # handled specially + opnum == rop.SETFIELD_RAW or # no effect on GC struct/array + opnum == rop.SETARRAYITEM_GC or # handled specially + opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct + opnum == rop.SETINTERIORFIELD_RAW or # no effect on GC struct + opnum == rop.STRSETITEM or # no effect on GC struct/array + opnum == rop.UNICODESETITEM or # no effect on GC struct/array + opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever + opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array + opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array return assert opnum != rop.CALL_PURE if (opnum == rop.CALL or diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7755,6 +7755,22 @@ """ self.optimize_loop(ops, expected) + def test_setinteriorfield_should_not_clear_cache(self): + ops = """ + [i0, p0] + i2 = getfield_gc(p0, descr=adescr) + i3 = call(i2, descr=nonwritedescr) + setinteriorfield_raw(i0, i2, i3) + jump(i0, p0) + """ + expected = """ + [i0, p0, i2] + i3 = call(i2, descr=nonwritedescr) + setinteriorfield_raw(i0, i2, i3) + jump(i0, p0, i2) + """ + self.optimize_loop(ops, expected) + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -508,6 +508,8 @@ #'OOSEND', # ootype operation #'OOSEND_PURE', # ootype operation 'CALL_PURE/*d', # removed before it's passed to the backend + 'CALL_MALLOC_GC/*d', # like CALL, but NULL => 
propagate MemoryError + 'CALL_MALLOC_NURSERY/1', # nursery malloc, const number of bytes, zeroed '_CALL_LAST', '_CANRAISE_LAST', # ----- end of can_raise operations ----- diff --git a/pypy/jit/metainterp/test/test_executor.py b/pypy/jit/metainterp/test/test_executor.py --- a/pypy/jit/metainterp/test/test_executor.py +++ b/pypy/jit/metainterp/test/test_executor.py @@ -18,7 +18,7 @@ pass class FakeCallDescr(FakeDescr): - def get_return_type(self): + def get_result_type(self): return history.FLOAT class FakeFieldDescr(FakeDescr): diff --git a/pypy/jit/metainterp/test/test_rawmem.py b/pypy/jit/metainterp/test/test_rawmem.py --- a/pypy/jit/metainterp/test/test_rawmem.py +++ b/pypy/jit/metainterp/test/test_rawmem.py @@ -8,7 +8,7 @@ VOID_TP = lltype.Array(lltype.Void, hints={"nolength": True, "uncast_on_llgraph": True}) class A(object): def __init__(self, x): - self.storage = rffi.cast(lltype.Ptr(VOID_TP), x)\ + self.storage = rffi.cast(lltype.Ptr(VOID_TP), x) def f(n): x = lltype.malloc(TP, n, flavor="raw", zero=True) @@ -19,4 +19,14 @@ lltype.free(x, flavor="raw") return s res = self.interp_operations(f, [10]) - assert res == 1.0 \ No newline at end of file + + def test_fixed_size_malloc(self): + TIMEVAL = lltype.Struct('dummy', ('tv_sec', rffi.LONG), ('tv_usec', rffi.LONG)) + def f(): + p = lltype.malloc(TIMEVAL, flavor='raw') + lltype.free(p, flavor='raw') + return 42 + res = self.interp_operations(f, []) + assert res == 42 + self.check_operations_history({'call': 2, 'guard_no_exception': 1, + 'finish': 1}) diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -166,6 +166,7 @@ # The latest_generation_seen is older than the current generation. # Adjust by multiplying self.counter N times by decay_factor, i.e. # by decay_factor ** N, which is equal to exp(log(decay_factor)*N). + assert self.counter >= 0 N = generation - self.latest_generation_seen factor = math.exp(log_decay_factor * N) self.counter = int(self.counter * factor) diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -285,3 +285,10 @@ [], lineno=1, col_offset=0) ]) exec compile(body, '', 'exec') + + def test_invalid_sum(self): + import _ast as ast + pos = dict(lineno=2, col_offset=3) + m = ast.Module([ast.Expr(ast.expr(**pos), **pos)]) + exc = raises(TypeError, compile, m, "", "exec") + diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -161,11 +161,16 @@ def test_shutdown(self): import socket, ssl, sys, gc - if sys.platform == 'darwin': - skip("get also on CPython: error: [Errno 0]") ss = socket.ssl(self.s) ss.write("hello\n") - assert ss.shutdown() is self.s._sock + try: + result = ss.shutdown() + except socket.error, e: + # xxx obscure case; throwing errno 0 is pretty odd... + if e.errno == 0: + skip("Shutdown raised errno 0. 
CPython does this too") + raise + assert result is self.s._sock raises(ssl.SSLError, ss.write, "hello\n") del ss; gc.collect() diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -329,11 +329,16 @@ special_ops = {'repr': True, 'userdel': True, 'hash': True} for opname, _, arity, special_methods in ObjSpace.MethodTable: - if opname in special_ops: + if opname in special_ops or not special_methods: continue nonspaceargs = ", ".join(["w_obj%s" % i for i in range(arity)]) code = "def func(space, %s):\n '''%s'''\n" % (nonspaceargs, opname) - for i in range(arity): + assert arity >= len(special_methods) + forcing_count = len(special_methods) + if opname.startswith('inplace_'): + assert arity == 2 + forcing_count = arity + for i in range(forcing_count): code += " w_obj%s = force(space, w_obj%s)\n" % (i, i) code += " return space.%s(%s)" % (opname, nonspaceargs) exec py.code.Source(code).compile() diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ b/pypy/module/_weakref/test/test_weakref.py @@ -466,3 +466,44 @@ # No exception should be raised here gc.collect() + def test_add(self): + import _weakref + class A(object): + def __add__(self, other): + return other + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + a3 = p1 + p2 + assert a3 is a2 + + def test_inplace_add(self): + import _weakref + class A(object): + def __add__(self, other): + return other + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + p1 += p2 + assert p1 is a2 + + def test_setattr(self): + import _weakref + class A(object): + def __setitem__(self, key, value): + self.setkey = key + self.setvalue = value + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + p1[p2] = 42 + assert a1.setkey is p2 + assert a1.setvalue == 42 + # + p1[42] = p2 + assert a1.setkey == 42 + assert a1.setvalue is p2 diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -53,7 +53,6 @@ i = start for j in range(arr.size): arr[j] = i - j += 1 i += step return arr diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -133,7 +133,7 @@ descr__new__, get_dtype = new_dtype_getter("long") class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - pass + descr__new__, get_dtype = new_dtype_getter("ulong") class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int64") diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -581,6 +581,7 @@ def descr_get_dtype(self, space): return space.wrap(self.find_dtype()) + @jit.unroll_safe def descr_get_shape(self, space): return space.newtuple([space.wrap(i) for i in self.shape]) diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,34 +1,90 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt 
from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.interp_dtype import get_dtype_cache -from pypy.rlib.rstruct.runpack import runpack from pypy.rpython.lltypesystem import lltype, rffi +from pypy.module.micronumpy import interp_dtype +from pypy.objspace.std.strutil import strip_spaces FLOAT_SIZE = rffi.sizeof(lltype.Float) - at unwrap_spec(s=str) -def fromstring(space, s): +def _fromstring_text(space, s, count, sep, length, dtype): from pypy.module.micronumpy.interp_numarray import W_NDimArray + + sep_stripped = strip_spaces(sep) + skip_bad_vals = len(sep_stripped) == 0 + + items = [] + num_items = 0 + idx = 0 + + while (num_items < count or count == -1) and idx < len(s): + nextidx = s.find(sep, idx) + if nextidx < 0: + nextidx = length + piece = strip_spaces(s[idx:nextidx]) + if len(piece) > 0 or not skip_bad_vals: + if len(piece) == 0 and not skip_bad_vals: + val = dtype.itemtype.default_fromstring(space) + else: + try: + val = dtype.coerce(space, space.wrap(piece)) + except OperationError, e: + if not e.match(space, space.w_ValueError): + raise + gotit = False + while not gotit and len(piece) > 0: + piece = piece[:-1] + try: + val = dtype.coerce(space, space.wrap(piece)) + gotit = True + except OperationError, e: + if not e.match(space, space.w_ValueError): + raise + if not gotit: + val = dtype.itemtype.default_fromstring(space) + nextidx = length + items.append(val) + num_items += 1 + idx = nextidx + 1 + + if count > num_items: + raise OperationError(space.w_ValueError, space.wrap( + "string is smaller than requested size")) + + a = W_NDimArray(num_items, [num_items], dtype=dtype) + for i, val in enumerate(items): + a.dtype.setitem(a.storage, i, val) + + return space.wrap(a) + +def _fromstring_bin(space, s, count, length, dtype): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + + itemsize = dtype.itemtype.get_element_size() + if count == -1: + count = length / itemsize + if length % itemsize != 0: + raise operationerrfmt(space.w_ValueError, + "string length %d not divisable by item size %d", + length, itemsize) + if count * itemsize > length: + raise OperationError(space.w_ValueError, space.wrap( + "string is smaller than requested size")) + + a = W_NDimArray(count, [count], dtype=dtype) + for i in range(count): + val = dtype.itemtype.runpack_str(s[i*itemsize:i*itemsize + itemsize]) + a.dtype.setitem(a.storage, i, val) + + return space.wrap(a) + + at unwrap_spec(s=str, count=int, sep=str) +def fromstring(space, s, w_dtype=None, count=-1, sep=''): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) length = len(s) - - if length % FLOAT_SIZE == 0: - number = length/FLOAT_SIZE + if sep == '': + return _fromstring_bin(space, s, count, length, dtype) else: - raise OperationError(space.w_ValueError, space.wrap( - "string length %d not divisable by %d" % (length, FLOAT_SIZE))) - - dtype = get_dtype_cache(space).w_float64dtype - a = W_NDimArray(number, [number], dtype=dtype) - - start = 0 - end = FLOAT_SIZE - i = 0 - while i < number: - part = s[start:end] - a.dtype.setitem(a.storage, i, dtype.box(runpack('d', part))) - i += 1 - start += FLOAT_SIZE - end += FLOAT_SIZE - - return space.wrap(a) + return _fromstring_text(space, s, count, sep, length, dtype) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1194,13 +1194,110 
@@ import struct BaseNumpyAppTest.setup_class.im_func(cls) cls.w_data = cls.space.wrap(struct.pack('dddd', 1, 2, 3, 4)) + cls.w_fdata = cls.space.wrap(struct.pack('f', 2.3)) + cls.w_float32val = cls.space.wrap(struct.pack('f', 5.2)) + cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4)) + cls.w_ulongval = cls.space.wrap(struct.pack('L', 12)) def test_fromstring(self): - from numpypy import fromstring + import sys + from numpypy import fromstring, array, uint8, float32, int32 + a = fromstring(self.data) for i in range(4): assert a[i] == i + 1 - raises(ValueError, fromstring, "abc") + b = fromstring('\x01\x02', dtype=uint8) + assert a[0] == 1 + assert a[1] == 2 + c = fromstring(self.fdata, dtype=float32) + assert c[0] == float32(2.3) + d = fromstring("1 2", sep=' ', count=2, dtype=uint8) + assert len(d) == 2 + assert d[0] == 1 + assert d[1] == 2 + e = fromstring('3, 4,5', dtype=uint8, sep=',') + assert len(e) == 3 + assert e[0] == 3 + assert e[1] == 4 + assert e[2] == 5 + f = fromstring('\x01\x02\x03\x04\x05', dtype=uint8, count=3) + assert len(f) == 3 + assert f[0] == 1 + assert f[1] == 2 + assert f[2] == 3 + g = fromstring("1 2 3 ", dtype=uint8, sep=" ") + assert len(g) == 3 + assert g[0] == 1 + assert g[1] == 2 + assert g[2] == 3 + h = fromstring("1, , 2, 3", dtype=uint8, sep=",") + assert (h == [1,0,2,3]).all() + i = fromstring("1 2 3", dtype=uint8, sep=" ") + assert (i == [1,2,3]).all() + j = fromstring("1\t\t\t\t2\t3", dtype=uint8, sep="\t") + assert (j == [1,2,3]).all() + k = fromstring("1,x,2,3", dtype=uint8, sep=",") + assert (k == [1,0]).all() + l = fromstring("1,x,2,3", dtype='float32', sep=",") + assert (l == [1.0,-1.0]).all() + m = fromstring("1,,2,3", sep=",") + assert (m == [1.0,-1.0,2.0,3.0]).all() + n = fromstring("3.4 2.0 3.8 2.2", dtype=int32, sep=" ") + assert (n == [3]).all() + o = fromstring("1.0 2f.0f 3.8 2.2", dtype=float32, sep=" ") + assert len(o) == 2 + assert o[0] == 1.0 + assert o[1] == 2.0 + p = fromstring("1.0,,2.0,3.0", sep=",") + assert (p == [1.0, -1.0, 2.0, 3.0]).all() + q = fromstring("1.0,,2.0,3.0", sep=" ") + assert (q == [1.0]).all() + r = fromstring("\x01\x00\x02", dtype='bool') + assert (r == [True, False, True]).all() + s = fromstring("1,2,3,,5", dtype=bool, sep=",") + assert (s == [True, True, True, False, True]).all() + t = fromstring("", bool) + assert (t == []).all() + u = fromstring("\x01\x00\x00\x00\x00\x00\x00\x00", dtype=int) + if sys.maxint > 2 ** 31 - 1: + assert (u == [1]).all() + else: + assert (u == [1, 0]).all() + + def test_fromstring_types(self): + from numpypy import (fromstring, int8, int16, int32, int64, uint8, + uint16, uint32, float32, float64) + + a = fromstring('\xFF', dtype=int8) + assert a[0] == -1 + b = fromstring('\xFF', dtype=uint8) + assert b[0] == 255 + c = fromstring('\xFF\xFF', dtype=int16) + assert c[0] == -1 + d = fromstring('\xFF\xFF', dtype=uint16) + assert d[0] == 65535 + e = fromstring('\xFF\xFF\xFF\xFF', dtype=int32) + assert e[0] == -1 + f = fromstring('\xFF\xFF\xFF\xFF', dtype=uint32) + assert repr(f[0]) == '4294967295' + g = fromstring('\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF', dtype=int64) + assert g[0] == -1 + h = fromstring(self.float32val, dtype=float32) + assert h[0] == float32(5.2) + i = fromstring(self.float64val, dtype=float64) + assert i[0] == float64(300.4) + j = fromstring(self.ulongval, dtype='L') + assert j[0] == 12 + + + def test_fromstring_invalid(self): + from numpypy import fromstring, uint16, uint8, int32 + #default dtype is 64-bit float, so 3 bytes should fail + raises(ValueError, 
fromstring, "\x01\x02\x03") + #3 bytes is not modulo 2 bytes (int16) + raises(ValueError, fromstring, "\x01\x03\x03", dtype=uint16) + #5 bytes is larger than 3 bytes + raises(ValueError, fromstring, "\x01\x02\x03", count=5, dtype=uint8) class AppTestRepr(BaseNumpyAppTest): diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -8,6 +8,7 @@ from pypy.rlib.objectmodel import specialize from pypy.rlib.rarithmetic import LONG_BIT, widen from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rlib.rstruct.runpack import runpack def simple_unary_op(func): @@ -55,6 +56,7 @@ class Primitive(object): _mixin_ = True + def get_element_size(self): return rffi.sizeof(self.T) @@ -84,6 +86,9 @@ def _coerce(self, space, w_item): raise NotImplementedError + def default_fromstring(self, space): + raise NotImplementedError + def read(self, storage, width, i, offset): return self.box(libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), width, storage, i, offset @@ -102,6 +107,9 @@ width, storage, i, offset, value ) + def runpack_str(self, s): + return self.box(runpack(self.format_code, s)) + @simple_binary_op def add(self, v1, v2): return v1 + v2 @@ -164,6 +172,7 @@ class Bool(BaseType, Primitive): T = lltype.Bool BoxType = interp_boxes.W_BoolBox + format_code = "?" True = BoxType(True) False = BoxType(False) @@ -193,6 +202,9 @@ def for_computation(self, v): return int(v) + def default_fromstring(self, space): + return self.box(False) + class Integer(Primitive): _mixin_ = True @@ -206,6 +218,9 @@ def for_computation(self, v): return widen(v) + def default_fromstring(self, space): + return self.box(0) + @simple_binary_op def div(self, v1, v2): if v2 == 0: @@ -241,42 +256,52 @@ class Int8(BaseType, Integer): T = rffi.SIGNEDCHAR BoxType = interp_boxes.W_Int8Box + format_code = "b" class UInt8(BaseType, Integer): T = rffi.UCHAR BoxType = interp_boxes.W_UInt8Box + format_code = "B" class Int16(BaseType, Integer): T = rffi.SHORT BoxType = interp_boxes.W_Int16Box + format_code = "h" class UInt16(BaseType, Integer): T = rffi.USHORT BoxType = interp_boxes.W_UInt16Box + format_code = "H" class Int32(BaseType, Integer): T = rffi.INT BoxType = interp_boxes.W_Int32Box + format_code = "i" class UInt32(BaseType, Integer): T = rffi.UINT BoxType = interp_boxes.W_UInt32Box + format_code = "I" class Long(BaseType, Integer): T = rffi.LONG BoxType = interp_boxes.W_LongBox + format_code = "l" class ULong(BaseType, Integer): T = rffi.ULONG BoxType = interp_boxes.W_ULongBox + format_code = "L" class Int64(BaseType, Integer): T = rffi.LONGLONG BoxType = interp_boxes.W_Int64Box + format_code = "q" class UInt64(BaseType, Integer): T = rffi.ULONGLONG BoxType = interp_boxes.W_UInt64Box + format_code = "Q" def _coerce(self, space, w_item): try: @@ -304,6 +329,9 @@ def for_computation(self, v): return float(v) + def default_fromstring(self, space): + return self.box(-1.0) + @simple_binary_op def div(self, v1, v2): try: @@ -403,7 +431,9 @@ class Float32(BaseType, Float): T = rffi.FLOAT BoxType = interp_boxes.W_Float32Box + format_code = "f" class Float64(BaseType, Float): T = rffi.DOUBLE - BoxType = interp_boxes.W_Float64Box \ No newline at end of file + BoxType = interp_boxes.W_Float64Box + format_code = "d" \ No newline at end of file diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -656,7 
+656,11 @@ os.fsync(f) # <- should also work with a file, or anything finally: # with a fileno() method f.close() - raises(OSError, os.fsync, fd) + try: + # May not raise anything with a buggy libc (or eatmydata) + os.fsync(fd) + except OSError: + pass raises(ValueError, os.fsync, -1) if hasattr(os, 'fdatasync'): @@ -668,7 +672,11 @@ os.fdatasync(fd) finally: f.close() - raises(OSError, os.fdatasync, fd) + try: + # May not raise anything with a buggy libc (or eatmydata) + os.fdatasync(fd) + except OSError: + pass raises(ValueError, os.fdatasync, -1) if hasattr(os, 'fchdir'): diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -17,17 +17,8 @@ g() log = self.run(main, [500]) - # XXX XXX this test fails so far because of a detail that - # changed with jit-simplify-backendintf. We should try to - # think of a way to be more resistent against such details. - # The issue is that we now get one Tracing, then go back - # to the interpreter hoping to immediately run the JITted - # code; but instead, we Trace again, just because another - # counter was also about to reach its limit... - loop, = log.loops_by_filename(self.filepath, is_entry_bridge='*') + loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ - ... - label(..., descr=...) i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) setfield_gc(p45, i29, descr=) @@ -36,7 +27,7 @@ jump(..., descr=...) """) assert loop.match_by_id("subtract", """ - setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me + setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me i2 = int_sub_ovf(i1, 42) guard_no_overflow(descr=...) 
""") diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -31,9 +31,9 @@ imag2 = float2longlong(imag2) return real1 == real2 and imag1 == imag2 - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.rlib.longlong2float import float2longlong from pypy.objspace.std.model import IDTAG_COMPLEX as tag real = space.float_w(space.getattr(self, space.wrap("real"))) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -34,9 +34,9 @@ two = float2longlong(space.float_w(w_other)) return one == two - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.rlib.longlong2float import float2longlong from pypy.objspace.std.model import IDTAG_FLOAT as tag val = float2longlong(space.float_w(self)) diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -26,9 +26,9 @@ return self is w_other return space.int_w(self) == space.int_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.objspace.std.model import IDTAG_INT as tag b = space.bigint_w(self) b = b.lshift(3).or_(rbigint.fromint(tag)) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -18,9 +18,9 @@ return self is w_other return space.bigint_w(self).eq(space.bigint_w(w_other)) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.objspace.std.model import IDTAG_LONG as tag b = space.bigint_w(self) b = b.lshift(3).or_(rbigint.fromint(tag)) diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -32,9 +32,9 @@ return False return space.str_w(self) is space.str_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None return space.wrap(compute_unique_id(space.str_w(self))) @@ -514,44 +514,46 @@ if maxsplit == 0: return space.wrap(input) - #print "from replace, input: %s, sub: %s, by: %s" % (input, sub, by) + # An ok guess at the default size + builder = StringBuilder(len(input)) + first = True if not sub: upper = len(input) if maxsplit > 0 and maxsplit < upper + 2: upper = maxsplit - 1 assert upper >= 0 - substrings_w = [""] - for i in range(upper): - c = input[i] - substrings_w.append(c) - substrings_w.append(input[upper:]) + first = False + try: + for i in range(upper): + builder.append(by) + builder.append(input[i]) + builder.append(by) + builder.append_slice(input, upper, len(input)) + except MemoryError: + raise OperationError(space.w_OverflowError, + space.wrap("replace string too long") + ) else: start = 0 sublen = len(sub) - substrings_w = [] while maxsplit != 0: next = input.find(sub, start) if next < 0: break - substrings_w.append(input[start:next]) + if not first: 
+ builder.append(by) + first = False + builder.append_slice(input, start, next) start = next + sublen maxsplit -= 1 # NB. if it's already < 0, it stays < 0 - substrings_w.append(input[start:]) + if not first: + builder.append(by) + builder.append_slice(input, start, len(input)) - try: - # XXX conservative estimate. If your strings are that close - # to overflowing, bad luck. - one = ovfcheck(len(substrings_w) * len(by)) - ovfcheck(one + len(input)) - except OverflowError: - raise OperationError( - space.w_OverflowError, - space.wrap("replace string is too long")) - - return space.wrap(by.join(substrings_w)) + return space.wrap(builder.build()) def str_replace__String_ANY_ANY_ANY(space, w_self, w_sub, w_by, w_maxsplit): diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -253,6 +253,12 @@ y = 2j assert id(x) != id(y) + def test_object_hash_immutable(self): + x = 42 + y = 40 + y += 2 + assert object.__hash__(x) == object.__hash__(y) + def test_isinstance_shortcut(): from pypy.objspace.std import objspace diff --git a/pypy/objspace/std/test/test_stringobject.py b/pypy/objspace/std/test/test_stringobject.py --- a/pypy/objspace/std/test/test_stringobject.py +++ b/pypy/objspace/std/test/test_stringobject.py @@ -737,13 +737,6 @@ iterable = "hello" raises(TypeError, len, iter(iterable)) - def test_overflow_replace(self): - import sys - if sys.maxint > 2**31-1: - skip("Wrong platform") - x = "A" * (2**16) - raises(OverflowError, x.replace, '', x) - class AppTestPrebuilt(AppTestStringObject): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withprebuiltchar": True}) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -32,9 +32,9 @@ return False return space.unicode_w(self) is space.unicode_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None return space.wrap(compute_unique_id(space.unicode_w(self))) diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py --- a/pypy/rlib/longlong2float.py +++ b/pypy/rlib/longlong2float.py @@ -79,19 +79,19 @@ longlong2float = rffi.llexternal( "pypy__longlong2float", [rffi.LONGLONG], rffi.DOUBLE, _callable=longlong2float_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) float2longlong = rffi.llexternal( "pypy__float2longlong", [rffi.DOUBLE], rffi.LONGLONG, _callable=float2longlong_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, _callable=uint2singlefloat_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) singlefloat2uint = rffi.llexternal( "pypy__singlefloat2uint", [rffi.FLOAT], rffi.UINT, _callable=singlefloat2uint_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) diff --git a/pypy/rlib/rstring.py b/pypy/rlib/rstring.py --- a/pypy/rlib/rstring.py +++ b/pypy/rlib/rstring.py @@ -3,6 +3,7 @@ from pypy.annotation.model import (SomeObject, SomeString, s_None, SomeChar, 
SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePtr, SomePBC) +from pypy.rlib.rarithmetic import ovfcheck from pypy.tool.pairtype import pair, pairtype from pypy.rpython.extregistry import ExtRegistryEntry @@ -52,25 +53,37 @@ class AbstractStringBuilder(object): def __init__(self, init_size=INIT_SIZE): self.l = [] + self.size = 0 + + def _grow(self, size): + try: + self.size = ovfcheck(self.size + size) + except OverflowError: + raise MemoryError def append(self, s): assert isinstance(s, self.tp) self.l.append(s) + self._grow(len(s)) def append_slice(self, s, start, end): assert isinstance(s, self.tp) assert 0 <= start <= end <= len(s) - self.l.append(s[start:end]) + s = s[start:end] + self.l.append(s) + self._grow(len(s)) def append_multiple_char(self, c, times): assert isinstance(c, self.tp) self.l.append(c * times) + self._grow(times) def append_charpsize(self, s, size): l = [] for i in xrange(size): l.append(s[i]) self.l.append(self.tp("").join(l)) + self._grow(size) def build(self): return self.tp("").join(self.l) diff --git a/pypy/rpython/lltypesystem/llarena.py b/pypy/rpython/lltypesystem/llarena.py --- a/pypy/rpython/lltypesystem/llarena.py +++ b/pypy/rpython/lltypesystem/llarena.py @@ -374,6 +374,7 @@ following an object. For arenas containing heterogenous objects. If minsize is specified, it gives a minimum on the resulting size.""" return _round_up_for_allocation(size, minsize) +round_up_for_allocation._annenforceargs_ = [int, int] def _round_up_for_allocation(size, minsize): # internal return RoundedUpForAllocation(size, minsize) diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -16,6 +16,7 @@ from pypy.rpython.annlowlevel import llhelper from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.rstring import StringBuilder, UnicodeBuilder +from pypy.rlib import jit from pypy.rpython.lltypesystem import llmemory import os, sys @@ -249,8 +250,7 @@ wrapper = func_with_new_name(wrapper, name) if calling_conv != "c": - from pypy.rlib.jit import dont_look_inside - wrapper = dont_look_inside(wrapper) + wrapper = jit.dont_look_inside(wrapper) return wrapper @@ -697,6 +697,8 @@ return b.build() # str -> char* + # Can't inline this because of the raw address manipulation. + @jit.dont_look_inside def get_nonmovingbuffer(data): """ Either returns a non-moving copy or performs neccessary pointer @@ -717,6 +719,8 @@ get_nonmovingbuffer._annenforceargs_ = [strtype] # (str, char*) -> None + # Can't inline this because of the raw address manipulation. + @jit.dont_look_inside def free_nonmovingbuffer(data, buf): """ Either free a non-moving buffer or keep the original storage alive. 
diff --git a/pypy/tool/clean_old_branches.py b/pypy/tool/clean_old_branches.py --- a/pypy/tool/clean_old_branches.py +++ b/pypy/tool/clean_old_branches.py @@ -11,14 +11,17 @@ sys.exit(1) def heads(args): - g = os.popen(r"hg heads --topo %s --template '{branches} {node|short}\n'" + g = os.popen(r"hg heads --topo %s --template '{node|short}:{branches}\n'" % args, 'r') result = g.read() g.close() result = result.splitlines(False) - result = [s for s in result - if not s.startswith(' ') - and not s.startswith('closed-branches ')] + for line in result: + if len(line.split(':', 1)) != 2: + raise ValueError("'result' contains: %r" % line) + result = [s.split(':', 1) for s in result] + result = [(head, branch) for (head, branch) in result + if branch not in ['', 'closed-branches']] return result all_heads = heads("--closed") @@ -34,8 +37,7 @@ closed_heads.reverse() -for branch_head in closed_heads: - branch, head = branch_head.split() +for head, branch in closed_heads: print '\t', branch print print 'The branches listed above will be merged to "closed-branches".' @@ -54,8 +56,7 @@ print '*** error %r' % (err,) sys.exit(1) -for branch_head in closed_heads: - branch, head = branch_head.split() +for head, branch in closed_heads: print print '***** %s ***** %s *****' % (branch, head) do("hg up --clean closed-branches") diff --git a/pypy/tool/jitlogparser/test/test_modulefinder.py b/pypy/tool/jitlogparser/test/test_modulefinder.py --- a/pypy/tool/jitlogparser/test/test_modulefinder.py +++ b/pypy/tool/jitlogparser/test/test_modulefinder.py @@ -3,7 +3,7 @@ import re, sys def setup_module(mod): - if sys.version_info[:2] != (2.6): + if sys.version_info[:2] != (2, 6): py.test.skip("Specific python 2.6 tests") def test_gather_code_py(): From noreply at buildbot.pypy.org Tue Dec 20 09:42:31 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Dec 2011 09:42:31 +0100 (CET) Subject: [pypy-commit] pypy default: Un-merge counter-decay, which was definitely not really good. Message-ID: <20111220084231.5AD8D823F8@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50744:a2b9604a9859 Date: 2011-12-20 08:57 +0100 http://bitbucket.org/pypy/pypy/changeset/a2b9604a9859/ Log: Un-merge counter-decay, which was definitely not really good. More work should be going on in the branch. This cancels 5309a1389556, e790db7af776 and 15811e23d71a. 
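As context for this un-merge: the decaying-counter model that the diff below removes can be pictured with a small stand-alone sketch. The class and parameter names here (DecayingCounter, halflife) are illustrative only, not the actual warmstate.py code; the idea, taken from the removed adjust_counter/set_param_decay_halflife code, is that each counter is lazily multiplied by exp(log(0.5)/halflife) once per generation, so it halves after 'halflife' generations without activity.

import math

class DecayingCounter(object):
    """Illustrative sketch of a half-life decaying JIT counter (not PyPy code)."""

    def __init__(self, halflife):
        # log_decay_factor is chosen so that applying exp(log_decay_factor)
        # once per generation halves the counter after 'halflife' generations
        if halflife > 0:
            self.log_decay_factor = math.log(0.5) / halflife
        else:
            self.log_decay_factor = 0.0      # 0 or negative halflife: no decay
        self.counter = 0
        self.latest_generation_seen = 0

    def adjust(self, generation):
        # decay lazily: apply the per-generation factor N times in one step
        n = generation - self.latest_generation_seen
        if n > 0:
            self.counter = int(self.counter * math.exp(self.log_decay_factor * n))
            self.latest_generation_seen = generation

c = DecayingCounter(halflife=2)
c.counter = 100
c.adjust(2)
print(c.counter)    # about 50: halved after 'halflife' generations

The changeset below backs this behaviour out of default; the counter-decay branch keeps experimenting with it, as the following commit shows.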
diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -1,5 +1,5 @@ import math -from pypy.rlib.rarithmetic import r_int64, r_uint +from pypy.rlib.rarithmetic import r_int64 from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.rlib.objectmodel import we_are_translated @@ -21,7 +21,6 @@ # class MemoryManager(object): - NO_NEXT_CHECK = r_int64(2 ** 63 - 1) def __init__(self): self.check_frequency = -1 @@ -37,13 +36,12 @@ # According to my estimates it's about 5e9 years given 1000 loops # per second self.current_generation = r_int64(1) - self.next_check = self.NO_NEXT_CHECK + self.next_check = r_int64(-1) self.alive_loops = {} - self._cleanup_jitcell_dicts = lambda: None def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: - self.next_check = self.NO_NEXT_CHECK + self.next_check = r_int64(-1) else: self.max_age = max_age if check_frequency <= 0: @@ -51,11 +49,10 @@ self.check_frequency = check_frequency self.next_check = self.current_generation + 1 - def next_generation(self, do_cleanups_now=True): + def next_generation(self): self.current_generation += 1 - if do_cleanups_now and self.current_generation >= self.next_check: + if self.current_generation == self.next_check: self._kill_old_loops_now() - self._cleanup_jitcell_dicts() self.next_check = self.current_generation + self.check_frequency def keep_loop_alive(self, looptoken): @@ -84,22 +81,3 @@ # a single one is not enough for all tests :-( rgc.collect(); rgc.collect(); rgc.collect() debug_stop("jit-mem-collect") - - def get_current_generation_uint(self): - """Return the current generation, possibly truncated to a uint. - To use only as an approximation for decaying counters.""" - return r_uint(self.current_generation) - - def record_jitcell_dict(self, callback): - """NOT_RPYTHON. The given jitcell_dict is a dict that needs - occasional clean-ups of old cells. A cell is old if it never - reached the threshold, and its counter decayed to a tiny value.""" - # note that the various jitcell_dicts have different RPython types, - # so we have to make a different function for each one. These - # functions are chained to each other: each calls the previous one. 
- def cleanup_dict(): - callback() - cleanup_previous() - # - cleanup_previous = self._cleanup_jitcell_dicts - self._cleanup_jitcell_dicts = cleanup_dict diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2910,27 +2910,6 @@ res = self.meta_interp(f, [32]) assert res == f(32) - def test_decay_counters(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def f(m, n): - while n > 0: - myjitdriver.jit_merge_point(m=m, n=n) - n += m - n -= m - n -= 1 - def main(): - f(5, 7) # run 7x with m=5 counter[m=5] = 7 - f(15, 10) # compiles one loop counter[m=5] = 3 (automatic decay) - f(5, 5) # run 5x times with m=5 counter[m=5] = 8 - # - self.meta_interp(main, [], decay_halflife=1, - function_threshold=0, threshold=9, trace_eagerness=99) - self.check_trace_count(1) - # - self.meta_interp(main, [], decay_halflife=1, - function_threshold=0, threshold=8, trace_eagerness=99) - self.check_trace_count(2) - class TestOOtype(BasicTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -1,4 +1,3 @@ -import math from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -9,7 +8,7 @@ from pypy.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr from pypy.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr from pypy.jit.codewriter import longlong -from pypy.rlib.rarithmetic import r_singlefloat, r_uint +from pypy.rlib.rarithmetic import r_singlefloat def boxfloat(x): return BoxFloat(longlong.getfloatstorage(x)) @@ -276,77 +275,3 @@ state.make_jitdriver_callbacks() res = state.can_never_inline(5, 42.5) assert res is True - -def test_decay_counters(): - cell = JitCell(r_uint(5)) - cell.counter = 100 - cell.adjust_counter(r_uint(5), math.log(0.9)) - assert cell.counter == 100 - cell.adjust_counter(r_uint(6), math.log(0.9)) - assert cell.counter == 90 - cell.adjust_counter(r_uint(9), math.log(0.9)) - assert cell.counter == int(90 * (0.9**3)) - -def test_cleanup_jitcell_dict(): - from pypy.jit.metainterp.memmgr import MemoryManager - class FakeWarmRunnerDesc: - memory_manager = MemoryManager() - class cpu: - pass - class FakeJitDriverSD: - _green_args_spec = [lltype.Signed] - # - # Test creating tons of jitcells that remain at 0 - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) - get_jitcell = warmstate._make_jitcell_getter_default() - cell1 = get_jitcell(True, -1) - assert len(warmstate._jitcell_dict) == 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 1 - # - for i in range(1, 20005): - get_jitcell(True, i) # should trigger a clean-up at 20001 - assert len(warmstate._jitcell_dict) == (i % 20000) + 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 2 - # - # Same test, with one jitcell that has a counter of BASE instead of 0 - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) - warmstate.set_param_decay_halflife(2) - warmstate.set_param_threshold(5) - warmstate.set_param_function_threshold(0) - get_jitcell = warmstate._make_jitcell_getter_default() - cell2 = get_jitcell(True, -2) - cell2.counter = BASE = warmstate.increment_threshold * 3 - # - for i in range(0, 20005): - get_jitcell(True, i) - assert 
len(warmstate._jitcell_dict) == (i % 19999) + 2 - # - assert cell2 in warmstate._jitcell_dict.values() - assert cell2.counter == int(BASE * math.sqrt(0.5)) # decayed once - assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 - # - # Same test, with jitcells that are compiled and free by the memmgr - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) - get_jitcell = warmstate._make_jitcell_getter_default() - get_jitcell(True, -1) - assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 - # - for i in range(1, 20005): - cell = get_jitcell(True, i) - cell.counter = -1 - cell.wref_procedure_token = None # or a dead weakref, equivalently - assert len(warmstate._jitcell_dict) == (i % 20000) + 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 - # - # Same test, with counter == -2 (rare case, kept alive) - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) - get_jitcell = warmstate._make_jitcell_getter_default() - cell = get_jitcell(True, -1) - cell.counter = -2 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 - # - for i in range(1, 20005): - cell = get_jitcell(True, i) - cell.counter = -2 - assert len(warmstate._jitcell_dict) == i + 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 5 diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -64,11 +64,9 @@ def jittify_and_run(interp, graph, args, repeat=1, graph_and_interp_only=False, backendopt=False, trace_limit=sys.maxint, - threshold=3, trace_eagerness=2, inline=False, loop_longevity=0, retrace_limit=5, - function_threshold=4, decay_halflife=0, - enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, - **kwds): + function_threshold=4, + enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, **kwds): from pypy.config.config import ConfigError translator = interp.typer.annotator.translator try: @@ -85,16 +83,15 @@ pass warmrunnerdesc = WarmRunnerDesc(translator, backendopt=backendopt, **kwds) for jd in warmrunnerdesc.jitdrivers_sd: - jd.warmstate.set_param_threshold(threshold) + jd.warmstate.set_param_threshold(3) # for tests jd.warmstate.set_param_function_threshold(function_threshold) - jd.warmstate.set_param_trace_eagerness(trace_eagerness) + jd.warmstate.set_param_trace_eagerness(2) # for tests jd.warmstate.set_param_trace_limit(trace_limit) jd.warmstate.set_param_inlining(inline) jd.warmstate.set_param_loop_longevity(loop_longevity) jd.warmstate.set_param_retrace_limit(retrace_limit) jd.warmstate.set_param_max_retrace_guards(max_retrace_guards) jd.warmstate.set_param_enable_opts(enable_opts) - jd.warmstate.set_param_decay_halflife(decay_halflife) warmrunnerdesc.finish() if graph_and_interp_only: return interp, graph diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -1,10 +1,10 @@ -import sys, weakref, math +import sys, weakref from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, cast_base_ptr_to_instance from pypy.rpython.annlowlevel import cast_object_to_ptr from pypy.rlib.objectmodel import specialize, we_are_translated, r_dict -from pypy.rlib.rarithmetic import intmask, r_uint +from pypy.rlib.rarithmetic import intmask from pypy.rlib.nonconst import NonConstant from pypy.rlib.unroll import unrolling_iterable from 
pypy.rlib.jit import PARAMETERS @@ -153,25 +153,6 @@ dont_trace_here = False wref_procedure_token = None - def __init__(self, generation): - # The stored 'counter' value follows an exponential decay model. - # Conceptually after every generation, it decays by getting - # multiplied by a constant <= 1.0. In practice, decaying occurs - # lazily: the following field records the latest seen generation - # number, and adjustment is done by adjust_counter() when needed. - self.latest_generation_seen = generation - - def adjust_counter(self, generation, log_decay_factor): - if generation != self.latest_generation_seen: - # The latest_generation_seen is older than the current generation. - # Adjust by multiplying self.counter N times by decay_factor, i.e. - # by decay_factor ** N, which is equal to exp(log(decay_factor)*N). - assert self.counter >= 0 - N = generation - self.latest_generation_seen - factor = math.exp(log_decay_factor * N) - self.counter = int(self.counter * factor) - self.latest_generation_seen = generation - def get_procedure_token(self): if self.wref_procedure_token is not None: token = self.wref_procedure_token() @@ -191,6 +172,7 @@ class WarmEnterState(object): THRESHOLD_LIMIT = sys.maxint // 2 + default_jitcell_dict = None def __init__(self, warmrunnerdesc, jitdriver_sd): "NOT_RPYTHON" @@ -231,17 +213,6 @@ def set_param_inlining(self, value): self.inlining = value - def set_param_decay_halflife(self, value): - # Use 0 or -1 to mean "no decay". Initialize the internal variable - # 'log_decay_factor'. It is choosen such that by multiplying the - # counter on loops by 'exp(log_decay_factor)' (<= 1.0) every - # generation, then the counter will be divided by two after 'value' - # generations have passed. - if value <= 0: - self.log_decay_factor = 0.0 # log(1.0) - else: - self.log_decay_factor = math.log(0.5) / value - def set_param_enable_opts(self, value): from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT, ALL_OPTS_NAMES @@ -311,11 +282,6 @@ confirm_enter_jit = self.confirm_enter_jit range_red_args = unrolling_iterable( range(num_green_args, num_green_args + jitdriver_sd.num_red_args)) - memmgr = self.warmrunnerdesc.memory_manager - if memmgr is not None: - get_current_generation = memmgr.get_current_generation_uint - else: - get_current_generation = lambda: r_uint(0) # get a new specialized copy of the method ARGS = [] for kind in jitdriver_sd.red_args_types: @@ -360,8 +326,6 @@ if cell.counter >= 0: # update the profiling counter - cell.adjust_counter(get_current_generation(), - self.log_decay_factor) n = cell.counter + threshold if n <= self.THRESHOLD_LIMIT: # bound not reached cell.counter = n @@ -454,15 +418,6 @@ # return jit_getter - def _new_jitcell(self): - warmrunnerdesc = self.warmrunnerdesc - if (warmrunnerdesc is not None and - warmrunnerdesc.memory_manager is not None): - gen = warmrunnerdesc.memory_manager.get_current_generation_uint() - else: - gen = r_uint(0) - return JitCell(gen) - def _make_jitcell_getter_default(self): "NOT_RPYTHON" jitdriver_sd = self.jitdriver_sd @@ -492,53 +447,13 @@ except AttributeError: pass # - memmgr = self.warmrunnerdesc and self.warmrunnerdesc.memory_manager - if memmgr: - def _cleanup_dict(): - minimum = sys.maxint - if self.increment_threshold > 0: - minimum = min(minimum, self.increment_threshold) - if self.increment_function_threshold > 0: - minimum = min(minimum, self.increment_function_threshold) - currentgen = memmgr.get_current_generation_uint() - killme = [] - for key, cell in jitcell_dict.iteritems(): - if 
cell.counter >= 0: - cell.adjust_counter(currentgen, self.log_decay_factor) - if cell.counter < minimum: - killme.append(key) - elif (cell.counter == -1 - and cell.get_procedure_token() is None): - killme.append(key) - for key in killme: - del jitcell_dict[key] - # - def _maybe_cleanup_dict(): - # If no tracing goes on at all because the jitcells are - # each time for new greenargs, the dictionary grows forever. - # So every one in a (rare) while, we decide to force an - # artificial next_generation() and _cleanup_dict(). - self._trigger_automatic_cleanup += 1 - if self._trigger_automatic_cleanup > 20000: - self._trigger_automatic_cleanup = 0 - memmgr.next_generation(do_cleanups_now=False) - _cleanup_dict() - # - self._trigger_automatic_cleanup = 0 - self._jitcell_dict = jitcell_dict # for tests - memmgr.record_jitcell_dict(_cleanup_dict) - else: - def _maybe_cleanup_dict(): - pass - # def get_jitcell(build, *greenargs): try: cell = jitcell_dict[greenargs] except KeyError: if not build: return None - _maybe_cleanup_dict() - cell = self._new_jitcell() + cell = JitCell() jitcell_dict[greenargs] = cell return cell return get_jitcell @@ -549,10 +464,6 @@ get_jitcell_at_ptr = self.jitdriver_sd._get_jitcell_at_ptr set_jitcell_at_ptr = self.jitdriver_sd._set_jitcell_at_ptr lltohlhack = {} - # note that there is no equivalent of record_jitcell_dict() - # in the case of custom getters. We assume that the interpreter - # stores the JitCells on some objects that can go away by GC, - # like the PyCode objects in PyPy. # def get_jitcell(build, *greenargs): fn = support.maybe_on_top_of_llinterp(rtyper, get_jitcell_at_ptr) @@ -574,7 +485,7 @@ if not build: return cell if cell is None: - cell = self._new_jitcell() + cell = JitCell() # if we_are_translated(): cellref = cast_object_to_ptr(BASEJITCELL, cell) diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -395,7 +395,6 @@ 'retrace_limit': 5, 'max_retrace_guards': 15, 'enable_opts': 'all', - 'decay_halflife': 40, } unroll_parameters = unrolling_iterable(PARAMETERS.items()) DEFAULT = object() From noreply at buildbot.pypy.org Tue Dec 20 09:42:32 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Dec 2011 09:42:32 +0100 (CET) Subject: [pypy-commit] pypy counter-decay: Merge default again, but keeping the pieces that I want to keep. Message-ID: <20111220084232.94B69820B7@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: counter-decay Changeset: r50745:b3d18c96013e Date: 2011-12-20 09:19 +0100 http://bitbucket.org/pypy/pypy/changeset/b3d18c96013e/ Log: Merge default again, but keeping the pieces that I want to keep. The idea is to keep some mecanism to clean-up jitcell dicts, and to re- add some minimal form of decaying. 
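The "mechanism to clean up jitcell dicts" mentioned in this log can be sketched independently of the JIT internals. The toy model below uses assumed names (JitCellCache, bump) and is not the actual warmstate.py implementation; it only shows the core idea kept from the branch: a per-green-key counter dict that, every 20000 lookups, drops entries that never reached the compile threshold, so the dict stays bounded even when every call sees fresh green keys.

class JitCellCache(object):
    """Toy model of a jitcell dict with periodic cleanup (illustrative only)."""
    CLEANUP_EVERY = 20000

    def __init__(self, threshold):
        self.threshold = threshold
        self.cells = {}          # green key -> counter
        self._lookups = 0

    def bump(self, greenkey):
        self._lookups += 1
        if self._lookups > self.CLEANUP_EVERY:
            self._lookups = 0
            self._cleanup()
        n = self.cells.get(greenkey, 0) + 1
        self.cells[greenkey] = n
        return n >= self.threshold    # True would mean "start tracing"

    def _cleanup(self):
        # drop cells that never got hot, so the dict cannot grow forever
        cold = [k for k, v in self.cells.items() if v < self.threshold]
        for key in cold:
            del self.cells[key]

cache = JitCellCache(threshold=1000)
for i in range(25000):
    cache.bump(i)                 # every key is new, so none ever gets hot
assert len(cache.cells) < 25000   # the periodic cleanup kept the dict bounded

The diff that follows re-applies this kind of cleanup to the branch while dropping the parts of the decay heuristics that the previous changeset found problematic.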
diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -1,5 +1,5 @@ import math -from pypy.rlib.rarithmetic import r_int64, r_uint +from pypy.rlib.rarithmetic import r_int64 from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.rlib.objectmodel import we_are_translated @@ -21,7 +21,6 @@ # class MemoryManager(object): - NO_NEXT_CHECK = r_int64(2 ** 63 - 1) def __init__(self): self.check_frequency = -1 @@ -37,13 +36,12 @@ # According to my estimates it's about 5e9 years given 1000 loops # per second self.current_generation = r_int64(1) - self.next_check = self.NO_NEXT_CHECK + self.next_check = r_int64(-1) self.alive_loops = {} - self._cleanup_jitcell_dicts = lambda: None def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: - self.next_check = self.NO_NEXT_CHECK + self.next_check = r_int64(-1) else: self.max_age = max_age if check_frequency <= 0: @@ -51,11 +49,10 @@ self.check_frequency = check_frequency self.next_check = self.current_generation + 1 - def next_generation(self, do_cleanups_now=True): + def next_generation(self): self.current_generation += 1 - if do_cleanups_now and self.current_generation >= self.next_check: + if self.current_generation == self.next_check: self._kill_old_loops_now() - self._cleanup_jitcell_dicts() self.next_check = self.current_generation + self.check_frequency def keep_loop_alive(self, looptoken): @@ -84,22 +81,3 @@ # a single one is not enough for all tests :-( rgc.collect(); rgc.collect(); rgc.collect() debug_stop("jit-mem-collect") - - def get_current_generation_uint(self): - """Return the current generation, possibly truncated to a uint. - To use only as an approximation for decaying counters.""" - return r_uint(self.current_generation) - - def record_jitcell_dict(self, callback): - """NOT_RPYTHON. The given jitcell_dict is a dict that needs - occasional clean-ups of old cells. A cell is old if it never - reached the threshold, and its counter decayed to a tiny value.""" - # note that the various jitcell_dicts have different RPython types, - # so we have to make a different function for each one. These - # functions are chained to each other: each calls the previous one. 
- def cleanup_dict(): - callback() - cleanup_previous() - # - cleanup_previous = self._cleanup_jitcell_dicts - self._cleanup_jitcell_dicts = cleanup_dict diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2910,27 +2910,6 @@ res = self.meta_interp(f, [32]) assert res == f(32) - def test_decay_counters(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def f(m, n): - while n > 0: - myjitdriver.jit_merge_point(m=m, n=n) - n += m - n -= m - n -= 1 - def main(): - f(5, 7) # run 7x with m=5 counter[m=5] = 7 - f(15, 10) # compiles one loop counter[m=5] = 3 (automatic decay) - f(5, 5) # run 5x times with m=5 counter[m=5] = 8 - # - self.meta_interp(main, [], decay_halflife=1, - function_threshold=0, threshold=9, trace_eagerness=99) - self.check_trace_count(1) - # - self.meta_interp(main, [], decay_halflife=1, - function_threshold=0, threshold=8, trace_eagerness=99) - self.check_trace_count(2) - class TestOOtype(BasicTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -1,4 +1,3 @@ -import math from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -9,7 +8,7 @@ from pypy.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr from pypy.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr from pypy.jit.codewriter import longlong -from pypy.rlib.rarithmetic import r_singlefloat, r_uint +from pypy.rlib.rarithmetic import r_singlefloat def boxfloat(x): return BoxFloat(longlong.getfloatstorage(x)) @@ -277,16 +276,6 @@ res = state.can_never_inline(5, 42.5) assert res is True -def test_decay_counters(): - cell = JitCell(r_uint(5)) - cell.counter = 100 - cell.adjust_counter(r_uint(5), math.log(0.9)) - assert cell.counter == 100 - cell.adjust_counter(r_uint(6), math.log(0.9)) - assert cell.counter == 90 - cell.adjust_counter(r_uint(9), math.log(0.9)) - assert cell.counter == int(90 * (0.9**3)) - def test_cleanup_jitcell_dict(): from pypy.jit.metainterp.memmgr import MemoryManager class FakeWarmRunnerDesc: diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -64,11 +64,9 @@ def jittify_and_run(interp, graph, args, repeat=1, graph_and_interp_only=False, backendopt=False, trace_limit=sys.maxint, - threshold=3, trace_eagerness=2, inline=False, loop_longevity=0, retrace_limit=5, - function_threshold=4, decay_halflife=0, - enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, - **kwds): + function_threshold=4, + enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, **kwds): from pypy.config.config import ConfigError translator = interp.typer.annotator.translator try: @@ -85,16 +83,15 @@ pass warmrunnerdesc = WarmRunnerDesc(translator, backendopt=backendopt, **kwds) for jd in warmrunnerdesc.jitdrivers_sd: - jd.warmstate.set_param_threshold(threshold) + jd.warmstate.set_param_threshold(3) # for tests jd.warmstate.set_param_function_threshold(function_threshold) - jd.warmstate.set_param_trace_eagerness(trace_eagerness) + jd.warmstate.set_param_trace_eagerness(2) # for tests jd.warmstate.set_param_trace_limit(trace_limit) jd.warmstate.set_param_inlining(inline) 
jd.warmstate.set_param_loop_longevity(loop_longevity) jd.warmstate.set_param_retrace_limit(retrace_limit) jd.warmstate.set_param_max_retrace_guards(max_retrace_guards) jd.warmstate.set_param_enable_opts(enable_opts) - jd.warmstate.set_param_decay_halflife(decay_halflife) warmrunnerdesc.finish() if graph_and_interp_only: return interp, graph diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -1,10 +1,10 @@ -import sys, weakref, math +import sys, weakref from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, cast_base_ptr_to_instance from pypy.rpython.annlowlevel import cast_object_to_ptr from pypy.rlib.objectmodel import specialize, we_are_translated, r_dict -from pypy.rlib.rarithmetic import intmask, r_uint +from pypy.rlib.rarithmetic import intmask from pypy.rlib.nonconst import NonConstant from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.jit import PARAMETERS @@ -153,25 +153,6 @@ dont_trace_here = False wref_procedure_token = None - def __init__(self, generation): - # The stored 'counter' value follows an exponential decay model. - # Conceptually after every generation, it decays by getting - # multiplied by a constant <= 1.0. In practice, decaying occurs - # lazily: the following field records the latest seen generation - # number, and adjustment is done by adjust_counter() when needed. - self.latest_generation_seen = generation - - def adjust_counter(self, generation, log_decay_factor): - if generation != self.latest_generation_seen: - # The latest_generation_seen is older than the current generation. - # Adjust by multiplying self.counter N times by decay_factor, i.e. - # by decay_factor ** N, which is equal to exp(log(decay_factor)*N). - assert self.counter >= 0 - N = generation - self.latest_generation_seen - factor = math.exp(log_decay_factor * N) - self.counter = int(self.counter * factor) - self.latest_generation_seen = generation - def get_procedure_token(self): if self.wref_procedure_token is not None: token = self.wref_procedure_token() @@ -231,17 +212,6 @@ def set_param_inlining(self, value): self.inlining = value - def set_param_decay_halflife(self, value): - # Use 0 or -1 to mean "no decay". Initialize the internal variable - # 'log_decay_factor'. It is choosen such that by multiplying the - # counter on loops by 'exp(log_decay_factor)' (<= 1.0) every - # generation, then the counter will be divided by two after 'value' - # generations have passed. 
- if value <= 0: - self.log_decay_factor = 0.0 # log(1.0) - else: - self.log_decay_factor = math.log(0.5) / value - def set_param_enable_opts(self, value): from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT, ALL_OPTS_NAMES @@ -311,11 +281,6 @@ confirm_enter_jit = self.confirm_enter_jit range_red_args = unrolling_iterable( range(num_green_args, num_green_args + jitdriver_sd.num_red_args)) - memmgr = self.warmrunnerdesc.memory_manager - if memmgr is not None: - get_current_generation = memmgr.get_current_generation_uint - else: - get_current_generation = lambda: r_uint(0) # get a new specialized copy of the method ARGS = [] for kind in jitdriver_sd.red_args_types: @@ -360,8 +325,6 @@ if cell.counter >= 0: # update the profiling counter - cell.adjust_counter(get_current_generation(), - self.log_decay_factor) n = cell.counter + threshold if n <= self.THRESHOLD_LIMIT: # bound not reached cell.counter = n @@ -454,15 +417,6 @@ # return jit_getter - def _new_jitcell(self): - warmrunnerdesc = self.warmrunnerdesc - if (warmrunnerdesc is not None and - warmrunnerdesc.memory_manager is not None): - gen = warmrunnerdesc.memory_manager.get_current_generation_uint() - else: - gen = r_uint(0) - return JitCell(gen) - def _make_jitcell_getter_default(self): "NOT_RPYTHON" jitdriver_sd = self.jitdriver_sd @@ -538,7 +492,7 @@ if not build: return None _maybe_cleanup_dict() - cell = self._new_jitcell() + cell = JitCell() jitcell_dict[greenargs] = cell return cell return get_jitcell @@ -574,7 +528,7 @@ if not build: return cell if cell is None: - cell = self._new_jitcell() + cell = JitCell() # if we_are_translated(): cellref = cast_object_to_ptr(BASEJITCELL, cell) diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -395,7 +395,6 @@ 'retrace_limit': 5, 'max_retrace_guards': 15, 'enable_opts': 'all', - 'decay_halflife': 40, } unroll_parameters = unrolling_iterable(PARAMETERS.items()) DEFAULT = object() From noreply at buildbot.pypy.org Tue Dec 20 09:42:33 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Dec 2011 09:42:33 +0100 (CET) Subject: [pypy-commit] pypy counter-decay: Simplify and "stand-alone-ize" the clean-up of old jitcells from Message-ID: <20111220084233.E3B5C820B7@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: counter-decay Changeset: r50746:5f38cbc2c7f7 Date: 2011-12-20 09:30 +0100 http://bitbucket.org/pypy/pypy/changeset/5f38cbc2c7f7/ Log: Simplify and "stand-alone-ize" the clean-up of old jitcells from the jitcell dict. 
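The shape of the simplified clean-up can be read off the diff below; as a stand-alone sketch in plain Python (the 0.92 decay factor and the 5% floor mirror the patch, while the FakeJitCell stand-in and the surrounding scaffolding are illustrative only):

import sys

THRESHOLD_LIMIT = sys.maxsize // 2        # the real code uses sys.maxint // 2

class FakeJitCell(object):
    counter = 0                           # >= 0: profiling; -1: compiled; -2: tracing
    wref_procedure_token = None

def cleanup_jitcell_dict(jitcell_dict):
    # decay every profiling counter and drop cells that fell below 5% of
    # the limit; also drop compiled cells whose machine code is gone
    minimum = THRESHOLD_LIMIT // 20       # minimum 5%
    killme = []
    for key, cell in jitcell_dict.items():
        if cell.counter >= 0:
            cell.counter = int(cell.counter * 0.92)
            if cell.counter < minimum:
                killme.append(key)
        elif cell.counter == -1 and cell.wref_procedure_token is None:
            # approximation of the real check get_procedure_token() is None
            killme.append(key)
    for key in killme:
        del jitcell_dict[key]

Cells whose counter is -2 (tracing currently going on) are left alone, which matches the last test in the diff below.
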
diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -277,65 +277,50 @@ assert res is True def test_cleanup_jitcell_dict(): - from pypy.jit.metainterp.memmgr import MemoryManager - class FakeWarmRunnerDesc: - memory_manager = MemoryManager() - class cpu: - pass class FakeJitDriverSD: _green_args_spec = [lltype.Signed] # # Test creating tons of jitcells that remain at 0 - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() cell1 = get_jitcell(True, -1) assert len(warmstate._jitcell_dict) == 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 1 # for i in range(1, 20005): get_jitcell(True, i) # should trigger a clean-up at 20001 assert len(warmstate._jitcell_dict) == (i % 20000) + 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 2 # # Same test, with one jitcell that has a counter of BASE instead of 0 - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) - warmstate.set_param_decay_halflife(2) - warmstate.set_param_threshold(5) - warmstate.set_param_function_threshold(0) + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() cell2 = get_jitcell(True, -2) - cell2.counter = BASE = warmstate.increment_threshold * 3 + cell2.counter = BASE = warmstate.THRESHOLD_LIMIT // 2 # 50% # for i in range(0, 20005): get_jitcell(True, i) assert len(warmstate._jitcell_dict) == (i % 19999) + 2 # assert cell2 in warmstate._jitcell_dict.values() - assert cell2.counter == int(BASE * math.sqrt(0.5)) # decayed once - assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 + assert cell2.counter == int(BASE * 0.92) # decayed once # - # Same test, with jitcells that are compiled and free by the memmgr - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + # Same test, with jitcells that are compiled and freed by the memmgr + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() get_jitcell(True, -1) - assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 # for i in range(1, 20005): cell = get_jitcell(True, i) cell.counter = -1 cell.wref_procedure_token = None # or a dead weakref, equivalently assert len(warmstate._jitcell_dict) == (i % 20000) + 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 # # Same test, with counter == -2 (rare case, kept alive) - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() cell = get_jitcell(True, -1) cell.counter = -2 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 # for i in range(1, 20005): cell = get_jitcell(True, i) cell.counter = -2 assert len(warmstate._jitcell_dict) == i + 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 5 diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -446,44 +446,32 @@ except AttributeError: pass # - memmgr = self.warmrunnerdesc and self.warmrunnerdesc.memory_manager - if memmgr: - def _cleanup_dict(): - minimum = sys.maxint - if self.increment_threshold > 0: - minimum = min(minimum, self.increment_threshold) - 
if self.increment_function_threshold > 0: - minimum = min(minimum, self.increment_function_threshold) - currentgen = memmgr.get_current_generation_uint() - killme = [] - for key, cell in jitcell_dict.iteritems(): - if cell.counter >= 0: - cell.adjust_counter(currentgen, self.log_decay_factor) - if cell.counter < minimum: - killme.append(key) - elif (cell.counter == -1 - and cell.get_procedure_token() is None): + def _cleanup_dict(): + minimum = self.THRESHOLD_LIMIT // 20 # minimum 5% + killme = [] + for key, cell in jitcell_dict.iteritems(): + if cell.counter >= 0: + cell.counter = int(cell.counter * 0.92) + if cell.counter < minimum: killme.append(key) - for key in killme: - del jitcell_dict[key] - # - def _maybe_cleanup_dict(): - # If no tracing goes on at all because the jitcells are - # each time for new greenargs, the dictionary grows forever. - # So every one in a (rare) while, we decide to force an - # artificial next_generation() and _cleanup_dict(). - self._trigger_automatic_cleanup += 1 - if self._trigger_automatic_cleanup > 20000: - self._trigger_automatic_cleanup = 0 - memmgr.next_generation(do_cleanups_now=False) - _cleanup_dict() - # - self._trigger_automatic_cleanup = 0 - self._jitcell_dict = jitcell_dict # for tests - memmgr.record_jitcell_dict(_cleanup_dict) - else: - def _maybe_cleanup_dict(): - pass + elif (cell.counter == -1 + and cell.get_procedure_token() is None): + killme.append(key) + for key in killme: + del jitcell_dict[key] + # + def _maybe_cleanup_dict(): + # Once in a while, rarely, when too many entries have + # been put in the jitdict_dict, we do a cleanup phase: + # we decay all counters and kill entries with a too + # low counter. + self._trigger_automatic_cleanup += 1 + if self._trigger_automatic_cleanup > 20000: + self._trigger_automatic_cleanup = 0 + _cleanup_dict() + # + self._trigger_automatic_cleanup = 0 + self._jitcell_dict = jitcell_dict # for tests # def get_jitcell(build, *greenargs): try: @@ -503,7 +491,7 @@ get_jitcell_at_ptr = self.jitdriver_sd._get_jitcell_at_ptr set_jitcell_at_ptr = self.jitdriver_sd._set_jitcell_at_ptr lltohlhack = {} - # note that there is no equivalent of record_jitcell_dict() + # note that there is no equivalent of _maybe_cleanup_dict() # in the case of custom getters. We assume that the interpreter # stores the JitCells on some objects that can go away by GC, # like the PyCode objects in PyPy. 
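A quick back-of-the-envelope check on the constants chosen above (illustrative only): the clean-up decays every live counter by 0.92, and a clean-up pass is triggered roughly every 20000 freshly created jitcells, so

x, n = 1.0, 0
while x > 0.5:
    x *= 0.92
    n += 1
print(n)    # 9: a counter loses about half its value per ~9 clean-up passes

which means cells whose counter stays near zero (the first test above) fall under the 5% floor and are evicted at the first pass, while a cell seeded at 50% of THRESHOLD_LIMIT, as in the second test, easily survives a single pass (50% * 0.92 = 46%).
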
From noreply at buildbot.pypy.org Tue Dec 20 09:42:35 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Dec 2011 09:42:35 +0100 (CET) Subject: [pypy-commit] pypy default: Python 2.5 compat Message-ID: <20111220084235.17F00820B7@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50747:2eaaedd0fe70 Date: 2011-12-20 09:39 +0100 http://bitbucket.org/pypy/pypy/changeset/2eaaedd0fe70/ Log: Python 2.5 compat diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -1,3 +1,4 @@ +from __future__ import with_statement from pypy.jit.metainterp.optimizeopt.test.test_util import ( LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot) from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken From noreply at buildbot.pypy.org Tue Dec 20 09:42:36 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Dec 2011 09:42:36 +0100 (CET) Subject: [pypy-commit] pypy counter-decay: A minimal version of counter decaying. Message-ID: <20111220084236.3FFD0820B7@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: counter-decay Changeset: r50748:85efaef762fb Date: 2011-12-20 09:41 +0100 http://bitbucket.org/pypy/pypy/changeset/85efaef762fb/ Log: A minimal version of counter decaying. diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -151,6 +151,7 @@ # counter == -2: tracing is currently going on for this cell counter = 0 dont_trace_here = False + extra_delay = chr(0) wref_procedure_token = None def get_procedure_token(self): @@ -315,6 +316,36 @@ # assert 0, "should have raised" + def bound_reached(cell, *args): + # bound reached, but we do a last check: if it is the first + # time we reach the bound, or if another loop or bridge was + # compiled since the last time we reached it, then decrease + # the counter by a few percents instead. It should avoid + # sudden bursts of JIT-compilation, and also corner cases + # where we suddenly compile more than one loop because all + # counters reach the bound at the same time, but where + # compiling all but the first one is pointless. + curgen = warmrunnerdesc.memory_manager.current_generation + curgen = chr(intmask(curgen) & 0xFF) # only use 8 bits + if we_are_translated() and curgen != cell.extra_delay: + cell.counter = int(self.THRESHOLD_LIMIT * 0.98) + cell.extra_delay = curgen + return + # + if not confirm_enter_jit(*args): + cell.counter = 0 + return + # start tracing + from pypy.jit.metainterp.pyjitpl import MetaInterp + metainterp = MetaInterp(metainterp_sd, jitdriver_sd) + # set counter to -2, to mean "tracing in effect" + cell.counter = -2 + try: + metainterp.compile_and_run_once(jitdriver_sd, *args) + finally: + if cell.counter == -2: + cell.counter = 0 + def maybe_compile_and_run(threshold, *args): """Entry point to the JIT. Called at the point with the can_enter_jit() hint. 
@@ -329,19 +360,9 @@ if n <= self.THRESHOLD_LIMIT: # bound not reached cell.counter = n return - if not confirm_enter_jit(*args): - cell.counter = 0 + else: + bound_reached(cell, *args) return - # bound reached; start tracing - from pypy.jit.metainterp.pyjitpl import MetaInterp - metainterp = MetaInterp(metainterp_sd, jitdriver_sd) - # set counter to -2, to mean "tracing in effect" - cell.counter = -2 - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - finally: - if cell.counter == -2: - cell.counter = 0 else: if cell.counter != -1: assert cell.counter == -2 From noreply at buildbot.pypy.org Tue Dec 20 11:18:53 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Dec 2011 11:18:53 +0100 (CET) Subject: [pypy-commit] pypy default: Fix the repr of CallDescrs to not include a ", " because Message-ID: <20111220101853.A0B1E820B7@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50749:db66545368fd Date: 2011-12-20 10:13 +0000 http://bitbucket.org/pypy/pypy/changeset/db66545368fd/ Log: Fix the repr of CallDescrs to not include a "," because that confuses the jitlogparser to no end... diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -425,7 +425,7 @@ self.arg_classes.count('L')) == len(args_f or ()) def repr_of_descr(self): - return '' % (self.arg_classes, self.result_type) + return '' % (self.arg_classes, self.result_type) def map_type_to_argclass(ARG, accept_void=False): diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -313,6 +313,10 @@ def test_repr_of_descr(): + def repr_of_descr(descr): + s = descr.repr_of_descr() + assert ',' not in s # makes the life easier for pypy.tool.jitlogparser + return s c0 = GcCache(False) T = lltype.GcStruct('T') S = lltype.GcStruct('S', ('x', lltype.Char), @@ -320,34 +324,34 @@ ('z', lltype.Ptr(T))) descr1 = get_size_descr(c0, S) s = symbolic.get_size(S, False) - assert descr1.repr_of_descr() == '' % s + assert repr_of_descr(descr1) == '' % s # descr2 = get_field_descr(c0, S, 'y') o, _ = symbolic.get_field_token(S, 'y', False) - assert descr2.repr_of_descr() == '' % o + assert repr_of_descr(descr2) == '' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) - assert descr2i.repr_of_descr() == '' % o + assert repr_of_descr(descr2i) == '' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) o = symbolic.get_size(lltype.Ptr(S), False) - assert descr3.repr_of_descr() == '' % o + assert repr_of_descr(descr3) == '' % o # descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert descr3i.repr_of_descr() == '' + assert repr_of_descr(descr3i) == '' # descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) - assert descr4.repr_of_descr() == '' + assert repr_of_descr(descr4) == '' # descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) - assert descr4i.repr_of_descr() == '' + assert repr_of_descr(descr4i) == '' # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) - assert descr4f.repr_of_descr() == '' + assert repr_of_descr(descr4f) == '' # descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat) - assert descr5f.repr_of_descr() == '' + assert repr_of_descr(descr5f) == '' def test_call_stubs_1(): c0 = GcCache(False) From noreply at 
buildbot.pypy.org Tue Dec 20 13:44:08 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Tue, 20 Dec 2011 13:44:08 +0100 (CET) Subject: [pypy-commit] pypy set-strategies: merged set- with liststrategies. when initializing a set with lists they can copy the storage and strategy from that list without wrapping the storages content Message-ID: <20111220124408.DC187823F8@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: set-strategies Changeset: r50751:b0d872ae3261 Date: 2011-12-20 13:42 +0100 http://bitbucket.org/pypy/pypy/changeset/b0d872ae3261/ Log: merged set- with liststrategies. when initializing a set with lists they can copy the storage and strategy from that list without wrapping the storages content diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -508,6 +508,11 @@ def getitems_copy(self, w_list): return self._getitems_range(w_list, True) + getitems_wrapped = getitems_copy + + def getitems_unwrapped(self, w_list): + return self._getitems_range(w_list, False) + def getstorage_copy(self, w_list): # tuple is unmutable return w_list.lstorage @@ -698,6 +703,11 @@ def getitems_copy(self, w_list): return [self.wrap(item) for item in self.unerase(w_list.lstorage)] + getitems_wrapped = getitems_copy + + def getitems_unwrapped(self, w_list): + return self.unerase(w_list.lstorage) + @jit.unroll_safe def getitems_unroll(self, w_list): return [self.wrap(item) for item in self.unerase(w_list.lstorage)] @@ -926,6 +936,8 @@ def getitems(self, w_list): return self.unerase(w_list.lstorage) + getitems_wrapped = getitems + class IntegerListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0 _applevel_repr = "int" diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -13,6 +13,8 @@ from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.stringobject import W_StringObject +from pypy.objspace.std.listobject import IntegerListStrategy, StringListStrategy,\ + EmptyListStrategy, RangeListStrategy, ObjectListStrategy, FloatListStrategy class W_BaseSetObject(W_Object): typedef = None @@ -280,6 +282,9 @@ def get_empty_storage(self): return self.erase(None) + def get_storage_from_w_list(self, w_list): + return self.get_empty_storage() + def is_correct_type(self, w_key): return False @@ -384,6 +389,14 @@ setdata[self.unwrap(w_item)] = None return self.erase(setdata) + def get_storage_from_w_list(self, w_list): + items = w_list.strategy.getitems_unwrapped(w_list) + + setdata = self.get_empty_dict() + for item in items: + setdata[item] = None + return self.erase(setdata) + def length(self, w_set): return len(self.unerase(w_set.sstorage)) @@ -746,6 +759,14 @@ def get_empty_storage(self): return self.erase(self.get_empty_dict()) + def get_storage_from_w_list(self, w_list): + items = w_list.strategy.getitems_wrapped(w_list) + + setdata = self.get_empty_dict() + for item in items: + setdata[item] = None + return self.erase(setdata) + def get_empty_dict(self): return newset(self.space) @@ -883,6 +904,22 @@ def newset(space): return r_dict(space.eq_w, space.hash_w, force_non_null=True) +_strategy_map = { + EmptyListStrategy: EmptySetStrategy, + IntegerListStrategy: IntegerSetStrategy, + RangeListStrategy: IntegerSetStrategy, + StringListStrategy: StringSetStrategy, + FloatListStrategy: ObjectSetStrategy, + 
ObjectListStrategy: ObjectSetStrategy +} + +def set_strategy_and_setdata_from_listobject(space, w_set, w_list): + strategy_class = _strategy_map[w_list.strategy.__class__] + strategy = space.fromcache(strategy_class) + + w_set.sstorage = strategy.get_storage_from_w_list(w_list) + w_set.strategy = strategy + def set_strategy_and_setdata(space, w_set, w_iterable): from pypy.objspace.std.intobject import W_IntObject if w_iterable is None : @@ -895,6 +932,10 @@ w_set.sstorage = w_iterable.get_storage_copy() return + if isinstance(w_iterable, W_ListObject): + set_strategy_and_setdata_from_listobject(space, w_set, w_iterable) + return + iterable_w = space.listview(w_iterable) if len(iterable_w) == 0: diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -8,7 +8,7 @@ is not too wrong. """ import py.test -from pypy.objspace.std.setobject import W_SetObject, W_FrozensetObject +from pypy.objspace.std.setobject import W_SetObject, W_FrozensetObject, IntegerSetStrategy from pypy.objspace.std.setobject import _initialize_set from pypy.objspace.std.setobject import newset from pypy.objspace.std.setobject import and__Set_Set @@ -83,6 +83,45 @@ result = set_intersection__Set(space, a, [d,c,b]) assert space.is_true(self.space.eq(result, W_SetObject(space, self.space.wrap("")))) + def test_create_set_from_list(self): + from pypy.objspace.std.setobject import ObjectSetStrategy, StringSetStrategy + from pypy.objspace.std.floatobject import W_FloatObject + from pypy.objspace.std.model import W_Object + + w = self.space.wrap + intstr = self.space.fromcache(IntegerSetStrategy) + tmp_func = intstr.get_storage_from_list + # test if get_storage_from_list is no longer used + intstr.get_storage_from_list = None + + w_list = W_ListObject(self.space, [w(1), w(2), w(3)]) + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, w_list) + assert w_set.strategy is intstr + assert intstr.unerase(w_set.sstorage) == {1:None, 2:None, 3:None} + + w_list = W_ListObject(self.space, [w("1"), w("2"), w("3")]) + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, w_list) + assert w_set.strategy is self.space.fromcache(StringSetStrategy) + assert w_set.strategy.unerase(w_set.sstorage) == {"1":None, "2":None, "3":None} + + w_list = W_ListObject(self.space, [w("1"), w(2), w("3")]) + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, w_list) + assert w_set.strategy is self.space.fromcache(ObjectSetStrategy) + for item in w_set.strategy.unerase(w_set.sstorage): + assert isinstance(item, W_Object) + + w_list = W_ListObject(self.space, [w(1.0), w(2.0), w(3.0)]) + w_set = W_SetObject(self.space) + _initialize_set(self.space, w_set, w_list) + assert w_set.strategy is self.space.fromcache(ObjectSetStrategy) + for item in w_set.strategy.unerase(w_set.sstorage): + assert isinstance(item, W_FloatObject) + + # changed cached object, need to change it back for other tests to pass + intstr.get_storage_from_list = tmp_func class AppTestAppSetTest: diff --git a/pypy/objspace/std/test/test_setstrategies.py b/pypy/objspace/std/test/test_setstrategies.py --- a/pypy/objspace/std/test/test_setstrategies.py +++ b/pypy/objspace/std/test/test_setstrategies.py @@ -5,7 +5,7 @@ class TestW_SetStrategies: def wrapped(self, l): - return W_ListObject([self.space.wrap(x) for x in l]) + return W_ListObject(self.space, [self.space.wrap(x) for x in l]) def test_from_list(self): s = 
W_SetObject(self.space, self.wrapped([1,2,3,4,5])) From noreply at buildbot.pypy.org Tue Dec 20 13:44:07 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Tue, 20 Dec 2011 13:44:07 +0100 (CET) Subject: [pypy-commit] pypy set-strategies: merge default Message-ID: <20111220124407.A5093820B7@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: set-strategies Changeset: r50750:a715f89d7712 Date: 2011-12-15 10:28 +0100 http://bitbucket.org/pypy/pypy/changeset/a715f89d7712/ Log: merge default diff too long, truncating to 10000 out of 27525 lines diff --git a/lib-python/modified-2.7/ctypes/__init__.py b/lib-python/modified-2.7/ctypes/__init__.py --- a/lib-python/modified-2.7/ctypes/__init__.py +++ b/lib-python/modified-2.7/ctypes/__init__.py @@ -351,7 +351,7 @@ self._FuncPtr = _FuncPtr if handle is None: - self._handle = _ffi.CDLL(name) + self._handle = _ffi.CDLL(name, mode) else: self._handle = handle diff --git a/lib-python/modified-2.7/ctypes/test/test_callbacks.py b/lib-python/modified-2.7/ctypes/test/test_callbacks.py --- a/lib-python/modified-2.7/ctypes/test/test_callbacks.py +++ b/lib-python/modified-2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test class Callbacks(unittest.TestCase): @@ -98,6 +99,7 @@ ## self.check_type(c_char_p, "abc") ## self.check_type(c_char_p, "def") + @xfail def test_pyobject(self): o = () from sys import getrefcount as grc diff --git a/lib-python/modified-2.7/ctypes/test/test_libc.py b/lib-python/modified-2.7/ctypes/test/test_libc.py --- a/lib-python/modified-2.7/ctypes/test/test_libc.py +++ b/lib-python/modified-2.7/ctypes/test/test_libc.py @@ -25,7 +25,10 @@ lib.my_qsort(chars, len(chars)-1, sizeof(c_char), comparefunc(sort)) self.assertEqual(chars.raw, " ,,aaaadmmmnpppsss\x00") - def test_no_more_xfail(self): + def SKIPPED_test_no_more_xfail(self): + # We decided to not explicitly support the whole ctypes-2.7 + # and instead go for a case-by-case, demand-driven approach. + # So this test is skipped instead of failing. import socket import ctypes.test self.assertTrue(not hasattr(ctypes.test, 'xfail'), diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -379,12 +379,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): @@ -404,7 +406,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# -*- coding: iso-8859-1 +# -*- coding: iso-8859-1 -*- # Note that PyPy contains also a built-in module 'sha' which will hide # this one if compiled in. 
diff --git a/lib_pypy/distributed/socklayer.py b/lib_pypy/distributed/socklayer.py --- a/lib_pypy/distributed/socklayer.py +++ b/lib_pypy/distributed/socklayer.py @@ -2,7 +2,7 @@ import py from socket import socket -XXX needs import adaptation as 'green' is removed from py lib for years +raise ImportError("XXX needs import adaptation as 'green' is removed from py lib for years") from py.impl.green.msgstruct import decodemessage, message from socket import socket, AF_INET, SOCK_STREAM import marshal diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -25,7 +25,7 @@ __all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee'] + 'takewhile', 'tee', 'compress', 'product'] try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -164,6 +164,7 @@ # if something: # assume this causes a NameError # # _this_ lines and the one # below we don't want from entry.getsource() + end = min(end, len(source)) for i in range(self.lineno, end): if source[i].rstrip().endswith(':'): end = i + 1 @@ -307,7 +308,7 @@ self._striptext = 'AssertionError: ' self._excinfo = tup self.type, self.value, tb = self._excinfo - self.typename = self.type.__name__ + self.typename = getattr(self.type, "__name__", "???") self.traceback = py.code.Traceback(tb) def __repr__(self): diff --git a/pypy/annotation/binaryop.py b/pypy/annotation/binaryop.py --- a/pypy/annotation/binaryop.py +++ b/pypy/annotation/binaryop.py @@ -252,7 +252,26 @@ # unsignedness is considered a rare and contagious disease def union((int1, int2)): - knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + if int1.unsigned == int2.unsigned: + knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + else: + t1 = int1.knowntype + if t1 is bool: + t1 = int + t2 = int2.knowntype + if t2 is bool: + t2 = int + + if t2 is int: + if int2.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t1 + knowntype = t1 + elif t1 is int: + if int1.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t2 + knowntype = t2 + else: + raise UnionError, "Merging these types (%s, %s) is not supported" % (t1, t2) return SomeInteger(nonneg=int1.nonneg and int2.nonneg, knowntype=knowntype) diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -591,13 +591,11 @@ immutable = True def __init__(self, method): self.method = method - -NUMBER = object() + annotation_to_ll_map = [ (SomeSingleFloat(), lltype.SingleFloat), (s_None, lltype.Void), # also matches SomeImpossibleValue() (s_Bool, lltype.Bool), - (SomeInteger(knowntype=r_ulonglong), NUMBER), (SomeFloat(), lltype.Float), (SomeLongFloat(), lltype.LongFloat), (SomeChar(), lltype.Char), @@ -623,10 +621,11 @@ return lltype.Ptr(p.PARENTTYPE) if isinstance(s_val, SomePtr): return s_val.ll_ptrtype + if type(s_val) is SomeInteger: + return lltype.build_number(None, s_val.knowntype) + for witness, T in annotation_to_ll_map: if witness.contains(s_val): - if T is NUMBER: - return lltype.build_number(None, s_val.knowntype) return T if info is None: info = '' @@ -635,7 +634,7 @@ raise ValueError("%sshould return a low-level type,\ngot instead %r" % ( info, s_val)) 
-ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map if ll is not NUMBER]) +ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map]) def lltype_to_annotation(T): try: diff --git a/pypy/annotation/specialize.py b/pypy/annotation/specialize.py --- a/pypy/annotation/specialize.py +++ b/pypy/annotation/specialize.py @@ -36,9 +36,7 @@ newtup = SpaceOperation('newtuple', starargs, argscopy[-1]) newstartblock.operations.append(newtup) newstartblock.closeblock(Link(argscopy, graph.startblock)) - graph.startblock.isstartblock = False graph.startblock = newstartblock - newstartblock.isstartblock = True argnames = argnames + ['.star%d' % i for i in range(nb_extra_args)] graph.signature = Signature(argnames) # note that we can mostly ignore defaults: if nb_extra_args > 0, diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py --- a/pypy/annotation/test/test_annrpython.py +++ b/pypy/annotation/test/test_annrpython.py @@ -856,6 +856,46 @@ py.test.raises(Exception, a.build_types, f, []) # if you want to get a r_uint, you have to be explicit about it + def test_add_different_ints(self): + def f(a, b): + return a + b + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_different_ints(self): + def f(a, b): + if a: + c = a + else: + c = b + return c + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_ruint_zero(self): + def f(a): + if a: + c = a + else: + c = 0 + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_merge_ruint_nonneg_signed(self): + def f(a, b): + if a: + c = a + else: + assert b >= 0 + c = b + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint, int]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_prebuilt_long_that_is_not_too_long(self): small_constant = 12L def f(): @@ -3029,7 +3069,7 @@ if g(x, y): g(x, r_uint(y)) a = self.RPythonAnnotator() - a.build_types(f, [int, int]) + py.test.raises(Exception, a.build_types, f, [int, int]) def test_compare_with_zero(self): def g(): diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py --- a/pypy/bin/checkmodule.py +++ b/pypy/bin/checkmodule.py @@ -1,43 +1,45 @@ #! /usr/bin/env python """ -Usage: checkmodule.py [-b backend] +Usage: checkmodule.py -Compiles the PyPy extension module from pypy/module// -into a fake program which does nothing. Useful for testing whether a -modules compiles without doing a full translation. Default backend is cli. - -WARNING: this is still incomplete: there are chances that the -compilation fails with strange errors not due to the module. If a -module is known to compile during a translation but don't pass -checkmodule.py, please report the bug (or, better, correct it :-). +Check annotation and rtyping of the PyPy extension module from +pypy/module//. Useful for testing whether a +modules compiles without doing a full translation. 
""" import autopath -import sys +import sys, os from pypy.objspace.fake.checkmodule import checkmodule def main(argv): - try: - assert len(argv) in (2, 4) - if len(argv) == 2: - backend = 'cli' - modname = argv[1] - if modname in ('-h', '--help'): - print >> sys.stderr, __doc__ - sys.exit(0) - if modname.startswith('-'): - print >> sys.stderr, "Bad command line" - print >> sys.stderr, __doc__ - sys.exit(1) - else: - _, b, backend, modname = argv - assert b == '-b' - except AssertionError: + if len(argv) != 2: print >> sys.stderr, __doc__ sys.exit(2) + modname = argv[1] + if modname in ('-h', '--help'): + print >> sys.stderr, __doc__ + sys.exit(0) + if modname.startswith('-'): + print >> sys.stderr, "Bad command line" + print >> sys.stderr, __doc__ + sys.exit(1) + if os.path.sep in modname: + if os.path.basename(modname) == '': + modname = os.path.dirname(modname) + if os.path.basename(os.path.dirname(modname)) != 'module': + print >> sys.stderr, "Must give '../module/xxx', or just 'xxx'." + sys.exit(1) + modname = os.path.basename(modname) + try: + checkmodule(modname) + except Exception, e: + import traceback, pdb + traceback.print_exc() + pdb.post_mortem(sys.exc_info()[2]) + return 1 else: - checkmodule(modname, backend, interactive=True) - print 'Module compiled succesfully' + print 'Passed.' + return 0 if __name__ == '__main__': - main(sys.argv) + sys.exit(main(sys.argv)) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -252,6 +252,10 @@ "use small tuples", default=False), + BoolOption("withspecialisedtuple", + "use specialised tuples", + default=False), + BoolOption("withrope", "use ropes as the string implementation", default=False, requires=[("objspace.std.withstrslice", False), @@ -365,6 +369,7 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) + config.objspace.std.suggest(withspecialisedtuple=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -69,8 +69,8 @@ "statistics": [("translation.gctransformer", "framework")], "generation": [("translation.gctransformer", "framework")], "hybrid": [("translation.gctransformer", "framework")], - "boehm": [("translation.gctransformer", "boehm"), - ("translation.continuation", False)], # breaks + "boehm": [("translation.continuation", False), # breaks + ("translation.gctransformer", "boehm")], "markcompact": [("translation.gctransformer", "framework")], "minimark": [("translation.gctransformer", "framework")], }, diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -496,6 +496,17 @@ def setup(self): super(AppClassCollector, self).setup() cls = self.obj + # + # + for name in dir(cls): + if name.startswith('test_'): + func = getattr(cls, name, None) + code = getattr(func, 'func_code', None) + if code and code.co_flags & 32: + raise AssertionError("unsupported: %r is a generator " + "app-level test method" % (name,)) + # + # space = cls.space clsname = cls.__name__ if self.config.option.runappdirect: diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. 
-version = '1.6' +version = '1.7' # The full version, including alpha/beta/rc tags. -release = '1.6' +release = '1.7' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/config/objspace.std.withspecialisedtuple.txt b/pypy/doc/config/objspace.std.withspecialisedtuple.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withspecialisedtuple.txt @@ -0,0 +1,3 @@ +Use "specialized tuples", a custom implementation for some common kinds +of tuples. Currently limited to tuples of length 2, in three variants: +(int, int), (float, float), and a generic (object, object). diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -304,5 +304,14 @@ never a dictionary as it sometimes is in CPython. Assigning to ``__builtins__`` has no effect. +* directly calling the internal magic methods of a few built-in types + with invalid arguments may have a slightly different result. For + example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return + ``NotImplemented`` on PyPy; on CPython, only the later does, and the + former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` + both raise ``TypeError`` everywhere.) This difference is an + implementation detail that shows up because of internal C-level slots + that PyPy does not have. + .. include:: _ref.txt diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -112,10 +112,32 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. +Note that the JIT has a very high warm-up cost, meaning that the +programs are slow at the beginning. If you want to compare the timings +with CPython, even relatively simple programs need to run *at least* one +second, preferrably at least a few seconds. Large, complicated programs +need even more time to warm-up the JIT. + .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +--------------------------------------------------------------- +Couldn't the JIT dump and reload already-compiled machine code? +--------------------------------------------------------------- + +No, we found no way of doing that. The JIT generates machine code +containing a large number of constant addresses --- constant at the time +the machine code is written. The vast majority is probably not at all +constants that you find in the executable, with a nice link name. E.g. +the addresses of Python classes are used all the time, but Python +classes don't come statically from the executable; they are created anew +every time you restart your program. This makes saving and reloading +machine code completely impossible without some very advanced way of +mapping addresses in the old (now-dead) process to addresses in the new +process, including checking that all the previous assumptions about the +(now-dead) object are still true about the new object. + .. _`prolog and javascript`: diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. 
_`Release 1.6`: http://pypy.org/download.html +.. _`Release 1.7`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. Windows is supported, but platform-specific bugs tend to take longer before we notice and fix diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -1,6 +1,5 @@ """codegen helpers and AST constant folding.""" import sys -import itertools from pypy.interpreter.astcompiler import ast, consts, misc from pypy.tool import stdlib_opcode as ops @@ -146,8 +145,7 @@ } unrolling_unary_folders = unrolling_iterable(unary_folders.items()) -for folder in itertools.chain(binary_folders.itervalues(), - unary_folders.itervalues()): +for folder in binary_folders.values() + unary_folders.values(): folder._always_inline_ = True del folder diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1,4 +1,3 @@ -import itertools import pypy from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction @@ -188,6 +187,12 @@ # ------------------------------------------------------------------- + def is_w(self, space, w_other): + return self is w_other + + def immutable_unique_id(self, space): + return None + def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) raise OperationError(space.w_TypeError, w_msg) @@ -482,6 +487,16 @@ 'parser', 'fcntl', '_codecs', 'binascii' ] + # These modules are treated like CPython treats built-in modules, + # i.e. they always shadow any xx.py. The other modules are treated + # like CPython treats extension modules, and are loaded in sys.path + # order by the fake entry '.../lib_pypy/__extensions__'. + MODULES_THAT_ALWAYS_SHADOW = dict.fromkeys([ + '__builtin__', '__pypy__', '_ast', '_codecs', '_sre', '_warnings', + '_weakref', 'errno', 'exceptions', 'gc', 'imp', 'marshal', + 'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport', + ], None) + def make_builtins(self): "NOT_RPYTHON: only for initializing the space." @@ -513,8 +528,8 @@ exception_types_w = self.export_builtin_exceptions() # initialize with "bootstrap types" from objspace (e.g. 
w_None) - types_w = itertools.chain(self.get_builtin_types().iteritems(), - exception_types_w.iteritems()) + types_w = (self.get_builtin_types().items() + + exception_types_w.items()) for name, w_type in types_w: self.setitem(self.builtin.w_dict, self.wrap(name), w_type) @@ -681,9 +696,20 @@ """shortcut for space.is_true(space.eq(w_obj1, w_obj2))""" return self.is_w(w_obj1, w_obj2) or self.is_true(self.eq(w_obj1, w_obj2)) - def is_w(self, w_obj1, w_obj2): - """shortcut for space.is_true(space.is_(w_obj1, w_obj2))""" - return self.is_true(self.is_(w_obj1, w_obj2)) + def is_(self, w_one, w_two): + return self.newbool(self.is_w(w_one, w_two)) + + def is_w(self, w_one, w_two): + # done by a method call on w_two (and not on w_one, because of the + # expected programming style where we say "if x is None" or + # "if x is object"). + return w_two.is_w(self, w_one) + + def id(self, w_obj): + w_result = w_obj.immutable_unique_id(self) + if w_result is None: + w_result = self.wrap(compute_unique_id(w_obj)) + return w_result def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" @@ -1023,9 +1049,6 @@ def isinstance_w(self, w_obj, w_type): return self.is_true(self.isinstance(w_obj, w_type)) - def id(self, w_obj): - return self.wrap(compute_unique_id(w_obj)) - # The code below only works # for the simple case (new-style instance). # These methods are patched with the full logic by the __builtin__ @@ -1597,6 +1620,8 @@ 'UnicodeError', 'ValueError', 'ZeroDivisionError', + 'UnicodeEncodeError', + 'UnicodeDecodeError', ] ## Irregular part of the interface: diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated, instantiate from pypy.rlib.jit import hint from pypy.rlib.debug import make_sure_not_resized, check_nonneg -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rlib import jit from pypy.tool import stdlib_opcode from pypy.tool.stdlib_opcode import host_bytecode_spec @@ -167,7 +167,7 @@ # Execution starts just after the last_instr. Initially, # last_instr is -1. After a generator suspends it points to # the YIELD_VALUE instruction. 
- next_instr = self.last_instr + 1 + next_instr = r_uint(self.last_instr + 1) if next_instr != 0: self.pushvalue(w_inputvalue) # @@ -691,6 +691,7 @@ handlerposition = space.int_w(w_handlerposition) valuestackdepth = space.int_w(w_valuestackdepth) assert valuestackdepth >= 0 + assert handlerposition >= 0 blk = instantiate(get_block_class(opname)) blk.handlerposition = handlerposition blk.valuestackdepth = valuestackdepth diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -837,6 +837,7 @@ raise Yield def jump_absolute(self, jumpto, next_instr, ec): + check_nonneg(jumpto) return jumpto def JUMP_FORWARD(self, jumpby, next_instr): @@ -1278,7 +1279,7 @@ def handle(self, frame, unroller): next_instr = self.really_handle(frame, unroller) # JIT hack - return next_instr + return r_uint(next_instr) def really_handle(self, frame, unroller): """ Purely abstract method diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -54,7 +54,11 @@ # Hash support def default_identity_hash(space, w_obj): - return space.wrap(compute_identity_hash(w_obj)) + w_unique_id = w_obj.immutable_unique_id(space) + if w_unique_id is None: # common case + return space.wrap(compute_identity_hash(w_obj)) + else: + return space.hash(w_unique_id) # ____________________________________________________________ # diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -8,6 +8,7 @@ from pypy.objspace.flow.model import Variable, Constant from pypy.annotation import model as annmodel from pypy.jit.metainterp.history import REF, INT, FLOAT +from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -20,7 +21,7 @@ from pypy.jit.backend.llgraph import symbolic from pypy.jit.codewriter import longlong -from pypy.rlib import libffi +from pypy.rlib import libffi, clibffi from pypy.rlib.objectmodel import ComputedIntSymbolic, we_are_translated from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint @@ -48,6 +49,11 @@ value._the_opaque_pointer = op return op +def _normalize(value): + if isinstance(value, lltype._ptr): + value = lltype.top_container(value._obj) + return value + def from_opaque_string(s): if isinstance(s, str): return s @@ -322,6 +328,14 @@ _variables.append(v) return r +def compile_started_vars(clt): + if not hasattr(clt, '_debug_argtypes'): # only when compiling the loop + argtypes = [v.concretetype for v in _variables] + try: + clt._debug_argtypes = argtypes + except AttributeError: # when 'clt' is actually a translated + pass # GcStruct + def compile_add(loop, opnum): loop = _from_opaque(loop) loop.operations.append(Operation(opnum)) @@ -347,6 +361,16 @@ op = loop.operations[-1] op.descr = weakref.ref(descr) +TARGET_TOKENS = weakref.WeakKeyDictionary() + +def compile_add_target_token(loop, descr, clt): + # here, 'clt' is the compiled_loop_token of the original loop that + # we are compiling + loop = _from_opaque(loop) + op = loop.operations[-1] + descrobj = _normalize(descr) + TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args, clt + def compile_add_var(loop, intvar): loop = _from_opaque(loop) op = loop.operations[-1] @@ 
-381,13 +405,25 @@ _variables.append(v) return r -def compile_add_jump_target(loop, loop_target): +def compile_add_jump_target(loop, targettoken, source_clt): loop = _from_opaque(loop) - loop_target = _from_opaque(loop_target) + descrobj = _normalize(targettoken) + (loop_target, target_opindex, target_inputargs, target_clt + ) = TARGET_TOKENS[descrobj] + # + try: + assert source_clt._debug_argtypes == target_clt._debug_argtypes + except AttributeError: # when translated + pass + # op = loop.operations[-1] op.jump_target = loop_target + op.jump_target_opindex = target_opindex + op.jump_target_inputargs = target_inputargs assert op.opnum == rop.JUMP - assert len(op.args) == len(loop_target.inputargs) + assert [v.concretetype for v in op.args] == ( + [v.concretetype for v in target_inputargs]) + # if loop_target == loop: log.info("compiling new loop") else: @@ -521,10 +557,11 @@ self.opindex += 1 continue if op.opnum == rop.JUMP: - assert len(op.jump_target.inputargs) == len(args) - self.env = dict(zip(op.jump_target.inputargs, args)) + inputargs = op.jump_target_inputargs + assert len(inputargs) == len(args) + self.env = dict(zip(inputargs, args)) self.loop = op.jump_target - self.opindex = 0 + self.opindex = op.jump_target_opindex _stats.exec_jumps += 1 elif op.opnum == rop.FINISH: if self.verbose: @@ -617,6 +654,15 @@ # return _op_default_implementation + def op_label(self, _, *args): + op = self.loop.operations[self.opindex] + assert op.opnum == rop.LABEL + assert len(op.args) == len(args) + newenv = {} + for v, value in zip(op.args, args): + newenv[v] = value + self.env = newenv + def op_debug_merge_point(self, _, *args): from pypy.jit.metainterp.warmspot import get_stats try: @@ -959,6 +1005,7 @@ self._may_force = self.opindex try: inpargs = _from_opaque(ctl.compiled_version).inputargs + assert len(inpargs) == len(args) for i, inparg in enumerate(inpargs): TYPE = inparg.concretetype if TYPE is lltype.Signed: @@ -1432,6 +1479,10 @@ res = _getinteriorfield_raw(libffi.types.slong, array, index, width, ofs) return res +def do_getinteriorfield_raw_float(array, index, width, ofs): + res = _getinteriorfield_raw(libffi.types.double, array, index, width, ofs) + return res + def _getfield_raw(struct, fieldnum): STRUCT, fieldname = symbolic.TokenToField[fieldnum] ptr = cast_from_int(lltype.Ptr(STRUCT), struct) @@ -1510,12 +1561,17 @@ do_setinteriorfield_gc_float = new_setinteriorfield_gc(cast_from_floatstorage) do_setinteriorfield_gc_ptr = new_setinteriorfield_gc(cast_from_ptr) -def new_setinteriorfield_raw(ffitype): +def new_setinteriorfield_raw(cast_func, ffitype): def do_setinteriorfield_raw(array, index, newvalue, width, ofs): addr = rffi.cast(rffi.VOIDP, array) + for TYPE, ffitype2 in clibffi.ffitype_map: + if ffitype2 is ffitype: + newvalue = cast_func(TYPE, newvalue) + break return libffi.array_setitem(ffitype, width, addr, index, ofs, newvalue) return do_setinteriorfield_raw -do_setinteriorfield_raw_int = new_setinteriorfield_raw(libffi.types.slong) +do_setinteriorfield_raw_int = new_setinteriorfield_raw(cast_from_int, libffi.types.slong) +do_setinteriorfield_raw_float = new_setinteriorfield_raw(cast_from_floatstorage, libffi.types.double) def do_setfield_raw_int(struct, fieldnum, newvalue): STRUCT, fieldname = symbolic.TokenToField[fieldnum] @@ -1779,9 +1835,11 @@ setannotation(compile_start_int_var, annmodel.SomeInteger()) setannotation(compile_start_ref_var, annmodel.SomeInteger()) setannotation(compile_start_float_var, annmodel.SomeInteger()) +setannotation(compile_started_vars, 
annmodel.s_None) setannotation(compile_add, annmodel.s_None) setannotation(compile_add_descr, annmodel.s_None) setannotation(compile_add_descr_arg, annmodel.s_None) +setannotation(compile_add_target_token, annmodel.s_None) setannotation(compile_add_var, annmodel.s_None) setannotation(compile_add_int_const, annmodel.s_None) setannotation(compile_add_ref_const, annmodel.s_None) diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -138,29 +138,30 @@ clt = original_loop_token.compiled_loop_token clt.loop_and_bridges.append(c) clt.compiling_a_bridge() - self._compile_loop_or_bridge(c, inputargs, operations) + self._compile_loop_or_bridge(c, inputargs, operations, clt) old, oldindex = faildescr._compiled_fail llimpl.compile_redirect_fail(old, oldindex, c) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, inputargs, operations, jitcell_token, + log=True, name=''): """In a real assembler backend, this should assemble the given list of operations. Here we just generate a similar CompiledLoop instance. The code here is RPython, whereas the code in llimpl is not. """ c = llimpl.compile_start() - clt = model.CompiledLoopToken(self, looptoken.number) + clt = model.CompiledLoopToken(self, jitcell_token.number) clt.loop_and_bridges = [c] clt.compiled_version = c - looptoken.compiled_loop_token = clt - self._compile_loop_or_bridge(c, inputargs, operations) + jitcell_token.compiled_loop_token = clt + self._compile_loop_or_bridge(c, inputargs, operations, clt) def free_loop_and_bridges(self, compiled_loop_token): for c in compiled_loop_token.loop_and_bridges: llimpl.mark_as_free(c) model.AbstractCPU.free_loop_and_bridges(self, compiled_loop_token) - def _compile_loop_or_bridge(self, c, inputargs, operations): + def _compile_loop_or_bridge(self, c, inputargs, operations, clt): var2index = {} for box in inputargs: if isinstance(box, history.BoxInt): @@ -172,10 +173,11 @@ var2index[box] = llimpl.compile_start_float_var(c) else: raise Exception("box is: %r" % (box,)) - self._compile_operations(c, operations, var2index) + llimpl.compile_started_vars(clt) + self._compile_operations(c, operations, var2index, clt) return c - def _compile_operations(self, c, operations, var2index): + def _compile_operations(self, c, operations, var2index, clt): for op in operations: llimpl.compile_add(c, op.getopnum()) descr = op.getdescr() @@ -183,9 +185,11 @@ llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo, descr.arg_types, descr.extrainfo, descr.width) - if (isinstance(descr, history.LoopToken) and - op.getopnum() != rop.JUMP): + if isinstance(descr, history.JitCellToken): + assert op.getopnum() != rop.JUMP llimpl.compile_add_loop_token(c, descr) + if isinstance(descr, history.TargetToken) and op.getopnum() == rop.LABEL: + llimpl.compile_add_target_token(c, descr, clt) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython c._obj.externalobj.operations[-1].setdescr(descr) @@ -239,9 +243,7 @@ assert op.is_final() if op.getopnum() == rop.JUMP: targettoken = op.getdescr() - assert isinstance(targettoken, history.LoopToken) - compiled_version = targettoken.compiled_loop_token.compiled_version - llimpl.compile_add_jump_target(c, compiled_version) + llimpl.compile_add_jump_target(c, targettoken, clt) elif op.getopnum() == rop.FINISH: faildescr = op.getdescr() index = self.get_fail_descr_number(faildescr) @@ -260,21 +262,28 @@ 
self.latest_frame = frame return fail_index - def execute_token(self, loop_token): - """Calls the assembler generated for the given loop. - Returns the ResOperation that failed, of type rop.FAIL. - """ - fail_index = self._execute_token(loop_token) - return self.get_fail_descr_from_number(fail_index) - - def set_future_value_int(self, index, intvalue): - llimpl.set_future_value_int(index, intvalue) - - def set_future_value_ref(self, index, objvalue): - llimpl.set_future_value_ref(index, objvalue) - - def set_future_value_float(self, index, floatvalue): - llimpl.set_future_value_float(index, floatvalue) + def make_execute_token(self, *argtypes): + nb_args = len(argtypes) + unroll_argtypes = unrolling_iterable(list(enumerate(argtypes))) + # + def execute_token(loop_token, *args): + assert len(args) == nb_args + for index, TYPE in unroll_argtypes: + x = args[index] + assert TYPE == lltype.typeOf(x) + if TYPE == lltype.Signed: + llimpl.set_future_value_int(index, x) + elif TYPE == llmemory.GCREF: + llimpl.set_future_value_ref(index, x) + elif TYPE == longlong.FLOATSTORAGE: + llimpl.set_future_value_float(index, x) + else: + assert 0 + # + fail_index = self._execute_token(loop_token) + return self.get_fail_descr_from_number(fail_index) + # + return execute_token def get_latest_value_int(self, index): return llimpl.frame_int_getvalue(self.latest_frame, index) diff --git a/pypy/jit/backend/llsupport/asmmemmgr.py b/pypy/jit/backend/llsupport/asmmemmgr.py --- a/pypy/jit/backend/llsupport/asmmemmgr.py +++ b/pypy/jit/backend/llsupport/asmmemmgr.py @@ -37,25 +37,25 @@ self._add_free_block(smaller_stop, stop) stop = smaller_stop result = (start, stop) - self.total_mallocs += stop - start + self.total_mallocs += r_uint(stop - start) return result # pair (start, stop) def free(self, start, stop): """Free a block (start, stop) returned by a previous malloc().""" - self.total_mallocs -= (stop - start) + self.total_mallocs -= r_uint(stop - start) self._add_free_block(start, stop) def open_malloc(self, minsize): """Allocate at least minsize bytes. 
Returns (start, stop).""" result = self._allocate_block(minsize) (start, stop) = result - self.total_mallocs += stop - start + self.total_mallocs += r_uint(stop - start) return result def open_free(self, middle, stop): """Used for freeing the end of an open-allocated block of memory.""" if stop - middle >= self.min_fragment: - self.total_mallocs -= (stop - middle) + self.total_mallocs -= r_uint(stop - middle) self._add_free_block(middle, stop) return True else: @@ -77,7 +77,7 @@ # Hack to make sure that mcs are not within 32-bits of one # another for testing purposes rmmap.hint.pos += 0x80000000 - size - self.total_memory_allocated += size + self.total_memory_allocated += r_uint(size) data = rffi.cast(lltype.Signed, data) return self._add_free_block(data, data + size) diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -648,14 +648,10 @@ # make a malloc function, with two arguments def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - has_finalizer = bool(tid & (1<= len(self.used): + self.used.append(False) + if self.used[index + i]: + return False # already in use + # good, we can reuse the location + for i in range(size): + self.used[index + i] = True + self.bindings[box] = loc + return True + # abstract methods that need to be overwritten for specific assemblers @staticmethod def frame_pos(loc, type): @@ -49,6 +123,10 @@ @staticmethod def frame_size(type): return 1 + @staticmethod + def get_loc_index(loc): + raise NotImplementedError("Purely abstract") + class RegisterManager(object): """ Class that keeps track of register allocations @@ -68,7 +146,14 @@ self.frame_manager = frame_manager self.assembler = assembler + def is_still_alive(self, v): + # Check if 'v' is alive at the current position. + # Return False if the last usage is strictly before. + return self.longevity[v][1] >= self.position + def stays_alive(self, v): + # Check if 'v' stays alive after the current position. + # Return False if the last usage is before or at position. return self.longevity[v][1] > self.position def next_instruction(self, incr=1): @@ -84,11 +169,14 @@ point for all variables that might be in registers. """ self._check_type(v) - if isinstance(v, Const) or v not in self.reg_bindings: + if isinstance(v, Const): return if v not in self.longevity or self.longevity[v][1] <= self.position: - self.free_regs.append(self.reg_bindings[v]) - del self.reg_bindings[v] + if v in self.reg_bindings: + self.free_regs.append(self.reg_bindings[v]) + del self.reg_bindings[v] + if self.frame_manager is not None: + self.frame_manager.mark_as_free(v) def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. 
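
The regalloc.py hunk above gives FrameManager a per-slot `used` list plus mark_as_free() and try_to_reuse_location(), so that a spill slot can be freed when its box dies and handed out again through hint_frame_locations. A minimal sketch of that bookkeeping, with simplified names that are not taken from the patch:

    # Minimal sketch of the slot-reuse bookkeeping; MiniFrameManager and its
    # bare integer slot indices are simplifications, not the real
    # FrameManager API.
    class MiniFrameManager(object):
        def __init__(self):
            self.used = []        # one bool per frame slot
            self.bindings = {}    # box -> slot index

        def get_frame_depth(self):
            return len(self.used)

        def mark_as_free(self, box):
            index = self.bindings.pop(box, None)
            if index is not None:
                self.used[index] = False

        def try_to_reuse_location(self, box, index, size=1):
            for i in range(size):
                while index + i >= len(self.used):
                    self.used.append(False)   # grow the frame lazily
                if self.used[index + i]:
                    return False              # slot still in use
            for i in range(size):
                self.used[index + i] = True
            self.bindings[box] = index
            return True

    fm = MiniFrameManager()
    assert fm.try_to_reuse_location('b0', 0)
    assert not fm.try_to_reuse_location('b1', 0)   # slot 0 is still taken
    fm.mark_as_free('b0')
    assert fm.try_to_reuse_location('b1', 0)       # reused after freeing

The real class works on backend-specific frame positions through get_loc_index() and frame_size(), which is why the tests below use FakeFramePos objects rather than bare indices.
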
diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -570,6 +570,28 @@ assert operations[1].getarg(2) == v_value assert operations[1].getdescr() == array_descr + def test_rewrite_assembler_5(self): + S = lltype.GcStruct('S') + A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) + interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, + A.OF, 'x') + wbdescr = self.gc_ll_descr.write_barrier_descr + ops = parse(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + expected = parse(""" + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + operations = get_deep_immutable_oplist(ops.operations) + operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, + operations, []) + equaloplists(operations, expected.operations) + def test_rewrite_assembler_initialization_store(self): S = lltype.GcStruct('S', ('parent', OBJECT), ('x', lltype.Signed)) diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -2,6 +2,8 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan +from pypy.jit.tool.oparser import parse +from pypy.jit.backend.detect_cpu import getcpuclass def newboxes(*values): return [BoxInt(v) for v in values] @@ -40,8 +42,13 @@ def frame_size(self, box_type): if box_type == FLOAT: return 2 + elif box_type == INT: + return 1 else: - return 1 + raise ValueError(box_type) + def get_loc_index(self, loc): + assert isinstance(loc, FakeFramePos) + return loc.pos class MockAsm(object): def __init__(self): @@ -280,7 +287,7 @@ rm.force_allocate_reg(b) rm.before_call() assert len(rm.reg_bindings) == 2 - assert fm.frame_depth == 2 + assert fm.get_frame_depth() == 2 assert len(asm.moves) == 2 rm._check_invariants() rm.after_call(boxes[-1]) @@ -303,7 +310,7 @@ rm.force_allocate_reg(b) rm.before_call(save_all_regs=True) assert len(rm.reg_bindings) == 0 - assert fm.frame_depth == 4 + assert fm.get_frame_depth() == 4 assert len(asm.moves) == 4 rm._check_invariants() rm.after_call(boxes[-1]) @@ -325,7 +332,7 @@ xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm) xrm.loc(f0) rm.loc(b0) - assert fm.frame_depth == 3 + assert fm.get_frame_depth() == 3 @@ -346,3 +353,123 @@ spilled2 = rm.force_allocate_reg(b5) assert spilled2 is loc rm._check_invariants() + + + def test_hint_frame_locations_1(self): + b0, = newboxes(0) + fm = TFrameManager() + loc123 = FakeFramePos(123, INT) + fm.hint_frame_locations[b0] = loc123 + assert fm.get_frame_depth() == 0 + loc = fm.loc(b0) + assert loc == loc123 + assert fm.get_frame_depth() == 124 + + def test_hint_frame_locations_2(self): + b0, b1, b2 = newboxes(0, 1, 2) + longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_allocate_reg(b2) + rm.force_spill_var(b0) + loc = rm.loc(b0) + assert isinstance(loc, FakeFramePos) + assert fm.get_loc_index(loc) == 0 + 
rm.position = 1 + assert fm.used == [True] + rm.possibly_free_var(b0) + assert fm.used == [False] + # + fm.hint_frame_locations[b1] = loc + rm.force_spill_var(b1) + loc1 = rm.loc(b1) + assert loc1 == loc + assert fm.used == [True] + # + fm.hint_frame_locations[b2] = loc + rm.force_spill_var(b2) + loc2 = rm.loc(b2) + assert loc2 != loc1 # because it was not free + assert fm.used == [True, True] + # + rm._check_invariants() + + def test_frame_manager_basic(self): + b0, b1 = newboxes(0, 1) + fm = TFrameManager() + loc0 = fm.loc(b0) + assert fm.get_loc_index(loc0) == 0 + # + assert fm.get(b1) is None + loc1 = fm.loc(b1) + assert fm.get_loc_index(loc1) == 1 + assert fm.get(b1) == loc1 + # + loc0b = fm.loc(b0) + assert loc0b == loc0 + # + fm.loc(BoxInt()) + assert fm.get_frame_depth() == 3 + # + f0 = BoxFloat() + locf0 = fm.loc(f0) + assert fm.get_loc_index(locf0) == 4 + assert fm.get_frame_depth() == 6 + # + f1 = BoxFloat() + locf1 = fm.loc(f1) + assert fm.get_loc_index(locf1) == 6 + assert fm.get_frame_depth() == 8 + assert fm.used == [True, True, True, False, True, True, True, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(f1) + assert fm.used == [False, True, True, False, True, True, False, False] + # + fm.reserve_location_in_frame(1) + assert fm.get_frame_depth() == 9 + assert fm.used == [False, True, True, False, True, True, False, False, True] + # + assert b0 not in fm.bindings + fm.set_binding(b0, loc0) + assert b0 in fm.bindings + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + b3 = BoxInt() + assert not fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) # already free + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b3) + assert fm.used == [False, True, True, False, True, True, False, False, True] + f3 = BoxFloat() + assert not fm.try_to_reuse_location(f3, fm.frame_pos(0, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(1, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(2, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(3, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(4, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(5, FLOAT)) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(f3, fm.frame_pos(6, FLOAT)) + assert fm.used == [False, True, True, False, True, True, True, True, True] + # + fm.used = [False] + assert fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True, True] + # + fm.used = [True] + assert not fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True] diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -1,5 +1,6 @@ from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.jit.metainterp import history +from pypy.rpython.lltypesystem import lltype class AbstractCPU(object): @@ -84,24 +85,21 @@ """Print a disassembled 
version of looptoken to stdout""" raise NotImplementedError - def execute_token(self, looptoken): - """Execute the generated code referenced by the looptoken. + def execute_token(self, looptoken, *args): + """NOT_RPYTHON (for tests only) + Execute the generated code referenced by the looptoken. Returns the descr of the last executed operation: either the one attached to the failing guard, or the one attached to the FINISH. - Use set_future_value_xxx() before, and get_latest_value_xxx() after. + Use get_latest_value_xxx() afterwards to read the result(s). """ - raise NotImplementedError + argtypes = [lltype.typeOf(x) for x in args] + execute = self.make_execute_token(*argtypes) + return execute(looptoken, *args) - def set_future_value_int(self, index, intvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_float(self, index, floatvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_ref(self, index, objvalue): - """Set the value for the index'th argument for the loop to run.""" + def make_execute_token(self, *argtypes): + """Must make and return an execute_token() function that will be + called with the given argtypes. + """ raise NotImplementedError def get_latest_value_int(self, index): diff --git a/pypy/jit/backend/test/calling_convention_test.py b/pypy/jit/backend/test/calling_convention_test.py --- a/pypy/jit/backend/test/calling_convention_test.py +++ b/pypy/jit/backend/test/calling_convention_test.py @@ -2,7 +2,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, ConstInt, ConstPtr, BoxObj, Const, ConstObj, BoxFloat, ConstFloat) @@ -40,17 +40,18 @@ local_floats = list(floats) local_ints = list(ints) expected_result = 0.0 + arguments = [] for i in range(len(args)): x = args[i] if x[0] == 'f': x = local_floats.pop() t = longlong.getfloatstorage(x) - self.cpu.set_future_value_float(i, t) + arguments.append(t) else: x = local_ints.pop() - self.cpu.set_future_value_int(i, x) + arguments.append(x) expected_result += x - return expected_result + return arguments, expected_result @classmethod def get_funcbox(cls, cpu, func_ptr): @@ -107,12 +108,12 @@ ops += 'finish(f99, %s)\n' % arguments loop = parse(ops, namespace=locals()) - looptoken = LoopToken() + looptoken = JitCellToken() done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - expected_result = self._prepare_args(args, floats, ints) + argvals, expected_result = self._prepare_args(args, floats, ints) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(x - expected_result) < 0.0001 @@ -253,13 +254,13 @@ called_ops += 'finish(f%d, descr=fdescr3)\n' % total_index # compile called loop called_loop = parse(called_ops, namespace=locals()) - called_looptoken = LoopToken() + called_looptoken = JitCellToken() called_looptoken.outermost_jitdriver_sd = FakeJitDriverSD() done_number = self.cpu.get_fail_descr_number(called_loop.operations[-1].getdescr()) self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) - expected_result = self._prepare_args(args, floats, ints) - res = cpu.execute_token(called_looptoken) + argvals, expected_result = self._prepare_args(args, floats, ints) + res = cpu.execute_token(called_looptoken, *argvals) assert 
res.identifier == 3 t = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(t - expected_result) < 0.0001 @@ -284,12 +285,12 @@ # we want to take the fast path self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # prepare call to called_loop - self._prepare_args(args, floats, ints) - res = cpu.execute_token(othertoken) + argvals, _ = self._prepare_args(args, floats, ints) + res = cpu.execute_token(othertoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert res.identifier == 4 assert abs(x - expected_result) < 0.0001 diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -3,7 +3,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, TargetToken, ConstInt, ConstPtr, BoxObj, ConstObj, BoxFloat, ConstFloat) @@ -32,22 +32,19 @@ result_type, valueboxes, descr) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - j = 0 + args = [] for box in inputargs: if isinstance(box, BoxInt): - self.cpu.set_future_value_int(j, box.getint()) - j += 1 + args.append(box.getint()) elif isinstance(box, (BoxPtr, BoxObj)): - self.cpu.set_future_value_ref(j, box.getref_base()) - j += 1 + args.append(box.getref_base()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(j, box.getfloatstorage()) - j += 1 + args.append(box.getfloatstorage()) else: raise NotImplementedError(box) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *args) if res is operations[-1].getdescr(): self.guard_failed = False else: @@ -106,10 +103,9 @@ ResOperation(rop.FINISH, [i1], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) res = self.cpu.get_latest_value_int(0) assert res == 3 assert fail.identifier == 1 @@ -118,19 +114,20 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 10 @@ -139,19 +136,22 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, 
descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 44) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(2) assert res == 10 @@ -162,15 +162,17 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = weakref.ref(operations[2]) self.cpu.compile_loop(inputargs, operations, looptoken) @@ -190,15 +192,17 @@ i2 = BoxInt() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -206,14 +210,13 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -226,17 +229,21 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() + i3 = BoxInt() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -244,14 +251,13 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, 
[i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -261,19 +267,20 @@ i1 = BoxInt() i2 = BoxInt() faildescr1 = BasicFailDescr(1) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + operations[3].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail is faildescr1 count = self.cpu.get_latest_value_count() @@ -290,18 +297,17 @@ return AbstractFailDescr.__setattr__(self, name, value) py.test.fail("finish descrs should not be touched") faildescr = UntouchableFailDescr() # to check that is not touched - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [i0], None, descr=faildescr) ] self.cpu.compile_loop([i0], operations, looptoken) - self.cpu.set_future_value_int(0, 99) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 99) assert fail is faildescr res = self.cpu.get_latest_value_int(0) assert res == 99 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [ConstInt(42)], None, descr=faildescr) ] @@ -311,7 +317,7 @@ res = self.cpu.get_latest_value_int(0) assert res == 42 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [], None, descr=faildescr) ] @@ -320,20 +326,19 @@ assert fail is faildescr if self.cpu.supports_floats: - looptoken = LoopToken() + looptoken = JitCellToken() f0 = BoxFloat() operations = [ ResOperation(rop.FINISH, [f0], None, descr=faildescr) ] self.cpu.compile_loop([f0], operations, looptoken) value = longlong.getfloatstorage(-61.25) - self.cpu.set_future_value_float(0, value) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, value) assert fail is faildescr res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == -61.25 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [constfloat(42.5)], None, descr=faildescr) ] @@ -350,20 +355,20 @@ z = BoxInt(579) t = BoxInt(455) u = BoxInt(0) # False - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [y, x], None, descr=targettoken), ResOperation(rop.INT_ADD, [x, y], z), ResOperation(rop.INT_SUB, [y, ConstInt(1)], t), ResOperation(rop.INT_EQ, [t, ConstInt(0)], u), ResOperation(rop.GUARD_FALSE, [u], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [z, t], None, descr=looptoken), + ResOperation(rop.JUMP, [t, z], None, descr=targettoken), ] operations[-2].setfailargs([t, z]) cpu.compile_loop([x, y], operations, looptoken) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - res = 
self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, 0, 10) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_int(1) == 55 @@ -419,14 +424,12 @@ ] ops[1].setfailargs([v_res]) # - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([v1, v2], ops, looptoken) for x, y, z in testcases: excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, x) - self.cpu.set_future_value_int(1, y) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, x, y) if (z == boom) ^ reversed: assert fail.identifier == 1 else: @@ -1082,16 +1085,18 @@ inputargs.insert(index_counter, i0) jumpargs.insert(index_counter, i1) # - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() faildescr = BasicFailDescr(15) operations = [ + ResOperation(rop.LABEL, inputargs, None, descr=targettoken), ResOperation(rop.INT_SUB, [i0, ConstInt(1)], i1), ResOperation(rop.INT_GE, [i1, ConstInt(0)], i2), ResOperation(rop.GUARD_TRUE, [i2], None), - ResOperation(rop.JUMP, jumpargs, None, descr=looptoken), + ResOperation(rop.JUMP, jumpargs, None, descr=targettoken), ] - operations[2].setfailargs(inputargs[:]) - operations[2].setdescr(faildescr) + operations[3].setfailargs(inputargs[:]) + operations[3].setdescr(faildescr) # self.cpu.compile_loop(inputargs, operations, looptoken) # @@ -1109,17 +1114,7 @@ assert 0 values[index_counter] = 11 # - for i, (box, val) in enumerate(zip(inputargs, values)): - if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, val) - elif isinstance(box, BoxPtr): - self.cpu.set_future_value_ref(i, val) - elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, val) - else: - assert 0 - # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *values) assert fail.identifier == 15 # dstvalues = values[:] @@ -1149,30 +1144,33 @@ py.test.skip("requires floats") fboxes = [BoxFloat() for i in range(12)] i2 = BoxInt() + targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) operations = [ + ResOperation(rop.LABEL, fboxes, None, descr=targettoken), ResOperation(rop.FLOAT_LE, [fboxes[0], constfloat(9.2)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), ResOperation(rop.FINISH, fboxes, None, descr=faildescr2), ] operations[-2].setfailargs(fboxes) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(fboxes, operations, looptoken) fboxes2 = [BoxFloat() for i in range(12)] f3 = BoxFloat() bridge = [ ResOperation(rop.FLOAT_SUB, [fboxes2[0], constfloat(1.0)], f3), - ResOperation(rop.JUMP, [f3] + fboxes2[1:], None, descr=looptoken), + ResOperation(rop.JUMP, [f3]+fboxes2[1:], None, descr=targettoken), ] self.cpu.compile_bridge(faildescr1, fboxes2, bridge, looptoken) + args = [] for i in range(len(fboxes)): x = 13.5 + 6.73 * i - self.cpu.set_future_value_float(i, longlong.getfloatstorage(x)) - fail = self.cpu.execute_token(looptoken) + args.append(longlong.getfloatstorage(x)) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 2 res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == 8.5 @@ -1214,7 +1212,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1222,14 +1220,12 @@ if test1 == -42 or combinaison[0] == 'b': for test2 in 
[-65, -42, -11]: if test2 == -42 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_int(n, test1) - n += 1 + args.append(test1) if combinaison[1] == 'b': - cpu.set_future_value_int(n, test2) - n += 1 - fail = cpu.execute_token(looptoken) + args.append(test2) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1271,7 +1267,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1281,16 +1277,14 @@ if test1 == -4.5 or combinaison[0] == 'b': for test2 in [-6.5, -4.5, -2.5, nan]: if test2 == -4.5 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test1)) - n += 1 + args.append( + longlong.getfloatstorage(test1)) if combinaison[1] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test2)) - n += 1 - fail = cpu.execute_token(looptoken) + args.append( + longlong.getfloatstorage(test2)) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1330,19 +1324,20 @@ faildescr = BasicFailDescr(1) operations.append(ResOperation(rop.FINISH, [], None, descr=faildescr)) - looptoken = LoopToken() + looptoken = JitCellToken() # self.cpu.compile_loop(inputargs, operations, looptoken) # - for i, box in enumerate(inputargs): + args = [] + for box in inputargs: if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, box.getint()) + args.append(box.getint()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, box.getfloatstorage()) + args.append(box.getfloatstorage()) else: assert 0 # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 1 def test_nan_and_infinity(self): @@ -1400,15 +1395,14 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(5))] operations[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) self.cpu.compile_loop(unique_testcase_list, operations, looptoken) - for i, box in enumerate(unique_testcase_list): - self.cpu.set_future_value_float( - i, box.getfloatstorage()) - fail = self.cpu.execute_token(looptoken) + args = [box.getfloatstorage() + for box in unique_testcase_list] + fail = self.cpu.execute_token(looptoken, *args) if fail.identifier != 5 - (expected_id^expected): if fail.identifier == 4: msg = "was taken" @@ -1675,15 +1669,14 @@ exc_tp = xtp exc_ptr = xptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_ref(1) == xptr excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1700,9 +1693,9 @@ exc_tp = ytp exc_ptr = yptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, 
loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == yptr @@ -1718,14 +1711,13 @@ finish(0) ''' loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == xptr - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 0 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1895,18 +1887,14 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 20 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 10 @@ -1940,18 +1928,14 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 42 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 42 @@ -1986,19 +1970,15 @@ ResOperation(rop.FINISH, [f2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, f2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 42.5 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 x = self.cpu.get_latest_value_float(1) @@ -2031,10 +2011,9 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([i1, i2]) - looptoken = LoopToken() + looptoken = 
JitCellToken() self.cpu.compile_loop([i1], ops, looptoken) - self.cpu.set_future_value_int(0, ord('G')) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, ord('G')) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == ord('g') @@ -2091,14 +2070,14 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1, i2, i3], ops, looptoken) - self.cpu.set_future_value_int(0, rffi.cast(lltype.Signed, raw)) - self.cpu.set_future_value_int(1, 2) - self.cpu.set_future_value_int(2, 4) - self.cpu.set_future_value_int(3, rffi.cast(lltype.Signed, fn)) + args = [rffi.cast(lltype.Signed, raw), + 2, + 4, + rffi.cast(lltype.Signed, fn)] assert glob.lst == [] - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert len(glob.lst) > 0 lltype.free(raw, flavor='raw') @@ -2147,13 +2126,12 @@ ops += [ ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0)) ] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') - self.cpu.set_future_value_int(0, buflen) - self.cpu.set_future_value_int(1, rffi.cast(lltype.Signed, buffer)) - fail = self.cpu.execute_token(looptoken) + args = [buflen, rffi.cast(lltype.Signed, buffer)] + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == len(cwd) assert rffi.charp2strn(buffer, buflen) == cwd @@ -2169,12 +2147,10 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[0].setfailargs([i1]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == -42 print 'step 1 ok' @@ -2183,9 +2159,7 @@ # mark as failing self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr assert self.cpu.get_latest_value_int(0) == 9 print 'step 2 ok' @@ -2201,9 +2175,7 @@ ops[0].setfailargs([]) self.cpu.compile_bridge(faildescr, [i2], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 3 assert self.cpu.get_latest_value_int(0) == 9 print 'step 3 ok' @@ -2212,9 +2184,7 @@ # mark as failing again self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr2 print 'step 4 ok' print '-'*79 @@ -2415,7 +2385,7 @@ i18 = int_add(i17, i9) finish(i18)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 @@ -2423,9 +2393,8 @@ FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, 
EffectInfo.MOST_GENERAL) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(looptoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(looptoken, *args) assert self.cpu.get_latest_value_int(0) == 55 ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] @@ -2435,11 +2404,10 @@ finish(i11) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(othertoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_latest_value_int(0) == 13 assert called @@ -2471,12 +2439,12 @@ finish(f2)''' loop = parse(ops) done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.3)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(2.3)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 2.3 ops = ''' @@ -2486,11 +2454,11 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2499,11 +2467,11 @@ del called[:] self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 3.2 assert not called @@ -2561,12 +2529,12 @@ f2 = float_add(f0, f1) finish(f2)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.35)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(2.35)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.25 + 2.35 assert not called @@ -2578,13 +2546,13 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # normal 
call_assembler: goes to looptoken - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.25)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(3.25)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2596,7 +2564,7 @@ f2 = float_sub(f0, f1) finish(f2)''' loop = parse(ops) - looptoken2 = LoopToken() + looptoken2 = JitCellToken() looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken2) @@ -2604,10 +2572,9 @@ self.cpu.redirect_call_assembler(looptoken, looptoken2) # now, our call_assembler should go to looptoken2 - self.cpu.set_future_value_float(0, longlong.getfloatstorage(6.0)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(1.5)) - # 6.0-1.5 == 1.25+3.25 - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(6.0), + longlong.getfloatstorage(1.5)] # 6.0-1.5 == 1.25+3.25 + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2958,13 +2925,137 @@ ResOperation(rop.FINISH, [p0], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # overflowing value: - self.cpu.set_future_value_int(0, sys.maxint // 4 + 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) assert fail.identifier == excdescr.identifier + def test_compile_loop_with_target(self): + i0 = BoxInt() + i1 = BoxInt() + i2 = BoxInt() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken1 = TargetToken() + targettoken2 = TargetToken() + faildescr = BasicFailDescr(2) + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), + ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr), + ResOperation(rop.LABEL, [i1], None, descr=targettoken2), + ResOperation(rop.INT_GE, [i1, ConstInt(0)], i3), + ResOperation(rop.GUARD_TRUE, [i3], None, descr=BasicFailDescr(3)), + ResOperation(rop.JUMP, [i1], None, descr=targettoken1), + ] + inputargs = [i0] + operations[3].setfailargs([i1]) + operations[6].setfailargs([i1]) + + self.cpu.compile_loop(inputargs, operations, looptoken) + fail = self.cpu.execute_token(looptoken, 2) + assert fail.identifier == 2 + res = self.cpu.get_latest_value_int(0) + assert res == 10 + + inputargs = [i0] + operations = [ + ResOperation(rop.INT_SUB, [i0, ConstInt(20)], i2), + ResOperation(rop.JUMP, [i2], None, descr=targettoken2), + ] + self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) + + fail = self.cpu.execute_token(looptoken, 2) + assert fail.identifier == 3 + res = self.cpu.get_latest_value_int(0) + assert res == -10 + + def test_compile_bridge_with_target(self): + # This test creates a loopy piece of code in a bridge, and builds another + # unrelated loop that ends in a jump directly to this loopy bit of code. + # It catches a case in which we underestimate the needed frame_depth across + # the cross-loop JUMP, because we estimate it based on the frame_depth stored + # in the original loop. 
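
The comment above spells out the scenario: the loopy code lives in a bridge whose body keeps many values live across the two CALLs and so needs a deep frame, while the second loop that jumps straight to targettoken1 is tiny. A rough illustration of the invariant being tested, with names invented here rather than taken from the backend: the frame reserved for a cross-loop JUMP has to cover what the jump target needs, not only what the jumping loop itself was compiled with.

    # Purely illustrative; the 'frame_depth' attribute and this helper are
    # made up to show the shape of the problem, they are not backend APIs.
    class CompiledPiece(object):
        def __init__(self, frame_depth):
            self.frame_depth = frame_depth   # spill slots this piece needs

    def depth_needed_for_jump(source, target):
        # sizing the frame from 'source' alone is exactly the
        # underestimate the test guards against
        return max(source.frame_depth, target.frame_depth)

    tiny_loop = CompiledPiece(frame_depth=1)
    loopy_bridge = CompiledPiece(frame_depth=20)
    assert depth_needed_for_jump(tiny_loop, loopy_bridge) == 20
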
+ i0 = BoxInt() + i1 = BoxInt() + looptoken1 = JitCellToken() + targettoken1 = TargetToken() + faildescr1 = BasicFailDescr(2) + inputargs = [i0] + operations = [ + ResOperation(rop.INT_LE, [i0, ConstInt(1)], i1), + ResOperation(rop.GUARD_TRUE, [i1], None, descr=faildescr1), + ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(1234)), + ] + operations[1].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken1) + + def func(a, b, c, d, e, f, g, h, i): + assert a + 2 == b + assert a + 4 == c + assert a + 6 == d + assert a + 8 == e + assert a + 10 == f + assert a + 12 == g + assert a + 14 == h + assert a + 16 == i + FPTR = self.Ptr(self.FuncType([lltype.Signed]*9, lltype.Void)) + func_ptr = llhelper(FPTR, func) + cpu = self.cpu + calldescr = cpu.calldescrof(deref(FPTR), (lltype.Signed,)*9, lltype.Void, + EffectInfo.MOST_GENERAL) + funcbox = self.get_funcbox(cpu, func_ptr) + + i0 = BoxInt(); i1 = BoxInt(); i2 = BoxInt(); i3 = BoxInt(); i4 = BoxInt() + i5 = BoxInt(); i6 = BoxInt(); i7 = BoxInt(); i8 = BoxInt(); i9 = BoxInt() + i10 = BoxInt(); i11 = BoxInt(); i12 = BoxInt(); i13 = BoxInt(); i14 = BoxInt() + i15 = BoxInt(); i16 = BoxInt(); i17 = BoxInt(); i18 = BoxInt(); i19 = BoxInt() + i20 = BoxInt() + inputargs = [i0] + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_ADD, [i1, ConstInt(1)], i2), + ResOperation(rop.INT_ADD, [i2, ConstInt(1)], i3), + ResOperation(rop.INT_ADD, [i3, ConstInt(1)], i4), + ResOperation(rop.INT_ADD, [i4, ConstInt(1)], i5), + ResOperation(rop.INT_ADD, [i5, ConstInt(1)], i6), + ResOperation(rop.INT_ADD, [i6, ConstInt(1)], i7), + ResOperation(rop.INT_ADD, [i7, ConstInt(1)], i8), + ResOperation(rop.INT_ADD, [i8, ConstInt(1)], i9), + ResOperation(rop.INT_ADD, [i9, ConstInt(1)], i10), + ResOperation(rop.INT_ADD, [i10, ConstInt(1)], i11), + ResOperation(rop.INT_ADD, [i11, ConstInt(1)], i12), + ResOperation(rop.INT_ADD, [i12, ConstInt(1)], i13), + ResOperation(rop.INT_ADD, [i13, ConstInt(1)], i14), + ResOperation(rop.INT_ADD, [i14, ConstInt(1)], i15), + ResOperation(rop.INT_ADD, [i15, ConstInt(1)], i16), + ResOperation(rop.INT_ADD, [i16, ConstInt(1)], i17), + ResOperation(rop.INT_ADD, [i17, ConstInt(1)], i18), + ResOperation(rop.INT_ADD, [i18, ConstInt(1)], i19), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.INT_LT, [i19, ConstInt(100)], i20), + ResOperation(rop.GUARD_TRUE, [i20], None, descr=BasicFailDescr(42)), + ResOperation(rop.JUMP, [i19], None, descr=targettoken1), + ] + operations[-2].setfailargs([]) + self.cpu.compile_bridge(faildescr1, inputargs, operations, looptoken1) + + looptoken2 = JitCellToken() + inputargs = [BoxInt()] + operations = [ + ResOperation(rop.JUMP, [ConstInt(0)], None, descr=targettoken1), + ] + self.cpu.compile_loop(inputargs, operations, looptoken2) + + fail = self.cpu.execute_token(looptoken2, -9) + assert fail.identifier == 42 + class OOtypeBackendTest(BaseBackendTest): diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -3,9 +3,10 @@ from pypy.rlib.rarithmetic import intmask, LONG_BIT from pypy.rpython.lltypesystem import llmemory from pypy.jit.metainterp.history import BasicFailDescr, TreeLoop -from 
pypy.jit.metainterp.history import BoxInt, ConstInt, LoopToken -from pypy.jit.metainterp.history import BoxPtr, ConstPtr +from pypy.jit.metainterp.history import BoxInt, ConstInt, JitCellToken +from pypy.jit.metainterp.history import BoxPtr, ConstPtr, TargetToken from pypy.jit.metainterp.history import BoxFloat, ConstFloat, Const +from pypy.jit.metainterp.history import INT, FLOAT from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.executor import execute_nonspec from pypy.jit.metainterp.resoperation import opname @@ -179,7 +180,7 @@ #print >>s, ' operations[%d].suboperations = [' % i #print >>s, ' ResOperation(rop.FAIL, [%s], None)]' % ( # ', '.join([names[v] for v in op.args])) - print >>s, ' looptoken = LoopToken()' + print >>s, ' looptoken = JitCellToken()' print >>s, ' cpu.compile_loop(inputargs, operations, looptoken)' if hasattr(self.loop, 'inputargs'): for i, v in enumerate(self.loop.inputargs): @@ -525,29 +526,53 @@ startvars.append(BoxFloat(r.random_float_storage())) else: startvars.append(BoxInt(r.random_integer())) + allow_delay = True + else: + allow_delay = False assert len(dict.fromkeys(startvars)) == len(startvars) self.startvars = startvars self.prebuilt_ptr_consts = [] self.r = r - self.build_random_loop(cpu, builder_factory, r, startvars) + self.build_random_loop(cpu, builder_factory, r, startvars, allow_delay) - def build_random_loop(self, cpu, builder_factory, r, startvars): + def build_random_loop(self, cpu, builder_factory, r, startvars, allow_delay): loop = TreeLoop('test_random_function') loop.inputargs = startvars[:] loop.operations = [] - loop.token = LoopToken() - + loop._jitcelltoken = JitCellToken() builder = builder_factory(cpu, loop, startvars[:]) - self.generate_ops(builder, r, loop, startvars) + if allow_delay: + needs_a_label = True + else: + self.insert_label(loop, 0, r) + needs_a_label = False + self.generate_ops(builder, r, loop, startvars, needs_a_label=needs_a_label) self.builder = builder self.loop = loop - cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + dump(loop) + cpu.compile_loop(loop.inputargs, loop.operations, loop._jitcelltoken) - def generate_ops(self, builder, r, loop, startvars): + def insert_label(self, loop, position, r): + assert not hasattr(loop, '_targettoken') + for i in range(position): + op = loop.operations[i] + if (not op.has_no_side_effect() + or not isinstance(op.result, (BoxInt, BoxFloat))): + position = i + break # cannot move the LABEL later + randompos = r.randrange(0, len(self.startvars)+1) + self.startvars.insert(randompos, op.result) + loop._targettoken = TargetToken() + loop.operations.insert(position, ResOperation(rop.LABEL, self.startvars, None, + loop._targettoken)) + + def generate_ops(self, builder, r, loop, startvars, needs_a_label=False): block_length = pytest.config.option.block_length + istart = 0 for i in range(block_length): + istart = len(loop.operations) try: op = r.choice(builder.OPERATIONS) op.filter(builder) @@ -556,6 +581,12 @@ pass if builder.should_fail_by is not None: break + if needs_a_label and r.random() < 0.2: + self.insert_label(loop, istart, r) + needs_a_label = False + if needs_a_label: + self.insert_label(loop, istart, r) + endvars = [] used_later = {} for op in loop.operations: @@ -581,6 +612,22 @@ if pytest.config.option.output: builder.print_loop() + def runjitcelltoken(self): + if self.startvars == self.loop.inputargs: + return self.loop._jitcelltoken + if not hasattr(self, '_initialjumploop_celltoken'): + self._initialjumploop_celltoken 
= JitCellToken() + args = [] + for box in self.startvars: + if box not in self.loop.inputargs: + box = box.constbox() + args.append(box) + self.cpu.compile_loop(self.loop.inputargs, + [ResOperation(rop.JUMP, args, None, + descr=self.loop._targettoken)], + self._initialjumploop_celltoken) + return self._initialjumploop_celltoken + def get_fail_args(self): if self.should_fail_by.is_guard(): assert self.should_fail_by.getfailargs() is not None @@ -608,14 +655,8 @@ exc = cpu.grab_exc_value() assert not exc - for i, box in enumerate(self.startvars): - if isinstance(box, BoxInt): - cpu.set_future_value_int(i, box.value) - elif isinstance(box, BoxFloat): - cpu.set_future_value_float(i, box.value) - else: - raise NotImplementedError(box) - fail = cpu.execute_token(self.loop.token) + arguments = [box.value for box in self.loop.inputargs] + fail = cpu.execute_token(self.runjitcelltoken(), *arguments) assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): if isinstance(v, (BoxFloat, ConstFloat)): @@ -676,33 +717,55 @@ # to build_bridge().) # First make up the other loop... - subset = bridge_builder.subset_of_intvars(r) - subset = [i for i in subset if i in fail_args] - if len(subset) == 0: - return False + # + # New restriction: must have the same argument count and types + # as the original loop + subset = [] + for box in self.loop.inputargs: + srcbox = r.choice(fail_args) + if srcbox.type != box.type: + if box.type == INT: + srcbox = ConstInt(r.random_integer()) + elif box.type == FLOAT: + srcbox = ConstFloat(r.random_float_storage()) + else: + raise AssertionError(box.type) + subset.append(srcbox) + # args = [x.clonebox() for x in subset] rl = RandomLoop(self.builder.cpu, self.builder.fork, r, args) + dump(rl.loop) self.cpu.compile_loop(rl.loop.inputargs, rl.loop.operations, - rl.loop.token) + rl.loop._jitcelltoken) # done self.should_fail_by = rl.should_fail_by self.expected = rl.expected assert len(rl.loop.inputargs) == len(args) # The new bridge's execution will end normally at its FINISH. # Just replace the FINISH with the JUMP to the new loop. 
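
The shape the random-loop tests (and the rest of this patch) now build is: one JitCellToken as the handle for the whole compiled unit, a TargetToken attached to a LABEL operation, and back-edge JUMPs whose descr is that TargetToken rather than the old LoopToken. A minimal standalone sketch of that structure, using small stand-in classes instead of the real pypy.jit.metainterp.history ones (only the relationships are meant to mirror the patch):

    class JitCellToken(object):
        """Stand-in: handle passed to compile_loop()/execute_token()."""

    class TargetToken(object):
        """Stand-in: one per LABEL; JUMPs name it via their descr."""

    class Op(object):
        def __init__(self, opname, args, descr=None):
            self.opname, self.args, self.descr = opname, args, descr
        def __repr__(self):
            extra = ', descr=%r' % (self.descr,) if self.descr is not None else ''
            return '%s(%s%s)' % (self.opname, ', '.join(self.args), extra)

    looptoken = JitCellToken()       # the compiled unit
    targettoken = TargetToken()      # jump target defined by the LABEL below

    operations = [
        Op('label', ['i0'], descr=targettoken),
        Op('int_add', ['i0', '1']),
        Op('jump', ['i1'], descr=targettoken),   # back-edge points at the LABEL
    ]
    for op in operations:
        print(op)

Keeping the jump target (TargetToken) separate from the compiled-unit handle (JitCellToken) is what lets insert_label() above place the LABEL at a random position inside the loop and still have bridges jump straight to it.
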
- jump_op = ResOperation(rop.JUMP, subset, None, descr=rl.loop.token) + jump_op = ResOperation(rop.JUMP, subset, None, + descr=rl.loop._targettoken) subloop.operations[-1] = jump_op self.guard_op = rl.guard_op self.prebuilt_ptr_consts += rl.prebuilt_ptr_consts - self.loop.token.record_jump_to(rl.loop.token) + self.loop._jitcelltoken.record_jump_to(rl.loop._jitcelltoken) self.dont_generate_more = True if r.random() < .05: return False + dump(subloop) self.builder.cpu.compile_bridge(fail_descr, fail_args, - subloop.operations, self.loop.token) + subloop.operations, + self.loop._jitcelltoken) return True +def dump(loop): + print >> sys.stderr, loop + if hasattr(loop, 'inputargs'): + print >> sys.stderr, '\t', loop.inputargs + for op in loop.operations: + print >> sys.stderr, '\t', op + def check_random_function(cpu, BuilderClass, r, num=None, max=None): loop = RandomLoop(cpu, BuilderClass, r) while True: diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -2,8 +2,8 @@ from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from pypy.jit.metainterp.history import Const, Box, BoxInt, ConstInt -from pypy.jit.metainterp.history import (AbstractFailDescr, INT, REF, FLOAT, - LoopToken) +from pypy.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT +from pypy.jit.metainterp.history import JitCellToken from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper @@ -38,6 +38,7 @@ from pypy.jit.backend.x86.jump import remap_frame_layout from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter import longlong +from pypy.rlib.rarithmetic import intmask # darwin requires the stack to be 16 bytes aligned on calls. 
Same for gcc 4.5.0, # better safe than sorry @@ -152,14 +153,13 @@ allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, allblocks) + self.target_tokens_currently_compiling = {} def teardown(self): self.pending_guard_tokens = None if WORD == 8: self.pending_memoryerror_trampoline_from = None self.mc = None - self.looppos = -1 - self.currently_compiling_loop = None self.current_clt = None def finish_once(self): @@ -310,12 +310,11 @@ mc.MOVSD_sx(8*i, i) # xmm0 to xmm7 # if IS_X86_32: - mc.LEA_rb(eax.value, +8) stack_size += 2*WORD mc.PUSH_r(eax.value) # alignment - mc.PUSH_r(eax.value) + mc.PUSH_r(esp.value) elif IS_X86_64: - mc.LEA_rb(edi.value, +16) + mc.MOV_rr(edi.value, esp.value) # # esp is now aligned to a multiple of 16 again mc.CALL(imm(slowpathaddr)) @@ -326,7 +325,7 @@ jnz_location = mc.get_relative_pos() # if IS_X86_32: - mc.ADD_ri(esp.value, 2*WORD) + mc.ADD_ri(esp.value, 2*WORD) # cancel the two PUSHes above elif IS_X86_64: # restore the registers for i in range(7, -1, -1): @@ -422,12 +421,8 @@ def assemble_loop(self, loopname, inputargs, operations, looptoken, log): '''adds the following attributes to looptoken: - _x86_loop_code (an integer giving an address) - _x86_bootstrap_code (an integer giving an address) - _x86_direct_bootstrap_code ( " " " " ) - _x86_frame_depth - _x86_param_depth - _x86_arglocs + _x86_function_addr (address of the generated func, as an int) + _x86_loop_code (debug: addr of the start of the ResOps) _x86_debug_checksum ''' # XXX this function is too longish and contains some code @@ -443,37 +438,35 @@ assert len(set(inputargs)) == len(inputargs) self.setup(looptoken) - self.currently_compiling_loop = looptoken if log: self._register_counter(False, looptoken.number) operations = self._inject_debugging_code(looptoken, operations) regalloc = RegAlloc(self, self.cpu.translate_support_code) - arglocs, operations = regalloc.prepare_loop(inputargs, operations, - looptoken, clt.allgcrefs) - looptoken._x86_arglocs = arglocs - - bootstrappos = self.mc.get_relative_pos() - stackadjustpos = self._assemble_bootstrap_code(inputargs, arglocs) - self.looppos = self.mc.get_relative_pos() - looptoken._x86_frame_depth = -1 # temporarily - looptoken._x86_param_depth = -1 # temporarily + # + self._call_header_with_stack_check() + stackadjustpos = self._patchable_stackadjust() + clt._debug_nbargs = len(inputargs) + operations = regalloc.prepare_loop(inputargs, operations, + looptoken, clt.allgcrefs) + looppos = self.mc.get_relative_pos() + looptoken._x86_loop_code = looppos + clt.frame_depth = -1 # temporarily + clt.param_depth = -1 # temporarily frame_depth, param_depth = self._assemble(regalloc, operations) - looptoken._x86_frame_depth = frame_depth - looptoken._x86_param_depth = param_depth - - directbootstrappos = self.mc.get_relative_pos() - self._assemble_bootstrap_direct_call(arglocs, self.looppos, - frame_depth+param_depth) + clt.frame_depth = frame_depth + clt.param_depth = param_depth + # + size_excluding_failure_stuff = self.mc.get_relative_pos() self.write_pending_failure_recoveries() - fullsize = self.mc.get_relative_pos() + full_size = self.mc.get_relative_pos() # rawstart = self.materialize_loop(looptoken) debug_start("jit-backend-addr") debug_print("Loop %d (%s) has address %x to %x (bootstrap %x)" % ( looptoken.number, loopname, - rawstart + self.looppos, - rawstart + directbootstrappos, + rawstart + looppos, + rawstart + size_excluding_failure_stuff, rawstart)) debug_stop("jit-backend-addr") 
self._patch_stackadjust(rawstart + stackadjustpos, @@ -484,18 +477,17 @@ if not we_are_translated(): # used only by looptoken.dump() -- useful in tests looptoken._x86_rawstart = rawstart - looptoken._x86_fullsize = fullsize + looptoken._x86_fullsize = full_size looptoken._x86_ops_offset = ops_offset + looptoken._x86_function_addr = rawstart - looptoken._x86_bootstrap_code = rawstart + bootstrappos - looptoken._x86_loop_code = rawstart + self.looppos - looptoken._x86_direct_bootstrap_code = rawstart + directbootstrappos + self.fixup_target_tokens(rawstart) self.teardown() # oprofile support if self.cpu.profile_agent is not None: name = "Loop # %s: %s" % (looptoken.number, loopname) self.cpu.profile_agent.native_code_written(name, - rawstart, fullsize) + rawstart, full_size) return ops_offset def assemble_bridge(self, faildescr, inputargs, operations, @@ -548,6 +540,9 @@ # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset + self.fixup_target_tokens(rawstart) + self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth) + self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -668,6 +663,11 @@ mc.copy_to_raw_memory(adr_target) faildescr._x86_adr_jump_offset = 0 # means "patched" + def fixup_target_tokens(self, rawstart): + for targettoken in self.target_tokens_currently_compiling: + targettoken._x86_loop_code += rawstart + self.target_tokens_currently_compiling = None + @specialize.argtype(1) def _inject_debugging_code(self, looptoken, operations): if self._debug: @@ -685,20 +685,24 @@ ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), ResOperation(rop.SETFIELD_RAW, [c_adr, box2], None, descr=self.debug_counter_descr)] - operations = ops + operations + if operations[0].getopnum() == rop.LABEL: + operations = [operations[0]] + ops + operations[1:] + else: + operations = ops + operations return operations def _assemble(self, regalloc, operations): self._regalloc = regalloc + regalloc.compute_hint_frame_locations(operations) regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging - frame_depth = regalloc.fm.frame_depth + frame_depth = regalloc.fm.get_frame_depth() param_depth = regalloc.param_depth jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: - target_frame_depth = jump_target_descr._x86_frame_depth - target_param_depth = jump_target_descr._x86_param_depth + target_frame_depth = jump_target_descr._x86_clt.frame_depth + target_param_depth = jump_target_descr._x86_clt.param_depth frame_depth = max(frame_depth, target_frame_depth) param_depth = max(param_depth, target_param_depth) return frame_depth, param_depth @@ -793,152 +797,21 @@ self.mc.MOV_ri(ebx.value, rst) # MOV ebx, rootstacktop self.mc.SUB_mi8((ebx.value, 0), 2*WORD) # SUB [ebx], 2*WORD - def _assemble_bootstrap_direct_call(self, arglocs, jmppos, stackdepth): - if IS_X86_64: - return self._assemble_bootstrap_direct_call_64(arglocs, jmppos, stackdepth) - # XXX pushing ebx esi and edi is a bit pointless, since we store - # all regsiters anyway, for the case of guard_not_forced - # XXX this can be improved greatly. 
Right now it'll behave like - # a normal call - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - offset = 2 * WORD - tmp = eax - xmmtmp = xmm0 - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert not loc.is_xmm - self.mc.MOV_rb(loc.value, offset) - else: - self.mc.MOV_rb(tmp.value, offset) - self.mc.MOV(loc, tmp) - offset += WORD - loc = floatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert loc.is_xmm - self.mc.MOVSD_xb(loc.value, offset) - else: - self.mc.MOVSD_xb(xmmtmp.value, offset) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - offset += 2 * WORD - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - - def _assemble_bootstrap_direct_call_64(self, arglocs, jmppos, stackdepth): - # XXX: Very similar to _emit_call_64 - - src_locs = [] - dst_locs = [] - xmm_src_locs = [] - xmm_dst_locs = [] - get_from_stack = [] - - # In reverse order for use with pop() - unused_gpr = [r9, r8, ecx, edx, esi, edi] - unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] - - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - - # The lists are padded with Nones - assert len(nonfloatlocs) == len(floatlocs) - - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if len(unused_gpr) > 0: - src_locs.append(unused_gpr.pop()) - dst_locs.append(loc) - else: - get_from_stack.append((loc, False)) - - floc = floatlocs[i] - if floc is not None: - if len(unused_xmm) > 0: - xmm_src_locs.append(unused_xmm.pop()) - xmm_dst_locs.append(floc) - else: - get_from_stack.append((floc, True)) - - remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) - remap_frame_layout(self, xmm_src_locs, xmm_dst_locs, X86_64_XMM_SCRATCH_REG) - - for i in range(len(get_from_stack)): - loc, is_xmm = get_from_stack[i] - if is_xmm: - self.mc.MOVSD_xb(X86_64_XMM_SCRATCH_REG.value, (2 + i) * WORD) - self.mc.MOVSD(loc, X86_64_XMM_SCRATCH_REG) - else: - self.mc.MOV_rb(X86_64_SCRATCH_REG.value, (2 + i) * WORD) - # XXX: We're assuming that "loc" won't require regloc to - # clobber the scratch register - self.mc.MOV(loc, X86_64_SCRATCH_REG) - - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking - oldnonfloatlocs, oldfloatlocs = oldlooptoken._x86_arglocs - newnonfloatlocs, newfloatlocs = newlooptoken._x86_arglocs - assert len(oldnonfloatlocs) == len(newnonfloatlocs) - assert len(oldfloatlocs) == len(newfloatlocs) + old_nbargs = oldlooptoken.compiled_loop_token._debug_nbargs + new_nbargs = newlooptoken.compiled_loop_token._debug_nbargs + assert old_nbargs == new_nbargs # we overwrite the instructions at the old _x86_direct_bootstrap_code # to start with a JMP to the new _x86_direct_bootstrap_code. # Ideally we should rather patch all existing CALLs, but well. 
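
Two pieces of address bookkeeping meet here: while assembling, each TargetToken only records its offset inside the machine-code buffer, and fixup_target_tokens() turns those offsets into absolute addresses once the buffer is materialized at rawstart; redirect_call_assembler() then needs just one absolute entry point per loop (_x86_function_addr) to overwrite with a JMP. Below is a plain-Python simulation of that bookkeeping, with hypothetical offsets and a hypothetical rawstart, not real machine-code patching:

    class FakeTargetToken(object):
        _x86_loop_code = 0           # 0 means "LABEL not compiled yet" (see regalloc.py)

    tok_a, tok_b = FakeTargetToken(), FakeTargetToken()

    # While assembling, each LABEL records its offset inside the code buffer.
    tok_a._x86_loop_code = 0x40      # hypothetical buffer offsets
    tok_b._x86_loop_code = 0x90
    target_tokens_currently_compiling = {tok_a: None, tok_b: None}

    def fixup_target_tokens(rawstart):
        # after materialize_loop(): turn buffer offsets into absolute addresses
        for tok in target_tokens_currently_compiling:
            tok._x86_loop_code += rawstart

    rawstart = 0x7f0000001000        # hypothetical address of the materialized buffer
    fixup_target_tokens(rawstart)
    assert tok_a._x86_loop_code == rawstart + 0x40
    assert tok_b._x86_loop_code == rawstart + 0x90
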
- oldadr = oldlooptoken._x86_direct_bootstrap_code - target = newlooptoken._x86_direct_bootstrap_code + oldadr = oldlooptoken._x86_function_addr + target = newlooptoken._x86_function_addr mc = codebuf.MachineCodeBlockWrapper() mc.JMP(imm(target)) + assert mc.get_relative_pos() <= 13 # keep in sync with prepare_loop() mc.copy_to_raw_memory(oldadr) - def _assemble_bootstrap_code(self, inputargs, arglocs): - nonfloatlocs, floatlocs = arglocs - self._call_header() - stackadjustpos = self._patchable_stackadjust() - tmp = eax - xmmtmp = xmm0 - self.mc.begin_reuse_scratch_register() - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is None: - continue - if isinstance(loc, RegLoc): - target = loc - else: - target = tmp - if inputargs[i].type == REF: - adr = self.fail_boxes_ptr.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - self.mc.MOV(heap(adr), imm0) - else: - adr = self.fail_boxes_int.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - if target is not loc: - assert isinstance(loc, StackLoc) - self.mc.MOV_br(loc.value, target.value) - for i in range(len(floatlocs)): - loc = floatlocs[i] - if loc is None: - continue - adr = self.fail_boxes_float.get_addr_for_num(i) - if isinstance(loc, RegLoc): - self.mc.MOVSD(loc, heap(adr)) - else: - self.mc.MOVSD(xmmtmp, heap(adr)) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - self.mc.end_reuse_scratch_register() - return stackadjustpos - def dump(self, text): if not self.verbose: return @@ -965,7 +838,7 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.SUB_ri(esp.value, 8) # = size of doubles self.mc.MOVSD_sx(0, loc.value) - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.PUSH_b(get_ebp_ofs(loc.position)) self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) @@ -976,13 +849,25 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.MOVSD_xs(loc.value, 0) self.mc.ADD_ri(esp.value, 8) # = size of doubles - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.POP_b(get_ebp_ofs(loc.position + 1)) self.mc.POP_b(get_ebp_ofs(loc.position)) else: self.mc.POP(loc) + def regalloc_immedmem2mem(self, from_loc, to_loc): + # move a ConstFloatLoc directly to a StackLoc, as two MOVs + # (even on x86-64, because the immediates are encoded as 32 bits) + assert isinstance(from_loc, ConstFloatLoc) + assert isinstance(to_loc, StackLoc) + low_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[0] + high_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[1] + low_part = intmask(low_part) + high_part = intmask(high_part) + self.mc.MOV_bi(to_loc.value, low_part) + self.mc.MOV_bi(to_loc.value + 4, high_part) + def regalloc_perform(self, op, arglocs, resloc): genop_list[op.getopnum()](self, op, arglocs, resloc) @@ -1134,18 +1019,18 @@ self.mc.MOVSD_sx(p, loc.value) else: self.mc.MOV_sr(p, loc.value) - p += round_up_to_4(loc.width) + p += loc.get_width() p = 0 for i in range(start, n): loc = arglocs[i] if not isinstance(loc, RegLoc): - if loc.width == 8: + if loc.get_width() == 8: self.mc.MOVSD(xmm0, loc) self.mc.MOVSD_sx(p, xmm0.value) else: self.mc.MOV(tmp, loc) self.mc.MOV_sr(p, tmp.value) - p += round_up_to_4(loc.width) + p += loc.get_width() self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) @@ -1882,10 +1767,10 @@ DESCR_INT = 0x01 DESCR_FLOAT = 0x02 DESCR_SPECIAL = 
0x03 - # XXX: 4*8 works on i386, should we optimize for that case? - CODE_FROMSTACK = 4*16 + CODE_FROMSTACK = 4 * (8 + 8*IS_X86_64) CODE_STOP = 0 | DESCR_SPECIAL CODE_HOLE = 4 | DESCR_SPECIAL + CODE_INPUTARG = 8 | DESCR_SPECIAL def write_failure_recovery_description(self, mc, failargs, locs): for i in range(len(failargs)): @@ -1901,7 +1786,11 @@ raise AssertionError("bogus kind") loc = locs[i] if isinstance(loc, StackLoc): - n = self.CODE_FROMSTACK//4 + loc.position + pos = loc.position + if pos < 0: + mc.writechar(chr(self.CODE_INPUTARG)) + pos = ~pos + n = self.CODE_FROMSTACK//4 + pos else: assert isinstance(loc, RegLoc) n = loc.value @@ -1921,6 +1810,7 @@ descr_to_box_type = [REF, INT, FLOAT] bytecode = rffi.cast(rffi.UCHARP, bytecode) arglocs = [] + code_inputarg = False while 1: # decode the next instruction from the bytecode code = rffi.cast(lltype.Signed, bytecode[0]) @@ -1939,11 +1829,17 @@ break kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 + if code_inputarg: + code = ~code + code_inputarg = False loc = X86FrameManager.frame_pos(code, descr_to_box_type[kind]) elif code == self.CODE_STOP: break elif code == self.CODE_HOLE: continue + elif code == self.CODE_INPUTARG: + code_inputarg = True + continue else: # 'code' identifies a register kind = code & 3 @@ -1959,6 +1855,7 @@ def grab_frame_values(self, bytecode, frame_addr, allregisters): # no malloc allowed here!! self.fail_ebp = allregisters[16 + ebp.value] + code_inputarg = False num = 0 value_hi = 0 while 1: @@ -1979,6 +1876,9 @@ # load the value from the stack kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 + if code_inputarg: + code = ~code + code_inputarg = False stackloc = frame_addr + get_ebp_ofs(code) value = rffi.cast(rffi.LONGP, stackloc)[0] if kind == self.DESCR_FLOAT and WORD == 4: @@ -1991,6 +1891,9 @@ if code == self.CODE_HOLE: num += 1 continue + if code == self.CODE_INPUTARG: + code_inputarg = True + continue assert code == self.CODE_STOP break code >>= 2 @@ -2095,9 +1998,9 @@ # returns in eax the fail_index # now we return from the complete frame, which starts from - # _assemble_bootstrap_code(). The LEA in _call_footer below throws - # away most of the frame, including all the PUSHes that we did just - # above. + # _call_header_with_stack_check(). The LEA in _call_footer below + # throws away most of the frame, including all the PUSHes that we + # did just above. 
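
The failure-recovery description gains a CODE_INPUTARG prefix for stack locations with a negative position, i.e. input arguments living in the caller's frame above the saved ebp. A hedged Python re-statement of just that packing and unpacking follows; the real encoder also spreads large values over several bytes, which is left out here, and the constants are written for the 32-bit case (IS_X86_64 = 0):

    IS_X86_64 = 0                      # 32-bit flavour of the constants below
    DESCR_REF, DESCR_INT, DESCR_FLOAT, DESCR_SPECIAL = 0x00, 0x01, 0x02, 0x03
    CODE_FROMSTACK = 4 * (8 + 8 * IS_X86_64)
    CODE_INPUTARG = 8 | DESCR_SPECIAL

    def encode_stackloc(kind, position):
        """Encode one stack location as a list of byte values (single byte only)."""
        out = []
        if position < 0:               # input argument: lives above ebp
            out.append(CODE_INPUTARG)
            position = ~position       # store the bitwise complement, which is >= 0
        n = kind + 4 * (CODE_FROMSTACK // 4 + position)
        assert n <= 0x7F               # the real code splits larger n over several bytes
        out.append(n)
        return out

    def decode_stackloc(data):
        """Inverse of encode_stackloc()."""
        i = 0
        code_inputarg = False
        if data[i] == CODE_INPUTARG:
            code_inputarg = True
            i += 1
        code = data[i]
        kind = code & 3
        position = (code - CODE_FROMSTACK) >> 2
        if code_inputarg:
            position = ~position
        return kind, position

    for kind, pos in [(DESCR_INT, 0), (DESCR_FLOAT, 5), (DESCR_REF, -1), (DESCR_INT, -3)]:
        assert decode_stackloc(encode_stackloc(kind, pos)) == (kind, pos)
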
self._call_footer() rawstart = mc.materialize(self.cpu.asmmemmgr, []) @@ -2180,7 +2083,7 @@ argtypes=op.getdescr().get_arg_types(), callconv=op.getdescr().get_call_conv()) - if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: + if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.type == FLOAT: # a float or a long long return if op.getdescr().get_return_type() == 'L': self.mc.MOV_br(resloc.value, eax.value) # long long @@ -2344,11 +2247,11 @@ fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) descr = op.getdescr() - assert isinstance(descr, LoopToken) - assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) + assert isinstance(descr, JitCellToken) + assert len(arglocs) - 2 == descr.compiled_loop_token._debug_nbargs # - # Write a call to the direct_bootstrap_code of the target assembler - self._emit_call(fail_index, imm(descr._x86_direct_bootstrap_code), + # Write a call to the target assembler + self._emit_call(fail_index, imm(descr._x86_function_addr), arglocs, 2, tmp=eax) if op.result is None: assert result_loc is None @@ -2578,15 +2481,21 @@ gcrootmap.put(self.gcrootmap_retaddr_forced, mark) self.gcrootmap_retaddr_forced = -1 - def target_arglocs(self, loop_token): - return loop_token._x86_arglocs - - def closing_jump(self, loop_token): - if loop_token is self.currently_compiling_loop: + def closing_jump(self, target_token): + # The backend's logic assumes that the target code is in a piece of + # assembler that was also called with the same number of arguments, + # so that the locations [ebp+8..] of the input arguments are valid + # stack locations both before and after the jump. + my_nbargs = self.current_clt._debug_nbargs + target_nbargs = target_token._x86_clt._debug_nbargs + assert my_nbargs == target_nbargs + # + target = target_token._x86_loop_code + if target_token in self.target_tokens_currently_compiling: curpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(self.looppos - curpos) + self.mc.JMP_l(target - curpos) else: - self.mc.JMP(imm(loop_token._x86_loop_code)) + self.mc.JMP(imm(target)) def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) @@ -2659,11 +2568,6 @@ num = getattr(rop, opname.upper()) genop_list[num] = value -def round_up_to_4(size): - if size < 4: - return 4 - return size - # XXX: ri386 migration shims: def addr_add(reg_or_imm1, reg_or_imm2, offset=0, scale=0): return AddressLoc(reg_or_imm1, reg_or_imm2, scale, offset) diff --git a/pypy/jit/backend/x86/jump.py b/pypy/jit/backend/x86/jump.py --- a/pypy/jit/backend/x86/jump.py +++ b/pypy/jit/backend/x86/jump.py @@ -1,6 +1,6 @@ import sys from pypy.tool.pairtype import extendabletype -from pypy.jit.backend.x86.regloc import ImmedLoc, StackLoc +from pypy.jit.backend.x86.regloc import ImmediateAssemblerLocation, StackLoc def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg): pending_dests = len(dst_locations) @@ -12,7 +12,7 @@ srccount[key] = 0 for i in range(len(dst_locations)): src = src_locations[i] - if isinstance(src, ImmedLoc): + if isinstance(src, ImmediateAssemblerLocation): continue key = src._getregkey() if key in srccount: @@ -31,7 +31,7 @@ srccount[key] = -1 # means "it's done" pending_dests -= 1 src = src_locations[i] - if not isinstance(src, ImmedLoc): + if not isinstance(src, ImmediateAssemblerLocation): key = src._getregkey() if key in srccount: srccount[key] -= 1 @@ -66,6 +66,13 @@ def _move(assembler, src, dst, tmpreg): if 
dst.is_memory_reference() and src.is_memory_reference(): + if isinstance(src, ImmediateAssemblerLocation): + assembler.regalloc_immedmem2mem(src, dst) + return + if tmpreg is None: + assembler.regalloc_push(src) + assembler.regalloc_pop(dst) + return assembler.regalloc_mov(src, tmpreg) src = tmpreg assembler.regalloc_mov(src, dst) @@ -87,7 +94,7 @@ dstloc = dst_locations2[i] if isinstance(loc, StackLoc): key = loc._getregkey() - if (key in dst_keys or (loc.width > WORD and + if (key in dst_keys or (loc.get_width() > WORD and (key + WORD) in dst_keys)): assembler.regalloc_push(loc) extrapushes.append(dstloc) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -5,7 +5,8 @@ import os from pypy.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr, ResOperation, BoxPtr, ConstFloat, - BoxFloat, LoopToken, INT, REF, FLOAT) + BoxFloat, INT, REF, FLOAT, + TargetToken, JitCellToken) from pypy.jit.backend.x86.regloc import * from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.rlib.objectmodel import we_are_translated @@ -27,7 +28,7 @@ class X86RegisterManager(RegisterManager): box_types = [INT, REF] - all_regs = [eax, ecx, edx, ebx, esi, edi] + all_regs = [ecx, eax, edx, ebx, esi, edi] no_lower_byte_regs = [esi, edi] save_around_call_regs = [eax, edx, ecx] frame_reg = ebp @@ -59,7 +60,7 @@ class X86_64_RegisterManager(X86RegisterManager): # r11 omitted because it's used as scratch - all_regs = [eax, ecx, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15] + all_regs = [ecx, eax, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15] no_lower_byte_regs = [] save_around_call_regs = [eax, ecx, edx, esi, edi, r8, r9, r10] @@ -129,15 +130,19 @@ @staticmethod def frame_pos(i, box_type): if IS_X86_32 and box_type == FLOAT: - return StackLoc(i, get_ebp_ofs(i+1), 2, box_type) + return StackLoc(i, get_ebp_ofs(i+1), box_type) else: - return StackLoc(i, get_ebp_ofs(i), 1, box_type) + return StackLoc(i, get_ebp_ofs(i), box_type) @staticmethod def frame_size(box_type): if IS_X86_32 and box_type == FLOAT: return 2 else: return 1 + @staticmethod + def get_loc_index(loc): + assert isinstance(loc, StackLoc) + return loc.position if WORD == 4: gpr_reg_mgr_cls = X86RegisterManager @@ -159,6 +164,8 @@ # to be read/used by the assembler too self.jump_target_descr = None self.close_stack_struct = 0 + self.final_jump_op = None + self.min_bytes_before_label = 0 def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() @@ -167,74 +174,83 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity = self._compute_vars_longevity(inputargs, operations) - self.longevity = longevity - self.rm = gpr_reg_mgr_cls(longevity, + self._compute_vars_longevity(inputargs, operations) + self.rm = gpr_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) - self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, + self.xrm = xmm_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) return operations def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): operations = self._prepare(inputargs, operations, allgcrefs) - jump = operations[-1] - loop_consts = self._compute_loop_consts(inputargs, jump, looptoken) - self.loop_consts = loop_consts - return self._process_inputargs(inputargs), operations + self._set_initial_bindings(inputargs) + # note: we need to 
make a copy of inputargs because possibly_free_vars + # is also used on op args, which is a non-resizable list + self.possibly_free_vars(list(inputargs)) + self.min_bytes_before_label = 13 + return operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, allgcrefs): operations = self._prepare(inputargs, operations, allgcrefs) - self.loop_consts = {} self._update_bindings(arglocs, inputargs) - self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] return operations def reserve_param(self, n): self.param_depth = max(self.param_depth, n) - def _process_inputargs(self, inputargs): - # XXX we can sort out here by longevity if we need something - # more optimal - floatlocs = [None] * len(inputargs) - nonfloatlocs = [None] * len(inputargs) - # Don't use all_regs[0] for passing arguments around a loop. - # Must be kept in sync with consider_jump(). - # XXX this should probably go to llsupport/regalloc.py - xmmtmp = self.xrm.free_regs.pop(0) - tmpreg = self.rm.free_regs.pop(0) - assert tmpreg == X86RegisterManager.all_regs[0] - assert xmmtmp == X86XMMRegisterManager.all_regs[0] - for i in range(len(inputargs)): - arg = inputargs[i] - assert not isinstance(arg, Const) - reg = None - if arg not in self.loop_consts and self.longevity[arg][1] > -1: - if arg.type == FLOAT: - # xxx is it really a good idea? at the first CALL they - # will all be flushed anyway - reg = self.xrm.try_allocate_reg(arg) + def _set_initial_bindings(self, inputargs): + if IS_X86_64: + inputargs = self._set_initial_bindings_regs_64(inputargs) + # ... + # stack layout: arg2 + # arg1 + # arg0 + # return address + # saved ebp <-- ebp points here + # ... + cur_frame_pos = - 1 - FRAME_FIXED_SIZE + assert get_ebp_ofs(cur_frame_pos-1) == 2*WORD + assert get_ebp_ofs(cur_frame_pos-2) == 3*WORD + # + for box in inputargs: + assert isinstance(box, Box) + # + if IS_X86_32 and box.type == FLOAT: + cur_frame_pos -= 2 + else: + cur_frame_pos -= 1 + loc = self.fm.frame_pos(cur_frame_pos, box.type) + self.fm.set_binding(box, loc) + + def _set_initial_bindings_regs_64(self, inputargs): + # In reverse order for use with pop() + unused_gpr = [r9, r8, ecx, edx, esi, edi] + unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] + # + pass_on_stack = [] + # + for box in inputargs: + assert isinstance(box, Box) + # + if box.type == FLOAT: + if len(unused_xmm) > 0: + ask = unused_xmm.pop() + got = self.xrm.try_allocate_reg(box, selected_reg=ask) + assert ask == got else: - reg = self.rm.try_allocate_reg(arg) - if reg: - loc = reg + pass_on_stack.append(box) else: - loc = self.fm.loc(arg) - if arg.type == FLOAT: - floatlocs[i] = loc - else: - nonfloatlocs[i] = loc - # otherwise we have it saved on stack, so no worry - self.rm.free_regs.insert(0, tmpreg) - self.xrm.free_regs.insert(0, xmmtmp) - assert tmpreg not in nonfloatlocs - assert xmmtmp not in floatlocs - # note: we need to make a copy of inputargs because possibly_free_vars - # is also used on op args, which is a non-resizable list - self.possibly_free_vars(list(inputargs)) - return nonfloatlocs, floatlocs + if len(unused_gpr) > 0: + ask = unused_gpr.pop() + got = self.rm.try_allocate_reg(box, selected_reg=ask) + assert ask == got + else: + pass_on_stack.append(box) + # + return pass_on_stack def possibly_free_var(self, var): if var.type == FLOAT: @@ -287,15 +303,15 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _compute_loop_consts(self, inputargs, jump, looptoken): - if jump.getopnum() != rop.JUMP or jump.getdescr() is not 
looptoken: - loop_consts = {} - else: - loop_consts = {} - for i in range(len(inputargs)): - if inputargs[i] is jump.getarg(i): - loop_consts[inputargs[i]] = i - return loop_consts + #def _compute_loop_consts(self, inputargs, jump, looptoken): + # if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: + # loop_consts = {} + # else: + # loop_consts = {} + # for i in range(len(inputargs)): + # if inputargs[i] is jump.getarg(i): + # loop_consts[inputargs[i]] = i + # return loop_consts def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py @@ -311,7 +327,7 @@ self.xrm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) else: if isinstance(loc, RegLoc): if loc is ebp: @@ -320,7 +336,7 @@ self.rm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) self.rm.free_regs = [] for reg in self.rm.all_regs: if reg not in used: @@ -356,7 +372,7 @@ def get_current_depth(self): # return (self.fm.frame_depth, self.param_depth), but trying to share # the resulting tuple among several calls - arg0 = self.fm.frame_depth + arg0 = self.fm.get_frame_depth() arg1 = self.param_depth result = self.assembler._current_depths_cache if result[0] != arg0 or result[1] != arg1: @@ -445,13 +461,26 @@ i += 1 assert not self.rm.reg_bindings assert not self.xrm.reg_bindings + self.flush_loop() self.assembler.mc.mark_op(None) # end of the loop + def flush_loop(self): + # rare case: if the loop is too short, pad with NOPs + mc = self.assembler.mc + while mc.get_relative_pos() < self.min_bytes_before_label: + mc.NOP() + def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" + + # returns a pair longevity/useful. Non-useful variables are ones that + # never appear in the assembler or it does not matter if they appear on + # stack or in registers. 
Main example is loop arguments that go + # only to guard operations or to jump or to finish produced = {} last_used = {} + last_real_usage = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -459,10 +488,16 @@ continue assert op.result not in produced produced[op.result] = i + opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) - if isinstance(arg, Box) and arg not in last_used: + if not isinstance(arg, Box): + continue + if arg not in last_used: last_used[arg] = i + if opnum != rop.JUMP and opnum != rop.LABEL: + if arg not in last_real_usage: + last_real_usage[arg] = i if op.is_guard(): for arg in op.getfailargs(): if arg is None: # hole @@ -470,7 +505,8 @@ assert isinstance(arg, Box) if arg not in last_used: last_used[arg] = i - + self.last_real_usage = last_real_usage + # longevity = {} for arg in produced: if arg in last_used: @@ -486,7 +522,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity + self.longevity = longevity def loc(self, v): if v is None: # xxx kludgy @@ -883,7 +919,7 @@ def consider_call_assembler(self, op, guard_op): descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, JitCellToken) jd = descr.outermost_jitdriver_sd assert jd is not None size = jd.portal_calldescr.get_result_size(self.translate_support_code) @@ -1313,35 +1349,72 @@ self.rm.possibly_free_var(tmpbox_low) self.rm.possibly_free_var(tmpbox_high) + def compute_hint_frame_locations(self, operations): + # optimization only: fill in the 'hint_frame_locations' dictionary + # of 'fm' based on the JUMP at the end of the loop, by looking + # at where we would like the boxes to be after the jump. + op = operations[-1] + if op.getopnum() != rop.JUMP: + return + self.final_jump_op = op + descr = op.getdescr() + assert isinstance(descr, TargetToken) + if descr._x86_loop_code != 0: + # if the target LABEL was already compiled, i.e. if it belongs + # to some already-compiled piece of code + self._compute_hint_frame_locations_from_descr(descr) + #else: + # The loop ends in a JUMP going back to a LABEL in the same loop. + # We cannot fill 'hint_frame_locations' immediately, but we can + # wait until the corresponding consider_label() to know where the + # we would like the boxes to be after the jump. 
+ + def _compute_hint_frame_locations_from_descr(self, descr): + arglocs = descr._x86_arglocs + jump_op = self.final_jump_op + assert len(arglocs) == jump_op.numargs() + for i in range(jump_op.numargs()): + box = jump_op.getarg(i) + if isinstance(box, Box): + loc = arglocs[i] + if isinstance(loc, StackLoc): + self.fm.hint_frame_locations[box] = loc + def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, TargetToken) + arglocs = descr._x86_arglocs self.jump_target_descr = descr - nonfloatlocs, floatlocs = assembler.target_arglocs(self.jump_target_descr) - # compute 'tmploc' to be all_regs[0] by spilling what is there - box = TempBox() - box1 = TempBox() - tmpreg = X86RegisterManager.all_regs[0] - tmploc = self.rm.force_allocate_reg(box, selected_reg=tmpreg) - xmmtmp = X86XMMRegisterManager.all_regs[0] - self.xrm.force_allocate_reg(box1, selected_reg=xmmtmp) # Part about non-floats - # XXX we don't need a copy, we only just the original list - src_locations1 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type != FLOAT] - assert tmploc not in nonfloatlocs - dst_locations1 = [loc for loc in nonfloatlocs if loc is not None] + src_locations1 = [] + dst_locations1 = [] # Part about floats - src_locations2 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type == FLOAT] - dst_locations2 = [loc for loc in floatlocs if loc is not None] + src_locations2 = [] + dst_locations2 = [] + # Build the four lists + for i in range(op.numargs()): + box = op.getarg(i) + src_loc = self.loc(box) + dst_loc = arglocs[i] + if box.type != FLOAT: + src_locations1.append(src_loc) + dst_locations1.append(dst_loc) + else: + src_locations2.append(src_loc) + dst_locations2.append(dst_loc) + # Do we have a temp var? 
+ if IS_X86_64: + tmpreg = X86_64_SCRATCH_REG + xmmtmp = X86_64_XMM_SCRATCH_REG + else: + tmpreg = None + xmmtmp = None + # Do the remapping remap_frame_layout_mixed(assembler, - src_locations1, dst_locations1, tmploc, + src_locations1, dst_locations1, tmpreg, src_locations2, dst_locations2, xmmtmp) - self.rm.possibly_free_var(box) - self.xrm.possibly_free_var(box1) self.possibly_free_vars_for_op(op) assembler.closing_jump(self.jump_target_descr) @@ -1357,7 +1430,7 @@ def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): shape = gcrootmap.get_basic_shape(IS_X86_64) - for v, val in self.fm.frame_bindings.items(): + for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) gcrootmap.add_frame_offset(shape, get_ebp_ofs(val.position)) @@ -1392,6 +1465,56 @@ # the FORCE_TOKEN operation returns directly 'ebp' self.rm.force_allocate_frame_reg(op.result) + def consider_label(self, op): + descr = op.getdescr() + assert isinstance(descr, TargetToken) + inputargs = op.getarglist() + arglocs = [None] * len(inputargs) + # + # we use force_spill() on the boxes that are not going to be really + # used any more in the loop, but that are kept alive anyway + # by being in a next LABEL's or a JUMP's argument or fail_args + # of some guard + position = self.rm.position + for arg in inputargs: + assert isinstance(arg, Box) + if self.last_real_usage.get(arg, -1) <= position: + self.force_spill_var(arg) + # + # we need to make sure that no variable is stored in ebp + for arg in inputargs: + if self.loc(arg) is ebp: + loc2 = self.fm.loc(arg) + self.assembler.mc.MOV(loc2, ebp) + self.rm.bindings_to_frame_reg.clear() + # + for i in range(len(inputargs)): + arg = inputargs[i] + assert isinstance(arg, Box) + loc = self.loc(arg) + assert loc is not ebp + arglocs[i] = loc + if isinstance(loc, RegLoc): + self.fm.mark_as_free(arg) + # + # if we are too close to the start of the loop, the label's target may + # get overridden by redirect_call_assembler(). (rare case) + self.flush_loop() + # + descr._x86_arglocs = arglocs + descr._x86_loop_code = self.assembler.mc.get_relative_pos() + descr._x86_clt = self.assembler.current_clt + self.assembler.target_tokens_currently_compiling[descr] = None + self.possibly_free_vars_for_op(op) + # + # if the LABEL's descr is precisely the target of the JUMP at the + # end of the same loop, i.e. if what we are compiling is a single + # loop that ends up jumping to this LABEL, then we can now provide + # the hints about the expected position of the spilled variables. + jump_op = self.final_jump_op + if jump_op is not None and jump_op.getdescr() is descr: + self._compute_hint_frame_locations_from_descr(descr) + def not_implemented_op(self, op): not_implemented("not implemented operation: %s" % op.getopname()) @@ -1447,3 +1570,7 @@ def not_implemented(msg): os.write(2, '[x86/regalloc] %s\n' % msg) raise NotImplementedError(msg) + +# xxx hack: set a default value for TargetToken._x86_loop_code. +# If 0, we know that it is a LABEL that was not compiled yet. +TargetToken._x86_loop_code = 0 diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -16,8 +16,7 @@ # class AssemblerLocation(object): - # XXX: Is adding "width" here correct? 
- _attrs_ = ('value', 'width', '_location_code') + _attrs_ = ('value', '_location_code') _immutable_ = True def _getregkey(self): return self.value @@ -28,6 +27,9 @@ def location_code(self): return self._location_code + def get_width(self): + raise NotImplementedError + def value_r(self): return self.value def value_b(self): return self.value def value_s(self): return self.value @@ -43,14 +45,21 @@ _immutable_ = True _location_code = 'b' - def __init__(self, position, ebp_offset, num_words, type): - assert ebp_offset < 0 # so no confusion with RegLoc.value + def __init__(self, position, ebp_offset, type): + # _getregkey() returns self.value; the value returned must not + # conflict with RegLoc._getregkey(). It doesn't a bit by chance, + # so let it fail the following assert if it no longer does. + assert not (0 <= ebp_offset < 8 + 8 * IS_X86_64) self.position = position self.value = ebp_offset - self.width = num_words * WORD # One of INT, REF, FLOAT self.type = type + def get_width(self): + if self.type == FLOAT: + return 8 + return WORD + def __repr__(self): return '%d(%%ebp)' % (self.value,) @@ -64,10 +73,8 @@ self.value = regnum self.is_xmm = is_xmm if self.is_xmm: - self.width = 8 self._location_code = 'x' else: - self.width = WORD self._location_code = 'r' def __repr__(self): if self.is_xmm: @@ -75,6 +82,11 @@ else: return rx86.R.names[self.value] + def get_width(self): + if self.is_xmm: + return 8 + return WORD + def lowest8bits(self): assert not self.is_xmm return RegLoc(rx86.low_byte(self.value), False) @@ -92,9 +104,11 @@ else: return eax -class ImmedLoc(AssemblerLocation): +class ImmediateAssemblerLocation(AssemblerLocation): _immutable_ = True - width = WORD + +class ImmedLoc(ImmediateAssemblerLocation): + _immutable_ = True _location_code = 'i' def __init__(self, value): @@ -105,6 +119,9 @@ def getint(self): return self.value + def get_width(self): + return WORD + def __repr__(self): return "ImmedLoc(%d)" % (self.value) @@ -117,7 +134,6 @@ class AddressLoc(AssemblerLocation): _immutable_ = True - width = WORD # The address is base_loc + (scaled_loc << scale) + static_offset def __init__(self, base_loc, scaled_loc, scale=0, static_offset=0): assert 0 <= scale < 4 @@ -146,6 +162,9 @@ info = getattr(self, attr, '?') return '' % (self._location_code, info) + def get_width(self): + return WORD + def value_a(self): return self.loc_a @@ -180,32 +199,34 @@ raise AssertionError(self._location_code) return result -class ConstFloatLoc(AssemblerLocation): - # XXX: We have to use this class instead of just AddressLoc because - # we want a width of 8 (... I think. Check this!) +class ConstFloatLoc(ImmediateAssemblerLocation): _immutable_ = True - width = 8 _location_code = 'j' def __init__(self, address): self.value = address + def get_width(self): + return 8 + def __repr__(self): return '' % (self.value,) if IS_X86_32: - class FloatImmedLoc(AssemblerLocation): + class FloatImmedLoc(ImmediateAssemblerLocation): # This stands for an immediate float. It cannot be directly used in # any assembler instruction. Instead, it is meant to be decomposed # in two 32-bit halves. On 64-bit, FloatImmedLoc() is a function # instead; see below. 
_immutable_ = True - width = 8 _location_code = '#' # don't use me def __init__(self, floatstorage): self.aslonglong = floatstorage + def get_width(self): + return 8 + def low_part(self): return intmask(self.aslonglong) diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.llinterp import LLInterpreter from pypy.rlib.objectmodel import we_are_translated +from pypy.jit.codewriter import longlong from pypy.jit.metainterp import history, compile from pypy.jit.backend.x86.assembler import Assembler386 from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS @@ -21,7 +22,6 @@ supports_floats = True supports_singlefloats = True - BOOTSTRAP_TP = lltype.FuncType([], lltype.Signed) dont_keepalive_stuff = False # for tests with_threads = False @@ -91,15 +91,6 @@ return self.assembler.assemble_bridge(faildescr, inputargs, operations, original_loop_token, log=log) - def set_future_value_int(self, index, intvalue): - self.assembler.fail_boxes_int.setitem(index, intvalue) - - def set_future_value_float(self, index, floatvalue): - self.assembler.fail_boxes_float.setitem(index, floatvalue) - - def set_future_value_ref(self, index, ptrvalue): - self.assembler.fail_boxes_ptr.setitem(index, ptrvalue) - def get_latest_value_int(self, index): return self.assembler.fail_boxes_int.getitem(index) @@ -122,27 +113,28 @@ # the FORCE_TOKEN operation and this helper both return 'ebp'. return self.assembler.fail_ebp - def execute_token(self, executable_token): - addr = executable_token._x86_bootstrap_code - #llop.debug_print(lltype.Void, ">>>> Entering", addr) - func = rffi.cast(lltype.Ptr(self.BOOTSTRAP_TP), addr) - fail_index = self._execute_call(func) - #llop.debug_print(lltype.Void, "<<<< Back") - return self.get_fail_descr_from_number(fail_index) - - def _execute_call(self, func): - # help flow objspace - prev_interpreter = None - if not self.translate_support_code: - prev_interpreter = LLInterpreter.current_interpreter - LLInterpreter.current_interpreter = self.debug_ll_interpreter - res = 0 - try: - res = func() - finally: + def make_execute_token(self, *ARGS): + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, lltype.Signed)) + # + def execute_token(executable_token, *args): + clt = executable_token.compiled_loop_token + assert len(args) == clt._debug_nbargs + # + addr = executable_token._x86_function_addr + func = rffi.cast(FUNCPTR, addr) + #llop.debug_print(lltype.Void, ">>>> Entering", addr) + prev_interpreter = None # help flow space if not self.translate_support_code: - LLInterpreter.current_interpreter = prev_interpreter - return res + prev_interpreter = LLInterpreter.current_interpreter + LLInterpreter.current_interpreter = self.debug_ll_interpreter + try: + fail_index = func(*args) + finally: + if not self.translate_support_code: + LLInterpreter.current_interpreter = prev_interpreter + #llop.debug_print(lltype.Void, "<<<< Back") + return self.get_fail_descr_from_number(fail_index) + return execute_token def cast_ptr_to_int(x): adr = llmemory.cast_ptr_to_adr(x) @@ -215,14 +207,3 @@ super(CPU_X86_64, self).__init__(*args, **kwargs) CPU = CPU386 - -# silence warnings -##history.LoopToken._x86_param_depth = 0 -##history.LoopToken._x86_arglocs = (None, None) -##history.LoopToken._x86_frame_depth = 0 -##history.LoopToken._x86_bootstrap_code = 0 -##history.LoopToken._x86_direct_bootstrap_code = 0 -##history.LoopToken._x86_loop_code = 0 
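
With set_future_value_int/float/ref gone, callers pass the input values straight to execute_token(), and make_execute_token() builds one specialized entry point per argument signature. The sketch below only mimics that calling convention in plain Python; the real version casts _x86_function_addr to a function pointer of the matching low-level type and calls it:

    class FakeCPU(object):
        def make_execute_token(self, *ARGS):
            # One specialized entry point per argument signature (INT/FLOAT/REF
            # in the real backend; here only the argument count is checked).
            def execute_token(executable_token, *args):
                assert len(args) == len(ARGS)
                # the real code calls the machine code found at
                # executable_token._x86_function_addr here
                return executable_token['fail_descr']
            return execute_token

    cpu = FakeCPU()
    execute_token = cpu.make_execute_token('INT', 'INT', 'FLOAT')

    fake_looptoken = {'fail_descr': 'guard_7'}         # hypothetical compiled loop
    fail = execute_token(fake_looptoken, 10, 20, 3.5)  # previously this needed three
                                                       # set_future_value_*() calls first
    assert fail == 'guard_7'
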
-##history.LoopToken._x86_debug_checksum = 0 -##compile.AbstractFailDescr._x86_current_depths = (0, 0) -##compile.AbstractFailDescr._x86_adr_jump_offset = 0 diff --git a/pypy/jit/backend/x86/test/test_assembler.py b/pypy/jit/backend/x86/test/test_assembler.py --- a/pypy/jit/backend/x86/test/test_assembler.py +++ b/pypy/jit/backend/x86/test/test_assembler.py @@ -46,12 +46,13 @@ xmm2] assert len(failargs) == len(locs) assembler.write_failure_recovery_description(mc, failargs, locs) - nums = [Assembler386.DESCR_INT + 4*(16+0), - Assembler386.DESCR_REF + 4*(16+1), - Assembler386.DESCR_FLOAT + 4*(16+10), - Assembler386.DESCR_INT + 4*(16+100), - Assembler386.DESCR_REF + 4*(16+101), - Assembler386.DESCR_FLOAT + 4*(16+110), + base = 8 + 8*IS_X86_64 + nums = [Assembler386.DESCR_INT + 4*(base+0), + Assembler386.DESCR_REF + 4*(base+1), + Assembler386.DESCR_FLOAT + 4*(base+10), + Assembler386.DESCR_INT + 4*(base+100), + Assembler386.DESCR_REF + 4*(base+101), + Assembler386.DESCR_FLOAT + 4*(base+110), Assembler386.CODE_HOLE, Assembler386.CODE_HOLE, Assembler386.DESCR_INT + 4*ebx.value, diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -4,7 +4,7 @@ import py from pypy.jit.metainterp.history import BoxInt, ConstInt,\ - BoxPtr, ConstPtr, TreeLoop + BoxPtr, ConstPtr, TreeLoop, TargetToken from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.codewriter import heaptracker from pypy.jit.codewriter.effectinfo import EffectInfo @@ -113,6 +113,8 @@ descr0 = cpu.fielddescrof(S, 'int') ptr0 = struct_ref + targettoken = TargetToken() + namespace = locals().copy() def test_basic(self): @@ -136,6 +138,7 @@ def test_bug_0(self): ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8] + label(i0, i1, i2, i3, i4, i5, i6, i7, i8, descr=targettoken) guard_value(i2, 1) [i2, i3, i4, i5, i6, i7, i0, i1, i8] guard_class(i4, 138998336) [i4, i5, i6, i7, i0, i1, i8] i11 = getfield_gc(i4, descr=descr0) @@ -163,7 +166,7 @@ guard_false(i32) [i4, i6, i7, i0, i1, i24] i33 = getfield_gc(i0, descr=descr0) guard_value(i33, ConstPtr(ptr0)) [i4, i6, i7, i0, i1, i33, i24] - jump(i0, i1, 1, 17, i4, ConstPtr(ptr0), i6, i7, i24) + jump(i0, i1, 1, 17, i4, ConstPtr(ptr0), i6, i7, i24, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0, 0, 0, 0, 0, 0], run=False) diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -71,6 +71,18 @@ ('mov', eax, s24), ('mov', s12, edi)] +def test_no_tmp_reg(): + assembler = MockAssembler() + s8 = frame_pos(0, INT) + s12 = frame_pos(13, INT) + s20 = frame_pos(20, INT) + s24 = frame_pos(221, INT) + remap_frame_layout(assembler, [s8, eax, s12], [s20, s24, edi], None) + assert assembler.ops == [('push', s8), + ('pop', s20), + ('mov', eax, s24), + ('mov', s12, edi)] + def test_reordering(): assembler = MockAssembler() s8 = frame_pos(8, INT) diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -5,10 +5,11 @@ def test_compile_bridge_not_deeper(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' loop = 
self.interpret(ops, [0]) assert self.getint(0) == 20 @@ -18,22 +19,22 @@ finish(i3, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - self.cpu.set_future_value_int(0, 0) - fail = self.run(loop) + fail = self.run(loop, 0) assert fail.identifier == 2 assert self.getint(0) == 21 def test_compile_bridge_deeper(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' loop = self.interpret(ops, [0]) - previous = loop.token._x86_frame_depth - assert loop.token._x86_param_depth == 0 + previous = loop._jitcelltoken.compiled_loop_token.frame_depth + assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 assert self.getint(0) == 20 ops = ''' [i1] @@ -42,19 +43,18 @@ i5 = int_add(i4, 1) i6 = int_add(i5, 1) i7 = int_add(i5, i4) + force_spill(i5) i8 = int_add(i7, 1) i9 = int_add(i8, 1) finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - descr = loop.operations[2].getdescr() + descr = loop.operations[3].getdescr() new = descr._x86_bridge_frame_depth - assert descr._x86_bridge_param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? - if IS_X86_32: - assert new > previous - self.cpu.set_future_value_int(0, 0) - fail = self.run(loop) + assert descr._x86_bridge_param_depth == 0 + # the force_spill() forces the stack to grow + assert new > previous + fail = self.run(loop, 0) assert fail.identifier == 2 assert self.getint(0) == 21 assert self.getint(1) == 22 @@ -64,28 +64,30 @@ def test_bridge_jump_to_other_loop(self): loop = self.interpret(''' [i0, i10, i11, i12, i13, i14, i15, i16] + label(i0, i10, i11, i12, i13, i14, i15, i16, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1, i10, i11, i12, i13, i14, i15, i16) - ''', [0]) + jump(i1, i10, i11, i12, i13, i14, i15, i16, descr=targettoken) + ''', [0, 0, 0, 0, 0, 0, 0, 0]) other_loop = self.interpret(''' - [i3] + [i3, i10, i11, i12, i13, i14, i15, i16] + label(i3, descr=targettoken2) guard_false(i3, descr=fdescr2) [i3] - jump(i3) - ''', [1]) + jump(i3, descr=targettoken2) + ''', [1, 0, 0, 0, 0, 0, 0, 0]) ops = ''' [i3] - jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=looptoken) + jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=targettoken) ''' - bridge = self.attach_bridge(ops, other_loop, 0, looptoken=loop.token) - self.cpu.set_future_value_int(0, 1) - fail = self.run(other_loop) + bridge = self.attach_bridge(ops, other_loop, 1) + fail = self.run(other_loop, 1, 0, 0, 0, 0, 0, 0, 0) assert fail.identifier == 1 def test_bridge_jumps_to_self_deeper(self): loop = self.interpret(''' [i0, i1, i2, i31, i32, i33] + label(i0, i1, i2, i31, i32, i33, descr=targettoken) i98 = same_as(0) i99 = same_as(1) i30 = int_add(i1, i2) @@ -94,8 +96,8 @@ guard_false(i4) [i98, i3] i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] - jump(i3, i30, 1, i30, i30, i30) - ''', [0]) + jump(i3, i30, 1, i30, i30, i30, descr=targettoken) + ''', [0, 0, 0, 0, 0, 0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' @@ -104,28 +106,28 @@ i8 = int_add(i3, 1) i6 = int_add(i8, i10) i7 = int_add(i3, i6) + force_spill(i6) + force_spill(i7) + force_spill(i8) i12 = int_add(i7, i8) i11 = int_add(i12, i6) - jump(i3, i12, i11, i10, i6, i7, descr=looptoken) + jump(i3, i12, i11, i10, i6, i7, descr=targettoken) ''' - bridge = self.attach_bridge(ops, loop, 5, looptoken=loop.token) - guard_op = loop.operations[5] - loop_frame_depth = loop.token._x86_frame_depth - 
assert loop.token._x86_param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? - if IS_X86_32: - assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth + loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth + bridge = self.attach_bridge(ops, loop, 6) + guard_op = loop.operations[6] + assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 + # the force_spill() forces the stack to grow + assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth assert guard_op.getdescr()._x86_bridge_param_depth == 0 - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 0) - self.cpu.set_future_value_int(2, 0) - self.run(loop) + self.run(loop, 0, 0, 0, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 def test_bridge_jumps_to_self_shallower(self): loop = self.interpret(''' [i0, i1, i2] + label(i0, i1, i2, descr=targettoken) i98 = same_as(0) i99 = same_as(1) i3 = int_add(i0, 1) @@ -133,19 +135,16 @@ guard_false(i4) [i98, i3] i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] - jump(i3, i1, i2) - ''', [0]) + jump(i3, i1, i2, descr=targettoken) + ''', [0, 0, 0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' [i97, i3] - jump(i3, 0, 1, descr=looptoken) + jump(i3, 0, 1, descr=targettoken) ''' - bridge = self.attach_bridge(ops, loop, 4, looptoken=loop.token) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 0) - self.cpu.set_future_value_int(2, 0) - self.run(loop) + bridge = self.attach_bridge(ops, loop, 5) + self.run(loop, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -4,7 +4,7 @@ import py from pypy.jit.metainterp.history import BoxInt, ConstInt,\ - BoxPtr, ConstPtr, LoopToken, BasicFailDescr + BoxPtr, ConstPtr, BasicFailDescr, JitCellToken, TargetToken from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.detect_cpu import getcpuclass @@ -96,10 +96,16 @@ raising_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, EffectInfo.MOST_GENERAL) + targettoken = TargetToken() + targettoken2 = TargetToken() fdescr1 = BasicFailDescr(1) fdescr2 = BasicFailDescr(2) fdescr3 = BasicFailDescr(3) + def setup_method(self, meth): + self.targettoken._x86_loop_code = 0 + self.targettoken2._x86_loop_code = 0 + def f1(x): return x+1 @@ -134,21 +140,31 @@ def interpret(self, ops, args, run=True): loop = self.parse(ops) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - for i, arg in enumerate(args): + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + arguments = [] + for arg in args: if isinstance(arg, int): - self.cpu.set_future_value_int(i, arg) + arguments.append(arg) elif isinstance(arg, float): arg = longlong.getfloatstorage(arg) - self.cpu.set_future_value_float(i, arg) + arguments.append(arg) else: assert isinstance(lltype.typeOf(arg), lltype.Ptr) llgcref = lltype.cast_opaque_ptr(llmemory.GCREF, arg) - self.cpu.set_future_value_ref(i, llgcref) + arguments.append(llgcref) + loop._jitcelltoken = looptoken if run: - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, *arguments) return loop + def prepare_loop(self, ops): + loop = self.parse(ops) + regalloc = RegAlloc(self.cpu.assembler, False) 
+ regalloc.prepare_loop(loop.inputargs, loop.operations, + loop.original_jitcell_token, []) + return regalloc + def getint(self, index): return self.cpu.get_latest_value_int(index) @@ -167,10 +183,7 @@ gcref = self.cpu.get_latest_value_ref(index) return lltype.cast_opaque_ptr(T, gcref) - def attach_bridge(self, ops, loop, guard_op_index, looptoken=None, **kwds): - if looptoken is not None: - self.namespace = self.namespace.copy() - self.namespace['looptoken'] = looptoken + def attach_bridge(self, ops, loop, guard_op_index, **kwds): guard_op = loop.operations[guard_op_index] assert guard_op.is_guard() bridge = self.parse(ops, **kwds) @@ -178,20 +191,21 @@ [box.type for box in guard_op.getfailargs()]) faildescr = guard_op.getdescr() self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations, - loop.token) + loop._jitcelltoken) return bridge - def run(self, loop): - return self.cpu.execute_token(loop.token) + def run(self, loop, *arguments): + return self.cpu.execute_token(loop._jitcelltoken, *arguments) class TestRegallocSimple(BaseTestRegalloc): def test_simple_loop(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' self.interpret(ops, [0]) assert self.getint(0) == 20 @@ -199,29 +213,30 @@ def test_two_loops_and_a_bridge(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_add(i0, 1) i5 = int_lt(i4, 20) guard_true(i5) [i4, i1, i2, i3] - jump(i4, i1, i2, i3) + jump(i4, i1, i2, i3, descr=targettoken) ''' loop = self.interpret(ops, [0, 0, 0, 0]) ops2 = ''' - [i5] + [i5, i6, i7, i8] + label(i5, descr=targettoken2) i1 = int_add(i5, 1) i3 = int_add(i1, 1) i4 = int_add(i3, 1) i2 = int_lt(i4, 30) guard_true(i2) [i4] - jump(i4) + jump(i4, descr=targettoken2) ''' - loop2 = self.interpret(ops2, [0]) + loop2 = self.interpret(ops2, [0, 0, 0, 0]) bridge_ops = ''' [i4] - jump(i4, i4, i4, i4, descr=looptoken) + jump(i4, i4, i4, i4, descr=targettoken) ''' - bridge = self.attach_bridge(bridge_ops, loop2, 4, looptoken=loop.token) - self.cpu.set_future_value_int(0, 0) - self.run(loop2) + bridge = self.attach_bridge(bridge_ops, loop2, 5) + self.run(loop2, 0, 0, 0, 0) assert self.getint(0) == 31 assert self.getint(1) == 30 assert self.getint(2) == 30 @@ -230,10 +245,11 @@ def test_pointer_arg(self): ops = ''' [i0, p0] + label(i0, p0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 10) guard_true(i2) [p0] - jump(i1, p0) + jump(i1, p0, descr=targettoken) ''' S = lltype.GcStruct('S') ptr = lltype.malloc(S) @@ -258,8 +274,7 @@ loop = self.interpret(ops, [0]) assert self.getint(0) == 1 bridge = self.attach_bridge(bridge_ops, loop, 2) - self.cpu.set_future_value_int(0, 0) - self.run(loop) + self.run(loop, 0) assert self.getint(0) == 1 def test_inputarg_unused(self): @@ -285,9 +300,7 @@ assert self.getint(0) == 0 assert self.getint(1) == 10 bridge = self.attach_bridge(bridge_ops, loop, 0) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - self.run(loop) + self.run(loop, 0, 10) assert self.getint(0) == 0 assert self.getint(1) == 10 @@ -304,17 +317,16 @@ finish(1, 2) ''' self.attach_bridge(bridge_ops, loop, 0) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 1) - self.run(loop) + self.run(loop, 0, 1) def test_spill_for_constant(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_add(3, i1) i5 = int_lt(i4, 30) guard_true(i5) [i0, i4, i2, i3] - jump(1, i4, 3, 4) + jump(1, i4, 3, 4, 
descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1, 30, 3, 4] @@ -322,31 +334,34 @@ def test_spill_for_constant_lshift(self): ops = ''' [i0, i2, i1, i3] + label(i0, i2, i1, i3, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, 3, i5, 4) + jump(i4, 3, i5, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, i5, 3, 4) + jump(i4, i5, 3, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] ops = ''' [i0, i3, i1, i2] + label(i0, i3, i1, i2, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, 4, i5, 3) + jump(i4, 4, i5, 3, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] @@ -354,11 +369,12 @@ def test_result_selected_reg_via_neg(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i6 = int_neg(i2) i7 = int_add(1, i1) i4 = int_lt(i7, 10) guard_true(i4) [i0, i6, i7] - jump(1, i7, i2, i6) + jump(1, i7, i2, i6, descr=targettoken) ''' self.interpret(ops, [0, 0, 3, 0]) assert self.getints(3) == [1, -3, 10] @@ -366,11 +382,12 @@ def test_compare_memory_result_survives(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_lt(i0, i1) i5 = int_add(i3, 1) i6 = int_lt(i5, 30) guard_true(i6) [i4] - jump(i0, i1, i4, i5) + jump(i0, i1, i4, i5, descr=targettoken) ''' self.interpret(ops, [0, 10, 0, 0]) assert self.getint(0) == 1 @@ -378,12 +395,13 @@ def test_jump_different_args(self): ops = ''' [i0, i15, i16, i18, i1, i2, i3] + label(i0, i15, i16, i18, i1, i2, i3, descr=targettoken) i4 = int_add(i3, 1) i5 = int_lt(i4, 20) guard_true(i5) [i2, i1] - jump(i0, i18, i15, i16, i2, i1, i4) + jump(i0, i18, i15, i16, i2, i1, i4, descr=targettoken) ''' - self.interpret(ops, [0, 1, 2, 3]) + self.interpret(ops, [0, 1, 2, 3, 0, 0, 0]) def test_op_result_unused(self): ops = ''' @@ -417,11 +435,24 @@ finish(i0, i1, i2, i3, i4, i5, i6, i7, i8) ''' self.attach_bridge(bridge_ops, loop, 1) - for i in range(9): - self.cpu.set_future_value_int(i, i) - self.run(loop) + self.run(loop, 0, 1, 2, 3, 4, 5, 6, 7, 8) assert self.getints(9) == range(9) + def test_loopargs(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + if IS_X86_64: + assert len(regalloc.rm.reg_bindings) == 4 + assert len(regalloc.fm.bindings) == 0 + else: + assert len(regalloc.rm.reg_bindings) == 0 + assert len(regalloc.fm.bindings) == 4 + + class TestRegallocCompOps(BaseTestRegalloc): def test_cmp_op_0(self): @@ -438,6 +469,7 @@ class TestRegallocMoreRegisters(BaseTestRegalloc): cpu = BaseTestRegalloc.cpu + targettoken = TargetToken() S = lltype.GcStruct('S', ('field', lltype.Char)) fielddescr = cpu.fielddescrof(S, 'field') @@ -510,6 +542,7 @@ def test_division_optimized(self): ops = ''' [i7, i6] + label(i7, i6, descr=targettoken) i18 = int_floordiv(i7, i6) i19 = int_xor(i7, i6) i21 = int_lt(i19, 0) @@ -517,7 +550,7 @@ i23 = int_is_true(i22) i24 = int_eq(i6, 4) guard_false(i24) [i18] - jump(i18, i6) + jump(i18, i6, descr=targettoken) ''' self.interpret(ops, [10, 4]) assert self.getint(0) == 2 @@ -586,9 +619,10 @@ i10 = 
call(ConstClass(f1ptr), i0, descr=f1_calldescr) finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) - assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] - assert loop.token._x86_param_depth == self.expected_param_depth(1) + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) + assert self.getints(10) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9] + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(1) def test_two_calls(self): ops = ''' @@ -597,9 +631,10 @@ i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) finish(i11, i1, i2, i3, i4, i5, i6, i7, i8, i9) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) - assert self.getints(11) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] - assert loop.token._x86_param_depth == self.expected_param_depth(2) + loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) + assert self.getints(10) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9] + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(2) def test_call_many_arguments(self): # NB: The first and last arguments in the call are constants. This @@ -612,7 +647,8 @@ ''' loop = self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9]) assert self.getint(0) == 55 - assert loop.token._x86_param_depth == self.expected_param_depth(10) + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(10) def test_bridge_calls_1(self): ops = ''' @@ -632,9 +668,7 @@ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) - self.cpu.set_future_value_int(0, 4) - self.cpu.set_future_value_int(1, 7) - self.run(loop) + self.run(loop, 4, 7) assert self.getint(0) == 5*7 def test_bridge_calls_2(self): @@ -655,8 +689,6 @@ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2) - self.cpu.set_future_value_int(0, 4) - self.cpu.set_future_value_int(1, 7) - self.run(loop) + self.run(loop, 4, 7) assert self.getint(0) == 29 diff --git a/pypy/jit/backend/x86/test/test_regalloc2.py b/pypy/jit/backend/x86/test/test_regalloc2.py --- a/pypy/jit/backend/x86/test/test_regalloc2.py +++ b/pypy/jit/backend/x86/test/test_regalloc2.py @@ -1,6 +1,6 @@ import py from pypy.jit.metainterp.history import ResOperation, BoxInt, ConstInt,\ - BoxPtr, ConstPtr, BasicFailDescr, LoopToken + BoxPtr, ConstPtr, BasicFailDescr, JitCellToken from pypy.jit.metainterp.resoperation import rop from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.arch import WORD @@ -20,10 +20,9 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 9) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, 9) assert cpu.get_latest_value_int(0) == (9 >> 3) assert cpu.get_latest_value_int(1) == (~18) @@ -43,10 +42,9 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, -10) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, -10) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == -1000 assert cpu.get_latest_value_int(2) == 1 @@ -140,19 +138,9 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - 
cpu.set_future_value_int(0, -13) - cpu.set_future_value_int(1, 10) - cpu.set_future_value_int(2, 10) - cpu.set_future_value_int(3, 8) - cpu.set_future_value_int(4, -8) - cpu.set_future_value_int(5, -16) - cpu.set_future_value_int(6, -18) - cpu.set_future_value_int(7, 46) - cpu.set_future_value_int(8, -12) - cpu.set_future_value_int(9, 26) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, -13, 10, 10, 8, -8, -16, -18, 46, -12, 26) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == 0 assert cpu.get_latest_value_int(2) == 0 @@ -255,19 +243,9 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 17) - cpu.set_future_value_int(1, -20) - cpu.set_future_value_int(2, -6) - cpu.set_future_value_int(3, 6) - cpu.set_future_value_int(4, 1) - cpu.set_future_value_int(5, 13) - cpu.set_future_value_int(6, 13) - cpu.set_future_value_int(7, 9) - cpu.set_future_value_int(8, 49) - cpu.set_future_value_int(9, 8) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, 17, -20, -6, 6, 1, 13, 13, 9, 49, 8) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == 8 assert cpu.get_latest_value_int(2) == 1 diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -1,9 +1,10 @@ import py from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr, rclass from pypy.rpython.annlowlevel import llhelper -from pypy.jit.metainterp.history import ResOperation, LoopToken +from pypy.jit.metainterp.history import ResOperation, TargetToken, JitCellToken from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstFloat, - ConstPtr, Box, BoxFloat, BasicFailDescr) + ConstPtr, Box, BoxFloat, + BasicFailDescr) from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.arch import WORD from pypy.jit.backend.x86.rx86 import fits_in_32bits @@ -279,13 +280,9 @@ descr=BasicFailDescr()), ] ops[-2].setfailargs([i1]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([b], ops, looptoken) - if op == rop.INT_IS_TRUE: - self.cpu.set_future_value_int(0, b.value) - else: - self.cpu.set_future_value_ref(0, b.value) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, b.value) result = self.cpu.get_latest_value_int(0) if guard == rop.GUARD_FALSE: assert result == execute(self.cpu, None, @@ -329,11 +326,10 @@ ] ops[-2].setfailargs([i1]) inputargs = [i for i in (a, b) if isinstance(i, Box)] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, ops, looptoken) - for i, box in enumerate(inputargs): - self.cpu.set_future_value_int(i, box.value) - self.cpu.execute_token(looptoken) + inputvalues = [box.value for box in inputargs] + self.cpu.execute_token(looptoken, *inputvalues) result = self.cpu.get_latest_value_int(0) expected = execute(self.cpu, None, op, None, a, b).value if guard == rop.GUARD_FALSE: @@ -353,9 +349,10 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() + targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.number = 17 class FakeString(object): def __init__(self, val): @@ -365,14 +362,15 @@ return self.val operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), 
ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0], None), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[3].setfailargs([i1]) + operations[-2].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) name, loopaddress, loopsize = agent.functions[0] assert name == "Loop # 17: hello (loop counter 0)" @@ -385,7 +383,7 @@ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0], None), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -397,8 +395,7 @@ assert address >= loopaddress + loopsize assert size >= 10 # randomish number - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -408,11 +405,13 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] debug._log = dlog = debug.DebugLog() @@ -499,12 +498,10 @@ ops[3].setfailargs([]) ops[5].setfailargs([]) ops[7].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) - self.cpu.set_future_value_int(0, 123450) - self.cpu.set_future_value_int(1, 123408) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 123450, 123408) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 42 assert self.cpu.get_latest_value_int(1) == 42 @@ -523,19 +520,20 @@ loop = """ [i0] + label(i0, descr=targettoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1) + jump(i1, descr=targettoken) """ - ops = parse(loop) + ops = parse(loop, namespace={'targettoken': TargetToken()}) debug._log = dlog = debug.DebugLog() try: self.cpu.assembler.set_debug(True) - self.cpu.compile_loop(ops.inputargs, ops.operations, ops.token) - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(ops.token) + looptoken = JitCellToken() + self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) + self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] assert struct.i == 10 @@ -547,16 +545,17 @@ def test_debugger_checksum(self): loop = """ [i0] + label(i0, descr=targettoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1) + jump(i1, descr=targettoken) """ - ops = parse(loop) + ops = parse(loop, namespace={'targettoken': TargetToken()}) self.cpu.assembler.set_debug(True) - self.cpu.compile_loop(ops.inputargs, ops.operations, ops.token) - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(ops.token) - assert ops.token._x86_debug_checksum == sum([op.getopnum() + looptoken = JitCellToken() + 
self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) + self.cpu.execute_token(looptoken, 0) + assert looptoken._x86_debug_checksum == sum([op.getopnum() for op in ops.operations]) diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -457,6 +457,46 @@ def test_compile_framework_7(self): self.run('compile_framework_7') + def define_compile_framework_7_interior(cls): + # Array of structs containing pointers (test the write barrier + # for setinteriorfield_gc) + S = lltype.GcStruct('S', ('i', lltype.Signed)) + A = lltype.GcArray(lltype.Struct('entry', ('x', lltype.Ptr(S)), + ('y', lltype.Ptr(S)), + ('z', lltype.Ptr(S)))) + class Glob: + a = lltype.nullptr(A) + glob = Glob() + # + def make_s(i): + s = lltype.malloc(S) + s.i = i + return s + # + @unroll_safe + def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): + a = glob.a + if not a: + a = glob.a = lltype.malloc(A, 10) + i = 0 + while i < 10: + a[i].x = make_s(n + i * 100 + 1) + a[i].y = make_s(n + i * 100 + 2) + a[i].z = make_s(n + i * 100 + 3) + i += 1 + i = 0 + while i < 10: + check(a[i].x.i == n + i * 100 + 1) + check(a[i].y.i == n + i * 100 + 2) + check(a[i].z.i == n + i * 100 + 3) + i += 1 + n -= x.foo + return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s + return None, f, None + + def test_compile_framework_7_interior(self): + self.run('compile_framework_7_interior') + def define_compile_framework_8(cls): # Array of pointers, of unknown length (test write_barrier_from_array) def before(n, x): diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -241,12 +241,15 @@ return op.opname == 'jit_force_quasi_immutable' class RandomEffectsAnalyzer(BoolGraphAnalyzer): - def analyze_direct_call(self, graph, seen=None): - if hasattr(graph, "func") and hasattr(graph.func, "_ptr"): - if graph.func._ptr._obj.random_effects_on_gcobjs: + def analyze_external_call(self, op, seen=None): + try: + funcobj = op.args[0].value._obj + if funcobj.random_effects_on_gcobjs: return True - return super(RandomEffectsAnalyzer, self).analyze_direct_call(graph, - seen) + except (AttributeError, lltype.DelayedPointer): + return True # better safe than sorry + return super(RandomEffectsAnalyzer, self).analyze_external_call( + op, seen) def analyze_simple_operation(self, op, graphinfo): return False diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -15,6 +15,8 @@ from pypy.translator.simplify import get_funcobj from pypy.translator.unsimplify import varoftype +class UnsupportedMallocFlags(Exception): + pass def transform_graph(graph, cpu=None, callcontrol=None, portal_jd=None): """Transform a control flow graph to make it suitable for @@ -205,7 +207,24 @@ if op.args[0] in self.vable_array_vars: self.vable_array_vars[op.result]= self.vable_array_vars[op.args[0]] - rewrite_op_cast_pointer = rewrite_op_same_as + def rewrite_op_cast_pointer(self, op): + newop = self.rewrite_op_same_as(op) + assert newop is None + return + # disabled for now + if (self._is_rclass_instance(op.args[0]) and + self._is_rclass_instance(op.result)): + FROM = op.args[0].concretetype.TO + TO = op.result.concretetype.TO + if lltype._castdepth(TO, FROM) > 0: + vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, TO) + 
const_vtable = Constant(vtable, lltype.typeOf(vtable)) + return [None, # hack, do the right renaming from op.args[0] to op.result + SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + + def rewrite_op_jit_record_known_class(self, op): + return SpaceOperation("record_known_class", [op.args[0], op.args[1]], None) + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass @@ -479,13 +498,29 @@ else: log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) + def _rewrite_raw_malloc(self, op, name, args): + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) + TYPE = op.args[0].value + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, args, + extra = (TYPE,), + extrakey = TYPE) + def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': - ARRAY = op.args[0].value - return self._do_builtin_call(op, 'raw_malloc', - [op.args[2]], - extra = (ARRAY,), - extrakey = ARRAY) + return self._rewrite_raw_malloc(op, 'raw_malloc_varsize', + [op.args[2]]) if op.args[0].value == rstr.STR: return SpaceOperation('newstr', [op.args[2]], op.result) elif op.args[0].value == rstr.UNICODE: @@ -498,11 +533,18 @@ op.result) def rewrite_op_free(self, op): - flags = op.args[1].value - assert flags['flavor'] == 'raw' - ARRAY = op.args[0].concretetype.TO - return self._do_builtin_call(op, 'raw_free', [op.args[0]], - extra = (ARRAY,), extrakey = ARRAY) + d = op.args[1].value.copy() + assert d['flavor'] == 'raw' + d.pop('flavor') + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) + STRUCT = op.args[0].concretetype.TO + name = 'raw_free' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[0]], + extra = (STRUCT,), extrakey = STRUCT) def rewrite_op_getarrayitem(self, op): ARRAY = op.args[0].concretetype.TO @@ -703,6 +745,9 @@ return [op0, op1] def rewrite_op_malloc(self, op): + if op.args[1].value['flavor'] == 'raw': + return self._rewrite_raw_malloc(op, 'raw_malloc_fixedsize', []) + # assert op.args[1].value == {'flavor': 'gc'} STRUCT = op.args[0].value vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, STRUCT) @@ -1053,35 +1098,20 @@ # jit.codewriter.support. 
for _op, _oopspec in [('llong_invert', 'INVERT'), - ('ullong_invert', 'INVERT'), ('llong_lt', 'LT'), ('llong_le', 'LE'), ('llong_eq', 'EQ'), ('llong_ne', 'NE'), ('llong_gt', 'GT'), ('llong_ge', 'GE'), - ('ullong_lt', 'ULT'), - ('ullong_le', 'ULE'), - ('ullong_eq', 'EQ'), - ('ullong_ne', 'NE'), - ('ullong_gt', 'UGT'), - ('ullong_ge', 'UGE'), ('llong_add', 'ADD'), ('llong_sub', 'SUB'), ('llong_mul', 'MUL'), ('llong_and', 'AND'), ('llong_or', 'OR'), ('llong_xor', 'XOR'), - ('ullong_add', 'ADD'), - ('ullong_sub', 'SUB'), - ('ullong_mul', 'MUL'), - ('ullong_and', 'AND'), - ('ullong_or', 'OR'), - ('ullong_xor', 'XOR'), ('llong_lshift', 'LSHIFT'), ('llong_rshift', 'RSHIFT'), - ('ullong_lshift', 'LSHIFT'), - ('ullong_rshift', 'URSHIFT'), ('cast_int_to_longlong', 'FROM_INT'), ('truncate_longlong_to_int', 'TO_INT'), ('cast_float_to_longlong', 'FROM_FLOAT'), @@ -1104,6 +1134,21 @@ ('cast_uint_to_ulonglong', 'FROM_UINT'), ('cast_float_to_ulonglong', 'FROM_FLOAT'), ('cast_ulonglong_to_float', 'U_TO_FLOAT'), + ('ullong_invert', 'INVERT'), + ('ullong_lt', 'ULT'), + ('ullong_le', 'ULE'), + ('ullong_eq', 'EQ'), + ('ullong_ne', 'NE'), + ('ullong_gt', 'UGT'), + ('ullong_ge', 'UGE'), + ('ullong_add', 'ADD'), + ('ullong_sub', 'SUB'), + ('ullong_mul', 'MUL'), + ('ullong_and', 'AND'), + ('ullong_or', 'OR'), + ('ullong_xor', 'XOR'), + ('ullong_lshift', 'LSHIFT'), + ('ullong_rshift', 'URSHIFT'), ]: exec py.code.Source(''' def rewrite_op_%s(self, op): @@ -1134,7 +1179,7 @@ def rewrite_op_llong_is_true(self, op): v = varoftype(op.args[0].concretetype) - op0 = SpaceOperation('cast_int_to_longlong', + op0 = SpaceOperation('cast_primitive', [Constant(0, lltype.Signed)], v) args = [op.args[0], v] diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -258,6 +258,9 @@ y = ~r_ulonglong(xll) return u_to_longlong(y) +def _ll_1_ullong_invert(xull): + return ~xull + def _ll_2_llong_lt(xll, yll): return xll < yll @@ -276,16 +279,22 @@ def _ll_2_llong_ge(xll, yll): return xll >= yll -def _ll_2_llong_ult(xull, yull): +def _ll_2_ullong_eq(xull, yull): + return xull == yull + +def _ll_2_ullong_ne(xull, yull): + return xull != yull + +def _ll_2_ullong_ult(xull, yull): return xull < yull -def _ll_2_llong_ule(xull, yull): +def _ll_2_ullong_ule(xull, yull): return xull <= yull -def _ll_2_llong_ugt(xull, yull): +def _ll_2_ullong_ugt(xull, yull): return xull > yull -def _ll_2_llong_uge(xull, yull): +def _ll_2_ullong_uge(xull, yull): return xull >= yull def _ll_2_llong_add(xll, yll): @@ -312,14 +321,41 @@ z = r_ulonglong(xll) ^ r_ulonglong(yll) return u_to_longlong(z) +def _ll_2_ullong_add(xull, yull): + z = (xull) + (yull) + return (z) + +def _ll_2_ullong_sub(xull, yull): + z = (xull) - (yull) + return (z) + +def _ll_2_ullong_mul(xull, yull): + z = (xull) * (yull) + return (z) + +def _ll_2_ullong_and(xull, yull): + z = (xull) & (yull) + return (z) + +def _ll_2_ullong_or(xull, yull): + z = (xull) | (yull) + return (z) + +def _ll_2_ullong_xor(xull, yull): + z = (xull) ^ (yull) + return (z) + def _ll_2_llong_lshift(xll, y): z = r_ulonglong(xll) << y return u_to_longlong(z) +def _ll_2_ullong_lshift(xull, y): + return xull << y + def _ll_2_llong_rshift(xll, y): return xll >> y -def _ll_2_llong_urshift(xull, y): +def _ll_2_ullong_urshift(xull, y): return xull >> y def _ll_1_llong_from_int(x): @@ -563,15 +599,75 @@ return p return _ll_0_alloc_with_del - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, 
flavor='raw') - return _ll_1_raw_malloc + def build_raw_malloc_varsize_builder(zero=False, + add_memory_pressure=False, + track_allocation=True): + def build_ll_1_raw_malloc_varsize(ARRAY): + def _ll_1_raw_malloc_varsize(n): + return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) + return _ll_1_raw_malloc_varsize + return build_ll_1_raw_malloc_varsize - def build_ll_1_raw_free(ARRAY): - def _ll_1_raw_free(p): - lltype.free(p, flavor='raw') - return _ll_1_raw_free + build_ll_1_raw_malloc_varsize = ( + build_raw_malloc_varsize_builder()) + build_ll_1_raw_malloc_varsize_zero = ( + build_raw_malloc_varsize_builder(zero=True)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure = ( + build_raw_malloc_varsize_builder(zero=True, add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_add_memory_pressure = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_no_track_allocation = ( + build_raw_malloc_varsize_builder(track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, add_memory_pressure=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True, track_allocation=False)) + + def build_raw_malloc_fixedsize_builder(zero=False, + add_memory_pressure=False, + track_allocation=True): + def build_ll_0_raw_malloc_fixedsize(STRUCT): + def _ll_0_raw_malloc_fixedsize(): + return lltype.malloc(STRUCT, flavor='raw', zero=zero, + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) + return _ll_0_raw_malloc_fixedsize + return build_ll_0_raw_malloc_fixedsize + + build_ll_0_raw_malloc_fixedsize = ( + build_raw_malloc_fixedsize_builder()) + build_ll_0_raw_malloc_fixedsize_zero = ( + build_raw_malloc_fixedsize_builder(zero=True)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(zero=True, add_memory_pressure=True)) + build_ll_0_raw_malloc_fixedsize_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True)) + build_ll_0_raw_malloc_fixedsize_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, add_memory_pressure=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True, track_allocation=False)) + + def build_raw_free_builder(track_allocation=True): + def build_ll_1_raw_free(ARRAY): + def _ll_1_raw_free(p): + lltype.free(p, flavor='raw', + track_allocation=track_allocation) + return _ll_1_raw_free + return build_ll_1_raw_free + + build_ll_1_raw_free = ( + build_raw_free_builder()) + build_ll_1_raw_free_no_track_allocation = ( + build_raw_free_builder(track_allocation=False)) + class OOtypeHelpers: diff --git a/pypy/jit/codewriter/test/test_call.py b/pypy/jit/codewriter/test/test_call.py --- a/pypy/jit/codewriter/test/test_call.py +++ 
b/pypy/jit/codewriter/test/test_call.py @@ -192,3 +192,21 @@ [op] = block.operations call_descr = cc.getcalldescr(op) assert call_descr.extrainfo.has_random_effects() + +def test_random_effects_on_stacklet_switch(): + from pypy.jit.backend.llgraph.runner import LLtypeCPU + from pypy.rlib._rffi_stacklet import switch, thread_handle, handle + @jit.dont_look_inside + def f(): + switch(rffi.cast(thread_handle, 0), rffi.cast(handle, 0)) + + rtyper = support.annotate(f, []) + jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0]) + cc = CallControl(LLtypeCPU(rtyper), jitdrivers_sd=[jitdriver_sd]) + res = cc.find_all_graphs(FakePolicy()) + + [f_graph] = [x for x in res if x.func is f] + [block, _] = list(f_graph.iterblocks()) + op = block.operations[-1] + call_descr = cc.getcalldescr(op) + assert call_descr.extrainfo.has_random_effects() diff --git a/pypy/jit/codewriter/test/test_codewriter.py b/pypy/jit/codewriter/test/test_codewriter.py --- a/pypy/jit/codewriter/test/test_codewriter.py +++ b/pypy/jit/codewriter/test/test_codewriter.py @@ -217,7 +217,7 @@ cw.make_jitcodes(verbose=True) # s = jitdriver_sd.mainjitcode.dump() - assert 'residual_call_ir_i $<* fn _ll_1_raw_malloc__Signed>' in s + assert 'residual_call_ir_i $<* fn _ll_1_raw_malloc_varsize__Signed>' in s assert 'setarrayitem_raw_i' in s assert 'getarrayitem_raw_i' in s assert 'residual_call_ir_v $<* fn _ll_1_raw_free__arrayPtr>' in s diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -1,3 +1,5 @@ + +import py import random try: from itertools import product @@ -15,12 +17,12 @@ from pypy.objspace.flow.model import FunctionGraph, Block, Link from pypy.objspace.flow.model import SpaceOperation, Variable, Constant -from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.lltypesystem.module import ll_math from pypy.translator.unsimplify import varoftype from pypy.jit.codewriter import heaptracker, effectinfo from pypy.jit.codewriter.flatten import ListOfKind -from pypy.jit.codewriter.jtransform import Transformer +from pypy.jit.codewriter.jtransform import Transformer, UnsupportedMallocFlags from pypy.jit.metainterp.history import getkind def const(x): @@ -538,6 +540,73 @@ assert op1.opname == '-live-' assert op1.args == [] +def test_raw_malloc(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw'}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc_varsize' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_zero(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc_varsize_zero' # pseudo-fn as a str + assert op1.opname == '-live-' + assert 
op1.args == [] + +def test_raw_malloc_unsupported_flag(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'unsupported_flag': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) + +def test_raw_malloc_fixedsize(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc', [Constant(S, lltype.Void), flags], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_r_i' + assert op0.args[0].value == 'raw_malloc_fixedsize_zero' #pseudo-fn as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_free(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + for flag in [True, False]: + flags = Constant({'flavor': 'raw', 'track_allocation': flag}, + lltype.Void) + op = SpaceOperation('free', [varoftype(lltype.Ptr(S)), flags], + varoftype(lltype.Void)) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_v' + if flag: + pseudo_op_name = 'raw_free' + else: + pseudo_op_name = 'raw_free_no_track_allocation' + assert op0.args[0].value == pseudo_op_name # pseudo-function as a str + assert op1.opname == '-live-' + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address @@ -1140,4 +1209,4 @@ assert op1.opname == 'mark_opaque_ptr' assert op1.args == [v1] assert op1.result is None - assert op2 is None \ No newline at end of file + assert op2 is None diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py --- a/pypy/jit/codewriter/test/test_longlong.py +++ b/pypy/jit/codewriter/test/test_longlong.py @@ -78,7 +78,7 @@ oplist = tr.rewrite_operation(op) assert len(oplist) == 2 assert oplist[0].opname == 'residual_call_irf_f' - assert oplist[0].args[0].value == 'llong_from_int' + assert oplist[0].args[0].value == opname.split('_')[0]+'_from_int' assert oplist[0].args[1] == 'calldescr-84' assert list(oplist[0].args[2]) == [const(0)] assert list(oplist[0].args[3]) == [] diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -518,6 +518,9 @@ @arguments("r") def bhimpl_mark_opaque_ptr(a): pass + @arguments("r", "i") + def bhimpl_record_known_class(a, b): + pass @arguments("i", returns="i") def bhimpl_int_copy(a): @@ -1501,7 +1504,6 @@ all_virtuals=None): from pypy.jit.metainterp.resume import blackhole_from_resumedata #debug_start('jit-blackhole') - metainterp_sd.profiler.start_blackhole() blackholeinterp = blackhole_from_resumedata( metainterp_sd.blackholeinterpbuilder, jitdriver_sd, @@ -1515,10 +1517,9 @@ current_exc = blackholeinterp._prepare_resume_from_failure( resumedescr.guard_opnum, dont_change_position) - try: - _run_forever(blackholeinterp, current_exc) - finally: - metainterp_sd.profiler.end_blackhole() + #try: + _run_forever(blackholeinterp, current_exc) + #finally: #debug_stop('jit-blackhole') def convert_and_run_from_pyjitpl(metainterp, raising_exception=False): @@ -1526,7 +1527,6 @@ # 'metainterp.framestack'. 
#debug_start('jit-blackhole') metainterp_sd = metainterp.staticdata - metainterp_sd.profiler.start_blackhole() nextbh = None for frame in metainterp.framestack: curbh = metainterp_sd.blackholeinterpbuilder.acquire_interp() @@ -1543,8 +1543,7 @@ firstbh.exception_last_value = current_exc current_exc = lltype.nullptr(rclass.OBJECTPTR.TO) # - try: - _run_forever(firstbh, current_exc) - finally: - metainterp_sd.profiler.end_blackhole() + #try: + _run_forever(firstbh, current_exc) + #finally: #debug_stop('jit-blackhole') diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -9,12 +9,13 @@ from pypy.tool.sourcetools import func_with_new_name from pypy.jit.metainterp.resoperation import ResOperation, rop, get_deep_immutable_oplist -from pypy.jit.metainterp.history import TreeLoop, Box, History, LoopToken +from pypy.jit.metainterp.history import TreeLoop, Box, History, JitCellToken, TargetToken from pypy.jit.metainterp.history import AbstractFailDescr, BoxInt -from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const +from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const, ConstInt from pypy.jit.metainterp import history from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.jit.metainterp.optimize import InvalidLoop +from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resume import NUMBERING, PENDINGFIELDSP from pypy.jit.codewriter import heaptracker, longlong @@ -23,7 +24,7 @@ from pypy.jit.metainterp.jitprof import ABORT_BRIDGE raise SwitchToBlackhole(ABORT_BRIDGE) -def show_loop(metainterp_sd, loop=None, error=None): +def show_procedures(metainterp_sd, procedure=None, error=None): # debugging if option.view or option.viewloops: if error: @@ -32,11 +33,12 @@ errmsg += ': ' + str(error) else: errmsg = None - if loop is None: # or type(loop) is TerminatingLoop: - extraloops = [] + if procedure is None: + extraprocedures = [] else: - extraloops = [loop] - metainterp_sd.stats.view(errmsg=errmsg, extraloops=extraloops) + extraprocedures = [procedure] + metainterp_sd.stats.view(errmsg=errmsg, + extraprocedures=extraprocedures) def create_empty_loop(metainterp, name_prefix=''): name = metainterp.staticdata.stats.name_for_new_loop() @@ -45,131 +47,261 @@ return loop -def make_loop_token(nb_args, jitdriver_sd): - loop_token = LoopToken() - loop_token.outermost_jitdriver_sd = jitdriver_sd - return loop_token +def make_jitcell_token(jitdriver_sd): + jitcell_token = JitCellToken() + jitcell_token.outermost_jitdriver_sd = jitdriver_sd + return jitcell_token def record_loop_or_bridge(metainterp_sd, loop): """Do post-backend recordings and cleanups on 'loop'. 
""" - # get the original loop token (corresponding to 'loop', or if that is - # a bridge, to the loop that this bridge belongs to) - looptoken = loop.token - assert looptoken is not None + # get the original jitcell token corresponding to jitcell form which + # this trace starts + original_jitcell_token = loop.original_jitcell_token + assert original_jitcell_token is not None if metainterp_sd.warmrunnerdesc is not None: # for tests - assert looptoken.generation > 0 # has been registered with memmgr - wref = weakref.ref(looptoken) + assert original_jitcell_token.generation > 0 # has been registered with memmgr + wref = weakref.ref(original_jitcell_token) for op in loop.operations: descr = op.getdescr() if isinstance(descr, ResumeDescr): descr.wref_original_loop_token = wref # stick it there n = descr.index if n >= 0: # we also record the resumedescr number - looptoken.compiled_loop_token.record_faildescr_index(n) - elif isinstance(descr, LoopToken): - # for a JUMP or a CALL_ASSEMBLER: record it as a potential jump. + original_jitcell_token.compiled_loop_token.record_faildescr_index(n) + elif isinstance(descr, JitCellToken): + # for a CALL_ASSEMBLER: record it as a potential jump. + if descr is not original_jitcell_token: + original_jitcell_token.record_jump_to(descr) + descr.exported_state = None + op._descr = None # clear reference, mostly for tests + elif isinstance(descr, TargetToken): + # for a JUMP: record it as a potential jump. # (the following test is not enough to prevent more complicated # cases of cycles, but at least it helps in simple tests of # test_memgr.py) - if descr is not looptoken: - looptoken.record_jump_to(descr) - op._descr = None # clear reference, mostly for tests + if descr.original_jitcell_token is not original_jitcell_token: + assert descr.original_jitcell_token is not None + original_jitcell_token.record_jump_to(descr.original_jitcell_token) + # exported_state is clear by optimizeopt when the short preamble is + # constrcucted. if that did not happen the label should not show up + # in a trace that will be used + assert descr.exported_state is None if not we_are_translated(): - op._jumptarget_number = descr.number + op._descr_wref = weakref.ref(op._descr) + op._descr = None # clear reference to prevent the history.Stats + # from keeping the loop alive during tests # record this looptoken on the QuasiImmut used in the code if loop.quasi_immutable_deps is not None: for qmut in loop.quasi_immutable_deps: qmut.register_loop_token(wref) # XXX maybe we should clear the dictionary here # mostly for tests: make sure we don't keep a reference to the LoopToken - loop.token = None + loop.original_jitcell_token = None if not we_are_translated(): - loop._looptoken_number = looptoken.number + loop._looptoken_number = original_jitcell_token.number # ____________________________________________________________ -def compile_new_loop(metainterp, old_loop_tokens, greenkey, start, - start_resumedescr, full_preamble_needed=True): - """Try to compile a new loop by closing the current history back +def compile_loop(metainterp, greenkey, start, + inputargs, jumpargs, + start_resumedescr, full_preamble_needed=True): + """Try to compile a new procedure by closing the current history back to the first operation. 
""" - from pypy.jit.metainterp.optimize import optimize_loop + from pypy.jit.metainterp.optimizeopt import optimize_trace history = metainterp.history - loop = create_empty_loop(metainterp) - loop.inputargs = history.inputargs[:] + metainterp_sd = metainterp.staticdata + jitdriver_sd = metainterp.jitdriver_sd + + if False: + part = partial_trace + assert False + procedur_token = metainterp.get_procedure_token(greenkey) + assert procedure_token + all_target_tokens = [] + else: + jitcell_token = make_jitcell_token(jitdriver_sd) + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + h_ops = history.operations + part.start_resumedescr = start_resumedescr + part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ + [h_ops[i].clone() for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, jumpargs, None, descr=jitcell_token)] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens = [target_token] + + loop = create_empty_loop(metainterp) + loop.inputargs = part.inputargs + loop.operations = part.operations + loop.quasi_immutable_deps = {} + if part.quasi_immutable_deps: + loop.quasi_immutable_deps.update(part.quasi_immutable_deps) + while part.operations[-1].getopnum() == rop.LABEL: + inliner = Inliner(inputargs, jumpargs) + part.quasi_immutable_deps = None + part.operations = [part.operations[-1]] + \ + [inliner.inline_op(h_ops[i]) for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jumpargs], + None, descr=jitcell_token)] + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens.append(target_token) + inputargs = jumpargs + jumpargs = part.operations[-1].getarglist() + + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + + loop.operations = loop.operations[:-1] + part.operations + if part.quasi_immutable_deps: + loop.quasi_immutable_deps.update(part.quasi_immutable_deps) + + if not loop.quasi_immutable_deps: + loop.quasi_immutable_deps = None for box in loop.inputargs: assert isinstance(box, Box) - # make a copy, because optimize_loop can mutate the ops and descrs - h_ops = history.operations - loop.operations = [h_ops[i].clone() for i in range(start, len(h_ops))] + + loop.original_jitcell_token = jitcell_token + for label in all_target_tokens: + assert isinstance(label, TargetToken) + label.original_jitcell_token = jitcell_token + if label.virtual_state and label.short_preamble: + metainterp_sd.logger_ops.log_short_preamble([], label.short_preamble) + jitcell_token.target_tokens = all_target_tokens + send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, "loop") + record_loop_or_bridge(metainterp_sd, loop) + return all_target_tokens[0] + +def compile_retrace(metainterp, greenkey, start, + inputargs, jumpargs, + start_resumedescr, partial_trace, resumekey): + """Try to compile a new procedure by closing the current history back + to the first operation. 
+ """ + from pypy.jit.metainterp.optimizeopt import optimize_trace + + history = metainterp.history metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd - loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd) - loop.token = loop_token - loop.operations[-1].setdescr(loop_token) # patch the target of the JUMP - loop.preamble = create_empty_loop(metainterp, 'Preamble ') - loop.preamble.inputargs = loop.inputargs - loop.preamble.token = make_loop_token(len(loop.inputargs), jitdriver_sd) - loop.preamble.start_resumedescr = start_resumedescr + loop_jitcell_token = metainterp.get_procedure_token(greenkey) + assert loop_jitcell_token + assert partial_trace.operations[-1].getopnum() == rop.LABEL + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + part.start_resumedescr = start_resumedescr + h_ops = history.operations + + part.operations = [partial_trace.operations[-1]] + \ + [h_ops[i].clone() for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, jumpargs, None, descr=loop_jitcell_token)] + label = part.operations[0] + orignial_label = label.clone() + assert label.getopnum() == rop.LABEL try: - old_loop_token = optimize_loop(metainterp_sd, old_loop_tokens, loop, - jitdriver_sd.warmstate.enable_opts) + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: - debug_print("compile_new_loop: got an InvalidLoop") - return None - if old_loop_token is not None: - metainterp.staticdata.log("reusing old loop") - return old_loop_token + #return None # XXX: Dissable for now + # Fall back on jumping to preamble + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + assert target_token.exported_state + part.operations = [orignial_label] + \ + [ResOperation(rop.JUMP, target_token.exported_state.jump_args, + None, descr=loop_jitcell_token)] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts, + inline_short_preamble=False) + + except InvalidLoop: + return None + assert part.operations[-1].getopnum() != rop.LABEL + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + assert loop_jitcell_token.target_tokens + loop_jitcell_token.target_tokens.append(target_token) - if loop.preamble.operations is not None: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, - "loop") - record_loop_or_bridge(metainterp_sd, loop) - token = loop.preamble.token - if full_preamble_needed: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, - loop.preamble, "entry bridge") - insert_loop_token(old_loop_tokens, loop.preamble.token) - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - greenkey, loop.preamble.token) - record_loop_or_bridge(metainterp_sd, loop.preamble) - elif token.short_preamble: - short = token.short_preamble[-1] - metainterp_sd.logger_ops.log_short_preamble(short.inputargs, - short.operations) - return token - else: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, - "loop") - insert_loop_token(old_loop_tokens, loop_token) - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - greenkey, loop.token) - record_loop_or_bridge(metainterp_sd, loop) - return loop_token + loop = partial_trace + loop.operations = loop.operations[:-1] + part.operations -def insert_loop_token(old_loop_tokens, loop_token): - # Find where in old_loop_tokens we should insert this new loop_token. 
- # The following algo means "as late as possible, but before another - # loop token that would be more general and so completely mask off - # the new loop_token". - # XXX do we still need a list? - old_loop_tokens.append(loop_token) + quasi_immutable_deps = {} + if loop.quasi_immutable_deps: + quasi_immutable_deps.update(loop.quasi_immutable_deps) + if part.quasi_immutable_deps: + quasi_immutable_deps.update(part.quasi_immutable_deps) + if quasi_immutable_deps: + loop.quasi_immutable_deps = quasi_immutable_deps + + for box in loop.inputargs: + assert isinstance(box, Box) + + target_token = loop.operations[-1].getdescr() + resumekey.compile_and_attach(metainterp, loop) + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + target_token.original_jitcell_token = loop.original_jitcell_token + record_loop_or_bridge(metainterp_sd, loop) + return target_token + +def patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd): + vinfo = jitdriver_sd.virtualizable_info + extra_ops = [] + inputargs = loop.inputargs + vable_box = inputargs[jitdriver_sd.index_of_virtualizable] + i = jitdriver_sd.num_red_args + loop.inputargs = inputargs[:i] + for descr in vinfo.static_field_descrs: + assert i < len(inputargs) + box = inputargs[i] + extra_ops.append( + ResOperation(rop.GETFIELD_GC, [vable_box], box, descr)) + i += 1 + arrayindex = 0 + for descr in vinfo.array_field_descrs: + vable = vable_box.getref_base() + arraylen = vinfo.get_array_length(vable, arrayindex) + arraybox = BoxPtr() + extra_ops.append( + ResOperation(rop.GETFIELD_GC, [vable_box], arraybox, descr)) + arraydescr = vinfo.array_descrs[arrayindex] + assert i + arraylen <= len(inputargs) + for index in range(arraylen): + box = inputargs[i] + extra_ops.append( + ResOperation(rop.GETARRAYITEM_GC, + [arraybox, ConstInt(index)], + box, descr=arraydescr)) + i += 1 + arrayindex += 1 + assert i == len(inputargs) + loop.operations = extra_ops + loop.operations def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): - jitdriver_sd.on_compile(metainterp_sd.logger_ops, loop.token, + vinfo = jitdriver_sd.virtualizable_info + if vinfo is not None: + patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd) + + original_jitcell_token = loop.original_jitcell_token + jitdriver_sd.on_compile(metainterp_sd.logger_ops, original_jitcell_token, loop.operations, type, greenkey) loopname = jitdriver_sd.warmstate.get_location_str(greenkey) globaldata = metainterp_sd.globaldata - loop_token = loop.token - loop_token.number = n = globaldata.loopnumbering + original_jitcell_token.number = n = globaldata.loopnumbering globaldata.loopnumbering += 1 if not we_are_translated(): - show_loop(metainterp_sd, loop) + show_procedures(metainterp_sd, loop) loop.check_consistency() operations = get_deep_immutable_oplist(loop.operations) @@ -177,26 +309,19 @@ debug_start("jit-backend") try: ops_offset = metainterp_sd.cpu.compile_loop(loop.inputargs, operations, - loop.token, name=loopname) + original_jitcell_token, name=loopname) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() metainterp_sd.stats.add_new_loop(loop) if not we_are_translated(): - if type != "entry bridge": - metainterp_sd.stats.compiled() - else: - loop._ignore_during_counting = True + metainterp_sd.stats.compiled() metainterp_sd.log("compiled new " + type) # metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, type, ops_offset) - short = loop.token.short_preamble - if short: - 
metainterp_sd.logger_ops.log_short_preamble(short[-1].inputargs, - short[-1].operations) # if metainterp_sd.warmrunnerdesc is not None: # for tests - metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(loop.token) + metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(original_jitcell_token) def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputargs, operations, original_loop_token): @@ -204,8 +329,9 @@ jitdriver_sd.on_compile_bridge(metainterp_sd.logger_ops, original_loop_token, operations, n) if not we_are_translated(): - show_loop(metainterp_sd) - TreeLoop.check_consistency_of(inputargs, operations) + show_procedures(metainterp_sd) + seen = dict.fromkeys(inputargs) + TreeLoop.check_consistency_of_branch(operations, seen) metainterp_sd.profiler.start_backend() operations = get_deep_immutable_oplist(operations) debug_start("jit-backend") @@ -221,9 +347,9 @@ # metainterp_sd.logger_ops.log_bridge(inputargs, operations, n, ops_offset) # - if metainterp_sd.warmrunnerdesc is not None: # for tests - metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive( - original_loop_token) + #if metainterp_sd.warmrunnerdesc is not None: # for tests + # metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive( + # original_loop_token) # ____________________________________________________________ @@ -263,7 +389,7 @@ raise metainterp_sd.ExitFrameWithExceptionRef(cpu, value) -class TerminatingLoopToken(LoopToken): +class TerminatingLoopToken(JitCellToken): # FIXME: kill? terminating = True def __init__(self, nargs, finishdescr): @@ -298,7 +424,7 @@ pass class ResumeGuardDescr(ResumeDescr): - _counter = 0 # if < 0, there is one counter per value; + _counter = 0 # on a GUARD_VALUE, there is one counter per value; _counters = None # they get stored in _counters then. # this class also gets the following attributes stored by resume.py code @@ -309,10 +435,13 @@ rd_virtuals = None rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) - CNT_INT = -0x20000000 - CNT_REF = -0x40000000 - CNT_FLOAT = -0x60000000 - CNT_MASK = 0x1FFFFFFF + CNT_BASE_MASK = 0x0FFFFFFF # the base counter value + CNT_BUSY_FLAG = 0x10000000 # if set, busy tracing from the guard + CNT_TYPE_MASK = 0x60000000 # mask for the type + + CNT_INT = 0x20000000 + CNT_REF = 0x40000000 + CNT_FLOAT = 0x60000000 def store_final_boxes(self, guard_op, boxes): guard_op.setfailargs(boxes) @@ -326,6 +455,8 @@ except ValueError: return # xxx probably very rare else: + if i > self.CNT_BASE_MASK: + return # probably never, but better safe than sorry if box.type == history.INT: cnt = self.CNT_INT elif box.type == history.REF: @@ -334,18 +465,21 @@ cnt = self.CNT_FLOAT else: assert 0, box.type - # we build the following value for _counter, which is always - # a negative value + assert cnt > self.CNT_BASE_MASK self._counter = cnt | i def handle_fail(self, metainterp_sd, jitdriver_sd): if self.must_compile(metainterp_sd, jitdriver_sd): - return self._trace_and_compile_from_bridge(metainterp_sd, - jitdriver_sd) + self.start_compiling() + try: + self._trace_and_compile_from_bridge(metainterp_sd, + jitdriver_sd) + finally: + self.done_compiling() else: from pypy.jit.metainterp.blackhole import resume_in_blackhole resume_in_blackhole(metainterp_sd, jitdriver_sd, self) - assert 0, "unreachable" + assert 0, "unreachable" def _trace_and_compile_from_bridge(self, metainterp_sd, jitdriver_sd): # 'jitdriver_sd' corresponds to the outermost one, i.e. the one @@ -354,17 +488,27 @@ # jitdrivers. 
from pypy.jit.metainterp.pyjitpl import MetaInterp metainterp = MetaInterp(metainterp_sd, jitdriver_sd) - return metainterp.handle_guard_failure(self) + metainterp.handle_guard_failure(self) _trace_and_compile_from_bridge._dont_inline_ = True def must_compile(self, metainterp_sd, jitdriver_sd): trace_eagerness = jitdriver_sd.warmstate.trace_eagerness - if self._counter >= 0: + # + if self._counter <= self.CNT_BASE_MASK: + # simple case: just counting from 0 to trace_eagerness self._counter += 1 return self._counter >= trace_eagerness - else: - index = self._counter & self.CNT_MASK - typetag = self._counter & ~ self.CNT_MASK + # + # do we have the BUSY flag? If so, we're tracing right now, e.g. in an + # outer invocation of the same function, so don't trace again for now. + elif self._counter & self.CNT_BUSY_FLAG: + return False + # + else: # we have a GUARD_VALUE that fails. Make a _counters instance + # (only now, when the guard is actually failing at least once), + # and use it to record some statistics about the failing values. + index = self._counter & self.CNT_BASE_MASK + typetag = self._counter & self.CNT_TYPE_MASK counters = self._counters if typetag == self.CNT_INT: intvalue = metainterp_sd.cpu.get_latest_value_int(index) @@ -391,7 +535,16 @@ assert 0, typetag return counter >= trace_eagerness - def reset_counter_from_failure(self): + def start_compiling(self): + # start tracing and compiling from this guard. + self._counter |= self.CNT_BUSY_FLAG + + def done_compiling(self): + # done tracing and compiling from this guard. Either the bridge has + # been successfully compiled, in which case whatever value we store + # in self._counter will not be seen any more, or not, in which case + # we should reset the counter to 0, in order to wait a bit until the + # next attempt. if self._counter >= 0: self._counter = 0 self._counters = None @@ -400,13 +553,13 @@ # We managed to create a bridge. Attach the new operations # to the corresponding guard_op and compile from there assert metainterp.resumekey_original_loop_token is not None - new_loop.token = metainterp.resumekey_original_loop_token + new_loop.original_jitcell_token = metainterp.resumekey_original_loop_token inputargs = metainterp.history.inputargs if not we_are_translated(): self._debug_suboperations = new_loop.operations send_bridge_to_backend(metainterp.jitdriver_sd, metainterp.staticdata, self, inputargs, new_loop.operations, - new_loop.token) + new_loop.original_jitcell_token) def copy_all_attributes_into(self, res): # XXX a bit ugly to have to list them all here @@ -589,44 +742,32 @@ metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd redargs = new_loop.inputargs - # We make a new LoopToken for this entry bridge, and stick it - # to every guard in the loop. 
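The reworked _counter field above packs three things into a single integer: a 28-bit base counter (or, for a GUARD_VALUE, the index of the failarg being sampled), a busy flag that is set while a bridge is being traced from the guard, and a two-bit type tag. The following is only a rough standalone sketch of that bit layout for illustration; it reuses the constant values from the hunk above but is not code from this changeset:

    CNT_BASE_MASK = 0x0FFFFFFF   # low 28 bits: plain counter or failarg index
    CNT_BUSY_FLAG = 0x10000000   # set while a bridge is being traced from here
    CNT_TYPE_MASK = 0x60000000   # two bits of type tag
    CNT_INT, CNT_REF, CNT_FLOAT = 0x20000000, 0x40000000, 0x60000000

    def pack(index, typetag):
        # roughly what store_final_boxes() does for a GUARD_VALUE: remember
        # which failarg to sample and what type it has
        assert 0 <= index <= CNT_BASE_MASK
        return typetag | index

    def unpack(counter):
        # roughly what must_compile() does when the guard fails
        return counter & CNT_BASE_MASK, counter & CNT_TYPE_MASK

    c = pack(42, CNT_INT)
    assert unpack(c) == (42, CNT_INT)
    c |= CNT_BUSY_FLAG         # start_compiling(): don't trace again meanwhile
    assert c & CNT_BUSY_FLAG
    c &= ~CNT_BUSY_FLAG        # conceptually what done_compiling() undoes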
- new_loop_token = make_loop_token(len(redargs), jitdriver_sd) - new_loop.token = new_loop_token + new_loop.original_jitcell_token = jitcell_token = make_jitcell_token(jitdriver_sd) send_loop_to_backend(self.original_greenkey, metainterp.jitdriver_sd, metainterp_sd, new_loop, "entry bridge") # send the new_loop to warmspot.py, to be called directly the next time - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - self.original_greenkey, - new_loop_token) - # store the new loop in compiled_merge_points_wref too - old_loop_tokens = metainterp.get_compiled_merge_points( - self.original_greenkey) - # it always goes at the end of the list, as it is the most - # general loop token - old_loop_tokens.append(new_loop_token) - metainterp.set_compiled_merge_points(self.original_greenkey, - old_loop_tokens) + jitdriver_sd.warmstate.attach_procedure_to_interp( + self.original_greenkey, jitcell_token) + metainterp_sd.stats.add_jitcell_token(jitcell_token) - def reset_counter_from_failure(self): - pass - -def compile_new_bridge(metainterp, old_loop_tokens, resumekey, retraced=False): +def compile_trace(metainterp, resumekey, start_resumedescr=None): """Try to compile a new bridge leading from the beginning of the history to some existing place. """ - from pypy.jit.metainterp.optimize import optimize_bridge + from pypy.jit.metainterp.optimizeopt import optimize_trace # The history contains new operations to attach as the code for the # failure of 'resumekey.guard_op'. - # + # # Attempt to use optimize_bridge(). This may return None in case # it does not work -- i.e. none of the existing old_loop_tokens match. - new_loop = create_empty_loop(metainterp) - new_loop.inputargs = metainterp.history.inputargs[:] + new_trace = create_empty_loop(metainterp) + new_trace.inputargs = inputargs = metainterp.history.inputargs[:] # clone ops, as optimize_bridge can mutate the ops - new_loop.operations = [op.clone() for op in metainterp.history.operations] + + new_trace.operations = [op.clone() for op in metainterp.history.operations] + new_trace.start_resumedescr = start_resumedescr metainterp_sd = metainterp.staticdata state = metainterp.jitdriver_sd.warmstate if isinstance(resumekey, ResumeAtPositionDescr): @@ -634,38 +775,25 @@ else: inline_short_preamble = True try: - target_loop_token = optimize_bridge(metainterp_sd, old_loop_tokens, - new_loop, state.enable_opts, - inline_short_preamble, retraced) + optimize_trace(metainterp_sd, new_trace, state.enable_opts, inline_short_preamble) except InvalidLoop: debug_print("compile_new_bridge: got an InvalidLoop") # XXX I am fairly convinced that optimize_bridge cannot actually raise # InvalidLoop debug_print('InvalidLoop in compile_new_bridge') return None - # Did it work? - if target_loop_token is not None: - # Yes, we managed to create a bridge. Dispatch to resumekey to + + if new_trace.operations[-1].getopnum() != rop.LABEL: + # We managed to create a bridge. 
Dispatch to resumekey to # know exactly what we must do (ResumeGuardDescr/ResumeFromInterpDescr) - prepare_last_operation(new_loop, target_loop_token) - resumekey.compile_and_attach(metainterp, new_loop) - record_loop_or_bridge(metainterp_sd, new_loop) - return target_loop_token - -def prepare_last_operation(new_loop, target_loop_token): - op = new_loop.operations[-1] - if not isinstance(target_loop_token, TerminatingLoopToken): - # normal case - #op.setdescr(target_loop_token) # patch the jump target - pass + target_token = new_trace.operations[-1].getdescr() + resumekey.compile_and_attach(metainterp, new_trace) + record_loop_or_bridge(metainterp_sd, new_trace) + return target_token else: - # The target_loop_token is a pseudo loop token, - # e.g. loop_tokens_done_with_this_frame_void[0] - # Replace the operation with the real operation we want, i.e. a FINISH - descr = target_loop_token.finishdescr - args = op.getarglist() - new_op = ResOperation(rop.FINISH, args, None, descr=descr) - new_loop.operations[-1] = new_op + metainterp.retrace_needed(new_trace) + return None + # ____________________________________________________________ @@ -676,21 +804,25 @@ assert exception, "PropagateExceptionDescr: no exception??" raise metainterp_sd.ExitFrameWithExceptionRef(cpu, exception) -def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redboxes, +def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redargtypes, memory_manager=None): """Make a LoopToken that corresponds to assembler code that just calls back the interpreter. Used temporarily: a fully compiled version of the code may end up replacing it. """ - # 'redboxes' is only used to know the types of red arguments. - inputargs = [box.clonebox() for box in redboxes] - loop_token = make_loop_token(len(inputargs), jitdriver_sd) - # 'nb_red_args' might be smaller than len(redboxes), - # because it doesn't include the virtualizable boxes. 
+ jitcell_token = make_jitcell_token(jitdriver_sd) nb_red_args = jitdriver_sd.num_red_args + assert len(redargtypes) == nb_red_args + inputargs = [] + for kind in redargtypes: + if kind == history.INT: box = BoxInt() + elif kind == history.REF: box = BoxPtr() + elif kind == history.FLOAT: box = BoxFloat() + else: raise AssertionError + inputargs.append(box) k = jitdriver_sd.portal_runner_adr funcbox = history.ConstInt(heaptracker.adr2int(k)) - callargs = [funcbox] + greenboxes + inputargs[:nb_red_args] + callargs = [funcbox] + greenboxes + inputargs # result_type = jitdriver_sd.result_type if result_type == history.INT: @@ -717,7 +849,7 @@ ] operations[1].setfailargs([]) operations = get_deep_immutable_oplist(operations) - cpu.compile_loop(inputargs, operations, loop_token, log=False) + cpu.compile_loop(inputargs, operations, jitcell_token, log=False) if memory_manager is not None: # for tests - memory_manager.keep_loop_alive(loop_token) - return loop_token + memory_manager.keep_loop_alive(jitcell_token) + return jitcell_token diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -344,6 +344,7 @@ rop.SETINTERIORFIELD_RAW, rop.CALL_RELEASE_GIL, rop.QUASIIMMUT_FIELD, + rop.LABEL, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git a/pypy/jit/metainterp/gc.py b/pypy/jit/metainterp/gc.py --- a/pypy/jit/metainterp/gc.py +++ b/pypy/jit/metainterp/gc.py @@ -7,6 +7,9 @@ self.config = config +class GC_none(GcDescription): + malloc_zero_filled = True + class GC_boehm(GcDescription): malloc_zero_filled = True diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -12,8 +12,9 @@ def get_display_text(self): return None -def display_loops(loops, errmsg=None, highlight_loops={}): - graphs = [(loop, highlight_loops.get(loop, 0)) for loop in loops] +def display_procedures(procedures, errmsg=None, highlight_procedures={}): + graphs = [(procedure, highlight_procedures.get(procedure, 0)) + for procedure in procedures] for graph, highlight in graphs: for op in graph.get_operations(): if is_interesting_guard(op): @@ -25,18 +26,19 @@ def is_interesting_guard(op): return hasattr(op.getdescr(), '_debug_suboperations') +def getdescr(op): + if op._descr is not None: + return op._descr + if hasattr(op, '_descr_wref'): + return op._descr_wref() + return None + class ResOpGraphPage(GraphPage): def compute(self, graphs, errmsg=None): resopgen = ResOpGen() for graph, highlight in graphs: - if getattr(graph, 'token', None) is not None: - resopgen.jumps_to_graphs[graph.token] = graph - if getattr(graph, '_looptoken_number', None) is not None: - resopgen.jumps_to_graphs[graph._looptoken_number] = graph - - for graph, highlight in graphs: resopgen.add_graph(graph, highlight) if errmsg: resopgen.set_errmsg(errmsg) @@ -54,7 +56,7 @@ self.block_starters = {} # {graphindex: {set-of-operation-indices}} self.all_operations = {} self.errmsg = None - self.jumps_to_graphs = {} + self.target_tokens = {} def op_name(self, graphindex, opindex): return 'g%dop%d' % (graphindex, opindex) @@ -73,16 +75,21 @@ for graphindex in range(len(self.graphs)): self.block_starters[graphindex] = {0: True} for graphindex, graph in enumerate(self.graphs): - last_was_mergepoint = False + mergepointblock = None for i, op in enumerate(graph.get_operations()): if is_interesting_guard(op): 
self.mark_starter(graphindex, i+1) if op.getopnum() == rop.DEBUG_MERGE_POINT: - if not last_was_mergepoint: - last_was_mergepoint = True - self.mark_starter(graphindex, i) + if mergepointblock is None: + mergepointblock = i + elif op.getopnum() == rop.LABEL: + self.mark_starter(graphindex, i) + self.target_tokens[getdescr(op)] = (graphindex, i) + mergepointblock = i else: - last_was_mergepoint = False + if mergepointblock is not None: + self.mark_starter(graphindex, mergepointblock) + mergepointblock = None def set_errmsg(self, errmsg): self.errmsg = errmsg @@ -172,24 +179,10 @@ (graphindex, opindex)) break if op.getopnum() == rop.JUMP: - tgt_g = -1 - tgt = None - tgt_number = getattr(op, '_jumptarget_number', None) - if tgt_number is not None: - tgt = self.jumps_to_graphs.get(tgt_number) - else: - tgt_descr = op.getdescr() - if tgt_descr is None: - tgt_g = graphindex - else: - tgt = self.jumps_to_graphs.get(tgt_descr.number) - if tgt is None: - tgt = self.jumps_to_graphs.get(tgt_descr) - if tgt is not None: - tgt_g = self.graphs.index(tgt) - if tgt_g != -1: + tgt_descr = getdescr(op) + if tgt_descr is not None and tgt_descr in self.target_tokens: self.genedge((graphindex, opstartindex), - (tgt_g, 0), + self.target_tokens[tgt_descr], weight="0") lines.append("") label = "\\l".join(lines) diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -10,6 +10,7 @@ from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.codewriter import heaptracker, longlong from pypy.rlib.objectmodel import compute_identity_hash +import weakref # ____________________________________________________________ @@ -123,9 +124,6 @@ def sort_key(self): raise NotImplementedError - def set_future_value(self, cpu, j): - raise NotImplementedError - def nonnull(self): raise NotImplementedError @@ -288,9 +286,6 @@ def _get_hash_(self): return make_hashable_int(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_int(j, self.value) - def same_constant(self, other): if isinstance(other, ConstInt): return self.value == other.value @@ -328,9 +323,6 @@ def _get_hash_(self): return longlong.gethash(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_float(j, self.value) - def same_constant(self, other): if isinstance(other, ConstFloat): return self.value == other.value @@ -377,9 +369,6 @@ def getaddr(self): return llmemory.cast_ptr_to_adr(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def same_constant(self, other): if isinstance(other, ConstPtr): return self.value == other.value @@ -431,9 +420,6 @@ else: return 0 - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - ## def getaddr(self): ## # so far this is used only when calling ## # CodeWriter.IndirectCallset.bytecode_for_address. 
We don't need a @@ -539,9 +525,6 @@ def _get_hash_(self): return make_hashable_int(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_int(j, self.value) - def nonnull(self): return self.value != 0 @@ -574,9 +557,6 @@ def _get_hash_(self): return longlong.gethash(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_float(j, self.value) - def nonnull(self): return self.value != longlong.ZEROF @@ -619,9 +599,6 @@ else: return 0 - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def nonnull(self): return bool(self.value) @@ -666,19 +643,12 @@ def nonnull(self): return bool(self.value) - def set_future_value(self, cpu, j): - cpu.set_future_value_ref(j, self.value) - def repr_rpython(self): return repr_rpython(self, 'bo') _getrepr_ = repr_object -def set_future_values(cpu, boxes): - for j in range(len(boxes)): - boxes[j].set_future_value(cpu, j) - # ____________________________________________________________ @@ -723,18 +693,17 @@ # ____________________________________________________________ -# The TreeLoop class contains a loop or a generalized loop, i.e. a tree -# of operations. Each branch ends in a jump which can go either to -# the top of the same loop, or to another TreeLoop; or it ends in a FINISH. +# The JitCellToken class is the root of a tree of traces. Each branch ends +# in a jump which goes to a LABEL operation; or it ends in a FINISH. -class LoopToken(AbstractDescr): +class JitCellToken(AbstractDescr): """Used for rop.JUMP, giving the target of the jump. This is different from TreeLoop: the TreeLoop class contains the whole loop, including 'operations', and goes away after the loop was compiled; but the LoopDescr remains alive and points to the generated assembler. """ - short_preamble = None + target_tokens = None failed_states = None retraced_count = 0 terminating = False # see TerminatingLoopToken in compile.py @@ -751,10 +720,11 @@ def __init__(self): # For memory management of assembled loops - self._keepalive_target_looktokens = {} # set of other LoopTokens + self._keepalive_jitcell_tokens = {} # set of other JitCellToken - def record_jump_to(self, target_loop_token): - self._keepalive_target_looktokens[target_loop_token] = None + def record_jump_to(self, jitcell_token): + assert isinstance(jitcell_token, JitCellToken) + self._keepalive_jitcell_tokens[jitcell_token] = None def __repr__(self): return '' % (self.number, self.generation) @@ -765,17 +735,49 @@ def dump(self): self.compiled_loop_token.cpu.dump_loop_token(self) +class TargetToken(AbstractDescr): + def __init__(self, targeting_jitcell_token=None): + # Warning, two different jitcell_tokens here! + # + # * 'targeting_jitcell_token' is only useful for the front-end, + # and it means: consider the LABEL that uses this TargetToken. + # At this position, the state is logically the one given + # by targeting_jitcell_token. So e.g. if we want to enter the + # JIT with some given green args, if the jitcell matches, then + # we can jump to this LABEL. 
+ # + # * 'original_jitcell_token' is information from the backend's + # point of view: it means that this TargetToken is used in + # a LABEL that belongs to either: + # - a loop; then 'original_jitcell_token' is this loop + # - or a bridge; then 'original_jitcell_token' is the loop + # out of which we made this bridge + # + self.targeting_jitcell_token = targeting_jitcell_token + self.original_jitcell_token = None + + self.virtual_state = None + self.exported_state = None + class TreeLoop(object): inputargs = None operations = None - token = None call_pure_results = None logops = None quasi_immutable_deps = None + start_resumedescr = None + + def _token(*args): + raise Exception("TreeLoop.token is killed") + token = property(_token, _token) + + # This is the jitcell where the trace starts. Labels within the trace might + # belong to some other jitcells in the sens that jumping to this other + # jitcell will result in a jump to the label. + original_jitcell_token = None def __init__(self, name): self.name = name - # self.inputargs = list of distinct Boxes # self.operations = list of ResOperations # ops of the kind 'guard_xxx' contain a further list of operations, # which may itself contain 'guard_xxx' and so on, making a tree. @@ -808,6 +810,10 @@ def check_consistency(self): # for testing "NOT_RPYTHON" self.check_consistency_of(self.inputargs, self.operations) + for op in self.operations: + descr = op.getdescr() + if op.getopnum() == rop.LABEL and isinstance(descr, TargetToken): + assert descr.original_jitcell_token is self.original_jitcell_token @staticmethod def check_consistency_of(inputargs, operations): @@ -842,15 +848,23 @@ assert isinstance(box, Box) assert box not in seen seen[box] = True + if op.getopnum() == rop.LABEL: + inputargs = op.getarglist() + for box in inputargs: + assert isinstance(box, Box), "LABEL contains %r" % (box,) + seen = dict.fromkeys(inputargs) + assert len(seen) == len(inputargs), ( + "duplicate Box in the LABEL arguments") + assert operations[-1].is_final() if operations[-1].getopnum() == rop.JUMP: target = operations[-1].getdescr() if target is not None: - assert isinstance(target, LoopToken) + assert isinstance(target, TargetToken) def dump(self): # RPython-friendly - print '%r: inputargs =' % self, self._dump_args(self.inputargs) + print '%r: inputargs =' % self, self._dump_args(self.inputargs) for op in self.operations: args = op.getarglist() print '\t', op.getopname(), self._dump_args(args), \ @@ -932,6 +946,9 @@ def clear(self): pass + def add_jitcell_token(self, token): + pass + class Stats(object): """For tests.""" @@ -944,17 +961,26 @@ self.loops = [] self.locations = [] self.aborted_keys = [] - self.invalidated_token_numbers = set() + self.invalidated_token_numbers = set() # <- not RPython + self.jitcell_token_wrefs = [] + self.jitcell_dicts = [] # <- not RPython def clear(self): del self.loops[:] del self.locations[:] del self.aborted_keys[:] + del self.jitcell_token_wrefs[:] self.invalidated_token_numbers.clear() self.compiled_count = 0 self.enter_count = 0 self.aborted_count = 0 + for dict in self.jitcell_dicts: + dict.clear() + def add_jitcell_token(self, token): + assert isinstance(token, JitCellToken) + self.jitcell_token_wrefs.append(weakref.ref(token)) + def set_history(self, history): self.operations = history.operations @@ -984,6 +1010,15 @@ def get_all_loops(self): return self.loops + def get_all_jitcell_tokens(self): + tokens = [t() for t in self.jitcell_token_wrefs] + if None in tokens: + assert False, "get_all_jitcell_tokens will not 
work as "+\ + "loops have been freed" + return tokens + + + def check_history(self, expected=None, **check): insns = {} for op in self.operations: @@ -999,16 +1034,90 @@ "found %d %r, expected %d" % (found, insn, expected_count)) return insns + def check_resops(self, expected=None, **check): + insns = {} + for loop in self.get_all_loops(): + insns = loop.summary(adding_insns=insns) + return self._check_insns(insns, expected, check) + + def _check_insns(self, insns, expected, check): + if expected is not None: + insns.pop('debug_merge_point', None) + insns.pop('label', None) + assert insns == expected + for insn, expected_count in check.items(): + getattr(rop, insn.upper()) # fails if 'rop.INSN' does not exist + found = insns.get(insn, 0) + assert found == expected_count, ( + "found %d %r, expected %d" % (found, insn, expected_count)) + return insns + + def check_simple_loop(self, expected=None, **check): + # Usefull in the simplest case when we have only one trace ending with + # a jump back to itself and possibly a few bridges ending with finnish. + # Only the operations within the loop formed by that single jump will + # be counted. + + # XXX hacked version, ignore and remove me when jit-targets is merged. + loops = self.get_all_loops() + loops = [loop for loop in loops if 'Preamble' not in repr(loop)] #XXX + assert len(loops) == 1 + loop, = loops + jumpop = loop.operations[-1] + assert jumpop.getopnum() == rop.JUMP + insns = {} + for op in loop.operations: + opname = op.getopname() + insns[opname] = insns.get(opname, 0) + 1 + return self._check_insns(insns, expected, check) + + def check_simple_loop(self, expected=None, **check): + # Usefull in the simplest case when we have only one trace ending with + # a jump back to itself and possibly a few bridges ending with finnish. + # Only the operations within the loop formed by that single jump will + # be counted. 
+ loops = self.get_all_loops() + assert len(loops) == 1 + loop = loops[0] + jumpop = loop.operations[-1] + assert jumpop.getopnum() == rop.JUMP + assert self.check_resops(jump=1) + labels = [op for op in loop.operations if op.getopnum() == rop.LABEL] + targets = [op._descr_wref() for op in labels] + assert None not in targets # TargetToken was freed, give up + target = jumpop._descr_wref() + assert target + assert targets.count(target) == 1 + i = loop.operations.index(labels[targets.index(target)]) + insns = {} + for op in loop.operations[i:]: + opname = op.getopname() + insns[opname] = insns.get(opname, 0) + 1 + return self._check_insns(insns, expected, check) + def check_loops(self, expected=None, everywhere=False, **check): insns = {} - for loop in self.loops: - if not everywhere: - if getattr(loop, '_ignore_during_counting', False): - continue + for loop in self.get_all_loops(): + #if not everywhere: + # if getattr(loop, '_ignore_during_counting', False): + # continue insns = loop.summary(adding_insns=insns) if expected is not None: insns.pop('debug_merge_point', None) - assert insns == expected + print + print + print " self.check_resops(%s)" % str(insns) + print + import pdb; pdb.set_trace() + else: + chk = ['%s=%d' % (i, insns.get(i, 0)) for i in check] + print + print + print " self.check_resops(%s)" % ', '.join(chk) + print + import pdb; pdb.set_trace() + return + for insn, expected_count in check.items(): getattr(rop, insn.upper()) # fails if 'rop.INSN' does not exist found = insns.get(insn, 0) @@ -1018,26 +1127,26 @@ def check_consistency(self): "NOT_RPYTHON" - for loop in self.loops: + for loop in self.get_all_loops(): loop.check_consistency() def maybe_view(self): if option.view: self.view() - def view(self, errmsg=None, extraloops=[]): - from pypy.jit.metainterp.graphpage import display_loops - loops = self.get_all_loops()[:] - for loop in extraloops: - if loop in loops: - loops.remove(loop) - loops.append(loop) - highlight_loops = dict.fromkeys(extraloops, 1) - for loop in loops: - if hasattr(loop, '_looptoken_number') and ( - loop._looptoken_number in self.invalidated_token_numbers): - highlight_loops.setdefault(loop, 2) - display_loops(loops, errmsg, highlight_loops) + def view(self, errmsg=None, extraprocedures=[]): + from pypy.jit.metainterp.graphpage import display_procedures + procedures = self.get_all_loops()[:] + for procedure in extraprocedures: + if procedure in procedures: + procedures.remove(procedure) + procedures.append(procedure) + highlight_procedures = dict.fromkeys(extraprocedures, 1) + for procedure in procedures: + if hasattr(procedure, '_looptoken_number') and ( + procedure._looptoken_number in self.invalidated_token_numbers): + highlight_procedures.setdefault(procedure, 2) + display_procedures(procedures, errmsg, highlight_procedures) # ---------------------------------------------------------------- diff --git a/pypy/jit/metainterp/inliner.py b/pypy/jit/metainterp/inliner.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/inliner.py @@ -0,0 +1,57 @@ +from pypy.jit.metainterp.history import Const +from pypy.jit.metainterp.resume import Snapshot + +class Inliner(object): + def __init__(self, inputargs, jump_args): + assert len(inputargs) == len(jump_args) + self.argmap = {} + for i in range(len(inputargs)): + if inputargs[i] in self.argmap: + assert self.argmap[inputargs[i]] == jump_args[i] + else: + self.argmap[inputargs[i]] = jump_args[i] + self.snapshot_map = {None: None} + + def inline_op(self, newop, ignore_result=False, clone=True, + 
ignore_failargs=False): + if clone: + newop = newop.clone() + args = newop.getarglist() + newop.initarglist([self.inline_arg(a) for a in args]) + + if newop.is_guard(): + args = newop.getfailargs() + if args and not ignore_failargs: + newop.setfailargs([self.inline_arg(a) for a in args]) + else: + newop.setfailargs([]) + + if newop.result and not ignore_result: + old_result = newop.result + newop.result = newop.result.clonebox() + self.argmap[old_result] = newop.result + + self.inline_descr_inplace(newop.getdescr()) + + return newop + + def inline_descr_inplace(self, descr): + from pypy.jit.metainterp.compile import ResumeGuardDescr + if isinstance(descr, ResumeGuardDescr): + descr.rd_snapshot = self.inline_snapshot(descr.rd_snapshot) + + def inline_arg(self, arg): + if arg is None: + return None + if isinstance(arg, Const): + return arg + return self.argmap[arg] + + def inline_snapshot(self, snapshot): + if snapshot in self.snapshot_map: + return self.snapshot_map[snapshot] + boxes = [self.inline_arg(a) for a in snapshot.boxes] + new_snapshot = Snapshot(self.inline_snapshot(snapshot.prev), boxes) + self.snapshot_map[snapshot] = new_snapshot + return new_snapshot + diff --git a/pypy/jit/metainterp/jitdriver.py b/pypy/jit/metainterp/jitdriver.py --- a/pypy/jit/metainterp/jitdriver.py +++ b/pypy/jit/metainterp/jitdriver.py @@ -11,6 +11,7 @@ # self.portal_calldescr ... pypy.jit.metainterp.warmspot # self.num_green_args ... pypy.jit.metainterp.warmspot # self.num_red_args ... pypy.jit.metainterp.warmspot + # self.red_args_types ... pypy.jit.metainterp.warmspot # self.result_type ... pypy.jit.metainterp.warmspot # self.virtualizable_info... pypy.jit.metainterp.warmspot # self.greenfield_info ... pypy.jit.metainterp.warmspot diff --git a/pypy/jit/metainterp/jitprof.py b/pypy/jit/metainterp/jitprof.py --- a/pypy/jit/metainterp/jitprof.py +++ b/pypy/jit/metainterp/jitprof.py @@ -10,8 +10,6 @@ counters=""" TRACING BACKEND -RUNNING -BLACKHOLE OPS RECORDED_OPS GUARDS @@ -67,18 +65,6 @@ def end_backend(self): pass - def start_running(self): - pass - - def end_running(self): - pass - - def start_blackhole(self): - pass - - def end_blackhole(self): - pass - def count(self, kind, inc=1): pass @@ -134,16 +120,6 @@ def start_backend(self): self._start(BACKEND) def end_backend(self): self._end (BACKEND) - # Don't record times for 'running' and 'blackhole' because there are - # too many of them: calling time.time() is a major blocker. - # If you are interested in these numbers, use 'PYPYLOG=file' and - # look at the resulting file with pypy/tool/logparser.py. 
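The Inliner moved into pypy/jit/metainterp/inliner.py above is essentially a clone-and-rename pass: each box of the first iteration is replaced by the box the JUMP feeds in, every cloned result gets a fresh box, and later operations are rewritten to use the clones. A rough standalone sketch of that remapping, with plain strings standing in for boxes and ResOperations (illustration only, not the real API):

    def make_inliner(inputargs, jump_args):
        assert len(inputargs) == len(jump_args)
        argmap = {}
        for inp, jmp in zip(inputargs, jump_args):
            # a duplicated input box must always be fed the same jump argument
            assert argmap.get(inp, jmp) == jmp
            argmap[inp] = jmp

        def inline_op(op):
            opname, args, result = op
            newargs = [argmap.get(a, a) for a in args]   # constants map to themselves
            newresult = result + "_cloned"               # stands in for clonebox()
            argmap[result] = newresult                   # later uses see the clone
            return (opname, newargs, newresult)
        return inline_op

    # peeled second iteration: i0 was the loop input, the JUMP passes i2 back
    inline = make_inliner(["i0"], ["i2"])
    print(inline(("int_add", ["i0", "ConstInt(1)"], "i3")))
    # -> ('int_add', ['i2', 'ConstInt(1)'], 'i3_cloned')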
- def start_running(self): self.count(RUNNING) - def end_running(self): pass - - def start_blackhole(self): self.count(BLACKHOLE) - def end_blackhole(self): pass - def count(self, kind, inc=1): self.counters[kind] += inc @@ -165,8 +141,6 @@ calls = self.calls self._print_line_time("Tracing", cnt[TRACING], tim[TRACING]) self._print_line_time("Backend", cnt[BACKEND], tim[BACKEND]) - self._print_intline("Running asm", cnt[RUNNING]) - self._print_intline("Blackhole", cnt[BLACKHOLE]) line = "TOTAL: \t\t%f" % (self.tk - self.starttime, ) debug_print(line) self._print_intline("ops", cnt[OPS]) diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -4,13 +4,15 @@ from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize from pypy.jit.metainterp.optimizeopt.heap import OptHeap from pypy.jit.metainterp.optimizeopt.vstring import OptString -from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll, OptInlineShortPreamble +from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll from pypy.jit.metainterp.optimizeopt.fficall import OptFfiCall from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify from pypy.jit.metainterp.optimizeopt.pure import OptPure from pypy.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce from pypy.rlib.jit import PARAMETERS from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.debug import debug_start, debug_stop, debug_print + ALL_OPTS = [('intbounds', OptIntBounds), ('rewrite', OptRewrite), @@ -28,8 +30,7 @@ ALL_OPTS_LIST = [name for name, _ in ALL_OPTS] ALL_OPTS_NAMES = ':'.join([name for name, _ in ALL_OPTS]) -def build_opt_chain(metainterp_sd, enable_opts, - inline_short_preamble=True, retraced=False): +def build_opt_chain(metainterp_sd, enable_opts): config = metainterp_sd.config optimizations = [] unroll = 'unroll' in enable_opts # 'enable_opts' is normally a dict @@ -45,12 +46,9 @@ optimizations.append(OptFfiCall()) if ('rewrite' not in enable_opts or 'virtualize' not in enable_opts - or 'heap' not in enable_opts): + or 'heap' not in enable_opts or 'unroll' not in enable_opts): optimizations.append(OptSimplify()) - if inline_short_preamble: - optimizations = [OptInlineShortPreamble(retraced)] + optimizations - return optimizations, unroll @@ -80,3 +78,21 @@ if __name__ == '__main__': print ALL_OPTS_NAMES + +def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True): + """Optimize loop.operations to remove internal overheadish operations. 
+ """ + + debug_start("jit-optimize") + try: + loop.logops = metainterp_sd.logger_noopt.log_loop(loop.inputargs, + loop.operations) + optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts) + if unroll: + optimize_unroll(metainterp_sd, loop, optimizations, inline_short_preamble) + else: + optimizer = Optimizer(metainterp_sd, loop, optimizations) + optimizer.propagate_all_forward() + finally: + debug_stop("jit-optimize") + diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -7,7 +7,7 @@ from pypy.rlib.libffi import Func from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.annlowlevel import cast_base_ptr_to_instance -from pypy.rpython.lltypesystem import llmemory +from pypy.rpython.lltypesystem import llmemory, rffi class FuncInfo(object): @@ -234,10 +234,13 @@ # longlongs are treated as floats, see # e.g. llsupport/descr.py:getDescrClass is_float = True + elif kind == 'u': + # they're all False + pass else: assert False, "unsupported ffitype or kind" # - fieldsize = ffitype.c_size + fieldsize = rffi.getintfield(ffitype, 'c_size') return self.optimizer.cpu.interiorfielddescrof_dynamic( offset, width, fieldsize, is_pointer, is_float, is_signed ) diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -500,8 +500,9 @@ else: return CVAL_ZERO - def propagate_all_forward(self): - self.clear_newoperations() + def propagate_all_forward(self, clear=True): + if clear: + self.clear_newoperations() for op in self.loop.operations: self.first_optimization.propagate_forward(op) self.loop.operations = self.get_newoperations() @@ -564,9 +565,12 @@ descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) - newboxes = modifier.finish(self.values, self.pendingfields) - if len(newboxes) > self.metainterp_sd.options.failargs_limit: # XXX be careful here - compile.giveup() + try: + newboxes = modifier.finish(self.values, self.pendingfields) + if len(newboxes) > self.metainterp_sd.options.failargs_limit: + raise resume.TagOverflow + except resume.TagOverflow: + raise compile.giveup() descr.store_final_boxes(op, newboxes) # if op.getopnum() == rop.GUARD_VALUE: diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -260,6 +260,16 @@ def optimize_GUARD_FALSE(self, op): self.optimize_guard(op, CONST_0) + def optimize_RECORD_KNOWN_CLASS(self, op): + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) + assert isinstance(expectedclassbox, Const) + realclassbox = value.get_constant_class(self.optimizer.cpu) + if realclassbox is not None: + assert realclassbox.same_constant(expectedclassbox) + return + value.make_constant_class(expectedclassbox, None) + def optimize_GUARD_CLASS(self, op): value = self.getvalue(op.getarg(0)) expectedclassbox = op.getarg(1) @@ -481,6 +491,9 @@ self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) self.emit_operation(op) + def optimize_SAME_AS(self, op): + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) + dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_', default=OptRewrite.emit_operation) 
optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD') diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -1,9 +1,12 @@ from pypy.jit.metainterp.optimizeopt.optimizer import Optimization from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method from pypy.jit.metainterp.resoperation import ResOperation, rop - +from pypy.jit.metainterp.history import TargetToken, JitCellToken class OptSimplify(Optimization): + def __init__(self): + self.last_label_descr = None + def optimize_CALL_PURE(self, op): args = op.getarglist() self.emit_operation(ResOperation(rop.CALL, args, op.result, @@ -28,6 +31,26 @@ def optimize_MARK_OPAQUE_PTR(self, op): pass + def optimize_RECORD_KNOWN_CLASS(self, op): + pass + + def optimize_LABEL(self, op): + self.last_label_descr = op.getdescr() + self.emit_operation(op) + + def optimize_JUMP(self, op): + descr = op.getdescr() + assert isinstance(descr, JitCellToken) + if not descr.target_tokens: + assert self.last_label_descr is not None + target_token = self.last_label_descr + assert isinstance(target_token, TargetToken) + assert target_token.targeting_jitcell_token is descr + op.setdescr(self.last_label_descr) + else: + assert len(descr.target_tokens) == 1 + op.setdescr(descr.target_tokens[0]) + self.emit_operation(op) dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -0,0 +1,200 @@ +from pypy.jit.metainterp.optimizeopt.test.test_util import ( + LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot) +from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken +from pypy.jit.metainterp.resoperation import rop, opname, ResOperation +from pypy.jit.metainterp.optimize import InvalidLoop +from py.test import raises + +class BaseTestMultiLabel(BaseTest): + enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" + + def optimize_loop(self, ops, expected): + loop = self.parse(ops) + if expected != "crash!": + expected = self.parse(expected) + + part = TreeLoop('part') + part.inputargs = loop.inputargs + part.start_resumedescr = FakeDescrWithSnapshot() + token = loop.original_jitcell_token + + optimized = TreeLoop('optimized') + optimized.inputargs = loop.inputargs + optimized.operations = [] + + labels = [i for i, op in enumerate(loop.operations) \ + if op.getopnum()==rop.LABEL] + prv = 0 + last_label = [] + for nxt in labels + [len(loop.operations)]: + assert prv != nxt + operations = last_label + loop.operations[prv:nxt] + if nxt < len(loop.operations): + label = loop.operations[nxt] + assert label.getopnum() == rop.LABEL + jumpop = ResOperation(rop.JUMP, label.getarglist(), + None, descr=token) + operations.append(jumpop) + part.operations = operations + self._do_optimize_loop(part, None) + if part.operations[-1].getopnum() == rop.LABEL: + last_label = [part.operations.pop()] + else: + last_label = [] + optimized.operations.extend(part.operations) + prv = nxt + 1 + + # + print + print "Optimized:" + if optimized.operations: + print '\n'.join([str(o) for o in optimized.operations]) + else: + print 'Failed!' 
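The optimize_LABEL/optimize_JUMP pair added to simplify.py above redirects a JUMP whose descr is still the JitCellToken: either to the last LABEL seen in the trace being optimized, or to the jitcell's single already-recorded TargetToken. A rough sketch of that retargeting decision, using dummy classes in place of the real descr objects (illustration only):

    class JitCellTokenSketch(object):
        def __init__(self):
            self.target_tokens = None    # filled in once targets are known

    class TargetTokenSketch(object):
        def __init__(self, targeting):
            self.targeting_jitcell_token = targeting

    def retarget_jump(jump_descr, last_label_descr):
        # mirrors optimize_JUMP above: prefer the jitcell's known target,
        # otherwise fall back to the last LABEL of the current trace
        if not jump_descr.target_tokens:
            assert last_label_descr.targeting_jitcell_token is jump_descr
            return last_label_descr
        assert len(jump_descr.target_tokens) == 1
        return jump_descr.target_tokens[0]

    cell = JitCellTokenSketch()
    label = TargetTokenSketch(cell)
    assert retarget_jump(cell, label) is label    # no known targets yet
    cell.target_tokens = [label]
    assert retarget_jump(cell, None) is label     # reuse the recorded target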
+ print + + assert expected != "crash!", "should have raised an exception" + self.assert_equal(optimized, expected) + + return optimized + + def test_simple(self): + ops = """ + [i1] + i2 = int_add(i1, 1) + escape(i2) + label(i1) + i3 = int_add(i1, 1) + escape(i3) + jump(i1) + """ + expected = """ + [i1] + i2 = int_add(i1, 1) + escape(i2) + label(i1, i2) + escape(i2) + jump(i1, i2) + """ + self.optimize_loop(ops, expected) + + def test_forced_virtual(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + escape(p3) + jump(p3) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtuals_with_nonmatching_fields(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3, 1, descr=valuedescr) + label(p3) + p4 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p4, 1, descr=nextdescr) + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtual_arrays_with_nonmatching_lens(self): + ops = """ + [p1] + p2 = new_array(3, descr=arraydescr) + label(p2) + p4 = new_array(2, descr=arraydescr) + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_nonmatching_arraystruct_1(self): + ops = """ + [p1, f0] + p2 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p2, 2, f0, descr=complexrealdescr) + label(p2, f0) + p4 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p4, 2, f0, descr=compleximagdescr) + jump(p4, f0) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_nonmatching_arraystruct_2(self): + ops = """ + [p1, f0] + p2 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p2, 2, f0, descr=complexrealdescr) + label(p2, f0) + p4 = new_array(2, descr=complexarraydescr) + setinteriorfield_gc(p4, 0, f0, descr=complexrealdescr) + jump(p4, f0) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual_array(self): + ops = """ + [p1] + p3 = new_array(3, descr=arraydescr) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual_arraystruct(self): + ops = """ + [p1] + p3 = new_array(3, descr=complexarraydescr) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtual_turns_constant(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + guard_value(p3, ConstPtr(myptr)) [] + jump(p3) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtuals_turns_not_equal(self): + ops = """ + [p1, p2] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3, p3) + p4 = new_with_vtable(ConstClass(node_vtable)) + jump(p3, p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + +class TestLLtype(BaseTestMultiLabel, LLtypeMixin): + pass + diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -1,7 +1,8 @@ import py from pypy.rlib.objectmodel import instantiate from pypy.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, 
FakeMetaInterpStaticData) + LLtypeMixin, BaseTest, FakeMetaInterpStaticData, convert_old_style_to_targets) +from pypy.jit.metainterp.history import TargetToken, JitCellToken from pypy.jit.metainterp.test.test_compile import FakeLogger import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt import pypy.jit.metainterp.optimizeopt.virtualize as virtualize @@ -11,7 +12,6 @@ from pypy.jit.metainterp.resoperation import rop, opname, ResOperation from pypy.rlib.rarithmetic import LONG_BIT - def test_store_final_boxes_in_guard(): from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.resume import tag, TAGBOX @@ -116,9 +116,13 @@ enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap" def optimize_loop(self, ops, optops, call_pure_results=None): - loop = self.parse(ops) - expected = self.parse(optops) + token = JitCellToken() + loop.operations = [ResOperation(rop.LABEL, loop.inputargs, None, descr=TargetToken(token))] + \ + loop.operations + if loop.operations[-1].getopnum() == rop.JUMP: + loop.operations[-1].setdescr(token) + expected = convert_old_style_to_targets(self.parse(optops), jump=True) self._do_optimize_loop(loop, call_pure_results) print '\n'.join([str(o) for o in loop.operations]) self.assert_equal(loop, expected) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1,13 +1,13 @@ import py from pypy.rlib.objectmodel import instantiate from pypy.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, Storage, _sortboxes) + LLtypeMixin, BaseTest, Storage, _sortboxes, convert_old_style_to_targets) import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt import pypy.jit.metainterp.optimizeopt.virtualize as virtualize from pypy.jit.metainterp.optimizeopt import optimize_loop_1, ALL_OPTS_DICT, build_opt_chain from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt -from pypy.jit.metainterp.history import TreeLoop, LoopToken +from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken from pypy.jit.metainterp.jitprof import EmptyProfiler from pypy.jit.metainterp import executor, compile, resume, history from pypy.jit.metainterp.resoperation import rop, opname, ResOperation @@ -15,7 +15,7 @@ from pypy.jit.metainterp.optimizeopt.util import args_dict from pypy.jit.metainterp.optimizeopt.test.test_optimizebasic import FakeMetaInterpStaticData from pypy.config.pypyoption import get_pypy_config - +from pypy.jit.metainterp.optimizeopt.unroll import Inliner def test_build_opt_chain(): def check(chain, expected_names): @@ -23,49 +23,37 @@ assert names == expected_names # metainterp_sd = FakeMetaInterpStaticData(None) - chain, _ = build_opt_chain(metainterp_sd, "", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "") check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "heap:intbounds") - check(chain, ["OptInlineShortPreamble", "OptIntBounds", "OptHeap", "OptSimplify"]) + check(chain, ["OptIntBounds", "OptHeap", 
"OptSimplify"]) # chain, unroll = build_opt_chain(metainterp_sd, "unroll") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) assert unroll # - chain, _ = build_opt_chain(metainterp_sd, "aaa:bbb", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "aaa:bbb") check(chain, ["OptSimplify"]) # - chain, _ = build_opt_chain(metainterp_sd, "ffi", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "ffi") check(chain, ["OptFfiCall", "OptSimplify"]) # metainterp_sd.config = get_pypy_config(translating=True) assert not metainterp_sd.config.translation.jit_ffi - chain, _ = build_opt_chain(metainterp_sd, "ffi", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "ffi") check(chain, ["OptSimplify"]) # ____________________________________________________________ -class FakeDescr(compile.ResumeGuardDescr): - class rd_snapshot: - class prev: - prev = None - boxes = [] - boxes = [] - def clone_if_mutable(self): - return FakeDescr() - def __eq__(self, other): - return isinstance(other, Storage) or isinstance(other, FakeDescr) - - class BaseTestWithUnroll(BaseTest): enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" @@ -79,40 +67,41 @@ expected_preamble = self.parse(expected_preamble) if expected_short: expected_short = self.parse(expected_short) - loop.preamble = TreeLoop('preamble') - loop.preamble.inputargs = loop.inputargs - loop.preamble.token = LoopToken() - loop.preamble.start_resumedescr = FakeDescr() - # - self._do_optimize_loop(loop, call_pure_results) + + preamble = self.unroll_and_optimize(loop, call_pure_results) + # print print "Preamble:" - print loop.preamble.inputargs - if loop.preamble.operations: - print '\n'.join([str(o) for o in loop.preamble.operations]) + if preamble.operations: + print '\n'.join([str(o) for o in preamble.operations]) else: print 'Failed!' 
print print "Loop:" - print loop.inputargs print '\n'.join([str(o) for o in loop.operations]) print if expected_short: print "Short Preamble:" - short = loop.preamble.token.short_preamble[0] - print short.inputargs - print '\n'.join([str(o) for o in short.operations]) + short = loop.operations[0].getdescr().short_preamble + print '\n'.join([str(o) for o in short]) print assert expected != "crash!", "should have raised an exception" - self.assert_equal(loop, expected) + self.assert_equal(loop, convert_old_style_to_targets(expected, jump=True)) + assert loop.operations[0].getdescr() == loop.operations[-1].getdescr() if expected_preamble: - self.assert_equal(loop.preamble, expected_preamble, + self.assert_equal(preamble, convert_old_style_to_targets(expected_preamble, jump=False), text_right='expected preamble') + assert preamble.operations[-1].getdescr() == loop.operations[0].getdescr() if expected_short: - self.assert_equal(short, expected_short, + short_preamble = TreeLoop('short preamble') + assert short[0].getopnum() == rop.LABEL + short_preamble.inputargs = short[0].getarglist() + short_preamble.operations = short + self.assert_equal(short_preamble, convert_old_style_to_targets(expected_short, jump=True), text_right='expected short preamble') + assert short[-1].getdescr() == loop.operations[0].getdescr() return loop @@ -234,7 +223,7 @@ """ % expected_value self.optimize_loop(ops, expected) - def test_reverse_of_cast(self): + def test_reverse_of_cast_1(self): ops = """ [i0] p0 = cast_int_to_ptr(i0) @@ -246,6 +235,8 @@ jump(i0) """ self.optimize_loop(ops, expected) + + def test_reverse_of_cast_2(self): ops = """ [p0] i1 = cast_ptr_to_int(p0) @@ -1181,6 +1172,7 @@ i1 = getfield_gc(p0, descr=valuedescr) i2 = int_sub(i1, 1) i3 = int_add(i0, i1) + i4 = same_as(i2) # This same_as should be killed by backend jump(i3, i2, i1) """ expected = """ @@ -1252,10 +1244,10 @@ i1 = int_add(i0, 1) p1 = new_with_vtable(ConstClass(node_vtable2)) p2 = new_with_vtable(ConstClass(node_vtable2)) - setfield_gc(p0, p1, descr=nextdescr) + setfield_gc(p2, i1, descr=valuedescr) setfield_gc(p2, p1, descr=nextdescr) setfield_gc(p1, p2, descr=nextdescr) - setfield_gc(p2, i1, descr=valuedescr) + setfield_gc(p0, p1, descr=nextdescr) jump(p1) """ self.optimize_loop(ops, loop, preamble) @@ -1317,6 +1309,7 @@ p30 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p30, i28, descr=nextdescr) setfield_gc(p3, p30, descr=valuedescr) + p46 = same_as(p30) # This same_as should be killed by backend jump(i29, p30, p3) """ expected = """ @@ -1324,8 +1317,8 @@ i28 = int_add(i0, 1) i29 = int_add(i28, 1) p30 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p30, i28, descr=nextdescr) setfield_gc(p3, p30, descr=valuedescr) - setfield_gc(p30, i28, descr=nextdescr) jump(i29, p30, p3) """ self.optimize_loop(ops, expected, preamble) @@ -2118,7 +2111,9 @@ guard_true(i3) [] i4 = int_neg(i2) setfield_gc(p1, i2, descr=valuedescr) - jump(p1, i1, i2, i4, i4) + i7 = same_as(i2) # This same_as should be killed by backend + i6 = same_as(i4) + jump(p1, i1, i2, i4, i6) """ expected = """ [p1, i1, i2, i4, i5] @@ -2148,7 +2143,8 @@ i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) escape() - jump(p1, i2, i4, i4) + i5 = same_as(i4) + jump(p1, i2, i4, i5) """ expected = """ [p1, i2, i4, i5] @@ -2177,7 +2173,8 @@ i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) escape() - jump(p1, i2, i4, i4) + i5 = same_as(i4) + jump(p1, i2, i4, i5) """ expected = """ [p1, i2, i4, i5] @@ -2207,7 +2204,9 @@ guard_true(i5) [] i4 = int_neg(i2) 
setfield_gc(p1, i2, descr=valuedescr) - jump(p1, i1, i2, i4, i4) + i8 = same_as(i2) # This same_as should be killed by backend + i7 = same_as(i4) + jump(p1, i1, i2, i4, i7) """ expected = """ [p1, i1, i2, i4, i7] @@ -2433,7 +2432,8 @@ p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p4, descr=nextdescr) setfield_gc(p1, p2, descr=nextdescr) - jump(p1, i2, i4, p4, i4) + i101 = same_as(i4) + jump(p1, i2, i4, p4, i101) """ expected = """ [p1, i2, i4, p4, i5] @@ -3276,7 +3276,15 @@ setfield_gc(p1, i3, descr=valuedescr) jump(p1, i4, i3) ''' - self.optimize_loop(ops, ops, ops) + preamble = ''' + [p1, i1, i4] + setfield_gc(p1, i1, descr=valuedescr) + i3 = call_assembler(i1, descr=asmdescr) + setfield_gc(p1, i3, descr=valuedescr) + i143 = same_as(i3) # Should be killed by backend + jump(p1, i4, i3) + ''' + self.optimize_loop(ops, ops, preamble) def test_call_assembler_invalidates_heap_knowledge(self): ops = ''' @@ -3307,7 +3315,9 @@ setfield_gc(p1, i1, descr=valuedescr) i3 = call(p1, descr=plaincalldescr) setfield_gc(p1, i3, descr=valuedescr) - jump(p1, i4, i3, i3) + i148 = same_as(i3) + i147 = same_as(i3) + jump(p1, i4, i3, i148) ''' self.optimize_loop(ops, expected, preamble) @@ -3330,7 +3340,8 @@ setfield_gc(p1, i1, descr=valuedescr) i3 = call(p1, descr=plaincalldescr) setfield_gc(p1, i1, descr=valuedescr) - jump(p1, i4, i3, i3) + i151 = same_as(i3) + jump(p1, i4, i3, i151) ''' self.optimize_loop(ops, expected, preamble) @@ -3350,7 +3361,8 @@ escape(i1) escape(i2) i4 = call(123456, 4, i0, 6, descr=plaincalldescr) - jump(i0, i4, i4) + i153 = same_as(i4) + jump(i0, i4, i153) ''' expected = ''' [i0, i4, i5] @@ -3380,7 +3392,8 @@ escape(i2) i4 = call(123456, 4, i0, 6, descr=plaincalldescr) guard_no_exception() [] - jump(i0, i4, i4) + i155 = same_as(i4) + jump(i0, i4, i155) ''' expected = ''' [i0, i2, i3] @@ -4198,6 +4211,7 @@ preamble = """ [p0] i0 = strlen(p0) + i3 = same_as(i0) # Should be killed by backend jump(p0) """ expected = """ @@ -5418,6 +5432,7 @@ [p0] p1 = getfield_gc(p0, descr=valuedescr) setfield_gc(p0, p0, descr=valuedescr) + p4450 = same_as(p0) # Should be killed by backend jump(p0) """ expected = """ @@ -5653,7 +5668,8 @@ p3 = newstr(i3) copystrcontent(p1, p3, 0, 0, i1) copystrcontent(p2, p3, 0, i1, i2) - jump(p2, p3, i2) + i7 = same_as(i2) + jump(p2, p3, i7) """ expected = """ [p1, p2, i1] @@ -5728,7 +5744,9 @@ copystrcontent(p1, p5, 0, 0, i1) copystrcontent(p2, p5, 0, i1, i2) copystrcontent(p3, p5, 0, i12, i3) - jump(p2, p3, p5, i2, i3) + i129 = same_as(i2) + i130 = same_as(i3) + jump(p2, p3, p5, i129, i130) """ expected = """ [p1, p2, p3, i1, i2] @@ -5788,7 +5806,8 @@ [p1, i1, i2, i3] escape(i3) i4 = int_sub(i2, i1) - jump(p1, i1, i2, i4, i4) + i5 = same_as(i4) + jump(p1, i1, i2, i4, i5) """ expected = """ [p1, i1, i2, i3, i4] @@ -5813,7 +5832,8 @@ escape(i5) i4 = int_sub(i2, i1) setfield_gc(p2, i4, descr=valuedescr) - jump(p1, i1, i2, p2, i4, i4) + i8 = same_as(i4) + jump(p1, i1, i2, p2, i8, i4) """ expected = """ [p1, i1, i2, p2, i5, i6] @@ -5939,7 +5959,8 @@ p4 = newstr(i5) copystrcontent(p1, p4, i1, 0, i3) copystrcontent(p2, p4, 0, i3, i4) - jump(p4, i1, i2, p2, i5, i3, i4) + i9 = same_as(i4) + jump(p4, i1, i2, p2, i5, i3, i9) """ expected = """ [p1, i1, i2, p2, i5, i3, i4] @@ -6061,7 +6082,9 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, p3, p4, descr=strequaldescr) escape(i0) - jump(p1, p2, p3, i3, i1, i2) + i11 = same_as(i1) + i12 = same_as(i2) + jump(p1, p2, p3, i3, i11, i12) """ expected = """ [p1, p2, p3, i3, i1, i2] @@ -6281,6 +6304,7 @@ i1 = strlen(p1) 
i0 = int_eq(i1, 0) escape(i0) + i3 = same_as(i1) jump(p1, i0) """ self.optimize_strunicode_loop_extradescrs(ops, expected, preamble) @@ -6326,7 +6350,9 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, s"hello world", p4, descr=streq_nonnull_descr) escape(i0) - jump(p1, p2, i3, i1, i2) + i11 = same_as(i1) + i12 = same_as(i2) + jump(p1, p2, i3, i11, i12) """ expected = """ [p1, p2, i3, i1, i2] @@ -6482,6 +6508,21 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() + def test_record_known_class(self): + ops = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + record_known_class(p1, ConstClass(node_vtable)) + guard_class(p1, ConstClass(node_vtable)) [] + jump(p1) + """ + expected = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + jump(p1) + """ + self.optimize_loop(ops, expected) + def test_quasi_immut(self): ops = """ [p0, p1, i0] @@ -6614,7 +6655,8 @@ p188 = getarrayitem_gc(p187, 42, descr=) guard_value(p188, ConstPtr(myptr)) [] p25 = getfield_gc(ConstPtr(myptr), descr=otherdescr) - jump(p25, p187, i184, p25) + p26 = same_as(p25) + jump(p25, p187, i184, p26) """ short = """ [p1, p187, i184] @@ -6883,7 +6925,8 @@ [p9] i843 = strlen(p9) call(i843, descr=nonwritedescr) - jump(p9, i843) + i0 = same_as(i843) + jump(p9, i0) """ short = """ [p9] @@ -6999,6 +7042,40 @@ """ self.optimize_loop(ops, expected) + def test_duplicated_aliased_virtual(self): + ops = """ + [p1, p2] + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3, p3, descr=nextdescr) + p4 = getfield_gc(p3, descr=nextdescr) + jump(p3, p4) + """ + expected = """ + [] + jump() + """ + self.optimize_loop(ops, expected) + + def test_imported_aliased_virtual_in_failargs(self): + ops = """ + [p1, p2, i0] + i2 = int_lt(i0, 10) + guard_true(i2) [p1, p2] + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3, p3, descr=nextdescr) + p4 = getfield_gc(p3, descr=nextdescr) + i1 = int_add(i0, 1) + jump(p3, p4, i1) + """ + expected = """ + [i0] + i2 = int_lt(i0, 10) + guard_true(i2) [] + i1 = int_add(i0, 1) + jump(i1) + """ + self.optimize_loop(ops, expected) + def test_chained_virtuals(self): ops = """ [p0, p1] @@ -7575,7 +7652,8 @@ call(i2, descr=nonwritedescr) setfield_gc(p22, i1, descr=valuedescr) guard_nonnull_class(p18, ConstClass(node_vtable)) [] - jump(p22, p18, i1, i1) + i10 = same_as(i1) + jump(p22, p18, i1, i10) """ short = """ [p22, p18, i1] diff --git a/pypy/jit/metainterp/optimizeopt/test/test_util.py b/pypy/jit/metainterp/optimizeopt/test/test_util.py --- a/pypy/jit/metainterp/optimizeopt/test/test_util.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_util.py @@ -8,7 +8,8 @@ from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstPtr, Const, TreeLoop, BoxObj, - ConstObj, AbstractDescr) + ConstObj, AbstractDescr, + JitCellToken, TargetToken) from pypy.jit.metainterp.optimizeopt.util import sort_descrs, equaloplists from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.codewriter.effectinfo import EffectInfo @@ -18,6 +19,8 @@ from pypy.jit.metainterp import compile, resume, history from pypy.jit.metainterp.jitprof import EmptyProfiler from pypy.config.pypyoption import get_pypy_config +from pypy.jit.metainterp.resoperation import rop, opname, ResOperation +from pypy.jit.metainterp.optimizeopt.unroll import Inliner def test_sort_descrs(): class PseudoDescr(AbstractDescr): @@ -344,6 +347,11 @@ self.config = get_pypy_config(translating=True) self.config.translation.jit_ffi = True + class 
logger_noopt: + @classmethod + def log_loop(*args): + pass + class warmrunnerdesc: class memory_manager: retrace_limit = 5 @@ -394,7 +402,7 @@ expected.operations, False, remap, text_right) def _do_optimize_loop(self, loop, call_pure_results): - from pypy.jit.metainterp.optimizeopt import optimize_loop_1 + from pypy.jit.metainterp.optimizeopt import optimize_trace from pypy.jit.metainterp.optimizeopt.util import args_dict self.loop = loop @@ -408,7 +416,83 @@ if hasattr(self, 'callinfocollection'): metainterp_sd.callinfocollection = self.callinfocollection # - optimize_loop_1(metainterp_sd, loop, self.enable_opts) + optimize_trace(metainterp_sd, loop, self.enable_opts) + + def unroll_and_optimize(self, loop, call_pure_results=None): + operations = loop.operations + jumpop = operations[-1] + assert jumpop.getopnum() == rop.JUMP + inputargs = loop.inputargs + + jump_args = jumpop.getarglist()[:] + operations = operations[:-1] + cloned_operations = [op.clone() for op in operations] + + preamble = TreeLoop('preamble') + preamble.inputargs = inputargs + preamble.start_resumedescr = FakeDescrWithSnapshot() + + token = JitCellToken() + preamble.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(token))] + \ + operations + \ + [ResOperation(rop.JUMP, jump_args, None, descr=token)] + self._do_optimize_loop(preamble, call_pure_results) + + assert preamble.operations[-1].getopnum() == rop.LABEL + + inliner = Inliner(inputargs, jump_args) + loop.start_resumedescr = preamble.start_resumedescr + loop.operations = [preamble.operations[-1]] + \ + [inliner.inline_op(op, clone=False) for op in cloned_operations] + \ + [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jump_args], + None, descr=token)] + #[inliner.inline_op(jumpop)] + assert loop.operations[-1].getopnum() == rop.JUMP + assert loop.operations[0].getopnum() == rop.LABEL + loop.inputargs = loop.operations[0].getarglist() + + self._do_optimize_loop(loop, call_pure_results) + extra_same_as = [] + while loop.operations[0].getopnum() != rop.LABEL: + extra_same_as.append(loop.operations[0]) + del loop.operations[0] + + # Hack to prevent random order of same_as ops + extra_same_as.sort(key=lambda op: str(preamble.operations).find(str(op.getarg(0)))) + + for op in extra_same_as: + preamble.operations.insert(-1, op) + + return preamble + + +class FakeDescr(compile.ResumeGuardDescr): + def clone_if_mutable(self): + return FakeDescr() + def __eq__(self, other): + return isinstance(other, FakeDescr) + +class FakeDescrWithSnapshot(compile.ResumeGuardDescr): + class rd_snapshot: + class prev: + prev = None + boxes = [] + boxes = [] + def clone_if_mutable(self): + return FakeDescrWithSnapshot() + def __eq__(self, other): + return isinstance(other, Storage) or isinstance(other, FakeDescrWithSnapshot) + + +def convert_old_style_to_targets(loop, jump): + newloop = TreeLoop(loop.name) + newloop.inputargs = loop.inputargs + newloop.operations = [ResOperation(rop.LABEL, loop.inputargs, None, descr=FakeDescr())] + \ + loop.operations + if not jump: + assert newloop.operations[-1].getopnum() == rop.JUMP + newloop.operations[-1] = ResOperation(rop.LABEL, newloop.operations[-1].getarglist(), None, descr=FakeDescr()) + return newloop # ____________________________________________________________ diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -1,11 +1,12 @@ from pypy.jit.codewriter.effectinfo 
import EffectInfo -from pypy.jit.metainterp.optimizeopt.virtualstate import VirtualStateAdder, ShortBoxes +from pypy.jit.metainterp.optimizeopt.virtualstate import VirtualStateAdder, ShortBoxes, BadVirtualState from pypy.jit.metainterp.compile import ResumeGuardDescr -from pypy.jit.metainterp.history import TreeLoop, LoopToken +from pypy.jit.metainterp.history import TreeLoop, TargetToken, JitCellToken from pypy.jit.metainterp.jitexc import JitException from pypy.jit.metainterp.optimize import InvalidLoop, RetraceLoop from pypy.jit.metainterp.optimizeopt.optimizer import * from pypy.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds +from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.resume import Snapshot from pypy.rlib.debug import debug_print @@ -13,63 +14,11 @@ # FIXME: Introduce some VirtualOptimizer super class instead -def optimize_unroll(metainterp_sd, loop, optimizations): +def optimize_unroll(metainterp_sd, loop, optimizations, inline_short_preamble=True): opt = UnrollOptimizer(metainterp_sd, loop, optimizations) + opt.inline_short_preamble = inline_short_preamble opt.propagate_all_forward() -class Inliner(object): - def __init__(self, inputargs, jump_args): - assert len(inputargs) == len(jump_args) - self.argmap = {} - for i in range(len(inputargs)): - if inputargs[i] in self.argmap: - assert self.argmap[inputargs[i]] == jump_args[i] - else: - self.argmap[inputargs[i]] = jump_args[i] - self.snapshot_map = {None: None} - - def inline_op(self, newop, ignore_result=False, clone=True, - ignore_failargs=False): - if clone: - newop = newop.clone() - args = newop.getarglist() - newop.initarglist([self.inline_arg(a) for a in args]) - - if newop.is_guard(): - args = newop.getfailargs() - if args and not ignore_failargs: - newop.setfailargs([self.inline_arg(a) for a in args]) - else: - newop.setfailargs([]) - - if newop.result and not ignore_result: - old_result = newop.result - newop.result = newop.result.clonebox() - self.argmap[old_result] = newop.result - - self.inline_descr_inplace(newop.getdescr()) - - return newop - - def inline_descr_inplace(self, descr): - if isinstance(descr, ResumeGuardDescr): - descr.rd_snapshot = self.inline_snapshot(descr.rd_snapshot) - - def inline_arg(self, arg): - if arg is None: - return None - if isinstance(arg, Const): - return arg - return self.argmap[arg] - - def inline_snapshot(self, snapshot): - if snapshot in self.snapshot_map: - return self.snapshot_map[snapshot] - boxes = [self.inline_arg(a) for a in snapshot.boxes] - new_snapshot = Snapshot(self.inline_snapshot(snapshot.prev), boxes) - self.snapshot_map[snapshot] = new_snapshot - return new_snapshot - class UnrollableOptimizer(Optimizer): def setup(self): self.importable_values = {} @@ -101,14 +50,13 @@ become the preamble or entry bridge (don't think there is a distinction anymore)""" + inline_short_preamble = True + did_import = False + def __init__(self, metainterp_sd, loop, optimizations): self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations) - self.cloned_operations = [] - for op in self.optimizer.loop.operations: - newop = op.clone() - self.cloned_operations.append(newop) - def fix_snapshot(self, loop, jump_args, snapshot): + def fix_snapshot(self, jump_args, snapshot): if snapshot is None: return None snapshot_args = snapshot.boxes @@ -116,116 +64,348 @@ for a in snapshot_args: a = self.getvalue(a).get_key_box() new_snapshot_args.append(a) - prev = self.fix_snapshot(loop, 
jump_args, snapshot.prev) + prev = self.fix_snapshot(jump_args, snapshot.prev) return Snapshot(prev, new_snapshot_args) def propagate_all_forward(self): loop = self.optimizer.loop + self.optimizer.clear_newoperations() + + + start_label = loop.operations[0] + if start_label.getopnum() == rop.LABEL: + loop.operations = loop.operations[1:] + # We need to emit the label op before import_state() as emitting it + # will clear heap caches + self.optimizer.send_extra_operation(start_label) + else: + start_label = None + jumpop = loop.operations[-1] if jumpop.getopnum() == rop.JUMP: loop.operations = loop.operations[:-1] else: - loopop = None + jumpop = None - self.optimizer.propagate_all_forward() + self.import_state(start_label) + self.optimizer.propagate_all_forward(clear=False) + if not jumpop: + return + if self.jump_to_already_compiled_trace(jumpop): + # Found a compiled trace to jump to + if self.did_import: - if jumpop: - assert jumpop.getdescr() is loop.token - jump_args = jumpop.getarglist() - jumpop.initarglist([]) + self.close_bridge(start_label) + self.finilize_short_preamble(start_label) + return + + cell_token = jumpop.getdescr() + assert isinstance(cell_token, JitCellToken) + stop_label = ResOperation(rop.LABEL, jumpop.getarglist(), None, TargetToken(cell_token)) + + if not self.did_import: # Enforce the previous behaviour of always peeling exactly one iteration (for now) self.optimizer.flush() + KillHugeIntBounds(self.optimizer).apply() - KillHugeIntBounds(self.optimizer).apply() + loop.operations = self.optimizer.get_newoperations() + self.export_state(stop_label) + loop.operations.append(stop_label) + else: + assert stop_label + assert start_label + stop_target = stop_label.getdescr() + start_target = start_label.getdescr() + assert isinstance(stop_target, TargetToken) + assert isinstance(start_target, TargetToken) + assert stop_target.targeting_jitcell_token is start_target.targeting_jitcell_token + jumpop = ResOperation(rop.JUMP, stop_label.getarglist(), None, descr=start_label.getdescr()) + + self.close_loop(jumpop) + self.finilize_short_preamble(start_label) + + def export_state(self, targetop): + original_jump_args = targetop.getarglist() + jump_args = [self.getvalue(a).get_key_box() for a in original_jump_args] + + assert self.optimizer.loop.start_resumedescr + start_resumedescr = self.optimizer.loop.start_resumedescr.clone_if_mutable() + assert isinstance(start_resumedescr, ResumeGuardDescr) + start_resumedescr.rd_snapshot = self.fix_snapshot(jump_args, start_resumedescr.rd_snapshot) + # FIXME: I dont thnik we need fix_snapshot anymore + + modifier = VirtualStateAdder(self.optimizer) + virtual_state = modifier.get_virtual_state(jump_args) - loop.preamble.operations = self.optimizer.get_newoperations() - jump_args = [self.getvalue(a).get_key_box() for a in jump_args] + values = [self.getvalue(arg) for arg in jump_args] + inputargs = virtual_state.make_inputargs(values, self.optimizer) + short_inputargs = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True) - start_resumedescr = loop.preamble.start_resumedescr.clone_if_mutable() - self.start_resumedescr = start_resumedescr - assert isinstance(start_resumedescr, ResumeGuardDescr) - start_resumedescr.rd_snapshot = self.fix_snapshot(loop, jump_args, - start_resumedescr.rd_snapshot) + constant_inputargs = {} + for box in jump_args: + const = self.get_constant_box(box) + if const: + constant_inputargs[box] = const - modifier = VirtualStateAdder(self.optimizer) - virtual_state = 
modifier.get_virtual_state(jump_args) + short_boxes = ShortBoxes(self.optimizer, inputargs + constant_inputargs.keys()) + aliased_vrituals = {} + for i in range(len(original_jump_args)): + if original_jump_args[i] is not jump_args[i]: + if values[i].is_virtual(): + aliased_vrituals[original_jump_args[i]] = jump_args[i] + else: + short_boxes.alias(original_jump_args[i], jump_args[i]) + + self.optimizer.clear_newoperations() + for box in short_inputargs: + value = self.getvalue(box) + if value.is_virtual(): + value.force_box(self.optimizer) + inputarg_setup_ops = self.optimizer.get_newoperations() + + target_token = targetop.getdescr() + assert isinstance(target_token, TargetToken) + targetop.initarglist(inputargs) + target_token.virtual_state = virtual_state + target_token.short_preamble = [ResOperation(rop.LABEL, short_inputargs, None)] + target_token.start_resumedescr = start_resumedescr + target_token.exported_state = ExportedState(constant_inputargs, short_boxes, + inputarg_setup_ops, self.optimizer, + aliased_vrituals, jump_args) + + def import_state(self, targetop): + self.did_import = False + if not targetop: + # FIXME: Set up some sort of empty state with no virtuals? + return + target_token = targetop.getdescr() + if not target_token: + return + assert isinstance(target_token, TargetToken) + exported_state = target_token.exported_state + if not exported_state: + # FIXME: Set up some sort of empty state with no virtuals + return + self.did_import = True + + self.short = target_token.short_preamble[:] + self.short_seen = {} + self.short_boxes = exported_state.short_boxes.clone() + for box, const in exported_state.constant_inputargs.items(): + self.short_seen[box] = True + self.imported_state = exported_state + self.inputargs = targetop.getarglist() + self.initial_virtual_state = target_token.virtual_state + self.start_resumedescr = target_token.start_resumedescr + + seen = {} + for box in self.inputargs: + if box in seen: + continue + seen[box] = True + preamble_value = exported_state.optimizer.getvalue(box) + value = self.optimizer.getvalue(box) + value.import_from(preamble_value, self.optimizer) + + for newbox, oldbox in self.short_boxes.aliases.items(): + self.optimizer.make_equal_to(newbox, self.optimizer.getvalue(oldbox)) + + # Setup the state of the new optimizer by emiting the + # short operations and discarding the result + self.optimizer.emitting_dissabled = True + for op in exported_state.inputarg_setup_ops: + self.optimizer.send_extra_operation(op) + seen = {} + + for op in self.short_boxes.operations(): + self.ensure_short_op_emitted(op, self.optimizer, seen) + if op and op.result: + preamble_value = exported_state.optimizer.getvalue(op.result) + value = self.optimizer.getvalue(op.result) + if not value.is_virtual(): + imp = ValueImporter(self, preamble_value, op) + self.optimizer.importable_values[value] = imp + newvalue = self.optimizer.getvalue(op.result) + newresult = newvalue.get_key_box() + if newresult is not op.result and not newvalue.is_constant(): + self.short_boxes.alias(newresult, op.result) + op = ResOperation(rop.SAME_AS, [op.result], newresult) + self.optimizer._newoperations = [op] + self.optimizer._newoperations # XXX + #self.optimizer.getvalue(op.result).box = op.result # FIXME: HACK!!! 
+ self.optimizer.flush() + self.optimizer.emitting_dissabled = False + + for box, key_box in exported_state.aliased_vrituals.items(): + self.optimizer.make_equal_to(box, self.getvalue(key_box)) + + def close_bridge(self, start_label): + inputargs = self.inputargs + short_jumpargs = inputargs[:] + + # We dont need to inline the short preamble we are creating as we are conneting + # the bridge to a different trace with a different short preamble + self.short_inliner = None + + newoperations = self.optimizer.get_newoperations() + self.boxes_created_this_iteration = {} + i = 0 + while newoperations[i].getopnum() != rop.LABEL: + i += 1 + while i < len(newoperations): + op = newoperations[i] + self.boxes_created_this_iteration[op.result] = True + args = op.getarglist() + if op.is_guard(): + args = args + op.getfailargs() + for a in args: + self.import_box(a, inputargs, short_jumpargs, []) + i += 1 + newoperations = self.optimizer.get_newoperations() + self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=start_label.getdescr())) + + def close_loop(self, jumpop): + virtual_state = self.initial_virtual_state + short_inputargs = self.short[0].getarglist() + constant_inputargs = self.imported_state.constant_inputargs + inputargs = self.inputargs + short_jumpargs = inputargs[:] + + # Construct jumpargs from the virtual state + original_jumpargs = jumpop.getarglist()[:] + values = [self.getvalue(arg) for arg in jumpop.getarglist()] + try: + jumpargs = virtual_state.make_inputargs(values, self.optimizer) + except BadVirtualState: + raise InvalidLoop + jumpop.initarglist(jumpargs) + + # Inline the short preamble at the end of the loop + jmp_to_short_args = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True) + assert len(short_inputargs) == len(jmp_to_short_args) + args = {} + for i in range(len(short_inputargs)): + if short_inputargs[i] in args: + if args[short_inputargs[i]] != jmp_to_short_args[i]: + raise InvalidLoop + args[short_inputargs[i]] = jmp_to_short_args[i] + self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) + for box, const in constant_inputargs.items(): + self.short_inliner.argmap[box] = const + for op in self.short[1:]: + newop = self.short_inliner.inline_op(op) + self.optimizer.send_extra_operation(newop) + + # Import boxes produced in the preamble but used in the loop + newoperations = self.optimizer.get_newoperations() + self.boxes_created_this_iteration = {} + i = j = 0 + while newoperations[i].getopnum() != rop.LABEL: + i += 1 + while i < len(newoperations) or j < len(jumpargs): + if i == len(newoperations): + while j < len(jumpargs): + a = jumpargs[j] + if self.optimizer.loop.logops: + debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) + self.import_box(a, inputargs, short_jumpargs, jumpargs) + j += 1 + else: + op = newoperations[i] + + self.boxes_created_this_iteration[op.result] = True + args = op.getarglist() + if op.is_guard(): + args = args + op.getfailargs() + + if self.optimizer.loop.logops: + debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) + for a in args: + if self.optimizer.loop.logops: + debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) + self.import_box(a, inputargs, short_jumpargs, jumpargs) + i += 1 + newoperations = self.optimizer.get_newoperations() + + jumpop.initarglist(jumpargs) + self.optimizer.send_extra_operation(jumpop) + self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=jumpop.getdescr())) + + # Verify that the virtual state at the end of the loop is 
one + # that is compatible with the virtual state at the start of the loop + modifier = VirtualStateAdder(self.optimizer) + final_virtual_state = modifier.get_virtual_state(original_jumpargs) + debug_start('jit-log-virtualstate') + virtual_state.debug_print('Closed loop with ') + bad = {} + if not virtual_state.generalization_of(final_virtual_state, bad): + # We ended up with a virtual state that is not compatible + # and we are thus unable to jump to the start of the loop + final_virtual_state.debug_print("Bad virtual state at end of loop, ", + bad) + debug_stop('jit-log-virtualstate') + raise InvalidLoop - values = [self.getvalue(arg) for arg in jump_args] - inputargs = virtual_state.make_inputargs(values, self.optimizer) - short_inputargs = virtual_state.make_inputargs(values, self.optimizer, - keyboxes=True) + debug_stop('jit-log-virtualstate') - self.constant_inputargs = {} - for box in jump_args: - const = self.get_constant_box(box) - if const: - self.constant_inputargs[box] = const + maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards + if self.optimizer.emitted_guards > maxguards: + target_token = jumpop.getdescr() + assert isinstance(target_token, TargetToken) + target_token.targeting_jitcell_token.retraced_count = sys.maxint + + def finilize_short_preamble(self, start_label): + short = self.short + assert short[-1].getopnum() == rop.JUMP + target_token = start_label.getdescr() + assert isinstance(target_token, TargetToken) - sb = ShortBoxes(self.optimizer, inputargs + self.constant_inputargs.keys()) - self.short_boxes = sb + # Turn guards into conditional jumps to the preamble + for i in range(len(short)): + op = short[i] + if op.is_guard(): + op = op.clone() + op.setfailargs(None) + descr = target_token.start_resumedescr.clone_if_mutable() + op.setdescr(descr) + short[i] = op + + # Clone ops and boxes to get private versions and + short_inputargs = short[0].getarglist() + boxmap = {} + newargs = [None] * len(short_inputargs) + for i in range(len(short_inputargs)): + a = short_inputargs[i] + if a in boxmap: + newargs[i] = boxmap[a] + else: + newargs[i] = a.clonebox() + boxmap[a] = newargs[i] + inliner = Inliner(short_inputargs, newargs) + for box, const in self.imported_state.constant_inputargs.items(): + inliner.argmap[box] = const + for i in range(len(short)): + short[i] = inliner.inline_op(short[i]) + + target_token.start_resumedescr = self.start_resumedescr.clone_if_mutable() + inliner.inline_descr_inplace(target_token.start_resumedescr) + + # Forget the values to allow them to be freed + for box in short[0].getarglist(): + box.forget_value() + for op in short: + if op.result: + op.result.forget_value() + target_token.short_preamble = self.short + target_token.exported_state = None + + + def FIXME_old_stuff(): preamble_optimizer = self.optimizer loop.preamble.quasi_immutable_deps = ( self.optimizer.quasi_immutable_deps) self.optimizer = self.optimizer.new() loop.quasi_immutable_deps = self.optimizer.quasi_immutable_deps - logops = self.optimizer.loop.logops - if logops: - args = ", ".join([logops.repr_of_arg(arg) for arg in inputargs]) - debug_print('inputargs: ' + args) - args = ", ".join([logops.repr_of_arg(arg) for arg in short_inputargs]) - debug_print('short inputargs: ' + args) - self.short_boxes.debug_print(logops) - - - # Force virtuals amoung the jump_args of the preamble to get the - # operations needed to setup the proper state of those virtuals - # in the peeled loop - inputarg_setup_ops = [] - 
preamble_optimizer.clear_newoperations() - seen = {} - for box in inputargs: - if box in seen: - continue - seen[box] = True - preamble_value = preamble_optimizer.getvalue(box) - value = self.optimizer.getvalue(box) - value.import_from(preamble_value, self.optimizer) - for box in short_inputargs: - if box in seen: - continue - seen[box] = True - value = preamble_optimizer.getvalue(box) - value.force_box(preamble_optimizer) - inputarg_setup_ops += preamble_optimizer.get_newoperations() - - # Setup the state of the new optimizer by emiting the - # short preamble operations and discarding the result - self.optimizer.emitting_dissabled = True - for op in inputarg_setup_ops: - self.optimizer.send_extra_operation(op) - seen = {} - for op in self.short_boxes.operations(): - self.ensure_short_op_emitted(op, self.optimizer, seen) - if op and op.result: - preamble_value = preamble_optimizer.getvalue(op.result) - value = self.optimizer.getvalue(op.result) - if not value.is_virtual(): - imp = ValueImporter(self, preamble_value, op) - self.optimizer.importable_values[value] = imp - newresult = self.optimizer.getvalue(op.result).get_key_box() - if newresult is not op.result: - self.short_boxes.alias(newresult, op.result) - self.optimizer.flush() - self.optimizer.emitting_dissabled = False - - initial_inputargs_len = len(inputargs) - self.inliner = Inliner(loop.inputargs, jump_args) - - - short = self.inline(inputargs, self.cloned_operations, - loop.inputargs, short_inputargs, - virtual_state) loop.inputargs = inputargs args = [preamble_optimizer.getvalue(self.short_boxes.original(a)).force_box(preamble_optimizer)\ @@ -241,149 +421,7 @@ loop.preamble.token.retraced_count = sys.maxint if short: - assert short[-1].getopnum() == rop.JUMP - short[-1].setdescr(loop.token) - - # Turn guards into conditional jumps to the preamble - for i in range(len(short)): - op = short[i] - if op.is_guard(): - op = op.clone() - op.setfailargs(None) - descr = self.start_resumedescr.clone_if_mutable() - op.setdescr(descr) - short[i] = op - - short_loop = TreeLoop('short preamble') - short_loop.inputargs = short_inputargs - short_loop.operations = short - - # Clone ops and boxes to get private versions and - boxmap = {} - newargs = [None] * len(short_loop.inputargs) - for i in range(len(short_loop.inputargs)): - a = short_loop.inputargs[i] - if a in boxmap: - newargs[i] = boxmap[a] - else: - newargs[i] = a.clonebox() - boxmap[a] = newargs[i] - inliner = Inliner(short_loop.inputargs, newargs) - for box, const in self.constant_inputargs.items(): - inliner.argmap[box] = const - short_loop.inputargs = newargs - ops = [inliner.inline_op(op) for op in short_loop.operations] - short_loop.operations = ops - descr = self.start_resumedescr.clone_if_mutable() - inliner.inline_descr_inplace(descr) - short_loop.start_resumedescr = descr - - assert isinstance(loop.preamble.token, LoopToken) - if loop.preamble.token.short_preamble: - loop.preamble.token.short_preamble.append(short_loop) - else: - loop.preamble.token.short_preamble = [short_loop] - short_loop.virtual_state = virtual_state - - # Forget the values to allow them to be freed - for box in short_loop.inputargs: - box.forget_value() - for op in short_loop.operations: - if op.result: - op.result.forget_value() - - def inline(self, inputargs, loop_operations, loop_args, short_inputargs, virtual_state): - inliner = self.inliner - - short_jumpargs = inputargs[:] - - short = self.short = [] - short_seen = self.short_seen = {} - for box, const in self.constant_inputargs.items(): - 
short_seen[box] = True - - # This loop is equivalent to the main optimization loop in - # Optimizer.propagate_all_forward - jumpop = None - for newop in loop_operations: - newop = inliner.inline_op(newop, clone=False) - if newop.getopnum() == rop.JUMP: - jumpop = newop - break - - #self.optimizer.first_optimization.propagate_forward(newop) - self.optimizer.send_extra_operation(newop) - - self.boxes_created_this_iteration = {} - - assert jumpop - original_jumpargs = jumpop.getarglist()[:] - values = [self.getvalue(arg) for arg in jumpop.getarglist()] - jumpargs = virtual_state.make_inputargs(values, self.optimizer) - jumpop.initarglist(jumpargs) - jmp_to_short_args = virtual_state.make_inputargs(values, self.optimizer, - keyboxes=True) - self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) - - for box, const in self.constant_inputargs.items(): - self.short_inliner.argmap[box] = const - - for op in short: - newop = self.short_inliner.inline_op(op) - self.optimizer.send_extra_operation(newop) - - newoperations = self.optimizer.get_newoperations() - - i = j = 0 - while i < len(newoperations) or j < len(jumpargs): - if i == len(newoperations): - while j < len(jumpargs): - a = jumpargs[j] - if self.optimizer.loop.logops: - debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) - self.import_box(a, inputargs, short, short_jumpargs, - jumpargs, short_seen) - j += 1 - else: - op = newoperations[i] - - self.boxes_created_this_iteration[op.result] = True - args = op.getarglist() - if op.is_guard(): - args = args + op.getfailargs() - - if self.optimizer.loop.logops: - debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) - for a in args: - if self.optimizer.loop.logops: - debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) - self.import_box(a, inputargs, short, short_jumpargs, - jumpargs, short_seen) - i += 1 - newoperations = self.optimizer.get_newoperations() - - jumpop.initarglist(jumpargs) - self.optimizer.send_extra_operation(jumpop) - short.append(ResOperation(rop.JUMP, short_jumpargs, None)) - - modifier = VirtualStateAdder(self.optimizer) - final_virtual_state = modifier.get_virtual_state(original_jumpargs) - debug_start('jit-log-virtualstate') - virtual_state.debug_print('Closed loop with ') - bad = {} - if not virtual_state.generalization_of(final_virtual_state, bad): - # We ended up with a virtual state that is not compatible - # and we are thus unable to jump to the start of the loop - # XXX Is it possible to end up here? If so, consider: - # - Fallback on having the preamble jump to itself? - # - Would virtual_state.generate_guards make sense here? 
- final_virtual_state.debug_print("Bad virtual state at end of loop, ", - bad) - debug_stop('jit-log-virtualstate') - raise InvalidLoop - debug_stop('jit-log-virtualstate') - - return short + pass def ensure_short_op_emitted(self, op, optimizer, seen): if op is None: @@ -399,19 +437,18 @@ guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) optimizer.send_extra_operation(guard) - def add_op_to_short(self, op, short, short_seen, emit=True, guards_needed=False): + def add_op_to_short(self, op, emit=True, guards_needed=False): if op is None: return None - if op.result is not None and op.result in short_seen: - if emit: + if op.result is not None and op.result in self.short_seen: + if emit and self.short_inliner: return self.short_inliner.inline_arg(op.result) else: return None for a in op.getarglist(): - if not isinstance(a, Const) and a not in short_seen: - self.add_op_to_short(self.short_boxes.producer(a), short, short_seen, - emit, guards_needed) + if not isinstance(a, Const) and a not in self.short_seen: + self.add_op_to_short(self.short_boxes.producer(a), emit, guards_needed) if op.is_guard(): descr = self.start_resumedescr.clone_if_mutable() op.setdescr(descr) @@ -421,9 +458,9 @@ else: value_guards = [] - short.append(op) - short_seen[op.result] = True - if emit: + self.short.append(op) + self.short_seen[op.result] = True + if emit and self.short_inliner: newop = self.short_inliner.inline_op(op) self.optimizer.send_extra_operation(newop) else: @@ -432,23 +469,22 @@ if op.is_ovf(): # FIXME: ensure that GUARD_OVERFLOW:ed ops not end up here guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) - self.add_op_to_short(guard, short, short_seen, emit, guards_needed) + self.add_op_to_short(guard, emit, guards_needed) for guard in value_guards: - self.add_op_to_short(guard, short, short_seen, emit, guards_needed) + self.add_op_to_short(guard, emit, guards_needed) if newop: return newop.result return None - def import_box(self, box, inputargs, short, short_jumpargs, - jumpargs, short_seen): + def import_box(self, box, inputargs, short_jumpargs, jumpargs): if isinstance(box, Const) or box in inputargs: return if box in self.boxes_created_this_iteration: return short_op = self.short_boxes.producer(box) - newresult = self.add_op_to_short(short_op, short, short_seen) + newresult = self.add_op_to_short(short_op) short_jumpargs.append(short_op.result) inputargs.append(box) @@ -456,98 +492,94 @@ if box in self.optimizer.values: box = self.optimizer.values[box].force_box(self.optimizer) jumpargs.append(box) - -class OptInlineShortPreamble(Optimization): - def __init__(self, retraced): - self.retraced = retraced + def jump_to_already_compiled_trace(self, jumpop): + assert jumpop.getopnum() == rop.JUMP + cell_token = jumpop.getdescr() - def new(self): - return OptInlineShortPreamble(self.retraced) + assert isinstance(cell_token, JitCellToken) + if not cell_token.target_tokens: + return False - def propagate_forward(self, op): - if op.getopnum() == rop.JUMP: - loop_token = op.getdescr() - assert isinstance(loop_token, LoopToken) - short = loop_token.short_preamble - if short: - args = op.getarglist() - modifier = VirtualStateAdder(self.optimizer) - virtual_state = modifier.get_virtual_state(args) - debug_start('jit-log-virtualstate') - virtual_state.debug_print("Looking for ") + if not self.inline_short_preamble: + assert cell_token.target_tokens[0].virtual_state is None + jumpop.setdescr(cell_token.target_tokens[0]) + self.optimizer.send_extra_operation(jumpop) + return True - for sh in short: - ok = 
False - extra_guards = [] + args = jumpop.getarglist() + modifier = VirtualStateAdder(self.optimizer) + virtual_state = modifier.get_virtual_state(args) + debug_start('jit-log-virtualstate') + virtual_state.debug_print("Looking for ") - bad = {} - debugmsg = 'Did not match ' - if sh.virtual_state.generalization_of(virtual_state, bad): - ok = True - debugmsg = 'Matched ' - else: - try: - cpu = self.optimizer.cpu - sh.virtual_state.generate_guards(virtual_state, - args, cpu, - extra_guards) + for target in cell_token.target_tokens: + if not target.virtual_state: + continue + ok = False + extra_guards = [] - ok = True - debugmsg = 'Guarded to match ' - except InvalidLoop: - pass - sh.virtual_state.debug_print(debugmsg, bad) - - if ok: - debug_stop('jit-log-virtualstate') + bad = {} + debugmsg = 'Did not match ' + if target.virtual_state.generalization_of(virtual_state, bad): + ok = True + debugmsg = 'Matched ' + else: + try: + cpu = self.optimizer.cpu + target.virtual_state.generate_guards(virtual_state, + args, cpu, + extra_guards) - values = [self.getvalue(arg) - for arg in op.getarglist()] - args = sh.virtual_state.make_inputargs(values, self.optimizer, - keyboxes=True) - inliner = Inliner(sh.inputargs, args) - - for guard in extra_guards: - if guard.is_guard(): - descr = sh.start_resumedescr.clone_if_mutable() - inliner.inline_descr_inplace(descr) - guard.setdescr(descr) - self.emit_operation(guard) - - try: - for shop in sh.operations: - newop = inliner.inline_op(shop) - self.emit_operation(newop) - except InvalidLoop: - debug_print("Inlining failed unexpectedly", - "jumping to preamble instead") - self.emit_operation(op) - return + ok = True + debugmsg = 'Guarded to match ' + except InvalidLoop: + pass + target.virtual_state.debug_print(debugmsg, bad) + + if ok: debug_stop('jit-log-virtualstate') - retraced_count = loop_token.retraced_count - limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit - if not self.retraced and retraced_count self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -180,10 +188,15 @@ self.arraydescr is other.arraydescr) def enum_forced_boxes(self, boxes, value, optimizer): - assert isinstance(value, virtualize.VArrayValue) - assert value.is_virtual() + if not isinstance(value, virtualize.VArrayValue): + raise BadVirtualState + if not value.is_virtual(): + raise BadVirtualState for i in range(len(self.fieldstate)): - v = value._items[i] + try: + v = value._items[i] + except IndexError: + raise BadVirtualState s = self.fieldstate[i] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -248,12 +261,19 @@ s.enum(virtual_state) def enum_forced_boxes(self, boxes, value, optimizer): - assert isinstance(value, virtualize.VArrayStructValue) - assert value.is_virtual() + if not isinstance(value, virtualize.VArrayStructValue): + raise BadVirtualState + if not value.is_virtual(): + raise BadVirtualState p = 0 for i in range(len(self.fielddescrs)): for j in range(len(self.fielddescrs[i])): - v = value._items[i][self.fielddescrs[i][j]] + try: + v = value._items[i][self.fielddescrs[i][j]] + except IndexError: + raise BadVirtualState + except KeyError: + raise BadVirtualState s = self.fieldstate[p] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -546,18 +566,27 @@ self.aliases = {} self.rename = {} self.optimizer = optimizer - for box in surviving_boxes: - self.potential_ops[box] = None - optimizer.produce_potential_short_preamble_ops(self) - self.short_boxes = {} - self.short_boxes_in_production = 
{} + if surviving_boxes is not None: + for box in surviving_boxes: + self.potential_ops[box] = None + optimizer.produce_potential_short_preamble_ops(self) - for box in self.potential_ops.keys(): - try: - self.produce_short_preamble_box(box) - except BoxNotProducable: - pass + self.short_boxes = {} + self.short_boxes_in_production = {} + + for box in self.potential_ops.keys(): + try: + self.produce_short_preamble_box(box) + except BoxNotProducable: + pass + + def clone(self): + sb = ShortBoxes(self.optimizer, None) + sb.aliases.update(self.aliases) + sb.short_boxes = {} + sb.short_boxes.update(self.short_boxes) + return sb def prioritized_alternatives(self, box): if box not in self.alternatives: @@ -598,6 +627,7 @@ newbox = newop.result = op.result.clonebox() self.short_boxes[newop.result] = newop value = self.optimizer.getvalue(box) + self.optimizer.emit_operation(ResOperation(rop.SAME_AS, [box], newbox)) self.optimizer.make_equal_to(newbox, value) else: self.short_boxes[box] = op diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -8,7 +8,7 @@ from pypy.jit.metainterp import history, compile, resume from pypy.jit.metainterp.history import Const, ConstInt, ConstPtr, ConstFloat -from pypy.jit.metainterp.history import Box +from pypy.jit.metainterp.history import Box, TargetToken from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp import executor from pypy.jit.metainterp.logger import Logger @@ -22,7 +22,6 @@ from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr from pypy.jit.codewriter import heaptracker from pypy.jit.metainterp.optimizeopt.util import args_dict_box -from pypy.jit.metainterp.optimize import RetraceLoop # ____________________________________________________________ @@ -243,6 +242,18 @@ def opimpl_mark_opaque_ptr(self, box): return self.execute(rop.MARK_OPAQUE_PTR, box) + @arguments("box", "box") + def opimpl_record_known_class(self, box, clsbox): + from pypy.rpython.lltypesystem import llmemory + if self.metainterp.heapcache.is_class_known(box): + return + adr = clsbox.getaddr() + bounding_class = llmemory.cast_adr_to_ptr(adr, rclass.CLASSTYPE) + if bounding_class.subclassrange_max - bounding_class.subclassrange_min == 1: + # precise class knowledge, this can be used + self.execute(rop.RECORD_KNOWN_CLASS, box, clsbox) + self.metainterp.heapcache.class_now_known(box) + @arguments("box") def _opimpl_any_return(self, box): self.metainterp.finishframe(box) @@ -1555,10 +1566,17 @@ self.portal_trace_positions = [] self.free_frames_list = [] self.last_exc_value_box = None - self.retracing_loop_from = None + self.partial_trace = None + self.retracing_from = -1 self.call_pure_results = args_dict_box() self.heapcache = HeapCache() + def retrace_needed(self, trace): + self.partial_trace = trace + self.retracing_from = len(self.history.operations) - 1 + self.heapcache.reset() + + def perform_call(self, jitcode, boxes, greenkey=None): # causes the metainterp to enter the given subfunction f = self.newframe(jitcode, greenkey) @@ -1778,7 +1796,6 @@ self.staticdata.profiler.count(reason) debug_print('~~~ ABORTING TRACING') self.staticdata.stats.aborted() - self.resumekey.reset_counter_from_failure() def blackhole_if_trace_too_long(self): warmrunnerstate = self.jitdriver_sd.warmstate @@ -1793,7 +1810,7 @@ def _interpret(self): # Execute the frames forward until we raise a DoneWithThisFrame, - # a ExitFrameWithException, or a GenerateMergePoint exception. 
+ # a ExitFrameWithException, or a ContinueRunningNormally exception. self.staticdata.stats.entered() while True: self.framestack[-1].run_one_step() @@ -1841,8 +1858,6 @@ self.seen_loop_header_for_jdindex = -1 try: self.interpret() - except GenerateMergePoint, gmp: - return self.designate_target_loop(gmp) except SwitchToBlackhole, stb: self.run_blackhole_interp_to_cancel_tracing(stb) assert False, "should always raise" @@ -1877,8 +1892,6 @@ if self.resumekey_original_loop_token is None: # very rare case raise SwitchToBlackhole(ABORT_BRIDGE) self.interpret() - except GenerateMergePoint, gmp: - return self.designate_target_loop(gmp) except SwitchToBlackhole, stb: self.run_blackhole_interp_to_cancel_tracing(stb) assert False, "should always raise" @@ -1926,14 +1939,9 @@ # that failed; # - if self.resumekey is a ResumeFromInterpDescr, it starts directly # from the interpreter. - if not self.retracing_loop_from: - try: - self.compile_bridge(live_arg_boxes) - except RetraceLoop: - start = len(self.history.operations) - self.current_merge_points.append((live_arg_boxes, start)) - self.retracing_loop_from = RetraceState(self, live_arg_boxes) - return + if not self.partial_trace: + # FIXME: Support a retrace to be a bridge as well as a loop + self.compile_trace(live_arg_boxes, resumedescr) # raises in case it works -- which is the common case, hopefully, # at least for bridges starting from a guard. @@ -1955,14 +1963,10 @@ else: # Found! Compile it as a loop. # raises in case it works -- which is the common case - if self.retracing_loop_from and \ - self.retracing_loop_from.merge_point == j: - bridge_arg_boxes = self.retracing_loop_from.live_arg_boxes - self.compile_bridge_and_loop(original_boxes, \ - live_arg_boxes, start, - bridge_arg_boxes, resumedescr) - else: - self.compile(original_boxes, live_arg_boxes, start, resumedescr) + if self.partial_trace: + if start != self.retracing_from: + raise SwitchToBlackhole(ABORT_BAD_LOOP) # For now + self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr) # creation of the loop was cancelled! self.staticdata.log('cancelled, tracing more...') #self.staticdata.log('cancelled, stopping tracing') @@ -1972,12 +1976,48 @@ start = len(self.history.operations) self.current_merge_points.append((live_arg_boxes, start)) - def designate_target_loop(self, gmp): - loop_token = gmp.target_loop_token + def _unpack_boxes(self, boxes, start, stop): + ints = []; refs = []; floats = [] + for i in range(start, stop): + box = boxes[i] + if box.type == history.INT: ints.append(box.getint()) + elif box.type == history.REF: refs.append(box.getref_base()) + elif box.type == history.FLOAT:floats.append(box.getfloatstorage()) + else: assert 0 + return ints[:], refs[:], floats[:] + + def raise_continue_running_normally(self, live_arg_boxes, loop_token): + self.history.inputargs = None + self.history.operations = None + # For simplicity, we just raise ContinueRunningNormally here and + # ignore the loop_token passed in. It means that we go back to + # interpreted mode, but it should come back very quickly to the + # JIT, find probably the same 'loop_token', and execute it. 
+ if we_are_translated(): + num_green_args = self.jitdriver_sd.num_green_args + gi, gr, gf = self._unpack_boxes(live_arg_boxes, 0, num_green_args) + ri, rr, rf = self._unpack_boxes(live_arg_boxes, num_green_args, + len(live_arg_boxes)) + CRN = self.staticdata.ContinueRunningNormally + raise CRN(gi, gr, gf, ri, rr, rf) + else: + # However, in order to keep the existing tests working + # (which are based on the assumption that 'loop_token' is + # directly used here), a bit of custom non-translatable code... + self._nontranslated_run_directly(live_arg_boxes, loop_token) + assert 0, "unreachable" + + def _nontranslated_run_directly(self, live_arg_boxes, loop_token): + "NOT_RPYTHON" + args = [] num_green_args = self.jitdriver_sd.num_green_args - residual_args = gmp.argboxes[num_green_args:] - history.set_future_values(self.cpu, residual_args) - return loop_token + num_red_args = self.jitdriver_sd.num_red_args + for box in live_arg_boxes[num_green_args:num_green_args+num_red_args]: + if box.type == history.INT: args.append(box.getint()) + elif box.type == history.REF: args.append(box.getref_base()) + elif box.type == history.FLOAT: args.append(box.getfloatstorage()) + else: assert 0 + self.jitdriver_sd.warmstate.execute_assembler(loop_token, *args) def prepare_resume_from_failure(self, opnum, dont_change_position=False): frame = self.framestack[-1] @@ -2018,54 +2058,57 @@ from pypy.jit.metainterp.resoperation import opname raise NotImplementedError(opname[opnum]) - def get_compiled_merge_points(self, greenkey): - """Get the list of looptokens corresponding to the greenkey. - Turns the (internal) list of weakrefs into regular refs. - """ + def get_procedure_token(self, greenkey): cell = self.jitdriver_sd.warmstate.jit_cell_at_key(greenkey) - return cell.get_compiled_merge_points() + return cell.get_procedure_token() + + def compile_loop(self, original_boxes, live_arg_boxes, start, start_resumedescr): + num_green_args = self.jitdriver_sd.num_green_args + greenkey = original_boxes[:num_green_args] + if not self.partial_trace: + assert self.get_procedure_token(greenkey) is None or \ + self.get_procedure_token(greenkey).target_tokens is None + if self.partial_trace: + target_token = compile.compile_retrace(self, greenkey, start, + original_boxes[num_green_args:], + live_arg_boxes[num_green_args:], + start_resumedescr, self.partial_trace, + self.resumekey) + else: + target_token = compile.compile_loop(self, greenkey, start, + original_boxes[num_green_args:], + live_arg_boxes[num_green_args:], + start_resumedescr) + if target_token is not None: + assert isinstance(target_token, TargetToken) + self.jitdriver_sd.warmstate.attach_procedure_to_interp(greenkey, target_token.targeting_jitcell_token) + self.staticdata.stats.add_jitcell_token(target_token.targeting_jitcell_token) - def set_compiled_merge_points(self, greenkey, looptokens): - cell = self.jitdriver_sd.warmstate.jit_cell_at_key(greenkey) - cell.set_compiled_merge_points(looptokens) - def compile(self, original_boxes, live_arg_boxes, start, start_resumedescr): - num_green_args = self.jitdriver_sd.num_green_args - original_inputargs = self.history.inputargs - self.history.inputargs = original_boxes[num_green_args:] - greenkey = original_boxes[:num_green_args] - old_loop_tokens = self.get_compiled_merge_points(greenkey) - self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None) - loop_token = compile.compile_new_loop(self, old_loop_tokens, - greenkey, start, start_resumedescr) - if loop_token is not None: # raise if it *worked* correctly 
- self.set_compiled_merge_points(greenkey, old_loop_tokens) - self.history.inputargs = None - self.history.operations = None - raise GenerateMergePoint(live_arg_boxes, loop_token) + if target_token is not None: # raise if it *worked* correctly + assert isinstance(target_token, TargetToken) + jitcell_token = target_token.targeting_jitcell_token + self.raise_continue_running_normally(live_arg_boxes, jitcell_token) - self.history.inputargs = original_inputargs - self.history.operations.pop() # remove the JUMP - - def compile_bridge(self, live_arg_boxes): + def compile_trace(self, live_arg_boxes, start_resumedescr): num_green_args = self.jitdriver_sd.num_green_args greenkey = live_arg_boxes[:num_green_args] - old_loop_tokens = self.get_compiled_merge_points(greenkey) - if len(old_loop_tokens) == 0: + target_jitcell_token = self.get_procedure_token(greenkey) + if not target_jitcell_token: return - #if self.resumekey.guard_opnum == rop.GUARD_CLASS: - # return # Kepp tracing for another iteration - self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None) + if not target_jitcell_token.target_tokens: + return + + self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None, + descr=target_jitcell_token) try: - target_loop_token = compile.compile_new_bridge(self, - old_loop_tokens, - self.resumekey) + target_token = compile.compile_trace(self, self.resumekey, start_resumedescr) finally: self.history.operations.pop() # remove the JUMP - if target_loop_token is not None: # raise if it *worked* correctly - self.history.inputargs = None - self.history.operations = None - raise GenerateMergePoint(live_arg_boxes, target_loop_token) + if target_token is not None: # raise if it *worked* correctly + assert isinstance(target_token, TargetToken) + jitcell_token = target_token.targeting_jitcell_token + self.raise_continue_running_normally(live_arg_boxes, jitcell_token) def compile_bridge_and_loop(self, original_boxes, live_arg_boxes, start, bridge_arg_boxes, start_resumedescr): @@ -2101,10 +2144,8 @@ except RetraceLoop: assert False assert target_loop_token is not None - - self.history.inputargs = None - self.history.operations = None - raise GenerateMergePoint(live_arg_boxes, old_loop_tokens[0]) + self.raise_continue_running_normally(live_arg_boxes, + old_loop_tokens[0]) def compile_done_with_this_frame(self, exitbox): self.gen_store_back_in_virtualizable() @@ -2126,21 +2167,21 @@ loop_tokens = sd.loop_tokens_done_with_this_frame_float else: assert False - self.history.record(rop.JUMP, exits, None) - target_loop_token = compile.compile_new_bridge(self, loop_tokens, - self.resumekey) - if target_loop_token is not loop_tokens[0]: + # FIXME: kill TerminatingLoopToken? + # FIXME: can we call compile_trace? 
+ token = loop_tokens[0].finishdescr + self.history.record(rop.FINISH, exits, None, descr=token) + target_token = compile.compile_trace(self, self.resumekey) + if target_token is not token: compile.giveup() def compile_exit_frame_with_exception(self, valuebox): self.gen_store_back_in_virtualizable() - # temporarily put a JUMP to a pseudo-loop - self.history.record(rop.JUMP, [valuebox], None) sd = self.staticdata - loop_tokens = sd.loop_tokens_exit_frame_with_exception_ref - target_loop_token = compile.compile_new_bridge(self, loop_tokens, - self.resumekey) - if target_loop_token is not loop_tokens[0]: + token = sd.loop_tokens_exit_frame_with_exception_ref[0].finishdescr + self.history.record(rop.FINISH, [valuebox], None, descr=token) + target_token = compile.compile_trace(self, self.resumekey) + if target_token is not token: compile.giveup() @specialize.arg(1) @@ -2382,22 +2423,6 @@ abox, ConstInt(j), itembox) assert i + 1 == len(self.virtualizable_boxes) - def gen_load_from_other_virtualizable(self, vinfo, vbox): - boxes = [] - assert vinfo is not None - for i in range(vinfo.num_static_extra_boxes): - descr = vinfo.static_field_descrs[i] - boxes.append(self.execute_and_record(rop.GETFIELD_GC, descr, vbox)) - virtualizable = vinfo.unwrap_virtualizable_box(vbox) - for k in range(vinfo.num_arrays): - descr = vinfo.array_field_descrs[k] - abox = self.execute_and_record(rop.GETFIELD_GC, descr, vbox) - descr = vinfo.array_descrs[k] - for j in range(vinfo.get_array_length(virtualizable, k)): - boxes.append(self.execute_and_record(rop.GETARRAYITEM_GC, descr, - abox, ConstInt(j))) - return boxes - def replace_box(self, oldbox, newbox): assert isinstance(oldbox, Box) for frame in self.framestack: @@ -2469,25 +2494,13 @@ greenargs = arglist[1:num_green_args+1] args = arglist[num_green_args+1:] assert len(args) == targetjitdriver_sd.num_red_args - vinfo = targetjitdriver_sd.virtualizable_info - if vinfo is not None: - index = targetjitdriver_sd.index_of_virtualizable - vbox = args[index] - args = args + self.gen_load_from_other_virtualizable(vinfo, vbox) - # ^^^ and not "+=", which makes 'args' a resizable list warmrunnerstate = targetjitdriver_sd.warmstate - token = warmrunnerstate.get_assembler_token(greenargs, args) + token = warmrunnerstate.get_assembler_token(greenargs) op = op.copy_and_change(rop.CALL_ASSEMBLER, args=args, descr=token) self.history.operations.append(op) # ____________________________________________________________ -class GenerateMergePoint(JitException): - def __init__(self, args, target_loop_token): - assert target_loop_token is not None - self.argboxes = args - self.target_loop_token = target_loop_token - class ChangeFrame(JitException): """Raised after we mutated metainterp.framestack, in order to force it to reload the current top-of-stack frame that gets interpreted.""" diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -369,6 +369,8 @@ 'FINISH/*d', '_FINAL_LAST', + 'LABEL/*d', + '_GUARD_FIRST', '_GUARD_FOLDABLE_FIRST', 'GUARD_TRUE/1d', @@ -379,11 +381,11 @@ 'GUARD_ISNULL/1d', 'GUARD_NONNULL_CLASS/2d', '_GUARD_FOLDABLE_LAST', - 'GUARD_NO_EXCEPTION/0d', - 'GUARD_EXCEPTION/1d', + 'GUARD_NO_EXCEPTION/0d', # may be called with an exception currently set + 'GUARD_EXCEPTION/1d', # may be called with an exception currently set 'GUARD_NO_OVERFLOW/0d', 'GUARD_OVERFLOW/0d', - 'GUARD_NOT_FORCED/0d', + 'GUARD_NOT_FORCED/0d', # may be called with an exception 
currently set 'GUARD_NOT_INVALIDATED/0d', '_GUARD_LAST', # ----- end of guard operations ----- @@ -494,6 +496,7 @@ 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr + 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/jit/metainterp/resume.py b/pypy/jit/metainterp/resume.py --- a/pypy/jit/metainterp/resume.py +++ b/pypy/jit/metainterp/resume.py @@ -93,12 +93,14 @@ TAGMASK = 3 +class TagOverflow(Exception): + pass + def tag(value, tagbits): - if tagbits >> 2: - raise ValueError + assert 0 <= tagbits <= 3 sx = value >> 13 if sx != 0 and sx != -1: - raise ValueError + raise TagOverflow return rffi.r_short(value<<2|tagbits) def untag(value): @@ -153,7 +155,7 @@ return self._newconst(const) try: return tag(val, TAGINT) - except ValueError: + except TagOverflow: pass tagged = self.large_ints.get(val, UNASSIGNED) if not tagged_eq(tagged, UNASSIGNED): @@ -429,8 +431,7 @@ fieldnum = self._gettagged(fieldbox) # the index is limited to 2147483647 (64-bit machines only) if itemindex > 2147483647: - from pypy.jit.metainterp import compile - compile.giveup() + raise TagOverflow itemindex = rffi.cast(rffi.INT, itemindex) # rd_pendingfields[i].lldescr = lldescr diff --git a/pypy/jit/metainterp/test/support.py b/pypy/jit/metainterp/test/support.py --- a/pypy/jit/metainterp/test/support.py +++ b/pypy/jit/metainterp/test/support.py @@ -4,9 +4,9 @@ from pypy.rpython.ootypesystem import ootype from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats +from pypy.jit.metainterp.warmstate import unspecialize_value from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT from pypy.jit.metainterp import pyjitpl, history -from pypy.jit.metainterp.warmstate import set_future_value from pypy.jit.codewriter.policy import JitPolicy from pypy.jit.codewriter import codewriter, longlong from pypy.rlib.rfloat import isnan @@ -16,15 +16,16 @@ from pypy.jit.codewriter import support class FakeJitCell(object): - __compiled_merge_points = [] - def get_compiled_merge_points(self): - return self.__compiled_merge_points[:] - def set_compiled_merge_points(self, lst): - self.__compiled_merge_points = lst + __product_token = None + def get_procedure_token(self): + return self.__product_token + def set_procedure_token(self, token): + self.__product_token = token class FakeWarmRunnerState(object): - def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): - pass + def attach_procedure_to_interp(self, greenkey, procedure_token): + cell = self.jit_cell_at_key(greenkey) + cell.set_procedure_token(procedure_token) def helper_func(self, FUNCPTR, func): from pypy.rpython.annlowlevel import llhelper @@ -132,16 +133,14 @@ def _run_with_machine_code(testself, args): metainterp = testself.metainterp num_green_args = metainterp.jitdriver_sd.num_green_args - loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) - if len(loop_tokens) != 1: - return NotImplemented + procedure_token = metainterp.get_procedure_token(args[:num_green_args]) # a loop was successfully created by _run_with_pyjitpl(); call it cpu = metainterp.cpu + args1 = [] for i in range(len(args) - num_green_args): x = args[num_green_args + i] - typecode = history.getkind(lltype.typeOf(x)) - set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(loop_tokens[0]) + args1.append(unspecialize_value(x)) + faildescr = 
cpu.execute_token(procedure_token, *args1) assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') if metainterp.jitdriver_sd.result_type == history.INT: return cpu.get_latest_value_int(0) @@ -155,26 +154,36 @@ class JitMixin: basic = True - def check_loops(self, expected=None, everywhere=False, **check): - get_stats().check_loops(expected=expected, everywhere=everywhere, - **check) - def check_loop_count(self, count): - """NB. This is a hack; use check_tree_loop_count() or - check_enter_count() for the real thing. - This counts as 1 every bridge in addition to every loop; and it does - not count at all the entry bridges from interpreter, although they - are TreeLoops as well.""" + def check_resops(self, expected=None, **check): + get_stats().check_resops(expected=expected, **check) + def check_simple_loop(self, expected=None, **check): + get_stats().check_simple_loop(expected=expected, **check) + + + + def check_trace_count(self, count): # was check_loop_count + # The number of traces compiled assert get_stats().compiled_count == count - def check_tree_loop_count(self, count): - assert len(get_stats().loops) == count - def check_loop_count_at_most(self, count): + def check_trace_count_at_most(self, count): assert get_stats().compiled_count <= count + + def check_jitcell_token_count(self, count): # was check_tree_loop_count + assert len(get_stats().jitcell_token_wrefs) == count + + def check_target_token_count(self, count): + tokens = get_stats().get_all_jitcell_tokens() + n = sum ([len(t.target_tokens) for t in tokens]) + assert n == count + def check_enter_count(self, count): assert get_stats().enter_count == count def check_enter_count_at_most(self, count): assert get_stats().enter_count <= count + def check_jumps(self, maxcount): + return # FIXME assert get_stats().exec_jumps <= maxcount + def check_aborted_count(self, count): assert get_stats().aborted_count == count def check_aborted_count_at_least(self, count): @@ -217,7 +226,7 @@ # this can be used after interp_operations if expected is not None: expected = dict(expected) - expected['jump'] = 1 + expected['finish'] = 1 self.metainterp.staticdata.stats.check_history(expected, **isns) diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -9,12 +9,11 @@ from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin, noConst from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.jit.metainterp.warmspot import get_stats -from pypy.jit.metainterp.warmstate import set_future_value from pypy.rlib import rerased from pypy.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside, loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, - isconstant, isvirtual, promote_string, set_param) + isconstant, isvirtual, promote_string, set_param, record_known_class) from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -66,7 +65,7 @@ res = self.interp_operations(f, [8, 98]) assert res == 110 - def test_loop(self): + def test_loop_1(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) def f(x, y): res = 0 @@ -78,20 +77,20 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 42 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 
'int_gt': 1, - 'jump': 1}) + self.check_trace_count(1) + self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, + 'guard_true': 2, 'int_sub': 2}) + if self.basic: found = 0 - for op in get_stats().loops[0]._all_operations(): + for op in get_stats().get_all_loops()[0]._all_operations(): if op.getopname() == 'guard_true': liveboxes = op.getfailargs() assert len(liveboxes) == 3 for box in liveboxes: assert isinstance(box, history.BoxInt) found += 1 - assert found == 1 + assert found == 2 def test_loop_variant_mul1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -107,8 +106,8 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 1323 - self.check_loop_count(1) - self.check_loops(int_mul=1) + self.check_trace_count(1) + self.check_simple_loop(int_mul=1) def test_loop_variant_mul_ovf(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -124,8 +123,8 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 1323 - self.check_loop_count(1) - self.check_loops(int_mul_ovf=1) + self.check_trace_count(1) + self.check_simple_loop(int_mul_ovf=1) def test_loop_invariant_mul1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -139,10 +138,11 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 252 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) + self.check_trace_count(1) + self.check_simple_loop(int_mul=0) + self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, + 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) + def test_loop_invariant_mul_ovf(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -157,69 +157,63 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 308 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 2, 'int_sub': 1, 'int_gt': 1, - 'int_lshift': 1, - 'jump': 1}) + self.check_trace_count(1) + self.check_simple_loop(int_mul_ovf=0) + self.check_resops({'jump': 1, 'int_lshift': 2, 'int_gt': 2, + 'int_mul_ovf': 1, 'int_add': 4, + 'guard_true': 2, 'guard_no_overflow': 1, + 'int_sub': 2}) def test_loop_invariant_mul_bridge1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x', 'n']) + def f(x, y, n): res = 0 while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) + myjitdriver.can_enter_jit(x=x, y=y, n=n, res=res) + myjitdriver.jit_merge_point(x=x, y=y, n=n, res=res) res += x * x - if y<16: + if y Author: hager Branch: ppc-jit-backend Changeset: r50752:b3404ddd47d9 Date: 2011-12-15 18:26 +0100 http://bitbucket.org/pypy/pypy/changeset/b3404ddd47d9/ Log: add emit_force_token diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -948,6 +948,10 @@ class ForceOpAssembler(object): _mixin_ = True + + def emit_force_token(self, op, arglocs, regalloc): + res_loc = arglocs[0] + self.mc.mr(res_loc.value, r.SPP.value) # from: ../x86/assembler.py:1668 # XXX Split into some helper methods From noreply at buildbot.pypy.org Tue Dec 20 15:38:03 2011 From: noreply at buildbot.pypy.org (hager) Date: Tue, 20 Dec 2011 15:38:03 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: (arigo, hager): Reconsider spilling in PPC Message-ID: <20111220143803.472D3823F8@wyvern.cs.uni-duesseldorf.de> Author: hager 
Branch: ppc-jit-backend Changeset: r50753:99496f3054f6 Date: 2011-12-20 15:28 +0100 http://bitbucket.org/pypy/pypy/changeset/99496f3054f6/ Log: (arigo, hager): Reconsider spilling in PPC diff --git a/pypy/jit/backend/ppc/ppcgen/arch.py b/pypy/jit/backend/ppc/ppcgen/arch.py --- a/pypy/jit/backend/ppc/ppcgen/arch.py +++ b/pypy/jit/backend/ppc/ppcgen/arch.py @@ -1,7 +1,8 @@ # Constants that depend on whether we are on 32-bit or 64-bit from pypy.jit.backend.ppc.ppcgen.register import (NONVOLATILES, - NONVOLATILES_FLOAT) + NONVOLATILES_FLOAT, + MANAGED_REGS) import sys if sys.maxint == (2**31 - 1): @@ -22,3 +23,5 @@ FPR_SAVE_AREA = len(NONVOLATILES_FLOAT) * DWORD FLOAT_INT_CONVERSION = WORD MAX_REG_PARAMS = 8 + +FORCE_INDEX_OFS = len(MANAGED_REGS) * WORD diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -185,7 +185,8 @@ spilling_pointer is the address of the FORCE_INDEX. """ - return self.decode_registers_and_descr(mem_loc, stack_pointer, spilling_pointer) + return self.decode_registers_and_descr(mem_loc, stack_pointer, + spilling_pointer) self.failure_recovery_func = failure_recovery_func @@ -201,10 +202,7 @@ ''' enc = rffi.cast(rffi.CCHARP, mem_loc) managed_size = WORD * len(r.MANAGED_REGS) - # XXX do some sanity considerations - spilling_depth = spp_loc - stack_loc + managed_size - spilling_area = rffi.cast(rffi.CCHARP, stack_loc + managed_size) - assert spilling_depth >= 0 + assert spp_loc > stack_loc regs = rffi.cast(rffi.CCHARP, spp_loc) @@ -235,7 +233,8 @@ if group == self.FLOAT_TYPE: assert 0, "not implemented yet" else: - value = decode32(spilling_area, spilling_depth - stack_location * WORD) + start = spp_loc - (stack_location + 1) * WORD + value = rffi.cast(rffi.LONGP, start)[0] else: # REG_LOC reg = ord(enc[i]) if group == self.FLOAT_TYPE: @@ -669,12 +668,15 @@ if op.has_no_side_effect() and op.result not in regalloc.longevity: regalloc.possibly_free_vars_for_op(op) elif self.can_merge_with_next_guard(op, pos, operations)\ - and opnum in (rop.CALL_RELEASE_GIL, rop.CALL_ASSEMBLER): # XXX fix + and opnum in (rop.CALL_RELEASE_GIL, rop.CALL_ASSEMBLER,\ + rop.CALL_MAY_FORCE): # XXX fix regalloc.next_instruction() arglocs = regalloc.operations_with_guard[opnum](regalloc, op, operations[pos+1]) operations_with_guard[opnum](self, op, operations[pos+1], arglocs, regalloc) + elif not we_are_translated() and op.getopnum() == -124: + regalloc.prepare_force_spill(op) else: arglocs = regalloc.operations[opnum](regalloc, op) if arglocs is not None: @@ -756,14 +758,14 @@ tok.pos_recovery_stub = pos memaddr = self.gen_exit_stub(descr, tok.failargs, - tok.faillocs, save_exc=tok.save_exc) + tok.faillocs, + save_exc=tok.save_exc) # store info on the descr descr._ppc_frame_depth = tok.faillocs[0].getint() descr._failure_recovery_code = memaddr descr._ppc_guard_pos = pos - def gen_exit_stub(self, descr, args, arglocs, fcond=c.NE, - save_exc=False): + def gen_exit_stub(self, descr, args, arglocs, save_exc=False): memaddr = self.gen_descr_encoding(descr, args, arglocs) # store addr in force index field diff --git a/pypy/jit/backend/ppc/ppcgen/regalloc.py b/pypy/jit/backend/ppc/ppcgen/regalloc.py --- a/pypy/jit/backend/ppc/ppcgen/regalloc.py +++ b/pypy/jit/backend/ppc/ppcgen/regalloc.py @@ -100,7 +100,7 @@ class PPCFrameManager(FrameManager): def __init__(self): FrameManager.__init__(self) - self.frame_depth = 1 + self.frame_depth = 0 @staticmethod 
    def frame_pos(loc, type):
@@ -865,6 +865,10 @@
         assert (1 << scale) == size
         return size, scale, ofs, ofs_length, ptr

+    def prepare_force_spill(self, op):
+        self.force_spill_var(op.getarg(0))
+        return []
+
 def add_none_argument(fn):
     return lambda self, op: fn(self, op, None)
diff --git a/pypy/jit/backend/ppc/runner.py b/pypy/jit/backend/ppc/runner.py
--- a/pypy/jit/backend/ppc/runner.py
+++ b/pypy/jit/backend/ppc/runner.py
@@ -6,7 +6,7 @@
 from pypy.jit.metainterp import history, compile
 from pypy.jit.metainterp.history import BoxPtr
 from pypy.jit.backend.x86.assembler import Assembler386
-from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS
+from pypy.jit.backend.ppc.ppcgen.arch import FORCE_INDEX_OFS
 from pypy.jit.backend.x86.profagent import ProfileAgent
 from pypy.jit.backend.llsupport.llmodel import AbstractLLCPU
 from pypy.jit.backend.x86 import regloc
diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py
--- a/pypy/jit/backend/test/runner_test.py
+++ b/pypy/jit/backend/test/runner_test.py
@@ -540,6 +540,21 @@
         result = self.cpu.get_latest_value_ref(0)
         assert result == u_box.value

+    def test_spilling(self):
+        ops = '''
+        [i0]
+        force_spill(i0)
+        finish(i0)
+        '''
+        loop = parse(ops, namespace=locals())
+        looptoken = LoopToken()
+        self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken)
+
+        self.cpu.set_future_value_int(0, 42)
+        fail = self.cpu.execute_token(looptoken)
+        result = self.cpu.get_latest_value_int(0)
+        assert result == 42
+
     def test_bh_call(self):
         cpu = self.cpu
         #
@@ -632,6 +647,7 @@
                                     'float', descr=calldescr)
         assert abs(res.getfloat() - 4.6) < 0.0001
+
     def test_call_many_arguments(self):
         # Test calling a function with a large number of arguments (more than
         # 6, which will force passing some arguments on the stack on 64-bit)

From noreply at buildbot.pypy.org  Tue Dec 20 15:38:04 2011
From: noreply at buildbot.pypy.org (hager)
Date: Tue, 20 Dec 2011 15:38:04 +0100 (CET)
Subject: [pypy-commit] pypy ppc-jit-backend: add convenience function for conditional absolute branches
Message-ID: <20111220143804.6E95E823F9@wyvern.cs.uni-duesseldorf.de>

Author: hager
Branch: ppc-jit-backend
Changeset: r50754:981b61e0d09a
Date: 2011-12-20 15:31 +0100
http://bitbucket.org/pypy/pypy/changeset/981b61e0d09a/

Log: add convenience function for conditional absolute branches

diff --git a/pypy/jit/backend/ppc/ppcgen/codebuilder.py b/pypy/jit/backend/ppc/ppcgen/codebuilder.py
--- a/pypy/jit/backend/ppc/ppcgen/codebuilder.py
+++ b/pypy/jit/backend/ppc/ppcgen/codebuilder.py
@@ -1007,6 +1007,13 @@
         target_ofs = offset - pos
         self.bc(condition, 2, target_ofs)

+    def b_cond_abs(self, addr, condition):
+        assert condition in (c.EQ, c.NE)
+        self.alloc_scratch_reg(addr)
+        self.mtctr(r.SCRATCH.value)
+        self.free_scratch_reg()
+        self.bcctr(condition, 2)
+
     def b_abs(self, address, trap=False):
         self.alloc_scratch_reg(address)
         self.mtctr(r.r0.value)

From noreply at buildbot.pypy.org  Tue Dec 20 15:38:05 2011
From: noreply at buildbot.pypy.org (hager)
Date: Tue, 20 Dec 2011 15:38:05 +0100 (CET)
Subject: [pypy-commit] pypy ppc-jit-backend: beginning of exception handling during memory allocation
Message-ID: <20111220143805.988F1823FB@wyvern.cs.uni-duesseldorf.de>

Author: hager
Branch: ppc-jit-backend
Changeset: r50755:81c255a5fb96
Date: 2011-12-20 15:36 +0100
http://bitbucket.org/pypy/pypy/changeset/81c255a5fb96/

Log: beginning of exception handling during memory allocation

diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py
--- a/pypy/jit/backend/ppc/ppcgen/opassembler.py
+++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py
@@ -838,7 +838,7 @@
         self.mc.std(r.r0.value, r.r3.value, self.cpu.vtable_offset)

     def emit_new_array(self, op, arglocs, regalloc):
-        # XXX handle memory errors
+        self.propagate_memoryerror_if_r3_is_null()
         if len(arglocs) > 0:
             value_loc, base_loc, ofs_length = arglocs
             if IS_PPC_32:
diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py
--- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py
+++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py
@@ -102,6 +102,7 @@
         self.current_clt = None
         self._regalloc = None
         self.max_stack_params = 0
+        self.propagate_exception_path = 0

     def _save_nonvolatiles(self):
         """ save nonvolatile GPRs in GPR SAVE AREA
@@ -290,6 +291,21 @@
             locs.append(loc)
         return locs

+    def _build_propagate_exception_path(self):
+        if self.cpu.propagate_exception_v < 0:
+            return
+
+        mc = PPCBuilder()
+        with Saved_Volatiles(mc):
+            addr = self.cpu.get_on_leave_jitted_int(save_exception=True)
+            mc.bl_abs(addr)
+        #mc.alloc_scratch_reg(self.cpu.propagate_exception_v)
+        #mc.mr(r.RES.value, r.SCRATCH.value)
+        #mc.free_scratch_reg()
+        mc.load_imm(r.RES, self.cpu.propagate_exception_v)
+        mc.prepare_insts_blocks()
+        self.propagate_exception_path = mc.materialize(self.cpu.asmmemmgr, [])
+
     def _gen_leave_jitted_hook_code(self, save_exc=False):
         mc = PPCBuilder()
@@ -328,7 +344,7 @@
         # load parameters into parameter registers
         if IS_PPC_32:
             mc.lwz(r.r3.value, r.SPP.value, self.ENCODING_AREA) # address of state encoding
-        else: 
+        else:
             mc.ld(r.r3.value, r.SPP.value, self.ENCODING_AREA)
         mc.mr(r.r4.value, r.SP.value)  # load stack pointer
         mc.mr(r.r5.value, r.SPP.value) # load spilling pointer
@@ -495,6 +511,7 @@
         gc_ll_descr.initialize()
         ll_new = gc_ll_descr.get_funcptr_for_new()
         self.malloc_func_addr = rffi.cast(lltype.Signed, ll_new)
+        self._build_propagate_exception_path()
         if gc_ll_descr.get_funcptr_for_newarray is not None:
             ll_new_array = gc_ll_descr.get_funcptr_for_newarray()
             self.malloc_array_func_addr = rffi.cast(lltype.Signed,
@@ -955,6 +972,10 @@
         assert gcrootmap.is_shadow_stack
         gcrootmap.write_callshape(mark, force_index)

+    def propagate_memoryerror_if_r3_is_null(self):
+        self.mc.cmp_op(0, r.RES.value, 0, imm=True)
+        self.mc.b_cond_abs(self.propagate_exception_path, c.EQ)
+
     def write_new_force_index(self):
         # for shadowstack only: get a new, unused force_index number and
         # write it to FORCE_INDEX_OFS.
Used to record the call shape From noreply at buildbot.pypy.org Tue Dec 20 15:38:06 2011 From: noreply at buildbot.pypy.org (hager) Date: Tue, 20 Dec 2011 15:38:06 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: merge Message-ID: <20111220143806.CABD1823FC@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50756:4b2b09579148 Date: 2011-12-20 15:37 +0100 http://bitbucket.org/pypy/pypy/changeset/4b2b09579148/ Log: merge diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -561,26 +561,34 @@ self.fielddescr_tid = get_field_descr(gc_ll_descr, GCClass.HDR, 'tid') # self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG - self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = ( + (self.jit_wb_if_flag_byteofs, + self.jit_wb_if_flag_singlebyte, + self.jit_wb_if_flag_bitpos) = ( self.extract_flag_byte(self.jit_wb_if_flag)) # if hasattr(GCClass, 'JIT_WB_CARDS_SET'): self.jit_wb_cards_set = GCClass.JIT_WB_CARDS_SET self.jit_wb_card_page_shift = GCClass.JIT_WB_CARD_PAGE_SHIFT - self.jit_wb_cards_set_byteofs, self.jit_wb_cards_set_singlebyte = ( + (self.jit_wb_cards_set_byteofs, + self.jit_wb_cards_set_singlebyte, + self.jit_wb_cards_set_bitpos) = ( self.extract_flag_byte(self.jit_wb_cards_set)) else: self.jit_wb_cards_set = 0 def extract_flag_byte(self, flag_word): # if convenient for the backend, we compute the info about - # the flag as (byte-offset, single-byte-flag). + # the flag as (byte-offset, single-byte-flag, bit-position-in-word). + # Note that flag_word == 1 << bit_position_in_word. import struct value = struct.pack("l", flag_word) assert value.count('\x00') == len(value) - 1 # only one byte is != 0 i = 0 while value[i] == '\x00': i += 1 - return (i, struct.unpack('b', value[i])[0]) + bitpos = 0 + while flag_word > (1 << bitpos): bitpos += 1 + assert flag_word == (1 << bitpos) + return (i, struct.unpack('b', value[i])[0], bitpos) def get_write_barrier_fn(self, cpu): llop1 = self.llop1 diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -898,22 +898,14 @@ else: self.mc.ld(r.SCRATCH.value, loc_base.value, 0) - # offset to the byte we are interested in - byte_offset = descr.jit_wb_if_flag_byteofs - single_byte = descr.jit_wb_if_flag_singlebyte - - # examine which bit in the byte is set - for i in range(8): - if 1 << i == single_byte: - n = i - break + # get the position of the bit we want to test + bitpos = descr.jit_wb_if_flag_bitpos if IS_PPC_32: - # compute the position of the bit we want to test - bitpos = (3 - byte_offset) * 8 + n - # ^^^^^^^^^^^^^^^ due to endianess # put this bit to the rightmost bitposition of r0 - self.mc.rlwinm(r.SCRATCH.value, r.SCRATCH.value, 32 - bitpos, 31, 31) + if bitpos > 0: + self.mc.rlwinm(r.SCRATCH.value, r.SCRATCH.value, + 32 - bitpos, 31, 31) # test whether this bit is set self.mc.cmpwi(0, r.SCRATCH.value, 1) else: diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -1961,6 +1961,7 @@ jit_wb_if_flag = 4096 jit_wb_if_flag_byteofs = struct.pack("i", 4096).index('\x10') jit_wb_if_flag_singlebyte = 0x10 + jit_wb_if_flag_bitpos = 12 def get_write_barrier_fn(self, cpu): return funcbox.getint() # @@ -1998,6 +1999,7 @@ jit_wb_if_flag = 4096 
jit_wb_if_flag_byteofs = struct.pack("i", 4096).index('\x10') jit_wb_if_flag_singlebyte = 0x10 + jit_wb_if_flag_bitpos = 12 jit_wb_cards_set = 0 def get_write_barrier_from_array_fn(self, cpu): return funcbox.getint() @@ -2044,9 +2046,11 @@ jit_wb_if_flag = 4096 jit_wb_if_flag_byteofs = struct.pack("i", 4096).index('\x10') jit_wb_if_flag_singlebyte = 0x10 + jit_wb_if_flag_bitpos = 12 jit_wb_cards_set = 8192 jit_wb_cards_set_byteofs = struct.pack("i", 8192).index('\x20') jit_wb_cards_set_singlebyte = 0x20 + jit_wb_cards_set_bitpos = 13 jit_wb_card_page_shift = 7 def get_write_barrier_from_array_fn(self, cpu): return funcbox.getint() From noreply at buildbot.pypy.org Tue Dec 20 15:53:32 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Dec 2011 15:53:32 +0100 (CET) Subject: [pypy-commit] pypy default: Change again the repr of CallDescrs, this time to standardize the format Message-ID: <20111220145332.EC48D820B7@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50757:b3fc00ecbb30 Date: 2011-12-20 13:55 +0000 http://bitbucket.org/pypy/pypy/changeset/b3fc00ecbb30/ Log: Change again the repr of CallDescrs, this time to standardize the format with the other Descrs and to include more information. diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -425,7 +425,15 @@ self.arg_classes.count('L')) == len(args_f or ()) def repr_of_descr(self): - return '' % (self.arg_classes, self.result_type) + res = 'Call%s %d' % (self.result_type, self.result_size) + if self.arg_classes: + res += ' ' + self.arg_classes + if self.extrainfo: + res += ' EF=%d' % self.extrainfo.extraeffect + oopspecindex = self.extrainfo.oopspecindex + if oopspecindex: + res += ' OS=%d' % oopspecindex + return '<%s>' % res def map_type_to_argclass(ARG, accept_void=False): diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -342,16 +342,16 @@ assert repr_of_descr(descr3i) == '' # descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) - assert repr_of_descr(descr4) == '' + assert repr_of_descr(descr4) == '' % o # descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) - assert repr_of_descr(descr4i) == '' + assert repr_of_descr(descr4i) == '' # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) - assert repr_of_descr(descr4f) == '' + assert repr_of_descr(descr4f) == '' # descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat) - assert repr_of_descr(descr5f) == '' + assert repr_of_descr(descr5f) == '' def test_call_stubs_1(): c0 = GcCache(False) From noreply at buildbot.pypy.org Tue Dec 20 15:53:34 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Dec 2011 15:53:34 +0100 (CET) Subject: [pypy-commit] pypy default: - fix the tests to expect the new format of descrs. Message-ID: <20111220145334.27995820B7@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50758:85a5e1fe1ad8 Date: 2011-12-20 14:53 +0000 http://bitbucket.org/pypy/pypy/changeset/85a5e1fe1ad8/ Log: - fix the tests to expect the new format of descrs. - fix match() to always raise InvalidMatch if the match fails because I found that at least one test was not doing "assert match(..)" but just "match(..)" which always passes... 
- test_generator still fails, but that should be fixed on the new version of the 'counter-decay' branch. diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -311,7 +311,7 @@ # to repeat it every time ticker_check = """ guard_not_invalidated? - ticker0 = getfield_raw(ticker_address, descr=) + ticker0 = getfield_raw(ticker_address, descr=) ticker_cond0 = int_lt(ticker0, 0) guard_false(ticker_cond0, descr=...) """ @@ -320,9 +320,9 @@ # this is the ticker check generated if we have threads thread_ticker_check = """ guard_not_invalidated? - ticker0 = getfield_raw(ticker_address, descr=) + ticker0 = getfield_raw(ticker_address, descr=) ticker1 = int_sub(ticker0, _) - setfield_raw(ticker_address, ticker1, descr=) + setfield_raw(ticker_address, ticker1, descr=) ticker_cond0 = int_lt(ticker1, 0) guard_false(ticker_cond0, descr=...) """ @@ -330,7 +330,7 @@ # # this is the ticker check generated in PyFrame.handle_operation_error exc_ticker_check = """ - ticker2 = getfield_raw(ticker_address, descr=) + ticker2 = getfield_raw(ticker_address, descr=) ticker_cond1 = int_lt(ticker2, 0) guard_false(ticker_cond1, descr=...) """ @@ -451,7 +451,6 @@ try: self.match_loop(expected_ops, ignore_ops) except InvalidMatch, e: - #raise # uncomment this and use py.test --pdb for better debugging print '@' * 40 print "Loops don't match" print "=================" @@ -464,7 +463,7 @@ print print "Expected:" print format(expected_src) - return False + raise # always propagate the exception in case of mismatch else: return True diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -7,8 +7,9 @@ from pypy.tool.udir import udir from pypy.tool import logparser from pypy.jit.tool.jitoutput import parse_prof -from pypy.module.pypyjit.test_pypy_c.model import Log, find_ids_range, find_ids, \ - TraceWithIds, OpMatcher +from pypy.module.pypyjit.test_pypy_c.model import (Log, find_ids_range, + find_ids, TraceWithIds, + OpMatcher, InvalidMatch) class BaseTestPyPyC(object): def setup_class(cls): @@ -115,13 +116,18 @@ assert opcodes_names == ['LOAD_FAST', 'LOAD_CONST', 'BINARY_ADD', 'STORE_FAST'] -class TestOpMatcher(object): +class TestOpMatcher_(object): def match(self, src1, src2, **kwds): from pypy.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) matcher = OpMatcher(loop.operations) - return matcher.match(src2, **kwds) + try: + res = matcher.match(src2, **kwds) + assert res is True + return True + except InvalidMatch: + return False def test_match_var(self): match_var = OpMatcher([]).match_var @@ -447,7 +453,7 @@ jump(p0, p1, p2, p3, i8, descr=...) """) # - assert not loop.match(""" + py.test.raises(InvalidMatch, loop.match, """ i6 = int_lt(i4, 1003) guard_true(i6) i8 = int_add(i5, 1) # variable mismatch @@ -492,9 +498,8 @@ guard_no_exception(descr=...) """) # - assert not loop.match_by_id('ntohs', """ + py.test.raises(InvalidMatch, loop.match_by_id, 'ntohs', """ guard_not_invalidated(descr=...) p12 = call(ConstClass(foobar), 1, descr=...) guard_no_exception(descr=...) 
""") - diff --git a/pypy/module/pypyjit/test_pypy_c/test__ffi.py b/pypy/module/pypyjit/test_pypy_c/test__ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test__ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test__ffi.py @@ -35,7 +35,7 @@ guard_not_invalidated(descr=...) i17 = force_token() setfield_gc(p0, i17, descr=<.* .*PyFrame.vable_token .*>) - f21 = call_release_gil(%s, 2.000000, 3.000000, descr=) + f21 = call_release_gil(%s, 2.000000, 3.000000, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) """ % pow_addr) diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -42,7 +42,7 @@ guard_not_invalidated(descr=...) i13 = int_lt(i7, i9) guard_true(i13, descr=...) - i15 = getarrayitem_raw(i10, i7, descr=<.*ArrayNoLengthDescr>) + i15 = getarrayitem_raw(i10, i7, descr=) i16 = int_add_ovf(i8, i15) guard_no_overflow(descr=...) i18 = int_add(i7, 1) @@ -72,17 +72,17 @@ guard_true(i13, descr=...) guard_not_invalidated(descr=...) # the bound check guard on img has been killed (thanks to the asserts) - i14 = getarrayitem_raw(i10, i8, descr=<.*ArrayNoLengthDescr>) + i14 = getarrayitem_raw(i10, i8, descr=) i15 = int_add_ovf(i9, i14) guard_no_overflow(descr=...) i17 = int_sub(i8, 640) # the bound check guard on intimg has been killed (thanks to the asserts) - i18 = getarrayitem_raw(i11, i17, descr=<.*ArrayNoLengthDescr>) + i18 = getarrayitem_raw(i11, i17, descr=) i19 = int_add_ovf(i18, i15) guard_no_overflow(descr=...) # on 64bit, there is a guard checking that i19 actually fits into 32bit ... - setarrayitem_raw(i11, i8, _, descr=<.*ArrayNoLengthDescr>) + setarrayitem_raw(i11, i8, _, descr=) i28 = int_add(i8, 1) --TICK-- jump(p0, p1, p2, p3, p4, p5, p6, i28, i15, p9, i10, i11, descr=...) @@ -107,10 +107,10 @@ guard_true(i10, descr=...) i11 = int_lt(i6, i7) guard_true(i11, descr=...) - f13 = getarrayitem_raw(i8, i6, descr=) + f13 = getarrayitem_raw(i8, i6, descr=) f15 = float_add(f13, 20.500000) - setarrayitem_raw(i8, i6, f15, descr=) - f16 = getarrayitem_raw(i8, i6, descr=) + setarrayitem_raw(i8, i6, f15, descr=) + f16 = getarrayitem_raw(i8, i6, descr=) i18 = float_eq(f16, 42.000000) guard_true(i18, descr=...) i20 = int_add(i6, 1) @@ -132,28 +132,24 @@ log = self.run(main, []) assert log.result == 321 loop, = log.loops_by_filename(self.filepath) - if sys.maxint == 2147483647: - arraydescr = 'UnsignedArrayNoLengthDescr' - else: - arraydescr = 'UINTArrayNoLengthDescr' assert loop.match(""" i10 = int_lt(i6, 1000) guard_true(i10, descr=...) i11 = int_lt(i6, i7) guard_true(i11, descr=...) - i13 = getarrayitem_raw(i8, i6, descr=<%s>) + i13 = getarrayitem_raw(i8, i6, descr=) f14 = cast_singlefloat_to_float(i13) f16 = float_add(f14, 20.500000) i17 = cast_float_to_singlefloat(f16) - setarrayitem_raw(i8, i6,i17, descr=<%s>) - i18 = getarrayitem_raw(i8, i6, descr=<%s>) + setarrayitem_raw(i8, i6,i17, descr=) + i18 = getarrayitem_raw(i8, i6, descr=) f19 = cast_singlefloat_to_float(i18) i21 = float_eq(f19, 42.000000) guard_true(i21, descr=...) i23 = int_add(i6, 1) --TICK-- jump(..., descr=...) 
- """ % (arraydescr, arraydescr, arraydescr)) + """) def test_zeropadded(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -75,12 +75,12 @@ assert log.opnames(ops) == [] # assert entry_bridge.match_by_id('call', """ - p38 = call(ConstClass(getexecutioncontext), descr=) - p39 = getfield_gc(p38, descr=) + p38 = call(ConstClass(getexecutioncontext), descr=) + p39 = getfield_gc(p38, descr=) i40 = force_token() - p41 = getfield_gc(p38, descr=) + p41 = getfield_gc(p38, descr=) guard_isnull(p41, descr=...) - i42 = getfield_gc(p38, descr=) + i42 = getfield_gc(p38, descr=) i43 = int_is_zero(i42) guard_true(i43, descr=...) i50 = force_token() @@ -192,7 +192,7 @@ assert log.result == 1000 loop, = log.loops_by_id('call') assert loop.match_by_id('call', """ - p14 = getarrayitem_gc_pure(p8, i9, descr=) + p14 = getarrayitem_gc_pure(p8, i9, descr=) i14 = force_token() i16 = force_token() """) @@ -336,15 +336,15 @@ loop, = log.loops_by_filename(self.filepath) # the int strategy is used here assert loop.match_by_id('append', """ - i13 = getfield_gc(p8, descr=) + i13 = getfield_gc(p8, descr=) i15 = int_add(i13, 1) # Will be killed by the backend - p15 = getfield_gc(p8, descr=) - i17 = arraylen_gc(p15, descr=) - call(_, p8, i15, descr=) # this is a call to _ll_list_resize_ge_trampoline__... + p15 = getfield_gc(p8, descr=) + i17 = arraylen_gc(p15, descr=) + call(_, p8, i15, descr=) # this is a call to _ll_list_resize_ge_trampoline__... guard_no_exception(descr=...) - p17 = getfield_gc(p8, descr=) - setarrayitem_gc(p17, i13, i12, descr=) + p17 = getfield_gc(p8, descr=) + setarrayitem_gc(p17, i13, i12, descr=) """) def test_blockstack_virtualizable(self): @@ -368,13 +368,13 @@ ... i20 = force_token() p22 = new_with_vtable(19511408) - p24 = new_array(1, descr=) + p24 = new_array(1, descr=) p26 = new_with_vtable(ConstClass(W_ListObject)) - setfield_gc(p0, i20, descr=) - setfield_gc(p26, ConstPtr(ptr22), descr=) - setarrayitem_gc(p24, 0, p26, descr=) - setfield_gc(p22, p24, descr=) - p32 = call_may_force(11376960, p18, p22, descr=) + setfield_gc(p0, i20, descr=) + setfield_gc(p26, ConstPtr(ptr22), descr=) + setarrayitem_gc(p24, 0, p26, descr=) + setfield_gc(p22, p24, descr=) + p32 = call_may_force(11376960, p18, p22, descr=) ... """) @@ -415,26 +415,26 @@ guard_nonnull_class(p8, ConstClass(W_IntObject), descr=...) guard_value(i4, 0, descr=...) guard_value(p3, ConstPtr(ptr14), descr=...) - i15 = getfield_gc_pure(p8, descr=) + i15 = getfield_gc_pure(p8, descr=) i17 = int_lt(i15, 5000) guard_true(i17, descr=...) - p18 = getfield_gc(p0, descr=) + p18 = getfield_gc(p0, descr=) guard_value(p18, ConstPtr(ptr19), descr=...) - p20 = getfield_gc(p18, descr=) + p20 = getfield_gc(p18, descr=) guard_value(p20, ConstPtr(ptr21), descr=...) guard_not_invalidated(descr=...) # most importantly, there is no getarrayitem_gc here - p23 = call(ConstClass(getexecutioncontext), descr=) - p24 = getfield_gc(p23, descr=) + p23 = call(ConstClass(getexecutioncontext), descr=) + p24 = getfield_gc(p23, descr=) i25 = force_token() - p26 = getfield_gc(p23, descr=) + p26 = getfield_gc(p23, descr=) guard_isnull(p26, descr=...) - i27 = getfield_gc(p23, descr=) + i27 = getfield_gc(p23, descr=) i28 = int_is_zero(i27) guard_true(i28, descr=...) 
- p30 = getfield_gc(ConstPtr(ptr29), descr=) + p30 = getfield_gc(ConstPtr(ptr29), descr=) guard_nonnull_class(p30, ConstClass(W_IntObject), descr=...) - i32 = getfield_gc_pure(p30, descr=) + i32 = getfield_gc_pure(p30, descr=) i33 = int_add_ovf(i15, i32) guard_no_overflow(descr=...) --TICK-- @@ -452,15 +452,15 @@ """, []) loop, = log.loops_by_id('call') assert loop.match(""" - i8 = getfield_gc_pure(p6, descr=) + i8 = getfield_gc_pure(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) i11 = force_token() i13 = int_add(i8, 1) --TICK-- p22 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p22, i13, descr=) - setfield_gc(p4, p22, descr=) + setfield_gc(p22, i13, descr=) + setfield_gc(p4, p22, descr=) jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -46,7 +46,7 @@ assert loop.match_by_id("getitem", """ i26 = call(ConstClass(ll_dict_lookup), p18, p6, i25, descr=...) ... - p33 = getinteriorfield_gc(p31, i26, descr=>) + p33 = getinteriorfield_gc(p31, i26, descr=>) ... """) @@ -86,28 +86,28 @@ i8 = int_lt(i5, i7) guard_true(i8, descr=...) guard_not_invalidated(descr=...) - p10 = call(ConstClass(ll_int_str), i5, descr=) + p10 = call(ConstClass(ll_int_str), i5, descr=) guard_no_exception(descr=...) - i12 = call(ConstClass(ll_strhash), p10, descr=) + i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) - p15 = new_array(8, descr=) - setfield_gc(p13, p15, descr=) - i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) - setfield_gc(p13, 16, descr=) + p15 = new_array(8, descr=) + setfield_gc(p13, p15, descr=) + i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + setfield_gc(p13, 16, descr=) guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) - call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) - setfield_gc(p20, i5, descr=) + call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) + setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) - i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) guard_no_exception(descr=...) i26 = int_and(i23, .*) i27 = int_is_true(i26) guard_false(i27, descr=...) - p28 = getfield_gc(p13, descr=) - p29 = getinteriorfield_gc(p28, i23, descr=>) + p28 = getfield_gc(p13, descr=) + p29 = getinteriorfield_gc(p28, i23, descr=>) guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...) - i31 = getfield_gc_pure(p29, descr=) + i31 = getfield_gc_pure(p29, descr=) i32 = int_sub_ovf(i31, i5) guard_no_overflow(descr=...) i34 = int_add_ovf(i32, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -16,11 +16,11 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("loadglobal", """ - p10 = getfield_gc(p0, descr=) + p10 = getfield_gc(p0, descr=) guard_value(p10, ConstPtr(ptr11), descr=...) - p12 = getfield_gc(p10, descr=) + p12 = getfield_gc(p10, descr=) guard_value(p12, ConstPtr(ptr13), descr=...) guard_not_invalidated(descr=...) 
- p19 = getfield_gc(ConstPtr(p17), descr=) + p19 = getfield_gc(ConstPtr(p17), descr=) guard_value(p19, ConstPtr(ptr20), descr=...) - """) \ No newline at end of file + """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -125,8 +125,8 @@ i12 = force_token() --TICK-- p20 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p20, i11, descr=) - setfield_gc(ConstPtr(ptr21), p20, descr=) + setfield_gc(p20, i11, descr=) + setfield_gc(ConstPtr(ptr21), p20, descr=) jump(p0, p1, p2, p3, p4, p20, p6, i7, p20, descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -23,8 +23,8 @@ f1 = cast_int_to_float(i0) i3 = float_le(f1, 0) guard_false(i3, descr=...) - f2 = call(ConstClass(log), f1, descr=) - f3 = call(ConstClass(log10), f1, descr=) + f2 = call(ConstClass(log), f1, descr=) + f3 = call(ConstClass(log10), f1, descr=) f4 = float_sub(f2, f3) f5 = float_add(f0, f4) i4 = int_add(i0, 1) @@ -52,8 +52,8 @@ f1 = cast_int_to_float(i0) i6 = --ISINF--(f1) guard_false(i6, descr=...) - f2 = call(ConstClass(sin), f1, descr=) - f3 = call(ConstClass(cos), f1, descr=) + f2 = call(ConstClass(sin), f1, descr=) + f3 = call(ConstClass(cos), f1, descr=) f4 = float_sub(f2, f3) f5 = float_add(f0, f4) i7 = int_add(i0, f1) @@ -84,7 +84,7 @@ i4 = int_or(i2, i3) i5 = int_is_true(i4) guard_false(i5, descr=...) - f2 = call(ConstClass(fmod), f1, 2.0, descr=) + f2 = call(ConstClass(fmod), f1, 2.0, descr=) f3 = float_add(f0, f2) i6 = int_sub(i0, 1) --TICK-- diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -204,18 +204,18 @@ assert log.result == 1000000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - i14 = getfield_gc(p12, descr=) + i14 = getfield_gc(p12, descr=) i16 = uint_ge(i12, i14) guard_false(i16, descr=...) - p16 = getfield_gc(p12, descr=) - p17 = getarrayitem_gc(p16, i12, descr=) + p16 = getfield_gc(p12, descr=) + p17 = getarrayitem_gc(p16, i12, descr=) i19 = int_add(i12, 1) - setfield_gc(p9, i19, descr=) + setfield_gc(p9, i19, descr=) guard_nonnull_class(p17, 146982464, descr=...) - i21 = getfield_gc(p17, descr=) + i21 = getfield_gc(p17, descr=) i23 = int_lt(0, i21) guard_true(i23, descr=...) - i24 = getfield_gc(p17, descr=) + i24 = getfield_gc(p17, descr=) i25 = getarrayitem_raw(i24, 0, descr=<.*>) i27 = int_lt(1, i21) guard_false(i27, descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -1,6 +1,9 @@ from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC +# XXX review the descrs to replace some EF=4 with EF=3 (elidable) + + class TestString(BaseTestPyPyC): def test_lookup_default_encoding(self): def main(n): @@ -72,7 +75,7 @@ i23 = strgetitem(p10, i19) p25 = newstr(1) strsetitem(p25, 0, i23) - p28 = call(ConstClass(strip_spaces), p25, descr=) + p28 = call(ConstClass(strip_spaces), p25, descr=) guard_no_exception(descr=...) i29 = strlen(p28) i30 = int_is_true(i29) @@ -88,9 +91,9 @@ guard_false(i41, descr=...) 
i43 = int_eq(i39, 43) guard_false(i43, descr=...) - i43 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr42), descr=) + i43 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr42), descr=) guard_false(i43, descr=...) - i46 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr45), descr=) + i46 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr45), descr=) guard_false(i46, descr=...) p51 = new_with_vtable(21136408) setfield_gc(p51, _, descr=...) # 7 setfields, but the order is dict-order-dependent @@ -100,9 +103,9 @@ setfield_gc(p51, _, descr=...) setfield_gc(p51, _, descr=...) setfield_gc(p51, _, descr=...) - p55 = call(ConstClass(parse_digit_string), p51, descr=) + p55 = call(ConstClass(parse_digit_string), p51, descr=) guard_no_exception(descr=...) - i57 = call(ConstClass(rbigint.toint), p55, descr=) + i57 = call(ConstClass(rbigint.toint), p55, descr=) guard_no_exception(descr=...) i58 = int_add_ovf(i6, i57) guard_no_overflow(descr=...) @@ -125,7 +128,7 @@ i7 = int_gt(i4, 0) guard_true(i7, descr=...) guard_not_invalidated(descr=...) - p9 = call(ConstClass(ll_int2dec__Signed), i4, descr=) + p9 = call(ConstClass(ll_int2dec__Signed), i4, descr=) guard_no_exception(descr=...) i10 = strlen(p9) i11 = int_is_true(i10) @@ -149,7 +152,7 @@ copystrcontent(p9, p21, 0, i25, i10) i33 = int_lt(i30, 23) guard_true(i33, descr=...) - p35 = call(ConstClass(ll_shrink_array__rpy_stringPtr_Signed), p21, i30, descr=) + p35 = call(ConstClass(ll_shrink_array__rpy_stringPtr_Signed), p21, i30, descr=) guard_no_exception(descr=...) i37 = strlen(p35) i38 = int_add_ovf(i5, i37) @@ -192,6 +195,6 @@ strsetitem(p35, 3, 104) strsetitem(p35, 4, 95) copystrcontent(p31, p35, 0, 5, i32) - i49 = call(ConstClass(_ll_2_str_eq_nonnull__rpy_stringPtr_rpy_stringPtr), p35, ConstPtr(ptr48), descr=) - guard_value(i49, 1, descr=) + i49 = call(ConstClass(_ll_2_str_eq_nonnull__rpy_stringPtr_rpy_stringPtr), p35, ConstPtr(ptr48), descr=) + guard_value(i49, 1, descr=...) 
''') From noreply at buildbot.pypy.org Tue Dec 20 15:54:32 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Dec 2011 15:54:32 +0100 (CET) Subject: [pypy-commit] pypy counter-decay: hg merge default Message-ID: <20111220145432.96D98820B7@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: counter-decay Changeset: r50759:5330b853e079 Date: 2011-12-20 14:54 +0000 http://bitbucket.org/pypy/pypy/changeset/5330b853e079/ Log: hg merge default diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -425,7 +425,15 @@ self.arg_classes.count('L')) == len(args_f or ()) def repr_of_descr(self): - return '' % (self.arg_classes, self.result_type) + res = 'Call%s %d' % (self.result_type, self.result_size) + if self.arg_classes: + res += ' ' + self.arg_classes + if self.extrainfo: + res += ' EF=%d' % self.extrainfo.extraeffect + oopspecindex = self.extrainfo.oopspecindex + if oopspecindex: + res += ' OS=%d' % oopspecindex + return '<%s>' % res def map_type_to_argclass(ARG, accept_void=False): diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -313,6 +313,10 @@ def test_repr_of_descr(): + def repr_of_descr(descr): + s = descr.repr_of_descr() + assert ',' not in s # makes the life easier for pypy.tool.jitlogparser + return s c0 = GcCache(False) T = lltype.GcStruct('T') S = lltype.GcStruct('S', ('x', lltype.Char), @@ -320,34 +324,34 @@ ('z', lltype.Ptr(T))) descr1 = get_size_descr(c0, S) s = symbolic.get_size(S, False) - assert descr1.repr_of_descr() == '' % s + assert repr_of_descr(descr1) == '' % s # descr2 = get_field_descr(c0, S, 'y') o, _ = symbolic.get_field_token(S, 'y', False) - assert descr2.repr_of_descr() == '' % o + assert repr_of_descr(descr2) == '' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) - assert descr2i.repr_of_descr() == '' % o + assert repr_of_descr(descr2i) == '' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) o = symbolic.get_size(lltype.Ptr(S), False) - assert descr3.repr_of_descr() == '' % o + assert repr_of_descr(descr3) == '' % o # descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert descr3i.repr_of_descr() == '' + assert repr_of_descr(descr3i) == '' # descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) - assert descr4.repr_of_descr() == '' + assert repr_of_descr(descr4) == '' % o # descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) - assert descr4i.repr_of_descr() == '' + assert repr_of_descr(descr4i) == '' # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) - assert descr4f.repr_of_descr() == '' + assert repr_of_descr(descr4f) == '' # descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat) - assert descr5f.repr_of_descr() == '' + assert repr_of_descr(descr5f) == '' def test_call_stubs_1(): c0 = GcCache(False) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -1,3 +1,4 @@ +from __future__ import with_statement from pypy.jit.metainterp.optimizeopt.test.test_util import ( LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot) 
from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -311,7 +311,7 @@ # to repeat it every time ticker_check = """ guard_not_invalidated? - ticker0 = getfield_raw(ticker_address, descr=) + ticker0 = getfield_raw(ticker_address, descr=) ticker_cond0 = int_lt(ticker0, 0) guard_false(ticker_cond0, descr=...) """ @@ -320,9 +320,9 @@ # this is the ticker check generated if we have threads thread_ticker_check = """ guard_not_invalidated? - ticker0 = getfield_raw(ticker_address, descr=) + ticker0 = getfield_raw(ticker_address, descr=) ticker1 = int_sub(ticker0, _) - setfield_raw(ticker_address, ticker1, descr=) + setfield_raw(ticker_address, ticker1, descr=) ticker_cond0 = int_lt(ticker1, 0) guard_false(ticker_cond0, descr=...) """ @@ -330,7 +330,7 @@ # # this is the ticker check generated in PyFrame.handle_operation_error exc_ticker_check = """ - ticker2 = getfield_raw(ticker_address, descr=) + ticker2 = getfield_raw(ticker_address, descr=) ticker_cond1 = int_lt(ticker2, 0) guard_false(ticker_cond1, descr=...) """ @@ -451,7 +451,6 @@ try: self.match_loop(expected_ops, ignore_ops) except InvalidMatch, e: - #raise # uncomment this and use py.test --pdb for better debugging print '@' * 40 print "Loops don't match" print "=================" @@ -464,7 +463,7 @@ print print "Expected:" print format(expected_src) - return False + raise # always propagate the exception in case of mismatch else: return True diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -7,8 +7,9 @@ from pypy.tool.udir import udir from pypy.tool import logparser from pypy.jit.tool.jitoutput import parse_prof -from pypy.module.pypyjit.test_pypy_c.model import Log, find_ids_range, find_ids, \ - TraceWithIds, OpMatcher +from pypy.module.pypyjit.test_pypy_c.model import (Log, find_ids_range, + find_ids, TraceWithIds, + OpMatcher, InvalidMatch) class BaseTestPyPyC(object): def setup_class(cls): @@ -115,13 +116,18 @@ assert opcodes_names == ['LOAD_FAST', 'LOAD_CONST', 'BINARY_ADD', 'STORE_FAST'] -class TestOpMatcher(object): +class TestOpMatcher_(object): def match(self, src1, src2, **kwds): from pypy.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) matcher = OpMatcher(loop.operations) - return matcher.match(src2, **kwds) + try: + res = matcher.match(src2, **kwds) + assert res is True + return True + except InvalidMatch: + return False def test_match_var(self): match_var = OpMatcher([]).match_var @@ -447,7 +453,7 @@ jump(p0, p1, p2, p3, i8, descr=...) """) # - assert not loop.match(""" + py.test.raises(InvalidMatch, loop.match, """ i6 = int_lt(i4, 1003) guard_true(i6) i8 = int_add(i5, 1) # variable mismatch @@ -492,9 +498,8 @@ guard_no_exception(descr=...) """) # - assert not loop.match_by_id('ntohs', """ + py.test.raises(InvalidMatch, loop.match_by_id, 'ntohs', """ guard_not_invalidated(descr=...) p12 = call(ConstClass(foobar), 1, descr=...) guard_no_exception(descr=...) 
""") - diff --git a/pypy/module/pypyjit/test_pypy_c/test__ffi.py b/pypy/module/pypyjit/test_pypy_c/test__ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test__ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test__ffi.py @@ -35,7 +35,7 @@ guard_not_invalidated(descr=...) i17 = force_token() setfield_gc(p0, i17, descr=<.* .*PyFrame.vable_token .*>) - f21 = call_release_gil(%s, 2.000000, 3.000000, descr=) + f21 = call_release_gil(%s, 2.000000, 3.000000, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) """ % pow_addr) diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -42,7 +42,7 @@ guard_not_invalidated(descr=...) i13 = int_lt(i7, i9) guard_true(i13, descr=...) - i15 = getarrayitem_raw(i10, i7, descr=<.*ArrayNoLengthDescr>) + i15 = getarrayitem_raw(i10, i7, descr=) i16 = int_add_ovf(i8, i15) guard_no_overflow(descr=...) i18 = int_add(i7, 1) @@ -72,17 +72,17 @@ guard_true(i13, descr=...) guard_not_invalidated(descr=...) # the bound check guard on img has been killed (thanks to the asserts) - i14 = getarrayitem_raw(i10, i8, descr=<.*ArrayNoLengthDescr>) + i14 = getarrayitem_raw(i10, i8, descr=) i15 = int_add_ovf(i9, i14) guard_no_overflow(descr=...) i17 = int_sub(i8, 640) # the bound check guard on intimg has been killed (thanks to the asserts) - i18 = getarrayitem_raw(i11, i17, descr=<.*ArrayNoLengthDescr>) + i18 = getarrayitem_raw(i11, i17, descr=) i19 = int_add_ovf(i18, i15) guard_no_overflow(descr=...) # on 64bit, there is a guard checking that i19 actually fits into 32bit ... - setarrayitem_raw(i11, i8, _, descr=<.*ArrayNoLengthDescr>) + setarrayitem_raw(i11, i8, _, descr=) i28 = int_add(i8, 1) --TICK-- jump(p0, p1, p2, p3, p4, p5, p6, i28, i15, p9, i10, i11, descr=...) @@ -107,10 +107,10 @@ guard_true(i10, descr=...) i11 = int_lt(i6, i7) guard_true(i11, descr=...) - f13 = getarrayitem_raw(i8, i6, descr=) + f13 = getarrayitem_raw(i8, i6, descr=) f15 = float_add(f13, 20.500000) - setarrayitem_raw(i8, i6, f15, descr=) - f16 = getarrayitem_raw(i8, i6, descr=) + setarrayitem_raw(i8, i6, f15, descr=) + f16 = getarrayitem_raw(i8, i6, descr=) i18 = float_eq(f16, 42.000000) guard_true(i18, descr=...) i20 = int_add(i6, 1) @@ -132,28 +132,24 @@ log = self.run(main, []) assert log.result == 321 loop, = log.loops_by_filename(self.filepath) - if sys.maxint == 2147483647: - arraydescr = 'UnsignedArrayNoLengthDescr' - else: - arraydescr = 'UINTArrayNoLengthDescr' assert loop.match(""" i10 = int_lt(i6, 1000) guard_true(i10, descr=...) i11 = int_lt(i6, i7) guard_true(i11, descr=...) - i13 = getarrayitem_raw(i8, i6, descr=<%s>) + i13 = getarrayitem_raw(i8, i6, descr=) f14 = cast_singlefloat_to_float(i13) f16 = float_add(f14, 20.500000) i17 = cast_float_to_singlefloat(f16) - setarrayitem_raw(i8, i6,i17, descr=<%s>) - i18 = getarrayitem_raw(i8, i6, descr=<%s>) + setarrayitem_raw(i8, i6,i17, descr=) + i18 = getarrayitem_raw(i8, i6, descr=) f19 = cast_singlefloat_to_float(i18) i21 = float_eq(f19, 42.000000) guard_true(i21, descr=...) i23 = int_add(i6, 1) --TICK-- jump(..., descr=...) 
- """ % (arraydescr, arraydescr, arraydescr)) + """) def test_zeropadded(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -75,12 +75,12 @@ assert log.opnames(ops) == [] # assert entry_bridge.match_by_id('call', """ - p38 = call(ConstClass(getexecutioncontext), descr=) - p39 = getfield_gc(p38, descr=) + p38 = call(ConstClass(getexecutioncontext), descr=) + p39 = getfield_gc(p38, descr=) i40 = force_token() - p41 = getfield_gc(p38, descr=) + p41 = getfield_gc(p38, descr=) guard_isnull(p41, descr=...) - i42 = getfield_gc(p38, descr=) + i42 = getfield_gc(p38, descr=) i43 = int_is_zero(i42) guard_true(i43, descr=...) i50 = force_token() @@ -192,7 +192,7 @@ assert log.result == 1000 loop, = log.loops_by_id('call') assert loop.match_by_id('call', """ - p14 = getarrayitem_gc_pure(p8, i9, descr=) + p14 = getarrayitem_gc_pure(p8, i9, descr=) i14 = force_token() i16 = force_token() """) @@ -336,15 +336,15 @@ loop, = log.loops_by_filename(self.filepath) # the int strategy is used here assert loop.match_by_id('append', """ - i13 = getfield_gc(p8, descr=) + i13 = getfield_gc(p8, descr=) i15 = int_add(i13, 1) # Will be killed by the backend - p15 = getfield_gc(p8, descr=) - i17 = arraylen_gc(p15, descr=) - call(_, p8, i15, descr=) # this is a call to _ll_list_resize_ge_trampoline__... + p15 = getfield_gc(p8, descr=) + i17 = arraylen_gc(p15, descr=) + call(_, p8, i15, descr=) # this is a call to _ll_list_resize_ge_trampoline__... guard_no_exception(descr=...) - p17 = getfield_gc(p8, descr=) - setarrayitem_gc(p17, i13, i12, descr=) + p17 = getfield_gc(p8, descr=) + setarrayitem_gc(p17, i13, i12, descr=) """) def test_blockstack_virtualizable(self): @@ -368,13 +368,13 @@ ... i20 = force_token() p22 = new_with_vtable(19511408) - p24 = new_array(1, descr=) + p24 = new_array(1, descr=) p26 = new_with_vtable(ConstClass(W_ListObject)) - setfield_gc(p0, i20, descr=) - setfield_gc(p26, ConstPtr(ptr22), descr=) - setarrayitem_gc(p24, 0, p26, descr=) - setfield_gc(p22, p24, descr=) - p32 = call_may_force(11376960, p18, p22, descr=) + setfield_gc(p0, i20, descr=) + setfield_gc(p26, ConstPtr(ptr22), descr=) + setarrayitem_gc(p24, 0, p26, descr=) + setfield_gc(p22, p24, descr=) + p32 = call_may_force(11376960, p18, p22, descr=) ... """) @@ -415,26 +415,26 @@ guard_nonnull_class(p8, ConstClass(W_IntObject), descr=...) guard_value(i4, 0, descr=...) guard_value(p3, ConstPtr(ptr14), descr=...) - i15 = getfield_gc_pure(p8, descr=) + i15 = getfield_gc_pure(p8, descr=) i17 = int_lt(i15, 5000) guard_true(i17, descr=...) - p18 = getfield_gc(p0, descr=) + p18 = getfield_gc(p0, descr=) guard_value(p18, ConstPtr(ptr19), descr=...) - p20 = getfield_gc(p18, descr=) + p20 = getfield_gc(p18, descr=) guard_value(p20, ConstPtr(ptr21), descr=...) guard_not_invalidated(descr=...) # most importantly, there is no getarrayitem_gc here - p23 = call(ConstClass(getexecutioncontext), descr=) - p24 = getfield_gc(p23, descr=) + p23 = call(ConstClass(getexecutioncontext), descr=) + p24 = getfield_gc(p23, descr=) i25 = force_token() - p26 = getfield_gc(p23, descr=) + p26 = getfield_gc(p23, descr=) guard_isnull(p26, descr=...) - i27 = getfield_gc(p23, descr=) + i27 = getfield_gc(p23, descr=) i28 = int_is_zero(i27) guard_true(i28, descr=...) 
- p30 = getfield_gc(ConstPtr(ptr29), descr=) + p30 = getfield_gc(ConstPtr(ptr29), descr=) guard_nonnull_class(p30, ConstClass(W_IntObject), descr=...) - i32 = getfield_gc_pure(p30, descr=) + i32 = getfield_gc_pure(p30, descr=) i33 = int_add_ovf(i15, i32) guard_no_overflow(descr=...) --TICK-- @@ -452,15 +452,15 @@ """, []) loop, = log.loops_by_id('call') assert loop.match(""" - i8 = getfield_gc_pure(p6, descr=) + i8 = getfield_gc_pure(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) i11 = force_token() i13 = int_add(i8, 1) --TICK-- p22 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p22, i13, descr=) - setfield_gc(p4, p22, descr=) + setfield_gc(p22, i13, descr=) + setfield_gc(p4, p22, descr=) jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -46,7 +46,7 @@ assert loop.match_by_id("getitem", """ i26 = call(ConstClass(ll_dict_lookup), p18, p6, i25, descr=...) ... - p33 = getinteriorfield_gc(p31, i26, descr=>) + p33 = getinteriorfield_gc(p31, i26, descr=>) ... """) @@ -86,28 +86,28 @@ i8 = int_lt(i5, i7) guard_true(i8, descr=...) guard_not_invalidated(descr=...) - p10 = call(ConstClass(ll_int_str), i5, descr=) + p10 = call(ConstClass(ll_int_str), i5, descr=) guard_no_exception(descr=...) - i12 = call(ConstClass(ll_strhash), p10, descr=) + i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) - p15 = new_array(8, descr=) - setfield_gc(p13, p15, descr=) - i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) - setfield_gc(p13, 16, descr=) + p15 = new_array(8, descr=) + setfield_gc(p13, p15, descr=) + i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + setfield_gc(p13, 16, descr=) guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) - call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) - setfield_gc(p20, i5, descr=) + call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) + setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) - i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) guard_no_exception(descr=...) i26 = int_and(i23, .*) i27 = int_is_true(i26) guard_false(i27, descr=...) - p28 = getfield_gc(p13, descr=) - p29 = getinteriorfield_gc(p28, i23, descr=>) + p28 = getfield_gc(p13, descr=) + p29 = getinteriorfield_gc(p28, i23, descr=>) guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...) - i31 = getfield_gc_pure(p29, descr=) + i31 = getfield_gc_pure(p29, descr=) i32 = int_sub_ovf(i31, i5) guard_no_overflow(descr=...) i34 = int_add_ovf(i32, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -16,11 +16,11 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("loadglobal", """ - p10 = getfield_gc(p0, descr=) + p10 = getfield_gc(p0, descr=) guard_value(p10, ConstPtr(ptr11), descr=...) - p12 = getfield_gc(p10, descr=) + p12 = getfield_gc(p10, descr=) guard_value(p12, ConstPtr(ptr13), descr=...) guard_not_invalidated(descr=...) 
- p19 = getfield_gc(ConstPtr(p17), descr=) + p19 = getfield_gc(ConstPtr(p17), descr=) guard_value(p19, ConstPtr(ptr20), descr=...) - """) \ No newline at end of file + """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -125,8 +125,8 @@ i12 = force_token() --TICK-- p20 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p20, i11, descr=) - setfield_gc(ConstPtr(ptr21), p20, descr=) + setfield_gc(p20, i11, descr=) + setfield_gc(ConstPtr(ptr21), p20, descr=) jump(p0, p1, p2, p3, p4, p20, p6, i7, p20, descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -23,8 +23,8 @@ f1 = cast_int_to_float(i0) i3 = float_le(f1, 0) guard_false(i3, descr=...) - f2 = call(ConstClass(log), f1, descr=) - f3 = call(ConstClass(log10), f1, descr=) + f2 = call(ConstClass(log), f1, descr=) + f3 = call(ConstClass(log10), f1, descr=) f4 = float_sub(f2, f3) f5 = float_add(f0, f4) i4 = int_add(i0, 1) @@ -52,8 +52,8 @@ f1 = cast_int_to_float(i0) i6 = --ISINF--(f1) guard_false(i6, descr=...) - f2 = call(ConstClass(sin), f1, descr=) - f3 = call(ConstClass(cos), f1, descr=) + f2 = call(ConstClass(sin), f1, descr=) + f3 = call(ConstClass(cos), f1, descr=) f4 = float_sub(f2, f3) f5 = float_add(f0, f4) i7 = int_add(i0, f1) @@ -84,7 +84,7 @@ i4 = int_or(i2, i3) i5 = int_is_true(i4) guard_false(i5, descr=...) - f2 = call(ConstClass(fmod), f1, 2.0, descr=) + f2 = call(ConstClass(fmod), f1, 2.0, descr=) f3 = float_add(f0, f2) i6 = int_sub(i0, 1) --TICK-- diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -204,18 +204,18 @@ assert log.result == 1000000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - i14 = getfield_gc(p12, descr=) + i14 = getfield_gc(p12, descr=) i16 = uint_ge(i12, i14) guard_false(i16, descr=...) - p16 = getfield_gc(p12, descr=) - p17 = getarrayitem_gc(p16, i12, descr=) + p16 = getfield_gc(p12, descr=) + p17 = getarrayitem_gc(p16, i12, descr=) i19 = int_add(i12, 1) - setfield_gc(p9, i19, descr=) + setfield_gc(p9, i19, descr=) guard_nonnull_class(p17, 146982464, descr=...) - i21 = getfield_gc(p17, descr=) + i21 = getfield_gc(p17, descr=) i23 = int_lt(0, i21) guard_true(i23, descr=...) - i24 = getfield_gc(p17, descr=) + i24 = getfield_gc(p17, descr=) i25 = getarrayitem_raw(i24, 0, descr=<.*>) i27 = int_lt(1, i21) guard_false(i27, descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -1,6 +1,9 @@ from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC +# XXX review the descrs to replace some EF=4 with EF=3 (elidable) + + class TestString(BaseTestPyPyC): def test_lookup_default_encoding(self): def main(n): @@ -72,7 +75,7 @@ i23 = strgetitem(p10, i19) p25 = newstr(1) strsetitem(p25, 0, i23) - p28 = call(ConstClass(strip_spaces), p25, descr=) + p28 = call(ConstClass(strip_spaces), p25, descr=) guard_no_exception(descr=...) i29 = strlen(p28) i30 = int_is_true(i29) @@ -88,9 +91,9 @@ guard_false(i41, descr=...) 
i43 = int_eq(i39, 43) guard_false(i43, descr=...) - i43 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr42), descr=) + i43 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr42), descr=) guard_false(i43, descr=...) - i46 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr45), descr=) + i46 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr45), descr=) guard_false(i46, descr=...) p51 = new_with_vtable(21136408) setfield_gc(p51, _, descr=...) # 7 setfields, but the order is dict-order-dependent @@ -100,9 +103,9 @@ setfield_gc(p51, _, descr=...) setfield_gc(p51, _, descr=...) setfield_gc(p51, _, descr=...) - p55 = call(ConstClass(parse_digit_string), p51, descr=) + p55 = call(ConstClass(parse_digit_string), p51, descr=) guard_no_exception(descr=...) - i57 = call(ConstClass(rbigint.toint), p55, descr=) + i57 = call(ConstClass(rbigint.toint), p55, descr=) guard_no_exception(descr=...) i58 = int_add_ovf(i6, i57) guard_no_overflow(descr=...) @@ -125,7 +128,7 @@ i7 = int_gt(i4, 0) guard_true(i7, descr=...) guard_not_invalidated(descr=...) - p9 = call(ConstClass(ll_int2dec__Signed), i4, descr=) + p9 = call(ConstClass(ll_int2dec__Signed), i4, descr=) guard_no_exception(descr=...) i10 = strlen(p9) i11 = int_is_true(i10) @@ -149,7 +152,7 @@ copystrcontent(p9, p21, 0, i25, i10) i33 = int_lt(i30, 23) guard_true(i33, descr=...) - p35 = call(ConstClass(ll_shrink_array__rpy_stringPtr_Signed), p21, i30, descr=) + p35 = call(ConstClass(ll_shrink_array__rpy_stringPtr_Signed), p21, i30, descr=) guard_no_exception(descr=...) i37 = strlen(p35) i38 = int_add_ovf(i5, i37) @@ -192,6 +195,6 @@ strsetitem(p35, 3, 104) strsetitem(p35, 4, 95) copystrcontent(p31, p35, 0, 5, i32) - i49 = call(ConstClass(_ll_2_str_eq_nonnull__rpy_stringPtr_rpy_stringPtr), p35, ConstPtr(ptr48), descr=) - guard_value(i49, 1, descr=) + i49 = call(ConstClass(_ll_2_str_eq_nonnull__rpy_stringPtr_rpy_stringPtr), p35, ConstPtr(ptr48), descr=) + guard_value(i49, 1, descr=...) ''') From noreply at buildbot.pypy.org Tue Dec 20 16:19:58 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Tue, 20 Dec 2011 16:19:58 +0100 (CET) Subject: [pypy-commit] pypy set-strategies: better approach for merging sets with lists Message-ID: <20111220151958.BB5AF820B7@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: set-strategies Changeset: r50760:01dbcc06249a Date: 2011-12-20 16:19 +0100 http://bitbucket.org/pypy/pypy/changeset/01dbcc06249a/ Log: better approach for merging sets with lists diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -193,6 +193,11 @@ """ Return the items in the list as unwrapped strings. If the list does not use the list strategy, return None. """ return self.strategy.getitems_str(self) + + def getitems_int(self): + """ Return the items in the list as unwrapped strings. If the list does + not use the list strategy, return None. 
""" + return self.strategy.getitems_int(self) # ___________________________________________________ @@ -292,6 +297,9 @@ def getitems_str(self, w_list): return None + def getitems_int(self, w_list): + return None + def getstorage_copy(self, w_list): raise NotImplementedError @@ -502,17 +510,15 @@ raise IndexError return start + i * step + def getitems_int(self, w_list): + return self._getitems_range(w_list, False) + def getitem(self, w_list, i): return self.wrap(self._getitem_unwrapped(w_list, i)) def getitems_copy(self, w_list): return self._getitems_range(w_list, True) - getitems_wrapped = getitems_copy - - def getitems_unwrapped(self, w_list): - return self._getitems_range(w_list, False) - def getstorage_copy(self, w_list): # tuple is unmutable return w_list.lstorage @@ -703,11 +709,6 @@ def getitems_copy(self, w_list): return [self.wrap(item) for item in self.unerase(w_list.lstorage)] - getitems_wrapped = getitems_copy - - def getitems_unwrapped(self, w_list): - return self.unerase(w_list.lstorage) - @jit.unroll_safe def getitems_unroll(self, w_list): return [self.wrap(item) for item in self.unerase(w_list.lstorage)] @@ -936,8 +937,6 @@ def getitems(self, w_list): return self.unerase(w_list.lstorage) - getitems_wrapped = getitems - class IntegerListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0 _applevel_repr = "int" @@ -965,6 +964,9 @@ if reverse: l.reverse() + def getitems_int(self, w_list): + return self.unerase(w_list.lstorage) + class FloatListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0.0 _applevel_repr = "float" @@ -1022,7 +1024,6 @@ def getitems_str(self, w_list): return self.unerase(w_list.lstorage) - # _______________________________________________________ init_signature = Signature(['sequence'], None, None) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -440,6 +440,11 @@ return w_obj.getitems_str() return None + def listview_int(self, w_obj): + if isinstance(w_obj, W_ListObject): + return w_obj.getitems_int() + return None + def sliceindices(self, w_slice, w_length): if isinstance(w_slice, W_SliceObject): a, b, c = w_slice.indices3(self, self.int_w(w_length)) diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -13,8 +13,6 @@ from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.stringobject import W_StringObject -from pypy.objspace.std.listobject import IntegerListStrategy, StringListStrategy,\ - EmptyListStrategy, RangeListStrategy, ObjectListStrategy, FloatListStrategy class W_BaseSetObject(W_Object): typedef = None @@ -282,9 +280,6 @@ def get_empty_storage(self): return self.erase(None) - def get_storage_from_w_list(self, w_list): - return self.get_empty_storage() - def is_correct_type(self, w_key): return False @@ -389,9 +384,7 @@ setdata[self.unwrap(w_item)] = None return self.erase(setdata) - def get_storage_from_w_list(self, w_list): - items = w_list.strategy.getitems_unwrapped(w_list) - + def get_storage_from_unwrapped_list(self, items): setdata = self.get_empty_dict() for item in items: setdata[item] = None @@ -759,14 +752,6 @@ def get_empty_storage(self): return self.erase(self.get_empty_dict()) - def get_storage_from_w_list(self, w_list): - items = w_list.strategy.getitems_wrapped(w_list) - - setdata = self.get_empty_dict() - for item in items: - 
setdata[item] = None - return self.erase(setdata) - def get_empty_dict(self): return newset(self.space) @@ -904,22 +889,6 @@ def newset(space): return r_dict(space.eq_w, space.hash_w, force_non_null=True) -_strategy_map = { - EmptyListStrategy: EmptySetStrategy, - IntegerListStrategy: IntegerSetStrategy, - RangeListStrategy: IntegerSetStrategy, - StringListStrategy: StringSetStrategy, - FloatListStrategy: ObjectSetStrategy, - ObjectListStrategy: ObjectSetStrategy -} - -def set_strategy_and_setdata_from_listobject(space, w_set, w_list): - strategy_class = _strategy_map[w_list.strategy.__class__] - strategy = space.fromcache(strategy_class) - - w_set.sstorage = strategy.get_storage_from_w_list(w_list) - w_set.strategy = strategy - def set_strategy_and_setdata(space, w_set, w_iterable): from pypy.objspace.std.intobject import W_IntObject if w_iterable is None : @@ -932,8 +901,18 @@ w_set.sstorage = w_iterable.get_storage_copy() return - if isinstance(w_iterable, W_ListObject): - set_strategy_and_setdata_from_listobject(space, w_set, w_iterable) + stringlist = space.listview_str(w_iterable) + if stringlist != None: + strategy = space.fromcache(StringSetStrategy) + w_set.strategy = strategy + w_set.sstorage = strategy.get_storage_from_unwrapped_list(stringlist) + return + + intlist = space.listview_int(w_iterable) + if intlist != None: + strategy = space.fromcache(IntegerSetStrategy) + w_set.strategy = strategy + w_set.sstorage = strategy.get_storage_from_unwrapped_list(intlist) return iterable_w = space.listview(w_iterable) From noreply at buildbot.pypy.org Tue Dec 20 17:15:19 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Dec 2011 17:15:19 +0100 (CET) Subject: [pypy-commit] pypy counter-decay: Fix the test. Message-ID: <20111220161519.A4356820B7@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: counter-decay Changeset: r50761:083617e059dd Date: 2011-12-20 16:13 +0000 http://bitbucket.org/pypy/pypy/changeset/083617e059dd/ Log: Fix the test. diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -21,9 +21,9 @@ assert loop.match_by_id("generator", """ i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p45, i29, descr=) - setarrayitem_gc(p8, 0, p45, descr=) - i47 = arraylen_gc(p8, descr=) # Should be removed by backend + setfield_gc(p45, i29, descr=) + setarrayitem_gc(p8, 0, p45, descr=) + i47 = arraylen_gc(p8, descr=) # Should be removed by backend jump(..., descr=...) """) assert loop.match_by_id("subtract", """ From noreply at buildbot.pypy.org Tue Dec 20 17:15:20 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Dec 2011 17:15:20 +0100 (CET) Subject: [pypy-commit] pypy counter-decay: The basic threshold is now larger by 2% than it used to be. Message-ID: <20111220161520.CF848820B7@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: counter-decay Changeset: r50762:91ce4e8c26be Date: 2011-12-20 16:15 +0000 http://bitbucket.org/pypy/pypy/changeset/91ce4e8c26be/ Log: The basic threshold is now larger by 2% than it used to be. 
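A minimal sketch of the effect described in the log message above, assuming a simplified counter that counts single iterations instead of scaling the fixed THRESHOLD_LIMIT: the first time a loop's counter reaches the bound (or whenever new assembler has been produced since the last time, per the extra_delay/current_generation check in the warmstate.py hunk that appears later in this digest), the counter is reset to 98% of the limit instead of triggering compilation, so a loop needs roughly 2% more iterations before it actually gets compiled — presumably why the tests below lower the explicit threshold and raise the iteration counts.

    # Illustrative model only: counts iterations one by one; the real code in
    # warmstate.py scales a fixed THRESHOLD_LIMIT and stores the memory-manager
    # generation in cell.extra_delay instead of a simple boolean.
    def iterations_until_compile(threshold):
        counter = 0
        backed_off = False          # stands in for cell.extra_delay
        iterations = 0
        while True:
            iterations += 1
            counter += 1
            if counter > threshold:
                if not backed_off:
                    counter = int(threshold * 0.98)   # decay instead of compiling
                    backed_off = True
                else:
                    return iterations                 # bound reached again: compile

    print(iterations_until_compile(1000))   # about 2% more than 1000
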
diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -46,7 +46,7 @@ r *= n n -= 1 return r - log = self.run(fact, [7], threshold=5) + log = self.run(fact, [7], threshold=4) assert log.result == 5040 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -55,8 +55,8 @@ i += int(long(string.digits[i % len(string.digits)], 16)) return i - log = self.run(main, [1000]) - assert log.result == main(1000) + log = self.run(main, [1100]) + assert log.result == main(1100) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" i11 = int_lt(i6, i7) From noreply at buildbot.pypy.org Tue Dec 20 17:29:19 2011 From: noreply at buildbot.pypy.org (hager) Date: Tue, 20 Dec 2011 17:29:19 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: (arigo, hager): Remove unnecessary argument from decoding function Message-ID: <20111220162919.7B8C2820B7@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50763:fa0b1ce8e088 Date: 2011-12-20 16:18 +0100 http://bitbucket.org/pypy/pypy/changeset/fa0b1ce8e088/ Log: (arigo, hager): Remove unnecessary argument from decoding function diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -177,35 +177,28 @@ def setup_failure_recovery(self): @rgc.no_collect - def failure_recovery_func(mem_loc, stack_pointer, spilling_pointer): + def failure_recovery_func(mem_loc, spilling_pointer): """ mem_loc is a structure in memory describing where the values for the failargs are stored. - - stack_pointer is the address of top of the stack. spilling_pointer is the address of the FORCE_INDEX. 
""" - return self.decode_registers_and_descr(mem_loc, stack_pointer, - spilling_pointer) + return self.decode_registers_and_descr(mem_loc, spilling_pointer) self.failure_recovery_func = failure_recovery_func recovery_func_sign = lltype.Ptr(lltype.FuncType([lltype.Signed, - lltype.Signed, lltype.Signed], lltype.Signed)) + lltype.Signed], lltype.Signed)) @rgc.no_collect - def decode_registers_and_descr(self, mem_loc, stack_loc, spp_loc): + def decode_registers_and_descr(self, mem_loc, spp_loc): ''' mem_loc : pointer to encoded state - stack_loc : pointer to top of the stack spp_loc : pointer to begin of the spilling area ''' enc = rffi.cast(rffi.CCHARP, mem_loc) managed_size = WORD * len(r.MANAGED_REGS) - - assert spp_loc > stack_loc - regs = rffi.cast(rffi.CCHARP, spp_loc) i = -1 fail_index = -1 @@ -346,8 +339,7 @@ mc.lwz(r.r3.value, r.SPP.value, self.ENCODING_AREA) # address of state encoding else: mc.ld(r.r3.value, r.SPP.value, self.ENCODING_AREA) - mc.mr(r.r4.value, r.SP.value) # load stack pointer - mc.mr(r.r5.value, r.SPP.value) # load spilling pointer + mc.mr(r.r4.value, r.SPP.value) # load spilling pointer # # load address of decoding function into SCRATCH mc.alloc_scratch_reg(addr) From noreply at buildbot.pypy.org Tue Dec 20 17:29:20 2011 From: noreply at buildbot.pypy.org (hager) Date: Tue, 20 Dec 2011 17:29:20 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: making jump conditions more clear Message-ID: <20111220162920.9BD9A820B7@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50764:c6c19fd92a42 Date: 2011-12-20 17:28 +0100 http://bitbucket.org/pypy/pypy/changeset/c6c19fd92a42/ Log: making jump conditions more clear diff --git a/pypy/jit/backend/ppc/ppcgen/codebuilder.py b/pypy/jit/backend/ppc/ppcgen/codebuilder.py --- a/pypy/jit/backend/ppc/ppcgen/codebuilder.py +++ b/pypy/jit/backend/ppc/ppcgen/codebuilder.py @@ -1003,16 +1003,21 @@ self.b(target_ofs) def b_cond_offset(self, offset, condition): + BI = condition[0] + BO = condition[1] + pos = self.currpos() target_ofs = offset - pos - self.bc(condition, 2, target_ofs) + self.bc(BO, BI, target_ofs) def b_cond_abs(self, addr, condition): - assert condition in (c.EQ, c.NE) + BI = condition[0] + BO = condition[1] + self.alloc_scratch_reg(addr) self.mtctr(r.SCRATCH.value) self.free_scratch_reg() - self.bcctr(condition, 2) + self.bcctr(BO, BI) def b_abs(self, address, trap=False): self.alloc_scratch_reg(address) diff --git a/pypy/jit/backend/ppc/ppcgen/condition.py b/pypy/jit/backend/ppc/ppcgen/condition.py --- a/pypy/jit/backend/ppc/ppcgen/condition.py +++ b/pypy/jit/backend/ppc/ppcgen/condition.py @@ -1,9 +1,16 @@ -LE = 0 -NE = 4 -GT = 2 -LT = 3 -EQ = 12 -GE = 33 +# CONDITION = (BI (number of bit tested in CR), BO (12 if bit is 1, else 4)) + +SET = 12 +UNSET = 4 + +LE = (1, UNSET) +NE = (2, UNSET) +GT = (1, SET) +LT = (0, SET) +EQ = (2, SET) +GE = (0, UNSET) + +# values below are random ... 
U_LT = 50 U_LE = 60 @@ -12,5 +19,3 @@ IS_TRUE = 90 IS_ZERO = 100 - -opposites = {LE: GT, NE: EQ, LT: GE, GE: LT, EQ: NE, GT: LE} From noreply at buildbot.pypy.org Tue Dec 20 17:33:48 2011 From: noreply at buildbot.pypy.org (hager) Date: Tue, 20 Dec 2011 17:33:48 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: (arigo, hager): guard_not_forced Message-ID: <20111220163348.F28E9820B7@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50765:9fd7e0e76cbb Date: 2011-12-20 17:33 +0100 http://bitbucket.org/pypy/pypy/changeset/9fd7e0e76cbb/ Log: (arigo, hager): guard_not_forced diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -1082,12 +1082,16 @@ self._emit_guard(guard_op, regalloc._prepare_guard(guard_op), c.EQ) def emit_guard_call_may_force(self, op, guard_op, arglocs, regalloc): - self.mc.mr(r.r0.value, r.SP.value) + ENCODING_AREA = len(r.MANAGED_REGS) * WORD + self.mc.alloc_scratch_reg() if IS_PPC_32: - self.mc.cmpwi(r.r0.value, 0) + self.mc.lwz(r.SCRATCH.value, r.SPP.value, ENCODING_AREA) + self.mc.cmpwi(0, r.SCRATCH.value, 0) else: - self.mc.cmpdi(r.r0.value, 0) - self._emit_guard(guard_op, arglocs, c.EQ) + self.mc.ld(r.SCRATCH.value, r.SPP.value, ENCODING_AREA) + self.mc.cmpdi(0, r.SCRATCH.value, 0) + self.mc.free_scratch_reg() + self._emit_guard(guard_op, arglocs, c.LT) emit_guard_call_release_gil = emit_guard_call_may_force diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -38,6 +38,7 @@ from pypy.rlib import rgc from pypy.rpython.annlowlevel import llhelper from pypy.rlib.objectmodel import we_are_translated +from pypy.rpython.lltypesystem.lloperation import llop memcpy_fn = rffi.llexternal('memcpy', [llmemory.Address, llmemory.Address, rffi.SIZE_T], lltype.Void, @@ -932,6 +933,11 @@ else: raise AssertionError('Trying to pop to an invalid location') + def leave_jitted_hook(self): + ptrs = self.fail_boxes_ptr.ar + llop.gc_assume_young_pointers(lltype.Void, + llmemory.cast_ptr_to_adr(ptrs)) + def _ensure_result_bit_extension(self, resloc, size, signed): if size == 1: if not signed: #unsigned char diff --git a/pypy/jit/backend/ppc/runner.py b/pypy/jit/backend/ppc/runner.py --- a/pypy/jit/backend/ppc/runner.py +++ b/pypy/jit/backend/ppc/runner.py @@ -14,6 +14,7 @@ from pypy.jit.backend.ppc.ppcgen.ppc_assembler import AssemblerPPC from pypy.jit.backend.ppc.ppcgen.arch import NONVOLATILES, GPR_SAVE_AREA, WORD from pypy.jit.backend.ppc.ppcgen.regalloc import PPCRegisterManager, PPCFrameManager +from pypy.jit.backend.ppc.ppcgen import register as r import sys from pypy.tool.ansi_print import ansi_log @@ -88,6 +89,24 @@ adr = llmemory.cast_ptr_to_adr(x) return PPC_64_CPU.cast_adr_to_int(adr) + def force(self, spilling_pointer): + TP = rffi.CArrayPtr(lltype.Signed) + + addr_of_force_index = spilling_pointer + len(r.MANAGED_REGS) * WORD + + fail_index = rffi.cast(TP, addr_of_force_index)[0] + assert fail_index >= 0, "already forced!" + faildescr = self.get_fail_descr_from_number(fail_index) + rffi.cast(TP, addr_of_force_index)[0] = ~fail_index + + # start of "no gc operation!" block + fail_index_2 = self.asm.failure_recovery_func( + faildescr._failure_recovery_code, spilling_pointer) + self.asm.leave_jitted_hook() + # end of "no gc operation!" 
block + assert fail_index == fail_index_2 + return faildescr + # return the number of values that can be returned def get_latest_value_count(self): return self.asm.fail_boxes_count From noreply at buildbot.pypy.org Tue Dec 20 19:06:47 2011 From: noreply at buildbot.pypy.org (hager) Date: Tue, 20 Dec 2011 19:06:47 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: implemented GUARD_NOT_INVALIDATED Message-ID: <20111220180647.5C290820B7@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50766:c28d89a90f9e Date: 2011-12-20 19:06 +0100 http://bitbucket.org/pypy/pypy/changeset/c28d89a90f9e/ Log: implemented GUARD_NOT_INVALIDATED diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -284,6 +284,8 @@ raise NotImplementedError self._cmp_guard_class(op, arglocs, regalloc) + def emit_guard_not_invalidated(self, op, locs, regalloc): + return self._emit_guard(op, locs, c.EQ, is_guard_not_invalidated=True) class MiscOpAssembler(object): diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -579,9 +579,6 @@ self._teardown() def assemble_bridge(self, faildescr, inputargs, operations, looptoken, log): - - assert 0, "Bridges do not work yet because they need to dynamically adjust the SP" - self.setup(looptoken, operations) assert isinstance(faildescr, AbstractFailDescr) code = faildescr._failure_recovery_code @@ -807,7 +804,8 @@ mc.prepare_insts_blocks(True) mc.copy_to_raw_memory(block_start + tok.offset) else: - assert 0, "not implemented yet" + clt.invalidate_positions.append((block_start + tok.offset, + descr._ppc_guard_pos - tok.offset)) def patch_trace(self, faildescr, looptoken, bridge_addr, regalloc): # The first instruction (word) is not overwritten, because it is the diff --git a/pypy/jit/backend/ppc/ppcgen/regalloc.py b/pypy/jit/backend/ppc/ppcgen/regalloc.py --- a/pypy/jit/backend/ppc/ppcgen/regalloc.py +++ b/pypy/jit/backend/ppc/ppcgen/regalloc.py @@ -336,6 +336,7 @@ return locs prepare_guard_overflow = prepare_guard_no_overflow + prepare_guard_not_invalidated = prepare_guard_no_overflow def prepare_guard_exception(self, op): boxes = list(op.getarglist()) diff --git a/pypy/jit/backend/ppc/runner.py b/pypy/jit/backend/ppc/runner.py --- a/pypy/jit/backend/ppc/runner.py +++ b/pypy/jit/backend/ppc/runner.py @@ -14,6 +14,7 @@ from pypy.jit.backend.ppc.ppcgen.ppc_assembler import AssemblerPPC from pypy.jit.backend.ppc.ppcgen.arch import NONVOLATILES, GPR_SAVE_AREA, WORD from pypy.jit.backend.ppc.ppcgen.regalloc import PPCRegisterManager, PPCFrameManager +from pypy.jit.backend.ppc.ppcgen.codebuilder import PPCBuilder from pypy.jit.backend.ppc.ppcgen import register as r import sys @@ -130,3 +131,20 @@ def teardown(self): self.patch_list = None self.reg_map = None + + def invalidate_loop(self, looptoken): + """Activate all GUARD_NOT_INVALIDATED in the loop and its attached + bridges. Before this call, all GUARD_NOT_INVALIDATED do nothing; + after this call, they all fail. 
Note that afterwards, if one such + guard fails often enough, it has a bridge attached to it; it is + possible then to re-call invalidate_loop() on the same looptoken, + which must invalidate all newer GUARD_NOT_INVALIDATED, but not the + old one that already has a bridge attached to it.""" + + for jmp, tgt in looptoken.compiled_loop_token.invalidate_positions: + mc = PPCBuilder() + mc.b_offset(tgt) + mc.prepare_insts_blocks() + mc.copy_to_raw_memory(jmp) + # positions invalidated + looptoken.compiled_loop_token.invalidate_positions = [] From noreply at buildbot.pypy.org Tue Dec 20 19:09:53 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Dec 2011 19:09:53 +0100 (CET) Subject: [pypy-commit] pypy default: merge counter-decay again: simplified version, just requiring 2% Message-ID: <20111220180953.DB89A820B7@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50767:a21cab6475c1 Date: 2011-12-20 19:09 +0100 http://bitbucket.org/pypy/pypy/changeset/a21cab6475c1/ Log: merge counter-decay again: simplified version, just requiring 2% extra counts on loops if a piece of assembler has been produced in the meantime. See included explanations for motivation. diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -275,3 +275,52 @@ state.make_jitdriver_callbacks() res = state.can_never_inline(5, 42.5) assert res is True + +def test_cleanup_jitcell_dict(): + class FakeJitDriverSD: + _green_args_spec = [lltype.Signed] + # + # Test creating tons of jitcells that remain at 0 + warmstate = WarmEnterState(None, FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + cell1 = get_jitcell(True, -1) + assert len(warmstate._jitcell_dict) == 1 + # + for i in range(1, 20005): + get_jitcell(True, i) # should trigger a clean-up at 20001 + assert len(warmstate._jitcell_dict) == (i % 20000) + 1 + # + # Same test, with one jitcell that has a counter of BASE instead of 0 + warmstate = WarmEnterState(None, FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + cell2 = get_jitcell(True, -2) + cell2.counter = BASE = warmstate.THRESHOLD_LIMIT // 2 # 50% + # + for i in range(0, 20005): + get_jitcell(True, i) + assert len(warmstate._jitcell_dict) == (i % 19999) + 2 + # + assert cell2 in warmstate._jitcell_dict.values() + assert cell2.counter == int(BASE * 0.92) # decayed once + # + # Same test, with jitcells that are compiled and freed by the memmgr + warmstate = WarmEnterState(None, FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + get_jitcell(True, -1) + # + for i in range(1, 20005): + cell = get_jitcell(True, i) + cell.counter = -1 + cell.wref_procedure_token = None # or a dead weakref, equivalently + assert len(warmstate._jitcell_dict) == (i % 20000) + 1 + # + # Same test, with counter == -2 (rare case, kept alive) + warmstate = WarmEnterState(None, FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + cell = get_jitcell(True, -1) + cell.counter = -2 + # + for i in range(1, 20005): + cell = get_jitcell(True, i) + cell.counter = -2 + assert len(warmstate._jitcell_dict) == i + 1 diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -151,6 +151,7 @@ # counter == -2: tracing is currently going on for this cell counter = 0 dont_trace_here = False + 
extra_delay = chr(0) wref_procedure_token = None def get_procedure_token(self): @@ -172,7 +173,6 @@ class WarmEnterState(object): THRESHOLD_LIMIT = sys.maxint // 2 - default_jitcell_dict = None def __init__(self, warmrunnerdesc, jitdriver_sd): "NOT_RPYTHON" @@ -316,6 +316,36 @@ # assert 0, "should have raised" + def bound_reached(cell, *args): + # bound reached, but we do a last check: if it is the first + # time we reach the bound, or if another loop or bridge was + # compiled since the last time we reached it, then decrease + # the counter by a few percents instead. It should avoid + # sudden bursts of JIT-compilation, and also corner cases + # where we suddenly compile more than one loop because all + # counters reach the bound at the same time, but where + # compiling all but the first one is pointless. + curgen = warmrunnerdesc.memory_manager.current_generation + curgen = chr(intmask(curgen) & 0xFF) # only use 8 bits + if we_are_translated() and curgen != cell.extra_delay: + cell.counter = int(self.THRESHOLD_LIMIT * 0.98) + cell.extra_delay = curgen + return + # + if not confirm_enter_jit(*args): + cell.counter = 0 + return + # start tracing + from pypy.jit.metainterp.pyjitpl import MetaInterp + metainterp = MetaInterp(metainterp_sd, jitdriver_sd) + # set counter to -2, to mean "tracing in effect" + cell.counter = -2 + try: + metainterp.compile_and_run_once(jitdriver_sd, *args) + finally: + if cell.counter == -2: + cell.counter = 0 + def maybe_compile_and_run(threshold, *args): """Entry point to the JIT. Called at the point with the can_enter_jit() hint. @@ -330,19 +360,9 @@ if n <= self.THRESHOLD_LIMIT: # bound not reached cell.counter = n return - if not confirm_enter_jit(*args): - cell.counter = 0 + else: + bound_reached(cell, *args) return - # bound reached; start tracing - from pypy.jit.metainterp.pyjitpl import MetaInterp - metainterp = MetaInterp(metainterp_sd, jitdriver_sd) - # set counter to -2, to mean "tracing in effect" - cell.counter = -2 - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - finally: - if cell.counter == -2: - cell.counter = 0 else: if cell.counter != -1: assert cell.counter == -2 @@ -447,12 +467,40 @@ except AttributeError: pass # + def _cleanup_dict(): + minimum = self.THRESHOLD_LIMIT // 20 # minimum 5% + killme = [] + for key, cell in jitcell_dict.iteritems(): + if cell.counter >= 0: + cell.counter = int(cell.counter * 0.92) + if cell.counter < minimum: + killme.append(key) + elif (cell.counter == -1 + and cell.get_procedure_token() is None): + killme.append(key) + for key in killme: + del jitcell_dict[key] + # + def _maybe_cleanup_dict(): + # Once in a while, rarely, when too many entries have + # been put in the jitdict_dict, we do a cleanup phase: + # we decay all counters and kill entries with a too + # low counter. + self._trigger_automatic_cleanup += 1 + if self._trigger_automatic_cleanup > 20000: + self._trigger_automatic_cleanup = 0 + _cleanup_dict() + # + self._trigger_automatic_cleanup = 0 + self._jitcell_dict = jitcell_dict # for tests + # def get_jitcell(build, *greenargs): try: cell = jitcell_dict[greenargs] except KeyError: if not build: return None + _maybe_cleanup_dict() cell = JitCell() jitcell_dict[greenargs] = cell return cell @@ -464,6 +512,10 @@ get_jitcell_at_ptr = self.jitdriver_sd._get_jitcell_at_ptr set_jitcell_at_ptr = self.jitdriver_sd._set_jitcell_at_ptr lltohlhack = {} + # note that there is no equivalent of _maybe_cleanup_dict() + # in the case of custom getters. 
We assume that the interpreter + # stores the JitCells on some objects that can go away by GC, + # like the PyCode objects in PyPy. # def get_jitcell(build, *greenargs): fn = support.maybe_on_top_of_llinterp(rtyper, get_jitcell_at_ptr) diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -21,9 +21,9 @@ assert loop.match_by_id("generator", """ i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p45, i29, descr=) - setarrayitem_gc(p8, 0, p45, descr=) - i47 = arraylen_gc(p8, descr=) # Should be removed by backend + setfield_gc(p45, i29, descr=) + setarrayitem_gc(p8, 0, p45, descr=) + i47 = arraylen_gc(p8, descr=) # Should be removed by backend jump(..., descr=...) """) assert loop.match_by_id("subtract", """ diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -46,7 +46,7 @@ r *= n n -= 1 return r - log = self.run(fact, [7], threshold=5) + log = self.run(fact, [7], threshold=4) assert log.result == 5040 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -55,8 +55,8 @@ i += int(long(string.digits[i % len(string.digits)], 16)) return i - log = self.run(main, [1000]) - assert log.result == main(1000) + log = self.run(main, [1100]) + assert log.result == main(1100) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" i11 = int_lt(i6, i7) From noreply at buildbot.pypy.org Tue Dec 20 20:13:10 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Dec 2011 20:13:10 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: call create_sig instead of array_sig, for symmetry mostly Message-ID: <20111220191310.5C8FD820B7@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50768:a9d8fe792078 Date: 2011-12-20 21:12 +0200 http://bitbucket.org/pypy/pypy/changeset/a9d8fe792078/ Log: call create_sig instead of array_sig, for symmetry mostly diff --git a/pypy/module/micronumpy/REVIEW.txt b/pypy/module/micronumpy/REVIEW.txt --- a/pypy/module/micronumpy/REVIEW.txt +++ b/pypy/module/micronumpy/REVIEW.txt @@ -1,8 +1,6 @@ REVIEW NOTES ============ -* Scalar.reshape should turn the value into an array correct for an input of - ``1`` or ``(1,)``. * VirtualSlice vs. W_NDimSlice? * Call{1, 2}.create_sig, should it call forced_result.create_sig(), instead of array_sig()? If not, why not? 
diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -753,7 +753,7 @@ def create_sig(self, res_shape): if self.forced_result is not None: - return self.forced_result.array_sig(res_shape) + return self.forced_result.create_sig(res_shape) return signature.Call1(self.ufunc, self.name, self.values.create_sig(res_shape)) @@ -777,7 +777,7 @@ def create_sig(self, res_shape): if self.forced_result is not None: - return self.forced_result.array_sig(res_shape) + return self.forced_result.create_sig(res_shape) return signature.Call2(self.ufunc, self.name, self.calc_dtype, self.left.create_sig(res_shape), self.right.create_sig(res_shape)) From noreply at buildbot.pypy.org Tue Dec 20 20:14:48 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Dec 2011 20:14:48 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: call directly parent class init Message-ID: <20111220191448.5E8CB820B7@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50769:26c22a795ee9 Date: 2011-12-20 21:14 +0200 http://bitbucket.org/pypy/pypy/changeset/26c22a795ee9/ Log: call directly parent class init diff --git a/pypy/module/micronumpy/REVIEW.txt b/pypy/module/micronumpy/REVIEW.txt --- a/pypy/module/micronumpy/REVIEW.txt +++ b/pypy/module/micronumpy/REVIEW.txt @@ -2,8 +2,6 @@ ============ * VirtualSlice vs. W_NDimSlice? -* Call{1, 2}.create_sig, should it call forced_result.create_sig(), instead of - array_sig()? If not, why not? * W_NDimSlice.__init__ calls ConcreteArray.__init__ instead of ViewArray.__init__, W_FlatIterator as well. * Better names for sigeq and sigeq2, sighash doesn't say if numberings are diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -970,7 +970,7 @@ size *= sh self.strides = strides self.backstrides = backstrides - ConcreteArray.__init__(self, size, shape, parent.dtype, parent.order, + ViewArray.__init__(self, size, shape, parent.dtype, parent.order, parent) self.start = start @@ -1224,7 +1224,7 @@ size *= sh self.strides = [arr.strides[-1]] self.backstrides = [arr.backstrides[-1]] - ConcreteArray.__init__(self, size, [size], arr.dtype, arr.order, + ViewArray.__init__(self, size, [size], arr.dtype, arr.order, arr) self.shapelen = len(arr.shape) self.iter = OneDimIterator(arr.start, self.strides[0], From noreply at buildbot.pypy.org Tue Dec 20 20:17:11 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Dec 2011 20:17:11 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: improve names a bit Message-ID: <20111220191711.08198820B7@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50770:034b7e34339e Date: 2011-12-20 21:16 +0200 http://bitbucket.org/pypy/pypy/changeset/034b7e34339e/ Log: improve names a bit diff --git a/pypy/module/micronumpy/REVIEW.txt b/pypy/module/micronumpy/REVIEW.txt --- a/pypy/module/micronumpy/REVIEW.txt +++ b/pypy/module/micronumpy/REVIEW.txt @@ -4,8 +4,6 @@ * VirtualSlice vs. W_NDimSlice? * W_NDimSlice.__init__ calls ConcreteArray.__init__ instead of ViewArray.__init__, W_FlatIterator as well. -* Better names for sigeq and sigeq2, sighash doesn't say if numberings are - included in the hash. * Cleanup of the iterator and array caching/numbering. 
It's a mess right now: * _creater_iter updates the arraylist * Why do Scalars need an iterator at all? diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -8,7 +8,9 @@ def sigeq(one, two): return one.eq(two) -def sigeq2(one, two): +def sigeq_numbering(one, two): + """ Cache for iterator numbering should not compare array numbers + """ return one.eq(two, compare_array_no=False) def sighash(sig): @@ -71,7 +73,7 @@ iter_no = 0 def invent_numbering(self): - cache = r_dict(sigeq2, sighash) + cache = r_dict(sigeq_numbering, sighash) allnumbers = [] self._invent_numbering(cache, allnumbers) From noreply at buildbot.pypy.org Tue Dec 20 20:30:35 2011 From: noreply at buildbot.pypy.org (hager) Date: Tue, 20 Dec 2011 20:30:35 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: factored out some distinctions of cases between PPC32 and PPC64 Message-ID: <20111220193035.344E6820B7@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50771:17e99f1ed45a Date: 2011-12-20 20:30 +0100 http://bitbucket.org/pypy/pypy/changeset/17e99f1ed45a/ Log: factored out some distinctions of cases between PPC32 and PPC64 diff --git a/pypy/jit/backend/ppc/ppcgen/codebuilder.py b/pypy/jit/backend/ppc/ppcgen/codebuilder.py --- a/pypy/jit/backend/ppc/ppcgen/codebuilder.py +++ b/pypy/jit/backend/ppc/ppcgen/codebuilder.py @@ -1033,6 +1033,30 @@ self.free_scratch_reg() self.bctrl() + def load(self, target_reg, base_reg, offset): + if IS_PPC_32: + self.lwz(target_reg, base_reg, offset) + else: + self.ld(target_reg, base_reg, offset) + + def loadx(self, target_reg, base_reg, offset_reg): + if IS_PPC_32: + self.lwzx(target_reg, base_reg, offset_reg) + else: + self.ldx(target_reg, base_reg. offset_reg) + + def store(self, from_reg, base_reg, offset): + if IS_PPC_32: + self.stw(from_reg, base_reg, offset) + else: + self.std(from_reg, base_reg, offset) + + def storex(self, from_reg, base_reg, offset_reg): + if IS_PPC_32: + self.stwx(from_reg, base_reg, offset_reg) + else: + self.stdx(from_reg, base_reg, offset_reg) + def prepare_insts_blocks(self, show=False): self.assemble(show) insts = self.insts diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -191,35 +191,28 @@ def emit_guard_true(self, op, arglocs, regalloc): l0 = arglocs[0] failargs = arglocs[1:] - if IS_PPC_32: - self.mc.cmpwi(l0.value, 0) - else: - self.mc.cmpdi(l0.value, 0) + self.mc.cmp_op(0, l0.value, 0, imm=True) self._emit_guard(op, failargs, c.EQ) # # ^^^^ If this condition is met, # # then the guard fails. 
def emit_guard_false(self, op, arglocs, regalloc): - l0 = arglocs[0] - failargs = arglocs[1:] - if IS_PPC_32: - self.mc.cmpwi(l0.value, 0) - else: - self.mc.cmpdi(l0.value, 0) - self._emit_guard(op, failargs, c.NE) + l0 = arglocs[0] + failargs = arglocs[1:] + self.mc.cmp_op(0, l0.value, 0, imm=True) + self._emit_guard(op, failargs, c.NE) # TODO - Evaluate whether this can be done with # SO bit instead of OV bit => usage of CR # instead of XER could be more efficient def _emit_ovf_guard(self, op, arglocs, cond): # move content of XER to GPR - self.mc.mfspr(r.r0.value, 1) + self.mc.alloc_scratch_reg() + self.mc.mfspr(r.SCRATCH.value, 1) # shift and mask to get comparison result - self.mc.rlwinm(r.r0.value, r.r0.value, 1, 0, 0) - if IS_PPC_32: - self.mc.cmpwi(r.r0.value, 0) - else: - self.mc.cmpdi(r.r0.value, 0) + self.mc.rlwinm(r.SCRATCH.value, r.SCRATCH.value, 1, 0, 0) + self.mc.cmp_op(0, r.SCRATCH.value, 0, imm=True) + self.mc.free_scratch_reg() self._emit_guard(op, arglocs, cond) def emit_guard_no_overflow(self, op, arglocs, regalloc): @@ -235,15 +228,9 @@ if l0.is_reg(): if l1.is_imm(): - if IS_PPC_32: - self.mc.cmpwi(l0.value, l1.getint()) - else: - self.mc.cmpdi(l0.value, l1.getint()) + self.mc.cmp_op(0, l0.value, l1.getint(), imm=True) else: - if IS_PPC_32: - self.mc.cmpw(l0.value, l1.value) - else: - self.mc.cmpd(l0.value, l1.value) + self.mc.cmp_op(0, l0.value, l1.value) else: assert 0, "not implemented yet" self._emit_guard(op, failargs, c.NE) @@ -254,17 +241,13 @@ def _cmp_guard_class(self, op, locs, regalloc): offset = locs[2] if offset is not None: + self.mc.alloc_scratch_reg() if offset.is_imm(): - if IS_PPC_32: - self.mc.lwz(r.r0.value, locs[0].value, offset.value) - else: - self.mc.ld(r.r0.value, locs[0].value, offset.value) + self.mc.load(r.SCRATCH.value, locs[0].value, offset.value) else: - if IS_PPC_32: - self.mc.lwzx(r.r0.value, locs[0].value, offset.value) - else: - self.mc.ldx(r.r0.value, locs[0].value, offset.value) - self.mc.cmp(r.r0.value, locs[1].value) + self.mc.loadx(r.SCRATCH.value, locs[0].value, offset.value) + self.mc.cmp_op(0, r.SCRATCH.value, locs[1].value) + self.mc.free_scratch_reg() else: assert 0, "not implemented yet" self._emit_guard(op, locs[3:], c.NE) @@ -274,10 +257,7 @@ def emit_guard_nonnull_class(self, op, arglocs, regalloc): offset = self.cpu.vtable_offset - if IS_PPC_32: - self.mc.cmpwi(arglocs[0].value, 0) - else: - self.mc.cmpdi(arglocs[0].value, 0) + self.mc.cmp_op(0, arglocs[0].value, 0, imm=True) if offset is not None: self._emit_guard(op, arglocs[3:], c.EQ) else: @@ -317,12 +297,8 @@ loc = arglocs[0] failargs = arglocs[1:] - if IS_PPC_32: - self.mc.lwz(loc.value, loc.value, 0) - self.mc.cmpwi(0, loc.value, 0) - else: - self.mc.ld(loc.value, loc.value, 0) - self.mc.cmpdi(0, loc.value, 0) + self.mc.load(loc.value, loc.value, 0) + self.mc.cmp_op(0, loc.value, 0, imm=True) self._emit_guard(op, failargs, c.NE, save_exc=True) @@ -332,30 +308,19 @@ self.mc.load_imm(loc1, pos_exception.value) self.mc.alloc_scratch_reg() - if IS_PPC_32: - self.mc.lwz(r.SCRATCH.value, loc1.value, 0) - self.mc.cmpw(0, r.SCRATCH.value, loc.value) - else: - self.mc.ld(r.SCRATCH.value, loc1.value, 0) - self.mc.cmpd(0, r.SCRATCH.value, loc.value) + self.mc.load(r.SCRATCH.value, loc1.value, 0) + self.mc.cmp_op(0, r.SCRATCH.value, loc.value) self.mc.free_scratch_reg() self._emit_guard(op, failargs, c.NE, save_exc=True) self.mc.load_imm(loc, pos_exc_value.value) if resloc: - if IS_PPC_32: - self.mc.lwz(resloc.value, loc.value, 0) - else: - self.mc.ld(resloc.value, 
loc.value, 0) + self.mc.load(resloc.value, loc.value, 0) self.mc.alloc_scratch_reg(0) - if IS_PPC_32: - self.mc.stw(r.SCRATCH.value, loc.value, 0) - self.mc.stw(r.SCRATCH.value, loc1.value, 0) - else: - self.mc.sd(r.SCRATCH.value, loc.value, 0) - self.mc.sd(r.SCRATCH.value, loc1.value, 0) + self.mc.store(r.SCRATCH.value, loc.value, 0) + self.mc.store(r.SCRATCH.value, loc1.value, 0) self.mc.free_scratch_reg() def emit_call(self, op, args, regalloc, force_index=-1): @@ -410,12 +375,8 @@ for i, arg in enumerate(stack_args): offset = param_offset + i * WORD if arg is not None: - #self.mc.load_imm(r.SCRATCH, arg.value) self.regalloc_mov(regalloc.loc(arg), r.SCRATCH) - if IS_PPC_32: - self.mc.stw(r.SCRATCH.value, r.SP.value, offset) - else: - self.mc.std(r.SCRATCH.value, r.SP.value, offset) + self.mc.store(r.SCRATCH.value, r.SP.value, offset) self.mc.free_scratch_reg() # collect variables that need to go in registers @@ -541,10 +502,7 @@ def emit_arraylen_gc(self, op, arglocs, regalloc): res, base_loc, ofs = arglocs - if IS_PPC_32: - self.mc.lwz(res.value, base_loc.value, ofs.value) - else: - self.mc.ld(res.value, base_loc.value, ofs.value) + self.mc.load(res.value, base_loc.value, ofs.value) def emit_setarrayitem_gc(self, op, arglocs, regalloc): value_loc, base_loc, ofs_loc, scale, ofs, scratch_reg = arglocs @@ -622,15 +580,9 @@ def emit_strlen(self, op, arglocs, regalloc): l0, l1, res = arglocs if l1.is_imm(): - if IS_PPC_32: - self.mc.lwz(res.value, l0.value, l1.getint()) - else: - self.mc.ld(res.value, l0.value, l1.getint()) + self.mc.load(res.value, l0.value, l1.getint()) else: - if IS_PPC_32: - self.mc.lwzx(res.value, l0.value, l1.value) - else: - self.mc.ldx(res.value, l0.value, l1.value) + self.mc.loadx(res.value, l0.value, l1.value) def emit_strgetitem(self, op, arglocs, regalloc): res, base_loc, ofs_loc, basesize = arglocs @@ -833,25 +785,19 @@ def set_vtable(self, box, vtable): if self.cpu.vtable_offset is not None: adr = rffi.cast(lltype.Signed, vtable) - self.mc.load_imm(r.r0, adr) - if IS_PPC_32: - self.mc.stw(r.r0.value, r.r3.value, self.cpu.vtable_offset) - else: - self.mc.std(r.r0.value, r.r3.value, self.cpu.vtable_offset) + self.mc.alloc_scratch_reg(adr) + self.mc.store(r.SCRATCH.value, r.RES.value, self.cpu.vtable_offset) + self.mc.free_scratch_reg() def emit_new_array(self, op, arglocs, regalloc): self.propagate_memoryerror_if_r3_is_null() if len(arglocs) > 0: value_loc, base_loc, ofs_length = arglocs - if IS_PPC_32: - self.mc.stw(value_loc.value, base_loc.value, ofs_length.value) - else: - self.mc.std(value_loc.value, base_loc.value, ofs_length.value) + self.mc.store(value_loc.value, base_loc.value, ofs_length.value) emit_newstr = emit_new_array emit_newunicode = emit_new_array - def write_new_force_index(self): # for shadowstack only: get a new, unused force_index number and # write it to FORCE_INDEX_OFS. 
Used to record the call shape @@ -895,10 +841,7 @@ loc_base = arglocs[0] self.mc.alloc_scratch_reg() - if IS_PPC_32: - self.mc.lwz(r.SCRATCH.value, loc_base.value, 0) - else: - self.mc.ld(r.SCRATCH.value, loc_base.value, 0) + self.mc.load(r.SCRATCH.value, loc_base.value, 0) # get the position of the bit we want to test bitpos = descr.jit_wb_if_flag_bitpos @@ -977,10 +920,7 @@ resloc = regalloc.try_allocate_reg(resbox) assert resloc is r.RES self.mc.alloc_scratch_reg(value) - if IS_PPC_32: - self.mc.cmpw(0, resloc.value, r.SCRATCH.value) - else: - self.mc.cmpd(0, resloc.value, r.SCRATCH.value) + self.mc.cmp_op(0, resloc.value, r.SCRATCH.value) self.mc.free_scratch_reg() regalloc.possibly_free_var(resbox) @@ -1034,10 +974,7 @@ self.alloc_scratch_reg() self.mov_loc_loc(arglocs[1], r.SCRATCH) self.mc.li(resloc.value, 0) - if IS_PPC_32: - self.mc.stwx(resloc.value, 0, r.SCRATCH.value) - else: - self.mc.stdx(resloc.value, 0, r.SCRATCH.value) + self.mc.storex(resloc.value, 0, r.SCRATCH.value) self.free_scratch_reg() regalloc.possibly_free_var(resbox) @@ -1058,10 +995,7 @@ if op.result.type == FLOAT: assert 0, "not implemented yet" else: - if IS_PPC_32: - self.mc.lwzx(resloc.value, 0, r.SCRATCH.value) - else: - self.mc.ldx(resloc.value, 0, r.SCRATCH.value) + self.mc.loadx(resloc.value, 0, r.SCRATCH.value) self.mc.free_scratch_reg() # merge point @@ -1072,12 +1006,8 @@ pmc.overwrite() self.mc.alloc_scratch_reg() - if IS_PPC_32: - self.mc.cmpwi(0, r.SCRATCH.value, 0) - self.mc.lwz(r.SCRATCH.value, r.SPP.value, 0) - else: - self.mc.cmpdi(0, r.SCRATCH.value, 0) - self.mc.ld(r.SCRATCH.value, r.SPP.value, 0) + self.mc.cmp_op(0, r.SCRATCH.value, 0, imm=True) + self.mc.load(r.SCRATCH.value, r.SPP.value, 0) self.mc.cror(2, 1, 2) self.mc.free_scratch_reg() @@ -1086,12 +1016,8 @@ def emit_guard_call_may_force(self, op, guard_op, arglocs, regalloc): ENCODING_AREA = len(r.MANAGED_REGS) * WORD self.mc.alloc_scratch_reg() - if IS_PPC_32: - self.mc.lwz(r.SCRATCH.value, r.SPP.value, ENCODING_AREA) - self.mc.cmpwi(0, r.SCRATCH.value, 0) - else: - self.mc.ld(r.SCRATCH.value, r.SPP.value, ENCODING_AREA) - self.mc.cmpdi(0, r.SCRATCH.value, 0) + self.mc.load(r.SCRATCH.value, r.SPP.value, ENCODING_AREA) + self.mc.cmp_op(0, r.SCRATCH.value, 0, imm=True) self.mc.free_scratch_reg() self._emit_guard(guard_op, arglocs, c.LT) diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -112,23 +112,15 @@ # save r31 later on if reg.value == r.SPP.value: continue - if IS_PPC_32: - self.mc.stw(reg.value, r.SPP.value, - self.OFFSET_SPP_TO_GPR_SAVE_AREA + WORD * i) - else: - self.mc.std(reg.value, r.SPP.value, - self.OFFSET_SPP_TO_GPR_SAVE_AREA + WORD * i) + self.mc.store(reg.value, r.SPP.value, + self.OFFSET_SPP_TO_GPR_SAVE_AREA + WORD * i) def _restore_nonvolatiles(self, mc, spp_reg): """ restore nonvolatile GPRs from GPR SAVE AREA """ for i, reg in enumerate(NONVOLATILES): - if IS_PPC_32: - mc.lwz(reg.value, spp_reg.value, - self.OFFSET_SPP_TO_GPR_SAVE_AREA + WORD * i) - else: - mc.ld(reg.value, spp_reg.value, - self.OFFSET_SPP_TO_GPR_SAVE_AREA + WORD * i) + mc.load(reg.value, spp_reg.value, + self.OFFSET_SPP_TO_GPR_SAVE_AREA + WORD * i) def get_asmmemmgr_blocks(self, looptoken): clt = looptoken.compiled_loop_token @@ -151,13 +143,12 @@ self.mc.mflr(r.SCRATCH.value) # move old link register # save old link register in previous frame self.mc.stw(r.SCRATCH.value, r.SP.value, 
frame_depth + WORD) - # save r31 at the bottom of the stack frame - self.mc.stw(r.SPP.value, r.SP.value, WORD) else: self.mc.stdu(r.SP.value, r.SP.value, -frame_depth) self.mc.mflr(r.SCRATCH.value) self.mc.std(r.SCRATCH.value, r.SP.value, frame_depth + 2 * WORD) - self.mc.std(r.SPP.value, r.SP.value, WORD) + # save SPP at the bottom of the stack frame + self.mc.store(r.SPP.value, r.SP.value, WORD) # compute spilling pointer (SPP) self.mc.addi(r.SPP.value, r.SP.value, @@ -168,12 +159,8 @@ assert NONVOLATILES[-1] == r.SPP ofs_to_r31 = (self.OFFSET_SPP_TO_GPR_SAVE_AREA + WORD * (len(NONVOLATILES)-1)) - if IS_PPC_32: - self.mc.lwz(r.r30.value, r.SP.value, WORD) - self.mc.stw(r.r30.value, r.SPP.value, ofs_to_r31) - else: - self.mc.ld(r.r30.value, r.SP.value, WORD) - self.mc.std(r.r30.value, r.SPP.value, ofs_to_r31) + self.mc.load(r.r30.value, r.SP.value, WORD) + self.mc.store(r.r30.value, r.SPP.value, ofs_to_r31) def setup_failure_recovery(self): @@ -336,10 +323,7 @@ r11_value = descr[2] # load parameters into parameter registers - if IS_PPC_32: - mc.lwz(r.r3.value, r.SPP.value, self.ENCODING_AREA) # address of state encoding - else: - mc.ld(r.r3.value, r.SPP.value, self.ENCODING_AREA) + mc.load(r.r3.value, r.SPP.value, self.ENCODING_AREA) # address of state encoding mc.mr(r.r4.value, r.SPP.value) # load spilling pointer # # load address of decoding function into SCRATCH @@ -361,10 +345,7 @@ mc.mr(r.r5.value, r.SPP.value) self._restore_nonvolatiles(mc, r.r5) # load old backchain into r4 - if IS_PPC_32: - mc.lwz(r.r4.value, r.r5.value, self.OFFSET_SPP_TO_OLD_BACKCHAIN + WORD) - else: - mc.ld(r.r4.value, r.r5.value, self.OFFSET_SPP_TO_OLD_BACKCHAIN + 2 * WORD) + mc.load(r.r4.value, r.r5.value, self.OFFSET_SPP_TO_OLD_BACKCHAIN + WORD) mc.mtlr(r.r4.value) # restore LR # From SPP, we have a constant offset to the old backchain. 
We use the # SPP to re-establish the old backchain because this exit stub is @@ -380,10 +361,7 @@ """ for i in range(len(r.MANAGED_REGS)): reg = r.MANAGED_REGS[i] - if IS_PPC_32: - mc.stw(reg.value, r.SPP.value, i * WORD) - else: - mc.std(reg.value, r.SPP.value, i * WORD) + mc.store(reg.value, r.SPP.value, i * WORD) # Load parameters from fail args into locations (stack or registers) def gen_bootstrap_code(self, nonfloatlocs, inputargs): @@ -453,10 +431,7 @@ else: loc = nonfloatlocs[i] if loc.is_reg(): - if IS_PPC_32: - self.mc.lwz(loc.value, r.SPP.value, stack_position) - else: - self.mc.ld(loc.value, r.SPP.value, stack_position) + self.mc.load(loc.value, r.SPP.value, stack_position) count += 1 elif loc.is_vfp_reg(): assert 0, "not implemented yet" @@ -466,10 +441,7 @@ elif loc.type == INT or loc.type == REF: count += 1 self.mc.alloc_scratch_reg() - if IS_PPC_32: - self.mc.lwz(r.SCRATCH.value, r.SPP.value, stack_position) - else: - self.mc.ld(r.SCRATCH.value, r.SPP.value, stack_position) + self.mc.load(r.SCRATCH.value, r.SPP.value, stack_position) self.mov_loc_loc(r.SCRATCH, loc) self.mc.free_scratch_reg() else: @@ -777,10 +749,7 @@ # store addr in force index field self.mc.alloc_scratch_reg(memaddr) - if IS_PPC_32: - self.mc.stw(r.SCRATCH.value, r.SPP.value, self.ENCODING_AREA) - else: - self.mc.std(r.SCRATCH.value, r.SPP.value, self.ENCODING_AREA) + self.mc.store(r.SCRATCH.value, r.SPP.value, self.ENCODING_AREA) self.mc.free_scratch_reg() if save_exc: @@ -834,10 +803,7 @@ self.mc.alloc_scratch_reg() offset = loc.as_key() * WORD - WORD self.mc.load_imm(r.SCRATCH.value, value) - if IS_PPC_32: - self.mc.stw(r.SCRATCH.value, r.SPP.value, offset) - else: - self.mc.std(r.SCRATCH.value, r.SPP.value, offset) + self.mc.store(r.SCRATCH.value, r.SPP.value, offset) self.mc.free_scratch_reg() return assert 0, "not supported location" @@ -846,21 +812,14 @@ # move from memory to register if loc.is_reg(): reg = loc.as_key() - if IS_PPC_32: - self.mc.lwz(reg, r.SPP.value, offset) - else: - self.mc.ld(reg, r.SPP.value, offset) + self.mc.load(reg, r.SPP.value, offset) return # move in memory elif loc.is_stack(): target_offset = loc.as_key() * WORD - WORD self.mc.alloc_scratch_reg() - if IS_PPC_32: - self.mc.lwz(r.SCRATCH.value, r.SPP.value, offset) - self.mc.stw(r.SCRATCH.value, r.SPP.value, target_offset) - else: - self.mc.ld(r.SCRATCH.value, r.SPP.value, offset) - self.mc.std(r.SCRATCH.value, r.SPP.value, target_offset) + self.mc.load(r.SCRATCH.value, r.SPP.value, offset) + self.mc.store(r.SCRATCH.value, r.SPP.value, target_offset) self.mc.free_scratch_reg() return assert 0, "not supported location" @@ -874,10 +833,7 @@ # move to memory elif loc.is_stack(): offset = loc.as_key() * WORD - WORD - if IS_PPC_32: - self.mc.stw(reg, r.SPP.value, offset) - else: - self.mc.std(reg, r.SPP.value, offset) + self.mc.store(reg, r.SPP.value, offset) return assert 0, "not supported location" assert 0, "not supported location" @@ -987,11 +943,10 @@ return 0 def _write_fail_index(self, fail_index): + self.mc.alloc_scratch_reg(fail_index) self.mc.load_imm(r.SCRATCH, fail_index) - if IS_PPC_32: - self.mc.stw(r.SCRATCH.value, r.SPP.value, self.ENCODING_AREA) - else: - self.mc.std(r.SCRATCH.value, r.SPP.value, self.ENCODING_AREA) + self.mc.store(r.SCRATCH.value, r.SPP.value, self.ENCODING_AREA) + self.mc.free_scratch_reg() def load(self, loc, value): assert loc.is_reg() and value.is_imm() From noreply at buildbot.pypy.org Tue Dec 20 21:01:18 2011 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 20 Dec 2011 21:01:18 
+0100 (CET) Subject: [pypy-commit] pypy windows-no-err-dlg: prevent windows testrunner from opening system error dialog boxes Message-ID: <20111220200118.7E5BE820B7@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: windows-no-err-dlg Changeset: r50772:f01058ed1577 Date: 2011-12-20 21:59 +0200 http://bitbucket.org/pypy/pypy/changeset/f01058ed1577/ Log: prevent windows testrunner from opening system error dialog boxes diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -21,7 +21,17 @@ win32api.CloseHandle(proch) except pywintypes.error, e: pass - + #Try to avoid opeing a dialog box if one of the tests causes a system error + import ctypes + winapi = ctypes.windll.kernel32 + SEM_FAILCRITICALERRORS = 1 + SEM_NOGPFAULTERRORBOX = 2 + SEM_NOOPENFILEERRORBOX = 0x8000 + flags = SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX + #Since there is no GetErrorMode, do a double Set + old_mode = winapi.SetErrorMode(flags) + winapi.SetErrorMode(old_mode | flags) + SIGKILL = SIGTERM = 0 READ_MODE = 'rU' WRITE_MODE = 'wb' From noreply at buildbot.pypy.org Tue Dec 20 21:41:56 2011 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 20 Dec 2011 21:41:56 +0100 (CET) Subject: [pypy-commit] pypy windows-no-err-dlg: close Message-ID: <20111220204156.DE85E820B7@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: windows-no-err-dlg Changeset: r50773:10e52e09cda7 Date: 2011-12-20 22:37 +0200 http://bitbucket.org/pypy/pypy/changeset/10e52e09cda7/ Log: close diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -31,7 +31,7 @@ #Since there is no GetErrorMode, do a double Set old_mode = winapi.SetErrorMode(flags) winapi.SetErrorMode(old_mode | flags) - + SIGKILL = SIGTERM = 0 READ_MODE = 'rU' WRITE_MODE = 'wb' From noreply at buildbot.pypy.org Tue Dec 20 21:41:58 2011 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 20 Dec 2011 21:41:58 +0100 (CET) Subject: [pypy-commit] pypy default: prevent system error dialog on windows Message-ID: <20111220204158.08891823F8@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r50774:b16fca1fa6ec Date: 2011-12-20 22:39 +0200 http://bitbucket.org/pypy/pypy/changeset/b16fca1fa6ec/ Log: prevent system error dialog on windows diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -21,6 +21,16 @@ win32api.CloseHandle(proch) except pywintypes.error, e: pass + #Try to avoid opeing a dialog box if one of the tests causes a system error + import ctypes + winapi = ctypes.windll.kernel32 + SEM_FAILCRITICALERRORS = 1 + SEM_NOGPFAULTERRORBOX = 2 + SEM_NOOPENFILEERRORBOX = 0x8000 + flags = SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX + #Since there is no GetErrorMode, do a double Set + old_mode = winapi.SetErrorMode(flags) + winapi.SetErrorMode(old_mode | flags) SIGKILL = SIGTERM = 0 READ_MODE = 'rU' From noreply at buildbot.pypy.org Tue Dec 20 21:58:17 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Dec 2011 21:58:17 +0100 (CET) Subject: [pypy-commit] pypy better-jit-hooks: merge in default Message-ID: <20111220205817.A4290820B7@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: better-jit-hooks Changeset: r50775:342a652c6fed Date: 2011-12-20 22:01 +0200 http://bitbucket.org/pypy/pypy/changeset/342a652c6fed/ Log: merge in default diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- 
a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -37,7 +37,7 @@ def get_arg_types(self): return self.arg_types - def get_return_type(self): + def get_result_type(self): return self.typeinfo def get_extra_info(self): diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -5,11 +5,7 @@ from pypy.jit.metainterp.history import AbstractDescr, getkind from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker, longlong - -# The point of the class organization in this file is to make instances -# as compact as possible. This is done by not storing the field size or -# the 'is_pointer_field' flag in the instance itself but in the class -# (in methods actually) using a few classes instead of just one. +from pypy.jit.codewriter.longlong import is_longlong class GcCache(object): @@ -19,6 +15,7 @@ self._cache_size = {} self._cache_field = {} self._cache_array = {} + self._cache_arraylen = {} self._cache_call = {} self._cache_interiorfield = {} @@ -26,24 +23,15 @@ assert isinstance(STRUCT, lltype.GcStruct) def init_array_descr(self, ARRAY, arraydescr): - assert isinstance(ARRAY, lltype.GcArray) + assert (isinstance(ARRAY, lltype.GcArray) or + isinstance(ARRAY, lltype.GcStruct) and ARRAY._arrayfld) -if lltype.SignedLongLong is lltype.Signed: - def is_longlong(TYPE): - return False -else: - assert rffi.sizeof(lltype.SignedLongLong) == rffi.sizeof(lltype.Float) - def is_longlong(TYPE): - return TYPE in (lltype.SignedLongLong, lltype.UnsignedLongLong) - # ____________________________________________________________ # SizeDescrs class SizeDescr(AbstractDescr): size = 0 # help translation - is_immutable = False - tid = llop.combine_ushort(lltype.Signed, 0, 0) def __init__(self, size, count_fields_if_immut=-1): @@ -77,265 +65,247 @@ cache[STRUCT] = sizedescr return sizedescr + # ____________________________________________________________ # FieldDescrs -class BaseFieldDescr(AbstractDescr): +FLAG_POINTER = 'P' +FLAG_FLOAT = 'F' +FLAG_UNSIGNED = 'U' +FLAG_SIGNED = 'S' +FLAG_STRUCT = 'X' +FLAG_VOID = 'V' + +class FieldDescr(AbstractDescr): + name = '' offset = 0 # help translation - name = '' - _clsname = '' + field_size = 0 + flag = '\x00' - def __init__(self, name, offset): + def __init__(self, name, offset, field_size, flag): self.name = name self.offset = offset + self.field_size = field_size + self.flag = flag + + def is_pointer_field(self): + return self.flag == FLAG_POINTER + + def is_float_field(self): + return self.flag == FLAG_FLOAT + + def is_field_signed(self): + return self.flag == FLAG_SIGNED def sort_key(self): return self.offset - def get_field_size(self, translate_support_code): - raise NotImplementedError + def repr_of_descr(self): + return '' % (self.flag, self.name, self.offset) - _is_pointer_field = False # unless overridden by GcPtrFieldDescr - _is_float_field = False # unless overridden by FloatFieldDescr - _is_field_signed = False # unless overridden by XxxFieldDescr - - def is_pointer_field(self): - return self._is_pointer_field - - def is_float_field(self): - return self._is_float_field - - def is_field_signed(self): - return self._is_field_signed - - def repr_of_descr(self): - return '<%s %s %s>' % (self._clsname, self.name, self.offset) - -class DynamicFieldDescr(BaseFieldDescr): - def __init__(self, offset, fieldsize, is_pointer, is_float, is_signed): - self.offset = offset - self._fieldsize = fieldsize - 
self._is_pointer_field = is_pointer - self._is_float_field = is_float - self._is_field_signed = is_signed - - def get_field_size(self, translate_support_code): - return self._fieldsize - -class NonGcPtrFieldDescr(BaseFieldDescr): - _clsname = 'NonGcPtrFieldDescr' - def get_field_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrFieldDescr(NonGcPtrFieldDescr): - _clsname = 'GcPtrFieldDescr' - _is_pointer_field = True - -def getFieldDescrClass(TYPE): - return getDescrClass(TYPE, BaseFieldDescr, GcPtrFieldDescr, - NonGcPtrFieldDescr, 'Field', 'get_field_size', - '_is_float_field', '_is_field_signed') def get_field_descr(gccache, STRUCT, fieldname): cache = gccache._cache_field try: return cache[STRUCT][fieldname] except KeyError: - offset, _ = symbolic.get_field_token(STRUCT, fieldname, - gccache.translate_support_code) + offset, size = symbolic.get_field_token(STRUCT, fieldname, + gccache.translate_support_code) FIELDTYPE = getattr(STRUCT, fieldname) + flag = get_type_flag(FIELDTYPE) name = '%s.%s' % (STRUCT._name, fieldname) - fielddescr = getFieldDescrClass(FIELDTYPE)(name, offset) + fielddescr = FieldDescr(name, offset, size, flag) cachedict = cache.setdefault(STRUCT, {}) cachedict[fieldname] = fielddescr return fielddescr +def get_type_flag(TYPE): + if isinstance(TYPE, lltype.Ptr): + if TYPE.TO._gckind == 'gc': + return FLAG_POINTER + else: + return FLAG_UNSIGNED + if isinstance(TYPE, lltype.Struct): + return FLAG_STRUCT + if TYPE is lltype.Float or is_longlong(TYPE): + return FLAG_FLOAT + if (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and + rffi.cast(TYPE, -1) == -1): + return FLAG_SIGNED + return FLAG_UNSIGNED + +def get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT): + cache = gccache._cache_arraylen + try: + return cache[ARRAY_OR_STRUCT] + except KeyError: + tsc = gccache.translate_support_code + (_, _, ofs) = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + size = symbolic.get_size(lltype.Signed, tsc) + result = FieldDescr("len", ofs, size, get_type_flag(lltype.Signed)) + cache[ARRAY_OR_STRUCT] = result + return result + + # ____________________________________________________________ # ArrayDescrs -_A = lltype.GcArray(lltype.Signed) # a random gcarray -_AF = lltype.GcArray(lltype.Float) # an array of C doubles +class ArrayDescr(AbstractDescr): + tid = 0 + basesize = 0 # workaround for the annotator + itemsize = 0 + lendescr = None + flag = '\x00' - -class BaseArrayDescr(AbstractDescr): - _clsname = '' - tid = llop.combine_ushort(lltype.Signed, 0, 0) - - def get_base_size(self, translate_support_code): - basesize, _, _ = symbolic.get_array_token(_A, translate_support_code) - return basesize - - def get_ofs_length(self, translate_support_code): - _, _, ofslength = symbolic.get_array_token(_A, translate_support_code) - return ofslength - - def get_item_size(self, translate_support_code): - raise NotImplementedError - - _is_array_of_pointers = False # unless overridden by GcPtrArrayDescr - _is_array_of_floats = False # unless overridden by FloatArrayDescr - _is_array_of_structs = False # unless overridden by StructArrayDescr - _is_item_signed = False # unless overridden by XxxArrayDescr + def __init__(self, basesize, itemsize, lendescr, flag): + self.basesize = basesize + self.itemsize = itemsize + self.lendescr = lendescr # or None, if no length + self.flag = flag def is_array_of_pointers(self): - return self._is_array_of_pointers + return self.flag == FLAG_POINTER def is_array_of_floats(self): - return 
self._is_array_of_floats + return self.flag == FLAG_FLOAT + + def is_item_signed(self): + return self.flag == FLAG_SIGNED def is_array_of_structs(self): - return self._is_array_of_structs - - def is_item_signed(self): - return self._is_item_signed + return self.flag == FLAG_STRUCT def repr_of_descr(self): - return '<%s>' % self._clsname + return '' % (self.flag, self.itemsize) -class NonGcPtrArrayDescr(BaseArrayDescr): - _clsname = 'NonGcPtrArrayDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayDescr(NonGcPtrArrayDescr): - _clsname = 'GcPtrArrayDescr' - _is_array_of_pointers = True - -class FloatArrayDescr(BaseArrayDescr): - _clsname = 'FloatArrayDescr' - _is_array_of_floats = True - def get_base_size(self, translate_support_code): - basesize, _, _ = symbolic.get_array_token(_AF, translate_support_code) - return basesize - def get_item_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class StructArrayDescr(BaseArrayDescr): - _clsname = 'StructArrayDescr' - _is_array_of_structs = True - -class BaseArrayNoLengthDescr(BaseArrayDescr): - def get_base_size(self, translate_support_code): - return 0 - - def get_ofs_length(self, translate_support_code): - return -1 - -class DynamicArrayNoLengthDescr(BaseArrayNoLengthDescr): - def __init__(self, itemsize): - self.itemsize = itemsize - - def get_item_size(self, translate_support_code): - return self.itemsize - -class NonGcPtrArrayNoLengthDescr(BaseArrayNoLengthDescr): - _clsname = 'NonGcPtrArrayNoLengthDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayNoLengthDescr(NonGcPtrArrayNoLengthDescr): - _clsname = 'GcPtrArrayNoLengthDescr' - _is_array_of_pointers = True - -def getArrayDescrClass(ARRAY): - if ARRAY.OF is lltype.Float: - return FloatArrayDescr - elif isinstance(ARRAY.OF, lltype.Struct): - class Descr(StructArrayDescr): - _clsname = '%sArrayDescr' % ARRAY.OF._name - def get_item_size(self, translate_support_code): - return symbolic.get_size(ARRAY.OF, translate_support_code) - Descr.__name__ = Descr._clsname - return Descr - return getDescrClass(ARRAY.OF, BaseArrayDescr, GcPtrArrayDescr, - NonGcPtrArrayDescr, 'Array', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def getArrayNoLengthDescrClass(ARRAY): - return getDescrClass(ARRAY.OF, BaseArrayNoLengthDescr, GcPtrArrayNoLengthDescr, - NonGcPtrArrayNoLengthDescr, 'ArrayNoLength', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def get_array_descr(gccache, ARRAY): +def get_array_descr(gccache, ARRAY_OR_STRUCT): cache = gccache._cache_array try: - return cache[ARRAY] + return cache[ARRAY_OR_STRUCT] except KeyError: - # we only support Arrays that are either GcArrays, or raw no-length - # non-gc Arrays. 
- if ARRAY._hints.get('nolength', False): - assert not isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayNoLengthDescrClass(ARRAY)() + tsc = gccache.translate_support_code + basesize, itemsize, _ = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + if isinstance(ARRAY_OR_STRUCT, lltype.Array): + ARRAY_INSIDE = ARRAY_OR_STRUCT else: - assert isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayDescrClass(ARRAY)() - # verify basic assumption that all arrays' basesize and ofslength - # are equal - basesize, itemsize, ofslength = symbolic.get_array_token(ARRAY, False) - assert basesize == arraydescr.get_base_size(False) - assert itemsize == arraydescr.get_item_size(False) - if not ARRAY._hints.get('nolength', False): - assert ofslength == arraydescr.get_ofs_length(False) - if isinstance(ARRAY, lltype.GcArray): - gccache.init_array_descr(ARRAY, arraydescr) - cache[ARRAY] = arraydescr + ARRAY_INSIDE = ARRAY_OR_STRUCT._flds[ARRAY_OR_STRUCT._arrayfld] + if ARRAY_INSIDE._hints.get('nolength', False): + lendescr = None + else: + lendescr = get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT) + flag = get_type_flag(ARRAY_INSIDE.OF) + arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag) + if ARRAY_OR_STRUCT._gckind == 'gc': + gccache.init_array_descr(ARRAY_OR_STRUCT, arraydescr) + cache[ARRAY_OR_STRUCT] = arraydescr return arraydescr + # ____________________________________________________________ # InteriorFieldDescr class InteriorFieldDescr(AbstractDescr): - arraydescr = BaseArrayDescr() # workaround for the annotator - fielddescr = BaseFieldDescr('', 0) + arraydescr = ArrayDescr(0, 0, None, '\x00') # workaround for the annotator + fielddescr = FieldDescr('', 0, 0, '\x00') def __init__(self, arraydescr, fielddescr): + assert arraydescr.flag == FLAG_STRUCT self.arraydescr = arraydescr self.fielddescr = fielddescr + def sort_key(self): + return self.fielddescr.sort_key() + def is_pointer_field(self): return self.fielddescr.is_pointer_field() def is_float_field(self): return self.fielddescr.is_float_field() - def sort_key(self): - return self.fielddescr.sort_key() - def repr_of_descr(self): return '' % self.fielddescr.repr_of_descr() -def get_interiorfield_descr(gc_ll_descr, ARRAY, FIELDTP, name): +def get_interiorfield_descr(gc_ll_descr, ARRAY, name): cache = gc_ll_descr._cache_interiorfield try: - return cache[(ARRAY, FIELDTP, name)] + return cache[(ARRAY, name)] except KeyError: arraydescr = get_array_descr(gc_ll_descr, ARRAY) - fielddescr = get_field_descr(gc_ll_descr, FIELDTP, name) + fielddescr = get_field_descr(gc_ll_descr, ARRAY.OF, name) descr = InteriorFieldDescr(arraydescr, fielddescr) - cache[(ARRAY, FIELDTP, name)] = descr + cache[(ARRAY, name)] = descr return descr +def get_dynamic_interiorfield_descr(gc_ll_descr, offset, width, fieldsize, + is_pointer, is_float, is_signed): + arraydescr = ArrayDescr(0, width, None, FLAG_STRUCT) + if is_pointer: + assert not is_float + flag = FLAG_POINTER + elif is_float: + flag = FLAG_FLOAT + elif is_signed: + flag = FLAG_SIGNED + else: + flag = FLAG_UNSIGNED + fielddescr = FieldDescr('dynamic', offset, fieldsize, flag) + return InteriorFieldDescr(arraydescr, fielddescr) + + # ____________________________________________________________ # CallDescrs -class BaseCallDescr(AbstractDescr): - _clsname = '' - loop_token = None +class CallDescr(AbstractDescr): arg_classes = '' # <-- annotation hack + result_type = '\x00' + result_flag = '\x00' ffi_flags = 1 + call_stub_i = staticmethod(lambda func, args_i, args_r, args_f: + 0) + call_stub_r = 
staticmethod(lambda func, args_i, args_r, args_f: + lltype.nullptr(llmemory.GCREF.TO)) + call_stub_f = staticmethod(lambda func,args_i,args_r,args_f: + longlong.ZEROF) - def __init__(self, arg_classes, extrainfo=None, ffi_flags=1): - self.arg_classes = arg_classes # string of "r" and "i" (ref/int) + def __init__(self, arg_classes, result_type, result_signed, result_size, + extrainfo=None, ffi_flags=1): + """ + 'arg_classes' is a string of characters, one per argument: + 'i', 'r', 'f', 'L', 'S' + + 'result_type' is one character from the same list or 'v' + + 'result_signed' is a boolean True/False + """ + self.arg_classes = arg_classes + self.result_type = result_type + self.result_size = result_size self.extrainfo = extrainfo self.ffi_flags = ffi_flags # NB. the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which # makes sense on Windows as it's the one for all the C functions # we are compiling together with the JIT. On non-Windows platforms # it is just ignored anyway. + if result_type == 'v': + result_flag = FLAG_VOID + elif result_type == 'i': + if result_signed: + result_flag = FLAG_SIGNED + else: + result_flag = FLAG_UNSIGNED + elif result_type == history.REF: + result_flag = FLAG_POINTER + elif result_type == history.FLOAT or result_type == 'L': + result_flag = FLAG_FLOAT + elif result_type == 'S': + result_flag = FLAG_UNSIGNED + else: + raise NotImplementedError("result_type = '%s'" % (result_type,)) + self.result_flag = result_flag def __repr__(self): - res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) + res = 'CallDescr(%s)' % (self.arg_classes,) extraeffect = getattr(self.extrainfo, 'extraeffect', None) if extraeffect is not None: res += ' EF=%r' % extraeffect @@ -363,14 +333,14 @@ def get_arg_types(self): return self.arg_classes - def get_return_type(self): - return self._return_type + def get_result_type(self): + return self.result_type - def get_result_size(self, translate_support_code): - raise NotImplementedError + def get_result_size(self): + return self.result_size def is_result_signed(self): - return False # unless overridden + return self.result_flag == FLAG_SIGNED def create_call_stub(self, rtyper, RESULT): from pypy.rlib.clibffi import FFI_DEFAULT_ABI @@ -408,18 +378,26 @@ seen = {'i': 0, 'r': 0, 'f': 0} args = ", ".join([process(c) for c in self.arg_classes]) - if self.get_return_type() == history.INT: + result_type = self.get_result_type() + if result_type == history.INT: result = 'rffi.cast(lltype.Signed, res)' - elif self.get_return_type() == history.REF: + category = 'i' + elif result_type == history.REF: + assert RESULT == llmemory.GCREF # should be ensured by the caller result = 'lltype.cast_opaque_ptr(llmemory.GCREF, res)' - elif self.get_return_type() == history.FLOAT: + category = 'r' + elif result_type == history.FLOAT: result = 'longlong.getfloatstorage(res)' - elif self.get_return_type() == 'L': + category = 'f' + elif result_type == 'L': result = 'rffi.cast(lltype.SignedLongLong, res)' - elif self.get_return_type() == history.VOID: - result = 'None' - elif self.get_return_type() == 'S': + category = 'f' + elif result_type == history.VOID: + result = '0' + category = 'i' + elif result_type == 'S': result = 'longlong.singlefloat2int(res)' + category = 'i' else: assert 0 source = py.code.Source(""" @@ -433,10 +411,13 @@ d = globals().copy() d.update(locals()) exec source.compile() in d - self.call_stub = d['call_stub'] + call_stub = d['call_stub'] + # store the function into one of three attributes, to preserve + # type-correctness of the 
return value + setattr(self, 'call_stub_%s' % category, call_stub) def verify_types(self, args_i, args_r, args_f, return_type): - assert self._return_type in return_type + assert self.result_type in return_type assert (self.arg_classes.count('i') + self.arg_classes.count('S')) == len(args_i or ()) assert self.arg_classes.count('r') == len(args_r or ()) @@ -444,161 +425,56 @@ self.arg_classes.count('L')) == len(args_f or ()) def repr_of_descr(self): - return '<%s>' % self._clsname + res = 'Call%s %d' % (self.result_type, self.result_size) + if self.arg_classes: + res += ' ' + self.arg_classes + if self.extrainfo: + res += ' EF=%d' % self.extrainfo.extraeffect + oopspecindex = self.extrainfo.oopspecindex + if oopspecindex: + res += ' OS=%d' % oopspecindex + return '<%s>' % res -class BaseIntCallDescr(BaseCallDescr): - # Base class of the various subclasses of descrs corresponding to - # calls having a return kind of 'int' (including non-gc pointers). - # The inheritance hierarchy is a bit different than with other Descr - # classes because of the 'call_stub' attribute, which is of type - # - # lambda func, args_i, args_r, args_f --> int/ref/float/void - # - # The purpose of BaseIntCallDescr is to be the parent of all classes - # in which 'call_stub' has a return kind of 'int'. - _return_type = history.INT - call_stub = staticmethod(lambda func, args_i, args_r, args_f: 0) - - _is_result_signed = False # can be overridden in XxxCallDescr - def is_result_signed(self): - return self._is_result_signed - -class DynamicIntCallDescr(BaseIntCallDescr): - """ - calldescr that works for every integer type, by explicitly passing it the - size of the result. Used only by get_call_descr_dynamic - """ - _clsname = 'DynamicIntCallDescr' - - def __init__(self, arg_classes, result_size, result_sign, extrainfo, ffi_flags): - BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) - assert isinstance(result_sign, bool) - self._result_size = chr(result_size) - self._result_sign = result_sign - - def get_result_size(self, translate_support_code): - return ord(self._result_size) - - def is_result_signed(self): - return self._result_sign - - -class NonGcPtrCallDescr(BaseIntCallDescr): - _clsname = 'NonGcPtrCallDescr' - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrCallDescr(BaseCallDescr): - _clsname = 'GcPtrCallDescr' - _return_type = history.REF - call_stub = staticmethod(lambda func, args_i, args_r, args_f: - lltype.nullptr(llmemory.GCREF.TO)) - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class FloatCallDescr(BaseCallDescr): - _clsname = 'FloatCallDescr' - _return_type = history.FLOAT - call_stub = staticmethod(lambda func,args_i,args_r,args_f: longlong.ZEROF) - def get_result_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class LongLongCallDescr(FloatCallDescr): - _clsname = 'LongLongCallDescr' - _return_type = 'L' - -class VoidCallDescr(BaseCallDescr): - _clsname = 'VoidCallDescr' - _return_type = history.VOID - call_stub = staticmethod(lambda func, args_i, args_r, args_f: None) - def get_result_size(self, translate_support_code): - return 0 - -_SingleFloatCallDescr = None # built lazily - -def getCallDescrClass(RESULT): - if RESULT is lltype.Void: - return VoidCallDescr - if RESULT is lltype.Float: - return FloatCallDescr - if RESULT is lltype.SingleFloat: - global _SingleFloatCallDescr - if 
_SingleFloatCallDescr is None: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(RESULT) - class SingleFloatCallDescr(getCallDescrClass(rffi.UINT)): - _clsname = 'SingleFloatCallDescr' - _return_type = 'S' - _SingleFloatCallDescr = SingleFloatCallDescr - return _SingleFloatCallDescr - if is_longlong(RESULT): - return LongLongCallDescr - return getDescrClass(RESULT, BaseIntCallDescr, GcPtrCallDescr, - NonGcPtrCallDescr, 'Call', 'get_result_size', - Ellipsis, # <= floatattrname should not be used here - '_is_result_signed') -getCallDescrClass._annspecialcase_ = 'specialize:memo' +def map_type_to_argclass(ARG, accept_void=False): + kind = getkind(ARG) + if kind == 'int': + if ARG is lltype.SingleFloat: return 'S' + else: return 'i' + elif kind == 'ref': return 'r' + elif kind == 'float': + if is_longlong(ARG): return 'L' + else: return 'f' + elif kind == 'void': + if accept_void: return 'v' + raise NotImplementedError('ARG = %r' % (ARG,)) def get_call_descr(gccache, ARGS, RESULT, extrainfo=None): - arg_classes = [] - for ARG in ARGS: - kind = getkind(ARG) - if kind == 'int': - if ARG is lltype.SingleFloat: - arg_classes.append('S') + arg_classes = map(map_type_to_argclass, ARGS) + arg_classes = ''.join(arg_classes) + result_type = map_type_to_argclass(RESULT, accept_void=True) + RESULT_ERASED = RESULT + if RESULT is lltype.Void: + result_size = 0 + result_signed = False + else: + if isinstance(RESULT, lltype.Ptr): + # avoid too many CallDescrs + if result_type == 'r': + RESULT_ERASED = llmemory.GCREF else: - arg_classes.append('i') - elif kind == 'ref': arg_classes.append('r') - elif kind == 'float': - if is_longlong(ARG): - arg_classes.append('L') - else: - arg_classes.append('f') - else: - raise NotImplementedError('ARG = %r' % (ARG,)) - arg_classes = ''.join(arg_classes) - cls = getCallDescrClass(RESULT) - key = (cls, arg_classes, extrainfo) + RESULT_ERASED = llmemory.Address + result_size = symbolic.get_size(RESULT_ERASED, + gccache.translate_support_code) + result_signed = get_type_flag(RESULT) == FLAG_SIGNED + key = (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo) cache = gccache._cache_call try: - return cache[key] + calldescr = cache[key] except KeyError: - calldescr = cls(arg_classes, extrainfo) - calldescr.create_call_stub(gccache.rtyper, RESULT) + calldescr = CallDescr(arg_classes, result_type, result_signed, + result_size, extrainfo) + calldescr.create_call_stub(gccache.rtyper, RESULT_ERASED) cache[key] = calldescr - return calldescr - - -# ____________________________________________________________ - -def getDescrClass(TYPE, BaseDescr, GcPtrDescr, NonGcPtrDescr, - nameprefix, methodname, floatattrname, signedattrname, - _cache={}): - if isinstance(TYPE, lltype.Ptr): - if TYPE.TO._gckind == 'gc': - return GcPtrDescr - else: - return NonGcPtrDescr - if TYPE is lltype.SingleFloat: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(TYPE) - TYPE = rffi.UINT - try: - return _cache[nameprefix, TYPE] - except KeyError: - # - class Descr(BaseDescr): - _clsname = '%s%sDescr' % (TYPE._name, nameprefix) - Descr.__name__ = Descr._clsname - # - def method(self, translate_support_code): - return symbolic.get_size(TYPE, translate_support_code) - setattr(Descr, methodname, method) - # - if TYPE is lltype.Float or is_longlong(TYPE): - setattr(Descr, floatattrname, True) - elif (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and - rffi.cast(TYPE, -1) == -1): - setattr(Descr, signedattrname, True) - # - _cache[nameprefix, TYPE] = Descr - return Descr + assert 
repr(calldescr.result_size) == repr(result_size) + return calldescr diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -1,9 +1,7 @@ from pypy.rlib.rarithmetic import intmask from pypy.jit.metainterp import history from pypy.rpython.lltypesystem import rffi -from pypy.jit.backend.llsupport.descr import ( - DynamicIntCallDescr, NonGcPtrCallDescr, FloatCallDescr, VoidCallDescr, - LongLongCallDescr, getCallDescrClass) +from pypy.jit.backend.llsupport.descr import CallDescr class UnsupportedKind(Exception): pass @@ -16,29 +14,13 @@ argkinds = [get_ffi_type_kind(cpu, arg) for arg in ffi_args] except UnsupportedKind: return None - arg_classes = ''.join(argkinds) - if reskind == history.INT: - size = intmask(ffi_result.c_size) - signed = is_ffi_type_signed(ffi_result) - return DynamicIntCallDescr(arg_classes, size, signed, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.REF: - return NonGcPtrCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.FLOAT: - return FloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.VOID: - return VoidCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'L': - return LongLongCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'S': - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - return SingleFloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - assert False + if reskind == history.VOID: + result_size = 0 + else: + result_size = intmask(ffi_result.c_size) + argkinds = ''.join(argkinds) + return CallDescr(argkinds, reskind, is_ffi_type_signed(ffi_result), + result_size, extrainfo, ffi_flags=ffi_flags) def get_ffi_type_kind(cpu, ffi_type): from pypy.rlib.libffi import types diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,6 +1,6 @@ import os from pypy.rlib import rgc -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize from pypy.rlib.debug import fatalerror from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr @@ -8,52 +8,93 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt, ConstPtr -from pypy.jit.metainterp.history import AbstractDescr +from pypy.jit.codewriter import heaptracker +from pypy.jit.metainterp.history import ConstPtr, AbstractDescr from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.symbolic import WORD -from pypy.jit.backend.llsupport.descr import BaseSizeDescr, BaseArrayDescr +from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr -from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr +from pypy.jit.backend.llsupport.descr import get_array_descr from pypy.jit.backend.llsupport.descr import get_call_descr +from pypy.jit.backend.llsupport.rewrite import GcRewriterAssembler from pypy.rpython.memory.gctransform import asmgcroot # 
____________________________________________________________ class GcLLDescription(GcCache): - minimal_size_in_nursery = 0 - get_malloc_slowpath_addr = None def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr + if translator and translator.config.translation.gcremovetypeptr: + self.fielddescr_vtable = None + else: + self.fielddescr_vtable = get_field_descr(self, rclass.OBJECT, + 'typeptr') + self._generated_functions = [] + + def _setup_str(self): + self.str_descr = get_array_descr(self, rstr.STR) + self.unicode_descr = get_array_descr(self, rstr.UNICODE) + + def generate_function(self, funcname, func, ARGS, RESULT=llmemory.GCREF): + """Generates a variant of malloc with the given name and the given + arguments. It should return NULL if out of memory. If it raises + anything, it must be an optional MemoryError. + """ + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, RESULT)) + descr = get_call_descr(self, ARGS, RESULT) + setattr(self, funcname, func) + setattr(self, funcname + '_FUNCPTR', FUNCPTR) + setattr(self, funcname + '_descr', descr) + self._generated_functions.append(funcname) + + @specialize.arg(1) + def get_malloc_fn(self, funcname): + func = getattr(self, funcname) + FUNC = getattr(self, funcname + '_FUNCPTR') + return llhelper(FUNC, func) + + @specialize.arg(1) + def get_malloc_fn_addr(self, funcname): + ll_func = self.get_malloc_fn(funcname) + return heaptracker.adr2int(llmemory.cast_ptr_to_adr(ll_func)) + def _freeze_(self): return True def initialize(self): pass def do_write_barrier(self, gcref_struct, gcref_newptr): pass - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - return operations - def can_inline_malloc(self, descr): - return False - def can_inline_malloc_varsize(self, descr, num_elem): + def can_use_nursery_malloc(self, size): return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): pass + def get_nursery_free_addr(self): + raise NotImplementedError + def get_nursery_top_addr(self): + raise NotImplementedError - def get_funcptr_for_newarray(self): - return llhelper(self.GC_MALLOC_ARRAY, self.malloc_array) - def get_funcptr_for_newstr(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_str) - def get_funcptr_for_newunicode(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_unicode) + def gc_malloc(self, sizedescr): + """Blackhole: do a 'bh_new'. 
Also used for 'bh_new_with_vtable', + with the vtable pointer set manually afterwards.""" + assert isinstance(sizedescr, SizeDescr) + return self._bh_malloc(sizedescr) + def gc_malloc_array(self, arraydescr, num_elem): + assert isinstance(arraydescr, ArrayDescr) + return self._bh_malloc_array(arraydescr, num_elem) - def record_constptrs(self, op, gcrefs_output_list): + def gc_malloc_str(self, num_elem): + return self._bh_malloc_array(self.str_descr, num_elem) + + def gc_malloc_unicode(self, num_elem): + return self._bh_malloc_array(self.unicode_descr, num_elem) + + def _record_constptrs(self, op, gcrefs_output_list): for i in range(op.numargs()): v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): @@ -61,11 +102,27 @@ rgc._make_sure_does_not_move(p) gcrefs_output_list.append(p) + def rewrite_assembler(self, cpu, operations, gcrefs_output_list): + rewriter = GcRewriterAssembler(self, cpu) + newops = rewriter.rewrite(operations) + # record all GCREFs, because the GC (or Boehm) cannot see them and + # keep them alive if they end up as constants in the assembler + for op in newops: + self._record_constptrs(op, gcrefs_output_list) + return newops + # ____________________________________________________________ class GcLLDescr_boehm(GcLLDescription): - moving_gc = False - gcrootmap = None + kind = 'boehm' + moving_gc = False + round_up = False + gcrootmap = None + write_barrier_descr = None + fielddescr_tid = None + str_type_id = 0 + unicode_type_id = 0 + get_malloc_slowpath_addr = None @classmethod def configure_boehm_once(cls): @@ -76,6 +133,16 @@ from pypy.rpython.tool import rffi_platform compilation_info = rffi_platform.configure_boehm() + # on some platform GC_init is required before any other + # GC_* functions, call it here for the benefit of tests + # XXX move this to tests + init_fn_ptr = rffi.llexternal("GC_init", + [], lltype.Void, + compilation_info=compilation_info, + sandboxsafe=True, + _nowrapper=True) + init_fn_ptr() + # Versions 6.x of libgc needs to use GC_local_malloc(). 
# Versions 7.x of libgc removed this function; GC_malloc() has # the same behavior if libgc was compiled with @@ -95,96 +162,42 @@ sandboxsafe=True, _nowrapper=True) cls.malloc_fn_ptr = malloc_fn_ptr - cls.compilation_info = compilation_info return malloc_fn_ptr def __init__(self, gcdescr, translator, rtyper): GcLLDescription.__init__(self, gcdescr, translator, rtyper) # grab a pointer to the Boehm 'malloc' function - malloc_fn_ptr = self.configure_boehm_once() - self.funcptr_for_new = malloc_fn_ptr + self.malloc_fn_ptr = self.configure_boehm_once() + self._setup_str() + self._make_functions() - def malloc_array(basesize, itemsize, ofs_length, num_elem): + def _make_functions(self): + + def malloc_fixedsize(size): + return self.malloc_fn_ptr(size) + self.generate_function('malloc_fixedsize', malloc_fixedsize, + [lltype.Signed]) + + def malloc_array(basesize, num_elem, itemsize, ofs_length): try: - size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) except OverflowError: return lltype.nullptr(llmemory.GCREF.TO) - res = self.funcptr_for_new(size) - if not res: - return res - rffi.cast(rffi.CArrayPtr(lltype.Signed), res)[ofs_length/WORD] = num_elem + res = self.malloc_fn_ptr(totalsize) + if res: + arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) + arrayptr[ofs_length/WORD] = num_elem return res - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 4, llmemory.GCREF)) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 4) + def _bh_malloc(self, sizedescr): + return self.malloc_fixedsize(sizedescr.size) - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, self.translate_support_code) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code) - def malloc_str(length): - return self.malloc_array( - str_basesize, str_itemsize, str_ofs_length, length - ) - def malloc_unicode(length): - return self.malloc_array( - unicode_basesize, unicode_itemsize, unicode_ofs_length, length - ) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - - - # on some platform GC_init is required before any other - # GC_* functions, call it here for the benefit of tests - # XXX move this to tests - init_fn_ptr = rffi.llexternal("GC_init", - [], lltype.Void, - compilation_info=self.compilation_info, - sandboxsafe=True, - _nowrapper=True) - - init_fn_ptr() - - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.funcptr_for_new(sizedescr.size) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(basesize, itemsize, ofs_length, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size] - - def args_for_new_array(self, arraydescr): - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = 
arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [basesize, itemsize, ofs_length] - - def get_funcptr_for_new(self): - return self.funcptr_for_new - - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # record all GCREFs too, because Boehm cannot see them and keep them - # alive if they end up as constants in the assembler - for op in operations: - self.record_constptrs(op, gcrefs_output_list) - return GcLLDescription.rewrite_assembler(self, cpu, operations, - gcrefs_output_list) + def _bh_malloc_array(self, arraydescr, num_elem): + return self.malloc_array(arraydescr.basesize, num_elem, + arraydescr.itemsize, + arraydescr.lendescr.offset) # ____________________________________________________________ @@ -554,12 +567,14 @@ class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): - GCClass = gc_ll_descr.GCClass self.llop1 = gc_ll_descr.llop1 self.WB_FUNCPTR = gc_ll_descr.WB_FUNCPTR self.WB_ARRAY_FUNCPTR = gc_ll_descr.WB_ARRAY_FUNCPTR - self.fielddescr_tid = get_field_descr(gc_ll_descr, GCClass.HDR, 'tid') + self.fielddescr_tid = gc_ll_descr.fielddescr_tid # + GCClass = gc_ll_descr.GCClass + if GCClass is None: # for tests + return self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = ( self.extract_flag_byte(self.jit_wb_if_flag)) @@ -596,48 +611,74 @@ funcaddr = llmemory.cast_ptr_to_adr(funcptr) return cpu.cast_adr_to_int(funcaddr) # this may return 0 + def has_write_barrier_from_array(self, cpu): + return self.get_write_barrier_from_array_fn(cpu) != 0 + class GcLLDescr_framework(GcLLDescription): DEBUG = False # forced to True by x86/test/test_zrpy_gc.py + kind = 'framework' + round_up = True - def __init__(self, gcdescr, translator, rtyper, llop1=llop): - from pypy.rpython.memory.gctypelayout import check_typeid - from pypy.rpython.memory.gcheader import GCHeaderBuilder - from pypy.rpython.memory.gctransform import framework + def __init__(self, gcdescr, translator, rtyper, llop1=llop, + really_not_translated=False): GcLLDescription.__init__(self, gcdescr, translator, rtyper) - assert self.translate_support_code, "required with the framework GC" self.translator = translator self.llop1 = llop1 + if really_not_translated: + assert not self.translate_support_code # but half does not work + self._initialize_for_tests() + else: + assert self.translate_support_code,"required with the framework GC" + self._check_valid_gc() + self._make_gcrootmap() + self._make_layoutbuilder() + self._setup_gcclass() + self._setup_tid() + self._setup_write_barrier() + self._setup_str() + self._make_functions(really_not_translated) + def _initialize_for_tests(self): + self.layoutbuilder = None + self.fielddescr_tid = AbstractDescr() + self.max_size_of_young_obj = 1000 + self.GCClass = None + + def _check_valid_gc(self): # we need the hybrid or minimark GC for rgc._make_sure_does_not_move() # to work - if gcdescr.config.translation.gc not in ('hybrid', 'minimark'): + if self.gcdescr.config.translation.gc not in ('hybrid', 'minimark'): raise NotImplementedError("--gc=%s not implemented with the JIT" % (gcdescr.config.translation.gc,)) + def _make_gcrootmap(self): # to find roots in the assembler, make a GcRootMap - name = gcdescr.config.translation.gcrootfinder + name = self.gcdescr.config.translation.gcrootfinder try: cls = globals()['GcRootMap_' + name] except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % 
(name,)) - gcrootmap = cls(gcdescr) + gcrootmap = cls(self.gcdescr) self.gcrootmap = gcrootmap + def _make_layoutbuilder(self): # make a TransformerLayoutBuilder and save it on the translator # where it can be fished and reused by the FrameworkGCTransformer + from pypy.rpython.memory.gctransform import framework + translator = self.translator self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} - gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + self.gcrootmap.add_jit2gc_hooks(translator._jit2gc) + def _setup_gcclass(self): + from pypy.rpython.memory.gcheader import GCHeaderBuilder self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO) - (self.array_basesize, _, self.array_length_ofs) = \ - symbolic.get_array_token(lltype.GcArray(lltype.Signed), True) self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() @@ -645,87 +686,124 @@ assert self.GCClass.inline_simple_malloc assert self.GCClass.inline_simple_malloc_varsize - # make a malloc function, with two arguments - def malloc_basic(size, tid): - type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - check_typeid(type_id) - res = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - type_id, size, - False, False, False) - # In case the operation above failed, we are returning NULL - # from this function to assembler. There is also an RPython - # exception set, typically MemoryError; but it's easier and - # faster to check for the NULL return value, as done by - # translator/exceptiontransform.py. - #llop.debug_print(lltype.Void, "\tmalloc_basic", size, type_id, - # "-->", res) - return res - self.malloc_basic = malloc_basic - self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType( - [lltype.Signed, lltype.Signed], llmemory.GCREF)) + def _setup_tid(self): + self.fielddescr_tid = get_field_descr(self, self.GCClass.HDR, 'tid') + + def _setup_write_barrier(self): self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, llmemory.Address], lltype.Void)) self.WB_ARRAY_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, lltype.Signed, llmemory.Address], lltype.Void)) self.write_barrier_descr = WriteBarrierDescr(self) - # + + def _make_functions(self, really_not_translated): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + (self.standard_array_basesize, _, self.standard_array_length_ofs) = \ + symbolic.get_array_token(lltype.GcArray(lltype.Signed), + not really_not_translated) + + def malloc_nursery_slowpath(size): + """Allocate 'size' null bytes out of the nursery. + Note that the fast path is typically inlined by the backend.""" + if self.DEBUG: + self._random_usage_of_xmm_registers() + type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, size, + False, False, False) + self.generate_function('malloc_nursery', malloc_nursery_slowpath, + [lltype.Signed]) + def malloc_array(itemsize, tid, num_elem): + """Allocate an array with a variable-size num_elem. 
+ Only works for standard arrays.""" type_id = llop.extract_ushort(llgroup.HALFWORD, tid) check_typeid(type_id) return llop1.do_malloc_varsize_clear( llmemory.GCREF, - type_id, num_elem, self.array_basesize, itemsize, - self.array_length_ofs) - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 3, llmemory.GCREF)) - # - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, True) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, True) - str_type_id = self.layoutbuilder.get_type_id(rstr.STR) - unicode_type_id = self.layoutbuilder.get_type_id(rstr.UNICODE) - # + type_id, num_elem, self.standard_array_basesize, itemsize, + self.standard_array_length_ofs) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 3) + + def malloc_array_nonstandard(basesize, itemsize, lengthofs, tid, + num_elem): + """For the rare case of non-standard arrays, i.e. arrays where + self.standard_array_{basesize,length_ofs} is wrong. It can + occur e.g. with arrays of floats on Win32.""" + type_id = llop.extract_ushort(llgroup.HALFWORD, tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + type_id, num_elem, basesize, itemsize, lengthofs) + self.generate_function('malloc_array_nonstandard', + malloc_array_nonstandard, + [lltype.Signed] * 5) + + str_type_id = self.str_descr.tid + str_basesize = self.str_descr.basesize + str_itemsize = self.str_descr.itemsize + str_ofs_length = self.str_descr.lendescr.offset + unicode_type_id = self.unicode_descr.tid + unicode_basesize = self.unicode_descr.basesize + unicode_itemsize = self.unicode_descr.itemsize + unicode_ofs_length = self.unicode_descr.lendescr.offset + def malloc_str(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, str_type_id, length, str_basesize, str_itemsize, str_ofs_length) + self.generate_function('malloc_str', malloc_str, + [lltype.Signed]) + def malloc_unicode(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, - unicode_type_id, length, unicode_basesize,unicode_itemsize, + unicode_type_id, length, unicode_basesize, unicode_itemsize, unicode_ofs_length) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - # - class ForTestOnly: - pass - for_test_only = ForTestOnly() - for_test_only.x = 1.23 - def random_usage_of_xmm_registers(): - x0 = for_test_only.x - x1 = x0 * 0.1 - x2 = x0 * 0.2 - x3 = x0 * 0.3 - for_test_only.x = x0 + x1 + x2 + x3 - # - def malloc_slowpath(size): - if self.DEBUG: - random_usage_of_xmm_registers() - assert size >= self.minimal_size_in_nursery - # NB. although we call do_malloc_fixedsize_clear() here, - # it's a bit of a hack because we set tid to 0 and may - # also use it to allocate varsized objects. The tid - # and possibly the length are both set afterward. - gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - 0, size, False, False, False) - return rffi.cast(lltype.Signed, gcref) - self.malloc_slowpath = malloc_slowpath - self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) + self.generate_function('malloc_unicode', malloc_unicode, + [lltype.Signed]) + + # Rarely called: allocate a fixed-size amount of bytes, but + # not in the nursery, because it is too big. Implemented like + # malloc_nursery_slowpath() above. 
+ self.generate_function('malloc_fixedsize', malloc_nursery_slowpath, + [lltype.Signed]) + + def _bh_malloc(self, sizedescr): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, sizedescr.tid) + check_typeid(type_id) + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, sizedescr.size, + False, False, False) + + def _bh_malloc_array(self, arraydescr, num_elem): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, arraydescr.tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear(llmemory.GCREF, + type_id, num_elem, + arraydescr.basesize, + arraydescr.itemsize, + arraydescr.lendescr.offset) + + + class ForTestOnly: + pass + for_test_only = ForTestOnly() + for_test_only.x = 1.23 + + def _random_usage_of_xmm_registers(self): + x0 = self.for_test_only.x + x1 = x0 * 0.1 + x2 = x0 * 0.2 + x3 = x0 * 0.3 + self.for_test_only.x = x0 + x1 + x2 + x3 def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -735,49 +813,26 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) - return rffi.cast(lltype.Signed, fptr) - def initialize(self): self.gcrootmap.initialize() def init_size_descr(self, S, descr): - type_id = self.layoutbuilder.get_type_id(S) - assert not self.layoutbuilder.is_weakref_type(S) - assert not self.layoutbuilder.has_finalizer(S) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(S) + assert not self.layoutbuilder.is_weakref_type(S) + assert not self.layoutbuilder.has_finalizer(S) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) def init_array_descr(self, A, descr): - type_id = self.layoutbuilder.get_type_id(A) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(A) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.malloc_basic(sizedescr.size, sizedescr.tid) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(itemsize, arraydescr.tid, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size, sizedescr.tid] - - def args_for_new_array(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [itemsize, arraydescr.tid] - - def get_funcptr_for_new(self): - return llhelper(self.GC_MALLOC_BASIC, self.malloc_basic) + def _set_tid(self, gcptr, tid): + hdr_addr = llmemory.cast_ptr_to_adr(gcptr) + hdr_addr -= self.gcheaderbuilder.size_gc_header + hdr = llmemory.cast_adr_to_ptr(hdr_addr, self.HDRPTR) + hdr.tid = tid def do_write_barrier(self, gcref_struct, gcref_newptr): hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct) @@ -791,108 +846,8 @@ funcptr(llmemory.cast_ptr_to_adr(gcref_struct), 
llmemory.cast_ptr_to_adr(gcref_newptr)) - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # Perform two kinds of rewrites in parallel: - # - # - Add COND_CALLs to the write barrier before SETFIELD_GC and - # SETARRAYITEM_GC operations. - # - # - Record the ConstPtrs from the assembler. - # - newops = [] - known_lengths = {} - # we can only remember one malloc since the next malloc can possibly - # collect - last_malloc = None - for op in operations: - if op.getopnum() == rop.DEBUG_MERGE_POINT: - continue - # ---------- record the ConstPtrs ---------- - self.record_constptrs(op, gcrefs_output_list) - if op.is_malloc(): - last_malloc = op.result - elif op.can_malloc(): - last_malloc = None - # ---------- write barrier for SETFIELD_GC ---------- - if op.getopnum() == rop.SETFIELD_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(1) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETFIELD_RAW) - # ---------- write barrier for SETINTERIORFIELD_GC ------ - if op.getopnum() == rop.SETINTERIORFIELD_GC: - val = op.getarg(0) - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) - # ---------- write barrier for SETARRAYITEM_GC ---------- - if op.getopnum() == rop.SETARRAYITEM_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier_array(newops, op.getarg(0), - op.getarg(1), v, - cpu, known_lengths) - op = op.copy_and_change(rop.SETARRAYITEM_RAW) - elif op.getopnum() == rop.NEW_ARRAY: - v_length = op.getarg(0) - if isinstance(v_length, ConstInt): - known_lengths[op.result] = v_length.getint() - # ---------- - newops.append(op) - return newops - - def _gen_write_barrier(self, newops, v_base, v_value): - args = [v_base, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, - descr=self.write_barrier_descr)) - - def _gen_write_barrier_array(self, newops, v_base, v_index, v_value, - cpu, known_lengths): - if self.write_barrier_descr.get_write_barrier_from_array_fn(cpu) != 0: - # If we know statically the length of 'v', and it is not too - # big, then produce a regular write_barrier. If it's unknown or - # too big, produce instead a write_barrier_from_array. 
- LARGE = 130 - length = known_lengths.get(v_base, LARGE) - if length >= LARGE: - # unknown or too big: produce a write_barrier_from_array - args = [v_base, v_index, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, - None, - descr=self.write_barrier_descr)) - return - # fall-back case: produce a write_barrier - self._gen_write_barrier(newops, v_base, v_value) - - def can_inline_malloc(self, descr): - assert isinstance(descr, BaseSizeDescr) - if descr.size < self.max_size_of_young_obj: - has_finalizer = bool(descr.tid & (1<= LARGE: + # unknown or too big: produce a write_barrier_from_array + args = [v_base, v_index, v_value] + self.newops.append( + ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, + descr=write_barrier_descr)) + return + # fall-back case: produce a write_barrier + self.gen_write_barrier(v_base, v_value) + + def round_up_for_allocation(self, size): + if not self.gc_ll_descr.round_up: + return size + if self.gc_ll_descr.translate_support_code: + from pypy.rpython.lltypesystem import llarena + return llarena.round_up_for_allocation( + size, self.gc_ll_descr.minimal_size_in_nursery) + else: + # non-translated: do it manually + # assume that "self.gc_ll_descr.minimal_size_in_nursery" is 2 WORDs + size = max(size, 2 * WORD) + return (size + WORD-1) & ~(WORD-1) # round up diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -1,4 +1,4 @@ -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport import symbolic from pypy.rlib.objectmodel import Symbolic @@ -53,18 +53,6 @@ ('z', lltype.Ptr(U)), ('f', lltype.Float), ('s', lltype.SingleFloat)) - assert getFieldDescrClass(lltype.Ptr(T)) is GcPtrFieldDescr - assert getFieldDescrClass(lltype.Ptr(U)) is NonGcPtrFieldDescr - cls = getFieldDescrClass(lltype.Char) - assert cls != getFieldDescrClass(lltype.Signed) - assert cls == getFieldDescrClass(lltype.Char) - clsf = getFieldDescrClass(lltype.Float) - assert clsf != cls - assert clsf == getFieldDescrClass(lltype.Float) - clss = getFieldDescrClass(lltype.SingleFloat) - assert clss not in (cls, clsf) - assert clss == getFieldDescrClass(lltype.SingleFloat) - assert clss == getFieldDescrClass(rffi.UINT) # for now # c0 = GcCache(False) c1 = GcCache(True) @@ -77,11 +65,7 @@ descr_z = get_field_descr(c2, S, 'z') descr_f = get_field_descr(c2, S, 'f') descr_s = get_field_descr(c2, S, 's') - assert descr_x.__class__ is cls - assert descr_y.__class__ is GcPtrFieldDescr - assert descr_z.__class__ is NonGcPtrFieldDescr - assert descr_f.__class__ is clsf - assert descr_s.__class__ is clss + assert isinstance(descr_x, FieldDescr) assert descr_x.name == 'S.x' assert descr_y.name == 'S.y' assert descr_z.name == 'S.z' @@ -90,33 +74,27 @@ if not tsc: assert descr_x.offset < descr_y.offset < descr_z.offset assert descr_x.sort_key() < descr_y.sort_key() < descr_z.sort_key() - assert descr_x.get_field_size(False) == rffi.sizeof(lltype.Char) - assert descr_y.get_field_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr_z.get_field_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr_f.get_field_size(False) == rffi.sizeof(lltype.Float) - assert descr_s.get_field_size(False) == rffi.sizeof( - lltype.SingleFloat) + assert descr_x.field_size == rffi.sizeof(lltype.Char) + assert descr_y.field_size 
== rffi.sizeof(lltype.Ptr(T)) + assert descr_z.field_size == rffi.sizeof(lltype.Ptr(U)) + assert descr_f.field_size == rffi.sizeof(lltype.Float) + assert descr_s.field_size == rffi.sizeof(lltype.SingleFloat) else: assert isinstance(descr_x.offset, Symbolic) assert isinstance(descr_y.offset, Symbolic) assert isinstance(descr_z.offset, Symbolic) assert isinstance(descr_f.offset, Symbolic) assert isinstance(descr_s.offset, Symbolic) - assert isinstance(descr_x.get_field_size(True), Symbolic) - assert isinstance(descr_y.get_field_size(True), Symbolic) - assert isinstance(descr_z.get_field_size(True), Symbolic) - assert isinstance(descr_f.get_field_size(True), Symbolic) - assert isinstance(descr_s.get_field_size(True), Symbolic) - assert not descr_x.is_pointer_field() - assert descr_y.is_pointer_field() - assert not descr_z.is_pointer_field() - assert not descr_f.is_pointer_field() - assert not descr_s.is_pointer_field() - assert not descr_x.is_float_field() - assert not descr_y.is_float_field() - assert not descr_z.is_float_field() - assert descr_f.is_float_field() - assert not descr_s.is_float_field() + assert isinstance(descr_x.field_size, Symbolic) + assert isinstance(descr_y.field_size, Symbolic) + assert isinstance(descr_z.field_size, Symbolic) + assert isinstance(descr_f.field_size, Symbolic) + assert isinstance(descr_s.field_size, Symbolic) + assert descr_x.flag == FLAG_UNSIGNED + assert descr_y.flag == FLAG_POINTER + assert descr_z.flag == FLAG_UNSIGNED + assert descr_f.flag == FLAG_FLOAT + assert descr_s.flag == FLAG_UNSIGNED def test_get_field_descr_sign(): @@ -128,7 +106,8 @@ for tsc in [False, True]: c2 = GcCache(tsc) descr_x = get_field_descr(c2, S, 'x') - assert descr_x.is_field_signed() == signed + assert descr_x.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] def test_get_field_descr_longlong(): if sys.maxint > 2147483647: @@ -136,9 +115,8 @@ c0 = GcCache(False) S = lltype.GcStruct('S', ('y', lltype.UnsignedLongLong)) descr = get_field_descr(c0, S, 'y') - assert not descr.is_pointer_field() - assert descr.is_float_field() - assert descr.get_field_size(False) == 8 + assert descr.flag == FLAG_FLOAT + assert descr.field_size == 8 def test_get_array_descr(): @@ -149,19 +127,8 @@ A3 = lltype.GcArray(lltype.Ptr(U)) A4 = lltype.GcArray(lltype.Float) A5 = lltype.GcArray(lltype.Struct('x', ('v', lltype.Signed), - ('k', lltype.Signed))) + ('k', lltype.Signed))) A6 = lltype.GcArray(lltype.SingleFloat) - assert getArrayDescrClass(A2) is GcPtrArrayDescr - assert getArrayDescrClass(A3) is NonGcPtrArrayDescr - cls = getArrayDescrClass(A1) - assert cls != getArrayDescrClass(lltype.GcArray(lltype.Signed)) - assert cls == getArrayDescrClass(lltype.GcArray(lltype.Char)) - clsf = getArrayDescrClass(A4) - assert clsf != cls - assert clsf == getArrayDescrClass(lltype.GcArray(lltype.Float)) - clss = getArrayDescrClass(A6) - assert clss not in (clsf, cls) - assert clss == getArrayDescrClass(lltype.GcArray(rffi.UINT)) # c0 = GcCache(False) descr1 = get_array_descr(c0, A1) @@ -170,82 +137,61 @@ descr4 = get_array_descr(c0, A4) descr5 = get_array_descr(c0, A5) descr6 = get_array_descr(c0, A6) - assert descr1.__class__ is cls - assert descr2.__class__ is GcPtrArrayDescr - assert descr3.__class__ is NonGcPtrArrayDescr - assert descr4.__class__ is clsf - assert descr6.__class__ is clss + assert isinstance(descr1, ArrayDescr) assert descr1 == get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert not descr1.is_array_of_pointers() - assert descr2.is_array_of_pointers() - assert not 
descr3.is_array_of_pointers()
-    assert not descr4.is_array_of_pointers()
-    assert not descr5.is_array_of_pointers()
-    assert not descr1.is_array_of_floats()
-    assert not descr2.is_array_of_floats()
-    assert not descr3.is_array_of_floats()
-    assert descr4.is_array_of_floats()
-    assert not descr5.is_array_of_floats()
+    assert descr1.flag == FLAG_UNSIGNED
+    assert descr2.flag == FLAG_POINTER
+    assert descr3.flag == FLAG_UNSIGNED
+    assert descr4.flag == FLAG_FLOAT
+    assert descr5.flag == FLAG_STRUCT
+    assert descr6.flag == FLAG_UNSIGNED
     #
     def get_alignment(code):
         # Retrieve default alignment for the compiler/platform
         return struct.calcsize('l' + code) - struct.calcsize(code)
-    assert descr1.get_base_size(False) == get_alignment('c')
-    assert descr2.get_base_size(False) == get_alignment('p')
-    assert descr3.get_base_size(False) == get_alignment('p')
-    assert descr4.get_base_size(False) == get_alignment('d')
-    assert descr5.get_base_size(False) == get_alignment('f')
-    assert descr1.get_ofs_length(False) == 0
-    assert descr2.get_ofs_length(False) == 0
-    assert descr3.get_ofs_length(False) == 0
-    assert descr4.get_ofs_length(False) == 0
-    assert descr5.get_ofs_length(False) == 0
-    assert descr1.get_item_size(False) == rffi.sizeof(lltype.Char)
-    assert descr2.get_item_size(False) == rffi.sizeof(lltype.Ptr(T))
-    assert descr3.get_item_size(False) == rffi.sizeof(lltype.Ptr(U))
-    assert descr4.get_item_size(False) == rffi.sizeof(lltype.Float)
-    assert descr5.get_item_size(False) == rffi.sizeof(lltype.Signed) * 2
-    assert descr6.get_item_size(False) == rffi.sizeof(lltype.SingleFloat)
+    assert descr1.basesize == get_alignment('c')
+    assert descr2.basesize == get_alignment('p')
+    assert descr3.basesize == get_alignment('p')
+    assert descr4.basesize == get_alignment('d')
+    assert descr5.basesize == get_alignment('f')
+    assert descr1.lendescr.offset == 0
+    assert descr2.lendescr.offset == 0
+    assert descr3.lendescr.offset == 0
+    assert descr4.lendescr.offset == 0
+    assert descr5.lendescr.offset == 0
+    assert descr1.itemsize == rffi.sizeof(lltype.Char)
+    assert descr2.itemsize == rffi.sizeof(lltype.Ptr(T))
+    assert descr3.itemsize == rffi.sizeof(lltype.Ptr(U))
+    assert descr4.itemsize == rffi.sizeof(lltype.Float)
+    assert descr5.itemsize == rffi.sizeof(lltype.Signed) * 2
+    assert descr6.itemsize == rffi.sizeof(lltype.SingleFloat)
     #
-    assert isinstance(descr1.get_base_size(True), Symbolic)
-    assert isinstance(descr2.get_base_size(True), Symbolic)
-    assert isinstance(descr3.get_base_size(True), Symbolic)
-    assert isinstance(descr4.get_base_size(True), Symbolic)
-    assert isinstance(descr5.get_base_size(True), Symbolic)
-    assert isinstance(descr1.get_ofs_length(True), Symbolic)
-    assert isinstance(descr2.get_ofs_length(True), Symbolic)
-    assert isinstance(descr3.get_ofs_length(True), Symbolic)
-    assert isinstance(descr4.get_ofs_length(True), Symbolic)
-    assert isinstance(descr5.get_ofs_length(True), Symbolic)
-    assert isinstance(descr1.get_item_size(True), Symbolic)
-    assert isinstance(descr2.get_item_size(True), Symbolic)
-    assert isinstance(descr3.get_item_size(True), Symbolic)
-    assert isinstance(descr4.get_item_size(True), Symbolic)
-    assert isinstance(descr5.get_item_size(True), Symbolic)
     CA = rffi.CArray(lltype.Signed)
     descr = get_array_descr(c0, CA)
-    assert not descr.is_array_of_floats()
-    assert descr.get_base_size(False) == 0
-    assert descr.get_ofs_length(False) == -1
+    assert descr.flag == FLAG_SIGNED
+    assert descr.basesize == 0
+    assert descr.lendescr is None
     CA = rffi.CArray(lltype.Ptr(lltype.GcStruct('S')))
     descr = get_array_descr(c0, CA)
-    assert descr.is_array_of_pointers()
-    assert descr.get_base_size(False) == 0
-    assert descr.get_ofs_length(False) == -1
+    assert descr.flag == FLAG_POINTER
+    assert descr.basesize == 0
+    assert descr.lendescr is None
     CA = rffi.CArray(lltype.Ptr(lltype.Struct('S')))
     descr = get_array_descr(c0, CA)
-    assert descr.get_base_size(False) == 0
-    assert descr.get_ofs_length(False) == -1
+    assert descr.flag == FLAG_UNSIGNED
+    assert descr.basesize == 0
+    assert descr.lendescr is None
     CA = rffi.CArray(lltype.Float)
     descr = get_array_descr(c0, CA)
-    assert descr.is_array_of_floats()
-    assert descr.get_base_size(False) == 0
-    assert descr.get_ofs_length(False) == -1
+    assert descr.flag == FLAG_FLOAT
+    assert descr.basesize == 0
+    assert descr.lendescr is None
     CA = rffi.CArray(rffi.FLOAT)
     descr = get_array_descr(c0, CA)
-    assert not descr.is_array_of_floats()
-    assert descr.get_base_size(False) == 0
-    assert descr.get_ofs_length(False) == -1
+    assert descr.flag == FLAG_UNSIGNED
+    assert descr.basesize == 0
+    assert descr.itemsize == rffi.sizeof(lltype.SingleFloat)
+    assert descr.lendescr is None


 def test_get_array_descr_sign():
@@ -257,46 +203,55 @@
         for tsc in [False, True]:
             c2 = GcCache(tsc)
             arraydescr = get_array_descr(c2, A)
-            assert arraydescr.is_item_signed() == signed
+            assert arraydescr.flag == {False: FLAG_UNSIGNED,
+                                       True: FLAG_SIGNED }[signed]
         #
         RA = rffi.CArray(RESTYPE)
         for tsc in [False, True]:
             c2 = GcCache(tsc)
             arraydescr = get_array_descr(c2, RA)
-            assert arraydescr.is_item_signed() == signed
+            assert arraydescr.flag == {False: FLAG_UNSIGNED,
+                                       True: FLAG_SIGNED }[signed]
+
+
+def test_get_array_descr_str():
+    c0 = GcCache(False)
+    descr1 = get_array_descr(c0, rstr.STR)
+    assert descr1.itemsize == rffi.sizeof(lltype.Char)
+    assert descr1.flag == FLAG_UNSIGNED


 def test_get_call_descr_not_translated():
     c0 = GcCache(False)
     descr1 = get_call_descr(c0, [lltype.Char, lltype.Signed], lltype.Char)
-    assert descr1.get_result_size(False) == rffi.sizeof(lltype.Char)
-    assert descr1.get_return_type() == history.INT
+    assert descr1.get_result_size() == rffi.sizeof(lltype.Char)
+    assert descr1.get_result_type() == history.INT
     assert descr1.arg_classes == "ii"
     #
     T = lltype.GcStruct('T')
     descr2 = get_call_descr(c0, [lltype.Ptr(T)], lltype.Ptr(T))
-    assert descr2.get_result_size(False) == rffi.sizeof(lltype.Ptr(T))
-    assert descr2.get_return_type() == history.REF
+    assert descr2.get_result_size() == rffi.sizeof(lltype.Ptr(T))
+    assert descr2.get_result_type() == history.REF
     assert descr2.arg_classes == "r"
     #
     U = lltype.GcStruct('U', ('x', lltype.Signed))
     assert descr2 == get_call_descr(c0, [lltype.Ptr(U)], lltype.Ptr(U))
     #
     V = lltype.Struct('V', ('x', lltype.Signed))
-    assert (get_call_descr(c0, [], lltype.Ptr(V)).get_return_type() ==
+    assert (get_call_descr(c0, [], lltype.Ptr(V)).get_result_type() ==
             history.INT)
     #
-    assert (get_call_descr(c0, [], lltype.Void).get_return_type() ==
+    assert (get_call_descr(c0, [], lltype.Void).get_result_type() ==
             history.VOID)
     #
     descr4 = get_call_descr(c0, [lltype.Float, lltype.Float], lltype.Float)
-    assert descr4.get_result_size(False) == rffi.sizeof(lltype.Float)
-    assert descr4.get_return_type() == history.FLOAT
+    assert descr4.get_result_size() == rffi.sizeof(lltype.Float)
+    assert descr4.get_result_type() == history.FLOAT
     assert descr4.arg_classes == "ff"
     #
     descr5 = get_call_descr(c0, [lltype.SingleFloat], lltype.SingleFloat)
-    assert descr5.get_result_size(False) ==
rffi.sizeof(lltype.SingleFloat) - assert descr5.get_return_type() == "S" + assert descr5.get_result_size() == rffi.sizeof(lltype.SingleFloat) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_get_call_descr_not_translated_longlong(): @@ -305,13 +260,13 @@ c0 = GcCache(False) # descr5 = get_call_descr(c0, [lltype.SignedLongLong], lltype.Signed) - assert descr5.get_result_size(False) == 4 - assert descr5.get_return_type() == history.INT + assert descr5.get_result_size() == 4 + assert descr5.get_result_type() == history.INT assert descr5.arg_classes == "L" # descr6 = get_call_descr(c0, [lltype.Signed], lltype.SignedLongLong) - assert descr6.get_result_size(False) == 8 - assert descr6.get_return_type() == "L" + assert descr6.get_result_size() == 8 + assert descr6.get_result_type() == "L" assert descr6.arg_classes == "i" def test_get_call_descr_translated(): @@ -319,18 +274,18 @@ T = lltype.GcStruct('T') U = lltype.GcStruct('U', ('x', lltype.Signed)) descr3 = get_call_descr(c1, [lltype.Ptr(T)], lltype.Ptr(U)) - assert isinstance(descr3.get_result_size(True), Symbolic) - assert descr3.get_return_type() == history.REF + assert isinstance(descr3.get_result_size(), Symbolic) + assert descr3.get_result_type() == history.REF assert descr3.arg_classes == "r" # descr4 = get_call_descr(c1, [lltype.Float, lltype.Float], lltype.Float) - assert isinstance(descr4.get_result_size(True), Symbolic) - assert descr4.get_return_type() == history.FLOAT + assert isinstance(descr4.get_result_size(), Symbolic) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c1, [lltype.SingleFloat], lltype.SingleFloat) - assert isinstance(descr5.get_result_size(True), Symbolic) - assert descr5.get_return_type() == "S" + assert isinstance(descr5.get_result_size(), Symbolic) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_call_descr_extra_info(): @@ -358,6 +313,10 @@ def test_repr_of_descr(): + def repr_of_descr(descr): + s = descr.repr_of_descr() + assert ',' not in s # makes the life easier for pypy.tool.jitlogparser + return s c0 = GcCache(False) T = lltype.GcStruct('T') S = lltype.GcStruct('S', ('x', lltype.Char), @@ -365,33 +324,34 @@ ('z', lltype.Ptr(T))) descr1 = get_size_descr(c0, S) s = symbolic.get_size(S, False) - assert descr1.repr_of_descr() == '' % s + assert repr_of_descr(descr1) == '' % s # descr2 = get_field_descr(c0, S, 'y') o, _ = symbolic.get_field_token(S, 'y', False) - assert descr2.repr_of_descr() == '' % o + assert repr_of_descr(descr2) == '' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) - assert descr2i.repr_of_descr() == '' % o + assert repr_of_descr(descr2i) == '' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) - assert descr3.repr_of_descr() == '' + o = symbolic.get_size(lltype.Ptr(S), False) + assert repr_of_descr(descr3) == '' % o # descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert descr3i.repr_of_descr() == '' + assert repr_of_descr(descr3i) == '' # descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) - assert 'GcPtrCallDescr' in descr4.repr_of_descr() + assert repr_of_descr(descr4) == '' % o # descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) - assert 'CharCallDescr' in descr4i.repr_of_descr() + assert repr_of_descr(descr4i) == '' # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) - assert 'FloatCallDescr' in 
descr4f.repr_of_descr() + assert repr_of_descr(descr4f) == '' # descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat) - assert 'SingleFloatCallDescr' in descr5f.repr_of_descr() + assert repr_of_descr(descr5f) == '' def test_call_stubs_1(): c0 = GcCache(False) @@ -401,10 +361,10 @@ def f(a, b): return 'c' - call_stub = descr1.call_stub fnptr = llhelper(lltype.Ptr(lltype.FuncType(ARGS, RES)), f) - res = call_stub(rffi.cast(lltype.Signed, fnptr), [1, 2], None, None) + res = descr1.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [1, 2], None, None) assert res == ord('c') def test_call_stubs_2(): @@ -421,8 +381,8 @@ a = lltype.malloc(ARRAY, 3) opaquea = lltype.cast_opaque_ptr(llmemory.GCREF, a) a[0] = 1 - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [], [opaquea], [longlong.getfloatstorage(3.5)]) + res = descr2.call_stub_f(rffi.cast(lltype.Signed, fnptr), + [], [opaquea], [longlong.getfloatstorage(3.5)]) assert longlong.getrealfloat(res) == 4.5 def test_call_stubs_single_float(): @@ -445,6 +405,22 @@ a = intmask(singlefloat2uint(r_singlefloat(-10.0))) b = intmask(singlefloat2uint(r_singlefloat(3.0))) c = intmask(singlefloat2uint(r_singlefloat(2.0))) - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [a, b, c], [], []) + res = descr2.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [a, b, c], [], []) assert float(uint2singlefloat(rffi.r_uint(res))) == -11.5 + +def test_field_arraylen_descr(): + c0 = GcCache(True) + A1 = lltype.GcArray(lltype.Signed) + fielddescr = get_field_arraylen_descr(c0, A1) + assert isinstance(fielddescr, FieldDescr) + ofs = fielddescr.offset + assert repr(ofs) == '< ArrayLengthOffset >' + # + fielddescr = get_field_arraylen_descr(c0, rstr.STR) + ofs = fielddescr.offset + assert repr(ofs) == ("< " + " 'chars'> + < ArrayLengthOffset" + " > >") + # caching: + assert fielddescr is get_field_arraylen_descr(c0, rstr.STR) diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -1,5 +1,6 @@ from pypy.rlib.libffi import types from pypy.jit.codewriter.longlong import is_64_bit +from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport.ffisupport import * @@ -15,7 +16,9 @@ args = [types.sint, types.pointer] descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, None, ffi_flags=42) - assert isinstance(descr, DynamicIntCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_type == 'i' + assert descr.result_flag == FLAG_SIGNED assert descr.arg_classes == 'ii' assert descr.get_ffi_flags() == 42 @@ -24,18 +27,20 @@ assert descr is None # missing floats descr = get_call_descr_dynamic(FakeCPU(supports_floats=True), args, types.void, None, ffi_flags=43) - assert isinstance(descr, VoidCallDescr) + assert descr.result_type == 'v' + assert descr.result_flag == FLAG_VOID assert descr.arg_classes == 'ifi' assert descr.get_ffi_flags() == 43 descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_SIGNED assert descr.is_result_signed() == True descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert isinstance(descr, CallDescr) + assert descr.get_result_size() == 1 + 
assert descr.result_flag == FLAG_UNSIGNED assert descr.is_result_signed() == False if not is_64_bit: @@ -44,7 +49,9 @@ assert descr is None # missing longlongs descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True), [], types.slonglong, None, ffi_flags=43) - assert isinstance(descr, LongLongCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_flag == FLAG_FLOAT + assert descr.result_type == 'L' assert descr.get_ffi_flags() == 43 else: assert types.slonglong is types.slong @@ -53,6 +60,6 @@ assert descr is None # missing singlefloats descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True), [], types.float, None, ffi_flags=44) - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - assert isinstance(descr, SingleFloatCallDescr) + assert descr.result_flag == FLAG_UNSIGNED + assert descr.result_type == 'S' assert descr.get_ffi_flags() == 44 diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -6,6 +6,7 @@ from pypy.jit.backend.llsupport.gc import * from pypy.jit.backend.llsupport import symbolic from pypy.jit.metainterp.gc import get_description +from pypy.jit.metainterp.history import BoxPtr, BoxInt, ConstPtr from pypy.jit.metainterp.resoperation import get_deep_immutable_oplist from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE @@ -15,12 +16,12 @@ gc_ll_descr = GcLLDescr_boehm(None, None, None) # record = [] - prev_funcptr_for_new = gc_ll_descr.funcptr_for_new - def my_funcptr_for_new(size): - p = prev_funcptr_for_new(size) + prev_malloc_fn_ptr = gc_ll_descr.malloc_fn_ptr + def my_malloc_fn_ptr(size): + p = prev_malloc_fn_ptr(size) record.append((size, p)) return p - gc_ll_descr.funcptr_for_new = my_funcptr_for_new + gc_ll_descr.malloc_fn_ptr = my_malloc_fn_ptr # # ---------- gc_malloc ---------- S = lltype.GcStruct('S', ('x', lltype.Signed)) @@ -32,8 +33,8 @@ A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(gc_ll_descr, A) p = gc_ll_descr.gc_malloc_array(arraydescr, 10) - assert record == [(arraydescr.get_base_size(False) + - 10 * arraydescr.get_item_size(False), p)] + assert record == [(arraydescr.basesize + + 10 * arraydescr.itemsize, p)] del record[:] # ---------- gc_malloc_str ---------- p = gc_ll_descr.gc_malloc_str(10) @@ -246,24 +247,28 @@ def __init__(self): self.record = [] + def _malloc(self, type_id, size): + tid = llop.combine_ushort(lltype.Signed, type_id, 0) + x = llmemory.raw_malloc(self.gcheaderbuilder.size_gc_header + size) + x += self.gcheaderbuilder.size_gc_header + return x, tid + def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size, has_finalizer, has_light_finalizer, contains_weakptr): assert not contains_weakptr - assert not has_finalizer # in these tests - assert not has_light_finalizer # in these tests - p = llmemory.raw_malloc(size) + assert not has_finalizer + assert not has_light_finalizer + p, tid = self._malloc(type_id, size) p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("fixedsize", repr(size), tid, p)) return p def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, itemsize, offset_to_length): - p = llmemory.raw_malloc(size + itemsize * length) + p, tid = self._malloc(type_id, size + itemsize * length) (p + offset_to_length).signed[0] = length p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = 
llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("varsize", tid, length, repr(size), repr(itemsize), repr(offset_to_length), p)) @@ -322,43 +327,40 @@ gc_ll_descr = GcLLDescr_framework(gcdescr, FakeTranslator(), None, llop1) gc_ll_descr.initialize() + llop1.gcheaderbuilder = gc_ll_descr.gcheaderbuilder self.llop1 = llop1 self.gc_ll_descr = gc_ll_descr self.fake_cpu = FakeCPU() - def test_args_for_new(self): - S = lltype.GcStruct('S', ('x', lltype.Signed)) - sizedescr = get_size_descr(self.gc_ll_descr, S) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed - A = lltype.GcArray(lltype.Signed) - arraydescr = get_array_descr(self.gc_ll_descr, A) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed +## def test_args_for_new(self): +## S = lltype.GcStruct('S', ('x', lltype.Signed)) +## sizedescr = get_size_descr(self.gc_ll_descr, S) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed +## A = lltype.GcArray(lltype.Signed) +## arraydescr = get_array_descr(self.gc_ll_descr, A) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed def test_gc_malloc(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) sizedescr = get_size_descr(self.gc_ll_descr, S) p = self.gc_ll_descr.gc_malloc(sizedescr) - assert self.llop1.record == [("fixedsize", - repr(sizedescr.size), + assert lltype.typeOf(p) == llmemory.GCREF + assert self.llop1.record == [("fixedsize", repr(sizedescr.size), sizedescr.tid, p)] - assert repr(self.gc_ll_descr.args_for_new(sizedescr)) == repr( - [sizedescr.size, sizedescr.tid]) def test_gc_malloc_array(self): A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(self.gc_ll_descr, A) p = self.gc_ll_descr.gc_malloc_array(arraydescr, 10) assert self.llop1.record == [("varsize", arraydescr.tid, 10, - repr(arraydescr.get_base_size(True)), - repr(arraydescr.get_item_size(True)), - repr(arraydescr.get_ofs_length(True)), + repr(arraydescr.basesize), + repr(arraydescr.itemsize), + repr(arraydescr.lendescr.offset), p)] - assert repr(self.gc_ll_descr.args_for_new_array(arraydescr)) == repr( - [arraydescr.get_item_size(True), arraydescr.tid]) def test_gc_malloc_str(self): p = self.gc_ll_descr.gc_malloc_str(10) @@ -404,10 +406,11 @@ gc_ll_descr = self.gc_ll_descr llop1 = self.llop1 # - newops = [] + rewriter = GcRewriterAssembler(gc_ll_descr, None) + newops = rewriter.newops v_base = BoxPtr() v_value = BoxPtr() - gc_ll_descr._gen_write_barrier(newops, v_base, v_value) + rewriter.gen_write_barrier(v_base, v_value) assert llop1.record == [] assert len(newops) == 1 assert newops[0].getopnum() == rop.COND_CALL_GC_WB @@ -427,8 +430,7 @@ operations = gc_ll_descr.rewrite_assembler(None, operations, []) assert len(operations) == 0 - def test_rewrite_assembler_1(self): - # check recording of ConstPtrs + def test_record_constptrs(self): class MyFakeCPU(object): def cast_adr_to_int(self, adr): assert adr == "some fake address" @@ -455,211 +457,6 @@ assert operations2 == operations assert gcrefs == [s_gcref] - def test_rewrite_assembler_2(self): - # check write barriers before SETFIELD_GC - v_base = BoxPtr() - v_value = BoxPtr() - field_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETFIELD_GC, [v_base, v_value], None, - descr=field_descr), - ] - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations 
= gc_ll_descr.rewrite_assembler(self.fake_cpu, operations, - []) - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETFIELD_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_value - assert operations[1].getdescr() == field_descr - - def test_rewrite_assembler_3(self): - # check write barriers before SETARRAYITEM_GC - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_4(self): - # check write barriers before SETARRAYITEM_GC, - # if we have actually a write_barrier_from_array. 
- self.llop1._have_wb_from_array = True - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - if isinstance(v_new_length, ConstInt) and v_new_length.value < 130: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - else: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB_ARRAY - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_index - assert operations[0].getarg(2) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_5(self): - S = lltype.GcStruct('S') - A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) - interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, - A.OF, 'x') - wbdescr = self.gc_ll_descr.write_barrier_descr - ops = parse(""" - [p1, p2] - setinteriorfield_gc(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - expected = parse(""" - [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setinteriorfield_raw(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - # no write barrier - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_2(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - wbdescr = self.gc_ll_descr.write_barrier_descr - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = 
new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_3(self): - A = lltype.GcArray(lltype.Ptr(lltype.GcStruct('S'))) - arraydescr = get_array_descr(self.gc_ll_descr, A) - ops = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) class TestFrameworkMiniMark(TestFramework): gc = 'minimark' diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -0,0 +1,668 @@ +from pypy.jit.backend.llsupport.descr import * +from pypy.jit.backend.llsupport.gc import * +from pypy.jit.metainterp.gc import get_description +from pypy.jit.tool.oparser import parse +from pypy.jit.metainterp.optimizeopt.util import equaloplists +from pypy.jit.codewriter.heaptracker import register_known_gctype + + +class Evaluator(object): + def __init__(self, scope): + self.scope = scope + def __getitem__(self, key): + return eval(key, self.scope) + + +class RewriteTests(object): + def check_rewrite(self, frm_operations, to_operations, **namespace): + S = lltype.GcStruct('S', ('x', lltype.Signed), + ('y', lltype.Signed)) + sdescr = get_size_descr(self.gc_ll_descr, S) + sdescr.tid = 1234 + # + T = lltype.GcStruct('T', ('y', lltype.Signed), + ('z', lltype.Ptr(S)), + ('t', lltype.Signed)) + tdescr = get_size_descr(self.gc_ll_descr, T) + tdescr.tid = 5678 + tzdescr = get_field_descr(self.gc_ll_descr, T, 'z') + # + A = lltype.GcArray(lltype.Signed) + adescr = get_array_descr(self.gc_ll_descr, A) + adescr.tid = 4321 + alendescr = adescr.lendescr + # + B = lltype.GcArray(lltype.Char) + bdescr = get_array_descr(self.gc_ll_descr, B) + bdescr.tid = 8765 + blendescr = bdescr.lendescr + # + C = lltype.GcArray(lltype.Ptr(S)) + cdescr = get_array_descr(self.gc_ll_descr, C) + cdescr.tid = 8111 + clendescr = cdescr.lendescr + # + E = lltype.GcStruct('Empty') + edescr = get_size_descr(self.gc_ll_descr, E) + edescr.tid = 9000 + # + vtable_descr = self.gc_ll_descr.fielddescr_vtable + O = lltype.GcStruct('O', ('parent', rclass.OBJECT), + ('x', lltype.Signed)) + o_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) + register_known_gctype(self.cpu, o_vtable, O) + # + tiddescr = self.gc_ll_descr.fielddescr_tid + wbdescr = self.gc_ll_descr.write_barrier_descr + WORD = globals()['WORD'] + # + strdescr = self.gc_ll_descr.str_descr + unicodedescr = self.gc_ll_descr.unicode_descr + strlendescr = strdescr.lendescr + unicodelendescr = unicodedescr.lendescr + # + namespace.update(locals()) + # + for funcname in self.gc_ll_descr._generated_functions: + namespace[funcname] 
= self.gc_ll_descr.get_malloc_fn(funcname) + namespace[funcname + '_descr'] = getattr(self.gc_ll_descr, + '%s_descr' % funcname) + # + ops = parse(frm_operations, namespace=namespace) + expected = parse(to_operations % Evaluator(namespace), + namespace=namespace) + operations = self.gc_ll_descr.rewrite_assembler(self.cpu, + ops.operations, + []) + equaloplists(operations, expected.operations) + + +class TestBoehm(RewriteTests): + def setup_method(self, meth): + class FakeCPU(object): + def sizeof(self, STRUCT): + return SizeDescrWithVTable(102) + self.cpu = FakeCPU() + self.gc_ll_descr = GcLLDescr_boehm(None, None, None) + + def test_new(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_no_collapsing(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + p1 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_new_array_fixed(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(adescr.basesize + 10 * adescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_new_array_variable(self): + self.check_rewrite(""" + [i1] + p0 = new_array(i1, descr=adescr) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(adescr.basesize)d, \ + i1, \ + %(adescr.itemsize)d, \ + %(adescr.lendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_newstr(self): + self.check_rewrite(""" + [i1] + p0 = newstr(i1) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(strdescr.basesize)d, \ + i1, \ + %(strdescr.itemsize)d, \ + %(strlendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_newunicode(self): + self.check_rewrite(""" + [i1] + p0 = newunicode(10) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(unicodedescr.basesize + \ + 10 * unicodedescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=unicodelendescr) + jump() + """) + + +class TestFramework(RewriteTests): + def setup_method(self, meth): + class config_(object): + class translation(object): + gc = 'hybrid' + gcrootfinder = 'asmgcc' + gctransformer = 'framework' + gcremovetypeptr = False + gcdescr = get_description(config_) + self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, + really_not_translated=True) + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: True) + # + class FakeCPU(object): + def sizeof(self, STRUCT): + descr = SizeDescrWithVTable(102) + descr.tid = 9315 + return descr + self.cpu = FakeCPU() + + def test_rewrite_assembler_new_to_malloc(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(sdescr.size)d) + 
setfield_gc(p0, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new3_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=tdescr) + p2 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + tdescr.size + sdescr.size)d) + setfield_gc(p0, 1234, descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 5678, descr=tiddescr) + p2 = int_add(p1, %(tdescr.size)d) + setfield_gc(p2, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 4321, descr=tiddescr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_new_and_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + \ + adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 1234, descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 4321, descr=tiddescr) + setfield_gc(p1, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_round_up(self): + self.check_rewrite(""" + [] + p0 = new_array(6, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(bdescr.basesize + 8)d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 6, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_round_up_always(self): + self.check_rewrite(""" + [] + p0 = new_array(5, descr=bdescr) + p1 = new_array(5, descr=bdescr) + p2 = new_array(5, descr=bdescr) + p3 = new_array(5, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4 * (bdescr.basesize + 8))d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 5, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 8)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 5, descr=blendescr) + p2 = int_add(p1, %(bdescr.basesize + 8)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 5, descr=blendescr) + p3 = int_add(p2, %(bdescr.basesize + 8)d) + setfield_gc(p3, 8765, descr=tiddescr) + setfield_gc(p3, 5, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_minimal_size(self): + self.check_rewrite(""" + [] + p0 = new(descr=edescr) + p1 = new(descr=edescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4*WORD)d) + setfield_gc(p0, 9000, descr=tiddescr) + p1 = int_add(p0, %(2*WORD)d) + setfield_gc(p1, 9000, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_variable_size(self): + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=bdescr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, i0, \ + descr=malloc_array_descr) + jump(i0) + """) + + def test_rewrite_assembler_nonstandard_array(self): + # a non-standard array is a bit hard to get; e.g. GcArray(Float) + # is like that on Win32, but not on Linux. Build one manually... 
+ NONSTD = lltype.GcArray(lltype.Float) + nonstd_descr = get_array_descr(self.gc_ll_descr, NONSTD) + nonstd_descr.tid = 6464 + nonstd_descr.basesize = 64 # <= hacked + nonstd_descr.itemsize = 8 + nonstd_descr_gcref = 123 + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=nonstd_descr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array_nonstandard), \ + 64, 8, \ + %(nonstd_descr.lendescr.offset)d, \ + 6464, i0, \ + descr=malloc_array_nonstandard_descr) + jump(i0) + """, nonstd_descr=nonstd_descr) + + def test_rewrite_assembler_maximal_size_1(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_array(103, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(bdescr.basesize + 104)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_maximal_size_2(self): + self.gc_ll_descr.max_size_of_young_obj = 300 + self.check_rewrite(""" + [] + p0 = new_array(101, descr=bdescr) + p1 = new_array(102, descr=bdescr) # two new_arrays can be combined + p2 = new_array(103, descr=bdescr) # but not all three + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(2 * (bdescr.basesize + 104))d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 101, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 104)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 102, descr=blendescr) + p2 = call_malloc_nursery( \ + %(bdescr.basesize + 104)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_huge_size(self): + # "huge" is defined as "larger than 0xffffff bytes, or 16MB" + self.check_rewrite(""" + [] + p0 = new_array(20000000, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, 20000000, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(104) # rounded up + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_new_with_vtable_too_big(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_rewrite_assembler_newstr_newunicode(self): + self.check_rewrite(""" + [i2] + p0 = newstr(14) + p1 = newunicode(10) + p2 = newunicode(i2) + p3 = newstr(i2) + jump() + """, """ + [i2] + p0 = call_malloc_nursery( \ + %(strdescr.basesize + 16 * strdescr.itemsize + \ + unicodedescr.basesize + 10 * unicodedescr.itemsize)d) + setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr) + setfield_gc(p0, 14, descr=strlendescr) + p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) + setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) + setfield_gc(p1, 10, descr=unicodelendescr) + p2 = call_malloc_gc(ConstClass(malloc_unicode), i2, \ + descr=malloc_unicode_descr) + p3 = call_malloc_gc(ConstClass(malloc_str), i2, \ + descr=malloc_str_descr) + jump() + """) + + def test_write_barrier_before_setfield_gc(self): + 
self.check_rewrite(""" + [p1, p2] + setfield_gc(p1, p2, descr=tzdescr) + jump() + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setfield_raw(p1, p2, descr=tzdescr) + jump() + """) + + def test_write_barrier_before_array_without_from_array(self): + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: False) + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_short_array(self): + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(129, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 129 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 129, descr=clendescr) + call(123456) + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_long_array(self): + # the limit of "being too long" is fixed, arbitrarily, at 130 + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(130, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 130 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 130, descr=clendescr) + call(123456) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_unknown_array(self): + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_label_makes_size_unknown(self): + self.check_rewrite(""" + [i2, p3] + p1 = new_array(5, descr=cdescr) + label(p1, i2, p3) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 5, descr=clendescr) + label(p1, i2, p3) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_setinteriorfield_gc(self): + S1 = lltype.GcStruct('S1') + INTERIOR = lltype.GcArray(('z', lltype.Ptr(S1))) + interiordescr = get_array_descr(self.gc_ll_descr, INTERIOR) + interiordescr.tid = 1291 + interiorlendescr = interiordescr.lendescr + interiorzdescr = get_interiorfield_descr(self.gc_ll_descr, + INTERIOR, 'z') + self.check_rewrite(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, interiorzdescr=interiorzdescr) + + def test_initialization_store(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_2(self): + self.check_rewrite(""" + [] + p0 = new(descr=tdescr) + p1 = 
new(descr=sdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(tdescr.size + sdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = int_add(p0, %(tdescr.size)d) + setfield_gc(p1, 1234, descr=tiddescr) + # <<>> + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_array(self): + self.check_rewrite(""" + [p1, i2] + p0 = new_array(5, descr=cdescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """, """ + [p1, i2] + p0 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p0, 8111, descr=tiddescr) + setfield_gc(p0, 5, descr=clendescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """) + + def test_non_initialization_store(self): + self.check_rewrite(""" + [i0] + p0 = new(descr=tdescr) + p1 = newstr(i0) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [i0] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = call_malloc_gc(ConstClass(malloc_str), i0, \ + descr=malloc_str_descr) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) + + def test_non_initialization_store_label(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + label(p0, p1) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + label(p0, p1) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -2930,6 +2930,8 @@ # overflowing value: fail = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) assert fail.identifier == excdescr.identifier + exc = self.cpu.grab_exc_value() + assert exc == "memoryerror!" 
def test_compile_loop_with_target(self): i0 = BoxInt() diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -70,10 +70,6 @@ self.cpu = cpu self.verbose = False self.rtyper = cpu.rtyper - self.malloc_func_addr = 0 - self.malloc_array_func_addr = 0 - self.malloc_str_func_addr = 0 - self.malloc_unicode_func_addr = 0 self.fail_boxes_int = values_array(lltype.Signed, failargs_limit) self.fail_boxes_ptr = values_array(llmemory.GCREF, failargs_limit) self.fail_boxes_float = values_array(longlong.FLOATSTORAGE, @@ -108,20 +104,6 @@ # the address of the function called by 'new' gc_ll_descr = self.cpu.gc_ll_descr gc_ll_descr.initialize() - ll_new = gc_ll_descr.get_funcptr_for_new() - self.malloc_func_addr = rffi.cast(lltype.Signed, ll_new) - if gc_ll_descr.get_funcptr_for_newarray is not None: - ll_new_array = gc_ll_descr.get_funcptr_for_newarray() - self.malloc_array_func_addr = rffi.cast(lltype.Signed, - ll_new_array) - if gc_ll_descr.get_funcptr_for_newstr is not None: - ll_new_str = gc_ll_descr.get_funcptr_for_newstr() - self.malloc_str_func_addr = rffi.cast(lltype.Signed, - ll_new_str) - if gc_ll_descr.get_funcptr_for_newunicode is not None: - ll_new_unicode = gc_ll_descr.get_funcptr_for_newunicode() - self.malloc_unicode_func_addr = rffi.cast(lltype.Signed, - ll_new_unicode) self.memcpy_addr = self.cpu.cast_ptr_to_int(support.memcpy_fn) self._build_failure_recovery(False) self._build_failure_recovery(True) @@ -275,7 +257,8 @@ # self.mc = codebuf.MachineCodeBlockWrapper() # call on_leave_jitted_save_exc() - addr = self.cpu.get_on_leave_jitted_int(save_exception=True) + addr = self.cpu.get_on_leave_jitted_int(save_exception=True, + default_to_memoryerror=True) self.mc.CALL(imm(addr)) self.mc.MOV_ri(eax.value, self.cpu.propagate_exception_v) self._call_footer() @@ -865,8 +848,8 @@ high_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[1] low_part = intmask(low_part) high_part = intmask(high_part) - self.mc.MOV_bi(to_loc.value, low_part) - self.mc.MOV_bi(to_loc.value + 4, high_part) + self.mc.MOV32_bi(to_loc.value, low_part) + self.mc.MOV32_bi(to_loc.value + 4, high_part) def regalloc_perform(self, op, arglocs, resloc): genop_list[op.getopnum()](self, op, arglocs, resloc) @@ -1357,46 +1340,10 @@ self.mc.SHR_ri(resloc.value, 7) self.mc.AND_ri(resloc.value, 1) - def genop_new_with_vtable(self, op, arglocs, result_loc): - assert result_loc is eax - loc_vtable = arglocs[-1] - assert isinstance(loc_vtable, ImmedLoc) - arglocs = arglocs[:-1] - self.call(self.malloc_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - self.set_vtable(eax, loc_vtable) + # ---------- - def set_vtable(self, loc, loc_vtable): - if self.cpu.vtable_offset is not None: - assert isinstance(loc, RegLoc) - assert isinstance(loc_vtable, ImmedLoc) - self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) - - def set_new_array_length(self, loc, ofs_length, loc_num_elem): - assert isinstance(loc, RegLoc) - assert isinstance(loc_num_elem, ImmedLoc) - self.mc.MOV(mem(loc, ofs_length), loc_num_elem) - - # XXX genop_new is abused for all varsized mallocs with Boehm, for now - # (instead of genop_new_array, genop_newstr, genop_newunicode) - def genop_new(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_new_array(self, op, arglocs, result_loc): - assert result_loc is eax - 
self.call(self.malloc_array_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_newstr(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_str_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_newunicode(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_unicode_func_addr, arglocs, eax) + def genop_call_malloc_gc(self, op, arglocs, result_loc): + self.genop_call(op, arglocs, result_loc) self.propagate_memoryerror_if_eax_is_null() def propagate_memoryerror_if_eax_is_null(self): @@ -2065,6 +2012,8 @@ self._genop_call(op, arglocs, resloc, force_index) def _genop_call(self, op, arglocs, resloc, force_index): + from pypy.jit.backend.llsupport.descr import CallDescr + sizeloc = arglocs[0] assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value @@ -2079,13 +2028,16 @@ else: tmp = eax + descr = op.getdescr() + assert isinstance(descr, CallDescr) + self._emit_call(force_index, x, arglocs, 3, tmp=tmp, - argtypes=op.getdescr().get_arg_types(), - callconv=op.getdescr().get_call_conv()) + argtypes=descr.get_arg_types(), + callconv=descr.get_call_conv()) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.type == FLOAT: # a float or a long long return - if op.getdescr().get_return_type() == 'L': + if descr.get_result_type() == 'L': self.mc.MOV_br(resloc.value, eax.value) # long long self.mc.MOV_br(resloc.value + 4, edx.value) # XXX should ideally not move the result on the stack, @@ -2094,7 +2046,7 @@ # can just be always a stack location else: self.mc.FSTPL_b(resloc.value) # float return - elif op.getdescr().get_return_type() == 'S': + elif descr.get_result_type() == 'S': # singlefloat return assert resloc is eax if IS_X86_32: @@ -2292,9 +2244,9 @@ # # Reset the vable token --- XXX really too much special logic here:-( if jd.index_of_virtualizable >= 0: - from pypy.jit.backend.llsupport.descr import BaseFieldDescr + from pypy.jit.backend.llsupport.descr import FieldDescr fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) ofs = fielddescr.offset self.mc.MOV(eax, arglocs[1]) self.mc.MOV_mi((eax.value, ofs), 0) @@ -2497,9 +2449,8 @@ else: self.mc.JMP(imm(target)) - def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): - size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) - size = (size + WORD-1) & ~(WORD-1) # round up + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size): + assert size & (WORD-1) == 0 # must be correctly aligned self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edx.value, (eax.value, size)) self.mc.CMP(edx, heap(nursery_top_adr)) @@ -2535,9 +2486,6 @@ offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) - # on 64-bits, 'tid' is a value that fits in 31 bits - assert rx86.fits_in_32bits(tid) - self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) genop_discard_list = [Assembler386.not_implemented_op_discard] * rop._LAST diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -16,8 +16,8 @@ from pypy.jit.codewriter import heaptracker, longlong from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.metainterp.resoperation import rop -from pypy.jit.backend.llsupport.descr import BaseFieldDescr, BaseArrayDescr -from 
pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr +from pypy.jit.backend.llsupport.descr import FieldDescr, ArrayDescr +from pypy.jit.backend.llsupport.descr import CallDescr, SizeDescr from pypy.jit.backend.llsupport.descr import InteriorFieldDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox @@ -870,9 +870,9 @@ def _consider_call(self, op, guard_not_forced_op=None): calldescr = op.getdescr() - assert isinstance(calldescr, BaseCallDescr) + assert isinstance(calldescr, CallDescr) assert len(calldescr.arg_classes) == op.numargs() - 1 - size = calldescr.get_result_size(self.translate_support_code) + size = calldescr.get_result_size() sign = calldescr.is_result_signed() if sign: sign_loc = imm1 @@ -917,12 +917,15 @@ consider_call_release_gil = consider_call_may_force + def consider_call_malloc_gc(self, op): + self._consider_call(op) + def consider_call_assembler(self, op, guard_op): descr = op.getdescr() assert isinstance(descr, JitCellToken) jd = descr.outermost_jitdriver_sd assert jd is not None - size = jd.portal_calldescr.get_result_size(self.translate_support_code) + size = jd.portal_calldescr.get_result_size() vable_index = jd.index_of_virtualizable if vable_index >= 0: self.rm._sync_var(op.getarg(vable_index)) @@ -957,21 +960,10 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb - def fastpath_malloc_fixedsize(self, op, descr): - assert isinstance(descr, BaseSizeDescr) - self._do_fastpath_malloc(op, descr.size, descr.tid) - - def fastpath_malloc_varsize(self, op, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - size = basesize + itemsize * num_elem - self._do_fastpath_malloc(op, size, arraydescr.tid) - self.assembler.set_new_array_length(eax, ofs_length, imm(num_elem)) - - def _do_fastpath_malloc(self, op, size, tid): - gc_ll_descr = self.assembler.cpu.gc_ll_descr + def consider_call_malloc_nursery(self, op): + size_box = op.getarg(0) + assert isinstance(size_box, ConstInt) + size = size_box.getint() self.rm.force_allocate_reg(op.result, selected_reg=eax) # # We need edx as a temporary, but otherwise don't save any more @@ -980,86 +972,39 @@ self.rm.force_allocate_reg(tmp_box, selected_reg=edx) self.rm.possibly_free_var(tmp_box) # + gc_ll_descr = self.assembler.cpu.gc_ll_descr self.assembler.malloc_cond( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), - size, tid, - ) - - def consider_new(self, op): - gc_ll_descr = self.assembler.cpu.gc_ll_descr - if gc_ll_descr.can_inline_malloc(op.getdescr()): - self.fastpath_malloc_fixedsize(op, op.getdescr()) - else: - args = gc_ll_descr.args_for_new(op.getdescr()) - arglocs = [imm(x) for x in args] - return self._call(op, arglocs) - - def consider_new_with_vtable(self, op): - classint = op.getarg(0).getint() - descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) - if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): - self.fastpath_malloc_fixedsize(op, descrsize) - self.assembler.set_vtable(eax, imm(classint)) - # result of fastpath malloc is in eax - else: - args = self.assembler.cpu.gc_ll_descr.args_for_new(descrsize) - arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.getarg(0))) - return self._call(op, arglocs) - - def consider_newstr(self, op): - loc = self.loc(op.getarg(0)) - return 
self._call(op, [loc]) - - def consider_newunicode(self, op): - loc = self.loc(op.getarg(0)) - return self._call(op, [loc]) - - def consider_new_array(self, op): - gc_ll_descr = self.assembler.cpu.gc_ll_descr - box_num_elem = op.getarg(0) - if isinstance(box_num_elem, ConstInt): - num_elem = box_num_elem.value - if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), - num_elem): - self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) - return - args = self.assembler.cpu.gc_ll_descr.args_for_new_array( - op.getdescr()) - arglocs = [imm(x) for x in args] - arglocs.append(self.loc(box_num_elem)) - self._call(op, arglocs) + size) def _unpack_arraydescr(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - ofs = arraydescr.get_base_size(self.translate_support_code) - size = arraydescr.get_item_size(self.translate_support_code) - ptr = arraydescr.is_array_of_pointers() + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.basesize + size = arraydescr.itemsize sign = arraydescr.is_item_signed() - return size, ofs, ofs_length, ptr, sign + return size, ofs, sign def _unpack_fielddescr(self, fielddescr): - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) ofs = fielddescr.offset - size = fielddescr.get_field_size(self.translate_support_code) - ptr = fielddescr.is_pointer_field() + size = fielddescr.field_size sign = fielddescr.is_field_signed() - return imm(ofs), imm(size), ptr, sign + return imm(ofs), imm(size), sign + _unpack_fielddescr._always_inline_ = True def _unpack_interiorfielddescr(self, descr): assert isinstance(descr, InteriorFieldDescr) arraydescr = descr.arraydescr - ofs = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - fieldsize = descr.fielddescr.get_field_size(self.translate_support_code) + ofs = arraydescr.basesize + itemsize = arraydescr.itemsize + fieldsize = descr.fielddescr.field_size sign = descr.fielddescr.is_field_signed() ofs += descr.fielddescr.offset return imm(ofs), imm(itemsize), imm(fieldsize), sign def consider_setfield_gc(self, op): - ofs_loc, size_loc, _, _ = self._unpack_fielddescr(op.getdescr()) + ofs_loc, size_loc, _ = self._unpack_fielddescr(op.getdescr()) assert isinstance(size_loc, ImmedLoc) if size_loc.value == 1: need_lower_byte = True @@ -1117,7 +1062,7 @@ consider_unicodesetitem = consider_strsetitem def consider_setarrayitem_gc(self, op): - itemsize, ofs, _, _, _ = self._unpack_arraydescr(op.getdescr()) + itemsize, ofs, _ = self._unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) if itemsize == 1: @@ -1134,7 +1079,7 @@ consider_setarrayitem_raw = consider_setarrayitem_gc def consider_getfield_gc(self, op): - ofs_loc, size_loc, _, sign = self._unpack_fielddescr(op.getdescr()) + ofs_loc, size_loc, sign = self._unpack_fielddescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars(args) @@ -1150,7 +1095,7 @@ consider_getfield_gc_pure = consider_getfield_gc def consider_getarrayitem_gc(self, op): - itemsize, ofs, _, _, sign = self._unpack_arraydescr(op.getdescr()) + itemsize, ofs, sign = self._unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) @@ -1229,8 +1174,8 @@ def 
consider_arraylen_gc(self, op): arraydescr = op.getdescr() - assert isinstance(arraydescr, BaseArrayDescr) - ofs = arraydescr.get_ofs_length(self.translate_support_code) + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.lendescr.offset args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars_for_op(op) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -8,7 +8,7 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.codewriter import heaptracker from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.backend.llsupport.descr import GcCache +from pypy.jit.backend.llsupport.descr import GcCache, FieldDescr, FLAG_SIGNED from pypy.jit.backend.llsupport.gc import GcLLDescription from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.regalloc import RegAlloc @@ -17,7 +17,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.annlowlevel import llhelper from pypy.rpython.lltypesystem import rclass, rstr -from pypy.jit.backend.llsupport.gc import GcLLDescr_framework, GcPtrFieldDescr +from pypy.jit.backend.llsupport.gc import GcLLDescr_framework from pypy.jit.backend.x86.test.test_regalloc import MockAssembler from pypy.jit.backend.x86.test.test_regalloc import BaseTestRegalloc @@ -41,20 +41,15 @@ return ['compressed'] + shape[1:] class MockGcDescr(GcCache): - def get_funcptr_for_new(self): - return 123 - get_funcptr_for_newarray = get_funcptr_for_new - get_funcptr_for_newstr = get_funcptr_for_new - get_funcptr_for_newunicode = get_funcptr_for_new get_malloc_slowpath_addr = None - + write_barrier_descr = None moving_gc = True gcrootmap = MockGcRootMap() def initialize(self): pass - record_constptrs = GcLLDescr_framework.record_constptrs.im_func + _record_constptrs = GcLLDescr_framework._record_constptrs.im_func rewrite_assembler = GcLLDescr_framework.rewrite_assembler.im_func class TestRegallocDirectGcIntegration(object): @@ -170,42 +165,32 @@ ''' self.interpret(ops, [0, 0, 0, 0, 0, 0, 0, 0, 0], run=False) +NOT_INITIALIZED = chr(0xdd) + class GCDescrFastpathMalloc(GcLLDescription): gcrootmap = None - expected_malloc_slowpath_size = WORD*2 + write_barrier_descr = None def __init__(self): - GcCache.__init__(self, False) + GcLLDescription.__init__(self, None) # create a nursery - NTP = rffi.CArray(lltype.Signed) - self.nursery = lltype.malloc(NTP, 16, flavor='raw') - self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 3, + NTP = rffi.CArray(lltype.Char) + self.nursery = lltype.malloc(NTP, 64, flavor='raw') + for i in range(64): + self.nursery[i] = NOT_INITIALIZED + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 2, flavor='raw') self.addrs[0] = rffi.cast(lltype.Signed, self.nursery) - self.addrs[1] = self.addrs[0] + 16*WORD - self.addrs[2] = 0 - # 16 WORDs + self.addrs[1] = self.addrs[0] + 64 + self.calls = [] def malloc_slowpath(size): - assert size == self.expected_malloc_slowpath_size + self.calls.append(size) + # reset the nursery nadr = rffi.cast(lltype.Signed, self.nursery) self.addrs[0] = nadr + size - self.addrs[2] += 1 return nadr - self.malloc_slowpath = malloc_slowpath - self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], - lltype.Signed) - self._counter = 123000 - - def can_inline_malloc(self, descr): - return True - - def get_funcptr_for_new(self): - 
return 42 -# return llhelper(lltype.Ptr(self.NEW_TP), self.new) - - def init_size_descr(self, S, descr): - descr.tid = self._counter - self._counter += 1 + self.generate_function('malloc_nursery', malloc_slowpath, + [lltype.Signed], lltype.Signed) def get_nursery_free_addr(self): return rffi.cast(lltype.Signed, self.addrs) @@ -214,204 +199,61 @@ return rffi.cast(lltype.Signed, self.addrs) + WORD def get_malloc_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) - return rffi.cast(lltype.Signed, fptr) + return self.get_malloc_fn_addr('malloc_nursery') - get_funcptr_for_newarray = None - get_funcptr_for_newstr = None - get_funcptr_for_newunicode = None + def check_nothing_in_nursery(self): + # CALL_MALLOC_NURSERY should not write anything in the nursery + for i in range(64): + assert self.nursery[i] == NOT_INITIALIZED class TestMallocFastpath(BaseTestRegalloc): def setup_method(self, method): cpu = CPU(None, None) - cpu.vtable_offset = WORD cpu.gc_ll_descr = GCDescrFastpathMalloc() cpu.setup_once() + self.cpu = cpu - # hack: specify 'tid' explicitly, because this test is not running - # with the gc transformer - NODE = lltype.GcStruct('node', ('tid', lltype.Signed), - ('value', lltype.Signed)) - nodedescr = cpu.sizeof(NODE) - valuedescr = cpu.fielddescrof(NODE, 'value') - - self.cpu = cpu - self.nodedescr = nodedescr - vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) - vtable_int = cpu.cast_adr_to_int(llmemory.cast_ptr_to_adr(vtable)) - NODE2 = lltype.GcStruct('node2', - ('parent', rclass.OBJECT), - ('tid', lltype.Signed), - ('vtable', lltype.Ptr(rclass.OBJECT_VTABLE))) - descrsize = cpu.sizeof(NODE2) - heaptracker.register_known_gctype(cpu, vtable, NODE2) - self.descrsize = descrsize - self.vtable_int = vtable_int - - self.namespace = locals().copy() - def test_malloc_fastpath(self): ops = ''' - [i0] - p0 = new(descr=nodedescr) - setfield_gc(p0, i0, descr=valuedescr) - finish(p0) + [] + p0 = call_malloc_nursery(16) + p1 = call_malloc_nursery(32) + p2 = call_malloc_nursery(16) + finish(p0, p1, p2) ''' - self.interpret(ops, [42]) - # check the nursery + self.interpret(ops, []) + # check the returned pointers gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == self.nodedescr.tid - assert gc_ll_descr.nursery[1] == 42 nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*2) - assert gc_ll_descr.addrs[2] == 0 # slowpath never called + ref = self.cpu.get_latest_value_ref + assert rffi.cast(lltype.Signed, ref(0)) == nurs_adr + 0 + assert rffi.cast(lltype.Signed, ref(1)) == nurs_adr + 16 + assert rffi.cast(lltype.Signed, ref(2)) == nurs_adr + 48 + # check the nursery content and state + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.addrs[0] == nurs_adr + 64 + # slowpath never called + assert gc_ll_descr.calls == [] def test_malloc_slowpath(self): ops = ''' [] - p0 = new(descr=nodedescr) - p1 = new(descr=nodedescr) - p2 = new(descr=nodedescr) - p3 = new(descr=nodedescr) - p4 = new(descr=nodedescr) - p5 = new(descr=nodedescr) - p6 = new(descr=nodedescr) - p7 = new(descr=nodedescr) - p8 = new(descr=nodedescr) - finish(p0, p1, p2, p3, p4, p5, p6, p7, p8) + p0 = call_malloc_nursery(16) + p1 = call_malloc_nursery(32) + p2 = call_malloc_nursery(24) # overflow + finish(p0, p1, p2) ''' self.interpret(ops, []) + # check the returned pointers + gc_ll_descr = self.cpu.gc_ll_descr + nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) + ref = self.cpu.get_latest_value_ref 
+ assert rffi.cast(lltype.Signed, ref(0)) == nurs_adr + 0 + assert rffi.cast(lltype.Signed, ref(1)) == nurs_adr + 16 + assert rffi.cast(lltype.Signed, ref(2)) == nurs_adr + 0 + # check the nursery content and state + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.addrs[0] == nurs_adr + 24 # this should call slow path once - gc_ll_descr = self.cpu.gc_ll_descr - nadr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nadr + (WORD*2) - assert gc_ll_descr.addrs[2] == 1 # slowpath called once - - def test_new_with_vtable(self): - ops = ''' - [i0, i1] - p0 = new_with_vtable(ConstClass(vtable)) - guard_class(p0, ConstClass(vtable)) [i0] - finish(i1) - ''' - self.interpret(ops, [0, 1]) - assert self.getint(0) == 1 - gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == self.descrsize.tid - assert gc_ll_descr.nursery[1] == self.vtable_int - nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*3) - assert gc_ll_descr.addrs[2] == 0 # slowpath never called - - -class Seen(Exception): - pass - -class GCDescrFastpathMallocVarsize(GCDescrFastpathMalloc): - def can_inline_malloc_varsize(self, arraydescr, num_elem): - return num_elem < 5 - def get_funcptr_for_newarray(self): - return 52 - def init_array_descr(self, A, descr): - descr.tid = self._counter - self._counter += 1 - def args_for_new_array(self, descr): - raise Seen("args_for_new_array") - -class TestMallocVarsizeFastpath(BaseTestRegalloc): - def setup_method(self, method): - cpu = CPU(None, None) - cpu.vtable_offset = WORD - cpu.gc_ll_descr = GCDescrFastpathMallocVarsize() - cpu.setup_once() - self.cpu = cpu - - ARRAY = lltype.GcArray(lltype.Signed) - arraydescr = cpu.arraydescrof(ARRAY) - self.arraydescr = arraydescr - ARRAYCHAR = lltype.GcArray(lltype.Char) - arraychardescr = cpu.arraydescrof(ARRAYCHAR) - - self.namespace = locals().copy() - - def test_malloc_varsize_fastpath(self): - # Hack. Running the GcLLDescr_framework without really having - # a complete GC means that we end up with both the tid and the - # length being at offset 0. In this case, so the length overwrites - # the tid. This is of course only the case in this test class. 
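# A minimal standalone sketch (not PyPy source; all names here are
# illustrative) of the bump-pointer allocation that CALL_MALLOC_NURSERY and
# the mock GCDescrFastpathMalloc above model: addrs[0] is the next free
# address, addrs[1] the nursery top, and the slow path only runs on overflow.
class NurserySketch(object):
    def __init__(self, size):
        self.start = 0                        # stands in for the nursery address
        self.free = self.start                # addrs[0]: next free byte
        self.top = self.start + size          # addrs[1]: end of the nursery
        self.slowpath_calls = []              # like gc_ll_descr.calls in the test

    def malloc_nursery(self, nbytes):
        result = self.free
        if result + nbytes <= self.top:       # fast path: just bump the pointer
            self.free = result + nbytes
            return result
        return self._malloc_slowpath(nbytes)  # overflow: fall back to the GC

    def _malloc_slowpath(self, nbytes):
        self.slowpath_calls.append(nbytes)
        self.free = self.start + nbytes       # the mock resets the nursery
        return self.start

# mirrors test_malloc_slowpath: 16 + 32 fit in a 64-byte nursery, +24 overflows
nursery = NurserySketch(64)
assert [nursery.malloc_nursery(n) for n in (16, 32, 24)] == [0, 16, 0]
assert nursery.slowpath_calls == [24]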
- ops = ''' - [] - p0 = new_array(4, descr=arraydescr) - setarrayitem_gc(p0, 0, 142, descr=arraydescr) - setarrayitem_gc(p0, 3, 143, descr=arraydescr) - finish(p0) - ''' - self.interpret(ops, []) - # check the nursery - gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == 4 - assert gc_ll_descr.nursery[1] == 142 - assert gc_ll_descr.nursery[4] == 143 - nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*5) - assert gc_ll_descr.addrs[2] == 0 # slowpath never called - - def test_malloc_varsize_slowpath(self): - ops = ''' - [] - p0 = new_array(4, descr=arraydescr) - setarrayitem_gc(p0, 0, 420, descr=arraydescr) - setarrayitem_gc(p0, 3, 430, descr=arraydescr) - p1 = new_array(4, descr=arraydescr) - setarrayitem_gc(p1, 0, 421, descr=arraydescr) - setarrayitem_gc(p1, 3, 431, descr=arraydescr) - p2 = new_array(4, descr=arraydescr) - setarrayitem_gc(p2, 0, 422, descr=arraydescr) - setarrayitem_gc(p2, 3, 432, descr=arraydescr) - p3 = new_array(4, descr=arraydescr) - setarrayitem_gc(p3, 0, 423, descr=arraydescr) - setarrayitem_gc(p3, 3, 433, descr=arraydescr) - finish(p0, p1, p2, p3) - ''' - gc_ll_descr = self.cpu.gc_ll_descr - gc_ll_descr.expected_malloc_slowpath_size = 5*WORD - self.interpret(ops, []) - assert gc_ll_descr.addrs[2] == 1 # slowpath called once - - def test_malloc_varsize_too_big(self): - ops = ''' - [] - p0 = new_array(5, descr=arraydescr) - finish(p0) - ''' - py.test.raises(Seen, self.interpret, ops, []) - - def test_malloc_varsize_variable(self): - ops = ''' - [i0] - p0 = new_array(i0, descr=arraydescr) - finish(p0) - ''' - py.test.raises(Seen, self.interpret, ops, []) - - def test_malloc_array_of_char(self): - # check that fastpath_malloc_varsize() respects the alignment - # of the pointer in the nursery - ops = ''' - [] - p1 = new_array(1, descr=arraychardescr) - p2 = new_array(2, descr=arraychardescr) - p3 = new_array(3, descr=arraychardescr) - p4 = new_array(4, descr=arraychardescr) - finish(p1, p2, p3, p4) - ''' - self.interpret(ops, []) - p1 = self.getptr(0, llmemory.GCREF) - p2 = self.getptr(1, llmemory.GCREF) - p3 = self.getptr(2, llmemory.GCREF) - p4 = self.getptr(3, llmemory.GCREF) - assert p1._obj.intval & (WORD-1) == 0 # aligned - assert p2._obj.intval & (WORD-1) == 0 # aligned - assert p3._obj.intval & (WORD-1) == 0 # aligned - assert p4._obj.intval & (WORD-1) == 0 # aligned + assert gc_ll_descr.calls == [24] diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -69,6 +69,7 @@ return ctypes.cast(res.value._obj.intval, ctypes.POINTER(item_tp)) def test_allocations(self): + py.test.skip("rewrite or kill") from pypy.rpython.lltypesystem import rstr allocs = [None] diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -69,16 +69,17 @@ def get_functions_to_patch(): from pypy.jit.backend.llsupport import gc # - can_inline_malloc1 = gc.GcLLDescr_framework.can_inline_malloc - def can_inline_malloc2(*args): + can_use_nursery_malloc1 = gc.GcLLDescr_framework.can_use_nursery_malloc + def can_use_nursery_malloc2(*args): try: if os.environ['PYPY_NO_INLINE_MALLOC']: return False except KeyError: pass - return can_inline_malloc1(*args) + return can_use_nursery_malloc1(*args) # - return {(gc.GcLLDescr_framework, 'can_inline_malloc'): 
can_inline_malloc2} + return {(gc.GcLLDescr_framework, 'can_use_nursery_malloc'): + can_use_nursery_malloc2} def compile(f, gc, enable_opts='', **kwds): from pypy.annotation.listdef import s_list_of_strings diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -46,7 +46,7 @@ # get the function address as an integer func = argboxes[0].getint() # do the call using the correct function from the cpu - rettype = descr.get_return_type() + rettype = descr.get_result_type() if rettype == INT or rettype == 'S': # *S*ingle float try: result = cpu.bh_call_i(func, descr, args_i, args_r, args_f) @@ -344,6 +344,8 @@ rop.SETINTERIORFIELD_RAW, rop.CALL_RELEASE_GIL, rop.QUASIIMMUT_FIELD, + rop.CALL_MALLOC_GC, + rop.CALL_MALLOC_NURSERY, rop.LABEL, ): # list of opcodes never executed by pyjitpl continue diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -142,59 +142,6 @@ def repr_of_descr(self): return '%r' % (self,) - def get_arg_types(self): - """ Implement in call descr. - Must return a string of INT, REF and FLOAT ('i', 'r', 'f'). - """ - raise NotImplementedError - - def get_return_type(self): - """ Implement in call descr. - Must return INT, REF, FLOAT, or 'v' for void. - On 32-bit (hack) it can also be 'L' for longlongs. - Additionally it can be 'S' for singlefloats. - """ - raise NotImplementedError - - def get_extra_info(self): - """ Implement in call descr - """ - raise NotImplementedError - - def is_array_of_pointers(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_array_of_floats(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_array_of_structs(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_pointer_field(self): - """ Implement for field descr - """ - raise NotImplementedError - - def is_float_field(self): - """ Implement for field descr - """ - raise NotImplementedError - - def as_vtable_size_descr(self): - """ Implement for size descr representing objects with vtables. - Returns self. 
(it's an annotation hack) - """ - raise NotImplementedError - - def count_fields_if_immutable(self): - return -1 - def _clone_if_mutable(self): return self def clone_if_mutable(self): diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -1,5 +1,5 @@ import math -from pypy.rlib.rarithmetic import r_int64, r_uint +from pypy.rlib.rarithmetic import r_int64 from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.rlib.objectmodel import we_are_translated @@ -21,7 +21,6 @@ # class MemoryManager(object): - NO_NEXT_CHECK = r_int64(2 ** 63 - 1) def __init__(self): self.check_frequency = -1 @@ -37,13 +36,12 @@ # According to my estimates it's about 5e9 years given 1000 loops # per second self.current_generation = r_int64(1) - self.next_check = self.NO_NEXT_CHECK + self.next_check = r_int64(-1) self.alive_loops = {} - self._cleanup_jitcell_dicts = lambda: None def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: - self.next_check = self.NO_NEXT_CHECK + self.next_check = r_int64(-1) else: self.max_age = max_age if check_frequency <= 0: @@ -51,11 +49,10 @@ self.check_frequency = check_frequency self.next_check = self.current_generation + 1 - def next_generation(self, do_cleanups_now=True): + def next_generation(self): self.current_generation += 1 - if do_cleanups_now and self.current_generation >= self.next_check: + if self.current_generation == self.next_check: self._kill_old_loops_now() - self._cleanup_jitcell_dicts() self.next_check = self.current_generation + self.check_frequency def keep_loop_alive(self, looptoken): @@ -84,22 +81,3 @@ # a single one is not enough for all tests :-( rgc.collect(); rgc.collect(); rgc.collect() debug_stop("jit-mem-collect") - - def get_current_generation_uint(self): - """Return the current generation, possibly truncated to a uint. - To use only as an approximation for decaying counters.""" - return r_uint(self.current_generation) - - def record_jitcell_dict(self, callback): - """NOT_RPYTHON. The given jitcell_dict is a dict that needs - occasional clean-ups of old cells. A cell is old if it never - reached the threshold, and its counter decayed to a tiny value.""" - # note that the various jitcell_dicts have different RPython types, - # so we have to make a different function for each one. These - # functions are chained to each other: each calls the previous one. 
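# The record_jitcell_dict() removed in this hunk chains one cleanup callback
# per jitcell_dict through closures, because each dict has a different RPython
# type.  A self-contained sketch of that chaining pattern (hypothetical names,
# not PyPy source):
class CallbackChain(object):
    def __init__(self):
        self._run_all = lambda: None          # start with a no-op

    def record(self, callback):
        run_previous = self._run_all
        def run_all():
            callback()                        # clean up this dict ...
            run_previous()                    # ... then all previously recorded ones
        self._run_all = run_all

    def run(self):
        self._run_all()

chain = CallbackChain()
seen = []
chain.record(lambda: seen.append('dict1'))
chain.record(lambda: seen.append('dict2'))
chain.run()
assert seen == ['dict2', 'dict1']             # the newest callback runs first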
- def cleanup_dict(): - callback() - cleanup_previous() - # - cleanup_previous = self._cleanup_jitcell_dicts - self._cleanup_jitcell_dicts = cleanup_dict diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -1,3 +1,4 @@ +from __future__ import with_statement from pypy.jit.metainterp.optimizeopt.test.test_util import ( LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot) from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -508,6 +508,8 @@ #'OOSEND', # ootype operation #'OOSEND_PURE', # ootype operation 'CALL_PURE/*d', # removed before it's passed to the backend + 'CALL_MALLOC_GC/*d', # like CALL, but NULL => propagate MemoryError + 'CALL_MALLOC_NURSERY/1', # nursery malloc, const number of bytes, zeroed '_CALL_LAST', '_CANRAISE_LAST', # ----- end of can_raise operations ----- diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2910,27 +2910,6 @@ res = self.meta_interp(f, [32]) assert res == f(32) - def test_decay_counters(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def f(m, n): - while n > 0: - myjitdriver.jit_merge_point(m=m, n=n) - n += m - n -= m - n -= 1 - def main(): - f(5, 7) # run 7x with m=5 counter[m=5] = 7 - f(15, 10) # compiles one loop counter[m=5] = 3 (automatic decay) - f(5, 5) # run 5x times with m=5 counter[m=5] = 8 - # - self.meta_interp(main, [], decay_halflife=1, - function_threshold=0, threshold=9, trace_eagerness=99) - self.check_trace_count(1) - # - self.meta_interp(main, [], decay_halflife=1, - function_threshold=0, threshold=8, trace_eagerness=99) - self.check_trace_count(2) - class TestOOtype(BasicTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_executor.py b/pypy/jit/metainterp/test/test_executor.py --- a/pypy/jit/metainterp/test/test_executor.py +++ b/pypy/jit/metainterp/test/test_executor.py @@ -18,7 +18,7 @@ pass class FakeCallDescr(FakeDescr): - def get_return_type(self): + def get_result_type(self): return history.FLOAT class FakeFieldDescr(FakeDescr): diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -1,4 +1,3 @@ -import math from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -9,7 +8,7 @@ from pypy.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr from pypy.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr from pypy.jit.codewriter import longlong -from pypy.rlib.rarithmetic import r_singlefloat, r_uint +from pypy.rlib.rarithmetic import r_singlefloat def boxfloat(x): return BoxFloat(longlong.getfloatstorage(x)) @@ -277,76 +276,51 @@ res = state.can_never_inline(5, 42.5) assert res is True -def test_decay_counters(): - cell = JitCell(r_uint(5)) - cell.counter = 100 - cell.adjust_counter(r_uint(5), math.log(0.9)) - assert cell.counter == 100 - cell.adjust_counter(r_uint(6), math.log(0.9)) - assert cell.counter == 
90 - cell.adjust_counter(r_uint(9), math.log(0.9)) - assert cell.counter == int(90 * (0.9**3)) - def test_cleanup_jitcell_dict(): - from pypy.jit.metainterp.memmgr import MemoryManager - class FakeWarmRunnerDesc: - memory_manager = MemoryManager() - class cpu: - pass class FakeJitDriverSD: _green_args_spec = [lltype.Signed] # # Test creating tons of jitcells that remain at 0 - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() cell1 = get_jitcell(True, -1) assert len(warmstate._jitcell_dict) == 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 1 # for i in range(1, 20005): get_jitcell(True, i) # should trigger a clean-up at 20001 assert len(warmstate._jitcell_dict) == (i % 20000) + 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 2 # # Same test, with one jitcell that has a counter of BASE instead of 0 - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) - warmstate.set_param_decay_halflife(2) - warmstate.set_param_threshold(5) - warmstate.set_param_function_threshold(0) + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() cell2 = get_jitcell(True, -2) - cell2.counter = BASE = warmstate.increment_threshold * 3 + cell2.counter = BASE = warmstate.THRESHOLD_LIMIT // 2 # 50% # for i in range(0, 20005): get_jitcell(True, i) assert len(warmstate._jitcell_dict) == (i % 19999) + 2 # assert cell2 in warmstate._jitcell_dict.values() - assert cell2.counter == int(BASE * math.sqrt(0.5)) # decayed once - assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 + assert cell2.counter == int(BASE * 0.92) # decayed once # - # Same test, with jitcells that are compiled and free by the memmgr - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + # Same test, with jitcells that are compiled and freed by the memmgr + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() get_jitcell(True, -1) - assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 # for i in range(1, 20005): cell = get_jitcell(True, i) cell.counter = -1 cell.wref_procedure_token = None # or a dead weakref, equivalently assert len(warmstate._jitcell_dict) == (i % 20000) + 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 # # Same test, with counter == -2 (rare case, kept alive) - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() cell = get_jitcell(True, -1) cell.counter = -2 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 # for i in range(1, 20005): cell = get_jitcell(True, i) cell.counter = -2 assert len(warmstate._jitcell_dict) == i + 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 5 diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -64,11 +64,9 @@ def jittify_and_run(interp, graph, args, repeat=1, graph_and_interp_only=False, backendopt=False, trace_limit=sys.maxint, - threshold=3, trace_eagerness=2, inline=False, loop_longevity=0, retrace_limit=5, - function_threshold=4, decay_halflife=0, - enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, - **kwds): + function_threshold=4, + enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, **kwds): 
from pypy.config.config import ConfigError translator = interp.typer.annotator.translator try: @@ -85,16 +83,15 @@ pass warmrunnerdesc = WarmRunnerDesc(translator, backendopt=backendopt, **kwds) for jd in warmrunnerdesc.jitdrivers_sd: - jd.warmstate.set_param_threshold(threshold) + jd.warmstate.set_param_threshold(3) # for tests jd.warmstate.set_param_function_threshold(function_threshold) - jd.warmstate.set_param_trace_eagerness(trace_eagerness) + jd.warmstate.set_param_trace_eagerness(2) # for tests jd.warmstate.set_param_trace_limit(trace_limit) jd.warmstate.set_param_inlining(inline) jd.warmstate.set_param_loop_longevity(loop_longevity) jd.warmstate.set_param_retrace_limit(retrace_limit) jd.warmstate.set_param_max_retrace_guards(max_retrace_guards) jd.warmstate.set_param_enable_opts(enable_opts) - jd.warmstate.set_param_decay_halflife(decay_halflife) warmrunnerdesc.finish() if graph_and_interp_only: return interp, graph diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -1,10 +1,10 @@ -import sys, weakref, math +import sys, weakref from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, cast_base_ptr_to_instance from pypy.rpython.annlowlevel import cast_object_to_ptr from pypy.rlib.objectmodel import specialize, we_are_translated, r_dict -from pypy.rlib.rarithmetic import intmask, r_uint +from pypy.rlib.rarithmetic import intmask from pypy.rlib.nonconst import NonConstant from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.jit import PARAMETERS @@ -151,27 +151,9 @@ # counter == -2: tracing is currently going on for this cell counter = 0 dont_trace_here = False + extra_delay = chr(0) wref_procedure_token = None - def __init__(self, generation): - # The stored 'counter' value follows an exponential decay model. - # Conceptually after every generation, it decays by getting - # multiplied by a constant <= 1.0. In practice, decaying occurs - # lazily: the following field records the latest seen generation - # number, and adjustment is done by adjust_counter() when needed. - self.latest_generation_seen = generation - - def adjust_counter(self, generation, log_decay_factor): - if generation != self.latest_generation_seen: - # The latest_generation_seen is older than the current generation. - # Adjust by multiplying self.counter N times by decay_factor, i.e. - # by decay_factor ** N, which is equal to exp(log(decay_factor)*N). - assert self.counter >= 0 - N = generation - self.latest_generation_seen - factor = math.exp(log_decay_factor * N) - self.counter = int(self.counter * factor) - self.latest_generation_seen = generation - def get_procedure_token(self): if self.wref_procedure_token is not None: token = self.wref_procedure_token() @@ -231,17 +213,6 @@ def set_param_inlining(self, value): self.inlining = value - def set_param_decay_halflife(self, value): - # Use 0 or -1 to mean "no decay". Initialize the internal variable - # 'log_decay_factor'. It is choosen such that by multiplying the - # counter on loops by 'exp(log_decay_factor)' (<= 1.0) every - # generation, then the counter will be divided by two after 'value' - # generations have passed. 
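# The decay model described in the comment just above (its removed body
# continues below) picks log_decay_factor = log(0.5) / halflife, so scaling a
# counter by exp(log_decay_factor * N) halves it once N generations have
# passed.  A quick numeric check of that formula, with an assumed halflife:
import math

halflife = 40                                     # illustrative value only
log_decay_factor = math.log(0.5) / halflife
counter = 1000.0
after_halflife = counter * math.exp(log_decay_factor * halflife)
assert abs(after_halflife - counter / 2) < 1e-9   # halved after `halflife` generations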
- if value <= 0: - self.log_decay_factor = 0.0 # log(1.0) - else: - self.log_decay_factor = math.log(0.5) / value - def set_param_enable_opts(self, value): from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT, ALL_OPTS_NAMES @@ -311,11 +282,6 @@ confirm_enter_jit = self.confirm_enter_jit range_red_args = unrolling_iterable( range(num_green_args, num_green_args + jitdriver_sd.num_red_args)) - memmgr = self.warmrunnerdesc.memory_manager - if memmgr is not None: - get_current_generation = memmgr.get_current_generation_uint - else: - get_current_generation = lambda: r_uint(0) # get a new specialized copy of the method ARGS = [] for kind in jitdriver_sd.red_args_types: @@ -350,6 +316,36 @@ # assert 0, "should have raised" + def bound_reached(cell, *args): + # bound reached, but we do a last check: if it is the first + # time we reach the bound, or if another loop or bridge was + # compiled since the last time we reached it, then decrease + # the counter by a few percents instead. It should avoid + # sudden bursts of JIT-compilation, and also corner cases + # where we suddenly compile more than one loop because all + # counters reach the bound at the same time, but where + # compiling all but the first one is pointless. + curgen = warmrunnerdesc.memory_manager.current_generation + curgen = chr(intmask(curgen) & 0xFF) # only use 8 bits + if we_are_translated() and curgen != cell.extra_delay: + cell.counter = int(self.THRESHOLD_LIMIT * 0.98) + cell.extra_delay = curgen + return + # + if not confirm_enter_jit(*args): + cell.counter = 0 + return + # start tracing + from pypy.jit.metainterp.pyjitpl import MetaInterp + metainterp = MetaInterp(metainterp_sd, jitdriver_sd) + # set counter to -2, to mean "tracing in effect" + cell.counter = -2 + try: + metainterp.compile_and_run_once(jitdriver_sd, *args) + finally: + if cell.counter == -2: + cell.counter = 0 + def maybe_compile_and_run(threshold, *args): """Entry point to the JIT. Called at the point with the can_enter_jit() hint. 
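# A standalone sketch of the extra_delay trick used by bound_reached() above:
# the first time a cell reaches the threshold within a given memory-manager
# generation it is pushed back to 98% of the limit instead of being traced,
# which spreads JIT compilation out over time.  Illustrative only; the
# constant and names below are assumptions, and the we_are_translated() check
# of the real code is omitted.
THRESHOLD_LIMIT = 10000                        # assumed value for the example

class CellSketch(object):
    counter = 0
    extra_delay = chr(0)

def should_trace(cell, current_generation):
    tag = chr(current_generation & 0xFF)       # keep only 8 bits, as above
    if tag != cell.extra_delay:                # first hit in this generation:
        cell.counter = int(THRESHOLD_LIMIT * 0.98)   # back off a little
        cell.extra_delay = tag
        return False
    return True                                # hit again: really start tracing

cell = CellSketch()
assert not should_trace(cell, 7)               # first time: counter decreased
assert should_trace(cell, 7)                   # same generation: trace for real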
@@ -360,25 +356,13 @@ if cell.counter >= 0: # update the profiling counter - cell.adjust_counter(get_current_generation(), - self.log_decay_factor) n = cell.counter + threshold if n <= self.THRESHOLD_LIMIT: # bound not reached cell.counter = n return - if not confirm_enter_jit(*args): - cell.counter = 0 + else: + bound_reached(cell, *args) return - # bound reached; start tracing - from pypy.jit.metainterp.pyjitpl import MetaInterp - metainterp = MetaInterp(metainterp_sd, jitdriver_sd) - # set counter to -2, to mean "tracing in effect" - cell.counter = -2 - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - finally: - if cell.counter == -2: - cell.counter = 0 else: if cell.counter != -1: assert cell.counter == -2 @@ -454,15 +438,6 @@ # return jit_getter - def _new_jitcell(self): - warmrunnerdesc = self.warmrunnerdesc - if (warmrunnerdesc is not None and - warmrunnerdesc.memory_manager is not None): - gen = warmrunnerdesc.memory_manager.get_current_generation_uint() - else: - gen = r_uint(0) - return JitCell(gen) - def _make_jitcell_getter_default(self): "NOT_RPYTHON" jitdriver_sd = self.jitdriver_sd @@ -492,44 +467,32 @@ except AttributeError: pass # - memmgr = self.warmrunnerdesc and self.warmrunnerdesc.memory_manager - if memmgr: - def _cleanup_dict(): - minimum = sys.maxint - if self.increment_threshold > 0: - minimum = min(minimum, self.increment_threshold) - if self.increment_function_threshold > 0: - minimum = min(minimum, self.increment_function_threshold) - currentgen = memmgr.get_current_generation_uint() - killme = [] - for key, cell in jitcell_dict.iteritems(): - if cell.counter >= 0: - cell.adjust_counter(currentgen, self.log_decay_factor) - if cell.counter < minimum: - killme.append(key) - elif (cell.counter == -1 - and cell.get_procedure_token() is None): + def _cleanup_dict(): + minimum = self.THRESHOLD_LIMIT // 20 # minimum 5% + killme = [] + for key, cell in jitcell_dict.iteritems(): + if cell.counter >= 0: + cell.counter = int(cell.counter * 0.92) + if cell.counter < minimum: killme.append(key) - for key in killme: - del jitcell_dict[key] - # - def _maybe_cleanup_dict(): - # If no tracing goes on at all because the jitcells are - # each time for new greenargs, the dictionary grows forever. - # So every one in a (rare) while, we decide to force an - # artificial next_generation() and _cleanup_dict(). - self._trigger_automatic_cleanup += 1 - if self._trigger_automatic_cleanup > 20000: - self._trigger_automatic_cleanup = 0 - memmgr.next_generation(do_cleanups_now=False) - _cleanup_dict() - # - self._trigger_automatic_cleanup = 0 - self._jitcell_dict = jitcell_dict # for tests - memmgr.record_jitcell_dict(_cleanup_dict) - else: - def _maybe_cleanup_dict(): - pass + elif (cell.counter == -1 + and cell.get_procedure_token() is None): + killme.append(key) + for key in killme: + del jitcell_dict[key] + # + def _maybe_cleanup_dict(): + # Once in a while, rarely, when too many entries have + # been put in the jitdict_dict, we do a cleanup phase: + # we decay all counters and kill entries with a too + # low counter. 
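# A condensed sketch of the cleanup policy in _cleanup_dict() /
# _maybe_cleanup_dict() in this hunk: roughly every 20000 new green keys,
# decay every live counter by 8% and drop cells that fall below 5% of the
# threshold.  Cells are simplified to plain integers here; illustrative only.
THRESHOLD_LIMIT = 10000                        # assumed value for the example

def cleanup_counters(counters):
    minimum = THRESHOLD_LIMIT // 20            # 5% of the threshold
    for key, value in list(counters.items()):
        value = int(value * 0.92)              # decay by a few percent
        if value < minimum:
            del counters[key]                  # cold cell: forget it
        else:
            counters[key] = value

counters = {'hot': THRESHOLD_LIMIT // 2, 'cold': 10}
cleanup_counters(counters)
assert 'cold' not in counters                            # decayed below 5%, dropped
assert counters['hot'] == int((THRESHOLD_LIMIT // 2) * 0.92)   # kept, decayed once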
+ self._trigger_automatic_cleanup += 1 + if self._trigger_automatic_cleanup > 20000: + self._trigger_automatic_cleanup = 0 + _cleanup_dict() + # + self._trigger_automatic_cleanup = 0 + self._jitcell_dict = jitcell_dict # for tests # def get_jitcell(build, *greenargs): try: @@ -538,7 +501,7 @@ if not build: return None _maybe_cleanup_dict() - cell = self._new_jitcell() + cell = JitCell() jitcell_dict[greenargs] = cell return cell return get_jitcell @@ -549,7 +512,7 @@ get_jitcell_at_ptr = self.jitdriver_sd._get_jitcell_at_ptr set_jitcell_at_ptr = self.jitdriver_sd._set_jitcell_at_ptr lltohlhack = {} - # note that there is no equivalent of record_jitcell_dict() + # note that there is no equivalent of _maybe_cleanup_dict() # in the case of custom getters. We assume that the interpreter # stores the JitCells on some objects that can go away by GC, # like the PyCode objects in PyPy. @@ -574,7 +537,7 @@ if not build: return cell if cell is None: - cell = self._new_jitcell() + cell = JitCell() # if we_are_translated(): cellref = cast_object_to_ptr(BASEJITCELL, cell) diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -329,11 +329,16 @@ special_ops = {'repr': True, 'userdel': True, 'hash': True} for opname, _, arity, special_methods in ObjSpace.MethodTable: - if opname in special_ops: + if opname in special_ops or not special_methods: continue nonspaceargs = ", ".join(["w_obj%s" % i for i in range(arity)]) code = "def func(space, %s):\n '''%s'''\n" % (nonspaceargs, opname) - for i in range(arity): + assert arity >= len(special_methods) + forcing_count = len(special_methods) + if opname.startswith('inplace_'): + assert arity == 2 + forcing_count = arity + for i in range(forcing_count): code += " w_obj%s = force(space, w_obj%s)\n" % (i, i) code += " return space.%s(%s)" % (opname, nonspaceargs) exec py.code.Source(code).compile() diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ b/pypy/module/_weakref/test/test_weakref.py @@ -466,3 +466,44 @@ # No exception should be raised here gc.collect() + def test_add(self): + import _weakref + class A(object): + def __add__(self, other): + return other + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + a3 = p1 + p2 + assert a3 is a2 + + def test_inplace_add(self): + import _weakref + class A(object): + def __add__(self, other): + return other + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + p1 += p2 + assert p1 is a2 + + def test_setattr(self): + import _weakref + class A(object): + def __setitem__(self, key, value): + self.setkey = key + self.setvalue = value + a1 = A() + a2 = A() + p1 = _weakref.proxy(a1) + p2 = _weakref.proxy(a2) + p1[p2] = 42 + assert a1.setkey is p2 + assert a1.setvalue == 42 + # + p1[42] = p2 + assert a1.setkey == 42 + assert a1.setvalue is p2 diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -53,7 +53,6 @@ i = start for j in range(arr.size): arr[j] = i - j += 1 i += step return arr diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -311,7 +311,7 @@ # to repeat it every time ticker_check = 
""" guard_not_invalidated? - ticker0 = getfield_raw(ticker_address, descr=) + ticker0 = getfield_raw(ticker_address, descr=) ticker_cond0 = int_lt(ticker0, 0) guard_false(ticker_cond0, descr=...) """ @@ -320,9 +320,9 @@ # this is the ticker check generated if we have threads thread_ticker_check = """ guard_not_invalidated? - ticker0 = getfield_raw(ticker_address, descr=) + ticker0 = getfield_raw(ticker_address, descr=) ticker1 = int_sub(ticker0, _) - setfield_raw(ticker_address, ticker1, descr=) + setfield_raw(ticker_address, ticker1, descr=) ticker_cond0 = int_lt(ticker1, 0) guard_false(ticker_cond0, descr=...) """ @@ -330,7 +330,7 @@ # # this is the ticker check generated in PyFrame.handle_operation_error exc_ticker_check = """ - ticker2 = getfield_raw(ticker_address, descr=) + ticker2 = getfield_raw(ticker_address, descr=) ticker_cond1 = int_lt(ticker2, 0) guard_false(ticker_cond1, descr=...) """ @@ -451,7 +451,6 @@ try: self.match_loop(expected_ops, ignore_ops) except InvalidMatch, e: - #raise # uncomment this and use py.test --pdb for better debugging print '@' * 40 print "Loops don't match" print "=================" @@ -464,7 +463,7 @@ print print "Expected:" print format(expected_src) - return False + raise # always propagate the exception in case of mismatch else: return True diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -7,8 +7,9 @@ from pypy.tool.udir import udir from pypy.tool import logparser from pypy.jit.tool.jitoutput import parse_prof -from pypy.module.pypyjit.test_pypy_c.model import Log, find_ids_range, find_ids, \ - TraceWithIds, OpMatcher +from pypy.module.pypyjit.test_pypy_c.model import (Log, find_ids_range, + find_ids, TraceWithIds, + OpMatcher, InvalidMatch) class BaseTestPyPyC(object): def setup_class(cls): @@ -115,13 +116,18 @@ assert opcodes_names == ['LOAD_FAST', 'LOAD_CONST', 'BINARY_ADD', 'STORE_FAST'] -class TestOpMatcher(object): +class TestOpMatcher_(object): def match(self, src1, src2, **kwds): from pypy.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) matcher = OpMatcher(loop.operations) - return matcher.match(src2, **kwds) + try: + res = matcher.match(src2, **kwds) + assert res is True + return True + except InvalidMatch: + return False def test_match_var(self): match_var = OpMatcher([]).match_var @@ -447,7 +453,7 @@ jump(p0, p1, p2, p3, i8, descr=...) """) # - assert not loop.match(""" + py.test.raises(InvalidMatch, loop.match, """ i6 = int_lt(i4, 1003) guard_true(i6) i8 = int_add(i5, 1) # variable mismatch @@ -492,9 +498,8 @@ guard_no_exception(descr=...) """) # - assert not loop.match_by_id('ntohs', """ + py.test.raises(InvalidMatch, loop.match_by_id, 'ntohs', """ guard_not_invalidated(descr=...) p12 = call(ConstClass(foobar), 1, descr=...) guard_no_exception(descr=...) """) - diff --git a/pypy/module/pypyjit/test_pypy_c/test__ffi.py b/pypy/module/pypyjit/test_pypy_c/test__ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test__ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test__ffi.py @@ -35,7 +35,7 @@ guard_not_invalidated(descr=...) i17 = force_token() setfield_gc(p0, i17, descr=<.* .*PyFrame.vable_token .*>) - f21 = call_release_gil(%s, 2.000000, 3.000000, descr=) + f21 = call_release_gil(%s, 2.000000, 3.000000, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) 
""" % pow_addr) diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -42,7 +42,7 @@ guard_not_invalidated(descr=...) i13 = int_lt(i7, i9) guard_true(i13, descr=...) - i15 = getarrayitem_raw(i10, i7, descr=<.*ArrayNoLengthDescr>) + i15 = getarrayitem_raw(i10, i7, descr=) i16 = int_add_ovf(i8, i15) guard_no_overflow(descr=...) i18 = int_add(i7, 1) @@ -72,17 +72,17 @@ guard_true(i13, descr=...) guard_not_invalidated(descr=...) # the bound check guard on img has been killed (thanks to the asserts) - i14 = getarrayitem_raw(i10, i8, descr=<.*ArrayNoLengthDescr>) + i14 = getarrayitem_raw(i10, i8, descr=) i15 = int_add_ovf(i9, i14) guard_no_overflow(descr=...) i17 = int_sub(i8, 640) # the bound check guard on intimg has been killed (thanks to the asserts) - i18 = getarrayitem_raw(i11, i17, descr=<.*ArrayNoLengthDescr>) + i18 = getarrayitem_raw(i11, i17, descr=) i19 = int_add_ovf(i18, i15) guard_no_overflow(descr=...) # on 64bit, there is a guard checking that i19 actually fits into 32bit ... - setarrayitem_raw(i11, i8, _, descr=<.*ArrayNoLengthDescr>) + setarrayitem_raw(i11, i8, _, descr=) i28 = int_add(i8, 1) --TICK-- jump(p0, p1, p2, p3, p4, p5, p6, i28, i15, p9, i10, i11, descr=...) @@ -107,10 +107,10 @@ guard_true(i10, descr=...) i11 = int_lt(i6, i7) guard_true(i11, descr=...) - f13 = getarrayitem_raw(i8, i6, descr=) + f13 = getarrayitem_raw(i8, i6, descr=) f15 = float_add(f13, 20.500000) - setarrayitem_raw(i8, i6, f15, descr=) - f16 = getarrayitem_raw(i8, i6, descr=) + setarrayitem_raw(i8, i6, f15, descr=) + f16 = getarrayitem_raw(i8, i6, descr=) i18 = float_eq(f16, 42.000000) guard_true(i18, descr=...) i20 = int_add(i6, 1) @@ -132,28 +132,24 @@ log = self.run(main, []) assert log.result == 321 loop, = log.loops_by_filename(self.filepath) - if sys.maxint == 2147483647: - arraydescr = 'UnsignedArrayNoLengthDescr' - else: - arraydescr = 'UINTArrayNoLengthDescr' assert loop.match(""" i10 = int_lt(i6, 1000) guard_true(i10, descr=...) i11 = int_lt(i6, i7) guard_true(i11, descr=...) - i13 = getarrayitem_raw(i8, i6, descr=<%s>) + i13 = getarrayitem_raw(i8, i6, descr=) f14 = cast_singlefloat_to_float(i13) f16 = float_add(f14, 20.500000) i17 = cast_float_to_singlefloat(f16) - setarrayitem_raw(i8, i6,i17, descr=<%s>) - i18 = getarrayitem_raw(i8, i6, descr=<%s>) + setarrayitem_raw(i8, i6,i17, descr=) + i18 = getarrayitem_raw(i8, i6, descr=) f19 = cast_singlefloat_to_float(i18) i21 = float_eq(f19, 42.000000) guard_true(i21, descr=...) i23 = int_add(i6, 1) --TICK-- jump(..., descr=...) - """ % (arraydescr, arraydescr, arraydescr)) + """) def test_zeropadded(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -75,12 +75,12 @@ assert log.opnames(ops) == [] # assert entry_bridge.match_by_id('call', """ - p38 = call(ConstClass(getexecutioncontext), descr=) - p39 = getfield_gc(p38, descr=) + p38 = call(ConstClass(getexecutioncontext), descr=) + p39 = getfield_gc(p38, descr=) i40 = force_token() - p41 = getfield_gc(p38, descr=) + p41 = getfield_gc(p38, descr=) guard_isnull(p41, descr=...) - i42 = getfield_gc(p38, descr=) + i42 = getfield_gc(p38, descr=) i43 = int_is_zero(i42) guard_true(i43, descr=...) 
i50 = force_token() @@ -192,7 +192,7 @@ assert log.result == 1000 loop, = log.loops_by_id('call') assert loop.match_by_id('call', """ - p14 = getarrayitem_gc_pure(p8, i9, descr=) + p14 = getarrayitem_gc_pure(p8, i9, descr=) i14 = force_token() i16 = force_token() """) @@ -336,15 +336,15 @@ loop, = log.loops_by_filename(self.filepath) # the int strategy is used here assert loop.match_by_id('append', """ - i13 = getfield_gc(p8, descr=) + i13 = getfield_gc(p8, descr=) i15 = int_add(i13, 1) # Will be killed by the backend - p15 = getfield_gc(p8, descr=) - i17 = arraylen_gc(p15, descr=) - call(_, p8, i15, descr=) # this is a call to _ll_list_resize_ge_trampoline__... + p15 = getfield_gc(p8, descr=) + i17 = arraylen_gc(p15, descr=) + call(_, p8, i15, descr=) # this is a call to _ll_list_resize_ge_trampoline__... guard_no_exception(descr=...) - p17 = getfield_gc(p8, descr=) - setarrayitem_gc(p17, i13, i12, descr=) + p17 = getfield_gc(p8, descr=) + setarrayitem_gc(p17, i13, i12, descr=) """) def test_blockstack_virtualizable(self): @@ -368,13 +368,13 @@ ... i20 = force_token() p22 = new_with_vtable(19511408) - p24 = new_array(1, descr=) + p24 = new_array(1, descr=) p26 = new_with_vtable(ConstClass(W_ListObject)) - setfield_gc(p0, i20, descr=) - setfield_gc(p26, ConstPtr(ptr22), descr=) - setarrayitem_gc(p24, 0, p26, descr=) - setfield_gc(p22, p24, descr=) - p32 = call_may_force(11376960, p18, p22, descr=) + setfield_gc(p0, i20, descr=) + setfield_gc(p26, ConstPtr(ptr22), descr=) + setarrayitem_gc(p24, 0, p26, descr=) + setfield_gc(p22, p24, descr=) + p32 = call_may_force(11376960, p18, p22, descr=) ... """) @@ -415,26 +415,26 @@ guard_nonnull_class(p8, ConstClass(W_IntObject), descr=...) guard_value(i4, 0, descr=...) guard_value(p3, ConstPtr(ptr14), descr=...) - i15 = getfield_gc_pure(p8, descr=) + i15 = getfield_gc_pure(p8, descr=) i17 = int_lt(i15, 5000) guard_true(i17, descr=...) - p18 = getfield_gc(p0, descr=) + p18 = getfield_gc(p0, descr=) guard_value(p18, ConstPtr(ptr19), descr=...) - p20 = getfield_gc(p18, descr=) + p20 = getfield_gc(p18, descr=) guard_value(p20, ConstPtr(ptr21), descr=...) guard_not_invalidated(descr=...) # most importantly, there is no getarrayitem_gc here - p23 = call(ConstClass(getexecutioncontext), descr=) - p24 = getfield_gc(p23, descr=) + p23 = call(ConstClass(getexecutioncontext), descr=) + p24 = getfield_gc(p23, descr=) i25 = force_token() - p26 = getfield_gc(p23, descr=) + p26 = getfield_gc(p23, descr=) guard_isnull(p26, descr=...) - i27 = getfield_gc(p23, descr=) + i27 = getfield_gc(p23, descr=) i28 = int_is_zero(i27) guard_true(i28, descr=...) - p30 = getfield_gc(ConstPtr(ptr29), descr=) + p30 = getfield_gc(ConstPtr(ptr29), descr=) guard_nonnull_class(p30, ConstClass(W_IntObject), descr=...) - i32 = getfield_gc_pure(p30, descr=) + i32 = getfield_gc_pure(p30, descr=) i33 = int_add_ovf(i15, i32) guard_no_overflow(descr=...) --TICK-- @@ -452,15 +452,15 @@ """, []) loop, = log.loops_by_id('call') assert loop.match(""" - i8 = getfield_gc_pure(p6, descr=) + i8 = getfield_gc_pure(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) i11 = force_token() i13 = int_add(i8, 1) --TICK-- p22 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p22, i13, descr=) - setfield_gc(p4, p22, descr=) + setfield_gc(p22, i13, descr=) + setfield_gc(p4, p22, descr=) jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=...) 
""") diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -46,7 +46,7 @@ assert loop.match_by_id("getitem", """ i26 = call(ConstClass(ll_dict_lookup), p18, p6, i25, descr=...) ... - p33 = getinteriorfield_gc(p31, i26, descr=>) + p33 = getinteriorfield_gc(p31, i26, descr=>) ... """) @@ -86,28 +86,28 @@ i8 = int_lt(i5, i7) guard_true(i8, descr=...) guard_not_invalidated(descr=...) - p10 = call(ConstClass(ll_int_str), i5, descr=) + p10 = call(ConstClass(ll_int_str), i5, descr=) guard_no_exception(descr=...) - i12 = call(ConstClass(ll_strhash), p10, descr=) + i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) - p15 = new_array(8, descr=) - setfield_gc(p13, p15, descr=) - i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) - setfield_gc(p13, 16, descr=) + p15 = new_array(8, descr=) + setfield_gc(p13, p15, descr=) + i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + setfield_gc(p13, 16, descr=) guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) - call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) - setfield_gc(p20, i5, descr=) + call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) + setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) - i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) guard_no_exception(descr=...) i26 = int_and(i23, .*) i27 = int_is_true(i26) guard_false(i27, descr=...) - p28 = getfield_gc(p13, descr=) - p29 = getinteriorfield_gc(p28, i23, descr=>) + p28 = getfield_gc(p13, descr=) + p29 = getinteriorfield_gc(p28, i23, descr=>) guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...) - i31 = getfield_gc_pure(p29, descr=) + i31 = getfield_gc_pure(p29, descr=) i32 = int_sub_ovf(i31, i5) guard_no_overflow(descr=...) i34 = int_add_ovf(i32, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -17,24 +17,17 @@ g() log = self.run(main, [500]) - # XXX XXX this test fails so far because of a detail that - # changed with jit-simplify-backendintf. We should try to - # think of a way to be more resistent against such details. - # The issue is that we now get one Tracing, then go back - # to the interpreter hoping to immediately run the JITted - # code; but instead, we Trace again, just because another - # counter was also about to reach its limit... loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p45, i29, descr=) - setarrayitem_gc(p8, 0, p45, descr=) - i47 = arraylen_gc(p8, descr=) # Should be removed by backend + setfield_gc(p45, i29, descr=) + setarrayitem_gc(p8, 0, p45, descr=) + i47 = arraylen_gc(p8, descr=) # Should be removed by backend jump(..., descr=...) """) assert loop.match_by_id("subtract", """ - setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me + setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me i2 = int_sub_ovf(i1, 42) guard_no_overflow(descr=...) 
""") diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -16,11 +16,11 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("loadglobal", """ - p10 = getfield_gc(p0, descr=) + p10 = getfield_gc(p0, descr=) guard_value(p10, ConstPtr(ptr11), descr=...) - p12 = getfield_gc(p10, descr=) + p12 = getfield_gc(p10, descr=) guard_value(p12, ConstPtr(ptr13), descr=...) guard_not_invalidated(descr=...) - p19 = getfield_gc(ConstPtr(p17), descr=) + p19 = getfield_gc(ConstPtr(p17), descr=) guard_value(p19, ConstPtr(ptr20), descr=...) - """) \ No newline at end of file + """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -125,8 +125,8 @@ i12 = force_token() --TICK-- p20 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p20, i11, descr=) - setfield_gc(ConstPtr(ptr21), p20, descr=) + setfield_gc(p20, i11, descr=) + setfield_gc(ConstPtr(ptr21), p20, descr=) jump(p0, p1, p2, p3, p4, p20, p6, i7, p20, descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -23,8 +23,8 @@ f1 = cast_int_to_float(i0) i3 = float_le(f1, 0) guard_false(i3, descr=...) - f2 = call(ConstClass(log), f1, descr=) - f3 = call(ConstClass(log10), f1, descr=) + f2 = call(ConstClass(log), f1, descr=) + f3 = call(ConstClass(log10), f1, descr=) f4 = float_sub(f2, f3) f5 = float_add(f0, f4) i4 = int_add(i0, 1) @@ -52,8 +52,8 @@ f1 = cast_int_to_float(i0) i6 = --ISINF--(f1) guard_false(i6, descr=...) - f2 = call(ConstClass(sin), f1, descr=) - f3 = call(ConstClass(cos), f1, descr=) + f2 = call(ConstClass(sin), f1, descr=) + f3 = call(ConstClass(cos), f1, descr=) f4 = float_sub(f2, f3) f5 = float_add(f0, f4) i7 = int_add(i0, f1) @@ -84,7 +84,7 @@ i4 = int_or(i2, i3) i5 = int_is_true(i4) guard_false(i5, descr=...) - f2 = call(ConstClass(fmod), f1, 2.0, descr=) + f2 = call(ConstClass(fmod), f1, 2.0, descr=) f3 = float_add(f0, f2) i6 = int_sub(i0, 1) --TICK-- diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -46,7 +46,7 @@ r *= n n -= 1 return r - log = self.run(fact, [7], threshold=5) + log = self.run(fact, [7], threshold=4) assert log.result == 5040 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" @@ -204,18 +204,18 @@ assert log.result == 1000000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - i14 = getfield_gc(p12, descr=) + i14 = getfield_gc(p12, descr=) i16 = uint_ge(i12, i14) guard_false(i16, descr=...) - p16 = getfield_gc(p12, descr=) - p17 = getarrayitem_gc(p16, i12, descr=) + p16 = getfield_gc(p12, descr=) + p17 = getarrayitem_gc(p16, i12, descr=) i19 = int_add(i12, 1) - setfield_gc(p9, i19, descr=) + setfield_gc(p9, i19, descr=) guard_nonnull_class(p17, 146982464, descr=...) - i21 = getfield_gc(p17, descr=) + i21 = getfield_gc(p17, descr=) i23 = int_lt(0, i21) guard_true(i23, descr=...) 
- i24 = getfield_gc(p17, descr=) + i24 = getfield_gc(p17, descr=) i25 = getarrayitem_raw(i24, 0, descr=<.*>) i27 = int_lt(1, i21) guard_false(i27, descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -1,6 +1,9 @@ from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC +# XXX review the descrs to replace some EF=4 with EF=3 (elidable) + + class TestString(BaseTestPyPyC): def test_lookup_default_encoding(self): def main(n): @@ -52,8 +55,8 @@ i += int(long(string.digits[i % len(string.digits)], 16)) return i - log = self.run(main, [1000]) - assert log.result == main(1000) + log = self.run(main, [1100]) + assert log.result == main(1100) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" i11 = int_lt(i6, i7) @@ -72,7 +75,7 @@ i23 = strgetitem(p10, i19) p25 = newstr(1) strsetitem(p25, 0, i23) - p28 = call(ConstClass(strip_spaces), p25, descr=) + p28 = call(ConstClass(strip_spaces), p25, descr=) guard_no_exception(descr=...) i29 = strlen(p28) i30 = int_is_true(i29) @@ -88,9 +91,9 @@ guard_false(i41, descr=...) i43 = int_eq(i39, 43) guard_false(i43, descr=...) - i43 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr42), descr=) + i43 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr42), descr=) guard_false(i43, descr=...) - i46 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr45), descr=) + i46 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr45), descr=) guard_false(i46, descr=...) p51 = new_with_vtable(21136408) setfield_gc(p51, _, descr=...) # 7 setfields, but the order is dict-order-dependent @@ -100,9 +103,9 @@ setfield_gc(p51, _, descr=...) setfield_gc(p51, _, descr=...) setfield_gc(p51, _, descr=...) - p55 = call(ConstClass(parse_digit_string), p51, descr=) + p55 = call(ConstClass(parse_digit_string), p51, descr=) guard_no_exception(descr=...) - i57 = call(ConstClass(rbigint.toint), p55, descr=) + i57 = call(ConstClass(rbigint.toint), p55, descr=) guard_no_exception(descr=...) i58 = int_add_ovf(i6, i57) guard_no_overflow(descr=...) @@ -125,7 +128,7 @@ i7 = int_gt(i4, 0) guard_true(i7, descr=...) guard_not_invalidated(descr=...) - p9 = call(ConstClass(ll_int2dec__Signed), i4, descr=) + p9 = call(ConstClass(ll_int2dec__Signed), i4, descr=) guard_no_exception(descr=...) i10 = strlen(p9) i11 = int_is_true(i10) @@ -149,7 +152,7 @@ copystrcontent(p9, p21, 0, i25, i10) i33 = int_lt(i30, 23) guard_true(i33, descr=...) - p35 = call(ConstClass(ll_shrink_array__rpy_stringPtr_Signed), p21, i30, descr=) + p35 = call(ConstClass(ll_shrink_array__rpy_stringPtr_Signed), p21, i30, descr=) guard_no_exception(descr=...) i37 = strlen(p35) i38 = int_add_ovf(i5, i37) @@ -192,6 +195,6 @@ strsetitem(p35, 3, 104) strsetitem(p35, 4, 95) copystrcontent(p31, p35, 0, 5, i32) - i49 = call(ConstClass(_ll_2_str_eq_nonnull__rpy_stringPtr_rpy_stringPtr), p35, ConstPtr(ptr48), descr=) - guard_value(i49, 1, descr=) + i49 = call(ConstClass(_ll_2_str_eq_nonnull__rpy_stringPtr_rpy_stringPtr), p35, ConstPtr(ptr48), descr=) + guard_value(i49, 1, descr=...) 
''') diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -524,11 +524,16 @@ upper = maxsplit - 1 assert upper >= 0 first = False - for i in range(upper): + try: + for i in range(upper): + builder.append(by) + builder.append(input[i]) builder.append(by) - builder.append(input[i]) - builder.append(by) - builder.append_slice(input, upper, len(input)) + builder.append_slice(input, upper, len(input)) + except MemoryError: + raise OperationError(space.w_OverflowError, + space.wrap("replace string too long") + ) else: start = 0 sublen = len(sub) diff --git a/pypy/objspace/std/test/test_stringobject.py b/pypy/objspace/std/test/test_stringobject.py --- a/pypy/objspace/std/test/test_stringobject.py +++ b/pypy/objspace/std/test/test_stringobject.py @@ -737,13 +737,6 @@ iterable = "hello" raises(TypeError, len, iter(iterable)) - def test_overflow_replace(self): - import sys - if sys.maxint > 2**31-1: - skip("Wrong platform") - x = "A" * (2**16) - raises(OverflowError, x.replace, '', x) - class AppTestPrebuilt(AppTestStringObject): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withprebuiltchar": True}) diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -395,7 +395,6 @@ 'retrace_limit': 5, 'max_retrace_guards': 15, 'enable_opts': 'all', - 'decay_halflife': 40, } unroll_parameters = unrolling_iterable(PARAMETERS.items()) DEFAULT = object() diff --git a/pypy/rlib/rstring.py b/pypy/rlib/rstring.py --- a/pypy/rlib/rstring.py +++ b/pypy/rlib/rstring.py @@ -3,6 +3,7 @@ from pypy.annotation.model import (SomeObject, SomeString, s_None, SomeChar, SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePtr, SomePBC) +from pypy.rlib.rarithmetic import ovfcheck from pypy.tool.pairtype import pair, pairtype from pypy.rpython.extregistry import ExtRegistryEntry @@ -52,25 +53,37 @@ class AbstractStringBuilder(object): def __init__(self, init_size=INIT_SIZE): self.l = [] + self.size = 0 + + def _grow(self, size): + try: + self.size = ovfcheck(self.size + size) + except OverflowError: + raise MemoryError def append(self, s): assert isinstance(s, self.tp) self.l.append(s) + self._grow(len(s)) def append_slice(self, s, start, end): assert isinstance(s, self.tp) assert 0 <= start <= end <= len(s) - self.l.append(s[start:end]) + s = s[start:end] + self.l.append(s) + self._grow(len(s)) def append_multiple_char(self, c, times): assert isinstance(c, self.tp) self.l.append(c * times) + self._grow(times) def append_charpsize(self, s, size): l = [] for i in xrange(size): l.append(s[i]) self.l.append(self.tp("").join(l)) + self._grow(size) def build(self): return self.tp("").join(self.l) diff --git a/pypy/rpython/lltypesystem/llarena.py b/pypy/rpython/lltypesystem/llarena.py --- a/pypy/rpython/lltypesystem/llarena.py +++ b/pypy/rpython/lltypesystem/llarena.py @@ -374,6 +374,7 @@ following an object. For arenas containing heterogenous objects. 
If minsize is specified, it gives a minimum on the resulting size.""" return _round_up_for_allocation(size, minsize) +round_up_for_allocation._annenforceargs_ = [int, int] def _round_up_for_allocation(size, minsize): # internal return RoundedUpForAllocation(size, minsize) diff --git a/pypy/tool/jitlogparser/test/test_modulefinder.py b/pypy/tool/jitlogparser/test/test_modulefinder.py --- a/pypy/tool/jitlogparser/test/test_modulefinder.py +++ b/pypy/tool/jitlogparser/test/test_modulefinder.py @@ -3,7 +3,7 @@ import re, sys def setup_module(mod): - if sys.version_info[:2] != (2.6): + if sys.version_info[:2] != (2, 6): py.test.skip("Specific python 2.6 tests") def test_gather_code_py(): From noreply at buildbot.pypy.org Tue Dec 20 21:58:18 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Dec 2011 21:58:18 +0100 (CET) Subject: [pypy-commit] pypy better-jit-hooks: add a test I wrote one day Message-ID: <20111220205818.C79F6820B7@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: better-jit-hooks Changeset: r50776:97c31fd53d0e Date: 2011-12-20 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/97c31fd53d0e/ Log: add a test I wrote one day diff --git a/pypy/jit/metainterp/test/test_jitportal.py b/pypy/jit/metainterp/test/test_jitportal.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/test_jitportal.py @@ -0,0 +1,34 @@ + +from pypy.rlib.jit import JitDriver, JitPortal +from pypy.jit.metainterp.test.support import LLJitMixin +from pypy.jit.codewriter.policy import PortalPolicy + +class TestJitPortal(LLJitMixin): + def test_abort_quasi_immut(self): + class MyJitPortal(JitPortal): + def abort(self, *args): + xxxx + + portal = MyJitPortal() + + myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) + + class Foo: + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + def f(a, x): + foo = Foo(a) + total = 0 + while x > 0: + myjitdriver.jit_merge_point(foo=foo, x=x, total=total) + # read a quasi-immutable field out of a Constant + total += foo.a + foo.a += 1 + x -= 1 + return total + # + assert f(100, 7) == 721 + res = self.meta_interp(f, [100, 7], policy=PortalPolicy(portal)) + assert res == 721 + From noreply at buildbot.pypy.org Tue Dec 20 22:18:17 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 20 Dec 2011 22:18:17 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: merged default in Message-ID: <20111220211817.D4262820B7@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: refactor-signature Changeset: r50777:f5fa3640940a Date: 2011-12-20 14:22 -0600 http://bitbucket.org/pypy/pypy/changeset/f5fa3640940a/ Log: merged default in diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -425,7 +425,15 @@ self.arg_classes.count('L')) == len(args_f or ()) def repr_of_descr(self): - return '' % (self.arg_classes, self.result_type) + res = 'Call%s %d' % (self.result_type, self.result_size) + if self.arg_classes: + res += ' ' + self.arg_classes + if self.extrainfo: + res += ' EF=%d' % self.extrainfo.extraeffect + oopspecindex = self.extrainfo.oopspecindex + if oopspecindex: + res += ' OS=%d' % oopspecindex + return '<%s>' % res def map_type_to_argclass(ARG, accept_void=False): diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -313,6 
+313,10 @@ def test_repr_of_descr(): + def repr_of_descr(descr): + s = descr.repr_of_descr() + assert ',' not in s # makes the life easier for pypy.tool.jitlogparser + return s c0 = GcCache(False) T = lltype.GcStruct('T') S = lltype.GcStruct('S', ('x', lltype.Char), @@ -320,34 +324,34 @@ ('z', lltype.Ptr(T))) descr1 = get_size_descr(c0, S) s = symbolic.get_size(S, False) - assert descr1.repr_of_descr() == '' % s + assert repr_of_descr(descr1) == '' % s # descr2 = get_field_descr(c0, S, 'y') o, _ = symbolic.get_field_token(S, 'y', False) - assert descr2.repr_of_descr() == '' % o + assert repr_of_descr(descr2) == '' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) - assert descr2i.repr_of_descr() == '' % o + assert repr_of_descr(descr2i) == '' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) o = symbolic.get_size(lltype.Ptr(S), False) - assert descr3.repr_of_descr() == '' % o + assert repr_of_descr(descr3) == '' % o # descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert descr3i.repr_of_descr() == '' + assert repr_of_descr(descr3i) == '' # descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) - assert descr4.repr_of_descr() == '' + assert repr_of_descr(descr4) == '' % o # descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) - assert descr4i.repr_of_descr() == '' + assert repr_of_descr(descr4i) == '' # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) - assert descr4f.repr_of_descr() == '' + assert repr_of_descr(descr4f) == '' # descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat) - assert descr5f.repr_of_descr() == '' + assert repr_of_descr(descr5f) == '' def test_call_stubs_1(): c0 = GcCache(False) diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -1,5 +1,5 @@ import math -from pypy.rlib.rarithmetic import r_int64, r_uint +from pypy.rlib.rarithmetic import r_int64 from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.rlib.objectmodel import we_are_translated @@ -21,7 +21,6 @@ # class MemoryManager(object): - NO_NEXT_CHECK = r_int64(2 ** 63 - 1) def __init__(self): self.check_frequency = -1 @@ -37,13 +36,12 @@ # According to my estimates it's about 5e9 years given 1000 loops # per second self.current_generation = r_int64(1) - self.next_check = self.NO_NEXT_CHECK + self.next_check = r_int64(-1) self.alive_loops = {} - self._cleanup_jitcell_dicts = lambda: None def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: - self.next_check = self.NO_NEXT_CHECK + self.next_check = r_int64(-1) else: self.max_age = max_age if check_frequency <= 0: @@ -51,11 +49,10 @@ self.check_frequency = check_frequency self.next_check = self.current_generation + 1 - def next_generation(self, do_cleanups_now=True): + def next_generation(self): self.current_generation += 1 - if do_cleanups_now and self.current_generation >= self.next_check: + if self.current_generation == self.next_check: self._kill_old_loops_now() - self._cleanup_jitcell_dicts() self.next_check = self.current_generation + self.check_frequency def keep_loop_alive(self, looptoken): @@ -84,22 +81,3 @@ # a single one is not enough for all tests :-( rgc.collect(); rgc.collect(); rgc.collect() debug_stop("jit-mem-collect") - - def get_current_generation_uint(self): - """Return the current generation, possibly truncated to a uint. 
- To use only as an approximation for decaying counters.""" - return r_uint(self.current_generation) - - def record_jitcell_dict(self, callback): - """NOT_RPYTHON. The given jitcell_dict is a dict that needs - occasional clean-ups of old cells. A cell is old if it never - reached the threshold, and its counter decayed to a tiny value.""" - # note that the various jitcell_dicts have different RPython types, - # so we have to make a different function for each one. These - # functions are chained to each other: each calls the previous one. - def cleanup_dict(): - callback() - cleanup_previous() - # - cleanup_previous = self._cleanup_jitcell_dicts - self._cleanup_jitcell_dicts = cleanup_dict diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -1,3 +1,4 @@ +from __future__ import with_statement from pypy.jit.metainterp.optimizeopt.test.test_util import ( LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot) from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2910,27 +2910,6 @@ res = self.meta_interp(f, [32]) assert res == f(32) - def test_decay_counters(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def f(m, n): - while n > 0: - myjitdriver.jit_merge_point(m=m, n=n) - n += m - n -= m - n -= 1 - def main(): - f(5, 7) # run 7x with m=5 counter[m=5] = 7 - f(15, 10) # compiles one loop counter[m=5] = 3 (automatic decay) - f(5, 5) # run 5x times with m=5 counter[m=5] = 8 - # - self.meta_interp(main, [], decay_halflife=1, - function_threshold=0, threshold=9, trace_eagerness=99) - self.check_trace_count(1) - # - self.meta_interp(main, [], decay_halflife=1, - function_threshold=0, threshold=8, trace_eagerness=99) - self.check_trace_count(2) - class TestOOtype(BasicTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -1,4 +1,3 @@ -import math from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -9,7 +8,7 @@ from pypy.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr from pypy.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr from pypy.jit.codewriter import longlong -from pypy.rlib.rarithmetic import r_singlefloat, r_uint +from pypy.rlib.rarithmetic import r_singlefloat def boxfloat(x): return BoxFloat(longlong.getfloatstorage(x)) @@ -277,76 +276,51 @@ res = state.can_never_inline(5, 42.5) assert res is True -def test_decay_counters(): - cell = JitCell(r_uint(5)) - cell.counter = 100 - cell.adjust_counter(r_uint(5), math.log(0.9)) - assert cell.counter == 100 - cell.adjust_counter(r_uint(6), math.log(0.9)) - assert cell.counter == 90 - cell.adjust_counter(r_uint(9), math.log(0.9)) - assert cell.counter == int(90 * (0.9**3)) - def test_cleanup_jitcell_dict(): - from pypy.jit.metainterp.memmgr import MemoryManager - class FakeWarmRunnerDesc: - memory_manager = MemoryManager() - class cpu: - pass class FakeJitDriverSD: _green_args_spec = [lltype.Signed] # # Test creating tons of 
jitcells that remain at 0 - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() cell1 = get_jitcell(True, -1) assert len(warmstate._jitcell_dict) == 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 1 # for i in range(1, 20005): get_jitcell(True, i) # should trigger a clean-up at 20001 assert len(warmstate._jitcell_dict) == (i % 20000) + 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 2 # # Same test, with one jitcell that has a counter of BASE instead of 0 - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) - warmstate.set_param_decay_halflife(2) - warmstate.set_param_threshold(5) - warmstate.set_param_function_threshold(0) + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() cell2 = get_jitcell(True, -2) - cell2.counter = BASE = warmstate.increment_threshold * 3 + cell2.counter = BASE = warmstate.THRESHOLD_LIMIT // 2 # 50% # for i in range(0, 20005): get_jitcell(True, i) assert len(warmstate._jitcell_dict) == (i % 19999) + 2 # assert cell2 in warmstate._jitcell_dict.values() - assert cell2.counter == int(BASE * math.sqrt(0.5)) # decayed once - assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 + assert cell2.counter == int(BASE * 0.92) # decayed once # - # Same test, with jitcells that are compiled and free by the memmgr - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + # Same test, with jitcells that are compiled and freed by the memmgr + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() get_jitcell(True, -1) - assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 # for i in range(1, 20005): cell = get_jitcell(True, i) cell.counter = -1 cell.wref_procedure_token = None # or a dead weakref, equivalently assert len(warmstate._jitcell_dict) == (i % 20000) + 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 # # Same test, with counter == -2 (rare case, kept alive) - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() cell = get_jitcell(True, -1) cell.counter = -2 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 # for i in range(1, 20005): cell = get_jitcell(True, i) cell.counter = -2 assert len(warmstate._jitcell_dict) == i + 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 5 diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -64,11 +64,9 @@ def jittify_and_run(interp, graph, args, repeat=1, graph_and_interp_only=False, backendopt=False, trace_limit=sys.maxint, - threshold=3, trace_eagerness=2, inline=False, loop_longevity=0, retrace_limit=5, - function_threshold=4, decay_halflife=0, - enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, - **kwds): + function_threshold=4, + enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, **kwds): from pypy.config.config import ConfigError translator = interp.typer.annotator.translator try: @@ -85,16 +83,15 @@ pass warmrunnerdesc = WarmRunnerDesc(translator, backendopt=backendopt, **kwds) for jd in warmrunnerdesc.jitdrivers_sd: - jd.warmstate.set_param_threshold(threshold) + jd.warmstate.set_param_threshold(3) # for tests 
jd.warmstate.set_param_function_threshold(function_threshold) - jd.warmstate.set_param_trace_eagerness(trace_eagerness) + jd.warmstate.set_param_trace_eagerness(2) # for tests jd.warmstate.set_param_trace_limit(trace_limit) jd.warmstate.set_param_inlining(inline) jd.warmstate.set_param_loop_longevity(loop_longevity) jd.warmstate.set_param_retrace_limit(retrace_limit) jd.warmstate.set_param_max_retrace_guards(max_retrace_guards) jd.warmstate.set_param_enable_opts(enable_opts) - jd.warmstate.set_param_decay_halflife(decay_halflife) warmrunnerdesc.finish() if graph_and_interp_only: return interp, graph diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -1,10 +1,10 @@ -import sys, weakref, math +import sys, weakref from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, cast_base_ptr_to_instance from pypy.rpython.annlowlevel import cast_object_to_ptr from pypy.rlib.objectmodel import specialize, we_are_translated, r_dict -from pypy.rlib.rarithmetic import intmask, r_uint +from pypy.rlib.rarithmetic import intmask from pypy.rlib.nonconst import NonConstant from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.jit import PARAMETERS @@ -151,27 +151,9 @@ # counter == -2: tracing is currently going on for this cell counter = 0 dont_trace_here = False + extra_delay = chr(0) wref_procedure_token = None - def __init__(self, generation): - # The stored 'counter' value follows an exponential decay model. - # Conceptually after every generation, it decays by getting - # multiplied by a constant <= 1.0. In practice, decaying occurs - # lazily: the following field records the latest seen generation - # number, and adjustment is done by adjust_counter() when needed. - self.latest_generation_seen = generation - - def adjust_counter(self, generation, log_decay_factor): - if generation != self.latest_generation_seen: - # The latest_generation_seen is older than the current generation. - # Adjust by multiplying self.counter N times by decay_factor, i.e. - # by decay_factor ** N, which is equal to exp(log(decay_factor)*N). - assert self.counter >= 0 - N = generation - self.latest_generation_seen - factor = math.exp(log_decay_factor * N) - self.counter = int(self.counter * factor) - self.latest_generation_seen = generation - def get_procedure_token(self): if self.wref_procedure_token is not None: token = self.wref_procedure_token() @@ -231,17 +213,6 @@ def set_param_inlining(self, value): self.inlining = value - def set_param_decay_halflife(self, value): - # Use 0 or -1 to mean "no decay". Initialize the internal variable - # 'log_decay_factor'. It is choosen such that by multiplying the - # counter on loops by 'exp(log_decay_factor)' (<= 1.0) every - # generation, then the counter will be divided by two after 'value' - # generations have passed. 
- if value <= 0: - self.log_decay_factor = 0.0 # log(1.0) - else: - self.log_decay_factor = math.log(0.5) / value - def set_param_enable_opts(self, value): from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT, ALL_OPTS_NAMES @@ -311,11 +282,6 @@ confirm_enter_jit = self.confirm_enter_jit range_red_args = unrolling_iterable( range(num_green_args, num_green_args + jitdriver_sd.num_red_args)) - memmgr = self.warmrunnerdesc.memory_manager - if memmgr is not None: - get_current_generation = memmgr.get_current_generation_uint - else: - get_current_generation = lambda: r_uint(0) # get a new specialized copy of the method ARGS = [] for kind in jitdriver_sd.red_args_types: @@ -350,6 +316,36 @@ # assert 0, "should have raised" + def bound_reached(cell, *args): + # bound reached, but we do a last check: if it is the first + # time we reach the bound, or if another loop or bridge was + # compiled since the last time we reached it, then decrease + # the counter by a few percents instead. It should avoid + # sudden bursts of JIT-compilation, and also corner cases + # where we suddenly compile more than one loop because all + # counters reach the bound at the same time, but where + # compiling all but the first one is pointless. + curgen = warmrunnerdesc.memory_manager.current_generation + curgen = chr(intmask(curgen) & 0xFF) # only use 8 bits + if we_are_translated() and curgen != cell.extra_delay: + cell.counter = int(self.THRESHOLD_LIMIT * 0.98) + cell.extra_delay = curgen + return + # + if not confirm_enter_jit(*args): + cell.counter = 0 + return + # start tracing + from pypy.jit.metainterp.pyjitpl import MetaInterp + metainterp = MetaInterp(metainterp_sd, jitdriver_sd) + # set counter to -2, to mean "tracing in effect" + cell.counter = -2 + try: + metainterp.compile_and_run_once(jitdriver_sd, *args) + finally: + if cell.counter == -2: + cell.counter = 0 + def maybe_compile_and_run(threshold, *args): """Entry point to the JIT. Called at the point with the can_enter_jit() hint. 
@@ -360,25 +356,13 @@ if cell.counter >= 0: # update the profiling counter - cell.adjust_counter(get_current_generation(), - self.log_decay_factor) n = cell.counter + threshold if n <= self.THRESHOLD_LIMIT: # bound not reached cell.counter = n return - if not confirm_enter_jit(*args): - cell.counter = 0 + else: + bound_reached(cell, *args) return - # bound reached; start tracing - from pypy.jit.metainterp.pyjitpl import MetaInterp - metainterp = MetaInterp(metainterp_sd, jitdriver_sd) - # set counter to -2, to mean "tracing in effect" - cell.counter = -2 - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - finally: - if cell.counter == -2: - cell.counter = 0 else: if cell.counter != -1: assert cell.counter == -2 @@ -454,15 +438,6 @@ # return jit_getter - def _new_jitcell(self): - warmrunnerdesc = self.warmrunnerdesc - if (warmrunnerdesc is not None and - warmrunnerdesc.memory_manager is not None): - gen = warmrunnerdesc.memory_manager.get_current_generation_uint() - else: - gen = r_uint(0) - return JitCell(gen) - def _make_jitcell_getter_default(self): "NOT_RPYTHON" jitdriver_sd = self.jitdriver_sd @@ -492,44 +467,32 @@ except AttributeError: pass # - memmgr = self.warmrunnerdesc and self.warmrunnerdesc.memory_manager - if memmgr: - def _cleanup_dict(): - minimum = sys.maxint - if self.increment_threshold > 0: - minimum = min(minimum, self.increment_threshold) - if self.increment_function_threshold > 0: - minimum = min(minimum, self.increment_function_threshold) - currentgen = memmgr.get_current_generation_uint() - killme = [] - for key, cell in jitcell_dict.iteritems(): - if cell.counter >= 0: - cell.adjust_counter(currentgen, self.log_decay_factor) - if cell.counter < minimum: - killme.append(key) - elif (cell.counter == -1 - and cell.get_procedure_token() is None): + def _cleanup_dict(): + minimum = self.THRESHOLD_LIMIT // 20 # minimum 5% + killme = [] + for key, cell in jitcell_dict.iteritems(): + if cell.counter >= 0: + cell.counter = int(cell.counter * 0.92) + if cell.counter < minimum: killme.append(key) - for key in killme: - del jitcell_dict[key] - # - def _maybe_cleanup_dict(): - # If no tracing goes on at all because the jitcells are - # each time for new greenargs, the dictionary grows forever. - # So every one in a (rare) while, we decide to force an - # artificial next_generation() and _cleanup_dict(). - self._trigger_automatic_cleanup += 1 - if self._trigger_automatic_cleanup > 20000: - self._trigger_automatic_cleanup = 0 - memmgr.next_generation(do_cleanups_now=False) - _cleanup_dict() - # - self._trigger_automatic_cleanup = 0 - self._jitcell_dict = jitcell_dict # for tests - memmgr.record_jitcell_dict(_cleanup_dict) - else: - def _maybe_cleanup_dict(): - pass + elif (cell.counter == -1 + and cell.get_procedure_token() is None): + killme.append(key) + for key in killme: + del jitcell_dict[key] + # + def _maybe_cleanup_dict(): + # Once in a while, rarely, when too many entries have + # been put in the jitdict_dict, we do a cleanup phase: + # we decay all counters and kill entries with a too + # low counter. 
+ self._trigger_automatic_cleanup += 1 + if self._trigger_automatic_cleanup > 20000: + self._trigger_automatic_cleanup = 0 + _cleanup_dict() + # + self._trigger_automatic_cleanup = 0 + self._jitcell_dict = jitcell_dict # for tests # def get_jitcell(build, *greenargs): try: @@ -538,7 +501,7 @@ if not build: return None _maybe_cleanup_dict() - cell = self._new_jitcell() + cell = JitCell() jitcell_dict[greenargs] = cell return cell return get_jitcell @@ -549,7 +512,7 @@ get_jitcell_at_ptr = self.jitdriver_sd._get_jitcell_at_ptr set_jitcell_at_ptr = self.jitdriver_sd._set_jitcell_at_ptr lltohlhack = {} - # note that there is no equivalent of record_jitcell_dict() + # note that there is no equivalent of _maybe_cleanup_dict() # in the case of custom getters. We assume that the interpreter # stores the JitCells on some objects that can go away by GC, # like the PyCode objects in PyPy. @@ -574,7 +537,7 @@ if not build: return cell if cell is None: - cell = self._new_jitcell() + cell = JitCell() # if we_are_translated(): cellref = cast_object_to_ptr(BASEJITCELL, cell) diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -311,7 +311,7 @@ # to repeat it every time ticker_check = """ guard_not_invalidated? - ticker0 = getfield_raw(ticker_address, descr=) + ticker0 = getfield_raw(ticker_address, descr=) ticker_cond0 = int_lt(ticker0, 0) guard_false(ticker_cond0, descr=...) """ @@ -320,9 +320,9 @@ # this is the ticker check generated if we have threads thread_ticker_check = """ guard_not_invalidated? - ticker0 = getfield_raw(ticker_address, descr=) + ticker0 = getfield_raw(ticker_address, descr=) ticker1 = int_sub(ticker0, _) - setfield_raw(ticker_address, ticker1, descr=) + setfield_raw(ticker_address, ticker1, descr=) ticker_cond0 = int_lt(ticker1, 0) guard_false(ticker_cond0, descr=...) """ @@ -330,7 +330,7 @@ # # this is the ticker check generated in PyFrame.handle_operation_error exc_ticker_check = """ - ticker2 = getfield_raw(ticker_address, descr=) + ticker2 = getfield_raw(ticker_address, descr=) ticker_cond1 = int_lt(ticker2, 0) guard_false(ticker_cond1, descr=...) 
""" @@ -451,7 +451,6 @@ try: self.match_loop(expected_ops, ignore_ops) except InvalidMatch, e: - #raise # uncomment this and use py.test --pdb for better debugging print '@' * 40 print "Loops don't match" print "=================" @@ -464,7 +463,7 @@ print print "Expected:" print format(expected_src) - return False + raise # always propagate the exception in case of mismatch else: return True diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -7,8 +7,9 @@ from pypy.tool.udir import udir from pypy.tool import logparser from pypy.jit.tool.jitoutput import parse_prof -from pypy.module.pypyjit.test_pypy_c.model import Log, find_ids_range, find_ids, \ - TraceWithIds, OpMatcher +from pypy.module.pypyjit.test_pypy_c.model import (Log, find_ids_range, + find_ids, TraceWithIds, + OpMatcher, InvalidMatch) class BaseTestPyPyC(object): def setup_class(cls): @@ -115,13 +116,18 @@ assert opcodes_names == ['LOAD_FAST', 'LOAD_CONST', 'BINARY_ADD', 'STORE_FAST'] -class TestOpMatcher(object): +class TestOpMatcher_(object): def match(self, src1, src2, **kwds): from pypy.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) matcher = OpMatcher(loop.operations) - return matcher.match(src2, **kwds) + try: + res = matcher.match(src2, **kwds) + assert res is True + return True + except InvalidMatch: + return False def test_match_var(self): match_var = OpMatcher([]).match_var @@ -447,7 +453,7 @@ jump(p0, p1, p2, p3, i8, descr=...) """) # - assert not loop.match(""" + py.test.raises(InvalidMatch, loop.match, """ i6 = int_lt(i4, 1003) guard_true(i6) i8 = int_add(i5, 1) # variable mismatch @@ -492,9 +498,8 @@ guard_no_exception(descr=...) """) # - assert not loop.match_by_id('ntohs', """ + py.test.raises(InvalidMatch, loop.match_by_id, 'ntohs', """ guard_not_invalidated(descr=...) p12 = call(ConstClass(foobar), 1, descr=...) guard_no_exception(descr=...) """) - diff --git a/pypy/module/pypyjit/test_pypy_c/test__ffi.py b/pypy/module/pypyjit/test_pypy_c/test__ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test__ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test__ffi.py @@ -35,7 +35,7 @@ guard_not_invalidated(descr=...) i17 = force_token() setfield_gc(p0, i17, descr=<.* .*PyFrame.vable_token .*>) - f21 = call_release_gil(%s, 2.000000, 3.000000, descr=) + f21 = call_release_gil(%s, 2.000000, 3.000000, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) """ % pow_addr) diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -42,7 +42,7 @@ guard_not_invalidated(descr=...) i13 = int_lt(i7, i9) guard_true(i13, descr=...) - i15 = getarrayitem_raw(i10, i7, descr=<.*ArrayNoLengthDescr>) + i15 = getarrayitem_raw(i10, i7, descr=) i16 = int_add_ovf(i8, i15) guard_no_overflow(descr=...) i18 = int_add(i7, 1) @@ -72,17 +72,17 @@ guard_true(i13, descr=...) guard_not_invalidated(descr=...) # the bound check guard on img has been killed (thanks to the asserts) - i14 = getarrayitem_raw(i10, i8, descr=<.*ArrayNoLengthDescr>) + i14 = getarrayitem_raw(i10, i8, descr=) i15 = int_add_ovf(i9, i14) guard_no_overflow(descr=...) 
i17 = int_sub(i8, 640) # the bound check guard on intimg has been killed (thanks to the asserts) - i18 = getarrayitem_raw(i11, i17, descr=<.*ArrayNoLengthDescr>) + i18 = getarrayitem_raw(i11, i17, descr=) i19 = int_add_ovf(i18, i15) guard_no_overflow(descr=...) # on 64bit, there is a guard checking that i19 actually fits into 32bit ... - setarrayitem_raw(i11, i8, _, descr=<.*ArrayNoLengthDescr>) + setarrayitem_raw(i11, i8, _, descr=) i28 = int_add(i8, 1) --TICK-- jump(p0, p1, p2, p3, p4, p5, p6, i28, i15, p9, i10, i11, descr=...) @@ -107,10 +107,10 @@ guard_true(i10, descr=...) i11 = int_lt(i6, i7) guard_true(i11, descr=...) - f13 = getarrayitem_raw(i8, i6, descr=) + f13 = getarrayitem_raw(i8, i6, descr=) f15 = float_add(f13, 20.500000) - setarrayitem_raw(i8, i6, f15, descr=) - f16 = getarrayitem_raw(i8, i6, descr=) + setarrayitem_raw(i8, i6, f15, descr=) + f16 = getarrayitem_raw(i8, i6, descr=) i18 = float_eq(f16, 42.000000) guard_true(i18, descr=...) i20 = int_add(i6, 1) @@ -132,28 +132,24 @@ log = self.run(main, []) assert log.result == 321 loop, = log.loops_by_filename(self.filepath) - if sys.maxint == 2147483647: - arraydescr = 'UnsignedArrayNoLengthDescr' - else: - arraydescr = 'UINTArrayNoLengthDescr' assert loop.match(""" i10 = int_lt(i6, 1000) guard_true(i10, descr=...) i11 = int_lt(i6, i7) guard_true(i11, descr=...) - i13 = getarrayitem_raw(i8, i6, descr=<%s>) + i13 = getarrayitem_raw(i8, i6, descr=) f14 = cast_singlefloat_to_float(i13) f16 = float_add(f14, 20.500000) i17 = cast_float_to_singlefloat(f16) - setarrayitem_raw(i8, i6,i17, descr=<%s>) - i18 = getarrayitem_raw(i8, i6, descr=<%s>) + setarrayitem_raw(i8, i6,i17, descr=) + i18 = getarrayitem_raw(i8, i6, descr=) f19 = cast_singlefloat_to_float(i18) i21 = float_eq(f19, 42.000000) guard_true(i21, descr=...) i23 = int_add(i6, 1) --TICK-- jump(..., descr=...) - """ % (arraydescr, arraydescr, arraydescr)) + """) def test_zeropadded(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -75,12 +75,12 @@ assert log.opnames(ops) == [] # assert entry_bridge.match_by_id('call', """ - p38 = call(ConstClass(getexecutioncontext), descr=) - p39 = getfield_gc(p38, descr=) + p38 = call(ConstClass(getexecutioncontext), descr=) + p39 = getfield_gc(p38, descr=) i40 = force_token() - p41 = getfield_gc(p38, descr=) + p41 = getfield_gc(p38, descr=) guard_isnull(p41, descr=...) - i42 = getfield_gc(p38, descr=) + i42 = getfield_gc(p38, descr=) i43 = int_is_zero(i42) guard_true(i43, descr=...) i50 = force_token() @@ -192,7 +192,7 @@ assert log.result == 1000 loop, = log.loops_by_id('call') assert loop.match_by_id('call', """ - p14 = getarrayitem_gc_pure(p8, i9, descr=) + p14 = getarrayitem_gc_pure(p8, i9, descr=) i14 = force_token() i16 = force_token() """) @@ -336,15 +336,15 @@ loop, = log.loops_by_filename(self.filepath) # the int strategy is used here assert loop.match_by_id('append', """ - i13 = getfield_gc(p8, descr=) + i13 = getfield_gc(p8, descr=) i15 = int_add(i13, 1) # Will be killed by the backend - p15 = getfield_gc(p8, descr=) - i17 = arraylen_gc(p15, descr=) - call(_, p8, i15, descr=) # this is a call to _ll_list_resize_ge_trampoline__... + p15 = getfield_gc(p8, descr=) + i17 = arraylen_gc(p15, descr=) + call(_, p8, i15, descr=) # this is a call to _ll_list_resize_ge_trampoline__... guard_no_exception(descr=...) 
- p17 = getfield_gc(p8, descr=) - setarrayitem_gc(p17, i13, i12, descr=) + p17 = getfield_gc(p8, descr=) + setarrayitem_gc(p17, i13, i12, descr=) """) def test_blockstack_virtualizable(self): @@ -368,13 +368,13 @@ ... i20 = force_token() p22 = new_with_vtable(19511408) - p24 = new_array(1, descr=) + p24 = new_array(1, descr=) p26 = new_with_vtable(ConstClass(W_ListObject)) - setfield_gc(p0, i20, descr=) - setfield_gc(p26, ConstPtr(ptr22), descr=) - setarrayitem_gc(p24, 0, p26, descr=) - setfield_gc(p22, p24, descr=) - p32 = call_may_force(11376960, p18, p22, descr=) + setfield_gc(p0, i20, descr=) + setfield_gc(p26, ConstPtr(ptr22), descr=) + setarrayitem_gc(p24, 0, p26, descr=) + setfield_gc(p22, p24, descr=) + p32 = call_may_force(11376960, p18, p22, descr=) ... """) @@ -415,26 +415,26 @@ guard_nonnull_class(p8, ConstClass(W_IntObject), descr=...) guard_value(i4, 0, descr=...) guard_value(p3, ConstPtr(ptr14), descr=...) - i15 = getfield_gc_pure(p8, descr=) + i15 = getfield_gc_pure(p8, descr=) i17 = int_lt(i15, 5000) guard_true(i17, descr=...) - p18 = getfield_gc(p0, descr=) + p18 = getfield_gc(p0, descr=) guard_value(p18, ConstPtr(ptr19), descr=...) - p20 = getfield_gc(p18, descr=) + p20 = getfield_gc(p18, descr=) guard_value(p20, ConstPtr(ptr21), descr=...) guard_not_invalidated(descr=...) # most importantly, there is no getarrayitem_gc here - p23 = call(ConstClass(getexecutioncontext), descr=) - p24 = getfield_gc(p23, descr=) + p23 = call(ConstClass(getexecutioncontext), descr=) + p24 = getfield_gc(p23, descr=) i25 = force_token() - p26 = getfield_gc(p23, descr=) + p26 = getfield_gc(p23, descr=) guard_isnull(p26, descr=...) - i27 = getfield_gc(p23, descr=) + i27 = getfield_gc(p23, descr=) i28 = int_is_zero(i27) guard_true(i28, descr=...) - p30 = getfield_gc(ConstPtr(ptr29), descr=) + p30 = getfield_gc(ConstPtr(ptr29), descr=) guard_nonnull_class(p30, ConstClass(W_IntObject), descr=...) - i32 = getfield_gc_pure(p30, descr=) + i32 = getfield_gc_pure(p30, descr=) i33 = int_add_ovf(i15, i32) guard_no_overflow(descr=...) --TICK-- @@ -452,15 +452,15 @@ """, []) loop, = log.loops_by_id('call') assert loop.match(""" - i8 = getfield_gc_pure(p6, descr=) + i8 = getfield_gc_pure(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) i11 = force_token() i13 = int_add(i8, 1) --TICK-- p22 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p22, i13, descr=) - setfield_gc(p4, p22, descr=) + setfield_gc(p22, i13, descr=) + setfield_gc(p4, p22, descr=) jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -46,7 +46,7 @@ assert loop.match_by_id("getitem", """ i26 = call(ConstClass(ll_dict_lookup), p18, p6, i25, descr=...) ... - p33 = getinteriorfield_gc(p31, i26, descr=>) + p33 = getinteriorfield_gc(p31, i26, descr=>) ... """) @@ -86,28 +86,28 @@ i8 = int_lt(i5, i7) guard_true(i8, descr=...) guard_not_invalidated(descr=...) - p10 = call(ConstClass(ll_int_str), i5, descr=) + p10 = call(ConstClass(ll_int_str), i5, descr=) guard_no_exception(descr=...) - i12 = call(ConstClass(ll_strhash), p10, descr=) + i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) 
- p15 = new_array(8, descr=) - setfield_gc(p13, p15, descr=) - i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) - setfield_gc(p13, 16, descr=) + p15 = new_array(8, descr=) + setfield_gc(p13, p15, descr=) + i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + setfield_gc(p13, 16, descr=) guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) - call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) - setfield_gc(p20, i5, descr=) + call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) + setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) - i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) guard_no_exception(descr=...) i26 = int_and(i23, .*) i27 = int_is_true(i26) guard_false(i27, descr=...) - p28 = getfield_gc(p13, descr=) - p29 = getinteriorfield_gc(p28, i23, descr=>) + p28 = getfield_gc(p13, descr=) + p29 = getinteriorfield_gc(p28, i23, descr=>) guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...) - i31 = getfield_gc_pure(p29, descr=) + i31 = getfield_gc_pure(p29, descr=) i32 = int_sub_ovf(i31, i5) guard_no_overflow(descr=...) i34 = int_add_ovf(i32, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -21,9 +21,9 @@ assert loop.match_by_id("generator", """ i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p45, i29, descr=) - setarrayitem_gc(p8, 0, p45, descr=) - i47 = arraylen_gc(p8, descr=) # Should be removed by backend + setfield_gc(p45, i29, descr=) + setarrayitem_gc(p8, 0, p45, descr=) + i47 = arraylen_gc(p8, descr=) # Should be removed by backend jump(..., descr=...) """) assert loop.match_by_id("subtract", """ diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -16,11 +16,11 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("loadglobal", """ - p10 = getfield_gc(p0, descr=) + p10 = getfield_gc(p0, descr=) guard_value(p10, ConstPtr(ptr11), descr=...) - p12 = getfield_gc(p10, descr=) + p12 = getfield_gc(p10, descr=) guard_value(p12, ConstPtr(ptr13), descr=...) guard_not_invalidated(descr=...) - p19 = getfield_gc(ConstPtr(p17), descr=) + p19 = getfield_gc(ConstPtr(p17), descr=) guard_value(p19, ConstPtr(ptr20), descr=...) - """) \ No newline at end of file + """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -125,8 +125,8 @@ i12 = force_token() --TICK-- p20 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p20, i11, descr=) - setfield_gc(ConstPtr(ptr21), p20, descr=) + setfield_gc(p20, i11, descr=) + setfield_gc(ConstPtr(ptr21), p20, descr=) jump(p0, p1, p2, p3, p4, p20, p6, i7, p20, descr=...) 
""") diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -23,8 +23,8 @@ f1 = cast_int_to_float(i0) i3 = float_le(f1, 0) guard_false(i3, descr=...) - f2 = call(ConstClass(log), f1, descr=) - f3 = call(ConstClass(log10), f1, descr=) + f2 = call(ConstClass(log), f1, descr=) + f3 = call(ConstClass(log10), f1, descr=) f4 = float_sub(f2, f3) f5 = float_add(f0, f4) i4 = int_add(i0, 1) @@ -52,8 +52,8 @@ f1 = cast_int_to_float(i0) i6 = --ISINF--(f1) guard_false(i6, descr=...) - f2 = call(ConstClass(sin), f1, descr=) - f3 = call(ConstClass(cos), f1, descr=) + f2 = call(ConstClass(sin), f1, descr=) + f3 = call(ConstClass(cos), f1, descr=) f4 = float_sub(f2, f3) f5 = float_add(f0, f4) i7 = int_add(i0, f1) @@ -84,7 +84,7 @@ i4 = int_or(i2, i3) i5 = int_is_true(i4) guard_false(i5, descr=...) - f2 = call(ConstClass(fmod), f1, 2.0, descr=) + f2 = call(ConstClass(fmod), f1, 2.0, descr=) f3 = float_add(f0, f2) i6 = int_sub(i0, 1) --TICK-- diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -46,7 +46,7 @@ r *= n n -= 1 return r - log = self.run(fact, [7], threshold=5) + log = self.run(fact, [7], threshold=4) assert log.result == 5040 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" @@ -204,18 +204,18 @@ assert log.result == 1000000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - i14 = getfield_gc(p12, descr=) + i14 = getfield_gc(p12, descr=) i16 = uint_ge(i12, i14) guard_false(i16, descr=...) - p16 = getfield_gc(p12, descr=) - p17 = getarrayitem_gc(p16, i12, descr=) + p16 = getfield_gc(p12, descr=) + p17 = getarrayitem_gc(p16, i12, descr=) i19 = int_add(i12, 1) - setfield_gc(p9, i19, descr=) + setfield_gc(p9, i19, descr=) guard_nonnull_class(p17, 146982464, descr=...) - i21 = getfield_gc(p17, descr=) + i21 = getfield_gc(p17, descr=) i23 = int_lt(0, i21) guard_true(i23, descr=...) - i24 = getfield_gc(p17, descr=) + i24 = getfield_gc(p17, descr=) i25 = getarrayitem_raw(i24, 0, descr=<.*>) i27 = int_lt(1, i21) guard_false(i27, descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -1,6 +1,9 @@ from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC +# XXX review the descrs to replace some EF=4 with EF=3 (elidable) + + class TestString(BaseTestPyPyC): def test_lookup_default_encoding(self): def main(n): @@ -52,8 +55,8 @@ i += int(long(string.digits[i % len(string.digits)], 16)) return i - log = self.run(main, [1000]) - assert log.result == main(1000) + log = self.run(main, [1100]) + assert log.result == main(1100) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" i11 = int_lt(i6, i7) @@ -72,7 +75,7 @@ i23 = strgetitem(p10, i19) p25 = newstr(1) strsetitem(p25, 0, i23) - p28 = call(ConstClass(strip_spaces), p25, descr=) + p28 = call(ConstClass(strip_spaces), p25, descr=) guard_no_exception(descr=...) i29 = strlen(p28) i30 = int_is_true(i29) @@ -88,9 +91,9 @@ guard_false(i41, descr=...) i43 = int_eq(i39, 43) guard_false(i43, descr=...) 
- i43 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr42), descr=) + i43 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr42), descr=) guard_false(i43, descr=...) - i46 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr45), descr=) + i46 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr45), descr=) guard_false(i46, descr=...) p51 = new_with_vtable(21136408) setfield_gc(p51, _, descr=...) # 7 setfields, but the order is dict-order-dependent @@ -100,9 +103,9 @@ setfield_gc(p51, _, descr=...) setfield_gc(p51, _, descr=...) setfield_gc(p51, _, descr=...) - p55 = call(ConstClass(parse_digit_string), p51, descr=) + p55 = call(ConstClass(parse_digit_string), p51, descr=) guard_no_exception(descr=...) - i57 = call(ConstClass(rbigint.toint), p55, descr=) + i57 = call(ConstClass(rbigint.toint), p55, descr=) guard_no_exception(descr=...) i58 = int_add_ovf(i6, i57) guard_no_overflow(descr=...) @@ -125,7 +128,7 @@ i7 = int_gt(i4, 0) guard_true(i7, descr=...) guard_not_invalidated(descr=...) - p9 = call(ConstClass(ll_int2dec__Signed), i4, descr=) + p9 = call(ConstClass(ll_int2dec__Signed), i4, descr=) guard_no_exception(descr=...) i10 = strlen(p9) i11 = int_is_true(i10) @@ -149,7 +152,7 @@ copystrcontent(p9, p21, 0, i25, i10) i33 = int_lt(i30, 23) guard_true(i33, descr=...) - p35 = call(ConstClass(ll_shrink_array__rpy_stringPtr_Signed), p21, i30, descr=) + p35 = call(ConstClass(ll_shrink_array__rpy_stringPtr_Signed), p21, i30, descr=) guard_no_exception(descr=...) i37 = strlen(p35) i38 = int_add_ovf(i5, i37) @@ -192,6 +195,6 @@ strsetitem(p35, 3, 104) strsetitem(p35, 4, 95) copystrcontent(p31, p35, 0, 5, i32) - i49 = call(ConstClass(_ll_2_str_eq_nonnull__rpy_stringPtr_rpy_stringPtr), p35, ConstPtr(ptr48), descr=) - guard_value(i49, 1, descr=) + i49 = call(ConstClass(_ll_2_str_eq_nonnull__rpy_stringPtr_rpy_stringPtr), p35, ConstPtr(ptr48), descr=) + guard_value(i49, 1, descr=...) 
''') diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -395,7 +395,6 @@ 'retrace_limit': 5, 'max_retrace_guards': 15, 'enable_opts': 'all', - 'decay_halflife': 40, } unroll_parameters = unrolling_iterable(PARAMETERS.items()) DEFAULT = object() From noreply at buildbot.pypy.org Tue Dec 20 22:18:19 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 20 Dec 2011 22:18:19 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: better name Message-ID: <20111220211819.039E4820B7@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: refactor-signature Changeset: r50778:3ad573e7d40c Date: 2011-12-20 15:02 -0600 http://bitbucket.org/pypy/pypy/changeset/3ad573e7d40c/ Log: better name diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -8,7 +8,7 @@ def sigeq(one, two): return one.eq(two) -def sigeq_numbering(one, two): +def sigeq_no_numbering(one, two): """ Cache for iterator numbering should not compare array numbers """ return one.eq(two, compare_array_no=False) @@ -73,7 +73,7 @@ iter_no = 0 def invent_numbering(self): - cache = r_dict(sigeq_numbering, sighash) + cache = r_dict(sigeq_no_numbering, sighash) allnumbers = [] self._invent_numbering(cache, allnumbers) From noreply at buildbot.pypy.org Tue Dec 20 22:23:31 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 20 Dec 2011 22:23:31 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: dead code Message-ID: <20111220212331.3419B820B7@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: refactor-signature Changeset: r50779:970ab217c79f Date: 2011-12-20 15:23 -0600 http://bitbucket.org/pypy/pypy/changeset/970ab217c79f/ Log: dead code diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -658,9 +658,6 @@ return self - def get_storage(self, space): - raise OperationError(space.w_TypeError, space.wrap("Cannot get array interface on scalars in pypy")) - class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs @@ -1051,13 +1048,6 @@ self.invalidated() self.dtype.setitem(self.storage, item, value) - def start_iter(self, res_shape=None): - if self.order == 'C': - if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - return ArrayIterator(self.size) - raise NotImplementedError # use ViewIterator simply, test it - def setshape(self, space, new_shape): self.shape = new_shape self.calc_strides(new_shape) @@ -1065,9 +1055,6 @@ def create_sig(self, res_shape): return self.array_sig(res_shape) - def get_storage(self, space): - return self.storage - def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) From noreply at buildbot.pypy.org Tue Dec 20 22:25:49 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 20 Dec 2011 22:25:49 +0100 (CET) Subject: [pypy-commit] pypy pyarg-parsetuple-s-star-buffer: cpyext: let buffer objects implement the C buffer interface. Message-ID: <20111220212549.38B4A820B7@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: pyarg-parsetuple-s-star-buffer Changeset: r50780:6f6ce72c22e5 Date: 2011-12-20 22:25 +0100 http://bitbucket.org/pypy/pypy/changeset/6f6ce72c22e5/ Log: cpyext: let buffer objects implement the C buffer interface. 
diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -2,7 +2,7 @@ from pypy.module.cpyext.api import ( cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, PyObjectFields, PyObject) -from pypy.module.cpyext.pyobject import make_typedescr +from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer @@ -25,7 +25,7 @@ make_typedescr(space.gettypefor(Buffer).instancetypedef, basestruct=PyBufferObject.TO, attach=buffer_attach, - # dealloc=buffer_dealloc, + dealloc=buffer_dealloc, realize=buffer_realize) def buffer_attach(space, py_obj, w_obj): @@ -57,6 +57,10 @@ -# @cpython_api([PyObject], lltype.Void, external=False) -# def buffer_dealloc(space, py_obj): - + at cpython_api([PyObject], lltype.Void, external=False) +def buffer_dealloc(space, py_obj): + py_buf = rffi.cast(PyBufferObject, py_obj) + Py_DecRef(space, py_buf.c_b_base) + rffi.free_charp(py_buf.c_b_ptr) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -113,8 +113,6 @@ @specialize.memo() def _get_typedescr_1(typedef): - if typedef.name == "buffer": - import pdb; pdb.set_trace() try: return typedescr_cache[typedef] except KeyError: diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -137,13 +137,11 @@ if (!PyArg_ParseTuple(args, "s*", &buf)) { return NULL; } - printf("OH NO %s %d\\n", buf.buf, buf.len); - fflush(stdout); result = PyString_FromStringAndSize(buf.buf, buf.len); PyBuffer_Release(&buf); return result; ''') - assert buffer('foo\0bar\0baz') == pybuffer(buffer('foo\0bar\0baz')) + assert 'foo\0bar\0baz' == pybuffer(buffer('foo\0bar\0baz')) def test_pyarg_parse_charbuf_and_length(self): diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -28,6 +28,7 @@ PyNumberMethods, PyMappingMethods, PySequenceMethods, PyBufferProcs) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) +from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError from pypy.rlib.rstring import rsplit from pypy.rlib.objectmodel import specialize @@ -418,8 +419,20 @@ Py_DecRef(space, pyref) return space.len_w(w_str) + at cpython_api([PyObject, Py_ssize_t, rffi.VOIDPP], lltype.Signed, + external=False, error=-1) +def buf_getreadbuffer(space, pyref, segment, ref): + from pypy.module.cpyext.bufferobject import PyBufferObject + if segment != 0: + raise OperationError(space.w_SystemError, space.wrap + ("accessing non-existent string segment")) + py_buf = rffi.cast(PyBufferObject, pyref) + ref[0] = py_buf.c_b_ptr + #Py_DecRef(space, pyref) + return py_buf.c_b_size + def setup_string_buffer_procs(space, pto): - c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) + c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True, immortal=True) c_buf.c_bf_getsegcount = llhelper(str_segcount.api_func.functype, str_segcount.api_func.get_wrapper(space)) c_buf.c_bf_getreadbuffer = llhelper(str_getreadbuffer.api_func.functype, @@ -429,6 +442,14 @@ 
pto.c_tp_as_buffer = c_buf pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER +def setup_buffer_buffer_procs(space, pto): + c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True, immortal=True) + c_buf.c_bf_getsegcount = llhelper(str_segcount.api_func.functype, + str_segcount.api_func.get_wrapper(space)) + c_buf.c_bf_getreadbuffer = llhelper(buf_getreadbuffer.api_func.functype, + buf_getreadbuffer.api_func.get_wrapper(space)) + pto.c_tp_as_buffer = c_buf + @cpython_api([PyObject], lltype.Void, external=False) def type_dealloc(space, obj): from pypy.module.cpyext.object import PyObject_dealloc @@ -484,6 +505,8 @@ # buffer protocol if space.is_w(w_type, space.w_str): setup_string_buffer_procs(space, pto) + if space.is_w(w_type, space.gettypefor(Buffer)): + setup_buffer_buffer_procs(space, pto) pto.c_tp_free = llhelper(PyObject_Del.api_func.functype, PyObject_Del.api_func.get_wrapper(space)) From noreply at buildbot.pypy.org Tue Dec 20 23:08:21 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Dec 2011 23:08:21 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: oops actually fix the test Message-ID: <20111220220821.6F2E7823F8@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50782:114830fb70f1 Date: 2011-12-21 00:06 +0200 http://bitbucket.org/pypy/pypy/changeset/114830fb70f1/ Log: oops actually fix the test diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -911,10 +911,10 @@ d = c[::2][::2] assert d[1] == 8 b = a + a - c = c[::2] + c = b[::2] c[:] = 3 assert b[0] == 3 - assert b[1] == 4 + assert b[1] == 2 def test_tolist_scalar(self): from numpypy import int32, bool_ From noreply at buildbot.pypy.org Tue Dec 20 23:08:20 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Dec 2011 23:08:20 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: kill some dead code and a failing test Message-ID: <20111220220820.4A841820B7@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50781:5bb7c7ef1481 Date: 2011-12-21 00:05 +0200 http://bitbucket.org/pypy/pypy/changeset/5bb7c7ef1481/ Log: kill some dead code and a failing test diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -468,9 +468,6 @@ def descr_getitem(self, space, w_idx): if self._single_item_result(space, w_idx): concrete = self.get_concrete() - if len(concrete.shape) < 1: - raise OperationError(space.w_IndexError, space.wrap( - "0-d arrays can't be indexed")) item = concrete._index_of_single_item(space, w_idx) return concrete.getitem(item) chunks = self._prepare_slice_args(space, w_idx) @@ -480,9 +477,6 @@ self.invalidated() if self._single_item_result(space, w_idx): concrete = self.get_concrete() - if len(concrete.shape) < 1: - raise OperationError(space.w_IndexError, space.wrap( - "0-d arrays can't be indexed")) item = concrete._index_of_single_item(space, w_idx) dtype = concrete.find_dtype() concrete.setitem(item, dtype.coerce(space, w_value)) @@ -642,9 +636,6 @@ def find_dtype(self): return self.dtype - def getitem(self, item): - raise NotImplementedError - def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): builder.append(self.dtype.itemtype.str_format(self.value)) @@ -657,10 +648,6 @@ def 
get_concrete_or_scalar(self): return self - - def get_storage(self, space): - raise OperationError(space.w_TypeError, space.wrap("Cannot get array interface on scalars in pypy")) - class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs @@ -1065,9 +1052,6 @@ def create_sig(self, res_shape): return self.array_sig(res_shape) - def get_storage(self, space): - return self.storage - def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -910,6 +910,11 @@ c = (a + a) d = c[::2][::2] assert d[1] == 8 + b = a + a + c = c[::2] + c[:] = 3 + assert b[0] == 3 + assert b[1] == 4 def test_tolist_scalar(self): from numpypy import int32, bool_ From noreply at buildbot.pypy.org Tue Dec 20 23:08:22 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Dec 2011 23:08:22 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: merge Message-ID: <20111220220822.AAA9B820B7@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50783:1f10a3b24405 Date: 2011-12-21 00:07 +0200 http://bitbucket.org/pypy/pypy/changeset/1f10a3b24405/ Log: merge diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -425,7 +425,15 @@ self.arg_classes.count('L')) == len(args_f or ()) def repr_of_descr(self): - return '' % (self.arg_classes, self.result_type) + res = 'Call%s %d' % (self.result_type, self.result_size) + if self.arg_classes: + res += ' ' + self.arg_classes + if self.extrainfo: + res += ' EF=%d' % self.extrainfo.extraeffect + oopspecindex = self.extrainfo.oopspecindex + if oopspecindex: + res += ' OS=%d' % oopspecindex + return '<%s>' % res def map_type_to_argclass(ARG, accept_void=False): diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -313,6 +313,10 @@ def test_repr_of_descr(): + def repr_of_descr(descr): + s = descr.repr_of_descr() + assert ',' not in s # makes the life easier for pypy.tool.jitlogparser + return s c0 = GcCache(False) T = lltype.GcStruct('T') S = lltype.GcStruct('S', ('x', lltype.Char), @@ -320,34 +324,34 @@ ('z', lltype.Ptr(T))) descr1 = get_size_descr(c0, S) s = symbolic.get_size(S, False) - assert descr1.repr_of_descr() == '' % s + assert repr_of_descr(descr1) == '' % s # descr2 = get_field_descr(c0, S, 'y') o, _ = symbolic.get_field_token(S, 'y', False) - assert descr2.repr_of_descr() == '' % o + assert repr_of_descr(descr2) == '' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) - assert descr2i.repr_of_descr() == '' % o + assert repr_of_descr(descr2i) == '' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) o = symbolic.get_size(lltype.Ptr(S), False) - assert descr3.repr_of_descr() == '' % o + assert repr_of_descr(descr3) == '' % o # descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert descr3i.repr_of_descr() == '' + assert repr_of_descr(descr3i) == '' # descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) - assert descr4.repr_of_descr() == '' + assert repr_of_descr(descr4) == '' % o # descr4i = 
get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) - assert descr4i.repr_of_descr() == '' + assert repr_of_descr(descr4i) == '' # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) - assert descr4f.repr_of_descr() == '' + assert repr_of_descr(descr4f) == '' # descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat) - assert descr5f.repr_of_descr() == '' + assert repr_of_descr(descr5f) == '' def test_call_stubs_1(): c0 = GcCache(False) diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -1,5 +1,5 @@ import math -from pypy.rlib.rarithmetic import r_int64, r_uint +from pypy.rlib.rarithmetic import r_int64 from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.rlib.objectmodel import we_are_translated @@ -21,7 +21,6 @@ # class MemoryManager(object): - NO_NEXT_CHECK = r_int64(2 ** 63 - 1) def __init__(self): self.check_frequency = -1 @@ -37,13 +36,12 @@ # According to my estimates it's about 5e9 years given 1000 loops # per second self.current_generation = r_int64(1) - self.next_check = self.NO_NEXT_CHECK + self.next_check = r_int64(-1) self.alive_loops = {} - self._cleanup_jitcell_dicts = lambda: None def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: - self.next_check = self.NO_NEXT_CHECK + self.next_check = r_int64(-1) else: self.max_age = max_age if check_frequency <= 0: @@ -51,11 +49,10 @@ self.check_frequency = check_frequency self.next_check = self.current_generation + 1 - def next_generation(self, do_cleanups_now=True): + def next_generation(self): self.current_generation += 1 - if do_cleanups_now and self.current_generation >= self.next_check: + if self.current_generation == self.next_check: self._kill_old_loops_now() - self._cleanup_jitcell_dicts() self.next_check = self.current_generation + self.check_frequency def keep_loop_alive(self, looptoken): @@ -84,22 +81,3 @@ # a single one is not enough for all tests :-( rgc.collect(); rgc.collect(); rgc.collect() debug_stop("jit-mem-collect") - - def get_current_generation_uint(self): - """Return the current generation, possibly truncated to a uint. - To use only as an approximation for decaying counters.""" - return r_uint(self.current_generation) - - def record_jitcell_dict(self, callback): - """NOT_RPYTHON. The given jitcell_dict is a dict that needs - occasional clean-ups of old cells. A cell is old if it never - reached the threshold, and its counter decayed to a tiny value.""" - # note that the various jitcell_dicts have different RPython types, - # so we have to make a different function for each one. These - # functions are chained to each other: each calls the previous one. 
- def cleanup_dict(): - callback() - cleanup_previous() - # - cleanup_previous = self._cleanup_jitcell_dicts - self._cleanup_jitcell_dicts = cleanup_dict diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -1,3 +1,4 @@ +from __future__ import with_statement from pypy.jit.metainterp.optimizeopt.test.test_util import ( LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot) from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2910,27 +2910,6 @@ res = self.meta_interp(f, [32]) assert res == f(32) - def test_decay_counters(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def f(m, n): - while n > 0: - myjitdriver.jit_merge_point(m=m, n=n) - n += m - n -= m - n -= 1 - def main(): - f(5, 7) # run 7x with m=5 counter[m=5] = 7 - f(15, 10) # compiles one loop counter[m=5] = 3 (automatic decay) - f(5, 5) # run 5x times with m=5 counter[m=5] = 8 - # - self.meta_interp(main, [], decay_halflife=1, - function_threshold=0, threshold=9, trace_eagerness=99) - self.check_trace_count(1) - # - self.meta_interp(main, [], decay_halflife=1, - function_threshold=0, threshold=8, trace_eagerness=99) - self.check_trace_count(2) - class TestOOtype(BasicTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -1,4 +1,3 @@ -import math from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -9,7 +8,7 @@ from pypy.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr from pypy.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr from pypy.jit.codewriter import longlong -from pypy.rlib.rarithmetic import r_singlefloat, r_uint +from pypy.rlib.rarithmetic import r_singlefloat def boxfloat(x): return BoxFloat(longlong.getfloatstorage(x)) @@ -277,76 +276,51 @@ res = state.can_never_inline(5, 42.5) assert res is True -def test_decay_counters(): - cell = JitCell(r_uint(5)) - cell.counter = 100 - cell.adjust_counter(r_uint(5), math.log(0.9)) - assert cell.counter == 100 - cell.adjust_counter(r_uint(6), math.log(0.9)) - assert cell.counter == 90 - cell.adjust_counter(r_uint(9), math.log(0.9)) - assert cell.counter == int(90 * (0.9**3)) - def test_cleanup_jitcell_dict(): - from pypy.jit.metainterp.memmgr import MemoryManager - class FakeWarmRunnerDesc: - memory_manager = MemoryManager() - class cpu: - pass class FakeJitDriverSD: _green_args_spec = [lltype.Signed] # # Test creating tons of jitcells that remain at 0 - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() cell1 = get_jitcell(True, -1) assert len(warmstate._jitcell_dict) == 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 1 # for i in range(1, 20005): get_jitcell(True, i) # should trigger a clean-up at 20001 assert len(warmstate._jitcell_dict) == (i % 20000) + 1 - assert 
FakeWarmRunnerDesc.memory_manager.current_generation == 2 # # Same test, with one jitcell that has a counter of BASE instead of 0 - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) - warmstate.set_param_decay_halflife(2) - warmstate.set_param_threshold(5) - warmstate.set_param_function_threshold(0) + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() cell2 = get_jitcell(True, -2) - cell2.counter = BASE = warmstate.increment_threshold * 3 + cell2.counter = BASE = warmstate.THRESHOLD_LIMIT // 2 # 50% # for i in range(0, 20005): get_jitcell(True, i) assert len(warmstate._jitcell_dict) == (i % 19999) + 2 # assert cell2 in warmstate._jitcell_dict.values() - assert cell2.counter == int(BASE * math.sqrt(0.5)) # decayed once - assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 + assert cell2.counter == int(BASE * 0.92) # decayed once # - # Same test, with jitcells that are compiled and free by the memmgr - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + # Same test, with jitcells that are compiled and freed by the memmgr + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() get_jitcell(True, -1) - assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 # for i in range(1, 20005): cell = get_jitcell(True, i) cell.counter = -1 cell.wref_procedure_token = None # or a dead weakref, equivalently assert len(warmstate._jitcell_dict) == (i % 20000) + 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 # # Same test, with counter == -2 (rare case, kept alive) - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() cell = get_jitcell(True, -1) cell.counter = -2 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 # for i in range(1, 20005): cell = get_jitcell(True, i) cell.counter = -2 assert len(warmstate._jitcell_dict) == i + 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 5 diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -64,11 +64,9 @@ def jittify_and_run(interp, graph, args, repeat=1, graph_and_interp_only=False, backendopt=False, trace_limit=sys.maxint, - threshold=3, trace_eagerness=2, inline=False, loop_longevity=0, retrace_limit=5, - function_threshold=4, decay_halflife=0, - enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, - **kwds): + function_threshold=4, + enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, **kwds): from pypy.config.config import ConfigError translator = interp.typer.annotator.translator try: @@ -85,16 +83,15 @@ pass warmrunnerdesc = WarmRunnerDesc(translator, backendopt=backendopt, **kwds) for jd in warmrunnerdesc.jitdrivers_sd: - jd.warmstate.set_param_threshold(threshold) + jd.warmstate.set_param_threshold(3) # for tests jd.warmstate.set_param_function_threshold(function_threshold) - jd.warmstate.set_param_trace_eagerness(trace_eagerness) + jd.warmstate.set_param_trace_eagerness(2) # for tests jd.warmstate.set_param_trace_limit(trace_limit) jd.warmstate.set_param_inlining(inline) jd.warmstate.set_param_loop_longevity(loop_longevity) jd.warmstate.set_param_retrace_limit(retrace_limit) jd.warmstate.set_param_max_retrace_guards(max_retrace_guards) jd.warmstate.set_param_enable_opts(enable_opts) - 
jd.warmstate.set_param_decay_halflife(decay_halflife) warmrunnerdesc.finish() if graph_and_interp_only: return interp, graph diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -1,10 +1,10 @@ -import sys, weakref, math +import sys, weakref from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, cast_base_ptr_to_instance from pypy.rpython.annlowlevel import cast_object_to_ptr from pypy.rlib.objectmodel import specialize, we_are_translated, r_dict -from pypy.rlib.rarithmetic import intmask, r_uint +from pypy.rlib.rarithmetic import intmask from pypy.rlib.nonconst import NonConstant from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.jit import PARAMETERS @@ -151,27 +151,9 @@ # counter == -2: tracing is currently going on for this cell counter = 0 dont_trace_here = False + extra_delay = chr(0) wref_procedure_token = None - def __init__(self, generation): - # The stored 'counter' value follows an exponential decay model. - # Conceptually after every generation, it decays by getting - # multiplied by a constant <= 1.0. In practice, decaying occurs - # lazily: the following field records the latest seen generation - # number, and adjustment is done by adjust_counter() when needed. - self.latest_generation_seen = generation - - def adjust_counter(self, generation, log_decay_factor): - if generation != self.latest_generation_seen: - # The latest_generation_seen is older than the current generation. - # Adjust by multiplying self.counter N times by decay_factor, i.e. - # by decay_factor ** N, which is equal to exp(log(decay_factor)*N). - assert self.counter >= 0 - N = generation - self.latest_generation_seen - factor = math.exp(log_decay_factor * N) - self.counter = int(self.counter * factor) - self.latest_generation_seen = generation - def get_procedure_token(self): if self.wref_procedure_token is not None: token = self.wref_procedure_token() @@ -231,17 +213,6 @@ def set_param_inlining(self, value): self.inlining = value - def set_param_decay_halflife(self, value): - # Use 0 or -1 to mean "no decay". Initialize the internal variable - # 'log_decay_factor'. It is choosen such that by multiplying the - # counter on loops by 'exp(log_decay_factor)' (<= 1.0) every - # generation, then the counter will be divided by two after 'value' - # generations have passed. - if value <= 0: - self.log_decay_factor = 0.0 # log(1.0) - else: - self.log_decay_factor = math.log(0.5) / value - def set_param_enable_opts(self, value): from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT, ALL_OPTS_NAMES @@ -311,11 +282,6 @@ confirm_enter_jit = self.confirm_enter_jit range_red_args = unrolling_iterable( range(num_green_args, num_green_args + jitdriver_sd.num_red_args)) - memmgr = self.warmrunnerdesc.memory_manager - if memmgr is not None: - get_current_generation = memmgr.get_current_generation_uint - else: - get_current_generation = lambda: r_uint(0) # get a new specialized copy of the method ARGS = [] for kind in jitdriver_sd.red_args_types: @@ -350,6 +316,36 @@ # assert 0, "should have raised" + def bound_reached(cell, *args): + # bound reached, but we do a last check: if it is the first + # time we reach the bound, or if another loop or bridge was + # compiled since the last time we reached it, then decrease + # the counter by a few percents instead. 
It should avoid + # sudden bursts of JIT-compilation, and also corner cases + # where we suddenly compile more than one loop because all + # counters reach the bound at the same time, but where + # compiling all but the first one is pointless. + curgen = warmrunnerdesc.memory_manager.current_generation + curgen = chr(intmask(curgen) & 0xFF) # only use 8 bits + if we_are_translated() and curgen != cell.extra_delay: + cell.counter = int(self.THRESHOLD_LIMIT * 0.98) + cell.extra_delay = curgen + return + # + if not confirm_enter_jit(*args): + cell.counter = 0 + return + # start tracing + from pypy.jit.metainterp.pyjitpl import MetaInterp + metainterp = MetaInterp(metainterp_sd, jitdriver_sd) + # set counter to -2, to mean "tracing in effect" + cell.counter = -2 + try: + metainterp.compile_and_run_once(jitdriver_sd, *args) + finally: + if cell.counter == -2: + cell.counter = 0 + def maybe_compile_and_run(threshold, *args): """Entry point to the JIT. Called at the point with the can_enter_jit() hint. @@ -360,25 +356,13 @@ if cell.counter >= 0: # update the profiling counter - cell.adjust_counter(get_current_generation(), - self.log_decay_factor) n = cell.counter + threshold if n <= self.THRESHOLD_LIMIT: # bound not reached cell.counter = n return - if not confirm_enter_jit(*args): - cell.counter = 0 + else: + bound_reached(cell, *args) return - # bound reached; start tracing - from pypy.jit.metainterp.pyjitpl import MetaInterp - metainterp = MetaInterp(metainterp_sd, jitdriver_sd) - # set counter to -2, to mean "tracing in effect" - cell.counter = -2 - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - finally: - if cell.counter == -2: - cell.counter = 0 else: if cell.counter != -1: assert cell.counter == -2 @@ -454,15 +438,6 @@ # return jit_getter - def _new_jitcell(self): - warmrunnerdesc = self.warmrunnerdesc - if (warmrunnerdesc is not None and - warmrunnerdesc.memory_manager is not None): - gen = warmrunnerdesc.memory_manager.get_current_generation_uint() - else: - gen = r_uint(0) - return JitCell(gen) - def _make_jitcell_getter_default(self): "NOT_RPYTHON" jitdriver_sd = self.jitdriver_sd @@ -492,44 +467,32 @@ except AttributeError: pass # - memmgr = self.warmrunnerdesc and self.warmrunnerdesc.memory_manager - if memmgr: - def _cleanup_dict(): - minimum = sys.maxint - if self.increment_threshold > 0: - minimum = min(minimum, self.increment_threshold) - if self.increment_function_threshold > 0: - minimum = min(minimum, self.increment_function_threshold) - currentgen = memmgr.get_current_generation_uint() - killme = [] - for key, cell in jitcell_dict.iteritems(): - if cell.counter >= 0: - cell.adjust_counter(currentgen, self.log_decay_factor) - if cell.counter < minimum: - killme.append(key) - elif (cell.counter == -1 - and cell.get_procedure_token() is None): + def _cleanup_dict(): + minimum = self.THRESHOLD_LIMIT // 20 # minimum 5% + killme = [] + for key, cell in jitcell_dict.iteritems(): + if cell.counter >= 0: + cell.counter = int(cell.counter * 0.92) + if cell.counter < minimum: killme.append(key) - for key in killme: - del jitcell_dict[key] - # - def _maybe_cleanup_dict(): - # If no tracing goes on at all because the jitcells are - # each time for new greenargs, the dictionary grows forever. - # So every one in a (rare) while, we decide to force an - # artificial next_generation() and _cleanup_dict(). 
- self._trigger_automatic_cleanup += 1 - if self._trigger_automatic_cleanup > 20000: - self._trigger_automatic_cleanup = 0 - memmgr.next_generation(do_cleanups_now=False) - _cleanup_dict() - # - self._trigger_automatic_cleanup = 0 - self._jitcell_dict = jitcell_dict # for tests - memmgr.record_jitcell_dict(_cleanup_dict) - else: - def _maybe_cleanup_dict(): - pass + elif (cell.counter == -1 + and cell.get_procedure_token() is None): + killme.append(key) + for key in killme: + del jitcell_dict[key] + # + def _maybe_cleanup_dict(): + # Once in a while, rarely, when too many entries have + # been put in the jitdict_dict, we do a cleanup phase: + # we decay all counters and kill entries with a too + # low counter. + self._trigger_automatic_cleanup += 1 + if self._trigger_automatic_cleanup > 20000: + self._trigger_automatic_cleanup = 0 + _cleanup_dict() + # + self._trigger_automatic_cleanup = 0 + self._jitcell_dict = jitcell_dict # for tests # def get_jitcell(build, *greenargs): try: @@ -538,7 +501,7 @@ if not build: return None _maybe_cleanup_dict() - cell = self._new_jitcell() + cell = JitCell() jitcell_dict[greenargs] = cell return cell return get_jitcell @@ -549,7 +512,7 @@ get_jitcell_at_ptr = self.jitdriver_sd._get_jitcell_at_ptr set_jitcell_at_ptr = self.jitdriver_sd._set_jitcell_at_ptr lltohlhack = {} - # note that there is no equivalent of record_jitcell_dict() + # note that there is no equivalent of _maybe_cleanup_dict() # in the case of custom getters. We assume that the interpreter # stores the JitCells on some objects that can go away by GC, # like the PyCode objects in PyPy. @@ -574,7 +537,7 @@ if not build: return cell if cell is None: - cell = self._new_jitcell() + cell = JitCell() # if we_are_translated(): cellref = cast_object_to_ptr(BASEJITCELL, cell) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -648,6 +648,7 @@ def get_concrete_or_scalar(self): return self + class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs @@ -1038,13 +1039,6 @@ self.invalidated() self.dtype.setitem(self.storage, item, value) - def start_iter(self, res_shape=None): - if self.order == 'C': - if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - return ArrayIterator(self.size) - raise NotImplementedError # use ViewIterator simply, test it - def setshape(self, space, new_shape): self.shape = new_shape self.calc_strides(new_shape) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -8,7 +8,7 @@ def sigeq(one, two): return one.eq(two) -def sigeq_numbering(one, two): +def sigeq_no_numbering(one, two): """ Cache for iterator numbering should not compare array numbers """ return one.eq(two, compare_array_no=False) @@ -73,7 +73,7 @@ iter_no = 0 def invent_numbering(self): - cache = r_dict(sigeq_numbering, sighash) + cache = r_dict(sigeq_no_numbering, sighash) allnumbers = [] self._invent_numbering(cache, allnumbers) diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -311,7 +311,7 @@ # to repeat it every time ticker_check = """ guard_not_invalidated? 
- ticker0 = getfield_raw(ticker_address, descr=) + ticker0 = getfield_raw(ticker_address, descr=) ticker_cond0 = int_lt(ticker0, 0) guard_false(ticker_cond0, descr=...) """ @@ -320,9 +320,9 @@ # this is the ticker check generated if we have threads thread_ticker_check = """ guard_not_invalidated? - ticker0 = getfield_raw(ticker_address, descr=) + ticker0 = getfield_raw(ticker_address, descr=) ticker1 = int_sub(ticker0, _) - setfield_raw(ticker_address, ticker1, descr=) + setfield_raw(ticker_address, ticker1, descr=) ticker_cond0 = int_lt(ticker1, 0) guard_false(ticker_cond0, descr=...) """ @@ -330,7 +330,7 @@ # # this is the ticker check generated in PyFrame.handle_operation_error exc_ticker_check = """ - ticker2 = getfield_raw(ticker_address, descr=) + ticker2 = getfield_raw(ticker_address, descr=) ticker_cond1 = int_lt(ticker2, 0) guard_false(ticker_cond1, descr=...) """ @@ -451,7 +451,6 @@ try: self.match_loop(expected_ops, ignore_ops) except InvalidMatch, e: - #raise # uncomment this and use py.test --pdb for better debugging print '@' * 40 print "Loops don't match" print "=================" @@ -464,7 +463,7 @@ print print "Expected:" print format(expected_src) - return False + raise # always propagate the exception in case of mismatch else: return True diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -7,8 +7,9 @@ from pypy.tool.udir import udir from pypy.tool import logparser from pypy.jit.tool.jitoutput import parse_prof -from pypy.module.pypyjit.test_pypy_c.model import Log, find_ids_range, find_ids, \ - TraceWithIds, OpMatcher +from pypy.module.pypyjit.test_pypy_c.model import (Log, find_ids_range, + find_ids, TraceWithIds, + OpMatcher, InvalidMatch) class BaseTestPyPyC(object): def setup_class(cls): @@ -115,13 +116,18 @@ assert opcodes_names == ['LOAD_FAST', 'LOAD_CONST', 'BINARY_ADD', 'STORE_FAST'] -class TestOpMatcher(object): +class TestOpMatcher_(object): def match(self, src1, src2, **kwds): from pypy.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) matcher = OpMatcher(loop.operations) - return matcher.match(src2, **kwds) + try: + res = matcher.match(src2, **kwds) + assert res is True + return True + except InvalidMatch: + return False def test_match_var(self): match_var = OpMatcher([]).match_var @@ -447,7 +453,7 @@ jump(p0, p1, p2, p3, i8, descr=...) """) # - assert not loop.match(""" + py.test.raises(InvalidMatch, loop.match, """ i6 = int_lt(i4, 1003) guard_true(i6) i8 = int_add(i5, 1) # variable mismatch @@ -492,9 +498,8 @@ guard_no_exception(descr=...) """) # - assert not loop.match_by_id('ntohs', """ + py.test.raises(InvalidMatch, loop.match_by_id, 'ntohs', """ guard_not_invalidated(descr=...) p12 = call(ConstClass(foobar), 1, descr=...) guard_no_exception(descr=...) """) - diff --git a/pypy/module/pypyjit/test_pypy_c/test__ffi.py b/pypy/module/pypyjit/test_pypy_c/test__ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test__ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test__ffi.py @@ -35,7 +35,7 @@ guard_not_invalidated(descr=...) i17 = force_token() setfield_gc(p0, i17, descr=<.* .*PyFrame.vable_token .*>) - f21 = call_release_gil(%s, 2.000000, 3.000000, descr=) + f21 = call_release_gil(%s, 2.000000, 3.000000, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) 
""" % pow_addr) diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -42,7 +42,7 @@ guard_not_invalidated(descr=...) i13 = int_lt(i7, i9) guard_true(i13, descr=...) - i15 = getarrayitem_raw(i10, i7, descr=<.*ArrayNoLengthDescr>) + i15 = getarrayitem_raw(i10, i7, descr=) i16 = int_add_ovf(i8, i15) guard_no_overflow(descr=...) i18 = int_add(i7, 1) @@ -72,17 +72,17 @@ guard_true(i13, descr=...) guard_not_invalidated(descr=...) # the bound check guard on img has been killed (thanks to the asserts) - i14 = getarrayitem_raw(i10, i8, descr=<.*ArrayNoLengthDescr>) + i14 = getarrayitem_raw(i10, i8, descr=) i15 = int_add_ovf(i9, i14) guard_no_overflow(descr=...) i17 = int_sub(i8, 640) # the bound check guard on intimg has been killed (thanks to the asserts) - i18 = getarrayitem_raw(i11, i17, descr=<.*ArrayNoLengthDescr>) + i18 = getarrayitem_raw(i11, i17, descr=) i19 = int_add_ovf(i18, i15) guard_no_overflow(descr=...) # on 64bit, there is a guard checking that i19 actually fits into 32bit ... - setarrayitem_raw(i11, i8, _, descr=<.*ArrayNoLengthDescr>) + setarrayitem_raw(i11, i8, _, descr=) i28 = int_add(i8, 1) --TICK-- jump(p0, p1, p2, p3, p4, p5, p6, i28, i15, p9, i10, i11, descr=...) @@ -107,10 +107,10 @@ guard_true(i10, descr=...) i11 = int_lt(i6, i7) guard_true(i11, descr=...) - f13 = getarrayitem_raw(i8, i6, descr=) + f13 = getarrayitem_raw(i8, i6, descr=) f15 = float_add(f13, 20.500000) - setarrayitem_raw(i8, i6, f15, descr=) - f16 = getarrayitem_raw(i8, i6, descr=) + setarrayitem_raw(i8, i6, f15, descr=) + f16 = getarrayitem_raw(i8, i6, descr=) i18 = float_eq(f16, 42.000000) guard_true(i18, descr=...) i20 = int_add(i6, 1) @@ -132,28 +132,24 @@ log = self.run(main, []) assert log.result == 321 loop, = log.loops_by_filename(self.filepath) - if sys.maxint == 2147483647: - arraydescr = 'UnsignedArrayNoLengthDescr' - else: - arraydescr = 'UINTArrayNoLengthDescr' assert loop.match(""" i10 = int_lt(i6, 1000) guard_true(i10, descr=...) i11 = int_lt(i6, i7) guard_true(i11, descr=...) - i13 = getarrayitem_raw(i8, i6, descr=<%s>) + i13 = getarrayitem_raw(i8, i6, descr=) f14 = cast_singlefloat_to_float(i13) f16 = float_add(f14, 20.500000) i17 = cast_float_to_singlefloat(f16) - setarrayitem_raw(i8, i6,i17, descr=<%s>) - i18 = getarrayitem_raw(i8, i6, descr=<%s>) + setarrayitem_raw(i8, i6,i17, descr=) + i18 = getarrayitem_raw(i8, i6, descr=) f19 = cast_singlefloat_to_float(i18) i21 = float_eq(f19, 42.000000) guard_true(i21, descr=...) i23 = int_add(i6, 1) --TICK-- jump(..., descr=...) - """ % (arraydescr, arraydescr, arraydescr)) + """) def test_zeropadded(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -75,12 +75,12 @@ assert log.opnames(ops) == [] # assert entry_bridge.match_by_id('call', """ - p38 = call(ConstClass(getexecutioncontext), descr=) - p39 = getfield_gc(p38, descr=) + p38 = call(ConstClass(getexecutioncontext), descr=) + p39 = getfield_gc(p38, descr=) i40 = force_token() - p41 = getfield_gc(p38, descr=) + p41 = getfield_gc(p38, descr=) guard_isnull(p41, descr=...) - i42 = getfield_gc(p38, descr=) + i42 = getfield_gc(p38, descr=) i43 = int_is_zero(i42) guard_true(i43, descr=...) 
i50 = force_token() @@ -192,7 +192,7 @@ assert log.result == 1000 loop, = log.loops_by_id('call') assert loop.match_by_id('call', """ - p14 = getarrayitem_gc_pure(p8, i9, descr=) + p14 = getarrayitem_gc_pure(p8, i9, descr=) i14 = force_token() i16 = force_token() """) @@ -336,15 +336,15 @@ loop, = log.loops_by_filename(self.filepath) # the int strategy is used here assert loop.match_by_id('append', """ - i13 = getfield_gc(p8, descr=) + i13 = getfield_gc(p8, descr=) i15 = int_add(i13, 1) # Will be killed by the backend - p15 = getfield_gc(p8, descr=) - i17 = arraylen_gc(p15, descr=) - call(_, p8, i15, descr=) # this is a call to _ll_list_resize_ge_trampoline__... + p15 = getfield_gc(p8, descr=) + i17 = arraylen_gc(p15, descr=) + call(_, p8, i15, descr=) # this is a call to _ll_list_resize_ge_trampoline__... guard_no_exception(descr=...) - p17 = getfield_gc(p8, descr=) - setarrayitem_gc(p17, i13, i12, descr=) + p17 = getfield_gc(p8, descr=) + setarrayitem_gc(p17, i13, i12, descr=) """) def test_blockstack_virtualizable(self): @@ -368,13 +368,13 @@ ... i20 = force_token() p22 = new_with_vtable(19511408) - p24 = new_array(1, descr=) + p24 = new_array(1, descr=) p26 = new_with_vtable(ConstClass(W_ListObject)) - setfield_gc(p0, i20, descr=) - setfield_gc(p26, ConstPtr(ptr22), descr=) - setarrayitem_gc(p24, 0, p26, descr=) - setfield_gc(p22, p24, descr=) - p32 = call_may_force(11376960, p18, p22, descr=) + setfield_gc(p0, i20, descr=) + setfield_gc(p26, ConstPtr(ptr22), descr=) + setarrayitem_gc(p24, 0, p26, descr=) + setfield_gc(p22, p24, descr=) + p32 = call_may_force(11376960, p18, p22, descr=) ... """) @@ -415,26 +415,26 @@ guard_nonnull_class(p8, ConstClass(W_IntObject), descr=...) guard_value(i4, 0, descr=...) guard_value(p3, ConstPtr(ptr14), descr=...) - i15 = getfield_gc_pure(p8, descr=) + i15 = getfield_gc_pure(p8, descr=) i17 = int_lt(i15, 5000) guard_true(i17, descr=...) - p18 = getfield_gc(p0, descr=) + p18 = getfield_gc(p0, descr=) guard_value(p18, ConstPtr(ptr19), descr=...) - p20 = getfield_gc(p18, descr=) + p20 = getfield_gc(p18, descr=) guard_value(p20, ConstPtr(ptr21), descr=...) guard_not_invalidated(descr=...) # most importantly, there is no getarrayitem_gc here - p23 = call(ConstClass(getexecutioncontext), descr=) - p24 = getfield_gc(p23, descr=) + p23 = call(ConstClass(getexecutioncontext), descr=) + p24 = getfield_gc(p23, descr=) i25 = force_token() - p26 = getfield_gc(p23, descr=) + p26 = getfield_gc(p23, descr=) guard_isnull(p26, descr=...) - i27 = getfield_gc(p23, descr=) + i27 = getfield_gc(p23, descr=) i28 = int_is_zero(i27) guard_true(i28, descr=...) - p30 = getfield_gc(ConstPtr(ptr29), descr=) + p30 = getfield_gc(ConstPtr(ptr29), descr=) guard_nonnull_class(p30, ConstClass(W_IntObject), descr=...) - i32 = getfield_gc_pure(p30, descr=) + i32 = getfield_gc_pure(p30, descr=) i33 = int_add_ovf(i15, i32) guard_no_overflow(descr=...) --TICK-- @@ -452,15 +452,15 @@ """, []) loop, = log.loops_by_id('call') assert loop.match(""" - i8 = getfield_gc_pure(p6, descr=) + i8 = getfield_gc_pure(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) i11 = force_token() i13 = int_add(i8, 1) --TICK-- p22 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p22, i13, descr=) - setfield_gc(p4, p22, descr=) + setfield_gc(p22, i13, descr=) + setfield_gc(p4, p22, descr=) jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=...) 
""") diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -46,7 +46,7 @@ assert loop.match_by_id("getitem", """ i26 = call(ConstClass(ll_dict_lookup), p18, p6, i25, descr=...) ... - p33 = getinteriorfield_gc(p31, i26, descr=>) + p33 = getinteriorfield_gc(p31, i26, descr=>) ... """) @@ -86,28 +86,28 @@ i8 = int_lt(i5, i7) guard_true(i8, descr=...) guard_not_invalidated(descr=...) - p10 = call(ConstClass(ll_int_str), i5, descr=) + p10 = call(ConstClass(ll_int_str), i5, descr=) guard_no_exception(descr=...) - i12 = call(ConstClass(ll_strhash), p10, descr=) + i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) - p15 = new_array(8, descr=) - setfield_gc(p13, p15, descr=) - i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) - setfield_gc(p13, 16, descr=) + p15 = new_array(8, descr=) + setfield_gc(p13, p15, descr=) + i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + setfield_gc(p13, 16, descr=) guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) - call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) - setfield_gc(p20, i5, descr=) + call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) + setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) - i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) guard_no_exception(descr=...) i26 = int_and(i23, .*) i27 = int_is_true(i26) guard_false(i27, descr=...) - p28 = getfield_gc(p13, descr=) - p29 = getinteriorfield_gc(p28, i23, descr=>) + p28 = getfield_gc(p13, descr=) + p29 = getinteriorfield_gc(p28, i23, descr=>) guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...) - i31 = getfield_gc_pure(p29, descr=) + i31 = getfield_gc_pure(p29, descr=) i32 = int_sub_ovf(i31, i5) guard_no_overflow(descr=...) i34 = int_add_ovf(i32, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -21,9 +21,9 @@ assert loop.match_by_id("generator", """ i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p45, i29, descr=) - setarrayitem_gc(p8, 0, p45, descr=) - i47 = arraylen_gc(p8, descr=) # Should be removed by backend + setfield_gc(p45, i29, descr=) + setarrayitem_gc(p8, 0, p45, descr=) + i47 = arraylen_gc(p8, descr=) # Should be removed by backend jump(..., descr=...) """) assert loop.match_by_id("subtract", """ diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -16,11 +16,11 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("loadglobal", """ - p10 = getfield_gc(p0, descr=) + p10 = getfield_gc(p0, descr=) guard_value(p10, ConstPtr(ptr11), descr=...) - p12 = getfield_gc(p10, descr=) + p12 = getfield_gc(p10, descr=) guard_value(p12, ConstPtr(ptr13), descr=...) guard_not_invalidated(descr=...) 
- p19 = getfield_gc(ConstPtr(p17), descr=) + p19 = getfield_gc(ConstPtr(p17), descr=) guard_value(p19, ConstPtr(ptr20), descr=...) - """) \ No newline at end of file + """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -125,8 +125,8 @@ i12 = force_token() --TICK-- p20 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p20, i11, descr=) - setfield_gc(ConstPtr(ptr21), p20, descr=) + setfield_gc(p20, i11, descr=) + setfield_gc(ConstPtr(ptr21), p20, descr=) jump(p0, p1, p2, p3, p4, p20, p6, i7, p20, descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -23,8 +23,8 @@ f1 = cast_int_to_float(i0) i3 = float_le(f1, 0) guard_false(i3, descr=...) - f2 = call(ConstClass(log), f1, descr=) - f3 = call(ConstClass(log10), f1, descr=) + f2 = call(ConstClass(log), f1, descr=) + f3 = call(ConstClass(log10), f1, descr=) f4 = float_sub(f2, f3) f5 = float_add(f0, f4) i4 = int_add(i0, 1) @@ -52,8 +52,8 @@ f1 = cast_int_to_float(i0) i6 = --ISINF--(f1) guard_false(i6, descr=...) - f2 = call(ConstClass(sin), f1, descr=) - f3 = call(ConstClass(cos), f1, descr=) + f2 = call(ConstClass(sin), f1, descr=) + f3 = call(ConstClass(cos), f1, descr=) f4 = float_sub(f2, f3) f5 = float_add(f0, f4) i7 = int_add(i0, f1) @@ -84,7 +84,7 @@ i4 = int_or(i2, i3) i5 = int_is_true(i4) guard_false(i5, descr=...) - f2 = call(ConstClass(fmod), f1, 2.0, descr=) + f2 = call(ConstClass(fmod), f1, 2.0, descr=) f3 = float_add(f0, f2) i6 = int_sub(i0, 1) --TICK-- diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -46,7 +46,7 @@ r *= n n -= 1 return r - log = self.run(fact, [7], threshold=5) + log = self.run(fact, [7], threshold=4) assert log.result == 5040 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" @@ -204,18 +204,18 @@ assert log.result == 1000000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - i14 = getfield_gc(p12, descr=) + i14 = getfield_gc(p12, descr=) i16 = uint_ge(i12, i14) guard_false(i16, descr=...) - p16 = getfield_gc(p12, descr=) - p17 = getarrayitem_gc(p16, i12, descr=) + p16 = getfield_gc(p12, descr=) + p17 = getarrayitem_gc(p16, i12, descr=) i19 = int_add(i12, 1) - setfield_gc(p9, i19, descr=) + setfield_gc(p9, i19, descr=) guard_nonnull_class(p17, 146982464, descr=...) - i21 = getfield_gc(p17, descr=) + i21 = getfield_gc(p17, descr=) i23 = int_lt(0, i21) guard_true(i23, descr=...) - i24 = getfield_gc(p17, descr=) + i24 = getfield_gc(p17, descr=) i25 = getarrayitem_raw(i24, 0, descr=<.*>) i27 = int_lt(1, i21) guard_false(i27, descr=...) 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -1,6 +1,9 @@ from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC +# XXX review the descrs to replace some EF=4 with EF=3 (elidable) + + class TestString(BaseTestPyPyC): def test_lookup_default_encoding(self): def main(n): @@ -52,8 +55,8 @@ i += int(long(string.digits[i % len(string.digits)], 16)) return i - log = self.run(main, [1000]) - assert log.result == main(1000) + log = self.run(main, [1100]) + assert log.result == main(1100) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" i11 = int_lt(i6, i7) @@ -72,7 +75,7 @@ i23 = strgetitem(p10, i19) p25 = newstr(1) strsetitem(p25, 0, i23) - p28 = call(ConstClass(strip_spaces), p25, descr=) + p28 = call(ConstClass(strip_spaces), p25, descr=) guard_no_exception(descr=...) i29 = strlen(p28) i30 = int_is_true(i29) @@ -88,9 +91,9 @@ guard_false(i41, descr=...) i43 = int_eq(i39, 43) guard_false(i43, descr=...) - i43 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr42), descr=) + i43 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr42), descr=) guard_false(i43, descr=...) - i46 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr45), descr=) + i46 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr45), descr=) guard_false(i46, descr=...) p51 = new_with_vtable(21136408) setfield_gc(p51, _, descr=...) # 7 setfields, but the order is dict-order-dependent @@ -100,9 +103,9 @@ setfield_gc(p51, _, descr=...) setfield_gc(p51, _, descr=...) setfield_gc(p51, _, descr=...) - p55 = call(ConstClass(parse_digit_string), p51, descr=) + p55 = call(ConstClass(parse_digit_string), p51, descr=) guard_no_exception(descr=...) - i57 = call(ConstClass(rbigint.toint), p55, descr=) + i57 = call(ConstClass(rbigint.toint), p55, descr=) guard_no_exception(descr=...) i58 = int_add_ovf(i6, i57) guard_no_overflow(descr=...) @@ -125,7 +128,7 @@ i7 = int_gt(i4, 0) guard_true(i7, descr=...) guard_not_invalidated(descr=...) - p9 = call(ConstClass(ll_int2dec__Signed), i4, descr=) + p9 = call(ConstClass(ll_int2dec__Signed), i4, descr=) guard_no_exception(descr=...) i10 = strlen(p9) i11 = int_is_true(i10) @@ -149,7 +152,7 @@ copystrcontent(p9, p21, 0, i25, i10) i33 = int_lt(i30, 23) guard_true(i33, descr=...) - p35 = call(ConstClass(ll_shrink_array__rpy_stringPtr_Signed), p21, i30, descr=) + p35 = call(ConstClass(ll_shrink_array__rpy_stringPtr_Signed), p21, i30, descr=) guard_no_exception(descr=...) i37 = strlen(p35) i38 = int_add_ovf(i5, i37) @@ -192,6 +195,6 @@ strsetitem(p35, 3, 104) strsetitem(p35, 4, 95) copystrcontent(p31, p35, 0, 5, i32) - i49 = call(ConstClass(_ll_2_str_eq_nonnull__rpy_stringPtr_rpy_stringPtr), p35, ConstPtr(ptr48), descr=) - guard_value(i49, 1, descr=) + i49 = call(ConstClass(_ll_2_str_eq_nonnull__rpy_stringPtr_rpy_stringPtr), p35, ConstPtr(ptr48), descr=) + guard_value(i49, 1, descr=...) 
''') diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -395,7 +395,6 @@ 'retrace_limit': 5, 'max_retrace_guards': 15, 'enable_opts': 'all', - 'decay_halflife': 40, } unroll_parameters = unrolling_iterable(PARAMETERS.items()) DEFAULT = object() From noreply at buildbot.pypy.org Tue Dec 20 23:13:51 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Dec 2011 23:13:51 +0100 (CET) Subject: [pypy-commit] pypy pyarg-parsetuple-s-star-buffer: I hope this pdb is not necessary Message-ID: <20111220221351.75768820B7@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: pyarg-parsetuple-s-star-buffer Changeset: r50784:3599bd6c07dc Date: 2011-12-20 23:08 +0100 http://bitbucket.org/pypy/pypy/changeset/3599bd6c07dc/ Log: I hope this pdb is not necessary diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -113,8 +113,6 @@ @specialize.memo() def _get_typedescr_1(typedef): - if typedef.name == "buffer": - import pdb; pdb.set_trace() try: return typedescr_cache[typedef] except KeyError: From noreply at buildbot.pypy.org Tue Dec 20 23:13:52 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Dec 2011 23:13:52 +0100 (CET) Subject: [pypy-commit] pypy pyarg-parsetuple-s-star-buffer: merge Message-ID: <20111220221352.94429820B7@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: pyarg-parsetuple-s-star-buffer Changeset: r50785:650faf80d8d2 Date: 2011-12-20 23:11 +0100 http://bitbucket.org/pypy/pypy/changeset/650faf80d8d2/ Log: merge diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -2,7 +2,7 @@ from pypy.module.cpyext.api import ( cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, PyObjectFields, PyObject) -from pypy.module.cpyext.pyobject import make_typedescr +from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer @@ -25,7 +25,7 @@ make_typedescr(space.gettypefor(Buffer).instancetypedef, basestruct=PyBufferObject.TO, attach=buffer_attach, - # dealloc=buffer_dealloc, + dealloc=buffer_dealloc, realize=buffer_realize) def buffer_attach(space, py_obj, w_obj): @@ -57,6 +57,10 @@ -# @cpython_api([PyObject], lltype.Void, external=False) -# def buffer_dealloc(space, py_obj): - + at cpython_api([PyObject], lltype.Void, external=False) +def buffer_dealloc(space, py_obj): + py_buf = rffi.cast(PyBufferObject, py_obj) + Py_DecRef(space, py_buf.c_b_base) + rffi.free_charp(py_buf.c_b_ptr) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -137,13 +137,11 @@ if (!PyArg_ParseTuple(args, "s*", &buf)) { return NULL; } - printf("OH NO %s %d\\n", buf.buf, buf.len); - fflush(stdout); result = PyString_FromStringAndSize(buf.buf, buf.len); PyBuffer_Release(&buf); return result; ''') - assert buffer('foo\0bar\0baz') == pybuffer(buffer('foo\0bar\0baz')) + assert 'foo\0bar\0baz' == pybuffer(buffer('foo\0bar\0baz')) def test_pyarg_parse_charbuf_and_length(self): diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ 
b/pypy/module/cpyext/typeobject.py @@ -28,6 +28,7 @@ PyNumberMethods, PyMappingMethods, PySequenceMethods, PyBufferProcs) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) +from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError from pypy.rlib.rstring import rsplit from pypy.rlib.objectmodel import specialize @@ -418,8 +419,20 @@ Py_DecRef(space, pyref) return space.len_w(w_str) + at cpython_api([PyObject, Py_ssize_t, rffi.VOIDPP], lltype.Signed, + external=False, error=-1) +def buf_getreadbuffer(space, pyref, segment, ref): + from pypy.module.cpyext.bufferobject import PyBufferObject + if segment != 0: + raise OperationError(space.w_SystemError, space.wrap + ("accessing non-existent string segment")) + py_buf = rffi.cast(PyBufferObject, pyref) + ref[0] = py_buf.c_b_ptr + #Py_DecRef(space, pyref) + return py_buf.c_b_size + def setup_string_buffer_procs(space, pto): - c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) + c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True, immortal=True) c_buf.c_bf_getsegcount = llhelper(str_segcount.api_func.functype, str_segcount.api_func.get_wrapper(space)) c_buf.c_bf_getreadbuffer = llhelper(str_getreadbuffer.api_func.functype, @@ -429,6 +442,14 @@ pto.c_tp_as_buffer = c_buf pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER +def setup_buffer_buffer_procs(space, pto): + c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True, immortal=True) + c_buf.c_bf_getsegcount = llhelper(str_segcount.api_func.functype, + str_segcount.api_func.get_wrapper(space)) + c_buf.c_bf_getreadbuffer = llhelper(buf_getreadbuffer.api_func.functype, + buf_getreadbuffer.api_func.get_wrapper(space)) + pto.c_tp_as_buffer = c_buf + @cpython_api([PyObject], lltype.Void, external=False) def type_dealloc(space, obj): from pypy.module.cpyext.object import PyObject_dealloc @@ -484,6 +505,8 @@ # buffer protocol if space.is_w(w_type, space.w_str): setup_string_buffer_procs(space, pto) + if space.is_w(w_type, space.gettypefor(Buffer)): + setup_buffer_buffer_procs(space, pto) pto.c_tp_free = llhelper(PyObject_Del.api_func.functype, PyObject_Del.api_func.get_wrapper(space)) From noreply at buildbot.pypy.org Tue Dec 20 23:14:46 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 20 Dec 2011 23:14:46 +0100 (CET) Subject: [pypy-commit] pypy pyarg-parsetuple-s-star-buffer: Fix translation Message-ID: <20111220221446.4009D820B7@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: pyarg-parsetuple-s-star-buffer Changeset: r50786:ffdf30e6ad7b Date: 2011-12-20 23:13 +0100 http://bitbucket.org/pypy/pypy/changeset/ffdf30e6ad7b/ Log: Fix translation diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -61,6 +61,6 @@ def buffer_dealloc(space, py_obj): py_buf = rffi.cast(PyBufferObject, py_obj) Py_DecRef(space, py_buf.c_b_base) - rffi.free_charp(py_buf.c_b_ptr) + rffi.free_charp(rffi.cast(rffi.CCHARP, py_buf.c_b_ptr)) from pypy.module.cpyext.object import PyObject_dealloc PyObject_dealloc(space, py_obj) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -432,7 +432,8 @@ return py_buf.c_b_size def setup_string_buffer_procs(space, pto): - c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True, immortal=True) + c_buf = 
lltype.malloc(PyBufferProcs, flavor='raw', zero=True) + lltype.render_immortal(c_buf) c_buf.c_bf_getsegcount = llhelper(str_segcount.api_func.functype, str_segcount.api_func.get_wrapper(space)) c_buf.c_bf_getreadbuffer = llhelper(str_getreadbuffer.api_func.functype, @@ -443,7 +444,8 @@ pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER def setup_buffer_buffer_procs(space, pto): - c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True, immortal=True) + c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) + lltype.render_immortal(c_buf) c_buf.c_bf_getsegcount = llhelper(str_segcount.api_func.functype, str_segcount.api_func.get_wrapper(space)) c_buf.c_bf_getreadbuffer = llhelper(buf_getreadbuffer.api_func.functype, From noreply at buildbot.pypy.org Tue Dec 20 23:19:12 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 20 Dec 2011 23:19:12 +0100 (CET) Subject: [pypy-commit] pypy pyarg-parsetuple-s-star-buffer: Close branch about to be merged Message-ID: <20111220221912.62BAB820B7@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: pyarg-parsetuple-s-star-buffer Changeset: r50787:7359951c27a7 Date: 2011-12-20 23:16 +0100 http://bitbucket.org/pypy/pypy/changeset/7359951c27a7/ Log: Close branch about to be merged From noreply at buildbot.pypy.org Tue Dec 20 23:19:13 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 20 Dec 2011 23:19:13 +0100 (CET) Subject: [pypy-commit] pypy default: Merge branch pyarg-parsetuple-s-star-buffer: Message-ID: <20111220221913.AC6FD820B7@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r50788:5d4dfac3c59a Date: 2011-12-20 23:18 +0100 http://bitbucket.org/pypy/pypy/changeset/5d4dfac3c59a/ Log: Merge branch pyarg-parsetuple-s-star-buffer: (exarkun) Add support for "buffer()" objects in cpyext. They also implement the C buffer interface. diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -45,6 +45,8 @@ import pypy.module.cpyext.longobject import pypy.module.cpyext.listobject import pypy.module.cpyext.sequence +import pypy.module.cpyext.buffer +import pypy.module.cpyext.bufferobject import pypy.module.cpyext.eval import pypy.module.cpyext.import_ import pypy.module.cpyext.mapping diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -317,6 +317,10 @@ INTERPLEVEL_API = {} FUNCTIONS = {} + +# These are C symbols which cpyext will export, but which are defined in .c +# files somewhere in the implementation of cpyext (rather than being defined in +# RPython). SYMBOLS_C = [ 'Py_FatalError', 'PyOS_snprintf', 'PyOS_vsnprintf', 'PyArg_Parse', 'PyArg_ParseTuple', 'PyArg_UnpackTuple', 'PyArg_ParseTupleAndKeywords', diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/buffer.py @@ -0,0 +1,11 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, CANNOT_FAIL, Py_buffer) + + at cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL) +def PyBuffer_IsContiguous(space, view, fortran): + """Return 1 if the memory defined by the view is C-style (fortran is + 'C') or Fortran-style (fortran is 'F') contiguous or either one + (fortran is 'A'). Return 0 otherwise.""" + # PyPy only supports contiguous Py_buffers for now. 
+ return space.wrap(1) diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/bufferobject.py @@ -0,0 +1,66 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, + PyObjectFields, PyObject) +from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef +from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer + + +PyBufferObjectStruct = lltype.ForwardReference() +PyBufferObject = lltype.Ptr(PyBufferObjectStruct) +PyBufferObjectFields = PyObjectFields + ( + ("b_base", PyObject), + ("b_ptr", rffi.VOIDP), + ("b_size", Py_ssize_t), + ("b_offset", Py_ssize_t), + ("b_readonly", rffi.INT), + ("b_hash", rffi.LONG), + ) + +cpython_struct("PyBufferObject", PyBufferObjectFields, PyBufferObjectStruct) + + at bootstrap_function +def init_bufferobject(space): + "Type description of PyBufferObject" + make_typedescr(space.gettypefor(Buffer).instancetypedef, + basestruct=PyBufferObject.TO, + attach=buffer_attach, + dealloc=buffer_dealloc, + realize=buffer_realize) + +def buffer_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyBufferObject with the given (str) buffer object. + """ + py_buf = rffi.cast(PyBufferObject, py_obj) + py_buf.c_b_offset = 0 + rffi.setintfield(py_buf, 'c_b_readonly', 1) + rffi.setintfield(py_buf, 'c_b_hash', -1) + + if isinstance(w_obj, SubBuffer): + py_buf.c_b_offset = w_obj.offset + w_obj = w_obj.buffer + + if isinstance(w_obj, StringBuffer): + py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value) + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.as_str())) + py_buf.c_b_size = w_obj.getlength() + else: + raise Exception("Fail fail fail fail fail") + + +def buffer_realize(space, py_obj): + """ + Creates the buffer in the PyPy interpreter from a cpyext representation. 
+ """ + raise Exception("realize fail fail fail") + + + + at cpython_api([PyObject], lltype.Void, external=False) +def buffer_dealloc(space, py_obj): + py_buf = rffi.cast(PyBufferObject, py_obj) + Py_DecRef(space, py_buf.c_b_base) + rffi.free_charp(rffi.cast(rffi.CCHARP, py_buf.c_b_ptr)) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) diff --git a/pypy/module/cpyext/include/bufferobject.h b/pypy/module/cpyext/include/bufferobject.h --- a/pypy/module/cpyext/include/bufferobject.h +++ b/pypy/module/cpyext/include/bufferobject.h @@ -9,6 +9,17 @@ extern "C" { #endif +typedef struct { + PyObject_HEAD + PyObject *b_base; + void *b_ptr; + Py_ssize_t b_size; + Py_ssize_t b_offset; + int b_readonly; + long b_hash; +} PyBufferObject; + + PyAPI_DATA(PyTypeObject) PyBuffer_Type; #define PyBuffer_Check(op) (((PyObject*)(op))->ob_type == &PyBuffer_Type) diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -234,7 +234,7 @@ writebufferproc bf_getwritebuffer; segcountproc bf_getsegcount; charbufferproc bf_getcharbuffer; - getbufferproc bf_getbuffer; + getbufferproc bf_getbuffer; releasebufferproc bf_releasebuffer; } PyBufferProcs; diff --git a/pypy/module/cpyext/src/bufferobject.c b/pypy/module/cpyext/src/bufferobject.c --- a/pypy/module/cpyext/src/bufferobject.c +++ b/pypy/module/cpyext/src/bufferobject.c @@ -4,17 +4,6 @@ #include "Python.h" -typedef struct { - PyObject_HEAD - PyObject *b_base; - void *b_ptr; - Py_ssize_t b_size; - Py_ssize_t b_offset; - int b_readonly; - long b_hash; -} PyBufferObject; - - enum buffer_t { READ_BUFFER, WRITE_BUFFER, diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -777,18 +777,14 @@ Py_buffer *p = (Py_buffer *)va_arg(*p_va, Py_buffer *); if (PyString_Check(arg)) { + fflush(stdout); PyBuffer_FillInfo(p, arg, PyString_AS_STRING(arg), PyString_GET_SIZE(arg), 1, 0); - } else { - PyErr_SetString( - PyExc_NotImplementedError, - "s* not implemented for non-string values"); - return NULL; - } -#if 0 + } #ifdef Py_USING_UNICODE else if (PyUnicode_Check(arg)) { +#if 0 uarg = UNICODE_DEFAULT_ENCODING(arg); if (uarg == NULL) return converterr(CONV_UNICODE, @@ -796,6 +792,9 @@ PyBuffer_FillInfo(p, arg, PyString_AS_STRING(uarg), PyString_GET_SIZE(uarg), 1, 0); +#else + return converterr("string or buffer", arg, msgbuf, bufsize); +#endif } #endif else { /* any buffer-like object */ @@ -803,7 +802,6 @@ if (getbuffer(arg, p, &buf) < 0) return converterr(buf, arg, msgbuf, bufsize); } -#endif if (addcleanup(p, freelist, cleanup_buffer)) { return converterr( "(cleanup problem)", @@ -1342,7 +1340,6 @@ return count; } -#if 0 //YYY static int getbuffer(PyObject *arg, Py_buffer *view, char **errmsg) { @@ -1373,7 +1370,6 @@ PyBuffer_FillInfo(view, NULL, buf, count, 1, 0); return 0; } -#endif /* Support for keyword arguments donated by Geoff Philbrick */ diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -1,5 +1,5 @@ from pypy.module.cpyext.api import ( - cpython_api, PyObject, PyObjectP, CANNOT_FAIL + cpython_api, PyObject, PyObjectP, CANNOT_FAIL, Py_buffer ) from pypy.module.cpyext.complexobject import Py_complex_ptr as Py_complex from pypy.rpython.lltypesystem import rffi, lltype @@ -10,7 +10,6 @@ PyMethodDef = rffi.VOIDP PyGetSetDef 
= rffi.VOIDP PyMemberDef = rffi.VOIDP -Py_buffer = rffi.VOIDP va_list = rffi.VOIDP PyDateTime_Date = rffi.VOIDP PyDateTime_DateTime = rffi.VOIDP @@ -178,13 +177,6 @@ ~Py_buffer.format.""" raise NotImplementedError - at cpython_api([Py_buffer, lltype.Char], rffi.INT_real, error=CANNOT_FAIL) -def PyBuffer_IsContiguous(space, view, fortran): - """Return 1 if the memory defined by the view is C-style (fortran is - 'C') or Fortran-style (fortran is 'F') contiguous or either one - (fortran is 'A'). Return 0 otherwise.""" - raise NotImplementedError - @cpython_api([rffi.INT_real, Py_ssize_t, Py_ssize_t, Py_ssize_t, lltype.Char], lltype.Void) def PyBuffer_FillContiguousStrides(space, ndim, shape, strides, itemsize, fortran): """Fill the strides array with byte-strides of a contiguous (C-style if diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -129,6 +129,21 @@ assert 'foo\0bar\0baz' == pybuffer('foo\0bar\0baz') + def test_pyarg_parse_string_old_buffer(self): + pybuffer = self.import_parser( + ''' + Py_buffer buf; + PyObject *result; + if (!PyArg_ParseTuple(args, "s*", &buf)) { + return NULL; + } + result = PyString_FromStringAndSize(buf.buf, buf.len); + PyBuffer_Release(&buf); + return result; + ''') + assert 'foo\0bar\0baz' == pybuffer(buffer('foo\0bar\0baz')) + + def test_pyarg_parse_charbuf_and_length(self): """ The `t#` format specifier can be used to parse a read-only 8-bit diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -28,6 +28,7 @@ PyNumberMethods, PyMappingMethods, PySequenceMethods, PyBufferProcs) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) +from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError from pypy.rlib.rstring import rsplit from pypy.rlib.objectmodel import specialize @@ -418,8 +419,21 @@ Py_DecRef(space, pyref) return space.len_w(w_str) + at cpython_api([PyObject, Py_ssize_t, rffi.VOIDPP], lltype.Signed, + external=False, error=-1) +def buf_getreadbuffer(space, pyref, segment, ref): + from pypy.module.cpyext.bufferobject import PyBufferObject + if segment != 0: + raise OperationError(space.w_SystemError, space.wrap + ("accessing non-existent string segment")) + py_buf = rffi.cast(PyBufferObject, pyref) + ref[0] = py_buf.c_b_ptr + #Py_DecRef(space, pyref) + return py_buf.c_b_size + def setup_string_buffer_procs(space, pto): c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) + lltype.render_immortal(c_buf) c_buf.c_bf_getsegcount = llhelper(str_segcount.api_func.functype, str_segcount.api_func.get_wrapper(space)) c_buf.c_bf_getreadbuffer = llhelper(str_getreadbuffer.api_func.functype, @@ -429,6 +443,15 @@ pto.c_tp_as_buffer = c_buf pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER +def setup_buffer_buffer_procs(space, pto): + c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) + lltype.render_immortal(c_buf) + c_buf.c_bf_getsegcount = llhelper(str_segcount.api_func.functype, + str_segcount.api_func.get_wrapper(space)) + c_buf.c_bf_getreadbuffer = llhelper(buf_getreadbuffer.api_func.functype, + buf_getreadbuffer.api_func.get_wrapper(space)) + pto.c_tp_as_buffer = c_buf + @cpython_api([PyObject], lltype.Void, external=False) def type_dealloc(space, obj): from pypy.module.cpyext.object import PyObject_dealloc @@ 
-484,6 +507,8 @@ # buffer protocol if space.is_w(w_type, space.w_str): setup_string_buffer_procs(space, pto) + if space.is_w(w_type, space.gettypefor(Buffer)): + setup_buffer_buffer_procs(space, pto) pto.c_tp_free = llhelper(PyObject_Del.api_func.functype, PyObject_Del.api_func.get_wrapper(space)) From noreply at buildbot.pypy.org Tue Dec 20 23:22:47 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Dec 2011 23:22:47 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: some more dead code Message-ID: <20111220222247.32DD3820B7@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50789:44f6ff290792 Date: 2011-12-21 00:22 +0200 http://bitbucket.org/pypy/pypy/changeset/44f6ff290792/ Log: some more dead code diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -736,9 +736,6 @@ def _del_sources(self): self.values = None - def _find_dtype(self): - return self.res_dtype - def create_sig(self, res_shape): if self.forced_result is not None: return self.forced_result.create_sig(res_shape) @@ -1029,12 +1026,6 @@ ) return array - def descr_len(self, space): - if len(self.shape): - return space.wrap(self.shape[0]) - raise OperationError(space.w_TypeError, space.wrap( - "len() of unsized object")) - def setitem(self, item, value): self.invalidated() self.dtype.setitem(self.storage, item, value) From noreply at buildbot.pypy.org Tue Dec 20 23:27:10 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Dec 2011 23:27:10 +0100 (CET) Subject: [pypy-commit] pypy refactor-signature: close to be merged branch Message-ID: <20111220222710.AFD53820B7@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-signature Changeset: r50790:ad20782034b9 Date: 2011-12-21 00:25 +0200 http://bitbucket.org/pypy/pypy/changeset/ad20782034b9/ Log: close to be merged branch From noreply at buildbot.pypy.org Tue Dec 20 23:27:12 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Dec 2011 23:27:12 +0100 (CET) Subject: [pypy-commit] pypy default: (fijal, agaynor review) Merge refactor-signature branch. This is mostly Message-ID: <20111220222712.0DD3C820B7@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50791:931240eb1ae7 Date: 2011-12-21 00:26 +0200 http://bitbucket.org/pypy/pypy/changeset/931240eb1ae7/ Log: (fijal, agaynor review) Merge refactor-signature branch. This is mostly a refactor of signature.py, but brings a few new features as well: * sharing of iterators within one expression * specializing on array storage * virtual views diff --git a/pypy/module/micronumpy/REVIEW.txt b/pypy/module/micronumpy/REVIEW.txt new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/REVIEW.txt @@ -0,0 +1,10 @@ +REVIEW NOTES +============ + +* VirtualSlice vs. W_NDimSlice? +* W_NDimSlice.__init__ calls ConcreteArray.__init__ instead of + ViewArray.__init__, W_FlatIterator as well. +* Cleanup of the iterator and array caching/numbering. It's a mess right now: + * _creater_iter updates the arraylist + * Why do Scalars need an iterator at all? + * Do views share storage with concrete arrays or other views? 
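The log message above lists "sharing of iterators within one expression" as one of the features this merge introduces through the new signature machinery. Purely as an illustration of that idea, here is a minimal, self-contained Python sketch; the names ToySig and invent_numbering are hypothetical stand-ins, and the plain-dict cache is an assumption for readability, not the RPython code from signature.py shown further down in this changeset:

    # Toy model of signature numbering: structurally equal leaves of an
    # expression tree receive the same iterator number, so an expression
    # like "a + a" is driven by a single iterator instead of two.
    class ToySig(object):
        def __init__(self, name, children=()):
            self.name = name
            self.children = list(children)
            self.iter_no = -1          # filled in by invent_numbering()

        def key(self):
            # structural identity, independent of any concrete array data
            return (self.name,) + tuple(c.key() for c in self.children)

    def invent_numbering(sig, cache):
        if not sig.children:
            # reuse the number of a structurally equal leaf seen earlier
            if sig.key() not in cache:
                cache[sig.key()] = len(cache)
            sig.iter_no = cache[sig.key()]
        else:
            for child in sig.children:
                invent_numbering(child, cache)

    # "a + a": both operands have the same signature, so they share iterator 0
    expr = ToySig("add", [ToySig("array_a"), ToySig("array_a")])
    invent_numbering(expr, {})
    assert expr.children[0].iter_no == expr.children[1].iter_no == 0

In the real patch the equivalent bookkeeping lives in Signature._invent_numbering and find_sig below, where a shared cache of known signatures also lets structurally equal expressions reuse one compiled loop.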
diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -21,7 +21,6 @@ _immutable_fields_ = ["itemtype", "num", "kind"] def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[]): - self.signature = signature.BaseSignature() self.itemtype = itemtype self.num = num self.kind = kind @@ -228,4 +227,4 @@ ) def get_dtype_cache(space): - return space.fromcache(DtypeCache) \ No newline at end of file + return space.fromcache(DtypeCache) diff --git a/pypy/module/micronumpy/interp_extras.py b/pypy/module/micronumpy/interp_extras.py --- a/pypy/module/micronumpy/interp_extras.py +++ b/pypy/module/micronumpy/interp_extras.py @@ -4,4 +4,4 @@ @unwrap_spec(array=BaseArray) def debug_repr(space, array): - return space.wrap(array.debug_repr()) + return space.wrap(array.find_sig().debug_repr()) diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_iter.py @@ -0,0 +1,104 @@ + +from pypy.rlib import jit +from pypy.rlib.objectmodel import instantiate +from pypy.module.micronumpy.strides import calculate_broadcast_strides + +# Iterators for arrays +# -------------------- +# all those iterators with the exception of BroadcastIterator iterate over the +# entire array in C order (the last index changes the fastest). This will +# yield all elements. Views iterate over indices and look towards strides and +# backstrides to find the correct position. Notably the offset between +# x[..., i + 1] and x[..., i] will be strides[-1]. Offset between +# x[..., k + 1, 0] and x[..., k, i_max] will be backstrides[-2] etc. 
+ +# BroadcastIterator works like that, but for indexes that don't change source +# in the original array, strides[i] == backstrides[i] == 0 + +class BaseIterator(object): + def next(self, shapelen): + raise NotImplementedError + + def done(self): + raise NotImplementedError + +class ArrayIterator(BaseIterator): + def __init__(self, size): + self.offset = 0 + self.size = size + + def next(self, shapelen): + arr = instantiate(ArrayIterator) + arr.size = self.size + arr.offset = self.offset + 1 + return arr + + def done(self): + return self.offset >= self.size + +class OneDimIterator(BaseIterator): + def __init__(self, start, step, stop): + self.offset = start + self.step = step + self.size = stop * step + start + + def next(self, shapelen): + arr = instantiate(OneDimIterator) + arr.size = self.size + arr.step = self.step + arr.offset = self.offset + self.step + return arr + + def done(self): + return self.offset == self.size + +def view_iter_from_arr(arr): + return ViewIterator(arr.start, arr.strides, arr.backstrides, arr.shape) + +class ViewIterator(BaseIterator): + def __init__(self, start, strides, backstrides, shape, res_shape=None): + self.offset = start + self._done = False + if res_shape is not None and res_shape != shape: + r = calculate_broadcast_strides(strides, backstrides, + shape, res_shape) + self.strides, self.backstrides = r + self.res_shape = res_shape + else: + self.strides = strides + self.backstrides = backstrides + self.res_shape = shape + self.indices = [0] * len(self.res_shape) + + @jit.unroll_safe + def next(self, shapelen): + offset = self.offset + indices = [0] * shapelen + for i in range(shapelen): + indices[i] = self.indices[i] + done = False + for i in range(shapelen - 1, -1, -1): + if indices[i] < self.res_shape[i] - 1: + indices[i] += 1 + offset += self.strides[i] + break + else: + indices[i] = 0 + offset -= self.backstrides[i] + else: + done = True + res = instantiate(ViewIterator) + res.offset = offset + res.indices = indices + res.strides = self.strides + res.backstrides = self.backstrides + res.res_shape = self.res_shape + res._done = done + return res + + def done(self): + return self._done + +class ConstantIterator(BaseIterator): + def next(self, shapelen): + return self diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -3,28 +3,33 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import interp_ufuncs, interp_dtype, signature +from pypy.module.micronumpy.strides import calculate_slice_strides from pypy.rlib import jit from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name from pypy.rlib.rstring import StringBuilder -from pypy.rlib.objectmodel import instantiate - +from pypy.module.micronumpy.interp_iter import ArrayIterator,\ + view_iter_from_arr, OneDimIterator numpy_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['result_size', 'i', 'ri', 'self', 'result'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['result_size', 'frame', 'ri', 'self', 'result'] ) all_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['i', 'self', 'dtype'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['frame', 'self', 'dtype'] ) any_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - 
reds=['i', 'self', 'dtype'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['frame', 'self', 'dtype'] ) slice_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['self', 'source', 'source_iter', 'res_iter'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['self', 'frame', 'source', 'res_iter'] ) def _find_shape_and_elems(space, w_iterable): @@ -198,231 +203,17 @@ n_old_elems_to_use *= old_shape[oldI] return new_strides -# Iterators for arrays -# -------------------- -# all those iterators with the exception of BroadcastIterator iterate over the -# entire array in C order (the last index changes the fastest). This will -# yield all elements. Views iterate over indices and look towards strides and -# backstrides to find the correct position. Notably the offset between -# x[..., i + 1] and x[..., i] will be strides[-1]. Offset between -# x[..., k + 1, 0] and x[..., k, i_max] will be backstrides[-2] etc. +class BaseArray(Wrappable): + _attrs_ = ["invalidates", "shape", 'size'] -# BroadcastIterator works like that, but for indexes that don't change source -# in the original array, strides[i] == backstrides[i] == 0 - -class BaseIterator(object): - def next(self, shapelen): - raise NotImplementedError - - def done(self): - raise NotImplementedError - - def get_offset(self): - raise NotImplementedError - -class ArrayIterator(BaseIterator): - def __init__(self, size): - self.offset = 0 - self.size = size - - def next(self, shapelen): - arr = instantiate(ArrayIterator) - arr.size = self.size - arr.offset = self.offset + 1 - return arr - - def done(self): - return self.offset >= self.size - - def get_offset(self): - return self.offset - -class OneDimIterator(BaseIterator): - def __init__(self, start, step, stop): - self.offset = start - self.step = step - self.size = stop * step + start - - def next(self, shapelen): - arr = instantiate(OneDimIterator) - arr.size = self.size - arr.step = self.step - arr.offset = self.offset + self.step - return arr - - def done(self): - return self.offset == self.size - - def get_offset(self): - return self.offset - -class ViewIterator(BaseIterator): - def __init__(self, arr): - self.indices = [0] * len(arr.shape) - self.offset = arr.start - self.arr = arr - self._done = False - - @jit.unroll_safe - def next(self, shapelen): - offset = self.offset - indices = [0] * shapelen - for i in range(shapelen): - indices[i] = self.indices[i] - done = False - for i in range(shapelen - 1, -1, -1): - if indices[i] < self.arr.shape[i] - 1: - indices[i] += 1 - offset += self.arr.strides[i] - break - else: - indices[i] = 0 - offset -= self.arr.backstrides[i] - else: - done = True - res = instantiate(ViewIterator) - res.offset = offset - res.indices = indices - res.arr = self.arr - res._done = done - return res - - def done(self): - return self._done - - def get_offset(self): - return self.offset - -class BroadcastIterator(BaseIterator): - '''Like a view iterator, but will repeatedly access values - for all iterations across a res_shape, folding the offset - using mod() arithmetic - ''' - def __init__(self, arr, res_shape): - self.indices = [0] * len(res_shape) - self.offset = arr.start - #strides are 0 where original shape==1 - self.strides = [] - self.backstrides = [] - for i in range(len(arr.shape)): - if arr.shape[i] == 1: - self.strides.append(0) - self.backstrides.append(0) - else: - self.strides.append(arr.strides[i]) - self.backstrides.append(arr.backstrides[i]) - self.res_shape = res_shape - self.strides = [0] * 
(len(res_shape) - len(arr.shape)) + self.strides - self.backstrides = [0] * (len(res_shape) - len(arr.shape)) + self.backstrides - self._done = False - - @jit.unroll_safe - def next(self, shapelen): - offset = self.offset - indices = [0] * shapelen - _done = False - for i in range(shapelen): - indices[i] = self.indices[i] - for i in range(shapelen - 1, -1, -1): - if indices[i] < self.res_shape[i] - 1: - indices[i] += 1 - offset += self.strides[i] - break - else: - indices[i] = 0 - offset -= self.backstrides[i] - else: - _done = True - res = instantiate(BroadcastIterator) - res.indices = indices - res.offset = offset - res._done = _done - res.strides = self.strides - res.backstrides = self.backstrides - res.res_shape = self.res_shape - return res - - def done(self): - return self._done - - def get_offset(self): - return self.offset - -class Call2Iterator(BaseIterator): - def __init__(self, left, right): - self.left = left - self.right = right - - def next(self, shapelen): - return Call2Iterator(self.left.next(shapelen), - self.right.next(shapelen)) - - def done(self): - if isinstance(self.left, ConstantIterator): - return self.right.done() - return self.left.done() - - def get_offset(self): - if isinstance(self.left, ConstantIterator): - return self.right.get_offset() - return self.left.get_offset() - -class Call1Iterator(BaseIterator): - def __init__(self, child): - self.child = child - - def next(self, shapelen): - return Call1Iterator(self.child.next(shapelen)) - - def done(self): - return self.child.done() - - def get_offset(self): - return self.child.get_offset() - -class ConstantIterator(BaseIterator): - def next(self, shapelen): - return self - - def done(self): - return False - - def get_offset(self): - return 0 - - -class BaseArray(Wrappable): - _attrs_ = ["invalidates", "signature", "shape", "strides", "backstrides", - "start", 'order'] - - _immutable_fields_ = ['start', "order"] + _immutable_fields_ = [] strides = None start = 0 - def __init__(self, shape, order): + def __init__(self, shape): self.invalidates = [] self.shape = shape - self.order = order - if self.strides is None: - self.calc_strides(shape) - - def calc_strides(self, shape): - strides = [] - backstrides = [] - s = 1 - shape_rev = shape[:] - if self.order == 'C': - shape_rev.reverse() - for sh in shape_rev: - strides.append(s) - backstrides.append(s * (sh - 1)) - s *= sh - if self.order == 'C': - strides.reverse() - backstrides.reverse() - self.strides = strides[:] - self.backstrides = backstrides[:] def invalidated(self): if self.invalidates: @@ -499,33 +290,34 @@ def _reduce_argmax_argmin_impl(op_name): reduce_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['result', 'idx', 'i', 'self', 'cur_best', 'dtype'] + greens=['shapelen', 'sig'], + reds=['result', 'idx', 'frame', 'self', 'cur_best', 'dtype'] ) def loop(self): - i = self.start_iter() - cur_best = self.eval(i) + sig = self.find_sig() + frame = sig.create_frame(self) + cur_best = sig.eval(frame, self) shapelen = len(self.shape) - i = i.next(shapelen) + frame.next(shapelen) dtype = self.find_dtype() result = 0 idx = 1 - while not i.done(): - reduce_driver.jit_merge_point(signature=self.signature, + while not frame.done(): + reduce_driver.jit_merge_point(sig=sig, shapelen=shapelen, self=self, dtype=dtype, - i=i, result=result, idx=idx, + frame=frame, result=result, + idx=idx, cur_best=cur_best) - new_best = getattr(dtype.itemtype, op_name)(cur_best, self.eval(i)) + new_best = getattr(dtype.itemtype, op_name)(cur_best, sig.eval(frame, self)) 
if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best - i = i.next(shapelen) + frame.next(shapelen) idx += 1 return result def impl(self, space): - size = self.find_size() - if size == 0: + if self.size == 0: raise OperationError(space.w_ValueError, space.wrap("Can't call %s on zero-size arrays" % op_name)) return space.wrap(loop(self)) @@ -533,15 +325,16 @@ def _all(self): dtype = self.find_dtype() - i = self.start_iter() + sig = self.find_sig() + frame = sig.create_frame(self) shapelen = len(self.shape) - while not i.done(): - all_driver.jit_merge_point(signature=self.signature, + while not frame.done(): + all_driver.jit_merge_point(sig=sig, shapelen=shapelen, self=self, - dtype=dtype, i=i) - if not dtype.itemtype.bool(self.eval(i)): + dtype=dtype, frame=frame) + if not dtype.itemtype.bool(sig.eval(frame, self)): return False - i = i.next(shapelen) + frame.next(shapelen) return True def descr_all(self, space): @@ -549,15 +342,16 @@ def _any(self): dtype = self.find_dtype() - i = self.start_iter() + sig = self.find_sig() + frame = sig.create_frame(self) shapelen = len(self.shape) - while not i.done(): - any_driver.jit_merge_point(signature=self.signature, + while not frame.done(): + any_driver.jit_merge_point(sig=sig, frame=frame, shapelen=shapelen, self=self, - dtype=dtype, i=i) - if dtype.itemtype.bool(self.eval(i)): + dtype=dtype) + if dtype.itemtype.bool(sig.eval(frame, self)): return True - i = i.next(shapelen) + frame.next(shapelen) return False def descr_any(self, space): @@ -586,26 +380,33 @@ return space.newtuple([space.wrap(i) for i in self.shape]) def descr_set_shape(self, space, w_iterable): - concrete = self.get_concrete() new_shape = get_shape_from_iterable(space, - concrete.find_size(), w_iterable) - concrete.setshape(space, new_shape) + self.size, w_iterable) + if isinstance(self, Scalar): + return + self.get_concrete().setshape(space, new_shape) def descr_get_size(self, space): - return space.wrap(self.find_size()) + return space.wrap(self.size) def descr_copy(self, space): + return self.copy() + + def copy(self): return self.get_concrete().copy() def descr_len(self, space): - return self.get_concrete().descr_len(space) + if len(self.shape): + return space.wrap(self.shape[0]) + raise OperationError(space.w_TypeError, space.wrap( + "len() of unsized object")) def descr_repr(self, space): res = StringBuilder() res.append("array(") concrete = self.get_concrete() dtype = concrete.find_dtype() - if not concrete.find_size(): + if not concrete.size: res.append('[]') if len(self.shape) > 1: # An empty slice reports its shape @@ -617,18 +418,417 @@ concrete.to_str(space, 1, res, indent=' ') if (dtype is not interp_dtype.get_dtype_cache(space).w_float64dtype and dtype is not interp_dtype.get_dtype_cache(space).w_int64dtype) or \ - not self.find_size(): + not self.size: res.append(", dtype=" + dtype.name) res.append(")") return space.wrap(res.build()) + def descr_str(self, space): + ret = StringBuilder() + concrete = self.get_concrete_or_scalar() + concrete.to_str(space, 0, ret, ' ') + return space.wrap(ret.build()) + + @jit.unroll_safe + def _single_item_result(self, space, w_idx): + """ The result of getitem/setitem is a single item if w_idx + is a list of scalars that match the size of shape + """ + shape_len = len(self.shape) + if shape_len == 0: + raise OperationError(space.w_IndexError, space.wrap( + "0-d arrays can't be indexed")) + if shape_len == 1: + if space.isinstance_w(w_idx, space.w_int): + return True + if space.isinstance_w(w_idx, space.w_slice): 
+ return False + elif (space.isinstance_w(w_idx, space.w_slice) or + space.isinstance_w(w_idx, space.w_int)): + return False + lgt = space.len_w(w_idx) + if lgt > shape_len: + raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + if lgt < shape_len: + return False + for w_item in space.fixedview(w_idx): + if space.isinstance_w(w_item, space.w_slice): + return False + return True + + @jit.unroll_safe + def _prepare_slice_args(self, space, w_idx): + if (space.isinstance_w(w_idx, space.w_int) or + space.isinstance_w(w_idx, space.w_slice)): + return [space.decode_index4(w_idx, self.shape[0])] + return [space.decode_index4(w_item, self.shape[i]) for i, w_item in + enumerate(space.fixedview(w_idx))] + + def descr_getitem(self, space, w_idx): + if self._single_item_result(space, w_idx): + concrete = self.get_concrete() + item = concrete._index_of_single_item(space, w_idx) + return concrete.getitem(item) + chunks = self._prepare_slice_args(space, w_idx) + return space.wrap(self.create_slice(chunks)) + + def descr_setitem(self, space, w_idx, w_value): + self.invalidated() + if self._single_item_result(space, w_idx): + concrete = self.get_concrete() + item = concrete._index_of_single_item(space, w_idx) + dtype = concrete.find_dtype() + concrete.setitem(item, dtype.coerce(space, w_value)) + return + if not isinstance(w_value, BaseArray): + w_value = convert_to_array(space, w_value) + chunks = self._prepare_slice_args(space, w_idx) + view = self.create_slice(chunks).get_concrete() + view.setslice(space, w_value) + + @jit.unroll_safe + def create_slice(self, chunks): + shape = [] + i = -1 + for i, (start_, stop, step, lgt) in enumerate(chunks): + if step != 0: + shape.append(lgt) + s = i + 1 + assert s >= 0 + shape += self.shape[s:] + if not isinstance(self, ConcreteArray): + return VirtualSlice(self, chunks, shape) + r = calculate_slice_strides(self.shape, self.start, self.strides, + self.backstrides, chunks) + _, start, strides, backstrides = r + return W_NDimSlice(start, strides[:], backstrides[:], + shape[:], self) + + def descr_reshape(self, space, args_w): + """reshape(...) + a.reshape(shape) + + Returns an array containing the same data with a new shape. + + Refer to `numpypy.reshape` for full documentation. + + See Also + -------- + numpypy.reshape : equivalent function + """ + if len(args_w) == 1: + w_shape = args_w[0] + else: + w_shape = space.newtuple(args_w) + concrete = self.get_concrete() + new_shape = get_shape_from_iterable(space, concrete.size, w_shape) + # Since we got to here, prod(new_shape) == self.size + new_strides = calc_new_strides(new_shape, + concrete.shape, concrete.strides) + if new_strides: + # We can create a view, strides somehow match up. 
+ ndims = len(new_shape) + new_backstrides = [0] * ndims + for nd in range(ndims): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + arr = W_NDimSlice(concrete.start, new_strides, new_backstrides, + new_shape, self) + else: + # Create copy with contiguous data + arr = concrete.copy() + arr.setshape(space, new_shape) + return arr + + def descr_tolist(self, space): + if len(self.shape) == 0: + assert isinstance(self, Scalar) + return self.value.descr_tolist(space) + w_result = space.newlist([]) + for i in range(self.shape[0]): + space.call_method(w_result, "append", + space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist") + ) + return w_result + + def descr_mean(self, space): + return space.div(self.descr_sum(space), space.wrap(self.size)) + + def descr_nonzero(self, space): + if self.size > 1: + raise OperationError(space.w_ValueError, space.wrap( + "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) + concr = self.get_concrete_or_scalar() + sig = concr.find_sig() + frame = sig.create_frame(self) + return space.wrap(space.is_true( + sig.eval(frame, concr))) + + def get_concrete_or_scalar(self): + return self.get_concrete() + + def descr_get_transpose(self, space): + concrete = self.get_concrete() + if len(concrete.shape) < 2: + return space.wrap(self) + strides = [] + backstrides = [] + shape = [] + for i in range(len(concrete.shape) - 1, -1, -1): + strides.append(concrete.strides[i]) + backstrides.append(concrete.backstrides[i]) + shape.append(concrete.shape[i]) + return space.wrap(W_NDimSlice(concrete.start, strides[:], + backstrides[:], shape[:], concrete)) + + def descr_get_flatiter(self, space): + return space.wrap(W_FlatIterator(self)) + + def getitem(self, item): + raise NotImplementedError + + def find_sig(self, res_shape=None): + """ find a correct signature for the array + """ + res_shape = res_shape or self.shape + return signature.find_sig(self.create_sig(res_shape), self) + + def descr_array_iface(self, space): + if not self.shape: + raise OperationError(space.w_TypeError, + space.wrap("can't get the array data of a 0-d array for now") + ) + concrete = self.get_concrete() + storage = concrete.storage + addr = rffi.cast(lltype.Signed, storage) + w_d = space.newdict() + space.setitem_str(w_d, 'data', space.newtuple([space.wrap(addr), + space.w_False])) + return w_d + +def convert_to_array(space, w_obj): + if isinstance(w_obj, BaseArray): + return w_obj + elif space.issequence_w(w_obj): + # Convert to array. + return array(space, w_obj, w_order=None) + else: + # If it's a scalar + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) + return scalar_w(space, dtype, w_obj) + +def scalar_w(space, dtype, w_obj): + return Scalar(dtype, dtype.coerce(space, w_obj)) + +class Scalar(BaseArray): + """ + Intermediate class representing a literal. 
+ """ + size = 1 + _attrs_ = ["dtype", "value", "shape"] + + def __init__(self, dtype, value): + self.shape = [] + BaseArray.__init__(self, []) + self.dtype = dtype + self.value = value + + def find_dtype(self): + return self.dtype + + def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): + builder.append(self.dtype.itemtype.str_format(self.value)) + + def copy(self): + return Scalar(self.dtype, self.value) + + def create_sig(self, res_shape): + return signature.ScalarSignature(self.dtype) + + def get_concrete_or_scalar(self): + return self + + +class VirtualArray(BaseArray): + """ + Class for representing virtual arrays, such as binary ops or ufuncs + """ + def __init__(self, name, shape, res_dtype): + BaseArray.__init__(self, shape) + self.forced_result = None + self.res_dtype = res_dtype + self.name = name + + def _del_sources(self): + # Function for deleting references to source arrays, to allow garbage-collecting them + raise NotImplementedError + + def compute(self): + result = W_NDimArray(self.size, self.shape, self.find_dtype()) + shapelen = len(self.shape) + sig = self.find_sig() + frame = sig.create_frame(self) + ri = ArrayIterator(self.size) + while not ri.done(): + numpy_driver.jit_merge_point(sig=sig, + shapelen=shapelen, + result_size=self.size, + frame=frame, + ri=ri, + self=self, result=result) + result.dtype.setitem(result.storage, ri.offset, + sig.eval(frame, self)) + frame.next(shapelen) + ri = ri.next(shapelen) + return result + + def force_if_needed(self): + if self.forced_result is None: + self.forced_result = self.compute() + self._del_sources() + + def get_concrete(self): + self.force_if_needed() + res = self.forced_result + assert isinstance(res, ConcreteArray) + return res + + def getitem(self, item): + return self.get_concrete().getitem(item) + + def setitem(self, item, value): + return self.get_concrete().setitem(item, value) + + def find_dtype(self): + return self.res_dtype + +class VirtualSlice(VirtualArray): + def __init__(self, child, chunks, shape): + size = 1 + for sh in shape: + size *= sh + self.child = child + self.chunks = chunks + self.size = size + VirtualArray.__init__(self, 'slice', shape, child.find_dtype()) + + def create_sig(self, res_shape): + if self.forced_result is not None: + return self.forced_result.create_sig(res_shape) + return signature.VirtualSliceSignature( + self.child.create_sig(res_shape)) + + def force_if_needed(self): + if self.forced_result is None: + concr = self.child.get_concrete() + self.forced_result = concr.create_slice(self.chunks) + + def _del_sources(self): + self.child = None + +class Call1(VirtualArray): + def __init__(self, ufunc, name, shape, res_dtype, values): + VirtualArray.__init__(self, name, shape, res_dtype) + self.values = values + self.size = values.size + self.ufunc = ufunc + + def _del_sources(self): + self.values = None + + def create_sig(self, res_shape): + if self.forced_result is not None: + return self.forced_result.create_sig(res_shape) + return signature.Call1(self.ufunc, self.name, + self.values.create_sig(res_shape)) + +class Call2(VirtualArray): + """ + Intermediate class for performing binary operations. 
+ """ + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right): + VirtualArray.__init__(self, name, shape, res_dtype) + self.ufunc = ufunc + self.left = left + self.right = right + self.calc_dtype = calc_dtype + self.size = 1 + for s in self.shape: + self.size *= s + + def _del_sources(self): + self.left = None + self.right = None + + def create_sig(self, res_shape): + if self.forced_result is not None: + return self.forced_result.create_sig(res_shape) + return signature.Call2(self.ufunc, self.name, self.calc_dtype, + self.left.create_sig(res_shape), + self.right.create_sig(res_shape)) + +class ConcreteArray(BaseArray): + """ An array that have actual storage, whether owned or not + """ + _immutable_fields_ = ['storage'] + + def __init__(self, size, shape, dtype, order='C', parent=None): + self.size = size + self.parent = parent + if parent is not None: + self.storage = parent.storage + else: + self.storage = dtype.malloc(size) + self.order = order + self.dtype = dtype + if self.strides is None: + self.calc_strides(shape) + BaseArray.__init__(self, shape) + if parent is not None: + self.invalidates = parent.invalidates + + def get_concrete(self): + return self + + def find_dtype(self): + return self.dtype + + def getitem(self, item): + return self.dtype.getitem(self.storage, item) + + def setitem(self, item, value): + self.invalidated() + self.dtype.setitem(self.storage, item, value) + + def calc_strides(self, shape): + strides = [] + backstrides = [] + s = 1 + shape_rev = shape[:] + if self.order == 'C': + shape_rev.reverse() + for sh in shape_rev: + strides.append(s) + backstrides.append(s * (sh - 1)) + s *= sh + if self.order == 'C': + strides.reverse() + backstrides.reverse() + self.strides = strides[:] + self.backstrides = backstrides[:] + + def array_sig(self, res_shape): + if res_shape is not None and self.shape != res_shape: + return signature.ViewSignature(self.dtype) + return signature.ArraySignature(self.dtype) + def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): '''Modifies builder with a representation of the array/slice The items will be seperated by a comma if comma is 1 Multidimensional arrays/slices will span a number of lines, each line will begin with indent. 
''' - size = self.find_size() + size = self.size if size < 1: builder.append('[]') return @@ -654,7 +854,7 @@ builder.append(indent) # create_slice requires len(chunks) > 1 in order to reduce # shape - view = self.create_slice(space, [(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) + view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]).get_concrete() view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) builder.append('\n' + indent + '..., ') i = self.shape[0] - 3 @@ -669,7 +869,7 @@ builder.append(indent) # create_slice requires len(chunks) > 1 in order to reduce # shape - view = self.create_slice(space, [(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) + view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]).get_concrete() view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) i += 1 elif ndims == 1: @@ -705,12 +905,6 @@ builder.append('[') builder.append(']') - def descr_str(self, space): - ret = StringBuilder() - concrete = self.get_concrete() - concrete.to_str(space, 0, ret, ' ') - return space.wrap(ret.build()) - @jit.unroll_safe def _index_of_single_item(self, space, w_idx): if space.isinstance_w(w_idx, space.w_int): @@ -735,456 +929,55 @@ item += v * self.strides[i] return item - @jit.unroll_safe - def _single_item_result(self, space, w_idx): - """ The result of getitem/setitem is a single item if w_idx - is a list of scalars that match the size of shape - """ - shape_len = len(self.shape) - if shape_len == 0: - if not space.isinstance_w(w_idx, space.w_int): - raise OperationError(space.w_IndexError, space.wrap( - "wrong index")) - return True - if shape_len == 1: - if space.isinstance_w(w_idx, space.w_int): - return True - if space.isinstance_w(w_idx, space.w_slice): - return False - elif (space.isinstance_w(w_idx, space.w_slice) or - space.isinstance_w(w_idx, space.w_int)): - return False - lgt = space.len_w(w_idx) - if lgt > shape_len: - raise OperationError(space.w_IndexError, - space.wrap("invalid index")) - if lgt < shape_len: - return False - for w_item in space.fixedview(w_idx): - if space.isinstance_w(w_item, space.w_slice): - return False - return True - @jit.unroll_safe - def _prepare_slice_args(self, space, w_idx): - if (space.isinstance_w(w_idx, space.w_int) or - space.isinstance_w(w_idx, space.w_slice)): - return [space.decode_index4(w_idx, self.shape[0])] - return [space.decode_index4(w_item, self.shape[i]) for i, w_item in - enumerate(space.fixedview(w_idx))] +class ViewArray(ConcreteArray): + def copy(self): + array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) + iter = view_iter_from_arr(self) + a_iter = ArrayIterator(array.size) + while not iter.done(): + array.setitem(a_iter.offset, self.getitem(iter.offset)) + iter = iter.next(len(self.shape)) + a_iter = a_iter.next(len(array.shape)) + return array - def descr_getitem(self, space, w_idx): - if self._single_item_result(space, w_idx): - concrete = self.get_concrete() - if len(concrete.shape) < 1: - raise OperationError(space.w_IndexError, space.wrap( - "0-d arrays can't be indexed")) - item = concrete._index_of_single_item(space, w_idx) - return concrete.getitem(item) - chunks = self._prepare_slice_args(space, w_idx) - return space.wrap(self.create_slice(space, chunks)) + def create_sig(self, res_shape): + return signature.ViewSignature(self.dtype) - def descr_setitem(self, space, w_idx, w_value): - self.invalidated() - if self._single_item_result(space, w_idx): - concrete = 
self.get_concrete() - if len(concrete.shape) < 1: - raise OperationError(space.w_IndexError, space.wrap( - "0-d arrays can't be indexed")) - item = concrete._index_of_single_item(space, w_idx) - dtype = concrete.find_dtype() - concrete.setitem(item, dtype.coerce(space, w_value)) - return - if not isinstance(w_value, BaseArray): - w_value = convert_to_array(space, w_value) - chunks = self._prepare_slice_args(space, w_idx) - view = self.create_slice(space, chunks) - view.setslice(space, w_value) - @jit.unroll_safe - def create_slice(self, space, chunks): - if len(chunks) == 1: - start, stop, step, lgt = chunks[0] - if step == 0: - shape = self.shape[1:] - strides = self.strides[1:] - backstrides = self.backstrides[1:] - else: - shape = [lgt] + self.shape[1:] - strides = [self.strides[0] * step] + self.strides[1:] - backstrides = [(lgt - 1) * self.strides[0] * step] + self.backstrides[1:] - start *= self.strides[0] - start += self.start - else: - shape = [] - strides = [] - backstrides = [] - start = self.start - i = -1 - for i, (start_, stop, step, lgt) in enumerate(chunks): - if step != 0: - shape.append(lgt) - strides.append(self.strides[i] * step) - backstrides.append(self.strides[i] * (lgt - 1) * step) - start += self.strides[i] * start_ - # add a reminder - s = i + 1 - assert s >= 0 - shape += self.shape[s:] - strides += self.strides[s:] - backstrides += self.backstrides[s:] - new_sig = signature.Signature.find_sig([ - W_NDimSlice.signature, self.signature, - ]) - return W_NDimSlice(self, new_sig, start, strides[:], backstrides[:], - shape[:]) - - def descr_reshape(self, space, args_w): - """reshape(...) - a.reshape(shape) - - Returns an array containing the same data with a new shape. - - Refer to `numpypy.reshape` for full documentation. - - See Also - -------- - numpypy.reshape : equivalent function -""" - if len(args_w) == 1: - w_shape = args_w[0] - else: - w_shape = space.newtuple(args_w) - concrete = self.get_concrete() - new_shape = get_shape_from_iterable(space, - concrete.find_size(), w_shape) - # Since we got to here, prod(new_shape) == self.size - new_strides = calc_new_strides(new_shape, - concrete.shape, concrete.strides) - if new_strides: - # We can create a view, strides somehow match up. - new_sig = signature.Signature.find_sig([ - W_NDimSlice.signature, self.signature - ]) - ndims = len(new_shape) - new_backstrides = [0] * ndims - for nd in range(ndims): - new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] - arr = W_NDimSlice(self, new_sig, self.start, new_strides, - new_backstrides, new_shape) - else: - # Create copy with contiguous data - arr = concrete.copy() - arr.setshape(space, new_shape) - return arr - - def descr_tolist(self, space): - if len(self.shape) == 0: - assert isinstance(self, Scalar) - return self.value.descr_tolist(space) - w_result = space.newlist([]) - for i in range(self.shape[0]): - space.call_method(w_result, "append", - space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist") - ) - return w_result - - def descr_mean(self, space): - return space.div(self.descr_sum(space), space.wrap(self.find_size())) - - def descr_nonzero(self, space): - if self.find_size() > 1: - raise OperationError(space.w_ValueError, space.wrap( - "The truth value of an array with more than one element is ambiguous. 
Use a.any() or a.all()")) - return space.wrap(space.is_true( - self.get_concrete().eval(self.start_iter(self.shape)) - )) - - def descr_get_transpose(self, space): - concrete = self.get_concrete() - if len(concrete.shape) < 2: - return space.wrap(self) - new_sig = signature.Signature.find_sig([ - W_NDimSlice.signature, self.signature - ]) - strides = [] - backstrides = [] - shape = [] - for i in range(len(concrete.shape) - 1, -1, -1): - strides.append(concrete.strides[i]) - backstrides.append(concrete.backstrides[i]) - shape.append(concrete.shape[i]) - return space.wrap(W_NDimSlice(concrete, new_sig, self.start, strides[:], - backstrides[:], shape[:])) - - def descr_get_flatiter(self, space): - return space.wrap(W_FlatIterator(self)) - - def getitem(self, item): - raise NotImplementedError - - def start_iter(self, res_shape=None): - raise NotImplementedError - - def descr_array_iface(self, space): - concrete = self.get_concrete() - storage = concrete.get_storage(space) - addr = rffi.cast(lltype.Signed, storage) - w_d = space.newdict() - space.setitem_str(w_d, 'data', space.newtuple([space.wrap(addr), - space.w_False])) - return w_d - -def convert_to_array(space, w_obj): - if isinstance(w_obj, BaseArray): - return w_obj - elif space.issequence_w(w_obj): - # Convert to array. - return array(space, w_obj, w_order=None) - else: - # If it's a scalar - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) - return scalar_w(space, dtype, w_obj) - -def scalar_w(space, dtype, w_obj): - return Scalar(dtype, dtype.coerce(space, w_obj)) - -class Scalar(BaseArray): - """ - Intermediate class representing a literal. - """ - signature = signature.BaseSignature() - - _attrs_ = ["dtype", "value", "shape"] - - def __init__(self, dtype, value): - self.shape = self.strides = [] - BaseArray.__init__(self, [], 'C') - self.dtype = dtype - self.value = value - - def find_size(self): - return 1 - - def get_concrete(self): - return self - - def find_dtype(self): - return self.dtype - - def getitem(self, item): - raise NotImplementedError - - def eval(self, iter): - return self.value - - def start_iter(self, res_shape=None): - return ConstantIterator() - - def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): - builder.append(self.dtype.itemtype.str_format(self.value)) - - def copy(self): - return Scalar(self.dtype, self.value) - - def debug_repr(self): - return 'Scalar' - - def setshape(self, space, new_shape): - # In order to get here, we already checked that prod(new_shape) == 1, - # so in order to have a consistent API, let it go through. 
- pass - - def get_storage(self, space): - raise OperationError(space.w_TypeError, space.wrap("Cannot get array interface on scalars in pypy")) - -class VirtualArray(BaseArray): - """ - Class for representing virtual arrays, such as binary ops or ufuncs - """ - def __init__(self, signature, shape, res_dtype, order): - BaseArray.__init__(self, shape, order) - self.forced_result = None - self.signature = signature - self.res_dtype = res_dtype - - def _del_sources(self): - # Function for deleting references to source arrays, to allow garbage-collecting them - raise NotImplementedError - - def compute(self): - i = 0 - signature = self.signature - result_size = self.find_size() - result = W_NDimArray(result_size, self.shape, self.find_dtype()) - shapelen = len(self.shape) - i = self.start_iter() - ri = result.start_iter() - while not ri.done(): - numpy_driver.jit_merge_point(signature=signature, - shapelen=shapelen, - result_size=result_size, i=i, ri=ri, - self=self, result=result) - result.dtype.setitem(result.storage, ri.offset, self.eval(i)) - i = i.next(shapelen) - ri = ri.next(shapelen) - return result - - def force_if_needed(self): - if self.forced_result is None: - self.forced_result = self.compute() - self._del_sources() - - def get_concrete(self): - self.force_if_needed() - return self.forced_result - - def eval(self, iter): - if self.forced_result is not None: - return self.forced_result.eval(iter) - return self._eval(iter) - - def getitem(self, item): - return self.get_concrete().getitem(item) - - def setitem(self, item, value): - return self.get_concrete().setitem(item, value) - - def find_size(self): - if self.forced_result is not None: - # The result has been computed and sources may be unavailable - return self.forced_result.find_size() - return self._find_size() - - def find_dtype(self): - return self.res_dtype - - -class Call1(VirtualArray): - def __init__(self, signature, shape, res_dtype, values, order): - VirtualArray.__init__(self, signature, shape, res_dtype, - values.order) - self.values = values - - def _del_sources(self): - self.values = None - - def _find_size(self): - return self.values.find_size() - - def _find_dtype(self): - return self.res_dtype - - def _eval(self, iter): - assert isinstance(iter, Call1Iterator) - val = self.values.eval(iter.child).convert_to(self.res_dtype) - sig = jit.promote(self.signature) - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call1) - return call_sig.func(self.res_dtype, val) - - def start_iter(self, res_shape=None): - if self.forced_result is not None: - return self.forced_result.start_iter(res_shape) - return Call1Iterator(self.values.start_iter(res_shape)) - - def debug_repr(self): - sig = self.signature - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call1) - if self.forced_result is not None: - return 'Call1(%s, forced=%s)' % (call_sig.name, - self.forced_result.debug_repr()) - return 'Call1(%s, %s)' % (call_sig.name, - self.values.debug_repr()) - -class Call2(VirtualArray): - """ - Intermediate class for performing binary operations. 
- """ - def __init__(self, signature, shape, calc_dtype, res_dtype, left, right): - # XXX do something if left.order != right.order - VirtualArray.__init__(self, signature, shape, res_dtype, left.order) - self.left = left - self.right = right - self.calc_dtype = calc_dtype - self.size = 1 - for s in self.shape: - self.size *= s - - def _del_sources(self): - self.left = None - self.right = None - - def _find_size(self): - return self.size - - def start_iter(self, res_shape=None): - if self.forced_result is not None: - return self.forced_result.start_iter(res_shape) - if res_shape is None: - res_shape = self.shape # we still force the shape on children - return Call2Iterator(self.left.start_iter(res_shape), - self.right.start_iter(res_shape)) - - def _eval(self, iter): - assert isinstance(iter, Call2Iterator) - lhs = self.left.eval(iter.left).convert_to(self.calc_dtype) - rhs = self.right.eval(iter.right).convert_to(self.calc_dtype) - sig = jit.promote(self.signature) - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call2) - return call_sig.func(self.calc_dtype, lhs, rhs) - - def debug_repr(self): - sig = self.signature - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call2) - if self.forced_result is not None: - return 'Call2(%s, forced=%s)' % (call_sig.name, - self.forced_result.debug_repr()) - return 'Call2(%s, %s, %s)' % (call_sig.name, - self.left.debug_repr(), - self.right.debug_repr()) - -class ViewArray(BaseArray): - """ - Class for representing views of arrays, they will reflect changes of parent - arrays. Example: slices - """ - def __init__(self, parent, signature, strides, backstrides, shape): +class W_NDimSlice(ViewArray): + def __init__(self, start, strides, backstrides, shape, parent): + assert isinstance(parent, ConcreteArray) + if isinstance(parent, W_NDimSlice): + parent = parent.parent + size = 1 + for sh in shape: + size *= sh self.strides = strides self.backstrides = backstrides - BaseArray.__init__(self, shape, parent.order) - self.signature = signature - self.parent = parent - self.invalidates = parent.invalidates + ViewArray.__init__(self, size, shape, parent.dtype, parent.order, + parent) + self.start = start - def get_concrete(self): - # in fact, ViewArray never gets "concrete" as it never stores data. - # This implementation is needed for BaseArray getitem/setitem to work, - # can be refactored. - self.parent.get_concrete() - return self + def setslice(self, space, w_value): + res_shape = shape_agreement(space, self.shape, w_value.shape) + self._sliceloop(w_value, res_shape) - def getitem(self, item): - return self.parent.getitem(item) - - def eval(self, iter): - return self.parent.getitem(iter.get_offset()) - - def setitem(self, item, value): - # This is currently not possible to be called from anywhere. 
- raise NotImplementedError - - def descr_len(self, space): - if self.shape: - return space.wrap(self.shape[0]) - return space.wrap(1) + def _sliceloop(self, source, res_shape): + sig = source.find_sig(res_shape) + frame = sig.create_frame(source, res_shape) + res_iter = view_iter_from_arr(self) + shapelen = len(res_shape) + while not res_iter.done(): + slice_driver.jit_merge_point(sig=sig, + frame=frame, + shapelen=shapelen, + self=self, source=source, + res_iter=res_iter) + self.setitem(res_iter.offset, sig.eval(frame, source).convert_to( + self.find_dtype())) + frame.next(shapelen) + res_iter = res_iter.next(shapelen) def setshape(self, space, new_shape): if len(self.shape) < 1: @@ -1220,96 +1013,10 @@ self.backstrides = new_backstrides[:] self.shape = new_shape[:] -class W_NDimSlice(ViewArray): - signature = signature.BaseSignature() - - def __init__(self, parent, signature, start, strides, backstrides, - shape): - if isinstance(parent, W_NDimSlice): - parent = parent.parent - ViewArray.__init__(self, parent, signature, strides, backstrides, shape) - self.start = start - self.size = 1 - for sh in shape: - self.size *= sh - - def find_size(self): - return self.size - - def find_dtype(self): - return self.parent.find_dtype() - - def setslice(self, space, w_value): - res_shape = shape_agreement(space, self.shape, w_value.shape) - self._sliceloop(w_value, res_shape) - - def _sliceloop(self, source, res_shape): - source_iter = source.start_iter(res_shape) - res_iter = self.start_iter(res_shape) - shapelen = len(res_shape) - while not res_iter.done(): - slice_driver.jit_merge_point(signature=source.signature, - shapelen=shapelen, - self=self, source=source, - res_iter=res_iter, - source_iter=source_iter) - self.setitem(res_iter.offset, source.eval(source_iter).convert_to( - self.find_dtype())) - source_iter = source_iter.next(shapelen) - res_iter = res_iter.next(shapelen) - - def start_iter(self, res_shape=None): - if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - if len(self.shape) == 1: - return OneDimIterator(self.start, self.strides[0], self.shape[0]) - return ViewIterator(self) - - def setitem(self, item, value): - self.parent.setitem(item, value) - - def debug_repr(self): - return 'Slice(%s)' % self.parent.debug_repr() - - def copy(self): - array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) - iter = self.start_iter() - a_iter = array.start_iter() - while not iter.done(): - array.setitem(a_iter.offset, self.getitem(iter.offset)) - iter = iter.next(len(self.shape)) - a_iter = a_iter.next(len(array.shape)) - return array - - def get_storage(self, space): - return self.parent.get_storage(space) - -class W_NDimArray(BaseArray): +class W_NDimArray(ConcreteArray): """ A class representing contiguous array. 
We know that each iteration by say ufunc will increase the data index by one """ - def __init__(self, size, shape, dtype, order='C'): - BaseArray.__init__(self, shape, order) - self.size = size - self.dtype = dtype - self.storage = dtype.malloc(size) - self.signature = dtype.signature - - def get_concrete(self): - return self - - def find_size(self): - return self.size - - def find_dtype(self): - return self.dtype - - def getitem(self, item): - return self.dtype.getitem(self.storage, item) - - def eval(self, iter): - return self.dtype.getitem(self.storage, iter.get_offset()) - def copy(self): array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) rffi.c_memcpy( @@ -1319,32 +1026,16 @@ ) return array - def descr_len(self, space): - if len(self.shape): - return space.wrap(self.shape[0]) - raise OperationError(space.w_TypeError, space.wrap( - "len() of unsized object")) - def setitem(self, item, value): self.invalidated() self.dtype.setitem(self.storage, item, value) - def start_iter(self, res_shape=None): - if self.order == 'C': - if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - return ArrayIterator(self.size) - raise NotImplementedError # use ViewIterator simply, test it - def setshape(self, space, new_shape): self.shape = new_shape self.calc_strides(new_shape) - def debug_repr(self): - return 'Array' - - def get_storage(self, space): - return self.storage + def create_sig(self, res_shape): + return self.array_sig(res_shape) def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) @@ -1396,10 +1087,11 @@ ) arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) shapelen = len(shape) - arr_iter = arr.start_iter(arr.shape) + arr_iter = ArrayIterator(arr.size) for i in range(len(elems_w)): w_elem = elems_w[i] - dtype.setitem(arr.storage, arr_iter.offset, dtype.coerce(space, w_elem)) + dtype.setitem(arr.storage, arr_iter.offset, + dtype.coerce(space, w_elem)) arr_iter = arr_iter.next(shapelen) return arr @@ -1492,48 +1184,31 @@ class W_FlatIterator(ViewArray): - signature = signature.BaseSignature() @jit.unroll_safe def __init__(self, arr): + arr = arr.get_concrete() size = 1 for sh in arr.shape: size *= sh - new_sig = signature.Signature.find_sig([ - W_FlatIterator.signature, arr.signature - ]) - ViewArray.__init__(self, arr, new_sig, [arr.strides[-1]], - [arr.backstrides[-1]], [size]) + self.strides = [arr.strides[-1]] + self.backstrides = [arr.backstrides[-1]] + ViewArray.__init__(self, size, [size], arr.dtype, arr.order, + arr) self.shapelen = len(arr.shape) - self.arr = arr - self.iter = self.start_iter() - - def start_iter(self, res_shape=None): - if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - return OneDimIterator(self.arr.start, self.strides[0], - self.shape[0]) - - def find_dtype(self): - return self.arr.find_dtype() - - def find_size(self): - return self.shape[0] + self.iter = OneDimIterator(arr.start, self.strides[0], + self.shape[0]) def descr_next(self, space): if self.iter.done(): raise OperationError(space.w_StopIteration, space.w_None) - result = self.eval(self.iter) + result = self.getitem(self.iter.offset) self.iter = self.iter.next(self.shapelen) return result def descr_iter(self): return self - def debug_repr(self): - return 'FlatIter(%s)' % self.arr.debug_repr() - - W_FlatIterator.typedef = TypeDef( 'flatiter', next = interp2app(W_FlatIterator.descr_next), diff --git a/pypy/module/micronumpy/interp_ufuncs.py 
b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -2,20 +2,21 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import interp_boxes, interp_dtype, signature, types +from pypy.module.micronumpy import interp_boxes, interp_dtype, types +from pypy.module.micronumpy.signature import ReduceSignature, ScalarSignature, find_sig from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name - reduce_driver = jit.JitDriver( - greens = ['shapelen', "signature"], - reds = ["i", "self", "dtype", "value", "obj"] + greens = ['shapelen', "sig"], + virtualizables = ["frame"], + reds = ["frame", "self", "dtype", "value", "obj"] ) class W_Ufunc(Wrappable): _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"] - _immutable_fields_ = ["promote_to_float", "promote_bools"] + _immutable_fields_ = ["promote_to_float", "promote_bools", "name"] def __init__(self, name, promote_to_float, promote_bools, identity): self.name = name @@ -50,6 +51,7 @@ def reduce(self, space, w_obj, multidim): from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar + if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) @@ -60,13 +62,16 @@ raise OperationError(space.w_TypeError, space.wrap("cannot reduce " "on a scalar")) - size = obj.find_size() + size = obj.size dtype = find_unaryop_result_dtype( space, obj.find_dtype(), promote_to_largest=True ) - start = obj.start_iter(obj.shape) shapelen = len(obj.shape) + sig = find_sig(ReduceSignature(self.func, self.name, dtype, + ScalarSignature(dtype), + obj.create_sig(obj.shape)), obj) + frame = sig.create_frame(obj) if shapelen > 1 and not multidim: raise OperationError(space.w_NotImplementedError, space.wrap("not implemented yet")) @@ -74,34 +79,33 @@ if size == 0: raise operationerrfmt(space.w_ValueError, "zero-size array to " "%s.reduce without identity", self.name) - value = obj.eval(start).convert_to(dtype) - start = start.next(shapelen) + value = sig.eval(frame, obj).convert_to(dtype) + frame.next(shapelen) else: value = self.identity.convert_to(dtype) - new_sig = signature.Signature.find_sig([ - self.reduce_signature, obj.signature - ]) - return self.reduce_loop(new_sig, shapelen, start, value, obj, dtype) + return self.reduce_loop(shapelen, sig, frame, value, obj, dtype) - def reduce_loop(self, signature, shapelen, i, value, obj, dtype): - while not i.done(): - reduce_driver.jit_merge_point(signature=signature, + def reduce_loop(self, shapelen, sig, frame, value, obj, dtype): + while not frame.done(): + reduce_driver.jit_merge_point(sig=sig, shapelen=shapelen, self=self, - value=value, obj=obj, i=i, + value=value, obj=obj, frame=frame, dtype=dtype) - value = self.func(dtype, value, obj.eval(i).convert_to(dtype)) - i = i.next(shapelen) + assert isinstance(sig, ReduceSignature) + value = sig.binfunc(dtype, value, sig.eval(frame, obj).convert_to(dtype)) + frame.next(shapelen) return value class W_Ufunc1(W_Ufunc): argcount = 1 + _immutable_fields_ = ["func", "name"] + def __init__(self, func, name, promote_to_float=False, promote_bools=False, identity=None): W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity) self.func = func - 
self.signature = signature.Call1(func) def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call1, @@ -117,14 +121,13 @@ if isinstance(w_obj, Scalar): return self.func(res_dtype, w_obj.value.convert_to(res_dtype)) - new_sig = signature.Signature.find_sig([self.signature, w_obj.signature]) - w_res = Call1(new_sig, w_obj.shape, res_dtype, w_obj, w_obj.order) + w_res = Call1(self.func, self.name, w_obj.shape, res_dtype, w_obj) w_obj.add_invalidates(w_res) return w_res class W_Ufunc2(W_Ufunc): - _immutable_fields_ = ["comparison_func", "func"] + _immutable_fields_ = ["comparison_func", "func", "name"] argcount = 2 def __init__(self, func, name, promote_to_float=False, promote_bools=False, @@ -133,8 +136,6 @@ W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity) self.func = func self.comparison_func = comparison_func - self.signature = signature.Call2(func) - self.reduce_signature = signature.BaseSignature() def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, @@ -158,11 +159,9 @@ w_rhs.value.convert_to(calc_dtype) ) - new_sig = signature.Signature.find_sig([ - self.signature, w_lhs.signature, w_rhs.signature - ]) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) - w_res = Call2(new_sig, new_shape, calc_dtype, + w_res = Call2(self.func, self.name, + new_shape, calc_dtype, res_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -1,54 +1,322 @@ -from pypy.rlib.objectmodel import r_dict, compute_identity_hash +from pypy.rlib.objectmodel import r_dict, compute_identity_hash, compute_hash from pypy.rlib.rarithmetic import intmask +from pypy.module.micronumpy.interp_iter import ViewIterator, ArrayIterator, \ + OneDimIterator, ConstantIterator +from pypy.module.micronumpy.strides import calculate_slice_strides +from pypy.rlib.jit import hint, unroll_safe, promote +def sigeq(one, two): + return one.eq(two) -def components_eq(lhs, rhs): - if len(lhs) != len(rhs): - return False - for i in range(len(lhs)): - v1, v2 = lhs[i], rhs[i] - if type(v1) is not type(v2) or not v1.eq(v2): +def sigeq_no_numbering(one, two): + """ Cache for iterator numbering should not compare array numbers + """ + return one.eq(two, compare_array_no=False) + +def sighash(sig): + return sig.hash() + +known_sigs = r_dict(sigeq, sighash) + +def find_sig(sig, arr): + sig.invent_array_numbering(arr) + try: + return known_sigs[sig] + except KeyError: + sig.invent_numbering() + known_sigs[sig] = sig + return sig + +class NumpyEvalFrame(object): + _virtualizable2_ = ['iterators[*]', 'final_iter', 'arraylist[*]'] + + @unroll_safe + def __init__(self, iterators, arrays): + self = hint(self, access_directly=True, fresh_virtualizable=True) + self.iterators = iterators[:] + self.arrays = arrays[:] + for i in range(len(self.iterators)): + iter = self.iterators[i] + if not isinstance(iter, ConstantIterator): + self.final_iter = i + break + else: + self.final_iter = -1 + + def done(self): + final_iter = promote(self.final_iter) + if final_iter < 0: return False - return True + return self.iterators[final_iter].done() -def components_hash(components): - res = 0x345678 - for component in components: - res = intmask((1000003 * res) ^ component.hash()) - return res + @unroll_safe + def next(self, shapelen): + for i in range(len(self.iterators)): + 
self.iterators[i] = self.iterators[i].next(shapelen) -class BaseSignature(object): - _attrs_ = [] +def _add_ptr_to_cache(ptr, cache): + i = 0 + for p in cache: + if ptr == p: + return i + i += 1 + else: + res = len(cache) + cache.append(ptr) + return res - def eq(self, other): - return self is other +class Signature(object): + _attrs_ = ['iter_no', 'array_no'] + _immutable_fields_ = ['iter_no', 'array_no'] + + array_no = 0 + iter_no = 0 + + def invent_numbering(self): + cache = r_dict(sigeq_no_numbering, sighash) + allnumbers = [] + self._invent_numbering(cache, allnumbers) + + def invent_array_numbering(self, arr): + cache = [] + self._invent_array_numbering(arr, cache) + + def _invent_numbering(self, cache, allnumbers): + try: + no = cache[self] + except KeyError: + no = len(allnumbers) + cache[self] = no + allnumbers.append(no) + self.iter_no = no + + def create_frame(self, arr, res_shape=None): + res_shape = res_shape or arr.shape + iterlist = [] + arraylist = [] + self._create_iter(iterlist, arraylist, arr, res_shape, []) + return NumpyEvalFrame(iterlist, arraylist) + +class ConcreteSignature(Signature): + _immutable_fields_ = ['dtype'] + + def __init__(self, dtype): + self.dtype = dtype + + def eq(self, other, compare_array_no=True): + if type(self) is not type(other): + return False + assert isinstance(other, ConcreteSignature) + if compare_array_no: + if self.array_no != other.array_no: + return False + return self.dtype is other.dtype def hash(self): - return compute_identity_hash(self) + return compute_identity_hash(self.dtype) -class Signature(BaseSignature): - _known_sigs = r_dict(components_eq, components_hash) + def allocate_view_iter(self, arr, res_shape, chunklist): + r = arr.shape, arr.start, arr.strides, arr.backstrides + if chunklist: + for chunkelem in chunklist: + r = calculate_slice_strides(r[0], r[1], r[2], r[3], chunkelem) + shape, start, strides, backstrides = r + if len(res_shape) == 1: + return OneDimIterator(start, strides[0], res_shape[0]) + return ViewIterator(start, strides, backstrides, shape, res_shape) - _attrs_ = ["components"] - _immutable_fields_ = ["components[*]"] +class ArraySignature(ConcreteSignature): + def debug_repr(self): + return 'Array' - def __init__(self, components): - self.components = components + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import ConcreteArray + concr = arr.get_concrete() + assert isinstance(concr, ConcreteArray) + self.array_no = _add_ptr_to_cache(concr.storage, cache) - @staticmethod - def find_sig(components): - return Signature._known_sigs.setdefault(components, Signature(components)) + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import ConcreteArray + concr = arr.get_concrete() + assert isinstance(concr, ConcreteArray) + storage = concr.storage + if self.iter_no >= len(iterlist): + iterlist.append(self.allocate_iter(concr, res_shape, chunklist)) + if self.array_no >= len(arraylist): + arraylist.append(storage) -class Call1(BaseSignature): - _immutable_fields_ = ["func", "name"] + def allocate_iter(self, arr, res_shape, chunklist): + if chunklist: + return self.allocate_view_iter(arr, res_shape, chunklist) + return ArrayIterator(arr.size) - def __init__(self, func): - self.func = func - self.name = func.func_name + def eval(self, frame, arr): + iter = frame.iterators[self.iter_no] + return self.dtype.getitem(frame.arrays[self.array_no], iter.offset) -class Call2(BaseSignature): - 
_immutable_fields_ = ["func", "name"] +class ScalarSignature(ConcreteSignature): + def debug_repr(self): + return 'Scalar' - def __init__(self, func): - self.func = func - self.name = func.func_name + def _invent_array_numbering(self, arr, cache): + pass + + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + if self.iter_no >= len(iterlist): + iter = ConstantIterator() + iterlist.append(iter) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Scalar + assert isinstance(arr, Scalar) + return arr.value + +class ViewSignature(ArraySignature): + def debug_repr(self): + return 'Slice' + + def _invent_numbering(self, cache, allnumbers): + # always invent a new number for view + no = len(allnumbers) + allnumbers.append(no) + self.iter_no = no + + def allocate_iter(self, arr, res_shape, chunklist): + return self.allocate_view_iter(arr, res_shape, chunklist) + +class VirtualSliceSignature(Signature): + def __init__(self, child): + self.child = child + + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import VirtualSlice + assert isinstance(arr, VirtualSlice) + self.child._invent_array_numbering(arr.child, cache) + + def hash(self): + return intmask(self.child.hash() ^ 1234) + + def eq(self, other, compare_array_no=True): + if type(self) is not type(other): + return False + assert isinstance(other, VirtualSliceSignature) + return self.child.eq(other.child, compare_array_no) + + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import VirtualSlice + assert isinstance(arr, VirtualSlice) + chunklist.append(arr.chunks) + self.child._create_iter(iterlist, arraylist, arr.child, res_shape, + chunklist) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import VirtualSlice + assert isinstance(arr, VirtualSlice) + return self.child.eval(frame, arr.child) + +class Call1(Signature): + _immutable_fields_ = ['unfunc', 'name', 'child'] + + def __init__(self, func, name, child): + self.unfunc = func + self.child = child + self.name = name + + def hash(self): + return compute_hash(self.name) ^ intmask(self.child.hash() << 1) + + def eq(self, other, compare_array_no=True): + if type(self) is not type(other): + return False + assert isinstance(other, Call1) + return (self.unfunc is other.unfunc and + self.child.eq(other.child, compare_array_no)) + + def debug_repr(self): + return 'Call1(%s, %s)' % (self.name, self.child.debug_repr()) + + def _invent_numbering(self, cache, allnumbers): + self.child._invent_numbering(cache, allnumbers) + + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + self.child._invent_array_numbering(arr.values, cache) + + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + self.child._create_iter(iterlist, arraylist, arr.values, res_shape, + chunklist) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + v = self.child.eval(frame, arr.values).convert_to(arr.res_dtype) + return self.unfunc(arr.res_dtype, v) + +class Call2(Signature): + _immutable_fields_ = ['binfunc', 'name', 'calc_dtype', 'left', 'right'] + + def __init__(self, func, name, calc_dtype, left, right): + self.binfunc = func + self.left = left + self.right = right + 
self.name = name + self.calc_dtype = calc_dtype + + def hash(self): + return (compute_hash(self.name) ^ intmask(self.left.hash() << 1) ^ + intmask(self.right.hash() << 2)) + + def eq(self, other, compare_array_no=True): + if type(self) is not type(other): + return False + assert isinstance(other, Call2) + return (self.binfunc is other.binfunc and + self.calc_dtype is other.calc_dtype and + self.left.eq(other.left, compare_array_no) and + self.right.eq(other.right, compare_array_no)) + + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import Call2 + assert isinstance(arr, Call2) + self.left._invent_array_numbering(arr.left, cache) + self.right._invent_array_numbering(arr.right, cache) + + def _invent_numbering(self, cache, allnumbers): + self.left._invent_numbering(cache, allnumbers) + self.right._invent_numbering(cache, allnumbers) + + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import Call2 + + assert isinstance(arr, Call2) + self.left._create_iter(iterlist, arraylist, arr.left, res_shape, + chunklist) + self.right._create_iter(iterlist, arraylist, arr.right, res_shape, + chunklist) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Call2 + assert isinstance(arr, Call2) + lhs = self.left.eval(frame, arr.left).convert_to(self.calc_dtype) + rhs = self.right.eval(frame, arr.right).convert_to(self.calc_dtype) + return self.binfunc(self.calc_dtype, lhs, rhs) + + def debug_repr(self): + return 'Call2(%s, %s, %s)' % (self.name, self.left.debug_repr(), + self.right.debug_repr()) + +class ReduceSignature(Call2): + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + self.right._create_iter(iterlist, arraylist, arr, res_shape, chunklist) + + def _invent_numbering(self, cache, allnumbers): + self.right._invent_numbering(cache, allnumbers) + + def _invent_array_numbering(self, arr, cache): + self.right._invent_array_numbering(arr, cache) + + def eval(self, frame, arr): + return self.right.eval(frame, arr) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/strides.py @@ -0,0 +1,34 @@ + +def calculate_slice_strides(shape, start, strides, backstrides, chunks): + rstrides = [] + rbackstrides = [] + rstart = start + rshape = [] + i = -1 + for i, (start_, stop, step, lgt) in enumerate(chunks): + if step != 0: + rstrides.append(strides[i] * step) + rbackstrides.append(strides[i] * (lgt - 1) * step) + rshape.append(lgt) + rstart += strides[i] * start_ + # add a reminder + s = i + 1 + assert s >= 0 + rstrides += strides[s:] + rbackstrides += backstrides[s:] + rshape += shape[s:] + return rshape, rstart, rstrides, rbackstrides + +def calculate_broadcast_strides(strides, backstrides, orig_shape, res_shape): + rstrides = [] + rbackstrides = [] + for i in range(len(orig_shape)): + if orig_shape[i] == 1: + rstrides.append(0) + rbackstrides.append(0) + else: + rstrides.append(strides[i]) + rbackstrides.append(backstrides[i]) + rstrides = [0] * (len(res_shape) - len(orig_shape)) + rstrides + rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides + return rstrides, rbackstrides diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -4,7 +4,6 @@ from pypy.module.micronumpy.interp_ufuncs import 
(find_binop_result_dtype, find_unaryop_result_dtype) - class BaseNumpyAppTest(object): def setup_class(cls): cls.space = gettestobjspace(usemodules=['micronumpy']) @@ -15,20 +14,37 @@ bool_dtype = get_dtype_cache(space).w_booldtype ar = W_NDimArray(10, [10], dtype=float64_dtype) + ar2 = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) - assert v1.signature is not v2.signature + sig1 = v1.find_sig() + sig2 = v2.find_sig() + assert v1 is not v2 + assert sig1.left.iter_no == sig1.right.iter_no + assert sig2.left.iter_no != sig2.right.iter_no + assert sig1.left.array_no == sig1.right.array_no + sig1b = ar2.descr_add(space, ar).find_sig() + assert sig1b.left.array_no != sig1b.right.array_no + assert sig1b is not sig1 v3 = ar.descr_add(space, Scalar(float64_dtype, 1.0)) - assert v2.signature is v3.signature + sig3 = v3.find_sig() + assert sig2 is sig3 v4 = ar.descr_add(space, ar) - assert v1.signature is v4.signature + assert v1.find_sig() is v4.find_sig() bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) - assert v5.signature is not v1.signature - assert v5.signature is not v2.signature + assert v5.find_sig() is not v1.find_sig() + assert v5.find_sig() is not v2.find_sig() v6 = ar.descr_add(space, bool_ar) - assert v5.signature is v6.signature + assert v5.find_sig() is v6.find_sig() + v7 = v6.descr_add(space, v6) + sig7 = v7.find_sig() + assert sig7.left.left.iter_no == sig7.right.left.iter_no + assert sig7.left.left.iter_no != sig7.right.right.iter_no + assert sig7.left.right.iter_no == sig7.right.right.iter_no + v1.forced_result = ar + assert v1.find_sig() is not sig1 def test_slice_signature(self, space): float64_dtype = get_dtype_cache(space).w_float64dtype @@ -36,11 +52,14 @@ ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) - assert v1.signature is v2.signature + assert v1.find_sig() is v2.find_sig() v3 = v2.descr_add(space, v1) v4 = v1.descr_add(space, v2) - assert v3.signature is v4.signature + assert v3.find_sig() is v4.find_sig() + v5 = ar.descr_add(space, ar).descr_getitem(space, space.wrap(slice(1, 3, 1))) + v6 = ar.descr_add(space, ar).descr_getitem(space, space.wrap(slice(1, 4, 1))) + assert v5.find_sig() is v6.find_sig() class TestUfuncCoerscion(object): def test_binops(self, space): diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -137,6 +137,16 @@ interp = self.run(code) assert interp.results[0].value.value == 15 + def test_sum2(self): + code = """ + a = |30| + b = a + a + sum(b) + """ + interp = self.run(code) + assert interp.results[0].value.value == 30 * (30 - 1) + + def test_array_write(self): code = """ a = [1,2,3,4,5] diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -8,8 +8,6 @@ class MockDtype(object): - signature = signature.BaseSignature() - def malloc(self, size): return None @@ -38,92 +36,86 @@ assert a.backstrides == [135, 12, 2] def test_create_slice_f(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice(space, [(3, 0, 0, 1)]) + s = a.create_slice([(3, 0, 0, 1)]) assert s.start == 3 
assert s.strides == [10, 50] assert s.backstrides == [40, 100] - s = a.create_slice(space, [(1, 9, 2, 4)]) + s = a.create_slice([(1, 9, 2, 4)]) assert s.start == 1 assert s.strides == [2, 10, 50] assert s.backstrides == [6, 40, 100] - s = a.create_slice(space, [(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) assert s.shape == [2, 1] assert s.strides == [3, 10] assert s.backstrides == [3, 0] - s = a.create_slice(space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) assert s.start == 20 assert s.shape == [10, 3] def test_create_slice_c(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') - s = a.create_slice(space, [(3, 0, 0, 1)]) + s = a.create_slice([(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] assert s.backstrides == [12, 2] - s = a.create_slice(space, [(1, 9, 2, 4)]) + s = a.create_slice([(1, 9, 2, 4)]) assert s.start == 15 assert s.strides == [30, 3, 1] assert s.backstrides == [90, 12, 2] - s = a.create_slice(space, [(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) assert s.start == 19 assert s.shape == [2, 1] assert s.strides == [45, 3] assert s.backstrides == [45, 0] - s = a.create_slice(space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) assert s.start == 6 assert s.shape == [10, 3] def test_slice_of_slice_f(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice(space, [(5, 0, 0, 1)]) + s = a.create_slice([(5, 0, 0, 1)]) assert s.start == 5 - s2 = s.create_slice(space, [(3, 0, 0, 1)]) + s2 = s.create_slice([(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [50] assert s2.parent is a assert s2.backstrides == [100] assert s2.start == 35 - s = a.create_slice(space, [(1, 5, 3, 2)]) - s2 = s.create_slice(space, [(0, 2, 1, 2), (2, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2)]) + s2 = s.create_slice([(0, 2, 1, 2), (2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [3, 50] assert s2.backstrides == [3, 100] assert s2.start == 1 * 15 + 2 * 3 def test_slice_of_slice_c(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') - s = a.create_slice(space, [(5, 0, 0, 1)]) + s = a.create_slice([(5, 0, 0, 1)]) assert s.start == 15 * 5 - s2 = s.create_slice(space, [(3, 0, 0, 1)]) + s2 = s.create_slice([(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [1] assert s2.parent is a assert s2.backstrides == [2] assert s2.start == 5 * 15 + 3 * 3 - s = a.create_slice(space, [(1, 5, 3, 2)]) - s2 = s.create_slice(space, [(0, 2, 1, 2), (2, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2)]) + s2 = s.create_slice([(0, 2, 1, 2), (2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [45, 1] assert s2.backstrides == [45, 2] assert s2.start == 1 * 15 + 2 * 3 def test_negative_step_f(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice(space, [(9, -1, -2, 5)]) + s = a.create_slice([(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] assert s.backstrides == [-8, 40, 100] def test_negative_step_c(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') - s = a.create_slice(space, [(9, -1, -2, 5)]) + s = a.create_slice([(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] @@ -132,7 +124,7 @@ a = 
W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 + 2 * 10 + 2 * 50 - s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) r = s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) @@ -142,7 +134,7 @@ a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 * 3 * 5 + 2 * 3 + 2 - s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) r = s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) @@ -897,13 +889,32 @@ a = zeros(1) assert debug_repr(a) == 'Array' assert debug_repr(a + a) == 'Call2(add, Array, Array)' - assert debug_repr(a[::2]) == 'Slice(Array)' + assert debug_repr(a[::2]) == 'Slice' assert debug_repr(a + 2) == 'Call2(add, Array, Scalar)' - assert debug_repr(a + a.flat) == 'Call2(add, Array, FlatIter(Array))' + assert debug_repr(a + a.flat) == 'Call2(add, Array, Slice)' assert debug_repr(sin(a)) == 'Call1(sin, Array)' + b = a + a b[0] = 3 - assert debug_repr(b) == 'Call2(add, forced=Array)' + assert debug_repr(b) == 'Array' + + def test_virtual_views(self): + from numpypy import arange + a = arange(15) + c = (a + a) + d = c[::2] + assert d[3] == 12 + c[6] = 5 + assert d[3] == 5 + a = arange(15) + c = (a + a) + d = c[::2][::2] + assert d[1] == 8 + b = a + a + c = b[::2] + c[:] = 3 + assert b[0] == 3 + assert b[1] == 2 def test_tolist_scalar(self): from numpypy import int32, bool_ @@ -1075,10 +1086,10 @@ def test_broadcast_setslice(self): from numpypy import zeros, ones - a = zeros((100, 100)) - b = ones(100) + a = zeros((10, 10)) + b = ones(10) a[:, :] = b - assert a[13, 15] == 1 + assert a[3, 5] == 1 def test_broadcast_shape_agreement(self): from numpypy import zeros, array @@ -1112,6 +1123,14 @@ b[:] = (a + a) assert (b == zeros((4, 3, 5))).all() + def test_broadcast_virtualview(self): + from numpypy import arange, zeros + a = arange(8).reshape([2, 2, 2]) + b = (a + a)[1, 1] + c = zeros((2, 2, 2)) + c[:] = b + assert (c == [[[12, 14], [12, 14]], [[12, 14], [12, 14]]]).all() + def test_argmax(self): from numpypy import array a = array([[1, 2], [3, 4], [5, 6]]) @@ -1173,6 +1192,11 @@ a = array([1, 2, 3]) assert dot(a.flat, a.flat) == 14 + def test_flatiter_varray(self): + from numpypy import ones + a = ones((2, 2)) + assert list(((a + a).flat)) == [2, 2, 2, 2] + def test_slice_copy(self): from numpypy import zeros a = zeros((10, 10)) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -49,10 +49,14 @@ interp.run(space) w_res = interp.results[-1] if isinstance(w_res, BaseArray): - w_res = w_res.eval(w_res.start_iter()) - + concr = w_res.get_concrete_or_scalar() + sig = concr.find_sig() + frame = sig.create_frame(concr) + w_res = sig.eval(frame, concr) if isinstance(w_res, interp_boxes.W_Float64Box): return w_res.value + if isinstance(w_res, interp_boxes.W_Int64Box): + return float(w_res.value) elif isinstance(w_res, interp_boxes.W_BoolBox): return float(w_res.value) raise TypeError(w_res) @@ -78,8 
+82,9 @@ def test_add(self): result = self.run("add") self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, - 'setinteriorfield_raw': 1, 'int_add': 3, - 'int_ge': 1, 'guard_false': 1, 'jump': 1}) + 'setinteriorfield_raw': 1, 'int_add': 2, + 'int_ge': 1, 'guard_false': 1, 'jump': 1, + 'arraylen_gc': 1}) assert result == 3 + 3 def define_float_add(): @@ -93,7 +98,8 @@ assert result == 3 + 3 self.check_simple_loop({"getinteriorfield_raw": 1, "float_add": 1, "setinteriorfield_raw": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + "int_ge": 1, "guard_false": 1, "jump": 1, + 'arraylen_gc': 1}) def define_sum(): return """ @@ -106,8 +112,8 @@ result = self.run("sum") assert result == 2 * sum(range(30)) self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 2, - "int_add": 2, "int_ge": 1, "guard_false": 1, - "jump": 1}) + "int_add": 1, "int_ge": 1, "guard_false": 1, + "jump": 1, 'arraylen_gc': 1}) def define_prod(): return """ @@ -123,18 +129,22 @@ expected *= i * 2 assert result == expected self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + "float_mul": 1, "int_add": 1, + "int_ge": 1, "guard_false": 1, "jump": 1, + 'arraylen_gc': 1}) - def test_max(self): - py.test.skip("broken, investigate") - result = self.run(""" + def define_max(): + return """ a = |30| a[13] = 128 b = a + a max(b) - """) + """ + + def test_max(self): + result = self.run("max") assert result == 256 + py.test.skip("not there yet, getting though") self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_mul": 1, "int_add": 1, "int_lt": 1, "guard_true": 1, "jump": 1}) @@ -164,9 +174,9 @@ result = self.run("any") assert result == 1 self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, - "float_ne": 1, "int_add": 2, + "float_ne": 1, "int_add": 1, "int_ge": 1, "jump": 1, - "guard_false": 2}) + "guard_false": 2, 'arraylen_gc': 1}) def define_already_forced(): return """ @@ -183,14 +193,13 @@ # This is the sum of the ops for both loops, however if you remove the # optimization then you end up with 2 float_adds, so we can still be # sure it was optimized correctly. - # XXX the comment above is wrong now. 
We need preferrably a way to - # count the two loops separately - self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, - 'getfield_gc': 35, 'getfield_gc_pure': 6, - 'guard_class': 22, 'int_add': 8, 'float_mul': 2, - 'guard_isnull': 2, 'jump': 2, 'int_ge': 4, - 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, - 'guard_value': 2}) + self.check_resops({'setinteriorfield_raw': 4, 'getfield_gc': 26, + 'getarrayitem_gc': 4, 'getarrayitem_gc_pure': 2, + 'getfield_gc_pure': 4, + 'guard_class': 8, 'int_add': 8, 'float_mul': 2, + 'jump': 2, 'int_ge': 4, + 'getinteriorfield_raw': 4, 'float_add': 2, + 'guard_false': 4, 'arraylen_gc': 2, 'same_as': 2}) def define_ufunc(): return """ @@ -204,8 +213,9 @@ result = self.run("ufunc") assert result == -6 self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_neg": 1, - "setinteriorfield_raw": 1, "int_add": 3, - "int_ge": 1, "guard_false": 1, "jump": 1}) + "setinteriorfield_raw": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1, + 'arraylen_gc': 1}) def define_specialization(): return """ @@ -248,7 +258,8 @@ 'setinteriorfield_raw': 1, 'int_add': 3, 'int_ge': 1, 'guard_false': 1, - 'jump': 1}) + 'jump': 1, + 'arraylen_gc': 1}) def define_multidim(): return """ @@ -263,8 +274,9 @@ # int_add might be 1 here if we try slightly harder with # reusing indexes or some optimization self.check_simple_loop({'float_add': 1, 'getinteriorfield_raw': 2, - 'guard_false': 1, 'int_add': 3, 'int_ge': 1, - 'jump': 1, 'setinteriorfield_raw': 1}) + 'guard_false': 1, 'int_add': 2, 'int_ge': 1, + 'jump': 1, 'setinteriorfield_raw': 1, + 'arraylen_gc': 1}) def define_multidim_slice(): return """ @@ -312,7 +324,25 @@ self.check_trace_count(1) self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, 'setinteriorfield_raw': 1, 'int_add': 3, - 'int_eq': 1, 'guard_false': 1, 'jump': 1}) + 'int_lt': 1, 'guard_true': 1, 'jump': 1, + 'arraylen_gc': 3}) + + def define_virtual_slice(): + return """ + a = |30| + c = a + a + d = c -> 1:20 + d -> 1 + """ + + def test_virtual_slice(self): + result = self.run("virtual_slice") + assert result == 4 + self.check_trace_count(1) + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, + 'setinteriorfield_raw': 1, 'int_add': 2, + 'int_ge': 1, 'guard_false': 1, 'jump': 1, + 'arraylen_gc': 1}) class TestNumpyOld(LLJitMixin): def setup_class(cls): diff --git a/pypy/module/micronumpy/test/test_ztranslation.py b/pypy/module/micronumpy/test/test_ztranslation.py --- a/pypy/module/micronumpy/test/test_ztranslation.py +++ b/pypy/module/micronumpy/test/test_ztranslation.py @@ -1,5 +1,8 @@ - +from pypy.module.micronumpy import signature from pypy.objspace.fake.checkmodule import checkmodule def test_numpy_translates(): + # XXX: If there are signatures floating around this might explode. This fix + # is ugly. 
+ signature.known_sigs.clear() checkmodule('micronumpy') diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -252,7 +252,7 @@ # grow the list done = 0 while done < len(self._seen_extras): - print self._seen_extras + #print self._seen_extras ann.build_types(self._seen_extras[done], [], complete_now=False) done += 1 From noreply at buildbot.pypy.org Tue Dec 20 23:28:57 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 20 Dec 2011 23:28:57 +0100 (CET) Subject: [pypy-commit] pypy default: kill review file Message-ID: <20111220222857.C1B03820B7@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50792:862207881328 Date: 2011-12-20 16:28 -0600 http://bitbucket.org/pypy/pypy/changeset/862207881328/ Log: kill review file diff --git a/pypy/module/micronumpy/REVIEW.txt b/pypy/module/micronumpy/REVIEW.txt deleted file mode 100644 --- a/pypy/module/micronumpy/REVIEW.txt +++ /dev/null @@ -1,10 +0,0 @@ -REVIEW NOTES -============ - -* VirtualSlice vs. W_NDimSlice? -* W_NDimSlice.__init__ calls ConcreteArray.__init__ instead of - ViewArray.__init__, W_FlatIterator as well. -* Cleanup of the iterator and array caching/numbering. It's a mess right now: - * _creater_iter updates the arraylist - * Why do Scalars need an iterator at all? - * Do views share storage with concrete arrays or other views? From noreply at buildbot.pypy.org Wed Dec 21 00:28:44 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 21 Dec 2011 00:28:44 +0100 (CET) Subject: [pypy-commit] pypy default: fix the OverflowError checking in str.replace on 32-bit systems, also simplify the code slightly Message-ID: <20111220232844.9598D820B7@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50793:e9153b7926a3 Date: 2011-12-20 23:28 +0000 http://bitbucket.org/pypy/pypy/changeset/e9153b7926a3/ Log: fix the OverflowError checking in str.replace on 32-bit systems, also simplify the code slightly diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -514,29 +514,33 @@ if maxsplit == 0: return space.wrap(input) - # An ok guess at the default size - builder = StringBuilder(len(input)) - first = True - if not sub: upper = len(input) if maxsplit > 0 and maxsplit < upper + 2: upper = maxsplit - 1 assert upper >= 0 - first = False + try: - for i in range(upper): - builder.append(by) - builder.append(input[i]) + result_size = ovfcheck(upper * len(by)) + result_size = ovfcheck(result_size + upper) + result_size = ovfcheck(result_size + len(by)) + result_size = ovfcheck(result_size + upper - len(input)) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("replace string is too long") + ) + builder = StringBuilder(result_size) + for i in range(upper): builder.append(by) - builder.append_slice(input, upper, len(input)) - except MemoryError: - raise OperationError(space.w_OverflowError, - space.wrap("replace string too long") - ) + builder.append(input[i]) + builder.append(by) + builder.append_slice(input, upper, len(input)) else: + # An ok guess for the result size + builder = StringBuilder(len(input)) start = 0 sublen = len(sub) + first = True while maxsplit != 0: next = input.find(sub, start) From noreply at buildbot.pypy.org Wed Dec 21 05:55:27 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 21 Dec 2011 05:55:27 +0100 (CET) Subject: 
[pypy-commit] pypy default: fix backwards logic
Message-ID: <20111221045527.9C3EB820B7@wyvern.cs.uni-duesseldorf.de>

Author: Alex Gaynor
Branch: 
Changeset: r50794:d84a162addff
Date: 2011-12-20 22:54 -0600
http://bitbucket.org/pypy/pypy/changeset/d84a162addff/

Log: fix backwards logic

diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py
--- a/pypy/objspace/std/stringobject.py
+++ b/pypy/objspace/std/stringobject.py
@@ -524,7 +524,8 @@
             result_size = ovfcheck(upper * len(by))
             result_size = ovfcheck(result_size + upper)
             result_size = ovfcheck(result_size + len(by))
-            result_size = ovfcheck(result_size + upper - len(input))
+            remaining_size = len(input) - upper
+            result_size = ovfcheck(result_size + remaining_size)
         except OverflowError:
             raise OperationError(space.w_OverflowError,
                 space.wrap("replace string is too long")

From noreply at buildbot.pypy.org Wed Dec 21 11:30:36 2011
From: noreply at buildbot.pypy.org (hakanardo)
Date: Wed, 21 Dec 2011 11:30:36 +0100 (CET)
Subject: [pypy-commit] pypy jit-multilabel: hg merge default
Message-ID: <20111221103036.50A758217E@wyvern.cs.uni-duesseldorf.de>

Author: Hakan Ardo
Branch: jit-multilabel
Changeset: r50795:169a84c26509
Date: 2011-12-21 11:29 +0100
http://bitbucket.org/pypy/pypy/changeset/169a84c26509/

Log: hg merge default

diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py
--- a/pypy/jit/backend/llgraph/runner.py
+++ b/pypy/jit/backend/llgraph/runner.py
@@ -37,7 +37,7 @@
     def get_arg_types(self):
         return self.arg_types

-    def get_return_type(self):
+    def get_result_type(self):
         return self.typeinfo

     def get_extra_info(self):
diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py
--- a/pypy/jit/backend/llsupport/descr.py
+++ b/pypy/jit/backend/llsupport/descr.py
@@ -5,11 +5,7 @@
 from pypy.jit.metainterp.history import AbstractDescr, getkind
 from pypy.jit.metainterp import history
 from pypy.jit.codewriter import heaptracker, longlong
-
-# The point of the class organization in this file is to make instances
-# as compact as possible. This is done by not storing the field size or
-# the 'is_pointer_field' flag in the instance itself but in the class
-# (in methods actually) using a few classes instead of just one.
+from pypy.jit.codewriter.longlong import is_longlong class GcCache(object): @@ -19,6 +15,7 @@ self._cache_size = {} self._cache_field = {} self._cache_array = {} + self._cache_arraylen = {} self._cache_call = {} self._cache_interiorfield = {} @@ -26,24 +23,15 @@ assert isinstance(STRUCT, lltype.GcStruct) def init_array_descr(self, ARRAY, arraydescr): - assert isinstance(ARRAY, lltype.GcArray) + assert (isinstance(ARRAY, lltype.GcArray) or + isinstance(ARRAY, lltype.GcStruct) and ARRAY._arrayfld) -if lltype.SignedLongLong is lltype.Signed: - def is_longlong(TYPE): - return False -else: - assert rffi.sizeof(lltype.SignedLongLong) == rffi.sizeof(lltype.Float) - def is_longlong(TYPE): - return TYPE in (lltype.SignedLongLong, lltype.UnsignedLongLong) - # ____________________________________________________________ # SizeDescrs class SizeDescr(AbstractDescr): size = 0 # help translation - is_immutable = False - tid = llop.combine_ushort(lltype.Signed, 0, 0) def __init__(self, size, count_fields_if_immut=-1): @@ -77,265 +65,247 @@ cache[STRUCT] = sizedescr return sizedescr + # ____________________________________________________________ # FieldDescrs -class BaseFieldDescr(AbstractDescr): +FLAG_POINTER = 'P' +FLAG_FLOAT = 'F' +FLAG_UNSIGNED = 'U' +FLAG_SIGNED = 'S' +FLAG_STRUCT = 'X' +FLAG_VOID = 'V' + +class FieldDescr(AbstractDescr): + name = '' offset = 0 # help translation - name = '' - _clsname = '' + field_size = 0 + flag = '\x00' - def __init__(self, name, offset): + def __init__(self, name, offset, field_size, flag): self.name = name self.offset = offset + self.field_size = field_size + self.flag = flag + + def is_pointer_field(self): + return self.flag == FLAG_POINTER + + def is_float_field(self): + return self.flag == FLAG_FLOAT + + def is_field_signed(self): + return self.flag == FLAG_SIGNED def sort_key(self): return self.offset - def get_field_size(self, translate_support_code): - raise NotImplementedError + def repr_of_descr(self): + return '' % (self.flag, self.name, self.offset) - _is_pointer_field = False # unless overridden by GcPtrFieldDescr - _is_float_field = False # unless overridden by FloatFieldDescr - _is_field_signed = False # unless overridden by XxxFieldDescr - - def is_pointer_field(self): - return self._is_pointer_field - - def is_float_field(self): - return self._is_float_field - - def is_field_signed(self): - return self._is_field_signed - - def repr_of_descr(self): - return '<%s %s %s>' % (self._clsname, self.name, self.offset) - -class DynamicFieldDescr(BaseFieldDescr): - def __init__(self, offset, fieldsize, is_pointer, is_float, is_signed): - self.offset = offset - self._fieldsize = fieldsize - self._is_pointer_field = is_pointer - self._is_float_field = is_float - self._is_field_signed = is_signed - - def get_field_size(self, translate_support_code): - return self._fieldsize - -class NonGcPtrFieldDescr(BaseFieldDescr): - _clsname = 'NonGcPtrFieldDescr' - def get_field_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrFieldDescr(NonGcPtrFieldDescr): - _clsname = 'GcPtrFieldDescr' - _is_pointer_field = True - -def getFieldDescrClass(TYPE): - return getDescrClass(TYPE, BaseFieldDescr, GcPtrFieldDescr, - NonGcPtrFieldDescr, 'Field', 'get_field_size', - '_is_float_field', '_is_field_signed') def get_field_descr(gccache, STRUCT, fieldname): cache = gccache._cache_field try: return cache[STRUCT][fieldname] except KeyError: - offset, _ = symbolic.get_field_token(STRUCT, fieldname, - 
gccache.translate_support_code) + offset, size = symbolic.get_field_token(STRUCT, fieldname, + gccache.translate_support_code) FIELDTYPE = getattr(STRUCT, fieldname) + flag = get_type_flag(FIELDTYPE) name = '%s.%s' % (STRUCT._name, fieldname) - fielddescr = getFieldDescrClass(FIELDTYPE)(name, offset) + fielddescr = FieldDescr(name, offset, size, flag) cachedict = cache.setdefault(STRUCT, {}) cachedict[fieldname] = fielddescr return fielddescr +def get_type_flag(TYPE): + if isinstance(TYPE, lltype.Ptr): + if TYPE.TO._gckind == 'gc': + return FLAG_POINTER + else: + return FLAG_UNSIGNED + if isinstance(TYPE, lltype.Struct): + return FLAG_STRUCT + if TYPE is lltype.Float or is_longlong(TYPE): + return FLAG_FLOAT + if (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and + rffi.cast(TYPE, -1) == -1): + return FLAG_SIGNED + return FLAG_UNSIGNED + +def get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT): + cache = gccache._cache_arraylen + try: + return cache[ARRAY_OR_STRUCT] + except KeyError: + tsc = gccache.translate_support_code + (_, _, ofs) = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + size = symbolic.get_size(lltype.Signed, tsc) + result = FieldDescr("len", ofs, size, get_type_flag(lltype.Signed)) + cache[ARRAY_OR_STRUCT] = result + return result + + # ____________________________________________________________ # ArrayDescrs -_A = lltype.GcArray(lltype.Signed) # a random gcarray -_AF = lltype.GcArray(lltype.Float) # an array of C doubles +class ArrayDescr(AbstractDescr): + tid = 0 + basesize = 0 # workaround for the annotator + itemsize = 0 + lendescr = None + flag = '\x00' - -class BaseArrayDescr(AbstractDescr): - _clsname = '' - tid = llop.combine_ushort(lltype.Signed, 0, 0) - - def get_base_size(self, translate_support_code): - basesize, _, _ = symbolic.get_array_token(_A, translate_support_code) - return basesize - - def get_ofs_length(self, translate_support_code): - _, _, ofslength = symbolic.get_array_token(_A, translate_support_code) - return ofslength - - def get_item_size(self, translate_support_code): - raise NotImplementedError - - _is_array_of_pointers = False # unless overridden by GcPtrArrayDescr - _is_array_of_floats = False # unless overridden by FloatArrayDescr - _is_array_of_structs = False # unless overridden by StructArrayDescr - _is_item_signed = False # unless overridden by XxxArrayDescr + def __init__(self, basesize, itemsize, lendescr, flag): + self.basesize = basesize + self.itemsize = itemsize + self.lendescr = lendescr # or None, if no length + self.flag = flag def is_array_of_pointers(self): - return self._is_array_of_pointers + return self.flag == FLAG_POINTER def is_array_of_floats(self): - return self._is_array_of_floats + return self.flag == FLAG_FLOAT + + def is_item_signed(self): + return self.flag == FLAG_SIGNED def is_array_of_structs(self): - return self._is_array_of_structs - - def is_item_signed(self): - return self._is_item_signed + return self.flag == FLAG_STRUCT def repr_of_descr(self): - return '<%s>' % self._clsname + return '' % (self.flag, self.itemsize) -class NonGcPtrArrayDescr(BaseArrayDescr): - _clsname = 'NonGcPtrArrayDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayDescr(NonGcPtrArrayDescr): - _clsname = 'GcPtrArrayDescr' - _is_array_of_pointers = True - -class FloatArrayDescr(BaseArrayDescr): - _clsname = 'FloatArrayDescr' - _is_array_of_floats = True - def get_base_size(self, translate_support_code): - basesize, _, _ = 
symbolic.get_array_token(_AF, translate_support_code) - return basesize - def get_item_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class StructArrayDescr(BaseArrayDescr): - _clsname = 'StructArrayDescr' - _is_array_of_structs = True - -class BaseArrayNoLengthDescr(BaseArrayDescr): - def get_base_size(self, translate_support_code): - return 0 - - def get_ofs_length(self, translate_support_code): - return -1 - -class DynamicArrayNoLengthDescr(BaseArrayNoLengthDescr): - def __init__(self, itemsize): - self.itemsize = itemsize - - def get_item_size(self, translate_support_code): - return self.itemsize - -class NonGcPtrArrayNoLengthDescr(BaseArrayNoLengthDescr): - _clsname = 'NonGcPtrArrayNoLengthDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayNoLengthDescr(NonGcPtrArrayNoLengthDescr): - _clsname = 'GcPtrArrayNoLengthDescr' - _is_array_of_pointers = True - -def getArrayDescrClass(ARRAY): - if ARRAY.OF is lltype.Float: - return FloatArrayDescr - elif isinstance(ARRAY.OF, lltype.Struct): - class Descr(StructArrayDescr): - _clsname = '%sArrayDescr' % ARRAY.OF._name - def get_item_size(self, translate_support_code): - return symbolic.get_size(ARRAY.OF, translate_support_code) - Descr.__name__ = Descr._clsname - return Descr - return getDescrClass(ARRAY.OF, BaseArrayDescr, GcPtrArrayDescr, - NonGcPtrArrayDescr, 'Array', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def getArrayNoLengthDescrClass(ARRAY): - return getDescrClass(ARRAY.OF, BaseArrayNoLengthDescr, GcPtrArrayNoLengthDescr, - NonGcPtrArrayNoLengthDescr, 'ArrayNoLength', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def get_array_descr(gccache, ARRAY): +def get_array_descr(gccache, ARRAY_OR_STRUCT): cache = gccache._cache_array try: - return cache[ARRAY] + return cache[ARRAY_OR_STRUCT] except KeyError: - # we only support Arrays that are either GcArrays, or raw no-length - # non-gc Arrays. 
- if ARRAY._hints.get('nolength', False): - assert not isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayNoLengthDescrClass(ARRAY)() + tsc = gccache.translate_support_code + basesize, itemsize, _ = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + if isinstance(ARRAY_OR_STRUCT, lltype.Array): + ARRAY_INSIDE = ARRAY_OR_STRUCT else: - assert isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayDescrClass(ARRAY)() - # verify basic assumption that all arrays' basesize and ofslength - # are equal - basesize, itemsize, ofslength = symbolic.get_array_token(ARRAY, False) - assert basesize == arraydescr.get_base_size(False) - assert itemsize == arraydescr.get_item_size(False) - if not ARRAY._hints.get('nolength', False): - assert ofslength == arraydescr.get_ofs_length(False) - if isinstance(ARRAY, lltype.GcArray): - gccache.init_array_descr(ARRAY, arraydescr) - cache[ARRAY] = arraydescr + ARRAY_INSIDE = ARRAY_OR_STRUCT._flds[ARRAY_OR_STRUCT._arrayfld] + if ARRAY_INSIDE._hints.get('nolength', False): + lendescr = None + else: + lendescr = get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT) + flag = get_type_flag(ARRAY_INSIDE.OF) + arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag) + if ARRAY_OR_STRUCT._gckind == 'gc': + gccache.init_array_descr(ARRAY_OR_STRUCT, arraydescr) + cache[ARRAY_OR_STRUCT] = arraydescr return arraydescr + # ____________________________________________________________ # InteriorFieldDescr class InteriorFieldDescr(AbstractDescr): - arraydescr = BaseArrayDescr() # workaround for the annotator - fielddescr = BaseFieldDescr('', 0) + arraydescr = ArrayDescr(0, 0, None, '\x00') # workaround for the annotator + fielddescr = FieldDescr('', 0, 0, '\x00') def __init__(self, arraydescr, fielddescr): + assert arraydescr.flag == FLAG_STRUCT self.arraydescr = arraydescr self.fielddescr = fielddescr + def sort_key(self): + return self.fielddescr.sort_key() + def is_pointer_field(self): return self.fielddescr.is_pointer_field() def is_float_field(self): return self.fielddescr.is_float_field() - def sort_key(self): - return self.fielddescr.sort_key() - def repr_of_descr(self): return '' % self.fielddescr.repr_of_descr() -def get_interiorfield_descr(gc_ll_descr, ARRAY, FIELDTP, name): +def get_interiorfield_descr(gc_ll_descr, ARRAY, name): cache = gc_ll_descr._cache_interiorfield try: - return cache[(ARRAY, FIELDTP, name)] + return cache[(ARRAY, name)] except KeyError: arraydescr = get_array_descr(gc_ll_descr, ARRAY) - fielddescr = get_field_descr(gc_ll_descr, FIELDTP, name) + fielddescr = get_field_descr(gc_ll_descr, ARRAY.OF, name) descr = InteriorFieldDescr(arraydescr, fielddescr) - cache[(ARRAY, FIELDTP, name)] = descr + cache[(ARRAY, name)] = descr return descr +def get_dynamic_interiorfield_descr(gc_ll_descr, offset, width, fieldsize, + is_pointer, is_float, is_signed): + arraydescr = ArrayDescr(0, width, None, FLAG_STRUCT) + if is_pointer: + assert not is_float + flag = FLAG_POINTER + elif is_float: + flag = FLAG_FLOAT + elif is_signed: + flag = FLAG_SIGNED + else: + flag = FLAG_UNSIGNED + fielddescr = FieldDescr('dynamic', offset, fieldsize, flag) + return InteriorFieldDescr(arraydescr, fielddescr) + + # ____________________________________________________________ # CallDescrs -class BaseCallDescr(AbstractDescr): - _clsname = '' - loop_token = None +class CallDescr(AbstractDescr): arg_classes = '' # <-- annotation hack + result_type = '\x00' + result_flag = '\x00' ffi_flags = 1 + call_stub_i = staticmethod(lambda func, args_i, args_r, args_f: + 0) + call_stub_r = 
staticmethod(lambda func, args_i, args_r, args_f: + lltype.nullptr(llmemory.GCREF.TO)) + call_stub_f = staticmethod(lambda func,args_i,args_r,args_f: + longlong.ZEROF) - def __init__(self, arg_classes, extrainfo=None, ffi_flags=1): - self.arg_classes = arg_classes # string of "r" and "i" (ref/int) + def __init__(self, arg_classes, result_type, result_signed, result_size, + extrainfo=None, ffi_flags=1): + """ + 'arg_classes' is a string of characters, one per argument: + 'i', 'r', 'f', 'L', 'S' + + 'result_type' is one character from the same list or 'v' + + 'result_signed' is a boolean True/False + """ + self.arg_classes = arg_classes + self.result_type = result_type + self.result_size = result_size self.extrainfo = extrainfo self.ffi_flags = ffi_flags # NB. the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which # makes sense on Windows as it's the one for all the C functions # we are compiling together with the JIT. On non-Windows platforms # it is just ignored anyway. + if result_type == 'v': + result_flag = FLAG_VOID + elif result_type == 'i': + if result_signed: + result_flag = FLAG_SIGNED + else: + result_flag = FLAG_UNSIGNED + elif result_type == history.REF: + result_flag = FLAG_POINTER + elif result_type == history.FLOAT or result_type == 'L': + result_flag = FLAG_FLOAT + elif result_type == 'S': + result_flag = FLAG_UNSIGNED + else: + raise NotImplementedError("result_type = '%s'" % (result_type,)) + self.result_flag = result_flag def __repr__(self): - res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) + res = 'CallDescr(%s)' % (self.arg_classes,) extraeffect = getattr(self.extrainfo, 'extraeffect', None) if extraeffect is not None: res += ' EF=%r' % extraeffect @@ -363,14 +333,14 @@ def get_arg_types(self): return self.arg_classes - def get_return_type(self): - return self._return_type + def get_result_type(self): + return self.result_type - def get_result_size(self, translate_support_code): - raise NotImplementedError + def get_result_size(self): + return self.result_size def is_result_signed(self): - return False # unless overridden + return self.result_flag == FLAG_SIGNED def create_call_stub(self, rtyper, RESULT): from pypy.rlib.clibffi import FFI_DEFAULT_ABI @@ -408,18 +378,26 @@ seen = {'i': 0, 'r': 0, 'f': 0} args = ", ".join([process(c) for c in self.arg_classes]) - if self.get_return_type() == history.INT: + result_type = self.get_result_type() + if result_type == history.INT: result = 'rffi.cast(lltype.Signed, res)' - elif self.get_return_type() == history.REF: + category = 'i' + elif result_type == history.REF: + assert RESULT == llmemory.GCREF # should be ensured by the caller result = 'lltype.cast_opaque_ptr(llmemory.GCREF, res)' - elif self.get_return_type() == history.FLOAT: + category = 'r' + elif result_type == history.FLOAT: result = 'longlong.getfloatstorage(res)' - elif self.get_return_type() == 'L': + category = 'f' + elif result_type == 'L': result = 'rffi.cast(lltype.SignedLongLong, res)' - elif self.get_return_type() == history.VOID: - result = 'None' - elif self.get_return_type() == 'S': + category = 'f' + elif result_type == history.VOID: + result = '0' + category = 'i' + elif result_type == 'S': result = 'longlong.singlefloat2int(res)' + category = 'i' else: assert 0 source = py.code.Source(""" @@ -433,10 +411,13 @@ d = globals().copy() d.update(locals()) exec source.compile() in d - self.call_stub = d['call_stub'] + call_stub = d['call_stub'] + # store the function into one of three attributes, to preserve + # type-correctness of the 
return value + setattr(self, 'call_stub_%s' % category, call_stub) def verify_types(self, args_i, args_r, args_f, return_type): - assert self._return_type in return_type + assert self.result_type in return_type assert (self.arg_classes.count('i') + self.arg_classes.count('S')) == len(args_i or ()) assert self.arg_classes.count('r') == len(args_r or ()) @@ -444,161 +425,56 @@ self.arg_classes.count('L')) == len(args_f or ()) def repr_of_descr(self): - return '<%s>' % self._clsname + res = 'Call%s %d' % (self.result_type, self.result_size) + if self.arg_classes: + res += ' ' + self.arg_classes + if self.extrainfo: + res += ' EF=%d' % self.extrainfo.extraeffect + oopspecindex = self.extrainfo.oopspecindex + if oopspecindex: + res += ' OS=%d' % oopspecindex + return '<%s>' % res -class BaseIntCallDescr(BaseCallDescr): - # Base class of the various subclasses of descrs corresponding to - # calls having a return kind of 'int' (including non-gc pointers). - # The inheritance hierarchy is a bit different than with other Descr - # classes because of the 'call_stub' attribute, which is of type - # - # lambda func, args_i, args_r, args_f --> int/ref/float/void - # - # The purpose of BaseIntCallDescr is to be the parent of all classes - # in which 'call_stub' has a return kind of 'int'. - _return_type = history.INT - call_stub = staticmethod(lambda func, args_i, args_r, args_f: 0) - - _is_result_signed = False # can be overridden in XxxCallDescr - def is_result_signed(self): - return self._is_result_signed - -class DynamicIntCallDescr(BaseIntCallDescr): - """ - calldescr that works for every integer type, by explicitly passing it the - size of the result. Used only by get_call_descr_dynamic - """ - _clsname = 'DynamicIntCallDescr' - - def __init__(self, arg_classes, result_size, result_sign, extrainfo, ffi_flags): - BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) - assert isinstance(result_sign, bool) - self._result_size = chr(result_size) - self._result_sign = result_sign - - def get_result_size(self, translate_support_code): - return ord(self._result_size) - - def is_result_signed(self): - return self._result_sign - - -class NonGcPtrCallDescr(BaseIntCallDescr): - _clsname = 'NonGcPtrCallDescr' - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrCallDescr(BaseCallDescr): - _clsname = 'GcPtrCallDescr' - _return_type = history.REF - call_stub = staticmethod(lambda func, args_i, args_r, args_f: - lltype.nullptr(llmemory.GCREF.TO)) - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class FloatCallDescr(BaseCallDescr): - _clsname = 'FloatCallDescr' - _return_type = history.FLOAT - call_stub = staticmethod(lambda func,args_i,args_r,args_f: longlong.ZEROF) - def get_result_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class LongLongCallDescr(FloatCallDescr): - _clsname = 'LongLongCallDescr' - _return_type = 'L' - -class VoidCallDescr(BaseCallDescr): - _clsname = 'VoidCallDescr' - _return_type = history.VOID - call_stub = staticmethod(lambda func, args_i, args_r, args_f: None) - def get_result_size(self, translate_support_code): - return 0 - -_SingleFloatCallDescr = None # built lazily - -def getCallDescrClass(RESULT): - if RESULT is lltype.Void: - return VoidCallDescr - if RESULT is lltype.Float: - return FloatCallDescr - if RESULT is lltype.SingleFloat: - global _SingleFloatCallDescr - if 
_SingleFloatCallDescr is None: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(RESULT) - class SingleFloatCallDescr(getCallDescrClass(rffi.UINT)): - _clsname = 'SingleFloatCallDescr' - _return_type = 'S' - _SingleFloatCallDescr = SingleFloatCallDescr - return _SingleFloatCallDescr - if is_longlong(RESULT): - return LongLongCallDescr - return getDescrClass(RESULT, BaseIntCallDescr, GcPtrCallDescr, - NonGcPtrCallDescr, 'Call', 'get_result_size', - Ellipsis, # <= floatattrname should not be used here - '_is_result_signed') -getCallDescrClass._annspecialcase_ = 'specialize:memo' +def map_type_to_argclass(ARG, accept_void=False): + kind = getkind(ARG) + if kind == 'int': + if ARG is lltype.SingleFloat: return 'S' + else: return 'i' + elif kind == 'ref': return 'r' + elif kind == 'float': + if is_longlong(ARG): return 'L' + else: return 'f' + elif kind == 'void': + if accept_void: return 'v' + raise NotImplementedError('ARG = %r' % (ARG,)) def get_call_descr(gccache, ARGS, RESULT, extrainfo=None): - arg_classes = [] - for ARG in ARGS: - kind = getkind(ARG) - if kind == 'int': - if ARG is lltype.SingleFloat: - arg_classes.append('S') + arg_classes = map(map_type_to_argclass, ARGS) + arg_classes = ''.join(arg_classes) + result_type = map_type_to_argclass(RESULT, accept_void=True) + RESULT_ERASED = RESULT + if RESULT is lltype.Void: + result_size = 0 + result_signed = False + else: + if isinstance(RESULT, lltype.Ptr): + # avoid too many CallDescrs + if result_type == 'r': + RESULT_ERASED = llmemory.GCREF else: - arg_classes.append('i') - elif kind == 'ref': arg_classes.append('r') - elif kind == 'float': - if is_longlong(ARG): - arg_classes.append('L') - else: - arg_classes.append('f') - else: - raise NotImplementedError('ARG = %r' % (ARG,)) - arg_classes = ''.join(arg_classes) - cls = getCallDescrClass(RESULT) - key = (cls, arg_classes, extrainfo) + RESULT_ERASED = llmemory.Address + result_size = symbolic.get_size(RESULT_ERASED, + gccache.translate_support_code) + result_signed = get_type_flag(RESULT) == FLAG_SIGNED + key = (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo) cache = gccache._cache_call try: - return cache[key] + calldescr = cache[key] except KeyError: - calldescr = cls(arg_classes, extrainfo) - calldescr.create_call_stub(gccache.rtyper, RESULT) + calldescr = CallDescr(arg_classes, result_type, result_signed, + result_size, extrainfo) + calldescr.create_call_stub(gccache.rtyper, RESULT_ERASED) cache[key] = calldescr - return calldescr - - -# ____________________________________________________________ - -def getDescrClass(TYPE, BaseDescr, GcPtrDescr, NonGcPtrDescr, - nameprefix, methodname, floatattrname, signedattrname, - _cache={}): - if isinstance(TYPE, lltype.Ptr): - if TYPE.TO._gckind == 'gc': - return GcPtrDescr - else: - return NonGcPtrDescr - if TYPE is lltype.SingleFloat: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(TYPE) - TYPE = rffi.UINT - try: - return _cache[nameprefix, TYPE] - except KeyError: - # - class Descr(BaseDescr): - _clsname = '%s%sDescr' % (TYPE._name, nameprefix) - Descr.__name__ = Descr._clsname - # - def method(self, translate_support_code): - return symbolic.get_size(TYPE, translate_support_code) - setattr(Descr, methodname, method) - # - if TYPE is lltype.Float or is_longlong(TYPE): - setattr(Descr, floatattrname, True) - elif (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and - rffi.cast(TYPE, -1) == -1): - setattr(Descr, signedattrname, True) - # - _cache[nameprefix, TYPE] = Descr - return Descr + assert 
repr(calldescr.result_size) == repr(result_size) + return calldescr diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -1,9 +1,7 @@ from pypy.rlib.rarithmetic import intmask from pypy.jit.metainterp import history from pypy.rpython.lltypesystem import rffi -from pypy.jit.backend.llsupport.descr import ( - DynamicIntCallDescr, NonGcPtrCallDescr, FloatCallDescr, VoidCallDescr, - LongLongCallDescr, getCallDescrClass) +from pypy.jit.backend.llsupport.descr import CallDescr class UnsupportedKind(Exception): pass @@ -16,29 +14,13 @@ argkinds = [get_ffi_type_kind(cpu, arg) for arg in ffi_args] except UnsupportedKind: return None - arg_classes = ''.join(argkinds) - if reskind == history.INT: - size = intmask(ffi_result.c_size) - signed = is_ffi_type_signed(ffi_result) - return DynamicIntCallDescr(arg_classes, size, signed, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.REF: - return NonGcPtrCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.FLOAT: - return FloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.VOID: - return VoidCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'L': - return LongLongCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'S': - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - return SingleFloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - assert False + if reskind == history.VOID: + result_size = 0 + else: + result_size = intmask(ffi_result.c_size) + argkinds = ''.join(argkinds) + return CallDescr(argkinds, reskind, is_ffi_type_signed(ffi_result), + result_size, extrainfo, ffi_flags=ffi_flags) def get_ffi_type_kind(cpu, ffi_type): from pypy.rlib.libffi import types diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,6 +1,6 @@ import os from pypy.rlib import rgc -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize from pypy.rlib.debug import fatalerror from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr @@ -8,52 +8,93 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt, ConstPtr -from pypy.jit.metainterp.history import AbstractDescr +from pypy.jit.codewriter import heaptracker +from pypy.jit.metainterp.history import ConstPtr, AbstractDescr from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.symbolic import WORD -from pypy.jit.backend.llsupport.descr import BaseSizeDescr, BaseArrayDescr +from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr -from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr +from pypy.jit.backend.llsupport.descr import get_array_descr from pypy.jit.backend.llsupport.descr import get_call_descr +from pypy.jit.backend.llsupport.rewrite import GcRewriterAssembler from pypy.rpython.memory.gctransform import asmgcroot # 
____________________________________________________________ class GcLLDescription(GcCache): - minimal_size_in_nursery = 0 - get_malloc_slowpath_addr = None def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr + if translator and translator.config.translation.gcremovetypeptr: + self.fielddescr_vtable = None + else: + self.fielddescr_vtable = get_field_descr(self, rclass.OBJECT, + 'typeptr') + self._generated_functions = [] + + def _setup_str(self): + self.str_descr = get_array_descr(self, rstr.STR) + self.unicode_descr = get_array_descr(self, rstr.UNICODE) + + def generate_function(self, funcname, func, ARGS, RESULT=llmemory.GCREF): + """Generates a variant of malloc with the given name and the given + arguments. It should return NULL if out of memory. If it raises + anything, it must be an optional MemoryError. + """ + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, RESULT)) + descr = get_call_descr(self, ARGS, RESULT) + setattr(self, funcname, func) + setattr(self, funcname + '_FUNCPTR', FUNCPTR) + setattr(self, funcname + '_descr', descr) + self._generated_functions.append(funcname) + + @specialize.arg(1) + def get_malloc_fn(self, funcname): + func = getattr(self, funcname) + FUNC = getattr(self, funcname + '_FUNCPTR') + return llhelper(FUNC, func) + + @specialize.arg(1) + def get_malloc_fn_addr(self, funcname): + ll_func = self.get_malloc_fn(funcname) + return heaptracker.adr2int(llmemory.cast_ptr_to_adr(ll_func)) + def _freeze_(self): return True def initialize(self): pass def do_write_barrier(self, gcref_struct, gcref_newptr): pass - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - return operations - def can_inline_malloc(self, descr): - return False - def can_inline_malloc_varsize(self, descr, num_elem): + def can_use_nursery_malloc(self, size): return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): pass + def get_nursery_free_addr(self): + raise NotImplementedError + def get_nursery_top_addr(self): + raise NotImplementedError - def get_funcptr_for_newarray(self): - return llhelper(self.GC_MALLOC_ARRAY, self.malloc_array) - def get_funcptr_for_newstr(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_str) - def get_funcptr_for_newunicode(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_unicode) + def gc_malloc(self, sizedescr): + """Blackhole: do a 'bh_new'. 
Also used for 'bh_new_with_vtable', + with the vtable pointer set manually afterwards.""" + assert isinstance(sizedescr, SizeDescr) + return self._bh_malloc(sizedescr) + def gc_malloc_array(self, arraydescr, num_elem): + assert isinstance(arraydescr, ArrayDescr) + return self._bh_malloc_array(arraydescr, num_elem) - def record_constptrs(self, op, gcrefs_output_list): + def gc_malloc_str(self, num_elem): + return self._bh_malloc_array(self.str_descr, num_elem) + + def gc_malloc_unicode(self, num_elem): + return self._bh_malloc_array(self.unicode_descr, num_elem) + + def _record_constptrs(self, op, gcrefs_output_list): for i in range(op.numargs()): v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): @@ -61,11 +102,27 @@ rgc._make_sure_does_not_move(p) gcrefs_output_list.append(p) + def rewrite_assembler(self, cpu, operations, gcrefs_output_list): + rewriter = GcRewriterAssembler(self, cpu) + newops = rewriter.rewrite(operations) + # record all GCREFs, because the GC (or Boehm) cannot see them and + # keep them alive if they end up as constants in the assembler + for op in newops: + self._record_constptrs(op, gcrefs_output_list) + return newops + # ____________________________________________________________ class GcLLDescr_boehm(GcLLDescription): - moving_gc = False - gcrootmap = None + kind = 'boehm' + moving_gc = False + round_up = False + gcrootmap = None + write_barrier_descr = None + fielddescr_tid = None + str_type_id = 0 + unicode_type_id = 0 + get_malloc_slowpath_addr = None @classmethod def configure_boehm_once(cls): @@ -76,6 +133,16 @@ from pypy.rpython.tool import rffi_platform compilation_info = rffi_platform.configure_boehm() + # on some platform GC_init is required before any other + # GC_* functions, call it here for the benefit of tests + # XXX move this to tests + init_fn_ptr = rffi.llexternal("GC_init", + [], lltype.Void, + compilation_info=compilation_info, + sandboxsafe=True, + _nowrapper=True) + init_fn_ptr() + # Versions 6.x of libgc needs to use GC_local_malloc(). 
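
A sketch, not part of the patch itself: the new generate_function() helper above registers each malloc variant under predictable attribute names (the function, its FUNCPTR type and a call descr), so that get_malloc_fn()/get_malloc_fn_addr() only need the name to hand the backend either the llhelper or its address. A minimal stand-alone illustration of that registration pattern, assuming plain Python with invented names and no RPython types:

    # Sketch only: simplified stand-in for the generate_function() pattern;
    # GcBase, gcdesc and the bytearray body are invented for illustration.
    class GcBase(object):
        def __init__(self):
            self._generated_functions = []

        def generate_function(self, funcname, func):
            # remember the callable under a predictable attribute name, so
            # later code can fetch it generically with getattr(self, funcname)
            setattr(self, funcname, func)
            self._generated_functions.append(funcname)

    gcdesc = GcBase()
    gcdesc.generate_function('malloc_fixedsize', lambda size: bytearray(size))
    assert 'malloc_fixedsize' in gcdesc._generated_functions
    assert len(gcdesc.malloc_fixedsize(16)) == 16

The real helper additionally builds the lltype.FuncType pointer and the call descr; the sketch only keeps the name-based registration idea.
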
# Versions 7.x of libgc removed this function; GC_malloc() has # the same behavior if libgc was compiled with @@ -95,96 +162,42 @@ sandboxsafe=True, _nowrapper=True) cls.malloc_fn_ptr = malloc_fn_ptr - cls.compilation_info = compilation_info return malloc_fn_ptr def __init__(self, gcdescr, translator, rtyper): GcLLDescription.__init__(self, gcdescr, translator, rtyper) # grab a pointer to the Boehm 'malloc' function - malloc_fn_ptr = self.configure_boehm_once() - self.funcptr_for_new = malloc_fn_ptr + self.malloc_fn_ptr = self.configure_boehm_once() + self._setup_str() + self._make_functions() - def malloc_array(basesize, itemsize, ofs_length, num_elem): + def _make_functions(self): + + def malloc_fixedsize(size): + return self.malloc_fn_ptr(size) + self.generate_function('malloc_fixedsize', malloc_fixedsize, + [lltype.Signed]) + + def malloc_array(basesize, num_elem, itemsize, ofs_length): try: - size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) except OverflowError: return lltype.nullptr(llmemory.GCREF.TO) - res = self.funcptr_for_new(size) - if not res: - return res - rffi.cast(rffi.CArrayPtr(lltype.Signed), res)[ofs_length/WORD] = num_elem + res = self.malloc_fn_ptr(totalsize) + if res: + arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) + arrayptr[ofs_length/WORD] = num_elem return res - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 4, llmemory.GCREF)) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 4) + def _bh_malloc(self, sizedescr): + return self.malloc_fixedsize(sizedescr.size) - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, self.translate_support_code) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code) - def malloc_str(length): - return self.malloc_array( - str_basesize, str_itemsize, str_ofs_length, length - ) - def malloc_unicode(length): - return self.malloc_array( - unicode_basesize, unicode_itemsize, unicode_ofs_length, length - ) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - - - # on some platform GC_init is required before any other - # GC_* functions, call it here for the benefit of tests - # XXX move this to tests - init_fn_ptr = rffi.llexternal("GC_init", - [], lltype.Void, - compilation_info=self.compilation_info, - sandboxsafe=True, - _nowrapper=True) - - init_fn_ptr() - - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.funcptr_for_new(sizedescr.size) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(basesize, itemsize, ofs_length, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size] - - def args_for_new_array(self, arraydescr): - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = 
arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [basesize, itemsize, ofs_length] - - def get_funcptr_for_new(self): - return self.funcptr_for_new - - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # record all GCREFs too, because Boehm cannot see them and keep them - # alive if they end up as constants in the assembler - for op in operations: - self.record_constptrs(op, gcrefs_output_list) - return GcLLDescription.rewrite_assembler(self, cpu, operations, - gcrefs_output_list) + def _bh_malloc_array(self, arraydescr, num_elem): + return self.malloc_array(arraydescr.basesize, num_elem, + arraydescr.itemsize, + arraydescr.lendescr.offset) # ____________________________________________________________ @@ -554,12 +567,14 @@ class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): - GCClass = gc_ll_descr.GCClass self.llop1 = gc_ll_descr.llop1 self.WB_FUNCPTR = gc_ll_descr.WB_FUNCPTR self.WB_ARRAY_FUNCPTR = gc_ll_descr.WB_ARRAY_FUNCPTR - self.fielddescr_tid = get_field_descr(gc_ll_descr, GCClass.HDR, 'tid') + self.fielddescr_tid = gc_ll_descr.fielddescr_tid # + GCClass = gc_ll_descr.GCClass + if GCClass is None: # for tests + return self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = ( self.extract_flag_byte(self.jit_wb_if_flag)) @@ -596,48 +611,74 @@ funcaddr = llmemory.cast_ptr_to_adr(funcptr) return cpu.cast_adr_to_int(funcaddr) # this may return 0 + def has_write_barrier_from_array(self, cpu): + return self.get_write_barrier_from_array_fn(cpu) != 0 + class GcLLDescr_framework(GcLLDescription): DEBUG = False # forced to True by x86/test/test_zrpy_gc.py + kind = 'framework' + round_up = True - def __init__(self, gcdescr, translator, rtyper, llop1=llop): - from pypy.rpython.memory.gctypelayout import check_typeid - from pypy.rpython.memory.gcheader import GCHeaderBuilder - from pypy.rpython.memory.gctransform import framework + def __init__(self, gcdescr, translator, rtyper, llop1=llop, + really_not_translated=False): GcLLDescription.__init__(self, gcdescr, translator, rtyper) - assert self.translate_support_code, "required with the framework GC" self.translator = translator self.llop1 = llop1 + if really_not_translated: + assert not self.translate_support_code # but half does not work + self._initialize_for_tests() + else: + assert self.translate_support_code,"required with the framework GC" + self._check_valid_gc() + self._make_gcrootmap() + self._make_layoutbuilder() + self._setup_gcclass() + self._setup_tid() + self._setup_write_barrier() + self._setup_str() + self._make_functions(really_not_translated) + def _initialize_for_tests(self): + self.layoutbuilder = None + self.fielddescr_tid = AbstractDescr() + self.max_size_of_young_obj = 1000 + self.GCClass = None + + def _check_valid_gc(self): # we need the hybrid or minimark GC for rgc._make_sure_does_not_move() # to work - if gcdescr.config.translation.gc not in ('hybrid', 'minimark'): + if self.gcdescr.config.translation.gc not in ('hybrid', 'minimark'): raise NotImplementedError("--gc=%s not implemented with the JIT" % (gcdescr.config.translation.gc,)) + def _make_gcrootmap(self): # to find roots in the assembler, make a GcRootMap - name = gcdescr.config.translation.gcrootfinder + name = self.gcdescr.config.translation.gcrootfinder try: cls = globals()['GcRootMap_' + name] except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % 
(name,)) - gcrootmap = cls(gcdescr) + gcrootmap = cls(self.gcdescr) self.gcrootmap = gcrootmap + def _make_layoutbuilder(self): # make a TransformerLayoutBuilder and save it on the translator # where it can be fished and reused by the FrameworkGCTransformer + from pypy.rpython.memory.gctransform import framework + translator = self.translator self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} - gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + self.gcrootmap.add_jit2gc_hooks(translator._jit2gc) + def _setup_gcclass(self): + from pypy.rpython.memory.gcheader import GCHeaderBuilder self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO) - (self.array_basesize, _, self.array_length_ofs) = \ - symbolic.get_array_token(lltype.GcArray(lltype.Signed), True) self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() @@ -645,87 +686,124 @@ assert self.GCClass.inline_simple_malloc assert self.GCClass.inline_simple_malloc_varsize - # make a malloc function, with two arguments - def malloc_basic(size, tid): - type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - check_typeid(type_id) - res = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - type_id, size, - False, False, False) - # In case the operation above failed, we are returning NULL - # from this function to assembler. There is also an RPython - # exception set, typically MemoryError; but it's easier and - # faster to check for the NULL return value, as done by - # translator/exceptiontransform.py. - #llop.debug_print(lltype.Void, "\tmalloc_basic", size, type_id, - # "-->", res) - return res - self.malloc_basic = malloc_basic - self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType( - [lltype.Signed, lltype.Signed], llmemory.GCREF)) + def _setup_tid(self): + self.fielddescr_tid = get_field_descr(self, self.GCClass.HDR, 'tid') + + def _setup_write_barrier(self): self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, llmemory.Address], lltype.Void)) self.WB_ARRAY_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, lltype.Signed, llmemory.Address], lltype.Void)) self.write_barrier_descr = WriteBarrierDescr(self) - # + + def _make_functions(self, really_not_translated): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + (self.standard_array_basesize, _, self.standard_array_length_ofs) = \ + symbolic.get_array_token(lltype.GcArray(lltype.Signed), + not really_not_translated) + + def malloc_nursery_slowpath(size): + """Allocate 'size' null bytes out of the nursery. + Note that the fast path is typically inlined by the backend.""" + if self.DEBUG: + self._random_usage_of_xmm_registers() + type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, size, + False, False, False) + self.generate_function('malloc_nursery', malloc_nursery_slowpath, + [lltype.Signed]) + def malloc_array(itemsize, tid, num_elem): + """Allocate an array with a variable-size num_elem. 
+ Only works for standard arrays.""" type_id = llop.extract_ushort(llgroup.HALFWORD, tid) check_typeid(type_id) return llop1.do_malloc_varsize_clear( llmemory.GCREF, - type_id, num_elem, self.array_basesize, itemsize, - self.array_length_ofs) - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 3, llmemory.GCREF)) - # - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, True) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, True) - str_type_id = self.layoutbuilder.get_type_id(rstr.STR) - unicode_type_id = self.layoutbuilder.get_type_id(rstr.UNICODE) - # + type_id, num_elem, self.standard_array_basesize, itemsize, + self.standard_array_length_ofs) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 3) + + def malloc_array_nonstandard(basesize, itemsize, lengthofs, tid, + num_elem): + """For the rare case of non-standard arrays, i.e. arrays where + self.standard_array_{basesize,length_ofs} is wrong. It can + occur e.g. with arrays of floats on Win32.""" + type_id = llop.extract_ushort(llgroup.HALFWORD, tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + type_id, num_elem, basesize, itemsize, lengthofs) + self.generate_function('malloc_array_nonstandard', + malloc_array_nonstandard, + [lltype.Signed] * 5) + + str_type_id = self.str_descr.tid + str_basesize = self.str_descr.basesize + str_itemsize = self.str_descr.itemsize + str_ofs_length = self.str_descr.lendescr.offset + unicode_type_id = self.unicode_descr.tid + unicode_basesize = self.unicode_descr.basesize + unicode_itemsize = self.unicode_descr.itemsize + unicode_ofs_length = self.unicode_descr.lendescr.offset + def malloc_str(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, str_type_id, length, str_basesize, str_itemsize, str_ofs_length) + self.generate_function('malloc_str', malloc_str, + [lltype.Signed]) + def malloc_unicode(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, - unicode_type_id, length, unicode_basesize,unicode_itemsize, + unicode_type_id, length, unicode_basesize, unicode_itemsize, unicode_ofs_length) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - # - class ForTestOnly: - pass - for_test_only = ForTestOnly() - for_test_only.x = 1.23 - def random_usage_of_xmm_registers(): - x0 = for_test_only.x - x1 = x0 * 0.1 - x2 = x0 * 0.2 - x3 = x0 * 0.3 - for_test_only.x = x0 + x1 + x2 + x3 - # - def malloc_slowpath(size): - if self.DEBUG: - random_usage_of_xmm_registers() - assert size >= self.minimal_size_in_nursery - # NB. although we call do_malloc_fixedsize_clear() here, - # it's a bit of a hack because we set tid to 0 and may - # also use it to allocate varsized objects. The tid - # and possibly the length are both set afterward. - gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - 0, size, False, False, False) - return rffi.cast(lltype.Signed, gcref) - self.malloc_slowpath = malloc_slowpath - self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) + self.generate_function('malloc_unicode', malloc_unicode, + [lltype.Signed]) + + # Rarely called: allocate a fixed-size amount of bytes, but + # not in the nursery, because it is too big. Implemented like + # malloc_nursery_slowpath() above. 
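
A sketch, not part of the patch itself: malloc_array above covers the common case where the array header matches self.standard_array_basesize / standard_array_length_ofs, while malloc_array_nonstandard takes the full five-argument layout for the rare arrays where those values are wrong (e.g. arrays of floats on Win32, per the docstring). A rough illustration of that dispatch, with invented names and constants; where the choice actually happens in the rewrite step is not shown in this diff:

    # Sketch only: ArrayLayout, STANDARD and pick_malloc_path are invented
    # names; the numeric values are placeholders, not real offsets.
    from collections import namedtuple

    ArrayLayout = namedtuple('ArrayLayout', 'basesize itemsize lengthofs')

    STANDARD = ArrayLayout(basesize=8, itemsize=8, lengthofs=0)  # made-up values

    def pick_malloc_path(layout):
        # arrays sharing the standard header layout can use the short
        # 3-argument call; anything else needs the 5-argument variant
        if (layout.basesize == STANDARD.basesize and
                layout.lengthofs == STANDARD.lengthofs):
            return 'malloc_array'
        return 'malloc_array_nonstandard'

    assert pick_malloc_path(ArrayLayout(8, 4, 0)) == 'malloc_array'
    assert pick_malloc_path(ArrayLayout(16, 8, 8)) == 'malloc_array_nonstandard'
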
+ self.generate_function('malloc_fixedsize', malloc_nursery_slowpath, + [lltype.Signed]) + + def _bh_malloc(self, sizedescr): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, sizedescr.tid) + check_typeid(type_id) + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, sizedescr.size, + False, False, False) + + def _bh_malloc_array(self, arraydescr, num_elem): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, arraydescr.tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear(llmemory.GCREF, + type_id, num_elem, + arraydescr.basesize, + arraydescr.itemsize, + arraydescr.lendescr.offset) + + + class ForTestOnly: + pass + for_test_only = ForTestOnly() + for_test_only.x = 1.23 + + def _random_usage_of_xmm_registers(self): + x0 = self.for_test_only.x + x1 = x0 * 0.1 + x2 = x0 * 0.2 + x3 = x0 * 0.3 + self.for_test_only.x = x0 + x1 + x2 + x3 def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -735,49 +813,26 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) - return rffi.cast(lltype.Signed, fptr) - def initialize(self): self.gcrootmap.initialize() def init_size_descr(self, S, descr): - type_id = self.layoutbuilder.get_type_id(S) - assert not self.layoutbuilder.is_weakref_type(S) - assert not self.layoutbuilder.has_finalizer(S) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(S) + assert not self.layoutbuilder.is_weakref_type(S) + assert not self.layoutbuilder.has_finalizer(S) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) def init_array_descr(self, A, descr): - type_id = self.layoutbuilder.get_type_id(A) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(A) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.malloc_basic(sizedescr.size, sizedescr.tid) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(itemsize, arraydescr.tid, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size, sizedescr.tid] - - def args_for_new_array(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [itemsize, arraydescr.tid] - - def get_funcptr_for_new(self): - return llhelper(self.GC_MALLOC_BASIC, self.malloc_basic) + def _set_tid(self, gcptr, tid): + hdr_addr = llmemory.cast_ptr_to_adr(gcptr) + hdr_addr -= self.gcheaderbuilder.size_gc_header + hdr = llmemory.cast_adr_to_ptr(hdr_addr, self.HDRPTR) + hdr.tid = tid def do_write_barrier(self, gcref_struct, gcref_newptr): hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct) @@ -791,108 +846,8 @@ funcptr(llmemory.cast_ptr_to_adr(gcref_struct), 
llmemory.cast_ptr_to_adr(gcref_newptr)) - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # Perform two kinds of rewrites in parallel: - # - # - Add COND_CALLs to the write barrier before SETFIELD_GC and - # SETARRAYITEM_GC operations. - # - # - Record the ConstPtrs from the assembler. - # - newops = [] - known_lengths = {} - # we can only remember one malloc since the next malloc can possibly - # collect - last_malloc = None - for op in operations: - if op.getopnum() == rop.DEBUG_MERGE_POINT: - continue - # ---------- record the ConstPtrs ---------- - self.record_constptrs(op, gcrefs_output_list) - if op.is_malloc(): - last_malloc = op.result - elif op.can_malloc(): - last_malloc = None - # ---------- write barrier for SETFIELD_GC ---------- - if op.getopnum() == rop.SETFIELD_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(1) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETFIELD_RAW) - # ---------- write barrier for SETINTERIORFIELD_GC ------ - if op.getopnum() == rop.SETINTERIORFIELD_GC: - val = op.getarg(0) - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) - # ---------- write barrier for SETARRAYITEM_GC ---------- - if op.getopnum() == rop.SETARRAYITEM_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier_array(newops, op.getarg(0), - op.getarg(1), v, - cpu, known_lengths) - op = op.copy_and_change(rop.SETARRAYITEM_RAW) - elif op.getopnum() == rop.NEW_ARRAY: - v_length = op.getarg(0) - if isinstance(v_length, ConstInt): - known_lengths[op.result] = v_length.getint() - # ---------- - newops.append(op) - return newops - - def _gen_write_barrier(self, newops, v_base, v_value): - args = [v_base, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, - descr=self.write_barrier_descr)) - - def _gen_write_barrier_array(self, newops, v_base, v_index, v_value, - cpu, known_lengths): - if self.write_barrier_descr.get_write_barrier_from_array_fn(cpu) != 0: - # If we know statically the length of 'v', and it is not too - # big, then produce a regular write_barrier. If it's unknown or - # too big, produce instead a write_barrier_from_array. 
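
A sketch, not part of the patch itself: the removed block above (and its replacement in rewrite.py) decides between a plain COND_CALL_GC_WB and COND_CALL_GC_WB_ARRAY depending on whether the array length is statically known and small; the diff uses 130 as the cutoff. Just that decision, with the surrounding ResOperation machinery stripped away and strings standing in for operations:

    # Sketch only: the dict of known lengths and the string return values
    # are stand-ins; the real code works on boxes and ResOperations.
    LARGE = 130  # cutoff used in the diff for "unknown or too big"

    def choose_barrier(known_lengths, v_base):
        # an unknown length defaults to "large", forcing the array variant
        length = known_lengths.get(v_base, LARGE)
        if length >= LARGE:
            return 'cond_call_gc_wb_array'
        return 'cond_call_gc_wb'

    assert choose_barrier({}, 'p0') == 'cond_call_gc_wb_array'
    assert choose_barrier({'p0': 3}, 'p0') == 'cond_call_gc_wb'
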
- LARGE = 130 - length = known_lengths.get(v_base, LARGE) - if length >= LARGE: - # unknown or too big: produce a write_barrier_from_array - args = [v_base, v_index, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, - None, - descr=self.write_barrier_descr)) - return - # fall-back case: produce a write_barrier - self._gen_write_barrier(newops, v_base, v_value) - - def can_inline_malloc(self, descr): - assert isinstance(descr, BaseSizeDescr) - if descr.size < self.max_size_of_young_obj: - has_finalizer = bool(descr.tid & (1<= LARGE: + # unknown or too big: produce a write_barrier_from_array + args = [v_base, v_index, v_value] + self.newops.append( + ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, + descr=write_barrier_descr)) + return + # fall-back case: produce a write_barrier + self.gen_write_barrier(v_base, v_value) + + def round_up_for_allocation(self, size): + if not self.gc_ll_descr.round_up: + return size + if self.gc_ll_descr.translate_support_code: + from pypy.rpython.lltypesystem import llarena + return llarena.round_up_for_allocation( + size, self.gc_ll_descr.minimal_size_in_nursery) + else: + # non-translated: do it manually + # assume that "self.gc_ll_descr.minimal_size_in_nursery" is 2 WORDs + size = max(size, 2 * WORD) + return (size + WORD-1) & ~(WORD-1) # round up diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -1,4 +1,4 @@ -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport import symbolic from pypy.rlib.objectmodel import Symbolic @@ -53,18 +53,6 @@ ('z', lltype.Ptr(U)), ('f', lltype.Float), ('s', lltype.SingleFloat)) - assert getFieldDescrClass(lltype.Ptr(T)) is GcPtrFieldDescr - assert getFieldDescrClass(lltype.Ptr(U)) is NonGcPtrFieldDescr - cls = getFieldDescrClass(lltype.Char) - assert cls != getFieldDescrClass(lltype.Signed) - assert cls == getFieldDescrClass(lltype.Char) - clsf = getFieldDescrClass(lltype.Float) - assert clsf != cls - assert clsf == getFieldDescrClass(lltype.Float) - clss = getFieldDescrClass(lltype.SingleFloat) - assert clss not in (cls, clsf) - assert clss == getFieldDescrClass(lltype.SingleFloat) - assert clss == getFieldDescrClass(rffi.UINT) # for now # c0 = GcCache(False) c1 = GcCache(True) @@ -77,11 +65,7 @@ descr_z = get_field_descr(c2, S, 'z') descr_f = get_field_descr(c2, S, 'f') descr_s = get_field_descr(c2, S, 's') - assert descr_x.__class__ is cls - assert descr_y.__class__ is GcPtrFieldDescr - assert descr_z.__class__ is NonGcPtrFieldDescr - assert descr_f.__class__ is clsf - assert descr_s.__class__ is clss + assert isinstance(descr_x, FieldDescr) assert descr_x.name == 'S.x' assert descr_y.name == 'S.y' assert descr_z.name == 'S.z' @@ -90,33 +74,27 @@ if not tsc: assert descr_x.offset < descr_y.offset < descr_z.offset assert descr_x.sort_key() < descr_y.sort_key() < descr_z.sort_key() - assert descr_x.get_field_size(False) == rffi.sizeof(lltype.Char) - assert descr_y.get_field_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr_z.get_field_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr_f.get_field_size(False) == rffi.sizeof(lltype.Float) - assert descr_s.get_field_size(False) == rffi.sizeof( - lltype.SingleFloat) + assert descr_x.field_size == rffi.sizeof(lltype.Char) + assert descr_y.field_size 
== rffi.sizeof(lltype.Ptr(T)) + assert descr_z.field_size == rffi.sizeof(lltype.Ptr(U)) + assert descr_f.field_size == rffi.sizeof(lltype.Float) + assert descr_s.field_size == rffi.sizeof(lltype.SingleFloat) else: assert isinstance(descr_x.offset, Symbolic) assert isinstance(descr_y.offset, Symbolic) assert isinstance(descr_z.offset, Symbolic) assert isinstance(descr_f.offset, Symbolic) assert isinstance(descr_s.offset, Symbolic) - assert isinstance(descr_x.get_field_size(True), Symbolic) - assert isinstance(descr_y.get_field_size(True), Symbolic) - assert isinstance(descr_z.get_field_size(True), Symbolic) - assert isinstance(descr_f.get_field_size(True), Symbolic) - assert isinstance(descr_s.get_field_size(True), Symbolic) - assert not descr_x.is_pointer_field() - assert descr_y.is_pointer_field() - assert not descr_z.is_pointer_field() - assert not descr_f.is_pointer_field() - assert not descr_s.is_pointer_field() - assert not descr_x.is_float_field() - assert not descr_y.is_float_field() - assert not descr_z.is_float_field() - assert descr_f.is_float_field() - assert not descr_s.is_float_field() + assert isinstance(descr_x.field_size, Symbolic) + assert isinstance(descr_y.field_size, Symbolic) + assert isinstance(descr_z.field_size, Symbolic) + assert isinstance(descr_f.field_size, Symbolic) + assert isinstance(descr_s.field_size, Symbolic) + assert descr_x.flag == FLAG_UNSIGNED + assert descr_y.flag == FLAG_POINTER + assert descr_z.flag == FLAG_UNSIGNED + assert descr_f.flag == FLAG_FLOAT + assert descr_s.flag == FLAG_UNSIGNED def test_get_field_descr_sign(): @@ -128,7 +106,8 @@ for tsc in [False, True]: c2 = GcCache(tsc) descr_x = get_field_descr(c2, S, 'x') - assert descr_x.is_field_signed() == signed + assert descr_x.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] def test_get_field_descr_longlong(): if sys.maxint > 2147483647: @@ -136,9 +115,8 @@ c0 = GcCache(False) S = lltype.GcStruct('S', ('y', lltype.UnsignedLongLong)) descr = get_field_descr(c0, S, 'y') - assert not descr.is_pointer_field() - assert descr.is_float_field() - assert descr.get_field_size(False) == 8 + assert descr.flag == FLAG_FLOAT + assert descr.field_size == 8 def test_get_array_descr(): @@ -149,19 +127,8 @@ A3 = lltype.GcArray(lltype.Ptr(U)) A4 = lltype.GcArray(lltype.Float) A5 = lltype.GcArray(lltype.Struct('x', ('v', lltype.Signed), - ('k', lltype.Signed))) + ('k', lltype.Signed))) A6 = lltype.GcArray(lltype.SingleFloat) - assert getArrayDescrClass(A2) is GcPtrArrayDescr - assert getArrayDescrClass(A3) is NonGcPtrArrayDescr - cls = getArrayDescrClass(A1) - assert cls != getArrayDescrClass(lltype.GcArray(lltype.Signed)) - assert cls == getArrayDescrClass(lltype.GcArray(lltype.Char)) - clsf = getArrayDescrClass(A4) - assert clsf != cls - assert clsf == getArrayDescrClass(lltype.GcArray(lltype.Float)) - clss = getArrayDescrClass(A6) - assert clss not in (clsf, cls) - assert clss == getArrayDescrClass(lltype.GcArray(rffi.UINT)) # c0 = GcCache(False) descr1 = get_array_descr(c0, A1) @@ -170,82 +137,61 @@ descr4 = get_array_descr(c0, A4) descr5 = get_array_descr(c0, A5) descr6 = get_array_descr(c0, A6) - assert descr1.__class__ is cls - assert descr2.__class__ is GcPtrArrayDescr - assert descr3.__class__ is NonGcPtrArrayDescr - assert descr4.__class__ is clsf - assert descr6.__class__ is clss + assert isinstance(descr1, ArrayDescr) assert descr1 == get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert not descr1.is_array_of_pointers() - assert descr2.is_array_of_pointers() - assert not 
descr3.is_array_of_pointers() - assert not descr4.is_array_of_pointers() - assert not descr5.is_array_of_pointers() - assert not descr1.is_array_of_floats() - assert not descr2.is_array_of_floats() - assert not descr3.is_array_of_floats() - assert descr4.is_array_of_floats() - assert not descr5.is_array_of_floats() + assert descr1.flag == FLAG_UNSIGNED + assert descr2.flag == FLAG_POINTER + assert descr3.flag == FLAG_UNSIGNED + assert descr4.flag == FLAG_FLOAT + assert descr5.flag == FLAG_STRUCT + assert descr6.flag == FLAG_UNSIGNED # def get_alignment(code): # Retrieve default alignment for the compiler/platform return struct.calcsize('l' + code) - struct.calcsize(code) - assert descr1.get_base_size(False) == get_alignment('c') - assert descr2.get_base_size(False) == get_alignment('p') - assert descr3.get_base_size(False) == get_alignment('p') - assert descr4.get_base_size(False) == get_alignment('d') - assert descr5.get_base_size(False) == get_alignment('f') - assert descr1.get_ofs_length(False) == 0 - assert descr2.get_ofs_length(False) == 0 - assert descr3.get_ofs_length(False) == 0 - assert descr4.get_ofs_length(False) == 0 - assert descr5.get_ofs_length(False) == 0 - assert descr1.get_item_size(False) == rffi.sizeof(lltype.Char) - assert descr2.get_item_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr3.get_item_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr4.get_item_size(False) == rffi.sizeof(lltype.Float) - assert descr5.get_item_size(False) == rffi.sizeof(lltype.Signed) * 2 - assert descr6.get_item_size(False) == rffi.sizeof(lltype.SingleFloat) + assert descr1.basesize == get_alignment('c') + assert descr2.basesize == get_alignment('p') + assert descr3.basesize == get_alignment('p') + assert descr4.basesize == get_alignment('d') + assert descr5.basesize == get_alignment('f') + assert descr1.lendescr.offset == 0 + assert descr2.lendescr.offset == 0 + assert descr3.lendescr.offset == 0 + assert descr4.lendescr.offset == 0 + assert descr5.lendescr.offset == 0 + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr2.itemsize == rffi.sizeof(lltype.Ptr(T)) + assert descr3.itemsize == rffi.sizeof(lltype.Ptr(U)) + assert descr4.itemsize == rffi.sizeof(lltype.Float) + assert descr5.itemsize == rffi.sizeof(lltype.Signed) * 2 + assert descr6.itemsize == rffi.sizeof(lltype.SingleFloat) # - assert isinstance(descr1.get_base_size(True), Symbolic) - assert isinstance(descr2.get_base_size(True), Symbolic) - assert isinstance(descr3.get_base_size(True), Symbolic) - assert isinstance(descr4.get_base_size(True), Symbolic) - assert isinstance(descr5.get_base_size(True), Symbolic) - assert isinstance(descr1.get_ofs_length(True), Symbolic) - assert isinstance(descr2.get_ofs_length(True), Symbolic) - assert isinstance(descr3.get_ofs_length(True), Symbolic) - assert isinstance(descr4.get_ofs_length(True), Symbolic) - assert isinstance(descr5.get_ofs_length(True), Symbolic) - assert isinstance(descr1.get_item_size(True), Symbolic) - assert isinstance(descr2.get_item_size(True), Symbolic) - assert isinstance(descr3.get_item_size(True), Symbolic) - assert isinstance(descr4.get_item_size(True), Symbolic) - assert isinstance(descr5.get_item_size(True), Symbolic) CA = rffi.CArray(lltype.Signed) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_SIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = 
rffi.CArray(lltype.Ptr(lltype.GcStruct('S'))) descr = get_array_descr(c0, CA) - assert descr.is_array_of_pointers() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_POINTER + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Ptr(lltype.Struct('S'))) descr = get_array_descr(c0, CA) - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Float) descr = get_array_descr(c0, CA) - assert descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_FLOAT + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(rffi.FLOAT) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.itemsize == rffi.sizeof(lltype.SingleFloat) + assert descr.lendescr is None def test_get_array_descr_sign(): @@ -257,46 +203,55 @@ for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, A) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] # RA = rffi.CArray(RESTYPE) for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, RA) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] + + +def test_get_array_descr_str(): + c0 = GcCache(False) + descr1 = get_array_descr(c0, rstr.STR) + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr1.flag == FLAG_UNSIGNED def test_get_call_descr_not_translated(): c0 = GcCache(False) descr1 = get_call_descr(c0, [lltype.Char, lltype.Signed], lltype.Char) - assert descr1.get_result_size(False) == rffi.sizeof(lltype.Char) - assert descr1.get_return_type() == history.INT + assert descr1.get_result_size() == rffi.sizeof(lltype.Char) + assert descr1.get_result_type() == history.INT assert descr1.arg_classes == "ii" # T = lltype.GcStruct('T') descr2 = get_call_descr(c0, [lltype.Ptr(T)], lltype.Ptr(T)) - assert descr2.get_result_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr2.get_return_type() == history.REF + assert descr2.get_result_size() == rffi.sizeof(lltype.Ptr(T)) + assert descr2.get_result_type() == history.REF assert descr2.arg_classes == "r" # U = lltype.GcStruct('U', ('x', lltype.Signed)) assert descr2 == get_call_descr(c0, [lltype.Ptr(U)], lltype.Ptr(U)) # V = lltype.Struct('V', ('x', lltype.Signed)) - assert (get_call_descr(c0, [], lltype.Ptr(V)).get_return_type() == + assert (get_call_descr(c0, [], lltype.Ptr(V)).get_result_type() == history.INT) # - assert (get_call_descr(c0, [], lltype.Void).get_return_type() == + assert (get_call_descr(c0, [], lltype.Void).get_result_type() == history.VOID) # descr4 = get_call_descr(c0, [lltype.Float, lltype.Float], lltype.Float) - assert descr4.get_result_size(False) == rffi.sizeof(lltype.Float) - assert descr4.get_return_type() == history.FLOAT + assert descr4.get_result_size() == rffi.sizeof(lltype.Float) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c0, [lltype.SingleFloat], lltype.SingleFloat) - assert descr5.get_result_size(False) == 
rffi.sizeof(lltype.SingleFloat) - assert descr5.get_return_type() == "S" + assert descr5.get_result_size() == rffi.sizeof(lltype.SingleFloat) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_get_call_descr_not_translated_longlong(): @@ -305,13 +260,13 @@ c0 = GcCache(False) # descr5 = get_call_descr(c0, [lltype.SignedLongLong], lltype.Signed) - assert descr5.get_result_size(False) == 4 - assert descr5.get_return_type() == history.INT + assert descr5.get_result_size() == 4 + assert descr5.get_result_type() == history.INT assert descr5.arg_classes == "L" # descr6 = get_call_descr(c0, [lltype.Signed], lltype.SignedLongLong) - assert descr6.get_result_size(False) == 8 - assert descr6.get_return_type() == "L" + assert descr6.get_result_size() == 8 + assert descr6.get_result_type() == "L" assert descr6.arg_classes == "i" def test_get_call_descr_translated(): @@ -319,18 +274,18 @@ T = lltype.GcStruct('T') U = lltype.GcStruct('U', ('x', lltype.Signed)) descr3 = get_call_descr(c1, [lltype.Ptr(T)], lltype.Ptr(U)) - assert isinstance(descr3.get_result_size(True), Symbolic) - assert descr3.get_return_type() == history.REF + assert isinstance(descr3.get_result_size(), Symbolic) + assert descr3.get_result_type() == history.REF assert descr3.arg_classes == "r" # descr4 = get_call_descr(c1, [lltype.Float, lltype.Float], lltype.Float) - assert isinstance(descr4.get_result_size(True), Symbolic) - assert descr4.get_return_type() == history.FLOAT + assert isinstance(descr4.get_result_size(), Symbolic) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c1, [lltype.SingleFloat], lltype.SingleFloat) - assert isinstance(descr5.get_result_size(True), Symbolic) - assert descr5.get_return_type() == "S" + assert isinstance(descr5.get_result_size(), Symbolic) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_call_descr_extra_info(): @@ -358,6 +313,10 @@ def test_repr_of_descr(): + def repr_of_descr(descr): + s = descr.repr_of_descr() + assert ',' not in s # makes the life easier for pypy.tool.jitlogparser + return s c0 = GcCache(False) T = lltype.GcStruct('T') S = lltype.GcStruct('S', ('x', lltype.Char), @@ -365,33 +324,34 @@ ('z', lltype.Ptr(T))) descr1 = get_size_descr(c0, S) s = symbolic.get_size(S, False) - assert descr1.repr_of_descr() == '' % s + assert repr_of_descr(descr1) == '' % s # descr2 = get_field_descr(c0, S, 'y') o, _ = symbolic.get_field_token(S, 'y', False) - assert descr2.repr_of_descr() == '' % o + assert repr_of_descr(descr2) == '' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) - assert descr2i.repr_of_descr() == '' % o + assert repr_of_descr(descr2i) == '' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) - assert descr3.repr_of_descr() == '' + o = symbolic.get_size(lltype.Ptr(S), False) + assert repr_of_descr(descr3) == '' % o # descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert descr3i.repr_of_descr() == '' + assert repr_of_descr(descr3i) == '' # descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) - assert 'GcPtrCallDescr' in descr4.repr_of_descr() + assert repr_of_descr(descr4) == '' % o # descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) - assert 'CharCallDescr' in descr4i.repr_of_descr() + assert repr_of_descr(descr4i) == '' # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) - assert 'FloatCallDescr' in 
descr4f.repr_of_descr() + assert repr_of_descr(descr4f) == '' # descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat) - assert 'SingleFloatCallDescr' in descr5f.repr_of_descr() + assert repr_of_descr(descr5f) == '' def test_call_stubs_1(): c0 = GcCache(False) @@ -401,10 +361,10 @@ def f(a, b): return 'c' - call_stub = descr1.call_stub fnptr = llhelper(lltype.Ptr(lltype.FuncType(ARGS, RES)), f) - res = call_stub(rffi.cast(lltype.Signed, fnptr), [1, 2], None, None) + res = descr1.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [1, 2], None, None) assert res == ord('c') def test_call_stubs_2(): @@ -421,8 +381,8 @@ a = lltype.malloc(ARRAY, 3) opaquea = lltype.cast_opaque_ptr(llmemory.GCREF, a) a[0] = 1 - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [], [opaquea], [longlong.getfloatstorage(3.5)]) + res = descr2.call_stub_f(rffi.cast(lltype.Signed, fnptr), + [], [opaquea], [longlong.getfloatstorage(3.5)]) assert longlong.getrealfloat(res) == 4.5 def test_call_stubs_single_float(): @@ -445,6 +405,22 @@ a = intmask(singlefloat2uint(r_singlefloat(-10.0))) b = intmask(singlefloat2uint(r_singlefloat(3.0))) c = intmask(singlefloat2uint(r_singlefloat(2.0))) - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [a, b, c], [], []) + res = descr2.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [a, b, c], [], []) assert float(uint2singlefloat(rffi.r_uint(res))) == -11.5 + +def test_field_arraylen_descr(): + c0 = GcCache(True) + A1 = lltype.GcArray(lltype.Signed) + fielddescr = get_field_arraylen_descr(c0, A1) + assert isinstance(fielddescr, FieldDescr) + ofs = fielddescr.offset + assert repr(ofs) == '< ArrayLengthOffset >' + # + fielddescr = get_field_arraylen_descr(c0, rstr.STR) + ofs = fielddescr.offset + assert repr(ofs) == ("< " + " 'chars'> + < ArrayLengthOffset" + " > >") + # caching: + assert fielddescr is get_field_arraylen_descr(c0, rstr.STR) diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -1,5 +1,6 @@ from pypy.rlib.libffi import types from pypy.jit.codewriter.longlong import is_64_bit +from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport.ffisupport import * @@ -15,7 +16,9 @@ args = [types.sint, types.pointer] descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, None, ffi_flags=42) - assert isinstance(descr, DynamicIntCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_type == 'i' + assert descr.result_flag == FLAG_SIGNED assert descr.arg_classes == 'ii' assert descr.get_ffi_flags() == 42 @@ -24,18 +27,20 @@ assert descr is None # missing floats descr = get_call_descr_dynamic(FakeCPU(supports_floats=True), args, types.void, None, ffi_flags=43) - assert isinstance(descr, VoidCallDescr) + assert descr.result_type == 'v' + assert descr.result_flag == FLAG_VOID assert descr.arg_classes == 'ifi' assert descr.get_ffi_flags() == 43 descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_SIGNED assert descr.is_result_signed() == True descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert isinstance(descr, CallDescr) + assert descr.get_result_size() == 1 + 
assert descr.result_flag == FLAG_UNSIGNED assert descr.is_result_signed() == False if not is_64_bit: @@ -44,7 +49,9 @@ assert descr is None # missing longlongs descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True), [], types.slonglong, None, ffi_flags=43) - assert isinstance(descr, LongLongCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_flag == FLAG_FLOAT + assert descr.result_type == 'L' assert descr.get_ffi_flags() == 43 else: assert types.slonglong is types.slong @@ -53,6 +60,6 @@ assert descr is None # missing singlefloats descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True), [], types.float, None, ffi_flags=44) - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - assert isinstance(descr, SingleFloatCallDescr) + assert descr.result_flag == FLAG_UNSIGNED + assert descr.result_type == 'S' assert descr.get_ffi_flags() == 44 diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -6,6 +6,7 @@ from pypy.jit.backend.llsupport.gc import * from pypy.jit.backend.llsupport import symbolic from pypy.jit.metainterp.gc import get_description +from pypy.jit.metainterp.history import BoxPtr, BoxInt, ConstPtr from pypy.jit.metainterp.resoperation import get_deep_immutable_oplist from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE @@ -15,12 +16,12 @@ gc_ll_descr = GcLLDescr_boehm(None, None, None) # record = [] - prev_funcptr_for_new = gc_ll_descr.funcptr_for_new - def my_funcptr_for_new(size): - p = prev_funcptr_for_new(size) + prev_malloc_fn_ptr = gc_ll_descr.malloc_fn_ptr + def my_malloc_fn_ptr(size): + p = prev_malloc_fn_ptr(size) record.append((size, p)) return p - gc_ll_descr.funcptr_for_new = my_funcptr_for_new + gc_ll_descr.malloc_fn_ptr = my_malloc_fn_ptr # # ---------- gc_malloc ---------- S = lltype.GcStruct('S', ('x', lltype.Signed)) @@ -32,8 +33,8 @@ A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(gc_ll_descr, A) p = gc_ll_descr.gc_malloc_array(arraydescr, 10) - assert record == [(arraydescr.get_base_size(False) + - 10 * arraydescr.get_item_size(False), p)] + assert record == [(arraydescr.basesize + + 10 * arraydescr.itemsize, p)] del record[:] # ---------- gc_malloc_str ---------- p = gc_ll_descr.gc_malloc_str(10) @@ -246,24 +247,28 @@ def __init__(self): self.record = [] + def _malloc(self, type_id, size): + tid = llop.combine_ushort(lltype.Signed, type_id, 0) + x = llmemory.raw_malloc(self.gcheaderbuilder.size_gc_header + size) + x += self.gcheaderbuilder.size_gc_header + return x, tid + def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size, has_finalizer, has_light_finalizer, contains_weakptr): assert not contains_weakptr - assert not has_finalizer # in these tests - assert not has_light_finalizer # in these tests - p = llmemory.raw_malloc(size) + assert not has_finalizer + assert not has_light_finalizer + p, tid = self._malloc(type_id, size) p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("fixedsize", repr(size), tid, p)) return p def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, itemsize, offset_to_length): - p = llmemory.raw_malloc(size + itemsize * length) + p, tid = self._malloc(type_id, size + itemsize * length) (p + offset_to_length).signed[0] = length p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = 
llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("varsize", tid, length, repr(size), repr(itemsize), repr(offset_to_length), p)) @@ -322,43 +327,40 @@ gc_ll_descr = GcLLDescr_framework(gcdescr, FakeTranslator(), None, llop1) gc_ll_descr.initialize() + llop1.gcheaderbuilder = gc_ll_descr.gcheaderbuilder self.llop1 = llop1 self.gc_ll_descr = gc_ll_descr self.fake_cpu = FakeCPU() - def test_args_for_new(self): - S = lltype.GcStruct('S', ('x', lltype.Signed)) - sizedescr = get_size_descr(self.gc_ll_descr, S) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed - A = lltype.GcArray(lltype.Signed) - arraydescr = get_array_descr(self.gc_ll_descr, A) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed +## def test_args_for_new(self): +## S = lltype.GcStruct('S', ('x', lltype.Signed)) +## sizedescr = get_size_descr(self.gc_ll_descr, S) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed +## A = lltype.GcArray(lltype.Signed) +## arraydescr = get_array_descr(self.gc_ll_descr, A) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed def test_gc_malloc(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) sizedescr = get_size_descr(self.gc_ll_descr, S) p = self.gc_ll_descr.gc_malloc(sizedescr) - assert self.llop1.record == [("fixedsize", - repr(sizedescr.size), + assert lltype.typeOf(p) == llmemory.GCREF + assert self.llop1.record == [("fixedsize", repr(sizedescr.size), sizedescr.tid, p)] - assert repr(self.gc_ll_descr.args_for_new(sizedescr)) == repr( - [sizedescr.size, sizedescr.tid]) def test_gc_malloc_array(self): A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(self.gc_ll_descr, A) p = self.gc_ll_descr.gc_malloc_array(arraydescr, 10) assert self.llop1.record == [("varsize", arraydescr.tid, 10, - repr(arraydescr.get_base_size(True)), - repr(arraydescr.get_item_size(True)), - repr(arraydescr.get_ofs_length(True)), + repr(arraydescr.basesize), + repr(arraydescr.itemsize), + repr(arraydescr.lendescr.offset), p)] - assert repr(self.gc_ll_descr.args_for_new_array(arraydescr)) == repr( - [arraydescr.get_item_size(True), arraydescr.tid]) def test_gc_malloc_str(self): p = self.gc_ll_descr.gc_malloc_str(10) @@ -404,10 +406,11 @@ gc_ll_descr = self.gc_ll_descr llop1 = self.llop1 # - newops = [] + rewriter = GcRewriterAssembler(gc_ll_descr, None) + newops = rewriter.newops v_base = BoxPtr() v_value = BoxPtr() - gc_ll_descr._gen_write_barrier(newops, v_base, v_value) + rewriter.gen_write_barrier(v_base, v_value) assert llop1.record == [] assert len(newops) == 1 assert newops[0].getopnum() == rop.COND_CALL_GC_WB @@ -427,8 +430,7 @@ operations = gc_ll_descr.rewrite_assembler(None, operations, []) assert len(operations) == 0 - def test_rewrite_assembler_1(self): - # check recording of ConstPtrs + def test_record_constptrs(self): class MyFakeCPU(object): def cast_adr_to_int(self, adr): assert adr == "some fake address" @@ -455,211 +457,6 @@ assert operations2 == operations assert gcrefs == [s_gcref] - def test_rewrite_assembler_2(self): - # check write barriers before SETFIELD_GC - v_base = BoxPtr() - v_value = BoxPtr() - field_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETFIELD_GC, [v_base, v_value], None, - descr=field_descr), - ] - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations 
= gc_ll_descr.rewrite_assembler(self.fake_cpu, operations, - []) - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETFIELD_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_value - assert operations[1].getdescr() == field_descr - - def test_rewrite_assembler_3(self): - # check write barriers before SETARRAYITEM_GC - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_4(self): - # check write barriers before SETARRAYITEM_GC, - # if we have actually a write_barrier_from_array. 
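
A sketch, not part of the patch itself: these removed tests exercise the "initialization store" rule from the old rewrite_assembler, i.e. a SETFIELD_GC/SETARRAYITEM_GC into the object produced by the most recent malloc gets no write barrier, while inserting another, unrelated NEW_ARRAY in between makes the tracked "last malloc" point elsewhere and brings the barrier back. The core of the rule, with plain objects standing in for BoxPtr/ResOperation:

    # Sketch only: stand-in objects instead of BoxPtr/ResOperation.
    def needs_write_barrier(v_target, last_malloc):
        # the object from the most recent malloc is still brand-new, so the
        # removed code skipped the barrier for stores into it
        return v_target is not last_malloc

    p_new = object()   # result of the most recent NEW / NEW_ARRAY
    p_old = object()   # any other object
    assert not needs_write_barrier(p_new, last_malloc=p_new)
    assert needs_write_barrier(p_old, last_malloc=p_new)
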
- self.llop1._have_wb_from_array = True - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - if isinstance(v_new_length, ConstInt) and v_new_length.value < 130: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - else: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB_ARRAY - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_index - assert operations[0].getarg(2) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_5(self): - S = lltype.GcStruct('S') - A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) - interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, - A.OF, 'x') - wbdescr = self.gc_ll_descr.write_barrier_descr - ops = parse(""" - [p1, p2] - setinteriorfield_gc(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - expected = parse(""" - [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setinteriorfield_raw(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - # no write barrier - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_2(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - wbdescr = self.gc_ll_descr.write_barrier_descr - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = 
new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_3(self): - A = lltype.GcArray(lltype.Ptr(lltype.GcStruct('S'))) - arraydescr = get_array_descr(self.gc_ll_descr, A) - ops = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) class TestFrameworkMiniMark(TestFramework): gc = 'minimark' diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -0,0 +1,668 @@ +from pypy.jit.backend.llsupport.descr import * +from pypy.jit.backend.llsupport.gc import * +from pypy.jit.metainterp.gc import get_description +from pypy.jit.tool.oparser import parse +from pypy.jit.metainterp.optimizeopt.util import equaloplists +from pypy.jit.codewriter.heaptracker import register_known_gctype + + +class Evaluator(object): + def __init__(self, scope): + self.scope = scope + def __getitem__(self, key): + return eval(key, self.scope) + + +class RewriteTests(object): + def check_rewrite(self, frm_operations, to_operations, **namespace): + S = lltype.GcStruct('S', ('x', lltype.Signed), + ('y', lltype.Signed)) + sdescr = get_size_descr(self.gc_ll_descr, S) + sdescr.tid = 1234 + # + T = lltype.GcStruct('T', ('y', lltype.Signed), + ('z', lltype.Ptr(S)), + ('t', lltype.Signed)) + tdescr = get_size_descr(self.gc_ll_descr, T) + tdescr.tid = 5678 + tzdescr = get_field_descr(self.gc_ll_descr, T, 'z') + # + A = lltype.GcArray(lltype.Signed) + adescr = get_array_descr(self.gc_ll_descr, A) + adescr.tid = 4321 + alendescr = adescr.lendescr + # + B = lltype.GcArray(lltype.Char) + bdescr = get_array_descr(self.gc_ll_descr, B) + bdescr.tid = 8765 + blendescr = bdescr.lendescr + # + C = lltype.GcArray(lltype.Ptr(S)) + cdescr = get_array_descr(self.gc_ll_descr, C) + cdescr.tid = 8111 + clendescr = cdescr.lendescr + # + E = lltype.GcStruct('Empty') + edescr = get_size_descr(self.gc_ll_descr, E) + edescr.tid = 9000 + # + vtable_descr = self.gc_ll_descr.fielddescr_vtable + O = lltype.GcStruct('O', ('parent', rclass.OBJECT), + ('x', lltype.Signed)) + o_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) + register_known_gctype(self.cpu, o_vtable, O) + # + tiddescr = self.gc_ll_descr.fielddescr_tid + wbdescr = self.gc_ll_descr.write_barrier_descr + WORD = globals()['WORD'] + # + strdescr = self.gc_ll_descr.str_descr + unicodedescr = self.gc_ll_descr.unicode_descr + strlendescr = strdescr.lendescr + unicodelendescr = unicodedescr.lendescr + # + namespace.update(locals()) + # + for funcname in self.gc_ll_descr._generated_functions: + namespace[funcname] 
= self.gc_ll_descr.get_malloc_fn(funcname) + namespace[funcname + '_descr'] = getattr(self.gc_ll_descr, + '%s_descr' % funcname) + # + ops = parse(frm_operations, namespace=namespace) + expected = parse(to_operations % Evaluator(namespace), + namespace=namespace) + operations = self.gc_ll_descr.rewrite_assembler(self.cpu, + ops.operations, + []) + equaloplists(operations, expected.operations) + + +class TestBoehm(RewriteTests): + def setup_method(self, meth): + class FakeCPU(object): + def sizeof(self, STRUCT): + return SizeDescrWithVTable(102) + self.cpu = FakeCPU() + self.gc_ll_descr = GcLLDescr_boehm(None, None, None) + + def test_new(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_no_collapsing(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + p1 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_new_array_fixed(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(adescr.basesize + 10 * adescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_new_array_variable(self): + self.check_rewrite(""" + [i1] + p0 = new_array(i1, descr=adescr) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(adescr.basesize)d, \ + i1, \ + %(adescr.itemsize)d, \ + %(adescr.lendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_newstr(self): + self.check_rewrite(""" + [i1] + p0 = newstr(i1) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(strdescr.basesize)d, \ + i1, \ + %(strdescr.itemsize)d, \ + %(strlendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_newunicode(self): + self.check_rewrite(""" + [i1] + p0 = newunicode(10) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(unicodedescr.basesize + \ + 10 * unicodedescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=unicodelendescr) + jump() + """) + + +class TestFramework(RewriteTests): + def setup_method(self, meth): + class config_(object): + class translation(object): + gc = 'hybrid' + gcrootfinder = 'asmgcc' + gctransformer = 'framework' + gcremovetypeptr = False + gcdescr = get_description(config_) + self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, + really_not_translated=True) + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: True) + # + class FakeCPU(object): + def sizeof(self, STRUCT): + descr = SizeDescrWithVTable(102) + descr.tid = 9315 + return descr + self.cpu = FakeCPU() + + def test_rewrite_assembler_new_to_malloc(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(sdescr.size)d) + 
setfield_gc(p0, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new3_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=tdescr) + p2 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + tdescr.size + sdescr.size)d) + setfield_gc(p0, 1234, descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 5678, descr=tiddescr) + p2 = int_add(p1, %(tdescr.size)d) + setfield_gc(p2, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 4321, descr=tiddescr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_new_and_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + \ + adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 1234, descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 4321, descr=tiddescr) + setfield_gc(p1, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_round_up(self): + self.check_rewrite(""" + [] + p0 = new_array(6, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(bdescr.basesize + 8)d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 6, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_round_up_always(self): + self.check_rewrite(""" + [] + p0 = new_array(5, descr=bdescr) + p1 = new_array(5, descr=bdescr) + p2 = new_array(5, descr=bdescr) + p3 = new_array(5, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4 * (bdescr.basesize + 8))d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 5, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 8)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 5, descr=blendescr) + p2 = int_add(p1, %(bdescr.basesize + 8)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 5, descr=blendescr) + p3 = int_add(p2, %(bdescr.basesize + 8)d) + setfield_gc(p3, 8765, descr=tiddescr) + setfield_gc(p3, 5, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_minimal_size(self): + self.check_rewrite(""" + [] + p0 = new(descr=edescr) + p1 = new(descr=edescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4*WORD)d) + setfield_gc(p0, 9000, descr=tiddescr) + p1 = int_add(p0, %(2*WORD)d) + setfield_gc(p1, 9000, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_variable_size(self): + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=bdescr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, i0, \ + descr=malloc_array_descr) + jump(i0) + """) + + def test_rewrite_assembler_nonstandard_array(self): + # a non-standard array is a bit hard to get; e.g. GcArray(Float) + # is like that on Win32, but not on Linux. Build one manually... 
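(The malloc-coalescing behaviour checked by the tests above boils down to: round every fixed-size allocation up to a multiple of WORD, add the rounded sizes of consecutive allocations together, emit a single call_malloc_nursery for the total, and derive the later pointers with int_add. A rough standalone sketch of the size arithmetic; the value of WORD and the helper names are assumptions for the example:

    WORD = 8  # assumption: a 64-bit word; the real value comes from the backend

    def round_up(size):
        # nursery objects must stay word-aligned
        return (size + WORD - 1) & ~(WORD - 1)

    def coalesce(sizes):
        # given the raw sizes of consecutive fixed-size allocations, return
        # (total nursery request, offset of each object inside the request)
        offsets = []
        total = 0
        for size in sizes:
            offsets.append(total)
            total += round_up(size)
        return total, offsets

    assert round_up(22) == 24
    # e.g. three structs of 24, 40 and 24 bytes become one
    # call_malloc_nursery(88), with p1 = int_add(p0, 24) and
    # p2 = int_add(p1, 40)
    assert coalesce([24, 40, 24]) == (88, [0, 24, 64])
)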
+ NONSTD = lltype.GcArray(lltype.Float) + nonstd_descr = get_array_descr(self.gc_ll_descr, NONSTD) + nonstd_descr.tid = 6464 + nonstd_descr.basesize = 64 # <= hacked + nonstd_descr.itemsize = 8 + nonstd_descr_gcref = 123 + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=nonstd_descr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array_nonstandard), \ + 64, 8, \ + %(nonstd_descr.lendescr.offset)d, \ + 6464, i0, \ + descr=malloc_array_nonstandard_descr) + jump(i0) + """, nonstd_descr=nonstd_descr) + + def test_rewrite_assembler_maximal_size_1(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_array(103, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(bdescr.basesize + 104)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_maximal_size_2(self): + self.gc_ll_descr.max_size_of_young_obj = 300 + self.check_rewrite(""" + [] + p0 = new_array(101, descr=bdescr) + p1 = new_array(102, descr=bdescr) # two new_arrays can be combined + p2 = new_array(103, descr=bdescr) # but not all three + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(2 * (bdescr.basesize + 104))d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 101, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 104)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 102, descr=blendescr) + p2 = call_malloc_nursery( \ + %(bdescr.basesize + 104)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_huge_size(self): + # "huge" is defined as "larger than 0xffffff bytes, or 16MB" + self.check_rewrite(""" + [] + p0 = new_array(20000000, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, 20000000, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(104) # rounded up + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_new_with_vtable_too_big(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_rewrite_assembler_newstr_newunicode(self): + self.check_rewrite(""" + [i2] + p0 = newstr(14) + p1 = newunicode(10) + p2 = newunicode(i2) + p3 = newstr(i2) + jump() + """, """ + [i2] + p0 = call_malloc_nursery( \ + %(strdescr.basesize + 16 * strdescr.itemsize + \ + unicodedescr.basesize + 10 * unicodedescr.itemsize)d) + setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr) + setfield_gc(p0, 14, descr=strlendescr) + p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) + setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) + setfield_gc(p1, 10, descr=unicodelendescr) + p2 = call_malloc_gc(ConstClass(malloc_unicode), i2, \ + descr=malloc_unicode_descr) + p3 = call_malloc_gc(ConstClass(malloc_str), i2, \ + descr=malloc_str_descr) + jump() + """) + + def test_write_barrier_before_setfield_gc(self): + 
self.check_rewrite(""" + [p1, p2] + setfield_gc(p1, p2, descr=tzdescr) + jump() + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setfield_raw(p1, p2, descr=tzdescr) + jump() + """) + + def test_write_barrier_before_array_without_from_array(self): + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: False) + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_short_array(self): + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(129, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 129 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 129, descr=clendescr) + call(123456) + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_long_array(self): + # the limit of "being too long" is fixed, arbitrarily, at 130 + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(130, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 130 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 130, descr=clendescr) + call(123456) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_unknown_array(self): + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_label_makes_size_unknown(self): + self.check_rewrite(""" + [i2, p3] + p1 = new_array(5, descr=cdescr) + label(p1, i2, p3) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 5, descr=clendescr) + label(p1, i2, p3) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_setinteriorfield_gc(self): + S1 = lltype.GcStruct('S1') + INTERIOR = lltype.GcArray(('z', lltype.Ptr(S1))) + interiordescr = get_array_descr(self.gc_ll_descr, INTERIOR) + interiordescr.tid = 1291 + interiorlendescr = interiordescr.lendescr + interiorzdescr = get_interiorfield_descr(self.gc_ll_descr, + INTERIOR, 'z') + self.check_rewrite(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, interiorzdescr=interiorzdescr) + + def test_initialization_store(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_2(self): + self.check_rewrite(""" + [] + p0 = new(descr=tdescr) + p1 = 
new(descr=sdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(tdescr.size + sdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = int_add(p0, %(tdescr.size)d) + setfield_gc(p1, 1234, descr=tiddescr) + # <<>> + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_array(self): + self.check_rewrite(""" + [p1, i2] + p0 = new_array(5, descr=cdescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """, """ + [p1, i2] + p0 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p0, 8111, descr=tiddescr) + setfield_gc(p0, 5, descr=clendescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """) + + def test_non_initialization_store(self): + self.check_rewrite(""" + [i0] + p0 = new(descr=tdescr) + p1 = newstr(i0) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [i0] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = call_malloc_gc(ConstClass(malloc_str), i0, \ + descr=malloc_str_descr) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) + + def test_non_initialization_store_label(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + label(p0, p1) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + label(p0, p1) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -70,10 +70,6 @@ self.cpu = cpu self.verbose = False self.rtyper = cpu.rtyper - self.malloc_func_addr = 0 - self.malloc_array_func_addr = 0 - self.malloc_str_func_addr = 0 - self.malloc_unicode_func_addr = 0 self.fail_boxes_int = values_array(lltype.Signed, failargs_limit) self.fail_boxes_ptr = values_array(llmemory.GCREF, failargs_limit) self.fail_boxes_float = values_array(longlong.FLOATSTORAGE, @@ -108,20 +104,6 @@ # the address of the function called by 'new' gc_ll_descr = self.cpu.gc_ll_descr gc_ll_descr.initialize() - ll_new = gc_ll_descr.get_funcptr_for_new() - self.malloc_func_addr = rffi.cast(lltype.Signed, ll_new) - if gc_ll_descr.get_funcptr_for_newarray is not None: - ll_new_array = gc_ll_descr.get_funcptr_for_newarray() - self.malloc_array_func_addr = rffi.cast(lltype.Signed, - ll_new_array) - if gc_ll_descr.get_funcptr_for_newstr is not None: - ll_new_str = gc_ll_descr.get_funcptr_for_newstr() - self.malloc_str_func_addr = rffi.cast(lltype.Signed, - ll_new_str) - if gc_ll_descr.get_funcptr_for_newunicode is not None: - ll_new_unicode = gc_ll_descr.get_funcptr_for_newunicode() - self.malloc_unicode_func_addr = rffi.cast(lltype.Signed, - ll_new_unicode) self.memcpy_addr = self.cpu.cast_ptr_to_int(support.memcpy_fn) self._build_failure_recovery(False) self._build_failure_recovery(True) @@ -1358,46 +1340,10 @@ self.mc.SHR_ri(resloc.value, 7) self.mc.AND_ri(resloc.value, 1) - def genop_new_with_vtable(self, op, arglocs, result_loc): - assert result_loc is eax - loc_vtable = arglocs[-1] - assert isinstance(loc_vtable, ImmedLoc) - arglocs = arglocs[:-1] - self.call(self.malloc_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - self.set_vtable(eax, loc_vtable) + # ---------- - def set_vtable(self, loc, loc_vtable): - if self.cpu.vtable_offset is not None: - assert isinstance(loc, 
RegLoc) - assert isinstance(loc_vtable, ImmedLoc) - self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) - - def set_new_array_length(self, loc, ofs_length, loc_num_elem): - assert isinstance(loc, RegLoc) - assert isinstance(loc_num_elem, ImmedLoc) - self.mc.MOV(mem(loc, ofs_length), loc_num_elem) - - # XXX genop_new is abused for all varsized mallocs with Boehm, for now - # (instead of genop_new_array, genop_newstr, genop_newunicode) - def genop_new(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_new_array(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_array_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_newstr(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_str_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_newunicode(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_unicode_func_addr, arglocs, eax) + def genop_call_malloc_gc(self, op, arglocs, result_loc): + self.genop_call(op, arglocs, result_loc) self.propagate_memoryerror_if_eax_is_null() def propagate_memoryerror_if_eax_is_null(self): @@ -2066,6 +2012,8 @@ self._genop_call(op, arglocs, resloc, force_index) def _genop_call(self, op, arglocs, resloc, force_index): + from pypy.jit.backend.llsupport.descr import CallDescr + sizeloc = arglocs[0] assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value @@ -2080,13 +2028,16 @@ else: tmp = eax + descr = op.getdescr() + assert isinstance(descr, CallDescr) + self._emit_call(force_index, x, arglocs, 3, tmp=tmp, - argtypes=op.getdescr().get_arg_types(), - callconv=op.getdescr().get_call_conv()) + argtypes=descr.get_arg_types(), + callconv=descr.get_call_conv()) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.type == FLOAT: # a float or a long long return - if op.getdescr().get_return_type() == 'L': + if descr.get_result_type() == 'L': self.mc.MOV_br(resloc.value, eax.value) # long long self.mc.MOV_br(resloc.value + 4, edx.value) # XXX should ideally not move the result on the stack, @@ -2095,7 +2046,7 @@ # can just be always a stack location else: self.mc.FSTPL_b(resloc.value) # float return - elif op.getdescr().get_return_type() == 'S': + elif descr.get_result_type() == 'S': # singlefloat return assert resloc is eax if IS_X86_32: @@ -2293,9 +2244,9 @@ # # Reset the vable token --- XXX really too much special logic here:-( if jd.index_of_virtualizable >= 0: - from pypy.jit.backend.llsupport.descr import BaseFieldDescr + from pypy.jit.backend.llsupport.descr import FieldDescr fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) ofs = fielddescr.offset self.mc.MOV(eax, arglocs[1]) self.mc.MOV_mi((eax.value, ofs), 0) @@ -2498,9 +2449,8 @@ else: self.mc.JMP(imm(target)) - def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): - size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) - size = (size + WORD-1) & ~(WORD-1) # round up + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size): + assert size & (WORD-1) == 0 # must be correctly aligned self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edx.value, (eax.value, size)) self.mc.CMP(edx, heap(nursery_top_adr)) @@ -2536,9 +2486,6 @@ offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, 
chr(offset)) - # on 64-bits, 'tid' is a value that fits in 31 bits - assert rx86.fits_in_32bits(tid) - self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) genop_discard_list = [Assembler386.not_implemented_op_discard] * rop._LAST diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -16,8 +16,8 @@ from pypy.jit.codewriter import heaptracker, longlong from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.metainterp.resoperation import rop -from pypy.jit.backend.llsupport.descr import BaseFieldDescr, BaseArrayDescr -from pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr +from pypy.jit.backend.llsupport.descr import FieldDescr, ArrayDescr +from pypy.jit.backend.llsupport.descr import CallDescr, SizeDescr from pypy.jit.backend.llsupport.descr import InteriorFieldDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox @@ -870,9 +870,9 @@ def _consider_call(self, op, guard_not_forced_op=None): calldescr = op.getdescr() - assert isinstance(calldescr, BaseCallDescr) + assert isinstance(calldescr, CallDescr) assert len(calldescr.arg_classes) == op.numargs() - 1 - size = calldescr.get_result_size(self.translate_support_code) + size = calldescr.get_result_size() sign = calldescr.is_result_signed() if sign: sign_loc = imm1 @@ -917,12 +917,15 @@ consider_call_release_gil = consider_call_may_force + def consider_call_malloc_gc(self, op): + self._consider_call(op) + def consider_call_assembler(self, op, guard_op): descr = op.getdescr() assert isinstance(descr, JitCellToken) jd = descr.outermost_jitdriver_sd assert jd is not None - size = jd.portal_calldescr.get_result_size(self.translate_support_code) + size = jd.portal_calldescr.get_result_size() vable_index = jd.index_of_virtualizable if vable_index >= 0: self.rm._sync_var(op.getarg(vable_index)) @@ -957,21 +960,10 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb - def fastpath_malloc_fixedsize(self, op, descr): - assert isinstance(descr, BaseSizeDescr) - self._do_fastpath_malloc(op, descr.size, descr.tid) - - def fastpath_malloc_varsize(self, op, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - size = basesize + itemsize * num_elem - self._do_fastpath_malloc(op, size, arraydescr.tid) - self.assembler.set_new_array_length(eax, ofs_length, imm(num_elem)) - - def _do_fastpath_malloc(self, op, size, tid): - gc_ll_descr = self.assembler.cpu.gc_ll_descr + def consider_call_malloc_nursery(self, op): + size_box = op.getarg(0) + assert isinstance(size_box, ConstInt) + size = size_box.getint() self.rm.force_allocate_reg(op.result, selected_reg=eax) # # We need edx as a temporary, but otherwise don't save any more @@ -980,86 +972,39 @@ self.rm.force_allocate_reg(tmp_box, selected_reg=edx) self.rm.possibly_free_var(tmp_box) # + gc_ll_descr = self.assembler.cpu.gc_ll_descr self.assembler.malloc_cond( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), - size, tid, - ) - - def consider_new(self, op): - gc_ll_descr = self.assembler.cpu.gc_ll_descr - if gc_ll_descr.can_inline_malloc(op.getdescr()): - self.fastpath_malloc_fixedsize(op, op.getdescr()) - else: - args = gc_ll_descr.args_for_new(op.getdescr()) - 
arglocs = [imm(x) for x in args] - return self._call(op, arglocs) - - def consider_new_with_vtable(self, op): - classint = op.getarg(0).getint() - descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) - if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): - self.fastpath_malloc_fixedsize(op, descrsize) - self.assembler.set_vtable(eax, imm(classint)) - # result of fastpath malloc is in eax - else: - args = self.assembler.cpu.gc_ll_descr.args_for_new(descrsize) - arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.getarg(0))) - return self._call(op, arglocs) - - def consider_newstr(self, op): - loc = self.loc(op.getarg(0)) - return self._call(op, [loc]) - - def consider_newunicode(self, op): - loc = self.loc(op.getarg(0)) - return self._call(op, [loc]) - - def consider_new_array(self, op): - gc_ll_descr = self.assembler.cpu.gc_ll_descr - box_num_elem = op.getarg(0) - if isinstance(box_num_elem, ConstInt): - num_elem = box_num_elem.value - if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), - num_elem): - self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) - return - args = self.assembler.cpu.gc_ll_descr.args_for_new_array( - op.getdescr()) - arglocs = [imm(x) for x in args] - arglocs.append(self.loc(box_num_elem)) - self._call(op, arglocs) + size) def _unpack_arraydescr(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - ofs = arraydescr.get_base_size(self.translate_support_code) - size = arraydescr.get_item_size(self.translate_support_code) - ptr = arraydescr.is_array_of_pointers() + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.basesize + size = arraydescr.itemsize sign = arraydescr.is_item_signed() - return size, ofs, ofs_length, ptr, sign + return size, ofs, sign def _unpack_fielddescr(self, fielddescr): - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) ofs = fielddescr.offset - size = fielddescr.get_field_size(self.translate_support_code) - ptr = fielddescr.is_pointer_field() + size = fielddescr.field_size sign = fielddescr.is_field_signed() - return imm(ofs), imm(size), ptr, sign + return imm(ofs), imm(size), sign + _unpack_fielddescr._always_inline_ = True def _unpack_interiorfielddescr(self, descr): assert isinstance(descr, InteriorFieldDescr) arraydescr = descr.arraydescr - ofs = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - fieldsize = descr.fielddescr.get_field_size(self.translate_support_code) + ofs = arraydescr.basesize + itemsize = arraydescr.itemsize + fieldsize = descr.fielddescr.field_size sign = descr.fielddescr.is_field_signed() ofs += descr.fielddescr.offset return imm(ofs), imm(itemsize), imm(fieldsize), sign def consider_setfield_gc(self, op): - ofs_loc, size_loc, _, _ = self._unpack_fielddescr(op.getdescr()) + ofs_loc, size_loc, _ = self._unpack_fielddescr(op.getdescr()) assert isinstance(size_loc, ImmedLoc) if size_loc.value == 1: need_lower_byte = True @@ -1117,7 +1062,7 @@ consider_unicodesetitem = consider_strsetitem def consider_setarrayitem_gc(self, op): - itemsize, ofs, _, _, _ = self._unpack_arraydescr(op.getdescr()) + itemsize, ofs, _ = self._unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) if itemsize == 1: @@ -1134,7 +1079,7 @@ consider_setarrayitem_raw = consider_setarrayitem_gc def consider_getfield_gc(self, op): - ofs_loc, 
size_loc, _, sign = self._unpack_fielddescr(op.getdescr()) + ofs_loc, size_loc, sign = self._unpack_fielddescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars(args) @@ -1150,7 +1095,7 @@ consider_getfield_gc_pure = consider_getfield_gc def consider_getarrayitem_gc(self, op): - itemsize, ofs, _, _, sign = self._unpack_arraydescr(op.getdescr()) + itemsize, ofs, sign = self._unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) @@ -1229,8 +1174,8 @@ def consider_arraylen_gc(self, op): arraydescr = op.getdescr() - assert isinstance(arraydescr, BaseArrayDescr) - ofs = arraydescr.get_ofs_length(self.translate_support_code) + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.lendescr.offset args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars_for_op(op) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -8,7 +8,7 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.codewriter import heaptracker from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.backend.llsupport.descr import GcCache +from pypy.jit.backend.llsupport.descr import GcCache, FieldDescr, FLAG_SIGNED from pypy.jit.backend.llsupport.gc import GcLLDescription from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.regalloc import RegAlloc @@ -17,7 +17,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.annlowlevel import llhelper from pypy.rpython.lltypesystem import rclass, rstr -from pypy.jit.backend.llsupport.gc import GcLLDescr_framework, GcPtrFieldDescr +from pypy.jit.backend.llsupport.gc import GcLLDescr_framework from pypy.jit.backend.x86.test.test_regalloc import MockAssembler from pypy.jit.backend.x86.test.test_regalloc import BaseTestRegalloc @@ -41,20 +41,15 @@ return ['compressed'] + shape[1:] class MockGcDescr(GcCache): - def get_funcptr_for_new(self): - return 123 - get_funcptr_for_newarray = get_funcptr_for_new - get_funcptr_for_newstr = get_funcptr_for_new - get_funcptr_for_newunicode = get_funcptr_for_new get_malloc_slowpath_addr = None - + write_barrier_descr = None moving_gc = True gcrootmap = MockGcRootMap() def initialize(self): pass - record_constptrs = GcLLDescr_framework.record_constptrs.im_func + _record_constptrs = GcLLDescr_framework._record_constptrs.im_func rewrite_assembler = GcLLDescr_framework.rewrite_assembler.im_func class TestRegallocDirectGcIntegration(object): @@ -170,42 +165,32 @@ ''' self.interpret(ops, [0, 0, 0, 0, 0, 0, 0, 0, 0], run=False) +NOT_INITIALIZED = chr(0xdd) + class GCDescrFastpathMalloc(GcLLDescription): gcrootmap = None - expected_malloc_slowpath_size = WORD*2 + write_barrier_descr = None def __init__(self): - GcCache.__init__(self, False) + GcLLDescription.__init__(self, None) # create a nursery - NTP = rffi.CArray(lltype.Signed) - self.nursery = lltype.malloc(NTP, 16, flavor='raw') - self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 3, + NTP = rffi.CArray(lltype.Char) + self.nursery = lltype.malloc(NTP, 64, flavor='raw') + for i in range(64): + self.nursery[i] = NOT_INITIALIZED + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 2, 
flavor='raw') self.addrs[0] = rffi.cast(lltype.Signed, self.nursery) - self.addrs[1] = self.addrs[0] + 16*WORD - self.addrs[2] = 0 - # 16 WORDs + self.addrs[1] = self.addrs[0] + 64 + self.calls = [] def malloc_slowpath(size): - assert size == self.expected_malloc_slowpath_size + self.calls.append(size) + # reset the nursery nadr = rffi.cast(lltype.Signed, self.nursery) self.addrs[0] = nadr + size - self.addrs[2] += 1 return nadr - self.malloc_slowpath = malloc_slowpath - self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], - lltype.Signed) - self._counter = 123000 - - def can_inline_malloc(self, descr): - return True - - def get_funcptr_for_new(self): - return 42 -# return llhelper(lltype.Ptr(self.NEW_TP), self.new) - - def init_size_descr(self, S, descr): - descr.tid = self._counter - self._counter += 1 + self.generate_function('malloc_nursery', malloc_slowpath, + [lltype.Signed], lltype.Signed) def get_nursery_free_addr(self): return rffi.cast(lltype.Signed, self.addrs) @@ -214,204 +199,61 @@ return rffi.cast(lltype.Signed, self.addrs) + WORD def get_malloc_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) - return rffi.cast(lltype.Signed, fptr) + return self.get_malloc_fn_addr('malloc_nursery') - get_funcptr_for_newarray = None - get_funcptr_for_newstr = None - get_funcptr_for_newunicode = None + def check_nothing_in_nursery(self): + # CALL_MALLOC_NURSERY should not write anything in the nursery + for i in range(64): + assert self.nursery[i] == NOT_INITIALIZED class TestMallocFastpath(BaseTestRegalloc): def setup_method(self, method): cpu = CPU(None, None) - cpu.vtable_offset = WORD cpu.gc_ll_descr = GCDescrFastpathMalloc() cpu.setup_once() + self.cpu = cpu - # hack: specify 'tid' explicitly, because this test is not running - # with the gc transformer - NODE = lltype.GcStruct('node', ('tid', lltype.Signed), - ('value', lltype.Signed)) - nodedescr = cpu.sizeof(NODE) - valuedescr = cpu.fielddescrof(NODE, 'value') - - self.cpu = cpu - self.nodedescr = nodedescr - vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) - vtable_int = cpu.cast_adr_to_int(llmemory.cast_ptr_to_adr(vtable)) - NODE2 = lltype.GcStruct('node2', - ('parent', rclass.OBJECT), - ('tid', lltype.Signed), - ('vtable', lltype.Ptr(rclass.OBJECT_VTABLE))) - descrsize = cpu.sizeof(NODE2) - heaptracker.register_known_gctype(cpu, vtable, NODE2) - self.descrsize = descrsize - self.vtable_int = vtable_int - - self.namespace = locals().copy() - def test_malloc_fastpath(self): ops = ''' - [i0] - p0 = new(descr=nodedescr) - setfield_gc(p0, i0, descr=valuedescr) - finish(p0) + [] + p0 = call_malloc_nursery(16) + p1 = call_malloc_nursery(32) + p2 = call_malloc_nursery(16) + finish(p0, p1, p2) ''' - self.interpret(ops, [42]) - # check the nursery + self.interpret(ops, []) + # check the returned pointers gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == self.nodedescr.tid - assert gc_ll_descr.nursery[1] == 42 nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*2) - assert gc_ll_descr.addrs[2] == 0 # slowpath never called + ref = self.cpu.get_latest_value_ref + assert rffi.cast(lltype.Signed, ref(0)) == nurs_adr + 0 + assert rffi.cast(lltype.Signed, ref(1)) == nurs_adr + 16 + assert rffi.cast(lltype.Signed, ref(2)) == nurs_adr + 48 + # check the nursery content and state + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.addrs[0] == nurs_adr + 64 + # slowpath never called + assert gc_ll_descr.calls 
== [] def test_malloc_slowpath(self): ops = ''' [] - p0 = new(descr=nodedescr) - p1 = new(descr=nodedescr) - p2 = new(descr=nodedescr) - p3 = new(descr=nodedescr) - p4 = new(descr=nodedescr) - p5 = new(descr=nodedescr) - p6 = new(descr=nodedescr) - p7 = new(descr=nodedescr) - p8 = new(descr=nodedescr) - finish(p0, p1, p2, p3, p4, p5, p6, p7, p8) + p0 = call_malloc_nursery(16) + p1 = call_malloc_nursery(32) + p2 = call_malloc_nursery(24) # overflow + finish(p0, p1, p2) ''' self.interpret(ops, []) + # check the returned pointers + gc_ll_descr = self.cpu.gc_ll_descr + nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) + ref = self.cpu.get_latest_value_ref + assert rffi.cast(lltype.Signed, ref(0)) == nurs_adr + 0 + assert rffi.cast(lltype.Signed, ref(1)) == nurs_adr + 16 + assert rffi.cast(lltype.Signed, ref(2)) == nurs_adr + 0 + # check the nursery content and state + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.addrs[0] == nurs_adr + 24 # this should call slow path once - gc_ll_descr = self.cpu.gc_ll_descr - nadr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nadr + (WORD*2) - assert gc_ll_descr.addrs[2] == 1 # slowpath called once - - def test_new_with_vtable(self): - ops = ''' - [i0, i1] - p0 = new_with_vtable(ConstClass(vtable)) - guard_class(p0, ConstClass(vtable)) [i0] - finish(i1) - ''' - self.interpret(ops, [0, 1]) - assert self.getint(0) == 1 - gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == self.descrsize.tid - assert gc_ll_descr.nursery[1] == self.vtable_int - nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*3) - assert gc_ll_descr.addrs[2] == 0 # slowpath never called - - -class Seen(Exception): - pass - -class GCDescrFastpathMallocVarsize(GCDescrFastpathMalloc): - def can_inline_malloc_varsize(self, arraydescr, num_elem): - return num_elem < 5 - def get_funcptr_for_newarray(self): - return 52 - def init_array_descr(self, A, descr): - descr.tid = self._counter - self._counter += 1 - def args_for_new_array(self, descr): - raise Seen("args_for_new_array") - -class TestMallocVarsizeFastpath(BaseTestRegalloc): - def setup_method(self, method): - cpu = CPU(None, None) - cpu.vtable_offset = WORD - cpu.gc_ll_descr = GCDescrFastpathMallocVarsize() - cpu.setup_once() - self.cpu = cpu - - ARRAY = lltype.GcArray(lltype.Signed) - arraydescr = cpu.arraydescrof(ARRAY) - self.arraydescr = arraydescr - ARRAYCHAR = lltype.GcArray(lltype.Char) - arraychardescr = cpu.arraydescrof(ARRAYCHAR) - - self.namespace = locals().copy() - - def test_malloc_varsize_fastpath(self): - # Hack. Running the GcLLDescr_framework without really having - # a complete GC means that we end up with both the tid and the - # length being at offset 0. In this case, so the length overwrites - # the tid. This is of course only the case in this test class. 
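(What test_malloc_fastpath and test_malloc_slowpath assert can be modelled as a plain bump-pointer allocator over a fixed nursery, where the slow path is taken only when the bump would cross the nursery top. A small self-contained model of those semantics; the class and attribute names are made up for the example, only the behaviour mirrors the tests:

    class NurseryModel(object):
        def __init__(self, size=64):
            self.size = size
            self.free = 0            # offset of the next free byte
            self.slowpath_calls = []

        def malloc_nursery(self, nbytes):
            # fast path: bump the free pointer if the request fits
            if self.free + nbytes <= self.size:
                result = self.free
                self.free += nbytes
                return result
            # slow path: pretend a minor collection emptied the nursery
            self.slowpath_calls.append(nbytes)
            self.free = nbytes
            return 0

    n = NurseryModel()
    assert [n.malloc_nursery(s) for s in (16, 32, 16)] == [0, 16, 48]
    assert n.slowpath_calls == []       # everything fit: no slow path
    n = NurseryModel()
    assert [n.malloc_nursery(s) for s in (16, 32, 24)] == [0, 16, 0]
    assert n.slowpath_calls == [24]     # the third request overflowed
)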
- ops = ''' - [] - p0 = new_array(4, descr=arraydescr) - setarrayitem_gc(p0, 0, 142, descr=arraydescr) - setarrayitem_gc(p0, 3, 143, descr=arraydescr) - finish(p0) - ''' - self.interpret(ops, []) - # check the nursery - gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == 4 - assert gc_ll_descr.nursery[1] == 142 - assert gc_ll_descr.nursery[4] == 143 - nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*5) - assert gc_ll_descr.addrs[2] == 0 # slowpath never called - - def test_malloc_varsize_slowpath(self): - ops = ''' - [] - p0 = new_array(4, descr=arraydescr) - setarrayitem_gc(p0, 0, 420, descr=arraydescr) - setarrayitem_gc(p0, 3, 430, descr=arraydescr) - p1 = new_array(4, descr=arraydescr) - setarrayitem_gc(p1, 0, 421, descr=arraydescr) - setarrayitem_gc(p1, 3, 431, descr=arraydescr) - p2 = new_array(4, descr=arraydescr) - setarrayitem_gc(p2, 0, 422, descr=arraydescr) - setarrayitem_gc(p2, 3, 432, descr=arraydescr) - p3 = new_array(4, descr=arraydescr) - setarrayitem_gc(p3, 0, 423, descr=arraydescr) - setarrayitem_gc(p3, 3, 433, descr=arraydescr) - finish(p0, p1, p2, p3) - ''' - gc_ll_descr = self.cpu.gc_ll_descr - gc_ll_descr.expected_malloc_slowpath_size = 5*WORD - self.interpret(ops, []) - assert gc_ll_descr.addrs[2] == 1 # slowpath called once - - def test_malloc_varsize_too_big(self): - ops = ''' - [] - p0 = new_array(5, descr=arraydescr) - finish(p0) - ''' - py.test.raises(Seen, self.interpret, ops, []) - - def test_malloc_varsize_variable(self): - ops = ''' - [i0] - p0 = new_array(i0, descr=arraydescr) - finish(p0) - ''' - py.test.raises(Seen, self.interpret, ops, []) - - def test_malloc_array_of_char(self): - # check that fastpath_malloc_varsize() respects the alignment - # of the pointer in the nursery - ops = ''' - [] - p1 = new_array(1, descr=arraychardescr) - p2 = new_array(2, descr=arraychardescr) - p3 = new_array(3, descr=arraychardescr) - p4 = new_array(4, descr=arraychardescr) - finish(p1, p2, p3, p4) - ''' - self.interpret(ops, []) - p1 = self.getptr(0, llmemory.GCREF) - p2 = self.getptr(1, llmemory.GCREF) - p3 = self.getptr(2, llmemory.GCREF) - p4 = self.getptr(3, llmemory.GCREF) - assert p1._obj.intval & (WORD-1) == 0 # aligned - assert p2._obj.intval & (WORD-1) == 0 # aligned - assert p3._obj.intval & (WORD-1) == 0 # aligned - assert p4._obj.intval & (WORD-1) == 0 # aligned + assert gc_ll_descr.calls == [24] diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -69,6 +69,7 @@ return ctypes.cast(res.value._obj.intval, ctypes.POINTER(item_tp)) def test_allocations(self): + py.test.skip("rewrite or kill") from pypy.rpython.lltypesystem import rstr allocs = [None] diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -69,16 +69,17 @@ def get_functions_to_patch(): from pypy.jit.backend.llsupport import gc # - can_inline_malloc1 = gc.GcLLDescr_framework.can_inline_malloc - def can_inline_malloc2(*args): + can_use_nursery_malloc1 = gc.GcLLDescr_framework.can_use_nursery_malloc + def can_use_nursery_malloc2(*args): try: if os.environ['PYPY_NO_INLINE_MALLOC']: return False except KeyError: pass - return can_inline_malloc1(*args) + return can_use_nursery_malloc1(*args) # - return {(gc.GcLLDescr_framework, 'can_inline_malloc'): 
can_inline_malloc2} + return {(gc.GcLLDescr_framework, 'can_use_nursery_malloc'): + can_use_nursery_malloc2} def compile(f, gc, enable_opts='', **kwds): from pypy.annotation.listdef import s_list_of_strings diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -46,7 +46,7 @@ # get the function address as an integer func = argboxes[0].getint() # do the call using the correct function from the cpu - rettype = descr.get_return_type() + rettype = descr.get_result_type() if rettype == INT or rettype == 'S': # *S*ingle float try: result = cpu.bh_call_i(func, descr, args_i, args_r, args_f) @@ -344,6 +344,8 @@ rop.SETINTERIORFIELD_RAW, rop.CALL_RELEASE_GIL, rop.QUASIIMMUT_FIELD, + rop.CALL_MALLOC_GC, + rop.CALL_MALLOC_NURSERY, rop.LABEL, ): # list of opcodes never executed by pyjitpl continue diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -142,59 +142,6 @@ def repr_of_descr(self): return '%r' % (self,) - def get_arg_types(self): - """ Implement in call descr. - Must return a string of INT, REF and FLOAT ('i', 'r', 'f'). - """ - raise NotImplementedError - - def get_return_type(self): - """ Implement in call descr. - Must return INT, REF, FLOAT, or 'v' for void. - On 32-bit (hack) it can also be 'L' for longlongs. - Additionally it can be 'S' for singlefloats. - """ - raise NotImplementedError - - def get_extra_info(self): - """ Implement in call descr - """ - raise NotImplementedError - - def is_array_of_pointers(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_array_of_floats(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_array_of_structs(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_pointer_field(self): - """ Implement for field descr - """ - raise NotImplementedError - - def is_float_field(self): - """ Implement for field descr - """ - raise NotImplementedError - - def as_vtable_size_descr(self): - """ Implement for size descr representing objects with vtables. - Returns self. 
(it's an annotation hack) - """ - raise NotImplementedError - - def count_fields_if_immutable(self): - return -1 - def _clone_if_mutable(self): return self def clone_if_mutable(self): diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -1,5 +1,5 @@ import math -from pypy.rlib.rarithmetic import r_int64, r_uint +from pypy.rlib.rarithmetic import r_int64 from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.rlib.objectmodel import we_are_translated @@ -21,7 +21,6 @@ # class MemoryManager(object): - NO_NEXT_CHECK = r_int64(2 ** 63 - 1) def __init__(self): self.check_frequency = -1 @@ -37,13 +36,12 @@ # According to my estimates it's about 5e9 years given 1000 loops # per second self.current_generation = r_int64(1) - self.next_check = self.NO_NEXT_CHECK + self.next_check = r_int64(-1) self.alive_loops = {} - self._cleanup_jitcell_dicts = lambda: None def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: - self.next_check = self.NO_NEXT_CHECK + self.next_check = r_int64(-1) else: self.max_age = max_age if check_frequency <= 0: @@ -51,11 +49,10 @@ self.check_frequency = check_frequency self.next_check = self.current_generation + 1 - def next_generation(self, do_cleanups_now=True): + def next_generation(self): self.current_generation += 1 - if do_cleanups_now and self.current_generation >= self.next_check: + if self.current_generation == self.next_check: self._kill_old_loops_now() - self._cleanup_jitcell_dicts() self.next_check = self.current_generation + self.check_frequency def keep_loop_alive(self, looptoken): @@ -84,22 +81,3 @@ # a single one is not enough for all tests :-( rgc.collect(); rgc.collect(); rgc.collect() debug_stop("jit-mem-collect") - - def get_current_generation_uint(self): - """Return the current generation, possibly truncated to a uint. - To use only as an approximation for decaying counters.""" - return r_uint(self.current_generation) - - def record_jitcell_dict(self, callback): - """NOT_RPYTHON. The given jitcell_dict is a dict that needs - occasional clean-ups of old cells. A cell is old if it never - reached the threshold, and its counter decayed to a tiny value.""" - # note that the various jitcell_dicts have different RPython types, - # so we have to make a different function for each one. These - # functions are chained to each other: each calls the previous one. 
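(The chaining described in that removed comment is simply closures stacked on top of each other: every registered dict contributes its own cleanup function, and each newly installed one calls the previously installed chain. A minimal sketch of the pattern; the class and function names are illustrative, not the actual PyPy code:

    class CleanupChain(object):
        def __init__(self):
            # start with a no-op so the chain is always callable
            self._cleanup_all = lambda: None

        def register(self, callback):
            previous = self._cleanup_all
            def cleanup():
                callback()    # clean this dict...
                previous()    # ...then everything registered before it
            self._cleanup_all = cleanup

        def run(self):
            self._cleanup_all()

    calls = []
    chain = CleanupChain()
    chain.register(lambda: calls.append('dict1'))
    chain.register(lambda: calls.append('dict2'))
    chain.run()
    assert calls == ['dict2', 'dict1']   # most recently registered runs first
)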
- def cleanup_dict(): - callback() - cleanup_previous() - # - cleanup_previous = self._cleanup_jitcell_dicts - self._cleanup_jitcell_dicts = cleanup_dict diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -1,3 +1,4 @@ +from __future__ import with_statement from pypy.jit.metainterp.optimizeopt.test.test_util import ( LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot) from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -508,6 +508,8 @@ #'OOSEND', # ootype operation #'OOSEND_PURE', # ootype operation 'CALL_PURE/*d', # removed before it's passed to the backend + 'CALL_MALLOC_GC/*d', # like CALL, but NULL => propagate MemoryError + 'CALL_MALLOC_NURSERY/1', # nursery malloc, const number of bytes, zeroed '_CALL_LAST', '_CANRAISE_LAST', # ----- end of can_raise operations ----- diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2910,27 +2910,6 @@ res = self.meta_interp(f, [32]) assert res == f(32) - def test_decay_counters(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def f(m, n): - while n > 0: - myjitdriver.jit_merge_point(m=m, n=n) - n += m - n -= m - n -= 1 - def main(): - f(5, 7) # run 7x with m=5 counter[m=5] = 7 - f(15, 10) # compiles one loop counter[m=5] = 3 (automatic decay) - f(5, 5) # run 5x times with m=5 counter[m=5] = 8 - # - self.meta_interp(main, [], decay_halflife=1, - function_threshold=0, threshold=9, trace_eagerness=99) - self.check_trace_count(1) - # - self.meta_interp(main, [], decay_halflife=1, - function_threshold=0, threshold=8, trace_eagerness=99) - self.check_trace_count(2) - class TestOOtype(BasicTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_executor.py b/pypy/jit/metainterp/test/test_executor.py --- a/pypy/jit/metainterp/test/test_executor.py +++ b/pypy/jit/metainterp/test/test_executor.py @@ -18,7 +18,7 @@ pass class FakeCallDescr(FakeDescr): - def get_return_type(self): + def get_result_type(self): return history.FLOAT class FakeFieldDescr(FakeDescr): diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -1,4 +1,3 @@ -import math from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -9,7 +8,7 @@ from pypy.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr from pypy.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr from pypy.jit.codewriter import longlong -from pypy.rlib.rarithmetic import r_singlefloat, r_uint +from pypy.rlib.rarithmetic import r_singlefloat def boxfloat(x): return BoxFloat(longlong.getfloatstorage(x)) @@ -277,76 +276,51 @@ res = state.can_never_inline(5, 42.5) assert res is True -def test_decay_counters(): - cell = JitCell(r_uint(5)) - cell.counter = 100 - cell.adjust_counter(r_uint(5), math.log(0.9)) - assert cell.counter == 100 - cell.adjust_counter(r_uint(6), math.log(0.9)) - assert cell.counter == 
90 - cell.adjust_counter(r_uint(9), math.log(0.9)) - assert cell.counter == int(90 * (0.9**3)) - def test_cleanup_jitcell_dict(): - from pypy.jit.metainterp.memmgr import MemoryManager - class FakeWarmRunnerDesc: - memory_manager = MemoryManager() - class cpu: - pass class FakeJitDriverSD: _green_args_spec = [lltype.Signed] # # Test creating tons of jitcells that remain at 0 - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() cell1 = get_jitcell(True, -1) assert len(warmstate._jitcell_dict) == 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 1 # for i in range(1, 20005): get_jitcell(True, i) # should trigger a clean-up at 20001 assert len(warmstate._jitcell_dict) == (i % 20000) + 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 2 # # Same test, with one jitcell that has a counter of BASE instead of 0 - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) - warmstate.set_param_decay_halflife(2) - warmstate.set_param_threshold(5) - warmstate.set_param_function_threshold(0) + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() cell2 = get_jitcell(True, -2) - cell2.counter = BASE = warmstate.increment_threshold * 3 + cell2.counter = BASE = warmstate.THRESHOLD_LIMIT // 2 # 50% # for i in range(0, 20005): get_jitcell(True, i) assert len(warmstate._jitcell_dict) == (i % 19999) + 2 # assert cell2 in warmstate._jitcell_dict.values() - assert cell2.counter == int(BASE * math.sqrt(0.5)) # decayed once - assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 + assert cell2.counter == int(BASE * 0.92) # decayed once # - # Same test, with jitcells that are compiled and free by the memmgr - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + # Same test, with jitcells that are compiled and freed by the memmgr + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() get_jitcell(True, -1) - assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 # for i in range(1, 20005): cell = get_jitcell(True, i) cell.counter = -1 cell.wref_procedure_token = None # or a dead weakref, equivalently assert len(warmstate._jitcell_dict) == (i % 20000) + 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 # # Same test, with counter == -2 (rare case, kept alive) - warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + warmstate = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = warmstate._make_jitcell_getter_default() cell = get_jitcell(True, -1) cell.counter = -2 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 # for i in range(1, 20005): cell = get_jitcell(True, i) cell.counter = -2 assert len(warmstate._jitcell_dict) == i + 1 - assert FakeWarmRunnerDesc.memory_manager.current_generation == 5 diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -64,11 +64,9 @@ def jittify_and_run(interp, graph, args, repeat=1, graph_and_interp_only=False, backendopt=False, trace_limit=sys.maxint, - threshold=3, trace_eagerness=2, inline=False, loop_longevity=0, retrace_limit=5, - function_threshold=4, decay_halflife=0, - enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, - **kwds): + function_threshold=4, + enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, **kwds): 
from pypy.config.config import ConfigError translator = interp.typer.annotator.translator try: @@ -85,16 +83,15 @@ pass warmrunnerdesc = WarmRunnerDesc(translator, backendopt=backendopt, **kwds) for jd in warmrunnerdesc.jitdrivers_sd: - jd.warmstate.set_param_threshold(threshold) + jd.warmstate.set_param_threshold(3) # for tests jd.warmstate.set_param_function_threshold(function_threshold) - jd.warmstate.set_param_trace_eagerness(trace_eagerness) + jd.warmstate.set_param_trace_eagerness(2) # for tests jd.warmstate.set_param_trace_limit(trace_limit) jd.warmstate.set_param_inlining(inline) jd.warmstate.set_param_loop_longevity(loop_longevity) jd.warmstate.set_param_retrace_limit(retrace_limit) jd.warmstate.set_param_max_retrace_guards(max_retrace_guards) jd.warmstate.set_param_enable_opts(enable_opts) - jd.warmstate.set_param_decay_halflife(decay_halflife) warmrunnerdesc.finish() if graph_and_interp_only: return interp, graph diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -1,10 +1,10 @@ -import sys, weakref, math +import sys, weakref from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, cast_base_ptr_to_instance from pypy.rpython.annlowlevel import cast_object_to_ptr from pypy.rlib.objectmodel import specialize, we_are_translated, r_dict -from pypy.rlib.rarithmetic import intmask, r_uint +from pypy.rlib.rarithmetic import intmask from pypy.rlib.nonconst import NonConstant from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.jit import PARAMETERS @@ -151,27 +151,9 @@ # counter == -2: tracing is currently going on for this cell counter = 0 dont_trace_here = False + extra_delay = chr(0) wref_procedure_token = None - def __init__(self, generation): - # The stored 'counter' value follows an exponential decay model. - # Conceptually after every generation, it decays by getting - # multiplied by a constant <= 1.0. In practice, decaying occurs - # lazily: the following field records the latest seen generation - # number, and adjustment is done by adjust_counter() when needed. - self.latest_generation_seen = generation - - def adjust_counter(self, generation, log_decay_factor): - if generation != self.latest_generation_seen: - # The latest_generation_seen is older than the current generation. - # Adjust by multiplying self.counter N times by decay_factor, i.e. - # by decay_factor ** N, which is equal to exp(log(decay_factor)*N). - assert self.counter >= 0 - N = generation - self.latest_generation_seen - factor = math.exp(log_decay_factor * N) - self.counter = int(self.counter * factor) - self.latest_generation_seen = generation - def get_procedure_token(self): if self.wref_procedure_token is not None: token = self.wref_procedure_token() @@ -231,17 +213,6 @@ def set_param_inlining(self, value): self.inlining = value - def set_param_decay_halflife(self, value): - # Use 0 or -1 to mean "no decay". Initialize the internal variable - # 'log_decay_factor'. It is choosen such that by multiplying the - # counter on loops by 'exp(log_decay_factor)' (<= 1.0) every - # generation, then the counter will be divided by two after 'value' - # generations have passed. 
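(The decay model deleted here follows directly from its comment: with log_decay_factor = log(0.5) / halflife, multiplying a counter by exp(log_decay_factor * N) scales it by 0.5 ** (N / halflife), so the counter halves every `halflife` generations even though the adjustment is applied lazily in one step. A short numeric check of that relationship, standalone rather than PyPy code:

    import math

    def decayed(counter, generations_elapsed, halflife):
        # lazy exponential decay: apply N generations' worth of decay at once
        log_decay_factor = math.log(0.5) / halflife
        return counter * math.exp(log_decay_factor * generations_elapsed)

    # after exactly one half-life the counter has halved...
    assert abs(decayed(100, 10, halflife=10) - 50.0) < 1e-9
    # ...and after two half-lives it has quartered
    assert abs(decayed(100, 20, halflife=10) - 25.0) < 1e-9
)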
- if value <= 0: - self.log_decay_factor = 0.0 # log(1.0) - else: - self.log_decay_factor = math.log(0.5) / value - def set_param_enable_opts(self, value): from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT, ALL_OPTS_NAMES @@ -311,11 +282,6 @@ confirm_enter_jit = self.confirm_enter_jit range_red_args = unrolling_iterable( range(num_green_args, num_green_args + jitdriver_sd.num_red_args)) - memmgr = self.warmrunnerdesc.memory_manager - if memmgr is not None: - get_current_generation = memmgr.get_current_generation_uint - else: - get_current_generation = lambda: r_uint(0) # get a new specialized copy of the method ARGS = [] for kind in jitdriver_sd.red_args_types: @@ -350,6 +316,36 @@ # assert 0, "should have raised" + def bound_reached(cell, *args): + # bound reached, but we do a last check: if it is the first + # time we reach the bound, or if another loop or bridge was + # compiled since the last time we reached it, then decrease + # the counter by a few percents instead. It should avoid + # sudden bursts of JIT-compilation, and also corner cases + # where we suddenly compile more than one loop because all + # counters reach the bound at the same time, but where + # compiling all but the first one is pointless. + curgen = warmrunnerdesc.memory_manager.current_generation + curgen = chr(intmask(curgen) & 0xFF) # only use 8 bits + if we_are_translated() and curgen != cell.extra_delay: + cell.counter = int(self.THRESHOLD_LIMIT * 0.98) + cell.extra_delay = curgen + return + # + if not confirm_enter_jit(*args): + cell.counter = 0 + return + # start tracing + from pypy.jit.metainterp.pyjitpl import MetaInterp + metainterp = MetaInterp(metainterp_sd, jitdriver_sd) + # set counter to -2, to mean "tracing in effect" + cell.counter = -2 + try: + metainterp.compile_and_run_once(jitdriver_sd, *args) + finally: + if cell.counter == -2: + cell.counter = 0 + def maybe_compile_and_run(threshold, *args): """Entry point to the JIT. Called at the point with the can_enter_jit() hint. 
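
Taken together, maybe_compile_and_run() and bound_reached() implement a small back-off scheme: counters climb towards THRESHOLD_LIMIT, and the first time a cell hits the bound (or whenever another loop or bridge has been compiled since its last hit) the counter is pushed back to 98% of the limit instead of tracing immediately. The toy model below restates that logic outside the RPython sources; ToyCell, toy_maybe_compile and the THRESHOLD_LIMIT value are illustrative stand-ins, not the real warmstate classes.

import sys

THRESHOLD_LIMIT = sys.maxint // 2      # assumed value, for illustration only

class ToyCell(object):
    counter = 0
    extra_delay = chr(0)

def toy_maybe_compile(cell, increment, current_generation, compile_now):
    # 'increment' is chosen so that the bound is reached after roughly
    # 'threshold' executions of the corresponding loop header.
    n = cell.counter + increment
    if n <= THRESHOLD_LIMIT:           # bound not reached yet: just count
        cell.counter = n
        return
    curgen = chr(current_generation & 0xFF)
    if curgen != cell.extra_delay:     # first hit, or something was compiled
        cell.counter = int(THRESHOLD_LIMIT * 0.98)   # back off by ~2%
        cell.extra_delay = curgen
        return
    cell.counter = -2                  # "tracing in effect"
    try:
        compile_now()
    finally:
        if cell.counter == -2:
            cell.counter = 0
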
@@ -360,25 +356,13 @@ if cell.counter >= 0: # update the profiling counter - cell.adjust_counter(get_current_generation(), - self.log_decay_factor) n = cell.counter + threshold if n <= self.THRESHOLD_LIMIT: # bound not reached cell.counter = n return - if not confirm_enter_jit(*args): - cell.counter = 0 + else: + bound_reached(cell, *args) return - # bound reached; start tracing - from pypy.jit.metainterp.pyjitpl import MetaInterp - metainterp = MetaInterp(metainterp_sd, jitdriver_sd) - # set counter to -2, to mean "tracing in effect" - cell.counter = -2 - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - finally: - if cell.counter == -2: - cell.counter = 0 else: if cell.counter != -1: assert cell.counter == -2 @@ -454,15 +438,6 @@ # return jit_getter - def _new_jitcell(self): - warmrunnerdesc = self.warmrunnerdesc - if (warmrunnerdesc is not None and - warmrunnerdesc.memory_manager is not None): - gen = warmrunnerdesc.memory_manager.get_current_generation_uint() - else: - gen = r_uint(0) - return JitCell(gen) - def _make_jitcell_getter_default(self): "NOT_RPYTHON" jitdriver_sd = self.jitdriver_sd @@ -492,44 +467,32 @@ except AttributeError: pass # - memmgr = self.warmrunnerdesc and self.warmrunnerdesc.memory_manager - if memmgr: - def _cleanup_dict(): - minimum = sys.maxint - if self.increment_threshold > 0: - minimum = min(minimum, self.increment_threshold) - if self.increment_function_threshold > 0: - minimum = min(minimum, self.increment_function_threshold) - currentgen = memmgr.get_current_generation_uint() - killme = [] - for key, cell in jitcell_dict.iteritems(): - if cell.counter >= 0: - cell.adjust_counter(currentgen, self.log_decay_factor) - if cell.counter < minimum: - killme.append(key) - elif (cell.counter == -1 - and cell.get_procedure_token() is None): + def _cleanup_dict(): + minimum = self.THRESHOLD_LIMIT // 20 # minimum 5% + killme = [] + for key, cell in jitcell_dict.iteritems(): + if cell.counter >= 0: + cell.counter = int(cell.counter * 0.92) + if cell.counter < minimum: killme.append(key) - for key in killme: - del jitcell_dict[key] - # - def _maybe_cleanup_dict(): - # If no tracing goes on at all because the jitcells are - # each time for new greenargs, the dictionary grows forever. - # So every one in a (rare) while, we decide to force an - # artificial next_generation() and _cleanup_dict(). - self._trigger_automatic_cleanup += 1 - if self._trigger_automatic_cleanup > 20000: - self._trigger_automatic_cleanup = 0 - memmgr.next_generation(do_cleanups_now=False) - _cleanup_dict() - # - self._trigger_automatic_cleanup = 0 - self._jitcell_dict = jitcell_dict # for tests - memmgr.record_jitcell_dict(_cleanup_dict) - else: - def _maybe_cleanup_dict(): - pass + elif (cell.counter == -1 + and cell.get_procedure_token() is None): + killme.append(key) + for key in killme: + del jitcell_dict[key] + # + def _maybe_cleanup_dict(): + # Once in a while, rarely, when too many entries have + # been put in the jitdict_dict, we do a cleanup phase: + # we decay all counters and kill entries with a too + # low counter. 
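
The cleanup policy described in that comment can be shown as a stand-alone sketch (the extra rule that drops compiled cells whose machine code was freed is omitted here). ToyJitCell, ToyCellDict and the THRESHOLD_LIMIT value are invented for the example; only the 20000-entry trigger, the 0.92 decay factor and the 5% minimum come from the patch.

class ToyJitCell(object):
    counter = 0

class ToyCellDict(object):
    THRESHOLD_LIMIT = 10 ** 6          # assumed bound, for illustration only

    def __init__(self):
        self.cells = {}
        self.pending = 0

    def get_cell(self, key):
        try:
            return self.cells[key]
        except KeyError:
            self.pending += 1
            if self.pending > 20000:   # rare: decay everything, drop cold cells
                self.pending = 0
                self._cleanup()
            cell = ToyJitCell()
            self.cells[key] = cell
            return cell

    def _cleanup(self):
        minimum = self.THRESHOLD_LIMIT // 20         # 5% of the bound
        for key, cell in list(self.cells.items()):
            cell.counter = int(cell.counter * 0.92)  # decay by 8%
            if cell.counter < minimum:
                del self.cells[key]

d = ToyCellDict()
for i in range(50000):          # cold cells are dropped in batches, so the
    d.get_cell(i)               # dictionary cannot grow without bound
assert len(d.cells) <= 20001
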
+ self._trigger_automatic_cleanup += 1 + if self._trigger_automatic_cleanup > 20000: + self._trigger_automatic_cleanup = 0 + _cleanup_dict() + # + self._trigger_automatic_cleanup = 0 + self._jitcell_dict = jitcell_dict # for tests # def get_jitcell(build, *greenargs): try: @@ -538,7 +501,7 @@ if not build: return None _maybe_cleanup_dict() - cell = self._new_jitcell() + cell = JitCell() jitcell_dict[greenargs] = cell return cell return get_jitcell @@ -549,7 +512,7 @@ get_jitcell_at_ptr = self.jitdriver_sd._get_jitcell_at_ptr set_jitcell_at_ptr = self.jitdriver_sd._set_jitcell_at_ptr lltohlhack = {} - # note that there is no equivalent of record_jitcell_dict() + # note that there is no equivalent of _maybe_cleanup_dict() # in the case of custom getters. We assume that the interpreter # stores the JitCells on some objects that can go away by GC, # like the PyCode objects in PyPy. @@ -574,7 +537,7 @@ if not build: return cell if cell is None: - cell = self._new_jitcell() + cell = JitCell() # if we_are_translated(): cellref = cast_object_to_ptr(BASEJITCELL, cell) diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -45,6 +45,8 @@ import pypy.module.cpyext.longobject import pypy.module.cpyext.listobject import pypy.module.cpyext.sequence +import pypy.module.cpyext.buffer +import pypy.module.cpyext.bufferobject import pypy.module.cpyext.eval import pypy.module.cpyext.import_ import pypy.module.cpyext.mapping diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -317,6 +317,10 @@ INTERPLEVEL_API = {} FUNCTIONS = {} + +# These are C symbols which cpyext will export, but which are defined in .c +# files somewhere in the implementation of cpyext (rather than being defined in +# RPython). SYMBOLS_C = [ 'Py_FatalError', 'PyOS_snprintf', 'PyOS_vsnprintf', 'PyArg_Parse', 'PyArg_ParseTuple', 'PyArg_UnpackTuple', 'PyArg_ParseTupleAndKeywords', diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/buffer.py @@ -0,0 +1,11 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, CANNOT_FAIL, Py_buffer) + + at cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL) +def PyBuffer_IsContiguous(space, view, fortran): + """Return 1 if the memory defined by the view is C-style (fortran is + 'C') or Fortran-style (fortran is 'F') contiguous or either one + (fortran is 'A'). Return 0 otherwise.""" + # PyPy only supports contiguous Py_buffers for now. 
+ return space.wrap(1) diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/bufferobject.py @@ -0,0 +1,66 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, + PyObjectFields, PyObject) +from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef +from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer + + +PyBufferObjectStruct = lltype.ForwardReference() +PyBufferObject = lltype.Ptr(PyBufferObjectStruct) +PyBufferObjectFields = PyObjectFields + ( + ("b_base", PyObject), + ("b_ptr", rffi.VOIDP), + ("b_size", Py_ssize_t), + ("b_offset", Py_ssize_t), + ("b_readonly", rffi.INT), + ("b_hash", rffi.LONG), + ) + +cpython_struct("PyBufferObject", PyBufferObjectFields, PyBufferObjectStruct) + + at bootstrap_function +def init_bufferobject(space): + "Type description of PyBufferObject" + make_typedescr(space.gettypefor(Buffer).instancetypedef, + basestruct=PyBufferObject.TO, + attach=buffer_attach, + dealloc=buffer_dealloc, + realize=buffer_realize) + +def buffer_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyBufferObject with the given (str) buffer object. + """ + py_buf = rffi.cast(PyBufferObject, py_obj) + py_buf.c_b_offset = 0 + rffi.setintfield(py_buf, 'c_b_readonly', 1) + rffi.setintfield(py_buf, 'c_b_hash', -1) + + if isinstance(w_obj, SubBuffer): + py_buf.c_b_offset = w_obj.offset + w_obj = w_obj.buffer + + if isinstance(w_obj, StringBuffer): + py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value) + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.as_str())) + py_buf.c_b_size = w_obj.getlength() + else: + raise Exception("Fail fail fail fail fail") + + +def buffer_realize(space, py_obj): + """ + Creates the buffer in the PyPy interpreter from a cpyext representation. 
+ """ + raise Exception("realize fail fail fail") + + + + at cpython_api([PyObject], lltype.Void, external=False) +def buffer_dealloc(space, py_obj): + py_buf = rffi.cast(PyBufferObject, py_obj) + Py_DecRef(space, py_buf.c_b_base) + rffi.free_charp(rffi.cast(rffi.CCHARP, py_buf.c_b_ptr)) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) diff --git a/pypy/module/cpyext/include/bufferobject.h b/pypy/module/cpyext/include/bufferobject.h --- a/pypy/module/cpyext/include/bufferobject.h +++ b/pypy/module/cpyext/include/bufferobject.h @@ -9,6 +9,17 @@ extern "C" { #endif +typedef struct { + PyObject_HEAD + PyObject *b_base; + void *b_ptr; + Py_ssize_t b_size; + Py_ssize_t b_offset; + int b_readonly; + long b_hash; +} PyBufferObject; + + PyAPI_DATA(PyTypeObject) PyBuffer_Type; #define PyBuffer_Check(op) (((PyObject*)(op))->ob_type == &PyBuffer_Type) diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -234,7 +234,7 @@ writebufferproc bf_getwritebuffer; segcountproc bf_getsegcount; charbufferproc bf_getcharbuffer; - getbufferproc bf_getbuffer; + getbufferproc bf_getbuffer; releasebufferproc bf_releasebuffer; } PyBufferProcs; diff --git a/pypy/module/cpyext/src/bufferobject.c b/pypy/module/cpyext/src/bufferobject.c --- a/pypy/module/cpyext/src/bufferobject.c +++ b/pypy/module/cpyext/src/bufferobject.c @@ -4,17 +4,6 @@ #include "Python.h" -typedef struct { - PyObject_HEAD - PyObject *b_base; - void *b_ptr; - Py_ssize_t b_size; - Py_ssize_t b_offset; - int b_readonly; - long b_hash; -} PyBufferObject; - - enum buffer_t { READ_BUFFER, WRITE_BUFFER, diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -777,18 +777,14 @@ Py_buffer *p = (Py_buffer *)va_arg(*p_va, Py_buffer *); if (PyString_Check(arg)) { + fflush(stdout); PyBuffer_FillInfo(p, arg, PyString_AS_STRING(arg), PyString_GET_SIZE(arg), 1, 0); - } else { - PyErr_SetString( - PyExc_NotImplementedError, - "s* not implemented for non-string values"); - return NULL; - } -#if 0 + } #ifdef Py_USING_UNICODE else if (PyUnicode_Check(arg)) { +#if 0 uarg = UNICODE_DEFAULT_ENCODING(arg); if (uarg == NULL) return converterr(CONV_UNICODE, @@ -796,6 +792,9 @@ PyBuffer_FillInfo(p, arg, PyString_AS_STRING(uarg), PyString_GET_SIZE(uarg), 1, 0); +#else + return converterr("string or buffer", arg, msgbuf, bufsize); +#endif } #endif else { /* any buffer-like object */ @@ -803,7 +802,6 @@ if (getbuffer(arg, p, &buf) < 0) return converterr(buf, arg, msgbuf, bufsize); } -#endif if (addcleanup(p, freelist, cleanup_buffer)) { return converterr( "(cleanup problem)", @@ -1342,7 +1340,6 @@ return count; } -#if 0 //YYY static int getbuffer(PyObject *arg, Py_buffer *view, char **errmsg) { @@ -1373,7 +1370,6 @@ PyBuffer_FillInfo(view, NULL, buf, count, 1, 0); return 0; } -#endif /* Support for keyword arguments donated by Geoff Philbrick */ diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -1,5 +1,5 @@ from pypy.module.cpyext.api import ( - cpython_api, PyObject, PyObjectP, CANNOT_FAIL + cpython_api, PyObject, PyObjectP, CANNOT_FAIL, Py_buffer ) from pypy.module.cpyext.complexobject import Py_complex_ptr as Py_complex from pypy.rpython.lltypesystem import rffi, lltype @@ -10,7 +10,6 @@ PyMethodDef = rffi.VOIDP PyGetSetDef 
= rffi.VOIDP PyMemberDef = rffi.VOIDP -Py_buffer = rffi.VOIDP va_list = rffi.VOIDP PyDateTime_Date = rffi.VOIDP PyDateTime_DateTime = rffi.VOIDP @@ -178,13 +177,6 @@ ~Py_buffer.format.""" raise NotImplementedError - at cpython_api([Py_buffer, lltype.Char], rffi.INT_real, error=CANNOT_FAIL) -def PyBuffer_IsContiguous(space, view, fortran): - """Return 1 if the memory defined by the view is C-style (fortran is - 'C') or Fortran-style (fortran is 'F') contiguous or either one - (fortran is 'A'). Return 0 otherwise.""" - raise NotImplementedError - @cpython_api([rffi.INT_real, Py_ssize_t, Py_ssize_t, Py_ssize_t, lltype.Char], lltype.Void) def PyBuffer_FillContiguousStrides(space, ndim, shape, strides, itemsize, fortran): """Fill the strides array with byte-strides of a contiguous (C-style if diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -129,6 +129,21 @@ assert 'foo\0bar\0baz' == pybuffer('foo\0bar\0baz') + def test_pyarg_parse_string_old_buffer(self): + pybuffer = self.import_parser( + ''' + Py_buffer buf; + PyObject *result; + if (!PyArg_ParseTuple(args, "s*", &buf)) { + return NULL; + } + result = PyString_FromStringAndSize(buf.buf, buf.len); + PyBuffer_Release(&buf); + return result; + ''') + assert 'foo\0bar\0baz' == pybuffer(buffer('foo\0bar\0baz')) + + def test_pyarg_parse_charbuf_and_length(self): """ The `t#` format specifier can be used to parse a read-only 8-bit diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -28,6 +28,7 @@ PyNumberMethods, PyMappingMethods, PySequenceMethods, PyBufferProcs) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) +from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError from pypy.rlib.rstring import rsplit from pypy.rlib.objectmodel import specialize @@ -418,8 +419,21 @@ Py_DecRef(space, pyref) return space.len_w(w_str) + at cpython_api([PyObject, Py_ssize_t, rffi.VOIDPP], lltype.Signed, + external=False, error=-1) +def buf_getreadbuffer(space, pyref, segment, ref): + from pypy.module.cpyext.bufferobject import PyBufferObject + if segment != 0: + raise OperationError(space.w_SystemError, space.wrap + ("accessing non-existent string segment")) + py_buf = rffi.cast(PyBufferObject, pyref) + ref[0] = py_buf.c_b_ptr + #Py_DecRef(space, pyref) + return py_buf.c_b_size + def setup_string_buffer_procs(space, pto): c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) + lltype.render_immortal(c_buf) c_buf.c_bf_getsegcount = llhelper(str_segcount.api_func.functype, str_segcount.api_func.get_wrapper(space)) c_buf.c_bf_getreadbuffer = llhelper(str_getreadbuffer.api_func.functype, @@ -429,6 +443,15 @@ pto.c_tp_as_buffer = c_buf pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER +def setup_buffer_buffer_procs(space, pto): + c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) + lltype.render_immortal(c_buf) + c_buf.c_bf_getsegcount = llhelper(str_segcount.api_func.functype, + str_segcount.api_func.get_wrapper(space)) + c_buf.c_bf_getreadbuffer = llhelper(buf_getreadbuffer.api_func.functype, + buf_getreadbuffer.api_func.get_wrapper(space)) + pto.c_tp_as_buffer = c_buf + @cpython_api([PyObject], lltype.Void, external=False) def type_dealloc(space, obj): from pypy.module.cpyext.object import PyObject_dealloc @@ 
-484,6 +507,8 @@ # buffer protocol if space.is_w(w_type, space.w_str): setup_string_buffer_procs(space, pto) + if space.is_w(w_type, space.gettypefor(Buffer)): + setup_buffer_buffer_procs(space, pto) pto.c_tp_free = llhelper(PyObject_Del.api_func.functype, PyObject_Del.api_func.get_wrapper(space)) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -21,7 +21,6 @@ _immutable_fields_ = ["itemtype", "num", "kind"] def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[]): - self.signature = signature.BaseSignature() self.itemtype = itemtype self.num = num self.kind = kind @@ -228,4 +227,4 @@ ) def get_dtype_cache(space): - return space.fromcache(DtypeCache) \ No newline at end of file + return space.fromcache(DtypeCache) diff --git a/pypy/module/micronumpy/interp_extras.py b/pypy/module/micronumpy/interp_extras.py --- a/pypy/module/micronumpy/interp_extras.py +++ b/pypy/module/micronumpy/interp_extras.py @@ -4,4 +4,4 @@ @unwrap_spec(array=BaseArray) def debug_repr(space, array): - return space.wrap(array.debug_repr()) + return space.wrap(array.find_sig().debug_repr()) diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_iter.py @@ -0,0 +1,104 @@ + +from pypy.rlib import jit +from pypy.rlib.objectmodel import instantiate +from pypy.module.micronumpy.strides import calculate_broadcast_strides + +# Iterators for arrays +# -------------------- +# all those iterators with the exception of BroadcastIterator iterate over the +# entire array in C order (the last index changes the fastest). This will +# yield all elements. Views iterate over indices and look towards strides and +# backstrides to find the correct position. Notably the offset between +# x[..., i + 1] and x[..., i] will be strides[-1]. Offset between +# x[..., k + 1, 0] and x[..., k, i_max] will be backstrides[-2] etc. 
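
A small worked example, with made-up numbers (a C-ordered 3x4 array and an itemsize of one element), may make the offset bookkeeping above concrete:

shape = [3, 4]                  # C order: the last index changes the fastest
strides = [4, 1]                # x[i+1, j] lies 4 items after x[i, j]
backstrides = [(3 - 1) * 4,     # 8: distance from x[2, j] back to x[0, j]
               (4 - 1) * 1]     # 3: distance from x[i, 3] back to x[i, 0]

offset_of = lambda i, j: i * strides[0] + j * strides[1]

# Advancing along the last axis adds strides[-1]; when that axis wraps around,
# the iterator subtracts backstrides[-1] and then adds strides[-2], which is
# exactly what ViewIterator.next() below does:
assert offset_of(0, 1) - offset_of(0, 0) == strides[-1]
assert offset_of(1, 0) == offset_of(0, 3) - backstrides[-1] + strides[-2]
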
+ +# BroadcastIterator works like that, but for indexes that don't change source +# in the original array, strides[i] == backstrides[i] == 0 + +class BaseIterator(object): + def next(self, shapelen): + raise NotImplementedError + + def done(self): + raise NotImplementedError + +class ArrayIterator(BaseIterator): + def __init__(self, size): + self.offset = 0 + self.size = size + + def next(self, shapelen): + arr = instantiate(ArrayIterator) + arr.size = self.size + arr.offset = self.offset + 1 + return arr + + def done(self): + return self.offset >= self.size + +class OneDimIterator(BaseIterator): + def __init__(self, start, step, stop): + self.offset = start + self.step = step + self.size = stop * step + start + + def next(self, shapelen): + arr = instantiate(OneDimIterator) + arr.size = self.size + arr.step = self.step + arr.offset = self.offset + self.step + return arr + + def done(self): + return self.offset == self.size + +def view_iter_from_arr(arr): + return ViewIterator(arr.start, arr.strides, arr.backstrides, arr.shape) + +class ViewIterator(BaseIterator): + def __init__(self, start, strides, backstrides, shape, res_shape=None): + self.offset = start + self._done = False + if res_shape is not None and res_shape != shape: + r = calculate_broadcast_strides(strides, backstrides, + shape, res_shape) + self.strides, self.backstrides = r + self.res_shape = res_shape + else: + self.strides = strides + self.backstrides = backstrides + self.res_shape = shape + self.indices = [0] * len(self.res_shape) + + @jit.unroll_safe + def next(self, shapelen): + offset = self.offset + indices = [0] * shapelen + for i in range(shapelen): + indices[i] = self.indices[i] + done = False + for i in range(shapelen - 1, -1, -1): + if indices[i] < self.res_shape[i] - 1: + indices[i] += 1 + offset += self.strides[i] + break + else: + indices[i] = 0 + offset -= self.backstrides[i] + else: + done = True + res = instantiate(ViewIterator) + res.offset = offset + res.indices = indices + res.strides = self.strides + res.backstrides = self.backstrides + res.res_shape = self.res_shape + res._done = done + return res + + def done(self): + return self._done + +class ConstantIterator(BaseIterator): + def next(self, shapelen): + return self diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -3,28 +3,33 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import interp_ufuncs, interp_dtype, signature +from pypy.module.micronumpy.strides import calculate_slice_strides from pypy.rlib import jit from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name from pypy.rlib.rstring import StringBuilder -from pypy.rlib.objectmodel import instantiate - +from pypy.module.micronumpy.interp_iter import ArrayIterator,\ + view_iter_from_arr, OneDimIterator numpy_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['result_size', 'i', 'ri', 'self', 'result'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['result_size', 'frame', 'ri', 'self', 'result'] ) all_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['i', 'self', 'dtype'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['frame', 'self', 'dtype'] ) any_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - 
reds=['i', 'self', 'dtype'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['frame', 'self', 'dtype'] ) slice_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['self', 'source', 'source_iter', 'res_iter'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['self', 'frame', 'source', 'res_iter'] ) def _find_shape_and_elems(space, w_iterable): @@ -198,231 +203,17 @@ n_old_elems_to_use *= old_shape[oldI] return new_strides -# Iterators for arrays -# -------------------- -# all those iterators with the exception of BroadcastIterator iterate over the -# entire array in C order (the last index changes the fastest). This will -# yield all elements. Views iterate over indices and look towards strides and -# backstrides to find the correct position. Notably the offset between -# x[..., i + 1] and x[..., i] will be strides[-1]. Offset between -# x[..., k + 1, 0] and x[..., k, i_max] will be backstrides[-2] etc. +class BaseArray(Wrappable): + _attrs_ = ["invalidates", "shape", 'size'] -# BroadcastIterator works like that, but for indexes that don't change source -# in the original array, strides[i] == backstrides[i] == 0 - -class BaseIterator(object): - def next(self, shapelen): - raise NotImplementedError - - def done(self): - raise NotImplementedError - - def get_offset(self): - raise NotImplementedError - -class ArrayIterator(BaseIterator): - def __init__(self, size): - self.offset = 0 - self.size = size - - def next(self, shapelen): - arr = instantiate(ArrayIterator) - arr.size = self.size - arr.offset = self.offset + 1 - return arr - - def done(self): - return self.offset >= self.size - - def get_offset(self): - return self.offset - -class OneDimIterator(BaseIterator): - def __init__(self, start, step, stop): - self.offset = start - self.step = step - self.size = stop * step + start - - def next(self, shapelen): - arr = instantiate(OneDimIterator) - arr.size = self.size - arr.step = self.step - arr.offset = self.offset + self.step - return arr - - def done(self): - return self.offset == self.size - - def get_offset(self): - return self.offset - -class ViewIterator(BaseIterator): - def __init__(self, arr): - self.indices = [0] * len(arr.shape) - self.offset = arr.start - self.arr = arr - self._done = False - - @jit.unroll_safe - def next(self, shapelen): - offset = self.offset - indices = [0] * shapelen - for i in range(shapelen): - indices[i] = self.indices[i] - done = False - for i in range(shapelen - 1, -1, -1): - if indices[i] < self.arr.shape[i] - 1: - indices[i] += 1 - offset += self.arr.strides[i] - break - else: - indices[i] = 0 - offset -= self.arr.backstrides[i] - else: - done = True - res = instantiate(ViewIterator) - res.offset = offset - res.indices = indices - res.arr = self.arr - res._done = done - return res - - def done(self): - return self._done - - def get_offset(self): - return self.offset - -class BroadcastIterator(BaseIterator): - '''Like a view iterator, but will repeatedly access values - for all iterations across a res_shape, folding the offset - using mod() arithmetic - ''' - def __init__(self, arr, res_shape): - self.indices = [0] * len(res_shape) - self.offset = arr.start - #strides are 0 where original shape==1 - self.strides = [] - self.backstrides = [] - for i in range(len(arr.shape)): - if arr.shape[i] == 1: - self.strides.append(0) - self.backstrides.append(0) - else: - self.strides.append(arr.strides[i]) - self.backstrides.append(arr.backstrides[i]) - self.res_shape = res_shape - self.strides = [0] * 
(len(res_shape) - len(arr.shape)) + self.strides - self.backstrides = [0] * (len(res_shape) - len(arr.shape)) + self.backstrides - self._done = False - - @jit.unroll_safe - def next(self, shapelen): - offset = self.offset - indices = [0] * shapelen - _done = False - for i in range(shapelen): - indices[i] = self.indices[i] - for i in range(shapelen - 1, -1, -1): - if indices[i] < self.res_shape[i] - 1: - indices[i] += 1 - offset += self.strides[i] - break - else: - indices[i] = 0 - offset -= self.backstrides[i] - else: - _done = True - res = instantiate(BroadcastIterator) - res.indices = indices - res.offset = offset - res._done = _done - res.strides = self.strides - res.backstrides = self.backstrides - res.res_shape = self.res_shape - return res - - def done(self): - return self._done - - def get_offset(self): - return self.offset - -class Call2Iterator(BaseIterator): - def __init__(self, left, right): - self.left = left - self.right = right - - def next(self, shapelen): - return Call2Iterator(self.left.next(shapelen), - self.right.next(shapelen)) - - def done(self): - if isinstance(self.left, ConstantIterator): - return self.right.done() - return self.left.done() - - def get_offset(self): - if isinstance(self.left, ConstantIterator): - return self.right.get_offset() - return self.left.get_offset() - -class Call1Iterator(BaseIterator): - def __init__(self, child): - self.child = child - - def next(self, shapelen): - return Call1Iterator(self.child.next(shapelen)) - - def done(self): - return self.child.done() - - def get_offset(self): - return self.child.get_offset() - -class ConstantIterator(BaseIterator): - def next(self, shapelen): - return self - - def done(self): - return False - - def get_offset(self): - return 0 - - -class BaseArray(Wrappable): - _attrs_ = ["invalidates", "signature", "shape", "strides", "backstrides", - "start", 'order'] - - _immutable_fields_ = ['start', "order"] + _immutable_fields_ = [] strides = None start = 0 - def __init__(self, shape, order): + def __init__(self, shape): self.invalidates = [] self.shape = shape - self.order = order - if self.strides is None: - self.calc_strides(shape) - - def calc_strides(self, shape): - strides = [] - backstrides = [] - s = 1 - shape_rev = shape[:] - if self.order == 'C': - shape_rev.reverse() - for sh in shape_rev: - strides.append(s) - backstrides.append(s * (sh - 1)) - s *= sh - if self.order == 'C': - strides.reverse() - backstrides.reverse() - self.strides = strides[:] - self.backstrides = backstrides[:] def invalidated(self): if self.invalidates: @@ -499,33 +290,34 @@ def _reduce_argmax_argmin_impl(op_name): reduce_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['result', 'idx', 'i', 'self', 'cur_best', 'dtype'] + greens=['shapelen', 'sig'], + reds=['result', 'idx', 'frame', 'self', 'cur_best', 'dtype'] ) def loop(self): - i = self.start_iter() - cur_best = self.eval(i) + sig = self.find_sig() + frame = sig.create_frame(self) + cur_best = sig.eval(frame, self) shapelen = len(self.shape) - i = i.next(shapelen) + frame.next(shapelen) dtype = self.find_dtype() result = 0 idx = 1 - while not i.done(): - reduce_driver.jit_merge_point(signature=self.signature, + while not frame.done(): + reduce_driver.jit_merge_point(sig=sig, shapelen=shapelen, self=self, dtype=dtype, - i=i, result=result, idx=idx, + frame=frame, result=result, + idx=idx, cur_best=cur_best) - new_best = getattr(dtype.itemtype, op_name)(cur_best, self.eval(i)) + new_best = getattr(dtype.itemtype, op_name)(cur_best, sig.eval(frame, self)) 
if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best - i = i.next(shapelen) + frame.next(shapelen) idx += 1 return result def impl(self, space): - size = self.find_size() - if size == 0: + if self.size == 0: raise OperationError(space.w_ValueError, space.wrap("Can't call %s on zero-size arrays" % op_name)) return space.wrap(loop(self)) @@ -533,15 +325,16 @@ def _all(self): dtype = self.find_dtype() - i = self.start_iter() + sig = self.find_sig() + frame = sig.create_frame(self) shapelen = len(self.shape) - while not i.done(): - all_driver.jit_merge_point(signature=self.signature, + while not frame.done(): + all_driver.jit_merge_point(sig=sig, shapelen=shapelen, self=self, - dtype=dtype, i=i) - if not dtype.itemtype.bool(self.eval(i)): + dtype=dtype, frame=frame) + if not dtype.itemtype.bool(sig.eval(frame, self)): return False - i = i.next(shapelen) + frame.next(shapelen) return True def descr_all(self, space): @@ -549,15 +342,16 @@ def _any(self): dtype = self.find_dtype() - i = self.start_iter() + sig = self.find_sig() + frame = sig.create_frame(self) shapelen = len(self.shape) - while not i.done(): - any_driver.jit_merge_point(signature=self.signature, + while not frame.done(): + any_driver.jit_merge_point(sig=sig, frame=frame, shapelen=shapelen, self=self, - dtype=dtype, i=i) - if dtype.itemtype.bool(self.eval(i)): + dtype=dtype) + if dtype.itemtype.bool(sig.eval(frame, self)): return True - i = i.next(shapelen) + frame.next(shapelen) return False def descr_any(self, space): @@ -586,26 +380,33 @@ return space.newtuple([space.wrap(i) for i in self.shape]) def descr_set_shape(self, space, w_iterable): - concrete = self.get_concrete() new_shape = get_shape_from_iterable(space, - concrete.find_size(), w_iterable) - concrete.setshape(space, new_shape) + self.size, w_iterable) + if isinstance(self, Scalar): + return + self.get_concrete().setshape(space, new_shape) def descr_get_size(self, space): - return space.wrap(self.find_size()) + return space.wrap(self.size) def descr_copy(self, space): + return self.copy() + + def copy(self): return self.get_concrete().copy() def descr_len(self, space): - return self.get_concrete().descr_len(space) + if len(self.shape): + return space.wrap(self.shape[0]) + raise OperationError(space.w_TypeError, space.wrap( + "len() of unsized object")) def descr_repr(self, space): res = StringBuilder() res.append("array(") concrete = self.get_concrete() dtype = concrete.find_dtype() - if not concrete.find_size(): + if not concrete.size: res.append('[]') if len(self.shape) > 1: # An empty slice reports its shape @@ -617,18 +418,417 @@ concrete.to_str(space, 1, res, indent=' ') if (dtype is not interp_dtype.get_dtype_cache(space).w_float64dtype and dtype is not interp_dtype.get_dtype_cache(space).w_int64dtype) or \ - not self.find_size(): + not self.size: res.append(", dtype=" + dtype.name) res.append(")") return space.wrap(res.build()) + def descr_str(self, space): + ret = StringBuilder() + concrete = self.get_concrete_or_scalar() + concrete.to_str(space, 0, ret, ' ') + return space.wrap(ret.build()) + + @jit.unroll_safe + def _single_item_result(self, space, w_idx): + """ The result of getitem/setitem is a single item if w_idx + is a list of scalars that match the size of shape + """ + shape_len = len(self.shape) + if shape_len == 0: + raise OperationError(space.w_IndexError, space.wrap( + "0-d arrays can't be indexed")) + if shape_len == 1: + if space.isinstance_w(w_idx, space.w_int): + return True + if space.isinstance_w(w_idx, space.w_slice): 
+ return False + elif (space.isinstance_w(w_idx, space.w_slice) or + space.isinstance_w(w_idx, space.w_int)): + return False + lgt = space.len_w(w_idx) + if lgt > shape_len: + raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + if lgt < shape_len: + return False + for w_item in space.fixedview(w_idx): + if space.isinstance_w(w_item, space.w_slice): + return False + return True + + @jit.unroll_safe + def _prepare_slice_args(self, space, w_idx): + if (space.isinstance_w(w_idx, space.w_int) or + space.isinstance_w(w_idx, space.w_slice)): + return [space.decode_index4(w_idx, self.shape[0])] + return [space.decode_index4(w_item, self.shape[i]) for i, w_item in + enumerate(space.fixedview(w_idx))] + + def descr_getitem(self, space, w_idx): + if self._single_item_result(space, w_idx): + concrete = self.get_concrete() + item = concrete._index_of_single_item(space, w_idx) + return concrete.getitem(item) + chunks = self._prepare_slice_args(space, w_idx) + return space.wrap(self.create_slice(chunks)) + + def descr_setitem(self, space, w_idx, w_value): + self.invalidated() + if self._single_item_result(space, w_idx): + concrete = self.get_concrete() + item = concrete._index_of_single_item(space, w_idx) + dtype = concrete.find_dtype() + concrete.setitem(item, dtype.coerce(space, w_value)) + return + if not isinstance(w_value, BaseArray): + w_value = convert_to_array(space, w_value) + chunks = self._prepare_slice_args(space, w_idx) + view = self.create_slice(chunks).get_concrete() + view.setslice(space, w_value) + + @jit.unroll_safe + def create_slice(self, chunks): + shape = [] + i = -1 + for i, (start_, stop, step, lgt) in enumerate(chunks): + if step != 0: + shape.append(lgt) + s = i + 1 + assert s >= 0 + shape += self.shape[s:] + if not isinstance(self, ConcreteArray): + return VirtualSlice(self, chunks, shape) + r = calculate_slice_strides(self.shape, self.start, self.strides, + self.backstrides, chunks) + _, start, strides, backstrides = r + return W_NDimSlice(start, strides[:], backstrides[:], + shape[:], self) + + def descr_reshape(self, space, args_w): + """reshape(...) + a.reshape(shape) + + Returns an array containing the same data with a new shape. + + Refer to `numpypy.reshape` for full documentation. + + See Also + -------- + numpypy.reshape : equivalent function + """ + if len(args_w) == 1: + w_shape = args_w[0] + else: + w_shape = space.newtuple(args_w) + concrete = self.get_concrete() + new_shape = get_shape_from_iterable(space, concrete.size, w_shape) + # Since we got to here, prod(new_shape) == self.size + new_strides = calc_new_strides(new_shape, + concrete.shape, concrete.strides) + if new_strides: + # We can create a view, strides somehow match up. 
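
As a rough illustration of why the stride recomputation can fail and force the copy fallback a few lines further down: a reshape can stay a view only if the elements, visited in C order of the new shape, still sit at offsets that some set of strides can describe. The helper below is hypothetical and not part of the module.

def row_major_offsets(shape, strides):
    # offsets of every element, visited in C order of `shape`
    offsets = [0]
    for sh, st in zip(shape, strides):
        offsets = [base + i * st for base in offsets for i in range(sh)]
    return offsets

# A contiguous 2x6 array reshaped to 3x4 keeps the same offsets, so the new
# strides (4, 1) describe the same memory and no copy is needed:
assert row_major_offsets([2, 6], [6, 1]) == row_major_offsets([3, 4], [4, 1])

# A transposed (non-contiguous) 2x3 view has strides (1, 2); flattening it
# would need offsets 0, 2, 4, 1, 3, 5, which no strides of shape (6,) can
# produce, so that case has to fall back to a contiguous copy instead:
assert row_major_offsets([2, 3], [1, 2]) == [0, 2, 4, 1, 3, 5]
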
+ ndims = len(new_shape) + new_backstrides = [0] * ndims + for nd in range(ndims): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + arr = W_NDimSlice(concrete.start, new_strides, new_backstrides, + new_shape, self) + else: + # Create copy with contiguous data + arr = concrete.copy() + arr.setshape(space, new_shape) + return arr + + def descr_tolist(self, space): + if len(self.shape) == 0: + assert isinstance(self, Scalar) + return self.value.descr_tolist(space) + w_result = space.newlist([]) + for i in range(self.shape[0]): + space.call_method(w_result, "append", + space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist") + ) + return w_result + + def descr_mean(self, space): + return space.div(self.descr_sum(space), space.wrap(self.size)) + + def descr_nonzero(self, space): + if self.size > 1: + raise OperationError(space.w_ValueError, space.wrap( + "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) + concr = self.get_concrete_or_scalar() + sig = concr.find_sig() + frame = sig.create_frame(self) + return space.wrap(space.is_true( + sig.eval(frame, concr))) + + def get_concrete_or_scalar(self): + return self.get_concrete() + + def descr_get_transpose(self, space): + concrete = self.get_concrete() + if len(concrete.shape) < 2: + return space.wrap(self) + strides = [] + backstrides = [] + shape = [] + for i in range(len(concrete.shape) - 1, -1, -1): + strides.append(concrete.strides[i]) + backstrides.append(concrete.backstrides[i]) + shape.append(concrete.shape[i]) + return space.wrap(W_NDimSlice(concrete.start, strides[:], + backstrides[:], shape[:], concrete)) + + def descr_get_flatiter(self, space): + return space.wrap(W_FlatIterator(self)) + + def getitem(self, item): + raise NotImplementedError + + def find_sig(self, res_shape=None): + """ find a correct signature for the array + """ + res_shape = res_shape or self.shape + return signature.find_sig(self.create_sig(res_shape), self) + + def descr_array_iface(self, space): + if not self.shape: + raise OperationError(space.w_TypeError, + space.wrap("can't get the array data of a 0-d array for now") + ) + concrete = self.get_concrete() + storage = concrete.storage + addr = rffi.cast(lltype.Signed, storage) + w_d = space.newdict() + space.setitem_str(w_d, 'data', space.newtuple([space.wrap(addr), + space.w_False])) + return w_d + +def convert_to_array(space, w_obj): + if isinstance(w_obj, BaseArray): + return w_obj + elif space.issequence_w(w_obj): + # Convert to array. + return array(space, w_obj, w_order=None) + else: + # If it's a scalar + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) + return scalar_w(space, dtype, w_obj) + +def scalar_w(space, dtype, w_obj): + return Scalar(dtype, dtype.coerce(space, w_obj)) + +class Scalar(BaseArray): + """ + Intermediate class representing a literal. 
+ """ + size = 1 + _attrs_ = ["dtype", "value", "shape"] + + def __init__(self, dtype, value): + self.shape = [] + BaseArray.__init__(self, []) + self.dtype = dtype + self.value = value + + def find_dtype(self): + return self.dtype + + def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): + builder.append(self.dtype.itemtype.str_format(self.value)) + + def copy(self): + return Scalar(self.dtype, self.value) + + def create_sig(self, res_shape): + return signature.ScalarSignature(self.dtype) + + def get_concrete_or_scalar(self): + return self + + +class VirtualArray(BaseArray): + """ + Class for representing virtual arrays, such as binary ops or ufuncs + """ + def __init__(self, name, shape, res_dtype): + BaseArray.__init__(self, shape) + self.forced_result = None + self.res_dtype = res_dtype + self.name = name + + def _del_sources(self): + # Function for deleting references to source arrays, to allow garbage-collecting them + raise NotImplementedError + + def compute(self): + result = W_NDimArray(self.size, self.shape, self.find_dtype()) + shapelen = len(self.shape) + sig = self.find_sig() + frame = sig.create_frame(self) + ri = ArrayIterator(self.size) + while not ri.done(): + numpy_driver.jit_merge_point(sig=sig, + shapelen=shapelen, + result_size=self.size, + frame=frame, + ri=ri, + self=self, result=result) + result.dtype.setitem(result.storage, ri.offset, + sig.eval(frame, self)) + frame.next(shapelen) + ri = ri.next(shapelen) + return result + + def force_if_needed(self): + if self.forced_result is None: + self.forced_result = self.compute() + self._del_sources() + + def get_concrete(self): + self.force_if_needed() + res = self.forced_result + assert isinstance(res, ConcreteArray) + return res + + def getitem(self, item): + return self.get_concrete().getitem(item) + + def setitem(self, item, value): + return self.get_concrete().setitem(item, value) + + def find_dtype(self): + return self.res_dtype + +class VirtualSlice(VirtualArray): + def __init__(self, child, chunks, shape): + size = 1 + for sh in shape: + size *= sh + self.child = child + self.chunks = chunks + self.size = size + VirtualArray.__init__(self, 'slice', shape, child.find_dtype()) + + def create_sig(self, res_shape): + if self.forced_result is not None: + return self.forced_result.create_sig(res_shape) + return signature.VirtualSliceSignature( + self.child.create_sig(res_shape)) + + def force_if_needed(self): + if self.forced_result is None: + concr = self.child.get_concrete() + self.forced_result = concr.create_slice(self.chunks) + + def _del_sources(self): + self.child = None + +class Call1(VirtualArray): + def __init__(self, ufunc, name, shape, res_dtype, values): + VirtualArray.__init__(self, name, shape, res_dtype) + self.values = values + self.size = values.size + self.ufunc = ufunc + + def _del_sources(self): + self.values = None + + def create_sig(self, res_shape): + if self.forced_result is not None: + return self.forced_result.create_sig(res_shape) + return signature.Call1(self.ufunc, self.name, + self.values.create_sig(res_shape)) + +class Call2(VirtualArray): + """ + Intermediate class for performing binary operations. 
+ """ + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right): + VirtualArray.__init__(self, name, shape, res_dtype) + self.ufunc = ufunc + self.left = left + self.right = right + self.calc_dtype = calc_dtype + self.size = 1 + for s in self.shape: + self.size *= s + + def _del_sources(self): + self.left = None + self.right = None + + def create_sig(self, res_shape): + if self.forced_result is not None: + return self.forced_result.create_sig(res_shape) + return signature.Call2(self.ufunc, self.name, self.calc_dtype, + self.left.create_sig(res_shape), + self.right.create_sig(res_shape)) + +class ConcreteArray(BaseArray): + """ An array that have actual storage, whether owned or not + """ + _immutable_fields_ = ['storage'] + + def __init__(self, size, shape, dtype, order='C', parent=None): + self.size = size + self.parent = parent + if parent is not None: + self.storage = parent.storage + else: + self.storage = dtype.malloc(size) + self.order = order + self.dtype = dtype + if self.strides is None: + self.calc_strides(shape) + BaseArray.__init__(self, shape) + if parent is not None: + self.invalidates = parent.invalidates + + def get_concrete(self): + return self + + def find_dtype(self): + return self.dtype + + def getitem(self, item): + return self.dtype.getitem(self.storage, item) + + def setitem(self, item, value): + self.invalidated() + self.dtype.setitem(self.storage, item, value) + + def calc_strides(self, shape): + strides = [] + backstrides = [] + s = 1 + shape_rev = shape[:] + if self.order == 'C': + shape_rev.reverse() + for sh in shape_rev: + strides.append(s) + backstrides.append(s * (sh - 1)) + s *= sh + if self.order == 'C': + strides.reverse() + backstrides.reverse() + self.strides = strides[:] + self.backstrides = backstrides[:] + + def array_sig(self, res_shape): + if res_shape is not None and self.shape != res_shape: + return signature.ViewSignature(self.dtype) + return signature.ArraySignature(self.dtype) + def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): '''Modifies builder with a representation of the array/slice The items will be seperated by a comma if comma is 1 Multidimensional arrays/slices will span a number of lines, each line will begin with indent. 
''' - size = self.find_size() + size = self.size if size < 1: builder.append('[]') return @@ -654,7 +854,7 @@ builder.append(indent) # create_slice requires len(chunks) > 1 in order to reduce # shape - view = self.create_slice(space, [(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) + view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]).get_concrete() view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) builder.append('\n' + indent + '..., ') i = self.shape[0] - 3 @@ -669,7 +869,7 @@ builder.append(indent) # create_slice requires len(chunks) > 1 in order to reduce # shape - view = self.create_slice(space, [(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) + view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]).get_concrete() view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) i += 1 elif ndims == 1: @@ -705,12 +905,6 @@ builder.append('[') builder.append(']') - def descr_str(self, space): - ret = StringBuilder() - concrete = self.get_concrete() - concrete.to_str(space, 0, ret, ' ') - return space.wrap(ret.build()) - @jit.unroll_safe def _index_of_single_item(self, space, w_idx): if space.isinstance_w(w_idx, space.w_int): @@ -735,456 +929,55 @@ item += v * self.strides[i] return item - @jit.unroll_safe - def _single_item_result(self, space, w_idx): - """ The result of getitem/setitem is a single item if w_idx - is a list of scalars that match the size of shape - """ - shape_len = len(self.shape) - if shape_len == 0: - if not space.isinstance_w(w_idx, space.w_int): - raise OperationError(space.w_IndexError, space.wrap( - "wrong index")) - return True - if shape_len == 1: - if space.isinstance_w(w_idx, space.w_int): - return True - if space.isinstance_w(w_idx, space.w_slice): - return False - elif (space.isinstance_w(w_idx, space.w_slice) or - space.isinstance_w(w_idx, space.w_int)): - return False - lgt = space.len_w(w_idx) - if lgt > shape_len: - raise OperationError(space.w_IndexError, - space.wrap("invalid index")) - if lgt < shape_len: - return False - for w_item in space.fixedview(w_idx): - if space.isinstance_w(w_item, space.w_slice): - return False - return True - @jit.unroll_safe - def _prepare_slice_args(self, space, w_idx): - if (space.isinstance_w(w_idx, space.w_int) or - space.isinstance_w(w_idx, space.w_slice)): - return [space.decode_index4(w_idx, self.shape[0])] - return [space.decode_index4(w_item, self.shape[i]) for i, w_item in - enumerate(space.fixedview(w_idx))] +class ViewArray(ConcreteArray): + def copy(self): + array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) + iter = view_iter_from_arr(self) + a_iter = ArrayIterator(array.size) + while not iter.done(): + array.setitem(a_iter.offset, self.getitem(iter.offset)) + iter = iter.next(len(self.shape)) + a_iter = a_iter.next(len(array.shape)) + return array - def descr_getitem(self, space, w_idx): - if self._single_item_result(space, w_idx): - concrete = self.get_concrete() - if len(concrete.shape) < 1: - raise OperationError(space.w_IndexError, space.wrap( - "0-d arrays can't be indexed")) - item = concrete._index_of_single_item(space, w_idx) - return concrete.getitem(item) - chunks = self._prepare_slice_args(space, w_idx) - return space.wrap(self.create_slice(space, chunks)) + def create_sig(self, res_shape): + return signature.ViewSignature(self.dtype) - def descr_setitem(self, space, w_idx, w_value): - self.invalidated() - if self._single_item_result(space, w_idx): - concrete = 
self.get_concrete() - if len(concrete.shape) < 1: - raise OperationError(space.w_IndexError, space.wrap( - "0-d arrays can't be indexed")) - item = concrete._index_of_single_item(space, w_idx) - dtype = concrete.find_dtype() - concrete.setitem(item, dtype.coerce(space, w_value)) - return - if not isinstance(w_value, BaseArray): - w_value = convert_to_array(space, w_value) - chunks = self._prepare_slice_args(space, w_idx) - view = self.create_slice(space, chunks) - view.setslice(space, w_value) - @jit.unroll_safe - def create_slice(self, space, chunks): - if len(chunks) == 1: - start, stop, step, lgt = chunks[0] - if step == 0: - shape = self.shape[1:] - strides = self.strides[1:] - backstrides = self.backstrides[1:] - else: - shape = [lgt] + self.shape[1:] - strides = [self.strides[0] * step] + self.strides[1:] - backstrides = [(lgt - 1) * self.strides[0] * step] + self.backstrides[1:] - start *= self.strides[0] - start += self.start - else: - shape = [] - strides = [] - backstrides = [] - start = self.start - i = -1 - for i, (start_, stop, step, lgt) in enumerate(chunks): - if step != 0: - shape.append(lgt) - strides.append(self.strides[i] * step) - backstrides.append(self.strides[i] * (lgt - 1) * step) - start += self.strides[i] * start_ - # add a reminder - s = i + 1 - assert s >= 0 - shape += self.shape[s:] - strides += self.strides[s:] - backstrides += self.backstrides[s:] - new_sig = signature.Signature.find_sig([ - W_NDimSlice.signature, self.signature, - ]) - return W_NDimSlice(self, new_sig, start, strides[:], backstrides[:], - shape[:]) - - def descr_reshape(self, space, args_w): - """reshape(...) - a.reshape(shape) - - Returns an array containing the same data with a new shape. - - Refer to `numpypy.reshape` for full documentation. - - See Also - -------- - numpypy.reshape : equivalent function -""" - if len(args_w) == 1: - w_shape = args_w[0] - else: - w_shape = space.newtuple(args_w) - concrete = self.get_concrete() - new_shape = get_shape_from_iterable(space, - concrete.find_size(), w_shape) - # Since we got to here, prod(new_shape) == self.size - new_strides = calc_new_strides(new_shape, - concrete.shape, concrete.strides) - if new_strides: - # We can create a view, strides somehow match up. - new_sig = signature.Signature.find_sig([ - W_NDimSlice.signature, self.signature - ]) - ndims = len(new_shape) - new_backstrides = [0] * ndims - for nd in range(ndims): - new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] - arr = W_NDimSlice(self, new_sig, self.start, new_strides, - new_backstrides, new_shape) - else: - # Create copy with contiguous data - arr = concrete.copy() - arr.setshape(space, new_shape) - return arr - - def descr_tolist(self, space): - if len(self.shape) == 0: - assert isinstance(self, Scalar) - return self.value.descr_tolist(space) - w_result = space.newlist([]) - for i in range(self.shape[0]): - space.call_method(w_result, "append", - space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist") - ) - return w_result - - def descr_mean(self, space): - return space.div(self.descr_sum(space), space.wrap(self.find_size())) - - def descr_nonzero(self, space): - if self.find_size() > 1: - raise OperationError(space.w_ValueError, space.wrap( - "The truth value of an array with more than one element is ambiguous. 
Use a.any() or a.all()")) - return space.wrap(space.is_true( - self.get_concrete().eval(self.start_iter(self.shape)) - )) - - def descr_get_transpose(self, space): - concrete = self.get_concrete() - if len(concrete.shape) < 2: - return space.wrap(self) - new_sig = signature.Signature.find_sig([ - W_NDimSlice.signature, self.signature - ]) - strides = [] - backstrides = [] - shape = [] - for i in range(len(concrete.shape) - 1, -1, -1): - strides.append(concrete.strides[i]) - backstrides.append(concrete.backstrides[i]) - shape.append(concrete.shape[i]) - return space.wrap(W_NDimSlice(concrete, new_sig, self.start, strides[:], - backstrides[:], shape[:])) - - def descr_get_flatiter(self, space): - return space.wrap(W_FlatIterator(self)) - - def getitem(self, item): - raise NotImplementedError - - def start_iter(self, res_shape=None): - raise NotImplementedError - - def descr_array_iface(self, space): - concrete = self.get_concrete() - storage = concrete.get_storage(space) - addr = rffi.cast(lltype.Signed, storage) - w_d = space.newdict() - space.setitem_str(w_d, 'data', space.newtuple([space.wrap(addr), - space.w_False])) - return w_d - -def convert_to_array(space, w_obj): - if isinstance(w_obj, BaseArray): - return w_obj - elif space.issequence_w(w_obj): - # Convert to array. - return array(space, w_obj, w_order=None) - else: - # If it's a scalar - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) - return scalar_w(space, dtype, w_obj) - -def scalar_w(space, dtype, w_obj): - return Scalar(dtype, dtype.coerce(space, w_obj)) - -class Scalar(BaseArray): - """ - Intermediate class representing a literal. - """ - signature = signature.BaseSignature() - - _attrs_ = ["dtype", "value", "shape"] - - def __init__(self, dtype, value): - self.shape = self.strides = [] - BaseArray.__init__(self, [], 'C') - self.dtype = dtype - self.value = value - - def find_size(self): - return 1 - - def get_concrete(self): - return self - - def find_dtype(self): - return self.dtype - - def getitem(self, item): - raise NotImplementedError - - def eval(self, iter): - return self.value - - def start_iter(self, res_shape=None): - return ConstantIterator() - - def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): - builder.append(self.dtype.itemtype.str_format(self.value)) - - def copy(self): - return Scalar(self.dtype, self.value) - - def debug_repr(self): - return 'Scalar' - - def setshape(self, space, new_shape): - # In order to get here, we already checked that prod(new_shape) == 1, - # so in order to have a consistent API, let it go through. 
- pass - - def get_storage(self, space): - raise OperationError(space.w_TypeError, space.wrap("Cannot get array interface on scalars in pypy")) - -class VirtualArray(BaseArray): - """ - Class for representing virtual arrays, such as binary ops or ufuncs - """ - def __init__(self, signature, shape, res_dtype, order): - BaseArray.__init__(self, shape, order) - self.forced_result = None - self.signature = signature - self.res_dtype = res_dtype - - def _del_sources(self): - # Function for deleting references to source arrays, to allow garbage-collecting them - raise NotImplementedError - - def compute(self): - i = 0 - signature = self.signature - result_size = self.find_size() - result = W_NDimArray(result_size, self.shape, self.find_dtype()) - shapelen = len(self.shape) - i = self.start_iter() - ri = result.start_iter() - while not ri.done(): - numpy_driver.jit_merge_point(signature=signature, - shapelen=shapelen, - result_size=result_size, i=i, ri=ri, - self=self, result=result) - result.dtype.setitem(result.storage, ri.offset, self.eval(i)) - i = i.next(shapelen) - ri = ri.next(shapelen) - return result - - def force_if_needed(self): - if self.forced_result is None: - self.forced_result = self.compute() - self._del_sources() - - def get_concrete(self): - self.force_if_needed() - return self.forced_result - - def eval(self, iter): - if self.forced_result is not None: - return self.forced_result.eval(iter) - return self._eval(iter) - - def getitem(self, item): - return self.get_concrete().getitem(item) - - def setitem(self, item, value): - return self.get_concrete().setitem(item, value) - - def find_size(self): - if self.forced_result is not None: - # The result has been computed and sources may be unavailable - return self.forced_result.find_size() - return self._find_size() - - def find_dtype(self): - return self.res_dtype - - -class Call1(VirtualArray): - def __init__(self, signature, shape, res_dtype, values, order): - VirtualArray.__init__(self, signature, shape, res_dtype, - values.order) - self.values = values - - def _del_sources(self): - self.values = None - - def _find_size(self): - return self.values.find_size() - - def _find_dtype(self): - return self.res_dtype - - def _eval(self, iter): - assert isinstance(iter, Call1Iterator) - val = self.values.eval(iter.child).convert_to(self.res_dtype) - sig = jit.promote(self.signature) - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call1) - return call_sig.func(self.res_dtype, val) - - def start_iter(self, res_shape=None): - if self.forced_result is not None: - return self.forced_result.start_iter(res_shape) - return Call1Iterator(self.values.start_iter(res_shape)) - - def debug_repr(self): - sig = self.signature - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call1) - if self.forced_result is not None: - return 'Call1(%s, forced=%s)' % (call_sig.name, - self.forced_result.debug_repr()) - return 'Call1(%s, %s)' % (call_sig.name, - self.values.debug_repr()) - -class Call2(VirtualArray): - """ - Intermediate class for performing binary operations. 
- """ - def __init__(self, signature, shape, calc_dtype, res_dtype, left, right): - # XXX do something if left.order != right.order - VirtualArray.__init__(self, signature, shape, res_dtype, left.order) - self.left = left - self.right = right - self.calc_dtype = calc_dtype - self.size = 1 - for s in self.shape: - self.size *= s - - def _del_sources(self): - self.left = None - self.right = None - - def _find_size(self): - return self.size - - def start_iter(self, res_shape=None): - if self.forced_result is not None: - return self.forced_result.start_iter(res_shape) - if res_shape is None: - res_shape = self.shape # we still force the shape on children - return Call2Iterator(self.left.start_iter(res_shape), - self.right.start_iter(res_shape)) - - def _eval(self, iter): - assert isinstance(iter, Call2Iterator) - lhs = self.left.eval(iter.left).convert_to(self.calc_dtype) - rhs = self.right.eval(iter.right).convert_to(self.calc_dtype) - sig = jit.promote(self.signature) - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call2) - return call_sig.func(self.calc_dtype, lhs, rhs) - - def debug_repr(self): - sig = self.signature - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call2) - if self.forced_result is not None: - return 'Call2(%s, forced=%s)' % (call_sig.name, - self.forced_result.debug_repr()) - return 'Call2(%s, %s, %s)' % (call_sig.name, - self.left.debug_repr(), - self.right.debug_repr()) - -class ViewArray(BaseArray): - """ - Class for representing views of arrays, they will reflect changes of parent - arrays. Example: slices - """ - def __init__(self, parent, signature, strides, backstrides, shape): +class W_NDimSlice(ViewArray): + def __init__(self, start, strides, backstrides, shape, parent): + assert isinstance(parent, ConcreteArray) + if isinstance(parent, W_NDimSlice): + parent = parent.parent + size = 1 + for sh in shape: + size *= sh self.strides = strides self.backstrides = backstrides - BaseArray.__init__(self, shape, parent.order) - self.signature = signature - self.parent = parent - self.invalidates = parent.invalidates + ViewArray.__init__(self, size, shape, parent.dtype, parent.order, + parent) + self.start = start - def get_concrete(self): - # in fact, ViewArray never gets "concrete" as it never stores data. - # This implementation is needed for BaseArray getitem/setitem to work, - # can be refactored. - self.parent.get_concrete() - return self + def setslice(self, space, w_value): + res_shape = shape_agreement(space, self.shape, w_value.shape) + self._sliceloop(w_value, res_shape) - def getitem(self, item): - return self.parent.getitem(item) - - def eval(self, iter): - return self.parent.getitem(iter.get_offset()) - - def setitem(self, item, value): - # This is currently not possible to be called from anywhere. 
- raise NotImplementedError - - def descr_len(self, space): - if self.shape: - return space.wrap(self.shape[0]) - return space.wrap(1) + def _sliceloop(self, source, res_shape): + sig = source.find_sig(res_shape) + frame = sig.create_frame(source, res_shape) + res_iter = view_iter_from_arr(self) + shapelen = len(res_shape) + while not res_iter.done(): + slice_driver.jit_merge_point(sig=sig, + frame=frame, + shapelen=shapelen, + self=self, source=source, + res_iter=res_iter) + self.setitem(res_iter.offset, sig.eval(frame, source).convert_to( + self.find_dtype())) + frame.next(shapelen) + res_iter = res_iter.next(shapelen) def setshape(self, space, new_shape): if len(self.shape) < 1: @@ -1220,96 +1013,10 @@ self.backstrides = new_backstrides[:] self.shape = new_shape[:] -class W_NDimSlice(ViewArray): - signature = signature.BaseSignature() - - def __init__(self, parent, signature, start, strides, backstrides, - shape): - if isinstance(parent, W_NDimSlice): - parent = parent.parent - ViewArray.__init__(self, parent, signature, strides, backstrides, shape) - self.start = start - self.size = 1 - for sh in shape: - self.size *= sh - - def find_size(self): - return self.size - - def find_dtype(self): - return self.parent.find_dtype() - - def setslice(self, space, w_value): - res_shape = shape_agreement(space, self.shape, w_value.shape) - self._sliceloop(w_value, res_shape) - - def _sliceloop(self, source, res_shape): - source_iter = source.start_iter(res_shape) - res_iter = self.start_iter(res_shape) - shapelen = len(res_shape) - while not res_iter.done(): - slice_driver.jit_merge_point(signature=source.signature, - shapelen=shapelen, - self=self, source=source, - res_iter=res_iter, - source_iter=source_iter) - self.setitem(res_iter.offset, source.eval(source_iter).convert_to( - self.find_dtype())) - source_iter = source_iter.next(shapelen) - res_iter = res_iter.next(shapelen) - - def start_iter(self, res_shape=None): - if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - if len(self.shape) == 1: - return OneDimIterator(self.start, self.strides[0], self.shape[0]) - return ViewIterator(self) - - def setitem(self, item, value): - self.parent.setitem(item, value) - - def debug_repr(self): - return 'Slice(%s)' % self.parent.debug_repr() - - def copy(self): - array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) - iter = self.start_iter() - a_iter = array.start_iter() - while not iter.done(): - array.setitem(a_iter.offset, self.getitem(iter.offset)) - iter = iter.next(len(self.shape)) - a_iter = a_iter.next(len(array.shape)) - return array - - def get_storage(self, space): - return self.parent.get_storage(space) - -class W_NDimArray(BaseArray): +class W_NDimArray(ConcreteArray): """ A class representing contiguous array. 
We know that each iteration by say ufunc will increase the data index by one """ - def __init__(self, size, shape, dtype, order='C'): - BaseArray.__init__(self, shape, order) - self.size = size - self.dtype = dtype - self.storage = dtype.malloc(size) - self.signature = dtype.signature - - def get_concrete(self): - return self - - def find_size(self): - return self.size - - def find_dtype(self): - return self.dtype - - def getitem(self, item): - return self.dtype.getitem(self.storage, item) - - def eval(self, iter): - return self.dtype.getitem(self.storage, iter.get_offset()) - def copy(self): array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) rffi.c_memcpy( @@ -1319,32 +1026,16 @@ ) return array - def descr_len(self, space): - if len(self.shape): - return space.wrap(self.shape[0]) - raise OperationError(space.w_TypeError, space.wrap( - "len() of unsized object")) - def setitem(self, item, value): self.invalidated() self.dtype.setitem(self.storage, item, value) - def start_iter(self, res_shape=None): - if self.order == 'C': - if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - return ArrayIterator(self.size) - raise NotImplementedError # use ViewIterator simply, test it - def setshape(self, space, new_shape): self.shape = new_shape self.calc_strides(new_shape) - def debug_repr(self): - return 'Array' - - def get_storage(self, space): - return self.storage + def create_sig(self, res_shape): + return self.array_sig(res_shape) def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) @@ -1396,10 +1087,11 @@ ) arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) shapelen = len(shape) - arr_iter = arr.start_iter(arr.shape) + arr_iter = ArrayIterator(arr.size) for i in range(len(elems_w)): w_elem = elems_w[i] - dtype.setitem(arr.storage, arr_iter.offset, dtype.coerce(space, w_elem)) + dtype.setitem(arr.storage, arr_iter.offset, + dtype.coerce(space, w_elem)) arr_iter = arr_iter.next(shapelen) return arr @@ -1492,48 +1184,31 @@ class W_FlatIterator(ViewArray): - signature = signature.BaseSignature() @jit.unroll_safe def __init__(self, arr): + arr = arr.get_concrete() size = 1 for sh in arr.shape: size *= sh - new_sig = signature.Signature.find_sig([ - W_FlatIterator.signature, arr.signature - ]) - ViewArray.__init__(self, arr, new_sig, [arr.strides[-1]], - [arr.backstrides[-1]], [size]) + self.strides = [arr.strides[-1]] + self.backstrides = [arr.backstrides[-1]] + ViewArray.__init__(self, size, [size], arr.dtype, arr.order, + arr) self.shapelen = len(arr.shape) - self.arr = arr - self.iter = self.start_iter() - - def start_iter(self, res_shape=None): - if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - return OneDimIterator(self.arr.start, self.strides[0], - self.shape[0]) - - def find_dtype(self): - return self.arr.find_dtype() - - def find_size(self): - return self.shape[0] + self.iter = OneDimIterator(arr.start, self.strides[0], + self.shape[0]) def descr_next(self, space): if self.iter.done(): raise OperationError(space.w_StopIteration, space.w_None) - result = self.eval(self.iter) + result = self.getitem(self.iter.offset) self.iter = self.iter.next(self.shapelen) return result def descr_iter(self): return self - def debug_repr(self): - return 'FlatIter(%s)' % self.arr.debug_repr() - - W_FlatIterator.typedef = TypeDef( 'flatiter', next = interp2app(W_FlatIterator.descr_next), diff --git a/pypy/module/micronumpy/interp_ufuncs.py 
b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -2,20 +2,21 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import interp_boxes, interp_dtype, signature, types +from pypy.module.micronumpy import interp_boxes, interp_dtype, types +from pypy.module.micronumpy.signature import ReduceSignature, ScalarSignature, find_sig from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name - reduce_driver = jit.JitDriver( - greens = ['shapelen', "signature"], - reds = ["i", "self", "dtype", "value", "obj"] + greens = ['shapelen', "sig"], + virtualizables = ["frame"], + reds = ["frame", "self", "dtype", "value", "obj"] ) class W_Ufunc(Wrappable): _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"] - _immutable_fields_ = ["promote_to_float", "promote_bools"] + _immutable_fields_ = ["promote_to_float", "promote_bools", "name"] def __init__(self, name, promote_to_float, promote_bools, identity): self.name = name @@ -50,6 +51,7 @@ def reduce(self, space, w_obj, multidim): from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar + if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) @@ -60,13 +62,16 @@ raise OperationError(space.w_TypeError, space.wrap("cannot reduce " "on a scalar")) - size = obj.find_size() + size = obj.size dtype = find_unaryop_result_dtype( space, obj.find_dtype(), promote_to_largest=True ) - start = obj.start_iter(obj.shape) shapelen = len(obj.shape) + sig = find_sig(ReduceSignature(self.func, self.name, dtype, + ScalarSignature(dtype), + obj.create_sig(obj.shape)), obj) + frame = sig.create_frame(obj) if shapelen > 1 and not multidim: raise OperationError(space.w_NotImplementedError, space.wrap("not implemented yet")) @@ -74,34 +79,33 @@ if size == 0: raise operationerrfmt(space.w_ValueError, "zero-size array to " "%s.reduce without identity", self.name) - value = obj.eval(start).convert_to(dtype) - start = start.next(shapelen) + value = sig.eval(frame, obj).convert_to(dtype) + frame.next(shapelen) else: value = self.identity.convert_to(dtype) - new_sig = signature.Signature.find_sig([ - self.reduce_signature, obj.signature - ]) - return self.reduce_loop(new_sig, shapelen, start, value, obj, dtype) + return self.reduce_loop(shapelen, sig, frame, value, obj, dtype) - def reduce_loop(self, signature, shapelen, i, value, obj, dtype): - while not i.done(): - reduce_driver.jit_merge_point(signature=signature, + def reduce_loop(self, shapelen, sig, frame, value, obj, dtype): + while not frame.done(): + reduce_driver.jit_merge_point(sig=sig, shapelen=shapelen, self=self, - value=value, obj=obj, i=i, + value=value, obj=obj, frame=frame, dtype=dtype) - value = self.func(dtype, value, obj.eval(i).convert_to(dtype)) - i = i.next(shapelen) + assert isinstance(sig, ReduceSignature) + value = sig.binfunc(dtype, value, sig.eval(frame, obj).convert_to(dtype)) + frame.next(shapelen) return value class W_Ufunc1(W_Ufunc): argcount = 1 + _immutable_fields_ = ["func", "name"] + def __init__(self, func, name, promote_to_float=False, promote_bools=False, identity=None): W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity) self.func = func - 
self.signature = signature.Call1(func) def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call1, @@ -117,14 +121,13 @@ if isinstance(w_obj, Scalar): return self.func(res_dtype, w_obj.value.convert_to(res_dtype)) - new_sig = signature.Signature.find_sig([self.signature, w_obj.signature]) - w_res = Call1(new_sig, w_obj.shape, res_dtype, w_obj, w_obj.order) + w_res = Call1(self.func, self.name, w_obj.shape, res_dtype, w_obj) w_obj.add_invalidates(w_res) return w_res class W_Ufunc2(W_Ufunc): - _immutable_fields_ = ["comparison_func", "func"] + _immutable_fields_ = ["comparison_func", "func", "name"] argcount = 2 def __init__(self, func, name, promote_to_float=False, promote_bools=False, @@ -133,8 +136,6 @@ W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity) self.func = func self.comparison_func = comparison_func - self.signature = signature.Call2(func) - self.reduce_signature = signature.BaseSignature() def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, @@ -158,11 +159,9 @@ w_rhs.value.convert_to(calc_dtype) ) - new_sig = signature.Signature.find_sig([ - self.signature, w_lhs.signature, w_rhs.signature - ]) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) - w_res = Call2(new_sig, new_shape, calc_dtype, + w_res = Call2(self.func, self.name, + new_shape, calc_dtype, res_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -1,54 +1,322 @@ -from pypy.rlib.objectmodel import r_dict, compute_identity_hash +from pypy.rlib.objectmodel import r_dict, compute_identity_hash, compute_hash from pypy.rlib.rarithmetic import intmask +from pypy.module.micronumpy.interp_iter import ViewIterator, ArrayIterator, \ + OneDimIterator, ConstantIterator +from pypy.module.micronumpy.strides import calculate_slice_strides +from pypy.rlib.jit import hint, unroll_safe, promote +def sigeq(one, two): + return one.eq(two) -def components_eq(lhs, rhs): - if len(lhs) != len(rhs): - return False - for i in range(len(lhs)): - v1, v2 = lhs[i], rhs[i] - if type(v1) is not type(v2) or not v1.eq(v2): +def sigeq_no_numbering(one, two): + """ Cache for iterator numbering should not compare array numbers + """ + return one.eq(two, compare_array_no=False) + +def sighash(sig): + return sig.hash() + +known_sigs = r_dict(sigeq, sighash) + +def find_sig(sig, arr): + sig.invent_array_numbering(arr) + try: + return known_sigs[sig] + except KeyError: + sig.invent_numbering() + known_sigs[sig] = sig + return sig + +class NumpyEvalFrame(object): + _virtualizable2_ = ['iterators[*]', 'final_iter', 'arraylist[*]'] + + @unroll_safe + def __init__(self, iterators, arrays): + self = hint(self, access_directly=True, fresh_virtualizable=True) + self.iterators = iterators[:] + self.arrays = arrays[:] + for i in range(len(self.iterators)): + iter = self.iterators[i] + if not isinstance(iter, ConstantIterator): + self.final_iter = i + break + else: + self.final_iter = -1 + + def done(self): + final_iter = promote(self.final_iter) + if final_iter < 0: return False - return True + return self.iterators[final_iter].done() -def components_hash(components): - res = 0x345678 - for component in components: - res = intmask((1000003 * res) ^ component.hash()) - return res + @unroll_safe + def next(self, shapelen): + for i in range(len(self.iterators)): + 
self.iterators[i] = self.iterators[i].next(shapelen) -class BaseSignature(object): - _attrs_ = [] +def _add_ptr_to_cache(ptr, cache): + i = 0 + for p in cache: + if ptr == p: + return i + i += 1 + else: + res = len(cache) + cache.append(ptr) + return res - def eq(self, other): - return self is other +class Signature(object): + _attrs_ = ['iter_no', 'array_no'] + _immutable_fields_ = ['iter_no', 'array_no'] + + array_no = 0 + iter_no = 0 + + def invent_numbering(self): + cache = r_dict(sigeq_no_numbering, sighash) + allnumbers = [] + self._invent_numbering(cache, allnumbers) + + def invent_array_numbering(self, arr): + cache = [] + self._invent_array_numbering(arr, cache) + + def _invent_numbering(self, cache, allnumbers): + try: + no = cache[self] + except KeyError: + no = len(allnumbers) + cache[self] = no + allnumbers.append(no) + self.iter_no = no + + def create_frame(self, arr, res_shape=None): + res_shape = res_shape or arr.shape + iterlist = [] + arraylist = [] + self._create_iter(iterlist, arraylist, arr, res_shape, []) + return NumpyEvalFrame(iterlist, arraylist) + +class ConcreteSignature(Signature): + _immutable_fields_ = ['dtype'] + + def __init__(self, dtype): + self.dtype = dtype + + def eq(self, other, compare_array_no=True): + if type(self) is not type(other): + return False + assert isinstance(other, ConcreteSignature) + if compare_array_no: + if self.array_no != other.array_no: + return False + return self.dtype is other.dtype def hash(self): - return compute_identity_hash(self) + return compute_identity_hash(self.dtype) -class Signature(BaseSignature): - _known_sigs = r_dict(components_eq, components_hash) + def allocate_view_iter(self, arr, res_shape, chunklist): + r = arr.shape, arr.start, arr.strides, arr.backstrides + if chunklist: + for chunkelem in chunklist: + r = calculate_slice_strides(r[0], r[1], r[2], r[3], chunkelem) + shape, start, strides, backstrides = r + if len(res_shape) == 1: + return OneDimIterator(start, strides[0], res_shape[0]) + return ViewIterator(start, strides, backstrides, shape, res_shape) - _attrs_ = ["components"] - _immutable_fields_ = ["components[*]"] +class ArraySignature(ConcreteSignature): + def debug_repr(self): + return 'Array' - def __init__(self, components): - self.components = components + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import ConcreteArray + concr = arr.get_concrete() + assert isinstance(concr, ConcreteArray) + self.array_no = _add_ptr_to_cache(concr.storage, cache) - @staticmethod - def find_sig(components): - return Signature._known_sigs.setdefault(components, Signature(components)) + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import ConcreteArray + concr = arr.get_concrete() + assert isinstance(concr, ConcreteArray) + storage = concr.storage + if self.iter_no >= len(iterlist): + iterlist.append(self.allocate_iter(concr, res_shape, chunklist)) + if self.array_no >= len(arraylist): + arraylist.append(storage) -class Call1(BaseSignature): - _immutable_fields_ = ["func", "name"] + def allocate_iter(self, arr, res_shape, chunklist): + if chunklist: + return self.allocate_view_iter(arr, res_shape, chunklist) + return ArrayIterator(arr.size) - def __init__(self, func): - self.func = func - self.name = func.func_name + def eval(self, frame, arr): + iter = frame.iterators[self.iter_no] + return self.dtype.getitem(frame.arrays[self.array_no], iter.offset) -class Call2(BaseSignature): - 
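(find_sig in the hunk above hash-conses signatures: structurally equal expression trees are mapped to one cached Signature object, so the JIT sees a single green key per expression shape. A rough non-RPython sketch of that caching pattern, using a plain dict and hypothetical names in place of rlib's r_dict(sigeq, sighash):

    class Sig(object):
        # toy signature node: a name plus child signatures
        def __init__(self, name, children=()):
            self.name = name
            self.children = tuple(children)

        def key(self):
            # structural identity, analogous to eq()/hash() on real signatures
            return (self.name,) + tuple(c.key() for c in self.children)

    _known = {}

    def find_sig(sig):
        k = sig.key()
        try:
            return _known[k]
        except KeyError:
            _known[k] = sig
            return sig

    add1 = find_sig(Sig('add', [Sig('array'), Sig('array')]))
    add2 = find_sig(Sig('add', [Sig('array'), Sig('array')]))
    assert add1 is add2   # same expression shape -> same cached signature

The real code additionally numbers iterators and array storages per signature, which the toy version leaves out.)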
_immutable_fields_ = ["func", "name"] +class ScalarSignature(ConcreteSignature): + def debug_repr(self): + return 'Scalar' - def __init__(self, func): - self.func = func - self.name = func.func_name + def _invent_array_numbering(self, arr, cache): + pass + + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + if self.iter_no >= len(iterlist): + iter = ConstantIterator() + iterlist.append(iter) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Scalar + assert isinstance(arr, Scalar) + return arr.value + +class ViewSignature(ArraySignature): + def debug_repr(self): + return 'Slice' + + def _invent_numbering(self, cache, allnumbers): + # always invent a new number for view + no = len(allnumbers) + allnumbers.append(no) + self.iter_no = no + + def allocate_iter(self, arr, res_shape, chunklist): + return self.allocate_view_iter(arr, res_shape, chunklist) + +class VirtualSliceSignature(Signature): + def __init__(self, child): + self.child = child + + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import VirtualSlice + assert isinstance(arr, VirtualSlice) + self.child._invent_array_numbering(arr.child, cache) + + def hash(self): + return intmask(self.child.hash() ^ 1234) + + def eq(self, other, compare_array_no=True): + if type(self) is not type(other): + return False + assert isinstance(other, VirtualSliceSignature) + return self.child.eq(other.child, compare_array_no) + + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import VirtualSlice + assert isinstance(arr, VirtualSlice) + chunklist.append(arr.chunks) + self.child._create_iter(iterlist, arraylist, arr.child, res_shape, + chunklist) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import VirtualSlice + assert isinstance(arr, VirtualSlice) + return self.child.eval(frame, arr.child) + +class Call1(Signature): + _immutable_fields_ = ['unfunc', 'name', 'child'] + + def __init__(self, func, name, child): + self.unfunc = func + self.child = child + self.name = name + + def hash(self): + return compute_hash(self.name) ^ intmask(self.child.hash() << 1) + + def eq(self, other, compare_array_no=True): + if type(self) is not type(other): + return False + assert isinstance(other, Call1) + return (self.unfunc is other.unfunc and + self.child.eq(other.child, compare_array_no)) + + def debug_repr(self): + return 'Call1(%s, %s)' % (self.name, self.child.debug_repr()) + + def _invent_numbering(self, cache, allnumbers): + self.child._invent_numbering(cache, allnumbers) + + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + self.child._invent_array_numbering(arr.values, cache) + + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + self.child._create_iter(iterlist, arraylist, arr.values, res_shape, + chunklist) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + v = self.child.eval(frame, arr.values).convert_to(arr.res_dtype) + return self.unfunc(arr.res_dtype, v) + +class Call2(Signature): + _immutable_fields_ = ['binfunc', 'name', 'calc_dtype', 'left', 'right'] + + def __init__(self, func, name, calc_dtype, left, right): + self.binfunc = func + self.left = left + self.right = right + 
self.name = name + self.calc_dtype = calc_dtype + + def hash(self): + return (compute_hash(self.name) ^ intmask(self.left.hash() << 1) ^ + intmask(self.right.hash() << 2)) + + def eq(self, other, compare_array_no=True): + if type(self) is not type(other): + return False + assert isinstance(other, Call2) + return (self.binfunc is other.binfunc and + self.calc_dtype is other.calc_dtype and + self.left.eq(other.left, compare_array_no) and + self.right.eq(other.right, compare_array_no)) + + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import Call2 + assert isinstance(arr, Call2) + self.left._invent_array_numbering(arr.left, cache) + self.right._invent_array_numbering(arr.right, cache) + + def _invent_numbering(self, cache, allnumbers): + self.left._invent_numbering(cache, allnumbers) + self.right._invent_numbering(cache, allnumbers) + + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import Call2 + + assert isinstance(arr, Call2) + self.left._create_iter(iterlist, arraylist, arr.left, res_shape, + chunklist) + self.right._create_iter(iterlist, arraylist, arr.right, res_shape, + chunklist) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Call2 + assert isinstance(arr, Call2) + lhs = self.left.eval(frame, arr.left).convert_to(self.calc_dtype) + rhs = self.right.eval(frame, arr.right).convert_to(self.calc_dtype) + return self.binfunc(self.calc_dtype, lhs, rhs) + + def debug_repr(self): + return 'Call2(%s, %s, %s)' % (self.name, self.left.debug_repr(), + self.right.debug_repr()) + +class ReduceSignature(Call2): + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + self.right._create_iter(iterlist, arraylist, arr, res_shape, chunklist) + + def _invent_numbering(self, cache, allnumbers): + self.right._invent_numbering(cache, allnumbers) + + def _invent_array_numbering(self, arr, cache): + self.right._invent_array_numbering(arr, cache) + + def eval(self, frame, arr): + return self.right.eval(frame, arr) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/strides.py @@ -0,0 +1,34 @@ + +def calculate_slice_strides(shape, start, strides, backstrides, chunks): + rstrides = [] + rbackstrides = [] + rstart = start + rshape = [] + i = -1 + for i, (start_, stop, step, lgt) in enumerate(chunks): + if step != 0: + rstrides.append(strides[i] * step) + rbackstrides.append(strides[i] * (lgt - 1) * step) + rshape.append(lgt) + rstart += strides[i] * start_ + # add a reminder + s = i + 1 + assert s >= 0 + rstrides += strides[s:] + rbackstrides += backstrides[s:] + rshape += shape[s:] + return rshape, rstart, rstrides, rbackstrides + +def calculate_broadcast_strides(strides, backstrides, orig_shape, res_shape): + rstrides = [] + rbackstrides = [] + for i in range(len(orig_shape)): + if orig_shape[i] == 1: + rstrides.append(0) + rbackstrides.append(0) + else: + rstrides.append(strides[i]) + rbackstrides.append(backstrides[i]) + rstrides = [0] * (len(res_shape) - len(orig_shape)) + rstrides + rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides + return rstrides, rbackstrides diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -4,7 +4,6 @@ from pypy.module.micronumpy.interp_ufuncs import 
(find_binop_result_dtype, find_unaryop_result_dtype) - class BaseNumpyAppTest(object): def setup_class(cls): cls.space = gettestobjspace(usemodules=['micronumpy']) @@ -15,20 +14,37 @@ bool_dtype = get_dtype_cache(space).w_booldtype ar = W_NDimArray(10, [10], dtype=float64_dtype) + ar2 = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) - assert v1.signature is not v2.signature + sig1 = v1.find_sig() + sig2 = v2.find_sig() + assert v1 is not v2 + assert sig1.left.iter_no == sig1.right.iter_no + assert sig2.left.iter_no != sig2.right.iter_no + assert sig1.left.array_no == sig1.right.array_no + sig1b = ar2.descr_add(space, ar).find_sig() + assert sig1b.left.array_no != sig1b.right.array_no + assert sig1b is not sig1 v3 = ar.descr_add(space, Scalar(float64_dtype, 1.0)) - assert v2.signature is v3.signature + sig3 = v3.find_sig() + assert sig2 is sig3 v4 = ar.descr_add(space, ar) - assert v1.signature is v4.signature + assert v1.find_sig() is v4.find_sig() bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) - assert v5.signature is not v1.signature - assert v5.signature is not v2.signature + assert v5.find_sig() is not v1.find_sig() + assert v5.find_sig() is not v2.find_sig() v6 = ar.descr_add(space, bool_ar) - assert v5.signature is v6.signature + assert v5.find_sig() is v6.find_sig() + v7 = v6.descr_add(space, v6) + sig7 = v7.find_sig() + assert sig7.left.left.iter_no == sig7.right.left.iter_no + assert sig7.left.left.iter_no != sig7.right.right.iter_no + assert sig7.left.right.iter_no == sig7.right.right.iter_no + v1.forced_result = ar + assert v1.find_sig() is not sig1 def test_slice_signature(self, space): float64_dtype = get_dtype_cache(space).w_float64dtype @@ -36,11 +52,14 @@ ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) - assert v1.signature is v2.signature + assert v1.find_sig() is v2.find_sig() v3 = v2.descr_add(space, v1) v4 = v1.descr_add(space, v2) - assert v3.signature is v4.signature + assert v3.find_sig() is v4.find_sig() + v5 = ar.descr_add(space, ar).descr_getitem(space, space.wrap(slice(1, 3, 1))) + v6 = ar.descr_add(space, ar).descr_getitem(space, space.wrap(slice(1, 4, 1))) + assert v5.find_sig() is v6.find_sig() class TestUfuncCoerscion(object): def test_binops(self, space): diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -137,6 +137,16 @@ interp = self.run(code) assert interp.results[0].value.value == 15 + def test_sum2(self): + code = """ + a = |30| + b = a + a + sum(b) + """ + interp = self.run(code) + assert interp.results[0].value.value == 30 * (30 - 1) + + def test_array_write(self): code = """ a = [1,2,3,4,5] diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -8,8 +8,6 @@ class MockDtype(object): - signature = signature.BaseSignature() - def malloc(self, size): return None @@ -38,92 +36,86 @@ assert a.backstrides == [135, 12, 2] def test_create_slice_f(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice(space, [(3, 0, 0, 1)]) + s = a.create_slice([(3, 0, 0, 1)]) assert s.start == 3 
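(The strides asserted in these create_slice tests follow directly from calculate_slice_strides in the new strides.py: each kept dimension multiplies the parent stride by the slice step, and the start is offset by stride * start. A standalone re-implementation for illustration only, mirroring the (start, stop, step, length) chunk tuples used above:

    def slice_strides(shape, start, strides, backstrides, chunks):
        # illustrative copy of pypy/module/micronumpy/strides.py:calculate_slice_strides
        rstrides, rbackstrides, rshape, rstart = [], [], [], start
        i = -1
        for i, (start_, stop, step, lgt) in enumerate(chunks):
            if step != 0:                       # step == 0 means a scalar index
                rstrides.append(strides[i] * step)
                rbackstrides.append(strides[i] * (lgt - 1) * step)
                rshape.append(lgt)
            rstart += strides[i] * start_
        s = i + 1
        return (rshape + shape[s:], rstart,
                rstrides + strides[s:], rbackstrides + backstrides[s:])

    # Fortran-ordered (10, 5, 3) array: strides [1, 10, 50], backstrides [9, 40, 100]
    print(slice_strides([10, 5, 3], 0, [1, 10, 50], [9, 40, 100],
                        [(1, 9, 2, 4)]))
    # -> ([4, 5, 3], 1, [2, 10, 50], [6, 40, 100]), matching the asserts above

)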
assert s.strides == [10, 50] assert s.backstrides == [40, 100] - s = a.create_slice(space, [(1, 9, 2, 4)]) + s = a.create_slice([(1, 9, 2, 4)]) assert s.start == 1 assert s.strides == [2, 10, 50] assert s.backstrides == [6, 40, 100] - s = a.create_slice(space, [(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) assert s.shape == [2, 1] assert s.strides == [3, 10] assert s.backstrides == [3, 0] - s = a.create_slice(space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) assert s.start == 20 assert s.shape == [10, 3] def test_create_slice_c(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') - s = a.create_slice(space, [(3, 0, 0, 1)]) + s = a.create_slice([(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] assert s.backstrides == [12, 2] - s = a.create_slice(space, [(1, 9, 2, 4)]) + s = a.create_slice([(1, 9, 2, 4)]) assert s.start == 15 assert s.strides == [30, 3, 1] assert s.backstrides == [90, 12, 2] - s = a.create_slice(space, [(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) assert s.start == 19 assert s.shape == [2, 1] assert s.strides == [45, 3] assert s.backstrides == [45, 0] - s = a.create_slice(space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) assert s.start == 6 assert s.shape == [10, 3] def test_slice_of_slice_f(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice(space, [(5, 0, 0, 1)]) + s = a.create_slice([(5, 0, 0, 1)]) assert s.start == 5 - s2 = s.create_slice(space, [(3, 0, 0, 1)]) + s2 = s.create_slice([(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [50] assert s2.parent is a assert s2.backstrides == [100] assert s2.start == 35 - s = a.create_slice(space, [(1, 5, 3, 2)]) - s2 = s.create_slice(space, [(0, 2, 1, 2), (2, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2)]) + s2 = s.create_slice([(0, 2, 1, 2), (2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [3, 50] assert s2.backstrides == [3, 100] assert s2.start == 1 * 15 + 2 * 3 def test_slice_of_slice_c(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') - s = a.create_slice(space, [(5, 0, 0, 1)]) + s = a.create_slice([(5, 0, 0, 1)]) assert s.start == 15 * 5 - s2 = s.create_slice(space, [(3, 0, 0, 1)]) + s2 = s.create_slice([(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [1] assert s2.parent is a assert s2.backstrides == [2] assert s2.start == 5 * 15 + 3 * 3 - s = a.create_slice(space, [(1, 5, 3, 2)]) - s2 = s.create_slice(space, [(0, 2, 1, 2), (2, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2)]) + s2 = s.create_slice([(0, 2, 1, 2), (2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [45, 1] assert s2.backstrides == [45, 2] assert s2.start == 1 * 15 + 2 * 3 def test_negative_step_f(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice(space, [(9, -1, -2, 5)]) + s = a.create_slice([(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] assert s.backstrides == [-8, 40, 100] def test_negative_step_c(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') - s = a.create_slice(space, [(9, -1, -2, 5)]) + s = a.create_slice([(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] @@ -132,7 +124,7 @@ a = 
W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 + 2 * 10 + 2 * 50 - s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) r = s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) @@ -142,7 +134,7 @@ a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 * 3 * 5 + 2 * 3 + 2 - s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) r = s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) @@ -897,13 +889,32 @@ a = zeros(1) assert debug_repr(a) == 'Array' assert debug_repr(a + a) == 'Call2(add, Array, Array)' - assert debug_repr(a[::2]) == 'Slice(Array)' + assert debug_repr(a[::2]) == 'Slice' assert debug_repr(a + 2) == 'Call2(add, Array, Scalar)' - assert debug_repr(a + a.flat) == 'Call2(add, Array, FlatIter(Array))' + assert debug_repr(a + a.flat) == 'Call2(add, Array, Slice)' assert debug_repr(sin(a)) == 'Call1(sin, Array)' + b = a + a b[0] = 3 - assert debug_repr(b) == 'Call2(add, forced=Array)' + assert debug_repr(b) == 'Array' + + def test_virtual_views(self): + from numpypy import arange + a = arange(15) + c = (a + a) + d = c[::2] + assert d[3] == 12 + c[6] = 5 + assert d[3] == 5 + a = arange(15) + c = (a + a) + d = c[::2][::2] + assert d[1] == 8 + b = a + a + c = b[::2] + c[:] = 3 + assert b[0] == 3 + assert b[1] == 2 def test_tolist_scalar(self): from numpypy import int32, bool_ @@ -1075,10 +1086,10 @@ def test_broadcast_setslice(self): from numpypy import zeros, ones - a = zeros((100, 100)) - b = ones(100) + a = zeros((10, 10)) + b = ones(10) a[:, :] = b - assert a[13, 15] == 1 + assert a[3, 5] == 1 def test_broadcast_shape_agreement(self): from numpypy import zeros, array @@ -1112,6 +1123,14 @@ b[:] = (a + a) assert (b == zeros((4, 3, 5))).all() + def test_broadcast_virtualview(self): + from numpypy import arange, zeros + a = arange(8).reshape([2, 2, 2]) + b = (a + a)[1, 1] + c = zeros((2, 2, 2)) + c[:] = b + assert (c == [[[12, 14], [12, 14]], [[12, 14], [12, 14]]]).all() + def test_argmax(self): from numpypy import array a = array([[1, 2], [3, 4], [5, 6]]) @@ -1173,6 +1192,11 @@ a = array([1, 2, 3]) assert dot(a.flat, a.flat) == 14 + def test_flatiter_varray(self): + from numpypy import ones + a = ones((2, 2)) + assert list(((a + a).flat)) == [2, 2, 2, 2] + def test_slice_copy(self): from numpypy import zeros a = zeros((10, 10)) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -49,10 +49,14 @@ interp.run(space) w_res = interp.results[-1] if isinstance(w_res, BaseArray): - w_res = w_res.eval(w_res.start_iter()) - + concr = w_res.get_concrete_or_scalar() + sig = concr.find_sig() + frame = sig.create_frame(concr) + w_res = sig.eval(frame, concr) if isinstance(w_res, interp_boxes.W_Float64Box): return w_res.value + if isinstance(w_res, interp_boxes.W_Int64Box): + return float(w_res.value) elif isinstance(w_res, interp_boxes.W_BoolBox): return float(w_res.value) raise TypeError(w_res) @@ -78,8 
+82,9 @@ def test_add(self): result = self.run("add") self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, - 'setinteriorfield_raw': 1, 'int_add': 3, - 'int_ge': 1, 'guard_false': 1, 'jump': 1}) + 'setinteriorfield_raw': 1, 'int_add': 2, + 'int_ge': 1, 'guard_false': 1, 'jump': 1, + 'arraylen_gc': 1}) assert result == 3 + 3 def define_float_add(): @@ -93,7 +98,8 @@ assert result == 3 + 3 self.check_simple_loop({"getinteriorfield_raw": 1, "float_add": 1, "setinteriorfield_raw": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + "int_ge": 1, "guard_false": 1, "jump": 1, + 'arraylen_gc': 1}) def define_sum(): return """ @@ -106,8 +112,8 @@ result = self.run("sum") assert result == 2 * sum(range(30)) self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 2, - "int_add": 2, "int_ge": 1, "guard_false": 1, - "jump": 1}) + "int_add": 1, "int_ge": 1, "guard_false": 1, + "jump": 1, 'arraylen_gc': 1}) def define_prod(): return """ @@ -123,18 +129,22 @@ expected *= i * 2 assert result == expected self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + "float_mul": 1, "int_add": 1, + "int_ge": 1, "guard_false": 1, "jump": 1, + 'arraylen_gc': 1}) - def test_max(self): - py.test.skip("broken, investigate") - result = self.run(""" + def define_max(): + return """ a = |30| a[13] = 128 b = a + a max(b) - """) + """ + + def test_max(self): + result = self.run("max") assert result == 256 + py.test.skip("not there yet, getting though") self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_mul": 1, "int_add": 1, "int_lt": 1, "guard_true": 1, "jump": 1}) @@ -164,9 +174,9 @@ result = self.run("any") assert result == 1 self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, - "float_ne": 1, "int_add": 2, + "float_ne": 1, "int_add": 1, "int_ge": 1, "jump": 1, - "guard_false": 2}) + "guard_false": 2, 'arraylen_gc': 1}) def define_already_forced(): return """ @@ -183,14 +193,13 @@ # This is the sum of the ops for both loops, however if you remove the # optimization then you end up with 2 float_adds, so we can still be # sure it was optimized correctly. - # XXX the comment above is wrong now. 
We need preferrably a way to - # count the two loops separately - self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, - 'getfield_gc': 35, 'getfield_gc_pure': 6, - 'guard_class': 22, 'int_add': 8, 'float_mul': 2, - 'guard_isnull': 2, 'jump': 2, 'int_ge': 4, - 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, - 'guard_value': 2}) + self.check_resops({'setinteriorfield_raw': 4, 'getfield_gc': 26, + 'getarrayitem_gc': 4, 'getarrayitem_gc_pure': 2, + 'getfield_gc_pure': 4, + 'guard_class': 8, 'int_add': 8, 'float_mul': 2, + 'jump': 2, 'int_ge': 4, + 'getinteriorfield_raw': 4, 'float_add': 2, + 'guard_false': 4, 'arraylen_gc': 2, 'same_as': 2}) def define_ufunc(): return """ @@ -204,8 +213,9 @@ result = self.run("ufunc") assert result == -6 self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_neg": 1, - "setinteriorfield_raw": 1, "int_add": 3, - "int_ge": 1, "guard_false": 1, "jump": 1}) + "setinteriorfield_raw": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1, + 'arraylen_gc': 1}) def define_specialization(): return """ @@ -248,7 +258,8 @@ 'setinteriorfield_raw': 1, 'int_add': 3, 'int_ge': 1, 'guard_false': 1, - 'jump': 1}) + 'jump': 1, + 'arraylen_gc': 1}) def define_multidim(): return """ @@ -263,8 +274,9 @@ # int_add might be 1 here if we try slightly harder with # reusing indexes or some optimization self.check_simple_loop({'float_add': 1, 'getinteriorfield_raw': 2, - 'guard_false': 1, 'int_add': 3, 'int_ge': 1, - 'jump': 1, 'setinteriorfield_raw': 1}) + 'guard_false': 1, 'int_add': 2, 'int_ge': 1, + 'jump': 1, 'setinteriorfield_raw': 1, + 'arraylen_gc': 1}) def define_multidim_slice(): return """ @@ -312,7 +324,25 @@ self.check_trace_count(1) self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, 'setinteriorfield_raw': 1, 'int_add': 3, - 'int_eq': 1, 'guard_false': 1, 'jump': 1}) + 'int_lt': 1, 'guard_true': 1, 'jump': 1, + 'arraylen_gc': 3}) + + def define_virtual_slice(): + return """ + a = |30| + c = a + a + d = c -> 1:20 + d -> 1 + """ + + def test_virtual_slice(self): + result = self.run("virtual_slice") + assert result == 4 + self.check_trace_count(1) + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, + 'setinteriorfield_raw': 1, 'int_add': 2, + 'int_ge': 1, 'guard_false': 1, 'jump': 1, + 'arraylen_gc': 1}) class TestNumpyOld(LLJitMixin): def setup_class(cls): diff --git a/pypy/module/micronumpy/test/test_ztranslation.py b/pypy/module/micronumpy/test/test_ztranslation.py --- a/pypy/module/micronumpy/test/test_ztranslation.py +++ b/pypy/module/micronumpy/test/test_ztranslation.py @@ -1,5 +1,8 @@ - +from pypy.module.micronumpy import signature from pypy.objspace.fake.checkmodule import checkmodule def test_numpy_translates(): + # XXX: If there are signatures floating around this might explode. This fix + # is ugly. + signature.known_sigs.clear() checkmodule('micronumpy') diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -311,7 +311,7 @@ # to repeat it every time ticker_check = """ guard_not_invalidated? - ticker0 = getfield_raw(ticker_address, descr=) + ticker0 = getfield_raw(ticker_address, descr=) ticker_cond0 = int_lt(ticker0, 0) guard_false(ticker_cond0, descr=...) """ @@ -320,9 +320,9 @@ # this is the ticker check generated if we have threads thread_ticker_check = """ guard_not_invalidated? 
- ticker0 = getfield_raw(ticker_address, descr=) + ticker0 = getfield_raw(ticker_address, descr=) ticker1 = int_sub(ticker0, _) - setfield_raw(ticker_address, ticker1, descr=) + setfield_raw(ticker_address, ticker1, descr=) ticker_cond0 = int_lt(ticker1, 0) guard_false(ticker_cond0, descr=...) """ @@ -330,7 +330,7 @@ # # this is the ticker check generated in PyFrame.handle_operation_error exc_ticker_check = """ - ticker2 = getfield_raw(ticker_address, descr=) + ticker2 = getfield_raw(ticker_address, descr=) ticker_cond1 = int_lt(ticker2, 0) guard_false(ticker_cond1, descr=...) """ @@ -451,7 +451,6 @@ try: self.match_loop(expected_ops, ignore_ops) except InvalidMatch, e: - #raise # uncomment this and use py.test --pdb for better debugging print '@' * 40 print "Loops don't match" print "=================" @@ -464,7 +463,7 @@ print print "Expected:" print format(expected_src) - return False + raise # always propagate the exception in case of mismatch else: return True diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -7,8 +7,9 @@ from pypy.tool.udir import udir from pypy.tool import logparser from pypy.jit.tool.jitoutput import parse_prof -from pypy.module.pypyjit.test_pypy_c.model import Log, find_ids_range, find_ids, \ - TraceWithIds, OpMatcher +from pypy.module.pypyjit.test_pypy_c.model import (Log, find_ids_range, + find_ids, TraceWithIds, + OpMatcher, InvalidMatch) class BaseTestPyPyC(object): def setup_class(cls): @@ -115,13 +116,18 @@ assert opcodes_names == ['LOAD_FAST', 'LOAD_CONST', 'BINARY_ADD', 'STORE_FAST'] -class TestOpMatcher(object): +class TestOpMatcher_(object): def match(self, src1, src2, **kwds): from pypy.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) matcher = OpMatcher(loop.operations) - return matcher.match(src2, **kwds) + try: + res = matcher.match(src2, **kwds) + assert res is True + return True + except InvalidMatch: + return False def test_match_var(self): match_var = OpMatcher([]).match_var @@ -447,7 +453,7 @@ jump(p0, p1, p2, p3, i8, descr=...) """) # - assert not loop.match(""" + py.test.raises(InvalidMatch, loop.match, """ i6 = int_lt(i4, 1003) guard_true(i6) i8 = int_add(i5, 1) # variable mismatch @@ -492,9 +498,8 @@ guard_no_exception(descr=...) """) # - assert not loop.match_by_id('ntohs', """ + py.test.raises(InvalidMatch, loop.match_by_id, 'ntohs', """ guard_not_invalidated(descr=...) p12 = call(ConstClass(foobar), 1, descr=...) guard_no_exception(descr=...) """) - diff --git a/pypy/module/pypyjit/test_pypy_c/test__ffi.py b/pypy/module/pypyjit/test_pypy_c/test__ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test__ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test__ffi.py @@ -35,7 +35,7 @@ guard_not_invalidated(descr=...) i17 = force_token() setfield_gc(p0, i17, descr=<.* .*PyFrame.vable_token .*>) - f21 = call_release_gil(%s, 2.000000, 3.000000, descr=) + f21 = call_release_gil(%s, 2.000000, 3.000000, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) """ % pow_addr) diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -42,7 +42,7 @@ guard_not_invalidated(descr=...) i13 = int_lt(i7, i9) guard_true(i13, descr=...) 
- i15 = getarrayitem_raw(i10, i7, descr=<.*ArrayNoLengthDescr>) + i15 = getarrayitem_raw(i10, i7, descr=) i16 = int_add_ovf(i8, i15) guard_no_overflow(descr=...) i18 = int_add(i7, 1) @@ -72,17 +72,17 @@ guard_true(i13, descr=...) guard_not_invalidated(descr=...) # the bound check guard on img has been killed (thanks to the asserts) - i14 = getarrayitem_raw(i10, i8, descr=<.*ArrayNoLengthDescr>) + i14 = getarrayitem_raw(i10, i8, descr=) i15 = int_add_ovf(i9, i14) guard_no_overflow(descr=...) i17 = int_sub(i8, 640) # the bound check guard on intimg has been killed (thanks to the asserts) - i18 = getarrayitem_raw(i11, i17, descr=<.*ArrayNoLengthDescr>) + i18 = getarrayitem_raw(i11, i17, descr=) i19 = int_add_ovf(i18, i15) guard_no_overflow(descr=...) # on 64bit, there is a guard checking that i19 actually fits into 32bit ... - setarrayitem_raw(i11, i8, _, descr=<.*ArrayNoLengthDescr>) + setarrayitem_raw(i11, i8, _, descr=) i28 = int_add(i8, 1) --TICK-- jump(p0, p1, p2, p3, p4, p5, p6, i28, i15, p9, i10, i11, descr=...) @@ -107,10 +107,10 @@ guard_true(i10, descr=...) i11 = int_lt(i6, i7) guard_true(i11, descr=...) - f13 = getarrayitem_raw(i8, i6, descr=) + f13 = getarrayitem_raw(i8, i6, descr=) f15 = float_add(f13, 20.500000) - setarrayitem_raw(i8, i6, f15, descr=) - f16 = getarrayitem_raw(i8, i6, descr=) + setarrayitem_raw(i8, i6, f15, descr=) + f16 = getarrayitem_raw(i8, i6, descr=) i18 = float_eq(f16, 42.000000) guard_true(i18, descr=...) i20 = int_add(i6, 1) @@ -132,28 +132,24 @@ log = self.run(main, []) assert log.result == 321 loop, = log.loops_by_filename(self.filepath) - if sys.maxint == 2147483647: - arraydescr = 'UnsignedArrayNoLengthDescr' - else: - arraydescr = 'UINTArrayNoLengthDescr' assert loop.match(""" i10 = int_lt(i6, 1000) guard_true(i10, descr=...) i11 = int_lt(i6, i7) guard_true(i11, descr=...) - i13 = getarrayitem_raw(i8, i6, descr=<%s>) + i13 = getarrayitem_raw(i8, i6, descr=) f14 = cast_singlefloat_to_float(i13) f16 = float_add(f14, 20.500000) i17 = cast_float_to_singlefloat(f16) - setarrayitem_raw(i8, i6,i17, descr=<%s>) - i18 = getarrayitem_raw(i8, i6, descr=<%s>) + setarrayitem_raw(i8, i6,i17, descr=) + i18 = getarrayitem_raw(i8, i6, descr=) f19 = cast_singlefloat_to_float(i18) i21 = float_eq(f19, 42.000000) guard_true(i21, descr=...) i23 = int_add(i6, 1) --TICK-- jump(..., descr=...) - """ % (arraydescr, arraydescr, arraydescr)) + """) def test_zeropadded(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -75,12 +75,12 @@ assert log.opnames(ops) == [] # assert entry_bridge.match_by_id('call', """ - p38 = call(ConstClass(getexecutioncontext), descr=) - p39 = getfield_gc(p38, descr=) + p38 = call(ConstClass(getexecutioncontext), descr=) + p39 = getfield_gc(p38, descr=) i40 = force_token() - p41 = getfield_gc(p38, descr=) + p41 = getfield_gc(p38, descr=) guard_isnull(p41, descr=...) - i42 = getfield_gc(p38, descr=) + i42 = getfield_gc(p38, descr=) i43 = int_is_zero(i42) guard_true(i43, descr=...) 
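(The model.py/test_00_model.py changes above make OpMatcher.match raise InvalidMatch on failure instead of returning False; the test helper catches the exception to keep a boolean interface, and py.test.raises is used where a mismatch is the expected outcome. The pattern, in a reduced form with hypothetical names:

    class InvalidMatch(Exception):
        pass

    class Matcher(object):
        def match(self, got, expected):
            # raise rather than return False, so callers get full failure context
            if got != expected:
                raise InvalidMatch("%r does not match %r" % (got, expected))
            return True

    def boolean_match(matcher, got, expected):
        # helper for tests that still want a True/False answer
        try:
            return matcher.match(got, expected)
        except InvalidMatch:
            return False

    m = Matcher()
    assert boolean_match(m, "int_add", "int_add")
    assert not boolean_match(m, "int_add", "int_sub")

)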
i50 = force_token() @@ -192,7 +192,7 @@ assert log.result == 1000 loop, = log.loops_by_id('call') assert loop.match_by_id('call', """ - p14 = getarrayitem_gc_pure(p8, i9, descr=) + p14 = getarrayitem_gc_pure(p8, i9, descr=) i14 = force_token() i16 = force_token() """) @@ -336,15 +336,15 @@ loop, = log.loops_by_filename(self.filepath) # the int strategy is used here assert loop.match_by_id('append', """ - i13 = getfield_gc(p8, descr=) + i13 = getfield_gc(p8, descr=) i15 = int_add(i13, 1) # Will be killed by the backend - p15 = getfield_gc(p8, descr=) - i17 = arraylen_gc(p15, descr=) - call(_, p8, i15, descr=) # this is a call to _ll_list_resize_ge_trampoline__... + p15 = getfield_gc(p8, descr=) + i17 = arraylen_gc(p15, descr=) + call(_, p8, i15, descr=) # this is a call to _ll_list_resize_ge_trampoline__... guard_no_exception(descr=...) - p17 = getfield_gc(p8, descr=) - setarrayitem_gc(p17, i13, i12, descr=) + p17 = getfield_gc(p8, descr=) + setarrayitem_gc(p17, i13, i12, descr=) """) def test_blockstack_virtualizable(self): @@ -368,13 +368,13 @@ ... i20 = force_token() p22 = new_with_vtable(19511408) - p24 = new_array(1, descr=) + p24 = new_array(1, descr=) p26 = new_with_vtable(ConstClass(W_ListObject)) - setfield_gc(p0, i20, descr=) - setfield_gc(p26, ConstPtr(ptr22), descr=) - setarrayitem_gc(p24, 0, p26, descr=) - setfield_gc(p22, p24, descr=) - p32 = call_may_force(11376960, p18, p22, descr=) + setfield_gc(p0, i20, descr=) + setfield_gc(p26, ConstPtr(ptr22), descr=) + setarrayitem_gc(p24, 0, p26, descr=) + setfield_gc(p22, p24, descr=) + p32 = call_may_force(11376960, p18, p22, descr=) ... """) @@ -415,26 +415,26 @@ guard_nonnull_class(p8, ConstClass(W_IntObject), descr=...) guard_value(i4, 0, descr=...) guard_value(p3, ConstPtr(ptr14), descr=...) - i15 = getfield_gc_pure(p8, descr=) + i15 = getfield_gc_pure(p8, descr=) i17 = int_lt(i15, 5000) guard_true(i17, descr=...) - p18 = getfield_gc(p0, descr=) + p18 = getfield_gc(p0, descr=) guard_value(p18, ConstPtr(ptr19), descr=...) - p20 = getfield_gc(p18, descr=) + p20 = getfield_gc(p18, descr=) guard_value(p20, ConstPtr(ptr21), descr=...) guard_not_invalidated(descr=...) # most importantly, there is no getarrayitem_gc here - p23 = call(ConstClass(getexecutioncontext), descr=) - p24 = getfield_gc(p23, descr=) + p23 = call(ConstClass(getexecutioncontext), descr=) + p24 = getfield_gc(p23, descr=) i25 = force_token() - p26 = getfield_gc(p23, descr=) + p26 = getfield_gc(p23, descr=) guard_isnull(p26, descr=...) - i27 = getfield_gc(p23, descr=) + i27 = getfield_gc(p23, descr=) i28 = int_is_zero(i27) guard_true(i28, descr=...) - p30 = getfield_gc(ConstPtr(ptr29), descr=) + p30 = getfield_gc(ConstPtr(ptr29), descr=) guard_nonnull_class(p30, ConstClass(W_IntObject), descr=...) - i32 = getfield_gc_pure(p30, descr=) + i32 = getfield_gc_pure(p30, descr=) i33 = int_add_ovf(i15, i32) guard_no_overflow(descr=...) --TICK-- @@ -452,15 +452,15 @@ """, []) loop, = log.loops_by_id('call') assert loop.match(""" - i8 = getfield_gc_pure(p6, descr=) + i8 = getfield_gc_pure(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) i11 = force_token() i13 = int_add(i8, 1) --TICK-- p22 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p22, i13, descr=) - setfield_gc(p4, p22, descr=) + setfield_gc(p22, i13, descr=) + setfield_gc(p4, p22, descr=) jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=...) 
""") diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -46,7 +46,7 @@ assert loop.match_by_id("getitem", """ i26 = call(ConstClass(ll_dict_lookup), p18, p6, i25, descr=...) ... - p33 = getinteriorfield_gc(p31, i26, descr=>) + p33 = getinteriorfield_gc(p31, i26, descr=>) ... """) @@ -86,28 +86,28 @@ i8 = int_lt(i5, i7) guard_true(i8, descr=...) guard_not_invalidated(descr=...) - p10 = call(ConstClass(ll_int_str), i5, descr=) + p10 = call(ConstClass(ll_int_str), i5, descr=) guard_no_exception(descr=...) - i12 = call(ConstClass(ll_strhash), p10, descr=) + i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) - p15 = new_array(8, descr=) - setfield_gc(p13, p15, descr=) - i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) - setfield_gc(p13, 16, descr=) + p15 = new_array(8, descr=) + setfield_gc(p13, p15, descr=) + i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + setfield_gc(p13, 16, descr=) guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) - call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) - setfield_gc(p20, i5, descr=) + call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) + setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) - i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) guard_no_exception(descr=...) i26 = int_and(i23, .*) i27 = int_is_true(i26) guard_false(i27, descr=...) - p28 = getfield_gc(p13, descr=) - p29 = getinteriorfield_gc(p28, i23, descr=>) + p28 = getfield_gc(p13, descr=) + p29 = getinteriorfield_gc(p28, i23, descr=>) guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...) - i31 = getfield_gc_pure(p29, descr=) + i31 = getfield_gc_pure(p29, descr=) i32 = int_sub_ovf(i31, i5) guard_no_overflow(descr=...) i34 = int_add_ovf(i32, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -21,9 +21,9 @@ assert loop.match_by_id("generator", """ i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p45, i29, descr=) - setarrayitem_gc(p8, 0, p45, descr=) - i47 = arraylen_gc(p8, descr=) # Should be removed by backend + setfield_gc(p45, i29, descr=) + setarrayitem_gc(p8, 0, p45, descr=) + i47 = arraylen_gc(p8, descr=) # Should be removed by backend jump(..., descr=...) """) assert loop.match_by_id("subtract", """ diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -16,11 +16,11 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("loadglobal", """ - p10 = getfield_gc(p0, descr=) + p10 = getfield_gc(p0, descr=) guard_value(p10, ConstPtr(ptr11), descr=...) - p12 = getfield_gc(p10, descr=) + p12 = getfield_gc(p10, descr=) guard_value(p12, ConstPtr(ptr13), descr=...) guard_not_invalidated(descr=...) 
- p19 = getfield_gc(ConstPtr(p17), descr=) + p19 = getfield_gc(ConstPtr(p17), descr=) guard_value(p19, ConstPtr(ptr20), descr=...) - """) \ No newline at end of file + """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -125,8 +125,8 @@ i12 = force_token() --TICK-- p20 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p20, i11, descr=) - setfield_gc(ConstPtr(ptr21), p20, descr=) + setfield_gc(p20, i11, descr=) + setfield_gc(ConstPtr(ptr21), p20, descr=) jump(p0, p1, p2, p3, p4, p20, p6, i7, p20, descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -23,8 +23,8 @@ f1 = cast_int_to_float(i0) i3 = float_le(f1, 0) guard_false(i3, descr=...) - f2 = call(ConstClass(log), f1, descr=) - f3 = call(ConstClass(log10), f1, descr=) + f2 = call(ConstClass(log), f1, descr=) + f3 = call(ConstClass(log10), f1, descr=) f4 = float_sub(f2, f3) f5 = float_add(f0, f4) i4 = int_add(i0, 1) @@ -52,8 +52,8 @@ f1 = cast_int_to_float(i0) i6 = --ISINF--(f1) guard_false(i6, descr=...) - f2 = call(ConstClass(sin), f1, descr=) - f3 = call(ConstClass(cos), f1, descr=) + f2 = call(ConstClass(sin), f1, descr=) + f3 = call(ConstClass(cos), f1, descr=) f4 = float_sub(f2, f3) f5 = float_add(f0, f4) i7 = int_add(i0, f1) @@ -84,7 +84,7 @@ i4 = int_or(i2, i3) i5 = int_is_true(i4) guard_false(i5, descr=...) - f2 = call(ConstClass(fmod), f1, 2.0, descr=) + f2 = call(ConstClass(fmod), f1, 2.0, descr=) f3 = float_add(f0, f2) i6 = int_sub(i0, 1) --TICK-- diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -46,7 +46,7 @@ r *= n n -= 1 return r - log = self.run(fact, [7], threshold=5) + log = self.run(fact, [7], threshold=4) assert log.result == 5040 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" @@ -204,18 +204,18 @@ assert log.result == 1000000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - i14 = getfield_gc(p12, descr=) + i14 = getfield_gc(p12, descr=) i16 = uint_ge(i12, i14) guard_false(i16, descr=...) - p16 = getfield_gc(p12, descr=) - p17 = getarrayitem_gc(p16, i12, descr=) + p16 = getfield_gc(p12, descr=) + p17 = getarrayitem_gc(p16, i12, descr=) i19 = int_add(i12, 1) - setfield_gc(p9, i19, descr=) + setfield_gc(p9, i19, descr=) guard_nonnull_class(p17, 146982464, descr=...) - i21 = getfield_gc(p17, descr=) + i21 = getfield_gc(p17, descr=) i23 = int_lt(0, i21) guard_true(i23, descr=...) - i24 = getfield_gc(p17, descr=) + i24 = getfield_gc(p17, descr=) i25 = getarrayitem_raw(i24, 0, descr=<.*>) i27 = int_lt(1, i21) guard_false(i27, descr=...) 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -1,6 +1,9 @@ from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC +# XXX review the descrs to replace some EF=4 with EF=3 (elidable) + + class TestString(BaseTestPyPyC): def test_lookup_default_encoding(self): def main(n): @@ -52,8 +55,8 @@ i += int(long(string.digits[i % len(string.digits)], 16)) return i - log = self.run(main, [1000]) - assert log.result == main(1000) + log = self.run(main, [1100]) + assert log.result == main(1100) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" i11 = int_lt(i6, i7) @@ -72,7 +75,7 @@ i23 = strgetitem(p10, i19) p25 = newstr(1) strsetitem(p25, 0, i23) - p28 = call(ConstClass(strip_spaces), p25, descr=) + p28 = call(ConstClass(strip_spaces), p25, descr=) guard_no_exception(descr=...) i29 = strlen(p28) i30 = int_is_true(i29) @@ -88,9 +91,9 @@ guard_false(i41, descr=...) i43 = int_eq(i39, 43) guard_false(i43, descr=...) - i43 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr42), descr=) + i43 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr42), descr=) guard_false(i43, descr=...) - i46 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr45), descr=) + i46 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr45), descr=) guard_false(i46, descr=...) p51 = new_with_vtable(21136408) setfield_gc(p51, _, descr=...) # 7 setfields, but the order is dict-order-dependent @@ -100,9 +103,9 @@ setfield_gc(p51, _, descr=...) setfield_gc(p51, _, descr=...) setfield_gc(p51, _, descr=...) - p55 = call(ConstClass(parse_digit_string), p51, descr=) + p55 = call(ConstClass(parse_digit_string), p51, descr=) guard_no_exception(descr=...) - i57 = call(ConstClass(rbigint.toint), p55, descr=) + i57 = call(ConstClass(rbigint.toint), p55, descr=) guard_no_exception(descr=...) i58 = int_add_ovf(i6, i57) guard_no_overflow(descr=...) @@ -125,7 +128,7 @@ i7 = int_gt(i4, 0) guard_true(i7, descr=...) guard_not_invalidated(descr=...) - p9 = call(ConstClass(ll_int2dec__Signed), i4, descr=) + p9 = call(ConstClass(ll_int2dec__Signed), i4, descr=) guard_no_exception(descr=...) i10 = strlen(p9) i11 = int_is_true(i10) @@ -149,7 +152,7 @@ copystrcontent(p9, p21, 0, i25, i10) i33 = int_lt(i30, 23) guard_true(i33, descr=...) - p35 = call(ConstClass(ll_shrink_array__rpy_stringPtr_Signed), p21, i30, descr=) + p35 = call(ConstClass(ll_shrink_array__rpy_stringPtr_Signed), p21, i30, descr=) guard_no_exception(descr=...) i37 = strlen(p35) i38 = int_add_ovf(i5, i37) @@ -192,6 +195,6 @@ strsetitem(p35, 3, 104) strsetitem(p35, 4, 95) copystrcontent(p31, p35, 0, 5, i32) - i49 = call(ConstClass(_ll_2_str_eq_nonnull__rpy_stringPtr_rpy_stringPtr), p35, ConstPtr(ptr48), descr=) - guard_value(i49, 1, descr=) + i49 = call(ConstClass(_ll_2_str_eq_nonnull__rpy_stringPtr_rpy_stringPtr), p35, ConstPtr(ptr48), descr=) + guard_value(i49, 1, descr=...) 
''') diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -252,7 +252,7 @@ # grow the list done = 0 while done < len(self._seen_extras): - print self._seen_extras + #print self._seen_extras ann.build_types(self._seen_extras[done], [], complete_now=False) done += 1 diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -514,24 +514,34 @@ if maxsplit == 0: return space.wrap(input) - # An ok guess at the default size - builder = StringBuilder(len(input)) - first = True - if not sub: upper = len(input) if maxsplit > 0 and maxsplit < upper + 2: upper = maxsplit - 1 assert upper >= 0 - first = False + + try: + result_size = ovfcheck(upper * len(by)) + result_size = ovfcheck(result_size + upper) + result_size = ovfcheck(result_size + len(by)) + remaining_size = len(input) - upper + result_size = ovfcheck(result_size + remaining_size) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("replace string is too long") + ) + builder = StringBuilder(result_size) for i in range(upper): builder.append(by) builder.append(input[i]) builder.append(by) builder.append_slice(input, upper, len(input)) else: + # An ok guess for the result size + builder = StringBuilder(len(input)) start = 0 sublen = len(sub) + first = True while maxsplit != 0: next = input.find(sub, start) diff --git a/pypy/objspace/std/test/test_stringobject.py b/pypy/objspace/std/test/test_stringobject.py --- a/pypy/objspace/std/test/test_stringobject.py +++ b/pypy/objspace/std/test/test_stringobject.py @@ -737,13 +737,6 @@ iterable = "hello" raises(TypeError, len, iter(iterable)) - def test_overflow_replace(self): - import sys - if sys.maxint > 2**31-1: - skip("Wrong platform") - x = "A" * (2**16) - raises(OverflowError, x.replace, '', x) - class AppTestPrebuilt(AppTestStringObject): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withprebuiltchar": True}) diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -395,7 +395,6 @@ 'retrace_limit': 5, 'max_retrace_guards': 15, 'enable_opts': 'all', - 'decay_halflife': 40, } unroll_parameters = unrolling_iterable(PARAMETERS.items()) DEFAULT = object() diff --git a/pypy/rlib/rstring.py b/pypy/rlib/rstring.py --- a/pypy/rlib/rstring.py +++ b/pypy/rlib/rstring.py @@ -3,6 +3,7 @@ from pypy.annotation.model import (SomeObject, SomeString, s_None, SomeChar, SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePtr, SomePBC) +from pypy.rlib.rarithmetic import ovfcheck from pypy.tool.pairtype import pair, pairtype from pypy.rpython.extregistry import ExtRegistryEntry @@ -52,25 +53,37 @@ class AbstractStringBuilder(object): def __init__(self, init_size=INIT_SIZE): self.l = [] + self.size = 0 + + def _grow(self, size): + try: + self.size = ovfcheck(self.size + size) + except OverflowError: + raise MemoryError def append(self, s): assert isinstance(s, self.tp) self.l.append(s) + self._grow(len(s)) def append_slice(self, s, start, end): assert isinstance(s, self.tp) assert 0 <= start <= end <= len(s) - self.l.append(s[start:end]) + s = s[start:end] + self.l.append(s) + self._grow(len(s)) def append_multiple_char(self, c, times): assert isinstance(c, self.tp) self.l.append(c * times) + self._grow(times) def append_charpsize(self, s, size): l = [] for i in xrange(size): l.append(s[i]) 
self.l.append(self.tp("").join(l)) + self._grow(size) def build(self): return self.tp("").join(self.l) diff --git a/pypy/rpython/lltypesystem/llarena.py b/pypy/rpython/lltypesystem/llarena.py --- a/pypy/rpython/lltypesystem/llarena.py +++ b/pypy/rpython/lltypesystem/llarena.py @@ -374,6 +374,7 @@ following an object. For arenas containing heterogenous objects. If minsize is specified, it gives a minimum on the resulting size.""" return _round_up_for_allocation(size, minsize) +round_up_for_allocation._annenforceargs_ = [int, int] def _round_up_for_allocation(size, minsize): # internal return RoundedUpForAllocation(size, minsize) diff --git a/pypy/tool/jitlogparser/test/test_modulefinder.py b/pypy/tool/jitlogparser/test/test_modulefinder.py --- a/pypy/tool/jitlogparser/test/test_modulefinder.py +++ b/pypy/tool/jitlogparser/test/test_modulefinder.py @@ -3,7 +3,7 @@ import re, sys def setup_module(mod): - if sys.version_info[:2] != (2.6): + if sys.version_info[:2] != (2, 6): py.test.skip("Specific python 2.6 tests") def test_gather_code_py(): diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -21,6 +21,16 @@ win32api.CloseHandle(proch) except pywintypes.error, e: pass + #Try to avoid opeing a dialog box if one of the tests causes a system error + import ctypes + winapi = ctypes.windll.kernel32 + SEM_FAILCRITICALERRORS = 1 + SEM_NOGPFAULTERRORBOX = 2 + SEM_NOOPENFILEERRORBOX = 0x8000 + flags = SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX + #Since there is no GetErrorMode, do a double Set + old_mode = winapi.SetErrorMode(flags) + winapi.SetErrorMode(old_mode | flags) SIGKILL = SIGTERM = 0 READ_MODE = 'rU' From noreply at buildbot.pypy.org Wed Dec 21 14:25:43 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Wed, 21 Dec 2011 14:25:43 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: avoiding confusion from rare hash collisions Message-ID: <20111221132543.6C5248217E@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50796:707ec0f104cd Date: 2011-12-21 11:44 +0100 http://bitbucket.org/pypy/pypy/changeset/707ec0f104cd/ Log: avoiding confusion from rare hash collisions diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -707,7 +707,7 @@ self.exported_state = None def repr_of_descr(self): - return 'TargetToken(%d)' % compute_identity_hash(self) + return 'TargetToken(%d)' % compute_unique_id(self) class TreeLoop(object): inputargs = None From noreply at buildbot.pypy.org Wed Dec 21 14:25:44 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Wed, 21 Dec 2011 14:25:44 +0100 (CET) Subject: [pypy-commit] pypy jit-multilabel: closing branch for merge Message-ID: <20111221132544.889398217E@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-multilabel Changeset: r50797:67480607b18f Date: 2011-12-21 14:21 +0100 http://bitbucket.org/pypy/pypy/changeset/67480607b18f/ Log: closing branch for merge From noreply at buildbot.pypy.org Wed Dec 21 14:25:46 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Wed, 21 Dec 2011 14:25:46 +0100 (CET) Subject: [pypy-commit] pypy default: Merging jit-multilabel. This is mostly a cleanup of the handling of labels in optimizeop. It does add suppot for optimizing traces with multiple intermediate labels, but that feature is curently only used in tests. 
Message-ID: <20111221132546.0E1408217E@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r50798:d5d0c616af63 Date: 2011-12-21 14:25 +0100 http://bitbucket.org/pypy/pypy/changeset/d5d0c616af63/ Log: Merging jit-multilabel. This is mostly a cleanup of the handling of labels in optimizeop. It does add suppot for optimizing traces with multiple intermediate labels, but that feature is curently only used in tests. diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -105,7 +105,7 @@ def compile_loop(metainterp, greenkey, start, inputargs, jumpargs, - start_resumedescr, full_preamble_needed=True): + resume_at_jump_descr, full_preamble_needed=True): """Try to compile a new procedure by closing the current history back to the first operation. """ @@ -126,10 +126,11 @@ part = create_empty_loop(metainterp) part.inputargs = inputargs[:] h_ops = history.operations - part.start_resumedescr = start_resumedescr + part.resume_at_jump_descr = resume_at_jump_descr part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ [h_ops[i].clone() for i in range(start, len(h_ops))] + \ - [ResOperation(rop.JUMP, jumpargs, None, descr=jitcell_token)] + [ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token)] + try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: @@ -184,7 +185,7 @@ def compile_retrace(metainterp, greenkey, start, inputargs, jumpargs, - start_resumedescr, partial_trace, resumekey): + resume_at_jump_descr, partial_trace, resumekey): """Try to compile a new procedure by closing the current history back to the first operation. """ @@ -200,7 +201,7 @@ part = create_empty_loop(metainterp) part.inputargs = inputargs[:] - part.start_resumedescr = start_resumedescr + part.resume_at_jump_descr = resume_at_jump_descr h_ops = history.operations part.operations = [partial_trace.operations[-1]] + \ @@ -212,13 +213,12 @@ try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: - #return None # XXX: Dissable for now # Fall back on jumping to preamble target_token = label.getdescr() assert isinstance(target_token, TargetToken) assert target_token.exported_state part.operations = [orignial_label] + \ - [ResOperation(rop.JUMP, target_token.exported_state.jump_args, + [ResOperation(rop.JUMP, inputargs[:], None, descr=loop_jitcell_token)] try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts, @@ -751,7 +751,7 @@ metainterp_sd.stats.add_jitcell_token(jitcell_token) -def compile_trace(metainterp, resumekey, start_resumedescr=None): +def compile_trace(metainterp, resumekey, resume_at_jump_descr=None): """Try to compile a new bridge leading from the beginning of the history to some existing place. 
""" @@ -767,7 +767,7 @@ # clone ops, as optimize_bridge can mutate the ops new_trace.operations = [op.clone() for op in metainterp.history.operations] - new_trace.start_resumedescr = start_resumedescr + new_trace.resume_at_jump_descr = resume_at_jump_descr metainterp_sd = metainterp.staticdata state = metainterp.jitdriver_sd.warmstate if isinstance(resumekey, ResumeAtPositionDescr): diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -705,6 +705,9 @@ self.virtual_state = None self.exported_state = None + + def repr_of_descr(self): + return 'TargetToken(%d)' % compute_unique_id(self) class TreeLoop(object): inputargs = None @@ -712,7 +715,7 @@ call_pure_results = None logops = None quasi_immutable_deps = None - start_resumedescr = None + resume_at_jump_descr = None def _token(*args): raise Exception("TreeLoop.token is killed") diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py --- a/pypy/jit/metainterp/logger.py +++ b/pypy/jit/metainterp/logger.py @@ -5,7 +5,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp.history import Const, ConstInt, Box, \ - BoxInt, ConstFloat, BoxFloat, AbstractFailDescr + BoxInt, ConstFloat, BoxFloat, AbstractFailDescr, TargetToken class Logger(object): @@ -135,6 +135,13 @@ fail_args = '' return s_offset + res + op.getopname() + '(' + args + ')' + fail_args + def _log_inputarg_setup_ops(self, op): + target_token = op.getdescr() + if isinstance(target_token, TargetToken): + if target_token.exported_state: + for op in target_token.exported_state.inputarg_setup_ops: + debug_print(' ' + self.repr_of_resop(op)) + def _log_operations(self, inputargs, operations, ops_offset): if not have_debug_prints(): return @@ -146,6 +153,8 @@ for i in range(len(operations)): op = operations[i] debug_print(self.repr_of_resop(operations[i], ops_offset)) + if op.getopnum() == rop.LABEL: + self._log_inputarg_setup_ops(op) if ops_offset and None in ops_offset: offset = ops_offset[None] debug_print("+%d: --end of the loop--" % offset) diff --git a/pypy/jit/metainterp/optimize.py b/pypy/jit/metainterp/optimize.py --- a/pypy/jit/metainterp/optimize.py +++ b/pypy/jit/metainterp/optimize.py @@ -5,58 +5,3 @@ """Raised when the optimize*.py detect that the loop that we are trying to build cannot possibly make sense as a long-running loop (e.g. it cannot run 2 complete iterations).""" - -class RetraceLoop(JitException): - """ Raised when inlining a short preamble resulted in an - InvalidLoop. This means the optimized loop is too specialized - to be useful here, so we trace it again and produced a second - copy specialized in some different way. - """ - -# ____________________________________________________________ - -def optimize_loop(metainterp_sd, old_loop_tokens, loop, enable_opts): - debug_start("jit-optimize") - try: - return _optimize_loop(metainterp_sd, old_loop_tokens, loop, - enable_opts) - finally: - debug_stop("jit-optimize") - -def _optimize_loop(metainterp_sd, old_loop_tokens, loop, enable_opts): - from pypy.jit.metainterp.optimizeopt import optimize_loop_1 - loop.logops = metainterp_sd.logger_noopt.log_loop(loop.inputargs, - loop.operations) - # XXX do we really still need a list? 
- if old_loop_tokens: - return old_loop_tokens[0] - optimize_loop_1(metainterp_sd, loop, enable_opts) - return None - -# ____________________________________________________________ - -def optimize_bridge(metainterp_sd, old_loop_tokens, bridge, enable_opts, - inline_short_preamble=True, retraced=False): - debug_start("jit-optimize") - try: - return _optimize_bridge(metainterp_sd, old_loop_tokens, bridge, - enable_opts, - inline_short_preamble, retraced) - finally: - debug_stop("jit-optimize") - -def _optimize_bridge(metainterp_sd, old_loop_tokens, bridge, enable_opts, - inline_short_preamble, retraced=False): - from pypy.jit.metainterp.optimizeopt import optimize_bridge_1 - bridge.logops = metainterp_sd.logger_noopt.log_loop(bridge.inputargs, - bridge.operations) - if old_loop_tokens: - old_loop_token = old_loop_tokens[0] - bridge.operations[-1].setdescr(old_loop_token) # patch jump target - optimize_bridge_1(metainterp_sd, bridge, enable_opts, - inline_short_preamble, retraced) - return old_loop_tokens[0] - #return bridge.operations[-1].getdescr() - return None - -# ____________________________________________________________ diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -51,34 +51,6 @@ return optimizations, unroll - -def optimize_loop_1(metainterp_sd, loop, enable_opts, - inline_short_preamble=True, retraced=False): - """Optimize loop.operations to remove internal overheadish operations. - """ - - optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts, - inline_short_preamble, retraced) - if unroll: - optimize_unroll(metainterp_sd, loop, optimizations) - else: - optimizer = Optimizer(metainterp_sd, loop, optimizations) - optimizer.propagate_all_forward() - -def optimize_bridge_1(metainterp_sd, bridge, enable_opts, - inline_short_preamble=True, retraced=False): - """The same, but for a bridge. """ - enable_opts = enable_opts.copy() - try: - del enable_opts['unroll'] - except KeyError: - pass - optimize_loop_1(metainterp_sd, bridge, enable_opts, - inline_short_preamble, retraced) - -if __name__ == '__main__': - print ALL_OPTS_NAMES - def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True): """Optimize loop.operations to remove internal overheadish operations. 
""" @@ -96,3 +68,6 @@ finally: debug_stop("jit-optimize") +if __name__ == '__main__': + print ALL_OPTS_NAMES + diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -453,6 +453,7 @@ def clear_newoperations(self): self._newoperations = [] + self.seen_results = {} def make_equal_to(self, box, value, replace=False): assert isinstance(value, OptValue) diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -35,6 +35,9 @@ pass def optimize_LABEL(self, op): + descr = op.getdescr() + if isinstance(descr, JitCellToken): + return self.optimize_JUMP(op.copy_and_change(rop.JUMP)) self.last_label_descr = op.getdescr() self.emit_operation(op) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -9,14 +9,14 @@ class BaseTestMultiLabel(BaseTest): enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" - def optimize_loop(self, ops, expected): + def optimize_loop(self, ops, expected, expected_shorts=None): loop = self.parse(ops) if expected != "crash!": expected = self.parse(expected) part = TreeLoop('part') part.inputargs = loop.inputargs - part.start_resumedescr = FakeDescrWithSnapshot() + part.resume_at_jump_descr = FakeDescrWithSnapshot() token = loop.original_jitcell_token optimized = TreeLoop('optimized') @@ -33,15 +33,17 @@ if nxt < len(loop.operations): label = loop.operations[nxt] assert label.getopnum() == rop.LABEL - jumpop = ResOperation(rop.JUMP, label.getarglist(), - None, descr=token) - operations.append(jumpop) + if label.getdescr() is None: + label.setdescr(token) + operations.append(label) part.operations = operations + self._do_optimize_loop(part, None) if part.operations[-1].getopnum() == rop.LABEL: last_label = [part.operations.pop()] else: last_label = [] + optimized.operations.extend(part.operations) prv = nxt + 1 @@ -54,9 +56,32 @@ print 'Failed!' 
print + shorts = [op.getdescr().short_preamble + for op in optimized.operations + if op.getopnum() == rop.LABEL] + + if expected_shorts: + for short in shorts: + print + print "Short preamble:" + print '\n'.join([str(o) for o in short]) + + assert expected != "crash!", "should have raised an exception" self.assert_equal(optimized, expected) + if expected_shorts: + assert len(shorts) == len(expected_shorts) + for short, expected_short in zip(shorts, expected_shorts): + expected_short = self.parse(expected_short) + short_preamble = TreeLoop('short preamble') + assert short[0].getopnum() == rop.LABEL + short_preamble.inputargs = short[0].getarglist() + short_preamble.operations = short + self.assert_equal(short_preamble, expected_short, + text_right='expected short preamble') + + return optimized def test_simple(self): @@ -194,8 +219,168 @@ """ with raises(InvalidLoop): self.optimize_loop(ops, ops) - + + def test_two_intermediate_labels_basic_1(self): + ops = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = int_add(i1, i3) + label(p1, i4) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + expected = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1, i2) + i4 = int_add(i1, i2) + label(p1, i4) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + short1 = """ + [p1, i1] + label(p1, i1) + i2 = getfield_gc(p1, descr=valuedescr) + jump(p1, i1, i2) + """ + short2 = """ + [p1, i1] + label(p1, i1) + jump(p1, i1) + """ + self.optimize_loop(ops, expected, expected_shorts=[short1, short2]) + + def test_two_intermediate_labels_basic_2(self): + ops = """ + [p1, i1] + i2 = int_add(i1, 1) + label(p1, i1) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = int_add(i1, i3) + label(p1, i4) + i5 = getfield_gc(p1, descr=valuedescr) + i6 = int_add(i4, i5) + jump(p1, i6) + """ + expected = """ + [p1, i1] + i2 = int_add(i1, 1) + label(p1, i1) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = int_add(i1, i3) + label(p1, i4, i3) + i6 = int_add(i4, i3) + jump(p1, i6, i3) + """ + short1 = """ + [p1, i1] + label(p1, i1) + jump(p1, i1) + """ + short2 = """ + [p1, i1] + label(p1, i1) + i2 = getfield_gc(p1, descr=valuedescr) + jump(p1, i1, i2) + """ + self.optimize_loop(ops, expected, expected_shorts=[short1, short2]) + + def test_two_intermediate_labels_both(self): + ops = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = int_add(i1, i3) + label(p1, i4) + i5 = getfield_gc(p1, descr=valuedescr) + i6 = int_mul(i4, i5) + jump(p1, i6) + """ + expected = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1, i2) + i4 = int_add(i1, i2) + label(p1, i4, i2) + i6 = int_mul(i4, i2) + jump(p1, i6, i2) + """ + short = """ + [p1, i1] + label(p1, i1) + i2 = getfield_gc(p1, descr=valuedescr) + jump(p1, i1, i2) + """ + self.optimize_loop(ops, expected, expected_shorts=[short, short]) + + def test_import_across_multiple_labels_basic(self): + # Not supported, juts make sure we get a functional trace + ops = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1) + i3 = int_add(i1, 1) + label(p1, i1) + i4 = getfield_gc(p1, descr=valuedescr) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + self.optimize_loop(ops, ops) + + def test_import_across_multiple_labels_with_duplication(self): + # Not supported, juts make sure we get a functional trace + ops = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i2) + i3 = int_add(i2, 1) + label(p1, i2) + i4 = 
getfield_gc(p1, descr=valuedescr) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + exported = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + i6 = same_as(i2) + label(p1, i2) + i3 = int_add(i2, 1) + label(p1, i2) + i4 = getfield_gc(p1, descr=valuedescr) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + self.optimize_loop(ops, exported) + def test_import_virtual_across_multiple_labels(self): + ops = """ + [p0, i1] + i1a = int_add(i1, 1) + pv = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(pv, i1a, descr=valuedescr) + label(pv, i1) + i2 = int_mul(i1, 3) + label(pv, i2) + i3 = getfield_gc(pv, descr=valuedescr) + i4 = int_add(i3, i2) + jump(pv, i4) + """ + expected = """ + [p0, i1] + i1a = int_add(i1, 1) + i5 = same_as(i1a) + label(i1a, i1) + i2 = int_mul(i1, 3) + label(i1a, i2) + i4 = int_add(i1a, i2) + jump(i1a, i4) + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestMultiLabel, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -4,7 +4,7 @@ LLtypeMixin, BaseTest, Storage, _sortboxes, convert_old_style_to_targets) import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt import pypy.jit.metainterp.optimizeopt.virtualize as virtualize -from pypy.jit.metainterp.optimizeopt import optimize_loop_1, ALL_OPTS_DICT, build_opt_chain +from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT, build_opt_chain from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken @@ -4211,7 +4211,6 @@ preamble = """ [p0] i0 = strlen(p0) - i3 = same_as(i0) # Should be killed by backend jump(p0) """ expected = """ @@ -5668,8 +5667,7 @@ p3 = newstr(i3) copystrcontent(p1, p3, 0, 0, i1) copystrcontent(p2, p3, 0, i1, i2) - i7 = same_as(i2) - jump(p2, p3, i7) + jump(p2, p3, i2) """ expected = """ [p1, p2, i1] @@ -5744,9 +5742,7 @@ copystrcontent(p1, p5, 0, 0, i1) copystrcontent(p2, p5, 0, i1, i2) copystrcontent(p3, p5, 0, i12, i3) - i129 = same_as(i2) - i130 = same_as(i3) - jump(p2, p3, p5, i129, i130) + jump(p2, p3, p5, i2, i3) """ expected = """ [p1, p2, p3, i1, i2] @@ -5959,8 +5955,7 @@ p4 = newstr(i5) copystrcontent(p1, p4, i1, 0, i3) copystrcontent(p2, p4, 0, i3, i4) - i9 = same_as(i4) - jump(p4, i1, i2, p2, i5, i3, i9) + jump(p4, i1, i2, p2, i5, i3, i4) """ expected = """ [p1, i1, i2, p2, i5, i3, i4] @@ -6082,9 +6077,7 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, p3, p4, descr=strequaldescr) escape(i0) - i11 = same_as(i1) - i12 = same_as(i2) - jump(p1, p2, p3, i3, i11, i12) + jump(p1, p2, p3, i3, i1, i2) """ expected = """ [p1, p2, p3, i3, i1, i2] @@ -6304,7 +6297,6 @@ i1 = strlen(p1) i0 = int_eq(i1, 0) escape(i0) - i3 = same_as(i1) jump(p1, i0) """ self.optimize_strunicode_loop_extradescrs(ops, expected, preamble) @@ -6350,9 +6342,7 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, s"hello world", p4, descr=streq_nonnull_descr) escape(i0) - i11 = same_as(i1) - i12 = same_as(i2) - jump(p1, p2, i3, i11, i12) + jump(p1, p2, i3, i1, i2) """ expected = """ [p1, p2, i3, i1, i2] @@ -6925,8 +6915,7 @@ [p9] i843 = strlen(p9) call(i843, descr=nonwritedescr) - i0 = same_as(i843) - jump(p9, i0) + jump(p9, i843) """ short = """ [p9] diff --git a/pypy/jit/metainterp/optimizeopt/test/test_util.py 
b/pypy/jit/metainterp/optimizeopt/test/test_util.py --- a/pypy/jit/metainterp/optimizeopt/test/test_util.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_util.py @@ -430,18 +430,18 @@ preamble = TreeLoop('preamble') preamble.inputargs = inputargs - preamble.start_resumedescr = FakeDescrWithSnapshot() + preamble.resume_at_jump_descr = FakeDescrWithSnapshot() token = JitCellToken() preamble.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(token))] + \ operations + \ - [ResOperation(rop.JUMP, jump_args, None, descr=token)] + [ResOperation(rop.LABEL, jump_args, None, descr=token)] self._do_optimize_loop(preamble, call_pure_results) assert preamble.operations[-1].getopnum() == rop.LABEL inliner = Inliner(inputargs, jump_args) - loop.start_resumedescr = preamble.start_resumedescr + loop.resume_at_jump_descr = preamble.resume_at_jump_descr loop.operations = [preamble.operations[-1]] + \ [inliner.inline_op(op, clone=False) for op in cloned_operations] + \ [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jump_args], diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -3,7 +3,7 @@ from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.history import TreeLoop, TargetToken, JitCellToken from pypy.jit.metainterp.jitexc import JitException -from pypy.jit.metainterp.optimize import InvalidLoop, RetraceLoop +from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.metainterp.optimizeopt.optimizer import * from pypy.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds from pypy.jit.metainterp.inliner import Inliner @@ -51,10 +51,10 @@ distinction anymore)""" inline_short_preamble = True - did_import = False def __init__(self, metainterp_sd, loop, optimizations): self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations) + self.boxes_created_this_iteration = None def fix_snapshot(self, jump_args, snapshot): if snapshot is None: @@ -71,7 +71,6 @@ loop = self.optimizer.loop self.optimizer.clear_newoperations() - start_label = loop.operations[0] if start_label.getopnum() == rop.LABEL: loop.operations = loop.operations[1:] @@ -82,7 +81,7 @@ start_label = None jumpop = loop.operations[-1] - if jumpop.getopnum() == rop.JUMP: + if jumpop.getopnum() == rop.JUMP or jumpop.getopnum() == rop.LABEL: loop.operations = loop.operations[:-1] else: jumpop = None @@ -91,48 +90,87 @@ self.optimizer.propagate_all_forward(clear=False) if not jumpop: - return - if self.jump_to_already_compiled_trace(jumpop): - # Found a compiled trace to jump to - if self.did_import: - - self.close_bridge(start_label) - self.finilize_short_preamble(start_label) return cell_token = jumpop.getdescr() assert isinstance(cell_token, JitCellToken) stop_label = ResOperation(rop.LABEL, jumpop.getarglist(), None, TargetToken(cell_token)) - if not self.did_import: # Enforce the previous behaviour of always peeling exactly one iteration (for now) - self.optimizer.flush() - KillHugeIntBounds(self.optimizer).apply() + + if jumpop.getopnum() == rop.JUMP: + if self.jump_to_already_compiled_trace(jumpop): + # Found a compiled trace to jump to + if self.short: + # Construct our short preamble + assert start_label + self.close_bridge(start_label) + return - loop.operations = self.optimizer.get_newoperations() - self.export_state(stop_label) - loop.operations.append(stop_label) - else: - assert stop_label + if start_label and 
self.jump_to_start_label(start_label, stop_label): + # Initial label matches, jump to it + jumpop = ResOperation(rop.JUMP, stop_label.getarglist(), None, + descr=start_label.getdescr()) + if self.short: + # Construct our short preamble + self.close_loop(start_label, jumpop) + else: + self.optimizer.send_extra_operation(jumpop) + return + + if cell_token.target_tokens: + limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit + if cell_token.retraced_count < limit: + cell_token.retraced_count += 1 + debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) + else: + debug_print("Retrace count reached, jumping to preamble") + assert cell_token.target_tokens[0].virtual_state is None + jumpop.setdescr(cell_token.target_tokens[0]) + self.optimizer.send_extra_operation(jumpop) + return + + # Found nothing to jump to, emit a label instead + + if self.short: + # Construct our short preamble assert start_label - stop_target = stop_label.getdescr() - start_target = start_label.getdescr() - assert isinstance(stop_target, TargetToken) - assert isinstance(start_target, TargetToken) - assert stop_target.targeting_jitcell_token is start_target.targeting_jitcell_token - jumpop = ResOperation(rop.JUMP, stop_label.getarglist(), None, descr=start_label.getdescr()) + self.close_bridge(start_label) - self.close_loop(jumpop) - self.finilize_short_preamble(start_label) + self.optimizer.flush() + KillHugeIntBounds(self.optimizer).apply() + + loop.operations = self.optimizer.get_newoperations() + self.export_state(stop_label) + loop.operations.append(stop_label) + + def jump_to_start_label(self, start_label, stop_label): + if not start_label or not stop_label: + return False + + stop_target = stop_label.getdescr() + start_target = start_label.getdescr() + assert isinstance(stop_target, TargetToken) + assert isinstance(start_target, TargetToken) + if stop_target.targeting_jitcell_token is not start_target.targeting_jitcell_token: + return False + + return True + + #args = stop_label.getarglist() + #modifier = VirtualStateAdder(self.optimizer) + #virtual_state = modifier.get_virtual_state(args) + #if self.initial_virtual_state.generalization_of(virtual_state): + # return True + def export_state(self, targetop): original_jump_args = targetop.getarglist() jump_args = [self.getvalue(a).get_key_box() for a in original_jump_args] - assert self.optimizer.loop.start_resumedescr - start_resumedescr = self.optimizer.loop.start_resumedescr.clone_if_mutable() - assert isinstance(start_resumedescr, ResumeGuardDescr) - start_resumedescr.rd_snapshot = self.fix_snapshot(jump_args, start_resumedescr.rd_snapshot) - # FIXME: I dont thnik we need fix_snapshot anymore + assert self.optimizer.loop.resume_at_jump_descr + resume_at_jump_descr = self.optimizer.loop.resume_at_jump_descr.clone_if_mutable() + assert isinstance(resume_at_jump_descr, ResumeGuardDescr) + resume_at_jump_descr.rd_snapshot = self.fix_snapshot(jump_args, resume_at_jump_descr.rd_snapshot) modifier = VirtualStateAdder(self.optimizer) virtual_state = modifier.get_virtual_state(jump_args) @@ -141,26 +179,21 @@ inputargs = virtual_state.make_inputargs(values, self.optimizer) short_inputargs = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True) - constant_inputargs = {} - for box in jump_args: - const = self.get_constant_box(box) - if const: - constant_inputargs[box] = const - short_boxes = ShortBoxes(self.optimizer, inputargs + constant_inputargs.keys()) - aliased_vrituals = {} - for i in 
range(len(original_jump_args)): - if original_jump_args[i] is not jump_args[i]: - if values[i].is_virtual(): - aliased_vrituals[original_jump_args[i]] = jump_args[i] - else: - short_boxes.alias(original_jump_args[i], jump_args[i]) + if self.boxes_created_this_iteration is not None: + for box in self.inputargs: + self.boxes_created_this_iteration[box] = True + + short_boxes = ShortBoxes(self.optimizer, inputargs, + self.boxes_created_this_iteration) self.optimizer.clear_newoperations() - for box in short_inputargs: - value = self.getvalue(box) - if value.is_virtual(): - value.force_box(self.optimizer) + for i in range(len(original_jump_args)): + if values[i].is_virtual(): + values[i].force_box(self.optimizer) + if original_jump_args[i] is not jump_args[i]: + op = ResOperation(rop.SAME_AS, [jump_args[i]], original_jump_args[i]) + self.optimizer.emit_operation(op) inputarg_setup_ops = self.optimizer.get_newoperations() target_token = targetop.getdescr() @@ -168,78 +201,76 @@ targetop.initarglist(inputargs) target_token.virtual_state = virtual_state target_token.short_preamble = [ResOperation(rop.LABEL, short_inputargs, None)] - target_token.start_resumedescr = start_resumedescr - target_token.exported_state = ExportedState(constant_inputargs, short_boxes, - inputarg_setup_ops, self.optimizer, - aliased_vrituals, jump_args) + target_token.resume_at_jump_descr = resume_at_jump_descr + + exported_values = {} + for box in inputargs: + exported_values[box] = self.optimizer.getvalue(box) + for op in short_boxes.operations(): + if op and op.result: + box = op.result + exported_values[box] = self.optimizer.getvalue(box) + + target_token.exported_state = ExportedState(short_boxes, inputarg_setup_ops, + exported_values) def import_state(self, targetop): - self.did_import = False - if not targetop: - # FIXME: Set up some sort of empty state with no virtuals? 
+ if not targetop: # Trace did not start with a label + self.inputargs = self.optimizer.loop.inputargs + self.short = None + self.initial_virtual_state = None return + + self.inputargs = targetop.getarglist() target_token = targetop.getdescr() - if not target_token: - return assert isinstance(target_token, TargetToken) exported_state = target_token.exported_state if not exported_state: - # FIXME: Set up some sort of empty state with no virtuals + # No state exported, construct one without virtuals + self.short = None + modifier = VirtualStateAdder(self.optimizer) + virtual_state = modifier.get_virtual_state(self.inputargs) + self.initial_virtual_state = virtual_state return - self.did_import = True self.short = target_token.short_preamble[:] self.short_seen = {} - self.short_boxes = exported_state.short_boxes.clone() - for box, const in exported_state.constant_inputargs.items(): - self.short_seen[box] = True - self.imported_state = exported_state - self.inputargs = targetop.getarglist() + self.short_boxes = exported_state.short_boxes + self.short_resume_at_jump_descr = target_token.resume_at_jump_descr self.initial_virtual_state = target_token.virtual_state - self.start_resumedescr = target_token.start_resumedescr seen = {} for box in self.inputargs: if box in seen: continue seen[box] = True - preamble_value = exported_state.optimizer.getvalue(box) + preamble_value = exported_state.exported_values[box] value = self.optimizer.getvalue(box) value.import_from(preamble_value, self.optimizer) - for newbox, oldbox in self.short_boxes.aliases.items(): - self.optimizer.make_equal_to(newbox, self.optimizer.getvalue(oldbox)) - # Setup the state of the new optimizer by emiting the # short operations and discarding the result self.optimizer.emitting_dissabled = True for op in exported_state.inputarg_setup_ops: self.optimizer.send_extra_operation(op) + seen = {} - for op in self.short_boxes.operations(): self.ensure_short_op_emitted(op, self.optimizer, seen) if op and op.result: - preamble_value = exported_state.optimizer.getvalue(op.result) + preamble_value = exported_state.exported_values[op.result] value = self.optimizer.getvalue(op.result) if not value.is_virtual(): imp = ValueImporter(self, preamble_value, op) self.optimizer.importable_values[value] = imp newvalue = self.optimizer.getvalue(op.result) newresult = newvalue.get_key_box() - if newresult is not op.result and not newvalue.is_constant(): - self.short_boxes.alias(newresult, op.result) - op = ResOperation(rop.SAME_AS, [op.result], newresult) - self.optimizer._newoperations = [op] + self.optimizer._newoperations # XXX - #self.optimizer.getvalue(op.result).box = op.result # FIXME: HACK!!! 
+ assert newresult is op.result or newvalue.is_constant() self.optimizer.flush() self.optimizer.emitting_dissabled = False - for box, key_box in exported_state.aliased_vrituals.items(): - self.optimizer.make_equal_to(box, self.getvalue(key_box)) - def close_bridge(self, start_label): - inputargs = self.inputargs + inputargs = self.inputargs short_jumpargs = inputargs[:] # We dont need to inline the short preamble we are creating as we are conneting @@ -249,8 +280,6 @@ newoperations = self.optimizer.get_newoperations() self.boxes_created_this_iteration = {} i = 0 - while newoperations[i].getopnum() != rop.LABEL: - i += 1 while i < len(newoperations): op = newoperations[i] self.boxes_created_this_iteration[op.result] = True @@ -262,11 +291,11 @@ i += 1 newoperations = self.optimizer.get_newoperations() self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=start_label.getdescr())) - - def close_loop(self, jumpop): + self.finilize_short_preamble(start_label) + + def close_loop(self, start_label, jumpop): virtual_state = self.initial_virtual_state short_inputargs = self.short[0].getarglist() - constant_inputargs = self.imported_state.constant_inputargs inputargs = self.inputargs short_jumpargs = inputargs[:] @@ -289,8 +318,6 @@ raise InvalidLoop args[short_inputargs[i]] = jmp_to_short_args[i] self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) - for box, const in constant_inputargs.items(): - self.short_inliner.argmap[box] = const for op in self.short[1:]: newop = self.short_inliner.inline_op(op) self.optimizer.send_extra_operation(newop) @@ -299,8 +326,6 @@ newoperations = self.optimizer.get_newoperations() self.boxes_created_this_iteration = {} i = j = 0 - while newoperations[i].getopnum() != rop.LABEL: - i += 1 while i < len(newoperations) or j < len(jumpargs): if i == len(newoperations): while j < len(jumpargs): @@ -353,6 +378,8 @@ assert isinstance(target_token, TargetToken) target_token.targeting_jitcell_token.retraced_count = sys.maxint + self.finilize_short_preamble(start_label) + def finilize_short_preamble(self, start_label): short = self.short assert short[-1].getopnum() == rop.JUMP @@ -365,7 +392,7 @@ if op.is_guard(): op = op.clone() op.setfailargs(None) - descr = target_token.start_resumedescr.clone_if_mutable() + descr = target_token.resume_at_jump_descr.clone_if_mutable() op.setdescr(descr) short[i] = op @@ -381,13 +408,11 @@ newargs[i] = a.clonebox() boxmap[a] = newargs[i] inliner = Inliner(short_inputargs, newargs) - for box, const in self.imported_state.constant_inputargs.items(): - inliner.argmap[box] = const for i in range(len(short)): short[i] = inliner.inline_op(short[i]) - target_token.start_resumedescr = self.start_resumedescr.clone_if_mutable() - inliner.inline_descr_inplace(target_token.start_resumedescr) + target_token.resume_at_jump_descr = target_token.resume_at_jump_descr.clone_if_mutable() + inliner.inline_descr_inplace(target_token.resume_at_jump_descr) # Forget the values to allow them to be freed for box in short[0].getarglist(): @@ -398,31 +423,6 @@ target_token.short_preamble = self.short target_token.exported_state = None - - def FIXME_old_stuff(): - preamble_optimizer = self.optimizer - loop.preamble.quasi_immutable_deps = ( - self.optimizer.quasi_immutable_deps) - self.optimizer = self.optimizer.new() - loop.quasi_immutable_deps = self.optimizer.quasi_immutable_deps - - - loop.inputargs = inputargs - args = [preamble_optimizer.getvalue(self.short_boxes.original(a)).force_box(preamble_optimizer)\ - for a in inputargs] - jmp = 
ResOperation(rop.JUMP, args, None) - jmp.setdescr(loop.token) - loop.preamble.operations.append(jmp) - - loop.operations = self.optimizer.get_newoperations() - maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards - - if self.optimizer.emitted_guards > maxguards: - loop.preamble.token.retraced_count = sys.maxint - - if short: - pass - def ensure_short_op_emitted(self, op, optimizer, seen): if op is None: return @@ -450,7 +450,7 @@ if not isinstance(a, Const) and a not in self.short_seen: self.add_op_to_short(self.short_boxes.producer(a), emit, guards_needed) if op.is_guard(): - descr = self.start_resumedescr.clone_if_mutable() + descr = self.short_resume_at_jump_descr.clone_if_mutable() op.setdescr(descr) if guards_needed and self.short_boxes.has_producer(op.result): @@ -549,7 +549,7 @@ for guard in extra_guards: if guard.is_guard(): - descr = target.start_resumedescr.clone_if_mutable() + descr = target.resume_at_jump_descr.clone_if_mutable() inliner.inline_descr_inplace(descr) guard.setdescr(descr) self.optimizer.send_extra_operation(guard) @@ -566,20 +566,7 @@ self.optimizer.send_extra_operation(jumpop) return True debug_stop('jit-log-virtualstate') - - if self.did_import: - return False - limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit - if cell_token.retraced_count Author: Armin Rigo Branch: Changeset: r50799:da6ef5856827 Date: 2011-12-20 21:25 +0100 http://bitbucket.org/pypy/pypy/changeset/da6ef5856827/ Log: Improve the test to also have ConstFloatLocs. diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -20,6 +20,11 @@ def regalloc_pop(self, loc): self.ops.append(('pop', loc)) + def regalloc_immedmem2mem(self, from_loc, to_loc): + assert isinstance(from_loc, ConstFloatLoc) + assert isinstance(to_loc, StackLoc) + self.ops.append(('immedmem2mem', from_loc, to_loc)) + def got(self, expected): print '------------------------ comparing ---------------------------' for op1, op2 in zip(self.ops, expected): @@ -244,6 +249,13 @@ else: return pick1() # + def pick2c(): + n = random.randrange(-2000, 500) + if n >= 0: + return ConstFloatLoc(n) # n is the address, not really used here + else: + return pick2() + # def pick_dst(fn, count, seen): result = [] while len(result) < count: @@ -280,12 +292,12 @@ if loc.get_width() > WORD: stack[loc.value+WORD] = 'value-hiword-%d' % i else: - assert isinstance(loc, ImmedLoc) + assert isinstance(loc, (ImmedLoc, ConstFloatLoc)) return regs1, regs2, stack # for i in range(500): seen = {} - src_locations2 = [pick2() for i in range(4)] + src_locations2 = [pick2c() for i in range(4)] dst_locations2 = pick_dst(pick2, 4, seen) src_locations1 = [pick1c() for i in range(5)] dst_locations1 = pick_dst(pick1, 5, seen) @@ -312,9 +324,15 @@ return got if isinstance(loc, ImmedLoc): return 'const-%d' % loc.value + if isinstance(loc, ConstFloatLoc): + got = 'constfloat-@%d' % loc.value + if loc.get_width() > WORD: + got = (got, 'constfloat-next-@%d' % loc.value) + return got assert 0, loc # def write(loc, newvalue): + assert (type(newvalue) is tuple) == (loc.get_width() > WORD) if isinstance(loc, RegLoc): if loc.is_xmm: regs2[loc.value] = newvalue @@ -337,10 +355,14 @@ for op in assembler.ops: if op[0] == 'mov': src, dst = op[1:] - assert isinstance(src, (RegLoc, StackLoc, ImmedLoc)) - assert isinstance(dst, (RegLoc, StackLoc)) - assert not (isinstance(src, StackLoc) and - 
isinstance(dst, StackLoc)) + if isinstance(src, ConstFloatLoc): + assert isinstance(dst, RegLoc) + assert dst.is_xmm + else: + assert isinstance(src, (RegLoc, StackLoc, ImmedLoc)) + assert isinstance(dst, (RegLoc, StackLoc)) + assert not (isinstance(src, StackLoc) and + isinstance(dst, StackLoc)) write(dst, read(src)) elif op[0] == 'push': src, = op[1:] @@ -350,6 +372,11 @@ dst, = op[1:] assert isinstance(dst, (RegLoc, StackLoc)) write(dst, extrapushes.pop()) + elif op[0] == 'immedmem2mem': + src, dst = op[1:] + assert isinstance(src, ConstFloatLoc) + assert isinstance(dst, StackLoc) + write(dst, read(src, 8)) else: assert 0, "unknown op: %r" % (op,) assert not extrapushes From noreply at buildbot.pypy.org Wed Dec 21 14:30:55 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Dec 2011 14:30:55 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20111221133055.F3A108217E@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50800:9c5561654700 Date: 2011-12-21 10:46 +0100 http://bitbucket.org/pypy/pypy/changeset/9c5561654700/ Log: merge heads diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -45,6 +45,8 @@ import pypy.module.cpyext.longobject import pypy.module.cpyext.listobject import pypy.module.cpyext.sequence +import pypy.module.cpyext.buffer +import pypy.module.cpyext.bufferobject import pypy.module.cpyext.eval import pypy.module.cpyext.import_ import pypy.module.cpyext.mapping diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -317,6 +317,10 @@ INTERPLEVEL_API = {} FUNCTIONS = {} + +# These are C symbols which cpyext will export, but which are defined in .c +# files somewhere in the implementation of cpyext (rather than being defined in +# RPython). SYMBOLS_C = [ 'Py_FatalError', 'PyOS_snprintf', 'PyOS_vsnprintf', 'PyArg_Parse', 'PyArg_ParseTuple', 'PyArg_UnpackTuple', 'PyArg_ParseTupleAndKeywords', diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/buffer.py @@ -0,0 +1,11 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, CANNOT_FAIL, Py_buffer) + + at cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL) +def PyBuffer_IsContiguous(space, view, fortran): + """Return 1 if the memory defined by the view is C-style (fortran is + 'C') or Fortran-style (fortran is 'F') contiguous or either one + (fortran is 'A'). Return 0 otherwise.""" + # PyPy only supports contiguous Py_buffers for now. 
+ return space.wrap(1) diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/bufferobject.py @@ -0,0 +1,66 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, + PyObjectFields, PyObject) +from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef +from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer + + +PyBufferObjectStruct = lltype.ForwardReference() +PyBufferObject = lltype.Ptr(PyBufferObjectStruct) +PyBufferObjectFields = PyObjectFields + ( + ("b_base", PyObject), + ("b_ptr", rffi.VOIDP), + ("b_size", Py_ssize_t), + ("b_offset", Py_ssize_t), + ("b_readonly", rffi.INT), + ("b_hash", rffi.LONG), + ) + +cpython_struct("PyBufferObject", PyBufferObjectFields, PyBufferObjectStruct) + + at bootstrap_function +def init_bufferobject(space): + "Type description of PyBufferObject" + make_typedescr(space.gettypefor(Buffer).instancetypedef, + basestruct=PyBufferObject.TO, + attach=buffer_attach, + dealloc=buffer_dealloc, + realize=buffer_realize) + +def buffer_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyBufferObject with the given (str) buffer object. + """ + py_buf = rffi.cast(PyBufferObject, py_obj) + py_buf.c_b_offset = 0 + rffi.setintfield(py_buf, 'c_b_readonly', 1) + rffi.setintfield(py_buf, 'c_b_hash', -1) + + if isinstance(w_obj, SubBuffer): + py_buf.c_b_offset = w_obj.offset + w_obj = w_obj.buffer + + if isinstance(w_obj, StringBuffer): + py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value) + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.as_str())) + py_buf.c_b_size = w_obj.getlength() + else: + raise Exception("Fail fail fail fail fail") + + +def buffer_realize(space, py_obj): + """ + Creates the buffer in the PyPy interpreter from a cpyext representation. 
+ """ + raise Exception("realize fail fail fail") + + + + at cpython_api([PyObject], lltype.Void, external=False) +def buffer_dealloc(space, py_obj): + py_buf = rffi.cast(PyBufferObject, py_obj) + Py_DecRef(space, py_buf.c_b_base) + rffi.free_charp(rffi.cast(rffi.CCHARP, py_buf.c_b_ptr)) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) diff --git a/pypy/module/cpyext/include/bufferobject.h b/pypy/module/cpyext/include/bufferobject.h --- a/pypy/module/cpyext/include/bufferobject.h +++ b/pypy/module/cpyext/include/bufferobject.h @@ -9,6 +9,17 @@ extern "C" { #endif +typedef struct { + PyObject_HEAD + PyObject *b_base; + void *b_ptr; + Py_ssize_t b_size; + Py_ssize_t b_offset; + int b_readonly; + long b_hash; +} PyBufferObject; + + PyAPI_DATA(PyTypeObject) PyBuffer_Type; #define PyBuffer_Check(op) (((PyObject*)(op))->ob_type == &PyBuffer_Type) diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -234,7 +234,7 @@ writebufferproc bf_getwritebuffer; segcountproc bf_getsegcount; charbufferproc bf_getcharbuffer; - getbufferproc bf_getbuffer; + getbufferproc bf_getbuffer; releasebufferproc bf_releasebuffer; } PyBufferProcs; diff --git a/pypy/module/cpyext/src/bufferobject.c b/pypy/module/cpyext/src/bufferobject.c --- a/pypy/module/cpyext/src/bufferobject.c +++ b/pypy/module/cpyext/src/bufferobject.c @@ -4,17 +4,6 @@ #include "Python.h" -typedef struct { - PyObject_HEAD - PyObject *b_base; - void *b_ptr; - Py_ssize_t b_size; - Py_ssize_t b_offset; - int b_readonly; - long b_hash; -} PyBufferObject; - - enum buffer_t { READ_BUFFER, WRITE_BUFFER, diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -777,18 +777,14 @@ Py_buffer *p = (Py_buffer *)va_arg(*p_va, Py_buffer *); if (PyString_Check(arg)) { + fflush(stdout); PyBuffer_FillInfo(p, arg, PyString_AS_STRING(arg), PyString_GET_SIZE(arg), 1, 0); - } else { - PyErr_SetString( - PyExc_NotImplementedError, - "s* not implemented for non-string values"); - return NULL; - } -#if 0 + } #ifdef Py_USING_UNICODE else if (PyUnicode_Check(arg)) { +#if 0 uarg = UNICODE_DEFAULT_ENCODING(arg); if (uarg == NULL) return converterr(CONV_UNICODE, @@ -796,6 +792,9 @@ PyBuffer_FillInfo(p, arg, PyString_AS_STRING(uarg), PyString_GET_SIZE(uarg), 1, 0); +#else + return converterr("string or buffer", arg, msgbuf, bufsize); +#endif } #endif else { /* any buffer-like object */ @@ -803,7 +802,6 @@ if (getbuffer(arg, p, &buf) < 0) return converterr(buf, arg, msgbuf, bufsize); } -#endif if (addcleanup(p, freelist, cleanup_buffer)) { return converterr( "(cleanup problem)", @@ -1342,7 +1340,6 @@ return count; } -#if 0 //YYY static int getbuffer(PyObject *arg, Py_buffer *view, char **errmsg) { @@ -1373,7 +1370,6 @@ PyBuffer_FillInfo(view, NULL, buf, count, 1, 0); return 0; } -#endif /* Support for keyword arguments donated by Geoff Philbrick */ diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -1,5 +1,5 @@ from pypy.module.cpyext.api import ( - cpython_api, PyObject, PyObjectP, CANNOT_FAIL + cpython_api, PyObject, PyObjectP, CANNOT_FAIL, Py_buffer ) from pypy.module.cpyext.complexobject import Py_complex_ptr as Py_complex from pypy.rpython.lltypesystem import rffi, lltype @@ -10,7 +10,6 @@ PyMethodDef = rffi.VOIDP PyGetSetDef 
= rffi.VOIDP PyMemberDef = rffi.VOIDP -Py_buffer = rffi.VOIDP va_list = rffi.VOIDP PyDateTime_Date = rffi.VOIDP PyDateTime_DateTime = rffi.VOIDP @@ -178,13 +177,6 @@ ~Py_buffer.format.""" raise NotImplementedError - at cpython_api([Py_buffer, lltype.Char], rffi.INT_real, error=CANNOT_FAIL) -def PyBuffer_IsContiguous(space, view, fortran): - """Return 1 if the memory defined by the view is C-style (fortran is - 'C') or Fortran-style (fortran is 'F') contiguous or either one - (fortran is 'A'). Return 0 otherwise.""" - raise NotImplementedError - @cpython_api([rffi.INT_real, Py_ssize_t, Py_ssize_t, Py_ssize_t, lltype.Char], lltype.Void) def PyBuffer_FillContiguousStrides(space, ndim, shape, strides, itemsize, fortran): """Fill the strides array with byte-strides of a contiguous (C-style if diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -129,6 +129,21 @@ assert 'foo\0bar\0baz' == pybuffer('foo\0bar\0baz') + def test_pyarg_parse_string_old_buffer(self): + pybuffer = self.import_parser( + ''' + Py_buffer buf; + PyObject *result; + if (!PyArg_ParseTuple(args, "s*", &buf)) { + return NULL; + } + result = PyString_FromStringAndSize(buf.buf, buf.len); + PyBuffer_Release(&buf); + return result; + ''') + assert 'foo\0bar\0baz' == pybuffer(buffer('foo\0bar\0baz')) + + def test_pyarg_parse_charbuf_and_length(self): """ The `t#` format specifier can be used to parse a read-only 8-bit diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -28,6 +28,7 @@ PyNumberMethods, PyMappingMethods, PySequenceMethods, PyBufferProcs) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) +from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError from pypy.rlib.rstring import rsplit from pypy.rlib.objectmodel import specialize @@ -418,8 +419,21 @@ Py_DecRef(space, pyref) return space.len_w(w_str) + at cpython_api([PyObject, Py_ssize_t, rffi.VOIDPP], lltype.Signed, + external=False, error=-1) +def buf_getreadbuffer(space, pyref, segment, ref): + from pypy.module.cpyext.bufferobject import PyBufferObject + if segment != 0: + raise OperationError(space.w_SystemError, space.wrap + ("accessing non-existent string segment")) + py_buf = rffi.cast(PyBufferObject, pyref) + ref[0] = py_buf.c_b_ptr + #Py_DecRef(space, pyref) + return py_buf.c_b_size + def setup_string_buffer_procs(space, pto): c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) + lltype.render_immortal(c_buf) c_buf.c_bf_getsegcount = llhelper(str_segcount.api_func.functype, str_segcount.api_func.get_wrapper(space)) c_buf.c_bf_getreadbuffer = llhelper(str_getreadbuffer.api_func.functype, @@ -429,6 +443,15 @@ pto.c_tp_as_buffer = c_buf pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER +def setup_buffer_buffer_procs(space, pto): + c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) + lltype.render_immortal(c_buf) + c_buf.c_bf_getsegcount = llhelper(str_segcount.api_func.functype, + str_segcount.api_func.get_wrapper(space)) + c_buf.c_bf_getreadbuffer = llhelper(buf_getreadbuffer.api_func.functype, + buf_getreadbuffer.api_func.get_wrapper(space)) + pto.c_tp_as_buffer = c_buf + @cpython_api([PyObject], lltype.Void, external=False) def type_dealloc(space, obj): from pypy.module.cpyext.object import PyObject_dealloc @@ 
-484,6 +507,8 @@ # buffer protocol if space.is_w(w_type, space.w_str): setup_string_buffer_procs(space, pto) + if space.is_w(w_type, space.gettypefor(Buffer)): + setup_buffer_buffer_procs(space, pto) pto.c_tp_free = llhelper(PyObject_Del.api_func.functype, PyObject_Del.api_func.get_wrapper(space)) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -21,7 +21,6 @@ _immutable_fields_ = ["itemtype", "num", "kind"] def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[]): - self.signature = signature.BaseSignature() self.itemtype = itemtype self.num = num self.kind = kind @@ -228,4 +227,4 @@ ) def get_dtype_cache(space): - return space.fromcache(DtypeCache) \ No newline at end of file + return space.fromcache(DtypeCache) diff --git a/pypy/module/micronumpy/interp_extras.py b/pypy/module/micronumpy/interp_extras.py --- a/pypy/module/micronumpy/interp_extras.py +++ b/pypy/module/micronumpy/interp_extras.py @@ -4,4 +4,4 @@ @unwrap_spec(array=BaseArray) def debug_repr(space, array): - return space.wrap(array.debug_repr()) + return space.wrap(array.find_sig().debug_repr()) diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_iter.py @@ -0,0 +1,104 @@ + +from pypy.rlib import jit +from pypy.rlib.objectmodel import instantiate +from pypy.module.micronumpy.strides import calculate_broadcast_strides + +# Iterators for arrays +# -------------------- +# all those iterators with the exception of BroadcastIterator iterate over the +# entire array in C order (the last index changes the fastest). This will +# yield all elements. Views iterate over indices and look towards strides and +# backstrides to find the correct position. Notably the offset between +# x[..., i + 1] and x[..., i] will be strides[-1]. Offset between +# x[..., k + 1, 0] and x[..., k, i_max] will be backstrides[-2] etc. 
+ +# BroadcastIterator works like that, but for indexes that don't change source +# in the original array, strides[i] == backstrides[i] == 0 + +class BaseIterator(object): + def next(self, shapelen): + raise NotImplementedError + + def done(self): + raise NotImplementedError + +class ArrayIterator(BaseIterator): + def __init__(self, size): + self.offset = 0 + self.size = size + + def next(self, shapelen): + arr = instantiate(ArrayIterator) + arr.size = self.size + arr.offset = self.offset + 1 + return arr + + def done(self): + return self.offset >= self.size + +class OneDimIterator(BaseIterator): + def __init__(self, start, step, stop): + self.offset = start + self.step = step + self.size = stop * step + start + + def next(self, shapelen): + arr = instantiate(OneDimIterator) + arr.size = self.size + arr.step = self.step + arr.offset = self.offset + self.step + return arr + + def done(self): + return self.offset == self.size + +def view_iter_from_arr(arr): + return ViewIterator(arr.start, arr.strides, arr.backstrides, arr.shape) + +class ViewIterator(BaseIterator): + def __init__(self, start, strides, backstrides, shape, res_shape=None): + self.offset = start + self._done = False + if res_shape is not None and res_shape != shape: + r = calculate_broadcast_strides(strides, backstrides, + shape, res_shape) + self.strides, self.backstrides = r + self.res_shape = res_shape + else: + self.strides = strides + self.backstrides = backstrides + self.res_shape = shape + self.indices = [0] * len(self.res_shape) + + @jit.unroll_safe + def next(self, shapelen): + offset = self.offset + indices = [0] * shapelen + for i in range(shapelen): + indices[i] = self.indices[i] + done = False + for i in range(shapelen - 1, -1, -1): + if indices[i] < self.res_shape[i] - 1: + indices[i] += 1 + offset += self.strides[i] + break + else: + indices[i] = 0 + offset -= self.backstrides[i] + else: + done = True + res = instantiate(ViewIterator) + res.offset = offset + res.indices = indices + res.strides = self.strides + res.backstrides = self.backstrides + res.res_shape = self.res_shape + res._done = done + return res + + def done(self): + return self._done + +class ConstantIterator(BaseIterator): + def next(self, shapelen): + return self diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -3,28 +3,33 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import interp_ufuncs, interp_dtype, signature +from pypy.module.micronumpy.strides import calculate_slice_strides from pypy.rlib import jit from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name from pypy.rlib.rstring import StringBuilder -from pypy.rlib.objectmodel import instantiate - +from pypy.module.micronumpy.interp_iter import ArrayIterator,\ + view_iter_from_arr, OneDimIterator numpy_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['result_size', 'i', 'ri', 'self', 'result'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['result_size', 'frame', 'ri', 'self', 'result'] ) all_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['i', 'self', 'dtype'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['frame', 'self', 'dtype'] ) any_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - 
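As a rough illustration of the stride bookkeeping described in the iterator comments above, here is a plain-Python sketch (not part of the changeset; the helper name walk_offsets is invented for the example) that mirrors the offset arithmetic of ViewIterator.next for a small C-ordered 2x3 view, and shows how zero strides/backstrides on a broadcast dimension make the same offsets repeat:

    # Sketch only: mirrors the index/offset arithmetic used by ViewIterator.next.
    # walk_offsets is a hypothetical helper, not part of the changeset.
    def walk_offsets(shape, strides, backstrides, start=0):
        indices = [0] * len(shape)
        offset = start
        offsets = [offset]
        while True:
            for i in range(len(shape) - 1, -1, -1):
                if indices[i] < shape[i] - 1:
                    indices[i] += 1           # advance the innermost index that can still move
                    offset += strides[i]
                    break
                else:
                    indices[i] = 0            # wrap this dimension and undo its contribution
                    offset -= backstrides[i]
            else:
                return offsets                # every index wrapped around: iteration is done
            offsets.append(offset)

    # 2x3 array stored contiguously in C order: strides [3, 1],
    # backstrides [(2 - 1) * 3, (3 - 1) * 1] = [3, 2]
    print(walk_offsets([2, 3], [3, 1], [3, 2]))    # [0, 1, 2, 3, 4, 5]
    # Broadcasting a length-3 row over that shape: the broadcast dimension
    # gets stride 0 and backstride 0, so the same three offsets repeat.
    print(walk_offsets([2, 3], [0, 1], [0, 2]))    # [0, 1, 2, 0, 1, 2]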
reds=['i', 'self', 'dtype'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['frame', 'self', 'dtype'] ) slice_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['self', 'source', 'source_iter', 'res_iter'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['self', 'frame', 'source', 'res_iter'] ) def _find_shape_and_elems(space, w_iterable): @@ -198,231 +203,17 @@ n_old_elems_to_use *= old_shape[oldI] return new_strides -# Iterators for arrays -# -------------------- -# all those iterators with the exception of BroadcastIterator iterate over the -# entire array in C order (the last index changes the fastest). This will -# yield all elements. Views iterate over indices and look towards strides and -# backstrides to find the correct position. Notably the offset between -# x[..., i + 1] and x[..., i] will be strides[-1]. Offset between -# x[..., k + 1, 0] and x[..., k, i_max] will be backstrides[-2] etc. +class BaseArray(Wrappable): + _attrs_ = ["invalidates", "shape", 'size'] -# BroadcastIterator works like that, but for indexes that don't change source -# in the original array, strides[i] == backstrides[i] == 0 - -class BaseIterator(object): - def next(self, shapelen): - raise NotImplementedError - - def done(self): - raise NotImplementedError - - def get_offset(self): - raise NotImplementedError - -class ArrayIterator(BaseIterator): - def __init__(self, size): - self.offset = 0 - self.size = size - - def next(self, shapelen): - arr = instantiate(ArrayIterator) - arr.size = self.size - arr.offset = self.offset + 1 - return arr - - def done(self): - return self.offset >= self.size - - def get_offset(self): - return self.offset - -class OneDimIterator(BaseIterator): - def __init__(self, start, step, stop): - self.offset = start - self.step = step - self.size = stop * step + start - - def next(self, shapelen): - arr = instantiate(OneDimIterator) - arr.size = self.size - arr.step = self.step - arr.offset = self.offset + self.step - return arr - - def done(self): - return self.offset == self.size - - def get_offset(self): - return self.offset - -class ViewIterator(BaseIterator): - def __init__(self, arr): - self.indices = [0] * len(arr.shape) - self.offset = arr.start - self.arr = arr - self._done = False - - @jit.unroll_safe - def next(self, shapelen): - offset = self.offset - indices = [0] * shapelen - for i in range(shapelen): - indices[i] = self.indices[i] - done = False - for i in range(shapelen - 1, -1, -1): - if indices[i] < self.arr.shape[i] - 1: - indices[i] += 1 - offset += self.arr.strides[i] - break - else: - indices[i] = 0 - offset -= self.arr.backstrides[i] - else: - done = True - res = instantiate(ViewIterator) - res.offset = offset - res.indices = indices - res.arr = self.arr - res._done = done - return res - - def done(self): - return self._done - - def get_offset(self): - return self.offset - -class BroadcastIterator(BaseIterator): - '''Like a view iterator, but will repeatedly access values - for all iterations across a res_shape, folding the offset - using mod() arithmetic - ''' - def __init__(self, arr, res_shape): - self.indices = [0] * len(res_shape) - self.offset = arr.start - #strides are 0 where original shape==1 - self.strides = [] - self.backstrides = [] - for i in range(len(arr.shape)): - if arr.shape[i] == 1: - self.strides.append(0) - self.backstrides.append(0) - else: - self.strides.append(arr.strides[i]) - self.backstrides.append(arr.backstrides[i]) - self.res_shape = res_shape - self.strides = [0] * 
(len(res_shape) - len(arr.shape)) + self.strides - self.backstrides = [0] * (len(res_shape) - len(arr.shape)) + self.backstrides - self._done = False - - @jit.unroll_safe - def next(self, shapelen): - offset = self.offset - indices = [0] * shapelen - _done = False - for i in range(shapelen): - indices[i] = self.indices[i] - for i in range(shapelen - 1, -1, -1): - if indices[i] < self.res_shape[i] - 1: - indices[i] += 1 - offset += self.strides[i] - break - else: - indices[i] = 0 - offset -= self.backstrides[i] - else: - _done = True - res = instantiate(BroadcastIterator) - res.indices = indices - res.offset = offset - res._done = _done - res.strides = self.strides - res.backstrides = self.backstrides - res.res_shape = self.res_shape - return res - - def done(self): - return self._done - - def get_offset(self): - return self.offset - -class Call2Iterator(BaseIterator): - def __init__(self, left, right): - self.left = left - self.right = right - - def next(self, shapelen): - return Call2Iterator(self.left.next(shapelen), - self.right.next(shapelen)) - - def done(self): - if isinstance(self.left, ConstantIterator): - return self.right.done() - return self.left.done() - - def get_offset(self): - if isinstance(self.left, ConstantIterator): - return self.right.get_offset() - return self.left.get_offset() - -class Call1Iterator(BaseIterator): - def __init__(self, child): - self.child = child - - def next(self, shapelen): - return Call1Iterator(self.child.next(shapelen)) - - def done(self): - return self.child.done() - - def get_offset(self): - return self.child.get_offset() - -class ConstantIterator(BaseIterator): - def next(self, shapelen): - return self - - def done(self): - return False - - def get_offset(self): - return 0 - - -class BaseArray(Wrappable): - _attrs_ = ["invalidates", "signature", "shape", "strides", "backstrides", - "start", 'order'] - - _immutable_fields_ = ['start', "order"] + _immutable_fields_ = [] strides = None start = 0 - def __init__(self, shape, order): + def __init__(self, shape): self.invalidates = [] self.shape = shape - self.order = order - if self.strides is None: - self.calc_strides(shape) - - def calc_strides(self, shape): - strides = [] - backstrides = [] - s = 1 - shape_rev = shape[:] - if self.order == 'C': - shape_rev.reverse() - for sh in shape_rev: - strides.append(s) - backstrides.append(s * (sh - 1)) - s *= sh - if self.order == 'C': - strides.reverse() - backstrides.reverse() - self.strides = strides[:] - self.backstrides = backstrides[:] def invalidated(self): if self.invalidates: @@ -499,33 +290,34 @@ def _reduce_argmax_argmin_impl(op_name): reduce_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['result', 'idx', 'i', 'self', 'cur_best', 'dtype'] + greens=['shapelen', 'sig'], + reds=['result', 'idx', 'frame', 'self', 'cur_best', 'dtype'] ) def loop(self): - i = self.start_iter() - cur_best = self.eval(i) + sig = self.find_sig() + frame = sig.create_frame(self) + cur_best = sig.eval(frame, self) shapelen = len(self.shape) - i = i.next(shapelen) + frame.next(shapelen) dtype = self.find_dtype() result = 0 idx = 1 - while not i.done(): - reduce_driver.jit_merge_point(signature=self.signature, + while not frame.done(): + reduce_driver.jit_merge_point(sig=sig, shapelen=shapelen, self=self, dtype=dtype, - i=i, result=result, idx=idx, + frame=frame, result=result, + idx=idx, cur_best=cur_best) - new_best = getattr(dtype.itemtype, op_name)(cur_best, self.eval(i)) + new_best = getattr(dtype.itemtype, op_name)(cur_best, sig.eval(frame, self)) 
if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best - i = i.next(shapelen) + frame.next(shapelen) idx += 1 return result def impl(self, space): - size = self.find_size() - if size == 0: + if self.size == 0: raise OperationError(space.w_ValueError, space.wrap("Can't call %s on zero-size arrays" % op_name)) return space.wrap(loop(self)) @@ -533,15 +325,16 @@ def _all(self): dtype = self.find_dtype() - i = self.start_iter() + sig = self.find_sig() + frame = sig.create_frame(self) shapelen = len(self.shape) - while not i.done(): - all_driver.jit_merge_point(signature=self.signature, + while not frame.done(): + all_driver.jit_merge_point(sig=sig, shapelen=shapelen, self=self, - dtype=dtype, i=i) - if not dtype.itemtype.bool(self.eval(i)): + dtype=dtype, frame=frame) + if not dtype.itemtype.bool(sig.eval(frame, self)): return False - i = i.next(shapelen) + frame.next(shapelen) return True def descr_all(self, space): @@ -549,15 +342,16 @@ def _any(self): dtype = self.find_dtype() - i = self.start_iter() + sig = self.find_sig() + frame = sig.create_frame(self) shapelen = len(self.shape) - while not i.done(): - any_driver.jit_merge_point(signature=self.signature, + while not frame.done(): + any_driver.jit_merge_point(sig=sig, frame=frame, shapelen=shapelen, self=self, - dtype=dtype, i=i) - if dtype.itemtype.bool(self.eval(i)): + dtype=dtype) + if dtype.itemtype.bool(sig.eval(frame, self)): return True - i = i.next(shapelen) + frame.next(shapelen) return False def descr_any(self, space): @@ -586,26 +380,33 @@ return space.newtuple([space.wrap(i) for i in self.shape]) def descr_set_shape(self, space, w_iterable): - concrete = self.get_concrete() new_shape = get_shape_from_iterable(space, - concrete.find_size(), w_iterable) - concrete.setshape(space, new_shape) + self.size, w_iterable) + if isinstance(self, Scalar): + return + self.get_concrete().setshape(space, new_shape) def descr_get_size(self, space): - return space.wrap(self.find_size()) + return space.wrap(self.size) def descr_copy(self, space): + return self.copy() + + def copy(self): return self.get_concrete().copy() def descr_len(self, space): - return self.get_concrete().descr_len(space) + if len(self.shape): + return space.wrap(self.shape[0]) + raise OperationError(space.w_TypeError, space.wrap( + "len() of unsized object")) def descr_repr(self, space): res = StringBuilder() res.append("array(") concrete = self.get_concrete() dtype = concrete.find_dtype() - if not concrete.find_size(): + if not concrete.size: res.append('[]') if len(self.shape) > 1: # An empty slice reports its shape @@ -617,18 +418,417 @@ concrete.to_str(space, 1, res, indent=' ') if (dtype is not interp_dtype.get_dtype_cache(space).w_float64dtype and dtype is not interp_dtype.get_dtype_cache(space).w_int64dtype) or \ - not self.find_size(): + not self.size: res.append(", dtype=" + dtype.name) res.append(")") return space.wrap(res.build()) + def descr_str(self, space): + ret = StringBuilder() + concrete = self.get_concrete_or_scalar() + concrete.to_str(space, 0, ret, ' ') + return space.wrap(ret.build()) + + @jit.unroll_safe + def _single_item_result(self, space, w_idx): + """ The result of getitem/setitem is a single item if w_idx + is a list of scalars that match the size of shape + """ + shape_len = len(self.shape) + if shape_len == 0: + raise OperationError(space.w_IndexError, space.wrap( + "0-d arrays can't be indexed")) + if shape_len == 1: + if space.isinstance_w(w_idx, space.w_int): + return True + if space.isinstance_w(w_idx, space.w_slice): 
+ return False + elif (space.isinstance_w(w_idx, space.w_slice) or + space.isinstance_w(w_idx, space.w_int)): + return False + lgt = space.len_w(w_idx) + if lgt > shape_len: + raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + if lgt < shape_len: + return False + for w_item in space.fixedview(w_idx): + if space.isinstance_w(w_item, space.w_slice): + return False + return True + + @jit.unroll_safe + def _prepare_slice_args(self, space, w_idx): + if (space.isinstance_w(w_idx, space.w_int) or + space.isinstance_w(w_idx, space.w_slice)): + return [space.decode_index4(w_idx, self.shape[0])] + return [space.decode_index4(w_item, self.shape[i]) for i, w_item in + enumerate(space.fixedview(w_idx))] + + def descr_getitem(self, space, w_idx): + if self._single_item_result(space, w_idx): + concrete = self.get_concrete() + item = concrete._index_of_single_item(space, w_idx) + return concrete.getitem(item) + chunks = self._prepare_slice_args(space, w_idx) + return space.wrap(self.create_slice(chunks)) + + def descr_setitem(self, space, w_idx, w_value): + self.invalidated() + if self._single_item_result(space, w_idx): + concrete = self.get_concrete() + item = concrete._index_of_single_item(space, w_idx) + dtype = concrete.find_dtype() + concrete.setitem(item, dtype.coerce(space, w_value)) + return + if not isinstance(w_value, BaseArray): + w_value = convert_to_array(space, w_value) + chunks = self._prepare_slice_args(space, w_idx) + view = self.create_slice(chunks).get_concrete() + view.setslice(space, w_value) + + @jit.unroll_safe + def create_slice(self, chunks): + shape = [] + i = -1 + for i, (start_, stop, step, lgt) in enumerate(chunks): + if step != 0: + shape.append(lgt) + s = i + 1 + assert s >= 0 + shape += self.shape[s:] + if not isinstance(self, ConcreteArray): + return VirtualSlice(self, chunks, shape) + r = calculate_slice_strides(self.shape, self.start, self.strides, + self.backstrides, chunks) + _, start, strides, backstrides = r + return W_NDimSlice(start, strides[:], backstrides[:], + shape[:], self) + + def descr_reshape(self, space, args_w): + """reshape(...) + a.reshape(shape) + + Returns an array containing the same data with a new shape. + + Refer to `numpypy.reshape` for full documentation. + + See Also + -------- + numpypy.reshape : equivalent function + """ + if len(args_w) == 1: + w_shape = args_w[0] + else: + w_shape = space.newtuple(args_w) + concrete = self.get_concrete() + new_shape = get_shape_from_iterable(space, concrete.size, w_shape) + # Since we got to here, prod(new_shape) == self.size + new_strides = calc_new_strides(new_shape, + concrete.shape, concrete.strides) + if new_strides: + # We can create a view, strides somehow match up. 
+ ndims = len(new_shape) + new_backstrides = [0] * ndims + for nd in range(ndims): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + arr = W_NDimSlice(concrete.start, new_strides, new_backstrides, + new_shape, self) + else: + # Create copy with contiguous data + arr = concrete.copy() + arr.setshape(space, new_shape) + return arr + + def descr_tolist(self, space): + if len(self.shape) == 0: + assert isinstance(self, Scalar) + return self.value.descr_tolist(space) + w_result = space.newlist([]) + for i in range(self.shape[0]): + space.call_method(w_result, "append", + space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist") + ) + return w_result + + def descr_mean(self, space): + return space.div(self.descr_sum(space), space.wrap(self.size)) + + def descr_nonzero(self, space): + if self.size > 1: + raise OperationError(space.w_ValueError, space.wrap( + "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) + concr = self.get_concrete_or_scalar() + sig = concr.find_sig() + frame = sig.create_frame(self) + return space.wrap(space.is_true( + sig.eval(frame, concr))) + + def get_concrete_or_scalar(self): + return self.get_concrete() + + def descr_get_transpose(self, space): + concrete = self.get_concrete() + if len(concrete.shape) < 2: + return space.wrap(self) + strides = [] + backstrides = [] + shape = [] + for i in range(len(concrete.shape) - 1, -1, -1): + strides.append(concrete.strides[i]) + backstrides.append(concrete.backstrides[i]) + shape.append(concrete.shape[i]) + return space.wrap(W_NDimSlice(concrete.start, strides[:], + backstrides[:], shape[:], concrete)) + + def descr_get_flatiter(self, space): + return space.wrap(W_FlatIterator(self)) + + def getitem(self, item): + raise NotImplementedError + + def find_sig(self, res_shape=None): + """ find a correct signature for the array + """ + res_shape = res_shape or self.shape + return signature.find_sig(self.create_sig(res_shape), self) + + def descr_array_iface(self, space): + if not self.shape: + raise OperationError(space.w_TypeError, + space.wrap("can't get the array data of a 0-d array for now") + ) + concrete = self.get_concrete() + storage = concrete.storage + addr = rffi.cast(lltype.Signed, storage) + w_d = space.newdict() + space.setitem_str(w_d, 'data', space.newtuple([space.wrap(addr), + space.w_False])) + return w_d + +def convert_to_array(space, w_obj): + if isinstance(w_obj, BaseArray): + return w_obj + elif space.issequence_w(w_obj): + # Convert to array. + return array(space, w_obj, w_order=None) + else: + # If it's a scalar + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) + return scalar_w(space, dtype, w_obj) + +def scalar_w(space, dtype, w_obj): + return Scalar(dtype, dtype.coerce(space, w_obj)) + +class Scalar(BaseArray): + """ + Intermediate class representing a literal. 
+ """ + size = 1 + _attrs_ = ["dtype", "value", "shape"] + + def __init__(self, dtype, value): + self.shape = [] + BaseArray.__init__(self, []) + self.dtype = dtype + self.value = value + + def find_dtype(self): + return self.dtype + + def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): + builder.append(self.dtype.itemtype.str_format(self.value)) + + def copy(self): + return Scalar(self.dtype, self.value) + + def create_sig(self, res_shape): + return signature.ScalarSignature(self.dtype) + + def get_concrete_or_scalar(self): + return self + + +class VirtualArray(BaseArray): + """ + Class for representing virtual arrays, such as binary ops or ufuncs + """ + def __init__(self, name, shape, res_dtype): + BaseArray.__init__(self, shape) + self.forced_result = None + self.res_dtype = res_dtype + self.name = name + + def _del_sources(self): + # Function for deleting references to source arrays, to allow garbage-collecting them + raise NotImplementedError + + def compute(self): + result = W_NDimArray(self.size, self.shape, self.find_dtype()) + shapelen = len(self.shape) + sig = self.find_sig() + frame = sig.create_frame(self) + ri = ArrayIterator(self.size) + while not ri.done(): + numpy_driver.jit_merge_point(sig=sig, + shapelen=shapelen, + result_size=self.size, + frame=frame, + ri=ri, + self=self, result=result) + result.dtype.setitem(result.storage, ri.offset, + sig.eval(frame, self)) + frame.next(shapelen) + ri = ri.next(shapelen) + return result + + def force_if_needed(self): + if self.forced_result is None: + self.forced_result = self.compute() + self._del_sources() + + def get_concrete(self): + self.force_if_needed() + res = self.forced_result + assert isinstance(res, ConcreteArray) + return res + + def getitem(self, item): + return self.get_concrete().getitem(item) + + def setitem(self, item, value): + return self.get_concrete().setitem(item, value) + + def find_dtype(self): + return self.res_dtype + +class VirtualSlice(VirtualArray): + def __init__(self, child, chunks, shape): + size = 1 + for sh in shape: + size *= sh + self.child = child + self.chunks = chunks + self.size = size + VirtualArray.__init__(self, 'slice', shape, child.find_dtype()) + + def create_sig(self, res_shape): + if self.forced_result is not None: + return self.forced_result.create_sig(res_shape) + return signature.VirtualSliceSignature( + self.child.create_sig(res_shape)) + + def force_if_needed(self): + if self.forced_result is None: + concr = self.child.get_concrete() + self.forced_result = concr.create_slice(self.chunks) + + def _del_sources(self): + self.child = None + +class Call1(VirtualArray): + def __init__(self, ufunc, name, shape, res_dtype, values): + VirtualArray.__init__(self, name, shape, res_dtype) + self.values = values + self.size = values.size + self.ufunc = ufunc + + def _del_sources(self): + self.values = None + + def create_sig(self, res_shape): + if self.forced_result is not None: + return self.forced_result.create_sig(res_shape) + return signature.Call1(self.ufunc, self.name, + self.values.create_sig(res_shape)) + +class Call2(VirtualArray): + """ + Intermediate class for performing binary operations. 
+ """ + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right): + VirtualArray.__init__(self, name, shape, res_dtype) + self.ufunc = ufunc + self.left = left + self.right = right + self.calc_dtype = calc_dtype + self.size = 1 + for s in self.shape: + self.size *= s + + def _del_sources(self): + self.left = None + self.right = None + + def create_sig(self, res_shape): + if self.forced_result is not None: + return self.forced_result.create_sig(res_shape) + return signature.Call2(self.ufunc, self.name, self.calc_dtype, + self.left.create_sig(res_shape), + self.right.create_sig(res_shape)) + +class ConcreteArray(BaseArray): + """ An array that have actual storage, whether owned or not + """ + _immutable_fields_ = ['storage'] + + def __init__(self, size, shape, dtype, order='C', parent=None): + self.size = size + self.parent = parent + if parent is not None: + self.storage = parent.storage + else: + self.storage = dtype.malloc(size) + self.order = order + self.dtype = dtype + if self.strides is None: + self.calc_strides(shape) + BaseArray.__init__(self, shape) + if parent is not None: + self.invalidates = parent.invalidates + + def get_concrete(self): + return self + + def find_dtype(self): + return self.dtype + + def getitem(self, item): + return self.dtype.getitem(self.storage, item) + + def setitem(self, item, value): + self.invalidated() + self.dtype.setitem(self.storage, item, value) + + def calc_strides(self, shape): + strides = [] + backstrides = [] + s = 1 + shape_rev = shape[:] + if self.order == 'C': + shape_rev.reverse() + for sh in shape_rev: + strides.append(s) + backstrides.append(s * (sh - 1)) + s *= sh + if self.order == 'C': + strides.reverse() + backstrides.reverse() + self.strides = strides[:] + self.backstrides = backstrides[:] + + def array_sig(self, res_shape): + if res_shape is not None and self.shape != res_shape: + return signature.ViewSignature(self.dtype) + return signature.ArraySignature(self.dtype) + def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): '''Modifies builder with a representation of the array/slice The items will be seperated by a comma if comma is 1 Multidimensional arrays/slices will span a number of lines, each line will begin with indent. 
''' - size = self.find_size() + size = self.size if size < 1: builder.append('[]') return @@ -654,7 +854,7 @@ builder.append(indent) # create_slice requires len(chunks) > 1 in order to reduce # shape - view = self.create_slice(space, [(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) + view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]).get_concrete() view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) builder.append('\n' + indent + '..., ') i = self.shape[0] - 3 @@ -669,7 +869,7 @@ builder.append(indent) # create_slice requires len(chunks) > 1 in order to reduce # shape - view = self.create_slice(space, [(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) + view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]).get_concrete() view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) i += 1 elif ndims == 1: @@ -705,12 +905,6 @@ builder.append('[') builder.append(']') - def descr_str(self, space): - ret = StringBuilder() - concrete = self.get_concrete() - concrete.to_str(space, 0, ret, ' ') - return space.wrap(ret.build()) - @jit.unroll_safe def _index_of_single_item(self, space, w_idx): if space.isinstance_w(w_idx, space.w_int): @@ -735,456 +929,55 @@ item += v * self.strides[i] return item - @jit.unroll_safe - def _single_item_result(self, space, w_idx): - """ The result of getitem/setitem is a single item if w_idx - is a list of scalars that match the size of shape - """ - shape_len = len(self.shape) - if shape_len == 0: - if not space.isinstance_w(w_idx, space.w_int): - raise OperationError(space.w_IndexError, space.wrap( - "wrong index")) - return True - if shape_len == 1: - if space.isinstance_w(w_idx, space.w_int): - return True - if space.isinstance_w(w_idx, space.w_slice): - return False - elif (space.isinstance_w(w_idx, space.w_slice) or - space.isinstance_w(w_idx, space.w_int)): - return False - lgt = space.len_w(w_idx) - if lgt > shape_len: - raise OperationError(space.w_IndexError, - space.wrap("invalid index")) - if lgt < shape_len: - return False - for w_item in space.fixedview(w_idx): - if space.isinstance_w(w_item, space.w_slice): - return False - return True - @jit.unroll_safe - def _prepare_slice_args(self, space, w_idx): - if (space.isinstance_w(w_idx, space.w_int) or - space.isinstance_w(w_idx, space.w_slice)): - return [space.decode_index4(w_idx, self.shape[0])] - return [space.decode_index4(w_item, self.shape[i]) for i, w_item in - enumerate(space.fixedview(w_idx))] +class ViewArray(ConcreteArray): + def copy(self): + array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) + iter = view_iter_from_arr(self) + a_iter = ArrayIterator(array.size) + while not iter.done(): + array.setitem(a_iter.offset, self.getitem(iter.offset)) + iter = iter.next(len(self.shape)) + a_iter = a_iter.next(len(array.shape)) + return array - def descr_getitem(self, space, w_idx): - if self._single_item_result(space, w_idx): - concrete = self.get_concrete() - if len(concrete.shape) < 1: - raise OperationError(space.w_IndexError, space.wrap( - "0-d arrays can't be indexed")) - item = concrete._index_of_single_item(space, w_idx) - return concrete.getitem(item) - chunks = self._prepare_slice_args(space, w_idx) - return space.wrap(self.create_slice(space, chunks)) + def create_sig(self, res_shape): + return signature.ViewSignature(self.dtype) - def descr_setitem(self, space, w_idx, w_value): - self.invalidated() - if self._single_item_result(space, w_idx): - concrete = 
self.get_concrete() - if len(concrete.shape) < 1: - raise OperationError(space.w_IndexError, space.wrap( - "0-d arrays can't be indexed")) - item = concrete._index_of_single_item(space, w_idx) - dtype = concrete.find_dtype() - concrete.setitem(item, dtype.coerce(space, w_value)) - return - if not isinstance(w_value, BaseArray): - w_value = convert_to_array(space, w_value) - chunks = self._prepare_slice_args(space, w_idx) - view = self.create_slice(space, chunks) - view.setslice(space, w_value) - @jit.unroll_safe - def create_slice(self, space, chunks): - if len(chunks) == 1: - start, stop, step, lgt = chunks[0] - if step == 0: - shape = self.shape[1:] - strides = self.strides[1:] - backstrides = self.backstrides[1:] - else: - shape = [lgt] + self.shape[1:] - strides = [self.strides[0] * step] + self.strides[1:] - backstrides = [(lgt - 1) * self.strides[0] * step] + self.backstrides[1:] - start *= self.strides[0] - start += self.start - else: - shape = [] - strides = [] - backstrides = [] - start = self.start - i = -1 - for i, (start_, stop, step, lgt) in enumerate(chunks): - if step != 0: - shape.append(lgt) - strides.append(self.strides[i] * step) - backstrides.append(self.strides[i] * (lgt - 1) * step) - start += self.strides[i] * start_ - # add a reminder - s = i + 1 - assert s >= 0 - shape += self.shape[s:] - strides += self.strides[s:] - backstrides += self.backstrides[s:] - new_sig = signature.Signature.find_sig([ - W_NDimSlice.signature, self.signature, - ]) - return W_NDimSlice(self, new_sig, start, strides[:], backstrides[:], - shape[:]) - - def descr_reshape(self, space, args_w): - """reshape(...) - a.reshape(shape) - - Returns an array containing the same data with a new shape. - - Refer to `numpypy.reshape` for full documentation. - - See Also - -------- - numpypy.reshape : equivalent function -""" - if len(args_w) == 1: - w_shape = args_w[0] - else: - w_shape = space.newtuple(args_w) - concrete = self.get_concrete() - new_shape = get_shape_from_iterable(space, - concrete.find_size(), w_shape) - # Since we got to here, prod(new_shape) == self.size - new_strides = calc_new_strides(new_shape, - concrete.shape, concrete.strides) - if new_strides: - # We can create a view, strides somehow match up. - new_sig = signature.Signature.find_sig([ - W_NDimSlice.signature, self.signature - ]) - ndims = len(new_shape) - new_backstrides = [0] * ndims - for nd in range(ndims): - new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] - arr = W_NDimSlice(self, new_sig, self.start, new_strides, - new_backstrides, new_shape) - else: - # Create copy with contiguous data - arr = concrete.copy() - arr.setshape(space, new_shape) - return arr - - def descr_tolist(self, space): - if len(self.shape) == 0: - assert isinstance(self, Scalar) - return self.value.descr_tolist(space) - w_result = space.newlist([]) - for i in range(self.shape[0]): - space.call_method(w_result, "append", - space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist") - ) - return w_result - - def descr_mean(self, space): - return space.div(self.descr_sum(space), space.wrap(self.find_size())) - - def descr_nonzero(self, space): - if self.find_size() > 1: - raise OperationError(space.w_ValueError, space.wrap( - "The truth value of an array with more than one element is ambiguous. 
Use a.any() or a.all()")) - return space.wrap(space.is_true( - self.get_concrete().eval(self.start_iter(self.shape)) - )) - - def descr_get_transpose(self, space): - concrete = self.get_concrete() - if len(concrete.shape) < 2: - return space.wrap(self) - new_sig = signature.Signature.find_sig([ - W_NDimSlice.signature, self.signature - ]) - strides = [] - backstrides = [] - shape = [] - for i in range(len(concrete.shape) - 1, -1, -1): - strides.append(concrete.strides[i]) - backstrides.append(concrete.backstrides[i]) - shape.append(concrete.shape[i]) - return space.wrap(W_NDimSlice(concrete, new_sig, self.start, strides[:], - backstrides[:], shape[:])) - - def descr_get_flatiter(self, space): - return space.wrap(W_FlatIterator(self)) - - def getitem(self, item): - raise NotImplementedError - - def start_iter(self, res_shape=None): - raise NotImplementedError - - def descr_array_iface(self, space): - concrete = self.get_concrete() - storage = concrete.get_storage(space) - addr = rffi.cast(lltype.Signed, storage) - w_d = space.newdict() - space.setitem_str(w_d, 'data', space.newtuple([space.wrap(addr), - space.w_False])) - return w_d - -def convert_to_array(space, w_obj): - if isinstance(w_obj, BaseArray): - return w_obj - elif space.issequence_w(w_obj): - # Convert to array. - return array(space, w_obj, w_order=None) - else: - # If it's a scalar - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) - return scalar_w(space, dtype, w_obj) - -def scalar_w(space, dtype, w_obj): - return Scalar(dtype, dtype.coerce(space, w_obj)) - -class Scalar(BaseArray): - """ - Intermediate class representing a literal. - """ - signature = signature.BaseSignature() - - _attrs_ = ["dtype", "value", "shape"] - - def __init__(self, dtype, value): - self.shape = self.strides = [] - BaseArray.__init__(self, [], 'C') - self.dtype = dtype - self.value = value - - def find_size(self): - return 1 - - def get_concrete(self): - return self - - def find_dtype(self): - return self.dtype - - def getitem(self, item): - raise NotImplementedError - - def eval(self, iter): - return self.value - - def start_iter(self, res_shape=None): - return ConstantIterator() - - def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): - builder.append(self.dtype.itemtype.str_format(self.value)) - - def copy(self): - return Scalar(self.dtype, self.value) - - def debug_repr(self): - return 'Scalar' - - def setshape(self, space, new_shape): - # In order to get here, we already checked that prod(new_shape) == 1, - # so in order to have a consistent API, let it go through. 
- pass - - def get_storage(self, space): - raise OperationError(space.w_TypeError, space.wrap("Cannot get array interface on scalars in pypy")) - -class VirtualArray(BaseArray): - """ - Class for representing virtual arrays, such as binary ops or ufuncs - """ - def __init__(self, signature, shape, res_dtype, order): - BaseArray.__init__(self, shape, order) - self.forced_result = None - self.signature = signature - self.res_dtype = res_dtype - - def _del_sources(self): - # Function for deleting references to source arrays, to allow garbage-collecting them - raise NotImplementedError - - def compute(self): - i = 0 - signature = self.signature - result_size = self.find_size() - result = W_NDimArray(result_size, self.shape, self.find_dtype()) - shapelen = len(self.shape) - i = self.start_iter() - ri = result.start_iter() - while not ri.done(): - numpy_driver.jit_merge_point(signature=signature, - shapelen=shapelen, - result_size=result_size, i=i, ri=ri, - self=self, result=result) - result.dtype.setitem(result.storage, ri.offset, self.eval(i)) - i = i.next(shapelen) - ri = ri.next(shapelen) - return result - - def force_if_needed(self): - if self.forced_result is None: - self.forced_result = self.compute() - self._del_sources() - - def get_concrete(self): - self.force_if_needed() - return self.forced_result - - def eval(self, iter): - if self.forced_result is not None: - return self.forced_result.eval(iter) - return self._eval(iter) - - def getitem(self, item): - return self.get_concrete().getitem(item) - - def setitem(self, item, value): - return self.get_concrete().setitem(item, value) - - def find_size(self): - if self.forced_result is not None: - # The result has been computed and sources may be unavailable - return self.forced_result.find_size() - return self._find_size() - - def find_dtype(self): - return self.res_dtype - - -class Call1(VirtualArray): - def __init__(self, signature, shape, res_dtype, values, order): - VirtualArray.__init__(self, signature, shape, res_dtype, - values.order) - self.values = values - - def _del_sources(self): - self.values = None - - def _find_size(self): - return self.values.find_size() - - def _find_dtype(self): - return self.res_dtype - - def _eval(self, iter): - assert isinstance(iter, Call1Iterator) - val = self.values.eval(iter.child).convert_to(self.res_dtype) - sig = jit.promote(self.signature) - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call1) - return call_sig.func(self.res_dtype, val) - - def start_iter(self, res_shape=None): - if self.forced_result is not None: - return self.forced_result.start_iter(res_shape) - return Call1Iterator(self.values.start_iter(res_shape)) - - def debug_repr(self): - sig = self.signature - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call1) - if self.forced_result is not None: - return 'Call1(%s, forced=%s)' % (call_sig.name, - self.forced_result.debug_repr()) - return 'Call1(%s, %s)' % (call_sig.name, - self.values.debug_repr()) - -class Call2(VirtualArray): - """ - Intermediate class for performing binary operations. 
- """ - def __init__(self, signature, shape, calc_dtype, res_dtype, left, right): - # XXX do something if left.order != right.order - VirtualArray.__init__(self, signature, shape, res_dtype, left.order) - self.left = left - self.right = right - self.calc_dtype = calc_dtype - self.size = 1 - for s in self.shape: - self.size *= s - - def _del_sources(self): - self.left = None - self.right = None - - def _find_size(self): - return self.size - - def start_iter(self, res_shape=None): - if self.forced_result is not None: - return self.forced_result.start_iter(res_shape) - if res_shape is None: - res_shape = self.shape # we still force the shape on children - return Call2Iterator(self.left.start_iter(res_shape), - self.right.start_iter(res_shape)) - - def _eval(self, iter): - assert isinstance(iter, Call2Iterator) - lhs = self.left.eval(iter.left).convert_to(self.calc_dtype) - rhs = self.right.eval(iter.right).convert_to(self.calc_dtype) - sig = jit.promote(self.signature) - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call2) - return call_sig.func(self.calc_dtype, lhs, rhs) - - def debug_repr(self): - sig = self.signature - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call2) - if self.forced_result is not None: - return 'Call2(%s, forced=%s)' % (call_sig.name, - self.forced_result.debug_repr()) - return 'Call2(%s, %s, %s)' % (call_sig.name, - self.left.debug_repr(), - self.right.debug_repr()) - -class ViewArray(BaseArray): - """ - Class for representing views of arrays, they will reflect changes of parent - arrays. Example: slices - """ - def __init__(self, parent, signature, strides, backstrides, shape): +class W_NDimSlice(ViewArray): + def __init__(self, start, strides, backstrides, shape, parent): + assert isinstance(parent, ConcreteArray) + if isinstance(parent, W_NDimSlice): + parent = parent.parent + size = 1 + for sh in shape: + size *= sh self.strides = strides self.backstrides = backstrides - BaseArray.__init__(self, shape, parent.order) - self.signature = signature - self.parent = parent - self.invalidates = parent.invalidates + ViewArray.__init__(self, size, shape, parent.dtype, parent.order, + parent) + self.start = start - def get_concrete(self): - # in fact, ViewArray never gets "concrete" as it never stores data. - # This implementation is needed for BaseArray getitem/setitem to work, - # can be refactored. - self.parent.get_concrete() - return self + def setslice(self, space, w_value): + res_shape = shape_agreement(space, self.shape, w_value.shape) + self._sliceloop(w_value, res_shape) - def getitem(self, item): - return self.parent.getitem(item) - - def eval(self, iter): - return self.parent.getitem(iter.get_offset()) - - def setitem(self, item, value): - # This is currently not possible to be called from anywhere. 
- raise NotImplementedError - - def descr_len(self, space): - if self.shape: - return space.wrap(self.shape[0]) - return space.wrap(1) + def _sliceloop(self, source, res_shape): + sig = source.find_sig(res_shape) + frame = sig.create_frame(source, res_shape) + res_iter = view_iter_from_arr(self) + shapelen = len(res_shape) + while not res_iter.done(): + slice_driver.jit_merge_point(sig=sig, + frame=frame, + shapelen=shapelen, + self=self, source=source, + res_iter=res_iter) + self.setitem(res_iter.offset, sig.eval(frame, source).convert_to( + self.find_dtype())) + frame.next(shapelen) + res_iter = res_iter.next(shapelen) def setshape(self, space, new_shape): if len(self.shape) < 1: @@ -1220,96 +1013,10 @@ self.backstrides = new_backstrides[:] self.shape = new_shape[:] -class W_NDimSlice(ViewArray): - signature = signature.BaseSignature() - - def __init__(self, parent, signature, start, strides, backstrides, - shape): - if isinstance(parent, W_NDimSlice): - parent = parent.parent - ViewArray.__init__(self, parent, signature, strides, backstrides, shape) - self.start = start - self.size = 1 - for sh in shape: - self.size *= sh - - def find_size(self): - return self.size - - def find_dtype(self): - return self.parent.find_dtype() - - def setslice(self, space, w_value): - res_shape = shape_agreement(space, self.shape, w_value.shape) - self._sliceloop(w_value, res_shape) - - def _sliceloop(self, source, res_shape): - source_iter = source.start_iter(res_shape) - res_iter = self.start_iter(res_shape) - shapelen = len(res_shape) - while not res_iter.done(): - slice_driver.jit_merge_point(signature=source.signature, - shapelen=shapelen, - self=self, source=source, - res_iter=res_iter, - source_iter=source_iter) - self.setitem(res_iter.offset, source.eval(source_iter).convert_to( - self.find_dtype())) - source_iter = source_iter.next(shapelen) - res_iter = res_iter.next(shapelen) - - def start_iter(self, res_shape=None): - if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - if len(self.shape) == 1: - return OneDimIterator(self.start, self.strides[0], self.shape[0]) - return ViewIterator(self) - - def setitem(self, item, value): - self.parent.setitem(item, value) - - def debug_repr(self): - return 'Slice(%s)' % self.parent.debug_repr() - - def copy(self): - array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) - iter = self.start_iter() - a_iter = array.start_iter() - while not iter.done(): - array.setitem(a_iter.offset, self.getitem(iter.offset)) - iter = iter.next(len(self.shape)) - a_iter = a_iter.next(len(array.shape)) - return array - - def get_storage(self, space): - return self.parent.get_storage(space) - -class W_NDimArray(BaseArray): +class W_NDimArray(ConcreteArray): """ A class representing contiguous array. 
We know that each iteration by say ufunc will increase the data index by one """ - def __init__(self, size, shape, dtype, order='C'): - BaseArray.__init__(self, shape, order) - self.size = size - self.dtype = dtype - self.storage = dtype.malloc(size) - self.signature = dtype.signature - - def get_concrete(self): - return self - - def find_size(self): - return self.size - - def find_dtype(self): - return self.dtype - - def getitem(self, item): - return self.dtype.getitem(self.storage, item) - - def eval(self, iter): - return self.dtype.getitem(self.storage, iter.get_offset()) - def copy(self): array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) rffi.c_memcpy( @@ -1319,32 +1026,16 @@ ) return array - def descr_len(self, space): - if len(self.shape): - return space.wrap(self.shape[0]) - raise OperationError(space.w_TypeError, space.wrap( - "len() of unsized object")) - def setitem(self, item, value): self.invalidated() self.dtype.setitem(self.storage, item, value) - def start_iter(self, res_shape=None): - if self.order == 'C': - if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - return ArrayIterator(self.size) - raise NotImplementedError # use ViewIterator simply, test it - def setshape(self, space, new_shape): self.shape = new_shape self.calc_strides(new_shape) - def debug_repr(self): - return 'Array' - - def get_storage(self, space): - return self.storage + def create_sig(self, res_shape): + return self.array_sig(res_shape) def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) @@ -1396,10 +1087,11 @@ ) arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) shapelen = len(shape) - arr_iter = arr.start_iter(arr.shape) + arr_iter = ArrayIterator(arr.size) for i in range(len(elems_w)): w_elem = elems_w[i] - dtype.setitem(arr.storage, arr_iter.offset, dtype.coerce(space, w_elem)) + dtype.setitem(arr.storage, arr_iter.offset, + dtype.coerce(space, w_elem)) arr_iter = arr_iter.next(shapelen) return arr @@ -1492,48 +1184,31 @@ class W_FlatIterator(ViewArray): - signature = signature.BaseSignature() @jit.unroll_safe def __init__(self, arr): + arr = arr.get_concrete() size = 1 for sh in arr.shape: size *= sh - new_sig = signature.Signature.find_sig([ - W_FlatIterator.signature, arr.signature - ]) - ViewArray.__init__(self, arr, new_sig, [arr.strides[-1]], - [arr.backstrides[-1]], [size]) + self.strides = [arr.strides[-1]] + self.backstrides = [arr.backstrides[-1]] + ViewArray.__init__(self, size, [size], arr.dtype, arr.order, + arr) self.shapelen = len(arr.shape) - self.arr = arr - self.iter = self.start_iter() - - def start_iter(self, res_shape=None): - if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - return OneDimIterator(self.arr.start, self.strides[0], - self.shape[0]) - - def find_dtype(self): - return self.arr.find_dtype() - - def find_size(self): - return self.shape[0] + self.iter = OneDimIterator(arr.start, self.strides[0], + self.shape[0]) def descr_next(self, space): if self.iter.done(): raise OperationError(space.w_StopIteration, space.w_None) - result = self.eval(self.iter) + result = self.getitem(self.iter.offset) self.iter = self.iter.next(self.shapelen) return result def descr_iter(self): return self - def debug_repr(self): - return 'FlatIter(%s)' % self.arr.debug_repr() - - W_FlatIterator.typedef = TypeDef( 'flatiter', next = interp2app(W_FlatIterator.descr_next), diff --git a/pypy/module/micronumpy/interp_ufuncs.py 
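For reference, the strides/backstrides layout that setshape relies on can be reproduced with a small stand-alone sketch (plain Python, covering only the C-order case of the calc_strides method shown earlier in this diff; the function name c_order_strides is invented for the example):

    # Sketch of the C-order strides computation from ConcreteArray.calc_strides.
    def c_order_strides(shape):
        strides = []
        backstrides = []
        s = 1
        for sh in reversed(shape):           # innermost dimension varies fastest
            strides.append(s)
            backstrides.append(s * (sh - 1))
            s *= sh
        strides.reverse()
        backstrides.reverse()
        return strides, backstrides

    print(c_order_strides([2, 3]))       # ([3, 1], [3, 2])
    print(c_order_strides([2, 3, 4]))    # ([12, 4, 1], [12, 8, 3])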
b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -2,20 +2,21 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import interp_boxes, interp_dtype, signature, types +from pypy.module.micronumpy import interp_boxes, interp_dtype, types +from pypy.module.micronumpy.signature import ReduceSignature, ScalarSignature, find_sig from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name - reduce_driver = jit.JitDriver( - greens = ['shapelen', "signature"], - reds = ["i", "self", "dtype", "value", "obj"] + greens = ['shapelen', "sig"], + virtualizables = ["frame"], + reds = ["frame", "self", "dtype", "value", "obj"] ) class W_Ufunc(Wrappable): _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"] - _immutable_fields_ = ["promote_to_float", "promote_bools"] + _immutable_fields_ = ["promote_to_float", "promote_bools", "name"] def __init__(self, name, promote_to_float, promote_bools, identity): self.name = name @@ -50,6 +51,7 @@ def reduce(self, space, w_obj, multidim): from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar + if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) @@ -60,13 +62,16 @@ raise OperationError(space.w_TypeError, space.wrap("cannot reduce " "on a scalar")) - size = obj.find_size() + size = obj.size dtype = find_unaryop_result_dtype( space, obj.find_dtype(), promote_to_largest=True ) - start = obj.start_iter(obj.shape) shapelen = len(obj.shape) + sig = find_sig(ReduceSignature(self.func, self.name, dtype, + ScalarSignature(dtype), + obj.create_sig(obj.shape)), obj) + frame = sig.create_frame(obj) if shapelen > 1 and not multidim: raise OperationError(space.w_NotImplementedError, space.wrap("not implemented yet")) @@ -74,34 +79,33 @@ if size == 0: raise operationerrfmt(space.w_ValueError, "zero-size array to " "%s.reduce without identity", self.name) - value = obj.eval(start).convert_to(dtype) - start = start.next(shapelen) + value = sig.eval(frame, obj).convert_to(dtype) + frame.next(shapelen) else: value = self.identity.convert_to(dtype) - new_sig = signature.Signature.find_sig([ - self.reduce_signature, obj.signature - ]) - return self.reduce_loop(new_sig, shapelen, start, value, obj, dtype) + return self.reduce_loop(shapelen, sig, frame, value, obj, dtype) - def reduce_loop(self, signature, shapelen, i, value, obj, dtype): - while not i.done(): - reduce_driver.jit_merge_point(signature=signature, + def reduce_loop(self, shapelen, sig, frame, value, obj, dtype): + while not frame.done(): + reduce_driver.jit_merge_point(sig=sig, shapelen=shapelen, self=self, - value=value, obj=obj, i=i, + value=value, obj=obj, frame=frame, dtype=dtype) - value = self.func(dtype, value, obj.eval(i).convert_to(dtype)) - i = i.next(shapelen) + assert isinstance(sig, ReduceSignature) + value = sig.binfunc(dtype, value, sig.eval(frame, obj).convert_to(dtype)) + frame.next(shapelen) return value class W_Ufunc1(W_Ufunc): argcount = 1 + _immutable_fields_ = ["func", "name"] + def __init__(self, func, name, promote_to_float=False, promote_bools=False, identity=None): W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity) self.func = func - 
self.signature = signature.Call1(func) def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call1, @@ -117,14 +121,13 @@ if isinstance(w_obj, Scalar): return self.func(res_dtype, w_obj.value.convert_to(res_dtype)) - new_sig = signature.Signature.find_sig([self.signature, w_obj.signature]) - w_res = Call1(new_sig, w_obj.shape, res_dtype, w_obj, w_obj.order) + w_res = Call1(self.func, self.name, w_obj.shape, res_dtype, w_obj) w_obj.add_invalidates(w_res) return w_res class W_Ufunc2(W_Ufunc): - _immutable_fields_ = ["comparison_func", "func"] + _immutable_fields_ = ["comparison_func", "func", "name"] argcount = 2 def __init__(self, func, name, promote_to_float=False, promote_bools=False, @@ -133,8 +136,6 @@ W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity) self.func = func self.comparison_func = comparison_func - self.signature = signature.Call2(func) - self.reduce_signature = signature.BaseSignature() def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, @@ -158,11 +159,9 @@ w_rhs.value.convert_to(calc_dtype) ) - new_sig = signature.Signature.find_sig([ - self.signature, w_lhs.signature, w_rhs.signature - ]) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) - w_res = Call2(new_sig, new_shape, calc_dtype, + w_res = Call2(self.func, self.name, + new_shape, calc_dtype, res_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -1,54 +1,322 @@ -from pypy.rlib.objectmodel import r_dict, compute_identity_hash +from pypy.rlib.objectmodel import r_dict, compute_identity_hash, compute_hash from pypy.rlib.rarithmetic import intmask +from pypy.module.micronumpy.interp_iter import ViewIterator, ArrayIterator, \ + OneDimIterator, ConstantIterator +from pypy.module.micronumpy.strides import calculate_slice_strides +from pypy.rlib.jit import hint, unroll_safe, promote +def sigeq(one, two): + return one.eq(two) -def components_eq(lhs, rhs): - if len(lhs) != len(rhs): - return False - for i in range(len(lhs)): - v1, v2 = lhs[i], rhs[i] - if type(v1) is not type(v2) or not v1.eq(v2): +def sigeq_no_numbering(one, two): + """ Cache for iterator numbering should not compare array numbers + """ + return one.eq(two, compare_array_no=False) + +def sighash(sig): + return sig.hash() + +known_sigs = r_dict(sigeq, sighash) + +def find_sig(sig, arr): + sig.invent_array_numbering(arr) + try: + return known_sigs[sig] + except KeyError: + sig.invent_numbering() + known_sigs[sig] = sig + return sig + +class NumpyEvalFrame(object): + _virtualizable2_ = ['iterators[*]', 'final_iter', 'arraylist[*]'] + + @unroll_safe + def __init__(self, iterators, arrays): + self = hint(self, access_directly=True, fresh_virtualizable=True) + self.iterators = iterators[:] + self.arrays = arrays[:] + for i in range(len(self.iterators)): + iter = self.iterators[i] + if not isinstance(iter, ConstantIterator): + self.final_iter = i + break + else: + self.final_iter = -1 + + def done(self): + final_iter = promote(self.final_iter) + if final_iter < 0: return False - return True + return self.iterators[final_iter].done() -def components_hash(components): - res = 0x345678 - for component in components: - res = intmask((1000003 * res) ^ component.hash()) - return res + @unroll_safe + def next(self, shapelen): + for i in range(len(self.iterators)): + 
self.iterators[i] = self.iterators[i].next(shapelen) -class BaseSignature(object): - _attrs_ = [] +def _add_ptr_to_cache(ptr, cache): + i = 0 + for p in cache: + if ptr == p: + return i + i += 1 + else: + res = len(cache) + cache.append(ptr) + return res - def eq(self, other): - return self is other +class Signature(object): + _attrs_ = ['iter_no', 'array_no'] + _immutable_fields_ = ['iter_no', 'array_no'] + + array_no = 0 + iter_no = 0 + + def invent_numbering(self): + cache = r_dict(sigeq_no_numbering, sighash) + allnumbers = [] + self._invent_numbering(cache, allnumbers) + + def invent_array_numbering(self, arr): + cache = [] + self._invent_array_numbering(arr, cache) + + def _invent_numbering(self, cache, allnumbers): + try: + no = cache[self] + except KeyError: + no = len(allnumbers) + cache[self] = no + allnumbers.append(no) + self.iter_no = no + + def create_frame(self, arr, res_shape=None): + res_shape = res_shape or arr.shape + iterlist = [] + arraylist = [] + self._create_iter(iterlist, arraylist, arr, res_shape, []) + return NumpyEvalFrame(iterlist, arraylist) + +class ConcreteSignature(Signature): + _immutable_fields_ = ['dtype'] + + def __init__(self, dtype): + self.dtype = dtype + + def eq(self, other, compare_array_no=True): + if type(self) is not type(other): + return False + assert isinstance(other, ConcreteSignature) + if compare_array_no: + if self.array_no != other.array_no: + return False + return self.dtype is other.dtype def hash(self): - return compute_identity_hash(self) + return compute_identity_hash(self.dtype) -class Signature(BaseSignature): - _known_sigs = r_dict(components_eq, components_hash) + def allocate_view_iter(self, arr, res_shape, chunklist): + r = arr.shape, arr.start, arr.strides, arr.backstrides + if chunklist: + for chunkelem in chunklist: + r = calculate_slice_strides(r[0], r[1], r[2], r[3], chunkelem) + shape, start, strides, backstrides = r + if len(res_shape) == 1: + return OneDimIterator(start, strides[0], res_shape[0]) + return ViewIterator(start, strides, backstrides, shape, res_shape) - _attrs_ = ["components"] - _immutable_fields_ = ["components[*]"] +class ArraySignature(ConcreteSignature): + def debug_repr(self): + return 'Array' - def __init__(self, components): - self.components = components + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import ConcreteArray + concr = arr.get_concrete() + assert isinstance(concr, ConcreteArray) + self.array_no = _add_ptr_to_cache(concr.storage, cache) - @staticmethod - def find_sig(components): - return Signature._known_sigs.setdefault(components, Signature(components)) + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import ConcreteArray + concr = arr.get_concrete() + assert isinstance(concr, ConcreteArray) + storage = concr.storage + if self.iter_no >= len(iterlist): + iterlist.append(self.allocate_iter(concr, res_shape, chunklist)) + if self.array_no >= len(arraylist): + arraylist.append(storage) -class Call1(BaseSignature): - _immutable_fields_ = ["func", "name"] + def allocate_iter(self, arr, res_shape, chunklist): + if chunklist: + return self.allocate_view_iter(arr, res_shape, chunklist) + return ArrayIterator(arr.size) - def __init__(self, func): - self.func = func - self.name = func.func_name + def eval(self, frame, arr): + iter = frame.iterators[self.iter_no] + return self.dtype.getitem(frame.arrays[self.array_no], iter.offset) -class Call2(BaseSignature): - 
_immutable_fields_ = ["func", "name"] +class ScalarSignature(ConcreteSignature): + def debug_repr(self): + return 'Scalar' - def __init__(self, func): - self.func = func - self.name = func.func_name + def _invent_array_numbering(self, arr, cache): + pass + + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + if self.iter_no >= len(iterlist): + iter = ConstantIterator() + iterlist.append(iter) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Scalar + assert isinstance(arr, Scalar) + return arr.value + +class ViewSignature(ArraySignature): + def debug_repr(self): + return 'Slice' + + def _invent_numbering(self, cache, allnumbers): + # always invent a new number for view + no = len(allnumbers) + allnumbers.append(no) + self.iter_no = no + + def allocate_iter(self, arr, res_shape, chunklist): + return self.allocate_view_iter(arr, res_shape, chunklist) + +class VirtualSliceSignature(Signature): + def __init__(self, child): + self.child = child + + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import VirtualSlice + assert isinstance(arr, VirtualSlice) + self.child._invent_array_numbering(arr.child, cache) + + def hash(self): + return intmask(self.child.hash() ^ 1234) + + def eq(self, other, compare_array_no=True): + if type(self) is not type(other): + return False + assert isinstance(other, VirtualSliceSignature) + return self.child.eq(other.child, compare_array_no) + + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import VirtualSlice + assert isinstance(arr, VirtualSlice) + chunklist.append(arr.chunks) + self.child._create_iter(iterlist, arraylist, arr.child, res_shape, + chunklist) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import VirtualSlice + assert isinstance(arr, VirtualSlice) + return self.child.eval(frame, arr.child) + +class Call1(Signature): + _immutable_fields_ = ['unfunc', 'name', 'child'] + + def __init__(self, func, name, child): + self.unfunc = func + self.child = child + self.name = name + + def hash(self): + return compute_hash(self.name) ^ intmask(self.child.hash() << 1) + + def eq(self, other, compare_array_no=True): + if type(self) is not type(other): + return False + assert isinstance(other, Call1) + return (self.unfunc is other.unfunc and + self.child.eq(other.child, compare_array_no)) + + def debug_repr(self): + return 'Call1(%s, %s)' % (self.name, self.child.debug_repr()) + + def _invent_numbering(self, cache, allnumbers): + self.child._invent_numbering(cache, allnumbers) + + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + self.child._invent_array_numbering(arr.values, cache) + + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + self.child._create_iter(iterlist, arraylist, arr.values, res_shape, + chunklist) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + v = self.child.eval(frame, arr.values).convert_to(arr.res_dtype) + return self.unfunc(arr.res_dtype, v) + +class Call2(Signature): + _immutable_fields_ = ['binfunc', 'name', 'calc_dtype', 'left', 'right'] + + def __init__(self, func, name, calc_dtype, left, right): + self.binfunc = func + self.left = left + self.right = right + 
self.name = name + self.calc_dtype = calc_dtype + + def hash(self): + return (compute_hash(self.name) ^ intmask(self.left.hash() << 1) ^ + intmask(self.right.hash() << 2)) + + def eq(self, other, compare_array_no=True): + if type(self) is not type(other): + return False + assert isinstance(other, Call2) + return (self.binfunc is other.binfunc and + self.calc_dtype is other.calc_dtype and + self.left.eq(other.left, compare_array_no) and + self.right.eq(other.right, compare_array_no)) + + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import Call2 + assert isinstance(arr, Call2) + self.left._invent_array_numbering(arr.left, cache) + self.right._invent_array_numbering(arr.right, cache) + + def _invent_numbering(self, cache, allnumbers): + self.left._invent_numbering(cache, allnumbers) + self.right._invent_numbering(cache, allnumbers) + + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import Call2 + + assert isinstance(arr, Call2) + self.left._create_iter(iterlist, arraylist, arr.left, res_shape, + chunklist) + self.right._create_iter(iterlist, arraylist, arr.right, res_shape, + chunklist) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Call2 + assert isinstance(arr, Call2) + lhs = self.left.eval(frame, arr.left).convert_to(self.calc_dtype) + rhs = self.right.eval(frame, arr.right).convert_to(self.calc_dtype) + return self.binfunc(self.calc_dtype, lhs, rhs) + + def debug_repr(self): + return 'Call2(%s, %s, %s)' % (self.name, self.left.debug_repr(), + self.right.debug_repr()) + +class ReduceSignature(Call2): + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + self.right._create_iter(iterlist, arraylist, arr, res_shape, chunklist) + + def _invent_numbering(self, cache, allnumbers): + self.right._invent_numbering(cache, allnumbers) + + def _invent_array_numbering(self, arr, cache): + self.right._invent_array_numbering(arr, cache) + + def eval(self, frame, arr): + return self.right.eval(frame, arr) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/strides.py @@ -0,0 +1,34 @@ + +def calculate_slice_strides(shape, start, strides, backstrides, chunks): + rstrides = [] + rbackstrides = [] + rstart = start + rshape = [] + i = -1 + for i, (start_, stop, step, lgt) in enumerate(chunks): + if step != 0: + rstrides.append(strides[i] * step) + rbackstrides.append(strides[i] * (lgt - 1) * step) + rshape.append(lgt) + rstart += strides[i] * start_ + # add a reminder + s = i + 1 + assert s >= 0 + rstrides += strides[s:] + rbackstrides += backstrides[s:] + rshape += shape[s:] + return rshape, rstart, rstrides, rbackstrides + +def calculate_broadcast_strides(strides, backstrides, orig_shape, res_shape): + rstrides = [] + rbackstrides = [] + for i in range(len(orig_shape)): + if orig_shape[i] == 1: + rstrides.append(0) + rbackstrides.append(0) + else: + rstrides.append(strides[i]) + rbackstrides.append(backstrides[i]) + rstrides = [0] * (len(res_shape) - len(orig_shape)) + rstrides + rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides + return rstrides, rbackstrides diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -4,7 +4,6 @@ from pypy.module.micronumpy.interp_ufuncs import 
(find_binop_result_dtype, find_unaryop_result_dtype) - class BaseNumpyAppTest(object): def setup_class(cls): cls.space = gettestobjspace(usemodules=['micronumpy']) @@ -15,20 +14,37 @@ bool_dtype = get_dtype_cache(space).w_booldtype ar = W_NDimArray(10, [10], dtype=float64_dtype) + ar2 = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) - assert v1.signature is not v2.signature + sig1 = v1.find_sig() + sig2 = v2.find_sig() + assert v1 is not v2 + assert sig1.left.iter_no == sig1.right.iter_no + assert sig2.left.iter_no != sig2.right.iter_no + assert sig1.left.array_no == sig1.right.array_no + sig1b = ar2.descr_add(space, ar).find_sig() + assert sig1b.left.array_no != sig1b.right.array_no + assert sig1b is not sig1 v3 = ar.descr_add(space, Scalar(float64_dtype, 1.0)) - assert v2.signature is v3.signature + sig3 = v3.find_sig() + assert sig2 is sig3 v4 = ar.descr_add(space, ar) - assert v1.signature is v4.signature + assert v1.find_sig() is v4.find_sig() bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) - assert v5.signature is not v1.signature - assert v5.signature is not v2.signature + assert v5.find_sig() is not v1.find_sig() + assert v5.find_sig() is not v2.find_sig() v6 = ar.descr_add(space, bool_ar) - assert v5.signature is v6.signature + assert v5.find_sig() is v6.find_sig() + v7 = v6.descr_add(space, v6) + sig7 = v7.find_sig() + assert sig7.left.left.iter_no == sig7.right.left.iter_no + assert sig7.left.left.iter_no != sig7.right.right.iter_no + assert sig7.left.right.iter_no == sig7.right.right.iter_no + v1.forced_result = ar + assert v1.find_sig() is not sig1 def test_slice_signature(self, space): float64_dtype = get_dtype_cache(space).w_float64dtype @@ -36,11 +52,14 @@ ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) - assert v1.signature is v2.signature + assert v1.find_sig() is v2.find_sig() v3 = v2.descr_add(space, v1) v4 = v1.descr_add(space, v2) - assert v3.signature is v4.signature + assert v3.find_sig() is v4.find_sig() + v5 = ar.descr_add(space, ar).descr_getitem(space, space.wrap(slice(1, 3, 1))) + v6 = ar.descr_add(space, ar).descr_getitem(space, space.wrap(slice(1, 4, 1))) + assert v5.find_sig() is v6.find_sig() class TestUfuncCoerscion(object): def test_binops(self, space): diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -137,6 +137,16 @@ interp = self.run(code) assert interp.results[0].value.value == 15 + def test_sum2(self): + code = """ + a = |30| + b = a + a + sum(b) + """ + interp = self.run(code) + assert interp.results[0].value.value == 30 * (30 - 1) + + def test_array_write(self): code = """ a = [1,2,3,4,5] diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -8,8 +8,6 @@ class MockDtype(object): - signature = signature.BaseSignature() - def malloc(self, size): return None @@ -38,92 +36,86 @@ assert a.backstrides == [135, 12, 2] def test_create_slice_f(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice(space, [(3, 0, 0, 1)]) + s = a.create_slice([(3, 0, 0, 1)]) assert s.start == 3 
assert s.strides == [10, 50] assert s.backstrides == [40, 100] - s = a.create_slice(space, [(1, 9, 2, 4)]) + s = a.create_slice([(1, 9, 2, 4)]) assert s.start == 1 assert s.strides == [2, 10, 50] assert s.backstrides == [6, 40, 100] - s = a.create_slice(space, [(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) assert s.shape == [2, 1] assert s.strides == [3, 10] assert s.backstrides == [3, 0] - s = a.create_slice(space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) assert s.start == 20 assert s.shape == [10, 3] def test_create_slice_c(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') - s = a.create_slice(space, [(3, 0, 0, 1)]) + s = a.create_slice([(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] assert s.backstrides == [12, 2] - s = a.create_slice(space, [(1, 9, 2, 4)]) + s = a.create_slice([(1, 9, 2, 4)]) assert s.start == 15 assert s.strides == [30, 3, 1] assert s.backstrides == [90, 12, 2] - s = a.create_slice(space, [(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) assert s.start == 19 assert s.shape == [2, 1] assert s.strides == [45, 3] assert s.backstrides == [45, 0] - s = a.create_slice(space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) assert s.start == 6 assert s.shape == [10, 3] def test_slice_of_slice_f(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice(space, [(5, 0, 0, 1)]) + s = a.create_slice([(5, 0, 0, 1)]) assert s.start == 5 - s2 = s.create_slice(space, [(3, 0, 0, 1)]) + s2 = s.create_slice([(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [50] assert s2.parent is a assert s2.backstrides == [100] assert s2.start == 35 - s = a.create_slice(space, [(1, 5, 3, 2)]) - s2 = s.create_slice(space, [(0, 2, 1, 2), (2, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2)]) + s2 = s.create_slice([(0, 2, 1, 2), (2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [3, 50] assert s2.backstrides == [3, 100] assert s2.start == 1 * 15 + 2 * 3 def test_slice_of_slice_c(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') - s = a.create_slice(space, [(5, 0, 0, 1)]) + s = a.create_slice([(5, 0, 0, 1)]) assert s.start == 15 * 5 - s2 = s.create_slice(space, [(3, 0, 0, 1)]) + s2 = s.create_slice([(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [1] assert s2.parent is a assert s2.backstrides == [2] assert s2.start == 5 * 15 + 3 * 3 - s = a.create_slice(space, [(1, 5, 3, 2)]) - s2 = s.create_slice(space, [(0, 2, 1, 2), (2, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2)]) + s2 = s.create_slice([(0, 2, 1, 2), (2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [45, 1] assert s2.backstrides == [45, 2] assert s2.start == 1 * 15 + 2 * 3 def test_negative_step_f(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice(space, [(9, -1, -2, 5)]) + s = a.create_slice([(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] assert s.backstrides == [-8, 40, 100] def test_negative_step_c(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') - s = a.create_slice(space, [(9, -1, -2, 5)]) + s = a.create_slice([(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] @@ -132,7 +124,7 @@ a = 
W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 + 2 * 10 + 2 * 50 - s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) r = s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) @@ -142,7 +134,7 @@ a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 * 3 * 5 + 2 * 3 + 2 - s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) r = s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) @@ -897,13 +889,32 @@ a = zeros(1) assert debug_repr(a) == 'Array' assert debug_repr(a + a) == 'Call2(add, Array, Array)' - assert debug_repr(a[::2]) == 'Slice(Array)' + assert debug_repr(a[::2]) == 'Slice' assert debug_repr(a + 2) == 'Call2(add, Array, Scalar)' - assert debug_repr(a + a.flat) == 'Call2(add, Array, FlatIter(Array))' + assert debug_repr(a + a.flat) == 'Call2(add, Array, Slice)' assert debug_repr(sin(a)) == 'Call1(sin, Array)' + b = a + a b[0] = 3 - assert debug_repr(b) == 'Call2(add, forced=Array)' + assert debug_repr(b) == 'Array' + + def test_virtual_views(self): + from numpypy import arange + a = arange(15) + c = (a + a) + d = c[::2] + assert d[3] == 12 + c[6] = 5 + assert d[3] == 5 + a = arange(15) + c = (a + a) + d = c[::2][::2] + assert d[1] == 8 + b = a + a + c = b[::2] + c[:] = 3 + assert b[0] == 3 + assert b[1] == 2 def test_tolist_scalar(self): from numpypy import int32, bool_ @@ -1075,10 +1086,10 @@ def test_broadcast_setslice(self): from numpypy import zeros, ones - a = zeros((100, 100)) - b = ones(100) + a = zeros((10, 10)) + b = ones(10) a[:, :] = b - assert a[13, 15] == 1 + assert a[3, 5] == 1 def test_broadcast_shape_agreement(self): from numpypy import zeros, array @@ -1112,6 +1123,14 @@ b[:] = (a + a) assert (b == zeros((4, 3, 5))).all() + def test_broadcast_virtualview(self): + from numpypy import arange, zeros + a = arange(8).reshape([2, 2, 2]) + b = (a + a)[1, 1] + c = zeros((2, 2, 2)) + c[:] = b + assert (c == [[[12, 14], [12, 14]], [[12, 14], [12, 14]]]).all() + def test_argmax(self): from numpypy import array a = array([[1, 2], [3, 4], [5, 6]]) @@ -1173,6 +1192,11 @@ a = array([1, 2, 3]) assert dot(a.flat, a.flat) == 14 + def test_flatiter_varray(self): + from numpypy import ones + a = ones((2, 2)) + assert list(((a + a).flat)) == [2, 2, 2, 2] + def test_slice_copy(self): from numpypy import zeros a = zeros((10, 10)) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -49,10 +49,14 @@ interp.run(space) w_res = interp.results[-1] if isinstance(w_res, BaseArray): - w_res = w_res.eval(w_res.start_iter()) - + concr = w_res.get_concrete_or_scalar() + sig = concr.find_sig() + frame = sig.create_frame(concr) + w_res = sig.eval(frame, concr) if isinstance(w_res, interp_boxes.W_Float64Box): return w_res.value + if isinstance(w_res, interp_boxes.W_Int64Box): + return float(w_res.value) elif isinstance(w_res, interp_boxes.W_BoolBox): return float(w_res.value) raise TypeError(w_res) @@ -78,8 
+82,9 @@ def test_add(self): result = self.run("add") self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, - 'setinteriorfield_raw': 1, 'int_add': 3, - 'int_ge': 1, 'guard_false': 1, 'jump': 1}) + 'setinteriorfield_raw': 1, 'int_add': 2, + 'int_ge': 1, 'guard_false': 1, 'jump': 1, + 'arraylen_gc': 1}) assert result == 3 + 3 def define_float_add(): @@ -93,7 +98,8 @@ assert result == 3 + 3 self.check_simple_loop({"getinteriorfield_raw": 1, "float_add": 1, "setinteriorfield_raw": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + "int_ge": 1, "guard_false": 1, "jump": 1, + 'arraylen_gc': 1}) def define_sum(): return """ @@ -106,8 +112,8 @@ result = self.run("sum") assert result == 2 * sum(range(30)) self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 2, - "int_add": 2, "int_ge": 1, "guard_false": 1, - "jump": 1}) + "int_add": 1, "int_ge": 1, "guard_false": 1, + "jump": 1, 'arraylen_gc': 1}) def define_prod(): return """ @@ -123,18 +129,22 @@ expected *= i * 2 assert result == expected self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + "float_mul": 1, "int_add": 1, + "int_ge": 1, "guard_false": 1, "jump": 1, + 'arraylen_gc': 1}) - def test_max(self): - py.test.skip("broken, investigate") - result = self.run(""" + def define_max(): + return """ a = |30| a[13] = 128 b = a + a max(b) - """) + """ + + def test_max(self): + result = self.run("max") assert result == 256 + py.test.skip("not there yet, getting though") self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_mul": 1, "int_add": 1, "int_lt": 1, "guard_true": 1, "jump": 1}) @@ -164,9 +174,9 @@ result = self.run("any") assert result == 1 self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, - "float_ne": 1, "int_add": 2, + "float_ne": 1, "int_add": 1, "int_ge": 1, "jump": 1, - "guard_false": 2}) + "guard_false": 2, 'arraylen_gc': 1}) def define_already_forced(): return """ @@ -183,14 +193,13 @@ # This is the sum of the ops for both loops, however if you remove the # optimization then you end up with 2 float_adds, so we can still be # sure it was optimized correctly. - # XXX the comment above is wrong now. 
We need preferrably a way to - # count the two loops separately - self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, - 'getfield_gc': 35, 'getfield_gc_pure': 6, - 'guard_class': 22, 'int_add': 8, 'float_mul': 2, - 'guard_isnull': 2, 'jump': 2, 'int_ge': 4, - 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, - 'guard_value': 2}) + self.check_resops({'setinteriorfield_raw': 4, 'getfield_gc': 26, + 'getarrayitem_gc': 4, 'getarrayitem_gc_pure': 2, + 'getfield_gc_pure': 4, + 'guard_class': 8, 'int_add': 8, 'float_mul': 2, + 'jump': 2, 'int_ge': 4, + 'getinteriorfield_raw': 4, 'float_add': 2, + 'guard_false': 4, 'arraylen_gc': 2, 'same_as': 2}) def define_ufunc(): return """ @@ -204,8 +213,9 @@ result = self.run("ufunc") assert result == -6 self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_neg": 1, - "setinteriorfield_raw": 1, "int_add": 3, - "int_ge": 1, "guard_false": 1, "jump": 1}) + "setinteriorfield_raw": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1, + 'arraylen_gc': 1}) def define_specialization(): return """ @@ -248,7 +258,8 @@ 'setinteriorfield_raw': 1, 'int_add': 3, 'int_ge': 1, 'guard_false': 1, - 'jump': 1}) + 'jump': 1, + 'arraylen_gc': 1}) def define_multidim(): return """ @@ -263,8 +274,9 @@ # int_add might be 1 here if we try slightly harder with # reusing indexes or some optimization self.check_simple_loop({'float_add': 1, 'getinteriorfield_raw': 2, - 'guard_false': 1, 'int_add': 3, 'int_ge': 1, - 'jump': 1, 'setinteriorfield_raw': 1}) + 'guard_false': 1, 'int_add': 2, 'int_ge': 1, + 'jump': 1, 'setinteriorfield_raw': 1, + 'arraylen_gc': 1}) def define_multidim_slice(): return """ @@ -312,7 +324,25 @@ self.check_trace_count(1) self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, 'setinteriorfield_raw': 1, 'int_add': 3, - 'int_eq': 1, 'guard_false': 1, 'jump': 1}) + 'int_lt': 1, 'guard_true': 1, 'jump': 1, + 'arraylen_gc': 3}) + + def define_virtual_slice(): + return """ + a = |30| + c = a + a + d = c -> 1:20 + d -> 1 + """ + + def test_virtual_slice(self): + result = self.run("virtual_slice") + assert result == 4 + self.check_trace_count(1) + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, + 'setinteriorfield_raw': 1, 'int_add': 2, + 'int_ge': 1, 'guard_false': 1, 'jump': 1, + 'arraylen_gc': 1}) class TestNumpyOld(LLJitMixin): def setup_class(cls): diff --git a/pypy/module/micronumpy/test/test_ztranslation.py b/pypy/module/micronumpy/test/test_ztranslation.py --- a/pypy/module/micronumpy/test/test_ztranslation.py +++ b/pypy/module/micronumpy/test/test_ztranslation.py @@ -1,5 +1,8 @@ - +from pypy.module.micronumpy import signature from pypy.objspace.fake.checkmodule import checkmodule def test_numpy_translates(): + # XXX: If there are signatures floating around this might explode. This fix + # is ugly. 
+ signature.known_sigs.clear() checkmodule('micronumpy') diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -252,7 +252,7 @@ # grow the list done = 0 while done < len(self._seen_extras): - print self._seen_extras + #print self._seen_extras ann.build_types(self._seen_extras[done], [], complete_now=False) done += 1 diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -514,29 +514,34 @@ if maxsplit == 0: return space.wrap(input) - # An ok guess at the default size - builder = StringBuilder(len(input)) - first = True - if not sub: upper = len(input) if maxsplit > 0 and maxsplit < upper + 2: upper = maxsplit - 1 assert upper >= 0 - first = False + try: - for i in range(upper): - builder.append(by) - builder.append(input[i]) + result_size = ovfcheck(upper * len(by)) + result_size = ovfcheck(result_size + upper) + result_size = ovfcheck(result_size + len(by)) + remaining_size = len(input) - upper + result_size = ovfcheck(result_size + remaining_size) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("replace string is too long") + ) + builder = StringBuilder(result_size) + for i in range(upper): builder.append(by) - builder.append_slice(input, upper, len(input)) - except MemoryError: - raise OperationError(space.w_OverflowError, - space.wrap("replace string too long") - ) + builder.append(input[i]) + builder.append(by) + builder.append_slice(input, upper, len(input)) else: + # An ok guess for the result size + builder = StringBuilder(len(input)) start = 0 sublen = len(sub) + first = True while maxsplit != 0: next = input.find(sub, start) diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -21,6 +21,16 @@ win32api.CloseHandle(proch) except pywintypes.error, e: pass + #Try to avoid opeing a dialog box if one of the tests causes a system error + import ctypes + winapi = ctypes.windll.kernel32 + SEM_FAILCRITICALERRORS = 1 + SEM_NOGPFAULTERRORBOX = 2 + SEM_NOOPENFILEERRORBOX = 0x8000 + flags = SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX + #Since there is no GetErrorMode, do a double Set + old_mode = winapi.SetErrorMode(flags) + winapi.SetErrorMode(old_mode | flags) SIGKILL = SIGTERM = 0 READ_MODE = 'rU' From noreply at buildbot.pypy.org Wed Dec 21 14:30:57 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Dec 2011 14:30:57 +0100 (CET) Subject: [pypy-commit] pypy counter-decay: close merged branch Message-ID: <20111221133057.1E3AA8217E@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: counter-decay Changeset: r50801:bae684cd82fb Date: 2011-12-21 14:28 +0100 http://bitbucket.org/pypy/pypy/changeset/bae684cd82fb/ Log: close merged branch From noreply at buildbot.pypy.org Wed Dec 21 14:30:58 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Dec 2011 14:30:58 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20111221133058.3FAEE8217E@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50802:fe3fd6bbfb02 Date: 2011-12-21 14:30 +0100 http://bitbucket.org/pypy/pypy/changeset/fe3fd6bbfb02/ Log: merge heads diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -20,6 +20,11 @@ 
def regalloc_pop(self, loc): self.ops.append(('pop', loc)) + def regalloc_immedmem2mem(self, from_loc, to_loc): + assert isinstance(from_loc, ConstFloatLoc) + assert isinstance(to_loc, StackLoc) + self.ops.append(('immedmem2mem', from_loc, to_loc)) + def got(self, expected): print '------------------------ comparing ---------------------------' for op1, op2 in zip(self.ops, expected): @@ -244,6 +249,13 @@ else: return pick1() # + def pick2c(): + n = random.randrange(-2000, 500) + if n >= 0: + return ConstFloatLoc(n) # n is the address, not really used here + else: + return pick2() + # def pick_dst(fn, count, seen): result = [] while len(result) < count: @@ -280,12 +292,12 @@ if loc.get_width() > WORD: stack[loc.value+WORD] = 'value-hiword-%d' % i else: - assert isinstance(loc, ImmedLoc) + assert isinstance(loc, (ImmedLoc, ConstFloatLoc)) return regs1, regs2, stack # for i in range(500): seen = {} - src_locations2 = [pick2() for i in range(4)] + src_locations2 = [pick2c() for i in range(4)] dst_locations2 = pick_dst(pick2, 4, seen) src_locations1 = [pick1c() for i in range(5)] dst_locations1 = pick_dst(pick1, 5, seen) @@ -312,9 +324,15 @@ return got if isinstance(loc, ImmedLoc): return 'const-%d' % loc.value + if isinstance(loc, ConstFloatLoc): + got = 'constfloat-@%d' % loc.value + if loc.get_width() > WORD: + got = (got, 'constfloat-next-@%d' % loc.value) + return got assert 0, loc # def write(loc, newvalue): + assert (type(newvalue) is tuple) == (loc.get_width() > WORD) if isinstance(loc, RegLoc): if loc.is_xmm: regs2[loc.value] = newvalue @@ -337,10 +355,14 @@ for op in assembler.ops: if op[0] == 'mov': src, dst = op[1:] - assert isinstance(src, (RegLoc, StackLoc, ImmedLoc)) - assert isinstance(dst, (RegLoc, StackLoc)) - assert not (isinstance(src, StackLoc) and - isinstance(dst, StackLoc)) + if isinstance(src, ConstFloatLoc): + assert isinstance(dst, RegLoc) + assert dst.is_xmm + else: + assert isinstance(src, (RegLoc, StackLoc, ImmedLoc)) + assert isinstance(dst, (RegLoc, StackLoc)) + assert not (isinstance(src, StackLoc) and + isinstance(dst, StackLoc)) write(dst, read(src)) elif op[0] == 'push': src, = op[1:] @@ -350,6 +372,11 @@ dst, = op[1:] assert isinstance(dst, (RegLoc, StackLoc)) write(dst, extrapushes.pop()) + elif op[0] == 'immedmem2mem': + src, dst = op[1:] + assert isinstance(src, ConstFloatLoc) + assert isinstance(dst, StackLoc) + write(dst, read(src, 8)) else: assert 0, "unknown op: %r" % (op,) assert not extrapushes From noreply at buildbot.pypy.org Wed Dec 21 15:54:42 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Wed, 21 Dec 2011 15:54:42 +0100 (CET) Subject: [pypy-commit] pypy default: copy logic from logger to show the printable_location in the graphs, but only if get_printable_location was provided Message-ID: <20111221145442.D7E148217E@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r50803:b1bccc7a772e Date: 2011-12-21 15:46 +0100 http://bitbucket.org/pypy/pypy/changeset/b1bccc7a772e/ Log: copy logic from logger to show the printable_location in the graphs, but only if get_printable_location was provided diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -38,7 +38,8 @@ else: extraprocedures = [procedure] metainterp_sd.stats.view(errmsg=errmsg, - extraprocedures=extraprocedures) + extraprocedures=extraprocedures, + metainterp_sd=metainterp_sd) def create_empty_loop(metainterp, name_prefix=''): name = 
metainterp.staticdata.stats.name_for_new_loop() diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -12,7 +12,7 @@ def get_display_text(self): return None -def display_procedures(procedures, errmsg=None, highlight_procedures={}): +def display_procedures(procedures, errmsg=None, highlight_procedures={}, metainterp_sd=None): graphs = [(procedure, highlight_procedures.get(procedure, 0)) for procedure in procedures] for graph, highlight in graphs: @@ -20,7 +20,7 @@ if is_interesting_guard(op): graphs.append((SubGraph(op.getdescr()._debug_suboperations), highlight)) - graphpage = ResOpGraphPage(graphs, errmsg) + graphpage = ResOpGraphPage(graphs, errmsg, metainterp_sd) graphpage.display() def is_interesting_guard(op): @@ -36,8 +36,8 @@ class ResOpGraphPage(GraphPage): - def compute(self, graphs, errmsg=None): - resopgen = ResOpGen() + def compute(self, graphs, errmsg=None, metainterp_sd=None): + resopgen = ResOpGen(metainterp_sd) for graph, highlight in graphs: resopgen.add_graph(graph, highlight) if errmsg: @@ -50,13 +50,14 @@ CLUSTERING = True BOX_COLOR = (128, 0, 96) - def __init__(self): + def __init__(self, metainterp_sd=None): self.graphs = [] self.highlight_graphs = {} self.block_starters = {} # {graphindex: {set-of-operation-indices}} self.all_operations = {} self.errmsg = None self.target_tokens = {} + self.metainterp_sd = metainterp_sd def op_name(self, graphindex, opindex): return 'g%dop%d' % (graphindex, opindex) @@ -164,7 +165,14 @@ opindex = opstartindex while True: op = operations[opindex] - lines.append(op.repr(graytext=True)) + op_repr = op.repr(graytext=True) + if op.getopnum() == rop.DEBUG_MERGE_POINT: + jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] + if jd_sd._get_printable_location_ptr: + s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = s.replace(',', '.') # we use comma for argument splitting + op_repr = "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + lines.append(op_repr) if is_interesting_guard(op): tgt = op.getdescr()._debug_suboperations[0] tgt_g, tgt_i = self.all_operations[tgt] diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -1084,7 +1084,7 @@ if option.view: self.view() - def view(self, errmsg=None, extraprocedures=[]): + def view(self, errmsg=None, extraprocedures=[], metainterp_sd=None): from pypy.jit.metainterp.graphpage import display_procedures procedures = self.get_all_loops()[:] for procedure in extraprocedures: @@ -1096,7 +1096,7 @@ if hasattr(procedure, '_looptoken_number') and ( procedure._looptoken_number in self.invalidated_token_numbers): highlight_procedures.setdefault(procedure, 2) - display_procedures(procedures, errmsg, highlight_procedures) + display_procedures(procedures, errmsg, highlight_procedures, metainterp_sd) # ---------------------------------------------------------------- diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -860,12 +860,15 @@ assert res == f(10) self.check_resops(jump=2) - def test_nested_loops(self): + def test_nested_loops_1(self): class Int(object): def __init__(self, val): self.val = val - myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'sa', 'i', 'j']) bytecode = "iajb+JI" + def get_printable_location(i): + return 
"%d: %s" % (i, bytecode[i]) + myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'sa', 'i', 'j'], + get_printable_location=get_printable_location) def f(n): pc = sa = 0 i = j = Int(0) From noreply at buildbot.pypy.org Wed Dec 21 15:54:44 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Wed, 21 Dec 2011 15:54:44 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge Message-ID: <20111221145444.03C0A8217F@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r50804:628d03374f2e Date: 2011-12-21 15:54 +0100 http://bitbucket.org/pypy/pypy/changeset/628d03374f2e/ Log: hg merge diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -20,6 +20,11 @@ def regalloc_pop(self, loc): self.ops.append(('pop', loc)) + def regalloc_immedmem2mem(self, from_loc, to_loc): + assert isinstance(from_loc, ConstFloatLoc) + assert isinstance(to_loc, StackLoc) + self.ops.append(('immedmem2mem', from_loc, to_loc)) + def got(self, expected): print '------------------------ comparing ---------------------------' for op1, op2 in zip(self.ops, expected): @@ -244,6 +249,13 @@ else: return pick1() # + def pick2c(): + n = random.randrange(-2000, 500) + if n >= 0: + return ConstFloatLoc(n) # n is the address, not really used here + else: + return pick2() + # def pick_dst(fn, count, seen): result = [] while len(result) < count: @@ -280,12 +292,12 @@ if loc.get_width() > WORD: stack[loc.value+WORD] = 'value-hiword-%d' % i else: - assert isinstance(loc, ImmedLoc) + assert isinstance(loc, (ImmedLoc, ConstFloatLoc)) return regs1, regs2, stack # for i in range(500): seen = {} - src_locations2 = [pick2() for i in range(4)] + src_locations2 = [pick2c() for i in range(4)] dst_locations2 = pick_dst(pick2, 4, seen) src_locations1 = [pick1c() for i in range(5)] dst_locations1 = pick_dst(pick1, 5, seen) @@ -312,9 +324,15 @@ return got if isinstance(loc, ImmedLoc): return 'const-%d' % loc.value + if isinstance(loc, ConstFloatLoc): + got = 'constfloat-@%d' % loc.value + if loc.get_width() > WORD: + got = (got, 'constfloat-next-@%d' % loc.value) + return got assert 0, loc # def write(loc, newvalue): + assert (type(newvalue) is tuple) == (loc.get_width() > WORD) if isinstance(loc, RegLoc): if loc.is_xmm: regs2[loc.value] = newvalue @@ -337,10 +355,14 @@ for op in assembler.ops: if op[0] == 'mov': src, dst = op[1:] - assert isinstance(src, (RegLoc, StackLoc, ImmedLoc)) - assert isinstance(dst, (RegLoc, StackLoc)) - assert not (isinstance(src, StackLoc) and - isinstance(dst, StackLoc)) + if isinstance(src, ConstFloatLoc): + assert isinstance(dst, RegLoc) + assert dst.is_xmm + else: + assert isinstance(src, (RegLoc, StackLoc, ImmedLoc)) + assert isinstance(dst, (RegLoc, StackLoc)) + assert not (isinstance(src, StackLoc) and + isinstance(dst, StackLoc)) write(dst, read(src)) elif op[0] == 'push': src, = op[1:] @@ -350,6 +372,11 @@ dst, = op[1:] assert isinstance(dst, (RegLoc, StackLoc)) write(dst, extrapushes.pop()) + elif op[0] == 'immedmem2mem': + src, dst = op[1:] + assert isinstance(src, ConstFloatLoc) + assert isinstance(dst, StackLoc) + write(dst, read(src, 8)) else: assert 0, "unknown op: %r" % (op,) assert not extrapushes From noreply at buildbot.pypy.org Wed Dec 21 16:16:03 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Dec 2011 16:16:03 +0100 (CET) Subject: [pypy-commit] pypy default: A failing test, showing an issue that only occurs after translation. 
Message-ID: <20111221151603.D6C8F8217E@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50805:d70f132b92fa Date: 2011-12-21 16:13 +0100 http://bitbucket.org/pypy/pypy/changeset/d70f132b92fa/ Log: A failing test, showing an issue that only occurs after translation. diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -385,3 +385,32 @@ assert read(loc, WORD) == src_values1[i] for i, loc in enumerate(dst_locations2): assert read(loc, 8) == src_values2[i] + + +def test_overflow_bug(): + CASE = [ + (-144, -248), # \ cycle + (-248, -144), # / + (-488, -416), # \ two usages of -488 + (-488, -480), # / + (-488, -488), # - one self-application of -488 + ] + class FakeAssembler: + def regalloc_mov(self, src, dst): + print "mov", src, dst + def regalloc_push(self, x): + print "push", x + def regalloc_pop(self, x): + print "pop", x + def regalloc_immedmem2mem(self, x, y): + print "?????????????????????????" + def main(): + srclocs = [StackLoc(9999, x, 'i') for x,y in CASE] + dstlocs = [StackLoc(9999, y, 'i') for x,y in CASE] + remap_frame_layout(FakeAssembler(), srclocs, dstlocs, eax) + # it works when run directly + main() + # but it used to crash when translated, + # because of a -sys.maxint-2 overflowing to sys.maxint + from pypy.rpython.test.test_llinterp import interpret + interpret(main, []) From noreply at buildbot.pypy.org Wed Dec 21 16:16:05 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Dec 2011 16:16:05 +0100 (CET) Subject: [pypy-commit] pypy default: fix. Message-ID: <20111221151605.0159C8217E@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50806:2261c6bb4a58 Date: 2011-12-21 16:15 +0100 http://bitbucket.org/pypy/pypy/changeset/2261c6bb4a58/ Log: fix. 
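The overflow that this one-line fix addresses is worth spelling out. The test added in the previous changeset works when run directly because CPython silently promotes -sys.maxint - 2 to a long; after translation the same value is kept in a machine-sized signed integer, which is why the issue only occurs after translation. A minimal sketch of the arithmetic, not part of the changeset, assuming (as the CASE list in the test suggests) that the "ignore a move x = x" marker gets decremented once for each real use of that stack location as a source:

    import sys
    sentinel = -sys.maxint    # old marker, set when -488 is assigned to itself
    sentinel -= 1             # move -488 -> -416 consumes the source once
    sentinel -= 1             # move -488 -> -480: now -sys.maxint - 2
    # Plain CPython promotes this to a long, so running the test directly
    # passes; translated RPython code keeps it in a signed machine word,
    # where -sys.maxint - 2 overflows to +sys.maxint and the marker is
    # mistaken for a positive pending-source count.

Bounding the marker at -len(dst_locations) - 1, as the patch below does, leaves room for every possible decrement while keeping the value negative.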
diff --git a/pypy/jit/backend/x86/jump.py b/pypy/jit/backend/x86/jump.py --- a/pypy/jit/backend/x86/jump.py +++ b/pypy/jit/backend/x86/jump.py @@ -17,7 +17,10 @@ key = src._getregkey() if key in srccount: if key == dst_locations[i]._getregkey(): - srccount[key] = -sys.maxint # ignore a move "x = x" + # ignore a move "x = x" + # setting any "large enough" negative value is ok, but + # be careful of overflows, don't use -sys.maxint + srccount[key] = -len(dst_locations) - 1 pending_dests -= 1 else: srccount[key] += 1 From noreply at buildbot.pypy.org Wed Dec 21 16:16:06 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Dec 2011 16:16:06 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20111221151606.359EB8217E@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50807:d73152d7b1ad Date: 2011-12-21 16:15 +0100 http://bitbucket.org/pypy/pypy/changeset/d73152d7b1ad/ Log: merge heads diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -38,7 +38,8 @@ else: extraprocedures = [procedure] metainterp_sd.stats.view(errmsg=errmsg, - extraprocedures=extraprocedures) + extraprocedures=extraprocedures, + metainterp_sd=metainterp_sd) def create_empty_loop(metainterp, name_prefix=''): name = metainterp.staticdata.stats.name_for_new_loop() diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -12,7 +12,7 @@ def get_display_text(self): return None -def display_procedures(procedures, errmsg=None, highlight_procedures={}): +def display_procedures(procedures, errmsg=None, highlight_procedures={}, metainterp_sd=None): graphs = [(procedure, highlight_procedures.get(procedure, 0)) for procedure in procedures] for graph, highlight in graphs: @@ -20,7 +20,7 @@ if is_interesting_guard(op): graphs.append((SubGraph(op.getdescr()._debug_suboperations), highlight)) - graphpage = ResOpGraphPage(graphs, errmsg) + graphpage = ResOpGraphPage(graphs, errmsg, metainterp_sd) graphpage.display() def is_interesting_guard(op): @@ -36,8 +36,8 @@ class ResOpGraphPage(GraphPage): - def compute(self, graphs, errmsg=None): - resopgen = ResOpGen() + def compute(self, graphs, errmsg=None, metainterp_sd=None): + resopgen = ResOpGen(metainterp_sd) for graph, highlight in graphs: resopgen.add_graph(graph, highlight) if errmsg: @@ -50,13 +50,14 @@ CLUSTERING = True BOX_COLOR = (128, 0, 96) - def __init__(self): + def __init__(self, metainterp_sd=None): self.graphs = [] self.highlight_graphs = {} self.block_starters = {} # {graphindex: {set-of-operation-indices}} self.all_operations = {} self.errmsg = None self.target_tokens = {} + self.metainterp_sd = metainterp_sd def op_name(self, graphindex, opindex): return 'g%dop%d' % (graphindex, opindex) @@ -164,7 +165,14 @@ opindex = opstartindex while True: op = operations[opindex] - lines.append(op.repr(graytext=True)) + op_repr = op.repr(graytext=True) + if op.getopnum() == rop.DEBUG_MERGE_POINT: + jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] + if jd_sd._get_printable_location_ptr: + s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = s.replace(',', '.') # we use comma for argument splitting + op_repr = "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + lines.append(op_repr) if is_interesting_guard(op): tgt = op.getdescr()._debug_suboperations[0] tgt_g, tgt_i = self.all_operations[tgt] diff --git 
a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -1084,7 +1084,7 @@ if option.view: self.view() - def view(self, errmsg=None, extraprocedures=[]): + def view(self, errmsg=None, extraprocedures=[], metainterp_sd=None): from pypy.jit.metainterp.graphpage import display_procedures procedures = self.get_all_loops()[:] for procedure in extraprocedures: @@ -1096,7 +1096,7 @@ if hasattr(procedure, '_looptoken_number') and ( procedure._looptoken_number in self.invalidated_token_numbers): highlight_procedures.setdefault(procedure, 2) - display_procedures(procedures, errmsg, highlight_procedures) + display_procedures(procedures, errmsg, highlight_procedures, metainterp_sd) # ---------------------------------------------------------------- diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -860,12 +860,15 @@ assert res == f(10) self.check_resops(jump=2) - def test_nested_loops(self): + def test_nested_loops_1(self): class Int(object): def __init__(self, val): self.val = val - myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'sa', 'i', 'j']) bytecode = "iajb+JI" + def get_printable_location(i): + return "%d: %s" % (i, bytecode[i]) + myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'sa', 'i', 'j'], + get_printable_location=get_printable_location) def f(n): pc = sa = 0 i = j = Int(0) From noreply at buildbot.pypy.org Wed Dec 21 19:58:07 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 21 Dec 2011 19:58:07 +0100 (CET) Subject: [pypy-commit] pypy numpy-faster-setslice: Implement fast slice setting using memcpy Message-ID: <20111221185807.803CA8217E@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-faster-setslice Changeset: r50808:b0190f46f44c Date: 2011-12-21 20:55 +0200 http://bitbucket.org/pypy/pypy/changeset/b0190f46f44c/ Log: Implement fast slice setting using memcpy diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -102,3 +102,27 @@ class ConstantIterator(BaseIterator): def next(self, shapelen): return self + +# ------ other iterators that are not part of the computation frame ---------- + +class AxisIterator(object): + """ This object will return offsets of each start of the last stride + """ + def __init__(self, arr): + self.arr = arr + self.indices = [0] * (len(arr.shape) - 1) + self.done = False + self.offset = arr.start + + def next(self): + for i in range(len(self.arr.shape) - 2, -1, -1): + if self.indices[i] < self.arr.shape[i] - 1: + self.indices[i] += 1 + self.offset += self.arr.strides[i] + break + else: + self.indices[i] = 0 + self.offset -= self.arr.backstrides[i] + else: + self.done = True + diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -9,7 +9,7 @@ from pypy.tool.sourcetools import func_with_new_name from pypy.rlib.rstring import StringBuilder from pypy.module.micronumpy.interp_iter import ArrayIterator,\ - view_iter_from_arr, OneDimIterator + view_iter_from_arr, OneDimIterator, AxisIterator numpy_driver = jit.JitDriver( greens=['shapelen', 'sig'], @@ -606,6 +606,9 @@ space.w_False])) return w_d + def supports_fast_slicing(self): + 
return False + def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): return w_obj @@ -790,6 +793,9 @@ def get_concrete(self): return self + def supports_fast_slicing(self): + return self.order == 'C' and self.strides[-1] == 1 + def find_dtype(self): return self.dtype @@ -961,7 +967,33 @@ def setslice(self, space, w_value): res_shape = shape_agreement(space, self.shape, w_value.shape) - self._sliceloop(w_value, res_shape) + if (res_shape == w_value.shape and self.supports_fast_slicing() and + w_value.supports_fast_slicing() and + self.dtype is w_value.find_dtype()): + self._fast_setslice(space, w_value) + else: + self._sliceloop(w_value, res_shape) + + def _fast_setslice(self, space, w_value): + assert isinstance(w_value, ConcreteArray) + itemsize = self.dtype.itemtype.get_element_size() + if len(self.shape) == 1: + rffi.c_memcpy( + rffi.ptradd(self.storage, self.start * itemsize), + rffi.ptradd(w_value.storage, w_value.start * itemsize), + self.size * itemsize + ) + else: + dest = AxisIterator(self) + source = AxisIterator(w_value) + while not dest.done: + rffi.c_memcpy( + rffi.ptradd(self.storage, dest.offset * itemsize), + rffi.ptradd(w_value.storage, source.offset * itemsize), + self.shape[0] * itemsize + ) + source.next() + dest.next() def _sliceloop(self, source, res_shape): sig = source.find_sig(res_shape) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1077,6 +1077,17 @@ a = ones((1, 2, 3)) assert a[0, 1, 2] == 1.0 + def test_multidim_setslice(self): + from numpypy import zeros, ones + a = zeros((3, 3)) + b = ones((3, 3)) + a[:,1:3] = b[:,1:3] + assert (a == [[0, 1, 1], [0, 1, 1], [0, 1, 1]]).all() + a = zeros((3, 3)) + b = ones((3, 3)) + a[:,::2] = b[:,::2] + assert (a == [[1, 0, 1], [1, 0, 1], [1, 0, 1]]).all() + def test_broadcast_ufunc(self): from numpypy import array a = array([[1, 2], [3, 4], [5, 6]]) From noreply at buildbot.pypy.org Wed Dec 21 20:46:48 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 21 Dec 2011 20:46:48 +0100 (CET) Subject: [pypy-commit] pypy numpy-faster-setslice: share code between copy and setslice Message-ID: <20111221194648.64F9A8217E@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-faster-setslice Changeset: r50809:2961d5daa1d1 Date: 2011-12-21 21:46 +0200 http://bitbucket.org/pypy/pypy/changeset/2961d5daa1d1/ Log: share code between copy and setslice diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -390,10 +390,10 @@ return space.wrap(self.size) def descr_copy(self, space): - return self.copy() + return self.copy(space) - def copy(self): - return self.get_concrete().copy() + def copy(self, space): + return self.get_concrete().copy(space) def descr_len(self, space): if len(self.shape): @@ -536,7 +536,7 @@ new_shape, self) else: # Create copy with contiguous data - arr = concrete.copy() + arr = concrete.copy(space) arr.setshape(space, new_shape) return arr @@ -642,7 +642,7 @@ def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): builder.append(self.dtype.itemtype.str_format(self.value)) - def copy(self): + def copy(self, space): return Scalar(self.dtype, self.value) def create_sig(self, res_shape): @@ -935,36 +935,6 @@ item += v * self.strides[i] return item - -class 
ViewArray(ConcreteArray): - def copy(self): - array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) - iter = view_iter_from_arr(self) - a_iter = ArrayIterator(array.size) - while not iter.done(): - array.setitem(a_iter.offset, self.getitem(iter.offset)) - iter = iter.next(len(self.shape)) - a_iter = a_iter.next(len(array.shape)) - return array - - def create_sig(self, res_shape): - return signature.ViewSignature(self.dtype) - - -class W_NDimSlice(ViewArray): - def __init__(self, start, strides, backstrides, shape, parent): - assert isinstance(parent, ConcreteArray) - if isinstance(parent, W_NDimSlice): - parent = parent.parent - size = 1 - for sh in shape: - size *= sh - self.strides = strides - self.backstrides = backstrides - ViewArray.__init__(self, size, shape, parent.dtype, parent.order, - parent) - self.start = start - def setslice(self, space, w_value): res_shape = shape_agreement(space, self.shape, w_value.shape) if (res_shape == w_value.shape and self.supports_fast_slicing() and @@ -990,7 +960,7 @@ rffi.c_memcpy( rffi.ptradd(self.storage, dest.offset * itemsize), rffi.ptradd(w_value.storage, source.offset * itemsize), - self.shape[0] * itemsize + self.shape[-1] * itemsize ) source.next() dest.next() @@ -1011,6 +981,31 @@ frame.next(shapelen) res_iter = res_iter.next(shapelen) + def copy(self, space): + array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) + array.setslice(space, self) + return array + + +class ViewArray(ConcreteArray): + def create_sig(self, res_shape): + return signature.ViewSignature(self.dtype) + + +class W_NDimSlice(ViewArray): + def __init__(self, start, strides, backstrides, shape, parent): + assert isinstance(parent, ConcreteArray) + if isinstance(parent, W_NDimSlice): + parent = parent.parent + size = 1 + for sh in shape: + size *= sh + self.strides = strides + self.backstrides = backstrides + ViewArray.__init__(self, size, shape, parent.dtype, parent.order, + parent) + self.start = start + def setshape(self, space, new_shape): if len(self.shape) < 1: return @@ -1049,15 +1044,6 @@ """ A class representing contiguous array. 
We know that each iteration by say ufunc will increase the data index by one """ - def copy(self): - array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) - rffi.c_memcpy( - array.storage, - self.storage, - self.size * self.dtype.itemtype.get_element_size() - ) - return array - def setitem(self, item, value): self.invalidated() self.dtype.setitem(self.storage, item, value) From noreply at buildbot.pypy.org Wed Dec 21 20:52:10 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 21 Dec 2011 20:52:10 +0100 (CET) Subject: [pypy-commit] pypy default: Merge numpy-faster-setslice, uses memcpy for setslice operations if possible Message-ID: <20111221195210.2DE848217E@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50810:441db5bbe505 Date: 2011-12-21 21:51 +0200 http://bitbucket.org/pypy/pypy/changeset/441db5bbe505/ Log: Merge numpy-faster-setslice, uses memcpy for setslice operations if possible diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -102,3 +102,27 @@ class ConstantIterator(BaseIterator): def next(self, shapelen): return self + +# ------ other iterators that are not part of the computation frame ---------- + +class AxisIterator(object): + """ This object will return offsets of each start of the last stride + """ + def __init__(self, arr): + self.arr = arr + self.indices = [0] * (len(arr.shape) - 1) + self.done = False + self.offset = arr.start + + def next(self): + for i in range(len(self.arr.shape) - 2, -1, -1): + if self.indices[i] < self.arr.shape[i] - 1: + self.indices[i] += 1 + self.offset += self.arr.strides[i] + break + else: + self.indices[i] = 0 + self.offset -= self.arr.backstrides[i] + else: + self.done = True + diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -9,7 +9,7 @@ from pypy.tool.sourcetools import func_with_new_name from pypy.rlib.rstring import StringBuilder from pypy.module.micronumpy.interp_iter import ArrayIterator,\ - view_iter_from_arr, OneDimIterator + view_iter_from_arr, OneDimIterator, AxisIterator numpy_driver = jit.JitDriver( greens=['shapelen', 'sig'], @@ -390,10 +390,10 @@ return space.wrap(self.size) def descr_copy(self, space): - return self.copy() + return self.copy(space) - def copy(self): - return self.get_concrete().copy() + def copy(self, space): + return self.get_concrete().copy(space) def descr_len(self, space): if len(self.shape): @@ -536,7 +536,7 @@ new_shape, self) else: # Create copy with contiguous data - arr = concrete.copy() + arr = concrete.copy(space) arr.setshape(space, new_shape) return arr @@ -606,6 +606,9 @@ space.w_False])) return w_d + def supports_fast_slicing(self): + return False + def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): return w_obj @@ -639,7 +642,7 @@ def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): builder.append(self.dtype.itemtype.str_format(self.value)) - def copy(self): + def copy(self, space): return Scalar(self.dtype, self.value) def create_sig(self, res_shape): @@ -790,6 +793,9 @@ def get_concrete(self): return self + def supports_fast_slicing(self): + return self.order == 'C' and self.strides[-1] == 1 + def find_dtype(self): return self.dtype @@ -929,39 +935,35 @@ item += v * self.strides[i] return item - -class ViewArray(ConcreteArray): 
- def copy(self): - array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) - iter = view_iter_from_arr(self) - a_iter = ArrayIterator(array.size) - while not iter.done(): - array.setitem(a_iter.offset, self.getitem(iter.offset)) - iter = iter.next(len(self.shape)) - a_iter = a_iter.next(len(array.shape)) - return array - - def create_sig(self, res_shape): - return signature.ViewSignature(self.dtype) - - -class W_NDimSlice(ViewArray): - def __init__(self, start, strides, backstrides, shape, parent): - assert isinstance(parent, ConcreteArray) - if isinstance(parent, W_NDimSlice): - parent = parent.parent - size = 1 - for sh in shape: - size *= sh - self.strides = strides - self.backstrides = backstrides - ViewArray.__init__(self, size, shape, parent.dtype, parent.order, - parent) - self.start = start - def setslice(self, space, w_value): res_shape = shape_agreement(space, self.shape, w_value.shape) - self._sliceloop(w_value, res_shape) + if (res_shape == w_value.shape and self.supports_fast_slicing() and + w_value.supports_fast_slicing() and + self.dtype is w_value.find_dtype()): + self._fast_setslice(space, w_value) + else: + self._sliceloop(w_value, res_shape) + + def _fast_setslice(self, space, w_value): + assert isinstance(w_value, ConcreteArray) + itemsize = self.dtype.itemtype.get_element_size() + if len(self.shape) == 1: + rffi.c_memcpy( + rffi.ptradd(self.storage, self.start * itemsize), + rffi.ptradd(w_value.storage, w_value.start * itemsize), + self.size * itemsize + ) + else: + dest = AxisIterator(self) + source = AxisIterator(w_value) + while not dest.done: + rffi.c_memcpy( + rffi.ptradd(self.storage, dest.offset * itemsize), + rffi.ptradd(w_value.storage, source.offset * itemsize), + self.shape[-1] * itemsize + ) + source.next() + dest.next() def _sliceloop(self, source, res_shape): sig = source.find_sig(res_shape) @@ -979,6 +981,31 @@ frame.next(shapelen) res_iter = res_iter.next(shapelen) + def copy(self, space): + array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) + array.setslice(space, self) + return array + + +class ViewArray(ConcreteArray): + def create_sig(self, res_shape): + return signature.ViewSignature(self.dtype) + + +class W_NDimSlice(ViewArray): + def __init__(self, start, strides, backstrides, shape, parent): + assert isinstance(parent, ConcreteArray) + if isinstance(parent, W_NDimSlice): + parent = parent.parent + size = 1 + for sh in shape: + size *= sh + self.strides = strides + self.backstrides = backstrides + ViewArray.__init__(self, size, shape, parent.dtype, parent.order, + parent) + self.start = start + def setshape(self, space, new_shape): if len(self.shape) < 1: return @@ -1017,15 +1044,6 @@ """ A class representing contiguous array. 
We know that each iteration by say ufunc will increase the data index by one """ - def copy(self): - array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) - rffi.c_memcpy( - array.storage, - self.storage, - self.size * self.dtype.itemtype.get_element_size() - ) - return array - def setitem(self, item, value): self.invalidated() self.dtype.setitem(self.storage, item, value) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1077,6 +1077,17 @@ a = ones((1, 2, 3)) assert a[0, 1, 2] == 1.0 + def test_multidim_setslice(self): + from numpypy import zeros, ones + a = zeros((3, 3)) + b = ones((3, 3)) + a[:,1:3] = b[:,1:3] + assert (a == [[0, 1, 1], [0, 1, 1], [0, 1, 1]]).all() + a = zeros((3, 3)) + b = ones((3, 3)) + a[:,::2] = b[:,::2] + assert (a == [[1, 0, 1], [1, 0, 1], [1, 0, 1]]).all() + def test_broadcast_ufunc(self): from numpypy import array a = array([[1, 2], [3, 4], [5, 6]]) From noreply at buildbot.pypy.org Wed Dec 21 20:55:26 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 21 Dec 2011 20:55:26 +0100 (CET) Subject: [pypy-commit] pypy numpy-faster-setslice: close merged branch Message-ID: <20111221195526.CFC1A8217E@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: numpy-faster-setslice Changeset: r50811:6b116d5dea60 Date: 2011-12-21 21:54 +0200 http://bitbucket.org/pypy/pypy/changeset/6b116d5dea60/ Log: close merged branch From noreply at buildbot.pypy.org Wed Dec 21 21:28:23 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 21 Dec 2011 21:28:23 +0100 (CET) Subject: [pypy-commit] pypy default: Issue971: multiprocessing: Use network byte order to send the length of strings. Message-ID: <20111221202823.2D0AA8217E@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r50812:60129de702dc Date: 2011-12-21 21:25 +0100 http://bitbucket.org/pypy/pypy/changeset/60129de702dc/ Log: Issue971: multiprocessing: Use network byte order to send the length of strings. diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -6,7 +6,7 @@ OperationError, wrap_oserror, operationerrfmt) from pypy.rpython.lltypesystem import rffi, lltype from pypy.rlib.rarithmetic import intmask -from pypy.rlib import rpoll +from pypy.rlib import rpoll, rsocket import sys READABLE = 1 @@ -252,7 +252,8 @@ # "header" and the "body" of the message and send them at once. message = lltype.malloc(rffi.CCHARP.TO, size + 4, flavor='raw') try: - rffi.cast(rffi.UINTP, message)[0] = rffi.r_uint(size) # XXX htonl! 
+ length = rffi.r_uint(rsocket.htonl(size)) + rffi.cast(rffi.UINTP, message)[0] = length i = size - 1 while i >= 0: message[4 + i] = buffer[offset + i] @@ -264,7 +265,7 @@ def do_recv_string(self, space, buflength, maxlength): with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as length_ptr: self._recvall(space, rffi.cast(rffi.CCHARP, length_ptr), 4) - length = intmask(length_ptr[0]) + length = intmask(rsocket.ntohl(length_ptr[0])) if length > maxlength: # bad message, close connection self.flags &= ~READABLE if self.flags == 0: diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -37,6 +37,9 @@ def test_connection(self): rhandle, whandle = self.make_pair() + whandle.send_bytes("abc") + assert rhandle.recv_bytes(100) == "abc" + obj = [1, 2.0, "hello"] whandle.send(obj) obj2 = rhandle.recv() @@ -150,4 +153,20 @@ import _multiprocessing raises(IOError, _multiprocessing.Connection, -1) - raises(IOError, _multiprocessing.Connection, -15) \ No newline at end of file + raises(IOError, _multiprocessing.Connection, -15) + + def test_byte_order(self): + # The exact format of net strings (length in network byte + # order) is important for interoperation with others + # implementations. + rhandle, whandle = self.make_pair() + whandle.send_bytes("abc") + whandle.send_bytes("defg") + import socket + sock = socket.fromfd(rhandle.fileno(), + socket.AF_INET, socket.SOCK_STREAM) + data1 = sock.recv(7) + assert data1 == '\x00\x00\x00\x03abc' + data2 = sock.recv(8) + assert data2 == '\x00\x00\x00\x04defg' + From noreply at buildbot.pypy.org Wed Dec 21 21:38:07 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Wed, 21 Dec 2011 21:38:07 +0100 (CET) Subject: [pypy-commit] pypy jit-improve-nested-loops: Allow the bidge in cases like test_virtual.test_nested_loops to jump to the top of the innerloop instead of traceing a full iteration of it Message-ID: <20111221203807.854CB82AC3@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-improve-nested-loops Changeset: r50813:3c0b32210942 Date: 2011-12-21 21:36 +0100 http://bitbucket.org/pypy/pypy/changeset/3c0b32210942/ Log: Allow the bidge in cases like test_virtual.test_nested_loops to jump to the top of the innerloop instead of traceing a full iteration of it diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -976,10 +976,13 @@ self.verify_green_args(jitdriver_sd, greenboxes) self.debug_merge_point(jitdriver_sd, jdindex, self.metainterp.in_recursion, greenboxes) - + if self.metainterp.seen_loop_header_for_jdindex < 0: - if not jitdriver_sd.no_loop_header or not any_operation: + if not any_operation: return + if not self.metainterp.get_procedure_token(greenboxes, True): + if not jitdriver_sd.no_loop_header: + return # automatically add a loop_header if there is none self.metainterp.seen_loop_header_for_jdindex = jdindex # @@ -2053,9 +2056,15 @@ from pypy.jit.metainterp.resoperation import opname raise NotImplementedError(opname[opnum]) - def get_procedure_token(self, greenkey): + def get_procedure_token(self, greenkey, with_compiled_targets=False): cell = self.jitdriver_sd.warmstate.jit_cell_at_key(greenkey) - return cell.get_procedure_token() + token = cell.get_procedure_token() + if with_compiled_targets: + if not token: + return None + if not 
token.target_tokens: + return None + return token def compile_loop(self, original_boxes, live_arg_boxes, start, resume_at_jump_descr): num_green_args = self.jitdriver_sd.num_green_args @@ -2088,11 +2097,9 @@ def compile_trace(self, live_arg_boxes, resume_at_jump_descr): num_green_args = self.jitdriver_sd.num_green_args greenkey = live_arg_boxes[:num_green_args] - target_jitcell_token = self.get_procedure_token(greenkey) + target_jitcell_token = self.get_procedure_token(greenkey, True) if not target_jitcell_token: return - if not target_jitcell_token.target_tokens: - return self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None, descr=target_jitcell_token) diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -880,7 +880,7 @@ elif op == 'j': j = Int(0) elif op == '+': - sa += i.val * j.val + sa += (i.val + 2) * (j.val + 2) elif op == 'a': i = Int(i.val + 1) elif op == 'b': @@ -902,6 +902,7 @@ assert res == f(10) self.check_aborted_count(0) self.check_target_token_count(3) + self.check_resops(int_mul=2) def test_nested_loops_bridge(self): class Int(object): From noreply at buildbot.pypy.org Wed Dec 21 22:45:09 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 21 Dec 2011 22:45:09 +0100 (CET) Subject: [pypy-commit] pypy default: Attempt to fix translation Message-ID: <20111221214509.9A54382B16@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r50814:d94baec282bc Date: 2011-12-21 22:44 +0100 http://bitbucket.org/pypy/pypy/changeset/d94baec282bc/ Log: Attempt to fix translation diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -252,7 +252,8 @@ # "header" and the "body" of the message and send them at once. message = lltype.malloc(rffi.CCHARP.TO, size + 4, flavor='raw') try: - length = rffi.r_uint(rsocket.htonl(size)) + length = rffi.r_uint(rsocket.htonl( + rffi.cast(lltype.Unsigned, size))) rffi.cast(rffi.UINTP, message)[0] = length i = size - 1 while i >= 0: @@ -265,7 +266,8 @@ def do_recv_string(self, space, buflength, maxlength): with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as length_ptr: self._recvall(space, rffi.cast(rffi.CCHARP, length_ptr), 4) - length = intmask(rsocket.ntohl(length_ptr[0])) + length = intmask(rsocket.ntohl( + rffi.cast(lltype.Unsigned, length_ptr[0]))) if length > maxlength: # bad message, close connection self.flags &= ~READABLE if self.flags == 0: From noreply at buildbot.pypy.org Thu Dec 22 00:21:08 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 22 Dec 2011 00:21:08 +0100 (CET) Subject: [pypy-commit] pypy py3k: Implement imp.source_from_cache() Message-ID: <20111221232108.1F7D982B16@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50815:221a0077614d Date: 2011-12-21 23:00 +0100 http://bitbucket.org/pypy/pypy/changeset/221a0077614d/ Log: Implement imp.source_from_cache() Kind of... we still use the old .py/pyc scheme. 
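        In effect the two functions are inverses of each other on that old
        flat naming scheme.  A minimal sketch of the mapping (illustrative
        only; the actual helpers added below are make_compiled_pathname and
        make_source_pathname):

            def cache_from_source(path):
                return path + 'c'                  # 'a/b/c.py'  -> 'a/b/c.pyc'

            def source_from_cache(path):
                if not path.endswith('.pyc'):
                    raise ValueError("not a .pyc path name")
                return path[:-1]                   # 'a/b/c.pyc' -> 'a/b/c.py'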
diff --git a/pypy/module/imp/__init__.py b/pypy/module/imp/__init__.py --- a/pypy/module/imp/__init__.py +++ b/pypy/module/imp/__init__.py @@ -36,6 +36,7 @@ 'release_lock': 'interp_imp.release_lock', 'cache_from_source': 'interp_imp.cache_from_source', + 'source_from_cache': 'interp_imp.source_from_cache', } appleveldefs = { diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -866,6 +866,14 @@ "Given the path to a .py file, return the path to its .pyc file." return pathname + 'c' +def make_source_pathname(pathname): + pos_extension = len(pathname) - 4 # len('.pyc') + if pos_extension < 0: + raise ValueError("path is too short") + if pathname[pos_extension:] != '.pyc': + raise ValueError("not a .pyc path name") + return pathname[:pos_extension + 3] + @jit.dont_look_inside def load_source_module(space, w_modulename, w_mod, pathname, source, write_pyc=True): diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -182,5 +182,29 @@ importing.getimportlock(space).reinit_lock() @unwrap_spec(pathname=str) -def cache_from_source(space, pathname): +def cache_from_source(space, pathname, w_debug_override=None): + """cache_from_source(path, [debug_override]) -> path + Given the path to a .py file, return the path to its .pyc/.pyo file. + + The .py file does not need to exist; this simply returns the path to the + .pyc/.pyo file calculated as if the .py file were imported. The extension + will be .pyc unless __debug__ is not defined, then it will be .pyo. + + If debug_override is not None, then it must be a boolean and is taken as + the value of __debug__ instead.""" return space.wrap(importing.make_compiled_pathname(pathname)) + + at unwrap_spec(pathname=str) +def source_from_cache(space, pathname): + """source_from_cache(path) -> path + Given the path to a .pyc./.pyo file, return the path to its .py file. + + The .pyc/.pyo file does not need to exist; this simply returns the path to + the .py file calculated to correspond to the .pyc/.pyo file. 
If path + does not conform to PEP 3147 format, ValueError will be raised.""" + try: + sourcename = importing.make_source_pathname(pathname) + except ValueError: + raise operationerrfmt(space.w_ValueError, + "Not a PEP 3147 pyc path: %s", pathname) + return space.wrap(sourcename) diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -579,6 +579,8 @@ def test_cache_from_source(self): import imp assert imp.cache_from_source('a/b/c.py') == 'a/b/c.pyc' + assert imp.source_from_cache('a/b/c.pyc') == 'a/b/c.py' + raises(ValueError, imp.source_from_cache, 'a/b/c.py') def test_shadow_builtin(self): if self.runappdirect: skip("hard to test: module is already imported") From noreply at buildbot.pypy.org Thu Dec 22 00:21:09 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 22 Dec 2011 00:21:09 +0100 (CET) Subject: [pypy-commit] pypy py3k: Fix almost all tests in module/_sre Message-ID: <20111221232109.4864682B16@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50816:dce4b22308da Date: 2011-12-22 00:20 +0100 http://bitbucket.org/pypy/pypy/changeset/dce4b22308da/ Log: Fix almost all tests in module/_sre diff --git a/lib_pypy/array.py b/lib_pypy/array.py --- a/lib_pypy/array.py +++ b/lib_pypy/array.py @@ -106,6 +106,8 @@ self.itemsize = calcsize(typecode) if isinstance(initializer, list): self.fromlist(initializer) + elif isinstance(initializer, bytes): + self.fromstring(initializer) elif isinstance(initializer, str) and self.typecode == "u": self.fromunicode(initializer) else: @@ -138,8 +140,6 @@ """Appends items from the string, interpreting it as an array of machine values, as if it had been read from a file using the fromfile() method.""" - if isinstance(s, unicode): - s = str(s) self._frombuffer(s) def _frombuffer(self, s): diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -41,9 +41,9 @@ class __extend__(rsre_core.StrMatchContext): __metaclass__ = extendabletype def _w_slice(self, space, start, end): - return space.wrap(self._string[start:end]) + return space.wrapbytes(self._string[start:end]) def _w_string(self, space): - return space.wrap(self._string) + return space.wrapbytes(self._string) class __extend__(rsre_core.UnicodeMatchContext): __metaclass__ = extendabletype @@ -225,7 +225,7 @@ literal = u'\\' not in filter_as_unicode else: try: - filter_as_string = space.str_w(w_ptemplate) + filter_as_string = space.bytes_w(w_ptemplate) except OperationError, e: if e.async(space): raise @@ -281,7 +281,7 @@ if space.is_true(space.isinstance(w_string, space.w_unicode)): w_emptystr = space.wrap(u'') else: - w_emptystr = space.wrap('') + w_emptystr = space.wrapbytes('') w_item = space.call_method(w_emptystr, 'join', space.newlist(sublist_w)) return w_item, n diff --git a/pypy/module/_sre/test/test_app_sre.py b/pypy/module/_sre/test/test_app_sre.py --- a/pypy/module/_sre/test/test_app_sre.py +++ b/pypy/module/_sre/test/test_app_sre.py @@ -7,7 +7,7 @@ def init_globals_hack(space): space.appexec([space.wrap(autopath.this_dir)], """(this_dir): - import __builtin__ as b + import builtins as b import sys, os.path # Uh-oh, ugly hack sys.path.insert(0, this_dir) @@ -38,7 +38,7 @@ def test_creation_attributes(self): import re - pattern_string = "(b)l(?Pa)" + pattern_string = b"(b)l(?Pa)" p = re.compile(pattern_string, re.I | re.M) assert 
pattern_string == p.pattern assert re.I | re.M == p.flags @@ -73,9 +73,9 @@ def test_finditer(self): import re it = re.finditer("b(.)", "brabbel") - assert "br" == it.next().group(0) - assert "bb" == it.next().group(0) - raises(StopIteration, it.next) + assert "br" == next(it).group(0) + assert "bb" == next(it).group(0) + raises(StopIteration, next, it) def test_split(self): import re @@ -177,25 +177,25 @@ m = re.search("a(..)(?P..)", "ab1bc") assert "b1bcbc" == m.expand(r"\1\g\2") - def test_sub(self): + def test_sub_bytes(self): import re - assert "bbbbb" == re.sub("a", "b", "ababa") - assert ("bbbbb", 3) == re.subn("a", "b", "ababa") - assert "dddd" == re.sub("[abc]", "d", "abcd") - assert ("dddd", 3) == re.subn("[abc]", "d", "abcd") - assert "rbd\nbr\n" == re.sub("a(.)", r"b\1\n", "radar") - assert ("rbd\nbr\n", 2) == re.subn("a(.)", r"b\1\n", "radar") - assert ("bbbba", 2) == re.subn("a", "b", "ababa", 2) + assert b"bbbbb" == re.sub(b"a", b"b", b"ababa") + assert (b"bbbbb", 3) == re.subn(b"a", b"b", b"ababa") + assert b"dddd" == re.sub(b"[abc]", b"d", b"abcd") + assert (b"dddd", 3) == re.subn(b"[abc]", b"d", b"abcd") + assert b"rbd\nbr\n" == re.sub(b"a(.)", br"b\1\n", b"radar") + assert (b"rbd\nbr\n", 2) == re.subn(b"a(.)", br"b\1\n", b"radar") + assert (b"bbbba", 2) == re.subn(b"a", b"b", b"ababa", 2) def test_sub_unicode(self): import re - assert isinstance(re.sub(u"a", u"b", u""), unicode) + assert isinstance(re.sub(u"a", u"b", u""), str) # the input is returned unmodified if no substitution is performed, # which (if interpreted literally, as CPython does) gives the # following strangeish rules: - assert isinstance(re.sub(u"a", u"b", "diwoiioamoi"), unicode) - assert isinstance(re.sub(u"a", u"b", "diwoiiobmoi"), str) - assert isinstance(re.sub(u'x', 'y', 'x'), str) + assert isinstance(re.sub(u"a", u"b", "diwoiioamoi"), str) + assert isinstance(re.sub(u"a", u"b", b"diwoiiobmoi"), bytes) + assert isinstance(re.sub(u'x', b'y', b'x'), bytes) def test_sub_callable(self): import re @@ -212,18 +212,10 @@ return None assert "acd" == re.sub("b", call_me, "abcd") - def test_sub_callable_suddenly_unicode(self): - import re - def call_me(match): - if match.group() == 'A': - return unichr(0x3039) - return '' - assert (u"bb\u3039b", 2) == re.subn("[aA]", call_me, "babAb") - def test_match_array(self): import re, array - a = array.array('c', 'hello') - m = re.match('hel+', a) + a = array.array('b', b'hello') + m = re.match(b'hel+', a) assert m.end() == 4 def test_match_typeerror(self): From noreply at buildbot.pypy.org Thu Dec 22 00:39:53 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 22 Dec 2011 00:39:53 +0100 (CET) Subject: [pypy-commit] pypy py3k: set.__repr__ uses the new set literal syntax: {1, 2, 3} Message-ID: <20111221233953.8F40682B16@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r50817:ceb5f49d0eeb Date: 2011-12-22 00:39 +0100 http://bitbucket.org/pypy/pypy/changeset/ceb5f49d0eeb/ Log: set.__repr__ uses the new set literal syntax: {1, 2, 3} diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -641,7 +641,11 @@ return '%s(...)' % (s.__class__.__name__,) currently_in_repr[set_id] = 1 try: - return '%s(%s)' % (s.__class__.__name__, [x for x in s]) + listrepr = repr([x for x in s]) + if type(s) is set: + return '{%s}' % (listrepr[1:-1],) + else: + return '%s({%s})' % (s.__class__.__name__, listrepr[1:-1]) finally: try: del 
currently_in_repr[set_id] diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -52,7 +52,7 @@ def test_space_newset(self): s = self.space.newset() - assert self.space.str_w(self.space.repr(s)) == 'set([])' + assert self.space.str_w(self.space.repr(s)) == '{}' class AppTestAppSetTest: def test_subtype(self): @@ -189,9 +189,9 @@ s = set([1, 2, 3]) s.add(A(s)) therepr = repr(s) - assert therepr.startswith("set([") - assert therepr.endswith("])") - inner = set(therepr[5:-2].split(", ")) + assert therepr.startswith("{") + assert therepr.endswith("}") + inner = set(therepr[1:-1].split(", ")) assert inner == set(["1", "2", "3", "set(...)"]) def test_recursive_repr_frozenset(self): @@ -202,8 +202,8 @@ s = frozenset([1, 2, 3, a]) a.s = s therepr = repr(s) - assert therepr.startswith("frozenset([") - assert therepr.endswith("])") + assert therepr.startswith("frozenset({") + assert therepr.endswith("})") inner = set(therepr[11:-2].split(", ")) assert inner == set(["1", "2", "3", "frozenset(...)"]) @@ -211,7 +211,7 @@ s = set() try: s.remove(1) - except KeyError, e: + except KeyError as e: assert e.args[0] == 1 else: assert 0, "should raise" @@ -223,7 +223,7 @@ return int(id(self) & 0x7fffffff) s = H() f = set([s]) - print f + print(f) assert s in f f.remove(s) f.add(s) @@ -265,7 +265,7 @@ key = set([2, 3]) try: s.remove(key) - except KeyError, e: + except KeyError as e: assert e.args[0] is key def test_contains(self): @@ -294,7 +294,7 @@ for v1 in ['Q', (1,)]: try: s.remove(v1) - except KeyError, e: + except KeyError as e: v2 = e.args[0] assert v1 == v2 else: From noreply at buildbot.pypy.org Thu Dec 22 03:49:19 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 22 Dec 2011 03:49:19 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: draft text for a pycon blogspot Message-ID: <20111222024919.B424082B16@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r3988:f1bf364cca09 Date: 2011-12-21 20:49 -0600 http://bitbucket.org/pypy/extradoc/changeset/f1bf364cca09/ Log: draft text for a pycon blogspot diff --git a/blog/draft/pycon-2012-teaser.rst b/blog/draft/pycon-2012-teaser.rst new file mode 100644 --- /dev/null +++ b/blog/draft/pycon-2012-teaser.rst @@ -0,0 +1,36 @@ +Come see us at PyCon 2012 +========================= + +`PyCon 2012`_ is coming up in just a few short months, and PyPy will be well +represented there. We'll be delivering a tutorial, two talks, plus we'll be +around for the sprints. + +Here are the abstracts for the tutorials and talks: + +* **How to get the most out of your PyPy**: For many applications PyPy can + provide performance benefits right out of the box. However, little details + can push your application to perform much better. In this tutorial we'll give + you insights on how to push PyPy to it's limits. We'll focus on understanding + the performance characteristics of PyPy, and learning the analysis tools in + order to maximize your applications performance. + +* **Why PyPy by example**: One of the goals of PyPy is to make existing Python + code faster, however an even broader goal was to make it possible to write + things in Python that previous would needed to be written in C or other + low-level language. This talk will show examples of this, and describe how + they represent the tremendous progress PyPy has made, and what it means for + people looking to use PyPy. 
+ +* **How the PyPy JIT works**: The Python community is abuzz about the major + speed gains PyPy can offer pure Python code. But how does PyPy JIT actually + work? This talk will discuss how the PyPy JIT is implemented. It will include + descriptions of the tracing, optimization, and assembly generation phases. I + will demonstrate each step with a example loop. + +If you have any questions let us know! We look forward to seeing people at +PyCon and chatting about PyPy and the entire Python ecosystem. + +See you there, +Maciej Fijalkowski, Alex Gaynor, Benjamin Peterson, Armin Rigo, and the entire PyPy team + +.. _`PyCon 2012`: https://us.pycon.org/2012/ From noreply at buildbot.pypy.org Thu Dec 22 04:06:37 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 22 Dec 2011 04:06:37 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: note the authors of each talk Message-ID: <20111222030637.A0DE282B16@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r3989:913e169481be Date: 2011-12-21 21:06 -0600 http://bitbucket.org/pypy/extradoc/changeset/913e169481be/ Log: note the authors of each talk diff --git a/blog/draft/pycon-2012-teaser.rst b/blog/draft/pycon-2012-teaser.rst --- a/blog/draft/pycon-2012-teaser.rst +++ b/blog/draft/pycon-2012-teaser.rst @@ -7,25 +7,27 @@ Here are the abstracts for the tutorials and talks: -* **How to get the most out of your PyPy**: For many applications PyPy can - provide performance benefits right out of the box. However, little details - can push your application to perform much better. In this tutorial we'll give - you insights on how to push PyPy to it's limits. We'll focus on understanding - the performance characteristics of PyPy, and learning the analysis tools in - order to maximize your applications performance. +* **How to get the most out of your PyPy**, by Maciej Fijalkowski, Alex Gaynor + and Armin Rigo: For many applications PyPy can provide performance benefits + right out of the box. However, little details can push your application to + perform much better. In this tutorial we'll give you insights on how to push + PyPy to it's limits. We'll focus on understanding the performance + characteristics of PyPy, and learning the analysis tools in order to maximize + your applications performance. -* **Why PyPy by example**: One of the goals of PyPy is to make existing Python - code faster, however an even broader goal was to make it possible to write - things in Python that previous would needed to be written in C or other - low-level language. This talk will show examples of this, and describe how - they represent the tremendous progress PyPy has made, and what it means for - people looking to use PyPy. +* **Why PyPy by example**, by Maciej Fijalkowski, Alex Gaynor and Armin Rigo: + One of the goals of PyPy is to make existing Python code faster, however an + even broader goal was to make it possible to write things in Python that + previous would needed to be written in C or other low-level language. This + talk will show examples of this, and describe how they represent the + tremendous progress PyPy has made, and what it means for people looking to + use PyPy. -* **How the PyPy JIT works**: The Python community is abuzz about the major - speed gains PyPy can offer pure Python code. But how does PyPy JIT actually - work? This talk will discuss how the PyPy JIT is implemented. It will include - descriptions of the tracing, optimization, and assembly generation phases. 
I - will demonstrate each step with a example loop. +* **How the PyPy JIT works**, by Benjamin Peterson: The Python community is + abuzz about the major speed gains PyPy can offer pure Python code. But how + does PyPy JIT actually work? This talk will discuss how the PyPy JIT is + implemented. It will include descriptions of the tracing, optimization, and + assembly generation phases. I will demonstrate each step with a example loop. If you have any questions let us know! We look forward to seeing people at PyCon and chatting about PyPy and the entire Python ecosystem. From noreply at buildbot.pypy.org Thu Dec 22 04:18:33 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 22 Dec 2011 04:18:33 +0100 (CET) Subject: [pypy-commit] pypy default: catch all the needed memoryerror conditions in str.replace Message-ID: <20111222031833.76B7A82B16@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50818:072b5317cc0f Date: 2011-12-21 21:14 -0600 http://bitbucket.org/pypy/pypy/changeset/072b5317cc0f/ Log: catch all the needed memoryerror conditions in str.replace diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -510,6 +510,16 @@ return space.wrap(res) +def _replace_overflow_check(space, builder, new_piece): + # Checks if adding new_piece chars to the builder would overflow, and + # converts into an OverflowError. + try: + ovfcheck(builder.getlength() + new_piece) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("replace string is too long") + ) + def _string_replace(space, input, sub, by, maxsplit): if maxsplit == 0: return space.wrap(input) @@ -519,7 +529,7 @@ if maxsplit > 0 and maxsplit < upper + 2: upper = maxsplit - 1 assert upper >= 0 - + try: result_size = ovfcheck(upper * len(by)) result_size = ovfcheck(result_size + upper) @@ -548,14 +558,18 @@ if next < 0: break if not first: + _replace_overflow_check(space, builder, len(by)) builder.append(by) first = False + _replace_overflow_check(space, builder, next - start) builder.append_slice(input, start, next) start = next + sublen maxsplit -= 1 # NB. if it's already < 0, it stays < 0 if not first: + _replace_overflow_check(space, builder, len(by)) builder.append(by) + _replace_overflow_check(space, builder, len(input) - start) builder.append_slice(input, start, len(input)) return space.wrap(builder.build()) From noreply at buildbot.pypy.org Thu Dec 22 04:18:34 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 22 Dec 2011 04:18:34 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20111222031834.9B63882B16@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50819:bb7e012d070a Date: 2011-12-21 21:18 -0600 http://bitbucket.org/pypy/pypy/changeset/bb7e012d070a/ Log: merged upstream diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -6,7 +6,7 @@ OperationError, wrap_oserror, operationerrfmt) from pypy.rpython.lltypesystem import rffi, lltype from pypy.rlib.rarithmetic import intmask -from pypy.rlib import rpoll +from pypy.rlib import rpoll, rsocket import sys READABLE = 1 @@ -252,7 +252,9 @@ # "header" and the "body" of the message and send them at once. 
message = lltype.malloc(rffi.CCHARP.TO, size + 4, flavor='raw') try: - rffi.cast(rffi.UINTP, message)[0] = rffi.r_uint(size) # XXX htonl! + length = rffi.r_uint(rsocket.htonl( + rffi.cast(lltype.Unsigned, size))) + rffi.cast(rffi.UINTP, message)[0] = length i = size - 1 while i >= 0: message[4 + i] = buffer[offset + i] @@ -264,7 +266,8 @@ def do_recv_string(self, space, buflength, maxlength): with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as length_ptr: self._recvall(space, rffi.cast(rffi.CCHARP, length_ptr), 4) - length = intmask(length_ptr[0]) + length = intmask(rsocket.ntohl( + rffi.cast(lltype.Unsigned, length_ptr[0]))) if length > maxlength: # bad message, close connection self.flags &= ~READABLE if self.flags == 0: diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -37,6 +37,9 @@ def test_connection(self): rhandle, whandle = self.make_pair() + whandle.send_bytes("abc") + assert rhandle.recv_bytes(100) == "abc" + obj = [1, 2.0, "hello"] whandle.send(obj) obj2 = rhandle.recv() @@ -150,4 +153,20 @@ import _multiprocessing raises(IOError, _multiprocessing.Connection, -1) - raises(IOError, _multiprocessing.Connection, -15) \ No newline at end of file + raises(IOError, _multiprocessing.Connection, -15) + + def test_byte_order(self): + # The exact format of net strings (length in network byte + # order) is important for interoperation with others + # implementations. + rhandle, whandle = self.make_pair() + whandle.send_bytes("abc") + whandle.send_bytes("defg") + import socket + sock = socket.fromfd(rhandle.fileno(), + socket.AF_INET, socket.SOCK_STREAM) + data1 = sock.recv(7) + assert data1 == '\x00\x00\x00\x03abc' + data2 = sock.recv(8) + assert data2 == '\x00\x00\x00\x04defg' + From noreply at buildbot.pypy.org Thu Dec 22 04:36:02 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 22 Dec 2011 04:36:02 +0100 (CET) Subject: [pypy-commit] pypy py3k: simplify callable() code and fix a bug in it, None is not the same as false :) Message-ID: <20111222033602.8317082B16@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: py3k Changeset: r50820:fad1ecae7c0d Date: 2011-12-21 21:35 -0600 http://bitbucket.org/pypy/pypy/changeset/fad1ecae7c0d/ Log: simplify callable() code and fix a bug in it, None is not the same as false :) diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1012,10 +1012,7 @@ return None def callable(self, w_obj): - if self.lookup(w_obj, "__call__") is not None: - return self.w_True - else: - return self.w_None + return self.wrap(self.lookup(w_obj, "__call__") is not None) def issequence_w(self, w_obj): return (self.findattr(w_obj, self.wrap("__getitem__")) is not None) From noreply at buildbot.pypy.org Thu Dec 22 05:09:16 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 22 Dec 2011 05:09:16 +0100 (CET) Subject: [pypy-commit] pypy py3k: make sure filter(x, non_callable) raises immediately, not on the first call to __next__ Message-ID: <20111222040916.8A24382B16@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: py3k Changeset: r50821:141ccdc2c4fb Date: 2011-12-21 22:09 -0600 http://bitbucket.org/pypy/pypy/changeset/141ccdc2c4fb/ Log: make sure filter(x, non_callable) raises immediately, not on the first call to __next__ diff --git 
a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py --- a/pypy/module/__builtin__/app_functional.py +++ b/pypy/module/__builtin__/app_functional.py @@ -129,7 +129,10 @@ or string, return the same type, else return a list.""" if func is None: func = bool - for item in seq: + return _filter(func, iter(seq)) + +def _filter(func, iterator): + for item in iterator: if func(item): yield item diff --git a/pypy/module/__builtin__/test/test_filter.py b/pypy/module/__builtin__/test/test_filter.py --- a/pypy/module/__builtin__/test/test_filter.py +++ b/pypy/module/__builtin__/test/test_filter.py @@ -1,11 +1,11 @@ import autopath -# trivial functions for testing +# trivial functions for testing class AppTestFilter: def test_filter_no_arguments(self): raises(TypeError, filter) - + def test_filter_no_function_no_seq(self): raises(TypeError, filter, None) @@ -16,10 +16,14 @@ raises(TypeError, filter, lambda x: x>3, [1], [2]) def test_filter_no_function_list(self): - assert list(filter(None, [1, 2, 3])) == [1, 2, 3] + assert list(filter(None, [1, 2, 3])) == [1, 2, 3] def test_filter_no_function_with_bools(self): - assert tuple(filter(None, (True, False, True))) == (True, True) - + assert tuple(filter(None, (True, False, True))) == (True, True) + def test_filter_list(self): - assert list(filter(lambda x: x>3, [1, 2, 3, 4, 5])) == [4, 5] + assert list(filter(lambda x: x>3, [1, 2, 3, 4, 5])) == [4, 5] + + def test_filter_non_iterable(self): + raises(TypeError, filter, None, 42) + raises(TypeError, filter, callable, list) \ No newline at end of file From noreply at buildbot.pypy.org Thu Dec 22 16:06:43 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 22 Dec 2011 16:06:43 +0100 (CET) Subject: [pypy-commit] pypy jit-improve-nested-loops: we cant call get_procedure_token if we are in_recursion since the actual greenkey arguments are wrongly typed for the "main" jitdriver of the metainterp Message-ID: <20111222150643.167A482B16@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-improve-nested-loops Changeset: r50822:ce62dce925d3 Date: 2011-12-22 16:02 +0100 http://bitbucket.org/pypy/pypy/changeset/ce62dce925d3/ Log: we cant call get_procedure_token if we are in_recursion since the actual greenkey arguments are wrongly typed for the "main" jitdriver of the metainterp diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -980,7 +980,7 @@ if self.metainterp.seen_loop_header_for_jdindex < 0: if not any_operation: return - if not self.metainterp.get_procedure_token(greenboxes, True): + if self.metainterp.in_recursion or not self.metainterp.get_procedure_token(greenboxes, True): if not jitdriver_sd.no_loop_header: return # automatically add a loop_header if there is none From noreply at buildbot.pypy.org Thu Dec 22 16:06:44 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 22 Dec 2011 16:06:44 +0100 (CET) Subject: [pypy-commit] pypy jit-improve-nested-loops: hg merge default Message-ID: <20111222150644.91D9F82B17@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-improve-nested-loops Changeset: r50823:f637da0553fb Date: 2011-12-22 16:04 +0100 http://bitbucket.org/pypy/pypy/changeset/f637da0553fb/ Log: hg merge default diff --git a/pypy/jit/backend/x86/jump.py b/pypy/jit/backend/x86/jump.py --- a/pypy/jit/backend/x86/jump.py +++ b/pypy/jit/backend/x86/jump.py @@ -17,7 +17,10 @@ key = src._getregkey() if key in srccount: if key == 
dst_locations[i]._getregkey(): - srccount[key] = -sys.maxint # ignore a move "x = x" + # ignore a move "x = x" + # setting any "large enough" negative value is ok, but + # be careful of overflows, don't use -sys.maxint + srccount[key] = -len(dst_locations) - 1 pending_dests -= 1 else: srccount[key] += 1 diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -385,3 +385,32 @@ assert read(loc, WORD) == src_values1[i] for i, loc in enumerate(dst_locations2): assert read(loc, 8) == src_values2[i] + + +def test_overflow_bug(): + CASE = [ + (-144, -248), # \ cycle + (-248, -144), # / + (-488, -416), # \ two usages of -488 + (-488, -480), # / + (-488, -488), # - one self-application of -488 + ] + class FakeAssembler: + def regalloc_mov(self, src, dst): + print "mov", src, dst + def regalloc_push(self, x): + print "push", x + def regalloc_pop(self, x): + print "pop", x + def regalloc_immedmem2mem(self, x, y): + print "?????????????????????????" + def main(): + srclocs = [StackLoc(9999, x, 'i') for x,y in CASE] + dstlocs = [StackLoc(9999, y, 'i') for x,y in CASE] + remap_frame_layout(FakeAssembler(), srclocs, dstlocs, eax) + # it works when run directly + main() + # but it used to crash when translated, + # because of a -sys.maxint-2 overflowing to sys.maxint + from pypy.rpython.test.test_llinterp import interpret + interpret(main, []) diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -6,7 +6,7 @@ OperationError, wrap_oserror, operationerrfmt) from pypy.rpython.lltypesystem import rffi, lltype from pypy.rlib.rarithmetic import intmask -from pypy.rlib import rpoll +from pypy.rlib import rpoll, rsocket import sys READABLE = 1 @@ -252,7 +252,9 @@ # "header" and the "body" of the message and send them at once. message = lltype.malloc(rffi.CCHARP.TO, size + 4, flavor='raw') try: - rffi.cast(rffi.UINTP, message)[0] = rffi.r_uint(size) # XXX htonl! 
+ length = rffi.r_uint(rsocket.htonl( + rffi.cast(lltype.Unsigned, size))) + rffi.cast(rffi.UINTP, message)[0] = length i = size - 1 while i >= 0: message[4 + i] = buffer[offset + i] @@ -264,7 +266,8 @@ def do_recv_string(self, space, buflength, maxlength): with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as length_ptr: self._recvall(space, rffi.cast(rffi.CCHARP, length_ptr), 4) - length = intmask(length_ptr[0]) + length = intmask(rsocket.ntohl( + rffi.cast(lltype.Unsigned, length_ptr[0]))) if length > maxlength: # bad message, close connection self.flags &= ~READABLE if self.flags == 0: diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -37,6 +37,9 @@ def test_connection(self): rhandle, whandle = self.make_pair() + whandle.send_bytes("abc") + assert rhandle.recv_bytes(100) == "abc" + obj = [1, 2.0, "hello"] whandle.send(obj) obj2 = rhandle.recv() @@ -150,4 +153,20 @@ import _multiprocessing raises(IOError, _multiprocessing.Connection, -1) - raises(IOError, _multiprocessing.Connection, -15) \ No newline at end of file + raises(IOError, _multiprocessing.Connection, -15) + + def test_byte_order(self): + # The exact format of net strings (length in network byte + # order) is important for interoperation with others + # implementations. + rhandle, whandle = self.make_pair() + whandle.send_bytes("abc") + whandle.send_bytes("defg") + import socket + sock = socket.fromfd(rhandle.fileno(), + socket.AF_INET, socket.SOCK_STREAM) + data1 = sock.recv(7) + assert data1 == '\x00\x00\x00\x03abc' + data2 = sock.recv(8) + assert data2 == '\x00\x00\x00\x04defg' + diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -102,3 +102,27 @@ class ConstantIterator(BaseIterator): def next(self, shapelen): return self + +# ------ other iterators that are not part of the computation frame ---------- + +class AxisIterator(object): + """ This object will return offsets of each start of the last stride + """ + def __init__(self, arr): + self.arr = arr + self.indices = [0] * (len(arr.shape) - 1) + self.done = False + self.offset = arr.start + + def next(self): + for i in range(len(self.arr.shape) - 2, -1, -1): + if self.indices[i] < self.arr.shape[i] - 1: + self.indices[i] += 1 + self.offset += self.arr.strides[i] + break + else: + self.indices[i] = 0 + self.offset -= self.arr.backstrides[i] + else: + self.done = True + diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -9,7 +9,7 @@ from pypy.tool.sourcetools import func_with_new_name from pypy.rlib.rstring import StringBuilder from pypy.module.micronumpy.interp_iter import ArrayIterator,\ - view_iter_from_arr, OneDimIterator + view_iter_from_arr, OneDimIterator, AxisIterator numpy_driver = jit.JitDriver( greens=['shapelen', 'sig'], @@ -390,10 +390,10 @@ return space.wrap(self.size) def descr_copy(self, space): - return self.copy() + return self.copy(space) - def copy(self): - return self.get_concrete().copy() + def copy(self, space): + return self.get_concrete().copy(space) def descr_len(self, space): if len(self.shape): @@ -536,7 +536,7 @@ new_shape, self) else: # Create copy with contiguous data - arr = 
concrete.copy() + arr = concrete.copy(space) arr.setshape(space, new_shape) return arr @@ -606,6 +606,9 @@ space.w_False])) return w_d + def supports_fast_slicing(self): + return False + def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): return w_obj @@ -639,7 +642,7 @@ def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): builder.append(self.dtype.itemtype.str_format(self.value)) - def copy(self): + def copy(self, space): return Scalar(self.dtype, self.value) def create_sig(self, res_shape): @@ -790,6 +793,9 @@ def get_concrete(self): return self + def supports_fast_slicing(self): + return self.order == 'C' and self.strides[-1] == 1 + def find_dtype(self): return self.dtype @@ -929,39 +935,35 @@ item += v * self.strides[i] return item - -class ViewArray(ConcreteArray): - def copy(self): - array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) - iter = view_iter_from_arr(self) - a_iter = ArrayIterator(array.size) - while not iter.done(): - array.setitem(a_iter.offset, self.getitem(iter.offset)) - iter = iter.next(len(self.shape)) - a_iter = a_iter.next(len(array.shape)) - return array - - def create_sig(self, res_shape): - return signature.ViewSignature(self.dtype) - - -class W_NDimSlice(ViewArray): - def __init__(self, start, strides, backstrides, shape, parent): - assert isinstance(parent, ConcreteArray) - if isinstance(parent, W_NDimSlice): - parent = parent.parent - size = 1 - for sh in shape: - size *= sh - self.strides = strides - self.backstrides = backstrides - ViewArray.__init__(self, size, shape, parent.dtype, parent.order, - parent) - self.start = start - def setslice(self, space, w_value): res_shape = shape_agreement(space, self.shape, w_value.shape) - self._sliceloop(w_value, res_shape) + if (res_shape == w_value.shape and self.supports_fast_slicing() and + w_value.supports_fast_slicing() and + self.dtype is w_value.find_dtype()): + self._fast_setslice(space, w_value) + else: + self._sliceloop(w_value, res_shape) + + def _fast_setslice(self, space, w_value): + assert isinstance(w_value, ConcreteArray) + itemsize = self.dtype.itemtype.get_element_size() + if len(self.shape) == 1: + rffi.c_memcpy( + rffi.ptradd(self.storage, self.start * itemsize), + rffi.ptradd(w_value.storage, w_value.start * itemsize), + self.size * itemsize + ) + else: + dest = AxisIterator(self) + source = AxisIterator(w_value) + while not dest.done: + rffi.c_memcpy( + rffi.ptradd(self.storage, dest.offset * itemsize), + rffi.ptradd(w_value.storage, source.offset * itemsize), + self.shape[-1] * itemsize + ) + source.next() + dest.next() def _sliceloop(self, source, res_shape): sig = source.find_sig(res_shape) @@ -979,6 +981,31 @@ frame.next(shapelen) res_iter = res_iter.next(shapelen) + def copy(self, space): + array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) + array.setslice(space, self) + return array + + +class ViewArray(ConcreteArray): + def create_sig(self, res_shape): + return signature.ViewSignature(self.dtype) + + +class W_NDimSlice(ViewArray): + def __init__(self, start, strides, backstrides, shape, parent): + assert isinstance(parent, ConcreteArray) + if isinstance(parent, W_NDimSlice): + parent = parent.parent + size = 1 + for sh in shape: + size *= sh + self.strides = strides + self.backstrides = backstrides + ViewArray.__init__(self, size, shape, parent.dtype, parent.order, + parent) + self.start = start + def setshape(self, space, new_shape): if len(self.shape) < 1: return @@ -1017,15 +1044,6 @@ """ A class representing 
contiguous array. We know that each iteration by say ufunc will increase the data index by one """ - def copy(self): - array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) - rffi.c_memcpy( - array.storage, - self.storage, - self.size * self.dtype.itemtype.get_element_size() - ) - return array - def setitem(self, item, value): self.invalidated() self.dtype.setitem(self.storage, item, value) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1077,6 +1077,17 @@ a = ones((1, 2, 3)) assert a[0, 1, 2] == 1.0 + def test_multidim_setslice(self): + from numpypy import zeros, ones + a = zeros((3, 3)) + b = ones((3, 3)) + a[:,1:3] = b[:,1:3] + assert (a == [[0, 1, 1], [0, 1, 1], [0, 1, 1]]).all() + a = zeros((3, 3)) + b = ones((3, 3)) + a[:,::2] = b[:,::2] + assert (a == [[1, 0, 1], [1, 0, 1], [1, 0, 1]]).all() + def test_broadcast_ufunc(self): from numpypy import array a = array([[1, 2], [3, 4], [5, 6]]) diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -510,6 +510,16 @@ return space.wrap(res) +def _replace_overflow_check(space, builder, new_piece): + # Checks if adding new_piece chars to the builder would overflow, and + # converts into an OverflowError. + try: + ovfcheck(builder.getlength() + new_piece) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("replace string is too long") + ) + def _string_replace(space, input, sub, by, maxsplit): if maxsplit == 0: return space.wrap(input) @@ -519,7 +529,7 @@ if maxsplit > 0 and maxsplit < upper + 2: upper = maxsplit - 1 assert upper >= 0 - + try: result_size = ovfcheck(upper * len(by)) result_size = ovfcheck(result_size + upper) @@ -548,14 +558,18 @@ if next < 0: break if not first: + _replace_overflow_check(space, builder, len(by)) builder.append(by) first = False + _replace_overflow_check(space, builder, next - start) builder.append_slice(input, start, next) start = next + sublen maxsplit -= 1 # NB. if it's already < 0, it stays < 0 if not first: + _replace_overflow_check(space, builder, len(by)) builder.append(by) + _replace_overflow_check(space, builder, len(input) - start) builder.append_slice(input, start, len(input)) return space.wrap(builder.build()) From noreply at buildbot.pypy.org Thu Dec 22 19:43:32 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 22 Dec 2011 19:43:32 +0100 (CET) Subject: [pypy-commit] pypy default: do the same check as CPython here, explicitly check for length overflow before doing a replace. Message-ID: <20111222184332.F2FE982B16@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50824:4d8780140779 Date: 2011-12-22 18:43 +0000 http://bitbucket.org/pypy/pypy/changeset/4d8780140779/ Log: do the same check as CPython here, explicitly check for length overflow before doing a replace. diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -510,16 +510,6 @@ return space.wrap(res) -def _replace_overflow_check(space, builder, new_piece): - # Checks if adding new_piece chars to the builder would overflow, and - # converts into an OverflowError. 
- try: - ovfcheck(builder.getlength() + new_piece) - except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("replace string is too long") - ) - def _string_replace(space, input, sub, by, maxsplit): if maxsplit == 0: return space.wrap(input) @@ -548,7 +538,19 @@ builder.append_slice(input, upper, len(input)) else: # An ok guess for the result size - builder = StringBuilder(len(input)) + count = input.count(sub) + if count > maxsplit and maxsplit > 0: + count = maxsplit + diff_len = len(by) - len(sub) + try: + result_size = ovfcheck(diff_len * count) + result_size = ovfcheck(result_size + len(input)) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("replace string is too long") + ) + + builder = StringBuilder(result_size) start = 0 sublen = len(sub) first = True @@ -558,18 +560,14 @@ if next < 0: break if not first: - _replace_overflow_check(space, builder, len(by)) builder.append(by) first = False - _replace_overflow_check(space, builder, next - start) builder.append_slice(input, start, next) start = next + sublen maxsplit -= 1 # NB. if it's already < 0, it stays < 0 if not first: - _replace_overflow_check(space, builder, len(by)) builder.append(by) - _replace_overflow_check(space, builder, len(input) - start) builder.append_slice(input, start, len(input)) return space.wrap(builder.build()) From noreply at buildbot.pypy.org Thu Dec 22 23:26:47 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 22 Dec 2011 23:26:47 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: note which is the tutorial. Message-ID: <20111222222647.1315182B16@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r3990:5c5293074ddb Date: 2011-12-22 16:26 -0600 http://bitbucket.org/pypy/extradoc/changeset/5c5293074ddb/ Log: note which is the tutorial. diff --git a/blog/draft/pycon-2012-teaser.rst b/blog/draft/pycon-2012-teaser.rst --- a/blog/draft/pycon-2012-teaser.rst +++ b/blog/draft/pycon-2012-teaser.rst @@ -13,7 +13,7 @@ perform much better. In this tutorial we'll give you insights on how to push PyPy to it's limits. We'll focus on understanding the performance characteristics of PyPy, and learning the analysis tools in order to maximize - your applications performance. + your applications performance. *This is the tutorial.* * **Why PyPy by example**, by Maciej Fijalkowski, Alex Gaynor and Armin Rigo: One of the goals of PyPy is to make existing Python code faster, however an From noreply at buildbot.pypy.org Thu Dec 22 23:59:03 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 22 Dec 2011 23:59:03 +0100 (CET) Subject: [pypy-commit] pypy default: Work with sqlite's that don't have extension support. Message-ID: <20111222225903.8902E82B16@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50825:2988075eae8d Date: 2011-12-22 16:58 -0600 http://bitbucket.org/pypy/pypy/changeset/2988075eae8d/ Log: Work with sqlite's that don't have extension support. 
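        The practical effect is that enable_load_extension() is only present
        when the underlying libsqlite3 exposes sqlite3_enable_load_extension,
        so callers can feature-test the connection.  A rough usage sketch
        (assuming the module is used as the standard sqlite3 module):

            import sqlite3

            conn = sqlite3.connect(':memory:')
            if hasattr(conn, 'enable_load_extension'):
                conn.enable_load_extension(True)   # libsqlite3 built with extension support
            # otherwise the method simply does not exist on this build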
diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -231,8 +231,10 @@ sqlite.sqlite3_result_text.argtypes = [c_void_p, c_char_p, c_int, c_void_p] sqlite.sqlite3_result_text.restype = None -sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] -sqlite.sqlite3_enable_load_extension.restype = c_int +HAS_LOAD_EXTENSION = hasattr(sqlite, "sqlite3_enable_load_extension") +if HAS_LOAD_EXTENSION: + sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] + sqlite.sqlite3_enable_load_extension.restype = c_int ########################################## # END Wrapped SQLite C API and constants @@ -708,13 +710,14 @@ from sqlite3.dump import _iterdump return _iterdump(self) - def enable_load_extension(self, enabled): - self._check_thread() - self._check_closed() + if HAS_LOAD_EXTENSION: + def enable_load_extension(self, enabled): + self._check_thread() + self._check_closed() - rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) - if rc != SQLITE_OK: - raise OperationalError("Error enabling load extension") + rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) + if rc != SQLITE_OK: + raise OperationalError("Error enabling load extension") DML, DQL, DDL = range(3) From noreply at buildbot.pypy.org Thu Dec 22 23:59:04 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 22 Dec 2011 23:59:04 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20111222225904.AE21D82B16@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50826:f6f8ddc1a2f0 Date: 2011-12-22 16:58 -0600 http://bitbucket.org/pypy/pypy/changeset/f6f8ddc1a2f0/ Log: merged upstream diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -510,16 +510,6 @@ return space.wrap(res) -def _replace_overflow_check(space, builder, new_piece): - # Checks if adding new_piece chars to the builder would overflow, and - # converts into an OverflowError. - try: - ovfcheck(builder.getlength() + new_piece) - except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("replace string is too long") - ) - def _string_replace(space, input, sub, by, maxsplit): if maxsplit == 0: return space.wrap(input) @@ -548,7 +538,19 @@ builder.append_slice(input, upper, len(input)) else: # An ok guess for the result size - builder = StringBuilder(len(input)) + count = input.count(sub) + if count > maxsplit and maxsplit > 0: + count = maxsplit + diff_len = len(by) - len(sub) + try: + result_size = ovfcheck(diff_len * count) + result_size = ovfcheck(result_size + len(input)) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("replace string is too long") + ) + + builder = StringBuilder(result_size) start = 0 sublen = len(sub) first = True @@ -558,18 +560,14 @@ if next < 0: break if not first: - _replace_overflow_check(space, builder, len(by)) builder.append(by) first = False - _replace_overflow_check(space, builder, next - start) builder.append_slice(input, start, next) start = next + sublen maxsplit -= 1 # NB. 
if it's already < 0, it stays < 0 if not first: - _replace_overflow_check(space, builder, len(by)) builder.append(by) - _replace_overflow_check(space, builder, len(input) - start) builder.append_slice(input, start, len(input)) return space.wrap(builder.build()) From noreply at buildbot.pypy.org Fri Dec 23 10:15:06 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Fri, 23 Dec 2011 10:15:06 +0100 (CET) Subject: [pypy-commit] pypy jit-improve-nested-loops: hg merge default Message-ID: <20111223091506.C875882B14@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-improve-nested-loops Changeset: r50827:896e5b681170 Date: 2011-12-23 10:14 +0100 http://bitbucket.org/pypy/pypy/changeset/896e5b681170/ Log: hg merge default diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -231,8 +231,10 @@ sqlite.sqlite3_result_text.argtypes = [c_void_p, c_char_p, c_int, c_void_p] sqlite.sqlite3_result_text.restype = None -sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] -sqlite.sqlite3_enable_load_extension.restype = c_int +HAS_LOAD_EXTENSION = hasattr(sqlite, "sqlite3_enable_load_extension") +if HAS_LOAD_EXTENSION: + sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] + sqlite.sqlite3_enable_load_extension.restype = c_int ########################################## # END Wrapped SQLite C API and constants @@ -708,13 +710,14 @@ from sqlite3.dump import _iterdump return _iterdump(self) - def enable_load_extension(self, enabled): - self._check_thread() - self._check_closed() + if HAS_LOAD_EXTENSION: + def enable_load_extension(self, enabled): + self._check_thread() + self._check_closed() - rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) - if rc != SQLITE_OK: - raise OperationalError("Error enabling load extension") + rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) + if rc != SQLITE_OK: + raise OperationalError("Error enabling load extension") DML, DQL, DDL = range(3) diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -510,16 +510,6 @@ return space.wrap(res) -def _replace_overflow_check(space, builder, new_piece): - # Checks if adding new_piece chars to the builder would overflow, and - # converts into an OverflowError. - try: - ovfcheck(builder.getlength() + new_piece) - except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("replace string is too long") - ) - def _string_replace(space, input, sub, by, maxsplit): if maxsplit == 0: return space.wrap(input) @@ -548,7 +538,19 @@ builder.append_slice(input, upper, len(input)) else: # An ok guess for the result size - builder = StringBuilder(len(input)) + count = input.count(sub) + if count > maxsplit and maxsplit > 0: + count = maxsplit + diff_len = len(by) - len(sub) + try: + result_size = ovfcheck(diff_len * count) + result_size = ovfcheck(result_size + len(input)) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("replace string is too long") + ) + + builder = StringBuilder(result_size) start = 0 sublen = len(sub) first = True @@ -558,18 +560,14 @@ if next < 0: break if not first: - _replace_overflow_check(space, builder, len(by)) builder.append(by) first = False - _replace_overflow_check(space, builder, next - start) builder.append_slice(input, start, next) start = next + sublen maxsplit -= 1 # NB. 
if it's already < 0, it stays < 0 if not first: - _replace_overflow_check(space, builder, len(by)) builder.append(by) - _replace_overflow_check(space, builder, len(input) - start) builder.append_slice(input, start, len(input)) return space.wrap(builder.build()) From noreply at buildbot.pypy.org Fri Dec 23 11:05:15 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Fri, 23 Dec 2011 11:05:15 +0100 (CET) Subject: [pypy-commit] pypy default: Fix the test to again test what it was intended to test. It curently fails Message-ID: <20111223100515.516FA82B14@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r50828:27228df2fb9a Date: 2011-12-23 11:04 +0100 http://bitbucket.org/pypy/pypy/changeset/27228df2fb9a/ Log: Fix the test to again test what it was intended to test. It curently fails diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -521,14 +521,20 @@ loop = """ [i0] - label(i0, descr=targettoken) + label(i0, descr=preambletoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1, descr=targettoken) + label(i1, descr=targettoken) + debug_merge_point('xyz', 0) + i11 = int_add(i1, 1) + i12 = int_ge(i11, 10) + guard_false(i12) [] + jump(i11, descr=targettoken) """ - ops = parse(loop, namespace={'targettoken': TargetToken()}) + ops = parse(loop, namespace={'targettoken': TargetToken(), + 'preambletoken': TargetToken()}) debug._log = dlog = debug.DebugLog() try: self.cpu.assembler.set_debug(True) @@ -537,6 +543,8 @@ self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] + assert struct.i == 1 + struct = self.cpu.assembler.loop_run_counters[1] assert struct.i == 10 self.cpu.finish_once() finally: From noreply at buildbot.pypy.org Fri Dec 23 14:33:14 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Dec 2011 14:33:14 +0100 (CET) Subject: [pypy-commit] pypy generator-in-rpython: A non-test for a non-feature. Message-ID: <20111223133314.C74C082B10@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: generator-in-rpython Changeset: r50829:9d9bba8e1c24 Date: 2011-12-23 11:34 +0100 http://bitbucket.org/pypy/pypy/changeset/9d9bba8e1c24/ Log: A non-test for a non-feature. 
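The "non-feature" being documented is that merging two different generators at a single control-flow point is not supported by the minimal RPython generator support; the new test only records a class-based workaround. A rough illustration of the unsupported pattern (hypothetical code, not taken from the changeset):

    def g1(a):
        yield a + 1
        yield a + 2

    def g2():
        yield 42

    def f(n):
        # At this join point 'gen' may hold a generator coming from either
        # of two different generator functions; the translator would have
        # to merge two distinct generator types, which is not supported.
        if n > 0:
            gen = g1(n)
        else:
            gen = g2()
        return gen.next()

The MyG1/MyG2 classes in the diff below sidestep this by hiding each generator behind a shared base class with a next() method.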
diff --git a/pypy/rpython/test/test_generator.py b/pypy/rpython/test/test_generator.py --- a/pypy/rpython/test/test_generator.py +++ b/pypy/rpython/test/test_generator.py @@ -17,6 +17,40 @@ res = self.interpret(f, []) assert res == 358 + def test_cannot_merge(self): + # merging two different generators is not supported + # right now, but we can use workarounds like here + class MyGen: + def next(self): + raise NotImplementedError + class MyG1(MyGen): + def __init__(self, a): + self._gen = self.g1(a) + def next(self): + return self._gen.next() + @staticmethod + def g1(a): + yield a + 1 + yield a + 2 + class MyG2(MyGen): + def __init__(self): + self._gen = self.g2() + def next(self): + return self._gen.next() + @staticmethod + def g2(): + yield 42 + def f(n): + if n > 0: + gen = MyG1(n) + else: + gen = MyG2() + return gen.next() + res = self.interpret(f, [10]) + assert res == 11 + res = self.interpret(f, [0]) + assert res == 42 + class TestLLtype(BaseTestGenerator, LLRtypeMixin): pass From noreply at buildbot.pypy.org Fri Dec 23 14:33:15 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Dec 2011 14:33:15 +0100 (CET) Subject: [pypy-commit] pypy generator-in-rpython: Mark the test classes as _immutable_=True, to showcase what a real Message-ID: <20111223133315.E935D82B10@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: generator-in-rpython Changeset: r50830:c73c899880d8 Date: 2011-12-23 12:26 +0100 http://bitbucket.org/pypy/pypy/changeset/c73c899880d8/ Log: Mark the test classes as _immutable_=True, to showcase what a real implementation would look like. diff --git a/pypy/rpython/test/test_generator.py b/pypy/rpython/test/test_generator.py --- a/pypy/rpython/test/test_generator.py +++ b/pypy/rpython/test/test_generator.py @@ -21,9 +21,11 @@ # merging two different generators is not supported # right now, but we can use workarounds like here class MyGen: + _immutable_ = True def next(self): raise NotImplementedError class MyG1(MyGen): + _immutable_ = True def __init__(self, a): self._gen = self.g1(a) def next(self): @@ -33,6 +35,7 @@ yield a + 1 yield a + 2 class MyG2(MyGen): + _immutable_ = True def __init__(self): self._gen = self.g2() def next(self): From noreply at buildbot.pypy.org Fri Dec 23 14:33:17 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Dec 2011 14:33:17 +0100 (CET) Subject: [pypy-commit] pypy default: Kill the old version of the function, probably left over during a merge. Message-ID: <20111223133317.1C35782B10@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50831:21c3998a104d Date: 2011-12-23 14:31 +0100 http://bitbucket.org/pypy/pypy/changeset/21c3998a104d/ Log: Kill the old version of the function, probably left over during a merge. diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -1007,25 +1007,6 @@ # a jump back to itself and possibly a few bridges ending with finnish. # Only the operations within the loop formed by that single jump will # be counted. - - # XXX hacked version, ignore and remove me when jit-targets is merged. 
- loops = self.get_all_loops() - loops = [loop for loop in loops if 'Preamble' not in repr(loop)] #XXX - assert len(loops) == 1 - loop, = loops - jumpop = loop.operations[-1] - assert jumpop.getopnum() == rop.JUMP - insns = {} - for op in loop.operations: - opname = op.getopname() - insns[opname] = insns.get(opname, 0) + 1 - return self._check_insns(insns, expected, check) - - def check_simple_loop(self, expected=None, **check): - # Usefull in the simplest case when we have only one trace ending with - # a jump back to itself and possibly a few bridges ending with finnish. - # Only the operations within the loop formed by that single jump will - # be counted. loops = self.get_all_loops() assert len(loops) == 1 loop = loops[0] From noreply at buildbot.pypy.org Fri Dec 23 14:37:48 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Dec 2011 14:37:48 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge generator-in-rpython: minimal support for generators. Message-ID: <20111223133748.5014282B10@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50832:9bec9e6ff111 Date: 2011-12-23 14:37 +0100 http://bitbucket.org/pypy/pypy/changeset/9bec9e6ff111/ Log: hg merge generator-in-rpython: minimal support for generators. diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -180,7 +180,12 @@ if name is None: name = pyobj.func_name if signature is None: - signature = cpython_code_signature(pyobj.func_code) + if hasattr(pyobj, '_generator_next_method_of_'): + from pypy.interpreter.argument import Signature + signature = Signature(['entry']) # haaaaaack + defaults = () + else: + signature = cpython_code_signature(pyobj.func_code) if defaults is None: defaults = pyobj.func_defaults self.name = name diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ -185,7 +185,7 @@ class FlowExecutionContext(ExecutionContext): def __init__(self, space, code, globals, constargs={}, outer_func=None, - name=None): + name=None, is_generator=False): ExecutionContext.__init__(self, space) self.code = code @@ -208,6 +208,7 @@ initialblock = SpamBlock(FrameState(frame).copy()) self.pendingblocks = collections.deque([initialblock]) self.graph = FunctionGraph(name or code.co_name, initialblock) + self.is_generator = is_generator make_link = Link # overridable for transition tracking @@ -247,6 +248,8 @@ return outcome, w_exc_cls, w_exc_value def build_flow(self): + if self.is_generator: + self.produce_generator_mark() while self.pendingblocks: block = self.pendingblocks.popleft() frame = self.create_frame() @@ -259,9 +262,15 @@ self.topframeref = jit.non_virtual_ref(frame) self.crnt_frame = frame try: - w_result = frame.dispatch(frame.pycode, - frame.last_instr, - self) + frame.frame_finished_execution = False + while True: + w_result = frame.dispatch(frame.pycode, + frame.last_instr, + self) + if frame.frame_finished_execution: + break + else: + self.generate_yield(frame, w_result) finally: self.crnt_frame = None self.topframeref = old_frameref @@ -307,6 +316,21 @@ del self.recorder self.fixeggblocks() + def produce_generator_mark(self): + [initialblock] = self.pendingblocks + initialblock.operations.append( + SpaceOperation('generator_mark', [], Variable())) + + def generate_yield(self, frame, w_result): + assert self.is_generator + self.recorder.crnt_block.operations.append( + 
SpaceOperation('yield', [w_result], Variable())) + # we must push a dummy value that will be POPped: it's the .send() + # passed into the generator (2.5 feature) + assert sys.version_info >= (2, 5) + frame.pushvalue(None) + frame.last_instr += 1 + def fixeggblocks(self): # EggBlocks reuse the variables of their previous block, # which is deemed not acceptable for simplicity of the operations diff --git a/pypy/objspace/flow/objspace.py b/pypy/objspace/flow/objspace.py --- a/pypy/objspace/flow/objspace.py +++ b/pypy/objspace/flow/objspace.py @@ -8,6 +8,7 @@ from pypy.interpreter.pycode import PyCode, cpython_code_signature from pypy.interpreter.module import Module from pypy.interpreter.error import OperationError +from pypy.interpreter.astcompiler.consts import CO_GENERATOR from pypy.interpreter import pyframe, argument from pypy.objspace.flow.model import * from pypy.objspace.flow import flowcontext, operation, specialcase @@ -247,15 +248,13 @@ return ecls return None - def build_flow(self, func, constargs={}): + def build_flow(self, func, constargs={}, tweak_for_generator=True): """ """ if func.func_doc and func.func_doc.lstrip().startswith('NOT_RPYTHON'): raise Exception, "%r is tagged as NOT_RPYTHON" % (func,) code = func.func_code - if code.co_flags & 32: - # generator - raise TypeError("%r is a generator" % (func,)) + is_generator = bool(code.co_flags & CO_GENERATOR) code = PyCode._from_code(self, code) if func.func_closure is None: cl = None @@ -271,7 +270,8 @@ class outerfunc: # hack closure = cl ec = flowcontext.FlowExecutionContext(self, code, func.func_globals, - constargs, outerfunc, name) + constargs, outerfunc, name, + is_generator) graph = ec.graph graph.func = func # attach a signature and defaults to the graph @@ -291,6 +291,11 @@ e = error.FlowingError(formated) raise error.FlowingError, e, tb checkgraph(graph) + # + if is_generator and tweak_for_generator: + from pypy.translator.generator import tweak_generator_graph + tweak_generator_graph(graph) + # return graph def fixedview(self, w_tuple, expected_length=None): diff --git a/pypy/objspace/flow/test/test_generator.py b/pypy/objspace/flow/test/test_generator.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/flow/test/test_generator.py @@ -0,0 +1,18 @@ +from pypy.objspace.flow.test.test_objspace import Base + + +class TestGenerator(Base): + + def test_simple_generator(self): + def f(n): + i = 0 + while i < n: + yield i + yield i + i += 1 + graph = self.codetest(f, tweak_for_generator=False) + ops = self.all_operations(graph) + assert ops == {'generator_mark': 1, + 'lt': 1, 'is_true': 1, + 'yield': 2, + 'inplace_add': 1} diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -16,14 +16,14 @@ is_operator = getattr(operator, 'is_', operator.eq) # it's not there 2.2 class Base: - def codetest(self, func): + def codetest(self, func, **kwds): import inspect try: func = func.im_func except AttributeError: pass #name = func.func_name - graph = self.space.build_flow(func) + graph = self.space.build_flow(func, **kwds) graph.source = inspect.getsource(func) self.show(graph) return graph @@ -882,12 +882,6 @@ num = bytecode_spec.opmap[name] flow_meth_names[num] = locals()['old_' + name] - def test_generator(self): - def f(): - yield 3 - - py.test.raises(TypeError, "self.codetest(f)") - def test_dont_capture_RuntimeError(self): class Foo: def __hash__(self): diff --git 
a/pypy/rpython/test/test_generator.py b/pypy/rpython/test/test_generator.py new file mode 100644 --- /dev/null +++ b/pypy/rpython/test/test_generator.py @@ -0,0 +1,62 @@ +from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin + + +class BaseTestGenerator(BaseRtypingTest): + + def test_simple_explicit(self): + def g(a, b, c): + yield a + yield b + yield c + def f(): + gen = g(3, 5, 8) + x = gen.next() * 100 + x += gen.next() * 10 + x += gen.next() + return x + res = self.interpret(f, []) + assert res == 358 + + def test_cannot_merge(self): + # merging two different generators is not supported + # right now, but we can use workarounds like here + class MyGen: + _immutable_ = True + def next(self): + raise NotImplementedError + class MyG1(MyGen): + _immutable_ = True + def __init__(self, a): + self._gen = self.g1(a) + def next(self): + return self._gen.next() + @staticmethod + def g1(a): + yield a + 1 + yield a + 2 + class MyG2(MyGen): + _immutable_ = True + def __init__(self): + self._gen = self.g2() + def next(self): + return self._gen.next() + @staticmethod + def g2(): + yield 42 + def f(n): + if n > 0: + gen = MyG1(n) + else: + gen = MyG2() + return gen.next() + res = self.interpret(f, [10]) + assert res == 11 + res = self.interpret(f, [0]) + assert res == 42 + + +class TestLLtype(BaseTestGenerator, LLRtypeMixin): + pass + +class TestOOtype(BaseTestGenerator, OORtypeMixin): + pass diff --git a/pypy/translator/generator.py b/pypy/translator/generator.py new file mode 100644 --- /dev/null +++ b/pypy/translator/generator.py @@ -0,0 +1,166 @@ +from pypy.objspace.flow.model import Block, Link, SpaceOperation, checkgraph +from pypy.objspace.flow.model import Variable, Constant, FunctionGraph +from pypy.translator.unsimplify import insert_empty_startblock +from pypy.translator.unsimplify import split_block +from pypy.translator.simplify import eliminate_empty_blocks +from pypy.tool.sourcetools import func_with_new_name +from pypy.interpreter.argument import Signature + + +class AbstractPosition(object): + _immutable_ = True + _attrs_ = () + + +def tweak_generator_graph(graph): + if not hasattr(graph.func, '_generator_next_method_of_'): + # This is the first copy of the graph. We replace it with + # a small bootstrap graph. + GeneratorIterator = make_generatoriterator_class(graph) + replace_graph_with_bootstrap(GeneratorIterator, graph) + # We attach a 'next' method to the GeneratorIterator class + # that will invoke the real function, based on a second + # copy of the graph. + attach_next_method(GeneratorIterator, graph) + else: + # This is the second copy of the graph. Tweak it. 
+ GeneratorIterator = graph.func._generator_next_method_of_ + tweak_generator_body_graph(GeneratorIterator.Entry, graph) + + +def make_generatoriterator_class(graph): + class GeneratorIterator(object): + class Entry(AbstractPosition): + _immutable_ = True + varnames = get_variable_names(graph.startblock.inputargs) + def __init__(self, entry): + self.current = entry + return GeneratorIterator + +def replace_graph_with_bootstrap(GeneratorIterator, graph): + Entry = GeneratorIterator.Entry + newblock = Block(graph.startblock.inputargs) + v_generator = Variable('generator') + v_entry = Variable('entry') + newblock.operations.append( + SpaceOperation('simple_call', [Constant(Entry)], v_entry)) + assert len(graph.startblock.inputargs) == len(Entry.varnames) + for v, name in zip(graph.startblock.inputargs, Entry.varnames): + newblock.operations.append( + SpaceOperation('setattr', [v_entry, Constant(name), v], + Variable())) + newblock.operations.append( + SpaceOperation('simple_call', [Constant(GeneratorIterator), v_entry], + v_generator)) + newblock.closeblock(Link([v_generator], graph.returnblock)) + graph.startblock = newblock + +def attach_next_method(GeneratorIterator, graph): + func = graph.func + func = func_with_new_name(func, '%s__next' % (func.func_name,)) + func._generator_next_method_of_ = GeneratorIterator + func._always_inline_ = True + # + def next(self): + entry = self.current + self.current = None + (next_entry, return_value) = func(entry) + self.current = next_entry + return return_value + GeneratorIterator.next = next + return func # for debugging + +def get_variable_names(variables): + seen = set() + result = [] + for v in variables: + name = v._name.strip('_') + while name in seen: + name += '_' + result.append('g_' + name) + seen.add(name) + return result + +def _insert_reads(block, varnames): + assert len(varnames) == len(block.inputargs) + v_entry1 = Variable('entry') + for i, name in enumerate(varnames): + block.operations.insert(i, + SpaceOperation('getattr', [v_entry1, Constant(name)], + block.inputargs[i])) + block.inputargs = [v_entry1] + +def tweak_generator_body_graph(Entry, graph): + assert graph.startblock.operations[0].opname == 'generator_mark' + graph.startblock.operations.pop(0) + # + insert_empty_startblock(None, graph) + _insert_reads(graph.startblock, Entry.varnames) + Entry.block = graph.startblock + # + mappings = [Entry] + # + for block in list(graph.iterblocks()): + for exit in block.exits: + if exit.target is graph.returnblock: + exit.args = [Constant(StopIteration), + Constant(StopIteration())] + exit.target = graph.exceptblock + for index in range(len(block.operations)-1, -1, -1): + op = block.operations[index] + if op.opname == 'yield': + [v_yielded_value] = op.args + del block.operations[index] + newlink = split_block(None, block, index) + newblock = newlink.target + # + class Resume(AbstractPosition): + _immutable_ = True + block = newblock + Resume.__name__ = 'Resume%d' % len(mappings) + mappings.append(Resume) + varnames = get_variable_names(newlink.args) + # + _insert_reads(newblock, varnames) + # + v_resume = Variable('resume') + block.operations.append( + SpaceOperation('simple_call', [Constant(Resume)], + v_resume)) + for i, name in enumerate(varnames): + block.operations.append( + SpaceOperation('setattr', [v_resume, Constant(name), + newlink.args[i]], + Variable())) + v_pair = Variable('pair') + block.operations.append( + SpaceOperation('newtuple', [v_resume, v_yielded_value], + v_pair)) + newlink.args = [v_pair] + newlink.target = 
graph.returnblock + # + regular_entry_block = Block([Variable('entry')]) + block = regular_entry_block + for Resume in mappings: + v_check = Variable() + block.operations.append( + SpaceOperation('simple_call', [Constant(isinstance), + block.inputargs[0], + Constant(Resume)], + v_check)) + block.exitswitch = v_check + link1 = Link([block.inputargs[0]], Resume.block) + link1.exitcase = True + nextblock = Block([Variable('entry')]) + link2 = Link([block.inputargs[0]], nextblock) + link2.exitcase = False + block.closeblock(link1, link2) + block = nextblock + block.closeblock(Link([Constant(AssertionError), + Constant(AssertionError("bad generator class"))], + graph.exceptblock)) + graph.startblock = regular_entry_block + graph.signature = Signature(['entry']) + graph.defaults = () + checkgraph(graph) + eliminate_empty_blocks(graph) diff --git a/pypy/translator/test/test_generator.py b/pypy/translator/test/test_generator.py new file mode 100644 --- /dev/null +++ b/pypy/translator/test/test_generator.py @@ -0,0 +1,156 @@ +from pypy.conftest import option +from pypy.objspace.flow.objspace import FlowObjSpace +from pypy.objspace.flow.model import Variable +from pypy.interpreter.argument import Signature +from pypy.translator.translator import TranslationContext +from pypy.translator.generator import make_generatoriterator_class +from pypy.translator.generator import replace_graph_with_bootstrap +from pypy.translator.generator import get_variable_names +from pypy.translator.generator import tweak_generator_body_graph +from pypy.translator.generator import attach_next_method +from pypy.translator.simplify import join_blocks + + +# ____________________________________________________________ + +def f_gen(n): + i = 0 + while i < n: + yield i + i += 1 + +class GeneratorIterator(object): + def __init__(self, entry): + self.current = entry + def next(self): + e = self.current + self.current = None + if isinstance(e, Yield1): + n = e.n_0 + i = e.i_0 + i += 1 + else: + n = e.n_0 + i = 0 + if i < n: + e = Yield1() + e.n_0 = n + e.i_0 = i + self.current = e + return i + raise StopIteration + + def __iter__(self): + return self + +class AbstractPosition(object): + _immutable_ = True +class Entry1(AbstractPosition): + _immutable_ = True +class Yield1(AbstractPosition): + _immutable_ = True + +def f_explicit(n): + e = Entry1() + e.n_0 = n + return GeneratorIterator(e) + +def test_explicit(): + assert list(f_gen(10)) == list(f_explicit(10)) + +def test_get_variable_names(): + lst = get_variable_names([Variable('a'), Variable('b_'), Variable('a')]) + assert lst == ['g_a', 'g_b', 'g_a_'] + +# ____________________________________________________________ + + +class TestGenerator: + + def test_replace_graph_with_bootstrap(self): + def func(n, x, y, z): + yield n + yield n + # + space = FlowObjSpace() + graph = space.build_flow(func, tweak_for_generator=False) + assert graph.startblock.operations[0].opname == 'generator_mark' + GeneratorIterator = make_generatoriterator_class(graph) + replace_graph_with_bootstrap(GeneratorIterator, graph) + if option.view: + graph.show() + block = graph.startblock + ops = block.operations + assert ops[0].opname == 'simple_call' # e = Entry1() + assert ops[1].opname == 'setattr' # e.g_n = n + assert ops[1].args[1].value == 'g_n' + assert ops[2].opname == 'setattr' # e.g_x = x + assert ops[2].args[1].value == 'g_x' + assert ops[3].opname == 'setattr' # e.g_y = y + assert ops[3].args[1].value == 'g_y' + assert ops[4].opname == 'setattr' # e.g_z = z + assert ops[4].args[1].value == 'g_z' 
+ assert ops[5].opname == 'simple_call' # g = GeneratorIterator(e) + assert ops[5].args[1] == ops[0].result + assert len(ops) == 6 + assert len(block.exits) == 1 + assert block.exits[0].target is graph.returnblock + + def test_tweak_generator_body_graph(self): + def f(n, x, y, z=3): + z *= 10 + yield n + 1 + z -= 10 + # + space = FlowObjSpace() + graph = space.build_flow(f, tweak_for_generator=False) + class Entry: + varnames = ['g_n', 'g_x', 'g_y', 'g_z'] + tweak_generator_body_graph(Entry, graph) + if option.view: + graph.show() + # XXX how to test directly that the graph is correct? :-( + assert len(graph.startblock.inputargs) == 1 + assert graph.signature == Signature(['entry']) + assert graph.defaults == () + + def test_tweak_generator_graph(self): + def f(n, x, y, z): + z *= 10 + yield n + 1 + z -= 10 + # + space = FlowObjSpace() + graph = space.build_flow(f, tweak_for_generator=False) + GeneratorIterator = make_generatoriterator_class(graph) + replace_graph_with_bootstrap(GeneratorIterator, graph) + func1 = attach_next_method(GeneratorIterator, graph) + if option.view: + graph.show() + # + assert func1._generator_next_method_of_ is GeneratorIterator + assert hasattr(GeneratorIterator, 'next') + # + graph_next = space.build_flow(GeneratorIterator.next.im_func) + join_blocks(graph_next) + if option.view: + graph_next.show() + # + graph1 = space.build_flow(func1, tweak_for_generator=False) + tweak_generator_body_graph(GeneratorIterator.Entry, graph1) + if option.view: + graph1.show() + + def test_automatic(self): + def f(n, x, y, z): + z *= 10 + yield n + 1 + z -= 10 + # + space = FlowObjSpace() + graph = space.build_flow(f) # tweak_for_generator=True + if option.view: + graph.show() + block = graph.startblock + assert len(block.exits) == 1 + assert block.exits[0].target is graph.returnblock From noreply at buildbot.pypy.org Fri Dec 23 15:22:54 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Dec 2011 15:22:54 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: Add a link to the Compatibility Wiki. Message-ID: <20111223142254.0BF1382B10@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r299:682c997ce4fb Date: 2011-12-23 15:22 +0100 http://bitbucket.org/pypy/pypy.org/changeset/682c997ce4fb/ Log: Add a link to the Compatibility Wiki. diff --git a/compat.html b/compat.html --- a/compat.html +++ b/compat.html @@ -52,7 +52,7 @@

 PyPy has alpha/beta-level support for the CPython C API, however, as of 1.7
 release this feature is not yet complete. Many libraries will require
 a bit of effort to work, but there are known success stories. Check out
-PyPy blog for updates.
+PyPy blog for updates, as well as the Compatibility Wiki.
 C extensions need to be recompiled for PyPy in order to work. Depending on your
 build system, it might work out of the box or will be slightly harder.
 Standard library modules supported by PyPy, in alphabetical order:
    diff --git a/source/compat.txt b/source/compat.txt --- a/source/compat.txt +++ b/source/compat.txt @@ -11,7 +11,9 @@ PyPy has **alpha/beta-level** support for the `CPython C API`_, however, as of 1.7 release this feature is not yet complete. Many libraries will require a bit of effort to work, but there are known success stories. Check out -PyPy blog for updates. +PyPy blog for updates, as well as the `Compatibility Wiki`__. + +.. __: https://bitbucket.org/pypy/compatibility/wiki/Home C extensions need to be recompiled for PyPy in order to work. Depending on your build system, it might work out of the box or will be slightly harder. From noreply at buildbot.pypy.org Fri Dec 23 15:24:20 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Dec 2011 15:24:20 +0100 (CET) Subject: [pypy-commit] pypy default: Skip these broken tests for now Message-ID: <20111223142420.0F76582B10@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50833:bf79890911ed Date: 2011-12-23 15:23 +0100 http://bitbucket.org/pypy/pypy/changeset/bf79890911ed/ Log: Skip these broken tests for now diff --git a/pypy/tool/jitlogparser/test/test_modulefinder.py b/pypy/tool/jitlogparser/test/test_modulefinder.py --- a/pypy/tool/jitlogparser/test/test_modulefinder.py +++ b/pypy/tool/jitlogparser/test/test_modulefinder.py @@ -7,12 +7,14 @@ py.test.skip("Specific python 2.6 tests") def test_gather_code_py(): + py.test.skip("XXX broken, fix me") fname = re.__file__ codes = gather_all_code_objs(fname) assert len(codes) == 21 assert sorted(codes.keys()) == [102, 134, 139, 144, 153, 164, 169, 181, 188, 192, 197, 206, 229, 251, 266, 271, 277, 285, 293, 294, 308] def test_load_code(): + py.test.skip("XXX broken, fix me") fname = re.__file__ code = gather_all_code_objs(fname)[144] assert code.co_name == 'sub' From noreply at buildbot.pypy.org Fri Dec 23 15:27:52 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Dec 2011 15:27:52 +0100 (CET) Subject: [pypy-commit] pypy default: fix? Message-ID: <20111223142752.7674782B10@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50834:532130e19935 Date: 2011-12-23 15:27 +0100 http://bitbucket.org/pypy/pypy/changeset/532130e19935/ Log: fix? 
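The terse log does not say what was fixed: the diff that follows only switches the test from the JitDriver method form, myjitdriver.set_param(...), to the module-level helper jit.set_param(myjitdriver, ...), presumably because the method form was no longer accepted here. Roughly, in the test's own terms:

    from pypy.rlib import jit

    myjitdriver = jit.JitDriver(greens=[], reds=['n'])

    # spelling removed by this changeset:
    #   myjitdriver.set_param('threshold', 2)
    #   myjitdriver.set_param('trace_eagerness', 0)

    # spelling the test now uses:
    jit.set_param(myjitdriver, 'threshold', 2)
    jit.set_param(myjitdriver, 'trace_eagerness', 0)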
diff --git a/pypy/jit/backend/x86/test/test_zrpy_platform.py b/pypy/jit/backend/x86/test/test_zrpy_platform.py --- a/pypy/jit/backend/x86/test/test_zrpy_platform.py +++ b/pypy/jit/backend/x86/test/test_zrpy_platform.py @@ -74,8 +74,8 @@ myjitdriver = jit.JitDriver(greens = [], reds = ['n']) def entrypoint(argv): - myjitdriver.set_param('threshold', 2) - myjitdriver.set_param('trace_eagerness', 0) + jit.set_param(myjitdriver, 'threshold', 2) + jit.set_param(myjitdriver, 'trace_eagerness', 0) n = 16 while n > 0: myjitdriver.can_enter_jit(n=n) From noreply at buildbot.pypy.org Sat Dec 24 03:26:39 2011 From: noreply at buildbot.pypy.org (gutworth) Date: Sat, 24 Dec 2011 03:26:39 +0100 (CET) Subject: [pypy-commit] pypy default: less indirection Message-ID: <20111224022639.5E12782B10@wyvern.cs.uni-duesseldorf.de> Author: Benjamin Peterson Branch: Changeset: r50835:619c71657ffc Date: 2011-12-23 20:26 -0600 http://bitbucket.org/pypy/pypy/changeset/619c71657ffc/ Log: less indirection diff --git a/pypy/jit/codewriter/call.py b/pypy/jit/codewriter/call.py --- a/pypy/jit/codewriter/call.py +++ b/pypy/jit/codewriter/call.py @@ -42,8 +42,7 @@ except AttributeError: pass - def is_candidate(graph): - return policy.look_inside_graph(graph) + is_candidate = policy.look_inside_graph assert len(self.jitdrivers_sd) > 0 todo = [jd.portal_graph for jd in self.jitdrivers_sd] From noreply at buildbot.pypy.org Sat Dec 24 06:24:52 2011 From: noreply at buildbot.pypy.org (gutworth) Date: Sat, 24 Dec 2011 06:24:52 +0100 (CET) Subject: [pypy-commit] pypy default: give W_PyCFunctionObject __name__ Message-ID: <20111224052452.AECB082B10@wyvern.cs.uni-duesseldorf.de> Author: Benjamin Peterson Branch: Changeset: r50836:819faa2129a8 Date: 2011-12-23 23:24 -0600 http://bitbucket.org/pypy/pypy/changeset/819faa2129a8/ Log: give W_PyCFunctionObject __name__ diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -58,6 +58,7 @@ class W_PyCFunctionObject(Wrappable): def __init__(self, space, ml, w_self, w_module=None): self.ml = ml + self.name = rffi.charp2str(self.ml.c_ml_name) self.w_self = w_self self.w_module = w_module @@ -69,7 +70,7 @@ flags &= ~(METH_CLASS | METH_STATIC | METH_COEXIST) if space.is_true(w_kw) and not flags & METH_KEYWORDS: raise OperationError(space.w_TypeError, space.wrap( - rffi.charp2str(self.ml.c_ml_name) + "() takes no keyword arguments")) + self.name + "() takes no keyword arguments")) func = rffi.cast(PyCFunction, self.ml.c_ml_meth) length = space.int_w(space.len(w_args)) @@ -80,13 +81,12 @@ if length == 0: return generic_cpy_call(space, func, w_self, None) raise OperationError(space.w_TypeError, space.wrap( - rffi.charp2str(self.ml.c_ml_name) + "() takes no arguments")) + self.name + "() takes no arguments")) elif flags & METH_O: if length != 1: raise OperationError(space.w_TypeError, space.wrap("%s() takes exactly one argument (%d given)" % ( - rffi.charp2str(self.ml.c_ml_name), - length))) + self.name, length))) w_arg = space.getitem(w_args, space.wrap(0)) return generic_cpy_call(space, func, w_self, w_arg) elif flags & METH_VARARGS: @@ -199,6 +199,7 @@ __call__ = interp2app(cfunction_descr_call), __doc__ = GetSetProperty(W_PyCFunctionObject.get_doc), __module__ = interp_attrproperty_w('w_module', cls=W_PyCFunctionObject), + __name__ = interp_attrproperty('name', cls=W_PyCFunctionObject), ) W_PyCFunctionObject.typedef.acceptable_as_base_class = False diff --git 
a/pypy/module/cpyext/test/test_methodobject.py b/pypy/module/cpyext/test/test_methodobject.py --- a/pypy/module/cpyext/test/test_methodobject.py +++ b/pypy/module/cpyext/test/test_methodobject.py @@ -63,6 +63,7 @@ ), ]) assert mod.getarg_O(1) == 1 + assert mod.getarg_O.__name__ == "getarg_O" raises(TypeError, mod.getarg_O) raises(TypeError, mod.getarg_O, 1, 1) From noreply at buildbot.pypy.org Sat Dec 24 09:37:19 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 24 Dec 2011 09:37:19 +0100 (CET) Subject: [pypy-commit] pypy jit-improve-nested-loops: we get one extra bridge now Message-ID: <20111224083719.7A32E822A3@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-improve-nested-loops Changeset: r50837:7ba363df8be4 Date: 2011-12-23 15:15 +0100 http://bitbucket.org/pypy/pypy/changeset/7ba363df8be4/ Log: we get one extra bridge now diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2697,7 +2697,7 @@ # bridge back to the preamble of the first loop is produced. A guard in # this bridge is later traced resulting in a failed attempt of retracing # the second loop. - self.check_trace_count(8) + self.check_trace_count(9) # FIXME: Add a gloabl retrace counter and test that we are not trying more than 5 times. From noreply at buildbot.pypy.org Sat Dec 24 09:37:20 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 24 Dec 2011 09:37:20 +0100 (CET) Subject: [pypy-commit] pypy jit-label-counters: use one counter per label Message-ID: <20111224083720.B3F8A822A3@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-label-counters Changeset: r50838:167d9feec558 Date: 2011-12-24 09:36 +0100 http://bitbucket.org/pypy/pypy/changeset/167d9feec558/ Log: use one counter per label diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -76,6 +76,7 @@ failargs_limit) self.fail_ebp = 0 self.loop_run_counters = [] + self.loop_run_counter_tokens = [] self.float_const_neg_addr = 0 self.float_const_abs_addr = 0 self.malloc_slowpath1 = 0 @@ -147,12 +148,17 @@ def finish_once(self): if self._debug: debug_start('jit-backend-counts') - for struct in self.loop_run_counters: - if struct.bridge: - prefix = 'bridge ' + for i in range(len(self.loop_run_counters)): + struct = self.loop_run_counters[i] + token = self.loop_run_counter_tokens[i] + if token: + prefix = token else: - prefix = 'loop ' - debug_print(prefix + str(struct.number) + ':' + str(struct.i)) + if struct.bridge: + prefix = 'bridge ' + str(struct.number) + else: + prefix = 'loop ' + str(struct.number) + debug_print(prefix + ':' + str(struct.i)) debug_stop('jit-backend-counts') def _build_float_constants(self): @@ -422,8 +428,8 @@ self.setup(looptoken) if log: - self._register_counter(False, looptoken.number) - operations = self._inject_debugging_code(looptoken, operations) + operations = self._inject_debugging_code(looptoken, operations, + False, looptoken.number) regalloc = RegAlloc(self, self.cpu.translate_support_code) # @@ -489,8 +495,8 @@ self.setup(original_loop_token) if log: - self._register_counter(True, descr_number) - operations = self._inject_debugging_code(faildescr, operations) + operations = self._inject_debugging_code(faildescr, operations, + True, descr_number) arglocs = self.rebuild_faillocs_from_descr(failure_recovery) if not we_are_translated(): @@ -597,17 +603,18 
@@ return self.mc.materialize(self.cpu.asmmemmgr, allblocks, self.cpu.gc_ll_descr.gcrootmap) - def _register_counter(self, bridge, number): - if self._debug: - # YYY very minor leak -- we need the counters to stay alive - # forever, just because we want to report them at the end - # of the process - struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', - track_allocation=False) - struct.i = 0 - struct.bridge = int(bridge) - struct.number = number - self.loop_run_counters.append(struct) + def _register_counter(self, bridge, number, token): + # YYY very minor leak -- we need the counters to stay alive + # forever, just because we want to report them at the end + # of the process + struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', + track_allocation=False) + struct.i = 0 + struct.bridge = int(bridge) + struct.number = number + self.loop_run_counters.append(struct) + self.loop_run_counter_tokens.append(token.repr_of_descr()) + return struct def _find_failure_recovery_bytecode(self, faildescr): adr_jump_offset = faildescr._x86_adr_jump_offset @@ -651,27 +658,37 @@ targettoken._x86_loop_code += rawstart self.target_tokens_currently_compiling = None + def _append_debugging_code(self, operations, bridge, number, token): + counter = self._register_counter(bridge, number, token) + c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) + box = BoxInt() + box2 = BoxInt() + ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], + box, descr=self.debug_counter_descr), + ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), + ResOperation(rop.SETFIELD_RAW, [c_adr, box2], + None, descr=self.debug_counter_descr)] + operations.extend(ops) + @specialize.argtype(1) - def _inject_debugging_code(self, looptoken, operations): + def _inject_debugging_code(self, looptoken, operations, bridge, number): if self._debug: # before doing anything, let's increase a counter s = 0 for op in operations: s += op.getopnum() looptoken._x86_debug_checksum = s - c_adr = ConstInt(rffi.cast(lltype.Signed, - self.loop_run_counters[-1])) - box = BoxInt() - box2 = BoxInt() - ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], - box, descr=self.debug_counter_descr), - ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), - ResOperation(rop.SETFIELD_RAW, [c_adr, box2], - None, descr=self.debug_counter_descr)] - if operations[0].getopnum() == rop.LABEL: - operations = [operations[0]] + ops + operations[1:] - else: - operations = ops + operations + + newoperations = [] + if bridge: + self._append_debugging_code(newoperations, bridge, number, + None) + for op in operations: + newoperations.append(op) + if op.getopnum() == rop.LABEL: + self._append_debugging_code(newoperations, bridge, number, + op.getdescr()) + operations = newoperations return operations def _assemble(self, regalloc, operations): diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -519,6 +519,7 @@ from pypy.tool.logparser import parse_log_file, extract_category from pypy.rlib import debug + targettoken, preambletoken = TargetToken(), TargetToken() loop = """ [i0] label(i0, descr=preambletoken) @@ -533,8 +534,8 @@ guard_false(i12) [] jump(i11, descr=targettoken) """ - ops = parse(loop, namespace={'targettoken': TargetToken(), - 'preambletoken': TargetToken()}) + ops = parse(loop, namespace={'targettoken': targettoken, + 'preambletoken': preambletoken}) debug._log = dlog = debug.DebugLog() try: self.cpu.assembler.set_debug(True) @@ -544,12 +545,18 @@ 
# check debugging info struct = self.cpu.assembler.loop_run_counters[0] assert struct.i == 1 + token = self.cpu.assembler.loop_run_counter_tokens[0] + assert token == preambletoken.repr_of_descr() struct = self.cpu.assembler.loop_run_counters[1] - assert struct.i == 10 + assert struct.i == 9 + token = self.cpu.assembler.loop_run_counter_tokens[1] + assert token == targettoken.repr_of_descr() self.cpu.finish_once() finally: debug._log = None - assert ('jit-backend-counts', [('debug_print', 'loop -1:10')]) in dlog + l1 = ('debug_print', preambletoken.repr_of_descr() + ':1') + l2 = ('debug_print', targettoken.repr_of_descr() + ':9') + assert ('jit-backend-counts', [l1, l2]) in dlog def test_debugger_checksum(self): loop = """ From noreply at buildbot.pypy.org Sat Dec 24 10:51:21 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 24 Dec 2011 10:51:21 +0100 (CET) Subject: [pypy-commit] pypy jit-label-counters: split traces into parts going from one label to the next Message-ID: <20111224095121.E1A68822A3@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-label-counters Changeset: r50839:b9fedb0f9f96 Date: 2011-12-24 10:51 +0100 http://bitbucket.org/pypy/pypy/changeset/b9fedb0f9f96/ Log: split traces into parts going from one label to the next diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -387,6 +387,25 @@ loops.append(loop) return log, loops +class Part(object): + def __init__(self, trace, operations): + self.trace = trace + self.operations = operations + + def __len___(self): + return len(self.operations) + +def split_trace(trace): + labels = [i for i, op in enumerate(trace.operations) + if op.name == 'label'] + labels = [0] + labels + [len(trace.operations) - 1] + parts = [] + for i in range(len(labels) - 1): + start, stop = labels[i], labels[i+1] + + parts.append(Part(trace, trace.operations[start : stop + 1])) + + return parts def parse_log_counts(input, loops): if not input: diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -1,6 +1,6 @@ from pypy.tool.jitlogparser.parser import (SimpleParser, TraceForOpcode, Function, adjust_bridges, - import_log, Op) + import_log, split_trace, Op) from pypy.tool.jitlogparser.storage import LoopStorage import py, sys @@ -231,3 +231,21 @@ myrepr = 'c = foobar(a, b, descr=mydescr)' assert op.repr() == myrepr assert op.repr() == myrepr # do it twice + +def test_split_trace(): + loop = parse(''' + [i7] + i9 = int_lt(i7, 1003) + label(i9) + guard_true(i9, descr=) [] + i13 = getfield_raw(151937600, descr=) + label(i13) + i19 = int_lt(i13, 1003) + guard_true(i19, descr=) [] + i113 = getfield_raw(151937600, descr=) + ''') + parts = split_trace(loop) + assert len(parts) == 3 + assert len(parts[0].operations) == 2 + assert len(parts[1].operations) == 4 + assert len(parts[2].operations) == 4 From noreply at buildbot.pypy.org Sat Dec 24 18:14:31 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 24 Dec 2011 18:14:31 +0100 (CET) Subject: [pypy-commit] pypy jit-label-counters: hopefully fix tests Message-ID: <20111224171431.A4FDA822A3@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-label-counters Changeset: r50840:1e33997d6c55 Date: 2011-12-24 19:13 +0200 http://bitbucket.org/pypy/pypy/changeset/1e33997d6c55/ Log: hopefully fix tests diff --git 
a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -613,7 +613,10 @@ struct.bridge = int(bridge) struct.number = number self.loop_run_counters.append(struct) - self.loop_run_counter_tokens.append(token.repr_of_descr()) + if token is not None: + self.loop_run_counter_tokens.append(token.repr_of_descr()) + else: + self.loop_run_counter_tokens.append(None) return struct def _find_failure_recovery_bytecode(self, faildescr): From noreply at buildbot.pypy.org Sun Dec 25 01:02:01 2011 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 25 Dec 2011 01:02:01 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: hg merge with default Message-ID: <20111225000201.396DA822A3@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50841:62ab064ed939 Date: 2011-12-23 13:41 +0200 http://bitbucket.org/pypy/pypy/changeset/62ab064ed939/ Log: hg merge with default diff too long, truncating to 10000 out of 29808 lines diff --git a/lib-python/modified-2.7/ctypes/__init__.py b/lib-python/modified-2.7/ctypes/__init__.py --- a/lib-python/modified-2.7/ctypes/__init__.py +++ b/lib-python/modified-2.7/ctypes/__init__.py @@ -351,7 +351,7 @@ self._FuncPtr = _FuncPtr if handle is None: - self._handle = _ffi.CDLL(name) + self._handle = _ffi.CDLL(name, mode) else: self._handle = handle diff --git a/lib-python/modified-2.7/ctypes/test/test_callbacks.py b/lib-python/modified-2.7/ctypes/test/test_callbacks.py --- a/lib-python/modified-2.7/ctypes/test/test_callbacks.py +++ b/lib-python/modified-2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test class Callbacks(unittest.TestCase): @@ -98,6 +99,7 @@ ## self.check_type(c_char_p, "abc") ## self.check_type(c_char_p, "def") + @xfail def test_pyobject(self): o = () from sys import getrefcount as grc diff --git a/lib-python/modified-2.7/ctypes/test/test_libc.py b/lib-python/modified-2.7/ctypes/test/test_libc.py --- a/lib-python/modified-2.7/ctypes/test/test_libc.py +++ b/lib-python/modified-2.7/ctypes/test/test_libc.py @@ -25,7 +25,10 @@ lib.my_qsort(chars, len(chars)-1, sizeof(c_char), comparefunc(sort)) self.assertEqual(chars.raw, " ,,aaaadmmmnpppsss\x00") - def test_no_more_xfail(self): + def SKIPPED_test_no_more_xfail(self): + # We decided to not explicitly support the whole ctypes-2.7 + # and instead go for a case-by-case, demand-driven approach. + # So this test is skipped instead of failing. 
import socket import ctypes.test self.assertTrue(not hasattr(ctypes.test, 'xfail'), diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -406,7 +406,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -231,8 +231,10 @@ sqlite.sqlite3_result_text.argtypes = [c_void_p, c_char_p, c_int, c_void_p] sqlite.sqlite3_result_text.restype = None -sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] -sqlite.sqlite3_enable_load_extension.restype = c_int +HAS_LOAD_EXTENSION = hasattr(sqlite, "sqlite3_enable_load_extension") +if HAS_LOAD_EXTENSION: + sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] + sqlite.sqlite3_enable_load_extension.restype = c_int ########################################## # END Wrapped SQLite C API and constants @@ -708,13 +710,14 @@ from sqlite3.dump import _iterdump return _iterdump(self) - def enable_load_extension(self, enabled): - self._check_thread() - self._check_closed() + if HAS_LOAD_EXTENSION: + def enable_load_extension(self, enabled): + self._check_thread() + self._check_closed() - rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) - if rc != SQLITE_OK: - raise OperationalError("Error enabling load extension") + rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) + if rc != SQLITE_OK: + raise OperationalError("Error enabling load extension") DML, DQL, DDL = range(3) diff --git a/lib_pypy/distributed/socklayer.py b/lib_pypy/distributed/socklayer.py --- a/lib_pypy/distributed/socklayer.py +++ b/lib_pypy/distributed/socklayer.py @@ -2,7 +2,7 @@ import py from socket import socket -XXX needs import adaptation as 'green' is removed from py lib for years +raise ImportError("XXX needs import adaptation as 'green' is removed from py lib for years") from py.impl.green.msgstruct import decodemessage, message from socket import socket, AF_INET, SOCK_STREAM import marshal diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -164,6 +164,7 @@ # if something: # assume this causes a NameError # # _this_ lines and the one # below we don't want from entry.getsource() + end = min(end, len(source)) for i in range(self.lineno, end): if source[i].rstrip().endswith(':'): end = i + 1 diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py --- a/pypy/bin/checkmodule.py +++ b/pypy/bin/checkmodule.py @@ -1,43 +1,45 @@ #! /usr/bin/env python """ -Usage: checkmodule.py [-b backend] +Usage: checkmodule.py -Compiles the PyPy extension module from pypy/module// -into a fake program which does nothing. Useful for testing whether a -modules compiles without doing a full translation. Default backend is cli. - -WARNING: this is still incomplete: there are chances that the -compilation fails with strange errors not due to the module. If a -module is known to compile during a translation but don't pass -checkmodule.py, please report the bug (or, better, correct it :-). +Check annotation and rtyping of the PyPy extension module from +pypy/module//. Useful for testing whether a +modules compiles without doing a full translation. 
""" import autopath -import sys +import sys, os from pypy.objspace.fake.checkmodule import checkmodule def main(argv): - try: - assert len(argv) in (2, 4) - if len(argv) == 2: - backend = 'cli' - modname = argv[1] - if modname in ('-h', '--help'): - print >> sys.stderr, __doc__ - sys.exit(0) - if modname.startswith('-'): - print >> sys.stderr, "Bad command line" - print >> sys.stderr, __doc__ - sys.exit(1) - else: - _, b, backend, modname = argv - assert b == '-b' - except AssertionError: + if len(argv) != 2: print >> sys.stderr, __doc__ sys.exit(2) + modname = argv[1] + if modname in ('-h', '--help'): + print >> sys.stderr, __doc__ + sys.exit(0) + if modname.startswith('-'): + print >> sys.stderr, "Bad command line" + print >> sys.stderr, __doc__ + sys.exit(1) + if os.path.sep in modname: + if os.path.basename(modname) == '': + modname = os.path.dirname(modname) + if os.path.basename(os.path.dirname(modname)) != 'module': + print >> sys.stderr, "Must give '../module/xxx', or just 'xxx'." + sys.exit(1) + modname = os.path.basename(modname) + try: + checkmodule(modname) + except Exception, e: + import traceback, pdb + traceback.print_exc() + pdb.post_mortem(sys.exc_info()[2]) + return 1 else: - checkmodule(modname, backend, interactive=True) - print 'Module compiled succesfully' + print 'Passed.' + return 0 if __name__ == '__main__': - main(sys.argv) + sys.exit(main(sys.argv)) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -252,6 +252,10 @@ "use small tuples", default=False), + BoolOption("withspecialisedtuple", + "use specialised tuples", + default=False), + BoolOption("withrope", "use ropes as the string implementation", default=False, requires=[("objspace.std.withstrslice", False), @@ -365,6 +369,7 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) + config.objspace.std.suggest(withspecialisedtuple=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -496,6 +496,17 @@ def setup(self): super(AppClassCollector, self).setup() cls = self.obj + # + # + for name in dir(cls): + if name.startswith('test_'): + func = getattr(cls, name, None) + code = getattr(func, 'func_code', None) + if code and code.co_flags & 32: + raise AssertionError("unsupported: %r is a generator " + "app-level test method" % (name,)) + # + # space = cls.space clsname = cls.__name__ if self.config.option.runappdirect: diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.6' +version = '1.7' # The full version, including alpha/beta/rc tags. -release = '1.6' +release = '1.7' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/config/objspace.std.withspecialisedtuple.txt b/pypy/doc/config/objspace.std.withspecialisedtuple.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withspecialisedtuple.txt @@ -0,0 +1,3 @@ +Use "specialized tuples", a custom implementation for some common kinds +of tuples. Currently limited to tuples of length 2, in three variants: +(int, int), (float, float), and a generic (object, object). 
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -112,10 +112,32 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. +Note that the JIT has a very high warm-up cost, meaning that the +programs are slow at the beginning. If you want to compare the timings +with CPython, even relatively simple programs need to run *at least* one +second, preferrably at least a few seconds. Large, complicated programs +need even more time to warm-up the JIT. + .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +--------------------------------------------------------------- +Couldn't the JIT dump and reload already-compiled machine code? +--------------------------------------------------------------- + +No, we found no way of doing that. The JIT generates machine code +containing a large number of constant addresses --- constant at the time +the machine code is written. The vast majority is probably not at all +constants that you find in the executable, with a nice link name. E.g. +the addresses of Python classes are used all the time, but Python +classes don't come statically from the executable; they are created anew +every time you restart your program. This makes saving and reloading +machine code completely impossible without some very advanced way of +mapping addresses in the old (now-dead) process to addresses in the new +process, including checking that all the previous assumptions about the +(now-dead) object are still true about the new object. + .. _`prolog and javascript`: diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -51,6 +51,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." + state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \"%s\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \"%s\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -94,17 +112,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." 
- for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \"%s\" missing from %s" - err = err % (missing, host) - w_err = space.wrap(err) - raise OperationError(space.w_TypeError, w_err) - raise AssertionError("should not reach here") class mod(AST): @@ -112,7 +119,6 @@ class Module(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -128,7 +134,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Module') + self.missing_field(space, ['body'], 'Module') else: pass w_list = self.w_body @@ -145,7 +151,6 @@ class Interactive(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -161,7 +166,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Interactive') + self.missing_field(space, ['body'], 'Interactive') else: pass w_list = self.w_body @@ -178,7 +183,6 @@ class Expression(mod): - def __init__(self, body): self.body = body self.initialization_state = 1 @@ -192,7 +196,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Expression') + self.missing_field(space, ['body'], 'Expression') else: pass self.body.sync_app_attrs(space) @@ -200,7 +204,6 @@ class Suite(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -216,7 +219,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Suite') + self.missing_field(space, ['body'], 'Suite') else: pass w_list = self.w_body @@ -232,15 +235,13 @@ class stmt(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class FunctionDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, args, body, decorator_list, lineno, col_offset): self.name = name self.args = args @@ -264,7 +265,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'args', 'body', 'decorator_list', 'lineno', 'col_offset'], 'FunctionDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef') else: pass self.args.sync_app_attrs(space) @@ -292,9 +293,6 @@ class ClassDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, bases, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases @@ -320,7 +318,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'bases', 'body', 'decorator_list', 'lineno', 'col_offset'], 'ClassDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef') else: pass w_list = self.w_bases @@ -357,9 +355,6 @@ class Return(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -374,10 +369,10 @@ return visitor.visit_Return(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Return') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Return') else: - 
if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -385,9 +380,6 @@ class Delete(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, targets, lineno, col_offset): self.targets = targets self.w_targets = None @@ -404,7 +396,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['targets', 'lineno', 'col_offset'], 'Delete') + self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete') else: pass w_list = self.w_targets @@ -421,9 +413,6 @@ class Assign(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, targets, value, lineno, col_offset): self.targets = targets self.w_targets = None @@ -442,7 +431,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['targets', 'value', 'lineno', 'col_offset'], 'Assign') + self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') else: pass w_list = self.w_targets @@ -460,9 +449,6 @@ class AugAssign(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, target, op, value, lineno, col_offset): self.target = target self.op = op @@ -480,7 +466,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['target', 'op', 'value', 'lineno', 'col_offset'], 'AugAssign') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') else: pass self.target.sync_app_attrs(space) @@ -489,9 +475,6 @@ class Print(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, dest, values, nl, lineno, col_offset): self.dest = dest self.values = values @@ -511,10 +494,10 @@ return visitor.visit_Print(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 30: - missing_field(space, self.initialization_state, [None, 'values', 'nl', 'lineno', 'col_offset'], 'Print') + if (self.initialization_state & ~4) ^ 27: + self.missing_field(space, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.dest = None if self.dest: self.dest.sync_app_attrs(space) @@ -532,9 +515,6 @@ class For(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, target, iter, body, orelse, lineno, col_offset): self.target = target self.iter = iter @@ -559,7 +539,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['target', 'iter', 'body', 'orelse', 'lineno', 'col_offset'], 'For') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 'orelse'], 'For') else: pass self.target.sync_app_attrs(space) @@ -588,9 +568,6 @@ class While(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -613,7 +590,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'While') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') else: pass self.test.sync_app_attrs(space) @@ -641,9 +618,6 @@ class If(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): 
self.test = test self.body = body @@ -666,7 +640,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'If') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') else: pass self.test.sync_app_attrs(space) @@ -694,9 +668,6 @@ class With(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, context_expr, optional_vars, body, lineno, col_offset): self.context_expr = context_expr self.optional_vars = optional_vars @@ -717,10 +688,10 @@ return visitor.visit_With(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 29: - missing_field(space, self.initialization_state, ['context_expr', None, 'body', 'lineno', 'col_offset'], 'With') + if (self.initialization_state & ~8) ^ 23: + self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.optional_vars = None self.context_expr.sync_app_attrs(space) if self.optional_vars: @@ -739,9 +710,6 @@ class Raise(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, inst, tback, lineno, col_offset): self.type = type self.inst = inst @@ -762,14 +730,14 @@ return visitor.visit_Raise(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~7) ^ 24: - missing_field(space, self.initialization_state, [None, None, None, 'lineno', 'col_offset'], 'Raise') + if (self.initialization_state & ~28) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.inst = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.tback = None if self.type: self.type.sync_app_attrs(space) @@ -781,9 +749,6 @@ class TryExcept(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, handlers, orelse, lineno, col_offset): self.body = body self.w_body = None @@ -808,7 +773,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['body', 'handlers', 'orelse', 'lineno', 'col_offset'], 'TryExcept') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') else: pass w_list = self.w_body @@ -845,9 +810,6 @@ class TryFinally(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, body, finalbody, lineno, col_offset): self.body = body self.w_body = None @@ -868,7 +830,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['body', 'finalbody', 'lineno', 'col_offset'], 'TryFinally') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') else: pass w_list = self.w_body @@ -895,9 +857,6 @@ class Assert(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, test, msg, lineno, col_offset): self.test = test self.msg = msg @@ -914,10 +873,10 @@ return visitor.visit_Assert(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 13: - missing_field(space, self.initialization_state, ['test', None, 'lineno', 'col_offset'], 'Assert') + if (self.initialization_state & ~8) ^ 7: + self.missing_field(space, ['lineno', 
'col_offset', 'test', None], 'Assert') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.msg = None self.test.sync_app_attrs(space) if self.msg: @@ -926,9 +885,6 @@ class Import(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -945,7 +901,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Import') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import') else: pass w_list = self.w_names @@ -962,9 +918,6 @@ class ImportFrom(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, module, names, level, lineno, col_offset): self.module = module self.names = names @@ -982,12 +935,12 @@ return visitor.visit_ImportFrom(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~5) ^ 26: - missing_field(space, self.initialization_state, [None, 'names', None, 'lineno', 'col_offset'], 'ImportFrom') + if (self.initialization_state & ~20) ^ 11: + self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.module = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.level = 0 w_list = self.w_names if w_list is not None: @@ -1003,9 +956,6 @@ class Exec(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, globals, locals, lineno, col_offset): self.body = body self.globals = globals @@ -1025,12 +975,12 @@ return visitor.visit_Exec(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~6) ^ 25: - missing_field(space, self.initialization_state, ['body', None, None, 'lineno', 'col_offset'], 'Exec') + if (self.initialization_state & ~24) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'body', None, None], 'Exec') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.globals = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.locals = None self.body.sync_app_attrs(space) if self.globals: @@ -1041,9 +991,6 @@ class Global(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -1058,7 +1005,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Global') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') else: pass w_list = self.w_names @@ -1072,9 +1019,6 @@ class Expr(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -1089,7 +1033,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Expr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr') else: pass self.value.sync_app_attrs(space) @@ -1097,9 +1041,6 @@ class Pass(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1112,16 +1053,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, 
self.initialization_state, ['lineno', 'col_offset'], 'Pass') + self.missing_field(space, ['lineno', 'col_offset'], 'Pass') else: pass class Break(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1134,16 +1072,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Break') + self.missing_field(space, ['lineno', 'col_offset'], 'Break') else: pass class Continue(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1156,21 +1091,19 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Continue') + self.missing_field(space, ['lineno', 'col_offset'], 'Continue') else: pass class expr(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class BoolOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, values, lineno, col_offset): self.op = op self.values = values @@ -1188,7 +1121,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'values', 'lineno', 'col_offset'], 'BoolOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp') else: pass w_list = self.w_values @@ -1205,9 +1138,6 @@ class BinOp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, op, right, lineno, col_offset): self.left = left self.op = op @@ -1225,7 +1155,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'op', 'right', 'lineno', 'col_offset'], 'BinOp') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'op', 'right'], 'BinOp') else: pass self.left.sync_app_attrs(space) @@ -1234,9 +1164,6 @@ class UnaryOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, operand, lineno, col_offset): self.op = op self.operand = operand @@ -1252,7 +1179,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'operand', 'lineno', 'col_offset'], 'UnaryOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'operand'], 'UnaryOp') else: pass self.operand.sync_app_attrs(space) @@ -1260,9 +1187,6 @@ class Lambda(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, args, body, lineno, col_offset): self.args = args self.body = body @@ -1279,7 +1203,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['args', 'body', 'lineno', 'col_offset'], 'Lambda') + self.missing_field(space, ['lineno', 'col_offset', 'args', 'body'], 'Lambda') else: pass self.args.sync_app_attrs(space) @@ -1288,9 +1212,6 @@ class IfExp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -1309,7 +1230,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'IfExp') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 
'IfExp') else: pass self.test.sync_app_attrs(space) @@ -1319,9 +1240,6 @@ class Dict(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, keys, values, lineno, col_offset): self.keys = keys self.w_keys = None @@ -1342,7 +1260,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['keys', 'values', 'lineno', 'col_offset'], 'Dict') + self.missing_field(space, ['lineno', 'col_offset', 'keys', 'values'], 'Dict') else: pass w_list = self.w_keys @@ -1369,9 +1287,6 @@ class Set(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, elts, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1388,7 +1303,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['elts', 'lineno', 'col_offset'], 'Set') + self.missing_field(space, ['lineno', 'col_offset', 'elts'], 'Set') else: pass w_list = self.w_elts @@ -1405,9 +1320,6 @@ class ListComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1426,7 +1338,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'ListComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'ListComp') else: pass self.elt.sync_app_attrs(space) @@ -1444,9 +1356,6 @@ class SetComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1465,7 +1374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'SetComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'SetComp') else: pass self.elt.sync_app_attrs(space) @@ -1483,9 +1392,6 @@ class DictComp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, key, value, generators, lineno, col_offset): self.key = key self.value = value @@ -1506,7 +1412,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['key', 'value', 'generators', 'lineno', 'col_offset'], 'DictComp') + self.missing_field(space, ['lineno', 'col_offset', 'key', 'value', 'generators'], 'DictComp') else: pass self.key.sync_app_attrs(space) @@ -1525,9 +1431,6 @@ class GeneratorExp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1546,7 +1449,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'GeneratorExp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'GeneratorExp') else: pass self.elt.sync_app_attrs(space) @@ -1564,9 +1467,6 @@ class Yield(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1581,10 +1481,10 @@ return visitor.visit_Yield(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Yield') + if 
(self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Yield') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -1592,9 +1492,6 @@ class Compare(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, ops, comparators, lineno, col_offset): self.left = left self.ops = ops @@ -1615,7 +1512,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'ops', 'comparators', 'lineno', 'col_offset'], 'Compare') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'ops', 'comparators'], 'Compare') else: pass self.left.sync_app_attrs(space) @@ -1640,9 +1537,6 @@ class Call(expr): - _lineno_mask = 32 - _col_offset_mask = 64 - def __init__(self, func, args, keywords, starargs, kwargs, lineno, col_offset): self.func = func self.args = args @@ -1670,12 +1564,12 @@ return visitor.visit_Call(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~24) ^ 103: - missing_field(space, self.initialization_state, ['func', 'args', 'keywords', None, None, 'lineno', 'col_offset'], 'Call') + if (self.initialization_state & ~96) ^ 31: + self.missing_field(space, ['lineno', 'col_offset', 'func', 'args', 'keywords', None, None], 'Call') else: - if not self.initialization_state & 8: + if not self.initialization_state & 32: self.starargs = None - if not self.initialization_state & 16: + if not self.initialization_state & 64: self.kwargs = None self.func.sync_app_attrs(space) w_list = self.w_args @@ -1706,9 +1600,6 @@ class Repr(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1723,7 +1614,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Repr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Repr') else: pass self.value.sync_app_attrs(space) @@ -1731,9 +1622,6 @@ class Num(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, n, lineno, col_offset): self.n = n expr.__init__(self, lineno, col_offset) @@ -1747,16 +1635,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['n', 'lineno', 'col_offset'], 'Num') + self.missing_field(space, ['lineno', 'col_offset', 'n'], 'Num') else: pass class Str(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, s, lineno, col_offset): self.s = s expr.__init__(self, lineno, col_offset) @@ -1770,16 +1655,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['s', 'lineno', 'col_offset'], 'Str') + self.missing_field(space, ['lineno', 'col_offset', 's'], 'Str') else: pass class Attribute(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, attr, ctx, lineno, col_offset): self.value = value self.attr = attr @@ -1796,7 +1678,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'attr', 'ctx', 'lineno', 'col_offset'], 'Attribute') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'attr', 'ctx'], 'Attribute') else: pass self.value.sync_app_attrs(space) @@ -1804,9 +1686,6 @@ class 
Subscript(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, slice, ctx, lineno, col_offset): self.value = value self.slice = slice @@ -1824,7 +1703,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'slice', 'ctx', 'lineno', 'col_offset'], 'Subscript') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'slice', 'ctx'], 'Subscript') else: pass self.value.sync_app_attrs(space) @@ -1833,9 +1712,6 @@ class Name(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, id, ctx, lineno, col_offset): self.id = id self.ctx = ctx @@ -1850,16 +1726,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['id', 'ctx', 'lineno', 'col_offset'], 'Name') + self.missing_field(space, ['lineno', 'col_offset', 'id', 'ctx'], 'Name') else: pass class List(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1877,7 +1750,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'List') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'List') else: pass w_list = self.w_elts @@ -1894,9 +1767,6 @@ class Tuple(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1914,7 +1784,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'Tuple') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'Tuple') else: pass w_list = self.w_elts @@ -1931,9 +1801,6 @@ class Const(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1947,7 +1814,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Const') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Const') else: pass @@ -2009,7 +1876,6 @@ class Ellipsis(slice): - def __init__(self): self.initialization_state = 0 @@ -2021,14 +1887,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 0: - missing_field(space, self.initialization_state, [], 'Ellipsis') + self.missing_field(space, [], 'Ellipsis') else: pass class Slice(slice): - def __init__(self, lower, upper, step): self.lower = lower self.upper = upper @@ -2049,7 +1914,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~7) ^ 0: - missing_field(space, self.initialization_state, [None, None, None], 'Slice') + self.missing_field(space, [None, None, None], 'Slice') else: if not self.initialization_state & 1: self.lower = None @@ -2067,7 +1932,6 @@ class ExtSlice(slice): - def __init__(self, dims): self.dims = dims self.w_dims = None @@ -2083,7 +1947,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['dims'], 'ExtSlice') + self.missing_field(space, ['dims'], 'ExtSlice') else: pass w_list = self.w_dims @@ -2100,7 +1964,6 @@ class Index(slice): - def __init__(self, value): self.value = value self.initialization_state = 1 @@ -2114,7 +1977,7 
@@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['value'], 'Index') + self.missing_field(space, ['value'], 'Index') else: pass self.value.sync_app_attrs(space) @@ -2377,7 +2240,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['target', 'iter', 'ifs'], 'comprehension') + self.missing_field(space, ['target', 'iter', 'ifs'], 'comprehension') else: pass self.target.sync_app_attrs(space) @@ -2394,15 +2257,13 @@ node.sync_app_attrs(space) class excepthandler(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class ExceptHandler(excepthandler): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, name, body, lineno, col_offset): self.type = type self.name = name @@ -2424,12 +2285,12 @@ return visitor.visit_ExceptHandler(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~3) ^ 28: - missing_field(space, self.initialization_state, [None, None, 'body', 'lineno', 'col_offset'], 'ExceptHandler') + if (self.initialization_state & ~12) ^ 19: + self.missing_field(space, ['lineno', 'col_offset', None, None, 'body'], 'ExceptHandler') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.name = None if self.type: self.type.sync_app_attrs(space) @@ -2470,7 +2331,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~6) ^ 9: - missing_field(space, self.initialization_state, ['args', None, None, 'defaults'], 'arguments') + self.missing_field(space, ['args', None, None, 'defaults'], 'arguments') else: if not self.initialization_state & 2: self.vararg = None @@ -2513,7 +2374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['arg', 'value'], 'keyword') + self.missing_field(space, ['arg', 'value'], 'keyword') else: pass self.value.sync_app_attrs(space) @@ -2533,7 +2394,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~2) ^ 1: - missing_field(space, self.initialization_state, ['name', None], 'alias') + self.missing_field(space, ['name', None], 'alias') else: if not self.initialization_state & 2: self.asname = None @@ -3019,6 +2880,8 @@ def Expression_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3098,7 +2961,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -3112,14 +2975,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def stmt_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not 
w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -3133,7 +2996,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 stmt.typedef = typedef.TypeDef("stmt", AST.typedef, @@ -3149,7 +3012,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3163,14 +3026,14 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def FunctionDef_get_args(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -3184,10 +3047,10 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def FunctionDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3201,10 +3064,10 @@ def FunctionDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def FunctionDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3218,7 +3081,7 @@ def FunctionDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _FunctionDef_field_unroller = unrolling_iterable(['name', 'args', 'body', 'decorator_list']) def FunctionDef_init(space, w_self, __args__): @@ -3254,7 +3117,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3268,10 +3131,10 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ClassDef_get_bases(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no 
attribute '%s'", typename, 'bases') if w_self.w_bases is None: @@ -3285,10 +3148,10 @@ def ClassDef_set_bases(space, w_self, w_new_value): w_self.w_bases = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ClassDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3302,10 +3165,10 @@ def ClassDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def ClassDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3319,7 +3182,7 @@ def ClassDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _ClassDef_field_unroller = unrolling_iterable(['name', 'bases', 'body', 'decorator_list']) def ClassDef_init(space, w_self, __args__): @@ -3356,7 +3219,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3364,13 +3227,15 @@ def Return_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Return_field_unroller = unrolling_iterable(['value']) def Return_init(space, w_self, __args__): @@ -3397,7 +3262,7 @@ ) def Delete_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3411,7 +3276,7 @@ def Delete_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Delete_field_unroller = unrolling_iterable(['targets']) def Delete_init(space, w_self, __args__): @@ -3439,7 +3304,7 @@ ) def Assign_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3453,14 +3318,14 @@ def Assign_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3468,13 +3333,15 @@ def Assign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assign_field_unroller = unrolling_iterable(['targets', 'value']) def Assign_init(space, w_self, __args__): @@ -3507,7 +3374,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3515,20 +3382,22 @@ def AugAssign_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def AugAssign_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -3544,14 +3413,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def AugAssign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3559,13 +3428,15 @@ def AugAssign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _AugAssign_field_unroller = unrolling_iterable(['target', 'op', 'value']) def AugAssign_init(space, w_self, __args__): @@ -3598,7 +3469,7 @@ w_obj = w_self.getdictvalue(space, 'dest') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no 
attribute '%s'", typename, 'dest') return space.wrap(w_self.dest) @@ -3606,16 +3477,18 @@ def Print_set_dest(space, w_self, w_new_value): try: w_self.dest = space.interp_w(expr, w_new_value, True) + if type(w_self.dest) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'dest', w_new_value) return w_self.deldictvalue(space, 'dest') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Print_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -3629,14 +3502,14 @@ def Print_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Print_get_nl(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'nl') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl') return space.wrap(w_self.nl) @@ -3650,7 +3523,7 @@ w_self.setdictvalue(space, 'nl', w_new_value) return w_self.deldictvalue(space, 'nl') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Print_field_unroller = unrolling_iterable(['dest', 'values', 'nl']) def Print_init(space, w_self, __args__): @@ -3684,7 +3557,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3692,20 +3565,22 @@ def For_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def For_get_iter(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'iter') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'iter') return space.wrap(w_self.iter) @@ -3713,16 +3588,18 @@ def For_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'iter', w_new_value) return w_self.deldictvalue(space, 'iter') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def For_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3736,10 +3613,10 @@ def For_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def For_get_orelse(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3753,7 +3630,7 @@ def For_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _For_field_unroller = unrolling_iterable(['target', 'iter', 'body', 'orelse']) def For_init(space, w_self, __args__): @@ -3789,7 +3666,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3797,16 +3674,18 @@ def While_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def While_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3820,10 +3699,10 @@ def While_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def While_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3837,7 +3716,7 @@ def While_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _While_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def While_init(space, w_self, __args__): @@ -3872,7 +3751,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3880,16 +3759,18 @@ def If_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def 
If_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3903,10 +3784,10 @@ def If_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def If_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3920,7 +3801,7 @@ def If_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _If_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def If_init(space, w_self, __args__): @@ -3955,7 +3836,7 @@ w_obj = w_self.getdictvalue(space, 'context_expr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr') return space.wrap(w_self.context_expr) @@ -3963,20 +3844,22 @@ def With_set_context_expr(space, w_self, w_new_value): try: w_self.context_expr = space.interp_w(expr, w_new_value, False) + if type(w_self.context_expr) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'context_expr', w_new_value) return w_self.deldictvalue(space, 'context_expr') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def With_get_optional_vars(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'optional_vars') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars') return space.wrap(w_self.optional_vars) @@ -3984,16 +3867,18 @@ def With_set_optional_vars(space, w_self, w_new_value): try: w_self.optional_vars = space.interp_w(expr, w_new_value, True) + if type(w_self.optional_vars) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'optional_vars', w_new_value) return w_self.deldictvalue(space, 'optional_vars') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def With_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4007,7 +3892,7 @@ def With_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _With_field_unroller = unrolling_iterable(['context_expr', 'optional_vars', 'body']) def With_init(space, w_self, __args__): @@ -4041,7 +3926,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -4049,20 +3934,22 @@ def Raise_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Raise_get_inst(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'inst') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst') return space.wrap(w_self.inst) @@ -4070,20 +3957,22 @@ def Raise_set_inst(space, w_self, w_new_value): try: w_self.inst = space.interp_w(expr, w_new_value, True) + if type(w_self.inst) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'inst', w_new_value) return w_self.deldictvalue(space, 'inst') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Raise_get_tback(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'tback') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'tback') return space.wrap(w_self.tback) @@ -4091,13 +3980,15 @@ def Raise_set_tback(space, w_self, w_new_value): try: w_self.tback = space.interp_w(expr, w_new_value, True) + if type(w_self.tback) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'tback', w_new_value) return w_self.deldictvalue(space, 'tback') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Raise_field_unroller = unrolling_iterable(['type', 'inst', 'tback']) def Raise_init(space, w_self, __args__): @@ -4126,7 +4017,7 @@ ) def TryExcept_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4140,10 +4031,10 @@ def TryExcept_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryExcept_get_handlers(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers') if w_self.w_handlers is None: @@ -4157,10 +4048,10 @@ def TryExcept_set_handlers(space, w_self, w_new_value): w_self.w_handlers = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def TryExcept_get_orelse(space, 
w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -4174,7 +4065,7 @@ def TryExcept_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _TryExcept_field_unroller = unrolling_iterable(['body', 'handlers', 'orelse']) def TryExcept_init(space, w_self, __args__): @@ -4206,7 +4097,7 @@ ) def TryFinally_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4220,10 +4111,10 @@ def TryFinally_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryFinally_get_finalbody(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody') if w_self.w_finalbody is None: @@ -4237,7 +4128,7 @@ def TryFinally_set_finalbody(space, w_self, w_new_value): w_self.w_finalbody = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _TryFinally_field_unroller = unrolling_iterable(['body', 'finalbody']) def TryFinally_init(space, w_self, __args__): @@ -4271,7 +4162,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -4279,20 +4170,22 @@ def Assert_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assert_get_msg(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'msg') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg') return space.wrap(w_self.msg) @@ -4300,13 +4193,15 @@ def Assert_set_msg(space, w_self, w_new_value): try: w_self.msg = space.interp_w(expr, w_new_value, True) + if type(w_self.msg) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'msg', w_new_value) return w_self.deldictvalue(space, 'msg') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assert_field_unroller = unrolling_iterable(['test', 'msg']) def Assert_init(space, w_self, __args__): @@ -4334,7 +4229,7 @@ ) def Import_get_names(space, w_self): - if not w_self.initialization_state & 1: + if 
not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4348,7 +4243,7 @@ def Import_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Import_field_unroller = unrolling_iterable(['names']) def Import_init(space, w_self, __args__): @@ -4380,7 +4275,7 @@ w_obj = w_self.getdictvalue(space, 'module') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module') return space.wrap(w_self.module) @@ -4397,10 +4292,10 @@ w_self.setdictvalue(space, 'module', w_new_value) return w_self.deldictvalue(space, 'module') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ImportFrom_get_names(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4414,14 +4309,14 @@ def ImportFrom_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ImportFrom_get_level(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'level') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level') return space.wrap(w_self.level) @@ -4435,7 +4330,7 @@ w_self.setdictvalue(space, 'level', w_new_value) return w_self.deldictvalue(space, 'level') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ImportFrom_field_unroller = unrolling_iterable(['module', 'names', 'level']) def ImportFrom_init(space, w_self, __args__): @@ -4469,7 +4364,7 @@ w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -4477,20 +4372,22 @@ def Exec_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Exec_get_globals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'globals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals') return space.wrap(w_self.globals) @@ -4498,20 +4395,22 @@ def Exec_set_globals(space, w_self, w_new_value): try: 
w_self.globals = space.interp_w(expr, w_new_value, True) + if type(w_self.globals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'globals', w_new_value) return w_self.deldictvalue(space, 'globals') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Exec_get_locals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'locals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals') return space.wrap(w_self.locals) @@ -4519,13 +4418,15 @@ def Exec_set_locals(space, w_self, w_new_value): try: w_self.locals = space.interp_w(expr, w_new_value, True) + if type(w_self.locals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'locals', w_new_value) return w_self.deldictvalue(space, 'locals') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Exec_field_unroller = unrolling_iterable(['body', 'globals', 'locals']) def Exec_init(space, w_self, __args__): @@ -4554,7 +4455,7 @@ ) def Global_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4568,7 +4469,7 @@ def Global_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Global_field_unroller = unrolling_iterable(['names']) def Global_init(space, w_self, __args__): @@ -4600,7 +4501,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -4608,13 +4509,15 @@ def Expr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Expr_field_unroller = unrolling_iterable(['value']) def Expr_init(space, w_self, __args__): @@ -4696,7 +4599,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -4710,14 +4613,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def expr_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = 
w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -4731,7 +4634,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 expr.typedef = typedef.TypeDef("expr", AST.typedef, @@ -4747,7 +4650,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return boolop_to_class[w_self.op - 1]() @@ -4763,10 +4666,10 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BoolOp_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -4780,7 +4683,7 @@ def BoolOp_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _BoolOp_field_unroller = unrolling_iterable(['op', 'values']) def BoolOp_init(space, w_self, __args__): @@ -4813,7 +4716,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -4821,20 +4724,22 @@ def BinOp_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BinOp_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -4850,14 +4755,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def BinOp_get_right(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'right') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right') 
return space.wrap(w_self.right) @@ -4865,13 +4770,15 @@ def BinOp_set_right(space, w_self, w_new_value): try: w_self.right = space.interp_w(expr, w_new_value, False) + if type(w_self.right) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'right', w_new_value) return w_self.deldictvalue(space, 'right') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _BinOp_field_unroller = unrolling_iterable(['left', 'op', 'right']) def BinOp_init(space, w_self, __args__): @@ -4904,7 +4811,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return unaryop_to_class[w_self.op - 1]() @@ -4920,14 +4827,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def UnaryOp_get_operand(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'operand') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand') return space.wrap(w_self.operand) @@ -4935,13 +4842,15 @@ def UnaryOp_set_operand(space, w_self, w_new_value): try: w_self.operand = space.interp_w(expr, w_new_value, False) + if type(w_self.operand) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'operand', w_new_value) return w_self.deldictvalue(space, 'operand') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _UnaryOp_field_unroller = unrolling_iterable(['op', 'operand']) def UnaryOp_init(space, w_self, __args__): @@ -4973,7 +4882,7 @@ w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -4987,14 +4896,14 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Lambda_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5002,13 +4911,15 @@ def Lambda_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 
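
A note on the renumbering in these hunks: lineno and col_offset, the attributes shared by every expr/stmt node, now occupy the two lowest bits of initialization_state, so a node's own fields start at bit 2 (4, 8, 16, ...). A minimal sketch of that layout in plain Python, with illustrative names rather than the generated code:

    def field_masks(attributes, fields):
        # one bit per slot: shared attributes first, then the node's own fields
        masks = {}
        for i, name in enumerate(attributes + fields):
            masks[name] = 1 << i
        return masks

    # For BinOp this gives lineno=1, col_offset=2, left=4, op=8, right=16,
    # matching the constants used by the accessors above.
    print(field_masks(['lineno', 'col_offset'], ['left', 'op', 'right']))
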
_Lambda_field_unroller = unrolling_iterable(['args', 'body']) def Lambda_init(space, w_self, __args__): @@ -5040,7 +4951,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -5048,20 +4959,22 @@ def IfExp_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def IfExp_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5069,20 +4982,22 @@ def IfExp_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def IfExp_get_orelse(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'orelse') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') return space.wrap(w_self.orelse) @@ -5090,13 +5005,15 @@ def IfExp_set_orelse(space, w_self, w_new_value): try: w_self.orelse = space.interp_w(expr, w_new_value, False) + if type(w_self.orelse) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'orelse', w_new_value) return w_self.deldictvalue(space, 'orelse') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _IfExp_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def IfExp_init(space, w_self, __args__): @@ -5125,7 +5042,7 @@ ) def Dict_get_keys(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys') if w_self.w_keys is None: @@ -5139,10 +5056,10 @@ def Dict_set_keys(space, w_self, w_new_value): w_self.w_keys = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Dict_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ 
-5156,7 +5073,7 @@ def Dict_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Dict_field_unroller = unrolling_iterable(['keys', 'values']) def Dict_init(space, w_self, __args__): @@ -5186,7 +5103,7 @@ ) def Set_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -5200,7 +5117,7 @@ def Set_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Set_field_unroller = unrolling_iterable(['elts']) def Set_init(space, w_self, __args__): @@ -5232,7 +5149,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5240,16 +5157,18 @@ def ListComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ListComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5263,7 +5182,7 @@ def ListComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _ListComp_field_unroller = unrolling_iterable(['elt', 'generators']) def ListComp_init(space, w_self, __args__): @@ -5296,7 +5215,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5304,16 +5223,18 @@ def SetComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def SetComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5327,7 +5248,7 @@ def SetComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state 
|= 8 _SetComp_field_unroller = unrolling_iterable(['elt', 'generators']) def SetComp_init(space, w_self, __args__): @@ -5360,7 +5281,7 @@ w_obj = w_self.getdictvalue(space, 'key') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key') return space.wrap(w_self.key) @@ -5368,20 +5289,22 @@ def DictComp_set_key(space, w_self, w_new_value): try: w_self.key = space.interp_w(expr, w_new_value, False) + if type(w_self.key) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'key', w_new_value) return w_self.deldictvalue(space, 'key') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def DictComp_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5389,16 +5312,18 @@ def DictComp_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def DictComp_get_generators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5412,7 +5337,7 @@ def DictComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _DictComp_field_unroller = unrolling_iterable(['key', 'value', 'generators']) def DictComp_init(space, w_self, __args__): @@ -5446,7 +5371,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5454,16 +5379,18 @@ def GeneratorExp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def GeneratorExp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ 
-5477,7 +5404,7 @@ def GeneratorExp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _GeneratorExp_field_unroller = unrolling_iterable(['elt', 'generators']) def GeneratorExp_init(space, w_self, __args__): @@ -5510,7 +5437,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5518,13 +5445,15 @@ def Yield_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Yield_field_unroller = unrolling_iterable(['value']) def Yield_init(space, w_self, __args__): @@ -5555,7 +5484,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -5563,16 +5492,18 @@ def Compare_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Compare_get_ops(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops') if w_self.w_ops is None: @@ -5586,10 +5517,10 @@ def Compare_set_ops(space, w_self, w_new_value): w_self.w_ops = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Compare_get_comparators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators') if w_self.w_comparators is None: @@ -5603,7 +5534,7 @@ def Compare_set_comparators(space, w_self, w_new_value): w_self.w_comparators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Compare_field_unroller = unrolling_iterable(['left', 'ops', 'comparators']) def Compare_init(space, w_self, __args__): @@ -5638,7 +5569,7 @@ w_obj = w_self.getdictvalue(space, 'func') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func') return space.wrap(w_self.func) @@ -5646,16 +5577,18 @@ def Call_set_func(space, w_self, w_new_value): 
try: w_self.func = space.interp_w(expr, w_new_value, False) + if type(w_self.func) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'func', w_new_value) return w_self.deldictvalue(space, 'func') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Call_get_args(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') if w_self.w_args is None: @@ -5669,10 +5602,10 @@ def Call_set_args(space, w_self, w_new_value): w_self.w_args = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Call_get_keywords(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') if w_self.w_keywords is None: @@ -5686,14 +5619,14 @@ def Call_set_keywords(space, w_self, w_new_value): w_self.w_keywords = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def Call_get_starargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'starargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') return space.wrap(w_self.starargs) @@ -5701,20 +5634,22 @@ def Call_set_starargs(space, w_self, w_new_value): try: w_self.starargs = space.interp_w(expr, w_new_value, True) + if type(w_self.starargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'starargs', w_new_value) return w_self.deldictvalue(space, 'starargs') - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 def Call_get_kwargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'kwargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 16: + if not w_self.initialization_state & 64: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') return space.wrap(w_self.kwargs) @@ -5722,13 +5657,15 @@ def Call_set_kwargs(space, w_self, w_new_value): try: w_self.kwargs = space.interp_w(expr, w_new_value, True) + if type(w_self.kwargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'kwargs', w_new_value) return w_self.deldictvalue(space, 'kwargs') - w_self.initialization_state |= 16 + w_self.initialization_state |= 64 _Call_field_unroller = unrolling_iterable(['func', 'args', 'keywords', 'starargs', 'kwargs']) def Call_init(space, w_self, __args__): @@ -5765,7 +5702,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 
'value') return space.wrap(w_self.value) @@ -5773,13 +5710,15 @@ def Repr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Repr_field_unroller = unrolling_iterable(['value']) def Repr_init(space, w_self, __args__): @@ -5810,7 +5749,7 @@ w_obj = w_self.getdictvalue(space, 'n') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n') return w_self.n @@ -5824,7 +5763,7 @@ w_self.setdictvalue(space, 'n', w_new_value) return w_self.deldictvalue(space, 'n') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Num_field_unroller = unrolling_iterable(['n']) def Num_init(space, w_self, __args__): @@ -5855,7 +5794,7 @@ w_obj = w_self.getdictvalue(space, 's') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's') return w_self.s @@ -5869,7 +5808,7 @@ w_self.setdictvalue(space, 's', w_new_value) return w_self.deldictvalue(space, 's') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Str_field_unroller = unrolling_iterable(['s']) def Str_init(space, w_self, __args__): @@ -5900,7 +5839,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5908,20 +5847,22 @@ def Attribute_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Attribute_get_attr(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'attr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr') return space.wrap(w_self.attr) @@ -5935,14 +5876,14 @@ w_self.setdictvalue(space, 'attr', w_new_value) return w_self.deldictvalue(space, 'attr') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Attribute_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has 
no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -5958,7 +5899,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Attribute_field_unroller = unrolling_iterable(['value', 'attr', 'ctx']) def Attribute_init(space, w_self, __args__): @@ -5991,7 +5932,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5999,20 +5940,22 @@ def Subscript_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Subscript_get_slice(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'slice') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice') return space.wrap(w_self.slice) @@ -6020,20 +5963,22 @@ def Subscript_set_slice(space, w_self, w_new_value): try: w_self.slice = space.interp_w(slice, w_new_value, False) + if type(w_self.slice) is slice: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'slice', w_new_value) return w_self.deldictvalue(space, 'slice') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Subscript_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6049,7 +5994,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Subscript_field_unroller = unrolling_iterable(['value', 'slice', 'ctx']) def Subscript_init(space, w_self, __args__): @@ -6082,7 +6027,7 @@ w_obj = w_self.getdictvalue(space, 'id') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id') return space.wrap(w_self.id) @@ -6096,14 +6041,14 @@ w_self.setdictvalue(space, 'id', w_new_value) return w_self.deldictvalue(space, 'id') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Name_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not 
w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6119,7 +6064,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Name_field_unroller = unrolling_iterable(['id', 'ctx']) def Name_init(space, w_self, __args__): @@ -6147,7 +6092,7 @@ ) def List_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6161,14 +6106,14 @@ def List_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def List_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6184,7 +6129,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _List_field_unroller = unrolling_iterable(['elts', 'ctx']) def List_init(space, w_self, __args__): @@ -6213,7 +6158,7 @@ ) def Tuple_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6227,14 +6172,14 @@ def Tuple_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Tuple_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6250,7 +6195,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Tuple_field_unroller = unrolling_iterable(['elts', 'ctx']) def Tuple_init(space, w_self, __args__): @@ -6283,7 +6228,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return w_self.value @@ -6297,7 +6242,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Const_field_unroller = unrolling_iterable(['value']) def Const_init(space, w_self, __args__): @@ -6409,6 +6354,8 @@ def 
Slice_set_lower(space, w_self, w_new_value): try: w_self.lower = space.interp_w(expr, w_new_value, True) + if type(w_self.lower) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6430,6 +6377,8 @@ def Slice_set_upper(space, w_self, w_new_value): try: w_self.upper = space.interp_w(expr, w_new_value, True) + if type(w_self.upper) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6451,6 +6400,8 @@ def Slice_set_step(space, w_self, w_new_value): try: w_self.step = space.interp_w(expr, w_new_value, True) + if type(w_self.step) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6540,6 +6491,8 @@ def Index_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6809,6 +6762,8 @@ def comprehension_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6830,6 +6785,8 @@ def comprehension_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6887,7 +6844,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -6901,14 +6858,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def excepthandler_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -6922,7 +6879,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 excepthandler.typedef = typedef.TypeDef("excepthandler", AST.typedef, @@ -6938,7 +6895,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -6946,20 +6903,22 @@ def ExceptHandler_set_type(space, w_self, w_new_value): try: w_self.type = 
space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ExceptHandler_get_name(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -6967,16 +6926,18 @@ def ExceptHandler_set_name(space, w_self, w_new_value): try: w_self.name = space.interp_w(expr, w_new_value, True) + if type(w_self.name) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ExceptHandler_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -6990,7 +6951,7 @@ def ExceptHandler_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ExceptHandler_field_unroller = unrolling_iterable(['type', 'name', 'body']) def ExceptHandler_init(space, w_self, __args__): @@ -7164,6 +7125,8 @@ def keyword_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -1,6 +1,5 @@ """codegen helpers and AST constant folding.""" import sys -import itertools from pypy.interpreter.astcompiler import ast, consts, misc from pypy.tool import stdlib_opcode as ops @@ -146,8 +145,7 @@ } unrolling_unary_folders = unrolling_iterable(unary_folders.items()) -for folder in itertools.chain(binary_folders.itervalues(), - unary_folders.itervalues()): +for folder in binary_folders.values() + unary_folders.values(): folder._always_inline_ = True del folder diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -79,6 +79,7 @@ else: self.emit("class %s(AST):" % (base,)) if sum.attributes: + self.emit("") args = ", ".join(attr.name.value for attr in sum.attributes) self.emit("def __init__(self, %s):" % (args,), 1) for attr in sum.attributes: @@ -114,7 +115,7 @@ else: names.append(repr(field.name.value)) sub = (", ".join(names), name.value) - self.emit("missing_field(space, self.initialization_state, [%s], %r)" + self.emit("self.missing_field(space, [%s], %r)" % sub, 3) self.emit("else:", 2) # Fill in all the default fields. 
@@ -195,17 +196,13 @@ def visitConstructor(self, cons, base, extra_attributes): self.emit("class %s(%s):" % (cons.name, base)) self.emit("") - for field in self.data.cons_attributes[cons]: - subst = (field.name, self.data.field_masks[field]) - self.emit("_%s_mask = %i" % subst, 1) - self.emit("") self.make_constructor(cons.fields, cons, extra_attributes, base) self.emit("") self.emit("def walkabout(self, visitor):", 1) self.emit("visitor.visit_%s(self)" % (cons.name,), 2) self.emit("") self.make_mutate_over(cons, cons.name) - self.make_var_syncer(cons.fields + self.data.cons_attributes[cons], + self.make_var_syncer(self.data.cons_attributes[cons] + cons.fields, cons, cons.name) def visitField(self, field): @@ -324,7 +321,7 @@ def visitSum(self, sum, name): for field in sum.attributes: - self.make_property(field, name, True) + self.make_property(field, name) self.make_typedef(name, "AST", sum.attributes, fields_name="_attributes") if not is_simple_sum(sum): @@ -400,13 +397,10 @@ def visitField(self, field, name): self.make_property(field, name) - def make_property(self, field, name, different_masks=False): + def make_property(self, field, name): func = "def %s_get_%s(space, w_self):" % (name, field.name) self.emit(func) - if different_masks: - flag = "w_self._%s_mask" % (field.name,) - else: - flag = self.data.field_masks[field] + flag = self.data.field_masks[field] if not field.seq: self.emit("if w_self.w_dict is not None:", 1) self.emit(" w_obj = w_self.getdictvalue(space, '%s')" % (field.name,), 1) @@ -458,6 +452,11 @@ config = (field.name, field.type, repr(field.opt)) self.emit("w_self.%s = space.interp_w(%s, w_new_value, %s)" % config, 2) + if field.type.value not in self.data.prod_simple: + self.emit("if type(w_self.%s) is %s:" % ( + field.name, field.type), 2) + self.emit("raise OperationError(space.w_TypeError, " + "space.w_None)", 3) else: level = 2 if field.opt and field.type.value != "int": @@ -505,7 +504,10 @@ optional_mask = 0 for i, field in enumerate(fields): flag = 1 << i - field_masks[field] = flag + if field not in field_masks: + field_masks[field] = flag + else: + assert field_masks[field] == flag if field.opt: optional_mask |= flag else: @@ -518,9 +520,9 @@ if is_simple_sum(sum): simple_types.add(tp.name.value) else: + attrs = [field for field in sum.attributes] for cons in sum.types: - attrs = [copy_field(field) for field in sum.attributes] - add_masks(cons.fields + attrs, cons) + add_masks(attrs + cons.fields, cons) cons_attributes[cons] = attrs else: prod = tp.value @@ -588,6 +590,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." + state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \\"%s\\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \\"%s\\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -631,15 +651,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." 
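
The missing_field helper shown above is now a method on the node: it walks the same bitmask and, when a slot is unset, checks the instance dict to tell a genuinely missing required field apart from one whose value failed the setter's type check. A standalone approximation of that logic, using ordinary exceptions and a set in place of the space/getdictvalue machinery:

    def report_missing_field(state, required, stashed_in_dict, host):
        # state: initialization_state bitmask; required: field names, with
        # None marking optional slots; stashed_in_dict: names whose value was
        # kept in the instance __dict__ after a failed type check.
        for i, name in enumerate(required):
            if (state >> i) & 1:
                continue                      # field was set normally
            if name is None:
                continue                      # optional slot, never reported
            if name not in stashed_in_dict:
                raise TypeError('required field "%s" missing from %s' % (name, host))
            raise TypeError('incorrect type for field "%s" in %s' % (name, host))
        raise AssertionError("should not reach here")
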
- for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \\"%s\\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) - raise AssertionError("should not reach here") """ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1,4 +1,3 @@ -import itertools import pypy from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction @@ -191,8 +190,8 @@ def is_w(self, space, w_other): return self is w_other - def unique_id(self, space): - return space.wrap(compute_unique_id(self)) + def immutable_unique_id(self, space): + return None def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) @@ -488,6 +487,16 @@ 'parser', 'fcntl', '_codecs', 'binascii' ] + # These modules are treated like CPython treats built-in modules, + # i.e. they always shadow any xx.py. The other modules are treated + # like CPython treats extension modules, and are loaded in sys.path + # order by the fake entry '.../lib_pypy/__extensions__'. + MODULES_THAT_ALWAYS_SHADOW = dict.fromkeys([ + '__builtin__', '__pypy__', '_ast', '_codecs', '_sre', '_warnings', + '_weakref', 'errno', 'exceptions', 'gc', 'imp', 'marshal', + 'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport', + ], None) + def make_builtins(self): "NOT_RPYTHON: only for initializing the space." @@ -519,8 +528,8 @@ exception_types_w = self.export_builtin_exceptions() # initialize with "bootstrap types" from objspace (e.g. w_None) - types_w = itertools.chain(self.get_builtin_types().iteritems(), - exception_types_w.iteritems()) + types_w = (self.get_builtin_types().items() + + exception_types_w.items()) for name, w_type in types_w: self.setitem(self.builtin.w_dict, self.wrap(name), w_type) @@ -697,7 +706,10 @@ return w_two.is_w(self, w_one) def id(self, w_obj): - return w_obj.unique_id(self) + w_result = w_obj.immutable_unique_id(self) + if w_result is None: + w_result = self.wrap(compute_unique_id(w_obj)) + return w_result def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" @@ -1608,6 +1620,8 @@ 'UnicodeError', 'ValueError', 'ZeroDivisionError', + 'UnicodeEncodeError', + 'UnicodeDecodeError', ] ## Irregular part of the interface: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -54,7 +54,11 @@ # Hash support def default_identity_hash(space, w_obj): - return space.wrap(compute_identity_hash(w_obj)) + w_unique_id = w_obj.immutable_unique_id(space) + if w_unique_id is None: # common case + return space.wrap(compute_identity_hash(w_obj)) + else: + return space.hash(w_unique_id) # ____________________________________________________________ # diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -8,6 +8,7 @@ from pypy.objspace.flow.model import Variable, Constant from pypy.annotation import model as annmodel from pypy.jit.metainterp.history import REF, INT, FLOAT +from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -48,6 +49,11 @@ value._the_opaque_pointer = op 
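
The baseobjspace and typedef hunks above replace unique_id() with immutable_unique_id(), which returns None for ordinary objects; space.id() and the default identity hash then fall back to the address-based helpers, while objects that do define an immutable unique id hash through it. A small plain-Python sketch of that dispatch, with id() and object.__hash__ standing in for the RPython compute_unique_id/compute_identity_hash helpers:

    def space_id(w_obj):
        w_result = w_obj.immutable_unique_id()
        if w_result is None:            # common case: identity-based id
            w_result = id(w_obj)
        return w_result

    def default_identity_hash(w_obj):
        w_unique_id = w_obj.immutable_unique_id()
        if w_unique_id is None:         # common case: identity-based hash
            return object.__hash__(w_obj)
        return hash(w_unique_id)        # value-derived hash otherwise
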
return op +def _normalize(value): + if isinstance(value, lltype._ptr): + value = lltype.top_container(value._obj) + return value + def from_opaque_string(s): if isinstance(s, str): return s @@ -322,6 +328,14 @@ _variables.append(v) return r +def compile_started_vars(clt): + if not hasattr(clt, '_debug_argtypes'): # only when compiling the loop + argtypes = [v.concretetype for v in _variables] + try: + clt._debug_argtypes = argtypes + except AttributeError: # when 'clt' is actually a translated + pass # GcStruct + def compile_add(loop, opnum): loop = _from_opaque(loop) loop.operations.append(Operation(opnum)) @@ -347,6 +361,16 @@ op = loop.operations[-1] op.descr = weakref.ref(descr) +TARGET_TOKENS = weakref.WeakKeyDictionary() + +def compile_add_target_token(loop, descr, clt): + # here, 'clt' is the compiled_loop_token of the original loop that + # we are compiling + loop = _from_opaque(loop) + op = loop.operations[-1] + descrobj = _normalize(descr) + TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args, clt + def compile_add_var(loop, intvar): loop = _from_opaque(loop) op = loop.operations[-1] @@ -381,13 +405,25 @@ _variables.append(v) return r -def compile_add_jump_target(loop, loop_target): +def compile_add_jump_target(loop, targettoken, source_clt): loop = _from_opaque(loop) - loop_target = _from_opaque(loop_target) + descrobj = _normalize(targettoken) + (loop_target, target_opindex, target_inputargs, target_clt + ) = TARGET_TOKENS[descrobj] + # + try: + assert source_clt._debug_argtypes == target_clt._debug_argtypes + except AttributeError: # when translated + pass + # op = loop.operations[-1] op.jump_target = loop_target + op.jump_target_opindex = target_opindex + op.jump_target_inputargs = target_inputargs assert op.opnum == rop.JUMP - assert len(op.args) == len(loop_target.inputargs) + assert [v.concretetype for v in op.args] == ( + [v.concretetype for v in target_inputargs]) + # if loop_target == loop: log.info("compiling new loop") else: @@ -521,10 +557,11 @@ self.opindex += 1 continue if op.opnum == rop.JUMP: - assert len(op.jump_target.inputargs) == len(args) - self.env = dict(zip(op.jump_target.inputargs, args)) + inputargs = op.jump_target_inputargs + assert len(inputargs) == len(args) + self.env = dict(zip(inputargs, args)) self.loop = op.jump_target - self.opindex = 0 + self.opindex = op.jump_target_opindex _stats.exec_jumps += 1 elif op.opnum == rop.FINISH: if self.verbose: @@ -617,6 +654,15 @@ # return _op_default_implementation + def op_label(self, _, *args): + op = self.loop.operations[self.opindex] + assert op.opnum == rop.LABEL + assert len(op.args) == len(args) + newenv = {} + for v, value in zip(op.args, args): + newenv[v] = value + self.env = newenv + def op_debug_merge_point(self, _, *args): from pypy.jit.metainterp.warmspot import get_stats try: @@ -959,6 +1005,7 @@ self._may_force = self.opindex try: inpargs = _from_opaque(ctl.compiled_version).inputargs + assert len(inpargs) == len(args) for i, inparg in enumerate(inpargs): TYPE = inparg.concretetype if TYPE is lltype.Signed: @@ -1788,9 +1835,11 @@ setannotation(compile_start_int_var, annmodel.SomeInteger()) setannotation(compile_start_ref_var, annmodel.SomeInteger()) setannotation(compile_start_float_var, annmodel.SomeInteger()) +setannotation(compile_started_vars, annmodel.s_None) setannotation(compile_add, annmodel.s_None) setannotation(compile_add_descr, annmodel.s_None) setannotation(compile_add_descr_arg, annmodel.s_None) +setannotation(compile_add_target_token, annmodel.s_None) 
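
compile_add_target_token and the LABEL/JUMP handling above let a JUMP resume execution at an operation index inside a compiled loop instead of only at its start: each target token records the loop, the index just past its LABEL, and the label's input arguments. A toy version of that bookkeeping, with a plain dict in place of the weak-keyed table and illustrative names:

    target_tokens = {}   # token -> (loop, opindex after the LABEL, inputargs)

    def record_label(token, loop, opindex, inputargs):
        target_tokens[token] = (loop, opindex, inputargs)

    def follow_jump(token, args):
        loop, opindex, inputargs = target_tokens[token]
        assert len(inputargs) == len(args)
        env = dict(zip(inputargs, args))    # rebind the label's variables
        return loop, opindex, env           # interpretation continues here
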
setannotation(compile_add_var, annmodel.s_None) setannotation(compile_add_int_const, annmodel.s_None) setannotation(compile_add_ref_const, annmodel.s_None) diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -37,7 +37,7 @@ def get_arg_types(self): return self.arg_types - def get_return_type(self): + def get_result_type(self): return self.typeinfo def get_extra_info(self): @@ -138,29 +138,30 @@ clt = original_loop_token.compiled_loop_token clt.loop_and_bridges.append(c) clt.compiling_a_bridge() - self._compile_loop_or_bridge(c, inputargs, operations) + self._compile_loop_or_bridge(c, inputargs, operations, clt) old, oldindex = faildescr._compiled_fail llimpl.compile_redirect_fail(old, oldindex, c) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, inputargs, operations, jitcell_token, + log=True, name=''): """In a real assembler backend, this should assemble the given list of operations. Here we just generate a similar CompiledLoop instance. The code here is RPython, whereas the code in llimpl is not. """ c = llimpl.compile_start() - clt = model.CompiledLoopToken(self, looptoken.number) + clt = model.CompiledLoopToken(self, jitcell_token.number) clt.loop_and_bridges = [c] clt.compiled_version = c - looptoken.compiled_loop_token = clt - self._compile_loop_or_bridge(c, inputargs, operations) + jitcell_token.compiled_loop_token = clt + self._compile_loop_or_bridge(c, inputargs, operations, clt) def free_loop_and_bridges(self, compiled_loop_token): for c in compiled_loop_token.loop_and_bridges: llimpl.mark_as_free(c) model.AbstractCPU.free_loop_and_bridges(self, compiled_loop_token) - def _compile_loop_or_bridge(self, c, inputargs, operations): + def _compile_loop_or_bridge(self, c, inputargs, operations, clt): var2index = {} for box in inputargs: if isinstance(box, history.BoxInt): @@ -172,10 +173,11 @@ var2index[box] = llimpl.compile_start_float_var(c) else: raise Exception("box is: %r" % (box,)) - self._compile_operations(c, operations, var2index) + llimpl.compile_started_vars(clt) + self._compile_operations(c, operations, var2index, clt) return c - def _compile_operations(self, c, operations, var2index): + def _compile_operations(self, c, operations, var2index, clt): for op in operations: llimpl.compile_add(c, op.getopnum()) descr = op.getdescr() @@ -183,9 +185,11 @@ llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo, descr.arg_types, descr.extrainfo, descr.width) - if (isinstance(descr, history.LoopToken) and - op.getopnum() != rop.JUMP): + if isinstance(descr, history.JitCellToken): + assert op.getopnum() != rop.JUMP llimpl.compile_add_loop_token(c, descr) + if isinstance(descr, history.TargetToken) and op.getopnum() == rop.LABEL: + llimpl.compile_add_target_token(c, descr, clt) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython c._obj.externalobj.operations[-1].setdescr(descr) @@ -239,9 +243,7 @@ assert op.is_final() if op.getopnum() == rop.JUMP: targettoken = op.getdescr() - assert isinstance(targettoken, history.LoopToken) - compiled_version = targettoken.compiled_loop_token.compiled_version - llimpl.compile_add_jump_target(c, compiled_version) + llimpl.compile_add_jump_target(c, targettoken, clt) elif op.getopnum() == rop.FINISH: faildescr = op.getdescr() index = self.get_fail_descr_number(faildescr) @@ -260,21 +262,28 @@ self.latest_frame = frame return fail_index - def 
execute_token(self, loop_token): - """Calls the assembler generated for the given loop. - Returns the ResOperation that failed, of type rop.FAIL. - """ - fail_index = self._execute_token(loop_token) - return self.get_fail_descr_from_number(fail_index) - - def set_future_value_int(self, index, intvalue): - llimpl.set_future_value_int(index, intvalue) - - def set_future_value_ref(self, index, objvalue): - llimpl.set_future_value_ref(index, objvalue) - - def set_future_value_float(self, index, floatvalue): - llimpl.set_future_value_float(index, floatvalue) + def make_execute_token(self, *argtypes): + nb_args = len(argtypes) + unroll_argtypes = unrolling_iterable(list(enumerate(argtypes))) + # + def execute_token(loop_token, *args): + assert len(args) == nb_args + for index, TYPE in unroll_argtypes: + x = args[index] + assert TYPE == lltype.typeOf(x) + if TYPE == lltype.Signed: + llimpl.set_future_value_int(index, x) + elif TYPE == llmemory.GCREF: + llimpl.set_future_value_ref(index, x) + elif TYPE == longlong.FLOATSTORAGE: + llimpl.set_future_value_float(index, x) + else: + assert 0 + # + fail_index = self._execute_token(loop_token) + return self.get_fail_descr_from_number(fail_index) + # + return execute_token def get_latest_value_int(self, index): return llimpl.frame_int_getvalue(self.latest_frame, index) diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -5,11 +5,7 @@ from pypy.jit.metainterp.history import AbstractDescr, getkind from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker, longlong - -# The point of the class organization in this file is to make instances -# as compact as possible. This is done by not storing the field size or -# the 'is_pointer_field' flag in the instance itself but in the class -# (in methods actually) using a few classes instead of just one. 
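The make_execute_token() change earlier in this hunk replaces the caller-side set_future_value_*() calls with one entry point specialized per argument-type signature. A rough plain-Python sketch of the idea (the real code uses RPython's unrolling_iterable and lltype checks; 'cpu' here is a placeholder exposing the set_future_value_*() setters plus the _execute_token()/get_fail_descr_from_number() pair from the diff):

    def make_execute_token(cpu, *argkinds):
        nb_args = len(argkinds)

        def execute_token(loop_token, *args):
            # arity and kinds are fixed when the closure is built, so each
            # call only stores the arguments and then runs the loop
            assert len(args) == nb_args
            for index, kind in enumerate(argkinds):
                value = args[index]
                if kind == 'int':
                    cpu.set_future_value_int(index, value)
                elif kind == 'ref':
                    cpu.set_future_value_ref(index, value)
                elif kind == 'float':
                    cpu.set_future_value_float(index, value)
                else:
                    raise AssertionError('unexpected kind %r' % (kind,))
            fail_index = cpu._execute_token(loop_token)
            return cpu.get_fail_descr_from_number(fail_index)

        return execute_token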
+from pypy.jit.codewriter.longlong import is_longlong class GcCache(object): @@ -19,6 +15,7 @@ self._cache_size = {} self._cache_field = {} self._cache_array = {} + self._cache_arraylen = {} self._cache_call = {} self._cache_interiorfield = {} @@ -26,24 +23,15 @@ assert isinstance(STRUCT, lltype.GcStruct) def init_array_descr(self, ARRAY, arraydescr): - assert isinstance(ARRAY, lltype.GcArray) + assert (isinstance(ARRAY, lltype.GcArray) or + isinstance(ARRAY, lltype.GcStruct) and ARRAY._arrayfld) -if lltype.SignedLongLong is lltype.Signed: - def is_longlong(TYPE): - return False -else: - assert rffi.sizeof(lltype.SignedLongLong) == rffi.sizeof(lltype.Float) - def is_longlong(TYPE): - return TYPE in (lltype.SignedLongLong, lltype.UnsignedLongLong) - # ____________________________________________________________ # SizeDescrs class SizeDescr(AbstractDescr): size = 0 # help translation - is_immutable = False - tid = llop.combine_ushort(lltype.Signed, 0, 0) def __init__(self, size, count_fields_if_immut=-1): @@ -77,265 +65,247 @@ cache[STRUCT] = sizedescr return sizedescr + # ____________________________________________________________ # FieldDescrs -class BaseFieldDescr(AbstractDescr): +FLAG_POINTER = 'P' +FLAG_FLOAT = 'F' +FLAG_UNSIGNED = 'U' +FLAG_SIGNED = 'S' +FLAG_STRUCT = 'X' +FLAG_VOID = 'V' + +class FieldDescr(AbstractDescr): + name = '' offset = 0 # help translation - name = '' - _clsname = '' + field_size = 0 + flag = '\x00' - def __init__(self, name, offset): + def __init__(self, name, offset, field_size, flag): self.name = name self.offset = offset + self.field_size = field_size + self.flag = flag + + def is_pointer_field(self): + return self.flag == FLAG_POINTER + + def is_float_field(self): + return self.flag == FLAG_FLOAT + + def is_field_signed(self): + return self.flag == FLAG_SIGNED def sort_key(self): return self.offset - def get_field_size(self, translate_support_code): - raise NotImplementedError + def repr_of_descr(self): + return '' % (self.flag, self.name, self.offset) - _is_pointer_field = False # unless overridden by GcPtrFieldDescr - _is_float_field = False # unless overridden by FloatFieldDescr - _is_field_signed = False # unless overridden by XxxFieldDescr - - def is_pointer_field(self): - return self._is_pointer_field - - def is_float_field(self): - return self._is_float_field - - def is_field_signed(self): - return self._is_field_signed - - def repr_of_descr(self): - return '<%s %s %s>' % (self._clsname, self.name, self.offset) - -class DynamicFieldDescr(BaseFieldDescr): - def __init__(self, offset, fieldsize, is_pointer, is_float, is_signed): - self.offset = offset - self._fieldsize = fieldsize - self._is_pointer_field = is_pointer - self._is_float_field = is_float - self._is_field_signed = is_signed - - def get_field_size(self, translate_support_code): - return self._fieldsize - -class NonGcPtrFieldDescr(BaseFieldDescr): - _clsname = 'NonGcPtrFieldDescr' - def get_field_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrFieldDescr(NonGcPtrFieldDescr): - _clsname = 'GcPtrFieldDescr' - _is_pointer_field = True - -def getFieldDescrClass(TYPE): - return getDescrClass(TYPE, BaseFieldDescr, GcPtrFieldDescr, - NonGcPtrFieldDescr, 'Field', 'get_field_size', - '_is_float_field', '_is_field_signed') def get_field_descr(gccache, STRUCT, fieldname): cache = gccache._cache_field try: return cache[STRUCT][fieldname] except KeyError: - offset, _ = symbolic.get_field_token(STRUCT, fieldname, - 
gccache.translate_support_code) + offset, size = symbolic.get_field_token(STRUCT, fieldname, + gccache.translate_support_code) FIELDTYPE = getattr(STRUCT, fieldname) + flag = get_type_flag(FIELDTYPE) name = '%s.%s' % (STRUCT._name, fieldname) - fielddescr = getFieldDescrClass(FIELDTYPE)(name, offset) + fielddescr = FieldDescr(name, offset, size, flag) cachedict = cache.setdefault(STRUCT, {}) cachedict[fieldname] = fielddescr return fielddescr +def get_type_flag(TYPE): + if isinstance(TYPE, lltype.Ptr): + if TYPE.TO._gckind == 'gc': + return FLAG_POINTER + else: + return FLAG_UNSIGNED + if isinstance(TYPE, lltype.Struct): + return FLAG_STRUCT + if TYPE is lltype.Float or is_longlong(TYPE): + return FLAG_FLOAT + if (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and + rffi.cast(TYPE, -1) == -1): + return FLAG_SIGNED + return FLAG_UNSIGNED + +def get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT): + cache = gccache._cache_arraylen + try: + return cache[ARRAY_OR_STRUCT] + except KeyError: + tsc = gccache.translate_support_code + (_, _, ofs) = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + size = symbolic.get_size(lltype.Signed, tsc) + result = FieldDescr("len", ofs, size, get_type_flag(lltype.Signed)) + cache[ARRAY_OR_STRUCT] = result + return result + + # ____________________________________________________________ # ArrayDescrs -_A = lltype.GcArray(lltype.Signed) # a random gcarray -_AF = lltype.GcArray(lltype.Float) # an array of C doubles +class ArrayDescr(AbstractDescr): + tid = 0 + basesize = 0 # workaround for the annotator + itemsize = 0 + lendescr = None + flag = '\x00' - -class BaseArrayDescr(AbstractDescr): - _clsname = '' - tid = llop.combine_ushort(lltype.Signed, 0, 0) - - def get_base_size(self, translate_support_code): - basesize, _, _ = symbolic.get_array_token(_A, translate_support_code) - return basesize - - def get_ofs_length(self, translate_support_code): - _, _, ofslength = symbolic.get_array_token(_A, translate_support_code) - return ofslength - - def get_item_size(self, translate_support_code): - raise NotImplementedError - - _is_array_of_pointers = False # unless overridden by GcPtrArrayDescr - _is_array_of_floats = False # unless overridden by FloatArrayDescr - _is_array_of_structs = False # unless overridden by StructArrayDescr - _is_item_signed = False # unless overridden by XxxArrayDescr + def __init__(self, basesize, itemsize, lendescr, flag): + self.basesize = basesize + self.itemsize = itemsize + self.lendescr = lendescr # or None, if no length + self.flag = flag def is_array_of_pointers(self): - return self._is_array_of_pointers + return self.flag == FLAG_POINTER def is_array_of_floats(self): - return self._is_array_of_floats + return self.flag == FLAG_FLOAT + + def is_item_signed(self): + return self.flag == FLAG_SIGNED def is_array_of_structs(self): - return self._is_array_of_structs - - def is_item_signed(self): - return self._is_item_signed + return self.flag == FLAG_STRUCT def repr_of_descr(self): - return '<%s>' % self._clsname + return '' % (self.flag, self.itemsize) -class NonGcPtrArrayDescr(BaseArrayDescr): - _clsname = 'NonGcPtrArrayDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayDescr(NonGcPtrArrayDescr): - _clsname = 'GcPtrArrayDescr' - _is_array_of_pointers = True - -class FloatArrayDescr(BaseArrayDescr): - _clsname = 'FloatArrayDescr' - _is_array_of_floats = True - def get_base_size(self, translate_support_code): - basesize, _, _ = 
symbolic.get_array_token(_AF, translate_support_code) - return basesize - def get_item_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class StructArrayDescr(BaseArrayDescr): - _clsname = 'StructArrayDescr' - _is_array_of_structs = True - -class BaseArrayNoLengthDescr(BaseArrayDescr): - def get_base_size(self, translate_support_code): - return 0 - - def get_ofs_length(self, translate_support_code): - return -1 - -class DynamicArrayNoLengthDescr(BaseArrayNoLengthDescr): - def __init__(self, itemsize): - self.itemsize = itemsize - - def get_item_size(self, translate_support_code): - return self.itemsize - -class NonGcPtrArrayNoLengthDescr(BaseArrayNoLengthDescr): - _clsname = 'NonGcPtrArrayNoLengthDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayNoLengthDescr(NonGcPtrArrayNoLengthDescr): - _clsname = 'GcPtrArrayNoLengthDescr' - _is_array_of_pointers = True - -def getArrayDescrClass(ARRAY): - if ARRAY.OF is lltype.Float: - return FloatArrayDescr - elif isinstance(ARRAY.OF, lltype.Struct): - class Descr(StructArrayDescr): - _clsname = '%sArrayDescr' % ARRAY.OF._name - def get_item_size(self, translate_support_code): - return symbolic.get_size(ARRAY.OF, translate_support_code) - Descr.__name__ = Descr._clsname - return Descr - return getDescrClass(ARRAY.OF, BaseArrayDescr, GcPtrArrayDescr, - NonGcPtrArrayDescr, 'Array', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def getArrayNoLengthDescrClass(ARRAY): - return getDescrClass(ARRAY.OF, BaseArrayNoLengthDescr, GcPtrArrayNoLengthDescr, - NonGcPtrArrayNoLengthDescr, 'ArrayNoLength', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def get_array_descr(gccache, ARRAY): +def get_array_descr(gccache, ARRAY_OR_STRUCT): cache = gccache._cache_array try: - return cache[ARRAY] + return cache[ARRAY_OR_STRUCT] except KeyError: - # we only support Arrays that are either GcArrays, or raw no-length - # non-gc Arrays. 
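The FieldDescr/ArrayDescr rewrite in this hunk collapses the old one-class-per-kind hierarchy into plain instances carrying a one-character flag. A pure-Python restatement of the flag choice made by get_type_flag(), keyed on descriptive strings instead of lltype types, so purely illustrative:

    FLAG_POINTER, FLAG_FLOAT, FLAG_UNSIGNED, FLAG_SIGNED, FLAG_STRUCT = 'PFUSX'

    def type_flag(kind, signed=False, gc=False):
        # gc pointers, raw pointers, inlined structs, float-like values
        # and plain integers each map to a single letter
        if kind == 'ptr':
            return FLAG_POINTER if gc else FLAG_UNSIGNED
        if kind == 'struct':
            return FLAG_STRUCT
        if kind in ('float', 'longlong'):
            return FLAG_FLOAT
        if kind == 'int':
            return FLAG_SIGNED if signed else FLAG_UNSIGNED
        raise AssertionError(kind)

    assert type_flag('ptr', gc=True) == FLAG_POINTER
    assert type_flag('int', signed=True) == FLAG_SIGNED
    assert type_flag('longlong') == FLAG_FLOAT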
- if ARRAY._hints.get('nolength', False): - assert not isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayNoLengthDescrClass(ARRAY)() + tsc = gccache.translate_support_code + basesize, itemsize, _ = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + if isinstance(ARRAY_OR_STRUCT, lltype.Array): + ARRAY_INSIDE = ARRAY_OR_STRUCT else: - assert isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayDescrClass(ARRAY)() - # verify basic assumption that all arrays' basesize and ofslength - # are equal - basesize, itemsize, ofslength = symbolic.get_array_token(ARRAY, False) - assert basesize == arraydescr.get_base_size(False) - assert itemsize == arraydescr.get_item_size(False) - if not ARRAY._hints.get('nolength', False): - assert ofslength == arraydescr.get_ofs_length(False) - if isinstance(ARRAY, lltype.GcArray): - gccache.init_array_descr(ARRAY, arraydescr) - cache[ARRAY] = arraydescr + ARRAY_INSIDE = ARRAY_OR_STRUCT._flds[ARRAY_OR_STRUCT._arrayfld] + if ARRAY_INSIDE._hints.get('nolength', False): + lendescr = None + else: + lendescr = get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT) + flag = get_type_flag(ARRAY_INSIDE.OF) + arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag) + if ARRAY_OR_STRUCT._gckind == 'gc': + gccache.init_array_descr(ARRAY_OR_STRUCT, arraydescr) + cache[ARRAY_OR_STRUCT] = arraydescr return arraydescr + # ____________________________________________________________ # InteriorFieldDescr class InteriorFieldDescr(AbstractDescr): - arraydescr = BaseArrayDescr() # workaround for the annotator - fielddescr = BaseFieldDescr('', 0) + arraydescr = ArrayDescr(0, 0, None, '\x00') # workaround for the annotator + fielddescr = FieldDescr('', 0, 0, '\x00') def __init__(self, arraydescr, fielddescr): + assert arraydescr.flag == FLAG_STRUCT self.arraydescr = arraydescr self.fielddescr = fielddescr + def sort_key(self): + return self.fielddescr.sort_key() + def is_pointer_field(self): return self.fielddescr.is_pointer_field() def is_float_field(self): return self.fielddescr.is_float_field() - def sort_key(self): - return self.fielddescr.sort_key() - def repr_of_descr(self): return '' % self.fielddescr.repr_of_descr() -def get_interiorfield_descr(gc_ll_descr, ARRAY, FIELDTP, name): +def get_interiorfield_descr(gc_ll_descr, ARRAY, name): cache = gc_ll_descr._cache_interiorfield try: - return cache[(ARRAY, FIELDTP, name)] + return cache[(ARRAY, name)] except KeyError: arraydescr = get_array_descr(gc_ll_descr, ARRAY) - fielddescr = get_field_descr(gc_ll_descr, FIELDTP, name) + fielddescr = get_field_descr(gc_ll_descr, ARRAY.OF, name) descr = InteriorFieldDescr(arraydescr, fielddescr) - cache[(ARRAY, FIELDTP, name)] = descr + cache[(ARRAY, name)] = descr return descr +def get_dynamic_interiorfield_descr(gc_ll_descr, offset, width, fieldsize, + is_pointer, is_float, is_signed): + arraydescr = ArrayDescr(0, width, None, FLAG_STRUCT) + if is_pointer: + assert not is_float + flag = FLAG_POINTER + elif is_float: + flag = FLAG_FLOAT + elif is_signed: + flag = FLAG_SIGNED + else: + flag = FLAG_UNSIGNED + fielddescr = FieldDescr('dynamic', offset, fieldsize, flag) + return InteriorFieldDescr(arraydescr, fielddescr) + + # ____________________________________________________________ # CallDescrs -class BaseCallDescr(AbstractDescr): - _clsname = '' - loop_token = None +class CallDescr(AbstractDescr): arg_classes = '' # <-- annotation hack + result_type = '\x00' + result_flag = '\x00' ffi_flags = 1 + call_stub_i = staticmethod(lambda func, args_i, args_r, args_f: + 0) + call_stub_r = 
staticmethod(lambda func, args_i, args_r, args_f: + lltype.nullptr(llmemory.GCREF.TO)) + call_stub_f = staticmethod(lambda func,args_i,args_r,args_f: + longlong.ZEROF) - def __init__(self, arg_classes, extrainfo=None, ffi_flags=1): - self.arg_classes = arg_classes # string of "r" and "i" (ref/int) + def __init__(self, arg_classes, result_type, result_signed, result_size, + extrainfo=None, ffi_flags=1): + """ + 'arg_classes' is a string of characters, one per argument: + 'i', 'r', 'f', 'L', 'S' + + 'result_type' is one character from the same list or 'v' + + 'result_signed' is a boolean True/False + """ + self.arg_classes = arg_classes + self.result_type = result_type + self.result_size = result_size self.extrainfo = extrainfo self.ffi_flags = ffi_flags # NB. the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which # makes sense on Windows as it's the one for all the C functions # we are compiling together with the JIT. On non-Windows platforms # it is just ignored anyway. + if result_type == 'v': + result_flag = FLAG_VOID + elif result_type == 'i': + if result_signed: + result_flag = FLAG_SIGNED + else: + result_flag = FLAG_UNSIGNED + elif result_type == history.REF: + result_flag = FLAG_POINTER + elif result_type == history.FLOAT or result_type == 'L': + result_flag = FLAG_FLOAT + elif result_type == 'S': + result_flag = FLAG_UNSIGNED + else: + raise NotImplementedError("result_type = '%s'" % (result_type,)) + self.result_flag = result_flag def __repr__(self): - res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) + res = 'CallDescr(%s)' % (self.arg_classes,) extraeffect = getattr(self.extrainfo, 'extraeffect', None) if extraeffect is not None: res += ' EF=%r' % extraeffect @@ -363,14 +333,14 @@ def get_arg_types(self): return self.arg_classes - def get_return_type(self): - return self._return_type + def get_result_type(self): + return self.result_type - def get_result_size(self, translate_support_code): - raise NotImplementedError + def get_result_size(self): + return self.result_size def is_result_signed(self): - return False # unless overridden + return self.result_flag == FLAG_SIGNED def create_call_stub(self, rtyper, RESULT): from pypy.rlib.clibffi import FFI_DEFAULT_ABI @@ -408,18 +378,26 @@ seen = {'i': 0, 'r': 0, 'f': 0} args = ", ".join([process(c) for c in self.arg_classes]) - if self.get_return_type() == history.INT: + result_type = self.get_result_type() + if result_type == history.INT: result = 'rffi.cast(lltype.Signed, res)' - elif self.get_return_type() == history.REF: + category = 'i' + elif result_type == history.REF: + assert RESULT == llmemory.GCREF # should be ensured by the caller result = 'lltype.cast_opaque_ptr(llmemory.GCREF, res)' - elif self.get_return_type() == history.FLOAT: + category = 'r' + elif result_type == history.FLOAT: result = 'longlong.getfloatstorage(res)' - elif self.get_return_type() == 'L': + category = 'f' + elif result_type == 'L': result = 'rffi.cast(lltype.SignedLongLong, res)' - elif self.get_return_type() == history.VOID: - result = 'None' - elif self.get_return_type() == 'S': + category = 'f' + elif result_type == history.VOID: + result = '0' + category = 'i' + elif result_type == 'S': result = 'longlong.singlefloat2int(res)' + category = 'i' else: assert 0 source = py.code.Source(""" @@ -433,10 +411,13 @@ d = globals().copy() d.update(locals()) exec source.compile() in d - self.call_stub = d['call_stub'] + call_stub = d['call_stub'] + # store the function into one of three attributes, to preserve + # type-correctness of the 
return value + setattr(self, 'call_stub_%s' % category, call_stub) def verify_types(self, args_i, args_r, args_f, return_type): - assert self._return_type in return_type + assert self.result_type in return_type assert (self.arg_classes.count('i') + self.arg_classes.count('S')) == len(args_i or ()) assert self.arg_classes.count('r') == len(args_r or ()) @@ -444,161 +425,56 @@ self.arg_classes.count('L')) == len(args_f or ()) def repr_of_descr(self): - return '<%s>' % self._clsname + res = 'Call%s %d' % (self.result_type, self.result_size) + if self.arg_classes: + res += ' ' + self.arg_classes + if self.extrainfo: + res += ' EF=%d' % self.extrainfo.extraeffect + oopspecindex = self.extrainfo.oopspecindex + if oopspecindex: + res += ' OS=%d' % oopspecindex + return '<%s>' % res -class BaseIntCallDescr(BaseCallDescr): - # Base class of the various subclasses of descrs corresponding to - # calls having a return kind of 'int' (including non-gc pointers). - # The inheritance hierarchy is a bit different than with other Descr - # classes because of the 'call_stub' attribute, which is of type - # - # lambda func, args_i, args_r, args_f --> int/ref/float/void - # - # The purpose of BaseIntCallDescr is to be the parent of all classes - # in which 'call_stub' has a return kind of 'int'. - _return_type = history.INT - call_stub = staticmethod(lambda func, args_i, args_r, args_f: 0) - - _is_result_signed = False # can be overridden in XxxCallDescr - def is_result_signed(self): - return self._is_result_signed - -class DynamicIntCallDescr(BaseIntCallDescr): - """ - calldescr that works for every integer type, by explicitly passing it the - size of the result. Used only by get_call_descr_dynamic - """ - _clsname = 'DynamicIntCallDescr' - - def __init__(self, arg_classes, result_size, result_sign, extrainfo, ffi_flags): - BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) - assert isinstance(result_sign, bool) - self._result_size = chr(result_size) - self._result_sign = result_sign - - def get_result_size(self, translate_support_code): - return ord(self._result_size) - - def is_result_signed(self): - return self._result_sign - - -class NonGcPtrCallDescr(BaseIntCallDescr): - _clsname = 'NonGcPtrCallDescr' - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrCallDescr(BaseCallDescr): - _clsname = 'GcPtrCallDescr' - _return_type = history.REF - call_stub = staticmethod(lambda func, args_i, args_r, args_f: - lltype.nullptr(llmemory.GCREF.TO)) - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class FloatCallDescr(BaseCallDescr): - _clsname = 'FloatCallDescr' - _return_type = history.FLOAT - call_stub = staticmethod(lambda func,args_i,args_r,args_f: longlong.ZEROF) - def get_result_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class LongLongCallDescr(FloatCallDescr): - _clsname = 'LongLongCallDescr' - _return_type = 'L' - -class VoidCallDescr(BaseCallDescr): - _clsname = 'VoidCallDescr' - _return_type = history.VOID - call_stub = staticmethod(lambda func, args_i, args_r, args_f: None) - def get_result_size(self, translate_support_code): - return 0 - -_SingleFloatCallDescr = None # built lazily - -def getCallDescrClass(RESULT): - if RESULT is lltype.Void: - return VoidCallDescr - if RESULT is lltype.Float: - return FloatCallDescr - if RESULT is lltype.SingleFloat: - global _SingleFloatCallDescr - if 
_SingleFloatCallDescr is None: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(RESULT) - class SingleFloatCallDescr(getCallDescrClass(rffi.UINT)): - _clsname = 'SingleFloatCallDescr' - _return_type = 'S' - _SingleFloatCallDescr = SingleFloatCallDescr - return _SingleFloatCallDescr - if is_longlong(RESULT): - return LongLongCallDescr - return getDescrClass(RESULT, BaseIntCallDescr, GcPtrCallDescr, - NonGcPtrCallDescr, 'Call', 'get_result_size', - Ellipsis, # <= floatattrname should not be used here - '_is_result_signed') -getCallDescrClass._annspecialcase_ = 'specialize:memo' +def map_type_to_argclass(ARG, accept_void=False): + kind = getkind(ARG) + if kind == 'int': + if ARG is lltype.SingleFloat: return 'S' + else: return 'i' + elif kind == 'ref': return 'r' + elif kind == 'float': + if is_longlong(ARG): return 'L' + else: return 'f' + elif kind == 'void': + if accept_void: return 'v' + raise NotImplementedError('ARG = %r' % (ARG,)) def get_call_descr(gccache, ARGS, RESULT, extrainfo=None): - arg_classes = [] - for ARG in ARGS: - kind = getkind(ARG) - if kind == 'int': - if ARG is lltype.SingleFloat: - arg_classes.append('S') + arg_classes = map(map_type_to_argclass, ARGS) + arg_classes = ''.join(arg_classes) + result_type = map_type_to_argclass(RESULT, accept_void=True) + RESULT_ERASED = RESULT + if RESULT is lltype.Void: + result_size = 0 + result_signed = False + else: + if isinstance(RESULT, lltype.Ptr): + # avoid too many CallDescrs + if result_type == 'r': + RESULT_ERASED = llmemory.GCREF else: - arg_classes.append('i') - elif kind == 'ref': arg_classes.append('r') - elif kind == 'float': - if is_longlong(ARG): - arg_classes.append('L') - else: - arg_classes.append('f') - else: - raise NotImplementedError('ARG = %r' % (ARG,)) - arg_classes = ''.join(arg_classes) - cls = getCallDescrClass(RESULT) - key = (cls, arg_classes, extrainfo) + RESULT_ERASED = llmemory.Address + result_size = symbolic.get_size(RESULT_ERASED, + gccache.translate_support_code) + result_signed = get_type_flag(RESULT) == FLAG_SIGNED + key = (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo) cache = gccache._cache_call try: - return cache[key] + calldescr = cache[key] except KeyError: - calldescr = cls(arg_classes, extrainfo) - calldescr.create_call_stub(gccache.rtyper, RESULT) + calldescr = CallDescr(arg_classes, result_type, result_signed, + result_size, extrainfo) + calldescr.create_call_stub(gccache.rtyper, RESULT_ERASED) cache[key] = calldescr - return calldescr - - -# ____________________________________________________________ - -def getDescrClass(TYPE, BaseDescr, GcPtrDescr, NonGcPtrDescr, - nameprefix, methodname, floatattrname, signedattrname, - _cache={}): - if isinstance(TYPE, lltype.Ptr): - if TYPE.TO._gckind == 'gc': - return GcPtrDescr - else: - return NonGcPtrDescr - if TYPE is lltype.SingleFloat: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(TYPE) - TYPE = rffi.UINT - try: - return _cache[nameprefix, TYPE] - except KeyError: - # - class Descr(BaseDescr): - _clsname = '%s%sDescr' % (TYPE._name, nameprefix) - Descr.__name__ = Descr._clsname - # - def method(self, translate_support_code): - return symbolic.get_size(TYPE, translate_support_code) - setattr(Descr, methodname, method) - # - if TYPE is lltype.Float or is_longlong(TYPE): - setattr(Descr, floatattrname, True) - elif (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and - rffi.cast(TYPE, -1) == -1): - setattr(Descr, signedattrname, True) - # - _cache[nameprefix, TYPE] = Descr - return Descr + assert 
repr(calldescr.result_size) == repr(result_size) + return calldescr diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -1,9 +1,7 @@ from pypy.rlib.rarithmetic import intmask from pypy.jit.metainterp import history from pypy.rpython.lltypesystem import rffi -from pypy.jit.backend.llsupport.descr import ( - DynamicIntCallDescr, NonGcPtrCallDescr, FloatCallDescr, VoidCallDescr, - LongLongCallDescr, getCallDescrClass) +from pypy.jit.backend.llsupport.descr import CallDescr class UnsupportedKind(Exception): pass @@ -16,29 +14,13 @@ argkinds = [get_ffi_type_kind(cpu, arg) for arg in ffi_args] except UnsupportedKind: return None - arg_classes = ''.join(argkinds) - if reskind == history.INT: - size = intmask(ffi_result.c_size) - signed = is_ffi_type_signed(ffi_result) - return DynamicIntCallDescr(arg_classes, size, signed, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.REF: - return NonGcPtrCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.FLOAT: - return FloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.VOID: - return VoidCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'L': - return LongLongCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'S': - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - return SingleFloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - assert False + if reskind == history.VOID: + result_size = 0 + else: + result_size = intmask(ffi_result.c_size) + argkinds = ''.join(argkinds) + return CallDescr(argkinds, reskind, is_ffi_type_signed(ffi_result), + result_size, extrainfo, ffi_flags=ffi_flags) def get_ffi_type_kind(cpu, ffi_type): from pypy.rlib.libffi import types diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,6 +1,6 @@ import os from pypy.rlib import rgc -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize from pypy.rlib.debug import fatalerror from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr @@ -8,52 +8,93 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt, ConstPtr -from pypy.jit.metainterp.history import AbstractDescr +from pypy.jit.codewriter import heaptracker +from pypy.jit.metainterp.history import ConstPtr, AbstractDescr from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.symbolic import WORD -from pypy.jit.backend.llsupport.descr import BaseSizeDescr, BaseArrayDescr +from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr -from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr +from pypy.jit.backend.llsupport.descr import get_array_descr from pypy.jit.backend.llsupport.descr import get_call_descr +from pypy.jit.backend.llsupport.rewrite import GcRewriterAssembler from pypy.rpython.memory.gctransform import asmgcroot # 
____________________________________________________________ class GcLLDescription(GcCache): - minimal_size_in_nursery = 0 - get_malloc_slowpath_addr = None def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr + if translator and translator.config.translation.gcremovetypeptr: + self.fielddescr_vtable = None + else: + self.fielddescr_vtable = get_field_descr(self, rclass.OBJECT, + 'typeptr') + self._generated_functions = [] + + def _setup_str(self): + self.str_descr = get_array_descr(self, rstr.STR) + self.unicode_descr = get_array_descr(self, rstr.UNICODE) + + def generate_function(self, funcname, func, ARGS, RESULT=llmemory.GCREF): + """Generates a variant of malloc with the given name and the given + arguments. It should return NULL if out of memory. If it raises + anything, it must be an optional MemoryError. + """ + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, RESULT)) + descr = get_call_descr(self, ARGS, RESULT) + setattr(self, funcname, func) + setattr(self, funcname + '_FUNCPTR', FUNCPTR) + setattr(self, funcname + '_descr', descr) + self._generated_functions.append(funcname) + + @specialize.arg(1) + def get_malloc_fn(self, funcname): + func = getattr(self, funcname) + FUNC = getattr(self, funcname + '_FUNCPTR') + return llhelper(FUNC, func) + + @specialize.arg(1) + def get_malloc_fn_addr(self, funcname): + ll_func = self.get_malloc_fn(funcname) + return heaptracker.adr2int(llmemory.cast_ptr_to_adr(ll_func)) + def _freeze_(self): return True def initialize(self): pass def do_write_barrier(self, gcref_struct, gcref_newptr): pass - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - return operations - def can_inline_malloc(self, descr): - return False - def can_inline_malloc_varsize(self, descr, num_elem): + def can_use_nursery_malloc(self, size): return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): pass + def get_nursery_free_addr(self): + raise NotImplementedError + def get_nursery_top_addr(self): + raise NotImplementedError - def get_funcptr_for_newarray(self): - return llhelper(self.GC_MALLOC_ARRAY, self.malloc_array) - def get_funcptr_for_newstr(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_str) - def get_funcptr_for_newunicode(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_unicode) + def gc_malloc(self, sizedescr): + """Blackhole: do a 'bh_new'. 
Also used for 'bh_new_with_vtable', + with the vtable pointer set manually afterwards.""" + assert isinstance(sizedescr, SizeDescr) + return self._bh_malloc(sizedescr) + def gc_malloc_array(self, arraydescr, num_elem): + assert isinstance(arraydescr, ArrayDescr) + return self._bh_malloc_array(arraydescr, num_elem) - def record_constptrs(self, op, gcrefs_output_list): + def gc_malloc_str(self, num_elem): + return self._bh_malloc_array(self.str_descr, num_elem) + + def gc_malloc_unicode(self, num_elem): + return self._bh_malloc_array(self.unicode_descr, num_elem) + + def _record_constptrs(self, op, gcrefs_output_list): for i in range(op.numargs()): v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): @@ -61,11 +102,27 @@ rgc._make_sure_does_not_move(p) gcrefs_output_list.append(p) + def rewrite_assembler(self, cpu, operations, gcrefs_output_list): + rewriter = GcRewriterAssembler(self, cpu) + newops = rewriter.rewrite(operations) + # record all GCREFs, because the GC (or Boehm) cannot see them and + # keep them alive if they end up as constants in the assembler + for op in newops: + self._record_constptrs(op, gcrefs_output_list) + return newops + # ____________________________________________________________ class GcLLDescr_boehm(GcLLDescription): - moving_gc = False - gcrootmap = None + kind = 'boehm' + moving_gc = False + round_up = False + gcrootmap = None + write_barrier_descr = None + fielddescr_tid = None + str_type_id = 0 + unicode_type_id = 0 + get_malloc_slowpath_addr = None @classmethod def configure_boehm_once(cls): @@ -76,6 +133,16 @@ from pypy.rpython.tool import rffi_platform compilation_info = rffi_platform.configure_boehm() + # on some platform GC_init is required before any other + # GC_* functions, call it here for the benefit of tests + # XXX move this to tests + init_fn_ptr = rffi.llexternal("GC_init", + [], lltype.Void, + compilation_info=compilation_info, + sandboxsafe=True, + _nowrapper=True) + init_fn_ptr() + # Versions 6.x of libgc needs to use GC_local_malloc(). 
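The generate_function() helper added above turns each malloc variant into a triple of attributes (the function itself, its FUNCPTR type and its call descr), so the backend can fetch whichever piece it needs by name. A small sketch of the same pattern with the lltype machinery left out; the '_SIG' and dict descr are stand-ins, not the real attributes:

    class MallocRegistry(object):
        def __init__(self):
            self._generated_functions = []

        def generate_function(self, funcname, func, arg_kinds, result='ref'):
            # store the function, a stand-in for its FUNCPTR type, and a
            # stand-in for its call descr, under derived attribute names
            setattr(self, funcname, func)
            setattr(self, funcname + '_SIG', (tuple(arg_kinds), result))
            setattr(self, funcname + '_descr', {'name': funcname,
                                                'args': tuple(arg_kinds)})
            self._generated_functions.append(funcname)

    reg = MallocRegistry()
    reg.generate_function('malloc_fixedsize', lambda size: bytearray(size),
                          ['int'])
    assert reg.malloc_fixedsize(8) == bytearray(8)
    assert reg.malloc_fixedsize_descr['args'] == ('int',)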
# Versions 7.x of libgc removed this function; GC_malloc() has # the same behavior if libgc was compiled with @@ -95,96 +162,42 @@ sandboxsafe=True, _nowrapper=True) cls.malloc_fn_ptr = malloc_fn_ptr - cls.compilation_info = compilation_info return malloc_fn_ptr def __init__(self, gcdescr, translator, rtyper): GcLLDescription.__init__(self, gcdescr, translator, rtyper) # grab a pointer to the Boehm 'malloc' function - malloc_fn_ptr = self.configure_boehm_once() - self.funcptr_for_new = malloc_fn_ptr + self.malloc_fn_ptr = self.configure_boehm_once() + self._setup_str() + self._make_functions() - def malloc_array(basesize, itemsize, ofs_length, num_elem): + def _make_functions(self): + + def malloc_fixedsize(size): + return self.malloc_fn_ptr(size) + self.generate_function('malloc_fixedsize', malloc_fixedsize, + [lltype.Signed]) + + def malloc_array(basesize, num_elem, itemsize, ofs_length): try: - size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) except OverflowError: return lltype.nullptr(llmemory.GCREF.TO) - res = self.funcptr_for_new(size) - if not res: - return res - rffi.cast(rffi.CArrayPtr(lltype.Signed), res)[ofs_length/WORD] = num_elem + res = self.malloc_fn_ptr(totalsize) + if res: + arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) + arrayptr[ofs_length/WORD] = num_elem return res - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 4, llmemory.GCREF)) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 4) + def _bh_malloc(self, sizedescr): + return self.malloc_fixedsize(sizedescr.size) - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, self.translate_support_code) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code) - def malloc_str(length): - return self.malloc_array( - str_basesize, str_itemsize, str_ofs_length, length - ) - def malloc_unicode(length): - return self.malloc_array( - unicode_basesize, unicode_itemsize, unicode_ofs_length, length - ) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - - - # on some platform GC_init is required before any other - # GC_* functions, call it here for the benefit of tests - # XXX move this to tests - init_fn_ptr = rffi.llexternal("GC_init", - [], lltype.Void, - compilation_info=self.compilation_info, - sandboxsafe=True, - _nowrapper=True) - - init_fn_ptr() - - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.funcptr_for_new(sizedescr.size) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(basesize, itemsize, ofs_length, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size] - - def args_for_new_array(self, arraydescr): - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = 
arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [basesize, itemsize, ofs_length] - - def get_funcptr_for_new(self): - return self.funcptr_for_new - - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # record all GCREFs too, because Boehm cannot see them and keep them - # alive if they end up as constants in the assembler - for op in operations: - self.record_constptrs(op, gcrefs_output_list) - return GcLLDescription.rewrite_assembler(self, cpu, operations, - gcrefs_output_list) + def _bh_malloc_array(self, arraydescr, num_elem): + return self.malloc_array(arraydescr.basesize, num_elem, + arraydescr.itemsize, + arraydescr.lendescr.offset) # ____________________________________________________________ @@ -554,12 +567,14 @@ class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): - GCClass = gc_ll_descr.GCClass self.llop1 = gc_ll_descr.llop1 self.WB_FUNCPTR = gc_ll_descr.WB_FUNCPTR self.WB_ARRAY_FUNCPTR = gc_ll_descr.WB_ARRAY_FUNCPTR - self.fielddescr_tid = get_field_descr(gc_ll_descr, GCClass.HDR, 'tid') + self.fielddescr_tid = gc_ll_descr.fielddescr_tid # + GCClass = gc_ll_descr.GCClass + if GCClass is None: # for tests + return self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = ( self.extract_flag_byte(self.jit_wb_if_flag)) @@ -596,48 +611,74 @@ funcaddr = llmemory.cast_ptr_to_adr(funcptr) return cpu.cast_adr_to_int(funcaddr) # this may return 0 + def has_write_barrier_from_array(self, cpu): + return self.get_write_barrier_from_array_fn(cpu) != 0 + class GcLLDescr_framework(GcLLDescription): DEBUG = False # forced to True by x86/test/test_zrpy_gc.py + kind = 'framework' + round_up = True - def __init__(self, gcdescr, translator, rtyper, llop1=llop): - from pypy.rpython.memory.gctypelayout import check_typeid - from pypy.rpython.memory.gcheader import GCHeaderBuilder - from pypy.rpython.memory.gctransform import framework + def __init__(self, gcdescr, translator, rtyper, llop1=llop, + really_not_translated=False): GcLLDescription.__init__(self, gcdescr, translator, rtyper) - assert self.translate_support_code, "required with the framework GC" self.translator = translator self.llop1 = llop1 + if really_not_translated: + assert not self.translate_support_code # but half does not work + self._initialize_for_tests() + else: + assert self.translate_support_code,"required with the framework GC" + self._check_valid_gc() + self._make_gcrootmap() + self._make_layoutbuilder() + self._setup_gcclass() + self._setup_tid() + self._setup_write_barrier() + self._setup_str() + self._make_functions(really_not_translated) + def _initialize_for_tests(self): + self.layoutbuilder = None + self.fielddescr_tid = AbstractDescr() + self.max_size_of_young_obj = 1000 + self.GCClass = None + + def _check_valid_gc(self): # we need the hybrid or minimark GC for rgc._make_sure_does_not_move() # to work - if gcdescr.config.translation.gc not in ('hybrid', 'minimark'): + if self.gcdescr.config.translation.gc not in ('hybrid', 'minimark'): raise NotImplementedError("--gc=%s not implemented with the JIT" % (gcdescr.config.translation.gc,)) + def _make_gcrootmap(self): # to find roots in the assembler, make a GcRootMap - name = gcdescr.config.translation.gcrootfinder + name = self.gcdescr.config.translation.gcrootfinder try: cls = globals()['GcRootMap_' + name] except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % 
(name,)) - gcrootmap = cls(gcdescr) + gcrootmap = cls(self.gcdescr) self.gcrootmap = gcrootmap + def _make_layoutbuilder(self): # make a TransformerLayoutBuilder and save it on the translator # where it can be fished and reused by the FrameworkGCTransformer + from pypy.rpython.memory.gctransform import framework + translator = self.translator self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} - gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + self.gcrootmap.add_jit2gc_hooks(translator._jit2gc) + def _setup_gcclass(self): + from pypy.rpython.memory.gcheader import GCHeaderBuilder self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO) - (self.array_basesize, _, self.array_length_ofs) = \ - symbolic.get_array_token(lltype.GcArray(lltype.Signed), True) self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() @@ -645,87 +686,124 @@ assert self.GCClass.inline_simple_malloc assert self.GCClass.inline_simple_malloc_varsize - # make a malloc function, with two arguments - def malloc_basic(size, tid): - type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - check_typeid(type_id) - res = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - type_id, size, - False, False, False) - # In case the operation above failed, we are returning NULL - # from this function to assembler. There is also an RPython - # exception set, typically MemoryError; but it's easier and - # faster to check for the NULL return value, as done by - # translator/exceptiontransform.py. - #llop.debug_print(lltype.Void, "\tmalloc_basic", size, type_id, - # "-->", res) - return res - self.malloc_basic = malloc_basic - self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType( - [lltype.Signed, lltype.Signed], llmemory.GCREF)) + def _setup_tid(self): + self.fielddescr_tid = get_field_descr(self, self.GCClass.HDR, 'tid') + + def _setup_write_barrier(self): self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, llmemory.Address], lltype.Void)) self.WB_ARRAY_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, lltype.Signed, llmemory.Address], lltype.Void)) self.write_barrier_descr = WriteBarrierDescr(self) - # + + def _make_functions(self, really_not_translated): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + (self.standard_array_basesize, _, self.standard_array_length_ofs) = \ + symbolic.get_array_token(lltype.GcArray(lltype.Signed), + not really_not_translated) + + def malloc_nursery_slowpath(size): + """Allocate 'size' null bytes out of the nursery. + Note that the fast path is typically inlined by the backend.""" + if self.DEBUG: + self._random_usage_of_xmm_registers() + type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, size, + False, False, False) + self.generate_function('malloc_nursery', malloc_nursery_slowpath, + [lltype.Signed]) + def malloc_array(itemsize, tid, num_elem): + """Allocate an array with a variable-size num_elem. 
+ Only works for standard arrays.""" type_id = llop.extract_ushort(llgroup.HALFWORD, tid) check_typeid(type_id) return llop1.do_malloc_varsize_clear( llmemory.GCREF, - type_id, num_elem, self.array_basesize, itemsize, - self.array_length_ofs) - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 3, llmemory.GCREF)) - # - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, True) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, True) - str_type_id = self.layoutbuilder.get_type_id(rstr.STR) - unicode_type_id = self.layoutbuilder.get_type_id(rstr.UNICODE) - # + type_id, num_elem, self.standard_array_basesize, itemsize, + self.standard_array_length_ofs) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 3) + + def malloc_array_nonstandard(basesize, itemsize, lengthofs, tid, + num_elem): + """For the rare case of non-standard arrays, i.e. arrays where + self.standard_array_{basesize,length_ofs} is wrong. It can + occur e.g. with arrays of floats on Win32.""" + type_id = llop.extract_ushort(llgroup.HALFWORD, tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + type_id, num_elem, basesize, itemsize, lengthofs) + self.generate_function('malloc_array_nonstandard', + malloc_array_nonstandard, + [lltype.Signed] * 5) + + str_type_id = self.str_descr.tid + str_basesize = self.str_descr.basesize + str_itemsize = self.str_descr.itemsize + str_ofs_length = self.str_descr.lendescr.offset + unicode_type_id = self.unicode_descr.tid + unicode_basesize = self.unicode_descr.basesize + unicode_itemsize = self.unicode_descr.itemsize + unicode_ofs_length = self.unicode_descr.lendescr.offset + def malloc_str(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, str_type_id, length, str_basesize, str_itemsize, str_ofs_length) + self.generate_function('malloc_str', malloc_str, + [lltype.Signed]) + def malloc_unicode(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, - unicode_type_id, length, unicode_basesize,unicode_itemsize, + unicode_type_id, length, unicode_basesize, unicode_itemsize, unicode_ofs_length) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - # - class ForTestOnly: - pass - for_test_only = ForTestOnly() - for_test_only.x = 1.23 - def random_usage_of_xmm_registers(): - x0 = for_test_only.x - x1 = x0 * 0.1 - x2 = x0 * 0.2 - x3 = x0 * 0.3 - for_test_only.x = x0 + x1 + x2 + x3 - # - def malloc_slowpath(size): - if self.DEBUG: - random_usage_of_xmm_registers() - assert size >= self.minimal_size_in_nursery - # NB. although we call do_malloc_fixedsize_clear() here, - # it's a bit of a hack because we set tid to 0 and may - # also use it to allocate varsized objects. The tid - # and possibly the length are both set afterward. - gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - 0, size, False, False, False) - return rffi.cast(lltype.Signed, gcref) - self.malloc_slowpath = malloc_slowpath - self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) + self.generate_function('malloc_unicode', malloc_unicode, + [lltype.Signed]) + + # Rarely called: allocate a fixed-size amount of bytes, but + # not in the nursery, because it is too big. Implemented like + # malloc_nursery_slowpath() above. 
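The malloc_array_nonstandard() variant above exists for arrays whose header layout differs from the standard GcArray; the rewriter later chooses between the two entry points in gen_malloc_array(). A sketch of that choice, with a plain dict standing in for an ArrayDescr:

    def pick_malloc_array(descr, standard_basesize, standard_length_ofs):
        # standard-looking arrays use the short malloc_array(itemsize, tid,
        # num_elem) call; anything else passes its full layout to the
        # 5-argument malloc_array_nonstandard call
        if (descr['basesize'] == standard_basesize and
                descr['lendescr_offset'] == standard_length_ofs):
            return 'malloc_array'
        return 'malloc_array_nonstandard'

    assert pick_malloc_array({'basesize': 8, 'lendescr_offset': 4},
                             8, 4) == 'malloc_array'
    assert pick_malloc_array({'basesize': 16, 'lendescr_offset': 8},
                             8, 4) == 'malloc_array_nonstandard'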
+ self.generate_function('malloc_fixedsize', malloc_nursery_slowpath, + [lltype.Signed]) + + def _bh_malloc(self, sizedescr): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, sizedescr.tid) + check_typeid(type_id) + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, sizedescr.size, + False, False, False) + + def _bh_malloc_array(self, arraydescr, num_elem): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, arraydescr.tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear(llmemory.GCREF, + type_id, num_elem, + arraydescr.basesize, + arraydescr.itemsize, + arraydescr.lendescr.offset) + + + class ForTestOnly: + pass + for_test_only = ForTestOnly() + for_test_only.x = 1.23 + + def _random_usage_of_xmm_registers(self): + x0 = self.for_test_only.x + x1 = x0 * 0.1 + x2 = x0 * 0.2 + x3 = x0 * 0.3 + self.for_test_only.x = x0 + x1 + x2 + x3 def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -735,49 +813,26 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) - return rffi.cast(lltype.Signed, fptr) - def initialize(self): self.gcrootmap.initialize() def init_size_descr(self, S, descr): - type_id = self.layoutbuilder.get_type_id(S) - assert not self.layoutbuilder.is_weakref_type(S) - assert not self.layoutbuilder.has_finalizer(S) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(S) + assert not self.layoutbuilder.is_weakref_type(S) + assert not self.layoutbuilder.has_finalizer(S) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) def init_array_descr(self, A, descr): - type_id = self.layoutbuilder.get_type_id(A) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(A) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.malloc_basic(sizedescr.size, sizedescr.tid) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(itemsize, arraydescr.tid, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size, sizedescr.tid] - - def args_for_new_array(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [itemsize, arraydescr.tid] - - def get_funcptr_for_new(self): - return llhelper(self.GC_MALLOC_BASIC, self.malloc_basic) + def _set_tid(self, gcptr, tid): + hdr_addr = llmemory.cast_ptr_to_adr(gcptr) + hdr_addr -= self.gcheaderbuilder.size_gc_header + hdr = llmemory.cast_adr_to_ptr(hdr_addr, self.HDRPTR) + hdr.tid = tid def do_write_barrier(self, gcref_struct, gcref_newptr): hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct) @@ -791,108 +846,8 @@ funcptr(llmemory.cast_ptr_to_adr(gcref_struct), 
llmemory.cast_ptr_to_adr(gcref_newptr)) - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # Perform two kinds of rewrites in parallel: - # - # - Add COND_CALLs to the write barrier before SETFIELD_GC and - # SETARRAYITEM_GC operations. - # - # - Record the ConstPtrs from the assembler. - # - newops = [] - known_lengths = {} - # we can only remember one malloc since the next malloc can possibly - # collect - last_malloc = None - for op in operations: - if op.getopnum() == rop.DEBUG_MERGE_POINT: - continue - # ---------- record the ConstPtrs ---------- - self.record_constptrs(op, gcrefs_output_list) - if op.is_malloc(): - last_malloc = op.result - elif op.can_malloc(): - last_malloc = None - # ---------- write barrier for SETFIELD_GC ---------- - if op.getopnum() == rop.SETFIELD_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(1) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETFIELD_RAW) - # ---------- write barrier for SETINTERIORFIELD_GC ------ - if op.getopnum() == rop.SETINTERIORFIELD_GC: - val = op.getarg(0) - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) - # ---------- write barrier for SETARRAYITEM_GC ---------- - if op.getopnum() == rop.SETARRAYITEM_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier_array(newops, op.getarg(0), - op.getarg(1), v, - cpu, known_lengths) - op = op.copy_and_change(rop.SETARRAYITEM_RAW) - elif op.getopnum() == rop.NEW_ARRAY: - v_length = op.getarg(0) - if isinstance(v_length, ConstInt): - known_lengths[op.result] = v_length.getint() - # ---------- - newops.append(op) - return newops - - def _gen_write_barrier(self, newops, v_base, v_value): - args = [v_base, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, - descr=self.write_barrier_descr)) - - def _gen_write_barrier_array(self, newops, v_base, v_index, v_value, - cpu, known_lengths): - if self.write_barrier_descr.get_write_barrier_from_array_fn(cpu) != 0: - # If we know statically the length of 'v', and it is not too - # big, then produce a regular write_barrier. If it's unknown or - # too big, produce instead a write_barrier_from_array. 
- LARGE = 130 - length = known_lengths.get(v_base, LARGE) - if length >= LARGE: - # unknown or too big: produce a write_barrier_from_array - args = [v_base, v_index, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, - None, - descr=self.write_barrier_descr)) - return - # fall-back case: produce a write_barrier - self._gen_write_barrier(newops, v_base, v_value) - - def can_inline_malloc(self, descr): - assert isinstance(descr, BaseSizeDescr) - if descr.size < self.max_size_of_young_obj: - has_finalizer = bool(descr.tid & (1<= len(self.used): + self.used.append(False) + if self.used[index + i]: + return False # already in use + # good, we can reuse the location + for i in range(size): + self.used[index + i] = True + self.bindings[box] = loc + return True + # abstract methods that need to be overwritten for specific assemblers @staticmethod def frame_pos(loc, type): @@ -49,6 +123,10 @@ @staticmethod def frame_size(type): return 1 + @staticmethod + def get_loc_index(loc): + raise NotImplementedError("Purely abstract") + class RegisterManager(object): """ Class that keeps track of register allocations @@ -68,7 +146,14 @@ self.frame_manager = frame_manager self.assembler = assembler + def is_still_alive(self, v): + # Check if 'v' is alive at the current position. + # Return False if the last usage is strictly before. + return self.longevity[v][1] >= self.position + def stays_alive(self, v): + # Check if 'v' stays alive after the current position. + # Return False if the last usage is before or at position. return self.longevity[v][1] > self.position def next_instruction(self, incr=1): @@ -84,11 +169,14 @@ point for all variables that might be in registers. """ self._check_type(v) - if isinstance(v, Const) or v not in self.reg_bindings: + if isinstance(v, Const): return if v not in self.longevity or self.longevity[v][1] <= self.position: - self.free_regs.append(self.reg_bindings[v]) - del self.reg_bindings[v] + if v in self.reg_bindings: + self.free_regs.append(self.reg_bindings[v]) + del self.reg_bindings[v] + if self.frame_manager is not None: + self.frame_manager.mark_as_free(v) def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -0,0 +1,328 @@ +import sys +from pypy.rlib.rarithmetic import ovfcheck +from pypy.jit.metainterp.history import ConstInt, BoxPtr, ConstPtr +from pypy.jit.metainterp.resoperation import ResOperation, rop +from pypy.jit.codewriter import heaptracker +from pypy.jit.backend.llsupport.symbolic import WORD +from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr + + +class GcRewriterAssembler(object): + # This class performs the following rewrites on the list of operations: + # + # - Remove the DEBUG_MERGE_POINTs. + # + # - Turn all NEW_xxx to either a CALL_MALLOC_GC, or a CALL_MALLOC_NURSERY + # followed by SETFIELDs in order to initialize their GC fields. The + # two advantages of CALL_MALLOC_NURSERY is that it inlines the common + # path, and we need only one such operation to allocate several blocks + # of memory at once. + # + # - Add COND_CALLs to the write barrier before SETFIELD_GC and + # SETARRAYITEM_GC operations. 
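The regalloc.py changes above let the FrameManager hand frame slots back out once the variable that owned them dies: mark_as_free() clears the slot and get_new_loc() scans for a free one before growing the frame. A toy model of that free list, assuming one-word locations only:

    class FrameSlots(object):
        def __init__(self):
            self.used = []        # one flag per frame slot
            self.bindings = {}    # variable -> slot index

        def get_new_loc(self, box):
            # reuse the first free slot, or grow the frame if none is free
            for index, in_use in enumerate(self.used):
                if not in_use:
                    break
            else:
                index = len(self.used)
                self.used.append(False)
            self.used[index] = True
            self.bindings[box] = index
            return index

        def mark_as_free(self, box):
            # called when the variable's live range ends
            index = self.bindings.pop(box, None)
            if index is not None:
                self.used[index] = False

    fm = FrameSlots()
    loc_a = fm.get_new_loc('a')
    fm.mark_as_free('a')
    assert fm.get_new_loc('b') == loc_a   # the freed slot is recycled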
+ + _previous_size = -1 + _op_malloc_nursery = None + _v_last_malloced_nursery = None + c_zero = ConstInt(0) + + def __init__(self, gc_ll_descr, cpu): + self.gc_ll_descr = gc_ll_descr + self.cpu = cpu + self.newops = [] + self.known_lengths = {} + self.recent_mallocs = {} # set of variables + + def rewrite(self, operations): + # we can only remember one malloc since the next malloc can possibly + # collect; but we can try to collapse several known-size mallocs into + # one, both for performance and to reduce the number of write + # barriers. We do this on each "basic block" of operations, which in + # this case means between CALLs or unknown-size mallocs. + # + for op in operations: + if op.getopnum() == rop.DEBUG_MERGE_POINT: + continue + # ---------- turn NEWxxx into CALL_MALLOC_xxx ---------- + if op.is_malloc(): + self.handle_malloc_operation(op) + continue + elif op.can_malloc(): + self.emitting_an_operation_that_can_collect() + elif op.getopnum() == rop.LABEL: + self.emitting_an_operation_that_can_collect() + self.known_lengths.clear() + # ---------- write barriers ---------- + if self.gc_ll_descr.write_barrier_descr is not None: + if op.getopnum() == rop.SETFIELD_GC: + self.handle_write_barrier_setfield(op) + continue + if op.getopnum() == rop.SETINTERIORFIELD_GC: + self.handle_write_barrier_setinteriorfield(op) + continue + if op.getopnum() == rop.SETARRAYITEM_GC: + self.handle_write_barrier_setarrayitem(op) + continue + # ---------- + self.newops.append(op) + return self.newops + + # ---------- + + def handle_malloc_operation(self, op): + opnum = op.getopnum() + if opnum == rop.NEW: + self.handle_new_fixedsize(op.getdescr(), op) + elif opnum == rop.NEW_WITH_VTABLE: + classint = op.getarg(0).getint() + descr = heaptracker.vtable2descr(self.cpu, classint) + self.handle_new_fixedsize(descr, op) + if self.gc_ll_descr.fielddescr_vtable is not None: + op = ResOperation(rop.SETFIELD_GC, + [op.result, ConstInt(classint)], None, + descr=self.gc_ll_descr.fielddescr_vtable) + self.newops.append(op) + elif opnum == rop.NEW_ARRAY: + descr = op.getdescr() + assert isinstance(descr, ArrayDescr) + self.handle_new_array(descr, op) + elif opnum == rop.NEWSTR: + self.handle_new_array(self.gc_ll_descr.str_descr, op) + elif opnum == rop.NEWUNICODE: + self.handle_new_array(self.gc_ll_descr.unicode_descr, op) + else: + raise NotImplementedError(op.getopname()) + + def handle_new_fixedsize(self, descr, op): + assert isinstance(descr, SizeDescr) + size = descr.size + self.gen_malloc_nursery(size, op.result) + self.gen_initialize_tid(op.result, descr.tid) + + def handle_new_array(self, arraydescr, op): + v_length = op.getarg(0) + total_size = -1 + if isinstance(v_length, ConstInt): + num_elem = v_length.getint() + self.known_lengths[op.result] = num_elem + try: + var_size = ovfcheck(arraydescr.itemsize * num_elem) + total_size = ovfcheck(arraydescr.basesize + var_size) + except OverflowError: + pass # total_size is still -1 + elif arraydescr.itemsize == 0: + total_size = arraydescr.basesize + if 0 <= total_size <= 0xffffff: # up to 16MB, arbitrarily + self.gen_malloc_nursery(total_size, op.result) + self.gen_initialize_tid(op.result, arraydescr.tid) + self.gen_initialize_len(op.result, v_length, arraydescr.lendescr) + elif self.gc_ll_descr.kind == 'boehm': + self.gen_boehm_malloc_array(arraydescr, v_length, op.result) + else: + opnum = op.getopnum() + if opnum == rop.NEW_ARRAY: + self.gen_malloc_array(arraydescr, v_length, op.result) + elif opnum == rop.NEWSTR: + self.gen_malloc_str(v_length, op.result) + 
elif opnum == rop.NEWUNICODE: + self.gen_malloc_unicode(v_length, op.result) + else: + raise NotImplementedError(op.getopname()) + + # ---------- + + def emitting_an_operation_that_can_collect(self): + # must be called whenever we emit an operation that can collect: + # forgets the previous MALLOC_NURSERY, if any; and empty the + # set 'recent_mallocs', so that future SETFIELDs will generate + # a write barrier as usual. + self._op_malloc_nursery = None + self.recent_mallocs.clear() + + def _gen_call_malloc_gc(self, args, v_result, descr): + """Generate a CALL_MALLOC_GC with the given args.""" + self.emitting_an_operation_that_can_collect() + op = ResOperation(rop.CALL_MALLOC_GC, args, v_result, descr) + self.newops.append(op) + # mark 'v_result' as freshly malloced + self.recent_mallocs[v_result] = None + + def gen_malloc_fixedsize(self, size, v_result): + """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, Const(size)). + Note that with the framework GC, this should be called very rarely. + """ + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize') + self._gen_call_malloc_gc([ConstInt(addr), ConstInt(size)], v_result, + self.gc_ll_descr.malloc_fixedsize_descr) + + def gen_boehm_malloc_array(self, arraydescr, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) for Boehm.""" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_array') + self._gen_call_malloc_gc([ConstInt(addr), + ConstInt(arraydescr.basesize), + v_num_elem, + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.lendescr.offset)], + v_result, + self.gc_ll_descr.malloc_array_descr) + + def gen_malloc_array(self, arraydescr, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) going either + to the standard or the nonstandard version of the function.""" + # + if (arraydescr.basesize == self.gc_ll_descr.standard_array_basesize + and arraydescr.lendescr.offset == + self.gc_ll_descr.standard_array_length_ofs): + # this is a standard-looking array, common case + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_array') + args = [ConstInt(addr), + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.tid), + v_num_elem] + calldescr = self.gc_ll_descr.malloc_array_descr + else: + # rare case, so don't care too much about the number of arguments + addr = self.gc_ll_descr.get_malloc_fn_addr( + 'malloc_array_nonstandard') + args = [ConstInt(addr), + ConstInt(arraydescr.basesize), + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.lendescr.offset), + ConstInt(arraydescr.tid), + v_num_elem] + calldescr = self.gc_ll_descr.malloc_array_nonstandard_descr + self._gen_call_malloc_gc(args, v_result, calldescr) + + def gen_malloc_str(self, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_str_fn, ...).""" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_str') + self._gen_call_malloc_gc([ConstInt(addr), v_num_elem], v_result, + self.gc_ll_descr.malloc_str_descr) + + def gen_malloc_unicode(self, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_unicode_fn, ...).""" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_unicode') + self._gen_call_malloc_gc([ConstInt(addr), v_num_elem], v_result, + self.gc_ll_descr.malloc_unicode_descr) + + def gen_malloc_nursery(self, size, v_result): + """Try to generate or update a CALL_MALLOC_NURSERY. + If that fails, generate a plain CALL_MALLOC_GC instead. 
+ """ + size = self.round_up_for_allocation(size) + if not self.gc_ll_descr.can_use_nursery_malloc(size): + self.gen_malloc_fixedsize(size, v_result) + return + # + op = None + if self._op_malloc_nursery is not None: + # already a MALLOC_NURSERY: increment its total size + total_size = self._op_malloc_nursery.getarg(0).getint() + total_size += size + if self.gc_ll_descr.can_use_nursery_malloc(total_size): + # if the total size is still reasonable, merge it + self._op_malloc_nursery.setarg(0, ConstInt(total_size)) + op = ResOperation(rop.INT_ADD, + [self._v_last_malloced_nursery, + ConstInt(self._previous_size)], + v_result) + if op is None: + # if we failed to merge with a previous MALLOC_NURSERY, emit one + self.emitting_an_operation_that_can_collect() + op = ResOperation(rop.CALL_MALLOC_NURSERY, + [ConstInt(size)], + v_result) + self._op_malloc_nursery = op + # + self.newops.append(op) + self._previous_size = size + self._v_last_malloced_nursery = v_result + self.recent_mallocs[v_result] = None + + def gen_initialize_tid(self, v_newgcobj, tid): + if self.gc_ll_descr.fielddescr_tid is not None: + # produce a SETFIELD to initialize the GC header + op = ResOperation(rop.SETFIELD_GC, + [v_newgcobj, ConstInt(tid)], None, + descr=self.gc_ll_descr.fielddescr_tid) + self.newops.append(op) + + def gen_initialize_len(self, v_newgcobj, v_length, arraylen_descr): + # produce a SETFIELD to initialize the array length + op = ResOperation(rop.SETFIELD_GC, + [v_newgcobj, v_length], None, + descr=arraylen_descr) + self.newops.append(op) + + # ---------- + + def handle_write_barrier_setfield(self, op): + val = op.getarg(0) + # no need for a write barrier in the case of previous malloc + if val not in self.recent_mallocs: + v = op.getarg(1) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self.gen_write_barrier(op.getarg(0), v) + op = op.copy_and_change(rop.SETFIELD_RAW) + self.newops.append(op) + + def handle_write_barrier_setinteriorfield(self, op): + val = op.getarg(0) + # no need for a write barrier in the case of previous malloc + if val not in self.recent_mallocs: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self.gen_write_barrier(op.getarg(0), v) + op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) + self.newops.append(op) + + def handle_write_barrier_setarrayitem(self, op): + val = op.getarg(0) + # no need for a write barrier in the case of previous malloc + if val not in self.recent_mallocs: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self.gen_write_barrier_array(op.getarg(0), + op.getarg(1), v) + op = op.copy_and_change(rop.SETARRAYITEM_RAW) + self.newops.append(op) + + def gen_write_barrier(self, v_base, v_value): + write_barrier_descr = self.gc_ll_descr.write_barrier_descr + args = [v_base, v_value] + self.newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, + descr=write_barrier_descr)) + + def gen_write_barrier_array(self, v_base, v_index, v_value): + write_barrier_descr = self.gc_ll_descr.write_barrier_descr + if write_barrier_descr.has_write_barrier_from_array(self.cpu): + # If we know statically the length of 'v', and it is not too + # big, then produce a regular write_barrier. If it's unknown or + # too big, produce instead a write_barrier_from_array. 
+ LARGE = 130 + length = self.known_lengths.get(v_base, LARGE) + if length >= LARGE: + # unknown or too big: produce a write_barrier_from_array + args = [v_base, v_index, v_value] + self.newops.append( + ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, + descr=write_barrier_descr)) + return + # fall-back case: produce a write_barrier + self.gen_write_barrier(v_base, v_value) + + def round_up_for_allocation(self, size): + if not self.gc_ll_descr.round_up: + return size + if self.gc_ll_descr.translate_support_code: + from pypy.rpython.lltypesystem import llarena + return llarena.round_up_for_allocation( + size, self.gc_ll_descr.minimal_size_in_nursery) + else: + # non-translated: do it manually + # assume that "self.gc_ll_descr.minimal_size_in_nursery" is 2 WORDs + size = max(size, 2 * WORD) + return (size + WORD-1) & ~(WORD-1) # round up diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -1,4 +1,4 @@ -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport import symbolic from pypy.rlib.objectmodel import Symbolic @@ -53,18 +53,6 @@ ('z', lltype.Ptr(U)), ('f', lltype.Float), ('s', lltype.SingleFloat)) - assert getFieldDescrClass(lltype.Ptr(T)) is GcPtrFieldDescr - assert getFieldDescrClass(lltype.Ptr(U)) is NonGcPtrFieldDescr - cls = getFieldDescrClass(lltype.Char) - assert cls != getFieldDescrClass(lltype.Signed) - assert cls == getFieldDescrClass(lltype.Char) - clsf = getFieldDescrClass(lltype.Float) - assert clsf != cls - assert clsf == getFieldDescrClass(lltype.Float) - clss = getFieldDescrClass(lltype.SingleFloat) - assert clss not in (cls, clsf) - assert clss == getFieldDescrClass(lltype.SingleFloat) - assert clss == getFieldDescrClass(rffi.UINT) # for now # c0 = GcCache(False) c1 = GcCache(True) @@ -77,11 +65,7 @@ descr_z = get_field_descr(c2, S, 'z') descr_f = get_field_descr(c2, S, 'f') descr_s = get_field_descr(c2, S, 's') - assert descr_x.__class__ is cls - assert descr_y.__class__ is GcPtrFieldDescr - assert descr_z.__class__ is NonGcPtrFieldDescr - assert descr_f.__class__ is clsf - assert descr_s.__class__ is clss + assert isinstance(descr_x, FieldDescr) assert descr_x.name == 'S.x' assert descr_y.name == 'S.y' assert descr_z.name == 'S.z' @@ -90,33 +74,27 @@ if not tsc: assert descr_x.offset < descr_y.offset < descr_z.offset assert descr_x.sort_key() < descr_y.sort_key() < descr_z.sort_key() - assert descr_x.get_field_size(False) == rffi.sizeof(lltype.Char) - assert descr_y.get_field_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr_z.get_field_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr_f.get_field_size(False) == rffi.sizeof(lltype.Float) - assert descr_s.get_field_size(False) == rffi.sizeof( - lltype.SingleFloat) + assert descr_x.field_size == rffi.sizeof(lltype.Char) + assert descr_y.field_size == rffi.sizeof(lltype.Ptr(T)) + assert descr_z.field_size == rffi.sizeof(lltype.Ptr(U)) + assert descr_f.field_size == rffi.sizeof(lltype.Float) + assert descr_s.field_size == rffi.sizeof(lltype.SingleFloat) else: assert isinstance(descr_x.offset, Symbolic) assert isinstance(descr_y.offset, Symbolic) assert isinstance(descr_z.offset, Symbolic) assert isinstance(descr_f.offset, Symbolic) assert isinstance(descr_s.offset, Symbolic) - assert 
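The non-translated fallback of round_up_for_allocation() above is just word alignment plus a minimal object size. A standalone sketch (WORD = 8 is an assumption for a 64-bit host; the real minimal size comes from the GC):

    WORD = 8   # assumption: 64-bit host

    def round_up_for_allocation(size, minimal_size=2 * WORD):
        # enforce the GC's minimal object size, then round up to a WORD multiple
        size = max(size, minimal_size)
        return (size + WORD - 1) & ~(WORD - 1)

    assert round_up_for_allocation(1) == 16    # bumped up to the 2-WORD minimum
    assert round_up_for_allocation(17) == 24   # rounded up to the next WORD
    assert round_up_for_allocation(24) == 24   # already aligned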
isinstance(descr_x.get_field_size(True), Symbolic) - assert isinstance(descr_y.get_field_size(True), Symbolic) - assert isinstance(descr_z.get_field_size(True), Symbolic) - assert isinstance(descr_f.get_field_size(True), Symbolic) - assert isinstance(descr_s.get_field_size(True), Symbolic) - assert not descr_x.is_pointer_field() - assert descr_y.is_pointer_field() - assert not descr_z.is_pointer_field() - assert not descr_f.is_pointer_field() - assert not descr_s.is_pointer_field() - assert not descr_x.is_float_field() - assert not descr_y.is_float_field() - assert not descr_z.is_float_field() - assert descr_f.is_float_field() - assert not descr_s.is_float_field() + assert isinstance(descr_x.field_size, Symbolic) + assert isinstance(descr_y.field_size, Symbolic) + assert isinstance(descr_z.field_size, Symbolic) + assert isinstance(descr_f.field_size, Symbolic) + assert isinstance(descr_s.field_size, Symbolic) + assert descr_x.flag == FLAG_UNSIGNED + assert descr_y.flag == FLAG_POINTER + assert descr_z.flag == FLAG_UNSIGNED + assert descr_f.flag == FLAG_FLOAT + assert descr_s.flag == FLAG_UNSIGNED def test_get_field_descr_sign(): @@ -128,7 +106,8 @@ for tsc in [False, True]: c2 = GcCache(tsc) descr_x = get_field_descr(c2, S, 'x') - assert descr_x.is_field_signed() == signed + assert descr_x.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] def test_get_field_descr_longlong(): if sys.maxint > 2147483647: @@ -136,9 +115,8 @@ c0 = GcCache(False) S = lltype.GcStruct('S', ('y', lltype.UnsignedLongLong)) descr = get_field_descr(c0, S, 'y') - assert not descr.is_pointer_field() - assert descr.is_float_field() - assert descr.get_field_size(False) == 8 + assert descr.flag == FLAG_FLOAT + assert descr.field_size == 8 def test_get_array_descr(): @@ -149,19 +127,8 @@ A3 = lltype.GcArray(lltype.Ptr(U)) A4 = lltype.GcArray(lltype.Float) A5 = lltype.GcArray(lltype.Struct('x', ('v', lltype.Signed), - ('k', lltype.Signed))) + ('k', lltype.Signed))) A6 = lltype.GcArray(lltype.SingleFloat) - assert getArrayDescrClass(A2) is GcPtrArrayDescr - assert getArrayDescrClass(A3) is NonGcPtrArrayDescr - cls = getArrayDescrClass(A1) - assert cls != getArrayDescrClass(lltype.GcArray(lltype.Signed)) - assert cls == getArrayDescrClass(lltype.GcArray(lltype.Char)) - clsf = getArrayDescrClass(A4) - assert clsf != cls - assert clsf == getArrayDescrClass(lltype.GcArray(lltype.Float)) - clss = getArrayDescrClass(A6) - assert clss not in (clsf, cls) - assert clss == getArrayDescrClass(lltype.GcArray(rffi.UINT)) # c0 = GcCache(False) descr1 = get_array_descr(c0, A1) @@ -170,82 +137,61 @@ descr4 = get_array_descr(c0, A4) descr5 = get_array_descr(c0, A5) descr6 = get_array_descr(c0, A6) - assert descr1.__class__ is cls - assert descr2.__class__ is GcPtrArrayDescr - assert descr3.__class__ is NonGcPtrArrayDescr - assert descr4.__class__ is clsf - assert descr6.__class__ is clss + assert isinstance(descr1, ArrayDescr) assert descr1 == get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert not descr1.is_array_of_pointers() - assert descr2.is_array_of_pointers() - assert not descr3.is_array_of_pointers() - assert not descr4.is_array_of_pointers() - assert not descr5.is_array_of_pointers() - assert not descr1.is_array_of_floats() - assert not descr2.is_array_of_floats() - assert not descr3.is_array_of_floats() - assert descr4.is_array_of_floats() - assert not descr5.is_array_of_floats() + assert descr1.flag == FLAG_UNSIGNED + assert descr2.flag == FLAG_POINTER + assert descr3.flag == FLAG_UNSIGNED + assert descr4.flag == 
FLAG_FLOAT + assert descr5.flag == FLAG_STRUCT + assert descr6.flag == FLAG_UNSIGNED # def get_alignment(code): # Retrieve default alignment for the compiler/platform return struct.calcsize('l' + code) - struct.calcsize(code) - assert descr1.get_base_size(False) == get_alignment('c') - assert descr2.get_base_size(False) == get_alignment('p') - assert descr3.get_base_size(False) == get_alignment('p') - assert descr4.get_base_size(False) == get_alignment('d') - assert descr5.get_base_size(False) == get_alignment('f') - assert descr1.get_ofs_length(False) == 0 - assert descr2.get_ofs_length(False) == 0 - assert descr3.get_ofs_length(False) == 0 - assert descr4.get_ofs_length(False) == 0 - assert descr5.get_ofs_length(False) == 0 - assert descr1.get_item_size(False) == rffi.sizeof(lltype.Char) - assert descr2.get_item_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr3.get_item_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr4.get_item_size(False) == rffi.sizeof(lltype.Float) - assert descr5.get_item_size(False) == rffi.sizeof(lltype.Signed) * 2 - assert descr6.get_item_size(False) == rffi.sizeof(lltype.SingleFloat) + assert descr1.basesize == get_alignment('c') + assert descr2.basesize == get_alignment('p') + assert descr3.basesize == get_alignment('p') + assert descr4.basesize == get_alignment('d') + assert descr5.basesize == get_alignment('f') + assert descr1.lendescr.offset == 0 + assert descr2.lendescr.offset == 0 + assert descr3.lendescr.offset == 0 + assert descr4.lendescr.offset == 0 + assert descr5.lendescr.offset == 0 + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr2.itemsize == rffi.sizeof(lltype.Ptr(T)) + assert descr3.itemsize == rffi.sizeof(lltype.Ptr(U)) + assert descr4.itemsize == rffi.sizeof(lltype.Float) + assert descr5.itemsize == rffi.sizeof(lltype.Signed) * 2 + assert descr6.itemsize == rffi.sizeof(lltype.SingleFloat) # - assert isinstance(descr1.get_base_size(True), Symbolic) - assert isinstance(descr2.get_base_size(True), Symbolic) - assert isinstance(descr3.get_base_size(True), Symbolic) - assert isinstance(descr4.get_base_size(True), Symbolic) - assert isinstance(descr5.get_base_size(True), Symbolic) - assert isinstance(descr1.get_ofs_length(True), Symbolic) - assert isinstance(descr2.get_ofs_length(True), Symbolic) - assert isinstance(descr3.get_ofs_length(True), Symbolic) - assert isinstance(descr4.get_ofs_length(True), Symbolic) - assert isinstance(descr5.get_ofs_length(True), Symbolic) - assert isinstance(descr1.get_item_size(True), Symbolic) - assert isinstance(descr2.get_item_size(True), Symbolic) - assert isinstance(descr3.get_item_size(True), Symbolic) - assert isinstance(descr4.get_item_size(True), Symbolic) - assert isinstance(descr5.get_item_size(True), Symbolic) CA = rffi.CArray(lltype.Signed) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_SIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Ptr(lltype.GcStruct('S'))) descr = get_array_descr(c0, CA) - assert descr.is_array_of_pointers() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_POINTER + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Ptr(lltype.Struct('S'))) descr = get_array_descr(c0, CA) - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == 
FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Float) descr = get_array_descr(c0, CA) - assert descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_FLOAT + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(rffi.FLOAT) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.itemsize == rffi.sizeof(lltype.SingleFloat) + assert descr.lendescr is None def test_get_array_descr_sign(): @@ -257,46 +203,55 @@ for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, A) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] # RA = rffi.CArray(RESTYPE) for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, RA) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] + + +def test_get_array_descr_str(): + c0 = GcCache(False) + descr1 = get_array_descr(c0, rstr.STR) + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr1.flag == FLAG_UNSIGNED def test_get_call_descr_not_translated(): c0 = GcCache(False) descr1 = get_call_descr(c0, [lltype.Char, lltype.Signed], lltype.Char) - assert descr1.get_result_size(False) == rffi.sizeof(lltype.Char) - assert descr1.get_return_type() == history.INT + assert descr1.get_result_size() == rffi.sizeof(lltype.Char) + assert descr1.get_result_type() == history.INT assert descr1.arg_classes == "ii" # T = lltype.GcStruct('T') descr2 = get_call_descr(c0, [lltype.Ptr(T)], lltype.Ptr(T)) - assert descr2.get_result_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr2.get_return_type() == history.REF + assert descr2.get_result_size() == rffi.sizeof(lltype.Ptr(T)) + assert descr2.get_result_type() == history.REF assert descr2.arg_classes == "r" # U = lltype.GcStruct('U', ('x', lltype.Signed)) assert descr2 == get_call_descr(c0, [lltype.Ptr(U)], lltype.Ptr(U)) # V = lltype.Struct('V', ('x', lltype.Signed)) - assert (get_call_descr(c0, [], lltype.Ptr(V)).get_return_type() == + assert (get_call_descr(c0, [], lltype.Ptr(V)).get_result_type() == history.INT) # - assert (get_call_descr(c0, [], lltype.Void).get_return_type() == + assert (get_call_descr(c0, [], lltype.Void).get_result_type() == history.VOID) # descr4 = get_call_descr(c0, [lltype.Float, lltype.Float], lltype.Float) - assert descr4.get_result_size(False) == rffi.sizeof(lltype.Float) - assert descr4.get_return_type() == history.FLOAT + assert descr4.get_result_size() == rffi.sizeof(lltype.Float) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c0, [lltype.SingleFloat], lltype.SingleFloat) - assert descr5.get_result_size(False) == rffi.sizeof(lltype.SingleFloat) - assert descr5.get_return_type() == "S" + assert descr5.get_result_size() == rffi.sizeof(lltype.SingleFloat) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_get_call_descr_not_translated_longlong(): @@ -305,13 +260,13 @@ c0 = GcCache(False) # descr5 = get_call_descr(c0, [lltype.SignedLongLong], lltype.Signed) - assert descr5.get_result_size(False) == 4 - assert descr5.get_return_type() == history.INT + assert 
descr5.get_result_size() == 4 + assert descr5.get_result_type() == history.INT assert descr5.arg_classes == "L" # descr6 = get_call_descr(c0, [lltype.Signed], lltype.SignedLongLong) - assert descr6.get_result_size(False) == 8 - assert descr6.get_return_type() == "L" + assert descr6.get_result_size() == 8 + assert descr6.get_result_type() == "L" assert descr6.arg_classes == "i" def test_get_call_descr_translated(): @@ -319,18 +274,18 @@ T = lltype.GcStruct('T') U = lltype.GcStruct('U', ('x', lltype.Signed)) descr3 = get_call_descr(c1, [lltype.Ptr(T)], lltype.Ptr(U)) - assert isinstance(descr3.get_result_size(True), Symbolic) - assert descr3.get_return_type() == history.REF + assert isinstance(descr3.get_result_size(), Symbolic) + assert descr3.get_result_type() == history.REF assert descr3.arg_classes == "r" # descr4 = get_call_descr(c1, [lltype.Float, lltype.Float], lltype.Float) - assert isinstance(descr4.get_result_size(True), Symbolic) - assert descr4.get_return_type() == history.FLOAT + assert isinstance(descr4.get_result_size(), Symbolic) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c1, [lltype.SingleFloat], lltype.SingleFloat) - assert isinstance(descr5.get_result_size(True), Symbolic) - assert descr5.get_return_type() == "S" + assert isinstance(descr5.get_result_size(), Symbolic) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_call_descr_extra_info(): @@ -358,6 +313,10 @@ def test_repr_of_descr(): + def repr_of_descr(descr): + s = descr.repr_of_descr() + assert ',' not in s # makes the life easier for pypy.tool.jitlogparser + return s c0 = GcCache(False) T = lltype.GcStruct('T') S = lltype.GcStruct('S', ('x', lltype.Char), @@ -365,33 +324,34 @@ ('z', lltype.Ptr(T))) descr1 = get_size_descr(c0, S) s = symbolic.get_size(S, False) - assert descr1.repr_of_descr() == '' % s + assert repr_of_descr(descr1) == '' % s # descr2 = get_field_descr(c0, S, 'y') o, _ = symbolic.get_field_token(S, 'y', False) - assert descr2.repr_of_descr() == '' % o + assert repr_of_descr(descr2) == '' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) - assert descr2i.repr_of_descr() == '' % o + assert repr_of_descr(descr2i) == '' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) - assert descr3.repr_of_descr() == '' + o = symbolic.get_size(lltype.Ptr(S), False) + assert repr_of_descr(descr3) == '' % o # descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert descr3i.repr_of_descr() == '' + assert repr_of_descr(descr3i) == '' # descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) - assert 'GcPtrCallDescr' in descr4.repr_of_descr() + assert repr_of_descr(descr4) == '' % o # descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) - assert 'CharCallDescr' in descr4i.repr_of_descr() + assert repr_of_descr(descr4i) == '' # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) - assert 'FloatCallDescr' in descr4f.repr_of_descr() + assert repr_of_descr(descr4f) == '' # descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat) - assert 'SingleFloatCallDescr' in descr5f.repr_of_descr() + assert repr_of_descr(descr5f) == '' def test_call_stubs_1(): c0 = GcCache(False) @@ -401,10 +361,10 @@ def f(a, b): return 'c' - call_stub = descr1.call_stub fnptr = llhelper(lltype.Ptr(lltype.FuncType(ARGS, RES)), f) - res = call_stub(rffi.cast(lltype.Signed, fnptr), [1, 2], None, None) + res = 
descr1.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [1, 2], None, None) assert res == ord('c') def test_call_stubs_2(): @@ -421,8 +381,8 @@ a = lltype.malloc(ARRAY, 3) opaquea = lltype.cast_opaque_ptr(llmemory.GCREF, a) a[0] = 1 - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [], [opaquea], [longlong.getfloatstorage(3.5)]) + res = descr2.call_stub_f(rffi.cast(lltype.Signed, fnptr), + [], [opaquea], [longlong.getfloatstorage(3.5)]) assert longlong.getrealfloat(res) == 4.5 def test_call_stubs_single_float(): @@ -445,6 +405,22 @@ a = intmask(singlefloat2uint(r_singlefloat(-10.0))) b = intmask(singlefloat2uint(r_singlefloat(3.0))) c = intmask(singlefloat2uint(r_singlefloat(2.0))) - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [a, b, c], [], []) + res = descr2.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [a, b, c], [], []) assert float(uint2singlefloat(rffi.r_uint(res))) == -11.5 + +def test_field_arraylen_descr(): + c0 = GcCache(True) + A1 = lltype.GcArray(lltype.Signed) + fielddescr = get_field_arraylen_descr(c0, A1) + assert isinstance(fielddescr, FieldDescr) + ofs = fielddescr.offset + assert repr(ofs) == '< ArrayLengthOffset >' + # + fielddescr = get_field_arraylen_descr(c0, rstr.STR) + ofs = fielddescr.offset + assert repr(ofs) == ("< " + " 'chars'> + < ArrayLengthOffset" + " > >") + # caching: + assert fielddescr is get_field_arraylen_descr(c0, rstr.STR) diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -1,5 +1,6 @@ from pypy.rlib.libffi import types from pypy.jit.codewriter.longlong import is_64_bit +from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport.ffisupport import * @@ -15,7 +16,9 @@ args = [types.sint, types.pointer] descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, None, ffi_flags=42) - assert isinstance(descr, DynamicIntCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_type == 'i' + assert descr.result_flag == FLAG_SIGNED assert descr.arg_classes == 'ii' assert descr.get_ffi_flags() == 42 @@ -24,18 +27,20 @@ assert descr is None # missing floats descr = get_call_descr_dynamic(FakeCPU(supports_floats=True), args, types.void, None, ffi_flags=43) - assert isinstance(descr, VoidCallDescr) + assert descr.result_type == 'v' + assert descr.result_flag == FLAG_VOID assert descr.arg_classes == 'ifi' assert descr.get_ffi_flags() == 43 descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_SIGNED assert descr.is_result_signed() == True descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert isinstance(descr, CallDescr) + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_UNSIGNED assert descr.is_result_signed() == False if not is_64_bit: @@ -44,7 +49,9 @@ assert descr is None # missing longlongs descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True), [], types.slonglong, None, ffi_flags=43) - assert isinstance(descr, LongLongCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_flag == FLAG_FLOAT + assert descr.result_type == 'L' assert descr.get_ffi_flags() == 43 else: assert types.slonglong 
is types.slong @@ -53,6 +60,6 @@ assert descr is None # missing singlefloats descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True), [], types.float, None, ffi_flags=44) - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - assert isinstance(descr, SingleFloatCallDescr) + assert descr.result_flag == FLAG_UNSIGNED + assert descr.result_type == 'S' assert descr.get_ffi_flags() == 44 diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -6,6 +6,7 @@ from pypy.jit.backend.llsupport.gc import * from pypy.jit.backend.llsupport import symbolic from pypy.jit.metainterp.gc import get_description +from pypy.jit.metainterp.history import BoxPtr, BoxInt, ConstPtr from pypy.jit.metainterp.resoperation import get_deep_immutable_oplist from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE @@ -15,12 +16,12 @@ gc_ll_descr = GcLLDescr_boehm(None, None, None) # record = [] - prev_funcptr_for_new = gc_ll_descr.funcptr_for_new - def my_funcptr_for_new(size): - p = prev_funcptr_for_new(size) + prev_malloc_fn_ptr = gc_ll_descr.malloc_fn_ptr + def my_malloc_fn_ptr(size): + p = prev_malloc_fn_ptr(size) record.append((size, p)) return p - gc_ll_descr.funcptr_for_new = my_funcptr_for_new + gc_ll_descr.malloc_fn_ptr = my_malloc_fn_ptr # # ---------- gc_malloc ---------- S = lltype.GcStruct('S', ('x', lltype.Signed)) @@ -32,8 +33,8 @@ A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(gc_ll_descr, A) p = gc_ll_descr.gc_malloc_array(arraydescr, 10) - assert record == [(arraydescr.get_base_size(False) + - 10 * arraydescr.get_item_size(False), p)] + assert record == [(arraydescr.basesize + + 10 * arraydescr.itemsize, p)] del record[:] # ---------- gc_malloc_str ---------- p = gc_ll_descr.gc_malloc_str(10) @@ -246,24 +247,28 @@ def __init__(self): self.record = [] + def _malloc(self, type_id, size): + tid = llop.combine_ushort(lltype.Signed, type_id, 0) + x = llmemory.raw_malloc(self.gcheaderbuilder.size_gc_header + size) + x += self.gcheaderbuilder.size_gc_header + return x, tid + def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size, has_finalizer, has_light_finalizer, contains_weakptr): assert not contains_weakptr - assert not has_finalizer # in these tests - assert not has_light_finalizer # in these tests - p = llmemory.raw_malloc(size) + assert not has_finalizer + assert not has_light_finalizer + p, tid = self._malloc(type_id, size) p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("fixedsize", repr(size), tid, p)) return p def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, itemsize, offset_to_length): - p = llmemory.raw_malloc(size + itemsize * length) + p, tid = self._malloc(type_id, size + itemsize * length) (p + offset_to_length).signed[0] = length p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("varsize", tid, length, repr(size), repr(itemsize), repr(offset_to_length), p)) @@ -322,43 +327,40 @@ gc_ll_descr = GcLLDescr_framework(gcdescr, FakeTranslator(), None, llop1) gc_ll_descr.initialize() + llop1.gcheaderbuilder = gc_ll_descr.gcheaderbuilder self.llop1 = llop1 self.gc_ll_descr = gc_ll_descr self.fake_cpu = FakeCPU() - def test_args_for_new(self): - S = lltype.GcStruct('S', ('x', lltype.Signed)) - sizedescr = 
get_size_descr(self.gc_ll_descr, S) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed - A = lltype.GcArray(lltype.Signed) - arraydescr = get_array_descr(self.gc_ll_descr, A) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed +## def test_args_for_new(self): +## S = lltype.GcStruct('S', ('x', lltype.Signed)) +## sizedescr = get_size_descr(self.gc_ll_descr, S) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed +## A = lltype.GcArray(lltype.Signed) +## arraydescr = get_array_descr(self.gc_ll_descr, A) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed def test_gc_malloc(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) sizedescr = get_size_descr(self.gc_ll_descr, S) p = self.gc_ll_descr.gc_malloc(sizedescr) - assert self.llop1.record == [("fixedsize", - repr(sizedescr.size), + assert lltype.typeOf(p) == llmemory.GCREF + assert self.llop1.record == [("fixedsize", repr(sizedescr.size), sizedescr.tid, p)] - assert repr(self.gc_ll_descr.args_for_new(sizedescr)) == repr( - [sizedescr.size, sizedescr.tid]) def test_gc_malloc_array(self): A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(self.gc_ll_descr, A) p = self.gc_ll_descr.gc_malloc_array(arraydescr, 10) assert self.llop1.record == [("varsize", arraydescr.tid, 10, - repr(arraydescr.get_base_size(True)), - repr(arraydescr.get_item_size(True)), - repr(arraydescr.get_ofs_length(True)), + repr(arraydescr.basesize), + repr(arraydescr.itemsize), + repr(arraydescr.lendescr.offset), p)] - assert repr(self.gc_ll_descr.args_for_new_array(arraydescr)) == repr( - [arraydescr.get_item_size(True), arraydescr.tid]) def test_gc_malloc_str(self): p = self.gc_ll_descr.gc_malloc_str(10) @@ -404,10 +406,11 @@ gc_ll_descr = self.gc_ll_descr llop1 = self.llop1 # - newops = [] + rewriter = GcRewriterAssembler(gc_ll_descr, None) + newops = rewriter.newops v_base = BoxPtr() v_value = BoxPtr() - gc_ll_descr._gen_write_barrier(newops, v_base, v_value) + rewriter.gen_write_barrier(v_base, v_value) assert llop1.record == [] assert len(newops) == 1 assert newops[0].getopnum() == rop.COND_CALL_GC_WB @@ -427,8 +430,7 @@ operations = gc_ll_descr.rewrite_assembler(None, operations, []) assert len(operations) == 0 - def test_rewrite_assembler_1(self): - # check recording of ConstPtrs + def test_record_constptrs(self): class MyFakeCPU(object): def cast_adr_to_int(self, adr): assert adr == "some fake address" @@ -455,211 +457,6 @@ assert operations2 == operations assert gcrefs == [s_gcref] - def test_rewrite_assembler_2(self): - # check write barriers before SETFIELD_GC - v_base = BoxPtr() - v_value = BoxPtr() - field_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETFIELD_GC, [v_base, v_value], None, - descr=field_descr), - ] - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, operations, - []) - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETFIELD_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_value - assert operations[1].getdescr() == field_descr - - def 
test_rewrite_assembler_3(self): - # check write barriers before SETARRAYITEM_GC - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_4(self): - # check write barriers before SETARRAYITEM_GC, - # if we have actually a write_barrier_from_array. - self.llop1._have_wb_from_array = True - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - if isinstance(v_new_length, ConstInt) and v_new_length.value < 130: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - else: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB_ARRAY - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_index - assert operations[0].getarg(2) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_5(self): - S = lltype.GcStruct('S') - A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) - interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, - A.OF, 'x') - wbdescr = 
self.gc_ll_descr.write_barrier_descr - ops = parse(""" - [p1, p2] - setinteriorfield_gc(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - expected = parse(""" - [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setinteriorfield_raw(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - # no write barrier - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_2(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - wbdescr = self.gc_ll_descr.write_barrier_descr - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_3(self): - A = lltype.GcArray(lltype.Ptr(lltype.GcStruct('S'))) - arraydescr = get_array_descr(self.gc_ll_descr, A) - ops = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) class TestFrameworkMiniMark(TestFramework): gc = 'minimark' diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -42,8 +42,13 @@ def frame_size(self, box_type): if box_type == FLOAT: return 2 + elif box_type == INT: + return 1 else: - return 1 + raise ValueError(box_type) + def get_loc_index(self, loc): + assert isinstance(loc, FakeFramePos) + return loc.pos class MockAsm(object): def __init__(self): @@ -282,7 +287,7 @@ rm.force_allocate_reg(b) rm.before_call() assert len(rm.reg_bindings) == 2 - assert fm.frame_depth == 2 + assert fm.get_frame_depth() == 2 
assert len(asm.moves) == 2 rm._check_invariants() rm.after_call(boxes[-1]) @@ -305,7 +310,7 @@ rm.force_allocate_reg(b) rm.before_call(save_all_regs=True) assert len(rm.reg_bindings) == 0 - assert fm.frame_depth == 4 + assert fm.get_frame_depth() == 4 assert len(asm.moves) == 4 rm._check_invariants() rm.after_call(boxes[-1]) @@ -327,7 +332,7 @@ xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm) xrm.loc(f0) rm.loc(b0) - assert fm.frame_depth == 3 + assert fm.get_frame_depth() == 3 @@ -348,3 +353,123 @@ spilled2 = rm.force_allocate_reg(b5) assert spilled2 is loc rm._check_invariants() + + + def test_hint_frame_locations_1(self): + b0, = newboxes(0) + fm = TFrameManager() + loc123 = FakeFramePos(123, INT) + fm.hint_frame_locations[b0] = loc123 + assert fm.get_frame_depth() == 0 + loc = fm.loc(b0) + assert loc == loc123 + assert fm.get_frame_depth() == 124 + + def test_hint_frame_locations_2(self): + b0, b1, b2 = newboxes(0, 1, 2) + longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_allocate_reg(b2) + rm.force_spill_var(b0) + loc = rm.loc(b0) + assert isinstance(loc, FakeFramePos) + assert fm.get_loc_index(loc) == 0 + rm.position = 1 + assert fm.used == [True] + rm.possibly_free_var(b0) + assert fm.used == [False] + # + fm.hint_frame_locations[b1] = loc + rm.force_spill_var(b1) + loc1 = rm.loc(b1) + assert loc1 == loc + assert fm.used == [True] + # + fm.hint_frame_locations[b2] = loc + rm.force_spill_var(b2) + loc2 = rm.loc(b2) + assert loc2 != loc1 # because it was not free + assert fm.used == [True, True] + # + rm._check_invariants() + + def test_frame_manager_basic(self): + b0, b1 = newboxes(0, 1) + fm = TFrameManager() + loc0 = fm.loc(b0) + assert fm.get_loc_index(loc0) == 0 + # + assert fm.get(b1) is None + loc1 = fm.loc(b1) + assert fm.get_loc_index(loc1) == 1 + assert fm.get(b1) == loc1 + # + loc0b = fm.loc(b0) + assert loc0b == loc0 + # + fm.loc(BoxInt()) + assert fm.get_frame_depth() == 3 + # + f0 = BoxFloat() + locf0 = fm.loc(f0) + assert fm.get_loc_index(locf0) == 4 + assert fm.get_frame_depth() == 6 + # + f1 = BoxFloat() + locf1 = fm.loc(f1) + assert fm.get_loc_index(locf1) == 6 + assert fm.get_frame_depth() == 8 + assert fm.used == [True, True, True, False, True, True, True, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(f1) + assert fm.used == [False, True, True, False, True, True, False, False] + # + fm.reserve_location_in_frame(1) + assert fm.get_frame_depth() == 9 + assert fm.used == [False, True, True, False, True, True, False, False, True] + # + assert b0 not in fm.bindings + fm.set_binding(b0, loc0) + assert b0 in fm.bindings + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + b3 = BoxInt() + assert not fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) # already free + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b3) + assert 
fm.used == [False, True, True, False, True, True, False, False, True] + f3 = BoxFloat() + assert not fm.try_to_reuse_location(f3, fm.frame_pos(0, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(1, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(2, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(3, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(4, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(5, FLOAT)) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(f3, fm.frame_pos(6, FLOAT)) + assert fm.used == [False, True, True, False, True, True, True, True, True] + # + fm.used = [False] + assert fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True, True] + # + fm.used = [True] + assert not fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True] diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -0,0 +1,668 @@ +from pypy.jit.backend.llsupport.descr import * +from pypy.jit.backend.llsupport.gc import * +from pypy.jit.metainterp.gc import get_description +from pypy.jit.tool.oparser import parse +from pypy.jit.metainterp.optimizeopt.util import equaloplists +from pypy.jit.codewriter.heaptracker import register_known_gctype + + +class Evaluator(object): + def __init__(self, scope): + self.scope = scope + def __getitem__(self, key): + return eval(key, self.scope) + + +class RewriteTests(object): + def check_rewrite(self, frm_operations, to_operations, **namespace): + S = lltype.GcStruct('S', ('x', lltype.Signed), + ('y', lltype.Signed)) + sdescr = get_size_descr(self.gc_ll_descr, S) + sdescr.tid = 1234 + # + T = lltype.GcStruct('T', ('y', lltype.Signed), + ('z', lltype.Ptr(S)), + ('t', lltype.Signed)) + tdescr = get_size_descr(self.gc_ll_descr, T) + tdescr.tid = 5678 + tzdescr = get_field_descr(self.gc_ll_descr, T, 'z') + # + A = lltype.GcArray(lltype.Signed) + adescr = get_array_descr(self.gc_ll_descr, A) + adescr.tid = 4321 + alendescr = adescr.lendescr + # + B = lltype.GcArray(lltype.Char) + bdescr = get_array_descr(self.gc_ll_descr, B) + bdescr.tid = 8765 + blendescr = bdescr.lendescr + # + C = lltype.GcArray(lltype.Ptr(S)) + cdescr = get_array_descr(self.gc_ll_descr, C) + cdescr.tid = 8111 + clendescr = cdescr.lendescr + # + E = lltype.GcStruct('Empty') + edescr = get_size_descr(self.gc_ll_descr, E) + edescr.tid = 9000 + # + vtable_descr = self.gc_ll_descr.fielddescr_vtable + O = lltype.GcStruct('O', ('parent', rclass.OBJECT), + ('x', lltype.Signed)) + o_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) + register_known_gctype(self.cpu, o_vtable, O) + # + tiddescr = self.gc_ll_descr.fielddescr_tid + wbdescr = self.gc_ll_descr.write_barrier_descr + WORD = globals()['WORD'] + # + strdescr = self.gc_ll_descr.str_descr + unicodedescr = self.gc_ll_descr.unicode_descr + strlendescr = strdescr.lendescr + unicodelendescr = unicodedescr.lendescr + # + namespace.update(locals()) + # + for funcname in self.gc_ll_descr._generated_functions: + namespace[funcname] = self.gc_ll_descr.get_malloc_fn(funcname) + namespace[funcname + '_descr'] = getattr(self.gc_ll_descr, + '%s_descr' % funcname) + # + ops = parse(frm_operations, namespace=namespace) + expected = parse(to_operations % Evaluator(namespace), + namespace=namespace) + operations = 
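The 'used' bookkeeping exercised by test_frame_manager_basic above can be summarized with a tiny model; slot sizes of 1 for ints and 2 for floats follow TFrameManager.frame_size(), and the class below is an illustration only (it ignores the even-slot alignment the real manager applies to floats):

    class ToyFrameManager(object):
        def __init__(self):
            self.used = []     # one bool per frame slot

        def try_to_reuse_location(self, index, size):
            # grow the frame if needed, then take the slots only if all are free
            while len(self.used) < index + size:
                self.used.append(False)
            if any(self.used[index:index + size]):
                return False
            for i in range(index, index + size):
                self.used[i] = True
            return True

        def mark_as_free(self, index, size):
            for i in range(index, index + size):
                self.used[i] = False

    fm = ToyFrameManager()
    fm.used = [False]
    assert fm.try_to_reuse_location(0, size=2)       # a float fits, frame grows
    assert fm.used == [True, True]
    assert not fm.try_to_reuse_location(1, size=1)   # slot 1 is now taken
    fm.mark_as_free(0, size=2)
    assert fm.try_to_reuse_location(1, size=1)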
self.gc_ll_descr.rewrite_assembler(self.cpu, + ops.operations, + []) + equaloplists(operations, expected.operations) + + +class TestBoehm(RewriteTests): + def setup_method(self, meth): + class FakeCPU(object): + def sizeof(self, STRUCT): + return SizeDescrWithVTable(102) + self.cpu = FakeCPU() + self.gc_ll_descr = GcLLDescr_boehm(None, None, None) + + def test_new(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_no_collapsing(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + p1 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_new_array_fixed(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(adescr.basesize + 10 * adescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_new_array_variable(self): + self.check_rewrite(""" + [i1] + p0 = new_array(i1, descr=adescr) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(adescr.basesize)d, \ + i1, \ + %(adescr.itemsize)d, \ + %(adescr.lendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_newstr(self): + self.check_rewrite(""" + [i1] + p0 = newstr(i1) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(strdescr.basesize)d, \ + i1, \ + %(strdescr.itemsize)d, \ + %(strlendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_newunicode(self): + self.check_rewrite(""" + [i1] + p0 = newunicode(10) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(unicodedescr.basesize + \ + 10 * unicodedescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=unicodelendescr) + jump() + """) + + +class TestFramework(RewriteTests): + def setup_method(self, meth): + class config_(object): + class translation(object): + gc = 'hybrid' + gcrootfinder = 'asmgcc' + gctransformer = 'framework' + gcremovetypeptr = False + gcdescr = get_description(config_) + self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, + really_not_translated=True) + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: True) + # + class FakeCPU(object): + def sizeof(self, STRUCT): + descr = SizeDescrWithVTable(102) + descr.tid = 9315 + return descr + self.cpu = FakeCPU() + + def test_rewrite_assembler_new_to_malloc(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(sdescr.size)d) + setfield_gc(p0, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new3_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=tdescr) + p2 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + 
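check_rewrite() above pushes the expected trace through ordinary '%' string formatting with the Evaluator helper, so a %(...)d placeholder may contain an arbitrary expression over the test's descrs. For example:

    class Evaluator(object):
        # same idea as the helper defined in test_rewrite.py above
        def __init__(self, scope):
            self.scope = scope
        def __getitem__(self, key):
            return eval(key, self.scope)

    scope = {'basesize': 8, 'itemsize': 4}
    template = "p0 = call_malloc_nursery(%(basesize + 10 * itemsize)d)"
    assert template % Evaluator(scope) == "p0 = call_malloc_nursery(48)"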
tdescr.size + sdescr.size)d) + setfield_gc(p0, 1234, descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 5678, descr=tiddescr) + p2 = int_add(p1, %(tdescr.size)d) + setfield_gc(p2, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 4321, descr=tiddescr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_new_and_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + \ + adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 1234, descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 4321, descr=tiddescr) + setfield_gc(p1, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_round_up(self): + self.check_rewrite(""" + [] + p0 = new_array(6, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(bdescr.basesize + 8)d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 6, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_round_up_always(self): + self.check_rewrite(""" + [] + p0 = new_array(5, descr=bdescr) + p1 = new_array(5, descr=bdescr) + p2 = new_array(5, descr=bdescr) + p3 = new_array(5, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4 * (bdescr.basesize + 8))d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 5, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 8)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 5, descr=blendescr) + p2 = int_add(p1, %(bdescr.basesize + 8)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 5, descr=blendescr) + p3 = int_add(p2, %(bdescr.basesize + 8)d) + setfield_gc(p3, 8765, descr=tiddescr) + setfield_gc(p3, 5, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_minimal_size(self): + self.check_rewrite(""" + [] + p0 = new(descr=edescr) + p1 = new(descr=edescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4*WORD)d) + setfield_gc(p0, 9000, descr=tiddescr) + p1 = int_add(p0, %(2*WORD)d) + setfield_gc(p1, 9000, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_variable_size(self): + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=bdescr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, i0, \ + descr=malloc_array_descr) + jump(i0) + """) + + def test_rewrite_assembler_nonstandard_array(self): + # a non-standard array is a bit hard to get; e.g. GcArray(Float) + # is like that on Win32, but not on Linux. Build one manually... 
+ NONSTD = lltype.GcArray(lltype.Float) + nonstd_descr = get_array_descr(self.gc_ll_descr, NONSTD) + nonstd_descr.tid = 6464 + nonstd_descr.basesize = 64 # <= hacked + nonstd_descr.itemsize = 8 + nonstd_descr_gcref = 123 + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=nonstd_descr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array_nonstandard), \ + 64, 8, \ + %(nonstd_descr.lendescr.offset)d, \ + 6464, i0, \ + descr=malloc_array_nonstandard_descr) + jump(i0) + """, nonstd_descr=nonstd_descr) + + def test_rewrite_assembler_maximal_size_1(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_array(103, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(bdescr.basesize + 104)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_maximal_size_2(self): + self.gc_ll_descr.max_size_of_young_obj = 300 + self.check_rewrite(""" + [] + p0 = new_array(101, descr=bdescr) + p1 = new_array(102, descr=bdescr) # two new_arrays can be combined + p2 = new_array(103, descr=bdescr) # but not all three + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(2 * (bdescr.basesize + 104))d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 101, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 104)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 102, descr=blendescr) + p2 = call_malloc_nursery( \ + %(bdescr.basesize + 104)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_huge_size(self): + # "huge" is defined as "larger than 0xffffff bytes, or 16MB" + self.check_rewrite(""" + [] + p0 = new_array(20000000, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, 20000000, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(104) # rounded up + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_new_with_vtable_too_big(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_rewrite_assembler_newstr_newunicode(self): + self.check_rewrite(""" + [i2] + p0 = newstr(14) + p1 = newunicode(10) + p2 = newunicode(i2) + p3 = newstr(i2) + jump() + """, """ + [i2] + p0 = call_malloc_nursery( \ + %(strdescr.basesize + 16 * strdescr.itemsize + \ + unicodedescr.basesize + 10 * unicodedescr.itemsize)d) + setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr) + setfield_gc(p0, 14, descr=strlendescr) + p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) + setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) + setfield_gc(p1, 10, descr=unicodelendescr) + p2 = call_malloc_gc(ConstClass(malloc_unicode), i2, \ + descr=malloc_unicode_descr) + p3 = call_malloc_gc(ConstClass(malloc_str), i2, \ + descr=malloc_str_descr) + jump() + """) + + def test_write_barrier_before_setfield_gc(self): + 
self.check_rewrite(""" + [p1, p2] + setfield_gc(p1, p2, descr=tzdescr) + jump() + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setfield_raw(p1, p2, descr=tzdescr) + jump() + """) + + def test_write_barrier_before_array_without_from_array(self): + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: False) + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_short_array(self): + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(129, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 129 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 129, descr=clendescr) + call(123456) + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_long_array(self): + # the limit of "being too long" is fixed, arbitrarily, at 130 + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(130, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 130 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 130, descr=clendescr) + call(123456) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_unknown_array(self): + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_label_makes_size_unknown(self): + self.check_rewrite(""" + [i2, p3] + p1 = new_array(5, descr=cdescr) + label(p1, i2, p3) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 5, descr=clendescr) + label(p1, i2, p3) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_setinteriorfield_gc(self): + S1 = lltype.GcStruct('S1') + INTERIOR = lltype.GcArray(('z', lltype.Ptr(S1))) + interiordescr = get_array_descr(self.gc_ll_descr, INTERIOR) + interiordescr.tid = 1291 + interiorlendescr = interiordescr.lendescr + interiorzdescr = get_interiorfield_descr(self.gc_ll_descr, + INTERIOR, 'z') + self.check_rewrite(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, interiorzdescr=interiorzdescr) + + def test_initialization_store(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_2(self): + self.check_rewrite(""" + [] + p0 = new(descr=tdescr) + p1 = 
new(descr=sdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(tdescr.size + sdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = int_add(p0, %(tdescr.size)d) + setfield_gc(p1, 1234, descr=tiddescr) + # <<>> + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_array(self): + self.check_rewrite(""" + [p1, i2] + p0 = new_array(5, descr=cdescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """, """ + [p1, i2] + p0 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p0, 8111, descr=tiddescr) + setfield_gc(p0, 5, descr=clendescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """) + + def test_non_initialization_store(self): + self.check_rewrite(""" + [i0] + p0 = new(descr=tdescr) + p1 = newstr(i0) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [i0] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = call_malloc_gc(ConstClass(malloc_str), i0, \ + descr=malloc_str_descr) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) + + def test_non_initialization_store_label(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + label(p0, p1) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + label(p0, p1) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -1,5 +1,6 @@ from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.jit.metainterp import history +from pypy.rpython.lltypesystem import lltype class AbstractCPU(object): @@ -84,24 +85,21 @@ """Print a disassembled version of looptoken to stdout""" raise NotImplementedError - def execute_token(self, looptoken): - """Execute the generated code referenced by the looptoken. + def execute_token(self, looptoken, *args): + """NOT_RPYTHON (for tests only) + Execute the generated code referenced by the looptoken. Returns the descr of the last executed operation: either the one attached to the failing guard, or the one attached to the FINISH. - Use set_future_value_xxx() before, and get_latest_value_xxx() after. + Use get_latest_value_xxx() afterwards to read the result(s). """ - raise NotImplementedError + argtypes = [lltype.typeOf(x) for x in args] + execute = self.make_execute_token(*argtypes) + return execute(looptoken, *args) - def set_future_value_int(self, index, intvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_float(self, index, floatvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_ref(self, index, objvalue): - """Set the value for the index'th argument for the loop to run.""" + def make_execute_token(self, *argtypes): + """Must make and return an execute_token() function that will be + called with the given argtypes. 
+ """ raise NotImplementedError def get_latest_value_int(self, index): diff --git a/pypy/jit/backend/test/calling_convention_test.py b/pypy/jit/backend/test/calling_convention_test.py --- a/pypy/jit/backend/test/calling_convention_test.py +++ b/pypy/jit/backend/test/calling_convention_test.py @@ -2,7 +2,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, ConstInt, ConstPtr, BoxObj, Const, ConstObj, BoxFloat, ConstFloat) @@ -40,17 +40,18 @@ local_floats = list(floats) local_ints = list(ints) expected_result = 0.0 + arguments = [] for i in range(len(args)): x = args[i] if x[0] == 'f': x = local_floats.pop() t = longlong.getfloatstorage(x) - self.cpu.set_future_value_float(i, t) + arguments.append(t) else: x = local_ints.pop() - self.cpu.set_future_value_int(i, x) + arguments.append(x) expected_result += x - return expected_result + return arguments, expected_result @classmethod def get_funcbox(cls, cpu, func_ptr): @@ -107,12 +108,12 @@ ops += 'finish(f99, %s)\n' % arguments loop = parse(ops, namespace=locals()) - looptoken = LoopToken() + looptoken = JitCellToken() done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - expected_result = self._prepare_args(args, floats, ints) + argvals, expected_result = self._prepare_args(args, floats, ints) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(x - expected_result) < 0.0001 @@ -253,13 +254,13 @@ called_ops += 'finish(f%d, descr=fdescr3)\n' % total_index # compile called loop called_loop = parse(called_ops, namespace=locals()) - called_looptoken = LoopToken() + called_looptoken = JitCellToken() called_looptoken.outermost_jitdriver_sd = FakeJitDriverSD() done_number = self.cpu.get_fail_descr_number(called_loop.operations[-1].getdescr()) self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) - expected_result = self._prepare_args(args, floats, ints) - res = cpu.execute_token(called_looptoken) + argvals, expected_result = self._prepare_args(args, floats, ints) + res = cpu.execute_token(called_looptoken, *argvals) assert res.identifier == 3 t = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(t - expected_result) < 0.0001 @@ -284,12 +285,12 @@ # we want to take the fast path self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # prepare call to called_loop - self._prepare_args(args, floats, ints) - res = cpu.execute_token(othertoken) + argvals, _ = self._prepare_args(args, floats, ints) + res = cpu.execute_token(othertoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert res.identifier == 4 assert abs(x - expected_result) < 0.0001 diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -3,7 +3,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, TargetToken, ConstInt, ConstPtr, BoxObj, ConstObj, BoxFloat, ConstFloat) @@ -32,22 +32,19 @@ result_type, valueboxes, descr) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - j = 0 + args = [] for box in inputargs: if isinstance(box, BoxInt): - 
self.cpu.set_future_value_int(j, box.getint()) - j += 1 + args.append(box.getint()) elif isinstance(box, (BoxPtr, BoxObj)): - self.cpu.set_future_value_ref(j, box.getref_base()) - j += 1 + args.append(box.getref_base()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(j, box.getfloatstorage()) - j += 1 + args.append(box.getfloatstorage()) else: raise NotImplementedError(box) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *args) if res is operations[-1].getdescr(): self.guard_failed = False else: @@ -106,10 +103,9 @@ ResOperation(rop.FINISH, [i1], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) res = self.cpu.get_latest_value_int(0) assert res == 3 assert fail.identifier == 1 @@ -118,19 +114,20 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 10 @@ -139,19 +136,22 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 44) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(2) assert res == 10 @@ -162,15 +162,17 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = weakref.ref(operations[2]) self.cpu.compile_loop(inputargs, operations, looptoken) @@ -190,15 +192,17 @@ i2 = BoxInt() 
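These hunks replace the old two-step set_future_value_xxx()/execute_token() protocol with a direct execute_token(looptoken, *args) call, and replace LoopToken jump targets with an explicit LABEL carrying a TargetToken. Condensed into one place (assuming the usual imports and test fixtures from runner_test.py), the new pattern exercised by test_compile_loop is:

    looptoken = JitCellToken()
    targettoken = TargetToken()
    operations = [
        ResOperation(rop.LABEL, [i0], None, descr=targettoken),
        ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1),
        ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2),
        ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)),
        ResOperation(rop.JUMP, [i1], None, descr=targettoken),
    ]
    operations[3].setfailargs([i1])
    cpu.compile_loop([i0], operations, looptoken)
    # arguments are now passed positionally instead of via set_future_value_*()
    fail = cpu.execute_token(looptoken, 2)
    assert fail.identifier == 2
    assert cpu.get_latest_value_int(0) == 10
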
faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -206,14 +210,13 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -226,17 +229,21 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() + i3 = BoxInt() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -244,14 +251,13 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -261,19 +267,20 @@ i1 = BoxInt() i2 = BoxInt() faildescr1 = BasicFailDescr(1) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + operations[3].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail is faildescr1 count = self.cpu.get_latest_value_count() @@ -290,18 +297,17 @@ return AbstractFailDescr.__setattr__(self, name, value) 
py.test.fail("finish descrs should not be touched") faildescr = UntouchableFailDescr() # to check that is not touched - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [i0], None, descr=faildescr) ] self.cpu.compile_loop([i0], operations, looptoken) - self.cpu.set_future_value_int(0, 99) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 99) assert fail is faildescr res = self.cpu.get_latest_value_int(0) assert res == 99 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [ConstInt(42)], None, descr=faildescr) ] @@ -311,7 +317,7 @@ res = self.cpu.get_latest_value_int(0) assert res == 42 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [], None, descr=faildescr) ] @@ -320,20 +326,19 @@ assert fail is faildescr if self.cpu.supports_floats: - looptoken = LoopToken() + looptoken = JitCellToken() f0 = BoxFloat() operations = [ ResOperation(rop.FINISH, [f0], None, descr=faildescr) ] self.cpu.compile_loop([f0], operations, looptoken) value = longlong.getfloatstorage(-61.25) - self.cpu.set_future_value_float(0, value) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, value) assert fail is faildescr res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == -61.25 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [constfloat(42.5)], None, descr=faildescr) ] @@ -350,20 +355,20 @@ z = BoxInt(579) t = BoxInt(455) u = BoxInt(0) # False - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [y, x], None, descr=targettoken), ResOperation(rop.INT_ADD, [x, y], z), ResOperation(rop.INT_SUB, [y, ConstInt(1)], t), ResOperation(rop.INT_EQ, [t, ConstInt(0)], u), ResOperation(rop.GUARD_FALSE, [u], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [z, t], None, descr=looptoken), + ResOperation(rop.JUMP, [t, z], None, descr=targettoken), ] operations[-2].setfailargs([t, z]) cpu.compile_loop([x, y], operations, looptoken) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, 0, 10) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_int(1) == 55 @@ -419,14 +424,12 @@ ] ops[1].setfailargs([v_res]) # - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([v1, v2], ops, looptoken) for x, y, z in testcases: excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, x) - self.cpu.set_future_value_int(1, y) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, x, y) if (z == boom) ^ reversed: assert fail.identifier == 1 else: @@ -1082,16 +1085,18 @@ inputargs.insert(index_counter, i0) jumpargs.insert(index_counter, i1) # - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() faildescr = BasicFailDescr(15) operations = [ + ResOperation(rop.LABEL, inputargs, None, descr=targettoken), ResOperation(rop.INT_SUB, [i0, ConstInt(1)], i1), ResOperation(rop.INT_GE, [i1, ConstInt(0)], i2), ResOperation(rop.GUARD_TRUE, [i2], None), - ResOperation(rop.JUMP, jumpargs, None, descr=looptoken), + ResOperation(rop.JUMP, jumpargs, None, descr=targettoken), ] - operations[2].setfailargs(inputargs[:]) - operations[2].setdescr(faildescr) + 
operations[3].setfailargs(inputargs[:]) + operations[3].setdescr(faildescr) # self.cpu.compile_loop(inputargs, operations, looptoken) # @@ -1109,17 +1114,7 @@ assert 0 values[index_counter] = 11 # - for i, (box, val) in enumerate(zip(inputargs, values)): - if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, val) - elif isinstance(box, BoxPtr): - self.cpu.set_future_value_ref(i, val) - elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, val) - else: - assert 0 - # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *values) assert fail.identifier == 15 # dstvalues = values[:] @@ -1149,30 +1144,33 @@ py.test.skip("requires floats") fboxes = [BoxFloat() for i in range(12)] i2 = BoxInt() + targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) operations = [ + ResOperation(rop.LABEL, fboxes, None, descr=targettoken), ResOperation(rop.FLOAT_LE, [fboxes[0], constfloat(9.2)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), ResOperation(rop.FINISH, fboxes, None, descr=faildescr2), ] operations[-2].setfailargs(fboxes) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(fboxes, operations, looptoken) fboxes2 = [BoxFloat() for i in range(12)] f3 = BoxFloat() bridge = [ ResOperation(rop.FLOAT_SUB, [fboxes2[0], constfloat(1.0)], f3), - ResOperation(rop.JUMP, [f3] + fboxes2[1:], None, descr=looptoken), + ResOperation(rop.JUMP, [f3]+fboxes2[1:], None, descr=targettoken), ] self.cpu.compile_bridge(faildescr1, fboxes2, bridge, looptoken) + args = [] for i in range(len(fboxes)): x = 13.5 + 6.73 * i - self.cpu.set_future_value_float(i, longlong.getfloatstorage(x)) - fail = self.cpu.execute_token(looptoken) + args.append(longlong.getfloatstorage(x)) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 2 res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == 8.5 @@ -1214,7 +1212,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1222,14 +1220,12 @@ if test1 == -42 or combinaison[0] == 'b': for test2 in [-65, -42, -11]: if test2 == -42 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_int(n, test1) - n += 1 + args.append(test1) if combinaison[1] == 'b': - cpu.set_future_value_int(n, test2) - n += 1 - fail = cpu.execute_token(looptoken) + args.append(test2) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1271,7 +1267,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1281,16 +1277,14 @@ if test1 == -4.5 or combinaison[0] == 'b': for test2 in [-6.5, -4.5, -2.5, nan]: if test2 == -4.5 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test1)) - n += 1 + args.append( + longlong.getfloatstorage(test1)) if combinaison[1] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test2)) - n += 1 - fail = cpu.execute_token(looptoken) + args.append( + longlong.getfloatstorage(test2)) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1330,19 +1324,20 @@ 
faildescr = BasicFailDescr(1) operations.append(ResOperation(rop.FINISH, [], None, descr=faildescr)) - looptoken = LoopToken() + looptoken = JitCellToken() # self.cpu.compile_loop(inputargs, operations, looptoken) # - for i, box in enumerate(inputargs): + args = [] + for box in inputargs: if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, box.getint()) + args.append(box.getint()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, box.getfloatstorage()) + args.append(box.getfloatstorage()) else: assert 0 # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 1 def test_nan_and_infinity(self): @@ -1400,15 +1395,14 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(5))] operations[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) self.cpu.compile_loop(unique_testcase_list, operations, looptoken) - for i, box in enumerate(unique_testcase_list): - self.cpu.set_future_value_float( - i, box.getfloatstorage()) - fail = self.cpu.execute_token(looptoken) + args = [box.getfloatstorage() + for box in unique_testcase_list] + fail = self.cpu.execute_token(looptoken, *args) if fail.identifier != 5 - (expected_id^expected): if fail.identifier == 4: msg = "was taken" @@ -1675,15 +1669,14 @@ exc_tp = xtp exc_ptr = xptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_ref(1) == xptr excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1700,9 +1693,9 @@ exc_tp = ytp exc_ptr = yptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == yptr @@ -1718,14 +1711,13 @@ finish(0) ''' loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == xptr - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 0 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1895,18 +1887,14 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - 
self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 20 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 10 @@ -1940,18 +1928,14 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 42 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 42 @@ -1986,19 +1970,15 @@ ResOperation(rop.FINISH, [f2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, f2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 42.5 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 x = self.cpu.get_latest_value_float(1) @@ -2031,10 +2011,9 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([i1, i2]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1], ops, looptoken) - self.cpu.set_future_value_int(0, ord('G')) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, ord('G')) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == ord('g') @@ -2091,14 +2070,14 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1, i2, i3], ops, looptoken) - self.cpu.set_future_value_int(0, rffi.cast(lltype.Signed, raw)) - self.cpu.set_future_value_int(1, 2) - self.cpu.set_future_value_int(2, 4) - self.cpu.set_future_value_int(3, rffi.cast(lltype.Signed, fn)) + args = [rffi.cast(lltype.Signed, raw), + 2, + 4, + rffi.cast(lltype.Signed, fn)] assert glob.lst == [] - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert len(glob.lst) > 0 lltype.free(raw, flavor='raw') @@ -2147,13 +2126,12 @@ ops += [ ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0)) ] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') - self.cpu.set_future_value_int(0, buflen) - 
self.cpu.set_future_value_int(1, rffi.cast(lltype.Signed, buffer)) - fail = self.cpu.execute_token(looptoken) + args = [buflen, rffi.cast(lltype.Signed, buffer)] + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == len(cwd) assert rffi.charp2strn(buffer, buflen) == cwd @@ -2169,12 +2147,10 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[0].setfailargs([i1]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == -42 print 'step 1 ok' @@ -2183,9 +2159,7 @@ # mark as failing self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr assert self.cpu.get_latest_value_int(0) == 9 print 'step 2 ok' @@ -2201,9 +2175,7 @@ ops[0].setfailargs([]) self.cpu.compile_bridge(faildescr, [i2], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 3 assert self.cpu.get_latest_value_int(0) == 9 print 'step 3 ok' @@ -2212,9 +2184,7 @@ # mark as failing again self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr2 print 'step 4 ok' print '-'*79 @@ -2415,7 +2385,7 @@ i18 = int_add(i17, i9) finish(i18)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 @@ -2423,9 +2393,8 @@ FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, EffectInfo.MOST_GENERAL) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(looptoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(looptoken, *args) assert self.cpu.get_latest_value_int(0) == 55 ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] @@ -2435,11 +2404,10 @@ finish(i11) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(othertoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_latest_value_int(0) == 13 assert called @@ -2471,12 +2439,12 @@ finish(f2)''' loop = parse(ops) done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.3)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.2), + 
longlong.getfloatstorage(2.3)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 2.3 ops = ''' @@ -2486,11 +2454,11 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2499,11 +2467,11 @@ del called[:] self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 3.2 assert not called @@ -2561,12 +2529,12 @@ f2 = float_add(f0, f1) finish(f2)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.35)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(2.35)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.25 + 2.35 assert not called @@ -2578,13 +2546,13 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # normal call_assembler: goes to looptoken - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.25)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(3.25)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2596,7 +2564,7 @@ f2 = float_sub(f0, f1) finish(f2)''' loop = parse(ops) - looptoken2 = LoopToken() + looptoken2 = JitCellToken() looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken2) @@ -2604,10 +2572,9 @@ self.cpu.redirect_call_assembler(looptoken, looptoken2) # now, our call_assembler should go to looptoken2 - self.cpu.set_future_value_float(0, longlong.getfloatstorage(6.0)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(1.5)) - # 6.0-1.5 == 1.25+3.25 - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(6.0), + longlong.getfloatstorage(1.5)] # 6.0-1.5 == 1.25+3.25 + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called @@ -2958,12 +2925,138 @@ 
ResOperation(rop.FINISH, [p0], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # overflowing value: - self.cpu.set_future_value_int(0, sys.maxint // 4 + 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) assert fail.identifier == excdescr.identifier + exc = self.cpu.grab_exc_value() + assert exc == "memoryerror!" + + def test_compile_loop_with_target(self): + i0 = BoxInt() + i1 = BoxInt() + i2 = BoxInt() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken1 = TargetToken() + targettoken2 = TargetToken() + faildescr = BasicFailDescr(2) + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), + ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr), + ResOperation(rop.LABEL, [i1], None, descr=targettoken2), + ResOperation(rop.INT_GE, [i1, ConstInt(0)], i3), + ResOperation(rop.GUARD_TRUE, [i3], None, descr=BasicFailDescr(3)), + ResOperation(rop.JUMP, [i1], None, descr=targettoken1), + ] + inputargs = [i0] + operations[3].setfailargs([i1]) + operations[6].setfailargs([i1]) + + self.cpu.compile_loop(inputargs, operations, looptoken) + fail = self.cpu.execute_token(looptoken, 2) + assert fail.identifier == 2 + res = self.cpu.get_latest_value_int(0) + assert res == 10 + + inputargs = [i0] + operations = [ + ResOperation(rop.INT_SUB, [i0, ConstInt(20)], i2), + ResOperation(rop.JUMP, [i2], None, descr=targettoken2), + ] + self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) + + fail = self.cpu.execute_token(looptoken, 2) + assert fail.identifier == 3 + res = self.cpu.get_latest_value_int(0) + assert res == -10 + + def test_compile_bridge_with_target(self): + # This test creates a loopy piece of code in a bridge, and builds another + # unrelated loop that ends in a jump directly to this loopy bit of code. + # It catches a case in which we underestimate the needed frame_depth across + # the cross-loop JUMP, because we estimate it based on the frame_depth stored + # in the original loop. 
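The comment above describes the failure mode; the guard against it, visible in the assembler hunks later in this diff, is that both bridge compilation and cross-loop jumps take the maximum of their own frame depth and the depth already recorded on the target's compiled-loop token. A simplified sketch of that rule (illustrative helper, not the real assembler code):

    def required_frame_depth(own_depth, jump_target_clt=None):
        # a trace that ends in a JUMP must reserve at least as much frame
        # as the trace it jumps into, otherwise the target's spill slots
        # fall outside the allocated frame
        depth = own_depth
        if jump_target_clt is not None:
            depth = max(depth, jump_target_clt.frame_depth)
        return depth
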
+ i0 = BoxInt() + i1 = BoxInt() + looptoken1 = JitCellToken() + targettoken1 = TargetToken() + faildescr1 = BasicFailDescr(2) + inputargs = [i0] + operations = [ + ResOperation(rop.INT_LE, [i0, ConstInt(1)], i1), + ResOperation(rop.GUARD_TRUE, [i1], None, descr=faildescr1), + ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(1234)), + ] + operations[1].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken1) + + def func(a, b, c, d, e, f, g, h, i): + assert a + 2 == b + assert a + 4 == c + assert a + 6 == d + assert a + 8 == e + assert a + 10 == f + assert a + 12 == g + assert a + 14 == h + assert a + 16 == i + FPTR = self.Ptr(self.FuncType([lltype.Signed]*9, lltype.Void)) + func_ptr = llhelper(FPTR, func) + cpu = self.cpu + calldescr = cpu.calldescrof(deref(FPTR), (lltype.Signed,)*9, lltype.Void, + EffectInfo.MOST_GENERAL) + funcbox = self.get_funcbox(cpu, func_ptr) + + i0 = BoxInt(); i1 = BoxInt(); i2 = BoxInt(); i3 = BoxInt(); i4 = BoxInt() + i5 = BoxInt(); i6 = BoxInt(); i7 = BoxInt(); i8 = BoxInt(); i9 = BoxInt() + i10 = BoxInt(); i11 = BoxInt(); i12 = BoxInt(); i13 = BoxInt(); i14 = BoxInt() + i15 = BoxInt(); i16 = BoxInt(); i17 = BoxInt(); i18 = BoxInt(); i19 = BoxInt() + i20 = BoxInt() + inputargs = [i0] + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_ADD, [i1, ConstInt(1)], i2), + ResOperation(rop.INT_ADD, [i2, ConstInt(1)], i3), + ResOperation(rop.INT_ADD, [i3, ConstInt(1)], i4), + ResOperation(rop.INT_ADD, [i4, ConstInt(1)], i5), + ResOperation(rop.INT_ADD, [i5, ConstInt(1)], i6), + ResOperation(rop.INT_ADD, [i6, ConstInt(1)], i7), + ResOperation(rop.INT_ADD, [i7, ConstInt(1)], i8), + ResOperation(rop.INT_ADD, [i8, ConstInt(1)], i9), + ResOperation(rop.INT_ADD, [i9, ConstInt(1)], i10), + ResOperation(rop.INT_ADD, [i10, ConstInt(1)], i11), + ResOperation(rop.INT_ADD, [i11, ConstInt(1)], i12), + ResOperation(rop.INT_ADD, [i12, ConstInt(1)], i13), + ResOperation(rop.INT_ADD, [i13, ConstInt(1)], i14), + ResOperation(rop.INT_ADD, [i14, ConstInt(1)], i15), + ResOperation(rop.INT_ADD, [i15, ConstInt(1)], i16), + ResOperation(rop.INT_ADD, [i16, ConstInt(1)], i17), + ResOperation(rop.INT_ADD, [i17, ConstInt(1)], i18), + ResOperation(rop.INT_ADD, [i18, ConstInt(1)], i19), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.INT_LT, [i19, ConstInt(100)], i20), + ResOperation(rop.GUARD_TRUE, [i20], None, descr=BasicFailDescr(42)), + ResOperation(rop.JUMP, [i19], None, descr=targettoken1), + ] + operations[-2].setfailargs([]) + self.cpu.compile_bridge(faildescr1, inputargs, operations, looptoken1) + + looptoken2 = JitCellToken() + inputargs = [BoxInt()] + operations = [ + ResOperation(rop.JUMP, [ConstInt(0)], None, descr=targettoken1), + ] + self.cpu.compile_loop(inputargs, operations, looptoken2) + + fail = self.cpu.execute_token(looptoken2, -9) + assert fail.identifier == 42 class OOtypeBackendTest(BaseBackendTest): diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -3,9 +3,10 @@ from pypy.rlib.rarithmetic import intmask, LONG_BIT from pypy.rpython.lltypesystem import llmemory from pypy.jit.metainterp.history import BasicFailDescr, TreeLoop -from 
pypy.jit.metainterp.history import BoxInt, ConstInt, LoopToken -from pypy.jit.metainterp.history import BoxPtr, ConstPtr +from pypy.jit.metainterp.history import BoxInt, ConstInt, JitCellToken +from pypy.jit.metainterp.history import BoxPtr, ConstPtr, TargetToken from pypy.jit.metainterp.history import BoxFloat, ConstFloat, Const +from pypy.jit.metainterp.history import INT, FLOAT from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.executor import execute_nonspec from pypy.jit.metainterp.resoperation import opname @@ -179,7 +180,7 @@ #print >>s, ' operations[%d].suboperations = [' % i #print >>s, ' ResOperation(rop.FAIL, [%s], None)]' % ( # ', '.join([names[v] for v in op.args])) - print >>s, ' looptoken = LoopToken()' + print >>s, ' looptoken = JitCellToken()' print >>s, ' cpu.compile_loop(inputargs, operations, looptoken)' if hasattr(self.loop, 'inputargs'): for i, v in enumerate(self.loop.inputargs): @@ -525,29 +526,53 @@ startvars.append(BoxFloat(r.random_float_storage())) else: startvars.append(BoxInt(r.random_integer())) + allow_delay = True + else: + allow_delay = False assert len(dict.fromkeys(startvars)) == len(startvars) self.startvars = startvars self.prebuilt_ptr_consts = [] self.r = r - self.build_random_loop(cpu, builder_factory, r, startvars) + self.build_random_loop(cpu, builder_factory, r, startvars, allow_delay) - def build_random_loop(self, cpu, builder_factory, r, startvars): + def build_random_loop(self, cpu, builder_factory, r, startvars, allow_delay): loop = TreeLoop('test_random_function') loop.inputargs = startvars[:] loop.operations = [] - loop.token = LoopToken() - + loop._jitcelltoken = JitCellToken() builder = builder_factory(cpu, loop, startvars[:]) - self.generate_ops(builder, r, loop, startvars) + if allow_delay: + needs_a_label = True + else: + self.insert_label(loop, 0, r) + needs_a_label = False + self.generate_ops(builder, r, loop, startvars, needs_a_label=needs_a_label) self.builder = builder self.loop = loop - cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + dump(loop) + cpu.compile_loop(loop.inputargs, loop.operations, loop._jitcelltoken) - def generate_ops(self, builder, r, loop, startvars): + def insert_label(self, loop, position, r): + assert not hasattr(loop, '_targettoken') + for i in range(position): + op = loop.operations[i] + if (not op.has_no_side_effect() + or not isinstance(op.result, (BoxInt, BoxFloat))): + position = i + break # cannot move the LABEL later + randompos = r.randrange(0, len(self.startvars)+1) + self.startvars.insert(randompos, op.result) + loop._targettoken = TargetToken() + loop.operations.insert(position, ResOperation(rop.LABEL, self.startvars, None, + loop._targettoken)) + + def generate_ops(self, builder, r, loop, startvars, needs_a_label=False): block_length = pytest.config.option.block_length + istart = 0 for i in range(block_length): + istart = len(loop.operations) try: op = r.choice(builder.OPERATIONS) op.filter(builder) @@ -556,6 +581,12 @@ pass if builder.should_fail_by is not None: break + if needs_a_label and r.random() < 0.2: + self.insert_label(loop, istart, r) + needs_a_label = False + if needs_a_label: + self.insert_label(loop, istart, r) + endvars = [] used_later = {} for op in loop.operations: @@ -581,6 +612,22 @@ if pytest.config.option.output: builder.print_loop() + def runjitcelltoken(self): + if self.startvars == self.loop.inputargs: + return self.loop._jitcelltoken + if not hasattr(self, '_initialjumploop_celltoken'): + self._initialjumploop_celltoken 
= JitCellToken() + args = [] + for box in self.startvars: + if box not in self.loop.inputargs: + box = box.constbox() + args.append(box) + self.cpu.compile_loop(self.loop.inputargs, + [ResOperation(rop.JUMP, args, None, + descr=self.loop._targettoken)], + self._initialjumploop_celltoken) + return self._initialjumploop_celltoken + def get_fail_args(self): if self.should_fail_by.is_guard(): assert self.should_fail_by.getfailargs() is not None @@ -608,14 +655,8 @@ exc = cpu.grab_exc_value() assert not exc - for i, box in enumerate(self.startvars): - if isinstance(box, BoxInt): - cpu.set_future_value_int(i, box.value) - elif isinstance(box, BoxFloat): - cpu.set_future_value_float(i, box.value) - else: - raise NotImplementedError(box) - fail = cpu.execute_token(self.loop.token) + arguments = [box.value for box in self.loop.inputargs] + fail = cpu.execute_token(self.runjitcelltoken(), *arguments) assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): if isinstance(v, (BoxFloat, ConstFloat)): @@ -676,33 +717,55 @@ # to build_bridge().) # First make up the other loop... - subset = bridge_builder.subset_of_intvars(r) - subset = [i for i in subset if i in fail_args] - if len(subset) == 0: - return False + # + # New restriction: must have the same argument count and types + # as the original loop + subset = [] + for box in self.loop.inputargs: + srcbox = r.choice(fail_args) + if srcbox.type != box.type: + if box.type == INT: + srcbox = ConstInt(r.random_integer()) + elif box.type == FLOAT: + srcbox = ConstFloat(r.random_float_storage()) + else: + raise AssertionError(box.type) + subset.append(srcbox) + # args = [x.clonebox() for x in subset] rl = RandomLoop(self.builder.cpu, self.builder.fork, r, args) + dump(rl.loop) self.cpu.compile_loop(rl.loop.inputargs, rl.loop.operations, - rl.loop.token) + rl.loop._jitcelltoken) # done self.should_fail_by = rl.should_fail_by self.expected = rl.expected assert len(rl.loop.inputargs) == len(args) # The new bridge's execution will end normally at its FINISH. # Just replace the FINISH with the JUMP to the new loop. 
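Putting the pieces above together (names shortened for illustration; this restates the surrounding code rather than adding behaviour): the bridge now picks one source box per inputarg of the target loop, falling back to a fresh constant when the types disagree, and its FINISH is swapped for a JUMP to the target loop's TargetToken:

    def jump_into_other_loop(bridge_ops, fail_args, other_loop, r):
        subset = []
        for box in other_loop.inputargs:
            srcbox = r.choice(fail_args)
            if srcbox.type != box.type:
                # pad with a constant of the right type
                if box.type == INT:
                    srcbox = ConstInt(r.random_integer())
                elif box.type == FLOAT:
                    srcbox = ConstFloat(r.random_float_storage())
            subset.append(srcbox)
        # the bridge's FINISH becomes a JUMP to the other loop's TargetToken
        bridge_ops[-1] = ResOperation(rop.JUMP, subset, None,
                                      descr=other_loop._targettoken)
        return subset
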
- jump_op = ResOperation(rop.JUMP, subset, None, descr=rl.loop.token) + jump_op = ResOperation(rop.JUMP, subset, None, + descr=rl.loop._targettoken) subloop.operations[-1] = jump_op self.guard_op = rl.guard_op self.prebuilt_ptr_consts += rl.prebuilt_ptr_consts - self.loop.token.record_jump_to(rl.loop.token) + self.loop._jitcelltoken.record_jump_to(rl.loop._jitcelltoken) self.dont_generate_more = True if r.random() < .05: return False + dump(subloop) self.builder.cpu.compile_bridge(fail_descr, fail_args, - subloop.operations, self.loop.token) + subloop.operations, + self.loop._jitcelltoken) return True +def dump(loop): + print >> sys.stderr, loop + if hasattr(loop, 'inputargs'): + print >> sys.stderr, '\t', loop.inputargs + for op in loop.operations: + print >> sys.stderr, '\t', op + def check_random_function(cpu, BuilderClass, r, num=None, max=None): loop = RandomLoop(cpu, BuilderClass, r) while True: diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -2,8 +2,8 @@ from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from pypy.jit.metainterp.history import Const, Box, BoxInt, ConstInt -from pypy.jit.metainterp.history import (AbstractFailDescr, INT, REF, FLOAT, - LoopToken) +from pypy.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT +from pypy.jit.metainterp.history import JitCellToken from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper @@ -38,6 +38,7 @@ from pypy.jit.backend.x86.jump import remap_frame_layout from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter import longlong +from pypy.rlib.rarithmetic import intmask # darwin requires the stack to be 16 bytes aligned on calls. 
Same for gcc 4.5.0, # better safe than sorry @@ -69,10 +70,6 @@ self.cpu = cpu self.verbose = False self.rtyper = cpu.rtyper - self.malloc_func_addr = 0 - self.malloc_array_func_addr = 0 - self.malloc_str_func_addr = 0 - self.malloc_unicode_func_addr = 0 self.fail_boxes_int = values_array(lltype.Signed, failargs_limit) self.fail_boxes_ptr = values_array(llmemory.GCREF, failargs_limit) self.fail_boxes_float = values_array(longlong.FLOATSTORAGE, @@ -107,20 +104,6 @@ # the address of the function called by 'new' gc_ll_descr = self.cpu.gc_ll_descr gc_ll_descr.initialize() - ll_new = gc_ll_descr.get_funcptr_for_new() - self.malloc_func_addr = rffi.cast(lltype.Signed, ll_new) - if gc_ll_descr.get_funcptr_for_newarray is not None: - ll_new_array = gc_ll_descr.get_funcptr_for_newarray() - self.malloc_array_func_addr = rffi.cast(lltype.Signed, - ll_new_array) - if gc_ll_descr.get_funcptr_for_newstr is not None: - ll_new_str = gc_ll_descr.get_funcptr_for_newstr() - self.malloc_str_func_addr = rffi.cast(lltype.Signed, - ll_new_str) - if gc_ll_descr.get_funcptr_for_newunicode is not None: - ll_new_unicode = gc_ll_descr.get_funcptr_for_newunicode() - self.malloc_unicode_func_addr = rffi.cast(lltype.Signed, - ll_new_unicode) self.memcpy_addr = self.cpu.cast_ptr_to_int(support.memcpy_fn) self._build_failure_recovery(False) self._build_failure_recovery(True) @@ -152,14 +135,13 @@ allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, allblocks) + self.target_tokens_currently_compiling = {} def teardown(self): self.pending_guard_tokens = None if WORD == 8: self.pending_memoryerror_trampoline_from = None self.mc = None - self.looppos = -1 - self.currently_compiling_loop = None self.current_clt = None def finish_once(self): @@ -275,7 +257,8 @@ # self.mc = codebuf.MachineCodeBlockWrapper() # call on_leave_jitted_save_exc() - addr = self.cpu.get_on_leave_jitted_int(save_exception=True) + addr = self.cpu.get_on_leave_jitted_int(save_exception=True, + default_to_memoryerror=True) self.mc.CALL(imm(addr)) self.mc.MOV_ri(eax.value, self.cpu.propagate_exception_v) self._call_footer() @@ -310,12 +293,11 @@ mc.MOVSD_sx(8*i, i) # xmm0 to xmm7 # if IS_X86_32: - mc.LEA_rb(eax.value, +8) stack_size += 2*WORD mc.PUSH_r(eax.value) # alignment - mc.PUSH_r(eax.value) + mc.PUSH_r(esp.value) elif IS_X86_64: - mc.LEA_rb(edi.value, +16) + mc.MOV_rr(edi.value, esp.value) # # esp is now aligned to a multiple of 16 again mc.CALL(imm(slowpathaddr)) @@ -326,7 +308,7 @@ jnz_location = mc.get_relative_pos() # if IS_X86_32: - mc.ADD_ri(esp.value, 2*WORD) + mc.ADD_ri(esp.value, 2*WORD) # cancel the two PUSHes above elif IS_X86_64: # restore the registers for i in range(7, -1, -1): @@ -422,12 +404,8 @@ def assemble_loop(self, loopname, inputargs, operations, looptoken, log): '''adds the following attributes to looptoken: - _x86_loop_code (an integer giving an address) - _x86_bootstrap_code (an integer giving an address) - _x86_direct_bootstrap_code ( " " " " ) - _x86_frame_depth - _x86_param_depth - _x86_arglocs + _x86_function_addr (address of the generated func, as an int) + _x86_loop_code (debug: addr of the start of the ResOps) _x86_debug_checksum ''' # XXX this function is too longish and contains some code @@ -443,37 +421,35 @@ assert len(set(inputargs)) == len(inputargs) self.setup(looptoken) - self.currently_compiling_loop = looptoken if log: self._register_counter(False, looptoken.number) operations = self._inject_debugging_code(looptoken, operations) regalloc = 
RegAlloc(self, self.cpu.translate_support_code) - arglocs, operations = regalloc.prepare_loop(inputargs, operations, - looptoken, clt.allgcrefs) - looptoken._x86_arglocs = arglocs - - bootstrappos = self.mc.get_relative_pos() - stackadjustpos = self._assemble_bootstrap_code(inputargs, arglocs) - self.looppos = self.mc.get_relative_pos() - looptoken._x86_frame_depth = -1 # temporarily - looptoken._x86_param_depth = -1 # temporarily + # + self._call_header_with_stack_check() + stackadjustpos = self._patchable_stackadjust() + clt._debug_nbargs = len(inputargs) + operations = regalloc.prepare_loop(inputargs, operations, + looptoken, clt.allgcrefs) + looppos = self.mc.get_relative_pos() + looptoken._x86_loop_code = looppos + clt.frame_depth = -1 # temporarily + clt.param_depth = -1 # temporarily frame_depth, param_depth = self._assemble(regalloc, operations) - looptoken._x86_frame_depth = frame_depth - looptoken._x86_param_depth = param_depth - - directbootstrappos = self.mc.get_relative_pos() - self._assemble_bootstrap_direct_call(arglocs, self.looppos, - frame_depth+param_depth) + clt.frame_depth = frame_depth + clt.param_depth = param_depth + # + size_excluding_failure_stuff = self.mc.get_relative_pos() self.write_pending_failure_recoveries() - fullsize = self.mc.get_relative_pos() + full_size = self.mc.get_relative_pos() # rawstart = self.materialize_loop(looptoken) debug_start("jit-backend-addr") debug_print("Loop %d (%s) has address %x to %x (bootstrap %x)" % ( looptoken.number, loopname, - rawstart + self.looppos, - rawstart + directbootstrappos, + rawstart + looppos, + rawstart + size_excluding_failure_stuff, rawstart)) debug_stop("jit-backend-addr") self._patch_stackadjust(rawstart + stackadjustpos, @@ -484,18 +460,17 @@ if not we_are_translated(): # used only by looptoken.dump() -- useful in tests looptoken._x86_rawstart = rawstart - looptoken._x86_fullsize = fullsize + looptoken._x86_fullsize = full_size looptoken._x86_ops_offset = ops_offset + looptoken._x86_function_addr = rawstart - looptoken._x86_bootstrap_code = rawstart + bootstrappos - looptoken._x86_loop_code = rawstart + self.looppos - looptoken._x86_direct_bootstrap_code = rawstart + directbootstrappos + self.fixup_target_tokens(rawstart) self.teardown() # oprofile support if self.cpu.profile_agent is not None: name = "Loop # %s: %s" % (looptoken.number, loopname) self.cpu.profile_agent.native_code_written(name, - rawstart, fullsize) + rawstart, full_size) return ops_offset def assemble_bridge(self, faildescr, inputargs, operations, @@ -548,6 +523,9 @@ # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset + self.fixup_target_tokens(rawstart) + self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth) + self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -668,6 +646,11 @@ mc.copy_to_raw_memory(adr_target) faildescr._x86_adr_jump_offset = 0 # means "patched" + def fixup_target_tokens(self, rawstart): + for targettoken in self.target_tokens_currently_compiling: + targettoken._x86_loop_code += rawstart + self.target_tokens_currently_compiling = None + @specialize.argtype(1) def _inject_debugging_code(self, looptoken, operations): if self._debug: @@ -685,20 +668,24 @@ ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), ResOperation(rop.SETFIELD_RAW, [c_adr, box2], None, descr=self.debug_counter_descr)] - operations = ops + operations + if 
operations[0].getopnum() == rop.LABEL: + operations = [operations[0]] + ops + operations[1:] + else: + operations = ops + operations return operations def _assemble(self, regalloc, operations): self._regalloc = regalloc + regalloc.compute_hint_frame_locations(operations) regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging - frame_depth = regalloc.fm.frame_depth + frame_depth = regalloc.fm.get_frame_depth() param_depth = regalloc.param_depth jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: - target_frame_depth = jump_target_descr._x86_frame_depth - target_param_depth = jump_target_descr._x86_param_depth + target_frame_depth = jump_target_descr._x86_clt.frame_depth + target_param_depth = jump_target_descr._x86_clt.param_depth frame_depth = max(frame_depth, target_frame_depth) param_depth = max(param_depth, target_param_depth) return frame_depth, param_depth @@ -793,152 +780,21 @@ self.mc.MOV_ri(ebx.value, rst) # MOV ebx, rootstacktop self.mc.SUB_mi8((ebx.value, 0), 2*WORD) # SUB [ebx], 2*WORD - def _assemble_bootstrap_direct_call(self, arglocs, jmppos, stackdepth): - if IS_X86_64: - return self._assemble_bootstrap_direct_call_64(arglocs, jmppos, stackdepth) - # XXX pushing ebx esi and edi is a bit pointless, since we store - # all regsiters anyway, for the case of guard_not_forced - # XXX this can be improved greatly. Right now it'll behave like - # a normal call - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - offset = 2 * WORD - tmp = eax - xmmtmp = xmm0 - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert not loc.is_xmm - self.mc.MOV_rb(loc.value, offset) - else: - self.mc.MOV_rb(tmp.value, offset) - self.mc.MOV(loc, tmp) - offset += WORD - loc = floatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert loc.is_xmm - self.mc.MOVSD_xb(loc.value, offset) - else: - self.mc.MOVSD_xb(xmmtmp.value, offset) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - offset += 2 * WORD - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - - def _assemble_bootstrap_direct_call_64(self, arglocs, jmppos, stackdepth): - # XXX: Very similar to _emit_call_64 - - src_locs = [] - dst_locs = [] - xmm_src_locs = [] - xmm_dst_locs = [] - get_from_stack = [] - - # In reverse order for use with pop() - unused_gpr = [r9, r8, ecx, edx, esi, edi] - unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] - - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - - # The lists are padded with Nones - assert len(nonfloatlocs) == len(floatlocs) - - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if len(unused_gpr) > 0: - src_locs.append(unused_gpr.pop()) - dst_locs.append(loc) - else: - get_from_stack.append((loc, False)) - - floc = floatlocs[i] - if floc is not None: - if len(unused_xmm) > 0: - xmm_src_locs.append(unused_xmm.pop()) - xmm_dst_locs.append(floc) - else: - get_from_stack.append((floc, True)) - - remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) - remap_frame_layout(self, xmm_src_locs, xmm_dst_locs, X86_64_XMM_SCRATCH_REG) - - for i in 
range(len(get_from_stack)): - loc, is_xmm = get_from_stack[i] - if is_xmm: - self.mc.MOVSD_xb(X86_64_XMM_SCRATCH_REG.value, (2 + i) * WORD) - self.mc.MOVSD(loc, X86_64_XMM_SCRATCH_REG) - else: - self.mc.MOV_rb(X86_64_SCRATCH_REG.value, (2 + i) * WORD) - # XXX: We're assuming that "loc" won't require regloc to - # clobber the scratch register - self.mc.MOV(loc, X86_64_SCRATCH_REG) - - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking - oldnonfloatlocs, oldfloatlocs = oldlooptoken._x86_arglocs - newnonfloatlocs, newfloatlocs = newlooptoken._x86_arglocs - assert len(oldnonfloatlocs) == len(newnonfloatlocs) - assert len(oldfloatlocs) == len(newfloatlocs) + old_nbargs = oldlooptoken.compiled_loop_token._debug_nbargs + new_nbargs = newlooptoken.compiled_loop_token._debug_nbargs + assert old_nbargs == new_nbargs # we overwrite the instructions at the old _x86_direct_bootstrap_code # to start with a JMP to the new _x86_direct_bootstrap_code. # Ideally we should rather patch all existing CALLs, but well. - oldadr = oldlooptoken._x86_direct_bootstrap_code - target = newlooptoken._x86_direct_bootstrap_code + oldadr = oldlooptoken._x86_function_addr + target = newlooptoken._x86_function_addr mc = codebuf.MachineCodeBlockWrapper() mc.JMP(imm(target)) + assert mc.get_relative_pos() <= 13 # keep in sync with prepare_loop() mc.copy_to_raw_memory(oldadr) - def _assemble_bootstrap_code(self, inputargs, arglocs): - nonfloatlocs, floatlocs = arglocs - self._call_header() - stackadjustpos = self._patchable_stackadjust() - tmp = eax - xmmtmp = xmm0 - self.mc.begin_reuse_scratch_register() - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is None: - continue - if isinstance(loc, RegLoc): - target = loc - else: - target = tmp - if inputargs[i].type == REF: - adr = self.fail_boxes_ptr.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - self.mc.MOV(heap(adr), imm0) - else: - adr = self.fail_boxes_int.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - if target is not loc: - assert isinstance(loc, StackLoc) - self.mc.MOV_br(loc.value, target.value) - for i in range(len(floatlocs)): - loc = floatlocs[i] - if loc is None: - continue - adr = self.fail_boxes_float.get_addr_for_num(i) - if isinstance(loc, RegLoc): - self.mc.MOVSD(loc, heap(adr)) - else: - self.mc.MOVSD(xmmtmp, heap(adr)) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - self.mc.end_reuse_scratch_register() - return stackadjustpos - def dump(self, text): if not self.verbose: return @@ -965,7 +821,7 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.SUB_ri(esp.value, 8) # = size of doubles self.mc.MOVSD_sx(0, loc.value) - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.PUSH_b(get_ebp_ofs(loc.position)) self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) @@ -976,13 +832,25 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.MOVSD_xs(loc.value, 0) self.mc.ADD_ri(esp.value, 8) # = size of doubles - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.POP_b(get_ebp_ofs(loc.position + 1)) From noreply at buildbot.pypy.org Sun Dec 25 01:02:02 2011 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 25 Dec 2011 01:02:02 
+0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: remove all ufunc changes, revert to default version Message-ID: <20111225000202.69FD182AC3@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50842:495d8ce73189 Date: 2011-12-23 15:54 +0200 http://bitbucket.org/pypy/pypy/changeset/495d8ce73189/ Log: remove all ufunc changes, revert to default version diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -280,14 +280,13 @@ def _reduce_ufunc_impl(ufunc_name): def impl(self, space): return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, - self, multidim=True, promote_to_largest=promote_to_largest) + self, multidim=True) return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) - descr_sum = _reduce_ufunc_impl("add", False) - descr_prod = _reduce_ufunc_impl("multiply", False) - descr_max = _reduce_ufunc_impl("maximum", False) - descr_min = _reduce_ufunc_impl("minimum", False) - descr_sumpromote = _reduce_ufunc_impl("add", True) + descr_sum = _reduce_ufunc_impl("add") + descr_prod = _reduce_ufunc_impl("multiply") + descr_max = _reduce_ufunc_impl("maximum") + descr_min = _reduce_ufunc_impl("minimum") def _reduce_argmax_argmin_impl(op_name): reduce_driver = jit.JitDriver( @@ -631,7 +630,7 @@ return w_result def descr_mean(self, space): - return space.div(self.descr_sumpromote(space), + return space.div(self.descr_sum(space), space.wrap(self.size)) def descr_nonzero(self, space): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -49,7 +49,7 @@ def descr_reduce(self, space, w_obj): return self.reduce(space, w_obj, multidim=False) - def reduce(self, space, w_obj, multidim, promote_to_largest): + def reduce(self, space, w_obj, multidim): from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar if self.argcount != 2: @@ -136,19 +136,12 @@ W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity) self.func = func self.comparison_func = comparison_func - self.signature = signature.Call2(func) - self.reduce_signature = signature.BaseSignature() def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, convert_to_array, Scalar, shape_agreement) - #TODO: use of w_ssd, w_osd can be optimized. 
- if len(args_w)<4: - [w_lhs, w_rhs] = args_w - w_ssd = space.newlist([space.wrap(-1)]*2) - w_osd = space.newlist([space.wrap(-1)]*2) - else: - [w_lhs, w_rhs, w_ssd, w_osd] = args_w + + [w_lhs, w_rhs] = args_w w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) calc_dtype = find_binop_result_dtype(space, @@ -166,17 +159,10 @@ w_rhs.value.convert_to(calc_dtype) ) - new_shape = [] - ssd = [space.int_w(s) for s in space.listview(w_ssd)] - osd = [space.int_w(s) for s in space.listview(w_osd)] - if ssd[0]<0: - new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) - else: - #Assumption (should have been checked in call): - #w_lhs.shape[ssd[1]] == w_rhs.shape[osd[1]] - new_shape = [w_lhs.shape[ssd[1]]] - w_res = Call2(new_sig, new_shape, calc_dtype, - res_dtype, w_lhs, w_rhs, ssd, osd) + new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) + w_res = Call2(self.func, self.name, + new_shape, calc_dtype, + res_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) return w_res diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -716,7 +716,6 @@ a = array(range(5)) b = a.sum() assert b == 10 - assert isinstance(b,int) assert a[:4].sum() == 6 a = array([True] * 5, bool) From noreply at buildbot.pypy.org Sun Dec 25 01:02:03 2011 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 25 Dec 2011 01:02:03 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: clean up to_str a tiny bit Message-ID: <20111225000203.E0BCC822A3@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50843:40ef4fde1202 Date: 2011-12-24 22:36 +0200 http://bitbucket.org/pypy/pypy/changeset/40ef4fde1202/ Log: clean up to_str a tiny bit diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -361,22 +361,6 @@ descr_argmax = _reduce_argmax_argmin_impl("max") descr_argmin = _reduce_argmax_argmin_impl("min") - def _binop_impl_one_dim(ufunc_name): - #The third and fourth arguments allow the operator to proceed on a - #single dimension starting at a particular index - #i.e. ssd => self start, dimension; osd => other start, dimension - def impl(self, space, w_other, w_ssd, w_osd): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, - [self, w_other, w_ssd, w_osd]) - return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) - - descr_add1d = _binop_impl_one_dim("add") - descr_sub1d = _binop_impl_one_dim("subtract") - descr_mul1d = _binop_impl_one_dim("multiply") - descr_div1d = _binop_impl_one_dim("divide") - descr_pow1d = _binop_impl_one_dim("power") - descr_mod1d = _binop_impl_one_dim("mod") - def descr_dot(self, space, w_other): '''Dot product of two arrays. @@ -396,7 +380,7 @@ return w_res.descr_sum(space) dtype = interp_ufuncs.find_binop_result_dtype(space, self.find_dtype(), w_other.find_dtype()) - if self.find_size() < 1 and w_other.find_size() < 1: + if self.size < 1 and w_other.size < 1: #numpy compatability return scalar_w(space, dtype, space.wrap(0)) #Do the dims match? 
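[The descr_dot changes around this hunk hinge on how the output shape of a dot product is assembled from the two input shapes. As a rough stand-alone illustration (plain Python, illustrative names only, not the micronumpy API), the rule being applied mirrors numpy's: the last axis of the left operand is contracted against the second-to-last axis of the right operand, or against its only axis if it is 1-d.

    def dot_shape(lhs_shape, rhs_shape):
        # axis of rhs that gets contracted away, mirroring numpy's rule
        critical = 0 if len(rhs_shape) == 1 else len(rhs_shape) - 2
        assert lhs_shape[-1] == rhs_shape[critical], "dims do not match"
        return (list(lhs_shape[:-1]) +
                list(rhs_shape[:critical]) +
                list(rhs_shape[critical + 1:]))

    assert dot_shape([7, 5, 3], [3]) == [7, 5]
    assert dot_shape([7, 5, 3], [3, 4]) == [7, 5, 4]
    assert dot_shape([5, 3], [2, 3, 4]) == [5, 2, 4]
]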
@@ -414,7 +398,7 @@ w_other.shape[0:other_critical_dim] + \ w_other.shape[other_critical_dim + 1:] elif len(w_other.shape) > 0: - #dot does not reduce + #dot does not reduce for scalars out_shape += self.shape[:-1] if my_critical_dim_size != other_critical_dim_size: raise OperationError(space.w_ValueError, space.wrap( @@ -425,7 +409,7 @@ out_ndims = len(out_shape) #TODO: what should the order be? C or F? arr = W_NDimArray(out_size, out_shape, dtype=dtype) - out_iter = ViewIterator(arr) + out_iter = view_iter_from_arr(arr) #TODO: invalidate self, w_other with arr ? while not out_iter.done(): my_index = self.start @@ -441,7 +425,7 @@ space.wrap(len(self.shape) - 1)]) w_osd = space.newlist([space.wrap(other_index), space.wrap(other_critical_dim)]) - w_res = self.descr_mul1d(space, w_other, w_ssd, w_osd) + w_res = self.descr_mul(space, w_other) assert isinstance(w_res, BaseArray) value = w_res.descr_sum(space) arr.setitem(out_iter.get_offset(), value) @@ -952,9 +936,7 @@ builder.append('\n' + indent) else: builder.append(indent) - # create_slice requires len(chunks) > 1 in order to reduce - # shape - view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]).get_concrete() + view = self.create_slice([(i, 0, 0, 1)]).get_concrete() view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) i += 1 elif ndims == 1: From noreply at buildbot.pypy.org Sun Dec 25 01:02:05 2011 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 25 Dec 2011 01:02:05 +0100 (CET) Subject: [pypy-commit] pypy matrixmath-dot: clean up to_str a tiny bit (2) Message-ID: <20111225000205.1767A822A3@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: matrixmath-dot Changeset: r50844:d1038ba54b76 Date: 2011-12-24 22:38 +0200 http://bitbucket.org/pypy/pypy/changeset/d1038ba54b76/ Log: clean up to_str a tiny bit (2) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -921,9 +921,7 @@ builder.append('\n' + indent) else: builder.append(indent) - # create_slice requires len(chunks) > 1 in order to reduce - # shape - view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]).get_concrete() + view = self.create_slice([(i, 0, 0, 1)]).get_concrete() view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) builder.append('\n' + indent + '..., ') i = self.shape[0] - 3 From noreply at buildbot.pypy.org Sun Dec 25 01:02:06 2011 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 25 Dec 2011 01:02:06 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: test, implement improved AxisIterator Message-ID: <20111225000206.43ABB822A3@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50845:3cfc0b93cb23 Date: 2011-12-25 00:10 +0200 http://bitbucket.org/pypy/pypy/changeset/3cfc0b93cb23/ Log: test, implement improved AxisIterator diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -106,16 +106,24 @@ # ------ other iterators that are not part of the computation frame ---------- class AxisIterator(object): - """ This object will return offsets of each start of the last stride + """ This object will return offsets of each start of a stride on the + desired dimension, starting at the desired index """ - def __init__(self, arr): + def __init__(self, arr, 
dim=-1, start=[]): self.arr = arr - self.indices = [0] * (len(arr.shape) - 1) + self.indices = [0] * len(arr.shape) self.done = False self.offset = arr.start - + self.dim = len(arr.shape) - 1 + if dim >= 0: + self.dim = dim + if len(start) == len(arr.shape): + for i in range(len(start)): + self.offset += arr.strides[i] * start[i] def next(self): - for i in range(len(self.arr.shape) - 2, -1, -1): + for i in range(len(self.arr.shape) - 1, -1, -1): + if i == self.dim: + continue if self.indices[i] < self.arr.shape[i] - 1: self.indices[i] += 1 self.offset += self.arr.strides[i] diff --git a/pypy/module/micronumpy/test/test_iterators.py b/pypy/module/micronumpy/test/test_iterators.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_iterators.py @@ -0,0 +1,51 @@ + +from pypy.module.micronumpy.interp_iter import AxisIterator +from pypy.module.micronumpy.interp_numarray import W_NDimArray + +class MockDtype(object): + def malloc(self, size): + return None + +class TestAxisIteratorDirect(object): + def test_axis_iterator(self): + a = W_NDimArray(7*5*3, [7, 5, 3], MockDtype(), 'C') + i = AxisIterator(a) + ret = [] + while not i.done: + ret.append(i.offset) + i.next() + assert ret == [3*v for v in range(7*5)] + i = AxisIterator(a,2) + ret = [] + while not i.done: + ret.append(i.offset) + i.next() + assert ret == [3*v for v in range(7*5)] + i = AxisIterator(a,1) + ret = [] + while not i.done: + ret.append(i.offset) + i.next() + assert ret == [ 0, 1, 2, 15, 16, 17, 30, 31, 32, 45, 46, 47, + 60, 61, 62, 75, 76, 77, 90, 91, 92] + def test_axis_iterator_with_start(self): + a = W_NDimArray(7*5*3, [7, 5, 3], MockDtype(), 'C') + i = AxisIterator(a, start=[0, 0, 0]) + ret = [] + while not i.done: + ret.append(i.offset) + i.next() + assert ret == [3*v for v in range(7*5)] + i = AxisIterator(a, start=[1, 1, 0]) + ret = [] + while not i.done: + ret.append(i.offset) + i.next() + assert ret == [3*v+18 for v in range(7*5)] + i = AxisIterator(a, 1, [2, 0, 2]) + ret = [] + while not i.done: + ret.append(i.offset) + i.next() + assert ret == [v + 32 for v in [ 0, 1, 2, 15, 16, 17, 30, 31, 32, + 45, 46, 47, 60, 61, 62, 75, 76, 77, 90, 91, 92]] From noreply at buildbot.pypy.org Sun Dec 25 01:02:07 2011 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 25 Dec 2011 01:02:07 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: added dim arg to sum, but unused Message-ID: <20111225000207.6CE66822A3@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50846:a4ff50ca85e8 Date: 2011-12-25 00:58 +0200 http://bitbucket.org/pypy/pypy/changeset/a4ff50ca85e8/ Log: added dim arg to sum, but unused diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -25,6 +25,30 @@ return a.mean() def sum(a): + '''sum(a, axis=None) + Sum of array elements over a given axis. + + Parameters + ---------- + a : array_like + Elements to sum. + axis : integer, optional + Axis over which the sum is taken. By default `axis` is None, + and all elements are summed. + + Returns + ------- + sum_along_axis : ndarray + An array with the same shape as `a`, with the specified + axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar + is returned. If an output array is specified, a reference to + `out` is returned. + + See Also + -------- + ndarray.sum : Equivalent method. + ''' + # TODO: add to doc (once it's implemented): cumsum : Cumulative sum of array elements. 
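[To make the AxisIterator and sum(axis=...) behaviour above concrete, here is a small stand-alone sketch in plain Python (hypothetical helper names, C-contiguous layout assumed; not the micronumpy implementation): it enumerates the flat offsets at which each run along the chosen axis starts, then sums along that axis, reproducing the values expected by the tests above, e.g. arange(15).reshape(5, 3) summed over axis 0 gives [30, 35, 40].

    import itertools

    def c_strides(shape):
        # element strides of a C-contiguous array of this shape
        strides = [1] * len(shape)
        for i in range(len(shape) - 2, -1, -1):
            strides[i] = strides[i + 1] * shape[i + 1]
        return strides

    def axis_starts(shape, dim):
        # flat offset of the first element of every run along axis `dim`
        strides = c_strides(shape)
        others = [ax for ax in range(len(shape)) if ax != dim]
        return [sum(i * strides[ax] for i, ax in zip(idx, others))
                for idx in itertools.product(*[range(shape[ax]) for ax in others])]

    def sum_axis(flat, shape, dim):
        # reduce along `dim` by walking each run from its start offset
        strides = c_strides(shape)
        return [sum(flat[start + k * strides[dim]] for k in range(shape[dim]))
                for start in axis_starts(shape, dim)]

    assert axis_starts([7, 5, 3], 2) == [3 * v for v in range(7 * 5)]
    flat = list(range(15))                  # arange(15).reshape(5, 3), row-major
    assert sum_axis(flat, [5, 3], 0) == [30, 35, 40]
    assert sum_axis(flat, [5, 3], 1) == [3, 12, 21, 30, 39]
]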
if not hasattr(a, "sum"): a = numpypy.array(a) return a.sum() diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -279,8 +279,8 @@ descr_rmod = _binop_right_impl("mod") def _reduce_ufunc_impl(ufunc_name): - def impl(self, space): - return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, self, multidim=True) + def impl(self, space, args_w): + return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, self, True, args_w) return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) descr_sum = _reduce_ufunc_impl("add") diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -47,15 +47,19 @@ return self.call(space, __args__.arguments_w) def descr_reduce(self, space, w_obj): - return self.reduce(space, w_obj, multidim=False) + return self.reduce(space, w_obj, False) - def reduce(self, space, w_obj, multidim): + def reduce(self, space, w_obj, multidim, args_w): from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar - if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) - + dim = -1 + if multidim and len(args_w)>0: + dim = space.int_w(args_w[0]) + if len(args_w)>1: + raise OperationError(space.w_TypeError, space.wrap( + self.name + " recieved extra arguments")) assert isinstance(self, W_Ufunc2) obj = convert_to_array(space, w_obj) if isinstance(obj, Scalar): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -711,13 +711,18 @@ assert a[:4].mean() == 1.5 def test_sum(self): - from numpypy import array + from numpypy import array, arange a = array(range(5)) assert a.sum() == 10.0 assert a[:4].sum() == 6.0 a = array([True] * 5, bool) assert a.sum() == 5 + + raises(TypeError, 'a.sum(2, 3)') + a = arange(15).reshape(5, 3) + assert (a.sum(0) == [30, 35, 40]).all() + assert (a.sum(1) == [3, 12, 21, 30, 39]).all() def test_identity(self): from numpypy import identity, array From noreply at buildbot.pypy.org Sun Dec 25 01:02:08 2011 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 25 Dec 2011 01:02:08 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: translation fixes Message-ID: <20111225000208.957BE822A3@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50847:f06e38ca0d00 Date: 2011-12-25 01:58 +0200 http://bitbucket.org/pypy/pypy/changeset/f06e38ca0d00/ Log: translation fixes diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -277,10 +277,11 @@ descr_rdiv = _binop_right_impl("divide") descr_rpow = _binop_right_impl("power") descr_rmod = _binop_right_impl("mod") - + def _reduce_ufunc_impl(ufunc_name): - def impl(self, space, args_w): - return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, self, True, args_w) + def impl(self, space, w_dim=None): + return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, + self, True, w_dim) return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) descr_sum = _reduce_ufunc_impl("add") diff --git 
a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -47,19 +47,16 @@ return self.call(space, __args__.arguments_w) def descr_reduce(self, space, w_obj): - return self.reduce(space, w_obj, False) + return self.reduce(space, w_obj, False, space.wrap(-1)) - def reduce(self, space, w_obj, multidim, args_w): + def reduce(self, space, w_obj, multidim, w_dim): from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) dim = -1 - if multidim and len(args_w)>0: - dim = space.int_w(args_w[0]) - if len(args_w)>1: - raise OperationError(space.w_TypeError, space.wrap( - self.name + " recieved extra arguments")) + if not space.is_w(w_dim, space.w_None): + dim = space.int_w(w_dim) assert isinstance(self, W_Ufunc2) obj = convert_to_array(space, w_obj) if isinstance(obj, Scalar): @@ -72,6 +69,7 @@ promote_to_largest=True ) shapelen = len(obj.shape) + #TODO: if dim>=0 return a ArraySignature? sig = find_sig(ReduceSignature(self.func, self.name, dtype, ScalarSignature(dtype), obj.create_sig(obj.shape)), obj) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -718,7 +718,7 @@ a = array([True] * 5, bool) assert a.sum() == 5 - + raises(TypeError, 'a.sum(2, 3)') a = arange(15).reshape(5, 3) assert (a.sum(0) == [30, 35, 40]).all() @@ -730,19 +730,19 @@ a = identity(0) assert len(a) == 0 assert a.dtype == dtype('float64') - assert a.shape == (0,0) + assert a.shape == (0, 0) b = identity(1, dtype=int32) assert len(b) == 1 assert b[0][0] == 1 - assert b.shape == (1,1) + assert b.shape == (1, 1) assert b.dtype == dtype('int32') c = identity(2) - assert c.shape == (2,2) - assert (c == [[1,0],[0,1]]).all() + assert c.shape == (2, 2) + assert (c == [[1, 0], [0, 1]]).all() d = identity(3, dtype='int32') - assert d.shape == (3,3) + assert d.shape == (3, 3) assert d.dtype == dtype('int32') - assert (d == [[1,0,0],[0,1,0],[0,0,1]]).all() + assert (d == [[1, 0, 0], [0, 1, 0], [0, 0, 1]]).all() def test_prod(self): from numpypy import array @@ -950,13 +950,13 @@ def test_tolist_view(self): from numpypy import array - a = array([[1,2],[3,4]]) + a = array([[1, 2], [3, 4]]) assert (a + a).tolist() == [[2, 4], [6, 8]] def test_tolist_slice(self): from numpypy import array a = array([[17.1, 27.2], [40.3, 50.3]]) - assert a[:,0].tolist() == [17.1, 40.3] + assert a[:, 0].tolist() == [17.1, 40.3] assert a[0].tolist() == [17.1, 27.2] @@ -1086,11 +1086,11 @@ from numpypy import zeros, ones a = zeros((3, 3)) b = ones((3, 3)) - a[:,1:3] = b[:,1:3] + a[:, 1:3] = b[:, 1:3] assert (a == [[0, 1, 1], [0, 1, 1], [0, 1, 1]]).all() a = zeros((3, 3)) b = ones((3, 3)) - a[:,::2] = b[:,::2] + a[:, ::2] = b[:, ::2] assert (a == [[1, 0, 1], [1, 0, 1], [1, 0, 1]]).all() def test_broadcast_ufunc(self): @@ -1271,17 +1271,17 @@ assert g[1] == 2 assert g[2] == 3 h = fromstring("1, , 2, 3", dtype=uint8, sep=",") - assert (h == [1,0,2,3]).all() + assert (h == [1, 0, 2, 3]).all() i = fromstring("1 2 3", dtype=uint8, sep=" ") - assert (i == [1,2,3]).all() + assert (i == [1, 2, 3]).all() j = fromstring("1\t\t\t\t2\t3", dtype=uint8, sep="\t") - assert (j == [1,2,3]).all() + assert (j == [1, 2, 3]).all() k = fromstring("1,x,2,3", dtype=uint8, 
sep=",") - assert (k == [1,0]).all() + assert (k == [1, 0]).all() l = fromstring("1,x,2,3", dtype='float32', sep=",") - assert (l == [1.0,-1.0]).all() + assert (l == [1.0, -1.0]).all() m = fromstring("1,,2,3", sep=",") - assert (m == [1.0,-1.0,2.0,3.0]).all() + assert (m == [1.0, -1.0, 2.0, 3.0]).all() n = fromstring("3.4 2.0 3.8 2.2", dtype=int32, sep=" ") assert (n == [3]).all() o = fromstring("1.0 2f.0f 3.8 2.2", dtype=float32, sep=" ") @@ -1329,7 +1329,6 @@ j = fromstring(self.ulongval, dtype='L') assert j[0] == 12 - def test_fromstring_invalid(self): from numpypy import fromstring, uint16, uint8, int32 #default dtype is 64-bit float, so 3 bytes should fail From noreply at buildbot.pypy.org Sun Dec 25 10:45:06 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 25 Dec 2011 10:45:06 +0100 (CET) Subject: [pypy-commit] pypy jit-label-counters: save a bit of memory Message-ID: <20111225094506.0D1F0820CF@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-label-counters Changeset: r50848:52c88d381e03 Date: 2011-12-25 10:44 +0100 http://bitbucket.org/pypy/pypy/changeset/52c88d381e03/ Log: save a bit of memory diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -39,6 +39,7 @@ from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter import longlong from pypy.rlib.rarithmetic import intmask +from pypy.rlib.objectmodel import compute_unique_id # darwin requires the stack to be 16 bytes aligned on calls. Same for gcc 4.5.0, # better safe than sorry @@ -76,7 +77,6 @@ failargs_limit) self.fail_ebp = 0 self.loop_run_counters = [] - self.loop_run_counter_tokens = [] self.float_const_neg_addr = 0 self.float_const_abs_addr = 0 self.malloc_slowpath1 = 0 @@ -150,14 +150,10 @@ debug_start('jit-backend-counts') for i in range(len(self.loop_run_counters)): struct = self.loop_run_counters[i] - token = self.loop_run_counter_tokens[i] - if token: - prefix = token + if not struct.bridge: + prefix = 'TargetToken(%d)' % struct.number else: - if struct.bridge: - prefix = 'bridge ' + str(struct.number) - else: - prefix = 'loop ' + str(struct.number) + prefix = 'bridge ' + str(struct.number) debug_print(prefix + ':' + str(struct.i)) debug_stop('jit-backend-counts') @@ -611,12 +607,12 @@ track_allocation=False) struct.i = 0 struct.bridge = int(bridge) - struct.number = number - self.loop_run_counters.append(struct) - if token is not None: - self.loop_run_counter_tokens.append(token.repr_of_descr()) + if bridge: + struct.number = number else: - self.loop_run_counter_tokens.append(None) + assert token + struct.number = compute_unique_id(token) + self.loop_run_counters.append(struct) return struct def _find_failure_recovery_bytecode(self, faildescr): diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -545,12 +545,8 @@ # check debugging info struct = self.cpu.assembler.loop_run_counters[0] assert struct.i == 1 - token = self.cpu.assembler.loop_run_counter_tokens[0] - assert token == preambletoken.repr_of_descr() struct = self.cpu.assembler.loop_run_counters[1] assert struct.i == 9 - token = self.cpu.assembler.loop_run_counter_tokens[1] - assert token == targettoken.repr_of_descr() self.cpu.finish_once() finally: debug._log = None diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- 
a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -402,7 +402,6 @@ parts = [] for i in range(len(labels) - 1): start, stop = labels[i], labels[i+1] - parts.append(Part(trace, trace.operations[start : stop + 1])) return parts From noreply at buildbot.pypy.org Sun Dec 25 10:45:07 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 25 Dec 2011 10:45:07 +0100 (CET) Subject: [pypy-commit] pypy jit-label-counters: hg merge default Message-ID: <20111225094507.B838482AC3@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-label-counters Changeset: r50849:e0d2025b0064 Date: 2011-12-25 10:44 +0100 http://bitbucket.org/pypy/pypy/changeset/e0d2025b0064/ Log: hg merge default diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -180,7 +180,12 @@ if name is None: name = pyobj.func_name if signature is None: - signature = cpython_code_signature(pyobj.func_code) + if hasattr(pyobj, '_generator_next_method_of_'): + from pypy.interpreter.argument import Signature + signature = Signature(['entry']) # haaaaaack + defaults = () + else: + signature = cpython_code_signature(pyobj.func_code) if defaults is None: defaults = pyobj.func_defaults self.name = name diff --git a/pypy/jit/backend/x86/test/test_zrpy_platform.py b/pypy/jit/backend/x86/test/test_zrpy_platform.py --- a/pypy/jit/backend/x86/test/test_zrpy_platform.py +++ b/pypy/jit/backend/x86/test/test_zrpy_platform.py @@ -74,8 +74,8 @@ myjitdriver = jit.JitDriver(greens = [], reds = ['n']) def entrypoint(argv): - myjitdriver.set_param('threshold', 2) - myjitdriver.set_param('trace_eagerness', 0) + jit.set_param(myjitdriver, 'threshold', 2) + jit.set_param(myjitdriver, 'trace_eagerness', 0) n = 16 while n > 0: myjitdriver.can_enter_jit(n=n) diff --git a/pypy/jit/codewriter/call.py b/pypy/jit/codewriter/call.py --- a/pypy/jit/codewriter/call.py +++ b/pypy/jit/codewriter/call.py @@ -42,8 +42,7 @@ except AttributeError: pass - def is_candidate(graph): - return policy.look_inside_graph(graph) + is_candidate = policy.look_inside_graph assert len(self.jitdrivers_sd) > 0 todo = [jd.portal_graph for jd in self.jitdrivers_sd] diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -1007,25 +1007,6 @@ # a jump back to itself and possibly a few bridges ending with finnish. # Only the operations within the loop formed by that single jump will # be counted. - - # XXX hacked version, ignore and remove me when jit-targets is merged. - loops = self.get_all_loops() - loops = [loop for loop in loops if 'Preamble' not in repr(loop)] #XXX - assert len(loops) == 1 - loop, = loops - jumpop = loop.operations[-1] - assert jumpop.getopnum() == rop.JUMP - insns = {} - for op in loop.operations: - opname = op.getopname() - insns[opname] = insns.get(opname, 0) + 1 - return self._check_insns(insns, expected, check) - - def check_simple_loop(self, expected=None, **check): - # Usefull in the simplest case when we have only one trace ending with - # a jump back to itself and possibly a few bridges ending with finnish. - # Only the operations within the loop formed by that single jump will - # be counted. 
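[The check_simple_loop docstring above describes counting only the operations inside the loop formed by the final JUMP. A minimal stand-in for that idea, with plain strings instead of ResOperations and purely illustrative names:

    def count_loop_insns(operations, label_index):
        # tally opnames from the label the closing JUMP targets up to the JUMP
        insns = {}
        for opname in operations[label_index:]:
            insns[opname] = insns.get(opname, 0) + 1
        return insns

    trace = ['int_gt', 'guard_true', 'label', 'int_add', 'int_lt', 'guard_true', 'jump']
    assert count_loop_insns(trace, 2) == {'label': 1, 'int_add': 1, 'int_lt': 1,
                                          'guard_true': 1, 'jump': 1}
]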
loops = self.get_all_loops() assert len(loops) == 1 loop = loops[0] diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -58,6 +58,7 @@ class W_PyCFunctionObject(Wrappable): def __init__(self, space, ml, w_self, w_module=None): self.ml = ml + self.name = rffi.charp2str(self.ml.c_ml_name) self.w_self = w_self self.w_module = w_module @@ -69,7 +70,7 @@ flags &= ~(METH_CLASS | METH_STATIC | METH_COEXIST) if space.is_true(w_kw) and not flags & METH_KEYWORDS: raise OperationError(space.w_TypeError, space.wrap( - rffi.charp2str(self.ml.c_ml_name) + "() takes no keyword arguments")) + self.name + "() takes no keyword arguments")) func = rffi.cast(PyCFunction, self.ml.c_ml_meth) length = space.int_w(space.len(w_args)) @@ -80,13 +81,12 @@ if length == 0: return generic_cpy_call(space, func, w_self, None) raise OperationError(space.w_TypeError, space.wrap( - rffi.charp2str(self.ml.c_ml_name) + "() takes no arguments")) + self.name + "() takes no arguments")) elif flags & METH_O: if length != 1: raise OperationError(space.w_TypeError, space.wrap("%s() takes exactly one argument (%d given)" % ( - rffi.charp2str(self.ml.c_ml_name), - length))) + self.name, length))) w_arg = space.getitem(w_args, space.wrap(0)) return generic_cpy_call(space, func, w_self, w_arg) elif flags & METH_VARARGS: @@ -199,6 +199,7 @@ __call__ = interp2app(cfunction_descr_call), __doc__ = GetSetProperty(W_PyCFunctionObject.get_doc), __module__ = interp_attrproperty_w('w_module', cls=W_PyCFunctionObject), + __name__ = interp_attrproperty('name', cls=W_PyCFunctionObject), ) W_PyCFunctionObject.typedef.acceptable_as_base_class = False diff --git a/pypy/module/cpyext/test/test_methodobject.py b/pypy/module/cpyext/test/test_methodobject.py --- a/pypy/module/cpyext/test/test_methodobject.py +++ b/pypy/module/cpyext/test/test_methodobject.py @@ -63,6 +63,7 @@ ), ]) assert mod.getarg_O(1) == 1 + assert mod.getarg_O.__name__ == "getarg_O" raises(TypeError, mod.getarg_O) raises(TypeError, mod.getarg_O, 1, 1) diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ -185,7 +185,7 @@ class FlowExecutionContext(ExecutionContext): def __init__(self, space, code, globals, constargs={}, outer_func=None, - name=None): + name=None, is_generator=False): ExecutionContext.__init__(self, space) self.code = code @@ -208,6 +208,7 @@ initialblock = SpamBlock(FrameState(frame).copy()) self.pendingblocks = collections.deque([initialblock]) self.graph = FunctionGraph(name or code.co_name, initialblock) + self.is_generator = is_generator make_link = Link # overridable for transition tracking @@ -247,6 +248,8 @@ return outcome, w_exc_cls, w_exc_value def build_flow(self): + if self.is_generator: + self.produce_generator_mark() while self.pendingblocks: block = self.pendingblocks.popleft() frame = self.create_frame() @@ -259,9 +262,15 @@ self.topframeref = jit.non_virtual_ref(frame) self.crnt_frame = frame try: - w_result = frame.dispatch(frame.pycode, - frame.last_instr, - self) + frame.frame_finished_execution = False + while True: + w_result = frame.dispatch(frame.pycode, + frame.last_instr, + self) + if frame.frame_finished_execution: + break + else: + self.generate_yield(frame, w_result) finally: self.crnt_frame = None self.topframeref = old_frameref @@ -307,6 +316,21 @@ del self.recorder self.fixeggblocks() + def 
produce_generator_mark(self): + [initialblock] = self.pendingblocks + initialblock.operations.append( + SpaceOperation('generator_mark', [], Variable())) + + def generate_yield(self, frame, w_result): + assert self.is_generator + self.recorder.crnt_block.operations.append( + SpaceOperation('yield', [w_result], Variable())) + # we must push a dummy value that will be POPped: it's the .send() + # passed into the generator (2.5 feature) + assert sys.version_info >= (2, 5) + frame.pushvalue(None) + frame.last_instr += 1 + def fixeggblocks(self): # EggBlocks reuse the variables of their previous block, # which is deemed not acceptable for simplicity of the operations diff --git a/pypy/objspace/flow/objspace.py b/pypy/objspace/flow/objspace.py --- a/pypy/objspace/flow/objspace.py +++ b/pypy/objspace/flow/objspace.py @@ -8,6 +8,7 @@ from pypy.interpreter.pycode import PyCode, cpython_code_signature from pypy.interpreter.module import Module from pypy.interpreter.error import OperationError +from pypy.interpreter.astcompiler.consts import CO_GENERATOR from pypy.interpreter import pyframe, argument from pypy.objspace.flow.model import * from pypy.objspace.flow import flowcontext, operation, specialcase @@ -247,15 +248,13 @@ return ecls return None - def build_flow(self, func, constargs={}): + def build_flow(self, func, constargs={}, tweak_for_generator=True): """ """ if func.func_doc and func.func_doc.lstrip().startswith('NOT_RPYTHON'): raise Exception, "%r is tagged as NOT_RPYTHON" % (func,) code = func.func_code - if code.co_flags & 32: - # generator - raise TypeError("%r is a generator" % (func,)) + is_generator = bool(code.co_flags & CO_GENERATOR) code = PyCode._from_code(self, code) if func.func_closure is None: cl = None @@ -271,7 +270,8 @@ class outerfunc: # hack closure = cl ec = flowcontext.FlowExecutionContext(self, code, func.func_globals, - constargs, outerfunc, name) + constargs, outerfunc, name, + is_generator) graph = ec.graph graph.func = func # attach a signature and defaults to the graph @@ -291,6 +291,11 @@ e = error.FlowingError(formated) raise error.FlowingError, e, tb checkgraph(graph) + # + if is_generator and tweak_for_generator: + from pypy.translator.generator import tweak_generator_graph + tweak_generator_graph(graph) + # return graph def fixedview(self, w_tuple, expected_length=None): diff --git a/pypy/objspace/flow/test/test_generator.py b/pypy/objspace/flow/test/test_generator.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/flow/test/test_generator.py @@ -0,0 +1,18 @@ +from pypy.objspace.flow.test.test_objspace import Base + + +class TestGenerator(Base): + + def test_simple_generator(self): + def f(n): + i = 0 + while i < n: + yield i + yield i + i += 1 + graph = self.codetest(f, tweak_for_generator=False) + ops = self.all_operations(graph) + assert ops == {'generator_mark': 1, + 'lt': 1, 'is_true': 1, + 'yield': 2, + 'inplace_add': 1} diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -16,14 +16,14 @@ is_operator = getattr(operator, 'is_', operator.eq) # it's not there 2.2 class Base: - def codetest(self, func): + def codetest(self, func, **kwds): import inspect try: func = func.im_func except AttributeError: pass #name = func.func_name - graph = self.space.build_flow(func) + graph = self.space.build_flow(func, **kwds) graph.source = inspect.getsource(func) self.show(graph) return graph @@ -882,12 +882,6 @@ num = 
bytecode_spec.opmap[name] flow_meth_names[num] = locals()['old_' + name] - def test_generator(self): - def f(): - yield 3 - - py.test.raises(TypeError, "self.codetest(f)") - def test_dont_capture_RuntimeError(self): class Foo: def __hash__(self): diff --git a/pypy/rpython/test/test_generator.py b/pypy/rpython/test/test_generator.py new file mode 100644 --- /dev/null +++ b/pypy/rpython/test/test_generator.py @@ -0,0 +1,62 @@ +from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin + + +class BaseTestGenerator(BaseRtypingTest): + + def test_simple_explicit(self): + def g(a, b, c): + yield a + yield b + yield c + def f(): + gen = g(3, 5, 8) + x = gen.next() * 100 + x += gen.next() * 10 + x += gen.next() + return x + res = self.interpret(f, []) + assert res == 358 + + def test_cannot_merge(self): + # merging two different generators is not supported + # right now, but we can use workarounds like here + class MyGen: + _immutable_ = True + def next(self): + raise NotImplementedError + class MyG1(MyGen): + _immutable_ = True + def __init__(self, a): + self._gen = self.g1(a) + def next(self): + return self._gen.next() + @staticmethod + def g1(a): + yield a + 1 + yield a + 2 + class MyG2(MyGen): + _immutable_ = True + def __init__(self): + self._gen = self.g2() + def next(self): + return self._gen.next() + @staticmethod + def g2(): + yield 42 + def f(n): + if n > 0: + gen = MyG1(n) + else: + gen = MyG2() + return gen.next() + res = self.interpret(f, [10]) + assert res == 11 + res = self.interpret(f, [0]) + assert res == 42 + + +class TestLLtype(BaseTestGenerator, LLRtypeMixin): + pass + +class TestOOtype(BaseTestGenerator, OORtypeMixin): + pass diff --git a/pypy/tool/jitlogparser/test/test_modulefinder.py b/pypy/tool/jitlogparser/test/test_modulefinder.py --- a/pypy/tool/jitlogparser/test/test_modulefinder.py +++ b/pypy/tool/jitlogparser/test/test_modulefinder.py @@ -7,12 +7,14 @@ py.test.skip("Specific python 2.6 tests") def test_gather_code_py(): + py.test.skip("XXX broken, fix me") fname = re.__file__ codes = gather_all_code_objs(fname) assert len(codes) == 21 assert sorted(codes.keys()) == [102, 134, 139, 144, 153, 164, 169, 181, 188, 192, 197, 206, 229, 251, 266, 271, 277, 285, 293, 294, 308] def test_load_code(): + py.test.skip("XXX broken, fix me") fname = re.__file__ code = gather_all_code_objs(fname)[144] assert code.co_name == 'sub' diff --git a/pypy/translator/generator.py b/pypy/translator/generator.py new file mode 100644 --- /dev/null +++ b/pypy/translator/generator.py @@ -0,0 +1,166 @@ +from pypy.objspace.flow.model import Block, Link, SpaceOperation, checkgraph +from pypy.objspace.flow.model import Variable, Constant, FunctionGraph +from pypy.translator.unsimplify import insert_empty_startblock +from pypy.translator.unsimplify import split_block +from pypy.translator.simplify import eliminate_empty_blocks +from pypy.tool.sourcetools import func_with_new_name +from pypy.interpreter.argument import Signature + + +class AbstractPosition(object): + _immutable_ = True + _attrs_ = () + + +def tweak_generator_graph(graph): + if not hasattr(graph.func, '_generator_next_method_of_'): + # This is the first copy of the graph. We replace it with + # a small bootstrap graph. + GeneratorIterator = make_generatoriterator_class(graph) + replace_graph_with_bootstrap(GeneratorIterator, graph) + # We attach a 'next' method to the GeneratorIterator class + # that will invoke the real function, based on a second + # copy of the graph. 
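[The comments above describe the two-copy scheme: the first copy of the generator's graph is replaced by a bootstrap that merely packs the arguments into an Entry object, while a second copy becomes the body of next(), resuming from whatever state object it is handed. A rough stand-alone model of that idea in plain Python (illustrative names, not the real flow-graph machinery; the test file further down spells out a fuller hand-written equivalent):

    class Entry(object):                    # initial state: just the arguments
        def __init__(self, n):
            self.g_n = n

    class Resume(object):                   # state saved at the yield point
        def __init__(self, n, i):
            self.g_n, self.g_i = n, i

    def body(state):                        # stands in for the second graph copy
        if isinstance(state, Entry):
            n, i = state.g_n, 0
        else:
            n, i = state.g_n, state.g_i + 1
        if i < n:
            return Resume(n, i), i          # (next state, value to yield)
        raise StopIteration

    class GenIter(object):
        def __init__(self, state):
            self.current = state
        def next(self):
            self.current, value = body(self.current)
            return value

    def counter(n):                         # what the bootstrap graph reduces to
        return GenIter(Entry(n))

    g = counter(3)
    assert [g.next(), g.next(), g.next()] == [0, 1, 2]
]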
+ attach_next_method(GeneratorIterator, graph) + else: + # This is the second copy of the graph. Tweak it. + GeneratorIterator = graph.func._generator_next_method_of_ + tweak_generator_body_graph(GeneratorIterator.Entry, graph) + + +def make_generatoriterator_class(graph): + class GeneratorIterator(object): + class Entry(AbstractPosition): + _immutable_ = True + varnames = get_variable_names(graph.startblock.inputargs) + def __init__(self, entry): + self.current = entry + return GeneratorIterator + +def replace_graph_with_bootstrap(GeneratorIterator, graph): + Entry = GeneratorIterator.Entry + newblock = Block(graph.startblock.inputargs) + v_generator = Variable('generator') + v_entry = Variable('entry') + newblock.operations.append( + SpaceOperation('simple_call', [Constant(Entry)], v_entry)) + assert len(graph.startblock.inputargs) == len(Entry.varnames) + for v, name in zip(graph.startblock.inputargs, Entry.varnames): + newblock.operations.append( + SpaceOperation('setattr', [v_entry, Constant(name), v], + Variable())) + newblock.operations.append( + SpaceOperation('simple_call', [Constant(GeneratorIterator), v_entry], + v_generator)) + newblock.closeblock(Link([v_generator], graph.returnblock)) + graph.startblock = newblock + +def attach_next_method(GeneratorIterator, graph): + func = graph.func + func = func_with_new_name(func, '%s__next' % (func.func_name,)) + func._generator_next_method_of_ = GeneratorIterator + func._always_inline_ = True + # + def next(self): + entry = self.current + self.current = None + (next_entry, return_value) = func(entry) + self.current = next_entry + return return_value + GeneratorIterator.next = next + return func # for debugging + +def get_variable_names(variables): + seen = set() + result = [] + for v in variables: + name = v._name.strip('_') + while name in seen: + name += '_' + result.append('g_' + name) + seen.add(name) + return result + +def _insert_reads(block, varnames): + assert len(varnames) == len(block.inputargs) + v_entry1 = Variable('entry') + for i, name in enumerate(varnames): + block.operations.insert(i, + SpaceOperation('getattr', [v_entry1, Constant(name)], + block.inputargs[i])) + block.inputargs = [v_entry1] + +def tweak_generator_body_graph(Entry, graph): + assert graph.startblock.operations[0].opname == 'generator_mark' + graph.startblock.operations.pop(0) + # + insert_empty_startblock(None, graph) + _insert_reads(graph.startblock, Entry.varnames) + Entry.block = graph.startblock + # + mappings = [Entry] + # + for block in list(graph.iterblocks()): + for exit in block.exits: + if exit.target is graph.returnblock: + exit.args = [Constant(StopIteration), + Constant(StopIteration())] + exit.target = graph.exceptblock + for index in range(len(block.operations)-1, -1, -1): + op = block.operations[index] + if op.opname == 'yield': + [v_yielded_value] = op.args + del block.operations[index] + newlink = split_block(None, block, index) + newblock = newlink.target + # + class Resume(AbstractPosition): + _immutable_ = True + block = newblock + Resume.__name__ = 'Resume%d' % len(mappings) + mappings.append(Resume) + varnames = get_variable_names(newlink.args) + # + _insert_reads(newblock, varnames) + # + v_resume = Variable('resume') + block.operations.append( + SpaceOperation('simple_call', [Constant(Resume)], + v_resume)) + for i, name in enumerate(varnames): + block.operations.append( + SpaceOperation('setattr', [v_resume, Constant(name), + newlink.args[i]], + Variable())) + v_pair = Variable('pair') + block.operations.append( + 
SpaceOperation('newtuple', [v_resume, v_yielded_value], + v_pair)) + newlink.args = [v_pair] + newlink.target = graph.returnblock + # + regular_entry_block = Block([Variable('entry')]) + block = regular_entry_block + for Resume in mappings: + v_check = Variable() + block.operations.append( + SpaceOperation('simple_call', [Constant(isinstance), + block.inputargs[0], + Constant(Resume)], + v_check)) + block.exitswitch = v_check + link1 = Link([block.inputargs[0]], Resume.block) + link1.exitcase = True + nextblock = Block([Variable('entry')]) + link2 = Link([block.inputargs[0]], nextblock) + link2.exitcase = False + block.closeblock(link1, link2) + block = nextblock + block.closeblock(Link([Constant(AssertionError), + Constant(AssertionError("bad generator class"))], + graph.exceptblock)) + graph.startblock = regular_entry_block + graph.signature = Signature(['entry']) + graph.defaults = () + checkgraph(graph) + eliminate_empty_blocks(graph) diff --git a/pypy/translator/test/test_generator.py b/pypy/translator/test/test_generator.py new file mode 100644 --- /dev/null +++ b/pypy/translator/test/test_generator.py @@ -0,0 +1,156 @@ +from pypy.conftest import option +from pypy.objspace.flow.objspace import FlowObjSpace +from pypy.objspace.flow.model import Variable +from pypy.interpreter.argument import Signature +from pypy.translator.translator import TranslationContext +from pypy.translator.generator import make_generatoriterator_class +from pypy.translator.generator import replace_graph_with_bootstrap +from pypy.translator.generator import get_variable_names +from pypy.translator.generator import tweak_generator_body_graph +from pypy.translator.generator import attach_next_method +from pypy.translator.simplify import join_blocks + + +# ____________________________________________________________ + +def f_gen(n): + i = 0 + while i < n: + yield i + i += 1 + +class GeneratorIterator(object): + def __init__(self, entry): + self.current = entry + def next(self): + e = self.current + self.current = None + if isinstance(e, Yield1): + n = e.n_0 + i = e.i_0 + i += 1 + else: + n = e.n_0 + i = 0 + if i < n: + e = Yield1() + e.n_0 = n + e.i_0 = i + self.current = e + return i + raise StopIteration + + def __iter__(self): + return self + +class AbstractPosition(object): + _immutable_ = True +class Entry1(AbstractPosition): + _immutable_ = True +class Yield1(AbstractPosition): + _immutable_ = True + +def f_explicit(n): + e = Entry1() + e.n_0 = n + return GeneratorIterator(e) + +def test_explicit(): + assert list(f_gen(10)) == list(f_explicit(10)) + +def test_get_variable_names(): + lst = get_variable_names([Variable('a'), Variable('b_'), Variable('a')]) + assert lst == ['g_a', 'g_b', 'g_a_'] + +# ____________________________________________________________ + + +class TestGenerator: + + def test_replace_graph_with_bootstrap(self): + def func(n, x, y, z): + yield n + yield n + # + space = FlowObjSpace() + graph = space.build_flow(func, tweak_for_generator=False) + assert graph.startblock.operations[0].opname == 'generator_mark' + GeneratorIterator = make_generatoriterator_class(graph) + replace_graph_with_bootstrap(GeneratorIterator, graph) + if option.view: + graph.show() + block = graph.startblock + ops = block.operations + assert ops[0].opname == 'simple_call' # e = Entry1() + assert ops[1].opname == 'setattr' # e.g_n = n + assert ops[1].args[1].value == 'g_n' + assert ops[2].opname == 'setattr' # e.g_x = x + assert ops[2].args[1].value == 'g_x' + assert ops[3].opname == 'setattr' # e.g_y = y + assert 
ops[3].args[1].value == 'g_y' + assert ops[4].opname == 'setattr' # e.g_z = z + assert ops[4].args[1].value == 'g_z' + assert ops[5].opname == 'simple_call' # g = GeneratorIterator(e) + assert ops[5].args[1] == ops[0].result + assert len(ops) == 6 + assert len(block.exits) == 1 + assert block.exits[0].target is graph.returnblock + + def test_tweak_generator_body_graph(self): + def f(n, x, y, z=3): + z *= 10 + yield n + 1 + z -= 10 + # + space = FlowObjSpace() + graph = space.build_flow(f, tweak_for_generator=False) + class Entry: + varnames = ['g_n', 'g_x', 'g_y', 'g_z'] + tweak_generator_body_graph(Entry, graph) + if option.view: + graph.show() + # XXX how to test directly that the graph is correct? :-( + assert len(graph.startblock.inputargs) == 1 + assert graph.signature == Signature(['entry']) + assert graph.defaults == () + + def test_tweak_generator_graph(self): + def f(n, x, y, z): + z *= 10 + yield n + 1 + z -= 10 + # + space = FlowObjSpace() + graph = space.build_flow(f, tweak_for_generator=False) + GeneratorIterator = make_generatoriterator_class(graph) + replace_graph_with_bootstrap(GeneratorIterator, graph) + func1 = attach_next_method(GeneratorIterator, graph) + if option.view: + graph.show() + # + assert func1._generator_next_method_of_ is GeneratorIterator + assert hasattr(GeneratorIterator, 'next') + # + graph_next = space.build_flow(GeneratorIterator.next.im_func) + join_blocks(graph_next) + if option.view: + graph_next.show() + # + graph1 = space.build_flow(func1, tweak_for_generator=False) + tweak_generator_body_graph(GeneratorIterator.Entry, graph1) + if option.view: + graph1.show() + + def test_automatic(self): + def f(n, x, y, z): + z *= 10 + yield n + 1 + z -= 10 + # + space = FlowObjSpace() + graph = space.build_flow(f) # tweak_for_generator=True + if option.view: + graph.show() + block = graph.startblock + assert len(block.exits) == 1 + assert block.exits[0].target is graph.returnblock From noreply at buildbot.pypy.org Sun Dec 25 11:02:16 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 25 Dec 2011 11:02:16 +0100 (CET) Subject: [pypy-commit] pypy jit-label-counters: use copies of the original trace instead of Part objects Message-ID: <20111225100216.90C3B820CF@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-label-counters Changeset: r50850:093150a1f76e Date: 2011-12-25 11:01 +0100 http://bitbucket.org/pypy/pypy/changeset/093150a1f76e/ Log: use copies of the original trace instead of Part objects diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -3,6 +3,7 @@ from pypy.jit.metainterp.resoperation import opname from pypy.jit.tool.oparser import OpParser from pypy.tool.logparser import parse_log_file, extract_category +from copy import copy class Op(object): bridge = None @@ -387,14 +388,6 @@ loops.append(loop) return log, loops -class Part(object): - def __init__(self, trace, operations): - self.trace = trace - self.operations = operations - - def __len___(self): - return len(self.operations) - def split_trace(trace): labels = [i for i, op in enumerate(trace.operations) if op.name == 'label'] @@ -402,7 +395,9 @@ parts = [] for i in range(len(labels) - 1): start, stop = labels[i], labels[i+1] - parts.append(Part(trace, trace.operations[start : stop + 1])) + part = copy(trace) + part.operations = trace.operations[start : stop + 1] + parts.append(part) return parts From noreply at buildbot.pypy.org Sun Dec 25 14:40:23 2011 From: 
noreply at buildbot.pypy.org (fijal) Date: Sun, 25 Dec 2011 14:40:23 +0100 (CET) Subject: [pypy-commit] pypy jit-label-counters: close merged branch Message-ID: <20111225134023.D0395820CF@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-label-counters Changeset: r50851:8273e78218c5 Date: 2011-12-25 15:38 +0200 http://bitbucket.org/pypy/pypy/changeset/8273e78218c5/ Log: close merged branch From noreply at buildbot.pypy.org Sun Dec 25 14:40:25 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 25 Dec 2011 14:40:25 +0100 (CET) Subject: [pypy-commit] pypy default: Merge jit-label-counters, an attempt to fix jitviewer Message-ID: <20111225134025.23D38820CF@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50852:4c2a8587e2c5 Date: 2011-12-25 15:39 +0200 http://bitbucket.org/pypy/pypy/changeset/4c2a8587e2c5/ Log: Merge jit-label-counters, an attempt to fix jitviewer diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -39,6 +39,7 @@ from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter import longlong from pypy.rlib.rarithmetic import intmask +from pypy.rlib.objectmodel import compute_unique_id # darwin requires the stack to be 16 bytes aligned on calls. Same for gcc 4.5.0, # better safe than sorry @@ -147,12 +148,13 @@ def finish_once(self): if self._debug: debug_start('jit-backend-counts') - for struct in self.loop_run_counters: - if struct.bridge: - prefix = 'bridge ' + for i in range(len(self.loop_run_counters)): + struct = self.loop_run_counters[i] + if not struct.bridge: + prefix = 'TargetToken(%d)' % struct.number else: - prefix = 'loop ' - debug_print(prefix + str(struct.number) + ':' + str(struct.i)) + prefix = 'bridge ' + str(struct.number) + debug_print(prefix + ':' + str(struct.i)) debug_stop('jit-backend-counts') def _build_float_constants(self): @@ -422,8 +424,8 @@ self.setup(looptoken) if log: - self._register_counter(False, looptoken.number) - operations = self._inject_debugging_code(looptoken, operations) + operations = self._inject_debugging_code(looptoken, operations, + False, looptoken.number) regalloc = RegAlloc(self, self.cpu.translate_support_code) # @@ -489,8 +491,8 @@ self.setup(original_loop_token) if log: - self._register_counter(True, descr_number) - operations = self._inject_debugging_code(faildescr, operations) + operations = self._inject_debugging_code(faildescr, operations, + True, descr_number) arglocs = self.rebuild_faillocs_from_descr(failure_recovery) if not we_are_translated(): @@ -597,17 +599,21 @@ return self.mc.materialize(self.cpu.asmmemmgr, allblocks, self.cpu.gc_ll_descr.gcrootmap) - def _register_counter(self, bridge, number): - if self._debug: - # YYY very minor leak -- we need the counters to stay alive - # forever, just because we want to report them at the end - # of the process - struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', - track_allocation=False) - struct.i = 0 - struct.bridge = int(bridge) + def _register_counter(self, bridge, number, token): + # YYY very minor leak -- we need the counters to stay alive + # forever, just because we want to report them at the end + # of the process + struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', + track_allocation=False) + struct.i = 0 + struct.bridge = int(bridge) + if bridge: struct.number = number - self.loop_run_counters.append(struct) + else: + assert token + struct.number = compute_unique_id(token) + 
self.loop_run_counters.append(struct) + return struct def _find_failure_recovery_bytecode(self, faildescr): adr_jump_offset = faildescr._x86_adr_jump_offset @@ -651,27 +657,37 @@ targettoken._x86_loop_code += rawstart self.target_tokens_currently_compiling = None + def _append_debugging_code(self, operations, bridge, number, token): + counter = self._register_counter(bridge, number, token) + c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) + box = BoxInt() + box2 = BoxInt() + ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], + box, descr=self.debug_counter_descr), + ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), + ResOperation(rop.SETFIELD_RAW, [c_adr, box2], + None, descr=self.debug_counter_descr)] + operations.extend(ops) + @specialize.argtype(1) - def _inject_debugging_code(self, looptoken, operations): + def _inject_debugging_code(self, looptoken, operations, bridge, number): if self._debug: # before doing anything, let's increase a counter s = 0 for op in operations: s += op.getopnum() looptoken._x86_debug_checksum = s - c_adr = ConstInt(rffi.cast(lltype.Signed, - self.loop_run_counters[-1])) - box = BoxInt() - box2 = BoxInt() - ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], - box, descr=self.debug_counter_descr), - ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), - ResOperation(rop.SETFIELD_RAW, [c_adr, box2], - None, descr=self.debug_counter_descr)] - if operations[0].getopnum() == rop.LABEL: - operations = [operations[0]] + ops + operations[1:] - else: - operations = ops + operations + + newoperations = [] + if bridge: + self._append_debugging_code(newoperations, bridge, number, + None) + for op in operations: + newoperations.append(op) + if op.getopnum() == rop.LABEL: + self._append_debugging_code(newoperations, bridge, number, + op.getdescr()) + operations = newoperations return operations def _assemble(self, regalloc, operations): diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -519,6 +519,7 @@ from pypy.tool.logparser import parse_log_file, extract_category from pypy.rlib import debug + targettoken, preambletoken = TargetToken(), TargetToken() loop = """ [i0] label(i0, descr=preambletoken) @@ -533,8 +534,8 @@ guard_false(i12) [] jump(i11, descr=targettoken) """ - ops = parse(loop, namespace={'targettoken': TargetToken(), - 'preambletoken': TargetToken()}) + ops = parse(loop, namespace={'targettoken': targettoken, + 'preambletoken': preambletoken}) debug._log = dlog = debug.DebugLog() try: self.cpu.assembler.set_debug(True) @@ -545,11 +546,13 @@ struct = self.cpu.assembler.loop_run_counters[0] assert struct.i == 1 struct = self.cpu.assembler.loop_run_counters[1] - assert struct.i == 10 + assert struct.i == 9 self.cpu.finish_once() finally: debug._log = None - assert ('jit-backend-counts', [('debug_print', 'loop -1:10')]) in dlog + l1 = ('debug_print', preambletoken.repr_of_descr() + ':1') + l2 = ('debug_print', targettoken.repr_of_descr() + ':9') + assert ('jit-backend-counts', [l1, l2]) in dlog def test_debugger_checksum(self): loop = """ diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -3,6 +3,7 @@ from pypy.jit.metainterp.resoperation import opname from pypy.jit.tool.oparser import OpParser from pypy.tool.logparser import parse_log_file, extract_category +from copy import copy class Op(object): bridge = None @@ 
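With the counter change above, the jit-backend-counts debug section prints one line per label, in the form 'TargetToken(<id>):<count>', and one per bridge, in the form 'bridge <n>:<count>'. A rough sketch (not the repository's parse_log_counts, just an illustration of the printed format) of how such lines can be picked apart on the tool side:

def parse_backend_count(line):
    # line looks like 'TargetToken(4421751056):17' or 'bridge 3:42'
    name, count = line.rsplit(':', 1)
    return name.strip(), int(count)

lines = ["TargetToken(4421751056):1", "TargetToken(4421751184):9", "bridge 3:42"]
counts = dict(parse_backend_count(l) for l in lines)
assert counts["bridge 3"] == 42
assert counts["TargetToken(4421751184)"] == 9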
-387,6 +388,18 @@ loops.append(loop) return log, loops +def split_trace(trace): + labels = [i for i, op in enumerate(trace.operations) + if op.name == 'label'] + labels = [0] + labels + [len(trace.operations) - 1] + parts = [] + for i in range(len(labels) - 1): + start, stop = labels[i], labels[i+1] + part = copy(trace) + part.operations = trace.operations[start : stop + 1] + parts.append(part) + + return parts def parse_log_counts(input, loops): if not input: diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -1,6 +1,6 @@ from pypy.tool.jitlogparser.parser import (SimpleParser, TraceForOpcode, Function, adjust_bridges, - import_log, Op) + import_log, split_trace, Op) from pypy.tool.jitlogparser.storage import LoopStorage import py, sys @@ -231,3 +231,21 @@ myrepr = 'c = foobar(a, b, descr=mydescr)' assert op.repr() == myrepr assert op.repr() == myrepr # do it twice + +def test_split_trace(): + loop = parse(''' + [i7] + i9 = int_lt(i7, 1003) + label(i9) + guard_true(i9, descr=) [] + i13 = getfield_raw(151937600, descr=) + label(i13) + i19 = int_lt(i13, 1003) + guard_true(i19, descr=) [] + i113 = getfield_raw(151937600, descr=) + ''') + parts = split_trace(loop) + assert len(parts) == 3 + assert len(parts[0].operations) == 2 + assert len(parts[1].operations) == 4 + assert len(parts[2].operations) == 4 From noreply at buildbot.pypy.org Sun Dec 25 17:06:18 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 25 Dec 2011 17:06:18 +0100 (CET) Subject: [pypy-commit] pypy default: (fijal, arigo) A partial revert of 1e4c74e007f4, reintroduce emitting of Message-ID: <20111225160618.D400382B12@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50853:05c2089f5545 Date: 2011-12-25 18:05 +0200 http://bitbucket.org/pypy/pypy/changeset/05c2089f5545/ Log: (fijal, arigo) A partial revert of 1e4c74e007f4, reintroduce emitting of same_as if some optimizations did not produce the same boxes. diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -265,7 +265,13 @@ self.optimizer.importable_values[value] = imp newvalue = self.optimizer.getvalue(op.result) newresult = newvalue.get_key_box() - assert newresult is op.result or newvalue.is_constant() + # note that emitting here SAME_AS should not happen, but + # in case it does, we would prefer to be suboptimal in asm + # to a fatal RPython exception. 
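The comment above describes a safety net: if re-emitting the short-preamble operations yields a different result box than the original one, the two are aliased and a same_as is emitted instead of asserting. A toy model of that aliasing idea, using plain strings and tuples rather than the real OptValue and ResOperation machinery:

def reimport_box(original_box, reproduced_box, emit, aliases):
    # usual case: re-running the short-preamble op gives back the same box
    if reproduced_box == original_box:
        return
    # the unexpected case: record an alias and emit a same_as so that later
    # operations that still refer to original_box keep working
    aliases[reproduced_box] = original_box
    emit(('same_as', original_box, reproduced_box))

emitted, aliases = [], {}
reimport_box('i5', 'i5', emitted.append, aliases)   # nothing to do
reimport_box('i7', 'i9', emitted.append, aliases)   # mismatch: alias + same_as
assert emitted == [('same_as', 'i7', 'i9')]
assert aliases == {'i9': 'i7'}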
+ if newresult is not op.result and not newvalue.is_constant(): + self.short_boxes.alias(newresult, op.result) + op = ResOperation(rop.SAME_AS, [op.result], newresult) + self.optimizer._newoperations = [op] + self.optimizer._newoperations self.optimizer.flush() self.optimizer.emitting_dissabled = False From noreply at buildbot.pypy.org Sun Dec 25 17:47:44 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 25 Dec 2011 17:47:44 +0100 (CET) Subject: [pypy-commit] pypy better-jit-hooks: make the test pass, by calling the abort function Message-ID: <20111225164744.5017682B12@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: better-jit-hooks Changeset: r50854:be708e46d261 Date: 2011-12-25 18:47 +0200 http://bitbucket.org/pypy/pypy/changeset/be708e46d261/ Log: make the test pass, by calling the abort function diff --git a/pypy/jit/codewriter/policy.py b/pypy/jit/codewriter/policy.py --- a/pypy/jit/codewriter/policy.py +++ b/pypy/jit/codewriter/policy.py @@ -8,11 +8,12 @@ class JitPolicy(object): - def __init__(self): + def __init__(self, portal=None): self.unsafe_loopy_graphs = set() self.supports_floats = False self.supports_longlong = False self.supports_singlefloats = False + self.portal = portal def set_supports_floats(self, flag): self.supports_floats = flag diff --git a/pypy/jit/metainterp/jitprof.py b/pypy/jit/metainterp/jitprof.py --- a/pypy/jit/metainterp/jitprof.py +++ b/pypy/jit/metainterp/jitprof.py @@ -30,9 +30,11 @@ """ def _setup(): + counter_names = [] names = counters.split() for i, name in enumerate(names): globals()[name] = i + counter_names.append(name) global ncounters ncounters = len(names) _setup() diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1795,6 +1795,7 @@ def aborted_tracing(self, reason): self.staticdata.profiler.count(reason) debug_print('~~~ ABORTING TRACING') + self.staticdata.warmrunnerdesc.on_abort(reason) self.staticdata.stats.aborted() def blackhole_if_trace_too_long(self): diff --git a/pypy/jit/metainterp/test/test_jitportal.py b/pypy/jit/metainterp/test/test_jitportal.py --- a/pypy/jit/metainterp/test/test_jitportal.py +++ b/pypy/jit/metainterp/test/test_jitportal.py @@ -1,13 +1,16 @@ from pypy.rlib.jit import JitDriver, JitPortal from pypy.jit.metainterp.test.support import LLJitMixin -from pypy.jit.codewriter.policy import PortalPolicy +from pypy.jit.codewriter.policy import JitPolicy +from pypy.jit.metainterp.jitprof import ABORT_FORCE_QUASIIMMUT class TestJitPortal(LLJitMixin): def test_abort_quasi_immut(self): + reasons = [] + class MyJitPortal(JitPortal): - def abort(self, *args): - xxxx + def on_abort(self, reason): + reasons.append(reason) portal = MyJitPortal() @@ -29,6 +32,6 @@ return total # assert f(100, 7) == 721 - res = self.meta_interp(f, [100, 7], policy=PortalPolicy(portal)) + res = self.meta_interp(f, [100, 7], policy=JitPolicy(portal)) assert res == 721 - + assert reasons == [ABORT_FORCE_QUASIIMMUT] * 2 diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -211,6 +211,7 @@ self.make_driverhook_graphs() self.make_enter_functions() self.rewrite_jit_merge_points(policy) + self.make_portal_callbacks(policy.portal) verbose = False # not self.cpu.translate_support_code self.codewriter.make_jitcodes(verbose=verbose) @@ -424,6 +425,15 @@ for jd in self.jitdrivers_sd: self.make_enter_function(jd) + def 
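The better-jit-hooks change wires an on_abort callback from aborted_tracing through the warmrunnerdesc to a user-supplied portal object passed in via JitPolicy. Mirroring the new test above, registering such a hook looks roughly like this (only meaningful inside a pypy checkout on this branch, where only on_abort is wired up so far):

from pypy.rlib.jit import JitPortal
from pypy.jit.codewriter.policy import JitPolicy

aborts = []

class AbortLogger(JitPortal):
    def on_abort(self, reason):
        # reason is one of the ABORT_* counters from pypy.jit.metainterp.jitprof
        aborts.append(reason)

policy = JitPolicy(AbortLogger())   # hand this policy to meta_interp()
                                    # or to the translation driver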
make_portal_callbacks(self, portal): + if portal is not None: + def on_abort(reason): + portal.on_abort(reason) + else: + def on_abort(reason): + pass + self.on_abort = on_abort + def make_enter_function(self, jd): from pypy.jit.metainterp.warmstate import WarmEnterState state = WarmEnterState(self, jd) From noreply at buildbot.pypy.org Sun Dec 25 17:48:26 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 25 Dec 2011 17:48:26 +0100 (CET) Subject: [pypy-commit] pypy better-jit-hooks: merge default Message-ID: <20111225164826.9C2A682B12@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: better-jit-hooks Changeset: r50855:39462e5a78fc Date: 2011-12-25 18:47 +0200 http://bitbucket.org/pypy/pypy/changeset/39462e5a78fc/ Log: merge default diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -231,8 +231,10 @@ sqlite.sqlite3_result_text.argtypes = [c_void_p, c_char_p, c_int, c_void_p] sqlite.sqlite3_result_text.restype = None -sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] -sqlite.sqlite3_enable_load_extension.restype = c_int +HAS_LOAD_EXTENSION = hasattr(sqlite, "sqlite3_enable_load_extension") +if HAS_LOAD_EXTENSION: + sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] + sqlite.sqlite3_enable_load_extension.restype = c_int ########################################## # END Wrapped SQLite C API and constants @@ -708,13 +710,14 @@ from sqlite3.dump import _iterdump return _iterdump(self) - def enable_load_extension(self, enabled): - self._check_thread() - self._check_closed() + if HAS_LOAD_EXTENSION: + def enable_load_extension(self, enabled): + self._check_thread() + self._check_closed() - rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) - if rc != SQLITE_OK: - raise OperationalError("Error enabling load extension") + rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) + if rc != SQLITE_OK: + raise OperationalError("Error enabling load extension") DML, DQL, DDL = range(3) diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -180,7 +180,12 @@ if name is None: name = pyobj.func_name if signature is None: - signature = cpython_code_signature(pyobj.func_code) + if hasattr(pyobj, '_generator_next_method_of_'): + from pypy.interpreter.argument import Signature + signature = Signature(['entry']) # haaaaaack + defaults = () + else: + signature = cpython_code_signature(pyobj.func_code) if defaults is None: defaults = pyobj.func_defaults self.name = name diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -39,6 +39,7 @@ from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter import longlong from pypy.rlib.rarithmetic import intmask +from pypy.rlib.objectmodel import compute_unique_id # darwin requires the stack to be 16 bytes aligned on calls. 
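The _sqlite3 change in the merge above only exposes enable_load_extension when the underlying library actually provides sqlite3_enable_load_extension, since some builds compile it out. The same hasattr probe works for any optional ctypes symbol; a small standalone sketch, where the library and symbol names are just examples:

from ctypes import CDLL, c_int, c_void_p
from ctypes.util import find_library

def load_optional(libname, symbol, argtypes, restype):
    path = find_library(libname)
    if path is None:
        return None
    lib = CDLL(path)
    if not hasattr(lib, symbol):        # symbol compiled out of this build
        return None
    fn = getattr(lib, symbol)
    fn.argtypes = argtypes
    fn.restype = restype
    return fn

enable_ext = load_optional("sqlite3", "sqlite3_enable_load_extension",
                           [c_void_p, c_int], c_int)
if enable_ext is None:
    print("extension loading not available in this sqlite3 build")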
Same for gcc 4.5.0, # better safe than sorry @@ -147,12 +148,13 @@ def finish_once(self): if self._debug: debug_start('jit-backend-counts') - for struct in self.loop_run_counters: - if struct.bridge: - prefix = 'bridge ' + for i in range(len(self.loop_run_counters)): + struct = self.loop_run_counters[i] + if not struct.bridge: + prefix = 'TargetToken(%d)' % struct.number else: - prefix = 'loop ' - debug_print(prefix + str(struct.number) + ':' + str(struct.i)) + prefix = 'bridge ' + str(struct.number) + debug_print(prefix + ':' + str(struct.i)) debug_stop('jit-backend-counts') def _build_float_constants(self): @@ -422,8 +424,8 @@ self.setup(looptoken) if log: - self._register_counter(False, looptoken.number) - operations = self._inject_debugging_code(looptoken, operations) + operations = self._inject_debugging_code(looptoken, operations, + False, looptoken.number) regalloc = RegAlloc(self, self.cpu.translate_support_code) # @@ -489,8 +491,8 @@ self.setup(original_loop_token) if log: - self._register_counter(True, descr_number) - operations = self._inject_debugging_code(faildescr, operations) + operations = self._inject_debugging_code(faildescr, operations, + True, descr_number) arglocs = self.rebuild_faillocs_from_descr(failure_recovery) if not we_are_translated(): @@ -597,17 +599,21 @@ return self.mc.materialize(self.cpu.asmmemmgr, allblocks, self.cpu.gc_ll_descr.gcrootmap) - def _register_counter(self, bridge, number): - if self._debug: - # YYY very minor leak -- we need the counters to stay alive - # forever, just because we want to report them at the end - # of the process - struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', - track_allocation=False) - struct.i = 0 - struct.bridge = int(bridge) + def _register_counter(self, bridge, number, token): + # YYY very minor leak -- we need the counters to stay alive + # forever, just because we want to report them at the end + # of the process + struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', + track_allocation=False) + struct.i = 0 + struct.bridge = int(bridge) + if bridge: struct.number = number - self.loop_run_counters.append(struct) + else: + assert token + struct.number = compute_unique_id(token) + self.loop_run_counters.append(struct) + return struct def _find_failure_recovery_bytecode(self, faildescr): adr_jump_offset = faildescr._x86_adr_jump_offset @@ -651,27 +657,37 @@ targettoken._x86_loop_code += rawstart self.target_tokens_currently_compiling = None + def _append_debugging_code(self, operations, bridge, number, token): + counter = self._register_counter(bridge, number, token) + c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) + box = BoxInt() + box2 = BoxInt() + ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], + box, descr=self.debug_counter_descr), + ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), + ResOperation(rop.SETFIELD_RAW, [c_adr, box2], + None, descr=self.debug_counter_descr)] + operations.extend(ops) + @specialize.argtype(1) - def _inject_debugging_code(self, looptoken, operations): + def _inject_debugging_code(self, looptoken, operations, bridge, number): if self._debug: # before doing anything, let's increase a counter s = 0 for op in operations: s += op.getopnum() looptoken._x86_debug_checksum = s - c_adr = ConstInt(rffi.cast(lltype.Signed, - self.loop_run_counters[-1])) - box = BoxInt() - box2 = BoxInt() - ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], - box, descr=self.debug_counter_descr), - ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), - ResOperation(rop.SETFIELD_RAW, [c_adr, box2], - None, 
descr=self.debug_counter_descr)] - if operations[0].getopnum() == rop.LABEL: - operations = [operations[0]] + ops + operations[1:] - else: - operations = ops + operations + + newoperations = [] + if bridge: + self._append_debugging_code(newoperations, bridge, number, + None) + for op in operations: + newoperations.append(op) + if op.getopnum() == rop.LABEL: + self._append_debugging_code(newoperations, bridge, number, + op.getdescr()) + operations = newoperations return operations def _assemble(self, regalloc, operations): diff --git a/pypy/jit/backend/x86/jump.py b/pypy/jit/backend/x86/jump.py --- a/pypy/jit/backend/x86/jump.py +++ b/pypy/jit/backend/x86/jump.py @@ -17,7 +17,10 @@ key = src._getregkey() if key in srccount: if key == dst_locations[i]._getregkey(): - srccount[key] = -sys.maxint # ignore a move "x = x" + # ignore a move "x = x" + # setting any "large enough" negative value is ok, but + # be careful of overflows, don't use -sys.maxint + srccount[key] = -len(dst_locations) - 1 pending_dests -= 1 else: srccount[key] += 1 diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -20,6 +20,11 @@ def regalloc_pop(self, loc): self.ops.append(('pop', loc)) + def regalloc_immedmem2mem(self, from_loc, to_loc): + assert isinstance(from_loc, ConstFloatLoc) + assert isinstance(to_loc, StackLoc) + self.ops.append(('immedmem2mem', from_loc, to_loc)) + def got(self, expected): print '------------------------ comparing ---------------------------' for op1, op2 in zip(self.ops, expected): @@ -244,6 +249,13 @@ else: return pick1() # + def pick2c(): + n = random.randrange(-2000, 500) + if n >= 0: + return ConstFloatLoc(n) # n is the address, not really used here + else: + return pick2() + # def pick_dst(fn, count, seen): result = [] while len(result) < count: @@ -280,12 +292,12 @@ if loc.get_width() > WORD: stack[loc.value+WORD] = 'value-hiword-%d' % i else: - assert isinstance(loc, ImmedLoc) + assert isinstance(loc, (ImmedLoc, ConstFloatLoc)) return regs1, regs2, stack # for i in range(500): seen = {} - src_locations2 = [pick2() for i in range(4)] + src_locations2 = [pick2c() for i in range(4)] dst_locations2 = pick_dst(pick2, 4, seen) src_locations1 = [pick1c() for i in range(5)] dst_locations1 = pick_dst(pick1, 5, seen) @@ -312,9 +324,15 @@ return got if isinstance(loc, ImmedLoc): return 'const-%d' % loc.value + if isinstance(loc, ConstFloatLoc): + got = 'constfloat-@%d' % loc.value + if loc.get_width() > WORD: + got = (got, 'constfloat-next-@%d' % loc.value) + return got assert 0, loc # def write(loc, newvalue): + assert (type(newvalue) is tuple) == (loc.get_width() > WORD) if isinstance(loc, RegLoc): if loc.is_xmm: regs2[loc.value] = newvalue @@ -337,10 +355,14 @@ for op in assembler.ops: if op[0] == 'mov': src, dst = op[1:] - assert isinstance(src, (RegLoc, StackLoc, ImmedLoc)) - assert isinstance(dst, (RegLoc, StackLoc)) - assert not (isinstance(src, StackLoc) and - isinstance(dst, StackLoc)) + if isinstance(src, ConstFloatLoc): + assert isinstance(dst, RegLoc) + assert dst.is_xmm + else: + assert isinstance(src, (RegLoc, StackLoc, ImmedLoc)) + assert isinstance(dst, (RegLoc, StackLoc)) + assert not (isinstance(src, StackLoc) and + isinstance(dst, StackLoc)) write(dst, read(src)) elif op[0] == 'push': src, = op[1:] @@ -350,6 +372,11 @@ dst, = op[1:] assert isinstance(dst, (RegLoc, StackLoc)) write(dst, extrapushes.pop()) + elif op[0] == 'immedmem2mem': + src, dst 
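The jump.py fix above is about the sentinel that marks a "x = x" move: RPython integers are plain machine words, so the old -sys.maxint sentinel, once decremented a couple of times by the remapping loop, wraps around to a huge positive count (the new test_overflow_bug notes that -sys.maxint-2 overflows to sys.maxint). Any value more negative than the number of destinations is "negative enough" and can never wrap. A small illustration; the wrap helper only emulates machine-word behaviour for demonstration and is not part of the backend:

import sys

def as_machine_int(x, bits=64):
    # emulate signed machine-word wrap-around (RPython/C behaviour);
    # ordinary CPython ints never overflow, so do it by hand
    mask = (1 << bits) - 1
    x &= mask
    return x - (1 << bits) if x >= (1 << (bits - 1)) else x

maxint = getattr(sys, 'maxint', 2 ** 63 - 1)   # sys.maxint exists on Python 2 only
assert as_machine_int(-maxint - 2) == maxint   # the old sentinel wraps to +maxint

dst_locations = [None] * 5
safe_sentinel = -len(dst_locations) - 1        # new sentinel: negative enough
assert as_machine_int(safe_sentinel - len(dst_locations)) < 0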
= op[1:] + assert isinstance(src, ConstFloatLoc) + assert isinstance(dst, StackLoc) + write(dst, read(src, 8)) else: assert 0, "unknown op: %r" % (op,) assert not extrapushes @@ -358,3 +385,32 @@ assert read(loc, WORD) == src_values1[i] for i, loc in enumerate(dst_locations2): assert read(loc, 8) == src_values2[i] + + +def test_overflow_bug(): + CASE = [ + (-144, -248), # \ cycle + (-248, -144), # / + (-488, -416), # \ two usages of -488 + (-488, -480), # / + (-488, -488), # - one self-application of -488 + ] + class FakeAssembler: + def regalloc_mov(self, src, dst): + print "mov", src, dst + def regalloc_push(self, x): + print "push", x + def regalloc_pop(self, x): + print "pop", x + def regalloc_immedmem2mem(self, x, y): + print "?????????????????????????" + def main(): + srclocs = [StackLoc(9999, x, 'i') for x,y in CASE] + dstlocs = [StackLoc(9999, y, 'i') for x,y in CASE] + remap_frame_layout(FakeAssembler(), srclocs, dstlocs, eax) + # it works when run directly + main() + # but it used to crash when translated, + # because of a -sys.maxint-2 overflowing to sys.maxint + from pypy.rpython.test.test_llinterp import interpret + interpret(main, []) diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -519,16 +519,23 @@ from pypy.tool.logparser import parse_log_file, extract_category from pypy.rlib import debug + targettoken, preambletoken = TargetToken(), TargetToken() loop = """ [i0] - label(i0, descr=targettoken) + label(i0, descr=preambletoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1, descr=targettoken) + label(i1, descr=targettoken) + debug_merge_point('xyz', 0) + i11 = int_add(i1, 1) + i12 = int_ge(i11, 10) + guard_false(i12) [] + jump(i11, descr=targettoken) """ - ops = parse(loop, namespace={'targettoken': TargetToken()}) + ops = parse(loop, namespace={'targettoken': targettoken, + 'preambletoken': preambletoken}) debug._log = dlog = debug.DebugLog() try: self.cpu.assembler.set_debug(True) @@ -537,11 +544,15 @@ self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] - assert struct.i == 10 + assert struct.i == 1 + struct = self.cpu.assembler.loop_run_counters[1] + assert struct.i == 9 self.cpu.finish_once() finally: debug._log = None - assert ('jit-backend-counts', [('debug_print', 'loop -1:10')]) in dlog + l1 = ('debug_print', preambletoken.repr_of_descr() + ':1') + l2 = ('debug_print', targettoken.repr_of_descr() + ':9') + assert ('jit-backend-counts', [l1, l2]) in dlog def test_debugger_checksum(self): loop = """ diff --git a/pypy/jit/backend/x86/test/test_zrpy_platform.py b/pypy/jit/backend/x86/test/test_zrpy_platform.py --- a/pypy/jit/backend/x86/test/test_zrpy_platform.py +++ b/pypy/jit/backend/x86/test/test_zrpy_platform.py @@ -74,8 +74,8 @@ myjitdriver = jit.JitDriver(greens = [], reds = ['n']) def entrypoint(argv): - myjitdriver.set_param('threshold', 2) - myjitdriver.set_param('trace_eagerness', 0) + jit.set_param(myjitdriver, 'threshold', 2) + jit.set_param(myjitdriver, 'trace_eagerness', 0) n = 16 while n > 0: myjitdriver.can_enter_jit(n=n) diff --git a/pypy/jit/codewriter/call.py b/pypy/jit/codewriter/call.py --- a/pypy/jit/codewriter/call.py +++ b/pypy/jit/codewriter/call.py @@ -42,8 +42,7 @@ except AttributeError: pass - def is_candidate(graph): - return policy.look_inside_graph(graph) + is_candidate = 
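The test_zrpy_platform hunk above reflects an API move: JIT parameters are now set through the module-level jit.set_param helper instead of a method on the driver instance. A short sketch mirroring the updated call sites (only meaningful with the pypy sources on the path; untranslated, the driver hints are no-ops):

from pypy.rlib import jit

mydriver = jit.JitDriver(greens=[], reds=['n'])

def count_down(n):
    # parameters go through the module-level helper, not mydriver.set_param
    jit.set_param(mydriver, 'threshold', 2)
    jit.set_param(mydriver, 'trace_eagerness', 0)
    while n > 0:
        mydriver.can_enter_jit(n=n)
        mydriver.jit_merge_point(n=n)
        n -= 1
    return n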
policy.look_inside_graph assert len(self.jitdrivers_sd) > 0 todo = [jd.portal_graph for jd in self.jitdrivers_sd] diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -38,7 +38,8 @@ else: extraprocedures = [procedure] metainterp_sd.stats.view(errmsg=errmsg, - extraprocedures=extraprocedures) + extraprocedures=extraprocedures, + metainterp_sd=metainterp_sd) def create_empty_loop(metainterp, name_prefix=''): name = metainterp.staticdata.stats.name_for_new_loop() @@ -105,7 +106,7 @@ def compile_loop(metainterp, greenkey, start, inputargs, jumpargs, - start_resumedescr, full_preamble_needed=True): + resume_at_jump_descr, full_preamble_needed=True): """Try to compile a new procedure by closing the current history back to the first operation. """ @@ -126,10 +127,11 @@ part = create_empty_loop(metainterp) part.inputargs = inputargs[:] h_ops = history.operations - part.start_resumedescr = start_resumedescr + part.resume_at_jump_descr = resume_at_jump_descr part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ [h_ops[i].clone() for i in range(start, len(h_ops))] + \ - [ResOperation(rop.JUMP, jumpargs, None, descr=jitcell_token)] + [ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token)] + try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: @@ -184,7 +186,7 @@ def compile_retrace(metainterp, greenkey, start, inputargs, jumpargs, - start_resumedescr, partial_trace, resumekey): + resume_at_jump_descr, partial_trace, resumekey): """Try to compile a new procedure by closing the current history back to the first operation. """ @@ -200,7 +202,7 @@ part = create_empty_loop(metainterp) part.inputargs = inputargs[:] - part.start_resumedescr = start_resumedescr + part.resume_at_jump_descr = resume_at_jump_descr h_ops = history.operations part.operations = [partial_trace.operations[-1]] + \ @@ -212,13 +214,12 @@ try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: - #return None # XXX: Dissable for now # Fall back on jumping to preamble target_token = label.getdescr() assert isinstance(target_token, TargetToken) assert target_token.exported_state part.operations = [orignial_label] + \ - [ResOperation(rop.JUMP, target_token.exported_state.jump_args, + [ResOperation(rop.JUMP, inputargs[:], None, descr=loop_jitcell_token)] try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts, @@ -751,7 +752,7 @@ metainterp_sd.stats.add_jitcell_token(jitcell_token) -def compile_trace(metainterp, resumekey, start_resumedescr=None): +def compile_trace(metainterp, resumekey, resume_at_jump_descr=None): """Try to compile a new bridge leading from the beginning of the history to some existing place. 
""" @@ -767,7 +768,7 @@ # clone ops, as optimize_bridge can mutate the ops new_trace.operations = [op.clone() for op in metainterp.history.operations] - new_trace.start_resumedescr = start_resumedescr + new_trace.resume_at_jump_descr = resume_at_jump_descr metainterp_sd = metainterp.staticdata state = metainterp.jitdriver_sd.warmstate if isinstance(resumekey, ResumeAtPositionDescr): diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -12,7 +12,7 @@ def get_display_text(self): return None -def display_procedures(procedures, errmsg=None, highlight_procedures={}): +def display_procedures(procedures, errmsg=None, highlight_procedures={}, metainterp_sd=None): graphs = [(procedure, highlight_procedures.get(procedure, 0)) for procedure in procedures] for graph, highlight in graphs: @@ -20,7 +20,7 @@ if is_interesting_guard(op): graphs.append((SubGraph(op.getdescr()._debug_suboperations), highlight)) - graphpage = ResOpGraphPage(graphs, errmsg) + graphpage = ResOpGraphPage(graphs, errmsg, metainterp_sd) graphpage.display() def is_interesting_guard(op): @@ -36,8 +36,8 @@ class ResOpGraphPage(GraphPage): - def compute(self, graphs, errmsg=None): - resopgen = ResOpGen() + def compute(self, graphs, errmsg=None, metainterp_sd=None): + resopgen = ResOpGen(metainterp_sd) for graph, highlight in graphs: resopgen.add_graph(graph, highlight) if errmsg: @@ -50,13 +50,14 @@ CLUSTERING = True BOX_COLOR = (128, 0, 96) - def __init__(self): + def __init__(self, metainterp_sd=None): self.graphs = [] self.highlight_graphs = {} self.block_starters = {} # {graphindex: {set-of-operation-indices}} self.all_operations = {} self.errmsg = None self.target_tokens = {} + self.metainterp_sd = metainterp_sd def op_name(self, graphindex, opindex): return 'g%dop%d' % (graphindex, opindex) @@ -164,7 +165,14 @@ opindex = opstartindex while True: op = operations[opindex] - lines.append(op.repr(graytext=True)) + op_repr = op.repr(graytext=True) + if op.getopnum() == rop.DEBUG_MERGE_POINT: + jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] + if jd_sd._get_printable_location_ptr: + s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = s.replace(',', '.') # we use comma for argument splitting + op_repr = "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + lines.append(op_repr) if is_interesting_guard(op): tgt = op.getdescr()._debug_suboperations[0] tgt_g, tgt_i = self.all_operations[tgt] diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -705,6 +705,9 @@ self.virtual_state = None self.exported_state = None + + def repr_of_descr(self): + return 'TargetToken(%d)' % compute_unique_id(self) class TreeLoop(object): inputargs = None @@ -712,7 +715,7 @@ call_pure_results = None logops = None quasi_immutable_deps = None - start_resumedescr = None + resume_at_jump_descr = None def _token(*args): raise Exception("TreeLoop.token is killed") @@ -1004,25 +1007,6 @@ # a jump back to itself and possibly a few bridges ending with finnish. # Only the operations within the loop formed by that single jump will # be counted. - - # XXX hacked version, ignore and remove me when jit-targets is merged. 
- loops = self.get_all_loops() - loops = [loop for loop in loops if 'Preamble' not in repr(loop)] #XXX - assert len(loops) == 1 - loop, = loops - jumpop = loop.operations[-1] - assert jumpop.getopnum() == rop.JUMP - insns = {} - for op in loop.operations: - opname = op.getopname() - insns[opname] = insns.get(opname, 0) + 1 - return self._check_insns(insns, expected, check) - - def check_simple_loop(self, expected=None, **check): - # Usefull in the simplest case when we have only one trace ending with - # a jump back to itself and possibly a few bridges ending with finnish. - # Only the operations within the loop formed by that single jump will - # be counted. loops = self.get_all_loops() assert len(loops) == 1 loop = loops[0] @@ -1081,7 +1065,7 @@ if option.view: self.view() - def view(self, errmsg=None, extraprocedures=[]): + def view(self, errmsg=None, extraprocedures=[], metainterp_sd=None): from pypy.jit.metainterp.graphpage import display_procedures procedures = self.get_all_loops()[:] for procedure in extraprocedures: @@ -1093,7 +1077,7 @@ if hasattr(procedure, '_looptoken_number') and ( procedure._looptoken_number in self.invalidated_token_numbers): highlight_procedures.setdefault(procedure, 2) - display_procedures(procedures, errmsg, highlight_procedures) + display_procedures(procedures, errmsg, highlight_procedures, metainterp_sd) # ---------------------------------------------------------------- diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py --- a/pypy/jit/metainterp/logger.py +++ b/pypy/jit/metainterp/logger.py @@ -5,7 +5,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp.history import Const, ConstInt, Box, \ - BoxInt, ConstFloat, BoxFloat, AbstractFailDescr + BoxInt, ConstFloat, BoxFloat, AbstractFailDescr, TargetToken class Logger(object): @@ -135,6 +135,13 @@ fail_args = '' return s_offset + res + op.getopname() + '(' + args + ')' + fail_args + def _log_inputarg_setup_ops(self, op): + target_token = op.getdescr() + if isinstance(target_token, TargetToken): + if target_token.exported_state: + for op in target_token.exported_state.inputarg_setup_ops: + debug_print(' ' + self.repr_of_resop(op)) + def _log_operations(self, inputargs, operations, ops_offset): if not have_debug_prints(): return @@ -146,6 +153,8 @@ for i in range(len(operations)): op = operations[i] debug_print(self.repr_of_resop(operations[i], ops_offset)) + if op.getopnum() == rop.LABEL: + self._log_inputarg_setup_ops(op) if ops_offset and None in ops_offset: offset = ops_offset[None] debug_print("+%d: --end of the loop--" % offset) diff --git a/pypy/jit/metainterp/optimize.py b/pypy/jit/metainterp/optimize.py --- a/pypy/jit/metainterp/optimize.py +++ b/pypy/jit/metainterp/optimize.py @@ -5,58 +5,3 @@ """Raised when the optimize*.py detect that the loop that we are trying to build cannot possibly make sense as a long-running loop (e.g. it cannot run 2 complete iterations).""" - -class RetraceLoop(JitException): - """ Raised when inlining a short preamble resulted in an - InvalidLoop. This means the optimized loop is too specialized - to be useful here, so we trace it again and produced a second - copy specialized in some different way. 
- """ - -# ____________________________________________________________ - -def optimize_loop(metainterp_sd, old_loop_tokens, loop, enable_opts): - debug_start("jit-optimize") - try: - return _optimize_loop(metainterp_sd, old_loop_tokens, loop, - enable_opts) - finally: - debug_stop("jit-optimize") - -def _optimize_loop(metainterp_sd, old_loop_tokens, loop, enable_opts): - from pypy.jit.metainterp.optimizeopt import optimize_loop_1 - loop.logops = metainterp_sd.logger_noopt.log_loop(loop.inputargs, - loop.operations) - # XXX do we really still need a list? - if old_loop_tokens: - return old_loop_tokens[0] - optimize_loop_1(metainterp_sd, loop, enable_opts) - return None - -# ____________________________________________________________ - -def optimize_bridge(metainterp_sd, old_loop_tokens, bridge, enable_opts, - inline_short_preamble=True, retraced=False): - debug_start("jit-optimize") - try: - return _optimize_bridge(metainterp_sd, old_loop_tokens, bridge, - enable_opts, - inline_short_preamble, retraced) - finally: - debug_stop("jit-optimize") - -def _optimize_bridge(metainterp_sd, old_loop_tokens, bridge, enable_opts, - inline_short_preamble, retraced=False): - from pypy.jit.metainterp.optimizeopt import optimize_bridge_1 - bridge.logops = metainterp_sd.logger_noopt.log_loop(bridge.inputargs, - bridge.operations) - if old_loop_tokens: - old_loop_token = old_loop_tokens[0] - bridge.operations[-1].setdescr(old_loop_token) # patch jump target - optimize_bridge_1(metainterp_sd, bridge, enable_opts, - inline_short_preamble, retraced) - return old_loop_tokens[0] - #return bridge.operations[-1].getdescr() - return None - -# ____________________________________________________________ diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -51,34 +51,6 @@ return optimizations, unroll - -def optimize_loop_1(metainterp_sd, loop, enable_opts, - inline_short_preamble=True, retraced=False): - """Optimize loop.operations to remove internal overheadish operations. - """ - - optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts, - inline_short_preamble, retraced) - if unroll: - optimize_unroll(metainterp_sd, loop, optimizations) - else: - optimizer = Optimizer(metainterp_sd, loop, optimizations) - optimizer.propagate_all_forward() - -def optimize_bridge_1(metainterp_sd, bridge, enable_opts, - inline_short_preamble=True, retraced=False): - """The same, but for a bridge. """ - enable_opts = enable_opts.copy() - try: - del enable_opts['unroll'] - except KeyError: - pass - optimize_loop_1(metainterp_sd, bridge, enable_opts, - inline_short_preamble, retraced) - -if __name__ == '__main__': - print ALL_OPTS_NAMES - def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True): """Optimize loop.operations to remove internal overheadish operations. 
""" @@ -96,3 +68,6 @@ finally: debug_stop("jit-optimize") +if __name__ == '__main__': + print ALL_OPTS_NAMES + diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -453,6 +453,7 @@ def clear_newoperations(self): self._newoperations = [] + self.seen_results = {} def make_equal_to(self, box, value, replace=False): assert isinstance(value, OptValue) diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -35,6 +35,9 @@ pass def optimize_LABEL(self, op): + descr = op.getdescr() + if isinstance(descr, JitCellToken): + return self.optimize_JUMP(op.copy_and_change(rop.JUMP)) self.last_label_descr = op.getdescr() self.emit_operation(op) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -9,14 +9,14 @@ class BaseTestMultiLabel(BaseTest): enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" - def optimize_loop(self, ops, expected): + def optimize_loop(self, ops, expected, expected_shorts=None): loop = self.parse(ops) if expected != "crash!": expected = self.parse(expected) part = TreeLoop('part') part.inputargs = loop.inputargs - part.start_resumedescr = FakeDescrWithSnapshot() + part.resume_at_jump_descr = FakeDescrWithSnapshot() token = loop.original_jitcell_token optimized = TreeLoop('optimized') @@ -33,15 +33,17 @@ if nxt < len(loop.operations): label = loop.operations[nxt] assert label.getopnum() == rop.LABEL - jumpop = ResOperation(rop.JUMP, label.getarglist(), - None, descr=token) - operations.append(jumpop) + if label.getdescr() is None: + label.setdescr(token) + operations.append(label) part.operations = operations + self._do_optimize_loop(part, None) if part.operations[-1].getopnum() == rop.LABEL: last_label = [part.operations.pop()] else: last_label = [] + optimized.operations.extend(part.operations) prv = nxt + 1 @@ -54,9 +56,32 @@ print 'Failed!' 
print + shorts = [op.getdescr().short_preamble + for op in optimized.operations + if op.getopnum() == rop.LABEL] + + if expected_shorts: + for short in shorts: + print + print "Short preamble:" + print '\n'.join([str(o) for o in short]) + + assert expected != "crash!", "should have raised an exception" self.assert_equal(optimized, expected) + if expected_shorts: + assert len(shorts) == len(expected_shorts) + for short, expected_short in zip(shorts, expected_shorts): + expected_short = self.parse(expected_short) + short_preamble = TreeLoop('short preamble') + assert short[0].getopnum() == rop.LABEL + short_preamble.inputargs = short[0].getarglist() + short_preamble.operations = short + self.assert_equal(short_preamble, expected_short, + text_right='expected short preamble') + + return optimized def test_simple(self): @@ -194,8 +219,168 @@ """ with raises(InvalidLoop): self.optimize_loop(ops, ops) - + + def test_two_intermediate_labels_basic_1(self): + ops = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = int_add(i1, i3) + label(p1, i4) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + expected = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1, i2) + i4 = int_add(i1, i2) + label(p1, i4) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + short1 = """ + [p1, i1] + label(p1, i1) + i2 = getfield_gc(p1, descr=valuedescr) + jump(p1, i1, i2) + """ + short2 = """ + [p1, i1] + label(p1, i1) + jump(p1, i1) + """ + self.optimize_loop(ops, expected, expected_shorts=[short1, short2]) + + def test_two_intermediate_labels_basic_2(self): + ops = """ + [p1, i1] + i2 = int_add(i1, 1) + label(p1, i1) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = int_add(i1, i3) + label(p1, i4) + i5 = getfield_gc(p1, descr=valuedescr) + i6 = int_add(i4, i5) + jump(p1, i6) + """ + expected = """ + [p1, i1] + i2 = int_add(i1, 1) + label(p1, i1) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = int_add(i1, i3) + label(p1, i4, i3) + i6 = int_add(i4, i3) + jump(p1, i6, i3) + """ + short1 = """ + [p1, i1] + label(p1, i1) + jump(p1, i1) + """ + short2 = """ + [p1, i1] + label(p1, i1) + i2 = getfield_gc(p1, descr=valuedescr) + jump(p1, i1, i2) + """ + self.optimize_loop(ops, expected, expected_shorts=[short1, short2]) + + def test_two_intermediate_labels_both(self): + ops = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = int_add(i1, i3) + label(p1, i4) + i5 = getfield_gc(p1, descr=valuedescr) + i6 = int_mul(i4, i5) + jump(p1, i6) + """ + expected = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1, i2) + i4 = int_add(i1, i2) + label(p1, i4, i2) + i6 = int_mul(i4, i2) + jump(p1, i6, i2) + """ + short = """ + [p1, i1] + label(p1, i1) + i2 = getfield_gc(p1, descr=valuedescr) + jump(p1, i1, i2) + """ + self.optimize_loop(ops, expected, expected_shorts=[short, short]) + + def test_import_across_multiple_labels_basic(self): + # Not supported, juts make sure we get a functional trace + ops = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1) + i3 = int_add(i1, 1) + label(p1, i1) + i4 = getfield_gc(p1, descr=valuedescr) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + self.optimize_loop(ops, ops) + + def test_import_across_multiple_labels_with_duplication(self): + # Not supported, juts make sure we get a functional trace + ops = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i2) + i3 = int_add(i2, 1) + label(p1, i2) + i4 = 
getfield_gc(p1, descr=valuedescr) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + exported = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + i6 = same_as(i2) + label(p1, i2) + i3 = int_add(i2, 1) + label(p1, i2) + i4 = getfield_gc(p1, descr=valuedescr) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + self.optimize_loop(ops, exported) + def test_import_virtual_across_multiple_labels(self): + ops = """ + [p0, i1] + i1a = int_add(i1, 1) + pv = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(pv, i1a, descr=valuedescr) + label(pv, i1) + i2 = int_mul(i1, 3) + label(pv, i2) + i3 = getfield_gc(pv, descr=valuedescr) + i4 = int_add(i3, i2) + jump(pv, i4) + """ + expected = """ + [p0, i1] + i1a = int_add(i1, 1) + i5 = same_as(i1a) + label(i1a, i1) + i2 = int_mul(i1, 3) + label(i1a, i2) + i4 = int_add(i1a, i2) + jump(i1a, i4) + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestMultiLabel, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -4,7 +4,7 @@ LLtypeMixin, BaseTest, Storage, _sortboxes, convert_old_style_to_targets) import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt import pypy.jit.metainterp.optimizeopt.virtualize as virtualize -from pypy.jit.metainterp.optimizeopt import optimize_loop_1, ALL_OPTS_DICT, build_opt_chain +from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT, build_opt_chain from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken @@ -4211,7 +4211,6 @@ preamble = """ [p0] i0 = strlen(p0) - i3 = same_as(i0) # Should be killed by backend jump(p0) """ expected = """ @@ -5668,8 +5667,7 @@ p3 = newstr(i3) copystrcontent(p1, p3, 0, 0, i1) copystrcontent(p2, p3, 0, i1, i2) - i7 = same_as(i2) - jump(p2, p3, i7) + jump(p2, p3, i2) """ expected = """ [p1, p2, i1] @@ -5744,9 +5742,7 @@ copystrcontent(p1, p5, 0, 0, i1) copystrcontent(p2, p5, 0, i1, i2) copystrcontent(p3, p5, 0, i12, i3) - i129 = same_as(i2) - i130 = same_as(i3) - jump(p2, p3, p5, i129, i130) + jump(p2, p3, p5, i2, i3) """ expected = """ [p1, p2, p3, i1, i2] @@ -5959,8 +5955,7 @@ p4 = newstr(i5) copystrcontent(p1, p4, i1, 0, i3) copystrcontent(p2, p4, 0, i3, i4) - i9 = same_as(i4) - jump(p4, i1, i2, p2, i5, i3, i9) + jump(p4, i1, i2, p2, i5, i3, i4) """ expected = """ [p1, i1, i2, p2, i5, i3, i4] @@ -6082,9 +6077,7 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, p3, p4, descr=strequaldescr) escape(i0) - i11 = same_as(i1) - i12 = same_as(i2) - jump(p1, p2, p3, i3, i11, i12) + jump(p1, p2, p3, i3, i1, i2) """ expected = """ [p1, p2, p3, i3, i1, i2] @@ -6304,7 +6297,6 @@ i1 = strlen(p1) i0 = int_eq(i1, 0) escape(i0) - i3 = same_as(i1) jump(p1, i0) """ self.optimize_strunicode_loop_extradescrs(ops, expected, preamble) @@ -6350,9 +6342,7 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, s"hello world", p4, descr=streq_nonnull_descr) escape(i0) - i11 = same_as(i1) - i12 = same_as(i2) - jump(p1, p2, i3, i11, i12) + jump(p1, p2, i3, i1, i2) """ expected = """ [p1, p2, i3, i1, i2] @@ -6925,8 +6915,7 @@ [p9] i843 = strlen(p9) call(i843, descr=nonwritedescr) - i0 = same_as(i843) - jump(p9, i0) + jump(p9, i843) """ short = """ [p9] diff --git a/pypy/jit/metainterp/optimizeopt/test/test_util.py 
b/pypy/jit/metainterp/optimizeopt/test/test_util.py --- a/pypy/jit/metainterp/optimizeopt/test/test_util.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_util.py @@ -430,18 +430,18 @@ preamble = TreeLoop('preamble') preamble.inputargs = inputargs - preamble.start_resumedescr = FakeDescrWithSnapshot() + preamble.resume_at_jump_descr = FakeDescrWithSnapshot() token = JitCellToken() preamble.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(token))] + \ operations + \ - [ResOperation(rop.JUMP, jump_args, None, descr=token)] + [ResOperation(rop.LABEL, jump_args, None, descr=token)] self._do_optimize_loop(preamble, call_pure_results) assert preamble.operations[-1].getopnum() == rop.LABEL inliner = Inliner(inputargs, jump_args) - loop.start_resumedescr = preamble.start_resumedescr + loop.resume_at_jump_descr = preamble.resume_at_jump_descr loop.operations = [preamble.operations[-1]] + \ [inliner.inline_op(op, clone=False) for op in cloned_operations] + \ [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jump_args], diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -3,7 +3,7 @@ from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.history import TreeLoop, TargetToken, JitCellToken from pypy.jit.metainterp.jitexc import JitException -from pypy.jit.metainterp.optimize import InvalidLoop, RetraceLoop +from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.metainterp.optimizeopt.optimizer import * from pypy.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds from pypy.jit.metainterp.inliner import Inliner @@ -51,10 +51,10 @@ distinction anymore)""" inline_short_preamble = True - did_import = False def __init__(self, metainterp_sd, loop, optimizations): self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations) + self.boxes_created_this_iteration = None def fix_snapshot(self, jump_args, snapshot): if snapshot is None: @@ -71,7 +71,6 @@ loop = self.optimizer.loop self.optimizer.clear_newoperations() - start_label = loop.operations[0] if start_label.getopnum() == rop.LABEL: loop.operations = loop.operations[1:] @@ -82,7 +81,7 @@ start_label = None jumpop = loop.operations[-1] - if jumpop.getopnum() == rop.JUMP: + if jumpop.getopnum() == rop.JUMP or jumpop.getopnum() == rop.LABEL: loop.operations = loop.operations[:-1] else: jumpop = None @@ -91,48 +90,87 @@ self.optimizer.propagate_all_forward(clear=False) if not jumpop: - return - if self.jump_to_already_compiled_trace(jumpop): - # Found a compiled trace to jump to - if self.did_import: - - self.close_bridge(start_label) - self.finilize_short_preamble(start_label) return cell_token = jumpop.getdescr() assert isinstance(cell_token, JitCellToken) stop_label = ResOperation(rop.LABEL, jumpop.getarglist(), None, TargetToken(cell_token)) - if not self.did_import: # Enforce the previous behaviour of always peeling exactly one iteration (for now) - self.optimizer.flush() - KillHugeIntBounds(self.optimizer).apply() + + if jumpop.getopnum() == rop.JUMP: + if self.jump_to_already_compiled_trace(jumpop): + # Found a compiled trace to jump to + if self.short: + # Construct our short preamble + assert start_label + self.close_bridge(start_label) + return - loop.operations = self.optimizer.get_newoperations() - self.export_state(stop_label) - loop.operations.append(stop_label) - else: - assert stop_label + if start_label and 
self.jump_to_start_label(start_label, stop_label): + # Initial label matches, jump to it + jumpop = ResOperation(rop.JUMP, stop_label.getarglist(), None, + descr=start_label.getdescr()) + if self.short: + # Construct our short preamble + self.close_loop(start_label, jumpop) + else: + self.optimizer.send_extra_operation(jumpop) + return + + if cell_token.target_tokens: + limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit + if cell_token.retraced_count < limit: + cell_token.retraced_count += 1 + debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) + else: + debug_print("Retrace count reached, jumping to preamble") + assert cell_token.target_tokens[0].virtual_state is None + jumpop.setdescr(cell_token.target_tokens[0]) + self.optimizer.send_extra_operation(jumpop) + return + + # Found nothing to jump to, emit a label instead + + if self.short: + # Construct our short preamble assert start_label - stop_target = stop_label.getdescr() - start_target = start_label.getdescr() - assert isinstance(stop_target, TargetToken) - assert isinstance(start_target, TargetToken) - assert stop_target.targeting_jitcell_token is start_target.targeting_jitcell_token - jumpop = ResOperation(rop.JUMP, stop_label.getarglist(), None, descr=start_label.getdescr()) + self.close_bridge(start_label) - self.close_loop(jumpop) - self.finilize_short_preamble(start_label) + self.optimizer.flush() + KillHugeIntBounds(self.optimizer).apply() + + loop.operations = self.optimizer.get_newoperations() + self.export_state(stop_label) + loop.operations.append(stop_label) + + def jump_to_start_label(self, start_label, stop_label): + if not start_label or not stop_label: + return False + + stop_target = stop_label.getdescr() + start_target = start_label.getdescr() + assert isinstance(stop_target, TargetToken) + assert isinstance(start_target, TargetToken) + if stop_target.targeting_jitcell_token is not start_target.targeting_jitcell_token: + return False + + return True + + #args = stop_label.getarglist() + #modifier = VirtualStateAdder(self.optimizer) + #virtual_state = modifier.get_virtual_state(args) + #if self.initial_virtual_state.generalization_of(virtual_state): + # return True + def export_state(self, targetop): original_jump_args = targetop.getarglist() jump_args = [self.getvalue(a).get_key_box() for a in original_jump_args] - assert self.optimizer.loop.start_resumedescr - start_resumedescr = self.optimizer.loop.start_resumedescr.clone_if_mutable() - assert isinstance(start_resumedescr, ResumeGuardDescr) - start_resumedescr.rd_snapshot = self.fix_snapshot(jump_args, start_resumedescr.rd_snapshot) - # FIXME: I dont thnik we need fix_snapshot anymore + assert self.optimizer.loop.resume_at_jump_descr + resume_at_jump_descr = self.optimizer.loop.resume_at_jump_descr.clone_if_mutable() + assert isinstance(resume_at_jump_descr, ResumeGuardDescr) + resume_at_jump_descr.rd_snapshot = self.fix_snapshot(jump_args, resume_at_jump_descr.rd_snapshot) modifier = VirtualStateAdder(self.optimizer) virtual_state = modifier.get_virtual_state(jump_args) @@ -141,26 +179,21 @@ inputargs = virtual_state.make_inputargs(values, self.optimizer) short_inputargs = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True) - constant_inputargs = {} - for box in jump_args: - const = self.get_constant_box(box) - if const: - constant_inputargs[box] = const - short_boxes = ShortBoxes(self.optimizer, inputargs + constant_inputargs.keys()) - aliased_vrituals = {} - for i in 
range(len(original_jump_args)): - if original_jump_args[i] is not jump_args[i]: - if values[i].is_virtual(): - aliased_vrituals[original_jump_args[i]] = jump_args[i] - else: - short_boxes.alias(original_jump_args[i], jump_args[i]) + if self.boxes_created_this_iteration is not None: + for box in self.inputargs: + self.boxes_created_this_iteration[box] = True + + short_boxes = ShortBoxes(self.optimizer, inputargs, + self.boxes_created_this_iteration) self.optimizer.clear_newoperations() - for box in short_inputargs: - value = self.getvalue(box) - if value.is_virtual(): - value.force_box(self.optimizer) + for i in range(len(original_jump_args)): + if values[i].is_virtual(): + values[i].force_box(self.optimizer) + if original_jump_args[i] is not jump_args[i]: + op = ResOperation(rop.SAME_AS, [jump_args[i]], original_jump_args[i]) + self.optimizer.emit_operation(op) inputarg_setup_ops = self.optimizer.get_newoperations() target_token = targetop.getdescr() @@ -168,78 +201,82 @@ targetop.initarglist(inputargs) target_token.virtual_state = virtual_state target_token.short_preamble = [ResOperation(rop.LABEL, short_inputargs, None)] - target_token.start_resumedescr = start_resumedescr - target_token.exported_state = ExportedState(constant_inputargs, short_boxes, - inputarg_setup_ops, self.optimizer, - aliased_vrituals, jump_args) + target_token.resume_at_jump_descr = resume_at_jump_descr + + exported_values = {} + for box in inputargs: + exported_values[box] = self.optimizer.getvalue(box) + for op in short_boxes.operations(): + if op and op.result: + box = op.result + exported_values[box] = self.optimizer.getvalue(box) + + target_token.exported_state = ExportedState(short_boxes, inputarg_setup_ops, + exported_values) def import_state(self, targetop): - self.did_import = False - if not targetop: - # FIXME: Set up some sort of empty state with no virtuals? 
+ if not targetop: # Trace did not start with a label + self.inputargs = self.optimizer.loop.inputargs + self.short = None + self.initial_virtual_state = None return + + self.inputargs = targetop.getarglist() target_token = targetop.getdescr() - if not target_token: - return assert isinstance(target_token, TargetToken) exported_state = target_token.exported_state if not exported_state: - # FIXME: Set up some sort of empty state with no virtuals + # No state exported, construct one without virtuals + self.short = None + modifier = VirtualStateAdder(self.optimizer) + virtual_state = modifier.get_virtual_state(self.inputargs) + self.initial_virtual_state = virtual_state return - self.did_import = True self.short = target_token.short_preamble[:] self.short_seen = {} - self.short_boxes = exported_state.short_boxes.clone() - for box, const in exported_state.constant_inputargs.items(): - self.short_seen[box] = True - self.imported_state = exported_state - self.inputargs = targetop.getarglist() + self.short_boxes = exported_state.short_boxes + self.short_resume_at_jump_descr = target_token.resume_at_jump_descr self.initial_virtual_state = target_token.virtual_state - self.start_resumedescr = target_token.start_resumedescr seen = {} for box in self.inputargs: if box in seen: continue seen[box] = True - preamble_value = exported_state.optimizer.getvalue(box) + preamble_value = exported_state.exported_values[box] value = self.optimizer.getvalue(box) value.import_from(preamble_value, self.optimizer) - for newbox, oldbox in self.short_boxes.aliases.items(): - self.optimizer.make_equal_to(newbox, self.optimizer.getvalue(oldbox)) - # Setup the state of the new optimizer by emiting the # short operations and discarding the result self.optimizer.emitting_dissabled = True for op in exported_state.inputarg_setup_ops: self.optimizer.send_extra_operation(op) + seen = {} - for op in self.short_boxes.operations(): self.ensure_short_op_emitted(op, self.optimizer, seen) if op and op.result: - preamble_value = exported_state.optimizer.getvalue(op.result) + preamble_value = exported_state.exported_values[op.result] value = self.optimizer.getvalue(op.result) if not value.is_virtual(): imp = ValueImporter(self, preamble_value, op) self.optimizer.importable_values[value] = imp newvalue = self.optimizer.getvalue(op.result) newresult = newvalue.get_key_box() + # note that emitting here SAME_AS should not happen, but + # in case it does, we would prefer to be suboptimal in asm + # to a fatal RPython exception. if newresult is not op.result and not newvalue.is_constant(): self.short_boxes.alias(newresult, op.result) op = ResOperation(rop.SAME_AS, [op.result], newresult) - self.optimizer._newoperations = [op] + self.optimizer._newoperations # XXX - #self.optimizer.getvalue(op.result).box = op.result # FIXME: HACK!!! 
+ self.optimizer._newoperations = [op] + self.optimizer._newoperations self.optimizer.flush() self.optimizer.emitting_dissabled = False - for box, key_box in exported_state.aliased_vrituals.items(): - self.optimizer.make_equal_to(box, self.getvalue(key_box)) - def close_bridge(self, start_label): - inputargs = self.inputargs + inputargs = self.inputargs short_jumpargs = inputargs[:] # We dont need to inline the short preamble we are creating as we are conneting @@ -249,8 +286,6 @@ newoperations = self.optimizer.get_newoperations() self.boxes_created_this_iteration = {} i = 0 - while newoperations[i].getopnum() != rop.LABEL: - i += 1 while i < len(newoperations): op = newoperations[i] self.boxes_created_this_iteration[op.result] = True @@ -262,11 +297,11 @@ i += 1 newoperations = self.optimizer.get_newoperations() self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=start_label.getdescr())) - - def close_loop(self, jumpop): + self.finilize_short_preamble(start_label) + + def close_loop(self, start_label, jumpop): virtual_state = self.initial_virtual_state short_inputargs = self.short[0].getarglist() - constant_inputargs = self.imported_state.constant_inputargs inputargs = self.inputargs short_jumpargs = inputargs[:] @@ -289,8 +324,6 @@ raise InvalidLoop args[short_inputargs[i]] = jmp_to_short_args[i] self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) - for box, const in constant_inputargs.items(): - self.short_inliner.argmap[box] = const for op in self.short[1:]: newop = self.short_inliner.inline_op(op) self.optimizer.send_extra_operation(newop) @@ -299,8 +332,6 @@ newoperations = self.optimizer.get_newoperations() self.boxes_created_this_iteration = {} i = j = 0 - while newoperations[i].getopnum() != rop.LABEL: - i += 1 while i < len(newoperations) or j < len(jumpargs): if i == len(newoperations): while j < len(jumpargs): @@ -353,6 +384,8 @@ assert isinstance(target_token, TargetToken) target_token.targeting_jitcell_token.retraced_count = sys.maxint + self.finilize_short_preamble(start_label) + def finilize_short_preamble(self, start_label): short = self.short assert short[-1].getopnum() == rop.JUMP @@ -365,7 +398,7 @@ if op.is_guard(): op = op.clone() op.setfailargs(None) - descr = target_token.start_resumedescr.clone_if_mutable() + descr = target_token.resume_at_jump_descr.clone_if_mutable() op.setdescr(descr) short[i] = op @@ -381,13 +414,11 @@ newargs[i] = a.clonebox() boxmap[a] = newargs[i] inliner = Inliner(short_inputargs, newargs) - for box, const in self.imported_state.constant_inputargs.items(): - inliner.argmap[box] = const for i in range(len(short)): short[i] = inliner.inline_op(short[i]) - target_token.start_resumedescr = self.start_resumedescr.clone_if_mutable() - inliner.inline_descr_inplace(target_token.start_resumedescr) + target_token.resume_at_jump_descr = target_token.resume_at_jump_descr.clone_if_mutable() + inliner.inline_descr_inplace(target_token.resume_at_jump_descr) # Forget the values to allow them to be freed for box in short[0].getarglist(): @@ -398,31 +429,6 @@ target_token.short_preamble = self.short target_token.exported_state = None - - def FIXME_old_stuff(): - preamble_optimizer = self.optimizer - loop.preamble.quasi_immutable_deps = ( - self.optimizer.quasi_immutable_deps) - self.optimizer = self.optimizer.new() - loop.quasi_immutable_deps = self.optimizer.quasi_immutable_deps - - - loop.inputargs = inputargs - args = [preamble_optimizer.getvalue(self.short_boxes.original(a)).force_box(preamble_optimizer)\ - for a in 
inputargs] - jmp = ResOperation(rop.JUMP, args, None) - jmp.setdescr(loop.token) - loop.preamble.operations.append(jmp) - - loop.operations = self.optimizer.get_newoperations() - maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards - - if self.optimizer.emitted_guards > maxguards: - loop.preamble.token.retraced_count = sys.maxint - - if short: - pass - def ensure_short_op_emitted(self, op, optimizer, seen): if op is None: return @@ -450,7 +456,7 @@ if not isinstance(a, Const) and a not in self.short_seen: self.add_op_to_short(self.short_boxes.producer(a), emit, guards_needed) if op.is_guard(): - descr = self.start_resumedescr.clone_if_mutable() + descr = self.short_resume_at_jump_descr.clone_if_mutable() op.setdescr(descr) if guards_needed and self.short_boxes.has_producer(op.result): @@ -549,7 +555,7 @@ for guard in extra_guards: if guard.is_guard(): - descr = target.start_resumedescr.clone_if_mutable() + descr = target.resume_at_jump_descr.clone_if_mutable() inliner.inline_descr_inplace(descr) guard.setdescr(descr) self.optimizer.send_extra_operation(guard) @@ -566,20 +572,7 @@ self.optimizer.send_extra_operation(jumpop) return True debug_stop('jit-log-virtualstate') - - if self.did_import: - return False - limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit - if cell_token.retraced_count= 0: message[4 + i] = buffer[offset + i] @@ -264,7 +266,8 @@ def do_recv_string(self, space, buflength, maxlength): with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as length_ptr: self._recvall(space, rffi.cast(rffi.CCHARP, length_ptr), 4) - length = intmask(length_ptr[0]) + length = intmask(rsocket.ntohl( + rffi.cast(lltype.Unsigned, length_ptr[0]))) if length > maxlength: # bad message, close connection self.flags &= ~READABLE if self.flags == 0: diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -37,6 +37,9 @@ def test_connection(self): rhandle, whandle = self.make_pair() + whandle.send_bytes("abc") + assert rhandle.recv_bytes(100) == "abc" + obj = [1, 2.0, "hello"] whandle.send(obj) obj2 = rhandle.recv() @@ -150,4 +153,20 @@ import _multiprocessing raises(IOError, _multiprocessing.Connection, -1) - raises(IOError, _multiprocessing.Connection, -15) \ No newline at end of file + raises(IOError, _multiprocessing.Connection, -15) + + def test_byte_order(self): + # The exact format of net strings (length in network byte + # order) is important for interoperation with others + # implementations. 
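A minimal sketch of the length-prefixed "net string" framing this test relies on, in plain Python with the struct module; send_framed/recv_framed are illustrative names only, not part of _multiprocessing, and the recv side is simplified (no short-read handling):

import struct

def send_framed(sock, payload):
    # 4-byte length prefix in network (big-endian) byte order, then the payload
    sock.sendall(struct.pack("!I", len(payload)) + payload)

def recv_framed(sock):
    header = sock.recv(4)
    (length,) = struct.unpack("!I", header)   # e.g. '\x00\x00\x00\x03' -> 3
    return sock.recv(length)                  # e.g. 'abc'
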
+ rhandle, whandle = self.make_pair() + whandle.send_bytes("abc") + whandle.send_bytes("defg") + import socket + sock = socket.fromfd(rhandle.fileno(), + socket.AF_INET, socket.SOCK_STREAM) + data1 = sock.recv(7) + assert data1 == '\x00\x00\x00\x03abc' + data2 = sock.recv(8) + assert data2 == '\x00\x00\x00\x04defg' + diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -45,6 +45,8 @@ import pypy.module.cpyext.longobject import pypy.module.cpyext.listobject import pypy.module.cpyext.sequence +import pypy.module.cpyext.buffer +import pypy.module.cpyext.bufferobject import pypy.module.cpyext.eval import pypy.module.cpyext.import_ import pypy.module.cpyext.mapping diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -317,6 +317,10 @@ INTERPLEVEL_API = {} FUNCTIONS = {} + +# These are C symbols which cpyext will export, but which are defined in .c +# files somewhere in the implementation of cpyext (rather than being defined in +# RPython). SYMBOLS_C = [ 'Py_FatalError', 'PyOS_snprintf', 'PyOS_vsnprintf', 'PyArg_Parse', 'PyArg_ParseTuple', 'PyArg_UnpackTuple', 'PyArg_ParseTupleAndKeywords', diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/buffer.py @@ -0,0 +1,11 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, CANNOT_FAIL, Py_buffer) + + at cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL) +def PyBuffer_IsContiguous(space, view, fortran): + """Return 1 if the memory defined by the view is C-style (fortran is + 'C') or Fortran-style (fortran is 'F') contiguous or either one + (fortran is 'A'). Return 0 otherwise.""" + # PyPy only supports contiguous Py_buffers for now. + return space.wrap(1) diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/bufferobject.py @@ -0,0 +1,66 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, + PyObjectFields, PyObject) +from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef +from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer + + +PyBufferObjectStruct = lltype.ForwardReference() +PyBufferObject = lltype.Ptr(PyBufferObjectStruct) +PyBufferObjectFields = PyObjectFields + ( + ("b_base", PyObject), + ("b_ptr", rffi.VOIDP), + ("b_size", Py_ssize_t), + ("b_offset", Py_ssize_t), + ("b_readonly", rffi.INT), + ("b_hash", rffi.LONG), + ) + +cpython_struct("PyBufferObject", PyBufferObjectFields, PyBufferObjectStruct) + + at bootstrap_function +def init_bufferobject(space): + "Type description of PyBufferObject" + make_typedescr(space.gettypefor(Buffer).instancetypedef, + basestruct=PyBufferObject.TO, + attach=buffer_attach, + dealloc=buffer_dealloc, + realize=buffer_realize) + +def buffer_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyBufferObject with the given (str) buffer object. 
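For orientation, the objects this attach/dealloc code wraps are ordinary Python 2 buffer objects, and a SubBuffer corresponds to a buffer created with a non-zero offset. A quick interpreter-level illustration (CPython 2 semantics, for intuition only):

b = buffer("foo\0bar")          # read-only view over the string's bytes
assert b[0:3] == "foo"          # slicing a buffer yields an ordinary str
sub = buffer("foo\0bar", 4)     # offset buffer, the SubBuffer case handled above
assert str(sub) == "bar" and len(sub) == 3
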
+ """ + py_buf = rffi.cast(PyBufferObject, py_obj) + py_buf.c_b_offset = 0 + rffi.setintfield(py_buf, 'c_b_readonly', 1) + rffi.setintfield(py_buf, 'c_b_hash', -1) + + if isinstance(w_obj, SubBuffer): + py_buf.c_b_offset = w_obj.offset + w_obj = w_obj.buffer + + if isinstance(w_obj, StringBuffer): + py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value) + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.as_str())) + py_buf.c_b_size = w_obj.getlength() + else: + raise Exception("Fail fail fail fail fail") + + +def buffer_realize(space, py_obj): + """ + Creates the buffer in the PyPy interpreter from a cpyext representation. + """ + raise Exception("realize fail fail fail") + + + + at cpython_api([PyObject], lltype.Void, external=False) +def buffer_dealloc(space, py_obj): + py_buf = rffi.cast(PyBufferObject, py_obj) + Py_DecRef(space, py_buf.c_b_base) + rffi.free_charp(rffi.cast(rffi.CCHARP, py_buf.c_b_ptr)) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) diff --git a/pypy/module/cpyext/include/bufferobject.h b/pypy/module/cpyext/include/bufferobject.h --- a/pypy/module/cpyext/include/bufferobject.h +++ b/pypy/module/cpyext/include/bufferobject.h @@ -9,6 +9,17 @@ extern "C" { #endif +typedef struct { + PyObject_HEAD + PyObject *b_base; + void *b_ptr; + Py_ssize_t b_size; + Py_ssize_t b_offset; + int b_readonly; + long b_hash; +} PyBufferObject; + + PyAPI_DATA(PyTypeObject) PyBuffer_Type; #define PyBuffer_Check(op) (((PyObject*)(op))->ob_type == &PyBuffer_Type) diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -234,7 +234,7 @@ writebufferproc bf_getwritebuffer; segcountproc bf_getsegcount; charbufferproc bf_getcharbuffer; - getbufferproc bf_getbuffer; + getbufferproc bf_getbuffer; releasebufferproc bf_releasebuffer; } PyBufferProcs; diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -58,6 +58,7 @@ class W_PyCFunctionObject(Wrappable): def __init__(self, space, ml, w_self, w_module=None): self.ml = ml + self.name = rffi.charp2str(self.ml.c_ml_name) self.w_self = w_self self.w_module = w_module @@ -69,7 +70,7 @@ flags &= ~(METH_CLASS | METH_STATIC | METH_COEXIST) if space.is_true(w_kw) and not flags & METH_KEYWORDS: raise OperationError(space.w_TypeError, space.wrap( - rffi.charp2str(self.ml.c_ml_name) + "() takes no keyword arguments")) + self.name + "() takes no keyword arguments")) func = rffi.cast(PyCFunction, self.ml.c_ml_meth) length = space.int_w(space.len(w_args)) @@ -80,13 +81,12 @@ if length == 0: return generic_cpy_call(space, func, w_self, None) raise OperationError(space.w_TypeError, space.wrap( - rffi.charp2str(self.ml.c_ml_name) + "() takes no arguments")) + self.name + "() takes no arguments")) elif flags & METH_O: if length != 1: raise OperationError(space.w_TypeError, space.wrap("%s() takes exactly one argument (%d given)" % ( - rffi.charp2str(self.ml.c_ml_name), - length))) + self.name, length))) w_arg = space.getitem(w_args, space.wrap(0)) return generic_cpy_call(space, func, w_self, w_arg) elif flags & METH_VARARGS: @@ -199,6 +199,7 @@ __call__ = interp2app(cfunction_descr_call), __doc__ = GetSetProperty(W_PyCFunctionObject.get_doc), __module__ = interp_attrproperty_w('w_module', cls=W_PyCFunctionObject), + __name__ = interp_attrproperty('name', cls=W_PyCFunctionObject), 
) W_PyCFunctionObject.typedef.acceptable_as_base_class = False diff --git a/pypy/module/cpyext/src/bufferobject.c b/pypy/module/cpyext/src/bufferobject.c --- a/pypy/module/cpyext/src/bufferobject.c +++ b/pypy/module/cpyext/src/bufferobject.c @@ -4,17 +4,6 @@ #include "Python.h" -typedef struct { - PyObject_HEAD - PyObject *b_base; - void *b_ptr; - Py_ssize_t b_size; - Py_ssize_t b_offset; - int b_readonly; - long b_hash; -} PyBufferObject; - - enum buffer_t { READ_BUFFER, WRITE_BUFFER, diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -777,18 +777,14 @@ Py_buffer *p = (Py_buffer *)va_arg(*p_va, Py_buffer *); if (PyString_Check(arg)) { + fflush(stdout); PyBuffer_FillInfo(p, arg, PyString_AS_STRING(arg), PyString_GET_SIZE(arg), 1, 0); - } else { - PyErr_SetString( - PyExc_NotImplementedError, - "s* not implemented for non-string values"); - return NULL; - } -#if 0 + } #ifdef Py_USING_UNICODE else if (PyUnicode_Check(arg)) { +#if 0 uarg = UNICODE_DEFAULT_ENCODING(arg); if (uarg == NULL) return converterr(CONV_UNICODE, @@ -796,6 +792,9 @@ PyBuffer_FillInfo(p, arg, PyString_AS_STRING(uarg), PyString_GET_SIZE(uarg), 1, 0); +#else + return converterr("string or buffer", arg, msgbuf, bufsize); +#endif } #endif else { /* any buffer-like object */ @@ -803,7 +802,6 @@ if (getbuffer(arg, p, &buf) < 0) return converterr(buf, arg, msgbuf, bufsize); } -#endif if (addcleanup(p, freelist, cleanup_buffer)) { return converterr( "(cleanup problem)", @@ -1342,7 +1340,6 @@ return count; } -#if 0 //YYY static int getbuffer(PyObject *arg, Py_buffer *view, char **errmsg) { @@ -1373,7 +1370,6 @@ PyBuffer_FillInfo(view, NULL, buf, count, 1, 0); return 0; } -#endif /* Support for keyword arguments donated by Geoff Philbrick */ diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -1,5 +1,5 @@ from pypy.module.cpyext.api import ( - cpython_api, PyObject, PyObjectP, CANNOT_FAIL + cpython_api, PyObject, PyObjectP, CANNOT_FAIL, Py_buffer ) from pypy.module.cpyext.complexobject import Py_complex_ptr as Py_complex from pypy.rpython.lltypesystem import rffi, lltype @@ -10,7 +10,6 @@ PyMethodDef = rffi.VOIDP PyGetSetDef = rffi.VOIDP PyMemberDef = rffi.VOIDP -Py_buffer = rffi.VOIDP va_list = rffi.VOIDP PyDateTime_Date = rffi.VOIDP PyDateTime_DateTime = rffi.VOIDP @@ -178,13 +177,6 @@ ~Py_buffer.format.""" raise NotImplementedError - at cpython_api([Py_buffer, lltype.Char], rffi.INT_real, error=CANNOT_FAIL) -def PyBuffer_IsContiguous(space, view, fortran): - """Return 1 if the memory defined by the view is C-style (fortran is - 'C') or Fortran-style (fortran is 'F') contiguous or either one - (fortran is 'A'). 
Return 0 otherwise.""" - raise NotImplementedError - @cpython_api([rffi.INT_real, Py_ssize_t, Py_ssize_t, Py_ssize_t, lltype.Char], lltype.Void) def PyBuffer_FillContiguousStrides(space, ndim, shape, strides, itemsize, fortran): """Fill the strides array with byte-strides of a contiguous (C-style if diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -129,6 +129,21 @@ assert 'foo\0bar\0baz' == pybuffer('foo\0bar\0baz') + def test_pyarg_parse_string_old_buffer(self): + pybuffer = self.import_parser( + ''' + Py_buffer buf; + PyObject *result; + if (!PyArg_ParseTuple(args, "s*", &buf)) { + return NULL; + } + result = PyString_FromStringAndSize(buf.buf, buf.len); + PyBuffer_Release(&buf); + return result; + ''') + assert 'foo\0bar\0baz' == pybuffer(buffer('foo\0bar\0baz')) + + def test_pyarg_parse_charbuf_and_length(self): """ The `t#` format specifier can be used to parse a read-only 8-bit diff --git a/pypy/module/cpyext/test/test_methodobject.py b/pypy/module/cpyext/test/test_methodobject.py --- a/pypy/module/cpyext/test/test_methodobject.py +++ b/pypy/module/cpyext/test/test_methodobject.py @@ -63,6 +63,7 @@ ), ]) assert mod.getarg_O(1) == 1 + assert mod.getarg_O.__name__ == "getarg_O" raises(TypeError, mod.getarg_O) raises(TypeError, mod.getarg_O, 1, 1) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -28,6 +28,7 @@ PyNumberMethods, PyMappingMethods, PySequenceMethods, PyBufferProcs) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) +from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError from pypy.rlib.rstring import rsplit from pypy.rlib.objectmodel import specialize @@ -418,8 +419,21 @@ Py_DecRef(space, pyref) return space.len_w(w_str) + at cpython_api([PyObject, Py_ssize_t, rffi.VOIDPP], lltype.Signed, + external=False, error=-1) +def buf_getreadbuffer(space, pyref, segment, ref): + from pypy.module.cpyext.bufferobject import PyBufferObject + if segment != 0: + raise OperationError(space.w_SystemError, space.wrap + ("accessing non-existent string segment")) + py_buf = rffi.cast(PyBufferObject, pyref) + ref[0] = py_buf.c_b_ptr + #Py_DecRef(space, pyref) + return py_buf.c_b_size + def setup_string_buffer_procs(space, pto): c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) + lltype.render_immortal(c_buf) c_buf.c_bf_getsegcount = llhelper(str_segcount.api_func.functype, str_segcount.api_func.get_wrapper(space)) c_buf.c_bf_getreadbuffer = llhelper(str_getreadbuffer.api_func.functype, @@ -429,6 +443,15 @@ pto.c_tp_as_buffer = c_buf pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER +def setup_buffer_buffer_procs(space, pto): + c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) + lltype.render_immortal(c_buf) + c_buf.c_bf_getsegcount = llhelper(str_segcount.api_func.functype, + str_segcount.api_func.get_wrapper(space)) + c_buf.c_bf_getreadbuffer = llhelper(buf_getreadbuffer.api_func.functype, + buf_getreadbuffer.api_func.get_wrapper(space)) + pto.c_tp_as_buffer = c_buf + @cpython_api([PyObject], lltype.Void, external=False) def type_dealloc(space, obj): from pypy.module.cpyext.object import PyObject_dealloc @@ -484,6 +507,8 @@ # buffer protocol if space.is_w(w_type, space.w_str): setup_string_buffer_procs(space, pto) + if 
space.is_w(w_type, space.gettypefor(Buffer)): + setup_buffer_buffer_procs(space, pto) pto.c_tp_free = llhelper(PyObject_Del.api_func.functype, PyObject_Del.api_func.get_wrapper(space)) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -21,7 +21,6 @@ _immutable_fields_ = ["itemtype", "num", "kind"] def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[]): - self.signature = signature.BaseSignature() self.itemtype = itemtype self.num = num self.kind = kind @@ -228,4 +227,4 @@ ) def get_dtype_cache(space): - return space.fromcache(DtypeCache) \ No newline at end of file + return space.fromcache(DtypeCache) diff --git a/pypy/module/micronumpy/interp_extras.py b/pypy/module/micronumpy/interp_extras.py --- a/pypy/module/micronumpy/interp_extras.py +++ b/pypy/module/micronumpy/interp_extras.py @@ -4,4 +4,4 @@ @unwrap_spec(array=BaseArray) def debug_repr(space, array): - return space.wrap(array.debug_repr()) + return space.wrap(array.find_sig().debug_repr()) diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_iter.py @@ -0,0 +1,128 @@ + +from pypy.rlib import jit +from pypy.rlib.objectmodel import instantiate +from pypy.module.micronumpy.strides import calculate_broadcast_strides + +# Iterators for arrays +# -------------------- +# all those iterators with the exception of BroadcastIterator iterate over the +# entire array in C order (the last index changes the fastest). This will +# yield all elements. Views iterate over indices and look towards strides and +# backstrides to find the correct position. Notably the offset between +# x[..., i + 1] and x[..., i] will be strides[-1]. Offset between +# x[..., k + 1, 0] and x[..., k, i_max] will be backstrides[-2] etc. 
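As a small aside, the strides/backstrides the comment above talks about can be computed like this for a C-order array; compute_strides is an illustrative helper, not the interp_iter API:

def compute_strides(shape):
    # C order: the last index varies fastest, so the last stride is 1 (in items)
    strides = [0] * len(shape)
    s = 1
    for i in range(len(shape) - 1, -1, -1):
        strides[i] = s
        s *= shape[i]
    # backstrides[i] is the distance walked back when axis i wraps around
    backstrides = [strides[i] * (shape[i] - 1) for i in range(len(shape))]
    return strides, backstrides

# For shape [2, 3] (C order): strides == [3, 1], backstrides == [3, 2];
# x[..., i + 1] sits strides[-1] == 1 items after x[..., i], and wrapping the
# last axis steps back by backstrides[-1] == 2 before the next axis advances.
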
+ +# BroadcastIterator works like that, but for indexes that don't change source +# in the original array, strides[i] == backstrides[i] == 0 + +class BaseIterator(object): + def next(self, shapelen): + raise NotImplementedError + + def done(self): + raise NotImplementedError + +class ArrayIterator(BaseIterator): + def __init__(self, size): + self.offset = 0 + self.size = size + + def next(self, shapelen): + arr = instantiate(ArrayIterator) + arr.size = self.size + arr.offset = self.offset + 1 + return arr + + def done(self): + return self.offset >= self.size + +class OneDimIterator(BaseIterator): + def __init__(self, start, step, stop): + self.offset = start + self.step = step + self.size = stop * step + start + + def next(self, shapelen): + arr = instantiate(OneDimIterator) + arr.size = self.size + arr.step = self.step + arr.offset = self.offset + self.step + return arr + + def done(self): + return self.offset == self.size + +def view_iter_from_arr(arr): + return ViewIterator(arr.start, arr.strides, arr.backstrides, arr.shape) + +class ViewIterator(BaseIterator): + def __init__(self, start, strides, backstrides, shape, res_shape=None): + self.offset = start + self._done = False + if res_shape is not None and res_shape != shape: + r = calculate_broadcast_strides(strides, backstrides, + shape, res_shape) + self.strides, self.backstrides = r + self.res_shape = res_shape + else: + self.strides = strides + self.backstrides = backstrides + self.res_shape = shape + self.indices = [0] * len(self.res_shape) + + @jit.unroll_safe + def next(self, shapelen): + offset = self.offset + indices = [0] * shapelen + for i in range(shapelen): + indices[i] = self.indices[i] + done = False + for i in range(shapelen - 1, -1, -1): + if indices[i] < self.res_shape[i] - 1: + indices[i] += 1 + offset += self.strides[i] + break + else: + indices[i] = 0 + offset -= self.backstrides[i] + else: + done = True + res = instantiate(ViewIterator) + res.offset = offset + res.indices = indices + res.strides = self.strides + res.backstrides = self.backstrides + res.res_shape = self.res_shape + res._done = done + return res + + def done(self): + return self._done + +class ConstantIterator(BaseIterator): + def next(self, shapelen): + return self + +# ------ other iterators that are not part of the computation frame ---------- + +class AxisIterator(object): + """ This object will return offsets of each start of the last stride + """ + def __init__(self, arr): + self.arr = arr + self.indices = [0] * (len(arr.shape) - 1) + self.done = False + self.offset = arr.start + + def next(self): + for i in range(len(self.arr.shape) - 2, -1, -1): + if self.indices[i] < self.arr.shape[i] - 1: + self.indices[i] += 1 + self.offset += self.arr.strides[i] + break + else: + self.indices[i] = 0 + self.offset -= self.arr.backstrides[i] + else: + self.done = True + diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -3,28 +3,33 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import interp_ufuncs, interp_dtype, signature +from pypy.module.micronumpy.strides import calculate_slice_strides from pypy.rlib import jit from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name from pypy.rlib.rstring import StringBuilder -from pypy.rlib.objectmodel 
import instantiate - +from pypy.module.micronumpy.interp_iter import ArrayIterator,\ + view_iter_from_arr, OneDimIterator, AxisIterator numpy_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['result_size', 'i', 'ri', 'self', 'result'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['result_size', 'frame', 'ri', 'self', 'result'] ) all_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['i', 'self', 'dtype'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['frame', 'self', 'dtype'] ) any_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['i', 'self', 'dtype'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['frame', 'self', 'dtype'] ) slice_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['self', 'source', 'source_iter', 'res_iter'] + greens=['shapelen', 'sig'], + virtualizables=['frame'], + reds=['self', 'frame', 'source', 'res_iter'] ) def _find_shape_and_elems(space, w_iterable): @@ -198,231 +203,17 @@ n_old_elems_to_use *= old_shape[oldI] return new_strides -# Iterators for arrays -# -------------------- -# all those iterators with the exception of BroadcastIterator iterate over the -# entire array in C order (the last index changes the fastest). This will -# yield all elements. Views iterate over indices and look towards strides and -# backstrides to find the correct position. Notably the offset between -# x[..., i + 1] and x[..., i] will be strides[-1]. Offset between -# x[..., k + 1, 0] and x[..., k, i_max] will be backstrides[-2] etc. +class BaseArray(Wrappable): + _attrs_ = ["invalidates", "shape", 'size'] -# BroadcastIterator works like that, but for indexes that don't change source -# in the original array, strides[i] == backstrides[i] == 0 - -class BaseIterator(object): - def next(self, shapelen): - raise NotImplementedError - - def done(self): - raise NotImplementedError - - def get_offset(self): - raise NotImplementedError - -class ArrayIterator(BaseIterator): - def __init__(self, size): - self.offset = 0 - self.size = size - - def next(self, shapelen): - arr = instantiate(ArrayIterator) - arr.size = self.size - arr.offset = self.offset + 1 - return arr - - def done(self): - return self.offset >= self.size - - def get_offset(self): - return self.offset - -class OneDimIterator(BaseIterator): - def __init__(self, start, step, stop): - self.offset = start - self.step = step - self.size = stop * step + start - - def next(self, shapelen): - arr = instantiate(OneDimIterator) - arr.size = self.size - arr.step = self.step - arr.offset = self.offset + self.step - return arr - - def done(self): - return self.offset == self.size - - def get_offset(self): - return self.offset - -class ViewIterator(BaseIterator): - def __init__(self, arr): - self.indices = [0] * len(arr.shape) - self.offset = arr.start - self.arr = arr - self._done = False - - @jit.unroll_safe - def next(self, shapelen): - offset = self.offset - indices = [0] * shapelen - for i in range(shapelen): - indices[i] = self.indices[i] - done = False - for i in range(shapelen - 1, -1, -1): - if indices[i] < self.arr.shape[i] - 1: - indices[i] += 1 - offset += self.arr.strides[i] - break - else: - indices[i] = 0 - offset -= self.arr.backstrides[i] - else: - done = True - res = instantiate(ViewIterator) - res.offset = offset - res.indices = indices - res.arr = self.arr - res._done = done - return res - - def done(self): - return self._done - - def get_offset(self): - return self.offset - -class 
BroadcastIterator(BaseIterator): - '''Like a view iterator, but will repeatedly access values - for all iterations across a res_shape, folding the offset - using mod() arithmetic - ''' - def __init__(self, arr, res_shape): - self.indices = [0] * len(res_shape) - self.offset = arr.start - #strides are 0 where original shape==1 - self.strides = [] - self.backstrides = [] - for i in range(len(arr.shape)): - if arr.shape[i] == 1: - self.strides.append(0) - self.backstrides.append(0) - else: - self.strides.append(arr.strides[i]) - self.backstrides.append(arr.backstrides[i]) - self.res_shape = res_shape - self.strides = [0] * (len(res_shape) - len(arr.shape)) + self.strides - self.backstrides = [0] * (len(res_shape) - len(arr.shape)) + self.backstrides - self._done = False - - @jit.unroll_safe - def next(self, shapelen): - offset = self.offset - indices = [0] * shapelen - _done = False - for i in range(shapelen): - indices[i] = self.indices[i] - for i in range(shapelen - 1, -1, -1): - if indices[i] < self.res_shape[i] - 1: - indices[i] += 1 - offset += self.strides[i] - break - else: - indices[i] = 0 - offset -= self.backstrides[i] - else: - _done = True - res = instantiate(BroadcastIterator) - res.indices = indices - res.offset = offset - res._done = _done - res.strides = self.strides - res.backstrides = self.backstrides - res.res_shape = self.res_shape - return res - - def done(self): - return self._done - - def get_offset(self): - return self.offset - -class Call2Iterator(BaseIterator): - def __init__(self, left, right): - self.left = left - self.right = right - - def next(self, shapelen): - return Call2Iterator(self.left.next(shapelen), - self.right.next(shapelen)) - - def done(self): - if isinstance(self.left, ConstantIterator): - return self.right.done() - return self.left.done() - - def get_offset(self): - if isinstance(self.left, ConstantIterator): - return self.right.get_offset() - return self.left.get_offset() - -class Call1Iterator(BaseIterator): - def __init__(self, child): - self.child = child - - def next(self, shapelen): - return Call1Iterator(self.child.next(shapelen)) - - def done(self): - return self.child.done() - - def get_offset(self): - return self.child.get_offset() - -class ConstantIterator(BaseIterator): - def next(self, shapelen): - return self - - def done(self): - return False - - def get_offset(self): - return 0 - - -class BaseArray(Wrappable): - _attrs_ = ["invalidates", "signature", "shape", "strides", "backstrides", - "start", 'order'] - - _immutable_fields_ = ['start', "order"] + _immutable_fields_ = [] strides = None start = 0 - def __init__(self, shape, order): + def __init__(self, shape): self.invalidates = [] self.shape = shape - self.order = order - if self.strides is None: - self.calc_strides(shape) - - def calc_strides(self, shape): - strides = [] - backstrides = [] - s = 1 - shape_rev = shape[:] - if self.order == 'C': - shape_rev.reverse() - for sh in shape_rev: - strides.append(s) - backstrides.append(s * (sh - 1)) - s *= sh - if self.order == 'C': - strides.reverse() - backstrides.reverse() - self.strides = strides[:] - self.backstrides = backstrides[:] def invalidated(self): if self.invalidates: @@ -499,33 +290,34 @@ def _reduce_argmax_argmin_impl(op_name): reduce_driver = jit.JitDriver( - greens=['shapelen', 'signature'], - reds=['result', 'idx', 'i', 'self', 'cur_best', 'dtype'] + greens=['shapelen', 'sig'], + reds=['result', 'idx', 'frame', 'self', 'cur_best', 'dtype'] ) def loop(self): - i = self.start_iter() - cur_best = self.eval(i) + sig = 
self.find_sig() + frame = sig.create_frame(self) + cur_best = sig.eval(frame, self) shapelen = len(self.shape) - i = i.next(shapelen) + frame.next(shapelen) dtype = self.find_dtype() result = 0 idx = 1 - while not i.done(): - reduce_driver.jit_merge_point(signature=self.signature, + while not frame.done(): + reduce_driver.jit_merge_point(sig=sig, shapelen=shapelen, self=self, dtype=dtype, - i=i, result=result, idx=idx, + frame=frame, result=result, + idx=idx, cur_best=cur_best) - new_best = getattr(dtype.itemtype, op_name)(cur_best, self.eval(i)) + new_best = getattr(dtype.itemtype, op_name)(cur_best, sig.eval(frame, self)) if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best - i = i.next(shapelen) + frame.next(shapelen) idx += 1 return result def impl(self, space): - size = self.find_size() - if size == 0: + if self.size == 0: raise OperationError(space.w_ValueError, space.wrap("Can't call %s on zero-size arrays" % op_name)) return space.wrap(loop(self)) @@ -533,15 +325,16 @@ def _all(self): dtype = self.find_dtype() - i = self.start_iter() + sig = self.find_sig() + frame = sig.create_frame(self) shapelen = len(self.shape) - while not i.done(): - all_driver.jit_merge_point(signature=self.signature, + while not frame.done(): + all_driver.jit_merge_point(sig=sig, shapelen=shapelen, self=self, - dtype=dtype, i=i) - if not dtype.itemtype.bool(self.eval(i)): + dtype=dtype, frame=frame) + if not dtype.itemtype.bool(sig.eval(frame, self)): return False - i = i.next(shapelen) + frame.next(shapelen) return True def descr_all(self, space): @@ -549,15 +342,16 @@ def _any(self): dtype = self.find_dtype() - i = self.start_iter() + sig = self.find_sig() + frame = sig.create_frame(self) shapelen = len(self.shape) - while not i.done(): - any_driver.jit_merge_point(signature=self.signature, + while not frame.done(): + any_driver.jit_merge_point(sig=sig, frame=frame, shapelen=shapelen, self=self, - dtype=dtype, i=i) - if dtype.itemtype.bool(self.eval(i)): + dtype=dtype) + if dtype.itemtype.bool(sig.eval(frame, self)): return True - i = i.next(shapelen) + frame.next(shapelen) return False def descr_any(self, space): @@ -586,26 +380,33 @@ return space.newtuple([space.wrap(i) for i in self.shape]) def descr_set_shape(self, space, w_iterable): - concrete = self.get_concrete() new_shape = get_shape_from_iterable(space, - concrete.find_size(), w_iterable) - concrete.setshape(space, new_shape) + self.size, w_iterable) + if isinstance(self, Scalar): + return + self.get_concrete().setshape(space, new_shape) def descr_get_size(self, space): - return space.wrap(self.find_size()) + return space.wrap(self.size) def descr_copy(self, space): - return self.get_concrete().copy() + return self.copy(space) + + def copy(self, space): + return self.get_concrete().copy(space) def descr_len(self, space): - return self.get_concrete().descr_len(space) + if len(self.shape): + return space.wrap(self.shape[0]) + raise OperationError(space.w_TypeError, space.wrap( + "len() of unsized object")) def descr_repr(self, space): res = StringBuilder() res.append("array(") concrete = self.get_concrete() dtype = concrete.find_dtype() - if not concrete.find_size(): + if not concrete.size: res.append('[]') if len(self.shape) > 1: # An empty slice reports its shape @@ -617,18 +418,423 @@ concrete.to_str(space, 1, res, indent=' ') if (dtype is not interp_dtype.get_dtype_cache(space).w_float64dtype and dtype is not interp_dtype.get_dtype_cache(space).w_int64dtype) or \ - not self.find_size(): + not self.size: res.append(", 
dtype=" + dtype.name) res.append(")") return space.wrap(res.build()) + def descr_str(self, space): + ret = StringBuilder() + concrete = self.get_concrete_or_scalar() + concrete.to_str(space, 0, ret, ' ') + return space.wrap(ret.build()) + + @jit.unroll_safe + def _single_item_result(self, space, w_idx): + """ The result of getitem/setitem is a single item if w_idx + is a list of scalars that match the size of shape + """ + shape_len = len(self.shape) + if shape_len == 0: + raise OperationError(space.w_IndexError, space.wrap( + "0-d arrays can't be indexed")) + if shape_len == 1: + if space.isinstance_w(w_idx, space.w_int): + return True + if space.isinstance_w(w_idx, space.w_slice): + return False + elif (space.isinstance_w(w_idx, space.w_slice) or + space.isinstance_w(w_idx, space.w_int)): + return False + lgt = space.len_w(w_idx) + if lgt > shape_len: + raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + if lgt < shape_len: + return False + for w_item in space.fixedview(w_idx): + if space.isinstance_w(w_item, space.w_slice): + return False + return True + + @jit.unroll_safe + def _prepare_slice_args(self, space, w_idx): + if (space.isinstance_w(w_idx, space.w_int) or + space.isinstance_w(w_idx, space.w_slice)): + return [space.decode_index4(w_idx, self.shape[0])] + return [space.decode_index4(w_item, self.shape[i]) for i, w_item in + enumerate(space.fixedview(w_idx))] + + def descr_getitem(self, space, w_idx): + if self._single_item_result(space, w_idx): + concrete = self.get_concrete() + item = concrete._index_of_single_item(space, w_idx) + return concrete.getitem(item) + chunks = self._prepare_slice_args(space, w_idx) + return space.wrap(self.create_slice(chunks)) + + def descr_setitem(self, space, w_idx, w_value): + self.invalidated() + if self._single_item_result(space, w_idx): + concrete = self.get_concrete() + item = concrete._index_of_single_item(space, w_idx) + dtype = concrete.find_dtype() + concrete.setitem(item, dtype.coerce(space, w_value)) + return + if not isinstance(w_value, BaseArray): + w_value = convert_to_array(space, w_value) + chunks = self._prepare_slice_args(space, w_idx) + view = self.create_slice(chunks).get_concrete() + view.setslice(space, w_value) + + @jit.unroll_safe + def create_slice(self, chunks): + shape = [] + i = -1 + for i, (start_, stop, step, lgt) in enumerate(chunks): + if step != 0: + shape.append(lgt) + s = i + 1 + assert s >= 0 + shape += self.shape[s:] + if not isinstance(self, ConcreteArray): + return VirtualSlice(self, chunks, shape) + r = calculate_slice_strides(self.shape, self.start, self.strides, + self.backstrides, chunks) + _, start, strides, backstrides = r + return W_NDimSlice(start, strides[:], backstrides[:], + shape[:], self) + + def descr_reshape(self, space, args_w): + """reshape(...) + a.reshape(shape) + + Returns an array containing the same data with a new shape. + + Refer to `numpypy.reshape` for full documentation. + + See Also + -------- + numpypy.reshape : equivalent function + """ + if len(args_w) == 1: + w_shape = args_w[0] + else: + w_shape = space.newtuple(args_w) + concrete = self.get_concrete() + new_shape = get_shape_from_iterable(space, concrete.size, w_shape) + # Since we got to here, prod(new_shape) == self.size + new_strides = calc_new_strides(new_shape, + concrete.shape, concrete.strides) + if new_strides: + # We can create a view, strides somehow match up. 
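The branch below only builds a view when calc_new_strides finds strides that match up; otherwise a contiguous copy is made. Roughly the same view-versus-copy distinction can be seen with standard numpy, shown here for intuition only:

import numpy

a = numpy.arange(6).reshape(2, 3)   # contiguous, so this reshape can be a view
v = a.reshape(6)
v[0] = 99
assert a[0, 0] == 99                # the view aliases a's storage

t = a.T.reshape(6)                  # strides cannot match up here, so a copy is made
t[1] = -1
assert a[1, 0] != -1                # the original array is untouched
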
+ ndims = len(new_shape) + new_backstrides = [0] * ndims + for nd in range(ndims): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + arr = W_NDimSlice(concrete.start, new_strides, new_backstrides, + new_shape, self) + else: + # Create copy with contiguous data + arr = concrete.copy(space) + arr.setshape(space, new_shape) + return arr + + def descr_tolist(self, space): + if len(self.shape) == 0: + assert isinstance(self, Scalar) + return self.value.descr_tolist(space) + w_result = space.newlist([]) + for i in range(self.shape[0]): + space.call_method(w_result, "append", + space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist") + ) + return w_result + + def descr_mean(self, space): + return space.div(self.descr_sum(space), space.wrap(self.size)) + + def descr_nonzero(self, space): + if self.size > 1: + raise OperationError(space.w_ValueError, space.wrap( + "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) + concr = self.get_concrete_or_scalar() + sig = concr.find_sig() + frame = sig.create_frame(self) + return space.wrap(space.is_true( + sig.eval(frame, concr))) + + def get_concrete_or_scalar(self): + return self.get_concrete() + + def descr_get_transpose(self, space): + concrete = self.get_concrete() + if len(concrete.shape) < 2: + return space.wrap(self) + strides = [] + backstrides = [] + shape = [] + for i in range(len(concrete.shape) - 1, -1, -1): + strides.append(concrete.strides[i]) + backstrides.append(concrete.backstrides[i]) + shape.append(concrete.shape[i]) + return space.wrap(W_NDimSlice(concrete.start, strides[:], + backstrides[:], shape[:], concrete)) + + def descr_get_flatiter(self, space): + return space.wrap(W_FlatIterator(self)) + + def getitem(self, item): + raise NotImplementedError + + def find_sig(self, res_shape=None): + """ find a correct signature for the array + """ + res_shape = res_shape or self.shape + return signature.find_sig(self.create_sig(res_shape), self) + + def descr_array_iface(self, space): + if not self.shape: + raise OperationError(space.w_TypeError, + space.wrap("can't get the array data of a 0-d array for now") + ) + concrete = self.get_concrete() + storage = concrete.storage + addr = rffi.cast(lltype.Signed, storage) + w_d = space.newdict() + space.setitem_str(w_d, 'data', space.newtuple([space.wrap(addr), + space.w_False])) + return w_d + + def supports_fast_slicing(self): + return False + +def convert_to_array(space, w_obj): + if isinstance(w_obj, BaseArray): + return w_obj + elif space.issequence_w(w_obj): + # Convert to array. + return array(space, w_obj, w_order=None) + else: + # If it's a scalar + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) + return scalar_w(space, dtype, w_obj) + +def scalar_w(space, dtype, w_obj): + return Scalar(dtype, dtype.coerce(space, w_obj)) + +class Scalar(BaseArray): + """ + Intermediate class representing a literal. 
+ """ + size = 1 + _attrs_ = ["dtype", "value", "shape"] + + def __init__(self, dtype, value): + self.shape = [] + BaseArray.__init__(self, []) + self.dtype = dtype + self.value = value + + def find_dtype(self): + return self.dtype + + def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): + builder.append(self.dtype.itemtype.str_format(self.value)) + + def copy(self, space): + return Scalar(self.dtype, self.value) + + def create_sig(self, res_shape): + return signature.ScalarSignature(self.dtype) + + def get_concrete_or_scalar(self): + return self + + +class VirtualArray(BaseArray): + """ + Class for representing virtual arrays, such as binary ops or ufuncs + """ + def __init__(self, name, shape, res_dtype): + BaseArray.__init__(self, shape) + self.forced_result = None + self.res_dtype = res_dtype + self.name = name + + def _del_sources(self): + # Function for deleting references to source arrays, to allow garbage-collecting them + raise NotImplementedError + + def compute(self): + result = W_NDimArray(self.size, self.shape, self.find_dtype()) + shapelen = len(self.shape) + sig = self.find_sig() + frame = sig.create_frame(self) + ri = ArrayIterator(self.size) + while not ri.done(): + numpy_driver.jit_merge_point(sig=sig, + shapelen=shapelen, + result_size=self.size, + frame=frame, + ri=ri, + self=self, result=result) + result.dtype.setitem(result.storage, ri.offset, + sig.eval(frame, self)) + frame.next(shapelen) + ri = ri.next(shapelen) + return result + + def force_if_needed(self): + if self.forced_result is None: + self.forced_result = self.compute() + self._del_sources() + + def get_concrete(self): + self.force_if_needed() + res = self.forced_result + assert isinstance(res, ConcreteArray) + return res + + def getitem(self, item): + return self.get_concrete().getitem(item) + + def setitem(self, item, value): + return self.get_concrete().setitem(item, value) + + def find_dtype(self): + return self.res_dtype + +class VirtualSlice(VirtualArray): + def __init__(self, child, chunks, shape): + size = 1 + for sh in shape: + size *= sh + self.child = child + self.chunks = chunks + self.size = size + VirtualArray.__init__(self, 'slice', shape, child.find_dtype()) + + def create_sig(self, res_shape): + if self.forced_result is not None: + return self.forced_result.create_sig(res_shape) + return signature.VirtualSliceSignature( + self.child.create_sig(res_shape)) + + def force_if_needed(self): + if self.forced_result is None: + concr = self.child.get_concrete() + self.forced_result = concr.create_slice(self.chunks) + + def _del_sources(self): + self.child = None + +class Call1(VirtualArray): + def __init__(self, ufunc, name, shape, res_dtype, values): + VirtualArray.__init__(self, name, shape, res_dtype) + self.values = values + self.size = values.size + self.ufunc = ufunc + + def _del_sources(self): + self.values = None + + def create_sig(self, res_shape): + if self.forced_result is not None: + return self.forced_result.create_sig(res_shape) + return signature.Call1(self.ufunc, self.name, + self.values.create_sig(res_shape)) + +class Call2(VirtualArray): + """ + Intermediate class for performing binary operations. 
+ """ + def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right): + VirtualArray.__init__(self, name, shape, res_dtype) + self.ufunc = ufunc + self.left = left + self.right = right + self.calc_dtype = calc_dtype + self.size = 1 + for s in self.shape: + self.size *= s + + def _del_sources(self): + self.left = None + self.right = None + + def create_sig(self, res_shape): + if self.forced_result is not None: + return self.forced_result.create_sig(res_shape) + return signature.Call2(self.ufunc, self.name, self.calc_dtype, + self.left.create_sig(res_shape), + self.right.create_sig(res_shape)) + +class ConcreteArray(BaseArray): + """ An array that have actual storage, whether owned or not + """ + _immutable_fields_ = ['storage'] + + def __init__(self, size, shape, dtype, order='C', parent=None): + self.size = size + self.parent = parent + if parent is not None: + self.storage = parent.storage + else: + self.storage = dtype.malloc(size) + self.order = order + self.dtype = dtype + if self.strides is None: + self.calc_strides(shape) + BaseArray.__init__(self, shape) + if parent is not None: + self.invalidates = parent.invalidates + + def get_concrete(self): + return self + + def supports_fast_slicing(self): + return self.order == 'C' and self.strides[-1] == 1 + + def find_dtype(self): + return self.dtype + + def getitem(self, item): + return self.dtype.getitem(self.storage, item) + + def setitem(self, item, value): + self.invalidated() + self.dtype.setitem(self.storage, item, value) + + def calc_strides(self, shape): + strides = [] + backstrides = [] + s = 1 + shape_rev = shape[:] + if self.order == 'C': + shape_rev.reverse() + for sh in shape_rev: + strides.append(s) + backstrides.append(s * (sh - 1)) + s *= sh + if self.order == 'C': + strides.reverse() + backstrides.reverse() + self.strides = strides[:] + self.backstrides = backstrides[:] + + def array_sig(self, res_shape): + if res_shape is not None and self.shape != res_shape: + return signature.ViewSignature(self.dtype) + return signature.ArraySignature(self.dtype) + def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): '''Modifies builder with a representation of the array/slice The items will be seperated by a comma if comma is 1 Multidimensional arrays/slices will span a number of lines, each line will begin with indent. 
''' - size = self.find_size() + size = self.size if size < 1: builder.append('[]') return @@ -654,7 +860,7 @@ builder.append(indent) # create_slice requires len(chunks) > 1 in order to reduce # shape - view = self.create_slice(space, [(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) + view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]).get_concrete() view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) builder.append('\n' + indent + '..., ') i = self.shape[0] - 3 @@ -669,7 +875,7 @@ builder.append(indent) # create_slice requires len(chunks) > 1 in order to reduce # shape - view = self.create_slice(space, [(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]) + view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]).get_concrete() view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) i += 1 elif ndims == 1: @@ -705,12 +911,6 @@ builder.append('[') builder.append(']') - def descr_str(self, space): - ret = StringBuilder() - concrete = self.get_concrete() - concrete.to_str(space, 0, ret, ' ') - return space.wrap(ret.build()) - @jit.unroll_safe def _index_of_single_item(self, space, w_idx): if space.isinstance_w(w_idx, space.w_int): @@ -735,456 +935,76 @@ item += v * self.strides[i] return item - @jit.unroll_safe - def _single_item_result(self, space, w_idx): - """ The result of getitem/setitem is a single item if w_idx - is a list of scalars that match the size of shape - """ - shape_len = len(self.shape) - if shape_len == 0: - if not space.isinstance_w(w_idx, space.w_int): - raise OperationError(space.w_IndexError, space.wrap( - "wrong index")) - return True - if shape_len == 1: - if space.isinstance_w(w_idx, space.w_int): - return True - if space.isinstance_w(w_idx, space.w_slice): - return False - elif (space.isinstance_w(w_idx, space.w_slice) or - space.isinstance_w(w_idx, space.w_int)): - return False - lgt = space.len_w(w_idx) - if lgt > shape_len: - raise OperationError(space.w_IndexError, - space.wrap("invalid index")) - if lgt < shape_len: - return False - for w_item in space.fixedview(w_idx): - if space.isinstance_w(w_item, space.w_slice): - return False - return True + def setslice(self, space, w_value): + res_shape = shape_agreement(space, self.shape, w_value.shape) + if (res_shape == w_value.shape and self.supports_fast_slicing() and + w_value.supports_fast_slicing() and + self.dtype is w_value.find_dtype()): + self._fast_setslice(space, w_value) + else: + self._sliceloop(w_value, res_shape) - @jit.unroll_safe - def _prepare_slice_args(self, space, w_idx): - if (space.isinstance_w(w_idx, space.w_int) or - space.isinstance_w(w_idx, space.w_slice)): - return [space.decode_index4(w_idx, self.shape[0])] - return [space.decode_index4(w_item, self.shape[i]) for i, w_item in - enumerate(space.fixedview(w_idx))] + def _fast_setslice(self, space, w_value): + assert isinstance(w_value, ConcreteArray) + itemsize = self.dtype.itemtype.get_element_size() + if len(self.shape) == 1: + rffi.c_memcpy( + rffi.ptradd(self.storage, self.start * itemsize), + rffi.ptradd(w_value.storage, w_value.start * itemsize), + self.size * itemsize + ) + else: + dest = AxisIterator(self) + source = AxisIterator(w_value) + while not dest.done: + rffi.c_memcpy( + rffi.ptradd(self.storage, dest.offset * itemsize), + rffi.ptradd(w_value.storage, source.offset * itemsize), + self.shape[-1] * itemsize + ) + source.next() + dest.next() - def descr_getitem(self, space, w_idx): - if 
self._single_item_result(space, w_idx): - concrete = self.get_concrete() - if len(concrete.shape) < 1: - raise OperationError(space.w_IndexError, space.wrap( - "0-d arrays can't be indexed")) - item = concrete._index_of_single_item(space, w_idx) - return concrete.getitem(item) - chunks = self._prepare_slice_args(space, w_idx) - return space.wrap(self.create_slice(space, chunks)) + def _sliceloop(self, source, res_shape): + sig = source.find_sig(res_shape) + frame = sig.create_frame(source, res_shape) + res_iter = view_iter_from_arr(self) + shapelen = len(res_shape) + while not res_iter.done(): + slice_driver.jit_merge_point(sig=sig, + frame=frame, + shapelen=shapelen, + self=self, source=source, + res_iter=res_iter) + self.setitem(res_iter.offset, sig.eval(frame, source).convert_to( + self.find_dtype())) + frame.next(shapelen) + res_iter = res_iter.next(shapelen) - def descr_setitem(self, space, w_idx, w_value): - self.invalidated() - if self._single_item_result(space, w_idx): - concrete = self.get_concrete() - if len(concrete.shape) < 1: - raise OperationError(space.w_IndexError, space.wrap( - "0-d arrays can't be indexed")) - item = concrete._index_of_single_item(space, w_idx) - dtype = concrete.find_dtype() - concrete.setitem(item, dtype.coerce(space, w_value)) - return - if not isinstance(w_value, BaseArray): - w_value = convert_to_array(space, w_value) - chunks = self._prepare_slice_args(space, w_idx) - view = self.create_slice(space, chunks) - view.setslice(space, w_value) + def copy(self, space): + array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) + array.setslice(space, self) + return array - @jit.unroll_safe - def create_slice(self, space, chunks): - if len(chunks) == 1: - start, stop, step, lgt = chunks[0] - if step == 0: - shape = self.shape[1:] - strides = self.strides[1:] - backstrides = self.backstrides[1:] - else: - shape = [lgt] + self.shape[1:] - strides = [self.strides[0] * step] + self.strides[1:] - backstrides = [(lgt - 1) * self.strides[0] * step] + self.backstrides[1:] - start *= self.strides[0] - start += self.start - else: - shape = [] - strides = [] - backstrides = [] - start = self.start - i = -1 - for i, (start_, stop, step, lgt) in enumerate(chunks): - if step != 0: - shape.append(lgt) - strides.append(self.strides[i] * step) - backstrides.append(self.strides[i] * (lgt - 1) * step) - start += self.strides[i] * start_ - # add a reminder - s = i + 1 - assert s >= 0 - shape += self.shape[s:] - strides += self.strides[s:] - backstrides += self.backstrides[s:] - new_sig = signature.Signature.find_sig([ - W_NDimSlice.signature, self.signature, - ]) - return W_NDimSlice(self, new_sig, start, strides[:], backstrides[:], - shape[:]) - def descr_reshape(self, space, args_w): - """reshape(...) - a.reshape(shape) +class ViewArray(ConcreteArray): + def create_sig(self, res_shape): + return signature.ViewSignature(self.dtype) - Returns an array containing the same data with a new shape. - Refer to `numpypy.reshape` for full documentation. - - See Also - -------- - numpypy.reshape : equivalent function -""" - if len(args_w) == 1: - w_shape = args_w[0] - else: - w_shape = space.newtuple(args_w) - concrete = self.get_concrete() - new_shape = get_shape_from_iterable(space, - concrete.find_size(), w_shape) - # Since we got to here, prod(new_shape) == self.size - new_strides = calc_new_strides(new_shape, - concrete.shape, concrete.strides) - if new_strides: - # We can create a view, strides somehow match up. 
- new_sig = signature.Signature.find_sig([ - W_NDimSlice.signature, self.signature - ]) - ndims = len(new_shape) - new_backstrides = [0] * ndims - for nd in range(ndims): - new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] - arr = W_NDimSlice(self, new_sig, self.start, new_strides, - new_backstrides, new_shape) - else: - # Create copy with contiguous data - arr = concrete.copy() - arr.setshape(space, new_shape) - return arr - - def descr_tolist(self, space): - if len(self.shape) == 0: - assert isinstance(self, Scalar) - return self.value.descr_tolist(space) - w_result = space.newlist([]) - for i in range(self.shape[0]): - space.call_method(w_result, "append", - space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist") - ) - return w_result - - def descr_mean(self, space): - return space.div(self.descr_sum(space), space.wrap(self.find_size())) - - def descr_nonzero(self, space): - if self.find_size() > 1: - raise OperationError(space.w_ValueError, space.wrap( - "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) - return space.wrap(space.is_true( - self.get_concrete().eval(self.start_iter(self.shape)) - )) - - def descr_get_transpose(self, space): - concrete = self.get_concrete() - if len(concrete.shape) < 2: - return space.wrap(self) - new_sig = signature.Signature.find_sig([ - W_NDimSlice.signature, self.signature - ]) - strides = [] - backstrides = [] - shape = [] - for i in range(len(concrete.shape) - 1, -1, -1): - strides.append(concrete.strides[i]) - backstrides.append(concrete.backstrides[i]) - shape.append(concrete.shape[i]) - return space.wrap(W_NDimSlice(concrete, new_sig, self.start, strides[:], - backstrides[:], shape[:])) - - def descr_get_flatiter(self, space): - return space.wrap(W_FlatIterator(self)) - - def getitem(self, item): - raise NotImplementedError - - def start_iter(self, res_shape=None): - raise NotImplementedError - - def descr_array_iface(self, space): - concrete = self.get_concrete() - storage = concrete.get_storage(space) - addr = rffi.cast(lltype.Signed, storage) - w_d = space.newdict() - space.setitem_str(w_d, 'data', space.newtuple([space.wrap(addr), - space.w_False])) - return w_d - -def convert_to_array(space, w_obj): - if isinstance(w_obj, BaseArray): - return w_obj - elif space.issequence_w(w_obj): - # Convert to array. - return array(space, w_obj, w_order=None) - else: - # If it's a scalar - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) - return scalar_w(space, dtype, w_obj) - -def scalar_w(space, dtype, w_obj): - return Scalar(dtype, dtype.coerce(space, w_obj)) - -class Scalar(BaseArray): - """ - Intermediate class representing a literal. 
- """ - signature = signature.BaseSignature() - - _attrs_ = ["dtype", "value", "shape"] - - def __init__(self, dtype, value): - self.shape = self.strides = [] - BaseArray.__init__(self, [], 'C') - self.dtype = dtype - self.value = value - - def find_size(self): - return 1 - - def get_concrete(self): - return self - - def find_dtype(self): - return self.dtype - - def getitem(self, item): - raise NotImplementedError - - def eval(self, iter): - return self.value - - def start_iter(self, res_shape=None): - return ConstantIterator() - - def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): - builder.append(self.dtype.itemtype.str_format(self.value)) - - def copy(self): - return Scalar(self.dtype, self.value) - - def debug_repr(self): - return 'Scalar' - - def setshape(self, space, new_shape): - # In order to get here, we already checked that prod(new_shape) == 1, - # so in order to have a consistent API, let it go through. - pass - - def get_storage(self, space): - raise OperationError(space.w_TypeError, space.wrap("Cannot get array interface on scalars in pypy")) - -class VirtualArray(BaseArray): - """ - Class for representing virtual arrays, such as binary ops or ufuncs - """ - def __init__(self, signature, shape, res_dtype, order): - BaseArray.__init__(self, shape, order) - self.forced_result = None - self.signature = signature - self.res_dtype = res_dtype - - def _del_sources(self): - # Function for deleting references to source arrays, to allow garbage-collecting them - raise NotImplementedError - - def compute(self): - i = 0 - signature = self.signature - result_size = self.find_size() - result = W_NDimArray(result_size, self.shape, self.find_dtype()) - shapelen = len(self.shape) - i = self.start_iter() - ri = result.start_iter() - while not ri.done(): - numpy_driver.jit_merge_point(signature=signature, - shapelen=shapelen, - result_size=result_size, i=i, ri=ri, - self=self, result=result) - result.dtype.setitem(result.storage, ri.offset, self.eval(i)) - i = i.next(shapelen) - ri = ri.next(shapelen) - return result - - def force_if_needed(self): - if self.forced_result is None: - self.forced_result = self.compute() - self._del_sources() - - def get_concrete(self): - self.force_if_needed() - return self.forced_result - - def eval(self, iter): - if self.forced_result is not None: - return self.forced_result.eval(iter) - return self._eval(iter) - - def getitem(self, item): - return self.get_concrete().getitem(item) - - def setitem(self, item, value): - return self.get_concrete().setitem(item, value) - - def find_size(self): - if self.forced_result is not None: - # The result has been computed and sources may be unavailable - return self.forced_result.find_size() - return self._find_size() - - def find_dtype(self): - return self.res_dtype - - -class Call1(VirtualArray): - def __init__(self, signature, shape, res_dtype, values, order): - VirtualArray.__init__(self, signature, shape, res_dtype, - values.order) - self.values = values - - def _del_sources(self): - self.values = None - - def _find_size(self): - return self.values.find_size() - - def _find_dtype(self): - return self.res_dtype - - def _eval(self, iter): - assert isinstance(iter, Call1Iterator) - val = self.values.eval(iter.child).convert_to(self.res_dtype) - sig = jit.promote(self.signature) - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call1) - return call_sig.func(self.res_dtype, val) - - def start_iter(self, res_shape=None): - if 
self.forced_result is not None: - return self.forced_result.start_iter(res_shape) - return Call1Iterator(self.values.start_iter(res_shape)) - - def debug_repr(self): - sig = self.signature - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call1) - if self.forced_result is not None: - return 'Call1(%s, forced=%s)' % (call_sig.name, - self.forced_result.debug_repr()) - return 'Call1(%s, %s)' % (call_sig.name, - self.values.debug_repr()) - -class Call2(VirtualArray): - """ - Intermediate class for performing binary operations. - """ - def __init__(self, signature, shape, calc_dtype, res_dtype, left, right): - # XXX do something if left.order != right.order - VirtualArray.__init__(self, signature, shape, res_dtype, left.order) - self.left = left - self.right = right - self.calc_dtype = calc_dtype - self.size = 1 - for s in self.shape: - self.size *= s - - def _del_sources(self): - self.left = None - self.right = None - - def _find_size(self): - return self.size - - def start_iter(self, res_shape=None): - if self.forced_result is not None: - return self.forced_result.start_iter(res_shape) - if res_shape is None: - res_shape = self.shape # we still force the shape on children - return Call2Iterator(self.left.start_iter(res_shape), - self.right.start_iter(res_shape)) - - def _eval(self, iter): - assert isinstance(iter, Call2Iterator) - lhs = self.left.eval(iter.left).convert_to(self.calc_dtype) - rhs = self.right.eval(iter.right).convert_to(self.calc_dtype) - sig = jit.promote(self.signature) - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call2) - return call_sig.func(self.calc_dtype, lhs, rhs) - - def debug_repr(self): - sig = self.signature - assert isinstance(sig, signature.Signature) - call_sig = sig.components[0] - assert isinstance(call_sig, signature.Call2) - if self.forced_result is not None: - return 'Call2(%s, forced=%s)' % (call_sig.name, - self.forced_result.debug_repr()) - return 'Call2(%s, %s, %s)' % (call_sig.name, - self.left.debug_repr(), - self.right.debug_repr()) - -class ViewArray(BaseArray): - """ - Class for representing views of arrays, they will reflect changes of parent - arrays. Example: slices - """ - def __init__(self, parent, signature, strides, backstrides, shape): +class W_NDimSlice(ViewArray): + def __init__(self, start, strides, backstrides, shape, parent): + assert isinstance(parent, ConcreteArray) + if isinstance(parent, W_NDimSlice): + parent = parent.parent + size = 1 + for sh in shape: + size *= sh self.strides = strides self.backstrides = backstrides - BaseArray.__init__(self, shape, parent.order) - self.signature = signature - self.parent = parent - self.invalidates = parent.invalidates - - def get_concrete(self): - # in fact, ViewArray never gets "concrete" as it never stores data. - # This implementation is needed for BaseArray getitem/setitem to work, - # can be refactored. - self.parent.get_concrete() - return self - - def getitem(self, item): - return self.parent.getitem(item) - - def eval(self, iter): - return self.parent.getitem(iter.get_offset()) - - def setitem(self, item, value): - # This is currently not possible to be called from anywhere. 
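# Call1/Call2 above are "virtual" arrays: a + b only records the operation,
# and the result is materialised (forced) the first time concrete data is
# needed, after which the references to the source arrays are dropped.  A
# stripped-down sketch of that force-on-demand behaviour, illustrative only
# and using plain Python lists instead of dtypes and raw storage:

class SketchCall2(object):
    def __init__(self, func, left, right):
        self.func = func
        self.left = left
        self.right = right
        self.forced_result = None
    def force_if_needed(self):
        if self.forced_result is None:
            self.forced_result = [self.func(l, r)
                                  for l, r in zip(self.left, self.right)]
            # allow the source arrays to be garbage-collected
            self.left = self.right = None
        return self.forced_result

# lazy = SketchCall2(lambda a, b: a + b, [1, 2, 3], [10, 20, 30])
# lazy.force_if_needed()   # -> [11, 22, 33]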
- raise NotImplementedError - - def descr_len(self, space): - if self.shape: - return space.wrap(self.shape[0]) - return space.wrap(1) + ViewArray.__init__(self, size, shape, parent.dtype, parent.order, + parent) + self.start = start def setshape(self, space, new_shape): if len(self.shape) < 1: @@ -1220,131 +1040,20 @@ self.backstrides = new_backstrides[:] self.shape = new_shape[:] -class W_NDimSlice(ViewArray): - signature = signature.BaseSignature() - - def __init__(self, parent, signature, start, strides, backstrides, - shape): - if isinstance(parent, W_NDimSlice): - parent = parent.parent - ViewArray.__init__(self, parent, signature, strides, backstrides, shape) - self.start = start - self.size = 1 - for sh in shape: - self.size *= sh - - def find_size(self): - return self.size - - def find_dtype(self): - return self.parent.find_dtype() - - def setslice(self, space, w_value): - res_shape = shape_agreement(space, self.shape, w_value.shape) - self._sliceloop(w_value, res_shape) - - def _sliceloop(self, source, res_shape): - source_iter = source.start_iter(res_shape) - res_iter = self.start_iter(res_shape) - shapelen = len(res_shape) - while not res_iter.done(): - slice_driver.jit_merge_point(signature=source.signature, - shapelen=shapelen, - self=self, source=source, - res_iter=res_iter, - source_iter=source_iter) - self.setitem(res_iter.offset, source.eval(source_iter).convert_to( - self.find_dtype())) - source_iter = source_iter.next(shapelen) - res_iter = res_iter.next(shapelen) - - def start_iter(self, res_shape=None): - if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - if len(self.shape) == 1: - return OneDimIterator(self.start, self.strides[0], self.shape[0]) - return ViewIterator(self) - - def setitem(self, item, value): - self.parent.setitem(item, value) - - def debug_repr(self): - return 'Slice(%s)' % self.parent.debug_repr() - - def copy(self): - array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) - iter = self.start_iter() - a_iter = array.start_iter() - while not iter.done(): - array.setitem(a_iter.offset, self.getitem(iter.offset)) - iter = iter.next(len(self.shape)) - a_iter = a_iter.next(len(array.shape)) - return array - - def get_storage(self, space): - return self.parent.get_storage(space) - -class W_NDimArray(BaseArray): +class W_NDimArray(ConcreteArray): """ A class representing contiguous array. 
We know that each iteration by say ufunc will increase the data index by one """ - def __init__(self, size, shape, dtype, order='C'): - BaseArray.__init__(self, shape, order) - self.size = size - self.dtype = dtype - self.storage = dtype.malloc(size) - self.signature = dtype.signature - - def get_concrete(self): - return self - - def find_size(self): - return self.size - - def find_dtype(self): - return self.dtype - - def getitem(self, item): - return self.dtype.getitem(self.storage, item) - - def eval(self, iter): - return self.dtype.getitem(self.storage, iter.get_offset()) - - def copy(self): - array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) - rffi.c_memcpy( - array.storage, - self.storage, - self.size * self.dtype.itemtype.get_element_size() - ) - return array - - def descr_len(self, space): - if len(self.shape): - return space.wrap(self.shape[0]) - raise OperationError(space.w_TypeError, space.wrap( - "len() of unsized object")) - def setitem(self, item, value): self.invalidated() self.dtype.setitem(self.storage, item, value) - def start_iter(self, res_shape=None): - if self.order == 'C': - if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - return ArrayIterator(self.size) - raise NotImplementedError # use ViewIterator simply, test it - def setshape(self, space, new_shape): self.shape = new_shape self.calc_strides(new_shape) - def debug_repr(self): - return 'Array' - - def get_storage(self, space): - return self.storage + def create_sig(self, res_shape): + return self.array_sig(res_shape) def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) @@ -1396,10 +1105,11 @@ ) arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) shapelen = len(shape) - arr_iter = arr.start_iter(arr.shape) + arr_iter = ArrayIterator(arr.size) for i in range(len(elems_w)): w_elem = elems_w[i] - dtype.setitem(arr.storage, arr_iter.offset, dtype.coerce(space, w_elem)) + dtype.setitem(arr.storage, arr_iter.offset, + dtype.coerce(space, w_elem)) arr_iter = arr_iter.next(shapelen) return arr @@ -1492,48 +1202,31 @@ class W_FlatIterator(ViewArray): - signature = signature.BaseSignature() @jit.unroll_safe def __init__(self, arr): + arr = arr.get_concrete() size = 1 for sh in arr.shape: size *= sh - new_sig = signature.Signature.find_sig([ - W_FlatIterator.signature, arr.signature - ]) - ViewArray.__init__(self, arr, new_sig, [arr.strides[-1]], - [arr.backstrides[-1]], [size]) + self.strides = [arr.strides[-1]] + self.backstrides = [arr.backstrides[-1]] + ViewArray.__init__(self, size, [size], arr.dtype, arr.order, + arr) self.shapelen = len(arr.shape) - self.arr = arr - self.iter = self.start_iter() - - def start_iter(self, res_shape=None): - if res_shape is not None and res_shape != self.shape: - return BroadcastIterator(self, res_shape) - return OneDimIterator(self.arr.start, self.strides[0], - self.shape[0]) - - def find_dtype(self): - return self.arr.find_dtype() - - def find_size(self): - return self.shape[0] + self.iter = OneDimIterator(arr.start, self.strides[0], + self.shape[0]) def descr_next(self, space): if self.iter.done(): raise OperationError(space.w_StopIteration, space.w_None) - result = self.eval(self.iter) + result = self.getitem(self.iter.offset) self.iter = self.iter.next(self.shapelen) return result def descr_iter(self): return self - def debug_repr(self): - return 'FlatIter(%s)' % self.arr.debug_repr() - - W_FlatIterator.typedef = TypeDef( 'flatiter', next = 
interp2app(W_FlatIterator.descr_next), diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -2,20 +2,21 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import interp_boxes, interp_dtype, signature, types +from pypy.module.micronumpy import interp_boxes, interp_dtype, types +from pypy.module.micronumpy.signature import ReduceSignature, ScalarSignature, find_sig from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name - reduce_driver = jit.JitDriver( - greens = ['shapelen', "signature"], - reds = ["i", "self", "dtype", "value", "obj"] + greens = ['shapelen', "sig"], + virtualizables = ["frame"], + reds = ["frame", "self", "dtype", "value", "obj"] ) class W_Ufunc(Wrappable): _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"] - _immutable_fields_ = ["promote_to_float", "promote_bools"] + _immutable_fields_ = ["promote_to_float", "promote_bools", "name"] def __init__(self, name, promote_to_float, promote_bools, identity): self.name = name @@ -50,6 +51,7 @@ def reduce(self, space, w_obj, multidim): from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar + if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) @@ -60,13 +62,16 @@ raise OperationError(space.w_TypeError, space.wrap("cannot reduce " "on a scalar")) - size = obj.find_size() + size = obj.size dtype = find_unaryop_result_dtype( space, obj.find_dtype(), promote_to_largest=True ) - start = obj.start_iter(obj.shape) shapelen = len(obj.shape) + sig = find_sig(ReduceSignature(self.func, self.name, dtype, + ScalarSignature(dtype), + obj.create_sig(obj.shape)), obj) + frame = sig.create_frame(obj) if shapelen > 1 and not multidim: raise OperationError(space.w_NotImplementedError, space.wrap("not implemented yet")) @@ -74,34 +79,33 @@ if size == 0: raise operationerrfmt(space.w_ValueError, "zero-size array to " "%s.reduce without identity", self.name) - value = obj.eval(start).convert_to(dtype) - start = start.next(shapelen) + value = sig.eval(frame, obj).convert_to(dtype) + frame.next(shapelen) else: value = self.identity.convert_to(dtype) - new_sig = signature.Signature.find_sig([ - self.reduce_signature, obj.signature - ]) - return self.reduce_loop(new_sig, shapelen, start, value, obj, dtype) + return self.reduce_loop(shapelen, sig, frame, value, obj, dtype) - def reduce_loop(self, signature, shapelen, i, value, obj, dtype): - while not i.done(): - reduce_driver.jit_merge_point(signature=signature, + def reduce_loop(self, shapelen, sig, frame, value, obj, dtype): + while not frame.done(): + reduce_driver.jit_merge_point(sig=sig, shapelen=shapelen, self=self, - value=value, obj=obj, i=i, + value=value, obj=obj, frame=frame, dtype=dtype) - value = self.func(dtype, value, obj.eval(i).convert_to(dtype)) - i = i.next(shapelen) + assert isinstance(sig, ReduceSignature) + value = sig.binfunc(dtype, value, sig.eval(frame, obj).convert_to(dtype)) + frame.next(shapelen) return value class W_Ufunc1(W_Ufunc): argcount = 1 + _immutable_fields_ = ["func", "name"] + def __init__(self, func, name, promote_to_float=False, promote_bools=False, identity=None): 
W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity) self.func = func - self.signature = signature.Call1(func) def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call1, @@ -117,14 +121,13 @@ if isinstance(w_obj, Scalar): return self.func(res_dtype, w_obj.value.convert_to(res_dtype)) - new_sig = signature.Signature.find_sig([self.signature, w_obj.signature]) - w_res = Call1(new_sig, w_obj.shape, res_dtype, w_obj, w_obj.order) + w_res = Call1(self.func, self.name, w_obj.shape, res_dtype, w_obj) w_obj.add_invalidates(w_res) return w_res class W_Ufunc2(W_Ufunc): - _immutable_fields_ = ["comparison_func", "func"] + _immutable_fields_ = ["comparison_func", "func", "name"] argcount = 2 def __init__(self, func, name, promote_to_float=False, promote_bools=False, @@ -133,8 +136,6 @@ W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity) self.func = func self.comparison_func = comparison_func - self.signature = signature.Call2(func) - self.reduce_signature = signature.BaseSignature() def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, @@ -158,11 +159,9 @@ w_rhs.value.convert_to(calc_dtype) ) - new_sig = signature.Signature.find_sig([ - self.signature, w_lhs.signature, w_rhs.signature - ]) new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape) - w_res = Call2(new_sig, new_shape, calc_dtype, + w_res = Call2(self.func, self.name, + new_shape, calc_dtype, res_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -1,54 +1,322 @@ -from pypy.rlib.objectmodel import r_dict, compute_identity_hash +from pypy.rlib.objectmodel import r_dict, compute_identity_hash, compute_hash from pypy.rlib.rarithmetic import intmask +from pypy.module.micronumpy.interp_iter import ViewIterator, ArrayIterator, \ + OneDimIterator, ConstantIterator +from pypy.module.micronumpy.strides import calculate_slice_strides +from pypy.rlib.jit import hint, unroll_safe, promote +def sigeq(one, two): + return one.eq(two) -def components_eq(lhs, rhs): - if len(lhs) != len(rhs): - return False - for i in range(len(lhs)): - v1, v2 = lhs[i], rhs[i] - if type(v1) is not type(v2) or not v1.eq(v2): +def sigeq_no_numbering(one, two): + """ Cache for iterator numbering should not compare array numbers + """ + return one.eq(two, compare_array_no=False) + +def sighash(sig): + return sig.hash() + +known_sigs = r_dict(sigeq, sighash) + +def find_sig(sig, arr): + sig.invent_array_numbering(arr) + try: + return known_sigs[sig] + except KeyError: + sig.invent_numbering() + known_sigs[sig] = sig + return sig + +class NumpyEvalFrame(object): + _virtualizable2_ = ['iterators[*]', 'final_iter', 'arraylist[*]'] + + @unroll_safe + def __init__(self, iterators, arrays): + self = hint(self, access_directly=True, fresh_virtualizable=True) + self.iterators = iterators[:] + self.arrays = arrays[:] + for i in range(len(self.iterators)): + iter = self.iterators[i] + if not isinstance(iter, ConstantIterator): + self.final_iter = i + break + else: + self.final_iter = -1 + + def done(self): + final_iter = promote(self.final_iter) + if final_iter < 0: return False - return True + return self.iterators[final_iter].done() -def components_hash(components): - res = 0x345678 - for component in components: - res = intmask((1000003 * res) ^ component.hash()) - return res + 
@unroll_safe + def next(self, shapelen): + for i in range(len(self.iterators)): + self.iterators[i] = self.iterators[i].next(shapelen) -class BaseSignature(object): - _attrs_ = [] +def _add_ptr_to_cache(ptr, cache): + i = 0 + for p in cache: + if ptr == p: + return i + i += 1 + else: + res = len(cache) + cache.append(ptr) + return res - def eq(self, other): - return self is other +class Signature(object): + _attrs_ = ['iter_no', 'array_no'] + _immutable_fields_ = ['iter_no', 'array_no'] + + array_no = 0 + iter_no = 0 + + def invent_numbering(self): + cache = r_dict(sigeq_no_numbering, sighash) + allnumbers = [] + self._invent_numbering(cache, allnumbers) + + def invent_array_numbering(self, arr): + cache = [] + self._invent_array_numbering(arr, cache) + + def _invent_numbering(self, cache, allnumbers): + try: + no = cache[self] + except KeyError: + no = len(allnumbers) + cache[self] = no + allnumbers.append(no) + self.iter_no = no + + def create_frame(self, arr, res_shape=None): + res_shape = res_shape or arr.shape + iterlist = [] + arraylist = [] + self._create_iter(iterlist, arraylist, arr, res_shape, []) + return NumpyEvalFrame(iterlist, arraylist) + +class ConcreteSignature(Signature): + _immutable_fields_ = ['dtype'] + + def __init__(self, dtype): + self.dtype = dtype + + def eq(self, other, compare_array_no=True): + if type(self) is not type(other): + return False + assert isinstance(other, ConcreteSignature) + if compare_array_no: + if self.array_no != other.array_no: + return False + return self.dtype is other.dtype def hash(self): - return compute_identity_hash(self) + return compute_identity_hash(self.dtype) -class Signature(BaseSignature): - _known_sigs = r_dict(components_eq, components_hash) + def allocate_view_iter(self, arr, res_shape, chunklist): + r = arr.shape, arr.start, arr.strides, arr.backstrides + if chunklist: + for chunkelem in chunklist: + r = calculate_slice_strides(r[0], r[1], r[2], r[3], chunkelem) + shape, start, strides, backstrides = r + if len(res_shape) == 1: + return OneDimIterator(start, strides[0], res_shape[0]) + return ViewIterator(start, strides, backstrides, shape, res_shape) - _attrs_ = ["components"] - _immutable_fields_ = ["components[*]"] +class ArraySignature(ConcreteSignature): + def debug_repr(self): + return 'Array' - def __init__(self, components): - self.components = components + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import ConcreteArray + concr = arr.get_concrete() + assert isinstance(concr, ConcreteArray) + self.array_no = _add_ptr_to_cache(concr.storage, cache) - @staticmethod - def find_sig(components): - return Signature._known_sigs.setdefault(components, Signature(components)) + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import ConcreteArray + concr = arr.get_concrete() + assert isinstance(concr, ConcreteArray) + storage = concr.storage + if self.iter_no >= len(iterlist): + iterlist.append(self.allocate_iter(concr, res_shape, chunklist)) + if self.array_no >= len(arraylist): + arraylist.append(storage) -class Call1(BaseSignature): - _immutable_fields_ = ["func", "name"] + def allocate_iter(self, arr, res_shape, chunklist): + if chunklist: + return self.allocate_view_iter(arr, res_shape, chunklist) + return ArrayIterator(arr.size) - def __init__(self, func): - self.func = func - self.name = func.func_name + def eval(self, frame, arr): + iter = frame.iterators[self.iter_no] + return 
self.dtype.getitem(frame.arrays[self.array_no], iter.offset) -class Call2(BaseSignature): - _immutable_fields_ = ["func", "name"] +class ScalarSignature(ConcreteSignature): + def debug_repr(self): + return 'Scalar' - def __init__(self, func): - self.func = func - self.name = func.func_name + def _invent_array_numbering(self, arr, cache): + pass + + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + if self.iter_no >= len(iterlist): + iter = ConstantIterator() + iterlist.append(iter) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Scalar + assert isinstance(arr, Scalar) + return arr.value + +class ViewSignature(ArraySignature): + def debug_repr(self): + return 'Slice' + + def _invent_numbering(self, cache, allnumbers): + # always invent a new number for view + no = len(allnumbers) + allnumbers.append(no) + self.iter_no = no + + def allocate_iter(self, arr, res_shape, chunklist): + return self.allocate_view_iter(arr, res_shape, chunklist) + +class VirtualSliceSignature(Signature): + def __init__(self, child): + self.child = child + + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import VirtualSlice + assert isinstance(arr, VirtualSlice) + self.child._invent_array_numbering(arr.child, cache) + + def hash(self): + return intmask(self.child.hash() ^ 1234) + + def eq(self, other, compare_array_no=True): + if type(self) is not type(other): + return False + assert isinstance(other, VirtualSliceSignature) + return self.child.eq(other.child, compare_array_no) + + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import VirtualSlice + assert isinstance(arr, VirtualSlice) + chunklist.append(arr.chunks) + self.child._create_iter(iterlist, arraylist, arr.child, res_shape, + chunklist) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import VirtualSlice + assert isinstance(arr, VirtualSlice) + return self.child.eval(frame, arr.child) + +class Call1(Signature): + _immutable_fields_ = ['unfunc', 'name', 'child'] + + def __init__(self, func, name, child): + self.unfunc = func + self.child = child + self.name = name + + def hash(self): + return compute_hash(self.name) ^ intmask(self.child.hash() << 1) + + def eq(self, other, compare_array_no=True): + if type(self) is not type(other): + return False + assert isinstance(other, Call1) + return (self.unfunc is other.unfunc and + self.child.eq(other.child, compare_array_no)) + + def debug_repr(self): + return 'Call1(%s, %s)' % (self.name, self.child.debug_repr()) + + def _invent_numbering(self, cache, allnumbers): + self.child._invent_numbering(cache, allnumbers) + + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + self.child._invent_array_numbering(arr.values, cache) + + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + self.child._create_iter(iterlist, arraylist, arr.values, res_shape, + chunklist) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Call1 + assert isinstance(arr, Call1) + v = self.child.eval(frame, arr.values).convert_to(arr.res_dtype) + return self.unfunc(arr.res_dtype, v) + +class Call2(Signature): + _immutable_fields_ = ['binfunc', 'name', 'calc_dtype', 'left', 'right'] + + def __init__(self, func, name, 
calc_dtype, left, right): + self.binfunc = func + self.left = left + self.right = right + self.name = name + self.calc_dtype = calc_dtype + + def hash(self): + return (compute_hash(self.name) ^ intmask(self.left.hash() << 1) ^ + intmask(self.right.hash() << 2)) + + def eq(self, other, compare_array_no=True): + if type(self) is not type(other): + return False + assert isinstance(other, Call2) + return (self.binfunc is other.binfunc and + self.calc_dtype is other.calc_dtype and + self.left.eq(other.left, compare_array_no) and + self.right.eq(other.right, compare_array_no)) + + def _invent_array_numbering(self, arr, cache): + from pypy.module.micronumpy.interp_numarray import Call2 + assert isinstance(arr, Call2) + self.left._invent_array_numbering(arr.left, cache) + self.right._invent_array_numbering(arr.right, cache) + + def _invent_numbering(self, cache, allnumbers): + self.left._invent_numbering(cache, allnumbers) + self.right._invent_numbering(cache, allnumbers) + + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + from pypy.module.micronumpy.interp_numarray import Call2 + + assert isinstance(arr, Call2) + self.left._create_iter(iterlist, arraylist, arr.left, res_shape, + chunklist) + self.right._create_iter(iterlist, arraylist, arr.right, res_shape, + chunklist) + + def eval(self, frame, arr): + from pypy.module.micronumpy.interp_numarray import Call2 + assert isinstance(arr, Call2) + lhs = self.left.eval(frame, arr.left).convert_to(self.calc_dtype) + rhs = self.right.eval(frame, arr.right).convert_to(self.calc_dtype) + return self.binfunc(self.calc_dtype, lhs, rhs) + + def debug_repr(self): + return 'Call2(%s, %s, %s)' % (self.name, self.left.debug_repr(), + self.right.debug_repr()) + +class ReduceSignature(Call2): + def _create_iter(self, iterlist, arraylist, arr, res_shape, chunklist): + self.right._create_iter(iterlist, arraylist, arr, res_shape, chunklist) + + def _invent_numbering(self, cache, allnumbers): + self.right._invent_numbering(cache, allnumbers) + + def _invent_array_numbering(self, arr, cache): + self.right._invent_array_numbering(arr, cache) + + def eval(self, frame, arr): + return self.right.eval(frame, arr) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/strides.py @@ -0,0 +1,34 @@ + +def calculate_slice_strides(shape, start, strides, backstrides, chunks): + rstrides = [] + rbackstrides = [] + rstart = start + rshape = [] + i = -1 + for i, (start_, stop, step, lgt) in enumerate(chunks): + if step != 0: + rstrides.append(strides[i] * step) + rbackstrides.append(strides[i] * (lgt - 1) * step) + rshape.append(lgt) + rstart += strides[i] * start_ + # add a reminder + s = i + 1 + assert s >= 0 + rstrides += strides[s:] + rbackstrides += backstrides[s:] + rshape += shape[s:] + return rshape, rstart, rstrides, rbackstrides + +def calculate_broadcast_strides(strides, backstrides, orig_shape, res_shape): + rstrides = [] + rbackstrides = [] + for i in range(len(orig_shape)): + if orig_shape[i] == 1: + rstrides.append(0) + rbackstrides.append(0) + else: + rstrides.append(strides[i]) + rbackstrides.append(backstrides[i]) + rstrides = [0] * (len(res_shape) - len(orig_shape)) + rstrides + rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides + return rstrides, rbackstrides diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ 
b/pypy/module/micronumpy/test/test_base.py @@ -4,7 +4,6 @@ from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) - class BaseNumpyAppTest(object): def setup_class(cls): cls.space = gettestobjspace(usemodules=['micronumpy']) @@ -15,20 +14,37 @@ bool_dtype = get_dtype_cache(space).w_booldtype ar = W_NDimArray(10, [10], dtype=float64_dtype) + ar2 = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) - assert v1.signature is not v2.signature + sig1 = v1.find_sig() + sig2 = v2.find_sig() + assert v1 is not v2 + assert sig1.left.iter_no == sig1.right.iter_no + assert sig2.left.iter_no != sig2.right.iter_no + assert sig1.left.array_no == sig1.right.array_no + sig1b = ar2.descr_add(space, ar).find_sig() + assert sig1b.left.array_no != sig1b.right.array_no + assert sig1b is not sig1 v3 = ar.descr_add(space, Scalar(float64_dtype, 1.0)) - assert v2.signature is v3.signature + sig3 = v3.find_sig() + assert sig2 is sig3 v4 = ar.descr_add(space, ar) - assert v1.signature is v4.signature + assert v1.find_sig() is v4.find_sig() bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) - assert v5.signature is not v1.signature - assert v5.signature is not v2.signature + assert v5.find_sig() is not v1.find_sig() + assert v5.find_sig() is not v2.find_sig() v6 = ar.descr_add(space, bool_ar) - assert v5.signature is v6.signature + assert v5.find_sig() is v6.find_sig() + v7 = v6.descr_add(space, v6) + sig7 = v7.find_sig() + assert sig7.left.left.iter_no == sig7.right.left.iter_no + assert sig7.left.left.iter_no != sig7.right.right.iter_no + assert sig7.left.right.iter_no == sig7.right.right.iter_no + v1.forced_result = ar + assert v1.find_sig() is not sig1 def test_slice_signature(self, space): float64_dtype = get_dtype_cache(space).w_float64dtype @@ -36,11 +52,14 @@ ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) - assert v1.signature is v2.signature + assert v1.find_sig() is v2.find_sig() v3 = v2.descr_add(space, v1) v4 = v1.descr_add(space, v2) - assert v3.signature is v4.signature + assert v3.find_sig() is v4.find_sig() + v5 = ar.descr_add(space, ar).descr_getitem(space, space.wrap(slice(1, 3, 1))) + v6 = ar.descr_add(space, ar).descr_getitem(space, space.wrap(slice(1, 4, 1))) + assert v5.find_sig() is v6.find_sig() class TestUfuncCoerscion(object): def test_binops(self, space): diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -137,6 +137,16 @@ interp = self.run(code) assert interp.results[0].value.value == 15 + def test_sum2(self): + code = """ + a = |30| + b = a + a + sum(b) + """ + interp = self.run(code) + assert interp.results[0].value.value == 30 * (30 - 1) + + def test_array_write(self): code = """ a = [1,2,3,4,5] diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -8,8 +8,6 @@ class MockDtype(object): - signature = signature.BaseSignature() - def malloc(self, size): return None @@ -38,92 +36,86 @@ assert a.backstrides == [135, 12, 2] def test_create_slice_f(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], 
MockDtype(), 'F') - s = a.create_slice(space, [(3, 0, 0, 1)]) + s = a.create_slice([(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] assert s.backstrides == [40, 100] - s = a.create_slice(space, [(1, 9, 2, 4)]) + s = a.create_slice([(1, 9, 2, 4)]) assert s.start == 1 assert s.strides == [2, 10, 50] assert s.backstrides == [6, 40, 100] - s = a.create_slice(space, [(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) assert s.shape == [2, 1] assert s.strides == [3, 10] assert s.backstrides == [3, 0] - s = a.create_slice(space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) assert s.start == 20 assert s.shape == [10, 3] def test_create_slice_c(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') - s = a.create_slice(space, [(3, 0, 0, 1)]) + s = a.create_slice([(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] assert s.backstrides == [12, 2] - s = a.create_slice(space, [(1, 9, 2, 4)]) + s = a.create_slice([(1, 9, 2, 4)]) assert s.start == 15 assert s.strides == [30, 3, 1] assert s.backstrides == [90, 12, 2] - s = a.create_slice(space, [(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2), (1, 2, 1, 1), (1, 0, 0, 1)]) assert s.start == 19 assert s.shape == [2, 1] assert s.strides == [45, 3] assert s.backstrides == [45, 0] - s = a.create_slice(space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) assert s.start == 6 assert s.shape == [10, 3] def test_slice_of_slice_f(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice(space, [(5, 0, 0, 1)]) + s = a.create_slice([(5, 0, 0, 1)]) assert s.start == 5 - s2 = s.create_slice(space, [(3, 0, 0, 1)]) + s2 = s.create_slice([(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [50] assert s2.parent is a assert s2.backstrides == [100] assert s2.start == 35 - s = a.create_slice(space, [(1, 5, 3, 2)]) - s2 = s.create_slice(space, [(0, 2, 1, 2), (2, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2)]) + s2 = s.create_slice([(0, 2, 1, 2), (2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [3, 50] assert s2.backstrides == [3, 100] assert s2.start == 1 * 15 + 2 * 3 def test_slice_of_slice_c(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') - s = a.create_slice(space, [(5, 0, 0, 1)]) + s = a.create_slice([(5, 0, 0, 1)]) assert s.start == 15 * 5 - s2 = s.create_slice(space, [(3, 0, 0, 1)]) + s2 = s.create_slice([(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [1] assert s2.parent is a assert s2.backstrides == [2] assert s2.start == 5 * 15 + 3 * 3 - s = a.create_slice(space, [(1, 5, 3, 2)]) - s2 = s.create_slice(space, [(0, 2, 1, 2), (2, 0, 0, 1)]) + s = a.create_slice([(1, 5, 3, 2)]) + s2 = s.create_slice([(0, 2, 1, 2), (2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [45, 1] assert s2.backstrides == [45, 2] assert s2.start == 1 * 15 + 2 * 3 def test_negative_step_f(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') - s = a.create_slice(space, [(9, -1, -2, 5)]) + s = a.create_slice([(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] assert s.backstrides == [-8, 40, 100] def test_negative_step_c(self): - space = self.space a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') - s = a.create_slice(space, [(9, -1, -2, 5)]) + s = a.create_slice([(9, -1, -2, 5)]) 
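# The values asserted below follow directly from how create_slice (via
# calculate_slice_strides) combines a chunk with the parent strides: a
# C-ordered (10, 5, 3) array has strides [15, 3, 1] and backstrides
# [135, 12, 2], so the chunk (start=9, stop=-1, step=-2, lgt=5) on axis 0
# gives start 9*15 == 135, strides [15*-2, 3, 1] == [-30, 3, 1] and
# backstrides [15*4*-2, 12, 2] == [-120, 12, 2].  A tiny stand-alone helper
# doing the same arithmetic -- simplified to a single chunk on axis 0, not
# the actual implementation:

def sketch_slice_axis0(strides, backstrides, start_, step, lgt):
    new_start = start_ * strides[0]
    new_strides = [strides[0] * step] + strides[1:]
    new_backstrides = [strides[0] * (lgt - 1) * step] + backstrides[1:]
    return new_start, new_strides, new_backstrides

# sketch_slice_axis0([15, 3, 1], [135, 12, 2], 9, -2, 5)
#   -> (135, [-30, 3, 1], [-120, 12, 2])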
assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] @@ -132,7 +124,7 @@ a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 + 2 * 10 + 2 * 50 - s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) r = s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) @@ -142,7 +134,7 @@ a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 * 3 * 5 + 2 * 3 + 2 - s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) + s = a.create_slice([(0, 10, 1, 10), (2, 0, 0, 1)]) r = s._index_of_single_item(self.space, self.newtuple(1, 0)) assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 0)) r = s._index_of_single_item(self.space, self.newtuple(1, 1)) @@ -897,13 +889,32 @@ a = zeros(1) assert debug_repr(a) == 'Array' assert debug_repr(a + a) == 'Call2(add, Array, Array)' - assert debug_repr(a[::2]) == 'Slice(Array)' + assert debug_repr(a[::2]) == 'Slice' assert debug_repr(a + 2) == 'Call2(add, Array, Scalar)' - assert debug_repr(a + a.flat) == 'Call2(add, Array, FlatIter(Array))' + assert debug_repr(a + a.flat) == 'Call2(add, Array, Slice)' assert debug_repr(sin(a)) == 'Call1(sin, Array)' + b = a + a b[0] = 3 - assert debug_repr(b) == 'Call2(add, forced=Array)' + assert debug_repr(b) == 'Array' + + def test_virtual_views(self): + from numpypy import arange + a = arange(15) + c = (a + a) + d = c[::2] + assert d[3] == 12 + c[6] = 5 + assert d[3] == 5 + a = arange(15) + c = (a + a) + d = c[::2][::2] + assert d[1] == 8 + b = a + a + c = b[::2] + c[:] = 3 + assert b[0] == 3 + assert b[1] == 2 def test_tolist_scalar(self): from numpypy import int32, bool_ @@ -1066,6 +1077,17 @@ a = ones((1, 2, 3)) assert a[0, 1, 2] == 1.0 + def test_multidim_setslice(self): + from numpypy import zeros, ones + a = zeros((3, 3)) + b = ones((3, 3)) + a[:,1:3] = b[:,1:3] + assert (a == [[0, 1, 1], [0, 1, 1], [0, 1, 1]]).all() + a = zeros((3, 3)) + b = ones((3, 3)) + a[:,::2] = b[:,::2] + assert (a == [[1, 0, 1], [1, 0, 1], [1, 0, 1]]).all() + def test_broadcast_ufunc(self): from numpypy import array a = array([[1, 2], [3, 4], [5, 6]]) @@ -1075,10 +1097,10 @@ def test_broadcast_setslice(self): from numpypy import zeros, ones - a = zeros((100, 100)) - b = ones(100) + a = zeros((10, 10)) + b = ones(10) a[:, :] = b - assert a[13, 15] == 1 + assert a[3, 5] == 1 def test_broadcast_shape_agreement(self): from numpypy import zeros, array @@ -1112,6 +1134,14 @@ b[:] = (a + a) assert (b == zeros((4, 3, 5))).all() + def test_broadcast_virtualview(self): + from numpypy import arange, zeros + a = arange(8).reshape([2, 2, 2]) + b = (a + a)[1, 1] + c = zeros((2, 2, 2)) + c[:] = b + assert (c == [[[12, 14], [12, 14]], [[12, 14], [12, 14]]]).all() + def test_argmax(self): from numpypy import array a = array([[1, 2], [3, 4], [5, 6]]) @@ -1173,6 +1203,11 @@ a = array([1, 2, 3]) assert dot(a.flat, a.flat) == 14 + def test_flatiter_varray(self): + from numpypy import ones + a = ones((2, 2)) + assert list(((a + a).flat)) == [2, 2, 2, 2] + def test_slice_copy(self): from numpypy import zeros a = zeros((10, 10)) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- 
a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -49,10 +49,14 @@ interp.run(space) w_res = interp.results[-1] if isinstance(w_res, BaseArray): - w_res = w_res.eval(w_res.start_iter()) - + concr = w_res.get_concrete_or_scalar() + sig = concr.find_sig() + frame = sig.create_frame(concr) + w_res = sig.eval(frame, concr) if isinstance(w_res, interp_boxes.W_Float64Box): return w_res.value + if isinstance(w_res, interp_boxes.W_Int64Box): + return float(w_res.value) elif isinstance(w_res, interp_boxes.W_BoolBox): return float(w_res.value) raise TypeError(w_res) @@ -78,8 +82,9 @@ def test_add(self): result = self.run("add") self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, - 'setinteriorfield_raw': 1, 'int_add': 3, - 'int_ge': 1, 'guard_false': 1, 'jump': 1}) + 'setinteriorfield_raw': 1, 'int_add': 2, + 'int_ge': 1, 'guard_false': 1, 'jump': 1, + 'arraylen_gc': 1}) assert result == 3 + 3 def define_float_add(): @@ -93,7 +98,8 @@ assert result == 3 + 3 self.check_simple_loop({"getinteriorfield_raw": 1, "float_add": 1, "setinteriorfield_raw": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + "int_ge": 1, "guard_false": 1, "jump": 1, + 'arraylen_gc': 1}) def define_sum(): return """ @@ -106,8 +112,8 @@ result = self.run("sum") assert result == 2 * sum(range(30)) self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 2, - "int_add": 2, "int_ge": 1, "guard_false": 1, - "jump": 1}) + "int_add": 1, "int_ge": 1, "guard_false": 1, + "jump": 1, 'arraylen_gc': 1}) def define_prod(): return """ @@ -123,18 +129,22 @@ expected *= i * 2 assert result == expected self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + "float_mul": 1, "int_add": 1, + "int_ge": 1, "guard_false": 1, "jump": 1, + 'arraylen_gc': 1}) - def test_max(self): - py.test.skip("broken, investigate") - result = self.run(""" + def define_max(): + return """ a = |30| a[13] = 128 b = a + a max(b) - """) + """ + + def test_max(self): + result = self.run("max") assert result == 256 + py.test.skip("not there yet, getting though") self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_mul": 1, "int_add": 1, "int_lt": 1, "guard_true": 1, "jump": 1}) @@ -164,9 +174,9 @@ result = self.run("any") assert result == 1 self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, - "float_ne": 1, "int_add": 2, + "float_ne": 1, "int_add": 1, "int_ge": 1, "jump": 1, - "guard_false": 2}) + "guard_false": 2, 'arraylen_gc': 1}) def define_already_forced(): return """ @@ -183,14 +193,13 @@ # This is the sum of the ops for both loops, however if you remove the # optimization then you end up with 2 float_adds, so we can still be # sure it was optimized correctly. - # XXX the comment above is wrong now. 
We need preferrably a way to - # count the two loops separately - self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, - 'getfield_gc': 35, 'getfield_gc_pure': 6, - 'guard_class': 22, 'int_add': 8, 'float_mul': 2, - 'guard_isnull': 2, 'jump': 2, 'int_ge': 4, - 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, - 'guard_value': 2}) + self.check_resops({'setinteriorfield_raw': 4, 'getfield_gc': 26, + 'getarrayitem_gc': 4, 'getarrayitem_gc_pure': 2, + 'getfield_gc_pure': 4, + 'guard_class': 8, 'int_add': 8, 'float_mul': 2, + 'jump': 2, 'int_ge': 4, + 'getinteriorfield_raw': 4, 'float_add': 2, + 'guard_false': 4, 'arraylen_gc': 2, 'same_as': 2}) def define_ufunc(): return """ @@ -204,8 +213,9 @@ result = self.run("ufunc") assert result == -6 self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_neg": 1, - "setinteriorfield_raw": 1, "int_add": 3, - "int_ge": 1, "guard_false": 1, "jump": 1}) + "setinteriorfield_raw": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1, + 'arraylen_gc': 1}) def define_specialization(): return """ @@ -248,7 +258,8 @@ 'setinteriorfield_raw': 1, 'int_add': 3, 'int_ge': 1, 'guard_false': 1, - 'jump': 1}) + 'jump': 1, + 'arraylen_gc': 1}) def define_multidim(): return """ @@ -263,8 +274,9 @@ # int_add might be 1 here if we try slightly harder with # reusing indexes or some optimization self.check_simple_loop({'float_add': 1, 'getinteriorfield_raw': 2, - 'guard_false': 1, 'int_add': 3, 'int_ge': 1, - 'jump': 1, 'setinteriorfield_raw': 1}) + 'guard_false': 1, 'int_add': 2, 'int_ge': 1, + 'jump': 1, 'setinteriorfield_raw': 1, + 'arraylen_gc': 1}) def define_multidim_slice(): return """ @@ -312,7 +324,25 @@ self.check_trace_count(1) self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, 'setinteriorfield_raw': 1, 'int_add': 3, - 'int_eq': 1, 'guard_false': 1, 'jump': 1}) + 'int_lt': 1, 'guard_true': 1, 'jump': 1, + 'arraylen_gc': 3}) + + def define_virtual_slice(): + return """ + a = |30| + c = a + a + d = c -> 1:20 + d -> 1 + """ + + def test_virtual_slice(self): + result = self.run("virtual_slice") + assert result == 4 + self.check_trace_count(1) + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, + 'setinteriorfield_raw': 1, 'int_add': 2, + 'int_ge': 1, 'guard_false': 1, 'jump': 1, + 'arraylen_gc': 1}) class TestNumpyOld(LLJitMixin): def setup_class(cls): diff --git a/pypy/module/micronumpy/test/test_ztranslation.py b/pypy/module/micronumpy/test/test_ztranslation.py --- a/pypy/module/micronumpy/test/test_ztranslation.py +++ b/pypy/module/micronumpy/test/test_ztranslation.py @@ -1,5 +1,8 @@ - +from pypy.module.micronumpy import signature from pypy.objspace.fake.checkmodule import checkmodule def test_numpy_translates(): + # XXX: If there are signatures floating around this might explode. This fix + # is ugly. 
+ signature.known_sigs.clear() checkmodule('micronumpy') diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -252,7 +252,7 @@ # grow the list done = 0 while done < len(self._seen_extras): - print self._seen_extras + #print self._seen_extras ann.build_types(self._seen_extras[done], [], complete_now=False) done += 1 diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ -185,7 +185,7 @@ class FlowExecutionContext(ExecutionContext): def __init__(self, space, code, globals, constargs={}, outer_func=None, - name=None): + name=None, is_generator=False): ExecutionContext.__init__(self, space) self.code = code @@ -208,6 +208,7 @@ initialblock = SpamBlock(FrameState(frame).copy()) self.pendingblocks = collections.deque([initialblock]) self.graph = FunctionGraph(name or code.co_name, initialblock) + self.is_generator = is_generator make_link = Link # overridable for transition tracking @@ -247,6 +248,8 @@ return outcome, w_exc_cls, w_exc_value def build_flow(self): + if self.is_generator: + self.produce_generator_mark() while self.pendingblocks: block = self.pendingblocks.popleft() frame = self.create_frame() @@ -259,9 +262,15 @@ self.topframeref = jit.non_virtual_ref(frame) self.crnt_frame = frame try: - w_result = frame.dispatch(frame.pycode, - frame.last_instr, - self) + frame.frame_finished_execution = False + while True: + w_result = frame.dispatch(frame.pycode, + frame.last_instr, + self) + if frame.frame_finished_execution: + break + else: + self.generate_yield(frame, w_result) finally: self.crnt_frame = None self.topframeref = old_frameref @@ -307,6 +316,21 @@ del self.recorder self.fixeggblocks() + def produce_generator_mark(self): + [initialblock] = self.pendingblocks + initialblock.operations.append( + SpaceOperation('generator_mark', [], Variable())) + + def generate_yield(self, frame, w_result): + assert self.is_generator + self.recorder.crnt_block.operations.append( + SpaceOperation('yield', [w_result], Variable())) + # we must push a dummy value that will be POPped: it's the .send() + # passed into the generator (2.5 feature) + assert sys.version_info >= (2, 5) + frame.pushvalue(None) + frame.last_instr += 1 + def fixeggblocks(self): # EggBlocks reuse the variables of their previous block, # which is deemed not acceptable for simplicity of the operations diff --git a/pypy/objspace/flow/objspace.py b/pypy/objspace/flow/objspace.py --- a/pypy/objspace/flow/objspace.py +++ b/pypy/objspace/flow/objspace.py @@ -8,6 +8,7 @@ from pypy.interpreter.pycode import PyCode, cpython_code_signature from pypy.interpreter.module import Module from pypy.interpreter.error import OperationError +from pypy.interpreter.astcompiler.consts import CO_GENERATOR from pypy.interpreter import pyframe, argument from pypy.objspace.flow.model import * from pypy.objspace.flow import flowcontext, operation, specialcase @@ -247,15 +248,13 @@ return ecls return None - def build_flow(self, func, constargs={}): + def build_flow(self, func, constargs={}, tweak_for_generator=True): """ """ if func.func_doc and func.func_doc.lstrip().startswith('NOT_RPYTHON'): raise Exception, "%r is tagged as NOT_RPYTHON" % (func,) code = func.func_code - if code.co_flags & 32: - # generator - raise TypeError("%r is a generator" % (func,)) + is_generator = bool(code.co_flags & CO_GENERATOR) code = PyCode._from_code(self, code) if 
func.func_closure is None: cl = None @@ -271,7 +270,8 @@ class outerfunc: # hack closure = cl ec = flowcontext.FlowExecutionContext(self, code, func.func_globals, - constargs, outerfunc, name) + constargs, outerfunc, name, + is_generator) graph = ec.graph graph.func = func # attach a signature and defaults to the graph @@ -291,6 +291,11 @@ e = error.FlowingError(formated) raise error.FlowingError, e, tb checkgraph(graph) + # + if is_generator and tweak_for_generator: + from pypy.translator.generator import tweak_generator_graph + tweak_generator_graph(graph) + # return graph def fixedview(self, w_tuple, expected_length=None): diff --git a/pypy/objspace/flow/test/test_generator.py b/pypy/objspace/flow/test/test_generator.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/flow/test/test_generator.py @@ -0,0 +1,18 @@ +from pypy.objspace.flow.test.test_objspace import Base + + +class TestGenerator(Base): + + def test_simple_generator(self): + def f(n): + i = 0 + while i < n: + yield i + yield i + i += 1 + graph = self.codetest(f, tweak_for_generator=False) + ops = self.all_operations(graph) + assert ops == {'generator_mark': 1, + 'lt': 1, 'is_true': 1, + 'yield': 2, + 'inplace_add': 1} diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -16,14 +16,14 @@ is_operator = getattr(operator, 'is_', operator.eq) # it's not there 2.2 class Base: - def codetest(self, func): + def codetest(self, func, **kwds): import inspect try: func = func.im_func except AttributeError: pass #name = func.func_name - graph = self.space.build_flow(func) + graph = self.space.build_flow(func, **kwds) graph.source = inspect.getsource(func) self.show(graph) return graph @@ -882,12 +882,6 @@ num = bytecode_spec.opmap[name] flow_meth_names[num] = locals()['old_' + name] - def test_generator(self): - def f(): - yield 3 - - py.test.raises(TypeError, "self.codetest(f)") - def test_dont_capture_RuntimeError(self): class Foo: def __hash__(self): diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -514,29 +514,46 @@ if maxsplit == 0: return space.wrap(input) - # An ok guess at the default size - builder = StringBuilder(len(input)) - first = True - if not sub: upper = len(input) if maxsplit > 0 and maxsplit < upper + 2: upper = maxsplit - 1 assert upper >= 0 - first = False + try: - for i in range(upper): - builder.append(by) - builder.append(input[i]) + result_size = ovfcheck(upper * len(by)) + result_size = ovfcheck(result_size + upper) + result_size = ovfcheck(result_size + len(by)) + remaining_size = len(input) - upper + result_size = ovfcheck(result_size + remaining_size) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("replace string is too long") + ) + builder = StringBuilder(result_size) + for i in range(upper): builder.append(by) - builder.append_slice(input, upper, len(input)) - except MemoryError: + builder.append(input[i]) + builder.append(by) + builder.append_slice(input, upper, len(input)) + else: + # An ok guess for the result size + count = input.count(sub) + if count > maxsplit and maxsplit > 0: + count = maxsplit + diff_len = len(by) - len(sub) + try: + result_size = ovfcheck(diff_len * count) + result_size = ovfcheck(result_size + len(input)) + except OverflowError: raise OperationError(space.w_OverflowError, - 
space.wrap("replace string too long") + space.wrap("replace string is too long") ) - else: + + builder = StringBuilder(result_size) start = 0 sublen = len(sub) + first = True while maxsplit != 0: next = input.find(sub, start) diff --git a/pypy/rpython/test/test_generator.py b/pypy/rpython/test/test_generator.py new file mode 100644 --- /dev/null +++ b/pypy/rpython/test/test_generator.py @@ -0,0 +1,62 @@ +from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin + + +class BaseTestGenerator(BaseRtypingTest): + + def test_simple_explicit(self): + def g(a, b, c): + yield a + yield b + yield c + def f(): + gen = g(3, 5, 8) + x = gen.next() * 100 + x += gen.next() * 10 + x += gen.next() + return x + res = self.interpret(f, []) + assert res == 358 + + def test_cannot_merge(self): + # merging two different generators is not supported + # right now, but we can use workarounds like here + class MyGen: + _immutable_ = True + def next(self): + raise NotImplementedError + class MyG1(MyGen): + _immutable_ = True + def __init__(self, a): + self._gen = self.g1(a) + def next(self): + return self._gen.next() + @staticmethod + def g1(a): + yield a + 1 + yield a + 2 + class MyG2(MyGen): + _immutable_ = True + def __init__(self): + self._gen = self.g2() + def next(self): + return self._gen.next() + @staticmethod + def g2(): + yield 42 + def f(n): + if n > 0: + gen = MyG1(n) + else: + gen = MyG2() + return gen.next() + res = self.interpret(f, [10]) + assert res == 11 + res = self.interpret(f, [0]) + assert res == 42 + + +class TestLLtype(BaseTestGenerator, LLRtypeMixin): + pass + +class TestOOtype(BaseTestGenerator, OORtypeMixin): + pass diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -3,6 +3,7 @@ from pypy.jit.metainterp.resoperation import opname from pypy.jit.tool.oparser import OpParser from pypy.tool.logparser import parse_log_file, extract_category +from copy import copy class Op(object): bridge = None @@ -387,6 +388,18 @@ loops.append(loop) return log, loops +def split_trace(trace): + labels = [i for i, op in enumerate(trace.operations) + if op.name == 'label'] + labels = [0] + labels + [len(trace.operations) - 1] + parts = [] + for i in range(len(labels) - 1): + start, stop = labels[i], labels[i+1] + part = copy(trace) + part.operations = trace.operations[start : stop + 1] + parts.append(part) + + return parts def parse_log_counts(input, loops): if not input: diff --git a/pypy/tool/jitlogparser/test/test_modulefinder.py b/pypy/tool/jitlogparser/test/test_modulefinder.py --- a/pypy/tool/jitlogparser/test/test_modulefinder.py +++ b/pypy/tool/jitlogparser/test/test_modulefinder.py @@ -7,12 +7,14 @@ py.test.skip("Specific python 2.6 tests") def test_gather_code_py(): + py.test.skip("XXX broken, fix me") fname = re.__file__ codes = gather_all_code_objs(fname) assert len(codes) == 21 assert sorted(codes.keys()) == [102, 134, 139, 144, 153, 164, 169, 181, 188, 192, 197, 206, 229, 251, 266, 271, 277, 285, 293, 294, 308] def test_load_code(): + py.test.skip("XXX broken, fix me") fname = re.__file__ code = gather_all_code_objs(fname)[144] assert code.co_name == 'sub' diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -1,6 +1,6 @@ from pypy.tool.jitlogparser.parser import (SimpleParser, TraceForOpcode, Function, adjust_bridges, - 
import_log, Op) + import_log, split_trace, Op) from pypy.tool.jitlogparser.storage import LoopStorage import py, sys @@ -231,3 +231,21 @@ myrepr = 'c = foobar(a, b, descr=mydescr)' assert op.repr() == myrepr assert op.repr() == myrepr # do it twice + +def test_split_trace(): + loop = parse(''' + [i7] + i9 = int_lt(i7, 1003) + label(i9) + guard_true(i9, descr=) [] + i13 = getfield_raw(151937600, descr=) + label(i13) + i19 = int_lt(i13, 1003) + guard_true(i19, descr=) [] + i113 = getfield_raw(151937600, descr=) + ''') + parts = split_trace(loop) + assert len(parts) == 3 + assert len(parts[0].operations) == 2 + assert len(parts[1].operations) == 4 + assert len(parts[2].operations) == 4 diff --git a/pypy/translator/generator.py b/pypy/translator/generator.py new file mode 100644 --- /dev/null +++ b/pypy/translator/generator.py @@ -0,0 +1,166 @@ +from pypy.objspace.flow.model import Block, Link, SpaceOperation, checkgraph +from pypy.objspace.flow.model import Variable, Constant, FunctionGraph +from pypy.translator.unsimplify import insert_empty_startblock +from pypy.translator.unsimplify import split_block +from pypy.translator.simplify import eliminate_empty_blocks +from pypy.tool.sourcetools import func_with_new_name +from pypy.interpreter.argument import Signature + + +class AbstractPosition(object): + _immutable_ = True + _attrs_ = () + + +def tweak_generator_graph(graph): + if not hasattr(graph.func, '_generator_next_method_of_'): + # This is the first copy of the graph. We replace it with + # a small bootstrap graph. + GeneratorIterator = make_generatoriterator_class(graph) + replace_graph_with_bootstrap(GeneratorIterator, graph) + # We attach a 'next' method to the GeneratorIterator class + # that will invoke the real function, based on a second + # copy of the graph. + attach_next_method(GeneratorIterator, graph) + else: + # This is the second copy of the graph. Tweak it. 
+ GeneratorIterator = graph.func._generator_next_method_of_ + tweak_generator_body_graph(GeneratorIterator.Entry, graph) + + +def make_generatoriterator_class(graph): + class GeneratorIterator(object): + class Entry(AbstractPosition): + _immutable_ = True + varnames = get_variable_names(graph.startblock.inputargs) + def __init__(self, entry): + self.current = entry + return GeneratorIterator + +def replace_graph_with_bootstrap(GeneratorIterator, graph): + Entry = GeneratorIterator.Entry + newblock = Block(graph.startblock.inputargs) + v_generator = Variable('generator') + v_entry = Variable('entry') + newblock.operations.append( + SpaceOperation('simple_call', [Constant(Entry)], v_entry)) + assert len(graph.startblock.inputargs) == len(Entry.varnames) + for v, name in zip(graph.startblock.inputargs, Entry.varnames): + newblock.operations.append( + SpaceOperation('setattr', [v_entry, Constant(name), v], + Variable())) + newblock.operations.append( + SpaceOperation('simple_call', [Constant(GeneratorIterator), v_entry], + v_generator)) + newblock.closeblock(Link([v_generator], graph.returnblock)) + graph.startblock = newblock + +def attach_next_method(GeneratorIterator, graph): + func = graph.func + func = func_with_new_name(func, '%s__next' % (func.func_name,)) + func._generator_next_method_of_ = GeneratorIterator + func._always_inline_ = True + # + def next(self): + entry = self.current + self.current = None + (next_entry, return_value) = func(entry) + self.current = next_entry + return return_value + GeneratorIterator.next = next + return func # for debugging + +def get_variable_names(variables): + seen = set() + result = [] + for v in variables: + name = v._name.strip('_') + while name in seen: + name += '_' + result.append('g_' + name) + seen.add(name) + return result + +def _insert_reads(block, varnames): + assert len(varnames) == len(block.inputargs) + v_entry1 = Variable('entry') + for i, name in enumerate(varnames): + block.operations.insert(i, + SpaceOperation('getattr', [v_entry1, Constant(name)], + block.inputargs[i])) + block.inputargs = [v_entry1] + +def tweak_generator_body_graph(Entry, graph): + assert graph.startblock.operations[0].opname == 'generator_mark' + graph.startblock.operations.pop(0) + # + insert_empty_startblock(None, graph) + _insert_reads(graph.startblock, Entry.varnames) + Entry.block = graph.startblock + # + mappings = [Entry] + # + for block in list(graph.iterblocks()): + for exit in block.exits: + if exit.target is graph.returnblock: + exit.args = [Constant(StopIteration), + Constant(StopIteration())] + exit.target = graph.exceptblock + for index in range(len(block.operations)-1, -1, -1): + op = block.operations[index] + if op.opname == 'yield': + [v_yielded_value] = op.args + del block.operations[index] + newlink = split_block(None, block, index) + newblock = newlink.target + # + class Resume(AbstractPosition): + _immutable_ = True + block = newblock + Resume.__name__ = 'Resume%d' % len(mappings) + mappings.append(Resume) + varnames = get_variable_names(newlink.args) + # + _insert_reads(newblock, varnames) + # + v_resume = Variable('resume') + block.operations.append( + SpaceOperation('simple_call', [Constant(Resume)], + v_resume)) + for i, name in enumerate(varnames): + block.operations.append( + SpaceOperation('setattr', [v_resume, Constant(name), + newlink.args[i]], + Variable())) + v_pair = Variable('pair') + block.operations.append( + SpaceOperation('newtuple', [v_resume, v_yielded_value], + v_pair)) + newlink.args = [v_pair] + newlink.target = 
graph.returnblock + # + regular_entry_block = Block([Variable('entry')]) + block = regular_entry_block + for Resume in mappings: + v_check = Variable() + block.operations.append( + SpaceOperation('simple_call', [Constant(isinstance), + block.inputargs[0], + Constant(Resume)], + v_check)) + block.exitswitch = v_check + link1 = Link([block.inputargs[0]], Resume.block) + link1.exitcase = True + nextblock = Block([Variable('entry')]) + link2 = Link([block.inputargs[0]], nextblock) + link2.exitcase = False + block.closeblock(link1, link2) + block = nextblock + block.closeblock(Link([Constant(AssertionError), + Constant(AssertionError("bad generator class"))], + graph.exceptblock)) + graph.startblock = regular_entry_block + graph.signature = Signature(['entry']) + graph.defaults = () + checkgraph(graph) + eliminate_empty_blocks(graph) diff --git a/pypy/translator/test/test_generator.py b/pypy/translator/test/test_generator.py new file mode 100644 --- /dev/null +++ b/pypy/translator/test/test_generator.py @@ -0,0 +1,156 @@ +from pypy.conftest import option +from pypy.objspace.flow.objspace import FlowObjSpace +from pypy.objspace.flow.model import Variable +from pypy.interpreter.argument import Signature +from pypy.translator.translator import TranslationContext +from pypy.translator.generator import make_generatoriterator_class +from pypy.translator.generator import replace_graph_with_bootstrap +from pypy.translator.generator import get_variable_names +from pypy.translator.generator import tweak_generator_body_graph +from pypy.translator.generator import attach_next_method +from pypy.translator.simplify import join_blocks + + +# ____________________________________________________________ + +def f_gen(n): + i = 0 + while i < n: + yield i + i += 1 + +class GeneratorIterator(object): + def __init__(self, entry): + self.current = entry + def next(self): + e = self.current + self.current = None + if isinstance(e, Yield1): + n = e.n_0 + i = e.i_0 + i += 1 + else: + n = e.n_0 + i = 0 + if i < n: + e = Yield1() + e.n_0 = n + e.i_0 = i + self.current = e + return i + raise StopIteration + + def __iter__(self): + return self + +class AbstractPosition(object): + _immutable_ = True +class Entry1(AbstractPosition): + _immutable_ = True +class Yield1(AbstractPosition): + _immutable_ = True + +def f_explicit(n): + e = Entry1() + e.n_0 = n + return GeneratorIterator(e) + +def test_explicit(): + assert list(f_gen(10)) == list(f_explicit(10)) + +def test_get_variable_names(): + lst = get_variable_names([Variable('a'), Variable('b_'), Variable('a')]) + assert lst == ['g_a', 'g_b', 'g_a_'] + +# ____________________________________________________________ + + +class TestGenerator: + + def test_replace_graph_with_bootstrap(self): + def func(n, x, y, z): + yield n + yield n + # + space = FlowObjSpace() + graph = space.build_flow(func, tweak_for_generator=False) + assert graph.startblock.operations[0].opname == 'generator_mark' + GeneratorIterator = make_generatoriterator_class(graph) + replace_graph_with_bootstrap(GeneratorIterator, graph) + if option.view: + graph.show() + block = graph.startblock + ops = block.operations + assert ops[0].opname == 'simple_call' # e = Entry1() + assert ops[1].opname == 'setattr' # e.g_n = n + assert ops[1].args[1].value == 'g_n' + assert ops[2].opname == 'setattr' # e.g_x = x + assert ops[2].args[1].value == 'g_x' + assert ops[3].opname == 'setattr' # e.g_y = y + assert ops[3].args[1].value == 'g_y' + assert ops[4].opname == 'setattr' # e.g_z = z + assert ops[4].args[1].value == 'g_z' 
+ assert ops[5].opname == 'simple_call' # g = GeneratorIterator(e) + assert ops[5].args[1] == ops[0].result + assert len(ops) == 6 + assert len(block.exits) == 1 + assert block.exits[0].target is graph.returnblock + + def test_tweak_generator_body_graph(self): + def f(n, x, y, z=3): + z *= 10 + yield n + 1 + z -= 10 + # + space = FlowObjSpace() + graph = space.build_flow(f, tweak_for_generator=False) + class Entry: + varnames = ['g_n', 'g_x', 'g_y', 'g_z'] + tweak_generator_body_graph(Entry, graph) + if option.view: + graph.show() + # XXX how to test directly that the graph is correct? :-( + assert len(graph.startblock.inputargs) == 1 + assert graph.signature == Signature(['entry']) + assert graph.defaults == () + + def test_tweak_generator_graph(self): + def f(n, x, y, z): + z *= 10 + yield n + 1 + z -= 10 + # + space = FlowObjSpace() + graph = space.build_flow(f, tweak_for_generator=False) + GeneratorIterator = make_generatoriterator_class(graph) + replace_graph_with_bootstrap(GeneratorIterator, graph) + func1 = attach_next_method(GeneratorIterator, graph) + if option.view: + graph.show() + # + assert func1._generator_next_method_of_ is GeneratorIterator + assert hasattr(GeneratorIterator, 'next') + # + graph_next = space.build_flow(GeneratorIterator.next.im_func) + join_blocks(graph_next) + if option.view: + graph_next.show() + # + graph1 = space.build_flow(func1, tweak_for_generator=False) + tweak_generator_body_graph(GeneratorIterator.Entry, graph1) + if option.view: + graph1.show() + + def test_automatic(self): + def f(n, x, y, z): + z *= 10 + yield n + 1 + z -= 10 + # + space = FlowObjSpace() + graph = space.build_flow(f) # tweak_for_generator=True + if option.view: + graph.show() + block = graph.startblock + assert len(block.exits) == 1 + assert block.exits[0].target is graph.returnblock diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -21,6 +21,16 @@ win32api.CloseHandle(proch) except pywintypes.error, e: pass + #Try to avoid opeing a dialog box if one of the tests causes a system error + import ctypes + winapi = ctypes.windll.kernel32 + SEM_FAILCRITICALERRORS = 1 + SEM_NOGPFAULTERRORBOX = 2 + SEM_NOOPENFILEERRORBOX = 0x8000 + flags = SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX + #Since there is no GetErrorMode, do a double Set + old_mode = winapi.SetErrorMode(flags) + winapi.SetErrorMode(old_mode | flags) SIGKILL = SIGTERM = 0 READ_MODE = 'rU' From noreply at buildbot.pypy.org Sun Dec 25 17:54:10 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 25 Dec 2011 17:54:10 +0100 (CET) Subject: [pypy-commit] pypy default: Tentative fix. Message-ID: <20111225165410.CC83082B12@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50856:cdc91cd4ab61 Date: 2011-12-25 17:53 +0100 http://bitbucket.org/pypy/pypy/changeset/cdc91cd4ab61/ Log: Tentative fix. diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -269,7 +269,8 @@ # in case it does, we would prefer to be suboptimal in asm # to a fatal RPython exception. if newresult is not op.result and not newvalue.is_constant(): - self.short_boxes.alias(newresult, op.result) + # XXX fix me? 
+ #self.short_boxes.alias(newresult, op.result) op = ResOperation(rop.SAME_AS, [op.result], newresult) self.optimizer._newoperations = [op] + self.optimizer._newoperations self.optimizer.flush() From noreply at buildbot.pypy.org Sun Dec 25 17:56:07 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 25 Dec 2011 17:56:07 +0100 (CET) Subject: [pypy-commit] pypy default: since we don't track aliases any more, no point in calling it. Message-ID: <20111225165607.923B482B12@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50857:6dc65dbb9258 Date: 2011-12-25 18:54 +0200 http://bitbucket.org/pypy/pypy/changeset/6dc65dbb9258/ Log: since we don't track aliases any more, no point in calling it. diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -269,7 +269,6 @@ # in case it does, we would prefer to be suboptimal in asm # to a fatal RPython exception. if newresult is not op.result and not newvalue.is_constant(): - self.short_boxes.alias(newresult, op.result) op = ResOperation(rop.SAME_AS, [op.result], newresult) self.optimizer._newoperations = [op] + self.optimizer._newoperations self.optimizer.flush() From noreply at buildbot.pypy.org Sun Dec 25 17:56:08 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 25 Dec 2011 17:56:08 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20111225165608.B9BF882B12@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50858:44b0e2106e2d Date: 2011-12-25 18:55 +0200 http://bitbucket.org/pypy/pypy/changeset/44b0e2106e2d/ Log: merge diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -269,6 +269,8 @@ # in case it does, we would prefer to be suboptimal in asm # to a fatal RPython exception. if newresult is not op.result and not newvalue.is_constant(): + # XXX fix me? 
+ #self.short_boxes.alias(newresult, op.result) op = ResOperation(rop.SAME_AS, [op.result], newresult) self.optimizer._newoperations = [op] + self.optimizer._newoperations self.optimizer.flush() From noreply at buildbot.pypy.org Sun Dec 25 18:44:31 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 25 Dec 2011 18:44:31 +0100 (CET) Subject: [pypy-commit] pypy better-jit-hooks: support from the pypyjit module side Message-ID: <20111225174431.53E9382B12@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: better-jit-hooks Changeset: r50859:79a44aea441d Date: 2011-12-25 19:44 +0200 http://bitbucket.org/pypy/pypy/changeset/79a44aea441d/ Log: support from the pypyjit module side diff --git a/pypy/jit/metainterp/jitprof.py b/pypy/jit/metainterp/jitprof.py --- a/pypy/jit/metainterp/jitprof.py +++ b/pypy/jit/metainterp/jitprof.py @@ -29,8 +29,9 @@ TOTAL_FREED_BRIDGES """ +counter_names = [] + def _setup(): - counter_names = [] names = counters.split() for i, name in enumerate(names): globals()[name] = i diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py --- a/pypy/module/pypyjit/__init__.py +++ b/pypy/module/pypyjit/__init__.py @@ -8,15 +8,18 @@ 'set_param': 'interp_jit.set_param', 'residual_call': 'interp_jit.residual_call', 'set_compile_hook': 'interp_jit.set_compile_hook', + 'set_abort_hook': 'interp_jit.set_abort_hook', 'DebugMergePoint': 'interp_resop.W_DebugMergePoint', } def setup_after_space_initialization(self): # force the __extend__ hacks to occur early from pypy.module.pypyjit.interp_jit import pypyjitdriver + from pypy.module.pypyjit.policy import pypy_portal # add the 'defaults' attribute from pypy.rlib.jit import PARAMETERS space = self.space pypyjitdriver.space = space w_obj = space.wrap(PARAMETERS) space.setattr(space.wrap(self), space.wrap('defaults'), w_obj) + pypy_portal.space = space diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -229,6 +229,7 @@ def __init__(self, space): self.w_compile_hook = space.w_None + self.w_abort_hook = space.w_None def set_compile_hook(space, w_hook): """ set_compile_hook(hook) @@ -254,3 +255,16 @@ cache.w_compile_hook = w_hook cache.in_recursion = NonConstant(False) return space.w_None + +def set_abort_hook(space, w_hook): + """ set_abort_hook(hook) + + Set a hook (callable) that will be called each time there is tracing + aborted due to some reason. 
The hook will be called with string describing + the reason as an argument + """ + cache = space.fromcache(Cache) + cache.w_abort_hook = w_hook + cache.in_recursion = NonConstant(False) + return space.w_None + diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -1,4 +1,25 @@ from pypy.jit.codewriter.policy import JitPolicy +from pypy.rlib.jit import JitPortal +from pypy.module.pypyjit.interp_jit import Cache +from pypy.interpreter.error import OperationError +from pypy.jit.metainterp.jitprof import counter_names + +class PyPyPortal(JitPortal): + def on_abort(self, reason): + space = self.space + cache = space.fromcache(Cache) + if cache.in_recursion: + return + if space.is_true(cache.w_abort_hook): + cache.in_recursion = True + try: + space.call_function(cache.w_abort_hook, + space.wrap(counter_names[reason])) + except OperationError, e: + e.write_unraisable(space, "jit hook ", cache.w_abort_hook) + cache.in_recursion = False + +pypy_portal = PyPyPortal() class PyPyJitPolicy(JitPolicy): diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -10,8 +10,10 @@ cast_base_ptr_to_instance) from pypy.rpython.lltypesystem import lltype, llmemory from pypy.module.pypyjit.interp_jit import pypyjitdriver +from pypy.module.pypyjit.policy import pypy_portal from pypy.jit.tool.oparser import parse from pypy.jit.metainterp.typesystem import llhelper +from pypy.jit.metainterp.jitprof import ABORT_TOO_LONG class MockSD(object): class cpu(object): @@ -46,9 +48,13 @@ def interp_on_compile_bridge(): pypyjitdriver.on_compile_bridge(logger, JitCellToken(), oplist, 0) + + def interp_on_abort(): + pypy_portal.on_abort(ABORT_TOO_LONG) cls.w_on_compile = space.wrap(interp2app(interp_on_compile)) cls.w_on_compile_bridge = space.wrap(interp2app(interp_on_compile_bridge)) + cls.w_on_abort = space.wrap(interp2app(interp_on_abort)) def test_on_compile(self): import pypyjit @@ -124,3 +130,14 @@ import pypyjit dmp = pypyjit.DebugMergePoint(0, 0, self.f.func_code) assert dmp.code is self.f.func_code + + def test_on_abort(self): + import pypyjit + l = [] + + def hook(reason): + l.append(reason) + + pypyjit.set_abort_hook(hook) + self.on_abort() + assert l == ['ABORT_TOO_LONG'] diff --git a/pypy/translator/goal/targetpypystandalone.py b/pypy/translator/goal/targetpypystandalone.py --- a/pypy/translator/goal/targetpypystandalone.py +++ b/pypy/translator/goal/targetpypystandalone.py @@ -226,8 +226,8 @@ return self.get_entry_point(config) def jitpolicy(self, driver): - from pypy.module.pypyjit.policy import PyPyJitPolicy - return PyPyJitPolicy() + from pypy.module.pypyjit.policy import PyPyJitPolicy, pypy_portal + return PyPyJitPolicy(pypy_portal) def get_entry_point(self, config): from pypy.tool.lib_pypy import import_from_lib_pypy From noreply at buildbot.pypy.org Sun Dec 25 19:17:00 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 25 Dec 2011 19:17:00 +0100 (CET) Subject: [pypy-commit] pypy better-jit-hooks: oops Message-ID: <20111225181700.3C92982B12@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: better-jit-hooks Changeset: r50860:114e4166cfd6 Date: 2011-12-25 20:16 +0200 http://bitbucket.org/pypy/pypy/changeset/114e4166cfd6/ Log: oops diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ 
b/pypy/jit/metainterp/pyjitpl.py @@ -15,7 +15,7 @@ from pypy.jit.metainterp.jitprof import EmptyProfiler from pypy.jit.metainterp.jitprof import GUARDS, RECORDED_OPS, ABORT_ESCAPE from pypy.jit.metainterp.jitprof import ABORT_TOO_LONG, ABORT_BRIDGE, \ - ABORT_FORCE_QUASIIMMUT + ABORT_FORCE_QUASIIMMUT, ABORT_BAD_LOOP from pypy.jit.metainterp.jitexc import JitException, get_llexception from pypy.jit.metainterp.heapcache import HeapCache from pypy.rlib.objectmodel import specialize From noreply at buildbot.pypy.org Sun Dec 25 19:20:27 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 25 Dec 2011 19:20:27 +0100 (CET) Subject: [pypy-commit] pypy better-jit-hooks: put it back Message-ID: <20111225182027.0EA4182B12@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: better-jit-hooks Changeset: r50861:8ac7b4de9412 Date: 2011-12-25 20:19 +0200 http://bitbucket.org/pypy/pypy/changeset/8ac7b4de9412/ Log: put it back diff --git a/pypy/jit/metainterp/jitprof.py b/pypy/jit/metainterp/jitprof.py --- a/pypy/jit/metainterp/jitprof.py +++ b/pypy/jit/metainterp/jitprof.py @@ -18,6 +18,7 @@ OPT_FORCINGS ABORT_TOO_LONG ABORT_BRIDGE +ABORT_BAD_LOOP ABORT_ESCAPE ABORT_FORCE_QUASIIMMUT NVIRTUALS @@ -155,6 +156,7 @@ self._print_intline("abort: trace too long", cnt[ABORT_TOO_LONG]) self._print_intline("abort: compiling", cnt[ABORT_BRIDGE]) self._print_intline("abort: vable escape", cnt[ABORT_ESCAPE]) + self._print_intline("abort: bad loop", cnt[ABORT_BAD_LOOP]) self._print_intline("abort: force quasi-immut", cnt[ABORT_FORCE_QUASIIMMUT]) self._print_intline("nvirtuals", cnt[NVIRTUALS]) From noreply at buildbot.pypy.org Sun Dec 25 19:24:55 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 25 Dec 2011 19:24:55 +0100 (CET) Subject: [pypy-commit] pypy better-jit-hooks: another merge default Message-ID: <20111225182455.A017B82B12@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: better-jit-hooks Changeset: r50862:979a63998eaa Date: 2011-12-25 20:23 +0200 http://bitbucket.org/pypy/pypy/changeset/979a63998eaa/ Log: another merge default diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -269,7 +269,8 @@ # in case it does, we would prefer to be suboptimal in asm # to a fatal RPython exception. if newresult is not op.result and not newvalue.is_constant(): - self.short_boxes.alias(newresult, op.result) + # XXX fix me? + #self.short_boxes.alias(newresult, op.result) op = ResOperation(rop.SAME_AS, [op.result], newresult) self.optimizer._newoperations = [op] + self.optimizer._newoperations self.optimizer.flush() From noreply at buildbot.pypy.org Sun Dec 25 20:37:54 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 25 Dec 2011 20:37:54 +0100 (CET) Subject: [pypy-commit] pypy concurrent-marksweep: Add a textual introduction. Needs to think out more precisely Message-ID: <20111225193754.48C2E82B12@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: concurrent-marksweep Changeset: r50863:b0bb363299dd Date: 2011-12-25 20:37 +0100 http://bitbucket.org/pypy/pypy/changeset/b0bb363299dd/ Log: Add a textual introduction. Needs to think out more precisely what it implies for the rest of the document. 
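A minimal usage sketch for the set_abort_hook() API introduced in the better-jit-hooks changesets above (r50859, with the ABORT_BAD_LOOP counter restored in r50861); it is not part of any changeset here. It assumes a PyPy build containing those changes, where the pypyjit module exposes set_abort_hook() as defined in pypy/module/pypyjit/interp_jit.py, and where the hook receives a single counter-name string from pypy/jit/metainterp/jitprof.py, e.g. 'ABORT_TOO_LONG' or 'ABORT_BAD_LOOP'.

import pypyjit   # only available when running on a PyPy with the JIT enabled

abort_counts = {}

def abort_hook(reason):
    # called by the JIT each time tracing is aborted; 'reason' is a string
    # such as 'ABORT_TOO_LONG'
    abort_counts[reason] = abort_counts.get(reason, 0) + 1

pypyjit.set_abort_hook(abort_hook)

Setting the hook back to None disables it again, since on_abort() in policy.py only calls the hook when space.is_true(cache.w_abort_hook) holds.
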
diff --git a/pypy/rpython/memory/gc/concurrentgen.txt b/pypy/rpython/memory/gc/concurrentgen.txt --- a/pypy/rpython/memory/gc/concurrentgen.txt +++ b/pypy/rpython/memory/gc/concurrentgen.txt @@ -1,14 +1,41 @@ +============================================================ + Overview of the "concurrentgen" collector +============================================================ + +Goal: reduce the total real time by moving a part of the GC to its own +thread that can run in parallel with the main execution thread. + +On current modern hardware with at least two cores, the two cores can +read the same area of memory concurrently. If one of the cores writes +to this area, then I believe that the core doing the writing works at +full speed, whereas the core doing the reading suffers from waiting for +the data to move to it; but it's still ok because the data usually moves +in a cache-to-cache bus, not via the main memory. Also, if an area of +memory is written to by one core, and then read and written to by the +other core only, then performance is fine. The bad case is the one in +which both cores continously read and write the same area of memory. + +So, assuming that the main thread reads and writes to random objects all +the time, it means that the GC thread should *only read* from the +objects. Conversely, the data structures built by the GC thread should +only be *read* from the main thread. In particular: when the GC thread +does marking, it should use off-objects bits; and sweeping should be +done by adding free objects to lists that are not chained lists. In +this way the GC thread never writes to the object's memory. Similarly, +for the same reason, the GC thread should not reset areas of memory to +zero in the background. + + ************************************************************ Minor collection cycles of the "concurrentgen" collector ************************************************************ - Objects mark byte: cym in 'mK': young objs (and all flagged objs) cam in 'Km': aging objs - '#' : old objs - 'S' : static prebuilt objs with no heap pointer + '#' '/' : old objs + '5' : static prebuilt objs with no heap pointer cym = current_young_marker cam = current_aging_marker @@ -29,7 +56,7 @@ Write barrier: change "old obj" to "flagged obj" (if mark != cym: - mark = cym (used to be '#' or 'S') + mark = cym (used to be '#' or '5') record the object in the "flagged" list) - note that we consider that flagged old objs are again young objects @@ -72,7 +99,7 @@ trace and add to gray objs) - also flag old-or-aging objs that point to new young objs (if mark != cym: - mark = cym (used to be '#' or 'S') + mark = cym (used to be '#' or '5') record the object in the "flagged" list) Threading issues: @@ -99,7 +126,7 @@ if obj is "black": (i.e. 
if mark != cam) make the obj old ( nothing to do here, mark already ok) else: - clear the object space and return it to the available list + return the object to the available list after this there are no more aging objects Write barrier: @@ -107,4 +134,10 @@ - flag old objs that point to new young objs (should not see any 'cam' object any more here) ------------------------------------------------------------- + + +************************************************************ + MAJOR collection cycles of the "concurrentgen" collector +************************************************************ + +NotImplementedError From noreply at buildbot.pypy.org Sun Dec 25 21:45:16 2011 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 25 Dec 2011 21:45:16 +0100 (CET) Subject: [pypy-commit] pypy better-jit-hooks: speed up _ctypes quite a bit by not forcing virtualizable. Good question *why* Message-ID: <20111225204516.34BAE82B12@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: better-jit-hooks Changeset: r50864:7abc703337f7 Date: 2011-12-25 22:44 +0200 http://bitbucket.org/pypy/pypy/changeset/7abc703337f7/ Log: speed up _ctypes quite a bit by not forcing virtualizable. Good question *why* it did force virtualizable in the first place. diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -73,8 +73,12 @@ class Field(object): def __init__(self, name, offset, size, ctype, num, is_bitfield): - for k in ('name', 'offset', 'size', 'ctype', 'num', 'is_bitfield'): - self.__dict__[k] = locals()[k] + self.__dict__['name'] = name + self.__dict__['offset'] = offset + self.__dict__['size'] = size + self.__dict__['ctype'] = ctype + self.__dict__['num'] = num + self.__dict__['is_bitfield'] = is_bitfield def __setattr__(self, name, value): raise AttributeError(name) From noreply at buildbot.pypy.org Mon Dec 26 10:08:15 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Dec 2011 10:08:15 +0100 (CET) Subject: [pypy-commit] pypy default: remove nonsense hints (those functions contain loops) Message-ID: <20111226090815.DFA6D82B1E@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50865:45a754407e3d Date: 2011-12-26 11:01 +0200 http://bitbucket.org/pypy/pypy/changeset/45a754407e3d/ Log: remove nonsense hints (those functions contain loops) diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -98,7 +98,6 @@ "Abstract. Get the expected number of locals." 
raise TypeError, "abstract" - @jit.dont_look_inside def fast2locals(self): # Copy values from the fastlocals to self.w_locals if self.w_locals is None: @@ -112,7 +111,6 @@ w_name = self.space.wrap(name) self.space.setitem(self.w_locals, w_name, w_value) - @jit.dont_look_inside def locals2fast(self): # Copy values from self.w_locals to the fastlocals assert self.w_locals is not None From noreply at buildbot.pypy.org Mon Dec 26 10:08:17 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Dec 2011 10:08:17 +0100 (CET) Subject: [pypy-commit] pypy default: remove some nonsense, handle_exception always raises Message-ID: <20111226090817.1CC1882B1E@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50866:94934ddf3ae9 Date: 2011-12-26 11:07 +0200 http://bitbucket.org/pypy/pypy/changeset/94934ddf3ae9/ Log: remove some nonsense, handle_exception always raises diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -619,7 +619,7 @@ self.descr_reqcls, args) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) if w_result is None: w_result = space.w_None return w_result @@ -655,7 +655,7 @@ self.descr_reqcls, args) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) if w_result is None: w_result = space.w_None return w_result @@ -674,7 +674,7 @@ self.descr_reqcls, args.prepend(w_obj)) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) if w_result is None: w_result = space.w_None return w_result @@ -690,7 +690,7 @@ raise OperationError(space.w_SystemError, space.wrap("unexpected DescrMismatch error")) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) if w_result is None: w_result = space.w_None return w_result @@ -708,7 +708,7 @@ self.descr_reqcls, Arguments(space, [w1])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) if w_result is None: w_result = space.w_None return w_result @@ -726,7 +726,7 @@ self.descr_reqcls, Arguments(space, [w1, w2])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) if w_result is None: w_result = space.w_None return w_result @@ -744,7 +744,7 @@ self.descr_reqcls, Arguments(space, [w1, w2, w3])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) if w_result is None: w_result = space.w_None return w_result @@ -763,7 +763,7 @@ Arguments(space, [w1, w2, w3, w4])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) if w_result is None: w_result = space.w_None return w_result From noreply at buildbot.pypy.org Mon Dec 26 11:28:33 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Mon, 26 Dec 2011 11:28:33 +0100 (CET) Subject: [pypy-commit] pypy default: test for 05c2089f5545, it curently fails Message-ID: <20111226102833.E03C982B49@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r50867:fff6b491e07d Date: 2011-12-26 11:27 +0100 http://bitbucket.org/pypy/pypy/changeset/fff6b491e07d/ Log: test for 05c2089f5545, it curently fails diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -1,10 +1,13 @@ 
from __future__ import with_statement from pypy.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot) + LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot, + FakeMetaInterpStaticData) from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken from pypy.jit.metainterp.resoperation import rop, opname, ResOperation from pypy.jit.metainterp.optimize import InvalidLoop from py.test import raises +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization +from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method class BaseTestMultiLabel(BaseTest): enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" @@ -84,6 +87,8 @@ return optimized +class OptimizeoptTestMultiLabel(BaseTestMultiLabel): + def test_simple(self): ops = """ [i1] @@ -381,6 +386,55 @@ """ self.optimize_loop(ops, expected) -class TestLLtype(BaseTestMultiLabel, LLtypeMixin): + +class OptRenameStrlen(Optimization): + def propagate_forward(self, op): + dispatch_opt(self, op) + + def optimize_STRLEN(self, op): + newop = op.clone() + newop.result = op.result.clonebox() + self.emit_operation(newop) + self.make_equal_to(op.result, self.getvalue(newop.result)) + +dispatch_opt = make_dispatcher_method(OptRenameStrlen, 'optimize_', + default=OptRenameStrlen.emit_operation) + +class BaseTestOptimizerRenamingBoxes(BaseTestMultiLabel): + + def _do_optimize_loop(self, loop, call_pure_results): + from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll + from pypy.jit.metainterp.optimizeopt.util import args_dict + from pypy.jit.metainterp.optimizeopt.pure import OptPure + + self.loop = loop + loop.call_pure_results = args_dict() + metainterp_sd = FakeMetaInterpStaticData(self.cpu) + optimize_unroll(metainterp_sd, loop, [OptRenameStrlen(), OptPure()], True) + + def test_optimizer_renaming_boxes(self): + ops = """ + [p1] + i1 = strlen(p1) + label(p1) + i2 = strlen(p1) + i3 = int_add(i2, 7) + jump(p1) + """ + expected = """ + [p1] + i1 = strlen(p1) + i11 = same_as(i1) + label(p1, i11) + i2 = int_add(i11, 7) + jump(p1) + """ + self.optimize_loop(ops, expected) + + + +class TestLLtype(OptimizeoptTestMultiLabel, LLtypeMixin): pass +class TestOptimizerRenamingBoxesLLtype(BaseTestOptimizerRenamingBoxes, LLtypeMixin): + pass diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7759,7 +7759,7 @@ jump(i0, p0, i2) """ self.optimize_loop(ops, expected) - + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass From noreply at buildbot.pypy.org Mon Dec 26 11:40:40 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Mon, 26 Dec 2011 11:40:40 +0100 (CET) Subject: [pypy-commit] pypy default: Since alias tracking is killed the name of the imported box among the label arguments will be the name from the previous part, so we need to place the same_as after the label now. This fixes the test in fff6b491e07d. Message-ID: <20111226104040.908C682B5F@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r50868:660488a0ae41 Date: 2011-12-26 11:40 +0100 http://bitbucket.org/pypy/pypy/changeset/660488a0ae41/ Log: Since alias tracking is killed the name of the imported box among the label arguments will be the name from the previous part, so we need to place the same_as after the label now. 
This fixes the test in fff6b491e07d. diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -424,10 +424,10 @@ expected = """ [p1] i1 = strlen(p1) + label(p1, i1) i11 = same_as(i1) - label(p1, i11) i2 = int_add(i11, 7) - jump(p1) + jump(p1, i11) """ self.optimize_loop(ops, expected) diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -269,10 +269,8 @@ # in case it does, we would prefer to be suboptimal in asm # to a fatal RPython exception. if newresult is not op.result and not newvalue.is_constant(): - # XXX fix me? - #self.short_boxes.alias(newresult, op.result) op = ResOperation(rop.SAME_AS, [op.result], newresult) - self.optimizer._newoperations = [op] + self.optimizer._newoperations + self.optimizer._newoperations.append(op) self.optimizer.flush() self.optimizer.emitting_dissabled = False From noreply at buildbot.pypy.org Mon Dec 26 11:57:41 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Dec 2011 11:57:41 +0100 (CET) Subject: [pypy-commit] pypy default: backout 94934ddf3ae9, it's nonsense, but annotator does not understand it and Message-ID: <20111226105741.CA63682B9D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50869:21ca3b5a40aa Date: 2011-12-26 12:57 +0200 http://bitbucket.org/pypy/pypy/changeset/21ca3b5a40aa/ Log: backout 94934ddf3ae9, it's nonsense, but annotator does not understand it and I'm too lazy to convince it diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -619,7 +619,7 @@ self.descr_reqcls, args) except Exception, e: - self.handle_exception(space, e) + raise self.handle_exception(space, e) if w_result is None: w_result = space.w_None return w_result @@ -655,7 +655,7 @@ self.descr_reqcls, args) except Exception, e: - self.handle_exception(space, e) + raise self.handle_exception(space, e) if w_result is None: w_result = space.w_None return w_result @@ -674,7 +674,7 @@ self.descr_reqcls, args.prepend(w_obj)) except Exception, e: - self.handle_exception(space, e) + raise self.handle_exception(space, e) if w_result is None: w_result = space.w_None return w_result @@ -690,7 +690,7 @@ raise OperationError(space.w_SystemError, space.wrap("unexpected DescrMismatch error")) except Exception, e: - self.handle_exception(space, e) + raise self.handle_exception(space, e) if w_result is None: w_result = space.w_None return w_result @@ -708,7 +708,7 @@ self.descr_reqcls, Arguments(space, [w1])) except Exception, e: - self.handle_exception(space, e) + raise self.handle_exception(space, e) if w_result is None: w_result = space.w_None return w_result @@ -726,7 +726,7 @@ self.descr_reqcls, Arguments(space, [w1, w2])) except Exception, e: - self.handle_exception(space, e) + raise self.handle_exception(space, e) if w_result is None: w_result = space.w_None return w_result @@ -744,7 +744,7 @@ self.descr_reqcls, Arguments(space, [w1, w2, w3])) except Exception, e: - self.handle_exception(space, e) + raise self.handle_exception(space, e) if w_result is None: w_result = space.w_None return w_result @@ -763,7 +763,7 @@ Arguments(space, [w1, w2, w3, w4])) except Exception, e: - self.handle_exception(space, e) + raise 
self.handle_exception(space, e) if w_result is None: w_result = space.w_None return w_result From noreply at buildbot.pypy.org Mon Dec 26 13:51:13 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Dec 2011 13:51:13 +0100 (CET) Subject: [pypy-commit] pypy concurrent-marksweep: Document and start implementing an idea to make major collection Message-ID: <20111226125113.C04B482BA2@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: concurrent-marksweep Changeset: r50870:05769b6b553e Date: 2011-12-26 13:46 +0100 http://bitbucket.org/pypy/pypy/changeset/05769b6b553e/ Log: Document and start implementing an idea to make major collection parallel. diff --git a/pypy/rpython/memory/gc/concurrentgen.py b/pypy/rpython/memory/gc/concurrentgen.py --- a/pypy/rpython/memory/gc/concurrentgen.py +++ b/pypy/rpython/memory/gc/concurrentgen.py @@ -14,20 +14,15 @@ from pypy.module.thread import ll_thread # -# A "3/4th concurrent" generational mark&sweep GC. +# A concurrent generational mark&sweep GC. # # This uses a separate thread to run the minor collections in parallel. # See concurrentgen.txt for some details. # -# Major collections are serialized for the mark phase, but the sweep -# phase can be parallelized again. XXX not done so far, YYY investigate -# also completely parallelizing them too -# # Based on observations of the timing of collections with "minimark" # (on translate.py): about 15% of the time in minor collections -# (including 2% in walk_roots), and about 7% in major collections (with -# probably 3-4% in the marking phase). So out of a total of 22% this -# should parallelize 16-17%, i.e. 3/4th. +# (including 2% in walk_roots), and about 7% in major collections. +# So out of a total of 22% this should parallelize 20%. # # This is an entirely non-moving collector, with a generational write # barrier adapted to the concurrent marking done by the collector thread. @@ -44,10 +39,8 @@ # let us know if the 'tid' is valid or is just a word-aligned address): MARK_BYTE_1 = 0x6D # 'm', 109 MARK_BYTE_2 = 0x4B # 'K', 75 -MARK_BYTE_OLD_1 = 0x23 # '#', 35 -MARK_BYTE_OLD_2 = 0x2F # '/', 47 -MARK_BYTE_STATIC = 0x35 # '5', 53 -mark_byte_is_old = lambda n: n <= MARK_BYTE_OLD_2 +MARK_BYTE_3 = 0x23 # '#', 35 +MARK_BYTE_STATIC = 0x53 # 'S', 83 # Next lower byte: a combination of flags. FL_WITHHASH = 0x0100 FL_EXTRA = 0x0200 @@ -144,8 +137,10 @@ # # See concurrentgen.txt for more information about these fields. 
self.current_young_marker = MARK_BYTE_1 - self.collector.current_aging_marker = MARK_BYTE_2 + self.current_aging_marker = MARK_BYTE_2 + self.current_old_marker = MARK_BYTE_3 # + self.num_major_collects = 0 #self.ready_to_start_lock = ...built in setup() #self.finished_lock = ...built in setup() #self.mutex_lock = ...built in setup() @@ -321,10 +316,11 @@ # def force_scan(obj): cym = self.current_young_marker + com = self.current_old_marker mark = self.get_mark(obj) #debug_print("deletion_barrier:", mark, obj) # - if mark_byte_is_old(mark): # most common case, make it fast + if mark == com: # most common case, make it fast # self.set_mark(obj, cym) # @@ -344,7 +340,7 @@ mark = self.get_mark(obj) self.set_mark(obj, cym) # - if mark == self.collector.current_aging_marker: + if mark == self.current_aging_marker: # # it is only possible to reach this point if there is # a collection running in collector_mark(), before it @@ -363,10 +359,10 @@ "write barrier: oups!?") # else: - # MARK_BYTE_OLD_* is possible here: the collector thread + # a 'com' mark is possible here: the collector thread # sets it in parallel to objects. In that case it has # been handled already. - ll_assert(mark_byte_is_old(mark), + ll_assert(mark == self.current_old_marker, "write barrier: bogus object mark") # self.release(self.mutex_lock) @@ -438,9 +434,10 @@ self.finalizer_lock_count -= 1 - def collect(self, gen=3): + def collect(self, gen=4): """ - gen=0: Trigger a minor collection if none is running. Never blocks. + gen=0: Trigger a minor collection if none is running. Never blocks, + except if it happens to start a major collection. gen=1: The same, but if a minor collection is running, wait for it to finish before triggering the next one. Guarantees that @@ -451,18 +448,17 @@ finish. Guarantees that young objects not reachable when collect() is called will be freed by the time collect() returns. - gen>=3: Major collection. + gen=3: Trigger a major collection, waiting for it to start. + Guarantees that any object not reachable when collect() is called + will soon be freed. - XXX later: - gen=3: Do a major collection, but don't wait for sweeping to finish. - The most useful default. - gen>=4: Do a full synchronous major collection. + gen>=4: Do a full synchronous major collection. 
""" debug_start("gc-forced-collect") debug_print("collect, gen =", gen) if gen >= 1 or self.collector.running <= 0: self.trigger_next_collection(gen >= 3) - if gen >= 2: + if gen == 2 or gen >= 4: self.wait_for_the_end_of_collection() self.execute_finalizers_ll() debug_stop("gc-forced-collect") @@ -483,7 +479,7 @@ self.execute_finalizers_ll() - def _start_minor_collection(self): + def _start_minor_collection(self, major_collection_phase=0): # debug_start("gc-start") # @@ -513,18 +509,17 @@ # # Exchange the meanings of 'cym' and 'cam' other = self.current_young_marker - self.current_young_marker = self.collector.current_aging_marker - self.collector.current_aging_marker = other + self.current_young_marker = self.current_aging_marker + self.current_aging_marker = other # # Copy a few 'mutator' fields to 'collector' fields - collector = self.collector - collector.aging_objects = self.new_young_objects + self.collector.aging_objects = self.new_young_objects self.new_young_objects = self.NULL #self.collect_weakref_pages = self.weakref_pages #self.collect_finalizer_pages = self.finalizer_pages # # Start the collector thread - self._start_collection_common(False) + self._start_collection_common(major_collection_phase) # debug_stop("gc-start") @@ -532,13 +527,20 @@ # debug_start("gc-major-collection") # - # Clear this list, which is not relevant for major collections. - # For simplicity we first reset the markers on the objects it - # contains, which are all originally old objects. - self.flagged_objects.foreach(self._reset_flagged_root, None) - self.flagged_objects.clear() + # Force a minor collection's marking step to occur now + self._start_minor_collection(major_collection_phase=1) # - # Scan the stack roots and the refs in non-GC objects + # Wait for it to finish + self._stop_collection() + # + # Assert that this list is still empty (cleared by the call to + # _start_minor_collection) + ll_assert(not self.flagged_objects.non_empty(), + "flagged_objects should be empty here") + ll_assert(self.new_young_objects == self.NULL, + "new_young_obejcts should be empty here") + # + # Scan again the stack roots and the refs in non-GC objects self.root_walker.walk_roots( ConcurrentGenGC._add_stack_root, # stack roots ConcurrentGenGC._add_stack_root, # in prebuilt non-gc @@ -550,27 +552,31 @@ # Add all prebuilt objects that have ever been mutated self.prebuilt_root_objects.foreach(self._add_prebuilt_root, None) # + # Exchange the meanings of 'com' and 'cam' + other = self.current_old_marker + self.current_old_marker = self.current_aging_marker + self.current_aging_marker = other + # # Copy a few 'mutator' fields to 'collector' fields - collector = self.collector - collector.aging_objects = self.new_young_objects - self.new_young_objects = self.NULL - - collector.collect_old_objects = self.old_objects + self.collector.delayed_aging_objects = self.collector.aging_objects + self.collector.aging_objects = self.old_objects self.old_objects = self.NULL #self.collect_weakref_pages = self.weakref_pages #self.collect_finalizer_pages = self.finalizer_pages # - # Start the collector thread - self._start_collection_common(True) + # Start again the collector thread + self._start_collection_common(major_collection_phase=2) # - # Pause the mutator thread while a major collection is running - self._stop_collection() - # + self.num_major_collects += 1 + debug_print("major collection", self.num_major_collects, "started") debug_stop("gc-major-collection") - def _start_collection_common(self, is_major): - 
self.collector.is_major_collection = is_major + def _start_collection_common(self, major_collection_phase): + self.collector.current_young_marker = self.current_young_marker + self.collector.current_aging_marker = self.current_aging_marker + self.collector.current_old_marker = self.current_old_marker + self.collector.major_collection_phase = major_collection_phase self.collector.running = 1 #debug_print("collector.running = 1") self.release(self.ready_to_start_lock) @@ -589,9 +595,9 @@ # # Important: the mark on 'obj' must be 'cym', otherwise it will not # be scanned at all. It should generally be, except in rare cases - # where it was reset to MARK_BYTE_OLD_* by the collector thread. + # where it was reset to 'com' by the collector thread. mark = self.get_mark(obj) - if mark_byte_is_old(mark): + if mark == self.current_old_marker: self.set_mark(obj, self.current_young_marker) else: ll_assert(mark == self.current_young_marker, @@ -599,9 +605,6 @@ # self.collector.gray_objects.append(obj) - def _reset_flagged_root(self, obj, ignored): - self.set_mark(obj, self.collector.current_old_marker) - def _add_prebuilt_root(self, obj, ignored): self.get_mark(obj) self.collector.gray_objects.append(obj) @@ -649,8 +652,7 @@ mark = self.header(obj).tid & 0xFF ll_assert(mark == MARK_BYTE_1 or mark == MARK_BYTE_2 or - mark == MARK_BYTE_OLD_1 or - mark == MARK_BYTE_OLD_2 or + mark == MARK_BYTE_3 or mark == MARK_BYTE_STATIC, "bad mark byte in object") return mark @@ -750,9 +752,7 @@ # when the collection starts, we make all young objects aging and # move 'new_young_objects' into 'aging_objects' self.aging_objects = self.NULL - self.collect_old_objects = self.NULL - self.current_old_marker = MARK_BYTE_OLD_1 - self.num_major_collects = 0 + self.delayed_aging_objects = self.NULL def setup(self): self.ready_to_start_lock = self.gc.ready_to_start_lock @@ -811,6 +811,7 @@ self.release(self.finished_lock) break # + self.collector_presweep() # Mark # collection_running == 1 self.collector_mark() # # collection_running == 2 @@ -824,10 +825,6 @@ def collector_mark(self): - if self.is_major_collection: - self.collector_mark_major() - return - # surviving_size = r_uint(0) # while True: @@ -939,77 +936,29 @@ self.get_mark(obj) self.gray_objects.append(obj) - def collector_mark_major(self): - # Marking for a major collection. Differs from marking for - # a minor collection, because we have to follow references - # to objects whose mark is 'cym' or 'oom', and replace them - # with 'nom'. We must stop if objects have already 'nom', - # or if they have MARK_BYTE_STATIC. For now they cannot - # have 'cam'. - # - # Get 'oom' and 'nom' from current_old_marker, and switch - # the value in that field: - oom = self.current_old_marker - nom = oom ^ (MARK_BYTE_OLD_1 ^ MARK_BYTE_OLD_2) - self.current_old_marker = nom - # - debug_print() - debug_print(".----------- Full collection ------------------") - # - self.num_major_collects += 1 - debug_print("| number of major collects: ", self.num_major_collects) - # - surviving_size = r_uint(0) - # - while self.gray_objects.non_empty(): - obj = self.gray_objects.pop() - mark = self.get_mark(obj) - if mark == nom or mark == MARK_BYTE_STATIC: - continue - # - # Record the object's size - surviving_size += raw_malloc_usage(self.gc.get_size(obj)) - # - # Scan the content of 'obj'. 
- self.gc.trace(obj, self._collect_add_pending, None) - self.set_mark(obj, nom) - # - next_major_collection_limit = ( # as a float - self.gc.nursery_size + - (self.gc.fill_factor - 1.0) * float(surviving_size)) - if next_major_collection_limit > FLOAT_ALMOST_MAXINT: - next_major_collection_limit = FLOAT_ALMOST_MAXINT - # - self.gc.size_still_available_before_major = int( - next_major_collection_limit) - # - debug_print("| surviving size: ", surviving_size) - debug_print("| next major collection after:", - self.gc.size_still_available_before_major) - def collector_sweep(self): - if self.is_major_collection: - # - cym = self.gc.current_young_marker - nom = self.current_old_marker - oom = nom ^ (MARK_BYTE_OLD_1 ^ MARK_BYTE_OLD_2) - self._collect_do_sweep(self.aging_objects, cym, False) - self._collect_do_sweep(self.collect_old_objects, oom, False) - # - debug_print("`----------------------------------------------") - # - else: - # - cam = self.current_aging_marker - self._collect_do_sweep(self.aging_objects, cam, True) - # + if self.major_collection_phase != 1: # no sweeping during phase 1 + lst = self._collect_do_sweep(self.aging_objects, + self.current_aging_marker, + self.gc.old_objects) + self.gc.old_objects = lst # self.running = -1 #debug_print("collection_running = -1") - def _collect_do_sweep(self, hdr, still_not_marked, write_barrier_active): - linked_list = self.gc.old_objects + def collector_presweep(self): + if self.major_collection_phase == 2: # only in this phase + # Finish the delayed sweep from the previous minor collection. + # The objects left unmarked were left with 'cam', which is + # now 'com' because we switched their values. + lst = self._collect_do_sweep(self.delayed_aging_objects, + self.current_old_marker, + self.aging_objects) + self.aging_objects = lst + self.delayed_aging_objects = self.NULL + + def _collect_do_sweep(self, hdr, still_not_marked, linked_list): # while hdr != self.NULL: nexthdr = hdr.next @@ -1023,15 +972,15 @@ else: # the object was marked: relink it ll_assert(mark == self.current_old_marker or - (write_barrier_active and - mark == self.gc.current_young_marker), + mark == self.current_aging_marker or + mark == self.current_young_marker, "sweep: bad mark") hdr.next = linked_list linked_list = hdr # hdr = nexthdr # - self.gc.old_objects = linked_list + return linked_list # ------------------------- @@ -1138,8 +1087,7 @@ def emulate_set_mark(p, v): "NOT_RPYTHON" - assert v in (MARK_BYTE_1, MARK_BYTE_2, - MARK_BYTE_OLD_1, MARK_BYTE_OLD_2, MARK_BYTE_STATIC) + assert v in (MARK_BYTE_1, MARK_BYTE_2, MARK_BYTE_3, MARK_BYTE_STATIC) concurrent_setter_lock.acquire(True) p.tid = (p.tid &~ 0xFF) | v concurrent_setter_lock.release() diff --git a/pypy/rpython/memory/gc/concurrentgen.txt b/pypy/rpython/memory/gc/concurrentgen.txt --- a/pypy/rpython/memory/gc/concurrentgen.txt +++ b/pypy/rpython/memory/gc/concurrentgen.txt @@ -25,6 +25,9 @@ for the same reason, the GC thread should not reset areas of memory to zero in the background. +This goal is not reached so far: both threads read and write the object +mark byte; there are no off-objects bits. 
+ ************************************************************ Minor collection cycles of the "concurrentgen" collector @@ -32,13 +35,14 @@ Objects mark byte: - cym in 'mK': young objs (and all flagged objs) - cam in 'Km': aging objs - '#' '/' : old objs - '5' : static prebuilt objs with no heap pointer + cym: young objs (and all flagged objs) + cam: aging objs + com: old objs + 'S': static prebuilt objs with no heap pointer cym = current_young_marker cam = current_aging_marker +com = current_old_marker The write barrier activates when writing into an object whose mark byte is different from 'cym'. @@ -56,13 +60,13 @@ Write barrier: change "old obj" to "flagged obj" (if mark != cym: - mark = cym (used to be '#' or '5') + mark = cym (used to be com or 'S') record the object in the "flagged" list) - note that we consider that flagged old objs are again young objects ------------------------------------------------------------ -Step 2. Preparation of running the collector. (Still single-threaded.) +Step 2. Preparation for running the collector. (Still single-threaded.) - young objs -> aging objs (exchange the values of 'cam' and 'cym'. @@ -89,28 +93,28 @@ skip obj if not an aging obj (i.e. if mark != cam: continue) for each obj found by tracing: add to gray objs (if not an aging obj, will be skipped later) - gray obj -> black obj (i.e. mark = '#') + gray obj -> black obj (i.e. mark = com) Write barrier: - perform as a "deletion barrier", detecting changes done to aging objs (i.e. if mark == cam, - mark = '#' + mark = com trace and add to gray objs) - also flag old-or-aging objs that point to new young objs (if mark != cym: - mark = cym (used to be '#' or '5') + mark = cym (used to be com or 'S') record the object in the "flagged" list) Threading issues: - it's possible that both threads will trace the same object, if we're unlucky, but it does not have buggy effects - - the "mark = '#'" in the collector thread can conflict with the + - the "mark = com" in the collector thread can conflict with the "mark = cym" in the mutator write barrier, but again, it should not have buggy effects beyond occasionally triggering the write barrier twice on the same object, adding it twice in "flagged" (and never more) - - it is essential to have "mark = '#'" _after_ tracing in the collector + - it is essential to have "mark = com" _after_ tracing in the collector thread; otherwise, the write barrier in the mutator thread would be ignored in case it occurs between the two, and then the tracing done by the collector thread doesn't see the original values any more. @@ -140,4 +144,30 @@ MAJOR collection cycles of the "concurrentgen" collector ************************************************************ -NotImplementedError +Works mostly like a minor collection cycle. The only difference +is in step 2, which is replaced with: + + +Step 2+. Preparation for running a major collection. (Still single-threaded.) + + - force a minor collection's marking step to occur sequentially + (steps 2 and 3), to get rid of 'cym' objects. Objects are left + either 'cam' (non-marked) or 'com' (marked). + + - empty the "flagged" list + + - collect roots; add roots to the "gray objs" list + + - com <-> cam + (exchange the values of 'com' and 'cam'. + there are no 'cym' object right now. + the newly 'com' objects are the ones marked unreachable above.) + + +Major collections only worry about old objects. 
To avoid serializing +the complete major collection, we serialize the minor collection's +marking step that occurs first; the goal is to be sure that all objects +are in the 'com' state. We can minimize the non-parallelized delay +introduced by this step by doing the major collection just after the +previous minor collection finished, when the quantity of new young +objects should still be small. From noreply at buildbot.pypy.org Mon Dec 26 13:51:26 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Dec 2011 13:51:26 +0100 (CET) Subject: [pypy-commit] pypy concurrent-marksweep: hg merge default Message-ID: <20111226125126.65D9982BA2@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: concurrent-marksweep Changeset: r50871:a0048b726e62 Date: 2011-12-26 13:49 +0100 http://bitbucket.org/pypy/pypy/changeset/a0048b726e62/ Log: hg merge default diff too long, truncating to 10000 out of 56002 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -1,3 +1,4 @@ b590cf6de4190623aad9aa698694c22e614d67b9 release-1.5 b48df0bf4e75b81d98f19ce89d4a7dc3e1dab5e5 benchmarked d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6 +ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7 diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -74,7 +74,8 @@ self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file - open(name, "w") + f = open(name, "w") + f.close() self.files.append(name) def test_tempnam(self): diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -201,7 +201,7 @@ RegrTest('test_difflib.py'), RegrTest('test_dircache.py', core=True), RegrTest('test_dis.py'), - RegrTest('test_distutils.py'), + RegrTest('test_distutils.py', skip=True), RegrTest('test_dl.py', skip=True), RegrTest('test_doctest.py', usemodules="thread"), RegrTest('test_doctest2.py'), diff --git a/lib-python/modified-2.7/ctypes/__init__.py b/lib-python/modified-2.7/ctypes/__init__.py --- a/lib-python/modified-2.7/ctypes/__init__.py +++ b/lib-python/modified-2.7/ctypes/__init__.py @@ -351,7 +351,7 @@ self._FuncPtr = _FuncPtr if handle is None: - self._handle = _ffi.CDLL(name) + self._handle = _ffi.CDLL(name, mode) else: self._handle = handle diff --git a/lib-python/modified-2.7/ctypes/test/test_callbacks.py b/lib-python/modified-2.7/ctypes/test/test_callbacks.py --- a/lib-python/modified-2.7/ctypes/test/test_callbacks.py +++ b/lib-python/modified-2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test class Callbacks(unittest.TestCase): @@ -98,6 +99,7 @@ ## self.check_type(c_char_p, "abc") ## self.check_type(c_char_p, "def") + @xfail def test_pyobject(self): o = () from sys import getrefcount as grc diff --git a/lib-python/modified-2.7/ctypes/test/test_libc.py b/lib-python/modified-2.7/ctypes/test/test_libc.py --- a/lib-python/modified-2.7/ctypes/test/test_libc.py +++ b/lib-python/modified-2.7/ctypes/test/test_libc.py @@ -25,7 +25,10 @@ lib.my_qsort(chars, len(chars)-1, sizeof(c_char), comparefunc(sort)) self.assertEqual(chars.raw, " ,,aaaadmmmnpppsss\x00") - def test_no_more_xfail(self): + def SKIPPED_test_no_more_xfail(self): + # We decided to not explicitly support the whole ctypes-2.7 + # and instead go for a case-by-case, demand-driven approach. + # So this test is skipped instead of failing. 
import socket import ctypes.test self.assertTrue(not hasattr(ctypes.test, 'xfail'), diff --git a/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py b/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py --- a/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py +++ b/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py @@ -1,6 +1,5 @@ import unittest from ctypes import * -from ctypes.test import xfail class MyInt(c_int): def __cmp__(self, other): @@ -27,7 +26,6 @@ self.assertEqual(None, cb()) - @xfail def test_int_callback(self): args = [] def func(arg): diff --git a/lib-python/modified-2.7/heapq.py b/lib-python/modified-2.7/heapq.py new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7/heapq.py @@ -0,0 +1,442 @@ +# -*- coding: latin-1 -*- + +"""Heap queue algorithm (a.k.a. priority queue). + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +Usage: + +heap = [] # creates an empty heap +heappush(heap, item) # pushes a new item on the heap +item = heappop(heap) # pops the smallest item from the heap +item = heap[0] # smallest item on the heap without popping it +heapify(x) # transforms list into a heap, in-place, in linear time +item = heapreplace(heap, item) # pops and returns smallest item, and adds + # new item; the heap size is unchanged + +Our API differs from textbook heap algorithms as follows: + +- We use 0-based indexing. This makes the relationship between the + index for a node and the indexes for its children slightly less + obvious, but is more suitable since Python uses 0-based indexing. + +- Our heappop() method returns the smallest item, not the largest. + +These two make it possible to view the heap as a regular Python list +without surprises: heap[0] is the smallest item, and heap.sort() +maintains the heap invariant! +""" + +# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger + +__about__ = """Heap queues + +[explanation by Fran�ois Pinard] + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +The strange invariant above is meant to be an efficient memory +representation for a tournament. The numbers below are `k', not a[k]: + + 0 + + 1 2 + + 3 4 5 6 + + 7 8 9 10 11 12 13 14 + + 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 + + +In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In +an usual binary tournament we see in sports, each cell is the winner +over the two cells it tops, and we can trace the winner down the tree +to see all opponents s/he had. However, in many computer applications +of such tournaments, we do not need to trace the history of a winner. +To be more memory efficient, when a winner is promoted, we try to +replace it by something else at a lower level, and the rule becomes +that a cell and the two cells it tops contain three different items, +but the top cell "wins" over the two topped cells. + +If this heap invariant is protected at all time, index 0 is clearly +the overall winner. 
The simplest algorithmic way to remove it and +find the "next" winner is to move some loser (let's say cell 30 in the +diagram above) into the 0 position, and then percolate this new 0 down +the tree, exchanging values, until the invariant is re-established. +This is clearly logarithmic on the total number of items in the tree. +By iterating over all items, you get an O(n ln n) sort. + +A nice feature of this sort is that you can efficiently insert new +items while the sort is going on, provided that the inserted items are +not "better" than the last 0'th element you extracted. This is +especially useful in simulation contexts, where the tree holds all +incoming events, and the "win" condition means the smallest scheduled +time. When an event schedule other events for execution, they are +scheduled into the future, so they can easily go into the heap. So, a +heap is a good structure for implementing schedulers (this is what I +used for my MIDI sequencer :-). + +Various structures for implementing schedulers have been extensively +studied, and heaps are good for this, as they are reasonably speedy, +the speed is almost constant, and the worst case is not much different +than the average case. However, there are other representations which +are more efficient overall, yet the worst cases might be terrible. + +Heaps are also very useful in big disk sorts. You most probably all +know that a big sort implies producing "runs" (which are pre-sorted +sequences, which size is usually related to the amount of CPU memory), +followed by a merging passes for these runs, which merging is often +very cleverly organised[1]. It is very important that the initial +sort produces the longest runs possible. Tournaments are a good way +to that. If, using all the memory available to hold a tournament, you +replace and percolate items that happen to fit the current run, you'll +produce runs which are twice the size of the memory for random input, +and much better for input fuzzily ordered. + +Moreover, if you output the 0'th item on disk and get an input which +may not fit in the current tournament (because the value "wins" over +the last output value), it cannot fit in the heap, so the size of the +heap decreases. The freed memory could be cleverly reused immediately +for progressively building a second heap, which grows at exactly the +same rate the first heap is melting. When the first heap completely +vanishes, you switch heaps and start a new run. Clever and quite +effective! + +In a word, heaps are useful memory structures to know. I use them in +a few applications, and I think it is good to keep a `heap' module +around. :-) + +-------------------- +[1] The disk balancing algorithms which are current, nowadays, are +more annoying than clever, and this is a consequence of the seeking +capabilities of the disks. On devices which cannot seek, like big +tape drives, the story was quite different, and one had to be very +clever to ensure (far in advance) that each tape movement will be the +most effective possible (that is, will best participate at +"progressing" the merge). Some tapes were even able to read +backwards, and this was also used to avoid the rewinding time. +Believe me, real good tape sorts were quite spectacular to watch! +From all times, sorting has always been a Great Art! 
:-) +""" + +__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge', + 'nlargest', 'nsmallest', 'heappushpop'] + +from itertools import islice, repeat, count, imap, izip, tee, chain +from operator import itemgetter +import bisect + +def heappush(heap, item): + """Push item onto heap, maintaining the heap invariant.""" + heap.append(item) + _siftdown(heap, 0, len(heap)-1) + +def heappop(heap): + """Pop the smallest item off the heap, maintaining the heap invariant.""" + lastelt = heap.pop() # raises appropriate IndexError if heap is empty + if heap: + returnitem = heap[0] + heap[0] = lastelt + _siftup(heap, 0) + else: + returnitem = lastelt + return returnitem + +def heapreplace(heap, item): + """Pop and return the current smallest value, and add the new item. + + This is more efficient than heappop() followed by heappush(), and can be + more appropriate when using a fixed-size heap. Note that the value + returned may be larger than item! That constrains reasonable uses of + this routine unless written as part of a conditional replacement: + + if item > heap[0]: + item = heapreplace(heap, item) + """ + returnitem = heap[0] # raises appropriate IndexError if heap is empty + heap[0] = item + _siftup(heap, 0) + return returnitem + +def heappushpop(heap, item): + """Fast version of a heappush followed by a heappop.""" + if heap and heap[0] < item: + item, heap[0] = heap[0], item + _siftup(heap, 0) + return item + +def heapify(x): + """Transform list into a heap, in-place, in O(len(heap)) time.""" + n = len(x) + # Transform bottom-up. The largest index there's any point to looking at + # is the largest with a child index in-range, so must have 2*i + 1 < n, + # or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so + # j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is + # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1. + for i in reversed(xrange(n//2)): + _siftup(x, i) + +def nlargest(n, iterable): + """Find the n largest elements in a dataset. + + Equivalent to: sorted(iterable, reverse=True)[:n] + """ + if n < 0: # for consistency with the c impl + return [] + it = iter(iterable) + result = list(islice(it, n)) + if not result: + return result + heapify(result) + _heappushpop = heappushpop + for elem in it: + _heappushpop(result, elem) + result.sort(reverse=True) + return result + +def nsmallest(n, iterable): + """Find the n smallest elements in a dataset. + + Equivalent to: sorted(iterable)[:n] + """ + if n < 0: # for consistency with the c impl + return [] + if hasattr(iterable, '__len__') and n * 10 <= len(iterable): + # For smaller values of n, the bisect method is faster than a minheap. + # It is also memory efficient, consuming only n elements of space. + it = iter(iterable) + result = sorted(islice(it, 0, n)) + if not result: + return result + insort = bisect.insort + pop = result.pop + los = result[-1] # los --> Largest of the nsmallest + for elem in it: + if los <= elem: + continue + insort(result, elem) + pop() + los = result[-1] + return result + # An alternative approach manifests the whole iterable in memory but + # saves comparisons by heapifying all at once. Also, saves time + # over bisect.insort() which has O(n) data movement time for every + # insertion. Finding the n smallest of an m length iterable requires + # O(m) + O(n log m) comparisons. + h = list(iterable) + heapify(h) + return map(heappop, repeat(h, min(n, len(h)))) + +# 'heap' is a heap at all indices >= startpos, except possibly for pos. 
pos +# is the index of a leaf with a possibly out-of-order value. Restore the +# heap invariant. +def _siftdown(heap, startpos, pos): + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. + while pos > startpos: + parentpos = (pos - 1) >> 1 + parent = heap[parentpos] + if newitem < parent: + heap[pos] = parent + pos = parentpos + continue + break + heap[pos] = newitem + +# The child indices of heap index pos are already heaps, and we want to make +# a heap at index pos too. We do this by bubbling the smaller child of +# pos up (and so on with that child's children, etc) until hitting a leaf, +# then using _siftdown to move the oddball originally at index pos into place. +# +# We *could* break out of the loop as soon as we find a pos where newitem <= +# both its children, but turns out that's not a good idea, and despite that +# many books write the algorithm that way. During a heap pop, the last array +# element is sifted in, and that tends to be large, so that comparing it +# against values starting from the root usually doesn't pay (= usually doesn't +# get us out of the loop early). See Knuth, Volume 3, where this is +# explained and quantified in an exercise. +# +# Cutting the # of comparisons is important, since these routines have no +# way to extract "the priority" from an array element, so that intelligence +# is likely to be hiding in custom __cmp__ methods, or in array elements +# storing (priority, record) tuples. Comparisons are thus potentially +# expensive. +# +# On random arrays of length 1000, making this change cut the number of +# comparisons made by heapify() a little, and those made by exhaustive +# heappop() a lot, in accord with theory. Here are typical results from 3 +# runs (3 just to demonstrate how small the variance is): +# +# Compares needed by heapify Compares needed by 1000 heappops +# -------------------------- -------------------------------- +# 1837 cut to 1663 14996 cut to 8680 +# 1855 cut to 1659 14966 cut to 8678 +# 1847 cut to 1660 15024 cut to 8703 +# +# Building the heap by using heappush() 1000 times instead required +# 2198, 2148, and 2219 compares: heapify() is more efficient, when +# you can use it. +# +# The total compares needed by list.sort() on the same lists were 8627, +# 8627, and 8632 (this should be compared to the sum of heapify() and +# heappop() compares): list.sort() is (unsurprisingly!) more efficient +# for sorting. + +def _siftup(heap, pos): + endpos = len(heap) + startpos = pos + newitem = heap[pos] + # Bubble up the smaller child until hitting a leaf. + childpos = 2*pos + 1 # leftmost child position + while childpos < endpos: + # Set childpos to index of smaller child. + rightpos = childpos + 1 + if rightpos < endpos and not heap[childpos] < heap[rightpos]: + childpos = rightpos + # Move the smaller child up. + heap[pos] = heap[childpos] + pos = childpos + childpos = 2*pos + 1 + # The leaf at pos is empty now. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). + heap[pos] = newitem + _siftdown(heap, startpos, pos) + +# If available, use C implementation +try: + from _heapq import * +except ImportError: + pass + +def merge(*iterables): + '''Merge multiple sorted inputs into a single sorted output. + + Similar to sorted(itertools.chain(*iterables)) but returns a generator, + does not pull the data into memory all at once, and assumes that each of + the input streams is already sorted (smallest to largest). 
+ + >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) + [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] + + ''' + _heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration + + h = [] + h_append = h.append + for itnum, it in enumerate(map(iter, iterables)): + try: + next = it.next + h_append([next(), itnum, next]) + except _StopIteration: + pass + heapify(h) + + while 1: + try: + while 1: + v, itnum, next = s = h[0] # raises IndexError when h is empty + yield v + s[0] = next() # raises StopIteration when exhausted + _heapreplace(h, s) # restore heap condition + except _StopIteration: + _heappop(h) # remove empty iterator + except IndexError: + return + +# Extend the implementations of nsmallest and nlargest to use a key= argument +_nsmallest = nsmallest +def nsmallest(n, iterable, key=None): + """Find the n smallest elements in a dataset. + + Equivalent to: sorted(iterable, key=key)[:n] + """ + # Short-cut for n==1 is to use min() when len(iterable)>0 + if n == 1: + it = iter(iterable) + head = list(islice(it, 1)) + if not head: + return [] + if key is None: + return [min(chain(head, it))] + return [min(chain(head, it), key=key)] + + # When n>=size, it's faster to use sort() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key)[:n] + + # When key is none, use simpler decoration + if key is None: + it = izip(iterable, count()) # decorate + result = _nsmallest(n, it) + return map(itemgetter(0), result) # undecorate + + # General case, slowest method + in1, in2 = tee(iterable) + it = izip(imap(key, in1), count(), in2) # decorate + result = _nsmallest(n, it) + return map(itemgetter(2), result) # undecorate + +_nlargest = nlargest +def nlargest(n, iterable, key=None): + """Find the n largest elements in a dataset. 
+ + Equivalent to: sorted(iterable, key=key, reverse=True)[:n] + """ + + # Short-cut for n==1 is to use max() when len(iterable)>0 + if n == 1: + it = iter(iterable) + head = list(islice(it, 1)) + if not head: + return [] + if key is None: + return [max(chain(head, it))] + return [max(chain(head, it), key=key)] + + # When n>=size, it's faster to use sort() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key, reverse=True)[:n] + + # When key is none, use simpler decoration + if key is None: + it = izip(iterable, count(0,-1)) # decorate + result = _nlargest(n, it) + return map(itemgetter(0), result) # undecorate + + # General case, slowest method + in1, in2 = tee(iterable) + it = izip(imap(key, in1), count(0,-1), in2) # decorate + result = _nlargest(n, it) + return map(itemgetter(2), result) # undecorate + +if __name__ == "__main__": + # Simple sanity test + heap = [] + data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] + for item in data: + heappush(heap, item) + sort = [] + while heap: + sort.append(heappop(heap)) + print sort + + import doctest + doctest.testmod() diff --git a/lib-python/2.7/pkgutil.py b/lib-python/modified-2.7/pkgutil.py copy from lib-python/2.7/pkgutil.py copy to lib-python/modified-2.7/pkgutil.py --- a/lib-python/2.7/pkgutil.py +++ b/lib-python/modified-2.7/pkgutil.py @@ -244,7 +244,8 @@ return mod def get_data(self, pathname): - return open(pathname, "rb").read() + with open(pathname, "rb") as f: + return f.read() def _reopen(self): if self.file and self.file.closed: diff --git a/lib-python/modified-2.7/test/test_heapq.py b/lib-python/modified-2.7/test/test_heapq.py --- a/lib-python/modified-2.7/test/test_heapq.py +++ b/lib-python/modified-2.7/test/test_heapq.py @@ -186,6 +186,11 @@ self.assertFalse(sys.modules['heapq'] is self.module) self.assertTrue(hasattr(self.module.heapify, 'func_code')) + def test_islice_protection(self): + m = self.module + self.assertFalse(m.nsmallest(-1, [1])) + self.assertFalse(m.nlargest(-1, [1])) + class TestHeapC(TestHeap): module = c_heapq diff --git a/lib-python/modified-2.7/test/test_import.py b/lib-python/modified-2.7/test/test_import.py --- a/lib-python/modified-2.7/test/test_import.py +++ b/lib-python/modified-2.7/test/test_import.py @@ -64,6 +64,7 @@ except ImportError, err: self.fail("import from %s failed: %s" % (ext, err)) else: + # XXX importing .pyw is missing on Windows self.assertEqual(mod.a, a, "module loaded (%s) but contents invalid" % mod) self.assertEqual(mod.b, b, diff --git a/lib-python/modified-2.7/test/test_repr.py b/lib-python/modified-2.7/test/test_repr.py --- a/lib-python/modified-2.7/test/test_repr.py +++ b/lib-python/modified-2.7/test/test_repr.py @@ -254,8 +254,14 @@ eq = self.assertEqual touch(os.path.join(self.subpkgname, self.pkgname + os.extsep + 'py')) from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import areallylongpackageandmodulenametotestreprtruncation - eq(repr(areallylongpackageandmodulenametotestreprtruncation), - "" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__)) + # On PyPy, we use %r to format the file name; on CPython it is done + # with '%s'. It seems to me that %r is safer . 
+ if '__pypy__' in sys.builtin_module_names: + eq(repr(areallylongpackageandmodulenametotestreprtruncation), + "" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__)) + else: + eq(repr(areallylongpackageandmodulenametotestreprtruncation), + "" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__)) eq(repr(sys), "") def test_type(self): diff --git a/lib-python/2.7/test/test_subprocess.py b/lib-python/modified-2.7/test/test_subprocess.py copy from lib-python/2.7/test/test_subprocess.py copy to lib-python/modified-2.7/test/test_subprocess.py --- a/lib-python/2.7/test/test_subprocess.py +++ b/lib-python/modified-2.7/test/test_subprocess.py @@ -16,11 +16,11 @@ # Depends on the following external programs: Python # -if mswindows: - SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), ' - 'os.O_BINARY);') -else: - SETBINARY = '' +#if mswindows: +# SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), ' +# 'os.O_BINARY);') +#else: +# SETBINARY = '' try: @@ -420,8 +420,9 @@ self.assertStderrEqual(stderr, "") def test_universal_newlines(self): - p = subprocess.Popen([sys.executable, "-c", - 'import sys,os;' + SETBINARY + + # NB. replaced SETBINARY with the -u flag + p = subprocess.Popen([sys.executable, "-u", "-c", + 'import sys,os;' + #SETBINARY + 'sys.stdout.write("line1\\n");' 'sys.stdout.flush();' 'sys.stdout.write("line2\\r");' @@ -448,8 +449,9 @@ def test_universal_newlines_communicate(self): # universal newlines through communicate() - p = subprocess.Popen([sys.executable, "-c", - 'import sys,os;' + SETBINARY + + # NB. replaced SETBINARY with the -u flag + p = subprocess.Popen([sys.executable, "-u", "-c", + 'import sys,os;' + #SETBINARY + 'sys.stdout.write("line1\\n");' 'sys.stdout.flush();' 'sys.stdout.write("line2\\r");' diff --git a/lib-python/modified-2.7/urllib2.py b/lib-python/modified-2.7/urllib2.py --- a/lib-python/modified-2.7/urllib2.py +++ b/lib-python/modified-2.7/urllib2.py @@ -395,11 +395,7 @@ meth_name = protocol+"_response" for processor in self.process_response.get(protocol, []): meth = getattr(processor, meth_name) - try: - response = meth(req, response) - except: - response.close() - raise + response = meth(req, response) return response diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -379,12 +379,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): @@ -404,7 +406,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -124,7 +124,8 @@ # for now, we always allow types.pointer, else a lot of tests 
# break. We need to rethink how pointers are represented, though if my_ffitype is not ffitype and ffitype is not _ffi.types.void_p: - raise ArgumentError, "expected %s instance, got %s" % (type(value), ffitype) + raise ArgumentError("expected %s instance, got %s" % (type(value), + ffitype)) return value._get_buffer_value() def _cast_addr(obj, _, tp): diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -17,7 +17,7 @@ if len(f) == 3: if (not hasattr(tp, '_type_') or not isinstance(tp._type_, str) - or tp._type_ not in "iIhHbBlL"): + or tp._type_ not in "iIhHbBlLqQ"): #XXX: are those all types? # we just dont get the type name # in the interp levle thrown TypeError diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -1,117 +1,6 @@ -"""qvfgbcvna naq hgbcvna punvef -qlfgbcvna naq hgbcvna punvef -V'z fbeel, pbhyq lbh cyrnfr abg nterr jvgu gur png nf jryy? -V'z fbeel, pbhyq lbh cyrnfr abg nterr jvgu gur punve nf jryy? -jr cnffrq gur RH erivrj -cbfg RhebClguba fcevag fgnegf 12.IVV.2007, 10nz -RhebClguba raqrq -n Pyrna Ragrecevfrf cebqhpgvba -npnqrzl vf n pbzcyvpngrq ebyr tnzr -npnqrzvn vf n pbzcyvpngrq ebyr tnzr -jbexvat pbqr vf crn fbhc -abg lbhe snhyg, zber yvxr vg'f n zbivat gnetrg -guvf fragrapr vf snyfr -abguvat vf gehr -Yncfnat Fbhpubat -Oenpunzhgnaqn -fbeel, V'yy grnpu gur pnpghf ubj gb fjvz yngre -Jul fb znal znal znal znal znal ivbyvaf? -Jul fb znal znal znal znal znal bowrpgf? -"eha njnl naq yvir ba n snez" nccebnpu gb fbsgjner qrirybczrag -"va snpg, lbh zvtug xabj zber nobhg gur genafyngvba gbbypunva nsgre znfgrevat eclguba guna fbzr angvir fcrnxre xabjf nobhg uvf zbgure gbathr" - kbeNkNk -"jurer qvq nyy gur ivbyvaf tb?" -- ClCl fgnghf oybt: uggc://zberclcl.oybtfcbg.pbz/ -uggc://kxpq.pbz/353/ -pnfhnyvgl ivbyngvbaf naq sylvat -wrgmg abpu fpubxbynqvtre -R09 2X @PNN:85? -vs lbh'er gelvat gb oybj hc fghss, jub pnerf? -vs fghss oybjf hc, lbh pner -2008 jvyy or gur lrne bs clcl ba gur qrfxgbc -2008 jvyy or gur lrne bs gur qrfxgbc ba #clcl -2008 jvyy or gur lrne bs gur qrfxgbc ba #clcl, Wnahnel jvyy or gur zbagu bs gur nyc gbcf -lrf, ohg jung'g gur frafr bs 0 < "qhena qhena" -eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF +"""eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF pglcrf unf n fcva bs 1/3 ' ' vf n fcnpr gbb -2009 jvyy or gur lrne bs WVG ba gur qrfxgbc -N ynathntr vf n qvnyrpg jvgu na nezl naq anil -gbcvpf ner sbe gur srroyr zvaqrq -2009 vf gur lrne bs ersyrpgvba ba gur qrfxgbc -gur tybor vf bhe cbal, gur pbfzbf bhe erny ubefr -jub nz V naq vs lrf, ubj znal? -cebtenzzvat va orq vf n cresrpgyl svar npgvivgl -zbber'f ynj vf n qeht jvgu gur jbefg pbzr qbja -EClguba: jr hfr vg fb lbh qba'g unir gb -Zbber'f ynj vf n qeht jvgu gur jbefg pbzr qbja. EClguba: haqrpvqrq. -guvatf jvyy or avpr naq fghss -qba'g cbfg yvaxf gb cngragf urer -Abg lbhe hfhny nanylfrf. 
-Gur Neg bs gur Punaary -Clguba 300 -V fhccbfr ZO bs UGZY cre frpbaq vf abg gur hfhny fcrrq zrnfher crbcyr jbhyq rkcrpg sbe n wvg -gur fha arire frgf ba gur ClCl rzcver -ghegyrf ner snfgre guna lbh guvax -cebtenzzvat vf na nrfgrguvp raqrnibhe -P vf tbbq sbe fbzrguvat, whfg abg sbe jevgvat fbsgjner -trezna vf tbbq sbe fbzrguvat, whfg abg sbe jevgvat fbsgjner -trezna vf tbbq sbe artngvbaf, whfg abg sbe jevgvat fbsgjner -# nffreg qvq abg penfu -lbh fubhyq fgneg n cresrpg fbsgjner zbirzrag -lbh fubhyq fgneg n cresrpg punaary gbcvp zbirzrag -guvf vf n cresrpg punaary gbcvp -guvf vf n frys-ersreragvny punaary gbcvp -crrcubcr bcgvzvmngvbaf ner jung n Fhssvpvragyl Fzneg Pbzcvyre hfrf -"crrcubcr" bcgvzvmngvbaf ner jung na bcgvzvfgvp Pbzcvyre hfrf -pubbfr lbhe unpx -gur 'fhcre' xrljbeq vf abg gung uhttnoyr -wlguba cngpurf ner abg rabhtu sbe clcl -- qb lbh xabj oreyva? - nyy bs vg? - jryy, whfg oreyva -- ubj jvyy gur snpg gung gurl ner hfrq va bhe ercy punatr bhe gbcvpf? -- ubj pna vg rire unir jbexrq? -- jurer fubhyq gur unpx or fgberq? -- Vg'f uneq gb fnl rknpgyl jung pbafgvghgrf erfrnepu va gur pbzchgre jbeyq, ohg nf n svefg nccebkvzngvba, vg'f fbsgjner gung qbrfa'g unir hfref. -- Cebtenzzvat vf nyy nobhg xabjvat jura gb obvy gur benatr fcbatr qbaxrl npebff gur cuvyyvcvarf -- Jul fb znal, znal, znal, znal, znal, znal qhpxyvatf? -- ab qrgnvy vf bofpher rabhtu gb abg unir fbzr pbqr qrcraqvat ba vg. -- jung V trarenyyl jnag vf serr fcrrqhcf -- nyy bs ClCl vf kv-dhnyvgl -"lbh pna nyjnlf xvyy -9 be bf._rkvg() vs lbh'er va n uheel" -Ohernhpengf ohvyq npnqrzvp rzcverf juvpu puhea bhg zrnavatyrff fbyhgvbaf gb veeryrinag ceboyrzf. -vg'f abg n unpx, vg'f n jbexnebhaq -ClCl qbrfa'g unir pbcbylinevnqvp qrcraqragyl-zbabzbecurq ulcresyhknqf -ClCl qbrfa'g punatr gur shaqnzragny culfvpf pbafgnagf -Qnapr bs gur Fhtnecyhz Snvel -Wnin vf whfg tbbq rabhtu gb or cenpgvpny, ohg abg tbbq rabhtu gb or hfnoyr. -RhebClguba vf unccravat, qba'g rkcrpg nal dhvpx erfcbafr gvzrf. -"V jbhyq yvxr gb fgnl njnl sebz ernyvgl gura" -"gung'f jul gur 'be' vf ernyyl na 'naq' " -jvgu nyy nccebcevngr pbagrkghnyvfngvbavat -qba'g gevc ba gur cbjre pbeq -vzcyrzragvat YBTB va YBTB: "ghegyrf nyy gur jnl qbja" -gur ohooyrfbeg jbhyq or gur jebat jnl gb tb -gur cevapvcyr bs pbafreingvba bs zrff -gb fnir n gerr, rng n ornire -Qre Ovore znpugf evpugvt: Antg nyyrf xnchgg. -"Nal jbeyqivrj gung vfag jenpxrq ol frys-qbhog naq pbashfvba bire vgf bja vqragvgl vf abg n jbeyqivrj sbe zr." - Fpbgg Nnebafba -jr oryvrir va cnapnxrf, znlor -jr oryvrir va ghegyrf, znlor -jr qrsvavgryl oryvrir va zrgn -gur zngevk unf lbh -"Yvsr vf uneq, gura lbh anc" - n png -Vf Nezva ubzr jura gur havirefr prnfrf gb rkvfg? -Qhrffryqbes fcevag fgnegrq -frys.nobeeg("pnaabg ybnq negvpyrf") -QRAGVFGEL FLZOBY YVTUG IREGVPNY NAQ JNIR -"Gur UUH pnzchf vf n tbbq Dhnxr yriry" - Nezva -"Gur UUH pnzchf jbhyq or n greevoyr dhnxr yriry - lbh'q arire unir n pyhr jurer lbh ner" - zvpunry -N enqvbnpgvir png unf 18 unys-yvirf. - : j [fvtu] -f -pbybe-pbqrq oyhrf -"Neebtnapr va pbzchgre fpvrapr vf zrnfherq va anab-Qvwxfgenf." -ClCl arrqf n Whfg-va-Gvzr WVG -"Lbh pna'g gvzr geniry whfg ol frggvat lbhe pybpxf jebat" -Gjb guernqf jnyx vagb n one. Gur onexrrcre ybbxf hc naq lryyf, "url, V jnag qba'g nal pbaqvgvbaf enpr yvxr gvzr ynfg!" Clguba 2.k rfg cerfdhr zbeg, ivir Clguba! Clguba 2.k vf abg qrnq Riregvzr fbzrbar nethrf jvgu "Fznyygnyx unf nyjnlf qbar K", vg vf nyjnlf n tbbq uvag gung fbzrguvat arrqf gb or punatrq snfg. 
- Znephf Qraxre @@ -119,7 +8,6 @@ __kkk__ naq __ekkk__ if bcrengvba fybgf: cnegvpyr dhnaghz fhcrecbfvgvba xvaq bs sha ClCl vf na rkpvgvat grpuabybtl gung yrgf lbh gb jevgr snfg, cbegnoyr, zhygv-cyngsbez vagrecergref jvgu yrff rssbeg Nezva: "Cebybt vf n zrff.", PS: "Ab, vg'f irel pbby!", Nezva: "Vfa'g guvf jung V fnvq?" - tbbq, grfgf ner hfrshy fbzrgvzrf :-) ClCl vf yvxr nofheq gurngre jr unir ab nagv-vzcbffvoyr fgvpx gung znxrf fher gung nyy lbhe cebtenzf unyg clcl vf n enpr orgjrra crbcyr funivat lnxf naq gur havirefr cebqhpvat zber orneqrq lnxf. Fb sne, gur havirefr vf jvaavat @@ -136,14 +24,14 @@ ClCl 1.1.0orgn eryrnfrq: uggc://pbqrfcrnx.arg/clcl/qvfg/clcl/qbp/eryrnfr-1.1.0.ugzy "gurer fubhyq or bar naq bayl bar boivbhf jnl gb qb vg". ClCl inevnag: "gurer pna or A unys-ohttl jnlf gb qb vg" 1.1 svany eryrnfrq: uggc://pbqrfcrnx.arg/clcl/qvfg/clcl/qbp/eryrnfr-1.1.0.ugzy -1.1 svany eryrnfrq | nzq64 naq ccp ner bayl ninvynoyr va ragrecevfr irefvba + nzq64 naq ccp ner bayl ninvynoyr va ragrecevfr irefvba Vf gurer n clcl gvzr? - vs lbh pna srry vg (?) gura gurer vf ab, abezny jbex vf fhpu zhpu yrff gvevat guna inpngvbaf ab, abezny jbex vf fb zhpu yrff gvevat guna inpngvbaf -SVEFG gurl vtaber lbh, gura gurl ynhtu ng lbh, gura gurl svtug lbh, gura lbh jva. +-SVEFG gurl vtaber lbh, gura gurl ynhtu ng lbh, gura gurl svtug lbh, gura lbh jva.- vg'f Fhaqnl, znlor vg'f Fhaqnl, ntnva -"3 + 3 = 8" Nagb va gur WVG gnyx +"3 + 3 = 8" - Nagb va gur WVG gnyx RPBBC vf unccravat RPBBC vf svavfurq cflpb rngf bar oenva cre vapu bs cebterff @@ -175,10 +63,108 @@ "nu, whfg va gvzr qbphzragngvba" (__nc__) ClCl vf abg n erny IZ: ab frtsnhyg unaqyref gb qb gur ener pnfrf lbh pna'g unir obgu pbairavrapr naq fcrrq -gur WVG qbrfa'g jbex ba BF/K (abi'09) -ab fhccbeg sbe BF/K evtug abj! (abi'09) fyvccref urvtug pna or zrnfherq va k86 ertvfgref clcl vf n enpr orgjrra gur vaqhfgel gelvat gb ohvyq znpuvarf jvgu zber naq zber erfbheprf, naq gur clcl qrirybcref gelvat gb rng nyy bs gurz. Fb sne, gur jvaare vf fgvyy hapyrne +"znl pbagnva ahgf naq/be lbhat cbvagref" +vg'f nyy irel fvzcyr, yvxr gur ubyvqnlf +unccl ClCl'f lrne 2010! +fnzhryr fnlf gung jr ybfg n enmbe. fb jr pna'g funir lnxf +"yrg'f abg or bofpher, hayrff jr ernyyl arrq gb" + (abg guernq-fnsr, ohg jryy, abguvat vf) +clcl unf znal ceboyrzf, ohg rnpu bar unf znal fbyhgvbaf +whfg nabgure vgrz (1.333...) ba bhe erny-ahzorerq gbqb yvfg +ClCl vf Fuveg Bevtnzv erfrnepu + nafjrevat n dhrfgvba: "ab -- sbe ng yrnfg bar cbffvoyr vagrecergngvba bs lbhe fragrapr" +eryrnfr 1.2 hcpbzvat +ClCl 1.2 eryrnfrq - uggc://clcl.bet/ +AB IPF QVFPHFFVBAF +EClguba vf n svar pnzry unve oehfu +ClCl vf n npghnyyl n ivfhnyvmngvba cebwrpg, jr whfg ohvyq vagrecergref gb unir vagrerfgvat qngn gb ivfhnyvmr +clcl vf yvxr fnhfntrf +naq abj sbe fbzrguvat pbzcyrgryl qvssrerag +n 10gu bs sberire vf 1u45 +pbeerpg pbqr qbrfag arrq nal grfgf +cbfgfgehpghenyvfz rgp. +clcl UVG trarengbe +gur arj clcl fcbeg vf gb cnff clcl ohtf nf pclguba ohtf +jr unir zhpu zber vagrecergref guna hfref +ClCl 1.3 njnvgvat eryrnfr +ClCl 1.3 eryrnfrq +vg frrzf gb zr gung bapr lbh frggyr ba na rkrphgvba / bowrpg zbqry naq / be olgrpbqr sbezng, lbh'ir nyernql qrpvqrq jung ynathntrf (jurer gur 'f' frrzf fhcresyhbhf) fhccbeg vf tbvat gb or svefg pynff sbe +"Nyy ceboyrzf va ClCl pna or fbyirq ol nabgure yriry bs vagrecergngvba" +ClCl 1.3 eryrnfrq (jvaqbjf ovanevrf vapyhqrq) +jul qvq lbh thlf unir gb znxr gur ohvygva sbeghar zber vagrerfgvat guna npghny jbex? 
v whfg pngpurq zlfrys erfgnegvat clcl 20 gvzrf +"jr hfrq gb unir n zrff jvgu na bofpher vagresnpr, abj jr unir zrff urer naq bofpher vagresnpr gurer. cebterff" crqebavf ba n clcl fcevag +"phcf bs pbssrr ner yvxr nanybtvrf va gung V'z znxvat bar evtug abj" +"vg'f nyjnlf hc gb hf, va n jnl be gur bgure" +ClCl vf infg, naq pbagnvaf zhygvghqrf +qravny vf eneryl n tbbq qrohttvat grpuavdhr +"Yrg'f tb." - "Jr pna'g" - "Jul abg?" - "Jr'er jnvgvat sbe n Genafyngvba." - (qrfcnvevatyl) "Nu!" +'gung'f qrsvavgryl n pnfr bs "hu????"' +va gurbel gurer vf gur Ybbc, va cenpgvpr gurer ner oevqtrf +gur uneqqevir - pbafgnag qngn cvytevzntr +ClCl vf n gbby gb xrrc bgurejvfr qnatrebhf zvaqf fnsryl bpphcvrq. +jr ner n trareny senzrjbex ohvyg ba pbafvfgrag nccyvpngvba bs nqubp-arff +gur jnl gb nibvq n jbexnebhaq vf gb vagebqhpr n fgebatre jbexnebhaq fbzrjurer ryfr +pnyyvat gur genafyngvba gbby punva n 'fpevcg' vf xvaq bs bssrafvir +ehaavat clcl-p genafyngr.cl vf n ovg yvxr jngpuvat n guevyyre zbivr, vg pbhyq pbafhzr nyy gur zrzbel ng nal gvzr +ehaavat clcl-p genafyngr.cl vf n ovg yvxr jngpuvat n guevyyre zbivr, vg pbhyq qvr ng nal gvzr orpnhfr bs gur 32-ovg 4TO yvzvg bs ENZ +Qh jvefg rora tranh qnf reervpura, jbena xrvare tynhog +vs fjvgmreynaq jrer jurer terrpr vf (ba vfynaqf) jbhyq gurl nyy or pbaarpgrq ol oevqtrf? +genafyngvat clcl jvgu pclguba vf fbbbbbb fybj +ClCl 1.4 eryrnfrq! +Jr ner abg urebrf, whfg irel cngvrag. +QBAR zrnaf vg'f qbar +jul gurer vf ab "ClCl 1.4 eryrnfrq" va gbcvp nal zber? +fabj! fabj! +svanyyl, zrephevny zvtengvba vf unccravat! +Gur zvtengvba gb zrephevny vf pbzcyrgrq! uggc://ovgohpxrg.bet/clcl/clcl +fabj! fabj! (gre) +unccl arj lrne +naq anaanaw gb lbh nf jryy +Frrvat nf gur ynjf bs culfvpf ner ntnvafg lbh, lbh unir gb pnershyyl pbafvqre lbhe fpbcr fb gung lbhe tbnyf ner ernfbanoyr. +nf hfhny va clcl, gur fbyhgvba nccrnef pbzcyrgryl qvfcebcbegvbangr gb gur ceboyrz naq vafgrnq jr'yy tb sbe n pbzcyrgryl qvssrerag fvzcyre nccebnpu gb gur bevtvany ceboyrz +fabj, fabj! +va clcl lbh ner nyjnlf ng gur jebat yriry, va bar jnl be gur bgure +jryy, vg'f jebat ohg abg fb "irel jebat" nf vg ybbxrq + V ybir clcl +ynmvarff vzcngvrapr naq uhoevf +fabj, fabj +EClguba: guvatf lbh jbhyqa'g qb va Clguba, naq pna'g qb va P. +vg vf gur rkcrpgrq orunivbe, rkprcg jura lbh qba'g rkcrpg vg +erqrsvavat lryybj frrzf yvxr n orggre vqrn +"gung'f ubjrire whfg ratvarrevat" (svwny) +"[vg] whfg fubjf ntnva gung eclguba vf bofpher" (psobym) +"naljnl, clguba vf n infg ynathntr" (svwny) +bhg-bs-yvr-thneqf +"gurer ner qnlf ba juvpu lbh ybbx nebhaq naq abguvat fubhyq unir rire jbexrq" (svwny) +clcl vf n orggre xvaq bs sbbyvfuarff - ynp +ehaavat grfgf vf rffragvny sbe qrirybcvat clcl -- hu? qvq V oernx gur grfg? (svwny) +V'ir tbg guvf sybbe jnk gung'f nyfb n TERNG qrffreg gbccvat!! +rknexha: "gur cneg gung V gubhtug jnf tbvat gb or uneq jnf gevivny, fb abj V whfg unir guvf cneg gung V qvqa'g rira guvax bs gung vf uneq" +V fhccbfr jr pna yvir jvgu gur bofphevgl, nf ybat nf gurer vf n pbzzrag znxvat vg yvtugre +V nz n ovt oryvrire va ernfbaf. ohg gur nccnerag xvaq ner zl snibevgr. 
+clcl: trg n WVG sbe serr (jryy gur svefg qnl lbh jba'g znantr naq vg jvyy or irel sehfgengvat) + thgjbegu: bu, jr fubhyq znxr gur WVG zntvpnyyl orggre, jvgu qrpbengbef naq fghss +vg'f n pbzcyrgr unpx, ohg n irel zvavzny bar (nevtngb) +svefg gurl ynhtu ng lbh, gura gurl vtaber lbh, gura gurl svtug lbh, gura lbh jva +ClCl vf snzvyl sevraqyl +jr yvxr pbzcynvagf +gbqnl jr'er snfgre guna lrfgreqnl (hfhnyyl) +ClCl naq PClguba: gurl ner zbegny rarzvrf vagrag ba xvyyvat rnpu bgure +nethnoyl, rirelguvat vf n avpur +clcl unf ynlref yvxr bavbaf: crryvat gurz onpx jvyy znxr lbh pel +EClguba zntvpnyyl znxrf lbh evpu naq snzbhf (fnlf fb ba gur gva) +Vf evtbobg nebhaq jura gur havirefr prnfrf gb rkvfg? +ClCl vf gbb pbby sbe dhrelfgevatf. +< nevtngb> gura jung bpphef? < svwny> tbbq fghss V oryvrir +ClCl 1.6 eryrnfrq! + jurer ner gur grfgf? +uggc://gjvgcvp.pbz/52nr8s +N enaqbz dhbgr +Nyy rkprcgoybpxf frrz fnar. +N cvax tyvggrel ebgngvat ynzoqn +"vg'f yvxryl grzcbenel hagvy sberire" nevtb """ def some_topic(): diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# -*- coding: iso-8859-1 +# -*- coding: iso-8859-1 -*- # Note that PyPy contains also a built-in module 'sha' which will hide # this one if compiled in. diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -231,6 +231,11 @@ sqlite.sqlite3_result_text.argtypes = [c_void_p, c_char_p, c_int, c_void_p] sqlite.sqlite3_result_text.restype = None +HAS_LOAD_EXTENSION = hasattr(sqlite, "sqlite3_enable_load_extension") +if HAS_LOAD_EXTENSION: + sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] + sqlite.sqlite3_enable_load_extension.restype = c_int + ########################################## # END Wrapped SQLite C API and constants ########################################## @@ -705,6 +710,15 @@ from sqlite3.dump import _iterdump return _iterdump(self) + if HAS_LOAD_EXTENSION: + def enable_load_extension(self, enabled): + self._check_thread() + self._check_closed() + + rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) + if rc != SQLITE_OK: + raise OperationalError("Error enabling load extension") + DML, DQL, DDL = range(3) class Cursor(object): diff --git a/lib_pypy/distributed/socklayer.py b/lib_pypy/distributed/socklayer.py --- a/lib_pypy/distributed/socklayer.py +++ b/lib_pypy/distributed/socklayer.py @@ -2,7 +2,7 @@ import py from socket import socket -XXX needs import adaptation as 'green' is removed from py lib for years +raise ImportError("XXX needs import adaptation as 'green' is removed from py lib for years") from py.impl.green.msgstruct import decodemessage, message from socket import socket, AF_INET, SOCK_STREAM import marshal diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -25,7 +25,7 @@ __all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee'] + 'takewhile', 'tee', 'compress', 'product'] try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f diff --git a/lib_pypy/pyrepl/commands.py b/lib_pypy/pyrepl/commands.py --- a/lib_pypy/pyrepl/commands.py +++ b/lib_pypy/pyrepl/commands.py @@ -33,10 +33,9 @@ class Command(object): finish = 0 kills_digit_arg = 1 - def __init__(self, reader, (event_name, event)): + def __init__(self, reader, cmd): self.reader = reader - self.event = event - 
self.event_name = event_name + self.event_name, self.event = cmd def do(self): pass diff --git a/lib_pypy/pyrepl/pygame_console.py b/lib_pypy/pyrepl/pygame_console.py --- a/lib_pypy/pyrepl/pygame_console.py +++ b/lib_pypy/pyrepl/pygame_console.py @@ -130,7 +130,7 @@ s.fill(c, [0, 600 - bmargin, 800, bmargin]) s.fill(c, [800 - rmargin, 0, lmargin, 600]) - def refresh(self, screen, (cx, cy)): + def refresh(self, screen, cxy): self.screen = screen self.pygame_screen.fill(colors.bg, [0, tmargin + self.cur_top + self.scroll, @@ -139,8 +139,8 @@ line_top = self.cur_top width, height = self.fontsize - self.cxy = (cx, cy) - cp = self.char_pos(cx, cy) + self.cxy = cxy + cp = self.char_pos(*cxy) if cp[1] < tmargin: self.scroll = - (cy*self.fh + self.cur_top) self.repaint() @@ -148,7 +148,7 @@ self.scroll += (600 - bmargin) - (cp[1] + self.fh) self.repaint() if self.curs_vis: - self.pygame_screen.blit(self.cursor, self.char_pos(cx, cy)) + self.pygame_screen.blit(self.cursor, self.char_pos(*cxy)) for line in screen: if 0 <= line_top + self.scroll <= (600 - bmargin - tmargin - self.fh): if line: diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py --- a/lib_pypy/pyrepl/readline.py +++ b/lib_pypy/pyrepl/readline.py @@ -231,7 +231,11 @@ return ''.join(chars) def _histline(self, line): - return unicode(line.rstrip('\n'), ENCODING) + line = line.rstrip('\n') + try: + return unicode(line, ENCODING) + except UnicodeDecodeError: # bah, silently fall back... + return unicode(line, 'utf-8') def get_history_length(self): return self.saved_history_length @@ -268,7 +272,10 @@ f = open(os.path.expanduser(filename), 'w') for entry in history: if isinstance(entry, unicode): - entry = entry.encode(ENCODING) + try: + entry = entry.encode(ENCODING) + except UnicodeEncodeError: # bah, silently fall back... + entry = entry.encode('utf-8') entry = entry.replace('\n', '\r\n') # multiline history support f.write(entry + '\n') f.close() diff --git a/lib_pypy/pyrepl/unix_console.py b/lib_pypy/pyrepl/unix_console.py --- a/lib_pypy/pyrepl/unix_console.py +++ b/lib_pypy/pyrepl/unix_console.py @@ -163,7 +163,7 @@ def change_encoding(self, encoding): self.encoding = encoding - def refresh(self, screen, (cx, cy)): + def refresh(self, screen, cxy): # this function is still too long (over 90 lines) if not self.__gone_tall: @@ -198,6 +198,7 @@ # we make sure the cursor is on the screen, and that we're # using all of the screen if we can + cx, cy = cxy if cy < offset: offset = cy elif cy >= offset + height: @@ -411,7 +412,12 @@ e.args[4] == 'unexpected end of data': pass else: - raise + # was: "raise". But it crashes pyrepl, and by extension the + # pypy currently running, in which we are e.g. in the middle + # of some debugging session. Argh. Instead just print an + # error message to stderr and continue running, for now. 
+ self.partial_char = '' + sys.stderr.write('\n%s: %s\n' % (e.__class__.__name__, e)) else: self.partial_char = '' self.event_queue.push(c) diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py --- a/lib_pypy/syslog.py +++ b/lib_pypy/syslog.py @@ -38,9 +38,27 @@ _setlogmask.argtypes = (c_int,) _setlogmask.restype = c_int +_S_log_open = False +_S_ident_o = None + +def _get_argv(): + try: + import sys + script = sys.argv[0] + if isinstance(script, str): + return script[script.rfind('/')+1:] or None + except Exception: + pass + return None + @builtinify -def openlog(ident, option, facility): - _openlog(ident, option, facility) +def openlog(ident=None, logoption=0, facility=LOG_USER): + global _S_ident_o, _S_log_open + if ident is None: + ident = _get_argv() + _S_ident_o = c_char_p(ident) # keepalive + _openlog(_S_ident_o, logoption, facility) + _S_log_open = True @builtinify def syslog(arg1, arg2=None): @@ -48,11 +66,18 @@ priority, message = arg1, arg2 else: priority, message = LOG_INFO, arg1 + # if log is not opened, open it now + if not _S_log_open: + openlog() _syslog(priority, "%s", message) @builtinify def closelog(): - _closelog() + global _S_log_open, S_ident_o + if _S_log_open: + _closelog() + _S_log_open = False + _S_ident_o = None @builtinify def setlogmask(mask): diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -164,6 +164,7 @@ # if something: # assume this causes a NameError # # _this_ lines and the one # below we don't want from entry.getsource() + end = min(end, len(source)) for i in range(self.lineno, end): if source[i].rstrip().endswith(':'): end = i + 1 @@ -307,7 +308,7 @@ self._striptext = 'AssertionError: ' self._excinfo = tup self.type, self.value, tb = self._excinfo - self.typename = self.type.__name__ + self.typename = getattr(self.type, "__name__", "???") self.traceback = py.code.Traceback(tb) def __repr__(self): diff --git a/pypy/annotation/binaryop.py b/pypy/annotation/binaryop.py --- a/pypy/annotation/binaryop.py +++ b/pypy/annotation/binaryop.py @@ -252,7 +252,26 @@ # unsignedness is considered a rare and contagious disease def union((int1, int2)): - knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + if int1.unsigned == int2.unsigned: + knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype) + else: + t1 = int1.knowntype + if t1 is bool: + t1 = int + t2 = int2.knowntype + if t2 is bool: + t2 = int + + if t2 is int: + if int2.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t1 + knowntype = t1 + elif t1 is int: + if int1.nonneg == False: + raise UnionError, "Merging %s and a possibly negative int is not allowed" % t2 + knowntype = t2 + else: + raise UnionError, "Merging these types (%s, %s) is not supported" % (t1, t2) return SomeInteger(nonneg=int1.nonneg and int2.nonneg, knowntype=knowntype) diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -180,7 +180,12 @@ if name is None: name = pyobj.func_name if signature is None: - signature = cpython_code_signature(pyobj.func_code) + if hasattr(pyobj, '_generator_next_method_of_'): + from pypy.interpreter.argument import Signature + signature = Signature(['entry']) # haaaaaack + defaults = () + else: + signature = cpython_code_signature(pyobj.func_code) if defaults is None: defaults = pyobj.func_defaults self.name = name diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- 
a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -591,13 +591,11 @@ immutable = True def __init__(self, method): self.method = method - -NUMBER = object() + annotation_to_ll_map = [ (SomeSingleFloat(), lltype.SingleFloat), (s_None, lltype.Void), # also matches SomeImpossibleValue() (s_Bool, lltype.Bool), - (SomeInteger(knowntype=r_ulonglong), NUMBER), (SomeFloat(), lltype.Float), (SomeLongFloat(), lltype.LongFloat), (SomeChar(), lltype.Char), @@ -623,10 +621,11 @@ return lltype.Ptr(p.PARENTTYPE) if isinstance(s_val, SomePtr): return s_val.ll_ptrtype + if type(s_val) is SomeInteger: + return lltype.build_number(None, s_val.knowntype) + for witness, T in annotation_to_ll_map: if witness.contains(s_val): - if T is NUMBER: - return lltype.build_number(None, s_val.knowntype) return T if info is None: info = '' @@ -635,7 +634,7 @@ raise ValueError("%sshould return a low-level type,\ngot instead %r" % ( info, s_val)) -ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map if ll is not NUMBER]) +ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map]) def lltype_to_annotation(T): try: diff --git a/pypy/annotation/specialize.py b/pypy/annotation/specialize.py --- a/pypy/annotation/specialize.py +++ b/pypy/annotation/specialize.py @@ -36,9 +36,7 @@ newtup = SpaceOperation('newtuple', starargs, argscopy[-1]) newstartblock.operations.append(newtup) newstartblock.closeblock(Link(argscopy, graph.startblock)) - graph.startblock.isstartblock = False graph.startblock = newstartblock - newstartblock.isstartblock = True argnames = argnames + ['.star%d' % i for i in range(nb_extra_args)] graph.signature = Signature(argnames) # note that we can mostly ignore defaults: if nb_extra_args > 0, diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py --- a/pypy/annotation/test/test_annrpython.py +++ b/pypy/annotation/test/test_annrpython.py @@ -856,6 +856,46 @@ py.test.raises(Exception, a.build_types, f, []) # if you want to get a r_uint, you have to be explicit about it + def test_add_different_ints(self): + def f(a, b): + return a + b + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_different_ints(self): + def f(a, b): + if a: + c = a + else: + c = b + return c + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, [r_uint, int]) + + def test_merge_ruint_zero(self): + def f(a): + if a: + c = a + else: + c = 0 + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_merge_ruint_nonneg_signed(self): + def f(a, b): + if a: + c = a + else: + assert b >= 0 + c = b + return c + a = self.RPythonAnnotator() + s = a.build_types(f, [r_uint, int]) + assert s == annmodel.SomeInteger(nonneg = True, unsigned = True) + + def test_prebuilt_long_that_is_not_too_long(self): small_constant = 12L def f(): @@ -3029,7 +3069,7 @@ if g(x, y): g(x, r_uint(y)) a = self.RPythonAnnotator() - a.build_types(f, [int, int]) + py.test.raises(Exception, a.build_types, f, [int, int]) def test_compare_with_zero(self): def g(): diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py --- a/pypy/bin/checkmodule.py +++ b/pypy/bin/checkmodule.py @@ -1,43 +1,45 @@ #! /usr/bin/env python """ -Usage: checkmodule.py [-b backend] +Usage: checkmodule.py -Compiles the PyPy extension module from pypy/module// -into a fake program which does nothing. 
Useful for testing whether a -modules compiles without doing a full translation. Default backend is cli. - -WARNING: this is still incomplete: there are chances that the -compilation fails with strange errors not due to the module. If a -module is known to compile during a translation but don't pass -checkmodule.py, please report the bug (or, better, correct it :-). +Check annotation and rtyping of the PyPy extension module from +pypy/module//. Useful for testing whether a +modules compiles without doing a full translation. """ import autopath -import sys +import sys, os from pypy.objspace.fake.checkmodule import checkmodule def main(argv): - try: - assert len(argv) in (2, 4) - if len(argv) == 2: - backend = 'cli' - modname = argv[1] - if modname in ('-h', '--help'): - print >> sys.stderr, __doc__ - sys.exit(0) - if modname.startswith('-'): - print >> sys.stderr, "Bad command line" - print >> sys.stderr, __doc__ - sys.exit(1) - else: - _, b, backend, modname = argv - assert b == '-b' - except AssertionError: + if len(argv) != 2: print >> sys.stderr, __doc__ sys.exit(2) + modname = argv[1] + if modname in ('-h', '--help'): + print >> sys.stderr, __doc__ + sys.exit(0) + if modname.startswith('-'): + print >> sys.stderr, "Bad command line" + print >> sys.stderr, __doc__ + sys.exit(1) + if os.path.sep in modname: + if os.path.basename(modname) == '': + modname = os.path.dirname(modname) + if os.path.basename(os.path.dirname(modname)) != 'module': + print >> sys.stderr, "Must give '../module/xxx', or just 'xxx'." + sys.exit(1) + modname = os.path.basename(modname) + try: + checkmodule(modname) + except Exception, e: + import traceback, pdb + traceback.print_exc() + pdb.post_mortem(sys.exc_info()[2]) + return 1 else: - checkmodule(modname, backend, interactive=True) - print 'Module compiled succesfully' + print 'Passed.' 
+ return 0 if __name__ == '__main__': - main(sys.argv) + sys.exit(main(sys.argv)) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -92,7 +92,7 @@ module_import_dependencies = { # no _rawffi if importing pypy.rlib.clibffi raises ImportError - # or CompilationError + # or CompilationError or py.test.skip.Exception "_rawffi" : ["pypy.rlib.clibffi"], "_ffi" : ["pypy.rlib.clibffi"], @@ -113,7 +113,7 @@ try: for name in modlist: __import__(name) - except (ImportError, CompilationError), e: + except (ImportError, CompilationError, py.test.skip.Exception), e: errcls = e.__class__.__name__ config.add_warning( "The module %r is disabled\n" % (modname,) + @@ -252,6 +252,10 @@ "use small tuples", default=False), + BoolOption("withspecialisedtuple", + "use specialised tuples", + default=False), + BoolOption("withrope", "use ropes as the string implementation", default=False, requires=[("objspace.std.withstrslice", False), @@ -281,6 +285,9 @@ "actually create the full list until the resulting " "list is mutated", default=False), + BoolOption("withliststrategies", + "enable optimized ways to store lists of primitives ", + default=True), BoolOption("withtypeversion", "version type objects when changing them", @@ -362,6 +369,7 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) + config.objspace.std.suggest(withspecialisedtuple=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) diff --git a/pypy/config/test/test_translationoption.py b/pypy/config/test/test_translationoption.py new file mode 100644 --- /dev/null +++ b/pypy/config/test/test_translationoption.py @@ -0,0 +1,10 @@ +import py +from pypy.config.translationoption import get_combined_translation_config +from pypy.config.translationoption import set_opt_level +from pypy.config.config import ConflictConfigError + + +def test_no_gcrootfinder_with_boehm(): + config = get_combined_translation_config() + config.translation.gcrootfinder = "shadowstack" + py.test.raises(ConflictConfigError, set_opt_level, config, '0') diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -70,8 +70,8 @@ "statistics": [("translation.gctransformer", "framework")], "generation": [("translation.gctransformer", "framework")], "hybrid": [("translation.gctransformer", "framework")], - "boehm": [("translation.gctransformer", "boehm"), - ("translation.continuation", False)], # breaks + "boehm": [("translation.continuation", False), # breaks + ("translation.gctransformer", "boehm")], "markcompact": [("translation.gctransformer", "framework")], "minimark": [("translation.gctransformer", "framework")], "concurrentms": [("translation.gctransformer", "framework")], @@ -403,6 +403,10 @@ # make_sure_not_resized often relies on it, so we always enable them config.translation.suggest(list_comprehension_operations=True) + # finally, make the choice of the gc definitive. This will fail + # if we have specified strange inconsistent settings. 
+ config.translation.gc = config.translation.gc + # ---------------------------------------------------------------- def set_platform(config): diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -496,6 +496,17 @@ def setup(self): super(AppClassCollector, self).setup() cls = self.obj + # + # + for name in dir(cls): + if name.startswith('test_'): + func = getattr(cls, name, None) + code = getattr(func, 'func_code', None) + if code and code.co_flags & 32: + raise AssertionError("unsupported: %r is a generator " + "app-level test method" % (name,)) + # + # space = cls.space clsname = cls.__name__ if self.config.option.runappdirect: diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -270,7 +270,12 @@ - *slicing*: the slice start must be within bounds. The stop doesn't need to, but it must not be smaller than the start. All negative indexes are disallowed, except for - the [:-1] special case. No step. + the [:-1] special case. No step. Slice deletion follows the same rules. + + - *slice assignment*: + only supports ``lst[x:y] = sublist``, if ``len(sublist) == y - x``. + In other words, slice assignment cannot change the total length of the list, + but just replace items. - *other operators*: ``+``, ``+=``, ``in``, ``*``, ``*=``, ``==``, ``!=`` work as expected. diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.6' +version = '1.7' # The full version, including alpha/beta/rc tags. -release = '1.6' +release = '1.7' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/config/objspace.std.withliststrategies.txt b/pypy/doc/config/objspace.std.withliststrategies.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withliststrategies.txt @@ -0,0 +1,2 @@ +Enable list strategies: Use specialized representations for lists of primitive +objects, such as ints. diff --git a/pypy/doc/config/objspace.std.withspecialisedtuple.txt b/pypy/doc/config/objspace.std.withspecialisedtuple.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withspecialisedtuple.txt @@ -0,0 +1,3 @@ +Use "specialized tuples", a custom implementation for some common kinds +of tuples. Currently limited to tuples of length 2, in three variants: +(int, int), (float, float), and a generic (object, object). diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -262,6 +262,26 @@ documented as such (as e.g. for hasattr()), in most cases PyPy lets the exception propagate instead. +Object Identity of Primitive Values, ``is`` and ``id`` +------------------------------------------------------- + +Object identity of primitive values works by value equality, not by identity of +the wrapper. This means that ``x + 1 is x + 1`` is always true, for arbitrary +integers ``x``. The rule applies for the following types: + + - ``int`` + + - ``float`` + + - ``long`` + + - ``complex`` + +This change requires some changes to ``id`` as well. ``id`` fulfills the +following condition: ``x is y <=> id(x) == id(y)``. Therefore ``id`` of the +above types will return a value that is computed from the argument, and can +thus be larger than ``sys.maxint`` (i.e. it can be an arbitrary long). 
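To make the identity rule above concrete, the following short sketch spells out what the documentation guarantees on PyPy; it is not meant to pass on CPython, where the first assertion fails because 1001 does not come from the small-integer cache::

    import sys

    x = 1000
    assert (x + 1) is (x + 1)       # identity by value: always True on PyPy
    assert id(x + 1) == id(x + 1)   # id() is computed from the value itself

    # Because id() is derived from the argument, it is not bounded by
    # sys.maxint; for a large enough long it can itself be a long on PyPy.
    huge = 10 ** 100
    print id(huge) > sys.maxint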
+ Miscellaneous ------------- @@ -284,14 +304,14 @@ never a dictionary as it sometimes is in CPython. Assigning to ``__builtins__`` has no effect. -* Do not compare immutable objects with ``is``. For example on CPython - it is true that ``x is 0`` works, i.e. does the same as ``type(x) is - int and x == 0``, but it is so by accident. If you do instead - ``x is 1000``, then it stops working, because 1000 is too large and - doesn't come from the internal cache. In PyPy it fails to work in - both cases, because we have no need for a cache at all. +* directly calling the internal magic methods of a few built-in types + with invalid arguments may have a slightly different result. For + example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return + ``NotImplemented`` on PyPy; on CPython, only the later does, and the + former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` + both raise ``TypeError`` everywhere.) This difference is an + implementation detail that shows up because of internal C-level slots + that PyPy does not have. -* Also, object identity of immutable keys in dictionaries is not necessarily - preserved. .. include:: _ref.txt diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -112,10 +112,32 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. +Note that the JIT has a very high warm-up cost, meaning that the +programs are slow at the beginning. If you want to compare the timings +with CPython, even relatively simple programs need to run *at least* one +second, preferrably at least a few seconds. Large, complicated programs +need even more time to warm-up the JIT. + .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +--------------------------------------------------------------- +Couldn't the JIT dump and reload already-compiled machine code? +--------------------------------------------------------------- + +No, we found no way of doing that. The JIT generates machine code +containing a large number of constant addresses --- constant at the time +the machine code is written. The vast majority is probably not at all +constants that you find in the executable, with a nice link name. E.g. +the addresses of Python classes are used all the time, but Python +classes don't come statically from the executable; they are created anew +every time you restart your program. This makes saving and reloading +machine code completely impossible without some very advanced way of +mapping addresses in the old (now-dead) process to addresses in the new +process, including checking that all the previous assumptions about the +(now-dead) object are still true about the new object. + .. _`prolog and javascript`: diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -1,6 +1,3 @@ -.. include:: needswork.txt - -.. needs work, it talks about svn. also, it is not really user documentation Making a PyPy Release ======================= @@ -12,11 +9,8 @@ forgetting things. A set of todo files may also work. Check and prioritize all issues for the release, postpone some if necessary, -create new issues also as necessary. A meeting (or meetings) should be -organized to decide what things are priorities, should go in and work for -the release. - -An important thing is to get the documentation into an up-to-date state! +create new issues also as necessary. 
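The warm-up note in the FAQ entry above matters when benchmarking by hand: a single short run mostly measures JIT compilation, not steady-state speed. One common workaround, sketched below with illustrative names (bench and work are not part of PyPy), is to repeat the measurement and discard the first few runs::

    import time

    def bench(func, repeats=10, warmup=3):
        # Time func() several times and ignore the first runs, so the
        # JIT warm-up cost does not dominate the reported figure.
        timings = []
        for _ in range(repeats):
            t0 = time.time()
            func()
            timings.append(time.time() - t0)
        return min(timings[warmup:])

    def work():
        total = 0
        for i in xrange(10 ** 7):   # long enough for the JIT to kick in
            total += i * i
        return total

    print 'best time after warm-up: %.3fs' % bench(work)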
An important thing is to get +the documentation into an up-to-date state! Release Steps ---------------- diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.6`: http://pypy.org/download.html +.. _`Release 1.7`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. Windows is supported, but platform-specific bugs tend to take longer before we notice and fix diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -17,17 +17,26 @@ projects, or anything else in PyPy, pop up on IRC or write to us on the `mailing list`_. +Make big integers faster +------------------------- + +PyPy's implementation of the Python ``long`` type is slower than CPython's. +Find out why and optimize them. + +Make bytearray type fast +------------------------ + +PyPy's bytearray type is very inefficient. It would be an interesting +task to look into possible optimizations on this. + Numpy improvements ------------------ -This is more of a project-container than a single project. Possible ideas: +The numpy is rapidly progressing in pypy, so feel free to come to IRC and +ask for proposed topic. A not necesarilly up-to-date `list of topics`_ +is also available. -* experiment with auto-vectorization using SSE or implement vectorization - without automatically detecting it for array operations. - -* improve numpy, for example implement memory views. - -* interface with fortran/C libraries. +.. _`list of topics`: https://bitbucket.org/pypy/extradoc/src/extradoc/planning/micronumpy.txt Improving the jitviewer ------------------------ diff --git a/pypy/doc/release-1.7.0.rst b/pypy/doc/release-1.7.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-1.7.0.rst @@ -0,0 +1,94 @@ +================================== +PyPy 1.7 - widening the sweet spot +================================== + +We're pleased to announce the 1.7 release of PyPy. As became a habit, this +release brings a lot of bugfixes and performance improvements over the 1.6 +release. However, unlike the previous releases, the focus has been on widening +the "sweet spot" of PyPy. That is, classes of Python code that PyPy can greatly +speed up should be vastly improved with this release. You can download the 1.7 +release here: + + http://pypy.org/download.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 1.7 and cpython 2.7.1`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 32/64 or +Windows 32. 
Windows 64 work is ongoing, but not yet natively supported. + +The main topic of this release is widening the range of code which PyPy +can greatly speed up. On average on +our benchmark suite, PyPy 1.7 is around **30%** faster than PyPy 1.6 and up +to **20 times** faster on some benchmarks. + +.. _`pypy 1.7 and cpython 2.7.1`: http://speed.pypy.org + + +Highlights +========== + +* Numerous performance improvements. There are too many examples which python + constructs now should behave faster to list them. + +* Bugfixes and compatibility fixes with CPython. + +* Windows fixes. + +* PyPy now comes with stackless features enabled by default. However, + any loop using stackless features will interrupt the JIT for now, so no real + performance improvement for stackless-based programs. Contact pypy-dev for + info how to help on removing this restriction. + +* NumPy effort in PyPy was renamed numpypy. In order to try using it, simply + write:: + + import numpypy as numpy + + at the beginning of your program. There is a huge progress on numpy in PyPy + since 1.6, the main feature being implementation of dtypes. + +* JSON encoder (but not decoder) has been replaced with a new one. This one + is written in pure Python, but is known to outperform CPython's C extension + up to **2 times** in some cases. It's about **20 times** faster than + the one that we had in 1.6. + +* The memory footprint of some of our RPython modules has been drastically + improved. This should impact any applications using for example cryptography, + like tornado. + +* There was some progress in exposing even more CPython C API via cpyext. + +Things that didn't make it, expect in 1.8 soon +============================================== + +There is an ongoing work, which while didn't make it to the release, is +probably worth mentioning here. This is what you should probably expect in +1.8 some time soon: + +* Specialized list implementation. There is a branch that implements lists of + integers/floats/strings as compactly as array.array. This should drastically + improve performance/memory impact of some applications + +* NumPy effort is progressing forward, with multi-dimensional arrays coming + soon. + +* There are two brand new JIT assembler backends, notably for the PowerPC and + ARM processors. + +Fundraising +=========== + +It's maybe worth mentioning that we're running fundraising campaigns for +NumPy effort in PyPy and for Python 3 in PyPy. In case you want to see any +of those happen faster, we urge you to donate to `numpy proposal`_ or +`py3k proposal`_. In case you want PyPy to progress, but you trust us with +the general direction, you can always donate to the `general pot`_. + +.. _`numpy proposal`: http://pypy.org/numpydonate.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`general pot`: http://pypy.org diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -51,6 +51,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." 
+ state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \"%s\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \"%s\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -94,17 +112,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." - for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \"%s\" missing from %s" - err = err % (missing, host) - w_err = space.wrap(err) - raise OperationError(space.w_TypeError, w_err) - raise AssertionError("should not reach here") class mod(AST): @@ -112,7 +119,6 @@ class Module(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -128,7 +134,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Module') + self.missing_field(space, ['body'], 'Module') else: pass w_list = self.w_body @@ -145,7 +151,6 @@ class Interactive(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -161,7 +166,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Interactive') + self.missing_field(space, ['body'], 'Interactive') else: pass w_list = self.w_body @@ -178,7 +183,6 @@ class Expression(mod): - def __init__(self, body): self.body = body self.initialization_state = 1 @@ -192,7 +196,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Expression') + self.missing_field(space, ['body'], 'Expression') else: pass self.body.sync_app_attrs(space) @@ -200,7 +204,6 @@ class Suite(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -216,7 +219,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Suite') + self.missing_field(space, ['body'], 'Suite') else: pass w_list = self.w_body @@ -232,15 +235,13 @@ class stmt(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class FunctionDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, args, body, decorator_list, lineno, col_offset): self.name = name self.args = args @@ -264,7 +265,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'args', 'body', 'decorator_list', 'lineno', 'col_offset'], 'FunctionDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef') else: pass self.args.sync_app_attrs(space) @@ -292,9 +293,6 @@ class ClassDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, bases, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases @@ -320,7 +318,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'bases', 'body', 'decorator_list', 
'lineno', 'col_offset'], 'ClassDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef') else: pass w_list = self.w_bases @@ -357,9 +355,6 @@ class Return(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -374,10 +369,10 @@ return visitor.visit_Return(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Return') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Return') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -385,9 +380,6 @@ class Delete(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, targets, lineno, col_offset): self.targets = targets self.w_targets = None @@ -404,7 +396,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['targets', 'lineno', 'col_offset'], 'Delete') + self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete') else: pass w_list = self.w_targets @@ -421,9 +413,6 @@ class Assign(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, targets, value, lineno, col_offset): self.targets = targets self.w_targets = None @@ -442,7 +431,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['targets', 'value', 'lineno', 'col_offset'], 'Assign') + self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') else: pass w_list = self.w_targets @@ -460,9 +449,6 @@ class AugAssign(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, target, op, value, lineno, col_offset): self.target = target self.op = op @@ -480,7 +466,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['target', 'op', 'value', 'lineno', 'col_offset'], 'AugAssign') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') else: pass self.target.sync_app_attrs(space) @@ -489,9 +475,6 @@ class Print(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, dest, values, nl, lineno, col_offset): self.dest = dest self.values = values @@ -511,10 +494,10 @@ return visitor.visit_Print(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 30: - missing_field(space, self.initialization_state, [None, 'values', 'nl', 'lineno', 'col_offset'], 'Print') + if (self.initialization_state & ~4) ^ 27: + self.missing_field(space, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.dest = None if self.dest: self.dest.sync_app_attrs(space) @@ -532,9 +515,6 @@ class For(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, target, iter, body, orelse, lineno, col_offset): self.target = target self.iter = iter @@ -559,7 +539,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['target', 'iter', 'body', 'orelse', 'lineno', 'col_offset'], 'For') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 
'orelse'], 'For') else: pass self.target.sync_app_attrs(space) @@ -588,9 +568,6 @@ class While(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -613,7 +590,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'While') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') else: pass self.test.sync_app_attrs(space) @@ -641,9 +618,6 @@ class If(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -666,7 +640,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'If') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') else: pass self.test.sync_app_attrs(space) @@ -694,9 +668,6 @@ class With(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, context_expr, optional_vars, body, lineno, col_offset): self.context_expr = context_expr self.optional_vars = optional_vars @@ -717,10 +688,10 @@ return visitor.visit_With(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 29: - missing_field(space, self.initialization_state, ['context_expr', None, 'body', 'lineno', 'col_offset'], 'With') + if (self.initialization_state & ~8) ^ 23: + self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.optional_vars = None self.context_expr.sync_app_attrs(space) if self.optional_vars: @@ -739,9 +710,6 @@ class Raise(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, inst, tback, lineno, col_offset): self.type = type self.inst = inst @@ -762,14 +730,14 @@ return visitor.visit_Raise(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~7) ^ 24: - missing_field(space, self.initialization_state, [None, None, None, 'lineno', 'col_offset'], 'Raise') + if (self.initialization_state & ~28) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.inst = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.tback = None if self.type: self.type.sync_app_attrs(space) @@ -781,9 +749,6 @@ class TryExcept(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, handlers, orelse, lineno, col_offset): self.body = body self.w_body = None @@ -808,7 +773,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['body', 'handlers', 'orelse', 'lineno', 'col_offset'], 'TryExcept') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') else: pass w_list = self.w_body @@ -845,9 +810,6 @@ class TryFinally(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, body, finalbody, lineno, col_offset): self.body = body self.w_body = None @@ -868,7 +830,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 
15: - missing_field(space, self.initialization_state, ['body', 'finalbody', 'lineno', 'col_offset'], 'TryFinally') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') else: pass w_list = self.w_body @@ -895,9 +857,6 @@ class Assert(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, test, msg, lineno, col_offset): self.test = test self.msg = msg @@ -914,10 +873,10 @@ return visitor.visit_Assert(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 13: - missing_field(space, self.initialization_state, ['test', None, 'lineno', 'col_offset'], 'Assert') + if (self.initialization_state & ~8) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.msg = None self.test.sync_app_attrs(space) if self.msg: @@ -926,9 +885,6 @@ class Import(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -945,7 +901,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Import') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import') else: pass w_list = self.w_names @@ -962,9 +918,6 @@ class ImportFrom(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, module, names, level, lineno, col_offset): self.module = module self.names = names @@ -982,12 +935,12 @@ return visitor.visit_ImportFrom(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~5) ^ 26: - missing_field(space, self.initialization_state, [None, 'names', None, 'lineno', 'col_offset'], 'ImportFrom') + if (self.initialization_state & ~20) ^ 11: + self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.module = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.level = 0 w_list = self.w_names if w_list is not None: @@ -1003,9 +956,6 @@ class Exec(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, globals, locals, lineno, col_offset): self.body = body self.globals = globals @@ -1025,12 +975,12 @@ return visitor.visit_Exec(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~6) ^ 25: - missing_field(space, self.initialization_state, ['body', None, None, 'lineno', 'col_offset'], 'Exec') + if (self.initialization_state & ~24) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'body', None, None], 'Exec') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.globals = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.locals = None self.body.sync_app_attrs(space) if self.globals: @@ -1041,9 +991,6 @@ class Global(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -1058,7 +1005,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Global') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') else: pass w_list = self.w_names @@ -1072,9 +1019,6 @@ class Expr(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def 
__init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -1089,7 +1033,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Expr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr') else: pass self.value.sync_app_attrs(space) @@ -1097,9 +1041,6 @@ class Pass(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1112,16 +1053,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Pass') + self.missing_field(space, ['lineno', 'col_offset'], 'Pass') else: pass class Break(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1134,16 +1072,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Break') + self.missing_field(space, ['lineno', 'col_offset'], 'Break') else: pass class Continue(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1156,21 +1091,19 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Continue') + self.missing_field(space, ['lineno', 'col_offset'], 'Continue') else: pass class expr(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class BoolOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, values, lineno, col_offset): self.op = op self.values = values @@ -1188,7 +1121,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'values', 'lineno', 'col_offset'], 'BoolOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp') else: pass w_list = self.w_values @@ -1205,9 +1138,6 @@ class BinOp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, op, right, lineno, col_offset): self.left = left self.op = op @@ -1225,7 +1155,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'op', 'right', 'lineno', 'col_offset'], 'BinOp') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'op', 'right'], 'BinOp') else: pass self.left.sync_app_attrs(space) @@ -1234,9 +1164,6 @@ class UnaryOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, operand, lineno, col_offset): self.op = op self.operand = operand @@ -1252,7 +1179,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'operand', 'lineno', 'col_offset'], 'UnaryOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'operand'], 'UnaryOp') else: pass self.operand.sync_app_attrs(space) @@ -1260,9 +1187,6 @@ class Lambda(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, args, body, lineno, col_offset): self.args = args self.body = body @@ -1279,7 +1203,7 @@ def sync_app_attrs(self, space): if 
(self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['args', 'body', 'lineno', 'col_offset'], 'Lambda') + self.missing_field(space, ['lineno', 'col_offset', 'args', 'body'], 'Lambda') else: pass self.args.sync_app_attrs(space) @@ -1288,9 +1212,6 @@ class IfExp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -1309,7 +1230,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'IfExp') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'IfExp') else: pass self.test.sync_app_attrs(space) @@ -1319,9 +1240,6 @@ class Dict(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, keys, values, lineno, col_offset): self.keys = keys self.w_keys = None @@ -1342,7 +1260,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['keys', 'values', 'lineno', 'col_offset'], 'Dict') + self.missing_field(space, ['lineno', 'col_offset', 'keys', 'values'], 'Dict') else: pass w_list = self.w_keys @@ -1369,9 +1287,6 @@ class Set(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, elts, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1388,7 +1303,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['elts', 'lineno', 'col_offset'], 'Set') + self.missing_field(space, ['lineno', 'col_offset', 'elts'], 'Set') else: pass w_list = self.w_elts @@ -1405,9 +1320,6 @@ class ListComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1426,7 +1338,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'ListComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'ListComp') else: pass self.elt.sync_app_attrs(space) @@ -1444,9 +1356,6 @@ class SetComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1465,7 +1374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'SetComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'SetComp') else: pass self.elt.sync_app_attrs(space) @@ -1483,9 +1392,6 @@ class DictComp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, key, value, generators, lineno, col_offset): self.key = key self.value = value @@ -1506,7 +1412,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['key', 'value', 'generators', 'lineno', 'col_offset'], 'DictComp') + self.missing_field(space, ['lineno', 'col_offset', 'key', 'value', 'generators'], 'DictComp') else: pass self.key.sync_app_attrs(space) @@ -1525,9 +1431,6 @@ class GeneratorExp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1546,7 +1449,7 @@ def sync_app_attrs(self, space): 
if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'GeneratorExp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'GeneratorExp') else: pass self.elt.sync_app_attrs(space) @@ -1564,9 +1467,6 @@ class Yield(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1581,10 +1481,10 @@ return visitor.visit_Yield(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Yield') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Yield') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -1592,9 +1492,6 @@ class Compare(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, ops, comparators, lineno, col_offset): self.left = left self.ops = ops @@ -1615,7 +1512,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'ops', 'comparators', 'lineno', 'col_offset'], 'Compare') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'ops', 'comparators'], 'Compare') else: pass self.left.sync_app_attrs(space) @@ -1640,9 +1537,6 @@ class Call(expr): - _lineno_mask = 32 - _col_offset_mask = 64 - def __init__(self, func, args, keywords, starargs, kwargs, lineno, col_offset): self.func = func self.args = args @@ -1670,12 +1564,12 @@ return visitor.visit_Call(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~24) ^ 103: - missing_field(space, self.initialization_state, ['func', 'args', 'keywords', None, None, 'lineno', 'col_offset'], 'Call') + if (self.initialization_state & ~96) ^ 31: + self.missing_field(space, ['lineno', 'col_offset', 'func', 'args', 'keywords', None, None], 'Call') else: - if not self.initialization_state & 8: + if not self.initialization_state & 32: self.starargs = None - if not self.initialization_state & 16: + if not self.initialization_state & 64: self.kwargs = None self.func.sync_app_attrs(space) w_list = self.w_args @@ -1706,9 +1600,6 @@ class Repr(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1723,7 +1614,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Repr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Repr') else: pass self.value.sync_app_attrs(space) @@ -1731,9 +1622,6 @@ class Num(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, n, lineno, col_offset): self.n = n expr.__init__(self, lineno, col_offset) @@ -1747,16 +1635,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['n', 'lineno', 'col_offset'], 'Num') + self.missing_field(space, ['lineno', 'col_offset', 'n'], 'Num') else: pass class Str(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, s, lineno, col_offset): self.s = s expr.__init__(self, lineno, col_offset) @@ -1770,16 +1655,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 
7: - missing_field(space, self.initialization_state, ['s', 'lineno', 'col_offset'], 'Str') + self.missing_field(space, ['lineno', 'col_offset', 's'], 'Str') else: pass class Attribute(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, attr, ctx, lineno, col_offset): self.value = value self.attr = attr @@ -1796,7 +1678,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'attr', 'ctx', 'lineno', 'col_offset'], 'Attribute') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'attr', 'ctx'], 'Attribute') else: pass self.value.sync_app_attrs(space) @@ -1804,9 +1686,6 @@ class Subscript(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, slice, ctx, lineno, col_offset): self.value = value self.slice = slice @@ -1824,7 +1703,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'slice', 'ctx', 'lineno', 'col_offset'], 'Subscript') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'slice', 'ctx'], 'Subscript') else: pass self.value.sync_app_attrs(space) @@ -1833,9 +1712,6 @@ class Name(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, id, ctx, lineno, col_offset): self.id = id self.ctx = ctx @@ -1850,16 +1726,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['id', 'ctx', 'lineno', 'col_offset'], 'Name') + self.missing_field(space, ['lineno', 'col_offset', 'id', 'ctx'], 'Name') else: pass class List(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1877,7 +1750,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'List') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'List') else: pass w_list = self.w_elts @@ -1894,9 +1767,6 @@ class Tuple(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1914,7 +1784,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'Tuple') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'Tuple') else: pass w_list = self.w_elts @@ -1931,9 +1801,6 @@ class Const(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1947,7 +1814,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Const') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Const') else: pass @@ -2009,7 +1876,6 @@ class Ellipsis(slice): - def __init__(self): self.initialization_state = 0 @@ -2021,14 +1887,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 0: - missing_field(space, self.initialization_state, [], 'Ellipsis') + self.missing_field(space, [], 'Ellipsis') else: pass class Slice(slice): - def __init__(self, lower, upper, step): self.lower = lower self.upper = upper @@ -2049,7 +1914,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & 
~7) ^ 0: - missing_field(space, self.initialization_state, [None, None, None], 'Slice') + self.missing_field(space, [None, None, None], 'Slice') else: if not self.initialization_state & 1: self.lower = None @@ -2067,7 +1932,6 @@ class ExtSlice(slice): - def __init__(self, dims): self.dims = dims self.w_dims = None @@ -2083,7 +1947,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['dims'], 'ExtSlice') + self.missing_field(space, ['dims'], 'ExtSlice') else: pass w_list = self.w_dims @@ -2100,7 +1964,6 @@ class Index(slice): - def __init__(self, value): self.value = value self.initialization_state = 1 @@ -2114,7 +1977,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['value'], 'Index') + self.missing_field(space, ['value'], 'Index') else: pass self.value.sync_app_attrs(space) @@ -2377,7 +2240,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['target', 'iter', 'ifs'], 'comprehension') + self.missing_field(space, ['target', 'iter', 'ifs'], 'comprehension') else: pass self.target.sync_app_attrs(space) @@ -2394,15 +2257,13 @@ node.sync_app_attrs(space) class excepthandler(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class ExceptHandler(excepthandler): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, name, body, lineno, col_offset): self.type = type self.name = name @@ -2424,12 +2285,12 @@ return visitor.visit_ExceptHandler(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~3) ^ 28: - missing_field(space, self.initialization_state, [None, None, 'body', 'lineno', 'col_offset'], 'ExceptHandler') + if (self.initialization_state & ~12) ^ 19: + self.missing_field(space, ['lineno', 'col_offset', None, None, 'body'], 'ExceptHandler') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.name = None if self.type: self.type.sync_app_attrs(space) @@ -2470,7 +2331,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~6) ^ 9: - missing_field(space, self.initialization_state, ['args', None, None, 'defaults'], 'arguments') + self.missing_field(space, ['args', None, None, 'defaults'], 'arguments') else: if not self.initialization_state & 2: self.vararg = None @@ -2513,7 +2374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['arg', 'value'], 'keyword') + self.missing_field(space, ['arg', 'value'], 'keyword') else: pass self.value.sync_app_attrs(space) @@ -2533,7 +2394,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~2) ^ 1: - missing_field(space, self.initialization_state, ['name', None], 'alias') + self.missing_field(space, ['name', None], 'alias') else: if not self.initialization_state & 2: self.asname = None @@ -3019,6 +2880,8 @@ def Expression_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3098,7 +2961,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -3112,14 +2975,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def stmt_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -3133,7 +2996,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 stmt.typedef = typedef.TypeDef("stmt", AST.typedef, @@ -3149,7 +3012,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3163,14 +3026,14 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def FunctionDef_get_args(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -3184,10 +3047,10 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def FunctionDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3201,10 +3064,10 @@ def FunctionDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def FunctionDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3218,7 +3081,7 @@ def FunctionDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _FunctionDef_field_unroller = unrolling_iterable(['name', 'args', 'body', 'decorator_list']) def FunctionDef_init(space, w_self, __args__): @@ -3254,7 +3117,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not 
None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3268,10 +3131,10 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ClassDef_get_bases(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases') if w_self.w_bases is None: @@ -3285,10 +3148,10 @@ def ClassDef_set_bases(space, w_self, w_new_value): w_self.w_bases = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ClassDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3302,10 +3165,10 @@ def ClassDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def ClassDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3319,7 +3182,7 @@ def ClassDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _ClassDef_field_unroller = unrolling_iterable(['name', 'bases', 'body', 'decorator_list']) def ClassDef_init(space, w_self, __args__): @@ -3356,7 +3219,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3364,13 +3227,15 @@ def Return_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Return_field_unroller = unrolling_iterable(['value']) def Return_init(space, w_self, __args__): @@ -3397,7 +3262,7 @@ ) def Delete_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3411,7 +3276,7 @@ def Delete_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Delete_field_unroller = unrolling_iterable(['targets']) def 
Delete_init(space, w_self, __args__): @@ -3439,7 +3304,7 @@ ) def Assign_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3453,14 +3318,14 @@ def Assign_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3468,13 +3333,15 @@ def Assign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assign_field_unroller = unrolling_iterable(['targets', 'value']) def Assign_init(space, w_self, __args__): @@ -3507,7 +3374,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3515,20 +3382,22 @@ def AugAssign_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def AugAssign_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -3544,14 +3413,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def AugAssign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3559,13 +3428,15 @@ def AugAssign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise 
OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _AugAssign_field_unroller = unrolling_iterable(['target', 'op', 'value']) def AugAssign_init(space, w_self, __args__): @@ -3598,7 +3469,7 @@ w_obj = w_self.getdictvalue(space, 'dest') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dest') return space.wrap(w_self.dest) @@ -3606,16 +3477,18 @@ def Print_set_dest(space, w_self, w_new_value): try: w_self.dest = space.interp_w(expr, w_new_value, True) + if type(w_self.dest) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'dest', w_new_value) return w_self.deldictvalue(space, 'dest') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Print_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -3629,14 +3502,14 @@ def Print_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Print_get_nl(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'nl') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl') return space.wrap(w_self.nl) @@ -3650,7 +3523,7 @@ w_self.setdictvalue(space, 'nl', w_new_value) return w_self.deldictvalue(space, 'nl') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Print_field_unroller = unrolling_iterable(['dest', 'values', 'nl']) def Print_init(space, w_self, __args__): @@ -3684,7 +3557,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3692,20 +3565,22 @@ def For_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def For_get_iter(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'iter') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute 
'%s'", typename, 'iter') return space.wrap(w_self.iter) @@ -3713,16 +3588,18 @@ def For_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'iter', w_new_value) return w_self.deldictvalue(space, 'iter') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def For_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3736,10 +3613,10 @@ def For_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def For_get_orelse(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3753,7 +3630,7 @@ def For_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _For_field_unroller = unrolling_iterable(['target', 'iter', 'body', 'orelse']) def For_init(space, w_self, __args__): @@ -3789,7 +3666,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3797,16 +3674,18 @@ def While_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def While_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3820,10 +3699,10 @@ def While_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def While_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3837,7 +3716,7 @@ def While_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _While_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def While_init(space, w_self, __args__): @@ -3872,7 +3751,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + 
if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3880,16 +3759,18 @@ def If_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def If_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3903,10 +3784,10 @@ def If_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def If_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3920,7 +3801,7 @@ def If_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _If_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def If_init(space, w_self, __args__): @@ -3955,7 +3836,7 @@ w_obj = w_self.getdictvalue(space, 'context_expr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr') return space.wrap(w_self.context_expr) @@ -3963,20 +3844,22 @@ def With_set_context_expr(space, w_self, w_new_value): try: w_self.context_expr = space.interp_w(expr, w_new_value, False) + if type(w_self.context_expr) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'context_expr', w_new_value) return w_self.deldictvalue(space, 'context_expr') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def With_get_optional_vars(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'optional_vars') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars') return space.wrap(w_self.optional_vars) @@ -3984,16 +3867,18 @@ def With_set_optional_vars(space, w_self, w_new_value): try: w_self.optional_vars = space.interp_w(expr, w_new_value, True) + if type(w_self.optional_vars) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'optional_vars', w_new_value) return w_self.deldictvalue(space, 'optional_vars') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def With_get_body(space, w_self): - if not 
w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4007,7 +3892,7 @@ def With_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _With_field_unroller = unrolling_iterable(['context_expr', 'optional_vars', 'body']) def With_init(space, w_self, __args__): @@ -4041,7 +3926,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -4049,20 +3934,22 @@ def Raise_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Raise_get_inst(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'inst') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst') return space.wrap(w_self.inst) @@ -4070,20 +3957,22 @@ def Raise_set_inst(space, w_self, w_new_value): try: w_self.inst = space.interp_w(expr, w_new_value, True) + if type(w_self.inst) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'inst', w_new_value) return w_self.deldictvalue(space, 'inst') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Raise_get_tback(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'tback') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'tback') return space.wrap(w_self.tback) @@ -4091,13 +3980,15 @@ def Raise_set_tback(space, w_self, w_new_value): try: w_self.tback = space.interp_w(expr, w_new_value, True) + if type(w_self.tback) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'tback', w_new_value) return w_self.deldictvalue(space, 'tback') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Raise_field_unroller = unrolling_iterable(['type', 'inst', 'tback']) def Raise_init(space, w_self, __args__): @@ -4126,7 +4017,7 @@ ) def TryExcept_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4140,10 +4031,10 @@ def 
TryExcept_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryExcept_get_handlers(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers') if w_self.w_handlers is None: @@ -4157,10 +4048,10 @@ def TryExcept_set_handlers(space, w_self, w_new_value): w_self.w_handlers = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def TryExcept_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -4174,7 +4065,7 @@ def TryExcept_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _TryExcept_field_unroller = unrolling_iterable(['body', 'handlers', 'orelse']) def TryExcept_init(space, w_self, __args__): @@ -4206,7 +4097,7 @@ ) def TryFinally_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4220,10 +4111,10 @@ def TryFinally_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryFinally_get_finalbody(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody') if w_self.w_finalbody is None: @@ -4237,7 +4128,7 @@ def TryFinally_set_finalbody(space, w_self, w_new_value): w_self.w_finalbody = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _TryFinally_field_unroller = unrolling_iterable(['body', 'finalbody']) def TryFinally_init(space, w_self, __args__): @@ -4271,7 +4162,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -4279,20 +4170,22 @@ def Assert_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assert_get_msg(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'msg') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg') return 
space.wrap(w_self.msg) @@ -4300,13 +4193,15 @@ def Assert_set_msg(space, w_self, w_new_value): try: w_self.msg = space.interp_w(expr, w_new_value, True) + if type(w_self.msg) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'msg', w_new_value) return w_self.deldictvalue(space, 'msg') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assert_field_unroller = unrolling_iterable(['test', 'msg']) def Assert_init(space, w_self, __args__): @@ -4334,7 +4229,7 @@ ) def Import_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4348,7 +4243,7 @@ def Import_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Import_field_unroller = unrolling_iterable(['names']) def Import_init(space, w_self, __args__): @@ -4380,7 +4275,7 @@ w_obj = w_self.getdictvalue(space, 'module') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module') return space.wrap(w_self.module) @@ -4397,10 +4292,10 @@ w_self.setdictvalue(space, 'module', w_new_value) return w_self.deldictvalue(space, 'module') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ImportFrom_get_names(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4414,14 +4309,14 @@ def ImportFrom_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ImportFrom_get_level(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'level') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level') return space.wrap(w_self.level) @@ -4435,7 +4330,7 @@ w_self.setdictvalue(space, 'level', w_new_value) return w_self.deldictvalue(space, 'level') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ImportFrom_field_unroller = unrolling_iterable(['module', 'names', 'level']) def ImportFrom_init(space, w_self, __args__): @@ -4469,7 +4364,7 @@ w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -4477,20 +4372,22 @@ def Exec_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, 
space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Exec_get_globals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'globals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals') return space.wrap(w_self.globals) @@ -4498,20 +4395,22 @@ def Exec_set_globals(space, w_self, w_new_value): try: w_self.globals = space.interp_w(expr, w_new_value, True) + if type(w_self.globals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'globals', w_new_value) return w_self.deldictvalue(space, 'globals') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Exec_get_locals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'locals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals') return space.wrap(w_self.locals) @@ -4519,13 +4418,15 @@ def Exec_set_locals(space, w_self, w_new_value): try: w_self.locals = space.interp_w(expr, w_new_value, True) + if type(w_self.locals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'locals', w_new_value) return w_self.deldictvalue(space, 'locals') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Exec_field_unroller = unrolling_iterable(['body', 'globals', 'locals']) def Exec_init(space, w_self, __args__): @@ -4554,7 +4455,7 @@ ) def Global_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4568,7 +4469,7 @@ def Global_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Global_field_unroller = unrolling_iterable(['names']) def Global_init(space, w_self, __args__): @@ -4600,7 +4501,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -4608,13 +4509,15 @@ def Expr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Expr_field_unroller = unrolling_iterable(['value']) def Expr_init(space, w_self, 
__args__): @@ -4696,7 +4599,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -4710,14 +4613,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def expr_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -4731,7 +4634,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 expr.typedef = typedef.TypeDef("expr", AST.typedef, @@ -4747,7 +4650,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return boolop_to_class[w_self.op - 1]() @@ -4763,10 +4666,10 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BoolOp_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -4780,7 +4683,7 @@ def BoolOp_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _BoolOp_field_unroller = unrolling_iterable(['op', 'values']) def BoolOp_init(space, w_self, __args__): @@ -4813,7 +4716,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -4821,20 +4724,22 @@ def BinOp_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BinOp_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -4850,14 +4755,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def BinOp_get_right(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'right') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right') return space.wrap(w_self.right) @@ -4865,13 +4770,15 @@ def BinOp_set_right(space, w_self, w_new_value): try: w_self.right = space.interp_w(expr, w_new_value, False) + if type(w_self.right) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'right', w_new_value) return w_self.deldictvalue(space, 'right') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _BinOp_field_unroller = unrolling_iterable(['left', 'op', 'right']) def BinOp_init(space, w_self, __args__): @@ -4904,7 +4811,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return unaryop_to_class[w_self.op - 1]() @@ -4920,14 +4827,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def UnaryOp_get_operand(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'operand') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand') return space.wrap(w_self.operand) @@ -4935,13 +4842,15 @@ def UnaryOp_set_operand(space, w_self, w_new_value): try: w_self.operand = space.interp_w(expr, w_new_value, False) + if type(w_self.operand) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'operand', w_new_value) return w_self.deldictvalue(space, 'operand') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _UnaryOp_field_unroller = unrolling_iterable(['op', 'operand']) def UnaryOp_init(space, w_self, __args__): @@ -4973,7 +4882,7 @@ w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -4987,14 +4896,14 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Lambda_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5002,13 +4911,15 @@ def Lambda_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Lambda_field_unroller = unrolling_iterable(['args', 'body']) def Lambda_init(space, w_self, __args__): @@ -5040,7 +4951,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -5048,20 +4959,22 @@ def IfExp_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def IfExp_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5069,20 +4982,22 @@ def IfExp_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def IfExp_get_orelse(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'orelse') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') return space.wrap(w_self.orelse) @@ -5090,13 +5005,15 @@ def IfExp_set_orelse(space, w_self, w_new_value): try: w_self.orelse = space.interp_w(expr, w_new_value, False) + if type(w_self.orelse) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'orelse', w_new_value) return w_self.deldictvalue(space, 'orelse') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _IfExp_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def IfExp_init(space, w_self, __args__): @@ -5125,7 +5042,7 @@ ) def Dict_get_keys(space, w_self): - if not 
w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys') if w_self.w_keys is None: @@ -5139,10 +5056,10 @@ def Dict_set_keys(space, w_self, w_new_value): w_self.w_keys = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Dict_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -5156,7 +5073,7 @@ def Dict_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Dict_field_unroller = unrolling_iterable(['keys', 'values']) def Dict_init(space, w_self, __args__): @@ -5186,7 +5103,7 @@ ) def Set_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -5200,7 +5117,7 @@ def Set_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Set_field_unroller = unrolling_iterable(['elts']) def Set_init(space, w_self, __args__): @@ -5232,7 +5149,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5240,16 +5157,18 @@ def ListComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ListComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5263,7 +5182,7 @@ def ListComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _ListComp_field_unroller = unrolling_iterable(['elt', 'generators']) def ListComp_init(space, w_self, __args__): @@ -5296,7 +5215,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5304,16 +5223,18 @@ def SetComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) 
except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def SetComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5327,7 +5248,7 @@ def SetComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _SetComp_field_unroller = unrolling_iterable(['elt', 'generators']) def SetComp_init(space, w_self, __args__): @@ -5360,7 +5281,7 @@ w_obj = w_self.getdictvalue(space, 'key') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key') return space.wrap(w_self.key) @@ -5368,20 +5289,22 @@ def DictComp_set_key(space, w_self, w_new_value): try: w_self.key = space.interp_w(expr, w_new_value, False) + if type(w_self.key) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'key', w_new_value) return w_self.deldictvalue(space, 'key') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def DictComp_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5389,16 +5312,18 @@ def DictComp_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def DictComp_get_generators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5412,7 +5337,7 @@ def DictComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _DictComp_field_unroller = unrolling_iterable(['key', 'value', 'generators']) def DictComp_init(space, w_self, __args__): @@ -5446,7 +5371,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5454,16 +5379,18 @@ def GeneratorExp_set_elt(space, 
w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def GeneratorExp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5477,7 +5404,7 @@ def GeneratorExp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _GeneratorExp_field_unroller = unrolling_iterable(['elt', 'generators']) def GeneratorExp_init(space, w_self, __args__): @@ -5510,7 +5437,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5518,13 +5445,15 @@ def Yield_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Yield_field_unroller = unrolling_iterable(['value']) def Yield_init(space, w_self, __args__): @@ -5555,7 +5484,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -5563,16 +5492,18 @@ def Compare_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Compare_get_ops(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops') if w_self.w_ops is None: @@ -5586,10 +5517,10 @@ def Compare_set_ops(space, w_self, w_new_value): w_self.w_ops = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Compare_get_comparators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators') if w_self.w_comparators is None: @@ -5603,7 +5534,7 @@ 
def Compare_set_comparators(space, w_self, w_new_value): w_self.w_comparators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Compare_field_unroller = unrolling_iterable(['left', 'ops', 'comparators']) def Compare_init(space, w_self, __args__): @@ -5638,7 +5569,7 @@ w_obj = w_self.getdictvalue(space, 'func') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func') return space.wrap(w_self.func) @@ -5646,16 +5577,18 @@ def Call_set_func(space, w_self, w_new_value): try: w_self.func = space.interp_w(expr, w_new_value, False) + if type(w_self.func) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'func', w_new_value) return w_self.deldictvalue(space, 'func') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Call_get_args(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') if w_self.w_args is None: @@ -5669,10 +5602,10 @@ def Call_set_args(space, w_self, w_new_value): w_self.w_args = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Call_get_keywords(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') if w_self.w_keywords is None: @@ -5686,14 +5619,14 @@ def Call_set_keywords(space, w_self, w_new_value): w_self.w_keywords = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def Call_get_starargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'starargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') return space.wrap(w_self.starargs) @@ -5701,20 +5634,22 @@ def Call_set_starargs(space, w_self, w_new_value): try: w_self.starargs = space.interp_w(expr, w_new_value, True) + if type(w_self.starargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'starargs', w_new_value) return w_self.deldictvalue(space, 'starargs') - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 def Call_get_kwargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'kwargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 16: + if not w_self.initialization_state & 64: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') return space.wrap(w_self.kwargs) @@ -5722,13 +5657,15 @@ def Call_set_kwargs(space, w_self, w_new_value): try: w_self.kwargs = space.interp_w(expr, w_new_value, True) + if type(w_self.kwargs) is expr: + raise OperationError(space.w_TypeError, 
space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'kwargs', w_new_value) return w_self.deldictvalue(space, 'kwargs') - w_self.initialization_state |= 16 + w_self.initialization_state |= 64 _Call_field_unroller = unrolling_iterable(['func', 'args', 'keywords', 'starargs', 'kwargs']) def Call_init(space, w_self, __args__): @@ -5765,7 +5702,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5773,13 +5710,15 @@ def Repr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Repr_field_unroller = unrolling_iterable(['value']) def Repr_init(space, w_self, __args__): @@ -5810,7 +5749,7 @@ w_obj = w_self.getdictvalue(space, 'n') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n') return w_self.n @@ -5824,7 +5763,7 @@ w_self.setdictvalue(space, 'n', w_new_value) return w_self.deldictvalue(space, 'n') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Num_field_unroller = unrolling_iterable(['n']) def Num_init(space, w_self, __args__): @@ -5855,7 +5794,7 @@ w_obj = w_self.getdictvalue(space, 's') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's') return w_self.s @@ -5869,7 +5808,7 @@ w_self.setdictvalue(space, 's', w_new_value) return w_self.deldictvalue(space, 's') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Str_field_unroller = unrolling_iterable(['s']) def Str_init(space, w_self, __args__): @@ -5900,7 +5839,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5908,20 +5847,22 @@ def Attribute_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Attribute_get_attr(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'attr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not 
w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr') return space.wrap(w_self.attr) @@ -5935,14 +5876,14 @@ w_self.setdictvalue(space, 'attr', w_new_value) return w_self.deldictvalue(space, 'attr') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Attribute_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -5958,7 +5899,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Attribute_field_unroller = unrolling_iterable(['value', 'attr', 'ctx']) def Attribute_init(space, w_self, __args__): @@ -5991,7 +5932,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5999,20 +5940,22 @@ def Subscript_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Subscript_get_slice(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'slice') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice') return space.wrap(w_self.slice) @@ -6020,20 +5963,22 @@ def Subscript_set_slice(space, w_self, w_new_value): try: w_self.slice = space.interp_w(slice, w_new_value, False) + if type(w_self.slice) is slice: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'slice', w_new_value) return w_self.deldictvalue(space, 'slice') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Subscript_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6049,7 +5994,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Subscript_field_unroller = unrolling_iterable(['value', 'slice', 'ctx']) def Subscript_init(space, w_self, __args__): @@ 
-6082,7 +6027,7 @@ w_obj = w_self.getdictvalue(space, 'id') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id') return space.wrap(w_self.id) @@ -6096,14 +6041,14 @@ w_self.setdictvalue(space, 'id', w_new_value) return w_self.deldictvalue(space, 'id') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Name_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6119,7 +6064,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Name_field_unroller = unrolling_iterable(['id', 'ctx']) def Name_init(space, w_self, __args__): @@ -6147,7 +6092,7 @@ ) def List_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6161,14 +6106,14 @@ def List_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def List_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6184,7 +6129,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _List_field_unroller = unrolling_iterable(['elts', 'ctx']) def List_init(space, w_self, __args__): @@ -6213,7 +6158,7 @@ ) def Tuple_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6227,14 +6172,14 @@ def Tuple_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Tuple_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6250,7 +6195,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Tuple_field_unroller = unrolling_iterable(['elts', 
'ctx']) def Tuple_init(space, w_self, __args__): @@ -6283,7 +6228,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return w_self.value @@ -6297,7 +6242,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Const_field_unroller = unrolling_iterable(['value']) def Const_init(space, w_self, __args__): @@ -6409,6 +6354,8 @@ def Slice_set_lower(space, w_self, w_new_value): try: w_self.lower = space.interp_w(expr, w_new_value, True) + if type(w_self.lower) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6430,6 +6377,8 @@ def Slice_set_upper(space, w_self, w_new_value): try: w_self.upper = space.interp_w(expr, w_new_value, True) + if type(w_self.upper) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6451,6 +6400,8 @@ def Slice_set_step(space, w_self, w_new_value): try: w_self.step = space.interp_w(expr, w_new_value, True) + if type(w_self.step) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6540,6 +6491,8 @@ def Index_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6809,6 +6762,8 @@ def comprehension_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6830,6 +6785,8 @@ def comprehension_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6887,7 +6844,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -6901,14 +6858,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def excepthandler_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -6922,7 +6879,7 @@ 
w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 excepthandler.typedef = typedef.TypeDef("excepthandler", AST.typedef, @@ -6938,7 +6895,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -6946,20 +6903,22 @@ def ExceptHandler_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ExceptHandler_get_name(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -6967,16 +6926,18 @@ def ExceptHandler_set_name(space, w_self, w_new_value): try: w_self.name = space.interp_w(expr, w_new_value, True) + if type(w_self.name) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ExceptHandler_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -6990,7 +6951,7 @@ def ExceptHandler_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ExceptHandler_field_unroller = unrolling_iterable(['type', 'name', 'body']) def ExceptHandler_init(space, w_self, __args__): @@ -7164,6 +7125,8 @@ def keyword_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -1,6 +1,5 @@ """codegen helpers and AST constant folding.""" import sys -import itertools from pypy.interpreter.astcompiler import ast, consts, misc from pypy.tool import stdlib_opcode as ops @@ -146,8 +145,7 @@ } unrolling_unary_folders = unrolling_iterable(unary_folders.items()) -for folder in itertools.chain(binary_folders.itervalues(), - unary_folders.itervalues()): +for folder in binary_folders.values() + unary_folders.values(): folder._always_inline_ = True del folder diff --git 
a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -79,6 +79,7 @@ else: self.emit("class %s(AST):" % (base,)) if sum.attributes: + self.emit("") args = ", ".join(attr.name.value for attr in sum.attributes) self.emit("def __init__(self, %s):" % (args,), 1) for attr in sum.attributes: @@ -114,7 +115,7 @@ else: names.append(repr(field.name.value)) sub = (", ".join(names), name.value) - self.emit("missing_field(space, self.initialization_state, [%s], %r)" + self.emit("self.missing_field(space, [%s], %r)" % sub, 3) self.emit("else:", 2) # Fill in all the default fields. @@ -195,17 +196,13 @@ def visitConstructor(self, cons, base, extra_attributes): self.emit("class %s(%s):" % (cons.name, base)) self.emit("") - for field in self.data.cons_attributes[cons]: - subst = (field.name, self.data.field_masks[field]) - self.emit("_%s_mask = %i" % subst, 1) - self.emit("") self.make_constructor(cons.fields, cons, extra_attributes, base) self.emit("") self.emit("def walkabout(self, visitor):", 1) self.emit("visitor.visit_%s(self)" % (cons.name,), 2) self.emit("") self.make_mutate_over(cons, cons.name) - self.make_var_syncer(cons.fields + self.data.cons_attributes[cons], + self.make_var_syncer(self.data.cons_attributes[cons] + cons.fields, cons, cons.name) def visitField(self, field): @@ -324,7 +321,7 @@ def visitSum(self, sum, name): for field in sum.attributes: - self.make_property(field, name, True) + self.make_property(field, name) self.make_typedef(name, "AST", sum.attributes, fields_name="_attributes") if not is_simple_sum(sum): @@ -400,13 +397,10 @@ def visitField(self, field, name): self.make_property(field, name) - def make_property(self, field, name, different_masks=False): + def make_property(self, field, name): func = "def %s_get_%s(space, w_self):" % (name, field.name) self.emit(func) - if different_masks: - flag = "w_self._%s_mask" % (field.name,) - else: - flag = self.data.field_masks[field] + flag = self.data.field_masks[field] if not field.seq: self.emit("if w_self.w_dict is not None:", 1) self.emit(" w_obj = w_self.getdictvalue(space, '%s')" % (field.name,), 1) @@ -458,6 +452,11 @@ config = (field.name, field.type, repr(field.opt)) self.emit("w_self.%s = space.interp_w(%s, w_new_value, %s)" % config, 2) + if field.type.value not in self.data.prod_simple: + self.emit("if type(w_self.%s) is %s:" % ( + field.name, field.type), 2) + self.emit("raise OperationError(space.w_TypeError, " + "space.w_None)", 3) else: level = 2 if field.opt and field.type.value != "int": @@ -505,7 +504,10 @@ optional_mask = 0 for i, field in enumerate(fields): flag = 1 << i - field_masks[field] = flag + if field not in field_masks: + field_masks[field] = flag + else: + assert field_masks[field] == flag if field.opt: optional_mask |= flag else: @@ -518,9 +520,9 @@ if is_simple_sum(sum): simple_types.add(tp.name.value) else: + attrs = [field for field in sum.attributes] for cons in sum.types: - attrs = [copy_field(field) for field in sum.attributes] - add_masks(cons.fields + attrs, cons) + add_masks(attrs + cons.fields, cons) cons_attributes[cons] = attrs else: prod = tp.value @@ -588,6 +590,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." 
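
The missing_field() helper whose body follows distinguishes a field that was never supplied from one whose value landed in the instance dict because it had the wrong type. A plain-Python sketch of the same scan, using made-up inputs:

    def find_missing(state, required, instance_dict, host):
        # 'required' holds one name per bit, with None for optional slots
        for i in range(len(required)):
            if (state >> i) & 1:
                continue                  # field was initialized
            fieldname = required[i]
            if fieldname is None:
                continue                  # optional field, nothing to report
            if fieldname not in instance_dict:
                return 'required field "%s" missing from %s' % (fieldname, host)
            return 'incorrect type for field "%s" in %s' % (fieldname, host)
        raise AssertionError("should not reach here")

    # bit 1 (col_offset) is clear and nothing was stored in the dict:
    assert find_missing(0b101, ['lineno', 'col_offset', 'id'], {}, 'Name') == \
           'required field "col_offset" missing from Name'
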
+ state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \\"%s\\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \\"%s\\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -631,15 +651,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." - for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \\"%s\\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) - raise AssertionError("should not reach here") """ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1,4 +1,3 @@ -import itertools import pypy from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction @@ -188,6 +187,12 @@ # ------------------------------------------------------------------- + def is_w(self, space, w_other): + return self is w_other + + def immutable_unique_id(self, space): + return None + def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) raise OperationError(space.w_TypeError, w_msg) @@ -482,6 +487,16 @@ 'parser', 'fcntl', '_codecs', 'binascii' ] + # These modules are treated like CPython treats built-in modules, + # i.e. they always shadow any xx.py. The other modules are treated + # like CPython treats extension modules, and are loaded in sys.path + # order by the fake entry '.../lib_pypy/__extensions__'. + MODULES_THAT_ALWAYS_SHADOW = dict.fromkeys([ + '__builtin__', '__pypy__', '_ast', '_codecs', '_sre', '_warnings', + '_weakref', 'errno', 'exceptions', 'gc', 'imp', 'marshal', + 'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport', + ], None) + def make_builtins(self): "NOT_RPYTHON: only for initializing the space." @@ -513,8 +528,8 @@ exception_types_w = self.export_builtin_exceptions() # initialize with "bootstrap types" from objspace (e.g. w_None) - types_w = itertools.chain(self.get_builtin_types().iteritems(), - exception_types_w.iteritems()) + types_w = (self.get_builtin_types().items() + + exception_types_w.items()) for name, w_type in types_w: self.setitem(self.builtin.w_dict, self.wrap(name), w_type) @@ -681,9 +696,20 @@ """shortcut for space.is_true(space.eq(w_obj1, w_obj2))""" return self.is_w(w_obj1, w_obj2) or self.is_true(self.eq(w_obj1, w_obj2)) - def is_w(self, w_obj1, w_obj2): - """shortcut for space.is_true(space.is_(w_obj1, w_obj2))""" - return self.is_true(self.is_(w_obj1, w_obj2)) + def is_(self, w_one, w_two): + return self.newbool(self.is_w(w_one, w_two)) + + def is_w(self, w_one, w_two): + # done by a method call on w_two (and not on w_one, because of the + # expected programming style where we say "if x is None" or + # "if x is object"). 
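
In other words, space.is_w() now double-dispatches to the object on the right-hand side, and space.id() asks the object for an immutable identity before falling back to compute_unique_id(). A toy version of the protocol, with invented classes rather than PyPy's real ones:

    class W_Root(object):
        def is_w(self, space, w_other):
            return self is w_other            # default: plain identity
        def immutable_unique_id(self, space):
            return None                       # default: no value-based id

    class W_SmallInt(W_Root):
        # an immutable type may declare two boxes of equal value identical
        def __init__(self, intval):
            self.intval = intval
        def is_w(self, space, w_other):
            return (isinstance(w_other, W_SmallInt) and
                    w_other.intval == self.intval)
        def immutable_unique_id(self, space):
            return ('int', self.intval)       # stands in for a wrapped id

    class ToySpace(object):
        def is_w(self, w_one, w_two):
            return w_two.is_w(self, w_one)    # dispatch on the second operand

    assert ToySpace().is_w(W_SmallInt(3), W_SmallInt(3))
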
+ return w_two.is_w(self, w_one) + + def id(self, w_obj): + w_result = w_obj.immutable_unique_id(self) + if w_result is None: + w_result = self.wrap(compute_unique_id(w_obj)) + return w_result def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" @@ -777,22 +803,63 @@ """Unpack an iterable object into a real (interpreter-level) list. Raise an OperationError(w_ValueError) if the length is wrong.""" w_iterator = self.iter(w_iterable) - # If we know the expected length we can preallocate. if expected_length == -1: + # xxx special hack for speed + from pypy.interpreter.generator import GeneratorIterator + if isinstance(w_iterator, GeneratorIterator): + lst_w = [] + w_iterator.unpack_into(lst_w) + return lst_w + # /xxx + return self._unpackiterable_unknown_length(w_iterator, w_iterable) + else: + lst_w = self._unpackiterable_known_length(w_iterator, + expected_length) + return lst_w[:] # make the resulting list resizable + + @jit.dont_look_inside + def _unpackiterable_unknown_length(self, w_iterator, w_iterable): + # Unpack a variable-size list of unknown length. + # The JIT does not look inside this function because it + # contains a loop (made explicit with the decorator above). + # + # If we can guess the expected length we can preallocate. + try: + lgt_estimate = self.len_w(w_iterable) + except OperationError, o: + if (not o.match(self, self.w_AttributeError) and + not o.match(self, self.w_TypeError)): + raise + items = [] + else: try: - lgt_estimate = self.len_w(w_iterable) - except OperationError, o: - if (not o.match(self, self.w_AttributeError) and - not o.match(self, self.w_TypeError)): + items = newlist(lgt_estimate) + except MemoryError: + items = [] # it might have lied + # + while True: + try: + w_item = self.next(w_iterator) + except OperationError, e: + if not e.match(self, self.w_StopIteration): raise - items = [] - else: - try: - items = newlist(lgt_estimate) - except MemoryError: - items = [] # it might have lied - else: - items = [None] * expected_length + break # done + items.append(w_item) + # + return items + + @jit.dont_look_inside + def _unpackiterable_known_length(self, w_iterator, expected_length): + # Unpack a known length list, without letting the JIT look inside. + # Implemented by just calling the @jit.unroll_safe version, but + # the JIT stopped looking inside already. 
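
unpackiterable() is thus split into an opaque variant for unknown lengths (a real loop the JIT must not trace into) and an unrollable variant for when the expected length is known. Stripped of the JIT decorators and the wrapped-object plumbing, the known-length case amounts to:

    def unpack_known_length(iterator, expected_length):
        # plain-Python sketch of _unpackiterable_known_length_jitlook
        items = [None] * expected_length
        idx = 0
        for item in iterator:
            if idx == expected_length:
                raise ValueError("too many values to unpack")
            items[idx] = item
            idx += 1
        if idx < expected_length:
            plural = "" if idx == 1 else "s"
            raise ValueError("need more than %d value%s to unpack"
                             % (idx, plural))
        return items

    assert unpack_known_length(iter([1, 2, 3]), 3) == [1, 2, 3]
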
+ return self._unpackiterable_known_length_jitlook(w_iterator, + expected_length) + + @jit.unroll_safe + def _unpackiterable_known_length_jitlook(self, w_iterator, + expected_length): + items = [None] * expected_length idx = 0 while True: try: @@ -801,26 +868,29 @@ if not e.match(self, self.w_StopIteration): raise break # done - if expected_length != -1 and idx == expected_length: + if idx == expected_length: raise OperationError(self.w_ValueError, - self.wrap("too many values to unpack")) - if expected_length == -1: - items.append(w_item) - else: - items[idx] = w_item + self.wrap("too many values to unpack")) + items[idx] = w_item idx += 1 - if expected_length != -1 and idx < expected_length: + if idx < expected_length: if idx == 1: plural = "" else: plural = "s" - raise OperationError(self.w_ValueError, - self.wrap("need more than %d value%s to unpack" % - (idx, plural))) + raise operationerrfmt(self.w_ValueError, + "need more than %d value%s to unpack", + idx, plural) return items - unpackiterable_unroll = jit.unroll_safe(func_with_new_name(unpackiterable, - 'unpackiterable_unroll')) + def unpackiterable_unroll(self, w_iterable, expected_length): + # Like unpackiterable(), but for the cases where we have + # an expected_length and want to unroll when JITted. + # Returns a fixed-size list. + w_iterator = self.iter(w_iterable) + assert expected_length != -1 + return self._unpackiterable_known_length_jitlook(w_iterator, + expected_length) def fixedview(self, w_iterable, expected_length=-1): """ A fixed list view of w_iterable. Don't modify the result @@ -835,6 +905,16 @@ """ return self.unpackiterable(w_iterable, expected_length) + def listview_str(self, w_list): + """ Return a list of unwrapped strings out of a list of strings. If the + argument is not a list or does not contain only strings, return None. + May return None anyway. + """ + return None + + def newlist_str(self, list_s): + return self.newlist([self.wrap(s) for s in list_s]) + @jit.unroll_safe def exception_match(self, w_exc_type, w_check_class): """Checks if the given exception type matches 'w_check_class'.""" @@ -969,9 +1049,6 @@ def isinstance_w(self, w_obj, w_type): return self.is_true(self.isinstance(w_obj, w_type)) - def id(self, w_obj): - return self.wrap(compute_unique_id(w_obj)) - # The code below only works # for the simple case (new-style instance). # These methods are patched with the full logic by the __builtin__ @@ -1543,6 +1620,8 @@ 'UnicodeError', 'ValueError', 'ZeroDivisionError', + 'UnicodeEncodeError', + 'UnicodeDecodeError', ] ## Irregular part of the interface: diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -1,14 +1,15 @@ +from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.gateway import NoneNotWrapped +from pypy.interpreter.pyopcode import LoopBlock from pypy.rlib import jit -from pypy.interpreter.pyopcode import LoopBlock +from pypy.rlib.objectmodel import specialize class GeneratorIterator(Wrappable): "An iterator created by a generator." 
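
The new listview_str()/newlist_str() hooks above follow the usual optional-fast-path convention: a space may hand back unwrapped strings directly, but is always allowed to answer None, so callers need a generic fallback. An illustrative caller, not code from the patch:

    def concat_strings(space, w_list):
        strings = space.listview_str(w_list)
        if strings is None:
            # fast path not available: unwrap item by item
            strings = [space.str_w(w_item)
                       for w_item in space.unpackiterable(w_list)]
        return ''.join(strings)
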
_immutable_fields_ = ['pycode'] - + def __init__(self, frame): self.space = frame.space self.frame = frame # turned into None when frame_finished_execution @@ -81,7 +82,7 @@ # if the frame is now marked as finished, it was RETURNed from if frame.frame_finished_execution: self.frame = None - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, space.w_None) else: return w_result # YIELDed finally: @@ -97,21 +98,21 @@ def throw(self, w_type, w_val, w_tb): from pypy.interpreter.pytraceback import check_traceback space = self.space - + msg = "throw() third argument must be a traceback object" if space.is_w(w_tb, space.w_None): tb = None else: tb = check_traceback(space, w_tb, msg) - + operr = OperationError(w_type, w_val, tb) operr.normalize_exception(space) return self.send_ex(space.w_None, operr) - + def descr_next(self): """x.next() -> the next value, or raise StopIteration""" return self.send_ex(self.space.w_None) - + def descr_close(self): """x.close(arg) -> raise GeneratorExit inside generator.""" assert isinstance(self, GeneratorIterator) @@ -124,7 +125,7 @@ e.match(space, space.w_GeneratorExit): return space.w_None raise - + if w_retval is not None: msg = "generator ignored GeneratorExit" raise OperationError(space.w_RuntimeError, space.wrap(msg)) @@ -155,3 +156,44 @@ "interrupting generator of ") break block = block.previous + + # Results can be either an RPython list of W_Root, or it can be an + # app-level W_ListObject, which also has an append() method, that's why we + # generate 2 versions of the function and 2 jit drivers. + def _create_unpack_into(): + jitdriver = jit.JitDriver(greens=['pycode'], + reds=['self', 'frame', 'results']) + def unpack_into(self, results): + """This is a hack for performance: runs the generator and collects + all produced items in a list.""" + # XXX copied and simplified version of send_ex() + space = self.space + if self.running: + raise OperationError(space.w_ValueError, + space.wrap('generator already executing')) + frame = self.frame + if frame is None: # already finished + return + self.running = True + try: + pycode = self.pycode + while True: + jitdriver.jit_merge_point(self=self, frame=frame, + results=results, pycode=pycode) + try: + w_result = frame.execute_frame(space.w_None) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break + # if the frame is now marked as finished, it was RETURNed from + if frame.frame_finished_execution: + break + results.append(w_result) # YIELDed + finally: + frame.f_backref = jit.vref_None + self.running = False + self.frame = None + return unpack_into + unpack_into = _create_unpack_into() + unpack_into_w = _create_unpack_into() \ No newline at end of file diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated, instantiate from pypy.rlib.jit import hint from pypy.rlib.debug import make_sure_not_resized, check_nonneg -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rlib import jit from pypy.tool import stdlib_opcode from pypy.tool.stdlib_opcode import host_bytecode_spec @@ -167,7 +167,7 @@ # Execution starts just after the last_instr. Initially, # last_instr is -1. After a generator suspends it points to # the YIELD_VALUE instruction. 
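
The _create_unpack_into() factory above exists because the results argument is sometimes an RPython list of W_Root and sometimes an app-level W_ListObject; generating the function twice gives each caller its own copy and its own JitDriver. The shape of the trick, reduced to plain Python:

    def _create_unpack_into():
        # every call returns a distinct copy of the function; the real
        # version also creates a fresh JitDriver per copy
        def unpack_into(generator, results):
            for value in generator:
                results.append(value)
        return unpack_into

    unpack_into = _create_unpack_into()
    unpack_into_w = _create_unpack_into()
    assert unpack_into is not unpack_into_w     # two separate functions
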
- next_instr = self.last_instr + 1 + next_instr = r_uint(self.last_instr + 1) if next_instr != 0: self.pushvalue(w_inputvalue) # @@ -691,6 +691,7 @@ handlerposition = space.int_w(w_handlerposition) valuestackdepth = space.int_w(w_valuestackdepth) assert valuestackdepth >= 0 + assert handlerposition >= 0 blk = instantiate(get_block_class(opname)) blk.handlerposition = handlerposition blk.valuestackdepth = valuestackdepth diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -837,6 +837,7 @@ raise Yield def jump_absolute(self, jumpto, next_instr, ec): + check_nonneg(jumpto) return jumpto def JUMP_FORWARD(self, jumpby, next_instr): @@ -1278,7 +1279,7 @@ def handle(self, frame, unroller): next_instr = self.really_handle(frame, unroller) # JIT hack - return next_instr + return r_uint(next_instr) def really_handle(self, frame, unroller): """ Purely abstract method diff --git a/pypy/interpreter/test/test_executioncontext.py b/pypy/interpreter/test/test_executioncontext.py --- a/pypy/interpreter/test/test_executioncontext.py +++ b/pypy/interpreter/test/test_executioncontext.py @@ -292,7 +292,7 @@ import os, sys print sys.executable, self.tmpfile if sys.platform == "win32": - cmdformat = '""%s" "%s""' # excellent! tons of "! + cmdformat = '"%s" "%s"' else: cmdformat = "'%s' '%s'" g = os.popen(cmdformat % (sys.executable, self.tmpfile), 'r') diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -587,7 +587,7 @@ assert isinstance(meth2, Method) assert meth2.call_args(args) == obj1 # Check method returned from unbound_method.__get__() - w_meth3 = descr_function_get(space, func, None, space.type(obj2)) + w_meth3 = descr_function_get(space, func, space.w_None, space.type(obj2)) meth3 = space.unwrap(w_meth3) w_meth4 = meth3.descr_method_get(obj2, space.w_None) meth4 = space.unwrap(w_meth4) diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -117,7 +117,7 @@ g = f() raises(NameError, g.throw, NameError, "Error", None) - + def test_throw_fail(self): def f(): yield 1 @@ -129,7 +129,7 @@ yield 1 g = f() raises(TypeError, g.throw, list()) - + def test_throw_fail3(self): def f(): yield 1 @@ -188,7 +188,7 @@ g = f() g.next() raises(NameError, g.close) - + def test_close_fail(self): def f(): try: @@ -267,3 +267,15 @@ assert r.startswith("= self.min_fragment: - self.total_mallocs -= (stop - middle) + self.total_mallocs -= r_uint(stop - middle) self._add_free_block(middle, stop) return True else: @@ -77,7 +77,7 @@ # Hack to make sure that mcs are not within 32-bits of one # another for testing purposes rmmap.hint.pos += 0x80000000 - size - self.total_memory_allocated += size + self.total_memory_allocated += r_uint(size) data = rffi.cast(lltype.Signed, data) return self._add_free_block(data, data + size) diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -5,11 +5,7 @@ from pypy.jit.metainterp.history import AbstractDescr, getkind from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker, longlong - -# The point of the class organization in this file is to make instances -# as compact as possible. 
This is done by not storing the field size or -# the 'is_pointer_field' flag in the instance itself but in the class -# (in methods actually) using a few classes instead of just one. +from pypy.jit.codewriter.longlong import is_longlong class GcCache(object): @@ -19,6 +15,7 @@ self._cache_size = {} self._cache_field = {} self._cache_array = {} + self._cache_arraylen = {} self._cache_call = {} self._cache_interiorfield = {} @@ -26,24 +23,15 @@ assert isinstance(STRUCT, lltype.GcStruct) def init_array_descr(self, ARRAY, arraydescr): - assert isinstance(ARRAY, lltype.GcArray) + assert (isinstance(ARRAY, lltype.GcArray) or + isinstance(ARRAY, lltype.GcStruct) and ARRAY._arrayfld) -if lltype.SignedLongLong is lltype.Signed: - def is_longlong(TYPE): - return False -else: - assert rffi.sizeof(lltype.SignedLongLong) == rffi.sizeof(lltype.Float) - def is_longlong(TYPE): - return TYPE in (lltype.SignedLongLong, lltype.UnsignedLongLong) - # ____________________________________________________________ # SizeDescrs class SizeDescr(AbstractDescr): size = 0 # help translation - is_immutable = False - tid = llop.combine_ushort(lltype.Signed, 0, 0) def __init__(self, size, count_fields_if_immut=-1): @@ -77,204 +65,158 @@ cache[STRUCT] = sizedescr return sizedescr + # ____________________________________________________________ # FieldDescrs -class BaseFieldDescr(AbstractDescr): +FLAG_POINTER = 'P' +FLAG_FLOAT = 'F' +FLAG_UNSIGNED = 'U' +FLAG_SIGNED = 'S' +FLAG_STRUCT = 'X' +FLAG_VOID = 'V' + +class FieldDescr(AbstractDescr): + name = '' offset = 0 # help translation - name = '' - _clsname = '' + field_size = 0 + flag = '\x00' - def __init__(self, name, offset): + def __init__(self, name, offset, field_size, flag): self.name = name self.offset = offset + self.field_size = field_size + self.flag = flag + + def is_pointer_field(self): + return self.flag == FLAG_POINTER + + def is_float_field(self): + return self.flag == FLAG_FLOAT + + def is_field_signed(self): + return self.flag == FLAG_SIGNED def sort_key(self): return self.offset - def get_field_size(self, translate_support_code): - raise NotImplementedError + def repr_of_descr(self): + return '' % (self.flag, self.name, self.offset) - _is_pointer_field = False # unless overridden by GcPtrFieldDescr - _is_float_field = False # unless overridden by FloatFieldDescr - _is_field_signed = False # unless overridden by XxxFieldDescr - - def is_pointer_field(self): - return self._is_pointer_field - - def is_float_field(self): - return self._is_float_field - - def is_field_signed(self): - return self._is_field_signed - - def repr_of_descr(self): - return '<%s %s %s>' % (self._clsname, self.name, self.offset) - - -class NonGcPtrFieldDescr(BaseFieldDescr): - _clsname = 'NonGcPtrFieldDescr' - def get_field_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrFieldDescr(NonGcPtrFieldDescr): - _clsname = 'GcPtrFieldDescr' - _is_pointer_field = True - -def getFieldDescrClass(TYPE): - return getDescrClass(TYPE, BaseFieldDescr, GcPtrFieldDescr, - NonGcPtrFieldDescr, 'Field', 'get_field_size', - '_is_float_field', '_is_field_signed') def get_field_descr(gccache, STRUCT, fieldname): cache = gccache._cache_field try: return cache[STRUCT][fieldname] except KeyError: - offset, _ = symbolic.get_field_token(STRUCT, fieldname, - gccache.translate_support_code) + offset, size = symbolic.get_field_token(STRUCT, fieldname, + gccache.translate_support_code) FIELDTYPE = getattr(STRUCT, fieldname) + flag = get_type_flag(FIELDTYPE) 
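
After this rewrite a single FieldDescr instance carries a one-character flag instead of encoding the field kind in a dedicated subclass, and consumers query it through the is_*_field() predicates. Roughly, and ignoring the lltype machinery behind get_type_flag():

    FLAG_POINTER, FLAG_FLOAT, FLAG_UNSIGNED, FLAG_SIGNED = 'P', 'F', 'U', 'S'

    class FieldDescrSketch(object):
        # one class for every field kind; the flag replaces the old
        # GcPtrFieldDescr / FloatFieldDescr / ... subclasses
        def __init__(self, name, offset, field_size, flag):
            self.name = name
            self.offset = offset
            self.field_size = field_size
            self.flag = flag
        def is_pointer_field(self):
            return self.flag == FLAG_POINTER
        def is_float_field(self):
            return self.flag == FLAG_FLOAT
        def is_field_signed(self):
            return self.flag == FLAG_SIGNED

    # a made-up signed field at offset 8, 4 bytes wide:
    descr = FieldDescrSketch('S.x', 8, 4, FLAG_SIGNED)
    assert descr.is_field_signed() and not descr.is_pointer_field()
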
name = '%s.%s' % (STRUCT._name, fieldname) - fielddescr = getFieldDescrClass(FIELDTYPE)(name, offset) + fielddescr = FieldDescr(name, offset, size, flag) cachedict = cache.setdefault(STRUCT, {}) cachedict[fieldname] = fielddescr return fielddescr +def get_type_flag(TYPE): + if isinstance(TYPE, lltype.Ptr): + if TYPE.TO._gckind == 'gc': + return FLAG_POINTER + else: + return FLAG_UNSIGNED + if isinstance(TYPE, lltype.Struct): + return FLAG_STRUCT + if TYPE is lltype.Float or is_longlong(TYPE): + return FLAG_FLOAT + if (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and + rffi.cast(TYPE, -1) == -1): + return FLAG_SIGNED + return FLAG_UNSIGNED + +def get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT): + cache = gccache._cache_arraylen + try: + return cache[ARRAY_OR_STRUCT] + except KeyError: + tsc = gccache.translate_support_code + (_, _, ofs) = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + size = symbolic.get_size(lltype.Signed, tsc) + result = FieldDescr("len", ofs, size, get_type_flag(lltype.Signed)) + cache[ARRAY_OR_STRUCT] = result + return result + + # ____________________________________________________________ # ArrayDescrs -_A = lltype.GcArray(lltype.Signed) # a random gcarray -_AF = lltype.GcArray(lltype.Float) # an array of C doubles +class ArrayDescr(AbstractDescr): + tid = 0 + basesize = 0 # workaround for the annotator + itemsize = 0 + lendescr = None + flag = '\x00' - -class BaseArrayDescr(AbstractDescr): - _clsname = '' - tid = llop.combine_ushort(lltype.Signed, 0, 0) - - def get_base_size(self, translate_support_code): - basesize, _, _ = symbolic.get_array_token(_A, translate_support_code) - return basesize - - def get_ofs_length(self, translate_support_code): - _, _, ofslength = symbolic.get_array_token(_A, translate_support_code) - return ofslength - - def get_item_size(self, translate_support_code): - raise NotImplementedError - - _is_array_of_pointers = False # unless overridden by GcPtrArrayDescr - _is_array_of_floats = False # unless overridden by FloatArrayDescr - _is_array_of_structs = False # unless overridden by StructArrayDescr - _is_item_signed = False # unless overridden by XxxArrayDescr + def __init__(self, basesize, itemsize, lendescr, flag): + self.basesize = basesize + self.itemsize = itemsize + self.lendescr = lendescr # or None, if no length + self.flag = flag def is_array_of_pointers(self): - return self._is_array_of_pointers + return self.flag == FLAG_POINTER def is_array_of_floats(self): - return self._is_array_of_floats + return self.flag == FLAG_FLOAT + + def is_item_signed(self): + return self.flag == FLAG_SIGNED def is_array_of_structs(self): - return self._is_array_of_structs - - def is_item_signed(self): - return self._is_item_signed + return self.flag == FLAG_STRUCT def repr_of_descr(self): - return '<%s>' % self._clsname + return '' % (self.flag, self.itemsize) -class NonGcPtrArrayDescr(BaseArrayDescr): - _clsname = 'NonGcPtrArrayDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) -class GcPtrArrayDescr(NonGcPtrArrayDescr): - _clsname = 'GcPtrArrayDescr' - _is_array_of_pointers = True - -class FloatArrayDescr(BaseArrayDescr): - _clsname = 'FloatArrayDescr' - _is_array_of_floats = True - def get_base_size(self, translate_support_code): - basesize, _, _ = symbolic.get_array_token(_AF, translate_support_code) - return basesize - def get_item_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class 
StructArrayDescr(BaseArrayDescr): - _clsname = 'StructArrayDescr' - _is_array_of_structs = True - -class BaseArrayNoLengthDescr(BaseArrayDescr): - def get_base_size(self, translate_support_code): - return 0 - - def get_ofs_length(self, translate_support_code): - return -1 - -class NonGcPtrArrayNoLengthDescr(BaseArrayNoLengthDescr): - _clsname = 'NonGcPtrArrayNoLengthDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayNoLengthDescr(NonGcPtrArrayNoLengthDescr): - _clsname = 'GcPtrArrayNoLengthDescr' - _is_array_of_pointers = True - -def getArrayDescrClass(ARRAY): - if ARRAY.OF is lltype.Float: - return FloatArrayDescr - elif isinstance(ARRAY.OF, lltype.Struct): - class Descr(StructArrayDescr): - _clsname = '%sArrayDescr' % ARRAY.OF._name - def get_item_size(self, translate_support_code): - return symbolic.get_size(ARRAY.OF, translate_support_code) - Descr.__name__ = Descr._clsname - return Descr - return getDescrClass(ARRAY.OF, BaseArrayDescr, GcPtrArrayDescr, - NonGcPtrArrayDescr, 'Array', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def getArrayNoLengthDescrClass(ARRAY): - return getDescrClass(ARRAY.OF, BaseArrayNoLengthDescr, GcPtrArrayNoLengthDescr, - NonGcPtrArrayNoLengthDescr, 'ArrayNoLength', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def get_array_descr(gccache, ARRAY): +def get_array_descr(gccache, ARRAY_OR_STRUCT): cache = gccache._cache_array try: - return cache[ARRAY] + return cache[ARRAY_OR_STRUCT] except KeyError: - # we only support Arrays that are either GcArrays, or raw no-length - # non-gc Arrays. - if ARRAY._hints.get('nolength', False): - assert not isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayNoLengthDescrClass(ARRAY)() + tsc = gccache.translate_support_code + basesize, itemsize, _ = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + if isinstance(ARRAY_OR_STRUCT, lltype.Array): + ARRAY_INSIDE = ARRAY_OR_STRUCT else: - assert isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayDescrClass(ARRAY)() - # verify basic assumption that all arrays' basesize and ofslength - # are equal - basesize, itemsize, ofslength = symbolic.get_array_token(ARRAY, False) - assert basesize == arraydescr.get_base_size(False) - assert itemsize == arraydescr.get_item_size(False) - if not ARRAY._hints.get('nolength', False): - assert ofslength == arraydescr.get_ofs_length(False) - if isinstance(ARRAY, lltype.GcArray): - gccache.init_array_descr(ARRAY, arraydescr) - cache[ARRAY] = arraydescr + ARRAY_INSIDE = ARRAY_OR_STRUCT._flds[ARRAY_OR_STRUCT._arrayfld] + if ARRAY_INSIDE._hints.get('nolength', False): + lendescr = None + else: + lendescr = get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT) + flag = get_type_flag(ARRAY_INSIDE.OF) + arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag) + if ARRAY_OR_STRUCT._gckind == 'gc': + gccache.init_array_descr(ARRAY_OR_STRUCT, arraydescr) + cache[ARRAY_OR_STRUCT] = arraydescr return arraydescr + # ____________________________________________________________ # InteriorFieldDescr class InteriorFieldDescr(AbstractDescr): - arraydescr = BaseArrayDescr() # workaround for the annotator - fielddescr = BaseFieldDescr('', 0) + arraydescr = ArrayDescr(0, 0, None, '\x00') # workaround for the annotator + fielddescr = FieldDescr('', 0, 0, '\x00') def __init__(self, arraydescr, fielddescr): + assert arraydescr.flag == FLAG_STRUCT self.arraydescr = arraydescr self.fielddescr = fielddescr + def sort_key(self): + return 
self.fielddescr.sort_key() + def is_pointer_field(self): return self.fielddescr.is_pointer_field() @@ -284,33 +226,86 @@ def repr_of_descr(self): return '' % self.fielddescr.repr_of_descr() -def get_interiorfield_descr(gc_ll_descr, ARRAY, FIELDTP, name): +def get_interiorfield_descr(gc_ll_descr, ARRAY, name): cache = gc_ll_descr._cache_interiorfield try: - return cache[(ARRAY, FIELDTP, name)] + return cache[(ARRAY, name)] except KeyError: arraydescr = get_array_descr(gc_ll_descr, ARRAY) - fielddescr = get_field_descr(gc_ll_descr, FIELDTP, name) + fielddescr = get_field_descr(gc_ll_descr, ARRAY.OF, name) descr = InteriorFieldDescr(arraydescr, fielddescr) - cache[(ARRAY, FIELDTP, name)] = descr + cache[(ARRAY, name)] = descr return descr +def get_dynamic_interiorfield_descr(gc_ll_descr, offset, width, fieldsize, + is_pointer, is_float, is_signed): + arraydescr = ArrayDescr(0, width, None, FLAG_STRUCT) + if is_pointer: + assert not is_float + flag = FLAG_POINTER + elif is_float: + flag = FLAG_FLOAT + elif is_signed: + flag = FLAG_SIGNED + else: + flag = FLAG_UNSIGNED + fielddescr = FieldDescr('dynamic', offset, fieldsize, flag) + return InteriorFieldDescr(arraydescr, fielddescr) + + # ____________________________________________________________ # CallDescrs -class BaseCallDescr(AbstractDescr): - _clsname = '' - loop_token = None +class CallDescr(AbstractDescr): arg_classes = '' # <-- annotation hack - ffi_flags = 0 + result_type = '\x00' + result_flag = '\x00' + ffi_flags = 1 + call_stub_i = staticmethod(lambda func, args_i, args_r, args_f: + 0) + call_stub_r = staticmethod(lambda func, args_i, args_r, args_f: + lltype.nullptr(llmemory.GCREF.TO)) + call_stub_f = staticmethod(lambda func,args_i,args_r,args_f: + longlong.ZEROF) - def __init__(self, arg_classes, extrainfo=None, ffi_flags=0): - self.arg_classes = arg_classes # string of "r" and "i" (ref/int) + def __init__(self, arg_classes, result_type, result_signed, result_size, + extrainfo=None, ffi_flags=1): + """ + 'arg_classes' is a string of characters, one per argument: + 'i', 'r', 'f', 'L', 'S' + + 'result_type' is one character from the same list or 'v' + + 'result_signed' is a boolean True/False + """ + self.arg_classes = arg_classes + self.result_type = result_type + self.result_size = result_size self.extrainfo = extrainfo self.ffi_flags = ffi_flags + # NB. the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which + # makes sense on Windows as it's the one for all the C functions + # we are compiling together with the JIT. On non-Windows platforms + # it is just ignored anyway. 
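
The docstring above fixes the encoding: one letter per argument ('i' int, 'r' gc reference, 'f' float, 'L' long long, 'S' single float) plus a result letter, with 'v' for void. A stand-alone illustration of building such a key, not the real map_type_to_argclass(), which works on lltype types:

    def describe_call(arg_kinds, result_kind):
        letters = {'int': 'i', 'ref': 'r', 'float': 'f',
                   'longlong': 'L', 'singlefloat': 'S', 'void': 'v'}
        arg_classes = ''.join([letters[kind] for kind in arg_kinds])
        return arg_classes, letters[result_kind]

    # e.g. a call taking (gc pointer, double, int) and returning an int:
    assert describe_call(['ref', 'float', 'int'], 'int') == ('rfi', 'i')
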
+ if result_type == 'v': + result_flag = FLAG_VOID + elif result_type == 'i': + if result_signed: + result_flag = FLAG_SIGNED + else: + result_flag = FLAG_UNSIGNED + elif result_type == history.REF: + result_flag = FLAG_POINTER + elif result_type == history.FLOAT or result_type == 'L': + result_flag = FLAG_FLOAT + elif result_type == 'S': + result_flag = FLAG_UNSIGNED + else: + raise NotImplementedError("result_type = '%s'" % (result_type,)) + self.result_flag = result_flag def __repr__(self): - res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) + res = 'CallDescr(%s)' % (self.arg_classes,) extraeffect = getattr(self.extrainfo, 'extraeffect', None) if extraeffect is not None: res += ' EF=%r' % extraeffect @@ -338,16 +333,20 @@ def get_arg_types(self): return self.arg_classes - def get_return_type(self): - return self._return_type + def get_result_type(self): + return self.result_type - def get_result_size(self, translate_support_code): - raise NotImplementedError + def get_result_size(self): + return self.result_size def is_result_signed(self): - return False # unless overridden + return self.result_flag == FLAG_SIGNED def create_call_stub(self, rtyper, RESULT): + from pypy.rlib.clibffi import FFI_DEFAULT_ABI + assert self.get_call_conv() == FFI_DEFAULT_ABI, ( + "%r: create_call_stub() with a non-default call ABI" % (self,)) + def process(c): if c == 'L': assert longlong.supports_longlong @@ -379,18 +378,26 @@ seen = {'i': 0, 'r': 0, 'f': 0} args = ", ".join([process(c) for c in self.arg_classes]) - if self.get_return_type() == history.INT: + result_type = self.get_result_type() + if result_type == history.INT: result = 'rffi.cast(lltype.Signed, res)' - elif self.get_return_type() == history.REF: + category = 'i' + elif result_type == history.REF: + assert RESULT == llmemory.GCREF # should be ensured by the caller result = 'lltype.cast_opaque_ptr(llmemory.GCREF, res)' - elif self.get_return_type() == history.FLOAT: + category = 'r' + elif result_type == history.FLOAT: result = 'longlong.getfloatstorage(res)' - elif self.get_return_type() == 'L': + category = 'f' + elif result_type == 'L': result = 'rffi.cast(lltype.SignedLongLong, res)' - elif self.get_return_type() == history.VOID: - result = 'None' - elif self.get_return_type() == 'S': + category = 'f' + elif result_type == history.VOID: + result = '0' + category = 'i' + elif result_type == 'S': result = 'longlong.singlefloat2int(res)' + category = 'i' else: assert 0 source = py.code.Source(""" @@ -404,10 +411,13 @@ d = globals().copy() d.update(locals()) exec source.compile() in d - self.call_stub = d['call_stub'] + call_stub = d['call_stub'] + # store the function into one of three attributes, to preserve + # type-correctness of the return value + setattr(self, 'call_stub_%s' % category, call_stub) def verify_types(self, args_i, args_r, args_f, return_type): - assert self._return_type in return_type + assert self.result_type in return_type assert (self.arg_classes.count('i') + self.arg_classes.count('S')) == len(args_i or ()) assert self.arg_classes.count('r') == len(args_r or ()) @@ -415,161 +425,56 @@ self.arg_classes.count('L')) == len(args_f or ()) def repr_of_descr(self): - return '<%s>' % self._clsname + res = 'Call%s %d' % (self.result_type, self.result_size) + if self.arg_classes: + res += ' ' + self.arg_classes + if self.extrainfo: + res += ' EF=%d' % self.extrainfo.extraeffect + oopspecindex = self.extrainfo.oopspecindex + if oopspecindex: + res += ' OS=%d' % oopspecindex + return '<%s>' % res -class 
BaseIntCallDescr(BaseCallDescr): - # Base class of the various subclasses of descrs corresponding to - # calls having a return kind of 'int' (including non-gc pointers). - # The inheritance hierarchy is a bit different than with other Descr - # classes because of the 'call_stub' attribute, which is of type - # - # lambda func, args_i, args_r, args_f --> int/ref/float/void - # - # The purpose of BaseIntCallDescr is to be the parent of all classes - # in which 'call_stub' has a return kind of 'int'. - _return_type = history.INT - call_stub = staticmethod(lambda func, args_i, args_r, args_f: 0) - - _is_result_signed = False # can be overridden in XxxCallDescr - def is_result_signed(self): - return self._is_result_signed - -class DynamicIntCallDescr(BaseIntCallDescr): - """ - calldescr that works for every integer type, by explicitly passing it the - size of the result. Used only by get_call_descr_dynamic - """ - _clsname = 'DynamicIntCallDescr' - - def __init__(self, arg_classes, result_size, result_sign, extrainfo=None, ffi_flags=0): - BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) - assert isinstance(result_sign, bool) - self._result_size = chr(result_size) - self._result_sign = result_sign - - def get_result_size(self, translate_support_code): - return ord(self._result_size) - - def is_result_signed(self): - return self._result_sign - - -class NonGcPtrCallDescr(BaseIntCallDescr): - _clsname = 'NonGcPtrCallDescr' - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrCallDescr(BaseCallDescr): - _clsname = 'GcPtrCallDescr' - _return_type = history.REF - call_stub = staticmethod(lambda func, args_i, args_r, args_f: - lltype.nullptr(llmemory.GCREF.TO)) - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class FloatCallDescr(BaseCallDescr): - _clsname = 'FloatCallDescr' - _return_type = history.FLOAT - call_stub = staticmethod(lambda func,args_i,args_r,args_f: longlong.ZEROF) - def get_result_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class LongLongCallDescr(FloatCallDescr): - _clsname = 'LongLongCallDescr' - _return_type = 'L' - -class VoidCallDescr(BaseCallDescr): - _clsname = 'VoidCallDescr' - _return_type = history.VOID - call_stub = staticmethod(lambda func, args_i, args_r, args_f: None) - def get_result_size(self, translate_support_code): - return 0 - -_SingleFloatCallDescr = None # built lazily - -def getCallDescrClass(RESULT): - if RESULT is lltype.Void: - return VoidCallDescr - if RESULT is lltype.Float: - return FloatCallDescr - if RESULT is lltype.SingleFloat: - global _SingleFloatCallDescr - if _SingleFloatCallDescr is None: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(RESULT) - class SingleFloatCallDescr(getCallDescrClass(rffi.UINT)): - _clsname = 'SingleFloatCallDescr' - _return_type = 'S' - _SingleFloatCallDescr = SingleFloatCallDescr - return _SingleFloatCallDescr - if is_longlong(RESULT): - return LongLongCallDescr - return getDescrClass(RESULT, BaseIntCallDescr, GcPtrCallDescr, - NonGcPtrCallDescr, 'Call', 'get_result_size', - Ellipsis, # <= floatattrname should not be used here - '_is_result_signed') -getCallDescrClass._annspecialcase_ = 'specialize:memo' +def map_type_to_argclass(ARG, accept_void=False): + kind = getkind(ARG) + if kind == 'int': + if ARG is lltype.SingleFloat: return 'S' + else: return 'i' + elif kind == 'ref': return 'r' + elif kind == 'float': + 
if is_longlong(ARG): return 'L' + else: return 'f' + elif kind == 'void': + if accept_void: return 'v' + raise NotImplementedError('ARG = %r' % (ARG,)) def get_call_descr(gccache, ARGS, RESULT, extrainfo=None): - arg_classes = [] - for ARG in ARGS: - kind = getkind(ARG) - if kind == 'int': - if ARG is lltype.SingleFloat: - arg_classes.append('S') + arg_classes = map(map_type_to_argclass, ARGS) + arg_classes = ''.join(arg_classes) + result_type = map_type_to_argclass(RESULT, accept_void=True) + RESULT_ERASED = RESULT + if RESULT is lltype.Void: + result_size = 0 + result_signed = False + else: + if isinstance(RESULT, lltype.Ptr): + # avoid too many CallDescrs + if result_type == 'r': + RESULT_ERASED = llmemory.GCREF else: - arg_classes.append('i') - elif kind == 'ref': arg_classes.append('r') - elif kind == 'float': - if is_longlong(ARG): - arg_classes.append('L') - else: - arg_classes.append('f') - else: - raise NotImplementedError('ARG = %r' % (ARG,)) - arg_classes = ''.join(arg_classes) - cls = getCallDescrClass(RESULT) - key = (cls, arg_classes, extrainfo) + RESULT_ERASED = llmemory.Address + result_size = symbolic.get_size(RESULT_ERASED, + gccache.translate_support_code) + result_signed = get_type_flag(RESULT) == FLAG_SIGNED + key = (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo) cache = gccache._cache_call try: - return cache[key] + calldescr = cache[key] except KeyError: - calldescr = cls(arg_classes, extrainfo) - calldescr.create_call_stub(gccache.rtyper, RESULT) + calldescr = CallDescr(arg_classes, result_type, result_signed, + result_size, extrainfo) + calldescr.create_call_stub(gccache.rtyper, RESULT_ERASED) cache[key] = calldescr - return calldescr - - -# ____________________________________________________________ - -def getDescrClass(TYPE, BaseDescr, GcPtrDescr, NonGcPtrDescr, - nameprefix, methodname, floatattrname, signedattrname, - _cache={}): - if isinstance(TYPE, lltype.Ptr): - if TYPE.TO._gckind == 'gc': - return GcPtrDescr - else: - return NonGcPtrDescr - if TYPE is lltype.SingleFloat: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(TYPE) - TYPE = rffi.UINT - try: - return _cache[nameprefix, TYPE] - except KeyError: - # - class Descr(BaseDescr): - _clsname = '%s%sDescr' % (TYPE._name, nameprefix) - Descr.__name__ = Descr._clsname - # - def method(self, translate_support_code): - return symbolic.get_size(TYPE, translate_support_code) - setattr(Descr, methodname, method) - # - if TYPE is lltype.Float or is_longlong(TYPE): - setattr(Descr, floatattrname, True) - elif (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and - rffi.cast(TYPE, -1) == -1): - setattr(Descr, signedattrname, True) - # - _cache[nameprefix, TYPE] = Descr - return Descr + assert repr(calldescr.result_size) == repr(result_size) + return calldescr diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -1,14 +1,12 @@ from pypy.rlib.rarithmetic import intmask from pypy.jit.metainterp import history from pypy.rpython.lltypesystem import rffi -from pypy.jit.backend.llsupport.descr import ( - DynamicIntCallDescr, NonGcPtrCallDescr, FloatCallDescr, VoidCallDescr, - LongLongCallDescr, getCallDescrClass) +from pypy.jit.backend.llsupport.descr import CallDescr class UnsupportedKind(Exception): pass -def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo=None, ffi_flags=0): +def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo, 
ffi_flags): """Get a call descr: the types of result and args are represented by rlib.libffi.types.*""" try: @@ -16,29 +14,13 @@ argkinds = [get_ffi_type_kind(cpu, arg) for arg in ffi_args] except UnsupportedKind: return None - arg_classes = ''.join(argkinds) - if reskind == history.INT: - size = intmask(ffi_result.c_size) - signed = is_ffi_type_signed(ffi_result) - return DynamicIntCallDescr(arg_classes, size, signed, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.REF: - return NonGcPtrCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.FLOAT: - return FloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.VOID: - return VoidCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'L': - return LongLongCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'S': - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - return SingleFloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - assert False + if reskind == history.VOID: + result_size = 0 + else: + result_size = intmask(ffi_result.c_size) + argkinds = ''.join(argkinds) + return CallDescr(argkinds, reskind, is_ffi_type_signed(ffi_result), + result_size, extrainfo, ffi_flags=ffi_flags) def get_ffi_type_kind(cpu, ffi_type): from pypy.rlib.libffi import types diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,6 +1,6 @@ import os from pypy.rlib import rgc -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize from pypy.rlib.debug import fatalerror from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr @@ -8,52 +8,93 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt, ConstPtr -from pypy.jit.metainterp.history import AbstractDescr +from pypy.jit.codewriter import heaptracker +from pypy.jit.metainterp.history import ConstPtr, AbstractDescr from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.symbolic import WORD -from pypy.jit.backend.llsupport.descr import BaseSizeDescr, BaseArrayDescr +from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr -from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr +from pypy.jit.backend.llsupport.descr import get_array_descr from pypy.jit.backend.llsupport.descr import get_call_descr +from pypy.jit.backend.llsupport.rewrite import GcRewriterAssembler from pypy.rpython.memory.gctransform import asmgcroot # ____________________________________________________________ class GcLLDescription(GcCache): - minimal_size_in_nursery = 0 - get_malloc_slowpath_addr = None def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr + if translator and translator.config.translation.gcremovetypeptr: + self.fielddescr_vtable = None + else: + self.fielddescr_vtable = get_field_descr(self, rclass.OBJECT, + 'typeptr') + self._generated_functions = [] + + def _setup_str(self): + self.str_descr = get_array_descr(self, rstr.STR) + 
self.unicode_descr = get_array_descr(self, rstr.UNICODE) + + def generate_function(self, funcname, func, ARGS, RESULT=llmemory.GCREF): + """Generates a variant of malloc with the given name and the given + arguments. It should return NULL if out of memory. If it raises + anything, it must be an optional MemoryError. + """ + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, RESULT)) + descr = get_call_descr(self, ARGS, RESULT) + setattr(self, funcname, func) + setattr(self, funcname + '_FUNCPTR', FUNCPTR) + setattr(self, funcname + '_descr', descr) + self._generated_functions.append(funcname) + + @specialize.arg(1) + def get_malloc_fn(self, funcname): + func = getattr(self, funcname) + FUNC = getattr(self, funcname + '_FUNCPTR') + return llhelper(FUNC, func) + + @specialize.arg(1) + def get_malloc_fn_addr(self, funcname): + ll_func = self.get_malloc_fn(funcname) + return heaptracker.adr2int(llmemory.cast_ptr_to_adr(ll_func)) + def _freeze_(self): return True def initialize(self): pass def do_write_barrier(self, gcref_struct, gcref_newptr): pass - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - return operations - def can_inline_malloc(self, descr): - return False - def can_inline_malloc_varsize(self, descr, num_elem): + def can_use_nursery_malloc(self, size): return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): pass + def get_nursery_free_addr(self): + raise NotImplementedError + def get_nursery_top_addr(self): + raise NotImplementedError - def get_funcptr_for_newarray(self): - return llhelper(self.GC_MALLOC_ARRAY, self.malloc_array) - def get_funcptr_for_newstr(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_str) - def get_funcptr_for_newunicode(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_unicode) + def gc_malloc(self, sizedescr): + """Blackhole: do a 'bh_new'. 
Also used for 'bh_new_with_vtable', + with the vtable pointer set manually afterwards.""" + assert isinstance(sizedescr, SizeDescr) + return self._bh_malloc(sizedescr) + def gc_malloc_array(self, arraydescr, num_elem): + assert isinstance(arraydescr, ArrayDescr) + return self._bh_malloc_array(arraydescr, num_elem) - def record_constptrs(self, op, gcrefs_output_list): + def gc_malloc_str(self, num_elem): + return self._bh_malloc_array(self.str_descr, num_elem) + + def gc_malloc_unicode(self, num_elem): + return self._bh_malloc_array(self.unicode_descr, num_elem) + + def _record_constptrs(self, op, gcrefs_output_list): for i in range(op.numargs()): v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): @@ -61,11 +102,27 @@ rgc._make_sure_does_not_move(p) gcrefs_output_list.append(p) + def rewrite_assembler(self, cpu, operations, gcrefs_output_list): + rewriter = GcRewriterAssembler(self, cpu) + newops = rewriter.rewrite(operations) + # record all GCREFs, because the GC (or Boehm) cannot see them and + # keep them alive if they end up as constants in the assembler + for op in newops: + self._record_constptrs(op, gcrefs_output_list) + return newops + # ____________________________________________________________ class GcLLDescr_boehm(GcLLDescription): - moving_gc = False - gcrootmap = None + kind = 'boehm' + moving_gc = False + round_up = False + gcrootmap = None + write_barrier_descr = None + fielddescr_tid = None + str_type_id = 0 + unicode_type_id = 0 + get_malloc_slowpath_addr = None @classmethod def configure_boehm_once(cls): @@ -76,6 +133,16 @@ from pypy.rpython.tool import rffi_platform compilation_info = rffi_platform.configure_boehm() + # on some platform GC_init is required before any other + # GC_* functions, call it here for the benefit of tests + # XXX move this to tests + init_fn_ptr = rffi.llexternal("GC_init", + [], lltype.Void, + compilation_info=compilation_info, + sandboxsafe=True, + _nowrapper=True) + init_fn_ptr() + # Versions 6.x of libgc needs to use GC_local_malloc(). 
# Versions 7.x of libgc removed this function; GC_malloc() has # the same behavior if libgc was compiled with @@ -95,96 +162,42 @@ sandboxsafe=True, _nowrapper=True) cls.malloc_fn_ptr = malloc_fn_ptr - cls.compilation_info = compilation_info return malloc_fn_ptr def __init__(self, gcdescr, translator, rtyper): GcLLDescription.__init__(self, gcdescr, translator, rtyper) # grab a pointer to the Boehm 'malloc' function - malloc_fn_ptr = self.configure_boehm_once() - self.funcptr_for_new = malloc_fn_ptr + self.malloc_fn_ptr = self.configure_boehm_once() + self._setup_str() + self._make_functions() - def malloc_array(basesize, itemsize, ofs_length, num_elem): + def _make_functions(self): + + def malloc_fixedsize(size): + return self.malloc_fn_ptr(size) + self.generate_function('malloc_fixedsize', malloc_fixedsize, + [lltype.Signed]) + + def malloc_array(basesize, num_elem, itemsize, ofs_length): try: - size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) except OverflowError: return lltype.nullptr(llmemory.GCREF.TO) - res = self.funcptr_for_new(size) - if not res: - return res - rffi.cast(rffi.CArrayPtr(lltype.Signed), res)[ofs_length/WORD] = num_elem + res = self.malloc_fn_ptr(totalsize) + if res: + arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) + arrayptr[ofs_length/WORD] = num_elem return res - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 4, llmemory.GCREF)) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 4) + def _bh_malloc(self, sizedescr): + return self.malloc_fixedsize(sizedescr.size) - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, self.translate_support_code) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code) - def malloc_str(length): - return self.malloc_array( - str_basesize, str_itemsize, str_ofs_length, length - ) - def malloc_unicode(length): - return self.malloc_array( - unicode_basesize, unicode_itemsize, unicode_ofs_length, length - ) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - - - # on some platform GC_init is required before any other - # GC_* functions, call it here for the benefit of tests - # XXX move this to tests - init_fn_ptr = rffi.llexternal("GC_init", - [], lltype.Void, - compilation_info=self.compilation_info, - sandboxsafe=True, - _nowrapper=True) - - init_fn_ptr() - - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.funcptr_for_new(sizedescr.size) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(basesize, itemsize, ofs_length, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size] - - def args_for_new_array(self, arraydescr): - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = 
arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [basesize, itemsize, ofs_length] - - def get_funcptr_for_new(self): - return self.funcptr_for_new - - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # record all GCREFs too, because Boehm cannot see them and keep them - # alive if they end up as constants in the assembler - for op in operations: - self.record_constptrs(op, gcrefs_output_list) - return GcLLDescription.rewrite_assembler(self, cpu, operations, - gcrefs_output_list) + def _bh_malloc_array(self, arraydescr, num_elem): + return self.malloc_array(arraydescr.basesize, num_elem, + arraydescr.itemsize, + arraydescr.lendescr.offset) # ____________________________________________________________ @@ -554,12 +567,14 @@ class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): - GCClass = gc_ll_descr.GCClass self.llop1 = gc_ll_descr.llop1 self.WB_FUNCPTR = gc_ll_descr.WB_FUNCPTR self.WB_ARRAY_FUNCPTR = gc_ll_descr.WB_ARRAY_FUNCPTR - self.fielddescr_tid = get_field_descr(gc_ll_descr, GCClass.HDR, 'tid') + self.fielddescr_tid = gc_ll_descr.fielddescr_tid # + GCClass = gc_ll_descr.GCClass + if GCClass is None: # for tests + return self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = ( self.extract_flag_byte(self.jit_wb_if_flag)) @@ -596,48 +611,74 @@ funcaddr = llmemory.cast_ptr_to_adr(funcptr) return cpu.cast_adr_to_int(funcaddr) # this may return 0 + def has_write_barrier_from_array(self, cpu): + return self.get_write_barrier_from_array_fn(cpu) != 0 + class GcLLDescr_framework(GcLLDescription): DEBUG = False # forced to True by x86/test/test_zrpy_gc.py + kind = 'framework' + round_up = True - def __init__(self, gcdescr, translator, rtyper, llop1=llop): - from pypy.rpython.memory.gctypelayout import check_typeid - from pypy.rpython.memory.gcheader import GCHeaderBuilder - from pypy.rpython.memory.gctransform import framework + def __init__(self, gcdescr, translator, rtyper, llop1=llop, + really_not_translated=False): GcLLDescription.__init__(self, gcdescr, translator, rtyper) - assert self.translate_support_code, "required with the framework GC" self.translator = translator self.llop1 = llop1 + if really_not_translated: + assert not self.translate_support_code # but half does not work + self._initialize_for_tests() + else: + assert self.translate_support_code,"required with the framework GC" + self._check_valid_gc() + self._make_gcrootmap() + self._make_layoutbuilder() + self._setup_gcclass() + self._setup_tid() + self._setup_write_barrier() + self._setup_str() + self._make_functions(really_not_translated) + def _initialize_for_tests(self): + self.layoutbuilder = None + self.fielddescr_tid = AbstractDescr() + self.max_size_of_young_obj = 1000 + self.GCClass = None + + def _check_valid_gc(self): # we need the hybrid or minimark GC for rgc._make_sure_does_not_move() # to work - if gcdescr.config.translation.gc not in ('hybrid', 'minimark'): + if self.gcdescr.config.translation.gc not in ('hybrid', 'minimark'): raise NotImplementedError("--gc=%s not implemented with the JIT" % (gcdescr.config.translation.gc,)) + def _make_gcrootmap(self): # to find roots in the assembler, make a GcRootMap - name = gcdescr.config.translation.gcrootfinder + name = self.gcdescr.config.translation.gcrootfinder try: cls = globals()['GcRootMap_' + name] except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % 
(name,)) - gcrootmap = cls(gcdescr) + gcrootmap = cls(self.gcdescr) self.gcrootmap = gcrootmap + def _make_layoutbuilder(self): # make a TransformerLayoutBuilder and save it on the translator # where it can be fished and reused by the FrameworkGCTransformer + from pypy.rpython.memory.gctransform import framework + translator = self.translator self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} - gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + self.gcrootmap.add_jit2gc_hooks(translator._jit2gc) + def _setup_gcclass(self): + from pypy.rpython.memory.gcheader import GCHeaderBuilder self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO) - (self.array_basesize, _, self.array_length_ofs) = \ - symbolic.get_array_token(lltype.GcArray(lltype.Signed), True) self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() @@ -645,88 +686,124 @@ assert self.GCClass.inline_simple_malloc assert self.GCClass.inline_simple_malloc_varsize - # make a malloc function, with two arguments - def malloc_basic(size, tid): - type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - has_finalizer = bool(tid & (1<", res) - return res - self.malloc_basic = malloc_basic - self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType( - [lltype.Signed, lltype.Signed], llmemory.GCREF)) + def _setup_tid(self): + self.fielddescr_tid = get_field_descr(self, self.GCClass.HDR, 'tid') + + def _setup_write_barrier(self): self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, llmemory.Address], lltype.Void)) self.WB_ARRAY_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, lltype.Signed, llmemory.Address], lltype.Void)) self.write_barrier_descr = WriteBarrierDescr(self) - # + + def _make_functions(self, really_not_translated): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + (self.standard_array_basesize, _, self.standard_array_length_ofs) = \ + symbolic.get_array_token(lltype.GcArray(lltype.Signed), + not really_not_translated) + + def malloc_nursery_slowpath(size): + """Allocate 'size' null bytes out of the nursery. + Note that the fast path is typically inlined by the backend.""" + if self.DEBUG: + self._random_usage_of_xmm_registers() + type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, size, + False, False, False) + self.generate_function('malloc_nursery', malloc_nursery_slowpath, + [lltype.Signed]) + def malloc_array(itemsize, tid, num_elem): + """Allocate an array with a variable-size num_elem. 
+ Only works for standard arrays.""" type_id = llop.extract_ushort(llgroup.HALFWORD, tid) check_typeid(type_id) return llop1.do_malloc_varsize_clear( llmemory.GCREF, - type_id, num_elem, self.array_basesize, itemsize, - self.array_length_ofs) - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 3, llmemory.GCREF)) - # - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, True) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, True) - str_type_id = self.layoutbuilder.get_type_id(rstr.STR) - unicode_type_id = self.layoutbuilder.get_type_id(rstr.UNICODE) - # + type_id, num_elem, self.standard_array_basesize, itemsize, + self.standard_array_length_ofs) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 3) + + def malloc_array_nonstandard(basesize, itemsize, lengthofs, tid, + num_elem): + """For the rare case of non-standard arrays, i.e. arrays where + self.standard_array_{basesize,length_ofs} is wrong. It can + occur e.g. with arrays of floats on Win32.""" + type_id = llop.extract_ushort(llgroup.HALFWORD, tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + type_id, num_elem, basesize, itemsize, lengthofs) + self.generate_function('malloc_array_nonstandard', + malloc_array_nonstandard, + [lltype.Signed] * 5) + + str_type_id = self.str_descr.tid + str_basesize = self.str_descr.basesize + str_itemsize = self.str_descr.itemsize + str_ofs_length = self.str_descr.lendescr.offset + unicode_type_id = self.unicode_descr.tid + unicode_basesize = self.unicode_descr.basesize + unicode_itemsize = self.unicode_descr.itemsize + unicode_ofs_length = self.unicode_descr.lendescr.offset + def malloc_str(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, str_type_id, length, str_basesize, str_itemsize, str_ofs_length) + self.generate_function('malloc_str', malloc_str, + [lltype.Signed]) + def malloc_unicode(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, - unicode_type_id, length, unicode_basesize,unicode_itemsize, + unicode_type_id, length, unicode_basesize, unicode_itemsize, unicode_ofs_length) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - # - class ForTestOnly: - pass - for_test_only = ForTestOnly() - for_test_only.x = 1.23 - def random_usage_of_xmm_registers(): - x0 = for_test_only.x - x1 = x0 * 0.1 - x2 = x0 * 0.2 - x3 = x0 * 0.3 - for_test_only.x = x0 + x1 + x2 + x3 - # - def malloc_slowpath(size): - if self.DEBUG: - random_usage_of_xmm_registers() - assert size >= self.minimal_size_in_nursery - # NB. although we call do_malloc_fixedsize_clear() here, - # it's a bit of a hack because we set tid to 0 and may - # also use it to allocate varsized objects. The tid - # and possibly the length are both set afterward. - gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - 0, size, False, False) - return rffi.cast(lltype.Signed, gcref) - self.malloc_slowpath = malloc_slowpath - self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) + self.generate_function('malloc_unicode', malloc_unicode, + [lltype.Signed]) + + # Rarely called: allocate a fixed-size amount of bytes, but + # not in the nursery, because it is too big. Implemented like + # malloc_nursery_slowpath() above. 
+ self.generate_function('malloc_fixedsize', malloc_nursery_slowpath, + [lltype.Signed]) + + def _bh_malloc(self, sizedescr): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, sizedescr.tid) + check_typeid(type_id) + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, sizedescr.size, + False, False, False) + + def _bh_malloc_array(self, arraydescr, num_elem): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, arraydescr.tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear(llmemory.GCREF, + type_id, num_elem, + arraydescr.basesize, + arraydescr.itemsize, + arraydescr.lendescr.offset) + + + class ForTestOnly: + pass + for_test_only = ForTestOnly() + for_test_only.x = 1.23 + + def _random_usage_of_xmm_registers(self): + x0 = self.for_test_only.x + x1 = x0 * 0.1 + x2 = x0 * 0.2 + x3 = x0 * 0.3 + self.for_test_only.x = x0 + x1 + x2 + x3 def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -736,50 +813,26 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) - return rffi.cast(lltype.Signed, fptr) - def initialize(self): self.gcrootmap.initialize() def init_size_descr(self, S, descr): - type_id = self.layoutbuilder.get_type_id(S) - assert not self.layoutbuilder.is_weakref_type(S) - has_finalizer = bool(self.layoutbuilder.has_finalizer(S)) - flags = int(has_finalizer) << llgroup.HALFSHIFT - descr.tid = llop.combine_ushort(lltype.Signed, type_id, flags) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(S) + assert not self.layoutbuilder.is_weakref_type(S) + assert not self.layoutbuilder.has_finalizer(S) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) def init_array_descr(self, A, descr): - type_id = self.layoutbuilder.get_type_id(A) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(A) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.malloc_basic(sizedescr.size, sizedescr.tid) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(itemsize, arraydescr.tid, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size, sizedescr.tid] - - def args_for_new_array(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [itemsize, arraydescr.tid] - - def get_funcptr_for_new(self): - return llhelper(self.GC_MALLOC_BASIC, self.malloc_basic) + def _set_tid(self, gcptr, tid): + hdr_addr = llmemory.cast_ptr_to_adr(gcptr) + hdr_addr -= self.gcheaderbuilder.size_gc_header + hdr = llmemory.cast_adr_to_ptr(hdr_addr, self.HDRPTR) + hdr.tid = tid def do_write_barrier(self, gcref_struct, gcref_newptr): hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct) @@ 
-793,99 +846,8 @@ funcptr(llmemory.cast_ptr_to_adr(gcref_struct), llmemory.cast_ptr_to_adr(gcref_newptr)) - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # Perform two kinds of rewrites in parallel: - # - # - Add COND_CALLs to the write barrier before SETFIELD_GC and - # SETARRAYITEM_GC operations. - # - # - Record the ConstPtrs from the assembler. - # - newops = [] - known_lengths = {} - # we can only remember one malloc since the next malloc can possibly - # collect - last_malloc = None - for op in operations: - if op.getopnum() == rop.DEBUG_MERGE_POINT: - continue - # ---------- record the ConstPtrs ---------- - self.record_constptrs(op, gcrefs_output_list) - if op.is_malloc(): - last_malloc = op.result - elif op.can_malloc(): - last_malloc = None - # ---------- write barrier for SETFIELD_GC ---------- - if op.getopnum() == rop.SETFIELD_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(1) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETFIELD_RAW) - # ---------- write barrier for SETARRAYITEM_GC ---------- - if op.getopnum() == rop.SETARRAYITEM_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier_array(newops, op.getarg(0), - op.getarg(1), v, - cpu, known_lengths) - op = op.copy_and_change(rop.SETARRAYITEM_RAW) - elif op.getopnum() == rop.NEW_ARRAY: - v_length = op.getarg(0) - if isinstance(v_length, ConstInt): - known_lengths[op.result] = v_length.getint() - # ---------- - newops.append(op) - return newops - - def _gen_write_barrier(self, newops, v_base, v_value): - args = [v_base, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, - descr=self.write_barrier_descr)) - - def _gen_write_barrier_array(self, newops, v_base, v_index, v_value, - cpu, known_lengths): - if self.write_barrier_descr.get_write_barrier_from_array_fn(cpu) != 0: - # If we know statically the length of 'v', and it is not too - # big, then produce a regular write_barrier. If it's unknown or - # too big, produce instead a write_barrier_from_array. 
- LARGE = 130 - length = known_lengths.get(v_base, LARGE) - if length >= LARGE: - # unknown or too big: produce a write_barrier_from_array - args = [v_base, v_index, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, - None, - descr=self.write_barrier_descr)) - return - # fall-back case: produce a write_barrier - self._gen_write_barrier(newops, v_base, v_value) - - def can_inline_malloc(self, descr): - assert isinstance(descr, BaseSizeDescr) - if descr.size < self.max_size_of_young_obj: - has_finalizer = bool(descr.tid & (1<= len(self.used): + self.used.append(False) + if self.used[index + i]: + return False # already in use + # good, we can reuse the location + for i in range(size): + self.used[index + i] = True + self.bindings[box] = loc + return True + # abstract methods that need to be overwritten for specific assemblers @staticmethod def frame_pos(loc, type): @@ -49,6 +123,10 @@ @staticmethod def frame_size(type): return 1 + @staticmethod + def get_loc_index(loc): + raise NotImplementedError("Purely abstract") + class RegisterManager(object): """ Class that keeps track of register allocations @@ -68,7 +146,14 @@ self.frame_manager = frame_manager self.assembler = assembler + def is_still_alive(self, v): + # Check if 'v' is alive at the current position. + # Return False if the last usage is strictly before. + return self.longevity[v][1] >= self.position + def stays_alive(self, v): + # Check if 'v' stays alive after the current position. + # Return False if the last usage is before or at position. return self.longevity[v][1] > self.position def next_instruction(self, incr=1): @@ -84,11 +169,14 @@ point for all variables that might be in registers. """ self._check_type(v) - if isinstance(v, Const) or v not in self.reg_bindings: + if isinstance(v, Const): return if v not in self.longevity or self.longevity[v][1] <= self.position: - self.free_regs.append(self.reg_bindings[v]) - del self.reg_bindings[v] + if v in self.reg_bindings: + self.free_regs.append(self.reg_bindings[v]) + del self.reg_bindings[v] + if self.frame_manager is not None: + self.frame_manager.mark_as_free(v) def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -0,0 +1,328 @@ +import sys +from pypy.rlib.rarithmetic import ovfcheck +from pypy.jit.metainterp.history import ConstInt, BoxPtr, ConstPtr +from pypy.jit.metainterp.resoperation import ResOperation, rop +from pypy.jit.codewriter import heaptracker +from pypy.jit.backend.llsupport.symbolic import WORD +from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr + + +class GcRewriterAssembler(object): + # This class performs the following rewrites on the list of operations: + # + # - Remove the DEBUG_MERGE_POINTs. + # + # - Turn all NEW_xxx to either a CALL_MALLOC_GC, or a CALL_MALLOC_NURSERY + # followed by SETFIELDs in order to initialize their GC fields. The + # two advantages of CALL_MALLOC_NURSERY is that it inlines the common + # path, and we need only one such operation to allocate several blocks + # of memory at once. + # + # - Add COND_CALLs to the write barrier before SETFIELD_GC and + # SETARRAYITEM_GC operations. 
+ + _previous_size = -1 + _op_malloc_nursery = None + _v_last_malloced_nursery = None + c_zero = ConstInt(0) + + def __init__(self, gc_ll_descr, cpu): + self.gc_ll_descr = gc_ll_descr + self.cpu = cpu + self.newops = [] + self.known_lengths = {} + self.recent_mallocs = {} # set of variables + + def rewrite(self, operations): + # we can only remember one malloc since the next malloc can possibly + # collect; but we can try to collapse several known-size mallocs into + # one, both for performance and to reduce the number of write + # barriers. We do this on each "basic block" of operations, which in + # this case means between CALLs or unknown-size mallocs. + # + for op in operations: + if op.getopnum() == rop.DEBUG_MERGE_POINT: + continue + # ---------- turn NEWxxx into CALL_MALLOC_xxx ---------- + if op.is_malloc(): + self.handle_malloc_operation(op) + continue + elif op.can_malloc(): + self.emitting_an_operation_that_can_collect() + elif op.getopnum() == rop.LABEL: + self.emitting_an_operation_that_can_collect() + self.known_lengths.clear() + # ---------- write barriers ---------- + if self.gc_ll_descr.write_barrier_descr is not None: + if op.getopnum() == rop.SETFIELD_GC: + self.handle_write_barrier_setfield(op) + continue + if op.getopnum() == rop.SETINTERIORFIELD_GC: + self.handle_write_barrier_setinteriorfield(op) + continue + if op.getopnum() == rop.SETARRAYITEM_GC: + self.handle_write_barrier_setarrayitem(op) + continue + # ---------- + self.newops.append(op) + return self.newops + + # ---------- + + def handle_malloc_operation(self, op): + opnum = op.getopnum() + if opnum == rop.NEW: + self.handle_new_fixedsize(op.getdescr(), op) + elif opnum == rop.NEW_WITH_VTABLE: + classint = op.getarg(0).getint() + descr = heaptracker.vtable2descr(self.cpu, classint) + self.handle_new_fixedsize(descr, op) + if self.gc_ll_descr.fielddescr_vtable is not None: + op = ResOperation(rop.SETFIELD_GC, + [op.result, ConstInt(classint)], None, + descr=self.gc_ll_descr.fielddescr_vtable) + self.newops.append(op) + elif opnum == rop.NEW_ARRAY: + descr = op.getdescr() + assert isinstance(descr, ArrayDescr) + self.handle_new_array(descr, op) + elif opnum == rop.NEWSTR: + self.handle_new_array(self.gc_ll_descr.str_descr, op) + elif opnum == rop.NEWUNICODE: + self.handle_new_array(self.gc_ll_descr.unicode_descr, op) + else: + raise NotImplementedError(op.getopname()) + + def handle_new_fixedsize(self, descr, op): + assert isinstance(descr, SizeDescr) + size = descr.size + self.gen_malloc_nursery(size, op.result) + self.gen_initialize_tid(op.result, descr.tid) + + def handle_new_array(self, arraydescr, op): + v_length = op.getarg(0) + total_size = -1 + if isinstance(v_length, ConstInt): + num_elem = v_length.getint() + self.known_lengths[op.result] = num_elem + try: + var_size = ovfcheck(arraydescr.itemsize * num_elem) + total_size = ovfcheck(arraydescr.basesize + var_size) + except OverflowError: + pass # total_size is still -1 + elif arraydescr.itemsize == 0: + total_size = arraydescr.basesize + if 0 <= total_size <= 0xffffff: # up to 16MB, arbitrarily + self.gen_malloc_nursery(total_size, op.result) + self.gen_initialize_tid(op.result, arraydescr.tid) + self.gen_initialize_len(op.result, v_length, arraydescr.lendescr) + elif self.gc_ll_descr.kind == 'boehm': + self.gen_boehm_malloc_array(arraydescr, v_length, op.result) + else: + opnum = op.getopnum() + if opnum == rop.NEW_ARRAY: + self.gen_malloc_array(arraydescr, v_length, op.result) + elif opnum == rop.NEWSTR: + self.gen_malloc_str(v_length, op.result) + 
elif opnum == rop.NEWUNICODE: + self.gen_malloc_unicode(v_length, op.result) + else: + raise NotImplementedError(op.getopname()) + + # ---------- + + def emitting_an_operation_that_can_collect(self): + # must be called whenever we emit an operation that can collect: + # forgets the previous MALLOC_NURSERY, if any; and empty the + # set 'recent_mallocs', so that future SETFIELDs will generate + # a write barrier as usual. + self._op_malloc_nursery = None + self.recent_mallocs.clear() + + def _gen_call_malloc_gc(self, args, v_result, descr): + """Generate a CALL_MALLOC_GC with the given args.""" + self.emitting_an_operation_that_can_collect() + op = ResOperation(rop.CALL_MALLOC_GC, args, v_result, descr) + self.newops.append(op) + # mark 'v_result' as freshly malloced + self.recent_mallocs[v_result] = None + + def gen_malloc_fixedsize(self, size, v_result): + """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, Const(size)). + Note that with the framework GC, this should be called very rarely. + """ + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize') + self._gen_call_malloc_gc([ConstInt(addr), ConstInt(size)], v_result, + self.gc_ll_descr.malloc_fixedsize_descr) + + def gen_boehm_malloc_array(self, arraydescr, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) for Boehm.""" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_array') + self._gen_call_malloc_gc([ConstInt(addr), + ConstInt(arraydescr.basesize), + v_num_elem, + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.lendescr.offset)], + v_result, + self.gc_ll_descr.malloc_array_descr) + + def gen_malloc_array(self, arraydescr, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) going either + to the standard or the nonstandard version of the function.""" + # + if (arraydescr.basesize == self.gc_ll_descr.standard_array_basesize + and arraydescr.lendescr.offset == + self.gc_ll_descr.standard_array_length_ofs): + # this is a standard-looking array, common case + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_array') + args = [ConstInt(addr), + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.tid), + v_num_elem] + calldescr = self.gc_ll_descr.malloc_array_descr + else: + # rare case, so don't care too much about the number of arguments + addr = self.gc_ll_descr.get_malloc_fn_addr( + 'malloc_array_nonstandard') + args = [ConstInt(addr), + ConstInt(arraydescr.basesize), + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.lendescr.offset), + ConstInt(arraydescr.tid), + v_num_elem] + calldescr = self.gc_ll_descr.malloc_array_nonstandard_descr + self._gen_call_malloc_gc(args, v_result, calldescr) + + def gen_malloc_str(self, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_str_fn, ...).""" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_str') + self._gen_call_malloc_gc([ConstInt(addr), v_num_elem], v_result, + self.gc_ll_descr.malloc_str_descr) + + def gen_malloc_unicode(self, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_unicode_fn, ...).""" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_unicode') + self._gen_call_malloc_gc([ConstInt(addr), v_num_elem], v_result, + self.gc_ll_descr.malloc_unicode_descr) + + def gen_malloc_nursery(self, size, v_result): + """Try to generate or update a CALL_MALLOC_NURSERY. + If that fails, generate a plain CALL_MALLOC_GC instead. 
+ """ + size = self.round_up_for_allocation(size) + if not self.gc_ll_descr.can_use_nursery_malloc(size): + self.gen_malloc_fixedsize(size, v_result) + return + # + op = None + if self._op_malloc_nursery is not None: + # already a MALLOC_NURSERY: increment its total size + total_size = self._op_malloc_nursery.getarg(0).getint() + total_size += size + if self.gc_ll_descr.can_use_nursery_malloc(total_size): + # if the total size is still reasonable, merge it + self._op_malloc_nursery.setarg(0, ConstInt(total_size)) + op = ResOperation(rop.INT_ADD, + [self._v_last_malloced_nursery, + ConstInt(self._previous_size)], + v_result) + if op is None: + # if we failed to merge with a previous MALLOC_NURSERY, emit one + self.emitting_an_operation_that_can_collect() + op = ResOperation(rop.CALL_MALLOC_NURSERY, + [ConstInt(size)], + v_result) + self._op_malloc_nursery = op + # + self.newops.append(op) + self._previous_size = size + self._v_last_malloced_nursery = v_result + self.recent_mallocs[v_result] = None + + def gen_initialize_tid(self, v_newgcobj, tid): + if self.gc_ll_descr.fielddescr_tid is not None: + # produce a SETFIELD to initialize the GC header + op = ResOperation(rop.SETFIELD_GC, + [v_newgcobj, ConstInt(tid)], None, + descr=self.gc_ll_descr.fielddescr_tid) + self.newops.append(op) + + def gen_initialize_len(self, v_newgcobj, v_length, arraylen_descr): + # produce a SETFIELD to initialize the array length + op = ResOperation(rop.SETFIELD_GC, + [v_newgcobj, v_length], None, + descr=arraylen_descr) + self.newops.append(op) + + # ---------- + + def handle_write_barrier_setfield(self, op): + val = op.getarg(0) + # no need for a write barrier in the case of previous malloc + if val not in self.recent_mallocs: + v = op.getarg(1) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self.gen_write_barrier(op.getarg(0), v) + op = op.copy_and_change(rop.SETFIELD_RAW) + self.newops.append(op) + + def handle_write_barrier_setinteriorfield(self, op): + val = op.getarg(0) + # no need for a write barrier in the case of previous malloc + if val not in self.recent_mallocs: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self.gen_write_barrier(op.getarg(0), v) + op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) + self.newops.append(op) + + def handle_write_barrier_setarrayitem(self, op): + val = op.getarg(0) + # no need for a write barrier in the case of previous malloc + if val not in self.recent_mallocs: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self.gen_write_barrier_array(op.getarg(0), + op.getarg(1), v) + op = op.copy_and_change(rop.SETARRAYITEM_RAW) + self.newops.append(op) + + def gen_write_barrier(self, v_base, v_value): + write_barrier_descr = self.gc_ll_descr.write_barrier_descr + args = [v_base, v_value] + self.newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, + descr=write_barrier_descr)) + + def gen_write_barrier_array(self, v_base, v_index, v_value): + write_barrier_descr = self.gc_ll_descr.write_barrier_descr + if write_barrier_descr.has_write_barrier_from_array(self.cpu): + # If we know statically the length of 'v', and it is not too + # big, then produce a regular write_barrier. If it's unknown or + # too big, produce instead a write_barrier_from_array. 
+ LARGE = 130 + length = self.known_lengths.get(v_base, LARGE) + if length >= LARGE: + # unknown or too big: produce a write_barrier_from_array + args = [v_base, v_index, v_value] + self.newops.append( + ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, + descr=write_barrier_descr)) + return + # fall-back case: produce a write_barrier + self.gen_write_barrier(v_base, v_value) + + def round_up_for_allocation(self, size): + if not self.gc_ll_descr.round_up: + return size + if self.gc_ll_descr.translate_support_code: + from pypy.rpython.lltypesystem import llarena + return llarena.round_up_for_allocation( + size, self.gc_ll_descr.minimal_size_in_nursery) + else: + # non-translated: do it manually + # assume that "self.gc_ll_descr.minimal_size_in_nursery" is 2 WORDs + size = max(size, 2 * WORD) + return (size + WORD-1) & ~(WORD-1) # round up diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -1,4 +1,4 @@ -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport import symbolic from pypy.rlib.objectmodel import Symbolic @@ -53,18 +53,6 @@ ('z', lltype.Ptr(U)), ('f', lltype.Float), ('s', lltype.SingleFloat)) - assert getFieldDescrClass(lltype.Ptr(T)) is GcPtrFieldDescr - assert getFieldDescrClass(lltype.Ptr(U)) is NonGcPtrFieldDescr - cls = getFieldDescrClass(lltype.Char) - assert cls != getFieldDescrClass(lltype.Signed) - assert cls == getFieldDescrClass(lltype.Char) - clsf = getFieldDescrClass(lltype.Float) - assert clsf != cls - assert clsf == getFieldDescrClass(lltype.Float) - clss = getFieldDescrClass(lltype.SingleFloat) - assert clss not in (cls, clsf) - assert clss == getFieldDescrClass(lltype.SingleFloat) - assert clss == getFieldDescrClass(rffi.UINT) # for now # c0 = GcCache(False) c1 = GcCache(True) @@ -77,11 +65,7 @@ descr_z = get_field_descr(c2, S, 'z') descr_f = get_field_descr(c2, S, 'f') descr_s = get_field_descr(c2, S, 's') - assert descr_x.__class__ is cls - assert descr_y.__class__ is GcPtrFieldDescr - assert descr_z.__class__ is NonGcPtrFieldDescr - assert descr_f.__class__ is clsf - assert descr_s.__class__ is clss + assert isinstance(descr_x, FieldDescr) assert descr_x.name == 'S.x' assert descr_y.name == 'S.y' assert descr_z.name == 'S.z' @@ -90,33 +74,27 @@ if not tsc: assert descr_x.offset < descr_y.offset < descr_z.offset assert descr_x.sort_key() < descr_y.sort_key() < descr_z.sort_key() - assert descr_x.get_field_size(False) == rffi.sizeof(lltype.Char) - assert descr_y.get_field_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr_z.get_field_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr_f.get_field_size(False) == rffi.sizeof(lltype.Float) - assert descr_s.get_field_size(False) == rffi.sizeof( - lltype.SingleFloat) + assert descr_x.field_size == rffi.sizeof(lltype.Char) + assert descr_y.field_size == rffi.sizeof(lltype.Ptr(T)) + assert descr_z.field_size == rffi.sizeof(lltype.Ptr(U)) + assert descr_f.field_size == rffi.sizeof(lltype.Float) + assert descr_s.field_size == rffi.sizeof(lltype.SingleFloat) else: assert isinstance(descr_x.offset, Symbolic) assert isinstance(descr_y.offset, Symbolic) assert isinstance(descr_z.offset, Symbolic) assert isinstance(descr_f.offset, Symbolic) assert isinstance(descr_s.offset, Symbolic) - assert 
isinstance(descr_x.get_field_size(True), Symbolic) - assert isinstance(descr_y.get_field_size(True), Symbolic) - assert isinstance(descr_z.get_field_size(True), Symbolic) - assert isinstance(descr_f.get_field_size(True), Symbolic) - assert isinstance(descr_s.get_field_size(True), Symbolic) - assert not descr_x.is_pointer_field() - assert descr_y.is_pointer_field() - assert not descr_z.is_pointer_field() - assert not descr_f.is_pointer_field() - assert not descr_s.is_pointer_field() - assert not descr_x.is_float_field() - assert not descr_y.is_float_field() - assert not descr_z.is_float_field() - assert descr_f.is_float_field() - assert not descr_s.is_float_field() + assert isinstance(descr_x.field_size, Symbolic) + assert isinstance(descr_y.field_size, Symbolic) + assert isinstance(descr_z.field_size, Symbolic) + assert isinstance(descr_f.field_size, Symbolic) + assert isinstance(descr_s.field_size, Symbolic) + assert descr_x.flag == FLAG_UNSIGNED + assert descr_y.flag == FLAG_POINTER + assert descr_z.flag == FLAG_UNSIGNED + assert descr_f.flag == FLAG_FLOAT + assert descr_s.flag == FLAG_UNSIGNED def test_get_field_descr_sign(): @@ -128,7 +106,8 @@ for tsc in [False, True]: c2 = GcCache(tsc) descr_x = get_field_descr(c2, S, 'x') - assert descr_x.is_field_signed() == signed + assert descr_x.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] def test_get_field_descr_longlong(): if sys.maxint > 2147483647: @@ -136,9 +115,8 @@ c0 = GcCache(False) S = lltype.GcStruct('S', ('y', lltype.UnsignedLongLong)) descr = get_field_descr(c0, S, 'y') - assert not descr.is_pointer_field() - assert descr.is_float_field() - assert descr.get_field_size(False) == 8 + assert descr.flag == FLAG_FLOAT + assert descr.field_size == 8 def test_get_array_descr(): @@ -149,19 +127,8 @@ A3 = lltype.GcArray(lltype.Ptr(U)) A4 = lltype.GcArray(lltype.Float) A5 = lltype.GcArray(lltype.Struct('x', ('v', lltype.Signed), - ('k', lltype.Signed))) + ('k', lltype.Signed))) A6 = lltype.GcArray(lltype.SingleFloat) - assert getArrayDescrClass(A2) is GcPtrArrayDescr - assert getArrayDescrClass(A3) is NonGcPtrArrayDescr - cls = getArrayDescrClass(A1) - assert cls != getArrayDescrClass(lltype.GcArray(lltype.Signed)) - assert cls == getArrayDescrClass(lltype.GcArray(lltype.Char)) - clsf = getArrayDescrClass(A4) - assert clsf != cls - assert clsf == getArrayDescrClass(lltype.GcArray(lltype.Float)) - clss = getArrayDescrClass(A6) - assert clss not in (clsf, cls) - assert clss == getArrayDescrClass(lltype.GcArray(rffi.UINT)) # c0 = GcCache(False) descr1 = get_array_descr(c0, A1) @@ -170,82 +137,61 @@ descr4 = get_array_descr(c0, A4) descr5 = get_array_descr(c0, A5) descr6 = get_array_descr(c0, A6) - assert descr1.__class__ is cls - assert descr2.__class__ is GcPtrArrayDescr - assert descr3.__class__ is NonGcPtrArrayDescr - assert descr4.__class__ is clsf - assert descr6.__class__ is clss + assert isinstance(descr1, ArrayDescr) assert descr1 == get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert not descr1.is_array_of_pointers() - assert descr2.is_array_of_pointers() - assert not descr3.is_array_of_pointers() - assert not descr4.is_array_of_pointers() - assert not descr5.is_array_of_pointers() - assert not descr1.is_array_of_floats() - assert not descr2.is_array_of_floats() - assert not descr3.is_array_of_floats() - assert descr4.is_array_of_floats() - assert not descr5.is_array_of_floats() + assert descr1.flag == FLAG_UNSIGNED + assert descr2.flag == FLAG_POINTER + assert descr3.flag == FLAG_UNSIGNED + assert descr4.flag == 
FLAG_FLOAT + assert descr5.flag == FLAG_STRUCT + assert descr6.flag == FLAG_UNSIGNED # def get_alignment(code): # Retrieve default alignment for the compiler/platform return struct.calcsize('l' + code) - struct.calcsize(code) - assert descr1.get_base_size(False) == get_alignment('c') - assert descr2.get_base_size(False) == get_alignment('p') - assert descr3.get_base_size(False) == get_alignment('p') - assert descr4.get_base_size(False) == get_alignment('d') - assert descr5.get_base_size(False) == get_alignment('f') - assert descr1.get_ofs_length(False) == 0 - assert descr2.get_ofs_length(False) == 0 - assert descr3.get_ofs_length(False) == 0 - assert descr4.get_ofs_length(False) == 0 - assert descr5.get_ofs_length(False) == 0 - assert descr1.get_item_size(False) == rffi.sizeof(lltype.Char) - assert descr2.get_item_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr3.get_item_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr4.get_item_size(False) == rffi.sizeof(lltype.Float) - assert descr5.get_item_size(False) == rffi.sizeof(lltype.Signed) * 2 - assert descr6.get_item_size(False) == rffi.sizeof(lltype.SingleFloat) + assert descr1.basesize == get_alignment('c') + assert descr2.basesize == get_alignment('p') + assert descr3.basesize == get_alignment('p') + assert descr4.basesize == get_alignment('d') + assert descr5.basesize == get_alignment('f') + assert descr1.lendescr.offset == 0 + assert descr2.lendescr.offset == 0 + assert descr3.lendescr.offset == 0 + assert descr4.lendescr.offset == 0 + assert descr5.lendescr.offset == 0 + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr2.itemsize == rffi.sizeof(lltype.Ptr(T)) + assert descr3.itemsize == rffi.sizeof(lltype.Ptr(U)) + assert descr4.itemsize == rffi.sizeof(lltype.Float) + assert descr5.itemsize == rffi.sizeof(lltype.Signed) * 2 + assert descr6.itemsize == rffi.sizeof(lltype.SingleFloat) # - assert isinstance(descr1.get_base_size(True), Symbolic) - assert isinstance(descr2.get_base_size(True), Symbolic) - assert isinstance(descr3.get_base_size(True), Symbolic) - assert isinstance(descr4.get_base_size(True), Symbolic) - assert isinstance(descr5.get_base_size(True), Symbolic) - assert isinstance(descr1.get_ofs_length(True), Symbolic) - assert isinstance(descr2.get_ofs_length(True), Symbolic) - assert isinstance(descr3.get_ofs_length(True), Symbolic) - assert isinstance(descr4.get_ofs_length(True), Symbolic) - assert isinstance(descr5.get_ofs_length(True), Symbolic) - assert isinstance(descr1.get_item_size(True), Symbolic) - assert isinstance(descr2.get_item_size(True), Symbolic) - assert isinstance(descr3.get_item_size(True), Symbolic) - assert isinstance(descr4.get_item_size(True), Symbolic) - assert isinstance(descr5.get_item_size(True), Symbolic) CA = rffi.CArray(lltype.Signed) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_SIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Ptr(lltype.GcStruct('S'))) descr = get_array_descr(c0, CA) - assert descr.is_array_of_pointers() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_POINTER + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Ptr(lltype.Struct('S'))) descr = get_array_descr(c0, CA) - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == 
FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Float) descr = get_array_descr(c0, CA) - assert descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_FLOAT + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(rffi.FLOAT) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.itemsize == rffi.sizeof(lltype.SingleFloat) + assert descr.lendescr is None def test_get_array_descr_sign(): @@ -257,46 +203,55 @@ for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, A) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] # RA = rffi.CArray(RESTYPE) for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, RA) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] + + +def test_get_array_descr_str(): + c0 = GcCache(False) + descr1 = get_array_descr(c0, rstr.STR) + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr1.flag == FLAG_UNSIGNED def test_get_call_descr_not_translated(): c0 = GcCache(False) descr1 = get_call_descr(c0, [lltype.Char, lltype.Signed], lltype.Char) - assert descr1.get_result_size(False) == rffi.sizeof(lltype.Char) - assert descr1.get_return_type() == history.INT + assert descr1.get_result_size() == rffi.sizeof(lltype.Char) + assert descr1.get_result_type() == history.INT assert descr1.arg_classes == "ii" # T = lltype.GcStruct('T') descr2 = get_call_descr(c0, [lltype.Ptr(T)], lltype.Ptr(T)) - assert descr2.get_result_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr2.get_return_type() == history.REF + assert descr2.get_result_size() == rffi.sizeof(lltype.Ptr(T)) + assert descr2.get_result_type() == history.REF assert descr2.arg_classes == "r" # U = lltype.GcStruct('U', ('x', lltype.Signed)) assert descr2 == get_call_descr(c0, [lltype.Ptr(U)], lltype.Ptr(U)) # V = lltype.Struct('V', ('x', lltype.Signed)) - assert (get_call_descr(c0, [], lltype.Ptr(V)).get_return_type() == + assert (get_call_descr(c0, [], lltype.Ptr(V)).get_result_type() == history.INT) # - assert (get_call_descr(c0, [], lltype.Void).get_return_type() == + assert (get_call_descr(c0, [], lltype.Void).get_result_type() == history.VOID) # descr4 = get_call_descr(c0, [lltype.Float, lltype.Float], lltype.Float) - assert descr4.get_result_size(False) == rffi.sizeof(lltype.Float) - assert descr4.get_return_type() == history.FLOAT + assert descr4.get_result_size() == rffi.sizeof(lltype.Float) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c0, [lltype.SingleFloat], lltype.SingleFloat) - assert descr5.get_result_size(False) == rffi.sizeof(lltype.SingleFloat) - assert descr5.get_return_type() == "S" + assert descr5.get_result_size() == rffi.sizeof(lltype.SingleFloat) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_get_call_descr_not_translated_longlong(): @@ -305,13 +260,13 @@ c0 = GcCache(False) # descr5 = get_call_descr(c0, [lltype.SignedLongLong], lltype.Signed) - assert descr5.get_result_size(False) == 4 - assert descr5.get_return_type() == history.INT + assert 
descr5.get_result_size() == 4 + assert descr5.get_result_type() == history.INT assert descr5.arg_classes == "L" # descr6 = get_call_descr(c0, [lltype.Signed], lltype.SignedLongLong) - assert descr6.get_result_size(False) == 8 - assert descr6.get_return_type() == "L" + assert descr6.get_result_size() == 8 + assert descr6.get_result_type() == "L" assert descr6.arg_classes == "i" def test_get_call_descr_translated(): @@ -319,18 +274,18 @@ T = lltype.GcStruct('T') U = lltype.GcStruct('U', ('x', lltype.Signed)) descr3 = get_call_descr(c1, [lltype.Ptr(T)], lltype.Ptr(U)) - assert isinstance(descr3.get_result_size(True), Symbolic) - assert descr3.get_return_type() == history.REF + assert isinstance(descr3.get_result_size(), Symbolic) + assert descr3.get_result_type() == history.REF assert descr3.arg_classes == "r" # descr4 = get_call_descr(c1, [lltype.Float, lltype.Float], lltype.Float) - assert isinstance(descr4.get_result_size(True), Symbolic) - assert descr4.get_return_type() == history.FLOAT + assert isinstance(descr4.get_result_size(), Symbolic) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c1, [lltype.SingleFloat], lltype.SingleFloat) - assert isinstance(descr5.get_result_size(True), Symbolic) - assert descr5.get_return_type() == "S" + assert isinstance(descr5.get_result_size(), Symbolic) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_call_descr_extra_info(): @@ -358,6 +313,10 @@ def test_repr_of_descr(): + def repr_of_descr(descr): + s = descr.repr_of_descr() + assert ',' not in s # makes the life easier for pypy.tool.jitlogparser + return s c0 = GcCache(False) T = lltype.GcStruct('T') S = lltype.GcStruct('S', ('x', lltype.Char), @@ -365,33 +324,34 @@ ('z', lltype.Ptr(T))) descr1 = get_size_descr(c0, S) s = symbolic.get_size(S, False) - assert descr1.repr_of_descr() == '' % s + assert repr_of_descr(descr1) == '' % s # descr2 = get_field_descr(c0, S, 'y') o, _ = symbolic.get_field_token(S, 'y', False) - assert descr2.repr_of_descr() == '' % o + assert repr_of_descr(descr2) == '' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) - assert descr2i.repr_of_descr() == '' % o + assert repr_of_descr(descr2i) == '' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) - assert descr3.repr_of_descr() == '' + o = symbolic.get_size(lltype.Ptr(S), False) + assert repr_of_descr(descr3) == '' % o # descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert descr3i.repr_of_descr() == '' + assert repr_of_descr(descr3i) == '' # descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) - assert 'GcPtrCallDescr' in descr4.repr_of_descr() + assert repr_of_descr(descr4) == '' % o # descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) - assert 'CharCallDescr' in descr4i.repr_of_descr() + assert repr_of_descr(descr4i) == '' # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) - assert 'FloatCallDescr' in descr4f.repr_of_descr() + assert repr_of_descr(descr4f) == '' # descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat) - assert 'SingleFloatCallDescr' in descr5f.repr_of_descr() + assert repr_of_descr(descr5f) == '' def test_call_stubs_1(): c0 = GcCache(False) @@ -401,10 +361,10 @@ def f(a, b): return 'c' - call_stub = descr1.call_stub fnptr = llhelper(lltype.Ptr(lltype.FuncType(ARGS, RES)), f) - res = call_stub(rffi.cast(lltype.Signed, fnptr), [1, 2], None, None) + res = 
descr1.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [1, 2], None, None) assert res == ord('c') def test_call_stubs_2(): @@ -421,8 +381,8 @@ a = lltype.malloc(ARRAY, 3) opaquea = lltype.cast_opaque_ptr(llmemory.GCREF, a) a[0] = 1 - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [], [opaquea], [longlong.getfloatstorage(3.5)]) + res = descr2.call_stub_f(rffi.cast(lltype.Signed, fnptr), + [], [opaquea], [longlong.getfloatstorage(3.5)]) assert longlong.getrealfloat(res) == 4.5 def test_call_stubs_single_float(): @@ -445,6 +405,22 @@ a = intmask(singlefloat2uint(r_singlefloat(-10.0))) b = intmask(singlefloat2uint(r_singlefloat(3.0))) c = intmask(singlefloat2uint(r_singlefloat(2.0))) - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [a, b, c], [], []) + res = descr2.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [a, b, c], [], []) assert float(uint2singlefloat(rffi.r_uint(res))) == -11.5 + +def test_field_arraylen_descr(): + c0 = GcCache(True) + A1 = lltype.GcArray(lltype.Signed) + fielddescr = get_field_arraylen_descr(c0, A1) + assert isinstance(fielddescr, FieldDescr) + ofs = fielddescr.offset + assert repr(ofs) == '< ArrayLengthOffset >' + # + fielddescr = get_field_arraylen_descr(c0, rstr.STR) + ofs = fielddescr.offset + assert repr(ofs) == ("< " + " 'chars'> + < ArrayLengthOffset" + " > >") + # caching: + assert fielddescr is get_field_arraylen_descr(c0, rstr.STR) diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -1,5 +1,6 @@ from pypy.rlib.libffi import types from pypy.jit.codewriter.longlong import is_64_bit +from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport.ffisupport import * @@ -13,44 +14,52 @@ def test_call_descr_dynamic(): args = [types.sint, types.pointer] - descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, ffi_flags=42) - assert isinstance(descr, DynamicIntCallDescr) + descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, None, + ffi_flags=42) + assert isinstance(descr, CallDescr) + assert descr.result_type == 'i' + assert descr.result_flag == FLAG_SIGNED assert descr.arg_classes == 'ii' assert descr.get_ffi_flags() == 42 args = [types.sint, types.double, types.pointer] - descr = get_call_descr_dynamic(FakeCPU(), args, types.void) + descr = get_call_descr_dynamic(FakeCPU(), args, types.void, None, 42) assert descr is None # missing floats descr = get_call_descr_dynamic(FakeCPU(supports_floats=True), - args, types.void, ffi_flags=43) - assert isinstance(descr, VoidCallDescr) + args, types.void, None, ffi_flags=43) + assert descr.result_type == 'v' + assert descr.result_flag == FLAG_VOID assert descr.arg_classes == 'ifi' assert descr.get_ffi_flags() == 43 - descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8, None, 42) + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_SIGNED assert descr.is_result_signed() == True - descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8, None, 42) + assert isinstance(descr, CallDescr) + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_UNSIGNED assert 
descr.is_result_signed() == False if not is_64_bit: - descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong) + descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong, + None, 42) assert descr is None # missing longlongs descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True), - [], types.slonglong, ffi_flags=43) - assert isinstance(descr, LongLongCallDescr) + [], types.slonglong, None, ffi_flags=43) + assert isinstance(descr, CallDescr) + assert descr.result_flag == FLAG_FLOAT + assert descr.result_type == 'L' assert descr.get_ffi_flags() == 43 else: assert types.slonglong is types.slong - descr = get_call_descr_dynamic(FakeCPU(), [], types.float) + descr = get_call_descr_dynamic(FakeCPU(), [], types.float, None, 42) assert descr is None # missing singlefloats descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True), - [], types.float, ffi_flags=44) - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - assert isinstance(descr, SingleFloatCallDescr) + [], types.float, None, ffi_flags=44) + assert descr.result_flag == FLAG_UNSIGNED + assert descr.result_type == 'S' assert descr.get_ffi_flags() == 44 diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -6,6 +6,7 @@ from pypy.jit.backend.llsupport.gc import * from pypy.jit.backend.llsupport import symbolic from pypy.jit.metainterp.gc import get_description +from pypy.jit.metainterp.history import BoxPtr, BoxInt, ConstPtr from pypy.jit.metainterp.resoperation import get_deep_immutable_oplist from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE @@ -15,12 +16,12 @@ gc_ll_descr = GcLLDescr_boehm(None, None, None) # record = [] - prev_funcptr_for_new = gc_ll_descr.funcptr_for_new - def my_funcptr_for_new(size): - p = prev_funcptr_for_new(size) + prev_malloc_fn_ptr = gc_ll_descr.malloc_fn_ptr + def my_malloc_fn_ptr(size): + p = prev_malloc_fn_ptr(size) record.append((size, p)) return p - gc_ll_descr.funcptr_for_new = my_funcptr_for_new + gc_ll_descr.malloc_fn_ptr = my_malloc_fn_ptr # # ---------- gc_malloc ---------- S = lltype.GcStruct('S', ('x', lltype.Signed)) @@ -32,8 +33,8 @@ A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(gc_ll_descr, A) p = gc_ll_descr.gc_malloc_array(arraydescr, 10) - assert record == [(arraydescr.get_base_size(False) + - 10 * arraydescr.get_item_size(False), p)] + assert record == [(arraydescr.basesize + + 10 * arraydescr.itemsize, p)] del record[:] # ---------- gc_malloc_str ---------- p = gc_ll_descr.gc_malloc_str(10) @@ -246,22 +247,28 @@ def __init__(self): self.record = [] + def _malloc(self, type_id, size): + tid = llop.combine_ushort(lltype.Signed, type_id, 0) + x = llmemory.raw_malloc(self.gcheaderbuilder.size_gc_header + size) + x += self.gcheaderbuilder.size_gc_header + return x, tid + def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size, - has_finalizer, contains_weakptr): + has_finalizer, has_light_finalizer, + contains_weakptr): assert not contains_weakptr - p = llmemory.raw_malloc(size) + assert not has_finalizer + assert not has_light_finalizer + p, tid = self._malloc(type_id, size) p = llmemory.cast_adr_to_ptr(p, RESTYPE) - flags = int(has_finalizer) << 16 - tid = llop.combine_ushort(lltype.Signed, type_id, flags) self.record.append(("fixedsize", repr(size), tid, p)) return p def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, itemsize, 
offset_to_length): - p = llmemory.raw_malloc(size + itemsize * length) + p, tid = self._malloc(type_id, size + itemsize * length) (p + offset_to_length).signed[0] = length p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("varsize", tid, length, repr(size), repr(itemsize), repr(offset_to_length), p)) @@ -320,43 +327,40 @@ gc_ll_descr = GcLLDescr_framework(gcdescr, FakeTranslator(), None, llop1) gc_ll_descr.initialize() + llop1.gcheaderbuilder = gc_ll_descr.gcheaderbuilder self.llop1 = llop1 self.gc_ll_descr = gc_ll_descr self.fake_cpu = FakeCPU() - def test_args_for_new(self): - S = lltype.GcStruct('S', ('x', lltype.Signed)) - sizedescr = get_size_descr(self.gc_ll_descr, S) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed - A = lltype.GcArray(lltype.Signed) - arraydescr = get_array_descr(self.gc_ll_descr, A) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed +## def test_args_for_new(self): +## S = lltype.GcStruct('S', ('x', lltype.Signed)) +## sizedescr = get_size_descr(self.gc_ll_descr, S) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed +## A = lltype.GcArray(lltype.Signed) +## arraydescr = get_array_descr(self.gc_ll_descr, A) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed def test_gc_malloc(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) sizedescr = get_size_descr(self.gc_ll_descr, S) p = self.gc_ll_descr.gc_malloc(sizedescr) - assert self.llop1.record == [("fixedsize", - repr(sizedescr.size), + assert lltype.typeOf(p) == llmemory.GCREF + assert self.llop1.record == [("fixedsize", repr(sizedescr.size), sizedescr.tid, p)] - assert repr(self.gc_ll_descr.args_for_new(sizedescr)) == repr( - [sizedescr.size, sizedescr.tid]) def test_gc_malloc_array(self): A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(self.gc_ll_descr, A) p = self.gc_ll_descr.gc_malloc_array(arraydescr, 10) assert self.llop1.record == [("varsize", arraydescr.tid, 10, - repr(arraydescr.get_base_size(True)), - repr(arraydescr.get_item_size(True)), - repr(arraydescr.get_ofs_length(True)), + repr(arraydescr.basesize), + repr(arraydescr.itemsize), + repr(arraydescr.lendescr.offset), p)] - assert repr(self.gc_ll_descr.args_for_new_array(arraydescr)) == repr( - [arraydescr.get_item_size(True), arraydescr.tid]) def test_gc_malloc_str(self): p = self.gc_ll_descr.gc_malloc_str(10) @@ -402,10 +406,11 @@ gc_ll_descr = self.gc_ll_descr llop1 = self.llop1 # - newops = [] + rewriter = GcRewriterAssembler(gc_ll_descr, None) + newops = rewriter.newops v_base = BoxPtr() v_value = BoxPtr() - gc_ll_descr._gen_write_barrier(newops, v_base, v_value) + rewriter.gen_write_barrier(v_base, v_value) assert llop1.record == [] assert len(newops) == 1 assert newops[0].getopnum() == rop.COND_CALL_GC_WB @@ -425,8 +430,7 @@ operations = gc_ll_descr.rewrite_assembler(None, operations, []) assert len(operations) == 0 - def test_rewrite_assembler_1(self): - # check recording of ConstPtrs + def test_record_constptrs(self): class MyFakeCPU(object): def cast_adr_to_int(self, adr): assert adr == "some fake address" @@ -453,189 +457,6 @@ assert operations2 == operations assert gcrefs == [s_gcref] - def test_rewrite_assembler_2(self): - # check write barriers before SETFIELD_GC - v_base = BoxPtr() - v_value = BoxPtr() - 
field_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETFIELD_GC, [v_base, v_value], None, - descr=field_descr), - ] - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, operations, - []) - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETFIELD_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_value - assert operations[1].getdescr() == field_descr - - def test_rewrite_assembler_3(self): - # check write barriers before SETARRAYITEM_GC - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_4(self): - # check write barriers before SETARRAYITEM_GC, - # if we have actually a write_barrier_from_array. 
- self.llop1._have_wb_from_array = True - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - if isinstance(v_new_length, ConstInt) and v_new_length.value < 130: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - else: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB_ARRAY - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_index - assert operations[0].getarg(2) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_initialization_store(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - # no write barrier - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_2(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - wbdescr = self.gc_ll_descr.write_barrier_descr - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_3(self): - A = lltype.GcArray(lltype.Ptr(lltype.GcStruct('S'))) - arraydescr = get_array_descr(self.gc_ll_descr, A) - ops = parse(""" - [p1] - p0 = new_array(3, 
descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) class TestFrameworkMiniMark(TestFramework): gc = 'minimark' diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -2,6 +2,8 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan +from pypy.jit.tool.oparser import parse +from pypy.jit.backend.detect_cpu import getcpuclass def newboxes(*values): return [BoxInt(v) for v in values] @@ -40,8 +42,13 @@ def frame_size(self, box_type): if box_type == FLOAT: return 2 + elif box_type == INT: + return 1 else: - return 1 + raise ValueError(box_type) + def get_loc_index(self, loc): + assert isinstance(loc, FakeFramePos) + return loc.pos class MockAsm(object): def __init__(self): @@ -280,7 +287,7 @@ rm.force_allocate_reg(b) rm.before_call() assert len(rm.reg_bindings) == 2 - assert fm.frame_depth == 2 + assert fm.get_frame_depth() == 2 assert len(asm.moves) == 2 rm._check_invariants() rm.after_call(boxes[-1]) @@ -303,7 +310,7 @@ rm.force_allocate_reg(b) rm.before_call(save_all_regs=True) assert len(rm.reg_bindings) == 0 - assert fm.frame_depth == 4 + assert fm.get_frame_depth() == 4 assert len(asm.moves) == 4 rm._check_invariants() rm.after_call(boxes[-1]) @@ -325,7 +332,7 @@ xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm) xrm.loc(f0) rm.loc(b0) - assert fm.frame_depth == 3 + assert fm.get_frame_depth() == 3 @@ -346,3 +353,123 @@ spilled2 = rm.force_allocate_reg(b5) assert spilled2 is loc rm._check_invariants() + + + def test_hint_frame_locations_1(self): + b0, = newboxes(0) + fm = TFrameManager() + loc123 = FakeFramePos(123, INT) + fm.hint_frame_locations[b0] = loc123 + assert fm.get_frame_depth() == 0 + loc = fm.loc(b0) + assert loc == loc123 + assert fm.get_frame_depth() == 124 + + def test_hint_frame_locations_2(self): + b0, b1, b2 = newboxes(0, 1, 2) + longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_allocate_reg(b2) + rm.force_spill_var(b0) + loc = rm.loc(b0) + assert isinstance(loc, FakeFramePos) + assert fm.get_loc_index(loc) == 0 + rm.position = 1 + assert fm.used == [True] + rm.possibly_free_var(b0) + assert fm.used == [False] + # + fm.hint_frame_locations[b1] = loc + rm.force_spill_var(b1) + loc1 = rm.loc(b1) + assert loc1 == loc + assert fm.used == [True] + # + fm.hint_frame_locations[b2] = loc + rm.force_spill_var(b2) + loc2 = rm.loc(b2) + assert loc2 != loc1 # because it was not free + assert fm.used == [True, True] + # + rm._check_invariants() + + def test_frame_manager_basic(self): + b0, b1 = newboxes(0, 1) + fm = TFrameManager() + loc0 = fm.loc(b0) + assert fm.get_loc_index(loc0) == 0 + # + assert fm.get(b1) is None + loc1 = fm.loc(b1) + assert fm.get_loc_index(loc1) == 1 + assert 
fm.get(b1) == loc1 + # + loc0b = fm.loc(b0) + assert loc0b == loc0 + # + fm.loc(BoxInt()) + assert fm.get_frame_depth() == 3 + # + f0 = BoxFloat() + locf0 = fm.loc(f0) + assert fm.get_loc_index(locf0) == 4 + assert fm.get_frame_depth() == 6 + # + f1 = BoxFloat() + locf1 = fm.loc(f1) + assert fm.get_loc_index(locf1) == 6 + assert fm.get_frame_depth() == 8 + assert fm.used == [True, True, True, False, True, True, True, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(f1) + assert fm.used == [False, True, True, False, True, True, False, False] + # + fm.reserve_location_in_frame(1) + assert fm.get_frame_depth() == 9 + assert fm.used == [False, True, True, False, True, True, False, False, True] + # + assert b0 not in fm.bindings + fm.set_binding(b0, loc0) + assert b0 in fm.bindings + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + b3 = BoxInt() + assert not fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) # already free + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b3) + assert fm.used == [False, True, True, False, True, True, False, False, True] + f3 = BoxFloat() + assert not fm.try_to_reuse_location(f3, fm.frame_pos(0, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(1, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(2, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(3, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(4, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(5, FLOAT)) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(f3, fm.frame_pos(6, FLOAT)) + assert fm.used == [False, True, True, False, True, True, True, True, True] + # + fm.used = [False] + assert fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True, True] + # + fm.used = [True] + assert not fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True] diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -0,0 +1,668 @@ +from pypy.jit.backend.llsupport.descr import * +from pypy.jit.backend.llsupport.gc import * +from pypy.jit.metainterp.gc import get_description +from pypy.jit.tool.oparser import parse +from pypy.jit.metainterp.optimizeopt.util import equaloplists +from pypy.jit.codewriter.heaptracker import register_known_gctype + + +class Evaluator(object): + def __init__(self, scope): + self.scope = scope + def __getitem__(self, key): + return eval(key, self.scope) + + +class RewriteTests(object): + def check_rewrite(self, frm_operations, to_operations, **namespace): + S = lltype.GcStruct('S', ('x', lltype.Signed), + ('y', lltype.Signed)) + sdescr = get_size_descr(self.gc_ll_descr, S) + sdescr.tid = 1234 + # + T = lltype.GcStruct('T', ('y', lltype.Signed), + ('z', lltype.Ptr(S)), + ('t', lltype.Signed)) + tdescr = 
get_size_descr(self.gc_ll_descr, T) + tdescr.tid = 5678 + tzdescr = get_field_descr(self.gc_ll_descr, T, 'z') + # + A = lltype.GcArray(lltype.Signed) + adescr = get_array_descr(self.gc_ll_descr, A) + adescr.tid = 4321 + alendescr = adescr.lendescr + # + B = lltype.GcArray(lltype.Char) + bdescr = get_array_descr(self.gc_ll_descr, B) + bdescr.tid = 8765 + blendescr = bdescr.lendescr + # + C = lltype.GcArray(lltype.Ptr(S)) + cdescr = get_array_descr(self.gc_ll_descr, C) + cdescr.tid = 8111 + clendescr = cdescr.lendescr + # + E = lltype.GcStruct('Empty') + edescr = get_size_descr(self.gc_ll_descr, E) + edescr.tid = 9000 + # + vtable_descr = self.gc_ll_descr.fielddescr_vtable + O = lltype.GcStruct('O', ('parent', rclass.OBJECT), + ('x', lltype.Signed)) + o_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) + register_known_gctype(self.cpu, o_vtable, O) + # + tiddescr = self.gc_ll_descr.fielddescr_tid + wbdescr = self.gc_ll_descr.write_barrier_descr + WORD = globals()['WORD'] + # + strdescr = self.gc_ll_descr.str_descr + unicodedescr = self.gc_ll_descr.unicode_descr + strlendescr = strdescr.lendescr + unicodelendescr = unicodedescr.lendescr + # + namespace.update(locals()) + # + for funcname in self.gc_ll_descr._generated_functions: + namespace[funcname] = self.gc_ll_descr.get_malloc_fn(funcname) + namespace[funcname + '_descr'] = getattr(self.gc_ll_descr, + '%s_descr' % funcname) + # + ops = parse(frm_operations, namespace=namespace) + expected = parse(to_operations % Evaluator(namespace), + namespace=namespace) + operations = self.gc_ll_descr.rewrite_assembler(self.cpu, + ops.operations, + []) + equaloplists(operations, expected.operations) + + +class TestBoehm(RewriteTests): + def setup_method(self, meth): + class FakeCPU(object): + def sizeof(self, STRUCT): + return SizeDescrWithVTable(102) + self.cpu = FakeCPU() + self.gc_ll_descr = GcLLDescr_boehm(None, None, None) + + def test_new(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_no_collapsing(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + p1 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_new_array_fixed(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(adescr.basesize + 10 * adescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_new_array_variable(self): + self.check_rewrite(""" + [i1] + p0 = new_array(i1, descr=adescr) + jump() + """, """ + [i1] From noreply at buildbot.pypy.org Mon Dec 26 13:51:27 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Dec 2011 13:51:27 +0100 (CET) Subject: [pypy-commit] pypy concurrent-marksweep: hg merge default Message-ID: <20111226125127.A54EB82BA2@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: concurrent-marksweep Changeset: r50872:130f4c04e5c4 Date: 2011-12-26 13:50 +0100 http://bitbucket.org/pypy/pypy/changeset/130f4c04e5c4/ Log: hg merge default diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -98,7 +98,6 @@ 
"Abstract. Get the expected number of locals." raise TypeError, "abstract" - @jit.dont_look_inside def fast2locals(self): # Copy values from the fastlocals to self.w_locals if self.w_locals is None: @@ -112,7 +111,6 @@ w_name = self.space.wrap(name) self.space.setitem(self.w_locals, w_name, w_value) - @jit.dont_look_inside def locals2fast(self): # Copy values from self.w_locals to the fastlocals assert self.w_locals is not None diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -1,10 +1,13 @@ from __future__ import with_statement from pypy.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot) + LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot, + FakeMetaInterpStaticData) from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken from pypy.jit.metainterp.resoperation import rop, opname, ResOperation from pypy.jit.metainterp.optimize import InvalidLoop from py.test import raises +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization +from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method class BaseTestMultiLabel(BaseTest): enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" @@ -84,6 +87,8 @@ return optimized +class OptimizeoptTestMultiLabel(BaseTestMultiLabel): + def test_simple(self): ops = """ [i1] @@ -381,6 +386,55 @@ """ self.optimize_loop(ops, expected) -class TestLLtype(BaseTestMultiLabel, LLtypeMixin): + +class OptRenameStrlen(Optimization): + def propagate_forward(self, op): + dispatch_opt(self, op) + + def optimize_STRLEN(self, op): + newop = op.clone() + newop.result = op.result.clonebox() + self.emit_operation(newop) + self.make_equal_to(op.result, self.getvalue(newop.result)) + +dispatch_opt = make_dispatcher_method(OptRenameStrlen, 'optimize_', + default=OptRenameStrlen.emit_operation) + +class BaseTestOptimizerRenamingBoxes(BaseTestMultiLabel): + + def _do_optimize_loop(self, loop, call_pure_results): + from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll + from pypy.jit.metainterp.optimizeopt.util import args_dict + from pypy.jit.metainterp.optimizeopt.pure import OptPure + + self.loop = loop + loop.call_pure_results = args_dict() + metainterp_sd = FakeMetaInterpStaticData(self.cpu) + optimize_unroll(metainterp_sd, loop, [OptRenameStrlen(), OptPure()], True) + + def test_optimizer_renaming_boxes(self): + ops = """ + [p1] + i1 = strlen(p1) + label(p1) + i2 = strlen(p1) + i3 = int_add(i2, 7) + jump(p1) + """ + expected = """ + [p1] + i1 = strlen(p1) + label(p1, i1) + i11 = same_as(i1) + i2 = int_add(i11, 7) + jump(p1, i11) + """ + self.optimize_loop(ops, expected) + + + +class TestLLtype(OptimizeoptTestMultiLabel, LLtypeMixin): pass +class TestOptimizerRenamingBoxesLLtype(BaseTestOptimizerRenamingBoxes, LLtypeMixin): + pass diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7759,7 +7759,7 @@ jump(i0, p0, i2) """ self.optimize_loop(ops, expected) - + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- 
a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -269,10 +269,8 @@ # in case it does, we would prefer to be suboptimal in asm # to a fatal RPython exception. if newresult is not op.result and not newvalue.is_constant(): - # XXX fix me? - #self.short_boxes.alias(newresult, op.result) op = ResOperation(rop.SAME_AS, [op.result], newresult) - self.optimizer._newoperations = [op] + self.optimizer._newoperations + self.optimizer._newoperations.append(op) self.optimizer.flush() self.optimizer.emitting_dissabled = False From noreply at buildbot.pypy.org Mon Dec 26 15:37:55 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Dec 2011 15:37:55 +0100 (CET) Subject: [pypy-commit] pypy default: Simplify the graph before turning it into a generator, to get rid Message-ID: <20111226143755.EA3B182BA2@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50873:d373deb39b4d Date: 2011-12-26 15:22 +0100 http://bitbucket.org/pypy/pypy/changeset/d373deb39b4d/ Log: Simplify the graph before turning it into a generator, to get rid of the extra variables passed around. diff --git a/pypy/translator/generator.py b/pypy/translator/generator.py --- a/pypy/translator/generator.py +++ b/pypy/translator/generator.py @@ -2,7 +2,7 @@ from pypy.objspace.flow.model import Variable, Constant, FunctionGraph from pypy.translator.unsimplify import insert_empty_startblock from pypy.translator.unsimplify import split_block -from pypy.translator.simplify import eliminate_empty_blocks +from pypy.translator.simplify import eliminate_empty_blocks, simplify_graph from pypy.tool.sourcetools import func_with_new_name from pypy.interpreter.argument import Signature @@ -64,6 +64,7 @@ def next(self): entry = self.current self.current = None + assert entry is not None # else, recursive generator invocation (next_entry, return_value) = func(entry) self.current = next_entry return return_value @@ -91,6 +92,10 @@ block.inputargs = [v_entry1] def tweak_generator_body_graph(Entry, graph): + # First, always run simplify_graph in order to reduce the number of + # variables passed around + simplify_graph(graph) + # assert graph.startblock.operations[0].opname == 'generator_mark' graph.startblock.operations.pop(0) # From noreply at buildbot.pypy.org Mon Dec 26 15:37:57 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Dec 2011 15:37:57 +0100 (CET) Subject: [pypy-commit] pypy default: A failing test. Message-ID: <20111226143757.2820082BA2@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50874:c9706c6e6914 Date: 2011-12-26 15:22 +0100 http://bitbucket.org/pypy/pypy/changeset/c9706c6e6914/ Log: A failing test. diff --git a/pypy/rpython/test/test_generator.py b/pypy/rpython/test/test_generator.py --- a/pypy/rpython/test/test_generator.py +++ b/pypy/rpython/test/test_generator.py @@ -54,6 +54,26 @@ res = self.interpret(f, [0]) assert res == 42 + def test_except_block(self): + def foo(): + raise ValueError + def g(a, b, c): + yield a + yield b + try: + foo() + except ValueError: + pass + yield c + def f(): + gen = g(3, 5, 8) + x = gen.next() * 100 + x += gen.next() * 10 + x += gen.next() + return x + res = self.interpret(f, []) + assert res == 358 + class TestLLtype(BaseTestGenerator, LLRtypeMixin): pass From noreply at buildbot.pypy.org Mon Dec 26 15:37:58 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Dec 2011 15:37:58 +0100 (CET) Subject: [pypy-commit] pypy default: Fix the test. 
Message-ID: <20111226143758.5325982BA2@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50875:cce139c7a9c6 Date: 2011-12-26 15:31 +0100 http://bitbucket.org/pypy/pypy/changeset/cce139c7a9c6/ Log: Fix the test. diff --git a/pypy/translator/generator.py b/pypy/translator/generator.py --- a/pypy/translator/generator.py +++ b/pypy/translator/generator.py @@ -105,12 +105,20 @@ # mappings = [Entry] # + stopblock = Block([]) + v0 = Variable(); v1 = Variable() + stopblock.operations = [ + SpaceOperation('simple_call', [Constant(StopIteration)], v0), + SpaceOperation('type', [v0], v1), + ] + stopblock.closeblock(Link([v1, v0], graph.exceptblock)) + # for block in list(graph.iterblocks()): for exit in block.exits: if exit.target is graph.returnblock: - exit.args = [Constant(StopIteration), - Constant(StopIteration())] - exit.target = graph.exceptblock + exit.args = [] + exit.target = stopblock + assert block is not stopblock for index in range(len(block.operations)-1, -1, -1): op = block.operations[index] if op.opname == 'yield': From noreply at buildbot.pypy.org Mon Dec 26 16:35:55 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Dec 2011 16:35:55 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add Leysin draft announcement Message-ID: <20111226153555.60B8482BA2@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r3991:9091b9fd97f8 Date: 2011-12-26 16:34 +0100 http://bitbucket.org/pypy/extradoc/changeset/9091b9fd97f8/ Log: Add Leysin draft announcement diff --git a/sprintinfo/leysin-winter-2011/announcement.txt b/sprintinfo/leysin-winter-2012/announcement.txt copy from sprintinfo/leysin-winter-2011/announcement.txt copy to sprintinfo/leysin-winter-2012/announcement.txt --- a/sprintinfo/leysin-winter-2011/announcement.txt +++ b/sprintinfo/leysin-winter-2012/announcement.txt @@ -1,30 +1,16 @@ ===================================================================== - PyPy Leysin Winter Sprint (16-22nd January 2011) + PyPy Leysin Winter Sprint (15-22nd January 2012) ===================================================================== The next PyPy sprint will be in Leysin, Switzerland, for the -seventh time. This is a fully public sprint: newcomers and topics +eighth time. This is a fully public sprint: newcomers and topics other than those proposed below are welcome. ------------------------------ Goals and topics of the sprint ------------------------------ -* Now that we have released 1.4, and plan to release 1.4.1 soon - (possibly before the sprint), the sprint itself is going to be - mainly working on fixing issues reported by various users. Of - course this does not prevent people from showing up with a more - precise interest in mind. If there are newcomers, we will gladly - give introduction talks. - -* We will also work on polishing and merging the long-standing - branches that are around, which could eventually lead to the - next PyPy release. These branches are notably: - - - fast-forward (Python 2.7 support, by Benjamin, Amaury, and others) - - jit-unroll-loops (improve JITting of smaller loops, by Hakan) - - arm-backend (a JIT backend for ARM, by David) - - jitypes2 (fast ctypes calls with the JIT, by Antonio). +* xxx * And as usual, the main side goal is to have fun in winter sports :-) We can take a day off for ski. @@ -33,8 +19,9 @@ Exact times ----------- -The work days should be 16-22 January 2011. People may arrive on -the 15th already and/or leave on the 23rd. +The work days should be 15-21 January 2011 (Sunday-Saturday). 
The +official plans are for people to arrive on the 14th or the 15th, and to +leave on the 22nd. ----------------------- Location & Accomodation @@ -56,13 +43,14 @@ expensive) and maybe the possibility to get a single room if you really want to. -Please register by svn: +Please register by Mercurial:: - http://codespeak.net/svn/pypy/extradoc/sprintinfo/leysin-winter-2011/people.txt + https://bitbucket.org/pypy/extradoc/ + https://bitbucket.org/pypy/extradoc/raw/extradoc/sprintinfo/leysin-winter-2012 -or on the pypy-sprint mailing list if you do not yet have check-in rights: +or on the pypy-dev mailing list if you do not yet have check-in rights: - http://codespeak.net/mailman/listinfo/pypy-sprint + http://mail.python.org/mailman/listinfo/pypy-dev You need a Swiss-to-(insert country here) power adapter. There will be some Swiss-to-EU adapters around -- bring a EU-format power strip if you From noreply at buildbot.pypy.org Mon Dec 26 16:38:53 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Dec 2011 16:38:53 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add the standard people.txt Message-ID: <20111226153853.9F7E082BA2@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r3992:fc335a8ec80e Date: 2011-12-26 16:38 +0100 http://bitbucket.org/pypy/extradoc/changeset/fc335a8ec80e/ Log: Add the standard people.txt diff --git a/sprintinfo/leysin-winter-2012/people.txt b/sprintinfo/leysin-winter-2012/people.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/leysin-winter-2012/people.txt @@ -0,0 +1,57 @@ + +People coming to the Leysin sprint Winter 2011 +================================================== + +People who have a ``?`` in their arrive/depart or accomodation +column are known to be coming but there are no details +available yet from them. + + +==================== ============== ======================= + Name Arrive/Depart Accomodation +==================== ============== ======================= +Armin Rigo private +==================== ============== ======================= + + +People on the following list were present at previous sprints: + +==================== ============== ===================== + Name Arrive/Depart Accomodation +==================== ============== ===================== +Antonio Cuni ? ? +Michael Foord ? ? +Maciej Fijalkowski ? ? +David Schneider ? ? +Jacob Hallen ? ? +Laura Creighton ? ? +Hakan Ardo ? ? +Carl Friedrich Bolz ? ? +Samuele Pedroni ? ? +Anders Hammarquist ? ? +Christian Tismer ? ? +Niko Matsakis ? ? +Toby Watson ? ? +Paul deGrandis ? ? +Michael Hudson ? ? +Anders Lehmann ? ? +Niklaus Haldimann ? ? +Lene Wagner ? ? +Amaury Forgeot d'Arc ? ? +Valentino Volonghi ? ? +Boris Feigin ? ? +Andrew Thompson ? ? +Bert Freudenberg ? ? +Beatrice Duering ? ? +Richard Emslie ? ? +Johan Hahn ? ? +Stephan Diehl ? ? +Alexander Schremmer ? ? +Anders Chrigstroem ? ? +Eric van Riet Paap ? ? +Holger Krekel ? ? +Guido Wesdorp ? ? +Leonardo Santagada ? ? +Alexandre Fayolle ? ? +Sylvain Th�nault ? ? +==================== ============== ===================== From noreply at buildbot.pypy.org Mon Dec 26 16:46:37 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Dec 2011 16:46:37 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add some topics. Message-ID: <20111226154637.52AF882BA2@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r3993:1eeeeaed6a6d Date: 2011-12-26 16:46 +0100 http://bitbucket.org/pypy/extradoc/changeset/1eeeeaed6a6d/ Log: Add some topics. 
diff --git a/sprintinfo/leysin-winter-2012/announcement.txt b/sprintinfo/leysin-winter-2012/announcement.txt --- a/sprintinfo/leysin-winter-2012/announcement.txt +++ b/sprintinfo/leysin-winter-2012/announcement.txt @@ -10,7 +10,14 @@ Goals and topics of the sprint ------------------------------ -* xxx +* Py3k topics: ... + +* NumPyPy topics: ... + +* JIT backends: integrate tests for ARM; look at the PowerPC 64; + maybe try again to write an LLVM- or GCC-based one + +* STM and STM-related topics; or the Concurrent Mark-n-Sweep GC * And as usual, the main side goal is to have fun in winter sports :-) We can take a day off for ski. From noreply at buildbot.pypy.org Mon Dec 26 16:57:29 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Mon, 26 Dec 2011 16:57:29 +0100 (CET) Subject: [pypy-commit] pypy jit-improve-nested-loops: hg merge default Message-ID: <20111226155729.9327E82BA2@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-improve-nested-loops Changeset: r50876:f75c6c5a133a Date: 2011-12-26 13:49 +0100 http://bitbucket.org/pypy/pypy/changeset/f75c6c5a133a/ Log: hg merge default diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -180,7 +180,12 @@ if name is None: name = pyobj.func_name if signature is None: - signature = cpython_code_signature(pyobj.func_code) + if hasattr(pyobj, '_generator_next_method_of_'): + from pypy.interpreter.argument import Signature + signature = Signature(['entry']) # haaaaaack + defaults = () + else: + signature = cpython_code_signature(pyobj.func_code) if defaults is None: defaults = pyobj.func_defaults self.name = name diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -98,7 +98,6 @@ "Abstract. Get the expected number of locals." raise TypeError, "abstract" - @jit.dont_look_inside def fast2locals(self): # Copy values from the fastlocals to self.w_locals if self.w_locals is None: @@ -112,7 +111,6 @@ w_name = self.space.wrap(name) self.space.setitem(self.w_locals, w_name, w_value) - @jit.dont_look_inside def locals2fast(self): # Copy values from self.w_locals to the fastlocals assert self.w_locals is not None diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -39,6 +39,7 @@ from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter import longlong from pypy.rlib.rarithmetic import intmask +from pypy.rlib.objectmodel import compute_unique_id # darwin requires the stack to be 16 bytes aligned on calls. 
Same for gcc 4.5.0, # better safe than sorry @@ -147,12 +148,13 @@ def finish_once(self): if self._debug: debug_start('jit-backend-counts') - for struct in self.loop_run_counters: - if struct.bridge: - prefix = 'bridge ' + for i in range(len(self.loop_run_counters)): + struct = self.loop_run_counters[i] + if not struct.bridge: + prefix = 'TargetToken(%d)' % struct.number else: - prefix = 'loop ' - debug_print(prefix + str(struct.number) + ':' + str(struct.i)) + prefix = 'bridge ' + str(struct.number) + debug_print(prefix + ':' + str(struct.i)) debug_stop('jit-backend-counts') def _build_float_constants(self): @@ -422,8 +424,8 @@ self.setup(looptoken) if log: - self._register_counter(False, looptoken.number) - operations = self._inject_debugging_code(looptoken, operations) + operations = self._inject_debugging_code(looptoken, operations, + False, looptoken.number) regalloc = RegAlloc(self, self.cpu.translate_support_code) # @@ -489,8 +491,8 @@ self.setup(original_loop_token) if log: - self._register_counter(True, descr_number) - operations = self._inject_debugging_code(faildescr, operations) + operations = self._inject_debugging_code(faildescr, operations, + True, descr_number) arglocs = self.rebuild_faillocs_from_descr(failure_recovery) if not we_are_translated(): @@ -597,17 +599,21 @@ return self.mc.materialize(self.cpu.asmmemmgr, allblocks, self.cpu.gc_ll_descr.gcrootmap) - def _register_counter(self, bridge, number): - if self._debug: - # YYY very minor leak -- we need the counters to stay alive - # forever, just because we want to report them at the end - # of the process - struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', - track_allocation=False) - struct.i = 0 - struct.bridge = int(bridge) + def _register_counter(self, bridge, number, token): + # YYY very minor leak -- we need the counters to stay alive + # forever, just because we want to report them at the end + # of the process + struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', + track_allocation=False) + struct.i = 0 + struct.bridge = int(bridge) + if bridge: struct.number = number - self.loop_run_counters.append(struct) + else: + assert token + struct.number = compute_unique_id(token) + self.loop_run_counters.append(struct) + return struct def _find_failure_recovery_bytecode(self, faildescr): adr_jump_offset = faildescr._x86_adr_jump_offset @@ -651,27 +657,37 @@ targettoken._x86_loop_code += rawstart self.target_tokens_currently_compiling = None + def _append_debugging_code(self, operations, bridge, number, token): + counter = self._register_counter(bridge, number, token) + c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) + box = BoxInt() + box2 = BoxInt() + ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], + box, descr=self.debug_counter_descr), + ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), + ResOperation(rop.SETFIELD_RAW, [c_adr, box2], + None, descr=self.debug_counter_descr)] + operations.extend(ops) + @specialize.argtype(1) - def _inject_debugging_code(self, looptoken, operations): + def _inject_debugging_code(self, looptoken, operations, bridge, number): if self._debug: # before doing anything, let's increase a counter s = 0 for op in operations: s += op.getopnum() looptoken._x86_debug_checksum = s - c_adr = ConstInt(rffi.cast(lltype.Signed, - self.loop_run_counters[-1])) - box = BoxInt() - box2 = BoxInt() - ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], - box, descr=self.debug_counter_descr), - ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), - ResOperation(rop.SETFIELD_RAW, [c_adr, box2], - None, 
descr=self.debug_counter_descr)] - if operations[0].getopnum() == rop.LABEL: - operations = [operations[0]] + ops + operations[1:] - else: - operations = ops + operations + + newoperations = [] + if bridge: + self._append_debugging_code(newoperations, bridge, number, + None) + for op in operations: + newoperations.append(op) + if op.getopnum() == rop.LABEL: + self._append_debugging_code(newoperations, bridge, number, + op.getdescr()) + operations = newoperations return operations def _assemble(self, regalloc, operations): diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -519,16 +519,23 @@ from pypy.tool.logparser import parse_log_file, extract_category from pypy.rlib import debug + targettoken, preambletoken = TargetToken(), TargetToken() loop = """ [i0] - label(i0, descr=targettoken) + label(i0, descr=preambletoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1, descr=targettoken) + label(i1, descr=targettoken) + debug_merge_point('xyz', 0) + i11 = int_add(i1, 1) + i12 = int_ge(i11, 10) + guard_false(i12) [] + jump(i11, descr=targettoken) """ - ops = parse(loop, namespace={'targettoken': TargetToken()}) + ops = parse(loop, namespace={'targettoken': targettoken, + 'preambletoken': preambletoken}) debug._log = dlog = debug.DebugLog() try: self.cpu.assembler.set_debug(True) @@ -537,11 +544,15 @@ self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] - assert struct.i == 10 + assert struct.i == 1 + struct = self.cpu.assembler.loop_run_counters[1] + assert struct.i == 9 self.cpu.finish_once() finally: debug._log = None - assert ('jit-backend-counts', [('debug_print', 'loop -1:10')]) in dlog + l1 = ('debug_print', preambletoken.repr_of_descr() + ':1') + l2 = ('debug_print', targettoken.repr_of_descr() + ':9') + assert ('jit-backend-counts', [l1, l2]) in dlog def test_debugger_checksum(self): loop = """ diff --git a/pypy/jit/backend/x86/test/test_zrpy_platform.py b/pypy/jit/backend/x86/test/test_zrpy_platform.py --- a/pypy/jit/backend/x86/test/test_zrpy_platform.py +++ b/pypy/jit/backend/x86/test/test_zrpy_platform.py @@ -74,8 +74,8 @@ myjitdriver = jit.JitDriver(greens = [], reds = ['n']) def entrypoint(argv): - myjitdriver.set_param('threshold', 2) - myjitdriver.set_param('trace_eagerness', 0) + jit.set_param(myjitdriver, 'threshold', 2) + jit.set_param(myjitdriver, 'trace_eagerness', 0) n = 16 while n > 0: myjitdriver.can_enter_jit(n=n) diff --git a/pypy/jit/codewriter/call.py b/pypy/jit/codewriter/call.py --- a/pypy/jit/codewriter/call.py +++ b/pypy/jit/codewriter/call.py @@ -42,8 +42,7 @@ except AttributeError: pass - def is_candidate(graph): - return policy.look_inside_graph(graph) + is_candidate = policy.look_inside_graph assert len(self.jitdrivers_sd) > 0 todo = [jd.portal_graph for jd in self.jitdrivers_sd] diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -1007,25 +1007,6 @@ # a jump back to itself and possibly a few bridges ending with finnish. # Only the operations within the loop formed by that single jump will # be counted. - - # XXX hacked version, ignore and remove me when jit-targets is merged. 
- loops = self.get_all_loops() - loops = [loop for loop in loops if 'Preamble' not in repr(loop)] #XXX - assert len(loops) == 1 - loop, = loops - jumpop = loop.operations[-1] - assert jumpop.getopnum() == rop.JUMP - insns = {} - for op in loop.operations: - opname = op.getopname() - insns[opname] = insns.get(opname, 0) + 1 - return self._check_insns(insns, expected, check) - - def check_simple_loop(self, expected=None, **check): - # Usefull in the simplest case when we have only one trace ending with - # a jump back to itself and possibly a few bridges ending with finnish. - # Only the operations within the loop formed by that single jump will - # be counted. loops = self.get_all_loops() assert len(loops) == 1 loop = loops[0] diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -1,10 +1,13 @@ from __future__ import with_statement from pypy.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot) + LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot, + FakeMetaInterpStaticData) from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken from pypy.jit.metainterp.resoperation import rop, opname, ResOperation from pypy.jit.metainterp.optimize import InvalidLoop from py.test import raises +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization +from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method class BaseTestMultiLabel(BaseTest): enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" @@ -84,6 +87,8 @@ return optimized +class OptimizeoptTestMultiLabel(BaseTestMultiLabel): + def test_simple(self): ops = """ [i1] @@ -381,6 +386,55 @@ """ self.optimize_loop(ops, expected) -class TestLLtype(BaseTestMultiLabel, LLtypeMixin): + +class OptRenameStrlen(Optimization): + def propagate_forward(self, op): + dispatch_opt(self, op) + + def optimize_STRLEN(self, op): + newop = op.clone() + newop.result = op.result.clonebox() + self.emit_operation(newop) + self.make_equal_to(op.result, self.getvalue(newop.result)) + +dispatch_opt = make_dispatcher_method(OptRenameStrlen, 'optimize_', + default=OptRenameStrlen.emit_operation) + +class BaseTestOptimizerRenamingBoxes(BaseTestMultiLabel): + + def _do_optimize_loop(self, loop, call_pure_results): + from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll + from pypy.jit.metainterp.optimizeopt.util import args_dict + from pypy.jit.metainterp.optimizeopt.pure import OptPure + + self.loop = loop + loop.call_pure_results = args_dict() + metainterp_sd = FakeMetaInterpStaticData(self.cpu) + optimize_unroll(metainterp_sd, loop, [OptRenameStrlen(), OptPure()], True) + + def test_optimizer_renaming_boxes(self): + ops = """ + [p1] + i1 = strlen(p1) + label(p1) + i2 = strlen(p1) + i3 = int_add(i2, 7) + jump(p1) + """ + expected = """ + [p1] + i1 = strlen(p1) + label(p1, i1) + i11 = same_as(i1) + i2 = int_add(i11, 7) + jump(p1, i11) + """ + self.optimize_loop(ops, expected) + + + +class TestLLtype(OptimizeoptTestMultiLabel, LLtypeMixin): pass +class TestOptimizerRenamingBoxesLLtype(BaseTestOptimizerRenamingBoxes, LLtypeMixin): + pass diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py 
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7759,7 +7759,7 @@ jump(i0, p0, i2) """ self.optimize_loop(ops, expected) - + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -265,7 +265,12 @@ self.optimizer.importable_values[value] = imp newvalue = self.optimizer.getvalue(op.result) newresult = newvalue.get_key_box() - assert newresult is op.result or newvalue.is_constant() + # note that emitting here SAME_AS should not happen, but + # in case it does, we would prefer to be suboptimal in asm + # to a fatal RPython exception. + if newresult is not op.result and not newvalue.is_constant(): + op = ResOperation(rop.SAME_AS, [op.result], newresult) + self.optimizer._newoperations.append(op) self.optimizer.flush() self.optimizer.emitting_dissabled = False diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -58,6 +58,7 @@ class W_PyCFunctionObject(Wrappable): def __init__(self, space, ml, w_self, w_module=None): self.ml = ml + self.name = rffi.charp2str(self.ml.c_ml_name) self.w_self = w_self self.w_module = w_module @@ -69,7 +70,7 @@ flags &= ~(METH_CLASS | METH_STATIC | METH_COEXIST) if space.is_true(w_kw) and not flags & METH_KEYWORDS: raise OperationError(space.w_TypeError, space.wrap( - rffi.charp2str(self.ml.c_ml_name) + "() takes no keyword arguments")) + self.name + "() takes no keyword arguments")) func = rffi.cast(PyCFunction, self.ml.c_ml_meth) length = space.int_w(space.len(w_args)) @@ -80,13 +81,12 @@ if length == 0: return generic_cpy_call(space, func, w_self, None) raise OperationError(space.w_TypeError, space.wrap( - rffi.charp2str(self.ml.c_ml_name) + "() takes no arguments")) + self.name + "() takes no arguments")) elif flags & METH_O: if length != 1: raise OperationError(space.w_TypeError, space.wrap("%s() takes exactly one argument (%d given)" % ( - rffi.charp2str(self.ml.c_ml_name), - length))) + self.name, length))) w_arg = space.getitem(w_args, space.wrap(0)) return generic_cpy_call(space, func, w_self, w_arg) elif flags & METH_VARARGS: @@ -199,6 +199,7 @@ __call__ = interp2app(cfunction_descr_call), __doc__ = GetSetProperty(W_PyCFunctionObject.get_doc), __module__ = interp_attrproperty_w('w_module', cls=W_PyCFunctionObject), + __name__ = interp_attrproperty('name', cls=W_PyCFunctionObject), ) W_PyCFunctionObject.typedef.acceptable_as_base_class = False diff --git a/pypy/module/cpyext/test/test_methodobject.py b/pypy/module/cpyext/test/test_methodobject.py --- a/pypy/module/cpyext/test/test_methodobject.py +++ b/pypy/module/cpyext/test/test_methodobject.py @@ -63,6 +63,7 @@ ), ]) assert mod.getarg_O(1) == 1 + assert mod.getarg_O.__name__ == "getarg_O" raises(TypeError, mod.getarg_O) raises(TypeError, mod.getarg_O, 1, 1) diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ -185,7 +185,7 @@ class FlowExecutionContext(ExecutionContext): def __init__(self, space, code, globals, constargs={}, outer_func=None, - name=None): + name=None, is_generator=False): ExecutionContext.__init__(self, space) self.code = code @@ -208,6 +208,7 @@ initialblock = SpamBlock(FrameState(frame).copy()) self.pendingblocks = collections.deque([initialblock]) 
self.graph = FunctionGraph(name or code.co_name, initialblock) + self.is_generator = is_generator make_link = Link # overridable for transition tracking @@ -247,6 +248,8 @@ return outcome, w_exc_cls, w_exc_value def build_flow(self): + if self.is_generator: + self.produce_generator_mark() while self.pendingblocks: block = self.pendingblocks.popleft() frame = self.create_frame() @@ -259,9 +262,15 @@ self.topframeref = jit.non_virtual_ref(frame) self.crnt_frame = frame try: - w_result = frame.dispatch(frame.pycode, - frame.last_instr, - self) + frame.frame_finished_execution = False + while True: + w_result = frame.dispatch(frame.pycode, + frame.last_instr, + self) + if frame.frame_finished_execution: + break + else: + self.generate_yield(frame, w_result) finally: self.crnt_frame = None self.topframeref = old_frameref @@ -307,6 +316,21 @@ del self.recorder self.fixeggblocks() + def produce_generator_mark(self): + [initialblock] = self.pendingblocks + initialblock.operations.append( + SpaceOperation('generator_mark', [], Variable())) + + def generate_yield(self, frame, w_result): + assert self.is_generator + self.recorder.crnt_block.operations.append( + SpaceOperation('yield', [w_result], Variable())) + # we must push a dummy value that will be POPped: it's the .send() + # passed into the generator (2.5 feature) + assert sys.version_info >= (2, 5) + frame.pushvalue(None) + frame.last_instr += 1 + def fixeggblocks(self): # EggBlocks reuse the variables of their previous block, # which is deemed not acceptable for simplicity of the operations diff --git a/pypy/objspace/flow/objspace.py b/pypy/objspace/flow/objspace.py --- a/pypy/objspace/flow/objspace.py +++ b/pypy/objspace/flow/objspace.py @@ -8,6 +8,7 @@ from pypy.interpreter.pycode import PyCode, cpython_code_signature from pypy.interpreter.module import Module from pypy.interpreter.error import OperationError +from pypy.interpreter.astcompiler.consts import CO_GENERATOR from pypy.interpreter import pyframe, argument from pypy.objspace.flow.model import * from pypy.objspace.flow import flowcontext, operation, specialcase @@ -247,15 +248,13 @@ return ecls return None - def build_flow(self, func, constargs={}): + def build_flow(self, func, constargs={}, tweak_for_generator=True): """ """ if func.func_doc and func.func_doc.lstrip().startswith('NOT_RPYTHON'): raise Exception, "%r is tagged as NOT_RPYTHON" % (func,) code = func.func_code - if code.co_flags & 32: - # generator - raise TypeError("%r is a generator" % (func,)) + is_generator = bool(code.co_flags & CO_GENERATOR) code = PyCode._from_code(self, code) if func.func_closure is None: cl = None @@ -271,7 +270,8 @@ class outerfunc: # hack closure = cl ec = flowcontext.FlowExecutionContext(self, code, func.func_globals, - constargs, outerfunc, name) + constargs, outerfunc, name, + is_generator) graph = ec.graph graph.func = func # attach a signature and defaults to the graph @@ -291,6 +291,11 @@ e = error.FlowingError(formated) raise error.FlowingError, e, tb checkgraph(graph) + # + if is_generator and tweak_for_generator: + from pypy.translator.generator import tweak_generator_graph + tweak_generator_graph(graph) + # return graph def fixedview(self, w_tuple, expected_length=None): diff --git a/pypy/objspace/flow/test/test_generator.py b/pypy/objspace/flow/test/test_generator.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/flow/test/test_generator.py @@ -0,0 +1,18 @@ +from pypy.objspace.flow.test.test_objspace import Base + + +class TestGenerator(Base): + + def 
test_simple_generator(self): + def f(n): + i = 0 + while i < n: + yield i + yield i + i += 1 + graph = self.codetest(f, tweak_for_generator=False) + ops = self.all_operations(graph) + assert ops == {'generator_mark': 1, + 'lt': 1, 'is_true': 1, + 'yield': 2, + 'inplace_add': 1} diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -16,14 +16,14 @@ is_operator = getattr(operator, 'is_', operator.eq) # it's not there 2.2 class Base: - def codetest(self, func): + def codetest(self, func, **kwds): import inspect try: func = func.im_func except AttributeError: pass #name = func.func_name - graph = self.space.build_flow(func) + graph = self.space.build_flow(func, **kwds) graph.source = inspect.getsource(func) self.show(graph) return graph @@ -882,12 +882,6 @@ num = bytecode_spec.opmap[name] flow_meth_names[num] = locals()['old_' + name] - def test_generator(self): - def f(): - yield 3 - - py.test.raises(TypeError, "self.codetest(f)") - def test_dont_capture_RuntimeError(self): class Foo: def __hash__(self): diff --git a/pypy/rpython/test/test_generator.py b/pypy/rpython/test/test_generator.py new file mode 100644 --- /dev/null +++ b/pypy/rpython/test/test_generator.py @@ -0,0 +1,62 @@ +from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin + + +class BaseTestGenerator(BaseRtypingTest): + + def test_simple_explicit(self): + def g(a, b, c): + yield a + yield b + yield c + def f(): + gen = g(3, 5, 8) + x = gen.next() * 100 + x += gen.next() * 10 + x += gen.next() + return x + res = self.interpret(f, []) + assert res == 358 + + def test_cannot_merge(self): + # merging two different generators is not supported + # right now, but we can use workarounds like here + class MyGen: + _immutable_ = True + def next(self): + raise NotImplementedError + class MyG1(MyGen): + _immutable_ = True + def __init__(self, a): + self._gen = self.g1(a) + def next(self): + return self._gen.next() + @staticmethod + def g1(a): + yield a + 1 + yield a + 2 + class MyG2(MyGen): + _immutable_ = True + def __init__(self): + self._gen = self.g2() + def next(self): + return self._gen.next() + @staticmethod + def g2(): + yield 42 + def f(n): + if n > 0: + gen = MyG1(n) + else: + gen = MyG2() + return gen.next() + res = self.interpret(f, [10]) + assert res == 11 + res = self.interpret(f, [0]) + assert res == 42 + + +class TestLLtype(BaseTestGenerator, LLRtypeMixin): + pass + +class TestOOtype(BaseTestGenerator, OORtypeMixin): + pass diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -3,6 +3,7 @@ from pypy.jit.metainterp.resoperation import opname from pypy.jit.tool.oparser import OpParser from pypy.tool.logparser import parse_log_file, extract_category +from copy import copy class Op(object): bridge = None @@ -387,6 +388,18 @@ loops.append(loop) return log, loops +def split_trace(trace): + labels = [i for i, op in enumerate(trace.operations) + if op.name == 'label'] + labels = [0] + labels + [len(trace.operations) - 1] + parts = [] + for i in range(len(labels) - 1): + start, stop = labels[i], labels[i+1] + part = copy(trace) + part.operations = trace.operations[start : stop + 1] + parts.append(part) + + return parts def parse_log_counts(input, loops): if not input: diff --git a/pypy/tool/jitlogparser/test/test_modulefinder.py 
b/pypy/tool/jitlogparser/test/test_modulefinder.py --- a/pypy/tool/jitlogparser/test/test_modulefinder.py +++ b/pypy/tool/jitlogparser/test/test_modulefinder.py @@ -7,12 +7,14 @@ py.test.skip("Specific python 2.6 tests") def test_gather_code_py(): + py.test.skip("XXX broken, fix me") fname = re.__file__ codes = gather_all_code_objs(fname) assert len(codes) == 21 assert sorted(codes.keys()) == [102, 134, 139, 144, 153, 164, 169, 181, 188, 192, 197, 206, 229, 251, 266, 271, 277, 285, 293, 294, 308] def test_load_code(): + py.test.skip("XXX broken, fix me") fname = re.__file__ code = gather_all_code_objs(fname)[144] assert code.co_name == 'sub' diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -1,6 +1,6 @@ from pypy.tool.jitlogparser.parser import (SimpleParser, TraceForOpcode, Function, adjust_bridges, - import_log, Op) + import_log, split_trace, Op) from pypy.tool.jitlogparser.storage import LoopStorage import py, sys @@ -231,3 +231,21 @@ myrepr = 'c = foobar(a, b, descr=mydescr)' assert op.repr() == myrepr assert op.repr() == myrepr # do it twice + +def test_split_trace(): + loop = parse(''' + [i7] + i9 = int_lt(i7, 1003) + label(i9) + guard_true(i9, descr=) [] + i13 = getfield_raw(151937600, descr=) + label(i13) + i19 = int_lt(i13, 1003) + guard_true(i19, descr=) [] + i113 = getfield_raw(151937600, descr=) + ''') + parts = split_trace(loop) + assert len(parts) == 3 + assert len(parts[0].operations) == 2 + assert len(parts[1].operations) == 4 + assert len(parts[2].operations) == 4 diff --git a/pypy/translator/generator.py b/pypy/translator/generator.py new file mode 100644 --- /dev/null +++ b/pypy/translator/generator.py @@ -0,0 +1,166 @@ +from pypy.objspace.flow.model import Block, Link, SpaceOperation, checkgraph +from pypy.objspace.flow.model import Variable, Constant, FunctionGraph +from pypy.translator.unsimplify import insert_empty_startblock +from pypy.translator.unsimplify import split_block +from pypy.translator.simplify import eliminate_empty_blocks +from pypy.tool.sourcetools import func_with_new_name +from pypy.interpreter.argument import Signature + + +class AbstractPosition(object): + _immutable_ = True + _attrs_ = () + + +def tweak_generator_graph(graph): + if not hasattr(graph.func, '_generator_next_method_of_'): + # This is the first copy of the graph. We replace it with + # a small bootstrap graph. + GeneratorIterator = make_generatoriterator_class(graph) + replace_graph_with_bootstrap(GeneratorIterator, graph) + # We attach a 'next' method to the GeneratorIterator class + # that will invoke the real function, based on a second + # copy of the graph. + attach_next_method(GeneratorIterator, graph) + else: + # This is the second copy of the graph. Tweak it. 
+ GeneratorIterator = graph.func._generator_next_method_of_ + tweak_generator_body_graph(GeneratorIterator.Entry, graph) + + +def make_generatoriterator_class(graph): + class GeneratorIterator(object): + class Entry(AbstractPosition): + _immutable_ = True + varnames = get_variable_names(graph.startblock.inputargs) + def __init__(self, entry): + self.current = entry + return GeneratorIterator + +def replace_graph_with_bootstrap(GeneratorIterator, graph): + Entry = GeneratorIterator.Entry + newblock = Block(graph.startblock.inputargs) + v_generator = Variable('generator') + v_entry = Variable('entry') + newblock.operations.append( + SpaceOperation('simple_call', [Constant(Entry)], v_entry)) + assert len(graph.startblock.inputargs) == len(Entry.varnames) + for v, name in zip(graph.startblock.inputargs, Entry.varnames): + newblock.operations.append( + SpaceOperation('setattr', [v_entry, Constant(name), v], + Variable())) + newblock.operations.append( + SpaceOperation('simple_call', [Constant(GeneratorIterator), v_entry], + v_generator)) + newblock.closeblock(Link([v_generator], graph.returnblock)) + graph.startblock = newblock + +def attach_next_method(GeneratorIterator, graph): + func = graph.func + func = func_with_new_name(func, '%s__next' % (func.func_name,)) + func._generator_next_method_of_ = GeneratorIterator + func._always_inline_ = True + # + def next(self): + entry = self.current + self.current = None + (next_entry, return_value) = func(entry) + self.current = next_entry + return return_value + GeneratorIterator.next = next + return func # for debugging + +def get_variable_names(variables): + seen = set() + result = [] + for v in variables: + name = v._name.strip('_') + while name in seen: + name += '_' + result.append('g_' + name) + seen.add(name) + return result + +def _insert_reads(block, varnames): + assert len(varnames) == len(block.inputargs) + v_entry1 = Variable('entry') + for i, name in enumerate(varnames): + block.operations.insert(i, + SpaceOperation('getattr', [v_entry1, Constant(name)], + block.inputargs[i])) + block.inputargs = [v_entry1] + +def tweak_generator_body_graph(Entry, graph): + assert graph.startblock.operations[0].opname == 'generator_mark' + graph.startblock.operations.pop(0) + # + insert_empty_startblock(None, graph) + _insert_reads(graph.startblock, Entry.varnames) + Entry.block = graph.startblock + # + mappings = [Entry] + # + for block in list(graph.iterblocks()): + for exit in block.exits: + if exit.target is graph.returnblock: + exit.args = [Constant(StopIteration), + Constant(StopIteration())] + exit.target = graph.exceptblock + for index in range(len(block.operations)-1, -1, -1): + op = block.operations[index] + if op.opname == 'yield': + [v_yielded_value] = op.args + del block.operations[index] + newlink = split_block(None, block, index) + newblock = newlink.target + # + class Resume(AbstractPosition): + _immutable_ = True + block = newblock + Resume.__name__ = 'Resume%d' % len(mappings) + mappings.append(Resume) + varnames = get_variable_names(newlink.args) + # + _insert_reads(newblock, varnames) + # + v_resume = Variable('resume') + block.operations.append( + SpaceOperation('simple_call', [Constant(Resume)], + v_resume)) + for i, name in enumerate(varnames): + block.operations.append( + SpaceOperation('setattr', [v_resume, Constant(name), + newlink.args[i]], + Variable())) + v_pair = Variable('pair') + block.operations.append( + SpaceOperation('newtuple', [v_resume, v_yielded_value], + v_pair)) + newlink.args = [v_pair] + newlink.target = 
graph.returnblock + # + regular_entry_block = Block([Variable('entry')]) + block = regular_entry_block + for Resume in mappings: + v_check = Variable() + block.operations.append( + SpaceOperation('simple_call', [Constant(isinstance), + block.inputargs[0], + Constant(Resume)], + v_check)) + block.exitswitch = v_check + link1 = Link([block.inputargs[0]], Resume.block) + link1.exitcase = True + nextblock = Block([Variable('entry')]) + link2 = Link([block.inputargs[0]], nextblock) + link2.exitcase = False + block.closeblock(link1, link2) + block = nextblock + block.closeblock(Link([Constant(AssertionError), + Constant(AssertionError("bad generator class"))], + graph.exceptblock)) + graph.startblock = regular_entry_block + graph.signature = Signature(['entry']) + graph.defaults = () + checkgraph(graph) + eliminate_empty_blocks(graph) diff --git a/pypy/translator/test/test_generator.py b/pypy/translator/test/test_generator.py new file mode 100644 --- /dev/null +++ b/pypy/translator/test/test_generator.py @@ -0,0 +1,156 @@ +from pypy.conftest import option +from pypy.objspace.flow.objspace import FlowObjSpace +from pypy.objspace.flow.model import Variable +from pypy.interpreter.argument import Signature +from pypy.translator.translator import TranslationContext +from pypy.translator.generator import make_generatoriterator_class +from pypy.translator.generator import replace_graph_with_bootstrap +from pypy.translator.generator import get_variable_names +from pypy.translator.generator import tweak_generator_body_graph +from pypy.translator.generator import attach_next_method +from pypy.translator.simplify import join_blocks + + +# ____________________________________________________________ + +def f_gen(n): + i = 0 + while i < n: + yield i + i += 1 + +class GeneratorIterator(object): + def __init__(self, entry): + self.current = entry + def next(self): + e = self.current + self.current = None + if isinstance(e, Yield1): + n = e.n_0 + i = e.i_0 + i += 1 + else: + n = e.n_0 + i = 0 + if i < n: + e = Yield1() + e.n_0 = n + e.i_0 = i + self.current = e + return i + raise StopIteration + + def __iter__(self): + return self + +class AbstractPosition(object): + _immutable_ = True +class Entry1(AbstractPosition): + _immutable_ = True +class Yield1(AbstractPosition): + _immutable_ = True + +def f_explicit(n): + e = Entry1() + e.n_0 = n + return GeneratorIterator(e) + +def test_explicit(): + assert list(f_gen(10)) == list(f_explicit(10)) + +def test_get_variable_names(): + lst = get_variable_names([Variable('a'), Variable('b_'), Variable('a')]) + assert lst == ['g_a', 'g_b', 'g_a_'] + +# ____________________________________________________________ + + +class TestGenerator: + + def test_replace_graph_with_bootstrap(self): + def func(n, x, y, z): + yield n + yield n + # + space = FlowObjSpace() + graph = space.build_flow(func, tweak_for_generator=False) + assert graph.startblock.operations[0].opname == 'generator_mark' + GeneratorIterator = make_generatoriterator_class(graph) + replace_graph_with_bootstrap(GeneratorIterator, graph) + if option.view: + graph.show() + block = graph.startblock + ops = block.operations + assert ops[0].opname == 'simple_call' # e = Entry1() + assert ops[1].opname == 'setattr' # e.g_n = n + assert ops[1].args[1].value == 'g_n' + assert ops[2].opname == 'setattr' # e.g_x = x + assert ops[2].args[1].value == 'g_x' + assert ops[3].opname == 'setattr' # e.g_y = y + assert ops[3].args[1].value == 'g_y' + assert ops[4].opname == 'setattr' # e.g_z = z + assert ops[4].args[1].value == 'g_z' 
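        # ops[1]..ops[4] store each input argument onto the entry object as a
        # 'g_<name>' attribute; the final operation then wraps that entry in
        # the GeneratorIterator instance which the bootstrap graph returns: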
+ assert ops[5].opname == 'simple_call' # g = GeneratorIterator(e) + assert ops[5].args[1] == ops[0].result + assert len(ops) == 6 + assert len(block.exits) == 1 + assert block.exits[0].target is graph.returnblock + + def test_tweak_generator_body_graph(self): + def f(n, x, y, z=3): + z *= 10 + yield n + 1 + z -= 10 + # + space = FlowObjSpace() + graph = space.build_flow(f, tweak_for_generator=False) + class Entry: + varnames = ['g_n', 'g_x', 'g_y', 'g_z'] + tweak_generator_body_graph(Entry, graph) + if option.view: + graph.show() + # XXX how to test directly that the graph is correct? :-( + assert len(graph.startblock.inputargs) == 1 + assert graph.signature == Signature(['entry']) + assert graph.defaults == () + + def test_tweak_generator_graph(self): + def f(n, x, y, z): + z *= 10 + yield n + 1 + z -= 10 + # + space = FlowObjSpace() + graph = space.build_flow(f, tweak_for_generator=False) + GeneratorIterator = make_generatoriterator_class(graph) + replace_graph_with_bootstrap(GeneratorIterator, graph) + func1 = attach_next_method(GeneratorIterator, graph) + if option.view: + graph.show() + # + assert func1._generator_next_method_of_ is GeneratorIterator + assert hasattr(GeneratorIterator, 'next') + # + graph_next = space.build_flow(GeneratorIterator.next.im_func) + join_blocks(graph_next) + if option.view: + graph_next.show() + # + graph1 = space.build_flow(func1, tweak_for_generator=False) + tweak_generator_body_graph(GeneratorIterator.Entry, graph1) + if option.view: + graph1.show() + + def test_automatic(self): + def f(n, x, y, z): + z *= 10 + yield n + 1 + z -= 10 + # + space = FlowObjSpace() + graph = space.build_flow(f) # tweak_for_generator=True + if option.view: + graph.show() + block = graph.startblock + assert len(block.exits) == 1 + assert block.exits[0].target is graph.returnblock From noreply at buildbot.pypy.org Mon Dec 26 16:57:30 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Mon, 26 Dec 2011 16:57:30 +0100 (CET) Subject: [pypy-commit] pypy jit-improve-nested-loops: fix test Message-ID: <20111226155730.BB3D282BA2@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-improve-nested-loops Changeset: r50877:be85523c5872 Date: 2011-12-26 14:11 +0100 http://bitbucket.org/pypy/pypy/changeset/be85523c5872/ Log: fix test diff --git a/pypy/jit/metainterp/test/test_loop.py b/pypy/jit/metainterp/test/test_loop.py --- a/pypy/jit/metainterp/test/test_loop.py +++ b/pypy/jit/metainterp/test/test_loop.py @@ -756,7 +756,7 @@ res = self.meta_interp(interpret, [1]) assert res == interpret(1) # XXX it's unsure how many loops should be there - self.check_trace_count(3) + self.check_trace_count(2) def test_path_with_operations_not_from_start(self): jitdriver = JitDriver(greens = ['k'], reds = ['n', 'z']) From noreply at buildbot.pypy.org Mon Dec 26 16:57:31 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Mon, 26 Dec 2011 16:57:31 +0100 (CET) Subject: [pypy-commit] pypy jit-improve-nested-loops: more generally ensure that TargetTokens of labels has the correct original_jitcell_token Message-ID: <20111226155731.F2D4882BA2@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-improve-nested-loops Changeset: r50878:5706bc85eb3b Date: 2011-12-26 16:57 +0100 http://bitbucket.org/pypy/pypy/changeset/5706bc85eb3b/ Log: more generally ensure that TargetTokens of labels has the correct original_jitcell_token diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -176,10 
+176,10 @@ loop.original_jitcell_token = jitcell_token for label in all_target_tokens: assert isinstance(label, TargetToken) - label.original_jitcell_token = jitcell_token if label.virtual_state and label.short_preamble: metainterp_sd.logger_ops.log_short_preamble([], label.short_preamble) jitcell_token.target_tokens = all_target_tokens + propagate_original_jitcell_token(loop) send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, "loop") record_loop_or_bridge(metainterp_sd, loop) return all_target_tokens[0] @@ -247,11 +247,11 @@ for box in loop.inputargs: assert isinstance(box, Box) - target_token = loop.operations[-1].getdescr() + target_token = loop.operations[-1].getdescr() resumekey.compile_and_attach(metainterp, loop) + target_token = label.getdescr() assert isinstance(target_token, TargetToken) - target_token.original_jitcell_token = loop.original_jitcell_token record_loop_or_bridge(metainterp_sd, loop) return target_token @@ -288,6 +288,15 @@ assert i == len(inputargs) loop.operations = extra_ops + loop.operations +def propagate_original_jitcell_token(trace): + for op in trace.operations: + if op.getopnum() == rop.LABEL: + token = op.getdescr() + assert isinstance(token, TargetToken) + assert token.original_jitcell_token is None + token.original_jitcell_token = trace.original_jitcell_token + + def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): vinfo = jitdriver_sd.virtualizable_info if vinfo is not None: @@ -558,6 +567,7 @@ inputargs = metainterp.history.inputargs if not we_are_translated(): self._debug_suboperations = new_loop.operations + propagate_original_jitcell_token(new_loop) send_bridge_to_backend(metainterp.jitdriver_sd, metainterp.staticdata, self, inputargs, new_loop.operations, new_loop.original_jitcell_token) @@ -744,6 +754,7 @@ jitdriver_sd = metainterp.jitdriver_sd redargs = new_loop.inputargs new_loop.original_jitcell_token = jitcell_token = make_jitcell_token(jitdriver_sd) + propagate_original_jitcell_token(new_loop) send_loop_to_backend(self.original_greenkey, metainterp.jitdriver_sd, metainterp_sd, new_loop, "entry bridge") # send the new_loop to warmspot.py, to be called directly the next time From noreply at buildbot.pypy.org Mon Dec 26 17:11:07 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Dec 2011 17:11:07 +0100 (CET) Subject: [pypy-commit] pypy default: Fix comment, and simplify the final logic. Message-ID: <20111226161107.608F282BA2@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r50879:f9932c00e2d2 Date: 2011-12-26 17:07 +0100 http://bitbucket.org/pypy/pypy/changeset/f9932c00e2d2/ Log: Fix comment, and simplify the final logic. diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -537,7 +537,7 @@ builder.append(by) builder.append_slice(input, upper, len(input)) else: - # An ok guess for the result size + # First compute the exact result size count = input.count(sub) if count > maxsplit and maxsplit > 0: count = maxsplit @@ -553,21 +553,16 @@ builder = StringBuilder(result_size) start = 0 sublen = len(sub) - first = True while maxsplit != 0: next = input.find(sub, start) if next < 0: break - if not first: - builder.append(by) - first = False builder.append_slice(input, start, next) + builder.append(by) start = next + sublen maxsplit -= 1 # NB. 
if it's already < 0, it stays < 0 - if not first: - builder.append(by) builder.append_slice(input, start, len(input)) return space.wrap(builder.build()) From noreply at buildbot.pypy.org Mon Dec 26 20:18:49 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Dec 2011 20:18:49 +0100 (CET) Subject: [pypy-commit] pypy default: another attempt to remove a very confusing raise Message-ID: <20111226191850.04BFB82BA2@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50880:a6b6b5a373a1 Date: 2011-12-26 21:18 +0200 http://bitbucket.org/pypy/pypy/changeset/a6b6b5a373a1/ Log: another attempt to remove a very confusing raise diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -619,7 +619,8 @@ self.descr_reqcls, args) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -655,7 +656,8 @@ self.descr_reqcls, args) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -674,7 +676,8 @@ self.descr_reqcls, args.prepend(w_obj)) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -690,7 +693,8 @@ raise OperationError(space.w_SystemError, space.wrap("unexpected DescrMismatch error")) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -708,7 +712,8 @@ self.descr_reqcls, Arguments(space, [w1])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -726,7 +731,8 @@ self.descr_reqcls, Arguments(space, [w1, w2])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -744,7 +750,8 @@ self.descr_reqcls, Arguments(space, [w1, w2, w3])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -763,7 +770,8 @@ Arguments(space, [w1, w2, w3, w4])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result From noreply at buildbot.pypy.org Mon Dec 26 21:23:58 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Dec 2011 21:23:58 +0100 (CET) Subject: [pypy-commit] pypy default: remove some unnecessary copies (they used to be necessary, not any more) Message-ID: <20111226202358.8B28B82BA2@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50881:c3142d5c9c35 Date: 2011-12-26 22:23 +0200 http://bitbucket.org/pypy/pypy/changeset/c3142d5c9c35/ Log: remove some unnecessary copies (they used to be necessary, not any more) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -578,8 +578,8 @@ strides.append(concrete.strides[i]) backstrides.append(concrete.backstrides[i]) 
shape.append(concrete.shape[i]) - return space.wrap(W_NDimSlice(concrete.start, strides[:], - backstrides[:], shape[:], concrete)) + return space.wrap(W_NDimSlice(concrete.start, strides, + backstrides, shape, concrete)) def descr_get_flatiter(self, space): return space.wrap(W_FlatIterator(self)) @@ -820,8 +820,8 @@ if self.order == 'C': strides.reverse() backstrides.reverse() - self.strides = strides[:] - self.backstrides = backstrides[:] + self.strides = strides + self.backstrides = backstrides def array_sig(self, res_shape): if res_shape is not None and self.shape != res_shape: @@ -1025,9 +1025,9 @@ strides.reverse() backstrides.reverse() new_shape.reverse() - self.strides = strides[:] - self.backstrides = backstrides[:] - self.shape = new_shape[:] + self.strides = strides + self.backstrides = backstrides + self.shape = new_shape return new_strides = calc_new_strides(new_shape, self.shape, self.strides) if new_strides is None: @@ -1037,7 +1037,7 @@ for nd in range(len(new_shape)): new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] self.strides = new_strides[:] - self.backstrides = new_backstrides[:] + self.backstrides = new_backstrides self.shape = new_shape[:] class W_NDimArray(ConcreteArray): From noreply at buildbot.pypy.org Mon Dec 26 22:06:24 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 26 Dec 2011 22:06:24 +0100 (CET) Subject: [pypy-commit] pypy default: it's fine to inline this. Message-ID: <20111226210624.9DD4B82BA2@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50882:21bb1c0bd208 Date: 2011-12-26 15:05 -0600 http://bitbucket.org/pypy/pypy/changeset/21bb1c0bd208/ Log: it's fine to inline this. diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -162,7 +162,6 @@ _ll_4_list_setslice = rlist.ll_listsetslice _ll_2_list_delslice_startonly = rlist.ll_listdelslice_startonly _ll_3_list_delslice_startstop = rlist.ll_listdelslice_startstop -_ll_1_list_list2fixed = lltypesystem_rlist.ll_list2fixed _ll_2_list_inplace_mul = rlist.ll_inplace_mul _ll_2_list_getitem_foldable = _ll_2_list_getitem diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -375,7 +375,6 @@ newitems = malloc(LIST.items.TO, n) rgc.ll_arraycopy(olditems, newitems, 0, 0, n) return newitems -ll_list2fixed.oopspec = 'list.list2fixed(l)' def ll_list2fixed_exact(l): ll_assert(l.length == len(l.items), "ll_list2fixed_exact: bad length") From noreply at buildbot.pypy.org Mon Dec 26 22:06:25 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 26 Dec 2011 22:06:25 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20111226210625.C76CE82BA2@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50883:a0b6f54563e6 Date: 2011-12-26 15:06 -0600 http://bitbucket.org/pypy/pypy/changeset/a0b6f54563e6/ Log: merged upstream diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -619,7 +619,8 @@ self.descr_reqcls, args) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -655,7 +656,8 @@ self.descr_reqcls, args) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + 
w_result = None if w_result is None: w_result = space.w_None return w_result @@ -674,7 +676,8 @@ self.descr_reqcls, args.prepend(w_obj)) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -690,7 +693,8 @@ raise OperationError(space.w_SystemError, space.wrap("unexpected DescrMismatch error")) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -708,7 +712,8 @@ self.descr_reqcls, Arguments(space, [w1])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -726,7 +731,8 @@ self.descr_reqcls, Arguments(space, [w1, w2])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -744,7 +750,8 @@ self.descr_reqcls, Arguments(space, [w1, w2, w3])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -763,7 +770,8 @@ Arguments(space, [w1, w2, w3, w4])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -578,8 +578,8 @@ strides.append(concrete.strides[i]) backstrides.append(concrete.backstrides[i]) shape.append(concrete.shape[i]) - return space.wrap(W_NDimSlice(concrete.start, strides[:], - backstrides[:], shape[:], concrete)) + return space.wrap(W_NDimSlice(concrete.start, strides, + backstrides, shape, concrete)) def descr_get_flatiter(self, space): return space.wrap(W_FlatIterator(self)) @@ -820,8 +820,8 @@ if self.order == 'C': strides.reverse() backstrides.reverse() - self.strides = strides[:] - self.backstrides = backstrides[:] + self.strides = strides + self.backstrides = backstrides def array_sig(self, res_shape): if res_shape is not None and self.shape != res_shape: @@ -1025,9 +1025,9 @@ strides.reverse() backstrides.reverse() new_shape.reverse() - self.strides = strides[:] - self.backstrides = backstrides[:] - self.shape = new_shape[:] + self.strides = strides + self.backstrides = backstrides + self.shape = new_shape return new_strides = calc_new_strides(new_shape, self.shape, self.strides) if new_strides is None: @@ -1037,7 +1037,7 @@ for nd in range(len(new_shape)): new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] self.strides = new_strides[:] - self.backstrides = new_backstrides[:] + self.backstrides = new_backstrides self.shape = new_shape[:] class W_NDimArray(ConcreteArray): From noreply at buildbot.pypy.org Mon Dec 26 22:13:11 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Dec 2011 22:13:11 +0100 (CET) Subject: [pypy-commit] jitviewer default: move stuff around - helps with setup.py develop Message-ID: <20111226211311.D733C82BA2@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r178:1d7244f7b370 Date: 2011-12-26 23:11 +0200 http://bitbucket.org/pypy/jitviewer/changeset/1d7244f7b370/ Log: move stuff around - 
helps with setup.py develop diff --git a/_jitviewer/static/DroidSansMono.ttf b/static/DroidSansMono.ttf rename from _jitviewer/static/DroidSansMono.ttf rename to static/DroidSansMono.ttf diff --git a/_jitviewer/static/jquery-1.2.6.min.js b/static/jquery-1.2.6.min.js rename from _jitviewer/static/jquery-1.2.6.min.js rename to static/jquery-1.2.6.min.js diff --git a/_jitviewer/static/jquery.min.js b/static/jquery.min.js rename from _jitviewer/static/jquery.min.js rename to static/jquery.min.js diff --git a/_jitviewer/static/jquery.scrollTo-1.4.1.js b/static/jquery.scrollTo-1.4.1.js rename from _jitviewer/static/jquery.scrollTo-1.4.1.js rename to static/jquery.scrollTo-1.4.1.js diff --git a/_jitviewer/static/jquery.scrollTo-1.4.2-min.js b/static/jquery.scrollTo-1.4.2-min.js rename from _jitviewer/static/jquery.scrollTo-1.4.2-min.js rename to static/jquery.scrollTo-1.4.2-min.js diff --git a/_jitviewer/static/loop.js b/static/loop.js rename from _jitviewer/static/loop.js rename to static/loop.js diff --git a/_jitviewer/static/pygments.css b/static/pygments.css rename from _jitviewer/static/pygments.css rename to static/pygments.css diff --git a/_jitviewer/static/qt_workaround.css b/static/qt_workaround.css rename from _jitviewer/static/qt_workaround.css rename to static/qt_workaround.css diff --git a/_jitviewer/static/script.js b/static/script.js rename from _jitviewer/static/script.js rename to static/script.js diff --git a/_jitviewer/static/style.css b/static/style.css rename from _jitviewer/static/style.css rename to static/style.css diff --git a/_jitviewer/templates/index.html b/templates/index.html rename from _jitviewer/templates/index.html rename to templates/index.html diff --git a/_jitviewer/templates/loop.html b/templates/loop.html rename from _jitviewer/templates/loop.html rename to templates/loop.html From noreply at buildbot.pypy.org Mon Dec 26 22:39:00 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Dec 2011 22:39:00 +0100 (CET) Subject: [pypy-commit] pypy default: some tests and fixes Message-ID: <20111226213900.43CFC82BA2@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50884:40a95aa4967f Date: 2011-12-26 23:36 +0200 http://bitbucket.org/pypy/pypy/changeset/40a95aa4967f/ Log: some tests and fixes diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -389,14 +389,22 @@ return log, loops def split_trace(trace): - labels = [i for i, op in enumerate(trace.operations) - if op.name == 'label'] - labels = [0] + labels + [len(trace.operations) - 1] + labels = [0] + if trace.comment and 'Guard' in trace.comment: + descrs = ['bridge ' + re.search('Guard (\d+)', trace.comment).group(1)] + else: + descrs = [''] + for i, op in enumerate(trace.operations): + if op.name == 'label': + labels.append(i) + descrs.append(op.descr) + labels.append(len(trace.operations) - 1) parts = [] for i in range(len(labels) - 1): start, stop = labels[i], labels[i+1] part = copy(trace) part.operations = trace.operations[start : stop + 1] + part.descr = descrs[i] parts.append(part) return parts @@ -407,11 +415,7 @@ lines = input[-1].splitlines() mapping = {} for loop in loops: - com = loop.comment - if 'Loop' in com: - mapping['loop ' + re.search('Loop (\d+)', com).group(1)] = loop - else: - mapping['bridge ' + re.search('Guard (\d+)', com).group(1)] = loop + mapping[loop.descr] = loop for line in lines: if line: num, count = line.split(':', 2) diff --git 
a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -1,6 +1,7 @@ from pypy.tool.jitlogparser.parser import (SimpleParser, TraceForOpcode, Function, adjust_bridges, - import_log, split_trace, Op) + import_log, split_trace, Op, + parse_log_counts) from pypy.tool.jitlogparser.storage import LoopStorage import py, sys @@ -236,10 +237,10 @@ loop = parse(''' [i7] i9 = int_lt(i7, 1003) - label(i9) + label(i9, descr=grrr) guard_true(i9, descr=) [] i13 = getfield_raw(151937600, descr=) - label(i13) + label(i13, descr=asb) i19 = int_lt(i13, 1003) guard_true(i19, descr=) [] i113 = getfield_raw(151937600, descr=) @@ -249,3 +250,32 @@ assert len(parts[0].operations) == 2 assert len(parts[1].operations) == 4 assert len(parts[2].operations) == 4 + assert parts[1].descr == 'grrr' + assert parts[2].descr == 'asb' + +def test_parse_log_counts(): + loop = parse(''' + [i7] + i9 = int_lt(i7, 1003) + label(i9, descr=grrr) + guard_true(i9, descr=) [] + i13 = getfield_raw(151937600, descr=) + label(i13, descr=asb) + i19 = int_lt(i13, 1003) + guard_true(i19, descr=) [] + i113 = getfield_raw(151937600, descr=) + ''') + bridge = parse(''' + # bridge out of Guard 2 with 1 ops + [] + i0 = int_lt(1, 2) + finish(i0) + ''') + bridge.comment = 'bridge out of Guard 2 with 1 ops' + loop.comment = '' + loops = split_trace(loop) + split_trace(bridge) + input = ['grrr:123\nasb:12\nbridge 2:1234'] + parse_log_counts(input, loops) + assert loops[-1].count == 1234 + assert loops[1].count == 123 + assert loops[2].count == 12 From noreply at buildbot.pypy.org Mon Dec 26 22:39:01 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Dec 2011 22:39:01 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20111226213901.6AD0F82BA2@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50885:cc6d58803952 Date: 2011-12-26 23:38 +0200 http://bitbucket.org/pypy/pypy/changeset/cc6d58803952/ Log: merge diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -162,7 +162,6 @@ _ll_4_list_setslice = rlist.ll_listsetslice _ll_2_list_delslice_startonly = rlist.ll_listdelslice_startonly _ll_3_list_delslice_startstop = rlist.ll_listdelslice_startstop -_ll_1_list_list2fixed = lltypesystem_rlist.ll_list2fixed _ll_2_list_inplace_mul = rlist.ll_inplace_mul _ll_2_list_getitem_foldable = _ll_2_list_getitem diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -375,7 +375,6 @@ newitems = malloc(LIST.items.TO, n) rgc.ll_arraycopy(olditems, newitems, 0, 0, n) return newitems -ll_list2fixed.oopspec = 'list.list2fixed(l)' def ll_list2fixed_exact(l): ll_assert(l.length == len(l.items), "ll_list2fixed_exact: bad length") From noreply at buildbot.pypy.org Mon Dec 26 22:40:56 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Dec 2011 22:40:56 +0100 (CET) Subject: [pypy-commit] jitviewer default: a bit of cleanup, reuse more of jitlogparser Message-ID: <20111226214056.743C582BA2@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r179:c63afaebb8d6 Date: 2011-12-26 23:40 +0200 http://bitbucket.org/pypy/jitviewer/changeset/c63afaebb8d6/ Log: a bit of cleanup, reuse more of jitlogparser diff --git a/_jitviewer/parser.py b/_jitviewer/parser.py 
--- a/_jitviewer/parser.py +++ b/_jitviewer/parser.py @@ -143,19 +143,3 @@ def html_repr(self): return "inlined call to %s in %s" % (self.name, self.filename) - -def parse_log_counts(input, loops): - if not input: - return - lines = input[-1].splitlines() - mapping = {} - for loop in loops: - com = loop.comment - if 'Loop' in com: - mapping['loop ' + re.search('Loop (\d+)', com).group(1)] = loop - else: - mapping['bridge ' + re.search('Guard (\d+)', com).group(1)] = loop - for line in lines: - if line: - num, count = line.split(':', 2) - mapping[num].count = int(count) diff --git a/bin/jitviewer.py b/bin/jitviewer.py --- a/bin/jitviewer.py +++ b/bin/jitviewer.py @@ -46,9 +46,10 @@ import time from pypy.tool.logparser import extract_category from pypy.tool.jitlogparser.storage import LoopStorage -from pypy.tool.jitlogparser.parser import adjust_bridges, import_log +from pypy.tool.jitlogparser.parser import adjust_bridges, import_log,\ + parse_log_counts # -from _jitviewer.parser import ParserWithHtmlRepr, FunctionHtml, parse_log_counts +from _jitviewer.parser import ParserWithHtmlRepr, FunctionHtml from _jitviewer.display import CodeRepr, CodeReprNoFile import _jitviewer diff --git a/log.pypylog b/log.pypylog --- a/log.pypylog +++ b/log.pypylog @@ -1,651 +1,609 @@ -[364172f7361] {jit-backend-dump +[d0e7d8d709c] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6000 +0 4157415641554154415341524151415057565554535251504889E341BBD065EB0041FFD34889DF4883E4F041BB801DD60041FFD3488D65D8415F415E415D415C5B5DC3 -[36417307525] jit-backend-dump} -[36417308a0d] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a000 +0 4157415641554154415341524151415057565554535251504889E341BB503AF20041FFD34889DF4883E4F041BB100CD30041FFD3488D65D8415F415E415D415C5B5DC3 +[d0e7d8f4034] jit-backend-dump} +[d0e7d8f674c] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6043 +0 4157415641554154415341524151415057565554535251504889E341BBE065EB0041FFD34889DF4883E4F041BB801DD60041FFD3488D65D8415F415E415D415C5B5DC3 -[3641730a067] jit-backend-dump} -[3641730d3fd] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a043 +0 4157415641554154415341524151415057565554535251504889E341BB003AF20041FFD34889DF4883E4F041BB100CD30041FFD3488D65D8415F415E415D415C5B5DC3 +[d0e7d8f99b6] jit-backend-dump} +[d0e7d8ff830] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6086 +0 4157415641554154415341524151415057565554535251504889E34881EC80000000F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C2438F2440F11442440F2440F114C2448F2440F11542450F2440F115C2458F2440F11642460F2440F116C2468F2440F11742470F2440F117C247841BBD065EB0041FFD34889DF4883E4F041BB801DD60041FFD3488D65D8415F415E415D415C5B5DC3 -[3641730f3e9] jit-backend-dump} -[364173100f7] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a086 +0 4157415641554154415341524151415057565554535251504889E34881EC80000000F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C2438F2440F11442440F2440F114C2448F2440F11542450F2440F115C2458F2440F11642460F2440F116C2468F2440F11742470F2440F117C247841BB503AF20041FFD34889DF4883E4F041BB100CD30041FFD3488D65D8415F415E415D415C5B5DC3 +[d0e7d904384] jit-backend-dump} +[d0e7d906196] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6137 +0 
4157415641554154415341524151415057565554535251504889E34881EC80000000F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C2438F2440F11442440F2440F114C2448F2440F11542450F2440F115C2458F2440F11642460F2440F116C2468F2440F11742470F2440F117C247841BBE065EB0041FFD34889DF4883E4F041BB801DD60041FFD3488D65D8415F415E415D415C5B5DC3 -[36417311d51] jit-backend-dump} -[36417318001] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a137 +0 4157415641554154415341524151415057565554535251504889E34881EC80000000F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C2438F2440F11442440F2440F114C2448F2440F11542450F2440F115C2458F2440F11642460F2440F116C2468F2440F11742470F2440F117C247841BB003AF20041FFD34889DF4883E4F041BB100CD30041FFD3488D65D8415F415E415D415C5B5DC3 +[d0e7d90a4e6] jit-backend-dump} +[d0e7d91000c] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6210 +0 F20F11442410F20F114C2418F20F11542420F20F115C2428F20F11642430F20F116C2438F20F11742440F20F117C2448F2440F11442450F2440F114C2458F2440F11542460F2440F115C2468F2440F11642470F2440F116C2478F2440F11B42480000000F2440F11BC24880000004829C248894D804C8945A04C894DA84C8955B04889759048897D984889D741BBE09AD10041FFE3 -[36417319b49] jit-backend-dump} -[3641731fcfb] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a210 +0 41BB6039F20041FFD3B803000000488D65D8415F415E415D415C5B5DC3 +[d0e7d9121cc] jit-backend-dump} +[d0e7d91b0ac] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f62a5 +0 488B4D804C8B45A04C8B4DA84C8B55B0488B7590488B7D98F20F10442410F20F104C2418F20F10542420F20F105C2428F20F10642430F20F106C2438F20F10742440F20F107C2448F2440F10442450F2440F104C2458F2440F10542460F2440F105C2468F2440F10642470F2440F106C2478F2440F10B42480000000F2440F10BC2488000000488B142590EF4501C3 -[3641732173f] jit-backend-dump} -[36417326f23] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a22d +0 F20F11442410F20F114C2418F20F11542420F20F115C2428F20F11642430F20F116C2438F20F11742440F20F117C2448F2440F11442450F2440F114C2458F2440F11542460F2440F115C2468F2440F11642470F2440F116C2478F2440F11B42480000000F2440F11BC24880000004829C24C894DA848894D804C8955B04C8945A048897D98488975904889D741BB30C6CE0041FFE3 +[d0e7d91efca] jit-backend-dump} +[d0e7d92a670] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6334 +0 57565251415041514883EC40F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C2438488D7D1041BBF032AC0041FFD3488B0425908D77024885C0753CF20F107C2438F20F10742430F20F106C2428F20F10642420F20F105C2418F20F10542410F20F104C2408F20F1004244883C44041594158595A5E5FC3488B0425988D770248C70425908D77020000000048C70425988D7702000000004889042550525D0141BBD065EB0041FFD3B8040000004883C478C3 -[3641732901f] jit-backend-dump} -[36417329d85] {jit-backend-counts -[3641732a13b] jit-backend-counts} -[3641769c473] {jit-backend -[36417716e1a] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a2c2 +0 4C8B4DA8488B4D804C8B55B04C8B45A0488B7D98488B7590F20F10442410F20F104C2418F20F10542420F20F105C2428F20F10642430F20F106C2438F20F10742440F20F107C2448F2440F10442450F2440F104C2458F2440F10542460F2440F105C2468F2440F10642470F2440F106C2478F2440F10B42480000000F2440F10BC24880000004885C07409488B142550C95401C349BB10A279E5D57F000041FFE3 +[d0e7d92ea2c] jit-backend-dump} +[d0e7d932e9c] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE 
/home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6400 +0 554889E5534154415541564157488DA5000000004C8B3C2550525D0148C7042550525D01000000004C8B342558525D0148C7042558525D01000000004C8B2C2560525D0148C7042560525D01000000004C8B242568525D0148C7042568525D01000000004C8B142530E863014C8B0C2578525D0148C7042578525D01000000004C8B042540E86301488B3C2548E86301488B342590525D0148C7042590525D0100000000488B1C2598525D0148C7042598525D0100000000488B1425A0525D0148C70425A0525D0100000000488B0C25A8525D0148C70425A8525D0100000000488B0425B0525D0148C70425B0525D010000000048898570FFFFFF49BBC02003C6827F0000498B034883C00149BBC02003C6827F00004989034983F8030F8500000000813A602E00000F8500000000488B7A104885FF0F84000000004C8B4208488B47204885C00F8500000000488B47084939C00F8C0000000048C74210000000004983FA000F85000000004D8B5110418139A82401000F85000000004D8B49184983F9020F85000000004885F60F84000000004D85D20F85000000004D8B576841C687950000000141F64704017417564152514C89FF4C89EE41BB9062C90041FFD359415A5E4D896F5041F64704017417564152514C89FF4C89E641BB9062C90041FFD359415A5E4D89677841C687960000000049C747600000000049C787800000000200000049C747582A00000041F6420401742A41F6420440751E564152514C89D74889F2BE0000000041BB0064C90041FFD359415A5EEB0541804AFF014989721041F6420401742A41F6420440751E564152514C89D7BE010000004889DA41BB0064C90041FFD359415A5EEB0541804AFF0149895A1849C742200000000041F6420401742A41F6420440751E564152514C89D7BE030000004889CA41BB0064C90041FFD359415A5EEB0541804AFF0149894A28488B8D70FFFFFF41F6420401742A41F6420440751E564152514C89D7BE040000004889CA41BB0064C90041FFD359415A5EEB0541804AFF0149894A304889342550525D0141BBD065EB0041FFD3B800000000488D65D8415F415E415D415C5B5DC3488B0425E08C77024829E0483B0425807C4501760D49BB34639FC3827F000041FFD3554889E5534154415541564157488DA570FFFFFF4989FF4989F64989D54989CC4D89C24C8B5D104D89D84C8B5D184C89DF4C8B5D204C89DE4C8B5D284C89DB4C8B5D304C89DA4C8B5D384C89D94C8B5D404C899D70FFFFFFE964FDFFFF49BB00609FC3827F000041FFD321383C343029241D180C080440030500000049BB00609FC3827F000041FFD3383C0834302924180C0440030600000049BB00609FC3827F000041FFD3383C081C34302924180C0440030700000049BB00609FC3827F000041FFD3383C08211C0034302924180C0440030800000049BB00609FC3827F000041FFD3383C08211C34302924180C0440030900000049BB00609FC3827F000041FFD329383C343024180C0440030A00000049BB00609FC3827F000041FFD3383C24343028180C0440030B00000049BB00609FC3827F000041FFD3383C25343028180C0440030C00000049BB00609FC3827F000041FFD3383C183430280C0440030D00000049BB00609FC3827F000041FFD3383C182834300C0440030E000000 -[3641773371c] jit-backend-dump} -[36417734058] {jit-backend-addr -Loop 0 ( #19 FOR_ITER) has address 7f82c39f64fb to 7f82c39f6718 (bootstrap 7f82c39f6400) -[36417735e49] jit-backend-addr} -[36417737271] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a363 +0 57565251415041514883EC40F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C24384889E741BB50D2A80041FFD3488B0425400C9C024885C0753CF20F107C2438F20F10742430F20F106C2428F20F10642420F20F105C2418F20F10542410F20F104C2408F20F1004244883C44041594158595A5E5FC341BB003AF20041FFD3B8030000004883C478C3 +[d0e7d937156] jit-backend-dump} +[d0e7d938ba2] {jit-backend-counts +[d0e7d9394fc] jit-backend-counts} +[d0e7fb5a273] {jit-backend +[d0e8032b45f] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6410 +0 70FFFFFF -[364177380ed] jit-backend-dump} -[36417738a6b] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a406 +0 
488B0425E00A9C024829E0483B0425E0EC4F01760D49BB63A379E5D57F000041FFD3554889E5534154415541564157488DA5000000004C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B4020498B48284D8B40304889BD70FFFFFF4889B568FFFFFF4C89BD60FFFFFF4C89A558FFFFFF4C898D50FFFFFF48899548FFFFFF48898D40FFFFFF4C898538FFFFFF49BBF03007E8D57F00004D8B034983C00149BBF03007E8D57F00004D89034983FA030F85000000008138306900000F85000000004C8B50104D85D20F84000000004C8B4008498B4A108139702703000F85000000004D8B5208498B4A08498B52104D8B52184983F8000F8C000000004D39D00F8D000000004D89C14C0FAFC24989CC4C01C14983C1014C8948084983FD000F85000000004883FB017206813BF81600000F850000000049BBE09C8DE5D57F00004D39DE0F85000000004C8B73084983C6010F8000000000488B1C25E80A9C024883FB000F8C0000000048898D30FFFFFF49BB083107E8D57F0000498B0B4883C10149BB083107E8D57F000049890B4D39D10F8D000000004C89C94C0FAFCA4C89E34D01CC4883C101488948084D89F14983C6010F80000000004C8B0C25E80A9C024983F9000F8C000000004C89A530FFFFFF4989C94989DCE993FFFFFF49BB00A079E5D57F000041FFD32944404838354C510C5400585C030400000049BB00A079E5D57F000041FFD34440004838354C0C54585C030500000049BB00A079E5D57F000041FFD3444000284838354C0C54585C030600000049BB00A079E5D57F000041FFD34440002104284838354C0C54585C030700000049BB00A079E5D57F000041FFD3444000212909054838354C0C54585C030800000049BB00A079E5D57F000041FFD34440002109054838354C0C54585C030900000049BB00A079E5D57F000041FFD335444048384C0C54005C05030A00000049BB00A079E5D57F000041FFD344400C48384C005C05030B00000049BB00A079E5D57F000041FFD3444038484C0C005C05030C00000049BB00A079E5D57F000041FFD344400C39484C0005030D00000049BB00A079E5D57F000041FFD34440484C003905030E00000049BB00A079E5D57F000041FFD34440484C003905030F00000049BB00A079E5D57F000041FFD3444000250931484C6139031000000049BB00A079E5D57F000041FFD3444039484C00310725031100000049BB00A079E5D57F000041FFD34440484C0039310707031200000049BB00A079E5D57F000041FFD34440484C00393107070313000000 +[d0e803559b7] jit-backend-dump} +[d0e80356de5] {jit-backend-addr +Loop 0 ( #19 FOR_ITER) has address 7fd5e579a43c to 7fd5e579a5fb (bootstrap 7fd5e579a406) +[d0e803590a1] jit-backend-addr} +[d0e8035a421] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f651f +0 74020000 -[364177395ae] jit-backend-dump} -[36417739a7f] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a438 +0 30FFFFFF +[d0e8035c161] jit-backend-dump} +[d0e8035d487] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f652b +0 87020000 -[3641773a44e] jit-backend-dump} -[3641773a95b] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a4cf +0 28010000 +[d0e8035eb7f] jit-backend-dump} +[d0e8035f647] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6538 +0 97020000 -[3641773b348] jit-backend-dump} -[3641773b975] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a4db +0 3B010000 +[d0e80360bb9] jit-backend-dump} +[d0e803615df] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6549 +0 A4020000 -[3641773c58d] jit-backend-dump} -[3641773cb4e] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a4e8 +0 4B010000 +[d0e80362a49] jit-backend-dump} +[d0e80363415] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6556 +0 B7020000 -[3641773d5f2] jit-backend-dump} -[3641773daae] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a4fc +0 55010000 
+[d0e80364879] jit-backend-dump} +[d0e80365233] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6568 +0 C4020000 -[3641773e46b] jit-backend-dump} -[3641773e936] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a516 +0 5B010000 +[d0e80366895] jit-backend-dump} +[d0e80367333] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6579 +0 CF020000 -[3641773f2f3] jit-backend-dump} -[3641773f7bb] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a51f +0 73010000 +[d0e803688bd] jit-backend-dump} +[d0e80369259] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6587 +0 DD020000 -[3641774026b] jit-backend-dump} -[3641774085f] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a53e +0 74010000 +[d0e8036a6c9] jit-backend-dump} +[d0e8036b065] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6590 +0 F0020000 -[36417741354] jit-backend-dump} -[36417741846] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a550 +0 7F010000 +[d0e8036c4cf] jit-backend-dump} +[d0e8036ce23] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6599 +0 02030000 -[36417743af3] jit-backend-dump} -[36417744df3] jit-backend} -[36417745995] {jit-log-opt-loop -# Loop 0 : entry bridge with 36 ops -[p0, p1, p2, p3, i4, p5, i6, i7, p8, p9, p10, p11, p12] +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a563 +0 87010000 +[d0e8036e28d] jit-backend-dump} +[d0e8036ed5b] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a571 +0 94010000 +[d0e8037030f] jit-backend-dump} +[d0e8037119d] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a583 +0 B5010000 +[d0e803726d9] jit-backend-dump} +[d0e80373069] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a5b1 +0 A0010000 +[d0e803744d9] jit-backend-dump} +[d0e80374e51] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a5d3 +0 9A010000 +[d0e803762c1] jit-backend-dump} +[d0e80376c99] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a5e5 +0 BE010000 +[d0e80378349] jit-backend-dump} +[d0e803796ff] jit-backend} +[d0e8037b13f] {jit-log-opt-loop +# Loop 0 : loop with 73 ops +[p0, p1] ++54: p2 = getfield_gc(p0, descr=) ++58: p3 = getfield_gc(p0, descr=) ++62: i4 = getfield_gc(p0, descr=) ++70: p5 = getfield_gc(p0, descr=) ++74: i6 = getfield_gc(p0, descr=) ++81: i7 = getfield_gc(p0, descr=) ++85: p8 = getfield_gc(p0, descr=) ++89: p10 = getarrayitem_gc(p8, 0, descr=) ++93: p12 = getarrayitem_gc(p8, 1, descr=) ++97: p14 = getarrayitem_gc(p8, 2, descr=) ++101: p16 = getarrayitem_gc(p8, 3, descr=) ++105: p18 = getarrayitem_gc(p8, 4, descr=) ++109: p19 = getfield_gc(p0, descr=) ++109: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, descr=TargetToken(140556656117424)) debug_merge_point(0, ' #19 FOR_ITER') -+281: guard_value(i6, 3, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p8, p9, p10, p11, p12] -+291: guard_class(p10, 21373152, descr=) [p1, p0, p10, p2, p3, i4, p5, p8, p9, p11, p12] -+303: p15 = getfield_gc(p10, descr=) -+307: guard_nonnull(p15, descr=) [p1, p0, p10, p15, p2, p3, i4, p5, p8, p9, p11, p12] -+316: i16 = getfield_gc(p10, descr=) -+320: p17 = getfield_gc(p15, descr=) -+324: guard_isnull(p17, descr=) [p1, p0, p10, i16, p15, p17, p2, p3, i4, p5, 
p8, p9, p11, p12] -+333: i18 = getfield_gc(p15, descr=) -+337: i19 = int_ge(i16, i18) -guard_true(i19, descr=) [p1, p0, p10, i16, p15, p2, p3, i4, p5, p8, p9, p11, p12] -+346: setfield_gc(p10, ConstPtr(ptr20), descr=) -+354: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p8, p9, p11, p12] -debug_merge_point(0, ' #38 POP_BLOCK') -+364: p22 = getfield_gc_pure(p5, descr=) -+368: guard_class(p5, 21436200, descr=) [p1, p0, p5, p2, p3, p22, p8, p9, p11, p12] -+381: i24 = getfield_gc_pure(p5, descr=) -+385: guard_value(i24, 2, descr=) [p1, p0, i24, p2, p3, p22, p8, p9, p11, p12] -debug_merge_point(0, ' #39 LOAD_FAST') -+395: guard_nonnull(p8, descr=) [p1, p0, p8, p2, p3, p22, p9, p11, p12] -debug_merge_point(0, ' #42 RETURN_VALUE') -+404: guard_isnull(p22, descr=) [p1, p0, p8, p22, p2, p3, p9, p11, p12] -+413: p26 = getfield_gc(p0, descr=) -+417: setfield_gc(p0, 1, descr=) -setfield_gc(p0, p2, descr=) -setfield_gc(p0, p3, descr=) -+493: setfield_gc(p0, 0, descr=) -+501: setfield_gc(p0, ConstPtr(ptr28), descr=) -+509: setfield_gc(p0, 2, descr=) -+520: setfield_gc(p0, 42, descr=) -setarrayitem_gc(p26, 0, p8, descr=) -setarrayitem_gc(p26, 1, p9, descr=) -+634: setarrayitem_gc(p26, 2, ConstPtr(ptr34), descr=) -setarrayitem_gc(p26, 3, p11, descr=) -setarrayitem_gc(p26, 4, p12, descr=) -+755: finish(p8, descr=) -+792: --end of the loop-- -[364177c2d89] jit-log-opt-loop} -[364179e17ad] {jit-backend -[36417a25ca1] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f68d8 +0 488DA50000000049BBD82003C6827F0000498B0B4883C10149BBD82003C6827F000049890B488B4F10488B7F184C89C04C0FAFC74C01C14883C001488942084983FA000F85000000004883FE017206813E980C00000F850000000049BB00DAB3C3827F00004D39DC0F85000000004C8B66084983C4010F8000000000488B3425E88C77024883EE0148893425E88C77024883FE000F8C0000000048899568FFFFFF488B042590EF4501488D5010483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700980C00004889142590EF45014C89600848898560FFFFFF488B042590EF4501488D5010483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700980C00004889142590EF45014889480848898558FFFFFF49BB00DAB3C3827F00004D89DC41BA0000000041B803000000BF13000000488BB560FFFFFF488B9D58FFFFFF488B9568FFFFFFB90000000048C78570FFFFFF0000000049BBFB649FC3827F000041FFE349BB00609FC3827F000041FFD329383C343024180C084005030F00000049BB00609FC3827F000041FFD3383C18343024084005031000000049BB00609FC3827F000041FFD3383C30342418084005031100000049BB00609FC3827F000041FFD3383C183134240805031200000049BB00609FC3827F000041FFD3383C34240831050313000000 -[36417a32919] jit-backend-dump} -[36417a3451d] {jit-backend-addr -bridge out of Guard 9 has address 7f82c39f68d8 to 7f82c39f6a5d -[36417a3526d] jit-backend-addr} -[36417a358c1] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f68db +0 D0FEFFFF -[36417a366e3] jit-backend-dump} -[36417a36cb1] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f691d +0 3C010000 -[36417a377bd] jit-backend-dump} -[36417a37d29] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f692f +0 47010000 -[36417a38859] jit-backend-dump} -[36417a38dd7] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6942 +0 4F010000 -[36417a39827] jit-backend-dump} -[36417a39c95] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP 
@7f82c39f6950 +0 5C010000 -[36417a3a587] jit-backend-dump} -[36417a3a9db] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f696e +0 58010000 -[36417a3b2c3] jit-backend-dump} -[36417a3bd61] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6556 +0 7E030000 -[36417a3c817] jit-backend-dump} -[36417a3ce4d] jit-backend} -[36417a3de9f] {jit-log-opt-bridge -# bridge out of Guard 9 with 29 ops -[p0, p1, p2, i3, p4, p5, p6, i7, p8, p9, p10, p11, p12] -+37: i13 = getfield_gc(p4, descr=) -+41: i14 = getfield_gc(p4, descr=) -+45: i15 = int_mul(i3, i14) -+52: i16 = int_add(i13, i15) -+55: i18 = int_add(i3, 1) -+59: setfield_gc(p2, i18, descr=) -+63: guard_value(i7, 0, descr=) [i7, p0, p1, p5, p6, p8, p9, p10, p2, p12, i16] ++195: guard_value(i6, 3, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18] ++205: guard_class(p14, 38308720, descr=) [p1, p0, p14, p2, p3, i4, p5, p10, p12, p16, p18] ++217: p22 = getfield_gc(p14, descr=) ++221: guard_nonnull(p22, descr=) [p1, p0, p14, p22, p2, p3, i4, p5, p10, p12, p16, p18] ++230: i23 = getfield_gc(p14, descr=) ++234: p24 = getfield_gc(p22, descr=) ++238: guard_class(p24, 38488496, descr=) [p1, p0, p14, i23, p24, p22, p2, p3, i4, p5, p10, p12, p16, p18] ++250: p26 = getfield_gc(p22, descr=) ++254: i27 = getfield_gc_pure(p26, descr=) ++258: i28 = getfield_gc_pure(p26, descr=) ++262: i29 = getfield_gc_pure(p26, descr=) ++266: i31 = int_lt(i23, 0) +guard_false(i31, descr=) [p1, p0, p14, i23, i29, i28, i27, p2, p3, i4, p5, p10, p12, p16, p18] ++276: i32 = int_ge(i23, i29) +guard_false(i32, descr=) [p1, p0, p14, i23, i28, i27, p2, p3, i4, p5, p10, p12, p16, p18] ++285: i33 = int_mul(i23, i28) ++292: i34 = int_add(i27, i33) ++298: i36 = int_add(i23, 1) ++302: setfield_gc(p14, i36, descr=) ++306: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p18, i34] debug_merge_point(0, ' #22 STORE_FAST') debug_merge_point(0, ' #25 LOAD_FAST') -+73: guard_nonnull_class(p9, ConstClass(W_IntObject), descr=) [p0, p1, p9, p5, p6, p8, p2, p12, i16] ++316: guard_nonnull_class(p10, ConstClass(W_IntObject), descr=) [p1, p0, p10, p2, p3, p5, p14, p18, i34] debug_merge_point(0, ' #28 LOAD_CONST') -+91: guard_value(p6, ConstPtr(ptr21), descr=) [p0, p1, p6, p5, p8, p9, p2, p12, i16] ++334: guard_value(p3, ConstPtr(ptr39), descr=) [p1, p0, p3, p2, p5, p10, p14, p18, i34] debug_merge_point(0, ' #31 INPLACE_ADD') -+110: i22 = getfield_gc_pure(p9, descr=) -+114: i24 = int_add_ovf(i22, 1) -guard_no_overflow(, descr=) [p0, p1, p9, i24, p5, p8, p2, i16] ++353: i40 = getfield_gc_pure(p10, descr=) ++357: i42 = int_add_ovf(i40, 1) +guard_no_overflow(, descr=) [p1, p0, p10, i42, p2, p5, p14, i34] debug_merge_point(0, ' #32 STORE_FAST') debug_merge_point(0, ' #35 JUMP_ABSOLUTE') -+124: i26 = getfield_raw(41389288, descr=) -+132: i28 = int_sub(i26, 1) -+136: setfield_raw(41389288, i28, descr=) -+144: i30 = int_lt(i28, 0) -guard_false(i30, descr=) [p0, p1, p5, p8, p2, i24, i16] ++367: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p14, i42, i34] ++367: i44 = getfield_raw(43780840, descr=) ++375: i46 = int_lt(i44, 0) +guard_false(i46, descr=) [p1, p0, p2, p5, p14, i42, i34] debug_merge_point(0, ' #19 FOR_ITER') -+154: p32 = new_with_vtable(ConstClass(W_IntObject)) -+224: setfield_gc(p32, i24, descr=) -+228: p34 = new_with_vtable(ConstClass(W_IntObject)) -+298: setfield_gc(p34, i16, descr=) -+302: jump(p1, p0, p5, ConstPtr(ptr35), 0, p8, 3, 19, p32, p34, p2, 
ConstPtr(ptr39), ConstPtr(ptr40), descr=) -+389: --end of the loop-- -[36417a5ffcd] jit-log-opt-bridge} -[36417bccad5] {jit-backend -[36417bf916b] {jit-backend-dump ++385: label(p0, p1, p2, p5, i42, i34, p14, i36, i29, i28, i27, descr=TargetToken(140556656117504)) +debug_merge_point(0, ' #19 FOR_ITER') ++422: i47 = int_ge(i36, i29) +guard_false(i47, descr=) [p1, p0, p14, i36, i28, i27, p2, p5, i34, i42] ++431: i48 = int_mul(i36, i28) ++438: i49 = int_add(i27, i48) ++444: i50 = int_add(i36, 1) +debug_merge_point(0, ' #22 STORE_FAST') +debug_merge_point(0, ' #25 LOAD_FAST') +debug_merge_point(0, ' #28 LOAD_CONST') +debug_merge_point(0, ' #31 INPLACE_ADD') ++448: setfield_gc(p14, i50, descr=) ++452: i51 = int_add_ovf(i42, 1) +guard_no_overflow(, descr=) [p1, p0, i51, p2, p5, p14, i49, None, i42] +debug_merge_point(0, ' #32 STORE_FAST') +debug_merge_point(0, ' #35 JUMP_ABSOLUTE') ++465: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p14, i51, i49, None, None] ++465: i53 = getfield_raw(43780840, descr=) ++473: i54 = int_lt(i53, 0) +guard_false(i54, descr=) [p1, p0, p2, p5, p14, i51, i49, None, None] +debug_merge_point(0, ' #19 FOR_ITER') ++483: jump(p0, p1, p2, p5, i51, i49, p14, i50, i29, i28, i27, descr=TargetToken(140556656117504)) ++501: --end of the loop-- +[d0e804a30d1] jit-log-opt-loop} +[d0e842b177e] {jit-backend +[d0e8436c0d8] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6ae3 +0 554889E5534154415541564157488DA500000000488B042550525D0148C7042550525D010000000048898570FFFFFF488B042558525D0148C7042558525D010000000048898568FFFFFF488B042560525D0148C7042560525D010000000048898560FFFFFF488B042568525D0148C7042568525D010000000048898558FFFFFF4C8B3C2530E863014C8B342538E8630149BBF02003C6827F00004D8B2B4983C50149BBF02003C6827F00004D892B4981FE102700000F8D000000004C89F0B90200000048898550FFFFFF489948F7F94889D048C1FA3F41BE020000004921D64C01F04883F8000F85000000004C89F84983C7010F8000000000488B8550FFFFFF4883C0014C8B3425E88C77024983EE014C893425E88C77024983FE000F8C0000000048898548FFFFFF4C8BB548FFFFFFE95BFFFFFF488B0425E08C77024829E0483B0425807C4501760D49BB34639FC3827F000041FFD3554889E5534154415541564157488DA540FFFFFF4889BD70FFFFFF4889B568FFFFFF48899560FFFFFF48898D58FFFFFF4D89C74D89CEE9FEFEFFFF49BB00609FC3827F000041FFD34440484C3D39031400000049BB00609FC3827F000041FFD34440484C013D51031500000049BB00609FC3827F000041FFD344403D484C070151031600000049BB00609FC3827F000041FFD34440484C013D0707070317000000 -[36417bfe90d] jit-backend-dump} -[36417bfeef7] {jit-backend-addr -Loop 1 ( #15 LOAD_FAST) has address 7f82c39f6b73 to 7f82c39f6c18 (bootstrap 7f82c39f6ae3) -[36417bffc99] jit-backend-addr} -[36417c00275] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a7c2 +0 
488B0425E00A9C024829E0483B0425E0EC4F01760D49BB63A379E5D57F000041FFD3554889E5534154415541564157488DA5000000004C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B40204D8B40284889BD70FFFFFF4889B568FFFFFF4C89BD60FFFFFF4C89A558FFFFFF4C898D50FFFFFF48898548FFFFFF4C898540FFFFFF49BBD83007E8D57F00004D8B034983C00149BBD83007E8D57F00004D89034983FA020F85000000004883FA017206813AF81600000F85000000004983FD000F850000000049BB989D8DE5D57F00004D39DE0F85000000004C8B72084981FE102700000F8D0000000049BB00000000000000804D39DE0F84000000004C89F0B90200000048899538FFFFFF48898530FFFFFF489948F7F94889D048C1FA3F41BE020000004921D64C01F04883F8000F85000000004883FB017206813BF81600000F8500000000488B43084883C0010F8000000000488B9D30FFFFFF4883C3014C8B3425E80A9C024983FE000F8C0000000049BB203107E8D57F00004D8B334983C60149BB203107E8D57F00004D89334881FB102700000F8D0000000049BB00000000000000804C39DB0F840000000048898528FFFFFF4889D8B90200000048898520FFFFFF489948F7F94889D048C1FA3FBB020000004821D34801D84883F8000F8500000000488B8528FFFFFF4883C0010F8000000000488B9D20FFFFFF4883C301488B1425E80A9C024883FA000F8C00000000E958FFFFFF49BB00A079E5D57F000041FFD32944404838354C510C085458031400000049BB00A079E5D57F000041FFD34440084838354C0C5458031500000049BB00A079E5D57F000041FFD335444048384C0C0858031600000049BB00A079E5D57F000041FFD3444038484C0C0858031700000049BB00A079E5D57F000041FFD3444008484C0C031800000049BB00A079E5D57F000041FFD344400839484C0C031900000049BB00A079E5D57F000041FFD34440484C0C5C01031A00000049BB00A079E5D57F000041FFD344400C484C5C07031B00000049BB00A079E5D57F000041FFD344400C01484C5C07031C00000049BB00A079E5D57F000041FFD34440484C010D07031D00000049BB00A079E5D57F000041FFD34440484C010D07031E00000049BB00A079E5D57F000041FFD34440484C010D031F00000049BB00A079E5D57F000041FFD344400D484C0107032000000049BB00A079E5D57F000041FFD34440484C016569032100000049BB00A079E5D57F000041FFD3444001484C076569032200000049BB00A079E5D57F000041FFD34440484C0D01070707032300000049BB00A079E5D57F000041FFD34440484C0D010707070324000000 +[d0e8437f56c] jit-backend-dump} +[d0e843802c8] {jit-backend-addr +Loop 1 ( #15 LOAD_FAST) has address 7fd5e579a7f8 to 7fd5e579a9f4 (bootstrap 7fd5e579a7c2) +[d0e84382392] jit-backend-addr} +[d0e843830f4] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6af3 +0 40FFFFFF -[36417c00f43] jit-backend-dump} -[36417c014fb] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a7f4 +0 20FFFFFF +[d0e84384e94] jit-backend-dump} +[d0e84385f8c] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6b9a +0 D7000000 -[36417c020d3] jit-backend-dump} -[36417c025a5] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a880 +0 70010000 +[d0e843877c8] jit-backend-dump} +[d0e843882ba] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6bcb +0 BE000000 -[36417c02fb5] jit-backend-dump} -[36417c03425] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a892 +0 7C010000 +[d0e8438995e] jit-backend-dump} +[d0e8438a3ea] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6bd8 +0 CA000000 -[36417c03d33] jit-backend-dump} -[36417c041bb] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a89c +0 8E010000 +[d0e8438b926] jit-backend-dump} +[d0e8438c2c2] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6c01 +0 BB000000 -[36417c04aab] jit-backend-dump} 
-[36417c050eb] jit-backend} -[36417c066e3] {jit-log-opt-loop -# Loop 1 : loop with 35 ops -[p0, p1, p2, p3, i4, i5] +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a8af +0 96010000 +[d0e8439bf82] jit-backend-dump} +[d0e8439cf90] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a8c0 +0 9F010000 +[d0e8439e832] jit-backend-dump} +[d0e8439f210] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a8d3 +0 A4010000 +[d0e843a094a] jit-backend-dump} +[d0e843a1436] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a90b +0 85010000 +[d0e843a2a14] jit-backend-dump} +[d0e843a34ac] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a91d +0 8C010000 +[d0e843a4988] jit-backend-dump} +[d0e843a535a] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a92b +0 97010000 +[d0e843a6800] jit-backend-dump} +[d0e843a7484] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a948 +0 AD010000 +[d0e843a896c] jit-backend-dump} +[d0e843a937a] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a973 +0 9B010000 +[d0e843aaa7e] jit-backend-dump} +[d0e843ab552] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a986 +0 A0010000 +[d0e843acb0c] jit-backend-dump} +[d0e843ad4f6] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a9bd +0 82010000 +[d0e843ae97e] jit-backend-dump} +[d0e843af350] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a9ce +0 8A010000 +[d0e843b07d2] jit-backend-dump} +[d0e843b11f2] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a9eb +0 A2010000 +[d0e843b27b2] jit-backend-dump} +[d0e843b396a] jit-backend} +[d0e843b50e6] {jit-log-opt-loop +# Loop 1 : loop with 92 ops +[p0, p1] ++54: p2 = getfield_gc(p0, descr=) ++58: p3 = getfield_gc(p0, descr=) ++62: i4 = getfield_gc(p0, descr=) ++70: p5 = getfield_gc(p0, descr=) ++74: i6 = getfield_gc(p0, descr=) ++81: i7 = getfield_gc(p0, descr=) ++85: p8 = getfield_gc(p0, descr=) ++89: p10 = getarrayitem_gc(p8, 0, descr=) ++93: p12 = getarrayitem_gc(p8, 1, descr=) ++97: p14 = getarrayitem_gc(p8, 2, descr=) ++101: p16 = getarrayitem_gc(p8, 3, descr=) ++105: p17 = getfield_gc(p0, descr=) ++105: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, descr=TargetToken(140556656121504)) debug_merge_point(0, ' #15 LOAD_FAST') ++184: guard_value(i6, 2, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16] ++194: guard_nonnull_class(p12, ConstClass(W_IntObject), descr=) [p1, p0, p12, p2, p3, i4, p5, p10, p14, p16] ++212: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p16] debug_merge_point(0, ' #18 LOAD_CONST') ++222: guard_value(p3, ConstPtr(ptr21), descr=) [p1, p0, p3, p2, p5, p10, p12, p16] debug_merge_point(0, ' #21 COMPARE_OP') -+174: i7 = int_lt(i5, 10000) -guard_true(i7, descr=) [p1, p0, p2, p3, i4, i5] ++241: i22 = getfield_gc_pure(p12, descr=) ++245: i24 = int_lt(i22, 10000) +guard_true(i24, descr=) [p1, p0, p12, p2, p5, p10] debug_merge_point(0, ' #24 POP_JUMP_IF_FALSE') debug_merge_point(0, ' #27 LOAD_FAST') debug_merge_point(0, ' #30 LOAD_CONST') debug_merge_point(0, ' #33 BINARY_MODULO') -+187: i9 = int_mod(i5, 2) -+207: i11 = int_rshift(i9, 63) -+214: i12 = int_and(2, i11) -+223: i13 = int_add(i9, i12) ++258: i26 = int_eq(i22, -9223372036854775808) +guard_false(i26, descr=) [p1, p0, p12, i22, p2, p5, p10] ++277: i28 = int_mod(i22, 2) ++304: i30 = 
int_rshift(i28, 63) ++311: i31 = int_and(2, i30) ++320: i32 = int_add(i28, i31) debug_merge_point(0, ' #34 POP_JUMP_IF_FALSE') -+226: i14 = int_is_true(i13) -guard_false(i14, descr=) [p1, p0, p2, p3, i13, i4, i5] ++323: i33 = int_is_true(i32) +guard_false(i33, descr=) [p1, p0, p2, p5, p10, p12, i32] debug_merge_point(0, ' #53 LOAD_FAST') ++333: guard_nonnull_class(p10, ConstClass(W_IntObject), descr=) [p1, p0, p10, p2, p5, p12, None] debug_merge_point(0, ' #56 LOAD_CONST') debug_merge_point(0, ' #59 INPLACE_ADD') -+236: i16 = int_add_ovf(i4, 1) -guard_no_overflow(, descr=) [p1, p0, i16, p2, p3, None, i4, i5] ++351: i36 = getfield_gc_pure(p10, descr=) ++355: i38 = int_add_ovf(i36, 1) +guard_no_overflow(, descr=) [p1, p0, p10, i38, p2, p5, p12, None] debug_merge_point(0, ' #60 STORE_FAST') debug_merge_point(0, ' #63 LOAD_FAST') debug_merge_point(0, ' #66 LOAD_CONST') debug_merge_point(0, ' #69 INPLACE_ADD') -+249: i19 = int_add(i5, 1) ++365: i40 = int_add(i22, 1) debug_merge_point(0, ' #70 STORE_FAST') debug_merge_point(0, ' #73 JUMP_ABSOLUTE') -+260: i21 = getfield_raw(41389288, descr=) -+268: i23 = int_sub(i21, 1) -+272: setfield_raw(41389288, i23, descr=) -+280: i25 = int_lt(i23, 0) -guard_false(i25, descr=) [p1, p0, p2, p3, i19, i16, None, None, None] ++376: guard_not_invalidated(, descr=) [p1, p0, p2, p5, i38, i40, None] ++376: i42 = getfield_raw(43780840, descr=) ++384: i44 = int_lt(i42, 0) +guard_false(i44, descr=) [p1, p0, p2, p5, i38, i40, None] debug_merge_point(0, ' #15 LOAD_FAST') -+290: jump(p0, p1, p2, p3, i16, i19, descr=) -+309: --end of the loop-- -[36417c25023] jit-log-opt-loop} -[36417c26cfd] {jit-backend -[36417c5beb7] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6cdb +0 
554889E5534154415541564157488DA5000000004C8B3C2550525D0148C7042550525D01000000004C8B342558525D0148C7042558525D01000000004C8B2C2560525D0148C7042560525D01000000004C8B242568525D0148C7042568525D01000000004C8B142530E863014C8B0C2578525D0148C7042578525D01000000004C8B042540E86301488B3C2548E86301488B342590525D0148C7042590525D0100000000488B1C2598525D0148C7042598525D0100000000488B1425A0525D0148C70425A0525D0100000000488B0C25A8525D0148C70425A8525D010000000049BB082103C6827F0000498B034883C00149BB082103C6827F00004989034983F8020F85000000004883FB017206813B980C00000F85000000004983FA000F850000000049BB2060B0C3827F00004D39DC0F8500000000488B4B084881F9102700000F8D0000000049BB00000000000000804C39D90F84000000004889C8B90200000048898570FFFFFF489948F7F94889D048C1FA3FB9020000004821D14801C84883F8000F85000000004883FE017206813E980C00000F8500000000488B46084883C0010F8000000000488B9D70FFFFFF4883C301488B3425E88C77024883EE0148893425E88C77024883FE000F8C0000000048898568FFFFFF4C89BD70FFFFFF4C89AD60FFFFFF4C898D58FFFFFF4C8BBD68FFFFFF4C89B568FFFFFF4989DE49BB736B9FC3827F000041FFE3488B0425E08C77024829E0483B0425807C4501760D49BB34639FC3827F000041FFD3554889E5534154415541564157488DA540FFFFFF4989FF4989F64989D54989CC4D89C24C8B5D104D89D84C8B5D184C89DF4C8B5D204C89DE4C8B5D284C89DB4C8B5D304C89DA4C8B5D384C89D9E95EFEFFFF49BB00609FC3827F000041FFD321383C343029241D180C0804031800000049BB00609FC3827F000041FFD3383C0C34302924180804031900000049BB00609FC3827F000041FFD329383C343024180C04031A00000049BB00609FC3827F000041FFD3383C303424180C04031B00000049BB00609FC3827F000041FFD3383C0C342418031C00000049BB00609FC3827F000041FFD3383C0C05342418031D00000049BB00609FC3827F000041FFD3383C3424180C01031E00000049BB00609FC3827F000041FFD3383C1834240C07031F00000049BB00609FC3827F000041FFD3383C180134240C07032000000049BB00609FC3827F000041FFD3383C3424010D070321000000 -[36417c63911] jit-backend-dump} -[36417c63f3b] {jit-backend-addr -Loop 2 ( #15 LOAD_FAST) has address 7f82c39f6dbb to 7f82c39f6ee9 (bootstrap 7f82c39f6cdb) -[36417c64b99] jit-backend-addr} -[36417c6518f] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6ceb +0 40FFFFFF -[36417c65ed1] jit-backend-dump} -[36417c6644b] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6ddf +0 7A010000 -[36417c66f7b] jit-backend-dump} -[36417c675a5] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6df1 +0 86010000 -[36417c67ff5] jit-backend-dump} -[36417c68637] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6dfb +0 98010000 -[36417c68fd5] jit-backend-dump} -[36417c6940f] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6e0e +0 A0010000 -[36417c69ce7] jit-backend-dump} -[36417c6a125] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6e1f +0 A9010000 -[36417c6a9e9] jit-backend-dump} -[36417c6af41] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6e32 +0 AE010000 -[36417c7050f] jit-backend-dump} -[36417c70b95] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6e62 +0 97010000 -[36417c715f7] jit-backend-dump} -[36417c71b77] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6e74 +0 9E010000 -[36417c724f5] jit-backend-dump} 
-[36417c7292f] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6e82 +0 A9010000 -[36417c73209] jit-backend-dump} -[36417c73669] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6eab +0 9A010000 -[36417c7408f] jit-backend-dump} -[36417c74655] jit-backend} -[36417c74f83] {jit-log-opt-loop -# Loop 2 : entry bridge with 44 ops -[p0, p1, p2, p3, i4, p5, i6, i7, p8, p9, p10, p11] ++394: label(p0, p1, p2, p5, i38, i40, descr=TargetToken(140556656121584)) debug_merge_point(0, ' #15 LOAD_FAST') -+254: guard_value(i6, 2, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p8, p9, p10, p11] -+264: guard_nonnull_class(p9, ConstClass(W_IntObject), descr=) [p1, p0, p9, p2, p3, i4, p5, p8, p10, p11] -+282: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p8, p9, p11] debug_merge_point(0, ' #18 LOAD_CONST') -+292: guard_value(p3, ConstPtr(ptr15), descr=) [p1, p0, p3, p2, p5, p8, p9, p11] debug_merge_point(0, ' #21 COMPARE_OP') -+311: i16 = getfield_gc_pure(p9, descr=) -+315: i18 = int_lt(i16, 10000) -guard_true(i18, descr=) [p1, p0, p9, p2, p5, p8] ++424: i45 = int_lt(i40, 10000) +guard_true(i45, descr=) [p1, p0, p2, p5, i38, i40] debug_merge_point(0, ' #24 POP_JUMP_IF_FALSE') debug_merge_point(0, ' #27 LOAD_FAST') debug_merge_point(0, ' #30 LOAD_CONST') debug_merge_point(0, ' #33 BINARY_MODULO') -+328: i20 = int_eq(i16, -9223372036854775808) -guard_false(i20, descr=) [p1, p0, p9, i16, p2, p5, p8] -+347: i22 = int_mod(i16, 2) -+367: i24 = int_rshift(i22, 63) -+374: i25 = int_and(2, i24) -+382: i26 = int_add(i22, i25) ++437: i46 = int_eq(i40, -9223372036854775808) +guard_false(i46, descr=) [p1, p0, i40, p2, p5, i38, None] ++456: i47 = int_mod(i40, 2) ++483: i48 = int_rshift(i47, 63) ++490: i49 = int_and(2, i48) ++498: i50 = int_add(i47, i49) debug_merge_point(0, ' #34 POP_JUMP_IF_FALSE') -+385: i27 = int_is_true(i26) -guard_false(i27, descr=) [p1, p0, p2, p5, p8, p9, i26] ++501: i51 = int_is_true(i50) +guard_false(i51, descr=) [p1, p0, p2, p5, i50, i38, i40] debug_merge_point(0, ' #53 LOAD_FAST') -+395: guard_nonnull_class(p8, ConstClass(W_IntObject), descr=) [p1, p0, p8, p2, p5, p9, None] debug_merge_point(0, ' #56 LOAD_CONST') debug_merge_point(0, ' #59 INPLACE_ADD') -+413: i30 = getfield_gc_pure(p8, descr=) -+417: i32 = int_add_ovf(i30, 1) -guard_no_overflow(, descr=) [p1, p0, p8, i32, p2, p5, p9, None] ++511: i52 = int_add_ovf(i38, 1) +guard_no_overflow(, descr=) [p1, p0, i52, p2, p5, None, i38, i40] debug_merge_point(0, ' #60 STORE_FAST') debug_merge_point(0, ' #63 LOAD_FAST') debug_merge_point(0, ' #66 LOAD_CONST') debug_merge_point(0, ' #69 INPLACE_ADD') -+427: i34 = int_add(i16, 1) ++528: i53 = int_add(i40, 1) debug_merge_point(0, ' #70 STORE_FAST') debug_merge_point(0, ' #73 JUMP_ABSOLUTE') -+438: i36 = getfield_raw(41389288, descr=) -+446: i38 = int_sub(i36, 1) -+450: setfield_raw(41389288, i38, descr=) -+458: i40 = int_lt(i38, 0) -guard_false(i40, descr=) [p1, p0, p2, p5, i32, i34, None] ++539: guard_not_invalidated(, descr=) [p1, p0, p2, p5, i53, i52, None, None, None] ++539: i54 = getfield_raw(43780840, descr=) ++547: i55 = int_lt(i54, 0) +guard_false(i55, descr=) [p1, p0, p2, p5, i53, i52, None, None, None] debug_merge_point(0, ' #15 LOAD_FAST') -+468: jump(p0, p1, p2, p5, i32, i34, descr=) -+526: --end of the loop-- -[36417c98f33] jit-log-opt-loop} -[36418376e79] {jit-backend -[364183a3465] {jit-backend-dump ++557: jump(p0, p1, p2, p5, i52, i53, descr=TargetToken(140556656121584)) 
++562: --end of the loop-- +[d0e84448eae] jit-log-opt-loop} +[d0e85322b25] {jit-backend +[d0e85385784] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7062 +0 554889E5534154415541564157488DA500000000488B042550525D0148C7042550525D010000000048898570FFFFFF488B042558525D0148C7042558525D010000000048898568FFFFFF488B042560525D0148C7042560525D010000000048898560FFFFFF488B042568525D0148C7042568525D010000000048898558FFFFFF4C8B3C2530E863014C8B342538E8630149BBA82003C6827F00004D8B2B4983C50149BBA82003C6827F00004D892B4981FE102700000F8D000000004C89F0B90200000048898550FFFFFF489948F7F94889D048C1FA3F41BE020000004921D64C01F04883F8000F85000000004C89F84983C7010F8000000000488B8550FFFFFF4883C0014C8B3425E88C77024983EE0D4C893425E88C77024983FE000F8C0000000048898548FFFFFF4C8BB548FFFFFFE95BFFFFFF488B0425E08C77024829E0483B0425807C4501760D49BB34639FC3827F000041FFD3554889E5534154415541564157488DA540FFFFFF4889BD70FFFFFF4889B568FFFFFF48899560FFFFFF48898D58FFFFFF4D89C74D89CEE9FEFEFFFF49BB00609FC3827F000041FFD34440484C393D032200000049BB00609FC3827F000041FFD34440484C01513D032300000049BB00609FC3827F000041FFD344403D484C075101032400000049BB00609FC3827F000041FFD34440484C013D0707070325000000 -[364183a91a9] jit-backend-dump} -[364183a9797] {jit-backend-addr -Loop 3 ( #15 LOAD_FAST) has address 7f82c39f70f2 to 7f82c39f7197 (bootstrap 7f82c39f7062) -[364183aa5e9] jit-backend-addr} -[364183aacc1] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579abb9 +0 488DA50000000049BB383107E8D57F00004D8B234983C40149BB383107E8D57F00004D89234C8BA558FFFFFF498B54241048C740100000000041813C24288801000F85000000004D8B6424184983FC020F85000000004885D20F8500000000488B9570FFFFFF4C8B6268488B042550C95401488D5020483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C700F8160000488B9570FFFFFF40C68295000000014C8B8D60FFFFFFF64204017417504151524889D74C89CE41BB8045C50041FFD35A4159584C894A50F6420401741D50524889D749BBE09C8DE5D57F00004C89DE41BB8045C50041FFD35A5849BBE09C8DE5D57F00004C895A7840C682960000000048C742600000000048C782800000000200000048C742582A00000041F644240401742641F6442404407518504C89E7BE000000004889C241BBE042C50041FFD358EB0641804C24FF0149894424104889C24883C01048C700F81600004C8B8D30FFFFFF4C89480841F644240401742841F644240440751A52504C89E7BE010000004889C241BBE042C50041FFD3585AEB0641804C24FF01498944241849C74424200000000049C74424280000000049C7442430000000004C89720848891425F05F710141BB503AF20041FFD3B802000000488D65D8415F415E415D415C5B5DC349BB00A079E5D57F000041FFD344403048083961032500000049BB00A079E5D57F000041FFD344403148083961032600000049BB00A079E5D57F000041FFD34440084839610327000000 +[d0e8539120a] jit-backend-dump} +[d0e853923bc] {jit-backend-addr +bridge out of Guard 16 has address 7fd5e579abb9 to 7fd5e579adb2 +[d0e85393ad2] jit-backend-addr} +[d0e85394658] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7072 +0 40FFFFFF -[364183aba11] jit-backend-dump} -[364183ac033] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579abbc +0 A0FEFFFF +[d0e85395d96] jit-backend-dump} +[d0e853968e0] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7119 +0 D7000000 -[364183aca69] jit-backend-dump} -[364183acf27] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579abfc +0 B2010000 +[d0e85397ba5] jit-backend-dump} +[d0e85398401] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f714a +0 BE000000 
-[364183ad857] jit-backend-dump} -[364183adde1] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579ac0b +0 BC010000 +[d0e8539952c] jit-backend-dump} +[d0e85399d06] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7157 +0 CA000000 -[364183ae86d] jit-backend-dump} -[364183aee5d] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579ac14 +0 CC010000 +[d0e8539af85] jit-backend-dump} +[d0e8539bb06] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7180 +0 BB000000 -[364183af8dd] jit-backend-dump} -[364183afee1] jit-backend} -[364183b09bf] {jit-log-opt-loop -# Loop 3 : loop with 34 ops -[p0, p1, p2, p3, i4, i5] -debug_merge_point(0, ' #18 LOAD_CONST') -debug_merge_point(0, ' #21 COMPARE_OP') -+174: i7 = int_lt(i5, 10000) -guard_true(i7, descr=) [p1, p0, p2, p3, i5, i4] -debug_merge_point(0, ' #24 POP_JUMP_IF_FALSE') -debug_merge_point(0, ' #27 LOAD_FAST') -debug_merge_point(0, ' #30 LOAD_CONST') -debug_merge_point(0, ' #33 BINARY_MODULO') -+187: i9 = int_mod(i5, 2) -+207: i11 = int_rshift(i9, 63) -+214: i12 = int_and(2, i11) -+223: i13 = int_add(i9, i12) -debug_merge_point(0, ' #34 POP_JUMP_IF_FALSE') -+226: i14 = int_is_true(i13) -guard_false(i14, descr=) [p1, p0, p2, p3, i13, i5, i4] -debug_merge_point(0, ' #53 LOAD_FAST') -debug_merge_point(0, ' #56 LOAD_CONST') -debug_merge_point(0, ' #59 INPLACE_ADD') -+236: i16 = int_add_ovf(i4, 1) -guard_no_overflow(, descr=) [p1, p0, i16, p2, p3, None, i5, i4] -debug_merge_point(0, ' #60 STORE_FAST') -debug_merge_point(0, ' #63 LOAD_FAST') -debug_merge_point(0, ' #66 LOAD_CONST') -debug_merge_point(0, ' #69 INPLACE_ADD') -+249: i19 = int_add(i5, 1) -debug_merge_point(0, ' #70 STORE_FAST') -debug_merge_point(0, ' #73 JUMP_ABSOLUTE') -+260: i21 = getfield_raw(41389288, descr=) -+268: i23 = int_sub(i21, 13) -+272: setfield_raw(41389288, i23, descr=) -+280: i25 = int_lt(i23, 0) -guard_false(i25, descr=) [p1, p0, p2, p3, i19, i16, None, None, None] -debug_merge_point(0, ' #15 LOAD_FAST') -+290: jump(p0, p1, p2, p3, i16, i19, descr=) -+309: --end of the loop-- -[364183d4b25] jit-log-opt-loop} -[364184f0bf3] {jit-backend -[3641876f97b] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a5b1 +0 04060000 +[d0e8539cc6d] jit-backend-dump} +[d0e8539d95b] jit-backend} +[d0e8539e9aa] {jit-log-opt-bridge +# bridge out of Guard 16 with 28 ops +[p0, p1, p2, i3, i4, i5, p6, p7, i8, i9] +debug_merge_point(0, ' #38 POP_BLOCK') ++37: p10 = getfield_gc_pure(p7, descr=) ++49: setfield_gc(p2, ConstPtr(ptr11), descr=) ++57: guard_class(p7, 38382184, descr=) [p0, p1, p7, p6, p10, i9, i8] ++71: i13 = getfield_gc_pure(p7, descr=) ++76: guard_value(i13, 2, descr=) [p0, p1, i13, p6, p10, i9, i8] +debug_merge_point(0, ' #39 LOAD_FAST') +debug_merge_point(0, ' #42 RETURN_VALUE') ++86: guard_isnull(p10, descr=) [p0, p1, p10, p6, i9, i8] ++95: p15 = getfield_gc(p1, descr=) ++106: p16 = getfield_gc(p1, descr=) +p18 = new_with_vtable(ConstClass(W_IntObject)) ++169: setfield_gc(p1, 1, descr=) +setfield_gc(p1, p6, descr=) +setfield_gc(p1, ConstPtr(ptr20), descr=) ++273: setfield_gc(p1, 0, descr=) ++281: setfield_gc(p1, ConstPtr(ptr22), descr=) ++289: setfield_gc(p1, 2, descr=) ++300: setfield_gc(p1, 42, descr=) +setarrayitem_gc(p15, 0, p18, descr=) +p27 = new_with_vtable(ConstClass(W_IntObject)) ++373: setfield_gc(p27, i8, descr=) +setarrayitem_gc(p15, 1, p27, descr=) ++437: setarrayitem_gc(p15, 2, ConstPtr(ptr30), descr=) ++446: setarrayitem_gc(p15, 3, 
ConstPtr(ptr32), descr=) ++455: setarrayitem_gc(p15, 4, ConstPtr(ptr32), descr=) ++464: setfield_gc(p18, i9, descr=) ++468: finish(p18, descr=) ++505: --end of the loop-- +[d0e853d860a] jit-log-opt-bridge} +[d0e8a56c11b] {jit-backend +[d0e8aaa04c9] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f737a +0 488DA50000000049BB202103C6827F0000498B034883C00149BB202103C6827F0000498903488B8570FFFFFF4C8B700849BB20A0B0C3827F00004D39DE0F85000000004D8B6E104981FD60164F010F850000000049BBD881AFC3827F00004D8B334983FE01720741813E802701000F85000000004D8B6E1849BB00DAB3C3827F00004D39DD0F85000000004D8B6E4041BB20268A0041FFD34C8B60384C8D9578FFFFFF4C8B48484D85C90F85000000004C8B48284983F9000F850000000049BB20A0B0C3827F00004D39DD0F85000000004D8B4D104981F960164F010F850000000049BBD83AB6C3827F00004D8B2B4D85ED0F85000000004C8B2C2560F151014981FD60164F010F85000000004C8B2C25284C66014981FDA0D86A010F85000000004C8B2C25E88C77024983ED084C892C25E88C77024983FD000F8C000000004C8DAD78FFFFFF48898548FFFFFF488B042590EF4501488D5018483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700903000004889142590EF45014C8950084C8B9548FFFFFF41F642040174154152504C89D74889C641BB9062C90041FFD358415A49894238488B9570FFFFFF4C896A1848898540FFFFFF488B042590EF4501488D9098000000483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700702001004889142590EF450148898538FFFFFF488B042590EF4501488D5038483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700880000004889142590EF450148C740080500000048898530FFFFFF488B042590EF4501488D5010483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700980C00004889142590EF450148C7400801000000488B9530FFFFFFF64204017417415252504889D74889C641BB9062C90041FFD3585A415A4889421048898528FFFFFF488B042590EF4501488D5010483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700980C00004889142590EF4501488B9530FFFFFFF64204017417415252504889D74889C641BB9062C90041FFD3585A415A4889421848898520FFFFFF488B042590EF4501488D5018483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700602E00004889142590EF450148C740080100000048898518FFFFFF488B042590EF4501488D5028483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700604700004889142590EF450148C740180100000048C7400803000000488B9518FFFFFFF64204017417524152504889D74889C641BB9062C90041FFD358415A5A48894210488B8530FFFFFFF64004017417524152504889C74889D641BB9062C90041FFD358415A5A488950204C8BAD38FFFFFF41F64504017417524152504C89EF4889C641BB9062C90041FFD358415A5A4989456841F6450401741F5241524C89EF49BB20A0B0C3827F00004C89DE41BB9062C90041FFD3415A5A49BB20A0B0C3827F00004D895D0849C785800000000300000049C745581300000041C785900000001500000041F6450401741F5241524C89EF49BB00DAB3C3827F00004C89DE41BB9062C90041FFD3415A5A49BB00DAB3C3827F00004D895D7841F645040174155241524C89EF4C89E641BB9062C90041FFD3415A5A4D89653049C745700200000041F6450401741F5241524C89EF49BBE0C4B6C3827F00004C89DE41BB9062C90041FFD3415A5A49BBE0C4B6C3827F00004D895D604C89B510FFFFFF4C89BD08FFFFFF48C78578FFFFFF2600000041BB030000004C891C2441BB130000004C895C24084C8B9D28FFFFFF4C895C24104C8B9D20FFFFFF4C895C2418488954242041BB000000004C895C242841BB000000004C895C24304C89EF4C89D6BA0000000049BB00DAB3C3827F00004C89D941B80000000049BBE0C4B6C3827F00004D89D949BB18679FC3827F000041FFD34883F80074154889C7488BB538FFFFFF41BB5098980041FFD3EB23488B8538FFFFFF48C7401800000000488B042550525D0148C7042550525D01000000004883BD78FFFFFF000F8C0000000048833C25908D7702000F8500000000488B9548FFFFFF4C8B5248
4D85D20F85000000004C8B52284C8BBD38FFFFFF49C74750000000004983FA000F85000000004C8B52384D8B6F304D0FB6B794000000F64204017417524152504889D74C89EE41BB9062C90041FFD358415A5A4C896A384D85F60F85000000004C8BB540FFFFFF49C74608FDFFFFFF8138980C00000F85000000004C8B7008488B9508FFFFFF4C01F20F8000000000488B8550FFFFFF4883C0010F80000000004C8B3425E88C77024983EE0B4C893425E88C77024983FE000F8C0000000048898500FFFFFF488995F8FEFFFF488B042590EF4501488D5010483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700980C00004889142590EF4501488B95F8FEFFFF48895008488985F0FEFFFF488B042590EF4501488D5010483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700980C00004889142590EF4501488B9500FFFFFF48895008488985E8FEFFFF4C8BBD70FFFFFF4C8BB568FFFFFF4C8BAD60FFFFFF49BB2060B0C3827F00004D89DC41BA000000004C8B8D58FFFFFF41B802000000BF0F000000488BB5F0FEFFFF488B9DE8FEFFFFBA00000000B90000000049BBBB6D9FC3827F000041FFE349BB00609FC3827F000041FFD3440038484C3D51032700000049BB00609FC3827F000041FFD344003438484C3D51032800000049BB00609FC3827F000041FFD3440038484C3D51032900000049BB00609FC3827F000041FFD344003438484C3D51032A00000049BB00609FC3827F000041FFD344400024484C383034293D51032B00000049BB00609FC3827F000041FFD3444000484C383034293D51032C00000049BB00609FC3827F000041FFD344400034484C383007293D51032D00000049BB00609FC3827F000041FFD34440002434484C383007293D51032E00000049BB00609FC3827F000041FFD344400034484C383007293D51032F00000049BB00609FC3827F000041FFD3444000484C383007293D51033000000049BB00609FC3827F000041FFD344400034484C383007293D51033100000049BB00609FC3827F000041FFD344400034484C383007293D51033200000049BB00609FC3827F000041FFD3444000484C383007293D51033300000049BB43609FC3827F000041FFD34440545C0058484C707551032600000049BB43609FC3827F000041FFD34440545C0058484C707551033400000049BB00609FC3827F000041FFD3444000085C2858484C707551033500000049BB00609FC3827F000041FFD34440003C0858484C707551033600000049BB00609FC3827F000041FFD3444000283C0858484C707551033700000049BB00609FC3827F000041FFD3444000484C7551033800000049BB00609FC3827F000041FFD344400009484C7551033900000049BB00609FC3827F000041FFD3444001484C090751033A00000049BB00609FC3827F000041FFD34440484C01090707033B000000 -[36418785ad5] jit-backend-dump} -[36418786459] {jit-backend-addr -bridge out of Guard 21 has address 7f82c39f737a to 7f82c39f7b83 -[364187870cf] jit-backend-addr} -[3641878781d] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579ae4b +0 
488DA50000000049BB503107E8D57F0000498B034883C00149BB503107E8D57F0000498903488B8570FFFFFF4C8B780849BBA8EB8BE5D57F00004D39DF0F85000000004D8B771049BBC0EB8BE5D57F00004D39DE0F850000000041BB10468D0041FFD34C8B78404C8B70504D85F60F85000000004C8B70284983FE000F85000000004C8B342500F584014981FEE02687010F85000000004C8B3425E80A9C024983FE000F8C0000000048898518FFFFFF488B042550C95401488D9048010000483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C700388001004889C24881C09800000048C7008800000048C74008000000004989C64883C01048C7008800000048C74008050000004989C54883C03848C700F81600004989C44883C01048C700F81600004989C24883C01048C700306900004989C14883C01848C700E02900004989C04883C01848C7004083010048896808488BBD18FFFFFFF6470401741E5057415241505241514889C641BB8045C50041FFD341595A4158415A5F5848894740488BB570FFFFFF48896E1848C742700200000049BB803AF8E7D57F00004C895A6048C74258130000004C89722849BBE09C8DE5D57F00004C895A7848C7828000000003000000C78290000000150000004C897A3049BBA8EB8BE5D57F00004C895A0849C7442408010000004D8965104D89551849C741080100000049C74010A0C19F0149BBA03AF8E7D57F00004D8958084D8941104D894D204C896A6848899510FFFFFF48898508FFFFFF48C78578FFFFFF280000004889FE4889D749BB06A479E5D57F000041FFD34883F80274154889C7488BB510FFFFFF41BB50C2940041FFD3EB23488B8510FFFFFF48C7401800000000488B0425F05F710148C70425F05F7101000000004883BD78FFFFFF000F8C0000000048833C25400C9C02000F8500000000488BBD18FFFFFF488B77504885F60F8500000000488B7728488B9510FFFFFF48C74250000000004883FE000F8500000000488B77404C8B6A304C0FB68A94000000F647040174185057564151524C89EE41BB8045C50041FFD35A41595E5F584C896F404D85C90F85000000004C8B8D08FFFFFF49C74108FDFFFFFF8138F81600000F85000000004C8B4808488BBD28FFFFFF4C01CF0F8000000000488B8520FFFFFF4883C0010F80000000004C8B0C25E80A9C024983F9000F8C0000000049BB683107E8D57F00004D8B0B4983C10149BB683107E8D57F00004D890B4881F8102700000F8D0000000049BB00000000000000804C39D80F8400000000B90200000048898500FFFFFF489948F7F94889D048C1FA3F41B9020000004921D14C01C84883F8000F85000000004889F84883C7010F8000000000488B8500FFFFFF4883C0014C8B0C25E80A9C024983F9000F8C000000004889C34889F849BB4CA979E5D57F000041FFE349BB00A079E5D57F000041FFD344003C484C6569032900000049BB00A079E5D57F000041FFD34400383C484C6569032A00000049BB00A079E5D57F000041FFD344003C484C6569032B00000049BB00A079E5D57F000041FFD344400038484C3C156569032C00000049BB00A079E5D57F000041FFD3444000484C3C156569032D00000049BB00A079E5D57F000041FFD3444000484C3C156569032E00000049BB00A079E5D57F000041FFD344400038484C3C156569032F00000049BB00A079E5D57F000041FFD3444000484C3C156569033000000049BB43A079E5D57F000041FFD344406C700074484C6569032800000049BB43A079E5D57F000041FFD344406C700074484C6569033100000049BB00A079E5D57F000041FFD344401C00701874484C6569033200000049BB00A079E5D57F000041FFD3444000081C74484C6569033300000049BB00A079E5D57F000041FFD344400018081C74484C6569033400000049BB00A079E5D57F000041FFD3444000484C6569033500000049BB00A079E5D57F000041FFD34440001D484C6569033600000049BB00A079E5D57F000041FFD3444001484C1D0769033700000049BB00A079E5D57F000041FFD34440484C011D0707033800000049BB00A079E5D57F000041FFD34440484C011D0707033900000049BB00A079E5D57F000041FFD34440484C011D033A00000049BB00A079E5D57F000041FFD3444001484C071D033B00000049BB00A079E5D57F000041FFD34440484C01791D033C00000049BB00A079E5D57F000041FFD344401D484C077901033D00000049BB00A079E5D57F000041FFD34440484C1D01070707033E00000049BB00A079E5D57F000041FFD34440484C1D01070707033F000000 +[d0e8aad0247] jit-backend-dump} +[d0e8aad14a7] {jit-backend-addr +bridge out of Guard 33 has address 7fd5e579ae4b to 7fd5e579b270 +[d0e8aad346f] 
jit-backend-addr} +[d0e8aad44a1] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f737d +0 60FEFFFF -[364187884ed] jit-backend-dump} -[36418788af5] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579ae4e +0 70FEFFFF +[d0e8aad635b] jit-backend-dump} +[d0e8aad7675] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f73b9 +0 C6070000 -[36418789683] jit-backend-dump} -[36418789cd3] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579ae8a +0 E2030000 +[d0e8aad8e15] jit-backend-dump} +[d0e8aad98bf] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f73ca +0 CE070000 -[3641878a63d] jit-backend-dump} -[3641878ab5d] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579aea1 +0 E4030000 +[d0e8aadae37] jit-backend-dump} +[d0e8aadbaf7] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f73ea +0 C8070000 -[3641878b453] jit-backend-dump} -[3641878b8af] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579aebb +0 FD030000 +[d0e8aadd1ad] jit-backend-dump} +[d0e8aaddcb7] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7401 +0 CA070000 -[3641878c167] jit-backend-dump} -[3641878c5e9] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579aec9 +0 0B040000 +[d0e8aadf319] jit-backend-dump} +[d0e8aadff9d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7426 +0 BF070000 -[3641878d187] jit-backend-dump} -[3641878d721] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579aede +0 2C040000 +[d0e8aae15c9] jit-backend-dump} +[d0e8aae20d3] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7434 +0 CF070000 -[3641878e09d] jit-backend-dump} -[3641878e4ff] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579aef0 +0 36040000 +[d0e8aae361b] jit-backend-dump} +[d0e8aae3fdb] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7447 +0 D9070000 -[3641878edc3] jit-backend-dump} -[3641878f201] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b0f6 +0 4B020000 +[d0e8aae5427] jit-backend-dump} +[d0e8aae5de1] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7458 +0 E6070000 -[3641878fa9d] jit-backend-dump} -[3641878fecf] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b105 +0 58020000 +[d0e8aae722d] jit-backend-dump} +[d0e8aae7bbd] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f746e +0 EF070000 -[36418790791] jit-backend-dump} -[36418790ec9] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b119 +0 60020000 +[d0e8aae92bb] jit-backend-dump} +[d0e8aae9d71] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7483 +0 15080000 -[3641879189d] jit-backend-dump} -[36418791d89] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b136 +0 60020000 +[d0e8aaeb2d1] jit-backend-dump} +[d0e8aaebc91] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7498 +0 1E080000 -[364187962ad] jit-backend-dump} -[36418796947] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b171 +0 41020000 
+[d0e8aaed0dd] jit-backend-dump} +[d0e8aaeda6d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f74b6 +0 1E080000 -[36418797381] jit-backend-dump} -[36418797877] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b18c +0 43020000 +[d0e8aaeeec5] jit-backend-dump} +[d0e8aaef861] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7998 +0 59030000 -[36418798197] jit-backend-dump} -[364187985cb] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b1a0 +0 48020000 +[d0e8aaf0cb3] jit-backend-dump} +[d0e8aaf16df] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f79a7 +0 67030000 -[36418799093] jit-backend-dump} -[364187995f5] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b1b1 +0 51020000 +[d0e8aaf2d29] jit-backend-dump} +[d0e8aaf4049] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f79bb +0 70030000 -[36418799f65] jit-backend-dump} -[3641879a3d1] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b1c3 +0 73020000 +[d0e8aaf55af] jit-backend-dump} +[d0e8aaf5f2d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f79d8 +0 71030000 -[3641879ac6d] jit-backend-dump} -[3641879b0b3] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b1ee +0 62020000 +[d0e8aaf737f] jit-backend-dump} +[d0e8aaf7d09] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7a12 +0 54030000 -[3641879b973] jit-backend-dump} -[3641879bda5] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b201 +0 67020000 +[d0e8aaf9161] jit-backend-dump} +[d0e8aaf9ae5] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7a2d +0 57030000 -[3641879c641] jit-backend-dump} -[3641879cb9d] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b22f +0 52020000 +[d0e8aafb0c9] jit-backend-dump} +[d0e8aafbbfd] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7a41 +0 5C030000 -[3641879d54b] jit-backend-dump} -[3641879daad] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b23c +0 5E020000 +[d0e8aafd247] jit-backend-dump} +[d0e8aafdc43] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7a52 +0 65030000 -[3641879e34b] jit-backend-dump} -[3641879e791] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b259 +0 76020000 +[d0e8aaff0bf] jit-backend-dump} +[d0e8ab0006d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7a70 +0 61030000 -[3641879f065] jit-backend-dump} -[3641879f689] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f6bcb +0 AB070000 -[3641879ff29] jit-backend-dump} -[364187a057f] jit-backend} -[364187a10fb] {jit-log-opt-bridge -# bridge out of Guard 21 with 119 ops +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a9bd +0 8A040000 +[d0e8ab014dd] jit-backend-dump} +[d0e8ab02695] jit-backend} +[d0e8ab03ea1] {jit-log-opt-bridge +# bridge out of Guard 33 with 137 ops [p0, p1, p2, p3, i4, i5, i6] debug_merge_point(0, ' #37 LOAD_FAST') debug_merge_point(0, ' #40 LOAD_GLOBAL') -+37: p7 = getfield_gc(p1, descr=) -+48: guard_value(p7, ConstPtr(ptr8), descr=) [p0, p1, p7, p2, 
p3, i5, i6] -+67: p9 = getfield_gc(p7, descr=) -+71: guard_value(p9, ConstPtr(ptr10), descr=) [p0, p1, p9, p7, p2, p3, i5, i6] -+84: p12 = getfield_gc(ConstPtr(ptr11), descr=) -+97: guard_nonnull_class(p12, ConstClass(Function), descr=) [p0, p1, p12, p2, p3, i5, i6] ++37: p7 = getfield_gc(p1, descr=) ++48: guard_value(p7, ConstPtr(ptr8), descr=) [p0, p1, p7, p2, p3, i5, i6] ++67: p9 = getfield_gc(p7, descr=) ++71: guard_value(p9, ConstPtr(ptr10), descr=) [p0, p1, p9, p7, p2, p3, i5, i6] ++90: guard_not_invalidated(, descr=) [p0, p1, p7, p2, p3, i5, i6] debug_merge_point(0, ' #43 CALL_FUNCTION') -+116: p14 = getfield_gc(p12, descr=) -+120: guard_value(p14, ConstPtr(ptr15), descr=) [p0, p1, p14, p12, p2, p3, i5, i6] -+139: p16 = getfield_gc(p12, descr=) -+143: p17 = getfield_gc(p12, descr=) -+143: p19 = call(ConstClass(getexecutioncontext), descr=) -+152: p20 = getfield_gc(p19, descr=) -+156: i21 = force_token() -+163: p22 = getfield_gc(p19, descr=) -+167: guard_isnull(p22, descr=) [p0, p1, p19, p22, p2, p3, p12, p20, p16, i21, i5, i6] -+176: i23 = getfield_gc(p19, descr=) -+180: i24 = int_is_zero(i23) -guard_true(i24, descr=) [p0, p1, p19, p2, p3, p12, p20, p16, i21, i5, i6] ++90: p12 = call(ConstClass(getexecutioncontext), descr=) ++99: p13 = getfield_gc(p12, descr=) ++103: i14 = force_token() ++103: p15 = getfield_gc(p12, descr=) ++107: guard_isnull(p15, descr=) [p0, p1, p12, p15, p2, p3, p13, i14, i5, i6] ++116: i16 = getfield_gc(p12, descr=) ++120: i17 = int_is_zero(i16) +guard_true(i17, descr=) [p0, p1, p12, p2, p3, p13, i14, i5, i6] debug_merge_point(1, ' #0 LOAD_CONST') debug_merge_point(1, ' #3 STORE_FAST') debug_merge_point(1, ' #6 SETUP_LOOP') debug_merge_point(1, ' #9 LOAD_GLOBAL') -+190: guard_value(p16, ConstPtr(ptr25), descr=) [p0, p1, p19, p16, p2, p3, p12, p20, None, i21, i5, i6] -+209: p27 = getfield_gc(p16, descr=) -+213: guard_value(p27, ConstPtr(ptr28), descr=) [p0, p1, p19, p27, p16, p2, p3, p12, p20, None, i21, i5, i6] -+226: p30 = getfield_gc(ConstPtr(ptr29), descr=) -+239: guard_isnull(p30, descr=) [p0, p1, p19, p30, p2, p3, p12, p20, None, i21, i5, i6] -+248: guard_not_invalidated(, descr=) [p0, p1, p19, p2, p3, p12, p20, None, i21, i5, i6] -+248: p32 = getfield_gc(ConstPtr(ptr31), descr=) -+256: guard_value(p32, ConstPtr(ptr33), descr=) [p0, p1, p19, p32, p2, p3, p12, p20, None, i21, i5, i6] -+269: p35 = getfield_gc(ConstPtr(ptr34), descr=) -+277: guard_value(p35, ConstPtr(ptr36), descr=) [p0, p1, p19, p35, p2, p3, p12, p20, None, i21, i5, i6] ++130: guard_not_invalidated(, descr=) [p0, p1, p12, p2, p3, p13, i14, i5, i6] ++130: p19 = getfield_gc(ConstPtr(ptr18), descr=) ++138: guard_value(p19, ConstPtr(ptr20), descr=) [p0, p1, p12, p19, p2, p3, p13, i14, i5, i6] debug_merge_point(1, ' #12 LOAD_CONST') debug_merge_point(1, ' #15 CALL_FUNCTION') debug_merge_point(1, ' #18 GET_ITER') @@ -656,1446 +614,1443 @@ debug_merge_point(1, ' #31 INPLACE_ADD') debug_merge_point(1, ' #32 STORE_FAST') debug_merge_point(1, ' #35 JUMP_ABSOLUTE') -+290: i38 = getfield_raw(41389288, descr=) -+298: i40 = int_sub(i38, 8) -+302: setfield_raw(41389288, i40, descr=) -+310: i42 = int_lt(i40, 0) -guard_false(i42, descr=) [p0, p1, p19, p2, p3, p12, p20, None, i21, i5, i6] ++151: i22 = getfield_raw(43780840, descr=) ++159: i24 = int_lt(i22, 0) +guard_false(i24, descr=) [p0, p1, p12, p2, p3, p13, i14, i5, i6] debug_merge_point(1, ' #19 FOR_ITER') -+320: i43 = force_token() -+327: p45 = new_with_vtable(21373712) -+397: setfield_gc(p45, i21, descr=) -setfield_gc(p19, p45, descr=) -+440: 
setfield_gc(p1, i43, descr=) -+451: p47 = new_with_vtable(21435120) -+524: p49 = new_array(5, descr=) -+602: p51 = new_with_vtable(ConstClass(W_IntObject)) -+672: setfield_gc(p51, 1, descr=) -setarrayitem_gc(p49, 0, p51, descr=) -+720: p55 = new_with_vtable(ConstClass(W_IntObject)) -setarrayitem_gc(p49, 1, p55, descr=) -+830: p58 = new_with_vtable(21373152) -+900: setfield_gc(p58, 1, descr=) -+908: p61 = new_with_vtable(21379552) -+978: setfield_gc(p61, 1, descr=) -+986: setfield_gc(p61, 3, descr=) -setfield_gc(p58, p61, descr=) -setarrayitem_gc(p49, 2, p58, descr=) -setfield_gc(p47, p49, descr=) -setfield_gc(p47, ConstPtr(ptr25), descr=) -+1167: setfield_gc(p47, 3, descr=) -+1178: setfield_gc(p47, 19, descr=) -+1186: setfield_gc(p47, 21, descr=) -setfield_gc(p47, ConstPtr(ptr15), descr=) -setfield_gc(p47, p20, descr=) -+1281: setfield_gc(p47, 2, descr=) -setfield_gc(p47, ConstPtr(ptr69), descr=) -+1341: p74 = call_assembler(p47, p19, ConstPtr(ptr70), ConstPtr(ptr15), 0, ConstPtr(ptr69), 3, 19, p51, p55, p58, ConstPtr(ptr72), ConstPtr(ptr73), descr=) -guard_not_forced(, descr=) [p0, p1, p19, p47, p74, p45, p2, p3, p12, i5, i6] -+1570: guard_no_exception(, descr=) [p0, p1, p19, p47, p74, p45, p2, p3, p12, i5, i6] -+1585: p75 = getfield_gc(p19, descr=) -+1596: guard_isnull(p75, descr=) [p0, p1, p74, p19, p47, p75, p45, p2, p3, p12, i5, i6] -+1605: i76 = getfield_gc(p19, descr=) -+1609: setfield_gc(p47, ConstPtr(ptr77), descr=) -+1624: i78 = int_is_true(i76) -guard_false(i78, descr=) [p0, p1, p74, p47, p19, p45, p2, p3, p12, i5, i6] -+1634: p79 = getfield_gc(p19, descr=) -+1638: p80 = getfield_gc(p47, descr=) -+1642: i81 = getfield_gc(p47, descr=) -setfield_gc(p19, p80, descr=) -+1683: guard_false(i81, descr=) [p0, p1, p74, p79, p47, p19, p45, p2, p3, p12, i5, i6] ++169: i25 = force_token() +p27 = new_with_vtable(38380152) +p29 = new_array(0, descr=) +p31 = new_array(5, descr=) +p33 = new_with_vtable(ConstClass(W_IntObject)) +p35 = new_with_vtable(ConstClass(W_IntObject)) +p37 = new_with_vtable(38308720) +p39 = new_with_vtable(ConstClass(W_ListObject)) +p41 = new_with_vtable(38380928) ++359: setfield_gc(p41, i14, descr=) +setfield_gc(p12, p41, descr=) ++410: setfield_gc(p1, i25, descr=) ++421: setfield_gc(p27, 2, descr=) ++429: setfield_gc(p27, ConstPtr(ptr43), descr=) ++443: setfield_gc(p27, 19, descr=) ++451: setfield_gc(p27, p29, descr=) ++455: setfield_gc(p27, ConstPtr(ptr45), descr=) ++469: setfield_gc(p27, 3, descr=) ++480: setfield_gc(p27, 21, descr=) ++490: setfield_gc(p27, p13, descr=) ++494: setfield_gc(p27, ConstPtr(ptr8), descr=) ++508: setfield_gc(p33, 1, descr=) ++517: setarrayitem_gc(p31, 0, p33, descr=) ++521: setarrayitem_gc(p31, 1, p35, descr=) ++525: setfield_gc(p37, 1, descr=) ++533: setfield_gc(p39, ConstPtr(ptr52), descr=) ++541: setfield_gc(p39, ConstPtr(ptr53), descr=) ++555: setfield_gc(p37, p39, descr=) ++559: setarrayitem_gc(p31, 2, p37, descr=) ++563: setfield_gc(p27, p31, descr=) ++567: p55 = call_assembler(p27, p12, descr=) +guard_not_forced(, descr=) [p0, p1, p12, p27, p55, p41, p2, p3, i5, i6] ++687: guard_no_exception(, descr=) [p0, p1, p12, p27, p55, p41, p2, p3, i5, i6] ++702: p56 = getfield_gc(p12, descr=) ++713: guard_isnull(p56, descr=) [p0, p1, p12, p55, p27, p56, p41, p2, p3, i5, i6] ++722: i57 = getfield_gc(p12, descr=) ++726: setfield_gc(p27, ConstPtr(ptr58), descr=) ++741: i59 = int_is_true(i57) +guard_false(i59, descr=) [p0, p1, p55, p27, p12, p41, p2, p3, i5, i6] ++751: p60 = getfield_gc(p12, descr=) ++755: p61 = getfield_gc(p27, descr=) ++759: i62 
= getfield_gc(p27, descr=) +setfield_gc(p12, p61, descr=) ++801: guard_false(i62, descr=) [p0, p1, p55, p60, p27, p12, p41, p2, p3, i5, i6] debug_merge_point(0, ' #46 INPLACE_ADD') -+1692: setfield_gc(p45, -3, descr=) -+1707: guard_class(p74, ConstClass(W_IntObject), descr=) [p0, p1, p74, p2, p3, i5, i6] -+1719: i84 = getfield_gc_pure(p74, descr=) -+1723: i85 = int_add_ovf(i5, i84) -guard_no_overflow(, descr=) [p0, p1, p74, i85, p2, p3, i5, i6] ++810: setfield_gc(p41, -3, descr=) ++825: guard_class(p55, ConstClass(W_IntObject), descr=) [p0, p1, p55, p2, p3, i5, i6] ++837: i65 = getfield_gc_pure(p55, descr=) ++841: i66 = int_add_ovf(i5, i65) +guard_no_overflow(, descr=) [p0, p1, p55, i66, p2, p3, i5, i6] debug_merge_point(0, ' #47 STORE_FAST') debug_merge_point(0, ' #50 JUMP_FORWARD') debug_merge_point(0, ' #63 LOAD_FAST') debug_merge_point(0, ' #66 LOAD_CONST') debug_merge_point(0, ' #69 INPLACE_ADD') -+1739: i87 = int_add_ovf(i6, 1) -guard_no_overflow(, descr=) [p0, p1, i87, p2, p3, i85, None, i6] ++857: i68 = int_add_ovf(i6, 1) +guard_no_overflow(, descr=) [p0, p1, i68, p2, p3, i66, None, i6] debug_merge_point(0, ' #70 STORE_FAST') debug_merge_point(0, ' #73 JUMP_ABSOLUTE') -+1756: i89 = getfield_raw(41389288, descr=) -+1764: i91 = int_sub(i89, 11) -+1768: setfield_raw(41389288, i91, descr=) -+1776: i93 = int_lt(i91, 0) -guard_false(i93, descr=) [p0, p1, p2, p3, i87, i85, None, None] ++874: guard_not_invalidated(, descr=) [p0, p1, p2, p3, i68, i66, None, None] ++874: i71 = getfield_raw(43780840, descr=) ++882: i73 = int_lt(i71, 0) +guard_false(i73, descr=) [p0, p1, p2, p3, i68, i66, None, None] debug_merge_point(0, ' #15 LOAD_FAST') -+1786: p95 = new_with_vtable(ConstClass(W_IntObject)) -+1863: setfield_gc(p95, i85, descr=) -+1874: p97 = new_with_vtable(ConstClass(W_IntObject)) -+1944: setfield_gc(p97, i87, descr=) -+1955: jump(p1, p0, p2, ConstPtr(ptr98), 0, p3, 2, 15, p95, p97, ConstPtr(ptr102), ConstPtr(ptr103), descr=) -+2057: --end of the loop-- -[36418812ac9] jit-log-opt-bridge} -[36418dfa1a9] {jit-backend -[36418e4a079] {jit-backend-dump ++892: label(p1, p0, p2, p3, i66, i68, descr=TargetToken(140556656123584)) +debug_merge_point(0, ' #18 LOAD_CONST') +debug_merge_point(0, ' #21 COMPARE_OP') ++922: i75 = int_lt(i68, 10000) +guard_true(i75, descr=) [p0, p1, p2, p3, i68, i66] +debug_merge_point(0, ' #24 POP_JUMP_IF_FALSE') +debug_merge_point(0, ' #27 LOAD_FAST') +debug_merge_point(0, ' #30 LOAD_CONST') +debug_merge_point(0, ' #33 BINARY_MODULO') ++935: i77 = int_eq(i68, -9223372036854775808) +guard_false(i77, descr=) [p0, p1, i68, p2, p3, None, i66] ++954: i79 = int_mod(i68, 2) ++971: i81 = int_rshift(i79, 63) ++978: i82 = int_and(2, i81) ++987: i83 = int_add(i79, i82) +debug_merge_point(0, ' #34 POP_JUMP_IF_FALSE') ++990: i84 = int_is_true(i83) +guard_false(i84, descr=) [p0, p1, p2, p3, i83, i68, i66] +debug_merge_point(0, ' #53 LOAD_FAST') +debug_merge_point(0, ' #56 LOAD_CONST') +debug_merge_point(0, ' #59 INPLACE_ADD') ++1000: i86 = int_add_ovf(i66, 1) +guard_no_overflow(, descr=) [p0, p1, i86, p2, p3, None, i68, i66] +debug_merge_point(0, ' #60 STORE_FAST') +debug_merge_point(0, ' #63 LOAD_FAST') +debug_merge_point(0, ' #66 LOAD_CONST') +debug_merge_point(0, ' #69 INPLACE_ADD') ++1013: i88 = int_add(i68, 1) +debug_merge_point(0, ' #70 STORE_FAST') +debug_merge_point(0, ' #73 JUMP_ABSOLUTE') ++1024: guard_not_invalidated(, descr=) [p0, p1, p2, p3, i86, i88, None, None, None] ++1024: i90 = getfield_raw(43780840, descr=) ++1032: i92 = int_lt(i90, 0) +guard_false(i92, descr=) [p0, 
p1, p2, p3, i86, i88, None, None, None] +debug_merge_point(0, ' #15 LOAD_FAST') ++1042: jump(p1, p0, p2, p3, i86, i88, descr=TargetToken(140556656121584)) ++1061: --end of the loop-- +[d0e8abe8f9f] jit-log-opt-bridge} +[d0e8adedcd7] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7e53 +0 554889E5534154415541564157488DA500000000488B042550525D0148C7042550525D010000000048898570FFFFFF488B042558525D0148C7042558525D010000000048898568FFFFFF488B042560525D0148C7042560525D010000000048898560FFFFFF488B042568525D0148C7042568525D010000000048898558FFFFFF4C8B3C2530E86301488B042578525D0148C7042578525D010000000048898550FFFFFF488B042580525D0148C7042580525D010000000048898548FFFFFF488B042588525D0148C7042588525D010000000048898540FFFFFF488B042590525D0148C7042590525D010000000048898538FFFFFF488B042598525D0148C7042598525D010000000048898530FFFFFF4C8B342560E86301488B042568E8630148898528FFFFFF488B0425B0525D0148C70425B0525D010000000048898520FFFFFF488B042578E8630148898518FFFFFF488B042580E8630148898510FFFFFF488B0425C8525D0148C70425C8525D010000000048898508FFFFFF49BB382103C6827F00004D8B2B4983C50149BB382103C6827F00004D892B4C3BB528FFFFFF0F8D000000004D89F74C0FAFB518FFFFFF4C8BAD10FFFFFF4D01F54983C7014C8BB538FFFFFF4D897E084C89EF41BBA080D20041FFD348833C25908D7702000F85000000004C8BA508FFFFFF4D8B5424084D89D14983C20148898500FFFFFF4C898DF8FEFFFF4C89E74C89D641BB40967E0041FFD348833C25908D7702000F85000000004D8B4C2410488B042590EF4501488D5010483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700900700004889142590EF4501488B9500FFFFFF48895008488B95F8FEFFFF41F6410401743141F6410440751C415152504C89CF4889D64889C241BB0064C90041FFD3585A4159EB0E5248C1EA074883F2F8490FAB115A498944D110488B0425E88C77024883E80348890425E88C77024883F8000F8C000000004C89B538FFFFFF4D89FE4C89A508FFFFFF4D89EFE98DFEFFFF488B0425E08C77024829E0483B0425807C4501760D49BB34639FC3827F000041FFD3554889E5534154415541564157488DA570FEFFFF4889BD70FFFFFF4889B568FFFFFF48899560FFFFFF48898D58FFFFFF4D89C74C898D50FFFFFF4C8B5D104C899D48FFFFFF4C8B5D184C899D40FFFFFF4C8B5D204C899D38FFFFFF4C8B5D284C899D30FFFFFF4C8B5D304D89DE4C8B5D384C899D28FFFFFF4C8B5D404C899D20FFFFFF4C8B5D484C899D18FFFFFF4C8B5D504C899D10FFFFFF4C8B5D584C899D08FFFFFFE9C2FDFFFF49BB00609FC3827F000041FFD344405C3968484C505458603D033C00000049BB00609FC3827F000041FFD34440484C50545838603507033D00000049BB43609FC3827F000041FFD3444000484C50545838603507033E00000049BB43609FC3827F000041FFD344407D30484C5054583860783507033F00000049BB00609FC3827F000041FFD34440484C5054583860350340000000 -[36418e53041] jit-backend-dump} -[36418e537c1] {jit-backend-addr -Loop 4 ( #13 FOR_ITER) has address 7f82c39f7fcd to 7f82c39f8140 (bootstrap 7f82c39f7e53) -[36418e5456d] jit-backend-addr} -[36418e54ceb] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a93a +0 E9A1010000 +[d0e8adf23d5] jit-backend-dump} +[d0e8adf30f5] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7e63 +0 70FEFFFF -[36418e55aab] jit-backend-dump} -[36418e5615d] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579a9dd +0 E994010000 +[d0e8adf4bad] jit-backend-dump} +[d0e8adf568d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f7ff4 +0 13020000 -[36418e56c1d] jit-backend-dump} -[36418e571a1] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579aea5 +0 E9F9030000 +[d0e8adf6c77] jit-backend-dump} +[d0e8adf7631] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE 
/home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8033 +0 0F020000 -[36418e57ac9] jit-backend-dump} -[36418e57f75] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579aecd +0 E921040000 +[d0e8adf8c51] jit-backend-dump} +[d0e8adf9689] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8072 +0 EE010000 -[36418e58869] jit-backend-dump} -[36418e58ce1] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b1b5 +0 E966020000 +[d0e8adfac61] jit-backend-dump} +[d0e8adfb5e5] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8123 +0 5D010000 -[36418e596e5] jit-backend-dump} -[36418e59ca1] jit-backend} -[36418e5a7c9] {jit-log-opt-loop -# Loop 4 : loop with 31 ops -[p0, p1, p2, p3, i4, p5, p6, p7, p8, p9, i10, i11, p12, i13, i14, p15] +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b24b +0 E968020000 +[d0e8adfcaf7] jit-backend-dump} +[d0e9120bfcd] {jit-backend +[d0e913919c1] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b562 +0 488B0425E00A9C024829E0483B0425E0EC4F01760D49BB63A379E5D57F000041FFD3554889E5534154415541564157488DA5000000004C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B4020498B482848898D70FFFFFF498B48304C89BD68FFFFFF4D8B78384889BD60FFFFFF498B78404D8B40484889B558FFFFFF4C89A550FFFFFF4C898D48FFFFFF48899D40FFFFFF48899538FFFFFF48898530FFFFFF4C89BD28FFFFFF4889BD20FFFFFF4C898518FFFFFF49BB803107E8D57F00004D8B034983C00149BB803107E8D57F00004D89034983FA050F85000000008139306900000F85000000004C8B51104D85D20F84000000004C8B4108498B7A10813F702703000F85000000004D8B5208498B7A084D8B7A104D8B52184983F8000F8C000000004D39D00F8D000000004C89C04D0FAFC74889FA4C01C74883C001488941084983FD000F850000000049BB902190E5D57F00004D39DE0F85000000004C8BB560FFFFFF4D8B6E0849BBA8EB8BE5D57F00004D39DD0F85000000004D8B451049BBC0EB8BE5D57F00004D39D80F85000000004C8B2C2500F584014981FDE02687010F850000000048898D10FFFFFF4C899508FFFFFF48899500FFFFFF4889BDF8FEFFFF488985F0FEFFFF41BB2003EE0041FFD348833C25400C9C02000F8500000000488BBD70FFFFFF488B5710813AA0CA01000F8500000000488B57084C8B52084C89D14983C201488985E8FEFFFF488995E0FEFFFF48898DD8FEFFFF4889D74C89D641BB8017790041FFD348833C25400C9C02000F8500000000488B8DE0FEFFFF488B5110488BBDD8FEFFFF4C8B95E8FEFFFFF64204017432F6420440751E57515241524889FE4889D74C89D241BBE042C50041FFD3415A5A595FEB0E5748C1EF074883F7F8480FAB3A5F4C8954FA104C8B1425E80A9C024983FA000F8C0000000049BB983107E8D57F00004D8B334983C60149BB983107E8D57F00004D89334C8BB5F0FEFFFF4C3BB508FFFFFF0F8D000000004D0FAFF74C8B9500FFFFFF4D01F24C8BB5F0FEFFFF4983C601488BBD10FFFFFF4C89770848898DD0FEFFFF4C8995F8FEFFFF4C89D741BB2003EE0041FFD348833C25400C9C02000F85000000004C8B95D0FEFFFF498B4A084889CF4883C1014889BDC8FEFFFF488985C0FEFFFF4C89D74889CE41BB8017790041FFD348833C25400C9C02000F85000000004C8B95D0FEFFFF498B4210488B8DC8FEFFFF488BBDC0FEFFFFF64004017432F6400440751E41525750514889CE4889FA4889C741BBE042C50041FFD359585F415AEB0E5148C1E9074883F1F8480FAB085948897CC810488B3C25E80A9C024883FF000F8C000000004C89B5F0FEFFFF4C89D1E9CCFEFFFF49BB00A079E5D57F000041FFD3294C484438355055585C60400464686C034000000049BB00A079E5D57F000041FFD34C480444383550585C604064686C034100000049BB00A079E5D57F000041FFD34C48042844383550585C604064686C034200000049BB00A079E5D57F000041FFD34C4804211C2844383550585C604064686C034300000049BB00A079E5D57F000041FFD34C480421293D1D44383550585C604064686C034400000049BB00A079E5D57F000041FFD34C4804213D1D44383550585C604064686C034500000049BB00A079E5D57F000041FFD3354C4844385
0585C604004686C1D034600000049BB00A079E5D57F000041FFD34C483844505C604004686C1D034700000049BB00A079E5D57F000041FFD34C383444505C604004686C1D034800000049BB00A079E5D57F000041FFD34C38203444505C604004686C1D034900000049BB00A079E5D57F000041FFD34C383444505C604004686C1D034A00000049BB00A079E5D57F000041FFD34C383444505C604004686C1D034B00000049BB43A079E5D57F000041FFD34C380044505C6040706C7D034C00000049BB00A079E5D57F000041FFD34C38081C44505C60706C007D034D00000049BB43A079E5D57F000041FFD34C388D018401880144505C6040706C077D034E00000049BB00A079E5D57F000041FFD34C3844505C6040706C077D034F00000049BB00A079E5D57F000041FFD34C4870393D7944505C60406C7D035000000049BB00A079E5D57F000041FFD34C4844505C60401C6C2907035100000049BB43A079E5D57F000041FFD34C480044505C6040706C7D07035200000049BB43A079E5D57F000041FFD34C4895019801900144505C6040706C7D07035300000049BB00A079E5D57F000041FFD34C4844505C6040706C7D070354000000 +[d0e913c0d91] jit-backend-dump} +[d0e913c2081] {jit-backend-addr +Loop 2 ( #13 FOR_ITER) has address 7fd5e579b598 to 7fd5e579b953 (bootstrap 7fd5e579b562) +[d0e913c4781] jit-backend-addr} +[d0e913c58c1] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b594 +0 C0FEFFFF +[d0e913c76c1] jit-backend-dump} +[d0e913c8add] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b653 +0 FC020000 +[d0e913ca157] jit-backend-dump} +[d0e913caccd] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b65f +0 12030000 +[d0e913cc3a1] jit-backend-dump} +[d0e913cce15] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b66c +0 25030000 +[d0e913ce351] jit-backend-dump} +[d0e913ced8f] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b680 +0 32030000 +[d0e913d02c5] jit-backend-dump} +[d0e913d0cf1] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b69a +0 3B030000 +[d0e913d2143] jit-backend-dump} +[d0e913d2ccb] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b6a3 +0 56030000 +[d0e913d42fd] jit-backend-dump} +[d0e913d4e31] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b6c2 +0 5A030000 +[d0e913d6475] jit-backend-dump} +[d0e913d6ee9] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b6d5 +0 67030000 +[d0e913d840d] jit-backend-dump} +[d0e913d8db5] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b6f3 +0 67030000 +[d0e913da201] jit-backend-dump} +[d0e913dab8b] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b70a +0 6E030000 +[d0e913e0f93] jit-backend-dump} +[d0e913e1fe3] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b71f +0 96030000 +[d0e913e36cf] jit-backend-dump} +[d0e913e40ad] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b75a +0 79030000 +[d0e913e5601] jit-backend-dump} +[d0e913e5f67] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b771 +0 7F030000 +[d0e913e73d1] jit-backend-dump} +[d0e913e7d8b] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b7b3 +0 5B030000 +[d0e913e9201] jit-backend-dump} +[d0e913e9bc1] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b81b +0 16030000 +[d0e913eb229] jit-backend-dump} +[d0e913ebd03] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b84d +0 01030000 +[d0e913ed37d] jit-backend-dump} +[d0e913eddcd] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE 
python +CODE_DUMP @7fd5e579b89a +0 F0020000 +[d0e913ef1e3] jit-backend-dump} +[d0e913efb85] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b8d8 +0 D0020000 +[d0e913f0f9b] jit-backend-dump} +[d0e913f1925] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579b940 +0 8B020000 +[d0e913f2dd1] jit-backend-dump} +[d0e913f4313] jit-backend} +[d0e913f5cb1] {jit-log-opt-loop +# Loop 2 : loop with 100 ops +[p0, p1] ++54: p2 = getfield_gc(p0, descr=) ++58: p3 = getfield_gc(p0, descr=) ++62: i4 = getfield_gc(p0, descr=) ++70: p5 = getfield_gc(p0, descr=) ++74: i6 = getfield_gc(p0, descr=) ++81: i7 = getfield_gc(p0, descr=) ++85: p8 = getfield_gc(p0, descr=) ++89: p10 = getarrayitem_gc(p8, 0, descr=) ++93: p12 = getarrayitem_gc(p8, 1, descr=) ++97: p14 = getarrayitem_gc(p8, 2, descr=) ++101: p16 = getarrayitem_gc(p8, 3, descr=) ++105: p18 = getarrayitem_gc(p8, 4, descr=) ++116: p20 = getarrayitem_gc(p8, 5, descr=) ++127: p22 = getarrayitem_gc(p8, 6, descr=) ++138: p24 = getarrayitem_gc(p8, 7, descr=) ++142: p25 = getfield_gc(p0, descr=) ++142: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, p20, p22, p24, descr=TargetToken(140556696702032)) debug_merge_point(0, ' #13 FOR_ITER') -+408: i16 = int_ge(i10, i11) -guard_false(i16, descr=) [p1, p0, p8, i10, p12, p2, p3, p5, p6, p7, p9, i4] -+421: i17 = int_mul(i10, i13) -+432: i18 = int_add(i14, i17) -+442: i20 = int_add(i10, 1) ++235: guard_value(i6, 5, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18, p20, p22, p24] ++245: guard_class(p18, 38308720, descr=) [p1, p0, p18, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++257: p28 = getfield_gc(p18, descr=) ++261: guard_nonnull(p28, descr=) [p1, p0, p18, p28, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++270: i29 = getfield_gc(p18, descr=) ++274: p30 = getfield_gc(p28, descr=) ++278: guard_class(p30, 38488496, descr=) [p1, p0, p18, i29, p30, p28, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++290: p32 = getfield_gc(p28, descr=) ++294: i33 = getfield_gc_pure(p32, descr=) ++298: i34 = getfield_gc_pure(p32, descr=) ++302: i35 = getfield_gc_pure(p32, descr=) ++306: i37 = int_lt(i29, 0) +guard_false(i37, descr=) [p1, p0, p18, i29, i35, i34, i33, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++316: i38 = int_ge(i29, i35) +guard_false(i38, descr=) [p1, p0, p18, i29, i34, i33, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++325: i39 = int_mul(i29, i34) ++332: i40 = int_add(i33, i39) ++338: i42 = int_add(i29, 1) ++342: setfield_gc(p18, i42, descr=) ++346: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p16, p18, p22, p24, i40] debug_merge_point(0, ' #16 STORE_FAST') debug_merge_point(0, ' #19 LOAD_GLOBAL') -+446: setfield_gc(p8, i20, descr=) -+457: guard_not_invalidated(, descr=) [p1, p0, p2, p3, p5, p6, p7, p8, p9, i18, None] ++356: guard_value(p3, ConstPtr(ptr44), descr=) [p1, p0, p3, p2, p5, p12, p14, p16, p18, p22, p24, i40] ++375: p45 = getfield_gc(p0, descr=) ++386: guard_value(p45, ConstPtr(ptr46), descr=) [p1, p0, p45, p2, p5, p12, p14, p16, p18, p22, p24, i40] ++405: p47 = getfield_gc(p45, descr=) ++409: guard_value(p47, ConstPtr(ptr48), descr=) [p1, p0, p47, p45, p2, p5, p12, p14, p16, p18, p22, p24, i40] ++428: guard_not_invalidated(, descr=) [p1, p0, p45, p2, p5, p12, p14, p16, p18, p22, p24, i40] ++428: p50 = getfield_gc(ConstPtr(ptr49), descr=) ++436: guard_value(p50, ConstPtr(ptr51), descr=) [p1, p0, p50, p2, p5, p12, p14, p16, p18, p22, p24, i40] debug_merge_point(0, ' #22 
LOAD_FAST') debug_merge_point(0, ' #25 CALL_FUNCTION') -+457: p23 = call(ConstClass(ll_int_str__IntegerR_SignedConst_Signed), i18, descr=) -+469: guard_no_exception(, descr=) [p1, p0, p23, p2, p3, p5, p6, p7, p8, p9, i18, None] ++449: p53 = call(ConstClass(ll_int_str__IntegerR_SignedConst_Signed), i40, descr=) ++493: guard_no_exception(, descr=) [p1, p0, p53, p2, p5, p12, p14, p16, p18, p24, i40] debug_merge_point(0, ' #28 LIST_APPEND') -+484: i24 = getfield_gc(p15, descr=) -+496: i26 = int_add(i24, 1) -+503: call(ConstClass(_ll_list_resize_ge__listPtr_Signed), p15, i26, descr=) -+532: guard_no_exception(, descr=) [p1, p0, i24, p15, p2, p3, p5, p6, p7, p8, p9, p23, i18, None] -+547: p28 = getfield_gc(p15, descr=) -+552: p30 = new_with_vtable(ConstClass(W_StringObject)) -+615: setfield_gc(p30, p23, descr=) -setarrayitem_gc(p28, i24, p30, descr=) ++508: p54 = getfield_gc(p16, descr=) ++519: guard_class(p54, 38399200, descr=) [p1, p0, p54, p16, p2, p5, p12, p14, p18, p24, p53, i40] ++531: p56 = getfield_gc(p16, descr=) ++535: i57 = getfield_gc(p56, descr=) ++539: i59 = int_add(i57, 1) ++546: p60 = getfield_gc(p56, descr=) ++546: i61 = arraylen_gc(p60, descr=) ++546: call(ConstClass(_ll_list_resize_ge_trampoline__v717___simple_call__function__), p56, i59, descr=) ++582: guard_no_exception(, descr=) [p1, p0, i57, p53, p56, p2, p5, p12, p14, p16, p18, p24, None, i40] ++597: p64 = getfield_gc(p56, descr=) +setarrayitem_gc(p64, i57, p53, descr=) debug_merge_point(0, ' #31 JUMP_ABSOLUTE') -+694: i32 = getfield_raw(41389288, descr=) -+702: i34 = int_sub(i32, 3) -+706: setfield_raw(41389288, i34, descr=) -+714: i36 = int_lt(i34, 0) -guard_false(i36, descr=) [p1, p0, p2, p3, p5, p6, p7, p8, p9, i18] ++683: i66 = getfield_raw(43780840, descr=) ++691: i68 = int_lt(i66, 0) +guard_false(i68, descr=) [p1, p0, p2, p5, p12, p14, p16, p18, p24, None, i40] debug_merge_point(0, ' #13 FOR_ITER') -+724: jump(p0, p1, p2, p3, i18, p5, p6, p7, p8, p9, i20, i11, p12, i13, i14, p15, descr=) -+749: --end of the loop-- -[36418e86159] jit-log-opt-loop} -[36418e88b7d] {jit-backend -[3641909f00f] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f82fd +0 
554889E5534154415541564157488DA5000000004C8B3C2550525D0148C7042550525D01000000004C8B342558525D0148C7042558525D01000000004C8B2C2560525D0148C7042560525D01000000004C8B242568525D0148C7042568525D01000000004C8B142530E863014C8B0C2578525D0148C7042578525D01000000004C8B042540E86301488B3C2548E86301488B342590525D0148C7042590525D0100000000488B1C2598525D0148C7042598525D0100000000488B1425A0525D0148C70425A0525D0100000000488B0C25A8525D0148C70425A8525D0100000000488B0425B0525D0148C70425B0525D010000000048898570FFFFFF488B0425B8525D0148C70425B8525D010000000048898568FFFFFF488B0425C0525D0148C70425C0525D010000000048898560FFFFFF488B0425C8525D0148C70425C8525D010000000048898558FFFFFF49BB502103C6827F0000498B034883C00149BB502103C6827F00004989034983F8050F8500000000488BBD70FFFFFF813F602E00000F85000000004C8B47104D85C00F8400000000488B47084C89BD50FFFFFF4D8B78204D85FF0F85000000004D8B78084C39F80F8D000000004C89AD48FFFFFF4D8B681048899D40FFFFFF498B581848898538FFFFFF480FAFC34C89AD30FFFFFF4901C5488B8538FFFFFF4883C001488947084983FA000F850000000049BB28DCB3C3827F00004D39DC0F85000000004C8BA550FFFFFF498B74240849BB20A0B0C3827F00004C39DE0F85000000004C8B56104981FA60164F010F850000000049BB78B3FBC5827F0000498B334885F60F8500000000488B342560F151014881FE60164F010F8500000000488B3425D84B66014881FE80204D010F85000000004C898528FFFFFF48898D20FFFFFF48898518FFFFFF48899510FFFFFF4C898D08FFFFFF4C89EF41BBA080D20041FFD348833C25908D7702000F85000000004C8B8D20FFFFFF498B5108488B4A084889CF4883C10148899500FFFFFF4889BDF8FEFFFF488985F0FEFFFF4889D74889CE41BB40967E0041FFD348833C25908D7702000F8500000000488B8500FFFFFF4C8B4810488B042590EF4501488D5010483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700900700004889142590EF4501488B95F0FEFFFF48895008488B95F8FEFFFF41F6410401743141F6410440751C505241514C89CF4889D64889C241BB0064C90041FFD341595A58EB0E5248C1EA074883F2F8490FAB115A498944D110488B0425E88C77024883E80348890425E88C77024883F8000F8C000000004C89B568FFFFFF488B8548FFFFFF48898560FFFFFF488B8540FFFFFF48898550FFFFFF488B8510FFFFFF48898548FFFFFF488B8520FFFFFF48898540FFFFFF488B8570FFFFFF48898538FFFFFF4C8BB518FFFFFF488B8528FFFFFF48898520FFFFFF48899D18FFFFFF488B8530FFFFFF48898510FFFFFF4C89A570FFFFFF488B8558FFFFFF48898530FFFFFF4C89BD28FFFFFF488B8508FFFFFF48898558FFFFFF4D89EF488B8500FFFFFF48898508FFFFFF49BBCD7F9FC3827F000041FFE3488B0425E08C77024829E0483B0425807C4501760D49BB34639FC3827F000041FFD3554889E5534154415541564157488DA560FEFFFF4989FF4989F64989D54989CC4D89C24C8B5D104D89D84C8B5D184C89DF4C8B5D204C89DE4C8B5D284C89DB4C8B5D304C89DA4C8B5D384C89D94C8B5D404C899D70FFFFFF4C8B5D484C899D68FFFFFF4C8B5D504C899D60FFFFFF4C8B5D584C899D58FFFFFFE92FFCFFFF49BB00609FC3827F000041FFD321383C343029241D180C08044044484C034100000049BB00609FC3827F000041FFD3383C1C34302924180C080444484C034200000049BB00609FC3827F000041FFD3383C1C2034302924180C080444484C034300000049BB00609FC3827F000041FFD338501C01203C34302924180C080444484C034400000049BB00609FC3827F000041FFD338501C012034302924180C080444484C034500000049BB00609FC3827F000041FFD3293850543024185808041C484C35034600000049BB00609FC3827F000041FFD338503054245808041C484C35034700000049BB00609FC3827F000041FFD338301854245808041C484C35034800000049BB00609FC3827F000041FFD33830281854245808041C484C35034900000049BB00609FC3827F000041FFD338301854245808041C484C35034A00000049BB00609FC3827F000041FFD3383054245808041C484C35034B00000049BB00609FC3827F000041FFD338301854245808041C484C35034C00000049BB00609FC3827F000041FFD338301854245808041C484C35034D00000049BB43609FC3827F000041FFD33830005474587068404C35034E00000049BB43609FC3827F000041FFD338307D785474587068404C800135034F0
0000049BB00609FC3827F000041FFD338305474587068404C350350000000 -[364190b0867] jit-backend-dump} -[364190b1115] {jit-backend-addr -Loop 5 ( #13 FOR_ITER) has address 7f82c39f8449 to 7f82c39f877a (bootstrap 7f82c39f82fd) -[364190b1fd1] jit-backend-addr} -[364190b26dd] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f830d +0 60FEFFFF -[364190b3439] jit-backend-dump} -[364190b3a4f] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f846d +0 A9030000 -[364190b465f] jit-backend-dump} -[364190b4c1d] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8480 +0 B8030000 -[364190b559b] jit-backend-dump} -[364190b59fd] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f848d +0 CB030000 -[364190b632d] jit-backend-dump} -[364190b67c7] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f84a5 +0 D4030000 -[364190b70bb] jit-backend-dump} -[364190b7503] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f84b2 +0 EA030000 -[364190b7dd9] jit-backend-dump} -[364190b8459] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f84f6 +0 C8030000 -[364190b8e79] jit-backend-dump} -[364190b940f] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8509 +0 D5030000 -[364190b9cf7] jit-backend-dump} -[364190ba143] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8528 +0 D4030000 -[364190baa1b] jit-backend-dump} -[364190bae6b] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8539 +0 E1030000 -[364190bd8e1] jit-backend-dump} -[364190bdf29] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f854f +0 EA030000 -[364190be9ab] jit-backend-dump} -[364190bf191] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8564 +0 10040000 -[364190bfb41] jit-backend-dump} -[364190bff8d] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8579 +0 19040000 -[364190c08ab] jit-backend-dump} -[364190c0cdf] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f85b7 +0 F9030000 -[364190c15c3] jit-backend-dump} -[364190c1a07] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8600 +0 CD030000 -[364190c2477] jit-backend-dump} -[364190c299b] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f86b7 +0 36030000 -[364190c333d] jit-backend-dump} -[364190c3953] jit-backend} -[364190c450d] {jit-log-opt-loop -# Loop 5 : entry bridge with 54 ops -[p0, p1, p2, p3, i4, p5, i6, i7, p8, p9, p10, p11, p12, p13, p14, p15] ++701: p69 = same_as(ConstPtr(ptr48)) ++701: label(p0, p1, p2, p5, i40, p12, p14, p16, p18, p24, i42, i35, i34, i33, p56, descr=TargetToken(140556696702112)) debug_merge_point(0, ' #13 FOR_ITER') -+362: guard_value(i6, 5, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p8, p9, p10, p11, p12, p13, p14, p15] -+372: guard_class(p12, 21373152, descr=) [p1, p0, p12, p2, p3, i4, p5, p8, p9, 
p10, p11, p13, p14, p15] -+391: p18 = getfield_gc(p12, descr=) -+395: guard_nonnull(p18, descr=) [p1, p0, p12, p18, p2, p3, i4, p5, p8, p9, p10, p11, p13, p14, p15] -+404: i19 = getfield_gc(p12, descr=) -+408: p20 = getfield_gc(p18, descr=) -+419: guard_isnull(p20, descr=) [p1, p0, p12, i19, p18, p20, p2, p3, i4, p5, p8, p9, p10, p11, p13, p14, p15] -+428: i21 = getfield_gc(p18, descr=) -+432: i22 = int_ge(i19, i21) -guard_false(i22, descr=) [p1, p0, p12, i19, p18, p2, p3, i4, p5, p8, p9, p10, p11, p13, p14, p15] -+441: i23 = getfield_gc(p18, descr=) -+452: i24 = getfield_gc(p18, descr=) -+463: i25 = int_mul(i19, i24) -+474: i26 = int_add(i23, i25) -+484: i28 = int_add(i19, 1) -+495: setfield_gc(p12, i28, descr=) -+499: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p8, p9, p10, p11, p12, p14, p15, i26] ++731: i70 = int_ge(i42, i35) +guard_false(i70, descr=) [p1, p0, p18, i42, i34, i33, p2, p5, p12, p14, p16, p24, i40] ++751: i71 = int_mul(i42, i34) ++755: i72 = int_add(i33, i71) ++765: i73 = int_add(i42, 1) debug_merge_point(0, ' #16 STORE_FAST') debug_merge_point(0, ' #19 LOAD_GLOBAL') -+509: guard_value(p3, ConstPtr(ptr30), descr=) [p1, p0, p3, p2, p5, p9, p10, p11, p12, p14, p15, i26] -+528: p31 = getfield_gc(p0, descr=) -+540: guard_value(p31, ConstPtr(ptr32), descr=) [p1, p0, p31, p2, p5, p9, p10, p11, p12, p14, p15, i26] -+559: p33 = getfield_gc(p31, descr=) -+563: guard_value(p33, ConstPtr(ptr34), descr=) [p1, p0, p33, p31, p2, p5, p9, p10, p11, p12, p14, p15, i26] -+576: p36 = getfield_gc(ConstPtr(ptr35), descr=) -+589: guard_isnull(p36, descr=) [p1, p0, p36, p2, p5, p9, p10, p11, p12, p14, p15, i26] -+598: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p9, p10, p11, p12, p14, p15, i26] -+598: p38 = getfield_gc(ConstPtr(ptr37), descr=) -+606: guard_value(p38, ConstPtr(ptr39), descr=) [p1, p0, p38, p2, p5, p9, p10, p11, p12, p14, p15, i26] -+619: p41 = getfield_gc(ConstPtr(ptr40), descr=) -+627: guard_value(p41, ConstPtr(ptr42), descr=) [p1, p0, p41, p2, p5, p9, p10, p11, p12, p14, p15, i26] ++776: setfield_gc(p18, i73, descr=) ++787: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p12, p14, p16, p18, p24, i72, None] debug_merge_point(0, ' #22 LOAD_FAST') debug_merge_point(0, ' #25 CALL_FUNCTION') -+640: p44 = call(ConstClass(ll_int_str__IntegerR_SignedConst_Signed), i26, descr=) -+687: guard_no_exception(, descr=) [p1, p0, p44, p2, p5, p9, p10, p11, p12, p15, i26] ++787: p74 = call(ConstClass(ll_int_str__IntegerR_SignedConst_Signed), i72, descr=) ++813: guard_no_exception(, descr=) [p1, p0, p74, p2, p5, p12, p14, p16, p18, p24, i72, None] debug_merge_point(0, ' #28 LIST_APPEND') -+702: p45 = getfield_gc(p11, descr=) -+713: i46 = getfield_gc(p45, descr=) -+717: i48 = int_add(i46, 1) -+724: call(ConstClass(_ll_list_resize_ge__listPtr_Signed), p45, i48, descr=) -+760: guard_no_exception(, descr=) [p1, p0, i46, p45, p2, p5, p9, p10, p11, p12, p15, p44, i26] -+775: p50 = getfield_gc(p45, descr=) -+786: p52 = new_with_vtable(ConstClass(W_StringObject)) -+849: setfield_gc(p52, p44, descr=) -setarrayitem_gc(p50, i46, p52, descr=) ++828: i75 = getfield_gc(p56, descr=) ++839: i76 = int_add(i75, 1) ++846: p77 = getfield_gc(p56, descr=) ++846: i78 = arraylen_gc(p77, descr=) ++846: call(ConstClass(_ll_list_resize_ge_trampoline__v717___simple_call__function__), p56, i76, descr=) ++875: guard_no_exception(, descr=) [p1, p0, i75, p74, p56, p2, p5, p12, p14, p16, p18, p24, i72, None] ++890: p79 = getfield_gc(p56, descr=) +setarrayitem_gc(p79, i75, p74, descr=) debug_merge_point(0, ' #31 
JUMP_ABSOLUTE') -+928: i54 = getfield_raw(41389288, descr=) -+936: i56 = int_sub(i54, 3) -+940: setfield_raw(41389288, i56, descr=) -+948: i58 = int_lt(i56, 0) -guard_false(i58, descr=) [p1, p0, p2, p5, p9, p10, p11, p12, p15, i26] ++976: i80 = getfield_raw(43780840, descr=) ++984: i81 = int_lt(i80, 0) +guard_false(i81, descr=) [p1, p0, p2, p5, p12, p14, p16, p18, p24, i72, None] debug_merge_point(0, ' #13 FOR_ITER') -+958: jump(p0, p1, p2, p5, i26, p9, p10, p11, p12, p15, i28, i21, p18, i24, i23, p45, descr=) -+1149: --end of the loop-- -[364190fa269] jit-log-opt-loop} -[364194b2dbb] {jit-backend -[364194cd73d] {jit-backend-dump ++994: jump(p0, p1, p2, p5, i72, p12, p14, p16, p18, p24, i73, i35, i34, i33, p56, descr=TargetToken(140556696702112)) ++1009: --end of the loop-- +[d0e914beb85] jit-log-opt-loop} +[d0e91ddd13d] {jit-backend +[d0e91e0341d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8a0d +0 554889E5534154415541564157488DA5000000004C8B3C2510E863014C8B342558525D0148C7042558525D010000000049BB682103C6827F00004D8B2B4983C50149BB682103C6827F00004D892B4D8B6E404F0FB66C3D184983FD330F85000000004D89FD4983C7014D897E1849C74620000000004D896E28B8010000004889042510E8630141BBD065EB0041FFD3B801000000488D65D8415F415E415D415C5B5DC3488B0425E08C77024829E0483B0425807C4501760D49BB34639FC3827F000041FFD3554889E5534154415541564157488DA570FFFFFF4989FF4989F6E94CFFFFFF49BB00609FC3827F000041FFD33D380351000000 -[364194d177b] jit-backend-dump} -[364194d1d51] {jit-backend-addr -Loop 6 (StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]) has address 7f82c39f8a3d to 7f82c39f8ab0 (bootstrap 7f82c39f8a0d) -[364194d2bb2] jit-backend-addr} -[364194d332c] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bbec +0 488B0425E00A9C024829E0483B0425E0EC4F01760D49BB63A379E5D57F000041FFD3554889E5534154415541564157488DA5000000004C8B7E404D0FB67C3F184983FF330F85000000004989FF4883C70148897E1848C74620000000004C897E28B80100000048890425107B540141BB503AF20041FFD3B801000000488D65D8415F415E415D415C5B5DC349BB00A079E5D57F000041FFD31D180355000000 +[d0e91e0a4fd] jit-backend-dump} +[d0e91e0b193] {jit-backend-addr +Loop 3 (StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]) has address 7fd5e579bc22 to 7fd5e579bc77 (bootstrap 7fd5e579bbec) +[d0e91e0ceeb] jit-backend-addr} +[d0e91e0dc5f] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8a1d +0 70FFFFFF -[364194d3f8f] jit-backend-dump} -[364194d4562] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bc1e +0 70FFFFFF +[d0e91e0f6a5] jit-backend-dump} +[d0e91e1047f] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8a6b +0 82000000 -[364194d502a] jit-backend-dump} -[364194d55e2] jit-backend} -[364194d5ef4] {jit-log-opt-loop -# Loop 6 : entry bridge with 10 ops +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bc32 +0 41000000 +[d0e91e11bcb] jit-backend-dump} +[d0e91e12a71] jit-backend} +[d0e91e13d37] {jit-log-opt-loop +# Loop 3 : entry bridge with 10 ops [i0, p1] debug_merge_point(0, 'StrLiteralSearch at 11/51 [17. 8. 3. 1. 1. 1. 1. 51. 0. 19. 51. 
1]') -+78: p2 = getfield_gc(p1, descr=) -+82: i3 = strgetitem(p2, i0) -+88: i5 = int_eq(i3, 51) -guard_true(i5, descr=) [i0, p1] -+98: i7 = int_add(i0, 1) -+105: setfield_gc(p1, i7, descr=) -+109: setfield_gc(p1, ConstPtr(ptr8), descr=) -+117: setfield_gc(p1, i0, descr=) -+121: finish(1, descr=) -+163: --end of the loop-- -[364194ea42c] jit-log-opt-loop} -[3641968d922] {jit-backend -[364196a3bd6] {jit-backend-dump ++54: p2 = getfield_gc(p1, descr=) ++58: i3 = strgetitem(p2, i0) ++64: i5 = int_eq(i3, 51) +guard_true(i5, descr=) [i0, p1] ++74: i7 = int_add(i0, 1) ++81: setfield_gc(p1, i7, descr=) ++85: setfield_gc(p1, ConstPtr(ptr8), descr=) ++93: setfield_gc(p1, i0, descr=) ++97: finish(1, descr=) ++139: --end of the loop-- +[d0e91e29451] jit-log-opt-loop} +[d0e9266372d] {jit-backend +[d0e9268c073] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8b05 +0 488DA50000000049BB802103C6827F00004D8B2B4983C50149BB802103C6827F00004D892B4983C7014D8B6E084D39EF0F8D0000000049BB3D8A9FC3827F000041FFE349BB00609FC3827F000041FFD33D380352000000 -[364196a6d3b] jit-backend-dump} -[364196a7434] {jit-backend-addr -bridge out of Guard 81 has address 7f82c39f8b05 to 7f82c39f8b48 -[364196a7ff2] jit-backend-addr} -[364196a8679] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bc8b +0 488DA50000000049BBB03107E8D57F00004D8B3B4983C70149BBB03107E8D57F00004D893B4883C7014C8B7E084C39FF0F8D000000004C8B76404D0FB6743E184983FE330F84000000004883C7014C39FF0F8C00000000B80000000048890425107B540141BB503AF20041FFD3B801000000488D65D8415F415E415D415C5B5DC349BB00A079E5D57F000041FFD31D18035600000049BB00A079E5D57F000041FFD31D18035700000049BB00A079E5D57F000041FFD31D180358000000 +[d0e9268f9b9] jit-backend-dump} +[d0e9268ff35] {jit-backend-addr +bridge out of Guard 85 has address 7fd5e579bc8b to 7fd5e579bd0c +[d0e9269b5ef] jit-backend-addr} +[d0e9269bef1] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8b08 +0 70FFFFFF -[364196a934b] jit-backend-dump} -[364196a99b1] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bc8e +0 70FFFFFF +[d0e9269cc89] jit-backend-dump} +[d0e9269d493] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8b37 +0 0D000000 -[364196aa5fc] jit-backend-dump} -[364196aaecc] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bcbd +0 4B000000 +[d0e9269df29] jit-backend-dump} +[d0e9269e3d9] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8a6b +0 96000000 -[364196aba27] jit-backend-dump} -[364196ac090] jit-backend} -[364196ac915] {jit-log-opt-bridge -# bridge out of Guard 81 with 6 ops +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bcd1 +0 4B000000 +[d0e9269ed2f] jit-backend-dump} +[d0e9269f1a1] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bcde +0 52000000 +[d0e9269fbbf] jit-backend-dump} +[d0e926a03a3] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bc32 +0 55000000 +[d0e926a0ea5] jit-backend-dump} +[d0e926a1805] jit-backend} +[d0e926a21ab] {jit-log-opt-bridge +# bridge out of Guard 85 with 13 ops [i0, p1] +37: i3 = int_add(i0, 1) -+41: i4 = getfield_gc_pure(p1, descr=) ++41: i4 = getfield_gc_pure(p1, descr=) +45: i5 = int_lt(i3, i4) -guard_true(i5, descr=) [i3, p1] +guard_true(i5, descr=) [i3, p1] debug_merge_point(0, 'StrLiteralSearch at 11/51 [17. 8. 3. 1. 1. 1. 1. 51. 0. 19. 51. 
1]') -+54: jump(i3, p1, descr=) -+67: --end of the loop-- -[364196b43ee] jit-log-opt-bridge} -[36419963403] {jit-backend -[3641996ea7c] {jit-backend-dump ++54: p6 = getfield_gc(p1, descr=) ++58: i7 = strgetitem(p6, i3) ++64: i9 = int_eq(i7, 51) +guard_false(i9, descr=) [i3, p1] ++74: i11 = int_add(i3, 1) ++78: i12 = int_lt(i11, i4) +guard_false(i12, descr=) [i11, p1] ++87: finish(0, descr=) ++129: --end of the loop-- +[d0e926aeccf] jit-log-opt-bridge} +[d0e929bf4d7] {jit-backend +[d0e929d10f1] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8b5c +0 488DA50000000049BB982103C6827F00004D8B334983C60149BB982103C6827F00004D8933B8000000004889042510E8630141BBD065EB0041FFD3B801000000488D65D8415F415E415D415C5B5DC3 -[3641997111c] jit-backend-dump} -[36419971671] {jit-backend-addr -bridge out of Guard 82 has address 7f82c39f8b5c to 7f82c39f8bab -[36419972016] jit-backend-addr} -[36419972646] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bd48 +0 488DA50000000049BBC83107E8D57F00004D8B3B4983C70149BBC83107E8D57F00004D893B4C8B7E404D0FB67C3F184983FF330F84000000004883C7014C8B7E084C39FF0F8C00000000B80000000048890425107B540141BB503AF20041FFD3B801000000488D65D8415F415E415D415C5B5DC349BB00A079E5D57F000041FFD31D18035900000049BB00A079E5D57F000041FFD31D18035A000000 +[d0e929d4389] jit-backend-dump} +[d0e929d489d] {jit-backend-addr +bridge out of Guard 88 has address 7fd5e579bd48 to 7fd5e579bdbc +[d0e929d52eb] jit-backend-addr} +[d0e929d58ad] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8b5f +0 70FFFFFF -[3641997317a] jit-backend-dump} -[364199737a4] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bd4b +0 70FFFFFF +[d0e929d6415] jit-backend-dump} +[d0e929d6a3b] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8b37 +0 21000000 -[364199741c4] jit-backend-dump} -[36419974764] jit-backend} -[36419974e72] {jit-log-opt-bridge -# bridge out of Guard 82 with 1 ops +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bd7d +0 3B000000 +[d0e929d75a1] jit-backend-dump} +[d0e929d7b23] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bd8e +0 3E000000 +[d0e929d858b] jit-backend-dump} +[d0e929d8b6d] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bcde +0 66000000 +[d0e929d949b] jit-backend-dump} +[d0e929d9b33] jit-backend} +[d0e929da23b] {jit-log-opt-bridge +# bridge out of Guard 88 with 10 ops [i0, p1] -+37: finish(0, descr=) +debug_merge_point(0, 'StrLiteralSearch at 11/51 [17. 8. 3. 1. 1. 1. 1. 51. 0. 19. 51. 
1]') ++37: p2 = getfield_gc(p1, descr=) ++41: i3 = strgetitem(p2, i0) ++47: i5 = int_eq(i3, 51) +guard_false(i5, descr=) [i0, p1] ++57: i7 = int_add(i0, 1) ++61: i8 = getfield_gc_pure(p1, descr=) ++65: i9 = int_lt(i7, i8) +guard_false(i9, descr=) [i7, p1] ++74: finish(0, descr=) ++116: --end of the loop-- +[d0e929e3651] jit-log-opt-bridge} +[d0e92d4390b] {jit-backend +[d0e92d4e3fd] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bde4 +0 488DA50000000049BBE03107E8D57F0000498B334883C60149BBE03107E8D57F0000498933B80000000048890425107B540141BB503AF20041FFD3B801000000488D65D8415F415E415D415C5B5DC3 +[d0e92d50c27] jit-backend-dump} +[d0e92d51189] {jit-backend-addr +bridge out of Guard 86 has address 7fd5e579bde4 to 7fd5e579be33 +[d0e92d51acb] jit-backend-addr} +[d0e92d52073] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bde7 +0 70FFFFFF +[d0e92d52c5b] jit-backend-dump} +[d0e92d534d5] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bcbd +0 23010000 +[d0e92d5af55] jit-backend-dump} +[d0e92d5b7c1] jit-backend} +[d0e92d5bf3f] {jit-log-opt-bridge +# bridge out of Guard 86 with 1 ops +[i0, p1] ++37: finish(0, descr=) +79: --end of the loop-- -[36419977b45] jit-log-opt-bridge} -[3641abee1bd] {jit-backend -[3641ad2a377] {jit-backend-dump +[d0e92d60e07] jit-log-opt-bridge} +[d0e95d023e2] {jit-backend +[d0e96040dee] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8d2f +0 554889E5534154415541564157488DA500000000488B042550525D0148C7042550525D010000000048898570FFFFFF488B042558525D0148C7042558525D010000000048898568FFFFFF488B042560525D0148C7042560525D010000000048898560FFFFFF488B042568525D0148C7042568525D010000000048898558FFFFFF488B042570525D0148C7042570525D010000000048898550FFFFFF488B042578525D0148C7042578525D010000000048898548FFFFFF4C8B3C2580525D0148C7042580525D0100000000488B042588525D0148C7042588525D010000000048898540FFFFFF4C8B342550E86301488B042598525D0148C7042598525D010000000048898538FFFFFF488B0425A0525D0148C70425A0525D010000000048898530FFFFFF49BBB02103C6827F00004D8B2B4983C50149BBB02103C6827F00004D892B4C8BAD40FFFFFF4D8B65184D85E40F84000000004D8B55084D8B4C24084D39CA0F8D000000004D8B6424104F8B64D4104983C2014C8BBD70FFFFFF4D8B4F084D89550849BB20A0B0C3827F00004D39D90F85000000004D8B51104981FA60164F010F850000000049BB0882AFC3827F00004D8B0B4983F9017207418139384100000F85000000004D8B510849BB28A1B0C3827F00004D39DA0F85000000004D8B42104981F860164F010F850000000049BB4885AFC3827F00004D8B134983FA01720741813A802701000F85000000004983FC01720841813C24900700000F85000000004D8B4A1849BBB863B0C3827F00004D39D90F85000000004D8B4A204D8B4108BF030000004C29C74883FF020F8F00000000498B7A404C89C64983E8014939F00F8D000000004F8B44C1104C8D8D78FFFFFF4983FE000F850000000049BB28A1B0C3827F00004C39DF0F85000000004C8B77104981FE60164F010F850000000049BB1884AFC3827F0000498B3B4883FF017206813F802701000F85000000004D85C00F84000000004C8B771849BB00C3B3C3827F00004D39DE0F85000000004C8B7740488DB578FFFFFF49BB28A1B0C3827F00004D39DE0F8500000000498B5E104881FB60164F010F850000000049BB1863FFC5827F00004D8B334D85F60F85000000004C8B342560F151014981FE60164F010F85000000004C8B3425084A66014981FE80A44C010F850000000049BB2883AFC3827F00004D8B334983FE01720741813E880F00000F8500000000498B5E10813B688B01000F8500000000498B5E08488D9578FFFFFF48899528FFFFFF488B042590EF4501488D5018483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700903000004889142590EF450148897008488BB538FFFFFFF646040174215741504151415250564889F74889C641BB9062C90
041FFD35E58415A415941585F48894638488B9528FFFFFF4989571848898520FFFFFF488B042590EF4501488D5028483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700880000004889142590EF450148C740080300000048C7401080204D0149BB0073B4C3827F00004C8958184C8940204C899518FFFFFF48898510FFFFFF4C898D08FFFFFF4C89A500FFFFFF4C89B5F8FEFFFF48899DF0FEFFFF4C8985E8FEFFFF4889BDE0FEFFFF48C78578FFFFFF530000004889C741BB7070960041FFD34883BD78FFFFFF000F8C0000000048833C25908D7702000F8500000000488DBD78FFFFFF4C8B8570FFFFFF49897818488985D8FEFFFF488B042590EF4501488D5010483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700380600004889142590EF4501488B9510FFFFFF48895008488985D0FEFFFF48C78578FFFFFF54000000488BBDF0FEFFFF4889C6488B95D8FEFFFF41BB8061BF0041FFD34883BD78FFFFFF000F8C0000000048833C25908D7702000F85000000004989C049BB00000000000000804C21D84883F8000F8500000000488BBDF0FEFFFF4C89C641BBB063BE0041FFD348833C25908D7702000F85000000004883F80172068138084203000F85000000004881F8908B4D010F84000000004C8B8538FFFFFF498B50484885D20F8500000000498B50284883FA000F8500000000488B9520FFFFFF48C74208FDFFFFFF488B9500FFFFFF488B7A08488B5F1049BBFFFFFFFFFFFFFF7F4C39DB0F8D000000004C8B68104C8B70184D8B65104983FC110F85000000004D8B65204D89E14983E4014983FC000F84000000004D8B4D384983F9010F8F000000004D8B4D184983C1014F8B64CD104983FC130F85000000004D89CC4983C1014F8B4CCD104983C4024883FB000F8E000000004983FC0B0F85000000004983F9330F850000000049BBA023FDC5827F00004D39DD0F85000000004C8DAD78FFFFFF488985C8FEFFFF488B042590EF4501488D5018483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700903000004889142590EF4501488B9508FFFFFF4889500841F64004017417574150504C89C74889C641BB9062C90041FFD35841585F49894038488B9570FFFFFF4C896A18488985C0FEFFFF488B042590EF4501488D5048483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700984C00004889142590EF45014889784049BBA023FDC5827F00004C895838488958084C897010488985B8FEFFFF48C78578FFFFFF55000000BF000000004889C649BBB08A9FC3827F000041FFD34883F80174134889C7BE0000000041BBA027990041FFD3EB08488B042510E863014883BD78FFFFFF000F8C0000000048833C25908D7702000F85000000004885C00F8400000000488B8538FFFFFF4C8B40484D85C00F85000000004C8B40284983F8000F85000000004C8B3425E88C77024983EE194C893425E88C7702488B9D30FFFFFFF640040174155041504889C74889DE41BB9062C90041FFD341585848895838488BBDC0FEFFFF48C74708FDFFFFFF4983FE000F8C000000004C8BBD00FFFFFF4D89C648899D30FFFFFFE973F8FFFF488B0425E08C77024829E0483B0425807C4501760D49BB34639FC3827F000041FFD3554889E5534154415541564157488DA530FEFFFF4889BD70FFFFFF4889B568FFFFFF48899560FFFFFF48898D58FFFFFF4C898550FFFFFF4C898D48FFFFFF4C8B5D104D89DF4C8B5D184C899D40FFFFFF4C8B5D204D89DE4C8B5D284C899D38FFFFFF4C8B5D304C899D30FFFFFFE9DFF7FFFF49BB00609FC3827F000041FFD344403430484C50543C035600000049BB00609FC3827F000041FFD34440342930484C50543C035700000049BB00609FC3827F000041FFD3443C24484C50543034035800000049BB00609FC3827F000041FFD3443C2824484C50543034035900000049BB00609FC3827F000041FFD3443C24484C50543034035A00000049BB00609FC3827F000041FFD3443C2428484C50543034035B00000049BB00609FC3827F000041FFD3443C242028484C50543034035C00000049BB00609FC3827F000041FFD3443C2824484C50543034035D00000049BB00609FC3827F000041FFD3443C30484C50543428035E00000049BB00609FC3827F000041FFD3443C2428484C50543034035F00000049BB00609FC3827F000041FFD3443C28484C50543034036000000049BB00609FC3827F000041FFD3443C192128484C505430341C036100000049BB00609FC3827F000041FFD3443C5C484C50543034282025601C036200000049BB00609FC3827F000041FFD3443C5C1C484C505430342820256007036300000049BB00609FC3827F000041FFD3443C5C381C484C5054
30342820256007036400000049BB00609FC3827F000041FFD3443C5C1C484C505430342820256007036500000049BB00609FC3827F000041FFD3443C5C20484C50543034281C07256007036600000049BB00609FC3827F000041FFD3443C5C381C484C50543034280720256007036700000049BB00609FC3827F000041FFD3443C5C38484C5054303428191C20256007036800000049BB00609FC3827F000041FFD3443C5C0C38484C5054303428191C20256007036900000049BB00609FC3827F000041FFD3443C5C38484C5054303428191C20256007036A00000049BB00609FC3827F000041FFD3443C5C484C5054303428191C20256007036B00000049BB00609FC3827F000041FFD3443C5C38484C5054303428191C20256007036C00000049BB00609FC3827F000041FFD3443C5C38484C5054303428191C20256007036D00000049BB00609FC3827F000041FFD3443C5C38484C5054303428191C20256007036E00000049BB00609FC3827F000041FFD3443C5C380C484C5054303428191C20256007036F00000049BB43609FC3827F000041FFD344405C7C80010168484C505478586C88018401607570035300000049BB43609FC3827F000041FFD344405C7C80010168484C505478586C88018401607570037000000049BB43609FC3827F000041FFD344405C90017C01800168484C505478586C880184017560035400000049BB43609FC3827F000041FFD344405C90017C01800168484C505478586C880184017560037100000049BB00609FC3827F000041FFD344405C90017C21800168484C505478586C880184017560037200000049BB43609FC3827F000041FFD344405C90017C0068484C505478586C880184017560037300000049BB00609FC3827F000041FFD344405C90017C0068484C505478586C880184017560037400000049BB00609FC3827F000041FFD344405C68484C505478586C009001880184017560037500000049BB00609FC3827F000041FFD3444020000868484C505478586C079001880184017560037600000049BB00609FC3827F000041FFD34440200068484C505478586C079001880184017560037700000049BB00609FC3827F000041FFD3444020484C505478586C00070784017560037800000049BB00609FC3827F000041FFD3444020001C484C505408586C07070784017560037900000049BB00609FC3827F000041FFD344402000484C505408586C1C390D3407070784017560037A00000049BB00609FC3827F000041FFD34440200025484C505408586C1C390D3407070784017560037B00000049BB00609FC3827F000041FFD344402000484C505408586C1C390D3407070784017560037C00000049BB00609FC3827F000041FFD34440200025484C505408586C1C390D3407070784017560037D00000049BB00609FC3827F000041FFD3444020002531484C505408586C1C390D3407070784017560037E00000049BB00609FC3827F000041FFD344402000253134484C505408586C1C390D0707070784017560037F00000049BB00609FC3827F000041FFD3444020002534484C505408586C1C390D0707070784017560038000000049BB00609FC3827F000041FFD34440200034484C505408586C1C390D0707070784017560038100000049BB43609FC3827F000041FFD344405C94019C01019801484C505478586C840160035500000049BB43609FC3827F000041FFD344405C94019C01019801484C505478586C840160038200000049BB00609FC3827F000041FFD344405C94019C019801484C505478586C840160038300000049BB00609FC3827F000041FFD3444000209801484C505478586C9C019401840160038400000049BB00609FC3827F000041FFD34440009801484C505478586C9C019401840160038500000049BB00609FC3827F000041FFD34440484C50547858070707070386000000 -[3641ad5730b] jit-backend-dump} -[3641ad57dd1] {jit-backend-addr -Loop 7 ( #44 FOR_ITER) has address 7f82c39f8e52 to 7f82c39f95df (bootstrap 7f82c39f8d2f) -[3641ad59369] jit-backend-addr} -[3641ad59f0f] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c015 +0 
488B0425E00A9C024829E0483B0425E0EC4F01760D49BB63A379E5D57F000041FFD3554889E5534154415541564157488DA5000000004C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B4020498B482848898D70FFFFFF498B48304C89BD68FFFFFF4D8B783848899D60FFFFFF498B58404D8B40484889B558FFFFFF4C89A550FFFFFF4C898D48FFFFFF48899540FFFFFF48898538FFFFFF48898D30FFFFFF4C89BD28FFFFFF48899D20FFFFFF4C898518FFFFFF49BBF83107E8D57F00004D8B034983C00149BBF83107E8D57F00004D89034983FA040F85000000004C8B9570FFFFFF41813A306900000F85000000004D8B42104D85C00F8400000000498B5A084D8B781041813FA0CA01000F85000000004D8B40084D8B78084C39FB0F83000000004D8B40104D8B44D8104D85C00F84000000004883C30149895A084983FD000F850000000049BB902190E5D57F00004D39DE0F85000000004C8B770849BBA8EB8BE5D57F00004D39DE0F85000000004D8B6E1049BBC0EB8BE5D57F00004D39DD0F850000000049BB28D78FE5D57F00004D8B3349BB30D78FE5D57F00004D39DE0F85000000004889BD10FFFFFF4C898508FFFFFF41BB10468D0041FFD34C8B40404C8B50504D85D20F85000000004C8B50284983FA000F850000000049BBA02991E5D57F00004D8B134983FA000F8F000000004C8B142500F584014981FAE02687010F850000000049BB58D78FE5D57F00004D8B1341813A98D901000F850000000049BB50D78FE5D57F00004D8B1348898500FFFFFF488B042550C95401488D5040483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C7008800000048C74008030000004889C24883C02848C7004083010048896808488BBD00FFFFFFF6470401741A524150415257504889C641BB8045C50041FFD3585F415A41585A488947404C8BB510FFFFFF49896E1848C74210A0ED820149BBF00090E5D57F00004C895A1849BB800D90E5D57F00004C895A20488985F8FEFFFF4C8995F0FEFFFF4C8985E8FEFFFF488995E0FEFFFF48C78578FFFFFF5B0000004889D741BBF06D920041FFD34883BD78FFFFFF000F8C0000000048833C25400C9C02000F8500000000488985D8FEFFFF488B042550C95401488D5010483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C70038210000488B9510FFFFFF48896A184C8B85E0FEFFFF4C894008488985D0FEFFFF48C78578FFFFFF5C000000488BBDF0FEFFFF4889C6488B95D8FEFFFF41BB8021790041FFD34883BD78FFFFFF000F8C0000000048833C25400C9C02000F85000000004889C249BB00000000000000804C21D84883F8000F8500000000488B85F0FEFFFF488B4018486BD218488B5410184883FA017206813A18E203000F85000000004881FA805371010F8400000000488B8500FFFFFF4C8B40504D85C00F85000000004C8B40284983F8000F85000000004C8B85F8FEFFFF49C74008FDFFFFFF4C8B8508FFFFFF4D8B501049BBFFFFFFFFFFFFFF7F4D39DA0F8D00000000488B7A104C8B72184C8B6F104983FD110F85000000004C8B6F204C89EB4983E5014983FD000F8400000000488B5F384883FB010F8F00000000488B5F184883C3014C8B6CDF104983FD130F85000000004989DD4883C301488B5CDF104983C5024983FA000F8E000000004983FD0B0F85000000004883FB330F850000000049BB102BFAE7D57F00004C39DF0F8500000000488995C8FEFFFF488B042550C95401488D5060483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C700D8ED00004889C24883C04848C7004083010048896808488BBD00FFFFFFF6470401741A505741504152524889C641BB8045C50041FFD35A415A41585F5848894740488B9D10FFFFFF48896B184C8952084C8972104C89424049BB102BFAE7D57F00004C895A38488995C0FEFFFF488985B8FEFFFF48C78578FFFFFF5D000000BF000000004889D649BBECBB79E5D57F000041FFD34883F80174134889C7BE0000000041BB707A950041FFD3EB08488B0425107B54014883BD78FFFFFF000F8C0000000048833C25400C9C02000F85000000004885C00F8500000000488B8500FFFFFF488B78504885FF0F8500000000488B78284883FF000F85000000004C8B85E8FEFFFFF64004017417415057504889C74C89C641BB8045C50041FFD3585F41584C894040488B95B8FEFFFF48C74208FDFFFFFF488B1425E80A9C024883FA000F8C0000000049BB103207E8D57F0000498B134883C20149BB103207E8D57F0000498913488B9570FFFFFF488B5A104885DB0F84000000004C8B72084C8B531041813AA0CA01000F850
0000000488B5B084C8B53084D39D60F8300000000488B5B104A8B5CF3104885DB0F84000000004983C6014C8B9510FFFFFF4D8B6A084C89720849BBA8EB8BE5D57F00004D39DD0F85000000004D8B751049BBC0EB8BE5D57F00004D39DE0F850000000049BB28D78FE5D57F00004D8B2B49BB30D78FE5D57F00004D39DD0F85000000004883FF000F850000000049BBA02991E5D57F0000498B3B4883FF000F8F00000000488B3C2500F584014881FFE02687010F850000000049BB58D78FE5D57F0000498B3B813F98D901000F850000000049BB50D78FE5D57F0000498B3B488985B0FEFFFF488B042550C95401488D5040483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C7008800000048C74008030000004889C24883C02848C70040830100488968084C8BADB0FEFFFF41F6450401741D574150505241524C89EF4889C641BB8045C50041FFD3415A5A5841585F4989454049896A1848C74210A0ED820149BBF00090E5D57F00004C895A1849BB800D90E5D57F00004C895A20488995A8FEFFFF488985A0FEFFFF4C898598FEFFFF48899D08FFFFFF4889BD90FEFFFF48C78578FFFFFF5E0000004889D741BBF06D920041FFD34883BD78FFFFFF000F8C0000000048833C25400C9C02000F850000000048898588FEFFFF488B042550C95401488D5010483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C70038210000488B9510FFFFFF48896A18488BBDA8FEFFFF4889780848898580FEFFFF48C78578FFFFFF5F000000488BBD90FEFFFF4889C6488B9588FEFFFF41BB8021790041FFD34883BD78FFFFFF000F8C0000000048833C25400C9C02000F85000000004889C249BB00000000000000804C21D84883F8000F8500000000488B8590FEFFFF488B4018486BD218488B5410184883FA017206813A18E203000F85000000004881FA805371010F8400000000488B85B0FEFFFF488B78504885FF0F8500000000488B78284883FF000F8500000000488BBDA0FEFFFF48C74708FDFFFFFF488BBD08FFFFFF488B5F1049BBFFFFFFFFFFFFFF7F4C39DB0F8D000000004C8B42104C8B6A184D8B50104983FA110F85000000004D8B50204D89D64983E2014983FA000F84000000004D8B70384983FE010F8F000000004D8B70184983C6014F8B54F0104983FA130F85000000004D89F24983C6014F8B74F0104983C2024883FB000F8E000000004983FA0B0F85000000004983FE330F850000000049BB102BFAE7D57F00004D39D80F850000000048899578FEFFFF488B042550C95401488D5060483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C700D8ED00004889C24883C04848C70040830100488968084C8B85B0FEFFFF41F6400401741957524150504C89C74889C641BB8045C50041FFD35841585A5F498940404C8BB510FFFFFF49896E1848895A084C896A1048897A4049BB102BFAE7D57F00004C895A3848898570FEFFFF48899568FEFFFF48C78578FFFFFF60000000BF000000004889D649BBECBB79E5D57F000041FFD34883F80174134889C7BE0000000041BB707A950041FFD3EB08488B0425107B54014883BD78FFFFFF000F8C0000000048833C25400C9C02000F85000000004885C00F8500000000488B85B0FEFFFF488B78504885FF0F8500000000488B78284883FF000F8500000000488B9598FEFFFFF640040174155752504889C74889D641BB8045C50041FFD3585A5F488950404C8B8570FEFFFF49C74008FDFFFFFF4C8B0425E80A9C024983F8000F8C000000004989D0E96FFAFFFF49BB00A079E5D57F000041FFD3294C1C443835505548585C406064686C036100000049BB00A079E5D57F000041FFD34C1C284438355048585C6064686C036200000049BB00A079E5D57F000041FFD34C1C28204438355048585C6064686C036300000049BB00A079E5D57F000041FFD34C1C280D3C204438355048585C6064686C036400000049BB00A079E5D57F000041FFD34C1C280D3D204438355048585C6064686C036500000049BB00A079E5D57F000041FFD34C1C280D204438355048585C6064686C036600000049BB00A079E5D57F000041FFD3354C1C44385048585C2864686C20036700000049BB00A079E5D57F000041FFD34C1C38445048582864686C20036800000049BB00A079E5D57F000041FFD34C1C38445048582864686C20036900000049BB00A079E5D57F000041FFD34C1C3438445048582864686C20036A00000049BB00A079E5D57F000041FFD34C1C38445048582864686C20036B00000049BB00A079E5D57F000041FFD34C1C38445048582864686C20036C00000049BB00A079E5D57F000041FFD34C7000284450485840201574036D00000049BB00A0
79E5D57F000041FFD34C70004450485840201574036E00000049BB00A079E5D57F000041FFD34C70004450485840201574036F00000049BB00A079E5D57F000041FFD34C7000294450485840201574037000000049BB00A079E5D57F000041FFD34C700028445048584015201574037100000049BB00A079E5D57F000041FFD34C700028445048584015201574037200000049BB43A079E5D57F000041FFD34C70788001017C4450485840158801840174035B00000049BB43A079E5D57F000041FFD34C70788001017C4450485840158801840174037300000049BB43A079E5D57F000041FFD34C707890010180017C445048584015840174035C00000049BB43A079E5D57F000041FFD34C707890010180017C445048584015840174037400000049BB00A079E5D57F000041FFD34C707890010980017C445048584015840174037500000049BB00A079E5D57F000041FFD34C70789001087C445048584015840174037600000049BB00A079E5D57F000041FFD34C70787C445048584008900115840174037700000049BB00A079E5D57F000041FFD34C700008207C445048584007900115840174037800000049BB00A079E5D57F000041FFD34C7000087C445048584007900115840174037900000049BB00A079E5D57F000041FFD34C70004450485840080715840174037A00000049BB00A079E5D57F000041FFD34C700008204450485840070715840107037B00000049BB00A079E5D57F000041FFD34C700008445048584029391C070715840120037C00000049BB00A079E5D57F000041FFD34C7000080D445048584029391C070715840120037D00000049BB00A079E5D57F000041FFD34C700008445048584029391C070715840120037E00000049BB00A079E5D57F000041FFD34C7000080D445048584029391C070715840120037F00000049BB00A079E5D57F000041FFD34C7000080D35445048584029391C070715840120038000000049BB00A079E5D57F000041FFD34C7000080D351C4450485840293907070715840120038100000049BB00A079E5D57F000041FFD34C7000080D1C4450485840293907070715840120038200000049BB00A079E5D57F000041FFD34C7000081C4450485840293907070715840120038300000049BB43A079E5D57F000041FFD34C707894019801019C014450485840840174035D00000049BB43A079E5D57F000041FFD34C707894019801019C014450485840840174038400000049BB00A079E5D57F000041FFD34C7078940198019C014450485840840174038500000049BB00A079E5D57F000041FFD34C70001C9C014450485840840174038600000049BB00A079E5D57F000041FFD34C70009C014450485840840174038700000049BB00A079E5D57F000041FFD34C7044504858400774038800000049BB00A079E5D57F000041FFD34C7044504858400774038900000049BB00A079E5D57F000041FFD34C70080C4450485874038A00000049BB00A079E5D57F000041FFD34C700839280C4450485874038B00000049BB00A079E5D57F000041FFD34C700839290C4450485874038C00000049BB00A079E5D57F000041FFD34C7008390C4450485874038D00000049BB00A079E5D57F000041FFD34C283444504858080C07038E00000049BB00A079E5D57F000041FFD34C28383444504858080C07038F00000049BB00A079E5D57F000041FFD34C283444504858080C07039000000049BB00A079E5D57F000041FFD34C283444504858080C07039100000049BB00A079E5D57F000041FFD34C2800445048580820150C07039200000049BB00A079E5D57F000041FFD34C28001D445048580820150C07039300000049BB00A079E5D57F000041FFD34C28001C44504858081520150C07039400000049BB00A079E5D57F000041FFD34C28001C44504858081520150C07039500000049BB43A079E5D57F000041FFD34C70A001B00101A8014450485840A401AC011574035E00000049BB43A079E5D57F000041FFD34C70A001B00101A8014450485840A401AC011574039600000049BB43A079E5D57F000041FFD34C70A001B80101B001A801445048584074AC0115035F00000049BB43A079E5D57F000041FFD34C70A001B80101B001A801445048584074AC0115039700000049BB00A079E5D57F000041FFD34C70A001B80109B001A801445048584074AC0115039800000049BB00A079E5D57F000041FFD34C70A001B80108A801445048584074AC0115039900000049BB00A079E5D57F000041FFD34C70A001A8014450485840B8010874AC0115039A00000049BB00A079E5D57F000041FFD34C7000081CA8014450485840B8010774AC0115039B00000049BB00A079E5D57F000041FFD34C700008A8014450485840B8010774AC0115039C00000049BB00A079E5D57F000041FFD34C70004450485840070874AC0115039D00000049BB00A079E5D
57F000041FFD34C7000081C4450485840070707AC0115039E00000049BB00A079E5D57F000041FFD34C7000084450485840200D3507071CAC0115039F00000049BB00A079E5D57F000041FFD34C700008394450485840200D3507071CAC011503A000000049BB00A079E5D57F000041FFD34C7000084450485840200D3507071CAC011503A100000049BB00A079E5D57F000041FFD34C700008394450485840200D3507071CAC011503A200000049BB00A079E5D57F000041FFD34C70000839294450485840200D3507071CAC011503A300000049BB00A079E5D57F000041FFD34C7000083929204450485840070D3507071CAC011503A400000049BB00A079E5D57F000041FFD34C70000839204450485840070D3507071CAC011503A500000049BB00A079E5D57F000041FFD34C700008204450485840070D3507071CAC011503A600000049BB43A079E5D57F000041FFD34C70A001BC01C40101C001445048584074AC01036000000049BB43A079E5D57F000041FFD34C70A001BC01C40101C001445048584074AC0103A700000049BB00A079E5D57F000041FFD34C70A001BC01C401C001445048584074AC0103A800000049BB00A079E5D57F000041FFD34C70001CC001445048584074AC0103A900000049BB00A079E5D57F000041FFD34C7000C001445048584074AC0103AA00000049BB00A079E5D57F000041FFD34C704450485840740703AB00000049BB00A079E5D57F000041FFD34C704450485840740703AC000000 +[d0e960a6f32] jit-backend-dump} +[d0e960a8528] {jit-backend-addr +Loop 4 ( #44 FOR_ITER) has address 7fd5e579c04b to 7fd5e579cc54 (bootstrap 7fd5e579c015) +[d0e960aea44] jit-backend-addr} +[d0e960b00a0] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8d3f +0 30FEFFFF -[3641ad5afc7] jit-backend-dump} -[3641ad5b915] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c047 +0 E0FDFFFF +[d0e960b2140] jit-backend-dump} +[d0e960b36be] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8e80 +0 EF070000 -[3641ad5c35f] jit-backend-dump} -[3641ad5c86b] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c106 +0 4A0B0000 +[d0e960b4de0] jit-backend-dump} +[d0e960b5908] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8e92 +0 F8070000 -[3641ad5d187] jit-backend-dump} -[3641ad5d65b] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c11a +0 580B0000 +[d0e960b6d96] jit-backend-dump} +[d0e960b775c] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8ec2 +0 E4070000 -[3641ad5df5d] jit-backend-dump} -[3641ad60323] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c127 +0 6B0B0000 +[d0e960b8bc6] jit-backend-dump} +[d0e960b9592] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8ed3 +0 EE070000 -[3641ad60f2d] jit-backend-dump} -[3641ad614bb] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c13c +0 770B0000 +[d0e960ba9fc] jit-backend-dump} +[d0e960bb3d4] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8ef3 +0 EA070000 -[3641ad61e33] jit-backend-dump} -[3641ad62397] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c14d +0 890B0000 +[d0e960bcacc] jit-backend-dump} +[d0e960bd58e] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8f0a +0 EE070000 -[3641ad62c9f] jit-backend-dump} -[3641ad63219] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c15f +0 9A0B0000 +[d0e960beb3c] jit-backend-dump} +[d0e960bf4cc] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8f1b +0 F9070000 -[3641ad63af3] jit-backend-dump} 
-[3641ad63f41] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c171 +0 AA0B0000 +[d0e960c0a56] jit-backend-dump} +[d0e960c13d4] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8f3b +0 F6070000 -[3641ad648d5] jit-backend-dump} -[3641ad64f2f] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c184 +0 B70B0000 +[d0e960c2838] jit-backend-dump} +[d0e960c31d4] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8f4f +0 FE070000 -[3641ad65977] jit-backend-dump} -[3641ad65f0b] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c19b +0 BE0B0000 +[d0e960c4638] jit-backend-dump} +[d0e960c514e] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8f66 +0 02080000 -[3641ad667c3] jit-backend-dump} -[3641ad66c41] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c1b2 +0 C50B0000 +[d0e960c67c8] jit-backend-dump} +[d0e960c75ae] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8f80 +0 04080000 -[3641ad6750b] jit-backend-dump} -[3641ad67949] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c1d2 +0 E20B0000 +[d0e960c8ba4] jit-backend-dump} +[d0e960c9534] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8f94 +0 0B080000 -[3641ad68201] jit-backend-dump} -[3641ad68737] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c1fa +0 D80B0000 +[d0e960ca9aa] jit-backend-dump} +[d0e960cb322] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8faa +0 13080000 -[3641ad69171] jit-backend-dump} -[3641ad696cd] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c208 +0 E80B0000 +[d0e960cc738] jit-backend-dump} +[d0e960cd17c] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8fbd +0 20080000 -[3641ad69f85] jit-backend-dump} -[3641ad6a3f7] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c21f +0 0B0C0000 +[d0e960ce79c] jit-backend-dump} +[d0e960cf252] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8fce +0 30080000 -[3641ad6acaf] jit-backend-dump} -[3641ad6b0ef] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c234 +0 140C0000 +[d0e960d0776] jit-backend-dump} +[d0e960d1112] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8fed +0 33080000 -[3641ad6b9b7] jit-backend-dump} -[3641ad6bded] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c24e +0 190C0000 +[d0e960d251c] jit-backend-dump} +[d0e960d2fe4] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f8ff6 +0 4B080000 -[3641ad6c861] jit-backend-dump} -[3641ad6ce9f] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c356 +0 300B0000 +[d0e960d441e] jit-backend-dump} +[d0e960d4d96] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f900d +0 56080000 -[3641ad6d837] jit-backend-dump} -[3641ad6dca3] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c365 +0 450B0000 +[d0e960d61d6] jit-backend-dump} +[d0e960d6c98] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f902b +0 5B080000 -[3641ad6e559] 
jit-backend-dump} -[3641ad6e9c7] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c3fb +0 D30A0000 +[d0e960d8264] jit-backend-dump} +[d0e960d8cd2] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f903c +0 6D080000 -[3641ad6f27f] jit-backend-dump} -[3641ad6f6e1] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c40a +0 E80A0000 +[d0e960da262] jit-backend-dump} +[d0e960dabe6] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f9052 +0 7B080000 -[3641ad6ff99] jit-backend-dump} -[3641ad7069f] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c424 +0 F20A0000 +[d0e960dc020] jit-backend-dump} +[d0e960dc980] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f9067 +0 AB080000 -[3641ad71087] jit-backend-dump} -[3641ad7169b] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c44a +0 F00A0000 +[d0e960dde14] jit-backend-dump} +[d0e960de774] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f907c +0 B9080000 -[3641ad7202f] jit-backend-dump} -[3641ad725c3] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c457 +0 050B0000 +[d0e960dfe0c] jit-backend-dump} +[d0e960e085c] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f909c +0 BC080000 -[3641ad72e7d] jit-backend-dump} -[3641ad732dd] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c46b +0 130B0000 +[d0e960e1e22] jit-backend-dump} +[d0e960e27a0] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f90ac +0 CF080000 -[3641ad73b93] jit-backend-dump} -[3641ad74011] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c479 +0 290B0000 +[d0e960e3c34] jit-backend-dump} +[d0e960e4648] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f9203 +0 9C070000 -[3641ad74a67] jit-backend-dump} -[3641ad74fed] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c4a6 +0 3F0B0000 +[d0e960e5ae2] jit-backend-dump} +[d0e960e6442] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f9212 +0 B5070000 -[3641ad758a7] jit-backend-dump} -[3641ad75d1f] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c4bc +0 4B0B0000 +[d0e960e78d6] jit-backend-dump} +[d0e960e83d4] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f92af +0 40070000 -[3641ad78487] jit-backend-dump} -[3641ad78a17] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c4d1 +0 5A0B0000 +[d0e960edd8c] jit-backend-dump} +[d0e960eeb9c] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f92be +0 5A070000 -[3641ad792f9] jit-backend-dump} -[3641ad7973d] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c4df +0 710B0000 +[d0e960f033c] jit-backend-dump} +[d0e960f0e22] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f92d8 +0 69070000 -[3641ad79ff9] jit-backend-dump} -[3641ad7a631] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c4f6 +0 7E0B0000 +[d0e960f2370] jit-backend-dump} +[d0e960f2d18] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f92fa +0 70070000 
-[3641ad7b0f7] jit-backend-dump} -[3641ad7b62b] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c510 +0 890B0000 +[d0e960f41b8] jit-backend-dump} +[d0e960f4b5a] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f930c +0 85070000 -[3641ad7bfdd] jit-backend-dump} -[3641ad7c43b] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c51a +0 A50B0000 +[d0e960f60e4] jit-backend-dump} +[d0e960f6ba0] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f9319 +0 9F070000 -[3641ad7ccfb] jit-backend-dump} -[3641ad7d189] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c524 +0 C20B0000 +[d0e960f81ba] jit-backend-dump} +[d0e960f8c40] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f932d +0 B1070000 -[3641ad7da43] jit-backend-dump} -[3641ad7de95] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c537 +0 D50B0000 +[d0e960fa0da] jit-backend-dump} +[d0e960faa64] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f933b +0 CB070000 -[3641ad7e8ff] jit-backend-dump} -[3641ad7eeb5] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c63c +0 F50A0000 +[d0e960fbefe] jit-backend-dump} +[d0e960fc888] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f936c +0 E4070000 -[3641ad7f841] jit-backend-dump} -[3641ad7fcb7] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c64b +0 0A0B0000 +[d0e960fdd22] jit-backend-dump} +[d0e960fe862] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f9382 +0 F3070000 -[3641ad80571] jit-backend-dump} -[3641ad809d5] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c654 +0 250B0000 +[d0e960ffdec] jit-backend-dump} +[d0e961008f6] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f9397 +0 06080000 -[3641ad8128f] jit-backend-dump} -[3641ad816f7] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c668 +0 340B0000 +[d0e96101f10] jit-backend-dump} +[d0e961028d6] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f93a5 +0 21080000 -[3641ad81fb3] jit-backend-dump} -[3641ad82517] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c676 +0 460B0000 +[d0e96103d6a] jit-backend-dump} +[d0e961047ea] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f93bc +0 32080000 -[3641ad82ef7] jit-backend-dump} -[3641ad83487] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c6bf +0 370B0000 +[d0e96105c78] jit-backend-dump} +[d0e96106644] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f93d6 +0 41080000 -[3641ad83d41] jit-backend-dump} -[3641ad841a3] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c6f1 +0 200B0000 +[d0e96107bd4] jit-backend-dump} +[d0e961086ae] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f93e0 +0 61080000 -[3641ad84a5d] jit-backend-dump} -[3641ad84ed3] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c706 +0 260B0000 +[d0e96109d6a] jit-backend-dump} +[d0e9610a82c] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f93ea +0 
82080000 -[3641ad8579d] jit-backend-dump} -[3641ad85c4b] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c717 +0 320B0000 +[d0e9610bd86] jit-backend-dump} +[d0e9610c746] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f93fd +0 99080000 -[3641ad866a3] jit-backend-dump} -[3641ad86cf9] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c729 +0 3D0B0000 +[d0e9610dbd4] jit-backend-dump} +[d0e9610e5a6] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f9538 +0 87070000 -[3641ad8765b] jit-backend-dump} -[3641ad87a99] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c74f +0 330B0000 +[d0e9610f9b0] jit-backend-dump} +[d0e96110370] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f9547 +0 9E070000 -[3641ad88353] jit-backend-dump} -[3641ad8878f] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c766 +0 380B0000 +[d0e96111ab0] jit-backend-dump} +[d0e961128f0] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f9550 +0 BB070000 -[3641ad89059] jit-backend-dump} -[3641ad894cb] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c786 +0 510B0000 +[d0e96113e86] jit-backend-dump} +[d0e96114834] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f9564 +0 CC070000 -[3641ad89ef1] jit-backend-dump} -[3641ad8a483] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c790 +0 630B0000 +[d0e96115c68] jit-backend-dump} +[d0e96116622] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f9572 +0 E4070000 -[3641ad8ae21] jit-backend-dump} -[3641ad8b2af] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c7a7 +0 6A0B0000 +[d0e96117a50] jit-backend-dump} +[d0e961183ec] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f95c5 +0 B6070000 -[3641ad8bb7f] jit-backend-dump} -[3641ad8c375] jit-backend} -[3641ad8d53d] {jit-log-opt-loop -# Loop 7 : loop with 198 ops -[p0, p1, p2, p3, p4, p5, p6, p7, i8, p9, p10] +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c7bc +0 740B0000 +[d0e9611981a] jit-backend-dump} +[d0e9611a2e2] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c7d5 +0 7B0B0000 +[d0e9611b85a] jit-backend-dump} +[d0e9611c394] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c8e1 +0 8F0A0000 +[d0e9611d810] jit-backend-dump} +[d0e9611e1d6] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c8f0 +0 A60A0000 +[d0e9611f580] jit-backend-dump} +[d0e9611ff5e] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c986 +0 360A0000 +[d0e961213c8] jit-backend-dump} +[d0e96121d76] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c995 +0 4D0A0000 +[d0e96123486] jit-backend-dump} +[d0e96123f72] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c9af +0 590A0000 +[d0e96125598] jit-backend-dump} +[d0e96125f58] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c9d5 +0 590A0000 +[d0e961273e6] jit-backend-dump} +[d0e96127d88] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c9e2 +0 700A0000 +[d0e9612ee20] jit-backend-dump} +[d0e9612fa56] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE 
python +CODE_DUMP @7fd5e579c9f6 +0 800A0000 +[d0e961310f4] jit-backend-dump} +[d0e96131bda] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579ca04 +0 970A0000 +[d0e961331dc] jit-backend-dump} +[d0e96133d6a] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579ca31 +0 AE0A0000 +[d0e961351bc] jit-backend-dump} +[d0e96135b76] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579ca47 +0 BA0A0000 +[d0e96137010] jit-backend-dump} +[d0e961379a6] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579ca5c +0 C90A0000 +[d0e96138e3a] jit-backend-dump} +[d0e961397be] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579ca6a +0 E00A0000 +[d0e9613aeda] jit-backend-dump} +[d0e9613b9ba] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579ca81 +0 ED0A0000 +[d0e9613ceba] jit-backend-dump} +[d0e9613d8c8] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579ca9b +0 F80A0000 +[d0e9613ed5c] jit-backend-dump} +[d0e9613f734] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579caa5 +0 140B0000 +[d0e96140bc2] jit-backend-dump} +[d0e9614158e] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579caaf +0 310B0000 +[d0e96142a22] jit-backend-dump} +[d0e96143574] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579cac2 +0 440B0000 +[d0e96144bf4] jit-backend-dump} +[d0e96145710] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579cbc7 +0 640A0000 +[d0e96146c82] jit-backend-dump} +[d0e96147642] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579cbd6 +0 7A0A0000 +[d0e96148ad0] jit-backend-dump} +[d0e96149496] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579cbdf +0 960A0000 +[d0e9614a92a] jit-backend-dump} +[d0e9614b3a4] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579cbf3 +0 A60A0000 +[d0e9614cade] jit-backend-dump} +[d0e9614d5a0] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579cc01 +0 B80A0000 +[d0e9614eb06] jit-backend-dump} +[d0e9614f5d4] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579cc48 +0 AB0A0000 +[d0e96150a7a] jit-backend-dump} +[d0e96151fd4] jit-backend} +[d0e96153a32] {jit-log-opt-loop +# Loop 4 : loop with 351 ops +[p0, p1] ++54: p2 = getfield_gc(p0, descr=) ++58: p3 = getfield_gc(p0, descr=) ++62: i4 = getfield_gc(p0, descr=) ++70: p5 = getfield_gc(p0, descr=) ++74: i6 = getfield_gc(p0, descr=) ++81: i7 = getfield_gc(p0, descr=) ++85: p8 = getfield_gc(p0, descr=) ++89: p10 = getarrayitem_gc(p8, 0, descr=) ++93: p12 = getarrayitem_gc(p8, 1, descr=) ++97: p14 = getarrayitem_gc(p8, 2, descr=) ++101: p16 = getarrayitem_gc(p8, 3, descr=) ++105: p18 = getarrayitem_gc(p8, 4, descr=) ++116: p20 = getarrayitem_gc(p8, 5, descr=) ++127: p22 = getarrayitem_gc(p8, 6, descr=) ++138: p24 = getarrayitem_gc(p8, 7, descr=) ++142: p25 = getfield_gc(p0, descr=) ++142: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, p20, p22, p24, descr=TargetToken(140556696704672)) debug_merge_point(0, ' #44 FOR_ITER') -+321: p11 = getfield_gc(p7, descr=) -+332: guard_nonnull(p11, descr=) [p1, p0, p7, p11, p2, p3, p4, p5, p6] -+341: i12 = getfield_gc(p7, descr=) -+345: i13 = getfield_gc(p11, descr=) -+350: i14 = int_ge(i12, i13) -guard_false(i14, descr=) [p1, p0, p7, i12, p11, p2, p3, p4, p5, p6] -+359: p15 = getfield_gc(p11, 
descr=) -+364: p16 = getarrayitem_gc(p15, i12, descr=) -+369: i18 = int_add(i12, 1) ++235: guard_value(i6, 4, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18, p20, p22, p24] ++245: guard_class(p16, 38308720, descr=) [p1, p0, p16, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] ++265: p28 = getfield_gc(p16, descr=) ++269: guard_nonnull(p28, descr=) [p1, p0, p16, p28, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] ++278: i29 = getfield_gc(p16, descr=) ++282: p30 = getfield_gc(p28, descr=) ++286: guard_class(p30, 38399200, descr=) [p1, p0, p16, i29, p30, p28, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] ++299: p32 = getfield_gc(p28, descr=) ++303: i33 = getfield_gc(p32, descr=) ++307: i34 = uint_ge(i29, i33) +guard_false(i34, descr=) [p1, p0, p16, i29, i33, p32, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] ++316: p35 = getfield_gc(p32, descr=) ++320: p36 = getarrayitem_gc(p35, i29, descr=) ++325: guard_nonnull(p36, descr=) [p1, p0, p16, i29, p36, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] ++334: i38 = int_add(i29, 1) ++338: setfield_gc(p16, i38, descr=) ++342: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p16, p20, p22, p24, p36] debug_merge_point(0, ' #47 STORE_FAST') debug_merge_point(0, ' #50 LOAD_GLOBAL') -+373: p19 = getfield_gc(p0, descr=) -+384: setfield_gc(p7, i18, descr=) -+388: guard_value(p19, ConstPtr(ptr20), descr=) [p1, p0, p19, p2, p3, p4, p5, p16, p7] -+407: p21 = getfield_gc(p19, descr=) -+411: guard_value(p21, ConstPtr(ptr22), descr=) [p1, p0, p21, p19, p2, p3, p4, p5, p16, p7] -+424: p24 = getfield_gc(ConstPtr(ptr23), descr=) -+437: guard_nonnull_class(p24, 21377976, descr=) [p1, p0, p24, p2, p3, p4, p5, p16, p7] ++352: guard_value(p3, ConstPtr(ptr40), descr=) [p1, p0, p3, p2, p5, p10, p12, p16, p20, p22, p24, p36] ++371: p41 = getfield_gc(p0, descr=) ++375: guard_value(p41, ConstPtr(ptr42), descr=) [p1, p0, p41, p2, p5, p10, p12, p16, p20, p22, p24, p36] ++394: p43 = getfield_gc(p41, descr=) ++398: guard_value(p43, ConstPtr(ptr44), descr=) [p1, p0, p43, p41, p2, p5, p10, p12, p16, p20, p22, p24, p36] ++417: guard_not_invalidated(, descr=) [p1, p0, p41, p2, p5, p10, p12, p16, p20, p22, p24, p36] debug_merge_point(0, ' #53 LOOKUP_METHOD') -+456: p26 = getfield_gc(p24, descr=) -+460: guard_value(p26, ConstPtr(ptr27), descr=) [p1, p0, p24, p26, p2, p3, p4, p5, p16, p7] -+479: p28 = getfield_gc(p26, descr=) -+483: guard_value(p28, ConstPtr(ptr29), descr=) [p1, p0, p24, p28, p26, p2, p3, p4, p5, p16, p7] -+496: p31 = getfield_gc(ConstPtr(ptr30), descr=) -+509: guard_nonnull_class(p31, ConstClass(Function), descr=) [p1, p0, p31, p24, p2, p3, p4, p5, p16, p7] ++417: p46 = getfield_gc(ConstPtr(ptr45), descr=) ++430: guard_value(p46, ConstPtr(ptr47), descr=) [p1, p0, p46, p2, p5, p10, p12, p16, p20, p22, p24, p36] debug_merge_point(0, ' #56 LOAD_CONST') debug_merge_point(0, ' #59 LOAD_FAST') -+528: guard_nonnull_class(p16, ConstClass(W_StringObject), descr=) [p1, p0, p16, p2, p3, p4, p5, p7, p31] debug_merge_point(0, ' #62 CALL_METHOD') -+548: p34 = getfield_gc(p31, descr=) -+552: guard_value(p34, ConstPtr(ptr35), descr=) [p1, p0, p34, p31, p2, p3, p4, p5, p16, p7] -+571: p36 = getfield_gc(p31, descr=) -+575: i37 = arraylen_gc(p36, descr=) -+579: i39 = int_sub(3, i37) -+587: i41 = int_ge(2, i39) -guard_true(i41, descr=) [p1, p0, p31, p2, p3, p4, p5, p16, p7] -+597: p42 = getfield_gc(p31, descr=) -+601: p43 = getfield_gc(p31, descr=) -+601: i45 = int_sub(i37, 1) -+608: i46 = int_ge(i45, i37) -guard_false(i46, descr=) 
[p1, p0, i37, i45, p31, p2, p3, p4, p5, p16, p7, p42] -+617: p47 = getarrayitem_gc_pure(p36, i45, descr=) -+622: i48 = force_token() -+629: i49 = int_is_zero(i8) -guard_true(i49, descr=) [p1, p0, p9, p2, p3, p4, p5, p16, p7, p31, p47, i48, p10, p42] -debug_merge_point(1, ' #0 LOAD_GLOBAL') -+639: guard_value(p42, ConstPtr(ptr50), descr=) [p1, p0, p9, p42, p2, p3, p4, p5, p16, p7, p31, p47, i48, p10, None] -+658: p52 = getfield_gc(p42, descr=) -+662: guard_value(p52, ConstPtr(ptr53), descr=) [p1, p0, p9, p52, p42, p2, p3, p4, p5, p16, p7, p31, p47, i48, p10, None] -+675: p55 = getfield_gc(ConstPtr(ptr54), descr=) -+688: guard_nonnull_class(p55, ConstClass(Function), descr=) [p1, p0, p9, p55, p2, p3, p4, p5, p16, p7, p31, p47, i48, p10, None] -debug_merge_point(1, ' #3 LOAD_FAST') -debug_merge_point(1, ' #6 LOAD_FAST') -+706: guard_nonnull(p47, descr=) [p1, p0, p9, p47, p2, p3, p4, p5, p16, p7, p31, p55, None, i48, p10, None] -debug_merge_point(1, ' #9 CALL_FUNCTION') -+715: p57 = getfield_gc(p55, descr=) -+719: guard_value(p57, ConstPtr(ptr58), descr=) [p1, p0, p9, p57, p55, p2, p3, p4, p5, p16, p7, p31, None, p47, i48, p10, None] -+738: p59 = getfield_gc(p55, descr=) -+742: p60 = getfield_gc(p55, descr=) -+742: p61 = getfield_gc(p55, descr=) -+742: p62 = getfield_gc(p55, descr=) -+742: i63 = force_token() -debug_merge_point(2, ' #0 LOAD_GLOBAL') -+749: guard_value(p59, ConstPtr(ptr64), descr=) [p1, p0, p9, p59, p2, p3, p4, p5, p16, p7, p31, i63, p55, p47, i48, p10, None] -+768: p65 = getfield_gc(p59, descr=) -+772: guard_value(p65, ConstPtr(ptr66), descr=) [p1, p0, p9, p65, p59, p2, p3, p4, p5, p16, p7, p31, i63, p55, p47, i48, p10, None] -+785: p68 = getfield_gc(ConstPtr(ptr67), descr=) -+798: guard_isnull(p68, descr=) [p1, p0, p9, p68, p2, p3, p4, p5, p16, p7, p31, i63, p55, p47, i48, p10, None] -+807: guard_not_invalidated(, descr=) [p1, p0, p9, p2, p3, p4, p5, p16, p7, p31, i63, p55, p47, i48, p10, None] -+807: p70 = getfield_gc(ConstPtr(ptr69), descr=) -+815: guard_value(p70, ConstPtr(ptr71), descr=) [p1, p0, p9, p70, p2, p3, p4, p5, p16, p7, p31, i63, p55, p47, i48, p10, None] -+828: p73 = getfield_gc(ConstPtr(ptr72), descr=) -+836: guard_value(p73, ConstPtr(ptr74), descr=) [p1, p0, p9, p73, p2, p3, p4, p5, p16, p7, p31, i63, p55, p47, i48, p10, None] -debug_merge_point(2, ' #3 LOAD_FAST') -debug_merge_point(2, ' #6 LOAD_CONST') -debug_merge_point(2, ' #9 BINARY_SUBSCR') -debug_merge_point(2, ' #10 CALL_FUNCTION') -debug_merge_point(2, ' #13 BUILD_TUPLE') -debug_merge_point(2, ' #16 LOAD_FAST') -debug_merge_point(2, ' #19 BINARY_ADD') -debug_merge_point(2, ' #20 STORE_FAST') -debug_merge_point(2, ' #23 LOAD_GLOBAL') -+849: p76 = getfield_gc(ConstPtr(ptr75), descr=) -+862: guard_nonnull_class(p76, 21365256, descr=) [p1, p0, p9, p76, p2, p3, p4, p5, p16, p7, p31, i63, p55, p47, i48, p10, None] -debug_merge_point(2, ' #26 LOOKUP_METHOD') -debug_merge_point(2, ' #29 LOAD_FAST') -debug_merge_point(2, ' #32 CALL_METHOD') -+881: p78 = getfield_gc(p76, descr=) -+885: guard_class(p78, ConstClass(ObjectDictStrategy), descr=) [p1, p0, p9, p76, p78, p2, p3, p4, p5, p16, p7, p31, i63, p55, p47, i48, p10, None] -+897: p80 = getfield_gc(p76, descr=) -+901: i81 = force_token() -+908: p83 = new_with_vtable(21373712) -+978: setfield_gc(p83, i63, descr=) -setfield_gc(p9, p83, descr=) -+1032: setfield_gc(p0, i81, descr=) -+1043: p85 = new_array(3, descr=) -+1121: setarrayitem_gc(p85, 0, ConstPtr(ptr87), descr=) -+1129: setarrayitem_gc(p85, 1, ConstPtr(ptr89), descr=) -+1143: setarrayitem_gc(p85, 2, p47, 
descr=) -+1147: i92 = call_may_force(ConstClass(hash_tuple), p85, descr=) -guard_not_forced(, descr=) [p1, p0, p9, p76, p80, i92, p83, p2, p3, p4, p5, p16, p7, p31, p55, p47, p10, i48, p85] -+1240: guard_no_exception(, descr=) [p1, p0, p9, p76, p80, i92, p83, p2, p3, p4, p5, p16, p7, p31, p55, p47, p10, i48, p85] -+1255: i93 = force_token() -+1262: setfield_gc(p0, i93, descr=) -+1273: p95 = new_with_vtable(21362872) -+1343: setfield_gc(p95, p85, descr=) -+1354: i97 = call_may_force(ConstClass(ll_dict_lookup__dicttablePtr_pypy_interpreter_baseobjspace_W_RootPtr_Signed), p80, p95, i92, descr=) -guard_not_forced(, descr=) [p1, p0, p9, p95, p76, i97, p80, p83, p2, p3, p4, p5, p16, p7, p31, p55, p47, i48, p10] -+1412: guard_no_exception(, descr=) [p1, p0, p9, p95, p76, i97, p80, p83, p2, p3, p4, p5, p16, p7, p31, p55, p47, i48, p10] -+1427: i99 = int_and(i97, -9223372036854775808) -+1443: i100 = int_is_true(i99) -guard_false(i100, descr=) [p1, p0, p9, p95, p76, i97, p80, p83, p2, p3, p4, p5, p16, p7, p31, p55, p47, i48, p10] -+1453: p102 = call(ConstClass(ll_get_value__dicttablePtr_Signed), p80, i97, descr=) -+1472: guard_no_exception(, descr=) [p1, p0, p9, p95, p76, p102, p83, p2, p3, p4, p5, p16, p7, p31, p55, p47, i48, p10] -+1487: guard_nonnull_class(p102, 21574792, descr=) [p1, p0, p9, p95, p76, p102, p83, p2, p3, p4, p5, p16, p7, p31, p55, p47, i48, p10] -debug_merge_point(2, ' #35 STORE_FAST') -debug_merge_point(2, ' #38 LOAD_FAST') -debug_merge_point(2, ' #41 LOAD_CONST') -debug_merge_point(2, ' #44 COMPARE_OP') -+1505: i105 = ptr_eq(p102, ConstPtr(ptr104)) -guard_false(i105, descr=) [p1, p0, p9, p83, p2, p3, p4, p5, p16, p7, p31, p102, p95, p55, p47, i48, p10] -debug_merge_point(2, ' #47 POP_JUMP_IF_FALSE') -debug_merge_point(2, ' #50 LOAD_FAST') -debug_merge_point(2, ' #53 RETURN_VALUE') -+1518: p106 = getfield_gc(p9, descr=) -+1529: guard_isnull(p106, descr=) [p1, p0, p9, p102, p106, p83, p2, p3, p4, p5, p16, p7, p31, None, p95, p55, p47, i48, p10] -+1538: i107 = getfield_gc(p9, descr=) -+1542: i108 = int_is_true(i107) -guard_false(i108, descr=) [p1, p0, p9, p102, p83, p2, p3, p4, p5, p16, p7, p31, None, p95, p55, p47, i48, p10] -+1552: p109 = getfield_gc(p9, descr=) -debug_merge_point(1, ' #12 LOOKUP_METHOD') -debug_merge_point(1, ' #15 LOAD_FAST') -debug_merge_point(1, ' #18 CALL_METHOD') -+1552: setfield_gc(p83, -3, descr=) -+1567: guard_not_invalidated(, descr=) [p1, p0, p9, p2, p3, p4, p5, p16, p7, p31, p102, None, None, p47, i48, p10] -+1567: p111 = getfield_gc_pure(p16, descr=) -+1578: i112 = strlen(p111) -+1582: i114 = int_gt(9223372036854775807, i112) -guard_true(i114, descr=) [p1, p0, p9, p102, p111, p2, p3, p4, p5, p16, p7, p31, None, None, None, p47, i48, p10] -+1601: p115 = getfield_gc(p102, descr=) -+1605: i116 = getfield_gc(p102, descr=) -+1609: i118 = getarrayitem_gc_pure(p115, 0, descr=) -+1613: i120 = int_eq(i118, 17) -guard_true(i120, descr=) [p1, p0, p9, p102, p2, p3, p4, p5, p16, p7, p31, p111, i116, i112, p115, None, None, None, p47, i48, p10] -+1623: i122 = getarrayitem_gc_pure(p115, 2, descr=) -+1627: i124 = int_and(i122, 1) -+1634: i125 = int_is_true(i124) -guard_true(i125, descr=) [p1, p0, p9, p102, i122, p2, p3, p4, p5, p16, p7, p31, p111, i116, i112, p115, None, None, None, p47, i48, p10] -+1644: i127 = getarrayitem_gc_pure(p115, 5, descr=) -+1648: i129 = int_gt(i127, 1) -guard_false(i129, descr=) [p1, p0, p9, p102, p2, p3, p4, p5, p16, p7, p31, p111, i116, i112, p115, None, None, None, p47, i48, p10] -+1658: i131 = getarrayitem_gc_pure(p115, 1, descr=) 
-+1662: i133 = int_add(i131, 1) -+1666: i134 = getarrayitem_gc_pure(p115, i133, descr=) -+1671: i136 = int_eq(i134, 19) -guard_true(i136, descr=) [p1, p0, p9, p102, i133, p2, p3, p4, p5, p16, p7, p31, p111, i116, i112, p115, None, None, None, p47, i48, p10] -+1681: i138 = int_add(i133, 1) -+1688: i139 = getarrayitem_gc_pure(p115, i138, descr=) -+1693: i141 = int_add(i133, 2) -+1697: i143 = int_lt(0, i112) -guard_true(i143, descr=) [p1, p0, p9, p102, i139, i141, p2, p3, p4, p5, p16, p7, p31, p111, i116, i112, p115, None, None, None, p47, i48, p10] -+1707: guard_value(i141, 11, descr=) [p1, p0, p9, p102, i139, i141, p115, p2, p3, p4, p5, p16, p7, p31, p111, i116, i112, None, None, None, None, p47, i48, p10] -+1717: guard_value(i139, 51, descr=) [p1, p0, p9, p102, i139, p115, p2, p3, p4, p5, p16, p7, p31, p111, i116, i112, None, None, None, None, p47, i48, p10] -+1727: guard_value(p115, ConstPtr(ptr146), descr=) [p1, p0, p9, p102, p115, p2, p3, p4, p5, p16, p7, p31, p111, i116, i112, None, None, None, None, p47, i48, p10] ++449: p49 = call(ConstClass(getexecutioncontext), descr=) ++472: p50 = getfield_gc(p49, descr=) ++476: i51 = force_token() ++476: p52 = getfield_gc(p49, descr=) ++480: guard_isnull(p52, descr=) [p1, p0, p49, p52, p2, p5, p10, p12, p16, p50, i51, p36] ++489: i53 = getfield_gc(p49, descr=) ++493: i54 = int_is_zero(i53) +guard_true(i54, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, p50, i51, p36] +debug_merge_point(1, ' #0 LOAD_GLOBAL') ++503: guard_not_invalidated(, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, p50, i51, p36] +debug_merge_point(1, ' #3 LOAD_FAST') +debug_merge_point(1, ' #6 LOAD_FAST') +debug_merge_point(1, ' #9 CALL_FUNCTION') ++503: i56 = getfield_gc(ConstPtr(ptr55), descr=) ++516: i58 = int_ge(0, i56) +guard_true(i58, descr=) [p1, p0, p49, i56, p2, p5, p10, p12, p16, p50, i51, p36] ++526: i59 = force_token() +debug_merge_point(2, ' #0 LOAD_GLOBAL') ++526: p61 = getfield_gc(ConstPtr(ptr60), descr=) ++534: guard_value(p61, ConstPtr(ptr62), descr=) [p1, p0, p49, p61, p2, p5, p10, p12, p16, i59, p50, i51, p36] +debug_merge_point(2, ' #3 LOAD_FAST') +debug_merge_point(2, ' #6 LOAD_CONST') +debug_merge_point(2, ' #9 BINARY_SUBSCR') +debug_merge_point(2, ' #10 CALL_FUNCTION') +debug_merge_point(2, ' #13 BUILD_TUPLE') +debug_merge_point(2, ' #16 LOAD_FAST') +debug_merge_point(2, ' #19 BINARY_ADD') +debug_merge_point(2, ' #20 STORE_FAST') +debug_merge_point(2, ' #23 LOAD_GLOBAL') +debug_merge_point(2, ' #26 LOOKUP_METHOD') +debug_merge_point(2, ' #29 LOAD_FAST') +debug_merge_point(2, ' #32 CALL_METHOD') ++547: p64 = getfield_gc(ConstPtr(ptr63), descr=) ++560: guard_class(p64, ConstClass(ObjectDictStrategy), descr=) [p1, p0, p49, p64, p2, p5, p10, p12, p16, i59, p50, i51, p36] ++573: p66 = getfield_gc(ConstPtr(ptr63), descr=) ++586: i67 = force_token() +p69 = new_array(3, descr=) +p71 = new_with_vtable(38380928) ++678: setfield_gc(p71, i59, descr=) +setfield_gc(p49, p71, descr=) ++725: setfield_gc(p0, i67, descr=) ++736: setarrayitem_gc(p69, 0, ConstPtr(ptr73), descr=) ++744: setarrayitem_gc(p69, 1, ConstPtr(ptr75), descr=) ++758: setarrayitem_gc(p69, 2, ConstPtr(ptr77), descr=) ++772: i79 = call_may_force(ConstClass(hash_tuple), p69, descr=) +guard_not_forced(, descr=) [p1, p0, p49, p66, i79, p71, p2, p5, p10, p12, p16, i51, p69, p50, p36] ++837: guard_no_exception(, descr=) [p1, p0, p49, p66, i79, p71, p2, p5, p10, p12, p16, i51, p69, p50, p36] ++852: i80 = force_token() +p82 = new_with_vtable(38290296) ++922: setfield_gc(p0, i80, descr=) ++933: setfield_gc(p82, 
p69, descr=) ++944: i84 = call_may_force(ConstClass(ll_dict_lookup_trampoline__v531___simple_call__function_l), p66, p82, i79, descr=) +guard_not_forced(, descr=) [p1, p0, p49, p82, i84, p66, p71, p2, p5, p10, p12, p16, i51, p50, p36] ++1002: guard_no_exception(, descr=) [p1, p0, p49, p82, i84, p66, p71, p2, p5, p10, p12, p16, i51, p50, p36] ++1017: i86 = int_and(i84, -9223372036854775808) ++1033: i87 = int_is_true(i86) +guard_false(i87, descr=) [p1, p0, p49, p82, i84, p66, p71, p2, p5, p10, p12, p16, i51, p50, p36] ++1043: p88 = getfield_gc(p66, descr=) ++1054: p89 = getinteriorfield_gc(p88, i84, descr=>) ++1063: guard_nonnull_class(p89, 38536280, descr=) [p1, p0, p49, p82, p89, p71, p2, p5, p10, p12, p16, i51, p50, p36] +debug_merge_point(2, ' #35 STORE_FAST') +debug_merge_point(2, ' #38 LOAD_FAST') +debug_merge_point(2, ' #41 LOAD_CONST') +debug_merge_point(2, ' #44 COMPARE_OP') ++1081: i92 = instance_ptr_eq(ConstPtr(ptr91), p89) +guard_false(i92, descr=) [p1, p0, p49, p71, p2, p5, p10, p12, p16, p89, p82, i51, p50, p36] +debug_merge_point(2, ' #47 POP_JUMP_IF_FALSE') +debug_merge_point(2, ' #50 LOAD_FAST') +debug_merge_point(2, ' #53 RETURN_VALUE') ++1094: p93 = getfield_gc(p49, descr=) ++1105: guard_isnull(p93, descr=) [p1, p0, p49, p89, p93, p71, p2, p5, p10, p12, p16, None, p82, i51, p50, p36] ++1114: i95 = getfield_gc(p49, descr=) ++1118: i96 = int_is_true(i95) +guard_false(i96, descr=) [p1, p0, p49, p89, p71, p2, p5, p10, p12, p16, None, p82, i51, p50, p36] ++1128: p97 = getfield_gc(p49, descr=) +debug_merge_point(1, ' #12 LOOKUP_METHOD') ++1128: setfield_gc(p71, -3, descr=) +debug_merge_point(1, ' #15 LOAD_FAST') +debug_merge_point(1, ' #18 CALL_METHOD') ++1143: guard_not_invalidated(, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, p89, None, i51, p50, p36] ++1143: i99 = strlen(p36) ++1154: i101 = int_gt(9223372036854775807, i99) +guard_true(i101, descr=) [p1, p0, p49, p89, p36, p2, p5, p10, p12, p16, None, None, i51, p50, None] ++1173: p102 = getfield_gc_pure(p89, descr=) ++1177: i103 = getfield_gc_pure(p89, descr=) ++1181: i105 = getarrayitem_gc_pure(p102, 0, descr=) ++1185: i107 = int_eq(i105, 17) +guard_true(i107, descr=) [p1, p0, p49, p89, p2, p5, p10, p12, p16, i99, i103, p102, None, None, i51, p50, p36] ++1195: i109 = getarrayitem_gc_pure(p102, 2, descr=) ++1199: i111 = int_and(i109, 1) ++1206: i112 = int_is_true(i111) +guard_true(i112, descr=) [p1, p0, p49, p89, i109, p2, p5, p10, p12, p16, i99, i103, p102, None, None, i51, p50, p36] ++1216: i114 = getarrayitem_gc_pure(p102, 5, descr=) ++1220: i116 = int_gt(i114, 1) +guard_false(i116, descr=) [p1, p0, p49, p89, p2, p5, p10, p12, p16, i99, i103, p102, None, None, i51, p50, p36] ++1230: i118 = getarrayitem_gc_pure(p102, 1, descr=) ++1234: i120 = int_add(i118, 1) ++1238: i121 = getarrayitem_gc_pure(p102, i120, descr=) ++1243: i123 = int_eq(i121, 19) +guard_true(i123, descr=) [p1, p0, p49, p89, i120, p2, p5, p10, p12, p16, i99, i103, p102, None, None, i51, p50, p36] ++1253: i125 = int_add(i120, 1) ++1260: i126 = getarrayitem_gc_pure(p102, i125, descr=) ++1265: i128 = int_add(i120, 2) ++1269: i130 = int_lt(0, i99) +guard_true(i130, descr=) [p1, p0, p49, p89, i126, i128, p2, p5, p10, p12, p16, i99, i103, p102, None, None, i51, p50, p36] ++1279: guard_value(i128, 11, descr=) [p1, p0, p49, p89, i126, i128, p102, p2, p5, p10, p12, p16, i99, i103, None, None, None, i51, p50, p36] ++1289: guard_value(i126, 51, descr=) [p1, p0, p49, p89, i126, p102, p2, p5, p10, p12, p16, i99, i103, None, None, None, i51, p50, p36] ++1299: 
guard_value(p102, ConstPtr(ptr133), descr=) [p1, p0, p49, p89, p102, p2, p5, p10, p12, p16, i99, i103, None, None, None, i51, p50, p36] debug_merge_point(2, 'StrLiteralSearch at 11/51 [17. 8. 3. 1. 1. 1. 1. 51. 0. 19. 51. 1]') -+1746: i147 = force_token() -+1753: p148 = new_with_vtable(21373712) -+1823: setfield_gc(p148, i48, descr=) -setfield_gc(p9, p148, descr=) -+1868: setfield_gc(p0, i147, descr=) -+1879: p150 = new_with_vtable(21380888) -+1949: setfield_gc(p150, p111, descr=) -+1953: setfield_gc(p150, ConstPtr(ptr146), descr=) -+1967: setfield_gc(p150, i112, descr=) -+1971: setfield_gc(p150, i116, descr=) -+1975: i151 = call_assembler(0, p150, descr=) -guard_not_forced(, descr=) [p1, p0, p9, p102, p150, i151, p148, p2, p3, p4, p5, p16, p7, p31, p47, p10] -+2061: guard_no_exception(, descr=) [p1, p0, p9, p102, p150, i151, p148, p2, p3, p4, p5, p16, p7, p31, p47, p10] -+2076: guard_true(i151, descr=) [p1, p0, p9, p102, p150, p148, p2, p3, p4, p5, p16, p7, p31, p47, p10] -debug_merge_point(1, ' #21 RETURN_VALUE') -+2085: p152 = getfield_gc(p9, descr=) -+2096: guard_isnull(p152, descr=) [p1, p0, p9, p152, p148, p2, p3, p4, p5, p16, p7, p31, p150, p102, p47, p10] -+2105: i153 = getfield_gc(p9, descr=) -+2109: i154 = int_is_true(i153) -guard_false(i154, descr=) [p1, p0, p9, p148, p2, p3, p4, p5, p16, p7, p31, p150, p102, p47, p10] -+2119: p155 = getfield_gc(p9, descr=) ++1318: i134 = force_token() +p136 = new_with_vtable(38342680) +p137 = new_with_vtable(38380928) ++1402: setfield_gc(p137, i51, descr=) +setfield_gc(p49, p137, descr=) ++1449: setfield_gc(p0, i134, descr=) ++1460: setfield_gc(p136, i99, descr=) ++1464: setfield_gc(p136, i103, descr=) ++1468: setfield_gc(p136, p36, descr=) ++1472: setfield_gc(p136, ConstPtr(ptr133), descr=) ++1486: i138 = call_assembler(0, p136, descr=) +guard_not_forced(, descr=) [p1, p0, p49, p89, p136, i138, p137, p2, p5, p10, p12, p16, p50, p36] ++1579: guard_no_exception(, descr=) [p1, p0, p49, p89, p136, i138, p137, p2, p5, p10, p12, p16, p50, p36] ++1594: guard_false(i138, descr=) [p1, p0, p49, p89, p136, p137, p2, p5, p10, p12, p16, p50, p36] +debug_merge_point(1, ' #21 RETURN_VALUE') ++1603: p139 = getfield_gc(p49, descr=) ++1614: guard_isnull(p139, descr=) [p1, p0, p49, p139, p137, p2, p5, p10, p12, p16, p50, p36] ++1623: i140 = getfield_gc(p49, descr=) ++1627: i141 = int_is_true(i140) +guard_false(i141, descr=) [p1, p0, p49, p137, p2, p5, p10, p12, p16, p50, p36] ++1637: p142 = getfield_gc(p49, descr=) debug_merge_point(0, ' #65 POP_TOP') debug_merge_point(0, ' #66 JUMP_ABSOLUTE') -+2119: i157 = getfield_raw(41389288, descr=) -+2127: i159 = int_sub(i157, 25) -+2131: setfield_raw(41389288, i159, descr=) -setfield_gc(p9, p10, descr=) -+2177: setfield_gc(p148, -3, descr=) -+2192: i162 = int_lt(i159, 0) -guard_false(i162, descr=) [p1, p0, p2, p3, p4, p5, p16, p7, None, None, None, None] +setfield_gc(p49, p50, descr=) ++1677: setfield_gc(p137, -3, descr=) ++1692: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p10, p12, p16, None, p36] ++1692: i145 = getfield_raw(43780840, descr=) ++1700: i147 = int_lt(i145, 0) +guard_false(i147, descr=) [p1, p0, p2, p5, p10, p12, p16, None, p36] debug_merge_point(0, ' #44 FOR_ITER') -+2202: jump(p0, p1, p2, p3, p4, p5, p16, p7, i153, p9, p10, descr=) -+2224: --end of the loop-- -[3641ae5c499] jit-log-opt-loop} -[3641ae627a9] {jit-backend -[3641af43071] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f9f37 +0 
554889E5534154415541564157488DA5000000004C8B3C2550525D0148C7042550525D01000000004C8B342558525D0148C7042558525D01000000004C8B2C2560525D0148C7042560525D01000000004C8B242568525D0148C7042568525D01000000004C8B142530E863014C8B0C2578525D0148C7042578525D01000000004C8B042540E86301488B3C2548E86301488B342590525D0148C7042590525D0100000000488B1C2598525D0148C7042598525D0100000000488B1425A0525D0148C70425A0525D0100000000488B0C25A8525D0148C70425A8525D0100000000488B0425B0525D0148C70425B0525D010000000048898570FFFFFF488B0425B8525D0148C70425B8525D010000000048898568FFFFFF488B0425C0525D0148C70425C0525D010000000048898560FFFFFF488B0425C8525D0148C70425C8525D010000000048898558FFFFFF49BBC82103C6827F0000498B034883C00149BBC82103C6827F00004989034983F8040F85000000008139102C00000F8500000000488B79184885FF0F84000000004C8B4108488B47084939C00F8D00000000488B7F104A8B7CC7104983C0014C8941084983FA000F850000000049BB28DCB3C3827F00004D39DC0F85000000004D8B670849BB20A0B0C3827F00004D39DC0F8500000000498B5424104881FA60164F010F850000000049BB0882AFC3827F00004D8B234983FC01720841813C24384100000F8500000000498B54240849BB28A1B0C3827F00004C39DA0F85000000004C8B52104981FA60164F010F850000000049BB4885AFC3827F0000498B134883FA017206813A802701000F85000000004883FF017206813F900700000F85000000004C8B621849BBB863B0C3827F00004D39DC0F85000000004C8B62204D8B54240841B8030000004D29D04983F8020F8F000000004C8B42404C89D04983EA014939C20F8D000000004F8B54D4104889B550FFFFFF4C899548FFFFFF48899540FFFFFF4C898D38FFFFFF4C898530FFFFFF48898D28FFFFFF4889BD20FFFFFF41BB20268A0041FFD3488B7838488D8D78FFFFFF4C8B40484D85C00F85000000004C8B40284983F8000F85000000004C8B8530FFFFFF49BB28A1B0C3827F00004D39D80F85000000004D8B48104981F960164F010F850000000049BB1884AFC3827F00004D8B034983F8017207418138802701000F85000000004C8B8D48FFFFFF4D85C90F8400000000498B501849BB00C3B3C3827F00004C39DA0F8500000000498B50404D8B5028498B702048833C25908D7702000F8500000000488DB578FFFFFF49BB28A1B0C3827F00004C39DA0F85000000004C8B52104981FA60164F010F850000000049BB1863FFC5827F0000498B134885D20F8500000000488B142560F151014881FA60164F010F8500000000488B1425084A66014881FA80A44C010F850000000049BB2883AFC3827F0000498B134883FA017206813A880F00000F85000000004C8B521041813A688B01000F85000000004C8B52084C8DA578FFFFFF48898518FFFFFF48899510FFFFFF488B042590EF4501488D5018483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700903000004889142590EF450148897008488BB518FFFFFFF64604017423565157415150415041524889F74889C641BB9062C90041FFD3415A41585841595F595E488946384D89671848898508FFFFFF488B042590EF4501488D5028483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700880000004889142590EF450148C740080300000048C7401080204D0149BB0073B4C3827F00004C8958184C8948204C899500FFFFFF4C89BDF8FEFFFF4C8985F0FEFFFF4C89ADE8FEFFFF4889BDE0FEFFFF48898DD8FEFFFF4C89B5D0FEFFFF48899DC8FEFFFF488985C0FEFFFF48C78578FFFFFF870000004889C741BB7070960041FFD34883BD78FFFFFF000F8C0000000048833C25908D7702000F8500000000488DB578FFFFFF488B9DF8FEFFFF48897318488985B8FEFFFF488B042590EF4501488D5010483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700380600004889142590EF4501488B95C0FEFFFF48895008488985B0FEFFFF48C78578FFFFFF88000000488BBD00FFFFFF4889C6488B95B8FEFFFF41BB8061BF0041FFD34883BD78FFFFFF000F8C0000000048833C25908D7702000F85000000004889C349BB00000000000000804C21D84883F8000F8500000000488BBD00FFFFFF4889DE41BBB063BE0041FFD348833C25908D7702000F85000000004883F80172068138084203000F85000000004881F8908B4D010F8400000000488B9D18FFFFFF488B53484885D20F8500000000488B53284883FA000F8500000000488B9508FFFFFF48C74208FDFFFFFF488B9520FFFFF
F488B72084C8B761049BBFFFFFFFFFFFFFF7F4D39DE0F8D00000000488B4810488B78184C8B49104983F9110F85000000004C8B49204D89CD4983E1014983F9000F84000000004C8B69384983FD010F8F000000004C8B69184983C5014E8B4CE9104983F9130F85000000004D89E94983C5014E8B6CE9104983C1024983FE000F8E000000004983F90B0F85000000004983FD330F850000000049BBA023FDC5827F00004C39D90F8500000000488D8D78FFFFFF488985A8FEFFFF488B042590EF4501488D5018483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700903000004889142590EF4501488B95D8FEFFFF48895008F64304017417575156504889DF4889C641BB9062C90041FFD3585E595F48894338488B95F8FEFFFF48894A18488985A0FEFFFF488B042590EF4501488D5048483B142598EF4501761A49BB10629FC3827F000041FFD349BBA5629FC3827F000041FFD348C700984C00004889142590EF45014889704049BBA023FDC5827F00004C8958384C8970084889781048898598FEFFFF48C78578FFFFFF89000000BF000000004889C649BBB08A9FC3827F000041FFD34883F80174134889C7BE0000000041BBA027990041FFD3EB08488B042510E863014883BD78FFFFFF000F8C0000000048833C25908D7702000F85000000004885C00F8400000000488B8518FFFFFF488B58484885DB0F8500000000488B58284883FB000F8500000000488B3C25E88C77024883EF1948893C25E88C77024C8BB5E0FEFFFFF6400401741350574889C74C89F641BB9062C90041FFD35F584C897038488BB5A0FEFFFF48C74608FDFFFFFF4883FF000F8C00000000488B85F8FEFFFF48898570FFFFFF488B85D0FEFFFF48898568FFFFFF488B85E8FEFFFF48898560FFFFFF488B8538FFFFFF48898558FFFFFF488B85C8FEFFFF48898548FFFFFF4C8BBD20FFFFFF488B8528FFFFFF48898540FFFFFF488B8518FFFFFF48898538FFFFFF4C89B530FFFFFF4989DE49BB528E9FC3827F000041FFE3488B0425E08C77024829E0483B0425807C4501760D49BB34639FC3827F000041FFD3554889E5534154415541564157488DA510FEFFFF4989FF4989F64989D54989CC4D89C24C8B5D104D89D84C8B5D184C89DF4C8B5D204C89DE4C8B5D284C89DB4C8B5D304C89DA4C8B5D384C89D94C8B5D404C899D70FFFFFF4C8B5D484C899D68FFFFFF4C8B5D504C899D60FFFFFF4C8B5D584C899D58FFFFFFE9C9F6FFFF49BB00609FC3827F000041FFD321383C343029241D180C08044044484C038A00000049BB00609FC3827F000041FFD3383C0434302924180C084044484C038B00000049BB00609FC3827F000041FFD3383C041C34302924180C084044484C038C00000049BB00609FC3827F000041FFD3383C04211C34302924180C084044484C038D00000049BB00609FC3827F000041FFD329383C343024180C08041C44484C038E00000049BB00609FC3827F000041FFD3383C303424180C1C0444484C038F00000049BB00609FC3827F000041FFD3383C303424180C1C0444484C039000000049BB00609FC3827F000041FFD3383C08303424180C1C0444484C039100000049BB00609FC3827F000041FFD3383C303424180C1C0444484C039200000049BB00609FC3827F000041FFD3383C30083424180C1C0444484C039300000049BB00609FC3827F000041FFD3383C3028083424180C1C0444484C039400000049BB00609FC3827F000041FFD3383C08303424180C1C0444484C039500000049BB00609FC3827F000041FFD3383C1C3424180C04084C039600000049BB00609FC3827F000041FFD3383C30083424180C1C04039700000049BB00609FC3827F000041FFD3383C083424180C1C04039800000049BB00609FC3827F000041FFD3383C0129083424180C1C0420039900000049BB00609FC3827F000041FFD3383C0020345C500C68645854051C60039A00000049BB00609FC3827F000041FFD3383C00345C500C68645854051C60039B00000049BB00609FC3827F000041FFD3383C0020345C500C68645854051C07039C00000049BB00609FC3827F000041FFD3383C002420345C500C68645854051C07039D00000049BB00609FC3827F000041FFD3383C0020345C500C68645854051C07039E00000049BB00609FC3827F000041FFD3383C0024345C500C6864582007051C07039F00000049BB00609FC3827F000041FFD3383C000820345C500C6864580724051C0703A000000049BB43609FC3827F000041FFD3383C002818345C500C686458082024051C0703A100000049BB00609FC3827F000041FFD3383C0008345C500C68645819072024051C0703A200000049BB00609FC3827F000041FFD3383C002808345C500C68645819072024051C0703A300000049BB00609FC3827F000041FFD3383C0008345C500C68645819072024
051C0703A400000049BB00609FC3827F000041FFD3383C00345C500C68645819072024051C0703A500000049BB00609FC3827F000041FFD3383C0008345C500C68645819072024051C0703A600000049BB00609FC3827F000041FFD3383C0008345C500C68645819072024051C0703A700000049BB00609FC3827F000041FFD3383C0008345C500C68645819072024051C0703A800000049BB00609FC3827F000041FFD3383C000828345C500C68645819072024051C0703A900000049BB43609FC3827F000041FFD390017C6C7078017484015C509401686458980188015480018D01038700000049BB43609FC3827F000041FFD390017C6C7078017484015C509401686458980188015480018D0103AA00000049BB43609FC3827F000041FFD390017C6CA0017001787484015C50940168645888015480018D01038800000049BB43609FC3827F000041FFD390017C6CA0017001787484015C50940168645888015480018D0103AB00000049BB00609FC3827F000041FFD390017C6CA001700D787484015C50940168645888015480018D0103AC00000049BB43609FC3827F000041FFD390017C6CA00170007484015C50940168645888015480018D0103AD00000049BB00609FC3827F000041FFD390017C6CA00170007484015C50940168645888015480018D0103AE00000049BB00609FC3827F000041FFD390017C6C7484015C50940168645800A00188015480018D0103AF00000049BB00609FC3827F000041FFD390017C0C00087484015C50940168645807A00188015480018D0103B000000049BB00609FC3827F000041FFD390017C0C007484015C50940168645807A00188015480018D0103B100000049BB00609FC3827F000041FFD390017C0C84015C5094016864580007880154078D0103B200000049BB00609FC3827F000041FFD390017C0C001884015C5094010864580707880154078D0103B300000049BB00609FC3827F000041FFD390017C0C0084015C50940108645804391D180707880154078D0103B400000049BB00609FC3827F000041FFD390017C0C003584015C50940108645804391D180707880154078D0103B500000049BB00609FC3827F000041FFD390017C0C0084015C50940108645804391D180707880154078D0103B600000049BB00609FC3827F000041FFD390017C0C003584015C50940108645804391D180707880154078D0103B700000049BB00609FC3827F000041FFD390017C0C00352584015C50940108645804391D180707880154078D0103B800000049BB00609FC3827F000041FFD390017C0C0035250484015C50940108645807391D180707880154078D0103B900000049BB00609FC3827F000041FFD390017C0C00350484015C50940108645807391D180707880154078D0103BA00000049BB00609FC3827F000041FFD390017C0C000484015C50940108645807391D180707880154078D0103BB00000049BB43609FC3827F000041FFD390017C6CA401AC0101A80184015C509401686458548801038900000049BB43609FC3827F000041FFD390017C6CA401AC0101A80184015C50940168645854880103BC00000049BB00609FC3827F000041FFD390017C6CA401AC01A80184015C50940168645854880103BD00000049BB00609FC3827F000041FFD390017C000CA80184015C509401686458A401AC0154880103BE00000049BB00609FC3827F000041FFD390017C00A80184015C509401686458A401AC0154880103BF00000049BB00609FC3827F000041FFD390017C84015C50940168640707070703C0000000 -[3641af6a3c7] jit-backend-dump} -[3641af6ae17] {jit-backend-addr -Loop 8 ( #44 FOR_ITER) has address 7f82c39fa083 to 7f82c39fa91a (bootstrap 7f82c39f9f37) -[3641af6bcb3] jit-backend-addr} -[3641af6c34d] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39f9f47 +0 10FEFFFF -[3641af6d095] jit-backend-dump} -[3641af6d6e1] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa0a7 +0 0F090000 -[3641af6e1cb] jit-backend-dump} -[3641af6e8cb] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa0b3 +0 25090000 -[3641af6f313] jit-backend-dump} -[3641af6f791] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa0c0 +0 38090000 -[3641af7007b] jit-backend-dump} -[3641af704e9] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE 
/home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa0d1 +0 48090000 -[3641af70dd9] jit-backend-dump} -[3641af71243] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa0ec +0 4F090000 -[3641af71c79] jit-backend-dump} -[3641af721a7] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa0ff +0 5C090000 -[3641af72a89] jit-backend-dump} -[3641af72ee9] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa116 +0 63090000 -[3641af737cf] jit-backend-dump} -[3641af73c43] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa128 +0 6F090000 -[3641af74521] jit-backend-dump} -[3641af749c7] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa149 +0 6D090000 -[3641af7533b] jit-backend-dump} -[3641af75979] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa161 +0 73090000 -[3641af765a5] jit-backend-dump} -[3641af76b0d] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa172 +0 81090000 -[3641af773eb] jit-backend-dump} -[3641af77839] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa191 +0 82090000 -[3641af78117] jit-backend-dump} -[3641af78557] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa1a3 +0 8F090000 -[3641af78e3b] jit-backend-dump} -[3641af792af] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa1ba +0 94090000 -[3641af79c13] jit-backend-dump} -[3641af7a103] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa1d6 +0 94090000 -[3641af7aa89] jit-backend-dump} -[3641af7aedb] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa1ea +0 9B090000 -[3641af7b7b9] jit-backend-dump} -[3641af7bc1d] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa241 +0 62090000 -[3641af7c501] jit-backend-dump} -[3641af7c943] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa24f +0 75090000 -[3641af7d36d] jit-backend-dump} -[3641af7d8c3] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa269 +0 7B090000 -[3641af80103] jit-backend-dump} -[3641af807cd] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa27a +0 8B090000 -[3641af811ed] jit-backend-dump} -[3641af81763] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa29a +0 8D090000 -[3641af82105] jit-backend-dump} -[3641af826a5] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa2aa +0 9E090000 -[3641af82f8d] jit-backend-dump} -[3641af83455] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa2c1 +0 A9090000 -[3641af83ef3] jit-backend-dump} -[3641af8448d] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa2dc +0 B1090000 -[3641af84e3d] jit-backend-dump} 
-[3641af85399] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa2f6 +0 BB090000 -[3641af85c6d] jit-backend-dump} -[3641af860e1] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa307 +0 CE090000 -[3641af869a1] jit-backend-dump} -[3641af86e01] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa31d +0 DD090000 -[3641af87863] jit-backend-dump} -[3641af87fd9] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa332 +0 0F0A0000 -[3641af88969] jit-backend-dump} -[3641af88ddd] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa347 +0 1E0A0000 -[3641af8969f] jit-backend-dump} -[3641af89aff] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa366 +0 230A0000 -[3641af8a3c1] jit-backend-dump} -[3641af8a7fb] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa377 +0 360A0000 -[3641af8b0cf] jit-backend-dump} -[3641af8b60d] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa4d7 +0 FB080000 -[3641af8bf9b] jit-backend-dump} -[3641af8c541] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa4e6 +0 18090000 -[3641af8cea5] jit-backend-dump} -[3641af8d31d] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa583 +0 A7080000 -[3641af8dbf1] jit-backend-dump} -[3641af8e02f] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa592 +0 C4080000 -[3641af8e8ef] jit-backend-dump} -[3641af8ed45] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa5ac +0 D6080000 -[3641af8f891] jit-backend-dump} -[3641af8fded] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa5ce +0 E0080000 -[3641af90791] jit-backend-dump} -[3641af90bf1] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa5e0 +0 F9080000 -[3641af914b7] jit-backend-dump} -[3641af918f1] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa5ed +0 17090000 -[3641af921c5] jit-backend-dump} -[3641af925fd] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa601 +0 2D090000 -[3641af92ebf] jit-backend-dump} -[3641af93449] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa60f +0 4B090000 -[3641af93e59] jit-backend-dump} -[3641af94447] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa640 +0 6C090000 -[3641af94d2b] jit-backend-dump} -[3641af9518d] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa656 +0 7F090000 -[3641af95a4d] jit-backend-dump} -[3641af97e79] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa66b +0 96090000 -[3641af98923] jit-backend-dump} -[3641af98f3b] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP 
@7f82c39fa679 +0 B5090000 -[3641af99949] jit-backend-dump} -[3641af99f7d] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa690 +0 CA090000 -[3641af9a9d3] jit-backend-dump} -[3641af9af15] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa6aa +0 DD090000 -[3641af9b881] jit-backend-dump} -[3641af9bd27] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa6b4 +0 010A0000 -[3641af9c5e1] jit-backend-dump} -[3641af9ca21] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa6be +0 260A0000 -[3641af9d45d] jit-backend-dump} -[3641af9d9a5] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa6d1 +0 410A0000 -[3641af9e335] jit-backend-dump} -[3641af9e8f5] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa80b +0 34090000 -[3641af9f1ad] jit-backend-dump} -[3641af9f5f9] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa81a +0 4E090000 -[3641af9fec3] jit-backend-dump} -[3641afa030d] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa823 +0 6E090000 -[3641afa0bc3] jit-backend-dump} -[3641afa1163] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa837 +0 82090000 -[3641afa1c21] jit-backend-dump} -[3641afa2155] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa845 +0 9D090000 -[3641afa2add] jit-backend-dump} -[3641afa2f6d] {jit-backend-dump -BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa896 +0 74090000 -[3641afa3837] jit-backend-dump} -[3641afa3e13] jit-backend} -[3641afa48a7] {jit-log-opt-loop -# Loop 8 : entry bridge with 208 ops -[p0, p1, p2, p3, i4, p5, i6, i7, p8, p9, p10, p11, p12, p13, p14, p15] ++1710: label(p0, p1, p2, p5, p10, p12, p36, p16, i140, p49, p50, descr=TargetToken(140556696703072)) debug_merge_point(0, ' #44 FOR_ITER') -+362: guard_value(i6, 4, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p8, p9, p10, p11, p12, p13, p14, p15] -+372: guard_class(p11, 21372560, descr=) [p1, p0, p11, p2, p3, i4, p5, p8, p9, p10, p12, p13, p14, p15] -+384: p18 = getfield_gc(p11, descr=) -+388: guard_nonnull(p18, descr=) [p1, p0, p11, p18, p2, p3, i4, p5, p8, p9, p10, p12, p13, p14, p15] -+397: i19 = getfield_gc(p11, descr=) -+401: i20 = getfield_gc(p18, descr=) -+405: i21 = int_ge(i19, i20) -guard_false(i21, descr=) [p1, p0, p11, i19, p18, p2, p3, i4, p5, p8, p9, p10, p12, p13, p14, p15] -+414: p22 = getfield_gc(p18, descr=) -+418: p23 = getarrayitem_gc(p22, i19, descr=) -+423: i25 = int_add(i19, 1) -+427: setfield_gc(p11, i25, descr=) -+431: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p8, p9, p10, p11, p23, p13, p14, p15] ++1740: p148 = getfield_gc(p16, descr=) ++1751: guard_nonnull(p148, descr=) [p1, p0, p16, p148, p2, p5, p10, p12, p36] ++1760: i149 = getfield_gc(p16, descr=) ++1764: p150 = getfield_gc(p148, descr=) ++1768: guard_class(p150, 38399200, descr=) [p1, p0, p16, i149, p150, p148, p2, p5, p10, p12, p36] ++1781: p151 = getfield_gc(p148, descr=) ++1785: i152 = getfield_gc(p151, descr=) ++1789: i153 = uint_ge(i149, i152) +guard_false(i153, descr=) [p1, p0, p16, i149, i152, p151, p2, p5, p10, 
p12, p36] ++1798: p154 = getfield_gc(p151, descr=) ++1802: p155 = getarrayitem_gc(p154, i149, descr=) ++1807: guard_nonnull(p155, descr=) [p1, p0, p16, i149, p155, p2, p5, p10, p12, p36] ++1816: i156 = int_add(i149, 1) debug_merge_point(0, ' #47 STORE_FAST') debug_merge_point(0, ' #50 LOAD_GLOBAL') -+441: guard_value(p3, ConstPtr(ptr27), descr=) [p1, p0, p3, p2, p5, p8, p9, p23, p11, p13, p14, p15] -+460: p28 = getfield_gc(p0, descr=) -+464: guard_value(p28, ConstPtr(ptr29), descr=) [p1, p0, p28, p2, p5, p8, p9, p23, p11, p13, p14, p15] -+483: p30 = getfield_gc(p28, descr=) -+488: guard_value(p30, ConstPtr(ptr31), descr=) [p1, p0, p30, p28, p2, p5, p8, p9, p23, p11, p13, p14, p15] -+501: p33 = getfield_gc(ConstPtr(ptr32), descr=) -+514: guard_nonnull_class(p33, 21377976, descr=) [p1, p0, p33, p2, p5, p8, p9, p23, p11, p13, p14, p15] ++1820: p157 = getfield_gc(p0, descr=) ++1831: setfield_gc(p16, i156, descr=) ++1835: guard_value(p157, ConstPtr(ptr42), descr=) [p1, p0, p157, p2, p5, p10, p12, p16, p155, None] ++1854: p158 = getfield_gc(p157, descr=) ++1858: guard_value(p158, ConstPtr(ptr44), descr=) [p1, p0, p158, p157, p2, p5, p10, p12, p16, p155, None] ++1877: guard_not_invalidated(, descr=) [p1, p0, p157, p2, p5, p10, p12, p16, p155, None] debug_merge_point(0, ' #53 LOOKUP_METHOD') -+534: p35 = getfield_gc(p33, descr=) -+539: guard_value(p35, ConstPtr(ptr36), descr=) [p1, p0, p33, p35, p2, p5, p8, p9, p23, p11, p13, p14, p15] -+558: p37 = getfield_gc(p35, descr=) -+562: guard_value(p37, ConstPtr(ptr38), descr=) [p1, p0, p33, p37, p35, p2, p5, p8, p9, p23, p11, p13, p14, p15] -+575: p40 = getfield_gc(ConstPtr(ptr39), descr=) -+588: guard_nonnull_class(p40, ConstClass(Function), descr=) [p1, p0, p40, p33, p2, p5, p8, p9, p23, p11, p13, p14, p15] ++1877: p159 = getfield_gc(ConstPtr(ptr45), descr=) ++1890: guard_value(p159, ConstPtr(ptr47), descr=) [p1, p0, p159, p2, p5, p10, p12, p16, p155, None] debug_merge_point(0, ' #56 LOAD_CONST') debug_merge_point(0, ' #59 LOAD_FAST') -+606: guard_nonnull_class(p23, ConstClass(W_StringObject), descr=) [p1, p0, p23, p2, p5, p8, p9, p11, p40, p15] debug_merge_point(0, ' #62 CALL_METHOD') -+624: p43 = getfield_gc(p40, descr=) -+628: guard_value(p43, ConstPtr(ptr44), descr=) [p1, p0, p43, p40, p2, p5, p8, p9, p23, p11] -+647: p45 = getfield_gc(p40, descr=) -+651: i46 = arraylen_gc(p45, descr=) -+656: i48 = int_sub(3, i46) -+665: i50 = int_ge(2, i48) -guard_true(i50, descr=) [p1, p0, p40, p2, p5, p8, p9, p23, p11] -+675: p51 = getfield_gc(p40, descr=) -+679: p52 = getfield_gc(p40, descr=) -+679: i54 = int_sub(i46, 1) -+686: i55 = int_ge(i54, i46) -guard_false(i55, descr=) [p1, p0, i46, i54, p40, p2, p5, p8, p9, p23, p11, p51] -+695: p56 = getarrayitem_gc_pure(p45, i54, descr=) -+700: p58 = call(ConstClass(getexecutioncontext), descr=) -+758: p59 = getfield_gc(p58, descr=) -+762: i60 = force_token() -+769: p61 = getfield_gc(p58, descr=) -+773: guard_isnull(p61, descr=) [p1, p0, p58, p61, p2, p5, p8, p9, p23, p11, p40, p56, i60, p59, p51] -+782: i62 = getfield_gc(p58, descr=) -+786: i63 = int_is_zero(i62) -guard_true(i63, descr=) [p1, p0, p58, p2, p5, p8, p9, p23, p11, p40, p56, i60, p59, p51] -debug_merge_point(1, ' #0 LOAD_GLOBAL') -+796: guard_value(p51, ConstPtr(ptr64), descr=) [p1, p0, p58, p51, p2, p5, p8, p9, p23, p11, p40, p56, i60, p59, None] -+822: p66 = getfield_gc(p51, descr=) -+826: guard_value(p66, ConstPtr(ptr67), descr=) [p1, p0, p58, p66, p51, p2, p5, p8, p9, p23, p11, p40, p56, i60, p59, None] -+839: p69 = getfield_gc(ConstPtr(ptr68), 
descr=) -+852: guard_nonnull_class(p69, ConstClass(Function), descr=) [p1, p0, p58, p69, p2, p5, p8, p9, p23, p11, p40, p56, i60, p59, None] -debug_merge_point(1, ' #3 LOAD_FAST') -debug_merge_point(1, ' #6 LOAD_FAST') -+871: guard_nonnull(p56, descr=) [p1, p0, p58, p56, p2, p5, p8, p9, p23, p11, p40, p69, None, i60, p59, None] -debug_merge_point(1, ' #9 CALL_FUNCTION') -+887: p71 = getfield_gc(p69, descr=) -+891: guard_value(p71, ConstPtr(ptr72), descr=) [p1, p0, p58, p71, p69, p2, p5, p8, p9, p23, p11, p40, None, p56, i60, p59, None] -+910: p73 = getfield_gc(p69, descr=) -+914: p74 = getfield_gc(p69, descr=) -+914: p75 = getfield_gc(p69, descr=) -+918: p76 = getfield_gc(p69, descr=) -+922: guard_no_exception(, descr=) [p1, p0, p58, p75, p76, p2, p5, p8, p9, p23, p11, p40, p73, p69, p56, i60, p59, None] -+937: i77 = force_token() -debug_merge_point(2, ' #0 LOAD_GLOBAL') -+944: guard_value(p73, ConstPtr(ptr78), descr=) [p1, p0, p58, p73, p2, p5, p8, p9, p23, p11, p40, i77, None, p69, p56, i60, p59, None] -+963: p79 = getfield_gc(p73, descr=) -+967: guard_value(p79, ConstPtr(ptr80), descr=) [p1, p0, p58, p79, p73, p2, p5, p8, p9, p23, p11, p40, i77, None, p69, p56, i60, p59, None] -+980: p82 = getfield_gc(ConstPtr(ptr81), descr=) -+993: guard_isnull(p82, descr=) [p1, p0, p58, p82, p2, p5, p8, p9, p23, p11, p40, i77, None, p69, p56, i60, p59, None] -+1002: guard_not_invalidated(, descr=) [p1, p0, p58, p2, p5, p8, p9, p23, p11, p40, i77, None, p69, p56, i60, p59, None] -+1002: p84 = getfield_gc(ConstPtr(ptr83), descr=) -+1010: guard_value(p84, ConstPtr(ptr85), descr=) [p1, p0, p58, p84, p2, p5, p8, p9, p23, p11, p40, i77, None, p69, p56, i60, p59, None] -+1023: p87 = getfield_gc(ConstPtr(ptr86), descr=) -+1031: guard_value(p87, ConstPtr(ptr88), descr=) [p1, p0, p58, p87, p2, p5, p8, p9, p23, p11, p40, i77, None, p69, p56, i60, p59, None] -debug_merge_point(2, ' #3 LOAD_FAST') -debug_merge_point(2, ' #6 LOAD_CONST') -debug_merge_point(2, ' #9 BINARY_SUBSCR') -debug_merge_point(2, ' #10 CALL_FUNCTION') -debug_merge_point(2, ' #13 BUILD_TUPLE') -debug_merge_point(2, ' #16 LOAD_FAST') -debug_merge_point(2, ' #19 BINARY_ADD') -debug_merge_point(2, ' #20 STORE_FAST') -debug_merge_point(2, ' #23 LOAD_GLOBAL') -+1044: p90 = getfield_gc(ConstPtr(ptr89), descr=) -+1057: guard_nonnull_class(p90, 21365256, descr=) [p1, p0, p58, p90, p2, p5, p8, p9, p23, p11, p40, i77, None, p69, p56, i60, p59, None] -debug_merge_point(2, ' #26 LOOKUP_METHOD') -debug_merge_point(2, ' #29 LOAD_FAST') -debug_merge_point(2, ' #32 CALL_METHOD') -+1075: p92 = getfield_gc(p90, descr=) -+1079: guard_class(p92, ConstClass(ObjectDictStrategy), descr=) [p1, p0, p58, p90, p92, p2, p5, p8, p9, p23, p11, p40, i77, None, p69, p56, i60, p59, None] -+1092: p94 = getfield_gc(p90, descr=) -+1096: i95 = force_token() -+1103: p97 = new_with_vtable(21373712) -+1180: setfield_gc(p97, i77, descr=) -setfield_gc(p58, p97, descr=) -+1236: setfield_gc(p0, i95, descr=) -+1240: p99 = new_array(3, descr=) -+1318: setarrayitem_gc(p99, 0, ConstPtr(ptr101), descr=) -+1326: setarrayitem_gc(p99, 1, ConstPtr(ptr103), descr=) -+1340: setarrayitem_gc(p99, 2, p56, descr=) -+1344: i106 = call_may_force(ConstClass(hash_tuple), p99, descr=) -guard_not_forced(, descr=) [p1, p0, p58, p90, p94, i106, p97, p2, p5, p8, p9, p23, p11, p40, p99, p59, p56, p69, i60] -+1444: guard_no_exception(, descr=) [p1, p0, p58, p90, p94, i106, p97, p2, p5, p8, p9, p23, p11, p40, p99, p59, p56, p69, i60] -+1459: i107 = force_token() -+1466: setfield_gc(p0, i107, descr=) -+1477: p109 = 
new_with_vtable(21362872) -+1547: setfield_gc(p109, p99, descr=) -+1558: i111 = call_may_force(ConstClass(ll_dict_lookup__dicttablePtr_pypy_interpreter_baseobjspace_W_RootPtr_Signed), p94, p109, i106, descr=) -guard_not_forced(, descr=) [p1, p0, p58, p109, p90, i111, p94, p97, p2, p5, p8, p9, p23, p11, p40, p59, p56, p69, i60] -+1616: guard_no_exception(, descr=) [p1, p0, p58, p109, p90, i111, p94, p97, p2, p5, p8, p9, p23, p11, p40, p59, p56, p69, i60] -+1631: i113 = int_and(i111, -9223372036854775808) -+1647: i114 = int_is_true(i113) -guard_false(i114, descr=) [p1, p0, p58, p109, p90, i111, p94, p97, p2, p5, p8, p9, p23, p11, p40, p59, p56, p69, i60] -+1657: p116 = call(ConstClass(ll_get_value__dicttablePtr_Signed), p94, i111, descr=) -+1676: guard_no_exception(, descr=) [p1, p0, p58, p109, p90, p116, p97, p2, p5, p8, p9, p23, p11, p40, p59, p56, p69, i60] -+1691: guard_nonnull_class(p116, 21574792, descr=) [p1, p0, p58, p109, p90, p116, p97, p2, p5, p8, p9, p23, p11, p40, p59, p56, p69, i60] -debug_merge_point(2, ' #35 STORE_FAST') -debug_merge_point(2, ' #38 LOAD_FAST') -debug_merge_point(2, ' #41 LOAD_CONST') -debug_merge_point(2, ' #44 COMPARE_OP') -+1709: i119 = ptr_eq(p116, ConstPtr(ptr118)) -guard_false(i119, descr=) [p1, p0, p58, p97, p2, p5, p8, p9, p23, p11, p40, p116, p109, p59, p56, p69, i60] -debug_merge_point(2, ' #47 POP_JUMP_IF_FALSE') -debug_merge_point(2, ' #50 LOAD_FAST') -debug_merge_point(2, ' #53 RETURN_VALUE') -+1722: p120 = getfield_gc(p58, descr=) -+1733: guard_isnull(p120, descr=) [p1, p0, p58, p116, p120, p97, p2, p5, p8, p9, p23, p11, p40, None, p109, p59, p56, p69, i60] -+1742: i121 = getfield_gc(p58, descr=) -+1746: i122 = int_is_true(i121) -guard_false(i122, descr=) [p1, p0, p58, p116, p97, p2, p5, p8, p9, p23, p11, p40, None, p109, p59, p56, p69, i60] -+1756: p123 = getfield_gc(p58, descr=) -debug_merge_point(1, ' #12 LOOKUP_METHOD') -debug_merge_point(1, ' #15 LOAD_FAST') -debug_merge_point(1, ' #18 CALL_METHOD') -+1756: setfield_gc(p97, -3, descr=) -+1771: guard_not_invalidated(, descr=) [p1, p0, p58, p2, p5, p8, p9, p23, p11, p40, p116, None, p59, p56, None, i60] -+1771: p125 = getfield_gc_pure(p23, descr=) -+1782: i126 = strlen(p125) -+1786: i128 = int_gt(9223372036854775807, i126) -guard_true(i128, descr=) [p1, p0, p58, p116, p125, p2, p5, p8, p9, p23, p11, p40, None, None, p59, p56, None, i60] -+1805: p129 = getfield_gc(p116, descr=) -+1809: i130 = getfield_gc(p116, descr=) -+1813: i132 = getarrayitem_gc_pure(p129, 0, descr=) -+1817: i134 = int_eq(i132, 17) -guard_true(i134, descr=) [p1, p0, p58, p116, p2, p5, p8, p9, p23, p11, p40, p129, i126, i130, p125, None, None, p59, p56, None, i60] -+1827: i136 = getarrayitem_gc_pure(p129, 2, descr=) -+1831: i138 = int_and(i136, 1) -+1838: i139 = int_is_true(i138) -guard_true(i139, descr=) [p1, p0, p58, p116, i136, p2, p5, p8, p9, p23, p11, p40, p129, i126, i130, p125, None, None, p59, p56, None, i60] -+1848: i141 = getarrayitem_gc_pure(p129, 5, descr=) -+1852: i143 = int_gt(i141, 1) -guard_false(i143, descr=) [p1, p0, p58, p116, p2, p5, p8, p9, p23, p11, p40, p129, i126, i130, p125, None, None, p59, p56, None, i60] -+1862: i145 = getarrayitem_gc_pure(p129, 1, descr=) -+1866: i147 = int_add(i145, 1) -+1870: i148 = getarrayitem_gc_pure(p129, i147, descr=) -+1875: i150 = int_eq(i148, 19) -guard_true(i150, descr=) [p1, p0, p58, p116, i147, p2, p5, p8, p9, p23, p11, p40, p129, i126, i130, p125, None, None, p59, p56, None, i60] -+1885: i152 = int_add(i147, 1) -+1892: i153 = getarrayitem_gc_pure(p129, i152, descr=) 
-+1897: i155 = int_add(i147, 2) -+1901: i157 = int_lt(0, i126) -guard_true(i157, descr=) [p1, p0, p58, p116, i153, i155, p2, p5, p8, p9, p23, p11, p40, p129, i126, i130, p125, None, None, p59, p56, None, i60] -+1911: guard_value(i155, 11, descr=) [p1, p0, p58, p116, i153, i155, p129, p2, p5, p8, p9, p23, p11, p40, None, i126, i130, p125, None, None, p59, p56, None, i60] -+1921: guard_value(i153, 51, descr=) [p1, p0, p58, p116, i153, p129, p2, p5, p8, p9, p23, p11, p40, None, i126, i130, p125, None, None, p59, p56, None, i60] -+1931: guard_value(p129, ConstPtr(ptr160), descr=) [p1, p0, p58, p116, p129, p2, p5, p8, p9, p23, p11, p40, None, i126, i130, p125, None, None, p59, p56, None, i60] ++1909: i160 = force_token() ++1909: i161 = int_is_zero(i140) +guard_true(i161, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, p50, i160, p155, None] +debug_merge_point(1, ' #0 LOAD_GLOBAL') +debug_merge_point(1, ' #3 LOAD_FAST') +debug_merge_point(1, ' #6 LOAD_FAST') +debug_merge_point(1, ' #9 CALL_FUNCTION') ++1919: i162 = getfield_gc(ConstPtr(ptr55), descr=) ++1932: i163 = int_ge(0, i162) +guard_true(i163, descr=) [p1, p0, p49, i162, p2, p5, p10, p12, p16, p50, i160, p155, None] ++1942: i164 = force_token() +debug_merge_point(2, ' #0 LOAD_GLOBAL') ++1942: p165 = getfield_gc(ConstPtr(ptr60), descr=) ++1950: guard_value(p165, ConstPtr(ptr62), descr=) [p1, p0, p49, p165, p2, p5, p10, p12, p16, i164, p50, i160, p155, None] +debug_merge_point(2, ' #3 LOAD_FAST') +debug_merge_point(2, ' #6 LOAD_CONST') +debug_merge_point(2, ' #9 BINARY_SUBSCR') +debug_merge_point(2, ' #10 CALL_FUNCTION') +debug_merge_point(2, ' #13 BUILD_TUPLE') +debug_merge_point(2, ' #16 LOAD_FAST') +debug_merge_point(2, ' #19 BINARY_ADD') +debug_merge_point(2, ' #20 STORE_FAST') +debug_merge_point(2, ' #23 LOAD_GLOBAL') +debug_merge_point(2, ' #26 LOOKUP_METHOD') +debug_merge_point(2, ' #29 LOAD_FAST') +debug_merge_point(2, ' #32 CALL_METHOD') ++1963: p166 = getfield_gc(ConstPtr(ptr63), descr=) ++1976: guard_class(p166, ConstClass(ObjectDictStrategy), descr=) [p1, p0, p49, p166, p2, p5, p10, p12, p16, i164, p50, i160, p155, None] ++1988: p167 = getfield_gc(ConstPtr(ptr63), descr=) ++2001: i168 = force_token() +p169 = new_array(3, descr=) +p170 = new_with_vtable(38380928) ++2093: setfield_gc(p170, i164, descr=) +setfield_gc(p49, p170, descr=) ++2144: setfield_gc(p0, i168, descr=) ++2148: setarrayitem_gc(p169, 0, ConstPtr(ptr73), descr=) ++2156: setarrayitem_gc(p169, 1, ConstPtr(ptr75), descr=) ++2170: setarrayitem_gc(p169, 2, ConstPtr(ptr174), descr=) ++2184: i175 = call_may_force(ConstClass(hash_tuple), p169, descr=) +guard_not_forced(, descr=) [p1, p0, p49, p167, i175, p170, p2, p5, p10, p12, p16, p169, p50, i160, p155] ++2256: guard_no_exception(, descr=) [p1, p0, p49, p167, i175, p170, p2, p5, p10, p12, p16, p169, p50, i160, p155] ++2271: i176 = force_token() +p177 = new_with_vtable(38290296) ++2341: setfield_gc(p0, i176, descr=) ++2352: setfield_gc(p177, p169, descr=) ++2363: i178 = call_may_force(ConstClass(ll_dict_lookup_trampoline__v531___simple_call__function_l), p167, p177, i175, descr=) +guard_not_forced(, descr=) [p1, p0, p49, p177, i178, p167, p170, p2, p5, p10, p12, p16, p155, p50, i160] ++2421: guard_no_exception(, descr=) [p1, p0, p49, p177, i178, p167, p170, p2, p5, p10, p12, p16, p155, p50, i160] ++2436: i179 = int_and(i178, -9223372036854775808) ++2452: i180 = int_is_true(i179) +guard_false(i180, descr=) [p1, p0, p49, p177, i178, p167, p170, p2, p5, p10, p12, p16, p155, p50, i160] ++2462: p181 = getfield_gc(p167, descr=) 
++2473: p182 = getinteriorfield_gc(p181, i178, descr=>) ++2482: guard_nonnull_class(p182, 38536280, descr=) [p1, p0, p49, p177, p182, p170, p2, p5, p10, p12, p16, p155, p50, i160] +debug_merge_point(2, ' #35 STORE_FAST') +debug_merge_point(2, ' #38 LOAD_FAST') +debug_merge_point(2, ' #41 LOAD_CONST') +debug_merge_point(2, ' #44 COMPARE_OP') ++2500: i183 = instance_ptr_eq(ConstPtr(ptr91), p182) +guard_false(i183, descr=) [p1, p0, p49, p170, p2, p5, p10, p12, p16, p177, p182, p155, p50, i160] +debug_merge_point(2, ' #47 POP_JUMP_IF_FALSE') +debug_merge_point(2, ' #50 LOAD_FAST') +debug_merge_point(2, ' #53 RETURN_VALUE') ++2513: p184 = getfield_gc(p49, descr=) ++2524: guard_isnull(p184, descr=) [p1, p0, p49, p182, p184, p170, p2, p5, p10, p12, p16, p177, None, p155, p50, i160] ++2533: i185 = getfield_gc(p49, descr=) ++2537: i186 = int_is_true(i185) +guard_false(i186, descr=) [p1, p0, p49, p182, p170, p2, p5, p10, p12, p16, p177, None, p155, p50, i160] ++2547: p187 = getfield_gc(p49, descr=) +debug_merge_point(1, ' #12 LOOKUP_METHOD') ++2547: setfield_gc(p170, -3, descr=) +debug_merge_point(1, ' #15 LOAD_FAST') +debug_merge_point(1, ' #18 CALL_METHOD') ++2562: guard_not_invalidated(, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, None, p182, p155, p50, i160] ++2562: i189 = strlen(p155) ++2573: i191 = int_gt(9223372036854775807, i189) +guard_true(i191, descr=) [p1, p0, p49, p182, p155, p2, p5, p10, p12, p16, None, None, None, p50, i160] ++2592: p192 = getfield_gc_pure(p182, descr=) ++2596: i193 = getfield_gc_pure(p182, descr=) ++2600: i194 = getarrayitem_gc_pure(p192, 0, descr=) ++2604: i195 = int_eq(i194, 17) +guard_true(i195, descr=) [p1, p0, p49, p182, p2, p5, p10, p12, p16, p192, i189, i193, None, None, p155, p50, i160] ++2614: i196 = getarrayitem_gc_pure(p192, 2, descr=) ++2618: i197 = int_and(i196, 1) ++2625: i198 = int_is_true(i197) +guard_true(i198, descr=) [p1, p0, p49, p182, i196, p2, p5, p10, p12, p16, p192, i189, i193, None, None, p155, p50, i160] ++2635: i199 = getarrayitem_gc_pure(p192, 5, descr=) ++2639: i200 = int_gt(i199, 1) +guard_false(i200, descr=) [p1, p0, p49, p182, p2, p5, p10, p12, p16, p192, i189, i193, None, None, p155, p50, i160] ++2649: i201 = getarrayitem_gc_pure(p192, 1, descr=) ++2653: i202 = int_add(i201, 1) ++2657: i203 = getarrayitem_gc_pure(p192, i202, descr=) ++2662: i204 = int_eq(i203, 19) +guard_true(i204, descr=) [p1, p0, p49, p182, i202, p2, p5, p10, p12, p16, p192, i189, i193, None, None, p155, p50, i160] ++2672: i205 = int_add(i202, 1) ++2679: i206 = getarrayitem_gc_pure(p192, i205, descr=) ++2684: i207 = int_add(i202, 2) ++2688: i209 = int_lt(0, i189) +guard_true(i209, descr=) [p1, p0, p49, p182, i206, i207, p2, p5, p10, p12, p16, p192, i189, i193, None, None, p155, p50, i160] ++2698: guard_value(i207, 11, descr=) [p1, p0, p49, p182, i206, i207, p192, p2, p5, p10, p12, p16, None, i189, i193, None, None, p155, p50, i160] ++2708: guard_value(i206, 51, descr=) [p1, p0, p49, p182, i206, p192, p2, p5, p10, p12, p16, None, i189, i193, None, None, p155, p50, i160] ++2718: guard_value(p192, ConstPtr(ptr133), descr=) [p1, p0, p49, p182, p192, p2, p5, p10, p12, p16, None, i189, i193, None, None, p155, p50, i160] debug_merge_point(2, 'StrLiteralSearch at 11/51 [17. 8. 3. 1. 1. 1. 1. 51. 0. 19. 51. 
1]') -+1950: i161 = force_token() -+1957: p162 = new_with_vtable(21373712) -+2027: setfield_gc(p162, i60, descr=) -setfield_gc(p58, p162, descr=) -+2071: setfield_gc(p0, i161, descr=) -+2082: p164 = new_with_vtable(21380888) -+2152: setfield_gc(p164, p125, descr=) -+2156: setfield_gc(p164, ConstPtr(ptr160), descr=) -+2170: setfield_gc(p164, i126, descr=) -+2174: setfield_gc(p164, i130, descr=) -+2178: i165 = call_assembler(0, p164, descr=) -guard_not_forced(, descr=) [p1, p0, p58, p116, p164, i165, p162, p2, p5, p8, p9, p23, p11, p40, p56, p59] -+2264: guard_no_exception(, descr=) [p1, p0, p58, p116, p164, i165, p162, p2, p5, p8, p9, p23, p11, p40, p56, p59] -+2279: guard_true(i165, descr=) [p1, p0, p58, p116, p164, p162, p2, p5, p8, p9, p23, p11, p40, p56, p59] -debug_merge_point(1, ' #21 RETURN_VALUE') -+2288: p166 = getfield_gc(p58, descr=) -+2299: guard_isnull(p166, descr=) [p1, p0, p58, p166, p162, p2, p5, p8, p9, p23, p11, p40, p116, p164, p56, p59] -+2308: i167 = getfield_gc(p58, descr=) -+2312: i168 = int_is_true(i167) -guard_false(i168, descr=) [p1, p0, p58, p162, p2, p5, p8, p9, p23, p11, p40, p116, p164, p56, p59] -+2322: p169 = getfield_gc(p58, descr=) ++2737: i210 = force_token() +p211 = new_with_vtable(38342680) +p212 = new_with_vtable(38380928) ++2821: setfield_gc(p212, i160, descr=) +setfield_gc(p49, p212, descr=) ++2868: setfield_gc(p0, i210, descr=) ++2879: setfield_gc(p211, i189, descr=) ++2883: setfield_gc(p211, i193, descr=) ++2887: setfield_gc(p211, p155, descr=) ++2891: setfield_gc(p211, ConstPtr(ptr133), descr=) ++2905: i213 = call_assembler(0, p211, descr=) +guard_not_forced(, descr=) [p1, p0, p49, p182, p211, i213, p212, p2, p5, p10, p12, p16, p155, p50] ++2998: guard_no_exception(, descr=) [p1, p0, p49, p182, p211, i213, p212, p2, p5, p10, p12, p16, p155, p50] ++3013: guard_false(i213, descr=) [p1, p0, p49, p182, p211, p212, p2, p5, p10, p12, p16, p155, p50] +debug_merge_point(1, ' #21 RETURN_VALUE') ++3022: p214 = getfield_gc(p49, descr=) ++3033: guard_isnull(p214, descr=) [p1, p0, p49, p214, p212, p2, p5, p10, p12, p16, p155, p50] ++3042: i215 = getfield_gc(p49, descr=) ++3046: i216 = int_is_true(i215) +guard_false(i216, descr=) [p1, p0, p49, p212, p2, p5, p10, p12, p16, p155, p50] ++3056: p217 = getfield_gc(p49, descr=) debug_merge_point(0, ' #65 POP_TOP') debug_merge_point(0, ' #66 JUMP_ABSOLUTE') -+2322: i171 = getfield_raw(41389288, descr=) -+2330: i173 = int_sub(i171, 25) -+2334: setfield_raw(41389288, i173, descr=) -setfield_gc(p58, p59, descr=) -+2378: setfield_gc(p162, -3, descr=) -+2393: i176 = int_lt(i173, 0) -guard_false(i176, descr=) [p1, p0, p2, p5, p8, p9, p23, p11, None, None, None, None] +setfield_gc(p49, p50, descr=) ++3094: setfield_gc(p212, -3, descr=) ++3109: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p10, p12, p16, p155, None] ++3109: i219 = getfield_raw(43780840, descr=) ++3117: i220 = int_lt(i219, 0) +guard_false(i220, descr=) [p1, p0, p2, p5, p10, p12, p16, p155, None] debug_merge_point(0, ' #44 FOR_ITER') -+2403: jump(p0, p1, p2, p5, p8, p9, p23, p11, i167, p58, p59, descr=) -+2531: --end of the loop-- -[3641b33ecab] jit-log-opt-loop} -[3641b779d3b] {jit-backend -[3641b79ef9d] {jit-backend-dump ++3127: jump(p0, p1, p2, p5, p10, p12, p155, p16, i215, p49, p50, descr=TargetToken(140556696703072)) ++3135: --end of the loop-- +[d0e963e4a62] jit-log-opt-loop} +[d0e96518f8f] {jit-backend +[d0e9653113d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fb22f +0 
488DA50000000049BBE02103C6827F00004D8B3B4983C70149BBE02103C6827F00004D893B4C8BBD18FFFFFF4D8B77484D85F60F85000000004D8B77284983FE000F85000000004C8B3425E88C77024983EE024C893425E88C77024C8BADE0FEFFFF41F6470401740F4C89FF4C89EE41BB9062C90041FFD34D896F384C8BADA0FEFFFF49C74508FDFFFFFF4983FE000F8C000000004C8BBDF8FEFFFF4C8BB5D0FEFFFF4C8BADE8FEFFFF49BB28DCB3C3827F00004D89DC41BA000000004C8B8D38FFFFFF41B804000000BF2C000000488BB550FFFFFF488B9DC8FEFFFF488B9520FFFFFF488B8D28FFFFFF48C78570FFFFFF0000000048C78568FFFFFF0000000048C78560FFFFFF0000000048C78558FFFFFF0000000049BB83A09FC3827F000041FFE349BB00609FC3827F000041FFD390017C3C38A80184015C50940168645854880103C100000049BB00609FC3827F000041FFD390017C3CA80184015C50940168645854880103C200000049BB00609FC3827F000041FFD390017C84015C5094016864070703C3000000 -[3641b7a3845] jit-backend-dump} -[3641b7a3f15] {jit-backend-addr -bridge out of Guard 189 has address 7f82c39fb22f to 7f82c39fb353 -[3641b7a4d5b] jit-backend-addr} -[3641b7a5535] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579d712 +0 488DA50000000049BB283207E8D57F00004D8B3B4983C70149BB283207E8D57F00004D893B4C8B7E404D0FB67C3F184983FF330F84000000004883C7014C8B7E084C39FF0F8C00000000B80000000048890425107B540141BB503AF20041FFD3B801000000488D65D8415F415E415D415C5B5DC349BB00A079E5D57F000041FFD31D1803AD00000049BB00A079E5D57F000041FFD31D1803AE000000 +[d0e9653458b] jit-backend-dump} +[d0e96534ac7] {jit-backend-addr +bridge out of Guard 90 has address 7fd5e579d712 to 7fd5e579d786 +[d0e965356dd] jit-backend-addr} +[d0e96535ccb] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fb232 +0 10FEFFFF -[3641b7ac32f] jit-backend-dump} -[3641b7acb35] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579d715 +0 70FFFFFF +[d0e96536869] jit-backend-dump} +[d0e96536f8f] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fb264 +0 EB000000 -[3641b7ad6e3] jit-backend-dump} -[3641b7adc85] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579d747 +0 3B000000 +[d0e96537ad3] jit-backend-dump} +[d0e9653804d] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fb272 +0 02010000 -[3641b7ae619] jit-backend-dump} -[3641b7aeaa1] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579d758 +0 3E000000 +[d0e96538a67] jit-backend-dump} +[d0e965392f3] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fb2c0 +0 D8000000 -[3641b7af611] jit-backend-dump} -[3641b7b00df] {jit-backend-dump +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bd8e +0 80190000 +[d0e96539bc5] jit-backend-dump} +[d0e9653a3cd] jit-backend} +[d0e9653abf7] {jit-log-opt-bridge +# bridge out of Guard 90 with 10 ops +[i0, p1] +debug_merge_point(0, 'StrLiteralSearch at 11/51 [17. 8. 3. 1. 1. 1. 1. 51. 0. 19. 51. 
1]') ++37: p2 = getfield_gc(p1, descr=) ++41: i3 = strgetitem(p2, i0) ++47: i5 = int_eq(i3, 51) +guard_false(i5, descr=) [i0, p1] ++57: i7 = int_add(i0, 1) ++61: i8 = getfield_gc_pure(p1, descr=) ++65: i9 = int_lt(i7, i8) +guard_false(i9, descr=) [i7, p1] ++74: finish(0, descr=) ++116: --end of the loop-- +[d0e96544d3d] jit-log-opt-bridge} +[d0e96a79d8b] {jit-backend +[d0e96aae5cb] {jit-backend-dump BACKEND x86_64 -SYS_EXECUTABLE /home/fijal/src/pypy/compiled/pypy-c -CODE_DUMP @7f82c39fa823 +0 080A0000 -[3641b7b0ab9] jit-backend-dump} -[3641b7b117f] jit-backend} -[3641b7b1b4d] {jit-log-opt-bridge -# bridge out of Guard 189 with 18 ops -[p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14] -debug_merge_point(1, ' #21 RETURN_VALUE') -+37: p15 = getfield_gc(p2, descr=) -+48: guard_isnull(p15, descr=) [p0, p1, p2, p15, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14] -+57: i16 = getfield_gc(p2, descr=) -+61: i17 = int_is_true(i16) -guard_false(i17, descr=) [p0, p1, p2, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14] -+71: p18 = getfield_gc(p2, descr=) +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579d7c6 +0 488DA50000000049BB403207E8D57F00004D8B3B4983C70149BB403207E8D57F00004D893B4C8BBD00FFFFFF4D8B77504D85F60F85000000004D8B77284983FE000F85000000004C8BB5E8FEFFFF41F6470401740F4C89FF4C89F641BB8045C50041FFD34D8977404C8BB5B8FEFFFF49C74608FDFFFFFF4C8B3425E80A9C024983FE000F8C00000000488B042550C95401488D5010483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C70088190000488B9508FFFFFF48895008488BBD10FFFFFF49BB902190E5D57F00004D89DE41BD0000000041BA0400000048C78548FFFFFF2C00000048898538FFFFFF48C78530FFFFFF0000000048C78528FFFFFF0000000048C78520FFFFFF0000000048C78518FFFFFF0000000049BBE2C079E5D57F000041FFE349BB00A079E5D57F000041FFD34C703C389C0144504858408401749801940103AF00000049BB00A079E5D57F000041FFD34C703C9C0144504858408401749801940103B000000049BB00A079E5D57F000041FFD34C7044504858400774070703B100000049BB00A079E5D57F000041FFD34C7044504858400774070703B2000000 +[d0e96ab43b9] jit-backend-dump} +[d0e96ab496f] {jit-backend-addr +bridge out of Guard 133 has address 7fd5e579d7c6 to 7fd5e579d904 +[d0e96ab5515] jit-backend-addr} +[d0e96ab5c29] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579d7c9 +0 E0FDFFFF +[d0e96ab6851] jit-backend-dump} +[d0e96ab708b] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579d7fb +0 05010000 +[d0e96ab7b87] jit-backend-dump} +[d0e96ab80d1] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579d809 +0 1B010000 +[d0e96ab89df] jit-backend-dump} +[d0e96ab8e97] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579d84b +0 19010000 +[d0e96ab9751] jit-backend-dump} +[d0e96ab9ecb] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579c654 +0 6E110000 +[d0e96aba79d] jit-backend-dump} +[d0e96abb0f9] jit-backend} +[d0e96abba99] {jit-log-opt-bridge +# bridge out of Guard 133 with 19 ops +[p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12] +debug_merge_point(1, ' #21 RETURN_VALUE') ++37: p13 = getfield_gc(p2, descr=) ++48: guard_isnull(p13, descr=) [p0, p1, p2, p13, p5, p6, p7, p8, p9, p10, p11, p12, p4, p3] ++57: i14 = getfield_gc(p2, descr=) ++61: i15 = int_is_true(i14) +guard_false(i15, descr=) [p0, p1, p2, p5, p6, p7, p8, p9, p10, p11, p12, p4, p3] ++71: p16 = getfield_gc(p2, descr=) debug_merge_point(0, ' #65 POP_TOP') debug_merge_point(0, ' #66 JUMP_ABSOLUTE') -+71: i20 = getfield_raw(41389288, descr=) -+79: i22 = int_sub(i20, 2) 
-+83: setfield_raw(41389288, i22, descr=) -setfield_gc(p2, p14, descr=) -+124: setfield_gc(p5, -3, descr=) -+139: i25 = int_lt(i22, 0) -guard_false(i25, descr=) [p0, p1, p6, p7, p8, p9, p10, p11, None, None] +setfield_gc(p2, p11, descr=) ++104: setfield_gc(p5, -3, descr=) ++119: guard_not_invalidated(, descr=) [p0, p1, p6, p7, p8, p9, p10, None, p12, None, None] ++119: i20 = getfield_raw(43780840, descr=) ++127: i22 = int_lt(i20, 0) +guard_false(i22, descr=) [p0, p1, p6, p7, p8, p9, p10, None, p12, None, None] debug_merge_point(0, ' #44 FOR_ITER') -+149: jump(p1, p0, p6, ConstPtr(ptr27), 0, p7, 4, 44, p8, p9, p10, p11, ConstPtr(ptr31), ConstPtr(ptr32), ConstPtr(ptr33), ConstPtr(ptr34), descr=) -+292: --end of the loop-- -[3641b7c8de3] jit-log-opt-bridge} -[3641baccf89] {jit-backend-counts -loop 0:13968 -bridge 9:9112 -loop 1:4466 -loop 2:4485 -loop 3:0 -bridge 21:4266 -loop 4:1967 -loop 5:1 -loop 6:9210 -bridge 81:8304 -bridge 82:1686 -loop 7:536 -loop 8:1432 -bridge 189:1052 -[3641bad1805] jit-backend-counts} +p24 = new_with_vtable(ConstClass(W_StringObject)) ++200: setfield_gc(p24, p12, descr=) ++211: jump(p1, p0, p6, ConstPtr(ptr25), 0, p7, 4, 44, p8, p9, p24, p10, ConstPtr(ptr29), ConstPtr(ptr30), ConstPtr(ptr30), ConstPtr(ptr30), descr=TargetToken(140556696704672)) ++318: --end of the loop-- +[d0e96ad1975] jit-log-opt-bridge} +[d0e96b06a95] {jit-backend +[d0e96b14677] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579d985 +0 488DA50000000049BB583207E8D57F00004D8B3B4983C70149BB583207E8D57F00004D893B4989FF4883C70148897E1848C74620000000004C897E28B80100000048890425107B540141BB503AF20041FFD3B801000000488D65D8415F415E415D415C5B5DC3 +[d0e96b1ee49] jit-backend-dump} +[d0e96b1f53f] {jit-backend-addr +bridge out of Guard 87 has address 7fd5e579d985 to 7fd5e579d9eb +[d0e96b1ffb1] jit-backend-addr} +[d0e96b20623] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579d988 +0 70FFFFFF +[d0e96b211b9] jit-backend-dump} +[d0e96b2187b] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bcd1 +0 B01C0000 +[d0e96b22165] jit-backend-dump} +[d0e96b227eb] jit-backend} +[d0e96b22f75] {jit-log-opt-bridge +# bridge out of Guard 87 with 5 ops +[i0, p1] ++37: i3 = int_add(i0, 1) ++44: setfield_gc(p1, i3, descr=) ++48: setfield_gc(p1, ConstPtr(ptr4), descr=) ++56: setfield_gc(p1, i0, descr=) ++60: finish(1, descr=) ++102: --end of the loop-- +[d0e96b2931f] jit-log-opt-bridge} +[d0e96c4ce97] {jit-backend +[d0e96c5a3fd] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579d9eb +0 488DA50000000049BB703207E8D57F00004D8B3B4983C70149BB703207E8D57F00004D893B4989FF4883C70148897E1848C74620000000004C897E28B80100000048890425107B540141BB503AF20041FFD3B801000000488D65D8415F415E415D415C5B5DC3 +[d0e96c5ce8b] jit-backend-dump} +[d0e96c5d3f3] {jit-backend-addr +bridge out of Guard 89 has address 7fd5e579d9eb to 7fd5e579da51 +[d0e96c5de39] jit-backend-addr} +[d0e96c5e427] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579d9ee +0 70FFFFFF +[d0e96c5efcf] jit-backend-dump} +[d0e96c5f62b] {jit-backend-dump +BACKEND x86_64 +SYS_EXECUTABLE python +CODE_DUMP @7fd5e579bd7d +0 6A1C0000 +[d0e96c5ff59] jit-backend-dump} +[d0e96c605b3] jit-backend} +[d0e96c60d53] {jit-log-opt-bridge +# bridge out of Guard 89 with 5 ops +[i0, p1] ++37: i3 = int_add(i0, 1) ++44: setfield_gc(p1, i3, descr=) ++48: setfield_gc(p1, ConstPtr(ptr4), descr=) ++56: setfield_gc(p1, i0, descr=) ++60: finish(1, descr=) ++102: --end of the loop-- 
+[d0e96c663ab] jit-log-opt-bridge} +[d0e96cb4901] {jit-backend-counts +TargetToken(140556656117424):4647 +TargetToken(140556656117504):9292 +TargetToken(140556656121504):201 +TargetToken(140556656121584):4468 +bridge 16:4446 +bridge 33:4268 +bridge 33:4268 +TargetToken(140556696702032):1 +TargetToken(140556696702112):1938 +bridge 85:2882 +bridge 88:2074 +bridge 86:158 +TargetToken(140556696704672):527 +TargetToken(140556696703072):1411 +bridge 90:1420 +bridge 133:150 +bridge 87:50 +bridge 89:7 +[d0e96cb9b99] jit-backend-counts} From noreply at buildbot.pypy.org Mon Dec 26 22:41:07 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Dec 2011 22:41:07 +0100 (CET) Subject: [pypy-commit] pypy default: split while doing import Message-ID: <20111226214107.3CA2582BA2@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50886:c8ddbb442986 Date: 2011-12-26 23:40 +0200 http://bitbucket.org/pypy/pypy/changeset/c8ddbb442986/ Log: split while doing import diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -385,7 +385,7 @@ parser.postprocess(loop, backend_tp=bname, backend_dump=dump, dump_start=start_ofs)) - loops.append(loop) + loops += split_trace(loop) return log, loops def split_trace(trace): From noreply at buildbot.pypy.org Tue Dec 27 10:37:20 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 10:37:20 +0100 (CET) Subject: [pypy-commit] pypy default: some fixes Message-ID: <20111227093720.56B5D82B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50887:a91f8bee90b0 Date: 2011-12-27 11:37 +0200 http://bitbucket.org/pypy/pypy/changeset/a91f8bee90b0/ Log: some fixes diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -147,17 +147,19 @@ inline_level = None def __init__(self, operations, storage): - if operations[0].name == 'debug_merge_point': - self.inline_level = int(operations[0].args[0]) - m = re.search('\w]+)\. file \'(.+?)\'\. line (\d+)> #(\d+) (\w+)', - operations[0].args[1]) - if m is None: - # a non-code loop, like StrLiteralSearch or something - self.bytecode_name = operations[0].args[1][1:-1] - else: - self.name, self.filename, lineno, bytecode_no, self.bytecode_name = m.groups() - self.startlineno = int(lineno) - self.bytecode_no = int(bytecode_no) + for op in operations: + if op.name == 'debug_merge_point': + self.inline_level = int(op.args[0]) + m = re.search('\w]+)\. file \'(.+?)\'\. 
line (\d+)> #(\d+) (\w+)', + op.args[1]) + if m is None: + # a non-code loop, like StrLiteralSearch or something + self.bytecode_name = op.args[1][1:-1] + else: + self.name, self.filename, lineno, bytecode_no, self.bytecode_name = m.groups() + self.startlineno = int(lineno) + self.bytecode_no = int(bytecode_no) + break self.operations = operations self.storage = storage self.code = storage.disassemble_code(self.filename, self.startlineno, @@ -225,6 +227,7 @@ Also detect inlined functions and make them Function """ stack = [] + seen_dmp = False def getpath(stack): return ",".join([str(len(v)) for v in stack]) @@ -245,11 +248,14 @@ stack = [] for op in operations: if op.name == 'debug_merge_point': - if so_far: - append_to_res(cls.TraceForOpcode(so_far, storage)) - if limit: - break - so_far = [] + if seen_dmp: + if so_far: + append_to_res(cls.TraceForOpcode(so_far, storage)) + if limit: + break + so_far = [] + else: + seen_dmp = True so_far.append(op) if so_far: append_to_res(cls.TraceForOpcode(so_far, storage)) @@ -405,6 +411,7 @@ part = copy(trace) part.operations = trace.operations[start : stop + 1] part.descr = descrs[i] + part.comment = trace.comment parts.append(part) return parts diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -38,6 +38,7 @@ def test_split(): ops = parse(''' [i0] + label() debug_merge_point(0, " #10 ADD") debug_merge_point(0, " #11 SUB") i1 = int_add(i0, 1) @@ -46,7 +47,7 @@ ''') res = Function.from_operations(ops.operations, LoopStorage()) assert len(res.chunks) == 3 - assert len(res.chunks[0].operations) == 1 + assert len(res.chunks[0].operations) == 2 assert len(res.chunks[1].operations) == 2 assert len(res.chunks[2].operations) == 2 assert res.chunks[2].bytecode_no == 11 @@ -96,7 +97,7 @@ i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage()) - assert res.repr() == res.chunks[1].repr() + assert res.repr() == res.chunks[0].repr() def test_lineno(): fname = str(py.path.local(__file__).join('..', 'x.py')) From noreply at buildbot.pypy.org Tue Dec 27 10:44:07 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 10:44:07 +0100 (CET) Subject: [pypy-commit] jitviewer default: fix PATH Message-ID: <20111227094407.4F9FC82B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r180:0768b9ec165c Date: 2011-12-26 23:42 +0200 http://bitbucket.org/pypy/jitviewer/changeset/0768b9ec165c/ Log: fix PATH diff --git a/bin/jitviewer.py b/bin/jitviewer.py --- a/bin/jitviewer.py +++ b/bin/jitviewer.py @@ -172,8 +172,7 @@ BaseServer.__init__ = __init__ def main(): - PATH = os.path.join(os.path.dirname((_jitviewer.__file__))) - print PATH + PATH = os.path.dirname(os.path.dirname((_jitviewer.__file__))) if not '__pypy__' in sys.builtin_module_names: print "Please run it using pypy-c" sys.exit(1) From noreply at buildbot.pypy.org Tue Dec 27 10:44:08 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 10:44:08 +0100 (CET) Subject: [pypy-commit] jitviewer default: some progress. Disable css_class because it's outrageous Message-ID: <20111227094408.5688C82B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r181:18fe3ae6deff Date: 2011-12-27 11:42 +0200 http://bitbucket.org/pypy/jitviewer/changeset/18fe3ae6deff/ Log: some progress. 
Disable css_class because it's outrageous diff --git a/_jitviewer/parser.py b/_jitviewer/parser.py --- a/_jitviewer/parser.py +++ b/_jitviewer/parser.py @@ -46,8 +46,8 @@ s = getattr(self, 'repr_' + self.name, self.repr)() return Html(s) - def _getvar(self, v): - return cssclass(v, v, onmouseover='highlight_var(this)', onmouseout='disable_var(this)') + #def _getvar(self, v): + # return cssclass(v, v, onmouseover='highlight_var(this)', onmouseout='disable_var(this)') for bin_op, name in [('==', 'int_eq'), ('!=', 'int_ne'), diff --git a/templates/loop.html b/templates/loop.html --- a/templates/loop.html +++ b/templates/loop.html @@ -10,18 +10,20 @@ {% for chunk in sourceline.chunks %} {% if chunk.is_bytecode %} {{chunk.html_repr()}}
    - {% for op in chunk.operations[1:] %} - {% if op.bridge %} - {{op.html_repr()}} >>show bridge (taken {{op.percentage}}%)
    - {% if op.asm %} -

    {{op.asm}}

    + {% for op in chunk.operations %} + {% if op.name != "debug_merge_point" %} + {% if op.bridge %} + {{op.html_repr()}} >>show bridge (taken {{op.percentage}}%)
    + {% if op.asm %} +

    {{op.asm}}

    + {% endif %} + {% else %} + {{op.html_repr()}}
    + {% if op.asm %} +

    {{op.asm}}

    + {% endif %} {% endif %} - {% else %} - {{op.html_repr()}}
    - {% if op.asm %} -

    {{op.asm}}

    - {% endif %} - {% endif %} + {% endif %} {% endfor %} {% else %} {{(chunk.html_repr())|safe}}
    From noreply at buildbot.pypy.org Tue Dec 27 11:11:31 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Tue, 27 Dec 2011 11:11:31 +0100 (CET) Subject: [pypy-commit] pypy jit-improve-nested-loops: prevent a BadVirtualState exception from escaping here, the loop will be discovered to be invalid later in any case Message-ID: <20111227101131.456D382B1D@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-improve-nested-loops Changeset: r50888:3eafa11552d5 Date: 2011-12-27 11:11 +0100 http://bitbucket.org/pypy/pypy/changeset/3eafa11552d5/ Log: prevent a BadVirtualState exception from escaping here, the loop will be discovered to be invalid later in any case diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -386,6 +386,17 @@ """ self.optimize_loop(ops, expected) + def test_virtual_as_field_of_forced_box(self): + ops = """ + [p0] + pv1 = new_with_vtable(ConstClass(node_vtable)) + label(pv1, p0) + pv2 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(pv2, pv1, descr=valuedescr) + jump(pv1, pv2) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) class OptRenameStrlen(Optimization): def propagate_forward(self, op): diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -409,7 +409,13 @@ if self.level == LEVEL_CONSTANT: return assert 0 <= self.position_in_notvirtuals - boxes[self.position_in_notvirtuals] = value.force_box(optimizer) + if optimizer: + box = value.force_box(optimizer) + else: + if value.is_virtual(): + raise BadVirtualState + box = value.get_key_box() + boxes[self.position_in_notvirtuals] = box def _enum(self, virtual_state): if self.level == LEVEL_CONSTANT: @@ -471,8 +477,14 @@ optimizer = optimizer.optearlyforce assert len(values) == len(self.state) inputargs = [None] * len(self.notvirtuals) + + # We try twice. The first time around we allow boxes to be forced + # which might change the virtual state if the box appear in more + # than one place among the inputargs. 
for i in range(len(values)): self.state[i].enum_forced_boxes(inputargs, values[i], optimizer) + for i in range(len(values)): + self.state[i].enum_forced_boxes(inputargs, values[i], None) if keyboxes: for i in range(len(values)): From noreply at buildbot.pypy.org Tue Dec 27 11:24:14 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 11:24:14 +0100 (CET) Subject: [pypy-commit] pypy default: some simplifications Message-ID: <20111227102414.AF0B482B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50889:7924477d6680 Date: 2011-12-27 12:23 +0200 http://bitbucket.org/pypy/pypy/changeset/7924477d6680/ Log: some simplifications diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -24,19 +24,13 @@ self.failargs = failargs def getarg(self, i): - return self._getvar(self.args[i]) + return self.args[i] def getargs(self): - return [self._getvar(v) for v in self.args] + return self.args def getres(self): - return self._getvar(self.res) - - def getdescr(self): - return self.descr - - def _getvar(self, v): - return v + return self.res def is_guard(self): return self._is_guard @@ -44,7 +38,7 @@ def repr(self): args = self.getargs() if self.descr is not None: - args.append('descr=%s' % self.getdescr()) + args.append('descr=%s' % self.descr) arglist = ', '.join(args) if self.res is not None: return '%s = %s(%s)' % (self.getres(), self.name, arglist) @@ -53,8 +47,6 @@ def __repr__(self): return self.repr() - ## return '<%s (%s)>' % (self.name, ', '.join([repr(a) - ## for a in self.args])) class SimpleParser(OpParser): From noreply at buildbot.pypy.org Tue Dec 27 11:26:38 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 11:26:38 +0100 (CET) Subject: [pypy-commit] jitviewer default: simplify and reintroduce cssclass, this time saneish Message-ID: <20111227102638.8A68D82B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r182:7d2ec216e348 Date: 2011-12-27 12:25 +0200 http://bitbucket.org/pypy/jitviewer/changeset/7d2ec216e348/ Log: simplify and reintroduce cssclass, this time saneish diff --git a/_jitviewer/parser.py b/_jitviewer/parser.py --- a/_jitviewer/parser.py +++ b/_jitviewer/parser.py @@ -2,20 +2,6 @@ import cgi from pypy.tool.jitlogparser import parser -class Html(str): - def __html__(self): - return self - - def plaintext(self): - # This is not a general way to strip tags, but it's good enough to use - # in tests - s = re.sub('<.*?>', '', self) - s = s.replace("<", "<") - s = s.replace(">", ">") - s = s.replace("&", "&") - return s - - def cssclass(cls, s, **kwds): cls = re.sub("[^\w]", "_", cls) attrs = ['%s="%s"' % (name, value) for name, value in kwds.iteritems()] @@ -26,9 +12,15 @@ def _new_binop(name): name = cgi.escape(name) def f(self): - return '%s = %s %s %s' % (self.getres(), self.getarg(0), name, self.getarg(1)) + return '%s = %s %s %s' % (self.wrap_html(self.res), + self.wrap_html(self.args[0]), + name, self.wrap_html(self.args[1])) return f +class Html(str): + def __html__(self): + return self + class OpHtml(parser.Op): """ Subclass of Op with human-friendly html representation @@ -43,11 +35,11 @@ return "single-operation" def html_repr(self): - s = getattr(self, 'repr_' + self.name, self.repr)() + s = getattr(self, 'repr_' + self.name, self.default_repr)() return Html(s) - #def _getvar(self, v): - # return cssclass(v, v, onmouseover='highlight_var(this)', 
onmouseout='disable_var(this)') + def wrap_html(self, v): + return cssclass(v, v, onmouseover='highlight_var(this)', onmouseout='disable_var(this)') for bin_op, name in [('==', 'int_eq'), ('!=', 'int_ne'), @@ -67,20 +59,22 @@ locals()['repr_' + name] = _new_binop(bin_op) def repr_guard_true(self): - return 'guard(%s is true)' % self.getarg(0) + return 'guard(%s is true)' % self.wrap_html(self.args[0]) def repr_guard_false(self): - return 'guard(%s is false)' % self.getarg(0) + return 'guard(%s is false)' % self.wrap_html(self.args[0]) def repr_guard_value(self): - return 'guard(%s == %s)' % (self.getarg(0), self.getarg(1)) + return 'guard(%s == %s)' % (self.wrap_html(self.args[0]), + self.wrap_html(self.args[1])) def repr_guard_isnull(self): - return 'guard(%s is null)' % self.getarg(0) + return 'guard(%s is null)' % self.wrap_html(self.args[0]) def repr_getfield_raw(self): name, field = self.descr.split(' ')[1].rsplit('.', 1) - return '%s = ((%s)%s).%s' % (self.getres(), name, self.getarg(0), field[2:]) + return '%s = ((%s)%s).%s' % (self.wrap_html(self.res), name, + self.wrap_html(self.args[0]), field[2:]) def repr_getfield_gc(self): fullname, field = self.descr.split(' ')[1].rsplit('.', 1) @@ -95,29 +89,40 @@ field = cssclass('fieldname', field) obj = self.getarg(0) - return '%s = ((%s.%s)%s).%s' % (self.getres(), namespace, classname, obj, field) + return '%s = ((%s.%s)%s).%s' % (self.wrap_html(self.res), + namespace, classname, obj, field) def repr_getfield_gc_pure(self): return self.repr_getfield_gc() + " [pure]" def repr_setfield_raw(self): name, field = self.descr.split(' ')[1].rsplit('.', 1) - return '((%s)%s).%s = %s' % (name, self.getarg(0), field[2:], self.getarg(1)) + return '((%s)%s).%s = %s' % (name, self.wrap_html(self.args[0]), + field[2:], self.wrap_html(self.args[1])) def repr_setfield_gc(self): name, field = self.descr.split(' ')[1].rsplit('.', 1) - return '((%s)%s).%s = %s' % (name, self.getarg(0), field, self.getarg(1)) + return '((%s)%s).%s = %s' % (name, self.wrap_html(self.args[0]), + field, self.wrap_html(self.args[1])) def repr_jump(self): no = int(re.search("\d+", self.descr).group(0)) return ("" % no + - self.repr() + "") + self.default_repr() + "") + + def default_repr(self): + args = [self.wrap_html(arg) for arg in self.args] + if self.descr is not None: + args.append('descr=%s' % cgi.escape(self.descr)) + arglist = ', '.join(args) + if self.res is not None: + return '%s = %s(%s)' % (self.wrap_html(self.res), self.name, + arglist) + else: + return '%s(%s)' % (self.name, arglist) repr_call_assembler = repr_jump - def getdescr(self): - return cgi.escape(self.descr) - #def repr_call_assembler(self): # xxxx diff --git a/_jitviewer/test/test_parser.py b/_jitviewer/test/test_parser.py --- a/_jitviewer/test/test_parser.py +++ b/_jitviewer/test/test_parser.py @@ -1,4 +1,4 @@ -from _jitviewer.parser import ParserWithHtmlRepr, parse_log_counts, cssclass +from _jitviewer.parser import ParserWithHtmlRepr, cssclass import py def parse(input): @@ -11,18 +11,8 @@ guard_true(i9, descr=) [] i13 = getfield_raw(151937600, descr=) ''').operations - assert ops[0].html_repr().plaintext() == 'i9 = i7 < 1003' - assert ops[2].html_repr().plaintext() == 'i13 = ((pypysig_long_struct)151937600).value' - -def test_parse_log_count(): - py.test.skip('fixme') - class Loop(object): - pass - - loops = [Loop() for i in range(13)] - nums = parse_log_counts(LINES, loops) - assert nums[5] == 2000 - assert loops[9].count == 2000 + assert '<' in ops[0].html_repr() + assert 'pypysig_long_struct' in 
ops[2].html_repr() def test_highlight_var(): ops = parse(''' From noreply at buildbot.pypy.org Tue Dec 27 12:17:16 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 12:17:16 +0100 (CET) Subject: [pypy-commit] pypy default: append debugging code at the beginning even if for loops Message-ID: <20111227111716.AAEA182B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50890:e105505cc286 Date: 2011-12-27 13:16 +0200 http://bitbucket.org/pypy/pypy/changeset/e105505cc286/ Log: append debugging code at the beginning even if for loops diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -679,9 +679,8 @@ looptoken._x86_debug_checksum = s newoperations = [] - if bridge: - self._append_debugging_code(newoperations, bridge, number, - None) + self._append_debugging_code(newoperations, bridge, number, + None) for op in operations: newoperations.append(op) if op.getopnum() == rop.LABEL: From noreply at buildbot.pypy.org Tue Dec 27 12:32:14 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 12:32:14 +0100 (CET) Subject: [pypy-commit] pypy default: more small fixes Message-ID: <20111227113214.A969082B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50891:8417bf8c3a59 Date: 2011-12-27 13:27 +0200 http://bitbucket.org/pypy/pypy/changeset/8417bf8c3a59/ Log: more small fixes diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -27,7 +27,7 @@ return self.args[i] def getargs(self): - return self.args + return self.args[:] def getres(self): return self.res @@ -159,7 +159,7 @@ def repr(self): if self.filename is None: - return "Unknown" + return self.bytecode_name return "%s, file '%s', line %d" % (self.name, self.filename, self.startlineno) diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -33,7 +33,7 @@ ''') res = Function.from_operations(ops.operations, LoopStorage()) assert len(res.chunks) == 1 - assert res.chunks[0].repr() + assert 'SomeRandomStuff' in res.chunks[0].repr() def test_split(): ops = parse(''' From noreply at buildbot.pypy.org Tue Dec 27 12:32:15 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 12:32:15 +0100 (CET) Subject: [pypy-commit] pypy default: remove some nonsense Message-ID: <20111227113215.D265282B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50892:d69a1af57f0c Date: 2011-12-27 13:28 +0200 http://bitbucket.org/pypy/pypy/changeset/d69a1af57f0c/ Log: remove some nonsense diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -112,33 +112,25 @@ """ from pypy.jit.metainterp.optimizeopt import optimize_trace - history = metainterp.history metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd - if False: - part = partial_trace - assert False - procedur_token = metainterp.get_procedure_token(greenkey) - assert procedure_token - all_target_tokens = [] - else: - jitcell_token = make_jitcell_token(jitdriver_sd) - part = create_empty_loop(metainterp) - part.inputargs = inputargs[:] - h_ops = history.operations - part.resume_at_jump_descr = 
resume_at_jump_descr - part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ - [h_ops[i].clone() for i in range(start, len(h_ops))] + \ - [ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token)] + jitcell_token = make_jitcell_token(jitdriver_sd) + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + h_ops = history.operations + part.resume_at_jump_descr = resume_at_jump_descr + part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ + [h_ops[i].clone() for i in range(start, len(h_ops))] + \ + [ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token)] - try: - optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) - except InvalidLoop: - return None - target_token = part.operations[0].getdescr() - assert isinstance(target_token, TargetToken) - all_target_tokens = [target_token] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens = [target_token] loop = create_empty_loop(metainterp) loop.inputargs = part.inputargs From noreply at buildbot.pypy.org Tue Dec 27 12:32:17 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 12:32:17 +0100 (CET) Subject: [pypy-commit] pypy default: pass the loop name around Message-ID: <20111227113217.0D09382B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50893:2971cd3e5323 Date: 2011-12-27 13:31 +0200 http://bitbucket.org/pypy/pypy/changeset/2971cd3e5323/ Log: pass the loop name around diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -114,6 +114,7 @@ metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd + history = metainterp.history jitcell_token = make_jitcell_token(jitdriver_sd) part = create_empty_loop(metainterp) @@ -311,7 +312,10 @@ metainterp_sd.stats.compiled() metainterp_sd.log("compiled new " + type) # - metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, type, ops_offset) + loopname = jitdriver_sd.warmstate.get_location_str(greenkey) + metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, + type, ops_offset, + name=loopname) # if metainterp_sd.warmrunnerdesc is not None: # for tests metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(original_jitcell_token) diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py --- a/pypy/jit/metainterp/logger.py +++ b/pypy/jit/metainterp/logger.py @@ -13,14 +13,14 @@ self.metainterp_sd = metainterp_sd self.guard_number = guard_number - def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None): + def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None, name=''): if type is None: debug_start("jit-log-noopt-loop") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-loop") else: debug_start("jit-log-opt-loop") - debug_print("# Loop", number, ":", type, + debug_print("# Loop", number, '(%s)' % name , ":", type, "with", len(operations), "ops") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-opt-loop") From noreply at buildbot.pypy.org Tue Dec 27 12:36:15 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 12:36:15 +0100 (CET) Subject: 
[pypy-commit] pypy default: fix the test Message-ID: <20111227113615.8FC9B82B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50894:17fd3576a153 Date: 2011-12-27 13:36 +0200 http://bitbucket.org/pypy/pypy/changeset/17fd3576a153/ Log: fix the test diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -18,7 +18,7 @@ self.seen.append((inputargs, operations, token)) class FakeLogger(object): - def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None): + def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None, name=''): pass def repr_of_resop(self, op): From noreply at buildbot.pypy.org Tue Dec 27 12:44:08 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 12:44:08 +0100 (CET) Subject: [pypy-commit] pypy default: provide get_printable_location for numpy Message-ID: <20111227114408.4560682B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50895:59d04a0ce4a7 Date: 2011-12-27 13:43 +0200 http://bitbucket.org/pypy/pypy/changeset/59d04a0ce4a7/ Log: provide get_printable_location for numpy diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1,6 +1,6 @@ from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped +from pypy.interpreter.gateway import interp2app, NoneNotWrapped from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import interp_ufuncs, interp_dtype, signature from pypy.module.micronumpy.strides import calculate_slice_strides @@ -14,22 +14,26 @@ numpy_driver = jit.JitDriver( greens=['shapelen', 'sig'], virtualizables=['frame'], - reds=['result_size', 'frame', 'ri', 'self', 'result'] + reds=['result_size', 'frame', 'ri', 'self', 'result'], + get_printable_location=signature.new_printable_location('numpy'), ) all_driver = jit.JitDriver( greens=['shapelen', 'sig'], virtualizables=['frame'], - reds=['frame', 'self', 'dtype'] + reds=['frame', 'self', 'dtype'], + get_printable_location=signature.new_printable_location('all'), ) any_driver = jit.JitDriver( greens=['shapelen', 'sig'], virtualizables=['frame'], - reds=['frame', 'self', 'dtype'] + reds=['frame', 'self', 'dtype'], + get_printable_location=signature.new_printable_location('any'), ) slice_driver = jit.JitDriver( greens=['shapelen', 'sig'], virtualizables=['frame'], - reds=['self', 'frame', 'source', 'res_iter'] + reds=['self', 'frame', 'source', 'res_iter'], + get_printable_location=signature.new_printable_location('slice'), ) def _find_shape_and_elems(space, w_iterable): @@ -291,7 +295,8 @@ def _reduce_argmax_argmin_impl(op_name): reduce_driver = jit.JitDriver( greens=['shapelen', 'sig'], - reds=['result', 'idx', 'frame', 'self', 'cur_best', 'dtype'] + reds=['result', 'idx', 'frame', 'self', 'cur_best', 'dtype'], + get_printable_location=signature.new_printable_location(op_name), ) def loop(self): sig = self.find_sig() diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -1,9 +1,10 @@ from pypy.interpreter.baseobjspace 
import Wrappable from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import interp_boxes, interp_dtype, types -from pypy.module.micronumpy.signature import ReduceSignature, ScalarSignature, find_sig +from pypy.module.micronumpy import interp_boxes, interp_dtype +from pypy.module.micronumpy.signature import ReduceSignature, ScalarSignature,\ + find_sig, new_printable_location from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name @@ -11,7 +12,8 @@ reduce_driver = jit.JitDriver( greens = ['shapelen', "sig"], virtualizables = ["frame"], - reds = ["frame", "self", "dtype", "value", "obj"] + reds = ["frame", "self", "dtype", "value", "obj"], + get_printable_location=new_printable_location('reduce'), ) class W_Ufunc(Wrappable): diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -5,6 +5,11 @@ from pypy.module.micronumpy.strides import calculate_slice_strides from pypy.rlib.jit import hint, unroll_safe, promote +def new_printable_location(driver_name): + def get_printable_location(shapelen, sig): + return sig.debug_repr() + ' [%d dims,%s]' % (shapelen, driver_name) + return get_printable_location + def sigeq(one, two): return one.eq(two) From noreply at buildbot.pypy.org Tue Dec 27 12:45:42 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 12:45:42 +0100 (CET) Subject: [pypy-commit] pypy default: fix another test Message-ID: <20111227114542.10EC782B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50896:13448846544c Date: 2011-12-27 13:45 +0200 http://bitbucket.org/pypy/pypy/changeset/13448846544c/ Log: fix another test diff --git a/pypy/jit/metainterp/test/test_logger.py b/pypy/jit/metainterp/test/test_logger.py --- a/pypy/jit/metainterp/test/test_logger.py +++ b/pypy/jit/metainterp/test/test_logger.py @@ -180,7 +180,7 @@ def test_intro_loop(self): bare_logger = logger.Logger(self.make_metainterp_sd()) output = capturing(bare_logger.log_loop, [], [], 1, "foo") - assert output.splitlines()[0] == "# Loop 1 : foo with 0 ops" + assert output.splitlines()[0] == "# Loop 1 () : foo with 0 ops" pure_parse(output) def test_intro_bridge(self): From noreply at buildbot.pypy.org Tue Dec 27 12:54:41 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 12:54:41 +0100 (CET) Subject: [pypy-commit] pypy default: remove some unnecessary dependencies Message-ID: <20111227115441.5FE6A82B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50897:ecf82a6d3959 Date: 2011-12-27 13:54 +0200 http://bitbucket.org/pypy/pypy/changeset/ecf82a6d3959/ Log: remove some unnecessary dependencies diff --git a/pypy/translator/sandbox/pypy_interact.py b/pypy/translator/sandbox/pypy_interact.py --- a/pypy/translator/sandbox/pypy_interact.py +++ b/pypy/translator/sandbox/pypy_interact.py @@ -26,7 +26,8 @@ from pypy.translator.sandbox.sandlib import SimpleIOSandboxedProc from pypy.translator.sandbox.sandlib import VirtualizedSandboxedProc from pypy.translator.sandbox.vfs import Dir, RealDir, RealFile -from pypy.tool.lib_pypy import LIB_ROOT +import pypy +LIB_ROOT = os.path.dirname(os.path.dirname(pypy.__file__)) class 
PyPySandboxedProc(VirtualizedSandboxedProc, SimpleIOSandboxedProc): debug = True diff --git a/pypy/translator/sandbox/sandlib.py b/pypy/translator/sandbox/sandlib.py --- a/pypy/translator/sandbox/sandlib.py +++ b/pypy/translator/sandbox/sandlib.py @@ -30,8 +30,9 @@ # load(). Also, marshal.load(f) blocks with the GIL held when # f is a pipe with no data immediately avaialble, preventing the # _waiting_thread to run. -from pypy.tool.lib_pypy import import_from_lib_pypy -marshal = import_from_lib_pypy('marshal') +import pypy +marshal = py.path.local(pypy.__file__).join('..', '..', 'lib_pypy', + 'marshal.py').pyimport() # Non-marshal result types RESULTTYPE_STATRESULT = object() From noreply at buildbot.pypy.org Tue Dec 27 13:14:43 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 13:14:43 +0100 (CET) Subject: [pypy-commit] pypy default: Fix assembler to differentiate between 3 kinds of counters Message-ID: <20111227121443.B4EA682B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50898:b67ec3f6c00f Date: 2011-12-27 14:13 +0200 http://bitbucket.org/pypy/pypy/changeset/b67ec3f6c00f/ Log: Fix assembler to differentiate between 3 kinds of counters diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -59,7 +59,8 @@ self.is_guard_not_invalidated = is_guard_not_invalidated DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed), - ('bridge', lltype.Signed), # 0 or 1 + ('type', lltype.Char), # 'b'ridge, 'l'abel or + # 'e'ntry point ('number', lltype.Signed)) class Assembler386(object): @@ -150,10 +151,12 @@ debug_start('jit-backend-counts') for i in range(len(self.loop_run_counters)): struct = self.loop_run_counters[i] - if not struct.bridge: + if struct.type == 'l': prefix = 'TargetToken(%d)' % struct.number + elif struct.type == 'b': + prefix = 'bridge ' + str(struct.number) else: - prefix = 'bridge ' + str(struct.number) + prefix = 'entry ' + str(struct.number) debug_print(prefix + ':' + str(struct.i)) debug_stop('jit-backend-counts') @@ -425,7 +428,7 @@ self.setup(looptoken) if log: operations = self._inject_debugging_code(looptoken, operations, - False, looptoken.number) + 'e', looptoken.number) regalloc = RegAlloc(self, self.cpu.translate_support_code) # @@ -492,7 +495,7 @@ self.setup(original_loop_token) if log: operations = self._inject_debugging_code(faildescr, operations, - True, descr_number) + 'b', descr_number) arglocs = self.rebuild_faillocs_from_descr(failure_recovery) if not we_are_translated(): @@ -599,15 +602,15 @@ return self.mc.materialize(self.cpu.asmmemmgr, allblocks, self.cpu.gc_ll_descr.gcrootmap) - def _register_counter(self, bridge, number, token): + def _register_counter(self, tp, number, token): # YYY very minor leak -- we need the counters to stay alive # forever, just because we want to report them at the end # of the process struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', track_allocation=False) struct.i = 0 - struct.bridge = int(bridge) - if bridge: + struct.type = tp + if tp == 'b' or tp == 'e': struct.number = number else: assert token @@ -657,8 +660,8 @@ targettoken._x86_loop_code += rawstart self.target_tokens_currently_compiling = None - def _append_debugging_code(self, operations, bridge, number, token): - counter = self._register_counter(bridge, number, token) + def _append_debugging_code(self, operations, tp, number, token): + counter = self._register_counter(tp, number, token) c_adr = 
ConstInt(rffi.cast(lltype.Signed, counter)) box = BoxInt() box2 = BoxInt() @@ -670,7 +673,7 @@ operations.extend(ops) @specialize.argtype(1) - def _inject_debugging_code(self, looptoken, operations, bridge, number): + def _inject_debugging_code(self, looptoken, operations, tp, number): if self._debug: # before doing anything, let's increase a counter s = 0 @@ -679,12 +682,12 @@ looptoken._x86_debug_checksum = s newoperations = [] - self._append_debugging_code(newoperations, bridge, number, + self._append_debugging_code(newoperations, tp, number, None) for op in operations: newoperations.append(op) if op.getopnum() == rop.LABEL: - self._append_debugging_code(newoperations, bridge, number, + self._append_debugging_code(newoperations, 'l', number, op.getdescr()) operations = newoperations return operations diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -546,6 +546,8 @@ struct = self.cpu.assembler.loop_run_counters[0] assert struct.i == 1 struct = self.cpu.assembler.loop_run_counters[1] + assert struct.i == 1 + struct = self.cpu.assembler.loop_run_counters[2] assert struct.i == 9 self.cpu.finish_once() finally: From noreply at buildbot.pypy.org Tue Dec 27 13:23:10 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 13:23:10 +0100 (CET) Subject: [pypy-commit] pypy default: fix the test Message-ID: <20111227122310.287F082B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50899:6d97be67953c Date: 2011-12-27 14:22 +0200 http://bitbucket.org/pypy/pypy/changeset/6d97be67953c/ Log: fix the test diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -552,9 +552,10 @@ self.cpu.finish_once() finally: debug._log = None + l0 = ('debug_print', 'entry -1:1') l1 = ('debug_print', preambletoken.repr_of_descr() + ':1') l2 = ('debug_print', targettoken.repr_of_descr() + ':9') - assert ('jit-backend-counts', [l1, l2]) in dlog + assert ('jit-backend-counts', [l0, l1, l2]) in dlog def test_debugger_checksum(self): loop = """ From noreply at buildbot.pypy.org Tue Dec 27 13:59:58 2011 From: noreply at buildbot.pypy.org (ned) Date: Tue, 27 Dec 2011 13:59:58 +0100 (CET) Subject: [pypy-commit] pypy nedbat-sandbox-2: Merged d9b372cf25b0 to nedbat-sandbox Message-ID: <20111227125958.0F49C82B1D@wyvern.cs.uni-duesseldorf.de> Author: Ned Batchelder Branch: nedbat-sandbox-2 Changeset: r50900:7aa1376fb3bc Date: 2011-12-16 08:34 -0500 http://bitbucket.org/pypy/pypy/changeset/7aa1376fb3bc/ Log: Merged d9b372cf25b0 to nedbat-sandbox diff --git a/lib-python/modified-2.7/ctypes/test/test_callbacks.py b/lib-python/modified-2.7/ctypes/test/test_callbacks.py --- a/lib-python/modified-2.7/ctypes/test/test_callbacks.py +++ b/lib-python/modified-2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test class Callbacks(unittest.TestCase): @@ -98,6 +99,7 @@ ## self.check_type(c_char_p, "abc") ## self.check_type(c_char_p, "def") + @xfail def test_pyobject(self): o = () from sys import getrefcount as grc diff --git a/lib-python/modified-2.7/ctypes/test/test_libc.py b/lib-python/modified-2.7/ctypes/test/test_libc.py --- a/lib-python/modified-2.7/ctypes/test/test_libc.py +++ b/lib-python/modified-2.7/ctypes/test/test_libc.py @@ -25,7 
+25,10 @@ lib.my_qsort(chars, len(chars)-1, sizeof(c_char), comparefunc(sort)) self.assertEqual(chars.raw, " ,,aaaadmmmnpppsss\x00") - def test_no_more_xfail(self): + def SKIPPED_test_no_more_xfail(self): + # We decided to not explicitly support the whole ctypes-2.7 + # and instead go for a case-by-case, demand-driven approach. + # So this test is skipped instead of failing. import socket import ctypes.test self.assertTrue(not hasattr(ctypes.test, 'xfail'), diff --git a/pypy/doc/config/objspace.std.withspecialisedtuple.txt b/pypy/doc/config/objspace.std.withspecialisedtuple.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withspecialisedtuple.txt @@ -0,0 +1,3 @@ +Use "specialized tuples", a custom implementation for some common kinds +of tuples. Currently limited to tuples of length 2, in three variants: +(int, int), (float, float), and a generic (object, object). diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -190,8 +190,8 @@ def is_w(self, space, w_other): return self is w_other - def unique_id(self, space): - return space.wrap(compute_unique_id(self)) + def immutable_unique_id(self, space): + return None def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) @@ -706,7 +706,10 @@ return w_two.is_w(self, w_one) def id(self, w_obj): - return w_obj.unique_id(self) + w_result = w_obj.immutable_unique_id(self) + if w_result is None: + w_result = self.wrap(compute_unique_id(w_obj)) + return w_result def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -54,7 +54,11 @@ # Hash support def default_identity_hash(space, w_obj): - return space.wrap(compute_identity_hash(w_obj)) + w_unique_id = w_obj.immutable_unique_id(space) + if w_unique_id is None: # common case + return space.wrap(compute_identity_hash(w_obj)) + else: + return space.hash(w_unique_id) # ____________________________________________________________ # diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -163,7 +163,7 @@ if not we_are_translated() and self.box_types is not None: assert isinstance(v, TempBox) or v.type in self.box_types - def possibly_free_var(self, v, _hint_dont_reuse_quickly=False): + def possibly_free_var(self, v): """ If v is stored in a register and v is not used beyond the current position, then free it. Must be called at some point for all variables that might be in registers. 
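# A minimal sketch of the longevity-based freeing described in the docstring
# above, assuming a much-simplified register manager; TinyRegAlloc and its
# register names are invented for illustration and are not part of the PyPy code.
class TinyRegAlloc(object):
    def __init__(self, longevity):
        self.longevity = longevity        # var -> (first_use, last_use)
        self.position = 0                 # index of the operation being allocated
        self.reg_bindings = {}            # var -> register currently holding it
        self.free_regs = ['eax', 'ecx', 'edx']

    def possibly_free_var(self, v):
        # Once the current position is past v's last use, release its register.
        # The register is appended to free_regs, i.e. it is reused last; the
        # _hint_dont_reuse_quickly flag removed in the diff above let callers
        # push it to the front instead.
        last_use = self.longevity.get(v, (0, -1))[1]
        if last_use <= self.position and v in self.reg_bindings:
            self.free_regs.append(self.reg_bindings.pop(v))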
@@ -173,10 +173,7 @@ return if v not in self.longevity or self.longevity[v][1] <= self.position: if v in self.reg_bindings: - if _hint_dont_reuse_quickly: - self.free_regs.insert(0, self.reg_bindings[v]) - else: - self.free_regs.append(self.reg_bindings[v]) + self.free_regs.append(self.reg_bindings[v]) del self.reg_bindings[v] if self.frame_manager is not None: self.frame_manager.mark_as_free(v) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -38,6 +38,7 @@ from pypy.jit.backend.x86.jump import remap_frame_layout from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter import longlong +from pypy.rlib.rarithmetic import intmask # darwin requires the stack to be 16 bytes aligned on calls. Same for gcc 4.5.0, # better safe than sorry @@ -837,7 +838,7 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.SUB_ri(esp.value, 8) # = size of doubles self.mc.MOVSD_sx(0, loc.value) - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.PUSH_b(get_ebp_ofs(loc.position)) self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) @@ -848,13 +849,25 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.MOVSD_xs(loc.value, 0) self.mc.ADD_ri(esp.value, 8) # = size of doubles - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.POP_b(get_ebp_ofs(loc.position + 1)) self.mc.POP_b(get_ebp_ofs(loc.position)) else: self.mc.POP(loc) + def regalloc_immedmem2mem(self, from_loc, to_loc): + # move a ConstFloatLoc directly to a StackLoc, as two MOVs + # (even on x86-64, because the immediates are encoded as 32 bits) + assert isinstance(from_loc, ConstFloatLoc) + assert isinstance(to_loc, StackLoc) + low_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[0] + high_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[1] + low_part = intmask(low_part) + high_part = intmask(high_part) + self.mc.MOV_bi(to_loc.value, low_part) + self.mc.MOV_bi(to_loc.value + 4, high_part) + def regalloc_perform(self, op, arglocs, resloc): genop_list[op.getopnum()](self, op, arglocs, resloc) @@ -1006,18 +1019,18 @@ self.mc.MOVSD_sx(p, loc.value) else: self.mc.MOV_sr(p, loc.value) - p += round_up_to_4(loc.width) + p += loc.get_width() p = 0 for i in range(start, n): loc = arglocs[i] if not isinstance(loc, RegLoc): - if loc.width == 8: + if loc.get_width() == 8: self.mc.MOVSD(xmm0, loc) self.mc.MOVSD_sx(p, xmm0.value) else: self.mc.MOV(tmp, loc) self.mc.MOV_sr(p, tmp.value) - p += round_up_to_4(loc.width) + p += loc.get_width() self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) @@ -2070,7 +2083,7 @@ argtypes=op.getdescr().get_arg_types(), callconv=op.getdescr().get_call_conv()) - if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: + if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.type == FLOAT: # a float or a long long return if op.getdescr().get_return_type() == 'L': self.mc.MOV_br(resloc.value, eax.value) # long long @@ -2555,11 +2568,6 @@ num = getattr(rop, opname.upper()) genop_list[num] = value -def round_up_to_4(size): - if size < 4: - return 4 - return size - # XXX: ri386 migration shims: def addr_add(reg_or_imm1, reg_or_imm2, offset=0, scale=0): return AddressLoc(reg_or_imm1, reg_or_imm2, scale, offset) diff --git 
a/pypy/jit/backend/x86/jump.py b/pypy/jit/backend/x86/jump.py --- a/pypy/jit/backend/x86/jump.py +++ b/pypy/jit/backend/x86/jump.py @@ -1,6 +1,6 @@ import sys from pypy.tool.pairtype import extendabletype -from pypy.jit.backend.x86.regloc import ImmedLoc, StackLoc +from pypy.jit.backend.x86.regloc import ImmediateAssemblerLocation, StackLoc def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg): pending_dests = len(dst_locations) @@ -12,7 +12,7 @@ srccount[key] = 0 for i in range(len(dst_locations)): src = src_locations[i] - if isinstance(src, ImmedLoc): + if isinstance(src, ImmediateAssemblerLocation): continue key = src._getregkey() if key in srccount: @@ -31,7 +31,7 @@ srccount[key] = -1 # means "it's done" pending_dests -= 1 src = src_locations[i] - if not isinstance(src, ImmedLoc): + if not isinstance(src, ImmediateAssemblerLocation): key = src._getregkey() if key in srccount: srccount[key] -= 1 @@ -66,6 +66,13 @@ def _move(assembler, src, dst, tmpreg): if dst.is_memory_reference() and src.is_memory_reference(): + if isinstance(src, ImmediateAssemblerLocation): + assembler.regalloc_immedmem2mem(src, dst) + return + if tmpreg is None: + assembler.regalloc_push(src) + assembler.regalloc_pop(dst) + return assembler.regalloc_mov(src, tmpreg) src = tmpreg assembler.regalloc_mov(src, dst) @@ -87,7 +94,7 @@ dstloc = dst_locations2[i] if isinstance(loc, StackLoc): key = loc._getregkey() - if (key in dst_keys or (loc.width > WORD and + if (key in dst_keys or (loc.get_width() > WORD and (key + WORD) in dst_keys)): assembler.regalloc_push(loc) extrapushes.append(dstloc) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -130,9 +130,9 @@ @staticmethod def frame_pos(i, box_type): if IS_X86_32 and box_type == FLOAT: - return StackLoc(i, get_ebp_ofs(i+1), 2, box_type) + return StackLoc(i, get_ebp_ofs(i+1), box_type) else: - return StackLoc(i, get_ebp_ofs(i), 1, box_type) + return StackLoc(i, get_ebp_ofs(i), box_type) @staticmethod def frame_size(box_type): if IS_X86_32 and box_type == FLOAT: @@ -174,12 +174,11 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity = self._compute_vars_longevity(inputargs, operations) - self.longevity = longevity - self.rm = gpr_reg_mgr_cls(longevity, + self._compute_vars_longevity(inputargs, operations) + self.rm = gpr_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) - self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, + self.xrm = xmm_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) return operations @@ -481,7 +480,7 @@ # only to guard operations or to jump or to finish produced = {} last_used = {} - #useful = {} + last_real_usage = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -492,10 +491,13 @@ opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) - #if opnum != rop.JUMP and opnum != rop.FINISH: - # useful[arg] = None - if isinstance(arg, Box) and arg not in last_used: + if not isinstance(arg, Box): + continue + if arg not in last_used: last_used[arg] = i + if opnum != rop.JUMP and opnum != rop.LABEL: + if arg not in last_real_usage: + last_real_usage[arg] = i if op.is_guard(): for arg in op.getfailargs(): if arg is None: # hole @@ -503,7 +505,8 @@ assert isinstance(arg, Box) if arg not in last_used: last_used[arg] = i - + self.last_real_usage 
= last_real_usage + # longevity = {} for arg in produced: if arg in last_used: @@ -519,7 +522,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity#, useful + self.longevity = longevity def loc(self, v): if v is None: # xxx kludgy @@ -1384,13 +1387,6 @@ assert isinstance(descr, TargetToken) arglocs = descr._x86_arglocs self.jump_target_descr = descr - # compute 'tmploc' to be all_regs[0] by spilling what is there - tmpbox1 = TempBox() - tmpbox2 = TempBox() - tmpreg = X86RegisterManager.all_regs[0] - self.rm.force_allocate_reg(tmpbox1, selected_reg=tmpreg) - xmmtmp = X86XMMRegisterManager.all_regs[0] - self.xrm.force_allocate_reg(tmpbox2, selected_reg=xmmtmp) # Part about non-floats src_locations1 = [] dst_locations1 = [] @@ -1402,19 +1398,23 @@ box = op.getarg(i) src_loc = self.loc(box) dst_loc = arglocs[i] - assert dst_loc != tmpreg and dst_loc != xmmtmp if box.type != FLOAT: src_locations1.append(src_loc) dst_locations1.append(dst_loc) else: src_locations2.append(src_loc) dst_locations2.append(dst_loc) + # Do we have a temp var? + if IS_X86_64: + tmpreg = X86_64_SCRATCH_REG + xmmtmp = X86_64_XMM_SCRATCH_REG + else: + tmpreg = None + xmmtmp = None # Do the remapping remap_frame_layout_mixed(assembler, src_locations1, dst_locations1, tmpreg, src_locations2, dst_locations2, xmmtmp) - self.rm.possibly_free_var(tmpbox1) - self.xrm.possibly_free_var(tmpbox2) self.possibly_free_vars_for_op(op) assembler.closing_jump(self.jump_target_descr) @@ -1471,16 +1471,15 @@ inputargs = op.getarglist() arglocs = [None] * len(inputargs) # - # we need to make sure that the tmpreg and xmmtmp are free - tmpreg = X86RegisterManager.all_regs[0] - tmpvar = TempBox() - self.rm.force_allocate_reg(tmpvar, selected_reg=tmpreg) - self.rm.possibly_free_var(tmpvar, _hint_dont_reuse_quickly=True) - # - xmmtmp = X86XMMRegisterManager.all_regs[0] - tmpvar = TempBox() - self.xrm.force_allocate_reg(tmpvar, selected_reg=xmmtmp) - self.xrm.possibly_free_var(tmpvar, _hint_dont_reuse_quickly=True) + # we use force_spill() on the boxes that are not going to be really + # used any more in the loop, but that are kept alive anyway + # by being in a next LABEL's or a JUMP's argument or fail_args + # of some guard + position = self.rm.position + for arg in inputargs: + assert isinstance(arg, Box) + if self.last_real_usage.get(arg, -1) <= position: + self.force_spill_var(arg) # # we need to make sure that no variable is stored in ebp for arg in inputargs: @@ -1491,9 +1490,9 @@ # for i in range(len(inputargs)): arg = inputargs[i] - assert not isinstance(arg, Const) + assert isinstance(arg, Box) loc = self.loc(arg) - assert not (loc is tmpreg or loc is xmmtmp or loc is ebp) + assert loc is not ebp arglocs[i] = loc if isinstance(loc, RegLoc): self.fm.mark_as_free(arg) diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -16,8 +16,7 @@ # class AssemblerLocation(object): - # XXX: Is adding "width" here correct? 
- _attrs_ = ('value', 'width', '_location_code') + _attrs_ = ('value', '_location_code') _immutable_ = True def _getregkey(self): return self.value @@ -28,6 +27,9 @@ def location_code(self): return self._location_code + def get_width(self): + raise NotImplementedError + def value_r(self): return self.value def value_b(self): return self.value def value_s(self): return self.value @@ -43,13 +45,21 @@ _immutable_ = True _location_code = 'b' - def __init__(self, position, ebp_offset, num_words, type): + def __init__(self, position, ebp_offset, type): + # _getregkey() returns self.value; the value returned must not + # conflict with RegLoc._getregkey(). It doesn't a bit by chance, + # so let it fail the following assert if it no longer does. + assert not (0 <= ebp_offset < 8 + 8 * IS_X86_64) self.position = position self.value = ebp_offset - self.width = num_words * WORD # One of INT, REF, FLOAT self.type = type + def get_width(self): + if self.type == FLOAT: + return 8 + return WORD + def __repr__(self): return '%d(%%ebp)' % (self.value,) @@ -63,10 +73,8 @@ self.value = regnum self.is_xmm = is_xmm if self.is_xmm: - self.width = 8 self._location_code = 'x' else: - self.width = WORD self._location_code = 'r' def __repr__(self): if self.is_xmm: @@ -74,6 +82,11 @@ else: return rx86.R.names[self.value] + def get_width(self): + if self.is_xmm: + return 8 + return WORD + def lowest8bits(self): assert not self.is_xmm return RegLoc(rx86.low_byte(self.value), False) @@ -91,9 +104,11 @@ else: return eax -class ImmedLoc(AssemblerLocation): +class ImmediateAssemblerLocation(AssemblerLocation): _immutable_ = True - width = WORD + +class ImmedLoc(ImmediateAssemblerLocation): + _immutable_ = True _location_code = 'i' def __init__(self, value): @@ -104,6 +119,9 @@ def getint(self): return self.value + def get_width(self): + return WORD + def __repr__(self): return "ImmedLoc(%d)" % (self.value) @@ -116,7 +134,6 @@ class AddressLoc(AssemblerLocation): _immutable_ = True - width = WORD # The address is base_loc + (scaled_loc << scale) + static_offset def __init__(self, base_loc, scaled_loc, scale=0, static_offset=0): assert 0 <= scale < 4 @@ -145,6 +162,9 @@ info = getattr(self, attr, '?') return '' % (self._location_code, info) + def get_width(self): + return WORD + def value_a(self): return self.loc_a @@ -179,32 +199,34 @@ raise AssertionError(self._location_code) return result -class ConstFloatLoc(AssemblerLocation): - # XXX: We have to use this class instead of just AddressLoc because - # we want a width of 8 (... I think. Check this!) +class ConstFloatLoc(ImmediateAssemblerLocation): _immutable_ = True - width = 8 _location_code = 'j' def __init__(self, address): self.value = address + def get_width(self): + return 8 + def __repr__(self): return '' % (self.value,) if IS_X86_32: - class FloatImmedLoc(AssemblerLocation): + class FloatImmedLoc(ImmediateAssemblerLocation): # This stands for an immediate float. It cannot be directly used in # any assembler instruction. Instead, it is meant to be decomposed # in two 32-bit halves. On 64-bit, FloatImmedLoc() is a function # instead; see below. 
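# Plain-Python illustration (not RPython) of the decomposition mentioned in the
# comment above: a 64-bit float immediate viewed as two 32-bit halves, low word
# first, which is how the x86-32 code consumes it.  float_halves is an invented
# helper name used only for this sketch.
import struct

def float_halves(value):
    raw = struct.pack('<d', value)         # 8-byte little-endian IEEE-754
    low, high = struct.unpack('<II', raw)  # two unsigned 32-bit words
    return low, high

low, high = float_halves(1.5)
assert (low, high) == (0, 0x3FF80000)      # 1.5 == 0x3FF8000000000000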
_immutable_ = True - width = 8 _location_code = '#' # don't use me def __init__(self, floatstorage): self.aslonglong = floatstorage + def get_width(self): + return 8 + def low_part(self): return intmask(self.aslonglong) diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -71,6 +71,18 @@ ('mov', eax, s24), ('mov', s12, edi)] +def test_no_tmp_reg(): + assembler = MockAssembler() + s8 = frame_pos(0, INT) + s12 = frame_pos(13, INT) + s20 = frame_pos(20, INT) + s24 = frame_pos(221, INT) + remap_frame_layout(assembler, [s8, eax, s12], [s20, s24, edi], None) + assert assembler.ops == [('push', s8), + ('pop', s20), + ('mov', eax, s24), + ('mov', s12, edi)] + def test_reordering(): assembler = MockAssembler() s8 = frame_pos(8, INT) diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -498,27 +498,29 @@ else: log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) + def _rewrite_raw_malloc(self, op, name, args): + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) + TYPE = op.args[0].value + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, args, + extra = (TYPE,), + extrakey = TYPE) + def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': - d = op.args[1].value.copy() - d.pop('flavor') - add_memory_pressure = d.pop('add_memory_pressure', False) - zero = d.pop('zero', False) - track_allocation = d.pop('track_allocation', True) - if d: - raise UnsupportedMallocFlags(d) - ARRAY = op.args[0].value - name = 'raw_malloc' - if zero: - name += '_zero' - if add_memory_pressure: - name += '_add_memory_pressure' - if not track_allocation: - name += '_no_track_allocation' - return self._do_builtin_call(op, name, - [op.args[2]], - extra = (ARRAY,), - extrakey = ARRAY) + return self._rewrite_raw_malloc(op, 'raw_malloc_varsize', + [op.args[2]]) if op.args[0].value == rstr.STR: return SpaceOperation('newstr', [op.args[2]], op.result) elif op.args[0].value == rstr.UNICODE: @@ -531,11 +533,18 @@ op.result) def rewrite_op_free(self, op): - flags = op.args[1].value - assert flags['flavor'] == 'raw' - ARRAY = op.args[0].concretetype.TO - return self._do_builtin_call(op, 'raw_free', [op.args[0]], - extra = (ARRAY,), extrakey = ARRAY) + d = op.args[1].value.copy() + assert d['flavor'] == 'raw' + d.pop('flavor') + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) + STRUCT = op.args[0].concretetype.TO + name = 'raw_free' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[0]], + extra = (STRUCT,), extrakey = STRUCT) def rewrite_op_getarrayitem(self, op): ARRAY = op.args[0].concretetype.TO @@ -736,6 +745,9 @@ return [op0, op1] def rewrite_op_malloc(self, op): + if op.args[1].value['flavor'] == 'raw': + return self._rewrite_raw_malloc(op, 'raw_malloc_fixedsize', []) + # assert op.args[1].value == {'flavor': 'gc'} STRUCT = op.args[0].value vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, STRUCT) diff --git a/pypy/jit/codewriter/support.py 
b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -599,26 +599,75 @@ return p return _ll_0_alloc_with_del - def build_raw_malloc_builder(zero=False, add_memory_pressure=False, track_allocation=True): - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, add_memory_pressure=add_memory_pressure) - return _ll_1_raw_malloc - return build_ll_1_raw_malloc + def build_raw_malloc_varsize_builder(zero=False, + add_memory_pressure=False, + track_allocation=True): + def build_ll_1_raw_malloc_varsize(ARRAY): + def _ll_1_raw_malloc_varsize(n): + return lltype.malloc(ARRAY, n, flavor='raw', zero=zero, + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) + return _ll_1_raw_malloc_varsize + return build_ll_1_raw_malloc_varsize - build_ll_1_raw_malloc = build_raw_malloc_builder() - build_ll_1_raw_malloc_zero = build_raw_malloc_builder(zero=True) - build_ll_1_raw_malloc_zero_add_memory_pressure = build_raw_malloc_builder(zero=True, add_memory_pressure=True) - build_ll_1_raw_malloc_add_memory_pressure = build_raw_malloc_builder(add_memory_pressure=True) - build_ll_1_raw_malloc_no_track_allocation = build_raw_malloc_builder(track_allocation=False) - build_ll_1_raw_malloc_zero_no_track_allocation = build_raw_malloc_builder(zero=True, track_allocation=False) - build_ll_1_raw_malloc_zero_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(zero=True, add_memory_pressure=True, track_allocation=False) - build_ll_1_raw_malloc_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(add_memory_pressure=True, track_allocation=False) + build_ll_1_raw_malloc_varsize = ( + build_raw_malloc_varsize_builder()) + build_ll_1_raw_malloc_varsize_zero = ( + build_raw_malloc_varsize_builder(zero=True)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure = ( + build_raw_malloc_varsize_builder(zero=True, add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_add_memory_pressure = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True)) + build_ll_1_raw_malloc_varsize_no_track_allocation = ( + build_raw_malloc_varsize_builder(track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(zero=True, add_memory_pressure=True, track_allocation=False)) + build_ll_1_raw_malloc_varsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_varsize_builder(add_memory_pressure=True, track_allocation=False)) - def build_ll_1_raw_free(ARRAY): - def _ll_1_raw_free(p): - lltype.free(p, flavor='raw') - return _ll_1_raw_free + def build_raw_malloc_fixedsize_builder(zero=False, + add_memory_pressure=False, + track_allocation=True): + def build_ll_0_raw_malloc_fixedsize(STRUCT): + def _ll_0_raw_malloc_fixedsize(): + return lltype.malloc(STRUCT, flavor='raw', zero=zero, + add_memory_pressure=add_memory_pressure, + track_allocation=track_allocation) + return _ll_0_raw_malloc_fixedsize + return build_ll_0_raw_malloc_fixedsize + + build_ll_0_raw_malloc_fixedsize = ( + build_raw_malloc_fixedsize_builder()) + build_ll_0_raw_malloc_fixedsize_zero = ( + build_raw_malloc_fixedsize_builder(zero=True)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(zero=True, add_memory_pressure=True)) + 
build_ll_0_raw_malloc_fixedsize_add_memory_pressure = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True)) + build_ll_0_raw_malloc_fixedsize_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_zero_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(zero=True, add_memory_pressure=True, track_allocation=False)) + build_ll_0_raw_malloc_fixedsize_add_memory_pressure_no_track_allocation = ( + build_raw_malloc_fixedsize_builder(add_memory_pressure=True, track_allocation=False)) + + def build_raw_free_builder(track_allocation=True): + def build_ll_1_raw_free(ARRAY): + def _ll_1_raw_free(p): + lltype.free(p, flavor='raw', + track_allocation=track_allocation) + return _ll_1_raw_free + return build_ll_1_raw_free + + build_ll_1_raw_free = ( + build_raw_free_builder()) + build_ll_1_raw_free_no_track_allocation = ( + build_raw_free_builder(track_allocation=False)) + class OOtypeHelpers: diff --git a/pypy/jit/codewriter/test/test_codewriter.py b/pypy/jit/codewriter/test/test_codewriter.py --- a/pypy/jit/codewriter/test/test_codewriter.py +++ b/pypy/jit/codewriter/test/test_codewriter.py @@ -217,7 +217,7 @@ cw.make_jitcodes(verbose=True) # s = jitdriver_sd.mainjitcode.dump() - assert 'residual_call_ir_i $<* fn _ll_1_raw_malloc__Signed>' in s + assert 'residual_call_ir_i $<* fn _ll_1_raw_malloc_varsize__Signed>' in s assert 'setarrayitem_raw_i' in s assert 'getarrayitem_raw_i' in s assert 'residual_call_ir_v $<* fn _ll_1_raw_free__arrayPtr>' in s diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -550,7 +550,7 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) op0, op1 = tr.rewrite_operation(op) assert op0.opname == 'residual_call_ir_i' - assert op0.args[0].value == 'raw_malloc' # pseudo-function as a str + assert op0.args[0].value == 'raw_malloc_varsize' # pseudo-function as a str assert op1.opname == '-live-' assert op1.args == [] @@ -564,7 +564,7 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) op0, op1 = tr.rewrite_operation(op) assert op0.opname == 'residual_call_ir_i' - assert op0.args[0].value == 'raw_malloc_zero' # pseudo-function as a str + assert op0.args[0].value == 'raw_malloc_varsize_zero' # pseudo-fn as a str assert op1.opname == '-live-' assert op1.args == [] @@ -578,6 +578,35 @@ tr = Transformer(FakeCPU(), FakeResidualCallControl()) py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) +def test_raw_malloc_fixedsize(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc', [Constant(S, lltype.Void), flags], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_r_i' + assert op0.args[0].value == 'raw_malloc_fixedsize_zero' #pseudo-fn as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_free(): + S = lltype.Struct('dummy', ('x', lltype.Signed)) + for flag in [True, False]: + flags = Constant({'flavor': 'raw', 'track_allocation': flag}, + lltype.Void) + op = SpaceOperation('free', [varoftype(lltype.Ptr(S)), flags], + varoftype(lltype.Void)) + 
tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_v' + if flag: + pseudo_op_name = 'raw_free' + else: + pseudo_op_name = 'raw_free_no_track_allocation' + assert op0.args[0].value == pseudo_op_name # pseudo-function as a str + assert op1.opname == '-live-' + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -1,5 +1,5 @@ import math -from pypy.rlib.rarithmetic import r_int64 +from pypy.rlib.rarithmetic import r_int64, r_uint from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.rlib.objectmodel import we_are_translated @@ -21,6 +21,7 @@ # class MemoryManager(object): + NO_NEXT_CHECK = r_int64(2 ** 63 - 1) def __init__(self): self.check_frequency = -1 @@ -36,12 +37,13 @@ # According to my estimates it's about 5e9 years given 1000 loops # per second self.current_generation = r_int64(1) - self.next_check = r_int64(-1) + self.next_check = self.NO_NEXT_CHECK self.alive_loops = {} + self._cleanup_jitcell_dicts = lambda: None def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: - self.next_check = r_int64(-1) + self.next_check = self.NO_NEXT_CHECK else: self.max_age = max_age if check_frequency <= 0: @@ -49,10 +51,11 @@ self.check_frequency = check_frequency self.next_check = self.current_generation + 1 - def next_generation(self): + def next_generation(self, do_cleanups_now=True): self.current_generation += 1 - if self.current_generation == self.next_check: + if do_cleanups_now and self.current_generation >= self.next_check: self._kill_old_loops_now() + self._cleanup_jitcell_dicts() self.next_check = self.current_generation + self.check_frequency def keep_loop_alive(self, looptoken): @@ -81,3 +84,22 @@ # a single one is not enough for all tests :-( rgc.collect(); rgc.collect(); rgc.collect() debug_stop("jit-mem-collect") + + def get_current_generation_uint(self): + """Return the current generation, possibly truncated to a uint. + To use only as an approximation for decaying counters.""" + return r_uint(self.current_generation) + + def record_jitcell_dict(self, callback): + """NOT_RPYTHON. The given jitcell_dict is a dict that needs + occasional clean-ups of old cells. A cell is old if it never + reached the threshold, and its counter decayed to a tiny value.""" + # note that the various jitcell_dicts have different RPython types, + # so we have to make a different function for each one. These + # functions are chained to each other: each calls the previous one. 
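# A small usage-style sketch of the chaining described above (illustration
# only; make_chain, record and cleanup_all are invented names, not PyPy APIs).
# Each registration wraps the previous composite cleanup, so invoking the
# latest one cleans every registered dict.
def make_chain():
    chain = [lambda: None]
    def record(callback):
        previous = chain[0]
        def cleanup():
            callback()     # clean the newly registered dict first...
            previous()     # ...then fall through to every older cleanup
        chain[0] = cleanup
    return record, lambda: chain[0]()

record, cleanup_all = make_chain()
calls = []
record(lambda: calls.append('dict A'))
record(lambda: calls.append('dict B'))
cleanup_all()
assert calls == ['dict B', 'dict A']   # newest cleanup runs, then the older ones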
+ def cleanup_dict(): + callback() + cleanup_previous() + # + cleanup_previous = self._cleanup_jitcell_dicts + self._cleanup_jitcell_dicts = cleanup_dict diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -246,15 +246,16 @@ self.force_lazy_setfields_and_arrayitems_for_guard()) return opnum = op.getopnum() - if (opnum == rop.SETFIELD_GC or # handled specially - opnum == rop.SETFIELD_RAW or # no effect on GC struct/array - opnum == rop.SETARRAYITEM_GC or # handled specially - opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct - opnum == rop.STRSETITEM or # no effect on GC struct/array - opnum == rop.UNICODESETITEM or # no effect on GC struct/array - opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever - opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array - opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array + if (opnum == rop.SETFIELD_GC or # handled specially + opnum == rop.SETFIELD_RAW or # no effect on GC struct/array + opnum == rop.SETARRAYITEM_GC or # handled specially + opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct + opnum == rop.SETINTERIORFIELD_RAW or # no effect on GC struct + opnum == rop.STRSETITEM or # no effect on GC struct/array + opnum == rop.UNICODESETITEM or # no effect on GC struct/array + opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever + opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array + opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array return assert opnum != rop.CALL_PURE if (opnum == rop.CALL or diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7755,6 +7755,22 @@ """ self.optimize_loop(ops, expected) + def test_setinteriorfield_should_not_clear_cache(self): + ops = """ + [i0, p0] + i2 = getfield_gc(p0, descr=adescr) + i3 = call(i2, descr=nonwritedescr) + setinteriorfield_raw(i0, i2, i3) + jump(i0, p0) + """ + expected = """ + [i0, p0, i2] + i3 = call(i2, descr=nonwritedescr) + setinteriorfield_raw(i0, i2, i3) + jump(i0, p0, i2) + """ + self.optimize_loop(ops, expected) + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -381,11 +381,11 @@ 'GUARD_ISNULL/1d', 'GUARD_NONNULL_CLASS/2d', '_GUARD_FOLDABLE_LAST', - 'GUARD_NO_EXCEPTION/0d', - 'GUARD_EXCEPTION/1d', + 'GUARD_NO_EXCEPTION/0d', # may be called with an exception currently set + 'GUARD_EXCEPTION/1d', # may be called with an exception currently set 'GUARD_NO_OVERFLOW/0d', 'GUARD_OVERFLOW/0d', - 'GUARD_NOT_FORCED/0d', + 'GUARD_NOT_FORCED/0d', # may be called with an exception currently set 'GUARD_NOT_INVALIDATED/0d', '_GUARD_LAST', # ----- end of guard operations ----- diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2910,6 +2910,27 @@ res = self.meta_interp(f, [32]) assert res == f(32) + def test_decay_counters(self): + myjitdriver = JitDriver(greens = ['m'], reds = ['n']) + def f(m, n): + while n > 0: + myjitdriver.jit_merge_point(m=m, n=n) + n += m + n -= m + n -= 1 + def main(): + f(5, 7) 
# run 7x with m=5 counter[m=5] = 7 + f(15, 10) # compiles one loop counter[m=5] = 3 (automatic decay) + f(5, 5) # run 5x times with m=5 counter[m=5] = 8 + # + self.meta_interp(main, [], decay_halflife=1, + function_threshold=0, threshold=9, trace_eagerness=99) + self.check_trace_count(1) + # + self.meta_interp(main, [], decay_halflife=1, + function_threshold=0, threshold=8, trace_eagerness=99) + self.check_trace_count(2) + class TestOOtype(BasicTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_rawmem.py b/pypy/jit/metainterp/test/test_rawmem.py --- a/pypy/jit/metainterp/test/test_rawmem.py +++ b/pypy/jit/metainterp/test/test_rawmem.py @@ -8,7 +8,7 @@ VOID_TP = lltype.Array(lltype.Void, hints={"nolength": True, "uncast_on_llgraph": True}) class A(object): def __init__(self, x): - self.storage = rffi.cast(lltype.Ptr(VOID_TP), x)\ + self.storage = rffi.cast(lltype.Ptr(VOID_TP), x) def f(n): x = lltype.malloc(TP, n, flavor="raw", zero=True) @@ -19,4 +19,14 @@ lltype.free(x, flavor="raw") return s res = self.interp_operations(f, [10]) - assert res == 1.0 \ No newline at end of file + + def test_fixed_size_malloc(self): + TIMEVAL = lltype.Struct('dummy', ('tv_sec', rffi.LONG), ('tv_usec', rffi.LONG)) + def f(): + p = lltype.malloc(TIMEVAL, flavor='raw') + lltype.free(p, flavor='raw') + return 42 + res = self.interp_operations(f, []) + assert res == 42 + self.check_operations_history({'call': 2, 'guard_no_exception': 1, + 'finish': 1}) diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -1,3 +1,4 @@ +import math from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -8,7 +9,7 @@ from pypy.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr from pypy.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr from pypy.jit.codewriter import longlong -from pypy.rlib.rarithmetic import r_singlefloat +from pypy.rlib.rarithmetic import r_singlefloat, r_uint def boxfloat(x): return BoxFloat(longlong.getfloatstorage(x)) @@ -275,3 +276,77 @@ state.make_jitdriver_callbacks() res = state.can_never_inline(5, 42.5) assert res is True + +def test_decay_counters(): + cell = JitCell(r_uint(5)) + cell.counter = 100 + cell.adjust_counter(r_uint(5), math.log(0.9)) + assert cell.counter == 100 + cell.adjust_counter(r_uint(6), math.log(0.9)) + assert cell.counter == 90 + cell.adjust_counter(r_uint(9), math.log(0.9)) + assert cell.counter == int(90 * (0.9**3)) + +def test_cleanup_jitcell_dict(): + from pypy.jit.metainterp.memmgr import MemoryManager + class FakeWarmRunnerDesc: + memory_manager = MemoryManager() + class cpu: + pass + class FakeJitDriverSD: + _green_args_spec = [lltype.Signed] + # + # Test creating tons of jitcells that remain at 0 + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + cell1 = get_jitcell(True, -1) + assert len(warmstate._jitcell_dict) == 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 1 + # + for i in range(1, 20005): + get_jitcell(True, i) # should trigger a clean-up at 20001 + assert len(warmstate._jitcell_dict) == (i % 20000) + 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 2 + # + # Same test, with one jitcell that has a counter of BASE instead of 0 + warmstate = 
WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + warmstate.set_param_decay_halflife(2) + warmstate.set_param_threshold(5) + warmstate.set_param_function_threshold(0) + get_jitcell = warmstate._make_jitcell_getter_default() + cell2 = get_jitcell(True, -2) + cell2.counter = BASE = warmstate.increment_threshold * 3 + # + for i in range(0, 20005): + get_jitcell(True, i) + assert len(warmstate._jitcell_dict) == (i % 19999) + 2 + # + assert cell2 in warmstate._jitcell_dict.values() + assert cell2.counter == int(BASE * math.sqrt(0.5)) # decayed once + assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 + # + # Same test, with jitcells that are compiled and free by the memmgr + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + get_jitcell(True, -1) + assert FakeWarmRunnerDesc.memory_manager.current_generation == 3 + # + for i in range(1, 20005): + cell = get_jitcell(True, i) + cell.counter = -1 + cell.wref_procedure_token = None # or a dead weakref, equivalently + assert len(warmstate._jitcell_dict) == (i % 20000) + 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 + # + # Same test, with counter == -2 (rare case, kept alive) + warmstate = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) + get_jitcell = warmstate._make_jitcell_getter_default() + cell = get_jitcell(True, -1) + cell.counter = -2 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 4 + # + for i in range(1, 20005): + cell = get_jitcell(True, i) + cell.counter = -2 + assert len(warmstate._jitcell_dict) == i + 1 + assert FakeWarmRunnerDesc.memory_manager.current_generation == 5 diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -64,9 +64,11 @@ def jittify_and_run(interp, graph, args, repeat=1, graph_and_interp_only=False, backendopt=False, trace_limit=sys.maxint, + threshold=3, trace_eagerness=2, inline=False, loop_longevity=0, retrace_limit=5, - function_threshold=4, - enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, **kwds): + function_threshold=4, decay_halflife=0, + enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, + **kwds): from pypy.config.config import ConfigError translator = interp.typer.annotator.translator try: @@ -83,15 +85,16 @@ pass warmrunnerdesc = WarmRunnerDesc(translator, backendopt=backendopt, **kwds) for jd in warmrunnerdesc.jitdrivers_sd: - jd.warmstate.set_param_threshold(3) # for tests + jd.warmstate.set_param_threshold(threshold) jd.warmstate.set_param_function_threshold(function_threshold) - jd.warmstate.set_param_trace_eagerness(2) # for tests + jd.warmstate.set_param_trace_eagerness(trace_eagerness) jd.warmstate.set_param_trace_limit(trace_limit) jd.warmstate.set_param_inlining(inline) jd.warmstate.set_param_loop_longevity(loop_longevity) jd.warmstate.set_param_retrace_limit(retrace_limit) jd.warmstate.set_param_max_retrace_guards(max_retrace_guards) jd.warmstate.set_param_enable_opts(enable_opts) + jd.warmstate.set_param_decay_halflife(decay_halflife) warmrunnerdesc.finish() if graph_and_interp_only: return interp, graph diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -1,10 +1,10 @@ -import sys, weakref +import sys, weakref, math from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi from pypy.rpython.ootypesystem import ootype from 
pypy.rpython.annlowlevel import hlstr, cast_base_ptr_to_instance from pypy.rpython.annlowlevel import cast_object_to_ptr from pypy.rlib.objectmodel import specialize, we_are_translated, r_dict -from pypy.rlib.rarithmetic import intmask +from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rlib.nonconst import NonConstant from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.jit import PARAMETERS @@ -153,6 +153,25 @@ dont_trace_here = False wref_procedure_token = None + def __init__(self, generation): + # The stored 'counter' value follows an exponential decay model. + # Conceptually after every generation, it decays by getting + # multiplied by a constant <= 1.0. In practice, decaying occurs + # lazily: the following field records the latest seen generation + # number, and adjustment is done by adjust_counter() when needed. + self.latest_generation_seen = generation + + def adjust_counter(self, generation, log_decay_factor): + if generation != self.latest_generation_seen: + # The latest_generation_seen is older than the current generation. + # Adjust by multiplying self.counter N times by decay_factor, i.e. + # by decay_factor ** N, which is equal to exp(log(decay_factor)*N). + assert self.counter >= 0 + N = generation - self.latest_generation_seen + factor = math.exp(log_decay_factor * N) + self.counter = int(self.counter * factor) + self.latest_generation_seen = generation + def get_procedure_token(self): if self.wref_procedure_token is not None: token = self.wref_procedure_token() @@ -172,7 +191,6 @@ class WarmEnterState(object): THRESHOLD_LIMIT = sys.maxint // 2 - default_jitcell_dict = None def __init__(self, warmrunnerdesc, jitdriver_sd): "NOT_RPYTHON" @@ -213,6 +231,17 @@ def set_param_inlining(self, value): self.inlining = value + def set_param_decay_halflife(self, value): + # Use 0 or -1 to mean "no decay". Initialize the internal variable + # 'log_decay_factor'. It is choosen such that by multiplying the + # counter on loops by 'exp(log_decay_factor)' (<= 1.0) every + # generation, then the counter will be divided by two after 'value' + # generations have passed. 
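# Worked example of the half-life formula described above (illustration only):
# with log_decay_factor = log(0.5) / halflife, multiplying by
# exp(log_decay_factor) once per generation halves a counter every
# 'halflife' generations.
import math

halflife = 4
log_decay_factor = math.log(0.5) / halflife
counter = 100.0
for generation in range(8):               # 8 generations = two half-lives
    counter *= math.exp(log_decay_factor)
assert abs(counter - 25.0) < 1e-9         # 100 halved twice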
+ if value <= 0: + self.log_decay_factor = 0.0 # log(1.0) + else: + self.log_decay_factor = math.log(0.5) / value + def set_param_enable_opts(self, value): from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT, ALL_OPTS_NAMES @@ -282,6 +311,11 @@ confirm_enter_jit = self.confirm_enter_jit range_red_args = unrolling_iterable( range(num_green_args, num_green_args + jitdriver_sd.num_red_args)) + memmgr = self.warmrunnerdesc.memory_manager + if memmgr is not None: + get_current_generation = memmgr.get_current_generation_uint + else: + get_current_generation = lambda: r_uint(0) # get a new specialized copy of the method ARGS = [] for kind in jitdriver_sd.red_args_types: @@ -326,6 +360,8 @@ if cell.counter >= 0: # update the profiling counter + cell.adjust_counter(get_current_generation(), + self.log_decay_factor) n = cell.counter + threshold if n <= self.THRESHOLD_LIMIT: # bound not reached cell.counter = n @@ -418,6 +454,15 @@ # return jit_getter + def _new_jitcell(self): + warmrunnerdesc = self.warmrunnerdesc + if (warmrunnerdesc is not None and + warmrunnerdesc.memory_manager is not None): + gen = warmrunnerdesc.memory_manager.get_current_generation_uint() + else: + gen = r_uint(0) + return JitCell(gen) + def _make_jitcell_getter_default(self): "NOT_RPYTHON" jitdriver_sd = self.jitdriver_sd @@ -447,13 +492,53 @@ except AttributeError: pass # + memmgr = self.warmrunnerdesc and self.warmrunnerdesc.memory_manager + if memmgr: + def _cleanup_dict(): + minimum = sys.maxint + if self.increment_threshold > 0: + minimum = min(minimum, self.increment_threshold) + if self.increment_function_threshold > 0: + minimum = min(minimum, self.increment_function_threshold) + currentgen = memmgr.get_current_generation_uint() + killme = [] + for key, cell in jitcell_dict.iteritems(): + if cell.counter >= 0: + cell.adjust_counter(currentgen, self.log_decay_factor) + if cell.counter < minimum: + killme.append(key) + elif (cell.counter == -1 + and cell.get_procedure_token() is None): + killme.append(key) + for key in killme: + del jitcell_dict[key] + # + def _maybe_cleanup_dict(): + # If no tracing goes on at all because the jitcells are + # each time for new greenargs, the dictionary grows forever. + # So every one in a (rare) while, we decide to force an + # artificial next_generation() and _cleanup_dict(). + self._trigger_automatic_cleanup += 1 + if self._trigger_automatic_cleanup > 20000: + self._trigger_automatic_cleanup = 0 + memmgr.next_generation(do_cleanups_now=False) + _cleanup_dict() + # + self._trigger_automatic_cleanup = 0 + self._jitcell_dict = jitcell_dict # for tests + memmgr.record_jitcell_dict(_cleanup_dict) + else: + def _maybe_cleanup_dict(): + pass + # def get_jitcell(build, *greenargs): try: cell = jitcell_dict[greenargs] except KeyError: if not build: return None - cell = JitCell() + _maybe_cleanup_dict() + cell = self._new_jitcell() jitcell_dict[greenargs] = cell return cell return get_jitcell @@ -464,6 +549,10 @@ get_jitcell_at_ptr = self.jitdriver_sd._get_jitcell_at_ptr set_jitcell_at_ptr = self.jitdriver_sd._set_jitcell_at_ptr lltohlhack = {} + # note that there is no equivalent of record_jitcell_dict() + # in the case of custom getters. We assume that the interpreter + # stores the JitCells on some objects that can go away by GC, + # like the PyCode objects in PyPy. 
# def get_jitcell(build, *greenargs): fn = support.maybe_on_top_of_llinterp(rtyper, get_jitcell_at_ptr) @@ -485,7 +574,7 @@ if not build: return cell if cell is None: - cell = JitCell() + cell = self._new_jitcell() # if we_are_translated(): cellref = cast_object_to_ptr(BASEJITCELL, cell) diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -161,11 +161,16 @@ def test_shutdown(self): import socket, ssl, sys, gc - if sys.platform == 'darwin': - skip("get also on CPython: error: [Errno 0]") ss = socket.ssl(self.s) ss.write("hello\n") - assert ss.shutdown() is self.s._sock + try: + result = ss.shutdown() + except socket.error, e: + # xxx obscure case; throwing errno 0 is pretty odd... + if e.errno == 0: + skip("Shutdown raised errno 0. CPython does this too") + raise + assert result is self.s._sock raises(ssl.SSLError, ss.write, "hello\n") del ss; gc.collect() diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -1,9 +1,19 @@ from pypy.interpreter.mixedmodule import MixedModule +class PyPyModule(MixedModule): + interpleveldefs = { + 'debug_repr': 'interp_extras.debug_repr', + } + appleveldefs = {} + class Module(MixedModule): applevel_name = 'numpypy' + submodules = { + 'pypy': PyPyModule + } + interpleveldefs = { 'ndarray': 'interp_numarray.W_NDimArray', 'dtype': 'interp_dtype.W_Dtype', @@ -81,6 +91,7 @@ 'mean': 'app_numpy.mean', 'sum': 'app_numpy.sum', 'min': 'app_numpy.min', + 'identity': 'app_numpy.identity', 'max': 'app_numpy.max', 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -13,6 +13,11 @@ # weighting, just the average part! 
return mean(a) +def identity(n, dtype=None): + a = numpypy.zeros((n,n), dtype=dtype) + for i in range(n): + a[i][i] = 1 + return a def mean(a): if not hasattr(a, "mean"): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -86,6 +86,7 @@ descr_ge = _binop_impl("greater_equal") descr_radd = _binop_right_impl("add") + descr_rsub = _binop_right_impl("subtract") descr_rmul = _binop_right_impl("multiply") descr_neg = _unaryop_impl("negative") @@ -132,7 +133,7 @@ descr__new__, get_dtype = new_dtype_getter("long") class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - pass + descr__new__, get_dtype = new_dtype_getter("ulong") class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int64") @@ -170,7 +171,8 @@ __mul__ = interp2app(W_GenericBox.descr_mul), __div__ = interp2app(W_GenericBox.descr_div), - __radd__ = interp2app(W_GenericBox.descr_add), + __radd__ = interp2app(W_GenericBox.descr_radd), + __rsub__ = interp2app(W_GenericBox.descr_rsub), __rmul__ = interp2app(W_GenericBox.descr_rmul), __eq__ = interp2app(W_GenericBox.descr_eq), diff --git a/pypy/module/micronumpy/interp_extras.py b/pypy/module/micronumpy/interp_extras.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_extras.py @@ -0,0 +1,7 @@ +from pypy.interpreter.gateway import unwrap_spec +from pypy.module.micronumpy.interp_numarray import BaseArray + + + at unwrap_spec(array=BaseArray) +def debug_repr(space, array): + return space.wrap(array.debug_repr()) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -581,6 +581,7 @@ def descr_get_dtype(self, space): return space.wrap(self.find_dtype()) + @jit.unroll_safe def descr_get_shape(self, space): return space.newtuple([space.wrap(i) for i in self.shape]) @@ -925,9 +926,6 @@ def start_iter(self, res_shape=None): raise NotImplementedError - def descr_debug_repr(self, space): - return space.wrap(self.debug_repr()) - def descr_array_iface(self, space): concrete = self.get_concrete() storage = concrete.get_storage(space) @@ -1466,7 +1464,6 @@ __repr__ = interp2app(BaseArray.descr_repr), __str__ = interp2app(BaseArray.descr_str), - __debug_repr__ = interp2app(BaseArray.descr_debug_repr), __array_interface__ = GetSetProperty(BaseArray.descr_array_iface), dtype = GetSetProperty(BaseArray.descr_get_dtype), diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,34 +1,90 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.interp_dtype import get_dtype_cache -from pypy.rlib.rstruct.runpack import runpack from pypy.rpython.lltypesystem import lltype, rffi +from pypy.module.micronumpy import interp_dtype +from pypy.objspace.std.strutil import strip_spaces FLOAT_SIZE = rffi.sizeof(lltype.Float) - at unwrap_spec(s=str) -def fromstring(space, s): +def _fromstring_text(space, s, count, sep, length, dtype): from pypy.module.micronumpy.interp_numarray import W_NDimArray + + sep_stripped = strip_spaces(sep) + skip_bad_vals = len(sep_stripped) == 0 + + items = [] + num_items = 0 + idx = 
0 + + while (num_items < count or count == -1) and idx < len(s): + nextidx = s.find(sep, idx) + if nextidx < 0: + nextidx = length + piece = strip_spaces(s[idx:nextidx]) + if len(piece) > 0 or not skip_bad_vals: + if len(piece) == 0 and not skip_bad_vals: + val = dtype.itemtype.default_fromstring(space) + else: + try: + val = dtype.coerce(space, space.wrap(piece)) + except OperationError, e: + if not e.match(space, space.w_ValueError): + raise + gotit = False + while not gotit and len(piece) > 0: + piece = piece[:-1] + try: + val = dtype.coerce(space, space.wrap(piece)) + gotit = True + except OperationError, e: + if not e.match(space, space.w_ValueError): + raise + if not gotit: + val = dtype.itemtype.default_fromstring(space) + nextidx = length + items.append(val) + num_items += 1 + idx = nextidx + 1 + + if count > num_items: + raise OperationError(space.w_ValueError, space.wrap( + "string is smaller than requested size")) + + a = W_NDimArray(num_items, [num_items], dtype=dtype) + for i, val in enumerate(items): + a.dtype.setitem(a.storage, i, val) + + return space.wrap(a) + +def _fromstring_bin(space, s, count, length, dtype): + from pypy.module.micronumpy.interp_numarray import W_NDimArray + + itemsize = dtype.itemtype.get_element_size() + if count == -1: + count = length / itemsize + if length % itemsize != 0: + raise operationerrfmt(space.w_ValueError, + "string length %d not divisable by item size %d", + length, itemsize) + if count * itemsize > length: + raise OperationError(space.w_ValueError, space.wrap( + "string is smaller than requested size")) + + a = W_NDimArray(count, [count], dtype=dtype) + for i in range(count): + val = dtype.itemtype.runpack_str(s[i*itemsize:i*itemsize + itemsize]) + a.dtype.setitem(a.storage, i, val) + + return space.wrap(a) + + at unwrap_spec(s=str, count=int, sep=str) +def fromstring(space, s, w_dtype=None, count=-1, sep=''): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) length = len(s) - - if length % FLOAT_SIZE == 0: - number = length/FLOAT_SIZE + if sep == '': + return _fromstring_bin(space, s, count, length, dtype) else: - raise OperationError(space.w_ValueError, space.wrap( - "string length %d not divisable by %d" % (length, FLOAT_SIZE))) - - dtype = get_dtype_cache(space).w_float64dtype - a = W_NDimArray(number, [number], dtype=dtype) - - start = 0 - end = FLOAT_SIZE - i = 0 - while i < number: - part = s[start:end] - a.dtype.setitem(a.storage, i, dtype.box(runpack('d', part))) - i += 1 - start += FLOAT_SIZE - end += FLOAT_SIZE - - return space.wrap(a) + return _fromstring_text(space, s, count, sep, length, dtype) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -259,22 +259,31 @@ assert numpy.uint16('65536') == 0 def test_int32(self): + import sys import numpypy as numpy x = numpy.int32(23) assert x == 23 assert numpy.int32(2147483647) == 2147483647 - assert numpy.int32(2147483648) == -2147483648 assert numpy.int32('2147483647') == 2147483647 - assert numpy.int32('2147483648') == -2147483648 + if sys.maxint > 2 ** 31 - 1: + assert numpy.int32(2147483648) == -2147483648 + assert numpy.int32('2147483648') == -2147483648 + else: + raises(OverflowError, numpy.int32, 2147483648) + raises(OverflowError, numpy.int32, '2147483648') def test_uint32(self): + import sys import numpypy as numpy - assert numpy.uint32(4294967295) == 
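The parsing loop in _fromstring_text above is easier to follow outside the interpreter plumbing. A simplified, standalone sketch of the same control flow (the -1.0 float default and the prefix-retry behaviour follow the diff; the function name and signature are illustrative, not the real API):

def fromstring_text(s, sep, count=-1, convert=float, default=-1.0):
    skip_bad_vals = sep.strip() == ''      # whitespace separators skip junk fields
    items, idx = [], 0
    while (count == -1 or len(items) < count) and idx < len(s):
        nextidx = s.find(sep, idx)
        if nextidx < 0:
            nextidx = len(s)
        piece = s[idx:nextidx].strip()
        if piece or not skip_bad_vals:
            if not piece:
                items.append(default)      # empty field between two separators
            else:
                try:
                    items.append(convert(piece))
                except ValueError:
                    # Retry shorter and shorter prefixes, then stop scanning,
                    # as the interpreter-level loop above does on bad input.
                    while piece:
                        piece = piece[:-1]
                        try:
                            items.append(convert(piece))
                            break
                        except ValueError:
                            pass
                    else:
                        items.append(default)
                    nextidx = len(s)
        idx = nextidx + 1
    return items

print(fromstring_text("1.0,,2.0,3.0", sep=","))   # [1.0, -1.0, 2.0, 3.0]
print(fromstring_text("1 2 3 ", sep=" "))         # [1.0, 2.0, 3.0]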
4294967295 - assert numpy.uint32(4294967296) == 0 - assert numpy.uint32('4294967295') == 4294967295 - assert numpy.uint32('4294967296') == 0 + assert numpy.uint32(10) == 10 + + if sys.maxint > 2 ** 31 - 1: + assert numpy.uint32(4294967295) == 4294967295 + assert numpy.uint32(4294967296) == 0 + assert numpy.uint32('4294967295') == 4294967295 + assert numpy.uint32('4294967296') == 0 def test_int_(self): import numpypy as numpy @@ -294,10 +303,14 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 - assert numpy.int64(9223372036854775807) == 9223372036854775807 + if sys.maxint >= 2 ** 63 - 1: + assert numpy.int64(9223372036854775807) == 9223372036854775807 + assert numpy.int64('9223372036854775807') == 9223372036854775807 + else: + raises(OverflowError, numpy.int64, 9223372036854775807) + raises(OverflowError, numpy.int64, '9223372036854775807') + raises(OverflowError, numpy.int64, 9223372036854775808) - - assert numpy.int64('9223372036854775807') == 9223372036854775807 raises(OverflowError, numpy.int64, '9223372036854775808') def test_uint64(self): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -491,6 +491,11 @@ for i in range(5): assert b[i] == i - 5 + def test_scalar_subtract(self): + from numpypy import int32 + assert int32(2) - 1 == 1 + assert 1 - int32(2) == -1 + def test_mul(self): import numpypy @@ -722,6 +727,26 @@ a = array([True] * 5, bool) assert a.sum() == 5 + def test_identity(self): + from numpypy import identity, array + from numpypy import int32, float64, dtype + a = identity(0) + assert len(a) == 0 + assert a.dtype == dtype('float64') + assert a.shape == (0,0) + b = identity(1, dtype=int32) + assert len(b) == 1 + assert b[0][0] == 1 + assert b.shape == (1,1) + assert b.dtype == dtype('int32') + c = identity(2) + assert c.shape == (2,2) + assert (c == [[1,0],[0,1]]).all() + d = identity(3, dtype='int32') + assert d.shape == (3,3) + assert d.dtype == dtype('int32') + assert (d == [[1,0,0],[0,1,0],[0,0,1]]).all() + def test_prod(self): from numpypy import array a = array(range(1, 6)) @@ -868,16 +893,17 @@ def test_debug_repr(self): from numpypy import zeros, sin + from numpypy.pypy import debug_repr a = zeros(1) - assert a.__debug_repr__() == 'Array' - assert (a + a).__debug_repr__() == 'Call2(add, Array, Array)' - assert (a[::2]).__debug_repr__() == 'Slice(Array)' - assert (a + 2).__debug_repr__() == 'Call2(add, Array, Scalar)' - assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, FlatIter(Array))' - assert sin(a).__debug_repr__() == 'Call1(sin, Array)' + assert debug_repr(a) == 'Array' + assert debug_repr(a + a) == 'Call2(add, Array, Array)' + assert debug_repr(a[::2]) == 'Slice(Array)' + assert debug_repr(a + 2) == 'Call2(add, Array, Scalar)' + assert debug_repr(a + a.flat) == 'Call2(add, Array, FlatIter(Array))' + assert debug_repr(sin(a)) == 'Call1(sin, Array)' b = a + a b[0] = 3 - assert b.__debug_repr__() == 'Call2(add, forced=Array)' + assert debug_repr(b) == 'Call2(add, forced=Array)' def test_tolist_scalar(self): from numpypy import int32, bool_ @@ -1168,13 +1194,110 @@ import struct BaseNumpyAppTest.setup_class.im_func(cls) cls.w_data = cls.space.wrap(struct.pack('dddd', 1, 2, 3, 4)) + cls.w_fdata = cls.space.wrap(struct.pack('f', 2.3)) + cls.w_float32val = cls.space.wrap(struct.pack('f', 5.2)) + cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4)) + cls.w_ulongval 
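The new class-level fixtures above (w_fdata, w_float32val, w_float64val, w_ulongval) are raw byte strings built with struct.pack, which the fromstring tests then decode again. The same round trip in plain NumPy terms, with frombuffer standing in for numpypy's fromstring (an assumption for illustration only):

import struct
import numpy

data = struct.pack('dddd', 1.0, 2.0, 3.0, 4.0)             # 32 raw bytes, four float64s
print(list(numpy.frombuffer(data, dtype=numpy.float64)))   # [1.0, 2.0, 3.0, 4.0]

fdata = struct.pack('f', 2.3)                              # one float32
print(numpy.frombuffer(fdata, dtype=numpy.float32)[0])     # 2.3 (as a float32)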
= cls.space.wrap(struct.pack('L', 12)) def test_fromstring(self): - from numpypy import fromstring + import sys + from numpypy import fromstring, array, uint8, float32, int32 + a = fromstring(self.data) for i in range(4): assert a[i] == i + 1 - raises(ValueError, fromstring, "abc") + b = fromstring('\x01\x02', dtype=uint8) + assert a[0] == 1 + assert a[1] == 2 + c = fromstring(self.fdata, dtype=float32) + assert c[0] == float32(2.3) + d = fromstring("1 2", sep=' ', count=2, dtype=uint8) + assert len(d) == 2 + assert d[0] == 1 + assert d[1] == 2 + e = fromstring('3, 4,5', dtype=uint8, sep=',') + assert len(e) == 3 + assert e[0] == 3 + assert e[1] == 4 + assert e[2] == 5 + f = fromstring('\x01\x02\x03\x04\x05', dtype=uint8, count=3) + assert len(f) == 3 + assert f[0] == 1 + assert f[1] == 2 + assert f[2] == 3 + g = fromstring("1 2 3 ", dtype=uint8, sep=" ") + assert len(g) == 3 + assert g[0] == 1 + assert g[1] == 2 + assert g[2] == 3 + h = fromstring("1, , 2, 3", dtype=uint8, sep=",") + assert (h == [1,0,2,3]).all() + i = fromstring("1 2 3", dtype=uint8, sep=" ") + assert (i == [1,2,3]).all() + j = fromstring("1\t\t\t\t2\t3", dtype=uint8, sep="\t") + assert (j == [1,2,3]).all() + k = fromstring("1,x,2,3", dtype=uint8, sep=",") + assert (k == [1,0]).all() + l = fromstring("1,x,2,3", dtype='float32', sep=",") + assert (l == [1.0,-1.0]).all() + m = fromstring("1,,2,3", sep=",") + assert (m == [1.0,-1.0,2.0,3.0]).all() + n = fromstring("3.4 2.0 3.8 2.2", dtype=int32, sep=" ") + assert (n == [3]).all() + o = fromstring("1.0 2f.0f 3.8 2.2", dtype=float32, sep=" ") + assert len(o) == 2 + assert o[0] == 1.0 + assert o[1] == 2.0 + p = fromstring("1.0,,2.0,3.0", sep=",") + assert (p == [1.0, -1.0, 2.0, 3.0]).all() + q = fromstring("1.0,,2.0,3.0", sep=" ") + assert (q == [1.0]).all() + r = fromstring("\x01\x00\x02", dtype='bool') + assert (r == [True, False, True]).all() + s = fromstring("1,2,3,,5", dtype=bool, sep=",") + assert (s == [True, True, True, False, True]).all() + t = fromstring("", bool) + assert (t == []).all() + u = fromstring("\x01\x00\x00\x00\x00\x00\x00\x00", dtype=int) + if sys.maxint > 2 ** 31 - 1: + assert (u == [1]).all() + else: + assert (u == [1, 0]).all() + + def test_fromstring_types(self): + from numpypy import (fromstring, int8, int16, int32, int64, uint8, + uint16, uint32, float32, float64) + + a = fromstring('\xFF', dtype=int8) + assert a[0] == -1 + b = fromstring('\xFF', dtype=uint8) + assert b[0] == 255 + c = fromstring('\xFF\xFF', dtype=int16) + assert c[0] == -1 + d = fromstring('\xFF\xFF', dtype=uint16) + assert d[0] == 65535 + e = fromstring('\xFF\xFF\xFF\xFF', dtype=int32) + assert e[0] == -1 + f = fromstring('\xFF\xFF\xFF\xFF', dtype=uint32) + assert repr(f[0]) == '4294967295' + g = fromstring('\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF', dtype=int64) + assert g[0] == -1 + h = fromstring(self.float32val, dtype=float32) + assert h[0] == float32(5.2) + i = fromstring(self.float64val, dtype=float64) + assert i[0] == float64(300.4) + j = fromstring(self.ulongval, dtype='L') + assert j[0] == 12 + + + def test_fromstring_invalid(self): + from numpypy import fromstring, uint16, uint8, int32 + #default dtype is 64-bit float, so 3 bytes should fail + raises(ValueError, fromstring, "\x01\x02\x03") + #3 bytes is not modulo 2 bytes (int16) + raises(ValueError, fromstring, "\x01\x03\x03", dtype=uint16) + #5 bytes is larger than 3 bytes + raises(ValueError, fromstring, "\x01\x02\x03", count=5, dtype=uint8) class AppTestRepr(BaseNumpyAppTest): diff --git a/pypy/module/micronumpy/types.py 
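The test_fromstring_invalid cases above exercise the size checks of the binary path (_fromstring_bin earlier in this changeset). A rough standalone sketch of just those checks, with plain integers standing in for the dtype machinery (check_binary_size is a made-up name for the sketch):

def check_binary_size(length, itemsize, count=-1):
    # Mirrors the validation in _fromstring_bin: the buffer must hold a whole
    # number of items, and enough of them to satisfy an explicit 'count'.
    if count == -1:
        count = length // itemsize
    if length % itemsize != 0:
        raise ValueError("string length %d not divisible by item size %d"
                         % (length, itemsize))
    if count * itemsize > length:
        raise ValueError("string is smaller than requested size")
    return count

for args in [(3, 8), (3, 2), (3, 1, 5)]:   # float64, int16, uint8 with count=5
    try:
        check_binary_size(*args)
    except ValueError as e:
        print(e)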
b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -8,6 +8,7 @@ from pypy.rlib.objectmodel import specialize from pypy.rlib.rarithmetic import LONG_BIT, widen from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rlib.rstruct.runpack import runpack def simple_unary_op(func): @@ -55,6 +56,7 @@ class Primitive(object): _mixin_ = True + def get_element_size(self): return rffi.sizeof(self.T) @@ -84,6 +86,9 @@ def _coerce(self, space, w_item): raise NotImplementedError + def default_fromstring(self, space): + raise NotImplementedError + def read(self, storage, width, i, offset): return self.box(libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), width, storage, i, offset @@ -102,6 +107,9 @@ width, storage, i, offset, value ) + def runpack_str(self, s): + return self.box(runpack(self.format_code, s)) + @simple_binary_op def add(self, v1, v2): return v1 + v2 @@ -164,6 +172,7 @@ class Bool(BaseType, Primitive): T = lltype.Bool BoxType = interp_boxes.W_BoolBox + format_code = "?" True = BoxType(True) False = BoxType(False) @@ -193,6 +202,9 @@ def for_computation(self, v): return int(v) + def default_fromstring(self, space): + return self.box(False) + class Integer(Primitive): _mixin_ = True @@ -206,6 +218,9 @@ def for_computation(self, v): return widen(v) + def default_fromstring(self, space): + return self.box(0) + @simple_binary_op def div(self, v1, v2): if v2 == 0: @@ -241,42 +256,52 @@ class Int8(BaseType, Integer): T = rffi.SIGNEDCHAR BoxType = interp_boxes.W_Int8Box + format_code = "b" class UInt8(BaseType, Integer): T = rffi.UCHAR BoxType = interp_boxes.W_UInt8Box + format_code = "B" class Int16(BaseType, Integer): T = rffi.SHORT BoxType = interp_boxes.W_Int16Box + format_code = "h" class UInt16(BaseType, Integer): T = rffi.USHORT BoxType = interp_boxes.W_UInt16Box + format_code = "H" class Int32(BaseType, Integer): T = rffi.INT BoxType = interp_boxes.W_Int32Box + format_code = "i" class UInt32(BaseType, Integer): T = rffi.UINT BoxType = interp_boxes.W_UInt32Box + format_code = "I" class Long(BaseType, Integer): T = rffi.LONG BoxType = interp_boxes.W_LongBox + format_code = "l" class ULong(BaseType, Integer): T = rffi.ULONG BoxType = interp_boxes.W_ULongBox + format_code = "L" class Int64(BaseType, Integer): T = rffi.LONGLONG BoxType = interp_boxes.W_Int64Box + format_code = "q" class UInt64(BaseType, Integer): T = rffi.ULONGLONG BoxType = interp_boxes.W_UInt64Box + format_code = "Q" def _coerce(self, space, w_item): try: @@ -304,6 +329,9 @@ def for_computation(self, v): return float(v) + def default_fromstring(self, space): + return self.box(-1.0) + @simple_binary_op def div(self, v1, v2): try: @@ -403,7 +431,9 @@ class Float32(BaseType, Float): T = rffi.FLOAT BoxType = interp_boxes.W_Float32Box + format_code = "f" class Float64(BaseType, Float): T = rffi.DOUBLE - BoxType = interp_boxes.W_Float64Box \ No newline at end of file + BoxType = interp_boxes.W_Float64Box + format_code = "d" \ No newline at end of file diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -656,7 +656,11 @@ os.fsync(f) # <- should also work with a file, or anything finally: # with a fileno() method f.close() - raises(OSError, os.fsync, fd) + try: + # May not raise anything with a buggy libc (or eatmydata) + os.fsync(fd) + except OSError: + pass raises(ValueError, os.fsync, -1) if hasattr(os, 'fdatasync'): @@ -668,7 
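Each itemtype in types.py above gains only a one-character format_code, while the shared Primitive.runpack_str does the actual decoding through runpack. A toy version of that division of labour, with struct.unpack standing in for pypy.rlib.rstruct.runpack and made-up class names:

import struct

class Int32Sketch(object):
    format_code = "i"

class Float64Sketch(object):
    format_code = "d"

def runpack_str(itemtype, raw):
    # One generic helper; the per-type knowledge lives entirely in format_code.
    return struct.unpack(itemtype.format_code, raw)[0]

print(runpack_str(Float64Sketch, struct.pack("d", 300.4)))   # 300.4
print(runpack_str(Int32Sketch, b"\x01\x00\x00\x00"))         # 1 on a little-endian machine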
+672,11 @@ os.fdatasync(fd) finally: f.close() - raises(OSError, os.fdatasync, fd) + try: + # May not raise anything with a buggy libc (or eatmydata) + os.fdatasync(fd) + except OSError: + pass raises(ValueError, os.fdatasync, -1) if hasattr(os, 'fchdir'): diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -210,9 +210,9 @@ def entry_bridge_ops(self, *args, **kwds): ops = list(self._allops(*args, **kwds)) labels = [op for op in ops if op.name == 'label'] - assert ops.index(labels[0]) == 0 - i = ops.index(labels[1]) - return ops[1:i] + i0 = ops.index(labels[0]) + i1 = ops.index(labels[1]) + return ops[i0+1:i1] @property def chunks(self): @@ -409,7 +409,7 @@ """ iter_exp_ops = iter(expected_ops) iter_ops = RevertableIterator(self.ops) - for opindex, exp_op in enumerate(iter_exp_ops): + for exp_op in iter_exp_ops: try: if exp_op == '...': # loop until we find an operation which matches @@ -430,7 +430,7 @@ if exp_op[4] is False: # optional operation iter_ops.revert_one() continue # try to match with the next exp_op - e.opindex = opindex + e.opindex = iter_ops.index - 1 raise # # make sure we exhausted iter_ops diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -45,8 +45,10 @@ cmdline = [sys.executable] if not import_site: cmdline.append('-S') - for key, value in jitopts.iteritems(): - cmdline += ['--jit', '%s=%s' % (key, value)] + if jitopts: + jitcmdline = ['%s=%s' % (key, value) + for key, value in jitopts.items()] + cmdline += ['--jit', ','.join(jitcmdline)] cmdline.append(str(self.filepath)) # print cmdline, logfile diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -6,6 +6,8 @@ def main(n): def f(): for i in range(10000): + i -= 1 + i -= 42 # ID: subtract yield i def g(): @@ -15,6 +17,13 @@ g() log = self.run(main, [500]) + # XXX XXX this test fails so far because of a detail that + # changed with jit-simplify-backendintf. We should try to + # think of a way to be more resistent against such details. + # The issue is that we now get one Tracing, then go back + # to the interpreter hoping to immediately run the JITted + # code; but instead, we Trace again, just because another + # counter was also about to reach its limit... loop, = log.loops_by_filename(self.filepath, is_entry_bridge='*') assert loop.match_by_id("generator", """ ... @@ -26,3 +35,8 @@ i47 = arraylen_gc(p8, descr=) # Should be removed by backend jump(..., descr=...) """) + assert loop.match_by_id("subtract", """ + setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me + i2 = int_sub_ovf(i1, 42) + guard_no_overflow(descr=...) 
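The test_00_model change above folds every JIT parameter into a single --jit option with comma-joined key=value pairs, instead of repeating --jit once per key. A tiny sketch of the resulting command line (the parameter values are invented for the example):

jitopts = {"threshold": 3, "trace_eagerness": 2}    # invented example values
cmdline = ["pypy", "-S"]
if jitopts:
    jitcmdline = ['%s=%s' % (key, value) for key, value in jitopts.items()]
    cmdline += ['--jit', ','.join(jitcmdline)]
cmdline.append("x.py")
print(cmdline)   # e.g. ['pypy', '-S', '--jit', 'threshold=3,trace_eagerness=2', 'x.py']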
+ """) diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -31,9 +31,9 @@ imag2 = float2longlong(imag2) return real1 == real2 and imag1 == imag2 - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.rlib.longlong2float import float2longlong from pypy.objspace.std.model import IDTAG_COMPLEX as tag real = space.float_w(space.getattr(self, space.wrap("real"))) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -34,9 +34,9 @@ two = float2longlong(space.float_w(w_other)) return one == two - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.rlib.longlong2float import float2longlong from pypy.objspace.std.model import IDTAG_FLOAT as tag val = float2longlong(space.float_w(self)) diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -26,9 +26,9 @@ return self is w_other return space.int_w(self) == space.int_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.objspace.std.model import IDTAG_INT as tag b = space.bigint_w(self) b = b.lshift(3).or_(rbigint.fromint(tag)) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -18,9 +18,9 @@ return self is w_other return space.bigint_w(self).eq(space.bigint_w(w_other)) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None from pypy.objspace.std.model import IDTAG_LONG as tag b = space.bigint_w(self) b = b.lshift(3).or_(rbigint.fromint(tag)) diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -177,52 +177,55 @@ _specialisations = [] Cls_ii = make_specialised_class((int, int)) -Cls_is = make_specialised_class((int, str)) -Cls_io = make_specialised_class((int, object)) -Cls_si = make_specialised_class((str, int)) -Cls_ss = make_specialised_class((str, str)) -Cls_so = make_specialised_class((str, object)) -Cls_oi = make_specialised_class((object, int)) -Cls_os = make_specialised_class((object, str)) +#Cls_is = make_specialised_class((int, str)) +#Cls_io = make_specialised_class((int, object)) +#Cls_si = make_specialised_class((str, int)) +#Cls_ss = make_specialised_class((str, str)) +#Cls_so = make_specialised_class((str, object)) +#Cls_oi = make_specialised_class((object, int)) +#Cls_os = make_specialised_class((object, str)) Cls_oo = make_specialised_class((object, object)) Cls_ff = make_specialised_class((float, float)) -Cls_ooo = make_specialised_class((object, object, object)) +#Cls_ooo = make_specialised_class((object, object, object)) def makespecialisedtuple(space, list_w): if len(list_w) == 2: w_arg1, w_arg2 = list_w w_type1 = space.type(w_arg1) - w_type2 = space.type(w_arg2) + #w_type2 = space.type(w_arg2) # if w_type1 is 
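The immutable_unique_id methods above all use one scheme: take the value's bits (via float2longlong for floats and complexes), shift left by 3 and tag the low bits per type, so that equal immutable values report the same id. A rough plain-Python illustration (the tag values below are placeholders, not the real IDTAG_* constants):

import struct

IDTAG_INT = 1       # placeholder tags for the sketch only
IDTAG_FLOAT = 5

def int_unique_id(value):
    return (value << 3) | IDTAG_INT

def float_unique_id(value):
    # Reinterpret the float's bits as a 64-bit integer, like float2longlong.
    bits = struct.unpack("<q", struct.pack("<d", value))[0]
    return (bits << 3) | IDTAG_FLOAT

print(int_unique_id(42) == int_unique_id(40 + 2))        # True
print(float_unique_id(0.5) == float_unique_id(1.0 / 2))  # True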
space.w_int: + w_type2 = space.type(w_arg2) if w_type2 is space.w_int: return Cls_ii(space, w_arg1, w_arg2) - elif w_type2 is space.w_str: - return Cls_is(space, w_arg1, w_arg2) - else: - return Cls_io(space, w_arg1, w_arg2) + #elif w_type2 is space.w_str: + # return Cls_is(space, w_arg1, w_arg2) + #else: + # return Cls_io(space, w_arg1, w_arg2) # - elif w_type1 is space.w_str: - if w_type2 is space.w_int: - return Cls_si(space, w_arg1, w_arg2) - elif w_type2 is space.w_str: - return Cls_ss(space, w_arg1, w_arg2) - else: - return Cls_so(space, w_arg1, w_arg2) + #elif w_type1 is space.w_str: + # if w_type2 is space.w_int: + # return Cls_si(space, w_arg1, w_arg2) + # elif w_type2 is space.w_str: + # return Cls_ss(space, w_arg1, w_arg2) + # else: + # return Cls_so(space, w_arg1, w_arg2) # - elif w_type1 is space.w_float and w_type2 is space.w_float: - return Cls_ff(space, w_arg1, w_arg2) + elif w_type1 is space.w_float: + w_type2 = space.type(w_arg2) + if w_type2 is space.w_float: + return Cls_ff(space, w_arg1, w_arg2) # - else: - if w_type2 is space.w_int: - return Cls_oi(space, w_arg1, w_arg2) - elif w_type2 is space.w_str: - return Cls_os(space, w_arg1, w_arg2) - else: - return Cls_oo(space, w_arg1, w_arg2) + #else: + # if w_type2 is space.w_int: + # return Cls_oi(space, w_arg1, w_arg2) + # elif w_type2 is space.w_str: + # return Cls_os(space, w_arg1, w_arg2) + # else: + return Cls_oo(space, w_arg1, w_arg2) # - elif len(list_w) == 3: - return Cls_ooo(space, list_w[0], list_w[1], list_w[2]) + #elif len(list_w) == 3: + # return Cls_ooo(space, list_w[0], list_w[1], list_w[2]) else: raise NotSpecialised diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -32,9 +32,9 @@ return False return space.str_w(self) is space.str_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None return space.wrap(compute_unique_id(space.str_w(self))) diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -253,6 +253,12 @@ y = 2j assert id(x) != id(y) + def test_object_hash_immutable(self): + x = 42 + y = 40 + y += 2 + assert object.__hash__(x) == object.__hash__(y) + def test_isinstance_shortcut(): from pypy.objspace.std import objspace diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py --- a/pypy/objspace/std/test/test_specialisedtupleobject.py +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -33,15 +33,15 @@ N_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": False}) S_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) - def hash_test(values): + def hash_test(values, must_be_specialized=True): N_values_w = [N_space.wrap(value) for value in values] S_values_w = [S_space.wrap(value) for value in values] N_w_tuple = N_space.newtuple(N_values_w) S_w_tuple = S_space.newtuple(S_values_w) - - assert isinstance(S_w_tuple, W_SpecialisedTupleObject) + + if must_be_specialized: + assert isinstance(S_w_tuple, W_SpecialisedTupleObject) assert isinstance(N_w_tuple, W_TupleObject) - assert not N_space.is_true(N_space.eq(N_w_tuple, S_w_tuple)) assert S_space.is_true(S_space.eq(N_w_tuple, S_w_tuple)) assert S_space.is_true(S_space.eq(N_space.hash(N_w_tuple), 
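After the commenting-out above only the ii, ff and oo cases remain (the test file later records exactly have = ['ii', 'ff', 'oo']). A plain-Python sketch of the reduced dispatch, with strings standing in for the specialised classes and an ordinary exception for the NotSpecialised case:

def specialisation_for(values):
    if len(values) != 2:                 # 3-tuples are no longer specialised
        raise ValueError("NotSpecialised")
    a, b = values
    if type(a) is int and type(b) is int:
        return "Cls_ii"
    if type(a) is float and type(b) is float:
        return "Cls_ff"
    return "Cls_oo"                      # every other pair falls back to (object, object)

print(specialisation_for((1, 2)))        # Cls_ii
print(specialisation_for((1.5, 2.5)))    # Cls_ff
print(specialisation_for((2.2, '333')))  # Cls_oo -- the 'mongrel' case in the tests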
S_space.hash(S_w_tuple))) @@ -53,7 +53,7 @@ hash_test([1,(1,2)]) hash_test([1,('a',2)]) hash_test([1,()]) - hash_test([1,2,3]) + hash_test([1,2,3], must_be_specialized=False) class AppTestW_SpecialisedTupleObject: @@ -83,6 +83,8 @@ return ("SpecialisedTupleObject" + expected) in r def test_createspecialisedtuple(self): + have = ['ii', 'ff', 'oo'] + # spec = {int: 'i', float: 'f', str: 's', @@ -92,14 +94,14 @@ for y in [43, 4.3, "bar", []]: expected1 = spec[type(x)] expected2 = spec[type(y)] - if (expected1 == 'f') ^ (expected2 == 'f'): - if expected1 == 'f': expected1 = 'o' - if expected2 == 'f': expected2 = 'o' + if expected1 + expected2 not in have: + expected1 = expected2 = 'o' obj = (x, y) assert self.isspecialised(obj, '_' + expected1 + expected2) # - obj = (1, 2, 3) - assert self.isspecialised(obj, '_ooo') + if 'ooo' in have: + obj = (1, 2, 3) + assert self.isspecialised(obj, '_ooo') def test_delegation(self): t = self.forbid_delegation((42, 43)) @@ -214,6 +216,8 @@ raises(IndexError, "t[-3]") def test_three_tuples(self): + if not self.isspecialised((1, 2, 3)): + skip("don't have specialization for 3-tuples") b = self.forbid_delegation((1, 2, 3)) c = (1,) d = c + (2, 3) @@ -221,6 +225,16 @@ assert b == d def test_mongrel(self): + a = self.forbid_delegation((2.2, '333')) + assert self.isspecialised(a) + assert len(a) == 2 + assert a[0] == 2.2 and a[1] == '333' + b = ('333',) + assert a == (2.2,) + b + assert not a != (2.2,) + b + # + if not self.isspecialised((1, 2, 3)): + skip("don't have specialization for 3-tuples") a = self.forbid_delegation((1, 2.2, '333')) assert self.isspecialised(a) assert len(a) == 3 diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -32,9 +32,9 @@ return False return space.unicode_w(self) is space.unicode_w(w_other) - def unique_id(self, space): + def immutable_unique_id(self, space): if self.user_overridden_class: - return W_Object.unique_id(self, space) + return None return space.wrap(compute_unique_id(space.unicode_w(self))) diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -395,6 +395,7 @@ 'retrace_limit': 5, 'max_retrace_guards': 15, 'enable_opts': 'all', + 'decay_halflife': 40, } unroll_parameters = unrolling_iterable(PARAMETERS.items()) DEFAULT = object() diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py --- a/pypy/rlib/longlong2float.py +++ b/pypy/rlib/longlong2float.py @@ -79,19 +79,19 @@ longlong2float = rffi.llexternal( "pypy__longlong2float", [rffi.LONGLONG], rffi.DOUBLE, _callable=longlong2float_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) float2longlong = rffi.llexternal( "pypy__float2longlong", [rffi.DOUBLE], rffi.LONGLONG, _callable=float2longlong_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, _callable=uint2singlefloat_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) singlefloat2uint = rffi.llexternal( "pypy__singlefloat2uint", [rffi.FLOAT], rffi.UINT, _callable=singlefloat2uint_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True) + _nowrapper=True, elidable_function=True, 
sandboxsafe=True) diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -16,6 +16,7 @@ from pypy.rpython.annlowlevel import llhelper from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.rstring import StringBuilder, UnicodeBuilder +from pypy.rlib import jit from pypy.rpython.lltypesystem import llmemory import os, sys @@ -249,8 +250,7 @@ wrapper = func_with_new_name(wrapper, name) if calling_conv != "c": - from pypy.rlib.jit import dont_look_inside - wrapper = dont_look_inside(wrapper) + wrapper = jit.dont_look_inside(wrapper) return wrapper @@ -697,6 +697,8 @@ return b.build() # str -> char* + # Can't inline this because of the raw address manipulation. + @jit.dont_look_inside def get_nonmovingbuffer(data): """ Either returns a non-moving copy or performs neccessary pointer @@ -717,6 +719,8 @@ get_nonmovingbuffer._annenforceargs_ = [strtype] # (str, char*) -> None + # Can't inline this because of the raw address manipulation. + @jit.dont_look_inside def free_nonmovingbuffer(data, buf): """ Either free a non-moving buffer or keep the original storage alive. diff --git a/pypy/rpython/rint.py b/pypy/rpython/rint.py --- a/pypy/rpython/rint.py +++ b/pypy/rpython/rint.py @@ -126,10 +126,7 @@ rtype_inplace_rshift = rtype_rshift def rtype_pow(_, hop): - raise MissingRTypeOperation("pow(int, int)" - " (use float**float instead; it is too" - " easy to overlook the overflow" - " issues of int**int)") + raise MissingRTypeOperation("'**' not supported in RPython") rtype_pow_ovf = rtype_pow rtype_inplace_pow = rtype_pow diff --git a/pypy/tool/clean_old_branches.py b/pypy/tool/clean_old_branches.py --- a/pypy/tool/clean_old_branches.py +++ b/pypy/tool/clean_old_branches.py @@ -11,14 +11,17 @@ sys.exit(1) def heads(args): - g = os.popen(r"hg heads --topo %s --template '{branches} {node|short}\n'" + g = os.popen(r"hg heads --topo %s --template '{node|short}:{branches}\n'" % args, 'r') result = g.read() g.close() result = result.splitlines(False) - result = [s for s in result - if not s.startswith(' ') - and not s.startswith('closed-branches ')] + for line in result: + if len(line.split(':', 1)) != 2: + raise ValueError("'result' contains: %r" % line) + result = [s.split(':', 1) for s in result] + result = [(head, branch) for (head, branch) in result + if branch not in ['', 'closed-branches']] return result all_heads = heads("--closed") @@ -34,8 +37,7 @@ closed_heads.reverse() -for branch_head in closed_heads: - branch, head = branch_head.split() +for head, branch in closed_heads: print '\t', branch print print 'The branches listed above will be merged to "closed-branches".' 
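The clean_old_branches.py rewrite above switches the hg template to '{node|short}:{branches}\n', so each line is head-then-branch and an empty branch name still splits cleanly. A standalone sketch of the new parsing against canned output (hashes and branch names are invented):

sample = ("0123abcd4567:some-old-branch\n"
          "89abcdef0123:\n"
          "fedcba987654:closed-branches\n"
          "deadbeef0001:another-old-branch\n")

heads = []
for line in sample.splitlines():
    head, branch = line.split(':', 1)
    if branch not in ('', 'closed-branches'):   # skip default and already-closed branches
        heads.append((head, branch))

print(heads)
# [('0123abcd4567', 'some-old-branch'), ('deadbeef0001', 'another-old-branch')]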
@@ -54,8 +56,7 @@ print '*** error %r' % (err,) sys.exit(1) -for branch_head in closed_heads: - branch, head = branch_head.split() +for head, branch in closed_heads: print print '***** %s ***** %s *****' % (branch, head) do("hg up --clean closed-branches") diff --git a/pypy/tool/gcc_cache.py b/pypy/tool/gcc_cache.py --- a/pypy/tool/gcc_cache.py +++ b/pypy/tool/gcc_cache.py @@ -11,6 +11,9 @@ # Import 'platform' every time, the compiler may have been changed from pypy.translator.platform import platform cache_dir = cache_dir_root.join(cachename).ensure(dir=1) + c_files.extend([py.path.local(f) for f in eci.separate_module_files]) + eci = ExternalCompilationInfo(**eci._copy_attributes()) + eci.separate_module_files = () filecontents = [c_file.read() for c_file in c_files] key = repr((filecontents, eci, platform.key())) hash = md5(key).hexdigest() From noreply at buildbot.pypy.org Tue Dec 27 14:09:29 2011 From: noreply at buildbot.pypy.org (ned) Date: Tue, 27 Dec 2011 14:09:29 +0100 (CET) Subject: [pypy-commit] pypy nedbat-sandbox-2: Merge tip to nedbat-sandbox-2 Message-ID: <20111227130929.2DA6F82B1D@wyvern.cs.uni-duesseldorf.de> Author: Ned Batchelder Branch: nedbat-sandbox-2 Changeset: r50901:808b12f6b195 Date: 2011-12-27 08:08 -0500 http://bitbucket.org/pypy/pypy/changeset/808b12f6b195/ Log: Merge tip to nedbat-sandbox-2 diff too long, truncating to 10000 out of 16104 lines diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -231,8 +231,10 @@ sqlite.sqlite3_result_text.argtypes = [c_void_p, c_char_p, c_int, c_void_p] sqlite.sqlite3_result_text.restype = None -sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] -sqlite.sqlite3_enable_load_extension.restype = c_int +HAS_LOAD_EXTENSION = hasattr(sqlite, "sqlite3_enable_load_extension") +if HAS_LOAD_EXTENSION: + sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] + sqlite.sqlite3_enable_load_extension.restype = c_int ########################################## # END Wrapped SQLite C API and constants @@ -708,13 +710,14 @@ from sqlite3.dump import _iterdump return _iterdump(self) - def enable_load_extension(self, enabled): - self._check_thread() - self._check_closed() + if HAS_LOAD_EXTENSION: + def enable_load_extension(self, enabled): + self._check_thread() + self._check_closed() - rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) - if rc != SQLITE_OK: - raise OperationalError("Error enabling load extension") + rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) + if rc != SQLITE_OK: + raise OperationalError("Error enabling load extension") DML, DQL, DDL = range(3) diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -180,7 +180,12 @@ if name is None: name = pyobj.func_name if signature is None: - signature = cpython_code_signature(pyobj.func_code) + if hasattr(pyobj, '_generator_next_method_of_'): + from pypy.interpreter.argument import Signature + signature = Signature(['entry']) # haaaaaack + defaults = () + else: + signature = cpython_code_signature(pyobj.func_code) if defaults is None: defaults = pyobj.func_defaults self.name = name diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -51,6 +51,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, 
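The _sqlite3.py hunk above only declares and exposes enable_load_extension when the loaded libsqlite3 actually exports the sqlite3_enable_load_extension symbol, since some builds omit it. The same hasattr guard in a self-contained ctypes form (a sketch, not the module's actual loading code):

import ctypes, ctypes.util

libname = ctypes.util.find_library("sqlite3")
lib = ctypes.CDLL(libname) if libname else None

# ctypes raises AttributeError for missing symbols, so hasattr works as a probe.
HAS_LOAD_EXTENSION = lib is not None and hasattr(lib, "sqlite3_enable_load_extension")
if HAS_LOAD_EXTENSION:
    lib.sqlite3_enable_load_extension.argtypes = [ctypes.c_void_p, ctypes.c_int]
    lib.sqlite3_enable_load_extension.restype = ctypes.c_int

print(HAS_LOAD_EXTENSION)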
host): + "Find which required field is missing." + state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \"%s\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \"%s\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -94,17 +112,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." - for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \"%s\" missing from %s" - err = err % (missing, host) - w_err = space.wrap(err) - raise OperationError(space.w_TypeError, w_err) - raise AssertionError("should not reach here") class mod(AST): @@ -112,7 +119,6 @@ class Module(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -128,7 +134,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Module') + self.missing_field(space, ['body'], 'Module') else: pass w_list = self.w_body @@ -145,7 +151,6 @@ class Interactive(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -161,7 +166,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Interactive') + self.missing_field(space, ['body'], 'Interactive') else: pass w_list = self.w_body @@ -178,7 +183,6 @@ class Expression(mod): - def __init__(self, body): self.body = body self.initialization_state = 1 @@ -192,7 +196,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Expression') + self.missing_field(space, ['body'], 'Expression') else: pass self.body.sync_app_attrs(space) @@ -200,7 +204,6 @@ class Suite(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -216,7 +219,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Suite') + self.missing_field(space, ['body'], 'Suite') else: pass w_list = self.w_body @@ -232,15 +235,13 @@ class stmt(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class FunctionDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, args, body, decorator_list, lineno, col_offset): self.name = name self.args = args @@ -264,7 +265,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'args', 'body', 'decorator_list', 'lineno', 'col_offset'], 'FunctionDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef') else: pass self.args.sync_app_attrs(space) @@ -292,9 +293,6 @@ class ClassDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, bases, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases @@ -320,7 +318,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, 
['name', 'bases', 'body', 'decorator_list', 'lineno', 'col_offset'], 'ClassDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef') else: pass w_list = self.w_bases @@ -357,9 +355,6 @@ class Return(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -374,10 +369,10 @@ return visitor.visit_Return(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Return') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Return') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -385,9 +380,6 @@ class Delete(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, targets, lineno, col_offset): self.targets = targets self.w_targets = None @@ -404,7 +396,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['targets', 'lineno', 'col_offset'], 'Delete') + self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete') else: pass w_list = self.w_targets @@ -421,9 +413,6 @@ class Assign(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, targets, value, lineno, col_offset): self.targets = targets self.w_targets = None @@ -442,7 +431,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['targets', 'value', 'lineno', 'col_offset'], 'Assign') + self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') else: pass w_list = self.w_targets @@ -460,9 +449,6 @@ class AugAssign(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, target, op, value, lineno, col_offset): self.target = target self.op = op @@ -480,7 +466,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['target', 'op', 'value', 'lineno', 'col_offset'], 'AugAssign') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') else: pass self.target.sync_app_attrs(space) @@ -489,9 +475,6 @@ class Print(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, dest, values, nl, lineno, col_offset): self.dest = dest self.values = values @@ -511,10 +494,10 @@ return visitor.visit_Print(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 30: - missing_field(space, self.initialization_state, [None, 'values', 'nl', 'lineno', 'col_offset'], 'Print') + if (self.initialization_state & ~4) ^ 27: + self.missing_field(space, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.dest = None if self.dest: self.dest.sync_app_attrs(space) @@ -532,9 +515,6 @@ class For(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, target, iter, body, orelse, lineno, col_offset): self.target = target self.iter = iter @@ -559,7 +539,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['target', 'iter', 'body', 'orelse', 'lineno', 'col_offset'], 'For') + self.missing_field(space, ['lineno', 
'col_offset', 'target', 'iter', 'body', 'orelse'], 'For') else: pass self.target.sync_app_attrs(space) @@ -588,9 +568,6 @@ class While(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -613,7 +590,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'While') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') else: pass self.test.sync_app_attrs(space) @@ -641,9 +618,6 @@ class If(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -666,7 +640,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'If') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') else: pass self.test.sync_app_attrs(space) @@ -694,9 +668,6 @@ class With(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, context_expr, optional_vars, body, lineno, col_offset): self.context_expr = context_expr self.optional_vars = optional_vars @@ -717,10 +688,10 @@ return visitor.visit_With(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 29: - missing_field(space, self.initialization_state, ['context_expr', None, 'body', 'lineno', 'col_offset'], 'With') + if (self.initialization_state & ~8) ^ 23: + self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.optional_vars = None self.context_expr.sync_app_attrs(space) if self.optional_vars: @@ -739,9 +710,6 @@ class Raise(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, inst, tback, lineno, col_offset): self.type = type self.inst = inst @@ -762,14 +730,14 @@ return visitor.visit_Raise(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~7) ^ 24: - missing_field(space, self.initialization_state, [None, None, None, 'lineno', 'col_offset'], 'Raise') + if (self.initialization_state & ~28) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.inst = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.tback = None if self.type: self.type.sync_app_attrs(space) @@ -781,9 +749,6 @@ class TryExcept(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, handlers, orelse, lineno, col_offset): self.body = body self.w_body = None @@ -808,7 +773,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['body', 'handlers', 'orelse', 'lineno', 'col_offset'], 'TryExcept') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') else: pass w_list = self.w_body @@ -845,9 +810,6 @@ class TryFinally(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, body, finalbody, lineno, col_offset): self.body = body self.w_body = None @@ -868,7 +830,7 @@ def sync_app_attrs(self, space): 
if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['body', 'finalbody', 'lineno', 'col_offset'], 'TryFinally') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') else: pass w_list = self.w_body @@ -895,9 +857,6 @@ class Assert(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, test, msg, lineno, col_offset): self.test = test self.msg = msg @@ -914,10 +873,10 @@ return visitor.visit_Assert(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 13: - missing_field(space, self.initialization_state, ['test', None, 'lineno', 'col_offset'], 'Assert') + if (self.initialization_state & ~8) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.msg = None self.test.sync_app_attrs(space) if self.msg: @@ -926,9 +885,6 @@ class Import(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -945,7 +901,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Import') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import') else: pass w_list = self.w_names @@ -962,9 +918,6 @@ class ImportFrom(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, module, names, level, lineno, col_offset): self.module = module self.names = names @@ -982,12 +935,12 @@ return visitor.visit_ImportFrom(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~5) ^ 26: - missing_field(space, self.initialization_state, [None, 'names', None, 'lineno', 'col_offset'], 'ImportFrom') + if (self.initialization_state & ~20) ^ 11: + self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.module = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.level = 0 w_list = self.w_names if w_list is not None: @@ -1003,9 +956,6 @@ class Exec(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, globals, locals, lineno, col_offset): self.body = body self.globals = globals @@ -1025,12 +975,12 @@ return visitor.visit_Exec(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~6) ^ 25: - missing_field(space, self.initialization_state, ['body', None, None, 'lineno', 'col_offset'], 'Exec') + if (self.initialization_state & ~24) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'body', None, None], 'Exec') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.globals = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.locals = None self.body.sync_app_attrs(space) if self.globals: @@ -1041,9 +991,6 @@ class Global(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -1058,7 +1005,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Global') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') else: pass w_list = self.w_names @@ -1072,9 +1019,6 @@ class Expr(stmt): - _lineno_mask = 
2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -1089,7 +1033,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Expr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr') else: pass self.value.sync_app_attrs(space) @@ -1097,9 +1041,6 @@ class Pass(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1112,16 +1053,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Pass') + self.missing_field(space, ['lineno', 'col_offset'], 'Pass') else: pass class Break(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1134,16 +1072,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Break') + self.missing_field(space, ['lineno', 'col_offset'], 'Break') else: pass class Continue(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1156,21 +1091,19 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Continue') + self.missing_field(space, ['lineno', 'col_offset'], 'Continue') else: pass class expr(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class BoolOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, values, lineno, col_offset): self.op = op self.values = values @@ -1188,7 +1121,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'values', 'lineno', 'col_offset'], 'BoolOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp') else: pass w_list = self.w_values @@ -1205,9 +1138,6 @@ class BinOp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, op, right, lineno, col_offset): self.left = left self.op = op @@ -1225,7 +1155,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'op', 'right', 'lineno', 'col_offset'], 'BinOp') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'op', 'right'], 'BinOp') else: pass self.left.sync_app_attrs(space) @@ -1234,9 +1164,6 @@ class UnaryOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, operand, lineno, col_offset): self.op = op self.operand = operand @@ -1252,7 +1179,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'operand', 'lineno', 'col_offset'], 'UnaryOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'operand'], 'UnaryOp') else: pass self.operand.sync_app_attrs(space) @@ -1260,9 +1187,6 @@ class Lambda(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, args, body, lineno, col_offset): self.args = args self.body = body @@ -1279,7 +1203,7 @@ def 
sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['args', 'body', 'lineno', 'col_offset'], 'Lambda') + self.missing_field(space, ['lineno', 'col_offset', 'args', 'body'], 'Lambda') else: pass self.args.sync_app_attrs(space) @@ -1288,9 +1212,6 @@ class IfExp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -1309,7 +1230,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'IfExp') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'IfExp') else: pass self.test.sync_app_attrs(space) @@ -1319,9 +1240,6 @@ class Dict(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, keys, values, lineno, col_offset): self.keys = keys self.w_keys = None @@ -1342,7 +1260,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['keys', 'values', 'lineno', 'col_offset'], 'Dict') + self.missing_field(space, ['lineno', 'col_offset', 'keys', 'values'], 'Dict') else: pass w_list = self.w_keys @@ -1369,9 +1287,6 @@ class Set(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, elts, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1388,7 +1303,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['elts', 'lineno', 'col_offset'], 'Set') + self.missing_field(space, ['lineno', 'col_offset', 'elts'], 'Set') else: pass w_list = self.w_elts @@ -1405,9 +1320,6 @@ class ListComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1426,7 +1338,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'ListComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'ListComp') else: pass self.elt.sync_app_attrs(space) @@ -1444,9 +1356,6 @@ class SetComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1465,7 +1374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'SetComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'SetComp') else: pass self.elt.sync_app_attrs(space) @@ -1483,9 +1392,6 @@ class DictComp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, key, value, generators, lineno, col_offset): self.key = key self.value = value @@ -1506,7 +1412,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['key', 'value', 'generators', 'lineno', 'col_offset'], 'DictComp') + self.missing_field(space, ['lineno', 'col_offset', 'key', 'value', 'generators'], 'DictComp') else: pass self.key.sync_app_attrs(space) @@ -1525,9 +1431,6 @@ class GeneratorExp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1546,7 +1449,7 @@ 
def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'GeneratorExp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'GeneratorExp') else: pass self.elt.sync_app_attrs(space) @@ -1564,9 +1467,6 @@ class Yield(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1581,10 +1481,10 @@ return visitor.visit_Yield(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Yield') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Yield') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -1592,9 +1492,6 @@ class Compare(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, ops, comparators, lineno, col_offset): self.left = left self.ops = ops @@ -1615,7 +1512,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'ops', 'comparators', 'lineno', 'col_offset'], 'Compare') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'ops', 'comparators'], 'Compare') else: pass self.left.sync_app_attrs(space) @@ -1640,9 +1537,6 @@ class Call(expr): - _lineno_mask = 32 - _col_offset_mask = 64 - def __init__(self, func, args, keywords, starargs, kwargs, lineno, col_offset): self.func = func self.args = args @@ -1670,12 +1564,12 @@ return visitor.visit_Call(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~24) ^ 103: - missing_field(space, self.initialization_state, ['func', 'args', 'keywords', None, None, 'lineno', 'col_offset'], 'Call') + if (self.initialization_state & ~96) ^ 31: + self.missing_field(space, ['lineno', 'col_offset', 'func', 'args', 'keywords', None, None], 'Call') else: - if not self.initialization_state & 8: + if not self.initialization_state & 32: self.starargs = None - if not self.initialization_state & 16: + if not self.initialization_state & 64: self.kwargs = None self.func.sync_app_attrs(space) w_list = self.w_args @@ -1706,9 +1600,6 @@ class Repr(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1723,7 +1614,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Repr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Repr') else: pass self.value.sync_app_attrs(space) @@ -1731,9 +1622,6 @@ class Num(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, n, lineno, col_offset): self.n = n expr.__init__(self, lineno, col_offset) @@ -1747,16 +1635,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['n', 'lineno', 'col_offset'], 'Num') + self.missing_field(space, ['lineno', 'col_offset', 'n'], 'Num') else: pass class Str(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, s, lineno, col_offset): self.s = s expr.__init__(self, lineno, col_offset) @@ -1770,16 +1655,13 @@ def sync_app_attrs(self, space): if 
(self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['s', 'lineno', 'col_offset'], 'Str') + self.missing_field(space, ['lineno', 'col_offset', 's'], 'Str') else: pass class Attribute(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, attr, ctx, lineno, col_offset): self.value = value self.attr = attr @@ -1796,7 +1678,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'attr', 'ctx', 'lineno', 'col_offset'], 'Attribute') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'attr', 'ctx'], 'Attribute') else: pass self.value.sync_app_attrs(space) @@ -1804,9 +1686,6 @@ class Subscript(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, slice, ctx, lineno, col_offset): self.value = value self.slice = slice @@ -1824,7 +1703,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'slice', 'ctx', 'lineno', 'col_offset'], 'Subscript') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'slice', 'ctx'], 'Subscript') else: pass self.value.sync_app_attrs(space) @@ -1833,9 +1712,6 @@ class Name(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, id, ctx, lineno, col_offset): self.id = id self.ctx = ctx @@ -1850,16 +1726,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['id', 'ctx', 'lineno', 'col_offset'], 'Name') + self.missing_field(space, ['lineno', 'col_offset', 'id', 'ctx'], 'Name') else: pass class List(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1877,7 +1750,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'List') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'List') else: pass w_list = self.w_elts @@ -1894,9 +1767,6 @@ class Tuple(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1914,7 +1784,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'Tuple') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'Tuple') else: pass w_list = self.w_elts @@ -1931,9 +1801,6 @@ class Const(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1947,7 +1814,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Const') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Const') else: pass @@ -2009,7 +1876,6 @@ class Ellipsis(slice): - def __init__(self): self.initialization_state = 0 @@ -2021,14 +1887,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 0: - missing_field(space, self.initialization_state, [], 'Ellipsis') + self.missing_field(space, [], 'Ellipsis') else: pass class Slice(slice): - def __init__(self, lower, upper, step): self.lower = lower self.upper = upper @@ -2049,7 +1914,7 @@ def sync_app_attrs(self, space): 
if (self.initialization_state & ~7) ^ 0: - missing_field(space, self.initialization_state, [None, None, None], 'Slice') + self.missing_field(space, [None, None, None], 'Slice') else: if not self.initialization_state & 1: self.lower = None @@ -2067,7 +1932,6 @@ class ExtSlice(slice): - def __init__(self, dims): self.dims = dims self.w_dims = None @@ -2083,7 +1947,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['dims'], 'ExtSlice') + self.missing_field(space, ['dims'], 'ExtSlice') else: pass w_list = self.w_dims @@ -2100,7 +1964,6 @@ class Index(slice): - def __init__(self, value): self.value = value self.initialization_state = 1 @@ -2114,7 +1977,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['value'], 'Index') + self.missing_field(space, ['value'], 'Index') else: pass self.value.sync_app_attrs(space) @@ -2377,7 +2240,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['target', 'iter', 'ifs'], 'comprehension') + self.missing_field(space, ['target', 'iter', 'ifs'], 'comprehension') else: pass self.target.sync_app_attrs(space) @@ -2394,15 +2257,13 @@ node.sync_app_attrs(space) class excepthandler(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class ExceptHandler(excepthandler): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, name, body, lineno, col_offset): self.type = type self.name = name @@ -2424,12 +2285,12 @@ return visitor.visit_ExceptHandler(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~3) ^ 28: - missing_field(space, self.initialization_state, [None, None, 'body', 'lineno', 'col_offset'], 'ExceptHandler') + if (self.initialization_state & ~12) ^ 19: + self.missing_field(space, ['lineno', 'col_offset', None, None, 'body'], 'ExceptHandler') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.name = None if self.type: self.type.sync_app_attrs(space) @@ -2470,7 +2331,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~6) ^ 9: - missing_field(space, self.initialization_state, ['args', None, None, 'defaults'], 'arguments') + self.missing_field(space, ['args', None, None, 'defaults'], 'arguments') else: if not self.initialization_state & 2: self.vararg = None @@ -2513,7 +2374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['arg', 'value'], 'keyword') + self.missing_field(space, ['arg', 'value'], 'keyword') else: pass self.value.sync_app_attrs(space) @@ -2533,7 +2394,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~2) ^ 1: - missing_field(space, self.initialization_state, ['name', None], 'alias') + self.missing_field(space, ['name', None], 'alias') else: if not self.initialization_state & 2: self.asname = None @@ -3019,6 +2880,8 @@ def Expression_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3098,7 +2961,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj 
- if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -3112,14 +2975,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def stmt_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -3133,7 +2996,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 stmt.typedef = typedef.TypeDef("stmt", AST.typedef, @@ -3149,7 +3012,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3163,14 +3026,14 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def FunctionDef_get_args(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -3184,10 +3047,10 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def FunctionDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3201,10 +3064,10 @@ def FunctionDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def FunctionDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3218,7 +3081,7 @@ def FunctionDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _FunctionDef_field_unroller = unrolling_iterable(['name', 'args', 'body', 'decorator_list']) def FunctionDef_init(space, w_self, __args__): @@ -3254,7 +3117,7 @@ w_obj = w_self.getdictvalue(space, 'name') if 
w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3268,10 +3131,10 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ClassDef_get_bases(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases') if w_self.w_bases is None: @@ -3285,10 +3148,10 @@ def ClassDef_set_bases(space, w_self, w_new_value): w_self.w_bases = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ClassDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3302,10 +3165,10 @@ def ClassDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def ClassDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3319,7 +3182,7 @@ def ClassDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _ClassDef_field_unroller = unrolling_iterable(['name', 'bases', 'body', 'decorator_list']) def ClassDef_init(space, w_self, __args__): @@ -3356,7 +3219,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3364,13 +3227,15 @@ def Return_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Return_field_unroller = unrolling_iterable(['value']) def Return_init(space, w_self, __args__): @@ -3397,7 +3262,7 @@ ) def Delete_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3411,7 +3276,7 @@ def Delete_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Delete_field_unroller = unrolling_iterable(['targets']) def 
Delete_init(space, w_self, __args__): @@ -3439,7 +3304,7 @@ ) def Assign_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3453,14 +3318,14 @@ def Assign_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3468,13 +3333,15 @@ def Assign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assign_field_unroller = unrolling_iterable(['targets', 'value']) def Assign_init(space, w_self, __args__): @@ -3507,7 +3374,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3515,20 +3382,22 @@ def AugAssign_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def AugAssign_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -3544,14 +3413,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def AugAssign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3559,13 +3428,15 @@ def AugAssign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise 
OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _AugAssign_field_unroller = unrolling_iterable(['target', 'op', 'value']) def AugAssign_init(space, w_self, __args__): @@ -3598,7 +3469,7 @@ w_obj = w_self.getdictvalue(space, 'dest') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dest') return space.wrap(w_self.dest) @@ -3606,16 +3477,18 @@ def Print_set_dest(space, w_self, w_new_value): try: w_self.dest = space.interp_w(expr, w_new_value, True) + if type(w_self.dest) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'dest', w_new_value) return w_self.deldictvalue(space, 'dest') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Print_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -3629,14 +3502,14 @@ def Print_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Print_get_nl(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'nl') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl') return space.wrap(w_self.nl) @@ -3650,7 +3523,7 @@ w_self.setdictvalue(space, 'nl', w_new_value) return w_self.deldictvalue(space, 'nl') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Print_field_unroller = unrolling_iterable(['dest', 'values', 'nl']) def Print_init(space, w_self, __args__): @@ -3684,7 +3557,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3692,20 +3565,22 @@ def For_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def For_get_iter(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'iter') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute 
'%s'", typename, 'iter') return space.wrap(w_self.iter) @@ -3713,16 +3588,18 @@ def For_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'iter', w_new_value) return w_self.deldictvalue(space, 'iter') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def For_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3736,10 +3613,10 @@ def For_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def For_get_orelse(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3753,7 +3630,7 @@ def For_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _For_field_unroller = unrolling_iterable(['target', 'iter', 'body', 'orelse']) def For_init(space, w_self, __args__): @@ -3789,7 +3666,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3797,16 +3674,18 @@ def While_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def While_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3820,10 +3699,10 @@ def While_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def While_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3837,7 +3716,7 @@ def While_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _While_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def While_init(space, w_self, __args__): @@ -3872,7 +3751,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + 
if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3880,16 +3759,18 @@ def If_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def If_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3903,10 +3784,10 @@ def If_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def If_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3920,7 +3801,7 @@ def If_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _If_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def If_init(space, w_self, __args__): @@ -3955,7 +3836,7 @@ w_obj = w_self.getdictvalue(space, 'context_expr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr') return space.wrap(w_self.context_expr) @@ -3963,20 +3844,22 @@ def With_set_context_expr(space, w_self, w_new_value): try: w_self.context_expr = space.interp_w(expr, w_new_value, False) + if type(w_self.context_expr) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'context_expr', w_new_value) return w_self.deldictvalue(space, 'context_expr') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def With_get_optional_vars(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'optional_vars') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars') return space.wrap(w_self.optional_vars) @@ -3984,16 +3867,18 @@ def With_set_optional_vars(space, w_self, w_new_value): try: w_self.optional_vars = space.interp_w(expr, w_new_value, True) + if type(w_self.optional_vars) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'optional_vars', w_new_value) return w_self.deldictvalue(space, 'optional_vars') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def With_get_body(space, w_self): - if not 
w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4007,7 +3892,7 @@ def With_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _With_field_unroller = unrolling_iterable(['context_expr', 'optional_vars', 'body']) def With_init(space, w_self, __args__): @@ -4041,7 +3926,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -4049,20 +3934,22 @@ def Raise_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Raise_get_inst(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'inst') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst') return space.wrap(w_self.inst) @@ -4070,20 +3957,22 @@ def Raise_set_inst(space, w_self, w_new_value): try: w_self.inst = space.interp_w(expr, w_new_value, True) + if type(w_self.inst) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'inst', w_new_value) return w_self.deldictvalue(space, 'inst') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Raise_get_tback(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'tback') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'tback') return space.wrap(w_self.tback) @@ -4091,13 +3980,15 @@ def Raise_set_tback(space, w_self, w_new_value): try: w_self.tback = space.interp_w(expr, w_new_value, True) + if type(w_self.tback) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'tback', w_new_value) return w_self.deldictvalue(space, 'tback') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Raise_field_unroller = unrolling_iterable(['type', 'inst', 'tback']) def Raise_init(space, w_self, __args__): @@ -4126,7 +4017,7 @@ ) def TryExcept_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4140,10 +4031,10 @@ def 
TryExcept_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryExcept_get_handlers(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers') if w_self.w_handlers is None: @@ -4157,10 +4048,10 @@ def TryExcept_set_handlers(space, w_self, w_new_value): w_self.w_handlers = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def TryExcept_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -4174,7 +4065,7 @@ def TryExcept_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _TryExcept_field_unroller = unrolling_iterable(['body', 'handlers', 'orelse']) def TryExcept_init(space, w_self, __args__): @@ -4206,7 +4097,7 @@ ) def TryFinally_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4220,10 +4111,10 @@ def TryFinally_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryFinally_get_finalbody(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody') if w_self.w_finalbody is None: @@ -4237,7 +4128,7 @@ def TryFinally_set_finalbody(space, w_self, w_new_value): w_self.w_finalbody = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _TryFinally_field_unroller = unrolling_iterable(['body', 'finalbody']) def TryFinally_init(space, w_self, __args__): @@ -4271,7 +4162,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -4279,20 +4170,22 @@ def Assert_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assert_get_msg(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'msg') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg') return 
space.wrap(w_self.msg) @@ -4300,13 +4193,15 @@ def Assert_set_msg(space, w_self, w_new_value): try: w_self.msg = space.interp_w(expr, w_new_value, True) + if type(w_self.msg) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'msg', w_new_value) return w_self.deldictvalue(space, 'msg') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assert_field_unroller = unrolling_iterable(['test', 'msg']) def Assert_init(space, w_self, __args__): @@ -4334,7 +4229,7 @@ ) def Import_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4348,7 +4243,7 @@ def Import_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Import_field_unroller = unrolling_iterable(['names']) def Import_init(space, w_self, __args__): @@ -4380,7 +4275,7 @@ w_obj = w_self.getdictvalue(space, 'module') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module') return space.wrap(w_self.module) @@ -4397,10 +4292,10 @@ w_self.setdictvalue(space, 'module', w_new_value) return w_self.deldictvalue(space, 'module') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ImportFrom_get_names(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4414,14 +4309,14 @@ def ImportFrom_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ImportFrom_get_level(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'level') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level') return space.wrap(w_self.level) @@ -4435,7 +4330,7 @@ w_self.setdictvalue(space, 'level', w_new_value) return w_self.deldictvalue(space, 'level') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ImportFrom_field_unroller = unrolling_iterable(['module', 'names', 'level']) def ImportFrom_init(space, w_self, __args__): @@ -4469,7 +4364,7 @@ w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -4477,20 +4372,22 @@ def Exec_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, 
space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Exec_get_globals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'globals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals') return space.wrap(w_self.globals) @@ -4498,20 +4395,22 @@ def Exec_set_globals(space, w_self, w_new_value): try: w_self.globals = space.interp_w(expr, w_new_value, True) + if type(w_self.globals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'globals', w_new_value) return w_self.deldictvalue(space, 'globals') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Exec_get_locals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'locals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals') return space.wrap(w_self.locals) @@ -4519,13 +4418,15 @@ def Exec_set_locals(space, w_self, w_new_value): try: w_self.locals = space.interp_w(expr, w_new_value, True) + if type(w_self.locals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'locals', w_new_value) return w_self.deldictvalue(space, 'locals') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Exec_field_unroller = unrolling_iterable(['body', 'globals', 'locals']) def Exec_init(space, w_self, __args__): @@ -4554,7 +4455,7 @@ ) def Global_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4568,7 +4469,7 @@ def Global_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Global_field_unroller = unrolling_iterable(['names']) def Global_init(space, w_self, __args__): @@ -4600,7 +4501,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -4608,13 +4509,15 @@ def Expr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Expr_field_unroller = unrolling_iterable(['value']) def Expr_init(space, w_self, 
__args__): @@ -4696,7 +4599,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -4710,14 +4613,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def expr_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -4731,7 +4634,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 expr.typedef = typedef.TypeDef("expr", AST.typedef, @@ -4747,7 +4650,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return boolop_to_class[w_self.op - 1]() @@ -4763,10 +4666,10 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BoolOp_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -4780,7 +4683,7 @@ def BoolOp_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _BoolOp_field_unroller = unrolling_iterable(['op', 'values']) def BoolOp_init(space, w_self, __args__): @@ -4813,7 +4716,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -4821,20 +4724,22 @@ def BinOp_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BinOp_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -4850,14 +4755,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def BinOp_get_right(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'right') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right') return space.wrap(w_self.right) @@ -4865,13 +4770,15 @@ def BinOp_set_right(space, w_self, w_new_value): try: w_self.right = space.interp_w(expr, w_new_value, False) + if type(w_self.right) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'right', w_new_value) return w_self.deldictvalue(space, 'right') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _BinOp_field_unroller = unrolling_iterable(['left', 'op', 'right']) def BinOp_init(space, w_self, __args__): @@ -4904,7 +4811,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return unaryop_to_class[w_self.op - 1]() @@ -4920,14 +4827,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def UnaryOp_get_operand(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'operand') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand') return space.wrap(w_self.operand) @@ -4935,13 +4842,15 @@ def UnaryOp_set_operand(space, w_self, w_new_value): try: w_self.operand = space.interp_w(expr, w_new_value, False) + if type(w_self.operand) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'operand', w_new_value) return w_self.deldictvalue(space, 'operand') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _UnaryOp_field_unroller = unrolling_iterable(['op', 'operand']) def UnaryOp_init(space, w_self, __args__): @@ -4973,7 +4882,7 @@ w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -4987,14 +4896,14 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Lambda_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5002,13 +4911,15 @@ def Lambda_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Lambda_field_unroller = unrolling_iterable(['args', 'body']) def Lambda_init(space, w_self, __args__): @@ -5040,7 +4951,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -5048,20 +4959,22 @@ def IfExp_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def IfExp_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5069,20 +4982,22 @@ def IfExp_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def IfExp_get_orelse(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'orelse') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') return space.wrap(w_self.orelse) @@ -5090,13 +5005,15 @@ def IfExp_set_orelse(space, w_self, w_new_value): try: w_self.orelse = space.interp_w(expr, w_new_value, False) + if type(w_self.orelse) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'orelse', w_new_value) return w_self.deldictvalue(space, 'orelse') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _IfExp_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def IfExp_init(space, w_self, __args__): @@ -5125,7 +5042,7 @@ ) def Dict_get_keys(space, w_self): - if not 
w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys') if w_self.w_keys is None: @@ -5139,10 +5056,10 @@ def Dict_set_keys(space, w_self, w_new_value): w_self.w_keys = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Dict_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -5156,7 +5073,7 @@ def Dict_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Dict_field_unroller = unrolling_iterable(['keys', 'values']) def Dict_init(space, w_self, __args__): @@ -5186,7 +5103,7 @@ ) def Set_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -5200,7 +5117,7 @@ def Set_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Set_field_unroller = unrolling_iterable(['elts']) def Set_init(space, w_self, __args__): @@ -5232,7 +5149,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5240,16 +5157,18 @@ def ListComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ListComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5263,7 +5182,7 @@ def ListComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _ListComp_field_unroller = unrolling_iterable(['elt', 'generators']) def ListComp_init(space, w_self, __args__): @@ -5296,7 +5215,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5304,16 +5223,18 @@ def SetComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) 
except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def SetComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5327,7 +5248,7 @@ def SetComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _SetComp_field_unroller = unrolling_iterable(['elt', 'generators']) def SetComp_init(space, w_self, __args__): @@ -5360,7 +5281,7 @@ w_obj = w_self.getdictvalue(space, 'key') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key') return space.wrap(w_self.key) @@ -5368,20 +5289,22 @@ def DictComp_set_key(space, w_self, w_new_value): try: w_self.key = space.interp_w(expr, w_new_value, False) + if type(w_self.key) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'key', w_new_value) return w_self.deldictvalue(space, 'key') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def DictComp_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5389,16 +5312,18 @@ def DictComp_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def DictComp_get_generators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5412,7 +5337,7 @@ def DictComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _DictComp_field_unroller = unrolling_iterable(['key', 'value', 'generators']) def DictComp_init(space, w_self, __args__): @@ -5446,7 +5371,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5454,16 +5379,18 @@ def GeneratorExp_set_elt(space, 
w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def GeneratorExp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5477,7 +5404,7 @@ def GeneratorExp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _GeneratorExp_field_unroller = unrolling_iterable(['elt', 'generators']) def GeneratorExp_init(space, w_self, __args__): @@ -5510,7 +5437,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5518,13 +5445,15 @@ def Yield_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Yield_field_unroller = unrolling_iterable(['value']) def Yield_init(space, w_self, __args__): @@ -5555,7 +5484,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -5563,16 +5492,18 @@ def Compare_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Compare_get_ops(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops') if w_self.w_ops is None: @@ -5586,10 +5517,10 @@ def Compare_set_ops(space, w_self, w_new_value): w_self.w_ops = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Compare_get_comparators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators') if w_self.w_comparators is None: @@ -5603,7 +5534,7 @@ 
def Compare_set_comparators(space, w_self, w_new_value): w_self.w_comparators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Compare_field_unroller = unrolling_iterable(['left', 'ops', 'comparators']) def Compare_init(space, w_self, __args__): @@ -5638,7 +5569,7 @@ w_obj = w_self.getdictvalue(space, 'func') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func') return space.wrap(w_self.func) @@ -5646,16 +5577,18 @@ def Call_set_func(space, w_self, w_new_value): try: w_self.func = space.interp_w(expr, w_new_value, False) + if type(w_self.func) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'func', w_new_value) return w_self.deldictvalue(space, 'func') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Call_get_args(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') if w_self.w_args is None: @@ -5669,10 +5602,10 @@ def Call_set_args(space, w_self, w_new_value): w_self.w_args = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Call_get_keywords(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') if w_self.w_keywords is None: @@ -5686,14 +5619,14 @@ def Call_set_keywords(space, w_self, w_new_value): w_self.w_keywords = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def Call_get_starargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'starargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') return space.wrap(w_self.starargs) @@ -5701,20 +5634,22 @@ def Call_set_starargs(space, w_self, w_new_value): try: w_self.starargs = space.interp_w(expr, w_new_value, True) + if type(w_self.starargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'starargs', w_new_value) return w_self.deldictvalue(space, 'starargs') - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 def Call_get_kwargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'kwargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 16: + if not w_self.initialization_state & 64: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') return space.wrap(w_self.kwargs) @@ -5722,13 +5657,15 @@ def Call_set_kwargs(space, w_self, w_new_value): try: w_self.kwargs = space.interp_w(expr, w_new_value, True) + if type(w_self.kwargs) is expr: + raise OperationError(space.w_TypeError, 
space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'kwargs', w_new_value) return w_self.deldictvalue(space, 'kwargs') - w_self.initialization_state |= 16 + w_self.initialization_state |= 64 _Call_field_unroller = unrolling_iterable(['func', 'args', 'keywords', 'starargs', 'kwargs']) def Call_init(space, w_self, __args__): @@ -5765,7 +5702,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5773,13 +5710,15 @@ def Repr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Repr_field_unroller = unrolling_iterable(['value']) def Repr_init(space, w_self, __args__): @@ -5810,7 +5749,7 @@ w_obj = w_self.getdictvalue(space, 'n') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n') return w_self.n @@ -5824,7 +5763,7 @@ w_self.setdictvalue(space, 'n', w_new_value) return w_self.deldictvalue(space, 'n') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Num_field_unroller = unrolling_iterable(['n']) def Num_init(space, w_self, __args__): @@ -5855,7 +5794,7 @@ w_obj = w_self.getdictvalue(space, 's') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's') return w_self.s @@ -5869,7 +5808,7 @@ w_self.setdictvalue(space, 's', w_new_value) return w_self.deldictvalue(space, 's') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Str_field_unroller = unrolling_iterable(['s']) def Str_init(space, w_self, __args__): @@ -5900,7 +5839,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5908,20 +5847,22 @@ def Attribute_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Attribute_get_attr(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'attr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not 
w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr') return space.wrap(w_self.attr) @@ -5935,14 +5876,14 @@ w_self.setdictvalue(space, 'attr', w_new_value) return w_self.deldictvalue(space, 'attr') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Attribute_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -5958,7 +5899,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Attribute_field_unroller = unrolling_iterable(['value', 'attr', 'ctx']) def Attribute_init(space, w_self, __args__): @@ -5991,7 +5932,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5999,20 +5940,22 @@ def Subscript_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Subscript_get_slice(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'slice') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice') return space.wrap(w_self.slice) @@ -6020,20 +5963,22 @@ def Subscript_set_slice(space, w_self, w_new_value): try: w_self.slice = space.interp_w(slice, w_new_value, False) + if type(w_self.slice) is slice: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'slice', w_new_value) return w_self.deldictvalue(space, 'slice') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Subscript_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6049,7 +5994,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Subscript_field_unroller = unrolling_iterable(['value', 'slice', 'ctx']) def Subscript_init(space, w_self, __args__): @@ 
-6082,7 +6027,7 @@ w_obj = w_self.getdictvalue(space, 'id') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id') return space.wrap(w_self.id) @@ -6096,14 +6041,14 @@ w_self.setdictvalue(space, 'id', w_new_value) return w_self.deldictvalue(space, 'id') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Name_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6119,7 +6064,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Name_field_unroller = unrolling_iterable(['id', 'ctx']) def Name_init(space, w_self, __args__): @@ -6147,7 +6092,7 @@ ) def List_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6161,14 +6106,14 @@ def List_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def List_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6184,7 +6129,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _List_field_unroller = unrolling_iterable(['elts', 'ctx']) def List_init(space, w_self, __args__): @@ -6213,7 +6158,7 @@ ) def Tuple_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6227,14 +6172,14 @@ def Tuple_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Tuple_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6250,7 +6195,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Tuple_field_unroller = unrolling_iterable(['elts', 
'ctx']) def Tuple_init(space, w_self, __args__): @@ -6283,7 +6228,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return w_self.value @@ -6297,7 +6242,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Const_field_unroller = unrolling_iterable(['value']) def Const_init(space, w_self, __args__): @@ -6409,6 +6354,8 @@ def Slice_set_lower(space, w_self, w_new_value): try: w_self.lower = space.interp_w(expr, w_new_value, True) + if type(w_self.lower) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6430,6 +6377,8 @@ def Slice_set_upper(space, w_self, w_new_value): try: w_self.upper = space.interp_w(expr, w_new_value, True) + if type(w_self.upper) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6451,6 +6400,8 @@ def Slice_set_step(space, w_self, w_new_value): try: w_self.step = space.interp_w(expr, w_new_value, True) + if type(w_self.step) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6540,6 +6491,8 @@ def Index_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6809,6 +6762,8 @@ def comprehension_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6830,6 +6785,8 @@ def comprehension_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6887,7 +6844,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -6901,14 +6858,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def excepthandler_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -6922,7 +6879,7 @@ 
w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 excepthandler.typedef = typedef.TypeDef("excepthandler", AST.typedef, @@ -6938,7 +6895,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -6946,20 +6903,22 @@ def ExceptHandler_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ExceptHandler_get_name(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -6967,16 +6926,18 @@ def ExceptHandler_set_name(space, w_self, w_new_value): try: w_self.name = space.interp_w(expr, w_new_value, True) + if type(w_self.name) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ExceptHandler_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -6990,7 +6951,7 @@ def ExceptHandler_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ExceptHandler_field_unroller = unrolling_iterable(['type', 'name', 'body']) def ExceptHandler_init(space, w_self, __args__): @@ -7164,6 +7125,8 @@ def keyword_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -79,6 +79,7 @@ else: self.emit("class %s(AST):" % (base,)) if sum.attributes: + self.emit("") args = ", ".join(attr.name.value for attr in sum.attributes) self.emit("def __init__(self, %s):" % (args,), 1) for attr in sum.attributes: @@ -114,7 +115,7 @@ else: names.append(repr(field.name.value)) sub = (", ".join(names), name.value) - self.emit("missing_field(space, self.initialization_state, [%s], %r)" + self.emit("self.missing_field(space, [%s], %r)" % sub, 3) 
self.emit("else:", 2) # Fill in all the default fields. @@ -195,17 +196,13 @@ def visitConstructor(self, cons, base, extra_attributes): self.emit("class %s(%s):" % (cons.name, base)) self.emit("") - for field in self.data.cons_attributes[cons]: - subst = (field.name, self.data.field_masks[field]) - self.emit("_%s_mask = %i" % subst, 1) - self.emit("") self.make_constructor(cons.fields, cons, extra_attributes, base) self.emit("") self.emit("def walkabout(self, visitor):", 1) self.emit("visitor.visit_%s(self)" % (cons.name,), 2) self.emit("") self.make_mutate_over(cons, cons.name) - self.make_var_syncer(cons.fields + self.data.cons_attributes[cons], + self.make_var_syncer(self.data.cons_attributes[cons] + cons.fields, cons, cons.name) def visitField(self, field): @@ -324,7 +321,7 @@ def visitSum(self, sum, name): for field in sum.attributes: - self.make_property(field, name, True) + self.make_property(field, name) self.make_typedef(name, "AST", sum.attributes, fields_name="_attributes") if not is_simple_sum(sum): @@ -400,13 +397,10 @@ def visitField(self, field, name): self.make_property(field, name) - def make_property(self, field, name, different_masks=False): + def make_property(self, field, name): func = "def %s_get_%s(space, w_self):" % (name, field.name) self.emit(func) - if different_masks: - flag = "w_self._%s_mask" % (field.name,) - else: - flag = self.data.field_masks[field] + flag = self.data.field_masks[field] if not field.seq: self.emit("if w_self.w_dict is not None:", 1) self.emit(" w_obj = w_self.getdictvalue(space, '%s')" % (field.name,), 1) @@ -458,6 +452,11 @@ config = (field.name, field.type, repr(field.opt)) self.emit("w_self.%s = space.interp_w(%s, w_new_value, %s)" % config, 2) + if field.type.value not in self.data.prod_simple: + self.emit("if type(w_self.%s) is %s:" % ( + field.name, field.type), 2) + self.emit("raise OperationError(space.w_TypeError, " + "space.w_None)", 3) else: level = 2 if field.opt and field.type.value != "int": @@ -505,7 +504,10 @@ optional_mask = 0 for i, field in enumerate(fields): flag = 1 << i - field_masks[field] = flag + if field not in field_masks: + field_masks[field] = flag + else: + assert field_masks[field] == flag if field.opt: optional_mask |= flag else: @@ -518,9 +520,9 @@ if is_simple_sum(sum): simple_types.add(tp.name.value) else: + attrs = [field for field in sum.attributes] for cons in sum.types: - attrs = [copy_field(field) for field in sum.attributes] - add_masks(cons.fields + attrs, cons) + add_masks(attrs + cons.fields, cons) cons_attributes[cons] = attrs else: prod = tp.value @@ -588,6 +590,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." + state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \\"%s\\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \\"%s\\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -631,15 +651,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." 
- for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \\"%s\\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) - raise AssertionError("should not reach here") """ diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -98,7 +98,6 @@ "Abstract. Get the expected number of locals." raise TypeError, "abstract" - @jit.dont_look_inside def fast2locals(self): # Copy values from the fastlocals to self.w_locals if self.w_locals is None: @@ -112,7 +111,6 @@ w_name = self.space.wrap(name) self.space.setitem(self.w_locals, w_name, w_value) - @jit.dont_look_inside def locals2fast(self): # Copy values from self.w_locals to the fastlocals assert self.w_locals is not None diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -619,7 +619,8 @@ self.descr_reqcls, args) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -655,7 +656,8 @@ self.descr_reqcls, args) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -674,7 +676,8 @@ self.descr_reqcls, args.prepend(w_obj)) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -690,7 +693,8 @@ raise OperationError(space.w_SystemError, space.wrap("unexpected DescrMismatch error")) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -708,7 +712,8 @@ self.descr_reqcls, Arguments(space, [w1])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -726,7 +731,8 @@ self.descr_reqcls, Arguments(space, [w1, w2])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -744,7 +750,8 @@ self.descr_reqcls, Arguments(space, [w1, w2, w3])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -763,7 +770,8 @@ Arguments(space, [w1, w2, w3, w4])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -37,7 +37,7 @@ def get_arg_types(self): return self.arg_types - def get_return_type(self): + def get_result_type(self): return self.typeinfo def get_extra_info(self): diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -5,11 +5,7 @@ from pypy.jit.metainterp.history import AbstractDescr, getkind from pypy.jit.metainterp import history from pypy.jit.codewriter 
import heaptracker, longlong - -# The point of the class organization in this file is to make instances -# as compact as possible. This is done by not storing the field size or -# the 'is_pointer_field' flag in the instance itself but in the class -# (in methods actually) using a few classes instead of just one. +from pypy.jit.codewriter.longlong import is_longlong class GcCache(object): @@ -19,6 +15,7 @@ self._cache_size = {} self._cache_field = {} self._cache_array = {} + self._cache_arraylen = {} self._cache_call = {} self._cache_interiorfield = {} @@ -26,24 +23,15 @@ assert isinstance(STRUCT, lltype.GcStruct) def init_array_descr(self, ARRAY, arraydescr): - assert isinstance(ARRAY, lltype.GcArray) + assert (isinstance(ARRAY, lltype.GcArray) or + isinstance(ARRAY, lltype.GcStruct) and ARRAY._arrayfld) -if lltype.SignedLongLong is lltype.Signed: - def is_longlong(TYPE): - return False -else: - assert rffi.sizeof(lltype.SignedLongLong) == rffi.sizeof(lltype.Float) - def is_longlong(TYPE): - return TYPE in (lltype.SignedLongLong, lltype.UnsignedLongLong) - # ____________________________________________________________ # SizeDescrs class SizeDescr(AbstractDescr): size = 0 # help translation - is_immutable = False - tid = llop.combine_ushort(lltype.Signed, 0, 0) def __init__(self, size, count_fields_if_immut=-1): @@ -77,265 +65,247 @@ cache[STRUCT] = sizedescr return sizedescr + # ____________________________________________________________ # FieldDescrs -class BaseFieldDescr(AbstractDescr): +FLAG_POINTER = 'P' +FLAG_FLOAT = 'F' +FLAG_UNSIGNED = 'U' +FLAG_SIGNED = 'S' +FLAG_STRUCT = 'X' +FLAG_VOID = 'V' + +class FieldDescr(AbstractDescr): + name = '' offset = 0 # help translation - name = '' - _clsname = '' + field_size = 0 + flag = '\x00' - def __init__(self, name, offset): + def __init__(self, name, offset, field_size, flag): self.name = name self.offset = offset + self.field_size = field_size + self.flag = flag + + def is_pointer_field(self): + return self.flag == FLAG_POINTER + + def is_float_field(self): + return self.flag == FLAG_FLOAT + + def is_field_signed(self): + return self.flag == FLAG_SIGNED def sort_key(self): return self.offset - def get_field_size(self, translate_support_code): - raise NotImplementedError + def repr_of_descr(self): + return '' % (self.flag, self.name, self.offset) - _is_pointer_field = False # unless overridden by GcPtrFieldDescr - _is_float_field = False # unless overridden by FloatFieldDescr - _is_field_signed = False # unless overridden by XxxFieldDescr - - def is_pointer_field(self): - return self._is_pointer_field - - def is_float_field(self): - return self._is_float_field - - def is_field_signed(self): - return self._is_field_signed - - def repr_of_descr(self): - return '<%s %s %s>' % (self._clsname, self.name, self.offset) - -class DynamicFieldDescr(BaseFieldDescr): - def __init__(self, offset, fieldsize, is_pointer, is_float, is_signed): - self.offset = offset - self._fieldsize = fieldsize - self._is_pointer_field = is_pointer - self._is_float_field = is_float - self._is_field_signed = is_signed - - def get_field_size(self, translate_support_code): - return self._fieldsize - -class NonGcPtrFieldDescr(BaseFieldDescr): - _clsname = 'NonGcPtrFieldDescr' - def get_field_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrFieldDescr(NonGcPtrFieldDescr): - _clsname = 'GcPtrFieldDescr' - _is_pointer_field = True - -def getFieldDescrClass(TYPE): - return getDescrClass(TYPE, BaseFieldDescr, 
GcPtrFieldDescr, - NonGcPtrFieldDescr, 'Field', 'get_field_size', - '_is_float_field', '_is_field_signed') def get_field_descr(gccache, STRUCT, fieldname): cache = gccache._cache_field try: return cache[STRUCT][fieldname] except KeyError: - offset, _ = symbolic.get_field_token(STRUCT, fieldname, - gccache.translate_support_code) + offset, size = symbolic.get_field_token(STRUCT, fieldname, + gccache.translate_support_code) FIELDTYPE = getattr(STRUCT, fieldname) + flag = get_type_flag(FIELDTYPE) name = '%s.%s' % (STRUCT._name, fieldname) - fielddescr = getFieldDescrClass(FIELDTYPE)(name, offset) + fielddescr = FieldDescr(name, offset, size, flag) cachedict = cache.setdefault(STRUCT, {}) cachedict[fieldname] = fielddescr return fielddescr +def get_type_flag(TYPE): + if isinstance(TYPE, lltype.Ptr): + if TYPE.TO._gckind == 'gc': + return FLAG_POINTER + else: + return FLAG_UNSIGNED + if isinstance(TYPE, lltype.Struct): + return FLAG_STRUCT + if TYPE is lltype.Float or is_longlong(TYPE): + return FLAG_FLOAT + if (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and + rffi.cast(TYPE, -1) == -1): + return FLAG_SIGNED + return FLAG_UNSIGNED + +def get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT): + cache = gccache._cache_arraylen + try: + return cache[ARRAY_OR_STRUCT] + except KeyError: + tsc = gccache.translate_support_code + (_, _, ofs) = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + size = symbolic.get_size(lltype.Signed, tsc) + result = FieldDescr("len", ofs, size, get_type_flag(lltype.Signed)) + cache[ARRAY_OR_STRUCT] = result + return result + + # ____________________________________________________________ # ArrayDescrs -_A = lltype.GcArray(lltype.Signed) # a random gcarray -_AF = lltype.GcArray(lltype.Float) # an array of C doubles +class ArrayDescr(AbstractDescr): + tid = 0 + basesize = 0 # workaround for the annotator + itemsize = 0 + lendescr = None + flag = '\x00' - -class BaseArrayDescr(AbstractDescr): - _clsname = '' - tid = llop.combine_ushort(lltype.Signed, 0, 0) - - def get_base_size(self, translate_support_code): - basesize, _, _ = symbolic.get_array_token(_A, translate_support_code) - return basesize - - def get_ofs_length(self, translate_support_code): - _, _, ofslength = symbolic.get_array_token(_A, translate_support_code) - return ofslength - - def get_item_size(self, translate_support_code): - raise NotImplementedError - - _is_array_of_pointers = False # unless overridden by GcPtrArrayDescr - _is_array_of_floats = False # unless overridden by FloatArrayDescr - _is_array_of_structs = False # unless overridden by StructArrayDescr - _is_item_signed = False # unless overridden by XxxArrayDescr + def __init__(self, basesize, itemsize, lendescr, flag): + self.basesize = basesize + self.itemsize = itemsize + self.lendescr = lendescr # or None, if no length + self.flag = flag def is_array_of_pointers(self): - return self._is_array_of_pointers + return self.flag == FLAG_POINTER def is_array_of_floats(self): - return self._is_array_of_floats + return self.flag == FLAG_FLOAT + + def is_item_signed(self): + return self.flag == FLAG_SIGNED def is_array_of_structs(self): - return self._is_array_of_structs - - def is_item_signed(self): - return self._is_item_signed + return self.flag == FLAG_STRUCT def repr_of_descr(self): - return '<%s>' % self._clsname + return '' % (self.flag, self.itemsize) -class NonGcPtrArrayDescr(BaseArrayDescr): - _clsname = 'NonGcPtrArrayDescr' - def get_item_size(self, translate_support_code): - return 
symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayDescr(NonGcPtrArrayDescr): - _clsname = 'GcPtrArrayDescr' - _is_array_of_pointers = True - -class FloatArrayDescr(BaseArrayDescr): - _clsname = 'FloatArrayDescr' - _is_array_of_floats = True - def get_base_size(self, translate_support_code): - basesize, _, _ = symbolic.get_array_token(_AF, translate_support_code) - return basesize - def get_item_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class StructArrayDescr(BaseArrayDescr): - _clsname = 'StructArrayDescr' - _is_array_of_structs = True - -class BaseArrayNoLengthDescr(BaseArrayDescr): - def get_base_size(self, translate_support_code): - return 0 - - def get_ofs_length(self, translate_support_code): - return -1 - -class DynamicArrayNoLengthDescr(BaseArrayNoLengthDescr): - def __init__(self, itemsize): - self.itemsize = itemsize - - def get_item_size(self, translate_support_code): - return self.itemsize - -class NonGcPtrArrayNoLengthDescr(BaseArrayNoLengthDescr): - _clsname = 'NonGcPtrArrayNoLengthDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayNoLengthDescr(NonGcPtrArrayNoLengthDescr): - _clsname = 'GcPtrArrayNoLengthDescr' - _is_array_of_pointers = True - -def getArrayDescrClass(ARRAY): - if ARRAY.OF is lltype.Float: - return FloatArrayDescr - elif isinstance(ARRAY.OF, lltype.Struct): - class Descr(StructArrayDescr): - _clsname = '%sArrayDescr' % ARRAY.OF._name - def get_item_size(self, translate_support_code): - return symbolic.get_size(ARRAY.OF, translate_support_code) - Descr.__name__ = Descr._clsname - return Descr - return getDescrClass(ARRAY.OF, BaseArrayDescr, GcPtrArrayDescr, - NonGcPtrArrayDescr, 'Array', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def getArrayNoLengthDescrClass(ARRAY): - return getDescrClass(ARRAY.OF, BaseArrayNoLengthDescr, GcPtrArrayNoLengthDescr, - NonGcPtrArrayNoLengthDescr, 'ArrayNoLength', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def get_array_descr(gccache, ARRAY): +def get_array_descr(gccache, ARRAY_OR_STRUCT): cache = gccache._cache_array try: - return cache[ARRAY] + return cache[ARRAY_OR_STRUCT] except KeyError: - # we only support Arrays that are either GcArrays, or raw no-length - # non-gc Arrays. 
- if ARRAY._hints.get('nolength', False): - assert not isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayNoLengthDescrClass(ARRAY)() + tsc = gccache.translate_support_code + basesize, itemsize, _ = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + if isinstance(ARRAY_OR_STRUCT, lltype.Array): + ARRAY_INSIDE = ARRAY_OR_STRUCT else: - assert isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayDescrClass(ARRAY)() - # verify basic assumption that all arrays' basesize and ofslength - # are equal - basesize, itemsize, ofslength = symbolic.get_array_token(ARRAY, False) - assert basesize == arraydescr.get_base_size(False) - assert itemsize == arraydescr.get_item_size(False) - if not ARRAY._hints.get('nolength', False): - assert ofslength == arraydescr.get_ofs_length(False) - if isinstance(ARRAY, lltype.GcArray): - gccache.init_array_descr(ARRAY, arraydescr) - cache[ARRAY] = arraydescr + ARRAY_INSIDE = ARRAY_OR_STRUCT._flds[ARRAY_OR_STRUCT._arrayfld] + if ARRAY_INSIDE._hints.get('nolength', False): + lendescr = None + else: + lendescr = get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT) + flag = get_type_flag(ARRAY_INSIDE.OF) + arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag) + if ARRAY_OR_STRUCT._gckind == 'gc': + gccache.init_array_descr(ARRAY_OR_STRUCT, arraydescr) + cache[ARRAY_OR_STRUCT] = arraydescr return arraydescr + # ____________________________________________________________ # InteriorFieldDescr class InteriorFieldDescr(AbstractDescr): - arraydescr = BaseArrayDescr() # workaround for the annotator - fielddescr = BaseFieldDescr('', 0) + arraydescr = ArrayDescr(0, 0, None, '\x00') # workaround for the annotator + fielddescr = FieldDescr('', 0, 0, '\x00') def __init__(self, arraydescr, fielddescr): + assert arraydescr.flag == FLAG_STRUCT self.arraydescr = arraydescr self.fielddescr = fielddescr + def sort_key(self): + return self.fielddescr.sort_key() + def is_pointer_field(self): return self.fielddescr.is_pointer_field() def is_float_field(self): return self.fielddescr.is_float_field() - def sort_key(self): - return self.fielddescr.sort_key() - def repr_of_descr(self): return '' % self.fielddescr.repr_of_descr() -def get_interiorfield_descr(gc_ll_descr, ARRAY, FIELDTP, name): +def get_interiorfield_descr(gc_ll_descr, ARRAY, name): cache = gc_ll_descr._cache_interiorfield try: - return cache[(ARRAY, FIELDTP, name)] + return cache[(ARRAY, name)] except KeyError: arraydescr = get_array_descr(gc_ll_descr, ARRAY) - fielddescr = get_field_descr(gc_ll_descr, FIELDTP, name) + fielddescr = get_field_descr(gc_ll_descr, ARRAY.OF, name) descr = InteriorFieldDescr(arraydescr, fielddescr) - cache[(ARRAY, FIELDTP, name)] = descr + cache[(ARRAY, name)] = descr return descr +def get_dynamic_interiorfield_descr(gc_ll_descr, offset, width, fieldsize, + is_pointer, is_float, is_signed): + arraydescr = ArrayDescr(0, width, None, FLAG_STRUCT) + if is_pointer: + assert not is_float + flag = FLAG_POINTER + elif is_float: + flag = FLAG_FLOAT + elif is_signed: + flag = FLAG_SIGNED + else: + flag = FLAG_UNSIGNED + fielddescr = FieldDescr('dynamic', offset, fieldsize, flag) + return InteriorFieldDescr(arraydescr, fielddescr) + + # ____________________________________________________________ # CallDescrs -class BaseCallDescr(AbstractDescr): - _clsname = '' - loop_token = None +class CallDescr(AbstractDescr): arg_classes = '' # <-- annotation hack + result_type = '\x00' + result_flag = '\x00' ffi_flags = 1 + call_stub_i = staticmethod(lambda func, args_i, args_r, args_f: + 0) + call_stub_r = 
staticmethod(lambda func, args_i, args_r, args_f: + lltype.nullptr(llmemory.GCREF.TO)) + call_stub_f = staticmethod(lambda func,args_i,args_r,args_f: + longlong.ZEROF) - def __init__(self, arg_classes, extrainfo=None, ffi_flags=1): - self.arg_classes = arg_classes # string of "r" and "i" (ref/int) + def __init__(self, arg_classes, result_type, result_signed, result_size, + extrainfo=None, ffi_flags=1): + """ + 'arg_classes' is a string of characters, one per argument: + 'i', 'r', 'f', 'L', 'S' + + 'result_type' is one character from the same list or 'v' + + 'result_signed' is a boolean True/False + """ + self.arg_classes = arg_classes + self.result_type = result_type + self.result_size = result_size self.extrainfo = extrainfo self.ffi_flags = ffi_flags # NB. the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which # makes sense on Windows as it's the one for all the C functions # we are compiling together with the JIT. On non-Windows platforms # it is just ignored anyway. + if result_type == 'v': + result_flag = FLAG_VOID + elif result_type == 'i': + if result_signed: + result_flag = FLAG_SIGNED + else: + result_flag = FLAG_UNSIGNED + elif result_type == history.REF: + result_flag = FLAG_POINTER + elif result_type == history.FLOAT or result_type == 'L': + result_flag = FLAG_FLOAT + elif result_type == 'S': + result_flag = FLAG_UNSIGNED + else: + raise NotImplementedError("result_type = '%s'" % (result_type,)) + self.result_flag = result_flag def __repr__(self): - res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) + res = 'CallDescr(%s)' % (self.arg_classes,) extraeffect = getattr(self.extrainfo, 'extraeffect', None) if extraeffect is not None: res += ' EF=%r' % extraeffect @@ -363,14 +333,14 @@ def get_arg_types(self): return self.arg_classes - def get_return_type(self): - return self._return_type + def get_result_type(self): + return self.result_type - def get_result_size(self, translate_support_code): - raise NotImplementedError + def get_result_size(self): + return self.result_size def is_result_signed(self): - return False # unless overridden + return self.result_flag == FLAG_SIGNED def create_call_stub(self, rtyper, RESULT): from pypy.rlib.clibffi import FFI_DEFAULT_ABI @@ -408,18 +378,26 @@ seen = {'i': 0, 'r': 0, 'f': 0} args = ", ".join([process(c) for c in self.arg_classes]) - if self.get_return_type() == history.INT: + result_type = self.get_result_type() + if result_type == history.INT: result = 'rffi.cast(lltype.Signed, res)' - elif self.get_return_type() == history.REF: + category = 'i' + elif result_type == history.REF: + assert RESULT == llmemory.GCREF # should be ensured by the caller result = 'lltype.cast_opaque_ptr(llmemory.GCREF, res)' - elif self.get_return_type() == history.FLOAT: + category = 'r' + elif result_type == history.FLOAT: result = 'longlong.getfloatstorage(res)' - elif self.get_return_type() == 'L': + category = 'f' + elif result_type == 'L': result = 'rffi.cast(lltype.SignedLongLong, res)' - elif self.get_return_type() == history.VOID: - result = 'None' - elif self.get_return_type() == 'S': + category = 'f' + elif result_type == history.VOID: + result = '0' + category = 'i' + elif result_type == 'S': result = 'longlong.singlefloat2int(res)' + category = 'i' else: assert 0 source = py.code.Source(""" @@ -433,10 +411,13 @@ d = globals().copy() d.update(locals()) exec source.compile() in d - self.call_stub = d['call_stub'] + call_stub = d['call_stub'] + # store the function into one of three attributes, to preserve + # type-correctness of the 
return value + setattr(self, 'call_stub_%s' % category, call_stub) def verify_types(self, args_i, args_r, args_f, return_type): - assert self._return_type in return_type + assert self.result_type in return_type assert (self.arg_classes.count('i') + self.arg_classes.count('S')) == len(args_i or ()) assert self.arg_classes.count('r') == len(args_r or ()) @@ -444,161 +425,56 @@ self.arg_classes.count('L')) == len(args_f or ()) def repr_of_descr(self): - return '<%s>' % self._clsname + res = 'Call%s %d' % (self.result_type, self.result_size) + if self.arg_classes: + res += ' ' + self.arg_classes + if self.extrainfo: + res += ' EF=%d' % self.extrainfo.extraeffect + oopspecindex = self.extrainfo.oopspecindex + if oopspecindex: + res += ' OS=%d' % oopspecindex + return '<%s>' % res -class BaseIntCallDescr(BaseCallDescr): - # Base class of the various subclasses of descrs corresponding to - # calls having a return kind of 'int' (including non-gc pointers). - # The inheritance hierarchy is a bit different than with other Descr - # classes because of the 'call_stub' attribute, which is of type - # - # lambda func, args_i, args_r, args_f --> int/ref/float/void - # - # The purpose of BaseIntCallDescr is to be the parent of all classes - # in which 'call_stub' has a return kind of 'int'. - _return_type = history.INT - call_stub = staticmethod(lambda func, args_i, args_r, args_f: 0) - - _is_result_signed = False # can be overridden in XxxCallDescr - def is_result_signed(self): - return self._is_result_signed - -class DynamicIntCallDescr(BaseIntCallDescr): - """ - calldescr that works for every integer type, by explicitly passing it the - size of the result. Used only by get_call_descr_dynamic - """ - _clsname = 'DynamicIntCallDescr' - - def __init__(self, arg_classes, result_size, result_sign, extrainfo, ffi_flags): - BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) - assert isinstance(result_sign, bool) - self._result_size = chr(result_size) - self._result_sign = result_sign - - def get_result_size(self, translate_support_code): - return ord(self._result_size) - - def is_result_signed(self): - return self._result_sign - - -class NonGcPtrCallDescr(BaseIntCallDescr): - _clsname = 'NonGcPtrCallDescr' - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrCallDescr(BaseCallDescr): - _clsname = 'GcPtrCallDescr' - _return_type = history.REF - call_stub = staticmethod(lambda func, args_i, args_r, args_f: - lltype.nullptr(llmemory.GCREF.TO)) - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class FloatCallDescr(BaseCallDescr): - _clsname = 'FloatCallDescr' - _return_type = history.FLOAT - call_stub = staticmethod(lambda func,args_i,args_r,args_f: longlong.ZEROF) - def get_result_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class LongLongCallDescr(FloatCallDescr): - _clsname = 'LongLongCallDescr' - _return_type = 'L' - -class VoidCallDescr(BaseCallDescr): - _clsname = 'VoidCallDescr' - _return_type = history.VOID - call_stub = staticmethod(lambda func, args_i, args_r, args_f: None) - def get_result_size(self, translate_support_code): - return 0 - -_SingleFloatCallDescr = None # built lazily - -def getCallDescrClass(RESULT): - if RESULT is lltype.Void: - return VoidCallDescr - if RESULT is lltype.Float: - return FloatCallDescr - if RESULT is lltype.SingleFloat: - global _SingleFloatCallDescr - if 
_SingleFloatCallDescr is None: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(RESULT) - class SingleFloatCallDescr(getCallDescrClass(rffi.UINT)): - _clsname = 'SingleFloatCallDescr' - _return_type = 'S' - _SingleFloatCallDescr = SingleFloatCallDescr - return _SingleFloatCallDescr - if is_longlong(RESULT): - return LongLongCallDescr - return getDescrClass(RESULT, BaseIntCallDescr, GcPtrCallDescr, - NonGcPtrCallDescr, 'Call', 'get_result_size', - Ellipsis, # <= floatattrname should not be used here - '_is_result_signed') -getCallDescrClass._annspecialcase_ = 'specialize:memo' +def map_type_to_argclass(ARG, accept_void=False): + kind = getkind(ARG) + if kind == 'int': + if ARG is lltype.SingleFloat: return 'S' + else: return 'i' + elif kind == 'ref': return 'r' + elif kind == 'float': + if is_longlong(ARG): return 'L' + else: return 'f' + elif kind == 'void': + if accept_void: return 'v' + raise NotImplementedError('ARG = %r' % (ARG,)) def get_call_descr(gccache, ARGS, RESULT, extrainfo=None): - arg_classes = [] - for ARG in ARGS: - kind = getkind(ARG) - if kind == 'int': - if ARG is lltype.SingleFloat: - arg_classes.append('S') + arg_classes = map(map_type_to_argclass, ARGS) + arg_classes = ''.join(arg_classes) + result_type = map_type_to_argclass(RESULT, accept_void=True) + RESULT_ERASED = RESULT + if RESULT is lltype.Void: + result_size = 0 + result_signed = False + else: + if isinstance(RESULT, lltype.Ptr): + # avoid too many CallDescrs + if result_type == 'r': + RESULT_ERASED = llmemory.GCREF else: - arg_classes.append('i') - elif kind == 'ref': arg_classes.append('r') - elif kind == 'float': - if is_longlong(ARG): - arg_classes.append('L') - else: - arg_classes.append('f') - else: - raise NotImplementedError('ARG = %r' % (ARG,)) - arg_classes = ''.join(arg_classes) - cls = getCallDescrClass(RESULT) - key = (cls, arg_classes, extrainfo) + RESULT_ERASED = llmemory.Address + result_size = symbolic.get_size(RESULT_ERASED, + gccache.translate_support_code) + result_signed = get_type_flag(RESULT) == FLAG_SIGNED + key = (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo) cache = gccache._cache_call try: - return cache[key] + calldescr = cache[key] except KeyError: - calldescr = cls(arg_classes, extrainfo) - calldescr.create_call_stub(gccache.rtyper, RESULT) + calldescr = CallDescr(arg_classes, result_type, result_signed, + result_size, extrainfo) + calldescr.create_call_stub(gccache.rtyper, RESULT_ERASED) cache[key] = calldescr - return calldescr - - -# ____________________________________________________________ - -def getDescrClass(TYPE, BaseDescr, GcPtrDescr, NonGcPtrDescr, - nameprefix, methodname, floatattrname, signedattrname, - _cache={}): - if isinstance(TYPE, lltype.Ptr): - if TYPE.TO._gckind == 'gc': - return GcPtrDescr - else: - return NonGcPtrDescr - if TYPE is lltype.SingleFloat: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(TYPE) - TYPE = rffi.UINT - try: - return _cache[nameprefix, TYPE] - except KeyError: - # - class Descr(BaseDescr): - _clsname = '%s%sDescr' % (TYPE._name, nameprefix) - Descr.__name__ = Descr._clsname - # - def method(self, translate_support_code): - return symbolic.get_size(TYPE, translate_support_code) - setattr(Descr, methodname, method) - # - if TYPE is lltype.Float or is_longlong(TYPE): - setattr(Descr, floatattrname, True) - elif (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and - rffi.cast(TYPE, -1) == -1): - setattr(Descr, signedattrname, True) - # - _cache[nameprefix, TYPE] = Descr - return Descr + assert 
repr(calldescr.result_size) == repr(result_size) + return calldescr diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -1,9 +1,7 @@ from pypy.rlib.rarithmetic import intmask from pypy.jit.metainterp import history from pypy.rpython.lltypesystem import rffi -from pypy.jit.backend.llsupport.descr import ( - DynamicIntCallDescr, NonGcPtrCallDescr, FloatCallDescr, VoidCallDescr, - LongLongCallDescr, getCallDescrClass) +from pypy.jit.backend.llsupport.descr import CallDescr class UnsupportedKind(Exception): pass @@ -16,29 +14,13 @@ argkinds = [get_ffi_type_kind(cpu, arg) for arg in ffi_args] except UnsupportedKind: return None - arg_classes = ''.join(argkinds) - if reskind == history.INT: - size = intmask(ffi_result.c_size) - signed = is_ffi_type_signed(ffi_result) - return DynamicIntCallDescr(arg_classes, size, signed, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.REF: - return NonGcPtrCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.FLOAT: - return FloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.VOID: - return VoidCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'L': - return LongLongCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'S': - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - return SingleFloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - assert False + if reskind == history.VOID: + result_size = 0 + else: + result_size = intmask(ffi_result.c_size) + argkinds = ''.join(argkinds) + return CallDescr(argkinds, reskind, is_ffi_type_signed(ffi_result), + result_size, extrainfo, ffi_flags=ffi_flags) def get_ffi_type_kind(cpu, ffi_type): from pypy.rlib.libffi import types diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,6 +1,6 @@ import os from pypy.rlib import rgc -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize from pypy.rlib.debug import fatalerror from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr @@ -8,52 +8,93 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt, ConstPtr -from pypy.jit.metainterp.history import AbstractDescr +from pypy.jit.codewriter import heaptracker +from pypy.jit.metainterp.history import ConstPtr, AbstractDescr from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.symbolic import WORD -from pypy.jit.backend.llsupport.descr import BaseSizeDescr, BaseArrayDescr +from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr -from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr +from pypy.jit.backend.llsupport.descr import get_array_descr from pypy.jit.backend.llsupport.descr import get_call_descr +from pypy.jit.backend.llsupport.rewrite import GcRewriterAssembler from pypy.rpython.memory.gctransform import asmgcroot # 
____________________________________________________________ class GcLLDescription(GcCache): - minimal_size_in_nursery = 0 - get_malloc_slowpath_addr = None def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr + if translator and translator.config.translation.gcremovetypeptr: + self.fielddescr_vtable = None + else: + self.fielddescr_vtable = get_field_descr(self, rclass.OBJECT, + 'typeptr') + self._generated_functions = [] + + def _setup_str(self): + self.str_descr = get_array_descr(self, rstr.STR) + self.unicode_descr = get_array_descr(self, rstr.UNICODE) + + def generate_function(self, funcname, func, ARGS, RESULT=llmemory.GCREF): + """Generates a variant of malloc with the given name and the given + arguments. It should return NULL if out of memory. If it raises + anything, it must be an optional MemoryError. + """ + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, RESULT)) + descr = get_call_descr(self, ARGS, RESULT) + setattr(self, funcname, func) + setattr(self, funcname + '_FUNCPTR', FUNCPTR) + setattr(self, funcname + '_descr', descr) + self._generated_functions.append(funcname) + + @specialize.arg(1) + def get_malloc_fn(self, funcname): + func = getattr(self, funcname) + FUNC = getattr(self, funcname + '_FUNCPTR') + return llhelper(FUNC, func) + + @specialize.arg(1) + def get_malloc_fn_addr(self, funcname): + ll_func = self.get_malloc_fn(funcname) + return heaptracker.adr2int(llmemory.cast_ptr_to_adr(ll_func)) + def _freeze_(self): return True def initialize(self): pass def do_write_barrier(self, gcref_struct, gcref_newptr): pass - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - return operations - def can_inline_malloc(self, descr): - return False - def can_inline_malloc_varsize(self, descr, num_elem): + def can_use_nursery_malloc(self, size): return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): pass + def get_nursery_free_addr(self): + raise NotImplementedError + def get_nursery_top_addr(self): + raise NotImplementedError - def get_funcptr_for_newarray(self): - return llhelper(self.GC_MALLOC_ARRAY, self.malloc_array) - def get_funcptr_for_newstr(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_str) - def get_funcptr_for_newunicode(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_unicode) + def gc_malloc(self, sizedescr): + """Blackhole: do a 'bh_new'. 
Also used for 'bh_new_with_vtable', + with the vtable pointer set manually afterwards.""" + assert isinstance(sizedescr, SizeDescr) + return self._bh_malloc(sizedescr) + def gc_malloc_array(self, arraydescr, num_elem): + assert isinstance(arraydescr, ArrayDescr) + return self._bh_malloc_array(arraydescr, num_elem) - def record_constptrs(self, op, gcrefs_output_list): + def gc_malloc_str(self, num_elem): + return self._bh_malloc_array(self.str_descr, num_elem) + + def gc_malloc_unicode(self, num_elem): + return self._bh_malloc_array(self.unicode_descr, num_elem) + + def _record_constptrs(self, op, gcrefs_output_list): for i in range(op.numargs()): v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): @@ -61,11 +102,27 @@ rgc._make_sure_does_not_move(p) gcrefs_output_list.append(p) + def rewrite_assembler(self, cpu, operations, gcrefs_output_list): + rewriter = GcRewriterAssembler(self, cpu) + newops = rewriter.rewrite(operations) + # record all GCREFs, because the GC (or Boehm) cannot see them and + # keep them alive if they end up as constants in the assembler + for op in newops: + self._record_constptrs(op, gcrefs_output_list) + return newops + # ____________________________________________________________ class GcLLDescr_boehm(GcLLDescription): - moving_gc = False - gcrootmap = None + kind = 'boehm' + moving_gc = False + round_up = False + gcrootmap = None + write_barrier_descr = None + fielddescr_tid = None + str_type_id = 0 + unicode_type_id = 0 + get_malloc_slowpath_addr = None @classmethod def configure_boehm_once(cls): @@ -76,6 +133,16 @@ from pypy.rpython.tool import rffi_platform compilation_info = rffi_platform.configure_boehm() + # on some platform GC_init is required before any other + # GC_* functions, call it here for the benefit of tests + # XXX move this to tests + init_fn_ptr = rffi.llexternal("GC_init", + [], lltype.Void, + compilation_info=compilation_info, + sandboxsafe=True, + _nowrapper=True) + init_fn_ptr() + # Versions 6.x of libgc needs to use GC_local_malloc(). 
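
GcLLDescription.generate_function, introduced a little further up, registers every malloc helper under predictable attribute names (the callable itself, its FUNCPTR type and a call descr) so the backend and the blackhole interpreter can fetch them generically. A rough sketch of that registration pattern, with plain Python objects (GcDescBase, FakeGcDesc) standing in for the real lltype pieces:

    class GcDescBase(object):
        def __init__(self):
            self._generated_functions = []

        def generate_function(self, funcname, func, arg_types):
            # store the callable plus its metadata under predictable names
            setattr(self, funcname, func)
            setattr(self, funcname + '_ARGS', arg_types)
            setattr(self, funcname + '_descr', ('call descr for', funcname))
            self._generated_functions.append(funcname)

    class FakeGcDesc(GcDescBase):
        def __init__(self):
            GcDescBase.__init__(self)
            def malloc_fixedsize(size):
                return bytearray(size)   # the real helper returns NULL on failure
            self.generate_function('malloc_fixedsize', malloc_fixedsize, [int])

    gc = FakeGcDesc()
    assert gc._generated_functions == ['malloc_fixedsize']
    buf = gc.malloc_fixedsize(16)
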
# Versions 7.x of libgc removed this function; GC_malloc() has # the same behavior if libgc was compiled with @@ -95,96 +162,42 @@ sandboxsafe=True, _nowrapper=True) cls.malloc_fn_ptr = malloc_fn_ptr - cls.compilation_info = compilation_info return malloc_fn_ptr def __init__(self, gcdescr, translator, rtyper): GcLLDescription.__init__(self, gcdescr, translator, rtyper) # grab a pointer to the Boehm 'malloc' function - malloc_fn_ptr = self.configure_boehm_once() - self.funcptr_for_new = malloc_fn_ptr + self.malloc_fn_ptr = self.configure_boehm_once() + self._setup_str() + self._make_functions() - def malloc_array(basesize, itemsize, ofs_length, num_elem): + def _make_functions(self): + + def malloc_fixedsize(size): + return self.malloc_fn_ptr(size) + self.generate_function('malloc_fixedsize', malloc_fixedsize, + [lltype.Signed]) + + def malloc_array(basesize, num_elem, itemsize, ofs_length): try: - size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) except OverflowError: return lltype.nullptr(llmemory.GCREF.TO) - res = self.funcptr_for_new(size) - if not res: - return res - rffi.cast(rffi.CArrayPtr(lltype.Signed), res)[ofs_length/WORD] = num_elem + res = self.malloc_fn_ptr(totalsize) + if res: + arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) + arrayptr[ofs_length/WORD] = num_elem return res - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 4, llmemory.GCREF)) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 4) + def _bh_malloc(self, sizedescr): + return self.malloc_fixedsize(sizedescr.size) - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, self.translate_support_code) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code) - def malloc_str(length): - return self.malloc_array( - str_basesize, str_itemsize, str_ofs_length, length - ) - def malloc_unicode(length): - return self.malloc_array( - unicode_basesize, unicode_itemsize, unicode_ofs_length, length - ) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - - - # on some platform GC_init is required before any other - # GC_* functions, call it here for the benefit of tests - # XXX move this to tests - init_fn_ptr = rffi.llexternal("GC_init", - [], lltype.Void, - compilation_info=self.compilation_info, - sandboxsafe=True, - _nowrapper=True) - - init_fn_ptr() - - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.funcptr_for_new(sizedescr.size) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(basesize, itemsize, ofs_length, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size] - - def args_for_new_array(self, arraydescr): - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = 
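
The Boehm malloc_array above wraps the size computation in ovfcheck and hands back a null GCREF on overflow instead of raising, then stores the length into the array header at ofs_length. The same arithmetic transplanted to plain Python, with an explicit bound playing the role of ovfcheck (plain Python integers never overflow):

    import sys

    def total_array_size(basesize, itemsize, num_elem, max_size=sys.maxsize):
        # None here corresponds to returning a null GCREF in the hunk above
        items = itemsize * num_elem
        if items > max_size or basesize + items > max_size:
            return None
        return basesize + items

    assert total_array_size(8, 8, 10) == 88
    assert total_array_size(8, 8, sys.maxsize) is None
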
arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [basesize, itemsize, ofs_length] - - def get_funcptr_for_new(self): - return self.funcptr_for_new - - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # record all GCREFs too, because Boehm cannot see them and keep them - # alive if they end up as constants in the assembler - for op in operations: - self.record_constptrs(op, gcrefs_output_list) - return GcLLDescription.rewrite_assembler(self, cpu, operations, - gcrefs_output_list) + def _bh_malloc_array(self, arraydescr, num_elem): + return self.malloc_array(arraydescr.basesize, num_elem, + arraydescr.itemsize, + arraydescr.lendescr.offset) # ____________________________________________________________ @@ -554,12 +567,14 @@ class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): - GCClass = gc_ll_descr.GCClass self.llop1 = gc_ll_descr.llop1 self.WB_FUNCPTR = gc_ll_descr.WB_FUNCPTR self.WB_ARRAY_FUNCPTR = gc_ll_descr.WB_ARRAY_FUNCPTR - self.fielddescr_tid = get_field_descr(gc_ll_descr, GCClass.HDR, 'tid') + self.fielddescr_tid = gc_ll_descr.fielddescr_tid # + GCClass = gc_ll_descr.GCClass + if GCClass is None: # for tests + return self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = ( self.extract_flag_byte(self.jit_wb_if_flag)) @@ -596,48 +611,74 @@ funcaddr = llmemory.cast_ptr_to_adr(funcptr) return cpu.cast_adr_to_int(funcaddr) # this may return 0 + def has_write_barrier_from_array(self, cpu): + return self.get_write_barrier_from_array_fn(cpu) != 0 + class GcLLDescr_framework(GcLLDescription): DEBUG = False # forced to True by x86/test/test_zrpy_gc.py + kind = 'framework' + round_up = True - def __init__(self, gcdescr, translator, rtyper, llop1=llop): - from pypy.rpython.memory.gctypelayout import check_typeid - from pypy.rpython.memory.gcheader import GCHeaderBuilder - from pypy.rpython.memory.gctransform import framework + def __init__(self, gcdescr, translator, rtyper, llop1=llop, + really_not_translated=False): GcLLDescription.__init__(self, gcdescr, translator, rtyper) - assert self.translate_support_code, "required with the framework GC" self.translator = translator self.llop1 = llop1 + if really_not_translated: + assert not self.translate_support_code # but half does not work + self._initialize_for_tests() + else: + assert self.translate_support_code,"required with the framework GC" + self._check_valid_gc() + self._make_gcrootmap() + self._make_layoutbuilder() + self._setup_gcclass() + self._setup_tid() + self._setup_write_barrier() + self._setup_str() + self._make_functions(really_not_translated) + def _initialize_for_tests(self): + self.layoutbuilder = None + self.fielddescr_tid = AbstractDescr() + self.max_size_of_young_obj = 1000 + self.GCClass = None + + def _check_valid_gc(self): # we need the hybrid or minimark GC for rgc._make_sure_does_not_move() # to work - if gcdescr.config.translation.gc not in ('hybrid', 'minimark'): + if self.gcdescr.config.translation.gc not in ('hybrid', 'minimark'): raise NotImplementedError("--gc=%s not implemented with the JIT" % (gcdescr.config.translation.gc,)) + def _make_gcrootmap(self): # to find roots in the assembler, make a GcRootMap - name = gcdescr.config.translation.gcrootfinder + name = self.gcdescr.config.translation.gcrootfinder try: cls = globals()['GcRootMap_' + name] except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % 
(name,)) - gcrootmap = cls(gcdescr) + gcrootmap = cls(self.gcdescr) self.gcrootmap = gcrootmap + def _make_layoutbuilder(self): # make a TransformerLayoutBuilder and save it on the translator # where it can be fished and reused by the FrameworkGCTransformer + from pypy.rpython.memory.gctransform import framework + translator = self.translator self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} - gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + self.gcrootmap.add_jit2gc_hooks(translator._jit2gc) + def _setup_gcclass(self): + from pypy.rpython.memory.gcheader import GCHeaderBuilder self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO) - (self.array_basesize, _, self.array_length_ofs) = \ - symbolic.get_array_token(lltype.GcArray(lltype.Signed), True) self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() @@ -645,87 +686,124 @@ assert self.GCClass.inline_simple_malloc assert self.GCClass.inline_simple_malloc_varsize - # make a malloc function, with two arguments - def malloc_basic(size, tid): - type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - check_typeid(type_id) - res = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - type_id, size, - False, False, False) - # In case the operation above failed, we are returning NULL - # from this function to assembler. There is also an RPython - # exception set, typically MemoryError; but it's easier and - # faster to check for the NULL return value, as done by - # translator/exceptiontransform.py. - #llop.debug_print(lltype.Void, "\tmalloc_basic", size, type_id, - # "-->", res) - return res - self.malloc_basic = malloc_basic - self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType( - [lltype.Signed, lltype.Signed], llmemory.GCREF)) + def _setup_tid(self): + self.fielddescr_tid = get_field_descr(self, self.GCClass.HDR, 'tid') + + def _setup_write_barrier(self): self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, llmemory.Address], lltype.Void)) self.WB_ARRAY_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, lltype.Signed, llmemory.Address], lltype.Void)) self.write_barrier_descr = WriteBarrierDescr(self) - # + + def _make_functions(self, really_not_translated): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + (self.standard_array_basesize, _, self.standard_array_length_ofs) = \ + symbolic.get_array_token(lltype.GcArray(lltype.Signed), + not really_not_translated) + + def malloc_nursery_slowpath(size): + """Allocate 'size' null bytes out of the nursery. + Note that the fast path is typically inlined by the backend.""" + if self.DEBUG: + self._random_usage_of_xmm_registers() + type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, size, + False, False, False) + self.generate_function('malloc_nursery', malloc_nursery_slowpath, + [lltype.Signed]) + def malloc_array(itemsize, tid, num_elem): + """Allocate an array with a variable-size num_elem. 
+ Only works for standard arrays.""" type_id = llop.extract_ushort(llgroup.HALFWORD, tid) check_typeid(type_id) return llop1.do_malloc_varsize_clear( llmemory.GCREF, - type_id, num_elem, self.array_basesize, itemsize, - self.array_length_ofs) - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 3, llmemory.GCREF)) - # - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, True) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, True) - str_type_id = self.layoutbuilder.get_type_id(rstr.STR) - unicode_type_id = self.layoutbuilder.get_type_id(rstr.UNICODE) - # + type_id, num_elem, self.standard_array_basesize, itemsize, + self.standard_array_length_ofs) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 3) + + def malloc_array_nonstandard(basesize, itemsize, lengthofs, tid, + num_elem): + """For the rare case of non-standard arrays, i.e. arrays where + self.standard_array_{basesize,length_ofs} is wrong. It can + occur e.g. with arrays of floats on Win32.""" + type_id = llop.extract_ushort(llgroup.HALFWORD, tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + type_id, num_elem, basesize, itemsize, lengthofs) + self.generate_function('malloc_array_nonstandard', + malloc_array_nonstandard, + [lltype.Signed] * 5) + + str_type_id = self.str_descr.tid + str_basesize = self.str_descr.basesize + str_itemsize = self.str_descr.itemsize + str_ofs_length = self.str_descr.lendescr.offset + unicode_type_id = self.unicode_descr.tid + unicode_basesize = self.unicode_descr.basesize + unicode_itemsize = self.unicode_descr.itemsize + unicode_ofs_length = self.unicode_descr.lendescr.offset + def malloc_str(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, str_type_id, length, str_basesize, str_itemsize, str_ofs_length) + self.generate_function('malloc_str', malloc_str, + [lltype.Signed]) + def malloc_unicode(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, - unicode_type_id, length, unicode_basesize,unicode_itemsize, + unicode_type_id, length, unicode_basesize, unicode_itemsize, unicode_ofs_length) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - # - class ForTestOnly: - pass - for_test_only = ForTestOnly() - for_test_only.x = 1.23 - def random_usage_of_xmm_registers(): - x0 = for_test_only.x - x1 = x0 * 0.1 - x2 = x0 * 0.2 - x3 = x0 * 0.3 - for_test_only.x = x0 + x1 + x2 + x3 - # - def malloc_slowpath(size): - if self.DEBUG: - random_usage_of_xmm_registers() - assert size >= self.minimal_size_in_nursery - # NB. although we call do_malloc_fixedsize_clear() here, - # it's a bit of a hack because we set tid to 0 and may - # also use it to allocate varsized objects. The tid - # and possibly the length are both set afterward. - gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - 0, size, False, False, False) - return rffi.cast(lltype.Signed, gcref) - self.malloc_slowpath = malloc_slowpath - self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) + self.generate_function('malloc_unicode', malloc_unicode, + [lltype.Signed]) + + # Rarely called: allocate a fixed-size amount of bytes, but + # not in the nursery, because it is too big. Implemented like + # malloc_nursery_slowpath() above. 
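
malloc_nursery_slowpath above handles only the rare case; as its docstring notes, the backend inlines a bump-pointer fast path over the nursery 'free' and 'top' pointers (exposed via get_nursery_free_addr/get_nursery_top_addr). A schematic, heavily simplified version of that split, with a made-up ToyNursery class and a fake collect step:

    class ToyNursery(object):
        def __init__(self, size=1024):
            self.free = 0       # next free byte
            self.top = size     # end of the nursery

        def _bump(self, size):
            result = self.free
            self.free += size
            return result

        def malloc_nursery_slowpath(self, size):
            # stand-in for "run a minor collection, then retry"
            self.free = 0
            return self._bump(size)

        def malloc(self, size):
            if self.free + size <= self.top:            # inlined fast path
                return self._bump(size)
            return self.malloc_nursery_slowpath(size)   # rare slow path

    n = ToyNursery()
    assert n.malloc(32) == 0
    assert n.malloc(32) == 32
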
+ self.generate_function('malloc_fixedsize', malloc_nursery_slowpath, + [lltype.Signed]) + + def _bh_malloc(self, sizedescr): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, sizedescr.tid) + check_typeid(type_id) + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, sizedescr.size, + False, False, False) + + def _bh_malloc_array(self, arraydescr, num_elem): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, arraydescr.tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear(llmemory.GCREF, + type_id, num_elem, + arraydescr.basesize, + arraydescr.itemsize, + arraydescr.lendescr.offset) + + + class ForTestOnly: + pass + for_test_only = ForTestOnly() + for_test_only.x = 1.23 + + def _random_usage_of_xmm_registers(self): + x0 = self.for_test_only.x + x1 = x0 * 0.1 + x2 = x0 * 0.2 + x3 = x0 * 0.3 + self.for_test_only.x = x0 + x1 + x2 + x3 def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -735,49 +813,26 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) - return rffi.cast(lltype.Signed, fptr) - def initialize(self): self.gcrootmap.initialize() def init_size_descr(self, S, descr): - type_id = self.layoutbuilder.get_type_id(S) - assert not self.layoutbuilder.is_weakref_type(S) - assert not self.layoutbuilder.has_finalizer(S) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(S) + assert not self.layoutbuilder.is_weakref_type(S) + assert not self.layoutbuilder.has_finalizer(S) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) def init_array_descr(self, A, descr): - type_id = self.layoutbuilder.get_type_id(A) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(A) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.malloc_basic(sizedescr.size, sizedescr.tid) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(itemsize, arraydescr.tid, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size, sizedescr.tid] - - def args_for_new_array(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [itemsize, arraydescr.tid] - - def get_funcptr_for_new(self): - return llhelper(self.GC_MALLOC_BASIC, self.malloc_basic) + def _set_tid(self, gcptr, tid): + hdr_addr = llmemory.cast_ptr_to_adr(gcptr) + hdr_addr -= self.gcheaderbuilder.size_gc_header + hdr = llmemory.cast_adr_to_ptr(hdr_addr, self.HDRPTR) + hdr.tid = tid def do_write_barrier(self, gcref_struct, gcref_newptr): hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct) @@ -791,108 +846,8 @@ funcptr(llmemory.cast_ptr_to_adr(gcref_struct), 
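
_set_tid above depends on the GC header being laid out immediately before the object, so the tid is written at the object address minus size_gc_header. The same addressing in a toy flat-memory model; HEADER_SIZE and the 'q' struct format are assumptions for a 64-bit layout, not values taken from gcheaderbuilder:

    import struct

    HEADER_SIZE = 8              # assumed: one Signed 'tid' per header
    memory = bytearray(64)       # toy heap

    def set_tid(obj_addr, tid):
        # step back over the header that precedes the object
        struct.pack_into('q', memory, obj_addr - HEADER_SIZE, tid)

    def get_tid(obj_addr):
        return struct.unpack_from('q', memory, obj_addr - HEADER_SIZE)[0]

    set_tid(16, 1234)
    assert get_tid(16) == 1234
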
llmemory.cast_ptr_to_adr(gcref_newptr)) - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # Perform two kinds of rewrites in parallel: - # - # - Add COND_CALLs to the write barrier before SETFIELD_GC and - # SETARRAYITEM_GC operations. - # - # - Record the ConstPtrs from the assembler. - # - newops = [] - known_lengths = {} - # we can only remember one malloc since the next malloc can possibly - # collect - last_malloc = None - for op in operations: - if op.getopnum() == rop.DEBUG_MERGE_POINT: - continue - # ---------- record the ConstPtrs ---------- - self.record_constptrs(op, gcrefs_output_list) - if op.is_malloc(): - last_malloc = op.result - elif op.can_malloc(): - last_malloc = None - # ---------- write barrier for SETFIELD_GC ---------- - if op.getopnum() == rop.SETFIELD_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(1) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETFIELD_RAW) - # ---------- write barrier for SETINTERIORFIELD_GC ------ - if op.getopnum() == rop.SETINTERIORFIELD_GC: - val = op.getarg(0) - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) - # ---------- write barrier for SETARRAYITEM_GC ---------- - if op.getopnum() == rop.SETARRAYITEM_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier_array(newops, op.getarg(0), - op.getarg(1), v, - cpu, known_lengths) - op = op.copy_and_change(rop.SETARRAYITEM_RAW) - elif op.getopnum() == rop.NEW_ARRAY: - v_length = op.getarg(0) - if isinstance(v_length, ConstInt): - known_lengths[op.result] = v_length.getint() - # ---------- - newops.append(op) - return newops - - def _gen_write_barrier(self, newops, v_base, v_value): - args = [v_base, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, - descr=self.write_barrier_descr)) - - def _gen_write_barrier_array(self, newops, v_base, v_index, v_value, - cpu, known_lengths): - if self.write_barrier_descr.get_write_barrier_from_array_fn(cpu) != 0: - # If we know statically the length of 'v', and it is not too - # big, then produce a regular write_barrier. If it's unknown or - # too big, produce instead a write_barrier_from_array. 
- LARGE = 130 - length = known_lengths.get(v_base, LARGE) - if length >= LARGE: - # unknown or too big: produce a write_barrier_from_array - args = [v_base, v_index, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, - None, - descr=self.write_barrier_descr)) - return - # fall-back case: produce a write_barrier - self._gen_write_barrier(newops, v_base, v_value) - - def can_inline_malloc(self, descr): - assert isinstance(descr, BaseSizeDescr) - if descr.size < self.max_size_of_young_obj: - has_finalizer = bool(descr.tid & (1<= LARGE: + # unknown or too big: produce a write_barrier_from_array + args = [v_base, v_index, v_value] + self.newops.append( + ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, + descr=write_barrier_descr)) + return + # fall-back case: produce a write_barrier + self.gen_write_barrier(v_base, v_value) + + def round_up_for_allocation(self, size): + if not self.gc_ll_descr.round_up: + return size + if self.gc_ll_descr.translate_support_code: + from pypy.rpython.lltypesystem import llarena + return llarena.round_up_for_allocation( + size, self.gc_ll_descr.minimal_size_in_nursery) + else: + # non-translated: do it manually + # assume that "self.gc_ll_descr.minimal_size_in_nursery" is 2 WORDs + size = max(size, 2 * WORD) + return (size + WORD-1) & ~(WORD-1) # round up diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -1,4 +1,4 @@ -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport import symbolic from pypy.rlib.objectmodel import Symbolic @@ -53,18 +53,6 @@ ('z', lltype.Ptr(U)), ('f', lltype.Float), ('s', lltype.SingleFloat)) - assert getFieldDescrClass(lltype.Ptr(T)) is GcPtrFieldDescr - assert getFieldDescrClass(lltype.Ptr(U)) is NonGcPtrFieldDescr - cls = getFieldDescrClass(lltype.Char) - assert cls != getFieldDescrClass(lltype.Signed) - assert cls == getFieldDescrClass(lltype.Char) - clsf = getFieldDescrClass(lltype.Float) - assert clsf != cls - assert clsf == getFieldDescrClass(lltype.Float) - clss = getFieldDescrClass(lltype.SingleFloat) - assert clss not in (cls, clsf) - assert clss == getFieldDescrClass(lltype.SingleFloat) - assert clss == getFieldDescrClass(rffi.UINT) # for now # c0 = GcCache(False) c1 = GcCache(True) @@ -77,11 +65,7 @@ descr_z = get_field_descr(c2, S, 'z') descr_f = get_field_descr(c2, S, 'f') descr_s = get_field_descr(c2, S, 's') - assert descr_x.__class__ is cls - assert descr_y.__class__ is GcPtrFieldDescr - assert descr_z.__class__ is NonGcPtrFieldDescr - assert descr_f.__class__ is clsf - assert descr_s.__class__ is clss + assert isinstance(descr_x, FieldDescr) assert descr_x.name == 'S.x' assert descr_y.name == 'S.y' assert descr_z.name == 'S.z' @@ -90,33 +74,27 @@ if not tsc: assert descr_x.offset < descr_y.offset < descr_z.offset assert descr_x.sort_key() < descr_y.sort_key() < descr_z.sort_key() - assert descr_x.get_field_size(False) == rffi.sizeof(lltype.Char) - assert descr_y.get_field_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr_z.get_field_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr_f.get_field_size(False) == rffi.sizeof(lltype.Float) - assert descr_s.get_field_size(False) == rffi.sizeof( - lltype.SingleFloat) + assert descr_x.field_size == rffi.sizeof(lltype.Char) + assert descr_y.field_size 
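
round_up_for_allocation above (the non-translated branch) bumps every request to the assumed minimal object size of two words and then rounds up to a word multiple. The same arithmetic spelled out, assuming a 64-bit WORD of 8 bytes:

    WORD = 8    # assumption: 64-bit platform; 4 on 32-bit

    def round_up_for_allocation(size, min_size=2 * WORD):
        size = max(size, min_size)               # enforce the minimal object size
        return (size + WORD - 1) & ~(WORD - 1)   # round up to a WORD multiple

    assert round_up_for_allocation(1) == 16   # bumped to the 2-WORD minimum
    assert round_up_for_allocation(17) == 24  # rounded up to the next WORD
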
== rffi.sizeof(lltype.Ptr(T)) + assert descr_z.field_size == rffi.sizeof(lltype.Ptr(U)) + assert descr_f.field_size == rffi.sizeof(lltype.Float) + assert descr_s.field_size == rffi.sizeof(lltype.SingleFloat) else: assert isinstance(descr_x.offset, Symbolic) assert isinstance(descr_y.offset, Symbolic) assert isinstance(descr_z.offset, Symbolic) assert isinstance(descr_f.offset, Symbolic) assert isinstance(descr_s.offset, Symbolic) - assert isinstance(descr_x.get_field_size(True), Symbolic) - assert isinstance(descr_y.get_field_size(True), Symbolic) - assert isinstance(descr_z.get_field_size(True), Symbolic) - assert isinstance(descr_f.get_field_size(True), Symbolic) - assert isinstance(descr_s.get_field_size(True), Symbolic) - assert not descr_x.is_pointer_field() - assert descr_y.is_pointer_field() - assert not descr_z.is_pointer_field() - assert not descr_f.is_pointer_field() - assert not descr_s.is_pointer_field() - assert not descr_x.is_float_field() - assert not descr_y.is_float_field() - assert not descr_z.is_float_field() - assert descr_f.is_float_field() - assert not descr_s.is_float_field() + assert isinstance(descr_x.field_size, Symbolic) + assert isinstance(descr_y.field_size, Symbolic) + assert isinstance(descr_z.field_size, Symbolic) + assert isinstance(descr_f.field_size, Symbolic) + assert isinstance(descr_s.field_size, Symbolic) + assert descr_x.flag == FLAG_UNSIGNED + assert descr_y.flag == FLAG_POINTER + assert descr_z.flag == FLAG_UNSIGNED + assert descr_f.flag == FLAG_FLOAT + assert descr_s.flag == FLAG_UNSIGNED def test_get_field_descr_sign(): @@ -128,7 +106,8 @@ for tsc in [False, True]: c2 = GcCache(tsc) descr_x = get_field_descr(c2, S, 'x') - assert descr_x.is_field_signed() == signed + assert descr_x.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] def test_get_field_descr_longlong(): if sys.maxint > 2147483647: @@ -136,9 +115,8 @@ c0 = GcCache(False) S = lltype.GcStruct('S', ('y', lltype.UnsignedLongLong)) descr = get_field_descr(c0, S, 'y') - assert not descr.is_pointer_field() - assert descr.is_float_field() - assert descr.get_field_size(False) == 8 + assert descr.flag == FLAG_FLOAT + assert descr.field_size == 8 def test_get_array_descr(): @@ -149,19 +127,8 @@ A3 = lltype.GcArray(lltype.Ptr(U)) A4 = lltype.GcArray(lltype.Float) A5 = lltype.GcArray(lltype.Struct('x', ('v', lltype.Signed), - ('k', lltype.Signed))) + ('k', lltype.Signed))) A6 = lltype.GcArray(lltype.SingleFloat) - assert getArrayDescrClass(A2) is GcPtrArrayDescr - assert getArrayDescrClass(A3) is NonGcPtrArrayDescr - cls = getArrayDescrClass(A1) - assert cls != getArrayDescrClass(lltype.GcArray(lltype.Signed)) - assert cls == getArrayDescrClass(lltype.GcArray(lltype.Char)) - clsf = getArrayDescrClass(A4) - assert clsf != cls - assert clsf == getArrayDescrClass(lltype.GcArray(lltype.Float)) - clss = getArrayDescrClass(A6) - assert clss not in (clsf, cls) - assert clss == getArrayDescrClass(lltype.GcArray(rffi.UINT)) # c0 = GcCache(False) descr1 = get_array_descr(c0, A1) @@ -170,82 +137,61 @@ descr4 = get_array_descr(c0, A4) descr5 = get_array_descr(c0, A5) descr6 = get_array_descr(c0, A6) - assert descr1.__class__ is cls - assert descr2.__class__ is GcPtrArrayDescr - assert descr3.__class__ is NonGcPtrArrayDescr - assert descr4.__class__ is clsf - assert descr6.__class__ is clss + assert isinstance(descr1, ArrayDescr) assert descr1 == get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert not descr1.is_array_of_pointers() - assert descr2.is_array_of_pointers() - assert not 
descr3.is_array_of_pointers() - assert not descr4.is_array_of_pointers() - assert not descr5.is_array_of_pointers() - assert not descr1.is_array_of_floats() - assert not descr2.is_array_of_floats() - assert not descr3.is_array_of_floats() - assert descr4.is_array_of_floats() - assert not descr5.is_array_of_floats() + assert descr1.flag == FLAG_UNSIGNED + assert descr2.flag == FLAG_POINTER + assert descr3.flag == FLAG_UNSIGNED + assert descr4.flag == FLAG_FLOAT + assert descr5.flag == FLAG_STRUCT + assert descr6.flag == FLAG_UNSIGNED # def get_alignment(code): # Retrieve default alignment for the compiler/platform return struct.calcsize('l' + code) - struct.calcsize(code) - assert descr1.get_base_size(False) == get_alignment('c') - assert descr2.get_base_size(False) == get_alignment('p') - assert descr3.get_base_size(False) == get_alignment('p') - assert descr4.get_base_size(False) == get_alignment('d') - assert descr5.get_base_size(False) == get_alignment('f') - assert descr1.get_ofs_length(False) == 0 - assert descr2.get_ofs_length(False) == 0 - assert descr3.get_ofs_length(False) == 0 - assert descr4.get_ofs_length(False) == 0 - assert descr5.get_ofs_length(False) == 0 - assert descr1.get_item_size(False) == rffi.sizeof(lltype.Char) - assert descr2.get_item_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr3.get_item_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr4.get_item_size(False) == rffi.sizeof(lltype.Float) - assert descr5.get_item_size(False) == rffi.sizeof(lltype.Signed) * 2 - assert descr6.get_item_size(False) == rffi.sizeof(lltype.SingleFloat) + assert descr1.basesize == get_alignment('c') + assert descr2.basesize == get_alignment('p') + assert descr3.basesize == get_alignment('p') + assert descr4.basesize == get_alignment('d') + assert descr5.basesize == get_alignment('f') + assert descr1.lendescr.offset == 0 + assert descr2.lendescr.offset == 0 + assert descr3.lendescr.offset == 0 + assert descr4.lendescr.offset == 0 + assert descr5.lendescr.offset == 0 + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr2.itemsize == rffi.sizeof(lltype.Ptr(T)) + assert descr3.itemsize == rffi.sizeof(lltype.Ptr(U)) + assert descr4.itemsize == rffi.sizeof(lltype.Float) + assert descr5.itemsize == rffi.sizeof(lltype.Signed) * 2 + assert descr6.itemsize == rffi.sizeof(lltype.SingleFloat) # - assert isinstance(descr1.get_base_size(True), Symbolic) - assert isinstance(descr2.get_base_size(True), Symbolic) - assert isinstance(descr3.get_base_size(True), Symbolic) - assert isinstance(descr4.get_base_size(True), Symbolic) - assert isinstance(descr5.get_base_size(True), Symbolic) - assert isinstance(descr1.get_ofs_length(True), Symbolic) - assert isinstance(descr2.get_ofs_length(True), Symbolic) - assert isinstance(descr3.get_ofs_length(True), Symbolic) - assert isinstance(descr4.get_ofs_length(True), Symbolic) - assert isinstance(descr5.get_ofs_length(True), Symbolic) - assert isinstance(descr1.get_item_size(True), Symbolic) - assert isinstance(descr2.get_item_size(True), Symbolic) - assert isinstance(descr3.get_item_size(True), Symbolic) - assert isinstance(descr4.get_item_size(True), Symbolic) - assert isinstance(descr5.get_item_size(True), Symbolic) CA = rffi.CArray(lltype.Signed) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_SIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = 
rffi.CArray(lltype.Ptr(lltype.GcStruct('S'))) descr = get_array_descr(c0, CA) - assert descr.is_array_of_pointers() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_POINTER + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Ptr(lltype.Struct('S'))) descr = get_array_descr(c0, CA) - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Float) descr = get_array_descr(c0, CA) - assert descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_FLOAT + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(rffi.FLOAT) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.itemsize == rffi.sizeof(lltype.SingleFloat) + assert descr.lendescr is None def test_get_array_descr_sign(): @@ -257,46 +203,55 @@ for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, A) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] # RA = rffi.CArray(RESTYPE) for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, RA) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] + + +def test_get_array_descr_str(): + c0 = GcCache(False) + descr1 = get_array_descr(c0, rstr.STR) + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr1.flag == FLAG_UNSIGNED def test_get_call_descr_not_translated(): c0 = GcCache(False) descr1 = get_call_descr(c0, [lltype.Char, lltype.Signed], lltype.Char) - assert descr1.get_result_size(False) == rffi.sizeof(lltype.Char) - assert descr1.get_return_type() == history.INT + assert descr1.get_result_size() == rffi.sizeof(lltype.Char) + assert descr1.get_result_type() == history.INT assert descr1.arg_classes == "ii" # T = lltype.GcStruct('T') descr2 = get_call_descr(c0, [lltype.Ptr(T)], lltype.Ptr(T)) - assert descr2.get_result_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr2.get_return_type() == history.REF + assert descr2.get_result_size() == rffi.sizeof(lltype.Ptr(T)) + assert descr2.get_result_type() == history.REF assert descr2.arg_classes == "r" # U = lltype.GcStruct('U', ('x', lltype.Signed)) assert descr2 == get_call_descr(c0, [lltype.Ptr(U)], lltype.Ptr(U)) # V = lltype.Struct('V', ('x', lltype.Signed)) - assert (get_call_descr(c0, [], lltype.Ptr(V)).get_return_type() == + assert (get_call_descr(c0, [], lltype.Ptr(V)).get_result_type() == history.INT) # - assert (get_call_descr(c0, [], lltype.Void).get_return_type() == + assert (get_call_descr(c0, [], lltype.Void).get_result_type() == history.VOID) # descr4 = get_call_descr(c0, [lltype.Float, lltype.Float], lltype.Float) - assert descr4.get_result_size(False) == rffi.sizeof(lltype.Float) - assert descr4.get_return_type() == history.FLOAT + assert descr4.get_result_size() == rffi.sizeof(lltype.Float) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c0, [lltype.SingleFloat], lltype.SingleFloat) - assert descr5.get_result_size(False) == 
rffi.sizeof(lltype.SingleFloat) - assert descr5.get_return_type() == "S" + assert descr5.get_result_size() == rffi.sizeof(lltype.SingleFloat) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_get_call_descr_not_translated_longlong(): @@ -305,13 +260,13 @@ c0 = GcCache(False) # descr5 = get_call_descr(c0, [lltype.SignedLongLong], lltype.Signed) - assert descr5.get_result_size(False) == 4 - assert descr5.get_return_type() == history.INT + assert descr5.get_result_size() == 4 + assert descr5.get_result_type() == history.INT assert descr5.arg_classes == "L" # descr6 = get_call_descr(c0, [lltype.Signed], lltype.SignedLongLong) - assert descr6.get_result_size(False) == 8 - assert descr6.get_return_type() == "L" + assert descr6.get_result_size() == 8 + assert descr6.get_result_type() == "L" assert descr6.arg_classes == "i" def test_get_call_descr_translated(): @@ -319,18 +274,18 @@ T = lltype.GcStruct('T') U = lltype.GcStruct('U', ('x', lltype.Signed)) descr3 = get_call_descr(c1, [lltype.Ptr(T)], lltype.Ptr(U)) - assert isinstance(descr3.get_result_size(True), Symbolic) - assert descr3.get_return_type() == history.REF + assert isinstance(descr3.get_result_size(), Symbolic) + assert descr3.get_result_type() == history.REF assert descr3.arg_classes == "r" # descr4 = get_call_descr(c1, [lltype.Float, lltype.Float], lltype.Float) - assert isinstance(descr4.get_result_size(True), Symbolic) - assert descr4.get_return_type() == history.FLOAT + assert isinstance(descr4.get_result_size(), Symbolic) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c1, [lltype.SingleFloat], lltype.SingleFloat) - assert isinstance(descr5.get_result_size(True), Symbolic) - assert descr5.get_return_type() == "S" + assert isinstance(descr5.get_result_size(), Symbolic) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_call_descr_extra_info(): @@ -358,6 +313,10 @@ def test_repr_of_descr(): + def repr_of_descr(descr): + s = descr.repr_of_descr() + assert ',' not in s # makes the life easier for pypy.tool.jitlogparser + return s c0 = GcCache(False) T = lltype.GcStruct('T') S = lltype.GcStruct('S', ('x', lltype.Char), @@ -365,33 +324,34 @@ ('z', lltype.Ptr(T))) descr1 = get_size_descr(c0, S) s = symbolic.get_size(S, False) - assert descr1.repr_of_descr() == '' % s + assert repr_of_descr(descr1) == '' % s # descr2 = get_field_descr(c0, S, 'y') o, _ = symbolic.get_field_token(S, 'y', False) - assert descr2.repr_of_descr() == '' % o + assert repr_of_descr(descr2) == '' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) - assert descr2i.repr_of_descr() == '' % o + assert repr_of_descr(descr2i) == '' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) - assert descr3.repr_of_descr() == '' + o = symbolic.get_size(lltype.Ptr(S), False) + assert repr_of_descr(descr3) == '' % o # descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert descr3i.repr_of_descr() == '' + assert repr_of_descr(descr3i) == '' # descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) - assert 'GcPtrCallDescr' in descr4.repr_of_descr() + assert repr_of_descr(descr4) == '' % o # descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) - assert 'CharCallDescr' in descr4i.repr_of_descr() + assert repr_of_descr(descr4i) == '' # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) - assert 'FloatCallDescr' in 
descr4f.repr_of_descr() + assert repr_of_descr(descr4f) == '' # descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat) - assert 'SingleFloatCallDescr' in descr5f.repr_of_descr() + assert repr_of_descr(descr5f) == '' def test_call_stubs_1(): c0 = GcCache(False) @@ -401,10 +361,10 @@ def f(a, b): return 'c' - call_stub = descr1.call_stub fnptr = llhelper(lltype.Ptr(lltype.FuncType(ARGS, RES)), f) - res = call_stub(rffi.cast(lltype.Signed, fnptr), [1, 2], None, None) + res = descr1.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [1, 2], None, None) assert res == ord('c') def test_call_stubs_2(): @@ -421,8 +381,8 @@ a = lltype.malloc(ARRAY, 3) opaquea = lltype.cast_opaque_ptr(llmemory.GCREF, a) a[0] = 1 - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [], [opaquea], [longlong.getfloatstorage(3.5)]) + res = descr2.call_stub_f(rffi.cast(lltype.Signed, fnptr), + [], [opaquea], [longlong.getfloatstorage(3.5)]) assert longlong.getrealfloat(res) == 4.5 def test_call_stubs_single_float(): @@ -445,6 +405,22 @@ a = intmask(singlefloat2uint(r_singlefloat(-10.0))) b = intmask(singlefloat2uint(r_singlefloat(3.0))) c = intmask(singlefloat2uint(r_singlefloat(2.0))) - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [a, b, c], [], []) + res = descr2.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [a, b, c], [], []) assert float(uint2singlefloat(rffi.r_uint(res))) == -11.5 + +def test_field_arraylen_descr(): + c0 = GcCache(True) + A1 = lltype.GcArray(lltype.Signed) + fielddescr = get_field_arraylen_descr(c0, A1) + assert isinstance(fielddescr, FieldDescr) + ofs = fielddescr.offset + assert repr(ofs) == '< ArrayLengthOffset >' + # + fielddescr = get_field_arraylen_descr(c0, rstr.STR) + ofs = fielddescr.offset + assert repr(ofs) == ("< " + " 'chars'> + < ArrayLengthOffset" + " > >") + # caching: + assert fielddescr is get_field_arraylen_descr(c0, rstr.STR) diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -1,5 +1,6 @@ from pypy.rlib.libffi import types from pypy.jit.codewriter.longlong import is_64_bit +from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport.ffisupport import * @@ -15,7 +16,9 @@ args = [types.sint, types.pointer] descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, None, ffi_flags=42) - assert isinstance(descr, DynamicIntCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_type == 'i' + assert descr.result_flag == FLAG_SIGNED assert descr.arg_classes == 'ii' assert descr.get_ffi_flags() == 42 @@ -24,18 +27,20 @@ assert descr is None # missing floats descr = get_call_descr_dynamic(FakeCPU(supports_floats=True), args, types.void, None, ffi_flags=43) - assert isinstance(descr, VoidCallDescr) + assert descr.result_type == 'v' + assert descr.result_flag == FLAG_VOID assert descr.arg_classes == 'ifi' assert descr.get_ffi_flags() == 43 descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_SIGNED assert descr.is_result_signed() == True descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert isinstance(descr, CallDescr) + assert descr.get_result_size() == 1 + 
assert descr.result_flag == FLAG_UNSIGNED assert descr.is_result_signed() == False if not is_64_bit: @@ -44,7 +49,9 @@ assert descr is None # missing longlongs descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True), [], types.slonglong, None, ffi_flags=43) - assert isinstance(descr, LongLongCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_flag == FLAG_FLOAT + assert descr.result_type == 'L' assert descr.get_ffi_flags() == 43 else: assert types.slonglong is types.slong @@ -53,6 +60,6 @@ assert descr is None # missing singlefloats descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True), [], types.float, None, ffi_flags=44) - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - assert isinstance(descr, SingleFloatCallDescr) + assert descr.result_flag == FLAG_UNSIGNED + assert descr.result_type == 'S' assert descr.get_ffi_flags() == 44 diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -6,6 +6,7 @@ from pypy.jit.backend.llsupport.gc import * from pypy.jit.backend.llsupport import symbolic from pypy.jit.metainterp.gc import get_description +from pypy.jit.metainterp.history import BoxPtr, BoxInt, ConstPtr from pypy.jit.metainterp.resoperation import get_deep_immutable_oplist from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE @@ -15,12 +16,12 @@ gc_ll_descr = GcLLDescr_boehm(None, None, None) # record = [] - prev_funcptr_for_new = gc_ll_descr.funcptr_for_new - def my_funcptr_for_new(size): - p = prev_funcptr_for_new(size) + prev_malloc_fn_ptr = gc_ll_descr.malloc_fn_ptr + def my_malloc_fn_ptr(size): + p = prev_malloc_fn_ptr(size) record.append((size, p)) return p - gc_ll_descr.funcptr_for_new = my_funcptr_for_new + gc_ll_descr.malloc_fn_ptr = my_malloc_fn_ptr # # ---------- gc_malloc ---------- S = lltype.GcStruct('S', ('x', lltype.Signed)) @@ -32,8 +33,8 @@ A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(gc_ll_descr, A) p = gc_ll_descr.gc_malloc_array(arraydescr, 10) - assert record == [(arraydescr.get_base_size(False) + - 10 * arraydescr.get_item_size(False), p)] + assert record == [(arraydescr.basesize + + 10 * arraydescr.itemsize, p)] del record[:] # ---------- gc_malloc_str ---------- p = gc_ll_descr.gc_malloc_str(10) @@ -246,24 +247,28 @@ def __init__(self): self.record = [] + def _malloc(self, type_id, size): + tid = llop.combine_ushort(lltype.Signed, type_id, 0) + x = llmemory.raw_malloc(self.gcheaderbuilder.size_gc_header + size) + x += self.gcheaderbuilder.size_gc_header + return x, tid + def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size, has_finalizer, has_light_finalizer, contains_weakptr): assert not contains_weakptr - assert not has_finalizer # in these tests - assert not has_light_finalizer # in these tests - p = llmemory.raw_malloc(size) + assert not has_finalizer + assert not has_light_finalizer + p, tid = self._malloc(type_id, size) p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("fixedsize", repr(size), tid, p)) return p def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, itemsize, offset_to_length): - p = llmemory.raw_malloc(size + itemsize * length) + p, tid = self._malloc(type_id, size + itemsize * length) (p + offset_to_length).signed[0] = length p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = 
llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("varsize", tid, length, repr(size), repr(itemsize), repr(offset_to_length), p)) @@ -322,43 +327,40 @@ gc_ll_descr = GcLLDescr_framework(gcdescr, FakeTranslator(), None, llop1) gc_ll_descr.initialize() + llop1.gcheaderbuilder = gc_ll_descr.gcheaderbuilder self.llop1 = llop1 self.gc_ll_descr = gc_ll_descr self.fake_cpu = FakeCPU() - def test_args_for_new(self): - S = lltype.GcStruct('S', ('x', lltype.Signed)) - sizedescr = get_size_descr(self.gc_ll_descr, S) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed - A = lltype.GcArray(lltype.Signed) - arraydescr = get_array_descr(self.gc_ll_descr, A) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed +## def test_args_for_new(self): +## S = lltype.GcStruct('S', ('x', lltype.Signed)) +## sizedescr = get_size_descr(self.gc_ll_descr, S) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed +## A = lltype.GcArray(lltype.Signed) +## arraydescr = get_array_descr(self.gc_ll_descr, A) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed def test_gc_malloc(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) sizedescr = get_size_descr(self.gc_ll_descr, S) p = self.gc_ll_descr.gc_malloc(sizedescr) - assert self.llop1.record == [("fixedsize", - repr(sizedescr.size), + assert lltype.typeOf(p) == llmemory.GCREF + assert self.llop1.record == [("fixedsize", repr(sizedescr.size), sizedescr.tid, p)] - assert repr(self.gc_ll_descr.args_for_new(sizedescr)) == repr( - [sizedescr.size, sizedescr.tid]) def test_gc_malloc_array(self): A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(self.gc_ll_descr, A) p = self.gc_ll_descr.gc_malloc_array(arraydescr, 10) assert self.llop1.record == [("varsize", arraydescr.tid, 10, - repr(arraydescr.get_base_size(True)), - repr(arraydescr.get_item_size(True)), - repr(arraydescr.get_ofs_length(True)), + repr(arraydescr.basesize), + repr(arraydescr.itemsize), + repr(arraydescr.lendescr.offset), p)] - assert repr(self.gc_ll_descr.args_for_new_array(arraydescr)) == repr( - [arraydescr.get_item_size(True), arraydescr.tid]) def test_gc_malloc_str(self): p = self.gc_ll_descr.gc_malloc_str(10) @@ -404,10 +406,11 @@ gc_ll_descr = self.gc_ll_descr llop1 = self.llop1 # - newops = [] + rewriter = GcRewriterAssembler(gc_ll_descr, None) + newops = rewriter.newops v_base = BoxPtr() v_value = BoxPtr() - gc_ll_descr._gen_write_barrier(newops, v_base, v_value) + rewriter.gen_write_barrier(v_base, v_value) assert llop1.record == [] assert len(newops) == 1 assert newops[0].getopnum() == rop.COND_CALL_GC_WB @@ -427,8 +430,7 @@ operations = gc_ll_descr.rewrite_assembler(None, operations, []) assert len(operations) == 0 - def test_rewrite_assembler_1(self): - # check recording of ConstPtrs + def test_record_constptrs(self): class MyFakeCPU(object): def cast_adr_to_int(self, adr): assert adr == "some fake address" @@ -455,211 +457,6 @@ assert operations2 == operations assert gcrefs == [s_gcref] - def test_rewrite_assembler_2(self): - # check write barriers before SETFIELD_GC - v_base = BoxPtr() - v_value = BoxPtr() - field_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETFIELD_GC, [v_base, v_value], None, - descr=field_descr), - ] - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations 
= gc_ll_descr.rewrite_assembler(self.fake_cpu, operations, - []) - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETFIELD_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_value - assert operations[1].getdescr() == field_descr - - def test_rewrite_assembler_3(self): - # check write barriers before SETARRAYITEM_GC - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_4(self): - # check write barriers before SETARRAYITEM_GC, - # if we have actually a write_barrier_from_array. 
- self.llop1._have_wb_from_array = True - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - if isinstance(v_new_length, ConstInt) and v_new_length.value < 130: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - else: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB_ARRAY - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_index - assert operations[0].getarg(2) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_5(self): - S = lltype.GcStruct('S') - A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) - interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, - A.OF, 'x') - wbdescr = self.gc_ll_descr.write_barrier_descr - ops = parse(""" - [p1, p2] - setinteriorfield_gc(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - expected = parse(""" - [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setinteriorfield_raw(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - # no write barrier - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_2(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - wbdescr = self.gc_ll_descr.write_barrier_descr - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = 
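
The tests removed above exercised the old in-place rewrite_assembler; the rule they checked, now implemented by GcRewriterAssembler in rewrite.py, is that a SETFIELD_GC or SETARRAYITEM_GC whose target is not the object produced by the immediately preceding malloc gets a COND_CALL_GC_WB inserted in front and is downgraded to the _RAW variant, while stores into a freshly allocated object need no barrier. A toy model of that rule, with an Op class and string opnames standing in for ResOperation and the rop constants (the real code also checks that the stored value is a non-NULL pointer):

    class Op(object):
        def __init__(self, opname, args):
            self.opname, self.args = opname, args

    def rewrite(operations):
        newops = []
        last_malloc = None
        for op in operations:
            if op.opname == 'new':
                last_malloc = op.args[0]          # freshly allocated object
            elif op.opname == 'setfield_gc':
                target, value = op.args
                if target is not last_malloc:     # initialization stores skip the barrier
                    newops.append(Op('cond_call_gc_wb', [target, value]))
                    op = Op('setfield_raw', op.args)
            newops.append(op)
        return newops

    p0, p1, p2 = object(), object(), object()
    ops = rewrite([Op('new', [p0]),
                   Op('setfield_gc', [p0, p1]),   # store into the fresh object
                   Op('setfield_gc', [p2, p1])])  # store into an older object
    assert [op.opname for op in ops] == ['new', 'setfield_gc',
                                         'cond_call_gc_wb', 'setfield_raw']
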
new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_3(self): - A = lltype.GcArray(lltype.Ptr(lltype.GcStruct('S'))) - arraydescr = get_array_descr(self.gc_ll_descr, A) - ops = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) class TestFrameworkMiniMark(TestFramework): gc = 'minimark' diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -0,0 +1,668 @@ +from pypy.jit.backend.llsupport.descr import * +from pypy.jit.backend.llsupport.gc import * +from pypy.jit.metainterp.gc import get_description +from pypy.jit.tool.oparser import parse +from pypy.jit.metainterp.optimizeopt.util import equaloplists +from pypy.jit.codewriter.heaptracker import register_known_gctype + + +class Evaluator(object): + def __init__(self, scope): + self.scope = scope + def __getitem__(self, key): + return eval(key, self.scope) + + +class RewriteTests(object): + def check_rewrite(self, frm_operations, to_operations, **namespace): + S = lltype.GcStruct('S', ('x', lltype.Signed), + ('y', lltype.Signed)) + sdescr = get_size_descr(self.gc_ll_descr, S) + sdescr.tid = 1234 + # + T = lltype.GcStruct('T', ('y', lltype.Signed), + ('z', lltype.Ptr(S)), + ('t', lltype.Signed)) + tdescr = get_size_descr(self.gc_ll_descr, T) + tdescr.tid = 5678 + tzdescr = get_field_descr(self.gc_ll_descr, T, 'z') + # + A = lltype.GcArray(lltype.Signed) + adescr = get_array_descr(self.gc_ll_descr, A) + adescr.tid = 4321 + alendescr = adescr.lendescr + # + B = lltype.GcArray(lltype.Char) + bdescr = get_array_descr(self.gc_ll_descr, B) + bdescr.tid = 8765 + blendescr = bdescr.lendescr + # + C = lltype.GcArray(lltype.Ptr(S)) + cdescr = get_array_descr(self.gc_ll_descr, C) + cdescr.tid = 8111 + clendescr = cdescr.lendescr + # + E = lltype.GcStruct('Empty') + edescr = get_size_descr(self.gc_ll_descr, E) + edescr.tid = 9000 + # + vtable_descr = self.gc_ll_descr.fielddescr_vtable + O = lltype.GcStruct('O', ('parent', rclass.OBJECT), + ('x', lltype.Signed)) + o_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) + register_known_gctype(self.cpu, o_vtable, O) + # + tiddescr = self.gc_ll_descr.fielddescr_tid + wbdescr = self.gc_ll_descr.write_barrier_descr + WORD = globals()['WORD'] + # + strdescr = self.gc_ll_descr.str_descr + unicodedescr = self.gc_ll_descr.unicode_descr + strlendescr = strdescr.lendescr + unicodelendescr = unicodedescr.lendescr + # + namespace.update(locals()) + # + for funcname in self.gc_ll_descr._generated_functions: + namespace[funcname] 
= self.gc_ll_descr.get_malloc_fn(funcname) + namespace[funcname + '_descr'] = getattr(self.gc_ll_descr, + '%s_descr' % funcname) + # + ops = parse(frm_operations, namespace=namespace) + expected = parse(to_operations % Evaluator(namespace), + namespace=namespace) + operations = self.gc_ll_descr.rewrite_assembler(self.cpu, + ops.operations, + []) + equaloplists(operations, expected.operations) + + +class TestBoehm(RewriteTests): + def setup_method(self, meth): + class FakeCPU(object): + def sizeof(self, STRUCT): + return SizeDescrWithVTable(102) + self.cpu = FakeCPU() + self.gc_ll_descr = GcLLDescr_boehm(None, None, None) + + def test_new(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_no_collapsing(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + p1 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_new_array_fixed(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(adescr.basesize + 10 * adescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_new_array_variable(self): + self.check_rewrite(""" + [i1] + p0 = new_array(i1, descr=adescr) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(adescr.basesize)d, \ + i1, \ + %(adescr.itemsize)d, \ + %(adescr.lendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_newstr(self): + self.check_rewrite(""" + [i1] + p0 = newstr(i1) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(strdescr.basesize)d, \ + i1, \ + %(strdescr.itemsize)d, \ + %(strlendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_newunicode(self): + self.check_rewrite(""" + [i1] + p0 = newunicode(10) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(unicodedescr.basesize + \ + 10 * unicodedescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=unicodelendescr) + jump() + """) + + +class TestFramework(RewriteTests): + def setup_method(self, meth): + class config_(object): + class translation(object): + gc = 'hybrid' + gcrootfinder = 'asmgcc' + gctransformer = 'framework' + gcremovetypeptr = False + gcdescr = get_description(config_) + self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, + really_not_translated=True) + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: True) + # + class FakeCPU(object): + def sizeof(self, STRUCT): + descr = SizeDescrWithVTable(102) + descr.tid = 9315 + return descr + self.cpu = FakeCPU() + + def test_rewrite_assembler_new_to_malloc(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(sdescr.size)d) + 
setfield_gc(p0, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new3_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=tdescr) + p2 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + tdescr.size + sdescr.size)d) + setfield_gc(p0, 1234, descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 5678, descr=tiddescr) + p2 = int_add(p1, %(tdescr.size)d) + setfield_gc(p2, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 4321, descr=tiddescr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_new_and_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + \ + adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 1234, descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 4321, descr=tiddescr) + setfield_gc(p1, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_round_up(self): + self.check_rewrite(""" + [] + p0 = new_array(6, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(bdescr.basesize + 8)d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 6, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_round_up_always(self): + self.check_rewrite(""" + [] + p0 = new_array(5, descr=bdescr) + p1 = new_array(5, descr=bdescr) + p2 = new_array(5, descr=bdescr) + p3 = new_array(5, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4 * (bdescr.basesize + 8))d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 5, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 8)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 5, descr=blendescr) + p2 = int_add(p1, %(bdescr.basesize + 8)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 5, descr=blendescr) + p3 = int_add(p2, %(bdescr.basesize + 8)d) + setfield_gc(p3, 8765, descr=tiddescr) + setfield_gc(p3, 5, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_minimal_size(self): + self.check_rewrite(""" + [] + p0 = new(descr=edescr) + p1 = new(descr=edescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4*WORD)d) + setfield_gc(p0, 9000, descr=tiddescr) + p1 = int_add(p0, %(2*WORD)d) + setfield_gc(p1, 9000, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_variable_size(self): + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=bdescr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, i0, \ + descr=malloc_array_descr) + jump(i0) + """) + + def test_rewrite_assembler_nonstandard_array(self): + # a non-standard array is a bit hard to get; e.g. GcArray(Float) + # is like that on Win32, but not on Linux. Build one manually... 
+ NONSTD = lltype.GcArray(lltype.Float) + nonstd_descr = get_array_descr(self.gc_ll_descr, NONSTD) + nonstd_descr.tid = 6464 + nonstd_descr.basesize = 64 # <= hacked + nonstd_descr.itemsize = 8 + nonstd_descr_gcref = 123 + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=nonstd_descr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array_nonstandard), \ + 64, 8, \ + %(nonstd_descr.lendescr.offset)d, \ + 6464, i0, \ + descr=malloc_array_nonstandard_descr) + jump(i0) + """, nonstd_descr=nonstd_descr) + + def test_rewrite_assembler_maximal_size_1(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_array(103, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(bdescr.basesize + 104)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_maximal_size_2(self): + self.gc_ll_descr.max_size_of_young_obj = 300 + self.check_rewrite(""" + [] + p0 = new_array(101, descr=bdescr) + p1 = new_array(102, descr=bdescr) # two new_arrays can be combined + p2 = new_array(103, descr=bdescr) # but not all three + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(2 * (bdescr.basesize + 104))d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 101, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 104)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 102, descr=blendescr) + p2 = call_malloc_nursery( \ + %(bdescr.basesize + 104)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_huge_size(self): + # "huge" is defined as "larger than 0xffffff bytes, or 16MB" + self.check_rewrite(""" + [] + p0 = new_array(20000000, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, 20000000, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(104) # rounded up + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_new_with_vtable_too_big(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_rewrite_assembler_newstr_newunicode(self): + self.check_rewrite(""" + [i2] + p0 = newstr(14) + p1 = newunicode(10) + p2 = newunicode(i2) + p3 = newstr(i2) + jump() + """, """ + [i2] + p0 = call_malloc_nursery( \ + %(strdescr.basesize + 16 * strdescr.itemsize + \ + unicodedescr.basesize + 10 * unicodedescr.itemsize)d) + setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr) + setfield_gc(p0, 14, descr=strlendescr) + p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) + setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) + setfield_gc(p1, 10, descr=unicodelendescr) + p2 = call_malloc_gc(ConstClass(malloc_unicode), i2, \ + descr=malloc_unicode_descr) + p3 = call_malloc_gc(ConstClass(malloc_str), i2, \ + descr=malloc_str_descr) + jump() + """) + + def test_write_barrier_before_setfield_gc(self): + 
self.check_rewrite(""" + [p1, p2] + setfield_gc(p1, p2, descr=tzdescr) + jump() + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setfield_raw(p1, p2, descr=tzdescr) + jump() + """) + + def test_write_barrier_before_array_without_from_array(self): + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: False) + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_short_array(self): + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(129, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 129 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 129, descr=clendescr) + call(123456) + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_long_array(self): + # the limit of "being too long" is fixed, arbitrarily, at 130 + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(130, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 130 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 130, descr=clendescr) + call(123456) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_unknown_array(self): + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_label_makes_size_unknown(self): + self.check_rewrite(""" + [i2, p3] + p1 = new_array(5, descr=cdescr) + label(p1, i2, p3) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 5, descr=clendescr) + label(p1, i2, p3) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_setinteriorfield_gc(self): + S1 = lltype.GcStruct('S1') + INTERIOR = lltype.GcArray(('z', lltype.Ptr(S1))) + interiordescr = get_array_descr(self.gc_ll_descr, INTERIOR) + interiordescr.tid = 1291 + interiorlendescr = interiordescr.lendescr + interiorzdescr = get_interiorfield_descr(self.gc_ll_descr, + INTERIOR, 'z') + self.check_rewrite(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, interiorzdescr=interiorzdescr) + + def test_initialization_store(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_2(self): + self.check_rewrite(""" + [] + p0 = new(descr=tdescr) + p1 = 
new(descr=sdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(tdescr.size + sdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = int_add(p0, %(tdescr.size)d) + setfield_gc(p1, 1234, descr=tiddescr) + # <<>> + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_array(self): + self.check_rewrite(""" + [p1, i2] + p0 = new_array(5, descr=cdescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """, """ + [p1, i2] + p0 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p0, 8111, descr=tiddescr) + setfield_gc(p0, 5, descr=clendescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """) + + def test_non_initialization_store(self): + self.check_rewrite(""" + [i0] + p0 = new(descr=tdescr) + p1 = newstr(i0) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [i0] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = call_malloc_gc(ConstClass(malloc_str), i0, \ + descr=malloc_str_descr) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) + + def test_non_initialization_store_label(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + label(p0, p1) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + label(p0, p1) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -2930,6 +2930,8 @@ # overflowing value: fail = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) assert fail.identifier == excdescr.identifier + exc = self.cpu.grab_exc_value() + assert exc == "memoryerror!" def test_compile_loop_with_target(self): i0 = BoxInt() diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -39,6 +39,7 @@ from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter import longlong from pypy.rlib.rarithmetic import intmask +from pypy.rlib.objectmodel import compute_unique_id # darwin requires the stack to be 16 bytes aligned on calls. 
Same for gcc 4.5.0, # better safe than sorry @@ -58,7 +59,8 @@ self.is_guard_not_invalidated = is_guard_not_invalidated DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed), - ('bridge', lltype.Signed), # 0 or 1 + ('type', lltype.Char), # 'b'ridge, 'l'abel or + # 'e'ntry point ('number', lltype.Signed)) class Assembler386(object): @@ -70,10 +72,6 @@ self.cpu = cpu self.verbose = False self.rtyper = cpu.rtyper - self.malloc_func_addr = 0 - self.malloc_array_func_addr = 0 - self.malloc_str_func_addr = 0 - self.malloc_unicode_func_addr = 0 self.fail_boxes_int = values_array(lltype.Signed, failargs_limit) self.fail_boxes_ptr = values_array(llmemory.GCREF, failargs_limit) self.fail_boxes_float = values_array(longlong.FLOATSTORAGE, @@ -108,20 +106,6 @@ # the address of the function called by 'new' gc_ll_descr = self.cpu.gc_ll_descr gc_ll_descr.initialize() - ll_new = gc_ll_descr.get_funcptr_for_new() - self.malloc_func_addr = rffi.cast(lltype.Signed, ll_new) - if gc_ll_descr.get_funcptr_for_newarray is not None: - ll_new_array = gc_ll_descr.get_funcptr_for_newarray() - self.malloc_array_func_addr = rffi.cast(lltype.Signed, - ll_new_array) - if gc_ll_descr.get_funcptr_for_newstr is not None: - ll_new_str = gc_ll_descr.get_funcptr_for_newstr() - self.malloc_str_func_addr = rffi.cast(lltype.Signed, - ll_new_str) - if gc_ll_descr.get_funcptr_for_newunicode is not None: - ll_new_unicode = gc_ll_descr.get_funcptr_for_newunicode() - self.malloc_unicode_func_addr = rffi.cast(lltype.Signed, - ll_new_unicode) self.memcpy_addr = self.cpu.cast_ptr_to_int(support.memcpy_fn) self._build_failure_recovery(False) self._build_failure_recovery(True) @@ -165,12 +149,15 @@ def finish_once(self): if self._debug: debug_start('jit-backend-counts') - for struct in self.loop_run_counters: - if struct.bridge: - prefix = 'bridge ' + for i in range(len(self.loop_run_counters)): + struct = self.loop_run_counters[i] + if struct.type == 'l': + prefix = 'TargetToken(%d)' % struct.number + elif struct.type == 'b': + prefix = 'bridge ' + str(struct.number) else: - prefix = 'loop ' - debug_print(prefix + str(struct.number) + ':' + str(struct.i)) + prefix = 'entry ' + str(struct.number) + debug_print(prefix + ':' + str(struct.i)) debug_stop('jit-backend-counts') def _build_float_constants(self): @@ -275,7 +262,8 @@ # self.mc = codebuf.MachineCodeBlockWrapper() # call on_leave_jitted_save_exc() - addr = self.cpu.get_on_leave_jitted_int(save_exception=True) + addr = self.cpu.get_on_leave_jitted_int(save_exception=True, + default_to_memoryerror=True) self.mc.CALL(imm(addr)) self.mc.MOV_ri(eax.value, self.cpu.propagate_exception_v) self._call_footer() @@ -439,8 +427,8 @@ self.setup(looptoken) if log: - self._register_counter(False, looptoken.number) - operations = self._inject_debugging_code(looptoken, operations) + operations = self._inject_debugging_code(looptoken, operations, + 'e', looptoken.number) regalloc = RegAlloc(self, self.cpu.translate_support_code) # @@ -506,8 +494,8 @@ self.setup(original_loop_token) if log: - self._register_counter(True, descr_number) - operations = self._inject_debugging_code(faildescr, operations) + operations = self._inject_debugging_code(faildescr, operations, + 'b', descr_number) arglocs = self.rebuild_faillocs_from_descr(failure_recovery) if not we_are_translated(): @@ -614,17 +602,21 @@ return self.mc.materialize(self.cpu.asmmemmgr, allblocks, self.cpu.gc_ll_descr.gcrootmap) - def _register_counter(self, bridge, number): - if self._debug: - # YYY very minor leak -- we need the 
counters to stay alive - # forever, just because we want to report them at the end - # of the process - struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', - track_allocation=False) - struct.i = 0 - struct.bridge = int(bridge) + def _register_counter(self, tp, number, token): + # YYY very minor leak -- we need the counters to stay alive + # forever, just because we want to report them at the end + # of the process + struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', + track_allocation=False) + struct.i = 0 + struct.type = tp + if tp == 'b' or tp == 'e': struct.number = number - self.loop_run_counters.append(struct) + else: + assert token + struct.number = compute_unique_id(token) + self.loop_run_counters.append(struct) + return struct def _find_failure_recovery_bytecode(self, faildescr): adr_jump_offset = faildescr._x86_adr_jump_offset @@ -668,27 +660,36 @@ targettoken._x86_loop_code += rawstart self.target_tokens_currently_compiling = None + def _append_debugging_code(self, operations, tp, number, token): + counter = self._register_counter(tp, number, token) + c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) + box = BoxInt() + box2 = BoxInt() + ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], + box, descr=self.debug_counter_descr), + ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), + ResOperation(rop.SETFIELD_RAW, [c_adr, box2], + None, descr=self.debug_counter_descr)] + operations.extend(ops) + @specialize.argtype(1) - def _inject_debugging_code(self, looptoken, operations): + def _inject_debugging_code(self, looptoken, operations, tp, number): if self._debug: # before doing anything, let's increase a counter s = 0 for op in operations: s += op.getopnum() looptoken._x86_debug_checksum = s - c_adr = ConstInt(rffi.cast(lltype.Signed, - self.loop_run_counters[-1])) - box = BoxInt() - box2 = BoxInt() - ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], - box, descr=self.debug_counter_descr), - ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), - ResOperation(rop.SETFIELD_RAW, [c_adr, box2], - None, descr=self.debug_counter_descr)] - if operations[0].getopnum() == rop.LABEL: - operations = [operations[0]] + ops + operations[1:] - else: - operations = ops + operations + + newoperations = [] + self._append_debugging_code(newoperations, tp, number, + None) + for op in operations: + newoperations.append(op) + if op.getopnum() == rop.LABEL: + self._append_debugging_code(newoperations, 'l', number, + op.getdescr()) + operations = newoperations return operations def _assemble(self, regalloc, operations): @@ -865,8 +866,8 @@ high_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[1] low_part = intmask(low_part) high_part = intmask(high_part) - self.mc.MOV_bi(to_loc.value, low_part) - self.mc.MOV_bi(to_loc.value + 4, high_part) + self.mc.MOV32_bi(to_loc.value, low_part) + self.mc.MOV32_bi(to_loc.value + 4, high_part) def regalloc_perform(self, op, arglocs, resloc): genop_list[op.getopnum()](self, op, arglocs, resloc) @@ -1357,46 +1358,10 @@ self.mc.SHR_ri(resloc.value, 7) self.mc.AND_ri(resloc.value, 1) - def genop_new_with_vtable(self, op, arglocs, result_loc): - assert result_loc is eax - loc_vtable = arglocs[-1] - assert isinstance(loc_vtable, ImmedLoc) - arglocs = arglocs[:-1] - self.call(self.malloc_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - self.set_vtable(eax, loc_vtable) + # ---------- - def set_vtable(self, loc, loc_vtable): - if self.cpu.vtable_offset is not None: - assert isinstance(loc, RegLoc) - assert isinstance(loc_vtable, ImmedLoc) - 
self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) - - def set_new_array_length(self, loc, ofs_length, loc_num_elem): - assert isinstance(loc, RegLoc) - assert isinstance(loc_num_elem, ImmedLoc) - self.mc.MOV(mem(loc, ofs_length), loc_num_elem) - - # XXX genop_new is abused for all varsized mallocs with Boehm, for now - # (instead of genop_new_array, genop_newstr, genop_newunicode) - def genop_new(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_new_array(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_array_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_newstr(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_str_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_newunicode(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_unicode_func_addr, arglocs, eax) + def genop_call_malloc_gc(self, op, arglocs, result_loc): + self.genop_call(op, arglocs, result_loc) self.propagate_memoryerror_if_eax_is_null() def propagate_memoryerror_if_eax_is_null(self): @@ -2065,6 +2030,8 @@ self._genop_call(op, arglocs, resloc, force_index) def _genop_call(self, op, arglocs, resloc, force_index): + from pypy.jit.backend.llsupport.descr import CallDescr + sizeloc = arglocs[0] assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value @@ -2079,13 +2046,16 @@ else: tmp = eax + descr = op.getdescr() + assert isinstance(descr, CallDescr) + self._emit_call(force_index, x, arglocs, 3, tmp=tmp, - argtypes=op.getdescr().get_arg_types(), - callconv=op.getdescr().get_call_conv()) + argtypes=descr.get_arg_types(), + callconv=descr.get_call_conv()) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.type == FLOAT: # a float or a long long return - if op.getdescr().get_return_type() == 'L': + if descr.get_result_type() == 'L': self.mc.MOV_br(resloc.value, eax.value) # long long self.mc.MOV_br(resloc.value + 4, edx.value) # XXX should ideally not move the result on the stack, @@ -2094,7 +2064,7 @@ # can just be always a stack location else: self.mc.FSTPL_b(resloc.value) # float return - elif op.getdescr().get_return_type() == 'S': + elif descr.get_result_type() == 'S': # singlefloat return assert resloc is eax if IS_X86_32: @@ -2292,9 +2262,9 @@ # # Reset the vable token --- XXX really too much special logic here:-( if jd.index_of_virtualizable >= 0: - from pypy.jit.backend.llsupport.descr import BaseFieldDescr + from pypy.jit.backend.llsupport.descr import FieldDescr fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) ofs = fielddescr.offset self.mc.MOV(eax, arglocs[1]) self.mc.MOV_mi((eax.value, ofs), 0) @@ -2497,9 +2467,8 @@ else: self.mc.JMP(imm(target)) - def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): - size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) - size = (size + WORD-1) & ~(WORD-1) # round up + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size): + assert size & (WORD-1) == 0 # must be correctly aligned self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edx.value, (eax.value, size)) self.mc.CMP(edx, heap(nursery_top_adr)) @@ -2535,9 +2504,6 @@ offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) - # on 64-bits, 'tid' is a value that 
fits in 31 bits - assert rx86.fits_in_32bits(tid) - self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) genop_discard_list = [Assembler386.not_implemented_op_discard] * rop._LAST diff --git a/pypy/jit/backend/x86/jump.py b/pypy/jit/backend/x86/jump.py --- a/pypy/jit/backend/x86/jump.py +++ b/pypy/jit/backend/x86/jump.py @@ -17,7 +17,10 @@ key = src._getregkey() if key in srccount: if key == dst_locations[i]._getregkey(): - srccount[key] = -sys.maxint # ignore a move "x = x" + # ignore a move "x = x" + # setting any "large enough" negative value is ok, but + # be careful of overflows, don't use -sys.maxint + srccount[key] = -len(dst_locations) - 1 pending_dests -= 1 else: srccount[key] += 1 diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -16,8 +16,8 @@ from pypy.jit.codewriter import heaptracker, longlong from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.metainterp.resoperation import rop -from pypy.jit.backend.llsupport.descr import BaseFieldDescr, BaseArrayDescr -from pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr +from pypy.jit.backend.llsupport.descr import FieldDescr, ArrayDescr +from pypy.jit.backend.llsupport.descr import CallDescr, SizeDescr from pypy.jit.backend.llsupport.descr import InteriorFieldDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox @@ -870,9 +870,9 @@ def _consider_call(self, op, guard_not_forced_op=None): calldescr = op.getdescr() - assert isinstance(calldescr, BaseCallDescr) + assert isinstance(calldescr, CallDescr) assert len(calldescr.arg_classes) == op.numargs() - 1 - size = calldescr.get_result_size(self.translate_support_code) + size = calldescr.get_result_size() sign = calldescr.is_result_signed() if sign: sign_loc = imm1 @@ -917,12 +917,15 @@ consider_call_release_gil = consider_call_may_force + def consider_call_malloc_gc(self, op): + self._consider_call(op) + def consider_call_assembler(self, op, guard_op): descr = op.getdescr() assert isinstance(descr, JitCellToken) jd = descr.outermost_jitdriver_sd assert jd is not None - size = jd.portal_calldescr.get_result_size(self.translate_support_code) + size = jd.portal_calldescr.get_result_size() vable_index = jd.index_of_virtualizable if vable_index >= 0: self.rm._sync_var(op.getarg(vable_index)) @@ -957,21 +960,10 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb - def fastpath_malloc_fixedsize(self, op, descr): - assert isinstance(descr, BaseSizeDescr) - self._do_fastpath_malloc(op, descr.size, descr.tid) - - def fastpath_malloc_varsize(self, op, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - size = basesize + itemsize * num_elem - self._do_fastpath_malloc(op, size, arraydescr.tid) - self.assembler.set_new_array_length(eax, ofs_length, imm(num_elem)) - - def _do_fastpath_malloc(self, op, size, tid): - gc_ll_descr = self.assembler.cpu.gc_ll_descr + def consider_call_malloc_nursery(self, op): + size_box = op.getarg(0) + assert isinstance(size_box, ConstInt) + size = size_box.getint() self.rm.force_allocate_reg(op.result, selected_reg=eax) # # We need edx as a temporary, but otherwise don't save any more @@ -980,86 +972,39 @@ 
self.rm.force_allocate_reg(tmp_box, selected_reg=edx) self.rm.possibly_free_var(tmp_box) # + gc_ll_descr = self.assembler.cpu.gc_ll_descr self.assembler.malloc_cond( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), - size, tid, - ) - - def consider_new(self, op): - gc_ll_descr = self.assembler.cpu.gc_ll_descr - if gc_ll_descr.can_inline_malloc(op.getdescr()): - self.fastpath_malloc_fixedsize(op, op.getdescr()) - else: - args = gc_ll_descr.args_for_new(op.getdescr()) - arglocs = [imm(x) for x in args] - return self._call(op, arglocs) - - def consider_new_with_vtable(self, op): - classint = op.getarg(0).getint() - descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) - if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): - self.fastpath_malloc_fixedsize(op, descrsize) - self.assembler.set_vtable(eax, imm(classint)) - # result of fastpath malloc is in eax - else: - args = self.assembler.cpu.gc_ll_descr.args_for_new(descrsize) - arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.getarg(0))) - return self._call(op, arglocs) - - def consider_newstr(self, op): - loc = self.loc(op.getarg(0)) - return self._call(op, [loc]) - - def consider_newunicode(self, op): - loc = self.loc(op.getarg(0)) - return self._call(op, [loc]) - - def consider_new_array(self, op): - gc_ll_descr = self.assembler.cpu.gc_ll_descr - box_num_elem = op.getarg(0) - if isinstance(box_num_elem, ConstInt): - num_elem = box_num_elem.value - if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), - num_elem): - self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) - return - args = self.assembler.cpu.gc_ll_descr.args_for_new_array( - op.getdescr()) - arglocs = [imm(x) for x in args] - arglocs.append(self.loc(box_num_elem)) - self._call(op, arglocs) + size) def _unpack_arraydescr(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - ofs = arraydescr.get_base_size(self.translate_support_code) - size = arraydescr.get_item_size(self.translate_support_code) - ptr = arraydescr.is_array_of_pointers() + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.basesize + size = arraydescr.itemsize sign = arraydescr.is_item_signed() - return size, ofs, ofs_length, ptr, sign + return size, ofs, sign def _unpack_fielddescr(self, fielddescr): - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) ofs = fielddescr.offset - size = fielddescr.get_field_size(self.translate_support_code) - ptr = fielddescr.is_pointer_field() + size = fielddescr.field_size sign = fielddescr.is_field_signed() - return imm(ofs), imm(size), ptr, sign + return imm(ofs), imm(size), sign + _unpack_fielddescr._always_inline_ = True def _unpack_interiorfielddescr(self, descr): assert isinstance(descr, InteriorFieldDescr) arraydescr = descr.arraydescr - ofs = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - fieldsize = descr.fielddescr.get_field_size(self.translate_support_code) + ofs = arraydescr.basesize + itemsize = arraydescr.itemsize + fieldsize = descr.fielddescr.field_size sign = descr.fielddescr.is_field_signed() ofs += descr.fielddescr.offset return imm(ofs), imm(itemsize), imm(fieldsize), sign def consider_setfield_gc(self, op): - ofs_loc, size_loc, _, _ = self._unpack_fielddescr(op.getdescr()) + ofs_loc, size_loc, _ = self._unpack_fielddescr(op.getdescr()) assert isinstance(size_loc, ImmedLoc) if 
size_loc.value == 1: need_lower_byte = True @@ -1117,7 +1062,7 @@ consider_unicodesetitem = consider_strsetitem def consider_setarrayitem_gc(self, op): - itemsize, ofs, _, _, _ = self._unpack_arraydescr(op.getdescr()) + itemsize, ofs, _ = self._unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) if itemsize == 1: @@ -1134,7 +1079,7 @@ consider_setarrayitem_raw = consider_setarrayitem_gc def consider_getfield_gc(self, op): - ofs_loc, size_loc, _, sign = self._unpack_fielddescr(op.getdescr()) + ofs_loc, size_loc, sign = self._unpack_fielddescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars(args) @@ -1150,7 +1095,7 @@ consider_getfield_gc_pure = consider_getfield_gc def consider_getarrayitem_gc(self, op): - itemsize, ofs, _, _, sign = self._unpack_arraydescr(op.getdescr()) + itemsize, ofs, sign = self._unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) @@ -1229,8 +1174,8 @@ def consider_arraylen_gc(self, op): arraydescr = op.getdescr() - assert isinstance(arraydescr, BaseArrayDescr) - ofs = arraydescr.get_ofs_length(self.translate_support_code) + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.lendescr.offset args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars_for_op(op) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -8,7 +8,7 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.codewriter import heaptracker from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.backend.llsupport.descr import GcCache +from pypy.jit.backend.llsupport.descr import GcCache, FieldDescr, FLAG_SIGNED from pypy.jit.backend.llsupport.gc import GcLLDescription from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.regalloc import RegAlloc @@ -17,7 +17,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.annlowlevel import llhelper from pypy.rpython.lltypesystem import rclass, rstr -from pypy.jit.backend.llsupport.gc import GcLLDescr_framework, GcPtrFieldDescr +from pypy.jit.backend.llsupport.gc import GcLLDescr_framework from pypy.jit.backend.x86.test.test_regalloc import MockAssembler from pypy.jit.backend.x86.test.test_regalloc import BaseTestRegalloc @@ -41,20 +41,15 @@ return ['compressed'] + shape[1:] class MockGcDescr(GcCache): - def get_funcptr_for_new(self): - return 123 - get_funcptr_for_newarray = get_funcptr_for_new - get_funcptr_for_newstr = get_funcptr_for_new - get_funcptr_for_newunicode = get_funcptr_for_new get_malloc_slowpath_addr = None - + write_barrier_descr = None moving_gc = True gcrootmap = MockGcRootMap() def initialize(self): pass - record_constptrs = GcLLDescr_framework.record_constptrs.im_func + _record_constptrs = GcLLDescr_framework._record_constptrs.im_func rewrite_assembler = GcLLDescr_framework.rewrite_assembler.im_func class TestRegallocDirectGcIntegration(object): @@ -170,42 +165,32 @@ ''' self.interpret(ops, [0, 0, 0, 0, 0, 0, 0, 0, 0], run=False) +NOT_INITIALIZED = chr(0xdd) + class GCDescrFastpathMalloc(GcLLDescription): gcrootmap = None - expected_malloc_slowpath_size = WORD*2 + 
write_barrier_descr = None def __init__(self): - GcCache.__init__(self, False) + GcLLDescription.__init__(self, None) # create a nursery - NTP = rffi.CArray(lltype.Signed) - self.nursery = lltype.malloc(NTP, 16, flavor='raw') - self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 3, + NTP = rffi.CArray(lltype.Char) + self.nursery = lltype.malloc(NTP, 64, flavor='raw') + for i in range(64): + self.nursery[i] = NOT_INITIALIZED + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 2, flavor='raw') self.addrs[0] = rffi.cast(lltype.Signed, self.nursery) - self.addrs[1] = self.addrs[0] + 16*WORD - self.addrs[2] = 0 - # 16 WORDs + self.addrs[1] = self.addrs[0] + 64 + self.calls = [] def malloc_slowpath(size): - assert size == self.expected_malloc_slowpath_size + self.calls.append(size) + # reset the nursery nadr = rffi.cast(lltype.Signed, self.nursery) self.addrs[0] = nadr + size - self.addrs[2] += 1 return nadr - self.malloc_slowpath = malloc_slowpath - self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], - lltype.Signed) - self._counter = 123000 - - def can_inline_malloc(self, descr): - return True - - def get_funcptr_for_new(self): - return 42 -# return llhelper(lltype.Ptr(self.NEW_TP), self.new) - - def init_size_descr(self, S, descr): - descr.tid = self._counter - self._counter += 1 + self.generate_function('malloc_nursery', malloc_slowpath, + [lltype.Signed], lltype.Signed) def get_nursery_free_addr(self): return rffi.cast(lltype.Signed, self.addrs) @@ -214,204 +199,61 @@ return rffi.cast(lltype.Signed, self.addrs) + WORD def get_malloc_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) - return rffi.cast(lltype.Signed, fptr) + return self.get_malloc_fn_addr('malloc_nursery') - get_funcptr_for_newarray = None - get_funcptr_for_newstr = None - get_funcptr_for_newunicode = None + def check_nothing_in_nursery(self): + # CALL_MALLOC_NURSERY should not write anything in the nursery + for i in range(64): + assert self.nursery[i] == NOT_INITIALIZED class TestMallocFastpath(BaseTestRegalloc): def setup_method(self, method): cpu = CPU(None, None) - cpu.vtable_offset = WORD cpu.gc_ll_descr = GCDescrFastpathMalloc() cpu.setup_once() + self.cpu = cpu - # hack: specify 'tid' explicitly, because this test is not running - # with the gc transformer - NODE = lltype.GcStruct('node', ('tid', lltype.Signed), - ('value', lltype.Signed)) - nodedescr = cpu.sizeof(NODE) - valuedescr = cpu.fielddescrof(NODE, 'value') - - self.cpu = cpu - self.nodedescr = nodedescr - vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) - vtable_int = cpu.cast_adr_to_int(llmemory.cast_ptr_to_adr(vtable)) - NODE2 = lltype.GcStruct('node2', - ('parent', rclass.OBJECT), - ('tid', lltype.Signed), - ('vtable', lltype.Ptr(rclass.OBJECT_VTABLE))) - descrsize = cpu.sizeof(NODE2) - heaptracker.register_known_gctype(cpu, vtable, NODE2) - self.descrsize = descrsize - self.vtable_int = vtable_int - - self.namespace = locals().copy() - def test_malloc_fastpath(self): ops = ''' - [i0] - p0 = new(descr=nodedescr) - setfield_gc(p0, i0, descr=valuedescr) - finish(p0) + [] + p0 = call_malloc_nursery(16) + p1 = call_malloc_nursery(32) + p2 = call_malloc_nursery(16) + finish(p0, p1, p2) ''' - self.interpret(ops, [42]) - # check the nursery + self.interpret(ops, []) + # check the returned pointers gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == self.nodedescr.tid - assert gc_ll_descr.nursery[1] == 42 nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert 
gc_ll_descr.addrs[0] == nurs_adr + (WORD*2) - assert gc_ll_descr.addrs[2] == 0 # slowpath never called + ref = self.cpu.get_latest_value_ref + assert rffi.cast(lltype.Signed, ref(0)) == nurs_adr + 0 + assert rffi.cast(lltype.Signed, ref(1)) == nurs_adr + 16 + assert rffi.cast(lltype.Signed, ref(2)) == nurs_adr + 48 + # check the nursery content and state + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.addrs[0] == nurs_adr + 64 + # slowpath never called + assert gc_ll_descr.calls == [] def test_malloc_slowpath(self): ops = ''' [] - p0 = new(descr=nodedescr) - p1 = new(descr=nodedescr) - p2 = new(descr=nodedescr) - p3 = new(descr=nodedescr) - p4 = new(descr=nodedescr) - p5 = new(descr=nodedescr) - p6 = new(descr=nodedescr) - p7 = new(descr=nodedescr) - p8 = new(descr=nodedescr) - finish(p0, p1, p2, p3, p4, p5, p6, p7, p8) + p0 = call_malloc_nursery(16) + p1 = call_malloc_nursery(32) + p2 = call_malloc_nursery(24) # overflow + finish(p0, p1, p2) ''' self.interpret(ops, []) + # check the returned pointers + gc_ll_descr = self.cpu.gc_ll_descr + nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) + ref = self.cpu.get_latest_value_ref + assert rffi.cast(lltype.Signed, ref(0)) == nurs_adr + 0 + assert rffi.cast(lltype.Signed, ref(1)) == nurs_adr + 16 + assert rffi.cast(lltype.Signed, ref(2)) == nurs_adr + 0 + # check the nursery content and state + gc_ll_descr.check_nothing_in_nursery() + assert gc_ll_descr.addrs[0] == nurs_adr + 24 # this should call slow path once - gc_ll_descr = self.cpu.gc_ll_descr - nadr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nadr + (WORD*2) - assert gc_ll_descr.addrs[2] == 1 # slowpath called once - - def test_new_with_vtable(self): - ops = ''' - [i0, i1] - p0 = new_with_vtable(ConstClass(vtable)) - guard_class(p0, ConstClass(vtable)) [i0] - finish(i1) - ''' - self.interpret(ops, [0, 1]) - assert self.getint(0) == 1 - gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == self.descrsize.tid - assert gc_ll_descr.nursery[1] == self.vtable_int - nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*3) - assert gc_ll_descr.addrs[2] == 0 # slowpath never called - - -class Seen(Exception): - pass - -class GCDescrFastpathMallocVarsize(GCDescrFastpathMalloc): - def can_inline_malloc_varsize(self, arraydescr, num_elem): - return num_elem < 5 - def get_funcptr_for_newarray(self): - return 52 - def init_array_descr(self, A, descr): - descr.tid = self._counter - self._counter += 1 - def args_for_new_array(self, descr): - raise Seen("args_for_new_array") - -class TestMallocVarsizeFastpath(BaseTestRegalloc): - def setup_method(self, method): - cpu = CPU(None, None) - cpu.vtable_offset = WORD - cpu.gc_ll_descr = GCDescrFastpathMallocVarsize() - cpu.setup_once() - self.cpu = cpu - - ARRAY = lltype.GcArray(lltype.Signed) - arraydescr = cpu.arraydescrof(ARRAY) - self.arraydescr = arraydescr - ARRAYCHAR = lltype.GcArray(lltype.Char) - arraychardescr = cpu.arraydescrof(ARRAYCHAR) - - self.namespace = locals().copy() - - def test_malloc_varsize_fastpath(self): - # Hack. Running the GcLLDescr_framework without really having - # a complete GC means that we end up with both the tid and the - # length being at offset 0. In this case, so the length overwrites - # the tid. This is of course only the case in this test class. 
- ops = ''' - [] - p0 = new_array(4, descr=arraydescr) - setarrayitem_gc(p0, 0, 142, descr=arraydescr) - setarrayitem_gc(p0, 3, 143, descr=arraydescr) - finish(p0) - ''' - self.interpret(ops, []) - # check the nursery - gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.nursery[0] == 4 - assert gc_ll_descr.nursery[1] == 142 - assert gc_ll_descr.nursery[4] == 143 - nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) - assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*5) - assert gc_ll_descr.addrs[2] == 0 # slowpath never called - - def test_malloc_varsize_slowpath(self): - ops = ''' - [] - p0 = new_array(4, descr=arraydescr) - setarrayitem_gc(p0, 0, 420, descr=arraydescr) - setarrayitem_gc(p0, 3, 430, descr=arraydescr) - p1 = new_array(4, descr=arraydescr) - setarrayitem_gc(p1, 0, 421, descr=arraydescr) - setarrayitem_gc(p1, 3, 431, descr=arraydescr) - p2 = new_array(4, descr=arraydescr) - setarrayitem_gc(p2, 0, 422, descr=arraydescr) - setarrayitem_gc(p2, 3, 432, descr=arraydescr) - p3 = new_array(4, descr=arraydescr) - setarrayitem_gc(p3, 0, 423, descr=arraydescr) - setarrayitem_gc(p3, 3, 433, descr=arraydescr) - finish(p0, p1, p2, p3) - ''' - gc_ll_descr = self.cpu.gc_ll_descr - gc_ll_descr.expected_malloc_slowpath_size = 5*WORD - self.interpret(ops, []) - assert gc_ll_descr.addrs[2] == 1 # slowpath called once - - def test_malloc_varsize_too_big(self): - ops = ''' - [] - p0 = new_array(5, descr=arraydescr) - finish(p0) - ''' - py.test.raises(Seen, self.interpret, ops, []) - - def test_malloc_varsize_variable(self): - ops = ''' - [i0] - p0 = new_array(i0, descr=arraydescr) - finish(p0) - ''' - py.test.raises(Seen, self.interpret, ops, []) - - def test_malloc_array_of_char(self): - # check that fastpath_malloc_varsize() respects the alignment - # of the pointer in the nursery - ops = ''' - [] - p1 = new_array(1, descr=arraychardescr) - p2 = new_array(2, descr=arraychardescr) - p3 = new_array(3, descr=arraychardescr) - p4 = new_array(4, descr=arraychardescr) - finish(p1, p2, p3, p4) - ''' - self.interpret(ops, []) - p1 = self.getptr(0, llmemory.GCREF) - p2 = self.getptr(1, llmemory.GCREF) - p3 = self.getptr(2, llmemory.GCREF) - p4 = self.getptr(3, llmemory.GCREF) - assert p1._obj.intval & (WORD-1) == 0 # aligned - assert p2._obj.intval & (WORD-1) == 0 # aligned - assert p3._obj.intval & (WORD-1) == 0 # aligned - assert p4._obj.intval & (WORD-1) == 0 # aligned + assert gc_ll_descr.calls == [24] diff --git a/pypy/jit/backend/x86/test/test_jump.py b/pypy/jit/backend/x86/test/test_jump.py --- a/pypy/jit/backend/x86/test/test_jump.py +++ b/pypy/jit/backend/x86/test/test_jump.py @@ -20,6 +20,11 @@ def regalloc_pop(self, loc): self.ops.append(('pop', loc)) + def regalloc_immedmem2mem(self, from_loc, to_loc): + assert isinstance(from_loc, ConstFloatLoc) + assert isinstance(to_loc, StackLoc) + self.ops.append(('immedmem2mem', from_loc, to_loc)) + def got(self, expected): print '------------------------ comparing ---------------------------' for op1, op2 in zip(self.ops, expected): @@ -244,12 +249,19 @@ else: return pick1() # + def pick2c(): + n = random.randrange(-2000, 500) + if n >= 0: + return ConstFloatLoc(n) # n is the address, not really used here + else: + return pick2() + # def pick_dst(fn, count, seen): result = [] while len(result) < count: x = fn() keys = [x._getregkey()] - if isinstance(x, StackLoc) and x.width > WORD: + if isinstance(x, StackLoc) and x.get_width() > WORD: keys.append(keys[0] + WORD) for key in keys: if key in seen: @@ -267,7 +279,7 @@ for i, loc in 
enumerate(locations): if isinstance(loc, RegLoc): if loc.is_xmm: - if loc.width > WORD: + if loc.get_width() > WORD: newvalue = ('value-xmm-%d' % i, 'value-xmm-hiword-%d' % i) else: @@ -276,16 +288,16 @@ else: regs1[loc.value] = 'value-int-%d' % i elif isinstance(loc, StackLoc): - stack[loc.value] = 'value-width%d-%d' % (loc.width, i) - if loc.width > WORD: + stack[loc.value] = 'value-width%d-%d' % (loc.get_width(), i) + if loc.get_width() > WORD: stack[loc.value+WORD] = 'value-hiword-%d' % i else: - assert isinstance(loc, ImmedLoc) + assert isinstance(loc, (ImmedLoc, ConstFloatLoc)) return regs1, regs2, stack # for i in range(500): seen = {} - src_locations2 = [pick2() for i in range(4)] + src_locations2 = [pick2c() for i in range(4)] dst_locations2 = pick_dst(pick2, 4, seen) src_locations1 = [pick1c() for i in range(5)] dst_locations1 = pick_dst(pick1, 5, seen) @@ -299,7 +311,7 @@ # def read(loc, expected_width=None): if expected_width is not None: - assert loc.width == expected_width + assert loc.get_width() == expected_width if isinstance(loc, RegLoc): if loc.is_xmm: return regs2[loc.value] @@ -307,21 +319,27 @@ return regs1[loc.value] if isinstance(loc, StackLoc): got = stack[loc.value] - if loc.width > WORD: + if loc.get_width() > WORD: got = (got, stack[loc.value+WORD]) return got if isinstance(loc, ImmedLoc): return 'const-%d' % loc.value + if isinstance(loc, ConstFloatLoc): + got = 'constfloat-@%d' % loc.value + if loc.get_width() > WORD: + got = (got, 'constfloat-next-@%d' % loc.value) + return got assert 0, loc # def write(loc, newvalue): + assert (type(newvalue) is tuple) == (loc.get_width() > WORD) if isinstance(loc, RegLoc): if loc.is_xmm: regs2[loc.value] = newvalue else: regs1[loc.value] = newvalue elif isinstance(loc, StackLoc): - if loc.width > WORD: + if loc.get_width() > WORD: newval1, newval2 = newvalue stack[loc.value] = newval1 stack[loc.value+WORD] = newval2 @@ -337,10 +355,14 @@ for op in assembler.ops: if op[0] == 'mov': src, dst = op[1:] - assert isinstance(src, (RegLoc, StackLoc, ImmedLoc)) - assert isinstance(dst, (RegLoc, StackLoc)) - assert not (isinstance(src, StackLoc) and - isinstance(dst, StackLoc)) + if isinstance(src, ConstFloatLoc): + assert isinstance(dst, RegLoc) + assert dst.is_xmm + else: + assert isinstance(src, (RegLoc, StackLoc, ImmedLoc)) + assert isinstance(dst, (RegLoc, StackLoc)) + assert not (isinstance(src, StackLoc) and + isinstance(dst, StackLoc)) write(dst, read(src)) elif op[0] == 'push': src, = op[1:] @@ -350,6 +372,11 @@ dst, = op[1:] assert isinstance(dst, (RegLoc, StackLoc)) write(dst, extrapushes.pop()) + elif op[0] == 'immedmem2mem': + src, dst = op[1:] + assert isinstance(src, ConstFloatLoc) + assert isinstance(dst, StackLoc) + write(dst, read(src, 8)) else: assert 0, "unknown op: %r" % (op,) assert not extrapushes @@ -358,3 +385,32 @@ assert read(loc, WORD) == src_values1[i] for i, loc in enumerate(dst_locations2): assert read(loc, 8) == src_values2[i] + + +def test_overflow_bug(): + CASE = [ + (-144, -248), # \ cycle + (-248, -144), # / + (-488, -416), # \ two usages of -488 + (-488, -480), # / + (-488, -488), # - one self-application of -488 + ] + class FakeAssembler: + def regalloc_mov(self, src, dst): + print "mov", src, dst + def regalloc_push(self, x): + print "push", x + def regalloc_pop(self, x): + print "pop", x + def regalloc_immedmem2mem(self, x, y): + print "?????????????????????????" 
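For context, the test_overflow_bug regression test here matches the jump.py change earlier in this diff: the old code marked a self-move "x = x" by storing -sys.maxint in srccount, and the later per-move decrements pushed that value past the most negative machine integer once translated, wrapping around to a large positive number. A rough sketch of the counting step under those assumptions, with simplified names rather than the real remap_frame_layout code:

    # Illustrative sketch only: simplified names, not the real
    # pypy/jit/backend/x86/jump.py code.
    def count_pending_readers(src_keys, dst_keys):
        srccount = dict.fromkeys(dst_keys, 0)
        for i, src in enumerate(src_keys):
            if src in srccount:
                if src == dst_keys[i]:
                    # self-move "x = x": mark the slot so it is never
                    # scheduled.  The old code used -sys.maxint here; every
                    # later "srccount[src] -= 1" (done once for each other
                    # move that reads src) then underflows the most negative
                    # machine integer after translation.
                    srccount[src] = -len(dst_keys) - 1  # "large enough", safe
                else:
                    srccount[src] += 1
        return srccount

Any sentinel more negative than the number of destinations works; -len(dst_locations) - 1 keeps it well away from the overflow boundary no matter how many decrements follow.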
+ def main(): + srclocs = [StackLoc(9999, x, 'i') for x,y in CASE] + dstlocs = [StackLoc(9999, y, 'i') for x,y in CASE] + remap_frame_layout(FakeAssembler(), srclocs, dstlocs, eax) + # it works when run directly + main() + # but it used to crash when translated, + # because of a -sys.maxint-2 overflowing to sys.maxint + from pypy.rpython.test.test_llinterp import interpret + interpret(main, []) diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -69,6 +69,7 @@ return ctypes.cast(res.value._obj.intval, ctypes.POINTER(item_tp)) def test_allocations(self): + py.test.skip("rewrite or kill") from pypy.rpython.lltypesystem import rstr allocs = [None] @@ -518,16 +519,23 @@ from pypy.tool.logparser import parse_log_file, extract_category from pypy.rlib import debug + targettoken, preambletoken = TargetToken(), TargetToken() loop = """ [i0] - label(i0, descr=targettoken) + label(i0, descr=preambletoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1, descr=targettoken) + label(i1, descr=targettoken) + debug_merge_point('xyz', 0) + i11 = int_add(i1, 1) + i12 = int_ge(i11, 10) + guard_false(i12) [] + jump(i11, descr=targettoken) """ - ops = parse(loop, namespace={'targettoken': TargetToken()}) + ops = parse(loop, namespace={'targettoken': targettoken, + 'preambletoken': preambletoken}) debug._log = dlog = debug.DebugLog() try: self.cpu.assembler.set_debug(True) @@ -536,11 +544,18 @@ self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] - assert struct.i == 10 + assert struct.i == 1 + struct = self.cpu.assembler.loop_run_counters[1] + assert struct.i == 1 + struct = self.cpu.assembler.loop_run_counters[2] + assert struct.i == 9 self.cpu.finish_once() finally: debug._log = None - assert ('jit-backend-counts', [('debug_print', 'loop -1:10')]) in dlog + l0 = ('debug_print', 'entry -1:1') + l1 = ('debug_print', preambletoken.repr_of_descr() + ':1') + l2 = ('debug_print', targettoken.repr_of_descr() + ':9') + assert ('jit-backend-counts', [l0, l1, l2]) in dlog def test_debugger_checksum(self): loop = """ diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -69,16 +69,17 @@ def get_functions_to_patch(): from pypy.jit.backend.llsupport import gc # - can_inline_malloc1 = gc.GcLLDescr_framework.can_inline_malloc - def can_inline_malloc2(*args): + can_use_nursery_malloc1 = gc.GcLLDescr_framework.can_use_nursery_malloc + def can_use_nursery_malloc2(*args): try: if os.environ['PYPY_NO_INLINE_MALLOC']: return False except KeyError: pass - return can_inline_malloc1(*args) + return can_use_nursery_malloc1(*args) # - return {(gc.GcLLDescr_framework, 'can_inline_malloc'): can_inline_malloc2} + return {(gc.GcLLDescr_framework, 'can_use_nursery_malloc'): + can_use_nursery_malloc2} def compile(f, gc, enable_opts='', **kwds): from pypy.annotation.listdef import s_list_of_strings diff --git a/pypy/jit/backend/x86/test/test_zrpy_platform.py b/pypy/jit/backend/x86/test/test_zrpy_platform.py --- a/pypy/jit/backend/x86/test/test_zrpy_platform.py +++ b/pypy/jit/backend/x86/test/test_zrpy_platform.py @@ -74,8 +74,8 @@ myjitdriver = jit.JitDriver(greens = [], reds = ['n']) def entrypoint(argv): - myjitdriver.set_param('threshold', 2) - 
myjitdriver.set_param('trace_eagerness', 0) + jit.set_param(myjitdriver, 'threshold', 2) + jit.set_param(myjitdriver, 'trace_eagerness', 0) n = 16 while n > 0: myjitdriver.can_enter_jit(n=n) diff --git a/pypy/jit/codewriter/call.py b/pypy/jit/codewriter/call.py --- a/pypy/jit/codewriter/call.py +++ b/pypy/jit/codewriter/call.py @@ -42,8 +42,7 @@ except AttributeError: pass - def is_candidate(graph): - return policy.look_inside_graph(graph) + is_candidate = policy.look_inside_graph assert len(self.jitdrivers_sd) > 0 todo = [jd.portal_graph for jd in self.jitdrivers_sd] diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -162,7 +162,6 @@ _ll_4_list_setslice = rlist.ll_listsetslice _ll_2_list_delslice_startonly = rlist.ll_listdelslice_startonly _ll_3_list_delslice_startstop = rlist.ll_listdelslice_startstop -_ll_1_list_list2fixed = lltypesystem_rlist.ll_list2fixed _ll_2_list_inplace_mul = rlist.ll_inplace_mul _ll_2_list_getitem_foldable = _ll_2_list_getitem diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -38,7 +38,8 @@ else: extraprocedures = [procedure] metainterp_sd.stats.view(errmsg=errmsg, - extraprocedures=extraprocedures) + extraprocedures=extraprocedures, + metainterp_sd=metainterp_sd) def create_empty_loop(metainterp, name_prefix=''): name = metainterp.staticdata.stats.name_for_new_loop() @@ -105,38 +106,32 @@ def compile_loop(metainterp, greenkey, start, inputargs, jumpargs, - start_resumedescr, full_preamble_needed=True): + resume_at_jump_descr, full_preamble_needed=True): """Try to compile a new procedure by closing the current history back to the first operation. 
""" from pypy.jit.metainterp.optimizeopt import optimize_trace - history = metainterp.history metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd + history = metainterp.history - if False: - part = partial_trace - assert False - procedur_token = metainterp.get_procedure_token(greenkey) - assert procedure_token - all_target_tokens = [] - else: - jitcell_token = make_jitcell_token(jitdriver_sd) - part = create_empty_loop(metainterp) - part.inputargs = inputargs[:] - h_ops = history.operations - part.start_resumedescr = start_resumedescr - part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ - [h_ops[i].clone() for i in range(start, len(h_ops))] + \ - [ResOperation(rop.JUMP, jumpargs, None, descr=jitcell_token)] - try: - optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) - except InvalidLoop: - return None - target_token = part.operations[0].getdescr() - assert isinstance(target_token, TargetToken) - all_target_tokens = [target_token] + jitcell_token = make_jitcell_token(jitdriver_sd) + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + h_ops = history.operations + part.resume_at_jump_descr = resume_at_jump_descr + part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ + [h_ops[i].clone() for i in range(start, len(h_ops))] + \ + [ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token)] + + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens = [target_token] loop = create_empty_loop(metainterp) loop.inputargs = part.inputargs @@ -184,7 +179,7 @@ def compile_retrace(metainterp, greenkey, start, inputargs, jumpargs, - start_resumedescr, partial_trace, resumekey): + resume_at_jump_descr, partial_trace, resumekey): """Try to compile a new procedure by closing the current history back to the first operation. 
""" @@ -200,7 +195,7 @@ part = create_empty_loop(metainterp) part.inputargs = inputargs[:] - part.start_resumedescr = start_resumedescr + part.resume_at_jump_descr = resume_at_jump_descr h_ops = history.operations part.operations = [partial_trace.operations[-1]] + \ @@ -212,13 +207,12 @@ try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: - #return None # XXX: Dissable for now # Fall back on jumping to preamble target_token = label.getdescr() assert isinstance(target_token, TargetToken) assert target_token.exported_state part.operations = [orignial_label] + \ - [ResOperation(rop.JUMP, target_token.exported_state.jump_args, + [ResOperation(rop.JUMP, inputargs[:], None, descr=loop_jitcell_token)] try: optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts, @@ -318,7 +312,10 @@ metainterp_sd.stats.compiled() metainterp_sd.log("compiled new " + type) # - metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, type, ops_offset) + loopname = jitdriver_sd.warmstate.get_location_str(greenkey) + metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, + type, ops_offset, + name=loopname) # if metainterp_sd.warmrunnerdesc is not None: # for tests metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(original_jitcell_token) @@ -751,7 +748,7 @@ metainterp_sd.stats.add_jitcell_token(jitcell_token) -def compile_trace(metainterp, resumekey, start_resumedescr=None): +def compile_trace(metainterp, resumekey, resume_at_jump_descr=None): """Try to compile a new bridge leading from the beginning of the history to some existing place. """ @@ -767,7 +764,7 @@ # clone ops, as optimize_bridge can mutate the ops new_trace.operations = [op.clone() for op in metainterp.history.operations] - new_trace.start_resumedescr = start_resumedescr + new_trace.resume_at_jump_descr = resume_at_jump_descr metainterp_sd = metainterp.staticdata state = metainterp.jitdriver_sd.warmstate if isinstance(resumekey, ResumeAtPositionDescr): diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -46,7 +46,7 @@ # get the function address as an integer func = argboxes[0].getint() # do the call using the correct function from the cpu - rettype = descr.get_return_type() + rettype = descr.get_result_type() if rettype == INT or rettype == 'S': # *S*ingle float try: result = cpu.bh_call_i(func, descr, args_i, args_r, args_f) @@ -344,6 +344,8 @@ rop.SETINTERIORFIELD_RAW, rop.CALL_RELEASE_GIL, rop.QUASIIMMUT_FIELD, + rop.CALL_MALLOC_GC, + rop.CALL_MALLOC_NURSERY, rop.LABEL, ): # list of opcodes never executed by pyjitpl continue diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -12,7 +12,7 @@ def get_display_text(self): return None -def display_procedures(procedures, errmsg=None, highlight_procedures={}): +def display_procedures(procedures, errmsg=None, highlight_procedures={}, metainterp_sd=None): graphs = [(procedure, highlight_procedures.get(procedure, 0)) for procedure in procedures] for graph, highlight in graphs: @@ -20,7 +20,7 @@ if is_interesting_guard(op): graphs.append((SubGraph(op.getdescr()._debug_suboperations), highlight)) - graphpage = ResOpGraphPage(graphs, errmsg) + graphpage = ResOpGraphPage(graphs, errmsg, metainterp_sd) graphpage.display() def is_interesting_guard(op): @@ -36,8 +36,8 @@ class ResOpGraphPage(GraphPage): - def 
compute(self, graphs, errmsg=None): - resopgen = ResOpGen() + def compute(self, graphs, errmsg=None, metainterp_sd=None): + resopgen = ResOpGen(metainterp_sd) for graph, highlight in graphs: resopgen.add_graph(graph, highlight) if errmsg: @@ -50,13 +50,14 @@ CLUSTERING = True BOX_COLOR = (128, 0, 96) - def __init__(self): + def __init__(self, metainterp_sd=None): self.graphs = [] self.highlight_graphs = {} self.block_starters = {} # {graphindex: {set-of-operation-indices}} self.all_operations = {} self.errmsg = None self.target_tokens = {} + self.metainterp_sd = metainterp_sd def op_name(self, graphindex, opindex): return 'g%dop%d' % (graphindex, opindex) @@ -164,7 +165,14 @@ opindex = opstartindex while True: op = operations[opindex] - lines.append(op.repr(graytext=True)) + op_repr = op.repr(graytext=True) + if op.getopnum() == rop.DEBUG_MERGE_POINT: + jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] + if jd_sd._get_printable_location_ptr: + s = jd_sd.warmstate.get_location_str(op.getarglist()[2:]) + s = s.replace(',', '.') # we use comma for argument splitting + op_repr = "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s) + lines.append(op_repr) if is_interesting_guard(op): tgt = op.getdescr()._debug_suboperations[0] tgt_g, tgt_i = self.all_operations[tgt] diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -142,59 +142,6 @@ def repr_of_descr(self): return '%r' % (self,) - def get_arg_types(self): - """ Implement in call descr. - Must return a string of INT, REF and FLOAT ('i', 'r', 'f'). - """ - raise NotImplementedError - - def get_return_type(self): - """ Implement in call descr. - Must return INT, REF, FLOAT, or 'v' for void. - On 32-bit (hack) it can also be 'L' for longlongs. - Additionally it can be 'S' for singlefloats. - """ - raise NotImplementedError - - def get_extra_info(self): - """ Implement in call descr - """ - raise NotImplementedError - - def is_array_of_pointers(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_array_of_floats(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_array_of_structs(self): - """ Implement for array descr - """ - raise NotImplementedError - - def is_pointer_field(self): - """ Implement for field descr - """ - raise NotImplementedError - - def is_float_field(self): - """ Implement for field descr - """ - raise NotImplementedError - - def as_vtable_size_descr(self): - """ Implement for size descr representing objects with vtables. - Returns self. (it's an annotation hack) - """ - raise NotImplementedError - - def count_fields_if_immutable(self): - return -1 - def _clone_if_mutable(self): return self def clone_if_mutable(self): @@ -758,6 +705,9 @@ self.virtual_state = None self.exported_state = None + + def repr_of_descr(self): + return 'TargetToken(%d)' % compute_unique_id(self) class TreeLoop(object): inputargs = None @@ -765,7 +715,7 @@ call_pure_results = None logops = None quasi_immutable_deps = None - start_resumedescr = None + resume_at_jump_descr = None def _token(*args): raise Exception("TreeLoop.token is killed") @@ -1057,25 +1007,6 @@ # a jump back to itself and possibly a few bridges ending with finnish. # Only the operations within the loop formed by that single jump will # be counted. - - # XXX hacked version, ignore and remove me when jit-targets is merged. 
- loops = self.get_all_loops() - loops = [loop for loop in loops if 'Preamble' not in repr(loop)] #XXX - assert len(loops) == 1 - loop, = loops - jumpop = loop.operations[-1] - assert jumpop.getopnum() == rop.JUMP - insns = {} - for op in loop.operations: - opname = op.getopname() - insns[opname] = insns.get(opname, 0) + 1 - return self._check_insns(insns, expected, check) - - def check_simple_loop(self, expected=None, **check): - # Usefull in the simplest case when we have only one trace ending with - # a jump back to itself and possibly a few bridges ending with finnish. - # Only the operations within the loop formed by that single jump will - # be counted. loops = self.get_all_loops() assert len(loops) == 1 loop = loops[0] @@ -1134,7 +1065,7 @@ if option.view: self.view() - def view(self, errmsg=None, extraprocedures=[]): + def view(self, errmsg=None, extraprocedures=[], metainterp_sd=None): from pypy.jit.metainterp.graphpage import display_procedures procedures = self.get_all_loops()[:] for procedure in extraprocedures: @@ -1146,7 +1077,7 @@ if hasattr(procedure, '_looptoken_number') and ( procedure._looptoken_number in self.invalidated_token_numbers): highlight_procedures.setdefault(procedure, 2) - display_procedures(procedures, errmsg, highlight_procedures) + display_procedures(procedures, errmsg, highlight_procedures, metainterp_sd) # ---------------------------------------------------------------- diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py --- a/pypy/jit/metainterp/logger.py +++ b/pypy/jit/metainterp/logger.py @@ -5,7 +5,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp.history import Const, ConstInt, Box, \ - BoxInt, ConstFloat, BoxFloat, AbstractFailDescr + BoxInt, ConstFloat, BoxFloat, AbstractFailDescr, TargetToken class Logger(object): @@ -13,14 +13,14 @@ self.metainterp_sd = metainterp_sd self.guard_number = guard_number - def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None): + def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None, name=''): if type is None: debug_start("jit-log-noopt-loop") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-loop") else: debug_start("jit-log-opt-loop") - debug_print("# Loop", number, ":", type, + debug_print("# Loop", number, '(%s)' % name , ":", type, "with", len(operations), "ops") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-opt-loop") @@ -135,6 +135,13 @@ fail_args = '' return s_offset + res + op.getopname() + '(' + args + ')' + fail_args + def _log_inputarg_setup_ops(self, op): + target_token = op.getdescr() + if isinstance(target_token, TargetToken): + if target_token.exported_state: + for op in target_token.exported_state.inputarg_setup_ops: + debug_print(' ' + self.repr_of_resop(op)) + def _log_operations(self, inputargs, operations, ops_offset): if not have_debug_prints(): return @@ -146,6 +153,8 @@ for i in range(len(operations)): op = operations[i] debug_print(self.repr_of_resop(operations[i], ops_offset)) + if op.getopnum() == rop.LABEL: + self._log_inputarg_setup_ops(op) if ops_offset and None in ops_offset: offset = ops_offset[None] debug_print("+%d: --end of the loop--" % offset) diff --git a/pypy/jit/metainterp/memmgr.py b/pypy/jit/metainterp/memmgr.py --- a/pypy/jit/metainterp/memmgr.py +++ b/pypy/jit/metainterp/memmgr.py @@ -1,5 +1,5 @@ import math -from pypy.rlib.rarithmetic 
import r_int64, r_uint +from pypy.rlib.rarithmetic import r_int64 from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.rlib.objectmodel import we_are_translated @@ -21,7 +21,6 @@ # class MemoryManager(object): - NO_NEXT_CHECK = r_int64(2 ** 63 - 1) def __init__(self): self.check_frequency = -1 @@ -37,13 +36,12 @@ # According to my estimates it's about 5e9 years given 1000 loops # per second self.current_generation = r_int64(1) - self.next_check = self.NO_NEXT_CHECK + self.next_check = r_int64(-1) self.alive_loops = {} - self._cleanup_jitcell_dicts = lambda: None def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: - self.next_check = self.NO_NEXT_CHECK + self.next_check = r_int64(-1) else: self.max_age = max_age if check_frequency <= 0: @@ -51,11 +49,10 @@ self.check_frequency = check_frequency self.next_check = self.current_generation + 1 - def next_generation(self, do_cleanups_now=True): + def next_generation(self): self.current_generation += 1 - if do_cleanups_now and self.current_generation >= self.next_check: + if self.current_generation == self.next_check: self._kill_old_loops_now() - self._cleanup_jitcell_dicts() self.next_check = self.current_generation + self.check_frequency def keep_loop_alive(self, looptoken): @@ -84,22 +81,3 @@ # a single one is not enough for all tests :-( rgc.collect(); rgc.collect(); rgc.collect() debug_stop("jit-mem-collect") - - def get_current_generation_uint(self): - """Return the current generation, possibly truncated to a uint. - To use only as an approximation for decaying counters.""" - return r_uint(self.current_generation) - - def record_jitcell_dict(self, callback): - """NOT_RPYTHON. The given jitcell_dict is a dict that needs - occasional clean-ups of old cells. A cell is old if it never - reached the threshold, and its counter decayed to a tiny value.""" - # note that the various jitcell_dicts have different RPython types, - # so we have to make a different function for each one. These - # functions are chained to each other: each calls the previous one. - def cleanup_dict(): - callback() - cleanup_previous() - # - cleanup_previous = self._cleanup_jitcell_dicts - self._cleanup_jitcell_dicts = cleanup_dict diff --git a/pypy/jit/metainterp/optimize.py b/pypy/jit/metainterp/optimize.py --- a/pypy/jit/metainterp/optimize.py +++ b/pypy/jit/metainterp/optimize.py @@ -5,58 +5,3 @@ """Raised when the optimize*.py detect that the loop that we are trying to build cannot possibly make sense as a long-running loop (e.g. it cannot run 2 complete iterations).""" - -class RetraceLoop(JitException): - """ Raised when inlining a short preamble resulted in an - InvalidLoop. This means the optimized loop is too specialized - to be useful here, so we trace it again and produced a second - copy specialized in some different way. - """ - -# ____________________________________________________________ - -def optimize_loop(metainterp_sd, old_loop_tokens, loop, enable_opts): - debug_start("jit-optimize") - try: - return _optimize_loop(metainterp_sd, old_loop_tokens, loop, - enable_opts) - finally: - debug_stop("jit-optimize") - -def _optimize_loop(metainterp_sd, old_loop_tokens, loop, enable_opts): - from pypy.jit.metainterp.optimizeopt import optimize_loop_1 - loop.logops = metainterp_sd.logger_noopt.log_loop(loop.inputargs, - loop.operations) - # XXX do we really still need a list? 
- if old_loop_tokens: - return old_loop_tokens[0] - optimize_loop_1(metainterp_sd, loop, enable_opts) - return None - -# ____________________________________________________________ - -def optimize_bridge(metainterp_sd, old_loop_tokens, bridge, enable_opts, - inline_short_preamble=True, retraced=False): - debug_start("jit-optimize") - try: - return _optimize_bridge(metainterp_sd, old_loop_tokens, bridge, - enable_opts, - inline_short_preamble, retraced) - finally: - debug_stop("jit-optimize") - -def _optimize_bridge(metainterp_sd, old_loop_tokens, bridge, enable_opts, - inline_short_preamble, retraced=False): - from pypy.jit.metainterp.optimizeopt import optimize_bridge_1 - bridge.logops = metainterp_sd.logger_noopt.log_loop(bridge.inputargs, - bridge.operations) - if old_loop_tokens: - old_loop_token = old_loop_tokens[0] - bridge.operations[-1].setdescr(old_loop_token) # patch jump target - optimize_bridge_1(metainterp_sd, bridge, enable_opts, - inline_short_preamble, retraced) - return old_loop_tokens[0] - #return bridge.operations[-1].getdescr() - return None - -# ____________________________________________________________ diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -51,34 +51,6 @@ return optimizations, unroll - -def optimize_loop_1(metainterp_sd, loop, enable_opts, - inline_short_preamble=True, retraced=False): - """Optimize loop.operations to remove internal overheadish operations. - """ - - optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts, - inline_short_preamble, retraced) - if unroll: - optimize_unroll(metainterp_sd, loop, optimizations) - else: - optimizer = Optimizer(metainterp_sd, loop, optimizations) - optimizer.propagate_all_forward() - -def optimize_bridge_1(metainterp_sd, bridge, enable_opts, - inline_short_preamble=True, retraced=False): - """The same, but for a bridge. """ - enable_opts = enable_opts.copy() - try: - del enable_opts['unroll'] - except KeyError: - pass - optimize_loop_1(metainterp_sd, bridge, enable_opts, - inline_short_preamble, retraced) - -if __name__ == '__main__': - print ALL_OPTS_NAMES - def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True): """Optimize loop.operations to remove internal overheadish operations. 
""" @@ -96,3 +68,6 @@ finally: debug_stop("jit-optimize") +if __name__ == '__main__': + print ALL_OPTS_NAMES + diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -453,6 +453,7 @@ def clear_newoperations(self): self._newoperations = [] + self.seen_results = {} def make_equal_to(self, box, value, replace=False): assert isinstance(value, OptValue) diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -35,6 +35,9 @@ pass def optimize_LABEL(self, op): + descr = op.getdescr() + if isinstance(descr, JitCellToken): + return self.optimize_JUMP(op.copy_and_change(rop.JUMP)) self.last_label_descr = op.getdescr() self.emit_operation(op) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -1,21 +1,25 @@ +from __future__ import with_statement from pypy.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot) + LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot, + FakeMetaInterpStaticData) from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken from pypy.jit.metainterp.resoperation import rop, opname, ResOperation from pypy.jit.metainterp.optimize import InvalidLoop from py.test import raises +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization +from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method class BaseTestMultiLabel(BaseTest): enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" - def optimize_loop(self, ops, expected): + def optimize_loop(self, ops, expected, expected_shorts=None): loop = self.parse(ops) if expected != "crash!": expected = self.parse(expected) part = TreeLoop('part') part.inputargs = loop.inputargs - part.start_resumedescr = FakeDescrWithSnapshot() + part.resume_at_jump_descr = FakeDescrWithSnapshot() token = loop.original_jitcell_token optimized = TreeLoop('optimized') @@ -32,15 +36,17 @@ if nxt < len(loop.operations): label = loop.operations[nxt] assert label.getopnum() == rop.LABEL - jumpop = ResOperation(rop.JUMP, label.getarglist(), - None, descr=token) - operations.append(jumpop) + if label.getdescr() is None: + label.setdescr(token) + operations.append(label) part.operations = operations + self._do_optimize_loop(part, None) if part.operations[-1].getopnum() == rop.LABEL: last_label = [part.operations.pop()] else: last_label = [] + optimized.operations.extend(part.operations) prv = nxt + 1 @@ -53,11 +59,36 @@ print 'Failed!' 
print + shorts = [op.getdescr().short_preamble + for op in optimized.operations + if op.getopnum() == rop.LABEL] + + if expected_shorts: + for short in shorts: + print + print "Short preamble:" + print '\n'.join([str(o) for o in short]) + + assert expected != "crash!", "should have raised an exception" self.assert_equal(optimized, expected) + if expected_shorts: + assert len(shorts) == len(expected_shorts) + for short, expected_short in zip(shorts, expected_shorts): + expected_short = self.parse(expected_short) + short_preamble = TreeLoop('short preamble') + assert short[0].getopnum() == rop.LABEL + short_preamble.inputargs = short[0].getarglist() + short_preamble.operations = short + self.assert_equal(short_preamble, expected_short, + text_right='expected short preamble') + + return optimized +class OptimizeoptTestMultiLabel(BaseTestMultiLabel): + def test_simple(self): ops = """ [i1] @@ -193,8 +224,217 @@ """ with raises(InvalidLoop): self.optimize_loop(ops, ops) + + def test_two_intermediate_labels_basic_1(self): + ops = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = int_add(i1, i3) + label(p1, i4) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + expected = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1, i2) + i4 = int_add(i1, i2) + label(p1, i4) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + short1 = """ + [p1, i1] + label(p1, i1) + i2 = getfield_gc(p1, descr=valuedescr) + jump(p1, i1, i2) + """ + short2 = """ + [p1, i1] + label(p1, i1) + jump(p1, i1) + """ + self.optimize_loop(ops, expected, expected_shorts=[short1, short2]) + + def test_two_intermediate_labels_basic_2(self): + ops = """ + [p1, i1] + i2 = int_add(i1, 1) + label(p1, i1) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = int_add(i1, i3) + label(p1, i4) + i5 = getfield_gc(p1, descr=valuedescr) + i6 = int_add(i4, i5) + jump(p1, i6) + """ + expected = """ + [p1, i1] + i2 = int_add(i1, 1) + label(p1, i1) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = int_add(i1, i3) + label(p1, i4, i3) + i6 = int_add(i4, i3) + jump(p1, i6, i3) + """ + short1 = """ + [p1, i1] + label(p1, i1) + jump(p1, i1) + """ + short2 = """ + [p1, i1] + label(p1, i1) + i2 = getfield_gc(p1, descr=valuedescr) + jump(p1, i1, i2) + """ + self.optimize_loop(ops, expected, expected_shorts=[short1, short2]) + + def test_two_intermediate_labels_both(self): + ops = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = int_add(i1, i3) + label(p1, i4) + i5 = getfield_gc(p1, descr=valuedescr) + i6 = int_mul(i4, i5) + jump(p1, i6) + """ + expected = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1, i2) + i4 = int_add(i1, i2) + label(p1, i4, i2) + i6 = int_mul(i4, i2) + jump(p1, i6, i2) + """ + short = """ + [p1, i1] + label(p1, i1) + i2 = getfield_gc(p1, descr=valuedescr) + jump(p1, i1, i2) + """ + self.optimize_loop(ops, expected, expected_shorts=[short, short]) + + def test_import_across_multiple_labels_basic(self): + # Not supported, juts make sure we get a functional trace + ops = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + label(p1, i1) + i3 = int_add(i1, 1) + label(p1, i1) + i4 = getfield_gc(p1, descr=valuedescr) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + self.optimize_loop(ops, ops) + + def test_import_across_multiple_labels_with_duplication(self): + # Not supported, juts make sure we get a functional trace + ops = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + 
label(p1, i2) + i3 = int_add(i2, 1) + label(p1, i2) + i4 = getfield_gc(p1, descr=valuedescr) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + exported = """ + [p1, i1] + i2 = getfield_gc(p1, descr=valuedescr) + i6 = same_as(i2) + label(p1, i2) + i3 = int_add(i2, 1) + label(p1, i2) + i4 = getfield_gc(p1, descr=valuedescr) + i5 = int_add(i4, 1) + jump(p1, i5) + """ + self.optimize_loop(ops, exported) + + def test_import_virtual_across_multiple_labels(self): + ops = """ + [p0, i1] + i1a = int_add(i1, 1) + pv = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(pv, i1a, descr=valuedescr) + label(pv, i1) + i2 = int_mul(i1, 3) + label(pv, i2) + i3 = getfield_gc(pv, descr=valuedescr) + i4 = int_add(i3, i2) + jump(pv, i4) + """ + expected = """ + [p0, i1] + i1a = int_add(i1, 1) + i5 = same_as(i1a) + label(i1a, i1) + i2 = int_mul(i1, 3) + label(i1a, i2) + i4 = int_add(i1a, i2) + jump(i1a, i4) + """ + self.optimize_loop(ops, expected) + + +class OptRenameStrlen(Optimization): + def propagate_forward(self, op): + dispatch_opt(self, op) + + def optimize_STRLEN(self, op): + newop = op.clone() + newop.result = op.result.clonebox() + self.emit_operation(newop) + self.make_equal_to(op.result, self.getvalue(newop.result)) + +dispatch_opt = make_dispatcher_method(OptRenameStrlen, 'optimize_', + default=OptRenameStrlen.emit_operation) + +class BaseTestOptimizerRenamingBoxes(BaseTestMultiLabel): + + def _do_optimize_loop(self, loop, call_pure_results): + from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll + from pypy.jit.metainterp.optimizeopt.util import args_dict + from pypy.jit.metainterp.optimizeopt.pure import OptPure + + self.loop = loop + loop.call_pure_results = args_dict() + metainterp_sd = FakeMetaInterpStaticData(self.cpu) + optimize_unroll(metainterp_sd, loop, [OptRenameStrlen(), OptPure()], True) + + def test_optimizer_renaming_boxes(self): + ops = """ + [p1] + i1 = strlen(p1) + label(p1) + i2 = strlen(p1) + i3 = int_add(i2, 7) + jump(p1) + """ + expected = """ + [p1] + i1 = strlen(p1) + label(p1, i1) + i11 = same_as(i1) + i2 = int_add(i11, 7) + jump(p1, i11) + """ + self.optimize_loop(ops, expected) + - -class TestLLtype(BaseTestMultiLabel, LLtypeMixin): + +class TestLLtype(OptimizeoptTestMultiLabel, LLtypeMixin): pass +class TestOptimizerRenamingBoxesLLtype(BaseTestOptimizerRenamingBoxes, LLtypeMixin): + pass diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -4,7 +4,7 @@ LLtypeMixin, BaseTest, Storage, _sortboxes, convert_old_style_to_targets) import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt import pypy.jit.metainterp.optimizeopt.virtualize as virtualize -from pypy.jit.metainterp.optimizeopt import optimize_loop_1, ALL_OPTS_DICT, build_opt_chain +from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT, build_opt_chain from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken @@ -4211,7 +4211,6 @@ preamble = """ [p0] i0 = strlen(p0) - i3 = same_as(i0) # Should be killed by backend jump(p0) """ expected = """ @@ -5668,8 +5667,7 @@ p3 = newstr(i3) copystrcontent(p1, p3, 0, 0, i1) copystrcontent(p2, p3, 0, i1, i2) - i7 = same_as(i2) - jump(p2, p3, i7) + jump(p2, p3, i2) """ expected = """ [p1, p2, i1] @@ -5744,9 +5742,7 @@ 
copystrcontent(p1, p5, 0, 0, i1) copystrcontent(p2, p5, 0, i1, i2) copystrcontent(p3, p5, 0, i12, i3) - i129 = same_as(i2) - i130 = same_as(i3) - jump(p2, p3, p5, i129, i130) + jump(p2, p3, p5, i2, i3) """ expected = """ [p1, p2, p3, i1, i2] @@ -5959,8 +5955,7 @@ p4 = newstr(i5) copystrcontent(p1, p4, i1, 0, i3) copystrcontent(p2, p4, 0, i3, i4) - i9 = same_as(i4) - jump(p4, i1, i2, p2, i5, i3, i9) + jump(p4, i1, i2, p2, i5, i3, i4) """ expected = """ [p1, i1, i2, p2, i5, i3, i4] @@ -6082,9 +6077,7 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, p3, p4, descr=strequaldescr) escape(i0) - i11 = same_as(i1) - i12 = same_as(i2) - jump(p1, p2, p3, i3, i11, i12) + jump(p1, p2, p3, i3, i1, i2) """ expected = """ [p1, p2, p3, i3, i1, i2] @@ -6304,7 +6297,6 @@ i1 = strlen(p1) i0 = int_eq(i1, 0) escape(i0) - i3 = same_as(i1) jump(p1, i0) """ self.optimize_strunicode_loop_extradescrs(ops, expected, preamble) @@ -6350,9 +6342,7 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, s"hello world", p4, descr=streq_nonnull_descr) escape(i0) - i11 = same_as(i1) - i12 = same_as(i2) - jump(p1, p2, i3, i11, i12) + jump(p1, p2, i3, i1, i2) """ expected = """ [p1, p2, i3, i1, i2] @@ -6925,8 +6915,7 @@ [p9] i843 = strlen(p9) call(i843, descr=nonwritedescr) - i0 = same_as(i843) - jump(p9, i0) + jump(p9, i843) """ short = """ [p9] @@ -7770,7 +7759,7 @@ jump(i0, p0, i2) """ self.optimize_loop(ops, expected) - + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/test/test_util.py b/pypy/jit/metainterp/optimizeopt/test/test_util.py --- a/pypy/jit/metainterp/optimizeopt/test/test_util.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_util.py @@ -430,18 +430,18 @@ preamble = TreeLoop('preamble') preamble.inputargs = inputargs - preamble.start_resumedescr = FakeDescrWithSnapshot() + preamble.resume_at_jump_descr = FakeDescrWithSnapshot() token = JitCellToken() preamble.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(token))] + \ operations + \ - [ResOperation(rop.JUMP, jump_args, None, descr=token)] + [ResOperation(rop.LABEL, jump_args, None, descr=token)] self._do_optimize_loop(preamble, call_pure_results) assert preamble.operations[-1].getopnum() == rop.LABEL inliner = Inliner(inputargs, jump_args) - loop.start_resumedescr = preamble.start_resumedescr + loop.resume_at_jump_descr = preamble.resume_at_jump_descr loop.operations = [preamble.operations[-1]] + \ [inliner.inline_op(op, clone=False) for op in cloned_operations] + \ [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jump_args], diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -3,7 +3,7 @@ from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.history import TreeLoop, TargetToken, JitCellToken from pypy.jit.metainterp.jitexc import JitException -from pypy.jit.metainterp.optimize import InvalidLoop, RetraceLoop +from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.metainterp.optimizeopt.optimizer import * from pypy.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds from pypy.jit.metainterp.inliner import Inliner @@ -51,10 +51,10 @@ distinction anymore)""" inline_short_preamble = True - did_import = False def __init__(self, metainterp_sd, loop, optimizations): self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations) + self.boxes_created_this_iteration = None def 
fix_snapshot(self, jump_args, snapshot): if snapshot is None: @@ -71,7 +71,6 @@ loop = self.optimizer.loop self.optimizer.clear_newoperations() - start_label = loop.operations[0] if start_label.getopnum() == rop.LABEL: loop.operations = loop.operations[1:] @@ -82,7 +81,7 @@ start_label = None jumpop = loop.operations[-1] - if jumpop.getopnum() == rop.JUMP: + if jumpop.getopnum() == rop.JUMP or jumpop.getopnum() == rop.LABEL: loop.operations = loop.operations[:-1] else: jumpop = None @@ -91,48 +90,87 @@ self.optimizer.propagate_all_forward(clear=False) if not jumpop: - return - if self.jump_to_already_compiled_trace(jumpop): - # Found a compiled trace to jump to - if self.did_import: - - self.close_bridge(start_label) - self.finilize_short_preamble(start_label) return cell_token = jumpop.getdescr() assert isinstance(cell_token, JitCellToken) stop_label = ResOperation(rop.LABEL, jumpop.getarglist(), None, TargetToken(cell_token)) - if not self.did_import: # Enforce the previous behaviour of always peeling exactly one iteration (for now) - self.optimizer.flush() - KillHugeIntBounds(self.optimizer).apply() + + if jumpop.getopnum() == rop.JUMP: + if self.jump_to_already_compiled_trace(jumpop): + # Found a compiled trace to jump to + if self.short: + # Construct our short preamble + assert start_label + self.close_bridge(start_label) + return - loop.operations = self.optimizer.get_newoperations() - self.export_state(stop_label) - loop.operations.append(stop_label) - else: - assert stop_label + if start_label and self.jump_to_start_label(start_label, stop_label): + # Initial label matches, jump to it + jumpop = ResOperation(rop.JUMP, stop_label.getarglist(), None, + descr=start_label.getdescr()) + if self.short: + # Construct our short preamble + self.close_loop(start_label, jumpop) + else: + self.optimizer.send_extra_operation(jumpop) + return + + if cell_token.target_tokens: + limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit + if cell_token.retraced_count < limit: + cell_token.retraced_count += 1 + debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) + else: + debug_print("Retrace count reached, jumping to preamble") + assert cell_token.target_tokens[0].virtual_state is None + jumpop.setdescr(cell_token.target_tokens[0]) + self.optimizer.send_extra_operation(jumpop) + return + + # Found nothing to jump to, emit a label instead + + if self.short: + # Construct our short preamble assert start_label - stop_target = stop_label.getdescr() - start_target = start_label.getdescr() - assert isinstance(stop_target, TargetToken) - assert isinstance(start_target, TargetToken) - assert stop_target.targeting_jitcell_token is start_target.targeting_jitcell_token - jumpop = ResOperation(rop.JUMP, stop_label.getarglist(), None, descr=start_label.getdescr()) + self.close_bridge(start_label) - self.close_loop(jumpop) - self.finilize_short_preamble(start_label) + self.optimizer.flush() + KillHugeIntBounds(self.optimizer).apply() + + loop.operations = self.optimizer.get_newoperations() + self.export_state(stop_label) + loop.operations.append(stop_label) + + def jump_to_start_label(self, start_label, stop_label): + if not start_label or not stop_label: + return False + + stop_target = stop_label.getdescr() + start_target = start_label.getdescr() + assert isinstance(stop_target, TargetToken) + assert isinstance(start_target, TargetToken) + if stop_target.targeting_jitcell_token is not start_target.targeting_jitcell_token: + return False + + return True + + #args = 
stop_label.getarglist() + #modifier = VirtualStateAdder(self.optimizer) + #virtual_state = modifier.get_virtual_state(args) + #if self.initial_virtual_state.generalization_of(virtual_state): + # return True + def export_state(self, targetop): original_jump_args = targetop.getarglist() jump_args = [self.getvalue(a).get_key_box() for a in original_jump_args] - assert self.optimizer.loop.start_resumedescr - start_resumedescr = self.optimizer.loop.start_resumedescr.clone_if_mutable() - assert isinstance(start_resumedescr, ResumeGuardDescr) - start_resumedescr.rd_snapshot = self.fix_snapshot(jump_args, start_resumedescr.rd_snapshot) - # FIXME: I dont thnik we need fix_snapshot anymore + assert self.optimizer.loop.resume_at_jump_descr + resume_at_jump_descr = self.optimizer.loop.resume_at_jump_descr.clone_if_mutable() + assert isinstance(resume_at_jump_descr, ResumeGuardDescr) + resume_at_jump_descr.rd_snapshot = self.fix_snapshot(jump_args, resume_at_jump_descr.rd_snapshot) modifier = VirtualStateAdder(self.optimizer) virtual_state = modifier.get_virtual_state(jump_args) @@ -141,26 +179,21 @@ inputargs = virtual_state.make_inputargs(values, self.optimizer) short_inputargs = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True) - constant_inputargs = {} - for box in jump_args: - const = self.get_constant_box(box) - if const: - constant_inputargs[box] = const - short_boxes = ShortBoxes(self.optimizer, inputargs + constant_inputargs.keys()) - aliased_vrituals = {} - for i in range(len(original_jump_args)): - if original_jump_args[i] is not jump_args[i]: - if values[i].is_virtual(): - aliased_vrituals[original_jump_args[i]] = jump_args[i] - else: - short_boxes.alias(original_jump_args[i], jump_args[i]) + if self.boxes_created_this_iteration is not None: + for box in self.inputargs: + self.boxes_created_this_iteration[box] = True + + short_boxes = ShortBoxes(self.optimizer, inputargs, + self.boxes_created_this_iteration) self.optimizer.clear_newoperations() - for box in short_inputargs: - value = self.getvalue(box) - if value.is_virtual(): - value.force_box(self.optimizer) + for i in range(len(original_jump_args)): + if values[i].is_virtual(): + values[i].force_box(self.optimizer) + if original_jump_args[i] is not jump_args[i]: + op = ResOperation(rop.SAME_AS, [jump_args[i]], original_jump_args[i]) + self.optimizer.emit_operation(op) inputarg_setup_ops = self.optimizer.get_newoperations() target_token = targetop.getdescr() @@ -168,78 +201,81 @@ targetop.initarglist(inputargs) target_token.virtual_state = virtual_state target_token.short_preamble = [ResOperation(rop.LABEL, short_inputargs, None)] - target_token.start_resumedescr = start_resumedescr - target_token.exported_state = ExportedState(constant_inputargs, short_boxes, - inputarg_setup_ops, self.optimizer, - aliased_vrituals, jump_args) + target_token.resume_at_jump_descr = resume_at_jump_descr + + exported_values = {} + for box in inputargs: + exported_values[box] = self.optimizer.getvalue(box) + for op in short_boxes.operations(): + if op and op.result: + box = op.result + exported_values[box] = self.optimizer.getvalue(box) + + target_token.exported_state = ExportedState(short_boxes, inputarg_setup_ops, + exported_values) def import_state(self, targetop): - self.did_import = False - if not targetop: - # FIXME: Set up some sort of empty state with no virtuals? 
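# (import_state, whose reworked body starts here, is the counterpart of
# export_state above: export_state stores on the label's TargetToken the
# virtual_state plus an ExportedState holding the short_boxes, the
# inputarg_setup_ops and an exported_values map from each inputarg and
# short-box result to its OptValue; import_state reads those back for the
# next peeled iteration instead of reaching into the preamble optimizer as
# the old code did.)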
+ if not targetop: # Trace did not start with a label + self.inputargs = self.optimizer.loop.inputargs + self.short = None + self.initial_virtual_state = None return + + self.inputargs = targetop.getarglist() target_token = targetop.getdescr() - if not target_token: - return assert isinstance(target_token, TargetToken) exported_state = target_token.exported_state if not exported_state: - # FIXME: Set up some sort of empty state with no virtuals + # No state exported, construct one without virtuals + self.short = None + modifier = VirtualStateAdder(self.optimizer) + virtual_state = modifier.get_virtual_state(self.inputargs) + self.initial_virtual_state = virtual_state return - self.did_import = True self.short = target_token.short_preamble[:] self.short_seen = {} - self.short_boxes = exported_state.short_boxes.clone() - for box, const in exported_state.constant_inputargs.items(): - self.short_seen[box] = True - self.imported_state = exported_state - self.inputargs = targetop.getarglist() + self.short_boxes = exported_state.short_boxes + self.short_resume_at_jump_descr = target_token.resume_at_jump_descr self.initial_virtual_state = target_token.virtual_state - self.start_resumedescr = target_token.start_resumedescr seen = {} for box in self.inputargs: if box in seen: continue seen[box] = True - preamble_value = exported_state.optimizer.getvalue(box) + preamble_value = exported_state.exported_values[box] value = self.optimizer.getvalue(box) value.import_from(preamble_value, self.optimizer) - for newbox, oldbox in self.short_boxes.aliases.items(): - self.optimizer.make_equal_to(newbox, self.optimizer.getvalue(oldbox)) - # Setup the state of the new optimizer by emiting the # short operations and discarding the result self.optimizer.emitting_dissabled = True for op in exported_state.inputarg_setup_ops: self.optimizer.send_extra_operation(op) + seen = {} - for op in self.short_boxes.operations(): self.ensure_short_op_emitted(op, self.optimizer, seen) if op and op.result: - preamble_value = exported_state.optimizer.getvalue(op.result) + preamble_value = exported_state.exported_values[op.result] value = self.optimizer.getvalue(op.result) if not value.is_virtual(): imp = ValueImporter(self, preamble_value, op) self.optimizer.importable_values[value] = imp newvalue = self.optimizer.getvalue(op.result) newresult = newvalue.get_key_box() + # note that emitting here SAME_AS should not happen, but + # in case it does, we would prefer to be suboptimal in asm + # to a fatal RPython exception. if newresult is not op.result and not newvalue.is_constant(): - self.short_boxes.alias(newresult, op.result) op = ResOperation(rop.SAME_AS, [op.result], newresult) - self.optimizer._newoperations = [op] + self.optimizer._newoperations # XXX - #self.optimizer.getvalue(op.result).box = op.result # FIXME: HACK!!! 
+ self.optimizer._newoperations.append(op) self.optimizer.flush() self.optimizer.emitting_dissabled = False - for box, key_box in exported_state.aliased_vrituals.items(): - self.optimizer.make_equal_to(box, self.getvalue(key_box)) - def close_bridge(self, start_label): - inputargs = self.inputargs + inputargs = self.inputargs short_jumpargs = inputargs[:] # We dont need to inline the short preamble we are creating as we are conneting @@ -249,8 +285,6 @@ newoperations = self.optimizer.get_newoperations() self.boxes_created_this_iteration = {} i = 0 - while newoperations[i].getopnum() != rop.LABEL: - i += 1 while i < len(newoperations): op = newoperations[i] self.boxes_created_this_iteration[op.result] = True @@ -262,11 +296,11 @@ i += 1 newoperations = self.optimizer.get_newoperations() self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=start_label.getdescr())) - - def close_loop(self, jumpop): + self.finilize_short_preamble(start_label) + + def close_loop(self, start_label, jumpop): virtual_state = self.initial_virtual_state short_inputargs = self.short[0].getarglist() - constant_inputargs = self.imported_state.constant_inputargs inputargs = self.inputargs short_jumpargs = inputargs[:] @@ -289,8 +323,6 @@ raise InvalidLoop args[short_inputargs[i]] = jmp_to_short_args[i] self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) - for box, const in constant_inputargs.items(): - self.short_inliner.argmap[box] = const for op in self.short[1:]: newop = self.short_inliner.inline_op(op) self.optimizer.send_extra_operation(newop) @@ -299,8 +331,6 @@ newoperations = self.optimizer.get_newoperations() self.boxes_created_this_iteration = {} i = j = 0 - while newoperations[i].getopnum() != rop.LABEL: - i += 1 while i < len(newoperations) or j < len(jumpargs): if i == len(newoperations): while j < len(jumpargs): @@ -353,6 +383,8 @@ assert isinstance(target_token, TargetToken) target_token.targeting_jitcell_token.retraced_count = sys.maxint + self.finilize_short_preamble(start_label) + def finilize_short_preamble(self, start_label): short = self.short assert short[-1].getopnum() == rop.JUMP @@ -365,7 +397,7 @@ if op.is_guard(): op = op.clone() op.setfailargs(None) - descr = target_token.start_resumedescr.clone_if_mutable() + descr = target_token.resume_at_jump_descr.clone_if_mutable() op.setdescr(descr) short[i] = op @@ -381,13 +413,11 @@ newargs[i] = a.clonebox() boxmap[a] = newargs[i] inliner = Inliner(short_inputargs, newargs) - for box, const in self.imported_state.constant_inputargs.items(): - inliner.argmap[box] = const for i in range(len(short)): short[i] = inliner.inline_op(short[i]) - target_token.start_resumedescr = self.start_resumedescr.clone_if_mutable() - inliner.inline_descr_inplace(target_token.start_resumedescr) + target_token.resume_at_jump_descr = target_token.resume_at_jump_descr.clone_if_mutable() + inliner.inline_descr_inplace(target_token.resume_at_jump_descr) # Forget the values to allow them to be freed for box in short[0].getarglist(): @@ -398,31 +428,6 @@ target_token.short_preamble = self.short target_token.exported_state = None - - def FIXME_old_stuff(): - preamble_optimizer = self.optimizer From noreply at buildbot.pypy.org Tue Dec 27 14:18:41 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 14:18:41 +0100 (CET) Subject: [pypy-commit] pypy default: a neat debugging feature Message-ID: <20111227131841.6B12782B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50902:a8481f7a34ed Date: 2011-12-27 15:18 
+0200 http://bitbucket.org/pypy/pypy/changeset/a8481f7a34ed/ Log: a neat debugging feature diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -4,6 +4,7 @@ class PyPyModule(MixedModule): interpleveldefs = { 'debug_repr': 'interp_extras.debug_repr', + 'remove_invalidates': 'interp_extras.remove_invalidates', } appleveldefs = {} diff --git a/pypy/module/micronumpy/interp_extras.py b/pypy/module/micronumpy/interp_extras.py --- a/pypy/module/micronumpy/interp_extras.py +++ b/pypy/module/micronumpy/interp_extras.py @@ -5,3 +5,11 @@ @unwrap_spec(array=BaseArray) def debug_repr(space, array): return space.wrap(array.find_sig().debug_repr()) + + at unwrap_spec(array=BaseArray) +def remove_invalidates(space, array): + """ Array modification will no longer invalidate any of it's + potential children. Use only for performance debugging + """ + del array.invalidates[:] + return space.w_None diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -898,6 +898,15 @@ b[0] = 3 assert debug_repr(b) == 'Array' + def test_remove_invalidates(self): + from numpypy import array + from numpypy.pypy import remove_invalidates + a = array([1, 2, 3]) + b = a + a + remove_invalidates(a) + a[0] = 14 + assert b[0] == 28 + def test_virtual_views(self): from numpypy import arange a = arange(15) From noreply at buildbot.pypy.org Tue Dec 27 15:41:08 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 15:41:08 +0100 (CET) Subject: [pypy-commit] jitviewer default: regen log Message-ID: <20111227144108.E80E182B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r183:5f18887dce76 Date: 2011-12-27 16:40 +0200 http://bitbucket.org/pypy/jitviewer/changeset/5f18887dce76/ Log: regen log diff --git a/log.pypylog b/log.pypylog --- a/log.pypylog +++ b/log.pypylog @@ -1,442 +1,442 @@ -[d0e7d8d709c] {jit-backend-dump +[7e18c33e717a] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a000 +0 4157415641554154415341524151415057565554535251504889E341BB503AF20041FFD34889DF4883E4F041BB100CD30041FFD3488D65D8415F415E415D415C5B5DC3 -[d0e7d8f4034] jit-backend-dump} -[d0e7d8f674c] {jit-backend-dump +CODE_DUMP @7f74cbf56000 +0 4157415641554154415341524151415057565554535251504889E341BBC0BAF20041FFD34889DF4883E4F041BBC0C9D20041FFD3488D65D8415F415E415D415C5B5DC3 +[7e18c33f9678] jit-backend-dump} +[7e18c33fb1c9] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a043 +0 4157415641554154415341524151415057565554535251504889E341BB003AF20041FFD34889DF4883E4F041BB100CD30041FFD3488D65D8415F415E415D415C5B5DC3 -[d0e7d8f99b6] jit-backend-dump} -[d0e7d8ff830] {jit-backend-dump +CODE_DUMP @7f74cbf56043 +0 4157415641554154415341524151415057565554535251504889E341BB70BAF20041FFD34889DF4883E4F041BBC0C9D20041FFD3488D65D8415F415E415D415C5B5DC3 +[7e18c33fcadd] jit-backend-dump} +[7e18c340052a] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a086 +0 
4157415641554154415341524151415057565554535251504889E34881EC80000000F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C2438F2440F11442440F2440F114C2448F2440F11542450F2440F115C2458F2440F11642460F2440F116C2468F2440F11742470F2440F117C247841BB503AF20041FFD34889DF4883E4F041BB100CD30041FFD3488D65D8415F415E415D415C5B5DC3 -[d0e7d904384] jit-backend-dump} -[d0e7d906196] {jit-backend-dump +CODE_DUMP @7f74cbf56086 +0 4157415641554154415341524151415057565554535251504889E34881EC80000000F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C2438F2440F11442440F2440F114C2448F2440F11542450F2440F115C2458F2440F11642460F2440F116C2468F2440F11742470F2440F117C247841BBC0BAF20041FFD34889DF4883E4F041BBC0C9D20041FFD3488D65D8415F415E415D415C5B5DC3 +[7e18c340297b] jit-backend-dump} +[7e18c34037a3] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a137 +0 4157415641554154415341524151415057565554535251504889E34881EC80000000F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C2438F2440F11442440F2440F114C2448F2440F11542450F2440F115C2458F2440F11642460F2440F116C2468F2440F11742470F2440F117C247841BB003AF20041FFD34889DF4883E4F041BB100CD30041FFD3488D65D8415F415E415D415C5B5DC3 -[d0e7d90a4e6] jit-backend-dump} -[d0e7d91000c] {jit-backend-dump +CODE_DUMP @7f74cbf56137 +0 4157415641554154415341524151415057565554535251504889E34881EC80000000F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C2438F2440F11442440F2440F114C2448F2440F11542450F2440F115C2458F2440F11642460F2440F116C2468F2440F11742470F2440F117C247841BB70BAF20041FFD34889DF4883E4F041BBC0C9D20041FFD3488D65D8415F415E415D415C5B5DC3 +[7e18c3405738] jit-backend-dump} +[7e18c3408501] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a210 +0 41BB6039F20041FFD3B803000000488D65D8415F415E415D415C5B5DC3 -[d0e7d9121cc] jit-backend-dump} -[d0e7d91b0ac] {jit-backend-dump +CODE_DUMP @7f74cbf56210 +0 41BBD0B9F20041FFD3B803000000488D65D8415F415E415D415C5B5DC3 +[7e18c340953c] jit-backend-dump} +[7e18c340f7f4] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a22d +0 F20F11442410F20F114C2418F20F11542420F20F115C2428F20F11642430F20F116C2438F20F11742440F20F117C2448F2440F11442450F2440F114C2458F2440F11542460F2440F115C2468F2440F11642470F2440F116C2478F2440F11B42480000000F2440F11BC24880000004829C24C894DA848894D804C8955B04C8945A048897D98488975904889D741BB30C6CE0041FFE3 -[d0e7d91efca] jit-backend-dump} -[d0e7d92a670] {jit-backend-dump +CODE_DUMP @7f74cbf5622d +0 F20F11442410F20F114C2418F20F11542420F20F115C2428F20F11642430F20F116C2438F20F11742440F20F117C2448F2440F11442450F2440F114C2458F2440F11542460F2440F115C2468F2440F11642470F2440F116C2478F2440F11B42480000000F2440F11BC24880000004829C24C8945A048894D804C8955B0488975904C894DA848897D984889D741BBC080CE0041FFE3 +[7e18c34118c4] jit-backend-dump} +[7e18c341703c] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a2c2 +0 4C8B4DA8488B4D804C8B55B04C8B45A0488B7D98488B7590F20F10442410F20F104C2418F20F10542420F20F105C2428F20F10642430F20F106C2438F20F10742440F20F107C2448F2440F10442450F2440F104C2458F2440F10542460F2440F105C2468F2440F10642470F2440F106C2478F2440F10B42480000000F2440F10BC24880000004885C07409488B142550C95401C349BB10A279E5D57F000041FFE3 -[d0e7d92ea2c] jit-backend-dump} -[d0e7d932e9c] {jit-backend-dump +CODE_DUMP @7f74cbf562c2 +0 
4C8B45A0488B4D804C8B55B0488B75904C8B4DA8488B7D98F20F10442410F20F104C2418F20F10542420F20F105C2428F20F10642430F20F106C2438F20F10742440F20F107C2448F2440F10442450F2440F104C2458F2440F10542460F2440F105C2468F2440F10642470F2440F106C2478F2440F10B42480000000F2440F10BC24880000004885C07409488B1425B0685501C349BB1062F5CB747F000041FFE3 +[7e18c3419028] jit-backend-dump} +[7e18c341ba40] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a363 +0 57565251415041514883EC40F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C24384889E741BB50D2A80041FFD3488B0425400C9C024885C0753CF20F107C2438F20F10742430F20F106C2428F20F10642420F20F105C2418F20F10542410F20F104C2408F20F1004244883C44041594158595A5E5FC341BB003AF20041FFD3B8030000004883C478C3 -[d0e7d937156] jit-backend-dump} -[d0e7d938ba2] {jit-backend-counts -[d0e7d9394fc] jit-backend-counts} -[d0e7fb5a273] {jit-backend -[d0e8032b45f] {jit-backend-dump +CODE_DUMP @7f74cbf56363 +0 57565251415041514883EC40F20F110424F20F114C2408F20F11542410F20F115C2418F20F11642420F20F116C2428F20F11742430F20F117C24384889E741BBF0F0A80041FFD3488B0425203B9D024885C0753CF20F107C2438F20F10742430F20F106C2428F20F10642420F20F105C2418F20F10542410F20F104C2408F20F1004244883C44041594158595A5E5FC341BB70BAF20041FFD3B8030000004883C478C3 +[7e18c341d8eb] jit-backend-dump} +[7e18c341e71f] {jit-backend-counts +[7e18c341f46c] jit-backend-counts} +[7e18c39d710c] {jit-backend +[7e18c3ee50f0] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a406 +0 488B0425E00A9C024829E0483B0425E0EC4F01760D49BB63A379E5D57F000041FFD3554889E5534154415541564157488DA5000000004C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B4020498B48284D8B40304889BD70FFFFFF4889B568FFFFFF4C89BD60FFFFFF4C89A558FFFFFF4C898D50FFFFFF48899548FFFFFF48898D40FFFFFF4C898538FFFFFF49BBF03007E8D57F00004D8B034983C00149BBF03007E8D57F00004D89034983FA030F85000000008138306900000F85000000004C8B50104D85D20F84000000004C8B4008498B4A108139702703000F85000000004D8B5208498B4A08498B52104D8B52184983F8000F8C000000004D39D00F8D000000004D89C14C0FAFC24989CC4C01C14983C1014C8948084983FD000F85000000004883FB017206813BF81600000F850000000049BBE09C8DE5D57F00004D39DE0F85000000004C8B73084983C6010F8000000000488B1C25E80A9C024883FB000F8C0000000048898D30FFFFFF49BB083107E8D57F0000498B0B4883C10149BB083107E8D57F000049890B4D39D10F8D000000004C89C94C0FAFCA4C89E34D01CC4883C101488948084D89F14983C6010F80000000004C8B0C25E80A9C024983F9000F8C000000004C89A530FFFFFF4989C94989DCE993FFFFFF49BB00A079E5D57F000041FFD32944404838354C510C5400585C030400000049BB00A079E5D57F000041FFD34440004838354C0C54585C030500000049BB00A079E5D57F000041FFD3444000284838354C0C54585C030600000049BB00A079E5D57F000041FFD34440002104284838354C0C54585C030700000049BB00A079E5D57F000041FFD3444000212909054838354C0C54585C030800000049BB00A079E5D57F000041FFD34440002109054838354C0C54585C030900000049BB00A079E5D57F000041FFD335444048384C0C54005C05030A00000049BB00A079E5D57F000041FFD344400C48384C005C05030B00000049BB00A079E5D57F000041FFD3444038484C0C005C05030C00000049BB00A079E5D57F000041FFD344400C39484C0005030D00000049BB00A079E5D57F000041FFD34440484C003905030E00000049BB00A079E5D57F000041FFD34440484C003905030F00000049BB00A079E5D57F000041FFD3444000250931484C6139031000000049BB00A079E5D57F000041FFD3444039484C00310725031100000049BB00A079E5D57F000041FFD34440484C0039310707031200000049BB00A079E5D57F000041FFD34440484C00393107070313000000 -[d0e803559b7] jit-backend-dump} -[d0e80356de5] {jit-backend-addr -Loop 0 ( #19 FOR_ITER) has address 
7fd5e579a43c to 7fd5e579a5fb (bootstrap 7fd5e579a406) -[d0e803590a1] jit-backend-addr} -[d0e8035a421] {jit-backend-dump +CODE_DUMP @7f74cbf56406 +0 488B0425C0399D024829E0483B0425E08C5001760D49BB6363F5CB747F000041FFD3554889E5534154415541564157488DA50000000049BBF0F082CE747F00004D8B3B4983C70149BBF0F082CE747F00004D893B4C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B4020498B48284D8B40304889BD70FFFFFF4889B568FFFFFF4C89BD60FFFFFF4C89A558FFFFFF4C898D50FFFFFF48899548FFFFFF48898D40FFFFFF4C898538FFFFFF49BB08F182CE747F00004D8B034983C00149BB08F182CE747F00004D89034983FA030F85000000008138104D00000F85000000004C8B50104D85D20F84000000004C8B4008498B4A108139302303000F85000000004D8B5208498B4A08498B52104D8B52184983F8000F8C000000004D39D00F8D000000004D89C14C0FAFC24989CC4C01C14983C1014C8948084983FD000F85000000004883FB017206813BF81600000F850000000049BBE05C09CC747F00004D39DE0F85000000004C8B73084983C6010F8000000000488B1C25C8399D024883FB000F8C0000000048898D30FFFFFF49BB20F182CE747F0000498B0B4883C10149BB20F182CE747F000049890B4D39D10F8D000000004C89C94C0FAFCA4C89E34D01CC4883C101488948084D89F14983C6010F80000000004C8B0C25C8399D024983F9000F8C000000004C89A530FFFFFF4989C94989DCE993FFFFFF49BB0060F5CB747F000041FFD32944404838354C510C5400585C030400000049BB0060F5CB747F000041FFD34440004838354C0C54585C030500000049BB0060F5CB747F000041FFD3444000284838354C0C54585C030600000049BB0060F5CB747F000041FFD34440002104284838354C0C54585C030700000049BB0060F5CB747F000041FFD3444000212909054838354C0C54585C030800000049BB0060F5CB747F000041FFD34440002109054838354C0C54585C030900000049BB0060F5CB747F000041FFD335444048384C0C54005C05030A00000049BB0060F5CB747F000041FFD344400C48384C005C05030B00000049BB0060F5CB747F000041FFD3444038484C0C005C05030C00000049BB0060F5CB747F000041FFD344400C39484C0005030D00000049BB0060F5CB747F000041FFD34440484C003905030E00000049BB0060F5CB747F000041FFD34440484C003905030F00000049BB0060F5CB747F000041FFD3444000250931484C3961031000000049BB0060F5CB747F000041FFD3444039484C00312507031100000049BB0060F5CB747F000041FFD34440484C0039310707031200000049BB0060F5CB747F000041FFD34440484C00393107070313000000 +[7e18c3f0324c] jit-backend-dump} +[7e18c3f03d0e] {jit-backend-addr +Loop 0 ( #19 FOR_ITER) has address 7f74cbf5643c to 7f74cbf56619 (bootstrap 7f74cbf56406) +[7e18c3f0502b] jit-backend-addr} +[7e18c3f05d6c] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a438 +0 30FFFFFF -[d0e8035c161] jit-backend-dump} -[d0e8035d487] {jit-backend-dump +CODE_DUMP @7f74cbf56438 +0 30FFFFFF +[7e18c3f06bf1] jit-backend-dump} +[7e18c3f079f8] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a4cf +0 28010000 -[d0e8035eb7f] jit-backend-dump} -[d0e8035f647] {jit-backend-dump +CODE_DUMP @7f74cbf564ed +0 28010000 +[7e18c3f08526] jit-backend-dump} +[7e18c3f08a60] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a4db +0 3B010000 -[d0e80360bb9] jit-backend-dump} -[d0e803615df] {jit-backend-dump +CODE_DUMP @7f74cbf564f9 +0 3B010000 +[7e18c3f09621] jit-backend-dump} +[7e18c3f09c00] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a4e8 +0 4B010000 -[d0e80362a49] jit-backend-dump} -[d0e80363415] {jit-backend-dump +CODE_DUMP @7f74cbf56506 +0 4B010000 +[7e18c3f0a6c5] jit-backend-dump} +[7e18c3f0ab39] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a4fc +0 55010000 -[d0e80364879] jit-backend-dump} -[d0e80365233] {jit-backend-dump +CODE_DUMP @7f74cbf5651a +0 55010000 +[7e18c3f0b517] jit-backend-dump} +[7e18c3f0b97c] 
{jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a516 +0 5B010000 -[d0e80366895] jit-backend-dump} -[d0e80367333] {jit-backend-dump +CODE_DUMP @7f74cbf56534 +0 5B010000 +[7e18c3f0c32d] jit-backend-dump} +[7e18c3f0c7bc] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a51f +0 73010000 -[d0e803688bd] jit-backend-dump} -[d0e80369259] {jit-backend-dump +CODE_DUMP @7f74cbf5653d +0 73010000 +[7e18c3f0d287] jit-backend-dump} +[7e18c3f0d80f] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a53e +0 74010000 -[d0e8036a6c9] jit-backend-dump} -[d0e8036b065] {jit-backend-dump +CODE_DUMP @7f74cbf5655c +0 74010000 +[7e18c3f0e340] jit-backend-dump} +[7e18c3f0e7c3] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a550 +0 7F010000 -[d0e8036c4cf] jit-backend-dump} -[d0e8036ce23] {jit-backend-dump +CODE_DUMP @7f74cbf5656e +0 7F010000 +[7e18c3f0f168] jit-backend-dump} +[7e18c3f0f5cd] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a563 +0 87010000 -[d0e8036e28d] jit-backend-dump} -[d0e8036ed5b] {jit-backend-dump +CODE_DUMP @7f74cbf56581 +0 87010000 +[7e18c3f0ff6c] jit-backend-dump} +[7e18c3f103f2] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a571 +0 94010000 -[d0e8037030f] jit-backend-dump} -[d0e8037119d] {jit-backend-dump +CODE_DUMP @7f74cbf5658f +0 94010000 +[7e18c3f10d79] jit-backend-dump} +[7e18c3f115a1] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a583 +0 B5010000 -[d0e803726d9] jit-backend-dump} -[d0e80373069] {jit-backend-dump +CODE_DUMP @7f74cbf565a1 +0 B5010000 +[7e18c3f1217d] jit-backend-dump} +[7e18c3f12714] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a5b1 +0 A0010000 -[d0e803744d9] jit-backend-dump} -[d0e80374e51] {jit-backend-dump +CODE_DUMP @7f74cbf565cf +0 A0010000 +[7e18c3f131af] jit-backend-dump} +[7e18c3f13617] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a5d3 +0 9A010000 -[d0e803762c1] jit-backend-dump} -[d0e80376c99] {jit-backend-dump +CODE_DUMP @7f74cbf565f1 +0 9A010000 +[7e18c3f13fcb] jit-backend-dump} +[7e18c3f1459e] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a5e5 +0 BE010000 -[d0e80378349] jit-backend-dump} -[d0e803796ff] jit-backend} -[d0e8037b13f] {jit-log-opt-loop -# Loop 0 : loop with 73 ops +CODE_DUMP @7f74cbf56603 +0 BE010000 +[7e18c3f14f22] jit-backend-dump} +[7e18c3f15b01] jit-backend} +[7e18c3f17cc1] {jit-log-opt-loop +# Loop 0 ( #19 FOR_ITER) : loop with 73 ops [p0, p1] -+54: p2 = getfield_gc(p0, descr=) -+58: p3 = getfield_gc(p0, descr=) -+62: i4 = getfield_gc(p0, descr=) -+70: p5 = getfield_gc(p0, descr=) -+74: i6 = getfield_gc(p0, descr=) -+81: i7 = getfield_gc(p0, descr=) -+85: p8 = getfield_gc(p0, descr=) -+89: p10 = getarrayitem_gc(p8, 0, descr=) -+93: p12 = getarrayitem_gc(p8, 1, descr=) -+97: p14 = getarrayitem_gc(p8, 2, descr=) -+101: p16 = getarrayitem_gc(p8, 3, descr=) -+105: p18 = getarrayitem_gc(p8, 4, descr=) -+109: p19 = getfield_gc(p0, descr=) -+109: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, descr=TargetToken(140556656117424)) ++84: p2 = getfield_gc(p0, descr=) ++88: p3 = getfield_gc(p0, descr=) ++92: i4 = getfield_gc(p0, descr=) ++100: p5 = getfield_gc(p0, descr=) ++104: i6 = getfield_gc(p0, descr=) ++111: i7 = getfield_gc(p0, descr=) ++115: p8 = getfield_gc(p0, descr=) ++119: p10 = getarrayitem_gc(p8, 0, descr=) ++123: p12 = getarrayitem_gc(p8, 1, descr=) 
++127: p14 = getarrayitem_gc(p8, 2, descr=) ++131: p16 = getarrayitem_gc(p8, 3, descr=) ++135: p18 = getarrayitem_gc(p8, 4, descr=) ++139: p19 = getfield_gc(p0, descr=) ++139: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, descr=TargetToken(140139616183984)) debug_merge_point(0, ' #19 FOR_ITER') -+195: guard_value(i6, 3, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18] -+205: guard_class(p14, 38308720, descr=) [p1, p0, p14, p2, p3, i4, p5, p10, p12, p16, p18] -+217: p22 = getfield_gc(p14, descr=) -+221: guard_nonnull(p22, descr=) [p1, p0, p14, p22, p2, p3, i4, p5, p10, p12, p16, p18] -+230: i23 = getfield_gc(p14, descr=) -+234: p24 = getfield_gc(p22, descr=) -+238: guard_class(p24, 38488496, descr=) [p1, p0, p14, i23, p24, p22, p2, p3, i4, p5, p10, p12, p16, p18] -+250: p26 = getfield_gc(p22, descr=) -+254: i27 = getfield_gc_pure(p26, descr=) -+258: i28 = getfield_gc_pure(p26, descr=) -+262: i29 = getfield_gc_pure(p26, descr=) -+266: i31 = int_lt(i23, 0) ++225: guard_value(i6, 3, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18] ++235: guard_class(p14, 38352528, descr=) [p1, p0, p14, p2, p3, i4, p5, p10, p12, p16, p18] ++247: p22 = getfield_gc(p14, descr=) ++251: guard_nonnull(p22, descr=) [p1, p0, p14, p22, p2, p3, i4, p5, p10, p12, p16, p18] ++260: i23 = getfield_gc(p14, descr=) ++264: p24 = getfield_gc(p22, descr=) ++268: guard_class(p24, 38538416, descr=) [p1, p0, p14, i23, p24, p22, p2, p3, i4, p5, p10, p12, p16, p18] ++280: p26 = getfield_gc(p22, descr=) ++284: i27 = getfield_gc_pure(p26, descr=) ++288: i28 = getfield_gc_pure(p26, descr=) ++292: i29 = getfield_gc_pure(p26, descr=) ++296: i31 = int_lt(i23, 0) guard_false(i31, descr=) [p1, p0, p14, i23, i29, i28, i27, p2, p3, i4, p5, p10, p12, p16, p18] -+276: i32 = int_ge(i23, i29) ++306: i32 = int_ge(i23, i29) guard_false(i32, descr=) [p1, p0, p14, i23, i28, i27, p2, p3, i4, p5, p10, p12, p16, p18] -+285: i33 = int_mul(i23, i28) -+292: i34 = int_add(i27, i33) -+298: i36 = int_add(i23, 1) -+302: setfield_gc(p14, i36, descr=) -+306: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p18, i34] ++315: i33 = int_mul(i23, i28) ++322: i34 = int_add(i27, i33) ++328: i36 = int_add(i23, 1) ++332: setfield_gc(p14, i36, descr=) ++336: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p18, i34] debug_merge_point(0, ' #22 STORE_FAST') debug_merge_point(0, ' #25 LOAD_FAST') -+316: guard_nonnull_class(p10, ConstClass(W_IntObject), descr=) [p1, p0, p10, p2, p3, p5, p14, p18, i34] ++346: guard_nonnull_class(p10, ConstClass(W_IntObject), descr=) [p1, p0, p10, p2, p3, p5, p14, p18, i34] debug_merge_point(0, ' #28 LOAD_CONST') -+334: guard_value(p3, ConstPtr(ptr39), descr=) [p1, p0, p3, p2, p5, p10, p14, p18, i34] ++364: guard_value(p3, ConstPtr(ptr39), descr=) [p1, p0, p3, p2, p5, p10, p14, p18, i34] debug_merge_point(0, ' #31 INPLACE_ADD') -+353: i40 = getfield_gc_pure(p10, descr=) -+357: i42 = int_add_ovf(i40, 1) ++383: i40 = getfield_gc_pure(p10, descr=) ++387: i42 = int_add_ovf(i40, 1) guard_no_overflow(, descr=) [p1, p0, p10, i42, p2, p5, p14, i34] debug_merge_point(0, ' #32 STORE_FAST') debug_merge_point(0, ' #35 JUMP_ABSOLUTE') -+367: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p14, i42, i34] -+367: i44 = getfield_raw(43780840, descr=) -+375: i46 = int_lt(i44, 0) ++397: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p14, i42, i34] ++397: i44 = getfield_raw(43858376, descr=) ++405: i46 = int_lt(i44, 0) guard_false(i46, descr=) [p1, p0, p2, p5, p14, i42, i34] 
debug_merge_point(0, ' #19 FOR_ITER') -+385: label(p0, p1, p2, p5, i42, i34, p14, i36, i29, i28, i27, descr=TargetToken(140556656117504)) ++415: label(p0, p1, p2, p5, i42, i34, p14, i36, i29, i28, i27, descr=TargetToken(140139616184064)) debug_merge_point(0, ' #19 FOR_ITER') -+422: i47 = int_ge(i36, i29) -guard_false(i47, descr=) [p1, p0, p14, i36, i28, i27, p2, p5, i34, i42] -+431: i48 = int_mul(i36, i28) -+438: i49 = int_add(i27, i48) -+444: i50 = int_add(i36, 1) ++452: i47 = int_ge(i36, i29) +guard_false(i47, descr=) [p1, p0, p14, i36, i28, i27, p2, p5, i42, i34] ++461: i48 = int_mul(i36, i28) ++468: i49 = int_add(i27, i48) ++474: i50 = int_add(i36, 1) debug_merge_point(0, ' #22 STORE_FAST') debug_merge_point(0, ' #25 LOAD_FAST') debug_merge_point(0, ' #28 LOAD_CONST') debug_merge_point(0, ' #31 INPLACE_ADD') -+448: setfield_gc(p14, i50, descr=) -+452: i51 = int_add_ovf(i42, 1) -guard_no_overflow(, descr=) [p1, p0, i51, p2, p5, p14, i49, None, i42] ++478: setfield_gc(p14, i50, descr=) ++482: i51 = int_add_ovf(i42, 1) +guard_no_overflow(, descr=) [p1, p0, i51, p2, p5, p14, i49, i42, None] debug_merge_point(0, ' #32 STORE_FAST') debug_merge_point(0, ' #35 JUMP_ABSOLUTE') -+465: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p14, i51, i49, None, None] -+465: i53 = getfield_raw(43780840, descr=) -+473: i54 = int_lt(i53, 0) ++495: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p14, i51, i49, None, None] ++495: i53 = getfield_raw(43858376, descr=) ++503: i54 = int_lt(i53, 0) guard_false(i54, descr=) [p1, p0, p2, p5, p14, i51, i49, None, None] debug_merge_point(0, ' #19 FOR_ITER') -+483: jump(p0, p1, p2, p5, i51, i49, p14, i50, i29, i28, i27, descr=TargetToken(140556656117504)) -+501: --end of the loop-- -[d0e804a30d1] jit-log-opt-loop} -[d0e842b177e] {jit-backend -[d0e8436c0d8] {jit-backend-dump ++513: jump(p0, p1, p2, p5, i51, i49, p14, i50, i29, i28, i27, descr=TargetToken(140139616184064)) ++531: --end of the loop-- +[7e18c3fba5a9] jit-log-opt-loop} +[7e18c4351e29] {jit-backend +[7e18c43b55d5] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a7c2 +0 
488B0425E00A9C024829E0483B0425E0EC4F01760D49BB63A379E5D57F000041FFD3554889E5534154415541564157488DA5000000004C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B40204D8B40284889BD70FFFFFF4889B568FFFFFF4C89BD60FFFFFF4C89A558FFFFFF4C898D50FFFFFF48898548FFFFFF4C898540FFFFFF49BBD83007E8D57F00004D8B034983C00149BBD83007E8D57F00004D89034983FA020F85000000004883FA017206813AF81600000F85000000004983FD000F850000000049BB989D8DE5D57F00004D39DE0F85000000004C8B72084981FE102700000F8D0000000049BB00000000000000804D39DE0F84000000004C89F0B90200000048899538FFFFFF48898530FFFFFF489948F7F94889D048C1FA3F41BE020000004921D64C01F04883F8000F85000000004883FB017206813BF81600000F8500000000488B43084883C0010F8000000000488B9D30FFFFFF4883C3014C8B3425E80A9C024983FE000F8C0000000049BB203107E8D57F00004D8B334983C60149BB203107E8D57F00004D89334881FB102700000F8D0000000049BB00000000000000804C39DB0F840000000048898528FFFFFF4889D8B90200000048898520FFFFFF489948F7F94889D048C1FA3FBB020000004821D34801D84883F8000F8500000000488B8528FFFFFF4883C0010F8000000000488B9D20FFFFFF4883C301488B1425E80A9C024883FA000F8C00000000E958FFFFFF49BB00A079E5D57F000041FFD32944404838354C510C085458031400000049BB00A079E5D57F000041FFD34440084838354C0C5458031500000049BB00A079E5D57F000041FFD335444048384C0C0858031600000049BB00A079E5D57F000041FFD3444038484C0C0858031700000049BB00A079E5D57F000041FFD3444008484C0C031800000049BB00A079E5D57F000041FFD344400839484C0C031900000049BB00A079E5D57F000041FFD34440484C0C5C01031A00000049BB00A079E5D57F000041FFD344400C484C5C07031B00000049BB00A079E5D57F000041FFD344400C01484C5C07031C00000049BB00A079E5D57F000041FFD34440484C010D07031D00000049BB00A079E5D57F000041FFD34440484C010D07031E00000049BB00A079E5D57F000041FFD34440484C010D031F00000049BB00A079E5D57F000041FFD344400D484C0107032000000049BB00A079E5D57F000041FFD34440484C016569032100000049BB00A079E5D57F000041FFD3444001484C076569032200000049BB00A079E5D57F000041FFD34440484C0D01070707032300000049BB00A079E5D57F000041FFD34440484C0D010707070324000000 -[d0e8437f56c] jit-backend-dump} -[d0e843802c8] {jit-backend-addr -Loop 1 ( #15 LOAD_FAST) has address 7fd5e579a7f8 to 7fd5e579a9f4 (bootstrap 7fd5e579a7c2) -[d0e84382392] jit-backend-addr} -[d0e843830f4] {jit-backend-dump +CODE_DUMP @7f74cbf567e0 +0 
488B0425C0399D024829E0483B0425E08C5001760D49BB6363F5CB747F000041FFD3554889E5534154415541564157488DA50000000049BBD8F082CE747F00004D8B3B4983C70149BBD8F082CE747F00004D893B4C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B40204D8B40284889BD70FFFFFF4889B568FFFFFF4C89BD60FFFFFF4C89A558FFFFFF4C898D50FFFFFF48898548FFFFFF4C898540FFFFFF49BB38F182CE747F00004D8B034983C00149BB38F182CE747F00004D89034983FA020F85000000004883FA017206813AF81600000F85000000004983FD000F850000000049BB985D09CC747F00004D39DE0F85000000004C8B72084981FE102700000F8D0000000049BB00000000000000804D39DE0F84000000004C89F0B90200000048899538FFFFFF48898530FFFFFF489948F7F94889D048C1FA3F41BE020000004921D64C01F04883F8000F85000000004883FB017206813BF81600000F8500000000488B43084883C0010F8000000000488B9D30FFFFFF4883C3014C8B3425C8399D024983FE000F8C0000000049BB50F182CE747F00004D8B334983C60149BB50F182CE747F00004D89334881FB102700000F8D0000000049BB00000000000000804C39DB0F840000000048898528FFFFFF4889D8B90200000048898520FFFFFF489948F7F94889D048C1FA3FBB020000004821D34801D84883F8000F8500000000488B8528FFFFFF4883C0010F8000000000488B9D20FFFFFF4883C301488B1425C8399D024883FA000F8C00000000E958FFFFFF49BB0060F5CB747F000041FFD32944404838354C510C085458031400000049BB0060F5CB747F000041FFD34440084838354C0C5458031500000049BB0060F5CB747F000041FFD335444048384C0C0858031600000049BB0060F5CB747F000041FFD3444038484C0C0858031700000049BB0060F5CB747F000041FFD3444008484C0C031800000049BB0060F5CB747F000041FFD344400839484C0C031900000049BB0060F5CB747F000041FFD34440484C0C5C01031A00000049BB0060F5CB747F000041FFD344400C484C5C07031B00000049BB0060F5CB747F000041FFD344400C01484C5C07031C00000049BB0060F5CB747F000041FFD34440484C010D07031D00000049BB0060F5CB747F000041FFD34440484C010D07031E00000049BB0060F5CB747F000041FFD34440484C010D031F00000049BB0060F5CB747F000041FFD344400D484C0107032000000049BB0060F5CB747F000041FFD34440484C016569032100000049BB0060F5CB747F000041FFD3444001484C076569032200000049BB0060F5CB747F000041FFD34440484C0D01070707032300000049BB0060F5CB747F000041FFD34440484C0D010707070324000000 +[7e18c43be371] jit-backend-dump} +[7e18c43be9a7] {jit-backend-addr +Loop 1 ( #15 LOAD_FAST) has address 7f74cbf56816 to 7f74cbf56a30 (bootstrap 7f74cbf567e0) +[7e18c43bf74b] jit-backend-addr} +[7e18c43bfe05] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a7f4 +0 20FFFFFF -[d0e84384e94] jit-backend-dump} -[d0e84385f8c] {jit-backend-dump +CODE_DUMP @7f74cbf56812 +0 20FFFFFF +[7e18c43c0ab9] jit-backend-dump} +[7e18c43c1193] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a880 +0 70010000 -[d0e843877c8] jit-backend-dump} -[d0e843882ba] {jit-backend-dump +CODE_DUMP @7f74cbf568bc +0 70010000 +[7e18c43c1cdd] jit-backend-dump} +[7e18c43c2219] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a892 +0 7C010000 -[d0e8438995e] jit-backend-dump} -[d0e8438a3ea] {jit-backend-dump +CODE_DUMP @7f74cbf568ce +0 7C010000 +[7e18c43ca71b] jit-backend-dump} +[7e18c43cae15] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a89c +0 8E010000 -[d0e8438b926] jit-backend-dump} -[d0e8438c2c2] {jit-backend-dump +CODE_DUMP @7f74cbf568d8 +0 8E010000 +[7e18c43cb8f3] jit-backend-dump} +[7e18c43cbe13] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a8af +0 96010000 -[d0e8439bf82] jit-backend-dump} -[d0e8439cf90] {jit-backend-dump +CODE_DUMP @7f74cbf568eb +0 96010000 +[7e18c43cc7d3] jit-backend-dump} +[7e18c43cccf7] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python 
-CODE_DUMP @7fd5e579a8c0 +0 9F010000 -[d0e8439e832] jit-backend-dump} -[d0e8439f210] {jit-backend-dump +CODE_DUMP @7f74cbf568fc +0 9F010000 +[7e18c43cd745] jit-backend-dump} +[7e18c43cdc4f] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a8d3 +0 A4010000 -[d0e843a094a] jit-backend-dump} -[d0e843a1436] {jit-backend-dump +CODE_DUMP @7f74cbf5690f +0 A4010000 +[7e18c43ce623] jit-backend-dump} +[7e18c43cea43] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a90b +0 85010000 -[d0e843a2a14] jit-backend-dump} -[d0e843a34ac] {jit-backend-dump +CODE_DUMP @7f74cbf56947 +0 85010000 +[7e18c43cf30d] jit-backend-dump} +[7e18c43cf73b] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a91d +0 8C010000 -[d0e843a4988] jit-backend-dump} -[d0e843a535a] {jit-backend-dump +CODE_DUMP @7f74cbf56959 +0 8C010000 +[7e18c43cfff9] jit-backend-dump} +[7e18c43d0425] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a92b +0 97010000 -[d0e843a6800] jit-backend-dump} -[d0e843a7484] {jit-backend-dump +CODE_DUMP @7f74cbf56967 +0 97010000 +[7e18c43d0ebd] jit-backend-dump} +[7e18c43d14e9] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a948 +0 AD010000 -[d0e843a896c] jit-backend-dump} -[d0e843a937a] {jit-backend-dump +CODE_DUMP @7f74cbf56984 +0 AD010000 +[7e18c43d1f75] jit-backend-dump} +[7e18c43d24eb] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a973 +0 9B010000 -[d0e843aaa7e] jit-backend-dump} -[d0e843ab552] {jit-backend-dump +CODE_DUMP @7f74cbf569af +0 9B010000 +[7e18c43d2da3] jit-backend-dump} +[7e18c43d31db] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a986 +0 A0010000 -[d0e843acb0c] jit-backend-dump} -[d0e843ad4f6] {jit-backend-dump +CODE_DUMP @7f74cbf569c2 +0 A0010000 +[7e18c43d3aa7] jit-backend-dump} +[7e18c43d3eb7] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a9bd +0 82010000 -[d0e843ae97e] jit-backend-dump} -[d0e843af350] {jit-backend-dump +CODE_DUMP @7f74cbf569f9 +0 82010000 +[7e18c43d4771] jit-backend-dump} +[7e18c43d4bd1] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a9ce +0 8A010000 -[d0e843b07d2] jit-backend-dump} -[d0e843b11f2] {jit-backend-dump +CODE_DUMP @7f74cbf56a0a +0 8A010000 +[7e18c43d5615] jit-backend-dump} +[7e18c43d5bc5] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a9eb +0 A2010000 -[d0e843b27b2] jit-backend-dump} -[d0e843b396a] jit-backend} -[d0e843b50e6] {jit-log-opt-loop -# Loop 1 : loop with 92 ops +CODE_DUMP @7f74cbf56a27 +0 A2010000 +[7e18c43d6569] jit-backend-dump} +[7e18c43d6f39] jit-backend} +[7e18c43d8261] {jit-log-opt-loop +# Loop 1 ( #15 LOAD_FAST) : loop with 92 ops [p0, p1] -+54: p2 = getfield_gc(p0, descr=) -+58: p3 = getfield_gc(p0, descr=) -+62: i4 = getfield_gc(p0, descr=) -+70: p5 = getfield_gc(p0, descr=) -+74: i6 = getfield_gc(p0, descr=) -+81: i7 = getfield_gc(p0, descr=) -+85: p8 = getfield_gc(p0, descr=) -+89: p10 = getarrayitem_gc(p8, 0, descr=) -+93: p12 = getarrayitem_gc(p8, 1, descr=) -+97: p14 = getarrayitem_gc(p8, 2, descr=) -+101: p16 = getarrayitem_gc(p8, 3, descr=) -+105: p17 = getfield_gc(p0, descr=) -+105: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, descr=TargetToken(140556656121504)) ++84: p2 = getfield_gc(p0, descr=) ++88: p3 = getfield_gc(p0, descr=) ++92: i4 = getfield_gc(p0, descr=) ++100: p5 = getfield_gc(p0, descr=) ++104: i6 = getfield_gc(p0, descr=) ++111: i7 = 
getfield_gc(p0, descr=) ++115: p8 = getfield_gc(p0, descr=) ++119: p10 = getarrayitem_gc(p8, 0, descr=) ++123: p12 = getarrayitem_gc(p8, 1, descr=) ++127: p14 = getarrayitem_gc(p8, 2, descr=) ++131: p16 = getarrayitem_gc(p8, 3, descr=) ++135: p17 = getfield_gc(p0, descr=) ++135: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, descr=TargetToken(140139616188064)) debug_merge_point(0, ' #15 LOAD_FAST') -+184: guard_value(i6, 2, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16] -+194: guard_nonnull_class(p12, ConstClass(W_IntObject), descr=) [p1, p0, p12, p2, p3, i4, p5, p10, p14, p16] -+212: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p16] ++214: guard_value(i6, 2, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16] ++224: guard_nonnull_class(p12, ConstClass(W_IntObject), descr=) [p1, p0, p12, p2, p3, i4, p5, p10, p14, p16] ++242: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p16] debug_merge_point(0, ' #18 LOAD_CONST') -+222: guard_value(p3, ConstPtr(ptr21), descr=) [p1, p0, p3, p2, p5, p10, p12, p16] ++252: guard_value(p3, ConstPtr(ptr21), descr=) [p1, p0, p3, p2, p5, p10, p12, p16] debug_merge_point(0, ' #21 COMPARE_OP') -+241: i22 = getfield_gc_pure(p12, descr=) -+245: i24 = int_lt(i22, 10000) ++271: i22 = getfield_gc_pure(p12, descr=) ++275: i24 = int_lt(i22, 10000) guard_true(i24, descr=) [p1, p0, p12, p2, p5, p10] debug_merge_point(0, ' #24 POP_JUMP_IF_FALSE') debug_merge_point(0, ' #27 LOAD_FAST') debug_merge_point(0, ' #30 LOAD_CONST') debug_merge_point(0, ' #33 BINARY_MODULO') -+258: i26 = int_eq(i22, -9223372036854775808) ++288: i26 = int_eq(i22, -9223372036854775808) guard_false(i26, descr=) [p1, p0, p12, i22, p2, p5, p10] -+277: i28 = int_mod(i22, 2) -+304: i30 = int_rshift(i28, 63) -+311: i31 = int_and(2, i30) -+320: i32 = int_add(i28, i31) ++307: i28 = int_mod(i22, 2) ++334: i30 = int_rshift(i28, 63) ++341: i31 = int_and(2, i30) ++350: i32 = int_add(i28, i31) debug_merge_point(0, ' #34 POP_JUMP_IF_FALSE') -+323: i33 = int_is_true(i32) ++353: i33 = int_is_true(i32) guard_false(i33, descr=) [p1, p0, p2, p5, p10, p12, i32] debug_merge_point(0, ' #53 LOAD_FAST') -+333: guard_nonnull_class(p10, ConstClass(W_IntObject), descr=) [p1, p0, p10, p2, p5, p12, None] ++363: guard_nonnull_class(p10, ConstClass(W_IntObject), descr=) [p1, p0, p10, p2, p5, p12, None] debug_merge_point(0, ' #56 LOAD_CONST') debug_merge_point(0, ' #59 INPLACE_ADD') -+351: i36 = getfield_gc_pure(p10, descr=) -+355: i38 = int_add_ovf(i36, 1) ++381: i36 = getfield_gc_pure(p10, descr=) ++385: i38 = int_add_ovf(i36, 1) guard_no_overflow(, descr=) [p1, p0, p10, i38, p2, p5, p12, None] debug_merge_point(0, ' #60 STORE_FAST') debug_merge_point(0, ' #63 LOAD_FAST') debug_merge_point(0, ' #66 LOAD_CONST') debug_merge_point(0, ' #69 INPLACE_ADD') -+365: i40 = int_add(i22, 1) ++395: i40 = int_add(i22, 1) debug_merge_point(0, ' #70 STORE_FAST') debug_merge_point(0, ' #73 JUMP_ABSOLUTE') -+376: guard_not_invalidated(, descr=) [p1, p0, p2, p5, i38, i40, None] -+376: i42 = getfield_raw(43780840, descr=) -+384: i44 = int_lt(i42, 0) ++406: guard_not_invalidated(, descr=) [p1, p0, p2, p5, i38, i40, None] ++406: i42 = getfield_raw(43858376, descr=) ++414: i44 = int_lt(i42, 0) guard_false(i44, descr=) [p1, p0, p2, p5, i38, i40, None] debug_merge_point(0, ' #15 LOAD_FAST') -+394: label(p0, p1, p2, p5, i38, i40, descr=TargetToken(140556656121584)) ++424: label(p0, p1, p2, p5, i38, i40, descr=TargetToken(140139616188144)) debug_merge_point(0, ' #15 LOAD_FAST') 
debug_merge_point(0, ' #18 LOAD_CONST') debug_merge_point(0, ' #21 COMPARE_OP') -+424: i45 = int_lt(i40, 10000) ++454: i45 = int_lt(i40, 10000) guard_true(i45, descr=) [p1, p0, p2, p5, i38, i40] debug_merge_point(0, ' #24 POP_JUMP_IF_FALSE') debug_merge_point(0, ' #27 LOAD_FAST') debug_merge_point(0, ' #30 LOAD_CONST') debug_merge_point(0, ' #33 BINARY_MODULO') -+437: i46 = int_eq(i40, -9223372036854775808) ++467: i46 = int_eq(i40, -9223372036854775808) guard_false(i46, descr=) [p1, p0, i40, p2, p5, i38, None] -+456: i47 = int_mod(i40, 2) -+483: i48 = int_rshift(i47, 63) -+490: i49 = int_and(2, i48) -+498: i50 = int_add(i47, i49) ++486: i47 = int_mod(i40, 2) ++513: i48 = int_rshift(i47, 63) ++520: i49 = int_and(2, i48) ++528: i50 = int_add(i47, i49) debug_merge_point(0, ' #34 POP_JUMP_IF_FALSE') -+501: i51 = int_is_true(i50) ++531: i51 = int_is_true(i50) guard_false(i51, descr=) [p1, p0, p2, p5, i50, i38, i40] debug_merge_point(0, ' #53 LOAD_FAST') debug_merge_point(0, ' #56 LOAD_CONST') debug_merge_point(0, ' #59 INPLACE_ADD') -+511: i52 = int_add_ovf(i38, 1) ++541: i52 = int_add_ovf(i38, 1) guard_no_overflow(, descr=) [p1, p0, i52, p2, p5, None, i38, i40] debug_merge_point(0, ' #60 STORE_FAST') debug_merge_point(0, ' #63 LOAD_FAST') debug_merge_point(0, ' #66 LOAD_CONST') debug_merge_point(0, ' #69 INPLACE_ADD') -+528: i53 = int_add(i40, 1) ++558: i53 = int_add(i40, 1) debug_merge_point(0, ' #70 STORE_FAST') debug_merge_point(0, ' #73 JUMP_ABSOLUTE') -+539: guard_not_invalidated(, descr=) [p1, p0, p2, p5, i53, i52, None, None, None] -+539: i54 = getfield_raw(43780840, descr=) -+547: i55 = int_lt(i54, 0) ++569: guard_not_invalidated(, descr=) [p1, p0, p2, p5, i53, i52, None, None, None] ++569: i54 = getfield_raw(43858376, descr=) ++577: i55 = int_lt(i54, 0) guard_false(i55, descr=) [p1, p0, p2, p5, i53, i52, None, None, None] debug_merge_point(0, ' #15 LOAD_FAST') -+557: jump(p0, p1, p2, p5, i52, i53, descr=TargetToken(140556656121584)) -+562: --end of the loop-- -[d0e84448eae] jit-log-opt-loop} -[d0e85322b25] {jit-backend -[d0e85385784] {jit-backend-dump ++587: jump(p0, p1, p2, p5, i52, i53, descr=TargetToken(140139616188144)) ++592: --end of the loop-- +[7e18c441d911] jit-log-opt-loop} +[7e18c44e87e5] {jit-backend +[7e18c452918b] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579abb9 +0 488DA50000000049BB383107E8D57F00004D8B234983C40149BB383107E8D57F00004D89234C8BA558FFFFFF498B54241048C740100000000041813C24288801000F85000000004D8B6424184983FC020F85000000004885D20F8500000000488B9570FFFFFF4C8B6268488B042550C95401488D5020483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C700F8160000488B9570FFFFFF40C68295000000014C8B8D60FFFFFFF64204017417504151524889D74C89CE41BB8045C50041FFD35A4159584C894A50F6420401741D50524889D749BBE09C8DE5D57F00004C89DE41BB8045C50041FFD35A5849BBE09C8DE5D57F00004C895A7840C682960000000048C742600000000048C782800000000200000048C742582A00000041F644240401742641F6442404407518504C89E7BE000000004889C241BBE042C50041FFD358EB0641804C24FF0149894424104889C24883C01048C700F81600004C8B8D30FFFFFF4C89480841F644240401742841F644240440751A52504C89E7BE010000004889C241BBE042C50041FFD3585AEB0641804C24FF01498944241849C74424200000000049C74424280000000049C7442430000000004C89720848891425F05F710141BB503AF20041FFD3B802000000488D65D8415F415E415D415C5B5DC349BB00A079E5D57F000041FFD344403048083961032500000049BB00A079E5D57F000041FFD344403148083961032600000049BB00A079E5D57F000041FFD34440084839610327000000 -[d0e8539120a] jit-backend-dump} 
-[d0e853923bc] {jit-backend-addr -bridge out of Guard 16 has address 7fd5e579abb9 to 7fd5e579adb2 -[d0e85393ad2] jit-backend-addr} -[d0e85394658] {jit-backend-dump +CODE_DUMP @7f74cbf56bf5 +0 488DA50000000049BB68F182CE747F00004D8B234983C40149BB68F182CE747F00004D89234C8BA558FFFFFF498B54241048C740100000000041813C24288801000F85000000004D8B6424184983FC020F85000000004885D20F8500000000488B9570FFFFFF4C8B6268488B0425B0685501488D5020483B1425C8685501761A49BB2D62F5CB747F000041FFD349BBC262F5CB747F000041FFD348891425B068550148C700F8160000488B9570FFFFFF40C68295000000014C8B8D60FFFFFFF64204017417504151524889D74C89CE41BBB0E5C40041FFD35A4159584C894A50F6420401741D50524889D749BBE05C09CC747F00004C89DE41BBB0E5C40041FFD35A5849BBE05C09CC747F00004C895A7840C682960000000048C742600000000048C782800000000200000048C742582A00000041F644240401742641F6442404407518504C89E7BE000000004889C241BB10E3C40041FFD358EB0641804C24FF0149894424104889C24883C01048C700F81600004C8B8D30FFFFFF4C89480841F644240401742841F644240440751A52504C89E7BE010000004889C241BB10E3C40041FFD3585AEB0641804C24FF01498944241849C74424200000000049C74424280000000049C7442430000000004C89720848891425D084720141BBC0BAF20041FFD3B801000000488D65D8415F415E415D415C5B5DC349BB0060F5CB747F000041FFD344403048083961032500000049BB0060F5CB747F000041FFD344403148083961032600000049BB0060F5CB747F000041FFD34440084839610327000000 +[7e18c452f77d] jit-backend-dump} +[7e18c4530253] {jit-backend-addr +bridge out of Guard 16 has address 7f74cbf56bf5 to 7f74cbf56dee +[7e18c4530fcd] jit-backend-addr} +[7e18c453162d] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579abbc +0 A0FEFFFF -[d0e85395d96] jit-backend-dump} -[d0e853968e0] {jit-backend-dump +CODE_DUMP @7f74cbf56bf8 +0 A0FEFFFF +[7e18c4532319] jit-backend-dump} +[7e18c4532965] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579abfc +0 B2010000 -[d0e85397ba5] jit-backend-dump} -[d0e85398401] {jit-backend-dump +CODE_DUMP @7f74cbf56c38 +0 B2010000 +[7e18c45334f7] jit-backend-dump} +[7e18c4533a7b] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579ac0b +0 BC010000 -[d0e8539952c] jit-backend-dump} -[d0e85399d06] {jit-backend-dump +CODE_DUMP @7f74cbf56c47 +0 BC010000 +[7e18c45344c3] jit-backend-dump} +[7e18c453490d] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579ac14 +0 CC010000 -[d0e8539af85] jit-backend-dump} -[d0e8539bb06] {jit-backend-dump +CODE_DUMP @7f74cbf56c50 +0 CC010000 +[7e18c45351f1] jit-backend-dump} +[7e18c4535a93] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a5b1 +0 04060000 -[d0e8539cc6d] jit-backend-dump} -[d0e8539d95b] jit-backend} -[d0e8539e9aa] {jit-log-opt-bridge +CODE_DUMP @7f74cbf565cf +0 22060000 +[7e18c4536359] jit-backend-dump} +[7e18c4536ac9] jit-backend} +[7e18c453732b] {jit-log-opt-bridge # bridge out of Guard 16 with 28 ops [p0, p1, p2, i3, i4, i5, p6, p7, i8, i9] debug_merge_point(0, ' #38 POP_BLOCK') +37: p10 = getfield_gc_pure(p7, descr=) +49: setfield_gc(p2, ConstPtr(ptr11), descr=) -+57: guard_class(p7, 38382184, descr=) [p0, p1, p7, p6, p10, i9, i8] ++57: guard_class(p7, 38433192, descr=) [p0, p1, p7, p6, p10, i8, i9] +71: i13 = getfield_gc_pure(p7, descr=) -+76: guard_value(i13, 2, descr=) [p0, p1, i13, p6, p10, i9, i8] ++76: guard_value(i13, 2, descr=) [p0, p1, i13, p6, p10, i8, i9] debug_merge_point(0, ' #39 LOAD_FAST') debug_merge_point(0, ' #42 RETURN_VALUE') -+86: guard_isnull(p10, descr=) [p0, p1, p10, p6, i9, i8] ++86: guard_isnull(p10, descr=) [p0, p1, p10, p6, i8, i9] +95: 
p15 = getfield_gc(p1, descr=) +106: p16 = getfield_gc(p1, descr=) p18 = new_with_vtable(ConstClass(W_IntObject)) @@ -449,136 +449,136 @@ +300: setfield_gc(p1, 42, descr=) setarrayitem_gc(p15, 0, p18, descr=) p27 = new_with_vtable(ConstClass(W_IntObject)) -+373: setfield_gc(p27, i8, descr=) ++373: setfield_gc(p27, i9, descr=) setarrayitem_gc(p15, 1, p27, descr=) +437: setarrayitem_gc(p15, 2, ConstPtr(ptr30), descr=) +446: setarrayitem_gc(p15, 3, ConstPtr(ptr32), descr=) +455: setarrayitem_gc(p15, 4, ConstPtr(ptr32), descr=) -+464: setfield_gc(p18, i9, descr=) -+468: finish(p18, descr=) ++464: setfield_gc(p18, i8, descr=) ++468: finish(p18, descr=) +505: --end of the loop-- -[d0e853d860a] jit-log-opt-bridge} -[d0e8a56c11b] {jit-backend -[d0e8aaa04c9] {jit-backend-dump +[7e18c4558fdf] jit-log-opt-bridge} +[7e18c4a4b601] {jit-backend +[7e18c4e146bb] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579ae4b +0 488DA50000000049BB503107E8D57F0000498B034883C00149BB503107E8D57F0000498903488B8570FFFFFF4C8B780849BBA8EB8BE5D57F00004D39DF0F85000000004D8B771049BBC0EB8BE5D57F00004D39DE0F850000000041BB10468D0041FFD34C8B78404C8B70504D85F60F85000000004C8B70284983FE000F85000000004C8B342500F584014981FEE02687010F85000000004C8B3425E80A9C024983FE000F8C0000000048898518FFFFFF488B042550C95401488D9048010000483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C700388001004889C24881C09800000048C7008800000048C74008000000004989C64883C01048C7008800000048C74008050000004989C54883C03848C700F81600004989C44883C01048C700F81600004989C24883C01048C700306900004989C14883C01848C700E02900004989C04883C01848C7004083010048896808488BBD18FFFFFFF6470401741E5057415241505241514889C641BB8045C50041FFD341595A4158415A5F5848894740488BB570FFFFFF48896E1848C742700200000049BB803AF8E7D57F00004C895A6048C74258130000004C89722849BBE09C8DE5D57F00004C895A7848C7828000000003000000C78290000000150000004C897A3049BBA8EB8BE5D57F00004C895A0849C7442408010000004D8965104D89551849C741080100000049C74010A0C19F0149BBA03AF8E7D57F00004D8958084D8941104D894D204C896A6848899510FFFFFF48898508FFFFFF48C78578FFFFFF280000004889FE4889D749BB06A479E5D57F000041FFD34883F80274154889C7488BB510FFFFFF41BB50C2940041FFD3EB23488B8510FFFFFF48C7401800000000488B0425F05F710148C70425F05F7101000000004883BD78FFFFFF000F8C0000000048833C25400C9C02000F8500000000488BBD18FFFFFF488B77504885F60F8500000000488B7728488B9510FFFFFF48C74250000000004883FE000F8500000000488B77404C8B6A304C0FB68A94000000F647040174185057564151524C89EE41BB8045C50041FFD35A41595E5F584C896F404D85C90F85000000004C8B8D08FFFFFF49C74108FDFFFFFF8138F81600000F85000000004C8B4808488BBD28FFFFFF4C01CF0F8000000000488B8520FFFFFF4883C0010F80000000004C8B0C25E80A9C024983F9000F8C0000000049BB683107E8D57F00004D8B0B4983C10149BB683107E8D57F00004D890B4881F8102700000F8D0000000049BB00000000000000804C39D80F8400000000B90200000048898500FFFFFF489948F7F94889D048C1FA3F41B9020000004921D14C01C84883F8000F85000000004889F84883C7010F8000000000488B8500FFFFFF4883C0014C8B0C25E80A9C024983F9000F8C000000004889C34889F849BB4CA979E5D57F000041FFE349BB00A079E5D57F000041FFD344003C484C6569032900000049BB00A079E5D57F000041FFD34400383C484C6569032A00000049BB00A079E5D57F000041FFD344003C484C6569032B00000049BB00A079E5D57F000041FFD344400038484C3C156569032C00000049BB00A079E5D57F000041FFD3444000484C3C156569032D00000049BB00A079E5D57F000041FFD3444000484C3C156569032E00000049BB00A079E5D57F000041FFD344400038484C3C156569032F00000049BB00A079E5D57F000041FFD3444000484C3C156569033000000049BB43A079E5D57F000041FFD344406C700074484C6569032800000049BB43A079
E5D57F000041FFD344406C700074484C6569033100000049BB00A079E5D57F000041FFD344401C00701874484C6569033200000049BB00A079E5D57F000041FFD3444000081C74484C6569033300000049BB00A079E5D57F000041FFD344400018081C74484C6569033400000049BB00A079E5D57F000041FFD3444000484C6569033500000049BB00A079E5D57F000041FFD34440001D484C6569033600000049BB00A079E5D57F000041FFD3444001484C1D0769033700000049BB00A079E5D57F000041FFD34440484C011D0707033800000049BB00A079E5D57F000041FFD34440484C011D0707033900000049BB00A079E5D57F000041FFD34440484C011D033A00000049BB00A079E5D57F000041FFD3444001484C071D033B00000049BB00A079E5D57F000041FFD34440484C01791D033C00000049BB00A079E5D57F000041FFD344401D484C077901033D00000049BB00A079E5D57F000041FFD34440484C1D01070707033E00000049BB00A079E5D57F000041FFD34440484C1D01070707033F000000 -[d0e8aad0247] jit-backend-dump} -[d0e8aad14a7] {jit-backend-addr -bridge out of Guard 33 has address 7fd5e579ae4b to 7fd5e579b270 -[d0e8aad346f] jit-backend-addr} -[d0e8aad44a1] {jit-backend-dump +CODE_DUMP @7f74cbf56e87 +0 488DA50000000049BB80F182CE747F0000498B034883C00149BB80F182CE747F0000498903488B8570FFFFFF4C8B780849BB908B07CC747F00004D39DF0F85000000004D8B771049BBA88B07CC747F00004D39DE0F850000000041BB30698D0041FFD34C8B78404C8B70504D85F60F85000000004C8B70284983FE000F85000000004C8B3425403E86014981FE207088010F85000000004C8B3425C8399D024983FE000F8C0000000048898518FFFFFF488B0425B0685501488D9048010000483B1425C8685501761A49BB2D62F5CB747F000041FFD349BBC262F5CB747F000041FFD348891425B068550148C700388001004889C24881C09800000048C7008800000048C74008050000004989C64883C03848C700F81600004989C54883C01048C700F81600004989C44883C01048C700104D00004989C24883C01848C700A82D00004989C14883C01848C7008800000048C74008000000004989C04883C01048C7004083010048896808488BBD18FFFFFFF6470401741E5741515241524150504889C641BBB0E5C40041FFD3584158415A5A41595F48894740488BB570FFFFFF48896E1848C742700200000048C7425813000000C78290000000150000004C897A3049BBE05C09CC747F00004C895A7848C782800000000300000049C74508010000004D896E104D89661849C742080100000049BBC0DA73CE747F00004D89590849C7411000F3A0014D894A104D8956204C8972684C89422849BBE0DA73CE747F00004C895A6049BB908B07CC747F00004C895A0848899510FFFFFF48898508FFFFFF48C78578FFFFFF280000004889FE4889D749BB0664F5CB747F000041FFD34883F80174154889C7488BB510FFFFFF41BB40E9940041FFD3EB23488B8510FFFFFF48C7401800000000488B0425D084720148C70425D0847201000000004883BD78FFFFFF000F8C0000000048833C25203B9D02000F8500000000488BBD18FFFFFF488B77504885F60F8500000000488B7728488B9510FFFFFF48C74250000000004883FE000F8500000000488B77404C8B42304C0FB6B294000000F647040174185641505257504C89C641BBB0E5C40041FFD3585F5A41585E4C8947404D85F60F85000000004C8BB508FFFFFF49C74608FDFFFFFF8138F81600000F85000000004C8B7008488BBD28FFFFFF4C01F70F8000000000488B8520FFFFFF4883C0010F80000000004C8B3425C8399D024983FE000F8C0000000049BB98F182CE747F00004D8B334983C60149BB98F182CE747F00004D89334881F8102700000F8D0000000049BB00000000000000804C39D80F8400000000B90200000048898500FFFFFF489948F7F94889D048C1FA3F41BE020000004921D64C01F04883F8000F85000000004889F84883C7010F8000000000488B8500FFFFFF4883C0014C8B3425C8399D024983FE000F8C000000004889C34889F849BB8869F5CB747F000041FFE349BB0060F5CB747F000041FFD344003C484C6569032900000049BB0060F5CB747F000041FFD34400383C484C6569032A00000049BB0060F5CB747F000041FFD344003C484C6569032B00000049BB0060F5CB747F000041FFD344400038484C3C156569032C00000049BB0060F5CB747F000041FFD3444000484C3C156569032D00000049BB0060F5CB747F000041FFD3444000484C3C156569032E00000049BB0060F5CB747F000041FFD344400038484C3C156569032F00000049BB0060F5CB747F000041FFD3444000484C3C156569033000000
049BB4360F5CB747F000041FFD344406C700074484C6569032800000049BB4360F5CB747F000041FFD344406C700074484C6569033100000049BB0060F5CB747F000041FFD344401C00701874484C6569033200000049BB0060F5CB747F000041FFD3444000081C74484C6569033300000049BB0060F5CB747F000041FFD344400018081C74484C6569033400000049BB0060F5CB747F000041FFD3444000484C6569033500000049BB0060F5CB747F000041FFD34440001D484C6569033600000049BB0060F5CB747F000041FFD3444001484C1D0769033700000049BB0060F5CB747F000041FFD34440484C011D0707033800000049BB0060F5CB747F000041FFD34440484C011D0707033900000049BB0060F5CB747F000041FFD34440484C1D01033A00000049BB0060F5CB747F000041FFD3444001484C1D07033B00000049BB0060F5CB747F000041FFD34440484C011D79033C00000049BB0060F5CB747F000041FFD344401D484C070179033D00000049BB0060F5CB747F000041FFD34440484C1D01070707033E00000049BB0060F5CB747F000041FFD34440484C1D01070707033F000000 +[7e18c4e313b6] jit-backend-dump} +[7e18c4e31efc] {jit-backend-addr +bridge out of Guard 33 has address 7f74cbf56e87 to 7f74cbf572ab +[7e18c4e33270] jit-backend-addr} +[7e18c4e33e6d] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579ae4e +0 70FEFFFF -[d0e8aad635b] jit-backend-dump} -[d0e8aad7675] {jit-backend-dump +CODE_DUMP @7f74cbf56e8a +0 70FEFFFF +[7e18c4e34de2] jit-backend-dump} +[7e18c4e35a12] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579ae8a +0 E2030000 -[d0e8aad8e15] jit-backend-dump} -[d0e8aad98bf] {jit-backend-dump +CODE_DUMP @7f74cbf56ec6 +0 E1030000 +[7e18c4e3657c] jit-backend-dump} +[7e18c4e36a8c] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579aea1 +0 E4030000 -[d0e8aadae37] jit-backend-dump} -[d0e8aadbaf7] {jit-backend-dump +CODE_DUMP @7f74cbf56edd +0 E3030000 +[7e18c4e3749d] jit-backend-dump} +[7e18c4e37a88] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579aebb +0 FD030000 -[d0e8aadd1ad] jit-backend-dump} -[d0e8aaddcb7] {jit-backend-dump +CODE_DUMP @7f74cbf56ef7 +0 FC030000 +[7e18c4e38442] jit-backend-dump} +[7e18c4e389eb] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579aec9 +0 0B040000 -[d0e8aadf319] jit-backend-dump} -[d0e8aadff9d] {jit-backend-dump +CODE_DUMP @7f74cbf56f05 +0 0A040000 +[7e18c4e394da] jit-backend-dump} +[7e18c4e39ace] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579aede +0 2C040000 -[d0e8aae15c9] jit-backend-dump} -[d0e8aae20d3] {jit-backend-dump +CODE_DUMP @7f74cbf56f1a +0 2B040000 +[7e18c4e3a45e] jit-backend-dump} +[7e18c4e3a8e7] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579aef0 +0 36040000 -[d0e8aae361b] jit-backend-dump} -[d0e8aae3fdb] {jit-backend-dump +CODE_DUMP @7f74cbf56f2c +0 35040000 +[7e18c4e3b259] jit-backend-dump} +[7e18c4e3b700] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b0f6 +0 4B020000 -[d0e8aae5427] jit-backend-dump} -[d0e8aae5de1] {jit-backend-dump +CODE_DUMP @7f74cbf57131 +0 4B020000 +[7e18c4e3c07b] jit-backend-dump} +[7e18c4e3c4ef] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b105 +0 58020000 -[d0e8aae722d] jit-backend-dump} -[d0e8aae7bbd] {jit-backend-dump +CODE_DUMP @7f74cbf57140 +0 58020000 +[7e18c4e3cfc9] jit-backend-dump} +[7e18c4e3d572] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b119 +0 60020000 -[d0e8aae92bb] jit-backend-dump} -[d0e8aae9d71] {jit-backend-dump +CODE_DUMP @7f74cbf57154 +0 60020000 +[7e18c4e3e046] jit-backend-dump} +[7e18c4e3e4ea] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP 
@7fd5e579b136 +0 60020000 -[d0e8aaeb2d1] jit-backend-dump} -[d0e8aaebc91] {jit-backend-dump +CODE_DUMP @7f74cbf57171 +0 60020000 +[7e18c4e3ee4d] jit-backend-dump} +[7e18c4e3f2eb] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b171 +0 41020000 -[d0e8aaed0dd] jit-backend-dump} -[d0e8aaeda6d] {jit-backend-dump +CODE_DUMP @7f74cbf571ac +0 41020000 +[7e18c4e3fc4e] jit-backend-dump} +[7e18c4e400da] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b18c +0 43020000 -[d0e8aaeeec5] jit-backend-dump} -[d0e8aaef861] {jit-backend-dump +CODE_DUMP @7f74cbf571c7 +0 43020000 +[7e18c4e40a55] jit-backend-dump} +[7e18c4e40ff8] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b1a0 +0 48020000 -[d0e8aaf0cb3] jit-backend-dump} -[d0e8aaf16df] {jit-backend-dump +CODE_DUMP @7f74cbf571db +0 48020000 +[7e18c4e41b7a] jit-backend-dump} +[7e18c4e42111] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b1b1 +0 51020000 -[d0e8aaf2d29] jit-backend-dump} -[d0e8aaf4049] {jit-backend-dump +CODE_DUMP @7f74cbf571ec +0 51020000 +[7e18c4e42beb] jit-backend-dump} +[7e18c4e434d6] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b1c3 +0 73020000 -[d0e8aaf55af] jit-backend-dump} -[d0e8aaf5f2d] {jit-backend-dump +CODE_DUMP @7f74cbf571fe +0 73020000 +[7e18c4e43e54] jit-backend-dump} +[7e18c4e442e3] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b1ee +0 62020000 -[d0e8aaf737f] jit-backend-dump} -[d0e8aaf7d09] {jit-backend-dump +CODE_DUMP @7f74cbf57229 +0 62020000 +[7e18c4e44c58] jit-backend-dump} +[7e18c4e450d8] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b201 +0 67020000 -[d0e8aaf9161] jit-backend-dump} -[d0e8aaf9ae5] {jit-backend-dump +CODE_DUMP @7f74cbf5723c +0 67020000 +[7e18c4e45e04] jit-backend-dump} +[7e18c4e4638c] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b22f +0 52020000 -[d0e8aafb0c9] jit-backend-dump} -[d0e8aafbbfd] {jit-backend-dump +CODE_DUMP @7f74cbf5726a +0 52020000 +[7e18c4e46eab] jit-backend-dump} +[7e18c4e47322] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b23c +0 5E020000 -[d0e8aafd247] jit-backend-dump} -[d0e8aafdc43] {jit-backend-dump +CODE_DUMP @7f74cbf57277 +0 5E020000 +[7e18c4e47d75] jit-backend-dump} +[7e18c4e4824f] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b259 +0 76020000 -[d0e8aaff0bf] jit-backend-dump} -[d0e8ab0006d] {jit-backend-dump +CODE_DUMP @7f74cbf57294 +0 76020000 +[7e18c4e48bb2] jit-backend-dump} +[7e18c4e493c2] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a9bd +0 8A040000 -[d0e8ab014dd] jit-backend-dump} -[d0e8ab02695] jit-backend} -[d0e8ab03ea1] {jit-log-opt-bridge +CODE_DUMP @7f74cbf569f9 +0 8A040000 +[7e18c4e49d2b] jit-backend-dump} +[7e18c4e4a8fb] jit-backend} +[7e18c4e4b8e2] {jit-log-opt-bridge # bridge out of Guard 33 with 137 ops [p0, p1, p2, p3, i4, i5, i6] debug_merge_point(0, ' #37 LOAD_FAST') @@ -614,428 +614,428 @@ debug_merge_point(1, ' #31 INPLACE_ADD') debug_merge_point(1, ' #32 STORE_FAST') debug_merge_point(1, ' #35 JUMP_ABSOLUTE') -+151: i22 = getfield_raw(43780840, descr=) ++151: i22 = getfield_raw(43858376, descr=) +159: i24 = int_lt(i22, 0) guard_false(i24, descr=) [p0, p1, p12, p2, p3, p13, i14, i5, i6] debug_merge_point(1, ' #19 FOR_ITER') +169: i25 = force_token() -p27 = new_with_vtable(38380152) -p29 = new_array(0, descr=) -p31 = new_array(5, descr=) +p27 = 
new_with_vtable(38431160) +p29 = new_array(5, descr=) +p31 = new_with_vtable(ConstClass(W_IntObject)) p33 = new_with_vtable(ConstClass(W_IntObject)) -p35 = new_with_vtable(ConstClass(W_IntObject)) -p37 = new_with_vtable(38308720) -p39 = new_with_vtable(ConstClass(W_ListObject)) -p41 = new_with_vtable(38380928) +p35 = new_with_vtable(38352528) +p37 = new_with_vtable(ConstClass(W_ListObject)) +p39 = new_array(0, descr=) +p41 = new_with_vtable(38431936) +359: setfield_gc(p41, i14, descr=) setfield_gc(p12, p41, descr=) +410: setfield_gc(p1, i25, descr=) +421: setfield_gc(p27, 2, descr=) -+429: setfield_gc(p27, ConstPtr(ptr43), descr=) -+443: setfield_gc(p27, 19, descr=) -+451: setfield_gc(p27, p29, descr=) -+455: setfield_gc(p27, ConstPtr(ptr45), descr=) -+469: setfield_gc(p27, 3, descr=) -+480: setfield_gc(p27, 21, descr=) -+490: setfield_gc(p27, p13, descr=) -+494: setfield_gc(p27, ConstPtr(ptr8), descr=) -+508: setfield_gc(p33, 1, descr=) -+517: setarrayitem_gc(p31, 0, p33, descr=) -+521: setarrayitem_gc(p31, 1, p35, descr=) -+525: setfield_gc(p37, 1, descr=) -+533: setfield_gc(p39, ConstPtr(ptr52), descr=) -+541: setfield_gc(p39, ConstPtr(ptr53), descr=) -+555: setfield_gc(p37, p39, descr=) -+559: setarrayitem_gc(p31, 2, p37, descr=) -+563: setfield_gc(p27, p31, descr=) -+567: p55 = call_assembler(p27, p12, descr=) ++429: setfield_gc(p27, 19, descr=) ++437: setfield_gc(p27, 21, descr=) ++447: setfield_gc(p27, p13, descr=) ++451: setfield_gc(p27, ConstPtr(ptr45), descr=) ++465: setfield_gc(p27, 3, descr=) ++476: setfield_gc(p31, 1, descr=) ++484: setarrayitem_gc(p29, 0, p31, descr=) ++488: setarrayitem_gc(p29, 1, p33, descr=) ++492: setfield_gc(p35, 1, descr=) ++500: setfield_gc(p37, ConstPtr(ptr51), descr=) ++514: setfield_gc(p37, ConstPtr(ptr52), descr=) ++522: setfield_gc(p35, p37, descr=) ++526: setarrayitem_gc(p29, 2, p35, descr=) ++530: setfield_gc(p27, p29, descr=) ++534: setfield_gc(p27, p39, descr=) ++538: setfield_gc(p27, ConstPtr(ptr54), descr=) ++552: setfield_gc(p27, ConstPtr(ptr8), descr=) ++566: p55 = call_assembler(p27, p12, descr=) guard_not_forced(, descr=) [p0, p1, p12, p27, p55, p41, p2, p3, i5, i6] -+687: guard_no_exception(, descr=) [p0, p1, p12, p27, p55, p41, p2, p3, i5, i6] -+702: p56 = getfield_gc(p12, descr=) -+713: guard_isnull(p56, descr=) [p0, p1, p12, p55, p27, p56, p41, p2, p3, i5, i6] -+722: i57 = getfield_gc(p12, descr=) -+726: setfield_gc(p27, ConstPtr(ptr58), descr=) -+741: i59 = int_is_true(i57) ++686: guard_no_exception(, descr=) [p0, p1, p12, p27, p55, p41, p2, p3, i5, i6] ++701: p56 = getfield_gc(p12, descr=) ++712: guard_isnull(p56, descr=) [p0, p1, p12, p55, p27, p56, p41, p2, p3, i5, i6] ++721: i57 = getfield_gc(p12, descr=) ++725: setfield_gc(p27, ConstPtr(ptr58), descr=) ++740: i59 = int_is_true(i57) guard_false(i59, descr=) [p0, p1, p55, p27, p12, p41, p2, p3, i5, i6] -+751: p60 = getfield_gc(p12, descr=) -+755: p61 = getfield_gc(p27, descr=) -+759: i62 = getfield_gc(p27, descr=) ++750: p60 = getfield_gc(p12, descr=) ++754: p61 = getfield_gc(p27, descr=) ++758: i62 = getfield_gc(p27, descr=) setfield_gc(p12, p61, descr=) -+801: guard_false(i62, descr=) [p0, p1, p55, p60, p27, p12, p41, p2, p3, i5, i6] ++800: guard_false(i62, descr=) [p0, p1, p55, p60, p27, p12, p41, p2, p3, i5, i6] debug_merge_point(0, ' #46 INPLACE_ADD') -+810: setfield_gc(p41, -3, descr=) -+825: guard_class(p55, ConstClass(W_IntObject), descr=) [p0, p1, p55, p2, p3, i5, i6] -+837: i65 = getfield_gc_pure(p55, descr=) -+841: i66 = int_add_ovf(i5, i65) ++809: setfield_gc(p41, -3, 
descr=) ++824: guard_class(p55, ConstClass(W_IntObject), descr=) [p0, p1, p55, p2, p3, i5, i6] ++836: i65 = getfield_gc_pure(p55, descr=) ++840: i66 = int_add_ovf(i5, i65) guard_no_overflow(, descr=) [p0, p1, p55, i66, p2, p3, i5, i6] debug_merge_point(0, ' #47 STORE_FAST') debug_merge_point(0, ' #50 JUMP_FORWARD') debug_merge_point(0, ' #63 LOAD_FAST') debug_merge_point(0, ' #66 LOAD_CONST') debug_merge_point(0, ' #69 INPLACE_ADD') -+857: i68 = int_add_ovf(i6, 1) ++856: i68 = int_add_ovf(i6, 1) guard_no_overflow(, descr=) [p0, p1, i68, p2, p3, i66, None, i6] debug_merge_point(0, ' #70 STORE_FAST') debug_merge_point(0, ' #73 JUMP_ABSOLUTE') -+874: guard_not_invalidated(, descr=) [p0, p1, p2, p3, i68, i66, None, None] -+874: i71 = getfield_raw(43780840, descr=) -+882: i73 = int_lt(i71, 0) ++873: guard_not_invalidated(, descr=) [p0, p1, p2, p3, i68, i66, None, None] ++873: i71 = getfield_raw(43858376, descr=) ++881: i73 = int_lt(i71, 0) guard_false(i73, descr=) [p0, p1, p2, p3, i68, i66, None, None] debug_merge_point(0, ' #15 LOAD_FAST') -+892: label(p1, p0, p2, p3, i66, i68, descr=TargetToken(140556656123584)) ++891: label(p1, p0, p2, p3, i66, i68, descr=TargetToken(140139616190144)) debug_merge_point(0, ' #18 LOAD_CONST') debug_merge_point(0, ' #21 COMPARE_OP') -+922: i75 = int_lt(i68, 10000) -guard_true(i75, descr=) [p0, p1, p2, p3, i68, i66] ++921: i75 = int_lt(i68, 10000) +guard_true(i75, descr=) [p0, p1, p2, p3, i66, i68] debug_merge_point(0, ' #24 POP_JUMP_IF_FALSE') debug_merge_point(0, ' #27 LOAD_FAST') debug_merge_point(0, ' #30 LOAD_CONST') debug_merge_point(0, ' #33 BINARY_MODULO') -+935: i77 = int_eq(i68, -9223372036854775808) -guard_false(i77, descr=) [p0, p1, i68, p2, p3, None, i66] -+954: i79 = int_mod(i68, 2) -+971: i81 = int_rshift(i79, 63) -+978: i82 = int_and(2, i81) -+987: i83 = int_add(i79, i82) ++934: i77 = int_eq(i68, -9223372036854775808) +guard_false(i77, descr=) [p0, p1, i68, p2, p3, i66, None] ++953: i79 = int_mod(i68, 2) ++970: i81 = int_rshift(i79, 63) ++977: i82 = int_and(2, i81) ++986: i83 = int_add(i79, i82) debug_merge_point(0, ' #34 POP_JUMP_IF_FALSE') -+990: i84 = int_is_true(i83) -guard_false(i84, descr=) [p0, p1, p2, p3, i83, i68, i66] ++989: i84 = int_is_true(i83) +guard_false(i84, descr=) [p0, p1, p2, p3, i83, i66, i68] debug_merge_point(0, ' #53 LOAD_FAST') debug_merge_point(0, ' #56 LOAD_CONST') debug_merge_point(0, ' #59 INPLACE_ADD') -+1000: i86 = int_add_ovf(i66, 1) -guard_no_overflow(, descr=) [p0, p1, i86, p2, p3, None, i68, i66] ++999: i86 = int_add_ovf(i66, 1) +guard_no_overflow(, descr=) [p0, p1, i86, p2, p3, None, i66, i68] debug_merge_point(0, ' #60 STORE_FAST') debug_merge_point(0, ' #63 LOAD_FAST') debug_merge_point(0, ' #66 LOAD_CONST') debug_merge_point(0, ' #69 INPLACE_ADD') -+1013: i88 = int_add(i68, 1) ++1012: i88 = int_add(i68, 1) debug_merge_point(0, ' #70 STORE_FAST') debug_merge_point(0, ' #73 JUMP_ABSOLUTE') -+1024: guard_not_invalidated(, descr=) [p0, p1, p2, p3, i86, i88, None, None, None] -+1024: i90 = getfield_raw(43780840, descr=) -+1032: i92 = int_lt(i90, 0) ++1023: guard_not_invalidated(, descr=) [p0, p1, p2, p3, i86, i88, None, None, None] ++1023: i90 = getfield_raw(43858376, descr=) ++1031: i92 = int_lt(i90, 0) guard_false(i92, descr=) [p0, p1, p2, p3, i86, i88, None, None, None] debug_merge_point(0, ' #15 LOAD_FAST') -+1042: jump(p1, p0, p2, p3, i86, i88, descr=TargetToken(140556656121584)) -+1061: --end of the loop-- -[d0e8abe8f9f] jit-log-opt-bridge} -[d0e8adedcd7] {jit-backend-dump ++1041: jump(p1, p0, p2, p3, i86, 
i88, descr=TargetToken(140139616188144)) ++1060: --end of the loop-- +[7e18c4ec4dd6] jit-log-opt-bridge} +[7e18c4fe89a3] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a93a +0 E9A1010000 -[d0e8adf23d5] jit-backend-dump} -[d0e8adf30f5] {jit-backend-dump +CODE_DUMP @7f74cbf56976 +0 E9A1010000 +[7e18c4fec25e] jit-backend-dump} +[7e18c4fec9b1] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579a9dd +0 E994010000 -[d0e8adf4bad] jit-backend-dump} -[d0e8adf568d] {jit-backend-dump +CODE_DUMP @7f74cbf56a19 +0 E994010000 +[7e18c4fed7af] jit-backend-dump} +[7e18c4fedda9] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579aea5 +0 E9F9030000 -[d0e8adf6c77] jit-backend-dump} -[d0e8adf7631] {jit-backend-dump +CODE_DUMP @7f74cbf56ee1 +0 E9F8030000 +[7e18c4fee916] jit-backend-dump} +[7e18c4feee9b] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579aecd +0 E921040000 -[d0e8adf8c51] jit-backend-dump} -[d0e8adf9689] {jit-backend-dump +CODE_DUMP @7f74cbf56f09 +0 E920040000 +[7e18c4fef984] jit-backend-dump} +[7e18c4fefdf2] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b1b5 +0 E966020000 -[d0e8adfac61] jit-backend-dump} -[d0e8adfb5e5] {jit-backend-dump +CODE_DUMP @7f74cbf571f0 +0 E966020000 +[7e18c4ff07dc] jit-backend-dump} +[7e18c4ff0d67] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b24b +0 E968020000 -[d0e8adfcaf7] jit-backend-dump} -[d0e9120bfcd] {jit-backend -[d0e913919c1] {jit-backend-dump +CODE_DUMP @7f74cbf57286 +0 E968020000 +[7e18c4ff1751] jit-backend-dump} +[7e18c53eb6fc] {jit-backend +[7e18c54c6a6b] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b562 +0 488B0425E00A9C024829E0483B0425E0EC4F01760D49BB63A379E5D57F000041FFD3554889E5534154415541564157488DA5000000004C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B4020498B482848898D70FFFFFF498B48304C89BD68FFFFFF4D8B78384889BD60FFFFFF498B78404D8B40484889B558FFFFFF4C89A550FFFFFF4C898D48FFFFFF48899D40FFFFFF48899538FFFFFF48898530FFFFFF4C89BD28FFFFFF4889BD20FFFFFF4C898518FFFFFF49BB803107E8D57F00004D8B034983C00149BB803107E8D57F00004D89034983FA050F85000000008139306900000F85000000004C8B51104D85D20F84000000004C8B4108498B7A10813F702703000F85000000004D8B5208498B7A084D8B7A104D8B52184983F8000F8C000000004D39D00F8D000000004C89C04D0FAFC74889FA4C01C74883C001488941084983FD000F850000000049BB902190E5D57F00004D39DE0F85000000004C8BB560FFFFFF4D8B6E0849BBA8EB8BE5D57F00004D39DD0F85000000004D8B451049BBC0EB8BE5D57F00004D39D80F85000000004C8B2C2500F584014981FDE02687010F850000000048898D10FFFFFF4C899508FFFFFF48899500FFFFFF4889BDF8FEFFFF488985F0FEFFFF41BB2003EE0041FFD348833C25400C9C02000F8500000000488BBD70FFFFFF488B5710813AA0CA01000F8500000000488B57084C8B52084C89D14983C201488985E8FEFFFF488995E0FEFFFF48898DD8FEFFFF4889D74C89D641BB8017790041FFD348833C25400C9C02000F8500000000488B8DE0FEFFFF488B5110488BBDD8FEFFFF4C8B95E8FEFFFFF64204017432F6420440751E57515241524889FE4889D74C89D241BBE042C50041FFD3415A5A595FEB0E5748C1EF074883F7F8480FAB3A5F4C8954FA104C8B1425E80A9C024983FA000F8C0000000049BB983107E8D57F00004D8B334983C60149BB983107E8D57F00004D89334C8BB5F0FEFFFF4C3BB508FFFFFF0F8D000000004D0FAFF74C8B9500FFFFFF4D01F24C8BB5F0FEFFFF4983C601488BBD10FFFFFF4C89770848898DD0FEFFFF4C8995F8FEFFFF4C89D741BB2003EE0041FFD348833C25400C9C02000F85000000004C8B95D0FEFFFF498B4A084889CF4883C1014889BDC8FEFFFF488985C0FEFFFF4C89D74889CE41BB8017790041FFD348833C25400C9C02000F85000000004C8B95D0FEFFFF498B42
10488B8DC8FEFFFF488BBDC0FEFFFFF64004017432F6400440751E41525750514889CE4889FA4889C741BBE042C50041FFD359585F415AEB0E5148C1E9074883F1F8480FAB085948897CC810488B3C25E80A9C024883FF000F8C000000004C89B5F0FEFFFF4C89D1E9CCFEFFFF49BB00A079E5D57F000041FFD3294C484438355055585C60400464686C034000000049BB00A079E5D57F000041FFD34C480444383550585C604064686C034100000049BB00A079E5D57F000041FFD34C48042844383550585C604064686C034200000049BB00A079E5D57F000041FFD34C4804211C2844383550585C604064686C034300000049BB00A079E5D57F000041FFD34C480421293D1D44383550585C604064686C034400000049BB00A079E5D57F000041FFD34C4804213D1D44383550585C604064686C034500000049BB00A079E5D57F000041FFD3354C48443850585C604004686C1D034600000049BB00A079E5D57F000041FFD34C483844505C604004686C1D034700000049BB00A079E5D57F000041FFD34C383444505C604004686C1D034800000049BB00A079E5D57F000041FFD34C38203444505C604004686C1D034900000049BB00A079E5D57F000041FFD34C383444505C604004686C1D034A00000049BB00A079E5D57F000041FFD34C383444505C604004686C1D034B00000049BB43A079E5D57F000041FFD34C380044505C6040706C7D034C00000049BB00A079E5D57F000041FFD34C38081C44505C60706C007D034D00000049BB43A079E5D57F000041FFD34C388D018401880144505C6040706C077D034E00000049BB00A079E5D57F000041FFD34C3844505C6040706C077D034F00000049BB00A079E5D57F000041FFD34C4870393D7944505C60406C7D035000000049BB00A079E5D57F000041FFD34C4844505C60401C6C2907035100000049BB43A079E5D57F000041FFD34C480044505C6040706C7D07035200000049BB43A079E5D57F000041FFD34C4895019801900144505C6040706C7D07035300000049BB00A079E5D57F000041FFD34C4844505C6040706C7D070354000000 -[d0e913c0d91] jit-backend-dump} -[d0e913c2081] {jit-backend-addr -Loop 2 ( #13 FOR_ITER) has address 7fd5e579b598 to 7fd5e579b953 (bootstrap 7fd5e579b562) -[d0e913c4781] jit-backend-addr} -[d0e913c58c1] {jit-backend-dump +CODE_DUMP @7f74cbf5759f +0 
488B0425C0399D024829E0483B0425E08C5001760D49BB6363F5CB747F000041FFD3554889E5534154415541564157488DA50000000049BBB0F182CE747F00004D8B3B4983C70149BBB0F182CE747F00004D893B4C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B4020498B482848899570FFFFFF498B503048899568FFFFFF498B50384889BD60FFFFFF498B78404D8B40484889B558FFFFFF4C89BD50FFFFFF4C89A548FFFFFF4C898D40FFFFFF48899D38FFFFFF48898530FFFFFF48899528FFFFFF4889BD20FFFFFF4C898518FFFFFF49BBC8F182CE747F00004D8B034983C00149BBC8F182CE747F00004D89034983FA050F85000000004C8B9568FFFFFF41813A104D00000F85000000004D8B42104D85C00F8400000000498B7A08498B5010813A302303000F85000000004D8B4008498B5008498B40104D8B40184883FF000F8C000000004C39C70F8D000000004889FB480FAFF84989D14801FA4883C30149895A084983FD000F850000000049BB90C10BCC747F00004D39DE0F85000000004C8BB560FFFFFF4D8B6E0849BB908B07CC747F00004D39DD0F8500000000498B7D1049BBA88B07CC747F00004C39DF0F85000000004C8B2C25403E86014981FD207088010F850000000048899510FFFFFF48898508FFFFFF4C898500FFFFFF4C898DF8FEFFFF48898DF0FEFFFF4889D741BB2087EE0041FFD348833C25203B9D02000F8500000000488B8DF0FEFFFF4C8B491041813960CA01000F85000000004C8B49084D8B41084D89C24983C0014C898DE8FEFFFF488985E0FEFFFF4C8995D8FEFFFF4C89CF4C89C641BBC009790041FFD348833C25203B9D02000F8500000000488B8DE8FEFFFF4C8B5110488B85D8FEFFFF4C8B85E0FEFFFF41F6420401743541F642044075205041504152514C89D74889C64C89C241BB10E3C40041FFD359415A415858EB0E5048C1E8074883F0F8490FAB02584D8944C2104C8B0425C8399D024983F8000F8C0000000049BBE0F182CE747F00004D8B334983C60149BBE0F182CE747F00004D8933483B9D00FFFFFF0F8D000000004989DE480FAF9D08FFFFFF4C8B85F8FEFFFF4901D84983C601488B9D68FFFFFF4C89730848898DD0FEFFFF4C898510FFFFFF4C89C741BB2087EE0041FFD348833C25203B9D02000F85000000004C8B85D0FEFFFF498B48084989CA4883C1014C8995C8FEFFFF488985C0FEFFFF4C89C74889CE41BBC009790041FFD348833C25203B9D02000F8500000000488B85D0FEFFFF4C8B4010488B8DC8FEFFFF4C8B95C0FEFFFF41F6400401743541F640044075204152504150514C89C74889CE4C89D241BB10E3C40041FFD359415858415AEB0E5148C1E9074883F1F8490FAB08594D8954C8104C8B1425C8399D024983FA000F8C0000000048899D68FFFFFF4C89F34889C1E9CCFEFFFF49BB0060F5CB747F000041FFD3294C4850383554595C4060044464686C034000000049BB0060F5CB747F000041FFD34C4828503835545C40600464686C034100000049BB0060F5CB747F000041FFD34C482820503835545C40600464686C034200000049BB0060F5CB747F000041FFD34C48281D0820503835545C40600464686C034300000049BB0060F5CB747F000041FFD34C48281D210109503835545C40600464686C034400000049BB0060F5CB747F000041FFD34C48281D0109503835545C40600464686C034500000049BB0060F5CB747F000041FFD3354C485038545C40600428686C09034600000049BB0060F5CB747F000041FFD34C4838505440600428686C09034700000049BB0060F5CB747F000041FFD34C3834505440600428686C09034800000049BB0060F5CB747F000041FFD34C381C34505440600428686C09034900000049BB0060F5CB747F000041FFD34C3834505440600428686C09034A00000049BB0060F5CB747F000041FFD34C3834505440600428686C09034B00000049BB4360F5CB747F000041FFD34C3800505440608001446C71034C00000049BB0060F5CB747F000041FFD34C38240450544060446C0071034D00000049BB4360F5CB747F000041FFD34C388D0188018401505440608001446C0771034E00000049BB0060F5CB747F000041FFD34C38505440608001446C0771034F00000049BB0060F5CB747F000041FFD34C48440D757D5054406080016C71035000000049BB0060F5CB747F000041FFD34C485054406080010C6C2107035100000049BB4360F5CB747F000041FFD34C48005054406080010C6C7107035200000049BB4360F5CB747F000041FFD34C489501980190015054406080010C6C7107035300000049BB0060F5CB747F000041FFD34C485054406080010C6C71070354000000 +[7e18c54e5e5a] jit-backend-dump} +[7e18c54e6a2d] {jit-backend-addr +Loop 2 ( #13 FOR_ITER) has 
address 7f74cbf575d5 to 7f74cbf579be (bootstrap 7f74cbf5759f) +[7e18c54e7e94] jit-backend-addr} +[7e18c54e89fb] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b594 +0 C0FEFFFF -[d0e913c76c1] jit-backend-dump} -[d0e913c8add] {jit-backend-dump +CODE_DUMP @7f74cbf575d1 +0 C0FEFFFF +[7e18c54e9883] jit-backend-dump} +[7e18c54ea5f4] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b653 +0 FC020000 -[d0e913ca157] jit-backend-dump} -[d0e913caccd] {jit-backend-dump +CODE_DUMP @7f74cbf576ae +0 0C030000 +[7e18c54eb1d9] jit-backend-dump} +[7e18c54eb6d1] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b65f +0 12030000 -[d0e913cc3a1] jit-backend-dump} -[d0e913cce15] {jit-backend-dump +CODE_DUMP @7f74cbf576c2 +0 1A030000 +[7e18c54ec22f] jit-backend-dump} +[7e18c54ec817] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b66c +0 25030000 -[d0e913ce351] jit-backend-dump} -[d0e913ced8f] {jit-backend-dump +CODE_DUMP @7f74cbf576cf +0 2D030000 +[7e18c54ed42c] jit-backend-dump} +[7e18c54ed8eb] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b680 +0 32030000 -[d0e913d02c5] jit-backend-dump} -[d0e913d0cf1] {jit-backend-dump +CODE_DUMP @7f74cbf576e3 +0 3A030000 +[7e18c54ee2f9] jit-backend-dump} +[7e18c54ee7ca] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b69a +0 3B030000 -[d0e913d2143] jit-backend-dump} -[d0e913d2ccb] {jit-backend-dump +CODE_DUMP @7f74cbf576fd +0 43030000 +[7e18c54ef1b4] jit-backend-dump} +[7e18c54f23d3] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b6a3 +0 56030000 -[d0e913d42fd] jit-backend-dump} -[d0e913d4e31] {jit-backend-dump +CODE_DUMP @7f74cbf57706 +0 5E030000 +[7e18c54f310e] jit-backend-dump} +[7e18c54f36b7] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b6c2 +0 5A030000 -[d0e913d6475] jit-backend-dump} -[d0e913d6ee9] {jit-backend-dump +CODE_DUMP @7f74cbf57725 +0 62030000 +[7e18c54f41b2] jit-backend-dump} +[7e18c54f4737] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b6d5 +0 67030000 -[d0e913d840d] jit-backend-dump} -[d0e913d8db5] {jit-backend-dump +CODE_DUMP @7f74cbf57738 +0 6F030000 +[7e18c54f50b2] jit-backend-dump} +[7e18c54f5574] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b6f3 +0 67030000 -[d0e913da201] jit-backend-dump} -[d0e913dab8b] {jit-backend-dump +CODE_DUMP @7f74cbf57756 +0 6F030000 +[7e18c54f5eef] jit-backend-dump} +[7e18c54f638a] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b70a +0 6E030000 -[d0e913e0f93] jit-backend-dump} -[d0e913e1fe3] {jit-backend-dump +CODE_DUMP @7f74cbf5776d +0 76030000 +[7e18c54f6d05] jit-backend-dump} +[7e18c54f73a7] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b71f +0 96030000 -[d0e913e36cf] jit-backend-dump} -[d0e913e40ad] {jit-backend-dump +CODE_DUMP @7f74cbf57782 +0 9E030000 +[7e18c54f7e18] jit-backend-dump} +[7e18c54f83e2] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b75a +0 79030000 -[d0e913e5601] jit-backend-dump} -[d0e913e5f67] {jit-backend-dump +CODE_DUMP @7f74cbf577c0 +0 7E030000 +[7e18c54f8eb3] jit-backend-dump} +[7e18c54f9465] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b771 +0 7F030000 -[d0e913e73d1] jit-backend-dump} -[d0e913e7d8b] {jit-backend-dump +CODE_DUMP @7f74cbf577d8 +0 84030000 +[7e18c54f9de0] jit-backend-dump} +[7e18c54fa3b6] 
{jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b7b3 +0 5B030000 -[d0e913e9201] jit-backend-dump} -[d0e913e9bc1] {jit-backend-dump +CODE_DUMP @7f74cbf5781a +0 60030000 +[7e18c54fad34] jit-backend-dump} +[7e18c54fb1db] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b81b +0 16030000 -[d0e913eb229] jit-backend-dump} -[d0e913ebd03] {jit-backend-dump +CODE_DUMP @7f74cbf57886 +0 18030000 +[7e18c54fbb5c] jit-backend-dump} +[7e18c54fc0ea] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b84d +0 01030000 -[d0e913ed37d] jit-backend-dump} -[d0e913eddcd] {jit-backend-dump +CODE_DUMP @7f74cbf578b1 +0 0B030000 +[7e18c54fcbe5] jit-backend-dump} +[7e18c54fd227] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b89a +0 F0020000 -[d0e913ef1e3] jit-backend-dump} -[d0e913efb85] {jit-backend-dump +CODE_DUMP @7f74cbf578fe +0 FC020000 +[7e18c54fdc74] jit-backend-dump} +[7e18c54fe136] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b8d8 +0 D0020000 -[d0e913f0f9b] jit-backend-dump} -[d0e913f1925] {jit-backend-dump +CODE_DUMP @7f74cbf5793c +0 DD020000 +[7e18c54feaab] jit-backend-dump} +[7e18c54fef52] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579b940 +0 8B020000 -[d0e913f2dd1] jit-backend-dump} -[d0e913f4313] jit-backend} -[d0e913f5cb1] {jit-log-opt-loop -# Loop 2 : loop with 100 ops +CODE_DUMP @7f74cbf579a8 +0 95020000 +[7e18c54ff8d0] jit-backend-dump} +[7e18c55004fa] jit-backend} +[7e18c5501bb3] {jit-log-opt-loop +# Loop 2 ( #13 FOR_ITER) : loop with 100 ops [p0, p1] -+54: p2 = getfield_gc(p0, descr=) -+58: p3 = getfield_gc(p0, descr=) -+62: i4 = getfield_gc(p0, descr=) -+70: p5 = getfield_gc(p0, descr=) -+74: i6 = getfield_gc(p0, descr=) -+81: i7 = getfield_gc(p0, descr=) -+85: p8 = getfield_gc(p0, descr=) -+89: p10 = getarrayitem_gc(p8, 0, descr=) -+93: p12 = getarrayitem_gc(p8, 1, descr=) -+97: p14 = getarrayitem_gc(p8, 2, descr=) -+101: p16 = getarrayitem_gc(p8, 3, descr=) -+105: p18 = getarrayitem_gc(p8, 4, descr=) -+116: p20 = getarrayitem_gc(p8, 5, descr=) -+127: p22 = getarrayitem_gc(p8, 6, descr=) -+138: p24 = getarrayitem_gc(p8, 7, descr=) -+142: p25 = getfield_gc(p0, descr=) -+142: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, p20, p22, p24, descr=TargetToken(140556696702032)) ++84: p2 = getfield_gc(p0, descr=) ++88: p3 = getfield_gc(p0, descr=) ++92: i4 = getfield_gc(p0, descr=) ++100: p5 = getfield_gc(p0, descr=) ++104: i6 = getfield_gc(p0, descr=) ++111: i7 = getfield_gc(p0, descr=) ++115: p8 = getfield_gc(p0, descr=) ++119: p10 = getarrayitem_gc(p8, 0, descr=) ++123: p12 = getarrayitem_gc(p8, 1, descr=) ++127: p14 = getarrayitem_gc(p8, 2, descr=) ++131: p16 = getarrayitem_gc(p8, 3, descr=) ++135: p18 = getarrayitem_gc(p8, 4, descr=) ++146: p20 = getarrayitem_gc(p8, 5, descr=) ++157: p22 = getarrayitem_gc(p8, 6, descr=) ++168: p24 = getarrayitem_gc(p8, 7, descr=) ++172: p25 = getfield_gc(p0, descr=) ++172: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, p20, p22, p24, descr=TargetToken(140139656776704)) debug_merge_point(0, ' #13 FOR_ITER') -+235: guard_value(i6, 5, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18, p20, p22, p24] -+245: guard_class(p18, 38308720, descr=) [p1, p0, p18, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] -+257: p28 = getfield_gc(p18, descr=) -+261: guard_nonnull(p28, descr=) [p1, p0, p18, p28, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] -+270: i29 = 
getfield_gc(p18, descr=) -+274: p30 = getfield_gc(p28, descr=) -+278: guard_class(p30, 38488496, descr=) [p1, p0, p18, i29, p30, p28, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] -+290: p32 = getfield_gc(p28, descr=) -+294: i33 = getfield_gc_pure(p32, descr=) -+298: i34 = getfield_gc_pure(p32, descr=) -+302: i35 = getfield_gc_pure(p32, descr=) -+306: i37 = int_lt(i29, 0) ++265: guard_value(i6, 5, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18, p20, p22, p24] ++275: guard_class(p18, 38352528, descr=) [p1, p0, p18, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++295: p28 = getfield_gc(p18, descr=) ++299: guard_nonnull(p28, descr=) [p1, p0, p18, p28, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++308: i29 = getfield_gc(p18, descr=) ++312: p30 = getfield_gc(p28, descr=) ++316: guard_class(p30, 38538416, descr=) [p1, p0, p18, i29, p30, p28, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] ++328: p32 = getfield_gc(p28, descr=) ++332: i33 = getfield_gc_pure(p32, descr=) ++336: i34 = getfield_gc_pure(p32, descr=) ++340: i35 = getfield_gc_pure(p32, descr=) ++344: i37 = int_lt(i29, 0) guard_false(i37, descr=) [p1, p0, p18, i29, i35, i34, i33, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] -+316: i38 = int_ge(i29, i35) ++354: i38 = int_ge(i29, i35) guard_false(i38, descr=) [p1, p0, p18, i29, i34, i33, p2, p3, i4, p5, p10, p12, p14, p16, p20, p22, p24] -+325: i39 = int_mul(i29, i34) -+332: i40 = int_add(i33, i39) -+338: i42 = int_add(i29, 1) -+342: setfield_gc(p18, i42, descr=) -+346: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p16, p18, p22, p24, i40] ++363: i39 = int_mul(i29, i34) ++370: i40 = int_add(i33, i39) ++376: i42 = int_add(i29, 1) ++380: setfield_gc(p18, i42, descr=) ++384: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p16, p18, p22, p24, i40] debug_merge_point(0, ' #16 STORE_FAST') debug_merge_point(0, ' #19 LOAD_GLOBAL') -+356: guard_value(p3, ConstPtr(ptr44), descr=) [p1, p0, p3, p2, p5, p12, p14, p16, p18, p22, p24, i40] -+375: p45 = getfield_gc(p0, descr=) -+386: guard_value(p45, ConstPtr(ptr46), descr=) [p1, p0, p45, p2, p5, p12, p14, p16, p18, p22, p24, i40] -+405: p47 = getfield_gc(p45, descr=) -+409: guard_value(p47, ConstPtr(ptr48), descr=) [p1, p0, p47, p45, p2, p5, p12, p14, p16, p18, p22, p24, i40] -+428: guard_not_invalidated(, descr=) [p1, p0, p45, p2, p5, p12, p14, p16, p18, p22, p24, i40] -+428: p50 = getfield_gc(ConstPtr(ptr49), descr=) -+436: guard_value(p50, ConstPtr(ptr51), descr=) [p1, p0, p50, p2, p5, p12, p14, p16, p18, p22, p24, i40] ++394: guard_value(p3, ConstPtr(ptr44), descr=) [p1, p0, p3, p2, p5, p12, p14, p16, p18, p22, p24, i40] ++413: p45 = getfield_gc(p0, descr=) ++424: guard_value(p45, ConstPtr(ptr46), descr=) [p1, p0, p45, p2, p5, p12, p14, p16, p18, p22, p24, i40] ++443: p47 = getfield_gc(p45, descr=) ++447: guard_value(p47, ConstPtr(ptr48), descr=) [p1, p0, p47, p45, p2, p5, p12, p14, p16, p18, p22, p24, i40] ++466: guard_not_invalidated(, descr=) [p1, p0, p45, p2, p5, p12, p14, p16, p18, p22, p24, i40] ++466: p50 = getfield_gc(ConstPtr(ptr49), descr=) ++474: guard_value(p50, ConstPtr(ptr51), descr=) [p1, p0, p50, p2, p5, p12, p14, p16, p18, p22, p24, i40] debug_merge_point(0, ' #22 LOAD_FAST') debug_merge_point(0, ' #25 CALL_FUNCTION') -+449: p53 = call(ConstClass(ll_int_str__IntegerR_SignedConst_Signed), i40, descr=) -+493: guard_no_exception(, descr=) [p1, p0, p53, p2, p5, p12, p14, p16, p18, p24, i40] ++487: p53 = 
call(ConstClass(ll_int_str__IntegerR_SignedConst_Signed), i40, descr=) ++534: guard_no_exception(, descr=) [p1, p0, p53, p2, p5, p12, p14, p16, p18, p24, i40] debug_merge_point(0, ' #28 LIST_APPEND') -+508: p54 = getfield_gc(p16, descr=) -+519: guard_class(p54, 38399200, descr=) [p1, p0, p54, p16, p2, p5, p12, p14, p18, p24, p53, i40] -+531: p56 = getfield_gc(p16, descr=) -+535: i57 = getfield_gc(p56, descr=) -+539: i59 = int_add(i57, 1) -+546: p60 = getfield_gc(p56, descr=) -+546: i61 = arraylen_gc(p60, descr=) -+546: call(ConstClass(_ll_list_resize_ge_trampoline__v717___simple_call__function__), p56, i59, descr=) -+582: guard_no_exception(, descr=) [p1, p0, i57, p53, p56, p2, p5, p12, p14, p16, p18, p24, None, i40] -+597: p64 = getfield_gc(p56, descr=) ++549: p54 = getfield_gc(p16, descr=) ++560: guard_class(p54, 38450144, descr=) [p1, p0, p54, p16, p2, p5, p12, p14, p18, p24, p53, i40] ++573: p56 = getfield_gc(p16, descr=) ++577: i57 = getfield_gc(p56, descr=) ++581: i59 = int_add(i57, 1) ++588: p60 = getfield_gc(p56, descr=) ++588: i61 = arraylen_gc(p60, descr=) ++588: call(ConstClass(_ll_list_resize_ge_trampoline__v539___simple_call__function__), p56, i59, descr=) ++624: guard_no_exception(, descr=) [p1, p0, i57, p53, p56, p2, p5, p12, p14, p16, p18, p24, None, i40] ++639: p64 = getfield_gc(p56, descr=) setarrayitem_gc(p64, i57, p53, descr=) debug_merge_point(0, ' #31 JUMP_ABSOLUTE') -+683: i66 = getfield_raw(43780840, descr=) -+691: i68 = int_lt(i66, 0) ++729: i66 = getfield_raw(43858376, descr=) ++737: i68 = int_lt(i66, 0) guard_false(i68, descr=) [p1, p0, p2, p5, p12, p14, p16, p18, p24, None, i40] debug_merge_point(0, ' #13 FOR_ITER') -+701: p69 = same_as(ConstPtr(ptr48)) -+701: label(p0, p1, p2, p5, i40, p12, p14, p16, p18, p24, i42, i35, i34, i33, p56, descr=TargetToken(140556696702112)) ++747: p69 = same_as(ConstPtr(ptr48)) ++747: label(p0, p1, p2, p5, i40, p12, p14, p16, p18, p24, i42, i35, i34, i33, p56, descr=TargetToken(140139656776784)) debug_merge_point(0, ' #13 FOR_ITER') -+731: i70 = int_ge(i42, i35) ++777: i70 = int_ge(i42, i35) guard_false(i70, descr=) [p1, p0, p18, i42, i34, i33, p2, p5, p12, p14, p16, p24, i40] -+751: i71 = int_mul(i42, i34) -+755: i72 = int_add(i33, i71) -+765: i73 = int_add(i42, 1) ++790: i71 = int_mul(i42, i34) ++801: i72 = int_add(i33, i71) ++811: i73 = int_add(i42, 1) debug_merge_point(0, ' #16 STORE_FAST') debug_merge_point(0, ' #19 LOAD_GLOBAL') -+776: setfield_gc(p18, i73, descr=) -+787: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p12, p14, p16, p18, p24, i72, None] ++815: setfield_gc(p18, i73, descr=) ++826: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p12, p14, p16, p18, p24, i72, None] debug_merge_point(0, ' #22 LOAD_FAST') debug_merge_point(0, ' #25 CALL_FUNCTION') -+787: p74 = call(ConstClass(ll_int_str__IntegerR_SignedConst_Signed), i72, descr=) -+813: guard_no_exception(, descr=) [p1, p0, p74, p2, p5, p12, p14, p16, p18, p24, i72, None] ++826: p74 = call(ConstClass(ll_int_str__IntegerR_SignedConst_Signed), i72, descr=) ++852: guard_no_exception(, descr=) [p1, p0, p74, p2, p5, p12, p14, p16, p18, p24, i72, None] debug_merge_point(0, ' #28 LIST_APPEND') -+828: i75 = getfield_gc(p56, descr=) -+839: i76 = int_add(i75, 1) -+846: p77 = getfield_gc(p56, descr=) -+846: i78 = arraylen_gc(p77, descr=) -+846: call(ConstClass(_ll_list_resize_ge_trampoline__v717___simple_call__function__), p56, i76, descr=) -+875: guard_no_exception(, descr=) [p1, p0, i75, p74, p56, p2, p5, p12, p14, p16, p18, p24, i72, None] -+890: p79 = getfield_gc(p56, 
descr=) ++867: i75 = getfield_gc(p56, descr=) ++878: i76 = int_add(i75, 1) ++885: p77 = getfield_gc(p56, descr=) ++885: i78 = arraylen_gc(p77, descr=) ++885: call(ConstClass(_ll_list_resize_ge_trampoline__v539___simple_call__function__), p56, i76, descr=) ++914: guard_no_exception(, descr=) [p1, p0, i75, p74, p56, p2, p5, p12, p14, p16, p18, p24, i72, None] ++929: p79 = getfield_gc(p56, descr=) setarrayitem_gc(p79, i75, p74, descr=) debug_merge_point(0, ' #31 JUMP_ABSOLUTE') -+976: i80 = getfield_raw(43780840, descr=) -+984: i81 = int_lt(i80, 0) ++1019: i80 = getfield_raw(43858376, descr=) ++1027: i81 = int_lt(i80, 0) guard_false(i81, descr=) [p1, p0, p2, p5, p12, p14, p16, p18, p24, i72, None] debug_merge_point(0, ' #13 FOR_ITER') -+994: jump(p0, p1, p2, p5, i72, p12, p14, p16, p18, p24, i73, i35, i34, i33, p56, descr=TargetToken(140556696702112)) -+1009: --end of the loop-- -[d0e914beb85] jit-log-opt-loop} -[d0e91ddd13d] {jit-backend -[d0e91e0341d] {jit-backend-dump ++1037: jump(p0, p1, p2, p5, i72, p12, p14, p16, p18, p24, i73, i35, i34, i33, p56, descr=TargetToken(140139656776784)) ++1055: --end of the loop-- +[7e18c5571e6d] jit-log-opt-loop} +[7e18c5a109b1] {jit-backend +[7e18c5a36207] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bbec +0 488B0425E00A9C024829E0483B0425E0EC4F01760D49BB63A379E5D57F000041FFD3554889E5534154415541564157488DA5000000004C8B7E404D0FB67C3F184983FF330F85000000004989FF4883C70148897E1848C74620000000004C897E28B80100000048890425107B540141BB503AF20041FFD3B801000000488D65D8415F415E415D415C5B5DC349BB00A079E5D57F000041FFD31D180355000000 -[d0e91e0a4fd] jit-backend-dump} -[d0e91e0b193] {jit-backend-addr -Loop 3 (StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]) has address 7fd5e579bc22 to 7fd5e579bc77 (bootstrap 7fd5e579bbec) -[d0e91e0ceeb] jit-backend-addr} -[d0e91e0dc5f] {jit-backend-dump +CODE_DUMP @7f74cbf57c5f +0 488B0425C0399D024829E0483B0425E08C5001760D49BB6363F5CB747F000041FFD3554889E5534154415541564157488DA50000000049BBF8F182CE747F00004D8B3B4983C70149BBF8F182CE747F00004D893B4C8B7E404D0FB67C3F184983FF330F85000000004989FF4883C70148897E1848C74620000000004C897E28B80100000048890425901A550141BBC0BAF20041FFD3B802000000488D65D8415F415E415D415C5B5DC349BB0060F5CB747F000041FFD31D180355000000 +[7e18c5a3a5c7] jit-backend-dump} +[7e18c5a3ab23] {jit-backend-addr +Loop 3 (StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]) has address 7f74cbf57c95 to 7f74cbf57d08 (bootstrap 7f74cbf57c5f) +[7e18c5a3bc4d] jit-backend-addr} +[7e18c5a3c4b7] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bc1e +0 70FFFFFF -[d0e91e0f6a5] jit-backend-dump} -[d0e91e1047f] {jit-backend-dump +CODE_DUMP @7f74cbf57c91 +0 70FFFFFF +[7e18c5a3d025] jit-backend-dump} +[7e18c5a3d8d5] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bc32 +0 41000000 -[d0e91e11bcb] jit-backend-dump} -[d0e91e12a71] jit-backend} -[d0e91e13d37] {jit-log-opt-loop -# Loop 3 : entry bridge with 10 ops +CODE_DUMP @7f74cbf57cc3 +0 41000000 +[7e18c5a3e36f] jit-backend-dump} +[7e18c5a3ed71] jit-backend} +[7e18c5a40383] {jit-log-opt-loop +# Loop 3 (StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]) : entry bridge with 10 ops [i0, p1] debug_merge_point(0, 'StrLiteralSearch at 11/51 [17. 8. 3. 1. 1. 1. 1. 51. 0. 19. 51. 
1]') -+54: p2 = getfield_gc(p1, descr=) -+58: i3 = strgetitem(p2, i0) -+64: i5 = int_eq(i3, 51) ++84: p2 = getfield_gc(p1, descr=) ++88: i3 = strgetitem(p2, i0) ++94: i5 = int_eq(i3, 51) guard_true(i5, descr=) [i0, p1] -+74: i7 = int_add(i0, 1) -+81: setfield_gc(p1, i7, descr=) -+85: setfield_gc(p1, ConstPtr(ptr8), descr=) -+93: setfield_gc(p1, i0, descr=) -+97: finish(1, descr=) -+139: --end of the loop-- -[d0e91e29451] jit-log-opt-loop} -[d0e9266372d] {jit-backend -[d0e9268c073] {jit-backend-dump ++104: i7 = int_add(i0, 1) ++111: setfield_gc(p1, i7, descr=) ++115: setfield_gc(p1, ConstPtr(ptr8), descr=) ++123: setfield_gc(p1, i0, descr=) ++127: finish(1, descr=) ++169: --end of the loop-- +[7e18c5a57e6f] jit-log-opt-loop} +[7e18c5ea97f7] {jit-backend +[7e18c5ec15f3] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bc8b +0 488DA50000000049BBB03107E8D57F00004D8B3B4983C70149BBB03107E8D57F00004D893B4883C7014C8B7E084C39FF0F8D000000004C8B76404D0FB6743E184983FE330F84000000004883C7014C39FF0F8C00000000B80000000048890425107B540141BB503AF20041FFD3B801000000488D65D8415F415E415D415C5B5DC349BB00A079E5D57F000041FFD31D18035600000049BB00A079E5D57F000041FFD31D18035700000049BB00A079E5D57F000041FFD31D180358000000 -[d0e9268f9b9] jit-backend-dump} -[d0e9268ff35] {jit-backend-addr -bridge out of Guard 85 has address 7fd5e579bc8b to 7fd5e579bd0c -[d0e9269b5ef] jit-backend-addr} -[d0e9269bef1] {jit-backend-dump +CODE_DUMP @7f74cbf57d1c +0 488DA50000000049BB10F282CE747F00004D8B3B4983C70149BB10F282CE747F00004D893B4883C7014C8B7E084C39FF0F8D000000004C8B76404D0FB6743E184983FE330F84000000004883C7014C39FF0F8C00000000B80000000048890425901A550141BBC0BAF20041FFD3B802000000488D65D8415F415E415D415C5B5DC349BB0060F5CB747F000041FFD31D18035600000049BB0060F5CB747F000041FFD31D18035700000049BB0060F5CB747F000041FFD31D180358000000 +[7e18c5ec4a37] jit-backend-dump} +[7e18c5ec4fa5] {jit-backend-addr +bridge out of Guard 85 has address 7f74cbf57d1c to 7f74cbf57d9d +[7e18c5ec5bf7] jit-backend-addr} +[7e18c5ec6215] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bc8e +0 70FFFFFF -[d0e9269cc89] jit-backend-dump} -[d0e9269d493] {jit-backend-dump +CODE_DUMP @7f74cbf57d1f +0 70FFFFFF +[7e18c5ec6e55] jit-backend-dump} +[7e18c5ec75ff] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bcbd +0 4B000000 -[d0e9269df29] jit-backend-dump} -[d0e9269e3d9] {jit-backend-dump +CODE_DUMP @7f74cbf57d4e +0 4B000000 +[7e18c5ec7fb7] jit-backend-dump} +[7e18c5ec841b] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bcd1 +0 4B000000 -[d0e9269ed2f] jit-backend-dump} -[d0e9269f1a1] {jit-backend-dump +CODE_DUMP @7f74cbf57d62 +0 4B000000 +[7e18c5ec8d3d] jit-backend-dump} +[7e18c5ec9175] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bcde +0 52000000 -[d0e9269fbbf] jit-backend-dump} -[d0e926a03a3] {jit-backend-dump +CODE_DUMP @7f74cbf57d6f +0 52000000 +[7e18c5ec9b15] jit-backend-dump} +[7e18c5eca38b] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bc32 +0 55000000 -[d0e926a0ea5] jit-backend-dump} -[d0e926a1805] jit-backend} -[d0e926a21ab] {jit-log-opt-bridge +CODE_DUMP @7f74cbf57cc3 +0 55000000 +[7e18c5ecada7] jit-backend-dump} +[7e18c5ecb425] jit-backend} +[7e18c5ecbce1] {jit-log-opt-bridge # bridge out of Guard 85 with 13 ops [i0, p1] +37: i3 = int_add(i0, 1) @@ -1050,40 +1050,40 @@ +74: i11 = int_add(i3, 1) +78: i12 = int_lt(i11, i4) guard_false(i12, descr=) [i11, p1] -+87: finish(0, descr=) ++87: finish(0, 
descr=) +129: --end of the loop-- -[d0e926aeccf] jit-log-opt-bridge} -[d0e929bf4d7] {jit-backend -[d0e929d10f1] {jit-backend-dump +[7e18c5ed742b] jit-log-opt-bridge} +[7e18c61e68c7] {jit-backend +[7e18c61fa9a7] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bd48 +0 488DA50000000049BBC83107E8D57F00004D8B3B4983C70149BBC83107E8D57F00004D893B4C8B7E404D0FB67C3F184983FF330F84000000004883C7014C8B7E084C39FF0F8C00000000B80000000048890425107B540141BB503AF20041FFD3B801000000488D65D8415F415E415D415C5B5DC349BB00A079E5D57F000041FFD31D18035900000049BB00A079E5D57F000041FFD31D18035A000000 -[d0e929d4389] jit-backend-dump} -[d0e929d489d] {jit-backend-addr -bridge out of Guard 88 has address 7fd5e579bd48 to 7fd5e579bdbc -[d0e929d52eb] jit-backend-addr} -[d0e929d58ad] {jit-backend-dump +CODE_DUMP @7f74cbf57dd9 +0 488DA50000000049BB28F282CE747F00004D8B3B4983C70149BB28F282CE747F00004D893B4C8B7E404D0FB67C3F184983FF330F84000000004883C7014C8B7E084C39FF0F8C00000000B80000000048890425901A550141BBC0BAF20041FFD3B802000000488D65D8415F415E415D415C5B5DC349BB0060F5CB747F000041FFD31D18035900000049BB0060F5CB747F000041FFD31D18035A000000 +[7e18c61fd977] jit-backend-dump} +[7e18c61fde5d] {jit-backend-addr +bridge out of Guard 88 has address 7f74cbf57dd9 to 7f74cbf57e4d +[7e18c61fe88d] jit-backend-addr} +[7e18c61feedb] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bd4b +0 70FFFFFF -[d0e929d6415] jit-backend-dump} -[d0e929d6a3b] {jit-backend-dump +CODE_DUMP @7f74cbf57ddc +0 70FFFFFF +[7e18c61ff995] jit-backend-dump} +[7e18c61fffad] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bd7d +0 3B000000 -[d0e929d75a1] jit-backend-dump} -[d0e929d7b23] {jit-backend-dump +CODE_DUMP @7f74cbf57e0e +0 3B000000 +[7e18c6200a7b] jit-backend-dump} +[7e18c6200fd3] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bd8e +0 3E000000 -[d0e929d858b] jit-backend-dump} -[d0e929d8b6d] {jit-backend-dump +CODE_DUMP @7f74cbf57e1f +0 3E000000 +[7e18c620198b] jit-backend-dump} +[7e18c6201f69] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bcde +0 66000000 -[d0e929d949b] jit-backend-dump} -[d0e929d9b33] jit-backend} -[d0e929da23b] {jit-log-opt-bridge +CODE_DUMP @7f74cbf57d6f +0 66000000 +[7e18c62027f7] jit-backend-dump} +[7e18c6202e61] jit-backend} +[7e18c62035ab] {jit-log-opt-bridge # bridge out of Guard 88 with 10 ops [i0, p1] debug_merge_point(0, 'StrLiteralSearch at 11/51 [17. 8. 3. 1. 1. 1. 1. 51. 0. 19. 51. 
1]') @@ -1095,812 +1095,812 @@ +61: i8 = getfield_gc_pure(p1, descr=) +65: i9 = int_lt(i7, i8) guard_false(i9, descr=) [i7, p1] -+74: finish(0, descr=) ++74: finish(0, descr=) +116: --end of the loop-- -[d0e929e3651] jit-log-opt-bridge} -[d0e92d4390b] {jit-backend -[d0e92d4e3fd] {jit-backend-dump +[7e18c6213be9] jit-log-opt-bridge} +[7e18c6553871] {jit-backend +[7e18c655e4a9] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bde4 +0 488DA50000000049BBE03107E8D57F0000498B334883C60149BBE03107E8D57F0000498933B80000000048890425107B540141BB503AF20041FFD3B801000000488D65D8415F415E415D415C5B5DC3 -[d0e92d50c27] jit-backend-dump} -[d0e92d51189] {jit-backend-addr -bridge out of Guard 86 has address 7fd5e579bde4 to 7fd5e579be33 -[d0e92d51acb] jit-backend-addr} -[d0e92d52073] {jit-backend-dump +CODE_DUMP @7f74cbf57e75 +0 488DA50000000049BB40F282CE747F0000498B334883C60149BB40F282CE747F0000498933B80000000048890425901A550141BBC0BAF20041FFD3B802000000488D65D8415F415E415D415C5B5DC3 +[7e18c6560b67] jit-backend-dump} +[7e18c6560fd1] {jit-backend-addr +bridge out of Guard 86 has address 7f74cbf57e75 to 7f74cbf57ec4 +[7e18c656191f] jit-backend-addr} +[7e18c6561f0b] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bde7 +0 70FFFFFF -[d0e92d52c5b] jit-backend-dump} -[d0e92d534d5] {jit-backend-dump +CODE_DUMP @7f74cbf57e78 +0 70FFFFFF +[7e18c6562ad1] jit-backend-dump} +[7e18c65631d3] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bcbd +0 23010000 -[d0e92d5af55] jit-backend-dump} -[d0e92d5b7c1] jit-backend} -[d0e92d5bf3f] {jit-log-opt-bridge +CODE_DUMP @7f74cbf57d4e +0 23010000 +[7e18c6563bdb] jit-backend-dump} +[7e18c6564221] jit-backend} +[7e18c65648f7] {jit-log-opt-bridge # bridge out of Guard 86 with 1 ops [i0, p1] -+37: finish(0, descr=) ++37: finish(0, descr=) +79: --end of the loop-- -[d0e92d60e07] jit-log-opt-bridge} -[d0e95d023e2] {jit-backend -[d0e96040dee] {jit-backend-dump +[7e18c6567069] jit-log-opt-bridge} +[7e18c75192d5] {jit-backend +[7e18c76bb2ab] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c015 +0 
488B0425E00A9C024829E0483B0425E0EC4F01760D49BB63A379E5D57F000041FFD3554889E5534154415541564157488DA5000000004C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B4020498B482848898D70FFFFFF498B48304C89BD68FFFFFF4D8B783848899D60FFFFFF498B58404D8B40484889B558FFFFFF4C89A550FFFFFF4C898D48FFFFFF48899540FFFFFF48898538FFFFFF48898D30FFFFFF4C89BD28FFFFFF48899D20FFFFFF4C898518FFFFFF49BBF83107E8D57F00004D8B034983C00149BBF83107E8D57F00004D89034983FA040F85000000004C8B9570FFFFFF41813A306900000F85000000004D8B42104D85C00F8400000000498B5A084D8B781041813FA0CA01000F85000000004D8B40084D8B78084C39FB0F83000000004D8B40104D8B44D8104D85C00F84000000004883C30149895A084983FD000F850000000049BB902190E5D57F00004D39DE0F85000000004C8B770849BBA8EB8BE5D57F00004D39DE0F85000000004D8B6E1049BBC0EB8BE5D57F00004D39DD0F850000000049BB28D78FE5D57F00004D8B3349BB30D78FE5D57F00004D39DE0F85000000004889BD10FFFFFF4C898508FFFFFF41BB10468D0041FFD34C8B40404C8B50504D85D20F85000000004C8B50284983FA000F850000000049BBA02991E5D57F00004D8B134983FA000F8F000000004C8B142500F584014981FAE02687010F850000000049BB58D78FE5D57F00004D8B1341813A98D901000F850000000049BB50D78FE5D57F00004D8B1348898500FFFFFF488B042550C95401488D5040483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C7008800000048C74008030000004889C24883C02848C7004083010048896808488BBD00FFFFFFF6470401741A524150415257504889C641BB8045C50041FFD3585F415A41585A488947404C8BB510FFFFFF49896E1848C74210A0ED820149BBF00090E5D57F00004C895A1849BB800D90E5D57F00004C895A20488985F8FEFFFF4C8995F0FEFFFF4C8985E8FEFFFF488995E0FEFFFF48C78578FFFFFF5B0000004889D741BBF06D920041FFD34883BD78FFFFFF000F8C0000000048833C25400C9C02000F8500000000488985D8FEFFFF488B042550C95401488D5010483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C70038210000488B9510FFFFFF48896A184C8B85E0FEFFFF4C894008488985D0FEFFFF48C78578FFFFFF5C000000488BBDF0FEFFFF4889C6488B95D8FEFFFF41BB8021790041FFD34883BD78FFFFFF000F8C0000000048833C25400C9C02000F85000000004889C249BB00000000000000804C21D84883F8000F8500000000488B85F0FEFFFF488B4018486BD218488B5410184883FA017206813A18E203000F85000000004881FA805371010F8400000000488B8500FFFFFF4C8B40504D85C00F85000000004C8B40284983F8000F85000000004C8B85F8FEFFFF49C74008FDFFFFFF4C8B8508FFFFFF4D8B501049BBFFFFFFFFFFFFFF7F4D39DA0F8D00000000488B7A104C8B72184C8B6F104983FD110F85000000004C8B6F204C89EB4983E5014983FD000F8400000000488B5F384883FB010F8F00000000488B5F184883C3014C8B6CDF104983FD130F85000000004989DD4883C301488B5CDF104983C5024983FA000F8E000000004983FD0B0F85000000004883FB330F850000000049BB102BFAE7D57F00004C39DF0F8500000000488995C8FEFFFF488B042550C95401488D5060483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C700D8ED00004889C24883C04848C7004083010048896808488BBD00FFFFFFF6470401741A505741504152524889C641BB8045C50041FFD35A415A41585F5848894740488B9D10FFFFFF48896B184C8952084C8972104C89424049BB102BFAE7D57F00004C895A38488995C0FEFFFF488985B8FEFFFF48C78578FFFFFF5D000000BF000000004889D649BBECBB79E5D57F000041FFD34883F80174134889C7BE0000000041BB707A950041FFD3EB08488B0425107B54014883BD78FFFFFF000F8C0000000048833C25400C9C02000F85000000004885C00F8500000000488B8500FFFFFF488B78504885FF0F8500000000488B78284883FF000F85000000004C8B85E8FEFFFFF64004017417415057504889C74C89C641BB8045C50041FFD3585F41584C894040488B95B8FEFFFF48C74208FDFFFFFF488B1425E80A9C024883FA000F8C0000000049BB103207E8D57F0000498B134883C20149BB103207E8D57F0000498913488B9570FFFFFF488B5A104885DB0F84000000004C8B72084C8B531041813AA0CA01000F850
0000000488B5B084C8B53084D39D60F8300000000488B5B104A8B5CF3104885DB0F84000000004983C6014C8B9510FFFFFF4D8B6A084C89720849BBA8EB8BE5D57F00004D39DD0F85000000004D8B751049BBC0EB8BE5D57F00004D39DE0F850000000049BB28D78FE5D57F00004D8B2B49BB30D78FE5D57F00004D39DD0F85000000004883FF000F850000000049BBA02991E5D57F0000498B3B4883FF000F8F00000000488B3C2500F584014881FFE02687010F850000000049BB58D78FE5D57F0000498B3B813F98D901000F850000000049BB50D78FE5D57F0000498B3B488985B0FEFFFF488B042550C95401488D5040483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C7008800000048C74008030000004889C24883C02848C70040830100488968084C8BADB0FEFFFF41F6450401741D574150505241524C89EF4889C641BB8045C50041FFD3415A5A5841585F4989454049896A1848C74210A0ED820149BBF00090E5D57F00004C895A1849BB800D90E5D57F00004C895A20488995A8FEFFFF488985A0FEFFFF4C898598FEFFFF48899D08FFFFFF4889BD90FEFFFF48C78578FFFFFF5E0000004889D741BBF06D920041FFD34883BD78FFFFFF000F8C0000000048833C25400C9C02000F850000000048898588FEFFFF488B042550C95401488D5010483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C70038210000488B9510FFFFFF48896A18488BBDA8FEFFFF4889780848898580FEFFFF48C78578FFFFFF5F000000488BBD90FEFFFF4889C6488B9588FEFFFF41BB8021790041FFD34883BD78FFFFFF000F8C0000000048833C25400C9C02000F85000000004889C249BB00000000000000804C21D84883F8000F8500000000488B8590FEFFFF488B4018486BD218488B5410184883FA017206813A18E203000F85000000004881FA805371010F8400000000488B85B0FEFFFF488B78504885FF0F8500000000488B78284883FF000F8500000000488BBDA0FEFFFF48C74708FDFFFFFF488BBD08FFFFFF488B5F1049BBFFFFFFFFFFFFFF7F4C39DB0F8D000000004C8B42104C8B6A184D8B50104983FA110F85000000004D8B50204D89D64983E2014983FA000F84000000004D8B70384983FE010F8F000000004D8B70184983C6014F8B54F0104983FA130F85000000004D89F24983C6014F8B74F0104983C2024883FB000F8E000000004983FA0B0F85000000004983FE330F850000000049BB102BFAE7D57F00004D39D80F850000000048899578FEFFFF488B042550C95401488D5060483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C700D8ED00004889C24883C04848C70040830100488968084C8B85B0FEFFFF41F6400401741957524150504C89C74889C641BB8045C50041FFD35841585A5F498940404C8BB510FFFFFF49896E1848895A084C896A1048897A4049BB102BFAE7D57F00004C895A3848898570FEFFFF48899568FEFFFF48C78578FFFFFF60000000BF000000004889D649BBECBB79E5D57F000041FFD34883F80174134889C7BE0000000041BB707A950041FFD3EB08488B0425107B54014883BD78FFFFFF000F8C0000000048833C25400C9C02000F85000000004885C00F8500000000488B85B0FEFFFF488B78504885FF0F8500000000488B78284883FF000F8500000000488B9598FEFFFFF640040174155752504889C74889D641BB8045C50041FFD3585A5F488950404C8B8570FEFFFF49C74008FDFFFFFF4C8B0425E80A9C024983F8000F8C000000004989D0E96FFAFFFF49BB00A079E5D57F000041FFD3294C1C443835505548585C406064686C036100000049BB00A079E5D57F000041FFD34C1C284438355048585C6064686C036200000049BB00A079E5D57F000041FFD34C1C28204438355048585C6064686C036300000049BB00A079E5D57F000041FFD34C1C280D3C204438355048585C6064686C036400000049BB00A079E5D57F000041FFD34C1C280D3D204438355048585C6064686C036500000049BB00A079E5D57F000041FFD34C1C280D204438355048585C6064686C036600000049BB00A079E5D57F000041FFD3354C1C44385048585C2864686C20036700000049BB00A079E5D57F000041FFD34C1C38445048582864686C20036800000049BB00A079E5D57F000041FFD34C1C38445048582864686C20036900000049BB00A079E5D57F000041FFD34C1C3438445048582864686C20036A00000049BB00A079E5D57F000041FFD34C1C38445048582864686C20036B00000049BB00A079E5D57F000041FFD34C1C38445048582864686C20036C00000049BB00A079E5D57F000041FFD34C7000284450485840201574036D00000049BB00A0
79E5D57F000041FFD34C70004450485840201574036E00000049BB00A079E5D57F000041FFD34C70004450485840201574036F00000049BB00A079E5D57F000041FFD34C7000294450485840201574037000000049BB00A079E5D57F000041FFD34C700028445048584015201574037100000049BB00A079E5D57F000041FFD34C700028445048584015201574037200000049BB43A079E5D57F000041FFD34C70788001017C4450485840158801840174035B00000049BB43A079E5D57F000041FFD34C70788001017C4450485840158801840174037300000049BB43A079E5D57F000041FFD34C707890010180017C445048584015840174035C00000049BB43A079E5D57F000041FFD34C707890010180017C445048584015840174037400000049BB00A079E5D57F000041FFD34C707890010980017C445048584015840174037500000049BB00A079E5D57F000041FFD34C70789001087C445048584015840174037600000049BB00A079E5D57F000041FFD34C70787C445048584008900115840174037700000049BB00A079E5D57F000041FFD34C700008207C445048584007900115840174037800000049BB00A079E5D57F000041FFD34C7000087C445048584007900115840174037900000049BB00A079E5D57F000041FFD34C70004450485840080715840174037A00000049BB00A079E5D57F000041FFD34C700008204450485840070715840107037B00000049BB00A079E5D57F000041FFD34C700008445048584029391C070715840120037C00000049BB00A079E5D57F000041FFD34C7000080D445048584029391C070715840120037D00000049BB00A079E5D57F000041FFD34C700008445048584029391C070715840120037E00000049BB00A079E5D57F000041FFD34C7000080D445048584029391C070715840120037F00000049BB00A079E5D57F000041FFD34C7000080D35445048584029391C070715840120038000000049BB00A079E5D57F000041FFD34C7000080D351C4450485840293907070715840120038100000049BB00A079E5D57F000041FFD34C7000080D1C4450485840293907070715840120038200000049BB00A079E5D57F000041FFD34C7000081C4450485840293907070715840120038300000049BB43A079E5D57F000041FFD34C707894019801019C014450485840840174035D00000049BB43A079E5D57F000041FFD34C707894019801019C014450485840840174038400000049BB00A079E5D57F000041FFD34C7078940198019C014450485840840174038500000049BB00A079E5D57F000041FFD34C70001C9C014450485840840174038600000049BB00A079E5D57F000041FFD34C70009C014450485840840174038700000049BB00A079E5D57F000041FFD34C7044504858400774038800000049BB00A079E5D57F000041FFD34C7044504858400774038900000049BB00A079E5D57F000041FFD34C70080C4450485874038A00000049BB00A079E5D57F000041FFD34C700839280C4450485874038B00000049BB00A079E5D57F000041FFD34C700839290C4450485874038C00000049BB00A079E5D57F000041FFD34C7008390C4450485874038D00000049BB00A079E5D57F000041FFD34C283444504858080C07038E00000049BB00A079E5D57F000041FFD34C28383444504858080C07038F00000049BB00A079E5D57F000041FFD34C283444504858080C07039000000049BB00A079E5D57F000041FFD34C283444504858080C07039100000049BB00A079E5D57F000041FFD34C2800445048580820150C07039200000049BB00A079E5D57F000041FFD34C28001D445048580820150C07039300000049BB00A079E5D57F000041FFD34C28001C44504858081520150C07039400000049BB00A079E5D57F000041FFD34C28001C44504858081520150C07039500000049BB43A079E5D57F000041FFD34C70A001B00101A8014450485840A401AC011574035E00000049BB43A079E5D57F000041FFD34C70A001B00101A8014450485840A401AC011574039600000049BB43A079E5D57F000041FFD34C70A001B80101B001A801445048584074AC0115035F00000049BB43A079E5D57F000041FFD34C70A001B80101B001A801445048584074AC0115039700000049BB00A079E5D57F000041FFD34C70A001B80109B001A801445048584074AC0115039800000049BB00A079E5D57F000041FFD34C70A001B80108A801445048584074AC0115039900000049BB00A079E5D57F000041FFD34C70A001A8014450485840B8010874AC0115039A00000049BB00A079E5D57F000041FFD34C7000081CA8014450485840B8010774AC0115039B00000049BB00A079E5D57F000041FFD34C700008A8014450485840B8010774AC0115039C00000049BB00A079E5D57F000041FFD34C70004450485840070874AC0115039D00000049BB00A079E5D
57F000041FFD34C7000081C4450485840070707AC0115039E00000049BB00A079E5D57F000041FFD34C7000084450485840200D3507071CAC0115039F00000049BB00A079E5D57F000041FFD34C700008394450485840200D3507071CAC011503A000000049BB00A079E5D57F000041FFD34C7000084450485840200D3507071CAC011503A100000049BB00A079E5D57F000041FFD34C700008394450485840200D3507071CAC011503A200000049BB00A079E5D57F000041FFD34C70000839294450485840200D3507071CAC011503A300000049BB00A079E5D57F000041FFD34C7000083929204450485840070D3507071CAC011503A400000049BB00A079E5D57F000041FFD34C70000839204450485840070D3507071CAC011503A500000049BB00A079E5D57F000041FFD34C700008204450485840070D3507071CAC011503A600000049BB43A079E5D57F000041FFD34C70A001BC01C40101C001445048584074AC01036000000049BB43A079E5D57F000041FFD34C70A001BC01C40101C001445048584074AC0103A700000049BB00A079E5D57F000041FFD34C70A001BC01C401C001445048584074AC0103A800000049BB00A079E5D57F000041FFD34C70001CC001445048584074AC0103A900000049BB00A079E5D57F000041FFD34C7000C001445048584074AC0103AA00000049BB00A079E5D57F000041FFD34C704450485840740703AB00000049BB00A079E5D57F000041FFD34C704450485840740703AC000000 -[d0e960a6f32] jit-backend-dump} -[d0e960a8528] {jit-backend-addr -Loop 4 ( #44 FOR_ITER) has address 7fd5e579c04b to 7fd5e579cc54 (bootstrap 7fd5e579c015) -[d0e960aea44] jit-backend-addr} -[d0e960b00a0] {jit-backend-dump +CODE_DUMP @7f74cbf580a8 +0 488B0425C0399D024829E0483B0425E08C5001760D49BB6363F5CB747F000041FFD3554889E5534154415541564157488DA50000000049BB58F282CE747F00004D8B3B4983C70149BB58F282CE747F00004D893B4C8B7F504C8B77784C0FB6AF960000004C8B67604C8B97800000004C8B4F584C8B4768498B5810498B5018498B4020498B48284889B570FFFFFF498B70304C89A568FFFFFF4D8B603848899560FFFFFF498B50404D8B40484C89BD58FFFFFF4C898D50FFFFFF48899D48FFFFFF48898540FFFFFF4889B538FFFFFF4C89A530FFFFFF48899528FFFFFF4C898520FFFFFF49BB70F282CE747F00004D8B034983C00149BB70F282CE747F00004D89034983FA040F85000000008139104D00000F85000000004C8B51104D85D20F84000000004C8B4108498B5210813A60CA01000F85000000004D8B5208498B52084939D00F83000000004D8B52104F8B54C2104D85D20F84000000004983C0014C8941084983FD000F850000000049BB90C10BCC747F00004D39DE0F85000000004C8B770849BB908B07CC747F00004D39DE0F85000000004D8B6E1049BBA88B07CC747F00004D39DD0F850000000049BBA0770BCC747F00004D8B3349BBA8770BCC747F00004D39DE0F85000000004C899518FFFFFF4889BD10FFFFFF48898D08FFFFFF41BB30698D0041FFD3488B4840488B78504885FF0F8500000000488B78284883FF000F850000000049BB18AA0CCC747F0000498B3B4883FF000F8F00000000488B3C25403E86014881FF207088010F850000000049BBD0770BCC747F0000498B3B813F98D901000F850000000049BBC8770BCC747F0000498B3B48898500FFFFFF488B0425B0685501488D5040483B1425C8685501761A49BB2D62F5CB747F000041FFD349BBC262F5CB747F000041FFD348891425B068550148C7008800000048C74008030000004889C24883C02848C70040830100488968084C8B9500FFFFFF41F6420401741B4152505251574C89D74889C641BBB0E5C40041FFD35F595A58415A498942404C8BB510FFFFFF49896E1848C74210E016840149BB20A10BCC747F00004C895A1849BBF0AD0BCC747F00004C895A204889BDF8FEFFFF48898DF0FEFFFF488995E8FEFFFF488985E0FEFFFF48C78578FFFFFF5B0000004889D741BB3091920041FFD34883BD78FFFFFF000F8C0000000048833C25203B9D02000F8500000000488985D8FEFFFF488B0425B0685501488D5010483B1425C8685501761A49BB2D62F5CB747F000041FFD349BBC262F5CB747F000041FFD348891425B068550148C70038200000488B9510FFFFFF48896A184C8BB5E8FEFFFF4C897008488985D0FEFFFF48C78578FFFFFF5C000000488BBDF8FEFFFF4889C6488B95D8FEFFFF41BB3012790041FFD34883BD78FFFFFF000F8C0000000048833C25203B9D02000F85000000004889C249BB00000000000000804C21D84883F8000F8500000000488B85F8FEFFFF488B4018486BD218488B5410184883FA017206813A30DF03000F850
00000004881FA007C72010F8400000000488B8500FFFFFF4C8B70504D85F60F85000000004C8B70284983FE000F85000000004C8BB5E0FEFFFF49C74608FDFFFFFF4C8BB518FFFFFF4D8B561049BBFFFFFFFFFFFFFF7F4D39DA0F8D00000000488B4A10488B7A184C8B69104983FD110F85000000004C8B69204D89E84983E5014983FD000F84000000004C8B41384983F8010F8F000000004C8B41184983C0014E8B6CC1104983FD130F85000000004D89C54983C0014E8B44C1104983C5024983FA000F8E000000004983FD0B0F85000000004983F8330F850000000049BBA0EA75CE747F00004C39D90F8500000000488995C8FEFFFF488B0425B0685501488D5060483B1425C8685501761A49BB2D62F5CB747F000041FFD349BBC262F5CB747F000041FFD348891425B068550148C700189F00004889C24883C04848C7004083010048896808488B8D00FFFFFFF6410401741B4152515750524889CF4889C641BBB0E5C40041FFD35A585F59415A488941404C8B8510FFFFFF498968184C8952084C89724049BBA0EA75CE747F00004C895A3848897A10488995C0FEFFFF488985B8FEFFFF48C78578FFFFFF5D000000BF000000004889D649BB5F7CF5CB747F000041FFD34883F80274134889C7BE0000000041BBA083950041FFD3EB08488B0425901A55014883BD78FFFFFF000F8C0000000048833C25203B9D02000F85000000004885C00F8500000000488B8500FFFFFF4C8B40504D85C00F85000000004C8B40284983F8000F8500000000488B8DF0FEFFFFF64004017417514150504889C74889CE41BBB0E5C40041FFD35841585948894840488B95B8FEFFFF48C74208FDFFFFFF488B1425C8399D024883FA000F8C0000000049BB88F282CE747F0000498B134883C20149BB88F282CE747F0000498913488B9508FFFFFF4C8B72104D85F60F8400000000488B7A084D8B561041813A60CA01000F85000000004D8B76084D8B56084C39D70F83000000004D8B76104D8B74FE104D85F60F84000000004883C7014C8B9510FFFFFF4D8B6A0848897A0849BB908B07CC747F00004D39DD0F8500000000498B7D1049BBA88B07CC747F00004C39DF0F850000000049BBA0770BCC747F00004D8B2B49BBA8770BCC747F00004D39DD0F85000000004983F8000F850000000049BB18AA0CCC747F00004D8B034983F8000F8F000000004C8B0425403E86014981F8207088010F850000000049BBD0770BCC747F00004D8B0341813898D901000F850000000049BBC8770BCC747F00004D8B03488985B0FEFFFF488B0425B0685501488D5040483B1425C8685501761A49BB2D62F5CB747F000041FFD349BBC262F5CB747F000041FFD348891425B068550148C7008800000048C74008030000004889C24883C02848C70040830100488968084C8BADB0FEFFFF41F6450401741D504150415252514C89EF4889C641BBB0E5C40041FFD3595A415A4158584989454049896A1848C74210E016840149BB20A10BCC747F00004C895A1849BBF0AD0BCC747F00004C895A2048898DA8FEFFFF4C89B518FFFFFF488995A0FEFFFF4C898598FEFFFF48898590FEFFFF48C78578FFFFFF5E0000004889D741BB3091920041FFD34883BD78FFFFFF000F8C0000000048833C25203B9D02000F850000000048898588FEFFFF488B0425B0685501488D5010483B1425C8685501761A49BB2D62F5CB747F000041FFD349BBC262F5CB747F000041FFD348891425B068550148C70038200000488B9510FFFFFF48896A184C8B85A0FEFFFF4C89400848898580FEFFFF48C78578FFFFFF5F000000488BBD98FEFFFF4889C6488B9588FEFFFF41BB3012790041FFD34883BD78FFFFFF000F8C0000000048833C25203B9D02000F85000000004889C249BB00000000000000804C21D84883F8000F8500000000488B8598FEFFFF488B4018486BD218488B5410184883FA017206813A30DF03000F85000000004881FA007C72010F8400000000488B85B0FEFFFF4C8B40504D85C00F85000000004C8B40284983F8000F85000000004C8B8590FEFFFF49C74008FDFFFFFF4C8B8518FFFFFF4D8B501049BBFFFFFFFFFFFFFF7F4D39DA0F8D000000004C8B7210488B4A184D8B6E104983FD110F85000000004D8B6E204C89EF4983E5014983FD000F8400000000498B7E384883FF010F8F00000000498B7E184883C7014D8B6CFE104983FD130F85000000004989FD4883C701498B7CFE104983C5024983FA000F8E000000004983FD0B0F85000000004883FF330F850000000049BBA0EA75CE747F00004D39DE0F850000000048899578FEFFFF488B0425B0685501488D5060483B1425C8685501761A49BB2D62F5CB747F000041FFD349BBC262F5CB747F000041FFD348891425B068550148C700189F00004889C24883C04848C70040830100488968084C8BB5B0FEFFFF41F6460401741D504152525141504C89F74889C641
BBB0E5C40041FFD34158595A415A5849894640488BBD10FFFFFF48896F184C8952084C89424049BBA0EA75CE747F00004C895A3848894A1048899570FEFFFF48898568FEFFFF48C78578FFFFFF60000000BF000000004889D649BB5F7CF5CB747F000041FFD34883F80274134889C7BE0000000041BBA083950041FFD3EB08488B0425901A55014883BD78FFFFFF000F8C0000000048833C25203B9D02000F85000000004885C00F8500000000488B85B0FEFFFF488B78504885FF0F8500000000488B78284883FF000F85000000004C8BB5A8FEFFFFF6400401741357504889C74C89F641BBB0E5C40041FFD3585F4C897040488B9568FEFFFF48C74208FDFFFFFF488B1425C8399D024883FA000F8C000000004989F84C89F1E969FAFFFF49BB0060F5CB747F000041FFD329401C4C38354451544858045C606468036100000049BB0060F5CB747F000041FFD3401C044C3835445448585C606468036200000049BB0060F5CB747F000041FFD3401C04284C3835445448585C606468036300000049BB0060F5CB747F000041FFD3401C042108284C3835445448585C606468036400000049BB0060F5CB747F000041FFD3401C042109284C3835445448585C606468036500000049BB0060F5CB747F000041FFD3401C0421284C3835445448585C606468036600000049BB0060F5CB747F000041FFD335401C4C38445448580460646828036700000049BB0060F5CB747F000041FFD3401C384C4454480460646828036800000049BB0060F5CB747F000041FFD3401C384C4454480460646828036900000049BB0060F5CB747F000041FFD3401C34384C4454480460646828036A00000049BB0060F5CB747F000041FFD3401C384C4454480460646828036B00000049BB0060F5CB747F000041FFD3401C384C4454480460646828036C00000049BB0060F5CB747F000041FFD34070001C4C4454487404156C036D00000049BB0060F5CB747F000041FFD34070004C4454487404156C036E00000049BB0060F5CB747F000041FFD34070004C4454487404156C036F00000049BB0060F5CB747F000041FFD34070001D4C4454487404156C037000000049BB0060F5CB747F000041FFD34070001C4C445448741504156C037100000049BB0060F5CB747F000041FFD34070001C4C445448741504156C037200000049BB4360F5CB747F000041FFD34070787C0188014C445448746C8001158401035B00000049BB4360F5CB747F000041FFD34070787C0188014C445448746C8001158401037300000049BB4360F5CB747F000041FFD34070789001017C88014C445448746C800115035C00000049BB4360F5CB747F000041FFD34070789001017C88014C445448746C800115037400000049BB0060F5CB747F000041FFD34070789001097C88014C445448746C800115037500000049BB0060F5CB747F000041FFD340707890010888014C445448746C800115037600000049BB0060F5CB747F000041FFD340707888014C445448749001086C800115037700000049BB0060F5CB747F000041FFD3407000083888014C445448749001076C800115037800000049BB0060F5CB747F000041FFD34070000888014C445448749001076C800115037900000049BB0060F5CB747F000041FFD34070004C4454487407086C800115037A00000049BB0060F5CB747F000041FFD340700008384C44544874070707800115037B00000049BB0060F5CB747F000041FFD3407000084C44544874291D04070738800115037C00000049BB0060F5CB747F000041FFD340700008214C44544874291D04070738800115037D00000049BB0060F5CB747F000041FFD3407000084C44544874291D04070738800115037E00000049BB0060F5CB747F000041FFD340700008214C44544874291D04070738800115037F00000049BB0060F5CB747F000041FFD34070000821354C44544874291D04070738800115038000000049BB0060F5CB747F000041FFD3407000082135044C44544874291D07070738800115038100000049BB0060F5CB747F000041FFD34070000821044C44544874291D07070738800115038200000049BB0060F5CB747F000041FFD340700008044C44544874291D07070738800115038300000049BB4360F5CB747F000041FFD340707898019401019C014C4454487480016C035D00000049BB4360F5CB747F000041FFD340707898019401019C014C4454487480016C038400000049BB0060F5CB747F000041FFD3407078980194019C014C4454487480016C038500000049BB0060F5CB747F000041FFD3407000209C014C4454487480016C038600000049BB0060F5CB747F000041FFD34070009C014C4454487480016C038700000049BB0060F5CB747F000041FFD340704C44544874076C038800000049BB0060F5CB747F000041FFD340704C44544874076C038900000049BB0060F5CB747F000041FFD34
07008384C4454486C038A00000049BB0060F5CB747F000041FFD34070081D28384C4454486C038B00000049BB0060F5CB747F000041FFD34070081D29384C4454486C038C00000049BB0060F5CB747F000041FFD34070081D384C4454486C038D00000049BB0060F5CB747F000041FFD34028344C445448083807038E00000049BB0060F5CB747F000041FFD340281C344C445448083807038F00000049BB0060F5CB747F000041FFD34028344C445448083807039000000049BB0060F5CB747F000041FFD34028344C445448083807039100000049BB0060F5CB747F000041FFD34028004C4454480815043807039200000049BB0060F5CB747F000041FFD3402800214C4454480815043807039300000049BB0060F5CB747F000041FFD3402800204C445448081515043807039400000049BB0060F5CB747F000041FFD3402800204C445448081515043807039500000049BB4360F5CB747F000041FFD34070A001AC0101B0014C4454487415A8016CA401035E00000049BB4360F5CB747F000041FFD34070A001AC0101B0014C4454487415A8016CA401039600000049BB4360F5CB747F000041FFD34070A001B80101AC01B0014C44544874156CA401035F00000049BB4360F5CB747F000041FFD34070A001B80101AC01B0014C44544874156CA401039700000049BB0060F5CB747F000041FFD34070A001B80109AC01B0014C44544874156CA401039800000049BB0060F5CB747F000041FFD34070A001B80108B0014C44544874156CA401039900000049BB0060F5CB747F000041FFD34070A001B0014C44544874B80108156CA401039A00000049BB0060F5CB747F000041FFD34070000820B0014C44544874B80107156CA401039B00000049BB0060F5CB747F000041FFD340700008B0014C44544874B80107156CA401039C00000049BB0060F5CB747F000041FFD34070004C445448740708156CA401039D00000049BB0060F5CB747F000041FFD340700008204C4454487407071507A401039E00000049BB0060F5CB747F000041FFD3407000084C4454487405293807071520A401039F00000049BB0060F5CB747F000041FFD3407000081D4C4454487405293807071520A40103A000000049BB0060F5CB747F000041FFD3407000084C4454487405293807071520A40103A100000049BB0060F5CB747F000041FFD3407000081D4C4454487405293807071520A40103A200000049BB0060F5CB747F000041FFD3407000081D354C4454487405293807071520A40103A300000049BB0060F5CB747F000041FFD3407000081D35384C4454487405290707071520A40103A400000049BB0060F5CB747F000041FFD3407000081D384C4454487405290707071520A40103A500000049BB0060F5CB747F000041FFD340700008384C4454487405290707071520A40103A600000049BB4360F5CB747F000041FFD34070A001C001BC0101C4014C44544874A4016C036000000049BB4360F5CB747F000041FFD34070A001C001BC0101C4014C44544874A4016C03A700000049BB0060F5CB747F000041FFD34070A001C001BC01C4014C44544874A4016C03A800000049BB0060F5CB747F000041FFD34070001CC4014C44544874A4016C03A900000049BB0060F5CB747F000041FFD3407000C4014C44544874A4016C03AA00000049BB0060F5CB747F000041FFD340704C44544874076C03AB00000049BB0060F5CB747F000041FFD340704C44544874076C03AC000000 +[7e18c76f3847] jit-backend-dump} +[7e18c76f45a3] {jit-backend-addr +Loop 4 ( #44 FOR_ITER) has address 7f74cbf580de to 7f74cbf58d04 (bootstrap 7f74cbf580a8) +[7e18c76f5965] jit-backend-addr} +[7e18c76f64a3] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c047 +0 E0FDFFFF -[d0e960b2140] jit-backend-dump} -[d0e960b36be] {jit-backend-dump +CODE_DUMP @7f74cbf580da +0 E0FDFFFF +[7e18c76f717f] jit-backend-dump} +[7e18c76f7d6f] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c106 +0 4A0B0000 -[d0e960b4de0] jit-backend-dump} -[d0e960b5908] {jit-backend-dump +CODE_DUMP @7f74cbf581b0 +0 500B0000 +[7e18c76f8873] jit-backend-dump} +[7e18c76f8de7] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c11a +0 580B0000 -[d0e960b6d96] jit-backend-dump} -[d0e960b775c] {jit-backend-dump +CODE_DUMP @7f74cbf581bc +0 660B0000 +[7e18c76f986d] jit-backend-dump} +[7e18c76f9e19] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c127 +0 
6B0B0000 -[d0e960b8bc6] jit-backend-dump} -[d0e960b9592] {jit-backend-dump +CODE_DUMP @7f74cbf581c9 +0 790B0000 +[7e18c76fa889] jit-backend-dump} +[7e18c76faca9] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c13c +0 770B0000 -[d0e960ba9fc] jit-backend-dump} -[d0e960bb3d4] {jit-backend-dump +CODE_DUMP @7f74cbf581dd +0 860B0000 +[7e18c76fb57f] jit-backend-dump} +[7e18c76fb9e3] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c14d +0 890B0000 -[d0e960bcacc] jit-backend-dump} -[d0e960bd58e] {jit-backend-dump +CODE_DUMP @7f74cbf581ee +0 980B0000 +[7e18c76fc293] jit-backend-dump} +[7e18c76fc6df] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c15f +0 9A0B0000 -[d0e960beb3c] jit-backend-dump} -[d0e960bf4cc] {jit-backend-dump +CODE_DUMP @7f74cbf58200 +0 A90B0000 +[7e18c76fcf77] jit-backend-dump} +[7e18c76fd4f3] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c171 +0 AA0B0000 -[d0e960c0a56] jit-backend-dump} -[d0e960c13d4] {jit-backend-dump +CODE_DUMP @7f74cbf58212 +0 B90B0000 +[7e18c76fdefd] jit-backend-dump} +[7e18c76fe461] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c184 +0 B70B0000 -[d0e960c2838] jit-backend-dump} -[d0e960c31d4] {jit-backend-dump +CODE_DUMP @7f74cbf58225 +0 C60B0000 +[7e18c76fedcf] jit-backend-dump} +[7e18c76ff20d] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c19b +0 BE0B0000 -[d0e960c4638] jit-backend-dump} -[d0e960c514e] {jit-backend-dump +CODE_DUMP @7f74cbf5823c +0 CD0B0000 +[7e18c76ffaaf] jit-backend-dump} +[7e18c76fff13] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c1b2 +0 C50B0000 -[d0e960c67c8] jit-backend-dump} -[d0e960c75ae] {jit-backend-dump +CODE_DUMP @7f74cbf58253 +0 D40B0000 +[7e18c77007b7] jit-backend-dump} +[7e18c7700e13] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c1d2 +0 E20B0000 -[d0e960c8ba4] jit-backend-dump} -[d0e960c9534] {jit-backend-dump +CODE_DUMP @7f74cbf58273 +0 F10B0000 +[7e18c77018db] jit-backend-dump} +[7e18c7701e37] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c1fa +0 D80B0000 -[d0e960ca9aa] jit-backend-dump} -[d0e960cb322] {jit-backend-dump +CODE_DUMP @7f74cbf582a2 +0 E00B0000 +[7e18c7702861] jit-backend-dump} +[7e18c7702dad] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c208 +0 E80B0000 -[d0e960cc738] jit-backend-dump} -[d0e960cd17c] {jit-backend-dump +CODE_DUMP @7f74cbf582b0 +0 F00B0000 +[7e18c77036e7] jit-backend-dump} +[7e18c7703b95] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c21f +0 0B0C0000 -[d0e960ce79c] jit-backend-dump} -[d0e960cf252] {jit-backend-dump +CODE_DUMP @7f74cbf582c7 +0 130C0000 +[7e18c7704437] jit-backend-dump} +[7e18c7704963] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c234 +0 140C0000 -[d0e960d0776] jit-backend-dump} -[d0e960d1112] {jit-backend-dump +CODE_DUMP @7f74cbf582dc +0 1C0C0000 +[7e18c7705205] jit-backend-dump} +[7e18c770570d] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c24e +0 190C0000 -[d0e960d251c] jit-backend-dump} -[d0e960d2fe4] {jit-backend-dump +CODE_DUMP @7f74cbf582f5 +0 220C0000 +[7e18c77061fb] jit-backend-dump} +[7e18c7706727] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c356 +0 300B0000 -[d0e960d441e] jit-backend-dump} -[d0e960d4d96] {jit-backend-dump +CODE_DUMP @7f74cbf583ff +0 370B0000 
+[7e18c77070cd] jit-backend-dump} +[7e18c77075ef] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c365 +0 450B0000 -[d0e960d61d6] jit-backend-dump} -[d0e960d6c98] {jit-backend-dump +CODE_DUMP @7f74cbf5840e +0 4C0B0000 +[7e18c7708011] jit-backend-dump} +[7e18c7708473] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c3fb +0 D30A0000 -[d0e960d8264] jit-backend-dump} -[d0e960d8cd2] {jit-backend-dump +CODE_DUMP @7f74cbf584a4 +0 DA0A0000 +[7e18c7708d15] jit-backend-dump} +[7e18c7709117] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c40a +0 E80A0000 -[d0e960da262] jit-backend-dump} -[d0e960dabe6] {jit-backend-dump +CODE_DUMP @7f74cbf584b3 +0 EF0A0000 +[7e18c7709b1f] jit-backend-dump} +[7e18c770a061] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c424 +0 F20A0000 -[d0e960dc020] jit-backend-dump} -[d0e960dc980] {jit-backend-dump +CODE_DUMP @7f74cbf584cd +0 F90A0000 +[7e18c770cdcf] jit-backend-dump} +[7e18c770d5c7] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c44a +0 F00A0000 -[d0e960dde14] jit-backend-dump} -[d0e960de774] {jit-backend-dump +CODE_DUMP @7f74cbf584f3 +0 F70A0000 +[7e18c770e0a5] jit-backend-dump} +[7e18c770e5eb] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c457 +0 050B0000 -[d0e960dfe0c] jit-backend-dump} -[d0e960e085c] {jit-backend-dump +CODE_DUMP @7f74cbf58500 +0 0D0B0000 +[7e18c770efa1] jit-backend-dump} +[7e18c770f48b] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c46b +0 130B0000 -[d0e960e1e22] jit-backend-dump} -[d0e960e27a0] {jit-backend-dump +CODE_DUMP @7f74cbf58514 +0 1C0B0000 +[7e18c770fdf1] jit-backend-dump} +[7e18c7710259] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c479 +0 290B0000 -[d0e960e3c34] jit-backend-dump} -[d0e960e4648] {jit-backend-dump +CODE_DUMP @7f74cbf58522 +0 330B0000 +[7e18c7710d43] jit-backend-dump} +[7e18c77113b3] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c4a6 +0 3F0B0000 -[d0e960e5ae2] jit-backend-dump} -[d0e960e6442] {jit-backend-dump +CODE_DUMP @7f74cbf5854f +0 4A0B0000 +[7e18c7711dc3] jit-backend-dump} +[7e18c7712201] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c4bc +0 4B0B0000 -[d0e960e78d6] jit-backend-dump} -[d0e960e83d4] {jit-backend-dump +CODE_DUMP @7f74cbf58565 +0 560B0000 +[7e18c7712aa3] jit-backend-dump} +[7e18c7712ea5] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c4d1 +0 5A0B0000 -[d0e960edd8c] jit-backend-dump} -[d0e960eeb9c] {jit-backend-dump +CODE_DUMP @7f74cbf5857a +0 650B0000 +[7e18c7713747] jit-backend-dump} +[7e18c7713b71] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c4df +0 710B0000 -[d0e960f033c] jit-backend-dump} -[d0e960f0e22] {jit-backend-dump +CODE_DUMP @7f74cbf58588 +0 7C0B0000 +[7e18c7714619] jit-backend-dump} +[7e18c7714b7b] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c4f6 +0 7E0B0000 -[d0e960f2370] jit-backend-dump} -[d0e960f2d18] {jit-backend-dump +CODE_DUMP @7f74cbf5859f +0 890B0000 +[7e18c7715595] jit-backend-dump} +[7e18c77159fd] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c510 +0 890B0000 -[d0e960f41b8] jit-backend-dump} -[d0e960f4b5a] {jit-backend-dump +CODE_DUMP @7f74cbf585b9 +0 940B0000 +[7e18c771629d] jit-backend-dump} +[7e18c77166a1] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP 
@7fd5e579c51a +0 A50B0000 -[d0e960f60e4] jit-backend-dump} -[d0e960f6ba0] {jit-backend-dump +CODE_DUMP @7f74cbf585c3 +0 B00B0000 +[7e18c7716f43] jit-backend-dump} +[7e18c771737d] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c524 +0 C20B0000 -[d0e960f81ba] jit-backend-dump} -[d0e960f8c40] {jit-backend-dump +CODE_DUMP @7f74cbf585cd +0 CD0B0000 +[7e18c7717c1f] jit-backend-dump} +[7e18c7718191] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c537 +0 D50B0000 -[d0e960fa0da] jit-backend-dump} -[d0e960faa64] {jit-backend-dump +CODE_DUMP @7f74cbf585e0 +0 E00B0000 +[7e18c7718c27] jit-backend-dump} +[7e18c77191b7] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c63c +0 F50A0000 -[d0e960fbefe] jit-backend-dump} -[d0e960fc888] {jit-backend-dump +CODE_DUMP @7f74cbf586e6 +0 FF0A0000 +[7e18c7719bc5] jit-backend-dump} +[7e18c7719fe3] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c64b +0 0A0B0000 -[d0e960fdd22] jit-backend-dump} -[d0e960fe862] {jit-backend-dump +CODE_DUMP @7f74cbf586f5 +0 140B0000 +[7e18c771a885] jit-backend-dump} +[7e18c771acc3] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c654 +0 250B0000 -[d0e960ffdec] jit-backend-dump} -[d0e961008f6] {jit-backend-dump +CODE_DUMP @7f74cbf586fe +0 2F0B0000 +[7e18c771b565] jit-backend-dump} +[7e18c771b9a3] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c668 +0 340B0000 -[d0e96101f10] jit-backend-dump} -[d0e961028d6] {jit-backend-dump +CODE_DUMP @7f74cbf58712 +0 3E0B0000 +[7e18c771c3ab] jit-backend-dump} +[7e18c771c903] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c676 +0 460B0000 -[d0e96103d6a] jit-backend-dump} -[d0e961047ea] {jit-backend-dump +CODE_DUMP @7f74cbf58720 +0 500B0000 +[7e18c771d2f5] jit-backend-dump} +[7e18c771d855] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c6bf +0 370B0000 -[d0e96105c78] jit-backend-dump} -[d0e96106644] {jit-backend-dump +CODE_DUMP @7f74cbf58769 +0 410B0000 +[7e18c771e0f9] jit-backend-dump} +[7e18c771e627] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c6f1 +0 200B0000 -[d0e96107bd4] jit-backend-dump} -[d0e961086ae] {jit-backend-dump +CODE_DUMP @7f74cbf5879b +0 2A0B0000 +[7e18c771eecb] jit-backend-dump} +[7e18c771f305] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c706 +0 260B0000 -[d0e96109d6a] jit-backend-dump} -[d0e9610a82c] {jit-backend-dump +CODE_DUMP @7f74cbf587b0 +0 300B0000 +[7e18c771fba9] jit-backend-dump} +[7e18c77200e9] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c717 +0 320B0000 -[d0e9610bd86] jit-backend-dump} -[d0e9610c746] {jit-backend-dump +CODE_DUMP @7f74cbf587c1 +0 3C0B0000 +[7e18c7720b01] jit-backend-dump} +[7e18c7720fe9] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c729 +0 3D0B0000 -[d0e9610dbd4] jit-backend-dump} -[d0e9610e5a6] {jit-backend-dump +CODE_DUMP @7f74cbf587d3 +0 470B0000 +[7e18c7721a15] jit-backend-dump} +[7e18c7721e2f] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c74f +0 330B0000 -[d0e9610f9b0] jit-backend-dump} -[d0e96110370] {jit-backend-dump +CODE_DUMP @7f74cbf587f9 +0 3D0B0000 +[7e18c77226d3] jit-backend-dump} +[7e18c7722af9] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c766 +0 380B0000 -[d0e96111ab0] jit-backend-dump} -[d0e961128f0] {jit-backend-dump +CODE_DUMP @7f74cbf58810 +0 
420B0000 +[7e18c772338b] jit-backend-dump} +[7e18c772399d] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c786 +0 510B0000 -[d0e96113e86] jit-backend-dump} -[d0e96114834] {jit-backend-dump +CODE_DUMP @7f74cbf58830 +0 5B0B0000 +[7e18c77243ab] jit-backend-dump} +[7e18c7724905] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c790 +0 630B0000 -[d0e96115c68] jit-backend-dump} -[d0e96116622] {jit-backend-dump +CODE_DUMP @7f74cbf5883a +0 6D0B0000 +[7e18c772539b] jit-backend-dump} +[7e18c77258fb] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c7a7 +0 6A0B0000 -[d0e96117a50] jit-backend-dump} -[d0e961183ec] {jit-backend-dump +CODE_DUMP @7f74cbf58851 +0 740B0000 +[7e18c7726291] jit-backend-dump} +[7e18c77266f7] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c7bc +0 740B0000 -[d0e9611981a] jit-backend-dump} -[d0e9611a2e2] {jit-backend-dump +CODE_DUMP @7f74cbf58866 +0 7E0B0000 +[7e18c7728fe1] jit-backend-dump} +[7e18c772962d] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c7d5 +0 7B0B0000 -[d0e9611b85a] jit-backend-dump} -[d0e9611c394] {jit-backend-dump +CODE_DUMP @7f74cbf58880 +0 840B0000 +[7e18c772a0bb] jit-backend-dump} +[7e18c772a63d] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c8e1 +0 8F0A0000 -[d0e9611d810] jit-backend-dump} -[d0e9611e1d6] {jit-backend-dump +CODE_DUMP @7f74cbf5898c +0 980A0000 +[7e18c772b0a7] jit-backend-dump} +[7e18c772b63b] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c8f0 +0 A60A0000 -[d0e9611f580] jit-backend-dump} -[d0e9611ff5e] {jit-backend-dump +CODE_DUMP @7f74cbf5899b +0 AF0A0000 +[7e18c772c01b] jit-backend-dump} +[7e18c772c479] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c986 +0 360A0000 -[d0e961213c8] jit-backend-dump} -[d0e96121d76] {jit-backend-dump +CODE_DUMP @7f74cbf58a31 +0 3F0A0000 +[7e18c772cd63] jit-backend-dump} +[7e18c772d1ab] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c995 +0 4D0A0000 -[d0e96123486] jit-backend-dump} -[d0e96123f72] {jit-backend-dump +CODE_DUMP @7f74cbf58a40 +0 560A0000 +[7e18c772da2d] jit-backend-dump} +[7e18c772de45] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c9af +0 590A0000 -[d0e96125598] jit-backend-dump} -[d0e96125f58] {jit-backend-dump +CODE_DUMP @7f74cbf58a5a +0 620A0000 +[7e18c772e7bf] jit-backend-dump} +[7e18c772ed23] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c9d5 +0 590A0000 -[d0e961273e6] jit-backend-dump} -[d0e96127d88] {jit-backend-dump +CODE_DUMP @7f74cbf58a80 +0 620A0000 +[7e18c772f7ab] jit-backend-dump} +[7e18c772fd09] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c9e2 +0 700A0000 -[d0e9612ee20] jit-backend-dump} -[d0e9612fa56] {jit-backend-dump +CODE_DUMP @7f74cbf58a8d +0 790A0000 +[7e18c773070d] jit-backend-dump} +[7e18c7730b63] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c9f6 +0 800A0000 -[d0e961310f4] jit-backend-dump} -[d0e96131bda] {jit-backend-dump +CODE_DUMP @7f74cbf58aa1 +0 890A0000 +[7e18c77313fd] jit-backend-dump} +[7e18c7731827] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579ca04 +0 970A0000 -[d0e961331dc] jit-backend-dump} -[d0e96133d6a] {jit-backend-dump +CODE_DUMP @7f74cbf58aaf +0 A00A0000 +[7e18c77320bb] jit-backend-dump} +[7e18c773255d] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP 
@7fd5e579ca31 +0 AE0A0000 -[d0e961351bc] jit-backend-dump} -[d0e96135b76] {jit-backend-dump +CODE_DUMP @7f74cbf58adc +0 B70A0000 +[7e18c7732fa1] jit-backend-dump} +[7e18c77334d7] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579ca47 +0 BA0A0000 -[d0e96137010] jit-backend-dump} -[d0e961379a6] {jit-backend-dump +CODE_DUMP @7f74cbf58af2 +0 C30A0000 +[7e18c7733ef9] jit-backend-dump} +[7e18c773441b] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579ca5c +0 C90A0000 -[d0e96138e3a] jit-backend-dump} -[d0e961397be] {jit-backend-dump +CODE_DUMP @7f74cbf58b07 +0 D20A0000 +[7e18c7734cc1] jit-backend-dump} +[7e18c77351e1] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579ca6a +0 E00A0000 -[d0e9613aeda] jit-backend-dump} -[d0e9613b9ba] {jit-backend-dump +CODE_DUMP @7f74cbf58b15 +0 E90A0000 +[7e18c7735a87] jit-backend-dump} +[7e18c7735eed] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579ca81 +0 ED0A0000 -[d0e9613ceba] jit-backend-dump} -[d0e9613d8c8] {jit-backend-dump +CODE_DUMP @7f74cbf58b2c +0 F60A0000 +[7e18c7736781] jit-backend-dump} +[7e18c7736cdb] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579ca9b +0 F80A0000 -[d0e9613ed5c] jit-backend-dump} -[d0e9613f734] {jit-backend-dump +CODE_DUMP @7f74cbf58b46 +0 010B0000 +[7e18c77376f1] jit-backend-dump} +[7e18c7737c1d] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579caa5 +0 140B0000 -[d0e96140bc2] jit-backend-dump} -[d0e9614158e] {jit-backend-dump +CODE_DUMP @7f74cbf58b50 +0 1D0B0000 +[7e18c7738755] jit-backend-dump} +[7e18c7738b5b] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579caaf +0 310B0000 -[d0e96142a22] jit-backend-dump} -[d0e96143574] {jit-backend-dump +CODE_DUMP @7f74cbf58b5a +0 3A0B0000 +[7e18c7739403] jit-backend-dump} +[7e18c773982b] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579cac2 +0 440B0000 -[d0e96144bf4] jit-backend-dump} -[d0e96145710] {jit-backend-dump +CODE_DUMP @7f74cbf58b6d +0 4D0B0000 +[7e18c773a0d1] jit-backend-dump} +[7e18c773a50d] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579cbc7 +0 640A0000 -[d0e96146c82] jit-backend-dump} -[d0e96147642] {jit-backend-dump +CODE_DUMP @7f74cbf58c76 +0 690A0000 +[7e18c773af55] jit-backend-dump} +[7e18c773b4b5] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579cbd6 +0 7A0A0000 -[d0e96148ad0] jit-backend-dump} -[d0e96149496] {jit-backend-dump +CODE_DUMP @7f74cbf58c85 +0 7F0A0000 +[7e18c773bedd] jit-backend-dump} +[7e18c773c305] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579cbdf +0 960A0000 -[d0e9614a92a] jit-backend-dump} -[d0e9614b3a4] {jit-backend-dump +CODE_DUMP @7f74cbf58c8e +0 9B0A0000 +[7e18c773cbad] jit-backend-dump} +[7e18c773cfeb] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579cbf3 +0 A60A0000 -[d0e9614cade] jit-backend-dump} -[d0e9614d5a0] {jit-backend-dump +CODE_DUMP @7f74cbf58ca2 +0 AB0A0000 +[7e18c773d891] jit-backend-dump} +[7e18c773dce1] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579cc01 +0 B80A0000 -[d0e9614eb06] jit-backend-dump} -[d0e9614f5d4] {jit-backend-dump +CODE_DUMP @7f74cbf58cb0 +0 BD0A0000 +[7e18c773e587] jit-backend-dump} +[7e18c773eb95] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579cc48 +0 AB0A0000 -[d0e96150a7a] jit-backend-dump} -[d0e96151fd4] jit-backend} -[d0e96153a32] {jit-log-opt-loop -# 
Loop 4 : loop with 351 ops +CODE_DUMP @7f74cbf58cf5 +0 B20A0000 +[7e18c773f639] jit-backend-dump} +[7e18c774029d] jit-backend} +[7e18c7741961] {jit-log-opt-loop +# Loop 4 ( #44 FOR_ITER) : loop with 351 ops [p0, p1] -+54: p2 = getfield_gc(p0, descr=) -+58: p3 = getfield_gc(p0, descr=) -+62: i4 = getfield_gc(p0, descr=) -+70: p5 = getfield_gc(p0, descr=) -+74: i6 = getfield_gc(p0, descr=) -+81: i7 = getfield_gc(p0, descr=) -+85: p8 = getfield_gc(p0, descr=) -+89: p10 = getarrayitem_gc(p8, 0, descr=) -+93: p12 = getarrayitem_gc(p8, 1, descr=) -+97: p14 = getarrayitem_gc(p8, 2, descr=) -+101: p16 = getarrayitem_gc(p8, 3, descr=) -+105: p18 = getarrayitem_gc(p8, 4, descr=) -+116: p20 = getarrayitem_gc(p8, 5, descr=) -+127: p22 = getarrayitem_gc(p8, 6, descr=) -+138: p24 = getarrayitem_gc(p8, 7, descr=) -+142: p25 = getfield_gc(p0, descr=) -+142: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, p20, p22, p24, descr=TargetToken(140556696704672)) ++84: p2 = getfield_gc(p0, descr=) ++88: p3 = getfield_gc(p0, descr=) ++92: i4 = getfield_gc(p0, descr=) ++100: p5 = getfield_gc(p0, descr=) ++104: i6 = getfield_gc(p0, descr=) ++111: i7 = getfield_gc(p0, descr=) ++115: p8 = getfield_gc(p0, descr=) ++119: p10 = getarrayitem_gc(p8, 0, descr=) ++123: p12 = getarrayitem_gc(p8, 1, descr=) ++127: p14 = getarrayitem_gc(p8, 2, descr=) ++131: p16 = getarrayitem_gc(p8, 3, descr=) ++135: p18 = getarrayitem_gc(p8, 4, descr=) ++146: p20 = getarrayitem_gc(p8, 5, descr=) ++157: p22 = getarrayitem_gc(p8, 6, descr=) ++168: p24 = getarrayitem_gc(p8, 7, descr=) ++172: p25 = getfield_gc(p0, descr=) ++172: label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, p20, p22, p24, descr=TargetToken(140139656779344)) debug_merge_point(0, ' #44 FOR_ITER') -+235: guard_value(i6, 4, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18, p20, p22, p24] -+245: guard_class(p16, 38308720, descr=) [p1, p0, p16, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] -+265: p28 = getfield_gc(p16, descr=) -+269: guard_nonnull(p28, descr=) [p1, p0, p16, p28, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] -+278: i29 = getfield_gc(p16, descr=) -+282: p30 = getfield_gc(p28, descr=) -+286: guard_class(p30, 38399200, descr=) [p1, p0, p16, i29, p30, p28, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] -+299: p32 = getfield_gc(p28, descr=) -+303: i33 = getfield_gc(p32, descr=) -+307: i34 = uint_ge(i29, i33) ++258: guard_value(i6, 4, descr=) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18, p20, p22, p24] ++268: guard_class(p16, 38352528, descr=) [p1, p0, p16, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] ++280: p28 = getfield_gc(p16, descr=) ++284: guard_nonnull(p28, descr=) [p1, p0, p16, p28, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] ++293: i29 = getfield_gc(p16, descr=) ++297: p30 = getfield_gc(p28, descr=) ++301: guard_class(p30, 38450144, descr=) [p1, p0, p16, i29, p30, p28, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] ++313: p32 = getfield_gc(p28, descr=) ++317: i33 = getfield_gc(p32, descr=) ++321: i34 = uint_ge(i29, i33) guard_false(i34, descr=) [p1, p0, p16, i29, i33, p32, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] -+316: p35 = getfield_gc(p32, descr=) -+320: p36 = getarrayitem_gc(p35, i29, descr=) -+325: guard_nonnull(p36, descr=) [p1, p0, p16, i29, p36, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] -+334: i38 = int_add(i29, 1) -+338: setfield_gc(p16, i38, descr=) -+342: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p16, p20, p22, p24, p36] 
++330: p35 = getfield_gc(p32, descr=) ++334: p36 = getarrayitem_gc(p35, i29, descr=) ++339: guard_nonnull(p36, descr=) [p1, p0, p16, i29, p36, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22, p24] ++348: i38 = int_add(i29, 1) ++352: setfield_gc(p16, i38, descr=) ++356: guard_value(i4, 0, descr=) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p16, p20, p22, p24, p36] debug_merge_point(0, ' #47 STORE_FAST') debug_merge_point(0, ' #50 LOAD_GLOBAL') -+352: guard_value(p3, ConstPtr(ptr40), descr=) [p1, p0, p3, p2, p5, p10, p12, p16, p20, p22, p24, p36] -+371: p41 = getfield_gc(p0, descr=) -+375: guard_value(p41, ConstPtr(ptr42), descr=) [p1, p0, p41, p2, p5, p10, p12, p16, p20, p22, p24, p36] -+394: p43 = getfield_gc(p41, descr=) -+398: guard_value(p43, ConstPtr(ptr44), descr=) [p1, p0, p43, p41, p2, p5, p10, p12, p16, p20, p22, p24, p36] -+417: guard_not_invalidated(, descr=) [p1, p0, p41, p2, p5, p10, p12, p16, p20, p22, p24, p36] ++366: guard_value(p3, ConstPtr(ptr40), descr=) [p1, p0, p3, p2, p5, p10, p12, p16, p20, p22, p24, p36] ++385: p41 = getfield_gc(p0, descr=) ++389: guard_value(p41, ConstPtr(ptr42), descr=) [p1, p0, p41, p2, p5, p10, p12, p16, p20, p22, p24, p36] ++408: p43 = getfield_gc(p41, descr=) ++412: guard_value(p43, ConstPtr(ptr44), descr=) [p1, p0, p43, p41, p2, p5, p10, p12, p16, p20, p22, p24, p36] ++431: guard_not_invalidated(, descr=) [p1, p0, p41, p2, p5, p10, p12, p16, p20, p22, p24, p36] debug_merge_point(0, ' #53 LOOKUP_METHOD') -+417: p46 = getfield_gc(ConstPtr(ptr45), descr=) -+430: guard_value(p46, ConstPtr(ptr47), descr=) [p1, p0, p46, p2, p5, p10, p12, p16, p20, p22, p24, p36] ++431: p46 = getfield_gc(ConstPtr(ptr45), descr=) ++444: guard_value(p46, ConstPtr(ptr47), descr=) [p1, p0, p46, p2, p5, p10, p12, p16, p20, p22, p24, p36] debug_merge_point(0, ' #56 LOAD_CONST') debug_merge_point(0, ' #59 LOAD_FAST') debug_merge_point(0, ' #62 CALL_METHOD') -+449: p49 = call(ConstClass(getexecutioncontext), descr=) -+472: p50 = getfield_gc(p49, descr=) -+476: i51 = force_token() -+476: p52 = getfield_gc(p49, descr=) -+480: guard_isnull(p52, descr=) [p1, p0, p49, p52, p2, p5, p10, p12, p16, p50, i51, p36] -+489: i53 = getfield_gc(p49, descr=) -+493: i54 = int_is_zero(i53) ++463: p49 = call(ConstClass(getexecutioncontext), descr=) ++493: p50 = getfield_gc(p49, descr=) ++497: i51 = force_token() ++497: p52 = getfield_gc(p49, descr=) ++501: guard_isnull(p52, descr=) [p1, p0, p49, p52, p2, p5, p10, p12, p16, p50, i51, p36] ++510: i53 = getfield_gc(p49, descr=) ++514: i54 = int_is_zero(i53) guard_true(i54, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, p50, i51, p36] -debug_merge_point(1, ' #0 LOAD_GLOBAL') -+503: guard_not_invalidated(, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, p50, i51, p36] -debug_merge_point(1, ' #3 LOAD_FAST') -debug_merge_point(1, ' #6 LOAD_FAST') -debug_merge_point(1, ' #9 CALL_FUNCTION') -+503: i56 = getfield_gc(ConstPtr(ptr55), descr=) -+516: i58 = int_ge(0, i56) +debug_merge_point(1, ' #0 LOAD_GLOBAL') ++524: guard_not_invalidated(, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, p50, i51, p36] +debug_merge_point(1, ' #3 LOAD_FAST') +debug_merge_point(1, ' #6 LOAD_FAST') +debug_merge_point(1, ' #9 CALL_FUNCTION') ++524: i56 = getfield_gc(ConstPtr(ptr55), descr=) ++537: i58 = int_ge(0, i56) guard_true(i58, descr=) [p1, p0, p49, i56, p2, p5, p10, p12, p16, p50, i51, p36] -+526: i59 = force_token() -debug_merge_point(2, ' #0 LOAD_GLOBAL') -+526: p61 = getfield_gc(ConstPtr(ptr60), descr=) -+534: guard_value(p61, ConstPtr(ptr62), descr=) [p1, p0, p49, p61, p2, p5, 
p10, p12, p16, i59, p50, i51, p36] -debug_merge_point(2, ' #3 LOAD_FAST') -debug_merge_point(2, ' #6 LOAD_CONST') -debug_merge_point(2, ' #9 BINARY_SUBSCR') -debug_merge_point(2, ' #10 CALL_FUNCTION') -debug_merge_point(2, ' #13 BUILD_TUPLE') -debug_merge_point(2, ' #16 LOAD_FAST') -debug_merge_point(2, ' #19 BINARY_ADD') -debug_merge_point(2, ' #20 STORE_FAST') -debug_merge_point(2, ' #23 LOAD_GLOBAL') -debug_merge_point(2, ' #26 LOOKUP_METHOD') -debug_merge_point(2, ' #29 LOAD_FAST') -debug_merge_point(2, ' #32 CALL_METHOD') -+547: p64 = getfield_gc(ConstPtr(ptr63), descr=) -+560: guard_class(p64, ConstClass(ObjectDictStrategy), descr=) [p1, p0, p49, p64, p2, p5, p10, p12, p16, i59, p50, i51, p36] -+573: p66 = getfield_gc(ConstPtr(ptr63), descr=) -+586: i67 = force_token() ++547: i59 = force_token() +debug_merge_point(2, ' #0 LOAD_GLOBAL') ++547: p61 = getfield_gc(ConstPtr(ptr60), descr=) ++555: guard_value(p61, ConstPtr(ptr62), descr=) [p1, p0, p49, p61, p2, p5, p10, p12, p16, i59, p50, i51, p36] +debug_merge_point(2, ' #3 LOAD_FAST') +debug_merge_point(2, ' #6 LOAD_CONST') +debug_merge_point(2, ' #9 BINARY_SUBSCR') +debug_merge_point(2, ' #10 CALL_FUNCTION') +debug_merge_point(2, ' #13 BUILD_TUPLE') +debug_merge_point(2, ' #16 LOAD_FAST') +debug_merge_point(2, ' #19 BINARY_ADD') +debug_merge_point(2, ' #20 STORE_FAST') +debug_merge_point(2, ' #23 LOAD_GLOBAL') +debug_merge_point(2, ' #26 LOOKUP_METHOD') +debug_merge_point(2, ' #29 LOAD_FAST') +debug_merge_point(2, ' #32 CALL_METHOD') ++568: p64 = getfield_gc(ConstPtr(ptr63), descr=) ++581: guard_class(p64, ConstClass(ObjectDictStrategy), descr=) [p1, p0, p49, p64, p2, p5, p10, p12, p16, i59, p50, i51, p36] ++593: p66 = getfield_gc(ConstPtr(ptr63), descr=) ++606: i67 = force_token() p69 = new_array(3, descr=) -p71 = new_with_vtable(38380928) -+678: setfield_gc(p71, i59, descr=) +p71 = new_with_vtable(38431936) ++698: setfield_gc(p71, i59, descr=) setfield_gc(p49, p71, descr=) -+725: setfield_gc(p0, i67, descr=) -+736: setarrayitem_gc(p69, 0, ConstPtr(ptr73), descr=) -+744: setarrayitem_gc(p69, 1, ConstPtr(ptr75), descr=) -+758: setarrayitem_gc(p69, 2, ConstPtr(ptr77), descr=) -+772: i79 = call_may_force(ConstClass(hash_tuple), p69, descr=) -guard_not_forced(, descr=) [p1, p0, p49, p66, i79, p71, p2, p5, p10, p12, p16, i51, p69, p50, p36] -+837: guard_no_exception(, descr=) [p1, p0, p49, p66, i79, p71, p2, p5, p10, p12, p16, i51, p69, p50, p36] -+852: i80 = force_token() -p82 = new_with_vtable(38290296) -+922: setfield_gc(p0, i80, descr=) -+933: setfield_gc(p82, p69, descr=) -+944: i84 = call_may_force(ConstClass(ll_dict_lookup_trampoline__v531___simple_call__function_l), p66, p82, i79, descr=) -guard_not_forced(, descr=) [p1, p0, p49, p82, i84, p66, p71, p2, p5, p10, p12, p16, i51, p50, p36] -+1002: guard_no_exception(, descr=) [p1, p0, p49, p82, i84, p66, p71, p2, p5, p10, p12, p16, i51, p50, p36] -+1017: i86 = int_and(i84, -9223372036854775808) -+1033: i87 = int_is_true(i86) -guard_false(i87, descr=) [p1, p0, p49, p82, i84, p66, p71, p2, p5, p10, p12, p16, i51, p50, p36] -+1043: p88 = getfield_gc(p66, descr=) -+1054: p89 = getinteriorfield_gc(p88, i84, descr=>) -+1063: guard_nonnull_class(p89, 38536280, descr=) [p1, p0, p49, p82, p89, p71, p2, p5, p10, p12, p16, i51, p50, p36] -debug_merge_point(2, ' #35 STORE_FAST') -debug_merge_point(2, ' #38 LOAD_FAST') -debug_merge_point(2, ' #41 LOAD_CONST') -debug_merge_point(2, ' #44 COMPARE_OP') -+1081: i92 = instance_ptr_eq(ConstPtr(ptr91), p89) -guard_false(i92, descr=) [p1, p0, p49, p71, 
p2, p5, p10, p12, p16, p89, p82, i51, p50, p36] -debug_merge_point(2, ' #47 POP_JUMP_IF_FALSE') -debug_merge_point(2, ' #50 LOAD_FAST') -debug_merge_point(2, ' #53 RETURN_VALUE') -+1094: p93 = getfield_gc(p49, descr=) -+1105: guard_isnull(p93, descr=) [p1, p0, p49, p89, p93, p71, p2, p5, p10, p12, p16, None, p82, i51, p50, p36] -+1114: i95 = getfield_gc(p49, descr=) -+1118: i96 = int_is_true(i95) -guard_false(i96, descr=) [p1, p0, p49, p89, p71, p2, p5, p10, p12, p16, None, p82, i51, p50, p36] -+1128: p97 = getfield_gc(p49, descr=) -debug_merge_point(1, ' #12 LOOKUP_METHOD') -+1128: setfield_gc(p71, -3, descr=) -debug_merge_point(1, ' #15 LOAD_FAST') -debug_merge_point(1, ' #18 CALL_METHOD') -+1143: guard_not_invalidated(, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, p89, None, i51, p50, p36] -+1143: i99 = strlen(p36) -+1154: i101 = int_gt(9223372036854775807, i99) -guard_true(i101, descr=) [p1, p0, p49, p89, p36, p2, p5, p10, p12, p16, None, None, i51, p50, None] -+1173: p102 = getfield_gc_pure(p89, descr=) -+1177: i103 = getfield_gc_pure(p89, descr=) -+1181: i105 = getarrayitem_gc_pure(p102, 0, descr=) -+1185: i107 = int_eq(i105, 17) -guard_true(i107, descr=) [p1, p0, p49, p89, p2, p5, p10, p12, p16, i99, i103, p102, None, None, i51, p50, p36] -+1195: i109 = getarrayitem_gc_pure(p102, 2, descr=) -+1199: i111 = int_and(i109, 1) -+1206: i112 = int_is_true(i111) -guard_true(i112, descr=) [p1, p0, p49, p89, i109, p2, p5, p10, p12, p16, i99, i103, p102, None, None, i51, p50, p36] -+1216: i114 = getarrayitem_gc_pure(p102, 5, descr=) -+1220: i116 = int_gt(i114, 1) -guard_false(i116, descr=) [p1, p0, p49, p89, p2, p5, p10, p12, p16, i99, i103, p102, None, None, i51, p50, p36] -+1230: i118 = getarrayitem_gc_pure(p102, 1, descr=) -+1234: i120 = int_add(i118, 1) -+1238: i121 = getarrayitem_gc_pure(p102, i120, descr=) -+1243: i123 = int_eq(i121, 19) -guard_true(i123, descr=) [p1, p0, p49, p89, i120, p2, p5, p10, p12, p16, i99, i103, p102, None, None, i51, p50, p36] -+1253: i125 = int_add(i120, 1) -+1260: i126 = getarrayitem_gc_pure(p102, i125, descr=) -+1265: i128 = int_add(i120, 2) -+1269: i130 = int_lt(0, i99) -guard_true(i130, descr=) [p1, p0, p49, p89, i126, i128, p2, p5, p10, p12, p16, i99, i103, p102, None, None, i51, p50, p36] -+1279: guard_value(i128, 11, descr=) [p1, p0, p49, p89, i126, i128, p102, p2, p5, p10, p12, p16, i99, i103, None, None, None, i51, p50, p36] -+1289: guard_value(i126, 51, descr=) [p1, p0, p49, p89, i126, p102, p2, p5, p10, p12, p16, i99, i103, None, None, None, i51, p50, p36] -+1299: guard_value(p102, ConstPtr(ptr133), descr=) [p1, p0, p49, p89, p102, p2, p5, p10, p12, p16, i99, i103, None, None, None, i51, p50, p36] ++747: setfield_gc(p0, i67, descr=) ++758: setarrayitem_gc(p69, 0, ConstPtr(ptr73), descr=) ++766: setarrayitem_gc(p69, 1, ConstPtr(ptr75), descr=) ++780: setarrayitem_gc(p69, 2, ConstPtr(ptr77), descr=) ++794: i79 = call_may_force(ConstClass(hash_tuple), p69, descr=) +guard_not_forced(, descr=) [p1, p0, p49, p66, i79, p71, p2, p5, p10, p12, p16, p36, p50, i51, p69] ++859: guard_no_exception(, descr=) [p1, p0, p49, p66, i79, p71, p2, p5, p10, p12, p16, p36, p50, i51, p69] ++874: i80 = force_token() +p82 = new_with_vtable(38341048) ++944: setfield_gc(p0, i80, descr=) ++955: setfield_gc(p82, p69, descr=) ++966: i84 = call_may_force(ConstClass(ll_dict_lookup_trampoline__v978___simple_call__function_l), p66, p82, i79, descr=) +guard_not_forced(, descr=) [p1, p0, p49, p82, i84, p66, p71, p2, p5, p10, p12, p16, p36, p50, i51] ++1024: guard_no_exception(, descr=) 
[p1, p0, p49, p82, i84, p66, p71, p2, p5, p10, p12, p16, p36, p50, i51] ++1039: i86 = int_and(i84, -9223372036854775808) ++1055: i87 = int_is_true(i86) +guard_false(i87, descr=) [p1, p0, p49, p82, i84, p66, p71, p2, p5, p10, p12, p16, p36, p50, i51] ++1065: p88 = getfield_gc(p66, descr=) ++1076: p89 = getinteriorfield_gc(p88, i84, descr=>) ++1085: guard_nonnull_class(p89, 38586544, descr=) [p1, p0, p49, p82, p89, p71, p2, p5, p10, p12, p16, p36, p50, i51] +debug_merge_point(2, ' #35 STORE_FAST') +debug_merge_point(2, ' #38 LOAD_FAST') +debug_merge_point(2, ' #41 LOAD_CONST') +debug_merge_point(2, ' #44 COMPARE_OP') ++1103: i92 = instance_ptr_eq(ConstPtr(ptr91), p89) +guard_false(i92, descr=) [p1, p0, p49, p71, p2, p5, p10, p12, p16, p82, p89, p36, p50, i51] +debug_merge_point(2, ' #47 POP_JUMP_IF_FALSE') +debug_merge_point(2, ' #50 LOAD_FAST') +debug_merge_point(2, ' #53 RETURN_VALUE') ++1116: p93 = getfield_gc(p49, descr=) ++1127: guard_isnull(p93, descr=) [p1, p0, p49, p89, p93, p71, p2, p5, p10, p12, p16, p82, None, p36, p50, i51] ++1136: i95 = getfield_gc(p49, descr=) ++1140: i96 = int_is_true(i95) +guard_false(i96, descr=) [p1, p0, p49, p89, p71, p2, p5, p10, p12, p16, p82, None, p36, p50, i51] ++1150: p97 = getfield_gc(p49, descr=) +debug_merge_point(1, ' #12 LOOKUP_METHOD') ++1150: setfield_gc(p71, -3, descr=) +debug_merge_point(1, ' #15 LOAD_FAST') +debug_merge_point(1, ' #18 CALL_METHOD') ++1165: guard_not_invalidated(, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, None, p89, p36, p50, i51] ++1165: i99 = strlen(p36) ++1176: i101 = int_gt(9223372036854775807, i99) +guard_true(i101, descr=) [p1, p0, p49, p89, p36, p2, p5, p10, p12, p16, None, None, None, p50, i51] ++1195: p102 = getfield_gc_pure(p89, descr=) ++1199: i103 = getfield_gc_pure(p89, descr=) ++1203: i105 = getarrayitem_gc_pure(p102, 0, descr=) ++1207: i107 = int_eq(i105, 17) +guard_true(i107, descr=) [p1, p0, p49, p89, p2, p5, p10, p12, p16, i99, i103, p102, None, None, p36, p50, i51] ++1217: i109 = getarrayitem_gc_pure(p102, 2, descr=) ++1221: i111 = int_and(i109, 1) ++1228: i112 = int_is_true(i111) +guard_true(i112, descr=) [p1, p0, p49, p89, i109, p2, p5, p10, p12, p16, i99, i103, p102, None, None, p36, p50, i51] ++1238: i114 = getarrayitem_gc_pure(p102, 5, descr=) ++1242: i116 = int_gt(i114, 1) +guard_false(i116, descr=) [p1, p0, p49, p89, p2, p5, p10, p12, p16, i99, i103, p102, None, None, p36, p50, i51] ++1252: i118 = getarrayitem_gc_pure(p102, 1, descr=) ++1256: i120 = int_add(i118, 1) ++1260: i121 = getarrayitem_gc_pure(p102, i120, descr=) ++1265: i123 = int_eq(i121, 19) +guard_true(i123, descr=) [p1, p0, p49, p89, i120, p2, p5, p10, p12, p16, i99, i103, p102, None, None, p36, p50, i51] ++1275: i125 = int_add(i120, 1) ++1282: i126 = getarrayitem_gc_pure(p102, i125, descr=) ++1287: i128 = int_add(i120, 2) ++1291: i130 = int_lt(0, i99) +guard_true(i130, descr=) [p1, p0, p49, p89, i126, i128, p2, p5, p10, p12, p16, i99, i103, p102, None, None, p36, p50, i51] ++1301: guard_value(i128, 11, descr=) [p1, p0, p49, p89, i126, i128, p102, p2, p5, p10, p12, p16, i99, i103, None, None, None, p36, p50, i51] ++1311: guard_value(i126, 51, descr=) [p1, p0, p49, p89, i126, p102, p2, p5, p10, p12, p16, i99, i103, None, None, None, p36, p50, i51] ++1321: guard_value(p102, ConstPtr(ptr133), descr=) [p1, p0, p49, p89, p102, p2, p5, p10, p12, p16, i99, i103, None, None, None, p36, p50, i51] debug_merge_point(2, 'StrLiteralSearch at 11/51 [17. 8. 3. 1. 1. 1. 1. 51. 0. 19. 51. 
1]') -+1318: i134 = force_token() -p136 = new_with_vtable(38342680) -p137 = new_with_vtable(38380928) -+1402: setfield_gc(p137, i51, descr=) ++1340: i134 = force_token() +p136 = new_with_vtable(38373528) +p137 = new_with_vtable(38431936) ++1424: setfield_gc(p137, i51, descr=) setfield_gc(p49, p137, descr=) -+1449: setfield_gc(p0, i134, descr=) -+1460: setfield_gc(p136, i99, descr=) -+1464: setfield_gc(p136, i103, descr=) -+1468: setfield_gc(p136, p36, descr=) -+1472: setfield_gc(p136, ConstPtr(ptr133), descr=) -+1486: i138 = call_assembler(0, p136, descr=) -guard_not_forced(, descr=) [p1, p0, p49, p89, p136, i138, p137, p2, p5, p10, p12, p16, p50, p36] -+1579: guard_no_exception(, descr=) [p1, p0, p49, p89, p136, i138, p137, p2, p5, p10, p12, p16, p50, p36] -+1594: guard_false(i138, descr=) [p1, p0, p49, p89, p136, p137, p2, p5, p10, p12, p16, p50, p36] -debug_merge_point(1, ' #21 RETURN_VALUE') -+1603: p139 = getfield_gc(p49, descr=) -+1614: guard_isnull(p139, descr=) [p1, p0, p49, p139, p137, p2, p5, p10, p12, p16, p50, p36] -+1623: i140 = getfield_gc(p49, descr=) -+1627: i141 = int_is_true(i140) ++1472: setfield_gc(p0, i134, descr=) ++1483: setfield_gc(p136, i99, descr=) ++1487: setfield_gc(p136, p36, descr=) ++1491: setfield_gc(p136, ConstPtr(ptr133), descr=) ++1505: setfield_gc(p136, i103, descr=) ++1509: i138 = call_assembler(0, p136, descr=) +guard_not_forced(, descr=) [p1, p0, p49, p136, p89, i138, p137, p2, p5, p10, p12, p16, p50, p36] ++1602: guard_no_exception(, descr=) [p1, p0, p49, p136, p89, i138, p137, p2, p5, p10, p12, p16, p50, p36] ++1617: guard_false(i138, descr=) [p1, p0, p49, p136, p89, p137, p2, p5, p10, p12, p16, p50, p36] +debug_merge_point(1, ' #21 RETURN_VALUE') ++1626: p139 = getfield_gc(p49, descr=) ++1637: guard_isnull(p139, descr=) [p1, p0, p49, p139, p137, p2, p5, p10, p12, p16, p50, p36] ++1646: i140 = getfield_gc(p49, descr=) ++1650: i141 = int_is_true(i140) guard_false(i141, descr=) [p1, p0, p49, p137, p2, p5, p10, p12, p16, p50, p36] -+1637: p142 = getfield_gc(p49, descr=) ++1660: p142 = getfield_gc(p49, descr=) debug_merge_point(0, ' #65 POP_TOP') debug_merge_point(0, ' #66 JUMP_ABSOLUTE') setfield_gc(p49, p50, descr=) -+1677: setfield_gc(p137, -3, descr=) -+1692: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p10, p12, p16, None, p36] -+1692: i145 = getfield_raw(43780840, descr=) -+1700: i147 = int_lt(i145, 0) ++1700: setfield_gc(p137, -3, descr=) ++1715: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p10, p12, p16, None, p36] ++1715: i145 = getfield_raw(43858376, descr=) ++1723: i147 = int_lt(i145, 0) guard_false(i147, descr=) [p1, p0, p2, p5, p10, p12, p16, None, p36] debug_merge_point(0, ' #44 FOR_ITER') -+1710: label(p0, p1, p2, p5, p10, p12, p36, p16, i140, p49, p50, descr=TargetToken(140556696703072)) ++1733: label(p0, p1, p2, p5, p10, p12, p36, p16, i140, p49, p50, descr=TargetToken(140139656777744)) debug_merge_point(0, ' #44 FOR_ITER') -+1740: p148 = getfield_gc(p16, descr=) -+1751: guard_nonnull(p148, descr=) [p1, p0, p16, p148, p2, p5, p10, p12, p36] -+1760: i149 = getfield_gc(p16, descr=) -+1764: p150 = getfield_gc(p148, descr=) -+1768: guard_class(p150, 38399200, descr=) [p1, p0, p16, i149, p150, p148, p2, p5, p10, p12, p36] -+1781: p151 = getfield_gc(p148, descr=) -+1785: i152 = getfield_gc(p151, descr=) -+1789: i153 = uint_ge(i149, i152) ++1763: p148 = getfield_gc(p16, descr=) ++1774: guard_nonnull(p148, descr=) [p1, p0, p16, p148, p2, p5, p10, p12, p36] ++1783: i149 = getfield_gc(p16, descr=) ++1787: p150 = getfield_gc(p148, descr=) 
++1791: guard_class(p150, 38450144, descr=) [p1, p0, p16, i149, p150, p148, p2, p5, p10, p12, p36] ++1804: p151 = getfield_gc(p148, descr=) ++1808: i152 = getfield_gc(p151, descr=) ++1812: i153 = uint_ge(i149, i152) guard_false(i153, descr=) [p1, p0, p16, i149, i152, p151, p2, p5, p10, p12, p36] -+1798: p154 = getfield_gc(p151, descr=) -+1802: p155 = getarrayitem_gc(p154, i149, descr=) -+1807: guard_nonnull(p155, descr=) [p1, p0, p16, i149, p155, p2, p5, p10, p12, p36] -+1816: i156 = int_add(i149, 1) ++1821: p154 = getfield_gc(p151, descr=) ++1825: p155 = getarrayitem_gc(p154, i149, descr=) ++1830: guard_nonnull(p155, descr=) [p1, p0, p16, i149, p155, p2, p5, p10, p12, p36] ++1839: i156 = int_add(i149, 1) debug_merge_point(0, ' #47 STORE_FAST') debug_merge_point(0, ' #50 LOAD_GLOBAL') -+1820: p157 = getfield_gc(p0, descr=) -+1831: setfield_gc(p16, i156, descr=) -+1835: guard_value(p157, ConstPtr(ptr42), descr=) [p1, p0, p157, p2, p5, p10, p12, p16, p155, None] -+1854: p158 = getfield_gc(p157, descr=) -+1858: guard_value(p158, ConstPtr(ptr44), descr=) [p1, p0, p158, p157, p2, p5, p10, p12, p16, p155, None] -+1877: guard_not_invalidated(, descr=) [p1, p0, p157, p2, p5, p10, p12, p16, p155, None] ++1843: p157 = getfield_gc(p0, descr=) ++1854: setfield_gc(p16, i156, descr=) ++1858: guard_value(p157, ConstPtr(ptr42), descr=) [p1, p0, p157, p2, p5, p10, p12, p16, p155, None] ++1877: p158 = getfield_gc(p157, descr=) ++1881: guard_value(p158, ConstPtr(ptr44), descr=) [p1, p0, p158, p157, p2, p5, p10, p12, p16, p155, None] ++1900: guard_not_invalidated(, descr=) [p1, p0, p157, p2, p5, p10, p12, p16, p155, None] debug_merge_point(0, ' #53 LOOKUP_METHOD') -+1877: p159 = getfield_gc(ConstPtr(ptr45), descr=) -+1890: guard_value(p159, ConstPtr(ptr47), descr=) [p1, p0, p159, p2, p5, p10, p12, p16, p155, None] ++1900: p159 = getfield_gc(ConstPtr(ptr45), descr=) ++1913: guard_value(p159, ConstPtr(ptr47), descr=) [p1, p0, p159, p2, p5, p10, p12, p16, p155, None] debug_merge_point(0, ' #56 LOAD_CONST') debug_merge_point(0, ' #59 LOAD_FAST') debug_merge_point(0, ' #62 CALL_METHOD') -+1909: i160 = force_token() -+1909: i161 = int_is_zero(i140) -guard_true(i161, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, p50, i160, p155, None] -debug_merge_point(1, ' #0 LOAD_GLOBAL') -debug_merge_point(1, ' #3 LOAD_FAST') -debug_merge_point(1, ' #6 LOAD_FAST') -debug_merge_point(1, ' #9 CALL_FUNCTION') -+1919: i162 = getfield_gc(ConstPtr(ptr55), descr=) -+1932: i163 = int_ge(0, i162) -guard_true(i163, descr=) [p1, p0, p49, i162, p2, p5, p10, p12, p16, p50, i160, p155, None] -+1942: i164 = force_token() -debug_merge_point(2, ' #0 LOAD_GLOBAL') -+1942: p165 = getfield_gc(ConstPtr(ptr60), descr=) -+1950: guard_value(p165, ConstPtr(ptr62), descr=) [p1, p0, p49, p165, p2, p5, p10, p12, p16, i164, p50, i160, p155, None] -debug_merge_point(2, ' #3 LOAD_FAST') -debug_merge_point(2, ' #6 LOAD_CONST') -debug_merge_point(2, ' #9 BINARY_SUBSCR') -debug_merge_point(2, ' #10 CALL_FUNCTION') -debug_merge_point(2, ' #13 BUILD_TUPLE') -debug_merge_point(2, ' #16 LOAD_FAST') -debug_merge_point(2, ' #19 BINARY_ADD') -debug_merge_point(2, ' #20 STORE_FAST') -debug_merge_point(2, ' #23 LOAD_GLOBAL') -debug_merge_point(2, ' #26 LOOKUP_METHOD') -debug_merge_point(2, ' #29 LOAD_FAST') -debug_merge_point(2, ' #32 CALL_METHOD') -+1963: p166 = getfield_gc(ConstPtr(ptr63), descr=) -+1976: guard_class(p166, ConstClass(ObjectDictStrategy), descr=) [p1, p0, p49, p166, p2, p5, p10, p12, p16, i164, p50, i160, p155, None] -+1988: p167 = 
getfield_gc(ConstPtr(ptr63), descr=) -+2001: i168 = force_token() ++1932: i160 = force_token() ++1932: i161 = int_is_zero(i140) +guard_true(i161, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, i160, p50, p155, None] +debug_merge_point(1, ' #0 LOAD_GLOBAL') +debug_merge_point(1, ' #3 LOAD_FAST') +debug_merge_point(1, ' #6 LOAD_FAST') +debug_merge_point(1, ' #9 CALL_FUNCTION') ++1942: i162 = getfield_gc(ConstPtr(ptr55), descr=) ++1955: i163 = int_ge(0, i162) +guard_true(i163, descr=) [p1, p0, p49, i162, p2, p5, p10, p12, p16, i160, p50, p155, None] ++1965: i164 = force_token() +debug_merge_point(2, ' #0 LOAD_GLOBAL') ++1965: p165 = getfield_gc(ConstPtr(ptr60), descr=) ++1973: guard_value(p165, ConstPtr(ptr62), descr=) [p1, p0, p49, p165, p2, p5, p10, p12, p16, i164, i160, p50, p155, None] +debug_merge_point(2, ' #3 LOAD_FAST') +debug_merge_point(2, ' #6 LOAD_CONST') +debug_merge_point(2, ' #9 BINARY_SUBSCR') +debug_merge_point(2, ' #10 CALL_FUNCTION') +debug_merge_point(2, ' #13 BUILD_TUPLE') +debug_merge_point(2, ' #16 LOAD_FAST') +debug_merge_point(2, ' #19 BINARY_ADD') +debug_merge_point(2, ' #20 STORE_FAST') +debug_merge_point(2, ' #23 LOAD_GLOBAL') +debug_merge_point(2, ' #26 LOOKUP_METHOD') +debug_merge_point(2, ' #29 LOAD_FAST') +debug_merge_point(2, ' #32 CALL_METHOD') ++1986: p166 = getfield_gc(ConstPtr(ptr63), descr=) ++1999: guard_class(p166, ConstClass(ObjectDictStrategy), descr=) [p1, p0, p49, p166, p2, p5, p10, p12, p16, i164, i160, p50, p155, None] ++2012: p167 = getfield_gc(ConstPtr(ptr63), descr=) ++2025: i168 = force_token() p169 = new_array(3, descr=) -p170 = new_with_vtable(38380928) -+2093: setfield_gc(p170, i164, descr=) +p170 = new_with_vtable(38431936) ++2117: setfield_gc(p170, i164, descr=) setfield_gc(p49, p170, descr=) -+2144: setfield_gc(p0, i168, descr=) -+2148: setarrayitem_gc(p169, 0, ConstPtr(ptr73), descr=) -+2156: setarrayitem_gc(p169, 1, ConstPtr(ptr75), descr=) -+2170: setarrayitem_gc(p169, 2, ConstPtr(ptr174), descr=) -+2184: i175 = call_may_force(ConstClass(hash_tuple), p169, descr=) -guard_not_forced(, descr=) [p1, p0, p49, p167, i175, p170, p2, p5, p10, p12, p16, p169, p50, i160, p155] -+2256: guard_no_exception(, descr=) [p1, p0, p49, p167, i175, p170, p2, p5, p10, p12, p16, p169, p50, i160, p155] -+2271: i176 = force_token() -p177 = new_with_vtable(38290296) -+2341: setfield_gc(p0, i176, descr=) -+2352: setfield_gc(p177, p169, descr=) -+2363: i178 = call_may_force(ConstClass(ll_dict_lookup_trampoline__v531___simple_call__function_l), p167, p177, i175, descr=) -guard_not_forced(, descr=) [p1, p0, p49, p177, i178, p167, p170, p2, p5, p10, p12, p16, p155, p50, i160] -+2421: guard_no_exception(, descr=) [p1, p0, p49, p177, i178, p167, p170, p2, p5, p10, p12, p16, p155, p50, i160] -+2436: i179 = int_and(i178, -9223372036854775808) -+2452: i180 = int_is_true(i179) -guard_false(i180, descr=) [p1, p0, p49, p177, i178, p167, p170, p2, p5, p10, p12, p16, p155, p50, i160] -+2462: p181 = getfield_gc(p167, descr=) -+2473: p182 = getinteriorfield_gc(p181, i178, descr=>) -+2482: guard_nonnull_class(p182, 38536280, descr=) [p1, p0, p49, p177, p182, p170, p2, p5, p10, p12, p16, p155, p50, i160] -debug_merge_point(2, ' #35 STORE_FAST') -debug_merge_point(2, ' #38 LOAD_FAST') -debug_merge_point(2, ' #41 LOAD_CONST') -debug_merge_point(2, ' #44 COMPARE_OP') -+2500: i183 = instance_ptr_eq(ConstPtr(ptr91), p182) -guard_false(i183, descr=) [p1, p0, p49, p170, p2, p5, p10, p12, p16, p177, p182, p155, p50, i160] -debug_merge_point(2, ' #47 POP_JUMP_IF_FALSE') 
-debug_merge_point(2, ' #50 LOAD_FAST') -debug_merge_point(2, ' #53 RETURN_VALUE') -+2513: p184 = getfield_gc(p49, descr=) -+2524: guard_isnull(p184, descr=) [p1, p0, p49, p182, p184, p170, p2, p5, p10, p12, p16, p177, None, p155, p50, i160] -+2533: i185 = getfield_gc(p49, descr=) -+2537: i186 = int_is_true(i185) -guard_false(i186, descr=) [p1, p0, p49, p182, p170, p2, p5, p10, p12, p16, p177, None, p155, p50, i160] -+2547: p187 = getfield_gc(p49, descr=) -debug_merge_point(1, ' #12 LOOKUP_METHOD') -+2547: setfield_gc(p170, -3, descr=) -debug_merge_point(1, ' #15 LOAD_FAST') -debug_merge_point(1, ' #18 CALL_METHOD') -+2562: guard_not_invalidated(, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, None, p182, p155, p50, i160] -+2562: i189 = strlen(p155) -+2573: i191 = int_gt(9223372036854775807, i189) -guard_true(i191, descr=) [p1, p0, p49, p182, p155, p2, p5, p10, p12, p16, None, None, None, p50, i160] -+2592: p192 = getfield_gc_pure(p182, descr=) -+2596: i193 = getfield_gc_pure(p182, descr=) -+2600: i194 = getarrayitem_gc_pure(p192, 0, descr=) -+2604: i195 = int_eq(i194, 17) -guard_true(i195, descr=) [p1, p0, p49, p182, p2, p5, p10, p12, p16, p192, i189, i193, None, None, p155, p50, i160] -+2614: i196 = getarrayitem_gc_pure(p192, 2, descr=) -+2618: i197 = int_and(i196, 1) -+2625: i198 = int_is_true(i197) -guard_true(i198, descr=) [p1, p0, p49, p182, i196, p2, p5, p10, p12, p16, p192, i189, i193, None, None, p155, p50, i160] -+2635: i199 = getarrayitem_gc_pure(p192, 5, descr=) -+2639: i200 = int_gt(i199, 1) -guard_false(i200, descr=) [p1, p0, p49, p182, p2, p5, p10, p12, p16, p192, i189, i193, None, None, p155, p50, i160] -+2649: i201 = getarrayitem_gc_pure(p192, 1, descr=) -+2653: i202 = int_add(i201, 1) -+2657: i203 = getarrayitem_gc_pure(p192, i202, descr=) -+2662: i204 = int_eq(i203, 19) -guard_true(i204, descr=) [p1, p0, p49, p182, i202, p2, p5, p10, p12, p16, p192, i189, i193, None, None, p155, p50, i160] -+2672: i205 = int_add(i202, 1) -+2679: i206 = getarrayitem_gc_pure(p192, i205, descr=) -+2684: i207 = int_add(i202, 2) -+2688: i209 = int_lt(0, i189) -guard_true(i209, descr=) [p1, p0, p49, p182, i206, i207, p2, p5, p10, p12, p16, p192, i189, i193, None, None, p155, p50, i160] -+2698: guard_value(i207, 11, descr=) [p1, p0, p49, p182, i206, i207, p192, p2, p5, p10, p12, p16, None, i189, i193, None, None, p155, p50, i160] -+2708: guard_value(i206, 51, descr=) [p1, p0, p49, p182, i206, p192, p2, p5, p10, p12, p16, None, i189, i193, None, None, p155, p50, i160] -+2718: guard_value(p192, ConstPtr(ptr133), descr=) [p1, p0, p49, p182, p192, p2, p5, p10, p12, p16, None, i189, i193, None, None, p155, p50, i160] ++2168: setfield_gc(p0, i168, descr=) ++2172: setarrayitem_gc(p169, 0, ConstPtr(ptr73), descr=) ++2180: setarrayitem_gc(p169, 1, ConstPtr(ptr75), descr=) ++2194: setarrayitem_gc(p169, 2, ConstPtr(ptr174), descr=) ++2208: i175 = call_may_force(ConstClass(hash_tuple), p169, descr=) +guard_not_forced(, descr=) [p1, p0, p49, p167, i175, p170, p2, p5, p10, p12, p16, i160, p169, p155, p50] ++2280: guard_no_exception(, descr=) [p1, p0, p49, p167, i175, p170, p2, p5, p10, p12, p16, i160, p169, p155, p50] ++2295: i176 = force_token() +p177 = new_with_vtable(38341048) ++2365: setfield_gc(p0, i176, descr=) ++2376: setfield_gc(p177, p169, descr=) ++2387: i178 = call_may_force(ConstClass(ll_dict_lookup_trampoline__v978___simple_call__function_l), p167, p177, i175, descr=) +guard_not_forced(, descr=) [p1, p0, p49, p177, i178, p167, p170, p2, p5, p10, p12, p16, i160, p155, p50] ++2445: 
guard_no_exception(, descr=) [p1, p0, p49, p177, i178, p167, p170, p2, p5, p10, p12, p16, i160, p155, p50] ++2460: i179 = int_and(i178, -9223372036854775808) ++2476: i180 = int_is_true(i179) +guard_false(i180, descr=) [p1, p0, p49, p177, i178, p167, p170, p2, p5, p10, p12, p16, i160, p155, p50] ++2486: p181 = getfield_gc(p167, descr=) ++2497: p182 = getinteriorfield_gc(p181, i178, descr=>) ++2506: guard_nonnull_class(p182, 38586544, descr=) [p1, p0, p49, p177, p182, p170, p2, p5, p10, p12, p16, i160, p155, p50] +debug_merge_point(2, ' #35 STORE_FAST') +debug_merge_point(2, ' #38 LOAD_FAST') +debug_merge_point(2, ' #41 LOAD_CONST') +debug_merge_point(2, ' #44 COMPARE_OP') ++2524: i183 = instance_ptr_eq(ConstPtr(ptr91), p182) +guard_false(i183, descr=) [p1, p0, p49, p170, p2, p5, p10, p12, p16, p177, p182, i160, p155, p50] +debug_merge_point(2, ' #47 POP_JUMP_IF_FALSE') +debug_merge_point(2, ' #50 LOAD_FAST') +debug_merge_point(2, ' #53 RETURN_VALUE') ++2537: p184 = getfield_gc(p49, descr=) ++2548: guard_isnull(p184, descr=) [p1, p0, p49, p182, p184, p170, p2, p5, p10, p12, p16, p177, None, i160, p155, p50] ++2557: i185 = getfield_gc(p49, descr=) ++2561: i186 = int_is_true(i185) +guard_false(i186, descr=) [p1, p0, p49, p182, p170, p2, p5, p10, p12, p16, p177, None, i160, p155, p50] ++2571: p187 = getfield_gc(p49, descr=) +debug_merge_point(1, ' #12 LOOKUP_METHOD') ++2571: setfield_gc(p170, -3, descr=) +debug_merge_point(1, ' #15 LOAD_FAST') +debug_merge_point(1, ' #18 CALL_METHOD') ++2586: guard_not_invalidated(, descr=) [p1, p0, p49, p2, p5, p10, p12, p16, None, p182, i160, p155, p50] ++2586: i189 = strlen(p155) ++2597: i191 = int_gt(9223372036854775807, i189) +guard_true(i191, descr=) [p1, p0, p49, p182, p155, p2, p5, p10, p12, p16, None, None, i160, None, p50] ++2616: p192 = getfield_gc_pure(p182, descr=) ++2620: i193 = getfield_gc_pure(p182, descr=) ++2624: i194 = getarrayitem_gc_pure(p192, 0, descr=) ++2628: i195 = int_eq(i194, 17) +guard_true(i195, descr=) [p1, p0, p49, p182, p2, p5, p10, p12, p16, i193, i189, p192, None, None, i160, p155, p50] ++2638: i196 = getarrayitem_gc_pure(p192, 2, descr=) ++2642: i197 = int_and(i196, 1) ++2649: i198 = int_is_true(i197) +guard_true(i198, descr=) [p1, p0, p49, p182, i196, p2, p5, p10, p12, p16, i193, i189, p192, None, None, i160, p155, p50] ++2659: i199 = getarrayitem_gc_pure(p192, 5, descr=) ++2663: i200 = int_gt(i199, 1) +guard_false(i200, descr=) [p1, p0, p49, p182, p2, p5, p10, p12, p16, i193, i189, p192, None, None, i160, p155, p50] ++2673: i201 = getarrayitem_gc_pure(p192, 1, descr=) ++2677: i202 = int_add(i201, 1) ++2681: i203 = getarrayitem_gc_pure(p192, i202, descr=) ++2686: i204 = int_eq(i203, 19) +guard_true(i204, descr=) [p1, p0, p49, p182, i202, p2, p5, p10, p12, p16, i193, i189, p192, None, None, i160, p155, p50] ++2696: i205 = int_add(i202, 1) ++2703: i206 = getarrayitem_gc_pure(p192, i205, descr=) ++2708: i207 = int_add(i202, 2) ++2712: i209 = int_lt(0, i189) +guard_true(i209, descr=) [p1, p0, p49, p182, i206, i207, p2, p5, p10, p12, p16, i193, i189, p192, None, None, i160, p155, p50] ++2722: guard_value(i207, 11, descr=) [p1, p0, p49, p182, i206, i207, p192, p2, p5, p10, p12, p16, i193, i189, None, None, None, i160, p155, p50] ++2732: guard_value(i206, 51, descr=) [p1, p0, p49, p182, i206, p192, p2, p5, p10, p12, p16, i193, i189, None, None, None, i160, p155, p50] ++2742: guard_value(p192, ConstPtr(ptr133), descr=) [p1, p0, p49, p182, p192, p2, p5, p10, p12, p16, i193, i189, None, None, None, i160, p155, p50] debug_merge_point(2, 
'StrLiteralSearch at 11/51 [17. 8. 3. 1. 1. 1. 1. 51. 0. 19. 51. 1]') -+2737: i210 = force_token() -p211 = new_with_vtable(38342680) -p212 = new_with_vtable(38380928) -+2821: setfield_gc(p212, i160, descr=) ++2761: i210 = force_token() +p211 = new_with_vtable(38373528) +p212 = new_with_vtable(38431936) ++2845: setfield_gc(p212, i160, descr=) setfield_gc(p49, p212, descr=) -+2868: setfield_gc(p0, i210, descr=) -+2879: setfield_gc(p211, i189, descr=) -+2883: setfield_gc(p211, i193, descr=) -+2887: setfield_gc(p211, p155, descr=) -+2891: setfield_gc(p211, ConstPtr(ptr133), descr=) -+2905: i213 = call_assembler(0, p211, descr=) -guard_not_forced(, descr=) [p1, p0, p49, p182, p211, i213, p212, p2, p5, p10, p12, p16, p155, p50] -+2998: guard_no_exception(, descr=) [p1, p0, p49, p182, p211, i213, p212, p2, p5, p10, p12, p16, p155, p50] -+3013: guard_false(i213, descr=) [p1, p0, p49, p182, p211, p212, p2, p5, p10, p12, p16, p155, p50] -debug_merge_point(1, ' #21 RETURN_VALUE') -+3022: p214 = getfield_gc(p49, descr=) -+3033: guard_isnull(p214, descr=) [p1, p0, p49, p214, p212, p2, p5, p10, p12, p16, p155, p50] -+3042: i215 = getfield_gc(p49, descr=) -+3046: i216 = int_is_true(i215) -guard_false(i216, descr=) [p1, p0, p49, p212, p2, p5, p10, p12, p16, p155, p50] -+3056: p217 = getfield_gc(p49, descr=) ++2896: setfield_gc(p0, i210, descr=) ++2907: setfield_gc(p211, i189, descr=) ++2911: setfield_gc(p211, p155, descr=) ++2915: setfield_gc(p211, ConstPtr(ptr133), descr=) ++2929: setfield_gc(p211, i193, descr=) ++2933: i213 = call_assembler(0, p211, descr=) +guard_not_forced(, descr=) [p1, p0, p49, p211, p182, i213, p212, p2, p5, p10, p12, p16, p50, p155] ++3026: guard_no_exception(, descr=) [p1, p0, p49, p211, p182, i213, p212, p2, p5, p10, p12, p16, p50, p155] ++3041: guard_false(i213, descr=) [p1, p0, p49, p211, p182, p212, p2, p5, p10, p12, p16, p50, p155] +debug_merge_point(1, ' #21 RETURN_VALUE') ++3050: p214 = getfield_gc(p49, descr=) ++3061: guard_isnull(p214, descr=) [p1, p0, p49, p214, p212, p2, p5, p10, p12, p16, p50, p155] ++3070: i215 = getfield_gc(p49, descr=) ++3074: i216 = int_is_true(i215) +guard_false(i216, descr=) [p1, p0, p49, p212, p2, p5, p10, p12, p16, p50, p155] ++3084: p217 = getfield_gc(p49, descr=) debug_merge_point(0, ' #65 POP_TOP') debug_merge_point(0, ' #66 JUMP_ABSOLUTE') setfield_gc(p49, p50, descr=) -+3094: setfield_gc(p212, -3, descr=) -+3109: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p10, p12, p16, p155, None] -+3109: i219 = getfield_raw(43780840, descr=) -+3117: i220 = int_lt(i219, 0) -guard_false(i220, descr=) [p1, p0, p2, p5, p10, p12, p16, p155, None] ++3120: setfield_gc(p212, -3, descr=) ++3135: guard_not_invalidated(, descr=) [p1, p0, p2, p5, p10, p12, p16, None, p155] ++3135: i219 = getfield_raw(43858376, descr=) ++3143: i220 = int_lt(i219, 0) +guard_false(i220, descr=) [p1, p0, p2, p5, p10, p12, p16, None, p155] debug_merge_point(0, ' #44 FOR_ITER') -+3127: jump(p0, p1, p2, p5, p10, p12, p155, p16, i215, p49, p50, descr=TargetToken(140556696703072)) -+3135: --end of the loop-- -[d0e963e4a62] jit-log-opt-loop} -[d0e96518f8f] {jit-backend -[d0e9653113d] {jit-backend-dump ++3153: jump(p0, p1, p2, p5, p10, p12, p155, p16, i215, p49, p50, descr=TargetToken(140139656777744)) ++3164: --end of the loop-- +[7e18c787d7a5] jit-log-opt-loop} +[7e18c79879ff] {jit-backend +[7e18c799e36b] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579d712 +0 
488DA50000000049BB283207E8D57F00004D8B3B4983C70149BB283207E8D57F00004D893B4C8B7E404D0FB67C3F184983FF330F84000000004883C7014C8B7E084C39FF0F8C00000000B80000000048890425107B540141BB503AF20041FFD3B801000000488D65D8415F415E415D415C5B5DC349BB00A079E5D57F000041FFD31D1803AD00000049BB00A079E5D57F000041FFD31D1803AE000000 -[d0e9653458b] jit-backend-dump} -[d0e96534ac7] {jit-backend-addr -bridge out of Guard 90 has address 7fd5e579d712 to 7fd5e579d786 -[d0e965356dd] jit-backend-addr} -[d0e96535ccb] {jit-backend-dump +CODE_DUMP @7f74cbf597c6 +0 488DA50000000049BBA0F282CE747F00004D8B3B4983C70149BBA0F282CE747F00004D893B4C8B7E404D0FB67C3F184983FF330F84000000004883C7014C8B7E084C39FF0F8C00000000B80000000048890425901A550141BBC0BAF20041FFD3B802000000488D65D8415F415E415D415C5B5DC349BB0060F5CB747F000041FFD31D1803AD00000049BB0060F5CB747F000041FFD31D1803AE000000 +[7e18c79a15f1] jit-backend-dump} +[7e18c79a1aed] {jit-backend-addr +bridge out of Guard 90 has address 7f74cbf597c6 to 7f74cbf5983a +[7e18c79a25f5] jit-backend-addr} +[7e18c79a2c21] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579d715 +0 70FFFFFF -[d0e96536869] jit-backend-dump} -[d0e96536f8f] {jit-backend-dump +CODE_DUMP @7f74cbf597c9 +0 70FFFFFF +[7e18c79a9017] jit-backend-dump} +[7e18c79a9805] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579d747 +0 3B000000 -[d0e96537ad3] jit-backend-dump} -[d0e9653804d] {jit-backend-dump +CODE_DUMP @7f74cbf597fb +0 3B000000 +[7e18c79aa391] jit-backend-dump} +[7e18c79aa8c3] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579d758 +0 3E000000 -[d0e96538a67] jit-backend-dump} -[d0e965392f3] {jit-backend-dump +CODE_DUMP @7f74cbf5980c +0 3E000000 +[7e18c79ab29d] jit-backend-dump} +[7e18c79abe0b] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bd8e +0 80190000 -[d0e96539bc5] jit-backend-dump} -[d0e9653a3cd] jit-backend} -[d0e9653abf7] {jit-log-opt-bridge +CODE_DUMP @7f74cbf57e1f +0 A3190000 +[7e18c79ac6e5] jit-backend-dump} +[7e18c79ace71] jit-backend} +[7e18c79ad765] {jit-log-opt-bridge # bridge out of Guard 90 with 10 ops [i0, p1] debug_merge_point(0, 'StrLiteralSearch at 11/51 [17. 8. 3. 1. 1. 1. 1. 51. 0. 19. 51. 
1]') @@ -1912,145 +1912,150 @@ +61: i8 = getfield_gc_pure(p1, descr=) +65: i9 = int_lt(i7, i8) guard_false(i9, descr=) [i7, p1] -+74: finish(0, descr=) ++74: finish(0, descr=) +116: --end of the loop-- -[d0e96544d3d] jit-log-opt-bridge} -[d0e96a79d8b] {jit-backend -[d0e96aae5cb] {jit-backend-dump +[7e18c79b75e5] jit-log-opt-bridge} +[7e18c7efb137] {jit-backend +[7e18c7f2eb61] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579d7c6 +0 488DA50000000049BB403207E8D57F00004D8B3B4983C70149BB403207E8D57F00004D893B4C8BBD00FFFFFF4D8B77504D85F60F85000000004D8B77284983FE000F85000000004C8BB5E8FEFFFF41F6470401740F4C89FF4C89F641BB8045C50041FFD34D8977404C8BB5B8FEFFFF49C74608FDFFFFFF4C8B3425E80A9C024983FE000F8C00000000488B042550C95401488D5010483B142568C95401761A49BB2DA279E5D57F000041FFD349BBC2A279E5D57F000041FFD34889142550C9540148C70088190000488B9508FFFFFF48895008488BBD10FFFFFF49BB902190E5D57F00004D89DE41BD0000000041BA0400000048C78548FFFFFF2C00000048898538FFFFFF48C78530FFFFFF0000000048C78528FFFFFF0000000048C78520FFFFFF0000000048C78518FFFFFF0000000049BBE2C079E5D57F000041FFE349BB00A079E5D57F000041FFD34C703C389C0144504858408401749801940103AF00000049BB00A079E5D57F000041FFD34C703C9C0144504858408401749801940103B000000049BB00A079E5D57F000041FFD34C7044504858400774070703B100000049BB00A079E5D57F000041FFD34C7044504858400774070703B2000000 -[d0e96ab43b9] jit-backend-dump} -[d0e96ab496f] {jit-backend-addr -bridge out of Guard 133 has address 7fd5e579d7c6 to 7fd5e579d904 -[d0e96ab5515] jit-backend-addr} -[d0e96ab5c29] {jit-backend-dump +CODE_DUMP @7f74cbf5987a +0 488DA50000000049BBB8F282CE747F00004D8B3B4983C70149BBB8F282CE747F00004D893B4C8BBD00FFFFFF4D8B77504D85F60F85000000004D8B77284983FE000F85000000004C8BB5F0FEFFFF41F6470401740F4C89FF4C89F641BBB0E5C40041FFD34D8977404C8BB5B8FEFFFF49C74608FDFFFFFF4C8B3425C8399D024983FE000F8C00000000488B0425B0685501488D5010483B1425C8685501761A49BB2D62F5CB747F000041FFD349BBC262F5CB747F000041FFD348891425B068550148C70088190000488B9518FFFFFF48895008488BBD10FFFFFF49BB90C10BCC747F00004D89DE41BD0000000041BA0400000048C78550FFFFFF2C00000048898540FFFFFF488B8D08FFFFFF48C78538FFFFFF0000000048C78530FFFFFF0000000048C78528FFFFFF0000000048C78520FFFFFF0000000049BB8C81F5CB747F000041FFE349BB0060F5CB747F000041FFD340703C389C014C445448749801940180016C03AF00000049BB0060F5CB747F000041FFD340703C9C014C445448749801940180016C03B000000049BB0060F5CB747F000041FFD340704C445448740707076C03B100000049BB0060F5CB747F000041FFD340704C445448740707076C03B2000000 +[7e18c7f34a0f] jit-backend-dump} +[7e18c7f350ad] {jit-backend-addr +bridge out of Guard 133 has address 7f74cbf5987a to 7f74cbf599bf +[7e18c7f35bfd] jit-backend-addr} +[7e18c7f36373] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579d7c9 +0 E0FDFFFF -[d0e96ab6851] jit-backend-dump} -[d0e96ab708b] {jit-backend-dump +CODE_DUMP @7f74cbf5987d +0 E0FDFFFF +[7e18c7f36f51] jit-backend-dump} +[7e18c7f375bd] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579d7fb +0 05010000 -[d0e96ab7b87] jit-backend-dump} -[d0e96ab80d1] {jit-backend-dump +CODE_DUMP @7f74cbf598af +0 0C010000 +[7e18c7f3816b] jit-backend-dump} +[7e18c7f386a1] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579d809 +0 1B010000 -[d0e96ab89df] jit-backend-dump} -[d0e96ab8e97] {jit-backend-dump +CODE_DUMP @7f74cbf598bd +0 22010000 +[7e18c7f39187] jit-backend-dump} +[7e18c7f39711] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579d84b +0 19010000 -[d0e96ab9751] jit-backend-dump} -[d0e96ab9ecb] 
{jit-backend-dump +CODE_DUMP @7f74cbf598ff +0 20010000 +[7e18c7f3a0e7] jit-backend-dump} +[7e18c7f3a6a7] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579c654 +0 6E110000 -[d0e96aba79d] jit-backend-dump} -[d0e96abb0f9] jit-backend} -[d0e96abba99] {jit-log-opt-bridge +CODE_DUMP @7f74cbf586fe +0 78110000 +[7e18c7f3af31] jit-backend-dump} +[7e18c7f3b7bf] jit-backend} +[7e18c7f3c2fd] {jit-log-opt-bridge # bridge out of Guard 133 with 19 ops [p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12] -debug_merge_point(1, ' #21 RETURN_VALUE') +debug_merge_point(1, ' #21 RETURN_VALUE') +37: p13 = getfield_gc(p2, descr=) -+48: guard_isnull(p13, descr=) [p0, p1, p2, p13, p5, p6, p7, p8, p9, p10, p11, p12, p4, p3] ++48: guard_isnull(p13, descr=) [p0, p1, p2, p13, p5, p6, p7, p8, p9, p10, p3, p4, p11, p12] +57: i14 = getfield_gc(p2, descr=) +61: i15 = int_is_true(i14) -guard_false(i15, descr=) [p0, p1, p2, p5, p6, p7, p8, p9, p10, p11, p12, p4, p3] +guard_false(i15, descr=) [p0, p1, p2, p5, p6, p7, p8, p9, p10, p3, p4, p11, p12] +71: p16 = getfield_gc(p2, descr=) debug_merge_point(0, ' #65 POP_TOP') debug_merge_point(0, ' #66 JUMP_ABSOLUTE') setfield_gc(p2, p11, descr=) +104: setfield_gc(p5, -3, descr=) -+119: guard_not_invalidated(, descr=) [p0, p1, p6, p7, p8, p9, p10, None, p12, None, None] -+119: i20 = getfield_raw(43780840, descr=) ++119: guard_not_invalidated(, descr=) [p0, p1, p6, p7, p8, p9, p10, None, None, None, p12] ++119: i20 = getfield_raw(43858376, descr=) +127: i22 = int_lt(i20, 0) -guard_false(i22, descr=) [p0, p1, p6, p7, p8, p9, p10, None, p12, None, None] +guard_false(i22, descr=) [p0, p1, p6, p7, p8, p9, p10, None, None, None, p12] debug_merge_point(0, ' #44 FOR_ITER') p24 = new_with_vtable(ConstClass(W_StringObject)) +200: setfield_gc(p24, p12, descr=) -+211: jump(p1, p0, p6, ConstPtr(ptr25), 0, p7, 4, 44, p8, p9, p24, p10, ConstPtr(ptr29), ConstPtr(ptr30), ConstPtr(ptr30), ConstPtr(ptr30), descr=TargetToken(140556696704672)) -+318: --end of the loop-- -[d0e96ad1975] jit-log-opt-bridge} -[d0e96b06a95] {jit-backend -[d0e96b14677] {jit-backend-dump ++211: jump(p1, p0, p6, ConstPtr(ptr25), 0, p7, 4, 44, p8, p9, p24, p10, ConstPtr(ptr29), ConstPtr(ptr30), ConstPtr(ptr30), ConstPtr(ptr30), descr=TargetToken(140139656779344)) ++325: --end of the loop-- +[7e18c7f5ca77] jit-log-opt-bridge} +[7e18c7f94eb5] {jit-backend +[7e18c7fa297f] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579d985 +0 488DA50000000049BB583207E8D57F00004D8B3B4983C70149BB583207E8D57F00004D893B4989FF4883C70148897E1848C74620000000004C897E28B80100000048890425107B540141BB503AF20041FFD3B801000000488D65D8415F415E415D415C5B5DC3 -[d0e96b1ee49] jit-backend-dump} -[d0e96b1f53f] {jit-backend-addr -bridge out of Guard 87 has address 7fd5e579d985 to 7fd5e579d9eb -[d0e96b1ffb1] jit-backend-addr} -[d0e96b20623] {jit-backend-dump +CODE_DUMP @7f74cbf59a40 +0 488DA50000000049BBD0F282CE747F00004D8B3B4983C70149BBD0F282CE747F00004D893B4989FF4883C70148897E1848C74620000000004C897E28B80100000048890425901A550141BBC0BAF20041FFD3B802000000488D65D8415F415E415D415C5B5DC3 +[7e18c7fa535b] jit-backend-dump} +[7e18c7fa5807] {jit-backend-addr +bridge out of Guard 87 has address 7f74cbf59a40 to 7f74cbf59aa6 +[7e18c7fa6217] jit-backend-addr} +[7e18c7fa67f9] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579d988 +0 70FFFFFF -[d0e96b211b9] jit-backend-dump} -[d0e96b2187b] {jit-backend-dump +CODE_DUMP @7f74cbf59a43 +0 70FFFFFF +[7e18c7fa7353] jit-backend-dump} +[7e18c7fa7a73] 
{jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bcd1 +0 B01C0000 -[d0e96b22165] jit-backend-dump} -[d0e96b227eb] jit-backend} -[d0e96b22f75] {jit-log-opt-bridge +CODE_DUMP @7f74cbf57d62 +0 DA1C0000 +[7e18c7fa8387] jit-backend-dump} +[7e18c7fa8a1b] jit-backend} +[7e18c7fa90bb] {jit-log-opt-bridge # bridge out of Guard 87 with 5 ops [i0, p1] +37: i3 = int_add(i0, 1) +44: setfield_gc(p1, i3, descr=) +48: setfield_gc(p1, ConstPtr(ptr4), descr=) +56: setfield_gc(p1, i0, descr=) -+60: finish(1, descr=) ++60: finish(1, descr=) +102: --end of the loop-- -[d0e96b2931f] jit-log-opt-bridge} -[d0e96c4ce97] {jit-backend -[d0e96c5a3fd] {jit-backend-dump +[7e18c7faefab] jit-log-opt-bridge} +[7e18c80e256d] {jit-backend +[7e18c80ef8fd] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579d9eb +0 488DA50000000049BB703207E8D57F00004D8B3B4983C70149BB703207E8D57F00004D893B4989FF4883C70148897E1848C74620000000004C897E28B80100000048890425107B540141BB503AF20041FFD3B801000000488D65D8415F415E415D415C5B5DC3 -[d0e96c5ce8b] jit-backend-dump} -[d0e96c5d3f3] {jit-backend-addr -bridge out of Guard 89 has address 7fd5e579d9eb to 7fd5e579da51 -[d0e96c5de39] jit-backend-addr} -[d0e96c5e427] {jit-backend-dump +CODE_DUMP @7f74cbf59aa6 +0 488DA50000000049BBE8F282CE747F00004D8B3B4983C70149BBE8F282CE747F00004D893B4989FF4883C70148897E1848C74620000000004C897E28B80100000048890425901A550141BBC0BAF20041FFD3B802000000488D65D8415F415E415D415C5B5DC3 +[7e18c80f2183] jit-backend-dump} +[7e18c80f265d] {jit-backend-addr +bridge out of Guard 89 has address 7f74cbf59aa6 to 7f74cbf59b0c +[7e18c80f3045] jit-backend-addr} +[7e18c80f3621] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579d9ee +0 70FFFFFF -[d0e96c5efcf] jit-backend-dump} -[d0e96c5f62b] {jit-backend-dump +CODE_DUMP @7f74cbf59aa9 +0 70FFFFFF +[7e18c80f405b] jit-backend-dump} +[7e18c80f46fb] {jit-backend-dump BACKEND x86_64 SYS_EXECUTABLE python -CODE_DUMP @7fd5e579bd7d +0 6A1C0000 -[d0e96c5ff59] jit-backend-dump} -[d0e96c605b3] jit-backend} -[d0e96c60d53] {jit-log-opt-bridge +CODE_DUMP @7f74cbf57e0e +0 941C0000 +[7e18c80f50fd] jit-backend-dump} +[7e18c80f57ab] jit-backend} +[7e18c80f5eab] {jit-log-opt-bridge # bridge out of Guard 89 with 5 ops [i0, p1] +37: i3 = int_add(i0, 1) +44: setfield_gc(p1, i3, descr=) +48: setfield_gc(p1, ConstPtr(ptr4), descr=) +56: setfield_gc(p1, i0, descr=) -+60: finish(1, descr=) ++60: finish(1, descr=) +102: --end of the loop-- -[d0e96c663ab] jit-log-opt-bridge} -[d0e96cb4901] {jit-backend-counts -TargetToken(140556656117424):4647 -TargetToken(140556656117504):9292 -TargetToken(140556656121504):201 -TargetToken(140556656121584):4468 +[7e18c80fb77d] jit-log-opt-bridge} +[7e18c814b937] {jit-backend-counts +entry 0:4647 +TargetToken(140139616183984):4647 +TargetToken(140139616184064):9292 +entry 1:201 +TargetToken(140139616188064):201 +TargetToken(140139616188144):4468 bridge 16:4446 bridge 33:4268 -bridge 33:4268 -TargetToken(140556696702032):1 -TargetToken(140556696702112):1938 +TargetToken(140139616190144):4268 +entry 2:1 +TargetToken(140139656776704):1 +TargetToken(140139656776784):1938 +entry 3:3173 bridge 85:2882 bridge 88:2074 bridge 86:158 -TargetToken(140556696704672):527 -TargetToken(140556696703072):1411 +entry 4:377 +TargetToken(140139656779344):527 +TargetToken(140139656777744):1411 bridge 90:1420 bridge 133:150 bridge 87:50 bridge 89:7 -[d0e96cb9b99] jit-backend-counts} +[7e18c8157381] jit-backend-counts} From noreply at buildbot.pypy.org Tue Dec 27 15:41:19 2011 From: 
noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 15:41:19 +0100 (CET) Subject: [pypy-commit] pypy default: adjust to new situation Message-ID: <20111227144119.1A04282B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50903:974244d55fe2 Date: 2011-12-27 16:41 +0200 http://bitbucket.org/pypy/pypy/changeset/974244d55fe2/ Log: adjust to new situation diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -219,7 +219,6 @@ Also detect inlined functions and make them Function """ stack = [] - seen_dmp = False def getpath(stack): return ",".join([str(len(v)) for v in stack]) @@ -240,14 +239,11 @@ stack = [] for op in operations: if op.name == 'debug_merge_point': - if seen_dmp: - if so_far: - append_to_res(cls.TraceForOpcode(so_far, storage)) - if limit: - break - so_far = [] - else: - seen_dmp = True + if so_far: + append_to_res(cls.TraceForOpcode(so_far, storage)) + if limit: + break + so_far = [] so_far.append(op) if so_far: append_to_res(cls.TraceForOpcode(so_far, storage)) @@ -391,7 +387,7 @@ if trace.comment and 'Guard' in trace.comment: descrs = ['bridge ' + re.search('Guard (\d+)', trace.comment).group(1)] else: - descrs = [''] + descrs = ['entry ' + re.search('Loop (\d+)', trace.comment).group(1)] for i, op in enumerate(trace.operations): if op.name == 'label': labels.append(i) diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -38,7 +38,6 @@ def test_split(): ops = parse(''' [i0] - label() debug_merge_point(0, " #10 ADD") debug_merge_point(0, " #11 SUB") i1 = int_add(i0, 1) @@ -47,7 +46,7 @@ ''') res = Function.from_operations(ops.operations, LoopStorage()) assert len(res.chunks) == 3 - assert len(res.chunks[0].operations) == 2 + assert len(res.chunks[0].operations) == 1 assert len(res.chunks[1].operations) == 2 assert len(res.chunks[2].operations) == 2 assert res.chunks[2].bytecode_no == 11 @@ -97,7 +96,7 @@ i2 = int_add(i1, 1) ''') res = Function.from_operations(ops.operations, LoopStorage()) - assert res.repr() == res.chunks[0].repr() + assert res.repr() == res.chunks[1].repr() def test_lineno(): fname = str(py.path.local(__file__).join('..', 'x.py')) @@ -246,6 +245,7 @@ guard_true(i19, descr=) [] i113 = getfield_raw(151937600, descr=) ''') + loop.comment = 'Loop 0' parts = split_trace(loop) assert len(parts) == 3 assert len(parts[0].operations) == 2 @@ -273,7 +273,7 @@ finish(i0) ''') bridge.comment = 'bridge out of Guard 2 with 1 ops' - loop.comment = '' + loop.comment = 'Loop 0' loops = split_trace(loop) + split_trace(bridge) input = ['grrr:123\nasb:12\nbridge 2:1234'] parse_log_counts(input, loops) From noreply at buildbot.pypy.org Tue Dec 27 15:58:19 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 15:58:19 +0100 (CET) Subject: [pypy-commit] pypy default: improvements Message-ID: <20111227145819.A92AB82B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50904:6406dbf9a60e Date: 2011-12-27 16:58 +0200 http://bitbucket.org/pypy/pypy/changeset/6406dbf9a60e/ Log: improvements diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -138,20 +138,27 @@ is_bytecode = True inline_level = None - def __init__(self, operations, storage): + def 
parse_code_data(self, arg): + m = re.search('\w]+)[\.,] file \'(.+?)\'[\.,] line (\d+)> #(\d+) (\w+)', + arg) + if m is None: + # a non-code loop, like StrLiteralSearch or something + self.bytecode_name = arg + else: + self.name, self.filename, lineno, bytecode_no, self.bytecode_name = m.groups() + self.startlineno = int(lineno) + self.bytecode_no = int(bytecode_no) + + + def __init__(self, operations, storage, loopname): for op in operations: if op.name == 'debug_merge_point': self.inline_level = int(op.args[0]) - m = re.search('\w]+)\. file \'(.+?)\'\. line (\d+)> #(\d+) (\w+)', - op.args[1]) - if m is None: - # a non-code loop, like StrLiteralSearch or something - self.bytecode_name = op.args[1][1:-1] - else: - self.name, self.filename, lineno, bytecode_no, self.bytecode_name = m.groups() - self.startlineno = int(lineno) - self.bytecode_no = int(bytecode_no) + self.parse_code_data(op.args[1]) break + else: + self.inline_level = 0 + self.parse_code_data(loopname) self.operations = operations self.storage = storage self.code = storage.disassemble_code(self.filename, self.startlineno, @@ -214,7 +221,8 @@ self.storage = storage @classmethod - def from_operations(cls, operations, storage, limit=None, inputargs=''): + def from_operations(cls, operations, storage, limit=None, inputargs='', + loopname=''): """ Slice given operation list into a chain of TraceForOpcode chunks. Also detect inlined functions and make them Function """ @@ -240,13 +248,13 @@ for op in operations: if op.name == 'debug_merge_point': if so_far: - append_to_res(cls.TraceForOpcode(so_far, storage)) + append_to_res(cls.TraceForOpcode(so_far, storage, loopname)) if limit: break so_far = [] so_far.append(op) if so_far: - append_to_res(cls.TraceForOpcode(so_far, storage)) + append_to_res(cls.TraceForOpcode(so_far, storage, loopname)) # wrap stack back up if not stack: # no ops whatsoever @@ -294,7 +302,7 @@ def repr(self): if self.filename is None: - return "Unknown" + return self.chunks[0].bytecode_name return "%s, file '%s', line %d" % (self.name, self.filename, self.startlineno) diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -38,18 +38,21 @@ def test_split(): ops = parse(''' [i0] + label() debug_merge_point(0, " #10 ADD") debug_merge_point(0, " #11 SUB") i1 = int_add(i0, 1) debug_merge_point(0, " #11 SUB") i2 = int_add(i1, 1) ''') - res = Function.from_operations(ops.operations, LoopStorage()) - assert len(res.chunks) == 3 + res = Function.from_operations(ops.operations, LoopStorage(), loopname='') + assert len(res.chunks) == 4 assert len(res.chunks[0].operations) == 1 - assert len(res.chunks[1].operations) == 2 + assert len(res.chunks[1].operations) == 1 assert len(res.chunks[2].operations) == 2 - assert res.chunks[2].bytecode_no == 11 + assert len(res.chunks[3].operations) == 2 + assert res.chunks[3].bytecode_no == 11 + assert res.chunks[0].bytecode_name == 'loopname' def test_inlined_call(): ops = parse(""" From noreply at buildbot.pypy.org Tue Dec 27 16:07:50 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Tue, 27 Dec 2011 16:07:50 +0100 (CET) Subject: [pypy-commit] pypy jit-improve-nested-loops: hg merge c8ddbb442986 Message-ID: <20111227150750.E8D9C82B1D@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-improve-nested-loops Changeset: r50905:4481b4f255f7 Date: 2011-12-27 16:05 +0100 http://bitbucket.org/pypy/pypy/changeset/4481b4f255f7/ Log: hg 
merge c8ddbb442986 diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -619,7 +619,8 @@ self.descr_reqcls, args) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -655,7 +656,8 @@ self.descr_reqcls, args) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -674,7 +676,8 @@ self.descr_reqcls, args.prepend(w_obj)) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -690,7 +693,8 @@ raise OperationError(space.w_SystemError, space.wrap("unexpected DescrMismatch error")) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -708,7 +712,8 @@ self.descr_reqcls, Arguments(space, [w1])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -726,7 +731,8 @@ self.descr_reqcls, Arguments(space, [w1, w2])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -744,7 +750,8 @@ self.descr_reqcls, Arguments(space, [w1, w2, w3])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -763,7 +770,8 @@ Arguments(space, [w1, w2, w3, w4])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -162,7 +162,6 @@ _ll_4_list_setslice = rlist.ll_listsetslice _ll_2_list_delslice_startonly = rlist.ll_listdelslice_startonly _ll_3_list_delslice_startstop = rlist.ll_listdelslice_startstop -_ll_1_list_list2fixed = lltypesystem_rlist.ll_list2fixed _ll_2_list_inplace_mul = rlist.ll_inplace_mul _ll_2_list_getitem_foldable = _ll_2_list_getitem diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -578,8 +578,8 @@ strides.append(concrete.strides[i]) backstrides.append(concrete.backstrides[i]) shape.append(concrete.shape[i]) - return space.wrap(W_NDimSlice(concrete.start, strides[:], - backstrides[:], shape[:], concrete)) + return space.wrap(W_NDimSlice(concrete.start, strides, + backstrides, shape, concrete)) def descr_get_flatiter(self, space): return space.wrap(W_FlatIterator(self)) @@ -820,8 +820,8 @@ if self.order == 'C': strides.reverse() backstrides.reverse() - self.strides = strides[:] - self.backstrides = backstrides[:] + self.strides = strides + self.backstrides = backstrides def array_sig(self, res_shape): if res_shape is not None and self.shape != res_shape: @@ -1025,9 +1025,9 @@ strides.reverse() backstrides.reverse() new_shape.reverse() - self.strides = strides[:] 
- self.backstrides = backstrides[:] - self.shape = new_shape[:] + self.strides = strides + self.backstrides = backstrides + self.shape = new_shape return new_strides = calc_new_strides(new_shape, self.shape, self.strides) if new_strides is None: @@ -1037,7 +1037,7 @@ for nd in range(len(new_shape)): new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] self.strides = new_strides[:] - self.backstrides = new_backstrides[:] + self.backstrides = new_backstrides self.shape = new_shape[:] class W_NDimArray(ConcreteArray): diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -537,7 +537,7 @@ builder.append(by) builder.append_slice(input, upper, len(input)) else: - # An ok guess for the result size + # First compute the exact result size count = input.count(sub) if count > maxsplit and maxsplit > 0: count = maxsplit @@ -553,21 +553,16 @@ builder = StringBuilder(result_size) start = 0 sublen = len(sub) - first = True while maxsplit != 0: next = input.find(sub, start) if next < 0: break - if not first: - builder.append(by) - first = False builder.append_slice(input, start, next) + builder.append(by) start = next + sublen maxsplit -= 1 # NB. if it's already < 0, it stays < 0 - if not first: - builder.append(by) builder.append_slice(input, start, len(input)) return space.wrap(builder.build()) diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -375,7 +375,6 @@ newitems = malloc(LIST.items.TO, n) rgc.ll_arraycopy(olditems, newitems, 0, 0, n) return newitems -ll_list2fixed.oopspec = 'list.list2fixed(l)' def ll_list2fixed_exact(l): ll_assert(l.length == len(l.items), "ll_list2fixed_exact: bad length") diff --git a/pypy/rpython/test/test_generator.py b/pypy/rpython/test/test_generator.py --- a/pypy/rpython/test/test_generator.py +++ b/pypy/rpython/test/test_generator.py @@ -54,6 +54,26 @@ res = self.interpret(f, [0]) assert res == 42 + def test_except_block(self): + def foo(): + raise ValueError + def g(a, b, c): + yield a + yield b + try: + foo() + except ValueError: + pass + yield c + def f(): + gen = g(3, 5, 8) + x = gen.next() * 100 + x += gen.next() * 10 + x += gen.next() + return x + res = self.interpret(f, []) + assert res == 358 + class TestLLtype(BaseTestGenerator, LLRtypeMixin): pass diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -385,18 +385,26 @@ parser.postprocess(loop, backend_tp=bname, backend_dump=dump, dump_start=start_ofs)) - loops.append(loop) + loops += split_trace(loop) return log, loops def split_trace(trace): - labels = [i for i, op in enumerate(trace.operations) - if op.name == 'label'] - labels = [0] + labels + [len(trace.operations) - 1] + labels = [0] + if trace.comment and 'Guard' in trace.comment: + descrs = ['bridge ' + re.search('Guard (\d+)', trace.comment).group(1)] + else: + descrs = [''] + for i, op in enumerate(trace.operations): + if op.name == 'label': + labels.append(i) + descrs.append(op.descr) + labels.append(len(trace.operations) - 1) parts = [] for i in range(len(labels) - 1): start, stop = labels[i], labels[i+1] part = copy(trace) part.operations = trace.operations[start : stop + 1] + part.descr = descrs[i] parts.append(part) return parts @@ -407,11 +415,7 @@ lines = input[-1].splitlines() mapping = {} for loop in 
loops: - com = loop.comment - if 'Loop' in com: - mapping['loop ' + re.search('Loop (\d+)', com).group(1)] = loop - else: - mapping['bridge ' + re.search('Guard (\d+)', com).group(1)] = loop + mapping[loop.descr] = loop for line in lines: if line: num, count = line.split(':', 2) diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -1,6 +1,7 @@ from pypy.tool.jitlogparser.parser import (SimpleParser, TraceForOpcode, Function, adjust_bridges, - import_log, split_trace, Op) + import_log, split_trace, Op, + parse_log_counts) from pypy.tool.jitlogparser.storage import LoopStorage import py, sys @@ -236,10 +237,10 @@ loop = parse(''' [i7] i9 = int_lt(i7, 1003) - label(i9) + label(i9, descr=grrr) guard_true(i9, descr=) [] i13 = getfield_raw(151937600, descr=) - label(i13) + label(i13, descr=asb) i19 = int_lt(i13, 1003) guard_true(i19, descr=) [] i113 = getfield_raw(151937600, descr=) @@ -249,3 +250,32 @@ assert len(parts[0].operations) == 2 assert len(parts[1].operations) == 4 assert len(parts[2].operations) == 4 + assert parts[1].descr == 'grrr' + assert parts[2].descr == 'asb' + +def test_parse_log_counts(): + loop = parse(''' + [i7] + i9 = int_lt(i7, 1003) + label(i9, descr=grrr) + guard_true(i9, descr=) [] + i13 = getfield_raw(151937600, descr=) + label(i13, descr=asb) + i19 = int_lt(i13, 1003) + guard_true(i19, descr=) [] + i113 = getfield_raw(151937600, descr=) + ''') + bridge = parse(''' + # bridge out of Guard 2 with 1 ops + [] + i0 = int_lt(1, 2) + finish(i0) + ''') + bridge.comment = 'bridge out of Guard 2 with 1 ops' + loop.comment = '' + loops = split_trace(loop) + split_trace(bridge) + input = ['grrr:123\nasb:12\nbridge 2:1234'] + parse_log_counts(input, loops) + assert loops[-1].count == 1234 + assert loops[1].count == 123 + assert loops[2].count == 12 diff --git a/pypy/translator/generator.py b/pypy/translator/generator.py --- a/pypy/translator/generator.py +++ b/pypy/translator/generator.py @@ -2,7 +2,7 @@ from pypy.objspace.flow.model import Variable, Constant, FunctionGraph from pypy.translator.unsimplify import insert_empty_startblock from pypy.translator.unsimplify import split_block -from pypy.translator.simplify import eliminate_empty_blocks +from pypy.translator.simplify import eliminate_empty_blocks, simplify_graph from pypy.tool.sourcetools import func_with_new_name from pypy.interpreter.argument import Signature @@ -64,6 +64,7 @@ def next(self): entry = self.current self.current = None + assert entry is not None # else, recursive generator invocation (next_entry, return_value) = func(entry) self.current = next_entry return return_value @@ -91,6 +92,10 @@ block.inputargs = [v_entry1] def tweak_generator_body_graph(Entry, graph): + # First, always run simplify_graph in order to reduce the number of + # variables passed around + simplify_graph(graph) + # assert graph.startblock.operations[0].opname == 'generator_mark' graph.startblock.operations.pop(0) # @@ -100,12 +105,20 @@ # mappings = [Entry] # + stopblock = Block([]) + v0 = Variable(); v1 = Variable() + stopblock.operations = [ + SpaceOperation('simple_call', [Constant(StopIteration)], v0), + SpaceOperation('type', [v0], v1), + ] + stopblock.closeblock(Link([v1, v0], graph.exceptblock)) + # for block in list(graph.iterblocks()): for exit in block.exits: if exit.target is graph.returnblock: - exit.args = [Constant(StopIteration), - Constant(StopIteration())] - exit.target = 
graph.exceptblock + exit.args = [] + exit.target = stopblock + assert block is not stopblock for index in range(len(block.operations)-1, -1, -1): op = block.operations[index] if op.opname == 'yield': From noreply at buildbot.pypy.org Tue Dec 27 16:09:42 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 16:09:42 +0100 (CET) Subject: [pypy-commit] pypy default: add some info "we're in re" or "we're in numpy" Message-ID: <20111227150942.A4D5C82B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50906:ff4589dda2b1 Date: 2011-12-27 17:09 +0200 http://bitbucket.org/pypy/pypy/changeset/ff4589dda2b1/ Log: add some info "we're in re" or "we're in numpy" diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -7,7 +7,7 @@ def new_printable_location(driver_name): def get_printable_location(shapelen, sig): - return sig.debug_repr() + ' [%d dims,%s]' % (shapelen, driver_name) + return 'numpy ' + sig.debug_repr() + ' [%d dims,%s]' % (shapelen, driver_name) return get_printable_location def sigeq(one, two): diff --git a/pypy/rlib/rsre/rsre_jit.py b/pypy/rlib/rsre/rsre_jit.py --- a/pypy/rlib/rsre/rsre_jit.py +++ b/pypy/rlib/rsre/rsre_jit.py @@ -22,7 +22,7 @@ info = '%s/%d' % (info, args[debugprint[2]]) else: info = '' - return '%s%s %s' % (name, info, s) + return 're %s%s %s' % (name, info, s) # self.get_printable_location = get_printable_location From noreply at buildbot.pypy.org Tue Dec 27 17:15:29 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 27 Dec 2011 17:15:29 +0100 (CET) Subject: [pypy-commit] pypy default: this can be unrolled sometimes Message-ID: <20111227161529.19F6982B1D@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50907:653d45fce4df Date: 2011-12-27 10:15 -0600 http://bitbucket.org/pypy/pypy/changeset/653d45fce4df/ Log: this can be unrolled sometimes diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -1,4 +1,9 @@ +from pypy.rlib import jit + + at jit.look_inside_iff(lambda shape, start, strides, backstrides, chunks: + jit.isconstant(len(chunks)) +) def calculate_slice_strides(shape, start, strides, backstrides, chunks): rstrides = [] rbackstrides = [] From noreply at buildbot.pypy.org Tue Dec 27 17:39:24 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 27 Dec 2011 17:39:24 +0100 (CET) Subject: [pypy-commit] pypy default: A guard can't invalidate the heap cache in the metainterp. Message-ID: <20111227163924.BDA1282B1D@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r50908:c2d42bf471da Date: 2011-12-27 10:39 -0600 http://bitbucket.org/pypy/pypy/changeset/c2d42bf471da/ Log: A guard can't invalidate the heap cache in the metainterp. 
diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -79,9 +79,9 @@ opnum == rop.COPYSTRCONTENT or opnum == rop.COPYUNICODECONTENT): return - if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: - return - if rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST: + if (rop._OVF_FIRST <= opnum <= rop._OVF_LAST or + rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST or + rop._GUARD_FIRST <= opnum <= rop._GUARD_LAST): return if opnum == rop.CALL or opnum == rop.CALL_LOOPINVARIANT: effectinfo = descr.get_extra_info() diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py --- a/pypy/jit/metainterp/test/test_heapcache.py +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -255,6 +255,11 @@ assert h.getarrayitem(box1, descr1, index1) is box2 assert h.getarrayitem(box1, descr1, index2) is box4 + h.invalidate_caches(rop.GUARD_TRUE, None, []) + assert h.getfield(box1, descr1) is box2 + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr1, index2) is box4 + h.invalidate_caches( rop.CALL_LOOPINVARIANT, FakeCallDescr(FakeEffektinfo.EF_LOOPINVARIANT), []) From noreply at buildbot.pypy.org Tue Dec 27 18:05:39 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 27 Dec 2011 18:05:39 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Fix the description Message-ID: <20111227170539.ECC4982B1D@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r3994:e88672794b8f Date: 2011-12-27 18:05 +0100 http://bitbucket.org/pypy/extradoc/changeset/e88672794b8f/ Log: Fix the description diff --git a/sprintinfo/leysin-winter-2012/announcement.txt b/sprintinfo/leysin-winter-2012/announcement.txt --- a/sprintinfo/leysin-winter-2012/announcement.txt +++ b/sprintinfo/leysin-winter-2012/announcement.txt @@ -10,9 +10,9 @@ Goals and topics of the sprint ------------------------------ -* Py3k topics: ... +* Py3k: work towards supporting Python 3 in PyPy -* NumPyPy topics: ... 
+* NumPyPy: work towards supporting the numpy module in PyPy * JIT backends: integrate tests for ARM; look at the PowerPC 64; maybe try again to write an LLVM- or GCC-based one From noreply at buildbot.pypy.org Tue Dec 27 19:27:59 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 19:27:59 +0100 (CET) Subject: [pypy-commit] pypy default: a minor fix Message-ID: <20111227182759.1F3C582B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50909:84b731e827a6 Date: 2011-12-27 19:45 +0200 http://bitbucket.org/pypy/pypy/changeset/84b731e827a6/ Log: a minor fix diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -154,7 +154,7 @@ for op in operations: if op.name == 'debug_merge_point': self.inline_level = int(op.args[0]) - self.parse_code_data(op.args[1]) + self.parse_code_data(op.args[1][1:-1]) break else: self.inline_level = 0 diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -52,7 +52,7 @@ assert len(res.chunks[2].operations) == 2 assert len(res.chunks[3].operations) == 2 assert res.chunks[3].bytecode_no == 11 - assert res.chunks[0].bytecode_name == 'loopname' + assert res.chunks[0].bytecode_name == '' def test_inlined_call(): ops = parse(""" From noreply at buildbot.pypy.org Tue Dec 27 19:28:00 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 19:28:00 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20111227182800.45E1E82B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50910:56272f637518 Date: 2011-12-27 20:27 +0200 http://bitbucket.org/pypy/pypy/changeset/56272f637518/ Log: merge diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -79,9 +79,9 @@ opnum == rop.COPYSTRCONTENT or opnum == rop.COPYUNICODECONTENT): return - if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: - return - if rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST: + if (rop._OVF_FIRST <= opnum <= rop._OVF_LAST or + rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST or + rop._GUARD_FIRST <= opnum <= rop._GUARD_LAST): return if opnum == rop.CALL or opnum == rop.CALL_LOOPINVARIANT: effectinfo = descr.get_extra_info() diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py --- a/pypy/jit/metainterp/test/test_heapcache.py +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -255,6 +255,11 @@ assert h.getarrayitem(box1, descr1, index1) is box2 assert h.getarrayitem(box1, descr1, index2) is box4 + h.invalidate_caches(rop.GUARD_TRUE, None, []) + assert h.getfield(box1, descr1) is box2 + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr1, index2) is box4 + h.invalidate_caches( rop.CALL_LOOPINVARIANT, FakeCallDescr(FakeEffektinfo.EF_LOOPINVARIANT), []) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -1,4 +1,9 @@ +from pypy.rlib import jit + + at jit.look_inside_iff(lambda shape, start, strides, backstrides, chunks: + jit.isconstant(len(chunks)) +) def calculate_slice_strides(shape, start, strides, backstrides, chunks): rstrides = [] rbackstrides 
= [] From noreply at buildbot.pypy.org Tue Dec 27 21:43:19 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 21:43:19 +0100 (CET) Subject: [pypy-commit] jitviewer default: slightly better - display loops at least Message-ID: <20111227204319.5E25082B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r184:e9485a7845a0 Date: 2011-12-27 16:57 +0200 http://bitbucket.org/pypy/jitviewer/changeset/e9485a7845a0/ Log: slightly better - display loops at least diff --git a/bin/jitviewer.py b/bin/jitviewer.py --- a/bin/jitviewer.py +++ b/bin/jitviewer.py @@ -71,14 +71,14 @@ all = flask.request.args.get('all', None) loops = [] for index, loop in enumerate(self.storage.loops): - if 'entry bridge' in loop.comment: - is_entry = True - else: - is_entry = False + is_entry = False try: + start, stop = loop.comment.find('('), loop.comment.rfind(')') + name = loop.comment[start + 1:stop] func = FunctionHtml.from_operations(loop.operations, self.storage, limit=1, - inputargs=loop.inputargs) + inputargs=loop.inputargs, + loopname=name) except CannotFindFile: func = DummyFunc() func.count = getattr(loop, 'count', '?') From noreply at buildbot.pypy.org Tue Dec 27 21:46:50 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Dec 2011 21:46:50 +0100 (CET) Subject: [pypy-commit] pypy default: Meh, at least raise error in case there is a typo Message-ID: <20111227204650.2887682B1D@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50911:94e9969b5f00 Date: 2011-12-27 22:46 +0200 http://bitbucket.org/pypy/pypy/changeset/94e9969b5f00/ Log: Meh, at least raise error in case there is a typo diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -528,6 +528,8 @@ set_param(driver, name1, int(value)) except ValueError: raise + else: + raise ValueError set_user_param._annspecialcase_ = 'specialize:arg(0)' From noreply at buildbot.pypy.org Tue Dec 27 21:48:33 2011 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 27 Dec 2011 21:48:33 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: add tests for numpypy.reduce Message-ID: <20111227204833.5E79D82B1D@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50912:e5b246bae93a Date: 2011-12-25 21:09 +0200 http://bitbucket.org/pypy/pypy/changeset/e5b246bae93a/ Log: add tests for numpypy.reduce diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -3,7 +3,8 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty from pypy.module.micronumpy import interp_boxes, interp_dtype, types -from pypy.module.micronumpy.signature import ReduceSignature, ScalarSignature, find_sig +from pypy.module.micronumpy.signature import (ReduceSignature, ScalarSignature, + ArraySignature, find_sig) from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name @@ -46,8 +47,60 @@ ) return self.call(space, __args__.arguments_w) - def descr_reduce(self, space, w_obj): - return self.reduce(space, w_obj, False, space.wrap(-1)) + def descr_reduce(self, space, w_obj, w_dim=0): + '''reduce(...) + reduce(a, axis=0) + + Reduces `a`'s dimension by one, by applying ufunc along one axis. + + Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. 
Then + :math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` = + the result of iterating `j` over :math:`range(N_i)`, cumulatively applying + ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`. + For a one-dimensional array, reduce produces results equivalent to: + :: + + r = op.identity # op = ufunc + for i in xrange(len(A)): + r = op(r, A[i]) + return r + + For example, add.reduce() is equivalent to sum(). + + Parameters + ---------- + a : array_like + The array to act on. + axis : int, optional + The axis along which to apply the reduction. + + Examples + -------- + >>> np.multiply.reduce([2,3,5]) + 30 + + A multi-dimensional array example: + + >>> X = np.arange(8).reshape((2,2,2)) + >>> X + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.add.reduce(X, 0) + array([[ 4, 6], + [ 8, 10]]) + >>> np.add.reduce(X) # confirm: default axis value is 0 + array([[ 4, 6], + [ 8, 10]]) + >>> np.add.reduce(X, 1) + array([[ 2, 4], + [10, 12]]) + >>> np.add.reduce(X, 2) + array([[ 1, 5], + [ 9, 13]]) + ''' + return self.reduce(space, w_obj, False, w_dim) def reduce(self, space, w_obj, multidim, w_dim): from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar @@ -57,6 +110,8 @@ dim = -1 if not space.is_w(w_dim, space.w_None): dim = space.int_w(w_dim) + if not multidim and space.is_w(w_dim, space.w_None): + dim = 0 assert isinstance(self, W_Ufunc2) obj = convert_to_array(space, w_obj) if isinstance(obj, Scalar): @@ -69,14 +124,15 @@ promote_to_largest=True ) shapelen = len(obj.shape) - #TODO: if dim>=0 return a ArraySignature? - sig = find_sig(ReduceSignature(self.func, self.name, dtype, + if dim>=0 or 0: + sig = find_sig(ReduceSignature(self.func, self.name, dtype, + ArraySignature(dtype), + obj.create_sig(obj.shape)), obj) + else: + sig = find_sig(ReduceSignature(self.func, self.name, dtype, ScalarSignature(dtype), obj.create_sig(obj.shape)), obj) frame = sig.create_frame(obj) - if shapelen > 1 and not multidim: - raise OperationError(space.w_NotImplementedError, - space.wrap("not implemented yet")) if self.identity is None: if size == 0: raise operationerrfmt(space.w_ValueError, "zero-size array to " diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -339,12 +339,15 @@ raises(TypeError, add.reduce, 1) def test_reduce(self): - from numpypy import add, maximum + from numpypy import add, maximum, arange assert add.reduce([1, 2, 3]) == 6 assert maximum.reduce([1]) == 1 assert maximum.reduce([1, 2, 3]) == 3 raises(ValueError, maximum.reduce, []) + a = arange(12).reshape(3,4) + assert add.reduce(a, 0) == add.reduce(a) + assert (add.reduce(a, 1) == [ 6, 22, 38]).all() def test_comparisons(self): import operator From noreply at buildbot.pypy.org Tue Dec 27 21:48:34 2011 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 27 Dec 2011 21:48:34 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: checkpoint Message-ID: <20111227204834.8933382B1D@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50913:da091f2c5c7d Date: 2011-12-26 23:32 +0200 http://bitbucket.org/pypy/pypy/changeset/da091f2c5c7d/ Log: checkpoint diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -103,34 +103,49 @@ def next(self, shapelen): return self -# ------ other 
iterators that are not part of the computation frame ---------- +def axis_iter_from_arr(arr, dim=-1, start=[]): + return AxisIterator(arr.start, arr.strides, arr.backstrides, arr.shape, + dim, start) class AxisIterator(object): """ This object will return offsets of each start of a stride on the desired dimension, starting at the desired index """ - def __init__(self, arr, dim=-1, start=[]): - self.arr = arr + def __init__(self, start, strides, backstrides, shape, dim=-1, start=[]): + self.shape = shape self.indices = [0] * len(arr.shape) self.done = False - self.offset = arr.start - self.dim = len(arr.shape) - 1 + self.offset = start + self.dim = len(shape) - 1 if dim >= 0: self.dim = dim - if len(start) == len(arr.shape): + if len(start) == len(shape): for i in range(len(start)): - self.offset += arr.strides[i] * start[i] - def next(self): - for i in range(len(self.arr.shape) - 1, -1, -1): + self.offset += strides[i] * start[i] + def next(self, shapelen): + offset = self.offset + indices = [0] * shapelen + for i in range(shapelen): + indices[i] = self.indices[i] + for i in range(shapelen - 1, -1, -1): if i == self.dim: continue - if self.indices[i] < self.arr.shape[i] - 1: - self.indices[i] += 1 - self.offset += self.arr.strides[i] + if indices[i] < self.shape[i] - 1: + indices[i] += 1 + offset += self.strides[i] break else: - self.indices[i] = 0 - self.offset -= self.arr.backstrides[i] + indices[i] = 0 + offset -= self.backstrides[i] else: self.done = True - + res = instantiate(AxisIterator) + res.offset = offset + res.indices = indices + res.strides = self.strides + res.backstrides = self.backstrides + res.shape = self.shape + res.dim = self.dim + res.done = done + return res + diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -664,7 +664,8 @@ self.name = name def _del_sources(self): - # Function for deleting references to source arrays, to allow garbage-collecting them + # Function for deleting references to source arrays, + # to allow garbage-collecting them raise NotImplementedError def compute(self): @@ -730,6 +731,42 @@ def _del_sources(self): self.child = None +class Reduce(VirtualArray): + def __init__(self, ufunc, name, dim, res_dtype, values): + shape=values.shape[0:dim] + values.shape[dim+1:len(values.shape)] + VirtualArray.__init__(self, name, shape, res_dtype) + self.values = values + self.size = values.size + self.ufunc = ufunc + self.res_dtype = res_dtype + self.dim = dim + + def _del_sources(self): + self.values = None + + def create_sig(self, res_shape): + if self.forced_result is not None: + return self.forced_result.create_sig(res_shape) + return signature.ReduceSignature(self.ufunc, self.name, self.res_dtype, + signature.ViewSignature(self.res_dtype), + self.values.create_sig(res_shape)) + + def compute(self): + result = W_NDimArray(self.size, self.shape, self.find_dtype()) + shapelen = len(result.shape) + sig = self.find_sig() + ri = ArrayIterator(self.size) + si = AxisIterator(self,self.dim) + while not ri.done(): + frame = sig.create_frame(self, self.values, chunks = si.indices) + val = sig.eval(frame, self) + result.dtype.setitem(result.storage, ri.offset, val) + ri = ri.next(shapelen) + si = si.next(shapelen) + return result + + + class Call1(VirtualArray): def __init__(self, ufunc, name, shape, res_dtype, values): VirtualArray.__init__(self, name, shape, res_dtype) diff --git 
a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -3,8 +3,8 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty from pypy.module.micronumpy import interp_boxes, interp_dtype, types -from pypy.module.micronumpy.signature import (ReduceSignature, ScalarSignature, - ArraySignature, find_sig) +from pypy.module.micronumpy.signature import (ReduceSignature, + ScalarSignature, find_sig) from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name @@ -124,12 +124,10 @@ promote_to_largest=True ) shapelen = len(obj.shape) - if dim>=0 or 0: - sig = find_sig(ReduceSignature(self.func, self.name, dtype, - ArraySignature(dtype), - obj.create_sig(obj.shape)), obj) - else: - sig = find_sig(ReduceSignature(self.func, self.name, dtype, + if shapelen>1 and dim>=0: + from pypy.module.micronumpy.interp_numarray import Reduce + return Reduce(self.func, self.name, dim, dtype, obj) + sig = find_sig(ReduceSignature(self.func, self.name, dtype, ScalarSignature(dtype), obj.create_sig(obj.shape)), obj) frame = sig.create_frame(obj) diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -90,11 +90,11 @@ allnumbers.append(no) self.iter_no = no - def create_frame(self, arr, res_shape=None): + def create_frame(self, arr, res_shape=None, chunks = []): res_shape = res_shape or arr.shape iterlist = [] arraylist = [] - self._create_iter(iterlist, arraylist, arr, res_shape, []) + self._create_iter(iterlist, arraylist, arr, res_shape, chunks) return NumpyEvalFrame(iterlist, arraylist) class ConcreteSignature(Signature): diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -338,16 +338,19 @@ raises(ValueError, sin.reduce, [1, 2, 3]) raises(TypeError, add.reduce, 1) - def test_reduce(self): - from numpypy import add, maximum, arange - + def test_reduce1D(self): + from numpypy import add, maximum assert add.reduce([1, 2, 3]) == 6 assert maximum.reduce([1]) == 1 assert maximum.reduce([1, 2, 3]) == 3 raises(ValueError, maximum.reduce, []) - a = arange(12).reshape(3,4) - assert add.reduce(a, 0) == add.reduce(a) + + def test_reduceND(self): + from numpypy import add, arange + a = arange(12).reshape(3, 4) + assert add.reduce(a,1)[0] ==6 assert (add.reduce(a, 1) == [ 6, 22, 38]).all() + assert (add.reduce(a, 0) == add.reduce(a)).all() def test_comparisons(self): import operator From noreply at buildbot.pypy.org Tue Dec 27 21:48:35 2011 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 27 Dec 2011 21:48:35 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: rework AxisIterator, make single tests pass Message-ID: <20111227204835.B369F82B1D@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50914:b12a872f0961 Date: 2011-12-27 07:32 +0200 http://bitbucket.org/pypy/pypy/changeset/b12a872f0961/ Log: rework AxisIterator, make single tests pass diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -111,17 +111,19 @@ """ This object will 
return offsets of each start of a stride on the desired dimension, starting at the desired index """ - def __init__(self, start, strides, backstrides, shape, dim=-1, start=[]): + def __init__(self, arr_start, strides, backstrides, shape, dim=-1, slice_start=[]): self.shape = shape - self.indices = [0] * len(arr.shape) + self.indices = [0] * len(shape) self.done = False - self.offset = start + self.offset = arr_start self.dim = len(shape) - 1 + self.strides = strides + self.backstrides = backstrides if dim >= 0: self.dim = dim - if len(start) == len(shape): - for i in range(len(start)): - self.offset += strides[i] * start[i] + if len(slice_start) == len(shape): + for i in range(len(slice_start)): + self.offset += strides[i] * slice_start[i] def next(self, shapelen): offset = self.offset indices = [0] * shapelen @@ -146,6 +148,6 @@ res.backstrides = self.backstrides res.shape = self.shape res.dim = self.dim - res.done = done + res.done = self.done return res diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -9,7 +9,7 @@ from pypy.tool.sourcetools import func_with_new_name from pypy.rlib.rstring import StringBuilder from pypy.module.micronumpy.interp_iter import ArrayIterator,\ - view_iter_from_arr, OneDimIterator, AxisIterator + view_iter_from_arr, OneDimIterator, axis_iter_from_arr numpy_driver = jit.JitDriver( greens=['shapelen', 'sig'], @@ -280,6 +280,8 @@ def _reduce_ufunc_impl(ufunc_name): def impl(self, space, w_dim=None): + if w_dim is None: + w_dim = space.wrap(w_dim) return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, self, True, w_dim) return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) @@ -756,7 +758,7 @@ shapelen = len(result.shape) sig = self.find_sig() ri = ArrayIterator(self.size) - si = AxisIterator(self,self.dim) + si = axis_iter_from_arr(self, self.dim) while not ri.done(): frame = sig.create_frame(self, self.values, chunks = si.indices) val = sig.eval(frame, self) @@ -985,23 +987,24 @@ def _fast_setslice(self, space, w_value): assert isinstance(w_value, ConcreteArray) itemsize = self.dtype.itemtype.get_element_size() - if len(self.shape) == 1: + shapelen = len(self.shape) + if shapelen == 1: rffi.c_memcpy( rffi.ptradd(self.storage, self.start * itemsize), rffi.ptradd(w_value.storage, w_value.start * itemsize), self.size * itemsize ) else: - dest = AxisIterator(self) - source = AxisIterator(w_value) + dest = axis_iter_from_arr(self) + source = axis_iter_from_arr(w_value) while not dest.done: rffi.c_memcpy( rffi.ptradd(self.storage, dest.offset * itemsize), rffi.ptradd(w_value.storage, source.offset * itemsize), self.shape[-1] * itemsize ) - source.next() - dest.next() + source = source.next(shapelen) + dest = dest.next(shapelen) def _sliceloop(self, source, res_shape): sig = source.find_sig(res_shape) diff --git a/pypy/module/micronumpy/test/test_iterators.py b/pypy/module/micronumpy/test/test_iterators.py --- a/pypy/module/micronumpy/test/test_iterators.py +++ b/pypy/module/micronumpy/test/test_iterators.py @@ -8,24 +8,31 @@ class TestAxisIteratorDirect(object): def test_axis_iterator(self): + a = W_NDimArray(5*3, [5, 3], MockDtype(), 'C') + i = AxisIterator(a) + ret = [] + while not i.done: + ret.append(i.offset) + i = i.next() + assert ret == [0, 3, 6, 9, 12] a = W_NDimArray(7*5*3, [7, 5, 3], MockDtype(), 'C') i = AxisIterator(a) ret = [] while not i.done: ret.append(i.offset) - i.next() + i = 
i.next() assert ret == [3*v for v in range(7*5)] i = AxisIterator(a,2) ret = [] while not i.done: ret.append(i.offset) - i.next() + i = i.next() assert ret == [3*v for v in range(7*5)] i = AxisIterator(a,1) ret = [] while not i.done: ret.append(i.offset) - i.next() + i = i.next() assert ret == [ 0, 1, 2, 15, 16, 17, 30, 31, 32, 45, 46, 47, 60, 61, 62, 75, 76, 77, 90, 91, 92] def test_axis_iterator_with_start(self): @@ -34,18 +41,18 @@ ret = [] while not i.done: ret.append(i.offset) - i.next() + i = i.next() assert ret == [3*v for v in range(7*5)] i = AxisIterator(a, start=[1, 1, 0]) ret = [] while not i.done: ret.append(i.offset) - i.next() + i = i.next() assert ret == [3*v+18 for v in range(7*5)] i = AxisIterator(a, 1, [2, 0, 2]) ret = [] while not i.done: ret.append(i.offset) - i.next() + i = i.next() assert ret == [v + 32 for v in [ 0, 1, 2, 15, 16, 17, 30, 31, 32, 45, 46, 47, 60, 61, 62, 75, 76, 77, 90, 91, 92]] diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -720,6 +720,9 @@ assert a.sum() == 5 raises(TypeError, 'a.sum(2, 3)') + + def test_sumND(self): + skip('Not finished yet') a = arange(15).reshape(5, 3) assert (a.sum(0) == [30, 35, 40]).all() assert (a.sum(1) == [3, 12, 21, 30, 39]).all() From noreply at buildbot.pypy.org Tue Dec 27 21:48:36 2011 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 27 Dec 2011 21:48:36 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: why do default args cause problems? Message-ID: <20111227204836.D91C182B1D@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50915:cd8f7d57fe3e Date: 2011-12-27 08:12 +0200 http://bitbucket.org/pypy/pypy/changeset/cd8f7d57fe3e/ Log: why do default args cause problems? 
diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -90,11 +90,13 @@ allnumbers.append(no) self.iter_no = no - def create_frame(self, arr, res_shape=None, chunks = []): + def create_frame(self, arr, res_shape=None): + #def create_frame(self, arr, res_shape=None, chunks = []): res_shape = res_shape or arr.shape iterlist = [] arraylist = [] - self._create_iter(iterlist, arraylist, arr, res_shape, chunks) + #self._create_iter(iterlist, arraylist, arr, res_shape, chunks) + self._create_iter(iterlist, arraylist, arr, res_shape, []) return NumpyEvalFrame(iterlist, arraylist) class ConcreteSignature(Signature): From noreply at buildbot.pypy.org Tue Dec 27 21:48:38 2011 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 27 Dec 2011 21:48:38 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: iterators, frames in place, but still not correct Message-ID: <20111227204838.1276382B1D@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50916:a1810346784a Date: 2011-12-27 22:46 +0200 http://bitbucket.org/pypy/pypy/changeset/a1810346784a/ Log: iterators, frames in place, but still not correct diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -756,15 +756,19 @@ def compute(self): result = W_NDimArray(self.size, self.shape, self.find_dtype()) shapelen = len(result.shape) - sig = self.find_sig() + #sig = self.find_sig(result.shape) ##Don't do this, it causes an infinite recursion + sig = self.create_sig(result.shape) ri = ArrayIterator(self.size) - si = axis_iter_from_arr(self, self.dim) + si = axis_iter_from_arr(self.values, self.dim) while not ri.done(): - frame = sig.create_frame(self, self.values, chunks = si.indices) - val = sig.eval(frame, self) + #frame = sig.create_frame(self.values, chunks = [si.indices]) + #Frame should be returning self.func applied to the axis starting at si.offset + frame = sig.create_frame(self.values, chunks = []) + val = sig.eval(frame, self.values).convert_to( self.find_dtype()) result.dtype.setitem(result.storage, ri.offset, val) ri = ri.next(shapelen) si = si.next(shapelen) + frame = frame.next return result diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -90,13 +90,11 @@ allnumbers.append(no) self.iter_no = no - def create_frame(self, arr, res_shape=None): - #def create_frame(self, arr, res_shape=None, chunks = []): + def create_frame(self, arr, res_shape=None, chunks = []): res_shape = res_shape or arr.shape iterlist = [] arraylist = [] - #self._create_iter(iterlist, arraylist, arr, res_shape, chunks) - self._create_iter(iterlist, arraylist, arr, res_shape, []) + self._create_iter(iterlist, arraylist, arr, res_shape, chunks) return NumpyEvalFrame(iterlist, arraylist) class ConcreteSignature(Signature): From noreply at buildbot.pypy.org Wed Dec 28 02:59:43 2011 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 28 Dec 2011 02:59:43 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: fix test_axis_iterator, start to see some results Message-ID: <20111228015943.7E12C82BB4@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50917:4e517ebe55c0 Date: 2011-12-28 02:31 +0200 
http://bitbucket.org/pypy/pypy/changeset/4e517ebe55c0/ Log: fix test_axis_iterator, start to see some results diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -113,6 +113,7 @@ """ def __init__(self, arr_start, strides, backstrides, shape, dim=-1, slice_start=[]): self.shape = shape + self.shapelen = len(shape) self.indices = [0] * len(shape) self.done = False self.offset = arr_start @@ -125,11 +126,12 @@ for i in range(len(slice_start)): self.offset += strides[i] * slice_start[i] def next(self, shapelen): + #shapelen will always be one less than self.shapelen offset = self.offset - indices = [0] * shapelen - for i in range(shapelen): + indices = [0] * self.shapelen + for i in range(self.shapelen): indices[i] = self.indices[i] - for i in range(shapelen - 1, -1, -1): + for i in range(self.shapelen - 1, -1, -1): if i == self.dim: continue if indices[i] < self.shape[i] - 1: @@ -147,6 +149,7 @@ res.strides = self.strides res.backstrides = self.backstrides res.shape = self.shape + res.shapelen = self.shapelen res.dim = self.dim res.done = self.done return res diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -734,7 +734,7 @@ self.child = None class Reduce(VirtualArray): - def __init__(self, ufunc, name, dim, res_dtype, values): + def __init__(self, ufunc, name, dim, res_dtype, values, identity=None): shape=values.shape[0:dim] + values.shape[dim+1:len(values.shape)] VirtualArray.__init__(self, name, shape, res_dtype) self.values = values @@ -742,9 +742,11 @@ self.ufunc = ufunc self.res_dtype = res_dtype self.dim = dim + self.identity = identity def _del_sources(self): self.values = None + pass def create_sig(self, res_shape): if self.forced_result is not None: @@ -754,21 +756,37 @@ self.values.create_sig(res_shape)) def compute(self): - result = W_NDimArray(self.size, self.shape, self.find_dtype()) + dtype = self.res_dtype + result = W_NDimArray(self.size, self.shape, dtype) shapelen = len(result.shape) + objlen = len(self.values.shape) + target_len = self.values.shape[self.dim] #sig = self.find_sig(result.shape) ##Don't do this, it causes an infinite recursion sig = self.create_sig(result.shape) ri = ArrayIterator(self.size) si = axis_iter_from_arr(self.values, self.dim) while not ri.done(): - #frame = sig.create_frame(self.values, chunks = [si.indices]) - #Frame should be returning self.func applied to the axis starting at si.offset - frame = sig.create_frame(self.values, chunks = []) - val = sig.eval(frame, self.values).convert_to( self.find_dtype()) - result.dtype.setitem(result.storage, ri.offset, val) + chunks = [] + for i in range(objlen - 1, -1, -1): + if i==self.dim: + chunks.append((0, target_len, 1, target_len)) + else: + chunks.append((si.indices[i], 0, 0, 1)) + frame = sig.create_frame(self.values, + res_shape = [target_len], chunks = [chunks,]) + if self.identity is None: + value = sig.eval(frame, self.values).convert_to(dtype) + frame.next(shapelen) + else: + value = self.identity.convert_to(dtype) + while not frame.done(): + assert isinstance(sig, signature.ReduceSignature) + nextval = sig.eval(frame, self.values).convert_to(dtype) + value = sig.binfunc(dtype, value, nextval) + frame.next(shapelen) + result.dtype.setitem(result.storage, ri.offset, value) ri = ri.next(shapelen) si = si.next(shapelen) - 
frame = frame.next return result diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -121,20 +121,20 @@ size = obj.size dtype = find_unaryop_result_dtype( space, obj.find_dtype(), - promote_to_largest=True + promote_to_float=self.promote_to_float ) shapelen = len(obj.shape) + if self.identity is None and size == 0: + raise operationerrfmt(space.w_ValueError, "zero-size array to " + "%s.reduce without identity", self.name) if shapelen>1 and dim>=0: from pypy.module.micronumpy.interp_numarray import Reduce - return Reduce(self.func, self.name, dim, dtype, obj) + return Reduce(self.func, self.name, dim, dtype, obj, self.identity) sig = find_sig(ReduceSignature(self.func, self.name, dtype, ScalarSignature(dtype), obj.create_sig(obj.shape)), obj) frame = sig.create_frame(obj) if self.identity is None: - if size == 0: - raise operationerrfmt(space.w_ValueError, "zero-size array to " - "%s.reduce without identity", self.name) value = sig.eval(frame, obj).convert_to(dtype) frame.next(shapelen) else: diff --git a/pypy/module/micronumpy/test/test_iterators.py b/pypy/module/micronumpy/test/test_iterators.py --- a/pypy/module/micronumpy/test/test_iterators.py +++ b/pypy/module/micronumpy/test/test_iterators.py @@ -1,5 +1,5 @@ -from pypy.module.micronumpy.interp_iter import AxisIterator +from pypy.module.micronumpy.interp_iter import axis_iter_from_arr from pypy.module.micronumpy.interp_numarray import W_NDimArray class MockDtype(object): @@ -9,50 +9,50 @@ class TestAxisIteratorDirect(object): def test_axis_iterator(self): a = W_NDimArray(5*3, [5, 3], MockDtype(), 'C') - i = AxisIterator(a) + i = axis_iter_from_arr(a) ret = [] while not i.done: ret.append(i.offset) - i = i.next() + i = i.next(1) assert ret == [0, 3, 6, 9, 12] a = W_NDimArray(7*5*3, [7, 5, 3], MockDtype(), 'C') - i = AxisIterator(a) + i = axis_iter_from_arr(a) ret = [] while not i.done: ret.append(i.offset) - i = i.next() + i = i.next(1) assert ret == [3*v for v in range(7*5)] - i = AxisIterator(a,2) + i = axis_iter_from_arr(a,2) ret = [] while not i.done: ret.append(i.offset) - i = i.next() + i = i.next(1) assert ret == [3*v for v in range(7*5)] - i = AxisIterator(a,1) + i = axis_iter_from_arr(a,1) ret = [] while not i.done: ret.append(i.offset) - i = i.next() + i = i.next(1) assert ret == [ 0, 1, 2, 15, 16, 17, 30, 31, 32, 45, 46, 47, 60, 61, 62, 75, 76, 77, 90, 91, 92] def test_axis_iterator_with_start(self): a = W_NDimArray(7*5*3, [7, 5, 3], MockDtype(), 'C') - i = AxisIterator(a, start=[0, 0, 0]) + i = axis_iter_from_arr(a, start=[0, 0, 0]) ret = [] while not i.done: ret.append(i.offset) - i = i.next() + i = i.next(2) assert ret == [3*v for v in range(7*5)] - i = AxisIterator(a, start=[1, 1, 0]) + i = axis_iter_from_arr(a, start=[1, 1, 0]) ret = [] while not i.done: ret.append(i.offset) - i = i.next() + i = i.next(2) assert ret == [3*v+18 for v in range(7*5)] - i = AxisIterator(a, 1, [2, 0, 2]) + i = axis_iter_from_arr(a, 1, [2, 0, 2]) ret = [] while not i.done: ret.append(i.offset) - i = i.next() + i = i.next(2) assert ret == [v + 32 for v in [ 0, 1, 2, 15, 16, 17, 30, 31, 32, 45, 46, 47, 60, 61, 62, 75, 76, 77, 90, 91, 92]] diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -348,9 +348,8 @@ def test_reduceND(self): from numpypy import add, arange 
a = arange(12).reshape(3, 4) - assert add.reduce(a,1)[0] ==6 - assert (add.reduce(a, 1) == [ 6, 22, 38]).all() - assert (add.reduce(a, 0) == add.reduce(a)).all() + assert (add.reduce(a, 0) == [12, 15, 18, 21]).all() + assert (add.reduce(a, 1) == [6.0, 22.0, 38.0]).all() def test_comparisons(self): import operator From noreply at buildbot.pypy.org Wed Dec 28 02:59:44 2011 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 28 Dec 2011 02:59:44 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: bugfix, passes tests Message-ID: <20111228015944.A545482BB4@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50918:ff482782c5fe Date: 2011-12-28 02:57 +0200 http://bitbucket.org/pypy/pypy/changeset/ff482782c5fe/ Log: bugfix, passes tests diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -738,7 +738,9 @@ shape=values.shape[0:dim] + values.shape[dim+1:len(values.shape)] VirtualArray.__init__(self, name, shape, res_dtype) self.values = values - self.size = values.size + self.size = 1 + for s in shape: + self.size *= s self.ufunc = ufunc self.res_dtype = res_dtype self.dim = dim @@ -758,16 +760,18 @@ def compute(self): dtype = self.res_dtype result = W_NDimArray(self.size, self.shape, dtype) + self.values = self.values.get_concrete() shapelen = len(result.shape) objlen = len(self.values.shape) target_len = self.values.shape[self.dim] #sig = self.find_sig(result.shape) ##Don't do this, it causes an infinite recursion sig = self.create_sig(result.shape) - ri = ArrayIterator(self.size) + ri = ArrayIterator(result.size) si = axis_iter_from_arr(self.values, self.dim) while not ri.done(): chunks = [] - for i in range(objlen - 1, -1, -1): + #for i in range(objlen - 1, -1, -1): + for i in range(objlen): if i==self.dim: chunks.append((0, target_len, 1, target_len)) else: From noreply at buildbot.pypy.org Wed Dec 28 02:59:46 2011 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 28 Dec 2011 02:59:46 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: merge with default Message-ID: <20111228015946.8EBF282BB4@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50919:1714b0167e37 Date: 2011-12-28 03:02 +0200 http://bitbucket.org/pypy/pypy/changeset/1714b0167e37/ Log: merge with default diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -98,7 +98,6 @@ "Abstract. Get the expected number of locals." 
raise TypeError, "abstract" - @jit.dont_look_inside def fast2locals(self): # Copy values from the fastlocals to self.w_locals if self.w_locals is None: @@ -112,7 +111,6 @@ w_name = self.space.wrap(name) self.space.setitem(self.w_locals, w_name, w_value) - @jit.dont_look_inside def locals2fast(self): # Copy values from self.w_locals to the fastlocals assert self.w_locals is not None diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -619,7 +619,8 @@ self.descr_reqcls, args) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -655,7 +656,8 @@ self.descr_reqcls, args) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -674,7 +676,8 @@ self.descr_reqcls, args.prepend(w_obj)) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -690,7 +693,8 @@ raise OperationError(space.w_SystemError, space.wrap("unexpected DescrMismatch error")) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -708,7 +712,8 @@ self.descr_reqcls, Arguments(space, [w1])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -726,7 +731,8 @@ self.descr_reqcls, Arguments(space, [w1, w2])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -744,7 +750,8 @@ self.descr_reqcls, Arguments(space, [w1, w2, w3])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -763,7 +770,8 @@ Arguments(space, [w1, w2, w3, w4])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -39,6 +39,7 @@ from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter import longlong from pypy.rlib.rarithmetic import intmask +from pypy.rlib.objectmodel import compute_unique_id # darwin requires the stack to be 16 bytes aligned on calls. 
Same for gcc 4.5.0, # better safe than sorry @@ -58,7 +59,8 @@ self.is_guard_not_invalidated = is_guard_not_invalidated DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed), - ('bridge', lltype.Signed), # 0 or 1 + ('type', lltype.Char), # 'b'ridge, 'l'abel or + # 'e'ntry point ('number', lltype.Signed)) class Assembler386(object): @@ -147,12 +149,15 @@ def finish_once(self): if self._debug: debug_start('jit-backend-counts') - for struct in self.loop_run_counters: - if struct.bridge: - prefix = 'bridge ' + for i in range(len(self.loop_run_counters)): + struct = self.loop_run_counters[i] + if struct.type == 'l': + prefix = 'TargetToken(%d)' % struct.number + elif struct.type == 'b': + prefix = 'bridge ' + str(struct.number) else: - prefix = 'loop ' - debug_print(prefix + str(struct.number) + ':' + str(struct.i)) + prefix = 'entry ' + str(struct.number) + debug_print(prefix + ':' + str(struct.i)) debug_stop('jit-backend-counts') def _build_float_constants(self): @@ -422,8 +427,8 @@ self.setup(looptoken) if log: - self._register_counter(False, looptoken.number) - operations = self._inject_debugging_code(looptoken, operations) + operations = self._inject_debugging_code(looptoken, operations, + 'e', looptoken.number) regalloc = RegAlloc(self, self.cpu.translate_support_code) # @@ -489,8 +494,8 @@ self.setup(original_loop_token) if log: - self._register_counter(True, descr_number) - operations = self._inject_debugging_code(faildescr, operations) + operations = self._inject_debugging_code(faildescr, operations, + 'b', descr_number) arglocs = self.rebuild_faillocs_from_descr(failure_recovery) if not we_are_translated(): @@ -597,17 +602,21 @@ return self.mc.materialize(self.cpu.asmmemmgr, allblocks, self.cpu.gc_ll_descr.gcrootmap) - def _register_counter(self, bridge, number): - if self._debug: - # YYY very minor leak -- we need the counters to stay alive - # forever, just because we want to report them at the end - # of the process - struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', - track_allocation=False) - struct.i = 0 - struct.bridge = int(bridge) + def _register_counter(self, tp, number, token): + # YYY very minor leak -- we need the counters to stay alive + # forever, just because we want to report them at the end + # of the process + struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', + track_allocation=False) + struct.i = 0 + struct.type = tp + if tp == 'b' or tp == 'e': struct.number = number - self.loop_run_counters.append(struct) + else: + assert token + struct.number = compute_unique_id(token) + self.loop_run_counters.append(struct) + return struct def _find_failure_recovery_bytecode(self, faildescr): adr_jump_offset = faildescr._x86_adr_jump_offset @@ -651,27 +660,36 @@ targettoken._x86_loop_code += rawstart self.target_tokens_currently_compiling = None + def _append_debugging_code(self, operations, tp, number, token): + counter = self._register_counter(tp, number, token) + c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) + box = BoxInt() + box2 = BoxInt() + ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], + box, descr=self.debug_counter_descr), + ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), + ResOperation(rop.SETFIELD_RAW, [c_adr, box2], + None, descr=self.debug_counter_descr)] + operations.extend(ops) + @specialize.argtype(1) - def _inject_debugging_code(self, looptoken, operations): + def _inject_debugging_code(self, looptoken, operations, tp, number): if self._debug: # before doing anything, let's increase a counter s = 0 for op in operations: s += 
op.getopnum() looptoken._x86_debug_checksum = s - c_adr = ConstInt(rffi.cast(lltype.Signed, - self.loop_run_counters[-1])) - box = BoxInt() - box2 = BoxInt() - ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], - box, descr=self.debug_counter_descr), - ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), - ResOperation(rop.SETFIELD_RAW, [c_adr, box2], - None, descr=self.debug_counter_descr)] - if operations[0].getopnum() == rop.LABEL: - operations = [operations[0]] + ops + operations[1:] - else: - operations = ops + operations + + newoperations = [] + self._append_debugging_code(newoperations, tp, number, + None) + for op in operations: + newoperations.append(op) + if op.getopnum() == rop.LABEL: + self._append_debugging_code(newoperations, 'l', number, + op.getdescr()) + operations = newoperations return operations def _assemble(self, regalloc, operations): diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -519,6 +519,7 @@ from pypy.tool.logparser import parse_log_file, extract_category from pypy.rlib import debug + targettoken, preambletoken = TargetToken(), TargetToken() loop = """ [i0] label(i0, descr=preambletoken) @@ -533,8 +534,8 @@ guard_false(i12) [] jump(i11, descr=targettoken) """ - ops = parse(loop, namespace={'targettoken': TargetToken(), - 'preambletoken': TargetToken()}) + ops = parse(loop, namespace={'targettoken': targettoken, + 'preambletoken': preambletoken}) debug._log = dlog = debug.DebugLog() try: self.cpu.assembler.set_debug(True) @@ -545,11 +546,16 @@ struct = self.cpu.assembler.loop_run_counters[0] assert struct.i == 1 struct = self.cpu.assembler.loop_run_counters[1] - assert struct.i == 10 + assert struct.i == 1 + struct = self.cpu.assembler.loop_run_counters[2] + assert struct.i == 9 self.cpu.finish_once() finally: debug._log = None - assert ('jit-backend-counts', [('debug_print', 'loop -1:10')]) in dlog + l0 = ('debug_print', 'entry -1:1') + l1 = ('debug_print', preambletoken.repr_of_descr() + ':1') + l2 = ('debug_print', targettoken.repr_of_descr() + ':9') + assert ('jit-backend-counts', [l0, l1, l2]) in dlog def test_debugger_checksum(self): loop = """ diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -162,7 +162,6 @@ _ll_4_list_setslice = rlist.ll_listsetslice _ll_2_list_delslice_startonly = rlist.ll_listdelslice_startonly _ll_3_list_delslice_startstop = rlist.ll_listdelslice_startstop -_ll_1_list_list2fixed = lltypesystem_rlist.ll_list2fixed _ll_2_list_inplace_mul = rlist.ll_inplace_mul _ll_2_list_getitem_foldable = _ll_2_list_getitem diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -112,33 +112,26 @@ """ from pypy.jit.metainterp.optimizeopt import optimize_trace - history = metainterp.history metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd + history = metainterp.history - if False: - part = partial_trace - assert False - procedur_token = metainterp.get_procedure_token(greenkey) - assert procedure_token - all_target_tokens = [] - else: - jitcell_token = make_jitcell_token(jitdriver_sd) - part = create_empty_loop(metainterp) - part.inputargs = inputargs[:] - h_ops = history.operations - part.resume_at_jump_descr = resume_at_jump_descr - part.operations = [ResOperation(rop.LABEL, inputargs, 
None, descr=TargetToken(jitcell_token))] + \ - [h_ops[i].clone() for i in range(start, len(h_ops))] + \ - [ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token)] + jitcell_token = make_jitcell_token(jitdriver_sd) + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + h_ops = history.operations + part.resume_at_jump_descr = resume_at_jump_descr + part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ + [h_ops[i].clone() for i in range(start, len(h_ops))] + \ + [ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token)] - try: - optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) - except InvalidLoop: - return None - target_token = part.operations[0].getdescr() - assert isinstance(target_token, TargetToken) - all_target_tokens = [target_token] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens = [target_token] loop = create_empty_loop(metainterp) loop.inputargs = part.inputargs @@ -319,7 +312,10 @@ metainterp_sd.stats.compiled() metainterp_sd.log("compiled new " + type) # - metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, type, ops_offset) + loopname = jitdriver_sd.warmstate.get_location_str(greenkey) + metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, + type, ops_offset, + name=loopname) # if metainterp_sd.warmrunnerdesc is not None: # for tests metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(original_jitcell_token) diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -79,9 +79,9 @@ opnum == rop.COPYSTRCONTENT or opnum == rop.COPYUNICODECONTENT): return - if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: - return - if rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST: + if (rop._OVF_FIRST <= opnum <= rop._OVF_LAST or + rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST or + rop._GUARD_FIRST <= opnum <= rop._GUARD_LAST): return if opnum == rop.CALL or opnum == rop.CALL_LOOPINVARIANT: effectinfo = descr.get_extra_info() diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py --- a/pypy/jit/metainterp/logger.py +++ b/pypy/jit/metainterp/logger.py @@ -13,14 +13,14 @@ self.metainterp_sd = metainterp_sd self.guard_number = guard_number - def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None): + def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None, name=''): if type is None: debug_start("jit-log-noopt-loop") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-loop") else: debug_start("jit-log-opt-loop") - debug_print("# Loop", number, ":", type, + debug_print("# Loop", number, '(%s)' % name , ":", type, "with", len(operations), "ops") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-opt-loop") diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -1,10 +1,13 @@ from __future__ import with_statement from pypy.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, Storage, _sortboxes, 
FakeDescrWithSnapshot) + LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot, + FakeMetaInterpStaticData) from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken from pypy.jit.metainterp.resoperation import rop, opname, ResOperation from pypy.jit.metainterp.optimize import InvalidLoop from py.test import raises +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization +from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method class BaseTestMultiLabel(BaseTest): enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" @@ -84,6 +87,8 @@ return optimized +class OptimizeoptTestMultiLabel(BaseTestMultiLabel): + def test_simple(self): ops = """ [i1] @@ -381,6 +386,55 @@ """ self.optimize_loop(ops, expected) -class TestLLtype(BaseTestMultiLabel, LLtypeMixin): + +class OptRenameStrlen(Optimization): + def propagate_forward(self, op): + dispatch_opt(self, op) + + def optimize_STRLEN(self, op): + newop = op.clone() + newop.result = op.result.clonebox() + self.emit_operation(newop) + self.make_equal_to(op.result, self.getvalue(newop.result)) + +dispatch_opt = make_dispatcher_method(OptRenameStrlen, 'optimize_', + default=OptRenameStrlen.emit_operation) + +class BaseTestOptimizerRenamingBoxes(BaseTestMultiLabel): + + def _do_optimize_loop(self, loop, call_pure_results): + from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll + from pypy.jit.metainterp.optimizeopt.util import args_dict + from pypy.jit.metainterp.optimizeopt.pure import OptPure + + self.loop = loop + loop.call_pure_results = args_dict() + metainterp_sd = FakeMetaInterpStaticData(self.cpu) + optimize_unroll(metainterp_sd, loop, [OptRenameStrlen(), OptPure()], True) + + def test_optimizer_renaming_boxes(self): + ops = """ + [p1] + i1 = strlen(p1) + label(p1) + i2 = strlen(p1) + i3 = int_add(i2, 7) + jump(p1) + """ + expected = """ + [p1] + i1 = strlen(p1) + label(p1, i1) + i11 = same_as(i1) + i2 = int_add(i11, 7) + jump(p1, i11) + """ + self.optimize_loop(ops, expected) + + + +class TestLLtype(OptimizeoptTestMultiLabel, LLtypeMixin): pass +class TestOptimizerRenamingBoxesLLtype(BaseTestOptimizerRenamingBoxes, LLtypeMixin): + pass diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7759,7 +7759,7 @@ jump(i0, p0, i2) """ self.optimize_loop(ops, expected) - + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -265,7 +265,12 @@ self.optimizer.importable_values[value] = imp newvalue = self.optimizer.getvalue(op.result) newresult = newvalue.get_key_box() - assert newresult is op.result or newvalue.is_constant() + # note that emitting here SAME_AS should not happen, but + # in case it does, we would prefer to be suboptimal in asm + # to a fatal RPython exception. 
+ if newresult is not op.result and not newvalue.is_constant(): + op = ResOperation(rop.SAME_AS, [op.result], newresult) + self.optimizer._newoperations.append(op) self.optimizer.flush() self.optimizer.emitting_dissabled = False diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -18,7 +18,7 @@ self.seen.append((inputargs, operations, token)) class FakeLogger(object): - def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None): + def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None, name=''): pass def repr_of_resop(self, op): diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py --- a/pypy/jit/metainterp/test/test_heapcache.py +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -255,6 +255,11 @@ assert h.getarrayitem(box1, descr1, index1) is box2 assert h.getarrayitem(box1, descr1, index2) is box4 + h.invalidate_caches(rop.GUARD_TRUE, None, []) + assert h.getfield(box1, descr1) is box2 + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr1, index2) is box4 + h.invalidate_caches( rop.CALL_LOOPINVARIANT, FakeCallDescr(FakeEffektinfo.EF_LOOPINVARIANT), []) diff --git a/pypy/jit/metainterp/test/test_logger.py b/pypy/jit/metainterp/test/test_logger.py --- a/pypy/jit/metainterp/test/test_logger.py +++ b/pypy/jit/metainterp/test/test_logger.py @@ -180,7 +180,7 @@ def test_intro_loop(self): bare_logger = logger.Logger(self.make_metainterp_sd()) output = capturing(bare_logger.log_loop, [], [], 1, "foo") - assert output.splitlines()[0] == "# Loop 1 : foo with 0 ops" + assert output.splitlines()[0] == "# Loop 1 () : foo with 0 ops" pure_parse(output) def test_intro_bridge(self): diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -4,6 +4,7 @@ class PyPyModule(MixedModule): interpleveldefs = { 'debug_repr': 'interp_extras.debug_repr', + 'remove_invalidates': 'interp_extras.remove_invalidates', } appleveldefs = {} diff --git a/pypy/module/micronumpy/interp_extras.py b/pypy/module/micronumpy/interp_extras.py --- a/pypy/module/micronumpy/interp_extras.py +++ b/pypy/module/micronumpy/interp_extras.py @@ -5,3 +5,11 @@ @unwrap_spec(array=BaseArray) def debug_repr(space, array): return space.wrap(array.find_sig().debug_repr()) + + at unwrap_spec(array=BaseArray) +def remove_invalidates(space, array): + """ Array modification will no longer invalidate any of it's + potential children. 
Use only for performance debugging + """ + del array.invalidates[:] + return space.w_None diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1,6 +1,6 @@ from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped +from pypy.interpreter.gateway import interp2app, NoneNotWrapped from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import interp_ufuncs, interp_dtype, signature from pypy.module.micronumpy.strides import calculate_slice_strides @@ -14,22 +14,26 @@ numpy_driver = jit.JitDriver( greens=['shapelen', 'sig'], virtualizables=['frame'], - reds=['result_size', 'frame', 'ri', 'self', 'result'] + reds=['result_size', 'frame', 'ri', 'self', 'result'], + get_printable_location=signature.new_printable_location('numpy'), ) all_driver = jit.JitDriver( greens=['shapelen', 'sig'], virtualizables=['frame'], - reds=['frame', 'self', 'dtype'] + reds=['frame', 'self', 'dtype'], + get_printable_location=signature.new_printable_location('all'), ) any_driver = jit.JitDriver( greens=['shapelen', 'sig'], virtualizables=['frame'], - reds=['frame', 'self', 'dtype'] + reds=['frame', 'self', 'dtype'], + get_printable_location=signature.new_printable_location('any'), ) slice_driver = jit.JitDriver( greens=['shapelen', 'sig'], virtualizables=['frame'], - reds=['self', 'frame', 'source', 'res_iter'] + reds=['self', 'frame', 'source', 'res_iter'], + get_printable_location=signature.new_printable_location('slice'), ) def _find_shape_and_elems(space, w_iterable): @@ -294,7 +298,8 @@ def _reduce_argmax_argmin_impl(op_name): reduce_driver = jit.JitDriver( greens=['shapelen', 'sig'], - reds=['result', 'idx', 'frame', 'self', 'cur_best', 'dtype'] + reds=['result', 'idx', 'frame', 'self', 'cur_best', 'dtype'], + get_printable_location=signature.new_printable_location(op_name), ) def loop(self): sig = self.find_sig() @@ -581,8 +586,8 @@ strides.append(concrete.strides[i]) backstrides.append(concrete.backstrides[i]) shape.append(concrete.shape[i]) - return space.wrap(W_NDimSlice(concrete.start, strides[:], - backstrides[:], shape[:], concrete)) + return space.wrap(W_NDimSlice(concrete.start, strides, + backstrides, shape, concrete)) def descr_get_flatiter(self, space): return space.wrap(W_FlatIterator(self)) @@ -886,8 +891,8 @@ if self.order == 'C': strides.reverse() backstrides.reverse() - self.strides = strides[:] - self.backstrides = backstrides[:] + self.strides = strides + self.backstrides = backstrides def array_sig(self, res_shape): if res_shape is not None and self.shape != res_shape: @@ -1092,9 +1097,9 @@ strides.reverse() backstrides.reverse() new_shape.reverse() - self.strides = strides[:] - self.backstrides = backstrides[:] - self.shape = new_shape[:] + self.strides = strides + self.backstrides = backstrides + self.shape = new_shape return new_strides = calc_new_strides(new_shape, self.shape, self.strides) if new_strides is None: @@ -1104,7 +1109,7 @@ for nd in range(len(new_shape)): new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] self.strides = new_strides[:] - self.backstrides = new_backstrides[:] + self.backstrides = new_backstrides self.shape = new_shape[:] class W_NDimArray(ConcreteArray): diff --git a/pypy/module/micronumpy/interp_ufuncs.py 
b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -1,10 +1,10 @@ from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import interp_boxes, interp_dtype, types -from pypy.module.micronumpy.signature import (ReduceSignature, - ScalarSignature, find_sig) +from pypy.module.micronumpy import interp_boxes, interp_dtype +from pypy.module.micronumpy.signature import ReduceSignature, ScalarSignature,\ + find_sig, new_printable_location from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name @@ -12,7 +12,8 @@ reduce_driver = jit.JitDriver( greens = ['shapelen', "sig"], virtualizables = ["frame"], - reds = ["frame", "self", "dtype", "value", "obj"] + reds = ["frame", "self", "dtype", "value", "obj"], + get_printable_location=new_printable_location('reduce'), ) class W_Ufunc(Wrappable): diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -5,6 +5,11 @@ from pypy.module.micronumpy.strides import calculate_slice_strides from pypy.rlib.jit import hint, unroll_safe, promote +def new_printable_location(driver_name): + def get_printable_location(shapelen, sig): + return 'numpy ' + sig.debug_repr() + ' [%d dims,%s]' % (shapelen, driver_name) + return get_printable_location + def sigeq(one, two): return one.eq(two) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -1,4 +1,9 @@ +from pypy.rlib import jit + + at jit.look_inside_iff(lambda shape, start, strides, backstrides, chunks: + jit.isconstant(len(chunks)) +) def calculate_slice_strides(shape, start, strides, backstrides, chunks): rstrides = [] rbackstrides = [] diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -906,6 +906,15 @@ b[0] = 3 assert debug_repr(b) == 'Array' + def test_remove_invalidates(self): + from numpypy import array + from numpypy.pypy import remove_invalidates + a = array([1, 2, 3]) + b = a + a + remove_invalidates(a) + a[0] = 14 + assert b[0] == 28 + def test_virtual_views(self): from numpypy import arange a = arange(15) diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -537,7 +537,7 @@ builder.append(by) builder.append_slice(input, upper, len(input)) else: - # An ok guess for the result size + # First compute the exact result size count = input.count(sub) if count > maxsplit and maxsplit > 0: count = maxsplit @@ -553,21 +553,16 @@ builder = StringBuilder(result_size) start = 0 sublen = len(sub) - first = True while maxsplit != 0: next = input.find(sub, start) if next < 0: break - if not first: - builder.append(by) - first = False builder.append_slice(input, start, next) + builder.append(by) start = next + sublen maxsplit -= 1 # NB. 
if it's already < 0, it stays < 0 - if not first: - builder.append(by) builder.append_slice(input, start, len(input)) return space.wrap(builder.build()) diff --git a/pypy/rlib/rsre/rsre_jit.py b/pypy/rlib/rsre/rsre_jit.py --- a/pypy/rlib/rsre/rsre_jit.py +++ b/pypy/rlib/rsre/rsre_jit.py @@ -22,7 +22,7 @@ info = '%s/%d' % (info, args[debugprint[2]]) else: info = '' - return '%s%s %s' % (name, info, s) + return 're %s%s %s' % (name, info, s) # self.get_printable_location = get_printable_location diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -375,7 +375,6 @@ newitems = malloc(LIST.items.TO, n) rgc.ll_arraycopy(olditems, newitems, 0, 0, n) return newitems -ll_list2fixed.oopspec = 'list.list2fixed(l)' def ll_list2fixed_exact(l): ll_assert(l.length == len(l.items), "ll_list2fixed_exact: bad length") diff --git a/pypy/rpython/test/test_generator.py b/pypy/rpython/test/test_generator.py --- a/pypy/rpython/test/test_generator.py +++ b/pypy/rpython/test/test_generator.py @@ -54,6 +54,26 @@ res = self.interpret(f, [0]) assert res == 42 + def test_except_block(self): + def foo(): + raise ValueError + def g(a, b, c): + yield a + yield b + try: + foo() + except ValueError: + pass + yield c + def f(): + gen = g(3, 5, 8) + x = gen.next() * 100 + x += gen.next() * 10 + x += gen.next() + return x + res = self.interpret(f, []) + assert res == 358 + class TestLLtype(BaseTestGenerator, LLRtypeMixin): pass diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -3,6 +3,7 @@ from pypy.jit.metainterp.resoperation import opname from pypy.jit.tool.oparser import OpParser from pypy.tool.logparser import parse_log_file, extract_category +from copy import copy class Op(object): bridge = None @@ -23,19 +24,13 @@ self.failargs = failargs def getarg(self, i): - return self._getvar(self.args[i]) + return self.args[i] def getargs(self): - return [self._getvar(v) for v in self.args] + return self.args[:] def getres(self): - return self._getvar(self.res) - - def getdescr(self): - return self.descr - - def _getvar(self, v): - return v + return self.res def is_guard(self): return self._is_guard @@ -43,7 +38,7 @@ def repr(self): args = self.getargs() if self.descr is not None: - args.append('descr=%s' % self.getdescr()) + args.append('descr=%s' % self.descr) arglist = ', '.join(args) if self.res is not None: return '%s = %s(%s)' % (self.getres(), self.name, arglist) @@ -52,8 +47,6 @@ def __repr__(self): return self.repr() - ## return '<%s (%s)>' % (self.name, ', '.join([repr(a) - ## for a in self.args])) class SimpleParser(OpParser): @@ -145,18 +138,27 @@ is_bytecode = True inline_level = None - def __init__(self, operations, storage): - if operations[0].name == 'debug_merge_point': - self.inline_level = int(operations[0].args[0]) - m = re.search('\w]+)\. file \'(.+?)\'\. 
line (\d+)> #(\d+) (\w+)', - operations[0].args[1]) - if m is None: - # a non-code loop, like StrLiteralSearch or something - self.bytecode_name = operations[0].args[1][1:-1] - else: - self.name, self.filename, lineno, bytecode_no, self.bytecode_name = m.groups() - self.startlineno = int(lineno) - self.bytecode_no = int(bytecode_no) + def parse_code_data(self, arg): + m = re.search('\w]+)[\.,] file \'(.+?)\'[\.,] line (\d+)> #(\d+) (\w+)', + arg) + if m is None: + # a non-code loop, like StrLiteralSearch or something + self.bytecode_name = arg + else: + self.name, self.filename, lineno, bytecode_no, self.bytecode_name = m.groups() + self.startlineno = int(lineno) + self.bytecode_no = int(bytecode_no) + + + def __init__(self, operations, storage, loopname): + for op in operations: + if op.name == 'debug_merge_point': + self.inline_level = int(op.args[0]) + self.parse_code_data(op.args[1][1:-1]) + break + else: + self.inline_level = 0 + self.parse_code_data(loopname) self.operations = operations self.storage = storage self.code = storage.disassemble_code(self.filename, self.startlineno, @@ -164,7 +166,7 @@ def repr(self): if self.filename is None: - return "Unknown" + return self.bytecode_name return "%s, file '%s', line %d" % (self.name, self.filename, self.startlineno) @@ -219,7 +221,8 @@ self.storage = storage @classmethod - def from_operations(cls, operations, storage, limit=None, inputargs=''): + def from_operations(cls, operations, storage, limit=None, inputargs='', + loopname=''): """ Slice given operation list into a chain of TraceForOpcode chunks. Also detect inlined functions and make them Function """ @@ -245,13 +248,13 @@ for op in operations: if op.name == 'debug_merge_point': if so_far: - append_to_res(cls.TraceForOpcode(so_far, storage)) + append_to_res(cls.TraceForOpcode(so_far, storage, loopname)) if limit: break so_far = [] so_far.append(op) if so_far: - append_to_res(cls.TraceForOpcode(so_far, storage)) + append_to_res(cls.TraceForOpcode(so_far, storage, loopname)) # wrap stack back up if not stack: # no ops whatsoever @@ -299,7 +302,7 @@ def repr(self): if self.filename is None: - return "Unknown" + return self.chunks[0].bytecode_name return "%s, file '%s', line %d" % (self.name, self.filename, self.startlineno) @@ -384,9 +387,30 @@ parser.postprocess(loop, backend_tp=bname, backend_dump=dump, dump_start=start_ofs)) - loops.append(loop) + loops += split_trace(loop) return log, loops +def split_trace(trace): + labels = [0] + if trace.comment and 'Guard' in trace.comment: + descrs = ['bridge ' + re.search('Guard (\d+)', trace.comment).group(1)] + else: + descrs = ['entry ' + re.search('Loop (\d+)', trace.comment).group(1)] + for i, op in enumerate(trace.operations): + if op.name == 'label': + labels.append(i) + descrs.append(op.descr) + labels.append(len(trace.operations) - 1) + parts = [] + for i in range(len(labels) - 1): + start, stop = labels[i], labels[i+1] + part = copy(trace) + part.operations = trace.operations[start : stop + 1] + part.descr = descrs[i] + part.comment = trace.comment + parts.append(part) + + return parts def parse_log_counts(input, loops): if not input: @@ -394,11 +418,7 @@ lines = input[-1].splitlines() mapping = {} for loop in loops: - com = loop.comment - if 'Loop' in com: - mapping['loop ' + re.search('Loop (\d+)', com).group(1)] = loop - else: - mapping['bridge ' + re.search('Guard (\d+)', com).group(1)] = loop + mapping[loop.descr] = loop for line in lines: if line: num, count = line.split(':', 2) diff --git 
a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -1,6 +1,7 @@ from pypy.tool.jitlogparser.parser import (SimpleParser, TraceForOpcode, Function, adjust_bridges, - import_log, Op) + import_log, split_trace, Op, + parse_log_counts) from pypy.tool.jitlogparser.storage import LoopStorage import py, sys @@ -32,23 +33,26 @@ ''') res = Function.from_operations(ops.operations, LoopStorage()) assert len(res.chunks) == 1 - assert res.chunks[0].repr() + assert 'SomeRandomStuff' in res.chunks[0].repr() def test_split(): ops = parse(''' [i0] + label() debug_merge_point(0, " #10 ADD") debug_merge_point(0, " #11 SUB") i1 = int_add(i0, 1) debug_merge_point(0, " #11 SUB") i2 = int_add(i1, 1) ''') - res = Function.from_operations(ops.operations, LoopStorage()) - assert len(res.chunks) == 3 + res = Function.from_operations(ops.operations, LoopStorage(), loopname='') + assert len(res.chunks) == 4 assert len(res.chunks[0].operations) == 1 - assert len(res.chunks[1].operations) == 2 + assert len(res.chunks[1].operations) == 1 assert len(res.chunks[2].operations) == 2 - assert res.chunks[2].bytecode_no == 11 + assert len(res.chunks[3].operations) == 2 + assert res.chunks[3].bytecode_no == 11 + assert res.chunks[0].bytecode_name == '' def test_inlined_call(): ops = parse(""" @@ -231,3 +235,51 @@ myrepr = 'c = foobar(a, b, descr=mydescr)' assert op.repr() == myrepr assert op.repr() == myrepr # do it twice + +def test_split_trace(): + loop = parse(''' + [i7] + i9 = int_lt(i7, 1003) + label(i9, descr=grrr) + guard_true(i9, descr=) [] + i13 = getfield_raw(151937600, descr=) + label(i13, descr=asb) + i19 = int_lt(i13, 1003) + guard_true(i19, descr=) [] + i113 = getfield_raw(151937600, descr=) + ''') + loop.comment = 'Loop 0' + parts = split_trace(loop) + assert len(parts) == 3 + assert len(parts[0].operations) == 2 + assert len(parts[1].operations) == 4 + assert len(parts[2].operations) == 4 + assert parts[1].descr == 'grrr' + assert parts[2].descr == 'asb' + +def test_parse_log_counts(): + loop = parse(''' + [i7] + i9 = int_lt(i7, 1003) + label(i9, descr=grrr) + guard_true(i9, descr=) [] + i13 = getfield_raw(151937600, descr=) + label(i13, descr=asb) + i19 = int_lt(i13, 1003) + guard_true(i19, descr=) [] + i113 = getfield_raw(151937600, descr=) + ''') + bridge = parse(''' + # bridge out of Guard 2 with 1 ops + [] + i0 = int_lt(1, 2) + finish(i0) + ''') + bridge.comment = 'bridge out of Guard 2 with 1 ops' + loop.comment = 'Loop 0' + loops = split_trace(loop) + split_trace(bridge) + input = ['grrr:123\nasb:12\nbridge 2:1234'] + parse_log_counts(input, loops) + assert loops[-1].count == 1234 + assert loops[1].count == 123 + assert loops[2].count == 12 diff --git a/pypy/translator/generator.py b/pypy/translator/generator.py --- a/pypy/translator/generator.py +++ b/pypy/translator/generator.py @@ -2,7 +2,7 @@ from pypy.objspace.flow.model import Variable, Constant, FunctionGraph from pypy.translator.unsimplify import insert_empty_startblock from pypy.translator.unsimplify import split_block -from pypy.translator.simplify import eliminate_empty_blocks +from pypy.translator.simplify import eliminate_empty_blocks, simplify_graph from pypy.tool.sourcetools import func_with_new_name from pypy.interpreter.argument import Signature @@ -64,6 +64,7 @@ def next(self): entry = self.current self.current = None + assert entry is not None # else, recursive generator invocation (next_entry, 
return_value) = func(entry) self.current = next_entry return return_value @@ -91,6 +92,10 @@ block.inputargs = [v_entry1] def tweak_generator_body_graph(Entry, graph): + # First, always run simplify_graph in order to reduce the number of + # variables passed around + simplify_graph(graph) + # assert graph.startblock.operations[0].opname == 'generator_mark' graph.startblock.operations.pop(0) # @@ -100,12 +105,20 @@ # mappings = [Entry] # + stopblock = Block([]) + v0 = Variable(); v1 = Variable() + stopblock.operations = [ + SpaceOperation('simple_call', [Constant(StopIteration)], v0), + SpaceOperation('type', [v0], v1), + ] + stopblock.closeblock(Link([v1, v0], graph.exceptblock)) + # for block in list(graph.iterblocks()): for exit in block.exits: if exit.target is graph.returnblock: - exit.args = [Constant(StopIteration), - Constant(StopIteration())] - exit.target = graph.exceptblock + exit.args = [] + exit.target = stopblock + assert block is not stopblock for index in range(len(block.operations)-1, -1, -1): op = block.operations[index] if op.opname == 'yield': diff --git a/pypy/translator/sandbox/pypy_interact.py b/pypy/translator/sandbox/pypy_interact.py --- a/pypy/translator/sandbox/pypy_interact.py +++ b/pypy/translator/sandbox/pypy_interact.py @@ -26,7 +26,8 @@ from pypy.translator.sandbox.sandlib import SimpleIOSandboxedProc from pypy.translator.sandbox.sandlib import VirtualizedSandboxedProc from pypy.translator.sandbox.vfs import Dir, RealDir, RealFile -from pypy.tool.lib_pypy import LIB_ROOT +import pypy +LIB_ROOT = os.path.dirname(os.path.dirname(pypy.__file__)) class PyPySandboxedProc(VirtualizedSandboxedProc, SimpleIOSandboxedProc): debug = True diff --git a/pypy/translator/sandbox/sandlib.py b/pypy/translator/sandbox/sandlib.py --- a/pypy/translator/sandbox/sandlib.py +++ b/pypy/translator/sandbox/sandlib.py @@ -30,8 +30,9 @@ # load(). Also, marshal.load(f) blocks with the GIL held when # f is a pipe with no data immediately avaialble, preventing the # _waiting_thread to run. -from pypy.tool.lib_pypy import import_from_lib_pypy -marshal = import_from_lib_pypy('marshal') +import pypy +marshal = py.path.local(pypy.__file__).join('..', '..', 'lib_pypy', + 'marshal.py').pyimport() # Non-marshal result types RESULTTYPE_STATRESULT = object() From noreply at buildbot.pypy.org Wed Dec 28 02:59:47 2011 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 28 Dec 2011 02:59:47 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: more finely distinguish between different 'promote_?' s Message-ID: <20111228015947.C42AA82BB4@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50920:e3824eb2fc4c Date: 2011-12-28 03:38 +0200 http://bitbucket.org/pypy/pypy/changeset/e3824eb2fc4c/ Log: more finely distinguish between different 'promote_?' 
s diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -282,16 +282,17 @@ descr_rpow = _binop_right_impl("power") descr_rmod = _binop_right_impl("mod") - def _reduce_ufunc_impl(ufunc_name): + def _reduce_ufunc_impl(ufunc_name, promote_to_largest = False): def impl(self, space, w_dim=None): if w_dim is None: w_dim = space.wrap(w_dim) return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, - self, True, w_dim) + self, True, promote_to_largest, w_dim) return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) descr_sum = _reduce_ufunc_impl("add") - descr_prod = _reduce_ufunc_impl("multiply") + descr_sum_promote = _reduce_ufunc_impl("add", True) + descr_prod = _reduce_ufunc_impl("multiply", True) descr_max = _reduce_ufunc_impl("maximum") descr_min = _reduce_ufunc_impl("minimum") @@ -318,6 +319,7 @@ idx=idx, cur_best=cur_best) new_best = getattr(dtype.itemtype, op_name)(cur_best, sig.eval(frame, self)) + print 'new_best',new_best.value,'cur_best',cur_best.value if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best @@ -560,7 +562,7 @@ return w_result def descr_mean(self, space): - return space.div(self.descr_sum(space), space.wrap(self.size)) + return space.div(self.descr_sum_promote(space), space.wrap(self.size)) def descr_nonzero(self, space): if self.size > 1: diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -101,9 +101,9 @@ array([[ 1, 5], [ 9, 13]]) ''' - return self.reduce(space, w_obj, False, w_dim) + return self.reduce(space, w_obj, False, False, w_dim) - def reduce(self, space, w_obj, multidim, w_dim): + def reduce(self, space, w_obj, multidim, promote_to_largest, w_dim): from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " @@ -122,7 +122,9 @@ size = obj.size dtype = find_unaryop_result_dtype( space, obj.find_dtype(), - promote_to_float=self.promote_to_float + promote_to_float=self.promote_to_float, + promote_to_largest = promote_to_largest, + promote_bools = True ) shapelen = len(obj.shape) if self.identity is None and size == 0: diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -722,7 +722,7 @@ raises(TypeError, 'a.sum(2, 3)') def test_sumND(self): - skip('Not finished yet') + from numpypy import arange a = arange(15).reshape(5, 3) assert (a.sum(0) == [30, 35, 40]).all() assert (a.sum(1) == [3, 12, 21, 30, 39]).all() From noreply at buildbot.pypy.org Wed Dec 28 02:59:48 2011 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 28 Dec 2011 02:59:48 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: whoops, start fixes for translate Message-ID: <20111228015948.EF51082BB4@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50921:f1a8c490b279 Date: 2011-12-28 03:56 +0200 http://bitbucket.org/pypy/pypy/changeset/f1a8c490b279/ Log: whoops, start fixes for translate diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -319,7 
+319,6 @@ idx=idx, cur_best=cur_best) new_best = getattr(dtype.itemtype, op_name)(cur_best, sig.eval(frame, self)) - print 'new_best',new_best.value,'cur_best',cur_best.value if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best From noreply at buildbot.pypy.org Wed Dec 28 10:13:27 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Dec 2011 10:13:27 +0100 (CET) Subject: [pypy-commit] pypy default: oops, fix test_pypy_c Message-ID: <20111228091327.CEC3882BB2@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50922:87d1c652d40b Date: 2011-12-28 11:13 +0200 http://bitbucket.org/pypy/pypy/changeset/87d1c652d40b/ Log: oops, fix test_pypy_c diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -32,6 +32,9 @@ def getres(self): return self.res + def getdescr(self): + return self.descr + def is_guard(self): return self._is_guard From noreply at buildbot.pypy.org Wed Dec 28 10:15:32 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Dec 2011 10:15:32 +0100 (CET) Subject: [pypy-commit] pypy default: fix some tests Message-ID: <20111228091532.5BD6582BB2@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r50923:f641bf7c9be8 Date: 2011-12-28 11:15 +0200 http://bitbucket.org/pypy/pypy/changeset/f641bf7c9be8/ Log: fix some tests diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -528,6 +528,7 @@ set_param(driver, name1, int(value)) except ValueError: raise + break else: raise ValueError set_user_param._annspecialcase_ = 'specialize:arg(0)' From noreply at buildbot.pypy.org Wed Dec 28 15:23:26 2011 From: noreply at buildbot.pypy.org (ned) Date: Wed, 28 Dec 2011 15:23:26 +0100 (CET) Subject: [pypy-commit] pypy nedbat-sandbox-2: Py is now required again. Message-ID: <20111228142326.9BC6882BDF@wyvern.cs.uni-duesseldorf.de> Author: Ned Batchelder Branch: nedbat-sandbox-2 Changeset: r50924:c03e4d0528e4 Date: 2011-12-28 08:58 -0500 http://bitbucket.org/pypy/pypy/changeset/c03e4d0528e4/ Log: Py is now required again. diff --git a/pypy/translator/sandbox/sandlib.py b/pypy/translator/sandbox/sandlib.py --- a/pypy/translator/sandbox/sandlib.py +++ b/pypy/translator/sandbox/sandlib.py @@ -8,11 +8,11 @@ import subprocess from pypy.tool.killsubprocess import killsubprocess from pypy.translator.sandbox.vfs import UID, GID +import py def create_log(): """Make and return a log for the sandbox to use, if needed.""" # These imports are local to avoid importing pypy if we don't need to. - import py from pypy.tool.ansi_print import AnsiLog class MyAnsiLog(AnsiLog): From noreply at buildbot.pypy.org Wed Dec 28 15:23:27 2011 From: noreply at buildbot.pypy.org (ned) Date: Wed, 28 Dec 2011 15:23:27 +0100 (CET) Subject: [pypy-commit] pypy nedbat-sandbox-2: Change pypy_interact's default to quiet, you have to ask for verbose. Message-ID: <20111228142327.C4EC982BE0@wyvern.cs.uni-duesseldorf.de> Author: Ned Batchelder Branch: nedbat-sandbox-2 Changeset: r50925:cd7b359d0a00 Date: 2011-12-28 09:21 -0500 http://bitbucket.org/pypy/pypy/changeset/cd7b359d0a00/ Log: Change pypy_interact's default to quiet, you have to ask for verbose. 
diff --git a/pypy/translator/sandbox/pypy_interact.py b/pypy/translator/sandbox/pypy_interact.py --- a/pypy/translator/sandbox/pypy_interact.py +++ b/pypy/translator/sandbox/pypy_interact.py @@ -13,7 +13,8 @@ ATM this only works with PyPy translated with Boehm or the semispace or generation GCs. --timeout=N limit execution time to N (real-time) seconds. - --log=FILE log all user input into the FILE + --log=FILE log all user input into the FILE. + --verbose log all proxied system calls. Note that you can get readline-like behavior with a tool like 'ledit', provided you use enough -u options: @@ -68,13 +69,13 @@ if __name__ == '__main__': from getopt import getopt # and not gnu_getopt! - options, arguments = getopt(sys.argv[1:], 't:hq', + options, arguments = getopt(sys.argv[1:], 't:hv', ['tmp=', 'heapsize=', 'timeout=', 'log=', - 'quiet', 'help']) + 'verbose', 'help']) tmpdir = None timeout = None logfile = None - debug = True + debug = False extraoptions = [] def help(): @@ -106,8 +107,8 @@ timeout = int(value) elif option == '--log': logfile = value - elif option in ['-q', '--quiet']: - debug = False + elif option in ['-v', '--verbose']: + debug = True elif option in ['-h', '--help']: help() else: From noreply at buildbot.pypy.org Wed Dec 28 15:26:10 2011 From: noreply at buildbot.pypy.org (ned) Date: Wed, 28 Dec 2011 15:26:10 +0100 (CET) Subject: [pypy-commit] pypy default: Merge sandbox tweaks from nedbat-sandbox-2 Message-ID: <20111228142610.7CFF482BDF@wyvern.cs.uni-duesseldorf.de> Author: Ned Batchelder Branch: Changeset: r50926:42fbbcbfc2ac Date: 2011-12-28 09:25 -0500 http://bitbucket.org/pypy/pypy/changeset/42fbbcbfc2ac/ Log: Merge sandbox tweaks from nedbat-sandbox-2 diff --git a/pypy/translator/sandbox/pypy_interact.py b/pypy/translator/sandbox/pypy_interact.py --- a/pypy/translator/sandbox/pypy_interact.py +++ b/pypy/translator/sandbox/pypy_interact.py @@ -13,7 +13,8 @@ ATM this only works with PyPy translated with Boehm or the semispace or generation GCs. --timeout=N limit execution time to N (real-time) seconds. - --log=FILE log all user input into the FILE + --log=FILE log all user input into the FILE. + --verbose log all proxied system calls. Note that you can get readline-like behavior with a tool like 'ledit', provided you use enough -u options: @@ -30,15 +31,15 @@ LIB_ROOT = os.path.dirname(os.path.dirname(pypy.__file__)) class PyPySandboxedProc(VirtualizedSandboxedProc, SimpleIOSandboxedProc): - debug = True argv0 = '/bin/pypy-c' virtual_cwd = '/tmp' virtual_env = {} virtual_console_isatty = True - def __init__(self, executable, arguments, tmpdir=None): + def __init__(self, executable, arguments, tmpdir=None, debug=True): self.executable = executable = os.path.abspath(executable) self.tmpdir = tmpdir + self.debug = debug super(PyPySandboxedProc, self).__init__([self.argv0] + arguments, executable=executable) @@ -68,12 +69,13 @@ if __name__ == '__main__': from getopt import getopt # and not gnu_getopt! 
- options, arguments = getopt(sys.argv[1:], 't:h', + options, arguments = getopt(sys.argv[1:], 't:hv', ['tmp=', 'heapsize=', 'timeout=', 'log=', - 'help']) + 'verbose', 'help']) tmpdir = None timeout = None logfile = None + debug = False extraoptions = [] def help(): @@ -105,6 +107,8 @@ timeout = int(value) elif option == '--log': logfile = value + elif option in ['-v', '--verbose']: + debug = True elif option in ['-h', '--help']: help() else: @@ -114,7 +118,7 @@ help() sandproc = PyPySandboxedProc(arguments[0], extraoptions + arguments[1:], - tmpdir=tmpdir) + tmpdir=tmpdir, debug=debug) if timeout is not None: sandproc.settimeout(timeout, interrupt_main=True) if logfile is not None: diff --git a/pypy/translator/sandbox/sandlib.py b/pypy/translator/sandbox/sandlib.py --- a/pypy/translator/sandbox/sandlib.py +++ b/pypy/translator/sandbox/sandlib.py @@ -4,25 +4,29 @@ for the outer process, which can run CPython or PyPy. """ -import py import sys, os, posixpath, errno, stat, time -from pypy.tool.ansi_print import AnsiLog import subprocess from pypy.tool.killsubprocess import killsubprocess from pypy.translator.sandbox.vfs import UID, GID +import py -class MyAnsiLog(AnsiLog): - KW_TO_COLOR = { - 'call': ((34,), False), - 'result': ((34,), False), - 'exception': ((34,), False), - 'vpath': ((35,), False), - 'timeout': ((1, 31), True), - } +def create_log(): + """Make and return a log for the sandbox to use, if needed.""" + # These imports are local to avoid importing pypy if we don't need to. + from pypy.tool.ansi_print import AnsiLog -log = py.log.Producer("sandlib") -py.log.setconsumer("sandlib", MyAnsiLog()) + class MyAnsiLog(AnsiLog): + KW_TO_COLOR = { + 'call': ((34,), False), + 'result': ((34,), False), + 'exception': ((34,), False), + 'vpath': ((35,), False), + 'timeout': ((1, 31), True), + } + log = py.log.Producer("sandlib") + py.log.setconsumer("sandlib", MyAnsiLog()) + return log # Note: we use lib_pypy/marshal.py instead of the built-in marshal # for two reasons. The built-in module could be made to segfault @@ -127,6 +131,7 @@ for the external functions xxx that you want to support. """ debug = False + log = None os_level_sandboxing = False # Linux only: /proc/PID/seccomp def __init__(self, args, executable=None): @@ -143,6 +148,9 @@ self.currenttimeout = None self.currentlyidlefrom = None + if self.debug: + self.log = create_log() + def withlock(self, function, *args, **kwds): lock = self.popenlock if lock is not None: @@ -170,7 +178,8 @@ if delay <= 0.0: break # expired! 
time.sleep(min(delay*1.001, 1)) - log.timeout("timeout!") + if self.log: + self.log.timeout("timeout!") self.kill() #if interrupt_main: # if hasattr(os, 'kill'): @@ -247,22 +256,22 @@ args = read_message(child_stdout) except EOFError, e: break - if self.debug and not self.is_spam(fnname, *args): - log.call('%s(%s)' % (fnname, + if self.log and not self.is_spam(fnname, *args): + self.log.call('%s(%s)' % (fnname, ', '.join([shortrepr(x) for x in args]))) try: answer, resulttype = self.handle_message(fnname, *args) except Exception, e: tb = sys.exc_info()[2] write_exception(child_stdin, e, tb) - if self.debug: + if self.log: if str(e): - log.exception('%s: %s' % (e.__class__.__name__, e)) + self.log.exception('%s: %s' % (e.__class__.__name__, e)) else: - log.exception('%s' % (e.__class__.__name__,)) + self.log.exception('%s' % (e.__class__.__name__,)) else: - if self.debug and not self.is_spam(fnname, *args): - log.result(shortrepr(answer)) + if self.log and not self.is_spam(fnname, *args): + self.log.result(shortrepr(answer)) try: write_message(child_stdin, 0) # error code - 0 for ok write_message(child_stdin, answer, resulttype) @@ -441,7 +450,8 @@ node = dirnode.join(name) else: node = dirnode - log.vpath('%r => %r' % (vpath, node)) + if self.log: + self.log.vpath('%r => %r' % (vpath, node)) return node def do_ll_os__ll_os_stat(self, vpathname): From noreply at buildbot.pypy.org Wed Dec 28 15:28:19 2011 From: noreply at buildbot.pypy.org (ned) Date: Wed, 28 Dec 2011 15:28:19 +0100 (CET) Subject: [pypy-commit] pypy nedbat-sandbox-2: Close nedbat-sandbox-2 Message-ID: <20111228142819.7201C82BDF@wyvern.cs.uni-duesseldorf.de> Author: Ned Batchelder Branch: nedbat-sandbox-2 Changeset: r50927:93bb4d305fdb Date: 2011-12-28 09:27 -0500 http://bitbucket.org/pypy/pypy/changeset/93bb4d305fdb/ Log: Close nedbat-sandbox-2 From noreply at buildbot.pypy.org Wed Dec 28 19:25:07 2011 From: noreply at buildbot.pypy.org (hager) Date: Wed, 28 Dec 2011 19:25:07 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: (edelsohn, bivab) fix calls to leave_jitted_hook on PPC64 as well as restoring the LR to return from the compiled code Message-ID: <20111228182507.8B59C82C01@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50928:44255e0c9ffe Date: 2011-12-28 10:24 -0800 http://bitbucket.org/pypy/pypy/changeset/44255e0c9ffe/ Log: (edelsohn, bivab) fix calls to leave_jitted_hook on PPC64 as well as restoring the LR to return from the compiled code diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -872,7 +872,14 @@ # # misaligned stack in the call, but it's ok because the write barrier # is not going to call anything more. 
- self.mc.bl_abs(func) + if IS_PPC_32: + self.mc.bl_abs(func) + else: + self.mc.load_from_addr(r.SCRATCH, adr) + self.mc.load_from_addr(r.TOC, adr + WORD) + self.mc.load_from_addr(r.r11, adr + 2 * WORD) + self.mc.mtctr(r.SCRATCH.value) + self.mc.bctrl() # patch the JZ above offset = self.mc.currpos() - jz_location diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -279,7 +279,14 @@ mc = PPCBuilder() with Saved_Volatiles(mc): addr = self.cpu.get_on_leave_jitted_int(save_exception=True) - mc.bl_abs(addr) + if IS_PPC_32: + mc.bl_abs(addr) + else: + mc.load_from_addr(r.SCRATCH, addr) + mc.load_from_addr(r.r2, addr + WORD) + mc.load_from_addr(r.r11, addr + 2 * WORD) + mc.mtctr(r.SCRATCH.value) + mc.bctrl() #mc.alloc_scratch_reg(self.cpu.propagate_exception_v) #mc.mr(r.RES.value, r.SCRATCH.value) #mc.free_scratch_reg() @@ -292,7 +299,14 @@ with Saved_Volatiles(mc): addr = self.cpu.get_on_leave_jitted_int(save_exception=save_exc) - mc.bl_abs(addr) + if IS_PPC_32: + mc.bl_abs(addr) + else: + mc.load_from_addr(r.SCRATCH, addr) + mc.load_from_addr(r.r2, addr + WORD) + mc.load_from_addr(r.r11, addr + 2 * WORD) + mc.mtctr(r.SCRATCH.value) + mc.bctrl() mc.b_abs(self.exit_code_adr) mc.prepare_insts_blocks() @@ -345,7 +359,11 @@ mc.mr(r.r5.value, r.SPP.value) self._restore_nonvolatiles(mc, r.r5) # load old backchain into r4 - mc.load(r.r4.value, r.r5.value, self.OFFSET_SPP_TO_OLD_BACKCHAIN + WORD) + if IS_PPC_32: + ofs = WORD + else: + ofs = WORD * 2 + mc.load(r.r4.value, r.r5.value, self.OFFSET_SPP_TO_OLD_BACKCHAIN + ofs) mc.mtlr(r.r4.value) # restore LR # From SPP, we have a constant offset to the old backchain. We use the # SPP to re-establish the old backchain because this exit stub is From noreply at buildbot.pypy.org Wed Dec 28 20:03:44 2011 From: noreply at buildbot.pypy.org (edelsohn) Date: Wed, 28 Dec 2011 20:03:44 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: BACKCHAIN_SIZE is 6 Message-ID: <20111228190344.6D4D182C01@wyvern.cs.uni-duesseldorf.de> Author: edelsohn Branch: ppc-jit-backend Changeset: r50929:4b4775dd052f Date: 2011-12-28 14:03 -0500 http://bitbucket.org/pypy/pypy/changeset/4b4775dd052f/ Log: BACKCHAIN_SIZE is 6 diff --git a/pypy/jit/backend/ppc/ppcgen/arch.py b/pypy/jit/backend/ppc/ppcgen/arch.py --- a/pypy/jit/backend/ppc/ppcgen/arch.py +++ b/pypy/jit/backend/ppc/ppcgen/arch.py @@ -12,7 +12,7 @@ else: WORD = 8 IS_PPC_32 = False - BACKCHAIN_SIZE = 4 + BACKCHAIN_SIZE = 6 DWORD = 2 * WORD IS_PPC_64 = not IS_PPC_32 From noreply at buildbot.pypy.org Wed Dec 28 21:00:41 2011 From: noreply at buildbot.pypy.org (edelsohn) Date: Wed, 28 Dec 2011 21:00:41 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: assert offset is_reg in cmp_guard_class. Message-ID: <20111228200041.9BDD182C01@wyvern.cs.uni-duesseldorf.de> Author: edelsohn Branch: ppc-jit-backend Changeset: r50930:1d80c59520d2 Date: 2011-12-28 14:59 -0500 http://bitbucket.org/pypy/pypy/changeset/1d80c59520d2/ Log: assert offset is_reg in cmp_guard_class. 
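The hunk that follows only adds a defensive assertion: in cmp_guard_class the field-offset location is either an immediate or a register, and the register-indexed load (loadx) is now guarded by an explicit check instead of an implicit assumption. A minimal sketch of that pattern, using hypothetical Loc and emit_field_load stand-ins rather than the real PPC backend classes:

# Sketch only: Loc and emit_field_load are made-up stand-ins, not the real
# backend API.  The point is the explicit assert in the fallback branch,
# which fails loudly on an unexpected location kind instead of silently
# emitting a wrong load.
class Loc(object):
    def __init__(self, kind, value):
        self.kind = kind
        self.value = value
    def is_imm(self):
        return self.kind == 'imm'
    def is_reg(self):
        return self.kind == 'reg'

def emit_field_load(base_reg, offset):
    if offset.is_imm():
        return 'load   scratch, %d(r%d)' % (offset.value, base_reg)
    else:
        assert offset.is_reg()   # anything else is a register-allocator bug
        return 'loadx  scratch, r%d, r%d' % (base_reg, offset.value)

print(emit_field_load(3, Loc('imm', 8)))    # constant offset
print(emit_field_load(3, Loc('reg', 5)))    # offset held in a register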
diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -245,6 +245,7 @@ if offset.is_imm(): self.mc.load(r.SCRATCH.value, locs[0].value, offset.value) else: + assert offset.is_reg() self.mc.loadx(r.SCRATCH.value, locs[0].value, offset.value) self.mc.cmp_op(0, r.SCRATCH.value, locs[1].value) self.mc.free_scratch_reg() From noreply at buildbot.pypy.org Wed Dec 28 21:00:42 2011 From: noreply at buildbot.pypy.org (edelsohn) Date: Wed, 28 Dec 2011 21:00:42 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: Fix typo in loadx. Message-ID: <20111228200042.C937282C01@wyvern.cs.uni-duesseldorf.de> Author: edelsohn Branch: ppc-jit-backend Changeset: r50931:6b75e619e2d4 Date: 2011-12-28 15:00 -0500 http://bitbucket.org/pypy/pypy/changeset/6b75e619e2d4/ Log: Fix typo in loadx. diff --git a/pypy/jit/backend/ppc/ppcgen/codebuilder.py b/pypy/jit/backend/ppc/ppcgen/codebuilder.py --- a/pypy/jit/backend/ppc/ppcgen/codebuilder.py +++ b/pypy/jit/backend/ppc/ppcgen/codebuilder.py @@ -1043,7 +1043,7 @@ if IS_PPC_32: self.lwzx(target_reg, base_reg, offset_reg) else: - self.ldx(target_reg, base_reg. offset_reg) + self.ldx(target_reg, base_reg, offset_reg) def store(self, from_reg, base_reg, offset): if IS_PPC_32: From noreply at buildbot.pypy.org Wed Dec 28 21:29:52 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Wed, 28 Dec 2011 21:29:52 +0100 (CET) Subject: [pypy-commit] pypy jit-improve-nested-loops: closing to be merged branch Message-ID: <20111228202952.8F24782C01@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-improve-nested-loops Changeset: r50934:c30513c22175 Date: 2011-12-28 21:26 +0100 http://bitbucket.org/pypy/pypy/changeset/c30513c22175/ Log: closing to be merged branch From noreply at buildbot.pypy.org Wed Dec 28 21:29:50 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Wed, 28 Dec 2011 21:29:50 +0100 (CET) Subject: [pypy-commit] pypy jit-improve-nested-loops: hg merge default Message-ID: <20111228202950.DCB4082C02@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-improve-nested-loops Changeset: r50933:3f020e9bd542 Date: 2011-12-28 21:25 +0100 http://bitbucket.org/pypy/pypy/changeset/3f020e9bd542/ Log: hg merge default diff --git a/pypy/translator/sandbox/pypy_interact.py b/pypy/translator/sandbox/pypy_interact.py --- a/pypy/translator/sandbox/pypy_interact.py +++ b/pypy/translator/sandbox/pypy_interact.py @@ -13,7 +13,8 @@ ATM this only works with PyPy translated with Boehm or the semispace or generation GCs. --timeout=N limit execution time to N (real-time) seconds. - --log=FILE log all user input into the FILE + --log=FILE log all user input into the FILE. + --verbose log all proxied system calls. 
Note that you can get readline-like behavior with a tool like 'ledit', provided you use enough -u options: @@ -30,15 +31,15 @@ LIB_ROOT = os.path.dirname(os.path.dirname(pypy.__file__)) class PyPySandboxedProc(VirtualizedSandboxedProc, SimpleIOSandboxedProc): - debug = True argv0 = '/bin/pypy-c' virtual_cwd = '/tmp' virtual_env = {} virtual_console_isatty = True - def __init__(self, executable, arguments, tmpdir=None): + def __init__(self, executable, arguments, tmpdir=None, debug=True): self.executable = executable = os.path.abspath(executable) self.tmpdir = tmpdir + self.debug = debug super(PyPySandboxedProc, self).__init__([self.argv0] + arguments, executable=executable) @@ -68,12 +69,13 @@ if __name__ == '__main__': from getopt import getopt # and not gnu_getopt! - options, arguments = getopt(sys.argv[1:], 't:h', + options, arguments = getopt(sys.argv[1:], 't:hv', ['tmp=', 'heapsize=', 'timeout=', 'log=', - 'help']) + 'verbose', 'help']) tmpdir = None timeout = None logfile = None + debug = False extraoptions = [] def help(): @@ -105,6 +107,8 @@ timeout = int(value) elif option == '--log': logfile = value + elif option in ['-v', '--verbose']: + debug = True elif option in ['-h', '--help']: help() else: @@ -114,7 +118,7 @@ help() sandproc = PyPySandboxedProc(arguments[0], extraoptions + arguments[1:], - tmpdir=tmpdir) + tmpdir=tmpdir, debug=debug) if timeout is not None: sandproc.settimeout(timeout, interrupt_main=True) if logfile is not None: diff --git a/pypy/translator/sandbox/sandlib.py b/pypy/translator/sandbox/sandlib.py --- a/pypy/translator/sandbox/sandlib.py +++ b/pypy/translator/sandbox/sandlib.py @@ -4,25 +4,29 @@ for the outer process, which can run CPython or PyPy. """ -import py import sys, os, posixpath, errno, stat, time -from pypy.tool.ansi_print import AnsiLog import subprocess from pypy.tool.killsubprocess import killsubprocess from pypy.translator.sandbox.vfs import UID, GID +import py -class MyAnsiLog(AnsiLog): - KW_TO_COLOR = { - 'call': ((34,), False), - 'result': ((34,), False), - 'exception': ((34,), False), - 'vpath': ((35,), False), - 'timeout': ((1, 31), True), - } +def create_log(): + """Make and return a log for the sandbox to use, if needed.""" + # These imports are local to avoid importing pypy if we don't need to. + from pypy.tool.ansi_print import AnsiLog -log = py.log.Producer("sandlib") -py.log.setconsumer("sandlib", MyAnsiLog()) + class MyAnsiLog(AnsiLog): + KW_TO_COLOR = { + 'call': ((34,), False), + 'result': ((34,), False), + 'exception': ((34,), False), + 'vpath': ((35,), False), + 'timeout': ((1, 31), True), + } + log = py.log.Producer("sandlib") + py.log.setconsumer("sandlib", MyAnsiLog()) + return log # Note: we use lib_pypy/marshal.py instead of the built-in marshal # for two reasons. The built-in module could be made to segfault @@ -127,6 +131,7 @@ for the external functions xxx that you want to support. """ debug = False + log = None os_level_sandboxing = False # Linux only: /proc/PID/seccomp def __init__(self, args, executable=None): @@ -143,6 +148,9 @@ self.currenttimeout = None self.currentlyidlefrom = None + if self.debug: + self.log = create_log() + def withlock(self, function, *args, **kwds): lock = self.popenlock if lock is not None: @@ -170,7 +178,8 @@ if delay <= 0.0: break # expired! 
time.sleep(min(delay*1.001, 1)) - log.timeout("timeout!") + if self.log: + self.log.timeout("timeout!") self.kill() #if interrupt_main: # if hasattr(os, 'kill'): @@ -247,22 +256,22 @@ args = read_message(child_stdout) except EOFError, e: break - if self.debug and not self.is_spam(fnname, *args): - log.call('%s(%s)' % (fnname, + if self.log and not self.is_spam(fnname, *args): + self.log.call('%s(%s)' % (fnname, ', '.join([shortrepr(x) for x in args]))) try: answer, resulttype = self.handle_message(fnname, *args) except Exception, e: tb = sys.exc_info()[2] write_exception(child_stdin, e, tb) - if self.debug: + if self.log: if str(e): - log.exception('%s: %s' % (e.__class__.__name__, e)) + self.log.exception('%s: %s' % (e.__class__.__name__, e)) else: - log.exception('%s' % (e.__class__.__name__,)) + self.log.exception('%s' % (e.__class__.__name__,)) else: - if self.debug and not self.is_spam(fnname, *args): - log.result(shortrepr(answer)) + if self.log and not self.is_spam(fnname, *args): + self.log.result(shortrepr(answer)) try: write_message(child_stdin, 0) # error code - 0 for ok write_message(child_stdin, answer, resulttype) @@ -441,7 +450,8 @@ node = dirnode.join(name) else: node = dirnode - log.vpath('%r => %r' % (vpath, node)) + if self.log: + self.log.vpath('%r => %r' % (vpath, node)) return node def do_ll_os__ll_os_stat(self, vpathname): From noreply at buildbot.pypy.org Wed Dec 28 21:29:49 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Wed, 28 Dec 2011 21:29:49 +0100 (CET) Subject: [pypy-commit] pypy jit-improve-nested-loops: hg merge default Message-ID: <20111228202949.A9D2782C01@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-improve-nested-loops Changeset: r50932:a7a84c778e28 Date: 2011-12-28 12:40 +0100 http://bitbucket.org/pypy/pypy/changeset/a7a84c778e28/ Log: hg merge default diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -59,7 +59,8 @@ self.is_guard_not_invalidated = is_guard_not_invalidated DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed), - ('bridge', lltype.Signed), # 0 or 1 + ('type', lltype.Char), # 'b'ridge, 'l'abel or + # 'e'ntry point ('number', lltype.Signed)) class Assembler386(object): @@ -150,10 +151,12 @@ debug_start('jit-backend-counts') for i in range(len(self.loop_run_counters)): struct = self.loop_run_counters[i] - if not struct.bridge: + if struct.type == 'l': prefix = 'TargetToken(%d)' % struct.number + elif struct.type == 'b': + prefix = 'bridge ' + str(struct.number) else: - prefix = 'bridge ' + str(struct.number) + prefix = 'entry ' + str(struct.number) debug_print(prefix + ':' + str(struct.i)) debug_stop('jit-backend-counts') @@ -425,7 +428,7 @@ self.setup(looptoken) if log: operations = self._inject_debugging_code(looptoken, operations, - False, looptoken.number) + 'e', looptoken.number) regalloc = RegAlloc(self, self.cpu.translate_support_code) # @@ -492,7 +495,7 @@ self.setup(original_loop_token) if log: operations = self._inject_debugging_code(faildescr, operations, - True, descr_number) + 'b', descr_number) arglocs = self.rebuild_faillocs_from_descr(failure_recovery) if not we_are_translated(): @@ -599,15 +602,15 @@ return self.mc.materialize(self.cpu.asmmemmgr, allblocks, self.cpu.gc_ll_descr.gcrootmap) - def _register_counter(self, bridge, number, token): + def _register_counter(self, tp, number, token): # YYY very minor leak -- we need the counters to stay alive # forever, just 
because we want to report them at the end # of the process struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', track_allocation=False) struct.i = 0 - struct.bridge = int(bridge) - if bridge: + struct.type = tp + if tp == 'b' or tp == 'e': struct.number = number else: assert token @@ -657,8 +660,8 @@ targettoken._x86_loop_code += rawstart self.target_tokens_currently_compiling = None - def _append_debugging_code(self, operations, bridge, number, token): - counter = self._register_counter(bridge, number, token) + def _append_debugging_code(self, operations, tp, number, token): + counter = self._register_counter(tp, number, token) c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) box = BoxInt() box2 = BoxInt() @@ -670,7 +673,7 @@ operations.extend(ops) @specialize.argtype(1) - def _inject_debugging_code(self, looptoken, operations, bridge, number): + def _inject_debugging_code(self, looptoken, operations, tp, number): if self._debug: # before doing anything, let's increase a counter s = 0 @@ -679,13 +682,12 @@ looptoken._x86_debug_checksum = s newoperations = [] - if bridge: - self._append_debugging_code(newoperations, bridge, number, - None) + self._append_debugging_code(newoperations, tp, number, + None) for op in operations: newoperations.append(op) if op.getopnum() == rop.LABEL: - self._append_debugging_code(newoperations, bridge, number, + self._append_debugging_code(newoperations, 'l', number, op.getdescr()) operations = newoperations return operations diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -546,13 +546,16 @@ struct = self.cpu.assembler.loop_run_counters[0] assert struct.i == 1 struct = self.cpu.assembler.loop_run_counters[1] + assert struct.i == 1 + struct = self.cpu.assembler.loop_run_counters[2] assert struct.i == 9 self.cpu.finish_once() finally: debug._log = None + l0 = ('debug_print', 'entry -1:1') l1 = ('debug_print', preambletoken.repr_of_descr() + ':1') l2 = ('debug_print', targettoken.repr_of_descr() + ':9') - assert ('jit-backend-counts', [l1, l2]) in dlog + assert ('jit-backend-counts', [l0, l1, l2]) in dlog def test_debugger_checksum(self): loop = """ diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -112,33 +112,26 @@ """ from pypy.jit.metainterp.optimizeopt import optimize_trace - history = metainterp.history metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd + history = metainterp.history - if False: - part = partial_trace - assert False - procedur_token = metainterp.get_procedure_token(greenkey) - assert procedure_token - all_target_tokens = [] - else: - jitcell_token = make_jitcell_token(jitdriver_sd) - part = create_empty_loop(metainterp) - part.inputargs = inputargs[:] - h_ops = history.operations - part.resume_at_jump_descr = resume_at_jump_descr - part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ - [h_ops[i].clone() for i in range(start, len(h_ops))] + \ - [ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token)] + jitcell_token = make_jitcell_token(jitdriver_sd) + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + h_ops = history.operations + part.resume_at_jump_descr = resume_at_jump_descr + part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ + [h_ops[i].clone() 
for i in range(start, len(h_ops))] + \ + [ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token)] - try: - optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) - except InvalidLoop: - return None - target_token = part.operations[0].getdescr() - assert isinstance(target_token, TargetToken) - all_target_tokens = [target_token] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens = [target_token] loop = create_empty_loop(metainterp) loop.inputargs = part.inputargs @@ -328,7 +321,10 @@ metainterp_sd.stats.compiled() metainterp_sd.log("compiled new " + type) # - metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, type, ops_offset) + loopname = jitdriver_sd.warmstate.get_location_str(greenkey) + metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, + type, ops_offset, + name=loopname) # if metainterp_sd.warmrunnerdesc is not None: # for tests metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(original_jitcell_token) diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -79,9 +79,9 @@ opnum == rop.COPYSTRCONTENT or opnum == rop.COPYUNICODECONTENT): return - if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: - return - if rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST: + if (rop._OVF_FIRST <= opnum <= rop._OVF_LAST or + rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST or + rop._GUARD_FIRST <= opnum <= rop._GUARD_LAST): return if opnum == rop.CALL or opnum == rop.CALL_LOOPINVARIANT: effectinfo = descr.get_extra_info() diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py --- a/pypy/jit/metainterp/logger.py +++ b/pypy/jit/metainterp/logger.py @@ -13,14 +13,14 @@ self.metainterp_sd = metainterp_sd self.guard_number = guard_number - def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None): + def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None, name=''): if type is None: debug_start("jit-log-noopt-loop") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-loop") else: debug_start("jit-log-opt-loop") - debug_print("# Loop", number, ":", type, + debug_print("# Loop", number, '(%s)' % name , ":", type, "with", len(operations), "ops") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-opt-loop") diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -18,7 +18,7 @@ self.seen.append((inputargs, operations, token)) class FakeLogger(object): - def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None): + def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None, name=''): pass def repr_of_resop(self, op): diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py --- a/pypy/jit/metainterp/test/test_heapcache.py +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -255,6 +255,11 @@ assert h.getarrayitem(box1, descr1, index1) is box2 assert h.getarrayitem(box1, descr1, index2) is box4 + h.invalidate_caches(rop.GUARD_TRUE, None, []) + assert h.getfield(box1, descr1) is box2 + assert 
h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr1, index2) is box4 + h.invalidate_caches( rop.CALL_LOOPINVARIANT, FakeCallDescr(FakeEffektinfo.EF_LOOPINVARIANT), []) diff --git a/pypy/jit/metainterp/test/test_logger.py b/pypy/jit/metainterp/test/test_logger.py --- a/pypy/jit/metainterp/test/test_logger.py +++ b/pypy/jit/metainterp/test/test_logger.py @@ -180,7 +180,7 @@ def test_intro_loop(self): bare_logger = logger.Logger(self.make_metainterp_sd()) output = capturing(bare_logger.log_loop, [], [], 1, "foo") - assert output.splitlines()[0] == "# Loop 1 : foo with 0 ops" + assert output.splitlines()[0] == "# Loop 1 () : foo with 0 ops" pure_parse(output) def test_intro_bridge(self): diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -4,6 +4,7 @@ class PyPyModule(MixedModule): interpleveldefs = { 'debug_repr': 'interp_extras.debug_repr', + 'remove_invalidates': 'interp_extras.remove_invalidates', } appleveldefs = {} diff --git a/pypy/module/micronumpy/interp_extras.py b/pypy/module/micronumpy/interp_extras.py --- a/pypy/module/micronumpy/interp_extras.py +++ b/pypy/module/micronumpy/interp_extras.py @@ -5,3 +5,11 @@ @unwrap_spec(array=BaseArray) def debug_repr(space, array): return space.wrap(array.find_sig().debug_repr()) + + at unwrap_spec(array=BaseArray) +def remove_invalidates(space, array): + """ Array modification will no longer invalidate any of it's + potential children. Use only for performance debugging + """ + del array.invalidates[:] + return space.w_None diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1,6 +1,6 @@ from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped +from pypy.interpreter.gateway import interp2app, NoneNotWrapped from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import interp_ufuncs, interp_dtype, signature from pypy.module.micronumpy.strides import calculate_slice_strides @@ -14,22 +14,26 @@ numpy_driver = jit.JitDriver( greens=['shapelen', 'sig'], virtualizables=['frame'], - reds=['result_size', 'frame', 'ri', 'self', 'result'] + reds=['result_size', 'frame', 'ri', 'self', 'result'], + get_printable_location=signature.new_printable_location('numpy'), ) all_driver = jit.JitDriver( greens=['shapelen', 'sig'], virtualizables=['frame'], - reds=['frame', 'self', 'dtype'] + reds=['frame', 'self', 'dtype'], + get_printable_location=signature.new_printable_location('all'), ) any_driver = jit.JitDriver( greens=['shapelen', 'sig'], virtualizables=['frame'], - reds=['frame', 'self', 'dtype'] + reds=['frame', 'self', 'dtype'], + get_printable_location=signature.new_printable_location('any'), ) slice_driver = jit.JitDriver( greens=['shapelen', 'sig'], virtualizables=['frame'], - reds=['self', 'frame', 'source', 'res_iter'] + reds=['self', 'frame', 'source', 'res_iter'], + get_printable_location=signature.new_printable_location('slice'), ) def _find_shape_and_elems(space, w_iterable): @@ -291,7 +295,8 @@ def _reduce_argmax_argmin_impl(op_name): reduce_driver = jit.JitDriver( greens=['shapelen', 'sig'], - reds=['result', 'idx', 'frame', 'self', 'cur_best', 'dtype'] + reds=['result', 'idx', 'frame', 
'self', 'cur_best', 'dtype'], + get_printable_location=signature.new_printable_location(op_name), ) def loop(self): sig = self.find_sig() diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -1,9 +1,10 @@ from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import interp_boxes, interp_dtype, types -from pypy.module.micronumpy.signature import ReduceSignature, ScalarSignature, find_sig +from pypy.module.micronumpy import interp_boxes, interp_dtype +from pypy.module.micronumpy.signature import ReduceSignature, ScalarSignature,\ + find_sig, new_printable_location from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name @@ -11,7 +12,8 @@ reduce_driver = jit.JitDriver( greens = ['shapelen', "sig"], virtualizables = ["frame"], - reds = ["frame", "self", "dtype", "value", "obj"] + reds = ["frame", "self", "dtype", "value", "obj"], + get_printable_location=new_printable_location('reduce'), ) class W_Ufunc(Wrappable): diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -5,6 +5,11 @@ from pypy.module.micronumpy.strides import calculate_slice_strides from pypy.rlib.jit import hint, unroll_safe, promote +def new_printable_location(driver_name): + def get_printable_location(shapelen, sig): + return 'numpy ' + sig.debug_repr() + ' [%d dims,%s]' % (shapelen, driver_name) + return get_printable_location + def sigeq(one, two): return one.eq(two) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -1,4 +1,9 @@ +from pypy.rlib import jit + + at jit.look_inside_iff(lambda shape, start, strides, backstrides, chunks: + jit.isconstant(len(chunks)) +) def calculate_slice_strides(shape, start, strides, backstrides, chunks): rstrides = [] rbackstrides = [] diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -898,6 +898,15 @@ b[0] = 3 assert debug_repr(b) == 'Array' + def test_remove_invalidates(self): + from numpypy import array + from numpypy.pypy import remove_invalidates + a = array([1, 2, 3]) + b = a + a + remove_invalidates(a) + a[0] = 14 + assert b[0] == 28 + def test_virtual_views(self): from numpypy import arange a = arange(15) diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -528,6 +528,9 @@ set_param(driver, name1, int(value)) except ValueError: raise + break + else: + raise ValueError set_user_param._annspecialcase_ = 'specialize:arg(0)' diff --git a/pypy/rlib/rsre/rsre_jit.py b/pypy/rlib/rsre/rsre_jit.py --- a/pypy/rlib/rsre/rsre_jit.py +++ b/pypy/rlib/rsre/rsre_jit.py @@ -22,7 +22,7 @@ info = '%s/%d' % (info, args[debugprint[2]]) else: info = '' - return '%s%s %s' % (name, info, s) + return 're %s%s %s' % (name, info, s) # self.get_printable_location = get_printable_location diff --git 
a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -24,27 +24,24 @@ self.failargs = failargs def getarg(self, i): - return self._getvar(self.args[i]) + return self.args[i] def getargs(self): - return [self._getvar(v) for v in self.args] + return self.args[:] def getres(self): - return self._getvar(self.res) + return self.res def getdescr(self): return self.descr - def _getvar(self, v): - return v - def is_guard(self): return self._is_guard def repr(self): args = self.getargs() if self.descr is not None: - args.append('descr=%s' % self.getdescr()) + args.append('descr=%s' % self.descr) arglist = ', '.join(args) if self.res is not None: return '%s = %s(%s)' % (self.getres(), self.name, arglist) @@ -53,8 +50,6 @@ def __repr__(self): return self.repr() - ## return '<%s (%s)>' % (self.name, ', '.join([repr(a) - ## for a in self.args])) class SimpleParser(OpParser): @@ -146,18 +141,27 @@ is_bytecode = True inline_level = None - def __init__(self, operations, storage): - if operations[0].name == 'debug_merge_point': - self.inline_level = int(operations[0].args[0]) - m = re.search('\w]+)\. file \'(.+?)\'\. line (\d+)> #(\d+) (\w+)', - operations[0].args[1]) - if m is None: - # a non-code loop, like StrLiteralSearch or something - self.bytecode_name = operations[0].args[1][1:-1] - else: - self.name, self.filename, lineno, bytecode_no, self.bytecode_name = m.groups() - self.startlineno = int(lineno) - self.bytecode_no = int(bytecode_no) + def parse_code_data(self, arg): + m = re.search('\w]+)[\.,] file \'(.+?)\'[\.,] line (\d+)> #(\d+) (\w+)', + arg) + if m is None: + # a non-code loop, like StrLiteralSearch or something + self.bytecode_name = arg + else: + self.name, self.filename, lineno, bytecode_no, self.bytecode_name = m.groups() + self.startlineno = int(lineno) + self.bytecode_no = int(bytecode_no) + + + def __init__(self, operations, storage, loopname): + for op in operations: + if op.name == 'debug_merge_point': + self.inline_level = int(op.args[0]) + self.parse_code_data(op.args[1][1:-1]) + break + else: + self.inline_level = 0 + self.parse_code_data(loopname) self.operations = operations self.storage = storage self.code = storage.disassemble_code(self.filename, self.startlineno, @@ -165,7 +169,7 @@ def repr(self): if self.filename is None: - return "Unknown" + return self.bytecode_name return "%s, file '%s', line %d" % (self.name, self.filename, self.startlineno) @@ -220,7 +224,8 @@ self.storage = storage @classmethod - def from_operations(cls, operations, storage, limit=None, inputargs=''): + def from_operations(cls, operations, storage, limit=None, inputargs='', + loopname=''): """ Slice given operation list into a chain of TraceForOpcode chunks. 
Also detect inlined functions and make them Function """ @@ -246,13 +251,13 @@ for op in operations: if op.name == 'debug_merge_point': if so_far: - append_to_res(cls.TraceForOpcode(so_far, storage)) + append_to_res(cls.TraceForOpcode(so_far, storage, loopname)) if limit: break so_far = [] so_far.append(op) if so_far: - append_to_res(cls.TraceForOpcode(so_far, storage)) + append_to_res(cls.TraceForOpcode(so_far, storage, loopname)) # wrap stack back up if not stack: # no ops whatsoever @@ -300,7 +305,7 @@ def repr(self): if self.filename is None: - return "Unknown" + return self.chunks[0].bytecode_name return "%s, file '%s', line %d" % (self.name, self.filename, self.startlineno) @@ -393,7 +398,7 @@ if trace.comment and 'Guard' in trace.comment: descrs = ['bridge ' + re.search('Guard (\d+)', trace.comment).group(1)] else: - descrs = [''] + descrs = ['entry ' + re.search('Loop (\d+)', trace.comment).group(1)] for i, op in enumerate(trace.operations): if op.name == 'label': labels.append(i) @@ -405,6 +410,7 @@ part = copy(trace) part.operations = trace.operations[start : stop + 1] part.descr = descrs[i] + part.comment = trace.comment parts.append(part) return parts diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -33,23 +33,26 @@ ''') res = Function.from_operations(ops.operations, LoopStorage()) assert len(res.chunks) == 1 - assert res.chunks[0].repr() + assert 'SomeRandomStuff' in res.chunks[0].repr() def test_split(): ops = parse(''' [i0] + label() debug_merge_point(0, " #10 ADD") debug_merge_point(0, " #11 SUB") i1 = int_add(i0, 1) debug_merge_point(0, " #11 SUB") i2 = int_add(i1, 1) ''') - res = Function.from_operations(ops.operations, LoopStorage()) - assert len(res.chunks) == 3 + res = Function.from_operations(ops.operations, LoopStorage(), loopname='') + assert len(res.chunks) == 4 assert len(res.chunks[0].operations) == 1 - assert len(res.chunks[1].operations) == 2 + assert len(res.chunks[1].operations) == 1 assert len(res.chunks[2].operations) == 2 - assert res.chunks[2].bytecode_no == 11 + assert len(res.chunks[3].operations) == 2 + assert res.chunks[3].bytecode_no == 11 + assert res.chunks[0].bytecode_name == '' def test_inlined_call(): ops = parse(""" @@ -245,6 +248,7 @@ guard_true(i19, descr=) [] i113 = getfield_raw(151937600, descr=) ''') + loop.comment = 'Loop 0' parts = split_trace(loop) assert len(parts) == 3 assert len(parts[0].operations) == 2 @@ -272,7 +276,7 @@ finish(i0) ''') bridge.comment = 'bridge out of Guard 2 with 1 ops' - loop.comment = '' + loop.comment = 'Loop 0' loops = split_trace(loop) + split_trace(bridge) input = ['grrr:123\nasb:12\nbridge 2:1234'] parse_log_counts(input, loops) diff --git a/pypy/translator/sandbox/pypy_interact.py b/pypy/translator/sandbox/pypy_interact.py --- a/pypy/translator/sandbox/pypy_interact.py +++ b/pypy/translator/sandbox/pypy_interact.py @@ -26,7 +26,8 @@ from pypy.translator.sandbox.sandlib import SimpleIOSandboxedProc from pypy.translator.sandbox.sandlib import VirtualizedSandboxedProc from pypy.translator.sandbox.vfs import Dir, RealDir, RealFile -from pypy.tool.lib_pypy import LIB_ROOT +import pypy +LIB_ROOT = os.path.dirname(os.path.dirname(pypy.__file__)) class PyPySandboxedProc(VirtualizedSandboxedProc, SimpleIOSandboxedProc): debug = True diff --git a/pypy/translator/sandbox/sandlib.py b/pypy/translator/sandbox/sandlib.py --- a/pypy/translator/sandbox/sandlib.py +++ 
b/pypy/translator/sandbox/sandlib.py @@ -30,8 +30,9 @@ # load(). Also, marshal.load(f) blocks with the GIL held when # f is a pipe with no data immediately avaialble, preventing the # _waiting_thread to run. -from pypy.tool.lib_pypy import import_from_lib_pypy -marshal = import_from_lib_pypy('marshal') +import pypy +marshal = py.path.local(pypy.__file__).join('..', '..', 'lib_pypy', + 'marshal.py').pyimport() # Non-marshal result types RESULTTYPE_STATRESULT = object() From noreply at buildbot.pypy.org Wed Dec 28 21:29:55 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Wed, 28 Dec 2011 21:29:55 +0100 (CET) Subject: [pypy-commit] pypy default: Merge jit-improve-nested-loops. It allows bridges to end with a jump to the top of a loop and not only to the bottom. While this really is the same point (since it is a loop) it prevents bridges going from one loop to another from inlining a full iteration of the target loop at the end of the bridge. Message-ID: <20111228202955.6351582C01@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r50935:69095778cbfd Date: 2011-12-28 21:28 +0100 http://bitbucket.org/pypy/pypy/changeset/69095778cbfd/ Log: Merge jit-improve-nested-loops. It allows bridges to end with a jump to the top of a loop and not only to the bottom. While this really is the same point (since it is a loop) it prevents bridges going from one loop to another from inlining a full iteration of the target loop at the end of the bridge. diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -169,10 +169,10 @@ loop.original_jitcell_token = jitcell_token for label in all_target_tokens: assert isinstance(label, TargetToken) - label.original_jitcell_token = jitcell_token if label.virtual_state and label.short_preamble: metainterp_sd.logger_ops.log_short_preamble([], label.short_preamble) jitcell_token.target_tokens = all_target_tokens + propagate_original_jitcell_token(loop) send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, "loop") record_loop_or_bridge(metainterp_sd, loop) return all_target_tokens[0] @@ -240,11 +240,11 @@ for box in loop.inputargs: assert isinstance(box, Box) - target_token = loop.operations[-1].getdescr() + target_token = loop.operations[-1].getdescr() resumekey.compile_and_attach(metainterp, loop) + target_token = label.getdescr() assert isinstance(target_token, TargetToken) - target_token.original_jitcell_token = loop.original_jitcell_token record_loop_or_bridge(metainterp_sd, loop) return target_token @@ -281,6 +281,15 @@ assert i == len(inputargs) loop.operations = extra_ops + loop.operations +def propagate_original_jitcell_token(trace): + for op in trace.operations: + if op.getopnum() == rop.LABEL: + token = op.getdescr() + assert isinstance(token, TargetToken) + assert token.original_jitcell_token is None + token.original_jitcell_token = trace.original_jitcell_token + + def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): vinfo = jitdriver_sd.virtualizable_info if vinfo is not None: @@ -554,6 +563,7 @@ inputargs = metainterp.history.inputargs if not we_are_translated(): self._debug_suboperations = new_loop.operations + propagate_original_jitcell_token(new_loop) send_bridge_to_backend(metainterp.jitdriver_sd, metainterp.staticdata, self, inputargs, new_loop.operations, new_loop.original_jitcell_token) @@ -740,6 +750,7 @@ jitdriver_sd = metainterp.jitdriver_sd redargs = new_loop.inputargs
new_loop.original_jitcell_token = jitcell_token = make_jitcell_token(jitdriver_sd) + propagate_original_jitcell_token(new_loop) send_loop_to_backend(self.original_greenkey, metainterp.jitdriver_sd, metainterp_sd, new_loop, "entry bridge") # send the new_loop to warmspot.py, to be called directly the next time diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -386,6 +386,17 @@ """ self.optimize_loop(ops, expected) + def test_virtual_as_field_of_forced_box(self): + ops = """ + [p0] + pv1 = new_with_vtable(ConstClass(node_vtable)) + label(pv1, p0) + pv2 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(pv2, pv1, descr=valuedescr) + jump(pv1, pv2) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) class OptRenameStrlen(Optimization): def propagate_forward(self, op): diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -409,7 +409,13 @@ if self.level == LEVEL_CONSTANT: return assert 0 <= self.position_in_notvirtuals - boxes[self.position_in_notvirtuals] = value.force_box(optimizer) + if optimizer: + box = value.force_box(optimizer) + else: + if value.is_virtual(): + raise BadVirtualState + box = value.get_key_box() + boxes[self.position_in_notvirtuals] = box def _enum(self, virtual_state): if self.level == LEVEL_CONSTANT: @@ -471,8 +477,14 @@ optimizer = optimizer.optearlyforce assert len(values) == len(self.state) inputargs = [None] * len(self.notvirtuals) + + # We try twice. The first time around we allow boxes to be forced + # which might change the virtual state if the box appear in more + # than one place among the inputargs. 
for i in range(len(values)): self.state[i].enum_forced_boxes(inputargs, values[i], optimizer) + for i in range(len(values)): + self.state[i].enum_forced_boxes(inputargs, values[i], None) if keyboxes: for i in range(len(values)): diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -976,10 +976,13 @@ self.verify_green_args(jitdriver_sd, greenboxes) self.debug_merge_point(jitdriver_sd, jdindex, self.metainterp.in_recursion, greenboxes) - + if self.metainterp.seen_loop_header_for_jdindex < 0: - if not jitdriver_sd.no_loop_header or not any_operation: + if not any_operation: return + if self.metainterp.in_recursion or not self.metainterp.get_procedure_token(greenboxes, True): + if not jitdriver_sd.no_loop_header: + return # automatically add a loop_header if there is none self.metainterp.seen_loop_header_for_jdindex = jdindex # @@ -2053,9 +2056,15 @@ from pypy.jit.metainterp.resoperation import opname raise NotImplementedError(opname[opnum]) - def get_procedure_token(self, greenkey): + def get_procedure_token(self, greenkey, with_compiled_targets=False): cell = self.jitdriver_sd.warmstate.jit_cell_at_key(greenkey) - return cell.get_procedure_token() + token = cell.get_procedure_token() + if with_compiled_targets: + if not token: + return None + if not token.target_tokens: + return None + return token def compile_loop(self, original_boxes, live_arg_boxes, start, resume_at_jump_descr): num_green_args = self.jitdriver_sd.num_green_args @@ -2088,11 +2097,9 @@ def compile_trace(self, live_arg_boxes, resume_at_jump_descr): num_green_args = self.jitdriver_sd.num_green_args greenkey = live_arg_boxes[:num_green_args] - target_jitcell_token = self.get_procedure_token(greenkey) + target_jitcell_token = self.get_procedure_token(greenkey, True) if not target_jitcell_token: return - if not target_jitcell_token.target_tokens: - return self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None, descr=target_jitcell_token) diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2697,7 +2697,7 @@ # bridge back to the preamble of the first loop is produced. A guard in # this bridge is later traced resulting in a failed attempt of retracing # the second loop. - self.check_trace_count(8) + self.check_trace_count(9) # FIXME: Add a gloabl retrace counter and test that we are not trying more than 5 times. 
diff --git a/pypy/jit/metainterp/test/test_loop.py b/pypy/jit/metainterp/test/test_loop.py --- a/pypy/jit/metainterp/test/test_loop.py +++ b/pypy/jit/metainterp/test/test_loop.py @@ -756,7 +756,7 @@ res = self.meta_interp(interpret, [1]) assert res == interpret(1) # XXX it's unsure how many loops should be there - self.check_trace_count(3) + self.check_trace_count(2) def test_path_with_operations_not_from_start(self): jitdriver = JitDriver(greens = ['k'], reds = ['n', 'z']) diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -880,7 +880,7 @@ elif op == 'j': j = Int(0) elif op == '+': - sa += i.val * j.val + sa += (i.val + 2) * (j.val + 2) elif op == 'a': i = Int(i.val + 1) elif op == 'b': @@ -902,6 +902,7 @@ assert res == f(10) self.check_aborted_count(0) self.check_target_token_count(3) + self.check_resops(int_mul=2) def test_nested_loops_bridge(self): class Int(object): From noreply at buildbot.pypy.org Thu Dec 29 09:57:16 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Dec 2011 09:57:16 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: (arigo, biab) make sure exceptions are saved for call_assembler and call_may_force Message-ID: <20111229085716.BFF8B82C01@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50937:977efd55f3c6 Date: 2011-12-13 15:42 +0100 http://bitbucket.org/pypy/pypy/changeset/977efd55f3c6/ Log: (arigo, biab) make sure exceptions are saved for call_assembler and call_may_force diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -1079,7 +1079,8 @@ self.mc.LDR_ri(r.ip.value, r.fp.value) self.mc.CMP_ri(r.ip.value, 0) - self._emit_guard(guard_op, regalloc._prepare_guard(guard_op), c.GE) + self._emit_guard(guard_op, regalloc._prepare_guard(guard_op), + c.GE, save_exc=True)
return fcond @@ -1098,7 +1099,7 @@ self.mc.LDR_ri(r.ip.value, r.fp.value) self.mc.CMP_ri(r.ip.value, 0) - self._emit_guard(guard_op, arglocs, c.GE) + self._emit_guard(guard_op, arglocs, c.GE, save_exc=True) return fcond emit_guard_call_release_gil = emit_guard_call_may_force From noreply at buildbot.pypy.org Thu Dec 29 09:57:18 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Dec 2011 09:57:18 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: make the save_exc parameter non-optional Message-ID: <20111229085718.5075882C01@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50938:18234110368e Date: 2011-12-13 15:53 +0100 http://bitbucket.org/pypy/pypy/changeset/18234110368e/ Log: make the save_exc parameter non-optional diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -155,7 +155,7 @@ self.releasegil_addr = rffi.cast(lltype.Signed, releasegil_func) self.reacqgil_addr = rffi.cast(lltype.Signed, reacqgil_func) - def _gen_leave_jitted_hook_code(self, save_exc=False): + def _gen_leave_jitted_hook_code(self, save_exc): mc = ARMv7Builder() # XXX add a check if cpu supports floats with saved_registers(mc, r.caller_resp + [r.ip], r.caller_vfp_resp): @@ -428,12 +428,14 @@ encode32(mem, j+1, n) return memaddr - def _gen_path_to_exit_path(self, descr, args, arglocs, fcond=c.AL, save_exc=False): + def _gen_path_to_exit_path(self, descr, args, arglocs, save_exc, fcond=c.AL): + assert isinstance(save_exc, bool) memaddr = self.gen_descr_encoding(descr, args, arglocs) - self.gen_exit_code(self.mc, memaddr, fcond, save_exc) + self.gen_exit_code(self.mc, memaddr, save_exc, fcond) return memaddr - def gen_exit_code(self, mc, memaddr, fcond=c.AL, save_exc=False): + def gen_exit_code(self, mc, memaddr, save_exc, fcond=c.AL): + assert isinstance(save_exc, bool) self.mc.gen_load_int(r.ip.value, memaddr) #mc.LDR_ri(r.ip.value, r.pc.value, imm=WORD) if save_exc: @@ -888,7 +890,7 @@ # regalloc support def load(self, loc, value): - assert (loc.is_reg() and value.is_imm() + assert (loc.is_reg() and value.is_imm() or loc.is_vfp_reg() and value.is_imm_float()) if value.is_imm(): self.mc.gen_load_int(loc.value, value.getint()) diff --git a/pypy/jit/backend/arm/helper/assembler.py b/pypy/jit/backend/arm/helper/assembler.py --- a/pypy/jit/backend/arm/helper/assembler.py +++ b/pypy/jit/backend/arm/helper/assembler.py @@ -29,7 +29,7 @@ guard_opnum = guard.getopnum() if guard_opnum == rop.GUARD_FALSE: cond = false_cond - return self._emit_guard(guard, arglocs[1:], cond) + return self._emit_guard(guard, arglocs[1:], cond, save_exc=False) f.__name__ = 'emit_guard_%s' % name return f @@ -92,7 +92,7 @@ cond = true_cond if guard_opnum == rop.GUARD_FALSE: cond = false_cond - return self._emit_guard(guard, arglocs[2:], cond) + return self._emit_guard(guard, arglocs[2:], cond, save_exc=False) f.__name__ = 'emit_guard_%s' % name return f @@ -137,7 +137,7 @@ guard_opnum = guard.getopnum() if guard_opnum == rop.GUARD_FALSE: cond = false_cond - return self._emit_guard(guard, arglocs[2:], cond) + return self._emit_guard(guard, arglocs[2:], cond, save_exc=False) f.__name__ = 'emit_guard_%s' % name return f diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -35,8 +35,9 @@ NO_FORCE_INDEX = -1 class GuardToken(object): - def __init__(self, descr, 
failargs, faillocs, offset, fcond=c.AL, - save_exc=False, is_invalidate=False): + def __init__(self, descr, failargs, faillocs, offset, + save_exc, fcond=c.AL, is_invalidate=False): + assert isinstance(save_exc, bool) self.descr = descr self.offset = offset self.is_invalidate = is_invalidate @@ -99,9 +100,9 @@ self.mc.CMP_rr(r.ip.value, res.value, shifttype=shift.ASR, imm=31, cond=fcond) if guard.getopnum() == rop.GUARD_OVERFLOW: - fcond = self._emit_guard(guard, failargs, c.NE) + fcond = self._emit_guard(guard, failargs, c.NE, save_exc=False) elif guard.getopnum() == rop.GUARD_NO_OVERFLOW: - fcond = self._emit_guard(guard, failargs, c.EQ) + fcond = self._emit_guard(guard, failargs, c.EQ, save_exc=False) else: assert 0 return fcond @@ -188,7 +189,9 @@ _mixin_ = True - def _emit_guard(self, op, arglocs, fcond, save_exc=False, is_guard_not_invalidated=False): + def _emit_guard(self, op, arglocs, fcond, save_exc, is_guard_not_invalidated=False): + assert isinstance(save_exc, bool) + assert isinstance(fcond, int) descr = op.getdescr() assert isinstance(descr, AbstractFailDescr) @@ -209,16 +212,16 @@ failargs=op.getfailargs(), faillocs=arglocs, offset=pos, - fcond=fcond, + save_exc=save_exc, is_invalidate=is_guard_not_invalidated, - save_exc=save_exc)) + fcond=fcond)) return c.AL def _emit_guard_overflow(self, guard, failargs, fcond): if guard.getopnum() == rop.GUARD_OVERFLOW: - fcond = self._emit_guard(guard, failargs, c.VS) + fcond = self._emit_guard(guard, failargs, c.VS, save_exc=False) elif guard.getopnum() == rop.GUARD_NO_OVERFLOW: - fcond = self._emit_guard(guard, failargs, c.VC) + fcond = self._emit_guard(guard, failargs, c.VC, save_exc=False) else: assert 0 return fcond @@ -227,14 +230,14 @@ l0 = arglocs[0] failargs = arglocs[1:] self.mc.CMP_ri(l0.value, 0) - fcond = self._emit_guard(op, failargs, c.NE) + fcond = self._emit_guard(op, failargs, c.NE, save_exc=False) return fcond def emit_op_guard_false(self, op, arglocs, regalloc, fcond): l0 = arglocs[0] failargs = arglocs[1:] self.mc.CMP_ri(l0.value, 0) - fcond = self._emit_guard(op, failargs, c.EQ) + fcond = self._emit_guard(op, failargs, c.EQ, save_exc=False) return fcond def emit_op_guard_value(self, op, arglocs, regalloc, fcond): @@ -251,17 +254,17 @@ assert l1.is_vfp_reg() self.mc.VCMP(l0.value, l1.value) self.mc.VMRS(cond=fcond) - fcond = self._emit_guard(op, failargs, c.EQ) + fcond = self._emit_guard(op, failargs, c.EQ, save_exc=False) return fcond emit_op_guard_nonnull = emit_op_guard_true emit_op_guard_isnull = emit_op_guard_false def emit_op_guard_no_overflow(self, op, arglocs, regalloc, fcond): - return self._emit_guard(op, arglocs, c.VC) + return self._emit_guard(op, arglocs, c.VC, save_exc=False) def emit_op_guard_overflow(self, op, arglocs, regalloc, fcond): - return self._emit_guard(op, arglocs, c.VS) + return self._emit_guard(op, arglocs, c.VS, save_exc=False) # from ../x86/assembler.py:1265 def emit_op_guard_class(self, op, arglocs, regalloc, fcond): @@ -273,14 +276,14 @@ self.mc.CMP_ri(arglocs[0].value, 0) if offset is not None: - self._emit_guard(op, arglocs[3:], c.NE) + self._emit_guard(op, arglocs[3:], c.NE, save_exc=False) else: raise NotImplementedError self._cmp_guard_class(op, arglocs, regalloc, fcond) return fcond def emit_op_guard_not_invalidated(self, op, locs, regalloc, fcond): - return self._emit_guard(op, locs, fcond, is_guard_not_invalidated=True) + return self._emit_guard(op, locs, fcond, save_exc=False, is_guard_not_invalidated=True) def _cmp_guard_class(self, op, locs, regalloc, fcond): offset = 
locs[2] @@ -295,7 +298,7 @@ raise NotImplementedError # XXX port from x86 backend once gc support is in place - return self._emit_guard(op, locs[3:], c.EQ) + return self._emit_guard(op, locs[3:], c.EQ, save_exc=False) class OpAssembler(object): @@ -497,7 +500,7 @@ self.mc.CMP_rr(r.ip.value, loc.value) self._emit_guard(op, failargs, c.EQ, save_exc=True) - self.mc.gen_load_int(loc.value, pos_exc_value.value, fcond) + self.mc.gen_load_int(loc.value, pos_exc_value.value) if resloc: self.mc.LDR_ri(resloc.value, loc.value) self.mc.MOV_ri(r.ip.value, 0) From noreply at buildbot.pypy.org Thu Dec 29 09:57:20 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Dec 2011 09:57:20 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: fixes to emit_call and call_reaquire_gil Message-ID: <20111229085720.631A882C01@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50939:aa7017a83625 Date: 2011-12-23 17:30 +0100 http://bitbucket.org/pypy/pypy/changeset/aa7017a83625/ Log: fixes to emit_call and call_reaquire_gil diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -367,10 +367,10 @@ self.gen_func_epilog() return fcond - def emit_op_call(self, op, args, regalloc, fcond, force_index=-1): + def emit_op_call(self, op, args, regalloc, fcond, force_index=NO_FORCE_INDEX): adr = args[0].value arglist = op.getarglist()[1:] - if force_index == -1: + if force_index == NO_FORCE_INDEX: force_index = self.write_new_force_index() cond = self._emit_call(force_index, adr, arglist, regalloc, fcond, op.result) @@ -1122,15 +1122,18 @@ def call_reacquire_gil(self, gcrootmap, save_loc, fcond): # save the previous result into the stack temporarily. # XXX like with call_release_gil(), we assume that we don't need - # to save vfp regs in this case. + # to save vfp regs in this case. 
Besides the result location regs_to_save = [] + vfp_regs_to_save = [] if save_loc.is_reg(): regs_to_save.append(save_loc) + if save_loc.is_vfp_reg(): + vfp_regs_to_save.append(save_loc) # call the reopenstack() function (also reacquiring the GIL) - if len(regs_to_save) == 1: + if len(regs_to_save) % 2 != 1: regs_to_save.append(r.ip) # for alingment assert gcrootmap.is_shadow_stack - with saved_registers(self.mc, regs_to_save): + with saved_registers(self.mc, regs_to_save, vfp_regs_to_save): self._emit_call(NO_FORCE_INDEX, self.reacqgil_addr, [], self._regalloc, fcond) def write_new_force_index(self): diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -1034,7 +1034,11 @@ self.assembler.emit_op_call(op, args, self, fcond, fail_index) # then reopen the stack if gcrootmap: - self.assembler.call_reacquire_gil(gcrootmap, r.r0, fcond) + if op.result: + result_loc = self.call_result_location(op.result) + else: + result_loc = None + self.assembler.call_reacquire_gil(gcrootmap, result_loc, fcond) locs = self._prepare_guard(guard_op) return locs From noreply at buildbot.pypy.org Thu Dec 29 09:57:22 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Dec 2011 09:57:22 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: Cleanup Message-ID: <20111229085722.5273D82C01@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50940:bd95dd546f05 Date: 2011-12-25 17:39 +0100 http://bitbucket.org/pypy/pypy/changeset/bd95dd546f05/ Log: Cleanup diff --git a/pypy/jit/backend/arm/arch.py b/pypy/jit/backend/arm/arch.py --- a/pypy/jit/backend/arm/arch.py +++ b/pypy/jit/backend/arm/arch.py @@ -1,10 +1,9 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib.rarithmetic import r_uint -from pypy.rpython.lltypesystem import lltype -FUNC_ALIGN=8 -WORD=4 +FUNC_ALIGN = 8 +WORD = 4 # the number of registers that we need to save around malloc calls N_REGISTERS_SAVED_BY_MALLOC = 9 @@ -27,18 +26,22 @@ } """]) -arm_int_div_sign = lltype.Ptr(lltype.FuncType([lltype.Signed, lltype.Signed], lltype.Signed)) + def arm_int_div_emulator(a, b): - return int(a/float(b)) + return int(a / float(b)) +arm_int_div_sign = lltype.Ptr( + lltype.FuncType([lltype.Signed, lltype.Signed], lltype.Signed)) arm_int_div = rffi.llexternal( "pypy__arm_int_div", [lltype.Signed, lltype.Signed], lltype.Signed, _callable=arm_int_div_emulator, compilation_info=eci, _nowrapper=True, elidable_function=True) -arm_uint_div_sign = lltype.Ptr(lltype.FuncType([lltype.Unsigned, lltype.Unsigned], lltype.Unsigned)) + def arm_uint_div_emulator(a, b): - return r_uint(a)/r_uint(b) + return r_uint(a) / r_uint(b) +arm_uint_div_sign = lltype.Ptr( + lltype.FuncType([lltype.Unsigned, lltype.Unsigned], lltype.Unsigned)) arm_uint_div = rffi.llexternal( "pypy__arm_uint_div", [lltype.Unsigned, lltype.Unsigned], lltype.Unsigned, _callable=arm_uint_div_emulator, @@ -46,7 +49,6 @@ _nowrapper=True, elidable_function=True) -arm_int_mod_sign = arm_int_div_sign def arm_int_mod_emulator(a, b): sign = 1 if a < 0: @@ -56,9 +58,9 @@ b = -1 * b res = a % b return sign * res +arm_int_mod_sign = arm_int_div_sign arm_int_mod = rffi.llexternal( "pypy__arm_int_mod", [lltype.Signed, lltype.Signed], lltype.Signed, _callable=arm_int_mod_emulator, compilation_info=eci, _nowrapper=True, elidable_function=True) - diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py 
+++ b/pypy/jit/backend/arm/assembler.py @@ -1,41 +1,37 @@ from __future__ import with_statement import os -from pypy.jit.backend.arm.helper.assembler import saved_registers, count_reg_args, \ +from pypy.jit.backend.arm.helper.assembler import saved_registers, \ + count_reg_args, \ decode32, encode32, \ - decode64, encode64 + decode64 from pypy.jit.backend.arm import conditions as c -from pypy.jit.backend.arm import locations from pypy.jit.backend.arm import registers as r -from pypy.jit.backend.arm.arch import WORD, FUNC_ALIGN, PC_OFFSET, N_REGISTERS_SAVED_BY_MALLOC +from pypy.jit.backend.arm.arch import WORD, FUNC_ALIGN, \ + PC_OFFSET, N_REGISTERS_SAVED_BY_MALLOC from pypy.jit.backend.arm.codebuilder import ARMv7Builder, OverwritingBuilder -from pypy.jit.backend.arm.regalloc import (Regalloc, ARMFrameManager, ARMv7RegisterMananger, - check_imm_arg, TempInt, - TempPtr, - operations as regalloc_operations, - operations_with_guard as regalloc_operations_with_guard) +from pypy.jit.backend.arm.regalloc import (Regalloc, ARMFrameManager, + ARMv7RegisterMananger, check_imm_arg, + operations as regalloc_operations, + operations_with_guard as regalloc_operations_with_guard) from pypy.jit.backend.arm.jump import remap_frame_layout -from pypy.jit.backend.llsupport.regalloc import compute_vars_longevity, TempBox +from pypy.jit.backend.llsupport.regalloc import compute_vars_longevity from pypy.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from pypy.jit.backend.model import CompiledLoopToken from pypy.jit.codewriter import longlong -from pypy.jit.metainterp.history import (Const, ConstInt, ConstPtr, - BoxInt, BoxPtr, AbstractFailDescr, - INT, REF, FLOAT) +from pypy.jit.metainterp.history import (AbstractFailDescr, INT, REF, FLOAT) from pypy.jit.metainterp.resoperation import rop from pypy.rlib import rgc from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.rarithmetic import r_uint, r_longlong -from pypy.rlib.longlong2float import float2longlong, longlong2float from pypy.rpython.annlowlevel import llhelper from pypy.rpython.lltypesystem import lltype, rffi, llmemory from pypy.rpython.lltypesystem.lloperation import llop -from pypy.jit.backend.arm.opassembler import ResOpAssembler, GuardToken -from pypy.rlib.debug import (debug_print, debug_start, debug_stop, - have_debug_prints) +from pypy.jit.backend.arm.opassembler import ResOpAssembler +from pypy.rlib.debug import debug_print, debug_start, debug_stop # XXX Move to llsupport from pypy.jit.backend.x86.support import values_array, memcpy_fn + class AssemblerARM(ResOpAssembler): """ Encoding for locations in memory @@ -52,8 +48,8 @@ \xFF = END_OF_LOCS """ FLOAT_TYPE = '\xED' - REF_TYPE = '\xEE' - INT_TYPE = '\xEF' + REF_TYPE = '\xEE' + INT_TYPE = '\xEF' STACK_LOC = '\xFC' IMM_LOC = '\xFD' @@ -62,11 +58,11 @@ END_OF_LOCS = '\xFF' - def __init__(self, cpu, failargs_limit=1000): self.cpu = cpu self.fail_boxes_int = values_array(lltype.Signed, failargs_limit) - self.fail_boxes_float = values_array(longlong.FLOATSTORAGE, failargs_limit) + self.fail_boxes_float = values_array(longlong.FLOATSTORAGE, + failargs_limit) self.fail_boxes_ptr = values_array(llmemory.GCREF, failargs_limit) self.fail_boxes_count = 0 self.fail_force_index = 0 @@ -87,7 +83,7 @@ def setup(self, looptoken, operations): self.current_clt = looptoken.compiled_loop_token - operations = self.cpu.gc_ll_descr.rewrite_assembler(self.cpu, + operations = self.cpu.gc_ll_descr.rewrite_assembler(self.cpu, operations, self.current_clt.allgcrefs) assert self.memcpy_addr 
!= 0, 'setup_once() not called?' self.mc = ARMv7Builder() @@ -130,7 +126,8 @@ self._build_release_gil(gc_ll_descr.gcrootmap) self.memcpy_addr = self.cpu.cast_ptr_to_int(memcpy_fn) self._exit_code_addr = self._gen_exit_path() - self._leave_jitted_hook_save_exc = self._gen_leave_jitted_hook_code(True) + self._leave_jitted_hook_save_exc = \ + self._gen_leave_jitted_hook_code(True) self._leave_jitted_hook = self._gen_leave_jitted_hook_code(False) @staticmethod @@ -146,13 +143,14 @@ after() _NOARG_FUNC = lltype.Ptr(lltype.FuncType([], lltype.Void)) + def _build_release_gil(self, gcrootmap): assert gcrootmap.is_shadow_stack releasegil_func = llhelper(self._NOARG_FUNC, self._release_gil_shadowstack) reacqgil_func = llhelper(self._NOARG_FUNC, self._reacquire_gil_shadowstack) - self.releasegil_addr = rffi.cast(lltype.Signed, releasegil_func) + self.releasegil_addr = rffi.cast(lltype.Signed, releasegil_func) self.reacqgil_addr = rffi.cast(lltype.Signed, reacqgil_func) def _gen_leave_jitted_hook_code(self, save_exc): @@ -186,34 +184,37 @@ @rgc.no_collect def failure_recovery_func(mem_loc, frame_pointer, stack_pointer): """mem_loc is a structure in memory describing where the values for - the failargs are stored. - frame loc is the address of the frame pointer for the frame to be - decoded frame """ - return self.decode_registers_and_descr(mem_loc, frame_pointer, stack_pointer) + the failargs are stored. frame loc is the address of the frame + pointer for the frame to be decoded frame """ + return self.decode_registers_and_descr(mem_loc, + frame_pointer, stack_pointer) self.failure_recovery_func = failure_recovery_func - recovery_func_sign = lltype.Ptr(lltype.FuncType([lltype.Signed, lltype.Signed, lltype.Signed], lltype.Signed)) + recovery_func_sign = lltype.Ptr(lltype.FuncType([lltype.Signed, + lltype.Signed, lltype.Signed], lltype.Signed)) @rgc.no_collect def decode_registers_and_descr(self, mem_loc, frame_loc, regs_loc): - """Decode locations encoded in memory at mem_loc and write the values to - the failboxes. - Values for spilled vars and registers are stored on stack at frame_loc - """ - #XXX check if units are correct here, when comparing words and bytes and stuff - # assert 0, 'check if units are correct here, when comparing words and bytes and stuff' + """Decode locations encoded in memory at mem_loc and write the values + to the failboxes. 
Values for spilled vars and registers are stored on + stack at frame_loc """ + # XXX check if units are correct here, when comparing words and bytes + # and stuff assert 0, 'check if units are correct here, when comparing + # words and bytes and stuff' enc = rffi.cast(rffi.CCHARP, mem_loc) - frame_depth = frame_loc - (regs_loc + len(r.all_regs)*WORD + len(r.all_vfp_regs)*2*WORD) + frame_depth = frame_loc - (regs_loc + len(r.all_regs) + * WORD + len(r.all_vfp_regs) * 2 * WORD) assert (frame_loc - frame_depth) % 4 == 0 stack = rffi.cast(rffi.CCHARP, frame_loc - frame_depth) assert regs_loc % 4 == 0 vfp_regs = rffi.cast(rffi.CCHARP, regs_loc) - assert (regs_loc + len(r.all_vfp_regs)*2*WORD) % 4 == 0 + assert (regs_loc + len(r.all_vfp_regs) * 2 * WORD) % 4 == 0 assert frame_depth >= 0 - regs = rffi.cast(rffi.CCHARP, regs_loc + len(r.all_vfp_regs)*2*WORD) + regs = rffi.cast(rffi.CCHARP, + regs_loc + len(r.all_vfp_regs) * 2 * WORD) i = -1 fail_index = -1 while(True): @@ -231,33 +232,35 @@ if res == self.IMM_LOC: # imm value if group == self.INT_TYPE or group == self.REF_TYPE: - value = decode32(enc, i+1) + value = decode32(enc, i + 1) i += 4 else: assert group == self.FLOAT_TYPE - adr = decode32(enc, i+1) - value = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), adr)[0] + adr = decode32(enc, i + 1) + tp = rffi.CArrayPtr(longlong.FLOATSTORAGE) + value = rffi.cast(tp, adr)[0] self.fail_boxes_float.setitem(fail_index, value) i += 4 continue elif res == self.STACK_LOC: - stack_loc = decode32(enc, i+1) + stack_loc = decode32(enc, i + 1) i += 4 if group == self.FLOAT_TYPE: - value = decode64(stack, frame_depth - (stack_loc+1)*WORD) + value = decode64(stack, + frame_depth - (stack_loc + 1) * WORD) fvalue = rffi.cast(longlong.FLOATSTORAGE, value) self.fail_boxes_float.setitem(fail_index, fvalue) continue else: - value = decode32(stack, frame_depth - stack_loc*WORD) - else: # REG_LOC + value = decode32(stack, frame_depth - stack_loc * WORD) + else: # REG_LOC reg = ord(enc[i]) if group == self.FLOAT_TYPE: - value = decode64(vfp_regs, reg*2*WORD) + value = decode64(vfp_regs, reg * 2 * WORD) self.fail_boxes_float.setitem(fail_index, value) continue else: - value = decode32(regs, reg*WORD) + value = decode32(regs, reg * WORD) if group == self.INT_TYPE: self.fail_boxes_int.setitem(fail_index, value) @@ -268,9 +271,8 @@ else: assert 0, 'unknown type' - assert enc[i] == self.END_OF_LOCS - descr = decode32(enc, i+1) + descr = decode32(enc, i + 1) self.fail_boxes_count = fail_index self.fail_force_index = frame_loc return descr @@ -284,7 +286,8 @@ j += 1 continue - assert res in [self.FLOAT_TYPE, self.INT_TYPE, self.REF_TYPE], 'location type is not supported' + assert res in [self.FLOAT_TYPE, self.INT_TYPE, self.REF_TYPE], \ + 'location type is not supported' res_type = res j += 1 res = enc[j] @@ -298,10 +301,10 @@ t = INT else: t = REF - stack_loc = decode32(enc, j+1) + stack_loc = decode32(enc, j + 1) loc = regalloc.frame_manager.frame_pos(stack_loc, t) j += 4 - else: # REG_LOC + else: # REG_LOC if res_type == self.FLOAT_TYPE: loc = r.all_vfp_regs[ord(res)] else: @@ -311,7 +314,6 @@ return locs def _build_malloc_slowpath(self): - gcrootmap = self.cpu.gc_ll_descr.gcrootmap mc = ARMv7Builder() assert self.cpu.supports_floats # We need to push two registers here because we are going to make a @@ -351,13 +353,16 @@ def _gen_exit_path(self): mc = ARMv7Builder() - decode_registers_addr = llhelper(self.recovery_func_sign, self.failure_recovery_func) - + decode_registers_addr = llhelper(self.recovery_func_sign, + 
self.failure_recovery_func) self._insert_checks(mc) with saved_registers(mc, r.all_regs, r.all_vfp_regs): - mc.MOV_rr(r.r0.value, r.ip.value) # move mem block address, to r0 to pass as - mc.MOV_rr(r.r1.value, r.fp.value) # pass the current frame pointer as second param - mc.MOV_rr(r.r2.value, r.sp.value) # pass the current stack pointer as third param + # move mem block address, to r0 to pass as + mc.MOV_rr(r.r0.value, r.ip.value) + # pass the current frame pointer as second param + mc.MOV_rr(r.r1.value, r.fp.value) + # pass the current stack pointer as third param + mc.MOV_rr(r.r2.value, r.sp.value) self._insert_checks(mc) mc.BL(rffi.cast(lltype.Signed, decode_registers_addr)) mc.MOV_rr(r.ip.value, r.r0.value) @@ -376,15 +381,15 @@ # 1 separator byte # 4 bytes for the faildescr # const floats are stored in memory and the box contains the address - memsize = (len(arglocs)-1)*6+5 + memsize = (len(arglocs) - 1) * 6 + 5 memaddr = self.datablockwrapper.malloc_aligned(memsize, alignment=1) mem = rffi.cast(rffi.CArrayPtr(lltype.Char), memaddr) i = 0 j = 0 while i < len(args): - if arglocs[i+1]: + if arglocs[i + 1]: arg = args[i] - loc = arglocs[i+1] + loc = arglocs[i + 1] if arg.type == INT: mem[j] = self.INT_TYPE j += 1 @@ -404,7 +409,7 @@ assert (arg.type == INT or arg.type == REF or arg.type == FLOAT) mem[j] = self.IMM_LOC - encode32(mem, j+1, loc.getint()) + encode32(mem, j + 1, loc.getint()) j += 5 else: assert loc.is_stack() @@ -413,9 +418,9 @@ # Float locs store the location number with an offset # of 1 -.- so we need to take this into account here # when generating the encoding - encode32(mem, j+1, loc.position-1) + encode32(mem, j + 1, loc.position - 1) else: - encode32(mem, j+1, loc.position) + encode32(mem, j + 1, loc.position) j += 5 else: mem[j] = self.EMPTY_LOC @@ -425,10 +430,11 @@ mem[j] = chr(0xFF) n = self.cpu.get_fail_descr_number(descr) - encode32(mem, j+1, n) + encode32(mem, j + 1, n) return memaddr - def _gen_path_to_exit_path(self, descr, args, arglocs, save_exc, fcond=c.AL): + def _gen_path_to_exit_path(self, descr, args, arglocs, + save_exc, fcond=c.AL): assert isinstance(save_exc, bool) memaddr = self.gen_descr_encoding(descr, args, arglocs) self.gen_exit_code(self.mc, memaddr, save_exc, fcond) @@ -457,11 +463,13 @@ self.gen_footer_shadowstack(gcrootmap, mc) offset = 1 if self.cpu.supports_floats: - offset += 1 # to keep stack alignment + offset += 1 # to keep stack alignment mc.MOV_rr(r.sp.value, r.fp.value, cond=cond) - mc.ADD_ri(r.sp.value, r.sp.value, (N_REGISTERS_SAVED_BY_MALLOC+offset)*WORD, cond=cond) + mc.ADD_ri(r.sp.value, r.sp.value, + (N_REGISTERS_SAVED_BY_MALLOC + offset) * WORD, cond=cond) if self.cpu.supports_floats: - mc.VPOP([reg.value for reg in r.callee_saved_vfp_registers], cond=cond) + mc.VPOP([reg.value for reg in r.callee_saved_vfp_registers], + cond=cond) mc.POP([reg.value for reg in r.callee_restored_registers], cond=cond) def gen_func_prolog(self): @@ -469,11 +477,12 @@ offset = 1 if self.cpu.supports_floats: self.mc.VPUSH([reg.value for reg in r.callee_saved_vfp_registers]) - offset +=1 # to keep stack alignment + offset += 1 # to keep stack alignment # here we modify the stack pointer to leave room for the 9 registers # that are going to be saved here around malloc calls and one word to # store the force index - self.mc.SUB_ri(r.sp.value, r.sp.value, (N_REGISTERS_SAVED_BY_MALLOC+offset)*WORD) + self.mc.SUB_ri(r.sp.value, r.sp.value, + (N_REGISTERS_SAVED_BY_MALLOC + offset) * WORD) self.mc.MOV_rr(r.fp.value, r.sp.value) gcrootmap = 
self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: @@ -485,18 +494,19 @@ # XXX add some comments rst = gcrootmap.get_root_stack_top_addr() self.mc.gen_load_int(r.ip.value, rst) - self.mc.LDR_ri(r.r4.value, r.ip.value) # LDR r4, [rootstacktop] - self.mc.ADD_ri(r.r5.value, r.r4.value, imm=2*WORD) # ADD r5, r4 [2*WORD] + self.mc.LDR_ri(r.r4.value, r.ip.value) # LDR r4, [rootstacktop] + self.mc.ADD_ri(r.r5.value, r.r4.value, + imm=2 * WORD) # ADD r5, r4 [2*WORD] self.mc.gen_load_int(r.r6.value, gcrootmap.MARKER) self.mc.STR_ri(r.r6.value, r.r4.value) - self.mc.STR_ri(r.fp.value, r.r4.value, WORD) + self.mc.STR_ri(r.fp.value, r.r4.value, WORD) self.mc.STR_ri(r.r5.value, r.ip.value) def gen_footer_shadowstack(self, gcrootmap, mc): rst = gcrootmap.get_root_stack_top_addr() mc.gen_load_int(r.ip.value, rst) - mc.LDR_ri(r.r4.value, r.ip.value) # LDR r4, [rootstacktop] - mc.SUB_ri(r.r5.value, r.r4.value, imm=2*WORD) # ADD r5, r4 [2*WORD] + mc.LDR_ri(r.r4.value, r.ip.value) # LDR r4, [rootstacktop] + mc.SUB_ri(r.r5.value, r.r4.value, imm=2 * WORD) # ADD r5, r4 [2*WORD] mc.STR_ri(r.r5.value, r.ip.value) def gen_bootstrap_code(self, nonfloatlocs, floatlocs, inputargs): @@ -540,8 +550,6 @@ reg_args = count_reg_args(inputargs) - stack_locs = len(inputargs) - reg_args - selected_reg = 0 count = 0 float_args = [] @@ -569,16 +577,16 @@ # move float arguments to vfp regsiters for loc, vfp_reg in float_args: - self.mov_to_vfp_loc(loc, r.all_regs[loc.value+1], vfp_reg) + self.mov_to_vfp_loc(loc, r.all_regs[loc.value + 1], vfp_reg) # remap values stored in core registers remap_frame_layout(self, nonfloat_args, nonfloat_regs, r.ip) # load values passed on the stack to the corresponding locations - stack_position = len(r.callee_saved_registers)*WORD + \ - len(r.callee_saved_vfp_registers)*2*WORD + \ - N_REGISTERS_SAVED_BY_MALLOC * WORD + \ - 2 * WORD # for the FAIL INDEX and the stack padding + stack_position = len(r.callee_saved_registers) * WORD + \ + len(r.callee_saved_vfp_registers) * 2 * WORD + \ + N_REGISTERS_SAVED_BY_MALLOC * WORD + \ + 2 * WORD # for the FAIL INDEX and the stack padding count = 0 for i in range(reg_args, len(inputargs)): arg = inputargs[i] @@ -625,6 +633,7 @@ for op in ops: debug_print(op.repr()) debug_stop('jit-backend-ops') + # cpu interface def assemble_loop(self, inputargs, operations, looptoken, log): @@ -635,13 +644,14 @@ operations = self.setup(looptoken, operations) self._dump(operations) longevity = compute_vars_longevity(inputargs, operations) - regalloc = Regalloc(longevity, assembler=self, frame_manager=ARMFrameManager()) - + regalloc = Regalloc(longevity, assembler=self, + frame_manager=ARMFrameManager()) self.align() self.gen_func_prolog() sp_patch_location = self._prepare_sp_patch_position() - nonfloatlocs, floatlocs = regalloc.prepare_loop(inputargs, operations, looptoken) + nonfloatlocs, floatlocs = regalloc.prepare_loop(inputargs, + operations, looptoken) self.gen_bootstrap_code(nonfloatlocs, floatlocs, inputargs) looptoken._arm_arglocs = [nonfloatlocs, floatlocs] loop_head = self.mc.currpos() @@ -662,11 +672,13 @@ self.write_pending_failure_recoveries() loop_start = self.materialize_loop(looptoken) looptoken._arm_bootstrap_code = loop_start - looptoken._arm_direct_bootstrap_code = loop_start + direct_bootstrap_code + direct_code_start = loop_start + direct_bootstrap_code + looptoken._arm_direct_bootstrap_code = direct_code_start self.process_pending_guards(loop_start) if log and not we_are_translated(): print 'Loop', inputargs, operations - 
self.mc._dump_trace(loop_start, 'loop_%s.asm' % self.cpu.total_compiled_loops) + self.mc._dump_trace(loop_start, + 'loop_%s.asm' % self.cpu.total_compiled_loops) print 'Done assembling loop with token %r' % looptoken self.teardown() @@ -689,14 +701,15 @@ self._walk_operations(operations, regalloc) - #original_loop_token._arm_frame_depth = regalloc.frame_manager.frame_depth - self._patch_sp_offset(sp_patch_location, regalloc.frame_manager.frame_depth) + self._patch_sp_offset(sp_patch_location, + regalloc.frame_manager.frame_depth) self.write_pending_failure_recoveries() bridge_start = self.materialize_loop(original_loop_token) self.process_pending_guards(bridge_start) - self.patch_trace(faildescr, original_loop_token, bridge_start, regalloc) + self.patch_trace(faildescr, original_loop_token, + bridge_start, regalloc) if log and not we_are_translated(): print 'Bridge', inputargs, operations self.mc._dump_trace(bridge_start, 'bridge_%d.asm' % @@ -715,10 +728,10 @@ descr = tok.descr #generate the exit stub and the encoded representation pos = self.mc.currpos() - tok.pos_recovery_stub = pos + tok.pos_recovery_stub = pos memaddr = self._gen_path_to_exit_path(descr, tok.failargs, - tok.faillocs, save_exc=tok.save_exc) + tok.faillocs, save_exc=tok.save_exc) # store info on the descr descr._arm_frame_depth = tok.faillocs[0].getint() descr._failure_recovery_code = memaddr @@ -735,13 +748,15 @@ if not tok.is_invalidate: #patch the guard jumpt to the stub - # overwrite the generate NOP with a B_offs to the pos of the stub + # overwrite the generate NOP with a B_offs to the pos of the + # stub mc = ARMv7Builder() - mc.B_offs(descr._arm_guard_pos - tok.offset, c.get_opposite_of(tok.fcond)) + mc.B_offs(descr._arm_guard_pos - tok.offset, + c.get_opposite_of(tok.fcond)) mc.copy_to_raw_memory(block_start + tok.offset) else: clt.invalidate_positions.append( - (block_start + tok.offset, descr._arm_guard_pos - tok.offset)) + (block_start + tok.offset, descr._arm_guard_pos - tok.offset)) def get_asmmemmgr_blocks(self, looptoken): clt = looptoken.compiled_loop_token @@ -750,21 +765,22 @@ return clt.asmmemmgr_blocks def _prepare_sp_patch_position(self): - """Generate NOPs as placeholder to patch the instruction(s) to update the - sp according to the number of spilled variables""" - size = (self.mc.size_of_gen_load_int+WORD) + """Generate NOPs as placeholder to patch the instruction(s) to update + the sp according to the number of spilled variables""" + size = (self.mc.size_of_gen_load_int + WORD) l = self.mc.currpos() - for _ in range(size//WORD): + for _ in range(size // WORD): self.mc.NOP() return l def _patch_sp_offset(self, pos, frame_depth): - cb = OverwritingBuilder(self.mc, pos, OverwritingBuilder.size_of_gen_load_int+WORD) + cb = OverwritingBuilder(self.mc, pos, + OverwritingBuilder.size_of_gen_load_int + WORD) # Note: the frame_depth is one less than the value stored in the frame # manager if frame_depth == 1: return - n = (frame_depth-1)*WORD + n = (frame_depth - 1) * WORD # ensure the sp is 8 byte aligned when patching it if n % 8 != 0: @@ -794,7 +810,7 @@ cb.SUB_rr(r.sp.value, base_reg.value, r.ip.value, cond=fcond) def _walk_operations(self, operations, regalloc): - fcond=c.AL + fcond = c.AL self._regalloc = regalloc while regalloc.position() < len(operations) - 1: regalloc.next_instruction() @@ -804,7 +820,7 @@ if op.has_no_side_effect() and op.result not in regalloc.longevity: regalloc.possibly_free_vars_for_op(op) elif self.can_merge_with_next_guard(op, i, operations): - guard = operations[i+1] + 
guard = operations[i + 1] assert guard.is_guard() arglocs = regalloc_operations_with_guard[opnum](regalloc, op, guard, fcond) @@ -818,7 +834,8 @@ else: arglocs = regalloc_operations[opnum](regalloc, op, fcond) if arglocs is not None: - fcond = asm_operations[opnum](self, op, arglocs, regalloc, fcond) + fcond = asm_operations[opnum](self, op, arglocs, + regalloc, fcond) if op.is_guard(): regalloc.possibly_free_vars(op.getfailargs()) if op.result: @@ -864,7 +881,7 @@ if size == 4: return if size == 1: - if not signed: #unsigned char + if not signed: # unsigned char self.mc.AND_ri(resloc.value, resloc.value, 0xFF) else: self.mc.LSL_ri(resloc.value, resloc.value, 24) @@ -873,9 +890,6 @@ if not signed: self.mc.LSL_ri(resloc.value, resloc.value, 16) self.mc.LSR_ri(resloc.value, resloc.value, 16) - #self.mc.MOV_ri(r.ip.value, 0xFF) - #self.mc.ORR_ri(r.ip.value, 0xCFF) - #self.mc.AND_rr(resloc.value, resloc.value, r.ip.value) else: self.mc.LSL_ri(resloc.value, resloc.value, 16) self.mc.ASR_ri(resloc.value, resloc.value, 16) @@ -924,24 +938,28 @@ temp = r.lr else: temp = r.ip - offset = loc.position*WORD + offset = loc.position * WORD if not check_imm_arg(offset, size=0xFFF): self.mc.PUSH([temp.value], cond=cond) self.mc.gen_load_int(temp.value, -offset, cond=cond) - self.mc.STR_rr(prev_loc.value, r.fp.value, temp.value, cond=cond) + self.mc.STR_rr(prev_loc.value, r.fp.value, + temp.value, cond=cond) self.mc.POP([temp.value], cond=cond) else: - self.mc.STR_ri(prev_loc.value, r.fp.value, imm=-offset, cond=cond) + self.mc.STR_ri(prev_loc.value, r.fp.value, + imm=-offset, cond=cond) else: assert 0, 'unsupported case' def _mov_stack_to_loc(self, prev_loc, loc, cond=c.AL): pushed = False if loc.is_reg(): - assert prev_loc.type != FLOAT, 'trying to load from an incompatible location into a core register' - assert loc is not r.lr, 'lr is not supported as a target when moving from the stack' + assert prev_loc.type != FLOAT, 'trying to load from an \ + incompatible location into a core register' + assert loc is not r.lr, 'lr is not supported as a target \ + when moving from the stack' # unspill a core register - offset = prev_loc.position*WORD + offset = prev_loc.position * WORD if not check_imm_arg(offset, size=0xFFF): self.mc.PUSH([r.lr.value], cond=cond) pushed = True @@ -952,9 +970,10 @@ if pushed: self.mc.POP([r.lr.value], cond=cond) elif loc.is_vfp_reg(): - assert prev_loc.type == FLOAT, 'trying to load from an incompatible location into a float register' + assert prev_loc.type == FLOAT, 'trying to load from an \ + incompatible location into a float register' # load spilled value into vfp reg - offset = prev_loc.position*WORD + offset = prev_loc.position * WORD self.mc.PUSH([r.ip.value], cond=cond) pushed = True if not check_imm_arg(offset): @@ -980,10 +999,11 @@ if loc.is_vfp_reg(): self.mc.VMOV_cc(loc.value, prev_loc.value, cond=cond) elif loc.is_stack(): - assert loc.type == FLOAT, 'trying to store to an incompatible location from a float register' + assert loc.type == FLOAT, 'trying to store to an \ + incompatible location from a float register' # spill vfp register self.mc.PUSH([r.ip.value], cond=cond) - offset = loc.position*WORD + offset = loc.position * WORD if not check_imm_arg(offset): self.mc.gen_load_int(r.ip.value, offset, cond=cond) self.mc.SUB_rr(r.ip.value, r.fp.value, r.ip.value, cond=cond) @@ -1026,7 +1046,7 @@ self.mc.POP([r.ip.value], cond=cond) elif vfp_loc.is_stack() and vfp_loc.type == FLOAT: # load spilled vfp value into two core registers - offset = vfp_loc.position*WORD + 
offset = vfp_loc.position * WORD if not check_imm_arg(offset, size=0xFFF): self.mc.PUSH([r.ip.value], cond=cond) self.mc.gen_load_int(r.ip.value, -offset, cond=cond) @@ -1036,7 +1056,8 @@ self.mc.POP([r.ip.value], cond=cond) else: self.mc.LDR_ri(reg1.value, r.fp.value, imm=-offset, cond=cond) - self.mc.LDR_ri(reg2.value, r.fp.value, imm=-offset+WORD, cond=cond) + self.mc.LDR_ri(reg2.value, r.fp.value, + imm=-offset + WORD, cond=cond) else: assert 0, 'unsupported case' @@ -1048,7 +1069,7 @@ self.mc.VMOV_cr(vfp_loc.value, reg1.value, reg2.value, cond=cond) elif vfp_loc.is_stack(): # move from two core registers to a float stack location - offset = vfp_loc.position*WORD + offset = vfp_loc.position * WORD if not check_imm_arg(offset, size=0xFFF): self.mc.PUSH([r.ip.value], cond=cond) self.mc.gen_load_int(r.ip.value, -offset, cond=cond) @@ -1058,7 +1079,8 @@ self.mc.POP([r.ip.value], cond=cond) else: self.mc.STR_ri(reg1.value, r.fp.value, imm=-offset, cond=cond) - self.mc.STR_ri(reg2.value, r.fp.value, imm=-offset+WORD, cond=cond) + self.mc.STR_ri(reg2.value, r.fp.value, + imm=-offset + WORD, cond=cond) else: assert 0, 'unsupported case' @@ -1111,7 +1133,7 @@ def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) - size = (size + WORD-1) & ~(WORD-1) # round up + size = (size + WORD - 1) & ~(WORD - 1) # round up self.mc.gen_load_int(r.r0.value, nursery_free_adr) self.mc.LDR_ri(r.r0.value, r.r0.value) @@ -1156,7 +1178,6 @@ self.mc.gen_load_int(r.ip.value, tid) self.mc.STR_ri(r.ip.value, r.r0.value) - def mark_gc_roots(self, force_index, use_copy_area=False): if force_index < 0: return # not needed @@ -1180,14 +1201,18 @@ else: return 0 + def not_implemented(msg): os.write(2, '[ARM/asm] %s\n' % msg) raise NotImplementedError(msg) + def notimplemented_op(self, op, arglocs, regalloc, fcond): - raise NotImplementedError, op + raise NotImplementedError(op) + + def notimplemented_op_with_guard(self, op, guard_op, arglocs, regalloc, fcond): - raise NotImplementedError, op + raise NotImplementedError(op) asm_operations = [notimplemented_op] * (rop._LAST + 1) asm_operations_with_guard = [notimplemented_op_with_guard] * (rop._LAST + 1) @@ -1209,4 +1234,3 @@ if hasattr(AssemblerARM, methname): func = getattr(AssemblerARM, methname).im_func asm_operations_with_guard[value] = func - diff --git a/pypy/jit/backend/arm/codebuilder.py b/pypy/jit/backend/arm/codebuilder.py --- a/pypy/jit/backend/arm/codebuilder.py +++ b/pypy/jit/backend/arm/codebuilder.py @@ -4,13 +4,9 @@ from pypy.jit.backend.arm.arch import (WORD, FUNC_ALIGN) from pypy.jit.backend.arm.instruction_builder import define_instructions from pypy.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin -from pypy.jit.metainterp.history import ConstInt, BoxInt, AbstractFailDescr from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.rmmap import alloc, PTR -from pypy.rpython.annlowlevel import llhelper from pypy.rpython.lltypesystem import lltype, rffi, llmemory from pypy.tool.udir import udir -from pypy.translator.tool.cbuild import ExternalCompilationInfo clear_cache = rffi.llexternal( "__clear_cache", @@ -19,9 +15,10 @@ _nowrapper=True, sandboxsafe=True) + def binary_helper_call(name): - signature = getattr(arch, 'arm_%s_sign' % name) function = getattr(arch, 'arm_%s' % name) + def f(self, c=cond.AL): """Generates a call to a helper function, takes its arguments in r0 and r1, result is placed in r0""" @@ -31,9 +28,10 @@ else: self.PUSH(range(2, 4), cond=c) 
self.BL(addr, c) - self.POP(range(2,4), cond=c) + self.POP(range(2, 4), cond=c) return f + class AbstractARMv7Builder(object): def __init__(self): @@ -42,6 +40,7 @@ def align(self): while(self.currpos() % FUNC_ALIGN != 0): self.writechar(chr(0)) + def NOP(self): self.MOV_rr(0, 0) @@ -79,7 +78,7 @@ | 0xB << 8 | nregs) self.write32(instr) - + def VMOV_rc(self, rt, rt2, dm, cond=cond.AL): """This instruction copies two words from two ARM core registers into a doubleword extension register, or from a doubleword extension register @@ -116,7 +115,7 @@ self.write32(instr) def VMOV_cc(self, dd, dm, cond=cond.AL): - sz = 1 # for 64-bit mode + sz = 1 # for 64-bit mode instr = (cond << 28 | 0xEB << 20 | (dd & 0xF) << 12 @@ -163,10 +162,8 @@ self.write32(cond << 28 | 0xEF1FA10) def B(self, target, c=cond.AL): - #assert self._fits_in_24bits(target) - #return (c << 20 | 0xA << 24 | target & 0xFFFFFF) if c == cond.AL: - self.LDR_ri(reg.pc.value, reg.pc.value, -arch.PC_OFFSET/2) + self.LDR_ri(reg.pc.value, reg.pc.value, -arch.PC_OFFSET / 2) self.write32(target) else: self.gen_load_int(reg.ip.value, target, cond=c) @@ -180,8 +177,8 @@ def BL(self, target, c=cond.AL): if c == cond.AL: - self.ADD_ri(reg.lr.value, reg.pc.value, arch.PC_OFFSET/2) - self.LDR_ri(reg.pc.value, reg.pc.value, imm=-arch.PC_OFFSET/2) + self.ADD_ri(reg.lr.value, reg.pc.value, arch.PC_OFFSET / 2) + self.LDR_ri(reg.pc.value, reg.pc.value, imm=-arch.PC_OFFSET / 2) self.write32(target) else: self.gen_load_int(reg.ip.value, target, cond=c) @@ -235,7 +232,6 @@ def currpos(self): raise NotImplementedError - size_of_gen_load_int = 2 * WORD def gen_load_int(self, r, value, cond=cond.AL): """r is the register number, value is the value to be loaded to the register""" @@ -244,6 +240,8 @@ self.MOVW_ri(r, bottom, cond) if top: self.MOVT_ri(r, top, cond) + size_of_gen_load_int = 2 * WORD + class OverwritingBuilder(AbstractARMv7Builder): def __init__(self, cb, start, size): @@ -260,6 +258,7 @@ self.cb.overwrite(self.index, char) self.index += 1 + class ARMv7Builder(BlockBuilderMixin, AbstractARMv7Builder): def __init__(self): AbstractARMv7Builder.__init__(self) @@ -279,7 +278,7 @@ # XXX remove and setup aligning in llsupport def materialize(self, asmmemmgr, allblocks, gcrootmap=None): size = self.get_relative_pos() - malloced = asmmemmgr.malloc(size, size+7) + malloced = asmmemmgr.malloc(size, size + 7) allblocks.append(malloced) rawstart = malloced[0] while(rawstart % FUNC_ALIGN != 0): @@ -294,7 +293,8 @@ def clear_cache(self, addr): if we_are_translated(): startaddr = rffi.cast(llmemory.Address, addr) - endaddr = rffi.cast(llmemory.Address, addr + self.get_relative_pos()) + endaddr = rffi.cast(llmemory.Address, + addr + self.get_relative_pos()) clear_cache(startaddr, endaddr) def copy_to_raw_memory(self, addr): diff --git a/pypy/jit/backend/arm/conditions.py b/pypy/jit/backend/arm/conditions.py --- a/pypy/jit/backend/arm/conditions.py +++ b/pypy/jit/backend/arm/conditions.py @@ -15,10 +15,12 @@ AL = 0xE opposites = [NE, EQ, CC, CS, PL, MI, VC, VS, LS, HI, LT, GE, LE, GT, AL] + + def get_opposite_of(operation): return opposites[operation] -# see mapping for floating poin according to +# see mapping for floating poin according to # http://blogs.arm.com/software-enablement/405-condition-codes-4-floating-point-comparisons-using-vfp/ VFP_LT = CC VFP_LE = LS diff --git a/pypy/jit/backend/arm/instruction_builder.py b/pypy/jit/backend/arm/instruction_builder.py --- a/pypy/jit/backend/arm/instruction_builder.py +++ 
b/pypy/jit/backend/arm/instruction_builder.py @@ -1,5 +1,7 @@ from pypy.jit.backend.arm import conditions as cond from pypy.jit.backend.arm import instructions + + # move table lookup out of generated functions def define_load_store_func(name, table): n = (0x1 << 26 @@ -13,6 +15,7 @@ rncond = ('rn' in table and table['rn'] == '!0xF') if table['imm']: assert not b_zero + def f(self, rt, rn, imm=0, cond=cond.AL): assert not (rncond and rn == 0xF) p = 1 @@ -20,7 +23,7 @@ u, imm = self._encode_imm(imm) instr = (n | cond << 28 - | (p & 0x1) << 24 + | (p & 0x1) << 24 | (u & 0x1) << 23 | (w & 0x1) << 21 | imm_operation(rt, rn, imm)) @@ -34,7 +37,7 @@ u, imm = self._encode_imm(imm) instr = (n | cond << 28 - | (p & 0x1) << 24 + | (p & 0x1) << 24 | (u & 0x1) << 23 | (w & 0x1) << 21 | reg_operation(rt, rn, rm, imm, s, shifttype)) @@ -44,6 +47,7 @@ self.write32(instr) return f + def define_extra_load_store_func(name, table): def check_registers(r1, r2): assert r1 % 2 == 0 @@ -57,7 +61,7 @@ p = 1 w = 0 rncond = ('rn' in table and table['rn'] == '!0xF') - dual = (name[-4] == 'D') + dual = (name[-4] == 'D') if dual: if name[-2:] == 'rr': @@ -114,6 +118,7 @@ | (imm & 0xF)) return f + def define_data_proc_imm_func(name, table): n = (0x1 << 25 | (table['op'] & 0x1F) << 20) @@ -139,6 +144,7 @@ | imm_operation(0, rn, imm)) return imm_func + def define_data_proc_func(name, table): n = ((table['op1'] & 0x1F) << 20 | (table['op2'] & 0x1F) << 7 @@ -175,6 +181,7 @@ | reg_operation(rd, rn, rm, imm, s, shifttype)) return f + def define_data_proc_reg_shift_reg_func(name, table): n = ((0x1 << 4) | (table['op1'] & 0x1F) << 20 | (table['op2'] & 0x3) << 5) if 'result' in table and not table['result']: @@ -211,8 +218,10 @@ | (rn & 0xF)) return f + def define_supervisor_and_coproc_func(name, table): n = (0x3 << 26 | (table['op1'] & 0x3F) << 20 | (table['op'] & 0x1) << 4) + def f(self, coproc, opc1, rt, crn, crm, opc2=0, cond=cond.AL): self.write32(n | cond << 28 @@ -224,6 +233,7 @@ | (crm & 0xF)) return f + def define_multiply_func(name, table): n = (table['op'] & 0xF) << 20 | 0x9 << 4 if 'acc' in table and table['acc']: @@ -246,14 +256,14 @@ | (rn & 0xF)) elif 'long' in table and table['long']: - def f(self, rdlo, rdhi, rn, rm, cond=cond.AL): + def f(self, rdlo, rdhi, rn, rm, cond=cond.AL): assert rdhi != rdlo self.write32(n - | cond << 28 - | (rdhi & 0xF) << 16 - | (rdlo & 0xF) << 12 - | (rm & 0xF) << 8 - | (rn & 0xF)) + | cond << 28 + | (rdhi & 0xF) << 16 + | (rdlo & 0xF) << 12 + | (rm & 0xF) << 8 + | (rn & 0xF)) else: def f(self, rd, rn, rm, cond=cond.AL, s=0): self.write32(n @@ -265,8 +275,10 @@ return f + def define_block_data_func(name, table): n = (table['op'] & 0x3F) << 20 + def f(self, rn, regs, w=0, cond=cond.AL): # no R bit for now at bit 15 instr = (n @@ -278,6 +290,8 @@ self.write32(instr) return f + + def define_float_load_store_func(name, table): n = (0x3 << 26 | (table['opcode'] & 0x1F) << 20 @@ -288,9 +302,9 @@ # the value actually encoded is imm / 4 def f(self, dd, rn, imm=0, cond=cond.AL): assert imm % 4 == 0 - imm = imm/4 + imm = imm / 4 u, imm = self._encode_imm(imm) - instr = ( n + instr = (n | (cond & 0xF) << 28 | (u & 0x1) << 23 | (rn & 0xF) << 16 @@ -299,10 +313,11 @@ self.write32(instr) return f + def define_float64_data_proc_instructions_func(name, table): n = (0xE << 24 | 0x5 << 9 - | 0x1 << 8 # 64 bit flag + | 0x1 << 8 # 64 bit flag | (table['opc3'] & 0x3) << 6) if 'opc1' in table: @@ -335,11 +350,13 @@ self.write32(instr) return f + def imm_operation(rt, rn, imm): return ((rn & 0xFF) << 16 | 
(rt & 0xFF) << 12 | (imm & 0xFFF)) + def reg_operation(rt, rn, rm, imm, s, shifttype): return ((s & 0x1) << 20 | (rn & 0xF) << 16 @@ -348,10 +365,12 @@ | (shifttype & 0x3) << 5 | (rm & 0xF)) + def define_instruction(builder, key, val, target): f = builder(key, val) setattr(target, key, f) + def define_instructions(target): inss = [k for k in instructions.__dict__.keys() if not k.startswith('__')] for name in inss: diff --git a/pypy/jit/backend/arm/instructions.py b/pypy/jit/backend/arm/instructions.py --- a/pypy/jit/backend/arm/instructions.py +++ b/pypy/jit/backend/arm/instructions.py @@ -1,93 +1,94 @@ load_store = { - 'STR_ri': {'A':0, 'op1': 0x0, 'op1not': 0x2, 'imm': True}, - 'STR_rr': {'A':1, 'op1': 0x0, 'op1not': 0x2, 'B': 0, 'imm': False}, - 'LDR_ri': {'A':0, 'op1': 0x1, 'op1not': 0x3, 'imm': True}, - 'LDR_rr': {'A':1, 'op1': 0x1, 'op1not': 0x3, 'B': 0, 'imm': False}, - 'STRB_ri': {'A':0, 'op1': 0x4, 'op1not': 0x6, 'rn':'!0xF', 'imm': True}, - 'STRB_rr': {'A':1, 'op1': 0x4, 'op1not': 0x6, 'B': 0, 'imm': False}, - 'LDRB_ri': {'A':0, 'op1': 0x5, 'op1not': 0x7, 'rn':'!0xF', 'imm': True}, - 'LDRB_rr': {'A':1, 'op1': 0x5, 'op1not': 0x7, 'B': 0, 'imm': False}, + 'STR_ri': {'A': 0, 'op1': 0x0, 'op1not': 0x2, 'imm': True}, + 'STR_rr': {'A': 1, 'op1': 0x0, 'op1not': 0x2, 'B': 0, 'imm': False}, + 'LDR_ri': {'A': 0, 'op1': 0x1, 'op1not': 0x3, 'imm': True}, + 'LDR_rr': {'A': 1, 'op1': 0x1, 'op1not': 0x3, 'B': 0, 'imm': False}, + 'STRB_ri': {'A': 0, 'op1': 0x4, 'op1not': 0x6, 'rn': '!0xF', 'imm': True}, + 'STRB_rr': {'A': 1, 'op1': 0x4, 'op1not': 0x6, 'B': 0, 'imm': False}, + 'LDRB_ri': {'A': 0, 'op1': 0x5, 'op1not': 0x7, 'rn': '!0xF', 'imm': True}, + 'LDRB_rr': {'A': 1, 'op1': 0x5, 'op1not': 0x7, 'B': 0, 'imm': False}, } -extra_load_store = { #Section 5.2.8 +extra_load_store = { # Section 5.2.8 'STRH_rr': {'op2': 0x1, 'op1': 0x0}, 'LDRH_rr': {'op2': 0x1, 'op1': 0x1}, 'STRH_ri': {'op2': 0x1, 'op1': 0x4}, - 'LDRH_ri': {'op2': 0x1, 'op1': 0x5, 'rn':'!0xF'}, + 'LDRH_ri': {'op2': 0x1, 'op1': 0x5, 'rn': '!0xF'}, 'LDRD_rr': {'op2': 0x2, 'op1': 0x0}, 'LDRSB_rr': {'op2': 0x2, 'op1': 0x1}, 'LDRD_ri': {'op2': 0x2, 'op1': 0x4}, - 'LDRSB_ri': {'op2': 0x2, 'op1': 0x5, 'rn':'!0xF'}, + 'LDRSB_ri': {'op2': 0x2, 'op1': 0x5, 'rn': '!0xF'}, 'STRD_rr': {'op2': 0x3, 'op1': 0x0}, 'LDRSH_rr': {'op2': 0x3, 'op1': 0x1}, 'STRD_ri': {'op2': 0x3, 'op1': 0x4}, - 'LDRSH_ri': {'op2': 0x3, 'op1': 0x5, 'rn':'!0xF'}, + 'LDRSH_ri': {'op2': 0x3, 'op1': 0x5, 'rn': '!0xF'}, } data_proc = { - 'AND_rr': {'op1':0x0, 'op2':0, 'op3':0, 'result':True, 'base':True}, - 'EOR_rr': {'op1':0x2, 'op2':0, 'op3':0, 'result':True, 'base':True}, - 'SUB_rr': {'op1':0x4, 'op2':0, 'op3':0, 'result':True, 'base':True}, - 'RSB_rr': {'op1':0x6, 'op2':0, 'op3':0, 'result':True, 'base':True}, - 'ADD_rr': {'op1':0x8, 'op2':0, 'op3':0, 'result':True, 'base':True}, - 'ADC_rr': {'op1':0xA, 'op2':0, 'op3':0, 'result':True, 'base':True}, - 'SBC_rr': {'op1':0xC, 'op2':0, 'op3':0, 'result':True, 'base':True}, - 'RSC_rr': {'op1':0xE, 'op2':0, 'op3':0, 'result':True, 'base':True}, - 'TST_rr': {'op1':0x11, 'op2':0, 'op3':0, 'result':False, 'base':True}, - 'TEQ_rr': {'op1':0x13, 'op2':0, 'op3':0, 'result':False, 'base':True}, - 'CMP_rr': {'op1':0x15, 'op2':0, 'op3':0, 'result':False, 'base':True}, - 'CMN_rr': {'op1':0x17, 'op2':0, 'op3':0, 'result':False, 'base':True}, - 'ORR_rr': {'op1':0x18, 'op2':0, 'op3':0, 'result':True, 'base':True}, - 'MOV_rr': {'op1':0x1A, 'op2':0, 'op3':0, 'result':True, 'base':False}, - 'LSL_ri': {'op1':0x1A, 'op2':0x0, 'op3':0, 
'op2cond':'!0', 'result':False, 'base':True}, - 'LSR_ri': {'op1':0x1A, 'op2':0, 'op3':0x1, 'op2cond':'', 'result':False, 'base':True}, - 'ASR_ri': {'op1':0x1A, 'op2':0, 'op3':0x2, 'op2cond':'', 'result':False, 'base':True}, - #'RRX_ri': {'op1':0x1A, 'op2':0, 'op3':0x3, 'op2cond':'0', 'result':False, 'base':True}, - 'ROR_ri': {'op1':0x1A, 'op2':0x0, 'op3':0x3, 'op2cond':'!0', 'result':True, 'base':False}, - #BIC - 'MVN_rr': {'op1':0x1E, 'op2':0x0, 'op3':0x0, 'result':True, 'base':False}, + 'AND_rr': {'op1': 0x0, 'op2': 0, 'op3': 0, 'result': True, 'base': True}, + 'EOR_rr': {'op1': 0x2, 'op2': 0, 'op3': 0, 'result': True, 'base': True}, + 'SUB_rr': {'op1': 0x4, 'op2': 0, 'op3': 0, 'result': True, 'base': True}, + 'RSB_rr': {'op1': 0x6, 'op2': 0, 'op3': 0, 'result': True, 'base': True}, + 'ADD_rr': {'op1': 0x8, 'op2': 0, 'op3': 0, 'result': True, 'base': True}, + 'ADC_rr': {'op1': 0xA, 'op2': 0, 'op3': 0, 'result': True, 'base': True}, + 'SBC_rr': {'op1': 0xC, 'op2': 0, 'op3': 0, 'result': True, 'base': True}, + 'RSC_rr': {'op1': 0xE, 'op2': 0, 'op3': 0, 'result': True, 'base': True}, + 'TST_rr': {'op1': 0x11, 'op2': 0, 'op3': 0, 'result': False, 'base': True}, + 'TEQ_rr': {'op1': 0x13, 'op2': 0, 'op3': 0, 'result': False, 'base': True}, + 'CMP_rr': {'op1': 0x15, 'op2': 0, 'op3': 0, 'result': False, 'base': True}, + 'CMN_rr': {'op1': 0x17, 'op2': 0, 'op3': 0, 'result': False, 'base': True}, + 'ORR_rr': {'op1': 0x18, 'op2': 0, 'op3': 0, 'result': True, 'base': True}, + 'MOV_rr': {'op1': 0x1A, 'op2': 0, 'op3': 0, 'result': True, 'base': False}, + 'LSL_ri': {'op1': 0x1A, 'op2': 0x0, 'op3': 0, 'op2cond': '!0', + 'result': False, 'base': True}, + 'LSR_ri': {'op1': 0x1A, 'op2': 0, 'op3': 0x1, 'op2cond': '', + 'result': False, 'base': True}, + 'ASR_ri': {'op1': 0x1A, 'op2': 0, 'op3': 0x2, 'op2cond': '', + 'result': False, 'base': True}, + 'ROR_ri': {'op1': 0x1A, 'op2': 0x0, 'op3': 0x3, 'op2cond': '!0', + 'result': True, 'base': False}, + 'MVN_rr': {'op1': 0x1E, 'op2': 0x0, 'op3': 0x0, 'result': True, + 'base': False}, } data_proc_reg_shift_reg = { - 'AND_rr_sr': {'op1':0x0, 'op2':0}, - 'EOR_rr_sr': {'op1':0x2, 'op2':0}, - 'SUB_rr_sr': {'op1':0x4, 'op2':0}, - 'RSB_rr_sr': {'op1':0x6, 'op2':0}, - 'ADD_rr_sr': {'op1':0x8, 'op2':0}, - 'ADC_rr_sr': {'op1':0xA, 'op2':0}, - 'SBC_rr_sr': {'op1':0xC, 'op2':0}, - 'RSC_rr_sr': {'op1':0xE, 'op2':0}, - 'TST_rr_sr': {'op1':0x11, 'op2':0, 'result': False}, - 'TEQ_rr_sr': {'op1':0x13, 'op2':0, 'result': False}, - 'CMP_rr_sr': {'op1':0x15, 'op2':0, 'result': False}, - 'CMN_rr_sr': {'op1':0x17, 'op2':0, 'result': False}, - 'ORR_rr_sr': {'op1':0x18, 'op2':0}, - 'LSL_rr': {'op1':0x1A, 'op2':0, }, - 'LSR_rr': {'op1':0x1A, 'op2':0x1}, - 'ASR_rr': {'op1':0x1A, 'op2':0x2}, - #'RRX_rr': {'op1':0x1A, 'op2':0,}, - 'ROR_rr': {'op1':0x1A, 'op2':0x3}, - # BIC, MVN + 'AND_rr_sr': {'op1': 0x0, 'op2': 0}, + 'EOR_rr_sr': {'op1': 0x2, 'op2': 0}, + 'SUB_rr_sr': {'op1': 0x4, 'op2': 0}, + 'RSB_rr_sr': {'op1': 0x6, 'op2': 0}, + 'ADD_rr_sr': {'op1': 0x8, 'op2': 0}, + 'ADC_rr_sr': {'op1': 0xA, 'op2': 0}, + 'SBC_rr_sr': {'op1': 0xC, 'op2': 0}, + 'RSC_rr_sr': {'op1': 0xE, 'op2': 0}, + 'TST_rr_sr': {'op1': 0x11, 'op2': 0, 'result': False}, + 'TEQ_rr_sr': {'op1': 0x13, 'op2': 0, 'result': False}, + 'CMP_rr_sr': {'op1': 0x15, 'op2': 0, 'result': False}, + 'CMN_rr_sr': {'op1': 0x17, 'op2': 0, 'result': False}, + 'ORR_rr_sr': {'op1': 0x18, 'op2': 0}, + 'LSL_rr': {'op1': 0x1A, 'op2': 0, }, + 'LSR_rr': {'op1': 0x1A, 'op2': 0x1}, + 'ASR_rr': {'op1': 0x1A, 'op2': 0x2}, + 'ROR_rr': {'op1': 0x1A, 
'op2': 0x3}, } data_proc_imm = { - 'AND_ri': {'op': 0, 'result':True, 'base':True}, - 'EOR_ri': {'op': 0x2, 'result':True, 'base':True}, - 'SUB_ri': {'op': 0x4, 'result':True, 'base':True}, - 'RSB_ri': {'op': 0x6, 'result':True, 'base':True}, - 'ADD_ri': {'op': 0x8, 'result':True, 'base':True}, - 'ADC_ri': {'op': 0xA, 'result':True, 'base':True}, - 'SBC_ri': {'op': 0xC, 'result':True, 'base':True}, - 'RSC_ri': {'op': 0xE, 'result':True, 'base':True}, - 'TST_ri': {'op': 0x11, 'result':False, 'base':True}, - 'TEQ_ri': {'op': 0x13, 'result':False, 'base':True}, - 'CMP_ri': {'op': 0x15, 'result':False, 'base':True}, - 'CMN_ri': {'op': 0x17, 'result':False, 'base':True}, - 'ORR_ri': {'op': 0x18, 'result':True, 'base':True}, - 'MOV_ri': {'op': 0x1A, 'result':True, 'base':False}, - 'BIC_ri': {'op': 0x1C, 'result':True, 'base':True}, - 'MVN_ri': {'op': 0x1E, 'result':True, 'base':False}, + 'AND_ri': {'op': 0, 'result': True, 'base': True}, + 'EOR_ri': {'op': 0x2, 'result': True, 'base': True}, + 'SUB_ri': {'op': 0x4, 'result': True, 'base': True}, + 'RSB_ri': {'op': 0x6, 'result': True, 'base': True}, + 'ADD_ri': {'op': 0x8, 'result': True, 'base': True}, + 'ADC_ri': {'op': 0xA, 'result': True, 'base': True}, + 'SBC_ri': {'op': 0xC, 'result': True, 'base': True}, + 'RSC_ri': {'op': 0xE, 'result': True, 'base': True}, + 'TST_ri': {'op': 0x11, 'result': False, 'base': True}, + 'TEQ_ri': {'op': 0x13, 'result': False, 'base': True}, + 'CMP_ri': {'op': 0x15, 'result': False, 'base': True}, + 'CMN_ri': {'op': 0x17, 'result': False, 'base': True}, + 'ORR_ri': {'op': 0x18, 'result': True, 'base': True}, + 'MOV_ri': {'op': 0x1A, 'result': True, 'base': False}, + 'BIC_ri': {'op': 0x1C, 'result': True, 'base': True}, + 'MVN_ri': {'op': 0x1E, 'result': True, 'base': False}, } supervisor_and_coproc = { diff --git a/pypy/jit/backend/arm/jump.py b/pypy/jit/backend/arm/jump.py --- a/pypy/jit/backend/arm/jump.py +++ b/pypy/jit/backend/arm/jump.py @@ -1,7 +1,7 @@ # ../x86/jump.py # XXX combine with ../x86/jump.py and move to llsupport import sys -from pypy.tool.pairtype import extendabletype + def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg): pending_dests = len(dst_locations) @@ -65,12 +65,14 @@ assembler.regalloc_pop(dst) assert pending_dests == 0 + def _move(assembler, src, dst, tmpreg): if dst.is_stack() and src.is_stack(): assembler.regalloc_mov(src, tmpreg) src = tmpreg assembler.regalloc_mov(src, dst) + def remap_frame_layout_mixed(assembler, src_locations1, dst_locations1, tmpreg1, src_locations2, dst_locations2, tmpreg2): @@ -84,7 +86,7 @@ src_locations2red = [] dst_locations2red = [] for i in range(len(src_locations2)): - loc = src_locations2[i] + loc = src_locations2[i] dstloc = dst_locations2[i] if loc.is_stack(): key = loc.as_key() diff --git a/pypy/jit/backend/arm/locations.py b/pypy/jit/backend/arm/locations.py --- a/pypy/jit/backend/arm/locations.py +++ b/pypy/jit/backend/arm/locations.py @@ -1,5 +1,7 @@ -from pypy.jit.metainterp.history import INT, FLOAT, REF +from pypy.jit.metainterp.history import INT, FLOAT from pypy.jit.backend.arm.arch import WORD + + class AssemblerLocation(object): _immutable_ = True type = INT @@ -22,6 +24,7 @@ def as_key(self): raise NotImplementedError + class RegisterLocation(AssemblerLocation): _immutable_ = True width = WORD @@ -38,13 +41,15 @@ def as_key(self): return self.value + class VFPRegisterLocation(RegisterLocation): _immutable_ = True - type = FLOAT - width = 2*WORD + type = FLOAT + width = 2 * WORD def get_single_precision_regs(self): 
- return [VFPRegisterLocation(i) for i in [self.value*2, self.value*2+1]] + return [VFPRegisterLocation(i) for i in + [self.value * 2, self.value * 2 + 1]] def __repr__(self): return 'vfp%d' % self.value @@ -58,11 +63,11 @@ def as_key(self): return self.value + 20 + class ImmLocation(AssemblerLocation): _immutable_ = True width = WORD - def __init__(self, value): self.value = value @@ -78,11 +83,12 @@ def as_key(self): return self.value + 40 + class ConstFloatLoc(AssemblerLocation): """This class represents an imm float value which is stored in memory at the address stored in the field value""" _immutable_ = True - width = 2*WORD + width = 2 * WORD type = FLOAT def __init__(self, value): @@ -100,6 +106,7 @@ def as_key(self): return -1 * self.value + class StackLocation(AssemblerLocation): _immutable_ = True @@ -123,5 +130,6 @@ def as_key(self): return -self.position + def imm(i): return ImmLocation(i) diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -1,39 +1,35 @@ from __future__ import with_statement from pypy.jit.backend.arm import conditions as c -from pypy.jit.backend.arm import locations from pypy.jit.backend.arm import registers as r from pypy.jit.backend.arm import shift from pypy.jit.backend.arm.arch import WORD, PC_OFFSET from pypy.jit.backend.arm.helper.assembler import (gen_emit_op_by_helper_call, - gen_emit_op_unary_cmp, - gen_emit_guard_unary_cmp, - gen_emit_op_ri, - gen_emit_cmp_op, - gen_emit_cmp_op_guard, - gen_emit_float_op, - gen_emit_float_cmp_op, - gen_emit_float_cmp_op_guard, - gen_emit_unary_float_op, - saved_registers, - count_reg_args) + gen_emit_op_unary_cmp, + gen_emit_guard_unary_cmp, + gen_emit_op_ri, + gen_emit_cmp_op, + gen_emit_cmp_op_guard, + gen_emit_float_op, + gen_emit_float_cmp_op, + gen_emit_float_cmp_op_guard, + gen_emit_unary_float_op, + saved_registers, + count_reg_args) from pypy.jit.backend.arm.codebuilder import ARMv7Builder, OverwritingBuilder from pypy.jit.backend.arm.jump import remap_frame_layout -from pypy.jit.backend.arm.regalloc import Regalloc, TempInt, TempPtr +from pypy.jit.backend.arm.regalloc import TempInt, TempPtr from pypy.jit.backend.arm.locations import imm from pypy.jit.backend.llsupport import symbolic -from pypy.jit.backend.llsupport.descr import BaseFieldDescr, BaseArrayDescr -from pypy.jit.backend.llsupport.regalloc import compute_vars_longevity -from pypy.jit.metainterp.history import (Const, ConstInt, BoxInt, Box, - AbstractFailDescr, LoopToken, INT, FLOAT, REF) +from pypy.jit.metainterp.history import (Box, AbstractFailDescr, + LoopToken, INT, FLOAT, REF) from pypy.jit.metainterp.resoperation import rop -from pypy.rlib import rgc from pypy.rlib.objectmodel import we_are_translated -from pypy.rpython.annlowlevel import llhelper -from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory +from pypy.rpython.lltypesystem import lltype, rffi, rstr NO_FORCE_INDEX = -1 + class GuardToken(object): def __init__(self, descr, failargs, faillocs, offset, save_exc, fcond=c.AL, is_invalidate=False): @@ -44,7 +40,8 @@ self.failargs = failargs self.faillocs = faillocs self.save_exc = save_exc - self.fcond=fcond + self.fcond = fcond + class IntOpAsslember(object): @@ -94,10 +91,12 @@ def emit_guard_int_mul_ovf(self, op, guard, arglocs, regalloc, fcond): reg1 = arglocs[0] reg2 = arglocs[1] - res = arglocs[2] + res = arglocs[2] failargs = arglocs[3:] - self.mc.SMULL(res.value, r.ip.value, reg1.value, 
reg2.value, cond=fcond) - self.mc.CMP_rr(r.ip.value, res.value, shifttype=shift.ASR, imm=31, cond=fcond) + self.mc.SMULL(res.value, r.ip.value, reg1.value, reg2.value, + cond=fcond) + self.mc.CMP_rr(r.ip.value, res.value, shifttype=shift.ASR, + imm=31, cond=fcond) if guard.getopnum() == rop.GUARD_OVERFLOW: fcond = self._emit_guard(guard, failargs, c.NE, save_exc=False) @@ -111,7 +110,7 @@ self.emit_op_int_add(op, arglocs[0:3], regalloc, fcond, flags=True) self._emit_guard_overflow(guard, arglocs[3:], fcond) return fcond - + def emit_guard_int_sub_ovf(self, op, guard, arglocs, regalloc, fcond): self.emit_op_int_sub(op, arglocs[0:3], regalloc, fcond, flags=True) self._emit_guard_overflow(guard, arglocs[3:], fcond) @@ -135,8 +134,6 @@ emit_op_int_gt = gen_emit_cmp_op('int_gt', c.GT) emit_op_int_ge = gen_emit_cmp_op('int_ge', c.GE) - - emit_guard_int_lt = gen_emit_cmp_op_guard('int_lt', c.LT) emit_guard_int_le = gen_emit_cmp_op_guard('int_le', c.LE) emit_guard_int_eq = gen_emit_cmp_op_guard('int_eq', c.EQ) @@ -163,7 +160,6 @@ emit_op_int_sub_ovf = emit_op_int_sub - class UnaryIntOpAssembler(object): _mixin_ = True @@ -185,19 +181,20 @@ self.mc.RSB_ri(resloc.value, l0.value, imm=0) return fcond + class GuardOpAssembler(object): _mixin_ = True - def _emit_guard(self, op, arglocs, fcond, save_exc, is_guard_not_invalidated=False): + def _emit_guard(self, op, arglocs, fcond, save_exc, + is_guard_not_invalidated=False): assert isinstance(save_exc, bool) assert isinstance(fcond, int) descr = op.getdescr() assert isinstance(descr, AbstractFailDescr) - if not we_are_translated() and hasattr(op, 'getfailargs'): - print 'Failargs: ', op.getfailargs() + print 'Failargs: ', op.getfailargs() pos = self.mc.currpos() # For all guards that are not GUARD_NOT_INVALIDATED we emit a @@ -283,7 +280,8 @@ return fcond def emit_op_guard_not_invalidated(self, op, locs, regalloc, fcond): - return self._emit_guard(op, locs, fcond, save_exc=False, is_guard_not_invalidated=True) + return self._emit_guard(op, locs, fcond, save_exc=False, + is_guard_not_invalidated=True) def _cmp_guard_class(self, op, locs, regalloc, fcond): offset = locs[2] @@ -316,12 +314,13 @@ else: target = descr._arm_bootstrap_code + descr._arm_loop_code self.mc.B(target, fcond) - new_fd = max(regalloc.frame_manager.frame_depth, descr._arm_frame_depth) + new_fd = max(regalloc.frame_manager.frame_depth, + descr._arm_frame_depth) regalloc.frame_manager.frame_depth = new_fd return fcond def emit_op_finish(self, op, arglocs, regalloc, fcond): - for i in range(len(arglocs) -1): + for i in range(len(arglocs) - 1): loc = arglocs[i] box = op.getarg(i) if loc is None: @@ -367,16 +366,18 @@ self.gen_func_epilog() return fcond - def emit_op_call(self, op, args, regalloc, fcond, force_index=NO_FORCE_INDEX): + def emit_op_call(self, op, args, regalloc, fcond, + force_index=NO_FORCE_INDEX): adr = args[0].value arglist = op.getarglist()[1:] if force_index == NO_FORCE_INDEX: force_index = self.write_new_force_index() - cond = self._emit_call(force_index, adr, arglist, + cond = self._emit_call(force_index, adr, arglist, regalloc, fcond, op.result) descr = op.getdescr() #XXX Hack, Hack, Hack - if op.result and not we_are_translated() and not isinstance(descr, LoopToken): + if (op.result and not we_are_translated() + and not isinstance(descr, LoopToken)): #XXX check result type loc = regalloc.rm.call_result_location(op.result) size = descr.get_result_size(False) @@ -388,11 +389,11 @@ # emit_op_call_may_force # XXX improve freeing of stuff here # XXX add an interface that 
takes locations instead of boxes - def _emit_call(self, force_index, adr, args, regalloc, fcond=c.AL, result=None): + def _emit_call(self, force_index, adr, args, regalloc, fcond=c.AL, + result=None): n_args = len(args) reg_args = count_reg_args(args) - # all arguments past the 4th go on the stack n = 0 # used to count the number of words pushed on the stack, so we #can later modify the SP back to its original value @@ -417,15 +418,15 @@ stack_args.append(None) #then we push every thing on the stack - for i in range(len(stack_args) -1, -1, -1): + for i in range(len(stack_args) - 1, -1, -1): arg = stack_args[i] if arg is None: self.mc.PUSH([r.ip.value]) else: self.regalloc_push(regalloc.loc(arg)) - # collect variables that need to go in registers - # and the registers they will be stored in + # collect variables that need to go in registers and the registers they + # will be stored in num = 0 count = 0 non_float_locs = [] @@ -457,7 +458,7 @@ remap_frame_layout(self, non_float_locs, non_float_regs, r.ip) for loc, reg in float_locs: - self.mov_from_vfp_loc(loc, reg, r.all_regs[reg.value+1]) + self.mov_from_vfp_loc(loc, reg, r.all_regs[reg.value + 1]) #the actual call self.mc.BL(adr) @@ -557,9 +558,8 @@ callargs = [r.r0, r.r1, r.r2] remap_frame_layout(self, arglocs, callargs, r.ip) func = rffi.cast(lltype.Signed, addr) - # - # misaligned stack in the call, but it's ok because the write barrier - # is not going to call anything more. + # misaligned stack in the call, but it's ok because the write + # barrier is not going to call anything more. self.mc.BL(func) # patch the JZ above @@ -570,6 +570,7 @@ emit_op_cond_call_gc_wb_array = emit_op_cond_call_gc_wb + class FieldOpAssembler(object): _mixin_ = True @@ -652,7 +653,8 @@ emit_op_getfield_gc_pure = emit_op_getfield_gc def emit_op_getinteriorfield_gc(self, op, arglocs, regalloc, fcond): - base_loc, index_loc, res_loc, ofs_loc, ofs, itemsize, fieldsize = arglocs + (base_loc, index_loc, res_loc, + ofs_loc, ofs, itemsize, fieldsize) = arglocs self.mc.gen_load_int(r.ip.value, itemsize.value) self.mc.MUL(r.ip.value, index_loc.value, r.ip.value) if ofs.value > 0: @@ -684,7 +686,8 @@ return fcond def emit_op_setinteriorfield_gc(self, op, arglocs, regalloc, fcond): - base_loc, index_loc, value_loc, ofs_loc, ofs, itemsize, fieldsize = arglocs + (base_loc, index_loc, value_loc, + ofs_loc, ofs, itemsize, fieldsize) = arglocs self.mc.gen_load_int(r.ip.value, itemsize.value) self.mc.MUL(r.ip.value, index_loc.value, r.ip.value) if ofs.value > 0: @@ -710,8 +713,6 @@ return fcond - - class ArrayOpAssember(object): _mixin_ = True @@ -730,7 +731,7 @@ else: scale_loc = ofs_loc - # add the base offset + # add the base offset if ofs.value > 0: self.mc.ADD_ri(r.ip.value, scale_loc.value, imm=ofs.value) scale_loc = r.ip @@ -741,11 +742,14 @@ self.mc.ADD_rr(r.ip.value, base_loc.value, scale_loc.value) self.mc.VSTR(value_loc.value, r.ip.value, cond=fcond) elif scale.value == 2: - self.mc.STR_rr(value_loc.value, base_loc.value, scale_loc.value, cond=fcond) + self.mc.STR_rr(value_loc.value, base_loc.value, scale_loc.value, + cond=fcond) elif scale.value == 1: - self.mc.STRH_rr(value_loc.value, base_loc.value, scale_loc.value, cond=fcond) + self.mc.STRH_rr(value_loc.value, base_loc.value, scale_loc.value, + cond=fcond) elif scale.value == 0: - self.mc.STRB_rr(value_loc.value, base_loc.value, scale_loc.value, cond=fcond) + self.mc.STRB_rr(value_loc.value, base_loc.value, scale_loc.value, + cond=fcond) else: assert 0 return fcond @@ -761,7 +765,7 @@ else: scale_loc = 
ofs_loc - # add the base offset + # add the base offset if ofs.value > 0: self.mc.ADD_ri(r.ip.value, scale_loc.value, imm=ofs.value) scale_loc = r.ip @@ -772,18 +776,21 @@ self.mc.ADD_rr(r.ip.value, base_loc.value, scale_loc.value) self.mc.VLDR(res.value, r.ip.value, cond=fcond) elif scale.value == 2: - self.mc.LDR_rr(res.value, base_loc.value, scale_loc.value, cond=fcond) + self.mc.LDR_rr(res.value, base_loc.value, scale_loc.value, + cond=fcond) elif scale.value == 1: - self.mc.LDRH_rr(res.value, base_loc.value, scale_loc.value, cond=fcond) + self.mc.LDRH_rr(res.value, base_loc.value, scale_loc.value, + cond=fcond) elif scale.value == 0: - self.mc.LDRB_rr(res.value, base_loc.value, scale_loc.value, cond=fcond) + self.mc.LDRB_rr(res.value, base_loc.value, scale_loc.value, + cond=fcond) else: assert 0 #XXX Hack, Hack, Hack if not we_are_translated(): descr = op.getdescr() - size = descr.get_item_size(False) + size = descr.get_item_size(False) signed = descr.is_item_signed() self._ensure_result_bit_extension(res, size, signed) return fcond @@ -807,9 +814,11 @@ def emit_op_strgetitem(self, op, arglocs, regalloc, fcond): res, base_loc, ofs_loc, basesize = arglocs if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), cond=fcond) + self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), + cond=fcond) else: - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond) + self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, + cond=fcond) self.mc.LDRB_ri(res.value, r.ip.value, basesize.value, cond=fcond) return fcond @@ -817,11 +826,14 @@ def emit_op_strsetitem(self, op, arglocs, regalloc, fcond): value_loc, base_loc, ofs_loc, basesize = arglocs if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), cond=fcond) + self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), + cond=fcond) else: - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond) + self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, + cond=fcond) - self.mc.STRB_ri(value_loc.value, r.ip.value, basesize.value, cond=fcond) + self.mc.STRB_ri(value_loc.value, r.ip.value, basesize.value, + cond=fcond) return fcond #from ../x86/regalloc.py:928 ff. 
@@ -844,18 +856,20 @@ regalloc.possibly_free_var(args[0]) regalloc.free_temp_vars() if args[3] is not args[2] is not args[4]: # MESS MESS MESS: don't free - regalloc.possibly_free_var(args[2]) # it if ==args[3] or args[4] + regalloc.possibly_free_var(args[2]) # it if ==args[3] or args[4] regalloc.free_temp_vars() srcaddr_box = TempPtr() forbidden_vars = [args[1], args[3], args[4], srcaddr_box] - srcaddr_loc = regalloc.force_allocate_reg(srcaddr_box, selected_reg=r.r1) + srcaddr_loc = regalloc.force_allocate_reg(srcaddr_box, + selected_reg=r.r1) self._gen_address_inside_string(base_loc, ofs_loc, srcaddr_loc, is_unicode=is_unicode) # compute the destination address forbidden_vars = [args[4], args[3], srcaddr_box] dstaddr_box = TempPtr() - dstaddr_loc = regalloc.force_allocate_reg(dstaddr_box, selected_reg=r.r0) + dstaddr_loc = regalloc.force_allocate_reg(dstaddr_box, + selected_reg=r.r0) forbidden_vars.append(dstaddr_box) base_loc = regalloc._ensure_value_is_boxed(args[1], forbidden_vars) ofs_loc = regalloc._ensure_value_is_boxed(args[3], forbidden_vars) @@ -878,35 +892,35 @@ else: length_box = TempInt() length_loc = regalloc.force_allocate_reg(length_box, - forbidden_vars, selected_reg = r.r2) + forbidden_vars, selected_reg=r.r2) imm = regalloc.convert_to_imm(args[4]) self.load(length_loc, imm) if is_unicode: bytes_box = TempPtr() - bytes_loc = regalloc.force_allocate_reg(bytes_box, forbidden_vars, selected_reg=r.r2) + bytes_loc = regalloc.force_allocate_reg(bytes_box, + forbidden_vars, selected_reg=r.r2) scale = self._get_unicode_item_scale() assert length_loc.is_reg() - self.mc.MOV_ri(r.ip.value, 1<" % (id(self),) + class TempPtr(TempBox): type = REF def __repr__(self): return "" % (id(self),) + class TempFloat(TempBox): type = FLOAT def __repr__(self): return "" % (id(self),) + class ARMFrameManager(FrameManager): + def __init__(self): FrameManager.__init__(self) self.frame_depth = 1 + @staticmethod def frame_pos(loc, type): num_words = ARMFrameManager.frame_size(type) if type == FLOAT: # Make sure that loc is an even value # the frame layout requires loc to be even!! 
- assert (loc & 1) == 0 - return locations.StackLocation(loc+1, num_words=num_words, type=type) + assert (loc & 1) == 0 + return locations.StackLocation(loc + 1, + num_words=num_words, type=type) return locations.StackLocation(loc, num_words=num_words, type=type) @staticmethod @@ -65,9 +70,11 @@ return 2 return 1 + def void(self, op, fcond): return [] + class VFPRegisterManager(RegisterManager): all_regs = r.all_vfp_regs box_types = [FLOAT] @@ -91,7 +98,6 @@ return r def ensure_value_is_boxed(self, thing, forbidden_vars=[]): - box = None loc = None if isinstance(thing, Const): assert isinstance(thing, ConstFloat) @@ -103,18 +109,20 @@ forbidden_vars=self.temp_boxes + forbidden_vars) return loc - def get_scratch_reg(self, type=FLOAT, forbidden_vars=[], selected_reg=None): - assert type == FLOAT # for now + def get_scratch_reg(self, type=FLOAT, forbidden_vars=[], + selected_reg=None): + assert type == FLOAT # for now box = TempFloat() self.temp_boxes.append(box) - reg = self.force_allocate_reg(box, forbidden_vars=forbidden_vars, selected_reg=selected_reg) + reg = self.force_allocate_reg(box, forbidden_vars=forbidden_vars, + selected_reg=selected_reg) return reg class ARMv7RegisterMananger(RegisterManager): - all_regs = r.all_regs - box_types = None # or a list of acceptable types - no_lower_byte_regs = all_regs + all_regs = r.all_regs + box_types = None # or a list of acceptable types + no_lower_byte_regs = all_regs save_around_call_regs = r.caller_resp REGLOC_TO_COPY_AREA_OFS = { @@ -144,14 +152,14 @@ assert 0 def ensure_value_is_boxed(self, thing, forbidden_vars=None): - box = None loc = None if isinstance(thing, Const): if isinstance(thing, ConstPtr): tp = REF else: tp = INT - loc = self.get_scratch_reg(tp, forbidden_vars=self.temp_boxes + forbidden_vars) + loc = self.get_scratch_reg(tp, forbidden_vars=self.temp_boxes + + forbidden_vars) imm = self.convert_to_imm(thing) self.assembler.load(loc, imm) else: @@ -163,9 +171,11 @@ assert type == INT or type == REF box = TempBox() self.temp_boxes.append(box) - reg = self.force_allocate_reg(box, forbidden_vars=forbidden_vars, selected_reg=selected_reg) + reg = self.force_allocate_reg(box, forbidden_vars=forbidden_vars, + selected_reg=selected_reg) return reg + class Regalloc(object): def __init__(self, longevity, frame_manager=None, assembler=None): @@ -181,7 +191,7 @@ return self.vfprm.loc(var) else: return self.rm.loc(var) - + def position(self): return self.rm.position @@ -219,9 +229,11 @@ else: return self.rm.force_allocate_reg(var, forbidden_vars, selected_reg, need_lower_byte) + def try_allocate_reg(self, v, selected_reg=None, need_lower_byte=False): if v.type == FLOAT: - return self.vfprm.try_allocate_reg(v, selected_reg, need_lower_byte) + return self.vfprm.try_allocate_reg(v, selected_reg, + need_lower_byte) else: return self.rm.try_allocate_reg(v, selected_reg, need_lower_byte) @@ -234,17 +246,18 @@ def possibly_free_vars_for_op(self, op): for i in range(op.numargs()): var = op.getarg(i) - if var is not None: # xxx kludgy + if var is not None: # xxx kludgy self.possibly_free_var(var) def possibly_free_vars(self, vars): for var in vars: - if var is not None: # xxx kludgy + if var is not None: # xxx kludgy self.possibly_free_var(var) def get_scratch_reg(self, type, forbidden_vars=[], selected_reg=None): if type == FLOAT: - return self.vfprm.get_scratch_reg(type, forbidden_vars, selected_reg) + return self.vfprm.get_scratch_reg(type, forbidden_vars, + selected_reg) else: return self.rm.get_scratch_reg(type, forbidden_vars, selected_reg) 
@@ -275,10 +288,9 @@ for i in range(len(inputargs)): arg = inputargs[i] assert not isinstance(arg, Const) - reg = None loc = inputargs[i] if arg not in loop_consts and self.longevity[arg][1] > -1: - reg = self.try_allocate_reg(loc) + self.try_allocate_reg(loc) loc = self.loc(arg) if arg.type == FLOAT: @@ -286,7 +298,6 @@ else: nonfloatlocs[i] = loc self.possibly_free_vars(list(inputargs)) - return nonfloatlocs, floatlocs def update_bindings(self, locs, frame_depth, inputargs): @@ -318,7 +329,6 @@ # is also used on op args, which is a non-resizable list self.possibly_free_vars(list(inputargs)) - def force_spill_var(self, var): if var.type == FLOAT: self.vfprm.force_spill_var(var) @@ -328,6 +338,7 @@ def before_call(self, force_store=[], save_all_regs=False): self.rm.before_call(force_store, save_all_regs) self.vfprm.before_call(force_store, save_all_regs) + def _ensure_value_is_boxed(self, thing, forbidden_vars=[]): if thing.type == FLOAT: return self.vfprm.ensure_value_is_boxed(thing, forbidden_vars) @@ -405,7 +416,6 @@ res = self.force_allocate_reg(op.result) return self._prepare_guard(guard, [reg1, reg2, res]) - def prepare_guard_int_add_ovf(self, op, guard, fcond): locs = self._prepare_op_int_add(op, fcond) res = self.force_allocate_reg(op.result) @@ -425,9 +435,12 @@ prepare_op_int_and = prepare_op_ri('int_and') prepare_op_int_or = prepare_op_ri('int_or') prepare_op_int_xor = prepare_op_ri('int_xor') - prepare_op_int_lshift = prepare_op_ri('int_lshift', imm_size=0x1F, allow_zero=False, commutative=False) - prepare_op_int_rshift = prepare_op_ri('int_rshift', imm_size=0x1F, allow_zero=False, commutative=False) - prepare_op_uint_rshift = prepare_op_ri('uint_rshift', imm_size=0x1F, allow_zero=False, commutative=False) + prepare_op_int_lshift = prepare_op_ri('int_lshift', imm_size=0x1F, + allow_zero=False, commutative=False) + prepare_op_int_rshift = prepare_op_ri('int_rshift', imm_size=0x1F, + allow_zero=False, commutative=False) + prepare_op_uint_rshift = prepare_op_ri('uint_rshift', imm_size=0x1F, + allow_zero=False, commutative=False) prepare_op_int_lt = prepare_cmp_op('int_lt') prepare_op_int_le = prepare_cmp_op('int_le') @@ -464,7 +477,6 @@ prepare_op_int_add_ovf = prepare_op_int_add prepare_op_int_sub_ovf = prepare_op_int_sub - prepare_op_int_is_true = prepare_op_unary_cmp('int_is_true') prepare_op_int_is_zero = prepare_op_unary_cmp('int_is_zero') @@ -545,7 +557,6 @@ prepare_op_guard_overflow = prepare_op_guard_no_overflow prepare_op_guard_not_invalidated = prepare_op_guard_no_overflow - def prepare_op_guard_exception(self, op, fcond): boxes = list(op.getarglist()) arg0 = ConstInt(rffi.cast(lltype.Signed, op.getarg(0).getint())) @@ -558,7 +569,8 @@ resloc = None pos_exc_value = imm(self.cpu.pos_exc_value()) pos_exception = imm(self.cpu.pos_exception()) - arglocs = self._prepare_guard(op, [loc, loc1, resloc, pos_exc_value, pos_exception]) + arglocs = self._prepare_guard(op, + [loc, loc1, resloc, pos_exc_value, pos_exception]) return arglocs def prepare_op_guard_no_exception(self, op, fcond): @@ -588,9 +600,7 @@ return arglocs - def prepare_op_jump(self, op, fcond): - assembler = self.assembler descr = op.getdescr() assert isinstance(descr, LoopToken) nonfloatlocs, floatlocs = descr._arm_arglocs @@ -667,10 +677,9 @@ self.possibly_free_vars_for_op(op) self.free_temp_vars() result_loc = self.force_allocate_reg(op.result) - return [base_loc, index_loc, result_loc, ofs_loc, imm(ofs), - imm(itemsize), imm(fieldsize)] + return [base_loc, index_loc, result_loc, ofs_loc, imm(ofs), + 
imm(itemsize), imm(fieldsize)] - def prepare_op_setinteriorfield_gc(self, op, fcond): t = self._unpack_interiorfielddescr(op.getdescr()) ofs, itemsize, fieldsize, sign = t @@ -699,11 +708,11 @@ return [res, base_loc, imm(ofs)] def prepare_op_setarrayitem_gc(self, op, fcond): - a0, a1, a2 = boxes = list(op.getarglist()) + a0, a1, a2 = list(op.getarglist()) _, scale, base_ofs, _, ptr = self._unpack_arraydescr(op.getdescr()) args = op.getarglist() - base_loc = self._ensure_value_is_boxed(a0, args) + base_loc = self._ensure_value_is_boxed(a0, args) ofs_loc = self._ensure_value_is_boxed(a1, args) value_loc = self._ensure_value_is_boxed(a2, args) assert check_imm_arg(base_ofs) @@ -714,7 +723,7 @@ a0, a1 = boxes = list(op.getarglist()) _, scale, base_ofs, _, ptr = self._unpack_arraydescr(op.getdescr()) - base_loc = self._ensure_value_is_boxed(a0, boxes) + base_loc = self._ensure_value_is_boxed(a0, boxes) ofs_loc = self._ensure_value_is_boxed(a1, boxes) self.possibly_free_vars_for_op(op) self.free_temp_vars() @@ -804,8 +813,9 @@ basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, self.cpu.translate_support_code) - scale = itemsize/2 - return [res, base_loc, ofs_loc, imm(scale), imm(basesize), imm(itemsize)] + scale = itemsize / 2 + return [res, base_loc, ofs_loc, + imm(scale), imm(basesize), imm(itemsize)] def prepare_op_unicodesetitem(self, op, fcond): boxes = list(op.getarglist()) @@ -814,8 +824,9 @@ value_loc = self._ensure_value_is_boxed(boxes[2], boxes) basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, self.cpu.translate_support_code) - scale = itemsize/2 - return [value_loc, base_loc, ofs_loc, imm(scale), imm(basesize), imm(itemsize)] + scale = itemsize / 2 + return [value_loc, base_loc, ofs_loc, + imm(scale), imm(basesize), imm(itemsize)] def prepare_op_same_as(self, op, fcond): arg = op.getarg(0) @@ -839,8 +850,9 @@ else: arglocs = self._prepare_args_for_new_op(op.getdescr()) force_index = self.assembler.write_new_force_index() - self.assembler._emit_call(force_index, self.assembler.malloc_func_addr, - arglocs, self, fcond, result=op.result) + self.assembler._emit_call(force_index, + self.assembler.malloc_func_addr, arglocs, + self, fcond, result=op.result) self.possibly_free_vars(arglocs) self.possibly_free_var(op.result) return [] @@ -853,8 +865,9 @@ else: callargs = self._prepare_args_for_new_op(descrsize) force_index = self.assembler.write_new_force_index() - self.assembler._emit_call(force_index, self.assembler.malloc_func_addr, - callargs, self, fcond, result=op.result) + self.assembler._emit_call(force_index, + self.assembler.malloc_func_addr, callargs, + self, fcond, result=op.result) self.possibly_free_vars(callargs) self.possibly_free_var(op.result) return [imm(classint)] @@ -875,8 +888,9 @@ argboxes = [ConstInt(x) for x in args] argboxes.append(box_num_elem) force_index = self.assembler.write_new_force_index() - self.assembler._emit_call(force_index, self.assembler.malloc_array_func_addr, - argboxes, self, fcond, result=op.result) + self.assembler._emit_call(force_index, + self.assembler.malloc_array_func_addr, argboxes, self, + fcond, result=op.result) return [] # boehm GC itemsize, scale, basesize, ofs_length, _ = ( @@ -916,7 +930,7 @@ for v, val in self.frame_manager.frame_bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert val.is_stack() - gcrootmap.add_frame_offset(shape, val.position*-WORD) + gcrootmap.add_frame_offset(shape, val.position * -WORD) for v, reg in self.rm.reg_bindings.items(): if reg is 
r.r0: continue @@ -929,6 +943,7 @@ assert 0, 'sure??' return gcrootmap.compress_callshape(shape, self.assembler.datablockwrapper) + def prepare_op_newstr(self, op, fcond): gc_ll_descr = self.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newstr is not None: @@ -947,8 +962,9 @@ gc_ll_descr = self.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newunicode is not None: force_index = self.assembler.write_new_force_index() - self.assembler._emit_call(force_index, self.assembler.malloc_unicode_func_addr, - [op.getarg(0)], self, fcond, op.result) + self.assembler._emit_call(force_index, + self.assembler.malloc_unicode_func_addr, + [op.getarg(0)], self, fcond, op.result) return [] # boehm GC ofs_items, _, ofs = symbolic.get_array_token(rstr.UNICODE, @@ -1047,7 +1063,8 @@ assert isinstance(descr, LoopToken) jd = descr.outermost_jitdriver_sd assert jd is not None - size = jd.portal_calldescr.get_result_size(self.cpu.translate_support_code) + size = jd.portal_calldescr.get_result_size( + self.cpu.translate_support_code) vable_index = jd.index_of_virtualizable if vable_index >= 0: self._sync_var(op.getarg(vable_index)) @@ -1097,7 +1114,8 @@ arraydescr = descr.arraydescr ofs = arraydescr.get_base_size(self.cpu.translate_support_code) itemsize = arraydescr.get_item_size(self.cpu.translate_support_code) - fieldsize = descr.fielddescr.get_field_size(self.cpu.translate_support_code) + fieldsize = descr.fielddescr.get_field_size( + self.cpu.translate_support_code) sign = descr.fielddescr.is_field_signed() ofs += descr.fielddescr.offset return ofs, itemsize, fieldsize, sign @@ -1105,22 +1123,37 @@ prepare_op_float_add = prepare_float_op(name='prepare_op_float_add') prepare_op_float_sub = prepare_float_op(name='prepare_op_float_sub') prepare_op_float_mul = prepare_float_op(name='prepare_op_float_mul') - prepare_op_float_truediv = prepare_float_op(name='prepare_op_float_truediv') - prepare_op_float_lt = prepare_float_op(float_result=False, name='prepare_op_float_lt') - prepare_op_float_le = prepare_float_op(float_result=False, name='prepare_op_float_le') - prepare_op_float_eq = prepare_float_op(float_result=False, name='prepare_op_float_eq') - prepare_op_float_ne = prepare_float_op(float_result=False, name='prepare_op_float_ne') - prepare_op_float_gt = prepare_float_op(float_result=False, name='prepare_op_float_gt') - prepare_op_float_ge = prepare_float_op(float_result=False, name='prepare_op_float_ge') - prepare_op_float_neg = prepare_float_op(base=False, name='prepare_op_float_neg') - prepare_op_float_abs = prepare_float_op(base=False, name='prepare_op_float_abs') + prepare_op_float_truediv = prepare_float_op( + name='prepare_op_float_truediv') + prepare_op_float_lt = prepare_float_op(float_result=False, + name='prepare_op_float_lt') + prepare_op_float_le = prepare_float_op(float_result=False, + name='prepare_op_float_le') + prepare_op_float_eq = prepare_float_op(float_result=False, + name='prepare_op_float_eq') + prepare_op_float_ne = prepare_float_op(float_result=False, + name='prepare_op_float_ne') + prepare_op_float_gt = prepare_float_op(float_result=False, + name='prepare_op_float_gt') + prepare_op_float_ge = prepare_float_op(float_result=False, + name='prepare_op_float_ge') + prepare_op_float_neg = prepare_float_op(base=False, + name='prepare_op_float_neg') + prepare_op_float_abs = prepare_float_op(base=False, + name='prepare_op_float_abs') - prepare_guard_float_lt = prepare_float_op(guard=True, float_result=False, name='prepare_guard_float_lt') - prepare_guard_float_le = prepare_float_op(guard=True, 
float_result=False, name='prepare_guard_float_le') - prepare_guard_float_eq = prepare_float_op(guard=True, float_result=False, name='prepare_guard_float_eq') - prepare_guard_float_ne = prepare_float_op(guard=True, float_result=False, name='prepare_guard_float_ne') - prepare_guard_float_gt = prepare_float_op(guard=True, float_result=False, name='prepare_guard_float_gt') - prepare_guard_float_ge = prepare_float_op(guard=True, float_result=False, name='prepare_guard_float_ge') + prepare_guard_float_lt = prepare_float_op(guard=True, + float_result=False, name='prepare_guard_float_lt') + prepare_guard_float_le = prepare_float_op(guard=True, + float_result=False, name='prepare_guard_float_le') + prepare_guard_float_eq = prepare_float_op(guard=True, + float_result=False, name='prepare_guard_float_eq') + prepare_guard_float_ne = prepare_float_op(guard=True, + float_result=False, name='prepare_guard_float_ne') + prepare_guard_float_gt = prepare_float_op(guard=True, + float_result=False, name='prepare_guard_float_gt') + prepare_guard_float_ge = prepare_float_op(guard=True, + float_result=False, name='prepare_guard_float_ge') def prepare_op_math_sqrt(self, op, fcond): loc = self._ensure_value_is_boxed(op.getarg(1)) @@ -1135,7 +1168,7 @@ temp_loc = self.get_scratch_reg(FLOAT) self.possibly_free_vars_for_op(op) self.free_temp_vars() - res = self.rm.force_allocate_reg(op.result) + res = self.rm.force_allocate_reg(op.result) return [loc1, temp_loc, res] def prepare_op_cast_int_to_float(self, op, fcond): @@ -1143,20 +1176,24 @@ temp_loc = self.get_scratch_reg(FLOAT) self.possibly_free_vars_for_op(op) self.free_temp_vars() - res = self.vfprm.force_allocate_reg(op.result) + res = self.vfprm.force_allocate_reg(op.result) return [loc1, temp_loc, res] def prepare_force_spill(self, op, fcond): self.force_spill_var(op.getarg(0)) return [] + def add_none_argument(fn): return lambda self, op, fcond: fn(self, op, None, fcond) + def notimplemented(self, op, fcond): - raise NotImplementedError, op + raise NotImplementedError(op) + + def notimplemented_with_guard(self, op, guard_op, fcond): - raise NotImplementedError, op + raise NotImplementedError(op) operations = [notimplemented] * (rop._LAST + 1) operations_with_guard = [notimplemented_with_guard] * (rop._LAST + 1) diff --git a/pypy/jit/backend/arm/registers.py b/pypy/jit/backend/arm/registers.py --- a/pypy/jit/backend/arm/registers.py +++ b/pypy/jit/backend/arm/registers.py @@ -1,11 +1,14 @@ -from pypy.jit.backend.arm.locations import RegisterLocation, VFPRegisterLocation +from pypy.jit.backend.arm.locations import VFPRegisterLocation +from pypy.jit.backend.arm.locations import RegisterLocation registers = [RegisterLocation(i) for i in range(16)] vfpregisters = [VFPRegisterLocation(i) for i in range(16)] -r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15 = registers +[r0, r1, r2, r3, r4, r5, r6, r7, + r8, r9, r10, r11, r12, r13, r14, r15] = registers #vfp registers interpreted as 64-bit registers -d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14, d15 = vfpregisters +[d0, d1, d2, d3, d4, d5, d6, d7, + d8, d9, d10, d11, d12, d13, d14, d15] = vfpregisters # aliases for registers fp = r11 @@ -20,11 +23,10 @@ caller_resp = [r0, r1, r2, r3] callee_resp = [r4, r5, r6, r7, r8, r9, r10, fp] -callee_saved_registers = callee_resp+[lr] -callee_restored_registers = callee_resp+[pc] +callee_saved_registers = callee_resp + [lr] +callee_restored_registers = callee_resp + [pc] caller_vfp_resp = [d0, d1, d2, d3, d4, d5, d6, d7] callee_vfp_resp = 
[d8, d9, d10, d11, d12, d13, d14, d15] callee_saved_vfp_registers = callee_vfp_resp - diff --git a/pypy/jit/backend/arm/runner.py b/pypy/jit/backend/arm/runner.py --- a/pypy/jit/backend/arm/runner.py +++ b/pypy/jit/backend/arm/runner.py @@ -21,12 +21,13 @@ assert gcdescr.config.translation.gcremovetypeptr is False AbstractLLCPU.__init__(self, rtyper, stats, opts, translate_support_code, gcdescr) + def setup(self): if self.opts is not None: failargs_limit = self.opts.failargs_limit else: failargs_limit = 1000 - self.assembler = AssemblerARM(self) + self.assembler = AssemblerARM(self, failargs_limit=failargs_limit) def setup_once(self): self.assembler.setup_once() @@ -34,7 +35,8 @@ def finish_once(self): pass - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, inputargs, operations, looptoken, + log=True, name=''): self.assembler.assemble_loop(inputargs, operations, looptoken, log=log) @@ -113,11 +115,11 @@ faildescr = self.get_fail_descr_from_number(fail_index) rffi.cast(TP, addr_of_force_index)[0] = ~fail_index # start of "no gc operation!" block - frame_depth = faildescr._arm_frame_depth*WORD + frame_depth = faildescr._arm_frame_depth * WORD addr_end_of_frame = (addr_of_force_index - (frame_depth + - len(all_regs)*WORD + - len(all_vfp_regs)*2*WORD)) + len(all_regs) * WORD + + len(all_vfp_regs) * 2 * WORD)) fail_index_2 = self.assembler.failure_recovery_func( faildescr._failure_recovery_code, addr_of_force_index, diff --git a/pypy/jit/backend/arm/shift.py b/pypy/jit/backend/arm/shift.py --- a/pypy/jit/backend/arm/shift.py +++ b/pypy/jit/backend/arm/shift.py @@ -3,4 +3,4 @@ LSR = 0x1 ASR = 0x2 ROR = 0x3 -RRX = 0x3 # with imm = 0 +RRX = 0x3 # with imm = 0 From noreply at buildbot.pypy.org Thu Dec 29 09:57:24 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Dec 2011 09:57:24 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: merge default up to e4a0b9e4d23b Message-ID: <20111229085724.F2D7582C01@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50941:c615cc3558ce Date: 2011-12-26 13:48 +0100 http://bitbucket.org/pypy/pypy/changeset/c615cc3558ce/ Log: merge default up to e4a0b9e4d23b diff --git a/pypy/annotation/specialize.py b/pypy/annotation/specialize.py --- a/pypy/annotation/specialize.py +++ b/pypy/annotation/specialize.py @@ -36,9 +36,7 @@ newtup = SpaceOperation('newtuple', starargs, argscopy[-1]) newstartblock.operations.append(newtup) newstartblock.closeblock(Link(argscopy, graph.startblock)) - graph.startblock.isstartblock = False graph.startblock = newstartblock - newstartblock.isstartblock = True argnames = argnames + ['.star%d' % i for i in range(nb_extra_args)] graph.signature = Signature(argnames) # note that we can mostly ignore defaults: if nb_extra_args > 0, diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -304,5 +304,14 @@ never a dictionary as it sometimes is in CPython. Assigning to ``__builtins__`` has no effect. +* directly calling the internal magic methods of a few built-in types + with invalid arguments may have a slightly different result. For + example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return + ``NotImplemented`` on PyPy; on CPython, only the later does, and the + former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` + both raise ``TypeError`` everywhere.) 
This difference is an + implementation detail that shows up because of internal C-level slots + that PyPy does not have. + .. include:: _ref.txt diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -241,12 +241,15 @@ return op.opname == 'jit_force_quasi_immutable' class RandomEffectsAnalyzer(BoolGraphAnalyzer): - def analyze_direct_call(self, graph, seen=None): - if hasattr(graph, "func") and hasattr(graph.func, "_ptr"): - if graph.func._ptr._obj.random_effects_on_gcobjs: + def analyze_external_call(self, op, seen=None): + try: + funcobj = op.args[0].value._obj + if funcobj.random_effects_on_gcobjs: return True - return super(RandomEffectsAnalyzer, self).analyze_direct_call(graph, - seen) + except (AttributeError, lltype.DelayedPointer): + pass + return super(RandomEffectsAnalyzer, self).analyze_external_call( + op, seen) def analyze_simple_operation(self, op, graphinfo): return False diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -15,6 +15,8 @@ from pypy.translator.simplify import get_funcobj from pypy.translator.unsimplify import varoftype +class UnsupportedMallocFlags(Exception): + pass def transform_graph(graph, cpu=None, callcontrol=None, portal_jd=None): """Transform a control flow graph to make it suitable for @@ -205,7 +207,19 @@ if op.args[0] in self.vable_array_vars: self.vable_array_vars[op.result]= self.vable_array_vars[op.args[0]] - rewrite_op_cast_pointer = rewrite_op_same_as + def rewrite_op_cast_pointer(self, op): + newop = self.rewrite_op_same_as(op) + assert newop is None + if (self._is_rclass_instance(op.args[0]) and + self._is_rclass_instance(op.result)): + FROM = op.args[0].concretetype.TO + TO = op.result.concretetype.TO + if lltype._castdepth(TO, FROM) > 0: + vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, TO) + const_vtable = Constant(vtable, lltype.typeOf(vtable)) + return [None, # hack, do the right renaming from op.args[0] to op.result + SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass @@ -481,8 +495,22 @@ def rewrite_op_malloc_varsize(self, op): if op.args[1].value['flavor'] == 'raw': + d = op.args[1].value.copy() + d.pop('flavor') + add_memory_pressure = d.pop('add_memory_pressure', False) + zero = d.pop('zero', False) + track_allocation = d.pop('track_allocation', True) + if d: + raise UnsupportedMallocFlags(d) ARRAY = op.args[0].value - return self._do_builtin_call(op, 'raw_malloc', + name = 'raw_malloc' + if zero: + name += '_zero' + if add_memory_pressure: + name += '_add_memory_pressure' + if not track_allocation: + name += '_no_track_allocation' + return self._do_builtin_call(op, name, [op.args[2]], extra = (ARRAY,), extrakey = ARRAY) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -599,10 +599,21 @@ return p return _ll_0_alloc_with_del - def build_ll_1_raw_malloc(ARRAY): - def _ll_1_raw_malloc(n): - return lltype.malloc(ARRAY, n, flavor='raw') - return _ll_1_raw_malloc + def build_raw_malloc_builder(zero=False, add_memory_pressure=False, track_allocation=True): + def build_ll_1_raw_malloc(ARRAY): + def _ll_1_raw_malloc(n): + return 
lltype.malloc(ARRAY, n, flavor='raw', zero=zero, add_memory_pressure=add_memory_pressure) + return _ll_1_raw_malloc + return build_ll_1_raw_malloc + + build_ll_1_raw_malloc = build_raw_malloc_builder() + build_ll_1_raw_malloc_zero = build_raw_malloc_builder(zero=True) + build_ll_1_raw_malloc_zero_add_memory_pressure = build_raw_malloc_builder(zero=True, add_memory_pressure=True) + build_ll_1_raw_malloc_add_memory_pressure = build_raw_malloc_builder(add_memory_pressure=True) + build_ll_1_raw_malloc_no_track_allocation = build_raw_malloc_builder(track_allocation=False) + build_ll_1_raw_malloc_zero_no_track_allocation = build_raw_malloc_builder(zero=True, track_allocation=False) + build_ll_1_raw_malloc_zero_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(zero=True, add_memory_pressure=True, track_allocation=False) + build_ll_1_raw_malloc_add_memory_pressure_no_track_allocation = build_raw_malloc_builder(add_memory_pressure=True, track_allocation=False) def build_ll_1_raw_free(ARRAY): def _ll_1_raw_free(p): diff --git a/pypy/jit/codewriter/test/test_call.py b/pypy/jit/codewriter/test/test_call.py --- a/pypy/jit/codewriter/test/test_call.py +++ b/pypy/jit/codewriter/test/test_call.py @@ -192,3 +192,21 @@ [op] = block.operations call_descr = cc.getcalldescr(op) assert call_descr.extrainfo.has_random_effects() + +def test_random_effects_on_stacklet_switch(): + from pypy.jit.backend.llgraph.runner import LLtypeCPU + from pypy.rlib._rffi_stacklet import switch, thread_handle, handle + @jit.dont_look_inside + def f(): + switch(rffi.cast(thread_handle, 0), rffi.cast(handle, 0)) + + rtyper = support.annotate(f, []) + jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0]) + cc = CallControl(LLtypeCPU(rtyper), jitdrivers_sd=[jitdriver_sd]) + res = cc.find_all_graphs(FakePolicy()) + + [f_graph] = [x for x in res if x.func is f] + [block, _] = list(f_graph.iterblocks()) + op = block.operations[-1] + call_descr = cc.getcalldescr(op) + assert call_descr.extrainfo.has_random_effects() diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -1,3 +1,5 @@ + +import py import random try: from itertools import product @@ -15,12 +17,12 @@ from pypy.objspace.flow.model import FunctionGraph, Block, Link from pypy.objspace.flow.model import SpaceOperation, Variable, Constant -from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr +from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.lltypesystem.module import ll_math from pypy.translator.unsimplify import varoftype from pypy.jit.codewriter import heaptracker, effectinfo from pypy.jit.codewriter.flatten import ListOfKind -from pypy.jit.codewriter.jtransform import Transformer +from pypy.jit.codewriter.jtransform import Transformer, UnsupportedMallocFlags from pypy.jit.metainterp.history import getkind def const(x): @@ -538,6 +540,44 @@ assert op1.opname == '-live-' assert op1.args == [] +def test_raw_malloc(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw'}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc' # pseudo-function 
as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_zero(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + op0, op1 = tr.rewrite_operation(op) + assert op0.opname == 'residual_call_ir_i' + assert op0.args[0].value == 'raw_malloc_zero' # pseudo-function as a str + assert op1.opname == '-live-' + assert op1.args == [] + +def test_raw_malloc_unsupported_flag(): + S = rffi.CArray(lltype.Signed) + v1 = varoftype(lltype.Signed) + v = varoftype(lltype.Ptr(S)) + flags = Constant({'flavor': 'raw', 'unsupported_flag': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + v1], v) + tr = Transformer(FakeCPU(), FakeResidualCallControl()) + py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) + def test_rename_on_links(): v1 = Variable() v2 = Variable(); v2.concretetype = llmemory.Address @@ -1140,4 +1180,4 @@ assert op1.opname == 'mark_opaque_ptr' assert op1.args == [v1] assert op1.result is None - assert op2 is None \ No newline at end of file + assert op2 is None diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py --- a/pypy/jit/codewriter/test/test_longlong.py +++ b/pypy/jit/codewriter/test/test_longlong.py @@ -78,7 +78,7 @@ oplist = tr.rewrite_operation(op) assert len(oplist) == 2 assert oplist[0].opname == 'residual_call_irf_f' - assert oplist[0].args[0].value == 'llong_from_int' + assert oplist[0].args[0].value == opname.split('_')[0]+'_from_int' assert oplist[0].args[1] == 'calldescr-84' assert list(oplist[0].args[2]) == [const(0)] assert list(oplist[0].args[3]) == [] diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -518,6 +518,9 @@ @arguments("r") def bhimpl_mark_opaque_ptr(a): pass + @arguments("r", "i") + def bhimpl_record_known_class(a, b): + pass @arguments("i", returns="i") def bhimpl_int_copy(a): diff --git a/pypy/jit/metainterp/gc.py b/pypy/jit/metainterp/gc.py --- a/pypy/jit/metainterp/gc.py +++ b/pypy/jit/metainterp/gc.py @@ -7,6 +7,9 @@ self.config = config +class GC_none(GcDescription): + malloc_zero_filled = True + class GC_boehm(GcDescription): malloc_zero_filled = True diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -260,6 +260,16 @@ def optimize_GUARD_FALSE(self, op): self.optimize_guard(op, CONST_0) + def optimize_RECORD_KNOWN_CLASS(self, op): + value = self.getvalue(op.getarg(0)) + expectedclassbox = op.getarg(1) + assert isinstance(expectedclassbox, Const) + realclassbox = value.get_constant_class(self.optimizer.cpu) + if realclassbox is not None: + assert realclassbox.same_constant(expectedclassbox) + return + value.make_constant_class(expectedclassbox, None) + def optimize_GUARD_CLASS(self, op): value = self.getvalue(op.getarg(0)) expectedclassbox = op.getarg(1) @@ -481,6 +491,9 @@ self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) self.emit_operation(op) + def optimize_SAME_AS(self, op): + self.make_equal_to(op.result, self.getvalue(op.getarg(0))) + dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_', 
default=OptRewrite.emit_operation) optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD') diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -28,6 +28,9 @@ def optimize_MARK_OPAQUE_PTR(self, op): pass + def optimize_RECORD_KNOWN_CLASS(self, op): + pass + dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -6482,6 +6482,21 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() + def test_record_known_class(self): + ops = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + record_known_class(p1, ConstClass(node_vtable)) + guard_class(p1, ConstClass(node_vtable)) [] + jump(p1) + """ + expected = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + jump(p1) + """ + self.optimize_loop(ops, expected) + def test_quasi_immut(self): ops = """ [p0, p1, i0] diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -243,6 +243,18 @@ def opimpl_mark_opaque_ptr(self, box): return self.execute(rop.MARK_OPAQUE_PTR, box) + @arguments("box", "box") + def opimpl_record_known_class(self, box, clsbox): + from pypy.rpython.lltypesystem import llmemory + if self.metainterp.heapcache.is_class_known(box): + return + adr = clsbox.getaddr() + bounding_class = llmemory.cast_adr_to_ptr(adr, rclass.CLASSTYPE) + if bounding_class.subclassrange_max - bounding_class.subclassrange_min == 1: + # precise class knowledge, this can be used + self.execute(rop.RECORD_KNOWN_CLASS, box, clsbox) + self.metainterp.heapcache.class_now_known(box) + @arguments("box") def _opimpl_any_return(self, box): self.metainterp.finishframe(box) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -494,6 +494,7 @@ 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr + 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -3585,6 +3585,67 @@ self.interp_operations(f, [5], translationoptions=translationoptions) + def test_annotation_gives_knowledge_to_tracer(self): + class Base(object): + pass + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + assert isinstance(a, A) + z = a.f() + elif x < 0: + assert isinstance(a, B) + z = a.f() + else: + assert isinstance(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 
= self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): from pypy.rlib.objectmodel import UnboxedValue diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -612,7 +612,7 @@ return node.value res = self.meta_interp(f, [48, 3], policy=StopAtXPolicy(externfn)) assert res == f(48, 3) - self.check_loop_count(3) + self.check_loop_count(5) res = self.meta_interp(f, [40, 3], policy=StopAtXPolicy(externfn)) assert res == f(40, 3) self.check_loop_count(3) @@ -761,6 +761,27 @@ res = self.meta_interp(f, [0x1F, 0x11]) assert res == f(0x1F, 0x11) + def test_duplicated_virtual(self): + myjitdriver = JitDriver(greens = [], reds = ['n', 'node1', 'node2']) + def f(n): + node1 = self._new() + node1.value = 0 + node2 = self._new() + node2.value = 1 + while n > 0: + myjitdriver.jit_merge_point(n=n, node1=node1, node2=node2) + next = self._new() + next.value = node1.value + node2.value + n + node1 = next + node2 = next + n -= 1 + return node1.value + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_resops(new_with_vtable=0, new=0) + + + class VirtualMiscTests: def test_multiple_equal_virtuals(self): diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -255,10 +255,8 @@ s_binding = self.translator.annotator.binding jd._portal_args_s = [s_binding(v) for v in args] graph = copygraph(graph) - graph.startblock.isstartblock = False [jmpp] = find_jit_merge_points([graph]) graph.startblock = support.split_before_jit_merge_point(*jmpp) - graph.startblock.isstartblock = True # a crash in the following checkgraph() means that you forgot # to list some variable in greens=[] or reds=[] in JitDriver, # or that a jit_merge_point() takes a constant as an argument. 
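Earlier in this diff, rewrite_op_malloc_varsize starts mapping a raw-flavored malloc_varsize to one of several pseudo-functions whose name encodes the zero / add_memory_pressure / track_allocation flags, and raises UnsupportedMallocFlags for anything it does not understand. A standalone sketch of that name composition, mirroring the logic shown above but as a hypothetical helper rather than the actual codewriter:

    class UnsupportedMallocFlags(Exception):
        pass

    def raw_malloc_name(flags):
        # Hypothetical sketch of building the pseudo-function name from the
        # malloc flag dictionary, in the spirit of rewrite_op_malloc_varsize.
        d = dict(flags)
        assert d.pop('flavor') == 'raw'
        zero = d.pop('zero', False)
        add_memory_pressure = d.pop('add_memory_pressure', False)
        track_allocation = d.pop('track_allocation', True)
        if d:                                # leftover flags are unsupported
            raise UnsupportedMallocFlags(d)
        name = 'raw_malloc'
        if zero:
            name += '_zero'
        if add_memory_pressure:
            name += '_add_memory_pressure'
        if not track_allocation:
            name += '_no_track_allocation'
        return name

    if __name__ == "__main__":
        assert raw_malloc_name({'flavor': 'raw'}) == 'raw_malloc'
        assert raw_malloc_name({'flavor': 'raw', 'zero': True}) == 'raw_malloc_zero'
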
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -27,6 +27,7 @@ 'builtinify' : 'interp_magic.builtinify', 'lookup_special' : 'interp_magic.lookup_special', 'do_what_I_mean' : 'interp_magic.do_what_I_mean', + 'list_strategy' : 'interp_magic.list_strategy', } submodules = { diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -5,7 +5,6 @@ from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import IndexCache - def internal_repr(space, w_object): return space.wrap('%r' % (w_object,)) @@ -73,3 +72,11 @@ def do_what_I_mean(space): return space.wrap(42) + +def list_strategy(space, w_list): + from pypy.objspace.std.listobject import W_ListObject + if isinstance(w_list, W_ListObject): + return space.wrap(w_list.strategy._applevel_repr) + else: + w_msg = space.wrap("Can only get the list strategy of a list") + raise OperationError(space.w_TypeError, w_msg) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -5,7 +5,7 @@ def setup_class(cls): if option.runappdirect: py.test.skip("does not make sense on pypy-c") - cls.space = gettestobjspace(**{"objspace.usemodules.select": False}) + cls.space = gettestobjspace(**{"objspace.usemodules.select": False, "objspace.std.withrangelist": True}) def test__isfake(self): from __pypy__ import isfake @@ -54,3 +54,21 @@ from __pypy__ import do_what_I_mean x = do_what_I_mean() assert x == 42 + + def test_list_strategy(self): + from __pypy__ import list_strategy + + l = [1, 2, 3] + assert list_strategy(l) == "int" + l = ["a", "b", "c"] + assert list_strategy(l) == "str" + l = [1.1, 2.2, 3.3] + assert list_strategy(l) == "float" + l = range(3) + assert list_strategy(l) == "range" + l = [1, "b", 3] + assert list_strategy(l) == "object" + l = [] + assert list_strategy(l) == "empty" + o = 5 + raises(TypeError, list_strategy, 5) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -131,7 +131,7 @@ def binop(func): - func._annspecialcase_ = "specialize:call_location" + specialize.argtype(1, 2)(func) @functools.wraps(func) def impl(self, v1, v2): return self.adapt_val(func(self, @@ -141,6 +141,7 @@ return impl def raw_binop(func): + specialize.argtype(1, 2)(func) # Returns the result unwrapped. @functools.wraps(func) def impl(self, v1, v2): @@ -151,6 +152,7 @@ return impl def unaryop(func): + specialize.argtype(1)(func) @functools.wraps(func) def impl(self, v): return self.adapt_val(func(self, self.for_computation(self.unbox(v)))) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1110,6 +1110,14 @@ def debug_repr(self): return 'Slice(%s)' % self.parent.debug_repr() + def copy(self): + array = NDimArray(self.size, self.shape[:], self.find_dtype()) + iter = self.start_iter() + while not iter.done(): + array.setitem(iter.offset, self.getitem(iter.offset)) + iter = iter.next(len(self.shape)) + return array + class NDimArray(BaseArray): """ A class representing contiguous array. 
We know that each iteration by say ufunc will increase the data index by one diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -760,6 +760,19 @@ a[::-1] = a + a assert (a == [8, 6, 4, 2, 0]).all() + def test_debug_repr(self): + from numpypy import zeros, sin + a = zeros(1) + assert a.__debug_repr__() == 'Array' + assert (a + a).__debug_repr__() == 'Call2(add, Array, Array)' + assert (a[::2]).__debug_repr__() == 'Slice(Array)' + assert (a + 2).__debug_repr__() == 'Call2(add, Array, Scalar)' + assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, FlatIter(Array))' + assert sin(a).__debug_repr__() == 'Call1(sin, Array)' + b = a + a + b[0] = 3 + assert b.__debug_repr__() == 'Call2(add, forced=Array)' + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy @@ -989,18 +1002,11 @@ a = array([1, 2, 3]) assert dot(a.flat, a.flat) == 14 - def test_debug_repr(self): - from numpypy import zeros, sin - a = zeros(1) - assert a.__debug_repr__() == 'Array' - assert (a + a).__debug_repr__() == 'Call2(add, Array, Array)' - assert (a[::2]).__debug_repr__() == 'Slice(Array)' - assert (a + 2).__debug_repr__() == 'Call2(add, Array, Scalar)' - assert (a + a.flat).__debug_repr__() == 'Call2(add, Array, FlatIter(Array))' - assert sin(a).__debug_repr__() == 'Call1(sin, Array)' - b = a + a - b[0] = 3 - assert b.__debug_repr__() == 'Call2(add, forced=Array)' + def test_slice_copy(self): + from numpypy import zeros + a = zeros((10, 10)) + b = a[0].copy() + assert (b == zeros(10)).all() class AppTestSupport(object): def setup_class(cls): diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -38,7 +38,6 @@ def __init__(self, name, startblock, return_var=None): self.name = name # function name (possibly mangled already) self.startblock = startblock - self.startblock.isstartblock = True # build default returnblock self.returnblock = Block([return_var or Variable()]) self.returnblock.operations = () @@ -171,11 +170,10 @@ class Block(object): - __slots__ = """isstartblock inputargs operations exitswitch + __slots__ = """inputargs operations exitswitch exits blockcolor""".split() def __init__(self, inputargs): - self.isstartblock = False self.inputargs = list(inputargs) # mixed list of variable/const XXX self.operations = [] # list of SpaceOperation(s) self.exitswitch = None # a variable or @@ -452,7 +450,6 @@ newblock.closeblock(*newlinks) newstartblock = blockmap[graph.startblock] - newstartblock.isstartblock = True newgraph = FunctionGraph(graph.name, newstartblock) newgraph.returnblock = blockmap[graph.returnblock] newgraph.exceptblock = blockmap[graph.exceptblock] @@ -490,7 +487,6 @@ for block in graph.iterblocks(): - assert bool(block.isstartblock) == (block is graph.startblock) assert type(block.exits) is tuple, ( "block.exits is a %s (closeblock() or recloseblock() missing?)" % (type(block.exits).__name__,)) diff --git a/pypy/objspace/flow/test/test_checkgraph.py b/pypy/objspace/flow/test/test_checkgraph.py --- a/pypy/objspace/flow/test/test_checkgraph.py +++ b/pypy/objspace/flow/test/test_checkgraph.py @@ -13,20 +13,6 @@ py.test.raises(AssertionError, checkgraph, g) -def test_nostartblock(): - g = FunctionGraph("g", Block([])) - g.startblock.closeblock(Link([Constant(1)], g.returnblock)) - g.startblock.isstartblock = False - 
py.test.raises(AssertionError, checkgraph, g) - -def test_twostartblocks(): - g = FunctionGraph("g", Block([])) - b = Block([]) - b.isstartblock = True - g.startblock.closeblock(Link([], b)) - b.closeblock(Link([Constant(1)], g.returnblock)) - py.test.raises(AssertionError, checkgraph, g) - def test_exitlessblocknotexitblock(): g = FunctionGraph("g", Block([])) py.test.raises(AssertionError, checkgraph, g) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -50,6 +50,13 @@ else: return space.fromcache(StringListStrategy) + # check for floats + for w_obj in list_w: + if not is_W_FloatObject(w_obj): + break + else: + return space.fromcache(FloatListStrategy) + return space.fromcache(ObjectListStrategy) def is_W_IntObject(w_object): @@ -60,7 +67,9 @@ from pypy.objspace.std.stringobject import W_StringObject return type(w_object) is W_StringObject - +def is_W_FloatObject(w_object): + from pypy.objspace.std.floatobject import W_FloatObject + return type(w_object) is W_FloatObject class W_ListObject(W_AbstractListObject): from pypy.objspace.std.listtype import list_typedef as typedef @@ -317,6 +326,8 @@ to the added item. W_Lists do not switch back to EmptyListStrategy when becoming empty again.""" + _applevel_repr = "empty" + def __init__(self, space): ListStrategy.__init__(self, space) # cache an empty list that is used whenever getitems is called (i.e. sorting) @@ -364,6 +375,8 @@ strategy = self.space.fromcache(IntegerListStrategy) elif is_W_StringObject(w_item): strategy = self.space.fromcache(StringListStrategy) + elif is_W_FloatObject(w_item): + strategy = self.space.fromcache(FloatListStrategy) else: strategy = self.space.fromcache(ObjectListStrategy) @@ -415,6 +428,8 @@ On any operation destroying the range (inserting, appending non-ints) the strategy is switched to IntegerListStrategy.""" + _applevel_repr = "range" + def switch_to_integer_strategy(self, w_list): items = self._getitems_range(w_list, False) strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) @@ -853,6 +868,7 @@ class ObjectListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None + _applevel_repr = "object" def unwrap(self, w_obj): return w_obj @@ -881,6 +897,7 @@ class IntegerListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0 + _applevel_repr = "int" def wrap(self, intval): return self.space.wrap(intval) @@ -905,8 +922,36 @@ if reverse: l.reverse() +class FloatListStrategy(AbstractUnwrappedStrategy, ListStrategy): + _none_value = 0.0 + _applevel_repr = "float" + + def wrap(self, floatval): + return self.space.wrap(floatval) + + def unwrap(self, w_float): + return self.space.float_w(w_float) + + erase, unerase = rerased.new_erasing_pair("float") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def is_correct_type(self, w_obj): + return is_W_FloatObject(w_obj) + + def list_is_correct_type(self, w_list): + return w_list.strategy is self.space.fromcache(FloatListStrategy) + + def sort(self, w_list, reverse): + l = self.unerase(w_list.lstorage) + sorter = FloatSort(l, len(l)) + sorter.sort() + if reverse: + l.reverse() + class StringListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = None + _applevel_repr = "str" def wrap(self, stringval): return self.space.wrap(stringval) @@ -934,6 +979,7 @@ def getitems_str(self, w_list): return self.unerase(w_list.lstorage) + # _______________________________________________________ 
init_signature = Signature(['sequence'], None, None) @@ -1282,6 +1328,7 @@ TimSort = make_timsort_class() IntBaseTimSort = make_timsort_class() +FloatBaseTimSort = make_timsort_class() StringBaseTimSort = make_timsort_class() class KeyContainer(baseobjspace.W_Root): @@ -1302,6 +1349,10 @@ def lt(self, a, b): return a < b +class FloatSort(FloatBaseTimSort): + def lt(self, a, b): + return a < b + class StringSort(StringBaseTimSort): def lt(self, a, b): return a < b diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -470,11 +470,17 @@ l.extend(iter([1, 2, 3, 4])) assert l is l0 assert l == [1, 1, 2, 3, 4] + l = l0 = ['a'] l.extend(iter(['b', 'c', 'd'])) assert l == ['a', 'b', 'c', 'd'] assert l is l0 + l = l0 = [1.2] + l.extend(iter([2.3, 3.4, 4.5])) + assert l == [1.2, 2.3, 3.4, 4.5] + assert l is l0 + def test_sort(self): l = l0 = [1, 5, 3, 0] l.sort() @@ -493,6 +499,10 @@ l.sort(reverse=True) assert l == ["d", "c", "b", "a"] + l = [3.3, 2.2, 4.4, 1.1, 3.1, 5.5] + l.sort() + assert l == [1.1, 2.2, 3.1, 3.3, 4.4, 5.5] + def test_sort_cmp(self): def lencmp(a,b): return cmp(len(a), len(b)) l = [ 'a', 'fiver', 'tre', '' ] @@ -546,11 +556,19 @@ assert l[-2] == 6 raises(IndexError, "l[len(l)]") raises(IndexError, "l[-len(l)-1]") + l = ['a', 'b', 'c'] assert l[0] == 'a' assert l[-1] == 'c' assert l[-2] == 'b' raises(IndexError, "l[len(l)]") + + l = [1.1, 2.2, 3.3] + assert l[0] == 1.1 + assert l[-1] == 3.3 + assert l[-2] == 2.2 + raises(IndexError, "l[len(l)]") + l = [] raises(IndexError, "l[1]") @@ -588,6 +606,16 @@ assert l is l0 raises(IndexError, "del l[0]") + l = l0 = [1.1, 2.2, 3.3] + del l[0] + assert l == [2.2, 3.3] + del l[-1] + assert l == [2.2] + del l[-1] + assert l == [] + assert l is l0 + raises(IndexError, "del l[0]") + l = range(10) del l[5] assert l == [0, 1, 2, 3, 4, 6, 7, 8, 9] @@ -627,9 +655,15 @@ del l[:] assert l is l0 assert l == [] + l = ['a', 'b'] del l[:] assert l == [] + + l = [1.1, 2.2] + del l[:] + assert l == [] + l = range(5) del l[:] assert l == [] @@ -640,6 +674,11 @@ assert l is l0 assert l == [1,2,3,4,5] + l = l0 = [1.1,2.2,3.3] + l += [4.4,5.5] + assert l is l0 + assert l == [1.1,2.2,3.3,4.4,5.5] + l = l0 = ['a', 'b', 'c'] l1 = l[:] l += ['d'] @@ -697,6 +736,11 @@ l *= -5 assert l == [] + l = l0 = [1.1, 2.2] + l *= 2 + assert l is l0 + assert l == [1.1, 2.2, 1.1, 2.2] + l = range(2) l *= 2 assert l == [0, 1, 0, 1] @@ -731,6 +775,10 @@ assert c.index(0) == 0 raises(ValueError, c.index, 3) + c = [0.0, 2.2, 4.4] + assert c.index(0) == 0.0 + raises(ValueError, c.index, 3) + def test_index_cpython_bug(self): if self.on_cpython: skip("cpython has a bug here") @@ -779,6 +827,10 @@ l[::3] = ('a', 'b') assert l == ['a', 1, 2, 'b', 4, 5] + l = [0.0, 1.1, 2.2, 3.3, 4.4, 5.5] + l[::3] = ('a', 'b') + assert l == ['a', 1.1, 2.2, 'b', 4.4, 5.5] + def test_setslice_with_self(self): l = [1,2,3,4] l[:] = l @@ -835,6 +887,10 @@ l.append("a") assert l == [1,2,3,"a"] + l = [1.1, 2.2, 3.3] + l.append(4.4) + assert l == [1.1, 2.2, 3.3, 4.4] + def test_count(self): c = list('hello') assert c.count('l') == 2 @@ -875,6 +931,10 @@ l.pop() assert l == range(9) + l = [1.1, 2.2, 3.3] + l.pop() + assert l == [1.1, 2.2] + l = [] raises(IndexError, l.pop, 0) @@ -897,16 +957,19 @@ l2 = ["1", "2", "3", "4"] l3 = range(5) l4 = [1, 2, 3, "4"] + l5 = [1.1, 2.2, 3.3, 4.4] raises(IndexError, l1.pop, -5) raises(IndexError, l2.pop, -5) raises(IndexError, 
l3.pop, -6) raises(IndexError, l4.pop, -5) + raises(IndexError, l5.pop, -5) assert l1.pop(-2) == 3 assert l2.pop(-2) == "3" assert l3.pop(-2) == 3 assert l4.pop(-2) == 3 + assert l5.pop(-2) == 3.3 def test_remove(self): c = list('hello world') @@ -925,6 +988,13 @@ l = [0, 3, 5] raises(ValueError, c.remove, 2) + l = [0.0, 1.1, 2.2, 3.3, 4.4] + l.remove(2.2) + assert l == [0.0, 1.1, 3.3, 4.4] + l = [0.0, 3.3, 5.5] + raises(ValueError, c.remove, 2) + raises(ValueError, c.remove, 2.2) + def test_reverse(self): c = list('hello world') c.reverse() diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,4 +1,4 @@ -from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, StringListStrategy, RangeListStrategy, make_range_list +from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, StringListStrategy, RangeListStrategy, make_range_list from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject @@ -15,7 +15,7 @@ def test_empty_to_any(self): l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) - l.append(self.space.wrap(1.)) + l.append(self.space.wrap((1,3))) assert isinstance(l.strategy, ObjectListStrategy) l = W_ListObject(self.space, []) @@ -28,6 +28,11 @@ l.append(self.space.wrap('a')) assert isinstance(l.strategy, StringListStrategy) + l = W_ListObject(self.space, []) + assert isinstance(l.strategy, EmptyListStrategy) + l.append(self.space.wrap(1.2)) + assert isinstance(l.strategy, FloatListStrategy) + def test_int_to_any(self): l = W_ListObject(self.space, [self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) @@ -44,6 +49,14 @@ l.append(self.space.wrap(3)) assert isinstance(l.strategy, ObjectListStrategy) + def test_float_to_any(self): + l = W_ListObject(self.space, [self.space.wrap(1.1),self.space.wrap(2.2),self.space.wrap(3.3)]) + assert isinstance(l.strategy, FloatListStrategy) + l.append(self.space.wrap(4.4)) + assert isinstance(l.strategy, FloatListStrategy) + l.append(self.space.wrap("a")) + assert isinstance(l.strategy, ObjectListStrategy) + def test_setitem(self): # This should work if test_listobject.py passes l = W_ListObject(self.space, [self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) @@ -65,6 +78,12 @@ l.setitem(0, self.space.wrap(2)) assert isinstance(l.strategy, ObjectListStrategy) + # FloatStrategy to ObjectStrategy + l = W_ListObject(self.space, [self.space.wrap(1.2),self.space.wrap(2.3),self.space.wrap(3.4)]) + assert isinstance(l.strategy, FloatListStrategy) + l.setitem(0, self.space.wrap("a")) + assert isinstance(l.strategy, ObjectListStrategy) + def test_insert(self): # no change l = W_ListObject(self.space, [self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) @@ -84,6 +103,12 @@ l.insert(3, self.space.wrap('d')) assert isinstance(l.strategy, ObjectListStrategy) + # FloatStrategy + l = W_ListObject(self.space, [self.space.wrap(1.1),self.space.wrap(2.2),self.space.wrap(3.3)]) + assert isinstance(l.strategy, FloatListStrategy) + l.insert(3, self.space.wrap('d')) + assert isinstance(l.strategy, ObjectListStrategy) + # EmptyStrategy l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) @@ -95,7 +120,9 
@@ l.insert(0, self.space.wrap(2)) assert isinstance(l.strategy, IntegerListStrategy) - def notest_list_empty_after_delete(self): + def test_list_empty_after_delete(self): + import py + py.test.skip("return to emptyliststrategy is not supported anymore") l = W_ListObject(self.space, [self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.deleteitem(0) @@ -117,21 +144,36 @@ l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) assert isinstance(l.strategy, IntegerListStrategy) + # IntegerStrategy to IntegerStrategy l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)])) assert isinstance(l.strategy, IntegerListStrategy) + # ObjectStrategy to ObjectStrategy l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap('b'), self.space.wrap(3)]) assert isinstance(l.strategy, ObjectListStrategy) l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) assert isinstance(l.strategy, ObjectListStrategy) + # IntegerStrategy to ObjectStrategy l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap('a'), self.space.wrap('b'), self.space.wrap('c')])) assert isinstance(l.strategy, ObjectListStrategy) + # StringStrategy to ObjectStrategy + l = W_ListObject(self.space, [self.space.wrap('a'), self.space.wrap('b'), self.space.wrap('c')]) + assert isinstance(l.strategy, StringListStrategy) + l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) + assert isinstance(l.strategy, ObjectListStrategy) + + # FloatStrategy to ObjectStrategy + l = W_ListObject(self.space, [self.space.wrap(1.1), self.space.wrap(2.2), self.space.wrap(3.3)]) + assert isinstance(l.strategy, FloatListStrategy) + l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap('a'), self.space.wrap(2), self.space.wrap(3)])) + assert isinstance(l.strategy, ObjectListStrategy) + def test_setslice_List(self): def wrapitems(items): @@ -160,6 +202,11 @@ keep_other_strategy(l, 0, 2, other.length(), other) assert l.strategy is self.space.fromcache(StringListStrategy) + l = W_ListObject(self.space, wrapitems([1.1, 2.2, 3.3, 4.4, 5.5])) + other = W_ListObject(self.space, []) + keep_other_strategy(l, 0, 1, l.length(), other) + assert l.strategy is self.space.fromcache(FloatListStrategy) + l = W_ListObject(self.space, wrapitems(["a",3,"c",4,"e"])) other = W_ListObject(self.space, wrapitems(["a", "b", "c"])) keep_other_strategy(l, 0, 2, other.length(), other) @@ -194,6 +241,11 @@ l.extend(W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)])) assert isinstance(l.strategy, IntegerListStrategy) + l = W_ListObject(self.space, [self.space.wrap(1.1), self.space.wrap(2.2), self.space.wrap(3.3)]) + assert isinstance(l.strategy, FloatListStrategy) + l.extend(W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)])) + assert isinstance(l.strategy, ObjectListStrategy) + def test_empty_extend_with_any(self): empty = W_ListObject(self.space, []) assert isinstance(empty.strategy, EmptyListStrategy) @@ -220,6 +272,11 @@ empty = W_ListObject(self.space, []) assert isinstance(empty.strategy, 
EmptyListStrategy) + empty.extend(W_ListObject(self.space, [self.space.wrap(1.1), self.space.wrap(2.2), self.space.wrap(3.3)])) + assert isinstance(empty.strategy, FloatListStrategy) + + empty = W_ListObject(self.space, []) + assert isinstance(empty.strategy, EmptyListStrategy) empty.extend(W_ListObject(self.space, [])) assert isinstance(empty.strategy, EmptyListStrategy) @@ -293,12 +350,13 @@ l.setslice(0, 1, 3, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) assert isinstance(l.strategy, IntegerListStrategy) - def test_get_items_copy(self): + def test_copy_list(self): l1 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) - l2 = l1.getitems() + l2 = l1.clone() l2.append(self.space.wrap(4)) assert not l2 == l1.getitems() + def test_getitems_does_not_copy_object_list(self): l1 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap("two"), self.space.wrap(3)]) l2 = l1.getitems() l2.append(self.space.wrap("four")) @@ -345,7 +403,6 @@ # should not raise assert getslice__List_ANY_ANY(self.space, l, self.space.wrap(15), self.space.wrap(2222)).strategy == self.space.fromcache(EmptyListStrategy) - def test_add_to_rangelist(self): l1 = make_range_list(self.space, 1, 1, 3) l2 = W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5)]) diff --git a/pypy/rlib/_stacklet_n_a.py b/pypy/rlib/_stacklet_n_a.py --- a/pypy/rlib/_stacklet_n_a.py +++ b/pypy/rlib/_stacklet_n_a.py @@ -1,4 +1,5 @@ from pypy.rlib import _rffi_stacklet as _c +from pypy.rlib import objectmodel, debug from pypy.rpython.annlowlevel import llhelper from pypy.tool.staticmethods import StaticMethods @@ -21,6 +22,9 @@ def destroy(thrd, h): _c.destroy(thrd._thrd, h) + if objectmodel.we_are_translated(): + debug.debug_print("not using a framework GC: " + "stacklet_destroy() may leak") is_empty_handle = _c.is_empty_handle diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -176,7 +176,6 @@ return decorator @oopspec("jit.isconstant(value)") - at specialize.ll() def isconstant(value): """ While tracing, returns whether or not the value is currently known to be @@ -186,9 +185,9 @@ This is for advanced usage only. """ return NonConstant(False) +isconstant._annspecialcase_ = "specialize:call_location" @oopspec("jit.isvirtual(value)") - at specialize.ll() def isvirtual(value): """ Returns if this value is virtual, while tracing, it's relatively @@ -197,6 +196,7 @@ This is for advanced usage only. 
""" return NonConstant(False) +isvirtual._annspecialcase_ = "specialize:call_location" class Entry(ExtRegistryEntry): _about_ = hint diff --git a/pypy/rlib/test/test_rstacklet.py b/pypy/rlib/test/test_rstacklet.py --- a/pypy/rlib/test/test_rstacklet.py +++ b/pypy/rlib/test/test_rstacklet.py @@ -65,6 +65,15 @@ self.tasks[0].withdepth(self.random.genrand32() % 50) assert len(self.tasks[0].lst) == 0 + @here_is_a_test + def test_destroy(self): + # this used to give MemoryError in shadowstack tests + for i in range(100000): + self.status = 0 + h = self.sthread.new(switchbackonce_callback, + rffi.cast(llmemory.Address, 321)) + self.sthread.destroy(h) + def any_alive(self): for task in self.tasks: if task.h: diff --git a/pypy/rpython/memory/gctransform/asmgcroot.py b/pypy/rpython/memory/gctransform/asmgcroot.py --- a/pypy/rpython/memory/gctransform/asmgcroot.py +++ b/pypy/rpython/memory/gctransform/asmgcroot.py @@ -92,7 +92,6 @@ # make a copy of the graph that will reload the values graph2 = copygraph(fnptr._obj.graph) block2 = graph2.startblock - block2.isstartblock = False block1 = Block([]) reloadedvars = [] for v, c_p in zip(block2.inputargs, sra): @@ -109,7 +108,6 @@ [w], v)) reloadedvars.append(v) block1.closeblock(Link(reloadedvars, block2)) - block1.isstartblock = True graph2.startblock = block1 FUNC2 = lltype.FuncType([], FUNC1.RESULT) fnptr2 = lltype.functionptr(FUNC2, diff --git a/pypy/rpython/memory/gctransform/shadowstack.py b/pypy/rpython/memory/gctransform/shadowstack.py --- a/pypy/rpython/memory/gctransform/shadowstack.py +++ b/pypy/rpython/memory/gctransform/shadowstack.py @@ -307,7 +307,7 @@ "restore_state_from: broken shadowstack") self.gcdata.root_stack_base = shadowstackref.base self.gcdata.root_stack_top = shadowstackref.top - self.destroy(shadowstackref) + self._cleanup(shadowstackref) def start_fresh_new_state(self): self.gcdata.root_stack_base = self.unused_full_stack @@ -315,6 +315,10 @@ self.unused_full_stack = llmemory.NULL def destroy(self, shadowstackref): + llmemory.raw_free(shadowstackref.base) + self._cleanup(shadowstackref) + + def _cleanup(self, shadowstackref): shadowstackref.base = llmemory.NULL shadowstackref.top = llmemory.NULL shadowstackref.context = llmemory.NULL diff --git a/pypy/rpython/memory/gctransform/test/test_transform.py b/pypy/rpython/memory/gctransform/test/test_transform.py --- a/pypy/rpython/memory/gctransform/test/test_transform.py +++ b/pypy/rpython/memory/gctransform/test/test_transform.py @@ -102,12 +102,12 @@ llops.genop("gc_pop_alive", [var]) -def checkblock(block, is_borrowed): +def checkblock(block, is_borrowed, is_start_block): if block.operations == (): # a return/exception block -- don't want to think about them # (even though the test passes for somewhat accidental reasons) return - if block.isstartblock: + if is_start_block: refs_in = 0 else: refs_in = len([v for v in block.inputargs if isinstance(v, Variable) @@ -167,7 +167,7 @@ if check: for graph, is_borrowed in graphs_borrowed.iteritems(): for block in graph.iterblocks(): - checkblock(block, is_borrowed) + checkblock(block, is_borrowed, block is graph.startblock) return t, transformer def getops(graph): diff --git a/pypy/rpython/memory/gctransform/transform.py b/pypy/rpython/memory/gctransform/transform.py --- a/pypy/rpython/memory/gctransform/transform.py +++ b/pypy/rpython/memory/gctransform/transform.py @@ -263,9 +263,7 @@ # still be empty (but let's check) if starts_with_empty_block(graph) and inserted_empty_startblock: old_startblock = graph.startblock - 
graph.startblock.isstartblock = False graph.startblock = graph.startblock.exits[0].target - graph.startblock.isstartblock = True checkgraph(graph) diff --git a/pypy/rpython/normalizecalls.py b/pypy/rpython/normalizecalls.py --- a/pypy/rpython/normalizecalls.py +++ b/pypy/rpython/normalizecalls.py @@ -116,8 +116,6 @@ v = Constant(default) outlist.append(v) newblock.closeblock(Link(outlist, oldblock)) - oldblock.isstartblock = False - newblock.isstartblock = True graph.startblock = newblock for i in range(len(newdefaults)-1,-1,-1): if newdefaults[i] is NODEFAULT: @@ -171,8 +169,6 @@ # prepare the output args of newblock and link outlist = inlist[:] newblock.closeblock(Link(outlist, oldblock)) - oldblock.isstartblock = False - newblock.isstartblock = True graph.startblock = newblock # finished checkgraph(graph) diff --git a/pypy/tool/nullpath.py b/pypy/tool/nullpath.py --- a/pypy/tool/nullpath.py +++ b/pypy/tool/nullpath.py @@ -1,4 +1,4 @@ -import py +import py, os class NullPyPathLocal(py.path.local): @@ -6,7 +6,7 @@ return self.__class__(py.path.local.join(self, *args)) def open(self, mode): - return open('/dev/null', mode) + return open(os.devnull, mode) def __repr__(self): return py.path.local.__repr__(self) + ' [fake]' diff --git a/pypy/tool/test/test_nullpath.py b/pypy/tool/test/test_nullpath.py --- a/pypy/tool/test/test_nullpath.py +++ b/pypy/tool/test/test_nullpath.py @@ -1,11 +1,7 @@ -import sys +import sys, os import py from pypy.tool.nullpath import NullPyPathLocal -def setup_module(): - if 'posix' not in sys.builtin_module_names: - py.test.skip('posix only') - def test_nullpath(tmpdir): path = NullPyPathLocal(tmpdir) assert repr(path).endswith('[fake]') @@ -13,4 +9,4 @@ assert isinstance(foo_txt, NullPyPathLocal) # f = foo_txt.open('w') - assert f.name == '/dev/null' + assert f.name == os.devnull diff --git a/pypy/translator/backendopt/constfold.py b/pypy/translator/backendopt/constfold.py --- a/pypy/translator/backendopt/constfold.py +++ b/pypy/translator/backendopt/constfold.py @@ -37,8 +37,9 @@ except (KeyboardInterrupt, SystemExit): raise except Exception, e: - log.WARNING('constant-folding %r:' % (spaceop,)) - log.WARNING(' %s: %s' % (e.__class__.__name__, e)) + pass # turn off reporting these as warnings: useless + #log.WARNING('constant-folding %r:' % (spaceop,)) + #log.WARNING(' %s: %s' % (e.__class__.__name__, e)) else: # success in folding this space operation if spaceop.opname in fixup_op_result: diff --git a/pypy/translator/backendopt/inline.py b/pypy/translator/backendopt/inline.py --- a/pypy/translator/backendopt/inline.py +++ b/pypy/translator/backendopt/inline.py @@ -453,7 +453,6 @@ #vars that need to be passed through the blocks of the inlined function linktoinlined = splitlink copiedstartblock = self.copy_block(self.graph_to_inline.startblock) - copiedstartblock.isstartblock = False #find args passed to startblock of inlined function passon_args = [] for arg in self.op.args[1:]: diff --git a/pypy/translator/backendopt/mallocv.py b/pypy/translator/backendopt/mallocv.py --- a/pypy/translator/backendopt/mallocv.py +++ b/pypy/translator/backendopt/mallocv.py @@ -391,7 +391,6 @@ virtualframe = VirtualFrame(graph2.startblock, 0, nodelist) graphbuilder = GraphBuilder(self, graph2) specblock = graphbuilder.start_from_virtualframe(virtualframe) - specblock.isstartblock = True specgraph = graph2 specgraph.name += '_mallocv' specgraph.startblock = specblock diff --git a/pypy/translator/backendopt/test/test_malloc.py b/pypy/translator/backendopt/test/test_malloc.py --- 
a/pypy/translator/backendopt/test/test_malloc.py +++ b/pypy/translator/backendopt/test/test_malloc.py @@ -50,7 +50,8 @@ # we do the loop ourselves instead of calling remove_simple_mallocs() while True: progress = remover.remove_mallocs_once(graph) - simplify.transform_dead_op_vars_in_blocks(list(graph.iterblocks())) + simplify.transform_dead_op_vars_in_blocks(list(graph.iterblocks()), + [graph]) if progress and option.view: t.view() if expected_result is not Ellipsis: diff --git a/pypy/translator/c/test/test_refcount.py b/pypy/translator/c/test/test_refcount.py --- a/pypy/translator/c/test/test_refcount.py +++ b/pypy/translator/c/test/test_refcount.py @@ -229,7 +229,6 @@ graph = t.buildflowgraph(g) assert graph.startblock.operations == [] graph.startblock = graph.startblock.exits[0].target - graph.startblock.isstartblock = True from pypy.objspace.flow.model import checkgraph checkgraph(graph) t._prebuilt_graphs[g] = graph diff --git a/pypy/translator/simplify.py b/pypy/translator/simplify.py --- a/pypy/translator/simplify.py +++ b/pypy/translator/simplify.py @@ -397,7 +397,8 @@ def transform_dead_op_vars(graph, translator=None): """Remove dead operations and variables that are passed over a link but not used in the target block. Input is a graph.""" - return transform_dead_op_vars_in_blocks(list(graph.iterblocks()), translator) + return transform_dead_op_vars_in_blocks(list(graph.iterblocks()), + [graph], translator) # the set of operations that can safely be removed # (they have no side effects, at least in R-Python) @@ -419,11 +420,19 @@ hasattr: True, } -def transform_dead_op_vars_in_blocks(blocks, translator=None): +def find_start_blocks(graphs): + start_blocks = set() + for graph in graphs: + start_blocks.add(graph.startblock) + return start_blocks + +def transform_dead_op_vars_in_blocks(blocks, graphs, translator=None): """Remove dead operations and variables that are passed over a link but not used in the target block. Input is a set of blocks""" read_vars = {} # set of variables really used variable_flow = {} # map {Var: list-of-Vars-it-depends-on} + set_of_blocks = set(blocks) + start_blocks = find_start_blocks(graphs) def canremove(op, block): if op.opname not in CanRemove: @@ -451,7 +460,7 @@ if block.exits: for link in block.exits: - if link.target not in blocks: + if link.target not in set_of_blocks: for arg, targetarg in zip(link.args, link.target.inputargs): read_vars[arg] = True read_vars[targetarg] = True @@ -465,7 +474,7 @@ read_vars[arg] = True # an input block's inputargs should not be modified, even if some # of the function's input arguments are not actually used - if block.isstartblock: + if block in start_blocks: for arg in block.inputargs: read_vars[arg] = True diff --git a/pypy/translator/transform.py b/pypy/translator/transform.py --- a/pypy/translator/transform.py +++ b/pypy/translator/transform.py @@ -115,7 +115,7 @@ # to kill dead (never-followed) links, # which can possibly remove more variables. 
from pypy.translator.simplify import transform_dead_op_vars_in_blocks - transform_dead_op_vars_in_blocks(block_subset) + transform_dead_op_vars_in_blocks(block_subset, self.translator.graphs) def transform_dead_code(self, block_subset): """Remove dead code: these are the blocks that are not annotated at all diff --git a/pypy/translator/unsimplify.py b/pypy/translator/unsimplify.py --- a/pypy/translator/unsimplify.py +++ b/pypy/translator/unsimplify.py @@ -42,9 +42,7 @@ vars = [copyvar(annotator, v) for v in graph.startblock.inputargs] newblock = Block(vars) newblock.closeblock(Link(vars, graph.startblock)) - graph.startblock.isstartblock = False graph.startblock = newblock - graph.startblock.isstartblock = True def starts_with_empty_block(graph): return (not graph.startblock.operations @@ -151,9 +149,7 @@ newop = SpaceOperation('direct_call', [c_initial_func], v_none) extrablock.operations = [newop] extrablock.closeblock(Link(args, entry_point.startblock)) - entry_point.startblock.isstartblock = False entry_point.startblock = extrablock - entry_point.startblock.isstartblock = True checkgraph(entry_point) def call_final_function(translator, final_func, annhelper=None): From noreply at buildbot.pypy.org Thu Dec 29 09:57:28 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Dec 2011 09:57:28 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: merge default up to 3ff9c88dca39 Message-ID: <20111229085728.6E7EE82C01@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50942:c61c064e84b4 Date: 2011-12-27 11:52 +0100 http://bitbucket.org/pypy/pypy/changeset/c61c064e84b4/ Log: merge default up to 3ff9c88dca39 diff --git a/lib-python/modified-2.7/ctypes/__init__.py b/lib-python/modified-2.7/ctypes/__init__.py --- a/lib-python/modified-2.7/ctypes/__init__.py +++ b/lib-python/modified-2.7/ctypes/__init__.py @@ -351,7 +351,7 @@ self._FuncPtr = _FuncPtr if handle is None: - self._handle = _ffi.CDLL(name) + self._handle = _ffi.CDLL(name, mode) else: self._handle = handle diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -379,12 +379,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): @@ -404,7 +406,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# -*- coding: iso-8859-1 +# -*- coding: iso-8859-1 -*- # Note that PyPy contains also a built-in module 'sha' which will hide # this one if compiled in. 
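(Illustrative aside, not part of this changeset: the lib_pypy/_collections.py and pypy/module/_collections/app_defaultdict.py hunks above change defaultdict so that the default factory is taken only from the first positional argument, must be callable or None, and is passed back positionally by copy(). A minimal sketch of the resulting behaviour, assuming CPython's stdlib collections.defaultdict as a stand-in, since it follows the same rules:

    from collections import defaultdict

    d = defaultdict(int, {2: 3})      # first positional argument is the factory
    d[5] += 1                         # missing key -> int() == 0, then incremented
    assert d[2] == 3 and d[5] == 1

    c = d.copy()                      # copy() re-passes the factory positionally,
    assert c.default_factory is int   # i.e. type(self)(self.default_factory, self)

    defaultdict(None, [('a', 5)])     # a None factory is allowed: plain KeyError on miss

    try:
        defaultdict([('a', 5)])       # non-callable first argument
    except TypeError, e:
        print e                       # "first argument must be callable"

This mirrors the new tests test_noncallable, test_kwds and test_copy: the old code interpreted a 'default_factory' keyword argument as the factory, whereas CPython, and the patched code, store such a keyword as an ordinary dict entry.)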
diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py --- a/lib_pypy/itertools.py +++ b/lib_pypy/itertools.py @@ -25,7 +25,7 @@ __all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter', 'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap', - 'takewhile', 'tee'] + 'takewhile', 'tee', 'compress', 'product'] try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -164,6 +164,7 @@ # if something: # assume this causes a NameError # # _this_ lines and the one # below we don't want from entry.getsource() + end = min(end, len(source)) for i in range(self.lineno, end): if source[i].rstrip().endswith(':'): end = i + 1 diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py --- a/pypy/bin/checkmodule.py +++ b/pypy/bin/checkmodule.py @@ -1,43 +1,45 @@ #! /usr/bin/env python """ -Usage: checkmodule.py [-b backend] +Usage: checkmodule.py -Compiles the PyPy extension module from pypy/module// -into a fake program which does nothing. Useful for testing whether a -modules compiles without doing a full translation. Default backend is cli. - -WARNING: this is still incomplete: there are chances that the -compilation fails with strange errors not due to the module. If a -module is known to compile during a translation but don't pass -checkmodule.py, please report the bug (or, better, correct it :-). +Check annotation and rtyping of the PyPy extension module from +pypy/module//. Useful for testing whether a +modules compiles without doing a full translation. """ import autopath -import sys +import sys, os from pypy.objspace.fake.checkmodule import checkmodule def main(argv): - try: - assert len(argv) in (2, 4) - if len(argv) == 2: - backend = 'cli' - modname = argv[1] - if modname in ('-h', '--help'): - print >> sys.stderr, __doc__ - sys.exit(0) - if modname.startswith('-'): - print >> sys.stderr, "Bad command line" - print >> sys.stderr, __doc__ - sys.exit(1) - else: - _, b, backend, modname = argv - assert b == '-b' - except AssertionError: + if len(argv) != 2: print >> sys.stderr, __doc__ sys.exit(2) + modname = argv[1] + if modname in ('-h', '--help'): + print >> sys.stderr, __doc__ + sys.exit(0) + if modname.startswith('-'): + print >> sys.stderr, "Bad command line" + print >> sys.stderr, __doc__ + sys.exit(1) + if os.path.sep in modname: + if os.path.basename(modname) == '': + modname = os.path.dirname(modname) + if os.path.basename(os.path.dirname(modname)) != 'module': + print >> sys.stderr, "Must give '../module/xxx', or just 'xxx'." + sys.exit(1) + modname = os.path.basename(modname) + try: + checkmodule(modname) + except Exception, e: + import traceback, pdb + traceback.print_exc() + pdb.post_mortem(sys.exc_info()[2]) + return 1 else: - checkmodule(modname, backend, interactive=True) - print 'Module compiled succesfully' + print 'Passed.' + return 0 if __name__ == '__main__': - main(sys.argv) + sys.exit(main(sys.argv)) diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 1.6`_: the latest official release +* `Release 1.7`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -75,7 +75,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.6`: http://pypy.org/download.html +.. _`Release 1.7`: http://pypy.org/download.html .. 
_`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -120,9 +120,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.6`__. +instead of the latest release, which is `1.7`__. -.. __: release-1.6.0.html +.. __: release-1.7.0.html PyPy is mainly developed on Linux and Mac OS X. Windows is supported, but platform-specific bugs tend to take longer before we notice and fix diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -1,6 +1,5 @@ """codegen helpers and AST constant folding.""" import sys -import itertools from pypy.interpreter.astcompiler import ast, consts, misc from pypy.tool import stdlib_opcode as ops @@ -146,8 +145,7 @@ } unrolling_unary_folders = unrolling_iterable(unary_folders.items()) -for folder in itertools.chain(binary_folders.itervalues(), - unary_folders.itervalues()): +for folder in binary_folders.values() + unary_folders.values(): folder._always_inline_ = True del folder diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1,4 +1,3 @@ -import itertools import pypy from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction @@ -519,8 +518,8 @@ exception_types_w = self.export_builtin_exceptions() # initialize with "bootstrap types" from objspace (e.g. 
w_None) - types_w = itertools.chain(self.get_builtin_types().iteritems(), - exception_types_w.iteritems()) + types_w = (self.get_builtin_types().items() + + exception_types_w.items()) for name, w_type in types_w: self.setitem(self.builtin.w_dict, self.wrap(name), w_type) @@ -1608,6 +1607,8 @@ 'UnicodeError', 'ValueError', 'ZeroDivisionError', + 'UnicodeEncodeError', + 'UnicodeDecodeError', ] ## Irregular part of the interface: diff --git a/pypy/jit/backend/arm/test/test_zrpy_gc.py b/pypy/jit/backend/arm/test/test_zrpy_gc.py --- a/pypy/jit/backend/arm/test/test_zrpy_gc.py +++ b/pypy/jit/backend/arm/test/test_zrpy_gc.py @@ -459,6 +459,46 @@ def test_compile_framework_7(self): self.run('compile_framework_7') + def define_compile_framework_7_interior(cls): + # Array of structs containing pointers (test the write barrier + # for setinteriorfield_gc) + S = lltype.GcStruct('S', ('i', lltype.Signed)) + A = lltype.GcArray(lltype.Struct('entry', ('x', lltype.Ptr(S)), + ('y', lltype.Ptr(S)), + ('z', lltype.Ptr(S)))) + class Glob: + a = lltype.nullptr(A) + glob = Glob() + # + def make_s(i): + s = lltype.malloc(S) + s.i = i + return s + # + @unroll_safe + def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): + a = glob.a + if not a: + a = glob.a = lltype.malloc(A, 10) + i = 0 + while i < 10: + a[i].x = make_s(n + i * 100 + 1) + a[i].y = make_s(n + i * 100 + 2) + a[i].z = make_s(n + i * 100 + 3) + i += 1 + i = 0 + while i < 10: + check(a[i].x.i == n + i * 100 + 1) + check(a[i].y.i == n + i * 100 + 2) + check(a[i].z.i == n + i * 100 + 3) + i += 1 + n -= x.foo + return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s + return None, f, None + + def test_compile_framework_7_interior(self): + self.run('compile_framework_7_interior') + def define_compile_framework_8(cls): # Array of pointers, of unknown length (test write_barrier_from_array) def before(n, x): diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -20,7 +20,7 @@ from pypy.jit.backend.llgraph import symbolic from pypy.jit.codewriter import longlong -from pypy.rlib import libffi +from pypy.rlib import libffi, clibffi from pypy.rlib.objectmodel import ComputedIntSymbolic, we_are_translated from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint @@ -1432,6 +1432,10 @@ res = _getinteriorfield_raw(libffi.types.slong, array, index, width, ofs) return res +def do_getinteriorfield_raw_float(array, index, width, ofs): + res = _getinteriorfield_raw(libffi.types.double, array, index, width, ofs) + return res + def _getfield_raw(struct, fieldnum): STRUCT, fieldname = symbolic.TokenToField[fieldnum] ptr = cast_from_int(lltype.Ptr(STRUCT), struct) @@ -1510,12 +1514,17 @@ do_setinteriorfield_gc_float = new_setinteriorfield_gc(cast_from_floatstorage) do_setinteriorfield_gc_ptr = new_setinteriorfield_gc(cast_from_ptr) -def new_setinteriorfield_raw(ffitype): +def new_setinteriorfield_raw(cast_func, ffitype): def do_setinteriorfield_raw(array, index, newvalue, width, ofs): addr = rffi.cast(rffi.VOIDP, array) + for TYPE, ffitype2 in clibffi.ffitype_map: + if ffitype2 is ffitype: + newvalue = cast_func(TYPE, newvalue) + break return libffi.array_setitem(ffitype, width, addr, index, ofs, newvalue) return do_setinteriorfield_raw -do_setinteriorfield_raw_int = new_setinteriorfield_raw(libffi.types.slong) +do_setinteriorfield_raw_int = new_setinteriorfield_raw(cast_from_int, libffi.types.slong) 
+do_setinteriorfield_raw_float = new_setinteriorfield_raw(cast_from_floatstorage, libffi.types.double) def do_setfield_raw_int(struct, fieldnum, newvalue): STRUCT, fieldname = symbolic.TokenToField[fieldnum] diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -825,6 +825,15 @@ bool(v.value)): # store a non-NULL self._gen_write_barrier(newops, op.getarg(0), v) op = op.copy_and_change(rop.SETFIELD_RAW) + # ---------- write barrier for SETINTERIORFIELD_GC ------ + if op.getopnum() == rop.SETINTERIORFIELD_GC: + val = op.getarg(0) + if val is not last_malloc: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self._gen_write_barrier(newops, op.getarg(0), v) + op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) # ---------- write barrier for SETARRAYITEM_GC ---------- if op.getopnum() == rop.SETARRAYITEM_GC: val = op.getarg(0) diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -393,11 +393,17 @@ """ Platform specific - Allocates a temporary register """ raise NotImplementedError("Abstract") -def compute_vars_longevity(inputargs, operations): +def _compute_vars_longevity(self, inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" + + # returns a pair longevity/useful. Non-useful variables are ones that + # never appear in the assembler or it does not matter if they appear on + # stack or in registers. Main example is loop arguments that go + # only to guard operations or to jump or to finish produced = {} last_used = {} + useful = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -405,8 +411,11 @@ continue assert op.result not in produced produced[op.result] = i + opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) + if opnum != rop.JUMP and opnum != rop.FINISH: + useful[arg] = None if isinstance(arg, Box) and arg not in last_used: last_used[arg] = i if op.is_guard(): @@ -416,7 +425,7 @@ assert isinstance(arg, Box) if arg not in last_used: last_used[arg] = i - + longevity = {} for arg in produced: if arg in last_used: @@ -432,8 +441,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity - + return longevity, useful def compute_loop_consts(inputargs, jump, looptoken): if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -570,6 +570,28 @@ assert operations[1].getarg(2) == v_value assert operations[1].getdescr() == array_descr + def test_rewrite_assembler_5(self): + S = lltype.GcStruct('S') + A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) + interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, + A.OF, 'x') + wbdescr = self.gc_ll_descr.write_barrier_descr + ops = parse(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + expected = parse(""" + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiordescr) + jump(p1, p2) + """, namespace=locals()) + operations = 
get_deep_immutable_oplist(ops.operations) + operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, + operations, []) + equaloplists(operations, expected.operations) + def test_rewrite_assembler_initialization_store(self): S = lltype.GcStruct('S', ('parent', OBJECT), ('x', lltype.Signed)) diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -2,6 +2,8 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT from pypy.jit.backend.llsupport.regalloc import FrameManager from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan +from pypy.jit.tool.oparser import parse +from pypy.jit.backend.detect_cpu import getcpuclass def newboxes(*values): return [BoxInt(v) for v in values] diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -19,7 +19,7 @@ from pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr from pypy.jit.backend.llsupport.descr import InteriorFieldDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ - TempBox, compute_vars_longevity, compute_loop_consts + TempBox, compute_vars_longevity from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64, MY_COPY_OF_REGS from pypy.rlib.rarithmetic import r_longlong @@ -167,26 +167,22 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity = compute_vars_longevity(inputargs, operations) + longevity, useful = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) - return operations + return operations, useful def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - jump = operations[-1] - loop_consts = compute_loop_consts(inputargs, jump, looptoken) - self.loop_consts = loop_consts - return self._process_inputargs(inputargs), operations + operations, useful = self._prepare(inputargs, operations, allgcrefs) + return self._process_inputargs(inputargs, useful), operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) - self.loop_consts = {} + operations, _ = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] @@ -195,7 +191,7 @@ def reserve_param(self, n): self.param_depth = max(self.param_depth, n) - def _process_inputargs(self, inputargs): + def _process_inputargs(self, inputargs, useful): # XXX we can sort out here by longevity if we need something # more optimal floatlocs = [None] * len(inputargs) @@ -211,7 +207,7 @@ arg = inputargs[i] assert not isinstance(arg, Const) reg = None - if arg not in self.loop_consts and self.longevity[arg][1] > -1: + if self.longevity[arg][1] > -1 and arg in useful: if arg.type == FLOAT: # xxx is it really a good idea? 
at the first CALL they # will all be flushed anyway diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -149,6 +149,13 @@ self.cpu.execute_token(loop.token) return loop + def prepare_loop(self, ops): + loop = self.parse(ops) + regalloc = RegAlloc(self.cpu.assembler, False) + regalloc.prepare_loop(loop.inputargs, loop.operations, + loop.token, []) + return regalloc + def getint(self, index): return self.cpu.get_latest_value_int(index) @@ -422,6 +429,35 @@ self.run(loop) assert self.getints(9) == range(9) + def test_loopargs(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_2(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + finish(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_3(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + guard_true(i4) [i0, i1, i2, i3, i4] + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + class TestRegallocCompOps(BaseTestRegalloc): def test_cmp_op_0(self): diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -457,6 +457,46 @@ def test_compile_framework_7(self): self.run('compile_framework_7') + def define_compile_framework_7_interior(cls): + # Array of structs containing pointers (test the write barrier + # for setinteriorfield_gc) + S = lltype.GcStruct('S', ('i', lltype.Signed)) + A = lltype.GcArray(lltype.Struct('entry', ('x', lltype.Ptr(S)), + ('y', lltype.Ptr(S)), + ('z', lltype.Ptr(S)))) + class Glob: + a = lltype.nullptr(A) + glob = Glob() + # + def make_s(i): + s = lltype.malloc(S) + s.i = i + return s + # + @unroll_safe + def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): + a = glob.a + if not a: + a = glob.a = lltype.malloc(A, 10) + i = 0 + while i < 10: + a[i].x = make_s(n + i * 100 + 1) + a[i].y = make_s(n + i * 100 + 2) + a[i].z = make_s(n + i * 100 + 3) + i += 1 + i = 0 + while i < 10: + check(a[i].x.i == n + i * 100 + 1) + check(a[i].y.i == n + i * 100 + 2) + check(a[i].z.i == n + i * 100 + 3) + i += 1 + n -= x.foo + return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s + return None, f, None + + def test_compile_framework_7_interior(self): + self.run('compile_framework_7_interior') + def define_compile_framework_8(cls): # Array of pointers, of unknown length (test write_barrier_from_array) def before(n, x): diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -247,7 +247,7 @@ if funcobj.random_effects_on_gcobjs: return True except (AttributeError, lltype.DelayedPointer): - pass + return True # better safe than sorry return super(RandomEffectsAnalyzer, self).analyze_external_call( op, seen) diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -210,6 +210,8 @@ def rewrite_op_cast_pointer(self, op): newop = self.rewrite_op_same_as(op) assert newop is None + return + # disabled for now if (self._is_rclass_instance(op.args[0]) and 
self._is_rclass_instance(op.result)): FROM = op.args[0].concretetype.TO @@ -220,6 +222,9 @@ return [None, # hack, do the right renaming from op.args[0] to op.result SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + def rewrite_op_jit_record_known_class(self, op): + return SpaceOperation("record_known_class", [op.args[0], op.args[1]], None) + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -298,7 +298,7 @@ pass class ResumeGuardDescr(ResumeDescr): - _counter = 0 # if < 0, there is one counter per value; + _counter = 0 # on a GUARD_VALUE, there is one counter per value; _counters = None # they get stored in _counters then. # this class also gets the following attributes stored by resume.py code @@ -309,10 +309,13 @@ rd_virtuals = None rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) - CNT_INT = -0x20000000 - CNT_REF = -0x40000000 - CNT_FLOAT = -0x60000000 - CNT_MASK = 0x1FFFFFFF + CNT_BASE_MASK = 0x0FFFFFFF # the base counter value + CNT_BUSY_FLAG = 0x10000000 # if set, busy tracing from the guard + CNT_TYPE_MASK = 0x60000000 # mask for the type + + CNT_INT = 0x20000000 + CNT_REF = 0x40000000 + CNT_FLOAT = 0x60000000 def store_final_boxes(self, guard_op, boxes): guard_op.setfailargs(boxes) @@ -326,6 +329,8 @@ except ValueError: return # xxx probably very rare else: + if i > self.CNT_BASE_MASK: + return # probably never, but better safe than sorry if box.type == history.INT: cnt = self.CNT_INT elif box.type == history.REF: @@ -334,14 +339,17 @@ cnt = self.CNT_FLOAT else: assert 0, box.type - # we build the following value for _counter, which is always - # a negative value + assert cnt > self.CNT_BASE_MASK self._counter = cnt | i def handle_fail(self, metainterp_sd, jitdriver_sd): if self.must_compile(metainterp_sd, jitdriver_sd): - return self._trace_and_compile_from_bridge(metainterp_sd, - jitdriver_sd) + self.start_compiling() + try: + return self._trace_and_compile_from_bridge(metainterp_sd, + jitdriver_sd) + finally: + self.done_compiling() else: from pypy.jit.metainterp.blackhole import resume_in_blackhole resume_in_blackhole(metainterp_sd, jitdriver_sd, self) @@ -359,12 +367,22 @@ def must_compile(self, metainterp_sd, jitdriver_sd): trace_eagerness = jitdriver_sd.warmstate.trace_eagerness - if self._counter >= 0: + # + if self._counter <= self.CNT_BASE_MASK: + # simple case: just counting from 0 to trace_eagerness self._counter += 1 return self._counter >= trace_eagerness - else: - index = self._counter & self.CNT_MASK - typetag = self._counter & ~ self.CNT_MASK + # + # do we have the BUSY flag? If so, we're tracing right now, e.g. in an + # outer invocation of the same function, so don't trace again for now. + elif self._counter & self.CNT_BUSY_FLAG: + return False + # + else: # we have a GUARD_VALUE that fails. Make a _counters instance + # (only now, when the guard is actually failing at least once), + # and use it to record some statistics about the failing values. 
+ index = self._counter & self.CNT_BASE_MASK + typetag = self._counter & self.CNT_TYPE_MASK counters = self._counters if typetag == self.CNT_INT: intvalue = metainterp_sd.cpu.get_latest_value_int(index) @@ -391,7 +409,16 @@ assert 0, typetag return counter >= trace_eagerness - def reset_counter_from_failure(self): + def start_compiling(self): + # start tracing and compiling from this guard. + self._counter |= self.CNT_BUSY_FLAG + + def done_compiling(self): + # done tracing and compiling from this guard. Either the bridge has + # been successfully compiled, in which case whatever value we store + # in self._counter will not be seen any more, or not, in which case + # we should reset the counter to 0, in order to wait a bit until the + # next attempt. if self._counter >= 0: self._counter = 0 self._counters = None @@ -608,9 +635,6 @@ metainterp.set_compiled_merge_points(self.original_greenkey, old_loop_tokens) - def reset_counter_from_failure(self): - pass - def compile_new_bridge(metainterp, old_loop_tokens, resumekey, retraced=False): """Try to compile a new bridge leading from the beginning of the history diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -234,6 +234,9 @@ # longlongs are treated as floats, see # e.g. llsupport/descr.py:getDescrClass is_float = True + elif kind == 'u': + # they're all False + pass else: assert False, "unsupported ffitype or kind" # diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1790,7 +1790,6 @@ self.staticdata.profiler.count(reason) debug_print('~~~ ABORTING TRACING') self.staticdata.stats.aborted() - self.resumekey.reset_counter_from_failure() def blackhole_if_trace_too_long(self): warmrunnerstate = self.jitdriver_sd.warmstate diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -14,7 +14,7 @@ from pypy.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside, loop_invariant, elidable, promote, jit_debug, assert_green, AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, - isconstant, isvirtual, promote_string, set_param) + isconstant, isvirtual, promote_string, set_param, record_known_class) from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype @@ -3585,7 +3585,8 @@ self.interp_operations(f, [5], translationoptions=translationoptions) - def test_annotation_gives_knowledge_to_tracer(self): + def test_annotation_gives_class_knowledge_to_tracer(self): + py.test.skip("disabled") class Base(object): pass class A(Base): @@ -3645,6 +3646,70 @@ # here it works again self.check_operations_history(guard_class=0, record_known_class=1) + def test_give_class_knowledge_to_tracer_explicitly(self): + from pypy.rpython.lltypesystem.lloperation import llop + class Base(object): + def f(self): + raise NotImplementedError + def g(self): + raise NotImplementedError + class A(Base): + def f(self): + return self.a + def g(self): + return self.a + 1 + class B(Base): + def f(self): + return self.b + def g(self): + return self.b + 1 + class C(B): + def f(self): + self.c += 1 + return self.c + def g(self): + return self.c + 1 + @dont_look_inside + def make(x): + if x > 0: + a = 
A() + a.a = x + 1 + elif x < 0: + a = B() + a.b = -x + else: + a = C() + a.c = 10 + return a + def f(x): + a = make(x) + if x > 0: + record_known_class(a, A) + z = a.f() + elif x < 0: + record_known_class(a, B) + z = a.f() + else: + record_known_class(a, C) + z = a.f() + return z + a.g() + res1 = f(6) + res2 = self.interp_operations(f, [6]) + assert res1 == res2 + self.check_operations_history(guard_class=0, record_known_class=1) + + res1 = f(-6) + res2 = self.interp_operations(f, [-6]) + assert res1 == res2 + # cannot use record_known_class here, because B has a subclass + self.check_operations_history(guard_class=1) + + res1 = f(0) + res2 = self.interp_operations(f, [0]) + assert res1 == res2 + # here it works again + self.check_operations_history(guard_class=0, record_known_class=1) + class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -147,6 +147,29 @@ self.check_resops({'jump': 2, 'int_lt': 2, 'setinteriorfield_raw': 4, 'getinteriorfield_raw': 8, 'int_add': 6, 'guard_true': 2}) + def test_array_getitem_uint8(self): + myjitdriver = JitDriver( + greens = [], + reds = ["n", "i", "s", "data"], + ) + def f(data, n): + i = s = 0 + while i < n: + myjitdriver.jit_merge_point(n=n, i=i, s=s, data=data) + s += rffi.cast(lltype.Signed, array_getitem(types.uchar, 1, data, 0, 0)) + i += 1 + return s + + def main(n): + with lltype.scoped_alloc(rffi.CArray(rffi.UCHAR), 1) as data: + data[0] = rffi.cast(rffi.UCHAR, 200) + return f(data, n) + + assert self.meta_interp(main, [10]) == 2000 + self.check_resops({'jump': 2, 'int_lt': 2, 'getinteriorfield_raw': 2, + 'guard_true': 2, 'int_add': 4}) + + class TestFfiCall(FfiCallTests, LLJitMixin): supports_all = False diff --git a/pypy/jit/metainterp/test/test_math.py b/pypy/jit/metainterp/test/test_math.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/test_math.py @@ -0,0 +1,47 @@ +import math +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from pypy.rlib.rfloat import isinf, isnan, INFINITY, NAN + +class MathTests: + + def test_math_sqrt(self): + def f(x): + try: + return math.sqrt(x) + except ValueError: + return -INFINITY + + res = self.interp_operations(f, [0.0]) + assert res == 0.0 + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [25.0]) + assert res == 5.0 + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [-0.0]) + assert str(res) == '-0.0' + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [1000000.0]) + assert res == 1000.0 + self.check_operations_history(call_pure=1) + # + res = self.interp_operations(f, [-1.0]) + assert res == -INFINITY + self.check_operations_history(call_pure=0) + # + res = self.interp_operations(f, [INFINITY]) + assert isinf(res) and not isnan(res) and res > 0.0 + self.check_operations_history(call_pure=0) + # + res = self.interp_operations(f, [NAN]) + assert isnan(res) and not isinf(res) + self.check_operations_history(call_pure=0) + + +class TestOOtype(MathTests, OOJitMixin): + pass + +class TestLLtype(MathTests, LLJitMixin): + pass diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -1238,6 +1238,31 @@ self.meta_interp(portal, [0, 0, 
0], inline=True) self.check_resops(call_may_force=0, call=0) + def test_dont_repeatedly_trace_from_the_same_guard(self): + driver = JitDriver(greens = [], reds = ['level', 'i']) + + def portal(level): + if level == 0: + i = -10 + else: + i = 0 + # + while True: + driver.jit_merge_point(level=level, i=i) + if level == 25: + return 42 + i += 1 + if i <= 0: # <- guard + continue # first make a loop + else: + # then we fail the guard above, doing a recursive call, + # which will itself fail the same guard above, and so on + return portal(level + 1) + + self.meta_interp(portal, [0]) + self.check_loop_count_at_most(2) # and not, e.g., 24 + + class TestLLtype(RecursiveTests, LLJitMixin): pass diff --git a/pypy/module/_bisect/test/test_ztranslation.py b/pypy/module/_bisect/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/_bisect/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_checkmodule(): + checkmodule('_bisect') diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -67,10 +67,7 @@ if self.unicodedata_handler: return self.unicodedata_handler try: - w_builtin = space.getbuiltinmodule('__builtin__') - w_import = space.getattr(w_builtin, space.wrap("__import__")) - w_unicodedata = space.call_function(w_import, - space.wrap("unicodedata")) + w_unicodedata = space.getbuiltinmodule("unicodedata") w_getcode = space.getattr(w_unicodedata, space.wrap("_get_code")) except OperationError: return None diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -13,12 +13,14 @@ class defaultdict(dict): def __init__(self, *args, **kwds): - self.default_factory = None - if 'default_factory' in kwds: - self.default_factory = kwds.pop('default_factory') - elif len(args) > 0 and (callable(args[0]) or args[0] is None): - self.default_factory = args[0] + if len(args) > 0: + default_factory = args[0] args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + self.default_factory = default_factory super(defaultdict, self).__init__(*args, **kwds) def __missing__(self, key): @@ -36,7 +38,7 @@ recurse.remove(id(self)) def copy(self): - return type(self)(self, default_factory=self.default_factory) + return type(self)(self.default_factory, self) def __copy__(self): return self.copy() diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -19,11 +19,41 @@ def test_keyerror_without_factory(self): from _collections import defaultdict - d1 = defaultdict() - for key in ['foo', (1,)]: - try: - d1[key] - except KeyError, err: - assert err.args[0] == key - else: - assert 0, "expected KeyError" + for d1 in [defaultdict(), defaultdict(None)]: + for key in ['foo', (1,)]: + try: + d1[key] + except KeyError, err: + assert err.args[0] == key + else: + assert 0, "expected KeyError" + + def test_noncallable(self): + from _collections import defaultdict + raises(TypeError, defaultdict, [('a', 5)]) + d = defaultdict(None, [('a', 5)]) + assert d.items() == [('a', 5)] + + def test_kwds(self): + from 
_collections import defaultdict + d = defaultdict(default_factory=5) + assert d.keys() == ['default_factory'] + + def test_copy(self): + import _collections + def f(): + return 42 + d = _collections.defaultdict(f, {2: 3}) + # + d1 = d.copy() + assert type(d1) is _collections.defaultdict + assert len(d1) == 1 + assert d1[2] == 3 + assert d1[3] == 42 + # + import copy + d2 = copy.deepcopy(d) + assert type(d2) is _collections.defaultdict + assert len(d2) == 1 + assert d2[2] == 3 + assert d2[3] == 42 diff --git a/pypy/module/_ffi/interp_ffi.py b/pypy/module/_ffi/interp_ffi.py --- a/pypy/module/_ffi/interp_ffi.py +++ b/pypy/module/_ffi/interp_ffi.py @@ -457,14 +457,14 @@ # ======================================================================== class W_CDLL(Wrappable): - def __init__(self, space, name): + def __init__(self, space, name, mode): self.space = space if name is None: self.name = "" else: self.name = name try: - self.cdll = libffi.CDLL(name) + self.cdll = libffi.CDLL(name, mode) except DLOpenError, e: raise operationerrfmt(space.w_OSError, '%s: %s', self.name, e.msg or 'unspecified error') @@ -492,9 +492,9 @@ "No symbol %s found in library %s", name, self.name) return space.wrap(address_as_uint) - at unwrap_spec(name='str_or_None') -def descr_new_cdll(space, w_type, name): - return space.wrap(W_CDLL(space, name)) + at unwrap_spec(name='str_or_None', mode=int) +def descr_new_cdll(space, w_type, name, mode=-1): + return space.wrap(W_CDLL(space, name, mode)) W_CDLL.typedef = TypeDef( @@ -509,6 +509,6 @@ def get_libc(space): from pypy.rlib.clibffi import get_libc_name try: - return space.wrap(W_CDLL(space, get_libc_name())) + return space.wrap(W_CDLL(space, get_libc_name(), -1)) except OSError, e: raise wrap_oserror(space, e) diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -100,7 +100,6 @@ res, newbuf = self.do_recv_string( space, self.BUFFER_SIZE, maxlength) - res = intmask(res) # XXX why? try: if newbuf: return space.wrap(rffi.charpsize2str(newbuf, res)) @@ -117,7 +116,6 @@ res, newbuf = self.do_recv_string( space, length - offset, PY_SSIZE_T_MAX) - res = intmask(res) # XXX why? try: if newbuf: raise BufferTooShort(space, space.wrap( @@ -148,7 +146,6 @@ res, newbuf = self.do_recv_string( space, self.BUFFER_SIZE, PY_SSIZE_T_MAX) - res = intmask(res) # XXX why? 
try: if newbuf: w_received = space.wrap(rffi.charpsize2str(newbuf, res)) @@ -413,7 +410,7 @@ self.buffer, min(self.BUFFER_SIZE, buflength), read_ptr, rffi.NULL) if result: - return read_ptr[0], lltype.nullptr(rffi.CCHARP.TO) + return intmask(read_ptr[0]), lltype.nullptr(rffi.CCHARP.TO) err = rwin32.GetLastError() if err == ERROR_BROKEN_PIPE: @@ -476,7 +473,7 @@ block = timeout < 0 if not block: # XXX does not check for overflow - deadline = _GetTickCount() + int(1000 * timeout + 0.5) + deadline = intmask(_GetTickCount()) + int(1000 * timeout + 0.5) else: deadline = 0 @@ -500,7 +497,7 @@ return True if not block: - now = _GetTickCount() + now = intmask(_GetTickCount()) if now > deadline: return False diff = deadline - now diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py --- a/pypy/module/_multiprocessing/interp_semaphore.py +++ b/pypy/module/_multiprocessing/interp_semaphore.py @@ -235,7 +235,7 @@ elif timeout >= 0.5 * rwin32.INFINITE: # 25 days raise OperationError(space.w_OverflowError, space.wrap("timeout is too large")) - full_msecs = int(timeout + 0.5) + full_msecs = r_uint(int(timeout + 0.5)) # check whether we can acquire without blocking res = rwin32.WaitForSingleObject(self.handle, 0) @@ -243,7 +243,7 @@ if res != rwin32.WAIT_TIMEOUT: return True - msecs = r_uint(full_msecs) + msecs = full_msecs start = _GetTickCount() while True: @@ -269,7 +269,7 @@ ticks = _GetTickCount() if r_uint(ticks - start) >= full_msecs: return False - msecs = r_uint(full_msecs - (ticks - start)) + msecs = full_msecs - r_uint(ticks - start) # handle result if res != rwin32.WAIT_TIMEOUT: diff --git a/pypy/module/_multiprocessing/test/test_semaphore.py b/pypy/module/_multiprocessing/test/test_semaphore.py --- a/pypy/module/_multiprocessing/test/test_semaphore.py +++ b/pypy/module/_multiprocessing/test/test_semaphore.py @@ -70,8 +70,10 @@ maxvalue = 1 sem = SemLock(kind, value, maxvalue) - assert sem.acquire() - assert not sem.acquire(timeout=0.1) + res = sem.acquire() + assert res == True + res = sem.acquire(timeout=0.1) + assert res == False def test_semaphore_rebuild(self): from _multiprocessing import SemLock diff --git a/pypy/module/_random/test/test_ztranslation.py b/pypy/module/_random/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/_random/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_checkmodule(): + checkmodule('_random') diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -543,6 +543,7 @@ from pypy.rlib.rwin32 import GetLastError return space.wrap(GetLastError()) -def set_last_error(space, w_error): + at unwrap_spec(error=int) +def set_last_error(space, error): from pypy.rlib.rwin32 import SetLastError - SetLastError(space.uint_w(w_error)) + SetLastError(error) diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -1,7 +1,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.module._socket.interp_socket import converted_error, W_RSocket from pypy.rlib import rsocket -from pypy.rlib.rsocket import SocketError +from pypy.rlib.rsocket import SocketError, INVALID_SOCKET from pypy.interpreter.error import OperationError def gethostname(space): @@ -284,7 +284,7 @@ space.wrap(socktype), 
space.wrap(protocol), space.wrap(canonname), - addr.as_object(-1, space)]) # -1 as per cpython + addr.as_object(INVALID_SOCKET, space)]) # -1 as per cpython for (family, socktype, protocol, canonname, addr) in lst] return space.newlist(lst1) diff --git a/pypy/module/cStringIO/test/test_ztranslation.py b/pypy/module/cStringIO/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/cStringIO/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_checkmodule(): + checkmodule('cStringIO') diff --git a/pypy/module/clr/boxing_rules.py b/pypy/module/clr/boxing_rules.py --- a/pypy/module/clr/boxing_rules.py +++ b/pypy/module/clr/boxing_rules.py @@ -43,11 +43,11 @@ def tocli(self): return box(self._value) -from pypy.objspace.fake.objspace import W_Object as W_Object_Fake -from pypy.rlib.nonconst import NonConstant +##from pypy.objspace.fake.objspace import W_Object as W_Object_Fake +##from pypy.rlib.nonconst import NonConstant -class __extend__(W_Object_Fake): - __metaclass__ = extendabletype +##class __extend__(W_Object_Fake): +## __metaclass__ = extendabletype - def tocli(self): - return NonConstant(None) +## def tocli(self): +## return NonConstant(None) diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -439,9 +439,6 @@ self.w_it = self.space.iter(self.space.next(self.w_iterables)) def next_w(self): - if not self.w_iterables: - # already stopped - raise OperationError(self.space.w_StopIteration, self.space.w_None) if not self.w_it: self._advance() try: diff --git a/pypy/module/itertools/test/test_ztranslation.py b/pypy/module/itertools/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/itertools/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_checkmodule(): + checkmodule('itertools') diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -5,10 +5,11 @@ applevel_name = 'numpypy' interpleveldefs = { - 'array': 'interp_numarray.NDimArray', + 'ndarray': 'interp_numarray.W_NDimArray', 'dtype': 'interp_dtype.W_Dtype', 'ufunc': 'interp_ufuncs.W_Ufunc', + 'array': 'interp_numarray.array', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', 'ones': 'interp_numarray.ones', @@ -16,8 +17,23 @@ 'fromstring': 'interp_support.fromstring', 'flatiter': 'interp_numarray.W_FlatIterator', - 'True_': 'space.w_True', - 'False_': 'space.w_False', + 'True_': 'types.Bool.True', + 'False_': 'types.Bool.False', + + 'generic': 'interp_boxes.W_GenericBox', + 'number': 'interp_boxes.W_NumberBox', + 'integer': 'interp_boxes.W_IntegerBox', + 'signedinteger': 'interp_boxes.W_SignedIntegerBox', + 'bool_': 'interp_boxes.W_BoolBox', + 'int8': 'interp_boxes.W_Int8Box', + 'int16': 'interp_boxes.W_Int16Box', + 'int32': 'interp_boxes.W_Int32Box', + 'int64': 'interp_boxes.W_Int64Box', + 'int_': 'interp_boxes.W_LongBox', + 'inexact': 'interp_boxes.W_InexactBox', + 'floating': 'interp_boxes.W_FloatingBox', + 'float32': 'interp_boxes.W_Float32Box', + 'float64': 'interp_boxes.W_Float64Box', } # ufuncs @@ -61,4 +77,5 @@ 'inf': 'app_numpy.inf', 'e': 'app_numpy.e', 'arange': 'app_numpy.arange', + 'reshape': 'app_numpy.reshape', } diff --git a/pypy/module/micronumpy/app_numpy.py 
b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -36,3 +36,40 @@ j += 1 i += step return arr + + +def reshape(a, shape): + '''reshape(a, newshape) + Gives a new shape to an array without changing its data. + + Parameters + ---------- + a : array_like + Array to be reshaped. + newshape : int or tuple of ints + The new shape should be compatible with the original shape. If + an integer, then the result will be a 1-D array of that length. + One shape dimension can be -1. In this case, the value is inferred + from the length of the array and remaining dimensions. + + Returns + ------- + reshaped_array : ndarray + This will be a new view object if possible; otherwise, it will + be a copy. + + + See Also + -------- + ndarray.reshape : Equivalent method. + + Notes + ----- + + It is not always possible to change the shape of an array without + copying the data. If you want an error to be raise if the data is copied, + you should assign the new shape to the shape attribute of the array +''' + if not hasattr(a, 'reshape'): + a = numpypy.array(a) + return a.reshape(shape) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -3,13 +3,16 @@ It should not be imported by the module itself """ +import re + from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root -from pypy.module.micronumpy.interp_dtype import W_Float64Dtype, W_BoolDtype +from pypy.module.micronumpy import interp_boxes +from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.module.micronumpy.interp_numarray import (Scalar, BaseArray, - descr_new_array, scalar_w, NDimArray) + scalar_w, W_NDimArray, array) from pypy.module.micronumpy import interp_ufuncs -from pypy.rlib.objectmodel import specialize -import re +from pypy.rlib.objectmodel import specialize, instantiate + class BogusBytecode(Exception): pass @@ -48,15 +51,12 @@ def __init__(self): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild - self.w_float64dtype = W_Float64Dtype(self) def issequence_w(self, w_obj): - return isinstance(w_obj, ListObject) or isinstance(w_obj, NDimArray) + return isinstance(w_obj, ListObject) or isinstance(w_obj, W_NDimArray) def isinstance_w(self, w_obj, w_tp): - if w_obj.tp == w_tp: - return True - return False + return w_obj.tp == w_tp def decode_index4(self, w_idx, size): if isinstance(w_idx, IntObject): @@ -97,8 +97,10 @@ fixedview = listview def float(self, w_obj): - assert isinstance(w_obj, FloatObject) - return w_obj + if isinstance(w_obj, FloatObject): + return w_obj + assert isinstance(w_obj, interp_boxes.W_GenericBox) + return self.float(w_obj.descr_float(self)) def float_w(self, w_obj): assert isinstance(w_obj, FloatObject) @@ -112,7 +114,10 @@ raise NotImplementedError def int(self, w_obj): - return w_obj + if isinstance(w_obj, IntObject): + return w_obj + assert isinstance(w_obj, interp_boxes.W_GenericBox) + return self.int(w_obj.descr_int(self)) def is_true(self, w_obj): assert isinstance(w_obj, BoolObject) @@ -135,6 +140,9 @@ assert isinstance(what, tp) return what + def allocate_instance(self, klass, w_subtype): + return instantiate(klass) + def len_w(self, w_obj): if isinstance(w_obj, ListObject): return len(w_obj.items) @@ -247,7 +255,7 @@ w_rhs = self.rhs.execute(interp) if not isinstance(w_lhs, BaseArray): # scalar - dtype = interp.space.fromcache(W_Float64Dtype) + dtype = 
get_dtype_cache(interp.space).w_float64dtype w_lhs = scalar_w(interp.space, dtype, w_lhs) assert isinstance(w_lhs, BaseArray) if self.name == '+': @@ -264,8 +272,9 @@ w_res = w_lhs.descr_getitem(interp.space, w_rhs) else: raise NotImplementedError - if not isinstance(w_res, BaseArray): - dtype = interp.space.fromcache(W_Float64Dtype) + if (not isinstance(w_res, BaseArray) and + not isinstance(w_res, interp_boxes.W_GenericBox)): + dtype = get_dtype_cache(interp.space).w_float64dtype w_res = scalar_w(interp.space, dtype, w_res) return w_res @@ -283,7 +292,7 @@ return space.wrap(self.v) def execute(self, interp): - return FloatObject(self.v) + return interp.space.wrap(self.v) class RangeConstant(Node): def __init__(self, v): @@ -291,10 +300,10 @@ def execute(self, interp): w_list = interp.space.newlist( - [interp.space.wrap(float(i)) for i in range(self.v)]) - dtype = interp.space.fromcache(W_Float64Dtype) - return descr_new_array(interp.space, None, w_list, w_dtype=dtype, - w_order=None) + [interp.space.wrap(float(i)) for i in range(self.v)] + ) + dtype = get_dtype_cache(interp.space).w_float64dtype + return array(interp.space, w_list, w_dtype=dtype, w_order=None) def __repr__(self): return 'Range(%s)' % self.v @@ -315,9 +324,8 @@ def execute(self, interp): w_list = self.wrap(interp.space) - dtype = interp.space.fromcache(W_Float64Dtype) - return descr_new_array(interp.space, None, w_list, w_dtype=dtype, - w_order=None) + dtype = get_dtype_cache(interp.space).w_float64dtype + return array(interp.space, w_list, w_dtype=dtype, w_order=None) def __repr__(self): return "[" + ", ".join([repr(item) for item in self.items]) + "]" @@ -384,9 +392,11 @@ if isinstance(w_res, BaseArray): return w_res if isinstance(w_res, FloatObject): - dtype = interp.space.fromcache(W_Float64Dtype) + dtype = get_dtype_cache(interp.space).w_float64dtype elif isinstance(w_res, BoolObject): - dtype = interp.space.fromcache(W_BoolDtype) + dtype = get_dtype_cache(interp.space).w_booldtype + elif isinstance(w_res, interp_boxes.W_GenericBox): + dtype = w_res.get_dtype(interp.space) else: dtype = None return scalar_w(interp.space, dtype, w_res) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_boxes.py @@ -0,0 +1,267 @@ +from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import TypeDef +from pypy.objspace.std.floattype import float_typedef +from pypy.objspace.std.inttype import int_typedef +from pypy.rlib.rarithmetic import LONG_BIT +from pypy.tool.sourcetools import func_with_new_name + + +MIXIN_64 = (int_typedef,) if LONG_BIT == 64 else () + +def new_dtype_getter(name): + def get_dtype(space): + from pypy.module.micronumpy.interp_dtype import get_dtype_cache + return getattr(get_dtype_cache(space), "w_%sdtype" % name) + def new(space, w_subtype, w_value): + dtype = get_dtype(space) + return dtype.itemtype.coerce_subtype(space, w_subtype, w_value) + return func_with_new_name(new, name + "_box_new"), staticmethod(get_dtype) + +class PrimitiveBox(object): + _mixin_ = True + + def __init__(self, value): + self.value = value + + def convert_to(self, dtype): + return dtype.box(self.value) + +class W_GenericBox(Wrappable): + _attrs_ = () + + def descr__new__(space, w_subtype, __args__): + raise operationerrfmt(space.w_TypeError, "cannot create '%s' instances", + w_subtype.getname(space, 
'?') + ) + + def descr_str(self, space): + return self.descr_repr(space) + + def descr_repr(self, space): + return space.wrap(self.get_dtype(space).itemtype.str_format(self)) + + def descr_int(self, space): + box = self.convert_to(W_LongBox.get_dtype(space)) + assert isinstance(box, W_LongBox) + return space.wrap(box.value) + + def descr_float(self, space): + box = self.convert_to(W_Float64Box.get_dtype(space)) + assert isinstance(box, W_Float64Box) + return space.wrap(box.value) + + def descr_nonzero(self, space): + dtype = self.get_dtype(space) + return space.wrap(dtype.itemtype.bool(self)) + + def _binop_impl(ufunc_name): + def impl(self, space, w_other): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) + + def _binop_right_impl(ufunc_name): + def impl(self, space, w_other): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) + + def _unaryop_impl(ufunc_name): + def impl(self, space): + from pypy.module.micronumpy import interp_ufuncs + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) + + descr_add = _binop_impl("add") + descr_sub = _binop_impl("subtract") + descr_mul = _binop_impl("multiply") + descr_div = _binop_impl("divide") + descr_eq = _binop_impl("equal") + descr_ne = _binop_impl("not_equal") + descr_lt = _binop_impl("less") + descr_le = _binop_impl("less_equal") + descr_gt = _binop_impl("greater") + descr_ge = _binop_impl("greater_equal") + + descr_radd = _binop_right_impl("add") + descr_rmul = _binop_right_impl("multiply") + + descr_neg = _unaryop_impl("negative") + descr_abs = _unaryop_impl("absolute") + + +class W_BoolBox(W_GenericBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("bool") + +class W_NumberBox(W_GenericBox): + _attrs_ = () + +class W_IntegerBox(W_NumberBox): + pass + +class W_SignedIntegerBox(W_IntegerBox): + pass + +class W_UnsignedIntgerBox(W_IntegerBox): + pass + +class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int8") + +class W_UInt8Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint8") + +class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int16") + +class W_UInt16Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint16") + +class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int32") + +class W_UInt32Box(W_UnsignedIntgerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint32") + +class W_LongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("long") + +class W_ULongBox(W_UnsignedIntgerBox, PrimitiveBox): + pass + +class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("int64") + +class W_UInt64Box(W_UnsignedIntgerBox, PrimitiveBox): + pass + +class W_InexactBox(W_NumberBox): + _attrs_ = () + +class W_FloatingBox(W_InexactBox): + _attrs_ = () + +class W_Float32Box(W_FloatingBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("float32") + +class W_Float64Box(W_FloatingBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("float64") 
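The box classes above get their operators from the closure factories _binop_impl/_binop_right_impl/_unaryop_impl, which simply forward to the shared ufuncs. A standalone sketch of that idiom, with invented names and a plain dict standing in for the ufunc registry, to show the shape of the pattern rather than the interpreter code itself:

    def make_binop(name, ufuncs):
        def impl(self, other):
            return ufuncs[name](self.value, other.value)
        impl.__name__ = "binop_%s_impl" % name
        return impl

    class Box(object):
        def __init__(self, value):
            self.value = value

    ufuncs = {"add": lambda a, b: a + b, "multiply": lambda a, b: a * b}
    Box.__add__ = make_binop("add", ufuncs)
    Box.__mul__ = make_binop("multiply", ufuncs)

    assert Box(2) + Box(3) == 5
    assert Box(2) * Box(4) == 8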
+ + + +W_GenericBox.typedef = TypeDef("generic", + __module__ = "numpypy", + + __new__ = interp2app(W_GenericBox.descr__new__.im_func), + + __str__ = interp2app(W_GenericBox.descr_str), + __repr__ = interp2app(W_GenericBox.descr_repr), + __int__ = interp2app(W_GenericBox.descr_int), + __float__ = interp2app(W_GenericBox.descr_float), + __nonzero__ = interp2app(W_GenericBox.descr_nonzero), + + __add__ = interp2app(W_GenericBox.descr_add), + __sub__ = interp2app(W_GenericBox.descr_sub), + __mul__ = interp2app(W_GenericBox.descr_mul), + __div__ = interp2app(W_GenericBox.descr_div), + + __radd__ = interp2app(W_GenericBox.descr_add), + __rmul__ = interp2app(W_GenericBox.descr_rmul), + + __eq__ = interp2app(W_GenericBox.descr_eq), + __ne__ = interp2app(W_GenericBox.descr_ne), + __lt__ = interp2app(W_GenericBox.descr_lt), + __le__ = interp2app(W_GenericBox.descr_le), + __gt__ = interp2app(W_GenericBox.descr_gt), + __ge__ = interp2app(W_GenericBox.descr_ge), + + __neg__ = interp2app(W_GenericBox.descr_neg), + __abs__ = interp2app(W_GenericBox.descr_abs), +) + +W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_BoolBox.descr__new__.im_func), +) + +W_NumberBox.typedef = TypeDef("number", W_GenericBox.typedef, + __module__ = "numpypy", +) + +W_IntegerBox.typedef = TypeDef("integer", W_NumberBox.typedef, + __module__ = "numpypy", +) + +W_SignedIntegerBox.typedef = TypeDef("signedinteger", W_IntegerBox.typedef, + __module__ = "numpypy", +) + +W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int8Box.descr__new__.im_func), +) + +W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int16Box.descr__new__.im_func), +) + +W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int32Box.typedef = TypeDef("int32", W_SignedIntegerBox.typedef, + __module__ = "numpypy", + __new__ = interp2app(W_Int32Box.descr__new__.im_func), +) + +W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +if LONG_BIT == 32: + long_name = "int32" +elif LONG_BIT == 64: + long_name = "int64" +W_LongBox.typedef = TypeDef(long_name, (W_SignedIntegerBox.typedef, int_typedef,), + __module__ = "numpypy", +) + +W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, + __module__ = "numpypy", + __new__ = interp2app(W_Int64Box.descr__new__.im_func), +) + +W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntgerBox.typedef, + __module__ = "numpypy", +) + +W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, + __module__ = "numpypy", +) + +W_FloatingBox.typedef = TypeDef("floating", W_InexactBox.typedef, + __module__ = "numpypy", +) + +W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, + __module__ = "numpypy", + + __new__ = interp2app(W_Float32Box.descr__new__.im_func), +) + +W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), + __module__ = "numpypy", + + __new__ = interp2app(W_Float64Box.descr__new__.im_func), +) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ 
b/pypy/module/micronumpy/interp_dtype.py @@ -1,16 +1,11 @@ -import functools -import math - from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app -from pypy.interpreter.typedef import TypeDef, interp_attrproperty, GetSetProperty -from pypy.module.micronumpy import signature -from pypy.objspace.std.floatobject import float2string -from pypy.rlib import rarithmetic, rfloat -from pypy.rlib.rarithmetic import LONG_BIT, widen -from pypy.rlib.objectmodel import specialize, enforceargs -from pypy.rlib.unroll import unrolling_iterable +from pypy.interpreter.typedef import (TypeDef, GetSetProperty, + interp_attrproperty, interp_attrproperty_w) +from pypy.module.micronumpy import types, signature, interp_boxes +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import LONG_BIT from pypy.rpython.lltypesystem import lltype, rffi @@ -19,523 +14,218 @@ BOOLLTR = "b" FLOATINGLTR = "f" + +VOID_STORAGE = lltype.Array(lltype.Char, hints={'nolength': True, 'render_as_void': True}) + class W_Dtype(Wrappable): - def __init__(self, space): - pass + _immutable_fields_ = ["itemtype", "num", "kind"] + + def __init__(self, itemtype, num, kind, name, char, w_box_type, alternate_constructors=[]): + self.signature = signature.BaseSignature() + self.itemtype = itemtype + self.num = num + self.kind = kind + self.name = name + self.char = char + self.w_box_type = w_box_type + self.alternate_constructors = alternate_constructors + + def malloc(self, length): + # XXX find out why test_zjit explodes with tracking of allocations + return lltype.malloc(VOID_STORAGE, self.itemtype.get_element_size() * length, + zero=True, flavor="raw", + track_allocation=False, add_memory_pressure=True + ) + + @specialize.argtype(1) + def box(self, value): + return self.itemtype.box(value) + + def coerce(self, space, w_item): + return self.itemtype.coerce(space, w_item) + + def getitem(self, storage, i): + return self.itemtype.read(storage, self.itemtype.get_element_size(), i, 0) + + def setitem(self, storage, i, box): + self.itemtype.store(storage, self.itemtype.get_element_size(), i, 0, box) + + def fill(self, storage, box, start, stop): + self.itemtype.fill(storage, self.itemtype.get_element_size(), box, start, stop, 0) def descr__new__(space, w_subtype, w_dtype): + cache = get_dtype_cache(space) + if space.is_w(w_dtype, space.w_None): - return space.fromcache(W_Float64Dtype) + return cache.w_float64dtype + elif space.isinstance_w(w_dtype, w_subtype): + return w_dtype elif space.isinstance_w(w_dtype, space.w_str): - dtype = space.str_w(w_dtype) - for alias, dtype_class in dtypes_by_alias: - if alias == dtype: - return space.fromcache(dtype_class) - elif isinstance(space.interpclass_w(w_dtype), W_Dtype): - return w_dtype - elif space.isinstance_w(w_dtype, space.w_type): - for typename, dtype_class in dtypes_by_apptype: - if space.is_w(getattr(space, "w_%s" % typename), w_dtype): - return space.fromcache(dtype_class) + name = space.str_w(w_dtype) + for dtype in cache.builtin_dtypes: + if dtype.name == name or dtype.char == name: + return dtype + else: + for dtype in cache.builtin_dtypes: + if w_dtype in dtype.alternate_constructors: + return dtype + if w_dtype is dtype.w_box_type: + return dtype raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + def descr_str(self, space): + return space.wrap(self.name) + def descr_repr(self, space): return space.wrap("dtype('%s')" % self.name) - def 
descr_str(self, space): - return space.wrap(self.name) + def descr_get_itemsize(self, space): + return space.wrap(self.itemtype.get_element_size()) def descr_get_shape(self, space): return space.newtuple([]) - -class BaseBox(object): - pass - -VOID_TP = lltype.Ptr(lltype.Array(lltype.Void, hints={'nolength': True, "uncast_on_llgraph": True})) - -def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype, - expected_size=None): - - class Box(BaseBox): - def __init__(self, val): - self.val = val - - def wrap(self, space): - val = self.val - if valtype is rarithmetic.r_singlefloat: - val = float(val) - return space.wrap(val) - - def convert_to(self, dtype): - return dtype.adapt_val(self.val) - Box.__name__ = "%sBox" % T._name - - TP = lltype.Ptr(lltype.Array(T, hints={'nolength': True})) - class W_LowLevelDtype(W_Dtype): - signature = signature.BaseSignature() - - def erase(self, storage): - return rffi.cast(VOID_TP, storage) - - def unerase(self, storage): - return rffi.cast(TP, storage) - - @enforceargs(None, valtype) - def box(self, value): - return Box(value) - - def unbox(self, box): - assert isinstance(box, Box) - return box.val - - def unwrap(self, space, w_item): - raise NotImplementedError - - def malloc(self, size): - # XXX find out why test_zjit explodes with tracking of allocations - return self.erase(lltype.malloc(TP.TO, size, - zero=True, flavor="raw", - track_allocation=False, add_memory_pressure=True - )) - - def getitem(self, storage, i): - return Box(self.unerase(storage)[i]) - - def setitem(self, storage, i, item): - self.unerase(storage)[i] = self.unbox(item) - - def setitem_w(self, space, storage, i, w_item): - self.setitem(storage, i, self.unwrap(space, w_item)) - - def fill(self, storage, item, start, stop): - storage = self.unerase(storage) - item = self.unbox(item) - for i in xrange(start, stop): - storage[i] = item - - @specialize.argtype(1) - def adapt_val(self, val): - return self.box(rffi.cast(TP.TO.OF, val)) - - W_LowLevelDtype.__name__ = "W_%sDtype" % name.capitalize() - W_LowLevelDtype.num = num - W_LowLevelDtype.kind = kind - W_LowLevelDtype.name = name - W_LowLevelDtype.aliases = aliases - W_LowLevelDtype.applevel_types = applevel_types - W_LowLevelDtype.num_bytes = rffi.sizeof(T) - if expected_size is not None: - assert W_LowLevelDtype.num_bytes == expected_size - return W_LowLevelDtype - - -def binop(func): - specialize.argtype(1, 2)(func) - @functools.wraps(func) - def impl(self, v1, v2): - return self.adapt_val(func(self, - self.for_computation(self.unbox(v1)), - self.for_computation(self.unbox(v2)), - )) - return impl - -def raw_binop(func): - specialize.argtype(1, 2)(func) - # Returns the result unwrapped. 
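W_Dtype.descr__new__ above resolves a dtype request in a fixed order: None means float64, an existing dtype is passed through, a string is matched against each builtin dtype's name and one-character code, and finally the box type and the alternate_constructors list are consulted. On a PyPy built from this branch the different spellings should therefore hand back the same cached dtype object; this is an illustration of the intent, not a test from the commit:

    import numpypy

    assert numpypy.dtype('int8') is numpypy.dtype('b')           # by name or by char
    assert numpypy.dtype(numpypy.int8) is numpypy.dtype('int8')  # by box type
    assert numpypy.dtype(float) is numpypy.dtype('float64')      # via alternate_constructors
    assert numpypy.dtype(None) is numpypy.dtype('float64')       # None defaults to float64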
- @functools.wraps(func) - def impl(self, v1, v2): - return func(self, - self.for_computation(self.unbox(v1)), - self.for_computation(self.unbox(v2)) - ) - return impl - -def unaryop(func): - specialize.argtype(1)(func) - @functools.wraps(func) - def impl(self, v): - return self.adapt_val(func(self, self.for_computation(self.unbox(v)))) - return impl - -class ArithmeticTypeMixin(object): - _mixin_ = True - - @binop - def add(self, v1, v2): - return v1 + v2 - @binop - def sub(self, v1, v2): - return v1 - v2 - @binop - def mul(self, v1, v2): - return v1 * v2 - - @unaryop - def pos(self, v): - return +v - @unaryop - def neg(self, v): - return -v - @unaryop - def abs(self, v): - return abs(v) - - @binop - def max(self, v1, v2): - return max(v1, v2) - @binop - def min(self, v1, v2): - return min(v1, v2) - - def bool(self, v): - return bool(self.for_computation(self.unbox(v))) - @raw_binop - def eq(self, v1, v2): - return v1 == v2 - @raw_binop - def ne(self, v1, v2): - return v1 != v2 - @raw_binop - def lt(self, v1, v2): - return v1 < v2 - @raw_binop - def le(self, v1, v2): - return v1 <= v2 - @raw_binop - def gt(self, v1, v2): - return v1 > v2 - @raw_binop - def ge(self, v1, v2): - return v1 >= v2 - - -class FloatArithmeticDtype(ArithmeticTypeMixin): - _mixin_ = True - - def unwrap(self, space, w_item): - return self.adapt_val(space.float_w(space.float(w_item))) - - def for_computation(self, v): - return float(v) - - def str_format(self, item): - return float2string(self.for_computation(self.unbox(item)), 'g', rfloat.DTSF_STR_PRECISION) - - @binop - def div(self, v1, v2): - # XXX this won't work after translation, probably requires ovfcheck - try: - return v1 / v2 - except ZeroDivisionError: - if v1 == v2 == 0.0: - return rfloat.NAN - return rfloat.copysign(rfloat.INFINITY, v1 * v2) - @binop - def mod(self, v1, v2): - return math.fmod(v1, v2) - @binop - def pow(self, v1, v2): - return math.pow(v1, v2) - - @unaryop - def sign(self, v): - if v == 0.0: - return 0.0 - return rfloat.copysign(1.0, v) - @unaryop - def reciprocal(self, v): - if v == 0.0: - return rfloat.copysign(rfloat.INFINITY, v) - return 1.0 / v - @unaryop - def fabs(self, v): - return math.fabs(v) - @unaryop - def floor(self, v): - return math.floor(v) - - @binop - def copysign(self, v1, v2): - return math.copysign(v1, v2) - @unaryop - def exp(self, v): - try: - return math.exp(v) - except OverflowError: - return rfloat.INFINITY - @unaryop - def sin(self, v): - return math.sin(v) - @unaryop - def cos(self, v): - return math.cos(v) - @unaryop - def tan(self, v): - return math.tan(v) - @unaryop - def arcsin(self, v): - if not -1.0 <= v <= 1.0: - return rfloat.NAN - return math.asin(v) - @unaryop - def arccos(self, v): - if not -1.0 <= v <= 1.0: - return rfloat.NAN - return math.acos(v) - @unaryop - def arctan(self, v): - return math.atan(v) - @unaryop - def arcsinh(self, v): - return math.asinh(v) - @unaryop - def arctanh(self, v): - if v == 1.0 or v == -1.0: - return math.copysign(rfloat.INFINITY, v) - if not -1.0 < v < 1.0: - return rfloat.NAN - return math.atanh(v) - @unaryop - def sqrt(self, v): - try: - return math.sqrt(v) - except ValueError: - return rfloat.NAN - -class IntegerArithmeticDtype(ArithmeticTypeMixin): - _mixin_ = True - - def unwrap(self, space, w_item): - return self.adapt_val(space.int_w(space.int(w_item))) - - def for_computation(self, v): - return widen(v) - - def str_format(self, item): - return str(widen(self.unbox(item))) - - @binop - def div(self, v1, v2): - if v2 == 0: - return 0 - return v1 / v2 - @binop 
- def mod(self, v1, v2): - return v1 % v2 - @binop - def pow(self, v1, v2): - res = 1 - while v2 > 0: - if v2 & 1: - res *= v1 - v2 >>= 1 - if v2 == 0: - break - v1 *= v1 - return res - - -class SignedIntegerArithmeticDtype(IntegerArithmeticDtype): - _mixin_ = True - - @unaryop - def sign(self, v): - if v > 0: - return 1 - elif v < 0: - return -1 - else: - assert v == 0 - return 0 - -class UnsignedIntegerArithmeticDtype(IntegerArithmeticDtype): - _mixin_ = True - - @unaryop - def sign(self, v): - return int(v != 0) - - -W_BoolDtype = create_low_level_dtype( - num = 0, kind = BOOLLTR, name = "bool", - aliases = ["?", "bool", "bool8"], - applevel_types = ["bool"], - T = lltype.Bool, - valtype = bool, -) -class W_BoolDtype(SignedIntegerArithmeticDtype, W_BoolDtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.is_true(w_item)) - - def str_format(self, item): - v = self.unbox(item) - return "True" if v else "False" - - def for_computation(self, v): - return int(v) - -W_Int8Dtype = create_low_level_dtype( - num = 1, kind = SIGNEDLTR, name = "int8", - aliases = ["b", "int8", "i1"], - applevel_types = [], - T = rffi.SIGNEDCHAR, - valtype = rffi.SIGNEDCHAR._type, - expected_size = 1, -) -class W_Int8Dtype(SignedIntegerArithmeticDtype, W_Int8Dtype): - pass - -W_UInt8Dtype = create_low_level_dtype( - num = 2, kind = UNSIGNEDLTR, name = "uint8", - aliases = ["B", "uint8", "I1"], - applevel_types = [], - T = rffi.UCHAR, - valtype = rffi.UCHAR._type, - expected_size = 1, -) -class W_UInt8Dtype(UnsignedIntegerArithmeticDtype, W_UInt8Dtype): - pass - -W_Int16Dtype = create_low_level_dtype( - num = 3, kind = SIGNEDLTR, name = "int16", - aliases = ["h", "int16", "i2"], - applevel_types = [], - T = rffi.SHORT, - valtype = rffi.SHORT._type, - expected_size = 2, -) -class W_Int16Dtype(SignedIntegerArithmeticDtype, W_Int16Dtype): - pass - -W_UInt16Dtype = create_low_level_dtype( - num = 4, kind = UNSIGNEDLTR, name = "uint16", - aliases = ["H", "uint16", "I2"], - applevel_types = [], - T = rffi.USHORT, - valtype = rffi.USHORT._type, - expected_size = 2, -) -class W_UInt16Dtype(UnsignedIntegerArithmeticDtype, W_UInt16Dtype): - pass - -W_Int32Dtype = create_low_level_dtype( - num = 5, kind = SIGNEDLTR, name = "int32", - aliases = ["i", "int32", "i4"], - applevel_types = [], - T = rffi.INT, - valtype = rffi.INT._type, - expected_size = 4, -) -class W_Int32Dtype(SignedIntegerArithmeticDtype, W_Int32Dtype): - pass - -W_UInt32Dtype = create_low_level_dtype( - num = 6, kind = UNSIGNEDLTR, name = "uint32", - aliases = ["I", "uint32", "I4"], - applevel_types = [], - T = rffi.UINT, - valtype = rffi.UINT._type, - expected_size = 4, -) -class W_UInt32Dtype(UnsignedIntegerArithmeticDtype, W_UInt32Dtype): - pass - -W_Int64Dtype = create_low_level_dtype( - num = 9, kind = SIGNEDLTR, name = "int64", - aliases = ["q", "int64", "i8"], - applevel_types = ["long"], - T = rffi.LONGLONG, - valtype = rffi.LONGLONG._type, - expected_size = 8, -) -class W_Int64Dtype(SignedIntegerArithmeticDtype, W_Int64Dtype): - pass - -W_UInt64Dtype = create_low_level_dtype( - num = 10, kind = UNSIGNEDLTR, name = "uint64", - aliases = ["Q", "uint64", "I8"], - applevel_types = [], - T = rffi.ULONGLONG, - valtype = rffi.ULONGLONG._type, - expected_size = 8, -) -class W_UInt64Dtype(UnsignedIntegerArithmeticDtype, W_UInt64Dtype): - pass - -if LONG_BIT == 32: - long_dtype = W_Int32Dtype - ulong_dtype = W_UInt32Dtype -elif LONG_BIT == 64: - long_dtype = W_Int64Dtype - ulong_dtype = W_UInt64Dtype -else: - assert False - -class 
W_LongDtype(long_dtype): - num = 7 - aliases = ["l"] - applevel_types = ["int"] - -class W_ULongDtype(ulong_dtype): - num = 8 - aliases = ["L"] - -W_Float32Dtype = create_low_level_dtype( - num = 11, kind = FLOATINGLTR, name = "float32", - aliases = ["f", "float32", "f4"], - applevel_types = [], - T = lltype.SingleFloat, - valtype = rarithmetic.r_singlefloat, - expected_size = 4, -) -class W_Float32Dtype(FloatArithmeticDtype, W_Float32Dtype): - pass - -W_Float64Dtype = create_low_level_dtype( - num = 12, kind = FLOATINGLTR, name = "float64", - aliases = ["d", "float64", "f8"], - applevel_types = ["float"], - T = lltype.Float, - valtype = float, - expected_size = 8, -) -class W_Float64Dtype(FloatArithmeticDtype, W_Float64Dtype): - pass - -ALL_DTYPES = [ - W_BoolDtype, - W_Int8Dtype, W_UInt8Dtype, W_Int16Dtype, W_UInt16Dtype, - W_Int32Dtype, W_UInt32Dtype, W_LongDtype, W_ULongDtype, - W_Int64Dtype, W_UInt64Dtype, - W_Float32Dtype, W_Float64Dtype, -] - -dtypes_by_alias = unrolling_iterable([ - (alias, dtype) - for dtype in ALL_DTYPES - for alias in dtype.aliases -]) -dtypes_by_apptype = unrolling_iterable([ - (apptype, dtype) - for dtype in ALL_DTYPES - for apptype in dtype.applevel_types -]) -dtypes_by_num_bytes = unrolling_iterable(sorted([ - (dtype.num_bytes, dtype) - for dtype in ALL_DTYPES -])) - W_Dtype.typedef = TypeDef("dtype", - __module__ = "numpy", + __module__ = "numpypy", __new__ = interp2app(W_Dtype.descr__new__.im_func), + __str__= interp2app(W_Dtype.descr_str), __repr__ = interp2app(W_Dtype.descr_repr), - __str__ = interp2app(W_Dtype.descr_str), num = interp_attrproperty("num", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), - itemsize = interp_attrproperty("num_bytes", cls=W_Dtype), + type = interp_attrproperty_w("w_box_type", cls=W_Dtype), + itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), shape = GetSetProperty(W_Dtype.descr_get_shape), ) W_Dtype.typedef.acceptable_as_base_class = False + +class DtypeCache(object): + def __init__(self, space): + self.w_booldtype = W_Dtype( + types.Bool(), + num=0, + kind=BOOLLTR, + name="bool", + char="?", + w_box_type = space.gettypefor(interp_boxes.W_BoolBox), + alternate_constructors=[space.w_bool], + ) + self.w_int8dtype = W_Dtype( + types.Int8(), + num=1, + kind=SIGNEDLTR, + name="int8", + char="b", + w_box_type = space.gettypefor(interp_boxes.W_Int8Box) + ) + self.w_uint8dtype = W_Dtype( + types.UInt8(), + num=2, + kind=UNSIGNEDLTR, + name="uint8", + char="B", + w_box_type = space.gettypefor(interp_boxes.W_UInt8Box), + ) + self.w_int16dtype = W_Dtype( + types.Int16(), + num=3, + kind=SIGNEDLTR, + name="int16", + char="h", + w_box_type = space.gettypefor(interp_boxes.W_Int16Box), + ) + self.w_uint16dtype = W_Dtype( + types.UInt16(), + num=4, + kind=UNSIGNEDLTR, + name="uint16", + char="H", + w_box_type = space.gettypefor(interp_boxes.W_UInt16Box), + ) + self.w_int32dtype = W_Dtype( + types.Int32(), + num=5, + kind=SIGNEDLTR, + name="int32", + char="i", + w_box_type = space.gettypefor(interp_boxes.W_Int32Box), + ) + self.w_uint32dtype = W_Dtype( + types.UInt32(), + num=6, + kind=UNSIGNEDLTR, + name="uint32", + char="I", + w_box_type = space.gettypefor(interp_boxes.W_UInt32Box), + ) + if LONG_BIT == 32: + name = "int32" + elif LONG_BIT == 64: + name = "int64" + self.w_longdtype = W_Dtype( + types.Long(), + num=7, + kind=SIGNEDLTR, + name=name, + char="l", + w_box_type = space.gettypefor(interp_boxes.W_LongBox), + alternate_constructors=[space.w_int], + ) + self.w_ulongdtype = W_Dtype( + types.ULong(), + num=8, + 
kind=UNSIGNEDLTR, + name="u" + name, + char="L", + w_box_type = space.gettypefor(interp_boxes.W_ULongBox), + ) + self.w_int64dtype = W_Dtype( + types.Int64(), + num=9, + kind=SIGNEDLTR, + name="int64", + char="q", + w_box_type = space.gettypefor(interp_boxes.W_Int64Box), + alternate_constructors=[space.w_long], + ) + self.w_uint64dtype = W_Dtype( + types.UInt64(), + num=10, + kind=UNSIGNEDLTR, + name="uint64", + char="Q", + w_box_type = space.gettypefor(interp_boxes.W_UInt64Box), + ) + self.w_float32dtype = W_Dtype( + types.Float32(), + num=11, + kind=FLOATINGLTR, + name="float32", + char="f", + w_box_type = space.gettypefor(interp_boxes.W_Float32Box), + ) + self.w_float64dtype = W_Dtype( + types.Float64(), + num=12, + kind=FLOATINGLTR, + name="float64", + char="d", + w_box_type = space.gettypefor(interp_boxes.W_Float64Box), + alternate_constructors=[space.w_float], + ) + + self.builtin_dtypes = [ + self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, + self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype, + self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype, + self.w_int64dtype, self.w_uint64dtype, self.w_float32dtype, + self.w_float64dtype + ] + self.dtypes_by_num_bytes = sorted( + (dtype.itemtype.get_element_size(), dtype) + for dtype in self.builtin_dtypes + ) + +def get_dtype_cache(space): + return space.fromcache(DtypeCache) \ No newline at end of file diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -98,47 +98,105 @@ endshape[i] = remainder[i] return endshape -def descr_new_array(space, w_subtype, w_item_or_iterable, w_dtype=None, - w_order=NoneNotWrapped): - # find scalar - if not space.issequence_w(w_item_or_iterable): - if space.is_w(w_dtype, space.w_None): - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, - w_item_or_iterable) - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - return scalar_w(space, dtype, w_item_or_iterable) - if w_order is None: - order = 'C' +def get_shape_from_iterable(space, old_size, w_iterable): + new_size = 0 + new_shape = [] + if space.isinstance_w(w_iterable, space.w_int): + new_size = space.int_w(w_iterable) + if new_size < 0: + new_size = old_size + new_shape = [new_size] else: - order = space.str_w(w_order) - if order != 'C': # or order != 'F': - raise operationerrfmt(space.w_ValueError, "Unknown order: %s", - order) - shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) - # they come back in C order - size = len(elems_w) - if space.is_w(w_dtype, space.w_None): - w_dtype = None - for w_elem in elems_w: - w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, - w_dtype) - if w_dtype is space.fromcache(interp_dtype.W_Float64Dtype): - break - if w_dtype is None: - w_dtype = space.w_None - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - arr = NDimArray(size, shape[:], dtype=dtype, order=order) - shapelen = len(shape) - arr_iter = arr.start_iter(arr.shape) - for i in range(len(elems_w)): - w_elem = elems_w[i] - dtype.setitem_w(space, arr.storage, arr_iter.offset, w_elem) - arr_iter = arr_iter.next(shapelen) - return arr + neg_dim = -1 + batch = space.listview(w_iterable) + new_size = 1 + if len(batch) < 1: + if old_size == 1: + # Scalars can have an empty size. 
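get_shape_from_iterable, whose body continues just below, implements the usual rule that at most one dimension may be -1 and is then inferred from the old size (raising ValueError when the sizes cannot match). In plain Python the inference step amounts to roughly this, with a hypothetical helper name:

    def infer_shape(old_size, shape):
        # the single -1 entry absorbs whatever is left of old_size
        known = 1
        for s in shape:
            if s != -1:
                known *= s
        return [old_size // known if s == -1 else s for s in shape]

    assert infer_shape(12, [3, -1]) == [3, 4]
    assert infer_shape(12, [-1, 2, 2]) == [3, 2, 2]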
+ new_size = 1 + else: + new_size = 0 + new_shape = [] + i = 0 + for elem in batch: + s = space.int_w(elem) + if s < 0: + if neg_dim >= 0: + raise OperationError(space.w_ValueError, space.wrap( + "can only specify one unknown dimension")) + s = 1 + neg_dim = i + new_size *= s + new_shape.append(s) + i += 1 + if neg_dim >= 0: + new_shape[neg_dim] = old_size / new_size + new_size *= new_shape[neg_dim] + if new_size != old_size: + raise OperationError(space.w_ValueError, + space.wrap("total size of new array must be unchanged")) + return new_shape + +# Recalculating strides. Find the steps that the iteration does for each +# dimension, given the stride and shape. Then try to create a new stride that +# fits the new shape, using those steps. If there is a shape/step mismatch +# (meaning that the realignment of elements crosses from one step into another) +# return None so that the caller can raise an exception. +def calc_new_strides(new_shape, old_shape, old_strides): + # Return the proper strides for new_shape, or None if the mapping crosses + # stepping boundaries + + # Assumes that prod(old_shape) == prod(new_shape), len(old_shape) > 1, and + # len(new_shape) > 0 + steps = [] + last_step = 1 + oldI = 0 + new_strides = [] + if old_strides[0] < old_strides[-1]: + for i in range(len(old_shape)): + steps.append(old_strides[i] / last_step) + last_step *= old_shape[i] + cur_step = steps[0] + n_new_elems_used = 1 + n_old_elems_to_use = old_shape[0] + for s in new_shape: + new_strides.append(cur_step * n_new_elems_used) + n_new_elems_used *= s + while n_new_elems_used > n_old_elems_to_use: + oldI += 1 + if steps[oldI] != steps[oldI - 1]: + return None + n_old_elems_to_use *= old_shape[oldI] + if n_new_elems_used == n_old_elems_to_use: + oldI += 1 + if oldI >= len(old_shape): + break + cur_step = steps[oldI] + n_old_elems_to_use *= old_shape[oldI] + else: + for i in range(len(old_shape) - 1, -1, -1): + steps.insert(0, old_strides[i] / last_step) + last_step *= old_shape[i] + cur_step = steps[-1] + n_new_elems_used = 1 + oldI = -1 + n_old_elems_to_use = old_shape[-1] + for i in range(len(new_shape) - 1, -1, -1): + s = new_shape[i] + new_strides.insert(0, cur_step * n_new_elems_used) + n_new_elems_used *= s + while n_new_elems_used > n_old_elems_to_use: + oldI -= 1 + if steps[oldI] != steps[oldI + 1]: + return None + n_old_elems_to_use *= old_shape[oldI] + if n_new_elems_used == n_old_elems_to_use: + oldI -= 1 + if oldI < -len(old_shape): + break + cur_step = steps[oldI] + n_old_elems_to_use *= old_shape[oldI] + return new_strides # Iterators for arrays # -------------------- @@ -378,6 +436,13 @@ def add_invalidates(self, other): self.invalidates.append(other) + def descr__new__(space, w_subtype, w_size, w_dtype=None): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + size, shape = _find_size_and_shape(space, w_size) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) + def _unaryop_impl(ufunc_name): def impl(self, space): return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) @@ -451,8 +516,8 @@ self=self, dtype=dtype, i=i, result=result, idx=idx, cur_best=cur_best) - new_best = getattr(dtype, op_name)(cur_best, self.eval(i)) - if dtype.ne(new_best, cur_best): + new_best = getattr(dtype.itemtype, op_name)(cur_best, self.eval(i)) + if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best i = i.next(shapelen) @@ -462,8 +527,7 @@ size = self.find_size() if size == 0: raise 
OperationError(space.w_ValueError, - space.wrap("Can't call %s on zero-size arrays" \ - % op_name)) + space.wrap("Can't call %s on zero-size arrays" % op_name)) return space.wrap(loop(self)) return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) @@ -475,10 +539,11 @@ all_driver.jit_merge_point(signature=self.signature, shapelen=shapelen, self=self, dtype=dtype, i=i) - if not dtype.bool(self.eval(i)): + if not dtype.itemtype.bool(self.eval(i)): return False i = i.next(shapelen) return True + def descr_all(self, space): return space.wrap(self._all()) @@ -490,10 +555,11 @@ any_driver.jit_merge_point(signature=self.signature, shapelen=shapelen, self=self, dtype=dtype, i=i) - if dtype.bool(self.eval(i)): + if dtype.itemtype.bool(self.eval(i)): return True i = i.next(shapelen) return False + def descr_any(self, space): return space.wrap(self._any()) @@ -518,6 +584,12 @@ def descr_get_shape(self, space): return space.newtuple([space.wrap(i) for i in self.shape]) + def descr_set_shape(self, space, w_iterable): + concrete = self.get_concrete() + new_shape = get_shape_from_iterable(space, + concrete.find_size(), w_iterable) + concrete.setshape(space, new_shape) + def descr_get_size(self, space): return space.wrap(self.find_size()) @@ -542,8 +614,8 @@ res.append(')') else: concrete.to_str(space, 1, res, indent=' ') - if (dtype is not space.fromcache(interp_dtype.W_Float64Dtype) and - dtype is not space.fromcache(interp_dtype.W_Int64Dtype)) or \ + if (dtype is not interp_dtype.get_dtype_cache(space).w_float64dtype and + dtype is not interp_dtype.get_dtype_cache(space).w_int64dtype) or \ not self.find_size(): res.append(", dtype=" + dtype.name) res.append(")") @@ -612,7 +684,7 @@ start = False else: builder.append(spacer) - builder.append(dtype.str_format(self.getitem(item))) + builder.append(dtype.itemtype.str_format(self.getitem(item))) item += self.strides[0] # Add a comma only if comma is False - this prevents adding two # commas @@ -625,7 +697,7 @@ start = False else: builder.append(spacer) - builder.append(dtype.str_format(self.getitem(item))) + builder.append(dtype.itemtype.str_format(self.getitem(item))) item += self.strides[0] i += 1 else: @@ -642,11 +714,6 @@ def _index_of_single_item(self, space, w_idx): if space.isinstance_w(w_idx, space.w_int): idx = space.int_w(w_idx) - if not self.shape: - if idx != 0: - raise OperationError(space.w_IndexError, - space.wrap("index out of range")) - return 0 if idx < 0: idx = self.shape[0] + idx if idx < 0 or idx >= self.shape[0]: @@ -712,7 +779,7 @@ raise OperationError(space.w_IndexError, space.wrap( "0-d arrays can't be indexed")) item = concrete._index_of_single_item(space, w_idx) - return concrete.getitem(item).wrap(space) + return concrete.getitem(item) chunks = self._prepare_slice_args(space, w_idx) return space.wrap(self.create_slice(space, chunks)) @@ -765,27 +832,67 @@ strides += self.strides[s:] backstrides += self.backstrides[s:] new_sig = signature.Signature.find_sig([ - NDimSlice.signature, self.signature, + W_NDimSlice.signature, self.signature, ]) - return NDimSlice(self, new_sig, start, strides[:], backstrides[:], - shape[:]) + return W_NDimSlice(self, new_sig, start, strides[:], backstrides[:], + shape[:]) + + def descr_reshape(self, space, args_w): + """reshape(...) + a.reshape(shape) + + Returns an array containing the same data with a new shape. + + Refer to `numpypy.reshape` for full documentation. 
+ + See Also + -------- + numpypy.reshape : equivalent function +""" + if len(args_w) == 1: + w_shape = args_w[0] + else: + w_shape = space.newtuple(args_w) + concrete = self.get_concrete() + new_shape = get_shape_from_iterable(space, + concrete.find_size(), w_shape) + # Since we got to here, prod(new_shape) == self.size + new_strides = calc_new_strides(new_shape, + concrete.shape, concrete.strides) + if new_strides: + # We can create a view, strides somehow match up. + new_sig = signature.Signature.find_sig([ + W_NDimSlice.signature, self.signature + ]) + ndims = len(new_shape) + new_backstrides = [0] * ndims + for nd in range(ndims): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + arr = W_NDimSlice(self, new_sig, self.start, new_strides, + new_backstrides, new_shape) + else: + # Create copy with contiguous data + arr = concrete.copy() + arr.setshape(space, new_shape) + return arr def descr_mean(self, space): - return space.wrap(space.float_w(self.descr_sum(space)) / self.find_size()) + return space.div(self.descr_sum(space), space.wrap(self.find_size())) def descr_nonzero(self, space): if self.find_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) - return space.wrap(space.is_true(self.get_concrete().eval( - self.start_iter(self.shape)).wrap(space))) + return space.wrap(space.is_true( + self.get_concrete().eval(self.start_iter(self.shape)) + )) def descr_get_transpose(self, space): concrete = self.get_concrete() if len(concrete.shape) < 2: return space.wrap(self) new_sig = signature.Signature.find_sig([ - NDimSlice.signature, self.signature + W_NDimSlice.signature, self.signature ]) strides = [] backstrides = [] @@ -794,8 +901,8 @@ strides.append(concrete.strides[i]) backstrides.append(concrete.backstrides[i]) shape.append(concrete.shape[i]) - return space.wrap(NDimSlice(concrete, new_sig, self.start, strides[:], - backstrides[:], shape[:])) + return space.wrap(W_NDimSlice(concrete, new_sig, self.start, strides[:], + backstrides[:], shape[:])) def descr_get_flatiter(self, space): return space.wrap(W_FlatIterator(self)) @@ -809,22 +916,28 @@ def descr_debug_repr(self, space): return space.wrap(self.debug_repr()) + def descr_array_iface(self, space): + concrete = self.get_concrete() + storage = concrete.get_storage(space) + addr = rffi.cast(lltype.Signed, storage) + w_d = space.newdict() + space.setitem_str(w_d, 'data', space.newtuple([space.wrap(addr), + space.w_False])) + return w_d + def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): return w_obj elif space.issequence_w(w_obj): # Convert to array. 
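descr_reshape above returns a W_NDimSlice view whenever calc_new_strides finds strides that fit the new shape, and silently falls back to a contiguous copy otherwise, mirroring the module-level reshape() added to app_numpy.py earlier in this diff. A hedged application-level illustration, assuming a PyPy built from this branch:

    import numpypy

    a = numpypy.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
    b = a.reshape((2, 3))             # contiguous data, so this can stay a view
    assert b.shape == (2, 3)
    c = numpypy.reshape(a, (-1, 2))   # -1 is inferred from the total size
    assert c.shape == (3, 2)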
- w_obj = space.call_function(space.gettypefor(BaseArray), w_obj) - assert isinstance(w_obj, BaseArray) - return w_obj + return array(space, w_obj, w_order=None) else: # If it's a scalar dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) return scalar_w(space, dtype, w_obj) def scalar_w(space, dtype, w_obj): - assert isinstance(dtype, interp_dtype.W_Dtype) - return Scalar(dtype, dtype.unwrap(space, w_obj)) + return Scalar(dtype, dtype.coerce(space, w_obj)) class Scalar(BaseArray): """ @@ -835,6 +948,7 @@ _attrs_ = ["dtype", "value", "shape"] def __init__(self, dtype, value): + self.shape = self.strides = [] BaseArray.__init__(self, [], 'C') self.dtype = dtype self.value = value @@ -858,7 +972,7 @@ return ConstantIterator() def to_str(self, space, comma, builder, indent=' ', use_ellipsis=False): - builder.append(self.dtype.str_format(self.value)) + builder.append(self.dtype.itemtype.str_format(self.value)) def copy(self): return Scalar(self.dtype, self.value) @@ -866,6 +980,14 @@ def debug_repr(self): return 'Scalar' + def setshape(self, space, new_shape): + # In order to get here, we already checked that prod(new_shape) == 1, + # so in order to have a consistent API, let it go through. + pass + + def get_storage(self, space): + raise OperationError(space.w_TypeError, space.wrap("Cannot get array interface on scalars in pypy")) + class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs @@ -884,7 +1006,7 @@ i = 0 signature = self.signature result_size = self.find_size() - result = NDimArray(result_size, self.shape, self.find_dtype()) + result = W_NDimArray(result_size, self.shape, self.find_dtype()) shapelen = len(self.shape) i = self.start_iter() ri = result.start_iter() @@ -1058,13 +1180,46 @@ return space.wrap(self.shape[0]) return space.wrap(1) + def setshape(self, space, new_shape): + if len(self.shape) < 1: + return + elif len(self.shape) < 2: + # TODO: this code could be refactored into calc_strides + # but then calc_strides would have to accept a stepping factor + strides = [] + backstrides = [] + s = self.strides[0] + if self.order == 'C': + new_shape.reverse() + for sh in new_shape: + strides.append(s) + backstrides.append(s * (sh - 1)) + s *= sh + if self.order == 'C': + strides.reverse() + backstrides.reverse() + new_shape.reverse() + self.strides = strides[:] + self.backstrides = backstrides[:] + self.shape = new_shape[:] + return + new_strides = calc_new_strides(new_shape, self.shape, self.strides) + if new_strides is None: + raise OperationError(space.w_AttributeError, space.wrap( + "incompatible shape for a non-contiguous array")) + new_backstrides = [0] * len(new_shape) + for nd in range(len(new_shape)): + new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + self.strides = new_strides[:] + self.backstrides = new_backstrides[:] + self.shape = new_shape[:] -class NDimSlice(ViewArray): +class W_NDimSlice(ViewArray): signature = signature.BaseSignature() def __init__(self, parent, signature, start, strides, backstrides, shape): - if isinstance(parent, NDimSlice): + if isinstance(parent, W_NDimSlice): parent = parent.parent ViewArray.__init__(self, parent, signature, strides, backstrides, shape) self.start = start @@ -1111,14 +1266,19 @@ return 'Slice(%s)' % self.parent.debug_repr() def copy(self): - array = NDimArray(self.size, self.shape[:], self.find_dtype()) + array = W_NDimArray(self.size, self.shape[:], self.find_dtype()) iter = self.start_iter() + a_iter = array.start_iter() while not iter.done(): - 
array.setitem(iter.offset, self.getitem(iter.offset)) + array.setitem(a_iter.offset, self.getitem(iter.offset)) iter = iter.next(len(self.shape)) + a_iter = a_iter.next(len(array.shape)) return array -class NDimArray(BaseArray): + def get_storage(self, space): + return self.parent.get_storage(space) + +class W_NDimArray(BaseArray): """ A class representing contiguous array. We know that each iteration by say ufunc will increase the data index by one """ @@ -1145,11 +1305,11 @@ return self.dtype.getitem(self.storage, iter.get_offset()) def copy(self): - array = NDimArray(self.size, self.shape[:], self.dtype, self.order) + array = W_NDimArray(self.size, self.shape[:], self.dtype, self.order) rffi.c_memcpy( - rffi.cast(rffi.VOIDP, array.storage), - rffi.cast(rffi.VOIDP, self.storage), - self.size * self.dtype.num_bytes + array.storage, + self.storage, + self.size * self.dtype.itemtype.get_element_size() ) return array @@ -1160,8 +1320,7 @@ "len() of unsized object")) def setitem_w(self, space, item, w_value): - self.invalidated() - self.dtype.setitem_w(space, self.storage, item, w_value) + return self.setitem(item, self.dtype.coerce(space, w_value)) def setitem(self, item, value): self.invalidated() @@ -1174,9 +1333,16 @@ return ArrayIterator(self.size) raise NotImplementedError # use ViewIterator simply, test it + def setshape(self, space, new_shape): + self.shape = new_shape + self.calc_strides(new_shape) + def debug_repr(self): return 'Array' + def get_storage(self, space): + return self.storage + def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) @@ -1193,20 +1359,62 @@ shape.append(item) return size, shape +def array(space, w_item_or_iterable, w_dtype=None, w_order=NoneNotWrapped): + # find scalar + if not space.issequence_w(w_item_or_iterable): + if space.is_w(w_dtype, space.w_None): + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, + w_item_or_iterable) + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + return scalar_w(space, dtype, w_item_or_iterable) + if w_order is None: + order = 'C' + else: + order = space.str_w(w_order) + if order != 'C': # or order != 'F': + raise operationerrfmt(space.w_ValueError, "Unknown order: %s", + order) + shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable) + # they come back in C order + size = len(elems_w) + if space.is_w(w_dtype, space.w_None): + w_dtype = None + for w_elem in elems_w: + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, + w_dtype) + if w_dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: + break + if w_dtype is None: + w_dtype = space.w_None + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + arr = W_NDimArray(size, shape[:], dtype=dtype, order=order) + shapelen = len(shape) + arr_iter = arr.start_iter(arr.shape) + for i in range(len(elems_w)): + w_elem = elems_w[i] + dtype.setitem(arr.storage, arr_iter.offset, dtype.coerce(space, w_elem)) + arr_iter = arr_iter.next(shapelen) + return arr + def zeros(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) size, shape = _find_size_and_shape(space, w_size) - return space.wrap(NDimArray(size, shape[:], dtype=dtype)) + return space.wrap(W_NDimArray(size, shape[:], dtype=dtype)) def ones(space, w_size, w_dtype=None): dtype = space.interp_w(interp_dtype.W_Dtype, 
space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) ) + size, shape = _find_size_and_shape(space, w_size) - arr = NDimArray(size, shape[:], dtype=dtype) - one = dtype.adapt_val(1) + arr = W_NDimArray(size, shape[:], dtype=dtype) + one = dtype.box(1) arr.dtype.fill(arr.storage, one, 0, size) return space.wrap(arr) @@ -1217,9 +1425,9 @@ return w_arr.descr_dot(space, w_obj2) BaseArray.typedef = TypeDef( - 'numarray', - __new__ = interp2app(descr_new_array), - + 'ndarray', + __module__ = "numpypy", + __new__ = interp2app(BaseArray.descr__new__.im_func), __len__ = interp2app(BaseArray.descr_len), __getitem__ = interp2app(BaseArray.descr_getitem), @@ -1254,9 +1462,11 @@ __repr__ = interp2app(BaseArray.descr_repr), __str__ = interp2app(BaseArray.descr_str), __debug_repr__ = interp2app(BaseArray.descr_debug_repr), + __array_interface__ = GetSetProperty(BaseArray.descr_array_iface), dtype = GetSetProperty(BaseArray.descr_get_dtype), - shape = GetSetProperty(BaseArray.descr_get_shape), + shape = GetSetProperty(BaseArray.descr_get_shape, + BaseArray.descr_set_shape), size = GetSetProperty(BaseArray.descr_get_size), T = GetSetProperty(BaseArray.descr_get_transpose), @@ -1274,6 +1484,7 @@ dot = interp2app(BaseArray.descr_dot), copy = interp2app(BaseArray.descr_copy), + reshape = interp2app(BaseArray.descr_reshape), ) @@ -1308,10 +1519,10 @@ def descr_next(self, space): if self.iter.done(): - raise OperationError(space.w_StopIteration, space.wrap('')) + raise OperationError(space.w_StopIteration, space.w_None) result = self.eval(self.iter) self.iter = self.iter.next(self.shapelen) - return result.wrap(space) + return result def descr_iter(self): return self diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,6 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.interp_dtype import W_Float64Dtype +from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.rlib.rstruct.runpack import runpack from pypy.rpython.lltypesystem import lltype, rffi @@ -9,7 +9,7 @@ @unwrap_spec(s=str) def fromstring(space, s): - from pypy.module.micronumpy.interp_numarray import NDimArray + from pypy.module.micronumpy.interp_numarray import W_NDimArray length = len(s) if length % FLOAT_SIZE == 0: @@ -18,8 +18,8 @@ raise OperationError(space.w_ValueError, space.wrap( "string length %d not divisable by %d" % (length, FLOAT_SIZE))) - dtype = space.fromcache(W_Float64Dtype) - a = NDimArray(number, [number], dtype=dtype) + dtype = get_dtype_cache(space).w_float64dtype + a = W_NDimArray(number, [number], dtype=dtype) start = 0 end = FLOAT_SIZE diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import interp_dtype, signature +from pypy.module.micronumpy import interp_boxes, interp_dtype, signature, types from pypy.rlib import jit from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name @@ -15,6 +15,7 @@ class W_Ufunc(Wrappable): _attrs_ = ["name", 
"promote_to_float", "promote_bools", "identity"] + _immutable_fields_ = ["promote_to_float", "promote_bools"] def __init__(self, name, promote_to_float, promote_bools, identity): self.name = name @@ -29,7 +30,7 @@ def descr_get_identity(self, space): if self.identity is None: return space.w_None - return self.identity.wrap(space) + return self.identity def descr_call(self, space, __args__): if __args__.keywords or len(__args__.arguments_w) < self.argcount: @@ -80,8 +81,7 @@ new_sig = signature.Signature.find_sig([ self.reduce_signature, obj.signature ]) - return self.reduce_loop(new_sig, shapelen, start, value, obj, - dtype).wrap(space) + return self.reduce_loop(new_sig, shapelen, start, value, obj, dtype) def reduce_loop(self, signature, shapelen, i, value, obj, dtype): while not i.done(): @@ -115,7 +115,7 @@ promote_bools=self.promote_bools, ) if isinstance(w_obj, Scalar): - return self.func(res_dtype, w_obj.value.convert_to(res_dtype)).wrap(space) + return self.func(res_dtype, w_obj.value.convert_to(res_dtype)) new_sig = signature.Signature.find_sig([self.signature, w_obj.signature]) w_res = Call1(new_sig, w_obj.shape, res_dtype, w_obj, w_obj.order) @@ -124,6 +124,7 @@ class W_Ufunc2(W_Ufunc): + _immutable_fields_ = ["comparison_func", "func"] argcount = 2 def __init__(self, func, name, promote_to_float=False, promote_bools=False, @@ -148,14 +149,14 @@ promote_bools=self.promote_bools, ) if self.comparison_func: - res_dtype = space.fromcache(interp_dtype.W_BoolDtype) + res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar): return self.func(calc_dtype, w_lhs.value.convert_to(calc_dtype), w_rhs.value.convert_to(calc_dtype) - ).wrap(space) + ) new_sig = signature.Signature.find_sig([ self.signature, w_lhs.signature, w_rhs.signature @@ -169,7 +170,7 @@ W_Ufunc.typedef = TypeDef("ufunc", - __module__ = "numpy", + __module__ = "numpypy", __call__ = interp2app(W_Ufunc.descr_call), __repr__ = interp2app(W_Ufunc.descr_repr), @@ -187,7 +188,7 @@ dt1, dt2 = dt2, dt1 # Some operations promote op(bool, bool) to return int8, rather than bool if promote_bools and (dt1.kind == dt2.kind == interp_dtype.BOOLLTR): - return space.fromcache(interp_dtype.W_Int8Dtype) + return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: return find_unaryop_result_dtype(space, dt2, promote_to_float=True) # If they're the same kind, choose the greater one. @@ -197,14 +198,14 @@ # Everything promotes to float, and bool promotes to everything. 
if dt2.kind == interp_dtype.FLOATINGLTR or dt1.kind == interp_dtype.BOOLLTR: # Float32 + 8-bit int = Float64 - if dt2.num == 11 and dt1.num_bytes >= 4: - return space.fromcache(interp_dtype.W_Float64Dtype) + if dt2.num == 11 and dt1.itemtype.get_element_size() >= 4: + return interp_dtype.get_dtype_cache(space).w_float64dtype return dt2 # for now this means mixing signed and unsigned if dt2.kind == interp_dtype.SIGNEDLTR: # if dt2 has a greater number of bytes, then just go with it - if dt1.num_bytes < dt2.num_bytes: + if dt1.itemtype.get_element_size() < dt2.itemtype.get_element_size(): return dt2 # we need to promote both dtypes dtypenum = dt2.num + 2 @@ -214,10 +215,11 @@ # UInt64 + signed = Float64 if dt2.num == 10: dtypenum += 1 - newdtype = interp_dtype.ALL_DTYPES[dtypenum] + newdtype = interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum] - if newdtype.num_bytes > dt2.num_bytes or newdtype.kind == interp_dtype.FLOATINGLTR: - return space.fromcache(newdtype) + if (newdtype.itemtype.get_element_size() > dt2.itemtype.get_element_size() or + newdtype.kind == interp_dtype.FLOATINGLTR): + return newdtype else: # we only promoted to long on 32-bit or to longlong on 64-bit # this is really for dealing with the Long and Ulong dtypes @@ -225,35 +227,42 @@ dtypenum += 2 else: dtypenum += 3 - return space.fromcache(interp_dtype.ALL_DTYPES[dtypenum]) + return interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum] def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): if promote_bools and (dt.kind == interp_dtype.BOOLLTR): - return space.fromcache(interp_dtype.W_Int8Dtype) + return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: if dt.kind == interp_dtype.FLOATINGLTR: return dt if dt.num >= 5: - return space.fromcache(interp_dtype.W_Float64Dtype) - for bytes, dtype in interp_dtype.dtypes_by_num_bytes: - if dtype.kind == interp_dtype.FLOATINGLTR and dtype.num_bytes > dt.num_bytes: - return space.fromcache(dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype + for bytes, dtype in interp_dtype.get_dtype_cache(space).dtypes_by_num_bytes: + if (dtype.kind == interp_dtype.FLOATINGLTR and + dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): + return dtype if promote_to_largest: if dt.kind == interp_dtype.BOOLLTR or dt.kind == interp_dtype.SIGNEDLTR: - return space.fromcache(interp_dtype.W_Int64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype elif dt.kind == interp_dtype.FLOATINGLTR: - return space.fromcache(interp_dtype.W_Float64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype elif dt.kind == interp_dtype.UNSIGNEDLTR: - return space.fromcache(interp_dtype.W_UInt64Dtype) + return interp_dtype.get_dtype_cache(space).w_uint64dtype else: assert False return dt def find_dtype_for_scalar(space, w_obj, current_guess=None): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - long_dtype = space.fromcache(interp_dtype.W_LongDtype) - int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype) + bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype + long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype + int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype + + if isinstance(w_obj, interp_boxes.W_GenericBox): + dtype = w_obj.get_dtype(space) + if current_guess is None: + return dtype + return find_binop_result_dtype(space, dtype, current_guess) if space.isinstance_w(w_obj, space.w_bool): if current_guess is None or current_guess is 
bool_dtype: @@ -269,20 +278,19 @@ current_guess is long_dtype or current_guess is int64_dtype): return int64_dtype return current_guess - return space.fromcache(interp_dtype.W_Float64Dtype) + return interp_dtype.get_dtype_cache(space).w_float64dtype def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func): if argcount == 1: def impl(res_dtype, value): - return getattr(res_dtype, op_name)(value) + return getattr(res_dtype.itemtype, op_name)(value) elif argcount == 2: + dtype_cache = interp_dtype.get_dtype_cache(space) def impl(res_dtype, lvalue, rvalue): - res = getattr(res_dtype, op_name)(lvalue, rvalue) + res = getattr(res_dtype.itemtype, op_name)(lvalue, rvalue) if comparison_func: - booldtype = space.fromcache(interp_dtype.W_BoolDtype) - assert isinstance(booldtype, interp_dtype.W_BoolDtype) - res = booldtype.box(res) + return dtype_cache.w_booldtype.box(res) return res return func_with_new_name(impl, ufunc_name) @@ -338,7 +346,7 @@ identity = extra_kwargs.get("identity") if identity is not None: - identity = space.fromcache(interp_dtype.W_LongDtype).adapt_val(identity) + identity = interp_dtype.get_dtype_cache(space).w_longdtype.box(identity) extra_kwargs["identity"] = identity func = ufunc_dtype_caller(space, ufunc_name, op_name, argcount, diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -1,6 +1,6 @@ from pypy.conftest import gettestobjspace -from pypy.module.micronumpy import interp_dtype -from pypy.module.micronumpy.interp_numarray import NDimArray, Scalar +from pypy.module.micronumpy.interp_dtype import get_dtype_cache +from pypy.module.micronumpy.interp_numarray import W_NDimArray, Scalar from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) @@ -11,9 +11,10 @@ class TestSignature(object): def test_binop_signature(self, space): - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + float64_dtype = get_dtype_cache(space).w_float64dtype + bool_dtype = get_dtype_cache(space).w_booldtype - ar = NDimArray(10, [10], dtype=float64_dtype) + ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_add(space, ar) v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) assert v1.signature is not v2.signature @@ -22,7 +23,7 @@ v4 = ar.descr_add(space, ar) assert v1.signature is v4.signature - bool_ar = NDimArray(10, [10], dtype=space.fromcache(interp_dtype.W_BoolDtype)) + bool_ar = W_NDimArray(10, [10], dtype=bool_dtype) v5 = ar.descr_add(space, bool_ar) assert v5.signature is not v1.signature assert v5.signature is not v2.signature @@ -30,7 +31,9 @@ assert v5.signature is v6.signature def test_slice_signature(self, space): - ar = NDimArray(10, [10], dtype=space.fromcache(interp_dtype.W_Float64Dtype)) + float64_dtype = get_dtype_cache(space).w_float64dtype + + ar = W_NDimArray(10, [10], dtype=float64_dtype) v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) assert v1.signature is v2.signature @@ -41,10 +44,10 @@ class TestUfuncCoerscion(object): def test_binops(self, space): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype) - int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype) - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + bool_dtype = get_dtype_cache(space).w_booldtype + int8_dtype = get_dtype_cache(space).w_int8dtype + 
int32_dtype = get_dtype_cache(space).w_int32dtype + float64_dtype = get_dtype_cache(space).w_float64dtype # Basic pairing assert find_binop_result_dtype(space, bool_dtype, bool_dtype) is bool_dtype @@ -62,19 +65,19 @@ assert find_binop_result_dtype(space, bool_dtype, float64_dtype, promote_to_float=True) is float64_dtype def test_unaryops(self, space): - bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) - int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype) - uint8_dtype = space.fromcache(interp_dtype.W_UInt8Dtype) - int16_dtype = space.fromcache(interp_dtype.W_Int16Dtype) - uint16_dtype = space.fromcache(interp_dtype.W_UInt16Dtype) - int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype) - uint32_dtype = space.fromcache(interp_dtype.W_UInt32Dtype) - long_dtype = space.fromcache(interp_dtype.W_LongDtype) - ulong_dtype = space.fromcache(interp_dtype.W_ULongDtype) - int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype) - uint64_dtype = space.fromcache(interp_dtype.W_UInt64Dtype) - float32_dtype = space.fromcache(interp_dtype.W_Float32Dtype) - float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + bool_dtype = get_dtype_cache(space).w_booldtype + int8_dtype = get_dtype_cache(space).w_int8dtype + uint8_dtype = get_dtype_cache(space).w_uint8dtype + int16_dtype = get_dtype_cache(space).w_int16dtype + uint16_dtype = get_dtype_cache(space).w_uint16dtype + int32_dtype = get_dtype_cache(space).w_int32dtype + uint32_dtype = get_dtype_cache(space).w_uint32dtype + long_dtype = get_dtype_cache(space).w_longdtype + ulong_dtype = get_dtype_cache(space).w_ulongdtype + int64_dtype = get_dtype_cache(space).w_int64dtype + uint64_dtype = get_dtype_cache(space).w_uint64dtype + float32_dtype = get_dtype_cache(space).w_float32dtype + float64_dtype = get_dtype_cache(space).w_float64dtype # Normal rules, everything returns itself assert find_unaryop_result_dtype(space, bool_dtype) is bool_dtype diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -1,6 +1,9 @@ +import py -import py -from pypy.module.micronumpy.compile import * +from pypy.module.micronumpy.compile import (numpy_compile, Assignment, + ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, + FunctionCall, FakeSpace) + class TestCompiler(object): def compile(self, code): @@ -106,7 +109,7 @@ c -> 3 """ interp = self.run(code) - assert interp.results[-1].value.val == 9 + assert interp.results[-1].value == 9 def test_array_getitem(self): code = """ @@ -115,7 +118,7 @@ a + b -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 3 + 6 + assert interp.results[0].value == 3 + 6 def test_range_getitem(self): code = """ @@ -123,7 +126,7 @@ r -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_sum(self): code = """ @@ -132,7 +135,7 @@ r """ interp = self.run(code) - assert interp.results[0].value.val == 15 + assert interp.results[0].value.value == 15 def test_array_write(self): code = """ @@ -141,7 +144,7 @@ a -> 3 """ interp = self.run(code) - assert interp.results[0].value.val == 15 + assert interp.results[0].value == 15 def test_min(self): interp = self.run(""" @@ -150,7 +153,7 @@ b = a + a min(b) """) - assert interp.results[0].value.val == -24 + assert interp.results[0].value.value == -24 def test_max(self): interp = self.run(""" @@ -159,7 +162,7 @@ b = a + a max(b) """) - assert 
interp.results[0].value.val == 256 + assert interp.results[0].value.value == 256 def test_slice(self): interp = self.run(""" @@ -167,7 +170,7 @@ b = a -> : b -> 3 """) - assert interp.results[0].value.val == 4 + assert interp.results[0].value == 4 def test_slice_step(self): interp = self.run(""" @@ -175,7 +178,7 @@ b = a -> ::2 b -> 3 """) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_setslice(self): interp = self.run(""" @@ -185,7 +188,7 @@ a[::3] = b a -> 3 """) - assert interp.results[0].value.val == 5 + assert interp.results[0].value == 5 def test_slice2(self): @@ -196,14 +199,14 @@ b = s1 + s2 b -> 3 """) - assert interp.results[0].value.val == 15 + assert interp.results[0].value == 15 def test_multidim_getitem(self): interp = self.run(""" a = [[1,2]] a -> 0 -> 1 """) - assert interp.results[0].value.val == 2 + assert interp.results[0].value == 2 def test_multidim_getitem_2(self): interp = self.run(""" @@ -211,7 +214,7 @@ b = a + a b -> 1 -> 1 """) - assert interp.results[0].value.val == 8 + assert interp.results[0].value == 8 def test_set_slice(self): interp = self.run(""" @@ -220,7 +223,7 @@ b[:] = a + a b -> 3 """) - assert interp.results[0].value.val == 6 + assert interp.results[0].value == 6 def test_set_slice2(self): interp = self.run(""" @@ -231,4 +234,4 @@ a[0:30:3] = c a -> 3 """) - assert interp.results[0].value.val == 11 + assert interp.results[0].value == 11 diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -30,7 +30,7 @@ def test_repr_str(self): from numpypy import dtype - assert repr(dtype) == "" + assert repr(dtype) == "" d = dtype('?') assert repr(d) == "dtype('bool')" assert str(d) == "bool" @@ -44,13 +44,13 @@ assert a[i] is True_ def test_copy_array_with_dtype(self): - from numpypy import array, False_, True_ + from numpypy import array, False_, True_, int64 a = array([0, 1, 2, 3], dtype=long) # int on 64-bit, long in 32-bit - assert isinstance(a[0], (int, long)) + assert isinstance(a[0], int64) b = a.copy() - assert isinstance(b[0], (int, long)) + assert isinstance(b[0], int64) a = array([0, 1, 2, 3], dtype=bool) assert a[0] is False_ @@ -72,17 +72,17 @@ assert a[i] is True_ def test_zeros_long(self): - from numpypy import zeros + from numpypy import zeros, int64 a = zeros(10, dtype=long) for i in range(10): - assert isinstance(a[i], (int, long)) + assert isinstance(a[i], int64) assert a[1] == 0 def test_ones_long(self): - from numpypy import ones + from numpypy import ones, int64 a = ones(10, dtype=long) for i in range(10): - assert isinstance(a[i], (int, long)) + assert isinstance(a[i], int64) assert a[1] == 1 def test_overflow(self): @@ -165,3 +165,106 @@ # You can't subclass dtype raises(TypeError, type, "Foo", (dtype,), {}) + +class AppTestTypes(BaseNumpyAppTest): + def test_abstract_types(self): + import numpypy as numpy + raises(TypeError, numpy.generic, 0) + raises(TypeError, numpy.number, 0) + raises(TypeError, numpy.integer, 0) + exc = raises(TypeError, numpy.signedinteger, 0) + assert str(exc.value) == "cannot create 'signedinteger' instances" + + raises(TypeError, numpy.floating, 0) + raises(TypeError, numpy.inexact, 0) + + def test_bool(self): + import numpypy as numpy + + assert numpy.bool_.mro() == [numpy.bool_, numpy.generic, object] + assert numpy.bool_(3) is numpy.True_ + assert numpy.bool_("") is numpy.False_ + assert type(numpy.True_) is 
type(numpy.False_) is numpy.bool_ + + class X(numpy.bool_): + pass + + assert type(X(True)) is numpy.bool_ + assert X(True) is numpy.True_ + + def test_int8(self): + import numpypy as numpy + + assert numpy.int8.mro() == [numpy.int8, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object] + + a = numpy.array([1, 2, 3], numpy.int8) + assert type(a[1]) is numpy.int8 + assert numpy.dtype("int8").type is numpy.int8 + + x = numpy.int8(128) + assert x == -128 + assert x != 128 + assert type(x) is numpy.int8 + assert repr(x) == "-128" + + assert type(int(x)) is int + assert int(x) == -128 + + def test_int16(self): + import numpypy as numpy + + x = numpy.int16(3) + assert x == 3 + + def test_int32(self): + import numpypy as numpy + + x = numpy.int32(23) + assert x == 23 + + def test_int_(self): + import numpypy as numpy + + assert numpy.int_ is numpy.dtype(int).type + assert numpy.int_.mro() == [numpy.int_, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object] + + def test_int64(self): + import sys + import numpypy as numpy + + if sys.maxint == 2 ** 63 -1: + assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object] + else: + assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object] + + assert numpy.dtype(numpy.int64).type is numpy.int64 + assert numpy.int64(3) == 3 + + def test_float32(self): + import numpypy as numpy + + assert numpy.float32.mro() == [numpy.float32, numpy.floating, numpy.inexact, numpy.number, numpy.generic, object] + + assert numpy.float32(12) == numpy.float64(12) + + def test_float64(self): + import numpypy as numpy + + assert numpy.float64.mro() == [numpy.float64, numpy.floating, numpy.inexact, numpy.number, numpy.generic, float, object] + + a = numpy.array([1, 2, 3], numpy.float64) + assert type(a[1]) is numpy.float64 + assert numpy.dtype(float).type is numpy.float64 + + assert numpy.float64(2.0) == 2.0 + + def test_subclass_type(self): + import numpypy as numpy + + class X(numpy.float64): + def m(self): + return self + 2 + + b = X(10) + assert type(b) is X + assert b.m() == 12 diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1,7 +1,7 @@ import py from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest -from pypy.module.micronumpy.interp_numarray import NDimArray, shape_agreement +from pypy.module.micronumpy.interp_numarray import W_NDimArray, shape_agreement from pypy.module.micronumpy import signature from pypy.interpreter.error import OperationError from pypy.conftest import gettestobjspace @@ -28,18 +28,18 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = NDimArray(100, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'F') assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = NDimArray(100, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(100, [10, 5, 3], MockDtype(), 'C') assert a.strides == [15, 3, 1] assert a.backstrides == [135, 12, 2] def test_create_slice_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] @@ -58,7 +58,7 @@ def test_create_slice_c(self): space = 
self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') s = a.create_slice(space, [(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] @@ -78,7 +78,7 @@ def test_slice_of_slice_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(5, 0, 0, 1)]) assert s.start == 5 s2 = s.create_slice(space, [(3, 0, 0, 1)]) @@ -96,7 +96,7 @@ def test_slice_of_slice_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') s = a.create_slice(space, [(5, 0, 0, 1)]) assert s.start == 15 * 5 s2 = s.create_slice(space, [(3, 0, 0, 1)]) @@ -114,7 +114,7 @@ def test_negative_step_f(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') s = a.create_slice(space, [(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] @@ -122,14 +122,14 @@ def test_negative_step_c(self): space = self.space - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), order='C') s = a.create_slice(space, [(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] def test_index_of_single_item_f(self): - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'F') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 + 2 * 10 + 2 * 50 s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) @@ -139,7 +139,7 @@ assert r == a._index_of_single_item(self.space, self.newtuple(1, 2, 1)) def test_index_of_single_item_c(self): - a = NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') + a = W_NDimArray(10 * 5 * 3, [10, 5, 3], MockDtype(), 'C') r = a._index_of_single_item(self.space, self.newtuple(1, 2, 2)) assert r == 1 * 3 * 5 + 2 * 3 + 2 s = a.create_slice(self.space, [(0, 10, 1, 10), (2, 0, 0, 1)]) @@ -158,8 +158,30 @@ assert shape_agreement(self.space, [5, 2], [4, 3, 5, 2]) == [4, 3, 5, 2] + def test_calc_new_strides(self): + from pypy.module.micronumpy.interp_numarray import calc_new_strides + assert calc_new_strides([2, 4], [4, 2], [4, 2]) == [8, 2] + assert calc_new_strides([2, 4, 3], [8, 3], [1, 16]) == [1, 2, 16] + assert calc_new_strides([2, 3, 4], [8, 3], [1, 16]) is None + assert calc_new_strides([24], [2, 4, 3], [48, 6, 1]) is None + assert calc_new_strides([24], [2, 4, 3], [24, 6, 2]) == [2] class AppTestNumArray(BaseNumpyAppTest): + def test_ndarray(self): + from numpypy import ndarray, array, dtype + + assert type(ndarray) is type + assert type(array) is not type + a = ndarray((2, 3)) + assert a.shape == (2, 3) + assert a.dtype == dtype(float) + + raises(TypeError, ndarray, [[1], [2], [3]]) + + a = ndarray(3, dtype=int) + assert a.shape == (3,) + assert a.dtype is dtype(int) + def test_type(self): from numpypy import array ar = array(range(5)) @@ -201,8 +223,8 @@ assert a[2] == 4 def test_copy(self): - from numpypy import array - a = array(range(5)) + from numpypy import arange, array + a = arange(5) b = a.copy() for i in xrange(5): assert b[i] == a[i] @@ -212,6 +234,11 @@ a = array(1) assert a.copy() == a + a = arange(8) + b = a[::2] + c = b.copy() + assert (c == b).all() + def test_iterator_init(self): from numpypy import array a = 
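# test_strides_c/test_strides_f above pin down how strides derive from a
# shape in C and Fortran order.  A small standalone sketch of that
# computation (in element units, so itemsize is taken as 1), reproducing
# the numbers the tests assert; the helper name is made up:

def strides_for(shape, order):
    strides = [0] * len(shape)
    step = 1
    axes = range(len(shape) - 1, -1, -1) if order == 'C' else range(len(shape))
    for i in axes:
        strides[i] = step
        step *= shape[i]
    return strides

assert strides_for([10, 5, 3], 'C') == [15, 3, 1]   # as in test_strides_c
assert strides_for([10, 5, 3], 'F') == [1, 10, 50]  # as in test_strides_f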
array(range(5)) @@ -303,8 +330,8 @@ def test_scalar(self): from numpypy import array, dtype a = array(3) - #assert a[0] == 3 raises(IndexError, "a[0]") + raises(IndexError, "a[0] = 5") assert a.size == 1 assert a.shape == () assert a.dtype is dtype(int) @@ -324,6 +351,81 @@ c = a[:3] assert c.shape == (3,) + def test_set_shape(self): + from numpypy import array, zeros + a = array([]) + a.shape = [] + a = array(range(12)) + a.shape = (3, 4) + assert (a == [range(4), range(4, 8), range(8, 12)]).all() + a.shape = (3, 2, 2) + assert a[1, 1, 1] == 7 + a.shape = (3, -1, 2) + assert a.shape == (3, 2, 2) + a.shape = 12 + assert a.shape == (12, ) + exc = raises(ValueError, "a.shape = 10") + assert str(exc.value) == "total size of new array must be unchanged" + a = array(3) + a.shape = () + #numpy allows this + a.shape = (1,) + + def test_reshape(self): + from numpypy import array, zeros + a = array(range(12)) + exc = raises(ValueError, "b = a.reshape((3, 10))") + assert str(exc.value) == "total size of new array must be unchanged" + b = a.reshape((3, 4)) + assert b.shape == (3, 4) + assert (b == [range(4), range(4, 8), range(8, 12)]).all() + b[:, 0] = 1000 + assert (a == [1000, 1, 2, 3, 1000, 5, 6, 7, 1000, 9, 10, 11]).all() + a = zeros((4, 2, 3)) + a.shape = (12, 2) + + def test_slice_reshape(self): + from numpypy import zeros, arange + a = zeros((4, 2, 3)) + b = a[::2, :, :] + b.shape = (2, 6) + exc = raises(AttributeError, "b.shape = 12") + assert str(exc.value) == \ + "incompatible shape for a non-contiguous array" + b = a[::2, :, :].reshape((2, 6)) + assert b.shape == (2, 6) + b = arange(20)[1:17:2] + b.shape = (4, 2) + assert (b == [[1, 3], [5, 7], [9, 11], [13, 15]]).all() + c = b.reshape((2, 4)) + assert (c == [[1, 3, 5, 7], [9, 11, 13, 15]]).all() + + z = arange(96).reshape((12, -1)) + assert z.shape == (12, 8) + y = z.reshape((4, 3, 8)) + v = y[:, ::2, :] + w = y.reshape(96) + u = v.reshape(64) + assert y[1, 2, 1] == z[5, 1] + y[1, 2, 1] = 1000 + # z, y, w, v are views of eachother + assert z[5, 1] == 1000 + assert v[1, 1, 1] == 1000 + assert w[41] == 1000 + # u is not a view, it is a copy! 
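# Several of the reshape tests above rely on inferring a single -1
# dimension (arange(96).reshape((12, -1)) -> shape (12, 8)) and on the
# "total size of new array must be unchanged" error.  A minimal sketch of
# that inference, independent of micronumpy; the function name is made up:

def infer_shape(size, shape):
    unknown = [i for i, s in enumerate(shape) if s == -1]
    if len(unknown) > 1:
        raise ValueError("can only specify one unknown dimension")
    known = 1
    for s in shape:
        if s != -1:
            known *= s
    shape = list(shape)
    if unknown:
        if known == 0 or size % known != 0:
            raise ValueError("total size of new array must be unchanged")
        shape[unknown[0]] = size // known
        known *= shape[unknown[0]]
    if known != size:
        raise ValueError("total size of new array must be unchanged")
    return tuple(shape)

assert infer_shape(96, (12, -1)) == (12, 8)
assert infer_shape(12, (3, 4)) == (3, 4)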
+ assert u[25] == 41 + + a = zeros((5, 2)) + assert a.reshape(-1).shape == (10,) + + raises(ValueError, arange(10).reshape, (5, -1, -1)) + + def test_reshape_varargs(self): + from numpypy import arange + z = arange(96).reshape(12, -1) + y = z.reshape(4, 3, 8) + assert y.shape == (4, 3, 8) + def test_add(self): from numpypy import array a = array(range(5)) @@ -359,11 +461,11 @@ assert r[i] == i + 3 def test_add_list(self): - from numpypy import array + from numpypy import array, ndarray a = array(range(5)) b = list(reversed(range(5))) c = a + b - assert isinstance(c, array) + assert isinstance(c, ndarray) for i in range(5): assert c[i] == 4 @@ -709,7 +811,7 @@ assert b[i] == 2.5 * a[i] def test_dtype_guessing(self): - from numpypy import array, dtype + from numpypy import array, dtype, float64, int8, bool_ assert array([True]).dtype is dtype(bool) assert array([True, False]).dtype is dtype(bool) @@ -719,6 +821,10 @@ assert array([1.2, True]).dtype is dtype(float) assert array([1.2, 5]).dtype is dtype(float) assert array([]).dtype is dtype(float) + assert array([float64(2)]).dtype is dtype(float) + assert array([int8(3)]).dtype is dtype("int8") + assert array([bool_(True)]).dtype is dtype(bool) + assert array([bool_(True), 3.0]).dtype is dtype(float) def test_comparison(self): import operator @@ -1008,10 +1114,20 @@ b = a[0].copy() assert (b == zeros(10)).all() -class AppTestSupport(object): + def test_array_interface(self): + from numpypy import array + a = array([1, 2, 3]) + i = a.__array_interface__ + assert isinstance(i['data'][0], int) + a = a[::2] + i = a.__array_interface__ + assert isinstance(i['data'][0], int) + raises(TypeError, getattr, array(3), '__array_interface__') + +class AppTestSupport(BaseNumpyAppTest): def setup_class(cls): import struct - cls.space = gettestobjspace(usemodules=('micronumpy',)) + BaseNumpyAppTest.setup_class.im_func(cls) cls.w_data = cls.space.wrap(struct.pack('dddd', 1, 2, 3, 4)) def test_fromstring(self): @@ -1136,3 +1252,14 @@ a = arange(0, 0.8, 0.1) assert len(a) == 8 assert arange(False, True, True).dtype is dtype(int) + + +class AppTestRanges(BaseNumpyAppTest): + def test_app_reshape(self): + from numpypy import arange, array, dtype, reshape + a = arange(12) + b = reshape(a, (3, 4)) + assert b.shape == (3, 4) + a = range(12) + b = reshape(a, (3, 4)) + assert b.shape == (3, 4) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -8,7 +8,7 @@ assert isinstance(add, ufunc) assert repr(add) == "" - assert repr(ufunc) == "" + assert repr(ufunc) == "" def test_ufunc_attrs(self): from numpypy import add, multiply, sin @@ -37,36 +37,36 @@ assert minimum(2.0, 3.0) == 2.0 def test_sequence(self): - from numpypy import array, negative, minimum + from numpypy import array, ndarray, negative, minimum a = array(range(3)) b = [2.0, 1.0, 0.0] c = 1.0 b_neg = negative(b) - assert isinstance(b_neg, array) + assert isinstance(b_neg, ndarray) for i in range(3): assert b_neg[i] == -b[i] min_a_b = minimum(a, b) - assert isinstance(min_a_b, array) + assert isinstance(min_a_b, ndarray) for i in range(3): assert min_a_b[i] == min(a[i], b[i]) min_b_a = minimum(b, a) - assert isinstance(min_b_a, array) + assert isinstance(min_b_a, ndarray) for i in range(3): assert min_b_a[i] == min(a[i], b[i]) min_a_c = minimum(a, c) - assert isinstance(min_a_c, array) + assert isinstance(min_a_c, ndarray) for i in range(3): assert min_a_c[i] == 
min(a[i], c) min_c_a = minimum(c, a) - assert isinstance(min_c_a, array) + assert isinstance(min_c_a, ndarray) for i in range(3): assert min_c_a[i] == min(a[i], c) min_b_c = minimum(b, c) - assert isinstance(min_b_c, array) + assert isinstance(min_b_c, ndarray) for i in range(3): assert min_b_c[i] == min(b[i], c) min_c_b = minimum(c, b) - assert isinstance(min_c_b, array) + assert isinstance(min_c_b, ndarray) for i in range(3): assert min_c_b[i] == min(b[i], c) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -8,13 +8,12 @@ from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.metainterp.warmspot import reset_stats -from pypy.module.micronumpy import interp_ufuncs, signature -from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, - FloatObject, IntObject, BoolObject, Parser, InterpreterState) -from pypy.module.micronumpy.interp_numarray import NDimArray, NDimSlice,\ - BaseArray +from pypy.module.micronumpy import interp_boxes +from pypy.module.micronumpy.compile import (FakeSpace, + IntObject, Parser, InterpreterState) +from pypy.module.micronumpy.interp_numarray import (W_NDimArray, + BaseArray) from pypy.rlib.nonconst import NonConstant -from pypy.rpython.annlowlevel import llstr, hlstr class TestNumpyJIt(LLJitMixin): @@ -48,17 +47,15 @@ def f(i): interp = InterpreterState(codes[i]) interp.run(space) - res = interp.results[-1] - assert isinstance(res, BaseArray) - w_res = res.eval(res.start_iter()).wrap(interp.space) - if isinstance(w_res, BoolObject): - return float(w_res.boolval) - elif isinstance(w_res, FloatObject): - return w_res.floatval - elif isinstance(w_res, IntObject): - return w_res.intval - else: - return -42. 
+ w_res = interp.results[-1] + if isinstance(w_res, BaseArray): + w_res = w_res.eval(w_res.start_iter()) + + if isinstance(w_res, interp_boxes.W_Float64Box): + return w_res.value + elif isinstance(w_res, interp_boxes.W_BoolBox): + return float(w_res.value) + raise TypeError(w_res) if self.graph is None: interp, graph = self.meta_interp(f, [i], @@ -80,9 +77,9 @@ def test_add(self): result = self.run("add") - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, 'int_add': 3, - 'int_ge': 1, 'guard_false': 1, 'jump': 1}) + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, + 'int_ge': 1, 'guard_false': 1, 'jump': 1}) assert result == 3 + 3 def define_float_add(): @@ -94,9 +91,9 @@ def test_floatadd(self): result = self.run("float_add") assert result == 3 + 3 - self.check_simple_loop({"getarrayitem_raw": 1, "float_add": 1, - "setarrayitem_raw": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 1, "float_add": 1, + "setinteriorfield_raw": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) def define_sum(): return """ @@ -108,9 +105,9 @@ def test_sum(self): result = self.run("sum") assert result == 2 * sum(range(30)) - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 2, - "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 2, + "int_add": 2, "int_ge": 1, "guard_false": 1, + "jump": 1}) def define_prod(): return """ @@ -125,9 +122,9 @@ for i in range(30): expected *= i * 2 assert result == expected - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 2, - "int_ge": 1, "guard_false": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 2, + "int_ge": 1, "guard_false": 1, "jump": 1}) def test_max(self): py.test.skip("broken, investigate") @@ -138,9 +135,9 @@ max(b) """) assert result == 256 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 1, + "int_lt": 1, "guard_true": 1, "jump": 1}) def test_min(self): py.test.skip("broken, investigate") @@ -151,9 +148,9 @@ min(b) """) assert result == -24 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_mul": 1, "int_add": 1, - "int_lt": 1, "guard_true": 1, "jump": 1}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_mul": 1, "int_add": 1, + "int_lt": 1, "guard_true": 1, "jump": 1}) def define_any(): return """ @@ -166,10 +163,10 @@ def test_any(self): result = self.run("any") assert result == 1 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, - "float_ne": 1, "int_add": 2, - "int_ge": 1, "jump": 1, - "guard_false": 2}) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, + "float_ne": 1, "int_add": 2, + "int_ge": 1, "jump": 1, + "guard_false": 2}) def define_already_forced(): return """ @@ -188,10 +185,10 @@ # sure it was optimized correctly. # XXX the comment above is wrong now. 
We need preferrably a way to # count the two loops separately - self.check_resops({'setarrayitem_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 35, + self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, - 'getarrayitem_raw': 4, 'float_add': 2, 'guard_false': 4, + 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, 'guard_value': 2}) def define_ufunc(): @@ -205,10 +202,9 @@ def test_ufunc(self): result = self.run("ufunc") assert result == -6 - self.check_simple_loop({"getarrayitem_raw": 2, "float_add": 1, "float_neg": 1, - "setarrayitem_raw": 1, "int_add": 3, - "int_ge": 1, "guard_false": 1, "jump": 1, - }) + self.check_simple_loop({"getinteriorfield_raw": 2, "float_add": 1, "float_neg": 1, + "setinteriorfield_raw": 1, "int_add": 3, + "int_ge": 1, "guard_false": 1, "jump": 1}) def define_specialization(): return """ @@ -246,9 +242,9 @@ def test_slice(self): result = self.run("slice") assert result == 18 - self.check_simple_loop({'getarrayitem_raw': 2, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_ge': 1, 'guard_false': 1, 'jump': 1}) @@ -265,8 +261,8 @@ def test_slice2(self): result = self.run("slice2") assert result == 15 - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add': 1, - 'setarrayitem_raw': 1, 'int_add': 3, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add': 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_ge': 1, 'guard_false': 1, 'jump': 1}) def define_multidim(): @@ -279,11 +275,11 @@ def test_multidim(self): result = self.run('multidim') assert result == 8 - self.check_simple_loop({'float_add': 1, 'getarrayitem_raw': 2, - 'guard_false': 1, 'int_add': 3, 'int_ge': 1, - 'jump': 1, 'setarrayitem_raw': 1}) # int_add might be 1 here if we try slightly harder with # reusing indexes or some optimization + self.check_simple_loop({'float_add': 1, 'getinteriorfield_raw': 2, + 'guard_false': 1, 'int_add': 3, 'int_ge': 1, + 'jump': 1, 'setinteriorfield_raw': 1}) def define_multidim_slice(): return """ @@ -329,18 +325,18 @@ result = self.run("setslice") assert result == 11.0 self.check_loop_count(1) - self.check_simple_loop({'getarrayitem_raw': 2, 'float_add' : 1, - 'setarrayitem_raw': 1, 'int_add': 3, + self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, + 'setinteriorfield_raw': 1, 'int_add': 3, 'int_eq': 1, 'guard_false': 1, 'jump': 1}) class TestNumpyOld(LLJitMixin): def setup_class(cls): py.test.skip("old") from pypy.module.micronumpy.compile import FakeSpace - from pypy.module.micronumpy.interp_dtype import W_Float64Dtype + from pypy.module.micronumpy.interp_dtype import get_dtype_cache cls.space = FakeSpace() - cls.float64_dtype = cls.space.fromcache(W_Float64Dtype) + cls.float64_dtype = get_dtype_cache(cls.space).w_float64dtype def test_int32_sum(self): py.test.skip("pypy/jit/backend/llimpl.py needs to be changed to " @@ -355,7 +351,7 @@ dtype = float64_dtype else: dtype = int32_dtype - ar = NDimArray(n, [n], dtype=dtype) + ar = W_NDimArray(n, [n], dtype=dtype) i = 0 while i < n: ar.get_concrete().setitem(i, int32_dtype.box(7)) diff --git a/pypy/module/micronumpy/test/test_ztranslation.py b/pypy/module/micronumpy/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_ztranslation.py @@ -0,0 +1,5 @@ + +from pypy.objspace.fake.checkmodule import checkmodule + +def 
test_numpy_translates(): + checkmodule('micronumpy') diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/types.py @@ -0,0 +1,389 @@ +import functools +import math + +from pypy.module.micronumpy import interp_boxes +from pypy.objspace.std.floatobject import float2string +from pypy.rlib import rfloat, libffi, clibffi +from pypy.rlib.objectmodel import specialize +from pypy.rlib.rarithmetic import LONG_BIT, widen +from pypy.rpython.lltypesystem import lltype, rffi + + +def simple_unary_op(func): + specialize.argtype(1)(func) + @functools.wraps(func) + def dispatcher(self, v): + return self.box( + func( + self, + self.for_computation(self.unbox(v)) + ) + ) + return dispatcher + +def simple_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return self.box( + func( + self, + self.for_computation(self.unbox(v1)), + self.for_computation(self.unbox(v2)), + ) + ) + return dispatcher + +def raw_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return func(self, + self.for_computation(self.unbox(v1)), + self.for_computation(self.unbox(v2)) + ) + return dispatcher + +class BaseType(object): + def _unimplemented_ufunc(self, *args): + raise NotImplementedError + # add = sub = mul = div = mod = pow = eq = ne = lt = le = gt = ge = max = \ + # min = copysign = pos = neg = abs = sign = reciprocal = fabs = floor = \ + # exp = sin = cos = tan = arcsin = arccos = arctan = arcsinh = \ + # arctanh = _unimplemented_ufunc + +class Primitive(object): + _mixin_ = True + def get_element_size(self): + return rffi.sizeof(self.T) + + @specialize.argtype(1) + def box(self, value): + return self.BoxType(rffi.cast(self.T, value)) + + def unbox(self, box): + assert isinstance(box, self.BoxType) + return box.value + + def coerce(self, space, w_item): + if isinstance(w_item, self.BoxType): + return w_item + return self.coerce_subtype(space, space.gettypefor(self.BoxType), w_item) + + def coerce_subtype(self, space, w_subtype, w_item): + # XXX: ugly + w_obj = space.allocate_instance(self.BoxType, w_subtype) + assert isinstance(w_obj, self.BoxType) + w_obj.__init__(self._coerce(space, w_item).value) + return w_obj + + def _coerce(self, space, w_item): + raise NotImplementedError + + def read(self, storage, width, i, offset): + return self.box(libffi.array_getitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset + )) + + def store(self, storage, width, i, offset, box): + value = self.unbox(box) + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value + ) + + def fill(self, storage, width, box, start, stop, offset): + value = self.unbox(box) + for i in xrange(start, stop): + libffi.array_setitem(clibffi.cast_type_to_ffitype(self.T), + width, storage, i, offset, value + ) + + @simple_binary_op + def add(self, v1, v2): + return v1 + v2 + + @simple_binary_op + def sub(self, v1, v2): + return v1 - v2 + + @simple_binary_op + def mul(self, v1, v2): + return v1 * v2 + + @simple_unary_op + def pos(self, v): + return +v + + @simple_unary_op + def neg(self, v): + return -v + + @simple_unary_op + def abs(self, v): + return abs(v) + + @raw_binary_op + def eq(self, v1, v2): + return v1 == v2 + + @raw_binary_op + def ne(self, v1, v2): + return v1 != v2 + + @raw_binary_op + def lt(self, v1, v2): + return v1 < v2 + + @raw_binary_op + def le(self, v1, v2): + return v1 <= v2 + + 
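# The new types.py builds every arithmetic operation from a plain scalar
# function via simple_unary_op/simple_binary_op: unbox the operand boxes,
# run the raw computation, re-box the result in the same type.  A stripped
# down model of that dispatch pattern (Box/Float64Type/toy_binary_op are
# illustrative stand-ins, not the interp_boxes or types.py classes):

import functools

class Box(object):
    def __init__(self, value):
        self.value = value

class Float64Type(object):
    @staticmethod
    def box(value):
        return Box(float(value))
    @staticmethod
    def unbox(box):
        return box.value

def toy_binary_op(func):
    @functools.wraps(func)
    def dispatcher(typ, v1, v2):
        # unbox both operands, compute, and re-box the result
        return typ.box(func(typ, typ.unbox(v1), typ.unbox(v2)))
    return dispatcher

@toy_binary_op
def add(typ, a, b):
    return a + b

assert add(Float64Type, Float64Type.box(2), Float64Type.box(3)).value == 5.0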
@raw_binary_op + def gt(self, v1, v2): + return v1 > v2 + + @raw_binary_op + def ge(self, v1, v2): + return v1 >= v2 + + def bool(self, v): + return bool(self.for_computation(self.unbox(v))) + + @simple_binary_op + def max(self, v1, v2): + return max(v1, v2) + + @simple_binary_op + def min(self, v1, v2): + return min(v1, v2) + +class Bool(BaseType, Primitive): + T = lltype.Bool + BoxType = interp_boxes.W_BoolBox + + True = BoxType(True) + False = BoxType(False) + + @specialize.argtype(1) + def box(self, value): + box = Primitive.box(self, value) + if box.value: + return self.True + else: + return self.False + + def coerce_subtype(self, space, w_subtype, w_item): + # Doesn't return subclasses so it can return the constants. + return self._coerce(space, w_item) + + def _coerce(self, space, w_item): + return self.box(space.is_true(w_item)) + + def str_format(self, box): + value = self.unbox(box) + return "True" if value else "False" + + def for_computation(self, v): + return int(v) + +class Integer(Primitive): + _mixin_ = True + + def _coerce(self, space, w_item): + return self.box(space.int_w(space.int(w_item))) + + def str_format(self, box): + value = self.unbox(box) + return str(self.for_computation(value)) + + def for_computation(self, v): + return widen(v) + + @simple_binary_op + def div(self, v1, v2): + if v2 == 0: + return 0 + return v1 / v2 + + @simple_binary_op + def mod(self, v1, v2): + return v1 % v2 + + @simple_binary_op + def pow(self, v1, v2): + res = 1 + while v2 > 0: + if v2 & 1: + res *= v1 + v2 >>= 1 + if v2 == 0: + break + v1 *= v1 + return res + + @simple_unary_op + def sign(self, v): + if v > 0: + return 1 + elif v < 0: + return -1 + else: + assert v == 0 + return 0 + +class Int8(BaseType, Integer): + T = rffi.SIGNEDCHAR + BoxType = interp_boxes.W_Int8Box + +class UInt8(BaseType, Integer): + T = rffi.UCHAR + BoxType = interp_boxes.W_UInt8Box + +class Int16(BaseType, Integer): + T = rffi.SHORT + BoxType = interp_boxes.W_Int16Box + +class UInt16(BaseType, Integer): + T = rffi.USHORT + BoxType = interp_boxes.W_UInt16Box + +class Int32(BaseType, Integer): + T = rffi.INT + BoxType = interp_boxes.W_Int32Box + +class UInt32(BaseType, Integer): + T = rffi.UINT + BoxType = interp_boxes.W_UInt32Box + +class Long(BaseType, Integer): + T = rffi.LONG + BoxType = interp_boxes.W_LongBox + +class ULong(BaseType, Integer): + T = rffi.ULONG + BoxType = interp_boxes.W_ULongBox + +class Int64(BaseType, Integer): + T = rffi.LONGLONG + BoxType = interp_boxes.W_Int64Box + +class UInt64(BaseType, Integer): + T = rffi.ULONGLONG + BoxType = interp_boxes.W_UInt64Box + +class Float(Primitive): + _mixin_ = True + + def _coerce(self, space, w_item): + return self.box(space.float_w(space.float(w_item))) + + def str_format(self, box): + value = self.unbox(box) + return float2string(self.for_computation(value), "g", rfloat.DTSF_STR_PRECISION) + + def for_computation(self, v): + return float(v) + + @simple_binary_op + def div(self, v1, v2): + try: + return v1 / v2 + except ZeroDivisionError: + if v1 == v2 == 0.0: + return rfloat.NAN + return rfloat.copysign(rfloat.INFINITY, v1 * v2) + + @simple_binary_op + def mod(self, v1, v2): + return math.fmod(v1, v2) + + @simple_binary_op + def pow(self, v1, v2): + return math.pow(v1, v2) + + @simple_binary_op + def copysign(self, v1, v2): + return math.copysign(v1, v2) + + @simple_unary_op + def sign(self, v): + if v == 0.0: + return 0.0 + return rfloat.copysign(1.0, v) + + @simple_unary_op + def fabs(self, v): + return math.fabs(v) + + @simple_unary_op + def 
reciprocal(self, v): + if v == 0.0: + return rfloat.copysign(rfloat.INFINITY, v) + return 1.0 / v + + @simple_unary_op + def floor(self, v): + return math.floor(v) + + @simple_unary_op + def exp(self, v): + try: + return math.exp(v) + except OverflowError: + return rfloat.INFINITY + + @simple_unary_op + def sin(self, v): + return math.sin(v) + + @simple_unary_op + def cos(self, v): + return math.cos(v) + + @simple_unary_op + def tan(self, v): + return math.tan(v) + + @simple_unary_op + def arcsin(self, v): + if not -1.0 <= v <= 1.0: + return rfloat.NAN + return math.asin(v) + + @simple_unary_op + def arccos(self, v): + if not -1.0 <= v <= 1.0: + return rfloat.NAN + return math.acos(v) + + @simple_unary_op + def arctan(self, v): + return math.atan(v) + + @simple_unary_op + def arcsinh(self, v): + return math.asinh(v) + + @simple_unary_op + def arctanh(self, v): + if v == 1.0 or v == -1.0: + return math.copysign(rfloat.INFINITY, v) + if not -1.0 < v < 1.0: + return rfloat.NAN + return math.atanh(v) + + @simple_unary_op + def sqrt(self, v): + try: + return math.sqrt(v) + except ValueError: + return rfloat.NAN + + +class Float32(BaseType, Float): + T = rffi.FLOAT + BoxType = interp_boxes.W_Float32Box + +class Float64(BaseType, Float): + T = rffi.DOUBLE + BoxType = interp_boxes.W_Float64Box \ No newline at end of file diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -116,4 +116,15 @@ guard_no_overflow(descr=...) --TICK-- jump(p0, p1, p2, p3, p4, i35, p13, i7, descr=) - """) \ No newline at end of file + """) + + def test_floatlist_unpack_without_calls(self): + def fn(n): + l = [2.3, 3.4, 4.5] + for i in range(n): + x, y, z = l # ID: look + # + log = self.run(fn, [1000]) + loop, = log.loops_by_filename(self.filepath) + ops = loop.ops_by_id('look') + assert 'call' not in log.opnames(ops) diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -0,0 +1,27 @@ + +""" +Extra tests for the pure Python PyPy _collections module +(not used in normal PyPy's) +""" + +from pypy.conftest import gettestobjspace + +class AppTestcStringIO: + def test_copy(self): + import _collections + def f(): + return 42 + d = _collections.defaultdict(f, {2: 3}) + # + d1 = d.copy() + assert type(d1) is _collections.defaultdict + assert len(d1) == 1 + assert d1[2] == 3 + assert d1[3] == 42 + # + import copy + d2 = copy.deepcopy(d) + assert type(d2) is _collections.defaultdict + assert len(d2) == 1 + assert d2[2] == 3 + assert d2[3] == 42 diff --git a/pypy/objspace/fake/__init__.py b/pypy/objspace/fake/__init__.py --- a/pypy/objspace/fake/__init__.py +++ b/pypy/objspace/fake/__init__.py @@ -1,2 +0,0 @@ -from objspace import FakeObjSpace -Space = FakeObjSpace diff --git a/pypy/objspace/fake/checkmodule.py b/pypy/objspace/fake/checkmodule.py --- a/pypy/objspace/fake/checkmodule.py +++ b/pypy/objspace/fake/checkmodule.py @@ -1,108 +1,12 @@ -import re -from copy import copy -from pypy.tool.error import debug -from pypy.interpreter.argument import Arguments -from pypy.interpreter.gateway import interp2app -from pypy.rlib.nonconst import NonConstant +from pypy.objspace.fake.objspace import FakeObjSpace, W_Root -def my_import(name): - mod = __import__(name) - components = name.split('.') - for comp in 
components[1:]: - mod = getattr(mod, comp) - return mod -def find_gateways(modname, basepath, module): - identifier = r'[a-zA-Z0-9][a-zA-Z0-9_]*' - r_simplename = re.compile(r'(%s)[.](%s)$' % (identifier, identifier)) - res = [] - for name in module.interpleveldefs.values(): - match = r_simplename.match(name) - if match: - submod_name, obj_name = match.groups() - submod_name = '%s.%s.%s' % (basepath, modname, submod_name) - submod = my_import(submod_name) - obj = getattr(submod, obj_name) - res += find_gw_in_obj(obj) - return res - -def find_gw_in_obj(obj): - if hasattr(obj, 'typedef'): - typedef = obj.typedef - return [gw for gw in typedef.rawdict.values() - if isinstance(gw, interp2app)] - elif hasattr(obj, 'func_code'): - return [interp2app(obj)] - else: - assert False - -## Since the fake objspace is more a hack than a real object space, it -## happens that the annotator complains about operations that cannot -## succeed because it knows too much about the objects involved. For -## example, if it knows that a list is always empty, it will block -## each operations that tries to access that list. This is not what we -## want, because we know that with real objectspaces that operations -## will succeed. - -## As a workaround, we insert dummy rpython code (the function -## dummy_rpython) that manipulates the variables in order to give -## them a more sensible annotation. This is the preferred way to solve -## the problems so far. - -## If the solution above doesn't work, the alternative is to -## substitute the interpreter code with something that doesn't hurt -## the annotator. It's a very ugly hack, better solutions are welcome -## :-) - - -# dummy rpython code to give some variables more sensible annotations -def dummy_rpython(dummy_function): - # to make the annotator flow-in without executing the code - if NonConstant(False): - dummy_function.defs_w = [None] # else the annotator would see an always empty list - -def patch_pypy(): - from pypy.interpreter.baseobjspace import W_Root - - def descr_call_mismatch(self, space, opname, RequiredClass, args): - from pypy.interpreter.error import OperationError - msg = 'This message will never be displayed :-)' - raise OperationError(space.w_TypeError, space.wrap(msg)) - W_Root.descr_call_mismatch = descr_call_mismatch - - -def checkmodule(modname, backend, interactive=False, basepath='pypy.module'): - "Compile a fake PyPy module." 
- from pypy.objspace.fake.objspace import FakeObjSpace, W_Object - from pypy.translator.driver import TranslationDriver - +def checkmodule(modname): space = FakeObjSpace() - space.config.translating = True - ModuleClass = __import__(basepath + '.%s' % modname, - None, None, ['Module']).Module - module = ModuleClass(space, space.wrap(modname)) - w_moduledict = module.getdict(space) - - gateways = find_gateways(modname, basepath, module) - functions = [gw.__spacebind__(space) for gw in gateways] - arguments = Arguments.frompacked(space, W_Object(), W_Object()) - dummy_function = copy(functions[0]) - - def main(argv): # use the standalone mode not to allow SomeObject - dummy_rpython(dummy_function) - for func in functions: - func.call_args(arguments) - return 0 - - patch_pypy() - driver = TranslationDriver() - driver.setup(main, None) - try: - driver.proceed(['compile_' + backend]) - except SystemExit: - raise - except: - if not interactive: - raise - debug(driver) - raise SystemExit(1) + mod = __import__('pypy.module.%s' % modname, None, None, ['__doc__']) + # force computation and record what we wrap + module = mod.Module(space, W_Root()) + for name in module.loaders: + module._load_lazily(space, name) + # + space.translates(**{'translation.list_comprehension_operations':True}) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -1,147 +1,302 @@ -from pypy.interpreter.baseobjspace import ObjSpace, Wrappable, W_Root -from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.baseobjspace import W_Root, ObjSpace +from pypy.interpreter.baseobjspace import Wrappable, SpaceCache +from pypy.interpreter import argument, gateway +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.annotation.model import SomeInstance, s_None +from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.rpython.lltypesystem import lltype +from pypy.tool.sourcetools import compile2, func_with_new_name +from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.objectmodel import instantiate, we_are_translated from pypy.rlib.nonconst import NonConstant from pypy.rlib.rarithmetic import r_uint -from pypy.rlib.rbigint import rbigint +from pypy.translator.translator import TranslationContext +from pypy.tool.option import make_config -#class W_Type(W_Root): -# _attrs_ = () -class W_Object(W_Root): - _attrs_ = () -W_Object.typedef = TypeDef('foobar') +class W_MyObject(Wrappable): + typedef = None -def make_dummy(a=W_Object(), b=W_Object()): - def fn(*args): - if NonConstant(True): - return a - else: - return b - return fn + def getdict(self, space): + return w_obj_or_none() -int_dummy = make_dummy(42, 43) -float_dummy = make_dummy(42.0, 42.1) -uint_dummy = make_dummy(r_uint(42), r_uint(43)) -str_dummy = make_dummy('foo', 'bar') -bool_dummy = make_dummy(True, False) -unicode_dummy = make_dummy(u'abc', u'cde') -bigint_dummy = make_dummy(rbigint.fromint(0), rbigint.fromint(1)) + def getdictvalue(self, space, attr): + attr + "xx" # check that it's a string + return w_obj_or_none() + + def setdictvalue(self, space, attr, w_value): + attr + "xx" # check that it's a string + is_root(w_value) + return NonConstant(True) + + def deldictvalue(self, space, attr): + attr + "xx" # check that it's a string + return NonConstant(True) + + def setdict(self, space, w_dict): + is_root(w_dict) + + def setclass(self, space, w_subtype): + is_root(w_subtype) + + def str_w(self, space): + return 
NonConstant("foobar") + + def unicode_w(self, space): + return NonConstant(u"foobar") + + def int_w(self, space): + return NonConstant(-42) + + def uint_w(self, space): + return r_uint(NonConstant(42)) + + def bigint_w(self, space): + from pypy.rlib.rbigint import rbigint + return rbigint.fromint(NonConstant(42)) + + +def w_some_obj(): + if NonConstant(False): + return W_Root() + return W_MyObject() + +def w_obj_or_none(): + if NonConstant(False): + return None + return w_some_obj() + +def is_root(w_obj): + assert isinstance(w_obj, W_Root) +is_root.expecting = W_Root + +def is_arguments(arg): + assert isinstance(arg, argument.Arguments) +is_arguments.expecting = argument.Arguments + + +class Entry(ExtRegistryEntry): + _about_ = is_root, is_arguments + + def compute_result_annotation(self, s_w_obj): + cls = self.instance.expecting + s_inst = SomeInstance(self.bookkeeper.getuniqueclassdef(cls), + can_be_None=True) + assert s_inst.contains(s_w_obj) + return s_None + + def specialize_call(self, hop): + return hop.inputconst(lltype.Void, None) + +# ____________________________________________________________ + class FakeObjSpace(ObjSpace): - w_None = W_Object() - w_False = W_Object() - w_True = W_Object() - w_Ellipsis = W_Object() - w_NotImplemented = W_Object() - w_int = W_Object() - w_dict = W_Object() - w_float = W_Object() - w_long = W_Object() - w_tuple = W_Object() - w_str = W_Object() - w_basestring = W_Object() - w_unicode = W_Object() - w_type = W_Object() - w_instance = W_Object() - w_slice = W_Object() - w_hex = W_Object() - w_oct = W_Object() - - def initialize(self): - self.config.objspace.geninterp = False - self.config.objspace.disable_call_speedhacks = True - self.wrap_cache = {} - self.make_builtins() - def _freeze_(self): - return True + def __init__(self): + self._seen_extras = [] + ObjSpace.__init__(self) + + def float_w(self, w_obj): + is_root(w_obj) + return NonConstant(42.5) + + def is_true(self, w_obj): + is_root(w_obj) + return NonConstant(False) + + def unwrap(self, w_obj): + "NOT_RPYTHON" + raise NotImplementedError + + def newdict(self, module=False, instance=False, classofinstance=None, + strdict=False): + return w_some_obj() + + def newtuple(self, list_w): + for w_x in list_w: + is_root(w_x) + return w_some_obj() + + def newlist(self, list_w): + for w_x in list_w: + is_root(w_x) + return w_some_obj() + + def newslice(self, w_start, w_end, w_step): + is_root(w_start) + is_root(w_end) + is_root(w_step) + return w_some_obj() + + def newint(self, x): + return w_some_obj() + + def newfloat(self, x): + return w_some_obj() + + def marshal_w(self, w_obj): + "NOT_RPYTHON" + raise NotImplementedError def wrap(self, x): - if isinstance(x, Wrappable): - w_result = x.__spacebind__(self) - return w_result - return W_Object() + if not we_are_translated(): + if isinstance(x, gateway.interp2app): + self._see_interp2app(x) + if isinstance(x, GetSetProperty): + self._see_getsetproperty(x) + return w_some_obj() wrap._annspecialcase_ = "specialize:argtype(1)" - def unwrap(self, w_obj): - assert isinstance(w_obj, W_Object) - return None + def _see_interp2app(self, interp2app): + "NOT_RPYTHON" + activation = interp2app._code.activation + def check(): + scope_w = [w_some_obj()] * NonConstant(42) + w_result = activation._run(self, scope_w) + is_root(w_result) + check = func_with_new_name(check, 'check__' + interp2app.name) + self._seen_extras.append(check) - lookup = make_dummy() - allocate_instance = make_dummy() - getattr = make_dummy() - setattr = make_dummy() - getitem = make_dummy() - 
setitem = make_dummy() - delitem = make_dummy() - int_w = int_dummy - uint_w = uint_dummy - float_w = float_dummy - unicode_w = unicode_dummy - bigint_w = bigint_dummy - iter = make_dummy() - type = make_dummy() - str = make_dummy() - int = make_dummy() - float = make_dummy() - repr = make_dummy() - id = make_dummy() - len = make_dummy() - str_w = str_dummy - call_args = make_dummy() - new_interned_str = make_dummy() - newint = make_dummy() - newlong = make_dummy() - newfloat = make_dummy() - def newdict(self, module=False): - return self.newfloat() - newlist = make_dummy() - emptylist = make_dummy() - newtuple = make_dummy() - newslice = make_dummy() - lt = make_dummy() - le = make_dummy() - eq = make_dummy() - ne = make_dummy() - gt = make_dummy() - ge = make_dummy() - lt_w = bool_dummy - le_w = bool_dummy - eq_w = bool_dummy - ne_w = bool_dummy - gt_w = bool_dummy - ge_w = bool_dummy - is_w = bool_dummy - is_ = make_dummy() - next = make_dummy() - is_true = bool_dummy - nonzero = make_dummy() - issubtype = make_dummy() - ord = make_dummy() - hash = make_dummy() - delattr = make_dummy() # should return None? - contains = make_dummy() - hex = make_dummy() - oct = make_dummy() - pow = make_dummy() - inplace_pow = make_dummy() - cmp = make_dummy() + def _see_getsetproperty(self, getsetproperty): + "NOT_RPYTHON" + space = self + def checkprop(): + getsetproperty.fget(getsetproperty, space, w_some_obj()) + if getsetproperty.fset is not None: + getsetproperty.fset(getsetproperty, space, w_some_obj(), + w_some_obj()) + if getsetproperty.fdel is not None: + getsetproperty.fdel(getsetproperty, space, w_some_obj()) + if not getsetproperty.name.startswith('<'): + checkprop = func_with_new_name(checkprop, + 'checkprop__' + getsetproperty.name) + self._seen_extras.append(checkprop) - # XXsX missing operations - def coerce(self, *args): raise NotImplementedError("space.coerce()") - def get(self, *args): raise NotImplementedError("space.get()") - def set(self, *args): raise NotImplementedError("space.set()") - def delete(self, *args): raise NotImplementedError("space.delete()") - def userdel(self, *args): raise NotImplementedError("space.userdel()") - def marshal_w(self, *args):raise NotImplementedError("space.marshal_w()") + def call_obj_args(self, w_callable, w_obj, args): + is_root(w_callable) + is_root(w_obj) + is_arguments(args) + return w_some_obj() - gettypefor = make_dummy() - gettypeobject = make_dummy() - unpackiterable = make_dummy([W_Object()], [W_Object()]) + def call(self, w_callable, w_args, w_kwds=None): + is_root(w_callable) + is_root(w_args) + is_root(w_kwds) + return w_some_obj() + def call_function(self, w_func, *args_w): + is_root(w_func) + for w_arg in list(args_w): + is_root(w_arg) + return w_some_obj() -## Register all exceptions -import exceptions -for name in ObjSpace.ExceptionTable: - exc = getattr(exceptions, name) - setattr(FakeObjSpace, 'w_' + name, W_Object()) + def call_args(self, w_func, args): + is_root(w_func) + is_arguments(args) + return w_some_obj() + + def gettypefor(self, cls): + return self.gettypeobject(cls.typedef) + + def gettypeobject(self, typedef): + assert typedef is not None + return self.fromcache(TypeCache).getorbuild(typedef) + + def unpackiterable(self, w_iterable, expected_length=-1): + is_root(w_iterable) + if expected_length < 0: + expected_length = 3 + return [w_some_obj()] * expected_length + + def allocate_instance(self, cls, w_subtype): + is_root(w_subtype) + return instantiate(cls) + allocate_instance._annspecialcase_ = "specialize:arg(1)" + 
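
Most of the methods added above follow one pattern: assert that every argument is a W_Root and answer with the most general wrapped object. The checker is driven through wrap() and translates(): wrapping an interp2app or GetSetProperty only records a deferred check in _seen_extras, and translates() annotates all of them later. A minimal usage sketch, following the same flow as the tests included further down in this changeset (the lambda is purely illustrative, not code from the diff):

    from pypy.objspace.fake.objspace import FakeObjSpace
    from pypy.interpreter.gateway import interp2app

    space = FakeObjSpace()
    # wrapping does not call the function; it only records a deferred check
    space.wrap(interp2app(lambda space: space.w_None))
    # annotate every recorded check against the fake space; annotation
    # errors in the wrapped code surface here instead of at full translation
    space.translates()
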
+ def decode_index(self, w_index_or_slice, seqlength): + is_root(w_index_or_slice) + return (NonConstant(42), NonConstant(42), NonConstant(42)) + + def decode_index4(self, w_index_or_slice, seqlength): + is_root(w_index_or_slice) + return (NonConstant(42), NonConstant(42), + NonConstant(42), NonConstant(42)) + + def exec_(self, *args, **kwds): + pass + + # ---------- + + def translates(self, func=None, argtypes=None, **kwds): + config = make_config(None, **kwds) + if func is not None: + if argtypes is None: + nb_args = func.func_code.co_argcount + argtypes = [W_Root] * nb_args + # + t = TranslationContext(config=config) + self.t = t # for debugging + ann = t.buildannotator() + if func is not None: + ann.build_types(func, argtypes, complete_now=False) + # + # annotate all _seen_extras, knowing that annotating some may + # grow the list + done = 0 + while done < len(self._seen_extras): + print self._seen_extras + ann.build_types(self._seen_extras[done], [], + complete_now=False) + done += 1 + ann.complete() + #t.viewcg() + t.buildrtyper().specialize() + t.checkgraphs() + + +def setup(): + for name in (ObjSpace.ConstantTable + + ObjSpace.ExceptionTable + + ['int', 'str', 'float', 'long', 'tuple', 'list', + 'dict', 'unicode', 'complex', 'slice', 'bool', + 'type', 'basestring']): + setattr(FakeObjSpace, 'w_' + name, w_some_obj()) + # + for (name, _, arity, _) in ObjSpace.MethodTable: + args = ['w_%d' % i for i in range(arity)] + d = {'is_root': is_root, + 'w_some_obj': w_some_obj} + exec compile2("""\ + def meth(self, %s): + %s + return w_some_obj() + """ % (', '.join(args), + '; '.join(['is_root(%s)' % arg for arg in args]))) in d + meth = func_with_new_name(d['meth'], name) + setattr(FakeObjSpace, name, meth) + # + for name in ObjSpace.IrregularOpTable: + assert hasattr(FakeObjSpace, name) # missing? 
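
To make the exec/compile2 template in setup() concrete: for an arity-2 entry of ObjSpace.MethodTable such as 'add', the generated forwarding method expands to roughly the following (shown expanded here for readability; only the name is changed afterwards by func_with_new_name):

    def meth(self, w_0, w_1):
        is_root(w_0); is_root(w_1)
        return w_some_obj()

    # and then, still inside setup(), roughly:
    #     setattr(FakeObjSpace, 'add', func_with_new_name(meth, 'add'))
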
+ +setup() + +# ____________________________________________________________ + +class TypeCache(SpaceCache): + def build(cache, typedef): + assert isinstance(typedef, TypeDef) + for value in typedef.rawdict.values(): + cache.space.wrap(value) + return w_some_obj() + +class FakeCompiler(object): + pass +FakeObjSpace.default_compiler = FakeCompiler() + +class FakeModule(object): + def get(self, name): + name + "xx" # check that it's a string + return w_some_obj() +FakeObjSpace.sys = FakeModule() +FakeObjSpace.sys.filesystemencoding = 'foobar' diff --git a/pypy/objspace/fake/test/__init__.py b/pypy/objspace/fake/test/__init__.py deleted file mode 100644 diff --git a/pypy/objspace/fake/test/test_checkmodule.py b/pypy/objspace/fake/test/test_checkmodule.py --- a/pypy/objspace/fake/test/test_checkmodule.py +++ b/pypy/objspace/fake/test/test_checkmodule.py @@ -1,7 +1,63 @@ import py -from pypy.objspace.fake.checkmodule import checkmodule +from pypy.objspace.fake.objspace import FakeObjSpace, is_root +from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter.gateway import interp2app, W_Root, ObjSpace -def test_dotnet(): - # the only module known to pass checkmodule is _dotnet so far - py.test.skip('fixme') - checkmodule('_dotnet', 'cli') + +def make_checker(): + check = [] + def see(): + check.append(True) + see._annspecialcase_ = 'specialize:memo' + return see, check + +def test_wrap_interp2app(): + see, check = make_checker() + space = FakeObjSpace() + assert len(space._seen_extras) == 0 + assert len(check) == 0 + space.wrap(interp2app(lambda space: see())) + assert len(space._seen_extras) == 1 + assert len(check) == 0 + space.translates() + assert len(check) == 1 + +def test_wrap_interp2app_int(): + see, check = make_checker() + def foobar(space, x, w_y, z): + is_root(w_y) + see() + return space.wrap(x - z) + space = FakeObjSpace() + space.wrap(interp2app(foobar, unwrap_spec=[ObjSpace, int, W_Root, int])) + space.translates() + assert check + +def test_wrap_GetSetProperty(): + see, check = make_checker() + def foobar(w_obj, space): + is_root(w_obj) + see() + return space.w_None + space = FakeObjSpace() + space.wrap(GetSetProperty(foobar)) + space.translates() + assert check + + +def test_gettypefor_untranslated(): + see, check = make_checker() + class W_Foo(Wrappable): + def do_it(self, space, w_x): + is_root(w_x) + see() + return W_Root() + W_Foo.typedef = TypeDef('foo', + __module__ = 'barmod', + do_it = interp2app(W_Foo.do_it)) + space = FakeObjSpace() + space.gettypefor(W_Foo) + assert not check + space.translates() + assert check diff --git a/pypy/objspace/fake/test/test_objspace.py b/pypy/objspace/fake/test/test_objspace.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/fake/test/test_objspace.py @@ -0,0 +1,74 @@ +import py +from pypy.objspace.fake.objspace import FakeObjSpace, W_Root +from pypy.interpreter.argument import Arguments +from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.baseobjspace import Wrappable +from pypy.rlib.unroll import unrolling_iterable + +def test_create(): + FakeObjSpace() + + +class TestTranslate: + def setup_method(self, meth): + self.space = FakeObjSpace() + + def test_simple(self): + space = self.space + space.translates(lambda w_x, w_y: space.add(w_x, w_y)) + + def test_methodtable(self): + space = self.space + for fixed_arity in [1, 2, 3, 4]: + # + methodtable = [name for (name, _, arity, _) in space.MethodTable + if arity == fixed_arity] + methodtable 
= unrolling_iterable(methodtable) + args_w = (W_Root(),) * fixed_arity + # + def f(): + for name in methodtable: + getattr(space, name)(*args_w) + # + space.translates(f) + + def test_newdict(self): + space = self.space + space.translates(lambda: (space.newdict(), + space.newdict(strdict=True))) + + def test_constants(self): + space = self.space + space.translates(lambda: (space.w_None, space.w_True, space.w_False, + space.w_int, space.w_str, + space.w_TypeError)) + + def test_wrap(self): + space = self.space + space.translates(lambda: (space.wrap(42), space.wrap(42.5), + space.wrap("foo"))) + + def test_call_args(self): + space = self.space + args = Arguments(space, [W_Root()]) + space.translates(lambda: space.call_args(W_Root(), args)) + + def test_gettypefor(self): + space = self.space + class W_Foo(Wrappable): + typedef = TypeDef("foo") + space.translates(lambda: space.gettypefor(W_Foo)) + + def test_is_true(self): + space = self.space + space.translates(lambda: space.is_true(W_Root())) + py.test.raises(AssertionError, + space.translates, lambda: space.is_true(42)) + + def test_unpackiterable(self): + space = self.space + space.translates(lambda: (space.unpackiterable(W_Root()), + space.unpackiterable(W_Root(), 42))) + + def test_newlist(self): + self.space.newlist([W_Root(), W_Root()]) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -102,6 +102,7 @@ 'instancetypedef', 'terminator', '_version_tag?', + 'name?', ] # for config.objspace.std.getattributeshortcut diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -418,7 +418,7 @@ if _MSVC: def invalid_socket(fd): return fd == INVALID_SOCKET - INVALID_SOCKET = cConfig.INVALID_SOCKET + INVALID_SOCKET = r_uint(cConfig.INVALID_SOCKET) else: def invalid_socket(fd): return fd < 0 diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -639,11 +639,11 @@ return dlsym(self.lib, name) class CDLL(RawCDLL): - def __init__(self, libname): + def __init__(self, libname, mode=-1): """Load the library, or raises DLOpenError.""" RawCDLL.__init__(self, rffi.cast(DLLHANDLE, -1)) with rffi.scoped_str2charp(libname) as ll_libname: - self.lib = dlopen(ll_libname) + self.lib = dlopen(ll_libname, mode) def __del__(self): if self.lib != rffi.cast(DLLHANDLE, -1): diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -738,3 +738,29 @@ return hop.genop('jit_marker', vlist, resulttype=lltype.Void) +def record_known_class(value, cls): + """ + Assure the JIT that value is an instance of cls. This is not a precise + class check, unlike a guard_class. 
+ """ + assert isinstance(value, cls) + + +class Entry(ExtRegistryEntry): + _about_ = record_known_class + + def compute_result_annotation(self, s_inst, s_cls): + from pypy.annotation import model as annmodel + assert s_cls.is_constant() + assert not s_inst.can_be_none() + assert isinstance(s_inst, annmodel.SomeInstance) + + def specialize_call(self, hop): + from pypy.rpython.lltypesystem import lltype, rclass + classrepr = rclass.get_type_repr(hop.rtyper) + + hop.exception_cannot_occur() + v_inst = hop.inputarg(hop.args_r[0], arg=0) + v_cls = hop.inputarg(classrepr, arg=1) + return hop.genop('jit_record_known_class', [v_inst, v_cls], + resulttype=lltype.Void) diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -393,11 +393,11 @@ # XXX: it partially duplicate the code in clibffi.py class CDLL(object): - def __init__(self, libname): + def __init__(self, libname, mode=-1): """Load the library, or raises DLOpenError.""" self.lib = rffi.cast(DLLHANDLE, 0) with rffi.scoped_str2charp(libname) as ll_libname: - self.lib = dlopen(ll_libname) + self.lib = dlopen(ll_libname, mode) def __del__(self): if self.lib: @@ -411,6 +411,10 @@ def getaddressindll(self, name): return dlsym(self.lib, name) +# These specialize.call_location's should really be specialize.arg(0), however +# you can't hash a pointer obj, which the specialize machinery wants to do. +# Given the present usage of these functions, it's good enough. + at specialize.call_location() @jit.oopspec("libffi_array_getitem(ffitype, width, addr, index, offset)") def array_getitem(ffitype, width, addr, index, offset): for TYPE, ffitype2 in clibffi.ffitype_map: @@ -420,6 +424,7 @@ return rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] assert False + at specialize.call_location() @jit.oopspec("libffi_array_setitem(ffitype, width, addr, index, offset, value)") def array_setitem(ffitype, width, addr, index, offset, value): for TYPE, ffitype2 in clibffi.ffitype_map: @@ -428,4 +433,4 @@ addr = rffi.ptradd(addr, offset) rffi.cast(rffi.CArrayPtr(TYPE), addr)[0] = value return - assert False \ No newline at end of file + assert False diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -91,9 +91,18 @@ return decorated_func + def call_location(self): + """ Specializes the function for each call site. 
+ """ + def decorated_func(func): + func._annspecialcase_ = "specialize:call_location" + return func + + return decorated_func + def _wrap(self, args): return "("+','.join([repr(arg) for arg in args]) +")" - + specialize = _Specialize() def enforceargs(*args): @@ -125,7 +134,7 @@ def __hash__(self): raise TypeError("Symbolics are not hashable!") - + def __nonzero__(self): raise TypeError("Symbolics are not comparable") @@ -155,7 +164,7 @@ def lltype(self): from pypy.rpython.lltypesystem import lltype return lltype.Signed - + malloc_zero_filled = CDefinedIntSymbolic('MALLOC_ZERO_FILLED', default=0) running_on_llinterp = CDefinedIntSymbolic('RUNNING_ON_LLINTERP', default=1) # running_on_llinterp is meant to have the value 0 in all backends @@ -221,7 +230,7 @@ def compute_result_annotation(self, s_sizehint): from pypy.annotation.model import SomeInteger - + assert isinstance(s_sizehint, SomeInteger) return self.bookkeeper.newlist() diff --git a/pypy/rlib/rdynload.py b/pypy/rlib/rdynload.py --- a/pypy/rlib/rdynload.py +++ b/pypy/rlib/rdynload.py @@ -87,9 +87,10 @@ """ if mode == -1: if RTLD_LOCAL is not None: - mode = RTLD_LOCAL | RTLD_NOW + mode = RTLD_LOCAL else: - mode = RTLD_NOW + mode = 0 + mode |= RTLD_NOW res = c_dlopen(name, rffi.cast(rffi.INT, mode)) if not res: err = dlerror() diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -426,7 +426,11 @@ low, high = _get_file_size(self.file_handle) if not high and low <= sys.maxint: return low + # not so sure if the signed/unsigned strictness is a good idea: + high = rffi.cast(lltype.Unsigned, high) + low = rffi.cast(lltype.Unsigned, low) size = (high << 32) + low + size = rffi.cast(lltype.Signed, size) elif _POSIX: st = os.fstat(self.fd) size = st[stat.ST_SIZE] diff --git a/pypy/rlib/rsocket.py b/pypy/rlib/rsocket.py --- a/pypy/rlib/rsocket.py +++ b/pypy/rlib/rsocket.py @@ -20,6 +20,7 @@ from pypy.rlib.rarithmetic import intmask, r_uint from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.rffi import sizeof, offsetof +INVALID_SOCKET = _c.INVALID_SOCKET def mallocbuf(buffersize): return lltype.malloc(rffi.CCHARP.TO, buffersize, flavor='raw') diff --git a/pypy/rlib/rwin32.py b/pypy/rlib/rwin32.py --- a/pypy/rlib/rwin32.py +++ b/pypy/rlib/rwin32.py @@ -98,8 +98,13 @@ INVALID_HANDLE_VALUE = rffi.cast(HANDLE, -1) PFILETIME = rffi.CArrayPtr(FILETIME) - GetLastError = winexternal('GetLastError', [], DWORD, threadsafe=False) - SetLastError = winexternal('SetLastError', [DWORD], lltype.Void) + _GetLastError = winexternal('GetLastError', [], DWORD, threadsafe=False) + _SetLastError = winexternal('SetLastError', [DWORD], lltype.Void) + + def GetLastError(): + return rffi.cast(lltype.Signed, _GetLastError()) + def SetLastError(err): + _SetLastError(rffi.cast(DWORD, err)) # In tests, the first call to GetLastError is always wrong, because error # is hidden by operations in ll2ctypes. Call it now. @@ -184,12 +189,12 @@ msglen = FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, None, - code, + rffi.cast(DWORD, code), DEFAULT_LANGUAGE, rffi.cast(rffi.CCHARP, buf), 0, None) - if msglen <= 2 or msglen > sys.maxint: + if msglen <= 2: # includes the case msglen < 0 return fake_FormatError(code) # FormatMessage always appends \r\n. 
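
A recurring theme in the rwin32 and rmmap hunks above is to convert values that are unsigned at the C level, such as DWORD error codes or the two halves of a 64-bit file size, to the RPython Signed type right at the FFI boundary, so that the rest of the code never mixes signed and unsigned annotations. A plain-Python sketch of the rmmap size computation; the helper name and the explicit 64-bit reinterpretation are illustrative assumptions, not code from the changeset:

    def combine_file_size(high, low):
        # the two 32-bit halves returned by the Windows file-size query
        high &= 0xFFFFFFFF
        low &= 0xFFFFFFFF
        size = (high << 32) + low      # combined with unsigned arithmetic
        if size >= 1 << 63:            # then reinterpreted as a signed machine
            size -= 1 << 64            # word, like rffi.cast(lltype.Signed, ...)
        return size

    assert combine_file_size(0, 0xFFFFFFFF) == 0xFFFFFFFF
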
diff --git a/pypy/rlib/test/test_libffi.py b/pypy/rlib/test/test_libffi.py --- a/pypy/rlib/test/test_libffi.py +++ b/pypy/rlib/test/test_libffi.py @@ -443,3 +443,4 @@ assert p[1] == 34 lltype.free(p, flavor='raw') lltype.free(ffi_point_struct, flavor='raw') + diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -548,6 +548,9 @@ def op_jit_marker(self, *args): pass + def op_jit_record_known_class(self, *args): + pass + def op_get_exception_addr(self, *args): raise NotImplementedError diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -112,7 +112,7 @@ rffi.LONGLONG: ctypes.c_longlong, rffi.ULONGLONG: ctypes.c_ulonglong, rffi.SIZE_T: ctypes.c_size_t, - lltype.Bool: getattr(ctypes, "c_bool", ctypes.c_long), + lltype.Bool: getattr(ctypes, "c_bool", ctypes.c_byte), llmemory.Address: ctypes.c_void_p, llmemory.GCREF: ctypes.c_void_p, llmemory.WeakRef: ctypes.c_void_p, # XXX diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -430,6 +430,7 @@ 'jit_force_virtual': LLOp(canrun=True), 'jit_is_virtual': LLOp(canrun=True), 'jit_force_quasi_immutable': LLOp(canrun=True), + 'jit_record_known_class' : LLOp(canrun=True), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canmallocgc=True), diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -548,6 +548,9 @@ def op_jit_force_quasi_immutable(*args): pass +def op_jit_record_known_class(x, y): + pass + def op_get_group_member(TYPE, grpptr, memberoffset): from pypy.rpython.lltypesystem import llgroup assert isinstance(memberoffset, llgroup.GroupMemberOffset) diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -1773,7 +1773,7 @@ @registering(rwin32.FormatError) def register_rwin32_FormatError(self): - return extdef([rwin32.DWORD], str, + return extdef([lltype.Signed], str, "rwin32_FormatError", llimpl=rwin32.llimpl_FormatError, ooimpl=rwin32.fake_FormatError) diff --git a/pypy/rpython/module/ll_os_stat.py b/pypy/rpython/module/ll_os_stat.py --- a/pypy/rpython/module/ll_os_stat.py +++ b/pypy/rpython/module/ll_os_stat.py @@ -12,6 +12,7 @@ from pypy.rpython.tool import rffi_platform as platform from pypy.rpython.lltypesystem.rtupletype import TUPLE_TYPE from pypy.rlib import rposix +from pypy.rlib.rarithmetic import intmask from pypy.rlib.objectmodel import specialize from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.annlowlevel import hlstr @@ -442,20 +443,19 @@ # Helper functions for win32 def make_longlong(high, low): - return (lltype.r_longlong(high) << 32) + lltype.r_longlong(low) + return (rffi.r_longlong(high) << 32) + rffi.r_longlong(low) # Seconds between 1.1.1601 and 1.1.1970 -secs_between_epochs = lltype.r_longlong(11644473600) +secs_between_epochs = rffi.r_longlong(11644473600) def FILE_TIME_to_time_t_nsec(filetime): ft = make_longlong(filetime.c_dwHighDateTime, filetime.c_dwLowDateTime) # FILETIME is in units of 100 nsec nsec = (ft % 10000000) * 100 time = (ft / 10000000) - secs_between_epochs - return time, nsec + return 
intmask(time), intmask(nsec) def time_t_to_FILE_TIME(time, filetime): - ft = lltype.r_longlong((time + secs_between_epochs) * 10000000) - filetime.c_dwHighDateTime = lltype.r_uint(ft >> 32) - filetime.c_dwLowDateTime = lltype.r_uint(ft & lltype.r_uint(-1)) - + ft = (rffi.r_longlong(time) + secs_between_epochs) * 10000000 + filetime.c_dwHighDateTime = rffi.r_uint(ft >> 32) + filetime.c_dwLowDateTime = rffi.r_uint(ft) # masking off high bits diff --git a/pypy/rpython/tool/rffi_platform.py b/pypy/rpython/tool/rffi_platform.py --- a/pypy/rpython/tool/rffi_platform.py +++ b/pypy/rpython/tool/rffi_platform.py @@ -56,6 +56,12 @@ DEFINED = DefinedConstantDouble(macro) return configure(CConfig)['DEFINED'] +def getdefinedinteger(macro, c_header_source): + class CConfig: + _compilation_info_ = eci_from_header(c_header_source) + DEFINED = DefinedConstantInteger(macro) + return configure(CConfig)['DEFINED'] + def has(name, c_header_source, include_dirs=None): class CConfig: _compilation_info_ = eci_from_header(c_header_source, include_dirs) diff --git a/pypy/rpython/tool/test/test_rffi_platform.py b/pypy/rpython/tool/test/test_rffi_platform.py --- a/pypy/rpython/tool/test/test_rffi_platform.py +++ b/pypy/rpython/tool/test/test_rffi_platform.py @@ -108,6 +108,12 @@ '#define ALFKJLKJFLKJFKLEJDLKEWMECEE') assert res +def test_defined_constant(): + res = rffi_platform.getdefineddouble('ABCDFGH', '#define ABCDFGH 2.0') + assert res == 2.0 + res = rffi_platform.getdefinedinteger('ABCDFGH', '#define ABCDFGH 2') + assert res == 2 + def test_defined_constant_float(): value = rffi_platform.getdefineddouble('BLAH', '#define BLAH 1.0') assert value == 1.0 diff --git a/pypy/translator/c/src/support.h b/pypy/translator/c/src/support.h --- a/pypy/translator/c/src/support.h +++ b/pypy/translator/c/src/support.h @@ -11,6 +11,7 @@ #endif /* MIN */ #define RUNNING_ON_LLINTERP 0 +#define OP_JIT_RECORD_KNOWN_CLASS(i, c, r) /* nothing */ #define FAIL_EXCEPTION(exc, msg) \ { \ diff --git a/pypy/translator/c/test/test_typed.py b/pypy/translator/c/test/test_typed.py --- a/pypy/translator/c/test/test_typed.py +++ b/pypy/translator/c/test/test_typed.py @@ -275,6 +275,14 @@ fn = self.getcompiled(f, [r_longlong]) assert fn(0) == 0 + def test_upcast_int(self): + from pypy.rpython.lltypesystem import rffi + def f(v): + v = rffi.cast(rffi.USHORT, v) + return intmask(v) + fn = self.getcompiled(f, [int]) + assert fn(0x1234CDEF) == 0xCDEF + def test_function_ptr(self): def f1(): return 1 diff --git a/pypy/translator/translator.py b/pypy/translator/translator.py --- a/pypy/translator/translator.py +++ b/pypy/translator/translator.py @@ -150,11 +150,11 @@ from pypy.translator.tool.graphpage import FlowGraphPage FlowGraphPage(self).display() - def viewcg(self, center_graph=None): + def viewcg(self, center_graph=None, huge=100): """Shows the whole call graph and the class hierarchy, based on the computed annotations.""" from pypy.translator.tool.graphpage import TranslatorPage - TranslatorPage(self, center_graph=center_graph).display() + TranslatorPage(self, center_graph=center_graph, huge=huge).display() From noreply at buildbot.pypy.org Thu Dec 29 09:57:30 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Dec 2011 09:57:30 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: update backend to new interface provided by compute_vars_longevity Message-ID: <20111229085730.E441082C01@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50943:4a920a79a182 Date: 2011-12-27 14:10 +0100 
http://bitbucket.org/pypy/pypy/changeset/4a920a79a182/ Log: update backend to new interface provided by compute_vars_longevity diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -14,7 +14,6 @@ operations as regalloc_operations, operations_with_guard as regalloc_operations_with_guard) from pypy.jit.backend.arm.jump import remap_frame_layout -from pypy.jit.backend.llsupport.regalloc import compute_vars_longevity from pypy.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from pypy.jit.backend.model import CompiledLoopToken from pypy.jit.codewriter import longlong @@ -277,7 +276,7 @@ self.fail_force_index = frame_loc return descr - def decode_inputargs(self, enc, regalloc): + def decode_inputargs(self, enc): locs = [] j = 0 while enc[j] != self.END_OF_LOCS: @@ -302,7 +301,7 @@ else: t = REF stack_loc = decode32(enc, j + 1) - loc = regalloc.frame_manager.frame_pos(stack_loc, t) + loc = ARMFrameManager.frame_pos(stack_loc, t) j += 4 else: # REG_LOC if res_type == self.FLOAT_TYPE: @@ -509,7 +508,8 @@ mc.SUB_ri(r.r5.value, r.r4.value, imm=2 * WORD) # ADD r5, r4 [2*WORD] mc.STR_ri(r.r5.value, r.ip.value) - def gen_bootstrap_code(self, nonfloatlocs, floatlocs, inputargs): + def gen_bootstrap_code(self, arglocs, inputargs): + nonfloatlocs, floatlocs = arglocs for i in range(len(nonfloatlocs)): loc = nonfloatlocs[i] if loc is None: @@ -636,24 +636,21 @@ # cpu interface def assemble_loop(self, inputargs, operations, looptoken, log): - clt = CompiledLoopToken(self.cpu, looptoken.number) clt.allgcrefs = [] looptoken.compiled_loop_token = clt operations = self.setup(looptoken, operations) self._dump(operations) - longevity = compute_vars_longevity(inputargs, operations) - regalloc = Regalloc(longevity, assembler=self, - frame_manager=ARMFrameManager()) self.align() self.gen_func_prolog() sp_patch_location = self._prepare_sp_patch_position() - nonfloatlocs, floatlocs = regalloc.prepare_loop(inputargs, - operations, looptoken) - self.gen_bootstrap_code(nonfloatlocs, floatlocs, inputargs) - looptoken._arm_arglocs = [nonfloatlocs, floatlocs] + + regalloc = Regalloc(assembler=self, frame_manager=ARMFrameManager()) + arglocs = regalloc.prepare_loop(inputargs, operations) + self.gen_bootstrap_code(arglocs, inputargs) + looptoken._arm_arglocs = arglocs loop_head = self.mc.currpos() looptoken._arm_loop_code = loop_head @@ -689,15 +686,15 @@ assert isinstance(faildescr, AbstractFailDescr) code = faildescr._failure_recovery_code enc = rffi.cast(rffi.CCHARP, code) - longevity = compute_vars_longevity(inputargs, operations) - regalloc = Regalloc(longevity, assembler=self, - frame_manager=ARMFrameManager()) + frame_depth = faildescr._arm_frame_depth + arglocs = self.decode_inputargs(enc) + if not we_are_translated(): + assert len(inputargs) == len(arglocs) + + regalloc = Regalloc(assembler=self, frame_manager=ARMFrameManager()) + regalloc.prepare_bridge(frame_depth, inputargs, arglocs, operations) sp_patch_location = self._prepare_sp_patch_position() - frame_depth = faildescr._arm_frame_depth - locs = self.decode_inputargs(enc, regalloc) - assert len(inputargs) == len(locs) - regalloc.update_bindings(locs, frame_depth, inputargs) self._walk_operations(operations, regalloc) diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -1,5 +1,5 @@ from pypy.jit.backend.llsupport.regalloc import 
FrameManager, \ - RegisterManager, TempBox, compute_loop_consts + RegisterManager, TempBox, compute_vars_longevity from pypy.jit.backend.arm import registers as r from pypy.jit.backend.arm import locations from pypy.jit.backend.arm.locations import imm @@ -178,13 +178,10 @@ class Regalloc(object): - def __init__(self, longevity, frame_manager=None, assembler=None): + def __init__(self, frame_manager=None, assembler=None): self.cpu = assembler.cpu - self.longevity = longevity + self.assembler = assembler self.frame_manager = frame_manager - self.assembler = assembler - self.vfprm = VFPRegisterManager(longevity, frame_manager, assembler) - self.rm = ARMv7RegisterMananger(longevity, frame_manager, assembler) def loc(self, var): if var.type == FLOAT: @@ -281,15 +278,31 @@ assert isinstance(value, ConstFloat) return self.vfprm.convert_to_imm(value) - def prepare_loop(self, inputargs, operations, looptoken): - loop_consts = compute_loop_consts(inputargs, operations[-1], looptoken) + def _prepare(self, inputargs, operations): + longevity, useful = compute_vars_longevity(inputargs, operations) + self.longevity = longevity + fm = self.frame_manager + asm = self.assembler + self.vfprm = VFPRegisterManager(longevity, fm, asm) + self.rm = ARMv7RegisterMananger(longevity, fm, asm) + return useful + + def prepare_loop(self, inputargs, operations): + useful = self._prepare(inputargs, operations) + return self._process_inputargs(inputargs, useful) + + def prepare_bridge(self, frame_depth, inputargs, arglocs, ops): + self._prepare(inputargs, ops) + self._update_bindings(arglocs, frame_depth, inputargs) + + def _process_inputargs(self, inputargs, useful): floatlocs = [None] * len(inputargs) nonfloatlocs = [None] * len(inputargs) for i in range(len(inputargs)): arg = inputargs[i] assert not isinstance(arg, Const) loc = inputargs[i] - if arg not in loop_consts and self.longevity[arg][1] > -1: + if self.longevity[arg][1] > -1 and arg in useful: self.try_allocate_reg(loc) loc = self.loc(arg) @@ -300,7 +313,7 @@ self.possibly_free_vars(list(inputargs)) return nonfloatlocs, floatlocs - def update_bindings(self, locs, frame_depth, inputargs): + def _update_bindings(self, locs, frame_depth, inputargs): used = {} i = 0 self.frame_manager.frame_depth = frame_depth diff --git a/pypy/jit/backend/arm/test/test_regalloc.py b/pypy/jit/backend/arm/test/test_regalloc.py --- a/pypy/jit/backend/arm/test/test_regalloc.py +++ b/pypy/jit/backend/arm/test/test_regalloc.py @@ -3,14 +3,12 @@ """ import py -from pypy.jit.metainterp.history import BoxInt, ConstInt,\ - BoxPtr, ConstPtr, LoopToken, BasicFailDescr -from pypy.jit.metainterp.resoperation import rop, ResOperation +from pypy.jit.metainterp.history import BasicFailDescr from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.detect_cpu import getcpuclass -from pypy.jit.backend.arm.regalloc import Regalloc +from pypy.jit.backend.arm.regalloc import Regalloc, ARMFrameManager from pypy.jit.tool.oparser import parse -from pypy.rpython.lltypesystem import lltype, llmemory, rffi +from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.annlowlevel import llhelper from pypy.rpython.lltypesystem import rclass, rstr from pypy.jit.codewriter.effectinfo import EffectInfo @@ -19,6 +17,8 @@ CPU = getcpuclass() + + class MockGcDescr(GcCache): def get_funcptr_for_new(self): return 123 @@ -29,6 +29,7 @@ def rewrite_assembler(self, cpu, operations): pass + class MockAssembler(object): gcrefs = None _float_constants = None @@ -60,11 +61,14 @@ def 
load_effective_addr(self, *args): self.lea.append(args) + class RegAllocForTests(Regalloc): position = 0 + def _compute_next_usage(self, v, _): return -1 + class BaseTestRegalloc(object): cpu = CPU(None, None) cpu.setup_once() @@ -138,6 +142,13 @@ self.cpu.execute_token(loop.token) return loop + def prepare_loop(self, ops): + loop = self.parse(ops) + regalloc = Regalloc(assembler=self.cpu.assembler, + frame_manager=ARMFrameManager()) + regalloc.prepare_loop(loop.inputargs, loop.operations) + return regalloc + def getint(self, index): return self.cpu.get_latest_value_int(index) @@ -411,6 +422,34 @@ self.run(loop) assert self.getints(9) == range(9) + def test_loopargs(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_2(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + finish(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + + def test_loopargs_3(self): + ops = """ + [i0, i1, i2, i3] + i4 = int_add(i0, i1) + guard_true(i4) [i0, i1, i2, i3, i4] + jump(i4, i1, i2, i3) + """ + regalloc = self.prepare_loop(ops) + assert len(regalloc.rm.reg_bindings) == 2 + class TestRegallocCompOps(BaseTestRegalloc): def test_cmp_op_0(self): diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -1,4 +1,4 @@ - +import os from pypy.jit.metainterp.history import Const, Box, REF from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.resoperation import rop @@ -393,7 +393,7 @@ """ Platform specific - Allocates a temporary register """ raise NotImplementedError("Abstract") -def _compute_vars_longevity(self, inputargs, operations): +def compute_vars_longevity(inputargs, operations): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" @@ -443,16 +443,6 @@ assert len(last_used) == 0 return longevity, useful -def compute_loop_consts(inputargs, jump, looptoken): - if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken: - loop_consts = {} - else: - loop_consts = {} - for i in range(len(inputargs)): - if inputargs[i] is jump.getarg(i): - loop_consts[inputargs[i]] = i - return loop_consts - def not_implemented(msg): os.write(2, '[llsupport/regalloc] %s\n' % msg) From noreply at buildbot.pypy.org Thu Dec 29 09:57:33 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Dec 2011 09:57:33 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: merge default up to 6fb87770b5d2 Message-ID: <20111229085733.1D54C82C01@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50944:561c9d8112aa Date: 2011-12-27 14:27 +0100 http://bitbucket.org/pypy/pypy/changeset/561c9d8112aa/ Log: merge default up to 6fb87770b5d2 diff --git a/lib_pypy/distributed/socklayer.py b/lib_pypy/distributed/socklayer.py --- a/lib_pypy/distributed/socklayer.py +++ b/lib_pypy/distributed/socklayer.py @@ -2,7 +2,7 @@ import py from socket import socket -XXX needs import adaptation as 'green' is removed from py lib for years +raise ImportError("XXX needs import adaptation as 'green' is removed from py lib for years") from py.impl.green.msgstruct import decodemessage, message from socket import socket, AF_INET, SOCK_STREAM import marshal diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- 
a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -252,6 +252,10 @@ "use small tuples", default=False), + BoolOption("withspecialisedtuple", + "use specialised tuples", + default=False), + BoolOption("withrope", "use ropes as the string implementation", default=False, requires=[("objspace.std.withstrslice", False), @@ -365,6 +369,7 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) + config.objspace.std.suggest(withspecialisedtuple=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -496,6 +496,17 @@ def setup(self): super(AppClassCollector, self).setup() cls = self.obj + # + # + for name in dir(cls): + if name.startswith('test_'): + func = getattr(cls, name, None) + code = getattr(func, 'func_code', None) + if code and code.co_flags & 32: + raise AssertionError("unsupported: %r is a generator " + "app-level test method" % (name,)) + # + # space = cls.space clsname = cls.__name__ if self.config.option.runappdirect: diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.6' +version = '1.7' # The full version, including alpha/beta/rc tags. -release = '1.6' +release = '1.7' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -112,10 +112,32 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. +Note that the JIT has a very high warm-up cost, meaning that the +programs are slow at the beginning. If you want to compare the timings +with CPython, even relatively simple programs need to run *at least* one +second, preferrably at least a few seconds. Large, complicated programs +need even more time to warm-up the JIT. + .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +--------------------------------------------------------------- +Couldn't the JIT dump and reload already-compiled machine code? +--------------------------------------------------------------- + +No, we found no way of doing that. The JIT generates machine code +containing a large number of constant addresses --- constant at the time +the machine code is written. The vast majority is probably not at all +constants that you find in the executable, with a nice link name. E.g. +the addresses of Python classes are used all the time, but Python +classes don't come statically from the executable; they are created anew +every time you restart your program. This makes saving and reloading +machine code completely impossible without some very advanced way of +mapping addresses in the old (now-dead) process to addresses in the new +process, including checking that all the previous assumptions about the +(now-dead) object are still true about the new object. + .. _`prolog and javascript`: diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -487,6 +487,16 @@ 'parser', 'fcntl', '_codecs', 'binascii' ] + # These modules are treated like CPython treats built-in modules, + # i.e. they always shadow any xx.py. 
The other modules are treated + # like CPython treats extension modules, and are loaded in sys.path + # order by the fake entry '.../lib_pypy/__extensions__'. + MODULES_THAT_ALWAYS_SHADOW = dict.fromkeys([ + '__builtin__', '__pypy__', '_ast', '_codecs', '_sre', '_warnings', + '_weakref', 'errno', 'exceptions', 'gc', 'imp', 'marshal', + 'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport', + ], None) + def make_builtins(self): "NOT_RPYTHON: only for initializing the space." diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -261,8 +261,10 @@ return fail_index def execute_token(self, loop_token): - """Calls the assembler generated for the given loop. - Returns the ResOperation that failed, of type rop.FAIL. + """Calls the fake 'assembler' generated for the given loop. + Returns the descr of the last executed operation: either the one + attached to the failing guard, or the one attached to the FINISH. + Use set_future_value_xxx() before, and get_latest_value_xxx() after. """ fail_index = self._execute_token(loop_token) return self.get_fail_descr_from_number(fail_index) diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -17,32 +17,101 @@ """ Manage frame positions """ def __init__(self): - self.frame_bindings = {} - self.frame_depth = 0 + self.bindings = {} + self.used = [] # list of bools + self.hint_frame_locations = {} + + frame_depth = property(lambda:xxx, lambda:xxx) # XXX kill me + + def get_frame_depth(self): + return len(self.used) def get(self, box): - return self.frame_bindings.get(box, None) + return self.bindings.get(box, None) def loc(self, box): - res = self.get(box) - if res is not None: - return res + """Return or create the frame location associated with 'box'.""" + # first check if it's already in the frame_manager + try: + return self.bindings[box] + except KeyError: + pass + # check if we have a hint for this box + if box in self.hint_frame_locations: + # if we do, try to reuse the location for this box + loc = self.hint_frame_locations[box] + if self.try_to_reuse_location(box, loc): + return loc + # no valid hint. make up a new free location + return self.get_new_loc(box) + + def get_new_loc(self, box): size = self.frame_size(box.type) - self.frame_depth += ((-self.frame_depth) & (size-1)) - # ^^^ frame_depth is rounded up to a multiple of 'size', assuming + # frame_depth is rounded up to a multiple of 'size', assuming # that 'size' is a power of two. The reason for doing so is to # avoid obscure issues in jump.py with stack locations that try # to move from position (6,7) to position (7,8). 
- newloc = self.frame_pos(self.frame_depth, box.type) - self.frame_bindings[box] = newloc - self.frame_depth += size + while self.get_frame_depth() & (size - 1): + self.used.append(False) + # + index = self.get_frame_depth() + newloc = self.frame_pos(index, box.type) + for i in range(size): + self.used.append(True) + # + if not we_are_translated(): # extra testing + testindex = self.get_loc_index(newloc) + assert testindex == index + # + self.bindings[box] = newloc return newloc + def set_binding(self, box, loc): + self.bindings[box] = loc + # + index = self.get_loc_index(loc) + endindex = index + self.frame_size(box.type) + while len(self.used) < endindex: + self.used.append(False) + while index < endindex: + self.used[index] = True + index += 1 + def reserve_location_in_frame(self, size): - frame_depth = self.frame_depth - self.frame_depth += size + frame_depth = self.get_frame_depth() + for i in range(size): + self.used.append(True) return frame_depth + def mark_as_free(self, box): + try: + loc = self.bindings[box] + except KeyError: + return # already gone + del self.bindings[box] + # + size = self.frame_size(box.type) + baseindex = self.get_loc_index(loc) + for i in range(size): + index = baseindex + i + assert 0 <= index < len(self.used) + self.used[index] = False + + def try_to_reuse_location(self, box, loc): + index = self.get_loc_index(loc) + assert index >= 0 + size = self.frame_size(box.type) + for i in range(size): + while (index + i) >= len(self.used): + self.used.append(False) + if self.used[index + i]: + return False # already in use + # good, we can reuse the location + for i in range(size): + self.used[index + i] = True + self.bindings[box] = loc + return True + # abstract methods that need to be overwritten for specific assemblers @staticmethod def frame_pos(loc, type): @@ -50,6 +119,10 @@ @staticmethod def frame_size(type): return 1 + @staticmethod + def get_loc_index(loc): + raise NotImplementedError("Purely abstract") + class RegisterManager(object): """ Class that keeps track of register allocations @@ -70,7 +143,14 @@ self.frame_manager = frame_manager self.assembler = assembler + def is_still_alive(self, v): + # Check if 'v' is alive at the current position. + # Return False if the last usage is strictly before. + return self.longevity[v][1] >= self.position + def stays_alive(self, v): + # Check if 'v' stays alive after the current position. + # Return False if the last usage is before or at position. return self.longevity[v][1] > self.position def next_instruction(self, incr=1): @@ -86,11 +166,14 @@ point for all variables that might be in registers. """ self._check_type(v) - if isinstance(v, Const) or v not in self.reg_bindings: + if isinstance(v, Const): return if v not in self.longevity or self.longevity[v][1] <= self.position: - self.free_regs.append(self.reg_bindings[v]) - del self.reg_bindings[v] + if v in self.reg_bindings: + self.free_regs.append(self.reg_bindings[v]) + del self.reg_bindings[v] + if self.frame_manager is not None: + self.frame_manager.mark_as_free(v) def possibly_free_vars(self, vars): """ Same as 'possibly_free_var', but for all v in vars. 
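
Summarizing the rewritten FrameManager above: it keeps one bool per frame slot in 'used' plus an optional per-box hint, and loc() first returns an existing binding, then tries the hinted slot if it is still free, and only then allocates a fresh slot at the end of the frame. A compressed, plain-Python model of that policy (single-word boxes only, made-up names, no frame_pos/get_loc_index details):

    class TinyFrameManager:
        def __init__(self):
            self.bindings = {}   # box -> slot index
            self.used = []       # one bool per frame slot
            self.hints = {}      # box -> preferred slot index

        def loc(self, box):
            if box in self.bindings:
                return self.bindings[box]
            idx = self.hints.get(box, -1)
            if idx < 0 or (idx < len(self.used) and self.used[idx]):
                idx = len(self.used)          # no usable hint: take a new slot
            while len(self.used) <= idx:
                self.used.append(False)
            self.used[idx] = True
            self.bindings[box] = idx
            return idx

        def mark_as_free(self, box):
            idx = self.bindings.pop(box, None)
            if idx is not None:
                self.used[idx] = False

The hints themselves are filled in by the x86 backend further down (compute_hint_frame_locations) from the JUMP at the end of a loop, so a box that has to be spilled tends to land directly in the stack slot the jump target expects, which saves a move when closing the loop.
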
diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py --- a/pypy/jit/backend/llsupport/test/test_regalloc.py +++ b/pypy/jit/backend/llsupport/test/test_regalloc.py @@ -42,8 +42,13 @@ def frame_size(self, box_type): if box_type == FLOAT: return 2 + elif box_type == INT: + return 1 else: - return 1 + raise ValueError(box_type) + def get_loc_index(self, loc): + assert isinstance(loc, FakeFramePos) + return loc.pos class MockAsm(object): def __init__(self): @@ -282,7 +287,7 @@ rm.force_allocate_reg(b) rm.before_call() assert len(rm.reg_bindings) == 2 - assert fm.frame_depth == 2 + assert fm.get_frame_depth() == 2 assert len(asm.moves) == 2 rm._check_invariants() rm.after_call(boxes[-1]) @@ -305,7 +310,7 @@ rm.force_allocate_reg(b) rm.before_call(save_all_regs=True) assert len(rm.reg_bindings) == 0 - assert fm.frame_depth == 4 + assert fm.get_frame_depth() == 4 assert len(asm.moves) == 4 rm._check_invariants() rm.after_call(boxes[-1]) @@ -327,7 +332,7 @@ xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm) xrm.loc(f0) rm.loc(b0) - assert fm.frame_depth == 3 + assert fm.get_frame_depth() == 3 @@ -348,3 +353,123 @@ spilled2 = rm.force_allocate_reg(b5) assert spilled2 is loc rm._check_invariants() + + + def test_hint_frame_locations_1(self): + b0, = newboxes(0) + fm = TFrameManager() + loc123 = FakeFramePos(123, INT) + fm.hint_frame_locations[b0] = loc123 + assert fm.get_frame_depth() == 0 + loc = fm.loc(b0) + assert loc == loc123 + assert fm.get_frame_depth() == 124 + + def test_hint_frame_locations_2(self): + b0, b1, b2 = newboxes(0, 1, 2) + longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)} + fm = TFrameManager() + asm = MockAsm() + rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) + rm.force_allocate_reg(b0) + rm.force_allocate_reg(b1) + rm.force_allocate_reg(b2) + rm.force_spill_var(b0) + loc = rm.loc(b0) + assert isinstance(loc, FakeFramePos) + assert fm.get_loc_index(loc) == 0 + rm.position = 1 + assert fm.used == [True] + rm.possibly_free_var(b0) + assert fm.used == [False] + # + fm.hint_frame_locations[b1] = loc + rm.force_spill_var(b1) + loc1 = rm.loc(b1) + assert loc1 == loc + assert fm.used == [True] + # + fm.hint_frame_locations[b2] = loc + rm.force_spill_var(b2) + loc2 = rm.loc(b2) + assert loc2 != loc1 # because it was not free + assert fm.used == [True, True] + # + rm._check_invariants() + + def test_frame_manager_basic(self): + b0, b1 = newboxes(0, 1) + fm = TFrameManager() + loc0 = fm.loc(b0) + assert fm.get_loc_index(loc0) == 0 + # + assert fm.get(b1) is None + loc1 = fm.loc(b1) + assert fm.get_loc_index(loc1) == 1 + assert fm.get(b1) == loc1 + # + loc0b = fm.loc(b0) + assert loc0b == loc0 + # + fm.loc(BoxInt()) + assert fm.get_frame_depth() == 3 + # + f0 = BoxFloat() + locf0 = fm.loc(f0) + assert fm.get_loc_index(locf0) == 4 + assert fm.get_frame_depth() == 6 + # + f1 = BoxFloat() + locf1 = fm.loc(f1) + assert fm.get_loc_index(locf1) == 6 + assert fm.get_frame_depth() == 8 + assert fm.used == [True, True, True, False, True, True, True, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, True, True] + fm.mark_as_free(f1) + assert fm.used == [False, True, True, False, True, True, False, False] + # + fm.reserve_location_in_frame(1) + assert fm.get_frame_depth() == 9 + assert fm.used == [False, True, True, False, True, True, False, False, True] + # + assert b0 not in 
fm.bindings + fm.set_binding(b0, loc0) + assert b0 in fm.bindings + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + b3 = BoxInt() + assert not fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(b3, loc0) + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b0) # already free + assert fm.used == [True, True, True, False, True, True, False, False, True] + # + fm.mark_as_free(b3) + assert fm.used == [False, True, True, False, True, True, False, False, True] + f3 = BoxFloat() + assert not fm.try_to_reuse_location(f3, fm.frame_pos(0, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(1, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(2, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(3, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(4, FLOAT)) + assert not fm.try_to_reuse_location(f3, fm.frame_pos(5, FLOAT)) + assert fm.used == [False, True, True, False, True, True, False, False, True] + assert fm.try_to_reuse_location(f3, fm.frame_pos(6, FLOAT)) + assert fm.used == [False, True, True, False, True, True, True, True, True] + # + fm.used = [False] + assert fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True, True] + # + fm.used = [True] + assert not fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT)) + assert fm.used == [True] diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -690,10 +690,11 @@ def _assemble(self, regalloc, operations): self._regalloc = regalloc + regalloc.compute_hint_frame_locations(operations) regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging - frame_depth = regalloc.fm.frame_depth + frame_depth = regalloc.fm.get_frame_depth() param_depth = regalloc.param_depth jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -138,6 +138,10 @@ return 2 else: return 1 + @staticmethod + def get_loc_index(loc): + assert isinstance(loc, StackLoc) + return loc.position if WORD == 4: gpr_reg_mgr_cls = X86RegisterManager @@ -184,7 +188,6 @@ allgcrefs): operations, _ = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) - self.fm.frame_depth = prev_depths[0] self.param_depth = prev_depths[1] return operations @@ -297,7 +300,7 @@ self.xrm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) else: if isinstance(loc, RegLoc): if loc is ebp: @@ -306,7 +309,7 @@ self.rm.reg_bindings[arg] = loc used[loc] = None else: - self.fm.frame_bindings[arg] = loc + self.fm.set_binding(arg, loc) self.rm.free_regs = [] for reg in self.rm.all_regs: if reg not in used: @@ -342,7 +345,7 @@ def get_current_depth(self): # return (self.fm.frame_depth, self.param_depth), but trying to share # the resulting tuple among several calls - arg0 = self.fm.frame_depth + arg0 = self.fm.get_frame_depth() arg1 = self.param_depth result = 
self.assembler._current_depths_cache if result[0] != arg0 or result[1] != arg1: @@ -1259,6 +1262,29 @@ self.rm.possibly_free_var(tmpbox_low) self.rm.possibly_free_var(tmpbox_high) + def compute_hint_frame_locations(self, operations): + # optimization only: fill in the 'hint_frame_locations' dictionary + # of rm and xrm based on the JUMP at the end of the loop, by looking + # at where we would like the boxes to be after the jump. + op = operations[-1] + if op.getopnum() != rop.JUMP: + return + descr = op.getdescr() + assert isinstance(descr, LoopToken) + nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr) + for i in range(op.numargs()): + box = op.getarg(i) + if isinstance(box, Box): + loc = nonfloatlocs[i] + if isinstance(loc, StackLoc): + assert box.type != FLOAT + self.fm.hint_frame_locations[box] = loc + else: + loc = floatlocs[i] + if isinstance(loc, StackLoc): + assert box.type == FLOAT + self.fm.hint_frame_locations[box] = loc + def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None @@ -1303,7 +1329,7 @@ def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): shape = gcrootmap.get_basic_shape(IS_X86_64) - for v, val in self.fm.frame_bindings.items(): + for v, val in self.fm.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) gcrootmap.add_frame_offset(shape, get_ebp_ofs(val.position)) diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -42,6 +42,7 @@ i5 = int_add(i4, 1) i6 = int_add(i5, 1) i7 = int_add(i5, i4) + force_spill(i5) i8 = int_add(i7, 1) i9 = int_add(i8, 1) finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2) @@ -49,10 +50,9 @@ bridge = self.attach_bridge(ops, loop, -2) descr = loop.operations[2].getdescr() new = descr._x86_bridge_frame_depth - assert descr._x86_bridge_param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? - if IS_X86_32: - assert new > previous + assert descr._x86_bridge_param_depth == 0 + # the force_spill() forces the stack to grow + assert new > previous self.cpu.set_future_value_int(0, 0) fail = self.run(loop) assert fail.identifier == 2 @@ -104,6 +104,9 @@ i8 = int_add(i3, 1) i6 = int_add(i8, i10) i7 = int_add(i3, i6) + force_spill(i6) + force_spill(i7) + force_spill(i8) i12 = int_add(i7, i8) i11 = int_add(i12, i6) jump(i3, i12, i11, i10, i6, i7, descr=looptoken) @@ -112,9 +115,8 @@ guard_op = loop.operations[5] loop_frame_depth = loop.token._x86_frame_depth assert loop.token._x86_param_depth == 0 - # XXX: Maybe add enough ops to force stack on 64-bit as well? 
- if IS_X86_32: - assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth + # the force_spill() forces the stack to grow + assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth assert guard_op.getdescr()._x86_bridge_param_depth == 0 self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 0) diff --git a/pypy/jit/metainterp/test/test_del.py b/pypy/jit/metainterp/test/test_del.py --- a/pypy/jit/metainterp/test/test_del.py +++ b/pypy/jit/metainterp/test/test_del.py @@ -1,5 +1,7 @@ import py -from pypy.rlib.jit import JitDriver +from pypy.rlib.jit import JitDriver, dont_look_inside +from pypy.rlib.objectmodel import keepalive_until_here +from pypy.rlib import rgc from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin @@ -80,6 +82,47 @@ assert res == 1 self.check_resops(call=1) # for the case B(), but not for the case A() + def test_keepalive(self): + py.test.skip("XXX fails") # hum, I think the test itself is broken + # + mydriver = JitDriver(reds = ['n', 'states'], greens = []) + class State: + num = 1 + class X: + def __init__(self, state): + self.state = state + def __del__(self): + self.state.num += 1 + @dont_look_inside + def do_stuff(): + pass + def f(n): + states = [] + while n > 0: + mydriver.jit_merge_point(n=n, states=states) + state = State() + states.append(state) + x = X(state) + do_stuff() + state.num *= 1000 + do_stuff() + keepalive_until_here(x) + n -= 1 + return states + def main(n): + states = f(n) + rgc.collect() + rgc.collect() + err = 1001 + for state in states: + if state.num != 1001: + err = state.num + print 'ERROR:', err + return err + assert main(20) == 1001 + res = self.meta_interp(main, [20]) + assert res == 1001 + class TestLLtype(DelTests, LLJitMixin): def test_signal_action(self): diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -1,3 +1,4 @@ +from __future__ import with_statement import py from pypy.jit.metainterp.test.support import LLJitMixin diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -2,13 +2,15 @@ pypyjit.set_param(threshold=200) +def g(*args): + return len(args) + def f(n): - pairs = [(0.0, 1.0), (2.0, 3.0)] * n - mag = 0 - for (x1, x2) in pairs: - dx = x1 - x2 - mag += ((dx * dx ) ** (-1.5)) - return n + s = 0 + for i in range(n): + l = [i, n, 2] + s += g(*l) + return s try: print f(301) diff --git a/pypy/module/_continuation/test/test_translated.py b/pypy/module/_continuation/test/test_translated.py --- a/pypy/module/_continuation/test/test_translated.py +++ b/pypy/module/_continuation/test/test_translated.py @@ -93,13 +93,20 @@ if not option.runappdirect: py.test.skip("meant only for -A run") - def test_single_threaded(self): - for i in range(20): - yield Runner().run_test, - - def test_multi_threaded(self): - for i in range(5): - yield multithreaded_test, +def _setup(): + for _i in range(20): + def test_single_threaded(self): + Runner().run_test() + test_single_threaded.func_name = 'test_single_threaded_%d' % _i + setattr(AppTestWrapper, test_single_threaded.func_name, + test_single_threaded) + for _i in range(5): + def test_multi_threaded(self): + multithreaded_test() + test_multi_threaded.func_name = 'test_multi_threaded_%d' % _i + setattr(AppTestWrapper, test_multi_threaded.func_name, + test_multi_threaded) +_setup() class ThreadTest(object): def 
__init__(self, lock): diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py --- a/pypy/module/_hashlib/interp_hashlib.py +++ b/pypy/module/_hashlib/interp_hashlib.py @@ -21,11 +21,11 @@ class W_Hash(Wrappable): ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO) - _block_size = -1 def __init__(self, space, name): self.name = name - self.digest_size = self.compute_digest_size() + digest_type = self.digest_type_by_name(space) + self.digest_size = rffi.getintfield(digest_type, 'c_md_size') # Allocate a lock for each HASH object. # An optimization would be to not release the GIL on small requests, @@ -34,21 +34,22 @@ ctx = lltype.malloc(ropenssl.EVP_MD_CTX.TO, flavor='raw') rgc.add_memory_pressure(HASH_MALLOC_SIZE + self.digest_size) + ropenssl.EVP_DigestInit(ctx, digest_type) self.ctx = ctx - def initdigest(self, space, name): - digest = ropenssl.EVP_get_digestbyname(name) - if not digest: - raise OperationError(space.w_ValueError, - space.wrap("unknown hash function")) - ropenssl.EVP_DigestInit(self.ctx, digest) - def __del__(self): # self.lock.free() if self.ctx: ropenssl.EVP_MD_CTX_cleanup(self.ctx) lltype.free(self.ctx, flavor='raw') + def digest_type_by_name(self, space): + digest_type = ropenssl.EVP_get_digestbyname(self.name) + if not digest_type: + raise OperationError(space.w_ValueError, + space.wrap("unknown hash function")) + return digest_type + def descr_repr(self, space): addrstring = self.getaddrstring(space) return space.wrap("<%s HASH object at 0x%s>" % ( @@ -87,7 +88,9 @@ return space.wrap(self.digest_size) def get_block_size(self, space): - return space.wrap(self.compute_block_size()) + digest_type = self.digest_type_by_name(space) + block_size = rffi.getintfield(digest_type, 'c_block_size') + return space.wrap(block_size) def _digest(self, space): with lltype.scoped_alloc(ropenssl.EVP_MD_CTX.TO) as ctx: @@ -99,36 +102,6 @@ ropenssl.EVP_MD_CTX_cleanup(ctx) return rffi.charpsize2str(digest, digest_size) - def compute_digest_size(self): - # XXX This isn't the nicest way, but the EVP_MD_size OpenSSL - # XXX function is defined as a C macro on OS X and would be - # XXX significantly harder to implement in another way. - # Values are digest sizes in bytes - return { - 'md5': 16, 'MD5': 16, - 'sha1': 20, 'SHA1': 20, - 'sha224': 28, 'SHA224': 28, - 'sha256': 32, 'SHA256': 32, - 'sha384': 48, 'SHA384': 48, - 'sha512': 64, 'SHA512': 64, - }.get(self.name, 0) - - def compute_block_size(self): - if self._block_size != -1: - return self._block_size - # XXX This isn't the nicest way, but the EVP_MD_CTX_block_size - # XXX OpenSSL function is defined as a C macro on some systems - # XXX and would be significantly harder to implement in - # XXX another way. 
- self._block_size = { - 'md5': 64, 'MD5': 64, - 'sha1': 64, 'SHA1': 64, - 'sha224': 64, 'SHA224': 64, - 'sha256': 64, 'SHA256': 64, - 'sha384': 128, 'SHA384': 128, - 'sha512': 128, 'SHA512': 128, - }.get(self.name, 0) - return self._block_size W_Hash.typedef = TypeDef( 'HASH', @@ -142,11 +115,11 @@ digestsize=GetSetProperty(W_Hash.get_digest_size), block_size=GetSetProperty(W_Hash.get_block_size), ) +W_Hash.acceptable_as_base_class = False @unwrap_spec(name=str, string='bufferstr') def new(space, name, string=''): w_hash = W_Hash(space, name) - w_hash.initdigest(space, name) w_hash.update(space, string) return space.wrap(w_hash) @@ -158,6 +131,6 @@ return new(space, name, string) return new_hash -for name in algorithms: - newname = 'new_%s' % (name,) - globals()[newname] = make_new_hash(name, newname) +for _name in algorithms: + _newname = 'new_%s' % (_name,) + globals()[_newname] = make_new_hash(_name, _newname) diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py --- a/pypy/module/_hashlib/test/test_hashlib.py +++ b/pypy/module/_hashlib/test/test_hashlib.py @@ -79,3 +79,28 @@ assert h.digest() == _hashlib.openssl_md5('x' * 20).digest() _hashlib.openssl_sha1(b).digest() + def test_extra_algorithms(self): + expected_results = { + "md5": "bb649c83dd1ea5c9d9dec9a18df0ffe9", + "md4": "c275b8454684ea416b93d7a418b43176", + "mdc2": None, # XXX find the correct expected value + "sha": "e2b0a8609b47c58e5d984c9ccfe69f9b654b032b", + "ripemd160": "cc4a5ce1b3df48aec5d22d1f16b894a0b894eccc", + "whirlpool": ("1a22b79fe5afda02c63a25927193ed01dc718b74" + "026e597608ce431f9c3d2c9e74a7350b7fbb7c5d" + "4effe5d7a31879b8b7a10fd2f544c4ca268ecc6793923583"), + } + import _hashlib + test_string = "Nobody inspects the spammish repetition" + for hash_name, expected in sorted(expected_results.items()): + try: + m = _hashlib.new(hash_name) + except ValueError, e: + print 'skipped %s: %s' % (hash_name, e) + continue + m.update(test_string) + got = m.hexdigest() + assert got and type(got) is str and len(got) % 2 == 0 + got.decode('hex') + if expected is not None: + assert got == expected diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -864,6 +864,7 @@ elif sys.platform.startswith('linux'): compile_extra.append("-Werror=implicit-function-declaration") export_symbols_eci.append('pypyAPI') + compile_extra.append('-g') else: kwds["includes"] = ['Python.h'] # this is our Python.h diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -14,7 +14,6 @@ METH_VARARGS, build_type_checkers, PyObjectFields, bootstrap_function) from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.rlib.objectmodel import we_are_translated -from pypy.objspace.std.tupleobject import W_TupleObject PyCFunction_typedef = rffi.COpaquePtr(typedef='PyCFunction') PyCFunction = lltype.Ptr(lltype.FuncType([PyObject, PyObject], PyObject)) diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -42,11 +42,11 @@ which case o is returned. Use PySequence_Fast_GET_ITEM() to access the members of the result. Returns NULL on failure. 
If the object is not a sequence, raises TypeError with m as the message text.""" - if (space.is_true(space.isinstance(w_obj, space.w_list)) or - space.is_true(space.isinstance(w_obj, space.w_tuple))): + if (isinstance(w_obj, listobject.W_ListObject) or + isinstance(w_obj, tupleobject.W_TupleObject)): return w_obj try: - return space.newtuple(space.fixedview(w_obj)) + return tupleobject.W_TupleObject(space.fixedview(w_obj)) except OperationError: raise OperationError(space.w_TypeError, space.wrap(rffi.charp2str(m))) diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -6,13 +6,12 @@ borrow_from, make_ref, from_ref) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.objspace.std.tupleobject import W_TupleObject -from pypy.objspace.std.smalltupleobject import W_SmallTupleObject PyTuple_Check, PyTuple_CheckExact = build_type_checkers("Tuple") @cpython_api([Py_ssize_t], PyObject) def PyTuple_New(space, size): - return space.newtuple([space.w_None] * size) + return W_TupleObject([space.w_None] * size) @cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=-1) def PyTuple_SetItem(space, w_t, pos, w_obj): @@ -24,12 +23,12 @@ return 0 def _setitem_tuple(w_t, pos, w_obj): - if isinstance(w_t, W_TupleObject): - w_t.wrappeditems[pos] = w_obj - elif isinstance(w_t, W_SmallTupleObject): - w_t.setitem(pos, w_obj) - else: - assert False + # this function checks that w_t is really a W_TupleObject. It + # should only ever be called with a freshly built tuple from + # PyTuple_New(), which always return a W_TupleObject, even if there + # are also other implementations of tuples. + assert isinstance(w_t, W_TupleObject) + w_t.wrappeditems[pos] = w_obj @cpython_api([PyObject, Py_ssize_t], PyObject) def PyTuple_GetItem(space, w_t, pos): diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -483,10 +483,20 @@ # XXX Check for frozen modules? # when w_path is a string + delayed_builtin = None + w_lib_extensions = None + if w_path is None: # check the builtin modules if modulename in space.builtin_modules: - return FindInfo(C_BUILTIN, modulename, None) + delayed_builtin = FindInfo(C_BUILTIN, modulename, None) + # a "real builtin module xx" shadows every file "xx.py" there + # could possibly be; a "pseudo-extension module" does not, and + # is only loaded at the point in sys.path where we find + # '.../lib_pypy/__extensions__'. + if modulename in space.MODULES_THAT_ALWAYS_SHADOW: + return delayed_builtin + w_lib_extensions = space.sys.get_state(space).w_lib_extensions w_path = space.sys.get('path') # XXX check frozen modules? @@ -495,6 +505,9 @@ if w_path is not None: for w_pathitem in space.unpackiterable(w_path): # sys.path_hooks import hook + if (w_lib_extensions is not None and + space.eq_w(w_pathitem, w_lib_extensions)): + return delayed_builtin if use_loader: w_loader = find_in_path_hooks(space, w_modulename, w_pathitem) if w_loader: @@ -527,7 +540,7 @@ # Out of file descriptors. 
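(Aside, not part of the patch.) The importing.py change in this hunk makes the built-in answer a delayed one: a module listed in space.MODULES_THAT_ALWAYS_SHADOW (such as gc, per the tests below) wins immediately, while other built-in "pseudo-extension" modules only win once the path scan reaches the special '.../lib_pypy/__extensions__' entry, so an xx.py found earlier on sys.path can shadow them. A rough plain-Python model of that rule, with illustrative names rather than the interp-level API:

    ALWAYS_SHADOW = set(['gc'])      # stand-in for space.MODULES_THAT_ALWAYS_SHADOW

    def toy_find_module(name, path, builtins, files_on_path, lib_extensions):
        delayed_builtin = ('builtin', name) if name in builtins else None
        if delayed_builtin is not None and name in ALWAYS_SHADOW:
            return delayed_builtin            # e.g. 'gc' is never shadowed
        for entry in path:
            if entry == lib_extensions:
                return delayed_builtin        # from here on, the built-in wins
            if (entry, name) in files_on_path:
                return ('file', entry, name)  # a .py file earlier on the path wins
        return delayed_builtin

    path = ['/site', '/lib_pypy/__extensions__', '/lib_pypy']
    files = set([('/site', 'itertools'), ('/lib_pypy', 'itertools')])
    builtins = set(['gc', 'itertools'])
    ext = '/lib_pypy/__extensions__'
    assert toy_find_module('itertools', path, builtins, files, ext) == \
           ('file', '/site', 'itertools')
    assert toy_find_module('gc', path, builtins, files, ext) == ('builtin', 'gc')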
# not found - return None + return delayed_builtin def _prepare_module(space, w_mod, filename, pkgdir): w = space.wrap diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -38,6 +38,8 @@ test_reload = "def test():\n raise ValueError\n", infinite_reload = "import infinite_reload; reload(infinite_reload)", del_sys_module = "import sys\ndel sys.modules['del_sys_module']\n", + itertools = "hello_world = 42\n", + gc = "should_never_be_seen = 42\n", ) root.ensure("notapackage", dir=1) # empty, no __init__.py setuppkg("pkg", @@ -147,6 +149,8 @@ class AppTestImport: def setup_class(cls): # interpreter-level + cls.space = gettestobjspace(usemodules=['itertools']) + cls.w_runappdirect = cls.space.wrap(conftest.option.runappdirect) cls.saved_modules = _setup(cls.space) #XXX Compile class @@ -571,6 +575,50 @@ else: assert False, 'should not work' + def test_shadow_builtin(self): + if self.runappdirect: skip("hard to test: module is already imported") + # 'import gc' is supposed to always find the built-in module; + # like CPython, it is a built-in module, so it shadows everything, + # even though there is a gc.py. + import sys + assert 'gc' not in sys.modules + import gc + assert not hasattr(gc, 'should_never_be_seen') + assert '(built-in)' in repr(gc) + del sys.modules['gc'] + + def test_shadow_extension_1(self): + if self.runappdirect: skip("hard to test: module is already imported") + # 'import itertools' is supposed to find itertools.py if there is + # one in sys.path. + import sys + assert 'itertools' not in sys.modules + import itertools + assert hasattr(itertools, 'hello_world') + assert not hasattr(itertools, 'count') + assert '(built-in)' not in repr(itertools) + del sys.modules['itertools'] + + def test_shadow_extension_2(self): + if self.runappdirect: skip("hard to test: module is already imported") + # 'import itertools' is supposed to find the built-in module even + # if there is also one in sys.path as long as it is *after* the + # special entry '.../lib_pypy/__extensions__'. (Note that for now + # there is one in lib_pypy/itertools.py, which should not be seen + # either; hence the (built-in) test below.) 
+ import sys + assert 'itertools' not in sys.modules + sys.path.append(sys.path.pop(0)) + try: + import itertools + assert not hasattr(itertools, 'hello_world') + assert hasattr(itertools, 'izip') + assert '(built-in)' in repr(itertools) + finally: + sys.path.insert(0, sys.path.pop()) + del sys.modules['itertools'] + + class TestAbi: def test_abi_tag(self): space1 = gettestobjspace(soabi='TEST') diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -24,11 +24,16 @@ 'number': 'interp_boxes.W_NumberBox', 'integer': 'interp_boxes.W_IntegerBox', 'signedinteger': 'interp_boxes.W_SignedIntegerBox', + 'unsignedinteger': 'interp_boxes.W_UnsignedIntegerBox', 'bool_': 'interp_boxes.W_BoolBox', 'int8': 'interp_boxes.W_Int8Box', + 'uint8': 'interp_boxes.W_UInt8Box', 'int16': 'interp_boxes.W_Int16Box', + 'uint16': 'interp_boxes.W_UInt16Box', 'int32': 'interp_boxes.W_Int32Box', + 'uint32': 'interp_boxes.W_UInt32Box', 'int64': 'interp_boxes.W_Int64Box', + 'uint64': 'interp_boxes.W_UInt64Box', 'int_': 'interp_boxes.W_LongBox', 'inexact': 'interp_boxes.W_InexactBox', 'floating': 'interp_boxes.W_FloatingBox', diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -38,6 +38,7 @@ w_ValueError = None w_TypeError = None w_IndexError = None + w_OverflowError = None w_None = None w_bool = "bool" @@ -149,6 +150,10 @@ # XXX array probably assert False + def exception_match(self, w_exc_type, w_check_class): + # Good enough for now + raise NotImplementedError + class FloatObject(W_Root): tp = FakeSpace.w_float def __init__(self, floatval): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -91,6 +91,9 @@ descr_neg = _unaryop_impl("negative") descr_abs = _unaryop_impl("absolute") + def descr_tolist(self, space): + return self.get_dtype(space).itemtype.to_builtin_type(space, self) + class W_BoolBox(W_GenericBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("bool") @@ -104,38 +107,38 @@ class W_SignedIntegerBox(W_IntegerBox): pass -class W_UnsignedIntgerBox(W_IntegerBox): +class W_UnsignedIntegerBox(W_IntegerBox): pass class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int8") -class W_UInt8Box(W_UnsignedIntgerBox, PrimitiveBox): +class W_UInt8Box(W_UnsignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("uint8") class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int16") -class W_UInt16Box(W_UnsignedIntgerBox, PrimitiveBox): +class W_UInt16Box(W_UnsignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("uint16") class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int32") -class W_UInt32Box(W_UnsignedIntgerBox, PrimitiveBox): +class W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("uint32") class W_LongBox(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("long") -class W_ULongBox(W_UnsignedIntgerBox, PrimitiveBox): +class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): pass class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): descr__new__, get_dtype = new_dtype_getter("int64") -class 
W_UInt64Box(W_UnsignedIntgerBox, PrimitiveBox): - pass +class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): + descr__new__, get_dtype = new_dtype_getter("uint64") class W_InexactBox(W_NumberBox): _attrs_ = () @@ -179,6 +182,8 @@ __neg__ = interp2app(W_GenericBox.descr_neg), __abs__ = interp2app(W_GenericBox.descr_abs), + + tolist = interp2app(W_GenericBox.descr_tolist), ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, @@ -198,13 +203,18 @@ __module__ = "numpypy", ) +W_UnsignedIntegerBox.typedef = TypeDef("unsignedinteger", W_IntegerBox.typedef, + __module__ = "numpypy", +) + W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_Int8Box.descr__new__.im_func), ) -W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntgerBox.typedef, +W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = interp2app(W_UInt8Box.descr__new__.im_func), ) W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, @@ -212,8 +222,9 @@ __new__ = interp2app(W_Int16Box.descr__new__.im_func), ) -W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntgerBox.typedef, +W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = interp2app(W_UInt16Box.descr__new__.im_func), ) W_Int32Box.typedef = TypeDef("int32", W_SignedIntegerBox.typedef, @@ -221,8 +232,9 @@ __new__ = interp2app(W_Int32Box.descr__new__.im_func), ) -W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntgerBox.typedef, +W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = interp2app(W_UInt32Box.descr__new__.im_func), ) if LONG_BIT == 32: @@ -233,7 +245,7 @@ __module__ = "numpypy", ) -W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntgerBox.typedef, +W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntegerBox.typedef, __module__ = "numpypy", ) @@ -242,8 +254,9 @@ __new__ = interp2app(W_Int64Box.descr__new__.im_func), ) -W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntgerBox.typedef, +W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", + __new__ = interp2app(W_UInt64Box.descr__new__.im_func), ) W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -876,6 +876,17 @@ arr.setshape(space, new_shape) return arr + def descr_tolist(self, space): + if len(self.shape) == 0: + assert isinstance(self, Scalar) + return self.value.descr_tolist(space) + w_result = space.newlist([]) + for i in range(self.shape[0]): + space.call_method(w_result, "append", + space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist") + ) + return w_result + def descr_mean(self, space): return space.div(self.descr_sum(space), space.wrap(self.find_size())) @@ -1485,6 +1496,7 @@ copy = interp2app(BaseArray.descr_copy), reshape = interp2app(BaseArray.descr_reshape), + tolist = interp2app(BaseArray.descr_tolist), ) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -174,6 +174,8 @@ raises(TypeError, numpy.integer, 0) exc = raises(TypeError, numpy.signedinteger, 0) assert str(exc.value) == "cannot create 'signedinteger' instances" + exc = raises(TypeError, 
numpy.unsignedinteger, 0) + assert str(exc.value) == "cannot create 'unsignedinteger' instances" raises(TypeError, numpy.floating, 0) raises(TypeError, numpy.inexact, 0) @@ -210,17 +212,54 @@ assert type(int(x)) is int assert int(x) == -128 + def test_uint8(self): + import numpypy as numpy + + assert numpy.uint8.mro() == [numpy.uint8, numpy.unsignedinteger, numpy.integer, numpy.number, numpy.generic, object] + + a = numpy.array([1, 2, 3], numpy.uint8) + assert type(a[1]) is numpy.uint8 + assert numpy.dtype("uint8").type is numpy.uint8 + + x = numpy.uint8(128) + assert x == 128 + assert x != -128 + assert type(x) is numpy.uint8 + assert repr(x) == "128" + + assert type(int(x)) is int + assert int(x) == 128 + + assert numpy.uint8(255) == 255 + assert numpy.uint8(256) == 0 + def test_int16(self): import numpypy as numpy x = numpy.int16(3) assert x == 3 + assert numpy.int16(32767) == 32767 + assert numpy.int16(32768) == -32768 + + def test_uint16(self): + import numpypy as numpy + + assert numpy.uint16(65535) == 65535 + assert numpy.uint16(65536) == 0 def test_int32(self): import numpypy as numpy x = numpy.int32(23) assert x == 23 + assert numpy.int32(2147483647) == 2147483647 + assert numpy.int32(2147483648) == -2147483648 + + def test_uint32(self): + import numpypy as numpy + + assert numpy.uint32(4294967295) == 4294967295 + assert numpy.uint32(4294967296) == 0 def test_int_(self): import numpypy as numpy @@ -240,6 +279,25 @@ assert numpy.dtype(numpy.int64).type is numpy.int64 assert numpy.int64(3) == 3 + assert numpy.int64(9223372036854775807) == 9223372036854775807 + raises(OverflowError, numpy.int64, 9223372036854775808) + + def test_uint64(self): + import sys + import numpypy as numpy + + assert numpy.uint64.mro() == [numpy.uint64, numpy.unsignedinteger, numpy.integer, numpy.number, numpy.generic, object] + + assert numpy.dtype(numpy.uint64).type is numpy.uint64 + skip("see comment") + # These tests pass "by chance" on numpy, things that are larger than + # platform long (i.e. a python int), don't get put in a normal box, + # instead they become an object array containing a long, we don't have + # yet, so these can't pass. 
+ assert numpy.uint64(9223372036854775808) == 9223372036854775808 + assert numpy.uint64(18446744073709551615) == 18446744073709551615 + raises(OverflowError, numpy.uint64(18446744073709551616)) + def test_float32(self): import numpypy as numpy diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -879,6 +879,45 @@ b[0] = 3 assert b.__debug_repr__() == 'Call2(add, forced=Array)' + def test_tolist_scalar(self): + from numpypy import int32, bool_ + x = int32(23) + assert x.tolist() == 23 + assert type(x.tolist()) is int + y = bool_(True) + assert y.tolist() is True + + def test_tolist_zerodim(self): + from numpypy import array + x = array(3) + assert x.tolist() == 3 + assert type(x.tolist()) is int + + def test_tolist_singledim(self): + from numpypy import array + a = array(range(5)) + assert a.tolist() == [0, 1, 2, 3, 4] + assert type(a.tolist()[0]) is int + b = array([0.2, 0.4, 0.6]) + assert b.tolist() == [0.2, 0.4, 0.6] + + def test_tolist_multidim(self): + from numpypy import array + a = array([[1, 2], [3, 4]]) + assert a.tolist() == [[1, 2], [3, 4]] + + def test_tolist_view(self): + from numpypy import array + a = array([[1,2],[3,4]]) + assert (a + a).tolist() == [[2, 4], [6, 8]] + + def test_tolist_slice(self): + from numpypy import array + a = array([[17.1, 27.2], [40.3, 50.3]]) + assert a[:,0].tolist() == [17.1, 40.3] + assert a[0].tolist() == [17.1, 27.2] + + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -185,7 +185,8 @@ # sure it was optimized correctly. # XXX the comment above is wrong now. 
We need preferrably a way to # count the two loops separately - self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41, + self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, + 'getfield_gc': 35, 'getfield_gc_pure': 6, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1,6 +1,7 @@ import functools import math +from pypy.interpreter.error import OperationError from pypy.module.micronumpy import interp_boxes from pypy.objspace.std.floatobject import float2string from pypy.rlib import rfloat, libffi, clibffi @@ -77,6 +78,9 @@ w_obj.__init__(self._coerce(space, w_item).value) return w_obj + def to_builtin_type(self, space, box): + return space.wrap(self.for_computation(self.unbox(box))) + def _coerce(self, space, w_item): raise NotImplementedError @@ -179,6 +183,9 @@ def _coerce(self, space, w_item): return self.box(space.is_true(w_item)) + def to_builtin_type(self, space, w_item): + return space.wrap(self.unbox(w_item)) + def str_format(self, box): value = self.unbox(box) return "True" if value else "False" @@ -271,6 +278,19 @@ T = rffi.ULONGLONG BoxType = interp_boxes.W_UInt64Box + def _coerce(self, space, w_item): + try: + return Integer._coerce(self, space, w_item) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + bigint = space.bigint_w(w_item) + try: + value = bigint.toulonglong() + except OverflowError: + raise OperationError(space.w_OverflowError, space.w_None) + return self.box(value) + class Float(Primitive): _mixin_ = True diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -170,3 +170,7 @@ def get_flag(self, name): space = self.space return space.int_w(space.getattr(self.get('flags'), space.wrap(name))) + + def get_state(self, space): + from pypy.module.sys import state + return state.get(space) diff --git a/pypy/module/sys/state.py b/pypy/module/sys/state.py --- a/pypy/module/sys/state.py +++ b/pypy/module/sys/state.py @@ -24,7 +24,7 @@ # Initialize the default path pypydir = os.path.dirname(os.path.abspath(pypy.__file__)) srcdir = os.path.dirname(pypydir) - path = getinitialpath(srcdir) + path = getinitialpath(self, srcdir) self.w_path = space.newlist([space.wrap(p) for p in path]) def checkdir(path): @@ -35,7 +35,7 @@ platform = sys.platform -def getinitialpath(prefix): +def getinitialpath(state, prefix): from pypy.module.sys.version import CPYTHON_VERSION dirname = '%d.%d' % (CPYTHON_VERSION[0], CPYTHON_VERSION[1]) @@ -49,6 +49,12 @@ checkdir(lib_pypy) importlist = [] + # + if state is not None: # 'None' for testing only + lib_extensions = os.path.join(lib_pypy, '__extensions__') + state.w_lib_extensions = state.space.wrap(lib_extensions) + importlist.append(lib_extensions) + # importlist.append(lib_pypy) importlist.append(python_std_lib_modified) importlist.append(python_std_lib) @@ -71,7 +77,7 @@ @unwrap_spec(srcdir=str) def pypy_initial_path(space, srcdir): try: - path = getinitialpath(srcdir) + path = getinitialpath(get(space), srcdir) except OSError: return space.w_None else: diff --git a/pypy/module/sys/test/test_initialpath.py b/pypy/module/sys/test/test_initialpath.py --- a/pypy/module/sys/test/test_initialpath.py +++ 
b/pypy/module/sys/test/test_initialpath.py @@ -13,7 +13,7 @@ def test_stdlib_in_prefix(tmpdir): dirs = build_hierarchy(tmpdir) - path = getinitialpath(str(tmpdir)) + path = getinitialpath(None, str(tmpdir)) # we get at least 'dirs', and maybe more (e.g. plat-linux2) assert path[:len(dirs)] == map(str, dirs) @@ -21,7 +21,7 @@ lib_pypy, lib_python_modified, lib_python = build_hierarchy(tmpdir) lib_tk_modified = lib_python_modified.join('lib-tk') lib_tk = lib_python.join('lib-tk') - path = getinitialpath(str(tmpdir)) + path = getinitialpath(None, str(tmpdir)) i = path.index(str(lib_tk_modified)) j = path.index(str(lib_tk)) assert i < j diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -9,7 +9,7 @@ from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.objectmodel import instantiate, we_are_translated from pypy.rlib.nonconst import NonConstant -from pypy.rlib.rarithmetic import r_uint +from pypy.rlib.rarithmetic import r_uint, r_singlefloat from pypy.translator.translator import TranslationContext from pypy.tool.option import make_config @@ -145,9 +145,15 @@ self._see_interp2app(x) if isinstance(x, GetSetProperty): self._see_getsetproperty(x) + if isinstance(x, r_singlefloat): + self._wrap_not_rpython(x) return w_some_obj() wrap._annspecialcase_ = "specialize:argtype(1)" + def _wrap_not_rpython(self, x): + "NOT_RPYTHON" + raise NotImplementedError + def _see_interp2app(self, interp2app): "NOT_RPYTHON" activation = interp2app._code.activation @@ -238,6 +244,7 @@ t = TranslationContext(config=config) self.t = t # for debugging ann = t.buildannotator() + ann.policy.allow_someobjects = False if func is not None: ann.build_types(func, argtypes, complete_now=False) # diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -33,9 +33,6 @@ """Sequence iterator specialized for lists, accessing directly their RPython-level list of wrapped objects. """ - def __init__(w_self, w_seq): - W_AbstractSeqIterObject.__init__(w_self, w_seq) - w_self.w_seq = w_seq class W_FastTupleIterObject(W_AbstractSeqIterObject): """Sequence iterator specialized for tuples, accessing diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -9,8 +9,9 @@ from pypy.interpreter import gateway, baseobjspace from pypy.rlib.objectmodel import instantiate, specialize from pypy.rlib.listsort import make_timsort_class -from pypy.rlib import rerased, jit +from pypy.rlib import rerased, jit, debug from pypy.interpreter.argument import Signature +from pypy.tool.sourcetools import func_with_new_name UNROLL_CUTOFF = 5 @@ -170,6 +171,19 @@ share with the storage, if possible.""" return self.strategy.getitems(self) + def getitems_fixedsize(self): + """Returns a fixed-size list of all items after wrapping them.""" + l = self.strategy.getitems_fixedsize(self) + debug.make_sure_not_resized(l) + return l + + def getitems_unroll(self): + """Returns a fixed-size list of all items after wrapping them. The JIT + will fully unroll this function. """ + l = self.strategy.getitems_unroll(self) + debug.make_sure_not_resized(l) + return l + def getitems_copy(self): """Returns a copy of all items in the list. 
Same as getitems except for ObjectListStrategy.""" @@ -366,6 +380,8 @@ def getitems_copy(self, w_list): return [] + getitems_fixedsize = func_with_new_name(getitems_copy, "getitems_fixedsize") + getitems_unroll = getitems_fixedsize def getstorage_copy(self, w_list): return self.erase(None) @@ -496,7 +512,6 @@ # tuple is unmutable return w_list.lstorage - @specialize.arg(2) def _getitems_range(self, w_list, wrap_items): l = self.unerase(w_list.lstorage) @@ -519,6 +534,13 @@ return r + @jit.dont_look_inside + def getitems_fixedsize(self, w_list): + return self._getitems_range_unroll(w_list, True) + def getitems_unroll(self, w_list): + return self._getitems_range_unroll(w_list, True) + _getitems_range_unroll = jit.unroll_safe(func_with_new_name(_getitems_range, "_getitems_range_unroll")) + def getslice(self, w_list, start, stop, step, length): v = self.unerase(w_list.lstorage) old_start = v[0] @@ -672,10 +694,19 @@ return self.wrap(r) @jit.look_inside_iff(lambda self, w_list: - jit.isconstant(w_list.length()) and w_list.length() < UNROLL_CUTOFF) + jit.isconstant(w_list.length()) and w_list.length() < UNROLL_CUTOFF) def getitems_copy(self, w_list): return [self.wrap(item) for item in self.unerase(w_list.lstorage)] + @jit.unroll_safe + def getitems_unroll(self, w_list): + return [self.wrap(item) for item in self.unerase(w_list.lstorage)] + + @jit.look_inside_iff(lambda self, w_list: + jit.isconstant(w_list.length()) and w_list.length() < UNROLL_CUTOFF) + def getitems_fixedsize(self, w_list): + return self.getitems_unroll(w_list) + def getstorage_copy(self, w_list): items = self.unerase(w_list.lstorage)[:] return self.erase(items) diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -15,6 +15,7 @@ _registered_implementations.add(implcls) option_to_typename = { + "withspecialisedtuple" : ["specialisedtupleobject.W_SpecialisedTupleObject"], "withsmalltuple" : ["smalltupleobject.W_SmallTupleObject"], "withsmallint" : ["smallintobject.W_SmallIntObject"], "withsmalllong" : ["smalllongobject.W_SmallLongObject"], @@ -261,6 +262,11 @@ self.typeorder[smalltupleobject.W_SmallTupleObject] += [ (tupleobject.W_TupleObject, smalltupleobject.delegate_SmallTuple2Tuple)] + if config.objspace.std.withspecialisedtuple: + from pypy.objspace.std import specialisedtupleobject + self.typeorder[specialisedtupleobject.W_SpecialisedTupleObject] += [ + (tupleobject.W_TupleObject, specialisedtupleobject.delegate_SpecialisedTuple2Tuple)] + # put W_Root everywhere self.typeorder[W_Root] = [] for type in self.typeorder: diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -29,7 +29,7 @@ from pypy.objspace.std.sliceobject import W_SliceObject from pypy.objspace.std.smallintobject import W_SmallIntObject from pypy.objspace.std.stringobject import W_StringObject -from pypy.objspace.std.tupleobject import W_TupleObject +from pypy.objspace.std.tupleobject import W_AbstractTupleObject from pypy.objspace.std.typeobject import W_TypeObject # types @@ -391,8 +391,8 @@ self.wrap("expected length %d, got %d" % (expected, got))) def unpackiterable(self, w_obj, expected_length=-1): - if isinstance(w_obj, W_TupleObject): - t = w_obj.wrappeditems[:] + if isinstance(w_obj, W_AbstractTupleObject): + t = w_obj.getitems_copy() elif isinstance(w_obj, W_ListObject): t = w_obj.getitems_copy() else: @@ -405,11 +405,13 @@ def fixedview(self, w_obj, expected_length=-1, 
unroll=False): """ Fast paths """ - if isinstance(w_obj, W_TupleObject): - t = w_obj.wrappeditems + if isinstance(w_obj, W_AbstractTupleObject): + t = w_obj.tolist() elif isinstance(w_obj, W_ListObject): - # XXX this can copy twice - t = w_obj.getitems()[:] + if unroll: + t = w_obj.getitems_unroll() + else: + t = w_obj.getitems_fixedsize() else: if unroll: return make_sure_not_resized(ObjSpace.unpackiterable_unroll( @@ -428,8 +430,8 @@ def listview(self, w_obj, expected_length=-1): if isinstance(w_obj, W_ListObject): t = w_obj.getitems() - elif isinstance(w_obj, W_TupleObject): - t = w_obj.wrappeditems[:] + elif isinstance(w_obj, W_AbstractTupleObject): + t = w_obj.getitems_copy() else: return ObjSpace.unpackiterable(self, w_obj, expected_length) if expected_length != -1 and len(t) != expected_length: diff --git a/pypy/objspace/std/smalltupleobject.py b/pypy/objspace/std/smalltupleobject.py --- a/pypy/objspace/std/smalltupleobject.py +++ b/pypy/objspace/std/smalltupleobject.py @@ -9,13 +9,14 @@ from pypy.interpreter import gateway from pypy.rlib.debug import make_sure_not_resized from pypy.rlib.unroll import unrolling_iterable +from pypy.tool.sourcetools import func_with_new_name from pypy.objspace.std.tupleobject import W_AbstractTupleObject, W_TupleObject class W_SmallTupleObject(W_AbstractTupleObject): from pypy.objspace.std.tupletype import tuple_typedef as typedef - def tolist(self): - raise NotImplementedError + #def tolist(self): --- inherited from W_AbstractTupleObject + # raise NotImplementedError def length(self): raise NotImplementedError @@ -51,6 +52,9 @@ l[i] = getattr(self, 'w_value%s' % i) return l + # same source code, but builds and returns a resizable list + getitems_copy = func_with_new_name(tolist, 'getitems_copy') + def length(self): return n diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -0,0 +1,302 @@ +from pypy.interpreter.error import OperationError +from pypy.objspace.std.model import registerimplementation +from pypy.objspace.std.register_all import register_all +from pypy.objspace.std.multimethod import FailedToImplement +from pypy.objspace.std.tupleobject import W_AbstractTupleObject +from pypy.objspace.std.tupleobject import W_TupleObject +from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice +from pypy.rlib.rarithmetic import intmask +from pypy.rlib.objectmodel import compute_hash +from pypy.rlib.unroll import unrolling_iterable +from pypy.tool.sourcetools import func_with_new_name + +class NotSpecialised(Exception): + pass + +class W_SpecialisedTupleObject(W_AbstractTupleObject): + from pypy.objspace.std.tupletype import tuple_typedef as typedef + __slots__ = [] + + def __repr__(self): + """ representation for debugging purposes """ + reprlist = [repr(item) for item in self._to_unwrapped_list()] + return "%s(%s)" % (self.__class__.__name__, ', '.join(reprlist)) + + #def tolist(self): --- inherited from W_AbstractTupleObject + # raise NotImplementedError + + def _to_unwrapped_list(self): + "NOT_RPYTHON" + raise NotImplementedError + + def length(self): + raise NotImplementedError + + def getitem(self, index): + raise NotImplementedError + + def hash(self, space): + raise NotImplementedError + + def eq(self, space, w_other): + raise NotImplementedError + + def setitem(self, index, w_item): + raise NotImplementedError + + def unwrap(self, space): + return tuple(self._to_unwrapped_list()) + + 
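(Aside, not part of the patch.) make_specialised_class(), defined just below, generates one concrete W_SpecialisedTupleObject subclass per tuple shape (Cls_ii, Cls_ff, Cls_ss, ...); each stores its elements unwrapped in value0, value1, ... attributes and only re-wraps them when an item is read. A rough plain-Python picture of that storage layout, with hypothetical names rather than the generated classes:

    class ToyTupleII(object):        # roughly the shape of Cls_ii: two ints
        def __init__(self, a, b):
            self.value0 = a          # kept as plain machine integers,
            self.value1 = b          # not as wrapped objects

        def length(self):
            return 2

        def getitem(self, index):
            if index == 0:
                return self.value0
            if index == 1:
                return self.value1
            raise IndexError(index)

    t = ToyTupleII(1, 2)
    assert (t.length(), t.getitem(0), t.getitem(1)) == (2, 1, 2)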
def delegating(self): + pass # for tests only + + +def make_specialised_class(typetuple): + assert type(typetuple) == tuple + + nValues = len(typetuple) + iter_n = unrolling_iterable(range(nValues)) + + class cls(W_SpecialisedTupleObject): + def __init__(self, space, *values_w): + self.space = space + assert len(values_w) == nValues + for i in iter_n: + w_obj = values_w[i] + val_type = typetuple[i] + if val_type == int: + unwrapped = space.int_w(w_obj) + elif val_type == float: + unwrapped = space.float_w(w_obj) + elif val_type == str: + unwrapped = space.str_w(w_obj) + elif val_type == object: + unwrapped = w_obj + else: + raise AssertionError + setattr(self, 'value%s' % i, unwrapped) + + def length(self): + return nValues + + def tolist(self): + list_w = [None] * nValues + for i in iter_n: + value = getattr(self, 'value%s' % i) + if typetuple[i] != object: + value = self.space.wrap(value) + list_w[i] = value + return list_w + + # same source code, but builds and returns a resizable list + getitems_copy = func_with_new_name(tolist, 'getitems_copy') + + def _to_unwrapped_list(self): + "NOT_RPYTHON" + list_w = [None] * nValues + for i in iter_n: + value = getattr(self, 'value%s' % i) + if typetuple[i] == object: + value = self.space.unwrap(value) + list_w[i] = value + return list_w + + def hash(self, space): + # XXX duplicate logic from tupleobject.py + mult = 1000003 + x = 0x345678 + z = nValues + for i in iter_n: + value = getattr(self, 'value%s' % i) + if typetuple[i] == object: + y = space.int_w(space.hash(value)) + elif typetuple[i] == float: + # get the correct hash for float which is an + # integer & other less frequent cases + from pypy.objspace.std.floatobject import _hash_float + y = _hash_float(space, value) + else: + y = compute_hash(value) + x = (x ^ y) * mult + z -= 1 + mult += 82520 + z + z + x += 97531 + return space.wrap(intmask(x)) + + def _eq(self, w_other): + if not isinstance(w_other, cls): + # if we are not comparing same types, give up + raise FailedToImplement + for i in iter_n: + myval = getattr(self, 'value%s' % i) + otherval = getattr(w_other, 'value%s' % i) + if typetuple[i] == object: + if not self.space.eq_w(myval, otherval): + return False + else: + if myval != otherval: + return False + else: + return True + + def eq(self, space, w_other): + return space.newbool(self._eq(w_other)) + + def ne(self, space, w_other): + return space.newbool(not self._eq(w_other)) + +## def _compare(self, compare_op, w_other): +## if not isinstance(w_other, cls): +## raise FailedToImplement +## ncmp = min(self.length(), w_other.length()) +## for i in iter_n: +## if typetuple[i] == Any:#like space.eq on wrapped or two params? 
+## raise FailedToImplement +## if ncmp > i: +## l_val = getattr(self, 'value%s' % i) +## r_val = getattr(w_other, 'value%s' % i) +## if l_val != r_val: +## return compare_op(l_val, r_val) +## return compare_op(self.length(), w_other.length()) + + def getitem(self, index): + for i in iter_n: + if index == i: + value = getattr(self, 'value%s' % i) + if typetuple[i] != object: + value = self.space.wrap(value) + return value + raise IndexError + + cls.__name__ = ('W_SpecialisedTupleObject_' + + ''.join([t.__name__[0] for t in typetuple])) + _specialisations.append(cls) + return cls + +# ---------- current specialized versions ---------- + +_specialisations = [] +Cls_ii = make_specialised_class((int, int)) +Cls_is = make_specialised_class((int, str)) +Cls_io = make_specialised_class((int, object)) +Cls_si = make_specialised_class((str, int)) +Cls_ss = make_specialised_class((str, str)) +Cls_so = make_specialised_class((str, object)) +Cls_oi = make_specialised_class((object, int)) +Cls_os = make_specialised_class((object, str)) +Cls_oo = make_specialised_class((object, object)) +Cls_ff = make_specialised_class((float, float)) +Cls_ooo = make_specialised_class((object, object, object)) + +def makespecialisedtuple(space, list_w): + if len(list_w) == 2: + w_arg1, w_arg2 = list_w + w_type1 = space.type(w_arg1) + w_type2 = space.type(w_arg2) + # + if w_type1 is space.w_int: + if w_type2 is space.w_int: + return Cls_ii(space, w_arg1, w_arg2) + elif w_type2 is space.w_str: + return Cls_is(space, w_arg1, w_arg2) + else: + return Cls_io(space, w_arg1, w_arg2) + # + elif w_type1 is space.w_str: + if w_type2 is space.w_int: + return Cls_si(space, w_arg1, w_arg2) + elif w_type2 is space.w_str: + return Cls_ss(space, w_arg1, w_arg2) + else: + return Cls_so(space, w_arg1, w_arg2) + # + elif w_type1 is space.w_float and w_type2 is space.w_float: + return Cls_ff(space, w_arg1, w_arg2) + # + else: + if w_type2 is space.w_int: + return Cls_oi(space, w_arg1, w_arg2) + elif w_type2 is space.w_str: + return Cls_os(space, w_arg1, w_arg2) + else: + return Cls_oo(space, w_arg1, w_arg2) + # + elif len(list_w) == 3: + return Cls_ooo(space, list_w[0], list_w[1], list_w[2]) + else: + raise NotSpecialised + +# ____________________________________________________________ + +registerimplementation(W_SpecialisedTupleObject) + +def delegate_SpecialisedTuple2Tuple(space, w_specialised): + w_specialised.delegating() + return W_TupleObject(w_specialised.tolist()) + +def len__SpecialisedTuple(space, w_tuple): + return space.wrap(w_tuple.length()) + +def getitem__SpecialisedTuple_ANY(space, w_tuple, w_index): + index = space.getindex_w(w_index, space.w_IndexError, "tuple index") + if index < 0: + index += w_tuple.length() + try: + return w_tuple.getitem(index) + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("tuple index out of range")) + +def getitem__SpecialisedTuple_Slice(space, w_tuple, w_slice): + length = w_tuple.length() + start, stop, step, slicelength = w_slice.indices4(space, length) + assert slicelength >= 0 + subitems = [None] * slicelength + for i in range(slicelength): + subitems[i] = w_tuple.getitem(start) + start += step + return space.newtuple(subitems) + +def mul_specialisedtuple_times(space, w_tuple, w_times): + try: + times = space.getindex_w(w_times, space.w_OverflowError) + except OperationError, e: + if e.match(space, space.w_TypeError): + raise FailedToImplement + raise + if times == 1 and space.type(w_tuple) == space.w_tuple: + return w_tuple + items = w_tuple.tolist() + return 
space.newtuple(items * times) + +def mul__SpecialisedTuple_ANY(space, w_tuple, w_times): + return mul_specialisedtuple_times(space, w_tuple, w_times) + +def mul__ANY_SpecialisedTuple(space, w_times, w_tuple): + return mul_specialisedtuple_times(space, w_tuple, w_times) + +def eq__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): + return w_tuple1.eq(space, w_tuple2) + +def ne__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): + return w_tuple1.ne(space, w_tuple2) + +##from operator import lt, le, ge, gt + +##def lt__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(lt, w_tuple2)) + +##def le__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(le, w_tuple2)) + +##def ge__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(ge, w_tuple2)) + +##def gt__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2): +## return space.newbool(w_tuple1._compare(gt, w_tuple2)) + +def hash__SpecialisedTuple(space, w_tuple): + return w_tuple.hash(space) + +from pypy.objspace.std import tupletype +register_all(vars(), tupletype) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -48,6 +48,46 @@ for i in range(7): assert self.space.eq_w(l[i], l2[i]) + def test_getitems_fixedsize(self): + w = self.space.wrap + from pypy.objspace.std.listobject import make_range_list + rangelist = make_range_list(self.space, 1,1,7) + emptylist = W_ListObject(self.space, []) + intlist = W_ListObject(self.space, [w(1),w(2),w(3),w(4),w(5),w(6),w(7)]) + strlist = W_ListObject(self.space, [w('1'),w('2'),w('3'),w('4'),w('5'),w('6'),w('7')]) + floatlist = W_ListObject(self.space, [w(1.0),w(2.0),w(3.0),w(4.0),w(5.0),w(6.0),w(7.0)]) + objlist = W_ListObject(self.space, [w(1),w('2'),w(3.0),w(4),w(5),w(6),w(7)]) + + emptylist_copy = emptylist.getitems_fixedsize() + assert emptylist_copy == [] + + rangelist_copy = rangelist.getitems_fixedsize() + intlist_copy = intlist.getitems_fixedsize() + strlist_copy = strlist.getitems_fixedsize() + floatlist_copy = floatlist.getitems_fixedsize() + objlist_copy = objlist.getitems_fixedsize() + for i in range(7): + assert self.space.eq_w(rangelist_copy[i], rangelist.getitem(i)) + assert self.space.eq_w(intlist_copy[i], intlist.getitem(i)) + assert self.space.eq_w(strlist_copy[i], strlist.getitem(i)) + assert self.space.eq_w(floatlist_copy[i], floatlist.getitem(i)) + assert self.space.eq_w(objlist_copy[i], objlist.getitem(i)) + + emptylist_copy = emptylist.getitems_unroll() + assert emptylist_copy == [] + + rangelist_copy = rangelist.getitems_unroll() + intlist_copy = intlist.getitems_unroll() + strlist_copy = strlist.getitems_unroll() + floatlist_copy = floatlist.getitems_unroll() + objlist_copy = objlist.getitems_unroll() + for i in range(7): + assert self.space.eq_w(rangelist_copy[i], rangelist.getitem(i)) + assert self.space.eq_w(intlist_copy[i], intlist.getitem(i)) + assert self.space.eq_w(strlist_copy[i], strlist.getitem(i)) + assert self.space.eq_w(floatlist_copy[i], floatlist.getitem(i)) + assert self.space.eq_w(objlist_copy[i], objlist.getitem(i)) + def test_random_getitem(self): w = self.space.wrap s = list('qedx387tn3uixhvt 7fh387fymh3dh238 dwd-wq.dwq9') diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py 
b/pypy/objspace/std/test/test_specialisedtupleobject.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -0,0 +1,234 @@ +import py, sys +from pypy.objspace.std.tupleobject import W_TupleObject +from pypy.objspace.std.specialisedtupleobject import W_SpecialisedTupleObject +from pypy.objspace.std.specialisedtupleobject import _specialisations +from pypy.interpreter.error import OperationError +from pypy.conftest import gettestobjspace, option +from pypy.objspace.std.test import test_tupleobject +from pypy.interpreter import gateway + + +for cls in _specialisations: + globals()[cls.__name__] = cls + + +class TestW_SpecialisedTupleObject(): + + def setup_class(cls): + cls.space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) + + def test_isspecialisedtupleobjectintint(self): + w_tuple = self.space.newtuple([self.space.wrap(1), self.space.wrap(2)]) + assert isinstance(w_tuple, W_SpecialisedTupleObject_ii) + + def test_isnotspecialisedtupleobject(self): + w_tuple = self.space.newtuple([self.space.wrap({})]) + assert not isinstance(w_tuple, W_SpecialisedTupleObject) + + def test_specialisedtupleclassname(self): + w_tuple = self.space.newtuple([self.space.wrap(1), self.space.wrap(2)]) + assert w_tuple.__class__.__name__ == 'W_SpecialisedTupleObject_ii' + + def test_hash_against_normal_tuple(self): + N_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": False}) + S_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) + + def hash_test(values): + N_values_w = [N_space.wrap(value) for value in values] + S_values_w = [S_space.wrap(value) for value in values] + N_w_tuple = N_space.newtuple(N_values_w) + S_w_tuple = S_space.newtuple(S_values_w) + + assert isinstance(S_w_tuple, W_SpecialisedTupleObject) + assert isinstance(N_w_tuple, W_TupleObject) + assert not N_space.is_true(N_space.eq(N_w_tuple, S_w_tuple)) + assert S_space.is_true(S_space.eq(N_w_tuple, S_w_tuple)) + assert S_space.is_true(S_space.eq(N_space.hash(N_w_tuple), S_space.hash(S_w_tuple))) + + hash_test([1,2]) + hash_test([1.5,2.8]) + hash_test([1.0,2.0]) + hash_test(['arbitrary','strings']) + hash_test([1,(1,2,3,4)]) + hash_test([1,(1,2)]) + hash_test([1,('a',2)]) + hash_test([1,()]) + hash_test([1,2,3]) + + +class AppTestW_SpecialisedTupleObject: + + def setup_class(cls): + cls.space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True}) + def forbid_delegation(space, w_tuple): + def delegation_forbidden(): + # haaaack + co = sys._getframe(2).f_code + if co.co_name.startswith('_mm_repr_tuple'): + return + raise OperationError(space.w_ReferenceError, w_tuple) + w_tuple.delegating = delegation_forbidden + return w_tuple + if option.runappdirect: + cls.w_forbid_delegation = lambda self, x: x + cls.test_delegation = lambda self: skip("runappdirect") + else: + cls.w_forbid_delegation = cls.space.wrap( + gateway.interp2app(forbid_delegation)) + + def w_isspecialised(self, obj, expected=''): + import __pypy__ + r = __pypy__.internal_repr(obj) + print obj, '==>', r, ' (expected: %r)' % expected + return ("SpecialisedTupleObject" + expected) in r + + def test_createspecialisedtuple(self): + spec = {int: 'i', + float: 'f', + str: 's', + list: 'o'} + # + for x in [42, 4.2, "foo", []]: + for y in [43, 4.3, "bar", []]: + expected1 = spec[type(x)] + expected2 = spec[type(y)] + if (expected1 == 'f') ^ (expected2 == 'f'): + if expected1 == 'f': expected1 = 'o' + if expected2 == 'f': expected2 = 'o' + obj = (x, y) + assert self.isspecialised(obj, '_' 
+ expected1 + expected2) + # + obj = (1, 2, 3) + assert self.isspecialised(obj, '_ooo') + + def test_delegation(self): + t = self.forbid_delegation((42, 43)) + raises(ReferenceError, t.__getslice__, 0, 1) + + def test_len(self): + t = self.forbid_delegation((42,43)) + assert len(t) == 2 + + def test_notspecialisedtuple(self): + assert not self.isspecialised((42,43,44,45)) + assert not self.isspecialised((1.5,)) + + def test_slicing_to_specialised(self): + t = (1, 2, 3) + assert self.isspecialised(t[0:2]) + t = (1, '2', 3) + assert self.isspecialised(t[0:5:2]) + + def test_adding_to_specialised(self): + t = (1,) + assert self.isspecialised(t + (2,)) + + def test_multiply_to_specialised(self): + t = (1,) + assert self.isspecialised(t * 2) + + def test_slicing_from_specialised(self): + t = (1, 2, 3) + assert t[0:2:1] == (1, 2) + + def test_eq_no_delegation(self): + t = (1,) + a = self.forbid_delegation(t + (2,)) + b = (1, 2) + assert a == b + + c = (2, 1) + assert not a == c + + def test_eq_can_delegate(self): + a = (1,2) + b = (1,3,2) + assert not a == b + + values = [2, 2L, 2.0, 1, 1L, 1.0] + for x in values: + for y in values: + assert ((1,2) == (x,y)) == (1 == x and 2 == y) + + def test_neq(self): + a = self.forbid_delegation((1,2)) + b = (1,) + b = b+(2,) + assert not a != b + + c = (1,3) + assert a != c + + def test_ordering(self): + a = (1,2) #self.forbid_delegation((1,2)) --- code commented out + assert a < (2,2) + assert a < (1,3) + assert not a < (1,2) + + assert a <= (2,2) + assert a <= (1,2) + assert not a <= (1,1) + + assert a >= (0,2) + assert a >= (1,2) + assert not a >= (1,3) + + assert a > (0,2) + assert a > (1,1) + assert not a > (1,3) + + assert (2,2) > a + assert (1,3) > a + assert not (1,2) > a + + assert (2,2) >= a + assert (1,2) >= a + assert not (1,1) >= a + + assert (0,2) <= a + assert (1,2) <= a + assert not (1,3) <= a + + assert (0,2) < a + assert (1,1) < a + assert not (1,3) < a + + def test_hash(self): + a = (1,2) + b = (1,) + b += (2,) # else a and b refer to same constant + assert hash(a) == hash(b) + + c = (2,4) + assert hash(a) != hash(c) + + assert hash(a) == hash((1L, 2L)) == hash((1.0, 2.0)) == hash((1.0, 2L)) + + def test_getitem(self): + t = self.forbid_delegation((5,3)) + assert (t)[0] == 5 + assert (t)[1] == 3 + assert (t)[-1] == 3 + assert (t)[-2] == 5 + raises(IndexError, "t[2]") + raises(IndexError, "t[-3]") + + def test_three_tuples(self): + b = self.forbid_delegation((1, 2, 3)) + c = (1,) + d = c + (2, 3) + assert self.isspecialised(d) + assert b == d + + def test_mongrel(self): + a = self.forbid_delegation((1, 2.2, '333')) + assert self.isspecialised(a) + assert len(a) == 3 + assert a[0] == 1 and a[1] == 2.2 and a[2] == '333' + b = ('333',) + assert a == (1, 2.2,) + b + assert not a != (1, 2.2) + b + + +class AppTestAll(test_tupleobject.AppTestW_TupleObject): + pass diff --git a/pypy/objspace/std/test/test_tupleobject.py b/pypy/objspace/std/test/test_tupleobject.py --- a/pypy/objspace/std/test/test_tupleobject.py +++ b/pypy/objspace/std/test/test_tupleobject.py @@ -280,6 +280,8 @@ assert () * 10 == () assert (5,) * 3 == (5,5,5) assert (5,2) * 2 == (5,2,5,2) + + def test_mul_identity(self): t = (1,2,3) assert (t * 1) is t diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -12,6 +12,15 @@ class W_AbstractTupleObject(W_Object): __slots__ = () + def tolist(self): + "Returns the items, as a fixed-size list." 
+ raise NotImplementedError + + def getitems_copy(self): + "Returns a copy of the items, as a resizable list." + raise NotImplementedError + + class W_TupleObject(W_AbstractTupleObject): from pypy.objspace.std.tupletype import tuple_typedef as typedef _immutable_fields_ = ['wrappeditems[*]'] @@ -29,6 +38,12 @@ items = [space.unwrap(w_item) for w_item in w_tuple.wrappeditems] return tuple(items) + def tolist(self): + return self.wrappeditems + + def getitems_copy(self): + return self.wrappeditems[:] # returns a resizable list + registerimplementation(W_TupleObject) diff --git a/pypy/objspace/std/tupletype.py b/pypy/objspace/std/tupletype.py --- a/pypy/objspace/std/tupletype.py +++ b/pypy/objspace/std/tupletype.py @@ -5,6 +5,14 @@ def wraptuple(space, list_w): from pypy.objspace.std.tupleobject import W_TupleObject + + if space.config.objspace.std.withspecialisedtuple: + from specialisedtupleobject import makespecialisedtuple, NotSpecialised + try: + return makespecialisedtuple(space, list_w) + except NotSpecialised: + pass + if space.config.objspace.std.withsmalltuple: from pypy.objspace.std.smalltupleobject import W_SmallTupleObject2 from pypy.objspace.std.smalltupleobject import W_SmallTupleObject3 diff --git a/pypy/rlib/rdynload.py b/pypy/rlib/rdynload.py --- a/pypy/rlib/rdynload.py +++ b/pypy/rlib/rdynload.py @@ -115,7 +115,8 @@ if _WIN32: DLLHANDLE = rwin32.HMODULE - def dlopen(name): + def dlopen(name, mode=-1): + # mode is unused on windows, but a consistant signature res = rwin32.LoadLibrary(name) if not res: err = rwin32.GetLastError() diff --git a/pypy/rlib/ropenssl.py b/pypy/rlib/ropenssl.py --- a/pypy/rlib/ropenssl.py +++ b/pypy/rlib/ropenssl.py @@ -110,6 +110,10 @@ 'struct GENERAL_NAME_st', [('type', rffi.INT), ]) + EVP_MD_st = rffi_platform.Struct( + 'EVP_MD', + [('md_size', rffi.INT), + ('block_size', rffi.INT)]) EVP_MD_SIZE = rffi_platform.SizeOf('EVP_MD') EVP_MD_CTX_SIZE = rffi_platform.SizeOf('EVP_MD_CTX') @@ -258,7 +262,7 @@ [BIO, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], X509) EVP_MD_CTX = rffi.COpaquePtr('EVP_MD_CTX', compilation_info=eci) -EVP_MD = rffi.COpaquePtr('EVP_MD', compilation_info=eci) +EVP_MD = lltype.Ptr(EVP_MD_st) OpenSSL_add_all_digests = external( 'OpenSSL_add_all_digests', [], lltype.Void) diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py --- a/pypy/rpython/memory/gc/minimark.py +++ b/pypy/rpython/memory/gc/minimark.py @@ -2,8 +2,7 @@ Environment variables can be used to fine-tune the following parameters: - PYPY_GC_NURSERY The nursery size. Defaults to half the size of - the L2 cache. Try values like '1.2MB'. Small values + PYPY_GC_NURSERY The nursery size. Defaults to '4MB'. Small values (like 1 or 1KB) are useful for debugging. PYPY_GC_MAJOR_COLLECT Major collection memory factor. Default is '1.82', @@ -61,7 +60,7 @@ # # * young objects: allocated in the nursery if they are not too large, or # raw-malloced otherwise. The nursery is a fixed-size memory buffer of -# half the size of the L2 cache. When full, we do a minor collection; +# 4MB by default. When full, we do a minor collection; # the surviving objects from the nursery are moved outside, and the # non-surviving raw-malloced objects are freed. All surviving objects # become old. @@ -329,7 +328,8 @@ # size (needed to handle mallocs just below 'large_objects') but # hacking at the current nursery position in collect_and_reserve(). 
if newsize <= 0: - newsize = env.estimate_best_nursery_size() + newsize = 4*1024*1024 # fixed to 4MB by default + # (it was env.estimate_best_nursery_size()) if newsize <= 0: newsize = defaultsize if newsize < minsize: diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -63,7 +63,10 @@ exec_ = eval def repr(self, w_value): - return self.space.unwrap(self.space.repr(w_value)) + try: + return self.space.unwrap(self.space.repr(w_value)) + except Exception, e: + return ""%e def is_true(self, w_value): return self.space.is_true(w_value) diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py +++ b/pypy/translator/goal/app_main.py @@ -672,7 +672,7 @@ def pypy_initial_path(s): from pypy.module.sys.state import getinitialpath try: - return getinitialpath(s) + return getinitialpath(None, s) except OSError: return None diff --git a/pypy/translator/goal/test2/test_app_main.py b/pypy/translator/goal/test2/test_app_main.py --- a/pypy/translator/goal/test2/test_app_main.py +++ b/pypy/translator/goal/test2/test_app_main.py @@ -821,6 +821,8 @@ newpath = app_main.get_library_path('/tmp/pypy-c') # stdlib not found assert newpath == sys.path newpath = app_main.get_library_path(self.fake_exe) + if newpath[0].endswith('__extensions__'): + newpath = newpath[1:] # we get at least 'expected_path', and maybe more (e.g.plat-linux2) assert newpath[:len(self.expected_path)] == self.expected_path finally: diff --git a/pypy/translator/sandbox/sandlib.py b/pypy/translator/sandbox/sandlib.py --- a/pypy/translator/sandbox/sandlib.py +++ b/pypy/translator/sandbox/sandlib.py @@ -6,11 +6,10 @@ import py import sys, os, posixpath, errno, stat, time -from pypy.rpython.module.ll_os_stat import s_StatResult from pypy.tool.ansi_print import AnsiLog -from pypy.rlib.rarithmetic import r_longlong import subprocess from pypy.tool.killsubprocess import killsubprocess +from pypy.translator.sandbox.vfs import UID, GID class MyAnsiLog(AnsiLog): KW_TO_COLOR = { @@ -34,6 +33,10 @@ from pypy.tool.lib_pypy import import_from_lib_pypy marshal = import_from_lib_pypy('marshal') +# Non-marshal result types +RESULTTYPE_STATRESULT = object() +RESULTTYPE_LONGLONG = object() + def read_message(f, timeout=None): # warning: 'timeout' is not really reliable and should only be used # for testing. Also, it doesn't work if the file f does any buffering. @@ -50,12 +53,30 @@ marshal.dump(msg, g) else: marshal.dump(msg, g, 0) + elif resulttype is RESULTTYPE_STATRESULT: + # Hand-coded marshal for stat results that mimics what rmarshal expects. + # marshal.dump(tuple(msg)) would have been too easy. rmarshal insists + # on 64-bit ints at places, even when the value fits in 32 bits. + import struct + st = tuple(msg) + fmt = "iIIiiiIfff" + buf = [] + buf.append(struct.pack(" Author: David Schneider Branch: arm-backend-2 Changeset: r50945:57c6036823fe Date: 2011-12-27 16:06 +0100 http://bitbucket.org/pypy/pypy/changeset/57c6036823fe/ Log: implement chages to the frame manager diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -87,6 +87,7 @@ assert self.memcpy_addr != 0, 'setup_once() not called?' 
self.mc = ARMv7Builder() self.pending_guards = [] + self.currently_compiling_loop = None assert self.datablockwrapper is None allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, @@ -98,6 +99,7 @@ self._regalloc = None self.mc = None self.pending_guards = None + self.currently_compiling_loop = None assert self.datablockwrapper is None def setup_once(self): @@ -641,6 +643,7 @@ looptoken.compiled_loop_token = clt operations = self.setup(looptoken, operations) + self.currently_compiling_loop = looptoken self._dump(operations) self.align() @@ -656,9 +659,9 @@ looptoken._arm_loop_code = loop_head looptoken._arm_bootstrap_code = 0 - self._walk_operations(operations, regalloc) - - looptoken._arm_frame_depth = regalloc.frame_manager.frame_depth + looptoken._arm_frame_depth = -1 + frame_depth = self._assemble(operations, regalloc) + looptoken._arm_frame_depth = frame_depth self._patch_sp_offset(sp_patch_location, looptoken._arm_frame_depth) self.align() @@ -679,6 +682,15 @@ print 'Done assembling loop with token %r' % looptoken self.teardown() + def _assemble(self, operations, regalloc): + regalloc.compute_hint_frame_locations(operations) + self._walk_operations(operations, regalloc) + frame_depth = regalloc.frame_manager.get_frame_depth() + jump_target_descr = regalloc.jump_target_descr + if jump_target_descr is not None: + frame_depth = max(frame_depth, jump_target_descr._arm_frame_depth) + return frame_depth + def assemble_bridge(self, faildescr, inputargs, operations, original_loop_token, log): operations = self.setup(original_loop_token, operations) @@ -692,14 +704,13 @@ assert len(inputargs) == len(arglocs) regalloc = Regalloc(assembler=self, frame_manager=ARMFrameManager()) - regalloc.prepare_bridge(frame_depth, inputargs, arglocs, operations) + regalloc.prepare_bridge(inputargs, arglocs, operations) sp_patch_location = self._prepare_sp_patch_position() - self._walk_operations(operations, regalloc) + frame_depth = self._assemble(operations, regalloc) - self._patch_sp_offset(sp_patch_location, - regalloc.frame_manager.frame_depth) + self._patch_sp_offset(sp_patch_location, frame_depth) self.write_pending_failure_recoveries() bridge_start = self.materialize_loop(original_loop_token) @@ -713,6 +724,10 @@ self.cpu.total_compiled_bridges) self.teardown() + + def target_arglocs(self, loop_token): + return loop_token._arm_arglocs + def materialize_loop(self, looptoken): self.datablockwrapper.done() # finish using cpu.asmmemmgr self.datablockwrapper = None diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -309,14 +309,11 @@ assert fcond == c.AL self._insert_checks() - if descr._arm_bootstrap_code == 0: + if descr is self.currently_compiling_loop: self.mc.B_offs(descr._arm_loop_code, fcond) else: target = descr._arm_bootstrap_code + descr._arm_loop_code self.mc.B(target, fcond) - new_fd = max(regalloc.frame_manager.frame_depth, - descr._arm_frame_depth) - regalloc.frame_manager.frame_depth = new_fd return fcond def emit_op_finish(self, op, arglocs, regalloc, fcond): diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -51,7 +51,9 @@ def __init__(self): FrameManager.__init__(self) - self.frame_depth = 1 + self.used = [True] # keep first slot free + # XXX refactor frame to avoid this issue of keeping the first 
slot + # reserved @staticmethod def frame_pos(loc, type): @@ -70,6 +72,13 @@ return 2 return 1 + @staticmethod + def get_loc_index(loc): + assert loc.is_stack() + if loc.type == FLOAT: + return loc.position - 1 + else: + return loc.position def void(self, op, fcond): return [] @@ -182,6 +191,7 @@ self.cpu = assembler.cpu self.assembler = assembler self.frame_manager = frame_manager + self.jump_target_descr = None def loc(self, var): if var.type == FLOAT: @@ -291,9 +301,9 @@ useful = self._prepare(inputargs, operations) return self._process_inputargs(inputargs, useful) - def prepare_bridge(self, frame_depth, inputargs, arglocs, ops): + def prepare_bridge(self, inputargs, arglocs, ops): self._prepare(inputargs, ops) - self._update_bindings(arglocs, frame_depth, inputargs) + self._update_bindings(arglocs, inputargs) def _process_inputargs(self, inputargs, useful): floatlocs = [None] * len(inputargs) @@ -313,10 +323,9 @@ self.possibly_free_vars(list(inputargs)) return nonfloatlocs, floatlocs - def _update_bindings(self, locs, frame_depth, inputargs): + def _update_bindings(self, locs, inputargs): used = {} i = 0 - self.frame_manager.frame_depth = frame_depth for loc in locs: arg = inputargs[i] i += 1 @@ -326,7 +335,7 @@ self.vfprm.reg_bindings[arg] = loc else: assert loc.is_stack() - self.frame_manager.frame_bindings[arg] = loc + self.frame_manager.set_binding(arg, loc) used[loc] = None # XXX combine with x86 code and move to llsupport @@ -519,7 +528,7 @@ def _prepare_guard(self, op, args=None): if args is None: args = [] - args.append(imm(self.frame_manager.frame_depth)) + args.append(imm(self.frame_manager.get_frame_depth())) for arg in op.getfailargs(): if arg: args.append(self.loc(arg)) @@ -613,10 +622,34 @@ return arglocs + def compute_hint_frame_locations(self, operations): + # optimization only: fill in the 'hint_frame_locations' dictionary + # of rm and xrm based on the JUMP at the end of the loop, by looking + # at where we would like the boxes to be after the jump. 
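(Illustrative aside, a sketch of the idea rather than PyPy's actual FrameManager API.) The 'hint_frame_locations' dictionary named in the comment above implements spill-slot hinting: when a box is known to be expected in a particular stack slot at the JUMP target, the allocator tries to spill it into that same slot now, so that no extra move is needed when the jump is taken. A toy version of the pattern, with made-up names:

    class ToyFrameManager(object):
        def __init__(self):
            self.bindings = {}   # box -> stack slot currently holding it
            self.hints = {}      # box -> slot it should end up in at the jump
            self.used = []       # used[i] is True if slot i is taken

        def get_spill_slot(self, box):
            # Prefer the hinted slot when it is still free.
            hint = self.hints.get(box, -1)
            if hint >= 0 and (hint >= len(self.used) or not self.used[hint]):
                slot = hint
            else:
                # Otherwise reuse the first free slot, or grow the frame.
                try:
                    slot = self.used.index(False)
                except ValueError:
                    slot = len(self.used)
            while len(self.used) <= slot:
                self.used.append(False)
            self.used[slot] = True
            self.bindings[box] = slot
            return slot

    # The compute_hint_frame_locations() method added in this hunk is what
    # fills the hint side: it walks the JUMP arguments and records the stack
    # location each box will have to occupy at the target.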
+ op = operations[-1] + if op.getopnum() != rop.JUMP: + return + descr = op.getdescr() + assert isinstance(descr, LoopToken) + nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr) + for i in range(op.numargs()): + box = op.getarg(i) + if isinstance(box, Box): + loc = nonfloatlocs[i] + if loc is not None and loc.is_stack(): + assert box.type != FLOAT + self.frame_manager.hint_frame_locations[box] = loc + else: + loc = floatlocs[i] + if loc is not None and loc.is_stack(): + assert box.type == FLOAT + self.frame_manager.hint_frame_locations[box] = loc + def prepare_op_jump(self, op, fcond): descr = op.getdescr() assert isinstance(descr, LoopToken) - nonfloatlocs, floatlocs = descr._arm_arglocs + self.jump_target_descr = descr + nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr) # get temporary locs tmploc = r.ip @@ -940,7 +973,7 @@ def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): shape = gcrootmap.get_basic_shape(False) - for v, val in self.frame_manager.frame_bindings.items(): + for v, val in self.frame_manager.bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert val.is_stack() gcrootmap.add_frame_offset(shape, val.position * -WORD) From noreply at buildbot.pypy.org Thu Dec 29 09:57:36 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Dec 2011 09:57:36 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: merge 1a9a44331010 (jit-targets) Message-ID: <20111229085736.8E59E82C01@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50946:802e7c9df889 Date: 2011-12-28 10:03 +0100 http://bitbucket.org/pypy/pypy/changeset/802e7c9df889/ Log: merge 1a9a44331010 (jit-targets) diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -8,6 +8,7 @@ from pypy.objspace.flow.model import Variable, Constant from pypy.annotation import model as annmodel from pypy.jit.metainterp.history import REF, INT, FLOAT +from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi from pypy.rpython.ootypesystem import ootype @@ -48,6 +49,11 @@ value._the_opaque_pointer = op return op +def _normalize(value): + if isinstance(value, lltype._ptr): + value = lltype.top_container(value._obj) + return value + def from_opaque_string(s): if isinstance(s, str): return s @@ -347,6 +353,14 @@ op = loop.operations[-1] op.descr = weakref.ref(descr) +TARGET_TOKENS = weakref.WeakKeyDictionary() + +def compile_add_target_token(loop, descr): + loop = _from_opaque(loop) + op = loop.operations[-1] + descrobj = _normalize(descr) + TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args + def compile_add_var(loop, intvar): loop = _from_opaque(loop) op = loop.operations[-1] @@ -381,13 +395,17 @@ _variables.append(v) return r -def compile_add_jump_target(loop, loop_target): +def compile_add_jump_target(loop, targettoken): loop = _from_opaque(loop) - loop_target = _from_opaque(loop_target) + descrobj = _normalize(targettoken) + loop_target, target_opindex, target_inputargs = TARGET_TOKENS[descrobj] + # op = loop.operations[-1] op.jump_target = loop_target + op.jump_target_opindex = target_opindex + op.jump_target_inputargs = target_inputargs assert op.opnum == rop.JUMP - assert len(op.args) == len(loop_target.inputargs) + assert len(op.args) == len(target_inputargs) if loop_target == loop: log.info("compiling new loop") else: 
@@ -521,10 +539,11 @@ self.opindex += 1 continue if op.opnum == rop.JUMP: - assert len(op.jump_target.inputargs) == len(args) - self.env = dict(zip(op.jump_target.inputargs, args)) + inputargs = op.jump_target_inputargs + assert len(inputargs) == len(args) + self.env = dict(zip(inputargs, args)) self.loop = op.jump_target - self.opindex = 0 + self.opindex = op.jump_target_opindex _stats.exec_jumps += 1 elif op.opnum == rop.FINISH: if self.verbose: @@ -617,6 +636,15 @@ # return _op_default_implementation + def op_label(self, _, *args): + op = self.loop.operations[self.opindex] + assert op.opnum == rop.LABEL + assert len(op.args) == len(args) + newenv = {} + for v, value in zip(op.args, args): + newenv[v] = value + self.env = newenv + def op_debug_merge_point(self, _, *args): from pypy.jit.metainterp.warmspot import get_stats try: @@ -1791,6 +1819,7 @@ setannotation(compile_add, annmodel.s_None) setannotation(compile_add_descr, annmodel.s_None) setannotation(compile_add_descr_arg, annmodel.s_None) +setannotation(compile_add_target_token, annmodel.s_None) setannotation(compile_add_var, annmodel.s_None) setannotation(compile_add_int_const, annmodel.s_None) setannotation(compile_add_ref_const, annmodel.s_None) diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -142,17 +142,17 @@ old, oldindex = faildescr._compiled_fail llimpl.compile_redirect_fail(old, oldindex, c) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, inputargs, operations, jitcell_token, log=True, name=''): """In a real assembler backend, this should assemble the given list of operations. Here we just generate a similar CompiledLoop instance. The code here is RPython, whereas the code in llimpl is not. 
""" c = llimpl.compile_start() - clt = model.CompiledLoopToken(self, looptoken.number) + clt = model.CompiledLoopToken(self, jitcell_token.number) clt.loop_and_bridges = [c] clt.compiled_version = c - looptoken.compiled_loop_token = clt + jitcell_token.compiled_loop_token = clt self._compile_loop_or_bridge(c, inputargs, operations) def free_loop_and_bridges(self, compiled_loop_token): @@ -183,9 +183,11 @@ llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo, descr.arg_types, descr.extrainfo, descr.width) - if (isinstance(descr, history.LoopToken) and - op.getopnum() != rop.JUMP): + if isinstance(descr, history.JitCellToken): + assert op.getopnum() != rop.JUMP llimpl.compile_add_loop_token(c, descr) + if isinstance(descr, history.TargetToken) and op.getopnum() == rop.LABEL: + llimpl.compile_add_target_token(c, descr) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython c._obj.externalobj.operations[-1].setdescr(descr) @@ -239,9 +241,7 @@ assert op.is_final() if op.getopnum() == rop.JUMP: targettoken = op.getdescr() - assert isinstance(targettoken, history.LoopToken) - compiled_version = targettoken.compiled_loop_token.compiled_version - llimpl.compile_add_jump_target(c, compiled_version) + llimpl.compile_add_jump_target(c, targettoken) elif op.getopnum() == rop.FINISH: faildescr = op.getdescr() index = self.get_fail_descr_number(faildescr) diff --git a/pypy/jit/backend/test/calling_convention_test.py b/pypy/jit/backend/test/calling_convention_test.py --- a/pypy/jit/backend/test/calling_convention_test.py +++ b/pypy/jit/backend/test/calling_convention_test.py @@ -2,7 +2,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, ConstInt, ConstPtr, BoxObj, Const, ConstObj, BoxFloat, ConstFloat) @@ -108,7 +108,7 @@ ops += 'finish(f99, %s)\n' % arguments loop = parse(ops, namespace=locals()) - looptoken = LoopToken() + looptoken = JitCellToken() done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) expected_result = self._prepare_args(args, floats, ints) @@ -254,7 +254,7 @@ called_ops += 'finish(f%d, descr=fdescr3)\n' % total_index # compile called loop called_loop = parse(called_ops, namespace=locals()) - called_looptoken = LoopToken() + called_looptoken = JitCellToken() called_looptoken.outermost_jitdriver_sd = FakeJitDriverSD() done_number = self.cpu.get_fail_descr_number(called_loop.operations[-1].getdescr()) self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) @@ -285,7 +285,7 @@ # we want to take the fast path self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # prepare call to called_loop diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -3,7 +3,7 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, + JitCellToken, TargetToken, ConstInt, ConstPtr, BoxObj, Const, ConstObj, BoxFloat, ConstFloat) @@ -32,7 +32,7 @@ result_type, valueboxes, descr) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) j = 0 for box in inputargs: @@ -106,7 +106,7 @@ ResOperation(rop.FINISH, [i1], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() 
self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) fail = self.cpu.execute_token(looptoken) @@ -134,15 +134,17 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) @@ -155,18 +157,22 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) + self.cpu.set_future_value_int(0, 44) fail = self.cpu.execute_token(looptoken) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(2) @@ -178,15 +184,17 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = weakref.ref(operations[2]) self.cpu.compile_loop(inputargs, operations, looptoken) @@ -206,15 +214,17 @@ i2 = BoxInt() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([i1]) + operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -222,7 +232,7 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -242,17 +252,21 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() + i3 = BoxInt() faildescr1 = BasicFailDescr(1) 
faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] - inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + inputargs = [i3] + operations[4].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) i1b = BoxInt() @@ -260,7 +274,7 @@ bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -277,15 +291,17 @@ i1 = BoxInt() i2 = BoxInt() faildescr1 = BasicFailDescr(1) - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[2].setfailargs([None, i1, None]) + operations[3].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_int(0, 2) @@ -306,7 +322,7 @@ return AbstractFailDescr.__setattr__(self, name, value) py.test.fail("finish descrs should not be touched") faildescr = UntouchableFailDescr() # to check that is not touched - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [i0], None, descr=faildescr) ] @@ -317,7 +333,7 @@ res = self.cpu.get_latest_value_int(0) assert res == 99 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [ConstInt(42)], None, descr=faildescr) ] @@ -327,7 +343,7 @@ res = self.cpu.get_latest_value_int(0) assert res == 42 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [], None, descr=faildescr) ] @@ -336,7 +352,7 @@ assert fail is faildescr if self.cpu.supports_floats: - looptoken = LoopToken() + looptoken = JitCellToken() f0 = BoxFloat() operations = [ ResOperation(rop.FINISH, [f0], None, descr=faildescr) @@ -349,7 +365,7 @@ res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == -61.25 - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.FINISH, [constfloat(42.5)], None, descr=faildescr) ] @@ -366,14 +382,16 @@ z = BoxInt(579) t = BoxInt(455) u = BoxInt(0) # False - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [y, x], None, descr=targettoken), ResOperation(rop.INT_ADD, [x, y], z), ResOperation(rop.INT_SUB, [y, ConstInt(1)], t), ResOperation(rop.INT_EQ, [t, ConstInt(0)], u), ResOperation(rop.GUARD_FALSE, [u], None, descr=BasicFailDescr()), - ResOperation(rop.JUMP, [z, t], None, descr=looptoken), + ResOperation(rop.JUMP, [t, z], None, descr=targettoken), ] operations[-2].setfailargs([t, z]) cpu.compile_loop([x, y], operations, looptoken) @@ -435,7 
+453,7 @@ ] ops[1].setfailargs([v_res]) # - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([v1, v2], ops, looptoken) for x, y, z in testcases: excvalue = self.cpu.grab_exc_value() @@ -1114,16 +1132,18 @@ inputargs.insert(index_counter, i0) jumpargs.insert(index_counter, i1) # - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() faildescr = BasicFailDescr(15) operations = [ + ResOperation(rop.LABEL, inputargs, None, descr=targettoken), ResOperation(rop.INT_SUB, [i0, ConstInt(1)], i1), ResOperation(rop.INT_GE, [i1, ConstInt(0)], i2), ResOperation(rop.GUARD_TRUE, [i2], None), - ResOperation(rop.JUMP, jumpargs, None, descr=looptoken), + ResOperation(rop.JUMP, jumpargs, None, descr=targettoken), ] - operations[2].setfailargs(inputargs[:]) - operations[2].setdescr(faildescr) + operations[3].setfailargs(inputargs[:]) + operations[3].setdescr(faildescr) # self.cpu.compile_loop(inputargs, operations, looptoken) # @@ -1181,22 +1201,24 @@ py.test.skip("requires floats") fboxes = [BoxFloat() for i in range(12)] i2 = BoxInt() + targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) operations = [ + ResOperation(rop.LABEL, fboxes, None, descr=targettoken), ResOperation(rop.FLOAT_LE, [fboxes[0], constfloat(9.2)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), ResOperation(rop.FINISH, fboxes, None, descr=faildescr2), ] operations[-2].setfailargs(fboxes) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(fboxes, operations, looptoken) fboxes2 = [BoxFloat() for i in range(12)] f3 = BoxFloat() bridge = [ ResOperation(rop.FLOAT_SUB, [fboxes2[0], constfloat(1.0)], f3), - ResOperation(rop.JUMP, [f3] + fboxes2[1:], None, descr=looptoken), + ResOperation(rop.JUMP, [f3]+fboxes2[1:], None, descr=targettoken), ] self.cpu.compile_bridge(faildescr1, fboxes2, bridge, looptoken) @@ -1322,7 +1344,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1432,7 +1454,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1491,7 +1513,7 @@ faildescr = BasicFailDescr(1) operations.append(ResOperation(rop.FINISH, [], None, descr=faildescr)) - looptoken = LoopToken() + looptoken = JitCellToken() # self.cpu.compile_loop(inputargs, operations, looptoken) # @@ -1561,7 +1583,7 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(5))] operations[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) self.cpu.compile_loop(unique_testcase_list, operations, @@ -1839,15 +1861,16 @@ exc_tp = xtp exc_ptr = xptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_ref(1) == xptr excvalue = self.cpu.grab_exc_value() assert not excvalue self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + 
self.cpu.execute_token(looptoken) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1864,9 +1887,10 @@ exc_tp = ytp exc_ptr = yptr loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == yptr @@ -1882,14 +1906,15 @@ finish(0) ''' loop = parse(ops, self.cpu, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == xptr self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken) assert self.cpu.get_latest_value_int(0) == 0 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -2059,7 +2084,7 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) self.cpu.set_future_value_int(1, 0) @@ -2104,7 +2129,7 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, i2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) self.cpu.set_future_value_int(1, 0) @@ -2152,7 +2177,7 @@ ResOperation(rop.FINISH, [f2], None, descr=BasicFailDescr(0)) ] ops[2].setfailargs([i1, f2, i0]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, 20) self.cpu.set_future_value_int(1, 0) @@ -2197,7 +2222,7 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([i1, i2]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1], ops, looptoken) self.cpu.set_future_value_int(0, ord('G')) fail = self.cpu.execute_token(looptoken) @@ -2257,7 +2282,7 @@ ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(0)) ] ops[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1, i2, i3], ops, looptoken) self.cpu.set_future_value_int(0, rffi.cast(lltype.Signed, raw)) self.cpu.set_future_value_int(1, 2) @@ -2313,7 +2338,7 @@ ops += [ ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0)) ] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') @@ -2335,7 +2360,7 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0)) ] ops[0].setfailargs([i1]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) self.cpu.set_future_value_int(0, -42) @@ -2584,7 +2609,7 @@ i18 = int_add(i17, i9) finish(i18)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() done_number = 
self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) @@ -2605,7 +2630,7 @@ finish(i11) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) for i in range(10): self.cpu.set_future_value_int(i, i+1) @@ -2658,7 +2683,7 @@ finish(f2)''' loop = parse(ops) done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) @@ -2673,7 +2698,7 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) @@ -2686,7 +2711,7 @@ del called[:] self.cpu.done_with_this_frame_float_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) @@ -2751,7 +2776,7 @@ f2 = float_add(f0, f1) finish(f2)''' loop = parse(ops) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) @@ -2769,7 +2794,7 @@ finish(f3) ''' loop = parse(ops, namespace=locals()) - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # normal call_assembler: goes to looptoken @@ -2787,7 +2812,7 @@ f2 = float_sub(f0, f1) finish(f2)''' loop = parse(ops) - looptoken2 = LoopToken() + looptoken2 = JitCellToken() looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken2) done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) @@ -3150,7 +3175,7 @@ ResOperation(rop.FINISH, [p0], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # overflowing value: self.cpu.set_future_value_int(0, sys.maxint // 4 + 1) @@ -3179,6 +3204,133 @@ assert res.getfloat() == expected + def test_compile_loop_with_target(self): + i0 = BoxInt() + i1 = BoxInt() + i2 = BoxInt() + i3 = BoxInt() + looptoken = JitCellToken() + targettoken1 = TargetToken() + targettoken2 = TargetToken() + faildescr = BasicFailDescr(2) + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), + ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr), + ResOperation(rop.LABEL, [i1], None, descr=targettoken2), + ResOperation(rop.INT_GE, [i1, ConstInt(0)], i3), + ResOperation(rop.GUARD_TRUE, [i3], None, descr=BasicFailDescr(3)), + ResOperation(rop.JUMP, [i1], None, descr=targettoken1), + ] + inputargs = [i0] + operations[3].setfailargs([i1]) + operations[6].setfailargs([i1]) + + self.cpu.compile_loop(inputargs, operations, looptoken) + 
self.cpu.set_future_value_int(0, 2) + fail = self.cpu.execute_token(looptoken) + assert fail.identifier == 2 + res = self.cpu.get_latest_value_int(0) + assert res == 10 + + inputargs = [i0] + operations = [ + ResOperation(rop.INT_SUB, [i0, ConstInt(20)], i2), + ResOperation(rop.JUMP, [i2], None, descr=targettoken2), + ] + self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) + + self.cpu.set_future_value_int(0, 2) + fail = self.cpu.execute_token(looptoken) + assert fail.identifier == 3 + res = self.cpu.get_latest_value_int(0) + assert res == -10 + + def test_compile_bridge_with_target(self): + # This test creates a loopy piece of code in a bridge, and builds another + # unrelated loop that ends in a jump directly to this loopy bit of code. + # It catches a case in which we underestimate the needed frame_depth across + # the cross-loop JUMP, because we estimate it based on the frame_depth stored + # in the original loop. + i0 = BoxInt() + i1 = BoxInt() + looptoken1 = JitCellToken() + targettoken1 = TargetToken() + faildescr1 = BasicFailDescr(2) + inputargs = [i0] + operations = [ + ResOperation(rop.INT_LE, [i0, ConstInt(1)], i1), + ResOperation(rop.GUARD_TRUE, [i1], None, descr=faildescr1), + ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(1234)), + ] + operations[1].setfailargs([i0]) + self.cpu.compile_loop(inputargs, operations, looptoken1) + + def func(a, b, c, d, e, f, g, h, i): + assert a + 2 == b + assert a + 4 == c + assert a + 6 == d + assert a + 8 == e + assert a + 10 == f + assert a + 12 == g + assert a + 14 == h + assert a + 16 == i + FPTR = self.Ptr(self.FuncType([lltype.Signed]*9, lltype.Void)) + func_ptr = llhelper(FPTR, func) + cpu = self.cpu + calldescr = cpu.calldescrof(deref(FPTR), (lltype.Signed,)*9, lltype.Void, + EffectInfo.MOST_GENERAL) + funcbox = self.get_funcbox(cpu, func_ptr) + + i0 = BoxInt(); i1 = BoxInt(); i2 = BoxInt(); i3 = BoxInt(); i4 = BoxInt() + i5 = BoxInt(); i6 = BoxInt(); i7 = BoxInt(); i8 = BoxInt(); i9 = BoxInt() + i10 = BoxInt(); i11 = BoxInt(); i12 = BoxInt(); i13 = BoxInt(); i14 = BoxInt() + i15 = BoxInt(); i16 = BoxInt(); i17 = BoxInt(); i18 = BoxInt(); i19 = BoxInt() + i20 = BoxInt() + inputargs = [i0] + operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken1), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_ADD, [i1, ConstInt(1)], i2), + ResOperation(rop.INT_ADD, [i2, ConstInt(1)], i3), + ResOperation(rop.INT_ADD, [i3, ConstInt(1)], i4), + ResOperation(rop.INT_ADD, [i4, ConstInt(1)], i5), + ResOperation(rop.INT_ADD, [i5, ConstInt(1)], i6), + ResOperation(rop.INT_ADD, [i6, ConstInt(1)], i7), + ResOperation(rop.INT_ADD, [i7, ConstInt(1)], i8), + ResOperation(rop.INT_ADD, [i8, ConstInt(1)], i9), + ResOperation(rop.INT_ADD, [i9, ConstInt(1)], i10), + ResOperation(rop.INT_ADD, [i10, ConstInt(1)], i11), + ResOperation(rop.INT_ADD, [i11, ConstInt(1)], i12), + ResOperation(rop.INT_ADD, [i12, ConstInt(1)], i13), + ResOperation(rop.INT_ADD, [i13, ConstInt(1)], i14), + ResOperation(rop.INT_ADD, [i14, ConstInt(1)], i15), + ResOperation(rop.INT_ADD, [i15, ConstInt(1)], i16), + ResOperation(rop.INT_ADD, [i16, ConstInt(1)], i17), + ResOperation(rop.INT_ADD, [i17, ConstInt(1)], i18), + ResOperation(rop.INT_ADD, [i18, ConstInt(1)], i19), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.CALL, [funcbox, i2, i4, i6, i8, i10, i12, i14, i16, i18], + None, descr=calldescr), + ResOperation(rop.INT_LT, [i19, ConstInt(100)], i20), + 
ResOperation(rop.GUARD_TRUE, [i20], None, descr=BasicFailDescr(42)), + ResOperation(rop.JUMP, [i19], None, descr=targettoken1), + ] + operations[-2].setfailargs([]) + self.cpu.compile_bridge(faildescr1, inputargs, operations, looptoken1) + + looptoken2 = JitCellToken() + inputargs = [] + operations = [ + ResOperation(rop.JUMP, [ConstInt(0)], None, descr=targettoken1), + ] + self.cpu.compile_loop(inputargs, operations, looptoken2) + + fail = self.cpu.execute_token(looptoken2) + assert fail.identifier == 42 + class OOtypeBackendTest(BaseBackendTest): diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -3,8 +3,8 @@ from pypy.rlib.rarithmetic import intmask, LONG_BIT from pypy.rpython.lltypesystem import llmemory from pypy.jit.metainterp.history import BasicFailDescr, TreeLoop -from pypy.jit.metainterp.history import BoxInt, ConstInt, LoopToken -from pypy.jit.metainterp.history import BoxPtr, ConstPtr +from pypy.jit.metainterp.history import BoxInt, ConstInt, JitCellToken +from pypy.jit.metainterp.history import BoxPtr, ConstPtr, TargetToken from pypy.jit.metainterp.history import BoxFloat, ConstFloat, Const from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.executor import execute_nonspec @@ -179,7 +179,7 @@ #print >>s, ' operations[%d].suboperations = [' % i #print >>s, ' ResOperation(rop.FAIL, [%s], None)]' % ( # ', '.join([names[v] for v in op.args])) - print >>s, ' looptoken = LoopToken()' + print >>s, ' looptoken = JitCellToken()' print >>s, ' cpu.compile_loop(inputargs, operations, looptoken)' if hasattr(self.loop, 'inputargs'): for i, v in enumerate(self.loop.inputargs): @@ -525,29 +525,53 @@ startvars.append(BoxFloat(r.random_float_storage())) else: startvars.append(BoxInt(r.random_integer())) + allow_delay = True + else: + allow_delay = False assert len(dict.fromkeys(startvars)) == len(startvars) self.startvars = startvars self.prebuilt_ptr_consts = [] self.r = r - self.build_random_loop(cpu, builder_factory, r, startvars) + self.build_random_loop(cpu, builder_factory, r, startvars, allow_delay) - def build_random_loop(self, cpu, builder_factory, r, startvars): + def build_random_loop(self, cpu, builder_factory, r, startvars, allow_delay): loop = TreeLoop('test_random_function') loop.inputargs = startvars[:] loop.operations = [] - loop.token = LoopToken() - + loop._jitcelltoken = JitCellToken() builder = builder_factory(cpu, loop, startvars[:]) - self.generate_ops(builder, r, loop, startvars) + if allow_delay: + needs_a_label = True + else: + self.insert_label(loop, 0, r) + needs_a_label = False + self.generate_ops(builder, r, loop, startvars, needs_a_label=needs_a_label) self.builder = builder self.loop = loop - cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + dump(loop) + cpu.compile_loop(loop.inputargs, loop.operations, loop._jitcelltoken) - def generate_ops(self, builder, r, loop, startvars): + def insert_label(self, loop, position, r): + assert not hasattr(loop, '_targettoken') + for i in range(position): + op = loop.operations[i] + if (not op.has_no_side_effect() + or not isinstance(op.result, (BoxInt, BoxFloat))): + position = i + break # cannot move the LABEL later + randompos = r.randrange(0, len(self.startvars)+1) + self.startvars.insert(randompos, op.result) + loop._targettoken = TargetToken() + loop.operations.insert(position, ResOperation(rop.LABEL, self.startvars, None, + loop._targettoken)) + + 
def generate_ops(self, builder, r, loop, startvars, needs_a_label=False): block_length = pytest.config.option.block_length + istart = 0 for i in range(block_length): + istart = len(loop.operations) try: op = r.choice(builder.OPERATIONS) op.filter(builder) @@ -556,6 +580,12 @@ pass if builder.should_fail_by is not None: break + if needs_a_label and r.random() < 0.2: + self.insert_label(loop, istart, r) + needs_a_label = False + if needs_a_label: + self.insert_label(loop, istart, r) + endvars = [] used_later = {} for op in loop.operations: @@ -581,6 +611,17 @@ if pytest.config.option.output: builder.print_loop() + def runjitcelltoken(self): + if self.startvars == self.loop.inputargs: + return self.loop._jitcelltoken + if not hasattr(self, '_initialjumploop_celltoken'): + self._initialjumploop_celltoken = JitCellToken() + self.cpu.compile_loop(self.startvars[:], + [ResOperation(rop.JUMP, self.startvars[:], None, + descr=self.loop._targettoken)], + self._initialjumploop_celltoken) + return self._initialjumploop_celltoken + def get_fail_args(self): if self.should_fail_by.is_guard(): assert self.should_fail_by.getfailargs() is not None @@ -615,7 +656,7 @@ cpu.set_future_value_float(i, box.value) else: raise NotImplementedError(box) - fail = cpu.execute_token(self.loop.token) + fail = cpu.execute_token(self.runjitcelltoken()) assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): if isinstance(v, (BoxFloat, ConstFloat)): @@ -683,26 +724,37 @@ args = [x.clonebox() for x in subset] rl = RandomLoop(self.builder.cpu, self.builder.fork, r, args) + dump(rl.loop) self.cpu.compile_loop(rl.loop.inputargs, rl.loop.operations, - rl.loop.token) + rl.loop._jitcelltoken) # done self.should_fail_by = rl.should_fail_by self.expected = rl.expected assert len(rl.loop.inputargs) == len(args) # The new bridge's execution will end normally at its FINISH. # Just replace the FINISH with the JUMP to the new loop. 
- jump_op = ResOperation(rop.JUMP, subset, None, descr=rl.loop.token) + jump_op = ResOperation(rop.JUMP, subset, None, + descr=rl.loop._targettoken) subloop.operations[-1] = jump_op self.guard_op = rl.guard_op self.prebuilt_ptr_consts += rl.prebuilt_ptr_consts - self.loop.token.record_jump_to(rl.loop.token) + self.loop._jitcelltoken.record_jump_to(rl.loop._jitcelltoken) self.dont_generate_more = True if r.random() < .05: return False + dump(subloop) self.builder.cpu.compile_bridge(fail_descr, fail_args, - subloop.operations, self.loop.token) + subloop.operations, + self.loop._jitcelltoken) return True +def dump(loop): + print >> sys.stderr, loop + if hasattr(loop, 'inputargs'): + print >> sys.stderr, '\t', loop.inputargs + for op in loop.operations: + print >> sys.stderr, '\t', op + def check_random_function(cpu, BuilderClass, r, num=None, max=None): loop = RandomLoop(cpu, BuilderClass, r) while True: diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -2,8 +2,8 @@ from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from pypy.jit.metainterp.history import Const, Box, BoxInt, ConstInt -from pypy.jit.metainterp.history import (AbstractFailDescr, INT, REF, FLOAT, - LoopToken) +from pypy.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT +from pypy.jit.metainterp.history import JitCellToken from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper @@ -152,14 +152,13 @@ allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, allblocks) + self.target_tokens_currently_compiling = {} def teardown(self): self.pending_guard_tokens = None if WORD == 8: self.pending_memoryerror_trampoline_from = None self.mc = None - self.looppos = -1 - self.currently_compiling_loop = None self.current_clt = None def finish_once(self): @@ -425,8 +424,6 @@ _x86_loop_code (an integer giving an address) _x86_bootstrap_code (an integer giving an address) _x86_direct_bootstrap_code ( " " " " ) - _x86_frame_depth - _x86_param_depth _x86_arglocs _x86_debug_checksum ''' @@ -443,7 +440,6 @@ assert len(set(inputargs)) == len(inputargs) self.setup(looptoken) - self.currently_compiling_loop = looptoken if log: self._register_counter(False, looptoken.number) operations = self._inject_debugging_code(looptoken, operations) @@ -455,15 +451,16 @@ bootstrappos = self.mc.get_relative_pos() stackadjustpos = self._assemble_bootstrap_code(inputargs, arglocs) - self.looppos = self.mc.get_relative_pos() - looptoken._x86_frame_depth = -1 # temporarily - looptoken._x86_param_depth = -1 # temporarily + looppos = self.mc.get_relative_pos() + looptoken._x86_loop_code = looppos + clt.frame_depth = -1 # temporarily + clt.param_depth = -1 # temporarily frame_depth, param_depth = self._assemble(regalloc, operations) - looptoken._x86_frame_depth = frame_depth - looptoken._x86_param_depth = param_depth + clt.frame_depth = frame_depth + clt.param_depth = param_depth directbootstrappos = self.mc.get_relative_pos() - self._assemble_bootstrap_direct_call(arglocs, self.looppos, + self._assemble_bootstrap_direct_call(arglocs, looppos, frame_depth+param_depth) self.write_pending_failure_recoveries() fullsize = self.mc.get_relative_pos() @@ -472,7 +469,7 @@ debug_start("jit-backend-addr") 
debug_print("Loop %d (%s) has address %x to %x (bootstrap %x)" % ( looptoken.number, loopname, - rawstart + self.looppos, + rawstart + looppos, rawstart + directbootstrappos, rawstart)) debug_stop("jit-backend-addr") @@ -488,8 +485,8 @@ looptoken._x86_ops_offset = ops_offset looptoken._x86_bootstrap_code = rawstart + bootstrappos - looptoken._x86_loop_code = rawstart + self.looppos looptoken._x86_direct_bootstrap_code = rawstart + directbootstrappos + self.fixup_target_tokens(rawstart) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -548,6 +545,9 @@ # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset + self.fixup_target_tokens(rawstart) + self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth) + self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -668,6 +668,11 @@ mc.copy_to_raw_memory(adr_target) faildescr._x86_adr_jump_offset = 0 # means "patched" + def fixup_target_tokens(self, rawstart): + for targettoken in self.target_tokens_currently_compiling: + targettoken._x86_loop_code += rawstart + self.target_tokens_currently_compiling = None + @specialize.argtype(1) def _inject_debugging_code(self, looptoken, operations): if self._debug: @@ -685,7 +690,10 @@ ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), ResOperation(rop.SETFIELD_RAW, [c_adr, box2], None, descr=self.debug_counter_descr)] - operations = ops + operations + if operations[0].getopnum() == rop.LABEL: + operations = [operations[0]] + ops + operations[1:] + else: + operations = ops + operations return operations def _assemble(self, regalloc, operations): @@ -698,8 +706,8 @@ param_depth = regalloc.param_depth jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: - target_frame_depth = jump_target_descr._x86_frame_depth - target_param_depth = jump_target_descr._x86_param_depth + target_frame_depth = jump_target_descr._x86_clt.frame_depth + target_param_depth = jump_target_descr._x86_clt.param_depth frame_depth = max(frame_depth, target_frame_depth) param_depth = max(param_depth, target_param_depth) return frame_depth, param_depth @@ -2345,7 +2353,7 @@ fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, JitCellToken) assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) # # Write a call to the direct_bootstrap_code of the target assembler @@ -2579,15 +2587,13 @@ gcrootmap.put(self.gcrootmap_retaddr_forced, mark) self.gcrootmap_retaddr_forced = -1 - def target_arglocs(self, loop_token): - return loop_token._x86_arglocs - - def closing_jump(self, loop_token): - if loop_token is self.currently_compiling_loop: + def closing_jump(self, target_token): + target = target_token._x86_loop_code + if target_token in self.target_tokens_currently_compiling: curpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(self.looppos - curpos) + self.mc.JMP_l(target - curpos) else: - self.mc.JMP(imm(loop_token._x86_loop_code)) + self.mc.JMP(imm(target)) def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -5,7 +5,8 @@ import os from 
pypy.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr, ResOperation, BoxPtr, ConstFloat, - BoxFloat, LoopToken, INT, REF, FLOAT) + BoxFloat, INT, REF, FLOAT, + TargetToken, JitCellToken) from pypy.jit.backend.x86.regloc import * from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.rlib.objectmodel import we_are_translated @@ -163,6 +164,7 @@ # to be read/used by the assembler too self.jump_target_descr = None self.close_stack_struct = 0 + self.final_jump_op = None def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() @@ -832,7 +834,7 @@ def consider_call_assembler(self, op, guard_op): descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, JitCellToken) jd = descr.outermost_jitdriver_sd assert jd is not None size = jd.portal_calldescr.get_result_size(self.translate_support_code) @@ -1264,16 +1266,30 @@ def compute_hint_frame_locations(self, operations): # optimization only: fill in the 'hint_frame_locations' dictionary - # of rm and xrm based on the JUMP at the end of the loop, by looking + # of 'fm' based on the JUMP at the end of the loop, by looking # at where we would like the boxes to be after the jump. op = operations[-1] if op.getopnum() != rop.JUMP: return + self.final_jump_op = op descr = op.getdescr() - assert isinstance(descr, LoopToken) - nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr) - for i in range(op.numargs()): - box = op.getarg(i) + assert isinstance(descr, TargetToken) + if descr._x86_loop_code != 0: + # if the target LABEL was already compiled, i.e. if it belongs + # to some already-compiled piece of code + self._compute_hint_frame_locations_from_descr(descr) + #else: + # The loop ends in a JUMP going back to a LABEL in the same loop. + # We cannot fill 'hint_frame_locations' immediately, but we can + # wait until the corresponding consider_label() to know where the + # we would like the boxes to be after the jump. + + def _compute_hint_frame_locations_from_descr(self, descr): + nonfloatlocs, floatlocs = descr._x86_arglocs + jump_op = self.final_jump_op + assert len(nonfloatlocs) == jump_op.numargs() + for i in range(jump_op.numargs()): + box = jump_op.getarg(i) if isinstance(box, Box): loc = nonfloatlocs[i] if isinstance(loc, StackLoc): @@ -1289,9 +1305,9 @@ assembler = self.assembler assert self.jump_target_descr is None descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, TargetToken) + nonfloatlocs, floatlocs = descr._x86_arglocs self.jump_target_descr = descr - nonfloatlocs, floatlocs = assembler.target_arglocs(self.jump_target_descr) # compute 'tmploc' to be all_regs[0] by spilling what is there box = TempBox() box1 = TempBox() @@ -1364,6 +1380,74 @@ # the FORCE_TOKEN operation returns directly 'ebp' self.rm.force_allocate_frame_reg(op.result) + def consider_label(self, op): + # XXX big refactoring needed? 
+ descr = op.getdescr() + assert isinstance(descr, TargetToken) + inputargs = op.getarglist() + floatlocs = [None] * len(inputargs) + nonfloatlocs = [None] * len(inputargs) + # + # we need to make sure that the tmpreg and xmmtmp are free + tmpreg = X86RegisterManager.all_regs[0] + tmpvar = TempBox() + self.rm.force_allocate_reg(tmpvar, selected_reg=tmpreg) + self.rm.possibly_free_var(tmpvar) + # + xmmtmp = X86XMMRegisterManager.all_regs[0] + tmpvar = TempBox() + self.xrm.force_allocate_reg(tmpvar, selected_reg=xmmtmp) + self.xrm.possibly_free_var(tmpvar) + # + # we need to make sure that no variable is stored in ebp + for arg in inputargs: + if self.loc(arg) is ebp: + loc2 = self.fm.loc(arg) + self.assembler.mc.MOV(loc2, ebp) + self.rm.bindings_to_frame_reg.clear() + # + for i in range(len(inputargs)): + arg = inputargs[i] + assert not isinstance(arg, Const) + loc = self.loc(arg) + assert not (loc is tmpreg or loc is xmmtmp or loc is ebp) + if arg.type == FLOAT: + floatlocs[i] = loc + else: + nonfloatlocs[i] = loc + if isinstance(loc, RegLoc): + self.fm.mark_as_free(arg) + descr._x86_arglocs = nonfloatlocs, floatlocs + descr._x86_loop_code = self.assembler.mc.get_relative_pos() + descr._x86_clt = self.assembler.current_clt + self.assembler.target_tokens_currently_compiling[descr] = None + self.possibly_free_vars_for_op(op) + # + # if the LABEL's descr is precisely the target of the JUMP at the + # end of the same loop, i.e. if what we are compiling is a single + # loop that ends up jumping to this LABEL, then we can now provide + # the hints about the expected position of the spilled variables. + jump_op = self.final_jump_op + if jump_op is not None and jump_op.getdescr() is descr: + self._compute_hint_frame_locations_from_descr(descr) + +## from pypy.rpython.annlowlevel import llhelper +## def fn(addr): +## print '...label:', hex(addr), nonfloatlocs +## FUNC = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) +## ll_disp = llhelper(FUNC, fn) +## faddr = rffi.cast(lltype.Signed, ll_disp) +## for i in range(16): +## self.assembler.mc.PUSH_r(i) +## self.assembler.mc.CALL_l(0) +## self.assembler.mc.POP(edi) +## self.assembler.mc.MOV(r11, imm(faddr)) +## self.assembler.mc.CALL(r11) +## for i in range(15, -1, -1): +## if i == esp.value: +## i -= 1 +## self.assembler.mc.POP_r(i) + def not_implemented_op(self, op): not_implemented("not implemented operation: %s" % op.getopname()) @@ -1419,3 +1503,7 @@ def not_implemented(msg): os.write(2, '[x86/regalloc] %s\n' % msg) raise NotImplementedError(msg) + +# xxx hack: set a default value for TargetToken._x86_loop_code. +# If 0, we know that it is a LABEL that was not compiled yet. 
+TargetToken._x86_loop_code = 0 diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -215,14 +215,3 @@ super(CPU_X86_64, self).__init__(*args, **kwargs) CPU = CPU386 - -# silence warnings -##history.LoopToken._x86_param_depth = 0 -##history.LoopToken._x86_arglocs = (None, None) -##history.LoopToken._x86_frame_depth = 0 -##history.LoopToken._x86_bootstrap_code = 0 -##history.LoopToken._x86_direct_bootstrap_code = 0 -##history.LoopToken._x86_loop_code = 0 -##history.LoopToken._x86_debug_checksum = 0 -##compile.AbstractFailDescr._x86_current_depths = (0, 0) -##compile.AbstractFailDescr._x86_adr_jump_offset = 0 diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -4,7 +4,7 @@ import py from pypy.jit.metainterp.history import BoxInt, ConstInt,\ - BoxPtr, ConstPtr, TreeLoop + BoxPtr, ConstPtr, TreeLoop, TargetToken from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.codewriter import heaptracker from pypy.jit.codewriter.effectinfo import EffectInfo @@ -113,6 +113,8 @@ descr0 = cpu.fielddescrof(S, 'int') ptr0 = struct_ref + targettoken = TargetToken() + namespace = locals().copy() def test_basic(self): @@ -136,6 +138,7 @@ def test_bug_0(self): ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8] + label(i0, i1, i2, i3, i4, i5, i6, i7, i8, descr=targettoken) guard_value(i2, 1) [i2, i3, i4, i5, i6, i7, i0, i1, i8] guard_class(i4, 138998336) [i4, i5, i6, i7, i0, i1, i8] i11 = getfield_gc(i4, descr=descr0) @@ -163,7 +166,7 @@ guard_false(i32) [i4, i6, i7, i0, i1, i24] i33 = getfield_gc(i0, descr=descr0) guard_value(i33, ConstPtr(ptr0)) [i4, i6, i7, i0, i1, i33, i24] - jump(i0, i1, 1, 17, i4, ConstPtr(ptr0), i6, i7, i24) + jump(i0, i1, 1, 17, i4, ConstPtr(ptr0), i6, i7, i24, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0, 0, 0, 0, 0, 0], run=False) diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py --- a/pypy/jit/backend/x86/test/test_recompilation.py +++ b/pypy/jit/backend/x86/test/test_recompilation.py @@ -5,10 +5,11 @@ def test_compile_bridge_not_deeper(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' loop = self.interpret(ops, [0]) assert self.getint(0) == 20 @@ -26,14 +27,15 @@ def test_compile_bridge_deeper(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' loop = self.interpret(ops, [0]) - previous = loop.token._x86_frame_depth - assert loop.token._x86_param_depth == 0 + previous = loop._jitcelltoken.compiled_loop_token.frame_depth + assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 assert self.getint(0) == 20 ops = ''' [i1] @@ -48,7 +50,7 @@ finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - descr = loop.operations[2].getdescr() + descr = loop.operations[3].getdescr() new = descr._x86_bridge_frame_depth assert descr._x86_bridge_param_depth == 0 # the force_spill() forces the stack to grow @@ -64,21 +66,23 @@ def test_bridge_jump_to_other_loop(self): loop = self.interpret(''' [i0, i10, i11, i12, i13, i14, i15, i16] + label(i0, i10, i11, 
i12, i13, i14, i15, i16, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1, i10, i11, i12, i13, i14, i15, i16) + jump(i1, i10, i11, i12, i13, i14, i15, i16, descr=targettoken) ''', [0]) other_loop = self.interpret(''' [i3] + label(i3, descr=targettoken2) guard_false(i3, descr=fdescr2) [i3] - jump(i3) + jump(i3, descr=targettoken2) ''', [1]) ops = ''' [i3] - jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=looptoken) + jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=targettoken) ''' - bridge = self.attach_bridge(ops, other_loop, 0, looptoken=loop.token) + bridge = self.attach_bridge(ops, other_loop, 1) self.cpu.set_future_value_int(0, 1) fail = self.run(other_loop) assert fail.identifier == 1 @@ -86,6 +90,7 @@ def test_bridge_jumps_to_self_deeper(self): loop = self.interpret(''' [i0, i1, i2, i31, i32, i33] + label(i0, i1, i2, i31, i32, i33, descr=targettoken) i98 = same_as(0) i99 = same_as(1) i30 = int_add(i1, i2) @@ -94,7 +99,7 @@ guard_false(i4) [i98, i3] i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] - jump(i3, i30, 1, i30, i30, i30) + jump(i3, i30, 1, i30, i30, i30, descr=targettoken) ''', [0]) assert self.getint(0) == 0 assert self.getint(1) == 1 @@ -109,12 +114,12 @@ force_spill(i8) i12 = int_add(i7, i8) i11 = int_add(i12, i6) - jump(i3, i12, i11, i10, i6, i7, descr=looptoken) + jump(i3, i12, i11, i10, i6, i7, descr=targettoken) ''' - bridge = self.attach_bridge(ops, loop, 5, looptoken=loop.token) - guard_op = loop.operations[5] - loop_frame_depth = loop.token._x86_frame_depth - assert loop.token._x86_param_depth == 0 + loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth + bridge = self.attach_bridge(ops, loop, 6) + guard_op = loop.operations[6] + assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 # the force_spill() forces the stack to grow assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth assert guard_op.getdescr()._x86_bridge_param_depth == 0 @@ -128,6 +133,7 @@ def test_bridge_jumps_to_self_shallower(self): loop = self.interpret(''' [i0, i1, i2] + label(i0, i1, i2, descr=targettoken) i98 = same_as(0) i99 = same_as(1) i3 = int_add(i0, 1) @@ -135,15 +141,15 @@ guard_false(i4) [i98, i3] i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] - jump(i3, i1, i2) + jump(i3, i1, i2, descr=targettoken) ''', [0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' [i97, i3] - jump(i3, 0, 1, descr=looptoken) + jump(i3, 0, 1, descr=targettoken) ''' - bridge = self.attach_bridge(ops, loop, 4, looptoken=loop.token) + bridge = self.attach_bridge(ops, loop, 5) self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 0) self.cpu.set_future_value_int(2, 0) diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -4,7 +4,7 @@ import py from pypy.jit.metainterp.history import BoxInt, ConstInt,\ - BoxPtr, ConstPtr, LoopToken, BasicFailDescr + BoxPtr, ConstPtr, BasicFailDescr, JitCellToken, TargetToken from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.detect_cpu import getcpuclass @@ -96,10 +96,16 @@ raising_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, EffectInfo.MOST_GENERAL) + targettoken = TargetToken() + targettoken2 = TargetToken() fdescr1 = BasicFailDescr(1) fdescr2 = BasicFailDescr(2) fdescr3 = BasicFailDescr(3) + def setup_method(self, meth): + 
self.targettoken._x86_loop_code = 0 + self.targettoken2._x86_loop_code = 0 + def f1(x): return x+1 @@ -134,7 +140,8 @@ def interpret(self, ops, args, run=True): loop = self.parse(ops) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) for i, arg in enumerate(args): if isinstance(arg, int): self.cpu.set_future_value_int(i, arg) @@ -145,15 +152,16 @@ assert isinstance(lltype.typeOf(arg), lltype.Ptr) llgcref = lltype.cast_opaque_ptr(llmemory.GCREF, arg) self.cpu.set_future_value_ref(i, llgcref) + loop._jitcelltoken = looptoken if run: - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken) return loop def prepare_loop(self, ops): loop = self.parse(ops) regalloc = RegAlloc(self.cpu.assembler, False) regalloc.prepare_loop(loop.inputargs, loop.operations, - loop.token, []) + loop.original_jitcell_token, []) return regalloc def getint(self, index): @@ -174,10 +182,7 @@ gcref = self.cpu.get_latest_value_ref(index) return lltype.cast_opaque_ptr(T, gcref) - def attach_bridge(self, ops, loop, guard_op_index, looptoken=None, **kwds): - if looptoken is not None: - self.namespace = self.namespace.copy() - self.namespace['looptoken'] = looptoken + def attach_bridge(self, ops, loop, guard_op_index, **kwds): guard_op = loop.operations[guard_op_index] assert guard_op.is_guard() bridge = self.parse(ops, **kwds) @@ -185,20 +190,21 @@ [box.type for box in guard_op.getfailargs()]) faildescr = guard_op.getdescr() self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations, - loop.token) + loop._jitcelltoken) return bridge def run(self, loop): - return self.cpu.execute_token(loop.token) + return self.cpu.execute_token(loop._jitcelltoken) class TestRegallocSimple(BaseTestRegalloc): def test_simple_loop(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' self.interpret(ops, [0]) assert self.getint(0) == 20 @@ -206,27 +212,29 @@ def test_two_loops_and_a_bridge(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_add(i0, 1) i5 = int_lt(i4, 20) guard_true(i5) [i4, i1, i2, i3] - jump(i4, i1, i2, i3) + jump(i4, i1, i2, i3, descr=targettoken) ''' loop = self.interpret(ops, [0, 0, 0, 0]) ops2 = ''' [i5] + label(i5, descr=targettoken2) i1 = int_add(i5, 1) i3 = int_add(i1, 1) i4 = int_add(i3, 1) i2 = int_lt(i4, 30) guard_true(i2) [i4] - jump(i4) + jump(i4, descr=targettoken2) ''' loop2 = self.interpret(ops2, [0]) bridge_ops = ''' [i4] - jump(i4, i4, i4, i4, descr=looptoken) + jump(i4, i4, i4, i4, descr=targettoken) ''' - bridge = self.attach_bridge(bridge_ops, loop2, 4, looptoken=loop.token) + bridge = self.attach_bridge(bridge_ops, loop2, 5) self.cpu.set_future_value_int(0, 0) self.run(loop2) assert self.getint(0) == 31 @@ -237,10 +245,11 @@ def test_pointer_arg(self): ops = ''' [i0, p0] + label(i0, p0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 10) guard_true(i2) [p0] - jump(i1, p0) + jump(i1, p0, descr=targettoken) ''' S = lltype.GcStruct('S') ptr = lltype.malloc(S) @@ -318,10 +327,11 @@ def test_spill_for_constant(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_add(3, i1) i5 = int_lt(i4, 30) guard_true(i5) [i0, i4, i2, i3] - jump(1, i4, 3, 4) + jump(1, i4, 3, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1, 30, 3, 4] @@ -329,31 +339,34 @@ def 
test_spill_for_constant_lshift(self): ops = ''' [i0, i2, i1, i3] + label(i0, i2, i1, i3, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, 3, i5, 4) + jump(i4, 3, i5, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, i5, 3, 4) + jump(i4, i5, 3, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] ops = ''' [i0, i3, i1, i2] + label(i0, i3, i1, i2, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, 4, i5, 3) + jump(i4, 4, i5, 3, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1<<29, 30, 3, 4] @@ -361,11 +374,12 @@ def test_result_selected_reg_via_neg(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i6 = int_neg(i2) i7 = int_add(1, i1) i4 = int_lt(i7, 10) guard_true(i4) [i0, i6, i7] - jump(1, i7, i2, i6) + jump(1, i7, i2, i6, descr=targettoken) ''' self.interpret(ops, [0, 0, 3, 0]) assert self.getints(3) == [1, -3, 10] @@ -373,11 +387,12 @@ def test_compare_memory_result_survives(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_lt(i0, i1) i5 = int_add(i3, 1) i6 = int_lt(i5, 30) guard_true(i6) [i4] - jump(i0, i1, i4, i5) + jump(i0, i1, i4, i5, descr=targettoken) ''' self.interpret(ops, [0, 10, 0, 0]) assert self.getint(0) == 1 @@ -385,10 +400,11 @@ def test_jump_different_args(self): ops = ''' [i0, i15, i16, i18, i1, i2, i3] + label(i0, i15, i16, i18, i1, i2, i3, descr=targettoken) i4 = int_add(i3, 1) i5 = int_lt(i4, 20) guard_true(i5) [i2, i1] - jump(i0, i18, i15, i16, i2, i1, i4) + jump(i0, i18, i15, i16, i2, i1, i4, descr=targettoken) ''' self.interpret(ops, [0, 1, 2, 3]) @@ -474,6 +490,7 @@ class TestRegallocMoreRegisters(BaseTestRegalloc): cpu = BaseTestRegalloc.cpu + targettoken = TargetToken() S = lltype.GcStruct('S', ('field', lltype.Char)) fielddescr = cpu.fielddescrof(S, 'field') @@ -546,6 +563,7 @@ def test_division_optimized(self): ops = ''' [i7, i6] + label(i7, i6, descr=targettoken) i18 = int_floordiv(i7, i6) i19 = int_xor(i7, i6) i21 = int_lt(i19, 0) @@ -553,7 +571,7 @@ i23 = int_is_true(i22) i24 = int_eq(i6, 4) guard_false(i24) [i18] - jump(i18, i6) + jump(i18, i6, descr=targettoken) ''' self.interpret(ops, [10, 4]) assert self.getint(0) == 2 @@ -624,7 +642,8 @@ ''' loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] - assert loop.token._x86_param_depth == self.expected_param_depth(1) + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(1) def test_two_calls(self): ops = ''' @@ -635,7 +654,8 @@ ''' loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) assert self.getints(11) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] - assert loop.token._x86_param_depth == self.expected_param_depth(2) + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(2) def test_call_many_arguments(self): # NB: The first and last arguments in the call are constants. 
This @@ -648,7 +668,8 @@ ''' loop = self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9]) assert self.getint(0) == 55 - assert loop.token._x86_param_depth == self.expected_param_depth(10) + clt = loop._jitcelltoken.compiled_loop_token + assert clt.param_depth == self.expected_param_depth(10) def test_bridge_calls_1(self): ops = ''' diff --git a/pypy/jit/backend/x86/test/test_regalloc2.py b/pypy/jit/backend/x86/test/test_regalloc2.py --- a/pypy/jit/backend/x86/test/test_regalloc2.py +++ b/pypy/jit/backend/x86/test/test_regalloc2.py @@ -1,6 +1,6 @@ import py from pypy.jit.metainterp.history import ResOperation, BoxInt, ConstInt,\ - BoxPtr, ConstPtr, BasicFailDescr, LoopToken + BoxPtr, ConstPtr, BasicFailDescr, JitCellToken from pypy.jit.metainterp.resoperation import rop from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.arch import WORD @@ -20,7 +20,7 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, 9) cpu.execute_token(looptoken) @@ -43,7 +43,7 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, -10) cpu.execute_token(looptoken) @@ -140,7 +140,7 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, -13) cpu.set_future_value_int(1, 10) @@ -255,7 +255,7 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, 17) cpu.set_future_value_int(1, -20) diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -1,9 +1,10 @@ import py from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr, rclass from pypy.rpython.annlowlevel import llhelper -from pypy.jit.metainterp.history import ResOperation, LoopToken +from pypy.jit.metainterp.history import ResOperation, TargetToken, JitCellToken from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstFloat, - ConstPtr, Box, BoxFloat, BasicFailDescr) + ConstPtr, Box, BoxFloat, + BasicFailDescr) from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.x86.arch import WORD from pypy.jit.backend.x86.rx86 import fits_in_32bits @@ -279,7 +280,7 @@ descr=BasicFailDescr()), ] ops[-2].setfailargs([i1]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([b], ops, looptoken) if op == rop.INT_IS_TRUE: self.cpu.set_future_value_int(0, b.value) @@ -329,7 +330,7 @@ ] ops[-2].setfailargs([i1]) inputargs = [i for i in (a, b) if isinstance(i, Box)] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, ops, looptoken) for i, box in enumerate(inputargs): self.cpu.set_future_value_int(i, box.value) @@ -353,9 +354,10 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() + targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.number = 17 class FakeString(object): def __init__(self, val): @@ -365,14 +367,15 @@ return self.val operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.DEBUG_MERGE_POINT, 
[FakeString("hello"), 0], None), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[3].setfailargs([i1]) + operations[-2].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) name, loopaddress, loopsize = agent.functions[0] assert name == "Loop # 17: hello (loop counter 0)" @@ -385,7 +388,7 @@ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0], None), - ResOperation(rop.JUMP, [i1b], None, descr=looptoken), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) @@ -408,11 +411,13 @@ i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ + ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), - ResOperation(rop.JUMP, [i1], None, descr=looptoken), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] debug._log = dlog = debug.DebugLog() @@ -499,7 +504,7 @@ ops[3].setfailargs([]) ops[5].setfailargs([]) ops[7].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) self.cpu.set_future_value_int(0, 123450) @@ -523,19 +528,21 @@ loop = """ [i0] + label(i0, descr=targettoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1) + jump(i1, descr=targettoken) """ - ops = parse(loop) + ops = parse(loop, namespace={'targettoken': TargetToken()}) debug._log = dlog = debug.DebugLog() try: self.cpu.assembler.set_debug(True) - self.cpu.compile_loop(ops.inputargs, ops.operations, ops.token) + looptoken = JitCellToken() + self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(ops.token) + self.cpu.execute_token(looptoken) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] assert struct.i == 10 @@ -547,16 +554,18 @@ def test_debugger_checksum(self): loop = """ [i0] + label(i0, descr=targettoken) debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] - jump(i1) + jump(i1, descr=targettoken) """ - ops = parse(loop) + ops = parse(loop, namespace={'targettoken': TargetToken()}) self.cpu.assembler.set_debug(True) - self.cpu.compile_loop(ops.inputargs, ops.operations, ops.token) + looptoken = JitCellToken() + self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(ops.token) - assert ops.token._x86_debug_checksum == sum([op.getopnum() + self.cpu.execute_token(looptoken) + assert looptoken._x86_debug_checksum == sum([op.getopnum() for op in ops.operations]) diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -9,12 +9,13 @@ from pypy.tool.sourcetools import func_with_new_name from pypy.jit.metainterp.resoperation import ResOperation, rop, get_deep_immutable_oplist -from pypy.jit.metainterp.history import TreeLoop, Box, History, LoopToken +from pypy.jit.metainterp.history import TreeLoop, Box, 
History, JitCellToken, TargetToken from pypy.jit.metainterp.history import AbstractFailDescr, BoxInt from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const from pypy.jit.metainterp import history from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.jit.metainterp.optimize import InvalidLoop +from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resume import NUMBERING, PENDINGFIELDSP from pypy.jit.codewriter import heaptracker, longlong @@ -23,7 +24,7 @@ from pypy.jit.metainterp.jitprof import ABORT_BRIDGE raise SwitchToBlackhole(ABORT_BRIDGE) -def show_loop(metainterp_sd, loop=None, error=None): +def show_procedures(metainterp_sd, procedure=None, error=None): # debugging if option.view or option.viewloops: if error: @@ -32,11 +33,12 @@ errmsg += ': ' + str(error) else: errmsg = None - if loop is None: # or type(loop) is TerminatingLoop: - extraloops = [] + if procedure is None: + extraprocedures = [] else: - extraloops = [loop] - metainterp_sd.stats.view(errmsg=errmsg, extraloops=extraloops) + extraprocedures = [procedure] + metainterp_sd.stats.view(errmsg=errmsg, + extraprocedures=extraprocedures) def create_empty_loop(metainterp, name_prefix=''): name = metainterp.staticdata.stats.name_for_new_loop() @@ -45,131 +47,224 @@ return loop -def make_loop_token(nb_args, jitdriver_sd): - loop_token = LoopToken() - loop_token.outermost_jitdriver_sd = jitdriver_sd - return loop_token +def make_jitcell_token(jitdriver_sd): + jitcell_token = JitCellToken() + jitcell_token.outermost_jitdriver_sd = jitdriver_sd + return jitcell_token def record_loop_or_bridge(metainterp_sd, loop): """Do post-backend recordings and cleanups on 'loop'. """ - # get the original loop token (corresponding to 'loop', or if that is - # a bridge, to the loop that this bridge belongs to) - looptoken = loop.token - assert looptoken is not None + # get the original jitcell token corresponding to jitcell form which + # this trace starts + original_jitcell_token = loop.original_jitcell_token + assert original_jitcell_token is not None if metainterp_sd.warmrunnerdesc is not None: # for tests - assert looptoken.generation > 0 # has been registered with memmgr - wref = weakref.ref(looptoken) + assert original_jitcell_token.generation > 0 # has been registered with memmgr + wref = weakref.ref(original_jitcell_token) for op in loop.operations: descr = op.getdescr() if isinstance(descr, ResumeDescr): descr.wref_original_loop_token = wref # stick it there n = descr.index if n >= 0: # we also record the resumedescr number - looptoken.compiled_loop_token.record_faildescr_index(n) - elif isinstance(descr, LoopToken): - # for a JUMP or a CALL_ASSEMBLER: record it as a potential jump. + original_jitcell_token.compiled_loop_token.record_faildescr_index(n) + elif isinstance(descr, JitCellToken): + # for a CALL_ASSEMBLER: record it as a potential jump. + if descr is not original_jitcell_token: + original_jitcell_token.record_jump_to(descr) + descr.exported_state = None + op._descr = None # clear reference, mostly for tests + elif isinstance(descr, TargetToken): + # for a JUMP: record it as a potential jump. 
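
[Editor's sketch, not part of the patch.] For orientation, a self-contained toy version of the two-level token scheme this commit introduces; these are simplified mirrors of the JitCellToken/TargetToken classes added to history.py further down, not the real ones:

    class JitCellToken(object):
        def __init__(self):
            self.target_tokens = None     # list of TargetTokens, once known

    class TargetToken(object):
        def __init__(self, targeting_jitcell_token):
            # the jitcell whose jumps may end up at this LABEL
            self.targeting_jitcell_token = targeting_jitcell_token

    def retarget_jump(descr):
        # a JUMP still carrying the JitCellToken is redirected to one of
        # its LABELs, much as OptSimplify.optimize_JUMP does below
        if isinstance(descr, JitCellToken):
            assert descr.target_tokens and len(descr.target_tokens) == 1
            return descr.target_tokens[0]
        return descr

    cell = JitCellToken()
    label = TargetToken(cell)
    cell.target_tokens = [label]
    assert retarget_jump(cell) is label
    assert retarget_jump(label) is label
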
# (the following test is not enough to prevent more complicated # cases of cycles, but at least it helps in simple tests of # test_memgr.py) - if descr is not looptoken: - looptoken.record_jump_to(descr) - op._descr = None # clear reference, mostly for tests + if descr.original_jitcell_token is not original_jitcell_token: + assert descr.original_jitcell_token is not None + original_jitcell_token.record_jump_to(descr.original_jitcell_token) + # exported_state is clear by optimizeopt when the short preamble is + # constrcucted. if that did not happen the label should not show up + # in a trace that will be used + assert descr.exported_state is None if not we_are_translated(): - op._jumptarget_number = descr.number + op._descr_wref = weakref.ref(op._descr) + op._descr = None # clear reference to prevent the history.Stats + # from keeping the loop alive during tests # record this looptoken on the QuasiImmut used in the code if loop.quasi_immutable_deps is not None: for qmut in loop.quasi_immutable_deps: qmut.register_loop_token(wref) # XXX maybe we should clear the dictionary here # mostly for tests: make sure we don't keep a reference to the LoopToken - loop.token = None + loop.original_jitcell_token = None if not we_are_translated(): - loop._looptoken_number = looptoken.number + loop._looptoken_number = original_jitcell_token.number # ____________________________________________________________ -def compile_new_loop(metainterp, old_loop_tokens, greenkey, start, - start_resumedescr, full_preamble_needed=True): - """Try to compile a new loop by closing the current history back +def compile_loop(metainterp, greenkey, start, + inputargs, jumpargs, + start_resumedescr, full_preamble_needed=True): + """Try to compile a new procedure by closing the current history back to the first operation. 
""" - from pypy.jit.metainterp.optimize import optimize_loop + from pypy.jit.metainterp.optimizeopt import optimize_trace history = metainterp.history - loop = create_empty_loop(metainterp) - loop.inputargs = history.inputargs[:] + metainterp_sd = metainterp.staticdata + jitdriver_sd = metainterp.jitdriver_sd + + if False: + part = partial_trace + assert False + procedur_token = metainterp.get_procedure_token(greenkey) + assert procedure_token + all_target_tokens = [] + else: + jitcell_token = make_jitcell_token(jitdriver_sd) + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + h_ops = history.operations + part.start_resumedescr = start_resumedescr + part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ + [h_ops[i].clone() for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, jumpargs, None, descr=jitcell_token)] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens = [target_token] + + loop = create_empty_loop(metainterp) + loop.inputargs = part.inputargs + loop.operations = part.operations + loop.quasi_immutable_deps = {} + if part.quasi_immutable_deps: + loop.quasi_immutable_deps.update(part.quasi_immutable_deps) + while part.operations[-1].getopnum() == rop.LABEL: + inliner = Inliner(inputargs, jumpargs) + part.quasi_immutable_deps = None + part.operations = [part.operations[-1]] + \ + [inliner.inline_op(h_ops[i]) for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jumpargs], + None, descr=jitcell_token)] + target_token = part.operations[0].getdescr() + assert isinstance(target_token, TargetToken) + all_target_tokens.append(target_token) + inputargs = jumpargs + jumpargs = part.operations[-1].getarglist() + + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) + except InvalidLoop: + return None + + loop.operations = loop.operations[:-1] + part.operations + if part.quasi_immutable_deps: + loop.quasi_immutable_deps.update(part.quasi_immutable_deps) + + if not loop.quasi_immutable_deps: + loop.quasi_immutable_deps = None for box in loop.inputargs: assert isinstance(box, Box) - # make a copy, because optimize_loop can mutate the ops and descrs - h_ops = history.operations - loop.operations = [h_ops[i].clone() for i in range(start, len(h_ops))] + + loop.original_jitcell_token = jitcell_token + for label in all_target_tokens: + assert isinstance(label, TargetToken) + label.original_jitcell_token = jitcell_token + if label.virtual_state and label.short_preamble: + metainterp_sd.logger_ops.log_short_preamble([], label.short_preamble) + jitcell_token.target_tokens = all_target_tokens + send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, "loop") + record_loop_or_bridge(metainterp_sd, loop) + return all_target_tokens[0] + +def compile_retrace(metainterp, greenkey, start, + inputargs, jumpargs, + start_resumedescr, partial_trace, resumekey): + """Try to compile a new procedure by closing the current history back + to the first operation. 
+ """ + from pypy.jit.metainterp.optimizeopt import optimize_trace + + history = metainterp.history metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd - loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd) - loop.token = loop_token - loop.operations[-1].setdescr(loop_token) # patch the target of the JUMP - loop.preamble = create_empty_loop(metainterp, 'Preamble ') - loop.preamble.inputargs = loop.inputargs - loop.preamble.token = make_loop_token(len(loop.inputargs), jitdriver_sd) - loop.preamble.start_resumedescr = start_resumedescr + loop_jitcell_token = metainterp.get_procedure_token(greenkey) + assert loop_jitcell_token + assert partial_trace.operations[-1].getopnum() == rop.LABEL + part = create_empty_loop(metainterp) + part.inputargs = inputargs[:] + part.start_resumedescr = start_resumedescr + h_ops = history.operations + + part.operations = [partial_trace.operations[-1]] + \ + [h_ops[i].clone() for i in range(start, len(h_ops))] + \ + [ResOperation(rop.JUMP, jumpargs, None, descr=loop_jitcell_token)] + label = part.operations[0] + orignial_label = label.clone() + assert label.getopnum() == rop.LABEL try: - old_loop_token = optimize_loop(metainterp_sd, old_loop_tokens, loop, - jitdriver_sd.warmstate.enable_opts) + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: - debug_print("compile_new_loop: got an InvalidLoop") - return None - if old_loop_token is not None: - metainterp.staticdata.log("reusing old loop") - return old_loop_token + #return None # XXX: Dissable for now + # Fall back on jumping to preamble + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + assert target_token.exported_state + part.operations = [orignial_label] + \ + [ResOperation(rop.JUMP, target_token.exported_state.jump_args, + None, descr=loop_jitcell_token)] + try: + optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts, + inline_short_preamble=False) + + except InvalidLoop: + return None + assert part.operations[-1].getopnum() != rop.LABEL + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + assert loop_jitcell_token.target_tokens + loop_jitcell_token.target_tokens.append(target_token) - if loop.preamble.operations is not None: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, - "loop") - record_loop_or_bridge(metainterp_sd, loop) - token = loop.preamble.token - if full_preamble_needed: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, - loop.preamble, "entry bridge") - insert_loop_token(old_loop_tokens, loop.preamble.token) - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - greenkey, loop.preamble.token) - record_loop_or_bridge(metainterp_sd, loop.preamble) - elif token.short_preamble: - short = token.short_preamble[-1] - metainterp_sd.logger_ops.log_short_preamble(short.inputargs, - short.operations) - return token - else: - send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, - "loop") - insert_loop_token(old_loop_tokens, loop_token) - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - greenkey, loop.token) - record_loop_or_bridge(metainterp_sd, loop) - return loop_token + loop = partial_trace + loop.operations = loop.operations[:-1] + part.operations -def insert_loop_token(old_loop_tokens, loop_token): - # Find where in old_loop_tokens we should insert this new loop_token. 
- # The following algo means "as late as possible, but before another - # loop token that would be more general and so completely mask off - # the new loop_token". - # XXX do we still need a list? - old_loop_tokens.append(loop_token) + quasi_immutable_deps = {} + if loop.quasi_immutable_deps: + quasi_immutable_deps.update(loop.quasi_immutable_deps) + if part.quasi_immutable_deps: + quasi_immutable_deps.update(part.quasi_immutable_deps) + if quasi_immutable_deps: + loop.quasi_immutable_deps = quasi_immutable_deps + + for box in loop.inputargs: + assert isinstance(box, Box) + + target_token = loop.operations[-1].getdescr() + resumekey.compile_and_attach(metainterp, loop) + target_token = label.getdescr() + assert isinstance(target_token, TargetToken) + target_token.original_jitcell_token = loop.original_jitcell_token + record_loop_or_bridge(metainterp_sd, loop) + return target_token def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): - jitdriver_sd.on_compile(metainterp_sd.logger_ops, loop.token, + original_jitcell_token = loop.original_jitcell_token + jitdriver_sd.on_compile(metainterp_sd.logger_ops, original_jitcell_token, loop.operations, type, greenkey) loopname = jitdriver_sd.warmstate.get_location_str(greenkey) globaldata = metainterp_sd.globaldata - loop_token = loop.token - loop_token.number = n = globaldata.loopnumbering + original_jitcell_token.number = n = globaldata.loopnumbering globaldata.loopnumbering += 1 if not we_are_translated(): - show_loop(metainterp_sd, loop) + show_procedures(metainterp_sd, loop) loop.check_consistency() operations = get_deep_immutable_oplist(loop.operations) @@ -177,26 +272,19 @@ debug_start("jit-backend") try: ops_offset = metainterp_sd.cpu.compile_loop(loop.inputargs, operations, - loop.token, name=loopname) + original_jitcell_token, name=loopname) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() metainterp_sd.stats.add_new_loop(loop) if not we_are_translated(): - if type != "entry bridge": - metainterp_sd.stats.compiled() - else: - loop._ignore_during_counting = True + metainterp_sd.stats.compiled() metainterp_sd.log("compiled new " + type) # metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, type, ops_offset) - short = loop.token.short_preamble - if short: - metainterp_sd.logger_ops.log_short_preamble(short[-1].inputargs, - short[-1].operations) # if metainterp_sd.warmrunnerdesc is not None: # for tests - metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(loop.token) + metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(original_jitcell_token) def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputargs, operations, original_loop_token): @@ -204,8 +292,9 @@ jitdriver_sd.on_compile_bridge(metainterp_sd.logger_ops, original_loop_token, operations, n) if not we_are_translated(): - show_loop(metainterp_sd) - TreeLoop.check_consistency_of(inputargs, operations) + show_procedures(metainterp_sd) + seen = dict.fromkeys(inputargs) + TreeLoop.check_consistency_of_branch(operations, seen) metainterp_sd.profiler.start_backend() operations = get_deep_immutable_oplist(operations) debug_start("jit-backend") @@ -221,9 +310,9 @@ # metainterp_sd.logger_ops.log_bridge(inputargs, operations, n, ops_offset) # - if metainterp_sd.warmrunnerdesc is not None: # for tests - metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive( - original_loop_token) + #if metainterp_sd.warmrunnerdesc is not None: # for tests + # 
metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive( + # original_loop_token) # ____________________________________________________________ @@ -263,7 +352,7 @@ raise metainterp_sd.ExitFrameWithExceptionRef(cpu, value) -class TerminatingLoopToken(LoopToken): +class TerminatingLoopToken(JitCellToken): # FIXME: kill? terminating = True def __init__(self, nargs, finishdescr): @@ -427,13 +516,13 @@ # We managed to create a bridge. Attach the new operations # to the corresponding guard_op and compile from there assert metainterp.resumekey_original_loop_token is not None - new_loop.token = metainterp.resumekey_original_loop_token + new_loop.original_jitcell_token = metainterp.resumekey_original_loop_token inputargs = metainterp.history.inputargs if not we_are_translated(): self._debug_suboperations = new_loop.operations send_bridge_to_backend(metainterp.jitdriver_sd, metainterp.staticdata, self, inputargs, new_loop.operations, - new_loop.token) + new_loop.original_jitcell_token) def copy_all_attributes_into(self, res): # XXX a bit ugly to have to list them all here @@ -616,41 +705,32 @@ metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd redargs = new_loop.inputargs - # We make a new LoopToken for this entry bridge, and stick it - # to every guard in the loop. - new_loop_token = make_loop_token(len(redargs), jitdriver_sd) - new_loop.token = new_loop_token + new_loop.original_jitcell_token = jitcell_token = make_jitcell_token(jitdriver_sd) send_loop_to_backend(self.original_greenkey, metainterp.jitdriver_sd, metainterp_sd, new_loop, "entry bridge") # send the new_loop to warmspot.py, to be called directly the next time - jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( - self.original_greenkey, - new_loop_token) - # store the new loop in compiled_merge_points_wref too - old_loop_tokens = metainterp.get_compiled_merge_points( - self.original_greenkey) - # it always goes at the end of the list, as it is the most - # general loop token - old_loop_tokens.append(new_loop_token) - metainterp.set_compiled_merge_points(self.original_greenkey, - old_loop_tokens) + jitdriver_sd.warmstate.attach_procedure_to_interp( + self.original_greenkey, jitcell_token) + metainterp_sd.stats.add_jitcell_token(jitcell_token) -def compile_new_bridge(metainterp, old_loop_tokens, resumekey, retraced=False): +def compile_trace(metainterp, resumekey, start_resumedescr=None): """Try to compile a new bridge leading from the beginning of the history to some existing place. """ - from pypy.jit.metainterp.optimize import optimize_bridge + from pypy.jit.metainterp.optimizeopt import optimize_trace # The history contains new operations to attach as the code for the # failure of 'resumekey.guard_op'. - # + # # Attempt to use optimize_bridge(). This may return None in case # it does not work -- i.e. none of the existing old_loop_tokens match. 
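
[Editor's sketch, not part of the patch.] compile_trace() above now ends with a two-way decision; a minimal sketch of that control flow, with plain strings standing in for rop constants:

    JUMP, LABEL = 'jump', 'label'

    def close_trace(operations, compile_and_attach, retrace_needed):
        # after optimization: a trace not ending in a LABEL became a real
        # bridge and is attached; one still ending in a LABEL could not be
        # closed, so a retrace is requested instead
        if operations[-1] != LABEL:
            compile_and_attach(operations)
            return 'attached'
        retrace_needed(operations)
        return 'retrace'

    log = []
    assert close_trace([LABEL, JUMP], log.append, log.append) == 'attached'
    assert close_trace([LABEL, LABEL], log.append, log.append) == 'retrace'
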
- new_loop = create_empty_loop(metainterp) - new_loop.inputargs = metainterp.history.inputargs[:] + new_trace = create_empty_loop(metainterp) + new_trace.inputargs = inputargs = metainterp.history.inputargs[:] # clone ops, as optimize_bridge can mutate the ops - new_loop.operations = [op.clone() for op in metainterp.history.operations] + + new_trace.operations = [op.clone() for op in metainterp.history.operations] + new_trace.start_resumedescr = start_resumedescr metainterp_sd = metainterp.staticdata state = metainterp.jitdriver_sd.warmstate if isinstance(resumekey, ResumeAtPositionDescr): @@ -658,38 +738,25 @@ else: inline_short_preamble = True try: - target_loop_token = optimize_bridge(metainterp_sd, old_loop_tokens, - new_loop, state.enable_opts, - inline_short_preamble, retraced) + optimize_trace(metainterp_sd, new_trace, state.enable_opts, inline_short_preamble) except InvalidLoop: debug_print("compile_new_bridge: got an InvalidLoop") # XXX I am fairly convinced that optimize_bridge cannot actually raise # InvalidLoop debug_print('InvalidLoop in compile_new_bridge') return None - # Did it work? - if target_loop_token is not None: - # Yes, we managed to create a bridge. Dispatch to resumekey to + + if new_trace.operations[-1].getopnum() != rop.LABEL: + # We managed to create a bridge. Dispatch to resumekey to # know exactly what we must do (ResumeGuardDescr/ResumeFromInterpDescr) - prepare_last_operation(new_loop, target_loop_token) - resumekey.compile_and_attach(metainterp, new_loop) - record_loop_or_bridge(metainterp_sd, new_loop) - return target_loop_token - -def prepare_last_operation(new_loop, target_loop_token): - op = new_loop.operations[-1] - if not isinstance(target_loop_token, TerminatingLoopToken): - # normal case - #op.setdescr(target_loop_token) # patch the jump target - pass + target_token = new_trace.operations[-1].getdescr() + resumekey.compile_and_attach(metainterp, new_trace) + record_loop_or_bridge(metainterp_sd, new_trace) + return target_token else: - # The target_loop_token is a pseudo loop token, - # e.g. loop_tokens_done_with_this_frame_void[0] - # Replace the operation with the real operation we want, i.e. a FINISH - descr = target_loop_token.finishdescr - args = op.getarglist() - new_op = ResOperation(rop.FINISH, args, None, descr=descr) - new_loop.operations[-1] = new_op + metainterp.retrace_needed(new_trace) + return None + # ____________________________________________________________ @@ -708,7 +775,7 @@ """ # 'redboxes' is only used to know the types of red arguments. inputargs = [box.clonebox() for box in redboxes] - loop_token = make_loop_token(len(inputargs), jitdriver_sd) + jitcell_token = make_jitcell_token(jitdriver_sd) # 'nb_red_args' might be smaller than len(redboxes), # because it doesn't include the virtualizable boxes. 
nb_red_args = jitdriver_sd.num_red_args @@ -741,7 +808,7 @@ ] operations[1].setfailargs([]) operations = get_deep_immutable_oplist(operations) - cpu.compile_loop(inputargs, operations, loop_token, log=False) + cpu.compile_loop(inputargs, operations, jitcell_token, log=False) if memory_manager is not None: # for tests - memory_manager.keep_loop_alive(loop_token) - return loop_token + memory_manager.keep_loop_alive(jitcell_token) + return jitcell_token diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -344,6 +344,7 @@ rop.SETINTERIORFIELD_RAW, rop.CALL_RELEASE_GIL, rop.QUASIIMMUT_FIELD, + rop.LABEL, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py --- a/pypy/jit/metainterp/graphpage.py +++ b/pypy/jit/metainterp/graphpage.py @@ -12,8 +12,9 @@ def get_display_text(self): return None -def display_loops(loops, errmsg=None, highlight_loops={}): - graphs = [(loop, highlight_loops.get(loop, 0)) for loop in loops] +def display_procedures(procedures, errmsg=None, highlight_procedures={}): + graphs = [(procedure, highlight_procedures.get(procedure, 0)) + for procedure in procedures] for graph, highlight in graphs: for op in graph.get_operations(): if is_interesting_guard(op): @@ -25,18 +26,19 @@ def is_interesting_guard(op): return hasattr(op.getdescr(), '_debug_suboperations') +def getdescr(op): + if op._descr is not None: + return op._descr + if hasattr(op, '_descr_wref'): + return op._descr_wref() + return None + class ResOpGraphPage(GraphPage): def compute(self, graphs, errmsg=None): resopgen = ResOpGen() for graph, highlight in graphs: - if getattr(graph, 'token', None) is not None: - resopgen.jumps_to_graphs[graph.token] = graph - if getattr(graph, '_looptoken_number', None) is not None: - resopgen.jumps_to_graphs[graph._looptoken_number] = graph - - for graph, highlight in graphs: resopgen.add_graph(graph, highlight) if errmsg: resopgen.set_errmsg(errmsg) @@ -54,7 +56,7 @@ self.block_starters = {} # {graphindex: {set-of-operation-indices}} self.all_operations = {} self.errmsg = None - self.jumps_to_graphs = {} + self.target_tokens = {} def op_name(self, graphindex, opindex): return 'g%dop%d' % (graphindex, opindex) @@ -73,16 +75,21 @@ for graphindex in range(len(self.graphs)): self.block_starters[graphindex] = {0: True} for graphindex, graph in enumerate(self.graphs): - last_was_mergepoint = False + mergepointblock = None for i, op in enumerate(graph.get_operations()): if is_interesting_guard(op): self.mark_starter(graphindex, i+1) if op.getopnum() == rop.DEBUG_MERGE_POINT: - if not last_was_mergepoint: - last_was_mergepoint = True - self.mark_starter(graphindex, i) + if mergepointblock is None: + mergepointblock = i + elif op.getopnum() == rop.LABEL: + self.mark_starter(graphindex, i) + self.target_tokens[getdescr(op)] = (graphindex, i) + mergepointblock = i else: - last_was_mergepoint = False + if mergepointblock is not None: + self.mark_starter(graphindex, mergepointblock) + mergepointblock = None def set_errmsg(self, errmsg): self.errmsg = errmsg @@ -172,24 +179,10 @@ (graphindex, opindex)) break if op.getopnum() == rop.JUMP: - tgt_g = -1 - tgt = None - tgt_number = getattr(op, '_jumptarget_number', None) - if tgt_number is not None: - tgt = self.jumps_to_graphs.get(tgt_number) - else: - tgt_descr = op.getdescr() - if tgt_descr is None: - tgt_g = 
graphindex - else: - tgt = self.jumps_to_graphs.get(tgt_descr.number) - if tgt is None: - tgt = self.jumps_to_graphs.get(tgt_descr) - if tgt is not None: - tgt_g = self.graphs.index(tgt) - if tgt_g != -1: + tgt_descr = getdescr(op) + if tgt_descr is not None and tgt_descr in self.target_tokens: self.genedge((graphindex, opstartindex), - (tgt_g, 0), + self.target_tokens[tgt_descr], weight="0") lines.append("") label = "\\l".join(lines) diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -10,6 +10,7 @@ from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.codewriter import heaptracker, longlong from pypy.rlib.objectmodel import compute_identity_hash +import weakref # ____________________________________________________________ @@ -723,18 +724,17 @@ # ____________________________________________________________ -# The TreeLoop class contains a loop or a generalized loop, i.e. a tree -# of operations. Each branch ends in a jump which can go either to -# the top of the same loop, or to another TreeLoop; or it ends in a FINISH. +# The JitCellToken class is the root of a tree of traces. Each branch ends +# in a jump which goes to a LABEL operation; or it ends in a FINISH. -class LoopToken(AbstractDescr): +class JitCellToken(AbstractDescr): """Used for rop.JUMP, giving the target of the jump. This is different from TreeLoop: the TreeLoop class contains the whole loop, including 'operations', and goes away after the loop was compiled; but the LoopDescr remains alive and points to the generated assembler. """ - short_preamble = None + target_tokens = None failed_states = None retraced_count = 0 terminating = False # see TerminatingLoopToken in compile.py @@ -751,10 +751,11 @@ def __init__(self): # For memory management of assembled loops - self._keepalive_target_looktokens = {} # set of other LoopTokens + self._keepalive_jitcell_tokens = {} # set of other JitCellToken - def record_jump_to(self, target_loop_token): - self._keepalive_target_looktokens[target_loop_token] = None + def record_jump_to(self, jitcell_token): + assert isinstance(jitcell_token, JitCellToken) + self._keepalive_jitcell_tokens[jitcell_token] = None def __repr__(self): return '' % (self.number, self.generation) @@ -765,17 +766,36 @@ def dump(self): self.compiled_loop_token.cpu.dump_loop_token(self) +class TargetToken(AbstractDescr): + def __init__(self, targeting_jitcell_token=None): + # The jitcell to which jumps might result in a jump to this label + self.targeting_jitcell_token = targeting_jitcell_token + + # The jitcell where the trace containing the label with this TargetToken begins + self.original_jitcell_token = None + + self.virtual_state = None + self.exported_state = None + class TreeLoop(object): inputargs = None operations = None - token = None call_pure_results = None logops = None quasi_immutable_deps = None + start_resumedescr = None + + def _token(*args): + raise Exception("TreeLoop.token is killed") + token = property(_token, _token) + + # This is the jitcell where the trace starts. Labels within the trace might + # belong to some other jitcells in the sens that jumping to this other + # jitcell will result in a jump to the label. 
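
[Editor's sketch, not part of the patch.] The property trick used just above to retire TreeLoop.token can be tried in isolation; the class name here is invented:

    class TreeLoopLike(object):
        def _token(self, *args):
            raise Exception("TreeLoop.token is killed")
        token = property(_token, _token)   # reading and writing both fail loudly

    loop = TreeLoopLike()
    for use in (lambda: loop.token, lambda: setattr(loop, 'token', 42)):
        try:
            use()
        except Exception as e:
            assert str(e) == "TreeLoop.token is killed"
        else:
            assert False, "stale use of .token should have raised"

The point of the pattern is that any leftover code still using the old one-token-per-loop attribute breaks immediately instead of silently coexisting with original_jitcell_token.
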
+ original_jitcell_token = None def __init__(self, name): self.name = name - # self.inputargs = list of distinct Boxes # self.operations = list of ResOperations # ops of the kind 'guard_xxx' contain a further list of operations, # which may itself contain 'guard_xxx' and so on, making a tree. @@ -808,6 +828,10 @@ def check_consistency(self): # for testing "NOT_RPYTHON" self.check_consistency_of(self.inputargs, self.operations) + for op in self.operations: + descr = op.getdescr() + if op.getopnum() == rop.LABEL and isinstance(descr, TargetToken): + assert descr.original_jitcell_token is self.original_jitcell_token @staticmethod def check_consistency_of(inputargs, operations): @@ -842,15 +866,23 @@ assert isinstance(box, Box) assert box not in seen seen[box] = True + if op.getopnum() == rop.LABEL: + inputargs = op.getarglist() + for box in inputargs: + assert isinstance(box, Box), "LABEL contains %r" % (box,) + seen = dict.fromkeys(inputargs) + assert len(seen) == len(inputargs), ( + "duplicate Box in the LABEL arguments") + assert operations[-1].is_final() if operations[-1].getopnum() == rop.JUMP: target = operations[-1].getdescr() if target is not None: - assert isinstance(target, LoopToken) + assert isinstance(target, TargetToken) def dump(self): # RPython-friendly - print '%r: inputargs =' % self, self._dump_args(self.inputargs) + print '%r: inputargs =' % self, self._dump_args(self.inputargs) for op in self.operations: args = op.getarglist() print '\t', op.getopname(), self._dump_args(args), \ @@ -932,6 +964,9 @@ def clear(self): pass + def add_jitcell_token(self, token): + pass + class Stats(object): """For tests.""" @@ -944,7 +979,8 @@ self.loops = [] self.locations = [] self.aborted_keys = [] - self.invalidated_token_numbers = set() + self.invalidated_token_numbers = set() # <- not RPython + self.jitcell_token_wrefs = [] def clear(self): del self.loops[:] @@ -955,6 +991,10 @@ self.enter_count = 0 self.aborted_count = 0 + def add_jitcell_token(self, token): + assert isinstance(token, JitCellToken) + self.jitcell_token_wrefs.append(weakref.ref(token)) + def set_history(self, history): self.operations = history.operations @@ -984,6 +1024,15 @@ def get_all_loops(self): return self.loops + def get_all_jitcell_tokens(self): + tokens = [t() for t in self.jitcell_token_wrefs] + if None in tokens: + assert False, "get_all_jitcell_tokens will not work as "+\ + "loops have been freed" + return tokens + + + def check_history(self, expected=None, **check): insns = {} for op in self.operations: @@ -1001,13 +1050,14 @@ def check_resops(self, expected=None, **check): insns = {} - for loop in self.loops: + for loop in self.get_all_loops(): insns = loop.summary(adding_insns=insns) return self._check_insns(insns, expected, check) def _check_insns(self, insns, expected, check): if expected is not None: insns.pop('debug_merge_point', None) + insns.pop('label', None) assert insns == expected for insn, expected_count in check.items(): getattr(rop, insn.upper()) # fails if 'rop.INSN' does not exist @@ -1034,29 +1084,83 @@ opname = op.getopname() insns[opname] = insns.get(opname, 0) + 1 return self._check_insns(insns, expected, check) + + def check_simple_loop(self, expected=None, **check): + # Usefull in the simplest case when we have only one trace ending with + # a jump back to itself and possibly a few bridges ending with finnish. + # Only the operations within the loop formed by that single jump will + # be counted. 
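
[Editor's sketch, not part of the patch.] check_simple_loop() above counts only the operations inside the loop proper, i.e. from the LABEL targeted by the final JUMP onwards. A simplified standalone version of that selection logic, with operations modelled as (opname, label_id) pairs:

    def loop_body_counts(operations):
        # find the LABEL the closing JUMP targets, then count from there
        jump_target = operations[-1][1]
        start = next(i for i, (name, tgt) in enumerate(operations)
                     if name == 'label' and tgt == jump_target)
        counts = {}
        for name, _ in operations[start:]:
            counts[name] = counts.get(name, 0) + 1
        return counts

    ops = [('label', 1), ('int_add', None), ('label', 2),
           ('int_add', None), ('guard_true', None), ('jump', 2)]
    assert loop_body_counts(ops) == {'label': 1, 'int_add': 1,
                                     'guard_true': 1, 'jump': 1}
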
+ loops = self.get_all_loops() + assert len(loops) == 1 + loop = loops[0] + jumpop = loop.operations[-1] + assert jumpop.getopnum() == rop.JUMP + assert self.check_resops(jump=1) + labels = [op for op in loop.operations if op.getopnum() == rop.LABEL] + targets = [op._descr_wref() for op in labels] + assert None not in targets # TargetToken was freed, give up + target = jumpop._descr_wref() + assert target + assert targets.count(target) == 1 + i = loop.operations.index(labels[targets.index(target)]) + insns = {} + for op in loop.operations[i:]: + opname = op.getopname() + insns[opname] = insns.get(opname, 0) + 1 + return self._check_insns(insns, expected, check) + def check_loops(self, expected=None, everywhere=False, **check): + insns = {} + for loop in self.get_all_loops(): + #if not everywhere: + # if getattr(loop, '_ignore_during_counting', False): + # continue + insns = loop.summary(adding_insns=insns) + if expected is not None: + insns.pop('debug_merge_point', None) + print + print + print " self.check_resops(%s)" % str(insns) + print + import pdb; pdb.set_trace() + else: + chk = ['%s=%d' % (i, insns.get(i, 0)) for i in check] + print + print + print " self.check_resops(%s)" % ', '.join(chk) + print + import pdb; pdb.set_trace() + return + + for insn, expected_count in check.items(): + getattr(rop, insn.upper()) # fails if 'rop.INSN' does not exist + found = insns.get(insn, 0) + assert found == expected_count, ( + "found %d %r, expected %d" % (found, insn, expected_count)) + return insns + def check_consistency(self): "NOT_RPYTHON" - for loop in self.loops: + for loop in self.get_all_loops(): loop.check_consistency() def maybe_view(self): if option.view: self.view() - def view(self, errmsg=None, extraloops=[]): - from pypy.jit.metainterp.graphpage import display_loops - loops = self.get_all_loops()[:] - for loop in extraloops: - if loop in loops: - loops.remove(loop) - loops.append(loop) - highlight_loops = dict.fromkeys(extraloops, 1) - for loop in loops: - if hasattr(loop, '_looptoken_number') and ( - loop._looptoken_number in self.invalidated_token_numbers): - highlight_loops.setdefault(loop, 2) - display_loops(loops, errmsg, highlight_loops) + def view(self, errmsg=None, extraprocedures=[]): + from pypy.jit.metainterp.graphpage import display_procedures + procedures = self.get_all_loops()[:] + for procedure in extraprocedures: + if procedure in procedures: + procedures.remove(procedure) + procedures.append(procedure) + highlight_procedures = dict.fromkeys(extraprocedures, 1) + for procedure in procedures: + if hasattr(procedure, '_looptoken_number') and ( + procedure._looptoken_number in self.invalidated_token_numbers): + highlight_procedures.setdefault(procedure, 2) + display_procedures(procedures, errmsg, highlight_procedures) # ---------------------------------------------------------------- diff --git a/pypy/jit/metainterp/inliner.py b/pypy/jit/metainterp/inliner.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/inliner.py @@ -0,0 +1,57 @@ +from pypy.jit.metainterp.history import Const +from pypy.jit.metainterp.resume import Snapshot + +class Inliner(object): + def __init__(self, inputargs, jump_args): + assert len(inputargs) == len(jump_args) + self.argmap = {} + for i in range(len(inputargs)): + if inputargs[i] in self.argmap: + assert self.argmap[inputargs[i]] == jump_args[i] + else: + self.argmap[inputargs[i]] = jump_args[i] + self.snapshot_map = {None: None} + + def inline_op(self, newop, ignore_result=False, clone=True, + ignore_failargs=False): + if clone: 
+ newop = newop.clone() + args = newop.getarglist() + newop.initarglist([self.inline_arg(a) for a in args]) + + if newop.is_guard(): + args = newop.getfailargs() + if args and not ignore_failargs: + newop.setfailargs([self.inline_arg(a) for a in args]) + else: + newop.setfailargs([]) + + if newop.result and not ignore_result: + old_result = newop.result + newop.result = newop.result.clonebox() + self.argmap[old_result] = newop.result + + self.inline_descr_inplace(newop.getdescr()) + + return newop + + def inline_descr_inplace(self, descr): + from pypy.jit.metainterp.compile import ResumeGuardDescr + if isinstance(descr, ResumeGuardDescr): + descr.rd_snapshot = self.inline_snapshot(descr.rd_snapshot) + + def inline_arg(self, arg): + if arg is None: + return None + if isinstance(arg, Const): + return arg + return self.argmap[arg] + + def inline_snapshot(self, snapshot): + if snapshot in self.snapshot_map: + return self.snapshot_map[snapshot] + boxes = [self.inline_arg(a) for a in snapshot.boxes] + new_snapshot = Snapshot(self.inline_snapshot(snapshot.prev), boxes) + self.snapshot_map[snapshot] = new_snapshot + return new_snapshot + diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -4,13 +4,15 @@ from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize from pypy.jit.metainterp.optimizeopt.heap import OptHeap from pypy.jit.metainterp.optimizeopt.vstring import OptString -from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll, OptInlineShortPreamble +from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll from pypy.jit.metainterp.optimizeopt.fficall import OptFfiCall from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify from pypy.jit.metainterp.optimizeopt.pure import OptPure from pypy.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce from pypy.rlib.jit import PARAMETERS from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.debug import debug_start, debug_stop, debug_print + ALL_OPTS = [('intbounds', OptIntBounds), ('rewrite', OptRewrite), @@ -28,8 +30,7 @@ ALL_OPTS_LIST = [name for name, _ in ALL_OPTS] ALL_OPTS_NAMES = ':'.join([name for name, _ in ALL_OPTS]) -def build_opt_chain(metainterp_sd, enable_opts, - inline_short_preamble=True, retraced=False): +def build_opt_chain(metainterp_sd, enable_opts): config = metainterp_sd.config optimizations = [] unroll = 'unroll' in enable_opts # 'enable_opts' is normally a dict @@ -45,12 +46,9 @@ optimizations.append(OptFfiCall()) if ('rewrite' not in enable_opts or 'virtualize' not in enable_opts - or 'heap' not in enable_opts): + or 'heap' not in enable_opts or 'unroll' not in enable_opts): optimizations.append(OptSimplify()) - if inline_short_preamble: - optimizations = [OptInlineShortPreamble(retraced)] + optimizations - return optimizations, unroll @@ -80,3 +78,21 @@ if __name__ == '__main__': print ALL_OPTS_NAMES + +def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True): + """Optimize loop.operations to remove internal overheadish operations. 
+ """ + + debug_start("jit-optimize") + try: + loop.logops = metainterp_sd.logger_noopt.log_loop(loop.inputargs, + loop.operations) + optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts) + if unroll: + optimize_unroll(metainterp_sd, loop, optimizations, inline_short_preamble) + else: + optimizer = Optimizer(metainterp_sd, loop, optimizations) + optimizer.propagate_all_forward() + finally: + debug_stop("jit-optimize") + diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -500,8 +500,9 @@ else: return CVAL_ZERO - def propagate_all_forward(self): - self.clear_newoperations() + def propagate_all_forward(self, clear=True): + if clear: + self.clear_newoperations() for op in self.loop.operations: self.first_optimization.propagate_forward(op) self.loop.operations = self.get_newoperations() diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -1,9 +1,12 @@ from pypy.jit.metainterp.optimizeopt.optimizer import Optimization from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method from pypy.jit.metainterp.resoperation import ResOperation, rop - +from pypy.jit.metainterp.history import TargetToken, JitCellToken class OptSimplify(Optimization): + def __init__(self): + self.last_label_descr = None + def optimize_CALL_PURE(self, op): args = op.getarglist() self.emit_operation(ResOperation(rop.CALL, args, op.result, @@ -31,6 +34,23 @@ def optimize_RECORD_KNOWN_CLASS(self, op): pass + def optimize_LABEL(self, op): + self.last_label_descr = op.getdescr() + self.emit_operation(op) + + def optimize_JUMP(self, op): + descr = op.getdescr() + assert isinstance(descr, JitCellToken) + if not descr.target_tokens: + assert self.last_label_descr is not None + target_token = self.last_label_descr + assert isinstance(target_token, TargetToken) + assert target_token.targeting_jitcell_token is descr + op.setdescr(self.last_label_descr) + else: + assert len(descr.target_tokens) == 1 + op.setdescr(descr.target_tokens[0]) + self.emit_operation(op) dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -0,0 +1,200 @@ +from pypy.jit.metainterp.optimizeopt.test.test_util import ( + LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot) +from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken +from pypy.jit.metainterp.resoperation import rop, opname, ResOperation +from pypy.jit.metainterp.optimize import InvalidLoop +from py.test import raises + +class BaseTestMultiLabel(BaseTest): + enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" + + def optimize_loop(self, ops, expected): + loop = self.parse(ops) + if expected != "crash!": + expected = self.parse(expected) + + part = TreeLoop('part') + part.inputargs = loop.inputargs + part.start_resumedescr = FakeDescrWithSnapshot() + token = loop.original_jitcell_token + + optimized = TreeLoop('optimized') + optimized.inputargs = loop.inputargs + optimized.operations = [] + + labels = [i for i, op in enumerate(loop.operations) 
\ + if op.getopnum()==rop.LABEL] + prv = 0 + last_label = [] + for nxt in labels + [len(loop.operations)]: + assert prv != nxt + operations = last_label + loop.operations[prv:nxt] + if nxt < len(loop.operations): + label = loop.operations[nxt] + assert label.getopnum() == rop.LABEL + jumpop = ResOperation(rop.JUMP, label.getarglist(), + None, descr=token) + operations.append(jumpop) + part.operations = operations + self._do_optimize_loop(part, None) + if part.operations[-1].getopnum() == rop.LABEL: + last_label = [part.operations.pop()] + else: + last_label = [] + optimized.operations.extend(part.operations) + prv = nxt + 1 + + # + print + print "Optimized:" + if optimized.operations: + print '\n'.join([str(o) for o in optimized.operations]) + else: + print 'Failed!' + print + + assert expected != "crash!", "should have raised an exception" + self.assert_equal(optimized, expected) + + return optimized + + def test_simple(self): + ops = """ + [i1] + i2 = int_add(i1, 1) + escape(i2) + label(i1) + i3 = int_add(i1, 1) + escape(i3) + jump(i1) + """ + expected = """ + [i1] + i2 = int_add(i1, 1) + escape(i2) + label(i1, i2) + escape(i2) + jump(i1, i2) + """ + self.optimize_loop(ops, expected) + + def test_forced_virtual(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + escape(p3) + jump(p3) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtuals_with_nonmatching_fields(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3, 1, descr=valuedescr) + label(p3) + p4 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p4, 1, descr=nextdescr) + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtual_arrays_with_nonmatching_lens(self): + ops = """ + [p1] + p2 = new_array(3, descr=arraydescr) + label(p2) + p4 = new_array(2, descr=arraydescr) + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_nonmatching_arraystruct_1(self): + ops = """ + [p1, f0] + p2 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p2, 2, f0, descr=complexrealdescr) + label(p2, f0) + p4 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p4, 2, f0, descr=compleximagdescr) + jump(p4, f0) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_nonmatching_arraystruct_2(self): + ops = """ + [p1, f0] + p2 = new_array(3, descr=complexarraydescr) + setinteriorfield_gc(p2, 2, f0, descr=complexrealdescr) + label(p2, f0) + p4 = new_array(2, descr=complexarraydescr) + setinteriorfield_gc(p4, 0, f0, descr=complexrealdescr) + jump(p4, f0) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual_array(self): + ops = """ + [p1] + p3 = new_array(3, descr=arraydescr) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_not_virtual_arraystruct(self): + ops = """ + [p1] + p3 = new_array(3, descr=complexarraydescr) + label(p3) + p4 = escape() + jump(p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + def test_virtual_turns_constant(self): + ops = """ + [p1] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3) + guard_value(p3, ConstPtr(myptr)) [] + jump(p3) + """ + with raises(InvalidLoop): + 
self.optimize_loop(ops, ops) + + def test_virtuals_turns_not_equal(self): + ops = """ + [p1, p2] + p3 = new_with_vtable(ConstClass(node_vtable)) + label(p3, p3) + p4 = new_with_vtable(ConstClass(node_vtable)) + jump(p3, p4) + """ + with raises(InvalidLoop): + self.optimize_loop(ops, ops) + + +class TestLLtype(BaseTestMultiLabel, LLtypeMixin): + pass + diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -1,7 +1,8 @@ import py from pypy.rlib.objectmodel import instantiate from pypy.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, FakeMetaInterpStaticData) + LLtypeMixin, BaseTest, FakeMetaInterpStaticData, convert_old_style_to_targets) +from pypy.jit.metainterp.history import TargetToken, JitCellToken from pypy.jit.metainterp.test.test_compile import FakeLogger import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt import pypy.jit.metainterp.optimizeopt.virtualize as virtualize @@ -11,7 +12,6 @@ from pypy.jit.metainterp.resoperation import rop, opname, ResOperation from pypy.rlib.rarithmetic import LONG_BIT - def test_store_final_boxes_in_guard(): from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.resume import tag, TAGBOX @@ -116,9 +116,13 @@ enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap" def optimize_loop(self, ops, optops, call_pure_results=None): - loop = self.parse(ops) - expected = self.parse(optops) + token = JitCellToken() + loop.operations = [ResOperation(rop.LABEL, loop.inputargs, None, descr=TargetToken(token))] + \ + loop.operations + if loop.operations[-1].getopnum() == rop.JUMP: + loop.operations[-1].setdescr(token) + expected = convert_old_style_to_targets(self.parse(optops), jump=True) self._do_optimize_loop(loop, call_pure_results) print '\n'.join([str(o) for o in loop.operations]) self.assert_equal(loop, expected) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1,13 +1,13 @@ import py from pypy.rlib.objectmodel import instantiate from pypy.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, Storage, _sortboxes) + LLtypeMixin, BaseTest, Storage, _sortboxes, convert_old_style_to_targets) import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt import pypy.jit.metainterp.optimizeopt.virtualize as virtualize from pypy.jit.metainterp.optimizeopt import optimize_loop_1, ALL_OPTS_DICT, build_opt_chain from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt -from pypy.jit.metainterp.history import TreeLoop, LoopToken +from pypy.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken from pypy.jit.metainterp.jitprof import EmptyProfiler from pypy.jit.metainterp import executor, compile, resume, history from pypy.jit.metainterp.resoperation import rop, opname, ResOperation @@ -15,7 +15,7 @@ from pypy.jit.metainterp.optimizeopt.util import args_dict from pypy.jit.metainterp.optimizeopt.test.test_optimizebasic import FakeMetaInterpStaticData from pypy.config.pypyoption import get_pypy_config - +from pypy.jit.metainterp.optimizeopt.unroll import Inliner def 
test_build_opt_chain(): def check(chain, expected_names): @@ -23,49 +23,37 @@ assert names == expected_names # metainterp_sd = FakeMetaInterpStaticData(None) - chain, _ = build_opt_chain(metainterp_sd, "", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "") check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) # chain, _ = build_opt_chain(metainterp_sd, "heap:intbounds") - check(chain, ["OptInlineShortPreamble", "OptIntBounds", "OptHeap", "OptSimplify"]) + check(chain, ["OptIntBounds", "OptHeap", "OptSimplify"]) # chain, unroll = build_opt_chain(metainterp_sd, "unroll") - check(chain, ["OptInlineShortPreamble", "OptSimplify"]) + check(chain, ["OptSimplify"]) assert unroll # - chain, _ = build_opt_chain(metainterp_sd, "aaa:bbb", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "aaa:bbb") check(chain, ["OptSimplify"]) # - chain, _ = build_opt_chain(metainterp_sd, "ffi", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "ffi") check(chain, ["OptFfiCall", "OptSimplify"]) # metainterp_sd.config = get_pypy_config(translating=True) assert not metainterp_sd.config.translation.jit_ffi - chain, _ = build_opt_chain(metainterp_sd, "ffi", inline_short_preamble=False) + chain, _ = build_opt_chain(metainterp_sd, "ffi") check(chain, ["OptSimplify"]) # ____________________________________________________________ -class FakeDescr(compile.ResumeGuardDescr): - class rd_snapshot: - class prev: - prev = None - boxes = [] - boxes = [] - def clone_if_mutable(self): - return FakeDescr() - def __eq__(self, other): - return isinstance(other, Storage) or isinstance(other, FakeDescr) - - class BaseTestWithUnroll(BaseTest): enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" @@ -79,40 +67,41 @@ expected_preamble = self.parse(expected_preamble) if expected_short: expected_short = self.parse(expected_short) - loop.preamble = TreeLoop('preamble') - loop.preamble.inputargs = loop.inputargs - loop.preamble.token = LoopToken() - loop.preamble.start_resumedescr = FakeDescr() - # - self._do_optimize_loop(loop, call_pure_results) + + preamble = self.unroll_and_optimize(loop, call_pure_results) + # print print "Preamble:" - print loop.preamble.inputargs - if loop.preamble.operations: - print '\n'.join([str(o) for o in loop.preamble.operations]) + if preamble.operations: + print '\n'.join([str(o) for o in preamble.operations]) else: print 'Failed!' 
print print "Loop:" - print loop.inputargs print '\n'.join([str(o) for o in loop.operations]) print if expected_short: print "Short Preamble:" - short = loop.preamble.token.short_preamble[0] - print short.inputargs - print '\n'.join([str(o) for o in short.operations]) + short = loop.operations[0].getdescr().short_preamble + print '\n'.join([str(o) for o in short]) print assert expected != "crash!", "should have raised an exception" - self.assert_equal(loop, expected) + self.assert_equal(loop, convert_old_style_to_targets(expected, jump=True)) + assert loop.operations[0].getdescr() == loop.operations[-1].getdescr() if expected_preamble: - self.assert_equal(loop.preamble, expected_preamble, + self.assert_equal(preamble, convert_old_style_to_targets(expected_preamble, jump=False), text_right='expected preamble') + assert preamble.operations[-1].getdescr() == loop.operations[0].getdescr() if expected_short: - self.assert_equal(short, expected_short, + short_preamble = TreeLoop('short preamble') + assert short[0].getopnum() == rop.LABEL + short_preamble.inputargs = short[0].getarglist() + short_preamble.operations = short + self.assert_equal(short_preamble, convert_old_style_to_targets(expected_short, jump=True), text_right='expected short preamble') + assert short[-1].getdescr() == loop.operations[0].getdescr() return loop @@ -234,7 +223,7 @@ """ % expected_value self.optimize_loop(ops, expected) - def test_reverse_of_cast(self): + def test_reverse_of_cast_1(self): ops = """ [i0] p0 = cast_int_to_ptr(i0) @@ -246,6 +235,8 @@ jump(i0) """ self.optimize_loop(ops, expected) + + def test_reverse_of_cast_2(self): ops = """ [p0] i1 = cast_ptr_to_int(p0) @@ -1181,6 +1172,7 @@ i1 = getfield_gc(p0, descr=valuedescr) i2 = int_sub(i1, 1) i3 = int_add(i0, i1) + i4 = same_as(i2) # This same_as should be killed by backend jump(i3, i2, i1) """ expected = """ @@ -1252,10 +1244,10 @@ i1 = int_add(i0, 1) p1 = new_with_vtable(ConstClass(node_vtable2)) p2 = new_with_vtable(ConstClass(node_vtable2)) - setfield_gc(p0, p1, descr=nextdescr) + setfield_gc(p2, i1, descr=valuedescr) setfield_gc(p2, p1, descr=nextdescr) setfield_gc(p1, p2, descr=nextdescr) - setfield_gc(p2, i1, descr=valuedescr) + setfield_gc(p0, p1, descr=nextdescr) jump(p1) """ self.optimize_loop(ops, loop, preamble) @@ -1317,6 +1309,7 @@ p30 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p30, i28, descr=nextdescr) setfield_gc(p3, p30, descr=valuedescr) + p46 = same_as(p30) # This same_as should be killed by backend jump(i29, p30, p3) """ expected = """ @@ -1324,8 +1317,8 @@ i28 = int_add(i0, 1) i29 = int_add(i28, 1) p30 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p30, i28, descr=nextdescr) setfield_gc(p3, p30, descr=valuedescr) - setfield_gc(p30, i28, descr=nextdescr) jump(i29, p30, p3) """ self.optimize_loop(ops, expected, preamble) @@ -2118,7 +2111,9 @@ guard_true(i3) [] i4 = int_neg(i2) setfield_gc(p1, i2, descr=valuedescr) - jump(p1, i1, i2, i4, i4) + i7 = same_as(i2) # This same_as should be killed by backend + i6 = same_as(i4) + jump(p1, i1, i2, i4, i6) """ expected = """ [p1, i1, i2, i4, i5] @@ -2148,7 +2143,8 @@ i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) escape() - jump(p1, i2, i4, i4) + i5 = same_as(i4) + jump(p1, i2, i4, i5) """ expected = """ [p1, i2, i4, i5] @@ -2177,7 +2173,8 @@ i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) escape() - jump(p1, i2, i4, i4) + i5 = same_as(i4) + jump(p1, i2, i4, i5) """ expected = """ [p1, i2, i4, i5] @@ -2207,7 +2204,9 @@ guard_true(i5) [] i4 = int_neg(i2) 
setfield_gc(p1, i2, descr=valuedescr) - jump(p1, i1, i2, i4, i4) + i8 = same_as(i2) # This same_as should be killed by backend + i7 = same_as(i4) + jump(p1, i1, i2, i4, i7) """ expected = """ [p1, i1, i2, i4, i7] @@ -2433,7 +2432,8 @@ p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p4, descr=nextdescr) setfield_gc(p1, p2, descr=nextdescr) - jump(p1, i2, i4, p4, i4) + i101 = same_as(i4) + jump(p1, i2, i4, p4, i101) """ expected = """ [p1, i2, i4, p4, i5] @@ -3276,7 +3276,15 @@ setfield_gc(p1, i3, descr=valuedescr) jump(p1, i4, i3) ''' - self.optimize_loop(ops, ops, ops) + preamble = ''' + [p1, i1, i4] + setfield_gc(p1, i1, descr=valuedescr) + i3 = call_assembler(i1, descr=asmdescr) + setfield_gc(p1, i3, descr=valuedescr) + i143 = same_as(i3) # Should be killed by backend + jump(p1, i4, i3) + ''' + self.optimize_loop(ops, ops, preamble) def test_call_assembler_invalidates_heap_knowledge(self): ops = ''' @@ -3307,7 +3315,9 @@ setfield_gc(p1, i1, descr=valuedescr) i3 = call(p1, descr=plaincalldescr) setfield_gc(p1, i3, descr=valuedescr) - jump(p1, i4, i3, i3) + i148 = same_as(i3) + i147 = same_as(i3) + jump(p1, i4, i3, i148) ''' self.optimize_loop(ops, expected, preamble) @@ -3330,7 +3340,8 @@ setfield_gc(p1, i1, descr=valuedescr) i3 = call(p1, descr=plaincalldescr) setfield_gc(p1, i1, descr=valuedescr) - jump(p1, i4, i3, i3) + i151 = same_as(i3) + jump(p1, i4, i3, i151) ''' self.optimize_loop(ops, expected, preamble) @@ -3350,7 +3361,8 @@ escape(i1) escape(i2) i4 = call(123456, 4, i0, 6, descr=plaincalldescr) - jump(i0, i4, i4) + i153 = same_as(i4) + jump(i0, i4, i153) ''' expected = ''' [i0, i4, i5] @@ -3380,7 +3392,8 @@ escape(i2) i4 = call(123456, 4, i0, 6, descr=plaincalldescr) guard_no_exception() [] - jump(i0, i4, i4) + i155 = same_as(i4) + jump(i0, i4, i155) ''' expected = ''' [i0, i2, i3] @@ -4198,6 +4211,7 @@ preamble = """ [p0] i0 = strlen(p0) + i3 = same_as(i0) # Should be killed by backend jump(p0) """ expected = """ @@ -5418,6 +5432,7 @@ [p0] p1 = getfield_gc(p0, descr=valuedescr) setfield_gc(p0, p0, descr=valuedescr) + p4450 = same_as(p0) # Should be killed by backend jump(p0) """ expected = """ @@ -5653,7 +5668,8 @@ p3 = newstr(i3) copystrcontent(p1, p3, 0, 0, i1) copystrcontent(p2, p3, 0, i1, i2) - jump(p2, p3, i2) + i7 = same_as(i2) + jump(p2, p3, i7) """ expected = """ [p1, p2, i1] @@ -5728,7 +5744,9 @@ copystrcontent(p1, p5, 0, 0, i1) copystrcontent(p2, p5, 0, i1, i2) copystrcontent(p3, p5, 0, i12, i3) - jump(p2, p3, p5, i2, i3) + i129 = same_as(i2) + i130 = same_as(i3) + jump(p2, p3, p5, i129, i130) """ expected = """ [p1, p2, p3, i1, i2] @@ -5788,7 +5806,8 @@ [p1, i1, i2, i3] escape(i3) i4 = int_sub(i2, i1) - jump(p1, i1, i2, i4, i4) + i5 = same_as(i4) + jump(p1, i1, i2, i4, i5) """ expected = """ [p1, i1, i2, i3, i4] @@ -5813,7 +5832,8 @@ escape(i5) i4 = int_sub(i2, i1) setfield_gc(p2, i4, descr=valuedescr) - jump(p1, i1, i2, p2, i4, i4) + i8 = same_as(i4) + jump(p1, i1, i2, p2, i8, i4) """ expected = """ [p1, i1, i2, p2, i5, i6] @@ -5939,7 +5959,8 @@ p4 = newstr(i5) copystrcontent(p1, p4, i1, 0, i3) copystrcontent(p2, p4, 0, i3, i4) - jump(p4, i1, i2, p2, i5, i3, i4) + i9 = same_as(i4) + jump(p4, i1, i2, p2, i5, i3, i9) """ expected = """ [p1, i1, i2, p2, i5, i3, i4] @@ -6061,7 +6082,9 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, p3, p4, descr=strequaldescr) escape(i0) - jump(p1, p2, p3, i3, i1, i2) + i11 = same_as(i1) + i12 = same_as(i2) + jump(p1, p2, p3, i3, i11, i12) """ expected = """ [p1, p2, p3, i3, i1, i2] @@ -6281,6 +6304,7 @@ i1 = strlen(p1) 
i0 = int_eq(i1, 0) escape(i0) + i3 = same_as(i1) jump(p1, i0) """ self.optimize_strunicode_loop_extradescrs(ops, expected, preamble) @@ -6326,7 +6350,9 @@ copystrcontent(p2, p4, 0, i1, i2) i0 = call(0, s"hello world", p4, descr=streq_nonnull_descr) escape(i0) - jump(p1, p2, i3, i1, i2) + i11 = same_as(i1) + i12 = same_as(i2) + jump(p1, p2, i3, i11, i12) """ expected = """ [p1, p2, i3, i1, i2] @@ -6629,7 +6655,8 @@ p188 = getarrayitem_gc(p187, 42, descr=) guard_value(p188, ConstPtr(myptr)) [] p25 = getfield_gc(ConstPtr(myptr), descr=otherdescr) - jump(p25, p187, i184, p25) + p26 = same_as(p25) + jump(p25, p187, i184, p26) """ short = """ [p1, p187, i184] @@ -6898,7 +6925,8 @@ [p9] i843 = strlen(p9) call(i843, descr=nonwritedescr) - jump(p9, i843) + i0 = same_as(i843) + jump(p9, i0) """ short = """ [p9] @@ -7014,6 +7042,40 @@ """ self.optimize_loop(ops, expected) + def test_duplicated_aliased_virtual(self): + ops = """ + [p1, p2] + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3, p3, descr=nextdescr) + p4 = getfield_gc(p3, descr=nextdescr) + jump(p3, p4) + """ + expected = """ + [] + jump() + """ + self.optimize_loop(ops, expected) + + def test_imported_aliased_virtual_in_failargs(self): + ops = """ + [p1, p2, i0] + i2 = int_lt(i0, 10) + guard_true(i2) [p1, p2] + p3 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3, p3, descr=nextdescr) + p4 = getfield_gc(p3, descr=nextdescr) + i1 = int_add(i0, 1) + jump(p3, p4, i1) + """ + expected = """ + [i0] + i2 = int_lt(i0, 10) + guard_true(i2) [] + i1 = int_add(i0, 1) + jump(i1) + """ + self.optimize_loop(ops, expected) + def test_chained_virtuals(self): ops = """ [p0, p1] @@ -7590,7 +7652,8 @@ call(i2, descr=nonwritedescr) setfield_gc(p22, i1, descr=valuedescr) guard_nonnull_class(p18, ConstClass(node_vtable)) [] - jump(p22, p18, i1, i1) + i10 = same_as(i1) + jump(p22, p18, i1, i10) """ short = """ [p22, p18, i1] diff --git a/pypy/jit/metainterp/optimizeopt/test/test_util.py b/pypy/jit/metainterp/optimizeopt/test/test_util.py --- a/pypy/jit/metainterp/optimizeopt/test/test_util.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_util.py @@ -8,7 +8,8 @@ from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstPtr, Const, TreeLoop, BoxObj, - ConstObj, AbstractDescr) + ConstObj, AbstractDescr, + JitCellToken, TargetToken) from pypy.jit.metainterp.optimizeopt.util import sort_descrs, equaloplists from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.codewriter.effectinfo import EffectInfo @@ -18,6 +19,8 @@ from pypy.jit.metainterp import compile, resume, history from pypy.jit.metainterp.jitprof import EmptyProfiler from pypy.config.pypyoption import get_pypy_config +from pypy.jit.metainterp.resoperation import rop, opname, ResOperation +from pypy.jit.metainterp.optimizeopt.unroll import Inliner def test_sort_descrs(): class PseudoDescr(AbstractDescr): @@ -344,6 +347,11 @@ self.config = get_pypy_config(translating=True) self.config.translation.jit_ffi = True + class logger_noopt: + @classmethod + def log_loop(*args): + pass + class warmrunnerdesc: class memory_manager: retrace_limit = 5 @@ -394,7 +402,7 @@ expected.operations, False, remap, text_right) def _do_optimize_loop(self, loop, call_pure_results): - from pypy.jit.metainterp.optimizeopt import optimize_loop_1 + from pypy.jit.metainterp.optimizeopt import optimize_trace from pypy.jit.metainterp.optimizeopt.util import args_dict self.loop = loop @@ -408,7 +416,83 @@ if hasattr(self, 'callinfocollection'): 
metainterp_sd.callinfocollection = self.callinfocollection # - optimize_loop_1(metainterp_sd, loop, self.enable_opts) + optimize_trace(metainterp_sd, loop, self.enable_opts) + + def unroll_and_optimize(self, loop, call_pure_results=None): + operations = loop.operations + jumpop = operations[-1] + assert jumpop.getopnum() == rop.JUMP + inputargs = loop.inputargs + + jump_args = jumpop.getarglist()[:] + operations = operations[:-1] + cloned_operations = [op.clone() for op in operations] + + preamble = TreeLoop('preamble') + preamble.inputargs = inputargs + preamble.start_resumedescr = FakeDescrWithSnapshot() + + token = JitCellToken() + preamble.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(token))] + \ + operations + \ + [ResOperation(rop.JUMP, jump_args, None, descr=token)] + self._do_optimize_loop(preamble, call_pure_results) + + assert preamble.operations[-1].getopnum() == rop.LABEL + + inliner = Inliner(inputargs, jump_args) + loop.start_resumedescr = preamble.start_resumedescr + loop.operations = [preamble.operations[-1]] + \ + [inliner.inline_op(op, clone=False) for op in cloned_operations] + \ + [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jump_args], + None, descr=token)] + #[inliner.inline_op(jumpop)] + assert loop.operations[-1].getopnum() == rop.JUMP + assert loop.operations[0].getopnum() == rop.LABEL + loop.inputargs = loop.operations[0].getarglist() + + self._do_optimize_loop(loop, call_pure_results) + extra_same_as = [] + while loop.operations[0].getopnum() != rop.LABEL: + extra_same_as.append(loop.operations[0]) + del loop.operations[0] + + # Hack to prevent random order of same_as ops + extra_same_as.sort(key=lambda op: str(preamble.operations).find(str(op.getarg(0)))) + + for op in extra_same_as: + preamble.operations.insert(-1, op) + + return preamble + + +class FakeDescr(compile.ResumeGuardDescr): + def clone_if_mutable(self): + return FakeDescr() + def __eq__(self, other): + return isinstance(other, FakeDescr) + +class FakeDescrWithSnapshot(compile.ResumeGuardDescr): + class rd_snapshot: + class prev: + prev = None + boxes = [] + boxes = [] + def clone_if_mutable(self): + return FakeDescrWithSnapshot() + def __eq__(self, other): + return isinstance(other, Storage) or isinstance(other, FakeDescrWithSnapshot) + + +def convert_old_style_to_targets(loop, jump): + newloop = TreeLoop(loop.name) + newloop.inputargs = loop.inputargs + newloop.operations = [ResOperation(rop.LABEL, loop.inputargs, None, descr=FakeDescr())] + \ + loop.operations + if not jump: + assert newloop.operations[-1].getopnum() == rop.JUMP + newloop.operations[-1] = ResOperation(rop.LABEL, newloop.operations[-1].getarglist(), None, descr=FakeDescr()) + return newloop # ____________________________________________________________ diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -1,11 +1,12 @@ from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.metainterp.optimizeopt.virtualstate import VirtualStateAdder, ShortBoxes +from pypy.jit.metainterp.optimizeopt.virtualstate import VirtualStateAdder, ShortBoxes, BadVirtualState from pypy.jit.metainterp.compile import ResumeGuardDescr -from pypy.jit.metainterp.history import TreeLoop, LoopToken +from pypy.jit.metainterp.history import TreeLoop, TargetToken, JitCellToken from pypy.jit.metainterp.jitexc import JitException from pypy.jit.metainterp.optimize import 
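# Aside: convert_old_style_to_targets above adapts old-style test loops,
# which only ended in a JUMP, to the label-based format: a LABEL over the
# input arguments is prepended, and for preambles (jump=False) the closing
# JUMP becomes a LABEL as well.  A toy version (not from the changeset) over
# (opname, args) pairs:

def convert_old_style(inputargs, operations, jump):
    newops = [('label', list(inputargs))] + list(operations)
    if not jump:
        opname, args = newops[-1]
        assert opname == 'jump'
        newops[-1] = ('label', args)
    return newops

ops = [('int_add', ['i0', 'c1']), ('jump', ['i1'])]
print(convert_old_style(['i0'], ops, jump=False))
# [('label', ['i0']), ('int_add', ['i0', 'c1']), ('label', ['i1'])]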
InvalidLoop, RetraceLoop from pypy.jit.metainterp.optimizeopt.optimizer import * from pypy.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds +from pypy.jit.metainterp.inliner import Inliner from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.resume import Snapshot from pypy.rlib.debug import debug_print @@ -13,63 +14,11 @@ # FIXME: Introduce some VirtualOptimizer super class instead -def optimize_unroll(metainterp_sd, loop, optimizations): +def optimize_unroll(metainterp_sd, loop, optimizations, inline_short_preamble=True): opt = UnrollOptimizer(metainterp_sd, loop, optimizations) + opt.inline_short_preamble = inline_short_preamble opt.propagate_all_forward() -class Inliner(object): - def __init__(self, inputargs, jump_args): - assert len(inputargs) == len(jump_args) - self.argmap = {} - for i in range(len(inputargs)): - if inputargs[i] in self.argmap: - assert self.argmap[inputargs[i]] == jump_args[i] - else: - self.argmap[inputargs[i]] = jump_args[i] - self.snapshot_map = {None: None} - - def inline_op(self, newop, ignore_result=False, clone=True, - ignore_failargs=False): - if clone: - newop = newop.clone() - args = newop.getarglist() - newop.initarglist([self.inline_arg(a) for a in args]) - - if newop.is_guard(): - args = newop.getfailargs() - if args and not ignore_failargs: - newop.setfailargs([self.inline_arg(a) for a in args]) - else: - newop.setfailargs([]) - - if newop.result and not ignore_result: - old_result = newop.result - newop.result = newop.result.clonebox() - self.argmap[old_result] = newop.result - - self.inline_descr_inplace(newop.getdescr()) - - return newop - - def inline_descr_inplace(self, descr): - if isinstance(descr, ResumeGuardDescr): - descr.rd_snapshot = self.inline_snapshot(descr.rd_snapshot) - - def inline_arg(self, arg): - if arg is None: - return None - if isinstance(arg, Const): - return arg - return self.argmap[arg] - - def inline_snapshot(self, snapshot): - if snapshot in self.snapshot_map: - return self.snapshot_map[snapshot] - boxes = [self.inline_arg(a) for a in snapshot.boxes] - new_snapshot = Snapshot(self.inline_snapshot(snapshot.prev), boxes) - self.snapshot_map[snapshot] = new_snapshot - return new_snapshot - class UnrollableOptimizer(Optimizer): def setup(self): self.importable_values = {} @@ -101,14 +50,13 @@ become the preamble or entry bridge (don't think there is a distinction anymore)""" + inline_short_preamble = True + did_import = False + def __init__(self, metainterp_sd, loop, optimizations): self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations) - self.cloned_operations = [] - for op in self.optimizer.loop.operations: - newop = op.clone() - self.cloned_operations.append(newop) - def fix_snapshot(self, loop, jump_args, snapshot): + def fix_snapshot(self, jump_args, snapshot): if snapshot is None: return None snapshot_args = snapshot.boxes @@ -116,116 +64,348 @@ for a in snapshot_args: a = self.getvalue(a).get_key_box() new_snapshot_args.append(a) - prev = self.fix_snapshot(loop, jump_args, snapshot.prev) + prev = self.fix_snapshot(jump_args, snapshot.prev) return Snapshot(prev, new_snapshot_args) def propagate_all_forward(self): loop = self.optimizer.loop + self.optimizer.clear_newoperations() + + + start_label = loop.operations[0] + if start_label.getopnum() == rop.LABEL: + loop.operations = loop.operations[1:] + # We need to emit the label op before import_state() as emitting it + # will clear heap caches + self.optimizer.send_extra_operation(start_label) + else: + 
start_label = None + jumpop = loop.operations[-1] if jumpop.getopnum() == rop.JUMP: loop.operations = loop.operations[:-1] else: - loopop = None + jumpop = None - self.optimizer.propagate_all_forward() + self.import_state(start_label) + self.optimizer.propagate_all_forward(clear=False) + if not jumpop: + return + if self.jump_to_already_compiled_trace(jumpop): + # Found a compiled trace to jump to + if self.did_import: - if jumpop: - assert jumpop.getdescr() is loop.token - jump_args = jumpop.getarglist() - jumpop.initarglist([]) + self.close_bridge(start_label) + self.finilize_short_preamble(start_label) + return + + cell_token = jumpop.getdescr() + assert isinstance(cell_token, JitCellToken) + stop_label = ResOperation(rop.LABEL, jumpop.getarglist(), None, TargetToken(cell_token)) + + if not self.did_import: # Enforce the previous behaviour of always peeling exactly one iteration (for now) self.optimizer.flush() + KillHugeIntBounds(self.optimizer).apply() - KillHugeIntBounds(self.optimizer).apply() + loop.operations = self.optimizer.get_newoperations() + self.export_state(stop_label) + loop.operations.append(stop_label) + else: + assert stop_label + assert start_label + stop_target = stop_label.getdescr() + start_target = start_label.getdescr() + assert isinstance(stop_target, TargetToken) + assert isinstance(start_target, TargetToken) + assert stop_target.targeting_jitcell_token is start_target.targeting_jitcell_token + jumpop = ResOperation(rop.JUMP, stop_label.getarglist(), None, descr=start_label.getdescr()) + + self.close_loop(jumpop) + self.finilize_short_preamble(start_label) + + def export_state(self, targetop): + original_jump_args = targetop.getarglist() + jump_args = [self.getvalue(a).get_key_box() for a in original_jump_args] + + assert self.optimizer.loop.start_resumedescr + start_resumedescr = self.optimizer.loop.start_resumedescr.clone_if_mutable() + assert isinstance(start_resumedescr, ResumeGuardDescr) + start_resumedescr.rd_snapshot = self.fix_snapshot(jump_args, start_resumedescr.rd_snapshot) + # FIXME: I dont thnik we need fix_snapshot anymore + + modifier = VirtualStateAdder(self.optimizer) + virtual_state = modifier.get_virtual_state(jump_args) - loop.preamble.operations = self.optimizer.get_newoperations() - jump_args = [self.getvalue(a).get_key_box() for a in jump_args] + values = [self.getvalue(arg) for arg in jump_args] + inputargs = virtual_state.make_inputargs(values, self.optimizer) + short_inputargs = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True) - start_resumedescr = loop.preamble.start_resumedescr.clone_if_mutable() - self.start_resumedescr = start_resumedescr - assert isinstance(start_resumedescr, ResumeGuardDescr) - start_resumedescr.rd_snapshot = self.fix_snapshot(loop, jump_args, - start_resumedescr.rd_snapshot) + constant_inputargs = {} + for box in jump_args: + const = self.get_constant_box(box) + if const: + constant_inputargs[box] = const - modifier = VirtualStateAdder(self.optimizer) - virtual_state = modifier.get_virtual_state(jump_args) + short_boxes = ShortBoxes(self.optimizer, inputargs + constant_inputargs.keys()) + aliased_vrituals = {} + for i in range(len(original_jump_args)): + if original_jump_args[i] is not jump_args[i]: + if values[i].is_virtual(): + aliased_vrituals[original_jump_args[i]] = jump_args[i] + else: + short_boxes.alias(original_jump_args[i], jump_args[i]) + + self.optimizer.clear_newoperations() + for box in short_inputargs: + value = self.getvalue(box) + if value.is_virtual(): + 
value.force_box(self.optimizer) + inputarg_setup_ops = self.optimizer.get_newoperations() + + target_token = targetop.getdescr() + assert isinstance(target_token, TargetToken) + targetop.initarglist(inputargs) + target_token.virtual_state = virtual_state + target_token.short_preamble = [ResOperation(rop.LABEL, short_inputargs, None)] + target_token.start_resumedescr = start_resumedescr + target_token.exported_state = ExportedState(constant_inputargs, short_boxes, + inputarg_setup_ops, self.optimizer, + aliased_vrituals, jump_args) + + def import_state(self, targetop): + self.did_import = False + if not targetop: + # FIXME: Set up some sort of empty state with no virtuals? + return + target_token = targetop.getdescr() + if not target_token: + return + assert isinstance(target_token, TargetToken) + exported_state = target_token.exported_state + if not exported_state: + # FIXME: Set up some sort of empty state with no virtuals + return + self.did_import = True + + self.short = target_token.short_preamble[:] + self.short_seen = {} + self.short_boxes = exported_state.short_boxes.clone() + for box, const in exported_state.constant_inputargs.items(): + self.short_seen[box] = True + self.imported_state = exported_state + self.inputargs = targetop.getarglist() + self.initial_virtual_state = target_token.virtual_state + self.start_resumedescr = target_token.start_resumedescr + + seen = {} + for box in self.inputargs: + if box in seen: + continue + seen[box] = True + preamble_value = exported_state.optimizer.getvalue(box) + value = self.optimizer.getvalue(box) + value.import_from(preamble_value, self.optimizer) + + for newbox, oldbox in self.short_boxes.aliases.items(): + self.optimizer.make_equal_to(newbox, self.optimizer.getvalue(oldbox)) + + # Setup the state of the new optimizer by emiting the + # short operations and discarding the result + self.optimizer.emitting_dissabled = True + for op in exported_state.inputarg_setup_ops: + self.optimizer.send_extra_operation(op) + seen = {} + + for op in self.short_boxes.operations(): + self.ensure_short_op_emitted(op, self.optimizer, seen) + if op and op.result: + preamble_value = exported_state.optimizer.getvalue(op.result) + value = self.optimizer.getvalue(op.result) + if not value.is_virtual(): + imp = ValueImporter(self, preamble_value, op) + self.optimizer.importable_values[value] = imp + newvalue = self.optimizer.getvalue(op.result) + newresult = newvalue.get_key_box() + if newresult is not op.result and not newvalue.is_constant(): + self.short_boxes.alias(newresult, op.result) + op = ResOperation(rop.SAME_AS, [op.result], newresult) + self.optimizer._newoperations = [op] + self.optimizer._newoperations # XXX + #self.optimizer.getvalue(op.result).box = op.result # FIXME: HACK!!! 
+ self.optimizer.flush() + self.optimizer.emitting_dissabled = False + + for box, key_box in exported_state.aliased_vrituals.items(): + self.optimizer.make_equal_to(box, self.getvalue(key_box)) + + def close_bridge(self, start_label): + inputargs = self.inputargs + short_jumpargs = inputargs[:] + + # We dont need to inline the short preamble we are creating as we are conneting + # the bridge to a different trace with a different short preamble + self.short_inliner = None + + newoperations = self.optimizer.get_newoperations() + self.boxes_created_this_iteration = {} + i = 0 + while newoperations[i].getopnum() != rop.LABEL: + i += 1 + while i < len(newoperations): + op = newoperations[i] + self.boxes_created_this_iteration[op.result] = True + args = op.getarglist() + if op.is_guard(): + args = args + op.getfailargs() + for a in args: + self.import_box(a, inputargs, short_jumpargs, []) + i += 1 + newoperations = self.optimizer.get_newoperations() + self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=start_label.getdescr())) + + def close_loop(self, jumpop): + virtual_state = self.initial_virtual_state + short_inputargs = self.short[0].getarglist() + constant_inputargs = self.imported_state.constant_inputargs + inputargs = self.inputargs + short_jumpargs = inputargs[:] + + # Construct jumpargs from the virtual state + original_jumpargs = jumpop.getarglist()[:] + values = [self.getvalue(arg) for arg in jumpop.getarglist()] + try: + jumpargs = virtual_state.make_inputargs(values, self.optimizer) + except BadVirtualState: + raise InvalidLoop + jumpop.initarglist(jumpargs) + + # Inline the short preamble at the end of the loop + jmp_to_short_args = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True) + assert len(short_inputargs) == len(jmp_to_short_args) + args = {} + for i in range(len(short_inputargs)): + if short_inputargs[i] in args: + if args[short_inputargs[i]] != jmp_to_short_args[i]: + raise InvalidLoop + args[short_inputargs[i]] = jmp_to_short_args[i] + self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) + for box, const in constant_inputargs.items(): + self.short_inliner.argmap[box] = const + for op in self.short[1:]: + newop = self.short_inliner.inline_op(op) + self.optimizer.send_extra_operation(newop) + + # Import boxes produced in the preamble but used in the loop + newoperations = self.optimizer.get_newoperations() + self.boxes_created_this_iteration = {} + i = j = 0 + while newoperations[i].getopnum() != rop.LABEL: + i += 1 + while i < len(newoperations) or j < len(jumpargs): + if i == len(newoperations): + while j < len(jumpargs): + a = jumpargs[j] + if self.optimizer.loop.logops: + debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) + self.import_box(a, inputargs, short_jumpargs, jumpargs) + j += 1 + else: + op = newoperations[i] + + self.boxes_created_this_iteration[op.result] = True + args = op.getarglist() + if op.is_guard(): + args = args + op.getfailargs() + + if self.optimizer.loop.logops: + debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) + for a in args: + if self.optimizer.loop.logops: + debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) + self.import_box(a, inputargs, short_jumpargs, jumpargs) + i += 1 + newoperations = self.optimizer.get_newoperations() + + jumpop.initarglist(jumpargs) + self.optimizer.send_extra_operation(jumpop) + self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=jumpop.getdescr())) + + # Verify that the virtual state at the end of the loop is 
one + # that is compatible with the virtual state at the start of the loop + modifier = VirtualStateAdder(self.optimizer) + final_virtual_state = modifier.get_virtual_state(original_jumpargs) + debug_start('jit-log-virtualstate') + virtual_state.debug_print('Closed loop with ') + bad = {} + if not virtual_state.generalization_of(final_virtual_state, bad): + # We ended up with a virtual state that is not compatible + # and we are thus unable to jump to the start of the loop + final_virtual_state.debug_print("Bad virtual state at end of loop, ", + bad) + debug_stop('jit-log-virtualstate') + raise InvalidLoop - values = [self.getvalue(arg) for arg in jump_args] - inputargs = virtual_state.make_inputargs(values, self.optimizer) - short_inputargs = virtual_state.make_inputargs(values, self.optimizer, - keyboxes=True) + debug_stop('jit-log-virtualstate') - self.constant_inputargs = {} - for box in jump_args: - const = self.get_constant_box(box) - if const: - self.constant_inputargs[box] = const + maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards + if self.optimizer.emitted_guards > maxguards: + target_token = jumpop.getdescr() + assert isinstance(target_token, TargetToken) + target_token.targeting_jitcell_token.retraced_count = sys.maxint + + def finilize_short_preamble(self, start_label): + short = self.short + assert short[-1].getopnum() == rop.JUMP + target_token = start_label.getdescr() + assert isinstance(target_token, TargetToken) - sb = ShortBoxes(self.optimizer, inputargs + self.constant_inputargs.keys()) - self.short_boxes = sb + # Turn guards into conditional jumps to the preamble + for i in range(len(short)): + op = short[i] + if op.is_guard(): + op = op.clone() + op.setfailargs(None) + descr = target_token.start_resumedescr.clone_if_mutable() + op.setdescr(descr) + short[i] = op + + # Clone ops and boxes to get private versions and + short_inputargs = short[0].getarglist() + boxmap = {} + newargs = [None] * len(short_inputargs) + for i in range(len(short_inputargs)): + a = short_inputargs[i] + if a in boxmap: + newargs[i] = boxmap[a] + else: + newargs[i] = a.clonebox() + boxmap[a] = newargs[i] + inliner = Inliner(short_inputargs, newargs) + for box, const in self.imported_state.constant_inputargs.items(): + inliner.argmap[box] = const + for i in range(len(short)): + short[i] = inliner.inline_op(short[i]) + + target_token.start_resumedescr = self.start_resumedescr.clone_if_mutable() + inliner.inline_descr_inplace(target_token.start_resumedescr) + + # Forget the values to allow them to be freed + for box in short[0].getarglist(): + box.forget_value() + for op in short: + if op.result: + op.result.forget_value() + target_token.short_preamble = self.short + target_token.exported_state = None + + + def FIXME_old_stuff(): preamble_optimizer = self.optimizer loop.preamble.quasi_immutable_deps = ( self.optimizer.quasi_immutable_deps) self.optimizer = self.optimizer.new() loop.quasi_immutable_deps = self.optimizer.quasi_immutable_deps - logops = self.optimizer.loop.logops - if logops: - args = ", ".join([logops.repr_of_arg(arg) for arg in inputargs]) - debug_print('inputargs: ' + args) - args = ", ".join([logops.repr_of_arg(arg) for arg in short_inputargs]) - debug_print('short inputargs: ' + args) - self.short_boxes.debug_print(logops) - - - # Force virtuals amoung the jump_args of the preamble to get the - # operations needed to setup the proper state of those virtuals - # in the peeled loop - inputarg_setup_ops = [] - 
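# Aside: finilize_short_preamble above gives the short preamble private
# boxes: every input box is cloned exactly once, and repeated occurrences
# reuse the same clone via a boxmap.  A minimal sketch (not from the
# changeset) of that consistent renaming over plain strings:

def clone_args(args, clonebox):
    boxmap = {}
    newargs = []
    for a in args:
        if a not in boxmap:
            boxmap[a] = clonebox(a)   # first occurrence: make the clone
        newargs.append(boxmap[a])     # later occurrences reuse it
    return newargs

print(clone_args(['p0', 'i1', 'p0'], lambda a: a + "_clone"))
# ['p0_clone', 'i1_clone', 'p0_clone']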
preamble_optimizer.clear_newoperations() - seen = {} - for box in inputargs: - if box in seen: - continue - seen[box] = True - preamble_value = preamble_optimizer.getvalue(box) - value = self.optimizer.getvalue(box) - value.import_from(preamble_value, self.optimizer) - for box in short_inputargs: - if box in seen: - continue - seen[box] = True - value = preamble_optimizer.getvalue(box) - value.force_box(preamble_optimizer) - inputarg_setup_ops += preamble_optimizer.get_newoperations() - - # Setup the state of the new optimizer by emiting the - # short preamble operations and discarding the result - self.optimizer.emitting_dissabled = True - for op in inputarg_setup_ops: - self.optimizer.send_extra_operation(op) - seen = {} - for op in self.short_boxes.operations(): - self.ensure_short_op_emitted(op, self.optimizer, seen) - if op and op.result: - preamble_value = preamble_optimizer.getvalue(op.result) - value = self.optimizer.getvalue(op.result) - if not value.is_virtual(): - imp = ValueImporter(self, preamble_value, op) - self.optimizer.importable_values[value] = imp - newresult = self.optimizer.getvalue(op.result).get_key_box() - if newresult is not op.result: - self.short_boxes.alias(newresult, op.result) - self.optimizer.flush() - self.optimizer.emitting_dissabled = False - - initial_inputargs_len = len(inputargs) - self.inliner = Inliner(loop.inputargs, jump_args) - - - short = self.inline(inputargs, self.cloned_operations, - loop.inputargs, short_inputargs, - virtual_state) loop.inputargs = inputargs args = [preamble_optimizer.getvalue(self.short_boxes.original(a)).force_box(preamble_optimizer)\ @@ -241,149 +421,7 @@ loop.preamble.token.retraced_count = sys.maxint if short: - assert short[-1].getopnum() == rop.JUMP - short[-1].setdescr(loop.token) - - # Turn guards into conditional jumps to the preamble - for i in range(len(short)): - op = short[i] - if op.is_guard(): - op = op.clone() - op.setfailargs(None) - descr = self.start_resumedescr.clone_if_mutable() - op.setdescr(descr) - short[i] = op - - short_loop = TreeLoop('short preamble') - short_loop.inputargs = short_inputargs - short_loop.operations = short - - # Clone ops and boxes to get private versions and - boxmap = {} - newargs = [None] * len(short_loop.inputargs) - for i in range(len(short_loop.inputargs)): - a = short_loop.inputargs[i] - if a in boxmap: - newargs[i] = boxmap[a] - else: - newargs[i] = a.clonebox() - boxmap[a] = newargs[i] - inliner = Inliner(short_loop.inputargs, newargs) - for box, const in self.constant_inputargs.items(): - inliner.argmap[box] = const - short_loop.inputargs = newargs - ops = [inliner.inline_op(op) for op in short_loop.operations] - short_loop.operations = ops - descr = self.start_resumedescr.clone_if_mutable() - inliner.inline_descr_inplace(descr) - short_loop.start_resumedescr = descr - - assert isinstance(loop.preamble.token, LoopToken) - if loop.preamble.token.short_preamble: - loop.preamble.token.short_preamble.append(short_loop) - else: - loop.preamble.token.short_preamble = [short_loop] - short_loop.virtual_state = virtual_state - - # Forget the values to allow them to be freed - for box in short_loop.inputargs: - box.forget_value() - for op in short_loop.operations: - if op.result: - op.result.forget_value() - - def inline(self, inputargs, loop_operations, loop_args, short_inputargs, virtual_state): - inliner = self.inliner - - short_jumpargs = inputargs[:] - - short = self.short = [] - short_seen = self.short_seen = {} - for box, const in self.constant_inputargs.items(): - 
short_seen[box] = True - - # This loop is equivalent to the main optimization loop in - # Optimizer.propagate_all_forward - jumpop = None - for newop in loop_operations: - newop = inliner.inline_op(newop, clone=False) - if newop.getopnum() == rop.JUMP: - jumpop = newop - break - - #self.optimizer.first_optimization.propagate_forward(newop) - self.optimizer.send_extra_operation(newop) - - self.boxes_created_this_iteration = {} - - assert jumpop - original_jumpargs = jumpop.getarglist()[:] - values = [self.getvalue(arg) for arg in jumpop.getarglist()] - jumpargs = virtual_state.make_inputargs(values, self.optimizer) - jumpop.initarglist(jumpargs) - jmp_to_short_args = virtual_state.make_inputargs(values, self.optimizer, - keyboxes=True) - self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) - - for box, const in self.constant_inputargs.items(): - self.short_inliner.argmap[box] = const - - for op in short: - newop = self.short_inliner.inline_op(op) - self.optimizer.send_extra_operation(newop) - - newoperations = self.optimizer.get_newoperations() - - i = j = 0 - while i < len(newoperations) or j < len(jumpargs): - if i == len(newoperations): - while j < len(jumpargs): - a = jumpargs[j] - if self.optimizer.loop.logops: - debug_print('J: ' + self.optimizer.loop.logops.repr_of_arg(a)) - self.import_box(a, inputargs, short, short_jumpargs, - jumpargs, short_seen) - j += 1 - else: - op = newoperations[i] - - self.boxes_created_this_iteration[op.result] = True - args = op.getarglist() - if op.is_guard(): - args = args + op.getfailargs() - - if self.optimizer.loop.logops: - debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) - for a in args: - if self.optimizer.loop.logops: - debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) - self.import_box(a, inputargs, short, short_jumpargs, - jumpargs, short_seen) - i += 1 - newoperations = self.optimizer.get_newoperations() - - jumpop.initarglist(jumpargs) - self.optimizer.send_extra_operation(jumpop) - short.append(ResOperation(rop.JUMP, short_jumpargs, None)) - - modifier = VirtualStateAdder(self.optimizer) - final_virtual_state = modifier.get_virtual_state(original_jumpargs) - debug_start('jit-log-virtualstate') - virtual_state.debug_print('Closed loop with ') - bad = {} - if not virtual_state.generalization_of(final_virtual_state, bad): - # We ended up with a virtual state that is not compatible - # and we are thus unable to jump to the start of the loop - # XXX Is it possible to end up here? If so, consider: - # - Fallback on having the preamble jump to itself? - # - Would virtual_state.generate_guards make sense here? 
- final_virtual_state.debug_print("Bad virtual state at end of loop, ", - bad) - debug_stop('jit-log-virtualstate') - raise InvalidLoop - debug_stop('jit-log-virtualstate') - - return short + pass def ensure_short_op_emitted(self, op, optimizer, seen): if op is None: @@ -399,19 +437,18 @@ guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) optimizer.send_extra_operation(guard) - def add_op_to_short(self, op, short, short_seen, emit=True, guards_needed=False): + def add_op_to_short(self, op, emit=True, guards_needed=False): if op is None: return None - if op.result is not None and op.result in short_seen: - if emit: + if op.result is not None and op.result in self.short_seen: + if emit and self.short_inliner: return self.short_inliner.inline_arg(op.result) else: return None for a in op.getarglist(): - if not isinstance(a, Const) and a not in short_seen: - self.add_op_to_short(self.short_boxes.producer(a), short, short_seen, - emit, guards_needed) + if not isinstance(a, Const) and a not in self.short_seen: + self.add_op_to_short(self.short_boxes.producer(a), emit, guards_needed) if op.is_guard(): descr = self.start_resumedescr.clone_if_mutable() op.setdescr(descr) @@ -421,9 +458,9 @@ else: value_guards = [] - short.append(op) - short_seen[op.result] = True - if emit: + self.short.append(op) + self.short_seen[op.result] = True + if emit and self.short_inliner: newop = self.short_inliner.inline_op(op) self.optimizer.send_extra_operation(newop) else: @@ -432,23 +469,22 @@ if op.is_ovf(): # FIXME: ensure that GUARD_OVERFLOW:ed ops not end up here guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) - self.add_op_to_short(guard, short, short_seen, emit, guards_needed) + self.add_op_to_short(guard, emit, guards_needed) for guard in value_guards: - self.add_op_to_short(guard, short, short_seen, emit, guards_needed) + self.add_op_to_short(guard, emit, guards_needed) if newop: return newop.result return None - def import_box(self, box, inputargs, short, short_jumpargs, - jumpargs, short_seen): + def import_box(self, box, inputargs, short_jumpargs, jumpargs): if isinstance(box, Const) or box in inputargs: return if box in self.boxes_created_this_iteration: return short_op = self.short_boxes.producer(box) - newresult = self.add_op_to_short(short_op, short, short_seen) + newresult = self.add_op_to_short(short_op) short_jumpargs.append(short_op.result) inputargs.append(box) @@ -456,98 +492,94 @@ if box in self.optimizer.values: box = self.optimizer.values[box].force_box(self.optimizer) jumpargs.append(box) - -class OptInlineShortPreamble(Optimization): - def __init__(self, retraced): - self.retraced = retraced + def jump_to_already_compiled_trace(self, jumpop): + assert jumpop.getopnum() == rop.JUMP + cell_token = jumpop.getdescr() - def new(self): - return OptInlineShortPreamble(self.retraced) + assert isinstance(cell_token, JitCellToken) + if not cell_token.target_tokens: + return False - def propagate_forward(self, op): - if op.getopnum() == rop.JUMP: - loop_token = op.getdescr() - assert isinstance(loop_token, LoopToken) - short = loop_token.short_preamble - if short: - args = op.getarglist() - modifier = VirtualStateAdder(self.optimizer) - virtual_state = modifier.get_virtual_state(args) - debug_start('jit-log-virtualstate') - virtual_state.debug_print("Looking for ") + if not self.inline_short_preamble: + assert cell_token.target_tokens[0].virtual_state is None + jumpop.setdescr(cell_token.target_tokens[0]) + self.optimizer.send_extra_operation(jumpop) + return True - for sh in short: - ok = 
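# Aside: jump_to_already_compiled_trace above walks the TargetTokens already
# compiled for a JitCellToken and jumps to the first one whose recorded
# virtual state either generalizes the current state directly or can be made
# to match by emitting extra guards; if none matches, the trace keeps going
# and a retrace is produced.  A toy matcher (not from the changeset) over
# plain state descriptors:

def find_target(targets, current, generalizes, generate_guards):
    for target in targets:
        if generalizes(target, current):
            return target, []              # direct match, no extra guards
        guards = generate_guards(target, current)
        if guards is not None:
            return target, guards          # match after adding guards
    return None, []                        # no match: caller retraces

targets = ['nonnull', 'known_class Node']
generalizes = lambda t, c: t == 'nonnull'
generate_guards = lambda t, c: ['guard_class'] if t == c else None
print(find_target(targets, 'known_class Node', generalizes, generate_guards))
# ('nonnull', [])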
False - extra_guards = [] + args = jumpop.getarglist() + modifier = VirtualStateAdder(self.optimizer) + virtual_state = modifier.get_virtual_state(args) + debug_start('jit-log-virtualstate') + virtual_state.debug_print("Looking for ") - bad = {} - debugmsg = 'Did not match ' - if sh.virtual_state.generalization_of(virtual_state, bad): - ok = True - debugmsg = 'Matched ' - else: - try: - cpu = self.optimizer.cpu - sh.virtual_state.generate_guards(virtual_state, - args, cpu, - extra_guards) + for target in cell_token.target_tokens: + if not target.virtual_state: + continue + ok = False + extra_guards = [] - ok = True - debugmsg = 'Guarded to match ' - except InvalidLoop: - pass - sh.virtual_state.debug_print(debugmsg, bad) - - if ok: - debug_stop('jit-log-virtualstate') + bad = {} + debugmsg = 'Did not match ' + if target.virtual_state.generalization_of(virtual_state, bad): + ok = True + debugmsg = 'Matched ' + else: + try: + cpu = self.optimizer.cpu + target.virtual_state.generate_guards(virtual_state, + args, cpu, + extra_guards) - values = [self.getvalue(arg) - for arg in op.getarglist()] - args = sh.virtual_state.make_inputargs(values, self.optimizer, - keyboxes=True) - inliner = Inliner(sh.inputargs, args) - - for guard in extra_guards: - if guard.is_guard(): - descr = sh.start_resumedescr.clone_if_mutable() - inliner.inline_descr_inplace(descr) - guard.setdescr(descr) - self.emit_operation(guard) - - try: - for shop in sh.operations: - newop = inliner.inline_op(shop) - self.emit_operation(newop) - except InvalidLoop: - debug_print("Inlining failed unexpectedly", - "jumping to preamble instead") - self.emit_operation(op) - return + ok = True + debugmsg = 'Guarded to match ' + except InvalidLoop: + pass + target.virtual_state.debug_print(debugmsg, bad) + + if ok: debug_stop('jit-log-virtualstate') - retraced_count = loop_token.retraced_count - limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit - if not self.retraced and retraced_count self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -180,10 +188,15 @@ self.arraydescr is other.arraydescr) def enum_forced_boxes(self, boxes, value, optimizer): - assert isinstance(value, virtualize.VArrayValue) - assert value.is_virtual() + if not isinstance(value, virtualize.VArrayValue): + raise BadVirtualState + if not value.is_virtual(): + raise BadVirtualState for i in range(len(self.fieldstate)): - v = value._items[i] + try: + v = value._items[i] + except IndexError: + raise BadVirtualState s = self.fieldstate[i] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -248,12 +261,19 @@ s.enum(virtual_state) def enum_forced_boxes(self, boxes, value, optimizer): - assert isinstance(value, virtualize.VArrayStructValue) - assert value.is_virtual() + if not isinstance(value, virtualize.VArrayStructValue): + raise BadVirtualState + if not value.is_virtual(): + raise BadVirtualState p = 0 for i in range(len(self.fielddescrs)): for j in range(len(self.fielddescrs[i])): - v = value._items[i][self.fielddescrs[i][j]] + try: + v = value._items[i][self.fielddescrs[i][j]] + except IndexError: + raise BadVirtualState + except KeyError: + raise BadVirtualState s = self.fieldstate[p] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -546,18 +566,27 @@ self.aliases = {} self.rename = {} self.optimizer = optimizer - for box in surviving_boxes: - self.potential_ops[box] = None - optimizer.produce_potential_short_preamble_ops(self) - self.short_boxes = {} - self.short_boxes_in_production = 
{} + if surviving_boxes is not None: + for box in surviving_boxes: + self.potential_ops[box] = None + optimizer.produce_potential_short_preamble_ops(self) - for box in self.potential_ops.keys(): - try: - self.produce_short_preamble_box(box) - except BoxNotProducable: - pass + self.short_boxes = {} + self.short_boxes_in_production = {} + + for box in self.potential_ops.keys(): + try: + self.produce_short_preamble_box(box) + except BoxNotProducable: + pass + + def clone(self): + sb = ShortBoxes(self.optimizer, None) + sb.aliases.update(self.aliases) + sb.short_boxes = {} + sb.short_boxes.update(self.short_boxes) + return sb def prioritized_alternatives(self, box): if box not in self.alternatives: @@ -598,6 +627,7 @@ newbox = newop.result = op.result.clonebox() self.short_boxes[newop.result] = newop value = self.optimizer.getvalue(box) + self.optimizer.emit_operation(ResOperation(rop.SAME_AS, [box], newbox)) self.optimizer.make_equal_to(newbox, value) else: self.short_boxes[box] = op diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -8,7 +8,7 @@ from pypy.jit.metainterp import history, compile, resume from pypy.jit.metainterp.history import Const, ConstInt, ConstPtr, ConstFloat -from pypy.jit.metainterp.history import Box +from pypy.jit.metainterp.history import Box, TargetToken from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp import executor from pypy.jit.metainterp.logger import Logger @@ -22,7 +22,6 @@ from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr from pypy.jit.codewriter import heaptracker from pypy.jit.metainterp.optimizeopt.util import args_dict_box -from pypy.jit.metainterp.optimize import RetraceLoop # ____________________________________________________________ @@ -1567,10 +1566,17 @@ self.portal_trace_positions = [] self.free_frames_list = [] self.last_exc_value_box = None - self.retracing_loop_from = None + self.partial_trace = None + self.retracing_from = -1 self.call_pure_results = args_dict_box() self.heapcache = HeapCache() + def retrace_needed(self, trace): + self.partial_trace = trace + self.retracing_from = len(self.history.operations) - 1 + self.heapcache.reset() + + def perform_call(self, jitcode, boxes, greenkey=None): # causes the metainterp to enter the given subfunction f = self.newframe(jitcode, greenkey) @@ -1937,14 +1943,9 @@ # that failed; # - if self.resumekey is a ResumeFromInterpDescr, it starts directly # from the interpreter. - if not self.retracing_loop_from: - try: - self.compile_bridge(live_arg_boxes) - except RetraceLoop: - start = len(self.history.operations) - self.current_merge_points.append((live_arg_boxes, start)) - self.retracing_loop_from = RetraceState(self, live_arg_boxes) - return + if not self.partial_trace: + # FIXME: Support a retrace to be a bridge as well as a loop + self.compile_trace(live_arg_boxes, resumedescr) # raises in case it works -- which is the common case, hopefully, # at least for bridges starting from a guard. @@ -1966,14 +1967,10 @@ else: # Found! Compile it as a loop. 
# raises in case it works -- which is the common case - if self.retracing_loop_from and \ - self.retracing_loop_from.merge_point == j: - bridge_arg_boxes = self.retracing_loop_from.live_arg_boxes - self.compile_bridge_and_loop(original_boxes, \ - live_arg_boxes, start, - bridge_arg_boxes, resumedescr) - else: - self.compile(original_boxes, live_arg_boxes, start, resumedescr) + if self.partial_trace: + if start != self.retracing_from: + raise SwitchToBlackhole(ABORT_BAD_LOOP) # For now + self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr) # creation of the loop was cancelled! self.staticdata.log('cancelled, tracing more...') #self.staticdata.log('cancelled, stopping tracing') @@ -2029,54 +2026,59 @@ from pypy.jit.metainterp.resoperation import opname raise NotImplementedError(opname[opnum]) - def get_compiled_merge_points(self, greenkey): - """Get the list of looptokens corresponding to the greenkey. - Turns the (internal) list of weakrefs into regular refs. - """ + def get_procedure_token(self, greenkey): cell = self.jitdriver_sd.warmstate.jit_cell_at_key(greenkey) - return cell.get_compiled_merge_points() + return cell.get_procedure_token() + + def compile_loop(self, original_boxes, live_arg_boxes, start, start_resumedescr): + num_green_args = self.jitdriver_sd.num_green_args + greenkey = original_boxes[:num_green_args] + if not self.partial_trace: + assert self.get_procedure_token(greenkey) is None or \ + self.get_procedure_token(greenkey).target_tokens is None + if self.partial_trace: + target_token = compile.compile_retrace(self, greenkey, start, + original_boxes[num_green_args:], + live_arg_boxes[num_green_args:], + start_resumedescr, self.partial_trace, + self.resumekey) + else: + target_token = compile.compile_loop(self, greenkey, start, + original_boxes[num_green_args:], + live_arg_boxes[num_green_args:], + start_resumedescr) + if target_token is not None: + assert isinstance(target_token, TargetToken) + self.jitdriver_sd.warmstate.attach_procedure_to_interp(greenkey, target_token.targeting_jitcell_token) + self.staticdata.stats.add_jitcell_token(target_token.targeting_jitcell_token) - def set_compiled_merge_points(self, greenkey, looptokens): - cell = self.jitdriver_sd.warmstate.jit_cell_at_key(greenkey) - cell.set_compiled_merge_points(looptokens) - def compile(self, original_boxes, live_arg_boxes, start, start_resumedescr): - num_green_args = self.jitdriver_sd.num_green_args - original_inputargs = self.history.inputargs - self.history.inputargs = original_boxes[num_green_args:] - greenkey = original_boxes[:num_green_args] - old_loop_tokens = self.get_compiled_merge_points(greenkey) - self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None) - loop_token = compile.compile_new_loop(self, old_loop_tokens, - greenkey, start, start_resumedescr) - if loop_token is not None: # raise if it *worked* correctly - self.set_compiled_merge_points(greenkey, old_loop_tokens) + if target_token is not None: # raise if it *worked* correctly self.history.inputargs = None self.history.operations = None - raise GenerateMergePoint(live_arg_boxes, loop_token) + assert isinstance(target_token, TargetToken) + raise GenerateMergePoint(live_arg_boxes, target_token.targeting_jitcell_token) - self.history.inputargs = original_inputargs - self.history.operations.pop() # remove the JUMP - - def compile_bridge(self, live_arg_boxes): + def compile_trace(self, live_arg_boxes, start_resumedescr): num_green_args = self.jitdriver_sd.num_green_args greenkey = 
live_arg_boxes[:num_green_args] - old_loop_tokens = self.get_compiled_merge_points(greenkey) - if len(old_loop_tokens) == 0: + target_jitcell_token = self.get_procedure_token(greenkey) + if not target_jitcell_token: return - #if self.resumekey.guard_opnum == rop.GUARD_CLASS: - # return # Kepp tracing for another iteration - self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None) + if not target_jitcell_token.target_tokens: + return + + self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None, + descr=target_jitcell_token) try: - target_loop_token = compile.compile_new_bridge(self, - old_loop_tokens, - self.resumekey) + target_token = compile.compile_trace(self, self.resumekey, start_resumedescr) finally: self.history.operations.pop() # remove the JUMP - if target_loop_token is not None: # raise if it *worked* correctly + if target_token is not None: # raise if it *worked* correctly self.history.inputargs = None self.history.operations = None - raise GenerateMergePoint(live_arg_boxes, target_loop_token) + assert isinstance(target_token, TargetToken) + raise GenerateMergePoint(live_arg_boxes, target_token.targeting_jitcell_token) def compile_bridge_and_loop(self, original_boxes, live_arg_boxes, start, bridge_arg_boxes, start_resumedescr): @@ -2137,21 +2139,21 @@ loop_tokens = sd.loop_tokens_done_with_this_frame_float else: assert False - self.history.record(rop.JUMP, exits, None) - target_loop_token = compile.compile_new_bridge(self, loop_tokens, - self.resumekey) - if target_loop_token is not loop_tokens[0]: + # FIXME: kill TerminatingLoopToken? + # FIXME: can we call compile_trace? + token = loop_tokens[0].finishdescr + self.history.record(rop.FINISH, exits, None, descr=token) + target_token = compile.compile_trace(self, self.resumekey) + if target_token is not token: compile.giveup() def compile_exit_frame_with_exception(self, valuebox): self.gen_store_back_in_virtualizable() - # temporarily put a JUMP to a pseudo-loop - self.history.record(rop.JUMP, [valuebox], None) sd = self.staticdata - loop_tokens = sd.loop_tokens_exit_frame_with_exception_ref - target_loop_token = compile.compile_new_bridge(self, loop_tokens, - self.resumekey) - if target_loop_token is not loop_tokens[0]: + token = sd.loop_tokens_exit_frame_with_exception_ref[0].finishdescr + self.history.record(rop.FINISH, [valuebox], None, descr=token) + target_token = compile.compile_trace(self, self.resumekey) + if target_token is not token: compile.giveup() @specialize.arg(1) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -369,6 +369,8 @@ 'FINISH/*d', '_FINAL_LAST', + 'LABEL/*d', + '_GUARD_FIRST', '_GUARD_FOLDABLE_FIRST', 'GUARD_TRUE/1d', diff --git a/pypy/jit/metainterp/test/support.py b/pypy/jit/metainterp/test/support.py --- a/pypy/jit/metainterp/test/support.py +++ b/pypy/jit/metainterp/test/support.py @@ -16,15 +16,16 @@ from pypy.jit.codewriter import support class FakeJitCell(object): - __compiled_merge_points = [] - def get_compiled_merge_points(self): - return self.__compiled_merge_points[:] - def set_compiled_merge_points(self, lst): - self.__compiled_merge_points = lst + __product_token = None + def get_procedure_token(self): + return self.__product_token + def set_procedure_token(self, token): + self.__product_token = token class FakeWarmRunnerState(object): - def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): - pass + def 
attach_procedure_to_interp(self, greenkey, procedure_token): + cell = self.jit_cell_at_key(greenkey) + cell.set_procedure_token(procedure_token) def helper_func(self, FUNCPTR, func): from pypy.rpython.annlowlevel import llhelper @@ -132,16 +133,14 @@ def _run_with_machine_code(testself, args): metainterp = testself.metainterp num_green_args = metainterp.jitdriver_sd.num_green_args - loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) - if len(loop_tokens) != 1: - return NotImplemented + procedure_token = metainterp.get_procedure_token(args[:num_green_args]) # a loop was successfully created by _run_with_pyjitpl(); call it cpu = metainterp.cpu for i in range(len(args) - num_green_args): x = args[num_green_args + i] typecode = history.getkind(lltype.typeOf(x)) set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(loop_tokens[0]) + faildescr = cpu.execute_token(procedure_token) assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') if metainterp.jitdriver_sd.result_type == history.INT: return cpu.get_latest_value_int(0) @@ -160,23 +159,31 @@ def check_simple_loop(self, expected=None, **check): get_stats().check_simple_loop(expected=expected, **check) - def check_loop_count(self, count): - """NB. This is a hack; use check_tree_loop_count() or - check_enter_count() for the real thing. - This counts as 1 every bridge in addition to every loop; and it does - not count at all the entry bridges from interpreter, although they - are TreeLoops as well.""" + + + def check_trace_count(self, count): # was check_loop_count + # The number of traces compiled assert get_stats().compiled_count == count - def check_tree_loop_count(self, count): - assert len(get_stats().loops) == count - def check_loop_count_at_most(self, count): + def check_trace_count_at_most(self, count): assert get_stats().compiled_count <= count + + def check_jitcell_token_count(self, count): # was check_tree_loop_count + assert len(get_stats().jitcell_token_wrefs) == count + + def check_target_token_count(self, count): + tokens = get_stats().get_all_jitcell_tokens() + n = sum ([len(t.target_tokens) for t in tokens]) + assert n == count + def check_enter_count(self, count): assert get_stats().enter_count == count def check_enter_count_at_most(self, count): assert get_stats().enter_count <= count + def check_jumps(self, maxcount): + return # FIXME assert get_stats().exec_jumps <= maxcount + def check_aborted_count(self, count): assert get_stats().aborted_count == count def check_aborted_count_at_least(self, count): @@ -219,7 +226,7 @@ # this can be used after interp_operations if expected is not None: expected = dict(expected) - expected['jump'] = 1 + expected['finish'] = 1 self.metainterp.staticdata.stats.check_history(expected, **isns) diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -66,7 +66,7 @@ res = self.interp_operations(f, [8, 98]) assert res == 110 - def test_loop(self): + def test_loop_1(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) def f(x, y): res = 0 @@ -78,19 +78,20 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 42 - self.check_loop_count(1) - self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, 'guard_true': 2, 'int_sub': 2}) + self.check_trace_count(1) + self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, + 'guard_true': 2, 'int_sub': 2}) if self.basic: found = 0 - for op in 
get_stats().loops[0]._all_operations(): + for op in get_stats().get_all_loops()[0]._all_operations(): if op.getopname() == 'guard_true': liveboxes = op.getfailargs() assert len(liveboxes) == 3 for box in liveboxes: assert isinstance(box, history.BoxInt) found += 1 - assert found == 1 + assert found == 2 def test_loop_variant_mul1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) @@ -106,7 +107,7 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 1323 - self.check_loop_count(1) + self.check_trace_count(1) self.check_simple_loop(int_mul=1) def test_loop_variant_mul_ovf(self): @@ -123,7 +124,7 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 1323 - self.check_loop_count(1) + self.check_trace_count(1) self.check_simple_loop(int_mul_ovf=1) def test_loop_invariant_mul1(self): @@ -138,9 +139,9 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 252 - self.check_loop_count(1) + self.check_trace_count(1) self.check_simple_loop(int_mul=0) - self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, + self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) @@ -157,67 +158,63 @@ return res res = self.meta_interp(f, [6, 7]) assert res == 308 - self.check_loop_count(1) + self.check_trace_count(1) self.check_simple_loop(int_mul_ovf=0) - self.check_resops({'jump': 2, 'int_lshift': 2, 'int_gt': 2, + self.check_resops({'jump': 1, 'int_lshift': 2, 'int_gt': 2, 'int_mul_ovf': 1, 'int_add': 4, 'guard_true': 2, 'guard_no_overflow': 1, 'int_sub': 2}) def test_loop_invariant_mul_bridge1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x', 'n']) + def f(x, y, n): res = 0 while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) + myjitdriver.can_enter_jit(x=x, y=y, n=n, res=res) + myjitdriver.jit_merge_point(x=x, y=y, n=n, res=res) res += x * x - if y<16: + if y 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) + myjitdriver.can_enter_jit(x=x, y=y, res=res, n=n) + myjitdriver.jit_merge_point(x=x, y=y, res=res, n=n) res += x * x - if y<16: + if y 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) + myjitdriver.can_enter_jit(x=x, y=y, res=res, n=n) + myjitdriver.jit_merge_point(x=x, y=y, res=res, n=n) z = x * x res += z - if y<16: + if y>10]) assert res == 11 - self.check_tree_loop_count(2) + self.check_jitcell_token_count(1) def test_wrap_around_sub(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) @@ -2086,7 +2083,7 @@ return n res = self.meta_interp(f, [10-sys.maxint]) assert res == 12 - self.check_tree_loop_count(2) + self.check_jitcell_token_count(1) def test_caching_setfield(self): myjitdriver = JitDriver(greens = [], reds = ['sa', 'i', 'n', 'a', 'node']) @@ -2606,10 +2603,12 @@ i += 1 return sa assert self.meta_interp(f, [20, 2]) == f(20, 2) - self.check_tree_loop_count(4) + self.check_jitcell_token_count(1) + self.check_target_token_count(4) assert self.meta_interp(f, [20, 3]) == f(20, 3) - self.check_tree_loop_count(5) - + self.check_jitcell_token_count(1) + self.check_target_token_count(5) + def test_max_retrace_guards(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a']) @@ -2625,10 +2624,11 @@ i += 1 return sa assert self.meta_interp(f, [20, 1]) == f(20, 1) - self.check_tree_loop_count(2) + self.check_jitcell_token_count(1) + 
self.check_target_token_count(2) assert self.meta_interp(f, [20, 10]) == f(20, 10) - self.check_tree_loop_count(5) - + self.check_jitcell_token_count(1) + self.check_target_token_count(5) def test_retrace_limit_with_extra_guards(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a', @@ -2648,11 +2648,13 @@ i += 1 return sa assert self.meta_interp(f, [20, 2]) == f(20, 2) - self.check_tree_loop_count(4) + self.check_jitcell_token_count(1) + self.check_target_token_count(4) assert self.meta_interp(f, [20, 3]) == f(20, 3) - self.check_tree_loop_count(5) - - def test_retrace_ending_up_retrazing_another_loop(self): + self.check_jitcell_token_count(1) + self.check_target_token_count(5) + + def test_retrace_ending_up_retracing_another_loop(self): myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'i', 'sa']) bytecode = "0+sI0+SI" @@ -2694,11 +2696,9 @@ # The attempts of retracing first loop will end up retracing the # second and thus fail 5 times, saturating the retrace_count. Instead a # bridge back to the preamble of the first loop is produced. A guard in - # this bridge is later traced resulting in a retrace of the second loop. - # Thus we end up with: - # 1 preamble and 1 specialized version of first loop - # 1 preamble and 2 specialized version of second loop - self.check_tree_loop_count(2 + 3) + # this bridge is later traced resulting in a failed attempt of retracing + # the second loop. + self.check_trace_count(8) # FIXME: Add a gloabl retrace counter and test that we are not trying more than 5 times. @@ -2709,9 +2709,12 @@ res = self.meta_interp(g, [10]) assert res == g(10) - # 1 preamble and 6 speciealized versions of each loop - self.check_tree_loop_count(2*(1 + 6)) - + + self.check_jitcell_token_count(2) + for cell in get_stats().get_all_jitcell_tokens(): + # Initialal trace with two labels and 5 retraces + assert len(cell.target_tokens) <= 7 + def test_nested_retrace(self): myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'a', 'i', 'j', 'sa']) @@ -2748,22 +2751,33 @@ res = self.meta_interp(f, [10, 7]) assert res == f(10, 7) - self.check_tree_loop_count(4) + self.check_jitcell_token_count(2) + for cell in get_stats().get_all_jitcell_tokens(): + assert len(cell.target_tokens) == 2 def g(n): return f(n, 2) + f(n, 3) res = self.meta_interp(g, [10]) assert res == g(10) - self.check_tree_loop_count(6) - + self.check_jitcell_token_count(2) + for cell in get_stats().get_all_jitcell_tokens(): + assert len(cell.target_tokens) <= 3 def g(n): return f(n, 2) + f(n, 3) + f(n, 4) + f(n, 5) + f(n, 6) + f(n, 7) res = self.meta_interp(g, [10]) assert res == g(10) - self.check_tree_loop_count(8) + # 2 loops and one function + self.check_jitcell_token_count(3) + cnt = 0 + for cell in get_stats().get_all_jitcell_tokens(): + if cell.target_tokens is None: + cnt += 1 + else: + assert len(cell.target_tokens) <= 4 + assert cnt == 1 def test_frame_finished_during_retrace(self): class Base(object): @@ -2846,66 +2860,6 @@ assert res == -2 self.check_resops(setarrayitem_gc=2, getarrayitem_gc=1) - def test_retrace_ending_up_retracing_another_loop(self): - - myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'i', 'sa']) - bytecode = "0+sI0+SI" - def f(n): - set_param(None, 'threshold', 3) - set_param(None, 'trace_eagerness', 1) - set_param(None, 'retrace_limit', 5) - set_param(None, 'function_threshold', -1) - pc = sa = i = 0 - while pc < len(bytecode): - myjitdriver.jit_merge_point(pc=pc, n=n, sa=sa, i=i) - n = hint(n, promote=True) - op = bytecode[pc] - if op == '0': - i = 0 - elif op 
== '+': - i += 1 - elif op == 's': - sa += i - elif op == 'S': - sa += 2 - elif op == 'I': - if i < n: - pc -= 2 - myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i) - continue - pc += 1 - return sa - - def g(n1, n2): - for i in range(10): - f(n1) - for i in range(10): - f(n2) - - nn = [10, 3] - assert self.meta_interp(g, nn) == g(*nn) - - # The attempts of retracing first loop will end up retracing the - # second and thus fail 5 times, saturating the retrace_count. Instead a - # bridge back to the preamble of the first loop is produced. A guard in - # this bridge is later traced resulting in a retrace of the second loop. - # Thus we end up with: - # 1 preamble and 1 specialized version of first loop - # 1 preamble and 2 specialized version of second loop - self.check_tree_loop_count(2 + 3) - - # FIXME: Add a gloabl retrace counter and test that we are not trying more than 5 times. - - def g(n): - for i in range(n): - for j in range(10): - f(n-i) - - res = self.meta_interp(g, [10]) - assert res == g(10) - # 1 preamble and 6 speciealized versions of each loop - self.check_tree_loop_count(2*(1 + 6)) - def test_continue_tracing_with_boxes_in_start_snapshot_replaced_by_optimizer(self): myjitdriver = JitDriver(greens = [], reds = ['sa', 'n', 'a', 'b']) def f(n): @@ -3153,7 +3107,7 @@ return sa res = self.meta_interp(f, [32]) assert res == f(32) - self.check_tree_loop_count(3) + self.check_trace_count(2) def test_two_loopinvariant_arrays2(self): from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -3176,7 +3130,7 @@ return sa res = self.meta_interp(f, [32]) assert res == f(32) - self.check_tree_loop_count(3) + self.check_trace_count(2) def test_two_loopinvariant_arrays3(self): from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -3200,7 +3154,7 @@ return sa res = self.meta_interp(f, [32]) assert res == f(32) - self.check_tree_loop_count(2) + self.check_trace_count(3) def test_two_loopinvariant_arrays_boxed(self): class A(object): @@ -3371,7 +3325,7 @@ res = self.meta_interp(main, [10]) assert res == main(10) self.check_resops({'int_gt': 2, 'strlen': 2, 'guard_true': 2, - 'int_sub': 2, 'jump': 2, 'call': 2, + 'int_sub': 2, 'jump': 1, 'call': 2, 'guard_no_exception': 2, 'int_add': 4}) def test_look_inside_iff_const_getarrayitem_gc_pure(self): @@ -3508,7 +3462,7 @@ res = self.meta_interp(f, [10]) assert res == 0 - self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + self.check_resops({'jump': 1, 'guard_true': 2, 'int_gt': 2, 'int_sub': 2}) def test_virtual_opaque_ptr(self): @@ -3528,7 +3482,7 @@ return n res = self.meta_interp(f, [10]) assert res == 0 - self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + self.check_resops({'jump': 1, 'guard_true': 2, 'int_gt': 2, 'int_sub': 2}) @@ -3551,7 +3505,7 @@ res = self.meta_interp(f, [10]) assert res == 0 self.check_resops({'int_gt': 2, 'getfield_gc': 1, 'int_eq': 1, - 'guard_true': 2, 'int_sub': 2, 'jump': 2, + 'guard_true': 2, 'int_sub': 2, 'jump': 1, 'guard_false': 1}) @@ -3799,6 +3753,31 @@ x = self.interp_operations(f, [1000, 1], translationoptions=topt) assert x == 999 + def test_retracing_bridge_from_interpreter_to_finnish(self): + myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa']) + def f(n): + sa = i = 0 + while i < n: + myjitdriver.jit_merge_point(n=n, i=i, sa=sa) + n = hint(n, promote=True) + sa += 2*n + i += 1 + return sa + def g(n): + return f(n) + f(n) + f(n) + f(n) + f(10*n) + f(11*n) + res = self.meta_interp(g, [1], repeat=3) + assert res == g(1) + #self.check_jitcell_token_count(1) + 
self.check_jitcell_token_count(2) + # XXX A bridge from the interpreter to a finish is first + # constructed for n=1. It is later replaced with a trace for + # the case n=10 which is extended with a retrace for n=11 and + # finally a new bridge to finish is again traced and created + # for the case n=1. We were not able to reuse the original n=1 + # bridge as a preamble since it does not start with a + # label. The alternative would be to have all such bridges + # start with labels. I don't know which is better... + def test_ll_arraycopy(self): from pypy.rlib import rgc A = lltype.GcArray(lltype.Char) diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -1,7 +1,7 @@ from pypy.config.pypyoption import get_pypy_config -from pypy.jit.metainterp.history import LoopToken, ConstInt, History, Stats +from pypy.jit.metainterp.history import TargetToken, ConstInt, History, Stats from pypy.jit.metainterp.history import BoxInt, INT -from pypy.jit.metainterp.compile import insert_loop_token, compile_new_loop +from pypy.jit.metainterp.compile import compile_loop from pypy.jit.metainterp.compile import ResumeGuardDescr from pypy.jit.metainterp.compile import ResumeGuardCountersInt from pypy.jit.metainterp.compile import compile_tmp_callback @@ -10,23 +10,6 @@ from pypy.jit.tool.oparser import parse from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT -def test_insert_loop_token(): - # XXX this test is a bit useless now that there are no specnodes - lst = [] - # - tok1 = LoopToken() - insert_loop_token(lst, tok1) - assert lst == [tok1] - # - tok2 = LoopToken() - insert_loop_token(lst, tok2) - assert lst == [tok1, tok2] - # - tok3 = LoopToken() - insert_loop_token(lst, tok3) - assert lst == [tok1, tok2, tok3] - - class FakeCPU(object): ts = typesystem.llhelper def __init__(self): @@ -73,7 +56,7 @@ on_compile = staticmethod(lambda *args: None) on_compile_bridge = staticmethod(lambda *args: None) -def test_compile_new_loop(): +def test_compile_loop(): cpu = FakeCPU() staticdata = FakeMetaInterpStaticData() staticdata.cpu = cpu @@ -93,34 +76,26 @@ metainterp.staticdata = staticdata metainterp.cpu = cpu metainterp.history = History() - metainterp.history.operations = loop.operations[:] + metainterp.history.operations = loop.operations[:-1] metainterp.history.inputargs = loop.inputargs[:] cpu._all_size_descrs_with_vtable = ( LLtypeMixin.cpu._all_size_descrs_with_vtable) # - loop_tokens = [] - loop_token = compile_new_loop(metainterp, loop_tokens, [], 0, None) - assert loop_tokens == [loop_token] - assert loop_token.number == 1 + greenkey = 'faked' + target_token = compile_loop(metainterp, greenkey, 0, + loop.inputargs, + loop.operations[-1].getarglist(), + None) + jitcell_token = target_token.targeting_jitcell_token + assert jitcell_token == target_token.original_jitcell_token + assert jitcell_token.target_tokens == [target_token] + assert jitcell_token.number == 1 assert staticdata.globaldata.loopnumbering == 2 # assert len(cpu.seen) == 1 - assert cpu.seen[0][2] == loop_token + assert cpu.seen[0][2] == jitcell_token # del cpu.seen[:] - metainterp = FakeMetaInterp() - metainterp.staticdata = staticdata - metainterp.cpu = cpu - metainterp.history = History() - metainterp.history.operations = loop.operations[:] - metainterp.history.inputargs = loop.inputargs[:] - # - loop_token_2 = compile_new_loop(metainterp, loop_tokens, [], 0, None) - assert loop_token_2 is 
loop_token - assert loop_tokens == [loop_token] - assert len(cpu.seen) == 0 - assert staticdata.globaldata.loopnumbering == 2 - def test_resume_guard_counters(): rgc = ResumeGuardCountersInt() diff --git a/pypy/jit/metainterp/test/test_del.py b/pypy/jit/metainterp/test/test_del.py --- a/pypy/jit/metainterp/test/test_del.py +++ b/pypy/jit/metainterp/test/test_del.py @@ -27,7 +27,7 @@ 'int_sub': 2, 'int_gt': 2, 'guard_true': 2, - 'jump': 2}) + 'jump': 1}) def test_class_of_allocated(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) diff --git a/pypy/jit/metainterp/test/test_dict.py b/pypy/jit/metainterp/test/test_dict.py --- a/pypy/jit/metainterp/test/test_dict.py +++ b/pypy/jit/metainterp/test/test_dict.py @@ -154,7 +154,7 @@ res = self.meta_interp(f, [100], listops=True) assert res == f(50) self.check_resops({'new_array': 2, 'getfield_gc': 2, - 'guard_true': 2, 'jump': 2, + 'guard_true': 2, 'jump': 1, 'new_with_vtable': 2, 'getinteriorfield_gc': 2, 'setfield_gc': 6, 'int_gt': 2, 'int_sub': 2, 'call': 10, 'int_and': 2, diff --git a/pypy/jit/metainterp/test/test_exception.py b/pypy/jit/metainterp/test/test_exception.py --- a/pypy/jit/metainterp/test/test_exception.py +++ b/pypy/jit/metainterp/test/test_exception.py @@ -35,7 +35,7 @@ return n res = self.meta_interp(f, [10]) assert res == 0 - self.check_resops({'jump': 2, 'guard_true': 2, + self.check_resops({'jump': 1, 'guard_true': 2, 'int_gt': 2, 'int_sub': 2}) def test_bridge_from_guard_exception(self): @@ -512,7 +512,7 @@ res = self.meta_interp(main, [41], repeat=7) assert res == -1 - self.check_tree_loop_count(2) # the loop and the entry path + self.check_target_token_count(2) # the loop and the entry path # we get: # ENTER - compile the new loop and the entry bridge # ENTER - compile the leaving path (raising MyError) diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -77,14 +77,14 @@ int_add=2, int_lt=2, guard_true=2, - jump=2) + jump=1) else: self.check_resops( call_release_gil=0, # no CALL_RELEASE_GIL int_add=2, int_lt=2, guard_true=2, - jump=2) + jump=1) return res def test_byval_result(self): @@ -145,7 +145,7 @@ return result_point[0].x * result_point[0].y assert self.meta_interp(main, [10]) == main(10) == 9000 - self.check_resops({'jump': 2, 'int_lt': 2, 'setinteriorfield_raw': 4, + self.check_resops({'jump': 1, 'int_lt': 2, 'setinteriorfield_raw': 4, 'getinteriorfield_raw': 8, 'int_add': 6, 'guard_true': 2}) def test_array_getitem_uint8(self): @@ -167,7 +167,7 @@ return f(data, n) assert self.meta_interp(main, [10]) == 2000 - self.check_resops({'jump': 2, 'int_lt': 2, 'getinteriorfield_raw': 2, + self.check_resops({'jump': 1, 'int_lt': 2, 'getinteriorfield_raw': 2, 'guard_true': 2, 'int_add': 4}) diff --git a/pypy/jit/metainterp/test/test_greenfield.py b/pypy/jit/metainterp/test/test_greenfield.py --- a/pypy/jit/metainterp/test/test_greenfield.py +++ b/pypy/jit/metainterp/test/test_greenfield.py @@ -24,7 +24,7 @@ # res = self.meta_interp(g, [7]) assert res == -2 - self.check_loop_count(2) + self.check_trace_count(2) self.check_resops(guard_value=0) def test_green_field_2(self): @@ -49,7 +49,7 @@ # res = self.meta_interp(g, [7]) assert res == -22 - self.check_loop_count(6) + self.check_trace_count(6) self.check_resops(guard_value=0) diff --git a/pypy/jit/metainterp/test/test_jitdriver.py b/pypy/jit/metainterp/test/test_jitdriver.py --- 
a/pypy/jit/metainterp/test/test_jitdriver.py +++ b/pypy/jit/metainterp/test/test_jitdriver.py @@ -28,10 +28,10 @@ i += 1 self.meta_interp(loop, [1, 4]) - assert sorted(called.keys()) == [(4, 1, "entry bridge"), (4, 1, "loop")] + assert sorted(called.keys()) == [(4, 1, "loop")] self.meta_interp(loop, [2, 4]) - assert sorted(called.keys()) == [(4, 1, "entry bridge"), (4, 1, "loop"), - (4, 2, "entry bridge"), (4, 2, "loop")] + assert sorted(called.keys()) == [(4, 1, "loop"), + (4, 2, "loop")] def test_on_compile_bridge(self): called = {} @@ -55,8 +55,7 @@ i += 1 self.meta_interp(loop, [1, 10]) - assert sorted(called.keys()) == ['bridge', (10, 1, "entry bridge"), - (10, 1, "loop")] + assert sorted(called.keys()) == ['bridge', (10, 1, "loop")] class TestLLtypeSingle(JitDriverTests, LLJitMixin): @@ -92,8 +91,9 @@ # the following numbers are not really expectations of the test # itself, but just the numbers that we got after looking carefully # at the generated machine code - self.check_loop_count(5) - self.check_tree_loop_count(4) # 2 x loop, 2 x enter bridge + self.check_trace_count(5) + self.check_jitcell_token_count(2) # 2 x loop including enter bridge + self.check_target_token_count(4) # 2 x loop, 2 x enter bridge self.check_enter_count(5) def test_inline(self): @@ -125,7 +125,7 @@ # we expect no loop at all for 'loop1': it should always be inlined # we do however get several version of 'loop2', all of which contains # at least one int_add, while there are no int_add's in 'loop1' - self.check_tree_loop_count(5) + self.check_jitcell_token_count(1) for loop in get_stats().loops: assert loop.summary()['int_add'] >= 1 diff --git a/pypy/jit/metainterp/test/test_jitprof.py b/pypy/jit/metainterp/test/test_jitprof.py --- a/pypy/jit/metainterp/test/test_jitprof.py +++ b/pypy/jit/metainterp/test/test_jitprof.py @@ -55,8 +55,6 @@ TRACING, BACKEND, ~ BACKEND, - BACKEND, - ~ BACKEND, ~ TRACING, RUNNING, ~ RUNNING, @@ -64,8 +62,8 @@ ~ BLACKHOLE ] assert profiler.events == expected - assert profiler.times == [3, 2, 1, 1] - assert profiler.counters == [1, 2, 1, 1, 3, 3, 1, 13, 2, 0, 0, 0, 0, + assert profiler.times == [2, 1, 1, 1] + assert profiler.counters == [1, 1, 1, 1, 3, 3, 1, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0] def test_simple_loop_with_call(self): diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py --- a/pypy/jit/metainterp/test/test_list.py +++ b/pypy/jit/metainterp/test/test_list.py @@ -225,7 +225,7 @@ return s res = self.meta_interp(f, [15], listops=True) assert res == f(15) - self.check_resops({'jump': 2, 'int_gt': 2, 'int_add': 2, + self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, 'guard_true': 2, 'int_sub': 2}) class TestOOtype(ListTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_logger.py b/pypy/jit/metainterp/test/test_logger.py --- a/pypy/jit/metainterp/test/test_logger.py +++ b/pypy/jit/metainterp/test/test_logger.py @@ -5,7 +5,7 @@ from pypy.jit.metainterp.typesystem import llhelper from StringIO import StringIO from pypy.jit.metainterp.optimizeopt.util import equaloplists -from pypy.jit.metainterp.history import AbstractDescr, LoopToken, BasicFailDescr +from pypy.jit.metainterp.history import AbstractDescr, JitCellToken, BasicFailDescr from pypy.jit.backend.model import AbstractCPU @@ -131,7 +131,7 @@ equaloplists(loop.operations, oloop.operations) def test_jump(self): - namespace = {'target': LoopToken()} + namespace = {'target': JitCellToken()} namespace['target'].number = 3 inp = ''' [i0] diff --git 
a/pypy/jit/metainterp/test/test_loop.py b/pypy/jit/metainterp/test/test_loop.py --- a/pypy/jit/metainterp/test/test_loop.py +++ b/pypy/jit/metainterp/test/test_loop.py @@ -36,7 +36,7 @@ return res * 2 res = self.meta_interp(f, [6, 7]) assert res == 84 - self.check_loop_count(1) + self.check_trace_count(1) def test_loop_with_delayed_setfield(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res', 'a']) @@ -58,7 +58,7 @@ return res * 2 res = self.meta_interp(f, [6, 13]) assert res == f(6, 13) - self.check_loop_count(1) + self.check_trace_count(1) if self.enable_opts: self.check_resops(setfield_gc=2, getfield_gc=0) @@ -90,9 +90,9 @@ res = self.meta_interp(f, [6, 33], policy=StopAtXPolicy(l)) assert res == f(6, 33) if self.enable_opts: - self.check_loop_count(3) + self.check_trace_count(2) else: - self.check_loop_count(2) + self.check_trace_count(2) def test_alternating_loops(self): myjitdriver = JitDriver(greens = [], reds = ['pattern']) @@ -108,9 +108,9 @@ return 42 self.meta_interp(f, [0xF0F0F0]) if self.enable_opts: - self.check_loop_count(3) + self.check_trace_count(3) else: - self.check_loop_count(2) + self.check_trace_count(2) def test_interp_simple(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'y']) @@ -135,7 +135,7 @@ return x res = self.meta_interp(f, [100, 30]) assert res == 42 - self.check_loop_count(0) + self.check_trace_count(0) def test_green_prevents_loop(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'y']) @@ -154,7 +154,7 @@ return x res = self.meta_interp(f, [100, 5]) assert res == f(100, 5) - self.check_loop_count(0) + self.check_trace_count(0) def test_interp_single_loop(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'y']) @@ -179,7 +179,7 @@ return x res = self.meta_interp(f, [5, 8]) assert res == 42 - self.check_loop_count(1) + self.check_trace_count(1) # the 'int_eq' and following 'guard' should be constant-folded if 'unroll' in self.enable_opts: self.check_resops(int_eq=0, guard_true=2, guard_false=0) @@ -194,7 +194,10 @@ assert isinstance(liveboxes[0], history.BoxInt) assert isinstance(liveboxes[1], history.BoxInt) found += 1 - assert found == 1 + if 'unroll' in self.enable_opts: + assert found == 2 + else: + assert found == 1 def test_interp_many_paths(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'node']) @@ -229,7 +232,7 @@ expected = f(node1) res = self.meta_interp(f, [node1]) assert res == expected - self.check_loop_count_at_most(19) + self.check_trace_count_at_most(19) def test_interp_many_paths_2(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'node']) @@ -268,7 +271,7 @@ expected = f(node1) res = self.meta_interp(f, [node1]) assert res == expected - self.check_loop_count_at_most(19) + self.check_trace_count_at_most(19) def test_nested_loops(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'y']) @@ -601,11 +604,11 @@ assert res == expected if self.enable_opts: - self.check_loop_count(2) - self.check_tree_loop_count(2) # 1 loop, 1 bridge from interp + self.check_trace_count(2) + self.check_jitcell_token_count(1) # 1 loop with bridge from interp else: - self.check_loop_count(2) - self.check_tree_loop_count(1) # 1 loop, callable from the interp + self.check_trace_count(2) + self.check_jitcell_token_count(1) # 1 loop, callable from the interp def test_example(self): myjitdriver = JitDriver(greens = ['i'], @@ -646,10 +649,10 @@ res = self.meta_interp(main_interpreter_loop, [1]) assert res == 102 - self.check_loop_count(1) + self.check_trace_count(1) if 'unroll' in 
self.enable_opts: self.check_resops({'int_add' : 6, 'int_gt' : 2, - 'guard_false' : 2, 'jump' : 2}) + 'guard_false' : 2, 'jump' : 1}) else: self.check_resops({'int_add' : 3, 'int_gt' : 1, 'guard_false' : 1, 'jump' : 1}) @@ -691,7 +694,7 @@ res = self.meta_interp(main_interpreter_loop, [1]) assert res == main_interpreter_loop(1) - self.check_loop_count(1) + self.check_trace_count(1) # These loops do different numbers of ops based on which optimizer we # are testing with. self.check_resops(self.automatic_promotion_result) @@ -753,7 +756,7 @@ res = self.meta_interp(interpret, [1]) assert res == interpret(1) # XXX it's unsure how many loops should be there - self.check_loop_count(3) + self.check_trace_count(3) def test_path_with_operations_not_from_start(self): jitdriver = JitDriver(greens = ['k'], reds = ['n', 'z']) diff --git a/pypy/jit/metainterp/test/test_loop_unroll.py b/pypy/jit/metainterp/test/test_loop_unroll.py --- a/pypy/jit/metainterp/test/test_loop_unroll.py +++ b/pypy/jit/metainterp/test/test_loop_unroll.py @@ -8,7 +8,7 @@ enable_opts = ALL_OPTS_NAMES automatic_promotion_result = { - 'int_gt': 2, 'guard_false': 2, 'jump': 2, 'int_add': 6, + 'int_gt': 2, 'guard_false': 2, 'jump': 1, 'int_add': 6, 'guard_value': 1 } diff --git a/pypy/jit/metainterp/test/test_memmgr.py b/pypy/jit/metainterp/test/test_memmgr.py --- a/pypy/jit/metainterp/test/test_memmgr.py +++ b/pypy/jit/metainterp/test/test_memmgr.py @@ -14,7 +14,9 @@ from pypy.jit.metainterp.memmgr import MemoryManager from pypy.jit.metainterp.test.support import LLJitMixin from pypy.rlib.jit import JitDriver, dont_look_inside - +from pypy.jit.metainterp.warmspot import get_stats +from pypy.jit.metainterp.warmstate import JitCell +from pypy.rlib import rgc class FakeLoopToken: generation = 0 @@ -81,6 +83,20 @@ # See comments in TestMemoryManager. To get temporarily the normal # behavior just rename this class to TestIntegration. + # We need an extra rgc.collect in get_procedure_token() for some of + # these tests to pass. But we don't want it there always since that will + # make all other tests take forever. + def setup_class(cls): + original_get_procedure_token = JitCell.get_procedure_token + def get_procedure_token(self): + rgc.collect(); + return original_get_procedure_token(self) + JitCell.get_procedure_token = get_procedure_token + cls.original_get_procedure_token = original_get_procedure_token + + def teardown_class(cls): + JitCell.get_procedure_token = cls.original_get_procedure_token + def test_loop_kept_alive(self): myjitdriver = JitDriver(greens=[], reds=['n']) def g(): @@ -99,7 +115,7 @@ assert res == 42 # we should see only the loop and the entry bridge - self.check_tree_loop_count(2) + self.check_target_token_count(2) def test_target_loop_kept_alive_or_not(self): myjitdriver = JitDriver(greens=['m'], reds=['n']) @@ -114,6 +130,8 @@ # Depending on loop_longevity, either: # A. create the loop and the entry bridge for 'g(5)' # B. 
create 8 loops (and throw them away at each iteration) + # Actually, it's 4 loops and 4 exit bridges thrown away + # every second iteration for i in range(8): g(5) # create another loop and another entry bridge for 'g(7)', @@ -132,14 +150,15 @@ # case A res = self.meta_interp(f, [], loop_longevity=3) assert res == 42 - # we should see only the loop and the entry bridge for g(5) and g(7) - self.check_tree_loop_count(4) + # we should see only the loop with preamble and the exit bridge + # for g(5) and g(7) + self.check_enter_count(4) # case B, with a lower longevity res = self.meta_interp(f, [], loop_longevity=1) assert res == 42 # we should see a loop for each call to g() - self.check_tree_loop_count(8 + 20*2*2) + self.check_enter_count(8 + 20*2) def test_throw_away_old_loops(self): myjitdriver = JitDriver(greens=['m'], reds=['n']) @@ -152,9 +171,9 @@ return 21 def f(): for i in range(10): - g(1) # g(1) gets a loop and an entry bridge, stays alive - g(2) # (and an exit bridge, which does not count in - g(1) # check_tree_loop_count) + g(1) # g(1) gets a loop with an entry bridge + g(2) # and an exit bridge, stays alive + g(1) g(3) g(1) g(4) # g(2), g(3), g(4), g(5) are thrown away every iteration @@ -164,7 +183,7 @@ res = self.meta_interp(f, [], loop_longevity=3) assert res == 42 - self.check_tree_loop_count(2 + 10*4*2) + self.check_enter_count(2 + 10*4) def test_call_assembler_keep_alive(self): myjitdriver1 = JitDriver(greens=['m'], reds=['n']) @@ -187,7 +206,7 @@ return 21 def f(u): for i in range(8): - h(u, 32) # make a loop and an entry bridge for h(u) + h(u, 32) # make a loop and an exit bridge for h(u) g(u, 8) # make a loop for g(u) with a call_assembler g(u, 0); g(u+1, 0) # \ g(u, 0); g(u+2, 0) # \ make more loops for g(u+1) to g(u+4), @@ -198,7 +217,12 @@ res = self.meta_interp(f, [1], loop_longevity=4, inline=True) assert res == 42 - self.check_tree_loop_count(12) + self.check_jitcell_token_count(6) + tokens = [t() for t in get_stats().jitcell_token_wrefs] + # Some loops have been freed + assert None in tokens + # Loop with number 0, h(), has not been freed + assert 0 in [t.number for t in tokens if t] # ____________________________________________________________ @@ -217,10 +241,17 @@ if __name__ == '__main__': # occurs in the subprocess for test in [_TestMemoryManager(), _TestIntegration()]: - for name in dir(test): - if name.startswith('test_'): - print - print '-'*79 - print '----- Now running test', name, '-----' - print - getattr(test, name)() + if hasattr(test, 'setup_class'): + test.setup_class() + try: + for name in dir(test): + if name.startswith('test_'): + print + print '-'*79 + print '----- Now running test', name, '-----' + print + getattr(test, name)() + finally: + if hasattr(test, 'teardown_class'): + test.teardown_class() + diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -294,7 +294,8 @@ return total res = self.meta_interp(main, []) - self.check_tree_loop_count(6) + self.check_trace_count(6) + self.check_jitcell_token_count(3) assert res == main() def test_change_during_running(self): @@ -305,7 +306,7 @@ self.a = a @dont_look_inside def residual_call(foo, x): - if x == 5: + if x == 10: foo.a += 1 def f(a, x): foo = Foo(a) @@ -319,9 +320,9 @@ x -= 1 return total # - assert f(100, 15) == 3009 - res = self.meta_interp(f, [100, 15]) - assert res == 3009 + assert f(100, 30) == 6019 + res = self.meta_interp(f, [100, 
30]) + assert res == 6019 self.check_resops(guard_not_invalidated=8, guard_not_forced=0, call_may_force=0, getfield_gc=0) @@ -434,7 +435,7 @@ self.lst = lst @dont_look_inside def residual_call(foo, x): - if x == 5: + if x == 10: lst2 = [0, 0] lst2[1] = foo.lst[1] + 1 foo.lst = lst2 @@ -452,9 +453,9 @@ x -= 1 return total # - assert f(100, 15) == 3009 - res = self.meta_interp(f, [100, 15]) - assert res == 3009 + assert f(100, 30) == 6019 + res = self.meta_interp(f, [100, 30]) + assert res == 6019 self.check_resops(call_may_force=0, getfield_gc=0, getarrayitem_gc_pure=0, guard_not_forced=0, getarrayitem_gc=0, guard_not_invalidated=8) @@ -477,7 +478,7 @@ return foo.step res = self.meta_interp(f, [60]) assert res == 1 - self.check_tree_loop_count(4) # at least not 2 like before + self.check_jitcell_token_count(2) class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -530,8 +530,8 @@ result = 0 for i in range(m): result += f('+-cl--', i) - g(50) - self.meta_interp(g, [50], backendopt=True) + res = self.meta_interp(g, [50], backendopt=True) + assert res == g(50) py.test.skip("tracing from start is by now only longer enabled " "if a trace gets too big") self.check_tree_loop_count(3) @@ -577,7 +577,7 @@ self.meta_interp(g, [10], backendopt=True) self.check_aborted_count(1) self.check_resops(call=0, call_assembler=2) - self.check_tree_loop_count(3) + self.check_jitcell_token_count(2) def test_directly_call_assembler(self): driver = JitDriver(greens = ['codeno'], reds = ['i'], @@ -1211,11 +1211,11 @@ portal(c, i, v) self.meta_interp(main, [10, 10, False, False], inline=True) - self.check_tree_loop_count(1) - self.check_loop_count(0) + self.check_jitcell_token_count(1) + self.check_trace_count(1) self.meta_interp(main, [3, 10, True, False], inline=True) - self.check_tree_loop_count(0) - self.check_loop_count(0) + self.check_jitcell_token_count(0) + self.check_trace_count(0) def test_trace_from_start_does_not_prevent_inlining(self): driver = JitDriver(greens = ['c', 'bc'], reds = ['i']) @@ -1260,7 +1260,7 @@ return portal(level + 1) self.meta_interp(portal, [0]) - self.check_loop_count_at_most(2) # and not, e.g., 24 + self.check_trace_count_at_most(2) # and not, e.g., 24 class TestLLtype(RecursiveTests, LLJitMixin): diff --git a/pypy/jit/metainterp/test/test_send.py b/pypy/jit/metainterp/test/test_send.py --- a/pypy/jit/metainterp/test/test_send.py +++ b/pypy/jit/metainterp/test/test_send.py @@ -20,7 +20,7 @@ return c res = self.meta_interp(f, [1]) assert res == 2 - self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + self.check_resops({'jump': 1, 'guard_true': 2, 'int_gt': 2, 'int_sub': 2}) # all folded away def test_red_builtin_send(self): @@ -67,7 +67,7 @@ backendopt=True) assert res == 43 self.check_resops({'int_gt': 2, 'getfield_gc': 2, - 'guard_true': 2, 'int_sub': 2, 'jump': 2, + 'guard_true': 2, 'int_sub': 2, 'jump': 1, 'call': 2, 'guard_no_exception': 2, 'int_add': 2}) @@ -160,7 +160,7 @@ res = self.meta_interp(f, [j], policy=policy) assert res == 42 self.check_enter_count_at_most(5) - self.check_loop_count_at_most(5) + self.check_trace_count_at_most(5) def test_oosend_guard_failure(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'w']) @@ -199,7 +199,7 @@ # InvalidLoop condition, and was then unrolled, giving two copies # of the body in a single bigger loop with 
no failing guard except # the final one. - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(guard_class=1, int_add=4, int_sub=4) self.check_jumps(14) @@ -240,7 +240,7 @@ assert res == f(3, 28) res = self.meta_interp(f, [4, 28]) assert res == f(4, 28) - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(guard_class=1, int_add=4, int_sub=4) self.check_jumps(14) @@ -277,7 +277,7 @@ # looking only at the loop, we deduce that the class of 'w' is 'W2'. # However, this doesn't match the initial value of 'w'. # XXX This not completely easy to check... - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(guard_class=1, new_with_vtable=0, int_lshift=2, int_add=0, new=0) @@ -306,7 +306,7 @@ return x res = self.meta_interp(f, [198], policy=StopAtXPolicy(externfn)) assert res == f(198) - self.check_loop_count(4) + self.check_trace_count(4) def test_indirect_call_unknown_object_2(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'state']) @@ -340,9 +340,9 @@ res = self.meta_interp(f, [198], policy=StopAtXPolicy(State.externfn.im_func)) assert res == f(198) - # we get two TreeLoops: an initial one, and one entering from - # the interpreter - self.check_tree_loop_count(2) + # we get two TargetTokens, one for the loop and one for the preamble + self.check_jitcell_token_count(1) + self.check_target_token_count(2) def test_indirect_call_unknown_object_3(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'z', 'state']) @@ -377,9 +377,10 @@ res = self.meta_interp(f, [198], policy=StopAtXPolicy(State.externfn.im_func)) assert res == f(198) - # we get four TreeLoops: one for each of the 3 getvalue functions, - # and one entering from the interpreter - self.check_tree_loop_count(4) + # we get four TargetTokens: one for each of the 3 getvalue functions, + # and one entering from the interpreter (the preamble) + self.check_jitcell_token_count(1) + self.check_target_token_count(4) def test_two_behaviors(self): py.test.skip("XXX fix me!!!!!!! problem in optimize.py") @@ -403,7 +404,7 @@ # is true if we replace "if cases[y]" above with "if not cases[y]" # -- so there is no good reason that it fails. 
self.check_loops(new_with_vtable=0) - self.check_loop_count(2) + self.check_trace_count(2) def test_behavior_change_after_a_while(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'x']) @@ -431,9 +432,10 @@ assert res == 200 # we expect 2 versions of the loop, 1 entry bridge, # and 1 bridge going from the - # loop back to the start of the entry bridge - self.check_loop_count(3) # 2 loop + 1 bridge - self.check_tree_loop_count(3) # 2 loop + 1 entry bridge (argh) + # loop back to the loop + self.check_trace_count(2) # preamble/loop and 1 bridge + self.check_jitcell_token_count(1) + self.check_target_token_count(3) # preamble, Int1, Int2 self.check_aborted_count(0) def test_three_cases(self): @@ -454,7 +456,7 @@ return node.x res = self.meta_interp(f, [55]) assert res == f(55) - self.check_tree_loop_count(4) + self.check_trace_count(3) def test_three_classes(self): class Base: @@ -484,7 +486,7 @@ return n res = self.meta_interp(f, [55], policy=StopAtXPolicy(extern)) assert res == f(55) - self.check_tree_loop_count(2) + self.check_jitcell_token_count(1) def test_bug1(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) diff --git a/pypy/jit/metainterp/test/test_string.py b/pypy/jit/metainterp/test/test_string.py --- a/pypy/jit/metainterp/test/test_string.py +++ b/pypy/jit/metainterp/test/test_string.py @@ -499,7 +499,7 @@ sys.defaultencoding = _str('utf-8') return sa assert self.meta_interp(f, [8]) == f(8) - self.check_resops({'jump': 2, 'int_is_true': 2, 'int_add': 2, + self.check_resops({'jump': 1, 'int_is_true': 2, 'int_add': 2, 'guard_true': 2, 'guard_not_invalidated': 2, 'int_sub': 2}) @@ -590,7 +590,7 @@ # The "".join should be unrolled, since the length of x is known since # it is virtual, ensure there are no calls to ll_join_chars, or # allocations. 
- self.check_resops({'jump': 2, 'guard_true': 5, 'int_lt': 2, + self.check_resops({'jump': 1, 'guard_true': 5, 'int_lt': 2, 'int_add': 2, 'int_is_true': 3}) def test_virtual_copystringcontent(self): diff --git a/pypy/jit/metainterp/test/test_tl.py b/pypy/jit/metainterp/test/test_tl.py --- a/pypy/jit/metainterp/test/test_tl.py +++ b/pypy/jit/metainterp/test/test_tl.py @@ -72,7 +72,7 @@ res = self.meta_interp(main, [0, 6], listops=True, backendopt=True) assert res == 5040 - self.check_resops({'jump': 2, 'int_le': 2, 'guard_value': 1, + self.check_resops({'jump': 1, 'int_le': 2, 'guard_value': 1, 'int_mul': 2, 'guard_false': 2, 'int_sub': 2}) def test_tl_2(self): @@ -80,7 +80,7 @@ res = self.meta_interp(main, [1, 10], listops=True, backendopt=True) assert res == main(1, 10) - self.check_resops({'int_le': 2, 'int_sub': 2, 'jump': 2, + self.check_resops({'int_le': 2, 'int_sub': 2, 'jump': 1, 'guard_false': 2, 'guard_value': 1}) def test_tl_call(self, listops=True, policy=None): diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -1,5 +1,5 @@ import py -from pypy.rlib.jit import JitDriver, promote +from pypy.rlib.jit import JitDriver, promote, dont_look_inside from pypy.rlib.objectmodel import compute_unique_id from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin @@ -30,7 +30,7 @@ assert f(10) == 55 * 10 res = self.meta_interp(f, [10]) assert res == 55 * 10 - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=2, new=0) @@ -79,7 +79,7 @@ assert f(10) == 55 * 10 res = self.meta_interp(f, [10]) assert res == 55 * 10 - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=3, new=0) @@ -97,7 +97,7 @@ return node.floatval res = self.meta_interp(f, [10]) assert res == f(10) - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(new=0, float_add=1) def test_virtualized_float2(self): @@ -115,7 +115,7 @@ return node.floatval res = self.meta_interp(f, [10]) assert res == f(10) - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(new=0, float_add=2) @@ -140,7 +140,7 @@ return node.value * node.extra res = self.meta_interp(f, [10]) assert res == 55 * 30 - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=2, new=0) @@ -161,7 +161,7 @@ return node.value res = self.meta_interp(f, [500]) assert res == 640 - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=1, new=0) @@ -185,7 +185,7 @@ return node.value res = self.meta_interp(f, [18]) assert res == f(18) - self.check_loop_count(2) + self.check_trace_count(2) self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=2, new=0) @@ -214,7 +214,7 @@ return node.value res = self.meta_interp(f, [20], policy=StopAtXPolicy(externfn)) assert res == f(20) - self.check_loop_count(3) + self.check_trace_count(2) self.check_resops(**{self._new_op: 1}) self.check_resops(int_mul=0, call=1) @@ -391,7 +391,7 @@ fieldname = self._field_prefix + 'value' assert getattr(res, fieldname, -100) == f(21).value - self.check_tree_loop_count(2) # the loop and the entry path + self.check_jitcell_token_count(1) # the loop and the entry path # we get: # ENTER - compile the new loop and 
entry bridge # ENTER - compile the leaving path @@ -565,7 +565,10 @@ n -= 1 return node1.value + node2.value assert self.meta_interp(f, [40, 3]) == f(40, 3) - self.check_loop_count(6) + # We get 4 versions of this loop: + # preamble (no virtuals), node1 virtual, node2 virtual, both virtual + self.check_target_token_count(4) + self.check_resops(new=0, new_with_vtable=0) def test_single_virtual_forced_in_bridge(self): myjitdriver = JitDriver(greens = [], reds = ['n', 's', 'node']) @@ -612,10 +615,10 @@ return node.value res = self.meta_interp(f, [48, 3], policy=StopAtXPolicy(externfn)) assert res == f(48, 3) - self.check_loop_count(5) + self.check_trace_count(4) res = self.meta_interp(f, [40, 3], policy=StopAtXPolicy(externfn)) assert res == f(40, 3) - self.check_loop_count(3) + self.check_trace_count(3) def test_forced_virtual_assigned_different_class_in_bridge(self): myjitdriver = JitDriver(greens = [], reds = ['n', 's', 'node', 'node2']) @@ -782,6 +785,165 @@ + def test_retrace_not_matching_bridge(self): + @dont_look_inside + def external(node): + return node.value + 1 + myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'node', 'node2']) + class A(): + def new(self): + return A() + def val(self, i): + return i + 7 + class B(A): + def new(self): + return B() + def val(self, i): + return i + 42 + def f(n): + node = self._new() + node2 = A() + node.value = 0 + i = 0 + while i < n: + myjitdriver.jit_merge_point(n=n, i=i, node=node, node2=node2) + next = self._new() + next.value = node.value + n + node2.val(i) + if i != 7: + next.value += external(next) + else: + node2 = B() + node = next + node2 = node2.new() + + i += 1 + return node.value + res = self.meta_interp(f, [10], repeat=10) + assert res == f(10) + self.check_resops(jump=2) + + def test_retrace_not_matching_bridge_str(self): + @dont_look_inside + def external(node): + return node.value + 1 + myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'node', 'node2', 's']) + class A(): + def new(self): + return A() + def val(self, i): + return i + 7 + class B(A): + def new(self): + return B() + def val(self, i): + return i + 42 + def f(n): + s = '*' * n + node = self._new() + node2 = A() + node.value = 0 + i = 0 + while i < n: + myjitdriver.jit_merge_point(n=n, i=i, node=node, node2=node2, s=s) + next = self._new() + next.value = node.value + n + node2.val(i) + if i != 7: + next.value += external(next) + else: + node2 = B() + node = next + node2 = node2.new() + node.value += len(s) + i += 1 + return node.value + res = self.meta_interp(f, [10], repeat=10) + assert res == f(10) + self.check_resops(jump=2) + + def test_nested_loops(self): + class Int(object): + def __init__(self, val): + self.val = val + myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'sa', 'i', 'j']) + bytecode = "iajb+JI" + def f(n): + pc = sa = 0 + i = j = Int(0) + while pc < len(bytecode): + myjitdriver.jit_merge_point(pc=pc, n=n, sa=sa, i=i, j=j) + op = bytecode[pc] + if op == 'i': + i = Int(0) + elif op == 'j': + j = Int(0) + elif op == '+': + sa += i.val * j.val + elif op == 'a': + i = Int(i.val + 1) + elif op == 'b': + j = Int(j.val + 1) + elif op == 'J': + if j.val < n: + pc -= 2 + myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i, j=j) + continue + elif op == 'I': + if i.val < n: + pc -= 5 + myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i, j=j) + continue + pc += 1 + return sa + + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_aborted_count(0) + self.check_target_token_count(3) + + def test_nested_loops_bridge(self): + class 
Int(object): + def __init__(self, val): + self.val = val + myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'sa', 'i', 'j']) + bytecode = "iajb+JI" + def f(n): + pc = sa = 0 + i = j = Int(0) + while pc < len(bytecode): + myjitdriver.jit_merge_point(pc=pc, n=n, sa=sa, i=i, j=j) + op = bytecode[pc] + if op == 'i': + i = Int(0) + elif op == 'j': + j = Int(0) + elif op == '+': + if i.val < n-8: + sa += 7 + if j.val < n-16: + sa += 42 + sa += i.val * j.val + elif op == 'a': + i = Int(i.val + 1) + elif op == 'b': + j = Int(j.val + 1) + elif op == 'J': + if j.val < n: + pc -= 2 + myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i, j=j) + continue + elif op == 'I': + if i.val < n: + pc -= 5 + myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i, j=j) + continue + pc += 1 + return sa + + res = self.meta_interp(f, [32]) + assert res == f(32) + self.check_aborted_count(0) + self.check_target_token_count(3) + class VirtualMiscTests: def test_multiple_equal_virtuals(self): @@ -1008,7 +1170,7 @@ assert f(10) == 20 res = self.meta_interp(f, [10]) assert res == 20 - self.check_loop_count(1) + self.check_trace_count(1) self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=0, new=0) diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -582,7 +582,7 @@ res = self.meta_interp(f, [123], policy=StopAtXPolicy(g)) assert res == f(123) self.check_aborted_count(2) - self.check_tree_loop_count(0) + self.check_jitcell_token_count(0) def test_external_read_with_exception(self): jitdriver = JitDriver(greens = [], reds = ['frame'], @@ -621,7 +621,7 @@ res = self.meta_interp(f, [123], policy=StopAtXPolicy(g)) assert res == f(123) self.check_aborted_count(2) - self.check_tree_loop_count(0) + self.check_jitcell_token_count(0) def test_external_write(self): jitdriver = JitDriver(greens = [], reds = ['frame'], @@ -653,7 +653,7 @@ res = self.meta_interp(f, [240], policy=StopAtXPolicy(g)) assert res == f(240) self.check_aborted_count(3) - self.check_tree_loop_count(0) + self.check_jitcell_token_count(0) def test_external_read_sometimes(self): jitdriver = JitDriver(greens = [], reds = ['frame'], diff --git a/pypy/jit/metainterp/test/test_virtualref.py b/pypy/jit/metainterp/test/test_virtualref.py --- a/pypy/jit/metainterp/test/test_virtualref.py +++ b/pypy/jit/metainterp/test/test_virtualref.py @@ -321,7 +321,7 @@ assert res == 13 self.check_resops(new_with_vtable=2, # the vref, but not XY() new_array=0) # and neither next1/2/3 - self.check_loop_count(1) + self.check_trace_count(1) self.check_aborted_count(0) def test_blackhole_forces(self): @@ -363,7 +363,7 @@ assert res == 13 self.check_resops(new_with_vtable=0, # all virtualized in the n!=13 loop new_array=0) - self.check_loop_count(1) + self.check_trace_count(1) self.check_aborted_count(0) def test_bridge_forces(self): @@ -410,7 +410,7 @@ # res = self.meta_interp(f, [72]) assert res == 6 - self.check_loop_count(2) # the loop and the bridge + self.check_trace_count(2) # the loop and the bridge self.check_resops(new_with_vtable=2, # loop: nothing; bridge: vref, xy new_array=2) # bridge: next4, next5 self.check_aborted_count(0) diff --git a/pypy/jit/metainterp/test/test_virtualstate.py b/pypy/jit/metainterp/test/test_virtualstate.py --- a/pypy/jit/metainterp/test/test_virtualstate.py +++ b/pypy/jit/metainterp/test/test_virtualstate.py @@ -6,10 +6,11 @@ from pypy.jit.metainterp.optimizeopt.optimizer import 
OptValue from pypy.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr, ConstInt, ConstPtr from pypy.rpython.lltypesystem import lltype -from pypy.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin, BaseTest, equaloplists +from pypy.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin, BaseTest, \ + equaloplists, FakeDescrWithSnapshot from pypy.jit.metainterp.optimizeopt.intutils import IntBound -from pypy.jit.metainterp.history import TreeLoop, LoopToken -from pypy.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeDescr, FakeMetaInterpStaticData +from pypy.jit.metainterp.history import TreeLoop, JitCellToken +from pypy.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeMetaInterpStaticData from pypy.jit.metainterp.optimize import RetraceLoop from pypy.jit.metainterp.resoperation import ResOperation, rop @@ -434,7 +435,7 @@ enable_opts = "intbounds:rewrite:virtualize:string:pure:heap:unroll" def _do_optimize_bridge(self, bridge, call_pure_results): - from pypy.jit.metainterp.optimizeopt import optimize_bridge_1, build_opt_chain + from pypy.jit.metainterp.optimizeopt import optimize_trace from pypy.jit.metainterp.optimizeopt.util import args_dict self.bridge = bridge @@ -448,10 +449,9 @@ if hasattr(self, 'callinfocollection'): metainterp_sd.callinfocollection = self.callinfocollection # - d = {} - for name in self.enable_opts.split(":"): - d[name] = None - optimize_bridge_1(metainterp_sd, bridge, d) + bridge.start_resumedescr = FakeDescrWithSnapshot() + optimize_trace(metainterp_sd, bridge, self.enable_opts) + def optimize_bridge(self, loops, bridge, expected, expected_target='Loop', **boxvalues): if isinstance(loops, str): @@ -459,24 +459,19 @@ loops = [self.parse(loop) for loop in loops] bridge = self.parse(bridge) for loop in loops: - loop.preamble = TreeLoop('preamble') - loop.preamble.inputargs = loop.inputargs - loop.preamble.token = LoopToken() - loop.preamble.start_resumedescr = FakeDescr() - self._do_optimize_loop(loop, None) + loop.preamble = self.unroll_and_optimize(loop) preamble = loops[0].preamble - for loop in loops[1:]: - preamble.token.short_preamble.extend(loop.preamble.token.short_preamble) + token = JitCellToken() + token.target_tokens = [l.operations[0].getdescr() for l in [preamble] + loops] boxes = {} for b in bridge.inputargs + [op.result for op in bridge.operations]: boxes[str(b)] = b for b, v in boxvalues.items(): boxes[b].value = v - bridge.operations[-1].setdescr(preamble.token) - try: - self._do_optimize_bridge(bridge, None) - except RetraceLoop: + bridge.operations[-1].setdescr(token) + self._do_optimize_bridge(bridge, None) + if bridge.operations[-1].getopnum() == rop.LABEL: assert expected == 'RETRACE' return @@ -485,13 +480,13 @@ self.assert_equal(bridge, expected) if expected_target == 'Preamble': - assert bridge.operations[-1].getdescr() is preamble.token + assert bridge.operations[-1].getdescr() is preamble.operations[0].getdescr() elif expected_target == 'Loop': assert len(loops) == 1 - assert bridge.operations[-1].getdescr() is loops[0].token + assert bridge.operations[-1].getdescr() is loops[0].operations[0].getdescr() elif expected_target.startswith('Loop'): n = int(expected_target[4:]) - assert bridge.operations[-1].getdescr() is loops[n].token + assert bridge.operations[-1].getdescr() is loops[n].operations[0].getdescr() else: assert False @@ -918,6 +913,9 @@ pass def getvalue(*args): pass + def emit_operation(*args): + pass + class TestShortBoxes: p1 = BoxPtr() diff --git 
a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -200,7 +200,7 @@ m -= 1 self.meta_interp(f2, [i2]) try: - self.check_tree_loop_count(1) + self.check_jitcell_token_count(1) break except AssertionError: print "f2: no loop generated for i2==%d" % i2 @@ -215,7 +215,7 @@ m -= 1 self.meta_interp(f1, [i1]) try: - self.check_tree_loop_count(1) + self.check_jitcell_token_count(1) break except AssertionError: print "f1: no loop generated for i1==%d" % i1 @@ -235,8 +235,8 @@ self.meta_interp(f1, [8]) # it should generate one "loop" only, which ends in a FINISH # corresponding to the return from f2. - self.check_tree_loop_count(1) - self.check_loop_count(0) + self.check_trace_count(1) + self.check_resops(jump=0) def test_simple_loop(self): mydriver = JitDriver(greens=[], reds=['m']) @@ -245,8 +245,8 @@ mydriver.jit_merge_point(m=m) m = m - 1 self.meta_interp(f1, [8]) - self.check_loop_count(1) - self.check_resops({'jump': 2, 'guard_true': 2, 'int_gt': 2, + self.check_trace_count(1) + self.check_resops({'jump': 1, 'guard_true': 2, 'int_gt': 2, 'int_sub': 2}) def test_void_red_variable(self): diff --git a/pypy/jit/metainterp/test/test_warmstate.py b/pypy/jit/metainterp/test/test_warmstate.py --- a/pypy/jit/metainterp/test/test_warmstate.py +++ b/pypy/jit/metainterp/test/test_warmstate.py @@ -190,14 +190,14 @@ state = WarmEnterState(None, FakeJitDriverSD()) get_jitcell = state.make_jitcell_getter() class FakeLoopToken(object): - pass + invalidated = False looptoken = FakeLoopToken() - state.attach_unoptimized_bridge_from_interp([ConstInt(5), - constfloat(2.25)], - looptoken) + state.attach_procedure_to_interp([ConstInt(5), + constfloat(2.25)], + looptoken) cell1 = get_jitcell(True, 5, 2.25) assert cell1.counter < 0 - assert cell1.get_entry_loop_token() is looptoken + assert cell1.get_procedure_token() is looptoken def test_make_jitdriver_callbacks_1(): class FakeWarmRunnerDesc: diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -169,34 +169,22 @@ # counter == -1: there is an entry bridge for this cell # counter == -2: tracing is currently going on for this cell counter = 0 - compiled_merge_points_wref = None # list of weakrefs to LoopToken dont_trace_here = False - wref_entry_loop_token = None # (possibly) one weakref to LoopToken + wref_procedure_token = None - def get_compiled_merge_points(self): - result = [] - if self.compiled_merge_points_wref is not None: - for wref in self.compiled_merge_points_wref: - looptoken = wref() - if looptoken is not None and not looptoken.invalidated: - result.append(looptoken) - return result - - def set_compiled_merge_points(self, looptokens): - self.compiled_merge_points_wref = [self._makeref(token) - for token in looptokens] - - def get_entry_loop_token(self): - if self.wref_entry_loop_token is not None: - return self.wref_entry_loop_token() + def get_procedure_token(self): + if self.wref_procedure_token is not None: + token = self.wref_procedure_token() + if token and not token.invalidated: + return token return None - def set_entry_loop_token(self, looptoken): - self.wref_entry_loop_token = self._makeref(looptoken) + def set_procedure_token(self, token): + self.wref_procedure_token = self._makeref(token) - def _makeref(self, looptoken): - assert looptoken is not None - return weakref.ref(looptoken) + def _makeref(self, 
token): + assert token is not None + return weakref.ref(token) # ____________________________________________________________ @@ -283,18 +271,17 @@ debug_print("disabled inlining", loc) debug_stop("jit-disableinlining") - def attach_unoptimized_bridge_from_interp(self, greenkey, - entry_loop_token): + def attach_procedure_to_interp(self, greenkey, procedure_token): cell = self.jit_cell_at_key(greenkey) - old_token = cell.get_entry_loop_token() - cell.set_entry_loop_token(entry_loop_token) - cell.counter = -1 # valid entry bridge attached + old_token = cell.get_procedure_token() + cell.set_procedure_token(procedure_token) + cell.counter = -1 # valid procedure bridge attached if old_token is not None: - self.cpu.redirect_call_assembler(old_token, entry_loop_token) - # entry_loop_token is also kept alive by any loop that used + self.cpu.redirect_call_assembler(old_token, procedure_token) + # procedure_token is also kept alive by any loop that used # to point to old_token. Actually freeing old_token early # is a pointless optimization (it is tiny). - old_token.record_jump_to(entry_loop_token) + old_token.record_jump_to(procedure_token) # ---------- @@ -343,7 +330,7 @@ # set counter to -2, to mean "tracing in effect" cell.counter = -2 try: - loop_token = metainterp.compile_and_run_once(jitdriver_sd, + procedure_token = metainterp.compile_and_run_once(jitdriver_sd, *args) finally: if cell.counter == -2: @@ -356,8 +343,8 @@ assert cell.counter == -1 if not confirm_enter_jit(*args): return - loop_token = cell.get_entry_loop_token() - if loop_token is None: # it was a weakref that has been freed + procedure_token = cell.get_procedure_token() + if procedure_token is None: # it was a weakref that has been freed cell.counter = 0 return # machine code was already compiled for these greenargs @@ -368,14 +355,14 @@ while True: # until interrupted by an exception metainterp_sd.profiler.start_running() #debug_start("jit-running") - fail_descr = warmrunnerdesc.execute_token(loop_token) + fail_descr = warmrunnerdesc.execute_token(procedure_token) #debug_stop("jit-running") metainterp_sd.profiler.end_running() - loop_token = None # for test_memmgr + procedure_token = None # for test_memmgr if vinfo is not None: vinfo.reset_vable_token(virtualizable) - loop_token = fail_descr.handle_fail(metainterp_sd, - jitdriver_sd) + procedure_token = fail_descr.handle_fail(metainterp_sd, + jitdriver_sd) maybe_compile_and_run._dont_inline_ = True self.maybe_compile_and_run = maybe_compile_and_run @@ -617,16 +604,16 @@ def get_assembler_token(greenkey, redboxes): # 'redboxes' is only used to know the types of red arguments cell = self.jit_cell_at_key(greenkey) - entry_loop_token = cell.get_entry_loop_token() - if entry_loop_token is None: + procedure_token = cell.get_procedure_token() + if procedure_token is None: from pypy.jit.metainterp.compile import compile_tmp_callback if cell.counter == -1: # used to be a valid entry bridge, cell.counter = 0 # but was freed in the meantime. 
memmgr = warmrunnerdesc.memory_manager - entry_loop_token = compile_tmp_callback(cpu, jd, greenkey, - redboxes, memmgr) - cell.set_entry_loop_token(entry_loop_token) - return entry_loop_token + procedure_token = compile_tmp_callback(cpu, jd, greenkey, + redboxes, memmgr) + cell.set_procedure_token(procedure_token) + return procedure_token self.get_assembler_token = get_assembler_token # diff --git a/pypy/jit/tool/oparser.py b/pypy/jit/tool/oparser.py --- a/pypy/jit/tool/oparser.py +++ b/pypy/jit/tool/oparser.py @@ -70,7 +70,7 @@ self.invent_fail_descr = invent_fail_descr self.nonstrict = nonstrict self.model = get_model(self.use_mock_model) - self.looptoken = self.model.LoopToken() + self.original_jitcell_token = self.model.JitCellToken() def get_const(self, name, typ): if self._consts is None: @@ -243,7 +243,8 @@ descr = self.invent_fail_descr(self.model, fail_args) elif opnum == rop.JUMP: if descr is None and self.invent_fail_descr: - descr = self.looptoken + descr = self.original_jitcell_token + return opnum, args, descr, fail_args def create_op(self, opnum, args, result, descr): @@ -307,7 +308,7 @@ raise ParseError("unexpected dedent at line: %s" % newlines[num]) loop = self.model.ExtendedTreeLoop("loop") loop.comment = first_comment - loop.token = self.looptoken + loop.original_jitcell_token = self.original_jitcell_token loop.operations = ops loop.inputargs = inpargs loop.last_offset = last_offset diff --git a/pypy/jit/tool/oparser_model.py b/pypy/jit/tool/oparser_model.py --- a/pypy/jit/tool/oparser_model.py +++ b/pypy/jit/tool/oparser_model.py @@ -3,7 +3,7 @@ def get_real_model(): class LoopModel(object): - from pypy.jit.metainterp.history import TreeLoop, LoopToken + from pypy.jit.metainterp.history import TreeLoop, JitCellToken from pypy.jit.metainterp.history import Box, BoxInt, BoxFloat from pypy.jit.metainterp.history import ConstInt, ConstObj, ConstPtr, ConstFloat from pypy.jit.metainterp.history import BasicFailDescr @@ -33,13 +33,13 @@ return LoopModel def get_mock_model(): - class LoopModel(object): + class MockLoopModel(object): class TreeLoop(object): def __init__(self, name): self.name = name - class LoopToken(object): + class JitCellToken(object): I_am_a_descr = True class BasicFailDescr(object): @@ -107,9 +107,9 @@ class llhelper(object): pass - LoopModel.llhelper.BoxRef = LoopModel.BoxRef + MockLoopModel.llhelper.BoxRef = MockLoopModel.BoxRef - return LoopModel + return MockLoopModel def get_model(use_mock): diff --git a/pypy/jit/tool/test/test_jitoutput.py b/pypy/jit/tool/test/test_jitoutput.py --- a/pypy/jit/tool/test/test_jitoutput.py +++ b/pypy/jit/tool/test/test_jitoutput.py @@ -36,12 +36,12 @@ assert info.tracing_no == 1 assert info.asm_no == 1 assert info.blackhole_no == 1 - assert info.backend_no == 2 + assert info.backend_no == 1 assert info.ops.total == 2 assert info.recorded_ops.total == 2 assert info.recorded_ops.calls == 0 assert info.guards == 1 - assert info.opt_ops == 11 + assert info.opt_ops == 13 assert info.opt_guards == 2 assert info.forcings == 0 diff --git a/pypy/jit/tool/test/test_oparser.py b/pypy/jit/tool/test/test_oparser.py --- a/pypy/jit/tool/test/test_oparser.py +++ b/pypy/jit/tool/test/test_oparser.py @@ -4,7 +4,7 @@ from pypy.jit.tool.oparser import parse, OpParser from pypy.jit.metainterp.resoperation import rop -from pypy.jit.metainterp.history import AbstractDescr, BoxInt, LoopToken +from pypy.jit.metainterp.history import AbstractDescr, BoxInt, JitCellToken class BaseTestOparser(object): @@ -119,10 +119,10 @@ jump() ''' loop = 
self.parse(x) - assert loop.operations[0].getdescr() is loop.token + assert loop.operations[0].getdescr() is loop.original_jitcell_token def test_jump_target_other(self): - looptoken = LoopToken() + looptoken = JitCellToken() looptoken.I_am_a_descr = True # for the mock case x = ''' [] diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -188,7 +188,7 @@ self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 35, 'getfield_gc_pure': 6, 'guard_class': 22, 'int_add': 8, 'float_mul': 2, - 'guard_isnull': 2, 'jump': 4, 'int_ge': 4, + 'guard_isnull': 2, 'jump': 2, 'int_ge': 4, 'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4, 'guard_value': 2}) @@ -230,7 +230,7 @@ def test_specialization(self): self.run("specialization") # This is 3, not 2 because there is a bridge for the exit. - self.check_loop_count(3) + self.check_trace_count(3) def define_slice(): return """ @@ -325,7 +325,7 @@ def test_setslice(self): result = self.run("setslice") assert result == 11.0 - self.check_loop_count(1) + self.check_trace_count(1) self.check_simple_loop({'getinteriorfield_raw': 2, 'float_add' : 1, 'setinteriorfield_raw': 1, 'int_add': 3, 'int_eq': 1, 'guard_false': 1, 'jump': 1}) diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -3,7 +3,7 @@ from pypy.conftest import gettestobjspace, option from pypy.interpreter.pycode import PyCode from pypy.interpreter.gateway import interp2app -from pypy.jit.metainterp.history import LoopToken +from pypy.jit.metainterp.history import JitCellToken from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.logger import Logger from pypy.rpython.annlowlevel import (cast_instance_to_base_ptr, @@ -41,11 +41,11 @@ """, namespace={'ptr0': code_gcref}).operations def interp_on_compile(): - pypyjitdriver.on_compile(logger, LoopToken(), oplist, 'loop', + pypyjitdriver.on_compile(logger, JitCellToken(), oplist, 'loop', 0, False, ll_code) def interp_on_compile_bridge(): - pypyjitdriver.on_compile_bridge(logger, LoopToken(), oplist, 0) + pypyjitdriver.on_compile_bridge(logger, JitCellToken(), oplist, 0) cls.w_on_compile = space.wrap(interp2app(interp_on_compile)) cls.w_on_compile_bridge = space.wrap(interp2app(interp_on_compile_bridge)) diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -47,32 +47,33 @@ storage = LoopStorage() traces = [SimpleParser.parse_from_input(rawtrace) for rawtrace in rawtraces] traces = storage.reconnect_loops(traces) - self.loops = [LoopWithIds.from_trace(trace, storage) for trace in traces] + self.loops = [TraceWithIds.from_trace(trace, storage) for trace in traces] def _filter(self, loop, is_entry_bridge=False): - return is_entry_bridge == '*' or loop.is_entry_bridge == is_entry_bridge + if is_entry_bridge == '*': + return loop + assert is_entry_bridge in (True, False) + return PartialTraceWithIds(loop, is_entry_bridge) def loops_by_filename(self, filename, **kwds): """ Return all loops which start in the file ``filename`` """ - return [loop for loop in self.loops - if loop.filename == filename and self._filter(loop, **kwds)] + return [self._filter(loop, **kwds) for loop in 
self.loops + if loop.filename == filename] def loops_by_id(self, id, **kwds): """ Return all loops which contain the ID ``id`` """ - return [loop for loop in self.loops - if loop.has_id(id) and self._filter(loop, **kwds)] + return [self._filter(loop, **kwds) for loop in self.loops + if loop.has_id(id)] @classmethod def opnames(self, oplist): return [op.name for op in oplist] -class LoopWithIds(Function): - - is_entry_bridge = False +class TraceWithIds(Function): def __init__(self, *args, **kwds): Function.__init__(self, *args, **kwds) @@ -88,7 +89,6 @@ @classmethod def from_trace(cls, trace, storage): res = cls.from_operations(trace.operations, storage) - res.is_entry_bridge = 'entry bridge' in trace.comment return res def flatten_chunks(self): @@ -117,7 +117,7 @@ # # 2. compute the ids of all the inlined functions for chunk in self.chunks: - if isinstance(chunk, LoopWithIds): + if isinstance(chunk, TraceWithIds): chunk.compute_ids(ids) def get_set_of_opcodes(self): @@ -144,6 +144,10 @@ (opcode and opcode.__class__.__name__ == opcode_name): for op in self._ops_for_chunk(chunk, include_debug_merge_points): yield op + else: + for op in chunk.operations: + if op.name == 'label': + yield op def allops(self, *args, **kwds): return list(self._allops(*args, **kwds)) @@ -161,26 +165,72 @@ def _ops_by_id(self, id, include_debug_merge_points=False, opcode=None): opcode_name = opcode target_opcodes = self.ids[id] + loop_ops = self.allops(include_debug_merge_points, opcode) for chunk in self.flatten_chunks(): opcode = chunk.getopcode() if opcode in target_opcodes and (opcode_name is None or opcode.__class__.__name__ == opcode_name): for op in self._ops_for_chunk(chunk, include_debug_merge_points): - yield op + if op in loop_ops: + yield op def ops_by_id(self, *args, **kwds): return list(self._ops_by_id(*args, **kwds)) def match(self, expected_src, **kwds): - ops = list(self.allops()) - matcher = OpMatcher(ops, src=self.format_ops()) + ops = self.allops() + matcher = OpMatcher(ops) return matcher.match(expected_src, **kwds) def match_by_id(self, id, expected_src, **kwds): ops = list(self.ops_by_id(id, **kwds)) - matcher = OpMatcher(ops, src=self.format_ops(id)) + matcher = OpMatcher(ops) return matcher.match(expected_src) +class PartialTraceWithIds(TraceWithIds): + def __init__(self, trace, is_entry_bridge=False): + self.trace = trace + self.is_entry_bridge = is_entry_bridge + + def allops(self, *args, **kwds): + if self.is_entry_bridge: + return self.entry_bridge_ops(*args, **kwds) + else: + return self.simple_loop_ops(*args, **kwds) + + def simple_loop_ops(self, *args, **kwds): + ops = list(self._allops(*args, **kwds)) + labels = [op for op in ops if op.name == 'label'] + jumpop = self.chunks[-1].operations[-1] + assert jumpop.name == 'jump' + assert jumpop.getdescr() == labels[-1].getdescr() + i = ops.index(labels[-1]) + return ops[i+1:] + + def entry_bridge_ops(self, *args, **kwds): + ops = list(self._allops(*args, **kwds)) + labels = [op for op in ops if op.name == 'label'] + assert ops.index(labels[0]) == 0 + i = ops.index(labels[1]) + return ops[1:i] + + @property + def chunks(self): + return self.trace.chunks + + @property + def ids(self): + return self.trace.ids + + @property + def filename(self): + return self.trace.filename + + @property + def code(self): + return self.trace.code + + class InvalidMatch(Exception): opindex = None @@ -210,9 +260,9 @@ class OpMatcher(object): - def __init__(self, ops, src=None): + def __init__(self, ops): self.ops = ops - self.src = src + self.src = 
'\n'.join(map(str, ops)) self.alpha_map = {} @classmethod diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -8,7 +8,7 @@ from pypy.tool import logparser from pypy.jit.tool.jitoutput import parse_prof from pypy.module.pypyjit.test_pypy_c.model import Log, find_ids_range, find_ids, \ - LoopWithIds, OpMatcher + TraceWithIds, OpMatcher class BaseTestPyPyC(object): def setup_class(cls): @@ -50,7 +50,7 @@ cmdline.append(str(self.filepath)) # print cmdline, logfile - env={'PYPYLOG': 'jit-log-opt,jit-log-noopt,jit-summary:' + str(logfile)} + env={'PYPYLOG': 'jit-log-opt,jit-log-noopt,jit-log-virtualstate,jit-summary:' + str(logfile)} pipe = subprocess.Popen(cmdline, env=env, stdout=subprocess.PIPE, @@ -118,7 +118,7 @@ def match(self, src1, src2, **kwds): from pypy.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) - matcher = OpMatcher(loop.operations, src=src1) + matcher = OpMatcher(loop.operations) return matcher.match(src2, **kwds) def test_match_var(self): @@ -317,14 +317,17 @@ loops = log.loops_by_filename(self.filepath) assert len(loops) == 1 assert loops[0].filename == self.filepath - assert not loops[0].is_entry_bridge + assert len([op for op in loops[0].allops() if op.name == 'label']) == 0 + assert len([op for op in loops[0].allops() if op.name == 'guard_nonnull_class']) == 0 # loops = log.loops_by_filename(self.filepath, is_entry_bridge=True) assert len(loops) == 1 - assert loops[0].is_entry_bridge + assert len([op for op in loops[0].allops() if op.name == 'label']) == 0 + assert len([op for op in loops[0].allops() if op.name == 'guard_nonnull_class']) > 0 # loops = log.loops_by_filename(self.filepath, is_entry_bridge='*') - assert len(loops) == 2 + assert len(loops) == 1 + assert len([op for op in loops[0].allops() if op.name == 'label']) == 2 def test_loops_by_id(self): def f(): diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -22,7 +22,7 @@ guard_true(i7, descr=...) i9 = int_add(i5, 1) --TICK-- - jump(p0, p1, p2, p3, p4, i9, i6, descr=) + jump(p0, p1, p2, p3, p4, i9, i6, descr=...) """) def test_array_sum(self): @@ -47,7 +47,7 @@ guard_no_overflow(descr=...) i18 = int_add(i7, 1) --TICK-- - jump(p0, p1, p2, p3, p4, p5, i18, i16, p8, i9, i10, descr=) + jump(p0, p1, p2, p3, p4, p5, i18, i16, p8, i9, i10, descr=...) """) def test_array_intimg(self): @@ -85,7 +85,7 @@ setarrayitem_raw(i11, i8, _, descr=<.*ArrayNoLengthDescr>) i28 = int_add(i8, 1) --TICK-- - jump(p0, p1, p2, p3, p4, p5, p6, i28, i15, p9, i10, i11, descr=) + jump(p0, p1, p2, p3, p4, p5, p6, i28, i15, p9, i10, i11, descr=...) """) def test_array_of_doubles(self): @@ -115,7 +115,7 @@ guard_true(i18, descr=...) i20 = int_add(i6, 1) --TICK-- - jump(..., descr=) + jump(..., descr=...) """) def test_array_of_floats(self): @@ -152,7 +152,7 @@ guard_true(i21, descr=...) i23 = int_add(i6, 1) --TICK-- - jump(..., descr=) + jump(..., descr=...) """ % (arraydescr, arraydescr, arraydescr)) diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -99,7 +99,7 @@ i15 = int_add_ovf(i12, 1) guard_no_overflow(descr=...) 
--TICK-- - jump(p0, p1, p2, p3, p4, i15, i6, p7, p8, descr=) + jump(p0, p1, p2, p3, p4, i15, i6, p7, p8, descr=...) """) def test_method_call(self): @@ -142,7 +142,7 @@ i19 = int_add_ovf(i10, i17) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p3, p4, p5, i19, p7, i17, i9, i10, p11, p12, p13, descr=) + jump(p0, p1, p2, p3, p4, p5, i19, p7, i17, i9, i10, p11, p12, p13, descr=...) """) def test_static_classmethod_call(self): @@ -174,7 +174,7 @@ guard_no_overflow(descr=...) i18 = force_token() --TICK-- - jump(..., descr=) + jump(..., descr=...) """) def test_default_and_kw(self): @@ -394,7 +394,7 @@ guard_not_invalidated(descr=...) i120 = int_add(i5, 1) --TICK-- - jump(..., descr=) + jump(..., descr=...) """) def test_global_closure_has_constant_cells(self): @@ -438,7 +438,7 @@ i33 = int_add_ovf(i15, i32) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p5, i33, i32, p23, p30, p24, descr=) + p39 = same_as(...) # Should be killed by backend """) def test_local_closure_is_virtual(self): @@ -461,7 +461,7 @@ p22 = new_with_vtable(ConstClass(W_IntObject)) setfield_gc(p22, i13, descr=) setfield_gc(p4, p22, descr=) - jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=) + jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=...) """) def test_kwargs_virtual(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -115,7 +115,7 @@ i35 = int_add_ovf(i5, i34) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p3, p4, i35, p13, i7, descr=) + jump(p0, p1, p2, p3, p4, i35, p13, i7, descr=...) """) def test_floatlist_unpack_without_calls(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_exception.py b/pypy/module/pypyjit/test_pypy_c/test_exception.py --- a/pypy/module/pypyjit/test_pypy_c/test_exception.py +++ b/pypy/module/pypyjit/test_pypy_c/test_exception.py @@ -42,7 +42,7 @@ i12 = int_sub_ovf(i3, 1) guard_no_overflow(descr=...) --TICK-- - jump(..., descr=) + jump(..., descr=...) """) def test_exception_inside_loop_2(self): @@ -89,5 +89,5 @@ --EXC-TICK-- i14 = int_add(i4, 1) --TICK-- - jump(..., descr=) + jump(..., descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -15,12 +15,14 @@ g() log = self.run(main, [500]) - loop, = log.loops_by_filename(self.filepath) + loop, = log.loops_by_filename(self.filepath, is_entry_bridge='*') assert loop.match_by_id("generator", """ + ... + label(..., descr=...) i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) + setfield_gc(p45, i29, descr=) + setarrayitem_gc(p8, 0, p45, descr=) i47 = arraylen_gc(p8, descr=) # Should be removed by backend - setarrayitem_gc(p8, 0, p45, descr=) - setfield_gc(p45, i29, descr=) jump(..., descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -27,7 +27,7 @@ i9 = int_add_ovf(i5, 2) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p3, p4, i9, i6, descr=) + jump(p0, p1, p2, p3, p4, i9, i6, descr=...) """) def test_load_attr(self): @@ -52,7 +52,7 @@ i10 = int_add_ovf(i5, i7) guard_no_overflow(descr=...) 
--TICK-- - jump(p0, p1, p2, p3, p4, i10, i6, i7, p8, descr=) + jump(p0, p1, p2, p3, p4, i10, i6, i7, p8, descr=...) """) def test_getattr_with_dynamic_attribute(self): @@ -125,9 +125,9 @@ i12 = force_token() --TICK-- p20 = new_with_vtable(ConstClass(W_IntObject)) + setfield_gc(p20, i11, descr=) setfield_gc(ConstPtr(ptr21), p20, descr=) - setfield_gc(p20, i11, descr=) - jump(p0, p1, p2, p3, p4, p20, p6, i7, p20, descr=) + jump(p0, p1, p2, p3, p4, p20, p6, i7, p20, descr=...) """) def test_oldstyle_newstyle_mix(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_intbound.py b/pypy/module/pypyjit/test_pypy_c/test_intbound.py --- a/pypy/module/pypyjit/test_pypy_c/test_intbound.py +++ b/pypy/module/pypyjit/test_pypy_c/test_intbound.py @@ -97,7 +97,7 @@ guard_no_overflow(descr=...) i17 = int_add(i8, 1) --TICK-- - jump(p0, p1, p2, p3, p4, i14, i12, i17, p8, i9, descr=) + jump(p0, p1, p2, p3, p4, i14, i12, i17, p8, i9, descr=...) """) def test_intbound_sub_lt(self): @@ -121,7 +121,7 @@ guard_no_overflow(descr=...) i13 = int_add(i5, 1) --TICK-- - jump(p0, p1, p2, p3, i11, i13, descr=) + jump(p0, p1, p2, p3, i11, i13, descr=...) """) def test_intbound_addsub_ge(self): @@ -150,7 +150,7 @@ guard_no_overflow(descr=...) i19 = int_add(i8, 1) --TICK-- - jump(p0, p1, p2, p3, p4, i16, i14, i19, p8, i9, descr=) + jump(p0, p1, p2, p3, p4, i16, i14, i19, p8, i9, descr=...) """) def test_intbound_addmul_ge(self): @@ -178,7 +178,7 @@ guard_no_overflow(descr=...) i21 = int_add(i8, 1) --TICK-- - jump(p0, p1, p2, p3, p4, i18, i14, i21, p8, descr=) + jump(p0, p1, p2, p3, p4, i18, i14, i21, p8, descr=...) """) def test_intbound_eq(self): @@ -210,7 +210,7 @@ guard_no_overflow(descr=...) i16 = int_add(i8, 1) --TICK-- - jump(p0, p1, p2, p3, p4, p6, i14, i16, p8, descr=) + jump(p0, p1, p2, p3, p4, p6, i14, i16, p8, descr=...) """) def test_intbound_mul(self): @@ -236,7 +236,7 @@ guard_no_overflow(descr=...) i14 = int_add(i6, 1) --TICK-- - jump(p0, p1, p2, p3, p4, i12, i14, descr=) + jump(p0, p1, p2, p3, p4, i12, i14, descr=...) """) def test_assert(self): @@ -257,7 +257,7 @@ guard_no_overflow(descr=...) i12 = int_add(i6, 1) --TICK-- - jump(p0, p1, p2, p3, p4, i10, i12, descr=) + jump(p0, p1, p2, p3, p4, i10, i12, descr=...) """) def test_xor(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -29,7 +29,7 @@ f5 = float_add(f0, f4) i4 = int_add(i0, 1) --TICK-- - jump(..., descr=) + jump(..., descr=...) """) def test_sin_cos(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_min_max.py b/pypy/module/pypyjit/test_pypy_c/test_min_max.py --- a/pypy/module/pypyjit/test_pypy_c/test_min_max.py +++ b/pypy/module/pypyjit/test_pypy_c/test_min_max.py @@ -22,7 +22,7 @@ guard_no_overflow(descr=...) i11 = int_add(i4, 1) --TICK-- - jump(p0, p1, p2, p3, i11, i9, descr=) + jump(p0, p1, p2, p3, i11, i9, descr=...) """) def test_silly_max(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -34,7 +34,10 @@ jump(p0, p1, p2, p3, p4, p5, i13, i11, i8, descr=...) """ assert loop0.match(expected) - assert loop1.match(expected) + # XXX: The retracing fails to form a loop since j + # becomes constant 0 after the bridge and constant 1 at the end of the + # loop. A bridge back to the peramble is produced instead. 
+ #assert loop1.match(expected) def test_factorial(self): def fact(n): @@ -88,7 +91,7 @@ guard_true(i9, descr=...) f10 = float_add(f8, f5) --TICK-- - jump(p0, p1, p2, p3, p4, f10, p6, f7, f8, descr=) + jump(p0, p1, p2, p3, p4, f10, p6, f7, f8, descr=...) """) @@ -159,7 +162,7 @@ i27 = int_add_ovf(i7, i18) guard_no_overflow(descr=...) --TICK-- - jump(..., descr=) + jump(..., descr=...) """) @@ -219,7 +222,7 @@ i28 = int_add_ovf(i10, i25) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p3, p4, p5, p6, i28, i25, p9, p10, p11, p12, i19, descr=) + jump(p0, p1, p2, p3, p4, p5, p6, i28, i25, p9, p10, p11, p12, i19, descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -156,7 +156,7 @@ guard_no_overflow(descr=...) i40 = int_sub(i4, 1) --TICK-- - jump(p0, p1, p2, p3, i40, i38, descr=) + jump(p0, p1, p2, p3, i40, i38, descr=...) """) def test_getattr_promote(self): @@ -179,7 +179,7 @@ log = self.run(main, [1000]) assert log.result == main(1000) loops = log.loops_by_filename(self.filepath) - assert len(loops) == 2 + assert len(loops) == 1 for loop in loops: loop.match_by_id('getattr',''' guard_not_invalidated(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_thread.py b/pypy/module/pypyjit/test_pypy_c/test_thread.py --- a/pypy/module/pypyjit/test_pypy_c/test_thread.py +++ b/pypy/module/pypyjit/test_pypy_c/test_thread.py @@ -24,5 +24,5 @@ guard_true(i2, descr=...) i3 = int_add(i0, 1) --THREAD-TICK-- - jump(..., descr=) + jump(..., descr=...) """) diff --git a/pypy/rlib/rsre/test/test_zjit.py b/pypy/rlib/rsre/test/test_zjit.py --- a/pypy/rlib/rsre/test/test_zjit.py +++ b/pypy/rlib/rsre/test/test_zjit.py @@ -70,7 +70,8 @@ def test_simple_match_repeated(self): res = self.meta_interp_match(r"abcdef", "abcdef", repeat=10) assert res == 6 - self.check_tree_loop_count(1) + self.check_trace_count(1) + self.check_jitcell_token_count(1) def test_match_minrepeat_1(self): res = self.meta_interp_match(r".*?abc", "xxxxxxxxxxxxxxabc") From noreply at buildbot.pypy.org Thu Dec 29 09:57:37 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Dec 2011 09:57:37 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: move is_comparison_or_ovf_op to llsuport Message-ID: <20111229085737.BD17D82C01@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50947:d8831765a657 Date: 2011-12-29 09:47 +0100 http://bitbucket.org/pypy/pypy/changeset/d8831765a657/ Log: move is_comparison_or_ovf_op to llsuport diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -527,6 +527,17 @@ return longevity, useful +def is_comparison_or_ovf_op(opnum): + from pypy.jit.metainterp.resoperation import opclasses + cls = opclasses[opnum] + # hack hack: in theory they are instance method, but they don't use + # any instance field, we can use a fake object + class Fake(cls): + pass + op = Fake(None) + return op.is_comparison() or op.is_ovf() + + def not_implemented(msg): os.write(2, '[llsupport/regalloc] %s\n' % msg) raise NotImplementedError(msg) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -20,7 +20,7 @@ from pypy.jit.backend.llsupport.descr import BaseCallDescr, 
BaseSizeDescr from pypy.jit.backend.llsupport.descr import InteriorFieldDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ - TempBox, compute_vars_longevity + TempBox, compute_vars_longevity, is_comparison_or_ovf_op from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64, MY_COPY_OF_REGS from pypy.rlib.rarithmetic import r_longlong @@ -1461,16 +1461,6 @@ def add_none_argument(fn): return lambda self, op: fn(self, op, None) -def is_comparison_or_ovf_op(opnum): - from pypy.jit.metainterp.resoperation import opclasses - cls = opclasses[opnum] - # hack hack: in theory they are instance method, but they don't use - # any instance field, we can use a fake object - class Fake(cls): - pass - op = Fake(None) - return op.is_comparison() or op.is_ovf() - for name, value in RegAlloc.__dict__.iteritems(): if name.startswith('consider_'): name = name[len('consider_'):] From noreply at buildbot.pypy.org Thu Dec 29 09:57:39 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Dec 2011 09:57:39 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: implement changes needed for target/label functionality Message-ID: <20111229085739.0FF4F82C01@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50948:3a1fca872e84 Date: 2011-12-29 09:49 +0100 http://bitbucket.org/pypy/pypy/changeset/3a1fca872e84/ Log: implement changes needed for target/label functionality diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -10,7 +10,7 @@ PC_OFFSET, N_REGISTERS_SAVED_BY_MALLOC from pypy.jit.backend.arm.codebuilder import ARMv7Builder, OverwritingBuilder from pypy.jit.backend.arm.regalloc import (Regalloc, ARMFrameManager, - ARMv7RegisterMananger, check_imm_arg, + ARMv7RegisterManager, check_imm_arg, operations as regalloc_operations, operations_with_guard as regalloc_operations_with_guard) from pypy.jit.backend.arm.jump import remap_frame_layout @@ -87,11 +87,11 @@ assert self.memcpy_addr != 0, 'setup_once() not called?' 
self.mc = ARMv7Builder() self.pending_guards = [] - self.currently_compiling_loop = None assert self.datablockwrapper is None allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, allblocks) + self.target_tokens_currently_compiling = {} return operations def teardown(self): @@ -99,7 +99,6 @@ self._regalloc = None self.mc = None self.pending_guards = None - self.currently_compiling_loop = None assert self.datablockwrapper is None def setup_once(self): @@ -326,10 +325,10 @@ mc.SUB_rr(r.r0.value, r.r1.value, r.r0.value) addr = self.cpu.gc_ll_descr.get_malloc_slowpath_addr() # XXX replace with an STMxx operation - for reg, ofs in ARMv7RegisterMananger.REGLOC_TO_COPY_AREA_OFS.items(): + for reg, ofs in ARMv7RegisterManager.REGLOC_TO_COPY_AREA_OFS.items(): mc.STR_ri(reg.value, r.fp.value, imm=ofs) mc.BL(addr) - for reg, ofs in ARMv7RegisterMananger.REGLOC_TO_COPY_AREA_OFS.items(): + for reg, ofs in ARMv7RegisterManager.REGLOC_TO_COPY_AREA_OFS.items(): mc.LDR_ri(reg.value, r.fp.value, imm=ofs) mc.CMP_ri(r.r0.value, 0) @@ -546,9 +545,9 @@ self.mc.VLDR(r.vfp_ip.value, r.ip.value) self.mov_loc_loc(r.vfp_ip, loc) - def gen_direct_bootstrap_code(self, loop_head, looptoken, inputargs): + def gen_direct_bootstrap_code(self, loop_head, arglocs, frame_depth, inputargs): self.gen_func_prolog() - nonfloatlocs, floatlocs = looptoken._arm_arglocs + nonfloatlocs, floatlocs = arglocs reg_args = count_reg_args(inputargs) @@ -627,7 +626,7 @@ sp_patch_location = self._prepare_sp_patch_position() self.mc.B_offs(loop_head) - self._patch_sp_offset(sp_patch_location, looptoken._arm_frame_depth) + self._patch_sp_offset(sp_patch_location, frame_depth) def _dump(self, ops, type='loop'): debug_start('jit-backend-ops') @@ -642,8 +641,11 @@ clt.allgcrefs = [] looptoken.compiled_loop_token = clt + if not we_are_translated(): + # Arguments should be unique + assert len(set(inputargs)) == len(inputargs) + operations = self.setup(looptoken, operations) - self.currently_compiling_loop = looptoken self._dump(operations) self.align() @@ -659,25 +661,31 @@ looptoken._arm_loop_code = loop_head looptoken._arm_bootstrap_code = 0 - looptoken._arm_frame_depth = -1 + clt.frame_depth = -1 frame_depth = self._assemble(operations, regalloc) - looptoken._arm_frame_depth = frame_depth - self._patch_sp_offset(sp_patch_location, looptoken._arm_frame_depth) + clt.frame_depth = frame_depth + self._patch_sp_offset(sp_patch_location, frame_depth) self.align() direct_bootstrap_code = self.mc.currpos() - self.gen_direct_bootstrap_code(loop_head, looptoken, inputargs) + self.gen_direct_bootstrap_code(loop_head, arglocs, + frame_depth, inputargs) self.write_pending_failure_recoveries() - loop_start = self.materialize_loop(looptoken) - looptoken._arm_bootstrap_code = loop_start - direct_code_start = loop_start + direct_bootstrap_code + + rawstart = self.materialize_loop(looptoken) + direct_code_start = rawstart + direct_bootstrap_code + + looptoken._arm_bootstrap_code = rawstart looptoken._arm_direct_bootstrap_code = direct_code_start - self.process_pending_guards(loop_start) + + self.process_pending_guards(rawstart) + self.fixup_target_tokens(rawstart) + if log and not we_are_translated(): print 'Loop', inputargs, operations - self.mc._dump_trace(loop_start, + self.mc._dump_trace(rawstart, 'loop_%s.asm' % self.cpu.total_compiled_loops) print 'Done assembling loop with token %r' % looptoken self.teardown() @@ -688,7 +696,7 @@ frame_depth = regalloc.frame_manager.get_frame_depth() 
jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: - frame_depth = max(frame_depth, jump_target_descr._arm_frame_depth) + frame_depth = max(frame_depth, jump_target_descr._arm_clt.frame_depth) return frame_depth def assemble_bridge(self, faildescr, inputargs, operations, @@ -698,7 +706,7 @@ assert isinstance(faildescr, AbstractFailDescr) code = faildescr._failure_recovery_code enc = rffi.cast(rffi.CCHARP, code) - frame_depth = faildescr._arm_frame_depth + frame_depth = faildescr._arm_current_frame_depth arglocs = self.decode_inputargs(enc) if not we_are_translated(): assert len(inputargs) == len(arglocs) @@ -713,17 +721,27 @@ self._patch_sp_offset(sp_patch_location, frame_depth) self.write_pending_failure_recoveries() - bridge_start = self.materialize_loop(original_loop_token) - self.process_pending_guards(bridge_start) + rawstart = self.materialize_loop(original_loop_token) + self.process_pending_guards(rawstart) self.patch_trace(faildescr, original_loop_token, - bridge_start, regalloc) - if log and not we_are_translated(): - print 'Bridge', inputargs, operations - self.mc._dump_trace(bridge_start, 'bridge_%d.asm' % - self.cpu.total_compiled_bridges) + rawstart, regalloc) + self.fixup_target_tokens(rawstart) + + if not we_are_translated(): + # for the benefit of tests + faildescr._arm_bridge_frame_depth = frame_depth + if log: + print 'Bridge', inputargs, operations + self.mc._dump_trace(rawstart, 'bridge_%d.asm' % + self.cpu.total_compiled_bridges) + self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth) self.teardown() + def fixup_target_tokens(self, rawstart): + for targettoken in self.target_tokens_currently_compiling: + targettoken._arm_loop_code += rawstart + self.target_tokens_currently_compiling = None def target_arglocs(self, loop_token): return loop_token._arm_arglocs @@ -745,7 +763,7 @@ memaddr = self._gen_path_to_exit_path(descr, tok.failargs, tok.faillocs, save_exc=tok.save_exc) # store info on the descr - descr._arm_frame_depth = tok.faillocs[0].getint() + descr._arm_current_frame_depth = tok.faillocs[0].getint() descr._failure_recovery_code = memaddr descr._arm_guard_pos = pos diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -22,7 +22,8 @@ from pypy.jit.backend.arm.locations import imm from pypy.jit.backend.llsupport import symbolic from pypy.jit.metainterp.history import (Box, AbstractFailDescr, - LoopToken, INT, FLOAT, REF) + INT, FLOAT, REF) +from pypy.jit.metainterp.history import JitCellToken, TargetToken from pypy.jit.metainterp.resoperation import rop from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.lltypesystem import lltype, rffi, rstr @@ -305,15 +306,14 @@ def emit_op_jump(self, op, arglocs, regalloc, fcond): descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, TargetToken) assert fcond == c.AL self._insert_checks() - if descr is self.currently_compiling_loop: + if descr in self.target_tokens_currently_compiling: self.mc.B_offs(descr._arm_loop_code, fcond) else: - target = descr._arm_bootstrap_code + descr._arm_loop_code - self.mc.B(target, fcond) + self.mc.B(descr._arm_loop_code, fcond) return fcond def emit_op_finish(self, op, arglocs, regalloc, fcond): @@ -373,8 +373,7 @@ regalloc, fcond, op.result) descr = op.getdescr() #XXX Hack, Hack, Hack - if (op.result and not we_are_translated() - and not isinstance(descr, LoopToken)): + if 
(op.result and not we_are_translated()): #XXX check result type loc = regalloc.rm.call_result_location(op.result) size = descr.get_result_size(False) @@ -999,7 +998,7 @@ self._write_fail_index(fail_index) descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, JitCellToken) # XXX check this assert op.numargs() == len(descr._arm_arglocs[0]) resbox = TempInt() diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -16,7 +16,8 @@ from pypy.jit.codewriter import longlong from pypy.jit.metainterp.history import (Const, ConstInt, ConstFloat, ConstPtr, Box, BoxPtr, - INT, REF, FLOAT, LoopToken) + INT, REF, FLOAT) +from pypy.jit.metainterp.history import JitCellToken, TargetToken from pypy.jit.metainterp.resoperation import rop from pypy.jit.backend.llsupport.descr import BaseFieldDescr, BaseArrayDescr, \ BaseSizeDescr, InteriorFieldDescr @@ -26,6 +27,11 @@ from pypy.jit.codewriter.effectinfo import EffectInfo +# xxx hack: set a default value for TargetToken._arm_loop_code. If 0, we know +# that it is a LABEL that was not compiled yet. +TargetToken._arm_loop_code = 0 + + class TempInt(TempBox): type = INT @@ -80,6 +86,7 @@ else: return loc.position + def void(self, op, fcond): return [] @@ -128,7 +135,7 @@ return reg -class ARMv7RegisterMananger(RegisterManager): +class ARMv7RegisterManager(RegisterManager): all_regs = r.all_regs box_types = None # or a list of acceptable types no_lower_byte_regs = all_regs @@ -192,6 +199,7 @@ self.assembler = assembler self.frame_manager = frame_manager self.jump_target_descr = None + self.final_jump_op = None def loc(self, var): if var.type == FLOAT: @@ -294,7 +302,7 @@ fm = self.frame_manager asm = self.assembler self.vfprm = VFPRegisterManager(longevity, fm, asm) - self.rm = ARMv7RegisterMananger(longevity, fm, asm) + self.rm = ARMv7RegisterManager(longevity, fm, asm) return useful def prepare_loop(self, inputargs, operations): @@ -629,11 +637,24 @@ op = operations[-1] if op.getopnum() != rop.JUMP: return + self.final_jump_op = op descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, TargetToken) + if descr._arm_loop_code != 0: + # if the target LABEL was already compiled, i.e. if it belongs + # to some already-compiled piece of code + self._compute_hint_frame_locations_from_descr(descr) + #else: + # The loop ends in a JUMP going back to a LABEL in the same loop. + # We cannot fill 'hint_frame_locations' immediately, but we can + # wait until the corresponding prepare_op_label() to know where the + # we would like the boxes to be after the jump. 
+ + def _compute_hint_frame_locations_from_descr(self, descr): nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr) - for i in range(op.numargs()): - box = op.getarg(i) + jump_op = self.final_jump_op + for i in range(jump_op.numargs()): + box = jump_op.getarg(i) if isinstance(box, Box): loc = nonfloatlocs[i] if loc is not None and loc.is_stack(): @@ -647,16 +668,13 @@ def prepare_op_jump(self, op, fcond): descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, TargetToken) self.jump_target_descr = descr nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr) # get temporary locs tmploc = r.ip - box = TempFloat() - # compute 'vfptmploc' to be all_regs[0] by spilling what is there - vfptmp = self.vfprm.all_regs[0] - vfptmploc = self.vfprm.force_allocate_reg(box, selected_reg=vfptmp) + vfptmploc = r.vfp_ip # Part about non-floats # XXX we don't need a copy, we only just the original list @@ -671,7 +689,6 @@ remap_frame_layout_mixed(self.assembler, src_locations1, dst_locations1, tmploc, src_locations2, dst_locations2, vfptmploc) - self.possibly_free_var(box) return [] def prepare_op_setfield_gc(self, op, fcond): @@ -1066,6 +1083,39 @@ self.possibly_free_var(op.result) return [res_loc] + def prepare_op_label(self, op, fcond): + # XXX big refactoring needed? + descr = op.getdescr() + assert isinstance(descr, TargetToken) + inputargs = op.getarglist() + floatlocs = [None] * len(inputargs) + nonfloatlocs = [None] * len(inputargs) + + for i in range(len(inputargs)): + arg = inputargs[i] + assert not isinstance(arg, Const) + loc = self.loc(arg) + if arg.type == FLOAT: + floatlocs[i] = loc + else: + nonfloatlocs[i] = loc + if loc.is_reg(): + self.frame_manager.mark_as_free(arg) + descr._arm_arglocs = nonfloatlocs, floatlocs + descr._arm_loop_code = self.assembler.mc.currpos() + descr._arm_clt = self.assembler.current_clt + self.assembler.target_tokens_currently_compiling[descr] = None + self.possibly_free_vars_for_op(op) + # + # if the LABEL's descr is precisely the target of the JUMP at the + # end of the same loop, i.e. if what we are compiling is a single + # loop that ends up jumping to this LABEL, then we can now provide + # the hints about the expected position of the spilled variables. + jump_op = self.final_jump_op + if jump_op is not None and jump_op.getdescr() is descr: + self._compute_hint_frame_locations_from_descr(descr) + return None + def prepare_guard_call_may_force(self, op, guard_op, fcond): faildescr = guard_op.getdescr() fail_index = self.cpu.get_fail_descr_number(faildescr) @@ -1106,7 +1156,7 @@ def prepare_guard_call_assembler(self, op, guard_op, fcond): descr = op.getdescr() - assert isinstance(descr, LoopToken) + assert isinstance(descr, JitCellToken) jd = descr.outermost_jitdriver_sd assert jd is not None size = jd.portal_calldescr.get_result_size( diff --git a/pypy/jit/backend/arm/runner.py b/pypy/jit/backend/arm/runner.py --- a/pypy/jit/backend/arm/runner.py +++ b/pypy/jit/backend/arm/runner.py @@ -115,7 +115,7 @@ faildescr = self.get_fail_descr_from_number(fail_index) rffi.cast(TP, addr_of_force_index)[0] = ~fail_index # start of "no gc operation!" 
block - frame_depth = faildescr._arm_frame_depth * WORD + frame_depth = faildescr._arm_current_frame_depth * WORD addr_end_of_frame = (addr_of_force_index - (frame_depth + len(all_regs) * WORD + diff --git a/pypy/jit/backend/arm/test/test_assembler.py b/pypy/jit/backend/arm/test/test_assembler.py --- a/pypy/jit/backend/arm/test/test_assembler.py +++ b/pypy/jit/backend/arm/test/test_assembler.py @@ -1,8 +1,6 @@ -from pypy.jit.backend.arm import arch from pypy.jit.backend.arm import conditions as c from pypy.jit.backend.arm import registers as r -from pypy.jit.backend.arm.arch import WORD -from pypy.jit.backend.arm.arch import arm_int_div, arm_int_div_sign +from pypy.jit.backend.arm.arch import arm_int_div from pypy.jit.backend.arm.assembler import AssemblerARM from pypy.jit.backend.arm.locations import imm from pypy.jit.backend.arm.test.support import skip_unless_arm, run_asm @@ -10,21 +8,21 @@ from pypy.jit.metainterp.resoperation import rop from pypy.rpython.annlowlevel import llhelper -from pypy.rpython.lltypesystem import lltype, rffi, llmemory -from pypy.jit.metainterp.history import LoopToken +from pypy.rpython.lltypesystem import lltype, rffi +from pypy.jit.metainterp.history import JitCellToken from pypy.jit.backend.model import CompiledLoopToken skip_unless_arm() CPU = getcpuclass() + + class TestRunningAssembler(object): def setup_method(self, method): cpu = CPU(None, None) - #lp = LoopToken() - #lp.compiled_loop_token = CompiledLoopToken(cpu, None) self.a = AssemblerARM(cpu) self.a.setup_once() - token = LoopToken() + token = JitCellToken() clt = CompiledLoopToken(cpu, 0) clt.allgcrefs = [] token.compiled_loop_token = clt @@ -33,7 +31,8 @@ def test_make_operation_list(self): i = rop.INT_ADD from pypy.jit.backend.arm import assembler - assert assembler.asm_operations[i] is AssemblerARM.emit_op_int_add.im_func + assert assembler.asm_operations[i] \ + is AssemblerARM.emit_op_int_add.im_func def test_load_small_int_to_reg(self): self.a.gen_func_prolog() @@ -77,7 +76,6 @@ self.a.gen_func_epilog() assert run_asm(self.a) == 464 - def test_or(self): self.a.gen_func_prolog() self.a.mc.MOV_ri(r.r1.value, 8) @@ -115,7 +113,7 @@ self.a.gen_func_prolog() self.a.mc.MOV_ri(r.r1.value, 1) loop_head = self.a.mc.currpos() - self.a.mc.CMP_ri(r.r1.value, 0) # z=0, z=1 + self.a.mc.CMP_ri(r.r1.value, 0) # z=0, z=1 self.a.mc.MOV_ri(r.r1.value, 0, cond=c.NE) self.a.mc.MOV_ri(r.r1.value, 7, cond=c.EQ) self.a.mc.B_offs(loop_head, c.NE) @@ -143,7 +141,8 @@ self.a.mc.MOV_ri(r.r0.value, 123, cond=c.NE) for x in range(15): - self.a.mc.POP([reg.value for reg in r.callee_restored_registers], cond=c.NE) + self.a.mc.POP( + [reg.value for reg in r.callee_restored_registers], cond=c.NE) self.a.mc.MOV_ri(r.r1.value, 33) self.a.mc.MOV_ri(r.r0.value, 23) @@ -160,7 +159,8 @@ self.a.mc.MOV_ri(r.r0.value, 123, cond=c.NE) for x in range(100): - self.a.mc.POP([reg.value for reg in r.callee_restored_registers], cond=c.NE) + self.a.mc.POP( + [reg.value for reg in r.callee_restored_registers], cond=c.NE) self.a.mc.MOV_ri(r.r1.value, 33) self.a.mc.MOV_ri(r.r0.value, 23) @@ -216,7 +216,6 @@ self.a.gen_func_epilog() assert run_asm(self.a) == -36 - def test_bl_with_conditional_exec(self): functype = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Signed)) call_addr = rffi.cast(lltype.Signed, llhelper(functype, callme)) @@ -240,7 +239,7 @@ assert run_asm(self.a) == 2478 def test_load_store(self): - x = 0x60002224 + x = 0x60002224 self.a.gen_func_prolog() self.a.mc.gen_load_int(r.r1.value, x) self.a.mc.MOV_ri(r.r3.value, 8) @@ 
-249,7 +248,7 @@ self.a.gen_func_epilog() assert run_asm(self.a) == x + def callme(inp): i = inp + 10 return i - diff --git a/pypy/jit/backend/arm/test/test_calling_convention.py b/pypy/jit/backend/arm/test/test_calling_convention.py --- a/pypy/jit/backend/arm/test/test_calling_convention.py +++ b/pypy/jit/backend/arm/test/test_calling_convention.py @@ -1,13 +1,15 @@ from pypy.rpython.annlowlevel import llhelper -from pypy.jit.metainterp.history import LoopToken +from pypy.jit.metainterp.history import JitCellToken from pypy.jit.backend.test.calling_convention_test import TestCallingConv, parse from pypy.rpython.lltypesystem import lltype from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.backend.arm.test.support import skip_unless_arm skip_unless_arm() -# ../../test/calling_convention_test.py + class TestARMCallingConvention(TestCallingConv): + # ../../test/calling_convention_test.py + def test_call_argument_spilling(self): # bug when we have a value in r0, that is overwritten by an argument # and needed after the call, so that the register gets spilled after it @@ -28,7 +30,7 @@ i99 = call(ConstClass(func_ptr), 22, descr=calldescr) finish(%s, i99)""" % (args, args) loop = parse(ops, namespace=locals()) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) for x in range(11): self.cpu.set_future_value_int(x, x) diff --git a/pypy/jit/backend/arm/test/test_gc_integration.py b/pypy/jit/backend/arm/test/test_gc_integration.py --- a/pypy/jit/backend/arm/test/test_gc_integration.py +++ b/pypy/jit/backend/arm/test/test_gc_integration.py @@ -3,63 +3,74 @@ """ import py -from pypy.jit.metainterp.history import BoxInt, ConstInt,\ - BoxPtr, ConstPtr, TreeLoop +from pypy.jit.metainterp.history import BoxInt, \ + BoxPtr, TreeLoop, TargetToken from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.codewriter import heaptracker from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.llsupport.gc import GcLLDescription from pypy.jit.backend.detect_cpu import getcpuclass -from pypy.jit.backend.arm.regalloc import Regalloc from pypy.jit.backend.arm.arch import WORD -from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.annlowlevel import llhelper -from pypy.rpython.lltypesystem import rclass, rstr -from pypy.jit.backend.llsupport.gc import GcLLDescr_framework, GcPtrFieldDescr +from pypy.rpython.lltypesystem import rclass +from pypy.jit.backend.llsupport.gc import GcLLDescr_framework from pypy.jit.backend.arm.test.test_regalloc import MockAssembler from pypy.jit.backend.arm.test.test_regalloc import BaseTestRegalloc -from pypy.jit.backend.arm.regalloc import ARMv7RegisterMananger, ARMFrameManager,\ - VFPRegisterManager +from pypy.jit.backend.arm.regalloc import ARMFrameManager, VFPRegisterManager from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.backend.arm.test.support import skip_unless_arm +from pypy.jit.backend.arm.regalloc import Regalloc, ARMv7RegisterManager skip_unless_arm() CPU = getcpuclass() + class MockGcRootMap(object): is_shadow_stack = False + def get_basic_shape(self, is_64_bit): return ['shape'] + def add_frame_offset(self, shape, offset): shape.append(offset) + def add_callee_save_reg(self, shape, reg_index): - index_to_name = { 1: 'ebx', 2: 'esi', 3: 'edi' } + index_to_name = {1: 'ebx', 2: 'esi', 3: 'edi'} shape.append(index_to_name[reg_index]) + def compress_callshape(self, shape, 
datablockwrapper): assert datablockwrapper == 'fakedatablockwrapper' assert shape[0] == 'shape' return ['compressed'] + shape[1:] + class MockGcRootMap2(object): is_shadow_stack = False + def get_basic_shape(self, is_64_bit): return ['shape'] + def add_frame_offset(self, shape, offset): shape.append(offset) + def add_callee_save_reg(self, shape, reg_index): - index_to_name = { 1: 'ebx', 2: 'esi', 3: 'edi' } + index_to_name = {1: 'ebx', 2: 'esi', 3: 'edi'} shape.append(index_to_name[reg_index]) + def compress_callshape(self, shape, datablockwrapper): assert datablockwrapper == 'fakedatablockwrapper' assert shape[0] == 'shape' return ['compressed'] + shape[1:] + class MockGcDescr(GcCache): is_shadow_stack = False + def get_funcptr_for_new(self): return 123 + get_funcptr_for_newarray = get_funcptr_for_new get_funcptr_for_newstr = get_funcptr_for_new get_funcptr_for_newunicode = get_funcptr_for_new @@ -74,13 +85,14 @@ record_constptrs = GcLLDescr_framework.record_constptrs.im_func rewrite_assembler = GcLLDescr_framework.rewrite_assembler.im_func + class TestRegallocDirectGcIntegration(object): def test_mark_gc_roots(self): py.test.skip('roots') cpu = CPU(None, None) cpu.setup_once() - regalloc = RegAlloc(MockAssembler(cpu, MockGcDescr(False))) + regalloc = Regalloc(MockAssembler(cpu, MockGcDescr(False))) regalloc.assembler.datablockwrapper = 'fakedatablockwrapper' boxes = [BoxPtr() for i in range(len(ARMv7RegisterManager.all_regs))] longevity = {} @@ -95,7 +107,8 @@ for box in boxes: regalloc.rm.try_allocate_reg(box) TP = lltype.FuncType([], lltype.Signed) - calldescr = cpu.calldescrof(TP, TP.ARGS, TP.RESULT, EffectInfo.MOST_GENERAL) + calldescr = cpu.calldescrof(TP, TP.ARGS, TP.RESULT, + EffectInfo.MOST_GENERAL) regalloc.rm._check_invariants() box = boxes[0] regalloc.position = 0 @@ -129,6 +142,7 @@ descr0 = cpu.fielddescrof(S, 'int') ptr0 = struct_ref + targettoken = TargetToken() namespace = locals().copy() @@ -153,6 +167,7 @@ def test_bug_0(self): ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8] + label(i0, i1, i2, i3, i4, i5, i6, i7, i8, descr=targettoken) guard_value(i2, 1) [i2, i3, i4, i5, i6, i7, i0, i1, i8] guard_class(i4, 138998336) [i4, i5, i6, i7, i0, i1, i8] i11 = getfield_gc(i4, descr=descr0) @@ -180,7 +195,7 @@ guard_false(i32) [i4, i6, i7, i0, i1, i24] i33 = getfield_gc(i0, descr=descr0) guard_value(i33, ConstPtr(ptr0)) [i4, i6, i7, i0, i1, i33, i24] - jump(i0, i1, 1, 17, i4, ConstPtr(ptr0), i6, i7, i24) + jump(i0, i1, 1, 17, i4, ConstPtr(ptr0), i6, i7, i24, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0, 0, 0, 0, 0, 0], run=False) @@ -322,17 +337,22 @@ class Seen(Exception): pass + class GCDescrFastpathMallocVarsize(GCDescrFastpathMalloc): def can_inline_malloc_varsize(self, arraydescr, num_elem): return num_elem < 5 + def get_funcptr_for_newarray(self): return 52 + def init_array_descr(self, A, descr): descr.tid = self._counter self._counter += 1 + def args_for_new_array(self, descr): raise Seen("args_for_new_array") + class TestMallocVarsizeFastpath(BaseTestRegalloc): def setup_method(self, method): cpu = CPU(None, None) diff --git a/pypy/jit/backend/arm/test/test_generated.py b/pypy/jit/backend/arm/test/test_generated.py --- a/pypy/jit/backend/arm/test/test_generated.py +++ b/pypy/jit/backend/arm/test/test_generated.py @@ -3,10 +3,10 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, ConstInt, ConstPtr, BoxObj, Const, ConstObj, BoxFloat, ConstFloat) +from pypy.jit.metainterp.history import JitCellToken from pypy.jit.metainterp.resoperation import 
ResOperation, rop from pypy.rpython.test.test_llinterp import interpret from pypy.jit.backend.detect_cpu import getcpuclass @@ -40,7 +40,7 @@ ResOperation(rop.GUARD_TRUE, [v12], None, descr=faildescr1), ResOperation(rop.FINISH, [v9, v6, v10, v2, v8, v5, v1, v4], None, descr=faildescr2), ] - looptoken = LoopToken() + looptoken = JitCellToken() operations[2].setfailargs([v12, v8, v3, v2, v1, v11]) cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, -12) @@ -101,7 +101,7 @@ ] operations[2].setfailargs([v10, v6]) operations[9].setfailargs([v15, v7, v10, v18, v4, v17, v1]) - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, 16) cpu.set_future_value_int(1, 5) @@ -152,7 +152,7 @@ ] operations[2].setfailargs([v8, v3]) operations[4].setfailargs([v2, v12, v1, v3, v4]) - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, -5) cpu.set_future_value_int(1, 24) @@ -203,7 +203,7 @@ ResOperation(rop.FINISH, [v8, v2, v6, v5, v7, v1, v10], None, descr=faildescr2), ] operations[5].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, 19) cpu.set_future_value_int(1, -3) @@ -254,7 +254,7 @@ ResOperation(rop.GUARD_NO_OVERFLOW, [], None, descr=faildescr1), ResOperation(rop.FINISH, [v1, v4, v10, v8, v7, v3], None, descr=faildescr2), ] - looptoken = LoopToken() + looptoken = JitCellToken() operations[5].setfailargs([]) cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, 1073741824) @@ -316,7 +316,7 @@ ResOperation(rop.GUARD_FALSE, [tmp17], None, descr=faildescr3), ResOperation(rop.FINISH, [v8, v10, v6, v3, v2, v9], None, descr=faildescr4), ] - looptoken = LoopToken() + looptoken = JitCellToken() operations[1].setfailargs([v8, v6, v1]) operations[7].setfailargs([v4]) operations[9].setfailargs([v10, v13]) @@ -376,7 +376,7 @@ ] operations[1].setfailargs([v6, v8, v1, v4]) operations[8].setfailargs([v5, v9]) - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, -8) cpu.set_future_value_int(1, 0) @@ -434,7 +434,7 @@ operations[3].setfailargs([]) operations[-4].setfailargs([v15]) operations[-2].setfailargs([v9, v4, v10, v11, v14]) - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, -39) cpu.set_future_value_int(1, -18) @@ -497,7 +497,7 @@ operations[1].setfailargs([v9, v1]) operations[5].setfailargs([v10, v2, v11, v3]) operations[9].setfailargs([v5, v7, v12, v14, v2, v13, v8]) - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, 0) cpu.set_future_value_int(1, -2) @@ -547,7 +547,7 @@ ResOperation(rop.FINISH, [v8, v2, v10, v6, v7, v9, v5, v4], None, descr=faildescr2), ] operations[2].setfailargs([v10, v3, v6, v11, v9, v2]) - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, 3) cpu.set_future_value_int(1, -5) @@ -604,7 +604,7 @@ ] operations[-2].setfailargs([v4, v10, v3, v9, v14, v2]) operations[4].setfailargs([v14]) - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, 14) cpu.set_future_value_int(1, -20) diff --git 
a/pypy/jit/backend/arm/test/test_recompilation.py b/pypy/jit/backend/arm/test/test_recompilation.py --- a/pypy/jit/backend/arm/test/test_recompilation.py +++ b/pypy/jit/backend/arm/test/test_recompilation.py @@ -2,14 +2,16 @@ from pypy.jit.backend.arm.test.support import skip_unless_arm skip_unless_arm() + class TestRecompilation(BaseTestRegalloc): def test_compile_bridge_not_deeper(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' loop = self.interpret(ops, [0]) assert self.getint(0) == 20 @@ -23,16 +25,19 @@ fail = self.run(loop) assert fail.identifier == 2 assert self.getint(0) == 21 - + def test_compile_bridge_deeper(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' loop = self.interpret(ops, [0]) + previous = loop._jitcelltoken.compiled_loop_token.frame_depth + #assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 assert self.getint(0) == 20 ops = ''' [i1] @@ -41,12 +46,17 @@ i5 = int_add(i4, 1) i6 = int_add(i5, 1) i7 = int_add(i5, i4) + force_spill(i5) i8 = int_add(i7, 1) i9 = int_add(i8, 1) finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - descr = loop.operations[2].getdescr() + descr = loop.operations[3].getdescr() + new = descr._arm_bridge_frame_depth + #assert descr._x86_bridge_param_depth == 0 + # the force_spill() forces the stack to grow + assert new > previous self.cpu.set_future_value_int(0, 0) fail = self.run(loop) assert fail.identifier == 2 @@ -58,21 +68,23 @@ def test_bridge_jump_to_other_loop(self): loop = self.interpret(''' [i0, i10, i11, i12, i13, i14, i15, i16] + label(i0, i10, i11, i12, i13, i14, i15, i16, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2, descr=fdescr1) [i1] - jump(i1, i10, i11, i12, i13, i14, i15, i16) + jump(i1, i10, i11, i12, i13, i14, i15, i16, descr=targettoken) ''', [0]) other_loop = self.interpret(''' [i3] + label(i3, descr=targettoken2) guard_false(i3, descr=fdescr2) [i3] - jump(i3) + jump(i3, descr=targettoken2) ''', [1]) ops = ''' [i3] - jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=looptoken) + jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=targettoken) ''' - bridge = self.attach_bridge(ops, other_loop, 0, looptoken=loop.token) + bridge = self.attach_bridge(ops, other_loop, 1) self.cpu.set_future_value_int(0, 1) fail = self.run(other_loop) assert fail.identifier == 1 @@ -80,6 +92,7 @@ def test_bridge_jumps_to_self_deeper(self): loop = self.interpret(''' [i0, i1, i2, i31, i32, i33] + label(i0, i1, i2, i31, i32, i33, descr=targettoken) i98 = same_as(0) i99 = same_as(1) i30 = int_add(i1, i2) @@ -88,7 +101,7 @@ guard_false(i4) [i98, i3] i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] - jump(i3, i30, 1, i30, i30, i30) + jump(i3, i30, 1, i30, i30, i30, descr=targettoken) ''', [0]) assert self.getint(0) == 0 assert self.getint(1) == 1 @@ -98,16 +111,19 @@ i8 = int_add(i3, 1) i6 = int_add(i8, i10) i7 = int_add(i3, i6) + force_spill(i6) + force_spill(i7) + force_spill(i8) i12 = int_add(i7, i8) i11 = int_add(i12, i6) - jump(i3, i12, i11, i10, i6, i7, descr=looptoken) + jump(i3, i12, i11, i10, i6, i7, descr=targettoken) ''' - bridge = self.attach_bridge(ops, loop, 5, looptoken=loop.token) - guard_op = loop.operations[5] - #loop_frame_depth = loop.token._x86_frame_depth - #assert loop.token._x86_param_depth == 0 - ## XXX: Maybe add enough ops to force 
stack on 64-bit as well? - # assert guard_op.getdescr()._arm_bridge_frame_depth > loop_frame_depth + loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth + bridge = self.attach_bridge(ops, loop, 6) + guard_op = loop.operations[6] + #assert loop._jitcelltoken.compiled_loop_token.param_depth == 0 + # the force_spill() forces the stack to grow + assert guard_op.getdescr()._arm_bridge_frame_depth > loop_frame_depth #assert guard_op.getdescr()._x86_bridge_param_depth == 0 self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 0) @@ -119,6 +135,7 @@ def test_bridge_jumps_to_self_shallower(self): loop = self.interpret(''' [i0, i1, i2] + label(i0, i1, i2, descr=targettoken) i98 = same_as(0) i99 = same_as(1) i3 = int_add(i0, 1) @@ -126,19 +143,19 @@ guard_false(i4) [i98, i3] i5 = int_lt(i3, 20) guard_true(i5) [i99, i3] - jump(i3, i1, i2) + jump(i3, i1, i2, descr=targettoken) ''', [0]) assert self.getint(0) == 0 assert self.getint(1) == 1 ops = ''' [i97, i3] - jump(i3, 0, 1, descr=looptoken) + jump(i3, 0, 1, descr=targettoken) ''' - bridge = self.attach_bridge(ops, loop, 4, looptoken=loop.token) + bridge = self.attach_bridge(ops, loop, 5) self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 0) self.cpu.set_future_value_int(2, 0) self.run(loop) assert self.getint(0) == 1 assert self.getint(1) == 20 - + diff --git a/pypy/jit/backend/arm/test/test_regalloc.py b/pypy/jit/backend/arm/test/test_regalloc.py --- a/pypy/jit/backend/arm/test/test_regalloc.py +++ b/pypy/jit/backend/arm/test/test_regalloc.py @@ -3,19 +3,29 @@ """ import py -from pypy.jit.metainterp.history import BasicFailDescr +from pypy.jit.metainterp.history import BasicFailDescr, \ + JitCellToken, \ + TargetToken +from pypy.jit.metainterp.resoperation import rop from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.arm.regalloc import Regalloc, ARMFrameManager +from pypy.jit.backend.llsupport.regalloc import is_comparison_or_ovf_op from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.annlowlevel import llhelper from pypy.rpython.lltypesystem import rclass, rstr from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.backend.arm.test.support import skip_unless_arm +from pypy.jit.codewriter import longlong skip_unless_arm() +def test_is_comparison_or_ovf_op(): + assert not is_comparison_or_ovf_op(rop.INT_ADD) + assert is_comparison_or_ovf_op(rop.INT_ADD_OVF) + assert is_comparison_or_ovf_op(rop.INT_EQ) + CPU = getcpuclass() @@ -85,38 +95,49 @@ FPTR = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Signed)) f_fptr = llhelper(FPTR, f) - f_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, EffectInfo.MOST_GENERAL) + f_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, + EffectInfo.MOST_GENERAL) zero_division_tp, zero_division_value = cpu.get_zero_division_error() zd_addr = cpu.cast_int_to_adr(zero_division_tp) zero_division_error = llmemory.cast_adr_to_ptr(zd_addr, lltype.Ptr(rclass.OBJECT_VTABLE)) - raising_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, EffectInfo.MOST_GENERAL) + raising_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, + EffectInfo.MOST_GENERAL) + targettoken = TargetToken() + targettoken2 = TargetToken() fdescr1 = BasicFailDescr(1) fdescr2 = BasicFailDescr(2) fdescr3 = BasicFailDescr(3) + def setup_method(self, meth): + self.targettoken._arm_loop_code = 0 + 
self.targettoken2._arm_loop_code = 0 + def f1(x): - return x+1 + return x + 1 def f2(x, y): - return x*y + return x * y def f10(*args): assert len(args) == 10 return sum(args) F1PTR = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Signed)) - F2PTR = lltype.Ptr(lltype.FuncType([lltype.Signed]*2, lltype.Signed)) - F10PTR = lltype.Ptr(lltype.FuncType([lltype.Signed]*10, lltype.Signed)) + F2PTR = lltype.Ptr(lltype.FuncType([lltype.Signed] * 2, lltype.Signed)) + F10PTR = lltype.Ptr(lltype.FuncType([lltype.Signed] * 10, lltype.Signed)) f1ptr = llhelper(F1PTR, f1) f2ptr = llhelper(F2PTR, f2) f10ptr = llhelper(F10PTR, f10) - f1_calldescr = cpu.calldescrof(F1PTR.TO, F1PTR.TO.ARGS, F1PTR.TO.RESULT, EffectInfo.MOST_GENERAL) - f2_calldescr = cpu.calldescrof(F2PTR.TO, F2PTR.TO.ARGS, F2PTR.TO.RESULT, EffectInfo.MOST_GENERAL) - f10_calldescr = cpu.calldescrof(F10PTR.TO, F10PTR.TO.ARGS, F10PTR.TO.RESULT, EffectInfo.MOST_GENERAL) + f1_calldescr = cpu.calldescrof(F1PTR.TO, F1PTR.TO.ARGS, F1PTR.TO.RESULT, + EffectInfo.MOST_GENERAL) + f2_calldescr = cpu.calldescrof(F2PTR.TO, F2PTR.TO.ARGS, F2PTR.TO.RESULT, + EffectInfo.MOST_GENERAL) + f10_calldescr = cpu.calldescrof(F10PTR.TO, F10PTR.TO.ARGS, + F10PTR.TO.RESULT, EffectInfo.MOST_GENERAL) namespace = locals().copy() type_system = 'lltype' @@ -128,18 +149,21 @@ def interpret(self, ops, args, run=True): loop = self.parse(ops) - self.cpu.compile_loop(loop.inputargs, loop.operations, loop.token) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) for i, arg in enumerate(args): if isinstance(arg, int): self.cpu.set_future_value_int(i, arg) elif isinstance(arg, float): + arg = longlong.getfloatstorage(arg) self.cpu.set_future_value_float(i, arg) else: assert isinstance(lltype.typeOf(arg), lltype.Ptr) llgcref = lltype.cast_opaque_ptr(llmemory.GCREF, arg) self.cpu.set_future_value_ref(i, llgcref) + loop._jitcelltoken = looptoken if run: - self.cpu.execute_token(loop.token) + self.cpu.execute_token(looptoken) return loop def prepare_loop(self, ops): @@ -167,10 +191,7 @@ gcref = self.cpu.get_latest_value_ref(index) return lltype.cast_opaque_ptr(T, gcref) - def attach_bridge(self, ops, loop, guard_op_index, looptoken=None, **kwds): - if looptoken is not None: - self.namespace = self.namespace.copy() - self.namespace['looptoken'] = looptoken + def attach_bridge(self, ops, loop, guard_op_index, **kwds): guard_op = loop.operations[guard_op_index] assert guard_op.is_guard() bridge = self.parse(ops, **kwds) @@ -178,20 +199,22 @@ [box.type for box in guard_op.getfailargs()]) faildescr = guard_op.getdescr() self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations, - loop.token) + loop._jitcelltoken) return bridge def run(self, loop): - return self.cpu.execute_token(loop.token) + return self.cpu.execute_token(loop._jitcelltoken) + class TestRegallocSimple(BaseTestRegalloc): def test_simple_loop(self): ops = ''' [i0] + label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2) [i1] - jump(i1) + jump(i1, descr=targettoken) ''' self.interpret(ops, [0]) assert self.getint(0) == 20 @@ -199,27 +222,29 @@ def test_two_loops_and_a_bridge(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_add(i0, 1) i5 = int_lt(i4, 20) guard_true(i5) [i4, i1, i2, i3] - jump(i4, i1, i2, i3) + jump(i4, i1, i2, i3, descr=targettoken) ''' - loop = self.interpret(ops, [0, 0, 0, 0]) + self.interpret(ops, [0, 0, 0, 0]) ops2 = ''' [i5] + label(i5, descr=targettoken2) i1 = int_add(i5, 1) i3 = 
int_add(i1, 1) i4 = int_add(i3, 1) i2 = int_lt(i4, 30) guard_true(i2) [i4] - jump(i4) + jump(i4, descr=targettoken2) ''' loop2 = self.interpret(ops2, [0]) bridge_ops = ''' [i4] - jump(i4, i4, i4, i4, descr=looptoken) + jump(i4, i4, i4, i4, descr=targettoken) ''' - bridge = self.attach_bridge(bridge_ops, loop2, 4, looptoken=loop.token) + self.attach_bridge(bridge_ops, loop2, 5) self.cpu.set_future_value_int(0, 0) self.run(loop2) assert self.getint(0) == 31 @@ -230,10 +255,11 @@ def test_pointer_arg(self): ops = ''' [i0, p0] + label(i0, p0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 10) guard_true(i2) [p0] - jump(i1, p0) + jump(i1, p0, descr=targettoken) ''' S = lltype.GcStruct('S') ptr = lltype.malloc(S) @@ -257,7 +283,7 @@ ''' loop = self.interpret(ops, [0]) assert self.getint(0) == 1 - bridge = self.attach_bridge(bridge_ops, loop, 2) + self.attach_bridge(bridge_ops, loop, 2) self.cpu.set_future_value_int(0, 0) self.run(loop) assert self.getint(0) == 1 @@ -284,7 +310,7 @@ loop = self.interpret(ops, [0, 10]) assert self.getint(0) == 0 assert self.getint(1) == 10 - bridge = self.attach_bridge(bridge_ops, loop, 0) + self.attach_bridge(bridge_ops, loop, 0) self.cpu.set_future_value_int(0, 0) self.cpu.set_future_value_int(1, 10) self.run(loop) @@ -311,10 +337,11 @@ def test_spill_for_constant(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_add(3, i1) i5 = int_lt(i4, 30) guard_true(i5) [i0, i4, i2, i3] - jump(1, i4, 3, 4) + jump(1, i4, 3, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) assert self.getints(4) == [1, 30, 3, 4] @@ -322,43 +349,47 @@ def test_spill_for_constant_lshift(self): ops = ''' [i0, i2, i1, i3] + label(i0, i2, i1, i3, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, 3, i5, 4) + jump(i4, 3, i5, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) - assert self.getints(4) == [1<<29, 30, 3, 4] + assert self.getints(4) == [1 << 29, 30, 3, 4] ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, i5, 3, 4) + jump(i4, i5, 3, 4, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) - assert self.getints(4) == [1<<29, 30, 3, 4] + assert self.getints(4) == [1 << 29, 30, 3, 4] ops = ''' [i0, i3, i1, i2] + label(i0, i3, i1, i2, descr=targettoken) i4 = int_lshift(1, i1) i5 = int_add(1, i1) i6 = int_lt(i5, 30) guard_true(i6) [i4, i5, i2, i3] - jump(i4, 4, i5, 3) + jump(i4, 4, i5, 3, descr=targettoken) ''' self.interpret(ops, [0, 0, 0, 0]) - assert self.getints(4) == [1<<29, 30, 3, 4] + assert self.getints(4) == [1 << 29, 30, 3, 4] def test_result_selected_reg_via_neg(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i6 = int_neg(i2) i7 = int_add(1, i1) i4 = int_lt(i7, 10) guard_true(i4) [i0, i6, i7] - jump(1, i7, i2, i6) + jump(1, i7, i2, i6, descr=targettoken) ''' self.interpret(ops, [0, 0, 3, 0]) assert self.getints(3) == [1, -3, 10] @@ -366,11 +397,12 @@ def test_compare_memory_result_survives(self): ops = ''' [i0, i1, i2, i3] + label(i0, i1, i2, i3, descr=targettoken) i4 = int_lt(i0, i1) i5 = int_add(i3, 1) i6 = int_lt(i5, 30) guard_true(i6) [i4] - jump(i0, i1, i4, i5) + jump(i0, i1, i4, i5, descr=targettoken) ''' self.interpret(ops, [0, 10, 0, 0]) assert self.getint(0) == 1 @@ -378,10 +410,11 @@ def test_jump_different_args(self): ops = ''' [i0, i15, i16, i18, i1, i2, i3] + label(i0, i15, i16, 
i18, i1, i2, i3, descr=targettoken) i4 = int_add(i3, 1) i5 = int_lt(i4, 20) guard_true(i5) [i2, i1] - jump(i0, i18, i15, i16, i2, i1, i4) + jump(i0, i18, i15, i16, i2, i1, i4, descr=targettoken) ''' self.interpret(ops, [0, 1, 2, 3]) @@ -450,6 +483,7 @@ regalloc = self.prepare_loop(ops) assert len(regalloc.rm.reg_bindings) == 2 + class TestRegallocCompOps(BaseTestRegalloc): def test_cmp_op_0(self): @@ -463,9 +497,11 @@ self.interpret(ops, [0, 1]) assert self.getint(0) == 0 + class TestRegallocMoreRegisters(BaseTestRegalloc): cpu = BaseTestRegalloc.cpu + targettoken = TargetToken() S = lltype.GcStruct('S', ('field', lltype.Char)) fielddescr = cpu.fielddescrof(S, 'field') @@ -513,7 +549,7 @@ strsetitem(p0, 1, i) finish() ''' - llstr = rstr.mallocstr(10) + llstr = rstr.mallocstr(10) self.interpret(ops, [llstr, ord('a')]) assert llstr.chars[1] == 'a' @@ -570,6 +606,7 @@ def test_division_optimized(self): ops = ''' [i7, i6] + label(i7, i6, descr=targettoken) i18 = int_floordiv(i7, i6) i19 = int_xor(i7, i6) i21 = int_lt(i19, 0) @@ -577,12 +614,13 @@ i23 = int_is_true(i22) i24 = int_eq(i6, 4) guard_false(i24) [i18] - jump(i18, i6) + jump(i18, i6, descr=targettoken) ''' self.interpret(ops, [10, 4]) assert self.getint(0) == 2 # FIXME: Verify that i19 - i23 are removed + class TestRegallocFloats(BaseTestRegalloc): def test_float_add(self): py.test.skip('need floats') @@ -603,7 +641,8 @@ finish(f9, f10, f2, f3, f4, f5, f6, f7, f8) ''' self.interpret(ops, [0.1, .2, .3, .4, .5, .6, .7, .8, .9]) - assert self.getfloats(9) == [.1+.2, .9+3.5, .3, .4, .5, .6, .7, .8, .9] + assert self.getfloats(9) == [.1 + .2, .9 + 3.5, .3, + .4, .5, .6, .7, .8, .9] def test_lt_const(self): py.test.skip('need floats') @@ -633,9 +672,10 @@ i9 = float_ne(f9, 0.0) finish(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9) ''' - loop = self.interpret(ops, [0.0, .1, .2, .3, .4, .5, .6, .7, .8, .9]) + self.interpret(ops, [0.0, .1, .2, .3, .4, .5, .6, .7, .8, .9]) assert self.getints(9) == [0, 1, 1, 1, 1, 1, 1, 1, 1] + class TestRegAllocCallAndStackDepth(BaseTestRegalloc): def expected_param_depth(self, num_args): # Assumes the arguments are all non-float @@ -647,7 +687,7 @@ i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) + self.interpret(ops, [4, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9]) assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] def test_two_calls(self): @@ -657,8 +697,8 @@ i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) finish(i11, i1, i2, i3, i4, i5, i6, i7, i8, i9) ''' - loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 9]) - assert self.getints(11) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] + self.interpret(ops, [4, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9]) + assert self.getints(11) == [5 * 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9] def test_call_many_arguments(self): ops = ''' @@ -666,7 +706,7 @@ i8 = call(ConstClass(f10ptr), 1, i0, i1, i2, i3, i4, i5, i6, i7, 10, descr=f10_calldescr) finish(i8) ''' - loop = self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9]) + self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9]) assert self.getint(0) == 55 def test_bridge_calls_1(self): @@ -683,13 +723,12 @@ i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr) finish(i3, descr=fdescr2) ''' - bridge = self.attach_bridge(ops, loop, -2) - + self.attach_bridge(ops, loop, -2) self.cpu.set_future_value_int(0, 4) self.cpu.set_future_value_int(1, 7) self.run(loop) - assert self.getint(0) == 5*7 + assert self.getint(0) == 5 * 7 def 
test_bridge_calls_2(self): ops = ''' @@ -699,27 +738,28 @@ finish(i1) ''' loop = self.interpret(ops, [4, 7]) - assert self.getint(0) == 4*7 + assert self.getint(0) == 4 * 7 ops = ''' [i2] i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr) finish(i3, descr=fdescr2) ''' - bridge = self.attach_bridge(ops, loop, -2) - + self.attach_bridge(ops, loop, -2) self.cpu.set_future_value_int(0, 4) self.cpu.set_future_value_int(1, 7) self.run(loop) assert self.getint(0) == 29 + class TestJumps(BaseTestRegalloc): def test_jump_with_consts(self): loop = """ [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14] - jump(i1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) + label(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, descr=targettoken) + jump(i1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, descr=targettoken) """ - large = self.interpret(loop, range(15), run=False) + self.interpret(loop, range(15), run=False) # ensure compiling this loop works assert 1 @@ -729,6 +769,7 @@ FUNCPTR = lltype.Ptr(lltype.FuncType([lltype.Signed, llmemory.GCREF], lltype.Signed)) + class FakeJitDriverSD: index_of_virtualizable = -1 _assembler_helper_ptr = llhelper(FUNCPTR, assembler_helper) @@ -736,15 +777,16 @@ _assembler_helper_ptr) FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( - lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Signed)), [lltype.Signed], lltype.Signed, EffectInfo.MOST_GENERAL) + lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Signed)), \ + [lltype.Signed], lltype.Signed, EffectInfo.MOST_GENERAL) loop1 = """ [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10] i11 = int_add(i0, i1) finish(i11, i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10) """ large = self.interpret(loop1, range(11), run=False) - large.token.outermost_jitdriver_sd = FakeJitDriverSD() - self.namespace['looptoken'] = large.token + large._jitcelltoken.outermost_jitdriver_sd = FakeJitDriverSD() + self.namespace['looptoken'] = large._jitcelltoken assert self.namespace['looptoken']._arm_bootstrap_code != 0 loop2 = """ [i0] @@ -761,6 +803,7 @@ def test_far_far_jump(self): ops = """ [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10] + label(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, descr=targettoken) i11 = int_add(i0, 1) i12 = int_lt(i11, 2) i13 = call(ConstClass(f_fptr), i12, descr=f_calldescr) @@ -781,10 +824,11 @@ i29 = call(ConstClass(f_fptr), i12, descr=f_calldescr) i30 = call(ConstClass(f_fptr), i12, descr=f_calldescr) guard_true(i12) [i11, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10] - jump(i11, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10) + jump(i11, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, descr=targettoken) """ self.interpret(ops, range(11)) - assert self.getint(0) == 2 # and not segfault() + assert self.getint(0) == 2 # and not segfault() + class TestStrOps(BaseTestRegalloc): def test_newstr(self): @@ -824,4 +868,3 @@ self.interpret(ops, []) string = self.getptr(0, lltype.Ptr(rstr.STR)) assert len(string.chars) == 300 - diff --git a/pypy/jit/backend/arm/test/test_regalloc2.py b/pypy/jit/backend/arm/test/test_regalloc2.py --- a/pypy/jit/backend/arm/test/test_regalloc2.py +++ b/pypy/jit/backend/arm/test/test_regalloc2.py @@ -1,6 +1,7 @@ import py from pypy.jit.metainterp.history import ResOperation, BoxInt, ConstInt,\ - BoxPtr, ConstPtr, BasicFailDescr, LoopToken + BoxPtr, ConstPtr, BasicFailDescr +from pypy.jit.metainterp.history import JitCellToken from pypy.jit.metainterp.resoperation import rop from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.backend.arm.arch import WORD @@ -22,7 
+23,7 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, 9) cpu.execute_token(looptoken) @@ -45,7 +46,7 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, -10) cpu.execute_token(looptoken) @@ -142,7 +143,7 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, -13) cpu.set_future_value_int(1, 10) @@ -257,7 +258,7 @@ ] cpu = CPU(None, None) cpu.setup_once() - looptoken = LoopToken() + looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) cpu.set_future_value_int(0, 17) cpu.set_future_value_int(1, -20) diff --git a/pypy/jit/backend/arm/test/test_runner.py b/pypy/jit/backend/arm/test/test_runner.py --- a/pypy/jit/backend/arm/test/test_runner.py +++ b/pypy/jit/backend/arm/test/test_runner.py @@ -4,25 +4,23 @@ boxfloat, \ constfloat from pypy.jit.backend.arm.test.support import skip_unless_arm -from pypy.jit.metainterp.history import (AbstractFailDescr, - AbstractDescr, - BasicFailDescr, - BoxInt, Box, BoxPtr, - LoopToken, - ConstInt, ConstPtr, - BoxObj, Const, - ConstObj, BoxFloat, ConstFloat) +from pypy.jit.metainterp.history import (BasicFailDescr, + BoxInt, + ConstInt) from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rpython.annlowlevel import llhelper from pypy.jit.codewriter.effectinfo import EffectInfo +from pypy.jit.metainterp.history import JitCellToken, TargetToken skip_unless_arm() + class FakeStats(object): pass + class TestARM(LLtypeBackendTest): def setup_class(cls): @@ -38,39 +36,41 @@ cpu = self.cpu inp = [BoxInt(i) for i in range(1, 15)] out = [BoxInt(i) for i in range(1, 15)] - looptoken = LoopToken() + looptoken = JitCellToken() + targettoken = TargetToken() operations = [ - ResOperation(rop.INT_ADD, [inp[0] , inp[1]], out[0]), - ResOperation(rop.INT_ADD, [inp[2] , inp[3]], out[1]), - ResOperation(rop.INT_ADD, [inp[4] , inp[5]], out[2]), - ResOperation(rop.INT_ADD, [inp[6] , inp[7]], out[3]), - ResOperation(rop.INT_ADD, [inp[8] , inp[9]], out[4]), + ResOperation(rop.LABEL, inp, None, descr=targettoken), + ResOperation(rop.INT_ADD, [inp[0], inp[1]], out[0]), + ResOperation(rop.INT_ADD, [inp[2], inp[3]], out[1]), + ResOperation(rop.INT_ADD, [inp[4], inp[5]], out[2]), + ResOperation(rop.INT_ADD, [inp[6], inp[7]], out[3]), + ResOperation(rop.INT_ADD, [inp[8], inp[9]], out[4]), ResOperation(rop.INT_ADD, [inp[10], inp[11]], out[5]), ResOperation(rop.INT_ADD, [inp[12], inp[13]], out[6]), - ResOperation(rop.INT_ADD, [inp[0] , inp[1]], out[7]), - ResOperation(rop.INT_ADD, [inp[2] , inp[3]], out[8]), - ResOperation(rop.INT_ADD, [inp[4] , inp[5]], out[9]), - ResOperation(rop.INT_ADD, [inp[6] , inp[7]], out[10]), - ResOperation(rop.INT_ADD, [inp[8] , inp[9]], out[11]), + ResOperation(rop.INT_ADD, [inp[0], inp[1]], out[7]), + ResOperation(rop.INT_ADD, [inp[2], inp[3]], out[8]), + ResOperation(rop.INT_ADD, [inp[4], inp[5]], out[9]), + ResOperation(rop.INT_ADD, [inp[6], inp[7]], out[10]), + ResOperation(rop.INT_ADD, [inp[8], inp[9]], out[11]), ResOperation(rop.INT_ADD, [inp[10], inp[11]], out[12]), ResOperation(rop.INT_ADD, [inp[12], inp[13]], out[13]), 
ResOperation(rop.FINISH, out, None, descr=BasicFailDescr(1)), ] cpu.compile_loop(inp, operations, looptoken) for i in range(1, 15): - self.cpu.set_future_value_int(i-1, i) - res = self.cpu.execute_token(looptoken) - output = [self.cpu.get_latest_value_int(i-1) for i in range(1, 15)] + self.cpu.set_future_value_int(i - 1, i) + self.cpu.execute_token(looptoken) + output = [self.cpu.get_latest_value_int(i - 1) for i in range(1, 15)] expected = [3, 7, 11, 15, 19, 23, 27, 3, 7, 11, 15, 19, 23, 27] assert output == expected def test_redirect_call_assember2(self): - called = [] def assembler_helper(failindex, virtualizable): return self.cpu.get_latest_value_int(0) FUNCPTR = lltype.Ptr(lltype.FuncType([lltype.Signed, llmemory.GCREF], lltype.Signed)) + class FakeJitDriverSD: index_of_virtualizable = -1 _assembler_helper_ptr = llhelper(FUNCPTR, assembler_helper) @@ -79,7 +79,7 @@ FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Signed)), [lltype.Signed], lltype.Signed, EffectInfo.MOST_GENERAL) - lt1, lt2, lt3 = [LoopToken() for x in range(3)] + lt1, lt2, lt3 = [JitCellToken() for x in range(3)] lt2.outermost_jitdriver_sd = FakeJitDriverSD() loop1 = parse(''' [i0] @@ -101,12 +101,12 @@ self.cpu.compile_loop(loop3.inputargs, loop3.operations, lt3) self.cpu.compile_loop(loop1.inputargs, loop1.operations, lt1) self.cpu.set_future_value_int(0, 11) - res = self.cpu.execute_token(lt1) + self.cpu.execute_token(lt1) assert self.cpu.get_latest_value_int(0) == 12 self.cpu.redirect_call_assembler(lt2, lt3) self.cpu.set_future_value_int(0, 11) - res = self.cpu.execute_token(lt1) + self.cpu.execute_token(lt1) assert self.cpu.get_latest_value_int(0) == 10 def test_new_array_with_const_length(self): @@ -172,6 +172,7 @@ TFloat = lltype.GcStruct('TFloat', ('parent', SFloat), ('next', lltype.Ptr(SFloat))) + def test_float_field(self): if not self.cpu.supports_floats: py.test.skip('requires floats') diff --git a/pypy/jit/backend/arm/test/test_ztranslate_backend.py b/pypy/jit/backend/arm/test/test_ztranslate_backend.py --- a/pypy/jit/backend/arm/test/test_ztranslate_backend.py +++ b/pypy/jit/backend/arm/test/test_ztranslate_backend.py @@ -4,10 +4,10 @@ AbstractDescr, BasicFailDescr, BoxInt, Box, BoxPtr, - LoopToken, ConstInt, ConstPtr, BoxObj, Const, ConstObj, BoxFloat, ConstFloat) +from pypy.jit.metainterp.history import JitCellToken from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.rpython.test.test_llinterp import interpret from pypy.jit.backend.detect_cpu import getcpuclass @@ -27,7 +27,7 @@ i2 = BoxInt() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) - looptoken = LoopToken() + looptoken = JitCellToken() operations = [ ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -122,7 +122,7 @@ ResOperation(rop.FINISH, [i1], None, descr=BasicFailDescr(1)) ] inputargs = [i0] - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) self.cpu.set_future_value_float(0, longlong.getfloatstorage(2.8)) fail = self.cpu.execute_token(looptoken) @@ -1248,7 +1248,7 @@ guard_false(i0) [f1, f2, f3] finish()""" loop = parse(loopops) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) 
self.cpu.set_future_value_int(0, 1) self.cpu.set_future_value_float(1, longlong.getfloatstorage(132.25)) @@ -1298,7 +1298,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[1].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -1397,7 +1397,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] operations[-2].setfailargs([]) - looptoken = LoopToken() + looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # cpu = self.cpu @@ -2642,7 +2642,7 @@ del called[:] self.cpu.done_with_this_frame_int_v = done_number try: - othertoken = LoopToken() + othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) for i in range(10): self.cpu.set_future_value_int(i, i+1) From noreply at buildbot.pypy.org Thu Dec 29 16:51:36 2011 From: noreply at buildbot.pypy.org (edelsohn) Date: Thu, 29 Dec 2011 16:51:36 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: Remove duplicate, broken definition of get_asmmemmgr_blocks. Message-ID: <20111229155136.E038382C03@wyvern.cs.uni-duesseldorf.de> Author: edelsohn Branch: ppc-jit-backend Changeset: r50949:8e95fc00f638 Date: 2011-12-29 10:51 -0500 http://bitbucket.org/pypy/pypy/changeset/8e95fc00f638/ Log: Remove duplicate, broken definition of get_asmmemmgr_blocks. Store TOC at correct offset for PPC64. diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -122,12 +122,6 @@ mc.load(reg.value, spp_reg.value, self.OFFSET_SPP_TO_GPR_SAVE_AREA + WORD * i) - def get_asmmemmgr_blocks(self, looptoken): - clt = looptoken.compiled_loop_token - if clt.asmmemmgr_blocks is None: - clt.asmmemmgr = [] - return clt.asmmemmgr_blocks - def _make_prologue(self, target_pos, frame_depth): self._make_frame(frame_depth) curpos = self.mc.currpos() @@ -343,7 +337,7 @@ # load address of decoding function into SCRATCH mc.alloc_scratch_reg(addr) if IS_PPC_64: - mc.std(r.r2.value, r.SP.value, 3 * WORD) + mc.std(r.r2.value, r.SP.value, 5 * WORD) # load TOC pointer and environment pointer mc.load_imm(r.r2, r2_value) mc.load_imm(r.r11, r11_value) @@ -352,7 +346,7 @@ mc.free_scratch_reg() mc.bctrl() if IS_PPC_64: - mc.ld(r.r2.value, r.SP.value, 3 * WORD) + mc.ld(r.r2.value, r.SP.value, 5 * WORD) # # save SPP in r5 # (assume that r5 has been written to failboxes) From noreply at buildbot.pypy.org Thu Dec 29 17:26:27 2011 From: noreply at buildbot.pypy.org (hager) Date: Thu, 29 Dec 2011 17:26:27 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: (bivab) Fix allocation of function descriptors for PPC64 Message-ID: <20111229162627.DF58482C03@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r50950:0c305ddabb69 Date: 2011-12-29 08:25 -0800 http://bitbucket.org/pypy/pypy/changeset/0c305ddabb69/ Log: (bivab) Fix allocation of function descriptors for PPC64 diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -515,7 +515,6 @@ self.setup(looptoken, operations) self.startpos = self.mc.currpos() - longevity = compute_vars_longevity(inputargs, operations) regalloc = Regalloc(longevity, assembler=self, frame_manager=PPCFrameManager()) @@ -540,6 +539,8 @@ 
 self.gen_direct_bootstrap_code(loophead, looptoken, inputargs, frame_depth)
 self.write_pending_failure_recoveries()
+ if IS_PPC_64:
+ fdescrs = self.gen_64_bit_func_descrs()
 loop_start = self.materialize_loop(looptoken, False)
 looptoken._ppc_bootstrap_code = loop_start
@@ -547,13 +548,15 @@
 if IS_PPC_32:
 looptoken._ppc_direct_bootstrap_code = real_start
 else:
- looptoken._ppc_direct_bootstrap_code = self.gen_64_bit_func_descr(real_start)
+ self.write_64_bit_func_descr(fdescrs[0], real_start)
+ looptoken._ppc_direct_bootstrap_code = fdescrs[0]
 real_start = loop_start + start_pos
 if IS_PPC_32:
 looptoken.ppc_code = real_start
 else:
- looptoken.ppc_code = self.gen_64_bit_func_descr(real_start)
+ self.write_64_bit_func_descr(fdescrs[1], real_start)
+ looptoken.ppc_code = fdescrs[1]
 self.process_pending_guards(loop_start)
 if not we_are_translated():
 print 'Loop', inputargs, operations
@@ -701,13 +704,16 @@
 return False
 return True
- def gen_64_bit_func_descr(self, start_addr):
- mc = PPCBuilder()
- mc.write64(start_addr)
- mc.write64(0)
- mc.write64(0)
- return mc.materialize(self.cpu.asmmemmgr, [],
- self.cpu.gc_ll_descr.gcrootmap)
+ def gen_64_bit_func_descrs(self):
+ d0 = self.datablockwrapper.malloc_aligned(3*WORD, alignment=1)
+ d1 = self.datablockwrapper.malloc_aligned(3*WORD, alignment=1)
+ return [d0, d1]
+
+ def write_64_bit_func_descr(self, descr, start_addr):
+ data = rffi.cast(rffi.CArrayPtr(lltype.Signed), descr)
+ data[0] = start_addr
+ data[1] = 0
+ data[2] = 0
 def compute_frame_depth(self, regalloc):
 PARAMETER_AREA = self.max_stack_params * WORD

From noreply at buildbot.pypy.org  Thu Dec 29 17:51:42 2011
From: noreply at buildbot.pypy.org (fijal)
Date: Thu, 29 Dec 2011 17:51:42 +0100 (CET)
Subject: [pypy-commit] jitviewer default: try slightly harder to display source, even in extraordinary conditions
Message-ID: <20111229165142.B452F82C03@wyvern.cs.uni-duesseldorf.de>

Author: Maciej Fijalkowski
Branch:
Changeset: r185:9e5303808452
Date: 2011-12-29 18:51 +0200
http://bitbucket.org/pypy/jitviewer/changeset/9e5303808452/

Log: try slightly harder to display source, even in extraordinary conditions

diff --git a/_jitviewer/display.py b/_jitviewer/display.py
--- a/_jitviewer/display.py
+++ b/_jitviewer/display.py
@@ -33,7 +33,7 @@
 in_loop = no in lineset
 self.lines.append(LineRepr(line, in_loop))
- last_lineno = -1
+ last_lineno = self.firstlineno
 for chunk in loop.chunks:
 if chunk.is_bytecode:
 chunk.cssclass = 'dmp '
@@ -42,7 +42,7 @@
 else:
 chunk.cssclass += 'nonempty'
 no = chunk.lineno
- if no < last_lineno:
+ if no is None or no < last_lineno:
 no = last_lineno
 else:
 last_lineno = no
diff --git a/bin/jitviewer.py b/bin/jitviewer.py
--- a/bin/jitviewer.py
+++ b/bin/jitviewer.py
@@ -137,10 +137,10 @@
 except (IOError, OSError):
 source = CodeReprNoFile(loop)
 else:
- try:
- source = CodeRepr(inspect.getsource(code), code, loop)
- except:
- source = CodeReprNoFile(loop)
+ #try:
+ source = CodeRepr(inspect.getsource(code), code, loop)
+ #except:
+ # source = CodeReprNoFile(loop)
 d = {'html': flask.render_template('loop.html', source=source,
 current_loop=no,

From noreply at buildbot.pypy.org  Thu Dec 29 21:32:36 2011
From: noreply at buildbot.pypy.org (boemmels)
Date: Thu, 29 Dec 2011 21:32:36 +0100 (CET)
Subject: [pypy-commit] lang-scheme default: Added a closure test
Message-ID: <20111229203236.7D06082C03@wyvern.cs.uni-duesseldorf.de>

Author: Juergen Boemmels
Branch:
Changeset: r29:b27460bc0d0d
Date: 2011-12-04 00:19 +0100
http://bitbucket.org/pypy/lang-scheme/changeset/b27460bc0d0d/

Log:
Added a closure test

diff --git a/scheme/test/test_scheme_level.py b/scheme/test/test_scheme_level.py
--- a/scheme/test/test_scheme_level.py
+++ b/scheme/test/test_scheme_level.py
@@ -159,3 +159,17 @@
 (assert (string? new-str))
 (assert (equal? new-str "*******"))
 """)
+
+def test_closures():
+ run_with_assert(r"""
+(define (make-counter start) (lambda () (set! start (+ 1 start)) start))
+(assert (procedure? make-counter))
+(define counter-a (make-counter 10))
+(define counter-b (make-counter 10))
+(assert (procedure? counter-a))
+(assert (eqv? (counter-a) 11))
+(assert (eqv? (counter-a) 12))
+(assert (eqv? (counter-a) 13))
+(assert (eqv? (counter-b) 11))
+""")
+

From noreply at buildbot.pypy.org  Thu Dec 29 21:32:37 2011
From: noreply at buildbot.pypy.org (boemmels)
Date: Thu, 29 Dec 2011 21:32:37 +0100 (CET)
Subject: [pypy-commit] lang-scheme default: Implement "append" & "append!"
Message-ID: <20111229203237.8B8C482C04@wyvern.cs.uni-duesseldorf.de>

Author: Juergen Boemmels
Branch:
Changeset: r30:62cfba56990c
Date: 2011-12-09 01:54 +0100
http://bitbucket.org/pypy/lang-scheme/changeset/62cfba56990c/

Log: Implement "append" & "append!"

diff --git a/scheme/procedure.py b/scheme/procedure.py
--- a/scheme/procedure.py
+++ b/scheme/procedure.py
@@ -172,6 +172,66 @@
 w_pair.cdr = w_obj
 return w_undefined
+class Append(W_Procedure):
+ _symbol_name = "append"
+
+ def procedure(self, ctx, lst):
+ w_lol = plst2lst(lst)
+ w_lol = Reverse().procedure(ctx,[w_lol])
+
+ w_result = w_nil
+ while w_lol is not w_nil:
+ assert isinstance(w_lol, W_Pair)
+ w_list = w_lol.car
+ w_lol = w_lol.cdr
+ if w_list is w_nil:
+ continue
+ if not isinstance(w_list, W_Pair):
+ raise WrongArgType(w_list, "List")
+ w_head = W_Pair(w_list.car, w_undefined)
+ w_tail = w_head
+ w_list = w_list.cdr
+ while w_list is not w_nil:
+ if not isinstance (w_list, W_Pair):
+ raise WrongArgType(w_list, "List")
+ assert isinstance(w_tail, W_Pair)
+ w_tail.cdr = W_Pair(w_list.car, w_undefined)
+ w_tail = w_tail.cdr
+ w_list = w_list.cdr
+
+ assert isinstance(w_tail, W_Pair)
+ w_tail.cdr = w_result
+ w_result = w_head
+
+ return w_result
+
+class AppendE(W_Procedure):
+ _symbol_name = "append!"
+ + def procedure(self, ctx, lst): + if len(lst) == 0: + return w_nil + + w_head = w_nil + w_prev_tail = w_nil + for w_list in lst: + if w_list is w_nil: + continue + if w_head is w_nil: + w_head = w_list + if w_prev_tail is not w_nil: + assert isinstance(w_prev_tail, W_Pair) + w_prev_tail.cdr = w_list + if not isinstance(w_list, W_Pair): + raise WrongArgType(w_list, "List") + while w_list.cdr is not w_nil: + w_list = w_list.cdr + if not isinstance(w_list, W_Pair): + raise WrongArgType(w_list, "List") + w_prev_tail = w_list + + return w_head + class Apply(W_Procedure): _symbol_name = "apply" diff --git a/scheme/test/test_eval.py b/scheme/test/test_eval.py --- a/scheme/test/test_eval.py +++ b/scheme/test/test_eval.py @@ -14,6 +14,9 @@ def eval_noctx(expr): return parse(expr)[0].eval(ExecutionContext()) +def parse_(expr): + return parse(expr)[0] + def test_numerical(): w_num = eval_noctx("(+)") assert w_num.to_number() == 0 @@ -802,3 +805,38 @@ py.test.raises(WrongArgsNumber, eval_, ctx, "(apply 1)") py.test.raises(WrongArgType, eval_, ctx, "(apply 1 '(1))") py.test.raises(WrongArgType, eval_, ctx, "(apply + 42)") + +def test_append(): + ctx = ExecutionContext() + eval_(ctx, "(define lst-a (list 'a 'b 'c))") + + w_res = eval_(ctx, "(append '(1 2 3) lst-a)") + assert w_res.equal(parse_("(1 2 3 a b c)")) + + w_res = eval_(ctx, "(append lst-a lst-a '() '(1 2 3))") + w_lst_a = eval_(ctx, "lst-a") + assert w_res.equal(parse_("(a b c a b c 1 2 3)")) + assert w_lst_a.equal(parse_("(a b c)")) + + w_res = eval_(ctx, "(append '(1 2 3) '(4 (5 6) 7) '(8) '((9)))") + assert w_res.equal(parse_("(1 2 3 4 (5 6) 7 8 (9))")) + + w_res = eval_(ctx, "(append)") + assert w_res.eq(w_nil) + + w_res = eval_(ctx, "(append!)") + assert w_res.eq(w_nil) + + w_res = eval_(ctx, "(append! (list 1 2 3) lst-a)") + w_lst_a = eval_(ctx, "lst-a") + assert w_res.equal(parse_("(1 2 3 a b c)")) + assert w_lst_a.equal(parse_("(a b c)")) + + w_res = eval_(ctx, "(append! lst-a '(1 2 3))") + w_lst_a = eval_(ctx, "lst-a") + assert w_res.equal(parse_("(a b c 1 2 3)")) + assert w_lst_a.equal(parse_("(a b c 1 2 3)")) + + py.test.raises(WrongArgType, eval_, ctx, "(append 'a '())") + py.test.raises(WrongArgType, eval_, ctx, "(append 1 2 3)") + py.test.raises(WrongArgType, eval_, ctx, "(append! 
(cons 1 2) '(3 4))") \ No newline at end of file From noreply at buildbot.pypy.org Thu Dec 29 21:32:39 2011 From: noreply at buildbot.pypy.org (boemmels) Date: Thu, 29 Dec 2011 21:32:39 +0100 (CET) Subject: [pypy-commit] lang-scheme default: Move Parser helper-functions from object.py to ssparser.py Message-ID: <20111229203239.EB57A82C03@wyvern.cs.uni-duesseldorf.de> Author: Juergen Boemmels Branch: Changeset: r31:9eb92cb30772 Date: 2011-12-29 21:32 +0100 http://bitbucket.org/pypy/lang-scheme/changeset/9eb92cb30772/ Log: Move Parser helper-functions from object.py to ssparser.py diff --git a/scheme/object.py b/scheme/object.py --- a/scheme/object.py +++ b/scheme/object.py @@ -607,21 +607,6 @@ return self.body.eval_tr(local_ctx) -## -# Parser helpers -## -def quote(sexpr): - return W_Pair(symbol('quote'), W_Pair(sexpr, w_nil)) - -def qq(sexpr): - return W_Pair(symbol('quasiquote'), W_Pair(sexpr, w_nil)) - -def unquote(sexpr): - return W_Pair(symbol('unquote'), W_Pair(sexpr, w_nil)) - -def unquote_splicing(sexpr): - return W_Pair(symbol('unquote-splicing'), W_Pair(sexpr, w_nil)) - ## # General helpers diff --git a/scheme/ssparser.py b/scheme/ssparser.py --- a/scheme/ssparser.py +++ b/scheme/ssparser.py @@ -1,7 +1,7 @@ from pypy.rlib.parsing.pypackrat import PackratParser from pypy.rlib.parsing.makepackrat import BacktrackException, Status from scheme.object import W_Pair, W_Integer, W_String, symbol, \ - w_nil, W_Boolean, W_Real, quote, qq, unquote, unquote_splicing, \ + w_nil, W_Boolean, W_Real, \ w_ellipsis, W_Character, SchemeSyntaxError, W_Vector def str_unquote(s): @@ -141,3 +141,17 @@ p = SchemeParser(code) return p.file() +## +# Parser helpers +## +def quote(sexpr): + return W_Pair(symbol('quote'), W_Pair(sexpr, w_nil)) + +def qq(sexpr): + return W_Pair(symbol('quasiquote'), W_Pair(sexpr, w_nil)) + +def unquote(sexpr): + return W_Pair(symbol('unquote'), W_Pair(sexpr, w_nil)) + +def unquote_splicing(sexpr): + return W_Pair(symbol('unquote-splicing'), W_Pair(sexpr, w_nil)) From noreply at buildbot.pypy.org Fri Dec 30 13:51:46 2011 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 30 Dec 2011 13:51:46 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: fixes for translation Message-ID: <20111230125146.32B2282C03@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50951:d13df48602d0 Date: 2011-12-29 23:17 +0200 http://bitbucket.org/pypy/pypy/changeset/d13df48602d0/ Log: fixes for translation diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -283,8 +283,8 @@ descr_rmod = _binop_right_impl("mod") def _reduce_ufunc_impl(ufunc_name, promote_to_largest = False): - def impl(self, space, w_dim=None): - if w_dim is None: + def impl(self, space, w_dim=-1): + if isinstance(w_dim,int): w_dim = space.wrap(w_dim) return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, self, True, promote_to_largest, w_dim) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -108,11 +108,7 @@ if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) - dim = -1 - if not space.is_w(w_dim, space.w_None): - dim = space.int_w(w_dim) - if not multidim and space.is_w(w_dim, space.w_None): - dim = 0 + dim = space.int_w(w_dim) 
assert isinstance(self, W_Ufunc2) obj = convert_to_array(space, w_obj) if isinstance(obj, Scalar): @@ -132,7 +128,8 @@ "%s.reduce without identity", self.name) if shapelen>1 and dim>=0: from pypy.module.micronumpy.interp_numarray import Reduce - return Reduce(self.func, self.name, dim, dtype, obj, self.identity) + return space.wrap(Reduce(self.func, self.name, dim, dtype, + obj, self.identity)) sig = find_sig(ReduceSignature(self.func, self.name, dtype, ScalarSignature(dtype), obj.create_sig(obj.shape)), obj) From noreply at buildbot.pypy.org Fri Dec 30 13:51:47 2011 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 30 Dec 2011 13:51:47 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: (Alex_Gaynor) fix incorrect default function values Message-ID: <20111230125147.6997C82C04@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50952:ece8d1b61ecb Date: 2011-12-30 08:38 +0200 http://bitbucket.org/pypy/pypy/changeset/ece8d1b61ecb/ Log: (Alex_Gaynor) fix incorrect default function values diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -95,7 +95,9 @@ allnumbers.append(no) self.iter_no = no - def create_frame(self, arr, res_shape=None, chunks = []): + def create_frame(self, arr, res_shape=None, chunks = None): + if chunks is None: + chunks = [] res_shape = res_shape or arr.shape iterlist = [] arraylist = [] From noreply at buildbot.pypy.org Fri Dec 30 13:51:48 2011 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 30 Dec 2011 13:51:48 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: assert for translation, small cleanups Message-ID: <20111230125148.9882982C03@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50953:911002e026f2 Date: 2011-12-30 13:40 +0200 http://bitbucket.org/pypy/pypy/changeset/911002e026f2/ Log: assert for translation, small cleanups diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -103,15 +103,21 @@ def next(self, shapelen): return self -def axis_iter_from_arr(arr, dim=-1, start=[]): +def axis_iter_from_arr(arr, dim=-1, start=None): + if start is None: + start = [] + # The assert is needed for zjit tests + from pypy.module.micronumpy.interp_numarray import ConcreteArray + assert isinstance(arr, ConcreteArray) return AxisIterator(arr.start, arr.strides, arr.backstrides, arr.shape, dim, start) class AxisIterator(object): """ This object will return offsets of each start of a stride on the - desired dimension, starting at the desired index + desired dimension, starting at "start" which is an index along + each axis """ - def __init__(self, arr_start, strides, backstrides, shape, dim=-1, slice_start=[]): + def __init__(self, arr_start, strides, backstrides, shape, dim, start): self.shape = shape self.shapelen = len(shape) self.indices = [0] * len(shape) @@ -123,8 +129,8 @@ if dim >= 0: self.dim = dim if len(slice_start) == len(shape): - for i in range(len(slice_start)): - self.offset += strides[i] * slice_start[i] + for i in range(len(start)): + self.offset += strides[i] * start[i] def next(self, shapelen): #shapelen will always be one less than self.shapelen offset = self.offset diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ 
b/pypy/module/micronumpy/interp_numarray.py @@ -281,8 +281,8 @@ descr_rdiv = _binop_right_impl("divide") descr_rpow = _binop_right_impl("power") descr_rmod = _binop_right_impl("mod") - - def _reduce_ufunc_impl(ufunc_name, promote_to_largest = False): + + def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False): def impl(self, space, w_dim=-1): if isinstance(w_dim,int): w_dim = space.wrap(w_dim) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -119,8 +119,8 @@ dtype = find_unaryop_result_dtype( space, obj.find_dtype(), promote_to_float=self.promote_to_float, - promote_to_largest = promote_to_largest, - promote_bools = True + promote_to_largest=promote_to_largest, + promote_bools=True ) shapelen = len(obj.shape) if self.identity is None and size == 0: From noreply at buildbot.pypy.org Fri Dec 30 13:51:50 2011 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 30 Dec 2011 13:51:50 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: whoops Message-ID: <20111230125150.7B04582C03@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50954:e00aeecb0882 Date: 2011-12-30 13:57 +0200 http://bitbucket.org/pypy/pypy/changeset/e00aeecb0882/ Log: whoops diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -128,7 +128,7 @@ self.backstrides = backstrides if dim >= 0: self.dim = dim - if len(slice_start) == len(shape): + if len(start) == len(shape): for i in range(len(start)): self.offset += strides[i] * start[i] def next(self, shapelen): From noreply at buildbot.pypy.org Fri Dec 30 15:44:28 2011 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 30 Dec 2011 15:44:28 +0100 (CET) Subject: [pypy-commit] pypy numpypy-axisops: rework default arg, tests finally pass, cleanup Message-ID: <20111230144428.2C4B382C03@wyvern.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-axisops Changeset: r50955:af55619e4fb9 Date: 2011-12-30 16:37 +0200 http://bitbucket.org/pypy/pypy/changeset/af55619e4fb9/ Log: rework default arg, tests finally pass, cleanup diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py --- a/pypy/module/micronumpy/interp_iter.py +++ b/pypy/module/micronumpy/interp_iter.py @@ -113,8 +113,8 @@ dim, start) class AxisIterator(object): - """ This object will return offsets of each start of a stride on the - desired dimension, starting at "start" which is an index along + """ This object will return offsets of each start of a stride on the + desired dimension, starting at "start" which is an index along each axis """ def __init__(self, arr_start, strides, backstrides, shape, dim, start): @@ -131,6 +131,7 @@ if len(start) == len(shape): for i in range(len(start)): self.offset += strides[i] * start[i] + def next(self, shapelen): #shapelen will always be one less than self.shapelen offset = self.offset @@ -159,4 +160,3 @@ res.dim = self.dim res.done = self.done return res - diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -283,9 +283,9 @@ descr_rmod = _binop_right_impl("mod") def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False): - def impl(self, space, w_dim=-1): - if isinstance(w_dim,int): - w_dim = 
space.wrap(w_dim) + def impl(self, space, w_dim=None): + if space.is_w(w_dim, space.w_None): + w_dim = space.wrap(-1) return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space, self, True, promote_to_largest, w_dim) return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) @@ -376,7 +376,7 @@ else: w_res = self.descr_mul(space, w_other) assert isinstance(w_res, BaseArray) - return w_res.descr_sum(space) + return w_res.descr_sum(space, space.wrap(-1)) def get_concrete(self): raise NotImplementedError @@ -560,8 +560,11 @@ ) return w_result - def descr_mean(self, space): - return space.div(self.descr_sum_promote(space), space.wrap(self.size)) + def descr_mean(self, space, w_dim=None): + if space.is_w(w_dim, space.w_None): + w_dim = space.wrap(-1) + return space.div(self.descr_sum_promote(space, w_dim), + space.wrap(self.size)) def descr_nonzero(self, space): if self.size > 1: @@ -672,7 +675,7 @@ self.name = name def _del_sources(self): - # Function for deleting references to source arrays, + # Function for deleting references to source arrays, # to allow garbage-collecting them raise NotImplementedError @@ -739,9 +742,10 @@ def _del_sources(self): self.child = None + class Reduce(VirtualArray): def __init__(self, ufunc, name, dim, res_dtype, values, identity=None): - shape=values.shape[0:dim] + values.shape[dim+1:len(values.shape)] + shape = values.shape[0:dim] + values.shape[dim+1:len(values.shape)] VirtualArray.__init__(self, name, shape, res_dtype) self.values = values self.size = 1 @@ -770,7 +774,7 @@ shapelen = len(result.shape) objlen = len(self.values.shape) target_len = self.values.shape[self.dim] - #sig = self.find_sig(result.shape) ##Don't do this, it causes an infinite recursion + #sig = self.find_sig(result.shape) ##Don't do this, infinite recursion sig = self.create_sig(result.shape) ri = ArrayIterator(result.size) si = axis_iter_from_arr(self.values, self.dim) @@ -778,12 +782,12 @@ chunks = [] #for i in range(objlen - 1, -1, -1): for i in range(objlen): - if i==self.dim: + if i == self.dim: chunks.append((0, target_len, 1, target_len)) else: chunks.append((si.indices[i], 0, 0, 1)) - frame = sig.create_frame(self.values, - res_shape = [target_len], chunks = [chunks,]) + frame = sig.create_frame(self.values, + res_shape=[target_len], chunks = [chunks, ]) if self.identity is None: value = sig.eval(frame, self.values).convert_to(dtype) frame.next(shapelen) @@ -800,7 +804,6 @@ return result - class Call1(VirtualArray): def __init__(self, ufunc, name, shape, res_dtype, values): VirtualArray.__init__(self, name, shape, res_dtype) @@ -930,10 +933,9 @@ builder.append('\n' + indent) else: builder.append(indent) - # create_slice requires len(chunks) > 1 in order to reduce - # shape - view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]).get_concrete() - view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) + view = self.create_slice([(i, 0, 0, 1)]).get_concrete() + view.to_str(space, comma, builder, indent=indent + ' ', + use_ellipsis=use_ellipsis) builder.append('\n' + indent + '..., ') i = self.shape[0] - 3 while i < self.shape[0]: @@ -945,10 +947,9 @@ builder.append('\n' + indent) else: builder.append(indent) - # create_slice requires len(chunks) > 1 in order to reduce - # shape - view = self.create_slice([(i, 0, 0, 1), (0, self.shape[1], 1, self.shape[1])]).get_concrete() - view.to_str(space, comma, builder, indent=indent + ' ', use_ellipsis=use_ellipsis) + view = self.create_slice([(i, 0, 0, 1)]).get_concrete() + 
view.to_str(space, comma, builder, indent=indent + ' ', + use_ellipsis=use_ellipsis) i += 1 elif ndims == 1: spacer = ',' * comma + ' ' diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -10,12 +10,13 @@ from pypy.tool.sourcetools import func_with_new_name reduce_driver = jit.JitDriver( - greens = ['shapelen', "sig"], - virtualizables = ["frame"], - reds = ["frame", "self", "dtype", "value", "obj"], + greens=['shapelen', "sig"], + virtualizables=["frame"], + reds=["frame", "self", "dtype", "value", "obj"], get_printable_location=new_printable_location('reduce'), ) + class W_Ufunc(Wrappable): _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"] _immutable_fields_ = ["promote_to_float", "promote_bools", "name"] @@ -51,23 +52,23 @@ def descr_reduce(self, space, w_obj, w_dim=0): '''reduce(...) reduce(a, axis=0) - + Reduces `a`'s dimension by one, by applying ufunc along one axis. - + Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then :math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` = the result of iterating `j` over :math:`range(N_i)`, cumulatively applying ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`. For a one-dimensional array, reduce produces results equivalent to: :: - + r = op.identity # op = ufunc for i in xrange(len(A)): r = op(r, A[i]) return r - + For example, add.reduce() is equivalent to sum(). - + Parameters ---------- a : array_like @@ -79,9 +80,9 @@ -------- >>> np.multiply.reduce([2,3,5]) 30 - + A multi-dimensional array example: - + >>> X = np.arange(8).reshape((2,2,2)) >>> X array([[[0, 1], @@ -104,7 +105,8 @@ return self.reduce(space, w_obj, False, False, w_dim) def reduce(self, space, w_obj, multidim, promote_to_largest, w_dim): - from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar + from pypy.module.micronumpy.interp_numarray import convert_to_array, \ + Scalar if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) @@ -126,9 +128,9 @@ if self.identity is None and size == 0: raise operationerrfmt(space.w_ValueError, "zero-size array to " "%s.reduce without identity", self.name) - if shapelen>1 and dim>=0: + if shapelen > 1 and dim >= 0: from pypy.module.micronumpy.interp_numarray import Reduce - return space.wrap(Reduce(self.func, self.name, dim, dtype, + return space.wrap(Reduce(self.func, self.name, dim, dtype, obj, self.identity)) sig = find_sig(ReduceSignature(self.func, self.name, dtype, ScalarSignature(dtype), @@ -148,10 +150,12 @@ value=value, obj=obj, frame=frame, dtype=dtype) assert isinstance(sig, ReduceSignature) - value = sig.binfunc(dtype, value, sig.eval(frame, obj).convert_to(dtype)) + value = sig.binfunc(dtype, value, + sig.eval(frame, obj).convert_to(dtype)) frame.next(shapelen) return value + class W_Ufunc1(W_Ufunc): argcount = 1 @@ -236,6 +240,7 @@ reduce = interp2app(W_Ufunc.descr_reduce), ) + def find_binop_result_dtype(space, dt1, dt2, promote_to_float=False, promote_bools=False): # dt1.num should be <= dt2.num @@ -284,6 +289,7 @@ dtypenum += 3 return interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum] + def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): if promote_bools and (dt.kind == interp_dtype.BOOLLTR): @@ -308,6 +314,7 @@ assert False return dt + def find_dtype_for_scalar(space, 
w_obj, current_guess=None): bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py --- a/pypy/module/micronumpy/signature.py +++ b/pypy/module/micronumpy/signature.py @@ -95,7 +95,7 @@ allnumbers.append(no) self.iter_no = no - def create_frame(self, arr, res_shape=None, chunks = None): + def create_frame(self, arr, res_shape=None, chunks=None): if chunks is None: chunks = [] res_shape = res_shape or arr.shape @@ -104,6 +104,7 @@ self._create_iter(iterlist, arraylist, arr, res_shape, chunks) return NumpyEvalFrame(iterlist, arraylist) + class ConcreteSignature(Signature): _immutable_fields_ = ['dtype'] diff --git a/pypy/module/micronumpy/test/test_iterators.py b/pypy/module/micronumpy/test/test_iterators.py --- a/pypy/module/micronumpy/test/test_iterators.py +++ b/pypy/module/micronumpy/test/test_iterators.py @@ -2,53 +2,56 @@ from pypy.module.micronumpy.interp_iter import axis_iter_from_arr from pypy.module.micronumpy.interp_numarray import W_NDimArray + class MockDtype(object): def malloc(self, size): return None + class TestAxisIteratorDirect(object): def test_axis_iterator(self): - a = W_NDimArray(5*3, [5, 3], MockDtype(), 'C') + a = W_NDimArray(5 * 3, [5, 3], MockDtype(), 'C') i = axis_iter_from_arr(a) ret = [] while not i.done: ret.append(i.offset) i = i.next(1) assert ret == [0, 3, 6, 9, 12] - a = W_NDimArray(7*5*3, [7, 5, 3], MockDtype(), 'C') + a = W_NDimArray(7 * 5 * 3, [7, 5, 3], MockDtype(), 'C') i = axis_iter_from_arr(a) ret = [] while not i.done: ret.append(i.offset) i = i.next(1) - assert ret == [3*v for v in range(7*5)] - i = axis_iter_from_arr(a,2) + assert ret == [3 * v for v in range(7 * 5)] + i = axis_iter_from_arr(a, 2) ret = [] while not i.done: ret.append(i.offset) i = i.next(1) - assert ret == [3*v for v in range(7*5)] - i = axis_iter_from_arr(a,1) + assert ret == [3 * v for v in range(7 * 5)] + i = axis_iter_from_arr(a, 1) ret = [] while not i.done: ret.append(i.offset) i = i.next(1) assert ret == [ 0, 1, 2, 15, 16, 17, 30, 31, 32, 45, 46, 47, - 60, 61, 62, 75, 76, 77, 90, 91, 92] + 60, 61, 62, 75, 76, 77, 90, 91, 92] + def test_axis_iterator_with_start(self): - a = W_NDimArray(7*5*3, [7, 5, 3], MockDtype(), 'C') + a = W_NDimArray(7 * 5 * 3, [7, 5, 3], MockDtype(), 'C') i = axis_iter_from_arr(a, start=[0, 0, 0]) ret = [] while not i.done: ret.append(i.offset) i = i.next(2) - assert ret == [3*v for v in range(7*5)] + assert ret == [3 * v for v in range(7 * 5)] i = axis_iter_from_arr(a, start=[1, 1, 0]) ret = [] while not i.done: ret.append(i.offset) i = i.next(2) - assert ret == [3*v+18 for v in range(7*5)] + assert ret == [3 * v + 18 for v in range(7 * 5)] i = axis_iter_from_arr(a, 1, [2, 0, 2]) ret = [] while not i.done: diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -298,7 +298,7 @@ for i in range(len(a)): assert b[i] == math.atan(a[i]) - a = array([float('nan')]) + a = array([float('nan')]) b = arctan(a) assert math.isnan(b[0]) From noreply at buildbot.pypy.org Fri Dec 30 16:15:03 2011 From: noreply at buildbot.pypy.org (edelsohn) Date: Fri, 30 Dec 2011 16:15:03 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: Add encode64 for PPC64. 
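
Editorial aside (illustration only, not part of any changeset above): the numpypy diffs above replace mutable default arguments such as chunks=[] and start=[] with the None-sentinel pattern (chunks=None plus "if chunks is None: chunks = []"). A minimal sketch of the hazard being avoided, using hypothetical names rather than anything from the PyPy sources; a default list is built once, when the def statement runs, and is then shared by every call:

def bad(item, acc=[]):          # [] is evaluated once, at definition time
    acc.append(item)
    return acc

assert bad(1) == [1]
assert bad(2) == [1, 2]         # state leaks between calls via the shared list

def good(item, acc=None):       # None-sentinel pattern, as in the diffs above
    if acc is None:
        acc = []                # a fresh list on every call
    acc.append(item)
    return acc

assert good(1) == [1]
assert good(2) == [2]
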
Message-ID: <20111230151503.687D282C03@wyvern.cs.uni-duesseldorf.de> Author: edelsohn Branch: ppc-jit-backend Changeset: r50956:6478d3b30785 Date: 2011-12-30 10:13 -0500 http://bitbucket.org/pypy/pypy/changeset/6478d3b30785/ Log: Add encode64 for PPC64. diff --git a/pypy/jit/backend/ppc/ppcgen/helper/assembler.py b/pypy/jit/backend/ppc/ppcgen/helper/assembler.py --- a/pypy/jit/backend/ppc/ppcgen/helper/assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/helper/assembler.py @@ -64,6 +64,16 @@ | ord(mem[index+1]) << 16 | ord(mem[index]) << 24) +def encode64(mem, i, n): + mem[i+7] = chr(n & 0xFF) + mem[i+6] = chr((n >> 8) & 0xFF) + mem[i+5] = chr((n >> 16) & 0xFF) + mem[i+4] = chr((n >> 24) & 0xFF) + mem[i+3] = chr((n >> 32) & 0xFF) + mem[i+2] = chr((n >> 40) & 0xFF) + mem[i+1] = chr((n >> 48) & 0xFF) + mem[i] = chr((n >> 56) & 0xFF) + def decode64(mem, index): value = 0 for x in unrolling_iterable(range(8)): From noreply at buildbot.pypy.org Fri Dec 30 16:15:04 2011 From: noreply at buildbot.pypy.org (edelsohn) Date: Fri, 30 Dec 2011 16:15:04 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: Use encode64/decode64 for IMM on PPC64. Message-ID: <20111230151504.97D0082C03@wyvern.cs.uni-duesseldorf.de> Author: edelsohn Branch: ppc-jit-backend Changeset: r50957:e2765d568453 Date: 2011-12-30 10:14 -0500 http://bitbucket.org/pypy/pypy/changeset/e2765d568453/ Log: Use encode64/decode64 for IMM on PPC64. diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -15,8 +15,8 @@ FPR_SAVE_AREA, FLOAT_INT_CONVERSION, FORCE_INDEX) from pypy.jit.backend.ppc.ppcgen.helper.assembler import (gen_emit_cmp_op, - encode32, decode32, - decode64, + encode32, encode64, + decode32, decode64, count_reg_args, Saved_Volatiles) import pypy.jit.backend.ppc.ppcgen.register as r @@ -199,8 +199,12 @@ if res == self.IMM_LOC: # imm value if group == self.INT_TYPE or group == self.REF_TYPE: - value = decode32(enc, i+1) - i += 4 + if IS_PPC_32: + value = decode32(enc, i+1) + i += 4 + else: + value = decode64(enc, i+1) + i += 8 else: assert 0, "not implemented yet" elif res == self.STACK_LOC: @@ -620,8 +624,12 @@ assert (arg.type == INT or arg.type == REF or arg.type == FLOAT) mem[j] = self.IMM_LOC - encode32(mem, j+1, loc.getint()) - j += 5 + if IS_PPC_32: + encode32(mem, j+1, loc.getint()) + j += 5 + else: + encode64(mem, j+1, loc.getint()) + j += 9 else: mem[j] = self.STACK_LOC encode32(mem, j+1, loc.position) From noreply at buildbot.pypy.org Fri Dec 30 17:12:58 2011 From: noreply at buildbot.pypy.org (bivab) Date: Fri, 30 Dec 2011 17:12:58 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: merge default Message-ID: <20111230161258.2C9A282C03@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50958:16f44a7dcbb5 Date: 2011-12-29 10:24 +0100 http://bitbucket.org/pypy/pypy/changeset/16f44a7dcbb5/ Log: merge default diff too long, truncating to 10000 out of 21761 lines diff --git a/lib-python/modified-2.7/ctypes/test/test_callbacks.py b/lib-python/modified-2.7/ctypes/test/test_callbacks.py --- a/lib-python/modified-2.7/ctypes/test/test_callbacks.py +++ b/lib-python/modified-2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test class Callbacks(unittest.TestCase): @@ -98,6 +99,7 @@ ## self.check_type(c_char_p, "abc") ## self.check_type(c_char_p, "def") + 
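
Editorial aside (illustration only, not from the PPC changesets above): the new encode64 stores the most-significant byte at the lowest address, i.e. big-endian byte order, and the existing decode64 reads the eight bytes back in the same order. A plain-Python sketch of the round trip follows; the loop form is an assumption for brevity, since the real helpers spell out the eight assignments (encode64) or use unrolling_iterable (decode64):

def encode64(mem, i, n):
    for k in range(8):
        mem[i + 7 - k] = chr((n >> (8 * k)) & 0xFF)   # mem[i] receives the MSB

def decode64(mem, i):
    value = 0
    for k in range(8):
        value = (value << 8) | ord(mem[i + k])
    return value

buf = [chr(0)] * 8
encode64(buf, 0, 0x0123456789ABCDEF)
assert buf[0] == chr(0x01)                 # most-significant byte comes first
assert decode64(buf, 0) == 0x0123456789ABCDEF
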
@xfail def test_pyobject(self): o = () from sys import getrefcount as grc diff --git a/lib-python/modified-2.7/ctypes/test/test_libc.py b/lib-python/modified-2.7/ctypes/test/test_libc.py --- a/lib-python/modified-2.7/ctypes/test/test_libc.py +++ b/lib-python/modified-2.7/ctypes/test/test_libc.py @@ -25,7 +25,10 @@ lib.my_qsort(chars, len(chars)-1, sizeof(c_char), comparefunc(sort)) self.assertEqual(chars.raw, " ,,aaaadmmmnpppsss\x00") - def test_no_more_xfail(self): + def SKIPPED_test_no_more_xfail(self): + # We decided to not explicitly support the whole ctypes-2.7 + # and instead go for a case-by-case, demand-driven approach. + # So this test is skipped instead of failing. import socket import ctypes.test self.assertTrue(not hasattr(ctypes.test, 'xfail'), diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -231,8 +231,10 @@ sqlite.sqlite3_result_text.argtypes = [c_void_p, c_char_p, c_int, c_void_p] sqlite.sqlite3_result_text.restype = None -sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] -sqlite.sqlite3_enable_load_extension.restype = c_int +HAS_LOAD_EXTENSION = hasattr(sqlite, "sqlite3_enable_load_extension") +if HAS_LOAD_EXTENSION: + sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int] + sqlite.sqlite3_enable_load_extension.restype = c_int ########################################## # END Wrapped SQLite C API and constants @@ -708,13 +710,14 @@ from sqlite3.dump import _iterdump return _iterdump(self) - def enable_load_extension(self, enabled): - self._check_thread() - self._check_closed() + if HAS_LOAD_EXTENSION: + def enable_load_extension(self, enabled): + self._check_thread() + self._check_closed() - rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) - if rc != SQLITE_OK: - raise OperationalError("Error enabling load extension") + rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled)) + if rc != SQLITE_OK: + raise OperationalError("Error enabling load extension") DML, DQL, DDL = range(3) diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -180,7 +180,12 @@ if name is None: name = pyobj.func_name if signature is None: - signature = cpython_code_signature(pyobj.func_code) + if hasattr(pyobj, '_generator_next_method_of_'): + from pypy.interpreter.argument import Signature + signature = Signature(['entry']) # haaaaaack + defaults = () + else: + signature = cpython_code_signature(pyobj.func_code) if defaults is None: defaults = pyobj.func_defaults self.name = name diff --git a/pypy/doc/config/objspace.std.withspecialisedtuple.txt b/pypy/doc/config/objspace.std.withspecialisedtuple.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withspecialisedtuple.txt @@ -0,0 +1,3 @@ +Use "specialized tuples", a custom implementation for some common kinds +of tuples. Currently limited to tuples of length 2, in three variants: +(int, int), (float, float), and a generic (object, object). diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -51,6 +51,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." 
+ state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \"%s\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \"%s\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -94,17 +112,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." - for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \"%s\" missing from %s" - err = err % (missing, host) - w_err = space.wrap(err) - raise OperationError(space.w_TypeError, w_err) - raise AssertionError("should not reach here") class mod(AST): @@ -112,7 +119,6 @@ class Module(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -128,7 +134,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Module') + self.missing_field(space, ['body'], 'Module') else: pass w_list = self.w_body @@ -145,7 +151,6 @@ class Interactive(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -161,7 +166,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Interactive') + self.missing_field(space, ['body'], 'Interactive') else: pass w_list = self.w_body @@ -178,7 +183,6 @@ class Expression(mod): - def __init__(self, body): self.body = body self.initialization_state = 1 @@ -192,7 +196,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Expression') + self.missing_field(space, ['body'], 'Expression') else: pass self.body.sync_app_attrs(space) @@ -200,7 +204,6 @@ class Suite(mod): - def __init__(self, body): self.body = body self.w_body = None @@ -216,7 +219,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['body'], 'Suite') + self.missing_field(space, ['body'], 'Suite') else: pass w_list = self.w_body @@ -232,15 +235,13 @@ class stmt(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class FunctionDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, args, body, decorator_list, lineno, col_offset): self.name = name self.args = args @@ -264,7 +265,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'args', 'body', 'decorator_list', 'lineno', 'col_offset'], 'FunctionDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef') else: pass self.args.sync_app_attrs(space) @@ -292,9 +293,6 @@ class ClassDef(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, name, bases, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases @@ -320,7 +318,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['name', 'bases', 'body', 'decorator_list', 
'lineno', 'col_offset'], 'ClassDef') + self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef') else: pass w_list = self.w_bases @@ -357,9 +355,6 @@ class Return(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -374,10 +369,10 @@ return visitor.visit_Return(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Return') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Return') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -385,9 +380,6 @@ class Delete(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, targets, lineno, col_offset): self.targets = targets self.w_targets = None @@ -404,7 +396,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['targets', 'lineno', 'col_offset'], 'Delete') + self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete') else: pass w_list = self.w_targets @@ -421,9 +413,6 @@ class Assign(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, targets, value, lineno, col_offset): self.targets = targets self.w_targets = None @@ -442,7 +431,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['targets', 'value', 'lineno', 'col_offset'], 'Assign') + self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') else: pass w_list = self.w_targets @@ -460,9 +449,6 @@ class AugAssign(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, target, op, value, lineno, col_offset): self.target = target self.op = op @@ -480,7 +466,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['target', 'op', 'value', 'lineno', 'col_offset'], 'AugAssign') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') else: pass self.target.sync_app_attrs(space) @@ -489,9 +475,6 @@ class Print(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, dest, values, nl, lineno, col_offset): self.dest = dest self.values = values @@ -511,10 +494,10 @@ return visitor.visit_Print(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 30: - missing_field(space, self.initialization_state, [None, 'values', 'nl', 'lineno', 'col_offset'], 'Print') + if (self.initialization_state & ~4) ^ 27: + self.missing_field(space, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.dest = None if self.dest: self.dest.sync_app_attrs(space) @@ -532,9 +515,6 @@ class For(stmt): - _lineno_mask = 16 - _col_offset_mask = 32 - def __init__(self, target, iter, body, orelse, lineno, col_offset): self.target = target self.iter = iter @@ -559,7 +539,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 63: - missing_field(space, self.initialization_state, ['target', 'iter', 'body', 'orelse', 'lineno', 'col_offset'], 'For') + self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 
'orelse'], 'For') else: pass self.target.sync_app_attrs(space) @@ -588,9 +568,6 @@ class While(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -613,7 +590,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'While') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') else: pass self.test.sync_app_attrs(space) @@ -641,9 +618,6 @@ class If(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -666,7 +640,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'If') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') else: pass self.test.sync_app_attrs(space) @@ -694,9 +668,6 @@ class With(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, context_expr, optional_vars, body, lineno, col_offset): self.context_expr = context_expr self.optional_vars = optional_vars @@ -717,10 +688,10 @@ return visitor.visit_With(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 29: - missing_field(space, self.initialization_state, ['context_expr', None, 'body', 'lineno', 'col_offset'], 'With') + if (self.initialization_state & ~8) ^ 23: + self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.optional_vars = None self.context_expr.sync_app_attrs(space) if self.optional_vars: @@ -739,9 +710,6 @@ class Raise(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, inst, tback, lineno, col_offset): self.type = type self.inst = inst @@ -762,14 +730,14 @@ return visitor.visit_Raise(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~7) ^ 24: - missing_field(space, self.initialization_state, [None, None, None, 'lineno', 'col_offset'], 'Raise') + if (self.initialization_state & ~28) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.inst = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.tback = None if self.type: self.type.sync_app_attrs(space) @@ -781,9 +749,6 @@ class TryExcept(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, handlers, orelse, lineno, col_offset): self.body = body self.w_body = None @@ -808,7 +773,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['body', 'handlers', 'orelse', 'lineno', 'col_offset'], 'TryExcept') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') else: pass w_list = self.w_body @@ -845,9 +810,6 @@ class TryFinally(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, body, finalbody, lineno, col_offset): self.body = body self.w_body = None @@ -868,7 +830,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 
15: - missing_field(space, self.initialization_state, ['body', 'finalbody', 'lineno', 'col_offset'], 'TryFinally') + self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') else: pass w_list = self.w_body @@ -895,9 +857,6 @@ class Assert(stmt): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, test, msg, lineno, col_offset): self.test = test self.msg = msg @@ -914,10 +873,10 @@ return visitor.visit_Assert(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 13: - missing_field(space, self.initialization_state, ['test', None, 'lineno', 'col_offset'], 'Assert') + if (self.initialization_state & ~8) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.msg = None self.test.sync_app_attrs(space) if self.msg: @@ -926,9 +885,6 @@ class Import(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -945,7 +901,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Import') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import') else: pass w_list = self.w_names @@ -962,9 +918,6 @@ class ImportFrom(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, module, names, level, lineno, col_offset): self.module = module self.names = names @@ -982,12 +935,12 @@ return visitor.visit_ImportFrom(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~5) ^ 26: - missing_field(space, self.initialization_state, [None, 'names', None, 'lineno', 'col_offset'], 'ImportFrom') + if (self.initialization_state & ~20) ^ 11: + self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.module = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.level = 0 w_list = self.w_names if w_list is not None: @@ -1003,9 +956,6 @@ class Exec(stmt): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, body, globals, locals, lineno, col_offset): self.body = body self.globals = globals @@ -1025,12 +975,12 @@ return visitor.visit_Exec(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~6) ^ 25: - missing_field(space, self.initialization_state, ['body', None, None, 'lineno', 'col_offset'], 'Exec') + if (self.initialization_state & ~24) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'body', None, None], 'Exec') else: - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.globals = None - if not self.initialization_state & 4: + if not self.initialization_state & 16: self.locals = None self.body.sync_app_attrs(space) if self.globals: @@ -1041,9 +991,6 @@ class Global(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, names, lineno, col_offset): self.names = names self.w_names = None @@ -1058,7 +1005,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Global') + self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') else: pass w_list = self.w_names @@ -1072,9 +1019,6 @@ class Expr(stmt): - _lineno_mask = 2 - _col_offset_mask = 4 - def 
__init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) @@ -1089,7 +1033,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Expr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr') else: pass self.value.sync_app_attrs(space) @@ -1097,9 +1041,6 @@ class Pass(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1112,16 +1053,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Pass') + self.missing_field(space, ['lineno', 'col_offset'], 'Pass') else: pass class Break(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1134,16 +1072,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Break') + self.missing_field(space, ['lineno', 'col_offset'], 'Break') else: pass class Continue(stmt): - _lineno_mask = 1 - _col_offset_mask = 2 - def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) self.initialization_state = 3 @@ -1156,21 +1091,19 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Continue') + self.missing_field(space, ['lineno', 'col_offset'], 'Continue') else: pass class expr(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class BoolOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, values, lineno, col_offset): self.op = op self.values = values @@ -1188,7 +1121,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'values', 'lineno', 'col_offset'], 'BoolOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp') else: pass w_list = self.w_values @@ -1205,9 +1138,6 @@ class BinOp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, op, right, lineno, col_offset): self.left = left self.op = op @@ -1225,7 +1155,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'op', 'right', 'lineno', 'col_offset'], 'BinOp') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'op', 'right'], 'BinOp') else: pass self.left.sync_app_attrs(space) @@ -1234,9 +1164,6 @@ class UnaryOp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, op, operand, lineno, col_offset): self.op = op self.operand = operand @@ -1252,7 +1179,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['op', 'operand', 'lineno', 'col_offset'], 'UnaryOp') + self.missing_field(space, ['lineno', 'col_offset', 'op', 'operand'], 'UnaryOp') else: pass self.operand.sync_app_attrs(space) @@ -1260,9 +1187,6 @@ class Lambda(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, args, body, lineno, col_offset): self.args = args self.body = body @@ -1279,7 +1203,7 @@ def sync_app_attrs(self, space): if 
(self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['args', 'body', 'lineno', 'col_offset'], 'Lambda') + self.missing_field(space, ['lineno', 'col_offset', 'args', 'body'], 'Lambda') else: pass self.args.sync_app_attrs(space) @@ -1288,9 +1212,6 @@ class IfExp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body @@ -1309,7 +1230,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'IfExp') + self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'IfExp') else: pass self.test.sync_app_attrs(space) @@ -1319,9 +1240,6 @@ class Dict(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, keys, values, lineno, col_offset): self.keys = keys self.w_keys = None @@ -1342,7 +1260,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['keys', 'values', 'lineno', 'col_offset'], 'Dict') + self.missing_field(space, ['lineno', 'col_offset', 'keys', 'values'], 'Dict') else: pass w_list = self.w_keys @@ -1369,9 +1287,6 @@ class Set(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, elts, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1388,7 +1303,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['elts', 'lineno', 'col_offset'], 'Set') + self.missing_field(space, ['lineno', 'col_offset', 'elts'], 'Set') else: pass w_list = self.w_elts @@ -1405,9 +1320,6 @@ class ListComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1426,7 +1338,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'ListComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'ListComp') else: pass self.elt.sync_app_attrs(space) @@ -1444,9 +1356,6 @@ class SetComp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1465,7 +1374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'SetComp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'SetComp') else: pass self.elt.sync_app_attrs(space) @@ -1483,9 +1392,6 @@ class DictComp(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, key, value, generators, lineno, col_offset): self.key = key self.value = value @@ -1506,7 +1412,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['key', 'value', 'generators', 'lineno', 'col_offset'], 'DictComp') + self.missing_field(space, ['lineno', 'col_offset', 'key', 'value', 'generators'], 'DictComp') else: pass self.key.sync_app_attrs(space) @@ -1525,9 +1431,6 @@ class GeneratorExp(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators @@ -1546,7 +1449,7 @@ def sync_app_attrs(self, space): 
if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'GeneratorExp') + self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'GeneratorExp') else: pass self.elt.sync_app_attrs(space) @@ -1564,9 +1467,6 @@ class Yield(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1581,10 +1481,10 @@ return visitor.visit_Yield(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~1) ^ 6: - missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Yield') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Yield') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) @@ -1592,9 +1492,6 @@ class Compare(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, left, ops, comparators, lineno, col_offset): self.left = left self.ops = ops @@ -1615,7 +1512,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['left', 'ops', 'comparators', 'lineno', 'col_offset'], 'Compare') + self.missing_field(space, ['lineno', 'col_offset', 'left', 'ops', 'comparators'], 'Compare') else: pass self.left.sync_app_attrs(space) @@ -1640,9 +1537,6 @@ class Call(expr): - _lineno_mask = 32 - _col_offset_mask = 64 - def __init__(self, func, args, keywords, starargs, kwargs, lineno, col_offset): self.func = func self.args = args @@ -1670,12 +1564,12 @@ return visitor.visit_Call(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~24) ^ 103: - missing_field(space, self.initialization_state, ['func', 'args', 'keywords', None, None, 'lineno', 'col_offset'], 'Call') + if (self.initialization_state & ~96) ^ 31: + self.missing_field(space, ['lineno', 'col_offset', 'func', 'args', 'keywords', None, None], 'Call') else: - if not self.initialization_state & 8: + if not self.initialization_state & 32: self.starargs = None - if not self.initialization_state & 16: + if not self.initialization_state & 64: self.kwargs = None self.func.sync_app_attrs(space) w_list = self.w_args @@ -1706,9 +1600,6 @@ class Repr(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1723,7 +1614,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Repr') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Repr') else: pass self.value.sync_app_attrs(space) @@ -1731,9 +1622,6 @@ class Num(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, n, lineno, col_offset): self.n = n expr.__init__(self, lineno, col_offset) @@ -1747,16 +1635,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['n', 'lineno', 'col_offset'], 'Num') + self.missing_field(space, ['lineno', 'col_offset', 'n'], 'Num') else: pass class Str(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, s, lineno, col_offset): self.s = s expr.__init__(self, lineno, col_offset) @@ -1770,16 +1655,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 
7: - missing_field(space, self.initialization_state, ['s', 'lineno', 'col_offset'], 'Str') + self.missing_field(space, ['lineno', 'col_offset', 's'], 'Str') else: pass class Attribute(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, attr, ctx, lineno, col_offset): self.value = value self.attr = attr @@ -1796,7 +1678,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'attr', 'ctx', 'lineno', 'col_offset'], 'Attribute') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'attr', 'ctx'], 'Attribute') else: pass self.value.sync_app_attrs(space) @@ -1804,9 +1686,6 @@ class Subscript(expr): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, value, slice, ctx, lineno, col_offset): self.value = value self.slice = slice @@ -1824,7 +1703,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 31: - missing_field(space, self.initialization_state, ['value', 'slice', 'ctx', 'lineno', 'col_offset'], 'Subscript') + self.missing_field(space, ['lineno', 'col_offset', 'value', 'slice', 'ctx'], 'Subscript') else: pass self.value.sync_app_attrs(space) @@ -1833,9 +1712,6 @@ class Name(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, id, ctx, lineno, col_offset): self.id = id self.ctx = ctx @@ -1850,16 +1726,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['id', 'ctx', 'lineno', 'col_offset'], 'Name') + self.missing_field(space, ['lineno', 'col_offset', 'id', 'ctx'], 'Name') else: pass class List(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1877,7 +1750,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'List') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'List') else: pass w_list = self.w_elts @@ -1894,9 +1767,6 @@ class Tuple(expr): - _lineno_mask = 4 - _col_offset_mask = 8 - def __init__(self, elts, ctx, lineno, col_offset): self.elts = elts self.w_elts = None @@ -1914,7 +1784,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 15: - missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'Tuple') + self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'Tuple') else: pass w_list = self.w_elts @@ -1931,9 +1801,6 @@ class Const(expr): - _lineno_mask = 2 - _col_offset_mask = 4 - def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) @@ -1947,7 +1814,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Const') + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Const') else: pass @@ -2009,7 +1876,6 @@ class Ellipsis(slice): - def __init__(self): self.initialization_state = 0 @@ -2021,14 +1887,13 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 0: - missing_field(space, self.initialization_state, [], 'Ellipsis') + self.missing_field(space, [], 'Ellipsis') else: pass class Slice(slice): - def __init__(self, lower, upper, step): self.lower = lower self.upper = upper @@ -2049,7 +1914,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & 
~7) ^ 0: - missing_field(space, self.initialization_state, [None, None, None], 'Slice') + self.missing_field(space, [None, None, None], 'Slice') else: if not self.initialization_state & 1: self.lower = None @@ -2067,7 +1932,6 @@ class ExtSlice(slice): - def __init__(self, dims): self.dims = dims self.w_dims = None @@ -2083,7 +1947,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['dims'], 'ExtSlice') + self.missing_field(space, ['dims'], 'ExtSlice') else: pass w_list = self.w_dims @@ -2100,7 +1964,6 @@ class Index(slice): - def __init__(self, value): self.value = value self.initialization_state = 1 @@ -2114,7 +1977,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 1: - missing_field(space, self.initialization_state, ['value'], 'Index') + self.missing_field(space, ['value'], 'Index') else: pass self.value.sync_app_attrs(space) @@ -2377,7 +2240,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: - missing_field(space, self.initialization_state, ['target', 'iter', 'ifs'], 'comprehension') + self.missing_field(space, ['target', 'iter', 'ifs'], 'comprehension') else: pass self.target.sync_app_attrs(space) @@ -2394,15 +2257,13 @@ node.sync_app_attrs(space) class excepthandler(AST): + def __init__(self, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset class ExceptHandler(excepthandler): - _lineno_mask = 8 - _col_offset_mask = 16 - def __init__(self, type, name, body, lineno, col_offset): self.type = type self.name = name @@ -2424,12 +2285,12 @@ return visitor.visit_ExceptHandler(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~3) ^ 28: - missing_field(space, self.initialization_state, [None, None, 'body', 'lineno', 'col_offset'], 'ExceptHandler') + if (self.initialization_state & ~12) ^ 19: + self.missing_field(space, ['lineno', 'col_offset', None, None, 'body'], 'ExceptHandler') else: - if not self.initialization_state & 1: + if not self.initialization_state & 4: self.type = None - if not self.initialization_state & 2: + if not self.initialization_state & 8: self.name = None if self.type: self.type.sync_app_attrs(space) @@ -2470,7 +2331,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~6) ^ 9: - missing_field(space, self.initialization_state, ['args', None, None, 'defaults'], 'arguments') + self.missing_field(space, ['args', None, None, 'defaults'], 'arguments') else: if not self.initialization_state & 2: self.vararg = None @@ -2513,7 +2374,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 3: - missing_field(space, self.initialization_state, ['arg', 'value'], 'keyword') + self.missing_field(space, ['arg', 'value'], 'keyword') else: pass self.value.sync_app_attrs(space) @@ -2533,7 +2394,7 @@ def sync_app_attrs(self, space): if (self.initialization_state & ~2) ^ 1: - missing_field(space, self.initialization_state, ['name', None], 'alias') + self.missing_field(space, ['name', None], 'alias') else: if not self.initialization_state & 2: self.asname = None @@ -3019,6 +2880,8 @@ def Expression_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -3098,7 +2961,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -3112,14 +2975,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def stmt_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -3133,7 +2996,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 stmt.typedef = typedef.TypeDef("stmt", AST.typedef, @@ -3149,7 +3012,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3163,14 +3026,14 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def FunctionDef_get_args(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -3184,10 +3047,10 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def FunctionDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3201,10 +3064,10 @@ def FunctionDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def FunctionDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3218,7 +3081,7 @@ def FunctionDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _FunctionDef_field_unroller = unrolling_iterable(['name', 'args', 'body', 'decorator_list']) def FunctionDef_init(space, w_self, __args__): @@ -3254,7 +3117,7 @@ w_obj = w_self.getdictvalue(space, 'name') if w_obj is not 
None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -3268,10 +3131,10 @@ w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ClassDef_get_bases(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases') if w_self.w_bases is None: @@ -3285,10 +3148,10 @@ def ClassDef_set_bases(space, w_self, w_new_value): w_self.w_bases = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ClassDef_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3302,10 +3165,10 @@ def ClassDef_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def ClassDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') if w_self.w_decorator_list is None: @@ -3319,7 +3182,7 @@ def ClassDef_set_decorator_list(space, w_self, w_new_value): w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _ClassDef_field_unroller = unrolling_iterable(['name', 'bases', 'body', 'decorator_list']) def ClassDef_init(space, w_self, __args__): @@ -3356,7 +3219,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3364,13 +3227,15 @@ def Return_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Return_field_unroller = unrolling_iterable(['value']) def Return_init(space, w_self, __args__): @@ -3397,7 +3262,7 @@ ) def Delete_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3411,7 +3276,7 @@ def Delete_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Delete_field_unroller = unrolling_iterable(['targets']) def 
Delete_init(space, w_self, __args__): @@ -3439,7 +3304,7 @@ ) def Assign_get_targets(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') if w_self.w_targets is None: @@ -3453,14 +3318,14 @@ def Assign_set_targets(space, w_self, w_new_value): w_self.w_targets = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3468,13 +3333,15 @@ def Assign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assign_field_unroller = unrolling_iterable(['targets', 'value']) def Assign_init(space, w_self, __args__): @@ -3507,7 +3374,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3515,20 +3382,22 @@ def AugAssign_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def AugAssign_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -3544,14 +3413,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def AugAssign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -3559,13 +3428,15 @@ def AugAssign_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise 
OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _AugAssign_field_unroller = unrolling_iterable(['target', 'op', 'value']) def AugAssign_init(space, w_self, __args__): @@ -3598,7 +3469,7 @@ w_obj = w_self.getdictvalue(space, 'dest') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dest') return space.wrap(w_self.dest) @@ -3606,16 +3477,18 @@ def Print_set_dest(space, w_self, w_new_value): try: w_self.dest = space.interp_w(expr, w_new_value, True) + if type(w_self.dest) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'dest', w_new_value) return w_self.deldictvalue(space, 'dest') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Print_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -3629,14 +3502,14 @@ def Print_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Print_get_nl(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'nl') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl') return space.wrap(w_self.nl) @@ -3650,7 +3523,7 @@ w_self.setdictvalue(space, 'nl', w_new_value) return w_self.deldictvalue(space, 'nl') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Print_field_unroller = unrolling_iterable(['dest', 'values', 'nl']) def Print_init(space, w_self, __args__): @@ -3684,7 +3557,7 @@ w_obj = w_self.getdictvalue(space, 'target') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') return space.wrap(w_self.target) @@ -3692,20 +3565,22 @@ def For_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'target', w_new_value) return w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def For_get_iter(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'iter') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute 
'%s'", typename, 'iter') return space.wrap(w_self.iter) @@ -3713,16 +3588,18 @@ def For_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'iter', w_new_value) return w_self.deldictvalue(space, 'iter') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def For_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3736,10 +3613,10 @@ def For_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def For_get_orelse(space, w_self): - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3753,7 +3630,7 @@ def For_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 _For_field_unroller = unrolling_iterable(['target', 'iter', 'body', 'orelse']) def For_init(space, w_self, __args__): @@ -3789,7 +3666,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3797,16 +3674,18 @@ def While_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def While_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3820,10 +3699,10 @@ def While_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def While_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3837,7 +3716,7 @@ def While_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _While_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def While_init(space, w_self, __args__): @@ -3872,7 +3751,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + 
if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -3880,16 +3759,18 @@ def If_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def If_get_body(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -3903,10 +3784,10 @@ def If_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def If_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -3920,7 +3801,7 @@ def If_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _If_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def If_init(space, w_self, __args__): @@ -3955,7 +3836,7 @@ w_obj = w_self.getdictvalue(space, 'context_expr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr') return space.wrap(w_self.context_expr) @@ -3963,20 +3844,22 @@ def With_set_context_expr(space, w_self, w_new_value): try: w_self.context_expr = space.interp_w(expr, w_new_value, False) + if type(w_self.context_expr) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'context_expr', w_new_value) return w_self.deldictvalue(space, 'context_expr') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def With_get_optional_vars(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'optional_vars') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars') return space.wrap(w_self.optional_vars) @@ -3984,16 +3867,18 @@ def With_set_optional_vars(space, w_self, w_new_value): try: w_self.optional_vars = space.interp_w(expr, w_new_value, True) + if type(w_self.optional_vars) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'optional_vars', w_new_value) return w_self.deldictvalue(space, 'optional_vars') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def With_get_body(space, w_self): - if not 
w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4007,7 +3892,7 @@ def With_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _With_field_unroller = unrolling_iterable(['context_expr', 'optional_vars', 'body']) def With_init(space, w_self, __args__): @@ -4041,7 +3926,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -4049,20 +3934,22 @@ def Raise_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Raise_get_inst(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'inst') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst') return space.wrap(w_self.inst) @@ -4070,20 +3957,22 @@ def Raise_set_inst(space, w_self, w_new_value): try: w_self.inst = space.interp_w(expr, w_new_value, True) + if type(w_self.inst) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'inst', w_new_value) return w_self.deldictvalue(space, 'inst') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Raise_get_tback(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'tback') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'tback') return space.wrap(w_self.tback) @@ -4091,13 +3980,15 @@ def Raise_set_tback(space, w_self, w_new_value): try: w_self.tback = space.interp_w(expr, w_new_value, True) + if type(w_self.tback) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'tback', w_new_value) return w_self.deldictvalue(space, 'tback') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Raise_field_unroller = unrolling_iterable(['type', 'inst', 'tback']) def Raise_init(space, w_self, __args__): @@ -4126,7 +4017,7 @@ ) def TryExcept_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4140,10 +4031,10 @@ def 
TryExcept_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryExcept_get_handlers(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers') if w_self.w_handlers is None: @@ -4157,10 +4048,10 @@ def TryExcept_set_handlers(space, w_self, w_new_value): w_self.w_handlers = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def TryExcept_get_orelse(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') if w_self.w_orelse is None: @@ -4174,7 +4065,7 @@ def TryExcept_set_orelse(space, w_self, w_new_value): w_self.w_orelse = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _TryExcept_field_unroller = unrolling_iterable(['body', 'handlers', 'orelse']) def TryExcept_init(space, w_self, __args__): @@ -4206,7 +4097,7 @@ ) def TryFinally_get_body(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -4220,10 +4111,10 @@ def TryFinally_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def TryFinally_get_finalbody(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody') if w_self.w_finalbody is None: @@ -4237,7 +4128,7 @@ def TryFinally_set_finalbody(space, w_self, w_new_value): w_self.w_finalbody = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _TryFinally_field_unroller = unrolling_iterable(['body', 'finalbody']) def TryFinally_init(space, w_self, __args__): @@ -4271,7 +4162,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -4279,20 +4170,22 @@ def Assert_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Assert_get_msg(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'msg') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg') return 
space.wrap(w_self.msg) @@ -4300,13 +4193,15 @@ def Assert_set_msg(space, w_self, w_new_value): try: w_self.msg = space.interp_w(expr, w_new_value, True) + if type(w_self.msg) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'msg', w_new_value) return w_self.deldictvalue(space, 'msg') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Assert_field_unroller = unrolling_iterable(['test', 'msg']) def Assert_init(space, w_self, __args__): @@ -4334,7 +4229,7 @@ ) def Import_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4348,7 +4243,7 @@ def Import_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Import_field_unroller = unrolling_iterable(['names']) def Import_init(space, w_self, __args__): @@ -4380,7 +4275,7 @@ w_obj = w_self.getdictvalue(space, 'module') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module') return space.wrap(w_self.module) @@ -4397,10 +4292,10 @@ w_self.setdictvalue(space, 'module', w_new_value) return w_self.deldictvalue(space, 'module') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ImportFrom_get_names(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4414,14 +4309,14 @@ def ImportFrom_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ImportFrom_get_level(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'level') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level') return space.wrap(w_self.level) @@ -4435,7 +4330,7 @@ w_self.setdictvalue(space, 'level', w_new_value) return w_self.deldictvalue(space, 'level') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ImportFrom_field_unroller = unrolling_iterable(['module', 'names', 'level']) def ImportFrom_init(space, w_self, __args__): @@ -4469,7 +4364,7 @@ w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -4477,20 +4372,22 @@ def Exec_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, 
space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Exec_get_globals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'globals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals') return space.wrap(w_self.globals) @@ -4498,20 +4395,22 @@ def Exec_set_globals(space, w_self, w_new_value): try: w_self.globals = space.interp_w(expr, w_new_value, True) + if type(w_self.globals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'globals', w_new_value) return w_self.deldictvalue(space, 'globals') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Exec_get_locals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'locals') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals') return space.wrap(w_self.locals) @@ -4519,13 +4418,15 @@ def Exec_set_locals(space, w_self, w_new_value): try: w_self.locals = space.interp_w(expr, w_new_value, True) + if type(w_self.locals) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'locals', w_new_value) return w_self.deldictvalue(space, 'locals') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Exec_field_unroller = unrolling_iterable(['body', 'globals', 'locals']) def Exec_init(space, w_self, __args__): @@ -4554,7 +4455,7 @@ ) def Global_get_names(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') if w_self.w_names is None: @@ -4568,7 +4469,7 @@ def Global_set_names(space, w_self, w_new_value): w_self.w_names = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Global_field_unroller = unrolling_iterable(['names']) def Global_init(space, w_self, __args__): @@ -4600,7 +4501,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -4608,13 +4509,15 @@ def Expr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Expr_field_unroller = unrolling_iterable(['value']) def Expr_init(space, w_self, 
__args__): @@ -4696,7 +4599,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -4710,14 +4613,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def expr_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -4731,7 +4634,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 expr.typedef = typedef.TypeDef("expr", AST.typedef, @@ -4747,7 +4650,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return boolop_to_class[w_self.op - 1]() @@ -4763,10 +4666,10 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BoolOp_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -4780,7 +4683,7 @@ def BoolOp_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _BoolOp_field_unroller = unrolling_iterable(['op', 'values']) def BoolOp_init(space, w_self, __args__): @@ -4813,7 +4716,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -4821,20 +4724,22 @@ def BinOp_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def BinOp_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return operator_to_class[w_self.op - 1]() @@ -4850,14 +4755,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def BinOp_get_right(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'right') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right') return space.wrap(w_self.right) @@ -4865,13 +4770,15 @@ def BinOp_set_right(space, w_self, w_new_value): try: w_self.right = space.interp_w(expr, w_new_value, False) + if type(w_self.right) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'right', w_new_value) return w_self.deldictvalue(space, 'right') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _BinOp_field_unroller = unrolling_iterable(['left', 'op', 'right']) def BinOp_init(space, w_self, __args__): @@ -4904,7 +4811,7 @@ w_obj = w_self.getdictvalue(space, 'op') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') return unaryop_to_class[w_self.op - 1]() @@ -4920,14 +4827,14 @@ return # need to save the original object too w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def UnaryOp_get_operand(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'operand') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand') return space.wrap(w_self.operand) @@ -4935,13 +4842,15 @@ def UnaryOp_set_operand(space, w_self, w_new_value): try: w_self.operand = space.interp_w(expr, w_new_value, False) + if type(w_self.operand) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'operand', w_new_value) return w_self.deldictvalue(space, 'operand') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _UnaryOp_field_unroller = unrolling_iterable(['op', 'operand']) def UnaryOp_init(space, w_self, __args__): @@ -4973,7 +4882,7 @@ w_obj = w_self.getdictvalue(space, 'args') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') return space.wrap(w_self.args) @@ -4987,14 +4896,14 @@ w_self.setdictvalue(space, 'args', w_new_value) return w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Lambda_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not 
w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5002,13 +4911,15 @@ def Lambda_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Lambda_field_unroller = unrolling_iterable(['args', 'body']) def Lambda_init(space, w_self, __args__): @@ -5040,7 +4951,7 @@ w_obj = w_self.getdictvalue(space, 'test') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') return space.wrap(w_self.test) @@ -5048,20 +4959,22 @@ def IfExp_set_test(space, w_self, w_new_value): try: w_self.test = space.interp_w(expr, w_new_value, False) + if type(w_self.test) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'test', w_new_value) return w_self.deldictvalue(space, 'test') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def IfExp_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') return space.wrap(w_self.body) @@ -5069,20 +4982,22 @@ def IfExp_set_body(space, w_self, w_new_value): try: w_self.body = space.interp_w(expr, w_new_value, False) + if type(w_self.body) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'body', w_new_value) return w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def IfExp_get_orelse(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'orelse') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') return space.wrap(w_self.orelse) @@ -5090,13 +5005,15 @@ def IfExp_set_orelse(space, w_self, w_new_value): try: w_self.orelse = space.interp_w(expr, w_new_value, False) + if type(w_self.orelse) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'orelse', w_new_value) return w_self.deldictvalue(space, 'orelse') - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _IfExp_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def IfExp_init(space, w_self, __args__): @@ -5125,7 +5042,7 @@ ) def Dict_get_keys(space, w_self): - if not 
w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys') if w_self.w_keys is None: @@ -5139,10 +5056,10 @@ def Dict_set_keys(space, w_self, w_new_value): w_self.w_keys = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Dict_get_values(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') if w_self.w_values is None: @@ -5156,7 +5073,7 @@ def Dict_set_values(space, w_self, w_new_value): w_self.w_values = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Dict_field_unroller = unrolling_iterable(['keys', 'values']) def Dict_init(space, w_self, __args__): @@ -5186,7 +5103,7 @@ ) def Set_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -5200,7 +5117,7 @@ def Set_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Set_field_unroller = unrolling_iterable(['elts']) def Set_init(space, w_self, __args__): @@ -5232,7 +5149,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5240,16 +5157,18 @@ def ListComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ListComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5263,7 +5182,7 @@ def ListComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _ListComp_field_unroller = unrolling_iterable(['elt', 'generators']) def ListComp_init(space, w_self, __args__): @@ -5296,7 +5215,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5304,16 +5223,18 @@ def SetComp_set_elt(space, w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) 
except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def SetComp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5327,7 +5248,7 @@ def SetComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _SetComp_field_unroller = unrolling_iterable(['elt', 'generators']) def SetComp_init(space, w_self, __args__): @@ -5360,7 +5281,7 @@ w_obj = w_self.getdictvalue(space, 'key') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key') return space.wrap(w_self.key) @@ -5368,20 +5289,22 @@ def DictComp_set_key(space, w_self, w_new_value): try: w_self.key = space.interp_w(expr, w_new_value, False) + if type(w_self.key) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'key', w_new_value) return w_self.deldictvalue(space, 'key') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def DictComp_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5389,16 +5312,18 @@ def DictComp_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def DictComp_get_generators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5412,7 +5337,7 @@ def DictComp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _DictComp_field_unroller = unrolling_iterable(['key', 'value', 'generators']) def DictComp_init(space, w_self, __args__): @@ -5446,7 +5371,7 @@ w_obj = w_self.getdictvalue(space, 'elt') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') return space.wrap(w_self.elt) @@ -5454,16 +5379,18 @@ def GeneratorExp_set_elt(space, 
w_self, w_new_value): try: w_self.elt = space.interp_w(expr, w_new_value, False) + if type(w_self.elt) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'elt', w_new_value) return w_self.deldictvalue(space, 'elt') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def GeneratorExp_get_generators(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') if w_self.w_generators is None: @@ -5477,7 +5404,7 @@ def GeneratorExp_set_generators(space, w_self, w_new_value): w_self.w_generators = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _GeneratorExp_field_unroller = unrolling_iterable(['elt', 'generators']) def GeneratorExp_init(space, w_self, __args__): @@ -5510,7 +5437,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5518,13 +5445,15 @@ def Yield_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, True) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Yield_field_unroller = unrolling_iterable(['value']) def Yield_init(space, w_self, __args__): @@ -5555,7 +5484,7 @@ w_obj = w_self.getdictvalue(space, 'left') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') return space.wrap(w_self.left) @@ -5563,16 +5492,18 @@ def Compare_set_left(space, w_self, w_new_value): try: w_self.left = space.interp_w(expr, w_new_value, False) + if type(w_self.left) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'left', w_new_value) return w_self.deldictvalue(space, 'left') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Compare_get_ops(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops') if w_self.w_ops is None: @@ -5586,10 +5517,10 @@ def Compare_set_ops(space, w_self, w_new_value): w_self.w_ops = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Compare_get_comparators(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators') if w_self.w_comparators is None: @@ -5603,7 +5534,7 @@ 
def Compare_set_comparators(space, w_self, w_new_value): w_self.w_comparators = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Compare_field_unroller = unrolling_iterable(['left', 'ops', 'comparators']) def Compare_init(space, w_self, __args__): @@ -5638,7 +5569,7 @@ w_obj = w_self.getdictvalue(space, 'func') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func') return space.wrap(w_self.func) @@ -5646,16 +5577,18 @@ def Call_set_func(space, w_self, w_new_value): try: w_self.func = space.interp_w(expr, w_new_value, False) + if type(w_self.func) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'func', w_new_value) return w_self.deldictvalue(space, 'func') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Call_get_args(space, w_self): - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') if w_self.w_args is None: @@ -5669,10 +5602,10 @@ def Call_set_args(space, w_self, w_new_value): w_self.w_args = w_new_value - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Call_get_keywords(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') if w_self.w_keywords is None: @@ -5686,14 +5619,14 @@ def Call_set_keywords(space, w_self, w_new_value): w_self.w_keywords = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 def Call_get_starargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'starargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 32: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') return space.wrap(w_self.starargs) @@ -5701,20 +5634,22 @@ def Call_set_starargs(space, w_self, w_new_value): try: w_self.starargs = space.interp_w(expr, w_new_value, True) + if type(w_self.starargs) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'starargs', w_new_value) return w_self.deldictvalue(space, 'starargs') - w_self.initialization_state |= 8 + w_self.initialization_state |= 32 def Call_get_kwargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'kwargs') if w_obj is not None: return w_obj - if not w_self.initialization_state & 16: + if not w_self.initialization_state & 64: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') return space.wrap(w_self.kwargs) @@ -5722,13 +5657,15 @@ def Call_set_kwargs(space, w_self, w_new_value): try: w_self.kwargs = space.interp_w(expr, w_new_value, True) + if type(w_self.kwargs) is expr: + raise OperationError(space.w_TypeError, 
space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'kwargs', w_new_value) return w_self.deldictvalue(space, 'kwargs') - w_self.initialization_state |= 16 + w_self.initialization_state |= 64 _Call_field_unroller = unrolling_iterable(['func', 'args', 'keywords', 'starargs', 'kwargs']) def Call_init(space, w_self, __args__): @@ -5765,7 +5702,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5773,13 +5710,15 @@ def Repr_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Repr_field_unroller = unrolling_iterable(['value']) def Repr_init(space, w_self, __args__): @@ -5810,7 +5749,7 @@ w_obj = w_self.getdictvalue(space, 'n') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n') return w_self.n @@ -5824,7 +5763,7 @@ w_self.setdictvalue(space, 'n', w_new_value) return w_self.deldictvalue(space, 'n') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Num_field_unroller = unrolling_iterable(['n']) def Num_init(space, w_self, __args__): @@ -5855,7 +5794,7 @@ w_obj = w_self.getdictvalue(space, 's') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's') return w_self.s @@ -5869,7 +5808,7 @@ w_self.setdictvalue(space, 's', w_new_value) return w_self.deldictvalue(space, 's') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Str_field_unroller = unrolling_iterable(['s']) def Str_init(space, w_self, __args__): @@ -5900,7 +5839,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5908,20 +5847,22 @@ def Attribute_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Attribute_get_attr(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'attr') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not 
w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr') return space.wrap(w_self.attr) @@ -5935,14 +5876,14 @@ w_self.setdictvalue(space, 'attr', w_new_value) return w_self.deldictvalue(space, 'attr') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Attribute_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -5958,7 +5899,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Attribute_field_unroller = unrolling_iterable(['value', 'attr', 'ctx']) def Attribute_init(space, w_self, __args__): @@ -5991,7 +5932,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return space.wrap(w_self.value) @@ -5999,20 +5940,22 @@ def Subscript_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Subscript_get_slice(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'slice') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice') return space.wrap(w_self.slice) @@ -6020,20 +5963,22 @@ def Subscript_set_slice(space, w_self, w_new_value): try: w_self.slice = space.interp_w(slice, w_new_value, False) + if type(w_self.slice) is slice: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'slice', w_new_value) return w_self.deldictvalue(space, 'slice') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def Subscript_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6049,7 +5994,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _Subscript_field_unroller = unrolling_iterable(['value', 'slice', 'ctx']) def Subscript_init(space, w_self, __args__): @@ 
-6082,7 +6027,7 @@ w_obj = w_self.getdictvalue(space, 'id') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id') return space.wrap(w_self.id) @@ -6096,14 +6041,14 @@ w_self.setdictvalue(space, 'id', w_new_value) return w_self.deldictvalue(space, 'id') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Name_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6119,7 +6064,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Name_field_unroller = unrolling_iterable(['id', 'ctx']) def Name_init(space, w_self, __args__): @@ -6147,7 +6092,7 @@ ) def List_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6161,14 +6106,14 @@ def List_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def List_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6184,7 +6129,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _List_field_unroller = unrolling_iterable(['elts', 'ctx']) def List_init(space, w_self, __args__): @@ -6213,7 +6158,7 @@ ) def Tuple_get_elts(space, w_self): - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') if w_self.w_elts is None: @@ -6227,14 +6172,14 @@ def Tuple_set_elts(space, w_self, w_new_value): w_self.w_elts = w_new_value - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def Tuple_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') return expr_context_to_class[w_self.ctx - 1]() @@ -6250,7 +6195,7 @@ return # need to save the original object too w_self.setdictvalue(space, 'ctx', w_new_value) - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 _Tuple_field_unroller = unrolling_iterable(['elts', 
'ctx']) def Tuple_init(space, w_self, __args__): @@ -6283,7 +6228,7 @@ w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') return w_self.value @@ -6297,7 +6242,7 @@ w_self.setdictvalue(space, 'value', w_new_value) return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 _Const_field_unroller = unrolling_iterable(['value']) def Const_init(space, w_self, __args__): @@ -6409,6 +6354,8 @@ def Slice_set_lower(space, w_self, w_new_value): try: w_self.lower = space.interp_w(expr, w_new_value, True) + if type(w_self.lower) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6430,6 +6377,8 @@ def Slice_set_upper(space, w_self, w_new_value): try: w_self.upper = space.interp_w(expr, w_new_value, True) + if type(w_self.upper) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6451,6 +6400,8 @@ def Slice_set_step(space, w_self, w_new_value): try: w_self.step = space.interp_w(expr, w_new_value, True) + if type(w_self.step) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6540,6 +6491,8 @@ def Index_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6809,6 +6762,8 @@ def comprehension_set_target(space, w_self, w_new_value): try: w_self.target = space.interp_w(expr, w_new_value, False) + if type(w_self.target) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6830,6 +6785,8 @@ def comprehension_set_iter(space, w_self, w_new_value): try: w_self.iter = space.interp_w(expr, w_new_value, False) + if type(w_self.iter) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6887,7 +6844,7 @@ w_obj = w_self.getdictvalue(space, 'lineno') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._lineno_mask: + if not w_self.initialization_state & 1: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') return space.wrap(w_self.lineno) @@ -6901,14 +6858,14 @@ w_self.setdictvalue(space, 'lineno', w_new_value) return w_self.deldictvalue(space, 'lineno') - w_self.initialization_state |= w_self._lineno_mask + w_self.initialization_state |= 1 def excepthandler_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') if w_obj is not None: return w_obj - if not w_self.initialization_state & w_self._col_offset_mask: + if not w_self.initialization_state & 2: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') return space.wrap(w_self.col_offset) @@ -6922,7 +6879,7 @@ 
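(Aside on the hunks above: the per-field bits are renumbered so that the two attributes shared by every AST node, lineno and col_offset, always sit at bits 1 and 2, and each node's own fields start at bit 4. A rough standalone sketch of that tracking scheme, with hypothetical names and no PyPy machinery, might look like:

    class Node(object):
        # bit 1 -> lineno, bit 2 -> col_offset, bits 4/8/16... -> own fields
        _lineno_bit = 1
        _col_offset_bit = 2

        def __init__(self):
            self.initialization_state = 0

        def set_lineno(self, value):
            self.lineno = value
            self.initialization_state |= self._lineno_bit

        def set_field(self, index, name, value):
            # index 0 is the first node-specific field, stored at bit 4
            setattr(self, name, value)
            self.initialization_state |= 4 << index

        def has_field(self, index):
            return bool(self.initialization_state & (4 << index))

    node = Node()
    node.set_lineno(3)
    node.set_field(0, 'value', object())
    assert node.has_field(0) and not node.has_field(1)

This is only an illustration of the numbering convention, not the generated ast.py code itself.)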
w_self.setdictvalue(space, 'col_offset', w_new_value) return w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state |= w_self._col_offset_mask + w_self.initialization_state |= 2 excepthandler.typedef = typedef.TypeDef("excepthandler", AST.typedef, @@ -6938,7 +6895,7 @@ w_obj = w_self.getdictvalue(space, 'type') if w_obj is not None: return w_obj - if not w_self.initialization_state & 1: + if not w_self.initialization_state & 4: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') return space.wrap(w_self.type) @@ -6946,20 +6903,22 @@ def ExceptHandler_set_type(space, w_self, w_new_value): try: w_self.type = space.interp_w(expr, w_new_value, True) + if type(w_self.type) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'type', w_new_value) return w_self.deldictvalue(space, 'type') - w_self.initialization_state |= 1 + w_self.initialization_state |= 4 def ExceptHandler_get_name(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'name') if w_obj is not None: return w_obj - if not w_self.initialization_state & 2: + if not w_self.initialization_state & 8: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') return space.wrap(w_self.name) @@ -6967,16 +6926,18 @@ def ExceptHandler_set_name(space, w_self, w_new_value): try: w_self.name = space.interp_w(expr, w_new_value, True) + if type(w_self.name) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'name', w_new_value) return w_self.deldictvalue(space, 'name') - w_self.initialization_state |= 2 + w_self.initialization_state |= 8 def ExceptHandler_get_body(space, w_self): - if not w_self.initialization_state & 4: + if not w_self.initialization_state & 16: typename = space.type(w_self).getname(space) raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') if w_self.w_body is None: @@ -6990,7 +6951,7 @@ def ExceptHandler_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 4 + w_self.initialization_state |= 16 _ExceptHandler_field_unroller = unrolling_iterable(['type', 'name', 'body']) def ExceptHandler_init(space, w_self, __args__): @@ -7164,6 +7125,8 @@ def keyword_set_value(space, w_self, w_new_value): try: w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -79,6 +79,7 @@ else: self.emit("class %s(AST):" % (base,)) if sum.attributes: + self.emit("") args = ", ".join(attr.name.value for attr in sum.attributes) self.emit("def __init__(self, %s):" % (args,), 1) for attr in sum.attributes: @@ -114,7 +115,7 @@ else: names.append(repr(field.name.value)) sub = (", ".join(names), name.value) - self.emit("missing_field(space, self.initialization_state, [%s], %r)" + self.emit("self.missing_field(space, [%s], %r)" % sub, 3) 
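(Aside: the new "if type(w_self.x) is expr: raise OperationError(...)" lines in the setters reject a value whose type is exactly the abstract sum class, while still accepting any concrete subclass. A plain-Python sketch of the same check, using hypothetical classes:

    class expr(object):
        "abstract sum type"

    class Name(expr):
        pass

    def set_value(node, new_value):
        # mirror the generated check: a bare 'expr' instance is rejected
        # just like a completely wrong type would be
        if not isinstance(new_value, expr) or type(new_value) is expr:
            raise TypeError("expected some sort of expr")
        node.value = new_value

    class Dummy(object):
        pass

    node = Dummy()
    set_value(node, Name())          # concrete subclass: accepted
    try:
        set_value(node, expr())      # abstract base instance: rejected
    except TypeError:
        pass

Only a sketch of the intent; the real code goes through space.interp_w and OperationError.)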
self.emit("else:", 2) # Fill in all the default fields. @@ -195,17 +196,13 @@ def visitConstructor(self, cons, base, extra_attributes): self.emit("class %s(%s):" % (cons.name, base)) self.emit("") - for field in self.data.cons_attributes[cons]: - subst = (field.name, self.data.field_masks[field]) - self.emit("_%s_mask = %i" % subst, 1) - self.emit("") self.make_constructor(cons.fields, cons, extra_attributes, base) self.emit("") self.emit("def walkabout(self, visitor):", 1) self.emit("visitor.visit_%s(self)" % (cons.name,), 2) self.emit("") self.make_mutate_over(cons, cons.name) - self.make_var_syncer(cons.fields + self.data.cons_attributes[cons], + self.make_var_syncer(self.data.cons_attributes[cons] + cons.fields, cons, cons.name) def visitField(self, field): @@ -324,7 +321,7 @@ def visitSum(self, sum, name): for field in sum.attributes: - self.make_property(field, name, True) + self.make_property(field, name) self.make_typedef(name, "AST", sum.attributes, fields_name="_attributes") if not is_simple_sum(sum): @@ -400,13 +397,10 @@ def visitField(self, field, name): self.make_property(field, name) - def make_property(self, field, name, different_masks=False): + def make_property(self, field, name): func = "def %s_get_%s(space, w_self):" % (name, field.name) self.emit(func) - if different_masks: - flag = "w_self._%s_mask" % (field.name,) - else: - flag = self.data.field_masks[field] + flag = self.data.field_masks[field] if not field.seq: self.emit("if w_self.w_dict is not None:", 1) self.emit(" w_obj = w_self.getdictvalue(space, '%s')" % (field.name,), 1) @@ -458,6 +452,11 @@ config = (field.name, field.type, repr(field.opt)) self.emit("w_self.%s = space.interp_w(%s, w_new_value, %s)" % config, 2) + if field.type.value not in self.data.prod_simple: + self.emit("if type(w_self.%s) is %s:" % ( + field.name, field.type), 2) + self.emit("raise OperationError(space.w_TypeError, " + "space.w_None)", 3) else: level = 2 if field.opt and field.type.value != "int": @@ -505,7 +504,10 @@ optional_mask = 0 for i, field in enumerate(fields): flag = 1 << i - field_masks[field] = flag + if field not in field_masks: + field_masks[field] = flag + else: + assert field_masks[field] == flag if field.opt: optional_mask |= flag else: @@ -518,9 +520,9 @@ if is_simple_sum(sum): simple_types.add(tp.name.value) else: + attrs = [field for field in sum.attributes] for cons in sum.types: - attrs = [copy_field(field) for field in sum.attributes] - add_masks(cons.fields + attrs, cons) + add_masks(attrs + cons.fields, cons) cons_attributes[cons] = attrs else: prod = tp.value @@ -588,6 +590,24 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) + def missing_field(self, space, required, host): + "Find which required field is missing." + state = self.initialization_state + for i in range(len(required)): + if (state >> i) & 1: + continue # field is present + missing = required[i] + if missing is None: + continue # field is optional + w_obj = self.getdictvalue(space, missing) + if w_obj is None: + err = "required field \\"%s\\" missing from %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + else: + err = "incorrect type for field \\"%s\\" in %s" + raise operationerrfmt(space.w_TypeError, err, missing, host) + raise AssertionError("should not reach here") + class NodeVisitorNotImplemented(Exception): pass @@ -631,15 +651,6 @@ ) -def missing_field(space, state, required, host): - "Find which required field is missing." 
- for i in range(len(required)): - if not (state >> i) & 1: - missing = required[i] - if missing is not None: - err = "required field \\"%s\\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) - raise AssertionError("should not reach here") """ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -190,8 +190,8 @@ def is_w(self, space, w_other): return self is w_other - def unique_id(self, space): - return space.wrap(compute_unique_id(self)) + def immutable_unique_id(self, space): + return None def str_w(self, space): w_msg = typed_unwrap_error_msg(space, "string", self) @@ -706,7 +706,10 @@ return w_two.is_w(self, w_one) def id(self, w_obj): - return w_obj.unique_id(self) + w_result = w_obj.immutable_unique_id(self) + if w_result is None: + w_result = self.wrap(compute_unique_id(w_obj)) + return w_result def hash_w(self, w_obj): """shortcut for space.int_w(space.hash(w_obj))""" diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -98,7 +98,6 @@ "Abstract. Get the expected number of locals." raise TypeError, "abstract" - @jit.dont_look_inside def fast2locals(self): # Copy values from the fastlocals to self.w_locals if self.w_locals is None: @@ -112,7 +111,6 @@ w_name = self.space.wrap(name) self.space.setitem(self.w_locals, w_name, w_value) - @jit.dont_look_inside def locals2fast(self): # Copy values from self.w_locals to the fastlocals assert self.w_locals is not None diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -619,7 +619,8 @@ self.descr_reqcls, args) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -655,7 +656,8 @@ self.descr_reqcls, args) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -674,7 +676,8 @@ self.descr_reqcls, args.prepend(w_obj)) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -690,7 +693,8 @@ raise OperationError(space.w_SystemError, space.wrap("unexpected DescrMismatch error")) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -708,7 +712,8 @@ self.descr_reqcls, Arguments(space, [w1])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -726,7 +731,8 @@ self.descr_reqcls, Arguments(space, [w1, w2])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -744,7 +750,8 @@ self.descr_reqcls, Arguments(space, [w1, w2, w3])) except Exception, e: - raise self.handle_exception(space, e) + self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result @@ -763,7 +770,8 @@ Arguments(space, [w1, w2, w3, w4])) except Exception, e: - raise self.handle_exception(space, e) + 
self.handle_exception(space, e) + w_result = None if w_result is None: w_result = space.w_None return w_result diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -54,7 +54,11 @@ # Hash support def default_identity_hash(space, w_obj): - return space.wrap(compute_identity_hash(w_obj)) + w_unique_id = w_obj.immutable_unique_id(space) + if w_unique_id is None: # common case + return space.wrap(compute_identity_hash(w_obj)) + else: + return space.hash(w_unique_id) # ____________________________________________________________ # diff --git a/pypy/jit/backend/arm/test/test_zrpy_gc.py b/pypy/jit/backend/arm/test/test_zrpy_gc.py --- a/pypy/jit/backend/arm/test/test_zrpy_gc.py +++ b/pypy/jit/backend/arm/test/test_zrpy_gc.py @@ -71,16 +71,17 @@ def get_functions_to_patch(): from pypy.jit.backend.llsupport import gc # - can_inline_malloc1 = gc.GcLLDescr_framework.can_inline_malloc - def can_inline_malloc2(*args): + can_use_nursery_malloc1 = gc.GcLLDescr_framework.can_use_nursery_malloc + def can_use_nursery_malloc2(*args): try: if os.environ['PYPY_NO_INLINE_MALLOC']: return False except KeyError: pass - return can_inline_malloc1(*args) + return can_use_nursery_malloc1(*args) # - return {(gc.GcLLDescr_framework, 'can_inline_malloc'): can_inline_malloc2} + return {(gc.GcLLDescr_framework, 'can_use_nursery_malloc'): + can_use_nursery_malloc2} def compile(f, gc, enable_opts='', **kwds): from pypy.annotation.listdef import s_list_of_strings diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -328,6 +328,14 @@ _variables.append(v) return r +def compile_started_vars(clt): + if not hasattr(clt, '_debug_argtypes'): # only when compiling the loop + argtypes = [v.concretetype for v in _variables] + try: + clt._debug_argtypes = argtypes + except AttributeError: # when 'clt' is actually a translated + pass # GcStruct + def compile_add(loop, opnum): loop = _from_opaque(loop) loop.operations.append(Operation(opnum)) @@ -355,11 +363,13 @@ TARGET_TOKENS = weakref.WeakKeyDictionary() -def compile_add_target_token(loop, descr): +def compile_add_target_token(loop, descr, clt): + # here, 'clt' is the compiled_loop_token of the original loop that + # we are compiling loop = _from_opaque(loop) op = loop.operations[-1] descrobj = _normalize(descr) - TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args + TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args, clt def compile_add_var(loop, intvar): loop = _from_opaque(loop) @@ -395,17 +405,25 @@ _variables.append(v) return r -def compile_add_jump_target(loop, targettoken): +def compile_add_jump_target(loop, targettoken, source_clt): loop = _from_opaque(loop) descrobj = _normalize(targettoken) - loop_target, target_opindex, target_inputargs = TARGET_TOKENS[descrobj] + (loop_target, target_opindex, target_inputargs, target_clt + ) = TARGET_TOKENS[descrobj] + # + try: + assert source_clt._debug_argtypes == target_clt._debug_argtypes + except AttributeError: # when translated + pass # op = loop.operations[-1] op.jump_target = loop_target op.jump_target_opindex = target_opindex op.jump_target_inputargs = target_inputargs assert op.opnum == rop.JUMP - assert len(op.args) == len(target_inputargs) + assert [v.concretetype for v in op.args] == ( + [v.concretetype for v in target_inputargs]) + # if loop_target == loop: log.info("compiling new loop") else: @@ -987,6 
+1005,7 @@ self._may_force = self.opindex try: inpargs = _from_opaque(ctl.compiled_version).inputargs + assert len(inpargs) == len(args) for i, inparg in enumerate(inpargs): TYPE = inparg.concretetype if TYPE is lltype.Signed: @@ -1816,6 +1835,7 @@ setannotation(compile_start_int_var, annmodel.SomeInteger()) setannotation(compile_start_ref_var, annmodel.SomeInteger()) setannotation(compile_start_float_var, annmodel.SomeInteger()) +setannotation(compile_started_vars, annmodel.s_None) setannotation(compile_add, annmodel.s_None) setannotation(compile_add_descr, annmodel.s_None) setannotation(compile_add_descr_arg, annmodel.s_None) diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -37,7 +37,7 @@ def get_arg_types(self): return self.arg_types - def get_return_type(self): + def get_result_type(self): return self.typeinfo def get_extra_info(self): @@ -138,11 +138,12 @@ clt = original_loop_token.compiled_loop_token clt.loop_and_bridges.append(c) clt.compiling_a_bridge() - self._compile_loop_or_bridge(c, inputargs, operations) + self._compile_loop_or_bridge(c, inputargs, operations, clt) old, oldindex = faildescr._compiled_fail llimpl.compile_redirect_fail(old, oldindex, c) - def compile_loop(self, inputargs, operations, jitcell_token, log=True, name=''): + def compile_loop(self, inputargs, operations, jitcell_token, + log=True, name=''): """In a real assembler backend, this should assemble the given list of operations. Here we just generate a similar CompiledLoop instance. The code here is RPython, whereas the code in llimpl @@ -153,14 +154,14 @@ clt.loop_and_bridges = [c] clt.compiled_version = c jitcell_token.compiled_loop_token = clt - self._compile_loop_or_bridge(c, inputargs, operations) + self._compile_loop_or_bridge(c, inputargs, operations, clt) def free_loop_and_bridges(self, compiled_loop_token): for c in compiled_loop_token.loop_and_bridges: llimpl.mark_as_free(c) model.AbstractCPU.free_loop_and_bridges(self, compiled_loop_token) - def _compile_loop_or_bridge(self, c, inputargs, operations): + def _compile_loop_or_bridge(self, c, inputargs, operations, clt): var2index = {} for box in inputargs: if isinstance(box, history.BoxInt): @@ -172,10 +173,11 @@ var2index[box] = llimpl.compile_start_float_var(c) else: raise Exception("box is: %r" % (box,)) - self._compile_operations(c, operations, var2index) + llimpl.compile_started_vars(clt) + self._compile_operations(c, operations, var2index, clt) return c - def _compile_operations(self, c, operations, var2index): + def _compile_operations(self, c, operations, var2index, clt): for op in operations: llimpl.compile_add(c, op.getopnum()) descr = op.getdescr() @@ -187,7 +189,7 @@ assert op.getopnum() != rop.JUMP llimpl.compile_add_loop_token(c, descr) if isinstance(descr, history.TargetToken) and op.getopnum() == rop.LABEL: - llimpl.compile_add_target_token(c, descr) + llimpl.compile_add_target_token(c, descr, clt) if self.is_oo and isinstance(descr, (OODescr, MethDescr)): # hack hack, not rpython c._obj.externalobj.operations[-1].setdescr(descr) @@ -241,7 +243,7 @@ assert op.is_final() if op.getopnum() == rop.JUMP: targettoken = op.getdescr() - llimpl.compile_add_jump_target(c, targettoken) + llimpl.compile_add_jump_target(c, targettoken, clt) elif op.getopnum() == rop.FINISH: faildescr = op.getdescr() index = self.get_fail_descr_number(faildescr) @@ -260,23 +262,28 @@ self.latest_frame = frame return fail_index - def 
execute_token(self, loop_token): - """Calls the fake 'assembler' generated for the given loop. - Returns the descr of the last executed operation: either the one - attached to the failing guard, or the one attached to the FINISH. - Use set_future_value_xxx() before, and get_latest_value_xxx() after. - """ - fail_index = self._execute_token(loop_token) - return self.get_fail_descr_from_number(fail_index) - - def set_future_value_int(self, index, intvalue): - llimpl.set_future_value_int(index, intvalue) - - def set_future_value_ref(self, index, objvalue): - llimpl.set_future_value_ref(index, objvalue) - - def set_future_value_float(self, index, floatvalue): - llimpl.set_future_value_float(index, floatvalue) + def make_execute_token(self, *argtypes): + nb_args = len(argtypes) + unroll_argtypes = unrolling_iterable(list(enumerate(argtypes))) + # + def execute_token(loop_token, *args): + assert len(args) == nb_args + for index, TYPE in unroll_argtypes: + x = args[index] + assert TYPE == lltype.typeOf(x) + if TYPE == lltype.Signed: + llimpl.set_future_value_int(index, x) + elif TYPE == llmemory.GCREF: + llimpl.set_future_value_ref(index, x) + elif TYPE == longlong.FLOATSTORAGE: + llimpl.set_future_value_float(index, x) + else: + assert 0 + # + fail_index = self._execute_token(loop_token) + return self.get_fail_descr_from_number(fail_index) + # + return execute_token def get_latest_value_int(self, index): return llimpl.frame_int_getvalue(self.latest_frame, index) diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -5,11 +5,7 @@ from pypy.jit.metainterp.history import AbstractDescr, getkind from pypy.jit.metainterp import history from pypy.jit.codewriter import heaptracker, longlong - -# The point of the class organization in this file is to make instances -# as compact as possible. This is done by not storing the field size or -# the 'is_pointer_field' flag in the instance itself but in the class -# (in methods actually) using a few classes instead of just one. 
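(Aside on the llgraph runner hunk above: set_future_value_* plus execute_token are replaced by make_execute_token(*argtypes), which builds one entry point per argument-type signature; the real code uses unrolling_iterable so the annotator can unroll the per-argument dispatch. A plain, untranslated sketch of the same shape, with a 'run' callable standing in for the low-level loop:

    def make_execute_token(*argtypes):
        nb_args = len(argtypes)

        def execute_token(run, *args):
            # one specialized checker per signature
            assert len(args) == nb_args
            for index in range(nb_args):
                assert isinstance(args[index], argtypes[index]), (
                    "argument %d: expected %r" % (index, argtypes[index]))
            return run(*args)

        return execute_token

    execute_ii = make_execute_token(int, int)
    print execute_ii(lambda a, b: a + b, 20, 22)     # 42

Just an illustration of the specialization pattern, not the RPython code.)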
+from pypy.jit.codewriter.longlong import is_longlong class GcCache(object): @@ -19,6 +15,7 @@ self._cache_size = {} self._cache_field = {} self._cache_array = {} + self._cache_arraylen = {} self._cache_call = {} self._cache_interiorfield = {} @@ -26,24 +23,15 @@ assert isinstance(STRUCT, lltype.GcStruct) def init_array_descr(self, ARRAY, arraydescr): - assert isinstance(ARRAY, lltype.GcArray) + assert (isinstance(ARRAY, lltype.GcArray) or + isinstance(ARRAY, lltype.GcStruct) and ARRAY._arrayfld) -if lltype.SignedLongLong is lltype.Signed: - def is_longlong(TYPE): - return False -else: - assert rffi.sizeof(lltype.SignedLongLong) == rffi.sizeof(lltype.Float) - def is_longlong(TYPE): - return TYPE in (lltype.SignedLongLong, lltype.UnsignedLongLong) - # ____________________________________________________________ # SizeDescrs class SizeDescr(AbstractDescr): size = 0 # help translation - is_immutable = False - tid = llop.combine_ushort(lltype.Signed, 0, 0) def __init__(self, size, count_fields_if_immut=-1): @@ -77,265 +65,247 @@ cache[STRUCT] = sizedescr return sizedescr + # ____________________________________________________________ # FieldDescrs -class BaseFieldDescr(AbstractDescr): +FLAG_POINTER = 'P' +FLAG_FLOAT = 'F' +FLAG_UNSIGNED = 'U' +FLAG_SIGNED = 'S' +FLAG_STRUCT = 'X' +FLAG_VOID = 'V' + +class FieldDescr(AbstractDescr): + name = '' offset = 0 # help translation - name = '' - _clsname = '' + field_size = 0 + flag = '\x00' - def __init__(self, name, offset): + def __init__(self, name, offset, field_size, flag): self.name = name self.offset = offset + self.field_size = field_size + self.flag = flag + + def is_pointer_field(self): + return self.flag == FLAG_POINTER + + def is_float_field(self): + return self.flag == FLAG_FLOAT + + def is_field_signed(self): + return self.flag == FLAG_SIGNED def sort_key(self): return self.offset - def get_field_size(self, translate_support_code): - raise NotImplementedError + def repr_of_descr(self): + return '' % (self.flag, self.name, self.offset) - _is_pointer_field = False # unless overridden by GcPtrFieldDescr - _is_float_field = False # unless overridden by FloatFieldDescr - _is_field_signed = False # unless overridden by XxxFieldDescr - - def is_pointer_field(self): - return self._is_pointer_field - - def is_float_field(self): - return self._is_float_field - - def is_field_signed(self): - return self._is_field_signed - - def repr_of_descr(self): - return '<%s %s %s>' % (self._clsname, self.name, self.offset) - -class DynamicFieldDescr(BaseFieldDescr): - def __init__(self, offset, fieldsize, is_pointer, is_float, is_signed): - self.offset = offset - self._fieldsize = fieldsize - self._is_pointer_field = is_pointer - self._is_float_field = is_float - self._is_field_signed = is_signed - - def get_field_size(self, translate_support_code): - return self._fieldsize - -class NonGcPtrFieldDescr(BaseFieldDescr): - _clsname = 'NonGcPtrFieldDescr' - def get_field_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrFieldDescr(NonGcPtrFieldDescr): - _clsname = 'GcPtrFieldDescr' - _is_pointer_field = True - -def getFieldDescrClass(TYPE): - return getDescrClass(TYPE, BaseFieldDescr, GcPtrFieldDescr, - NonGcPtrFieldDescr, 'Field', 'get_field_size', - '_is_float_field', '_is_field_signed') def get_field_descr(gccache, STRUCT, fieldname): cache = gccache._cache_field try: return cache[STRUCT][fieldname] except KeyError: - offset, _ = symbolic.get_field_token(STRUCT, fieldname, - 
gccache.translate_support_code) + offset, size = symbolic.get_field_token(STRUCT, fieldname, + gccache.translate_support_code) FIELDTYPE = getattr(STRUCT, fieldname) + flag = get_type_flag(FIELDTYPE) name = '%s.%s' % (STRUCT._name, fieldname) - fielddescr = getFieldDescrClass(FIELDTYPE)(name, offset) + fielddescr = FieldDescr(name, offset, size, flag) cachedict = cache.setdefault(STRUCT, {}) cachedict[fieldname] = fielddescr return fielddescr +def get_type_flag(TYPE): + if isinstance(TYPE, lltype.Ptr): + if TYPE.TO._gckind == 'gc': + return FLAG_POINTER + else: + return FLAG_UNSIGNED + if isinstance(TYPE, lltype.Struct): + return FLAG_STRUCT + if TYPE is lltype.Float or is_longlong(TYPE): + return FLAG_FLOAT + if (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and + rffi.cast(TYPE, -1) == -1): + return FLAG_SIGNED + return FLAG_UNSIGNED + +def get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT): + cache = gccache._cache_arraylen + try: + return cache[ARRAY_OR_STRUCT] + except KeyError: + tsc = gccache.translate_support_code + (_, _, ofs) = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + size = symbolic.get_size(lltype.Signed, tsc) + result = FieldDescr("len", ofs, size, get_type_flag(lltype.Signed)) + cache[ARRAY_OR_STRUCT] = result + return result + + # ____________________________________________________________ # ArrayDescrs -_A = lltype.GcArray(lltype.Signed) # a random gcarray -_AF = lltype.GcArray(lltype.Float) # an array of C doubles +class ArrayDescr(AbstractDescr): + tid = 0 + basesize = 0 # workaround for the annotator + itemsize = 0 + lendescr = None + flag = '\x00' - -class BaseArrayDescr(AbstractDescr): - _clsname = '' - tid = llop.combine_ushort(lltype.Signed, 0, 0) - - def get_base_size(self, translate_support_code): - basesize, _, _ = symbolic.get_array_token(_A, translate_support_code) - return basesize - - def get_ofs_length(self, translate_support_code): - _, _, ofslength = symbolic.get_array_token(_A, translate_support_code) - return ofslength - - def get_item_size(self, translate_support_code): - raise NotImplementedError - - _is_array_of_pointers = False # unless overridden by GcPtrArrayDescr - _is_array_of_floats = False # unless overridden by FloatArrayDescr - _is_array_of_structs = False # unless overridden by StructArrayDescr - _is_item_signed = False # unless overridden by XxxArrayDescr + def __init__(self, basesize, itemsize, lendescr, flag): + self.basesize = basesize + self.itemsize = itemsize + self.lendescr = lendescr # or None, if no length + self.flag = flag def is_array_of_pointers(self): - return self._is_array_of_pointers + return self.flag == FLAG_POINTER def is_array_of_floats(self): - return self._is_array_of_floats + return self.flag == FLAG_FLOAT + + def is_item_signed(self): + return self.flag == FLAG_SIGNED def is_array_of_structs(self): - return self._is_array_of_structs - - def is_item_signed(self): - return self._is_item_signed + return self.flag == FLAG_STRUCT def repr_of_descr(self): - return '<%s>' % self._clsname + return '' % (self.flag, self.itemsize) -class NonGcPtrArrayDescr(BaseArrayDescr): - _clsname = 'NonGcPtrArrayDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayDescr(NonGcPtrArrayDescr): - _clsname = 'GcPtrArrayDescr' - _is_array_of_pointers = True - -class FloatArrayDescr(BaseArrayDescr): - _clsname = 'FloatArrayDescr' - _is_array_of_floats = True - def get_base_size(self, translate_support_code): - basesize, _, _ = 
symbolic.get_array_token(_AF, translate_support_code) - return basesize - def get_item_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class StructArrayDescr(BaseArrayDescr): - _clsname = 'StructArrayDescr' - _is_array_of_structs = True - -class BaseArrayNoLengthDescr(BaseArrayDescr): - def get_base_size(self, translate_support_code): - return 0 - - def get_ofs_length(self, translate_support_code): - return -1 - -class DynamicArrayNoLengthDescr(BaseArrayNoLengthDescr): - def __init__(self, itemsize): - self.itemsize = itemsize - - def get_item_size(self, translate_support_code): - return self.itemsize - -class NonGcPtrArrayNoLengthDescr(BaseArrayNoLengthDescr): - _clsname = 'NonGcPtrArrayNoLengthDescr' - def get_item_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrArrayNoLengthDescr(NonGcPtrArrayNoLengthDescr): - _clsname = 'GcPtrArrayNoLengthDescr' - _is_array_of_pointers = True - -def getArrayDescrClass(ARRAY): - if ARRAY.OF is lltype.Float: - return FloatArrayDescr - elif isinstance(ARRAY.OF, lltype.Struct): - class Descr(StructArrayDescr): - _clsname = '%sArrayDescr' % ARRAY.OF._name - def get_item_size(self, translate_support_code): - return symbolic.get_size(ARRAY.OF, translate_support_code) - Descr.__name__ = Descr._clsname - return Descr - return getDescrClass(ARRAY.OF, BaseArrayDescr, GcPtrArrayDescr, - NonGcPtrArrayDescr, 'Array', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def getArrayNoLengthDescrClass(ARRAY): - return getDescrClass(ARRAY.OF, BaseArrayNoLengthDescr, GcPtrArrayNoLengthDescr, - NonGcPtrArrayNoLengthDescr, 'ArrayNoLength', 'get_item_size', - '_is_array_of_floats', '_is_item_signed') - -def get_array_descr(gccache, ARRAY): +def get_array_descr(gccache, ARRAY_OR_STRUCT): cache = gccache._cache_array try: - return cache[ARRAY] + return cache[ARRAY_OR_STRUCT] except KeyError: - # we only support Arrays that are either GcArrays, or raw no-length - # non-gc Arrays. 
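(Aside: the hunks above collapse the many per-type Descr subclasses into single FieldDescr/ArrayDescr classes carrying a one-character flag. A condensed, runnable approximation of the flag idea, reusing the constant names from the diff:

    FLAG_POINTER  = 'P'
    FLAG_FLOAT    = 'F'
    FLAG_UNSIGNED = 'U'
    FLAG_SIGNED   = 'S'

    class FieldDescr(object):
        def __init__(self, name, offset, field_size, flag):
            self.name = name
            self.offset = offset
            self.field_size = field_size
            self.flag = flag

        def is_pointer_field(self):
            return self.flag == FLAG_POINTER

        def is_field_signed(self):
            return self.flag == FLAG_SIGNED

    # a signed machine word at offset 8 (made-up numbers)
    tid = FieldDescr('hdr.tid', 8, 8, FLAG_SIGNED)
    assert tid.is_field_signed() and not tid.is_pointer_field()

The point of the change is that the size and kind live in instance data instead of in a class hierarchy; the sketch omits the symbolic offset computation.)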
- if ARRAY._hints.get('nolength', False): - assert not isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayNoLengthDescrClass(ARRAY)() + tsc = gccache.translate_support_code + basesize, itemsize, _ = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc) + if isinstance(ARRAY_OR_STRUCT, lltype.Array): + ARRAY_INSIDE = ARRAY_OR_STRUCT else: - assert isinstance(ARRAY, lltype.GcArray) - arraydescr = getArrayDescrClass(ARRAY)() - # verify basic assumption that all arrays' basesize and ofslength - # are equal - basesize, itemsize, ofslength = symbolic.get_array_token(ARRAY, False) - assert basesize == arraydescr.get_base_size(False) - assert itemsize == arraydescr.get_item_size(False) - if not ARRAY._hints.get('nolength', False): - assert ofslength == arraydescr.get_ofs_length(False) - if isinstance(ARRAY, lltype.GcArray): - gccache.init_array_descr(ARRAY, arraydescr) - cache[ARRAY] = arraydescr + ARRAY_INSIDE = ARRAY_OR_STRUCT._flds[ARRAY_OR_STRUCT._arrayfld] + if ARRAY_INSIDE._hints.get('nolength', False): + lendescr = None + else: + lendescr = get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT) + flag = get_type_flag(ARRAY_INSIDE.OF) + arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag) + if ARRAY_OR_STRUCT._gckind == 'gc': + gccache.init_array_descr(ARRAY_OR_STRUCT, arraydescr) + cache[ARRAY_OR_STRUCT] = arraydescr return arraydescr + # ____________________________________________________________ # InteriorFieldDescr class InteriorFieldDescr(AbstractDescr): - arraydescr = BaseArrayDescr() # workaround for the annotator - fielddescr = BaseFieldDescr('', 0) + arraydescr = ArrayDescr(0, 0, None, '\x00') # workaround for the annotator + fielddescr = FieldDescr('', 0, 0, '\x00') def __init__(self, arraydescr, fielddescr): + assert arraydescr.flag == FLAG_STRUCT self.arraydescr = arraydescr self.fielddescr = fielddescr + def sort_key(self): + return self.fielddescr.sort_key() + def is_pointer_field(self): return self.fielddescr.is_pointer_field() def is_float_field(self): return self.fielddescr.is_float_field() - def sort_key(self): - return self.fielddescr.sort_key() - def repr_of_descr(self): return '' % self.fielddescr.repr_of_descr() -def get_interiorfield_descr(gc_ll_descr, ARRAY, FIELDTP, name): +def get_interiorfield_descr(gc_ll_descr, ARRAY, name): cache = gc_ll_descr._cache_interiorfield try: - return cache[(ARRAY, FIELDTP, name)] + return cache[(ARRAY, name)] except KeyError: arraydescr = get_array_descr(gc_ll_descr, ARRAY) - fielddescr = get_field_descr(gc_ll_descr, FIELDTP, name) + fielddescr = get_field_descr(gc_ll_descr, ARRAY.OF, name) descr = InteriorFieldDescr(arraydescr, fielddescr) - cache[(ARRAY, FIELDTP, name)] = descr + cache[(ARRAY, name)] = descr return descr +def get_dynamic_interiorfield_descr(gc_ll_descr, offset, width, fieldsize, + is_pointer, is_float, is_signed): + arraydescr = ArrayDescr(0, width, None, FLAG_STRUCT) + if is_pointer: + assert not is_float + flag = FLAG_POINTER + elif is_float: + flag = FLAG_FLOAT + elif is_signed: + flag = FLAG_SIGNED + else: + flag = FLAG_UNSIGNED + fielddescr = FieldDescr('dynamic', offset, fieldsize, flag) + return InteriorFieldDescr(arraydescr, fielddescr) + + # ____________________________________________________________ # CallDescrs -class BaseCallDescr(AbstractDescr): - _clsname = '' - loop_token = None +class CallDescr(AbstractDescr): arg_classes = '' # <-- annotation hack + result_type = '\x00' + result_flag = '\x00' ffi_flags = 1 + call_stub_i = staticmethod(lambda func, args_i, args_r, args_f: + 0) + call_stub_r = 
staticmethod(lambda func, args_i, args_r, args_f: + lltype.nullptr(llmemory.GCREF.TO)) + call_stub_f = staticmethod(lambda func,args_i,args_r,args_f: + longlong.ZEROF) - def __init__(self, arg_classes, extrainfo=None, ffi_flags=1): - self.arg_classes = arg_classes # string of "r" and "i" (ref/int) + def __init__(self, arg_classes, result_type, result_signed, result_size, + extrainfo=None, ffi_flags=1): + """ + 'arg_classes' is a string of characters, one per argument: + 'i', 'r', 'f', 'L', 'S' + + 'result_type' is one character from the same list or 'v' + + 'result_signed' is a boolean True/False + """ + self.arg_classes = arg_classes + self.result_type = result_type + self.result_size = result_size self.extrainfo = extrainfo self.ffi_flags = ffi_flags # NB. the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which # makes sense on Windows as it's the one for all the C functions # we are compiling together with the JIT. On non-Windows platforms # it is just ignored anyway. + if result_type == 'v': + result_flag = FLAG_VOID + elif result_type == 'i': + if result_signed: + result_flag = FLAG_SIGNED + else: + result_flag = FLAG_UNSIGNED + elif result_type == history.REF: + result_flag = FLAG_POINTER + elif result_type == history.FLOAT or result_type == 'L': + result_flag = FLAG_FLOAT + elif result_type == 'S': + result_flag = FLAG_UNSIGNED + else: + raise NotImplementedError("result_type = '%s'" % (result_type,)) + self.result_flag = result_flag def __repr__(self): - res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) + res = 'CallDescr(%s)' % (self.arg_classes,) extraeffect = getattr(self.extrainfo, 'extraeffect', None) if extraeffect is not None: res += ' EF=%r' % extraeffect @@ -363,14 +333,14 @@ def get_arg_types(self): return self.arg_classes - def get_return_type(self): - return self._return_type + def get_result_type(self): + return self.result_type - def get_result_size(self, translate_support_code): - raise NotImplementedError + def get_result_size(self): + return self.result_size def is_result_signed(self): - return False # unless overridden + return self.result_flag == FLAG_SIGNED def create_call_stub(self, rtyper, RESULT): from pypy.rlib.clibffi import FFI_DEFAULT_ABI @@ -408,18 +378,26 @@ seen = {'i': 0, 'r': 0, 'f': 0} args = ", ".join([process(c) for c in self.arg_classes]) - if self.get_return_type() == history.INT: + result_type = self.get_result_type() + if result_type == history.INT: result = 'rffi.cast(lltype.Signed, res)' - elif self.get_return_type() == history.REF: + category = 'i' + elif result_type == history.REF: + assert RESULT == llmemory.GCREF # should be ensured by the caller result = 'lltype.cast_opaque_ptr(llmemory.GCREF, res)' - elif self.get_return_type() == history.FLOAT: + category = 'r' + elif result_type == history.FLOAT: result = 'longlong.getfloatstorage(res)' - elif self.get_return_type() == 'L': + category = 'f' + elif result_type == 'L': result = 'rffi.cast(lltype.SignedLongLong, res)' - elif self.get_return_type() == history.VOID: - result = 'None' - elif self.get_return_type() == 'S': + category = 'f' + elif result_type == history.VOID: + result = '0' + category = 'i' + elif result_type == 'S': result = 'longlong.singlefloat2int(res)' + category = 'i' else: assert 0 source = py.code.Source(""" @@ -433,10 +411,13 @@ d = globals().copy() d.update(locals()) exec source.compile() in d - self.call_stub = d['call_stub'] + call_stub = d['call_stub'] + # store the function into one of three attributes, to preserve + # type-correctness of the 
return value + setattr(self, 'call_stub_%s' % category, call_stub) def verify_types(self, args_i, args_r, args_f, return_type): - assert self._return_type in return_type + assert self.result_type in return_type assert (self.arg_classes.count('i') + self.arg_classes.count('S')) == len(args_i or ()) assert self.arg_classes.count('r') == len(args_r or ()) @@ -444,161 +425,56 @@ self.arg_classes.count('L')) == len(args_f or ()) def repr_of_descr(self): - return '<%s>' % self._clsname + res = 'Call%s %d' % (self.result_type, self.result_size) + if self.arg_classes: + res += ' ' + self.arg_classes + if self.extrainfo: + res += ' EF=%d' % self.extrainfo.extraeffect + oopspecindex = self.extrainfo.oopspecindex + if oopspecindex: + res += ' OS=%d' % oopspecindex + return '<%s>' % res -class BaseIntCallDescr(BaseCallDescr): - # Base class of the various subclasses of descrs corresponding to - # calls having a return kind of 'int' (including non-gc pointers). - # The inheritance hierarchy is a bit different than with other Descr - # classes because of the 'call_stub' attribute, which is of type - # - # lambda func, args_i, args_r, args_f --> int/ref/float/void - # - # The purpose of BaseIntCallDescr is to be the parent of all classes - # in which 'call_stub' has a return kind of 'int'. - _return_type = history.INT - call_stub = staticmethod(lambda func, args_i, args_r, args_f: 0) - - _is_result_signed = False # can be overridden in XxxCallDescr - def is_result_signed(self): - return self._is_result_signed - -class DynamicIntCallDescr(BaseIntCallDescr): - """ - calldescr that works for every integer type, by explicitly passing it the - size of the result. Used only by get_call_descr_dynamic - """ - _clsname = 'DynamicIntCallDescr' - - def __init__(self, arg_classes, result_size, result_sign, extrainfo, ffi_flags): - BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) - assert isinstance(result_sign, bool) - self._result_size = chr(result_size) - self._result_sign = result_sign - - def get_result_size(self, translate_support_code): - return ord(self._result_size) - - def is_result_signed(self): - return self._result_sign - - -class NonGcPtrCallDescr(BaseIntCallDescr): - _clsname = 'NonGcPtrCallDescr' - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class GcPtrCallDescr(BaseCallDescr): - _clsname = 'GcPtrCallDescr' - _return_type = history.REF - call_stub = staticmethod(lambda func, args_i, args_r, args_f: - lltype.nullptr(llmemory.GCREF.TO)) - def get_result_size(self, translate_support_code): - return symbolic.get_size_of_ptr(translate_support_code) - -class FloatCallDescr(BaseCallDescr): - _clsname = 'FloatCallDescr' - _return_type = history.FLOAT - call_stub = staticmethod(lambda func,args_i,args_r,args_f: longlong.ZEROF) - def get_result_size(self, translate_support_code): - return symbolic.get_size(lltype.Float, translate_support_code) - -class LongLongCallDescr(FloatCallDescr): - _clsname = 'LongLongCallDescr' - _return_type = 'L' - -class VoidCallDescr(BaseCallDescr): - _clsname = 'VoidCallDescr' - _return_type = history.VOID - call_stub = staticmethod(lambda func, args_i, args_r, args_f: None) - def get_result_size(self, translate_support_code): - return 0 - -_SingleFloatCallDescr = None # built lazily - -def getCallDescrClass(RESULT): - if RESULT is lltype.Void: - return VoidCallDescr - if RESULT is lltype.Float: - return FloatCallDescr - if RESULT is lltype.SingleFloat: - global _SingleFloatCallDescr - if 
_SingleFloatCallDescr is None: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(RESULT) - class SingleFloatCallDescr(getCallDescrClass(rffi.UINT)): - _clsname = 'SingleFloatCallDescr' - _return_type = 'S' - _SingleFloatCallDescr = SingleFloatCallDescr - return _SingleFloatCallDescr - if is_longlong(RESULT): - return LongLongCallDescr - return getDescrClass(RESULT, BaseIntCallDescr, GcPtrCallDescr, - NonGcPtrCallDescr, 'Call', 'get_result_size', - Ellipsis, # <= floatattrname should not be used here - '_is_result_signed') -getCallDescrClass._annspecialcase_ = 'specialize:memo' +def map_type_to_argclass(ARG, accept_void=False): + kind = getkind(ARG) + if kind == 'int': + if ARG is lltype.SingleFloat: return 'S' + else: return 'i' + elif kind == 'ref': return 'r' + elif kind == 'float': + if is_longlong(ARG): return 'L' + else: return 'f' + elif kind == 'void': + if accept_void: return 'v' + raise NotImplementedError('ARG = %r' % (ARG,)) def get_call_descr(gccache, ARGS, RESULT, extrainfo=None): - arg_classes = [] - for ARG in ARGS: - kind = getkind(ARG) - if kind == 'int': - if ARG is lltype.SingleFloat: - arg_classes.append('S') + arg_classes = map(map_type_to_argclass, ARGS) + arg_classes = ''.join(arg_classes) + result_type = map_type_to_argclass(RESULT, accept_void=True) + RESULT_ERASED = RESULT + if RESULT is lltype.Void: + result_size = 0 + result_signed = False + else: + if isinstance(RESULT, lltype.Ptr): + # avoid too many CallDescrs + if result_type == 'r': + RESULT_ERASED = llmemory.GCREF else: - arg_classes.append('i') - elif kind == 'ref': arg_classes.append('r') - elif kind == 'float': - if is_longlong(ARG): - arg_classes.append('L') - else: - arg_classes.append('f') - else: - raise NotImplementedError('ARG = %r' % (ARG,)) - arg_classes = ''.join(arg_classes) - cls = getCallDescrClass(RESULT) - key = (cls, arg_classes, extrainfo) + RESULT_ERASED = llmemory.Address + result_size = symbolic.get_size(RESULT_ERASED, + gccache.translate_support_code) + result_signed = get_type_flag(RESULT) == FLAG_SIGNED + key = (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo) cache = gccache._cache_call try: - return cache[key] + calldescr = cache[key] except KeyError: - calldescr = cls(arg_classes, extrainfo) - calldescr.create_call_stub(gccache.rtyper, RESULT) + calldescr = CallDescr(arg_classes, result_type, result_signed, + result_size, extrainfo) + calldescr.create_call_stub(gccache.rtyper, RESULT_ERASED) cache[key] = calldescr - return calldescr - - -# ____________________________________________________________ - -def getDescrClass(TYPE, BaseDescr, GcPtrDescr, NonGcPtrDescr, - nameprefix, methodname, floatattrname, signedattrname, - _cache={}): - if isinstance(TYPE, lltype.Ptr): - if TYPE.TO._gckind == 'gc': - return GcPtrDescr - else: - return NonGcPtrDescr - if TYPE is lltype.SingleFloat: - assert rffi.sizeof(rffi.UINT) == rffi.sizeof(TYPE) - TYPE = rffi.UINT - try: - return _cache[nameprefix, TYPE] - except KeyError: - # - class Descr(BaseDescr): - _clsname = '%s%sDescr' % (TYPE._name, nameprefix) - Descr.__name__ = Descr._clsname - # - def method(self, translate_support_code): - return symbolic.get_size(TYPE, translate_support_code) - setattr(Descr, methodname, method) - # - if TYPE is lltype.Float or is_longlong(TYPE): - setattr(Descr, floatattrname, True) - elif (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and - rffi.cast(TYPE, -1) == -1): - setattr(Descr, signedattrname, True) - # - _cache[nameprefix, TYPE] = Descr - return Descr + assert 
repr(calldescr.result_size) == repr(result_size) + return calldescr diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -1,9 +1,7 @@ from pypy.rlib.rarithmetic import intmask from pypy.jit.metainterp import history from pypy.rpython.lltypesystem import rffi -from pypy.jit.backend.llsupport.descr import ( - DynamicIntCallDescr, NonGcPtrCallDescr, FloatCallDescr, VoidCallDescr, - LongLongCallDescr, getCallDescrClass) +from pypy.jit.backend.llsupport.descr import CallDescr class UnsupportedKind(Exception): pass @@ -16,29 +14,13 @@ argkinds = [get_ffi_type_kind(cpu, arg) for arg in ffi_args] except UnsupportedKind: return None - arg_classes = ''.join(argkinds) - if reskind == history.INT: - size = intmask(ffi_result.c_size) - signed = is_ffi_type_signed(ffi_result) - return DynamicIntCallDescr(arg_classes, size, signed, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.REF: - return NonGcPtrCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.FLOAT: - return FloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == history.VOID: - return VoidCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'L': - return LongLongCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - elif reskind == 'S': - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - return SingleFloatCallDescr(arg_classes, extrainfo, - ffi_flags=ffi_flags) - assert False + if reskind == history.VOID: + result_size = 0 + else: + result_size = intmask(ffi_result.c_size) + argkinds = ''.join(argkinds) + return CallDescr(argkinds, reskind, is_ffi_type_signed(ffi_result), + result_size, extrainfo, ffi_flags=ffi_flags) def get_ffi_type_kind(cpu, ffi_type): from pypy.rlib.libffi import types diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,6 +1,6 @@ import os from pypy.rlib import rgc -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize from pypy.rlib.debug import fatalerror from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr @@ -8,52 +8,93 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt, ConstPtr -from pypy.jit.metainterp.history import AbstractDescr +from pypy.jit.codewriter import heaptracker +from pypy.jit.metainterp.history import ConstPtr, AbstractDescr from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.symbolic import WORD -from pypy.jit.backend.llsupport.descr import BaseSizeDescr, BaseArrayDescr +from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr -from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr +from pypy.jit.backend.llsupport.descr import get_array_descr from pypy.jit.backend.llsupport.descr import get_call_descr +from pypy.jit.backend.llsupport.rewrite import GcRewriterAssembler from pypy.rpython.memory.gctransform import asmgcroot # 
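(Aside: the call-descr side follows the same pattern; one CallDescr is parametrized by arg_classes (one character per argument) and the result type/sign/size, and get_call_descr caches instances per signature. A simplified picture of the cache lookup, with made-up inputs:

    _call_descr_cache = {}

    class CallDescr(object):
        def __init__(self, arg_classes, result_type, result_signed,
                     result_size):
            self.arg_classes = arg_classes      # e.g. 'iir' = int, int, ref
            self.result_type = result_type      # 'i', 'r', 'f', 'L', 'S', 'v'
            self.result_signed = result_signed
            self.result_size = result_size

        def __repr__(self):
            return '<Call%s %d %s>' % (self.result_type, self.result_size,
                                       self.arg_classes)

    def get_call_descr(arg_classes, result_type, result_signed, result_size):
        key = (arg_classes, result_type, result_signed, result_size)
        try:
            return _call_descr_cache[key]
        except KeyError:
            descr = CallDescr(*key)
            _call_descr_cache[key] = descr
            return descr

    d1 = get_call_descr('iir', 'i', True, 8)
    d2 = get_call_descr('iir', 'i', True, 8)
    assert d1 is d2                      # same signature, same descr

Only the caching/parametrization idea; the real key also includes the erased result type and extrainfo, and the descr builds a call stub.)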
____________________________________________________________ class GcLLDescription(GcCache): - minimal_size_in_nursery = 0 - get_malloc_slowpath_addr = None def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr + if translator and translator.config.translation.gcremovetypeptr: + self.fielddescr_vtable = None + else: + self.fielddescr_vtable = get_field_descr(self, rclass.OBJECT, + 'typeptr') + self._generated_functions = [] + + def _setup_str(self): + self.str_descr = get_array_descr(self, rstr.STR) + self.unicode_descr = get_array_descr(self, rstr.UNICODE) + + def generate_function(self, funcname, func, ARGS, RESULT=llmemory.GCREF): + """Generates a variant of malloc with the given name and the given + arguments. It should return NULL if out of memory. If it raises + anything, it must be an optional MemoryError. + """ + FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, RESULT)) + descr = get_call_descr(self, ARGS, RESULT) + setattr(self, funcname, func) + setattr(self, funcname + '_FUNCPTR', FUNCPTR) + setattr(self, funcname + '_descr', descr) + self._generated_functions.append(funcname) + + @specialize.arg(1) + def get_malloc_fn(self, funcname): + func = getattr(self, funcname) + FUNC = getattr(self, funcname + '_FUNCPTR') + return llhelper(FUNC, func) + + @specialize.arg(1) + def get_malloc_fn_addr(self, funcname): + ll_func = self.get_malloc_fn(funcname) + return heaptracker.adr2int(llmemory.cast_ptr_to_adr(ll_func)) + def _freeze_(self): return True def initialize(self): pass def do_write_barrier(self, gcref_struct, gcref_newptr): pass - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - return operations - def can_inline_malloc(self, descr): - return False - def can_inline_malloc_varsize(self, descr, num_elem): + def can_use_nursery_malloc(self, size): return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): pass + def get_nursery_free_addr(self): + raise NotImplementedError + def get_nursery_top_addr(self): + raise NotImplementedError - def get_funcptr_for_newarray(self): - return llhelper(self.GC_MALLOC_ARRAY, self.malloc_array) - def get_funcptr_for_newstr(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_str) - def get_funcptr_for_newunicode(self): - return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_unicode) + def gc_malloc(self, sizedescr): + """Blackhole: do a 'bh_new'. 
Also used for 'bh_new_with_vtable', + with the vtable pointer set manually afterwards.""" + assert isinstance(sizedescr, SizeDescr) + return self._bh_malloc(sizedescr) + def gc_malloc_array(self, arraydescr, num_elem): + assert isinstance(arraydescr, ArrayDescr) + return self._bh_malloc_array(arraydescr, num_elem) - def record_constptrs(self, op, gcrefs_output_list): + def gc_malloc_str(self, num_elem): + return self._bh_malloc_array(self.str_descr, num_elem) + + def gc_malloc_unicode(self, num_elem): + return self._bh_malloc_array(self.unicode_descr, num_elem) + + def _record_constptrs(self, op, gcrefs_output_list): for i in range(op.numargs()): v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): @@ -61,11 +102,27 @@ rgc._make_sure_does_not_move(p) gcrefs_output_list.append(p) + def rewrite_assembler(self, cpu, operations, gcrefs_output_list): + rewriter = GcRewriterAssembler(self, cpu) + newops = rewriter.rewrite(operations) + # record all GCREFs, because the GC (or Boehm) cannot see them and + # keep them alive if they end up as constants in the assembler + for op in newops: + self._record_constptrs(op, gcrefs_output_list) + return newops + # ____________________________________________________________ class GcLLDescr_boehm(GcLLDescription): - moving_gc = False - gcrootmap = None + kind = 'boehm' + moving_gc = False + round_up = False + gcrootmap = None + write_barrier_descr = None + fielddescr_tid = None + str_type_id = 0 + unicode_type_id = 0 + get_malloc_slowpath_addr = None @classmethod def configure_boehm_once(cls): @@ -76,6 +133,16 @@ from pypy.rpython.tool import rffi_platform compilation_info = rffi_platform.configure_boehm() + # on some platform GC_init is required before any other + # GC_* functions, call it here for the benefit of tests + # XXX move this to tests + init_fn_ptr = rffi.llexternal("GC_init", + [], lltype.Void, + compilation_info=compilation_info, + sandboxsafe=True, + _nowrapper=True) + init_fn_ptr() + # Versions 6.x of libgc needs to use GC_local_malloc(). 
# Versions 7.x of libgc removed this function; GC_malloc() has # the same behavior if libgc was compiled with @@ -95,96 +162,42 @@ sandboxsafe=True, _nowrapper=True) cls.malloc_fn_ptr = malloc_fn_ptr - cls.compilation_info = compilation_info return malloc_fn_ptr def __init__(self, gcdescr, translator, rtyper): GcLLDescription.__init__(self, gcdescr, translator, rtyper) # grab a pointer to the Boehm 'malloc' function - malloc_fn_ptr = self.configure_boehm_once() - self.funcptr_for_new = malloc_fn_ptr + self.malloc_fn_ptr = self.configure_boehm_once() + self._setup_str() + self._make_functions() - def malloc_array(basesize, itemsize, ofs_length, num_elem): + def _make_functions(self): + + def malloc_fixedsize(size): + return self.malloc_fn_ptr(size) + self.generate_function('malloc_fixedsize', malloc_fixedsize, + [lltype.Signed]) + + def malloc_array(basesize, num_elem, itemsize, ofs_length): try: - size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) except OverflowError: return lltype.nullptr(llmemory.GCREF.TO) - res = self.funcptr_for_new(size) - if not res: - return res - rffi.cast(rffi.CArrayPtr(lltype.Signed), res)[ofs_length/WORD] = num_elem + res = self.malloc_fn_ptr(totalsize) + if res: + arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) + arrayptr[ofs_length/WORD] = num_elem return res - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 4, llmemory.GCREF)) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 4) + def _bh_malloc(self, sizedescr): + return self.malloc_fixedsize(sizedescr.size) - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, self.translate_support_code) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code) - def malloc_str(length): - return self.malloc_array( - str_basesize, str_itemsize, str_ofs_length, length - ) - def malloc_unicode(length): - return self.malloc_array( - unicode_basesize, unicode_itemsize, unicode_ofs_length, length - ) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - - - # on some platform GC_init is required before any other - # GC_* functions, call it here for the benefit of tests - # XXX move this to tests - init_fn_ptr = rffi.llexternal("GC_init", - [], lltype.Void, - compilation_info=self.compilation_info, - sandboxsafe=True, - _nowrapper=True) - - init_fn_ptr() - - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.funcptr_for_new(sizedescr.size) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(basesize, itemsize, ofs_length, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size] - - def args_for_new_array(self, arraydescr): - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = 
arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [basesize, itemsize, ofs_length] - - def get_funcptr_for_new(self): - return self.funcptr_for_new - - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # record all GCREFs too, because Boehm cannot see them and keep them - # alive if they end up as constants in the assembler - for op in operations: - self.record_constptrs(op, gcrefs_output_list) - return GcLLDescription.rewrite_assembler(self, cpu, operations, - gcrefs_output_list) + def _bh_malloc_array(self, arraydescr, num_elem): + return self.malloc_array(arraydescr.basesize, num_elem, + arraydescr.itemsize, + arraydescr.lendescr.offset) # ____________________________________________________________ @@ -554,12 +567,14 @@ class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): - GCClass = gc_ll_descr.GCClass self.llop1 = gc_ll_descr.llop1 self.WB_FUNCPTR = gc_ll_descr.WB_FUNCPTR self.WB_ARRAY_FUNCPTR = gc_ll_descr.WB_ARRAY_FUNCPTR - self.fielddescr_tid = get_field_descr(gc_ll_descr, GCClass.HDR, 'tid') + self.fielddescr_tid = gc_ll_descr.fielddescr_tid # + GCClass = gc_ll_descr.GCClass + if GCClass is None: # for tests + return self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = ( self.extract_flag_byte(self.jit_wb_if_flag)) @@ -596,48 +611,74 @@ funcaddr = llmemory.cast_ptr_to_adr(funcptr) return cpu.cast_adr_to_int(funcaddr) # this may return 0 + def has_write_barrier_from_array(self, cpu): + return self.get_write_barrier_from_array_fn(cpu) != 0 + class GcLLDescr_framework(GcLLDescription): DEBUG = False # forced to True by x86/test/test_zrpy_gc.py + kind = 'framework' + round_up = True - def __init__(self, gcdescr, translator, rtyper, llop1=llop): - from pypy.rpython.memory.gctypelayout import check_typeid - from pypy.rpython.memory.gcheader import GCHeaderBuilder - from pypy.rpython.memory.gctransform import framework + def __init__(self, gcdescr, translator, rtyper, llop1=llop, + really_not_translated=False): GcLLDescription.__init__(self, gcdescr, translator, rtyper) - assert self.translate_support_code, "required with the framework GC" self.translator = translator self.llop1 = llop1 + if really_not_translated: + assert not self.translate_support_code # but half does not work + self._initialize_for_tests() + else: + assert self.translate_support_code,"required with the framework GC" + self._check_valid_gc() + self._make_gcrootmap() + self._make_layoutbuilder() + self._setup_gcclass() + self._setup_tid() + self._setup_write_barrier() + self._setup_str() + self._make_functions(really_not_translated) + def _initialize_for_tests(self): + self.layoutbuilder = None + self.fielddescr_tid = AbstractDescr() + self.max_size_of_young_obj = 1000 + self.GCClass = None + + def _check_valid_gc(self): # we need the hybrid or minimark GC for rgc._make_sure_does_not_move() # to work - if gcdescr.config.translation.gc not in ('hybrid', 'minimark'): + if self.gcdescr.config.translation.gc not in ('hybrid', 'minimark'): raise NotImplementedError("--gc=%s not implemented with the JIT" % (gcdescr.config.translation.gc,)) + def _make_gcrootmap(self): # to find roots in the assembler, make a GcRootMap - name = gcdescr.config.translation.gcrootfinder + name = self.gcdescr.config.translation.gcrootfinder try: cls = globals()['GcRootMap_' + name] except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % 
(name,)) - gcrootmap = cls(gcdescr) + gcrootmap = cls(self.gcdescr) self.gcrootmap = gcrootmap + def _make_layoutbuilder(self): # make a TransformerLayoutBuilder and save it on the translator # where it can be fished and reused by the FrameworkGCTransformer + from pypy.rpython.memory.gctransform import framework + translator = self.translator self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} - gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + self.gcrootmap.add_jit2gc_hooks(translator._jit2gc) + def _setup_gcclass(self): + from pypy.rpython.memory.gcheader import GCHeaderBuilder self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO) - (self.array_basesize, _, self.array_length_ofs) = \ - symbolic.get_array_token(lltype.GcArray(lltype.Signed), True) self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() @@ -645,89 +686,126 @@ assert self.GCClass.inline_simple_malloc assert self.GCClass.inline_simple_malloc_varsize - # make a malloc function, with two arguments - def malloc_basic(size, tid): - assert size > 0, 'size should be > 0' - type_id = llop.extract_ushort(llgroup.HALFWORD, tid) - check_typeid(type_id) - res = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - type_id, size, - False, False, False) - # In case the operation above failed, we are returning NULL - # from this function to assembler. There is also an RPython - # exception set, typically MemoryError; but it's easier and - # faster to check for the NULL return value, as done by - # translator/exceptiontransform.py. - #llop.debug_print(lltype.Void, "\tmalloc_basic", size, type_id, - # "-->", res) - return res - self.malloc_basic = malloc_basic - self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType( - [lltype.Signed, lltype.Signed], llmemory.GCREF)) + def _setup_tid(self): + self.fielddescr_tid = get_field_descr(self, self.GCClass.HDR, 'tid') + + def _setup_write_barrier(self): self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, llmemory.Address], lltype.Void)) self.WB_ARRAY_FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address, lltype.Signed, llmemory.Address], lltype.Void)) self.write_barrier_descr = WriteBarrierDescr(self) - # + + def _make_functions(self, really_not_translated): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + (self.standard_array_basesize, _, self.standard_array_length_ofs) = \ + symbolic.get_array_token(lltype.GcArray(lltype.Signed), + not really_not_translated) + + def malloc_nursery_slowpath(size): + """Allocate 'size' null bytes out of the nursery. + Note that the fast path is typically inlined by the backend.""" + assert size >= self.minimal_size_in_nursery + if self.DEBUG: + self._random_usage_of_xmm_registers() + type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, size, + False, False, False) + self.generate_function('malloc_nursery', malloc_nursery_slowpath, + [lltype.Signed]) + def malloc_array(itemsize, tid, num_elem): - assert num_elem >= 0, 'num_elem should be >= 0' + """Allocate an array with a variable-size num_elem. 
+ Only works for standard arrays.""" + assert num_elem >= 0, 'num_elem should be >= 0' type_id = llop.extract_ushort(llgroup.HALFWORD, tid) check_typeid(type_id) return llop1.do_malloc_varsize_clear( llmemory.GCREF, - type_id, num_elem, self.array_basesize, itemsize, - self.array_length_ofs) - self.malloc_array = malloc_array - self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( - [lltype.Signed] * 3, llmemory.GCREF)) - # - (str_basesize, str_itemsize, str_ofs_length - ) = symbolic.get_array_token(rstr.STR, True) - (unicode_basesize, unicode_itemsize, unicode_ofs_length - ) = symbolic.get_array_token(rstr.UNICODE, True) - str_type_id = self.layoutbuilder.get_type_id(rstr.STR) - unicode_type_id = self.layoutbuilder.get_type_id(rstr.UNICODE) - # + type_id, num_elem, self.standard_array_basesize, itemsize, + self.standard_array_length_ofs) + self.generate_function('malloc_array', malloc_array, + [lltype.Signed] * 3) + + def malloc_array_nonstandard(basesize, itemsize, lengthofs, tid, + num_elem): + """For the rare case of non-standard arrays, i.e. arrays where + self.standard_array_{basesize,length_ofs} is wrong. It can + occur e.g. with arrays of floats on Win32.""" + type_id = llop.extract_ushort(llgroup.HALFWORD, tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + type_id, num_elem, basesize, itemsize, lengthofs) + self.generate_function('malloc_array_nonstandard', + malloc_array_nonstandard, + [lltype.Signed] * 5) + + str_type_id = self.str_descr.tid + str_basesize = self.str_descr.basesize + str_itemsize = self.str_descr.itemsize + str_ofs_length = self.str_descr.lendescr.offset + unicode_type_id = self.unicode_descr.tid + unicode_basesize = self.unicode_descr.basesize + unicode_itemsize = self.unicode_descr.itemsize + unicode_ofs_length = self.unicode_descr.lendescr.offset + def malloc_str(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, str_type_id, length, str_basesize, str_itemsize, str_ofs_length) + self.generate_function('malloc_str', malloc_str, + [lltype.Signed]) + def malloc_unicode(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, - unicode_type_id, length, unicode_basesize,unicode_itemsize, + unicode_type_id, length, unicode_basesize, unicode_itemsize, unicode_ofs_length) - self.malloc_str = malloc_str - self.malloc_unicode = malloc_unicode - self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( - [lltype.Signed], llmemory.GCREF)) - # - class ForTestOnly: - pass - for_test_only = ForTestOnly() - for_test_only.x = 1.23 - def random_usage_of_xmm_registers(): - x0 = for_test_only.x - x1 = x0 * 0.1 - x2 = x0 * 0.2 - x3 = x0 * 0.3 - for_test_only.x = x0 + x1 + x2 + x3 - # - def malloc_slowpath(size): - if self.DEBUG: - random_usage_of_xmm_registers() - assert size >= self.minimal_size_in_nursery - # NB. although we call do_malloc_fixedsize_clear() here, - # it's a bit of a hack because we set tid to 0 and may - # also use it to allocate varsized objects. The tid - # and possibly the length are both set afterward. - gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - 0, size, False, False, False) - return rffi.cast(lltype.Signed, gcref) - self.malloc_slowpath = malloc_slowpath - self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) + self.generate_function('malloc_unicode', malloc_unicode, + [lltype.Signed]) + + # Rarely called: allocate a fixed-size amount of bytes, but + # not in the nursery, because it is too big. Implemented like + # malloc_nursery_slowpath() above. 
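As the docstring of malloc_array_nonstandard() above explains, most GcArrays share the layout of GcArray(Signed), so the short three-argument malloc_array() call is enough; only arrays with an unusual basesize or length offset take the five-argument path. A small sketch of that dispatch, as gen_malloc_array() in rewrite.py later applies it; the helper name and the concrete byte values are made up here, since the real values come from symbolic.get_array_token():

    STANDARD_BASESIZE = 16      # illustrative layout of GcArray(Signed)
    STANDARD_LENGTH_OFS = 8     # illustrative offset of the length field

    def choose_array_malloc(basesize, length_ofs):
        # standard-looking arrays use malloc_array(itemsize, tid, num_elem);
        # anything else (e.g. arrays of floats on Win32, whose basesize can
        # differ) must spell out basesize, itemsize and lengthofs explicitly
        if basesize == STANDARD_BASESIZE and length_ofs == STANDARD_LENGTH_OFS:
            return 'malloc_array'
        return 'malloc_array_nonstandard'

    assert choose_array_malloc(16, 8) == 'malloc_array'
    assert choose_array_malloc(24, 8) == 'malloc_array_nonstandard'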
+ self.generate_function('malloc_fixedsize', malloc_nursery_slowpath, + [lltype.Signed]) + + def _bh_malloc(self, sizedescr): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, sizedescr.tid) + check_typeid(type_id) + return llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + type_id, sizedescr.size, + False, False, False) + + def _bh_malloc_array(self, arraydescr, num_elem): + from pypy.rpython.memory.gctypelayout import check_typeid + llop1 = self.llop1 + type_id = llop.extract_ushort(llgroup.HALFWORD, arraydescr.tid) + check_typeid(type_id) + return llop1.do_malloc_varsize_clear(llmemory.GCREF, + type_id, num_elem, + arraydescr.basesize, + arraydescr.itemsize, + arraydescr.lendescr.offset) + + + class ForTestOnly: + pass + for_test_only = ForTestOnly() + for_test_only.x = 1.23 + + def _random_usage_of_xmm_registers(self): + x0 = self.for_test_only.x + x1 = x0 * 0.1 + x2 = x0 * 0.2 + x3 = x0 * 0.3 + self.for_test_only.x = x0 + x1 + x2 + x3 def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -737,49 +815,26 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) - return rffi.cast(lltype.Signed, fptr) - def initialize(self): self.gcrootmap.initialize() def init_size_descr(self, S, descr): - type_id = self.layoutbuilder.get_type_id(S) - assert not self.layoutbuilder.is_weakref_type(S) - assert not self.layoutbuilder.has_finalizer(S) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(S) + assert not self.layoutbuilder.is_weakref_type(S) + assert not self.layoutbuilder.has_finalizer(S) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) def init_array_descr(self, A, descr): - type_id = self.layoutbuilder.get_type_id(A) - descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) + if self.layoutbuilder is not None: + type_id = self.layoutbuilder.get_type_id(A) + descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0) - def gc_malloc(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return self.malloc_basic(sizedescr.size, sizedescr.tid) - - def gc_malloc_array(self, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return self.malloc_array(itemsize, arraydescr.tid, num_elem) - - def gc_malloc_str(self, num_elem): - return self.malloc_str(num_elem) - - def gc_malloc_unicode(self, num_elem): - return self.malloc_unicode(num_elem) - - def args_for_new(self, sizedescr): - assert isinstance(sizedescr, BaseSizeDescr) - return [sizedescr.size, sizedescr.tid] - - def args_for_new_array(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - itemsize = arraydescr.get_item_size(self.translate_support_code) - return [itemsize, arraydescr.tid] - - def get_funcptr_for_new(self): - return llhelper(self.GC_MALLOC_BASIC, self.malloc_basic) + def _set_tid(self, gcptr, tid): + hdr_addr = llmemory.cast_ptr_to_adr(gcptr) + hdr_addr -= self.gcheaderbuilder.size_gc_header + hdr = llmemory.cast_adr_to_ptr(hdr_addr, self.HDRPTR) + hdr.tid = tid def do_write_barrier(self, gcref_struct, gcref_newptr): hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct) @@ -793,108 +848,8 @@ funcptr(llmemory.cast_ptr_to_adr(gcref_struct), 
llmemory.cast_ptr_to_adr(gcref_newptr)) - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): - # Perform two kinds of rewrites in parallel: - # - # - Add COND_CALLs to the write barrier before SETFIELD_GC and - # SETARRAYITEM_GC operations. - # - # - Record the ConstPtrs from the assembler. - # - newops = [] - known_lengths = {} - # we can only remember one malloc since the next malloc can possibly - # collect - last_malloc = None - for op in operations: - if op.getopnum() == rop.DEBUG_MERGE_POINT: - continue - # ---------- record the ConstPtrs ---------- - self.record_constptrs(op, gcrefs_output_list) - if op.is_malloc(): - last_malloc = op.result - elif op.can_malloc(): - last_malloc = None - # ---------- write barrier for SETFIELD_GC ---------- - if op.getopnum() == rop.SETFIELD_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(1) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETFIELD_RAW) - # ---------- write barrier for SETINTERIORFIELD_GC ------ - if op.getopnum() == rop.SETINTERIORFIELD_GC: - val = op.getarg(0) - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier(newops, op.getarg(0), v) - op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) - # ---------- write barrier for SETARRAYITEM_GC ---------- - if op.getopnum() == rop.SETARRAYITEM_GC: - val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val is not last_malloc: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self._gen_write_barrier_array(newops, op.getarg(0), - op.getarg(1), v, - cpu, known_lengths) - op = op.copy_and_change(rop.SETARRAYITEM_RAW) - elif op.getopnum() == rop.NEW_ARRAY: - v_length = op.getarg(0) - if isinstance(v_length, ConstInt): - known_lengths[op.result] = v_length.getint() - # ---------- - newops.append(op) - return newops - - def _gen_write_barrier(self, newops, v_base, v_value): - args = [v_base, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, - descr=self.write_barrier_descr)) - - def _gen_write_barrier_array(self, newops, v_base, v_index, v_value, - cpu, known_lengths): - if self.write_barrier_descr.get_write_barrier_from_array_fn(cpu) != 0: - # If we know statically the length of 'v', and it is not too - # big, then produce a regular write_barrier. If it's unknown or - # too big, produce instead a write_barrier_from_array. 
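Both the rewrite_assembler() removed here and its replacement in rewrite.py rely on the same observation: a store into an object allocated after the last point at which a collection could have happened never needs a write barrier, because the GC has not seen that object yet. A self-contained sketch of that bookkeeping; Op and the operation names are simplified stand-ins for the real ResOperations:

    class Op(object):
        def __init__(self, name, args, result=None):
            self.name, self.args, self.result = name, args, result

    def insert_write_barriers(operations):
        newops = []
        recent_mallocs = set()          # results of mallocs still known young
        for op in operations:
            if op.name == 'new':
                recent_mallocs.add(op.result)
            elif op.name == 'call':
                recent_mallocs.clear()  # a call may trigger a collection
            elif op.name == 'setfield_gc' and op.args[0] not in recent_mallocs:
                # storing into an object the GC may already track:
                # emit the conditional write barrier in front of the store
                newops.append(Op('cond_call_gc_wb', [op.args[0], op.args[1]]))
            newops.append(op)
        return newops

    ops = [Op('new', [], 'p0'), Op('setfield_gc', ['p0', 'p1']),
           Op('call', []),      Op('setfield_gc', ['p0', 'p1'])]
    names = [op.name for op in insert_write_barriers(ops)]
    assert names == ['new', 'setfield_gc', 'call',
                     'cond_call_gc_wb', 'setfield_gc']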
- LARGE = 130 - length = known_lengths.get(v_base, LARGE) - if length >= LARGE: - # unknown or too big: produce a write_barrier_from_array - args = [v_base, v_index, v_value] - newops.append(ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, - None, - descr=self.write_barrier_descr)) - return - # fall-back case: produce a write_barrier - self._gen_write_barrier(newops, v_base, v_value) - - def can_inline_malloc(self, descr): - assert isinstance(descr, BaseSizeDescr) - if descr.size < self.max_size_of_young_obj: - has_finalizer = bool(descr.tid & (1<= 0 + if index < 0: + return False size = self.frame_size(box.type) for i in range(size): while (index + i) >= len(self.used): diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/llsupport/rewrite.py @@ -0,0 +1,328 @@ +import sys +from pypy.rlib.rarithmetic import ovfcheck +from pypy.jit.metainterp.history import ConstInt, BoxPtr, ConstPtr +from pypy.jit.metainterp.resoperation import ResOperation, rop +from pypy.jit.codewriter import heaptracker +from pypy.jit.backend.llsupport.symbolic import WORD +from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr + + +class GcRewriterAssembler(object): + # This class performs the following rewrites on the list of operations: + # + # - Remove the DEBUG_MERGE_POINTs. + # + # - Turn all NEW_xxx to either a CALL_MALLOC_GC, or a CALL_MALLOC_NURSERY + # followed by SETFIELDs in order to initialize their GC fields. The + # two advantages of CALL_MALLOC_NURSERY is that it inlines the common + # path, and we need only one such operation to allocate several blocks + # of memory at once. + # + # - Add COND_CALLs to the write barrier before SETFIELD_GC and + # SETARRAYITEM_GC operations. + + _previous_size = -1 + _op_malloc_nursery = None + _v_last_malloced_nursery = None + c_zero = ConstInt(0) + + def __init__(self, gc_ll_descr, cpu): + self.gc_ll_descr = gc_ll_descr + self.cpu = cpu + self.newops = [] + self.known_lengths = {} + self.recent_mallocs = {} # set of variables + + def rewrite(self, operations): + # we can only remember one malloc since the next malloc can possibly + # collect; but we can try to collapse several known-size mallocs into + # one, both for performance and to reduce the number of write + # barriers. We do this on each "basic block" of operations, which in + # this case means between CALLs or unknown-size mallocs. 
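The comment above describes the central trick of the new GcRewriterAssembler: within one basic block, consecutive allocations of known size are collapsed into a single nursery bump, and every later result is recovered as the previous pointer plus the previous size. A toy version of that merging; the Op class and the byte sizes are invented, and the real code additionally checks can_use_nursery_malloc() on the merged total and emits CALL_MALLOC_NURSERY / INT_ADD resoperations:

    class Op(object):
        def __init__(self, name, args, result=None):
            self.name, self.args, self.result = name, args, result

    def collapse_mallocs(requests):
        """requests: list of (size, result_name) pairs within one block."""
        newops = []
        nursery_op = None               # the single allocation being grown
        last_result, last_size = None, 0
        for size, result in requests:
            if nursery_op is None:
                nursery_op = Op('call_malloc_nursery', [size], result)
                newops.append(nursery_op)
            else:
                # grow the one allocation; the new object's address is
                # simply the previous result shifted by the previous size
                nursery_op.args[0] += size
                newops.append(Op('int_add', [last_result, last_size], result))
            last_result, last_size = result, size
        return newops

    ops = collapse_mallocs([(16, 'p0'), (24, 'p1'), (16, 'p2')])
    assert ops[0].args == [56] and ops[0].result == 'p0'
    assert ops[1].args == ['p0', 16] and ops[2].args == ['p1', 24]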
+ # + for op in operations: + if op.getopnum() == rop.DEBUG_MERGE_POINT: + continue + # ---------- turn NEWxxx into CALL_MALLOC_xxx ---------- + if op.is_malloc(): + self.handle_malloc_operation(op) + continue + elif op.can_malloc(): + self.emitting_an_operation_that_can_collect() + elif op.getopnum() == rop.LABEL: + self.emitting_an_operation_that_can_collect() + self.known_lengths.clear() + # ---------- write barriers ---------- + if self.gc_ll_descr.write_barrier_descr is not None: + if op.getopnum() == rop.SETFIELD_GC: + self.handle_write_barrier_setfield(op) + continue + if op.getopnum() == rop.SETINTERIORFIELD_GC: + self.handle_write_barrier_setinteriorfield(op) + continue + if op.getopnum() == rop.SETARRAYITEM_GC: + self.handle_write_barrier_setarrayitem(op) + continue + # ---------- + self.newops.append(op) + return self.newops + + # ---------- + + def handle_malloc_operation(self, op): + opnum = op.getopnum() + if opnum == rop.NEW: + self.handle_new_fixedsize(op.getdescr(), op) + elif opnum == rop.NEW_WITH_VTABLE: + classint = op.getarg(0).getint() + descr = heaptracker.vtable2descr(self.cpu, classint) + self.handle_new_fixedsize(descr, op) + if self.gc_ll_descr.fielddescr_vtable is not None: + op = ResOperation(rop.SETFIELD_GC, + [op.result, ConstInt(classint)], None, + descr=self.gc_ll_descr.fielddescr_vtable) + self.newops.append(op) + elif opnum == rop.NEW_ARRAY: + descr = op.getdescr() + assert isinstance(descr, ArrayDescr) + self.handle_new_array(descr, op) + elif opnum == rop.NEWSTR: + self.handle_new_array(self.gc_ll_descr.str_descr, op) + elif opnum == rop.NEWUNICODE: + self.handle_new_array(self.gc_ll_descr.unicode_descr, op) + else: + raise NotImplementedError(op.getopname()) + + def handle_new_fixedsize(self, descr, op): + assert isinstance(descr, SizeDescr) + size = descr.size + self.gen_malloc_nursery(size, op.result) + self.gen_initialize_tid(op.result, descr.tid) + + def handle_new_array(self, arraydescr, op): + v_length = op.getarg(0) + total_size = -1 + if isinstance(v_length, ConstInt): + num_elem = v_length.getint() + self.known_lengths[op.result] = num_elem + try: + var_size = ovfcheck(arraydescr.itemsize * num_elem) + total_size = ovfcheck(arraydescr.basesize + var_size) + except OverflowError: + pass # total_size is still -1 + elif arraydescr.itemsize == 0: + total_size = arraydescr.basesize + if 0 <= total_size <= 0xffffff: # up to 16MB, arbitrarily + self.gen_malloc_nursery(total_size, op.result) + self.gen_initialize_tid(op.result, arraydescr.tid) + self.gen_initialize_len(op.result, v_length, arraydescr.lendescr) + elif self.gc_ll_descr.kind == 'boehm': + self.gen_boehm_malloc_array(arraydescr, v_length, op.result) + else: + opnum = op.getopnum() + if opnum == rop.NEW_ARRAY: + self.gen_malloc_array(arraydescr, v_length, op.result) + elif opnum == rop.NEWSTR: + self.gen_malloc_str(v_length, op.result) + elif opnum == rop.NEWUNICODE: + self.gen_malloc_unicode(v_length, op.result) + else: + raise NotImplementedError(op.getopname()) + + # ---------- + + def emitting_an_operation_that_can_collect(self): + # must be called whenever we emit an operation that can collect: + # forgets the previous MALLOC_NURSERY, if any; and empty the + # set 'recent_mallocs', so that future SETFIELDs will generate + # a write barrier as usual. 
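handle_new_array() above only takes the inlined nursery path when the total size can be computed at rewrite time and is small enough; everything else falls back to an explicit malloc call. A stand-alone sketch of that decision; plan_new_array() and the byte values are invented, and sys.maxsize stands in for the RPython ovfcheck():

    import sys

    NURSERY_CUTOFF = 0xffffff          # "up to 16MB, arbitrarily", as above

    def plan_new_array(basesize, itemsize, num_elem):
        total_size = -1                # -1 means unknown or overflowed
        if num_elem is not None:       # length is a compile-time constant
            total_size = basesize + itemsize * num_elem
            if total_size > sys.maxsize:    # stand-in for ovfcheck()
                total_size = -1
        elif itemsize == 0:
            total_size = basesize      # zero-sized items: length is irrelevant
        if 0 <= total_size <= NURSERY_CUTOFF:
            return ('call_malloc_nursery', total_size)
        return ('call_malloc_gc', None)

    assert plan_new_array(8, 8, 10) == ('call_malloc_nursery', 88)
    assert plan_new_array(8, 8, None) == ('call_malloc_gc', None)
    assert plan_new_array(8, 8, 10**9) == ('call_malloc_gc', None)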
+ self._op_malloc_nursery = None + self.recent_mallocs.clear() + + def _gen_call_malloc_gc(self, args, v_result, descr): + """Generate a CALL_MALLOC_GC with the given args.""" + self.emitting_an_operation_that_can_collect() + op = ResOperation(rop.CALL_MALLOC_GC, args, v_result, descr) + self.newops.append(op) + # mark 'v_result' as freshly malloced + self.recent_mallocs[v_result] = None + + def gen_malloc_fixedsize(self, size, v_result): + """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, Const(size)). + Note that with the framework GC, this should be called very rarely. + """ + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize') + self._gen_call_malloc_gc([ConstInt(addr), ConstInt(size)], v_result, + self.gc_ll_descr.malloc_fixedsize_descr) + + def gen_boehm_malloc_array(self, arraydescr, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) for Boehm.""" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_array') + self._gen_call_malloc_gc([ConstInt(addr), + ConstInt(arraydescr.basesize), + v_num_elem, + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.lendescr.offset)], + v_result, + self.gc_ll_descr.malloc_array_descr) + + def gen_malloc_array(self, arraydescr, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) going either + to the standard or the nonstandard version of the function.""" + # + if (arraydescr.basesize == self.gc_ll_descr.standard_array_basesize + and arraydescr.lendescr.offset == + self.gc_ll_descr.standard_array_length_ofs): + # this is a standard-looking array, common case + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_array') + args = [ConstInt(addr), + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.tid), + v_num_elem] + calldescr = self.gc_ll_descr.malloc_array_descr + else: + # rare case, so don't care too much about the number of arguments + addr = self.gc_ll_descr.get_malloc_fn_addr( + 'malloc_array_nonstandard') + args = [ConstInt(addr), + ConstInt(arraydescr.basesize), + ConstInt(arraydescr.itemsize), + ConstInt(arraydescr.lendescr.offset), + ConstInt(arraydescr.tid), + v_num_elem] + calldescr = self.gc_ll_descr.malloc_array_nonstandard_descr + self._gen_call_malloc_gc(args, v_result, calldescr) + + def gen_malloc_str(self, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_str_fn, ...).""" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_str') + self._gen_call_malloc_gc([ConstInt(addr), v_num_elem], v_result, + self.gc_ll_descr.malloc_str_descr) + + def gen_malloc_unicode(self, v_num_elem, v_result): + """Generate a CALL_MALLOC_GC(malloc_unicode_fn, ...).""" + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_unicode') + self._gen_call_malloc_gc([ConstInt(addr), v_num_elem], v_result, + self.gc_ll_descr.malloc_unicode_descr) + + def gen_malloc_nursery(self, size, v_result): + """Try to generate or update a CALL_MALLOC_NURSERY. + If that fails, generate a plain CALL_MALLOC_GC instead. 
+ """ + size = self.round_up_for_allocation(size) + if not self.gc_ll_descr.can_use_nursery_malloc(size): + self.gen_malloc_fixedsize(size, v_result) + return + # + op = None + if self._op_malloc_nursery is not None: + # already a MALLOC_NURSERY: increment its total size + total_size = self._op_malloc_nursery.getarg(0).getint() + total_size += size + if self.gc_ll_descr.can_use_nursery_malloc(total_size): + # if the total size is still reasonable, merge it + self._op_malloc_nursery.setarg(0, ConstInt(total_size)) + op = ResOperation(rop.INT_ADD, + [self._v_last_malloced_nursery, + ConstInt(self._previous_size)], + v_result) + if op is None: + # if we failed to merge with a previous MALLOC_NURSERY, emit one + self.emitting_an_operation_that_can_collect() + op = ResOperation(rop.CALL_MALLOC_NURSERY, + [ConstInt(size)], + v_result) + self._op_malloc_nursery = op + # + self.newops.append(op) + self._previous_size = size + self._v_last_malloced_nursery = v_result + self.recent_mallocs[v_result] = None + + def gen_initialize_tid(self, v_newgcobj, tid): + if self.gc_ll_descr.fielddescr_tid is not None: + # produce a SETFIELD to initialize the GC header + op = ResOperation(rop.SETFIELD_GC, + [v_newgcobj, ConstInt(tid)], None, + descr=self.gc_ll_descr.fielddescr_tid) + self.newops.append(op) + + def gen_initialize_len(self, v_newgcobj, v_length, arraylen_descr): + # produce a SETFIELD to initialize the array length + op = ResOperation(rop.SETFIELD_GC, + [v_newgcobj, v_length], None, + descr=arraylen_descr) + self.newops.append(op) + + # ---------- + + def handle_write_barrier_setfield(self, op): + val = op.getarg(0) + # no need for a write barrier in the case of previous malloc + if val not in self.recent_mallocs: + v = op.getarg(1) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self.gen_write_barrier(op.getarg(0), v) + op = op.copy_and_change(rop.SETFIELD_RAW) + self.newops.append(op) + + def handle_write_barrier_setinteriorfield(self, op): + val = op.getarg(0) + # no need for a write barrier in the case of previous malloc + if val not in self.recent_mallocs: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self.gen_write_barrier(op.getarg(0), v) + op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) + self.newops.append(op) + + def handle_write_barrier_setarrayitem(self, op): + val = op.getarg(0) + # no need for a write barrier in the case of previous malloc + if val not in self.recent_mallocs: + v = op.getarg(2) + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + self.gen_write_barrier_array(op.getarg(0), + op.getarg(1), v) + op = op.copy_and_change(rop.SETARRAYITEM_RAW) + self.newops.append(op) + + def gen_write_barrier(self, v_base, v_value): + write_barrier_descr = self.gc_ll_descr.write_barrier_descr + args = [v_base, v_value] + self.newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, + descr=write_barrier_descr)) + + def gen_write_barrier_array(self, v_base, v_index, v_value): + write_barrier_descr = self.gc_ll_descr.write_barrier_descr + if write_barrier_descr.has_write_barrier_from_array(self.cpu): + # If we know statically the length of 'v', and it is not too + # big, then produce a regular write_barrier. If it's unknown or + # too big, produce instead a write_barrier_from_array. 
+ LARGE = 130 + length = self.known_lengths.get(v_base, LARGE) + if length >= LARGE: + # unknown or too big: produce a write_barrier_from_array + args = [v_base, v_index, v_value] + self.newops.append( + ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, + descr=write_barrier_descr)) + return + # fall-back case: produce a write_barrier + self.gen_write_barrier(v_base, v_value) + + def round_up_for_allocation(self, size): + if not self.gc_ll_descr.round_up: + return size + if self.gc_ll_descr.translate_support_code: + from pypy.rpython.lltypesystem import llarena + return llarena.round_up_for_allocation( + size, self.gc_ll_descr.minimal_size_in_nursery) + else: + # non-translated: do it manually + # assume that "self.gc_ll_descr.minimal_size_in_nursery" is 2 WORDs + size = max(size, 2 * WORD) + return (size + WORD-1) & ~(WORD-1) # round up diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -1,4 +1,4 @@ -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.lltypesystem import lltype, rffi, rstr from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport import symbolic from pypy.rlib.objectmodel import Symbolic @@ -53,18 +53,6 @@ ('z', lltype.Ptr(U)), ('f', lltype.Float), ('s', lltype.SingleFloat)) - assert getFieldDescrClass(lltype.Ptr(T)) is GcPtrFieldDescr - assert getFieldDescrClass(lltype.Ptr(U)) is NonGcPtrFieldDescr - cls = getFieldDescrClass(lltype.Char) - assert cls != getFieldDescrClass(lltype.Signed) - assert cls == getFieldDescrClass(lltype.Char) - clsf = getFieldDescrClass(lltype.Float) - assert clsf != cls - assert clsf == getFieldDescrClass(lltype.Float) - clss = getFieldDescrClass(lltype.SingleFloat) - assert clss not in (cls, clsf) - assert clss == getFieldDescrClass(lltype.SingleFloat) - assert clss == getFieldDescrClass(rffi.UINT) # for now # c0 = GcCache(False) c1 = GcCache(True) @@ -77,11 +65,7 @@ descr_z = get_field_descr(c2, S, 'z') descr_f = get_field_descr(c2, S, 'f') descr_s = get_field_descr(c2, S, 's') - assert descr_x.__class__ is cls - assert descr_y.__class__ is GcPtrFieldDescr - assert descr_z.__class__ is NonGcPtrFieldDescr - assert descr_f.__class__ is clsf - assert descr_s.__class__ is clss + assert isinstance(descr_x, FieldDescr) assert descr_x.name == 'S.x' assert descr_y.name == 'S.y' assert descr_z.name == 'S.z' @@ -90,33 +74,27 @@ if not tsc: assert descr_x.offset < descr_y.offset < descr_z.offset assert descr_x.sort_key() < descr_y.sort_key() < descr_z.sort_key() - assert descr_x.get_field_size(False) == rffi.sizeof(lltype.Char) - assert descr_y.get_field_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr_z.get_field_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr_f.get_field_size(False) == rffi.sizeof(lltype.Float) - assert descr_s.get_field_size(False) == rffi.sizeof( - lltype.SingleFloat) + assert descr_x.field_size == rffi.sizeof(lltype.Char) + assert descr_y.field_size == rffi.sizeof(lltype.Ptr(T)) + assert descr_z.field_size == rffi.sizeof(lltype.Ptr(U)) + assert descr_f.field_size == rffi.sizeof(lltype.Float) + assert descr_s.field_size == rffi.sizeof(lltype.SingleFloat) else: assert isinstance(descr_x.offset, Symbolic) assert isinstance(descr_y.offset, Symbolic) assert isinstance(descr_z.offset, Symbolic) assert isinstance(descr_f.offset, Symbolic) assert isinstance(descr_s.offset, Symbolic) - assert 
isinstance(descr_x.get_field_size(True), Symbolic) - assert isinstance(descr_y.get_field_size(True), Symbolic) - assert isinstance(descr_z.get_field_size(True), Symbolic) - assert isinstance(descr_f.get_field_size(True), Symbolic) - assert isinstance(descr_s.get_field_size(True), Symbolic) - assert not descr_x.is_pointer_field() - assert descr_y.is_pointer_field() - assert not descr_z.is_pointer_field() - assert not descr_f.is_pointer_field() - assert not descr_s.is_pointer_field() - assert not descr_x.is_float_field() - assert not descr_y.is_float_field() - assert not descr_z.is_float_field() - assert descr_f.is_float_field() - assert not descr_s.is_float_field() + assert isinstance(descr_x.field_size, Symbolic) + assert isinstance(descr_y.field_size, Symbolic) + assert isinstance(descr_z.field_size, Symbolic) + assert isinstance(descr_f.field_size, Symbolic) + assert isinstance(descr_s.field_size, Symbolic) + assert descr_x.flag == FLAG_UNSIGNED + assert descr_y.flag == FLAG_POINTER + assert descr_z.flag == FLAG_UNSIGNED + assert descr_f.flag == FLAG_FLOAT + assert descr_s.flag == FLAG_UNSIGNED def test_get_field_descr_sign(): @@ -128,7 +106,8 @@ for tsc in [False, True]: c2 = GcCache(tsc) descr_x = get_field_descr(c2, S, 'x') - assert descr_x.is_field_signed() == signed + assert descr_x.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] def test_get_field_descr_longlong(): if sys.maxint > 2147483647: @@ -136,9 +115,8 @@ c0 = GcCache(False) S = lltype.GcStruct('S', ('y', lltype.UnsignedLongLong)) descr = get_field_descr(c0, S, 'y') - assert not descr.is_pointer_field() - assert descr.is_float_field() - assert descr.get_field_size(False) == 8 + assert descr.flag == FLAG_FLOAT + assert descr.field_size == 8 def test_get_array_descr(): @@ -149,19 +127,8 @@ A3 = lltype.GcArray(lltype.Ptr(U)) A4 = lltype.GcArray(lltype.Float) A5 = lltype.GcArray(lltype.Struct('x', ('v', lltype.Signed), - ('k', lltype.Signed))) + ('k', lltype.Signed))) A6 = lltype.GcArray(lltype.SingleFloat) - assert getArrayDescrClass(A2) is GcPtrArrayDescr - assert getArrayDescrClass(A3) is NonGcPtrArrayDescr - cls = getArrayDescrClass(A1) - assert cls != getArrayDescrClass(lltype.GcArray(lltype.Signed)) - assert cls == getArrayDescrClass(lltype.GcArray(lltype.Char)) - clsf = getArrayDescrClass(A4) - assert clsf != cls - assert clsf == getArrayDescrClass(lltype.GcArray(lltype.Float)) - clss = getArrayDescrClass(A6) - assert clss not in (clsf, cls) - assert clss == getArrayDescrClass(lltype.GcArray(rffi.UINT)) # c0 = GcCache(False) descr1 = get_array_descr(c0, A1) @@ -170,82 +137,61 @@ descr4 = get_array_descr(c0, A4) descr5 = get_array_descr(c0, A5) descr6 = get_array_descr(c0, A6) - assert descr1.__class__ is cls - assert descr2.__class__ is GcPtrArrayDescr - assert descr3.__class__ is NonGcPtrArrayDescr - assert descr4.__class__ is clsf - assert descr6.__class__ is clss + assert isinstance(descr1, ArrayDescr) assert descr1 == get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert not descr1.is_array_of_pointers() - assert descr2.is_array_of_pointers() - assert not descr3.is_array_of_pointers() - assert not descr4.is_array_of_pointers() - assert not descr5.is_array_of_pointers() - assert not descr1.is_array_of_floats() - assert not descr2.is_array_of_floats() - assert not descr3.is_array_of_floats() - assert descr4.is_array_of_floats() - assert not descr5.is_array_of_floats() + assert descr1.flag == FLAG_UNSIGNED + assert descr2.flag == FLAG_POINTER + assert descr3.flag == FLAG_UNSIGNED + assert descr4.flag == 
FLAG_FLOAT + assert descr5.flag == FLAG_STRUCT + assert descr6.flag == FLAG_UNSIGNED # def get_alignment(code): # Retrieve default alignment for the compiler/platform return struct.calcsize('l' + code) - struct.calcsize(code) - assert descr1.get_base_size(False) == get_alignment('c') - assert descr2.get_base_size(False) == get_alignment('p') - assert descr3.get_base_size(False) == get_alignment('p') - assert descr4.get_base_size(False) == get_alignment('d') - assert descr5.get_base_size(False) == get_alignment('f') - assert descr1.get_ofs_length(False) == 0 - assert descr2.get_ofs_length(False) == 0 - assert descr3.get_ofs_length(False) == 0 - assert descr4.get_ofs_length(False) == 0 - assert descr5.get_ofs_length(False) == 0 - assert descr1.get_item_size(False) == rffi.sizeof(lltype.Char) - assert descr2.get_item_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr3.get_item_size(False) == rffi.sizeof(lltype.Ptr(U)) - assert descr4.get_item_size(False) == rffi.sizeof(lltype.Float) - assert descr5.get_item_size(False) == rffi.sizeof(lltype.Signed) * 2 - assert descr6.get_item_size(False) == rffi.sizeof(lltype.SingleFloat) + assert descr1.basesize == get_alignment('c') + assert descr2.basesize == get_alignment('p') + assert descr3.basesize == get_alignment('p') + assert descr4.basesize == get_alignment('d') + assert descr5.basesize == get_alignment('f') + assert descr1.lendescr.offset == 0 + assert descr2.lendescr.offset == 0 + assert descr3.lendescr.offset == 0 + assert descr4.lendescr.offset == 0 + assert descr5.lendescr.offset == 0 + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr2.itemsize == rffi.sizeof(lltype.Ptr(T)) + assert descr3.itemsize == rffi.sizeof(lltype.Ptr(U)) + assert descr4.itemsize == rffi.sizeof(lltype.Float) + assert descr5.itemsize == rffi.sizeof(lltype.Signed) * 2 + assert descr6.itemsize == rffi.sizeof(lltype.SingleFloat) # - assert isinstance(descr1.get_base_size(True), Symbolic) - assert isinstance(descr2.get_base_size(True), Symbolic) - assert isinstance(descr3.get_base_size(True), Symbolic) - assert isinstance(descr4.get_base_size(True), Symbolic) - assert isinstance(descr5.get_base_size(True), Symbolic) - assert isinstance(descr1.get_ofs_length(True), Symbolic) - assert isinstance(descr2.get_ofs_length(True), Symbolic) - assert isinstance(descr3.get_ofs_length(True), Symbolic) - assert isinstance(descr4.get_ofs_length(True), Symbolic) - assert isinstance(descr5.get_ofs_length(True), Symbolic) - assert isinstance(descr1.get_item_size(True), Symbolic) - assert isinstance(descr2.get_item_size(True), Symbolic) - assert isinstance(descr3.get_item_size(True), Symbolic) - assert isinstance(descr4.get_item_size(True), Symbolic) - assert isinstance(descr5.get_item_size(True), Symbolic) CA = rffi.CArray(lltype.Signed) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_SIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Ptr(lltype.GcStruct('S'))) descr = get_array_descr(c0, CA) - assert descr.is_array_of_pointers() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_POINTER + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Ptr(lltype.Struct('S'))) descr = get_array_descr(c0, CA) - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == 
FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(lltype.Float) descr = get_array_descr(c0, CA) - assert descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_FLOAT + assert descr.basesize == 0 + assert descr.lendescr is None CA = rffi.CArray(rffi.FLOAT) descr = get_array_descr(c0, CA) - assert not descr.is_array_of_floats() - assert descr.get_base_size(False) == 0 - assert descr.get_ofs_length(False) == -1 + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == 0 + assert descr.itemsize == rffi.sizeof(lltype.SingleFloat) + assert descr.lendescr is None def test_get_array_descr_sign(): @@ -257,46 +203,55 @@ for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, A) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] # RA = rffi.CArray(RESTYPE) for tsc in [False, True]: c2 = GcCache(tsc) arraydescr = get_array_descr(c2, RA) - assert arraydescr.is_item_signed() == signed + assert arraydescr.flag == {False: FLAG_UNSIGNED, + True: FLAG_SIGNED }[signed] + + +def test_get_array_descr_str(): + c0 = GcCache(False) + descr1 = get_array_descr(c0, rstr.STR) + assert descr1.itemsize == rffi.sizeof(lltype.Char) + assert descr1.flag == FLAG_UNSIGNED def test_get_call_descr_not_translated(): c0 = GcCache(False) descr1 = get_call_descr(c0, [lltype.Char, lltype.Signed], lltype.Char) - assert descr1.get_result_size(False) == rffi.sizeof(lltype.Char) - assert descr1.get_return_type() == history.INT + assert descr1.get_result_size() == rffi.sizeof(lltype.Char) + assert descr1.get_result_type() == history.INT assert descr1.arg_classes == "ii" # T = lltype.GcStruct('T') descr2 = get_call_descr(c0, [lltype.Ptr(T)], lltype.Ptr(T)) - assert descr2.get_result_size(False) == rffi.sizeof(lltype.Ptr(T)) - assert descr2.get_return_type() == history.REF + assert descr2.get_result_size() == rffi.sizeof(lltype.Ptr(T)) + assert descr2.get_result_type() == history.REF assert descr2.arg_classes == "r" # U = lltype.GcStruct('U', ('x', lltype.Signed)) assert descr2 == get_call_descr(c0, [lltype.Ptr(U)], lltype.Ptr(U)) # V = lltype.Struct('V', ('x', lltype.Signed)) - assert (get_call_descr(c0, [], lltype.Ptr(V)).get_return_type() == + assert (get_call_descr(c0, [], lltype.Ptr(V)).get_result_type() == history.INT) # - assert (get_call_descr(c0, [], lltype.Void).get_return_type() == + assert (get_call_descr(c0, [], lltype.Void).get_result_type() == history.VOID) # descr4 = get_call_descr(c0, [lltype.Float, lltype.Float], lltype.Float) - assert descr4.get_result_size(False) == rffi.sizeof(lltype.Float) - assert descr4.get_return_type() == history.FLOAT + assert descr4.get_result_size() == rffi.sizeof(lltype.Float) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c0, [lltype.SingleFloat], lltype.SingleFloat) - assert descr5.get_result_size(False) == rffi.sizeof(lltype.SingleFloat) - assert descr5.get_return_type() == "S" + assert descr5.get_result_size() == rffi.sizeof(lltype.SingleFloat) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_get_call_descr_not_translated_longlong(): @@ -305,13 +260,13 @@ c0 = GcCache(False) # descr5 = get_call_descr(c0, [lltype.SignedLongLong], lltype.Signed) - assert descr5.get_result_size(False) == 4 - assert descr5.get_return_type() == history.INT + assert 
descr5.get_result_size() == 4 + assert descr5.get_result_type() == history.INT assert descr5.arg_classes == "L" # descr6 = get_call_descr(c0, [lltype.Signed], lltype.SignedLongLong) - assert descr6.get_result_size(False) == 8 - assert descr6.get_return_type() == "L" + assert descr6.get_result_size() == 8 + assert descr6.get_result_type() == "L" assert descr6.arg_classes == "i" def test_get_call_descr_translated(): @@ -319,18 +274,18 @@ T = lltype.GcStruct('T') U = lltype.GcStruct('U', ('x', lltype.Signed)) descr3 = get_call_descr(c1, [lltype.Ptr(T)], lltype.Ptr(U)) - assert isinstance(descr3.get_result_size(True), Symbolic) - assert descr3.get_return_type() == history.REF + assert isinstance(descr3.get_result_size(), Symbolic) + assert descr3.get_result_type() == history.REF assert descr3.arg_classes == "r" # descr4 = get_call_descr(c1, [lltype.Float, lltype.Float], lltype.Float) - assert isinstance(descr4.get_result_size(True), Symbolic) - assert descr4.get_return_type() == history.FLOAT + assert isinstance(descr4.get_result_size(), Symbolic) + assert descr4.get_result_type() == history.FLOAT assert descr4.arg_classes == "ff" # descr5 = get_call_descr(c1, [lltype.SingleFloat], lltype.SingleFloat) - assert isinstance(descr5.get_result_size(True), Symbolic) - assert descr5.get_return_type() == "S" + assert isinstance(descr5.get_result_size(), Symbolic) + assert descr5.get_result_type() == "S" assert descr5.arg_classes == "S" def test_call_descr_extra_info(): @@ -358,6 +313,10 @@ def test_repr_of_descr(): + def repr_of_descr(descr): + s = descr.repr_of_descr() + assert ',' not in s # makes the life easier for pypy.tool.jitlogparser + return s c0 = GcCache(False) T = lltype.GcStruct('T') S = lltype.GcStruct('S', ('x', lltype.Char), @@ -365,33 +324,34 @@ ('z', lltype.Ptr(T))) descr1 = get_size_descr(c0, S) s = symbolic.get_size(S, False) - assert descr1.repr_of_descr() == '' % s + assert repr_of_descr(descr1) == '' % s # descr2 = get_field_descr(c0, S, 'y') o, _ = symbolic.get_field_token(S, 'y', False) - assert descr2.repr_of_descr() == '' % o + assert repr_of_descr(descr2) == '' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) - assert descr2i.repr_of_descr() == '' % o + assert repr_of_descr(descr2i) == '' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) - assert descr3.repr_of_descr() == '' + o = symbolic.get_size(lltype.Ptr(S), False) + assert repr_of_descr(descr3) == '' % o # descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char)) - assert descr3i.repr_of_descr() == '' + assert repr_of_descr(descr3i) == '' # descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) - assert 'GcPtrCallDescr' in descr4.repr_of_descr() + assert repr_of_descr(descr4) == '' % o # descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) - assert 'CharCallDescr' in descr4i.repr_of_descr() + assert repr_of_descr(descr4i) == '' # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) - assert 'FloatCallDescr' in descr4f.repr_of_descr() + assert repr_of_descr(descr4f) == '' # descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat) - assert 'SingleFloatCallDescr' in descr5f.repr_of_descr() + assert repr_of_descr(descr5f) == '' def test_call_stubs_1(): c0 = GcCache(False) @@ -401,10 +361,10 @@ def f(a, b): return 'c' - call_stub = descr1.call_stub fnptr = llhelper(lltype.Ptr(lltype.FuncType(ARGS, RES)), f) - res = call_stub(rffi.cast(lltype.Signed, fnptr), [1, 2], None, None) + res = 
descr1.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [1, 2], None, None) assert res == ord('c') def test_call_stubs_2(): @@ -421,8 +381,8 @@ a = lltype.malloc(ARRAY, 3) opaquea = lltype.cast_opaque_ptr(llmemory.GCREF, a) a[0] = 1 - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [], [opaquea], [longlong.getfloatstorage(3.5)]) + res = descr2.call_stub_f(rffi.cast(lltype.Signed, fnptr), + [], [opaquea], [longlong.getfloatstorage(3.5)]) assert longlong.getrealfloat(res) == 4.5 def test_call_stubs_single_float(): @@ -445,6 +405,22 @@ a = intmask(singlefloat2uint(r_singlefloat(-10.0))) b = intmask(singlefloat2uint(r_singlefloat(3.0))) c = intmask(singlefloat2uint(r_singlefloat(2.0))) - res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), - [a, b, c], [], []) + res = descr2.call_stub_i(rffi.cast(lltype.Signed, fnptr), + [a, b, c], [], []) assert float(uint2singlefloat(rffi.r_uint(res))) == -11.5 + +def test_field_arraylen_descr(): + c0 = GcCache(True) + A1 = lltype.GcArray(lltype.Signed) + fielddescr = get_field_arraylen_descr(c0, A1) + assert isinstance(fielddescr, FieldDescr) + ofs = fielddescr.offset + assert repr(ofs) == '< ArrayLengthOffset >' + # + fielddescr = get_field_arraylen_descr(c0, rstr.STR) + ofs = fielddescr.offset + assert repr(ofs) == ("< " + " 'chars'> + < ArrayLengthOffset" + " > >") + # caching: + assert fielddescr is get_field_arraylen_descr(c0, rstr.STR) diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -1,5 +1,6 @@ from pypy.rlib.libffi import types from pypy.jit.codewriter.longlong import is_64_bit +from pypy.jit.backend.llsupport.descr import * from pypy.jit.backend.llsupport.ffisupport import * @@ -15,7 +16,9 @@ args = [types.sint, types.pointer] descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, None, ffi_flags=42) - assert isinstance(descr, DynamicIntCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_type == 'i' + assert descr.result_flag == FLAG_SIGNED assert descr.arg_classes == 'ii' assert descr.get_ffi_flags() == 42 @@ -24,18 +27,20 @@ assert descr is None # missing floats descr = get_call_descr_dynamic(FakeCPU(supports_floats=True), args, types.void, None, ffi_flags=43) - assert isinstance(descr, VoidCallDescr) + assert descr.result_type == 'v' + assert descr.result_flag == FLAG_VOID assert descr.arg_classes == 'ifi' assert descr.get_ffi_flags() == 43 descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_SIGNED assert descr.is_result_signed() == True descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8, None, 42) - assert isinstance(descr, DynamicIntCallDescr) - assert descr.get_result_size(False) == 1 + assert isinstance(descr, CallDescr) + assert descr.get_result_size() == 1 + assert descr.result_flag == FLAG_UNSIGNED assert descr.is_result_signed() == False if not is_64_bit: @@ -44,7 +49,9 @@ assert descr is None # missing longlongs descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True), [], types.slonglong, None, ffi_flags=43) - assert isinstance(descr, LongLongCallDescr) + assert isinstance(descr, CallDescr) + assert descr.result_flag == FLAG_FLOAT + assert descr.result_type == 'L' assert descr.get_ffi_flags() == 43 else: assert types.slonglong 
is types.slong @@ -53,6 +60,6 @@ assert descr is None # missing singlefloats descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True), [], types.float, None, ffi_flags=44) - SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - assert isinstance(descr, SingleFloatCallDescr) + assert descr.result_flag == FLAG_UNSIGNED + assert descr.result_type == 'S' assert descr.get_ffi_flags() == 44 diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -6,6 +6,7 @@ from pypy.jit.backend.llsupport.gc import * from pypy.jit.backend.llsupport import symbolic from pypy.jit.metainterp.gc import get_description +from pypy.jit.metainterp.history import BoxPtr, BoxInt, ConstPtr from pypy.jit.metainterp.resoperation import get_deep_immutable_oplist from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE @@ -15,12 +16,12 @@ gc_ll_descr = GcLLDescr_boehm(None, None, None) # record = [] - prev_funcptr_for_new = gc_ll_descr.funcptr_for_new - def my_funcptr_for_new(size): - p = prev_funcptr_for_new(size) + prev_malloc_fn_ptr = gc_ll_descr.malloc_fn_ptr + def my_malloc_fn_ptr(size): + p = prev_malloc_fn_ptr(size) record.append((size, p)) return p - gc_ll_descr.funcptr_for_new = my_funcptr_for_new + gc_ll_descr.malloc_fn_ptr = my_malloc_fn_ptr # # ---------- gc_malloc ---------- S = lltype.GcStruct('S', ('x', lltype.Signed)) @@ -32,8 +33,8 @@ A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(gc_ll_descr, A) p = gc_ll_descr.gc_malloc_array(arraydescr, 10) - assert record == [(arraydescr.get_base_size(False) + - 10 * arraydescr.get_item_size(False), p)] + assert record == [(arraydescr.basesize + + 10 * arraydescr.itemsize, p)] del record[:] # ---------- gc_malloc_str ---------- p = gc_ll_descr.gc_malloc_str(10) @@ -246,24 +247,28 @@ def __init__(self): self.record = [] + def _malloc(self, type_id, size): + tid = llop.combine_ushort(lltype.Signed, type_id, 0) + x = llmemory.raw_malloc(self.gcheaderbuilder.size_gc_header + size) + x += self.gcheaderbuilder.size_gc_header + return x, tid + def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size, has_finalizer, has_light_finalizer, contains_weakptr): assert not contains_weakptr - assert not has_finalizer # in these tests - assert not has_light_finalizer # in these tests - p = llmemory.raw_malloc(size) + assert not has_finalizer + assert not has_light_finalizer + p, tid = self._malloc(type_id, size) p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("fixedsize", repr(size), tid, p)) return p def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, itemsize, offset_to_length): - p = llmemory.raw_malloc(size + itemsize * length) + p, tid = self._malloc(type_id, size + itemsize * length) (p + offset_to_length).signed[0] = length p = llmemory.cast_adr_to_ptr(p, RESTYPE) - tid = llop.combine_ushort(lltype.Signed, type_id, 0) self.record.append(("varsize", tid, length, repr(size), repr(itemsize), repr(offset_to_length), p)) @@ -322,43 +327,40 @@ gc_ll_descr = GcLLDescr_framework(gcdescr, FakeTranslator(), None, llop1) gc_ll_descr.initialize() + llop1.gcheaderbuilder = gc_ll_descr.gcheaderbuilder self.llop1 = llop1 self.gc_ll_descr = gc_ll_descr self.fake_cpu = FakeCPU() - def test_args_for_new(self): - S = lltype.GcStruct('S', ('x', lltype.Signed)) - sizedescr = 
get_size_descr(self.gc_ll_descr, S) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed - A = lltype.GcArray(lltype.Signed) - arraydescr = get_array_descr(self.gc_ll_descr, A) - args = self.gc_ll_descr.args_for_new(sizedescr) - for x in args: - assert lltype.typeOf(x) == lltype.Signed +## def test_args_for_new(self): +## S = lltype.GcStruct('S', ('x', lltype.Signed)) +## sizedescr = get_size_descr(self.gc_ll_descr, S) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed +## A = lltype.GcArray(lltype.Signed) +## arraydescr = get_array_descr(self.gc_ll_descr, A) +## args = self.gc_ll_descr.args_for_new(sizedescr) +## for x in args: +## assert lltype.typeOf(x) == lltype.Signed def test_gc_malloc(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) sizedescr = get_size_descr(self.gc_ll_descr, S) p = self.gc_ll_descr.gc_malloc(sizedescr) - assert self.llop1.record == [("fixedsize", - repr(sizedescr.size), + assert lltype.typeOf(p) == llmemory.GCREF + assert self.llop1.record == [("fixedsize", repr(sizedescr.size), sizedescr.tid, p)] - assert repr(self.gc_ll_descr.args_for_new(sizedescr)) == repr( - [sizedescr.size, sizedescr.tid]) def test_gc_malloc_array(self): A = lltype.GcArray(lltype.Signed) arraydescr = get_array_descr(self.gc_ll_descr, A) p = self.gc_ll_descr.gc_malloc_array(arraydescr, 10) assert self.llop1.record == [("varsize", arraydescr.tid, 10, - repr(arraydescr.get_base_size(True)), - repr(arraydescr.get_item_size(True)), - repr(arraydescr.get_ofs_length(True)), + repr(arraydescr.basesize), + repr(arraydescr.itemsize), + repr(arraydescr.lendescr.offset), p)] - assert repr(self.gc_ll_descr.args_for_new_array(arraydescr)) == repr( - [arraydescr.get_item_size(True), arraydescr.tid]) def test_gc_malloc_str(self): p = self.gc_ll_descr.gc_malloc_str(10) @@ -404,10 +406,11 @@ gc_ll_descr = self.gc_ll_descr llop1 = self.llop1 # - newops = [] + rewriter = GcRewriterAssembler(gc_ll_descr, None) + newops = rewriter.newops v_base = BoxPtr() v_value = BoxPtr() - gc_ll_descr._gen_write_barrier(newops, v_base, v_value) + rewriter.gen_write_barrier(v_base, v_value) assert llop1.record == [] assert len(newops) == 1 assert newops[0].getopnum() == rop.COND_CALL_GC_WB @@ -427,8 +430,7 @@ operations = gc_ll_descr.rewrite_assembler(None, operations, []) assert len(operations) == 0 - def test_rewrite_assembler_1(self): - # check recording of ConstPtrs + def test_record_constptrs(self): class MyFakeCPU(object): def cast_adr_to_int(self, adr): assert adr == "some fake address" @@ -455,211 +457,6 @@ assert operations2 == operations assert gcrefs == [s_gcref] - def test_rewrite_assembler_2(self): - # check write barriers before SETFIELD_GC - v_base = BoxPtr() - v_value = BoxPtr() - field_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETFIELD_GC, [v_base, v_value], None, - descr=field_descr), - ] - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, operations, - []) - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETFIELD_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_value - assert operations[1].getdescr() == field_descr - - def 
test_rewrite_assembler_3(self): - # check write barriers before SETARRAYITEM_GC - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_4(self): - # check write barriers before SETARRAYITEM_GC, - # if we have actually a write_barrier_from_array. - self.llop1._have_wb_from_array = True - for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()): - v_base = BoxPtr() - v_index = BoxInt() - v_value = BoxPtr() - array_descr = AbstractDescr() - operations = [ - ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value], - None, descr=array_descr), - ] - if v_new_length is not None: - operations.insert(0, ResOperation(rop.NEW_ARRAY, - [v_new_length], v_base, - descr=array_descr)) - # we need to insert another, unrelated NEW_ARRAY here - # to prevent the initialization_store optimization - operations.insert(1, ResOperation(rop.NEW_ARRAY, - [ConstInt(12)], BoxPtr(), - descr=array_descr)) - gc_ll_descr = self.gc_ll_descr - operations = get_deep_immutable_oplist(operations) - operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - if v_new_length is not None: - assert operations[0].getopnum() == rop.NEW_ARRAY - assert operations[1].getopnum() == rop.NEW_ARRAY - del operations[:2] - assert len(operations) == 2 - # - if isinstance(v_new_length, ConstInt) and v_new_length.value < 130: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_value - else: - assert operations[0].getopnum() == rop.COND_CALL_GC_WB_ARRAY - assert operations[0].getarg(0) == v_base - assert operations[0].getarg(1) == v_index - assert operations[0].getarg(2) == v_value - assert operations[0].result is None - # - assert operations[1].getopnum() == rop.SETARRAYITEM_RAW - assert operations[1].getarg(0) == v_base - assert operations[1].getarg(1) == v_index - assert operations[1].getarg(2) == v_value - assert operations[1].getdescr() == array_descr - - def test_rewrite_assembler_5(self): - S = lltype.GcStruct('S') - A = lltype.GcArray(lltype.Struct('A', ('x', lltype.Ptr(S)))) - interiordescr = get_interiorfield_descr(self.gc_ll_descr, A, - A.OF, 'x') - wbdescr = 
self.gc_ll_descr.write_barrier_descr - ops = parse(""" - [p1, p2] - setinteriorfield_gc(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - expected = parse(""" - [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setinteriorfield_raw(p1, 0, p2, descr=interiordescr) - jump(p1, p2) - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - # no write barrier - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_2(self): - S = lltype.GcStruct('S', ('parent', OBJECT), - ('x', lltype.Signed)) - s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - wbdescr = self.gc_ll_descr.write_barrier_descr - xdescr = get_field_descr(self.gc_ll_descr, S, 'x') - ops = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - setfield_gc(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_with_vtable(ConstClass(s_vtable)) - p3 = new_with_vtable(ConstClass(s_vtable)) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=xdescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) - - def test_rewrite_assembler_initialization_store_3(self): - A = lltype.GcArray(lltype.Ptr(lltype.GcStruct('S'))) - arraydescr = get_array_descr(self.gc_ll_descr, A) - ops = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - expected = parse(""" - [p1] - p0 = new_array(3, descr=arraydescr) - setarrayitem_gc(p0, 0, p1, descr=arraydescr) - jump() - """, namespace=locals()) - operations = get_deep_immutable_oplist(ops.operations) - operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu, - operations, []) - equaloplists(operations, expected.operations) class TestFrameworkMiniMark(TestFramework): gc = 'minimark' diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/llsupport/test/test_rewrite.py @@ -0,0 +1,668 @@ +from pypy.jit.backend.llsupport.descr import * +from pypy.jit.backend.llsupport.gc import * +from pypy.jit.metainterp.gc import get_description +from pypy.jit.tool.oparser import parse +from pypy.jit.metainterp.optimizeopt.util import equaloplists +from pypy.jit.codewriter.heaptracker import register_known_gctype + + +class Evaluator(object): + def __init__(self, scope): + self.scope = scope + def __getitem__(self, key): + return eval(key, self.scope) + + 
+class RewriteTests(object): + def check_rewrite(self, frm_operations, to_operations, **namespace): + S = lltype.GcStruct('S', ('x', lltype.Signed), + ('y', lltype.Signed)) + sdescr = get_size_descr(self.gc_ll_descr, S) + sdescr.tid = 1234 + # + T = lltype.GcStruct('T', ('y', lltype.Signed), + ('z', lltype.Ptr(S)), + ('t', lltype.Signed)) + tdescr = get_size_descr(self.gc_ll_descr, T) + tdescr.tid = 5678 + tzdescr = get_field_descr(self.gc_ll_descr, T, 'z') + # + A = lltype.GcArray(lltype.Signed) + adescr = get_array_descr(self.gc_ll_descr, A) + adescr.tid = 4321 + alendescr = adescr.lendescr + # + B = lltype.GcArray(lltype.Char) + bdescr = get_array_descr(self.gc_ll_descr, B) + bdescr.tid = 8765 + blendescr = bdescr.lendescr + # + C = lltype.GcArray(lltype.Ptr(S)) + cdescr = get_array_descr(self.gc_ll_descr, C) + cdescr.tid = 8111 + clendescr = cdescr.lendescr + # + E = lltype.GcStruct('Empty') + edescr = get_size_descr(self.gc_ll_descr, E) + edescr.tid = 9000 + # + vtable_descr = self.gc_ll_descr.fielddescr_vtable + O = lltype.GcStruct('O', ('parent', rclass.OBJECT), + ('x', lltype.Signed)) + o_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) + register_known_gctype(self.cpu, o_vtable, O) + # + tiddescr = self.gc_ll_descr.fielddescr_tid + wbdescr = self.gc_ll_descr.write_barrier_descr + WORD = globals()['WORD'] + # + strdescr = self.gc_ll_descr.str_descr + unicodedescr = self.gc_ll_descr.unicode_descr + strlendescr = strdescr.lendescr + unicodelendescr = unicodedescr.lendescr + # + namespace.update(locals()) + # + for funcname in self.gc_ll_descr._generated_functions: + namespace[funcname] = self.gc_ll_descr.get_malloc_fn(funcname) + namespace[funcname + '_descr'] = getattr(self.gc_ll_descr, + '%s_descr' % funcname) + # + ops = parse(frm_operations, namespace=namespace) + expected = parse(to_operations % Evaluator(namespace), + namespace=namespace) + operations = self.gc_ll_descr.rewrite_assembler(self.cpu, + ops.operations, + []) + equaloplists(operations, expected.operations) + + +class TestBoehm(RewriteTests): + def setup_method(self, meth): + class FakeCPU(object): + def sizeof(self, STRUCT): + return SizeDescrWithVTable(102) + self.cpu = FakeCPU() + self.gc_ll_descr = GcLLDescr_boehm(None, None, None) + + def test_new(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_no_collapsing(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + p1 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\ + descr=malloc_fixedsize_descr) + jump() + """) + + def test_new_array_fixed(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(adescr.basesize + 10 * adescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_new_array_variable(self): + self.check_rewrite(""" + [i1] + p0 = new_array(i1, descr=adescr) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(adescr.basesize)d, \ + i1, \ + %(adescr.itemsize)d, \ + %(adescr.lendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = 
new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_newstr(self): + self.check_rewrite(""" + [i1] + p0 = newstr(i1) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_array), \ + %(strdescr.basesize)d, \ + i1, \ + %(strdescr.itemsize)d, \ + %(strlendescr.offset)d, \ + descr=malloc_array_descr) + jump() + """) + + def test_newunicode(self): + self.check_rewrite(""" + [i1] + p0 = newunicode(10) + jump() + """, """ + [i1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(unicodedescr.basesize + \ + 10 * unicodedescr.itemsize)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 10, descr=unicodelendescr) + jump() + """) + + +class TestFramework(RewriteTests): + def setup_method(self, meth): + class config_(object): + class translation(object): + gc = 'hybrid' + gcrootfinder = 'asmgcc' + gctransformer = 'framework' + gcremovetypeptr = False + gcdescr = get_description(config_) + self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, + really_not_translated=True) + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: True) + # + class FakeCPU(object): + def sizeof(self, STRUCT): + descr = SizeDescrWithVTable(102) + descr.tid = 9315 + return descr + self.cpu = FakeCPU() + + def test_rewrite_assembler_new_to_malloc(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=sdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(sdescr.size)d) + setfield_gc(p0, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new3_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new(descr=tdescr) + p2 = new(descr=sdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + tdescr.size + sdescr.size)d) + setfield_gc(p0, 1234, descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 5678, descr=tiddescr) + p2 = int_add(p1, %(tdescr.size)d) + setfield_gc(p2, 1234, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 4321, descr=tiddescr) + setfield_gc(p0, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_new_and_new_array_fixed_to_malloc(self): + self.check_rewrite(""" + [] + p0 = new(descr=sdescr) + p1 = new_array(10, descr=adescr) + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(sdescr.size + \ + adescr.basesize + 10 * adescr.itemsize)d) + setfield_gc(p0, 1234, descr=tiddescr) + p1 = int_add(p0, %(sdescr.size)d) + setfield_gc(p1, 4321, descr=tiddescr) + setfield_gc(p1, 10, descr=alendescr) + jump() + """) + + def test_rewrite_assembler_round_up(self): + self.check_rewrite(""" + [] + p0 = new_array(6, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(bdescr.basesize + 8)d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 6, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_round_up_always(self): + self.check_rewrite(""" + [] + p0 = new_array(5, descr=bdescr) + p1 = new_array(5, descr=bdescr) + p2 = new_array(5, descr=bdescr) + p3 = new_array(5, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4 * (bdescr.basesize + 8))d) + setfield_gc(p0, 8765, descr=tiddescr) + 
setfield_gc(p0, 5, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 8)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 5, descr=blendescr) + p2 = int_add(p1, %(bdescr.basesize + 8)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 5, descr=blendescr) + p3 = int_add(p2, %(bdescr.basesize + 8)d) + setfield_gc(p3, 8765, descr=tiddescr) + setfield_gc(p3, 5, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_minimal_size(self): + self.check_rewrite(""" + [] + p0 = new(descr=edescr) + p1 = new(descr=edescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(4*WORD)d) + setfield_gc(p0, 9000, descr=tiddescr) + p1 = int_add(p0, %(2*WORD)d) + setfield_gc(p1, 9000, descr=tiddescr) + jump() + """) + + def test_rewrite_assembler_variable_size(self): + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=bdescr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, i0, \ + descr=malloc_array_descr) + jump(i0) + """) + + def test_rewrite_assembler_nonstandard_array(self): + # a non-standard array is a bit hard to get; e.g. GcArray(Float) + # is like that on Win32, but not on Linux. Build one manually... + NONSTD = lltype.GcArray(lltype.Float) + nonstd_descr = get_array_descr(self.gc_ll_descr, NONSTD) + nonstd_descr.tid = 6464 + nonstd_descr.basesize = 64 # <= hacked + nonstd_descr.itemsize = 8 + nonstd_descr_gcref = 123 + self.check_rewrite(""" + [i0] + p0 = new_array(i0, descr=nonstd_descr) + jump(i0) + """, """ + [i0] + p0 = call_malloc_gc(ConstClass(malloc_array_nonstandard), \ + 64, 8, \ + %(nonstd_descr.lendescr.offset)d, \ + 6464, i0, \ + descr=malloc_array_nonstandard_descr) + jump(i0) + """, nonstd_descr=nonstd_descr) + + def test_rewrite_assembler_maximal_size_1(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_array(103, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \ + %(bdescr.basesize + 104)d, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_maximal_size_2(self): + self.gc_ll_descr.max_size_of_young_obj = 300 + self.check_rewrite(""" + [] + p0 = new_array(101, descr=bdescr) + p1 = new_array(102, descr=bdescr) # two new_arrays can be combined + p2 = new_array(103, descr=bdescr) # but not all three + jump() + """, """ + [] + p0 = call_malloc_nursery( \ + %(2 * (bdescr.basesize + 104))d) + setfield_gc(p0, 8765, descr=tiddescr) + setfield_gc(p0, 101, descr=blendescr) + p1 = int_add(p0, %(bdescr.basesize + 104)d) + setfield_gc(p1, 8765, descr=tiddescr) + setfield_gc(p1, 102, descr=blendescr) + p2 = call_malloc_nursery( \ + %(bdescr.basesize + 104)d) + setfield_gc(p2, 8765, descr=tiddescr) + setfield_gc(p2, 103, descr=blendescr) + jump() + """) + + def test_rewrite_assembler_huge_size(self): + # "huge" is defined as "larger than 0xffffff bytes, or 16MB" + self.check_rewrite(""" + [] + p0 = new_array(20000000, descr=bdescr) + jump() + """, """ + [] + p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ + %(bdescr.tid)d, 20000000, \ + descr=malloc_array_descr) + jump() + """) + + def test_new_with_vtable(self): + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(104) # rounded up + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def 
test_new_with_vtable_too_big(self): + self.gc_ll_descr.max_size_of_young_obj = 100 + self.check_rewrite(""" + [] + p0 = new_with_vtable(ConstClass(o_vtable)) + jump() + """, """ + [p1] + p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \ + descr=malloc_fixedsize_descr) + setfield_gc(p0, 9315, descr=tiddescr) + setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + jump() + """) + + def test_rewrite_assembler_newstr_newunicode(self): + self.check_rewrite(""" + [i2] + p0 = newstr(14) + p1 = newunicode(10) + p2 = newunicode(i2) + p3 = newstr(i2) + jump() + """, """ + [i2] + p0 = call_malloc_nursery( \ + %(strdescr.basesize + 16 * strdescr.itemsize + \ + unicodedescr.basesize + 10 * unicodedescr.itemsize)d) + setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr) + setfield_gc(p0, 14, descr=strlendescr) + p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) + setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) + setfield_gc(p1, 10, descr=unicodelendescr) + p2 = call_malloc_gc(ConstClass(malloc_unicode), i2, \ + descr=malloc_unicode_descr) + p3 = call_malloc_gc(ConstClass(malloc_str), i2, \ + descr=malloc_str_descr) + jump() + """) + + def test_write_barrier_before_setfield_gc(self): + self.check_rewrite(""" + [p1, p2] + setfield_gc(p1, p2, descr=tzdescr) + jump() + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setfield_raw(p1, p2, descr=tzdescr) + jump() + """) + + def test_write_barrier_before_array_without_from_array(self): + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: False) + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_short_array(self): + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(129, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 129 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 129, descr=clendescr) + call(123456) + cond_call_gc_wb(p1, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_long_array(self): + # the limit of "being too long" is fixed, arbitrarily, at 130 + self.gc_ll_descr.max_size_of_young_obj = 2000 + self.check_rewrite(""" + [i2, p3] + p1 = new_array(130, descr=cdescr) + call(123456) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 130 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) + setfield_gc(p1, 130, descr=clendescr) + call(123456) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_unknown_array(self): + self.check_rewrite(""" + [p1, i2, p3] + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [p1, i2, p3] + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_label_makes_size_unknown(self): + self.check_rewrite(""" + [i2, p3] + p1 = new_array(5, descr=cdescr) + label(p1, i2, p3) + setarrayitem_gc(p1, i2, p3, descr=cdescr) + jump() + """, """ + [i2, p3] + p1 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p1, 8111, descr=tiddescr) 
+ setfield_gc(p1, 5, descr=clendescr) + label(p1, i2, p3) + cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) + setarrayitem_raw(p1, i2, p3, descr=cdescr) + jump() + """) + + def test_write_barrier_before_setinteriorfield_gc(self): + S1 = lltype.GcStruct('S1') + INTERIOR = lltype.GcArray(('z', lltype.Ptr(S1))) + interiordescr = get_array_descr(self.gc_ll_descr, INTERIOR) + interiordescr.tid = 1291 + interiorlendescr = interiordescr.lendescr + interiorzdescr = get_interiorfield_descr(self.gc_ll_descr, + INTERIOR, 'z') + self.check_rewrite(""" + [p1, p2] + setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, """ + [p1, p2] + cond_call_gc_wb(p1, p2, descr=wbdescr) + setinteriorfield_raw(p1, 0, p2, descr=interiorzdescr) + jump(p1, p2) + """, interiorzdescr=interiorzdescr) + + def test_initialization_store(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_2(self): + self.check_rewrite(""" + [] + p0 = new(descr=tdescr) + p1 = new(descr=sdescr) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [] + p0 = call_malloc_nursery(%(tdescr.size + sdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = int_add(p0, %(tdescr.size)d) + setfield_gc(p1, 1234, descr=tiddescr) + # <<>> + setfield_gc(p0, p1, descr=tzdescr) + jump() + """) + + def test_initialization_store_array(self): + self.check_rewrite(""" + [p1, i2] + p0 = new_array(5, descr=cdescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """, """ + [p1, i2] + p0 = call_malloc_nursery( \ + %(cdescr.basesize + 5 * cdescr.itemsize)d) + setfield_gc(p0, 8111, descr=tiddescr) + setfield_gc(p0, 5, descr=clendescr) + setarrayitem_gc(p0, i2, p1, descr=cdescr) + jump() + """) + + def test_non_initialization_store(self): + self.check_rewrite(""" + [i0] + p0 = new(descr=tdescr) + p1 = newstr(i0) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [i0] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + p1 = call_malloc_gc(ConstClass(malloc_str), i0, \ + descr=malloc_str_descr) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) + + def test_non_initialization_store_label(self): + self.check_rewrite(""" + [p1] + p0 = new(descr=tdescr) + label(p0, p1) + setfield_gc(p0, p1, descr=tzdescr) + jump() + """, """ + [p1] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + label(p0, p1) + cond_call_gc_wb(p0, p1, descr=wbdescr) + setfield_raw(p0, p1, descr=tzdescr) + jump() + """) diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -1,5 +1,6 @@ from pypy.rlib.debug import debug_start, debug_print, debug_stop from pypy.jit.metainterp import history +from pypy.rpython.lltypesystem import lltype class AbstractCPU(object): @@ -89,24 +90,21 @@ """Print a disassembled version of looptoken to stdout""" raise NotImplementedError - def execute_token(self, looptoken): - """Execute the generated code referenced by the looptoken. + def execute_token(self, looptoken, *args): + """NOT_RPYTHON (for tests only) + Execute the generated code referenced by the looptoken. Returns the descr of the last executed operation: either the one attached to the failing guard, or the one attached to the FINISH. 
- Use set_future_value_xxx() before, and get_latest_value_xxx() after. + Use get_latest_value_xxx() afterwards to read the result(s). """ - raise NotImplementedError + argtypes = [lltype.typeOf(x) for x in args] + execute = self.make_execute_token(*argtypes) + return execute(looptoken, *args) - def set_future_value_int(self, index, intvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_float(self, index, floatvalue): - """Set the value for the index'th argument for the loop to run.""" - raise NotImplementedError - - def set_future_value_ref(self, index, objvalue): - """Set the value for the index'th argument for the loop to run.""" + def make_execute_token(self, *argtypes): + """Must make and return an execute_token() function that will be + called with the given argtypes. + """ raise NotImplementedError def get_latest_value_int(self, index): diff --git a/pypy/jit/backend/test/calling_convention_test.py b/pypy/jit/backend/test/calling_convention_test.py --- a/pypy/jit/backend/test/calling_convention_test.py +++ b/pypy/jit/backend/test/calling_convention_test.py @@ -41,17 +41,18 @@ local_floats = list(floats) local_ints = list(ints) expected_result = 0.0 + arguments = [] for i in range(len(args)): x = args[i] if x[0] == 'f': x = local_floats.pop() t = longlong.getfloatstorage(x) - self.cpu.set_future_value_float(i, t) + arguments.append(t) else: x = local_ints.pop() - self.cpu.set_future_value_int(i, x) + arguments.append(x) expected_result += x - return expected_result + return arguments, expected_result @classmethod def get_funcbox(cls, cpu, func_ptr): @@ -111,9 +112,9 @@ looptoken = JitCellToken() done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - expected_result = self._prepare_args(args, floats, ints) + argvals, expected_result = self._prepare_args(args, floats, ints) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(x - expected_result) < 0.0001 @@ -259,8 +260,8 @@ done_number = self.cpu.get_fail_descr_number(called_loop.operations[-1].getdescr()) self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) - expected_result = self._prepare_args(args, floats, ints) - res = cpu.execute_token(called_looptoken) + argvals, expected_result = self._prepare_args(args, floats, ints) + res = cpu.execute_token(called_looptoken, *argvals) assert res.identifier == 3 t = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert abs(t - expected_result) < 0.0001 @@ -289,8 +290,8 @@ self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # prepare call to called_loop - self._prepare_args(args, floats, ints) - res = cpu.execute_token(othertoken) + argvals, _ = self._prepare_args(args, floats, ints) + res = cpu.execute_token(othertoken, *argvals) x = longlong.getrealfloat(cpu.get_latest_value_float(0)) assert res.identifier == 4 assert abs(x - expected_result) < 0.0001 diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -34,20 +34,17 @@ descr) looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - j = 0 + args = [] for box in inputargs: if isinstance(box, BoxInt): - self.cpu.set_future_value_int(j, box.getint()) - j += 1 + 
args.append(box.getint()) elif isinstance(box, (BoxPtr, BoxObj)): - self.cpu.set_future_value_ref(j, box.getref_base()) - j += 1 + args.append(box.getref_base()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(j, box.getfloatstorage()) - j += 1 + args.append(box.getfloatstorage()) else: raise NotImplementedError(box) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, *args) if res is operations[-1].getdescr(): self.guard_failed = False else: @@ -108,8 +105,7 @@ inputargs = [i0] looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) res = self.cpu.get_latest_value_int(0) assert res == 3 assert fail.identifier == 1 @@ -124,8 +120,7 @@ inputargs = [i0] looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(2.8)) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, longlong.getfloatstorage(2.8)) res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == 5.1 assert fail.identifier == 1 @@ -147,8 +142,7 @@ operations[3].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 10 @@ -172,8 +166,7 @@ operations[4].setfailargs([None, None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 44) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 44) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(2) assert res == 10 @@ -238,8 +231,7 @@ self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -280,8 +272,7 @@ self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 20 @@ -304,8 +295,7 @@ operations[3].setfailargs([None, i1, None]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail is faildescr1 count = self.cpu.get_latest_value_count() @@ -327,8 +317,7 @@ ResOperation(rop.FINISH, [i0], None, descr=faildescr) ] self.cpu.compile_loop([i0], operations, looptoken) - self.cpu.set_future_value_int(0, 99) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 99) assert fail is faildescr res = self.cpu.get_latest_value_int(0) assert res == 99 @@ -359,8 +348,7 @@ ] self.cpu.compile_loop([f0], operations, looptoken) value = longlong.getfloatstorage(-61.25) - self.cpu.set_future_value_float(0, value) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, value) assert fail is faildescr res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == -61.25 @@ -395,9 +383,7 @@ ] operations[-2].setfailargs([t, 
z]) cpu.compile_loop([x, y], operations, looptoken) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - res = self.cpu.execute_token(looptoken) + res = self.cpu.execute_token(looptoken, 0, 10) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_int(1) == 55 @@ -458,9 +444,7 @@ for x, y, z in testcases: excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, x) - self.cpu.set_future_value_int(1, y) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, x, y) if (z == boom) ^ reversed: assert fail.identifier == 1 else: @@ -1161,17 +1145,7 @@ assert 0 values[index_counter] = 11 # - for i, (box, val) in enumerate(zip(inputargs, values)): - if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, val) - elif isinstance(box, BoxPtr): - self.cpu.set_future_value_ref(i, val) - elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, val) - else: - assert 0 - # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *values) assert fail.identifier == 15 # dstvalues = values[:] @@ -1223,10 +1197,11 @@ self.cpu.compile_bridge(faildescr1, fboxes2, bridge, looptoken) + args = [] for i in range(len(fboxes)): x = 13.5 + 6.73 * i - self.cpu.set_future_value_float(i, longlong.getfloatstorage(x)) - fail = self.cpu.execute_token(looptoken) + args.append(longlong.getfloatstorage(x)) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 2 res = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(res) == 8.5 @@ -1250,10 +1225,10 @@ loop = parse(loopops) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_int(0, 1) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(132.25)) - self.cpu.set_future_value_float(2, longlong.getfloatstorage(0.75)) - fail = self.cpu.execute_token(looptoken) + args = [1] + args.append(longlong.getfloatstorage(132.25)) + args.append(longlong.getfloatstorage(0.75)) + fail = self.cpu.execute_token(looptoken, *args) #xxx check assert loop.operations[-2].getdescr() == fail f1 = self.cpu.get_latest_value_float(0) f2 = self.cpu.get_latest_value_float(1) @@ -1352,14 +1327,12 @@ if test1 == -42 or combinaison[0] == 'b': for test2 in [-65, -42, -11, 0, 1, 10]: if test2 == -42 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_int(n, test1) - n += 1 + args.append(test1) if combinaison[1] == 'b': - cpu.set_future_value_int(n, test2) - n += 1 - fail = cpu.execute_token(looptoken) + args.append(test2) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1405,14 +1378,12 @@ if test1 == 42 or combinaison[0] == 'b': for test2 in [65, 42, 11, 0, 1]: if test2 == 42 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_int(n, test1) - n += 1 + args.append(test1) if combinaison[1] == 'b': - cpu.set_future_value_int(n, test2) - n += 1 - fail = cpu.execute_token(looptoken) + args.append(test2) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1464,16 +1435,14 @@ if test1 == -4.5 or combinaison[0] == 'b': for test2 in [-6.5, -4.5, -2.5, nan]: if test2 == -4.5 or combinaison[1] == 'b': - n = 0 + args = [] if combinaison[0] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test1)) - n += 1 + args.append( + 
longlong.getfloatstorage(test1)) if combinaison[1] == 'b': - cpu.set_future_value_float( - n, longlong.getfloatstorage(test2)) - n += 1 - fail = cpu.execute_token(looptoken) + args.append( + longlong.getfloatstorage(test2)) + fail = cpu.execute_token(looptoken, *args) # expected = compare(test1, test2) expected ^= guard_case @@ -1517,15 +1486,16 @@ # self.cpu.compile_loop(inputargs, operations, looptoken) # - for i, box in enumerate(inputargs): + args = [] + for box in inputargs: if isinstance(box, BoxInt): - self.cpu.set_future_value_int(i, box.getint()) + args.append(box.getint()) elif isinstance(box, BoxFloat): - self.cpu.set_future_value_float(i, box.getfloatstorage()) + args.append(box.getfloatstorage()) else: assert 0 # - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 1 def test_nan_and_infinity(self): @@ -1588,10 +1558,9 @@ unique_testcase_list = list(set(testcase)) self.cpu.compile_loop(unique_testcase_list, operations, looptoken) - for i, box in enumerate(unique_testcase_list): - self.cpu.set_future_value_float( - i, box.getfloatstorage()) - fail = self.cpu.execute_token(looptoken) + args = [box.getfloatstorage() + for box in unique_testcase_list] + fail = self.cpu.execute_token(looptoken, *args) if fail.identifier != 5 - (expected_id^expected): if fail.identifier == 4: msg = "was taken" @@ -1863,14 +1832,12 @@ loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 0 assert self.cpu.get_latest_value_ref(1) == xptr excvalue = self.cpu.grab_exc_value() assert not excvalue - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -1889,8 +1856,7 @@ loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == yptr @@ -1908,13 +1874,11 @@ loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_int(0, 1) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 1) assert self.cpu.get_latest_value_int(0) == 1 excvalue = self.cpu.grab_exc_value() assert excvalue == xptr - self.cpu.set_future_value_int(0, 0) - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, 0) assert self.cpu.get_latest_value_int(0) == 0 excvalue = self.cpu.grab_exc_value() assert not excvalue @@ -2086,16 +2050,12 @@ ops[2].setfailargs([i1, i0]) looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 20 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = 
self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 10 @@ -2131,16 +2091,12 @@ ops[2].setfailargs([i1, i2, i0]) looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == 42 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 assert self.cpu.get_latest_value_int(1) == 42 @@ -2179,17 +2135,13 @@ ops[2].setfailargs([i1, f2, i0]) looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, 20) - self.cpu.set_future_value_int(1, 0) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 20, 0) assert fail.identifier == 0 x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 42.5 assert values == [] - self.cpu.set_future_value_int(0, 10) - self.cpu.set_future_value_int(1, 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 10, 1) assert fail.identifier == 1 assert self.cpu.get_latest_value_int(0) == 1 x = self.cpu.get_latest_value_float(1) @@ -2224,8 +2176,7 @@ ops[1].setfailargs([i1, i2]) looptoken = JitCellToken() self.cpu.compile_loop([i1], ops, looptoken) - self.cpu.set_future_value_int(0, ord('G')) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, ord('G')) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == ord('g') @@ -2284,12 +2235,12 @@ ops[1].setfailargs([]) looptoken = JitCellToken() self.cpu.compile_loop([i0, i1, i2, i3], ops, looptoken) - self.cpu.set_future_value_int(0, rffi.cast(lltype.Signed, raw)) - self.cpu.set_future_value_int(1, 2) - self.cpu.set_future_value_int(2, 4) - self.cpu.set_future_value_int(3, rffi.cast(lltype.Signed, fn)) + args = [rffi.cast(lltype.Signed, raw), + 2, + 4, + rffi.cast(lltype.Signed, fn)] assert glob.lst == [] - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert len(glob.lst) > 0 lltype.free(raw, flavor='raw') @@ -2342,9 +2293,8 @@ self.cpu.compile_loop([i1, i2], ops, looptoken) buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') - self.cpu.set_future_value_int(0, buflen) - self.cpu.set_future_value_int(1, rffi.cast(lltype.Signed, buffer)) - fail = self.cpu.execute_token(looptoken) + args = [buflen, rffi.cast(lltype.Signed, buffer)] + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == len(cwd) assert rffi.charp2strn(buffer, buflen) == cwd @@ -2363,9 +2313,7 @@ looptoken = JitCellToken() self.cpu.compile_loop([i0, i1], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 0 assert self.cpu.get_latest_value_int(0) == -42 print 'step 1 ok' @@ -2374,9 +2322,7 @@ # mark as failing self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail 
= self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr assert self.cpu.get_latest_value_int(0) == 9 print 'step 2 ok' @@ -2392,9 +2338,7 @@ ops[0].setfailargs([]) self.cpu.compile_bridge(faildescr, [i2], ops, looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail.identifier == 3 assert self.cpu.get_latest_value_int(0) == 9 print 'step 3 ok' @@ -2403,9 +2347,7 @@ # mark as failing again self.cpu.invalidate_loop(looptoken) - self.cpu.set_future_value_int(0, -42) - self.cpu.set_future_value_int(1, 9) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, -42, 9) assert fail is faildescr2 print 'step 4 ok' print '-'*79 @@ -2618,9 +2560,8 @@ FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, EffectInfo.MOST_GENERAL) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(looptoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(looptoken, *args) assert self.cpu.get_latest_value_int(0) == 55 ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] @@ -2632,9 +2573,8 @@ loop = parse(ops, namespace=locals()) othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(othertoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_latest_value_int(0) == 13 assert called == [done_number] @@ -2644,9 +2584,8 @@ try: othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - for i in range(10): - self.cpu.set_future_value_int(i, i+1) - res = self.cpu.execute_token(othertoken) + args = [i+1 for i in range(10)] + res = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_latest_value_int(0) == 97 assert not called finally: @@ -2686,9 +2625,9 @@ looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.3)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(2.3)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 2.3 ops = ''' @@ -2700,9 +2639,9 @@ loop = parse(ops, namespace=locals()) othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called == [done_number] @@ -2713,9 +2652,9 @@ try: othertoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.2)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.2)) - res = self.cpu.execute_token(othertoken) + args = 
[longlong.getfloatstorage(1.2), + longlong.getfloatstorage(3.2)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.2 + 3.2 assert not called @@ -2780,9 +2719,9 @@ looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr()) - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(2.35)) - res = self.cpu.execute_token(looptoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(2.35)] + res = self.cpu.execute_token(looptoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 1.25 + 2.35 assert not called @@ -2798,9 +2737,9 @@ self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) # normal call_assembler: goes to looptoken - self.cpu.set_future_value_float(0, longlong.getfloatstorage(1.25)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(3.25)) - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(1.25), + longlong.getfloatstorage(3.25)] + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called == [done_number] @@ -2821,10 +2760,9 @@ self.cpu.redirect_call_assembler(looptoken, looptoken2) # now, our call_assembler should go to looptoken2 - self.cpu.set_future_value_float(0, longlong.getfloatstorage(6.0)) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(1.5)) - # 6.0-1.5 == 1.25+3.25 - res = self.cpu.execute_token(othertoken) + args = [longlong.getfloatstorage(6.0), + longlong.getfloatstorage(1.5)] # 6.0-1.5 == 1.25+3.25 + res = self.cpu.execute_token(othertoken, *args) x = self.cpu.get_latest_value_float(0) assert longlong.getrealfloat(x) == 13.5 assert called == [done_number] @@ -3178,9 +3116,11 @@ looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # overflowing value: - self.cpu.set_future_value_int(0, sys.maxint // 4 + 1) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) assert fail.identifier == excdescr.identifier + exc = self.cpu.grab_exc_value() + assert exc == "memoryerror!" 
+ def test_math_sqrt(self): if not self.cpu.supports_floats: py.test.skip("requires floats") @@ -3228,8 +3168,7 @@ operations[6].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 2 res = self.cpu.get_latest_value_int(0) assert res == 10 @@ -3241,8 +3180,7 @@ ] self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken) - self.cpu.set_future_value_int(0, 2) - fail = self.cpu.execute_token(looptoken) + fail = self.cpu.execute_token(looptoken, 2) assert fail.identifier == 3 res = self.cpu.get_latest_value_int(0) assert res == -10 @@ -3322,13 +3260,13 @@ self.cpu.compile_bridge(faildescr1, inputargs, operations, looptoken1) looptoken2 = JitCellToken() - inputargs = [] + inputargs = [BoxInt()] operations = [ ResOperation(rop.JUMP, [ConstInt(0)], None, descr=targettoken1), ] self.cpu.compile_loop(inputargs, operations, looptoken2) - fail = self.cpu.execute_token(looptoken2) + fail = self.cpu.execute_token(looptoken2, -9) assert fail.identifier == 42 diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -6,6 +6,7 @@ from pypy.jit.metainterp.history import BoxInt, ConstInt, JitCellToken from pypy.jit.metainterp.history import BoxPtr, ConstPtr, TargetToken from pypy.jit.metainterp.history import BoxFloat, ConstFloat, Const +from pypy.jit.metainterp.history import INT, FLOAT from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.executor import execute_nonspec from pypy.jit.metainterp.resoperation import opname @@ -616,8 +617,13 @@ return self.loop._jitcelltoken if not hasattr(self, '_initialjumploop_celltoken'): self._initialjumploop_celltoken = JitCellToken() - self.cpu.compile_loop(self.startvars[:], - [ResOperation(rop.JUMP, self.startvars[:], None, + args = [] + for box in self.startvars: + if box not in self.loop.inputargs: + box = box.constbox() + args.append(box) + self.cpu.compile_loop(self.loop.inputargs, + [ResOperation(rop.JUMP, args, None, descr=self.loop._targettoken)], self._initialjumploop_celltoken) return self._initialjumploop_celltoken @@ -649,14 +655,8 @@ exc = cpu.grab_exc_value() assert not exc - for i, box in enumerate(self.startvars): - if isinstance(box, BoxInt): - cpu.set_future_value_int(i, box.value) - elif isinstance(box, BoxFloat): - cpu.set_future_value_float(i, box.value) - else: - raise NotImplementedError(box) - fail = cpu.execute_token(self.runjitcelltoken()) + arguments = [box.value for box in self.loop.inputargs] + fail = cpu.execute_token(self.runjitcelltoken(), *arguments) assert fail is self.should_fail_by.getdescr() for i, v in enumerate(self.get_fail_args()): if isinstance(v, (BoxFloat, ConstFloat)): @@ -717,10 +717,21 @@ # to build_bridge().) # First make up the other loop... 
- subset = bridge_builder.subset_of_intvars(r) - subset = [i for i in subset if i in fail_args] - if len(subset) == 0: - return False + # + # New restriction: must have the same argument count and types + # as the original loop + subset = [] + for box in self.loop.inputargs: + srcbox = r.choice(fail_args) + if srcbox.type != box.type: + if box.type == INT: + srcbox = ConstInt(r.random_integer()) + elif box.type == FLOAT: + srcbox = ConstFloat(r.random_float_storage()) + else: + raise AssertionError(box.type) + subset.append(srcbox) + # args = [x.clonebox() for x in subset] rl = RandomLoop(self.builder.cpu, self.builder.fork, r, args) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -38,6 +38,8 @@ from pypy.jit.backend.x86.jump import remap_frame_layout from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter import longlong +from pypy.rlib.rarithmetic import intmask +from pypy.rlib.objectmodel import compute_unique_id # darwin requires the stack to be 16 bytes aligned on calls. Same for gcc 4.5.0, # better safe than sorry @@ -57,7 +59,8 @@ self.is_guard_not_invalidated = is_guard_not_invalidated DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed), - ('bridge', lltype.Signed), # 0 or 1 + ('type', lltype.Char), # 'b'ridge, 'l'abel or + # 'e'ntry point ('number', lltype.Signed)) class Assembler386(object): @@ -69,10 +72,6 @@ self.cpu = cpu self.verbose = False self.rtyper = cpu.rtyper - self.malloc_func_addr = 0 - self.malloc_array_func_addr = 0 - self.malloc_str_func_addr = 0 - self.malloc_unicode_func_addr = 0 self.fail_boxes_int = values_array(lltype.Signed, failargs_limit) self.fail_boxes_ptr = values_array(llmemory.GCREF, failargs_limit) self.fail_boxes_float = values_array(longlong.FLOATSTORAGE, @@ -107,20 +106,6 @@ # the address of the function called by 'new' gc_ll_descr = self.cpu.gc_ll_descr gc_ll_descr.initialize() - ll_new = gc_ll_descr.get_funcptr_for_new() - self.malloc_func_addr = rffi.cast(lltype.Signed, ll_new) - if gc_ll_descr.get_funcptr_for_newarray is not None: - ll_new_array = gc_ll_descr.get_funcptr_for_newarray() - self.malloc_array_func_addr = rffi.cast(lltype.Signed, - ll_new_array) - if gc_ll_descr.get_funcptr_for_newstr is not None: - ll_new_str = gc_ll_descr.get_funcptr_for_newstr() - self.malloc_str_func_addr = rffi.cast(lltype.Signed, - ll_new_str) - if gc_ll_descr.get_funcptr_for_newunicode is not None: - ll_new_unicode = gc_ll_descr.get_funcptr_for_newunicode() - self.malloc_unicode_func_addr = rffi.cast(lltype.Signed, - ll_new_unicode) self.memcpy_addr = self.cpu.cast_ptr_to_int(support.memcpy_fn) self._build_failure_recovery(False) self._build_failure_recovery(True) @@ -164,12 +149,15 @@ def finish_once(self): if self._debug: debug_start('jit-backend-counts') - for struct in self.loop_run_counters: - if struct.bridge: - prefix = 'bridge ' + for i in range(len(self.loop_run_counters)): + struct = self.loop_run_counters[i] + if struct.type == 'l': + prefix = 'TargetToken(%d)' % struct.number + elif struct.type == 'b': + prefix = 'bridge ' + str(struct.number) else: - prefix = 'loop ' - debug_print(prefix + str(struct.number) + ':' + str(struct.i)) + prefix = 'entry ' + str(struct.number) + debug_print(prefix + ':' + str(struct.i)) debug_stop('jit-backend-counts') def _build_float_constants(self): @@ -274,7 +262,8 @@ # self.mc = codebuf.MachineCodeBlockWrapper() # call on_leave_jitted_save_exc() - addr = 
self.cpu.get_on_leave_jitted_int(save_exception=True) + addr = self.cpu.get_on_leave_jitted_int(save_exception=True, + default_to_memoryerror=True) self.mc.CALL(imm(addr)) self.mc.MOV_ri(eax.value, self.cpu.propagate_exception_v) self._call_footer() @@ -309,12 +298,11 @@ mc.MOVSD_sx(8*i, i) # xmm0 to xmm7 # if IS_X86_32: - mc.LEA_rb(eax.value, +8) stack_size += 2*WORD mc.PUSH_r(eax.value) # alignment - mc.PUSH_r(eax.value) + mc.PUSH_r(esp.value) elif IS_X86_64: - mc.LEA_rb(edi.value, +16) + mc.MOV_rr(edi.value, esp.value) # # esp is now aligned to a multiple of 16 again mc.CALL(imm(slowpathaddr)) @@ -325,7 +313,7 @@ jnz_location = mc.get_relative_pos() # if IS_X86_32: - mc.ADD_ri(esp.value, 2*WORD) + mc.ADD_ri(esp.value, 2*WORD) # cancel the two PUSHes above elif IS_X86_64: # restore the registers for i in range(7, -1, -1): @@ -421,10 +409,8 @@ def assemble_loop(self, loopname, inputargs, operations, looptoken, log): '''adds the following attributes to looptoken: - _x86_loop_code (an integer giving an address) - _x86_bootstrap_code (an integer giving an address) - _x86_direct_bootstrap_code ( " " " " ) - _x86_arglocs + _x86_function_addr (address of the generated func, as an int) + _x86_loop_code (debug: addr of the start of the ResOps) _x86_debug_checksum ''' # XXX this function is too longish and contains some code @@ -441,16 +427,16 @@ self.setup(looptoken) if log: - self._register_counter(False, looptoken.number) - operations = self._inject_debugging_code(looptoken, operations) + operations = self._inject_debugging_code(looptoken, operations, + 'e', looptoken.number) regalloc = RegAlloc(self, self.cpu.translate_support_code) - arglocs, operations = regalloc.prepare_loop(inputargs, operations, - looptoken, clt.allgcrefs) - looptoken._x86_arglocs = arglocs - - bootstrappos = self.mc.get_relative_pos() - stackadjustpos = self._assemble_bootstrap_code(inputargs, arglocs) + # + self._call_header_with_stack_check() + stackadjustpos = self._patchable_stackadjust() + clt._debug_nbargs = len(inputargs) + operations = regalloc.prepare_loop(inputargs, operations, + looptoken, clt.allgcrefs) looppos = self.mc.get_relative_pos() looptoken._x86_loop_code = looppos clt.frame_depth = -1 # temporarily @@ -458,19 +444,17 @@ frame_depth, param_depth = self._assemble(regalloc, operations) clt.frame_depth = frame_depth clt.param_depth = param_depth - - directbootstrappos = self.mc.get_relative_pos() - self._assemble_bootstrap_direct_call(arglocs, looppos, - frame_depth+param_depth) + # + size_excluding_failure_stuff = self.mc.get_relative_pos() self.write_pending_failure_recoveries() - fullsize = self.mc.get_relative_pos() + full_size = self.mc.get_relative_pos() # rawstart = self.materialize_loop(looptoken) debug_start("jit-backend-addr") debug_print("Loop %d (%s) has address %x to %x (bootstrap %x)" % ( looptoken.number, loopname, rawstart + looppos, - rawstart + directbootstrappos, + rawstart + size_excluding_failure_stuff, rawstart)) debug_stop("jit-backend-addr") self._patch_stackadjust(rawstart + stackadjustpos, @@ -481,18 +465,17 @@ if not we_are_translated(): # used only by looptoken.dump() -- useful in tests looptoken._x86_rawstart = rawstart - looptoken._x86_fullsize = fullsize + looptoken._x86_fullsize = full_size looptoken._x86_ops_offset = ops_offset + looptoken._x86_function_addr = rawstart - looptoken._x86_bootstrap_code = rawstart + bootstrappos - looptoken._x86_direct_bootstrap_code = rawstart + directbootstrappos self.fixup_target_tokens(rawstart) self.teardown() # oprofile support if 
self.cpu.profile_agent is not None: name = "Loop # %s: %s" % (looptoken.number, loopname) self.cpu.profile_agent.native_code_written(name, - rawstart, fullsize) + rawstart, full_size) return ops_offset def assemble_bridge(self, faildescr, inputargs, operations, @@ -511,8 +494,8 @@ self.setup(original_loop_token) if log: - self._register_counter(True, descr_number) - operations = self._inject_debugging_code(faildescr, operations) + operations = self._inject_debugging_code(faildescr, operations, + 'b', descr_number) arglocs = self.rebuild_faillocs_from_descr(failure_recovery) if not we_are_translated(): @@ -619,17 +602,21 @@ return self.mc.materialize(self.cpu.asmmemmgr, allblocks, self.cpu.gc_ll_descr.gcrootmap) - def _register_counter(self, bridge, number): - if self._debug: - # YYY very minor leak -- we need the counters to stay alive - # forever, just because we want to report them at the end - # of the process - struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', - track_allocation=False) - struct.i = 0 - struct.bridge = int(bridge) + def _register_counter(self, tp, number, token): + # YYY very minor leak -- we need the counters to stay alive + # forever, just because we want to report them at the end + # of the process + struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', + track_allocation=False) + struct.i = 0 + struct.type = tp + if tp == 'b' or tp == 'e': struct.number = number - self.loop_run_counters.append(struct) + else: + assert token + struct.number = compute_unique_id(token) + self.loop_run_counters.append(struct) + return struct def _find_failure_recovery_bytecode(self, faildescr): adr_jump_offset = faildescr._x86_adr_jump_offset @@ -673,27 +660,36 @@ targettoken._x86_loop_code += rawstart self.target_tokens_currently_compiling = None + def _append_debugging_code(self, operations, tp, number, token): + counter = self._register_counter(tp, number, token) + c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) + box = BoxInt() + box2 = BoxInt() + ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], + box, descr=self.debug_counter_descr), + ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), + ResOperation(rop.SETFIELD_RAW, [c_adr, box2], + None, descr=self.debug_counter_descr)] + operations.extend(ops) + @specialize.argtype(1) - def _inject_debugging_code(self, looptoken, operations): + def _inject_debugging_code(self, looptoken, operations, tp, number): if self._debug: # before doing anything, let's increase a counter s = 0 for op in operations: s += op.getopnum() looptoken._x86_debug_checksum = s - c_adr = ConstInt(rffi.cast(lltype.Signed, - self.loop_run_counters[-1])) - box = BoxInt() - box2 = BoxInt() - ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], - box, descr=self.debug_counter_descr), - ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), - ResOperation(rop.SETFIELD_RAW, [c_adr, box2], - None, descr=self.debug_counter_descr)] - if operations[0].getopnum() == rop.LABEL: - operations = [operations[0]] + ops + operations[1:] - else: - operations = ops + operations + + newoperations = [] + self._append_debugging_code(newoperations, tp, number, + None) + for op in operations: + newoperations.append(op) + if op.getopnum() == rop.LABEL: + self._append_debugging_code(newoperations, 'l', number, + op.getdescr()) + operations = newoperations return operations def _assemble(self, regalloc, operations): @@ -802,152 +798,21 @@ self.mc.MOV_ri(ebx.value, rst) # MOV ebx, rootstacktop self.mc.SUB_mi8((ebx.value, 0), 2*WORD) # SUB [ebx], 2*WORD - def _assemble_bootstrap_direct_call(self, 
arglocs, jmppos, stackdepth): - if IS_X86_64: - return self._assemble_bootstrap_direct_call_64(arglocs, jmppos, stackdepth) - # XXX pushing ebx esi and edi is a bit pointless, since we store - # all regsiters anyway, for the case of guard_not_forced - # XXX this can be improved greatly. Right now it'll behave like - # a normal call - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - offset = 2 * WORD - tmp = eax - xmmtmp = xmm0 - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert not loc.is_xmm - self.mc.MOV_rb(loc.value, offset) - else: - self.mc.MOV_rb(tmp.value, offset) - self.mc.MOV(loc, tmp) - offset += WORD - loc = floatlocs[i] - if loc is not None: - if isinstance(loc, RegLoc): - assert loc.is_xmm - self.mc.MOVSD_xb(loc.value, offset) - else: - self.mc.MOVSD_xb(xmmtmp.value, offset) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - offset += 2 * WORD - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - - def _assemble_bootstrap_direct_call_64(self, arglocs, jmppos, stackdepth): - # XXX: Very similar to _emit_call_64 - - src_locs = [] - dst_locs = [] - xmm_src_locs = [] - xmm_dst_locs = [] - get_from_stack = [] - - # In reverse order for use with pop() - unused_gpr = [r9, r8, ecx, edx, esi, edi] - unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] - - nonfloatlocs, floatlocs = arglocs - self._call_header_with_stack_check() - self.mc.LEA_rb(esp.value, self._get_offset_of_ebp_from_esp(stackdepth)) - - # The lists are padded with Nones - assert len(nonfloatlocs) == len(floatlocs) - - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is not None: - if len(unused_gpr) > 0: - src_locs.append(unused_gpr.pop()) - dst_locs.append(loc) - else: - get_from_stack.append((loc, False)) - - floc = floatlocs[i] - if floc is not None: - if len(unused_xmm) > 0: - xmm_src_locs.append(unused_xmm.pop()) - xmm_dst_locs.append(floc) - else: - get_from_stack.append((floc, True)) - - remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) - remap_frame_layout(self, xmm_src_locs, xmm_dst_locs, X86_64_XMM_SCRATCH_REG) - - for i in range(len(get_from_stack)): - loc, is_xmm = get_from_stack[i] - if is_xmm: - self.mc.MOVSD_xb(X86_64_XMM_SCRATCH_REG.value, (2 + i) * WORD) - self.mc.MOVSD(loc, X86_64_XMM_SCRATCH_REG) - else: - self.mc.MOV_rb(X86_64_SCRATCH_REG.value, (2 + i) * WORD) - # XXX: We're assuming that "loc" won't require regloc to - # clobber the scratch register - self.mc.MOV(loc, X86_64_SCRATCH_REG) - - endpos = self.mc.get_relative_pos() + 5 - self.mc.JMP_l(jmppos - endpos) - assert endpos == self.mc.get_relative_pos() - def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking - oldnonfloatlocs, oldfloatlocs = oldlooptoken._x86_arglocs - newnonfloatlocs, newfloatlocs = newlooptoken._x86_arglocs - assert len(oldnonfloatlocs) == len(newnonfloatlocs) - assert len(oldfloatlocs) == len(newfloatlocs) + old_nbargs = oldlooptoken.compiled_loop_token._debug_nbargs + new_nbargs = newlooptoken.compiled_loop_token._debug_nbargs + assert old_nbargs == new_nbargs # we overwrite the instructions at the old _x86_direct_bootstrap_code # to start with a JMP to the new _x86_direct_bootstrap_code. # Ideally we should rather patch all existing CALLs, but well. 
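For reference, finish_once() a little earlier prints each counter as prefix + ':' + count, so with the new 'type' field the jit-backend-counts output contains lines of the following three shapes (the numbers here are made up for illustration):

    entry 0:2000
    TargetToken(140352487514336):2000
    bridge 3:150

where the TargetToken number is the compute_unique_id() of the label's token as recorded by _register_counter().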
- oldadr = oldlooptoken._x86_direct_bootstrap_code - target = newlooptoken._x86_direct_bootstrap_code + oldadr = oldlooptoken._x86_function_addr + target = newlooptoken._x86_function_addr mc = codebuf.MachineCodeBlockWrapper() mc.JMP(imm(target)) + assert mc.get_relative_pos() <= 13 # keep in sync with prepare_loop() mc.copy_to_raw_memory(oldadr) - def _assemble_bootstrap_code(self, inputargs, arglocs): - nonfloatlocs, floatlocs = arglocs - self._call_header() - stackadjustpos = self._patchable_stackadjust() - tmp = eax - xmmtmp = xmm0 - self.mc.begin_reuse_scratch_register() - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is None: - continue - if isinstance(loc, RegLoc): - target = loc - else: - target = tmp - if inputargs[i].type == REF: - adr = self.fail_boxes_ptr.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - self.mc.MOV(heap(adr), imm0) - else: - adr = self.fail_boxes_int.get_addr_for_num(i) - self.mc.MOV(target, heap(adr)) - if target is not loc: - assert isinstance(loc, StackLoc) - self.mc.MOV_br(loc.value, target.value) - for i in range(len(floatlocs)): - loc = floatlocs[i] - if loc is None: - continue - adr = self.fail_boxes_float.get_addr_for_num(i) - if isinstance(loc, RegLoc): - self.mc.MOVSD(loc, heap(adr)) - else: - self.mc.MOVSD(xmmtmp, heap(adr)) - assert isinstance(loc, StackLoc) - self.mc.MOVSD_bx(loc.value, xmmtmp.value) - self.mc.end_reuse_scratch_register() - return stackadjustpos - def dump(self, text): if not self.verbose: return @@ -974,7 +839,7 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.SUB_ri(esp.value, 8) # = size of doubles self.mc.MOVSD_sx(0, loc.value) - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.PUSH_b(get_ebp_ofs(loc.position)) self.mc.PUSH_b(get_ebp_ofs(loc.position + 1)) @@ -985,13 +850,25 @@ if isinstance(loc, RegLoc) and loc.is_xmm: self.mc.MOVSD_xs(loc.value, 0) self.mc.ADD_ri(esp.value, 8) # = size of doubles - elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8: + elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8: # XXX evil trick self.mc.POP_b(get_ebp_ofs(loc.position + 1)) self.mc.POP_b(get_ebp_ofs(loc.position)) else: self.mc.POP(loc) + def regalloc_immedmem2mem(self, from_loc, to_loc): + # move a ConstFloatLoc directly to a StackLoc, as two MOVs + # (even on x86-64, because the immediates are encoded as 32 bits) + assert isinstance(from_loc, ConstFloatLoc) + assert isinstance(to_loc, StackLoc) + low_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[0] + high_part = rffi.cast(rffi.CArrayPtr(rffi.INT), from_loc.value)[1] + low_part = intmask(low_part) + high_part = intmask(high_part) + self.mc.MOV32_bi(to_loc.value, low_part) + self.mc.MOV32_bi(to_loc.value + 4, high_part) + def regalloc_perform(self, op, arglocs, resloc): genop_list[op.getopnum()](self, op, arglocs, resloc) @@ -1143,18 +1020,18 @@ self.mc.MOVSD_sx(p, loc.value) else: self.mc.MOV_sr(p, loc.value) - p += round_up_to_4(loc.width) + p += loc.get_width() p = 0 for i in range(start, n): loc = arglocs[i] if not isinstance(loc, RegLoc): - if loc.width == 8: + if loc.get_width() == 8: self.mc.MOVSD(xmm0, loc) self.mc.MOVSD_sx(p, xmm0.value) else: self.mc.MOV(tmp, loc) self.mc.MOV_sr(p, tmp.value) - p += round_up_to_4(loc.width) + p += loc.get_width() self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) @@ -1481,46 +1358,10 @@ self.mc.SHR_ri(resloc.value, 7) 
self.mc.AND_ri(resloc.value, 1) - def genop_new_with_vtable(self, op, arglocs, result_loc): - assert result_loc is eax - loc_vtable = arglocs[-1] - assert isinstance(loc_vtable, ImmedLoc) - arglocs = arglocs[:-1] - self.call(self.malloc_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - self.set_vtable(eax, loc_vtable) + # ---------- - def set_vtable(self, loc, loc_vtable): - if self.cpu.vtable_offset is not None: - assert isinstance(loc, RegLoc) - assert isinstance(loc_vtable, ImmedLoc) - self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) - - def set_new_array_length(self, loc, ofs_length, loc_num_elem): - assert isinstance(loc, RegLoc) - assert isinstance(loc_num_elem, ImmedLoc) - self.mc.MOV(mem(loc, ofs_length), loc_num_elem) - - # XXX genop_new is abused for all varsized mallocs with Boehm, for now - # (instead of genop_new_array, genop_newstr, genop_newunicode) - def genop_new(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_new_array(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_array_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_newstr(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_str_func_addr, arglocs, eax) - self.propagate_memoryerror_if_eax_is_null() - - def genop_newunicode(self, op, arglocs, result_loc): - assert result_loc is eax - self.call(self.malloc_unicode_func_addr, arglocs, eax) + def genop_call_malloc_gc(self, op, arglocs, result_loc): + self.genop_call(op, arglocs, result_loc) self.propagate_memoryerror_if_eax_is_null() def propagate_memoryerror_if_eax_is_null(self): @@ -1891,10 +1732,10 @@ DESCR_INT = 0x01 DESCR_FLOAT = 0x02 DESCR_SPECIAL = 0x03 - # XXX: 4*8 works on i386, should we optimize for that case? - CODE_FROMSTACK = 4*16 + CODE_FROMSTACK = 4 * (8 + 8*IS_X86_64) CODE_STOP = 0 | DESCR_SPECIAL CODE_HOLE = 4 | DESCR_SPECIAL + CODE_INPUTARG = 8 | DESCR_SPECIAL def write_failure_recovery_description(self, mc, failargs, locs): for i in range(len(failargs)): @@ -1910,7 +1751,11 @@ raise AssertionError("bogus kind") loc = locs[i] if isinstance(loc, StackLoc): - n = self.CODE_FROMSTACK//4 + loc.position + pos = loc.position + if pos < 0: + mc.writechar(chr(self.CODE_INPUTARG)) + pos = ~pos + n = self.CODE_FROMSTACK//4 + pos else: assert isinstance(loc, RegLoc) n = loc.value @@ -1930,6 +1775,7 @@ descr_to_box_type = [REF, INT, FLOAT] bytecode = rffi.cast(rffi.UCHARP, bytecode) arglocs = [] + code_inputarg = False while 1: # decode the next instruction from the bytecode code = rffi.cast(lltype.Signed, bytecode[0]) @@ -1948,11 +1794,17 @@ break kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 + if code_inputarg: + code = ~code + code_inputarg = False loc = X86FrameManager.frame_pos(code, descr_to_box_type[kind]) elif code == self.CODE_STOP: break elif code == self.CODE_HOLE: continue + elif code == self.CODE_INPUTARG: + code_inputarg = True + continue else: # 'code' identifies a register kind = code & 3 @@ -1968,6 +1820,7 @@ def grab_frame_values(self, bytecode, frame_addr, allregisters): # no malloc allowed here!! 
self.fail_ebp = allregisters[16 + ebp.value] + code_inputarg = False num = 0 value_hi = 0 while 1: @@ -1988,6 +1841,9 @@ # load the value from the stack kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 + if code_inputarg: + code = ~code + code_inputarg = False stackloc = frame_addr + get_ebp_ofs(code) value = rffi.cast(rffi.LONGP, stackloc)[0] if kind == self.DESCR_FLOAT and WORD == 4: @@ -2000,6 +1856,9 @@ if code == self.CODE_HOLE: num += 1 continue + if code == self.CODE_INPUTARG: + code_inputarg = True + continue assert code == self.CODE_STOP break code >>= 2 @@ -2104,9 +1963,9 @@ # returns in eax the fail_index # now we return from the complete frame, which starts from - # _assemble_bootstrap_code(). The LEA in _call_footer below throws - # away most of the frame, including all the PUSHes that we did just - # above. + # _call_header_with_stack_check(). The LEA in _call_footer below + # throws away most of the frame, including all the PUSHes that we + # did just above. self._call_footer() rawstart = mc.materialize(self.cpu.asmmemmgr, []) @@ -2171,6 +2030,8 @@ self._genop_call(op, arglocs, resloc, force_index) def _genop_call(self, op, arglocs, resloc, force_index): + from pypy.jit.backend.llsupport.descr import CallDescr + sizeloc = arglocs[0] assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value @@ -2185,13 +2046,16 @@ else: tmp = eax + descr = op.getdescr() + assert isinstance(descr, CallDescr) + self._emit_call(force_index, x, arglocs, 3, tmp=tmp, - argtypes=op.getdescr().get_arg_types(), - callconv=op.getdescr().get_call_conv()) + argtypes=descr.get_arg_types(), + callconv=descr.get_call_conv()) - if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: + if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.type == FLOAT: # a float or a long long return - if op.getdescr().get_return_type() == 'L': + if descr.get_result_type() == 'L': self.mc.MOV_br(resloc.value, eax.value) # long long self.mc.MOV_br(resloc.value + 4, edx.value) # XXX should ideally not move the result on the stack, @@ -2200,7 +2064,7 @@ # can just be always a stack location else: self.mc.FSTPL_b(resloc.value) # float return - elif op.getdescr().get_return_type() == 'S': + elif descr.get_result_type() == 'S': # singlefloat return assert resloc is eax if IS_X86_32: @@ -2354,10 +2218,10 @@ self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) descr = op.getdescr() assert isinstance(descr, JitCellToken) - assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) + assert len(arglocs) - 2 == descr.compiled_loop_token._debug_nbargs # - # Write a call to the direct_bootstrap_code of the target assembler - self._emit_call(fail_index, imm(descr._x86_direct_bootstrap_code), + # Write a call to the target assembler + self._emit_call(fail_index, imm(descr._x86_function_addr), arglocs, 2, tmp=eax) if op.result is None: assert result_loc is None @@ -2398,9 +2262,9 @@ # # Reset the vable token --- XXX really too much special logic here:-( if jd.index_of_virtualizable >= 0: - from pypy.jit.backend.llsupport.descr import BaseFieldDescr + from pypy.jit.backend.llsupport.descr import FieldDescr fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) ofs = fielddescr.offset self.mc.MOV(eax, arglocs[1]) self.mc.MOV_mi((eax.value, ofs), 0) @@ -2588,6 +2452,14 @@ self.gcrootmap_retaddr_forced = -1 def closing_jump(self, target_token): + # The backend's logic assumes that the target code is in a piece of + # assembler that was also called with the same 
number of arguments, + # so that the locations [ebp+8..] of the input arguments are valid + # stack locations both before and after the jump. + my_nbargs = self.current_clt._debug_nbargs + target_nbargs = target_token._x86_clt._debug_nbargs + assert my_nbargs == target_nbargs + # target = target_token._x86_loop_code if target_token in self.target_tokens_currently_compiling: curpos = self.mc.get_relative_pos() + 5 @@ -2595,9 +2467,8 @@ else: self.mc.JMP(imm(target)) - def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): - size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) - size = (size + WORD-1) & ~(WORD-1) # round up + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size): + assert size & (WORD-1) == 0 # must be correctly aligned self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edx.value, (eax.value, size)) self.mc.CMP(edx, heap(nursery_top_adr)) @@ -2633,9 +2504,6 @@ offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) - # on 64-bits, 'tid' is a value that fits in 31 bits - assert rx86.fits_in_32bits(tid) - self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) genop_discard_list = [Assembler386.not_implemented_op_discard] * rop._LAST @@ -2666,11 +2534,6 @@ num = getattr(rop, opname.upper()) genop_list[num] = value -def round_up_to_4(size): - if size < 4: - return 4 - return size - # XXX: ri386 migration shims: def addr_add(reg_or_imm1, reg_or_imm2, offset=0, scale=0): return AddressLoc(reg_or_imm1, reg_or_imm2, scale, offset) diff --git a/pypy/jit/backend/x86/jump.py b/pypy/jit/backend/x86/jump.py --- a/pypy/jit/backend/x86/jump.py +++ b/pypy/jit/backend/x86/jump.py @@ -1,6 +1,6 @@ import sys from pypy.tool.pairtype import extendabletype -from pypy.jit.backend.x86.regloc import ImmedLoc, StackLoc +from pypy.jit.backend.x86.regloc import ImmediateAssemblerLocation, StackLoc def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg): pending_dests = len(dst_locations) @@ -12,12 +12,15 @@ srccount[key] = 0 for i in range(len(dst_locations)): src = src_locations[i] - if isinstance(src, ImmedLoc): + if isinstance(src, ImmediateAssemblerLocation): continue key = src._getregkey() if key in srccount: if key == dst_locations[i]._getregkey(): - srccount[key] = -sys.maxint # ignore a move "x = x" + # ignore a move "x = x" + # setting any "large enough" negative value is ok, but + # be careful of overflows, don't use -sys.maxint + srccount[key] = -len(dst_locations) - 1 pending_dests -= 1 else: srccount[key] += 1 @@ -31,7 +34,7 @@ srccount[key] = -1 # means "it's done" pending_dests -= 1 src = src_locations[i] - if not isinstance(src, ImmedLoc): + if not isinstance(src, ImmediateAssemblerLocation): key = src._getregkey() if key in srccount: srccount[key] -= 1 @@ -66,6 +69,13 @@ def _move(assembler, src, dst, tmpreg): if dst.is_memory_reference() and src.is_memory_reference(): + if isinstance(src, ImmediateAssemblerLocation): + assembler.regalloc_immedmem2mem(src, dst) + return + if tmpreg is None: + assembler.regalloc_push(src) + assembler.regalloc_pop(dst) + return assembler.regalloc_mov(src, tmpreg) src = tmpreg assembler.regalloc_mov(src, dst) @@ -87,7 +97,7 @@ dstloc = dst_locations2[i] if isinstance(loc, StackLoc): key = loc._getregkey() - if (key in dst_keys or (loc.width > WORD and + if (key in dst_keys or (loc.get_width() > WORD and (key + WORD) in dst_keys)): assembler.regalloc_push(loc) extrapushes.append(dstloc) diff --git 
a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -16,8 +16,8 @@ from pypy.jit.codewriter import heaptracker, longlong from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.metainterp.resoperation import rop -from pypy.jit.backend.llsupport.descr import BaseFieldDescr, BaseArrayDescr -from pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr +from pypy.jit.backend.llsupport.descr import FieldDescr, ArrayDescr +from pypy.jit.backend.llsupport.descr import CallDescr, SizeDescr from pypy.jit.backend.llsupport.descr import InteriorFieldDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox, compute_vars_longevity, is_comparison_or_ovf_op @@ -28,7 +28,7 @@ class X86RegisterManager(RegisterManager): box_types = [INT, REF] - all_regs = [eax, ecx, edx, ebx, esi, edi] + all_regs = [ecx, eax, edx, ebx, esi, edi] no_lower_byte_regs = [esi, edi] save_around_call_regs = [eax, edx, ecx] frame_reg = ebp @@ -60,7 +60,7 @@ class X86_64_RegisterManager(X86RegisterManager): # r11 omitted because it's used as scratch - all_regs = [eax, ecx, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15] + all_regs = [ecx, eax, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15] no_lower_byte_regs = [] save_around_call_regs = [eax, ecx, edx, esi, edi, r8, r9, r10] @@ -130,9 +130,9 @@ @staticmethod def frame_pos(i, box_type): if IS_X86_32 and box_type == FLOAT: - return StackLoc(i, get_ebp_ofs(i+1), 2, box_type) + return StackLoc(i, get_ebp_ofs(i+1), box_type) else: - return StackLoc(i, get_ebp_ofs(i), 1, box_type) + return StackLoc(i, get_ebp_ofs(i), box_type) @staticmethod def frame_size(box_type): if IS_X86_32 and box_type == FLOAT: @@ -165,6 +165,7 @@ self.jump_target_descr = None self.close_stack_struct = 0 self.final_jump_op = None + self.min_bytes_before_label = 0 def _prepare(self, inputargs, operations, allgcrefs): self.fm = X86FrameManager() @@ -173,22 +174,26 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - longevity, useful = self._compute_vars_longevity(inputargs, operations) - self.longevity = longevity - self.rm = gpr_reg_mgr_cls(longevity, + self._compute_vars_longevity(inputargs, operations) + self.rm = gpr_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) - self.xrm = xmm_reg_mgr_cls(longevity, frame_manager = self.fm, + self.xrm = xmm_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) - return operations, useful + return operations def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): - operations, useful = self._prepare(inputargs, operations, allgcrefs) - return self._process_inputargs(inputargs, useful), operations + operations = self._prepare(inputargs, operations, allgcrefs) + self._set_initial_bindings(inputargs) + # note: we need to make a copy of inputargs because possibly_free_vars + # is also used on op args, which is a non-resizable list + self.possibly_free_vars(list(inputargs)) + self.min_bytes_before_label = 13 + return operations def prepare_bridge(self, prev_depths, inputargs, arglocs, operations, allgcrefs): - operations, _ = self._prepare(inputargs, operations, allgcrefs) + operations = self._prepare(inputargs, operations, allgcrefs) self._update_bindings(arglocs, inputargs) self.param_depth = prev_depths[1] return operations @@ -196,46 +201,56 @@ def reserve_param(self, n): 
self.param_depth = max(self.param_depth, n) - def _process_inputargs(self, inputargs, useful): - # XXX we can sort out here by longevity if we need something - # more optimal - floatlocs = [None] * len(inputargs) - nonfloatlocs = [None] * len(inputargs) - # Don't use all_regs[0] for passing arguments around a loop. - # Must be kept in sync with consider_jump(). - # XXX this should probably go to llsupport/regalloc.py - xmmtmp = self.xrm.free_regs.pop(0) - tmpreg = self.rm.free_regs.pop(0) - assert tmpreg == X86RegisterManager.all_regs[0] - assert xmmtmp == X86XMMRegisterManager.all_regs[0] - for i in range(len(inputargs)): - arg = inputargs[i] - assert not isinstance(arg, Const) - reg = None - if self.longevity[arg][1] > -1 and arg in useful: - if arg.type == FLOAT: - # xxx is it really a good idea? at the first CALL they - # will all be flushed anyway - reg = self.xrm.try_allocate_reg(arg) + def _set_initial_bindings(self, inputargs): + if IS_X86_64: + inputargs = self._set_initial_bindings_regs_64(inputargs) + # ... + # stack layout: arg2 + # arg1 + # arg0 + # return address + # saved ebp <-- ebp points here + # ... + cur_frame_pos = - 1 - FRAME_FIXED_SIZE + assert get_ebp_ofs(cur_frame_pos-1) == 2*WORD + assert get_ebp_ofs(cur_frame_pos-2) == 3*WORD + # + for box in inputargs: + assert isinstance(box, Box) + # + if IS_X86_32 and box.type == FLOAT: + cur_frame_pos -= 2 + else: + cur_frame_pos -= 1 + loc = self.fm.frame_pos(cur_frame_pos, box.type) + self.fm.set_binding(box, loc) + + def _set_initial_bindings_regs_64(self, inputargs): + # In reverse order for use with pop() + unused_gpr = [r9, r8, ecx, edx, esi, edi] + unused_xmm = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] + # + pass_on_stack = [] + # + for box in inputargs: + assert isinstance(box, Box) + # + if box.type == FLOAT: + if len(unused_xmm) > 0: + ask = unused_xmm.pop() + got = self.xrm.try_allocate_reg(box, selected_reg=ask) + assert ask == got else: - reg = self.rm.try_allocate_reg(arg) - if reg: - loc = reg + pass_on_stack.append(box) else: - loc = self.fm.loc(arg) - if arg.type == FLOAT: - floatlocs[i] = loc - else: - nonfloatlocs[i] = loc - # otherwise we have it saved on stack, so no worry - self.rm.free_regs.insert(0, tmpreg) - self.xrm.free_regs.insert(0, xmmtmp) - assert tmpreg not in nonfloatlocs - assert xmmtmp not in floatlocs - # note: we need to make a copy of inputargs because possibly_free_vars - # is also used on op args, which is a non-resizable list - self.possibly_free_vars(list(inputargs)) - return nonfloatlocs, floatlocs + if len(unused_gpr) > 0: + ask = unused_gpr.pop() + got = self.rm.try_allocate_reg(box, selected_reg=ask) + assert ask == got + else: + pass_on_stack.append(box) + # + return pass_on_stack def possibly_free_var(self, var): if var.type == FLOAT: @@ -436,10 +451,16 @@ i += 1 assert not self.rm.reg_bindings assert not self.xrm.reg_bindings + self.flush_loop() self.assembler.mc.mark_op(None) # end of the loop + def flush_loop(self): + # rare case: if the loop is too short, pad with NOPs + mc = self.assembler.mc + while mc.get_relative_pos() < self.min_bytes_before_label: + mc.NOP() - def loc(self, v): + def loc(self, v): if v is None: # xxx kludgy return None if v.type == FLOAT: @@ -785,9 +806,9 @@ def _consider_call(self, op, guard_not_forced_op=None): calldescr = op.getdescr() - assert isinstance(calldescr, BaseCallDescr) + assert isinstance(calldescr, CallDescr) assert len(calldescr.arg_classes) == op.numargs() - 1 - size = calldescr.get_result_size(self.translate_support_code) + 
size = calldescr.get_result_size() sign = calldescr.is_result_signed() if sign: sign_loc = imm1 @@ -832,12 +853,15 @@ consider_call_release_gil = consider_call_may_force + def consider_call_malloc_gc(self, op): + self._consider_call(op) + def consider_call_assembler(self, op, guard_op): descr = op.getdescr() assert isinstance(descr, JitCellToken) jd = descr.outermost_jitdriver_sd assert jd is not None - size = jd.portal_calldescr.get_result_size(self.translate_support_code) + size = jd.portal_calldescr.get_result_size() vable_index = jd.index_of_virtualizable if vable_index >= 0: self.rm._sync_var(op.getarg(vable_index)) @@ -872,21 +896,10 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb - def fastpath_malloc_fixedsize(self, op, descr): - assert isinstance(descr, BaseSizeDescr) - self._do_fastpath_malloc(op, descr.size, descr.tid) - - def fastpath_malloc_varsize(self, op, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - basesize = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - size = basesize + itemsize * num_elem - self._do_fastpath_malloc(op, size, arraydescr.tid) - self.assembler.set_new_array_length(eax, ofs_length, imm(num_elem)) - - def _do_fastpath_malloc(self, op, size, tid): - gc_ll_descr = self.assembler.cpu.gc_ll_descr + def consider_call_malloc_nursery(self, op): + size_box = op.getarg(0) + assert isinstance(size_box, ConstInt) + size = size_box.getint() self.rm.force_allocate_reg(op.result, selected_reg=eax) # # We need edx as a temporary, but otherwise don't save any more @@ -895,86 +908,39 @@ self.rm.force_allocate_reg(tmp_box, selected_reg=edx) self.rm.possibly_free_var(tmp_box) # + gc_ll_descr = self.assembler.cpu.gc_ll_descr self.assembler.malloc_cond( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), - size, tid, - ) - - def consider_new(self, op): - gc_ll_descr = self.assembler.cpu.gc_ll_descr - if gc_ll_descr.can_inline_malloc(op.getdescr()): - self.fastpath_malloc_fixedsize(op, op.getdescr()) - else: - args = gc_ll_descr.args_for_new(op.getdescr()) - arglocs = [imm(x) for x in args] - return self._call(op, arglocs) - - def consider_new_with_vtable(self, op): - classint = op.getarg(0).getint() - descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) - if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): - self.fastpath_malloc_fixedsize(op, descrsize) - self.assembler.set_vtable(eax, imm(classint)) - # result of fastpath malloc is in eax - else: - args = self.assembler.cpu.gc_ll_descr.args_for_new(descrsize) - arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.getarg(0))) - return self._call(op, arglocs) - - def consider_newstr(self, op): - loc = self.loc(op.getarg(0)) - return self._call(op, [loc]) - - def consider_newunicode(self, op): - loc = self.loc(op.getarg(0)) - return self._call(op, [loc]) - - def consider_new_array(self, op): - gc_ll_descr = self.assembler.cpu.gc_ll_descr - box_num_elem = op.getarg(0) - if isinstance(box_num_elem, ConstInt): - num_elem = box_num_elem.value - if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), - num_elem): - self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) - return - args = self.assembler.cpu.gc_ll_descr.args_for_new_array( - op.getdescr()) - arglocs = [imm(x) for x in args] - arglocs.append(self.loc(box_num_elem)) - self._call(op, arglocs) + size) def _unpack_arraydescr(self, 
arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.translate_support_code) - ofs = arraydescr.get_base_size(self.translate_support_code) - size = arraydescr.get_item_size(self.translate_support_code) - ptr = arraydescr.is_array_of_pointers() + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.basesize + size = arraydescr.itemsize sign = arraydescr.is_item_signed() - return size, ofs, ofs_length, ptr, sign + return size, ofs, sign def _unpack_fielddescr(self, fielddescr): - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) ofs = fielddescr.offset - size = fielddescr.get_field_size(self.translate_support_code) - ptr = fielddescr.is_pointer_field() + size = fielddescr.field_size sign = fielddescr.is_field_signed() - return imm(ofs), imm(size), ptr, sign + return imm(ofs), imm(size), sign + _unpack_fielddescr._always_inline_ = True def _unpack_interiorfielddescr(self, descr): assert isinstance(descr, InteriorFieldDescr) arraydescr = descr.arraydescr - ofs = arraydescr.get_base_size(self.translate_support_code) - itemsize = arraydescr.get_item_size(self.translate_support_code) - fieldsize = descr.fielddescr.get_field_size(self.translate_support_code) + ofs = arraydescr.basesize + itemsize = arraydescr.itemsize + fieldsize = descr.fielddescr.field_size sign = descr.fielddescr.is_field_signed() ofs += descr.fielddescr.offset return imm(ofs), imm(itemsize), imm(fieldsize), sign def consider_setfield_gc(self, op): - ofs_loc, size_loc, _, _ = self._unpack_fielddescr(op.getdescr()) + ofs_loc, size_loc, _ = self._unpack_fielddescr(op.getdescr()) assert isinstance(size_loc, ImmedLoc) if size_loc.value == 1: need_lower_byte = True @@ -1032,7 +998,7 @@ consider_unicodesetitem = consider_strsetitem def consider_setarrayitem_gc(self, op): - itemsize, ofs, _, _, _ = self._unpack_arraydescr(op.getdescr()) + itemsize, ofs, _ = self._unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) if itemsize == 1: @@ -1049,7 +1015,7 @@ consider_setarrayitem_raw = consider_setarrayitem_gc def consider_getfield_gc(self, op): - ofs_loc, size_loc, _, sign = self._unpack_fielddescr(op.getdescr()) + ofs_loc, size_loc, sign = self._unpack_fielddescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars(args) @@ -1065,7 +1031,7 @@ consider_getfield_gc_pure = consider_getfield_gc def consider_getarrayitem_gc(self, op): - itemsize, ofs, _, _, sign = self._unpack_arraydescr(op.getdescr()) + itemsize, ofs, sign = self._unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) @@ -1144,8 +1110,8 @@ def consider_arraylen_gc(self, op): arraydescr = op.getdescr() - assert isinstance(arraydescr, BaseArrayDescr) - ofs = arraydescr.get_ofs_length(self.translate_support_code) + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.lendescr.offset args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars_for_op(op) @@ -1285,51 +1251,51 @@ # we would like the boxes to be after the jump. 
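To make the purpose of these hints concrete: if the loop's closing JUMP passes a box to a LABEL that keeps the corresponding argument in a stack slot, recording that slot in hint_frame_locations lets the frame manager bind the box to the same slot up front, so remap_frame_layout() has nothing left to move at the jump. A small sketch (the scenario is made up; only the names come from this patch):

    loc = X86FrameManager.frame_pos(0, INT)   # stack slot of the label's argument 0
    self.fm.hint_frame_locations[box] = loc   # frame manager reuses it when spilling box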
def _compute_hint_frame_locations_from_descr(self, descr): - nonfloatlocs, floatlocs = descr._x86_arglocs + arglocs = descr._x86_arglocs jump_op = self.final_jump_op - assert len(nonfloatlocs) == jump_op.numargs() + assert len(arglocs) == jump_op.numargs() for i in range(jump_op.numargs()): box = jump_op.getarg(i) if isinstance(box, Box): - loc = nonfloatlocs[i] + loc = arglocs[i] if isinstance(loc, StackLoc): - assert box.type != FLOAT self.fm.hint_frame_locations[box] = loc - else: - loc = floatlocs[i] - if isinstance(loc, StackLoc): - assert box.type == FLOAT - self.fm.hint_frame_locations[box] = loc def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None descr = op.getdescr() assert isinstance(descr, TargetToken) - nonfloatlocs, floatlocs = descr._x86_arglocs + arglocs = descr._x86_arglocs self.jump_target_descr = descr - # compute 'tmploc' to be all_regs[0] by spilling what is there - box = TempBox() - box1 = TempBox() - tmpreg = X86RegisterManager.all_regs[0] - tmploc = self.rm.force_allocate_reg(box, selected_reg=tmpreg) - xmmtmp = X86XMMRegisterManager.all_regs[0] - self.xrm.force_allocate_reg(box1, selected_reg=xmmtmp) # Part about non-floats - # XXX we don't need a copy, we only just the original list - src_locations1 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type != FLOAT] - assert tmploc not in nonfloatlocs - dst_locations1 = [loc for loc in nonfloatlocs if loc is not None] + src_locations1 = [] + dst_locations1 = [] # Part about floats - src_locations2 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type == FLOAT] - dst_locations2 = [loc for loc in floatlocs if loc is not None] + src_locations2 = [] + dst_locations2 = [] + # Build the four lists + for i in range(op.numargs()): + box = op.getarg(i) + src_loc = self.loc(box) + dst_loc = arglocs[i] + if box.type != FLOAT: + src_locations1.append(src_loc) + dst_locations1.append(dst_loc) + else: + src_locations2.append(src_loc) + dst_locations2.append(dst_loc) + # Do we have a temp var? + if IS_X86_64: + tmpreg = X86_64_SCRATCH_REG + xmmtmp = X86_64_XMM_SCRATCH_REG + else: + tmpreg = None + xmmtmp = None + # Do the remapping remap_frame_layout_mixed(assembler, - src_locations1, dst_locations1, tmploc, + src_locations1, dst_locations1, tmpreg, src_locations2, dst_locations2, xmmtmp) - self.rm.possibly_free_var(box) - self.xrm.possibly_free_var(box1) self.possibly_free_vars_for_op(op) assembler.closing_jump(self.jump_target_descr) @@ -1381,23 +1347,20 @@ self.rm.force_allocate_frame_reg(op.result) def consider_label(self, op): - # XXX big refactoring needed? 
descr = op.getdescr() assert isinstance(descr, TargetToken) inputargs = op.getarglist() - floatlocs = [None] * len(inputargs) - nonfloatlocs = [None] * len(inputargs) + arglocs = [None] * len(inputargs) # - # we need to make sure that the tmpreg and xmmtmp are free - tmpreg = X86RegisterManager.all_regs[0] - tmpvar = TempBox() - self.rm.force_allocate_reg(tmpvar, selected_reg=tmpreg) - self.rm.possibly_free_var(tmpvar) - # - xmmtmp = X86XMMRegisterManager.all_regs[0] - tmpvar = TempBox() - self.xrm.force_allocate_reg(tmpvar, selected_reg=xmmtmp) - self.xrm.possibly_free_var(tmpvar) + # we use force_spill() on the boxes that are not going to be really + # used any more in the loop, but that are kept alive anyway + # by being in a next LABEL's or a JUMP's argument or fail_args + # of some guard + position = self.rm.position + for arg in inputargs: + assert isinstance(arg, Box) + if self.last_real_usage.get(arg, -1) <= position: + self.force_spill_var(arg) # # we need to make sure that no variable is stored in ebp for arg in inputargs: @@ -1408,16 +1371,18 @@ # for i in range(len(inputargs)): arg = inputargs[i] - assert not isinstance(arg, Const) + assert isinstance(arg, Box) loc = self.loc(arg) - assert not (loc is tmpreg or loc is xmmtmp or loc is ebp) - if arg.type == FLOAT: - floatlocs[i] = loc - else: - nonfloatlocs[i] = loc + assert loc is not ebp + arglocs[i] = loc if isinstance(loc, RegLoc): self.fm.mark_as_free(arg) - descr._x86_arglocs = nonfloatlocs, floatlocs + # + # if we are too close to the start of the loop, the label's target may + # get overridden by redirect_call_assembler(). (rare case) + self.flush_loop() + # + descr._x86_arglocs = arglocs descr._x86_loop_code = self.assembler.mc.get_relative_pos() descr._x86_clt = self.assembler.current_clt self.assembler.target_tokens_currently_compiling[descr] = None @@ -1431,23 +1396,6 @@ if jump_op is not None and jump_op.getdescr() is descr: self._compute_hint_frame_locations_from_descr(descr) -## from pypy.rpython.annlowlevel import llhelper -## def fn(addr): -## print '...label:', hex(addr), nonfloatlocs -## FUNC = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) -## ll_disp = llhelper(FUNC, fn) -## faddr = rffi.cast(lltype.Signed, ll_disp) -## for i in range(16): -## self.assembler.mc.PUSH_r(i) -## self.assembler.mc.CALL_l(0) -## self.assembler.mc.POP(edi) -## self.assembler.mc.MOV(r11, imm(faddr)) -## self.assembler.mc.CALL(r11) -## for i in range(15, -1, -1): -## if i == esp.value: -## i -= 1 -## self.assembler.mc.POP_r(i) - def not_implemented_op(self, op): not_implemented("not implemented operation: %s" % op.getopname()) diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -16,8 +16,7 @@ # class AssemblerLocation(object): - # XXX: Is adding "width" here correct? 
- _attrs_ = ('value', 'width', '_location_code') + _attrs_ = ('value', '_location_code') _immutable_ = True def _getregkey(self): return self.value @@ -28,6 +27,9 @@ def location_code(self): return self._location_code + def get_width(self): + raise NotImplementedError + def value_r(self): return self.value def value_b(self): return self.value def value_s(self): return self.value @@ -43,14 +45,21 @@ _immutable_ = True _location_code = 'b' - def __init__(self, position, ebp_offset, num_words, type): - assert ebp_offset < 0 # so no confusion with RegLoc.value + def __init__(self, position, ebp_offset, type): + # _getregkey() returns self.value; the value returned must not + # conflict with RegLoc._getregkey(). It doesn't a bit by chance, + # so let it fail the following assert if it no longer does. + assert not (0 <= ebp_offset < 8 + 8 * IS_X86_64) self.position = position self.value = ebp_offset - self.width = num_words * WORD # One of INT, REF, FLOAT self.type = type + def get_width(self): + if self.type == FLOAT: + return 8 + return WORD + def __repr__(self): return '%d(%%ebp)' % (self.value,) @@ -64,10 +73,8 @@ self.value = regnum self.is_xmm = is_xmm if self.is_xmm: - self.width = 8 self._location_code = 'x' else: - self.width = WORD self._location_code = 'r' def __repr__(self): if self.is_xmm: @@ -75,6 +82,11 @@ else: return rx86.R.names[self.value] + def get_width(self): + if self.is_xmm: + return 8 + return WORD + def lowest8bits(self): assert not self.is_xmm return RegLoc(rx86.low_byte(self.value), False) @@ -92,9 +104,11 @@ else: return eax -class ImmedLoc(AssemblerLocation): +class ImmediateAssemblerLocation(AssemblerLocation): _immutable_ = True - width = WORD + +class ImmedLoc(ImmediateAssemblerLocation): + _immutable_ = True _location_code = 'i' def __init__(self, value): @@ -105,6 +119,9 @@ def getint(self): return self.value + def get_width(self): + return WORD + def __repr__(self): From noreply at buildbot.pypy.org Fri Dec 30 17:12:59 2011 From: noreply at buildbot.pypy.org (bivab) Date: Fri, 30 Dec 2011 17:12:59 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: fix indentation after merge Message-ID: <20111230161259.A81CF82C04@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50959:794e761fa494 Date: 2011-12-30 13:53 +0100 http://bitbucket.org/pypy/pypy/changeset/794e761fa494/ Log: fix indentation after merge diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -706,7 +706,7 @@ def malloc_nursery_slowpath(size): """Allocate 'size' null bytes out of the nursery. Note that the fast path is typically inlined by the backend.""" - assert size >= self.minimal_size_in_nursery + assert size >= self.minimal_size_in_nursery if self.DEBUG: self._random_usage_of_xmm_registers() type_id = rffi.cast(llgroup.HALFWORD, 0) # missing here @@ -719,7 +719,7 @@ def malloc_array(itemsize, tid, num_elem): """Allocate an array with a variable-size num_elem. 
Only works for standard arrays.""" - assert num_elem >= 0, 'num_elem should be >= 0' + assert num_elem >= 0, 'num_elem should be >= 0' type_id = llop.extract_ushort(llgroup.HALFWORD, tid) check_typeid(type_id) return llop1.do_malloc_varsize_clear( From noreply at buildbot.pypy.org Fri Dec 30 17:13:05 2011 From: noreply at buildbot.pypy.org (bivab) Date: Fri, 30 Dec 2011 17:13:05 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: in progress: implement the changes to the backend introduced by the jit-simplify-backendintf Message-ID: <20111230161305.A179E82C03@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50962:206597c40644 Date: 2011-12-30 16:02 +0100 http://bitbucket.org/pypy/pypy/changeset/206597c40644/ Log: in progress: implement the changes to the backend introduced by the jit-simplify-backendintf diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -57,6 +57,8 @@ END_OF_LOCS = '\xFF' + STACK_FIXED_AREA = -1 + def __init__(self, cpu, failargs_limit=1000): self.cpu = cpu self.fail_boxes_int = values_array(lltype.Signed, failargs_limit) @@ -67,10 +69,6 @@ self.fail_force_index = 0 self.setup_failure_recovery() self.mc = None - self.malloc_func_addr = 0 - self.malloc_array_func_addr = 0 - self.malloc_str_func_addr = 0 - self.malloc_unicode_func_addr = 0 self.memcpy_addr = 0 self.pending_guards = None self._exit_code_addr = 0 @@ -79,6 +77,18 @@ self._regalloc = None self.datablockwrapper = None self.propagate_exception_path = 0 + self._compute_stack_size() + + def _compute_stack_size(self): + self.STACK_FIXED_AREA = len(r.callee_saved_registers) * WORD + self.STACK_FIXED_AREA += WORD # FORCE_TOKEN + self.STACK_FIXED_AREA += N_REGISTERS_SAVED_BY_MALLOC * WORD + if self.cpu.supports_floats: + self.STACK_FIXED_AREA += (len(r.callee_saved_vfp_registers) + * 2 * WORD) + if self.STACK_FIXED_AREA % 8 != 0: + self.STACK_FIXED_AREA += WORD # Stack alignment + assert self.STACK_FIXED_AREA % 8 == 0 def setup(self, looptoken, operations): self.current_clt = looptoken.compiled_loop_token @@ -105,21 +115,7 @@ # Addresses of functions called by new_xxx operations gc_ll_descr = self.cpu.gc_ll_descr gc_ll_descr.initialize() - ll_new = gc_ll_descr.get_funcptr_for_new() - self.malloc_func_addr = rffi.cast(lltype.Signed, ll_new) self._build_propagate_exception_path() - if gc_ll_descr.get_funcptr_for_newarray is not None: - ll_new_array = gc_ll_descr.get_funcptr_for_newarray() - self.malloc_array_func_addr = rffi.cast(lltype.Signed, - ll_new_array) - if gc_ll_descr.get_funcptr_for_newstr is not None: - ll_new_str = gc_ll_descr.get_funcptr_for_newstr() - self.malloc_str_func_addr = rffi.cast(lltype.Signed, - ll_new_str) - if gc_ll_descr.get_funcptr_for_newunicode is not None: - ll_new_unicode = gc_ll_descr.get_funcptr_for_newunicode() - self.malloc_unicode_func_addr = rffi.cast(lltype.Signed, - ll_new_unicode) if gc_ll_descr.get_malloc_slowpath_addr is not None: self._build_malloc_slowpath() if gc_ll_descr.gcrootmap and gc_ll_descr.gcrootmap.is_shadow_stack: @@ -172,7 +168,8 @@ # call on_leave_jitted_save_exc() # XXX add a check if cpu supports floats with saved_registers(mc, r.caller_resp + [r.ip], r.caller_vfp_resp): - addr = self.cpu.get_on_leave_jitted_int(save_exception=True) + addr = self.cpu.get_on_leave_jitted_int(save_exception=True, + default_to_memoryerror=True) mc.BL(addr) mc.gen_load_int(r.ip.value, self.cpu.propagate_exception_v) 
mc.MOV_rr(r.r0.value, r.ip.value) @@ -456,33 +453,36 @@ self.mc.writechar(chr(0)) def gen_func_epilog(self, mc=None, cond=c.AL): + stack_size = self.STACK_FIXED_AREA + stack_size -= len(r.callee_saved_registers) * WORD + if self.cpu.supports_floats: + stack_size -= len(r.callee_saved_vfp_registers) * 2 * WORD + gcrootmap = self.cpu.gc_ll_descr.gcrootmap if mc is None: mc = self.mc if gcrootmap and gcrootmap.is_shadow_stack: self.gen_footer_shadowstack(gcrootmap, mc) - offset = 1 - if self.cpu.supports_floats: - offset += 1 # to keep stack alignment mc.MOV_rr(r.sp.value, r.fp.value, cond=cond) - mc.ADD_ri(r.sp.value, r.sp.value, - (N_REGISTERS_SAVED_BY_MALLOC + offset) * WORD, cond=cond) + mc.ADD_ri(r.sp.value, r.sp.value, stack_size, cond=cond) if self.cpu.supports_floats: mc.VPOP([reg.value for reg in r.callee_saved_vfp_registers], cond=cond) mc.POP([reg.value for reg in r.callee_restored_registers], cond=cond) def gen_func_prolog(self): + stack_size = self.STACK_FIXED_AREA + stack_size -= len(r.callee_saved_registers) * WORD + if self.cpu.supports_floats: + stack_size -= len(r.callee_saved_vfp_registers) * 2 * WORD + self.mc.PUSH([reg.value for reg in r.callee_saved_registers]) - offset = 1 if self.cpu.supports_floats: self.mc.VPUSH([reg.value for reg in r.callee_saved_vfp_registers]) - offset += 1 # to keep stack alignment # here we modify the stack pointer to leave room for the 9 registers # that are going to be saved here around malloc calls and one word to # store the force index - self.mc.SUB_ri(r.sp.value, r.sp.value, - (N_REGISTERS_SAVED_BY_MALLOC + offset) * WORD) + self.mc.SUB_ri(r.sp.value, r.sp.value, stack_size) self.mc.MOV_rr(r.fp.value, r.sp.value) gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: @@ -509,125 +509,6 @@ mc.SUB_ri(r.r5.value, r.r4.value, imm=2 * WORD) # ADD r5, r4 [2*WORD] mc.STR_ri(r.r5.value, r.ip.value) - def gen_bootstrap_code(self, arglocs, inputargs): - nonfloatlocs, floatlocs = arglocs - for i in range(len(nonfloatlocs)): - loc = nonfloatlocs[i] - if loc is None: - continue - arg = inputargs[i] - assert arg.type != FLOAT - if arg.type == REF: - addr = self.fail_boxes_ptr.get_addr_for_num(i) - elif arg.type == INT: - addr = self.fail_boxes_int.get_addr_for_num(i) - else: - assert 0 - if loc.is_reg(): - reg = loc - else: - reg = r.ip - self.mc.gen_load_int(reg.value, addr) - self.mc.LDR_ri(reg.value, reg.value) - if loc.is_stack(): - self.mov_loc_loc(r.ip, loc) - for i in range(len(floatlocs)): - loc = floatlocs[i] - if loc is None: - continue - arg = inputargs[i] - assert arg.type == FLOAT - addr = self.fail_boxes_float.get_addr_for_num(i) - self.mc.gen_load_int(r.ip.value, addr) - if loc.is_vfp_reg(): - self.mc.VLDR(loc.value, r.ip.value) - else: - self.mc.VLDR(r.vfp_ip.value, r.ip.value) - self.mov_loc_loc(r.vfp_ip, loc) - - def gen_direct_bootstrap_code(self, loop_head, arglocs, frame_depth, inputargs): - self.gen_func_prolog() - nonfloatlocs, floatlocs = arglocs - - reg_args = count_reg_args(inputargs) - - selected_reg = 0 - count = 0 - float_args = [] - nonfloat_args = [] - nonfloat_regs = [] - # load reg args - for i in range(reg_args): - arg = inputargs[i] - if arg.type == FLOAT and count % 2 != 0: - selected_reg += 1 - count = 0 - reg = r.all_regs[selected_reg] - - if arg.type == FLOAT: - float_args.append((reg, floatlocs[i])) - else: - nonfloat_args.append(reg) - nonfloat_regs.append(nonfloatlocs[i]) - - if arg.type == FLOAT: - selected_reg += 2 - else: - selected_reg += 1 - count += 1 - - # move float 
arguments to vfp regsiters - for loc, vfp_reg in float_args: - self.mov_to_vfp_loc(loc, r.all_regs[loc.value + 1], vfp_reg) - - # remap values stored in core registers - remap_frame_layout(self, nonfloat_args, nonfloat_regs, r.ip) - - # load values passed on the stack to the corresponding locations - stack_position = len(r.callee_saved_registers) * WORD + \ - len(r.callee_saved_vfp_registers) * 2 * WORD + \ - N_REGISTERS_SAVED_BY_MALLOC * WORD + \ - 2 * WORD # for the FAIL INDEX and the stack padding - count = 0 - for i in range(reg_args, len(inputargs)): - arg = inputargs[i] - if arg.type == FLOAT: - loc = floatlocs[i] - else: - loc = nonfloatlocs[i] - if loc.is_reg(): - self.mc.LDR_ri(loc.value, r.fp.value, stack_position) - count += 1 - elif loc.is_vfp_reg(): - if count % 2 != 0: - stack_position += WORD - count = 0 - self.mc.VLDR(loc.value, r.fp.value, stack_position) - elif loc.is_stack(): - if loc.type == FLOAT: - if count % 2 != 0: - stack_position += WORD - count = 0 - self.mc.VLDR(r.vfp_ip.value, r.fp.value, stack_position) - self.mov_loc_loc(r.vfp_ip, loc) - elif loc.type == INT or loc.type == REF: - count += 1 - self.mc.LDR_ri(r.ip.value, r.fp.value, stack_position) - self.mov_loc_loc(r.ip, loc) - else: - assert 0, 'invalid location' - else: - assert 0, 'invalid location' - if loc.type == FLOAT: - size = 2 - else: - size = 1 - stack_position += size * WORD - - sp_patch_location = self._prepare_sp_patch_position() - self.mc.B_offs(loop_head) - self._patch_sp_offset(sp_patch_location, frame_depth) - def _dump(self, ops, type='loop'): debug_start('jit-backend-ops') debug_print(type) @@ -635,11 +516,16 @@ debug_print(op.repr()) debug_stop('jit-backend-ops') + def _call_header(self): + self.align() + self.gen_func_prolog() + # cpu interface def assemble_loop(self, inputargs, operations, looptoken, log): clt = CompiledLoopToken(self.cpu, looptoken.number) clt.allgcrefs = [] looptoken.compiled_loop_token = clt + clt._debug_nbargs = len(inputargs) if not we_are_translated(): # Arguments should be unique @@ -648,37 +534,24 @@ operations = self.setup(looptoken, operations) self._dump(operations) - self.align() - self.gen_func_prolog() + self._call_header() sp_patch_location = self._prepare_sp_patch_position() regalloc = Regalloc(assembler=self, frame_manager=ARMFrameManager()) - arglocs = regalloc.prepare_loop(inputargs, operations) - self.gen_bootstrap_code(arglocs, inputargs) - looptoken._arm_arglocs = arglocs + regalloc.prepare_loop(inputargs, operations) + loop_head = self.mc.currpos() - looptoken._arm_loop_code = loop_head - looptoken._arm_bootstrap_code = 0 clt.frame_depth = -1 frame_depth = self._assemble(operations, regalloc) clt.frame_depth = frame_depth self._patch_sp_offset(sp_patch_location, frame_depth) - self.align() - - direct_bootstrap_code = self.mc.currpos() - self.gen_direct_bootstrap_code(loop_head, arglocs, - frame_depth, inputargs) - self.write_pending_failure_recoveries() rawstart = self.materialize_loop(looptoken) - direct_code_start = rawstart + direct_bootstrap_code - - looptoken._arm_bootstrap_code = rawstart - looptoken._arm_direct_bootstrap_code = direct_code_start + looptoken._arm_func_addr = rawstart self.process_pending_guards(rawstart) self.fixup_target_tokens(rawstart) @@ -692,11 +565,13 @@ def _assemble(self, operations, regalloc): regalloc.compute_hint_frame_locations(operations) + #self.mc.BKPT() self._walk_operations(operations, regalloc) frame_depth = regalloc.frame_manager.get_frame_depth() jump_target_descr = regalloc.jump_target_descr if 
jump_target_descr is not None: - frame_depth = max(frame_depth, jump_target_descr._arm_clt.frame_depth) + frame_depth = max(frame_depth, + jump_target_descr._arm_clt.frame_depth) return frame_depth def assemble_bridge(self, faildescr, inputargs, operations, @@ -735,7 +610,8 @@ print 'Bridge', inputargs, operations self.mc._dump_trace(rawstart, 'bridge_%d.asm' % self.cpu.total_compiled_bridges) - self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth) + self.current_clt.frame_depth = max(self.current_clt.frame_depth, + frame_depth) self.teardown() def fixup_target_tokens(self, rawstart): @@ -1161,7 +1037,8 @@ llop.gc_assume_young_pointers(lltype.Void, llmemory.cast_ptr_to_adr(ptrs)) - def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size): + assert size & (WORD-1) == 0 # must be correctly aligned size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) size = (size + WORD - 1) & ~(WORD - 1) # round up @@ -1205,9 +1082,6 @@ self.mc.gen_load_int(r.ip.value, nursery_free_adr) self.mc.STR_ri(r.r1.value, r.ip.value) - self.mc.gen_load_int(r.ip.value, tid) - self.mc.STR_ri(r.ip.value, r.r0.value) - def mark_gc_roots(self, force_index, use_copy_area=False): if force_index < 0: return # not needed diff --git a/pypy/jit/backend/arm/jump.py b/pypy/jit/backend/arm/jump.py --- a/pypy/jit/backend/arm/jump.py +++ b/pypy/jit/backend/arm/jump.py @@ -1,6 +1,5 @@ # ../x86/jump.py # XXX combine with ../x86/jump.py and move to llsupport -import sys def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg): @@ -18,7 +17,10 @@ key = src.as_key() if key in srccount: if key == dst_locations[i].as_key(): - srccount[key] = -sys.maxint # ignore a move "x = x" + # ignore a move "x = x" + # setting any "large enough" negative value is ok, but + # be careful of overflows, don't use -sys.maxint + srccount[key] = -len(dst_locations) - 1 pending_dests -= 1 else: srccount[key] += 1 diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -305,9 +305,17 @@ _mixin_ = True def emit_op_jump(self, op, arglocs, regalloc, fcond): + # The backend's logic assumes that the target code is in a piece of + # assembler that was also called with the same number of arguments, + # so that the locations [ebp+8..] of the input arguments are valid + # stack locations both before and after the jump. 
+ # descr = op.getdescr() assert isinstance(descr, TargetToken) assert fcond == c.AL + my_nbargs = self.current_clt._debug_nbargs + target_nbargs = descr._arm_clt._debug_nbargs + assert my_nbargs == target_nbargs self._insert_checks() if descr in self.target_tokens_currently_compiling: @@ -376,7 +384,7 @@ if (op.result and not we_are_translated()): #XXX check result type loc = regalloc.rm.call_result_location(op.result) - size = descr.get_result_size(False) + size = descr.get_result_size() signed = descr.is_result_signed() self._ensure_result_bit_extension(loc, size, signed) return cond @@ -786,7 +794,7 @@ #XXX Hack, Hack, Hack if not we_are_translated(): descr = op.getdescr() - size = descr.get_item_size(False) + size = descr.itemsize signed = descr.is_item_signed() self._ensure_result_bit_extension(res, size, signed) return fcond @@ -1000,9 +1008,9 @@ descr = op.getdescr() assert isinstance(descr, JitCellToken) # XXX check this - assert op.numargs() == len(descr._arm_arglocs[0]) + # assert len(arglocs) - 2 == descr.compiled_loop_token._debug_nbargs resbox = TempInt() - self._emit_call(fail_index, descr._arm_direct_bootstrap_code, + self._emit_call(fail_index, descr._arm_func_addr, op.getarglist(), regalloc, fcond, result=resbox) if op.result is None: value = self.cpu.done_with_this_frame_void_v @@ -1059,9 +1067,9 @@ # Reset the vable token --- XXX really too much special logic here:-( if jd.index_of_virtualizable >= 0: - from pypy.jit.backend.llsupport.descr import BaseFieldDescr + from pypy.jit.backend.llsupport.descr import FieldDescr fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, BaseFieldDescr) + assert isinstance(fielddescr, FieldDescr) ofs = fielddescr.offset resloc = regalloc.force_allocate_reg(resbox) self.mov_loc_loc(arglocs[1], r.ip) @@ -1103,11 +1111,15 @@ # ../x86/assembler.py:668 def redirect_call_assembler(self, oldlooptoken, newlooptoken): - # we overwrite the instructions at the old _x86_direct_bootstrap_code - # to start with a JMP to the new _arm_direct_bootstrap_code. + # some minimal sanity checking + old_nbargs = oldlooptoken.compiled_loop_token._debug_nbargs + new_nbargs = newlooptoken.compiled_loop_token._debug_nbargs + assert old_nbargs == new_nbargs + # we overwrite the instructions at the old _arm_func_addr + # to start with a JMP to the new _arm_func_addr. # Ideally we should rather patch all existing CALLs, but well. 
- oldadr = oldlooptoken._arm_direct_bootstrap_code - target = newlooptoken._arm_direct_bootstrap_code + oldadr = oldlooptoken._arm_func_addr + target = newlooptoken._arm_func_addr mc = ARMv7Builder() mc.B(target) mc.copy_to_raw_memory(oldadr) @@ -1191,36 +1203,10 @@ self._emit_call(force_index, self.malloc_func_addr, [size_box], regalloc, result=result) - def emit_op_new(self, op, arglocs, regalloc, fcond): + def emit_op_call_malloc_gc(self, op, arglocs, regalloc, fcond): self.propagate_memoryerror_if_r0_is_null() return fcond - def emit_op_new_with_vtable(self, op, arglocs, regalloc, fcond): - classint = arglocs[0].value - self.set_vtable(op.result, classint) - return fcond - - def set_vtable(self, box, vtable): - if self.cpu.vtable_offset is not None: - adr = rffi.cast(lltype.Signed, vtable) - self.mc.gen_load_int(r.ip.value, adr) - self.mc.STR_ri(r.ip.value, r.r0.value, self.cpu.vtable_offset) - - def set_new_array_length(self, loc, ofs_length, loc_num_elem): - assert loc.is_reg() - self.mc.gen_load_int(r.ip.value, loc_num_elem) - self.mc.STR_ri(r.ip.value, loc.value, imm=ofs_length) - - def emit_op_new_array(self, op, arglocs, regalloc, fcond): - self.propagate_memoryerror_if_r0_is_null() - if len(arglocs) > 0: - value_loc, base_loc, ofs_length = arglocs - self.mc.STR_ri(value_loc.value, base_loc.value, ofs_length.value) - return fcond - - emit_op_newstr = emit_op_new_array - emit_op_newunicode = emit_op_new_array - class FloatOpAssemlber(object): _mixin_ = True diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -12,26 +12,27 @@ check_imm_box ) from pypy.jit.backend.arm.jump import remap_frame_layout_mixed -from pypy.jit.backend.arm.arch import MY_COPY_OF_REGS, WORD +from pypy.jit.backend.arm.arch import MY_COPY_OF_REGS +from pypy.jit.backend.arm.arch import WORD, N_REGISTERS_SAVED_BY_MALLOC from pypy.jit.codewriter import longlong from pypy.jit.metainterp.history import (Const, ConstInt, ConstFloat, ConstPtr, Box, BoxPtr, INT, REF, FLOAT) from pypy.jit.metainterp.history import JitCellToken, TargetToken from pypy.jit.metainterp.resoperation import rop -from pypy.jit.backend.llsupport.descr import BaseFieldDescr, BaseArrayDescr, \ - BaseSizeDescr, InteriorFieldDescr +from pypy.jit.backend.llsupport.descr import ArrayDescr from pypy.jit.backend.llsupport import symbolic from pypy.rpython.lltypesystem import lltype, rffi, rstr -from pypy.jit.codewriter import heaptracker from pypy.jit.codewriter.effectinfo import EffectInfo +from pypy.jit.backend.llsupport.descr import unpack_arraydescr +from pypy.jit.backend.llsupport.descr import unpack_fielddescr +from pypy.jit.backend.llsupport.descr import unpack_interiorfielddescr # xxx hack: set a default value for TargetToken._arm_loop_code. If 0, we know # that it is a LABEL that was not compiled yet. TargetToken._arm_loop_code = 0 - class TempInt(TempBox): type = INT @@ -65,9 +66,11 @@ def frame_pos(loc, type): num_words = ARMFrameManager.frame_size(type) if type == FLOAT: - # Make sure that loc is an even value - # the frame layout requires loc to be even!! - assert (loc & 1) == 0 + if loc > 0: + # Make sure that loc is an even value + # the frame layout requires loc to be even if it is a spilled + # value!! 
+ assert (loc & 1) == 0 return locations.StackLocation(loc + 1, num_words=num_words, type=type) return locations.StackLocation(loc, num_words=num_words, type=type) @@ -118,8 +121,8 @@ if isinstance(thing, Const): assert isinstance(thing, ConstFloat) loc = self.get_scratch_reg(FLOAT, self.temp_boxes + forbidden_vars) - imm = self.convert_to_imm(thing) - self.assembler.load(loc, imm) + immvalue = self.convert_to_imm(thing) + self.assembler.load(loc, immvalue) else: loc = self.make_sure_var_in_reg(thing, forbidden_vars=self.temp_boxes + forbidden_vars) @@ -161,7 +164,8 @@ def convert_to_imm(self, c): if isinstance(c, ConstInt): - return locations.ImmLocation(c.value) + val = rffi.cast(rffi.INT, c.value) + return locations.ImmLocation(val) else: assert isinstance(c, ConstPtr) return locations.ImmLocation(rffi.cast(lltype.Signed, c.value)) @@ -176,8 +180,8 @@ tp = INT loc = self.get_scratch_reg(tp, forbidden_vars=self.temp_boxes + forbidden_vars) - imm = self.convert_to_imm(thing) - self.assembler.load(loc, imm) + immvalue = self.convert_to_imm(thing) + self.assembler.load(loc, immvalue) else: loc = self.make_sure_var_in_reg(thing, forbidden_vars=forbidden_vars) @@ -297,39 +301,63 @@ return self.vfprm.convert_to_imm(value) def _prepare(self, inputargs, operations): - longevity, useful = compute_vars_longevity(inputargs, operations) + longevity, last_real_usage = compute_vars_longevity( + inputargs, operations) self.longevity = longevity + self.last_real_usage = last_real_usage fm = self.frame_manager asm = self.assembler self.vfprm = VFPRegisterManager(longevity, fm, asm) self.rm = ARMv7RegisterManager(longevity, fm, asm) - return useful def prepare_loop(self, inputargs, operations): - useful = self._prepare(inputargs, operations) - return self._process_inputargs(inputargs, useful) + self._prepare(inputargs, operations) + self._set_initial_bindings(inputargs) + self.possibly_free_vars(list(inputargs)) def prepare_bridge(self, inputargs, arglocs, ops): self._prepare(inputargs, ops) self._update_bindings(arglocs, inputargs) - def _process_inputargs(self, inputargs, useful): - floatlocs = [None] * len(inputargs) - nonfloatlocs = [None] * len(inputargs) - for i in range(len(inputargs)): - arg = inputargs[i] - assert not isinstance(arg, Const) - loc = inputargs[i] - if self.longevity[arg][1] > -1 and arg in useful: - self.try_allocate_reg(loc) + def _set_initial_bindings(self, inputargs): + # The first inputargs are passed in registers r0-r3 + # we rely on the soft-float calling convention so we need to move + # float params to the coprocessor. 
- loc = self.loc(arg) - if arg.type == FLOAT: - floatlocs[i] = loc + arg_index = 0 + count = 0 + n_register_args = len(r.argument_regs) + cur_frame_pos = - (self.assembler.STACK_FIXED_AREA / WORD) + 1 + for box in inputargs: + assert isinstance(box, Box) + # handle inputargs in argument registers + if box.type == FLOAT and arg_index % 2 != 0: + arg_index += 1 # align argument index for float passed + # in register + if arg_index < n_register_args: + if box.type == FLOAT: + loc = r.argument_regs[arg_index] + loc2 = r.argument_regs[arg_index + 1] + vfpreg = self.try_allocate_reg(box) + # move soft-float argument to vfp + self.assembler.mov_to_vfp_loc(loc, loc2, vfpreg) + arg_index += 2 # this argument used to argument registers + else: + loc = r.argument_regs[arg_index] + self.try_allocate_reg(box, selected_reg=loc) + arg_index += 1 else: - nonfloatlocs[i] = loc - self.possibly_free_vars(list(inputargs)) - return nonfloatlocs, floatlocs + # treat stack args as stack locations with a negative offset + if box.type == FLOAT: + cur_frame_pos -= 2 + if count % 2 != 0: # Stack argument alignment + cur_frame_pos -= 1 + count = 0 + else: + cur_frame_pos -= 1 + count += 1 + loc = self.frame_manager.frame_pos(cur_frame_pos, box.type) + self.frame_manager.set_binding(box, loc) def _update_bindings(self, locs, inputargs): used = {} @@ -533,6 +561,10 @@ args = [imm(rffi.cast(lltype.Signed, op.getarg(0).getint()))] return args + def prepare_op_call_malloc_gc(self, op, fcond): + args = [imm(rffi.cast(lltype.Signed, op.getarg(0).getint()))] + return args + def _prepare_guard(self, op, args=None): if args is None: args = [] @@ -651,41 +683,45 @@ # we would like the boxes to be after the jump. def _compute_hint_frame_locations_from_descr(self, descr): - nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr) + arglocs = self.assembler.target_arglocs(descr) jump_op = self.final_jump_op + assert len(arglocs) == jump_op.numargs() for i in range(jump_op.numargs()): box = jump_op.getarg(i) if isinstance(box, Box): - loc = nonfloatlocs[i] + loc = arglocs[i] if loc is not None and loc.is_stack(): - assert box.type != FLOAT self.frame_manager.hint_frame_locations[box] = loc - else: - loc = floatlocs[i] - if loc is not None and loc.is_stack(): - assert box.type == FLOAT - self.frame_manager.hint_frame_locations[box] = loc def prepare_op_jump(self, op, fcond): descr = op.getdescr() assert isinstance(descr, TargetToken) self.jump_target_descr = descr - nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr) + arglocs = self.assembler.target_arglocs(descr) # get temporary locs tmploc = r.ip vfptmploc = r.vfp_ip # Part about non-floats - # XXX we don't need a copy, we only just the original list - src_locations1 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type != FLOAT] - assert tmploc not in nonfloatlocs - dst_locations1 = [loc for loc in nonfloatlocs if loc is not None] + src_locations1 = [] + dst_locations1 = [] # Part about floats - src_locations2 = [self.loc(op.getarg(i)) for i in range(op.numargs()) - if op.getarg(i).type == FLOAT] - dst_locations2 = [loc for loc in floatlocs if loc is not None] + src_locations2 = [] + dst_locations2 = [] + + # Build the four lists + for i in range(op.numargs()): + box = op.getarg(i) + src_loc = self.loc(box) + dst_loc = arglocs[i] + if box.type != FLOAT: + src_locations1.append(src_loc) + dst_locations1.append(dst_loc) + else: + src_locations2.append(src_loc) + dst_locations2.append(dst_loc) + remap_frame_layout_mixed(self.assembler, 
src_locations1, dst_locations1, tmploc, src_locations2, dst_locations2, vfptmploc) @@ -694,7 +730,7 @@ def prepare_op_setfield_gc(self, op, fcond): boxes = list(op.getarglist()) a0, a1 = boxes - ofs, size, ptr = self._unpack_fielddescr(op.getdescr()) + ofs, size, sign = unpack_fielddescr(op.getdescr()) base_loc = self._ensure_value_is_boxed(a0, boxes) value_loc = self._ensure_value_is_boxed(a1, boxes) if check_imm_arg(ofs): @@ -708,7 +744,7 @@ def prepare_op_getfield_gc(self, op, fcond): a0 = op.getarg(0) - ofs, size, ptr = self._unpack_fielddescr(op.getdescr()) + ofs, size, sign = unpack_fielddescr(op.getdescr()) base_loc = self._ensure_value_is_boxed(a0) immofs = imm(ofs) if check_imm_arg(ofs): @@ -726,7 +762,7 @@ prepare_op_getfield_gc_pure = prepare_op_getfield_gc def prepare_op_getinteriorfield_gc(self, op, fcond): - t = self._unpack_interiorfielddescr(op.getdescr()) + t = unpack_interiorfielddescr(op.getdescr()) ofs, itemsize, fieldsize, sign = t args = op.getarglist() base_loc = self._ensure_value_is_boxed(op.getarg(0), args) @@ -744,7 +780,7 @@ imm(itemsize), imm(fieldsize)] def prepare_op_setinteriorfield_gc(self, op, fcond): - t = self._unpack_interiorfielddescr(op.getdescr()) + t = unpack_interiorfielddescr(op.getdescr()) ofs, itemsize, fieldsize, sign = t args = op.getarglist() base_loc = self._ensure_value_is_boxed(op.getarg(0), args) @@ -761,8 +797,8 @@ def prepare_op_arraylen_gc(self, op, fcond): arraydescr = op.getdescr() - assert isinstance(arraydescr, BaseArrayDescr) - ofs = arraydescr.get_ofs_length(self.cpu.translate_support_code) + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.lendescr.offset arg = op.getarg(0) base_loc = self._ensure_value_is_boxed(arg) self.possibly_free_vars_for_op(op) @@ -772,27 +808,27 @@ def prepare_op_setarrayitem_gc(self, op, fcond): a0, a1, a2 = list(op.getarglist()) - _, scale, base_ofs, _, ptr = self._unpack_arraydescr(op.getdescr()) - + size, ofs, _ = unpack_arraydescr(op.getdescr()) + scale = get_scale(size) args = op.getarglist() base_loc = self._ensure_value_is_boxed(a0, args) ofs_loc = self._ensure_value_is_boxed(a1, args) value_loc = self._ensure_value_is_boxed(a2, args) - assert check_imm_arg(base_ofs) - return [value_loc, base_loc, ofs_loc, imm(scale), imm(base_ofs)] + assert check_imm_arg(ofs) + return [value_loc, base_loc, ofs_loc, imm(scale), imm(ofs)] prepare_op_setarrayitem_raw = prepare_op_setarrayitem_gc def prepare_op_getarrayitem_gc(self, op, fcond): a0, a1 = boxes = list(op.getarglist()) - _, scale, base_ofs, _, ptr = self._unpack_arraydescr(op.getdescr()) - + size, ofs, _ = unpack_arraydescr(op.getdescr()) + scale = get_scale(size) base_loc = self._ensure_value_is_boxed(a0, boxes) ofs_loc = self._ensure_value_is_boxed(a1, boxes) self.possibly_free_vars_for_op(op) self.free_temp_vars() res = self.force_allocate_reg(op.result) - assert check_imm_arg(base_ofs) - return [res, base_loc, ofs_loc, imm(scale), imm(base_ofs)] + assert check_imm_arg(ofs) + return [res, base_loc, ofs_loc, imm(scale), imm(ofs)] prepare_op_getarrayitem_raw = prepare_op_getarrayitem_gc prepare_op_getarrayitem_gc_pure = prepare_op_getarrayitem_gc @@ -906,86 +942,22 @@ prepare_op_cast_ptr_to_int = prepare_op_same_as prepare_op_cast_int_to_ptr = prepare_op_same_as - def prepare_op_new(self, op, fcond): - gc_ll_descr = self.assembler.cpu.gc_ll_descr - if gc_ll_descr.can_inline_malloc(op.getdescr()): - self.fastpath_malloc_fixedsize(op, op.getdescr()) - else: - arglocs = self._prepare_args_for_new_op(op.getdescr()) - force_index = 
self.assembler.write_new_force_index() - self.assembler._emit_call(force_index, - self.assembler.malloc_func_addr, arglocs, - self, fcond, result=op.result) - self.possibly_free_vars(arglocs) - self.possibly_free_var(op.result) - return [] + def prepare_op_call_malloc_nursery(self, op, fcond): + size_box = op.getarg(0) + assert isinstance(size_box, ConstInt) + size = size_box.getint() - def prepare_op_new_with_vtable(self, op, fcond): - classint = op.getarg(0).getint() - descrsize = heaptracker.vtable2descr(self.cpu, classint) - if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): - self.fastpath_malloc_fixedsize(op, descrsize) - else: - callargs = self._prepare_args_for_new_op(descrsize) - force_index = self.assembler.write_new_force_index() - self.assembler._emit_call(force_index, - self.assembler.malloc_func_addr, callargs, - self, fcond, result=op.result) - self.possibly_free_vars(callargs) - self.possibly_free_var(op.result) - return [imm(classint)] - - def prepare_op_new_array(self, op, fcond): - gc_ll_descr = self.cpu.gc_ll_descr - if gc_ll_descr.get_funcptr_for_newarray is not None: - # framework GC - box_num_elem = op.getarg(0) - if isinstance(box_num_elem, ConstInt): - num_elem = box_num_elem.value - if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), - num_elem): - self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) - return [] - args = self.assembler.cpu.gc_ll_descr.args_for_new_array( - op.getdescr()) - argboxes = [ConstInt(x) for x in args] - argboxes.append(box_num_elem) - force_index = self.assembler.write_new_force_index() - self.assembler._emit_call(force_index, - self.assembler.malloc_array_func_addr, argboxes, self, - fcond, result=op.result) - return [] - # boehm GC - itemsize, scale, basesize, ofs_length, _ = ( - self._unpack_arraydescr(op.getdescr())) - return self._malloc_varsize(basesize, ofs_length, itemsize, op) - - def fastpath_malloc_varsize(self, op, arraydescr, num_elem): - assert isinstance(arraydescr, BaseArrayDescr) - ofs_length = arraydescr.get_ofs_length(self.cpu.translate_support_code) - basesize = arraydescr.get_base_size(self.cpu.translate_support_code) - itemsize = arraydescr.get_item_size(self.cpu.translate_support_code) - size = basesize + itemsize * num_elem - self._do_fastpath_malloc(op, size, arraydescr.tid) - # we know the resullt of the malloc call is in r0 - self.assembler.set_new_array_length(r.r0, ofs_length, num_elem) - - def fastpath_malloc_fixedsize(self, op, descr): - assert isinstance(descr, BaseSizeDescr) - self._do_fastpath_malloc(op, descr.size, descr.tid) - - def _do_fastpath_malloc(self, op, size, tid): - gc_ll_descr = self.assembler.cpu.gc_ll_descr self.rm.force_allocate_reg(op.result, selected_reg=r.r0) t = TempInt() self.rm.force_allocate_reg(t, selected_reg=r.r1) self.possibly_free_var(op.result) self.possibly_free_var(t) + gc_ll_descr = self.assembler.cpu.gc_ll_descr self.assembler.malloc_cond( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), - size, tid, + size ) def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): @@ -1007,59 +979,6 @@ return gcrootmap.compress_callshape(shape, self.assembler.datablockwrapper) - def prepare_op_newstr(self, op, fcond): - gc_ll_descr = self.cpu.gc_ll_descr - if gc_ll_descr.get_funcptr_for_newstr is not None: - force_index = self.assembler.write_new_force_index() - self.assembler._emit_call(force_index, - self.assembler.malloc_str_func_addr, [op.getarg(0)], - self, fcond, op.result) - return [] - # boehm GC - ofs_items, itemsize, ofs = 
symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - assert itemsize == 1 - return self._malloc_varsize(ofs_items, ofs, itemsize, op) - - def prepare_op_newunicode(self, op, fcond): - gc_ll_descr = self.cpu.gc_ll_descr - if gc_ll_descr.get_funcptr_for_newunicode is not None: - force_index = self.assembler.write_new_force_index() - self.assembler._emit_call(force_index, - self.assembler.malloc_unicode_func_addr, - [op.getarg(0)], self, fcond, op.result) - return [] - # boehm GC - ofs_items, _, ofs = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - _, itemsize, _ = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - return self._malloc_varsize(ofs_items, ofs, itemsize, op) - - def _malloc_varsize(self, ofs_items, ofs_length, itemsize, op): - v = op.getarg(0) - res_v = op.result - boxes = [v, res_v] - immofs = imm(ofs_items) - if check_imm_arg(ofs_items): - ofs_items_loc = immofs - else: - ofs_items_loc = self.get_scratch_reg(INT, boxes) - self.assembler.load(ofs_items_loc, immofs) - vloc = self._ensure_value_is_boxed(v, [res_v]) - - size = self.get_scratch_reg(INT, boxes) - self.assembler.load(itemsize) - - self.assembler._regalloc_malloc_varsize(size, size_box, - vloc, vbox, ofs_items_loc, self, res_v) - base_loc = self.make_sure_var_in_reg(res_v) - - value_loc = self._ensure_value_is_boxed(v, [res_v]) - assert value_loc.is_reg() - assert base_loc.is_reg() - return [value_loc, base_loc, imm(ofs_length)] - prepare_op_debug_merge_point = void prepare_op_jit_debug = void @@ -1088,20 +1007,28 @@ descr = op.getdescr() assert isinstance(descr, TargetToken) inputargs = op.getarglist() - floatlocs = [None] * len(inputargs) - nonfloatlocs = [None] * len(inputargs) + arglocs = [None] * len(inputargs) + # + # we use force_spill() on the boxes that are not going to be really + # used any more in the loop, but that are kept alive anyway + # by being in a next LABEL's or a JUMP's argument or fail_args + # of some guard + position = self.rm.position + for arg in inputargs: + assert isinstance(arg, Box) + if self.last_real_usage.get(arg, -1) <= position: + self.force_spill_var(arg) + # for i in range(len(inputargs)): arg = inputargs[i] - assert not isinstance(arg, Const) + assert isinstance(arg, Box) loc = self.loc(arg) - if arg.type == FLOAT: - floatlocs[i] = loc - else: - nonfloatlocs[i] = loc + arglocs[i] = loc if loc.is_reg(): self.frame_manager.mark_as_free(arg) - descr._arm_arglocs = nonfloatlocs, floatlocs + # + descr._arm_arglocs = arglocs descr._arm_loop_code = self.assembler.mc.currpos() descr._arm_clt = self.assembler.current_clt self.assembler.target_tokens_currently_compiling[descr] = None @@ -1114,7 +1041,6 @@ jump_op = self.final_jump_op if jump_op is not None and jump_op.getdescr() is descr: self._compute_hint_frame_locations_from_descr(descr) - return None def prepare_guard_call_may_force(self, op, guard_op, fcond): faildescr = guard_op.getdescr() @@ -1159,8 +1085,7 @@ assert isinstance(descr, JitCellToken) jd = descr.outermost_jitdriver_sd assert jd is not None - size = jd.portal_calldescr.get_result_size( - self.cpu.translate_support_code) + size = jd.portal_calldescr.get_result_size() vable_index = jd.index_of_virtualizable if vable_index >= 0: self._sync_var(op.getarg(vable_index)) @@ -1182,40 +1107,6 @@ arglocs.append(t) return arglocs - # from ../x86/regalloc.py:791 - def _unpack_fielddescr(self, fielddescr): - assert isinstance(fielddescr, BaseFieldDescr) - ofs = fielddescr.offset - size = 
fielddescr.get_field_size(self.cpu.translate_support_code) - ptr = fielddescr.is_pointer_field() - return ofs, size, ptr - - # from ../x86/regalloc.py:779 - def _unpack_arraydescr(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) - cpu = self.cpu - ofs_length = arraydescr.get_ofs_length(cpu.translate_support_code) - ofs = arraydescr.get_base_size(cpu.translate_support_code) - size = arraydescr.get_item_size(cpu.translate_support_code) - ptr = arraydescr.is_array_of_pointers() - scale = 0 - while (1 << scale) < size: - scale += 1 - assert (1 << scale) == size - return size, scale, ofs, ofs_length, ptr - - # from ../x86/regalloc.py:965 - def _unpack_interiorfielddescr(self, descr): - assert isinstance(descr, InteriorFieldDescr) - arraydescr = descr.arraydescr - ofs = arraydescr.get_base_size(self.cpu.translate_support_code) - itemsize = arraydescr.get_item_size(self.cpu.translate_support_code) - fieldsize = descr.fielddescr.get_field_size( - self.cpu.translate_support_code) - sign = descr.fielddescr.is_field_signed() - ofs += descr.fielddescr.offset - return ofs, itemsize, fieldsize, sign - prepare_op_float_add = prepare_float_op(name='prepare_op_float_add') prepare_op_float_sub = prepare_float_op(name='prepare_op_float_sub') prepare_op_float_mul = prepare_float_op(name='prepare_op_float_mul') @@ -1294,6 +1185,14 @@ operations = [notimplemented] * (rop._LAST + 1) operations_with_guard = [notimplemented_with_guard] * (rop._LAST + 1) + +def get_scale(size): + scale = 0 + while (1 << scale) < size: + scale += 1 + assert (1 << scale) == size + return scale + for key, value in rop.__dict__.items(): key = key.lower() if key.startswith('_'): diff --git a/pypy/jit/backend/arm/registers.py b/pypy/jit/backend/arm/registers.py --- a/pypy/jit/backend/arm/registers.py +++ b/pypy/jit/backend/arm/registers.py @@ -21,7 +21,7 @@ all_regs = [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10] all_vfp_regs = vfpregisters[:-1] -caller_resp = [r0, r1, r2, r3] +argument_regs = caller_resp = [r0, r1, r2, r3] callee_resp = [r4, r5, r6, r7, r8, r9, r10, fp] callee_saved_registers = callee_resp + [lr] callee_restored_registers = callee_resp + [pc] diff --git a/pypy/jit/backend/arm/runner.py b/pypy/jit/backend/arm/runner.py --- a/pypy/jit/backend/arm/runner.py +++ b/pypy/jit/backend/arm/runner.py @@ -9,7 +9,6 @@ class ArmCPU(AbstractLLCPU): - BOOTSTRAP_TP = lltype.FuncType([], lltype.Signed) supports_floats = True def __init__(self, rtyper, stats, opts=None, translate_support_code=False, @@ -47,15 +46,6 @@ self.assembler.assemble_bridge(faildescr, inputargs, operations, original_loop_token, log=log) - def set_future_value_float(self, index, floatvalue): - self.assembler.fail_boxes_float.setitem(index, floatvalue) - - def set_future_value_int(self, index, intvalue): - self.assembler.fail_boxes_int.setitem(index, intvalue) - - def set_future_value_ref(self, index, ptrvalue): - self.assembler.fail_boxes_ptr.setitem(index, ptrvalue) - def get_latest_value_float(self, index): return self.assembler.fail_boxes_float.getitem(index) @@ -80,27 +70,29 @@ for index in range(count): setitem(index, null) - def execute_token(self, executable_token): - #i = [self.get_latest_value_int(x) for x in range(10)] - #print 'Inputargs: %r for token %r' % (i, executable_token) - addr = executable_token._arm_bootstrap_code - assert addr % 8 == 0 - func = rffi.cast(lltype.Ptr(self.BOOTSTRAP_TP), addr) - fail_index = self._execute_call(func) - return self.get_fail_descr_from_number(fail_index) + def make_execute_token(self, *ARGS): + 
FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, lltype.Signed)) - def _execute_call(self, func): - prev_interpreter = None - if not self.translate_support_code: - prev_interpreter = LLInterpreter.current_interpreter - LLInterpreter.current_interpreter = self.debug_ll_interpreter - res = 0 - try: - res = func() - finally: + def execute_token(executable_token, *args): + clt = executable_token.compiled_loop_token + assert len(args) == clt._debug_nbargs + # + addr = executable_token._arm_func_addr + assert addr % 8 == 0 + func = rffi.cast(FUNCPTR, addr) + #llop.debug_print(lltype.Void, ">>>> Entering", addr) + prev_interpreter = None # help flow space if not self.translate_support_code: - LLInterpreter.current_interpreter = prev_interpreter - return res + prev_interpreter = LLInterpreter.current_interpreter + LLInterpreter.current_interpreter = self.debug_ll_interpreter + try: + fail_index = func(*args) + finally: + if not self.translate_support_code: + LLInterpreter.current_interpreter = prev_interpreter + #llop.debug_print(lltype.Void, "<<<< Back") + return self.get_fail_descr_from_number(fail_index) + return execute_token def cast_ptr_to_int(x): adr = llmemory.cast_ptr_to_adr(x) diff --git a/pypy/jit/backend/arm/test/test_calling_convention.py b/pypy/jit/backend/arm/test/test_calling_convention.py --- a/pypy/jit/backend/arm/test/test_calling_convention.py +++ b/pypy/jit/backend/arm/test/test_calling_convention.py @@ -32,9 +32,8 @@ loop = parse(ops, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - for x in range(11): - self.cpu.set_future_value_int(x, x) - self.cpu.execute_token(looptoken) + args = [x for x in range(11)] + self.cpu.execute_token(looptoken, *args) for x in range(11): assert self.cpu.get_latest_value_int(x) == x assert self.cpu.get_latest_value_int(11) == 38 diff --git a/pypy/jit/backend/arm/test/test_generated.py b/pypy/jit/backend/arm/test/test_generated.py --- a/pypy/jit/backend/arm/test/test_generated.py +++ b/pypy/jit/backend/arm/test/test_generated.py @@ -43,17 +43,8 @@ looptoken = JitCellToken() operations[2].setfailargs([v12, v8, v3, v2, v1, v11]) cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, -12) - cpu.set_future_value_int(1, -26) - cpu.set_future_value_int(2, -19) - cpu.set_future_value_int(3, 7) - cpu.set_future_value_int(4, -5) - cpu.set_future_value_int(5, -24) - cpu.set_future_value_int(6, -37) - cpu.set_future_value_int(7, 62) - cpu.set_future_value_int(8, 9) - cpu.set_future_value_int(9, 12) - op = cpu.execute_token(looptoken) + args = [-12 , -26 , -19 , 7 , -5 , -24 , -37 , 62 , 9 , 12] + op = cpu.execute_token(looptoken, *args) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == 62 assert cpu.get_latest_value_int(2) == -19 @@ -103,17 +94,8 @@ operations[9].setfailargs([v15, v7, v10, v18, v4, v17, v1]) looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 16) - cpu.set_future_value_int(1, 5) - cpu.set_future_value_int(2, 5) - cpu.set_future_value_int(3, 16) - cpu.set_future_value_int(4, 46) - cpu.set_future_value_int(5, 6) - cpu.set_future_value_int(6, 63) - cpu.set_future_value_int(7, 39) - cpu.set_future_value_int(8, 78) - cpu.set_future_value_int(9, 0) - op = cpu.execute_token(looptoken) + args = [16 , 5 , 5 , 16 , 46 , 6 , 63 , 39 , 78 , 0] + op = cpu.execute_token(looptoken, *args) assert cpu.get_latest_value_int(0) == 105 assert cpu.get_latest_value_int(1) == 63 assert 
cpu.get_latest_value_int(2) == 0 @@ -154,16 +136,7 @@ operations[4].setfailargs([v2, v12, v1, v3, v4]) looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, -5) - cpu.set_future_value_int(1, 24) - cpu.set_future_value_int(2, 46) - cpu.set_future_value_int(3, -15) - cpu.set_future_value_int(4, 13) - cpu.set_future_value_int(5, -8) - cpu.set_future_value_int(6, 0) - cpu.set_future_value_int(7, -6) - cpu.set_future_value_int(8, 6) - cpu.set_future_value_int(9, 6) + args = [-5 , 24 , 46 , -15 , 13 , -8 , 0 , -6 , 6 , 6] op = cpu.execute_token(looptoken) assert op.identifier == 2 assert cpu.get_latest_value_int(0) == 24 @@ -205,17 +178,8 @@ operations[5].setfailargs([]) looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 19) - cpu.set_future_value_int(1, -3) - cpu.set_future_value_int(2, -58) - cpu.set_future_value_int(3, -7) - cpu.set_future_value_int(4, 12) - cpu.set_future_value_int(5, 22) - cpu.set_future_value_int(6, -54) - cpu.set_future_value_int(7, -29) - cpu.set_future_value_int(8, -19) - cpu.set_future_value_int(9, -64) - op = cpu.execute_token(looptoken) + args = [19 , -3 , -58 , -7 , 12 , 22 , -54 , -29 , -19 , -64] + op = cpu.execute_token(looptoken, *args) assert cpu.get_latest_value_int(0) == -29 assert cpu.get_latest_value_int(1) == -3 assert cpu.get_latest_value_int(2) == 22 @@ -257,17 +221,8 @@ looptoken = JitCellToken() operations[5].setfailargs([]) cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 1073741824) - cpu.set_future_value_int(1, 95) - cpu.set_future_value_int(2, -16) - cpu.set_future_value_int(3, 5) - cpu.set_future_value_int(4, 92) - cpu.set_future_value_int(5, 12) - cpu.set_future_value_int(6, 32) - cpu.set_future_value_int(7, 17) - cpu.set_future_value_int(8, 37) - cpu.set_future_value_int(9, -63) - op = cpu.execute_token(looptoken) + args = [1073741824 , 95 , -16 , 5 , 92 , 12 , 32 , 17 , 37 , -63] + op = cpu.execute_token(looptoken, *args) assert cpu.get_latest_value_int(0) == 1073741824 assert cpu.get_latest_value_int(1) == 5 assert cpu.get_latest_value_int(2) == -63 @@ -320,18 +275,9 @@ operations[1].setfailargs([v8, v6, v1]) operations[7].setfailargs([v4]) operations[9].setfailargs([v10, v13]) - cpu.set_future_value_int(0, 32) - cpu.set_future_value_int(1, 41) - cpu.set_future_value_int(2, -9) - cpu.set_future_value_int(3, 12) - cpu.set_future_value_int(4, -18) - cpu.set_future_value_int(5, 46) - cpu.set_future_value_int(6, 15) - cpu.set_future_value_int(7, 17) - cpu.set_future_value_int(8, 10) - cpu.set_future_value_int(9, 12) + args = [32 , 41 , -9 , 12 , -18 , 46 , 15 , 17 , 10 , 12] cpu.compile_loop(inputargs, operations, looptoken) - op = cpu.execute_token(looptoken) + op = cpu.execute_token(looptoken, *args) assert op.identifier == 3 assert cpu.get_latest_value_int(0) == 12 assert cpu.get_latest_value_int(1) == 23 @@ -378,17 +324,8 @@ operations[8].setfailargs([v5, v9]) looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, -8) - cpu.set_future_value_int(1, 0) - cpu.set_future_value_int(2, 62) - cpu.set_future_value_int(3, 35) - cpu.set_future_value_int(4, 16) - cpu.set_future_value_int(5, 9) - cpu.set_future_value_int(6, 30) - cpu.set_future_value_int(7, 581610154) - cpu.set_future_value_int(8, -1) - cpu.set_future_value_int(9, 738197503) - op = cpu.execute_token(looptoken) + args = [-8 , 0 , 62 , 35 , 16 , 9 , 30 , 581610154 , -1 , 738197503] + op = 
cpu.execute_token(looptoken, *args) assert op.identifier == 2 assert cpu.get_latest_value_int(0) == 16 assert cpu.get_latest_value_int(1) == -1 @@ -436,17 +373,8 @@ operations[-2].setfailargs([v9, v4, v10, v11, v14]) looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, -39) - cpu.set_future_value_int(1, -18) - cpu.set_future_value_int(2, 1588243114) - cpu.set_future_value_int(3, -9) - cpu.set_future_value_int(4, -4) - cpu.set_future_value_int(5, 1252698794) - cpu.set_future_value_int(6, 0) - cpu.set_future_value_int(7, 715827882) - cpu.set_future_value_int(8, -15) - cpu.set_future_value_int(9, 536870912) - op = cpu.execute_token(looptoken) + args = [-39 , -18 , 1588243114 , -9 , -4 , 1252698794 , 0 , 715827882 , -15 , 536870912] + op = cpu.execute_token(looptoken, *args) assert op.identifier == 1 assert cpu.get_latest_value_int(0) == -15 assert cpu.get_latest_value_int(1) == -9 @@ -499,17 +427,8 @@ operations[9].setfailargs([v5, v7, v12, v14, v2, v13, v8]) looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 0) - cpu.set_future_value_int(1, -2) - cpu.set_future_value_int(2, 24) - cpu.set_future_value_int(3, 1) - cpu.set_future_value_int(4, -4) - cpu.set_future_value_int(5, 13) - cpu.set_future_value_int(6, -95) - cpu.set_future_value_int(7, 33) - cpu.set_future_value_int(8, 2) - cpu.set_future_value_int(9, -44) - op = cpu.execute_token(looptoken) + args = [0 , -2 , 24 , 1 , -4 , 13 , -95 , 33 , 2 , -44] + op = cpu.execute_token(looptoken, *args) assert op.identifier == 3 assert cpu.get_latest_value_int(0) == -4 assert cpu.get_latest_value_int(1) == -95 @@ -549,17 +468,8 @@ operations[2].setfailargs([v10, v3, v6, v11, v9, v2]) looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 3) - cpu.set_future_value_int(1, -5) - cpu.set_future_value_int(2, 1431655765) - cpu.set_future_value_int(3, 47) - cpu.set_future_value_int(4, 12) - cpu.set_future_value_int(5, 1789569706) - cpu.set_future_value_int(6, 15) - cpu.set_future_value_int(7, 939524096) - cpu.set_future_value_int(8, 16) - cpu.set_future_value_int(9, -43) - op = cpu.execute_token(looptoken) + args = [3 , -5 , 1431655765 , 47 , 12 , 1789569706 , 15 , 939524096 , 16 , -43] + op = cpu.execute_token(looptoken, *args) assert op.identifier == 1 assert cpu.get_latest_value_int(0) == -43 assert cpu.get_latest_value_int(1) == 1431655765 @@ -606,17 +516,8 @@ operations[4].setfailargs([v14]) looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 14) - cpu.set_future_value_int(1, -20) - cpu.set_future_value_int(2, 18) - cpu.set_future_value_int(3, -2058005163) - cpu.set_future_value_int(4, 6) - cpu.set_future_value_int(5, 1) - cpu.set_future_value_int(6, -16) - cpu.set_future_value_int(7, 11) - cpu.set_future_value_int(8, 0) - cpu.set_future_value_int(9, 19) - op = cpu.execute_token(looptoken) + args = [14 , -20 , 18 , -2058005163 , 6 , 1 , -16 , 11 , 0 , 19] + op = cpu.execute_token(looptoken, *args) assert op.identifier == 1 assert cpu.get_latest_value_int(0) == -2058005163 assert cpu.get_latest_value_int(1) == 19 diff --git a/pypy/jit/backend/arm/test/test_recompilation.py b/pypy/jit/backend/arm/test/test_recompilation.py --- a/pypy/jit/backend/arm/test/test_recompilation.py +++ b/pypy/jit/backend/arm/test/test_recompilation.py @@ -21,8 +21,7 @@ finish(i3, descr=fdescr2) ''' bridge = self.attach_bridge(ops, loop, -2) - 
self.cpu.set_future_value_int(0, 0) - fail = self.run(loop) + fail = self.run(loop, 0) assert fail.identifier == 2 assert self.getint(0) == 21 @@ -57,8 +56,7 @@ #assert descr._x86_bridge_param_depth == 0 # the force_spill() forces the stack to grow assert new > previous - self.cpu.set_future_value_int(0, 0) - fail = self.run(loop) + fail = self.run(loop, 0) assert fail.identifier == 2 assert self.getint(0) == 21 assert self.getint(1) == 22 @@ -85,8 +83,7 @@ jump(i3, 1, 2, 3, 4, 5, 6, 7, descr=targettoken) ''' bridge = self.attach_bridge(ops, other_loop, 1) - self.cpu.set_future_value_int(0, 1) - fail = self.run(other_loop) + fail = self.run(other_loop, 1) assert fail.identifier == 1 def test_bridge_jumps_to_self_deeper(self): @@ -125,10 +122,7 @@ # the force_spill() forces the stack to grow assert guard_op.getdescr()._arm_bridge_frame_depth > loop_frame_depth #assert guard_op.getdescr()._x86_bridge_param_depth == 0 - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 0) - self.cpu.set_future_value_int(2, 0) - self.run(loop) + self.run(loop, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 @@ -152,10 +146,7 @@ jump(i3, 0, 1, descr=targettoken) ''' bridge = self.attach_bridge(ops, loop, 5) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 0) - self.cpu.set_future_value_int(2, 0) - self.run(loop) + self.run(loop, 0, 0, 0) assert self.getint(0) == 1 assert self.getint(1) == 20 diff --git a/pypy/jit/backend/arm/test/test_regalloc.py b/pypy/jit/backend/arm/test/test_regalloc.py --- a/pypy/jit/backend/arm/test/test_regalloc.py +++ b/pypy/jit/backend/arm/test/test_regalloc.py @@ -151,19 +151,20 @@ loop = self.parse(ops) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - for i, arg in enumerate(args): + args = [] + for arg in args: if isinstance(arg, int): - self.cpu.set_future_value_int(i, arg) + args.append(arg) elif isinstance(arg, float): arg = longlong.getfloatstorage(arg) - self.cpu.set_future_value_float(i, arg) + args.append(arg) else: assert isinstance(lltype.typeOf(arg), lltype.Ptr) llgcref = lltype.cast_opaque_ptr(llmemory.GCREF, arg) - self.cpu.set_future_value_ref(i, llgcref) + args.append(llgcref) loop._jitcelltoken = looptoken if run: - self.cpu.execute_token(looptoken) + self.cpu.execute_token(looptoken, *args) return loop def prepare_loop(self, ops): @@ -202,8 +203,8 @@ loop._jitcelltoken) return bridge - def run(self, loop): - return self.cpu.execute_token(loop._jitcelltoken) + def run(self, loop, *args): - return self.cpu.execute_token(loop._jitcelltoken) + return self.cpu.execute_token(loop._jitcelltoken, *args) class TestRegallocSimple(BaseTestRegalloc): @@ -245,8 +246,7 @@ jump(i4, i4, i4, i4, descr=targettoken) ''' self.attach_bridge(bridge_ops, loop2, 5) - self.cpu.set_future_value_int(0, 0) - self.run(loop2) + self.run(loop2, 0) assert self.getint(0) == 31 assert self.getint(1) == 30 assert self.getint(2) == 30 @@ -284,8 +284,7 @@ loop = self.interpret(ops, [0]) assert self.getint(0) == 1 self.attach_bridge(bridge_ops, loop, 2) - self.cpu.set_future_value_int(0, 0) - self.run(loop) + self.run(loop, 0) assert self.getint(0) == 1 def test_inputarg_unused(self): @@ -311,9 +310,7 @@ assert self.getint(0) == 0 assert self.getint(1) == 10 self.attach_bridge(bridge_ops, loop, 0) - self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 10) - self.run(loop) + self.run(loop, 0, 10) assert self.getint(0) == 0 assert self.getint(1) == 10 @@ -330,9 +327,7 @@ finish(1, 2) ''' self.attach_bridge(bridge_ops, loop, 0) - 
self.cpu.set_future_value_int(0, 0) - self.cpu.set_future_value_int(1, 1) - self.run(loop) + self.run(loop, 0, 1) def test_spill_for_constant(self): ops = ''' @@ -450,9 +445,8 @@ finish(i0, i1, i2, i3, i4, i5, i6, i7, i8) ''' self.attach_bridge(bridge_ops, loop, 1) - for i in range(9): - self.cpu.set_future_value_int(i, i) - self.run(loop) + args = [i for i in range(9)] + self.run(loop, *args) assert self.getints(9) == range(9) def test_loopargs(self): @@ -725,9 +719,7 @@ ''' self.attach_bridge(ops, loop, -2) - self.cpu.set_future_value_int(0, 4) - self.cpu.set_future_value_int(1, 7) - self.run(loop) + self.run(loop, 4, 7) assert self.getint(0) == 5 * 7 def test_bridge_calls_2(self): @@ -746,9 +738,7 @@ ''' self.attach_bridge(ops, loop, -2) - self.cpu.set_future_value_int(0, 4) - self.cpu.set_future_value_int(1, 7) - self.run(loop) + self.run(loop, 4, 7) assert self.getint(0) == 29 @@ -787,7 +777,7 @@ large = self.interpret(loop1, range(11), run=False) large._jitcelltoken.outermost_jitdriver_sd = FakeJitDriverSD() self.namespace['looptoken'] = large._jitcelltoken - assert self.namespace['looptoken']._arm_bootstrap_code != 0 + assert self.namespace['looptoken']._arm_func_addr != 0 loop2 = """ [i0] i1 = force_token() diff --git a/pypy/jit/backend/arm/test/test_regalloc2.py b/pypy/jit/backend/arm/test/test_regalloc2.py --- a/pypy/jit/backend/arm/test/test_regalloc2.py +++ b/pypy/jit/backend/arm/test/test_regalloc2.py @@ -25,8 +25,7 @@ cpu.setup_once() looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 9) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, 9) assert cpu.get_latest_value_int(0) == (9 >> 3) assert cpu.get_latest_value_int(1) == (~18) @@ -48,8 +47,7 @@ cpu.setup_once() looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, -10) - cpu.execute_token(looptoken) + cpu.execute_token(looptoken, -10) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == -1000 assert cpu.get_latest_value_int(2) == 1 @@ -145,17 +143,8 @@ cpu.setup_once() looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, -13) - cpu.set_future_value_int(1, 10) - cpu.set_future_value_int(2, 10) - cpu.set_future_value_int(3, 8) - cpu.set_future_value_int(4, -8) - cpu.set_future_value_int(5, -16) - cpu.set_future_value_int(6, -18) - cpu.set_future_value_int(7, 46) - cpu.set_future_value_int(8, -12) - cpu.set_future_value_int(9, 26) - cpu.execute_token(looptoken) + args = [-13 , 10 , 10 , 8 , -8 , -16 , -18 , 46 , -12 , 26] + cpu.execute_token(looptoken, *args) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == 0 assert cpu.get_latest_value_int(2) == 0 @@ -260,17 +249,8 @@ cpu.setup_once() looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) - cpu.set_future_value_int(0, 17) - cpu.set_future_value_int(1, -20) - cpu.set_future_value_int(2, -6) - cpu.set_future_value_int(3, 6) - cpu.set_future_value_int(4, 1) - cpu.set_future_value_int(5, 13) - cpu.set_future_value_int(6, 13) - cpu.set_future_value_int(7, 9) - cpu.set_future_value_int(8, 49) - cpu.set_future_value_int(9, 8) - cpu.execute_token(looptoken) + args = [17 , -20 , -6 , 6 , 1 , 13 , 13 , 9 , 49 , 8] + cpu.execute_token(looptoken, *args) assert cpu.get_latest_value_int(0) == 0 assert cpu.get_latest_value_int(1) == 8 assert cpu.get_latest_value_int(2) == 1 diff --git a/pypy/jit/backend/arm/test/test_runner.py 
b/pypy/jit/backend/arm/test/test_runner.py --- a/pypy/jit/backend/arm/test/test_runner.py +++ b/pypy/jit/backend/arm/test/test_runner.py @@ -57,9 +57,8 @@ ResOperation(rop.FINISH, out, None, descr=BasicFailDescr(1)), ] cpu.compile_loop(inp, operations, looptoken) - for i in range(1, 15): - self.cpu.set_future_value_int(i - 1, i) - self.cpu.execute_token(looptoken) + args = [i for i in range(1, 15)] + self.cpu.execute_token(looptoken, *args) output = [self.cpu.get_latest_value_int(i - 1) for i in range(1, 15)] expected = [3, 7, 11, 15, 19, 23, 27, 3, 7, 11, 15, 19, 23, 27] assert output == expected @@ -100,26 +99,13 @@ self.cpu.compile_loop(loop2.inputargs, loop2.operations, lt2) self.cpu.compile_loop(loop3.inputargs, loop3.operations, lt3) self.cpu.compile_loop(loop1.inputargs, loop1.operations, lt1) - self.cpu.set_future_value_int(0, 11) - self.cpu.execute_token(lt1) + self.cpu.execute_token(lt1, 11) assert self.cpu.get_latest_value_int(0) == 12 self.cpu.redirect_call_assembler(lt2, lt3) - self.cpu.set_future_value_int(0, 11) - self.cpu.execute_token(lt1) + self.cpu.execute_token(lt1, 11) assert self.cpu.get_latest_value_int(0) == 10 - def test_new_array_with_const_length(self): - """ Test for an issue with malloc_varsize when the size is an imm - that gets lost around the call to malloc""" - A = lltype.GcArray(lltype.Signed) - arraydescr = self.cpu.arraydescrof(A) - r1 = self.execute_operation(rop.NEW_ARRAY, [ConstInt(6)], - 'ref', descr=arraydescr) - a = lltype.cast_opaque_ptr(lltype.Ptr(A), r1.value) - assert a[0] == 0 - assert len(a) == 6 - def test_cond_call_gc_wb_array_card_marking_fast_path(self): py.test.skip('ignore this fast path for now') diff --git a/pypy/jit/backend/arm/test/test_ztranslate_backend.py b/pypy/jit/backend/arm/test/test_ztranslate_backend.py --- a/pypy/jit/backend/arm/test/test_ztranslate_backend.py +++ b/pypy/jit/backend/arm/test/test_ztranslate_backend.py @@ -47,12 +47,11 @@ ResOperation(rop.JUMP, [i1b], None, descr=looptoken), ] bridge[1].setfailargs([i1b]) - assert looptoken._arm_bootstrap_code != 0 + assert looptoken._arm_func_addr != 0 assert looptoken._arm_loop_code != 0 cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken, True) - cpu.set_future_value_int(0, 2) - fail = cpu.execute_token(looptoken) + fail = cpu.execute_token(looptoken, 2) res = cpu.get_latest_value_int(0) return fail.identifier * 1000 + res diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -1225,7 +1225,7 @@ loop = parse(loopops) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - args = [1] + args = [1] args.append(longlong.getfloatstorage(132.25)) args.append(longlong.getfloatstorage(0.75)) fail = self.cpu.execute_token(looptoken, *args) #xxx check @@ -1242,10 +1242,10 @@ ] self.cpu.compile_bridge(loop.operations[-2].getdescr(), fboxes, bridgeops, looptoken) - self.cpu.set_future_value_int(0, 1) - self.cpu.set_future_value_float(1, longlong.getfloatstorage(132.25)) - self.cpu.set_future_value_float(2, longlong.getfloatstorage(0.75)) - fail = self.cpu.execute_token(looptoken) + args = [1, + longlong.getfloatstorage(132.25), + longlong.getfloatstorage(0.75)] + fail = self.cpu.execute_token(looptoken, *args) assert fail.identifier == 100 f1 = self.cpu.get_latest_value_float(0) f2 = self.cpu.get_latest_value_float(1) @@ -1278,8 +1278,7 @@ # cpu = self.cpu for value in [-42, 0, 1, 10]: - cpu.set_future_value_int(0, 
value) - fail = cpu.execute_token(looptoken) + fail = cpu.execute_token(looptoken, value) # expected = compare(value) expected ^= guard_case diff --git a/pypy/jit/backend/test/test_frame_size.py b/pypy/jit/backend/test/test_frame_size.py --- a/pypy/jit/backend/test/test_frame_size.py +++ b/pypy/jit/backend/test/test_frame_size.py @@ -85,7 +85,7 @@ """ large = self.interpret(large_frame_loop, range(15), run=False) self.namespace['looptoken'] = large.token - assert self.namespace['looptoken']._arm_bootstrap_code != 0 + assert self.namespace['looptoken']._arm_func_addr != 0 small_frame_loop = """ [i0] i1 = int_add(i0, 1) From noreply at buildbot.pypy.org Fri Dec 30 17:13:06 2011 From: noreply at buildbot.pypy.org (bivab) Date: Fri, 30 Dec 2011 17:13:06 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: remove dead code Message-ID: <20111230161306.D2C5082C03@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50963:0768ebedbaed Date: 2011-12-30 17:10 +0100 http://bitbucket.org/pypy/pypy/changeset/0768ebedbaed/ Log: remove dead code diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -1188,21 +1188,6 @@ _mixin_ = True - # from: ../x86/regalloc.py:750 - # called from regalloc - # XXX kill this function at some point - def _regalloc_malloc_varsize(self, size, size_box, vloc, vbox, - ofs_items_loc, regalloc, result): - self.mc.MUL(size.value, size.value, vloc.value) - if ofs_items_loc.is_imm(): - self.mc.ADD_ri(size.value, size.value, ofs_items_loc.value) - else: - self.mc.ADD_rr(size.value, size.value, ofs_items_loc.value) - force_index = self.write_new_force_index() - regalloc.force_spill_var(vbox) - self._emit_call(force_index, self.malloc_func_addr, [size_box], - regalloc, result=result) - def emit_op_call_malloc_gc(self, op, arglocs, regalloc, fcond): self.propagate_memoryerror_if_r0_is_null() return fcond From noreply at buildbot.pypy.org Fri Dec 30 17:13:01 2011 From: noreply at buildbot.pypy.org (bivab) Date: Fri, 30 Dec 2011 17:13:01 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: update compute_vars_longevity and return computed values instead of directly assigning them Message-ID: <20111230161301.C476A82C05@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50960:43bf7458dfe1 Date: 2011-12-30 13:58 +0100 http://bitbucket.org/pypy/pypy/changeset/43bf7458dfe1/ Log: update compute_vars_longevity and return computed values instead of directly assigning them diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -491,7 +491,7 @@ # only to guard operations or to jump or to finish produced = {} last_used = {} - useful = {} + last_real_usage = {} for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -502,10 +502,13 @@ opnum = op.getopnum() for j in range(op.numargs()): arg = op.getarg(j) - if opnum != rop.JUMP and opnum != rop.FINISH: - useful[arg] = None - if isinstance(arg, Box) and arg not in last_used: + if not isinstance(arg, Box): + continue + if arg not in last_used: last_used[arg] = i + if opnum != rop.JUMP and opnum != rop.LABEL: + if arg not in last_real_usage: + last_real_usage[arg] = i if op.is_guard(): for arg in op.getfailargs(): if arg is None: # hole @@ -513,7 +516,7 @@ assert isinstance(arg, Box) if arg not in last_used: 
last_used[arg] = i - + # longevity = {} for arg in produced: if arg in last_used: @@ -529,8 +532,7 @@ longevity[arg] = (0, last_used[arg]) del last_used[arg] assert len(last_used) == 0 - return longevity, useful - + return longevity, last_real_usage def is_comparison_or_ovf_op(opnum): from pypy.jit.metainterp.resoperation import opclasses diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -174,7 +174,10 @@ operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - self._compute_vars_longevity(inputargs, operations) + longevity, last_real_usage = compute_vars_longevity( + inputargs, operations) + self.longevity = longevity + self.last_real_usage = last_real_usage self.rm = gpr_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) From noreply at buildbot.pypy.org Fri Dec 30 17:13:08 2011 From: noreply at buildbot.pypy.org (bivab) Date: Fri, 30 Dec 2011 17:13:08 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: fix indentation error Message-ID: <20111230161308.EA54682C03@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50964:23a8ffceb879 Date: 2011-12-30 17:11 +0100 http://bitbucket.org/pypy/pypy/changeset/23a8ffceb879/ Log: fix indentation error diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -466,7 +466,7 @@ while mc.get_relative_pos() < self.min_bytes_before_label: mc.NOP() - def loc(self, v): + def loc(self, v): if v is None: # xxx kludgy return None if v.type == FLOAT: From noreply at buildbot.pypy.org Fri Dec 30 17:13:03 2011 From: noreply at buildbot.pypy.org (bivab) Date: Fri, 30 Dec 2011 17:13:03 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: move the descrs unpacking to llsuport to be shared between the backends Message-ID: <20111230161303.466F782C06@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50961:1348332a6293 Date: 2011-12-30 14:00 +0100 http://bitbucket.org/pypy/pypy/changeset/1348332a6293/ Log: move the descrs unpacking to llsuport to be shared between the backends diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -478,3 +478,31 @@ cache[key] = calldescr assert repr(calldescr.result_size) == repr(result_size) return calldescr + + +def unpack_arraydescr(arraydescr): + assert isinstance(arraydescr, ArrayDescr) + ofs = arraydescr.basesize + size = arraydescr.itemsize + sign = arraydescr.is_item_signed() + return size, ofs, sign + + +def unpack_fielddescr(fielddescr): + assert isinstance(fielddescr, FieldDescr) + ofs = fielddescr.offset + size = fielddescr.field_size + sign = fielddescr.is_field_signed() + return ofs, size, sign +unpack_fielddescr._always_inline_ = True + + +def unpack_interiorfielddescr(descr): + assert isinstance(descr, InteriorFieldDescr) + arraydescr = descr.arraydescr + ofs = arraydescr.basesize + itemsize = arraydescr.itemsize + fieldsize = descr.fielddescr.field_size + sign = descr.fielddescr.is_field_signed() + ofs += descr.fielddescr.offset + return ofs, itemsize, fieldsize, sign diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ 
-19,6 +19,9 @@ from pypy.jit.backend.llsupport.descr import FieldDescr, ArrayDescr from pypy.jit.backend.llsupport.descr import CallDescr, SizeDescr from pypy.jit.backend.llsupport.descr import InteriorFieldDescr +from pypy.jit.backend.llsupport.descr import unpack_arraydescr +from pypy.jit.backend.llsupport.descr import unpack_fielddescr +from pypy.jit.backend.llsupport.descr import unpack_interiorfielddescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox, compute_vars_longevity, is_comparison_or_ovf_op from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE @@ -917,33 +920,11 @@ gc_ll_descr.get_nursery_top_addr(), size) - def _unpack_arraydescr(self, arraydescr): - assert isinstance(arraydescr, ArrayDescr) - ofs = arraydescr.basesize - size = arraydescr.itemsize - sign = arraydescr.is_item_signed() - return size, ofs, sign - - def _unpack_fielddescr(self, fielddescr): - assert isinstance(fielddescr, FieldDescr) - ofs = fielddescr.offset - size = fielddescr.field_size - sign = fielddescr.is_field_signed() - return imm(ofs), imm(size), sign - _unpack_fielddescr._always_inline_ = True - - def _unpack_interiorfielddescr(self, descr): - assert isinstance(descr, InteriorFieldDescr) - arraydescr = descr.arraydescr - ofs = arraydescr.basesize - itemsize = arraydescr.itemsize - fieldsize = descr.fielddescr.field_size - sign = descr.fielddescr.is_field_signed() - ofs += descr.fielddescr.offset - return imm(ofs), imm(itemsize), imm(fieldsize), sign def consider_setfield_gc(self, op): - ofs_loc, size_loc, _ = self._unpack_fielddescr(op.getdescr()) + ofs, size, _ = unpack_fielddescr(op.getdescr()) + ofs_loc = imm(ofs) + size_loc = imm(size) assert isinstance(size_loc, ImmedLoc) if size_loc.value == 1: need_lower_byte = True @@ -959,8 +940,8 @@ consider_setfield_raw = consider_setfield_gc def consider_setinteriorfield_gc(self, op): - t = self._unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, _ = t + t = unpack_interiorfielddescr(op.getdescr()) + ofs, itemsize, fieldsize = imm(t[0]), imm(t[1]), imm(t[2]) args = op.getarglist() if fieldsize.value == 1: need_lower_byte = True @@ -1001,7 +982,7 @@ consider_unicodesetitem = consider_strsetitem def consider_setarrayitem_gc(self, op): - itemsize, ofs, _ = self._unpack_arraydescr(op.getdescr()) + itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) if itemsize == 1: @@ -1018,7 +999,9 @@ consider_setarrayitem_raw = consider_setarrayitem_gc def consider_getfield_gc(self, op): - ofs_loc, size_loc, sign = self._unpack_fielddescr(op.getdescr()) + ofs, size, sign = unpack_fielddescr(op.getdescr()) + ofs_loc = imm(ofs) + size_loc = imm(size) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.rm.possibly_free_vars(args) @@ -1034,7 +1017,7 @@ consider_getfield_gc_pure = consider_getfield_gc def consider_getarrayitem_gc(self, op): - itemsize, ofs, sign = self._unpack_arraydescr(op.getdescr()) + itemsize, ofs, sign = unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) @@ -1051,8 +1034,8 @@ consider_getarrayitem_gc_pure = consider_getarrayitem_gc def consider_getinteriorfield_gc(self, op): - t = self._unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t + t = unpack_interiorfielddescr(op.getdescr()) + ofs, itemsize, fieldsize, sign = imm(t[0]), 
imm(t[1]), imm(t[2]), t[3] if sign: sign_loc = imm1 else: From noreply at buildbot.pypy.org Fri Dec 30 17:13:11 2011 From: noreply at buildbot.pypy.org (bivab) Date: Fri, 30 Dec 2011 17:13:11 +0100 (CET) Subject: [pypy-commit] pypy arm-backend-2: actually call malloc when emitting call_malloc_gc Message-ID: <20111230161311.1257982C03@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: arm-backend-2 Changeset: r50965:aad89a4cbc45 Date: 2011-12-30 17:12 +0100 http://bitbucket.org/pypy/pypy/changeset/aad89a4cbc45/ Log: actually call malloc when emitting call_malloc_gc diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -1189,6 +1189,7 @@ _mixin_ = True def emit_op_call_malloc_gc(self, op, arglocs, regalloc, fcond): + self.emit_op_call(op, arglocs, regalloc, fcond) self.propagate_memoryerror_if_r0_is_null() return fcond From noreply at buildbot.pypy.org Fri Dec 30 18:22:15 2011 From: noreply at buildbot.pypy.org (edelsohn) Date: Fri, 30 Dec 2011 18:22:15 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: allocate and deallocate SCRATCH in store_reg. Message-ID: <20111230172215.4E7E482C03@wyvern.cs.uni-duesseldorf.de> Author: edelsohn Branch: ppc-jit-backend Changeset: r50966:03c64ec12500 Date: 2011-12-30 12:16 -0500 http://bitbucket.org/pypy/pypy/changeset/03c64ec12500/ Log: allocate and deallocate SCRATCH in store_reg. refer to r.SCRATCH in b_abs and bl_abs. diff --git a/pypy/jit/backend/ppc/ppcgen/codebuilder.py b/pypy/jit/backend/ppc/ppcgen/codebuilder.py --- a/pypy/jit/backend/ppc/ppcgen/codebuilder.py +++ b/pypy/jit/backend/ppc/ppcgen/codebuilder.py @@ -990,11 +990,12 @@ self.ldx(rD.value, 0, rD.value) def store_reg(self, source_reg, addr): - self.load_imm(r.r0, addr) + self.alloc_scratch_reg(addr) if IS_PPC_32: - self.stwx(source_reg.value, 0, r.r0.value) + self.stwx(source_reg.value, 0, r.SCRATCH.value) else: - self.stdx(source_reg.value, 0, r.r0.value) + self.stdx(source_reg.value, 0, r.SCRATCH.value) + self.free_scratch_reg() def b_offset(self, offset): curpos = self.currpos() @@ -1021,7 +1022,7 @@ def b_abs(self, address, trap=False): self.alloc_scratch_reg(address) - self.mtctr(r.r0.value) + self.mtctr(r.SCRATCH.value) self.free_scratch_reg() if trap: self.trap() @@ -1029,7 +1030,7 @@ def bl_abs(self, address): self.alloc_scratch_reg(address) - self.mtctr(r.r0.value) + self.mtctr(r.SCRATCH.value) self.free_scratch_reg() self.bctrl() From noreply at buildbot.pypy.org Fri Dec 30 18:22:16 2011 From: noreply at buildbot.pypy.org (edelsohn) Date: Fri, 30 Dec 2011 18:22:16 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: Materialize function address once. Load descriptor value using offsets. Message-ID: <20111230172216.84EF082C03@wyvern.cs.uni-duesseldorf.de> Author: edelsohn Branch: ppc-jit-backend Changeset: r50967:d4c1f774d5ac Date: 2011-12-30 12:19 -0500 http://bitbucket.org/pypy/pypy/changeset/d4c1f774d5ac/ Log: Materialize function address once. Load descriptor value using offsets. Save and restore TOC around CALL in emit_call. 
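Background for the change below: on 64-bit PowerPC (the AIX-style ABI, also used by big-endian ppc64 Linux), a function's "address" is really the address of a three-word function descriptor: word 0 is the entry point, word 1 the callee's TOC pointer (r2), word 2 the environment pointer (r11). The patch therefore materializes the descriptor address once in r11 and reads the three words with register+offset loads instead of loading three separate absolute addresses. A condensed sketch of the resulting call sequence, assuming the names used in the patch (mc, r, WORD) are in scope; the helper name is illustrative, not part of the diff:

    def call_via_descriptor(mc, addr):
        # SP + 5*WORD is the TOC save slot in the caller's linkage area
        mc.std(r.TOC.value, r.SP.value, 5 * WORD)    # spill caller's TOC (r2)
        mc.load_imm(r.r11, addr)                     # r11 -> function descriptor
        mc.load(r.SCRATCH.value, r.r11.value, 0)     # word 0: entry point
        mc.mtctr(r.SCRATCH.value)
        mc.load(r.TOC.value, r.r11.value, WORD)      # word 1: callee's TOC
        mc.load(r.r11.value, r.r11.value, 2 * WORD)  # word 2: environment pointer
        mc.bctrl()                                   # indirect call through CTR
        mc.ld(r.TOC.value, r.SP.value, 5 * WORD)     # restore caller's TOC

The caller's TOC has to be saved and restored around the call because the callee installs its own TOC in r2.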
diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py --- a/pypy/jit/backend/ppc/ppcgen/opassembler.py +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -414,12 +414,14 @@ if IS_PPC_32: self.mc.bl_abs(adr) else: - self.mc.load_from_addr(r.SCRATCH, adr) - self.mc.load_from_addr(r.TOC, adr + WORD) - self.mc.load_from_addr(r.r11, adr + 2 * WORD) + self.mc.std(r.TOC.value, r.SP.value, 5 * WORD) + self.mc.load_imm(r.r11, adr) + self.mc.load(r.SCRATCH.value, r.r11.value, 0) self.mc.mtctr(r.SCRATCH.value) + self.mc.load(r.TOC.value, r.r11.value, WORD) + self.mc.load(r.r11.value, r.r11.value, 2 * WORD) self.mc.bctrl() - + self.mc.ld(r.TOC.value, r.SP.value, 5 * WORD) self.mark_gc_roots(force_index) regalloc.possibly_free_vars(args) @@ -880,10 +882,11 @@ if IS_PPC_32: self.mc.bl_abs(func) else: - self.mc.load_from_addr(r.SCRATCH, func) - self.mc.load_from_addr(r.TOC, func + WORD) - self.mc.load_from_addr(r.r11, func + 2 * WORD) + self.mc.load_imm(r.r11, func) + self.mc.load(r.SCRATCH.value, r.r11.value, 0) self.mc.mtctr(r.SCRATCH.value) + self.mc.load(r.TOC.value, r.r11.value, WORD) + self.mc.load(r.r11.value, r.r11.value, 2 * WORD) self.mc.bctrl() # patch the JZ above @@ -952,10 +955,11 @@ if IS_PPC_32: self.mc.bl_abs(asm_helper_adr) else: - self.mc.load_from_addr(r.SCRATCH, asm_helper_adr) - self.mc.load_from_addr(r.TOC, asm_helper_adr + WORD) - self.mc.load_from_addr(r.r11, asm_helper_adr + 2 * WORD) - self.mc.mtctr(r.r0.value) + self.mc.load_imm(r.r11, asm_helper_adr) + self.mc.load(r.SCRATCH.value, r.r11.value, 0) + self.mc.mtctr(r.SCRATCH.value) + self.mc.load(r.TOC.value, r.r11.value, WORD) + self.mc.load(r.r11.value, r.r11.value, 2 * WORD) self.mc.bctrl() if op.result: From noreply at buildbot.pypy.org Fri Dec 30 18:22:18 2011 From: noreply at buildbot.pypy.org (edelsohn) Date: Fri, 30 Dec 2011 18:22:18 +0100 (CET) Subject: [pypy-commit] pypy ppc-jit-backend: Materialize function address once. Load function descriptor values via offsets. Message-ID: <20111230172218.677AD82C03@wyvern.cs.uni-duesseldorf.de> Author: edelsohn Branch: ppc-jit-backend Changeset: r50968:16225a003d71 Date: 2011-12-30 12:21 -0500 http://bitbucket.org/pypy/pypy/changeset/16225a003d71/ Log: Materialize function address once. Load function descriptor values via offsets. 
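This commit applies the same function-descriptor handling to the helper paths in ppc_assembler.py. In the failure-recovery path, the superseded code dereferenced the descriptor at assemble time (through an rffi cast) and baked the three words in as immediates; the new code keeps only the descriptor address and walks the descriptor at run time, as in opassembler.py. A condensed before/after sketch, paraphrased from the hunk below rather than quoted literally (variable names in the "before" part are illustrative):

    # before: read the descriptor while assembling, emit its words as immediates
    intp = lltype.Ptr(lltype.Array(lltype.Signed, hints={'nolength': True}))
    descr = rffi.cast(intp, decode_func_addr)
    entry, toc_value, env_value = descr[0], descr[1], descr[2]
    # ... later: mc.load_imm(r.r2, toc_value); mc.load_imm(r.r11, env_value) ...

    # after: emit code that dereferences the descriptor at run time
    mc.load_imm(r.r11, rffi.cast(lltype.Signed, decode_func_addr))
    mc.load(r.SCRATCH.value, r.r11.value, 0)         # entry point
    mc.mtctr(r.SCRATCH.value)
    mc.load(r.TOC.value, r.r11.value, WORD)          # callee's TOC
    mc.load(r.r11.value, r.r11.value, 2 * WORD)      # environment pointer
    mc.bctrl()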
diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -280,10 +280,11 @@ if IS_PPC_32: mc.bl_abs(addr) else: - mc.load_from_addr(r.SCRATCH, addr) - mc.load_from_addr(r.r2, addr + WORD) - mc.load_from_addr(r.r11, addr + 2 * WORD) + mc.load_imm(r.r11, addr) + mc.load(r.SCRATCH.value, r.r11.value, 0) mc.mtctr(r.SCRATCH.value) + mc.load(r.r2.value, r.r11.value, WORD) + mc.load(r.r11.value, r.r11.value, 2 * WORD) mc.bctrl() #mc.alloc_scratch_reg(self.cpu.propagate_exception_v) #mc.mr(r.RES.value, r.SCRATCH.value) @@ -300,10 +301,11 @@ if IS_PPC_32: mc.bl_abs(addr) else: - mc.load_from_addr(r.SCRATCH, addr) - mc.load_from_addr(r.r2, addr + WORD) - mc.load_from_addr(r.r11, addr + 2 * WORD) + mc.load_imm(r.r11, addr) + mc.load(r.SCRATCH.value, r.r11.value, 0) mc.mtctr(r.SCRATCH.value) + mc.load(r.r2.value, r.r11.value, WORD) + mc.load(r.r11.value, r.r11.value, 2 * WORD) mc.bctrl() mc.b_abs(self.exit_code_adr) @@ -325,32 +327,28 @@ self._save_managed_regs(mc) decode_func_addr = llhelper(self.recovery_func_sign, self.failure_recovery_func) - if IS_PPC_32: - addr = rffi.cast(lltype.Signed, decode_func_addr) - else: - intp = lltype.Ptr(lltype.Array(lltype.Signed, hints={'nolength': True})) - descr = rffi.cast(intp, decode_func_addr) - addr = descr[0] - r2_value = descr[1] - r11_value = descr[2] + addr = rffi.cast(lltype.Signed, decode_func_addr) # load parameters into parameter registers mc.load(r.r3.value, r.SPP.value, self.ENCODING_AREA) # address of state encoding mc.mr(r.r4.value, r.SPP.value) # load spilling pointer # # load address of decoding function into SCRATCH - mc.alloc_scratch_reg(addr) - if IS_PPC_64: - mc.std(r.r2.value, r.SP.value, 5 * WORD) - # load TOC pointer and environment pointer - mc.load_imm(r.r2, r2_value) - mc.load_imm(r.r11, r11_value) + if IS_PPC_32: + mc.alloc_scratch_reg(addr) + mc.mtctr(r.SCRATCH.value) + mc.free_scratch_reg() # ... and branch there - mc.mtctr(r.SCRATCH.value) - mc.free_scratch_reg() - mc.bctrl() - if IS_PPC_64: - mc.ld(r.r2.value, r.SP.value, 5 * WORD) + mc.bctrl() + else: + mc.std(r.TOC.value, r.SP.value, 5 * WORD) + mc.load_imm(r.r11, addr) + mc.load(r.SCRATCH.value, r.r11.value, 0) + mc.mtctr(r.SCRATCH.value) + mc.load(r.TOC.value, r.r11.value, WORD) + mc.load(r.r11.value, r.r11.value, 2 * WORD) + mc.bctrl() + mc.ld(r.TOC.value, r.SP.value, 5 * WORD) # # save SPP in r5 # (assume that r5 has been written to failboxes)